Merge branch 'gpio/merge' of git://git.secretlab.ca/git/linux-2.6 into grant

Signed-off-by: Linus Walleij <linus.walleij@linaro.org>
diff --git a/.mailmap b/.mailmap
index 9b0d026..658003a 100644
--- a/.mailmap
+++ b/.mailmap
@@ -111,5 +111,8 @@
 Uwe Kleine-König <ukl@pengutronix.de>
 Uwe Kleine-König <Uwe.Kleine-Koenig@digi.com>
 Valdis Kletnieks <Valdis.Kletnieks@vt.edu>
+Viresh Kumar <viresh.linux@gmail.com> <viresh.kumar@st.com>
 Takashi YOSHII <takashi.yoshii.zj@renesas.com>
 Yusuke Goda <goda.yusuke@renesas.com>
+Gustavo Padovan <gustavo@las.ic.unicamp.br>
+Gustavo Padovan <padovan@profusion.mobi>
diff --git a/Documentation/ABI/testing/sysfs-block-rssd b/Documentation/ABI/testing/sysfs-block-rssd
index d535757..679ce35 100644
--- a/Documentation/ABI/testing/sysfs-block-rssd
+++ b/Documentation/ABI/testing/sysfs-block-rssd
@@ -6,13 +6,21 @@
                 hardware registers.
                     - S ACTive
                     - Command Issue
-                    - Allocated
                     - Completed
                     - PORT IRQ STAT
                     - HOST IRQ STAT
+                    - Allocated
+                    - Commands in Q
 
 What:           /sys/block/rssd*/status
 Date:           April 2012
 KernelVersion:  3.4
 Contact:        Asai Thambi S P <asamymuthupa@micron.com>
-Description:   This is a read-only file. Indicates the status of the device.
+Description:    This is a read-only file. Indicates the status of the device.
+
+What:           /sys/block/rssd*/flags
+Date:           May 2012
+KernelVersion:  3.5
+Contact:        Asai Thambi S P <asamymuthupa@micron.com>
+Description:    This is a read-only file. Dumps the flags in the port and
+                driver data structures.
diff --git a/Documentation/ABI/testing/sysfs-bus-fcoe b/Documentation/ABI/testing/sysfs-bus-fcoe
new file mode 100644
index 0000000..469d09c
--- /dev/null
+++ b/Documentation/ABI/testing/sysfs-bus-fcoe
@@ -0,0 +1,77 @@
+What:		/sys/bus/fcoe/ctlr_X
+Date:		March 2012
+KernelVersion:	TBD
+Contact:	Robert Love <robert.w.love@intel.com>, devel@open-fcoe.org
+Description:	'FCoE Controller' instances on the fcoe bus
+Attributes:
+
+	fcf_dev_loss_tmo: Device loss timeout period (see below). Changing
+			  this value will change the dev_loss_tmo for all
+			  FCFs discovered by this controller.
+
+	lesb_link_fail:   Link Error Status Block (LESB) link failure count.
+
+	lesb_vlink_fail:  Link Error Status Block (LESB) virtual link
+			  failure count.
+
+	lesb_miss_fka:    Link Error Status Block (LESB) missed FCoE
+			  Initialization Protocol (FIP) Keep-Alives (FKA).
+
+	lesb_symb_err:    Link Error Status Block (LESB) symbolic error count.
+
+	lesb_err_block:   Link Error Status Block (LESB) block error count.
+
+	lesb_fcs_error:   Link Error Status Block (LESB) Fibre Channel
+			  Services error count.
+
+Notes: ctlr_X (global increment starting at 0)
+
+What:		/sys/bus/fcoe/fcf_X
+Date:		March 2012
+KernelVersion:	TBD
+Contact:	Robert Love <robert.w.love@intel.com>, devel@open-fcoe.org
+Description:	'FCoE FCF' instances on the fcoe bus. An FCF is a Fibre Channel
+		Forwarder, which is an FCoE switch that can accept FCoE
+		(Ethernet) packets, unpack them, and forward the embedded
+		Fibre Channel frames into an FC fabric. It can also take
+		outbound FC frames and pack them in Ethernet packets to
+		be sent to their destination on the Ethernet segment.
+Attributes:
+
+	fabric_name: Identifies the fabric that the FCF services.
+
+	switch_name: Identifies the FCF.
+
+	priority:    The switch's priority amongst other FCFs on the same
+		     fabric.
+
+	selected:    1 indicates that the switch has been selected for use;
+		     0 indicates that the switch will not be used.
+
+	fc_map:      The Fibre Channel MAP
+
+	vfid:	     The Virtual Fabric ID
+
+	mac:         The FCF's MAC address
+
+	fka_period:  The FIP Keep-Alive period
+
+	fabric_state: The internal kernel state
+		      "Unknown" - Initialization value
+		      "Disconnected" - No link to the FCF/fabric
+		      "Connected" - Host is connected to the FCF
+		      "Deleted" - FCF is being removed from the system
+
+	dev_loss_tmo: The device loss timeout period for this FCF.
+
+Notes: A device loss infrastructure similar to the FC Transport's
+       is present in fcoe_sysfs. It is useful because it prevents a
+       link-flapping adapter from continually advancing the count
+       used to identify the discovered FCF. FCFs will exist in a
+       "Disconnected" state until either the timer expires and the
+       FCF becomes "Deleted" or the FCF is rediscovered and becomes
+       "Connected."
+
+
+Users: The first user of this interface will be the fcoeadm application,
+       which is commonly packaged in the fcoe-utils package.
diff --git a/Documentation/ABI/testing/sysfs-bus-i2c-devices-lm3533 b/Documentation/ABI/testing/sysfs-bus-i2c-devices-lm3533
new file mode 100644
index 0000000..1b62230
--- /dev/null
+++ b/Documentation/ABI/testing/sysfs-bus-i2c-devices-lm3533
@@ -0,0 +1,15 @@
+What:		/sys/bus/i2c/devices/.../output_hvled[n]
+Date:		April 2012
+KernelVersion:	3.5
+Contact:	Johan Hovold <jhovold@gmail.com>
+Description:
+		Set the controlling backlight device (0, 1) for high-voltage
+		current sink HVLED[n] (n = 1, 2).
+
+What:		/sys/bus/i2c/devices/.../output_lvled[n]
+Date:		April 2012
+KernelVersion:	3.5
+Contact:	Johan Hovold <jhovold@gmail.com>
+Description:
+		Set the controlling LED device (0..3) for low-voltage current
+		sink LVLED[n] (n = 1..5).
diff --git a/Documentation/ABI/testing/sysfs-bus-iio b/Documentation/ABI/testing/sysfs-bus-iio
index 5bc8a47..cfedf63 100644
--- a/Documentation/ABI/testing/sysfs-bus-iio
+++ b/Documentation/ABI/testing/sysfs-bus-iio
@@ -219,6 +219,7 @@
 What:		/sys/bus/iio/devices/iio:deviceX/in_voltageY_supply_scale
 What:		/sys/bus/iio/devices/iio:deviceX/in_voltage_scale
 What:		/sys/bus/iio/devices/iio:deviceX/out_voltageY_scale
+What:		/sys/bus/iio/devices/iio:deviceX/out_altvoltageY_scale
 What:		/sys/bus/iio/devices/iio:deviceX/in_accel_scale
 What:		/sys/bus/iio/devices/iio:deviceX/in_accel_peak_scale
 What:		/sys/bus/iio/devices/iio:deviceX/in_anglvel_scale
@@ -273,6 +274,7 @@
 What:		/sys/.../iio:deviceX/in_voltageX_scale_available
 What:		/sys/.../iio:deviceX/in_voltage-voltage_scale_available
 What:		/sys/.../iio:deviceX/out_voltageX_scale_available
+What:		/sys/.../iio:deviceX/out_altvoltageX_scale_available
 What:		/sys/.../iio:deviceX/in_capacitance_scale_available
 KernelVersion:	2.635
 Contact:	linux-iio@vger.kernel.org
@@ -298,14 +300,19 @@
 		gives the 3dB frequency of the filter in Hz.
 
 What:		/sys/bus/iio/devices/iio:deviceX/out_voltageY_raw
+What:		/sys/bus/iio/devices/iio:deviceX/out_altvoltageY_raw
 KernelVersion:	2.6.37
 Contact:	linux-iio@vger.kernel.org
 Description:
 		Raw (unscaled, no bias etc.) output voltage for
 		channel Y.  The number must always be specified and
 		unique if the output corresponds to a single channel.
+		While DAC-like devices typically use out_voltage,
+		a continuous frequency-generating device, such as
+		a DDS or PLL, should use out_altvoltage.
 
 What:		/sys/bus/iio/devices/iio:deviceX/out_voltageY&Z_raw
+What:		/sys/bus/iio/devices/iio:deviceX/out_altvoltageY&Z_raw
 KernelVersion:	2.6.37
 Contact:	linux-iio@vger.kernel.org
 Description:
@@ -316,6 +323,8 @@
 
 What:		/sys/bus/iio/devices/iio:deviceX/out_voltageY_powerdown_mode
 What:		/sys/bus/iio/devices/iio:deviceX/out_voltage_powerdown_mode
+What:		/sys/bus/iio/devices/iio:deviceX/out_altvoltageY_powerdown_mode
+What:		/sys/bus/iio/devices/iio:deviceX/out_altvoltage_powerdown_mode
 KernelVersion:	2.6.38
 Contact:	linux-iio@vger.kernel.org
 Description:
@@ -330,6 +339,8 @@
 
 What:		/sys/.../iio:deviceX/out_votlageY_powerdown_mode_available
 What:		/sys/.../iio:deviceX/out_voltage_powerdown_mode_available
+What:		/sys/.../iio:deviceX/out_altvoltageY_powerdown_mode_available
+What:		/sys/.../iio:deviceX/out_altvoltage_powerdown_mode_available
 KernelVersion:	2.6.38
 Contact:	linux-iio@vger.kernel.org
 Description:
@@ -338,6 +349,8 @@
 
 What:		/sys/bus/iio/devices/iio:deviceX/out_voltageY_powerdown
 What:		/sys/bus/iio/devices/iio:deviceX/out_voltage_powerdown
+What:		/sys/bus/iio/devices/iio:deviceX/out_altvoltageY_powerdown
+What:		/sys/bus/iio/devices/iio:deviceX/out_altvoltage_powerdown
 KernelVersion:	2.6.38
 Contact:	linux-iio@vger.kernel.org
 Description:
@@ -346,6 +359,24 @@
 		normal operation. Y may be suppressed if all outputs are
 		controlled together.
 
+What:		/sys/bus/iio/devices/iio:deviceX/out_altvoltageY_frequency
+KernelVersion:	3.4.0
+Contact:	linux-iio@vger.kernel.org
+Description:
+		Output frequency for channel Y in Hz. The number must always be
+		specified and unique if the output corresponds to a single
+		channel.
+
+What:		/sys/bus/iio/devices/iio:deviceX/out_altvoltageY_phase
+KernelVersion:	3.4.0
+Contact:	linux-iio@vger.kernel.org
+Description:
+		Phase in radians of one frequency/clock output Y
+		(out_altvoltageY) relative to another frequency/clock output
+		(out_altvoltageZ) of the device X. The number must always be
+		specified and unique if the output corresponds to a single
+		channel.
+
 What:		/sys/bus/iio/devices/iio:deviceX/events
 KernelVersion:	2.6.35
 Contact:	linux-iio@vger.kernel.org
diff --git a/Documentation/ABI/testing/sysfs-bus-rbd b/Documentation/ABI/testing/sysfs-bus-rbd
index dbedafb..bcd88eb 100644
--- a/Documentation/ABI/testing/sysfs-bus-rbd
+++ b/Documentation/ABI/testing/sysfs-bus-rbd
@@ -65,11 +65,11 @@
 Entries under /sys/bus/rbd/devices/<dev-id>/snap_<snap-name>
 -------------------------------------------------------------
 
-id
+snap_id
 
 	The rados internal snapshot id assigned for this snapshot
 
-size
+snap_size
 
 	The size of the image when this snapshot was taken.
 
diff --git a/Documentation/ABI/testing/sysfs-class-backlight-driver-lm3533 b/Documentation/ABI/testing/sysfs-class-backlight-driver-lm3533
new file mode 100644
index 0000000..77cf7ac
--- /dev/null
+++ b/Documentation/ABI/testing/sysfs-class-backlight-driver-lm3533
@@ -0,0 +1,48 @@
+What:		/sys/class/backlight/<backlight>/als_channel
+Date:		May 2012
+KernelVersion:	3.5
+Contact:	Johan Hovold <jhovold@gmail.com>
+Description:
+		Get the ALS output channel used as input in
+		ALS-current-control mode (0, 1), where
+
+		0 - out_current0 (backlight 0)
+		1 - out_current1 (backlight 1)
+
+What:		/sys/class/backlight/<backlight>/als_en
+Date:		May 2012
+KernelVersion:	3.5
+Contact:	Johan Hovold <jhovold@gmail.com>
+Description:
+		Enable ALS-current-control mode (0, 1).
+
+What:		/sys/class/backlight/<backlight>/id
+Date:		April 2012
+KernelVersion:	3.5
+Contact:	Johan Hovold <jhovold@gmail.com>
+Description:
+		Get the id of this backlight (0, 1).
+
+What:		/sys/class/backlight/<backlight>/linear
+Date:		April 2012
+KernelVersion:	3.5
+Contact:	Johan Hovold <jhovold@gmail.com>
+Description:
+		Set the brightness-mapping mode (0, 1), where
+
+		0 - exponential mode
+		1 - linear mode
+
+What:		/sys/class/backlight/<backlight>/pwm
+Date:		April 2012
+KernelVersion:	3.5
+Contact:	Johan Hovold <jhovold@gmail.com>
+Description:
+		Set the PWM-input control mask (5 bits), where
+
+		bit 5 - PWM-input enabled in Zone 4
+		bit 4 - PWM-input enabled in Zone 3
+		bit 3 - PWM-input enabled in Zone 2
+		bit 2 - PWM-input enabled in Zone 1
+		bit 1 - PWM-input enabled in Zone 0
+		bit 0 - PWM-input enabled
diff --git a/Documentation/ABI/testing/sysfs-class-led-driver-lm3533 b/Documentation/ABI/testing/sysfs-class-led-driver-lm3533
new file mode 100644
index 0000000..620ebb3
--- /dev/null
+++ b/Documentation/ABI/testing/sysfs-class-led-driver-lm3533
@@ -0,0 +1,65 @@
+What:		/sys/class/leds/<led>/als_channel
+Date:		May 2012
+KernelVersion:	3.5
+Contact:	Johan Hovold <jhovold@gmail.com>
+Description:
+		Set the ALS output channel to use as input in
+		ALS-current-control mode (1, 2), where
+
+		1 - out_current1
+		2 - out_current2
+
+What:		/sys/class/leds/<led>/als_en
+Date:		May 2012
+KernelVersion:	3.5
+Contact:	Johan Hovold <jhovold@gmail.com>
+Description:
+		Enable ALS-current-control mode (0, 1).
+
+What:		/sys/class/leds/<led>/falltime
+What:		/sys/class/leds/<led>/risetime
+Date:		April 2012
+KernelVersion:	3.5
+Contact:	Johan Hovold <jhovold@gmail.com>
+Description:
+		Set the pattern generator fall and rise times (0..7), where
+
+		0 - 2048 us
+		1 - 262 ms
+		2 - 524 ms
+		3 - 1.049 s
+		4 - 2.097 s
+		5 - 4.194 s
+		6 - 8.389 s
+		7 - 16.78 s
+
+What:		/sys/class/leds/<led>/id
+Date:		April 2012
+KernelVersion:	3.5
+Contact:	Johan Hovold <jhovold@gmail.com>
+Description:
+		Get the id of this led (0..3).
+
+What:		/sys/class/leds/<led>/linear
+Date:		April 2012
+KernelVersion:	3.5
+Contact:	Johan Hovold <jhovold@gmail.com>
+Description:
+		Set the brightness-mapping mode (0, 1), where
+
+		0 - exponential mode
+		1 - linear mode
+
+What:		/sys/class/leds/<led>/pwm
+Date:		April 2012
+KernelVersion:	3.5
+Contact:	Johan Hovold <jhovold@gmail.com>
+Description:
+		Set the PWM-input control mask (5 bits), where
+
+		bit 5 - PWM-input enabled in Zone 4
+		bit 4 - PWM-input enabled in Zone 3
+		bit 3 - PWM-input enabled in Zone 2
+		bit 2 - PWM-input enabled in Zone 1
+		bit 1 - PWM-input enabled in Zone 0
+		bit 0 - PWM-input enabled
diff --git a/Documentation/ABI/testing/sysfs-class-mtd b/Documentation/ABI/testing/sysfs-class-mtd
index 4d55a18..db1ad7e 100644
--- a/Documentation/ABI/testing/sysfs-class-mtd
+++ b/Documentation/ABI/testing/sysfs-class-mtd
@@ -123,3 +123,54 @@
 		half page, or a quarter page).
 
 		In the case of ECC NOR, it is the ECC block size.
+
+What:		/sys/class/mtd/mtdX/ecc_strength
+Date:		April 2012
+KernelVersion:	3.4
+Contact:	linux-mtd@lists.infradead.org
+Description:
+		Maximum number of bit errors that the device is capable of
+		correcting within each region covering an ecc step.  This will
+		always be a non-negative integer.  Note that some devices will
+		have multiple ecc steps within each writesize region.
+
+		In the case of devices lacking any ECC capability, it is 0.
+
+What:		/sys/class/mtd/mtdX/bitflip_threshold
+Date:		April 2012
+KernelVersion:	3.4
+Contact:	linux-mtd@lists.infradead.org
+Description:
+		This allows the user to examine and adjust the criteria by which
+		mtd returns -EUCLEAN from mtd_read().  If the maximum number of
+		bit errors that were corrected on any single region comprising
+		an ecc step (as reported by the driver) equals or exceeds this
+		value, -EUCLEAN is returned.  Otherwise, absent an error, 0 is
+		returned.  Higher layers (e.g., UBI) use this return code as an
+		indication that an erase block may be degrading and should be
+		scrutinized as a candidate for being marked as bad.
+
+		The initial value may be specified by the flash device driver.
+		If not, then the default value is ecc_strength.
+
+		The introduction of this feature brings a subtle change to the
+		meaning of the -EUCLEAN return code.  Previously, it was
+		interpreted to mean simply "one or more bit errors were
+		corrected".  Its new interpretation can be phrased as "a
+		dangerously high number of bit errors were corrected on one or
+		more regions comprising an ecc step".  The precise definition of
+		"dangerously high" can be adjusted by the user with
+		bitflip_threshold.  Users are discouraged from doing this,
+		however, unless they know what they are doing and have intimate
+		knowledge of the properties of their device.  Broadly speaking,
+		bitflip_threshold should be low enough to detect genuine erase
+		block degradation, but high enough to avoid the consequences of
+		a persistent return value of -EUCLEAN on devices where sticky
+		bitflips occur.  Note that if bitflip_threshold exceeds
+		ecc_strength, -EUCLEAN is never returned by mtd_read().
+		Conversely, if bitflip_threshold is zero, -EUCLEAN is always
+		returned, absent a hard error.
+
+		This is generally applicable only to NAND flash devices with ECC
+		capability.  It is ignored on devices lacking ECC capability;
+		i.e., devices for which ecc_strength is zero.
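+
+		For example (mtd0 is only an illustrative device name), both
+		values can be read, and the threshold adjusted, from
+		userspace:
+
+		  cat /sys/class/mtd/mtd0/ecc_strength
+		  cat /sys/class/mtd/mtd0/bitflip_threshold
+		  echo 3 > /sys/class/mtd/mtd0/bitflip_threshold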
diff --git a/Documentation/CodingStyle b/Documentation/CodingStyle
index c58b236..cb9258b 100644
--- a/Documentation/CodingStyle
+++ b/Documentation/CodingStyle
@@ -671,8 +671,9 @@
 		Chapter 14: Allocating memory
 
 The kernel provides the following general purpose memory allocators:
-kmalloc(), kzalloc(), kcalloc(), vmalloc(), and vzalloc().  Please refer to
-the API documentation for further information about them.
+kmalloc(), kzalloc(), kmalloc_array(), kcalloc(), vmalloc(), and
+vzalloc().  Please refer to the API documentation for further information
+about them.
 
 The preferred form for passing a size of a struct is the following:
 
@@ -686,6 +687,17 @@
 from void pointer to any other pointer type is guaranteed by the C programming
 language.
 
+The preferred form for allocating an array is the following:
+
+	p = kmalloc_array(n, sizeof(...), ...);
+
+The preferred form for allocating a zeroed array is the following:
+
+	p = kcalloc(n, sizeof(...), ...);
+
+Both forms check for overflow on the allocation size n * sizeof(...),
+and return NULL if that occurred.
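+
+For illustration (the struct name "foo" and the error handling here are just
+an example):
+
+	struct foo *p;
+
+	p = kcalloc(n, sizeof(*p), GFP_KERNEL);
+	if (!p)
+		return -ENOMEM;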
+
 
 		Chapter 15: The inline disease
 
diff --git a/Documentation/DocBook/media/v4l/pixfmt.xml b/Documentation/DocBook/media/v4l/pixfmt.xml
index f5ac15e..e58934c 100644
--- a/Documentation/DocBook/media/v4l/pixfmt.xml
+++ b/Documentation/DocBook/media/v4l/pixfmt.xml
@@ -986,13 +986,13 @@
 	  <row id="V4L2-PIX-FMT-Y4">
 	    <entry><constant>V4L2_PIX_FMT_Y4</constant></entry>
 	    <entry>'Y04 '</entry>
-	    <entry>Old 4-bit greyscale format. Only the least significant 4 bits of each byte are used,
+	    <entry>Old 4-bit greyscale format. Only the most significant 4 bits of each byte are used,
 the other bits are set to 0.</entry>
 	  </row>
 	  <row id="V4L2-PIX-FMT-Y6">
 	    <entry><constant>V4L2_PIX_FMT_Y6</constant></entry>
 	    <entry>'Y06 '</entry>
-	    <entry>Old 6-bit greyscale format. Only the least significant 6 bits of each byte are used,
+	    <entry>Old 6-bit greyscale format. Only the most significant 6 bits of each byte are used,
 the other bits are set to 0.</entry>
 	  </row>
 	</tbody>
diff --git a/Documentation/DocBook/media/v4l/v4l2.xml b/Documentation/DocBook/media/v4l/v4l2.xml
index 015c561..008c2d7 100644
--- a/Documentation/DocBook/media/v4l/v4l2.xml
+++ b/Documentation/DocBook/media/v4l/v4l2.xml
@@ -560,6 +560,7 @@
     &sub-g-tuner;
     &sub-log-status;
     &sub-overlay;
+    &sub-prepare-buf;
     &sub-qbuf;
     &sub-querybuf;
     &sub-querycap;
@@ -567,7 +568,6 @@
     &sub-query-dv-preset;
     &sub-query-dv-timings;
     &sub-querystd;
-    &sub-prepare-buf;
     &sub-reqbufs;
     &sub-s-hw-freq-seek;
     &sub-streamon;
diff --git a/Documentation/DocBook/media/v4l/vidioc-create-bufs.xml b/Documentation/DocBook/media/v4l/vidioc-create-bufs.xml
index 765549f..a2474ec 100644
--- a/Documentation/DocBook/media/v4l/vidioc-create-bufs.xml
+++ b/Documentation/DocBook/media/v4l/vidioc-create-bufs.xml
@@ -108,10 +108,9 @@
 /></entry>
 	  </row>
 	  <row>
-	    <entry>__u32</entry>
+	    <entry>struct&nbsp;v4l2_format</entry>
 	    <entry><structfield>format</structfield></entry>
-	    <entry>Filled in by the application, preserved by the driver.
-	    See <xref linkend="v4l2-format" />.</entry>
+	    <entry>Filled in by the application, preserved by the driver.</entry>
 	  </row>
 	  <row>
 	    <entry>__u32</entry>
diff --git a/Documentation/DocBook/media/v4l/vidioc-dqevent.xml b/Documentation/DocBook/media/v4l/vidioc-dqevent.xml
index e8714aa..98a856f 100644
--- a/Documentation/DocBook/media/v4l/vidioc-dqevent.xml
+++ b/Documentation/DocBook/media/v4l/vidioc-dqevent.xml
@@ -89,7 +89,7 @@
 	  <row>
 	    <entry></entry>
 	    <entry>&v4l2-event-frame-sync;</entry>
-            <entry><structfield>frame</structfield></entry>
+            <entry><structfield>frame_sync</structfield></entry>
 	    <entry>Event data for event V4L2_EVENT_FRAME_SYNC.</entry>
 	  </row>
 	  <row>
diff --git a/Documentation/DocBook/mtdnand.tmpl b/Documentation/DocBook/mtdnand.tmpl
index 0c674be..e0aedb7 100644
--- a/Documentation/DocBook/mtdnand.tmpl
+++ b/Documentation/DocBook/mtdnand.tmpl
@@ -1119,8 +1119,6 @@
 		These constants are defined in nand.h. They are ored together to describe
 		the chip functionality.
      		<programlisting>
-/* Chip can not auto increment pages */
-#define NAND_NO_AUTOINCR	0x00000001
 /* Buswitdh is 16 bit */
 #define NAND_BUSWIDTH_16	0x00000002
 /* Device supports partial programming without padding */
diff --git a/Documentation/SubmittingPatches b/Documentation/SubmittingPatches
index 4468ce2..c379a2a 100644
--- a/Documentation/SubmittingPatches
+++ b/Documentation/SubmittingPatches
@@ -150,7 +150,8 @@
 
 Look through the MAINTAINERS file and the source code, and determine
 if your change applies to a specific subsystem of the kernel, with
-an assigned maintainer.  If so, e-mail that person.
+an assigned maintainer.  If so, e-mail that person.  The script
+scripts/get_maintainer.pl can be very useful at this step.
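+
+For example, to find whom to address on a patch you have prepared (the patch
+file name below is just a placeholder), run:
+
+	scripts/get_maintainer.pl 0001-my-change.patch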
 
 If no maintainer is listed, or the maintainer does not respond, send
 your patch to the primary Linux kernel developer's mailing list,
diff --git a/Documentation/arm/OMAP/DSS b/Documentation/arm/OMAP/DSS
index 888ae7b..a564cee 100644
--- a/Documentation/arm/OMAP/DSS
+++ b/Documentation/arm/OMAP/DSS
@@ -47,6 +47,51 @@
 modelling the hardware overlays, omapdss supports virtual overlays and overlay
 managers. These can be used when updating a display with CPU or system DMA.
 
+omapdss driver support for audio
+--------------------------------
+Several display technologies and standards support audio as well. The DSS
+device driver therefore provides an audio interface that may be used by an
+audio driver or by any other driver interested in this functionality.
+
+The audio_enable function is intended to prepare the relevant
+IP for playback (e.g., enabling an audio FIFO, taking in/out of reset
+some IP, enabling companion chips, etc). It is intended to be called before
+audio_start. The audio_disable function performs the reverse operation and is
+intended to be called after audio_stop.
+
+While a given DSS device driver may support audio, it is possible that for
+certain configurations audio is not supported (e.g., an HDMI display using a
+VESA video timing). The audio_supported function is intended to query whether
+the current configuration of the display supports audio.
+
+The audio_config function is intended to configure all the relevant audio
+parameters of the display. In order to make the function independent of any
+specific DSS device driver, a struct omap_dss_audio is defined. Its purpose
+is to contain all the required parameters for audio configuration. At the
+moment, such structure contains pointers to IEC-60958 channel status word
+and CEA-861 audio infoframe structures. This should be enough to support
+HDMI and DisplayPort, as both are based on CEA-861 and IEC-60958.
+
+The audio_enable/disable, audio_config and audio_supported functions may be
+implemented as functions that sleep. Hence, they must not be called
+while holding a spinlock or a read lock.
+
+The audio_start/audio_stop functions are intended to actually start/stop audio
+playback after the configuration has taken place. These functions are designed
+to be used in an atomic context. Hence, audio_start should return quickly and be
+called only after all the needed resources for audio playback (audio FIFOs,
+DMA channels, companion chips, etc) have been enabled to begin data transfers.
+audio_stop is designed to only stop the audio transfers. The resources used
+for playback are released using audio_disable.
+
+The enum omap_dss_audio_state may be used by implementations of the interface
+to keep track of the audio state. The initial state is _DISABLED; the state
+then transitions to _CONFIGURED and, once the implementation is ready to
+play audio, to _ENABLED. The state _PLAYING is used while audio is being
+rendered.
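+
+As a rough, illustrative sketch of the data types described above (the exact
+field and constant names here are assumptions, not a definitive listing of
+the interface):
+
+	struct omap_dss_audio {
+		struct snd_aes_iec958 *iec;	/* IEC-60958 channel status word */
+		struct snd_cea_861_aud_if *cea;	/* CEA-861 audio infoframe */
+	};
+
+	enum omap_dss_audio_state {
+		OMAP_DSS_AUDIO_DISABLED,	/* initial state */
+		OMAP_DSS_AUDIO_CONFIGURED,	/* audio_config has been called */
+		OMAP_DSS_AUDIO_ENABLED,		/* ready to start playback */
+		OMAP_DSS_AUDIO_PLAYING,		/* audio is being rendered */
+	};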
+
+
 Panel and controller drivers
 ----------------------------
 
@@ -156,6 +201,7 @@
 		"pal" and "ntsc"
 panel_name
 tear_elim	Tearing elimination 0=off, 1=on
+output_type	Output type (video encoder only): "composite" or "svideo"
 
 There are also some debugfs files at <debugfs>/omapdss/ which show information
 about clocks and registers.
diff --git a/Documentation/arm/SPEAr/overview.txt b/Documentation/arm/SPEAr/overview.txt
index 28a9af9..65610bf 100644
--- a/Documentation/arm/SPEAr/overview.txt
+++ b/Documentation/arm/SPEAr/overview.txt
@@ -8,9 +8,8 @@
   weblink : http://www.st.com/spear
 
   The ST Microelectronics SPEAr range of ARM9/CortexA9 System-on-Chip CPUs are
-  supported by the 'spear' platform of ARM Linux. Currently SPEAr300,
-  SPEAr310, SPEAr320 and SPEAr600 SOCs are supported. Support for the SPEAr13XX
-  series is in progress.
+  supported by the 'spear' platform of ARM Linux. Currently SPEAr1310,
+  SPEAr1340, SPEAr300, SPEAr310, SPEAr320 and SPEAr600 SOCs are supported.
 
   Hierarchy in SPEAr is as follows:
 
@@ -26,36 +25,39 @@
 		- SPEAr600 (SOC)
 			- SPEAr600 Evaluation Board
 	- SPEAr13XX (13XX SOC series, based on ARM CORTEXA9)
-		- SPEAr1300 (SOC)
+		- SPEAr1310 (SOC)
+			- SPEAr1310 Evaluation Board
+		- SPEAr1340 (SOC)
+			- SPEAr1340 Evaluation Board
 
   Configuration
   -------------
 
   A generic configuration is provided for each machine, and can be used as the
   default by
-	make spear600_defconfig
-	make spear300_defconfig
-	make spear310_defconfig
-	make spear320_defconfig
+	make spear13xx_defconfig
+	make spear3xx_defconfig
+	make spear6xx_defconfig
 
   Layout
   ------
 
-  The common files for multiple machine families (SPEAr3XX, SPEAr6XX and
-  SPEAr13XX) are located in the platform code contained in arch/arm/plat-spear
+  The common files for multiple machine families (SPEAr3xx, SPEAr6xx and
+  SPEAr13xx) are located in the platform code contained in arch/arm/plat-spear
   with headers in plat/.
 
   Each machine series have a directory with name arch/arm/mach-spear followed by
   series name. Like mach-spear3xx, mach-spear6xx and mach-spear13xx.
 
-  Common file for machines of spear3xx family is mach-spear3xx/spear3xx.c and for
-  spear6xx is mach-spear6xx/spear6xx.c. mach-spear* also contain soc/machine
-  specific files, like spear300.c, spear310.c, spear320.c and spear600.c.
-  mach-spear* doesn't contains board specific files as they fully support
-  Flattened Device Tree.
+  Common file for machines of spear3xx family is mach-spear3xx/spear3xx.c, for
+  spear6xx is mach-spear6xx/spear6xx.c and for spear13xx family is
+  mach-spear13xx/spear13xx.c. mach-spear* also contain soc/machine specific
+  files, like spear1310.c, spear1340.c spear300.c, spear310.c, spear320.c and
+  spear600.c.  mach-spear* doesn't contain board specific files as they fully
+  support the Flattened Device Tree.
 
 
   Document Author
   ---------------
 
-  Viresh Kumar <viresh.kumar@st.com>, (c) 2010-2012 ST Microelectronics
+  Viresh Kumar <viresh.linux@gmail.com>, (c) 2010-2012 ST Microelectronics
diff --git a/Documentation/cgroups/memory.txt b/Documentation/cgroups/memory.txt
index 9b1067a..dd88540 100644
--- a/Documentation/cgroups/memory.txt
+++ b/Documentation/cgroups/memory.txt
@@ -184,12 +184,14 @@
 page will eventually get charged for it (once it is uncharged from
 the cgroup that brought it in -- this will happen on memory pressure).
 
+But see section 8.2: when moving a task to another cgroup, its pages may
+be recharged to the new cgroup, if move_charge_at_immigrate has been chosen.
+
 Exception: If CONFIG_CGROUP_CGROUP_MEM_RES_CTLR_SWAP is not used.
 When you do swapoff and make swapped-out pages of shmem(tmpfs) to
 be backed into memory in force, charges for pages are accounted against the
 caller of swapoff rather than the users of shmem.
 
-
 2.4 Swap Extension (CONFIG_CGROUP_MEM_RES_CTLR_SWAP)
 
 Swap Extension allows you to record charge for swap. A swapped-in page is
@@ -374,14 +376,15 @@
 tasks have migrated away from it. (because we charge against pages, not
 against tasks.)
 
-Such charges are freed or moved to their parent. At moving, both of RSS
-and CACHES are moved to parent.
-rmdir() may return -EBUSY if freeing/moving fails. See 5.1 also.
+The stats are moved to the root (if use_hierarchy==0) or to the parent (if
+use_hierarchy==1); the charges themselves are not changed, except that they
+are uncharged from the child.
 
 Charges recorded in swap information is not updated at removal of cgroup.
 Recorded information is discarded and a cgroup which uses swap (swapcache)
 will be charged as a new owner of it.
 
+About use_hierarchy, see Section 6.
 
 5. Misc. interfaces.
 
@@ -394,13 +397,15 @@
 
   Almost all pages tracked by this memory cgroup will be unmapped and freed.
   Some pages cannot be freed because they are locked or in-use. Such pages are
-  moved to parent and this cgroup will be empty. This may return -EBUSY if
-  VM is too busy to free/move all pages immediately.
+  moved to the parent (if use_hierarchy==1) or root (if use_hierarchy==0) and this
+  cgroup will be empty.
 
   Typical use case of this interface is that calling this before rmdir().
   Because rmdir() moves all pages to parent, some out-of-use page caches can be
   moved to the parent. If you want to avoid that, force_empty will be useful.
 
+  About use_hierarchy, see Section 6.
+
 5.2 stat file
 
 memory.stat file includes following statistics
@@ -430,17 +435,10 @@
 hierarchical_memsw_limit - # of bytes of memory+swap limit with regard to
 			hierarchy under which memory cgroup is.
 
-total_cache		- sum of all children's "cache"
-total_rss		- sum of all children's "rss"
-total_mapped_file	- sum of all children's "cache"
-total_pgpgin		- sum of all children's "pgpgin"
-total_pgpgout		- sum of all children's "pgpgout"
-total_swap		- sum of all children's "swap"
-total_inactive_anon	- sum of all children's "inactive_anon"
-total_active_anon	- sum of all children's "active_anon"
-total_inactive_file	- sum of all children's "inactive_file"
-total_active_file	- sum of all children's "active_file"
-total_unevictable	- sum of all children's "unevictable"
+total_<counter>		- hierarchical version of <counter>, which in
+			addition to the cgroup's own value includes the
+			sum of all hierarchical children's values of
+			<counter>, e.g. total_cache
 
 # The following additional stats are dependent on CONFIG_DEBUG_VM.
 
@@ -622,8 +620,7 @@
   bit | what type of charges would be moved ?
  -----+------------------------------------------------------------------------
    0  | A charge of an anonymous page(or swap of it) used by the target task.
-      | Those pages and swaps must be used only by the target task. You must
-      | enable Swap Extension(see 2.4) to enable move of swap charges.
+      | You must enable Swap Extension (see 2.4) to enable moving of swap charges.
  -----+------------------------------------------------------------------------
    1  | A charge of file pages(normal file, tmpfs file(e.g. ipc shared memory)
       | and swaps of tmpfs file) mmapped by the target task. Unlike the case of
@@ -636,8 +633,6 @@
 
 8.3 TODO
 
-- Implement madvise(2) to let users decide the vma to be moved or not to be
-  moved.
 - All of moving charge operations are done under cgroup_mutex. It's not good
   behavior to hold the mutex too long, so we may need some trick.
 
diff --git a/Documentation/cgroups/resource_counter.txt b/Documentation/cgroups/resource_counter.txt
index f3c4ec3..0c4a344 100644
--- a/Documentation/cgroups/resource_counter.txt
+++ b/Documentation/cgroups/resource_counter.txt
@@ -92,6 +92,14 @@
 
 	The _locked routines imply that the res_counter->lock is taken.
 
+ f. void res_counter_uncharge_until
+		(struct res_counter *rc, struct res_counter *top,
+		 unsigned long val)
+
+	Almost the same as res_counter_uncharge(), but propagation of the
+	uncharge stops when rc == top. This is useful when killing a
+	res_counter in a child cgroup.
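+
+	For illustration (the child/parent structures here are hypothetical):
+
+		/* uncharge the child without touching counters above parent */
+		res_counter_uncharge_until(&child->res, &parent->res, val);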
+
  2.1 Other accounting routines
 
     There are more routines that may help you with common needs, like
diff --git a/Documentation/cris/README b/Documentation/cris/README
index d9b0868..8dbdb1a 100644
--- a/Documentation/cris/README
+++ b/Documentation/cris/README
@@ -1,38 +1,34 @@
-Linux 2.4 on the CRIS architecture
-==================================
-$Id: README,v 1.7 2001/04/19 12:38:32 bjornw Exp $
+Linux on the CRIS architecture
+==============================
 
-This is a port of Linux 2.4 to Axis Communications ETRAX 100LX embedded 
-network CPU. For more information about CRIS and ETRAX please see further
-below.
+This is a port of Linux to Axis Communications ETRAX 100LX,
+ETRAX FS and ARTPEC-3 embedded network CPUs.
+
+For more information about CRIS and ETRAX please see further below.
 
 In order to compile this you need a version of gcc with support for the
-ETRAX chip family. Please see this link for more information on how to 
+ETRAX chip family. Please see this link for more information on how to
 download the compiler and other tools useful when building and booting
 software for the ETRAX platform:
 
-http://developer.axis.com/doc/software/devboard_lx/install-howto.html
-
-<more specific information should come in this document later>
+http://developer.axis.com/wiki/doku.php?id=axis:install-howto-2_20
 
 What is CRIS ?
 --------------
 
 CRIS is an acronym for 'Code Reduced Instruction Set'. It is the CPU
 architecture in Axis Communication AB's range of embedded network CPU's,
-called ETRAX. The latest CPU is called ETRAX 100LX, where LX stands for
-'Linux' because the chip was designed to be a good host for the Linux
-operating system.
+called ETRAX.
 
 The ETRAX 100LX chip
 --------------------
 
-For reference, please see the press-release:
+For reference, please see the following link:
 
-http://www.axis.com/news/us/001101_etrax.htm
+http://www.axis.com/products/dev_etrax_100lx/index.htm
 
-The ETRAX 100LX is a 100 MIPS processor with 8kB cache, MMU, and a very broad 
-range of  built-in interfaces, all with modern scatter/gather DMA.
+The ETRAX 100LX is a 100 MIPS processor with 8kB cache, MMU, and a very broad
+range of built-in interfaces, all with modern scatter/gather DMA.
 
 Memory interfaces:
 
@@ -51,20 +47,28 @@
 	* SCSI
 	* two parallel-ports
 	* two generic 8-bit ports
-	
-	(not all interfaces are available at the same time due to chip pin 
+
+	(not all interfaces are available at the same time due to chip pin
          multiplexing)
 
-The previous version of the ETRAX, the ETRAX 100, sits in almost all of
-Axis shipping thin-servers like the Axis 2100 web camera or the ETRAX 100
-developer-board. It lacks an MMU so the Linux we run on that is a version
-of uClinux (Linux 2.0 without MM-support) ported to the CRIS architecture.
-The new Linux 2.4 port has full MM and needs a CPU with an MMU, so it will
-not run on the ETRAX 100.
+The ETRAX 100LX implements the CRISv10 architecture.
 
-A version of the Axis developer-board with ETRAX 100LX (running Linux
-2.4) is now available. For more information please see developer.axis.com.
 
+The ETRAX FS and ARTPEC-3 chips
+-------------------------------
+
+The ETRAX FS is a 200MHz 32-bit RISC processor with on-chip 16kB
+I-cache and 16kB D-cache and with a wide range of device interfaces
+including multiple high speed serial ports and an integrated USB 1.1 PHY.
+
+The ARTPEC-3 is a variant of the ETRAX FS with additional IO-units
+used by the Axis Communications network cameras.
+
+See the link below for more information:
+
+http://www.axis.com/products/dev_etrax_fs/index.htm
+
+ETRAX FS and ARTPEC-3 are both CRISv32 architectures.
 
 Bootlog
 -------
@@ -182,10 +186,6 @@
 -rwxr-xr-x  1 342      100         16252  Jan 01 00:00 telnetd
 
 
-(All programs are statically linked to the libc at this point - we have not ported the
- shared libraries yet)
-
-
 
 
 
diff --git a/Documentation/device-mapper/thin-provisioning.txt b/Documentation/device-mapper/thin-provisioning.txt
index 3370bc4..f5cfc62 100644
--- a/Documentation/device-mapper/thin-provisioning.txt
+++ b/Documentation/device-mapper/thin-provisioning.txt
@@ -287,6 +287,17 @@
 	the current transaction id is when you change it with this
 	compare-and-swap message.
 
+    reserve_metadata_snap
+
+        Reserve a copy of the data mapping btree for use by userland.
+        This allows userland to inspect the mappings as they were when
+        this message was executed.  Use the pool's status command to
+        get the root block associated with the metadata snapshot.
+
+    release_metadata_snap
+
+        Release a previously reserved copy of the data mapping btree.
+
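+    Both messages are issued with "dmsetup message" against the pool device
+    (the device name below is illustrative):
+
+        dmsetup message /dev/mapper/pool 0 reserve_metadata_snap
+        dmsetup message /dev/mapper/pool 0 release_metadata_snap
+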
 'thin' target
 -------------
 
diff --git a/Documentation/devicetree/bindings/arm/fsl.txt b/Documentation/devicetree/bindings/arm/fsl.txt
index bfbc771..ac9e751 100644
--- a/Documentation/devicetree/bindings/arm/fsl.txt
+++ b/Documentation/devicetree/bindings/arm/fsl.txt
@@ -1,6 +1,14 @@
 Freescale i.MX Platforms Device Tree Bindings
 -----------------------------------------------
 
+i.MX23 Evaluation Kit
+Required root node properties:
+    - compatible = "fsl,imx23-evk", "fsl,imx23";
+
+i.MX28 Evaluation Kit
+Required root node properties:
+    - compatible = "fsl,imx28-evk", "fsl,imx28";
+
 i.MX51 Babbage Board
 Required root node properties:
     - compatible = "fsl,imx51-babbage", "fsl,imx51";
@@ -29,6 +37,10 @@
 Required root node properties:
     - compatible = "fsl,imx6q-sabrelite", "fsl,imx6q";
 
+i.MX6 Quad SABRE Smart Device Board
+Required root node properties:
+    - compatible = "fsl,imx6q-sabresd", "fsl,imx6q";
+
 Generic i.MX boards
 -------------------
 
diff --git a/Documentation/devicetree/bindings/arm/samsung/interrupt-combiner.txt b/Documentation/devicetree/bindings/arm/samsung/interrupt-combiner.txt
new file mode 100644
index 0000000..f2f2171
--- /dev/null
+++ b/Documentation/devicetree/bindings/arm/samsung/interrupt-combiner.txt
@@ -0,0 +1,52 @@
+* Samsung Exynos Interrupt Combiner Controller
+
+Samsung's Exynos4 architecture includes an interrupt combiner controller which
+can combine interrupt sources as a group and provide a single interrupt request
+for the group. The interrupt requests from each group are connected to a parent
+interrupt controller, such as the GIC in the case of Exynos4210.
+
+The interrupt combiner controller consists of multiple combiners. Up to eight
+interrupt sources can be connected to a combiner. The combiner outputs one
+combined interrupt for its eight interrupt sources. The combined interrupt
+is usually connected to a parent interrupt controller.
+
+A single node in the device tree is used to describe the interrupt combiner
+controller module (which includes multiple combiners). A combiner in the
+interrupt controller module shares config/control registers with other
+combiners. For example, a 32-bit interrupt enable/disable config register
+can accommodate up to 4 interrupt combiners (with each combiner supporting
+up to 8 interrupt sources).
+
+Required properties:
+- compatible: should be "samsung,exynos4210-combiner".
+- interrupt-controller: Identifies the node as an interrupt controller.
+- #interrupt-cells: should be <2>. The meaning of the cells are
+	* First Cell: Combiner Group Number.
+	* Second Cell: Interrupt number within the group.
+- reg: Base address and size of interrupt combiner registers.
+- interrupts: The list of interrupts generated by the combiners which are then
+    connected to a parent interrupt controller. The format of the interrupt
+    specifier depends on the parent interrupt controller.
+
+Optional properties:
+- samsung,combiner-nr: The number of interrupt combiners supported. If this
+  property is not specified, the default number of combiners is assumed
+  to be 16.
+- interrupt-parent: phandle of the parent interrupt controller, if not
+  inherited from the parent node.
+
+
+Example:
+
+	The following is an example from the Exynos4210 SoC dtsi file.
+
+	combiner:interrupt-controller@10440000 {
+		compatible = "samsung,exynos4210-combiner";
+		interrupt-controller;
+		#interrupt-cells = <2>;
+		reg = <0x10440000 0x1000>;
+		interrupts = <0 0 0>, <0 1 0>, <0 2 0>, <0 3 0>,
+			     <0 4 0>, <0 5 0>, <0 6 0>, <0 7 0>,
+			     <0 8 0>, <0 9 0>, <0 10 0>, <0 11 0>,
+			     <0 12 0>, <0 13 0>, <0 14 0>, <0 15 0>;
+	};
diff --git a/Documentation/devicetree/bindings/arm/spear-timer.txt b/Documentation/devicetree/bindings/arm/spear-timer.txt
new file mode 100644
index 0000000..c001722
--- /dev/null
+++ b/Documentation/devicetree/bindings/arm/spear-timer.txt
@@ -0,0 +1,18 @@
+* SPEAr ARM Timer
+
+** Timer node required properties:
+
+- compatible : Should be:
+	"st,spear-timer"
+- reg: Address range of the timer registers
+- interrupt-parent: Should be the phandle for the interrupt controller
+  that services interrupts for this device
+- interrupts: Should contain the timer interrupt number
+
+Example:
+
+	timer@f0000000 {
+		compatible = "st,spear-timer";
+		reg = <0xf0000000 0x400>;
+		interrupts = <2>;
+	};
diff --git a/Documentation/devicetree/bindings/arm/spear.txt b/Documentation/devicetree/bindings/arm/spear.txt
index aa5f355..0d42949 100644
--- a/Documentation/devicetree/bindings/arm/spear.txt
+++ b/Documentation/devicetree/bindings/arm/spear.txt
@@ -2,25 +2,25 @@
 ---------------------------------------
 
 Boards with the ST SPEAr600 SoC shall have the following properties:
-
 Required root node property:
-
 compatible = "st,spear600";
 
 Boards with the ST SPEAr300 SoC shall have the following properties:
-
 Required root node property:
-
 compatible = "st,spear300";
 
 Boards with the ST SPEAr310 SoC shall have the following properties:
-
 Required root node property:
-
 compatible = "st,spear310";
 
 Boards with the ST SPEAr320 SoC shall have the following properties:
-
 Required root node property:
-
 compatible = "st,spear320";
+
+Boards with the ST SPEAr1310 SoC shall have the following properties:
+Required root node property:
+compatible = "st,spear1310";
+
+Boards with the ST SPEAr1340 SoC shall have the following properties:
+Required root node property:
+compatible = "st,spear1340";
diff --git a/Documentation/devicetree/bindings/arm/tegra/nvidia,tegra20-ahb.txt b/Documentation/devicetree/bindings/arm/tegra/nvidia,tegra20-ahb.txt
new file mode 100644
index 0000000..234406d
--- /dev/null
+++ b/Documentation/devicetree/bindings/arm/tegra/nvidia,tegra20-ahb.txt
@@ -0,0 +1,11 @@
+NVIDIA Tegra AHB
+
+Required properties:
+- compatible : "nvidia,tegra20-ahb" or "nvidia,tegra30-ahb"
+- reg : Should contain one register range (address and length)
+
+Example:
+	ahb: ahb@6000c004 {
+		compatible = "nvidia,tegra20-ahb";
+		reg = <0x6000c004 0x10c>; /* AHB Arbitration + Gizmo Controller */
+	};
diff --git a/Documentation/devicetree/bindings/dma/fsl-mxs-dma.txt b/Documentation/devicetree/bindings/dma/fsl-mxs-dma.txt
new file mode 100644
index 0000000..ded0398
--- /dev/null
+++ b/Documentation/devicetree/bindings/dma/fsl-mxs-dma.txt
@@ -0,0 +1,19 @@
+* Freescale MXS DMA
+
+Required properties:
+- compatible : Should be "fsl,<chip>-dma-apbh" or "fsl,<chip>-dma-apbx"
+- reg : Should contain registers location and length
+
+Supported chips:
+imx23, imx28.
+
+Examples:
+dma-apbh@80004000 {
+	compatible = "fsl,imx28-dma-apbh";
+	reg = <0x80004000 2000>;
+};
+
+dma-apbx@80024000 {
+	compatible = "fsl,imx28-dma-apbx";
+	reg = <0x80024000 2000>;
+};
diff --git a/Documentation/devicetree/bindings/dma/snps-dma.txt b/Documentation/devicetree/bindings/dma/snps-dma.txt
new file mode 100644
index 0000000..c0d85db
--- /dev/null
+++ b/Documentation/devicetree/bindings/dma/snps-dma.txt
@@ -0,0 +1,17 @@
+* Synopsys Designware DMA Controller
+
+Required properties:
+- compatible: "snps,dma-spear1340"
+- reg: Address range of the DMAC registers
+- interrupt-parent: Should be the phandle for the interrupt controller
+  that services interrupts for this device
+- interrupts: Should contain the DMAC interrupt number
+
+Example:
+
+	dma@fc000000 {
+		compatible = "snps,dma-spear1340";
+		reg = <0xfc000000 0x1000>;
+		interrupt-parent = <&vic1>;
+		interrupts = <12>;
+	};
diff --git a/Documentation/devicetree/bindings/gpio/gpio-mm-lantiq.txt b/Documentation/devicetree/bindings/gpio/gpio-mm-lantiq.txt
new file mode 100644
index 0000000..f93d514
--- /dev/null
+++ b/Documentation/devicetree/bindings/gpio/gpio-mm-lantiq.txt
@@ -0,0 +1,38 @@
+Lantiq SoC External Bus memory mapped GPIO controller
+
+By attaching hardware latches to the EBU it is possible to create output-only
+gpios. This driver configures a special memory address which, when written
+to, outputs 16 bits to the latches.
+
+The node describing the memory mapped GPIOs needs to be a child of the node
+describing the "lantiq,localbus".
+
+Required properties:
+- compatible : Should be "lantiq,gpio-mm-lantiq"
+- reg : Address and length of the register set for the device
+- #gpio-cells : Should be two.  The first cell is the pin number and
+  the second cell is used to specify optional parameters (currently
+  unused).
+- gpio-controller : Marks the device node as a gpio controller.
+
+Optional properties:
+- lantiq,shadow : The default value that we shall assume as already set on the
+  shift register cascade.
+
+Example:
+
+localbus@0 {
+	#address-cells = <2>;
+	#size-cells = <1>;
+	ranges = <0 0 0x0 0x3ffffff /* addrsel0 */
+		1 0 0x4000000 0x4000010>; /* addsel1 */
+	compatible = "lantiq,localbus", "simple-bus";
+
+	gpio_mm0: gpio@4000000 {
+		compatible = "lantiq,gpio-mm";
+		reg = <1 0x0 0x10>;
+		gpio-controller;
+		#gpio-cells = <2>;
+		lantiq,shadow = <0x77f>;
+	};
+};
diff --git a/Documentation/devicetree/bindings/gpio/gpio-mxs.txt b/Documentation/devicetree/bindings/gpio/gpio-mxs.txt
new file mode 100644
index 0000000..0c35673
--- /dev/null
+++ b/Documentation/devicetree/bindings/gpio/gpio-mxs.txt
@@ -0,0 +1,87 @@
+* Freescale MXS GPIO controller
+
+The Freescale MXS GPIO controller is part of MXS PIN controller.  The
+GPIOs are organized in port/bank.  Each port consists of 32 GPIOs.
+
+As the GPIO controller is embedded in the PIN controller and all the
+GPIO ports share the same IO space with PIN controller, the GPIO node
+will be represented as sub-nodes of MXS pinctrl node.
+
+Required properties for GPIO node:
+- compatible : Should be "fsl,<soc>-gpio".  The supported SoCs include
+  imx23 and imx28.
+- interrupts : Should be the port interrupt shared by all 32 pins.
+- gpio-controller : Marks the device node as a gpio controller.
+- #gpio-cells : Should be two.  The first cell is the pin number and
+  the second cell is used to specify optional parameters (currently
+  unused).
+- interrupt-controller: Marks the device node as an interrupt controller.
+- #interrupt-cells : Should be 2.  The first cell is the GPIO number.
+  The second cell bits[3:0] is used to specify trigger type and level flags:
+      1 = low-to-high edge triggered.
+      2 = high-to-low edge triggered.
+      4 = active high level-sensitive.
+      8 = active low level-sensitive.
+
+Note: Each GPIO port should have an alias correctly numbered in "aliases"
+node.
+
+Examples:
+
+aliases {
+	gpio0 = &gpio0;
+	gpio1 = &gpio1;
+	gpio2 = &gpio2;
+	gpio3 = &gpio3;
+	gpio4 = &gpio4;
+};
+
+pinctrl@80018000 {
+	compatible = "fsl,imx28-pinctrl", "simple-bus";
+	reg = <0x80018000 2000>;
+
+	gpio0: gpio@0 {
+		compatible = "fsl,imx28-gpio";
+		interrupts = <127>;
+		gpio-controller;
+		#gpio-cells = <2>;
+		interrupt-controller;
+		#interrupt-cells = <2>;
+	};
+
+	gpio1: gpio@1 {
+		compatible = "fsl,imx28-gpio";
+		interrupts = <126>;
+		gpio-controller;
+		#gpio-cells = <2>;
+		interrupt-controller;
+		#interrupt-cells = <2>;
+	};
+
+	gpio2: gpio@2 {
+		compatible = "fsl,imx28-gpio";
+		interrupts = <125>;
+		gpio-controller;
+		#gpio-cells = <2>;
+		interrupt-controller;
+		#interrupt-cells = <2>;
+	};
+
+	gpio3: gpio@3 {
+		compatible = "fsl,imx28-gpio";
+		interrupts = <124>;
+		gpio-controller;
+		#gpio-cells = <2>;
+		interrupt-controller;
+		#interrupt-cells = <2>;
+	};
+
+	gpio4: gpio@4 {
+		compatible = "fsl,imx28-gpio";
+		interrupts = <123>;
+		gpio-controller;
+		#gpio-cells = <2>;
+		interrupt-controller;
+		#interrupt-cells = <2>;
+	};
+};
diff --git a/Documentation/devicetree/bindings/gpio/gpio-stp-xway.txt b/Documentation/devicetree/bindings/gpio/gpio-stp-xway.txt
new file mode 100644
index 0000000..854de13
--- /dev/null
+++ b/Documentation/devicetree/bindings/gpio/gpio-stp-xway.txt
@@ -0,0 +1,42 @@
+Lantiq SoC Serial To Parallel (STP) GPIO controller
+
+The Serial To Parallel (STP) is found on MIPS-based Lantiq SoCs. It is a
+peripheral controller used to drive external shift register cascades. At most
+3 groups of 8 bits can be driven. The hardware is able to allow the DSL modem
+to drive the 2 LSBs of the cascade automatically.
+
+
+Required properties:
+- compatible : Should be "lantiq,gpio-stp-xway"
+- reg : Address and length of the register set for the device
+- #gpio-cells : Should be two.  The first cell is the pin number and
+  the second cell is used to specify optional parameters (currently
+  unused).
+- gpio-controller : Marks the device node as a gpio controller.
+
+Optional properties:
+- lantiq,shadow : The default value that we shall assume as already set on the
+  shift register cascade.
+- lantiq,groups : Set the 3 bit mask to select which of the 3 groups are enabled
+  in the shift register cascade.
+- lantiq,dsl : The dsl core can control the 2 LSBs of the gpio cascade. This 2 bit
+  property can enable this feature.
+- lantiq,phy1 : The gphy1 core can control 3 bits of the gpio cascade.
+- lantiq,phy2 : The gphy2 core can control 3 bits of the gpio cascade.
+- lantiq,rising : use rising instead of falling edge for the shift register
+
+Example:
+
+gpio1: stp@E100BB0 {
+	compatible = "lantiq,gpio-stp-xway";
+	reg = <0xE100BB0 0x40>;
+	#gpio-cells = <2>;
+	gpio-controller;
+
+	lantiq,shadow = <0xffff>;
+	lantiq,groups = <0x7>;
+	lantiq,dsl = <0x3>;
+	lantiq,phy1 = <0x7>;
+	lantiq,phy2 = <0x7>;
+	/* lantiq,rising; */
+};
diff --git a/Documentation/devicetree/bindings/i2c/i2c-mux-pinctrl.txt b/Documentation/devicetree/bindings/i2c/i2c-mux-pinctrl.txt
new file mode 100644
index 0000000..ae8af16
--- /dev/null
+++ b/Documentation/devicetree/bindings/i2c/i2c-mux-pinctrl.txt
@@ -0,0 +1,93 @@
+Pinctrl-based I2C Bus Mux
+
+This binding describes an I2C bus multiplexer that uses pin multiplexing to
+route the I2C signals, and represents the pin multiplexing configuration
+using the pinctrl device tree bindings.
+
+                                 +-----+  +-----+
+                                 | dev |  | dev |
+    +------------------------+   +-----+  +-----+
+    | SoC                    |      |        |
+    |                   /----|------+--------+
+    |   +---+   +------+     | child bus A, on first set of pins
+    |   |I2C|---|Pinmux|     |
+    |   +---+   +------+     | child bus B, on second set of pins
+    |                   \----|------+--------+--------+
+    |                        |      |        |        |
+    +------------------------+  +-----+  +-----+  +-----+
+                                | dev |  | dev |  | dev |
+                                +-----+  +-----+  +-----+
+
+Required properties:
+- compatible: i2c-mux-pinctrl
+- i2c-parent: The phandle of the I2C bus that this multiplexer's master-side
+  port is connected to.
+
+Also required are:
+
+* Standard pinctrl properties that specify the pin mux state for each child
+  bus. See ../pinctrl/pinctrl-bindings.txt.
+
+* Standard I2C mux properties. See mux.txt in this directory.
+
+* I2C child bus nodes. See mux.txt in this directory.
+
+For each named state defined in the pinctrl-names property, an I2C child bus
+will be created. I2C child bus numbers are assigned based on the index into
+the pinctrl-names property.
+
+The only exception is that no bus will be created for a state named "idle". If
+such a state is defined, it must be the last entry in pinctrl-names. For
+example:
+
+	pinctrl-names = "ddc", "pta", "idle"  ->  ddc = bus 0, pta = bus 1
+	pinctrl-names = "ddc", "idle", "pta"  ->  Invalid ("idle" not last)
+	pinctrl-names = "idle", "ddc", "pta"  ->  Invalid ("idle" not last)
+
+Whenever an access is made to a device on a child bus, the relevant pinctrl
+state will be programmed into hardware.
+
+If an idle state is defined, whenever an access is not being made to a device
+on a child bus, the idle pinctrl state will be programmed into hardware.
+
+If an idle state is not defined, the most recently used pinctrl state will be
+left programmed into hardware whenever no access is being made of a device on
+a child bus.
+
+Example:
+
+	i2cmux {
+		compatible = "i2c-mux-pinctrl";
+		#address-cells = <1>;
+		#size-cells = <0>;
+
+		i2c-parent = <&i2c1>;
+
+		pinctrl-names = "ddc", "pta", "idle";
+		pinctrl-0 = <&state_i2cmux_ddc>;
+		pinctrl-1 = <&state_i2cmux_pta>;
+		pinctrl-2 = <&state_i2cmux_idle>;
+
+		i2c@0 {
+			reg = <0>;
+			#address-cells = <1>;
+			#size-cells = <0>;
+
+			eeprom {
+				compatible = "eeprom";
+				reg = <0x50>;
+			};
+		};
+
+		i2c@1 {
+			reg = <1>;
+			#address-cells = <1>;
+			#size-cells = <0>;
+
+			eeprom {
+				compatible = "eeprom";
+				reg = <0x50>;
+			};
+		};
+	};
+
diff --git a/Documentation/devicetree/bindings/i2c/i2c-mxs.txt b/Documentation/devicetree/bindings/i2c/i2c-mxs.txt
new file mode 100644
index 0000000..1bfc02d
--- /dev/null
+++ b/Documentation/devicetree/bindings/i2c/i2c-mxs.txt
@@ -0,0 +1,16 @@
+* Freescale MXS Inter IC (I2C) Controller
+
+Required properties:
+- compatible: Should be "fsl,<chip>-i2c"
+- reg: Should contain registers location and length
+- interrupts: Should contain ERROR and DMA interrupts
+
+Examples:
+
+i2c0: i2c@80058000 {
+	#address-cells = <1>;
+	#size-cells = <0>;
+	compatible = "fsl,imx28-i2c";
+	reg = <0x80058000 2000>;
+	interrupts = <111 68>;
+};
diff --git a/Documentation/devicetree/bindings/i2c/mux.txt b/Documentation/devicetree/bindings/i2c/mux.txt
new file mode 100644
index 0000000..af84cce
--- /dev/null
+++ b/Documentation/devicetree/bindings/i2c/mux.txt
@@ -0,0 +1,60 @@
+Common i2c bus multiplexer/switch properties.
+
+An i2c bus multiplexer/switch will have several child busses that are
+numbered uniquely in a device dependent manner.  The nodes for an i2c bus
+multiplexer/switch will have one child node for each child
+bus.
+
+Required properties:
+- #address-cells = <1>;
+- #size-cells = <0>;
+
+Required properties for child nodes:
+- #address-cells = <1>;
+- #size-cells = <0>;
+- reg : The sub-bus number.
+
+Optional properties for child nodes:
+- Other properties specific to the multiplexer/switch hardware.
+- Child nodes conforming to i2c bus binding
+
+
+Example :
+
+	/*
+	   An NXP pca9548 8 channel I2C multiplexer at address 0x70
+	   with two NXP pca8574 GPIO expanders attached, one each to
+	   ports 3 and 4.
+	 */
+
+	mux@70 {
+		compatible = "nxp,pca9548";
+		reg = <0x70>;
+		#address-cells = <1>;
+		#size-cells = <0>;
+
+		i2c@3 {
+			#address-cells = <1>;
+			#size-cells = <0>;
+			reg = <3>;
+
+			gpio1: gpio@38 {
+				compatible = "nxp,pca8574";
+				reg = <0x38>;
+				#gpio-cells = <2>;
+				gpio-controller;
+			};
+		};
+		i2c@4 {
+			#address-cells = <1>;
+			#size-cells = <0>;
+			reg = <4>;
+
+			gpio2: gpio@38 {
+				compatible = "nxp,pca8574";
+				reg = <0x38>;
+				#gpio-cells = <2>;
+				gpio-controller;
+			};
+		};
+	};
diff --git a/Documentation/devicetree/bindings/i2c/samsung-i2c.txt b/Documentation/devicetree/bindings/i2c/samsung-i2c.txt
index 38832c7..b6cb5a1 100644
--- a/Documentation/devicetree/bindings/i2c/samsung-i2c.txt
+++ b/Documentation/devicetree/bindings/i2c/samsung-i2c.txt
@@ -6,14 +6,18 @@
   - compatible: value should be either of the following.
       (a) "samsung, s3c2410-i2c", for i2c compatible with s3c2410 i2c.
       (b) "samsung, s3c2440-i2c", for i2c compatible with s3c2440 i2c.
+      (c) "samsung, s3c2440-hdmiphy-i2c", for s3c2440-like i2c used
+          inside HDMIPHY block found on several samsung SoCs
   - reg: physical base address of the controller and length of memory mapped
     region.
   - interrupts: interrupt number to the cpu.
   - samsung,i2c-sda-delay: Delay (in ns) applied to data line (SDA) edges.
-  - gpios: The order of the gpios should be the following: <SDA, SCL>.
-    The gpio specifier depends on the gpio controller.
 
 Optional properties:
+  - gpios: The order of the gpios should be the following: <SDA, SCL>.
+    The gpio specifier depends on the gpio controller. Required in all
+    cases except for "samsung,s3c2440-hdmiphy-i2c" whose input/output
+    lines are permanently wired to the respective client.
   - samsung,i2c-slave-addr: Slave address in multi-master enviroment. If not
     specified, default value is 0.
   - samsung,i2c-max-bus-freq: Desired frequency in Hz of the bus. If not
diff --git a/Documentation/devicetree/bindings/i2c/xiic.txt b/Documentation/devicetree/bindings/i2c/xiic.txt
new file mode 100644
index 0000000..ceabbe9
--- /dev/null
+++ b/Documentation/devicetree/bindings/i2c/xiic.txt
@@ -0,0 +1,22 @@
+Xilinx IIC controller:
+
+Required properties:
+- compatible : Must be "xlnx,xps-iic-2.00.a"
+- reg : IIC register location and length
+- interrupts : IIC controller interrupt
+- #address-cells = <1>
+- #size-cells = <0>
+
+Optional properties:
+- Child nodes conforming to i2c bus binding
+
+Example:
+
+	axi_iic_0: i2c@40800000 {
+		compatible = "xlnx,xps-iic-2.00.a";
+		interrupts = < 1 2 >;
+		reg = < 0x40800000 0x10000 >;
+
+		#size-cells = <0>;
+		#address-cells = <1>;
+	};
diff --git a/Documentation/devicetree/bindings/iommu/nvidia,tegra20-gart.txt b/Documentation/devicetree/bindings/iommu/nvidia,tegra20-gart.txt
new file mode 100644
index 0000000..099d936
--- /dev/null
+++ b/Documentation/devicetree/bindings/iommu/nvidia,tegra20-gart.txt
@@ -0,0 +1,14 @@
+NVIDIA Tegra 20 GART
+
+Required properties:
+- compatible: "nvidia,tegra20-gart"
+- reg: Two pairs of cells specifying the physical address and size of
+  the memory controller registers and the GART aperture respectively.
+
+Example:
+
+	gart {
+		compatible = "nvidia,tegra20-gart";
+		reg = <0x7000f024 0x00000018	/* controller registers */
+		       0x58000000 0x02000000>;	/* GART aperture */
+	};
diff --git a/Documentation/devicetree/bindings/mfd/da9052-i2c.txt b/Documentation/devicetree/bindings/mfd/da9052-i2c.txt
new file mode 100644
index 0000000..1857f4a
--- /dev/null
+++ b/Documentation/devicetree/bindings/mfd/da9052-i2c.txt
@@ -0,0 +1,60 @@
+* Dialog DA9052/53 Power Management Integrated Circuit (PMIC)
+
+Required properties:
+- compatible : Should be "dlg,da9052", "dlg,da9053-aa",
+			 "dlg,da9053-ab", or "dlg,da9053-bb"
+
+Sub-nodes:
+- regulators : Contain the regulator nodes. The DA9052/53 regulators are
+  bound using their names as listed below:
+
+    buck0     : regulator BUCK0
+    buck1     : regulator BUCK1
+    buck2     : regulator BUCK2
+    buck3     : regulator BUCK3
+    ldo4      : regulator LDO4
+    ldo5      : regulator LDO5
+    ldo6      : regulator LDO6
+    ldo7      : regulator LDO7
+    ldo8      : regulator LDO8
+    ldo9      : regulator LDO9
+    ldo10     : regulator LDO10
+    ldo11     : regulator LDO11
+    ldo12     : regulator LDO12
+    ldo13     : regulator LDO13
+
+  The binding details of each individual regulator device can be found in:
+  Documentation/devicetree/bindings/regulator/regulator.txt
+
+Examples:
+
+i2c@63fc8000 { /* I2C1 */
+	status = "okay";
+
+	pmic: dialog@48 {
+		compatible = "dlg,da9053-aa";
+		reg = <0x48>;
+
+		regulators {
+			buck0 {
+				regulator-min-microvolt = <500000>;
+				regulator-max-microvolt = <2075000>;
+			};
+
+			buck1 {
+				regulator-min-microvolt = <500000>;
+				regulator-max-microvolt = <2075000>;
+			};
+
+			buck2 {
+				regulator-min-microvolt = <925000>;
+				regulator-max-microvolt = <2500000>;
+			};
+
+			buck3 {
+				regulator-min-microvolt = <925000>;
+				regulator-max-microvolt = <2500000>;
+			};
+		};
+	};
+};
diff --git a/Documentation/devicetree/bindings/mfd/tps65910.txt b/Documentation/devicetree/bindings/mfd/tps65910.txt
new file mode 100644
index 0000000..645f5ea
--- /dev/null
+++ b/Documentation/devicetree/bindings/mfd/tps65910.txt
@@ -0,0 +1,133 @@
+TPS65910 Power Management Integrated Circuit
+
+Required properties:
+- compatible: "ti,tps65910" or "ti,tps65911"
+- reg: I2C slave address
+- interrupts: the interrupt outputs of the controller
+- #gpio-cells: number of cells to describe a GPIO; this should be 2.
+  The first cell is the GPIO number.
+  The second cell is used to specify additional options <unused>.
+- gpio-controller: mark the device as a GPIO controller
+- #interrupt-cells: the number of cells to describe an IRQ; this should be 2.
+  The first cell is the IRQ number.
+  The second cell is the flags, encoded as the trigger masks from
+  Documentation/devicetree/bindings/interrupts.txt
+- regulators: This is the list of child nodes that specify the regulator
+  initialization data for defined regulators. Not all regulators for the given
+  device need to be present. The definition for each of these nodes is defined
+  using the standard binding for regulators found at
+  Documentation/devicetree/bindings/regulator/regulator.txt.
+
+  The valid names for regulators are:
+  tps65910: vrtc, vio, vdd1, vdd2, vdd3, vdig1, vdig2, vpll, vdac, vaux1,
+            vaux2, vaux33, vmmc
+  tps65911: vrtc, vio, vdd1, vdd3, vddctrl, ldo1, ldo2, ldo3, ldo4, ldo5,
+            ldo6, ldo7, ldo8
+
+Optional properties:
+- ti,vmbch-threshold: (tps65911) main battery charged threshold
+  comparator. (see VMBCH_VSEL in TPS65910 datasheet)
+- ti,vmbch2-threshold: (tps65911) main battery discharged threshold
+  comparator. (see VMBCH_VSEL in TPS65910 datasheet)
+- ti,en-gpio-sleep: enable sleep control for gpios
+  There should be 9 entries here, one for each gpio.
+
+Regulator Optional properties:
+- ti,regulator-ext-sleep-control: enable external sleep
+  control through external inputs [0 (not enabled), 1 (EN1), 2 (EN2) or 4(EN3)]
+  If this property is not defined, it defaults to 0 (not enabled).
+
+Example:
+
+	pmu: tps65910@d2 {
+		compatible = "ti,tps65910";
+		reg = <0xd2>;
+		interrupt-parent = <&intc>;
+		interrupts = < 0 118 0x04 >;
+
+		#gpio-cells = <2>;
+		gpio-controller;
+
+		#interrupt-cells = <2>;
+		interrupt-controller;
+
+		ti,vmbch-threshold = <0>;
+		ti,vmbch2-threshold = <0>;
+
+		ti,en-gpio-sleep = <0 0 1 0 0 0 0 0 0>;
+
+		regulators {
+			vdd1_reg: vdd1 {
+				regulator-min-microvolt = < 600000>;
+				regulator-max-microvolt = <1500000>;
+				regulator-always-on;
+				regulator-boot-on;
+				ti,regulator-ext-sleep-control = <0>;
+			};
+			vdd2_reg: vdd2 {
+				regulator-min-microvolt = < 600000>;
+				regulator-max-microvolt = <1500000>;
+				regulator-always-on;
+				regulator-boot-on;
+				ti,regulator-ext-sleep-control = <4>;
+			};
+			vddctrl_reg: vddctrl {
+				regulator-min-microvolt = < 600000>;
+				regulator-max-microvolt = <1400000>;
+				regulator-always-on;
+				regulator-boot-on;
+				ti,regulator-ext-sleep-control = <0>;
+			};
+			vio_reg: vio {
+				regulator-min-microvolt = <1500000>;
+				regulator-max-microvolt = <1800000>;
+				regulator-always-on;
+				regulator-boot-on;
+				ti,regulator-ext-sleep-control = <1>;
+			};
+			ldo1_reg: ldo1 {
+				regulator-min-microvolt = <1000000>;
+				regulator-max-microvolt = <3300000>;
+				ti,regulator-ext-sleep-control = <0>;
+			};
+			ldo2_reg: ldo2 {
+				regulator-min-microvolt = <1050000>;
+				regulator-max-microvolt = <1050000>;
+				ti,regulator-ext-sleep-control = <0>;
+			};
+			ldo3_reg: ldo3 {
+				regulator-min-microvolt = <1000000>;
+				regulator-max-microvolt = <3300000>;
+				ti,regulator-ext-sleep-control = <0>;
+			};
+			ldo4_reg: ldo4 {
+				regulator-min-microvolt = <1000000>;
+				regulator-max-microvolt = <3300000>;
+				regulator-always-on;
+				ti,regulator-ext-sleep-control = <0>;
+			};
+			ldo5_reg: ldo5 {
+				regulator-min-microvolt = <1000000>;
+				regulator-max-microvolt = <3300000>;
+				ti,regulator-ext-sleep-control = <0>;
+			};
+			ldo6_reg: ldo6 {
+				regulator-min-microvolt = <1200000>;
+				regulator-max-microvolt = <1200000>;
+				ti,regulator-ext-sleep-control = <0>;
+			};
+			ldo7_reg: ldo7 {
+				regulator-min-microvolt = <1200000>;
+				regulator-max-microvolt = <1200000>;
+				regulator-always-on;
+				regulator-boot-on;
+				ti,regulator-ext-sleep-control = <1>;
+			};
+			ldo8_reg: ldo8 {
+				regulator-min-microvolt = <1000000>;
+				regulator-max-microvolt = <3300000>;
+				regulator-always-on;
+				ti,regulator-ext-sleep-control = <1>;
+			};
+		};
+	};
diff --git a/Documentation/devicetree/bindings/mfd/twl6040.txt b/Documentation/devicetree/bindings/mfd/twl6040.txt
new file mode 100644
index 0000000..bc67c6f
--- /dev/null
+++ b/Documentation/devicetree/bindings/mfd/twl6040.txt
@@ -0,0 +1,62 @@
+Texas Instruments TWL6040 family
+
+The TWL6040s are 8-channel high quality low-power audio codecs providing audio
+and vibra functionality on OMAP4+ platforms.
+They are connected to the host processor via i2c for commands, and via McPDM
+for audio data and commands.
+
+Required properties:
+- compatible : Must be "ti,twl6040";
+- reg: must be 0x4b for i2c address
+- interrupts: twl6040 has one interrupt line connected to the main SoC
+- interrupt-parent: The parent interrupt controller
+- twl6040,audpwron-gpio: Power on GPIO line for the twl6040
+
+- vio-supply: Regulator for the twl6040 VIO supply
+- v2v1-supply: Regulator for the twl6040 V2V1 supply
+
+Optional properties, nodes:
+- enable-active-high: To power on the twl6040 during boot.
+
+Vibra functionality
+Required properties:
+- vddvibl-supply: Regulator for the left vibra motor
+- vddvibr-supply: Regulator for the right vibra motor
+- vibra { }: Configuration section for vibra parameters containing the following
+	     properties:
+- ti,vibldrv-res: Resistance parameter for left driver
+- ti,vibrdrv-res: Resistance parameter for right driver
+- ti,viblmotor-res: Resistance parameter for left motor
+- ti,vibrmotor-res: Resistance parameter for right motor
+
+Optional properties within vibra { } section:
+- vddvibl_uV: If the vddvibl default voltage needs to be changed
+- vddvibr_uV: If the vddvibr default voltage needs to be changed
+
+Example:
+&i2c1 {
+	twl6040: twl@4b {
+		compatible = "ti,twl6040";
+		reg = <0x4b>;
+
+		interrupts = <0 119 4>;
+		interrupt-parent = <&gic>;
+		twl6040,audpwron-gpio = <&gpio4 31 0>;
+
+		vio-supply = <&v1v8>;
+		v2v1-supply = <&v2v1>;
+		enable-active-high;
+
+		/* regulators for vibra motor */
+		vddvibl-supply = <&vbat>;
+		vddvibr-supply = <&vbat>;
+
+		vibra {
+			/* Vibra driver, motor resistance parameters */
+			ti,vibldrv-res = <8>;
+			ti,vibrdrv-res = <3>;
+			ti,viblmotor-res = <10>;
+			ti,vibrmotor-res = <10>;
+		};
+	};
+};
diff --git a/Documentation/devicetree/bindings/mmc/fsl-esdhc.txt b/Documentation/devicetree/bindings/mmc/fsl-esdhc.txt
index 64bcb8b..0d93b4b 100644
--- a/Documentation/devicetree/bindings/mmc/fsl-esdhc.txt
+++ b/Documentation/devicetree/bindings/mmc/fsl-esdhc.txt
@@ -11,9 +11,11 @@
   - interrupt-parent : interrupt source phandle.
   - clock-frequency : specifies eSDHC base clock frequency.
   - sdhci,wp-inverted : (optional) specifies that eSDHC controller
-    reports inverted write-protect state;
+    reports inverted write-protect state; New devices should use
+    the generic "wp-inverted" property.
   - sdhci,1-bit-only : (optional) specifies that a controller can
-    only handle 1-bit data transfers.
+    only handle 1-bit data transfers. New devices should use the
+    generic "bus-width = <1>" property.
   - sdhci,auto-cmd12: (optional) specifies that a controller can
     only handle auto CMD12.
 
diff --git a/Documentation/devicetree/bindings/mmc/fsl-imx-esdhc.txt b/Documentation/devicetree/bindings/mmc/fsl-imx-esdhc.txt
index ab22fe6..c7e404b 100644
--- a/Documentation/devicetree/bindings/mmc/fsl-imx-esdhc.txt
+++ b/Documentation/devicetree/bindings/mmc/fsl-imx-esdhc.txt
@@ -9,7 +9,7 @@
 - interrupts : Should contain eSDHC interrupt
 
 Optional properties:
-- fsl,card-wired : Indicate the card is wired to host permanently
+- non-removable : Indicate the card is wired to host permanently
 - fsl,cd-internal : Indicate to use controller internal card detection
 - fsl,wp-internal : Indicate to use controller internal write protection
 - cd-gpios : Specify GPIOs for card detection
diff --git a/Documentation/devicetree/bindings/mmc/mmc-spi-slot.txt b/Documentation/devicetree/bindings/mmc/mmc-spi-slot.txt
index 89a0084..d64aea5 100644
--- a/Documentation/devicetree/bindings/mmc/mmc-spi-slot.txt
+++ b/Documentation/devicetree/bindings/mmc/mmc-spi-slot.txt
@@ -10,7 +10,8 @@
 
 Optional properties:
 - gpios : may specify GPIOs in this order: Card-Detect GPIO,
-  Write-Protect GPIO.
+  Write-Protect GPIO. Note that this does not follow the
+  binding from mmc.txt, for historic reasons.
 - interrupts : the interrupt of a card detect interrupt.
 - interrupt-parent : the phandle for the interrupt controller that
   services interrupts for this device.
diff --git a/Documentation/devicetree/bindings/mmc/mmc.txt b/Documentation/devicetree/bindings/mmc/mmc.txt
new file mode 100644
index 0000000..6e70dcd
--- /dev/null
+++ b/Documentation/devicetree/bindings/mmc/mmc.txt
@@ -0,0 +1,27 @@
+These properties are common to multiple MMC host controllers. Any host
+that requires the respective functionality should implement them using
+these definitions.
+
+Required properties:
+- bus-width: Number of data lines, can be <1>, <4>, or <8>
+
+Optional properties:
+- cd-gpios : Specify GPIOs for card detection, see gpio binding
+- wp-gpios : Specify GPIOs for write protection, see gpio binding
+- cd-inverted: when present, polarity on the cd gpio line is inverted
+- wp-inverted: when present, polarity on the wp gpio line is inverted
+- non-removable: non-removable slot (like eMMC)
+- max-frequency: maximum operating clock frequency
+
+Example:
+
+sdhci@ab000000 {
+	compatible = "sdhci";
+	reg = <0xab000000 0x200>;
+	interrupts = <23>;
+	bus-width = <4>;
+	cd-gpios = <&gpio 69 0>;
+	cd-inverted;
+	wp-gpios = <&gpio 70 0>;
+	max-frequency = <50000000>;
+};
diff --git a/Documentation/devicetree/bindings/mmc/mmci.txt b/Documentation/devicetree/bindings/mmc/mmci.txt
new file mode 100644
index 0000000..14a81d5
--- /dev/null
+++ b/Documentation/devicetree/bindings/mmc/mmci.txt
@@ -0,0 +1,19 @@
+* ARM PrimeCell MultiMedia Card Interface (MMCI) PL180/1
+
+The ARM PrimeCell MMCI PL180 and PL181 provide an interface for
+reading and writing to MultiMedia and SD cards alike.
+
+Required properties:
+- compatible             : contains "arm,pl18x", "arm,primecell".
+- reg                    : contains pl18x registers and length.
+- interrupts             : contains the device IRQ(s).
+- arm,primecell-periphid : contains the PrimeCell Peripheral ID.
+
+Optional properties:
+- wp-gpios               : contains any write protect (ro) gpios
+- cd-gpios               : contains any card detection gpios
+- cd-inverted            : indicates whether the cd gpio is inverted
+- max-frequency          : contains the maximum operating frequency
+- bus-width              : number of data lines, can be <1>, <4>, or <8>
+- mmc-cap-mmc-highspeed  : indicates whether MMC is high speed capable
+- mmc-cap-sd-highspeed   : indicates whether SD is high speed capable
diff --git a/Documentation/devicetree/bindings/mmc/mxs-mmc.txt b/Documentation/devicetree/bindings/mmc/mxs-mmc.txt
new file mode 100644
index 0000000..14d870a
--- /dev/null
+++ b/Documentation/devicetree/bindings/mmc/mxs-mmc.txt
@@ -0,0 +1,25 @@
+* Freescale MXS MMC controller
+
+The Freescale MXS Synchronous Serial Ports (SSP) can act as a MMC controller
+to support MMC, SD, and SDIO types of memory cards.
+
+Required properties:
+- compatible: Should be "fsl,<chip>-mmc".  The supported chips include
+  imx23 and imx28.
+- reg: Should contain registers location and length
+- interrupts: Should contain ERROR and DMA interrupts
+- fsl,ssp-dma-channel: APBH DMA channel for the SSP
+- bus-width: Number of data lines, can be <1>, <4>, or <8>
+
+Optional properties:
+- wp-gpios: Specify GPIOs for write protection
+
+Examples:
+
+ssp0: ssp@80010000 {
+	compatible = "fsl,imx28-mmc";
+	reg = <0x80010000 0x2000>;
+	interrupts = <96 82>;
+	fsl,ssp-dma-channel = <0>;
+	bus-width = <8>;
+};
diff --git a/Documentation/devicetree/bindings/mmc/nvidia-sdhci.txt b/Documentation/devicetree/bindings/mmc/nvidia-sdhci.txt
index 7e51154..f77c303 100644
--- a/Documentation/devicetree/bindings/mmc/nvidia-sdhci.txt
+++ b/Documentation/devicetree/bindings/mmc/nvidia-sdhci.txt
@@ -7,12 +7,12 @@
 - compatible : Should be "nvidia,<chip>-sdhci"
 - reg : Should contain SD/MMC registers location and length
 - interrupts : Should contain SD/MMC interrupt
+- bus-width : Number of data lines, can be <1>, <4>, or <8>
 
 Optional properties:
 - cd-gpios : Specify GPIOs for card detection
 - wp-gpios : Specify GPIOs for write protection
 - power-gpios : Specify GPIOs for power control
-- support-8bit : Boolean, indicates if 8-bit mode should be used.
 
 Example:
 
@@ -23,5 +23,5 @@
 	cd-gpios = <&gpio 69 0>; /* gpio PI5 */
 	wp-gpios = <&gpio 57 0>; /* gpio PH1 */
 	power-gpios = <&gpio 155 0>; /* gpio PT3 */
-	support-8bit;
+	bus-width = <8>;
 };
diff --git a/Documentation/devicetree/bindings/mmc/ti-omap-hsmmc.txt b/Documentation/devicetree/bindings/mmc/ti-omap-hsmmc.txt
index dbd4368..8a53958 100644
--- a/Documentation/devicetree/bindings/mmc/ti-omap-hsmmc.txt
+++ b/Documentation/devicetree/bindings/mmc/ti-omap-hsmmc.txt
@@ -15,7 +15,7 @@
 ti,dual-volt: boolean, supports dual voltage cards
 <supply-name>-supply: phandle to the regulator device tree node
 "supply-name" examples are "vmmc", "vmmc_aux" etc
-ti,bus-width: Number of data lines, default assumed is 1 if the property is missing.
+bus-width: Number of data lines, default assumed is 1 if the property is missing.
 cd-gpios: GPIOs for card detection
 wp-gpios: GPIOs for write protection
 ti,non-removable: non-removable slot (like eMMC)
@@ -27,7 +27,7 @@
 		reg = <0x4809c000 0x400>;
 		ti,hwmods = "mmc1";
 		ti,dual-volt;
-		ti,bus-width = <4>;
+		bus-width = <4>;
 		vmmc-supply = <&vmmc>; /* phandle to regulator node */
 		ti,non-removable;
 	};
diff --git a/Documentation/devicetree/bindings/mtd/gpmi-nand.txt b/Documentation/devicetree/bindings/mtd/gpmi-nand.txt
new file mode 100644
index 0000000..1a5bbd3
--- /dev/null
+++ b/Documentation/devicetree/bindings/mtd/gpmi-nand.txt
@@ -0,0 +1,33 @@
+* Freescale General-Purpose Media Interface (GPMI)
+
+The GPMI nand controller provides an interface to control the
+NAND flash chips. We support only one NAND chip now.
+
+Required properties:
+  - compatible : should be "fsl,<chip>-gpmi-nand"
+  - reg : should contain registers location and length for gpmi and bch.
+  - reg-names: Should contain the reg names "gpmi-nand" and "bch"
+  - interrupts : The first is the DMA interrupt number for GPMI.
+                 The second is the BCH interrupt number.
+  - interrupt-names : The interrupt names "gpmi-dma", "bch";
+  - fsl,gpmi-dma-channel : Should contain the dma channel it uses.
+
+The device tree may optionally contain sub-nodes describing partitions of the
+address space. See partition.txt for more detail.
+
+Examples:
+
+gpmi-nand@8000c000 {
+	compatible = "fsl,imx28-gpmi-nand";
+	#address-cells = <1>;
+	#size-cells = <1>;
+	reg = <0x8000c000 0x2000>, <0x8000a000 0x2000>;
+	reg-names = "gpmi-nand", "bch";
+	interrupts = <88>, <41>;
+	interrupt-names = "gpmi-dma", "bch";
+	fsl,gpmi-dma-channel = <4>;
+
+	partition@0 {
+	...
+	};
+};
diff --git a/Documentation/devicetree/bindings/mtd/mxc-nand.txt b/Documentation/devicetree/bindings/mtd/mxc-nand.txt
new file mode 100644
index 0000000..b5833d1
--- /dev/null
+++ b/Documentation/devicetree/bindings/mtd/mxc-nand.txt
@@ -0,0 +1,19 @@
+* Freescale's mxc_nand
+
+Required properties:
+- compatible: "fsl,imxXX-nand"
+- reg: address range of the nfc block
+- interrupts: irq to be used
+- nand-bus-width: see nand.txt
+- nand-ecc-mode: see nand.txt
+- nand-on-flash-bbt: see nand.txt
+
+Example:
+
+	nand@d8000000 {
+		compatible = "fsl,imx27-nand";
+		reg = <0xd8000000 0x1000>;
+		interrupts = <29>;
+		nand-bus-width = <8>;
+		nand-ecc-mode = "hw";
+	};
diff --git a/Documentation/devicetree/bindings/net/fsl-fec.txt b/Documentation/devicetree/bindings/net/fsl-fec.txt
index de43951..7ab9e1a 100644
--- a/Documentation/devicetree/bindings/net/fsl-fec.txt
+++ b/Documentation/devicetree/bindings/net/fsl-fec.txt
@@ -14,7 +14,7 @@
 
 Example:
 
-fec@83fec000 {
+ethernet@83fec000 {
 	compatible = "fsl,imx51-fec", "fsl,imx27-fec";
 	reg = <0x83fec000 0x4000>;
 	interrupts = <87>;
diff --git a/Documentation/devicetree/bindings/pinctrl/pinctrl_spear.txt b/Documentation/devicetree/bindings/pinctrl/pinctrl_spear.txt
index 3664d37..b4480d5 100644
--- a/Documentation/devicetree/bindings/pinctrl/pinctrl_spear.txt
+++ b/Documentation/devicetree/bindings/pinctrl/pinctrl_spear.txt
@@ -4,6 +4,8 @@
 - compatible	: "st,spear300-pinmux"
 		: "st,spear310-pinmux"
 		: "st,spear320-pinmux"
+		: "st,spear1310-pinmux"
+		: "st,spear1340-pinmux"
 - reg		: Address range of the pinctrl registers
 - st,pinmux-mode: Mandatory for SPEAr300 and SPEAr320 and invalid for others.
 	- Its values for SPEAr300:
@@ -89,6 +91,37 @@
 	"rmii0_1_grp", "i2c1_8_9_grp", "i2c1_98_99_grp", "i2c2_0_1_grp",
 	"i2c2_2_3_grp", "i2c2_19_20_grp", "i2c2_75_76_grp", "i2c2_96_97_grp"
 
+For SPEAr1310 machines:
+	"i2c0_grp", "ssp0_grp", "ssp0_cs0_grp", "ssp0_cs1_2_grp", "i2s0_grp",
+	"i2s1_grp", "clcd_grp", "clcd_high_res_grp", "arm_gpio_grp",
+	"smi_2_chips_grp", "smi_4_chips_grp", "gmii_grp", "rgmii_grp",
+	"smii_0_1_2_grp", "ras_mii_txclk_grp", "nand_8bit_grp",
+	"nand_16bit_grp", "nand_4_chips_grp", "keyboard_6x6_grp",
+	"keyboard_rowcol6_8_grp", "uart0_grp", "uart0_modem_grp",
+	"gpt0_tmr0_grp", "gpt0_tmr1_grp", "gpt1_tmr0_grp", "gpt1_tmr1_grp",
+	"sdhci_grp", "cf_grp", "xd_grp", "touch_xy_grp",
+	"uart1_disable_i2c_grp", "uart1_disable_sd_grp", "uart2_3_grp",
+	"uart4_grp", "uart5_grp", "rs485_0_1_tdm_0_1_grp", "i2c_1_2_grp",
+	"i2c3_dis_smi_clcd_grp", "i2c3_dis_sd_i2s0_grp", "i2c_4_5_dis_smi_grp",
+	"i2c4_dis_sd_grp", "i2c5_dis_sd_grp", "i2c_6_7_dis_kbd_grp",
+	"i2c6_dis_sd_grp", "i2c7_dis_sd_grp", "can0_dis_nor_grp",
+	"can0_dis_sd_grp", "can1_dis_sd_grp", "can1_dis_kbd_grp", "pcie0_grp",
+	"pcie1_grp", "pcie2_grp", "sata0_grp", "sata1_grp", "sata2_grp",
+	"ssp1_dis_kbd_grp", "ssp1_dis_sd_grp", "gpt64_grp"
+
+For SPEAr1340 machines:
+	"pads_as_gpio_grp", "fsmc_8bit_grp", "fsmc_16bit_grp", "fsmc_pnor_grp",
+	"keyboard_row_col_grp", "keyboard_col5_grp", "spdif_in_grp",
+	"spdif_out_grp", "gpt_0_1_grp", "pwm0_grp", "pwm1_grp", "pwm2_grp",
+	"pwm3_grp", "vip_mux_grp", "vip_mux_cam0_grp", "vip_mux_cam1_grp",
+	"vip_mux_cam2_grp", "vip_mux_cam3_grp", "cam0_grp", "cam1_grp",
+	"cam2_grp", "cam3_grp", "smi_grp", "ssp0_grp", "ssp0_cs1_grp",
+	"ssp0_cs2_grp", "ssp0_cs3_grp", "uart0_grp", "uart0_enh_grp",
+	"uart1_grp", "i2s_in_grp", "i2s_out_grp", "gmii_grp", "rgmii_grp",
+	"rmii_grp", "sgmii_grp", "i2c0_grp", "i2c1_grp", "cec0_grp", "cec1_grp",
+	"sdhci_grp", "cf_grp", "xd_grp", "clcd_grp", "arm_trace_grp",
+	"miphy_dbg_grp", "pcie_grp", "sata_grp"
+
 Valid values for function names are:
 For All SPEAr3xx machines:
 	"firda", "i2c0", "ssp_cs", "ssp0", "mii0", "gpio0", "uart0_ext",
@@ -106,3 +139,17 @@
 	"uart2", "uart3", "uart4", "uart5", "uart6", "rs485", "touchscreen",
 	"can0", "can1", "pwm0_1", "pwm2", "pwm3", "ssp1", "ssp2", "mii2",
 	"mii0_1", "i2c1", "i2c2"
+
+
+For SPEAr1310 machines:
+	"i2c0", "ssp0", "i2s0", "i2s1", "clcd", "arm_gpio", "smi", "gmii",
+	"rgmii", "smii_0_1_2", "ras_mii_txclk", "nand", "keyboard", "uart0",
+	"gpt0", "gpt1", "sdhci", "cf", "xd", "touchscreen", "uart1", "uart2_3",
+	"uart4", "uart5", "rs485_0_1_tdm_0_1", "i2c_1_2", "i2c3_i2s1",
+	"i2c_4_5", "i2c_6_7", "can0", "can1", "pci", "sata", "ssp1", "gpt64"
+
+For SPEAr1340 machines:
+	"pads_as_gpio", "fsmc", "keyboard", "spdif_in", "spdif_out", "gpt_0_1",
+	"pwm", "vip", "cam0", "cam1", "cam2", "cam3", "smi", "ssp0", "uart0",
+	"uart1", "i2s", "gmac", "i2c0", "i2c1", "cec0", "cec1", "sdhci", "cf",
+	"xd", "clcd", "arm_trace", "miphy_dbg", "pcie", "sata"
diff --git a/Documentation/devicetree/bindings/rtc/lpc32xx-rtc.txt b/Documentation/devicetree/bindings/rtc/lpc32xx-rtc.txt
new file mode 100644
index 0000000..a87a1e9
--- /dev/null
+++ b/Documentation/devicetree/bindings/rtc/lpc32xx-rtc.txt
@@ -0,0 +1,15 @@
+* NXP LPC32xx SoC Real Time Clock controller
+
+Required properties:
+- compatible: must be "nxp,lpc3220-rtc"
+- reg: physical base address of the controller and length of memory mapped
+  region.
+- interrupts: The RTC interrupt
+
+Example:
+
+	rtc@40024000 {
+		compatible = "nxp,lpc3220-rtc";
+		reg = <0x40024000 0x1000>;
+		interrupts = <52 0>;
+	};
diff --git a/Documentation/devicetree/bindings/rtc/spear-rtc.txt b/Documentation/devicetree/bindings/rtc/spear-rtc.txt
new file mode 100644
index 0000000..ca67ac6
--- /dev/null
+++ b/Documentation/devicetree/bindings/rtc/spear-rtc.txt
@@ -0,0 +1,17 @@
+* SPEAr RTC
+
+Required properties:
+- compatible : "st,spear600-rtc"
+- reg : Address range of the rtc registers
+- interrupt-parent: Should be the phandle for the interrupt controller
+  that services interrupts for this device
+- interrupt: Should contain the rtc interrupt number
+
+Example:
+
+	rtc@fc000000 {
+		compatible = "st,spear600-rtc";
+		reg = <0xfc000000 0x1000>;
+		interrupt-parent = <&vic1>;
+		interrupts = <12>;
+	};
diff --git a/Documentation/devicetree/bindings/sound/omap-dmic.txt b/Documentation/devicetree/bindings/sound/omap-dmic.txt
new file mode 100644
index 0000000..fd8105f
--- /dev/null
+++ b/Documentation/devicetree/bindings/sound/omap-dmic.txt
@@ -0,0 +1,21 @@
+* Texas Instruments OMAP4+ Digital Microphone Module
+
+Required properties:
+- compatible: "ti,omap4-dmic"
+- reg: Register location and size as an array:
+       <MPU access base address, size>,
+       <L3 interconnect address, size>;
+- interrupts: Interrupt number for DMIC
+- interrupt-parent: The parent interrupt controller
+- ti,hwmods: Name of the hwmod associated with OMAP dmic IP
+
+Example:
+
+dmic: dmic@4012e000 {
+	compatible = "ti,omap4-dmic";
+	reg = <0x4012e000 0x7f>, /* MPU private access */
+	      <0x4902e000 0x7f>; /* L3 Interconnect */
+	interrupts = <0 114 0x4>;
+	interrupt-parent = <&gic>;
+	ti,hwmods = "dmic";
+};
diff --git a/Documentation/devicetree/bindings/sound/omap-mcpdm.txt b/Documentation/devicetree/bindings/sound/omap-mcpdm.txt
new file mode 100644
index 0000000..0741dff
--- /dev/null
+++ b/Documentation/devicetree/bindings/sound/omap-mcpdm.txt
@@ -0,0 +1,21 @@
+* Texas Instruments OMAP4+ McPDM
+
+Required properties:
+- compatible: "ti,omap4-mcpdm"
+- reg: Register location and size as an array:
+       <MPU access base address, size>,
+       <L3 interconnect address, size>;
+- interrupts: Interrupt number for McPDM
+- interrupt-parent: The parent interrupt controller
+- ti,hwmods: Name of the hwmod associated to the McPDM
+
+Example:
+
+mcpdm: mcpdm@40132000 {
+	compatible = "ti,omap4-mcpdm";
+	reg = <0x40132000 0x7f>, /* MPU private access */
+	      <0x49032000 0x7f>; /* L3 Interconnect */
+	interrupts = <0 112 0x4>;
+	interrupt-parent = <&gic>;
+	ti,hwmods = "mcpdm";
+};
diff --git a/Documentation/devicetree/bindings/tty/serial/fsl-imx-uart.txt b/Documentation/devicetree/bindings/tty/serial/fsl-imx-uart.txt
index a9c0406..b462d0c 100644
--- a/Documentation/devicetree/bindings/tty/serial/fsl-imx-uart.txt
+++ b/Documentation/devicetree/bindings/tty/serial/fsl-imx-uart.txt
@@ -11,7 +11,7 @@
 
 Example:
 
-uart@73fbc000 {
+serial@73fbc000 {
 	compatible = "fsl,imx51-uart", "fsl,imx21-uart";
 	reg = <0x73fbc000 0x4000>;
 	interrupts = <31>;
diff --git a/Documentation/devicetree/bindings/usb/tegra-usb.txt b/Documentation/devicetree/bindings/usb/tegra-usb.txt
index 007005d..e9b005d 100644
--- a/Documentation/devicetree/bindings/usb/tegra-usb.txt
+++ b/Documentation/devicetree/bindings/usb/tegra-usb.txt
@@ -12,6 +12,9 @@
  - nvidia,vbus-gpio : If present, specifies a gpio that needs to be
    activated for the bus to be powered.
 
+Required properties for phy_type == ulpi:
+  - nvidia,phy-reset-gpio : The GPIO used to reset the PHY.
+
 Optional properties:
   - dr_mode : dual role mode. Indicates the working mode for
    nvidia,tegra20-ehci compatible controllers.  Can be "host", "peripheral",
diff --git a/Documentation/dma-buf-sharing.txt b/Documentation/dma-buf-sharing.txt
index 3bbd5c5..ad86fb8 100644
--- a/Documentation/dma-buf-sharing.txt
+++ b/Documentation/dma-buf-sharing.txt
@@ -29,13 +29,6 @@
    in memory, mapped into its own address space, so it can access the same area
    of memory.
 
-*IMPORTANT*: [see https://lkml.org/lkml/2011/12/20/211 for more details]
-For this first version, A buffer shared using the dma_buf sharing API:
-- *may* be exported to user space using "mmap" *ONLY* by exporter, outside of
-  this framework.
-- with this new iteration of the dma-buf api cpu access from the kernel has been
-  enable, see below for the details.
-
 dma-buf operations for device dma only
 --------------------------------------
 
@@ -300,6 +293,17 @@
    Note that these calls need to always succeed. The exporter needs to complete
    any preparations that might fail in begin_cpu_access.
 
+   For some cases the overhead of kmap can be too high, so a vmap interface
+   is introduced. This interface should be used very carefully, as vmalloc
+   space is a limited resource on many architectures.
+
+   Interfaces:
+      void *dma_buf_vmap(struct dma_buf *dmabuf)
+      void dma_buf_vunmap(struct dma_buf *dmabuf, void *vaddr)
+
+   The vmap call can fail if there is no vmap support in the exporter, or if it
+   runs out of vmalloc space. Fallback to kmap should be implemented.
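+
+   As a rough sketch of how an importer might combine the two (do_cpu_work()
+   is an illustrative placeholder, the size is assumed to be page aligned, and
+   the access is assumed to already be bracketed by the begin/end cpu access
+   calls described above):
+
+      void *vaddr = dma_buf_vmap(dmabuf);
+
+      if (vaddr) {
+              /* one contiguous kernel mapping of the whole buffer */
+              do_cpu_work(vaddr, dmabuf->size);
+              dma_buf_vunmap(dmabuf, vaddr);
+      } else {
+              /* no vmap support in the exporter, or vmalloc space exhausted:
+               * fall back to per-page kmap access */
+              unsigned long i;
+
+              for (i = 0; i < dmabuf->size / PAGE_SIZE; i++) {
+                      void *page = dma_buf_kmap(dmabuf, i);
+
+                      do_cpu_work(page, PAGE_SIZE);
+                      dma_buf_kunmap(dmabuf, i, page);
+              }
+      }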
+
 3. Finish access
 
    When the importer is done accessing the range specified in begin_cpu_access,
@@ -313,6 +317,83 @@
 				  enum dma_data_direction dir);
 
 
+Direct Userspace Access/mmap Support
+------------------------------------
+
+Being able to mmap an exported dma-buf buffer object has 2 main use-cases:
+- CPU fallback processing in a pipeline and
+- supporting existing mmap interfaces in importers.
+
+1. CPU fallback processing in a pipeline
+
+   In many processing pipelines it is sometimes required that the cpu can access
+   the data in a dma-buf (e.g. for thumbnail creation, snapshots, ...). To avoid
+   the need to handle this specially in userspace frameworks for buffer sharing
+   it's ideal if the dma_buf fd itself can be used to access the backing storage
+   from userspace using mmap.
+
+   Furthermore Android's ION framework already supports this (and is otherwise
+   rather similar to dma-buf from a userspace consumer side in using fds as
+   handles, too). So it's beneficial to support this in a similar fashion on
+   dma-buf to have a good transition path for existing Android userspace.
+
+   No special interfaces are needed; userspace simply calls mmap on the dma-buf fd.
+
+2. Supporting existing mmap interfaces in importers
+
+   Similar to the motivation for kernel cpu access it is again important that
+   the userspace code of a given importing subsystem can use the same interfaces
+   with an imported dma-buf buffer object as with a native buffer object. This is
+   especially important for drm where the userspace part of contemporary OpenGL,
+   X, and other drivers is huge, and reworking them to use a different way to
+   mmap a buffer would be rather invasive.
+
+   The assumption in the current dma-buf interfaces is that redirecting the
+   initial mmap is all that's needed. A survey of some of the existing
+   subsystems shows that no driver seems to do any nefarious thing like syncing
+   up with outstanding asynchronous processing on the device or allocating
+   special resources at fault time. So hopefully this is good enough, since
+   adding interfaces to intercept pagefaults and allow pte shootdowns would
+   increase the complexity quite a bit.
+
+   Interface:
+      int dma_buf_mmap(struct dma_buf *, struct vm_area_struct *,
+		       unsigned long);
+
+   If the importing subsystem simply provides a special-purpose mmap call to set
+   up a mapping in userspace, calling do_mmap with dma_buf->file will equally
+   achieve that for a dma-buf object.
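+
+   As a hedged illustration (my_buffer and the private_data lookup are made-up
+   names, not taken from any particular driver), an importer-side mmap handler
+   could simply forward to the dma-buf:
+
+      static int my_importer_mmap(struct file *file, struct vm_area_struct *vma)
+      {
+              struct my_buffer *buf = file->private_data;
+
+              /* reuse the exporter's mmap implementation; vm_pgoff is the
+               * page offset into the dma-buf */
+              return dma_buf_mmap(buf->dmabuf, vma, vma->vm_pgoff);
+      }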
+
+3. Implementation notes for exporters
+
+   Because dma-buf buffers have invariant size over their lifetime, the dma-buf
+   core checks whether a vma is too large and rejects such mappings. The
+   exporter hence does not need to duplicate this check.
+
+   Because existing importing subsystems might presume coherent mappings for
+   userspace, the exporter needs to set up a coherent mapping. If that's not
+   possible, it needs to fake coherency by manually shooting down ptes when
+   leaving the cpu domain and flushing caches at fault time. Note that all the
+   dma_buf files share the same anon inode, hence the exporter needs to replace
+   the dma_buf file stored in vma->vm_file with its own if pte shootdown is
+   required. This is because the kernel uses the underlying inode's address_space
+   for vma tracking (and hence pte tracking at shootdown time with
+   unmap_mapping_range).
+
+   If the above shootdown dance turns out to be too expensive in certain
+   scenarios, we can extend dma-buf with a more explicit cache tracking scheme
+   for userspace mappings. But the current assumption is that using mmap is
+   always a slower path, so some inefficiencies should be acceptable.
+
+   Exporters that shoot down mappings (for any reasons) shall not do any
+   synchronization at fault time with outstanding device operations.
+   Synchronization is an orthogonal issue to sharing the backing storage of a
+   buffer and hence should not be handled by dma-buf itself. This is explicitly
+   mentioned here because many people seem to want something like this, but if
+   different exporters handle this differently, buffer sharing can fail in
+   interesting ways depending upon the exporter (if userspace starts depending
+   upon this implicit synchronization).
+
 Miscellaneous notes
 -------------------
 
@@ -336,6 +417,20 @@
   the exporting driver to create a dmabuf fd must provide a way to let
   userspace control setting of O_CLOEXEC flag passed in to dma_buf_fd().
 
+- If an exporter needs to manually flush caches and hence needs to fake
+  coherency for mmap support, it needs to be able to zap all the ptes pointing
+  at the backing storage. Now linux mm needs a struct address_space associated
+  with the struct file stored in vma->vm_file to do that with the function
+  unmap_mapping_range. But the dma_buf framework only backs every dma_buf fd
+  with the anon_file struct file, i.e. all dma_bufs share the same file.
+
+  Hence exporters need to set up their own file (and address_space) association
+  by setting vma->vm_file and adjusting vma->vm_pgoff in the dma_buf mmap
+  callback. In the specific case of a gem driver the exporter could use the
+  shmem file already provided by gem (and set vm_pgoff = 0). Exporters can then
+  zap ptes by unmapping the corresponding range of the struct address_space
+  associated with their own file.
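+
+  A minimal sketch of that idea, assuming a gem-style exporter that keeps a
+  shmem backing file in obj->filp (the field name is illustrative):
+
+	/* in the exporter's dma_buf mmap callback */
+	get_file(obj->filp);
+	fput(vma->vm_file);
+	vma->vm_file = obj->filp;	/* own file -> own address_space */
+	vma->vm_pgoff = 0;		/* object starts at offset 0 in that file */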
+
 References:
 [1] struct dma_buf_ops in include/linux/dma-buf.h
 [2] All interfaces mentioned above defined in include/linux/dma-buf.h
diff --git a/Documentation/feature-removal-schedule.txt b/Documentation/feature-removal-schedule.txt
index 4ba1eb7..56000b3 100644
--- a/Documentation/feature-removal-schedule.txt
+++ b/Documentation/feature-removal-schedule.txt
@@ -595,3 +595,20 @@
         flexible fashion.
 
 ----------------------------
+
+What:	at91-mci driver ("CONFIG_MMC_AT91")
+When:	3.7
+Why:	There are two mci drivers: at91-mci and atmel-mci. The PDC support
+	was added to atmel-mci as a first step to support more chips.
+	Then at91-mci was kept only for old IP versions (on at91rm9200 and
+	at91sam9261). The support of these IP versions has just been added
+	to atmel-mci, so atmel-mci can be used for all chips.
+Who:	Ludovic Desroches <ludovic.desroches@atmel.com>
+
+----------------------------
+
+What:	net/wanrouter/
+When:	June 2013
+Why:	Unsupported/unmaintained/unused since 2.6
+
+----------------------------
diff --git a/Documentation/filesystems/Locking b/Documentation/filesystems/Locking
index 4fca82e..8e2da1e 100644
--- a/Documentation/filesystems/Locking
+++ b/Documentation/filesystems/Locking
@@ -60,8 +60,8 @@
 	ssize_t (*getxattr) (struct dentry *, const char *, void *, size_t);
 	ssize_t (*listxattr) (struct dentry *, char *, size_t);
 	int (*removexattr) (struct dentry *, const char *);
-	void (*truncate_range)(struct inode *, loff_t, loff_t);
 	int (*fiemap)(struct inode *, struct fiemap_extent_info *, u64 start, u64 len);
+	void (*update_time)(struct inode *, struct timespec *, int);
 
 locking rules:
 	all may block
@@ -87,8 +87,9 @@
 getxattr:	no
 listxattr:	no
 removexattr:	yes
-truncate_range:	yes
 fiemap:		no
+update_time:	no
+
 	Additionally, ->rmdir(), ->unlink() and ->rename() have ->i_mutex on
 victim.
 	cross-directory ->rename() has (per-superblock) ->s_vfs_rename_sem.
diff --git a/Documentation/filesystems/ext3.txt b/Documentation/filesystems/ext3.txt
index b100adc..293855e 100644
--- a/Documentation/filesystems/ext3.txt
+++ b/Documentation/filesystems/ext3.txt
@@ -59,9 +59,9 @@
 			Setting it to very large values will improve
 			performance.
 
-barrier=<0(*)|1>	This enables/disables the use of write barriers in
-barrier			the jbd code.  barrier=0 disables, barrier=1 enables.
-nobarrier	(*)	This also requires an IO stack which can support
+barrier=<0|1(*)>	This enables/disables the use of write barriers in
+barrier	(*)		the jbd code.  barrier=0 disables, barrier=1 enables.
+nobarrier		This also requires an IO stack which can support
 			barriers, and if jbd gets an error on a barrier
 			write, it will disable again with a warning.
 			Write barriers enforce proper on-disk ordering
diff --git a/Documentation/filesystems/porting b/Documentation/filesystems/porting
index 74acd96..8c91d10 100644
--- a/Documentation/filesystems/porting
+++ b/Documentation/filesystems/porting
@@ -297,7 +297,8 @@
 be used instead.  It gets called whenever the inode is evicted, whether it has
 remaining links or not.  Caller does *not* evict the pagecache or inode-associated
 metadata buffers; getting rid of those is responsibility of method, as it had
-been for ->delete_inode().
+been for ->delete_inode(). Caller makes sure async writeback cannot be running
+for the inode while (or after) ->evict_inode() is called.
 
 	->drop_inode() returns int now; it's called on final iput() with
 inode->i_lock held and it returns true if filesystems wants the inode to be
@@ -306,14 +307,11 @@
 simply of return 1.  Note that all actual eviction work is done by caller after
 ->drop_inode() returns.
 
-	clear_inode() is gone; use end_writeback() instead.  As before, it must
-be called exactly once on each call of ->evict_inode() (as it used to be for
-each call of ->delete_inode()).  Unlike before, if you are using inode-associated
-metadata buffers (i.e. mark_buffer_dirty_inode()), it's your responsibility to
-call invalidate_inode_buffers() before end_writeback().
-	No async writeback (and thus no calls of ->write_inode()) will happen
-after end_writeback() returns, so actions that should not overlap with ->write_inode()
-(e.g. freeing on-disk inode if i_nlink is 0) ought to be done after that call.
+	As before, clear_inode() must be called exactly once on each call of
+->evict_inode() (as it used to be for each call of ->delete_inode()).  Unlike
+before, if you are using inode-associated metadata buffers (i.e.
+mark_buffer_dirty_inode()), it's your responsibility to call
+invalidate_inode_buffers() before clear_inode().
 
 	NOTE: checking i_nlink in the beginning of ->write_inode() and bailing out
 if it's zero is not *and* *never* *had* *been* enough.  Final unlink() and iput()
diff --git a/Documentation/filesystems/proc.txt b/Documentation/filesystems/proc.txt
index ef088e5..fb0a6ae 100644
--- a/Documentation/filesystems/proc.txt
+++ b/Documentation/filesystems/proc.txt
@@ -40,6 +40,7 @@
   3.4	/proc/<pid>/coredump_filter - Core dump filtering settings
   3.5	/proc/<pid>/mountinfo - Information about mounts
   3.6	/proc/<pid>/comm  & /proc/<pid>/task/<tid>/comm
+  3.7   /proc/<pid>/task/<tid>/children - Information about task children
 
   4	Configuring procfs
   4.1	Mount options
@@ -310,6 +311,11 @@
   start_data    address above which program data+bss is placed
   end_data      address below which program data+bss is placed
   start_brk     address above which program heap can be expanded with brk()
+  arg_start     address above which program command line is placed
+  arg_end       address below which program command line is placed
+  env_start     address above which program environment is placed
+  env_end       address below which program environment is placed
+  exit_code     the thread's exit_code in the form reported by the waitpid system call
 ..............................................................................
 
 The /proc/PID/maps file containing the currently mapped memory regions and
@@ -743,6 +749,7 @@
 VmallocTotal:   112216 kB
 VmallocUsed:       428 kB
 VmallocChunk:   111088 kB
+AnonHugePages:   49152 kB
 
     MemTotal: Total usable ram (i.e. physical ram minus a few reserved
               bits and the kernel binary code)
@@ -776,6 +783,7 @@
        Dirty: Memory which is waiting to get written back to the disk
    Writeback: Memory which is actively being written back to the disk
    AnonPages: Non-file backed pages mapped into userspace page tables
+AnonHugePages: Non-file backed huge pages mapped into userspace page tables
       Mapped: files which have been mmaped, such as libraries
         Slab: in-kernel data structures cache
 SReclaimable: Part of Slab, that might be reclaimed, such as caches
@@ -1576,6 +1584,23 @@
 comm value.
 
 
+3.7	/proc/<pid>/task/<tid>/children - Information about task children
+-------------------------------------------------------------------------
+This file provides a fast way to retrieve the first level children pids
+of the task pointed to by the <pid>/<tid> pair. The format is a space separated
+stream of pids.
+
+Note the "first level" here -- if a child has its own children they will
+not be listed here; one needs to read /proc/<children-pid>/task/<tid>/children
+to obtain the descendants.
+
+Since this interface is intended to be fast and cheap it doesn't
+guarantee to provide precise results and some children might be
+skipped, especially if they've exited right after we printed their
+pids, so one needs to either stop or freeze processes being inspected
+if precise results are needed.
+
+
 ------------------------------------------------------------------------------
 Configuring procfs
 ------------------------------------------------------------------------------
diff --git a/Documentation/filesystems/vfs.txt b/Documentation/filesystems/vfs.txt
index 0d04920..efd23f4 100644
--- a/Documentation/filesystems/vfs.txt
+++ b/Documentation/filesystems/vfs.txt
@@ -363,7 +363,7 @@
 	ssize_t (*getxattr) (struct dentry *, const char *, void *, size_t);
 	ssize_t (*listxattr) (struct dentry *, char *, size_t);
 	int (*removexattr) (struct dentry *, const char *);
-	void (*truncate_range)(struct inode *, loff_t, loff_t);
+	void (*update_time)(struct inode *, struct timespec *, int);
 };
 
 Again, all methods are called without any locks being held, unless
@@ -472,9 +472,9 @@
   removexattr: called by the VFS to remove an extended attribute from
   	a file. This method is called by removexattr(2) system call.
 
-  truncate_range: a method provided by the underlying filesystem to truncate a
-  	range of blocks , i.e. punch a hole somewhere in a file.
-
+  update_time: called by the VFS to update a specific time or the i_version of
+  	an inode.  If this is not defined the VFS will update the inode itself
+  	and call mark_inode_dirty_sync.
 
 The Address Space Object
 ========================
@@ -760,7 +760,7 @@
 ----------------------
 
 This describes how the VFS can manipulate an open file. As of kernel
-2.6.22, the following members are defined:
+3.5, the following members are defined:
 
 struct file_operations {
 	struct module *owner;
@@ -790,6 +790,8 @@
 	int (*flock) (struct file *, int, struct file_lock *);
 	ssize_t (*splice_write)(struct pipe_inode_info *, struct file *, size_t, unsigned int);
 	ssize_t (*splice_read)(struct file *, struct pipe_inode_info *, size_t, unsigned int);
+	int (*setlease)(struct file *, long arg, struct file_lock **);
+	long (*fallocate)(struct file *, int mode, loff_t offset, loff_t len);
 };
 
 Again, all methods are called without any locks being held, unless
@@ -858,6 +860,11 @@
   splice_read: called by the VFS to splice data from file to a pipe. This
 	       method is used by the splice(2) system call
 
+  setlease: called by the VFS to set or release a file lock lease.
+	    setlease has the file_lock_lock held and must not sleep.
+
+  fallocate: called by the VFS to preallocate blocks or punch a hole.
+
 Note that the file operations are implemented by the specific
 filesystem in which the inode resides. When opening a device node
 (character or block special) most filesystems will call special
diff --git a/Documentation/hwmon/coretemp b/Documentation/hwmon/coretemp
index 84d46c0..c86b50c 100644
--- a/Documentation/hwmon/coretemp
+++ b/Documentation/hwmon/coretemp
@@ -6,7 +6,9 @@
     Prefix: 'coretemp'
     CPUID: family 0x6, models 0xe (Pentium M DC), 0xf (Core 2 DC 65nm),
                               0x16 (Core 2 SC 65nm), 0x17 (Penryn 45nm),
-                              0x1a (Nehalem), 0x1c (Atom), 0x1e (Lynnfield)
+                              0x1a (Nehalem), 0x1c (Atom), 0x1e (Lynnfield),
+                              0x26 (Tunnel Creek Atom), 0x27 (Medfield Atom),
+                              0x36 (Cedar Trail Atom)
     Datasheet: Intel 64 and IA-32 Architectures Software Developer's Manual
                Volume 3A: System Programming Guide
                http://softwarecommunity.intel.com/Wiki/Mobility/720.htm
@@ -52,6 +54,17 @@
 
 Process		Processor					TjMax(C)
 
+22nm		Core i5/i7 Processors
+		i7 3920XM, 3820QM, 3720QM, 3667U, 3520M		105
+		i5 3427U, 3360M/3320M				105
+		i7 3770/3770K					105
+		i5 3570/3570K, 3550, 3470/3450			105
+		i7 3770S					103
+		i5 3570S/3550S, 3475S/3470S/3450S		103
+		i7 3770T					94
+		i5 3570T					94
+		i5 3470T					91
+
 32nm		Core i3/i5/i7 Processors
 		i7 660UM/640/620, 640LM/620, 620M, 610E		105
 		i5 540UM/520/430, 540M/520/450/430		105
@@ -65,6 +78,11 @@
 		U3400						105
 		P4505/P4500 					90
 
+32nm		Atom Processors
+		Z2460						90
+		D2700/2550/2500					100
+		N2850/2800/2650/2600				100
+
 45nm		Xeon Processors 5400 Quad-Core
 		X5492, X5482, X5472, X5470, X5460, X5450	85
 		E5472, E5462, E5450/40/30/20/10/05		85
@@ -85,6 +103,8 @@
 		N475/470/455/450				100
 		N280/270					90
 		330/230						125
+		E680/660/640/620				90
+		E680T/660T/640T/620T				110
 
 45nm		Core2 Processors
 		Solo ULV SU3500/3300				100
diff --git a/Documentation/i2c/functionality b/Documentation/i2c/functionality
index 42c17c1..b0ff2ab 100644
--- a/Documentation/i2c/functionality
+++ b/Documentation/i2c/functionality
@@ -18,9 +18,9 @@
                                   adapters typically can not do these)
   I2C_FUNC_10BIT_ADDR             Handles the 10-bit address extensions
   I2C_FUNC_PROTOCOL_MANGLING      Knows about the I2C_M_IGNORE_NAK,
-                                  I2C_M_REV_DIR_ADDR, I2C_M_NOSTART and
-                                  I2C_M_NO_RD_ACK flags (which modify the
-                                  I2C protocol!)
+                                  I2C_M_REV_DIR_ADDR and I2C_M_NO_RD_ACK
+                                  flags (which modify the I2C protocol!)
+  I2C_FUNC_NOSTART                Can skip repeated start sequence
   I2C_FUNC_SMBUS_QUICK            Handles the SMBus write_quick command
   I2C_FUNC_SMBUS_READ_BYTE        Handles the SMBus read_byte command
   I2C_FUNC_SMBUS_WRITE_BYTE       Handles the SMBus write_byte command
@@ -50,6 +50,9 @@
                                   emulated by a real I2C adapter (using
                                   the transparent emulation layer)
 
+In kernel versions prior to 3.5 I2C_FUNC_NOSTART was implemented as
+part of I2C_FUNC_PROTOCOL_MANGLING.
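+
+A client driver that depends on this can probe for it in the usual way, e.g.
+(a sketch; the adapter pointer and the error handling are up to the driver):
+
+  if (!i2c_check_functionality(adapter, I2C_FUNC_NOSTART))
+          return -ENODEV;   /* adapter cannot skip the repeated start */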
+
 
 ADAPTER IMPLEMENTATION
 ----------------------
diff --git a/Documentation/i2c/i2c-protocol b/Documentation/i2c/i2c-protocol
index 10518dd..0b3e62d 100644
--- a/Documentation/i2c/i2c-protocol
+++ b/Documentation/i2c/i2c-protocol
@@ -49,7 +49,9 @@
 Modified transactions
 =====================
 
-We have found some I2C devices that needs the following modifications:
+The following modifications to the I2C protocol can also be generated.
+With the exception of I2C_M_NOSTART these are usually only needed to
+work around device issues:
 
   Flag I2C_M_NOSTART: 
     In a combined transaction, no 'S Addr Wr/Rd [A]' is generated at some
@@ -60,6 +62,11 @@
     we do not generate Addr, but we do generate the startbit S. This will
     probably confuse all other clients on your bus, so don't try this.
 
+    This is often used to gather transmits from multiple data buffers in
+    system memory into something that appears as a single transfer to the
+    I2C device but may also be used between direction changes by some
+    rare devices.
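+
+    As a sketch of the gather-write case (client, reg_buf, data_buf, data_len
+    and ret are illustrative names, not from a specific driver), two buffers
+    can be sent as one transfer:
+
+	struct i2c_msg msgs[] = {
+		{ .addr = client->addr, .flags = 0,
+		  .len = sizeof(reg_buf), .buf = reg_buf },
+		/* continue without a repeated start or address byte */
+		{ .addr = client->addr, .flags = I2C_M_NOSTART,
+		  .len = data_len, .buf = data_buf },
+	};
+
+	ret = i2c_transfer(client->adapter, msgs, ARRAY_SIZE(msgs));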
+
   Flags I2C_M_REV_DIR_ADDR
     This toggles the Rd/Wr flag. That is, if you want to do a write, but
     need to emit an Rd instead of a Wr, or vice versa, you set this
diff --git a/Documentation/i2c/muxes/gpio-i2cmux b/Documentation/i2c/muxes/i2c-mux-gpio
similarity index 85%
rename from Documentation/i2c/muxes/gpio-i2cmux
rename to Documentation/i2c/muxes/i2c-mux-gpio
index 811cd78..bd9b229 100644
--- a/Documentation/i2c/muxes/gpio-i2cmux
+++ b/Documentation/i2c/muxes/i2c-mux-gpio
@@ -1,11 +1,11 @@
-Kernel driver gpio-i2cmux
+Kernel driver i2c-gpio-mux
 
 Author: Peter Korsgaard <peter.korsgaard@barco.com>
 
 Description
 -----------
 
-gpio-i2cmux is an i2c mux driver providing access to I2C bus segments
+i2c-gpio-mux is an i2c mux driver providing access to I2C bus segments
 from a master I2C bus and a hardware MUX controlled through GPIO pins.
 
 E.G.:
@@ -26,16 +26,16 @@
 Usage
 -----
 
-gpio-i2cmux uses the platform bus, so you need to provide a struct
+i2c-gpio-mux uses the platform bus, so you need to provide a struct
 platform_device with the platform_data pointing to a struct
 gpio_i2cmux_platform_data with the I2C adapter number of the master
 bus, the number of bus segments to create and the GPIO pins used
-to control it. See include/linux/gpio-i2cmux.h for details.
+to control it. See include/linux/i2c-gpio-mux.h for details.
 
 E.G. something like this for a MUX providing 4 bus segments
 controlled through 3 GPIO pins:
 
-#include <linux/gpio-i2cmux.h>
+#include <linux/i2c-gpio-mux.h>
 #include <linux/platform_device.h>
 
 static const unsigned myboard_gpiomux_gpios[] = {
@@ -57,7 +57,7 @@
 };
 
 static struct platform_device myboard_i2cmux = {
-	.name		= "gpio-i2cmux",
+	.name		= "i2c-gpio-mux",
 	.id		= 0,
 	.dev		= {
 		.platform_data	= &myboard_i2cmux_data,
diff --git a/Documentation/initrd.txt b/Documentation/initrd.txt
index 1ba84f3..4e1839c 100644
--- a/Documentation/initrd.txt
+++ b/Documentation/initrd.txt
@@ -362,5 +362,5 @@
     http://www.almesberger.net/cv/papers/ols2k-9.ps.gz
 [2] newlib package (experimental), with initrd example
     http://sources.redhat.com/newlib/
-[3] Brouwer, Andries; "util-linux: Miscellaneous utilities for Linux"
-    ftp://ftp.win.tue.nl/pub/linux-local/utils/util-linux/
+[3] util-linux: Miscellaneous utilities for Linux
+    http://www.kernel.org/pub/linux/utils/util-linux/
diff --git a/Documentation/kbuild/kbuild.txt b/Documentation/kbuild/kbuild.txt
index 68e32bb..6466704 100644
--- a/Documentation/kbuild/kbuild.txt
+++ b/Documentation/kbuild/kbuild.txt
@@ -50,6 +50,10 @@
 --------------------------------------------------
 Additional options used for $(LD) when linking modules.
 
+LDFLAGS_vmlinux
+--------------------------------------------------
+Additional options passed to final link of vmlinux.
+
 KBUILD_VERBOSE
 --------------------------------------------------
 Set the kbuild verbosity. Can be assigned same values as "V=...".
@@ -214,3 +218,18 @@
 These two variables allow to override the user@host string displayed during
 boot and in /proc/version. The default value is the output of the commands
 whoami and host, respectively.
+
+KBUILD_LDS
+--------------------------------------------------
+The linker script with full path. Assigned by the top-level Makefile.
+
+KBUILD_VMLINUX_INIT
+--------------------------------------------------
+All object files for the init (first) part of vmlinux.
+Files specified with KBUILD_VMLINUX_INIT are linked first.
+
+KBUILD_VMLINUX_MAIN
+--------------------------------------------------
+All object files for the main part of vmlinux.
+KBUILD_VMLINUX_INIT and KBUILD_VMLINUX_MAIN together specify
+all the object files used to link vmlinux.
diff --git a/Documentation/kbuild/kconfig.txt b/Documentation/kbuild/kconfig.txt
index 9d5f2a9..a09f1a6 100644
--- a/Documentation/kbuild/kconfig.txt
+++ b/Documentation/kbuild/kconfig.txt
@@ -53,15 +53,15 @@
 --------------------------------------------------
 (partially based on lkml email from/by Rob Landley, re: miniconfig)
 --------------------------------------------------
-The allyesconfig/allmodconfig/allnoconfig/randconfig variants can
-also use the environment variable KCONFIG_ALLCONFIG as a flag or a
-filename that contains config symbols that the user requires to be
-set to a specific value.  If KCONFIG_ALLCONFIG is used without a
-filename, "make *config" checks for a file named
-"all{yes/mod/no/def/random}.config" (corresponding to the *config command
-that was used) for symbol values that are to be forced.  If this file
-is not found, it checks for a file named "all.config" to contain forced
-values.
+The allyesconfig/allmodconfig/allnoconfig/randconfig variants can also
+use the environment variable KCONFIG_ALLCONFIG as a flag or a filename
+that contains config symbols that the user requires to be set to a
+specific value.  If KCONFIG_ALLCONFIG is used without a filename where
+KCONFIG_ALLCONFIG == "" or KCONFIG_ALLCONFIG == "1", "make *config"
+checks for a file named "all{yes/mod/no/def/random}.config"
+(corresponding to the *config command that was used) for symbol values
+that are to be forced.  If this file is not found, it checks for a
+file named "all.config" to contain forced values.
 
 This enables you to create "miniature" config (miniconfig) or custom
 config files containing just the config symbols that you are interested
diff --git a/Documentation/kernel-parameters.txt b/Documentation/kernel-parameters.txt
index b69cfdc12..a92c5eb 100644
--- a/Documentation/kernel-parameters.txt
+++ b/Documentation/kernel-parameters.txt
@@ -335,6 +335,12 @@
 					  requirements as needed. This option
 					  does not override iommu=pt
 
+	amd_iommu_dump=	[HW,X86-64]
+			Enable AMD IOMMU driver option to dump the ACPI table
+			for AMD IOMMU. With this option enabled, AMD IOMMU
+			driver will print ACPI tables for AMD IOMMU during
+			IOMMU initialization.
+
 	amijoy.map=	[HW,JOY] Amiga joystick support
 			Map of devices attached to JOY0DAT and JOY1DAT
 			Format: <a>,<b>
@@ -397,8 +403,6 @@
 	atkbd.softrepeat= [HW]
 			Use software keyboard repeat
 
-	autotest	[IA-64]
-
 	baycom_epp=	[HW,AX25]
 			Format: <io>,<mode>
 
@@ -508,6 +512,11 @@
 			Also note the kernel might malfunction if you disable
 			some critical bits.
 
+	cma=nn[MG]	[ARM,KNL]
+			Sets the size of kernel global memory area for contiguous
+			memory allocations. For more information, see
+			include/linux/dma-contiguous.h
+
 	cmo_free_hint=	[PPC] Format: { yes | no }
 			Specify whether pages are marked as being inactive
 			when they are freed.  This is used in CMO environments
@@ -515,6 +524,10 @@
 			a hypervisor.
 			Default: yes
 
+	coherent_pool=nn[KMG]	[ARM,KNL]
+			Sets the size of memory pool for coherent, atomic dma
+			allocations if Contiguous Memory Allocator (CMA) is used.
+
 	code_bytes	[X86] How many bytes of object code to print
 			in an oops report.
 			Range: 0 - 8192
@@ -1444,8 +1457,6 @@
 			devices can be requested on-demand with the
 			/dev/loop-control interface.
 
-	mcatest=	[IA-64]
-
 	mce		[X86-32] Machine Check Exception
 
 	mce=option	[X86-64] See Documentation/x86/x86_64/boot-options.txt
@@ -2532,6 +2543,15 @@
 
 	sched_debug	[KNL] Enables verbose scheduler debug messages.
 
+	skew_tick=	[KNL] Offset the periodic timer tick per cpu to mitigate
+			xtime_lock contention on larger systems, and/or RCU lock
+			contention on all systems with CONFIG_MAXSMP set.
+			Format: { "0" | "1" }
+			0 -- disable. (may be 1 via CONFIG_CMDLINE="skew_tick=1")
+			1 -- enable.
+			Note: increases power consumption, thus should only be
+			enabled if running jitter sensitive (HPC/RT) workloads.
+
 	security=	[SECURITY] Choose a security module to enable at boot.
 			If this boot parameter is not specified, only the first
 			security module asking for security registration will be
diff --git a/Documentation/leds/ledtrig-transient.txt b/Documentation/leds/ledtrig-transient.txt
new file mode 100644
index 0000000..3bd38b4
--- /dev/null
+++ b/Documentation/leds/ledtrig-transient.txt
@@ -0,0 +1,152 @@
+LED Transient Trigger
+=====================
+
+The leds timer trigger does not currently have an interface to activate
+a one shot timer. The current support allows for setting two timers, one for
+specifying how long the state should be on, and the second for how long it
+should be off. The delay_on value specifies the time period an LED should stay
+in on state, followed by a delay_off value that specifies how long the LED
+should stay in off state. The on and off cycle repeats until the trigger
+gets deactivated. There is no provision for one time activation to implement
+features that require an on or off state to be held just once and then stay in
+the original state forever.
+
+Without a one shot timer interface, user space can still use the timer trigger
+to set a timer to hold a state; however, when the user space application
+crashes or goes away without deactivating the timer, the hardware will be left
+in that state permanently.
+
+As a specific example of this use-case, let's look at the vibrate feature on
+phones. The vibrate function on phones is implemented using PWM pins on the
+SoC or PMIC. A one shot timer is needed to control the vibrate feature, so
+that a user space crash cannot leave the phone in vibrate mode permanently
+and drain the battery.
+
+Transient trigger addresses the need for one shot timer activation. The
+transient trigger can be enabled and disabled just like the other leds
+triggers.
+
+When an led class device driver registers itself, it can specify all led
+triggers it supports and a default trigger. During registration, activation
+routine for the default trigger gets called. During registration of an led
+class device, the LED state does not change.
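+
+As an illustration only (a minimal sketch; the device name and the
+brightness_set handler below are hypothetical, the field names are those of
+struct led_classdev in include/linux/leds.h), a driver could request this
+trigger as its default like this:
+
+	static struct led_classdev vibrator_led = {
+		.name            = "vibrator",
+		.default_trigger = "transient",
+		.brightness_set  = vibrator_brightness_set,
+	};
+
+and then register it from its probe routine with
+led_classdev_register(parent_dev, &vibrator_led).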
+
+When the driver unregisters, deactivation routine for the currently active
+trigger will be called, and LED state is changed to LED_OFF.
+
+Driver suspend changes the LED state to LED_OFF and resume doesn't change
+the state. Please note that there is no explicit interaction between the
+suspend and resume actions and the currently enabled trigger. LED state
+changes are suspended while the driver is in suspend state. Any timers
+that are active at the time the driver gets suspended continue to run, without
+being able to actually change the LED state. Once the driver is resumed,
+triggers start functioning again.
+
+LED state changes are controlled using brightness which is a common led
+class device property. When brightness is set to 0 from user space via
+echo 0 > brightness, it will result in deactivating the current trigger.
+
+Transient trigger uses standard register and unregister interfaces. During
+trigger registration, for each led class device that specifies this trigger
+as its default trigger, trigger activation routine will get called. During
+registration, the LED state does not change, unless there is another trigger
+active, in which case LED state changes to LED_OFF.
+
+During trigger unregistration, LED state gets changed to LED_OFF.
+
+Transient trigger activation routine doesn't change the LED state. It
+creates its properties and does its initialization. Transient trigger
+deactivation routine will cancel any timer that is active before it cleans
+up and removes the properties it created. It will restore the LED state to
+non-transient state. When driver gets suspended, irrespective of the transient
+state, the LED state changes to LED_OFF.
+
+Transient trigger can be enabled and disabled from user space on led class
+devices that support this trigger, as shown below:
+
+echo transient > trigger
+echo none > trigger
+
+NOTE: Add a new property trigger state to control the state.
+
+This trigger exports three properties, activate, state, and duration. When
+transient trigger is activated these properties are set to default values.
+
+- duration allows setting timer value in msecs. The initial value is 0.
+- activate allows activating and deactivating the timer specified by
+  duration as needed. The initial and default value is 0.  This will allow
+  duration to be set after trigger activation.
+- state allows user to specify a transient state to be held for the specified
+  duration.
+
+	activate - one shot timer activate mechanism.
+		1 when activated, 0 when deactivated.
+		default value is zero when transient trigger is enabled,
+		to allow duration to be set.
+
+		The activated state indicates that a timer with the specified
+		duration is running.
+		deactivated state indicates that there is no active timer
+		running.
+
+	duration - one shot timer value. When activate is set, duration value
+		is used to start a timer that runs once. This value doesn't
+		get changed by the trigger unless user does a set via
+		echo new_value > duration
+
+	state - transient state to be held. It has two values 0 or 1. 0 maps
+		to LED_OFF and 1 maps to LED_FULL. The specified state is
+		held for the duration of the one shot timer and then the
+		state gets changed to the non-transient state which is the
+		inverse of transient state.
+		If state = LED_FULL, when the timer runs out the state will
+		go back to LED_OFF.
+		If state = LED_OFF, when the timer runs out the state will
+		go back to LED_FULL.
+		Please note that current LED state is not checked prior to
+		changing the state to the specified state.
+		The driver could map these values to inverted states, depending
+		on the default states it defines for the LED in its
+		brightness_set() interface, which is called from the led class
+		brightness_set() interfaces to control the LED state.
+
+When the timer expires, activate goes back to the deactivated state and
+duration is left at the set value to be used when activate is set at a future
+time. This allows a user app to set the time once and activate the timer to
+run once for the specified value as needed. When the timer expires, state is
+restored to the non-transient state, which is the inverse of the transient
+state.
+
+	echo 1 > activate - starts timer = duration when duration is not 0.
+	echo 0 > activate - cancels currently running timer.
+	echo n > duration - stores timer value to be used upon next
+                            activate. Currently active timer if
+                            any, continues to run for the specified time.
+	echo 0 > duration - stores timer value to be used upon next
+                            activate. Currently active timer if any,
+                            continues to run for the specified time.
+	echo 1 > state    - stores desired transient state LED_FULL to be
+			    held for the specified duration.
+	echo 0 > state    - stores desired transient state LED_OFF to be
+			    held for the specified duration.
+
+What is not supported:
+======================
+- Timer activation is one shot and extending and/or shortening the timer
+  is not supported.
+
+Example use-case 1:
+	echo transient > trigger
+	echo n > duration
+	echo 1 > state
+repeat the following step as needed:
+	echo 1 > activate - start timer = duration to run once
+	echo 1 > activate - start timer = duration to run once
+	echo none > trigger
+
+This trigger is intended to be used for the following example use cases:
+ - Control of vibrate (phones, tablets etc.) hardware by user space app.
+ - Use of LED by user space app as activity indicator.
+ - Use of LED by user space app as a kind of watchdog indicator -- as
+       long as the app is alive, it can keep the LED illuminated, if it dies
+       the LED will be extinguished automatically.
+ - Use by any user space app that needs a transient GPIO output.
diff --git a/Documentation/networking/stmmac.txt b/Documentation/networking/stmmac.txt
index ab1e8d7..5cb9a19 100644
--- a/Documentation/networking/stmmac.txt
+++ b/Documentation/networking/stmmac.txt
@@ -10,8 +10,8 @@
 (i.e. 7xxx/5xxx SoCs), SPEAr (arm), Loongson1B (mips) and XLINX XC2V3000
 FF1152AMT0221 D1215994A VIRTEX FPGA board.
 
-DWC Ether MAC 10/100/1000 Universal version 3.60a (and older) and DWC Ether MAC 10/100
-Universal version 4.0 have been used for developing this driver.
+DWC Ether MAC 10/100/1000 Universal version 3.60a (and older) and DWC Ether
+MAC 10/100 Universal version 4.0 have been used for developing this driver.
 
 This driver supports both the platform bus and PCI.
 
@@ -54,27 +54,27 @@
 When one or more packets are received, an interrupt happens. The interrupts
 are not queued so the driver has to scan all the descriptors in the ring during
 the receive process.
-This is based on NAPI so the interrupt handler signals only if there is work to be
-done, and it exits.
+This is based on NAPI so the interrupt handler signals only if there is work
+to be done, and it exits.
 Then the poll method will be scheduled at some future point.
 The incoming packets are stored, by the DMA, in a list of pre-allocated socket
 buffers in order to avoid the memcpy (Zero-copy).
 
 4.3) Timer-Driver Interrupt
-Instead of having the device that asynchronously notifies the frame receptions, the
-driver configures a timer to generate an interrupt at regular intervals.
-Based on the granularity of the timer, the frames that are received by the device
-will experience different levels of latency. Some NICs have dedicated timer
-device to perform this task. STMMAC can use either the RTC device or the TMU
-channel 2  on STLinux platforms.
+Instead of having the device that asynchronously notifies the frame receptions,
+the driver configures a timer to generate an interrupt at regular intervals.
+Based on the granularity of the timer, the frames that are received by the
+device will experience different levels of latency. Some NICs have dedicated
+timer device to perform this task. STMMAC can use either the RTC device or the
+TMU channel 2  on STLinux platforms.
 The timers frequency can be passed to the driver as parameter; when change it,
 take care of both hardware capability and network stability/performance impact.
-Several performance tests on STM platforms showed this optimisation allows to spare
-the CPU while having the maximum throughput.
+Several performance tests on STM platforms showed this optimisation allows
+sparing the CPU while keeping the maximum throughput.
 
 4.4) WOL
-Wake up on Lan feature through Magic and Unicast frames are supported for the GMAC
-core.
+Wake-up on LAN through Magic and Unicast frames is supported by the
+GMAC core.
 
 4.5) DMA descriptors
 Driver handles both normal and enhanced descriptors. The latter has been only
@@ -106,7 +106,8 @@
 These are included in the include/linux/stmmac.h header file
 and detailed below as well:
 
- struct plat_stmmacenet_data {
+struct plat_stmmacenet_data {
+	char *phy_bus_name;
 	int bus_id;
 	int phy_addr;
 	int interface;
@@ -124,19 +125,24 @@
 	void (*bus_setup)(void __iomem *ioaddr);
 	int (*init)(struct platform_device *pdev);
 	void (*exit)(struct platform_device *pdev);
+	void *custom_cfg;
+	void *custom_data;
 	void *bsp_priv;
  };
 
 Where:
+ o phy_bus_name: phy bus name to attach to the stmmac.
  o bus_id: bus identifier.
  o phy_addr: the physical address can be passed from the platform.
 	    If it is set to -1 the driver will automatically
 	    detect it at run-time by probing all the 32 addresses.
  o interface: PHY device's interface.
  o mdio_bus_data: specific platform fields for the MDIO bus.
- o pbl: the Programmable Burst Length is maximum number of beats to
+ o dma_cfg: internal DMA parameters
+   o pbl: the Programmable Burst Length is maximum number of beats to
        be transferred in one DMA transaction.
        GMAC also enables the 4xPBL by default.
+   o fixed_burst/mixed_burst/burst_len
  o clk_csr: fixed CSR Clock range selection.
  o has_gmac: uses the GMAC core.
  o enh_desc: if sets the MAC will use the enhanced descriptor structure.
@@ -160,8 +166,9 @@
 	     this is sometime necessary on some platforms (e.g. ST boxes)
 	     where the HW needs to have set some PIO lines or system cfg
 	     registers.
- o custom_cfg: this is a custom configuration that can be passed while
-	      initialising the resources.
+ o custom_cfg/custom_data: this is a custom configuration that can be passed
+			   while initialising the resources.
+ o bsp_priv: another private pointer.
 
 For the MDIO bus we have:
 
@@ -180,7 +187,6 @@
  o irqs: list of IRQs, one per PHY.
  o probed_phy_irq: if irqs is NULL, use this for probed PHY.
 
-
 For DMA engine we have the following internal fields that should be
 tuned according to the HW capabilities.
 
diff --git a/Documentation/power/charger-manager.txt b/Documentation/power/charger-manager.txt
index fdcca99..b4f7f4b 100644
--- a/Documentation/power/charger-manager.txt
+++ b/Documentation/power/charger-manager.txt
@@ -44,6 +44,16 @@
 	Normally, the platform will need to resume and suspend some devices
 	that are used by Charger Manager.
 
+* Support for premature full-battery event handling
+	If the battery voltage drops by "fullbatt_vchkdrop_uV" after
+	"fullbatt_vchkdrop_ms" from the full-battery event, the framework
+	restarts charging. This check is also performed while suspended by
+	setting wakeup time accordingly and using suspend_again.
+
+* Support for uevent-notify
+	With the charger-related events, the device sends
+	notification to users with UEVENT.
+
 2. Global Charger-Manager Data related with suspend_again
 ========================================================
 In order to setup Charger Manager with suspend-again feature
@@ -55,7 +65,7 @@
 multiple instances of Charger Manager share the same charger_global_desc
 and it will manage in-suspend monitoring for all instances of Charger Manager.
 
-The user needs to provide all the two entries properly in order to activate
+The user needs to provide all three entries properly in order to activate
 in-suspend monitoring:
 
 struct charger_global_desc {
@@ -74,6 +84,11 @@
 	same struct. If there is any other wakeup source triggered the
 	wakeup, it should return false. If the "rtc" is the only wakeup
 	reason, it should return true.
+
+bool assume_timer_stops_in_suspend;
+	: if true, Charger Manager assumes that
+	the timer (CM uses jiffies as timer) stops during suspend. Then, CM
+	assumes that the suspend-duration is the same as the alarm length.
 };
 
 3. How to setup suspend_again
@@ -111,6 +126,16 @@
 	  CM_POLL_CHARGING_ONLY: poll this battery if and only if the
 				 battery is being charged.
 
+unsigned int fullbatt_vchkdrop_ms;
+unsigned int fullbatt_vchkdrop_uV;
+	: If both have non-zero values, Charger Manager will check the
+	battery voltage drop fullbatt_vchkdrop_ms after the battery is fully
+	charged. If the voltage drop is over fullbatt_vchkdrop_uV, Charger
+	Manager will try to recharge the battery by disabling and enabling
+	chargers. Recharging with the voltage drop condition only (without the
+	delay condition) needs to be implemented with hardware interrupts from
+	fuel gauges or charger devices/chips.
+
 unsigned int fullbatt_uV;
 	: If specified with a non-zero value, Charger Manager assumes
 	that the battery is full (capacity = 100) if the battery is not being
@@ -122,6 +147,8 @@
 	this battery every polling_interval_ms or more frequently.
 
 enum data_source battery_present;
+	: CM_BATTERY_PRESENT: assume that the battery exists.
+	CM_NO_BATTERY: assume that the battery does not exist.
 	CM_FUEL_GAUGE: get battery presence information from fuel gauge.
 	CM_CHARGER_STAT: get battery presence from chargers.
 
@@ -151,7 +178,17 @@
 	the value of measure_battery_temp.
 };
 
-5. Other Considerations
+5. Notify Charger-Manager of charger events: cm_notify_event()
+=========================================================
+If a charger event needs to be reported to Charger Manager, the charger
+device driver that triggers the event can call cm_notify_event(psy, type, msg)
+to notify the corresponding Charger Manager.
+In the function, psy is the charger driver's power_supply pointer, which is
+associated with Charger-Manager. The parameter "type" is the same as the
+irq's type (enum cm_event_types). The event message "msg" is optional and is
+effective only if the event type is "UNDESCRIBED" or "OTHERS".
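+
+For example, a fuel gauge driver could report a full-battery event roughly as
+follows (a sketch only; the power_supply pointer is the driver's own and the
+event type shown is illustrative, taken from enum cm_event_types):
+
+	cm_notify_event(&my_fuel_gauge_psy, CM_EVENT_BATT_FULL, NULL);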
+
+6. Other Considerations
 =======================
 
 At the charger/battery-related events such as battery-pulled-out,
diff --git a/Documentation/power/power_supply_class.txt b/Documentation/power/power_supply_class.txt
index 9f16c51..211831d 100644
--- a/Documentation/power/power_supply_class.txt
+++ b/Documentation/power/power_supply_class.txt
@@ -84,6 +84,8 @@
 HEALTH - represents health of the battery, values corresponds to
 POWER_SUPPLY_HEALTH_*, defined in battery.h.
 
+VOLTAGE_OCV - open circuit voltage of the battery.
+
 VOLTAGE_MAX_DESIGN, VOLTAGE_MIN_DESIGN - design values for maximal and
 minimal power supply voltages. Maximal/minimal means values of voltages
 when battery considered "full"/"empty" at normal conditions. Yes, there is
diff --git a/Documentation/stable_kernel_rules.txt b/Documentation/stable_kernel_rules.txt
index f0ab5cf..4a7b54b 100644
--- a/Documentation/stable_kernel_rules.txt
+++ b/Documentation/stable_kernel_rules.txt
@@ -12,6 +12,12 @@
    marked CONFIG_BROKEN), an oops, a hang, data corruption, a real
    security issue, or some "oh, that's not good" issue.  In short, something
    critical.
+ - Serious issues as reported by a user of a distribution kernel may also
+   be considered if they fix a notable performance or interactivity issue.
+   As these fixes are not as obvious and have a higher risk of a subtle
+   regression, they should only be submitted by a distribution kernel
+   maintainer and include an addendum linking to a bugzilla entry if it
+   exists, and additional information on the user-visible impact.
  - New device IDs and quirks are also accepted.
  - No "theoretical race condition" issues, unless an explanation of how the
    race can be exploited is also provided.
diff --git a/Documentation/sysctl/fs.txt b/Documentation/sysctl/fs.txt
index 88fd7f5..13d6166 100644
--- a/Documentation/sysctl/fs.txt
+++ b/Documentation/sysctl/fs.txt
@@ -225,6 +225,13 @@
 maximum  message size value (it is every  message queue's attribute set during
 its creation).
 
+/proc/sys/fs/mqueue/msg_default is a read/write file for setting/getting the
+default number of messages in a queue if the attr parameter of mq_open(2) is
+NULL. If it exceeds msg_max, the default value is initialized to msg_max.
+
+/proc/sys/fs/mqueue/msgsize_default is a read/write file for setting/getting
+the default message size if the attr parameter of mq_open(2) is NULL. If it
+exceeds msgsize_max, the default value is initialized to msgsize_max.
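+
+For example, to make queues created with a NULL attr default to 8 messages of
+2048 bytes each:
+
+	echo 8    > /proc/sys/fs/mqueue/msg_default
+	echo 2048 > /proc/sys/fs/mqueue/msgsize_default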
 
 4. /proc/sys/fs/epoll - Configuration options for the epoll interface
 --------------------------------------------------------
diff --git a/Documentation/vm/frontswap.txt b/Documentation/vm/frontswap.txt
new file mode 100644
index 0000000..37067cf
--- /dev/null
+++ b/Documentation/vm/frontswap.txt
@@ -0,0 +1,278 @@
+Frontswap provides a "transcendent memory" interface for swap pages.
+In some environments, dramatic performance savings may be obtained because
+swapped pages are saved in RAM (or a RAM-like device) instead of a swap disk.
+
+(Note, frontswap -- and cleancache (merged at 3.0) -- are the "frontends"
+and the only necessary changes to the core kernel for transcendent memory;
+all other supporting code -- the "backends" -- is implemented as drivers.
+See the LWN.net article "Transcendent memory in a nutshell" for a detailed
+overview of frontswap and related kernel parts:
+https://lwn.net/Articles/454795/ )
+
+Frontswap is so named because it can be thought of as the opposite of
+a "backing" store for a swap device.  The storage is assumed to be
+a synchronous concurrency-safe page-oriented "pseudo-RAM device" conforming
+to the requirements of transcendent memory (such as Xen's "tmem", or
+in-kernel compressed memory, aka "zcache", or future RAM-like devices);
+this pseudo-RAM device is not directly accessible or addressable by the
+kernel and is of unknown and possibly time-varying size.  The driver
+links itself to frontswap by calling frontswap_register_ops to set the
+frontswap_ops funcs appropriately and the functions it provides must
+conform to certain policies as follows:
+
+An "init" prepares the device to receive frontswap pages associated
+with the specified swap device number (aka "type").  A "store" will
+copy the page to transcendent memory and associate it with the type and
+offset associated with the page. A "load" will copy the page, if found,
+from transcendent memory into kernel memory, but will NOT remove the page
+from transcendent memory.  An "invalidate_page" will remove the page
+from transcendent memory and an "invalidate_area" will remove ALL pages
+associated with the swap type (e.g., like swapoff) and notify the "device"
+to refuse further stores with that swap type.
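+
+A backend therefore registers itself roughly as follows (a schematic sketch;
+the function names are hypothetical and the authoritative frontswap_ops
+layout is the one declared in include/linux/frontswap.h):
+
+	static struct frontswap_ops my_frontswap_ops = {
+		.init            = my_init,
+		.store           = my_store,
+		.load            = my_load,
+		.invalidate_page = my_invalidate_page,
+		.invalidate_area = my_invalidate_area,
+	};
+
+and then calls frontswap_register_ops(&my_frontswap_ops) from its
+initialization code.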
+
+Once a page is successfully stored, a matching load on the page will normally
+succeed.  So when the kernel finds itself in a situation where it needs
+to swap out a page, it first attempts to use frontswap.  If the store returns
+success, the data has been successfully saved to transcendent memory and
+a disk write and, if the data is later read back, a disk read are avoided.
+If a store returns failure, transcendent memory has rejected the data, and the
+page can be written to swap as usual.
+
+If a backend chooses, frontswap can be configured as a "writethrough
+cache" by calling frontswap_writethrough().  In this mode, the reduction
+in swap device writes is lost (and also a non-trivial performance advantage)
+in order to allow the backend to arbitrarily "reclaim" space used to
+store frontswap pages to more completely manage its memory usage.
+
+Note that if a page is stored and the page already exists in transcendent memory
+(a "duplicate" store), either the store succeeds and the data is overwritten,
+or the store fails AND the page is invalidated.  This ensures stale data may
+never be obtained from frontswap.
+
+If properly configured, monitoring of frontswap is done via debugfs in
+the /sys/kernel/debug/frontswap directory.  The effectiveness of
+frontswap can be measured (across all swap devices) with:
+
+failed_stores	- how many store attempts have failed
+loads		- how many loads were attempted (all should succeed)
+succ_stores	- how many store attempts have succeeded
+invalidates	- how many invalidates were attempted
+
+A backend implementation may provide additional metrics.
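+
+For example, with debugfs mounted at /sys/kernel/debug, all of the above can
+be dumped at once with:
+
+	grep . /sys/kernel/debug/frontswap/*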
+
+FAQ
+
+1) Where's the value?
+
+When a workload starts swapping, performance falls through the floor.
+Frontswap significantly increases performance in many such workloads by
+providing a clean, dynamic interface to read and write swap pages to
+"transcendent memory" that is otherwise not directly addressable to the kernel.
+This interface is ideal when data is transformed to a different form
+and size (such as with compression) or secretly moved (as might be
+useful for write-balancing for some RAM-like devices).  Swap pages (and
+evicted page-cache pages) are a great use for this kind of slower-than-RAM-
+but-much-faster-than-disk "pseudo-RAM device" and the frontswap (and
+cleancache) interface to transcendent memory provides a nice way to read
+and write -- and indirectly "name" -- the pages.
+
+Frontswap -- and cleancache -- with a fairly small impact on the kernel,
+provides a huge amount of flexibility for more dynamic, flexible RAM
+utilization in various system configurations:
+
+In the single kernel case, aka "zcache", pages are compressed and
+stored in local memory, thus increasing the total anonymous pages
+that can be safely kept in RAM.  Zcache essentially trades off CPU
+cycles used in compression/decompression for better memory utilization.
+Benchmarks have shown little or no impact when memory pressure is
+low while providing a significant performance improvement (25%+)
+on some workloads under high memory pressure.
+
+"RAMster" builds on zcache by adding "peer-to-peer" transcendent memory
+support for clustered systems.  Frontswap pages are locally compressed
+as in zcache, but then "remotified" to another system's RAM.  This
+allows RAM to be dynamically load-balanced back-and-forth as needed,
+i.e. when system A is overcommitted, it can swap to system B, and
+vice versa.  RAMster can also be configured as a memory server so
+many servers in a cluster can swap, dynamically as needed, to a single
+server configured with a large amount of RAM... without pre-configuring
+how much of the RAM is available for each of the clients!
+
+In the virtual case, the whole point of virtualization is to statistically
+multiplex physical resources across the varying demands of multiple
+virtual machines.  This is really hard to do with RAM and efforts to do
+it well with no kernel changes have essentially failed (except in some
+well-publicized special-case workloads).
+Specifically, the Xen Transcendent Memory backend allows otherwise
+"fallow" hypervisor-owned RAM to not only be "time-shared" between multiple
+virtual machines, but the pages can be compressed and deduplicated to
+optimize RAM utilization.  And when guest OS's are induced to surrender
+underutilized RAM (e.g. with "selfballooning"), sudden unexpected
+memory pressure may result in swapping; frontswap allows those pages
+to be swapped to and from hypervisor RAM (if overall host system memory
+conditions allow), thus mitigating the potentially awful performance impact
+of unplanned swapping.
+
+A KVM implementation is underway and has been RFC'ed to lkml.  And,
+using frontswap, investigation is also underway on the use of NVM as
+a memory extension technology.
+
+2) Sure there may be performance advantages in some situations, but
+   what's the space/time overhead of frontswap?
+
+If CONFIG_FRONTSWAP is disabled, every frontswap hook compiles into
+nothingness and the only overhead is a few extra bytes per swapon'ed
+swap device.  If CONFIG_FRONTSWAP is enabled but no frontswap "backend"
+registers, there is one extra compare of a global variable against zero for
+every swap page read or written.  If CONFIG_FRONTSWAP is enabled
+AND a frontswap backend registers AND the backend fails every "store"
+request (i.e. provides no memory despite claiming it might),
+CPU overhead is still negligible -- and since every frontswap fail
+precedes a swap page write-to-disk, the system is highly likely
+to be I/O bound and using a small fraction of a percent of a CPU
+will be irrelevant anyway.
+
+As for space, if CONFIG_FRONTSWAP is enabled AND a frontswap backend
+registers, one bit is allocated for every swap page for every swap
+device that is swapon'd.  This is added to the EIGHT bits (which
+was sixteen until about 2.6.34) that the kernel already allocates
+for every swap page for every swap device that is swapon'd.  (Hugh
+Dickins has observed that frontswap could probably steal one of
+the existing eight bits, but let's worry about that minor optimization
+later.)  For very large swap disks (which are rare) on a standard
+4K pagesize, this is 1MB per 32GB swap.
+
+When swap pages are stored in transcendent memory instead of written
+out to disk, there is a side effect that this may create more memory
+pressure that can potentially outweigh the other advantages.  A
+backend, such as zcache, must implement policies to carefully (but
+dynamically) manage memory limits to ensure this doesn't happen.
+
+3) OK, how about a quick overview of what this frontswap patch does
+   in terms that a kernel hacker can grok?
+
+Let's assume that a frontswap "backend" has registered during
+kernel initialization; this registration indicates that this
+frontswap backend has access to some "memory" that is not directly
+accessible by the kernel.  Exactly how much memory it provides is
+entirely dynamic and random.
+
+Whenever a swap-device is swapon'd frontswap_init() is called,
+passing the swap device number (aka "type") as a parameter.
+This notifies frontswap to expect attempts to "store" swap pages
+associated with that number.
+
+Whenever the swap subsystem is readying a page to write to a swap
+device (c.f swap_writepage()), frontswap_store is called.  Frontswap
+consults with the frontswap backend and if the backend says it does NOT
+have room, frontswap_store returns -1 and the kernel swaps the page
+to the swap device as normal.  Note that the response from the frontswap
+backend is unpredictable to the kernel; it may choose to never accept a
+page, it could accept every ninth page, or it might accept every
+page.  But if the backend does accept a page, the data from the page
+has already been copied and associated with the type and offset,
+and the backend guarantees the persistence of the data.  In this case,
+frontswap sets a bit in the "frontswap_map" for the swap device
+corresponding to the page offset on the swap device to which it would
+otherwise have written the data.
+
+When the swap subsystem needs to swap-in a page (swap_readpage()),
+it first calls frontswap_load() which checks the frontswap_map to
+see if the page was earlier accepted by the frontswap backend.  If
+it was, the page of data is filled from the frontswap backend and
+the swap-in is complete.  If not, the normal swap-in code is
+executed to obtain the page of data from the real swap device.
+
+So every time the frontswap backend accepts a page, a swap device read
+and (potentially) a swap device write are replaced by a "frontswap backend
+store" and (possibly) a "frontswap backend load", which are presumably much
+faster.
+
+4) Can't frontswap be configured as a "special" swap device that is
+   just higher priority than any real swap device (e.g. like zswap,
+   or maybe swap-over-nbd/NFS)?
+
+No.  First, the existing swap subsystem doesn't allow for any kind of
+swap hierarchy.  Perhaps it could be rewritten to accommodate a hierarchy,
+but this would require fairly drastic changes.  Even if it were
+rewritten, the existing swap subsystem uses the block I/O layer which
+assumes a swap device is fixed size and any page in it is linearly
+addressable.  Frontswap barely touches the existing swap subsystem,
+and works around the constraints of the block I/O subsystem to provide
+a great deal of flexibility and dynamicity.
+
+For example, the acceptance of any swap page by the frontswap backend is
+entirely unpredictable. This is critical to the definition of frontswap
+backends because it grants completely dynamic discretion to the
+backend.  In zcache, one cannot know a priori how compressible a page is.
+"Poorly" compressible pages can be rejected, and "poorly" can itself be
+defined dynamically depending on current memory constraints.
+
+Further, frontswap is entirely synchronous whereas a real swap
+device is, by definition, asynchronous and uses block I/O.  The
+block I/O layer is not only unnecessary, but may perform "optimizations"
+that are inappropriate for a RAM-oriented device including delaying
+the write of some pages for a significant amount of time.  Synchrony is
+required to ensure the dynamicity of the backend and to avoid thorny race
+conditions that would unnecessarily and greatly complicate frontswap
+and/or the block I/O subsystem.  That said, only the initial "store"
+and "load" operations need be synchronous.  A separate asynchronous thread
+is free to manipulate the pages stored by frontswap.  For example,
+the "remotification" thread in RAMster uses standard asynchronous
+kernel sockets to move compressed frontswap pages to a remote machine.
+Similarly, a KVM guest-side implementation could do in-guest compression
+and use "batched" hypercalls.
+
+In a virtualized environment, the dynamicity allows the hypervisor
+(or host OS) to do "intelligent overcommit".  For example, it can
+choose to accept pages only until host-swapping might be imminent,
+then force guests to do their own swapping.
+
+There is a downside to the transcendent memory specifications for
+frontswap:  Since any "store" might fail, there must always be a real
+slot on a real swap device to swap the page.  Thus frontswap must be
+implemented as a "shadow" to every swapon'd device with the potential
+capability of holding every page that the swap device might have held
+and the possibility that it might hold no pages at all.  This means
+that frontswap cannot contain more pages than the total of swapon'd
+swap devices.  For example, if NO swap device is configured on some
+installation, frontswap is useless.  Swapless portable devices
+can still use frontswap but a backend for such devices must configure
+some kind of "ghost" swap device and ensure that it is never used.
+
+5) Why this weird definition about "duplicate stores"?  If a page
+   has been previously successfully stored, can't it always be
+   successfully overwritten?
+
+Nearly always it can, but no, sometimes it cannot.  Consider an example
+where data is compressed and the original 4K page has been compressed
+to 1K.  Now an attempt is made to overwrite the page with data that
+is non-compressible and so would take the entire 4K.  But the backend
+has no more space.  In this case, the store must be rejected.  Whenever
+frontswap rejects a store that would overwrite, it also must invalidate
+the old data and ensure that it is no longer accessible.  Since the
+swap subsystem then writes the new data to the real swap device,
+this is the correct course of action to ensure coherency.
+
+6) What is frontswap_shrink for?
+
+When the (non-frontswap) swap subsystem swaps out a page to a real
+swap device, that page is only taking up low-value pre-allocated disk
+space.  But if frontswap has placed a page in transcendent memory, that
+page may be taking up valuable real estate.  The frontswap_shrink
+routine allows code outside of the swap subsystem to force pages out
+of the memory managed by frontswap and back into kernel-addressable memory.
+For example, in RAMster, a "suction driver" thread will attempt
+to "repatriate" pages sent to a remote machine back to the local machine;
+this is driven using the frontswap_shrink mechanism when memory pressure
+subsides.
+
+7) Why does the frontswap patch create the new include file swapfile.h?
+
+The frontswap code depends on some swap-subsystem-internal data
+structures that have, over the years, moved back and forth between
+static and global.  This seemed a reasonable compromise:  Define
+them as global but declare them in a new include file that isn't
+included by the large number of source files that include swap.h.
+
+Dan Magenheimer, last updated April 9, 2012
diff --git a/Documentation/vm/pagemap.txt b/Documentation/vm/pagemap.txt
index 4600cbe..7587493 100644
--- a/Documentation/vm/pagemap.txt
+++ b/Documentation/vm/pagemap.txt
@@ -16,7 +16,7 @@
     * Bits 0-4   swap type if swapped
     * Bits 5-54  swap offset if swapped
     * Bits 55-60 page shift (page size = 1<<page shift)
-    * Bit  61    reserved for future use
+    * Bit  61    page is file-page or shared-anon
     * Bit  62    page swapped
     * Bit  63    page present
 
diff --git a/Documentation/vm/slub.txt b/Documentation/vm/slub.txt
index 6752870..b0c6d1b 100644
--- a/Documentation/vm/slub.txt
+++ b/Documentation/vm/slub.txt
@@ -17,7 +17,7 @@
 slabs that have data in them. See "slabinfo -h" for more options when
 running the command. slabinfo can be compiled with
 
-gcc -o slabinfo tools/slub/slabinfo.c
+gcc -o slabinfo tools/vm/slabinfo.c
 
 Some of the modes of operation of slabinfo require that slub debugging
 be enabled on the command line. F.e. no tracking information will be
diff --git a/Documentation/vm/transhuge.txt b/Documentation/vm/transhuge.txt
index 29bdf62..f734bb2 100644
--- a/Documentation/vm/transhuge.txt
+++ b/Documentation/vm/transhuge.txt
@@ -166,6 +166,68 @@
 application that could have been using hugepages. This also applies to
 the regions registered in khugepaged.
 
+== Monitoring usage ==
+
+The number of transparent huge pages currently used by the system is
+available by reading the AnonHugePages field in /proc/meminfo. To
+identify what applications are using transparent huge pages, it is
+necessary to read /proc/PID/smaps and count the AnonHugePages fields
+for each mapping. Note that reading the smaps file is expensive and
+reading it frequently will incur overhead.
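+
+For example, the system-wide usage can be checked with:
+
+	grep AnonHugePages /proc/meminfo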
+
+There are a number of counters in /proc/vmstat that may be used to
+monitor how successfully the system is providing huge pages for use.
+
+thp_fault_alloc is incremented every time a huge page is successfully
+	allocated to handle a page fault. This applies to both the
+	first time a page is faulted and for COW faults.
+
+thp_collapse_alloc is incremented by khugepaged when it has found
+	a range of pages to collapse into one huge page and has
+	successfully allocated a new huge page to store the data.
+
+thp_fault_fallback is incremented if a page fault fails to allocate
+	a huge page and instead falls back to using small pages.
+
+thp_collapse_alloc_failed is incremented if khugepaged found a range
+	of pages that should be collapsed into one huge page but failed
+	the allocation.
+
+thp_split is incremented every time a huge page is split into base
+	pages. This can happen for a variety of reasons but a common
+	reason is that a huge page is old and is being reclaimed.
+
+As the system ages, allocating huge pages may be expensive as the
+system uses memory compaction to copy data around memory to free a
+huge page for use. There are some counters in /proc/vmstat to help
+monitor this overhead.
+
+compact_stall is incremented every time a process stalls to run
+	memory compaction so that a huge page is free for use.
+
+compact_success is incremented if the system compacted memory and
+	freed a huge page for use.
+
+compact_fail is incremented if the system tries to compact memory
+	but failed.
+
+compact_pages_moved is incremented each time a page is moved. If
+	this value is increasing rapidly, it implies that the system
+	is copying a lot of data to satisfy the huge page allocation.
+	It is possible that the cost of copying exceeds any savings
+	from reduced TLB misses.
+
+compact_pagemigrate_failed is incremented when the underlying mechanism
+	for moving a page failed.
+
+compact_blocks_moved is incremented each time memory compaction examines
+	a huge page aligned range of pages.
+
+It is possible to establish how long the stalls were using the function
+tracer to record how long was spent in __alloc_pages_nodemask and
+using the mm_page_alloc tracepoint to identify which allocations were
+for huge pages.
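+
+For example, the counters described above can be sampled with:
+
+	egrep 'thp_|compact_' /proc/vmstat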
+
 == get_user_pages and follow_page ==
 
 get_user_pages and follow_page if run on a hugepage, will return the
diff --git a/Documentation/watchdog/watchdog-kernel-api.txt b/Documentation/watchdog/watchdog-kernel-api.txt
index 25fe430..086638f 100644
--- a/Documentation/watchdog/watchdog-kernel-api.txt
+++ b/Documentation/watchdog/watchdog-kernel-api.txt
@@ -1,6 +1,6 @@
 The Linux WatchDog Timer Driver Core kernel API.
 ===============================================
-Last reviewed: 16-Mar-2012
+Last reviewed: 22-May-2012
 
 Wim Van Sebroeck <wim@iguana.be>
 
@@ -39,6 +39,10 @@
 The watchdog device structure looks like this:
 
 struct watchdog_device {
+	int id;
+	struct cdev cdev;
+	struct device *dev;
+	struct device *parent;
 	const struct watchdog_info *info;
 	const struct watchdog_ops *ops;
 	unsigned int bootstatus;
@@ -46,10 +50,20 @@
 	unsigned int min_timeout;
 	unsigned int max_timeout;
 	void *driver_data;
+	struct mutex lock;
 	unsigned long status;
 };
 
 It contains following fields:
+* id: set by watchdog_register_device, id 0 is special. It has both a
+  /dev/watchdog0 cdev (dynamic major, minor 0) as well as the old
+  /dev/watchdog miscdev. The id is set automatically when calling
+  watchdog_register_device.
+* cdev: cdev for the dynamic /dev/watchdog<id> device nodes. This
+  field is also populated by watchdog_register_device.
+* dev: device under the watchdog class (created by watchdog_register_device).
+* parent: set this to the parent device (or NULL) before calling
+  watchdog_register_device.
 * info: a pointer to a watchdog_info structure. This structure gives some
   additional information about the watchdog timer itself. (Like it's unique name)
 * ops: a pointer to the list of watchdog operations that the watchdog supports.
@@ -61,6 +75,7 @@
 * driver_data: a pointer to the drivers private data of a watchdog device.
   This data should only be accessed via the watchdog_set_drvdata and
   watchdog_get_drvdata routines.
+* lock: Mutex for WatchDog Timer Driver Core internal use only.
 * status: this field contains a number of status bits that give extra
   information about the status of the device (Like: is the watchdog timer
   running/active, is the nowayout bit set, is the device opened via
@@ -78,6 +93,8 @@
 	unsigned int (*status)(struct watchdog_device *);
 	int (*set_timeout)(struct watchdog_device *, unsigned int);
 	unsigned int (*get_timeleft)(struct watchdog_device *);
+	void (*ref)(struct watchdog_device *);
+	void (*unref)(struct watchdog_device *);
 	long (*ioctl)(struct watchdog_device *, unsigned int, unsigned long);
 };
 
@@ -85,6 +102,21 @@
 driver's operations. This module owner will be used to lock the module when
 the watchdog is active. (This to avoid a system crash when you unload the
 module and /dev/watchdog is still open).
+
+If the watchdog_device struct is dynamically allocated, just locking the module
+is not enough and a driver also needs to define the ref and unref operations to
+ensure the structure holding the watchdog_device does not go away.
+
+The simplest (and usually sufficient) implementation of this is to (see the
+sketch below):
+1) Add a kref struct to the same structure which is holding the watchdog_device
+2) Define a release callback for the kref which frees the struct holding both
+3) Call kref_init on this kref *before* calling watchdog_register_device()
+4) Define a ref operation calling kref_get on this kref
+5) Define a unref operation calling kref_put on this kref
+6) When it is time to cleanup:
+ * Do not kfree() the struct holding both, the last kref_put will do this!
+ * *After* calling watchdog_unregister_device() call kref_put on the kref
+
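+A minimal sketch of these steps (assuming a hypothetical driver-private
+struct my_wdt holding both the kref and the watchdog_device):
+
+	struct my_wdt {
+		struct kref kref;
+		struct watchdog_device wdd;
+	};
+
+	static void my_wdt_release(struct kref *kref)
+	{
+		kfree(container_of(kref, struct my_wdt, kref));
+	}
+
+	static void my_wdt_ref(struct watchdog_device *wdd)
+	{
+		struct my_wdt *mwdt = container_of(wdd, struct my_wdt, wdd);
+
+		kref_get(&mwdt->kref);
+	}
+
+	static void my_wdt_unref(struct watchdog_device *wdd)
+	{
+		struct my_wdt *mwdt = container_of(wdd, struct my_wdt, wdd);
+
+		kref_put(&mwdt->kref, my_wdt_release);
+	}
+
+The driver then sets .ref/.unref in its struct watchdog_ops to these helpers,
+calls kref_init(&mwdt->kref) before watchdog_register_device(&mwdt->wdd), and
+calls kref_put(&mwdt->kref, my_wdt_release) after
+watchdog_unregister_device(&mwdt->wdd), as listed above.
+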
 Some operations are mandatory and some are optional. The mandatory operations
 are:
 * start: this is a pointer to the routine that starts the watchdog timer
@@ -125,6 +157,10 @@
   (Note: the WDIOF_SETTIMEOUT needs to be set in the options field of the
   watchdog's info structure).
 * get_timeleft: this routines returns the time that's left before a reset.
+* ref: the operation that calls kref_get on the kref of a dynamically
+  allocated watchdog_device struct.
+* unref: the operation that calls kref_put on the kref of a dynamically
+  allocated watchdog_device struct.
 * ioctl: if this routine is present then it will be called first before we do
   our own internal ioctl call handling. This routine should return -ENOIOCTLCMD
   if a command is not supported. The parameters that are passed to the ioctl
@@ -144,6 +180,11 @@
   (This bit should only be used by the WatchDog Timer Driver Core).
 * WDOG_NO_WAY_OUT: this bit stores the nowayout setting for the watchdog.
   If this bit is set then the watchdog timer will not be able to stop.
+* WDOG_UNREGISTERED: this bit gets set by the WatchDog Timer Driver Core
+  after calling watchdog_unregister_device, and then checked before calling
+  any watchdog_ops, so that you can be sure that no operations (other than
+  unref) will get called after unregister, even if userspace still holds a
+  reference to /dev/watchdog.
 
   To set the WDOG_NO_WAY_OUT status bit (before registering your watchdog
   timer device) you can either:
diff --git a/Documentation/watchdog/watchdog-parameters.txt b/Documentation/watchdog/watchdog-parameters.txt
index 17ddd82..04fddbac 100644
--- a/Documentation/watchdog/watchdog-parameters.txt
+++ b/Documentation/watchdog/watchdog-parameters.txt
@@ -78,6 +78,11 @@
 wd1_timeout: Default watchdog1 timeout in 1/10secs
 wd2_timeout: Default watchdog2 timeout in 1/10secs
 -------------------------------------------------
+da9052wdt:
+timeout: Watchdog timeout in seconds. 2<= timeout <=131, default=2.048s
+nowayout: Watchdog cannot be stopped once started
+	(default=kernel config parameter)
+-------------------------------------------------
 davinci_wdt:
 heartbeat: Watchdog heartbeat period in seconds from 1 to 600, default 60
 -------------------------------------------------
diff --git a/Documentation/x86/efi-stub.txt b/Documentation/x86/efi-stub.txt
new file mode 100644
index 0000000..44e6bb6
--- /dev/null
+++ b/Documentation/x86/efi-stub.txt
@@ -0,0 +1,65 @@
+			  The EFI Boot Stub
+		     ---------------------------
+
+On the x86 platform, a bzImage can masquerade as a PE/COFF image,
+thereby convincing EFI firmware loaders to load it as an EFI
+executable. The code that modifies the bzImage header, along with the
+EFI-specific entry point that the firmware loader jumps to are
+collectively known as the "EFI boot stub", and live in
+arch/x86/boot/header.S and arch/x86/boot/compressed/eboot.c,
+respectively.
+
+By using the EFI boot stub it's possible to boot a Linux kernel
+without the use of a conventional EFI boot loader, such as grub or
+elilo. Since the EFI boot stub performs the jobs of a boot loader, in
+a certain sense it *IS* the boot loader.
+
+The EFI boot stub is enabled with the CONFIG_EFI_STUB kernel option.
+
+
+**** How to install bzImage.efi
+
+The bzImage located in arch/x86/boot/bzImage must be copied to the EFI
+System Partition (ESP) and renamed with the extension ".efi". Without
+the extension the EFI firmware loader will refuse to execute it. It's
+not possible to execute bzImage.efi from the usual Linux file systems
+because EFI firmware doesn't have support for them.
+
+
+**** Passing kernel parameters from the EFI shell
+
+Arguments to the kernel can be passed after bzImage.efi, e.g.
+
+	fs0:> bzImage.efi console=ttyS0 root=/dev/sda4
+
+
+**** The "initrd=" option
+
+Like most boot loaders, the EFI stub allows the user to specify
+multiple initrd files using the "initrd=" option. This is the only EFI
+stub-specific command line parameter; everything else is passed to the
+kernel when it boots.
+
+The path to the initrd file must be an absolute path from the
+beginning of the ESP, relative path names do not work. Also, the path
+is an EFI-style path and directory elements must be separated with
+backslashes (\). For example, given the following directory layout,
+
+fs0:>
+	Kernels\
+			bzImage.efi
+			initrd-large.img
+
+	Ramdisks\
+			initrd-small.img
+			initrd-medium.img
+
+to boot with the initrd-large.img file if the current working
+directory is fs0:\Kernels, the following command must be used,
+
+	fs0:\Kernels> bzImage.efi initrd=\Kernels\initrd-large.img
+
+Notice how bzImage.efi can be specified with a relative path. That's
+because the image we're executing is interpreted by the EFI shell,
+which understands relative paths, whereas the rest of the command line
+is passed to bzImage.efi.
diff --git a/MAINTAINERS b/MAINTAINERS
index 150a29f..eb22272 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -579,7 +579,7 @@
 F:	net/appletalk/
 
 ARASAN COMPACT FLASH PATA CONTROLLER
-M:	Viresh Kumar <viresh.kumar@st.com>
+M:	Viresh Kumar <viresh.linux@gmail.com>
 L:	linux-ide@vger.kernel.org
 S:	Maintained
 F:	include/linux/pata_arasan_cf_data.h
@@ -1077,7 +1077,7 @@
 ARM/SAMSUNG S5P SERIES Multi Format Codec (MFC) SUPPORT
 M:	Kyungmin Park <kyungmin.park@samsung.com>
 M:	Kamil Debski <k.debski@samsung.com>
-M:     Jeongtae Park <jtp.park@samsung.com>
+M:	Jeongtae Park <jtp.park@samsung.com>
 L:	linux-arm-kernel@lists.infradead.org
 L:	linux-media@vger.kernel.org
 S:	Maintained
@@ -1646,11 +1646,11 @@
 F:	drivers/gpio/gpio-bt8xx.c
 
 BTRFS FILE SYSTEM
-M:	Chris Mason <chris.mason@oracle.com>
+M:	Chris Mason <chris.mason@fusionio.com>
 L:	linux-btrfs@vger.kernel.org
 W:	http://btrfs.wiki.kernel.org/
 Q:	http://patchwork.kernel.org/project/linux-btrfs/list/
-T:	git git://git.kernel.org/pub/scm/linux/kernel/git/mason/btrfs-unstable.git
+T:	git git://git.kernel.org/pub/scm/linux/kernel/git/mason/linux-btrfs.git
 S:	Maintained
 F:	Documentation/filesystems/btrfs.txt
 F:	fs/btrfs/
@@ -1743,10 +1743,10 @@
 CAPABILITIES
 M:	Serge Hallyn <serge.hallyn@canonical.com>
 L:	linux-security-module@vger.kernel.org
-S:	Supported	
+S:	Supported
 F:	include/linux/capability.h
 F:	security/capability.c
-F:	security/commoncap.c 
+F:	security/commoncap.c
 F:	kernel/capability.c
 
 CELL BROADBAND ENGINE ARCHITECTURE
@@ -1800,6 +1800,9 @@
 CFG80211 and NL80211
 M:	Johannes Berg <johannes@sipsolutions.net>
 L:	linux-wireless@vger.kernel.org
+W:	http://wireless.kernel.org/
+T:	git git://git.kernel.org/pub/scm/linux/kernel/git/jberg/mac80211.git
+T:	git git://git.kernel.org/pub/scm/linux/kernel/git/jberg/mac80211-next.git
 S:	Maintained
 F:	include/linux/nl80211.h
 F:	include/net/cfg80211.h
@@ -1905,6 +1908,16 @@
 F:	fs/coda/
 F:	include/linux/coda*.h
 
+COMMON CLK FRAMEWORK
+M:	Mike Turquette <mturquette@ti.com>
+M:	Mike Turquette <mturquette@linaro.org>
+L:	linux-arm-kernel@lists.infradead.org (same as CLK API & CLKDEV)
+T:	git git://git.linaro.org/people/mturquette/linux.git
+S:	Maintained
+F:	drivers/clk/clk.c
+F:	drivers/clk/clk-*
+F:	include/linux/clk-pr*
+
 COMMON INTERNET FILE SYSTEM (CIFS)
 M:	Steve French <sfrench@samba.org>
 L:	linux-cifs@vger.kernel.org
@@ -2136,11 +2149,11 @@
 F:	drivers/net/wan/pc300*
 
 CYTTSP TOUCHSCREEN DRIVER
-M:      Javier Martinez Canillas <javier@dowhile0.org>
-L:      linux-input@vger.kernel.org
-S:      Maintained
-F:      drivers/input/touchscreen/cyttsp*
-F:      include/linux/input/cyttsp.h
+M:	Javier Martinez Canillas <javier@dowhile0.org>
+L:	linux-input@vger.kernel.org
+S:	Maintained
+F:	drivers/input/touchscreen/cyttsp*
+F:	include/linux/input/cyttsp.h
 
 DAMA SLAVE for AX.25
 M:	Joerg Reuter <jreuter@yaina.de>
@@ -2260,7 +2273,7 @@
 F:	include/linux/dm-*.h
 
 DIOLAN U2C-12 I2C DRIVER
-M:	Guenter Roeck <guenter.roeck@ericsson.com>
+M:	Guenter Roeck <linux@roeck-us.net>
 L:	linux-i2c@vger.kernel.org
 S:	Maintained
 F:	drivers/i2c/busses/i2c-diolan-u2c.c
@@ -2808,6 +2821,12 @@
 F:	drivers/base/firmware*.c
 F:	include/linux/firmware.h
 
+FLOPPY DRIVER
+M:	Jiri Kosina <jkosina@suse.cz>
+T:	git git://git.kernel.org/pub/scm/linux/kernel/git/jikos/floppy.git
+S:	Odd fixes
+F:	drivers/block/floppy.c
+
 FPU EMULATOR
 M:	Bill Metzenthen <billm@melbpc.org.au>
 W:	http://floatingpoint.sourceforge.net/emulator/index.html
@@ -2914,6 +2933,13 @@
 F:	include/linux/freezer.h
 F:	kernel/freezer.c
 
+FRONTSWAP API
+M:	Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>
+L:	linux-kernel@vger.kernel.org
+S:	Maintained
+F:	mm/frontswap.c
+F:	include/linux/frontswap.h
+
 FS-CACHE: LOCAL CACHING FOR NETWORK FILESYSTEMS
 M:	David Howells <dhowells@redhat.com>
 L:	linux-cachefs@redhat.com
@@ -2978,9 +3004,9 @@
 M:	Peter Korsgaard <peter.korsgaard@barco.com>
 L:	linux-i2c@vger.kernel.org
 S:	Supported
-F:	drivers/i2c/muxes/gpio-i2cmux.c
-F:	include/linux/gpio-i2cmux.h
-F:	Documentation/i2c/muxes/gpio-i2cmux
+F:	drivers/i2c/muxes/i2c-mux-gpio.c
+F:	include/linux/i2c-mux-gpio.h
+F:	Documentation/i2c/muxes/i2c-mux-gpio
 
 GENERIC HDLC (WAN) DRIVERS
 M:	Krzysztof Halasa <khc@pm.waw.pl>
@@ -3122,7 +3148,7 @@
 
 HARDWARE MONITORING
 M:	Jean Delvare <khali@linux-fr.org>
-M:	Guenter Roeck <guenter.roeck@ericsson.com>
+M:	Guenter Roeck <linux@roeck-us.net>
 L:	lm-sensors@lm-sensors.org
 W:	http://www.lm-sensors.org/
 T:	quilt kernel.org/pub/linux/kernel/people/jdelvare/linux-2.6/jdelvare-hwmon/
@@ -3222,10 +3248,8 @@
 F:	include/linux/hrtimer.h
 
 HIGH-SPEED SCC DRIVER FOR AX.25
-M:	Klaus Kudielka <klaus.kudielka@ieee.org>
 L:	linux-hams@vger.kernel.org
-W:	http://www.nt.tuwien.ac.at/~kkudielk/Linux/
-S:	Maintained
+S:	Orphan
 F:	drivers/net/hamradio/dmascc.c
 F:	drivers/net/hamradio/scc.c
 
@@ -3372,6 +3396,12 @@
 S:	Supported
 F:	drivers/scsi/ips.*
 
+ICH LPC AND GPIO DRIVER
+M:	Peter Tyser <ptyser@xes-inc.com>
+S:	Maintained
+F:	drivers/mfd/lpc_ich.c
+F:	drivers/gpio/gpio-ich.c
+
 IDE SUBSYSTEM
 M:	"David S. Miller" <davem@davemloft.net>
 L:	linux-ide@vger.kernel.org
@@ -4076,6 +4106,8 @@
 LED SUBSYSTEM
 M:	Bryan Wu <bryan.wu@canonical.com>
 M:	Richard Purdie <rpurdie@rpsys.net>
+L:	linux-leds@vger.kernel.org
+T:	git git://git.kernel.org/pub/scm/linux/kernel/git/cooloney/linux-leds.git
 S:	Maintained
 F:	drivers/leds/
 F:	include/linux/leds.h
@@ -4320,7 +4352,8 @@
 M:	Johannes Berg <johannes@sipsolutions.net>
 L:	linux-wireless@vger.kernel.org
 W:	http://linuxwireless.org/
-T:	git git://git.kernel.org/pub/scm/linux/kernel/git/linville/wireless.git
+T:	git git://git.kernel.org/pub/scm/linux/kernel/git/jberg/mac80211.git
+T:	git git://git.kernel.org/pub/scm/linux/kernel/git/jberg/mac80211-next.git
 S:	Maintained
 F:	Documentation/networking/mac80211-injection.txt
 F:	include/net/mac80211.h
@@ -4331,7 +4364,8 @@
 M:	Mattias Nissler <mattias.nissler@gmx.de>
 L:	linux-wireless@vger.kernel.org
 W:	http://linuxwireless.org/en/developers/Documentation/mac80211/RateControl/PID
-T:	git git://git.kernel.org/pub/scm/linux/kernel/git/linville/wireless.git
+T:	git git://git.kernel.org/pub/scm/linux/kernel/git/jberg/mac80211.git
+T:	git git://git.kernel.org/pub/scm/linux/kernel/git/jberg/mac80211-next.git
 S:	Maintained
 F:	net/mac80211/rc80211_pid*
 
@@ -4391,6 +4425,13 @@
 F:	drivers/video/matrox/matroxfb_*
 F:	include/linux/matroxfb.h
 
+MAX16065 HARDWARE MONITOR DRIVER
+M:	Guenter Roeck <linux@roeck-us.net>
+L:	lm-sensors@lm-sensors.org
+S:	Maintained
+F:	Documentation/hwmon/max16065
+F:	drivers/hwmon/max16065.c
+
 MAX6650 HARDWARE MONITOR AND FAN CONTROLLER DRIVER
 M:	"Hans J. Koch" <hjk@hansjkoch.de>
 L:	lm-sensors@lm-sensors.org
@@ -4495,12 +4536,6 @@
 S:	Maintained
 F:	drivers/mmc/host/imxmmc.*
 
-MOUSE AND MISC DEVICES [GENERAL]
-M:	Alessandro Rubini <rubini@ipvvis.unipv.it>
-S:	Maintained
-F:	drivers/input/mouse/
-F:	include/linux/gpio_mouse.h
-
 MOXA SMARTIO/INDUSTIO/INTELLIO SERIAL CARD
 M:	Jiri Slaby <jirislaby@gmail.com>
 S:	Maintained
@@ -5135,10 +5170,10 @@
 F:	include/linux/leds-pca9532.h
 
 PCA9541 I2C BUS MASTER SELECTOR DRIVER
-M:	Guenter Roeck <guenter.roeck@ericsson.com>
+M:	Guenter Roeck <linux@roeck-us.net>
 L:	linux-i2c@vger.kernel.org
 S:	Maintained
-F:	drivers/i2c/muxes/pca9541.c
+F:	drivers/i2c/muxes/i2c-mux-pca9541.c
 
 PCA9564/PCA9665 I2C BUS DRIVER
 M:	Wolfram Sang <w.sang@pengutronix.de>
@@ -5155,7 +5190,7 @@
 F:	drivers/firmware/pcdp.*
 
 PCI ERROR RECOVERY
-M:     Linas Vepstas <linasvepstas@gmail.com>
+M:	Linas Vepstas <linasvepstas@gmail.com>
 L:	linux-pci@vger.kernel.org
 S:	Supported
 F:	Documentation/PCI/pci-error-recovery.txt
@@ -5261,7 +5296,7 @@
 F:	drivers/pinctrl/
 
 PIN CONTROLLER - ST SPEAR
-M:	Viresh Kumar <viresh.kumar@st.com>
+M:	Viresh Kumar <viresh.linux@gmail.com>
 L:	spear-devel@list.st.com
 L:	linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
 W:	http://www.st.com/spear
@@ -5285,7 +5320,7 @@
 F:	drivers/rtc/rtc-puv3.c
 
 PMBUS HARDWARE MONITORING DRIVERS
-M:	Guenter Roeck <guenter.roeck@ericsson.com>
+M:	Guenter Roeck <linux@roeck-us.net>
 L:	lm-sensors@lm-sensors.org
 W:	http://www.lm-sensors.org/
 W:	http://www.roeck-us.net/linux/drivers/
@@ -5323,7 +5358,7 @@
 T:	git git://git.infradead.org/battery-2.6.git
 S:	Maintained
 F:	include/linux/power_supply.h
-F:	drivers/power/power_supply*
+F:	drivers/power/
 
 PNP SUPPORT
 M:	Adam Belay <abelay@mit.edu>
@@ -5681,6 +5716,9 @@
 RFKILL
 M:	Johannes Berg <johannes@sipsolutions.net>
 L:	linux-wireless@vger.kernel.org
+W:	http://wireless.kernel.org/
+T:	git git://git.kernel.org/pub/scm/linux/kernel/git/jberg/mac80211.git
+T:	git git://git.kernel.org/pub/scm/linux/kernel/git/jberg/mac80211-next.git
 S:	Maintained
 F:	Documentation/rfkill.txt
 F:	net/rfkill/
@@ -5835,7 +5873,7 @@
 F:	drivers/tty/serial
 
 SYNOPSYS DESIGNWARE DMAC DRIVER
-M:	Viresh Kumar <viresh.kumar@st.com>
+M:	Viresh Kumar <viresh.linux@gmail.com>
 S:	Maintained
 F:	include/linux/dw_dmac.h
 F:	drivers/dma/dw_dmac_regs.h
@@ -5983,7 +6021,7 @@
 F:	drivers/mmc/host/sdhci-s3c.c
 
 SECURE DIGITAL HOST CONTROLLER INTERFACE (SDHCI) ST SPEAR DRIVER
-M:	Viresh Kumar <viresh.kumar@st.com>
+M:	Viresh Kumar <viresh.linux@gmail.com>
 L:	spear-devel@list.st.com
 L:	linux-mmc@vger.kernel.org
 S:	Maintained
@@ -6339,15 +6377,26 @@
 F:	include/linux/compiler.h
 
 SPEAR PLATFORM SUPPORT
-M:	Viresh Kumar <viresh.kumar@st.com>
+M:	Viresh Kumar <viresh.linux@gmail.com>
+M:	Shiraz Hashim <shiraz.hashim@st.com>
 L:	spear-devel@list.st.com
 L:	linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
 W:	http://www.st.com/spear
 S:	Maintained
 F:	arch/arm/plat-spear/
 
+SPEAR13XX MACHINE SUPPORT
+M:	Viresh Kumar <viresh.linux@gmail.com>
+M:	Shiraz Hashim <shiraz.hashim@st.com>
+L:	spear-devel@list.st.com
+L:	linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
+W:	http://www.st.com/spear
+S:	Maintained
+F:	arch/arm/mach-spear13xx/
+
 SPEAR3XX MACHINE SUPPORT
-M:	Viresh Kumar <viresh.kumar@st.com>
+M:	Viresh Kumar <viresh.linux@gmail.com>
+M:	Shiraz Hashim <shiraz.hashim@st.com>
 L:	spear-devel@list.st.com
 L:	linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
 W:	http://www.st.com/spear
@@ -6356,6 +6405,8 @@
 
 SPEAR6XX MACHINE SUPPORT
 M:	Rajeev Kumar <rajeev-dlh.kumar@st.com>
+M:	Shiraz Hashim <shiraz.hashim@st.com>
+M:	Viresh Kumar <viresh.linux@gmail.com>
 L:	spear-devel@list.st.com
 L:	linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
 W:	http://www.st.com/spear
@@ -6363,14 +6414,12 @@
 F:	arch/arm/mach-spear6xx/
 
 SPEAR CLOCK FRAMEWORK SUPPORT
-M:	Viresh Kumar <viresh.kumar@st.com>
+M:	Viresh Kumar <viresh.linux@gmail.com>
 L:	spear-devel@list.st.com
 L:	linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
 W:	http://www.st.com/spear
 S:	Maintained
-F:	arch/arm/mach-spear*/clock.c
-F:	arch/arm/plat-spear/clock.c
-F:	arch/arm/plat-spear/include/plat/clock.h
+F:	drivers/clk/spear/
 
 SPI SUBSYSTEM
 M:	Grant Likely <grant.likely@secretlab.ca>
@@ -6632,7 +6681,7 @@
 F:	kernel/taskstats.c
 
 TC CLASSIFIER
-M:	Jamal Hadi Salim <hadi@cyberus.ca>
+M:	Jamal Hadi Salim <jhs@mojatatu.com>
 L:	netdev@vger.kernel.org
 S:	Maintained
 F:	include/linux/pkt_cls.h
@@ -7266,11 +7315,11 @@
 F:	drivers/uio/
 F:	include/linux/uio*.h
 
-UTIL-LINUX-NG PACKAGE
+UTIL-LINUX PACKAGE
 M:	Karel Zak <kzak@redhat.com>
-L:	util-linux-ng@vger.kernel.org
-W:	http://kernel.org/~kzak/util-linux-ng/
-T:	git git://git.kernel.org/pub/scm/utils/util-linux-ng/util-linux-ng.git
+L:	util-linux@vger.kernel.org
+W:	http://en.wikipedia.org/wiki/Util-linux
+T:	git git://git.kernel.org/pub/scm/utils/util-linux/util-linux.git
 S:	Maintained
 
 UVESAFB DRIVER
@@ -7372,7 +7421,7 @@
 
 VME SUBSYSTEM
 M:	Martyn Welch <martyn.welch@ge.com>
-M:	Manohar Vanga <manohar.vanga@cern.ch>
+M:	Manohar Vanga <manohar.vanga@gmail.com>
 M:	Greg Kroah-Hartman <gregkh@linuxfoundation.org>
 L:	devel@driverdev.osuosl.org
 S:	Maintained
diff --git a/Makefile b/Makefile
index b62c1e0..81ea154 100644
--- a/Makefile
+++ b/Makefile
@@ -1,7 +1,7 @@
 VERSION = 3
-PATCHLEVEL = 4
+PATCHLEVEL = 5
 SUBLEVEL = 0
-EXTRAVERSION =
+EXTRAVERSION = -rc5
 NAME = Saber-toothed Squirrel
 
 # *DOCUMENTATION*
@@ -231,10 +231,6 @@
 # Where to locate arch specific headers
 hdr-arch  := $(SRCARCH)
 
-ifeq ($(ARCH),m68knommu)
-       hdr-arch  := m68k
-endif
-
 KCONFIG_CONFIG	?= .config
 export KCONFIG_CONFIG
 
@@ -341,7 +337,6 @@
 GENKSYMS	= scripts/genksyms/genksyms
 INSTALLKERNEL  := installkernel
 DEPMOD		= /sbin/depmod
-KALLSYMS	= scripts/kallsyms
 PERL		= perl
 CHECK		= sparse
 
@@ -566,6 +561,8 @@
 KBUILD_CFLAGS	+= -O2
 endif
 
+include $(srctree)/arch/$(SRCARCH)/Makefile
+
 ifdef CONFIG_READABLE_ASM
 # Disable optimizations that make assembler listings hard to read.
 # reorder blocks reorders the control in the function
@@ -576,8 +573,6 @@
                  $(call cc-option,-fno-partial-inlining)
 endif
 
-include $(srctree)/arch/$(SRCARCH)/Makefile
-
 ifneq ($(CONFIG_FRAME_WARN),0)
 KBUILD_CFLAGS += $(call cc-option,-Wframe-larger-than=${CONFIG_FRAME_WARN})
 endif
@@ -739,197 +734,21 @@
 libs-y2		:= $(patsubst %/, %/built-in.o, $(libs-y))
 libs-y		:= $(libs-y1) $(libs-y2)
 
-# Build vmlinux
-# ---------------------------------------------------------------------------
-# vmlinux is built from the objects selected by $(vmlinux-init) and
-# $(vmlinux-main). Most are built-in.o files from top-level directories
-# in the kernel tree, others are specified in arch/$(ARCH)/Makefile.
-# Ordering when linking is important, and $(vmlinux-init) must be first.
-#
-# vmlinux
-#   ^
-#   |
-#   +-< $(vmlinux-init)
-#   |   +--< init/version.o + more
-#   |
-#   +--< $(vmlinux-main)
-#   |    +--< driver/built-in.o mm/built-in.o + more
-#   |
-#   +-< kallsyms.o (see description in CONFIG_KALLSYMS section)
-#
-# vmlinux version (uname -v) cannot be updated during normal
-# descending-into-subdirs phase since we do not yet know if we need to
-# update vmlinux.
-# Therefore this step is delayed until just before final link of vmlinux -
-# except in the kallsyms case where it is done just before adding the
-# symbols to the kernel.
-#
-# System.map is generated to document addresses of all kernel symbols
+# Externally visible symbols (used by link-vmlinux.sh)
+export KBUILD_VMLINUX_INIT := $(head-y) $(init-y)
+export KBUILD_VMLINUX_MAIN := $(core-y) $(libs-y) $(drivers-y) $(net-y)
+export KBUILD_LDS          := arch/$(SRCARCH)/kernel/vmlinux.lds
+export LDFLAGS_vmlinux
 
-vmlinux-init := $(head-y) $(init-y)
-vmlinux-main := $(core-y) $(libs-y) $(drivers-y) $(net-y)
-vmlinux-all  := $(vmlinux-init) $(vmlinux-main)
-vmlinux-lds  := arch/$(SRCARCH)/kernel/vmlinux.lds
-export KBUILD_VMLINUX_OBJS := $(vmlinux-all)
+vmlinux-deps := $(KBUILD_LDS) $(KBUILD_VMLINUX_INIT) $(KBUILD_VMLINUX_MAIN)
 
-# Rule to link vmlinux - also used during CONFIG_KALLSYMS
-# May be overridden by arch/$(ARCH)/Makefile
-quiet_cmd_vmlinux__ ?= LD      $@
-      cmd_vmlinux__ ?= $(LD) $(LDFLAGS) $(LDFLAGS_vmlinux) -o $@ \
-      -T $(vmlinux-lds) $(vmlinux-init)                          \
-      --start-group $(vmlinux-main) --end-group                  \
-      $(filter-out $(vmlinux-lds) $(vmlinux-init) $(vmlinux-main) vmlinux.o FORCE ,$^)
+# Final link of vmlinux
+      cmd_link-vmlinux = $(CONFIG_SHELL) $< $(LD) $(LDFLAGS) $(LDFLAGS_vmlinux)
+quiet_cmd_link-vmlinux = LINK    $@
 
-# Generate new vmlinux version
-quiet_cmd_vmlinux_version = GEN     .version
-      cmd_vmlinux_version = set -e;                     \
-	if [ ! -r .version ]; then			\
-	  rm -f .version;				\
-	  echo 1 >.version;				\
-	else						\
-	  mv .version .old_version;			\
-	  expr 0$$(cat .old_version) + 1 >.version;	\
-	fi;						\
-	$(MAKE) $(build)=init
-
-# Generate System.map
-quiet_cmd_sysmap = SYSMAP
-      cmd_sysmap = $(CONFIG_SHELL) $(srctree)/scripts/mksysmap
-
-# Sort exception table at build time
-quiet_cmd_sortextable = SORTEX
-      cmd_sortextable = $(objtree)/scripts/sortextable
-
-# Link of vmlinux
-# If CONFIG_KALLSYMS is set .version is already updated
-# Generate System.map and verify that the content is consistent
-# Use + in front of the vmlinux_version rule to silent warning with make -j2
-# First command is ':' to allow us to use + in front of the rule
-define rule_vmlinux__
-	:
-	$(if $(CONFIG_KALLSYMS),,+$(call cmd,vmlinux_version))
-
-	$(call cmd,vmlinux__)
-	$(Q)echo 'cmd_$@ := $(cmd_vmlinux__)' > $(@D)/.$(@F).cmd
-
-	$(if $(CONFIG_BUILDTIME_EXTABLE_SORT),				\
-	  $(Q)$(if $($(quiet)cmd_sortextable),				\
-	    echo '  $($(quiet)cmd_sortextable)  vmlinux' &&)		\
-	  $(cmd_sortextable)  vmlinux)
-
-
-	$(Q)$(if $($(quiet)cmd_sysmap),                                      \
-	  echo '  $($(quiet)cmd_sysmap)  System.map' &&)                     \
-	$(cmd_sysmap) $@ System.map;                                         \
-	if [ $$? -ne 0 ]; then                                               \
-		rm -f $@;                                                    \
-		/bin/false;                                                  \
-	fi;
-	$(verify_kallsyms)
-endef
-
-
-ifdef CONFIG_KALLSYMS
-# Generate section listing all symbols and add it into vmlinux $(kallsyms.o)
-# It's a three stage process:
-# o .tmp_vmlinux1 has all symbols and sections, but __kallsyms is
-#   empty
-#   Running kallsyms on that gives us .tmp_kallsyms1.o with
-#   the right size - vmlinux version (uname -v) is updated during this step
-# o .tmp_vmlinux2 now has a __kallsyms section of the right size,
-#   but due to the added section, some addresses have shifted.
-#   From here, we generate a correct .tmp_kallsyms2.o
-# o The correct .tmp_kallsyms2.o is linked into the final vmlinux.
-# o Verify that the System.map from vmlinux matches the map from
-#   .tmp_vmlinux2, just in case we did not generate kallsyms correctly.
-# o If 'make KALLSYMS_EXTRA_PASS=1" was used, do an extra pass using
-#   .tmp_vmlinux3 and .tmp_kallsyms3.o.  This is only meant as a
-#   temporary bypass to allow the kernel to be built while the
-#   maintainers work out what went wrong with kallsyms.
-
-last_kallsyms := 2
-
-ifdef KALLSYMS_EXTRA_PASS
-ifneq ($(KALLSYMS_EXTRA_PASS),0)
-last_kallsyms := 3
-endif
-endif
-
-kallsyms.o := .tmp_kallsyms$(last_kallsyms).o
-
-define verify_kallsyms
-	$(Q)$(if $($(quiet)cmd_sysmap),                                      \
-	  echo '  $($(quiet)cmd_sysmap)  .tmp_System.map' &&)                \
-	  $(cmd_sysmap) .tmp_vmlinux$(last_kallsyms) .tmp_System.map
-	$(Q)cmp -s System.map .tmp_System.map ||                             \
-		(echo Inconsistent kallsyms data;                            \
-		 echo This is a bug - please report about it;                \
-		 echo Try "make KALLSYMS_EXTRA_PASS=1" as a workaround;      \
-		 rm .tmp_kallsyms* ; /bin/false )
-endef
-
-# Update vmlinux version before link
-# Use + in front of this rule to silent warning about make -j1
-# First command is ':' to allow us to use + in front of this rule
-cmd_ksym_ld = $(cmd_vmlinux__)
-define rule_ksym_ld
-	: 
-	+$(call cmd,vmlinux_version)
-	$(call cmd,vmlinux__)
-	$(Q)echo 'cmd_$@ := $(cmd_vmlinux__)' > $(@D)/.$(@F).cmd
-endef
-
-# Generate .S file with all kernel symbols
-quiet_cmd_kallsyms = KSYM    $@
-      cmd_kallsyms = $(NM) -n $< | $(KALLSYMS) \
-                     $(if $(CONFIG_KALLSYMS_ALL),--all-symbols) > $@
-
-.tmp_kallsyms1.o .tmp_kallsyms2.o .tmp_kallsyms3.o: %.o: %.S scripts FORCE
-	$(call if_changed_dep,as_o_S)
-
-.tmp_kallsyms%.S: .tmp_vmlinux% $(KALLSYMS)
-	$(call cmd,kallsyms)
-
-# .tmp_vmlinux1 must be complete except kallsyms, so update vmlinux version
-.tmp_vmlinux1: $(vmlinux-lds) $(vmlinux-all) FORCE
-	$(call if_changed_rule,ksym_ld)
-
-.tmp_vmlinux2: $(vmlinux-lds) $(vmlinux-all) .tmp_kallsyms1.o FORCE
-	$(call if_changed,vmlinux__)
-
-.tmp_vmlinux3: $(vmlinux-lds) $(vmlinux-all) .tmp_kallsyms2.o FORCE
-	$(call if_changed,vmlinux__)
-
-# Needs to visit scripts/ before $(KALLSYMS) can be used.
-$(KALLSYMS): scripts ;
-
-# Generate some data for debugging strange kallsyms problems
-debug_kallsyms: .tmp_map$(last_kallsyms)
-
-.tmp_map%: .tmp_vmlinux% FORCE
-	($(OBJDUMP) -h $< | $(AWK) '/^ +[0-9]/{print $$4 " 0 " $$2}'; $(NM) $<) | sort > $@
-
-.tmp_map3: .tmp_map2
-
-.tmp_map2: .tmp_map1
-
-endif # ifdef CONFIG_KALLSYMS
-
-# Do modpost on a prelinked vmlinux. The finally linked vmlinux has
-# relevant sections renamed as per the linker script.
-quiet_cmd_vmlinux-modpost = LD      $@
-      cmd_vmlinux-modpost = $(LD) $(LDFLAGS) -r -o $@                          \
-	 $(vmlinux-init) --start-group $(vmlinux-main) --end-group             \
-	 $(filter-out $(vmlinux-init) $(vmlinux-main) FORCE ,$^)
-define rule_vmlinux-modpost
-	:
-	+$(call cmd,vmlinux-modpost)
-	$(Q)$(MAKE) -f $(srctree)/scripts/Makefile.modpost $@
-	$(Q)echo 'cmd_$@ := $(cmd_vmlinux-modpost)' > $(dot-target).cmd
-endef
-
-# vmlinux image - including updated kernel symbols
-vmlinux: $(vmlinux-lds) $(vmlinux-init) $(vmlinux-main) vmlinux.o $(kallsyms.o) FORCE
+# Include targets which we want to
+# execute if the rest of the kernel build went well.
+vmlinux: scripts/link-vmlinux.sh $(vmlinux-deps) FORCE
 ifdef CONFIG_HEADERS_CHECK
 	$(Q)$(MAKE) -f $(srctree)/Makefile headers_check
 endif
@@ -939,22 +758,11 @@
 ifdef CONFIG_BUILD_DOCSRC
 	$(Q)$(MAKE) $(build)=Documentation
 endif
-	$(call vmlinux-modpost)
-	$(call if_changed_rule,vmlinux__)
-	$(Q)rm -f .old_version
-
-# build vmlinux.o first to catch section mismatch errors early
-ifdef CONFIG_KALLSYMS
-.tmp_vmlinux1: vmlinux.o
-endif
-
-modpost-init := $(filter-out init/built-in.o, $(vmlinux-init))
-vmlinux.o: $(modpost-init) $(vmlinux-main) FORCE
-	$(call if_changed_rule,vmlinux-modpost)
+	+$(call if_changed,link-vmlinux)
 
 # The actual objects are generated when descending, 
 # make sure no implicit rule kicks in
-$(sort $(vmlinux-init) $(vmlinux-main)) $(vmlinux-lds): $(vmlinux-dirs) ;
+$(sort $(vmlinux-deps)): $(vmlinux-dirs) ;
 
 # Handle descending into subdirectories listed in $(vmlinux-dirs)
 # Preset locale variables to speed up the build process. Limit locale
@@ -1181,8 +989,6 @@
 
 # Directories & files removed with 'make clean'
 CLEAN_DIRS  += $(MODVERDIR)
-CLEAN_FILES +=	vmlinux System.map \
-                .tmp_kallsyms* .tmp_version .tmp_vmlinux* .tmp_System.map
 
 # Directories & files removed with 'make mrproper'
 MRPROPER_DIRS  += include/config usr/include include/generated          \
@@ -1428,6 +1234,7 @@
 endif # KBUILD_EXTMOD
 
 clean: $(clean-dirs)
+	$(Q)$(CONFIG_SHELL) $(srctree)/scripts/link-vmlinux.sh clean
 	$(call cmd,rmdirs)
 	$(call cmd,rmfiles)
 	@find $(if $(KBUILD_EXTMOD), $(KBUILD_EXTMOD), .) $(RCS_FIND_IGNORE) \
@@ -1568,14 +1375,6 @@
 cmd_crmodverdir = $(Q)mkdir -p $(MODVERDIR) \
                   $(if $(KBUILD_MODULES),; rm -f $(MODVERDIR)/*)
 
-a_flags = -Wp,-MD,$(depfile) $(KBUILD_AFLAGS) $(AFLAGS_KERNEL) \
-	  $(KBUILD_AFLAGS_KERNEL)                              \
-	  $(NOSTDINC_FLAGS) $(LINUXINCLUDE) $(KBUILD_CPPFLAGS) \
-	  $(modkern_aflags) $(EXTRA_AFLAGS) $(AFLAGS_$(basetarget).o)
-
-quiet_cmd_as_o_S = AS      $@
-cmd_as_o_S       = $(CC) $(a_flags) -c -o $@ $<
-
 # read all saved command lines
 
 targets := $(wildcard $(sort $(targets)))
diff --git a/arch/Kconfig b/arch/Kconfig
index e9a9108..8c3d957 100644
--- a/arch/Kconfig
+++ b/arch/Kconfig
@@ -159,6 +159,9 @@
 config HAVE_DMA_ATTRS
 	bool
 
+config HAVE_DMA_CONTIGUOUS
+	bool
+
 config USE_GENERIC_SMP_HELPERS
 	bool
 
diff --git a/arch/alpha/include/asm/posix_types.h b/arch/alpha/include/asm/posix_types.h
index 24779fc..5a8a483 100644
--- a/arch/alpha/include/asm/posix_types.h
+++ b/arch/alpha/include/asm/posix_types.h
@@ -10,9 +10,6 @@
 typedef unsigned int	__kernel_ino_t;
 #define __kernel_ino_t __kernel_ino_t
 
-typedef unsigned int	__kernel_nlink_t;
-#define __kernel_nlink_t __kernel_nlink_t
-
 typedef unsigned long	__kernel_sigset_t;	/* at least 32 bits */
 
 #include <asm-generic/posix_types.h>
diff --git a/arch/alpha/kernel/signal.c b/arch/alpha/kernel/signal.c
index 10ab2d7..a8c97d4 100644
--- a/arch/alpha/kernel/signal.c
+++ b/arch/alpha/kernel/signal.c
@@ -226,7 +226,6 @@
 	if (__get_user(set.sig[0], &sc->sc_mask))
 		goto give_sigsegv;
 
-	sigdelsetmask(&set, ~_BLOCKABLE);
 	set_current_blocked(&set);
 
 	if (restore_sigcontext(sc, regs, sw))
@@ -261,7 +260,6 @@
 	if (__copy_from_user(&set, &frame->uc.uc_sigmask, sizeof(set)))
 		goto give_sigsegv;
 
-	sigdelsetmask(&set, ~_BLOCKABLE);
 	set_current_blocked(&set);
 
 	if (restore_sigcontext(&frame->uc.uc_mcontext, regs, sw))
@@ -468,12 +466,9 @@
 handle_signal(int sig, struct k_sigaction *ka, siginfo_t *info,
 	      struct pt_regs * regs, struct switch_stack *sw)
 {
-	sigset_t *oldset = &current->blocked;
+	sigset_t *oldset = sigmask_to_save();
 	int ret;
 
-	if (test_thread_flag(TIF_RESTORE_SIGMASK))
-		oldset = &current->saved_sigmask;
-
 	if (ka->sa.sa_flags & SA_SIGINFO)
 		ret = setup_rt_frame(sig, ka, info, oldset, regs, sw);
 	else
@@ -483,12 +478,7 @@
 		force_sigsegv(sig, current);
 		return;
 	}
-	block_sigmask(ka, sig);
-	/* A signal was successfully delivered, and the
-	   saved sigmask was stored on the signal frame,
-	   and will be restored by sigreturn.  So we can
-	   simply clear the restore sigmask flag.  */
-	clear_thread_flag(TIF_RESTORE_SIGMASK);
+	signal_delivered(sig, info, ka, regs, 0);
 }
 
 static inline void
@@ -572,9 +562,7 @@
 	}
 
 	/* If there's no signal to deliver, we just restore the saved mask.  */
-	if (test_and_clear_thread_flag(TIF_RESTORE_SIGMASK))
-		set_current_blocked(&current->saved_sigmask);
-
+	restore_saved_sigmask();
 	if (single_stepping)
 		ptrace_set_bpt(current);	/* re-set breakpoint */
 }
@@ -590,7 +578,5 @@
 	if (thread_info_flags & _TIF_NOTIFY_RESUME) {
 		clear_thread_flag(TIF_NOTIFY_RESUME);
 		tracehook_notify_resume(regs);
-		if (current->replacement_session_keyring)
-			key_replace_session_keyring();
 	}
 }
diff --git a/arch/arm/Kconfig b/arch/arm/Kconfig
index 5458aa9..a91009c 100644
--- a/arch/arm/Kconfig
+++ b/arch/arm/Kconfig
@@ -5,6 +5,8 @@
 	select HAVE_AOUT
 	select HAVE_DMA_API_DEBUG
 	select HAVE_IDE if PCI || ISA || PCMCIA
+	select HAVE_DMA_ATTRS
+	select HAVE_DMA_CONTIGUOUS if (CPU_V6 || CPU_V6K || CPU_V7)
 	select HAVE_MEMBLOCK
 	select RTC_LIB
 	select SYS_SUPPORTS_APM_EMULATION
@@ -54,6 +56,14 @@
 config ARM_HAS_SG_CHAIN
 	bool
 
+config NEED_SG_DMA_LENGTH
+	bool
+
+config ARM_DMA_USE_IOMMU
+	select NEED_SG_DMA_LENGTH
+	select ARM_HAS_SG_CHAIN
+	bool
+
 config HAVE_PWM
 	bool
 
@@ -283,6 +293,7 @@
 	select ICST
 	select GENERIC_CLOCKEVENTS
 	select ARCH_WANT_OPTIONAL_GPIOLIB
+	select NEED_MACH_IO_H if PCI
 	select PLAT_VERSATILE
 	select PLAT_VERSATILE_CLCD
 	select PLAT_VERSATILE_FPGA_IRQ
@@ -445,8 +456,10 @@
 	select ARCH_REQUIRE_GPIOLIB
 	select CLKDEV_LOOKUP
 	select CLKSRC_MMIO
+	select COMMON_CLK
 	select HAVE_CLK_PREPARE
 	select PINCTRL
+	select USE_OF
 	help
 	  Support for Freescale MXS-based family of processors
 
@@ -512,7 +525,7 @@
 	select ARCH_HAS_DMA_SET_COHERENT_MASK
 	select CLKSRC_MMIO
 	select CPU_XSCALE
-	select GENERIC_GPIO
+	select ARCH_REQUIRE_GPIOLIB
 	select GENERIC_CLOCKEVENTS
 	select MIGHT_HAVE_PCI
 	select NEED_MACH_IO_H
@@ -576,6 +589,7 @@
 	select PCI
 	select ARCH_REQUIRE_GPIOLIB
 	select GENERIC_CLOCKEVENTS
+	select NEED_MACH_IO_H
 	select PLAT_ORION
 	help
 	  Support for the following Marvell Orion 5x series SoCs:
@@ -936,6 +950,7 @@
 	select ARM_AMBA
 	select ARCH_REQUIRE_GPIOLIB
 	select CLKDEV_LOOKUP
+	select COMMON_CLK
 	select CLKSRC_MMIO
 	select GENERIC_CLOCKEVENTS
 	select HAVE_CLK
@@ -1040,7 +1055,6 @@
 
 source "arch/arm/plat-samsung/Kconfig"
 source "arch/arm/plat-s3c24xx/Kconfig"
-source "arch/arm/plat-s5p/Kconfig"
 
 source "arch/arm/plat-spear/Kconfig"
 
@@ -1091,6 +1105,7 @@
 	bool
 	select CLKSRC_MMIO
 	select GENERIC_IRQ_CHIP
+	select COMMON_CLK
 
 config PLAT_PXA
 	bool
diff --git a/arch/arm/Kconfig.debug b/arch/arm/Kconfig.debug
index 85348a0..01a1341 100644
--- a/arch/arm/Kconfig.debug
+++ b/arch/arm/Kconfig.debug
@@ -103,6 +103,35 @@
 		  Say Y here if you want the debug print routines to direct
 		  their output to the second serial port on these devices.
 
+	config DEBUG_DAVINCI_DA8XX_UART1
+		bool "Kernel low-level debugging on DaVinci DA8XX using UART1"
+		depends on ARCH_DAVINCI_DA8XX
+		help
+		  Say Y here if you want the debug print routines to direct
+		  their output to the UART1 serial port on DaVinci DA8XX devices.
+
+	config DEBUG_DAVINCI_DA8XX_UART2
+		bool "Kernel low-level debugging on DaVinci DA8XX using UART2"
+		depends on ARCH_DAVINCI_DA8XX
+		help
+		  Say Y here if you want the debug print routines to direct
+		  their output to the UART2 serial port on DaVinci DA8XX devices.
+
+	config DEBUG_DAVINCI_DMx_UART0
+		bool "Kernel low-level debugging on DaVinci DMx using UART0"
+		depends on ARCH_DAVINCI_DMx
+		help
+		  Say Y here if you want the debug print routines to direct
+		  their output to the UART0 serial port on DaVinci DMx devices.
+
+	config DEBUG_DAVINCI_TNETV107X_UART1
+		bool "Kernel low-level debugging on DaVinci TNETV107x using UART1"
+		depends on ARCH_DAVINCI_TNETV107X
+		help
+		  Say Y here if you want the debug print routines to direct
+		  their output to the UART1 serial port on DaVinci TNETV107X
+		  devices.
+
 	config DEBUG_DC21285_PORT
 		bool "Kernel low-level debugging messages via footbridge serial port"
 		depends on FOOTBRIDGE
@@ -180,6 +209,14 @@
 		  Say Y here if you want kernel low-level debugging support
 		  on i.MX50 or i.MX53.
 
+	config DEBUG_IMX6Q_UART2
+		bool "i.MX6Q Debug UART2"
+		depends on SOC_IMX6Q
+		help
+		  Say Y here if you want kernel low-level debugging support
+		  on i.MX6Q UART2. This is correct for e.g. the SabreLite
+		  board.
+
 	config DEBUG_IMX6Q_UART4
 		bool "i.MX6Q Debug UART4"
 		depends on SOC_IMX6Q
diff --git a/arch/arm/Makefile b/arch/arm/Makefile
index 157900d..0298b00 100644
--- a/arch/arm/Makefile
+++ b/arch/arm/Makefile
@@ -160,9 +160,7 @@
 machine-$(CONFIG_ARCH_NETX)		:= netx
 machine-$(CONFIG_ARCH_NOMADIK)		:= nomadik
 machine-$(CONFIG_ARCH_OMAP1)		:= omap1
-machine-$(CONFIG_ARCH_OMAP2)		:= omap2
-machine-$(CONFIG_ARCH_OMAP3)		:= omap2
-machine-$(CONFIG_ARCH_OMAP4)		:= omap2
+machine-$(CONFIG_ARCH_OMAP2PLUS)	:= omap2
 machine-$(CONFIG_ARCH_ORION5X)		:= orion5x
 machine-$(CONFIG_ARCH_PICOXCELL)	:= picoxcell
 machine-$(CONFIG_ARCH_PNX4008)		:= pnx4008
@@ -188,6 +186,8 @@
 machine-$(CONFIG_ARCH_VT8500)		:= vt8500
 machine-$(CONFIG_ARCH_W90X900)		:= w90x900
 machine-$(CONFIG_FOOTBRIDGE)		:= footbridge
+machine-$(CONFIG_MACH_SPEAR1310)	:= spear13xx
+machine-$(CONFIG_MACH_SPEAR1340)	:= spear13xx
 machine-$(CONFIG_MACH_SPEAR300)		:= spear3xx
 machine-$(CONFIG_MACH_SPEAR310)		:= spear3xx
 machine-$(CONFIG_MACH_SPEAR320)		:= spear3xx
@@ -205,7 +205,7 @@
 plat-$(CONFIG_PLAT_ORION)	:= orion
 plat-$(CONFIG_PLAT_PXA)		:= pxa
 plat-$(CONFIG_PLAT_S3C24XX)	:= s3c24xx samsung
-plat-$(CONFIG_PLAT_S5P)		:= s5p samsung
+plat-$(CONFIG_PLAT_S5P)		:= samsung
 plat-$(CONFIG_PLAT_SPEAR)	:= spear
 plat-$(CONFIG_PLAT_VERSATILE)	:= versatile
 
diff --git a/arch/arm/boot/dts/db8500.dtsi b/arch/arm/boot/dts/db8500.dtsi
index 881bc39..4ad5160 100644
--- a/arch/arm/boot/dts/db8500.dtsi
+++ b/arch/arm/boot/dts/db8500.dtsi
@@ -58,6 +58,8 @@
 				"st,nomadik-gpio";
 			reg =  <0x8012e000 0x80>;
 			interrupts = <0 119 0x4>;
+			interrupt-controller;
+			#interrupt-cells = <2>;
 			supports-sleepmode;
 			gpio-controller;
 			#gpio-cells = <2>;
@@ -69,6 +71,8 @@
 				"st,nomadik-gpio";
 			reg =  <0x8012e080 0x80>;
 			interrupts = <0 120 0x4>;
+			interrupt-controller;
+			#interrupt-cells = <2>;
 			supports-sleepmode;
 			gpio-controller;
 			#gpio-cells = <2>;
@@ -80,6 +84,8 @@
 				"st,nomadik-gpio";
 			reg =  <0x8000e000 0x80>;
 			interrupts = <0 121 0x4>;
+			interrupt-controller;
+			#interrupt-cells = <2>;
 			supports-sleepmode;
 			gpio-controller;
 			#gpio-cells = <2>;
@@ -91,6 +97,8 @@
 				"st,nomadik-gpio";
 			reg =  <0x8000e080 0x80>;
 			interrupts = <0 122 0x4>;
+			interrupt-controller;
+			#interrupt-cells = <2>;
 			supports-sleepmode;
 			gpio-controller;
 			#gpio-cells = <2>;
@@ -102,6 +110,8 @@
 				"st,nomadik-gpio";
 			reg =  <0x8000e100 0x80>;
 			interrupts = <0 123 0x4>;
+			interrupt-controller;
+			#interrupt-cells = <2>;
 			supports-sleepmode;
 			gpio-controller;
 			#gpio-cells = <2>;
@@ -113,6 +123,8 @@
 				"st,nomadik-gpio";
 			reg =  <0x8000e180 0x80>;
 			interrupts = <0 124 0x4>;
+			interrupt-controller;
+			#interrupt-cells = <2>;
 			supports-sleepmode;
 			gpio-controller;
 			#gpio-cells = <2>;
@@ -124,6 +136,8 @@
 				"st,nomadik-gpio";
 			reg =  <0x8011e000 0x80>;
 			interrupts = <0 125 0x4>;
+			interrupt-controller;
+			#interrupt-cells = <2>;
 			supports-sleepmode;
 			gpio-controller;
 			#gpio-cells = <2>;
@@ -135,6 +149,8 @@
 				"st,nomadik-gpio";
 			reg =  <0x8011e080 0x80>;
 			interrupts = <0 126 0x4>;
+			interrupt-controller;
+			#interrupt-cells = <2>;
 			supports-sleepmode;
 			gpio-controller;
 			#gpio-cells = <2>;
@@ -146,12 +162,18 @@
 				"st,nomadik-gpio";
 			reg =  <0xa03fe000 0x80>;
 			interrupts = <0 127 0x4>;
+			interrupt-controller;
+			#interrupt-cells = <2>;
 			supports-sleepmode;
 			gpio-controller;
 			#gpio-cells = <2>;
 			gpio-bank = <8>;
 		};
 
+		pinctrl {
+			compatible = "stericsson,nmk_pinctrl";
+		};
+
 		usb@a03e0000 {
 			compatible = "stericsson,db8500-musb",
 				"mentor,musb";
@@ -169,20 +191,195 @@
 		prcmu@80157000 {
 			compatible = "stericsson,db8500-prcmu";
 			reg = <0x80157000 0x1000>;
-			interrupts = <46 47>;
+			interrupts = <0 47 0x4>;
 			#address-cells = <1>;
 			#size-cells = <1>;
 			ranges;
 
-				prcmu-timer-4@80157450 {
+			prcmu-timer-4@80157450 {
 				compatible = "stericsson,db8500-prcmu-timer-4";
 				reg = <0x80157450 0xC>;
 			};
 
+			db8500-prcmu-regulators {
+				compatible = "stericsson,db8500-prcmu-regulator";
+
+				// DB8500_REGULATOR_VAPE
+				db8500_vape_reg: db8500_vape {
+					regulator-name = "db8500-vape";
+					regulator-always-on;
+				};
+
+				// DB8500_REGULATOR_VARM
+				db8500_varm_reg: db8500_varm {
+					regulator-name = "db8500-varm";
+				};
+
+				// DB8500_REGULATOR_VMODEM
+				db8500_vmodem_reg: db8500_vmodem {
+					regulator-name = "db8500-vmodem";
+				};
+
+				// DB8500_REGULATOR_VPLL
+				db8500_vpll_reg: db8500_vpll {
+					regulator-name = "db8500-vpll";
+				};
+
+				// DB8500_REGULATOR_VSMPS1
+				db8500_vsmps1_reg: db8500_vsmps1 {
+					regulator-name = "db8500-vsmps1";
+				};
+
+				// DB8500_REGULATOR_VSMPS2
+				db8500_vsmps2_reg: db8500_vsmps2 {
+					regulator-name = "db8500-vsmps2";
+				};
+
+				// DB8500_REGULATOR_VSMPS3
+				db8500_vsmps3_reg: db8500_vsmps3 {
+					regulator-name = "db8500-vsmps3";
+				};
+
+				// DB8500_REGULATOR_VRF1
+				db8500_vrf1_reg: db8500_vrf1 {
+					regulator-name = "db8500-vrf1";
+				};
+
+				// DB8500_REGULATOR_SWITCH_SVAMMDSP
+				db8500_sva_mmdsp_reg: db8500_sva_mmdsp {
+					regulator-name = "db8500-sva-mmdsp";
+				};
+
+				// DB8500_REGULATOR_SWITCH_SVAMMDSPRET
+				db8500_sva_mmdsp_ret_reg: db8500_sva_mmdsp_ret {
+					regulator-name = "db8500-sva-mmdsp-ret";
+				};
+
+				// DB8500_REGULATOR_SWITCH_SVAPIPE
+				db8500_sva_pipe_reg: db8500_sva_pipe {
+					regulator-name = "db8500_sva_pipe";
+				};
+
+				// DB8500_REGULATOR_SWITCH_SIAMMDSP
+				db8500_sia_mmdsp_reg: db8500_sia_mmdsp {
+					regulator-name = "db8500_sia_mmdsp";
+				};
+
+				// DB8500_REGULATOR_SWITCH_SIAMMDSPRET
+				db8500_sia_mmdsp_ret_reg: db8500_sia_mmdsp_ret {
+					regulator-name = "db8500-sia-mmdsp-ret";
+				};
+
+				// DB8500_REGULATOR_SWITCH_SIAPIPE
+				db8500_sia_pipe_reg: db8500_sia_pipe {
+					regulator-name = "db8500-sia-pipe";
+				};
+
+				// DB8500_REGULATOR_SWITCH_SGA
+				db8500_sga_reg: db8500_sga {
+					regulator-name = "db8500-sga";
+					vin-supply = <&db8500_vape_reg>;
+				};
+
+				// DB8500_REGULATOR_SWITCH_B2R2_MCDE
+				db8500_b2r2_mcde_reg: db8500_b2r2_mcde {
+					regulator-name = "db8500-b2r2-mcde";
+					vin-supply = <&db8500_vape_reg>;
+				};
+
+				// DB8500_REGULATOR_SWITCH_ESRAM12
+				db8500_esram12_reg: db8500_esram12 {
+					regulator-name = "db8500-esram12";
+				};
+
+				// DB8500_REGULATOR_SWITCH_ESRAM12RET
+				db8500_esram12_ret_reg: db8500_esram12_ret {
+					regulator-name = "db8500-esram12-ret";
+				};
+
+				// DB8500_REGULATOR_SWITCH_ESRAM34
+				db8500_esram34_reg: db8500_esram34 {
+					regulator-name = "db8500-esram34";
+				};
+
+				// DB8500_REGULATOR_SWITCH_ESRAM34RET
+				db8500_esram34_ret_reg: db8500_esram34_ret {
+					regulator-name = "db8500-esram34-ret";
+				};
+			};
+
 			ab8500@5 {
 				compatible = "stericsson,ab8500";
 				reg = <5>; /* mailbox 5 is i2c */
 				interrupts = <0 40 0x4>;
+
+				ab8500-regulators {
+					compatible = "stericsson,ab8500-regulator";
+
+					// supplies to the display/camera
+					ab8500_ldo_aux1_reg: ab8500_ldo_aux1 {
+						regulator-name = "V-DISPLAY";
+						regulator-min-microvolt = <2500000>;
+						regulator-max-microvolt = <2900000>;
+						regulator-boot-on;
+						/* BUG: If turned off MMC will be affected. */
+						regulator-always-on;
+					};
+
+					// supplies to the on-board eMMC
+					ab8500_ldo_aux2_reg: ab8500_ldo_aux2 {
+						regulator-name = "V-eMMC1";
+						regulator-min-microvolt = <1100000>;
+						regulator-max-microvolt = <3300000>;
+					};
+
+					// supply for VAUX3; SDcard slots
+					ab8500_ldo_aux3_reg: ab8500_ldo_aux3 {
+						regulator-name = "V-MMC-SD";
+						regulator-min-microvolt = <1100000>;
+						regulator-max-microvolt = <3300000>;
+					};
+
+					// supply for v-intcore12; VINTCORE12 LDO
+					ab8500_ldo_initcore_reg: ab8500_ldo_initcore {
+						regulator-name = "V-INTCORE";
+					};
+
+					// supply for tvout; gpadc; TVOUT LDO
+					ab8500_ldo_tvout_reg: ab8500_ldo_tvout {
+						regulator-name = "V-TVOUT";
+					};
+
+					// supply for ab8500-usb; USB LDO
+					ab8500_ldo_usb_reg: ab8500_ldo_usb {
+						regulator-name = "dummy";
+					};
+
+					// supply for ab8500-vaudio; VAUDIO LDO
+					ab8500_ldo_audio_reg: ab8500_ldo_audio {
+						regulator-name = "V-AUD";
+					};
+
+					// supply for v-anamic1 VAMic1-LDO
+					ab8500_ldo_anamic1_reg: ab8500_ldo_anamic1 {
+						regulator-name = "V-AMIC1";
+					};
+
+					// supply for v-amic2; VAMIC2 LDO; reuse constants for AMIC1
+					ab8500_ldo_amamic2_reg: ab8500_ldo_amamic2 {
+						regulator-name = "V-AMIC2";
+					};
+
+					// supply for v-dmic; VDMIC LDO
+					ab8500_ldo_dmic_reg: ab8500_ldo_dmic {
+						regulator-name = "V-DMIC";
+					};
+
+					// supply for U8500 CSI/DSI; VANA LDO
+					ab8500_ldo_ana_reg: ab8500_ldo_ana {
+						regulator-name = "V-CSI/DSI";
+					};
+				};
 			};
 		};
 
@@ -235,7 +432,8 @@
 			status = "disabled";
 
 			// Add one of these for each child device
-			cs-gpios = <&gpio0 31 &gpio4 14 &gpio4 16 &gpio6 22 &gpio7 0>;
+			cs-gpios = <&gpio0 31 0x4 &gpio4 14 0x4 &gpio4 16 0x4
+				    &gpio6 22 0x4 &gpio7 0 0x4>;
 
 		};
 
diff --git a/arch/arm/boot/dts/exynos5250-smdk5250.dts b/arch/arm/boot/dts/exynos5250-smdk5250.dts
index 399d17b..49945cc 100644
--- a/arch/arm/boot/dts/exynos5250-smdk5250.dts
+++ b/arch/arm/boot/dts/exynos5250-smdk5250.dts
@@ -23,4 +23,52 @@
 	chosen {
 		bootargs = "root=/dev/ram0 rw ramdisk=8192 console=ttySAC1,115200";
 	};
+
+	i2c@12C60000 {
+		samsung,i2c-sda-delay = <100>;
+		samsung,i2c-max-bus-freq = <20000>;
+		gpios = <&gpb3 0 2 3 0>,
+			<&gpb3 1 2 3 0>;
+
+		eeprom@50 {
+			compatible = "samsung,s524ad0xd1";
+			reg = <0x50>;
+		};
+	};
+
+	i2c@12C70000 {
+		samsung,i2c-sda-delay = <100>;
+		samsung,i2c-max-bus-freq = <20000>;
+		gpios = <&gpb3 2 2 3 0>,
+			<&gpb3 3 2 3 0>;
+
+		eeprom@51 {
+			compatible = "samsung,s524ad0xd1";
+			reg = <0x51>;
+		};
+	};
+
+	i2c@12C80000 {
+		status = "disabled";
+	};
+
+	i2c@12C90000 {
+		status = "disabled";
+	};
+
+	i2c@12CA0000 {
+		status = "disabled";
+	};
+
+	i2c@12CB0000 {
+		status = "disabled";
+	};
+
+	i2c@12CC0000 {
+		status = "disabled";
+	};
+
+	i2c@12CD0000 {
+		status = "disabled";
+	};
 };
diff --git a/arch/arm/boot/dts/exynos5250.dtsi b/arch/arm/boot/dts/exynos5250.dtsi
index dfc4335..4272b29 100644
--- a/arch/arm/boot/dts/exynos5250.dtsi
+++ b/arch/arm/boot/dts/exynos5250.dtsi
@@ -23,11 +23,27 @@
 	compatible = "samsung,exynos5250";
 	interrupt-parent = <&gic>;
 
-	gic:interrupt-controller@10490000 {
+	gic:interrupt-controller@10481000 {
 		compatible = "arm,cortex-a9-gic";
 		#interrupt-cells = <3>;
 		interrupt-controller;
-		reg = <0x10490000 0x1000>, <0x10480000 0x100>;
+		reg = <0x10481000 0x1000>, <0x10482000 0x2000>;
+	};
+
+	combiner:interrupt-controller@10440000 {
+		compatible = "samsung,exynos4210-combiner";
+		#interrupt-cells = <2>;
+		interrupt-controller;
+		samsung,combiner-nr = <32>;
+		reg = <0x10440000 0x1000>;
+		interrupts = <0 0 0>, <0 1 0>, <0 2 0>, <0 3 0>,
+			     <0 4 0>, <0 5 0>, <0 6 0>, <0 7 0>,
+			     <0 8 0>, <0 9 0>, <0 10 0>, <0 11 0>,
+			     <0 12 0>, <0 13 0>, <0 14 0>, <0 15 0>,
+			     <0 16 0>, <0 17 0>, <0 18 0>, <0 19 0>,
+			     <0 20 0>, <0 21 0>, <0 22 0>, <0 23 0>,
+			     <0 24 0>, <0 25 0>, <0 26 0>, <0 27 0>,
+			     <0 28 0>, <0 29 0>, <0 30 0>, <0 31 0>;
 	};
 
 	watchdog {
@@ -42,30 +58,6 @@
 		interrupts = <0 43 0>, <0 44 0>;
 	};
 
-	sdhci@12200000 {
-		compatible = "samsung,exynos4210-sdhci";
-		reg = <0x12200000 0x100>;
-		interrupts = <0 75 0>;
-	};
-
-	sdhci@12210000 {
-		compatible = "samsung,exynos4210-sdhci";
-		reg = <0x12210000 0x100>;
-		interrupts = <0 76 0>;
-	};
-
-	sdhci@12220000 {
-		compatible = "samsung,exynos4210-sdhci";
-		reg = <0x12220000 0x100>;
-		interrupts = <0 77 0>;
-	};
-
-	sdhci@12230000 {
-		compatible = "samsung,exynos4210-sdhci";
-		reg = <0x12230000 0x100>;
-		interrupts = <0 78 0>;
-	};
-
 	serial@12C00000 {
 		compatible = "samsung,exynos4210-uart";
 		reg = <0x12C00000 0x100>;
@@ -94,48 +86,64 @@
 		compatible = "samsung,s3c2440-i2c";
 		reg = <0x12C60000 0x100>;
 		interrupts = <0 56 0>;
+		#address-cells = <1>;
+		#size-cells = <0>;
 	};
 
 	i2c@12C70000 {
 		compatible = "samsung,s3c2440-i2c";
 		reg = <0x12C70000 0x100>;
 		interrupts = <0 57 0>;
+		#address-cells = <1>;
+		#size-cells = <0>;
 	};
 
 	i2c@12C80000 {
 		compatible = "samsung,s3c2440-i2c";
 		reg = <0x12C80000 0x100>;
 		interrupts = <0 58 0>;
+		#address-cells = <1>;
+		#size-cells = <0>;
 	};
 
 	i2c@12C90000 {
 		compatible = "samsung,s3c2440-i2c";
 		reg = <0x12C90000 0x100>;
 		interrupts = <0 59 0>;
+		#address-cells = <1>;
+		#size-cells = <0>;
 	};
 
 	i2c@12CA0000 {
 		compatible = "samsung,s3c2440-i2c";
 		reg = <0x12CA0000 0x100>;
 		interrupts = <0 60 0>;
+		#address-cells = <1>;
+		#size-cells = <0>;
 	};
 
 	i2c@12CB0000 {
 		compatible = "samsung,s3c2440-i2c";
 		reg = <0x12CB0000 0x100>;
 		interrupts = <0 61 0>;
+		#address-cells = <1>;
+		#size-cells = <0>;
 	};
 
 	i2c@12CC0000 {
 		compatible = "samsung,s3c2440-i2c";
 		reg = <0x12CC0000 0x100>;
 		interrupts = <0 62 0>;
+		#address-cells = <1>;
+		#size-cells = <0>;
 	};
 
 	i2c@12CD0000 {
 		compatible = "samsung,s3c2440-i2c";
 		reg = <0x12CD0000 0x100>;
 		interrupts = <0 63 0>;
+		#address-cells = <1>;
+		#size-cells = <0>;
 	};
 
 	amba {
@@ -157,13 +165,13 @@
 			interrupts = <0 35 0>;
 		};
 
-		mdma0: pdma@10800000 {
+		mdma0: mdma@10800000 {
 			compatible = "arm,pl330", "arm,primecell";
 			reg = <0x10800000 0x1000>;
 			interrupts = <0 33 0>;
 		};
 
-		mdma1: pdma@11C10000 {
+		mdma1: mdma@11C10000 {
 			compatible = "arm,pl330", "arm,primecell";
 			reg = <0x11C10000 0x1000>;
 			interrupts = <0 124 0>;
@@ -242,6 +250,12 @@
 			#gpio-cells = <4>;
 		};
 
+		gpc4: gpio-controller@114002E0 {
+			compatible = "samsung,exynos4-gpio";
+			reg = <0x114002E0 0x20>;
+			#gpio-cells = <4>;
+		};
+
 		gpd0: gpio-controller@11400160 {
 			compatible = "samsung,exynos4-gpio";
 			reg = <0x11400160 0x20>;
@@ -388,19 +402,19 @@
 
 		gpv2: gpio-controller@10D10040 {
 			compatible = "samsung,exynos4-gpio";
-			reg = <0x10D10040 0x20>;
+			reg = <0x10D10060 0x20>;
 			#gpio-cells = <4>;
 		};
 
 		gpv3: gpio-controller@10D10060 {
 			compatible = "samsung,exynos4-gpio";
-			reg = <0x10D10060 0x20>;
+			reg = <0x10D10080 0x20>;
 			#gpio-cells = <4>;
 		};
 
 		gpv4: gpio-controller@10D10080 {
 			compatible = "samsung,exynos4-gpio";
-			reg = <0x10D10080 0x20>;
+			reg = <0x10D100C0 0x20>;
 			#gpio-cells = <4>;
 		};
 
diff --git a/arch/arm/boot/dts/imx23-evk.dts b/arch/arm/boot/dts/imx23-evk.dts
new file mode 100644
index 0000000..70bffa9
--- /dev/null
+++ b/arch/arm/boot/dts/imx23-evk.dts
@@ -0,0 +1,43 @@
+/*
+ * Copyright 2012 Freescale Semiconductor, Inc.
+ *
+ * The code contained herein is licensed under the GNU General Public
+ * License. You may obtain a copy of the GNU General Public License
+ * Version 2 or later at the following locations:
+ *
+ * http://www.opensource.org/licenses/gpl-license.html
+ * http://www.gnu.org/copyleft/gpl.html
+ */
+
+/dts-v1/;
+/include/ "imx23.dtsi"
+
+/ {
+	model = "Freescale i.MX23 Evaluation Kit";
+	compatible = "fsl,imx23-evk", "fsl,imx23";
+
+	memory {
+		reg = <0x40000000 0x08000000>;
+	};
+
+	apb@80000000 {
+		apbh@80000000 {
+			ssp0: ssp@80010000 {
+				compatible = "fsl,imx23-mmc";
+				pinctrl-names = "default";
+				pinctrl-0 = <&mmc0_8bit_pins_a &mmc0_pins_fixup>;
+				bus-width = <8>;
+				wp-gpios = <&gpio1 30 0>;
+				status = "okay";
+			};
+		};
+
+		apbx@80040000 {
+			duart: serial@80070000 {
+				pinctrl-names = "default";
+				pinctrl-0 = <&duart_pins_a>;
+				status = "okay";
+			};
+		};
+	};
+};
diff --git a/arch/arm/boot/dts/imx23.dtsi b/arch/arm/boot/dts/imx23.dtsi
new file mode 100644
index 0000000..8c5f999
--- /dev/null
+++ b/arch/arm/boot/dts/imx23.dtsi
@@ -0,0 +1,295 @@
+/*
+ * Copyright 2012 Freescale Semiconductor, Inc.
+ *
+ * The code contained herein is licensed under the GNU General Public
+ * License. You may obtain a copy of the GNU General Public License
+ * Version 2 or later at the following locations:
+ *
+ * http://www.opensource.org/licenses/gpl-license.html
+ * http://www.gnu.org/copyleft/gpl.html
+ */
+
+/include/ "skeleton.dtsi"
+
+/ {
+	interrupt-parent = <&icoll>;
+
+	aliases {
+		gpio0 = &gpio0;
+		gpio1 = &gpio1;
+		gpio2 = &gpio2;
+	};
+
+	cpus {
+		cpu@0 {
+			compatible = "arm,arm926ejs";
+		};
+	};
+
+	apb@80000000 {
+		compatible = "simple-bus";
+		#address-cells = <1>;
+		#size-cells = <1>;
+		reg = <0x80000000 0x80000>;
+		ranges;
+
+		apbh@80000000 {
+			compatible = "simple-bus";
+			#address-cells = <1>;
+			#size-cells = <1>;
+			reg = <0x80000000 0x40000>;
+			ranges;
+
+			icoll: interrupt-controller@80000000 {
+				compatible = "fsl,imx23-icoll", "fsl,mxs-icoll";
+				interrupt-controller;
+				#interrupt-cells = <1>;
+				reg = <0x80000000 0x2000>;
+			};
+
+			dma-apbh@80004000 {
+				compatible = "fsl,imx23-dma-apbh";
+				reg = <0x80004000 2000>;
+			};
+
+			ecc@80008000 {
+				reg = <0x80008000 2000>;
+				status = "disabled";
+			};
+
+			bch@8000a000 {
+				reg = <0x8000a000 2000>;
+				status = "disabled";
+			};
+
+			gpmi@8000c000 {
+				reg = <0x8000c000 2000>;
+				status = "disabled";
+			};
+
+			ssp0: ssp@80010000 {
+				reg = <0x80010000 2000>;
+				interrupts = <15 14>;
+				fsl,ssp-dma-channel = <1>;
+				status = "disabled";
+			};
+
+			etm@80014000 {
+				reg = <0x80014000 2000>;
+				status = "disabled";
+			};
+
+			pinctrl@80018000 {
+				#address-cells = <1>;
+				#size-cells = <0>;
+				compatible = "fsl,imx23-pinctrl", "simple-bus";
+				reg = <0x80018000 2000>;
+
+				gpio0: gpio@0 {
+					compatible = "fsl,imx23-gpio", "fsl,mxs-gpio";
+					interrupts = <16>;
+					gpio-controller;
+					#gpio-cells = <2>;
+					interrupt-controller;
+					#interrupt-cells = <2>;
+				};
+
+				gpio1: gpio@1 {
+					compatible = "fsl,imx23-gpio", "fsl,mxs-gpio";
+					interrupts = <17>;
+					gpio-controller;
+					#gpio-cells = <2>;
+					interrupt-controller;
+					#interrupt-cells = <2>;
+				};
+
+				gpio2: gpio@2 {
+					compatible = "fsl,imx23-gpio", "fsl,mxs-gpio";
+					interrupts = <18>;
+					gpio-controller;
+					#gpio-cells = <2>;
+					interrupt-controller;
+					#interrupt-cells = <2>;
+				};
+
+				duart_pins_a: duart@0 {
+					reg = <0>;
+					fsl,pinmux-ids = <0x11a2 0x11b2>;
+					fsl,drive-strength = <0>;
+					fsl,voltage = <1>;
+					fsl,pull-up = <0>;
+				};
+
+				mmc0_8bit_pins_a: mmc0-8bit@0 {
+					reg = <0>;
+					fsl,pinmux-ids = <0x2020 0x2030 0x2040
+						0x2050 0x0082 0x0092 0x00a2
+						0x00b2 0x2000 0x2010 0x2060>;
+					fsl,drive-strength = <1>;
+					fsl,voltage = <1>;
+					fsl,pull-up = <1>;
+				};
+
+				mmc0_pins_fixup: mmc0-pins-fixup {
+					fsl,pinmux-ids = <0x2010 0x2060>;
+					fsl,pull-up = <0>;
+				};
+			};
+
+			digctl@8001c000 {
+				reg = <0x8001c000 2000>;
+				status = "disabled";
+			};
+
+			emi@80020000 {
+				reg = <0x80020000 2000>;
+				status = "disabled";
+			};
+
+			dma-apbx@80024000 {
+				compatible = "fsl,imx23-dma-apbx";
+				reg = <0x80024000 2000>;
+			};
+
+			dcp@80028000 {
+				reg = <0x80028000 2000>;
+				status = "disabled";
+			};
+
+			pxp@8002a000 {
+				reg = <0x8002a000 2000>;
+				status = "disabled";
+			};
+
+			ocotp@8002c000 {
+				reg = <0x8002c000 2000>;
+				status = "disabled";
+			};
+
+			axi-ahb@8002e000 {
+				reg = <0x8002e000 2000>;
+				status = "disabled";
+			};
+
+			lcdif@80030000 {
+				reg = <0x80030000 2000>;
+				status = "disabled";
+			};
+
+			ssp1: ssp@80034000 {
+				reg = <0x80034000 2000>;
+				interrupts = <2 20>;
+				fsl,ssp-dma-channel = <2>;
+				status = "disabled";
+			};
+
+			tvenc@80038000 {
+				reg = <0x80038000 2000>;
+				status = "disabled";
+			};
+		};
+
+		apbx@80040000 {
+			compatible = "simple-bus";
+			#address-cells = <1>;
+			#size-cells = <1>;
+			reg = <0x80040000 0x40000>;
+			ranges;
+
+			clkctl@80040000 {
+				reg = <0x80040000 2000>;
+				status = "disabled";
+			};
+
+			saif0: saif@80042000 {
+				reg = <0x80042000 2000>;
+				status = "disabled";
+			};
+
+			power@80044000 {
+				reg = <0x80044000 2000>;
+				status = "disabled";
+			};
+
+			saif1: saif@80046000 {
+				reg = <0x80046000 2000>;
+				status = "disabled";
+			};
+
+			audio-out@80048000 {
+				reg = <0x80048000 2000>;
+				status = "disabled";
+			};
+
+			audio-in@8004c000 {
+				reg = <0x8004c000 2000>;
+				status = "disabled";
+			};
+
+			lradc@80050000 {
+				reg = <0x80050000 2000>;
+				status = "disabled";
+			};
+
+			spdif@80054000 {
+				reg = <0x80054000 2000>;
+				status = "disabled";
+			};
+
+			i2c@80058000 {
+				reg = <0x80058000 2000>;
+				status = "disabled";
+			};
+
+			rtc@8005c000 {
+				reg = <0x8005c000 2000>;
+				status = "disabled";
+			};
+
+			pwm@80064000 {
+				reg = <0x80064000 2000>;
+				status = "disabled";
+			};
+
+			timrot@80068000 {
+				reg = <0x80068000 2000>;
+				status = "disabled";
+			};
+
+			auart0: serial@8006c000 {
+				reg = <0x8006c000 0x2000>;
+				status = "disabled";
+			};
+
+			auart1: serial@8006e000 {
+				reg = <0x8006e000 0x2000>;
+				status = "disabled";
+			};
+
+			duart: serial@80070000 {
+				compatible = "arm,pl011", "arm,primecell";
+				reg = <0x80070000 0x2000>;
+				interrupts = <0>;
+				status = "disabled";
+			};
+
+			usbphy@8007c000 {
+				reg = <0x8007c000 0x2000>;
+				status = "disabled";
+			};
+		};
+	};
+
+	ahb@80080000 {
+		compatible = "simple-bus";
+		#address-cells = <1>;
+		#size-cells = <1>;
+		reg = <0x80080000 0x80000>;
+		ranges;
+
+		usbctrl@80080000 {
+			reg = <0x80080000 0x10000>;
+			status = "disabled";
+		};
+	};
+};
diff --git a/arch/arm/boot/dts/imx27-phytec-phycore.dts b/arch/arm/boot/dts/imx27-phytec-phycore.dts
index a51a08f..2b0ff60 100644
--- a/arch/arm/boot/dts/imx27-phytec-phycore.dts
+++ b/arch/arm/boot/dts/imx27-phytec-phycore.dts
@@ -27,22 +27,22 @@
 				status = "okay";
 			};
 
-			uart@1000a000 {
+			serial@1000a000 {
 				fsl,uart-has-rtscts;
 				status = "okay";
 			};
 
-			uart@1000b000 {
+			serial@1000b000 {
 				fsl,uart-has-rtscts;
 				status = "okay";
 			};
 
-			uart@1000c000 {
+			serial@1000c000 {
 				fsl,uart-has-rtscts;
 				status = "okay";
 			};
 
-			fec@1002b000 {
+			ethernet@1002b000 {
 				status = "okay";
 			};
 
diff --git a/arch/arm/boot/dts/imx27.dtsi b/arch/arm/boot/dts/imx27.dtsi
index bc5e7d5..386c769 100644
--- a/arch/arm/boot/dts/imx27.dtsi
+++ b/arch/arm/boot/dts/imx27.dtsi
@@ -59,28 +59,28 @@
 				status = "disabled";
 			};
 
-			uart1: uart@1000a000 {
+			uart1: serial@1000a000 {
 				compatible = "fsl,imx27-uart", "fsl,imx21-uart";
 				reg = <0x1000a000 0x1000>;
 				interrupts = <20>;
 				status = "disabled";
 			};
 
-			uart2: uart@1000b000 {
+			uart2: serial@1000b000 {
 				compatible = "fsl,imx27-uart", "fsl,imx21-uart";
 				reg = <0x1000b000 0x1000>;
 				interrupts = <19>;
 				status = "disabled";
 			};
 
-			uart3: uart@1000c000 {
+			uart3: serial@1000c000 {
 				compatible = "fsl,imx27-uart", "fsl,imx21-uart";
 				reg = <0x1000c000 0x1000>;
 				interrupts = <18>;
 				status = "disabled";
 			};
 
-			uart4: uart@1000d000 {
+			uart4: serial@1000d000 {
 				compatible = "fsl,imx27-uart", "fsl,imx21-uart";
 				reg = <0x1000d000 0x1000>;
 				interrupts = <17>;
@@ -183,14 +183,14 @@
 				status = "disabled";
 			};
 
-			uart5: uart@1001b000 {
+			uart5: serial@1001b000 {
 				compatible = "fsl,imx27-uart", "fsl,imx21-uart";
 				reg = <0x1001b000 0x1000>;
 				interrupts = <49>;
 				status = "disabled";
 			};
 
-			uart6: uart@1001c000 {
+			uart6: serial@1001c000 {
 				compatible = "fsl,imx27-uart", "fsl,imx21-uart";
 				reg = <0x1001c000 0x1000>;
 				interrupts = <48>;
@@ -206,12 +206,21 @@
 				status = "disabled";
 			};
 
-			fec: fec@1002b000 {
+			fec: ethernet@1002b000 {
 				compatible = "fsl,imx27-fec";
 				reg = <0x1002b000 0x4000>;
 				interrupts = <50>;
 				status = "disabled";
 			};
 		};
+		nand@d8000000 {
+			#address-cells = <1>;
+			#size-cells = <1>;
+
+			compatible = "fsl,imx27-nand";
+			reg = <0xd8000000 0x1000>;
+			interrupts = <29>;
+			status = "disabled";
+		};
 	};
 };
diff --git a/arch/arm/boot/dts/imx28-evk.dts b/arch/arm/boot/dts/imx28-evk.dts
new file mode 100644
index 0000000..ee520a5
--- /dev/null
+++ b/arch/arm/boot/dts/imx28-evk.dts
@@ -0,0 +1,114 @@
+/*
+ * Copyright 2012 Freescale Semiconductor, Inc.
+ *
+ * The code contained herein is licensed under the GNU General Public
+ * License. You may obtain a copy of the GNU General Public License
+ * Version 2 or later at the following locations:
+ *
+ * http://www.opensource.org/licenses/gpl-license.html
+ * http://www.gnu.org/copyleft/gpl.html
+ */
+
+/dts-v1/;
+/include/ "imx28.dtsi"
+
+/ {
+	model = "Freescale i.MX28 Evaluation Kit";
+	compatible = "fsl,imx28-evk", "fsl,imx28";
+
+	memory {
+		reg = <0x40000000 0x08000000>;
+	};
+
+	apb@80000000 {
+		apbh@80000000 {
+			ssp0: ssp@80010000 {
+				compatible = "fsl,imx28-mmc";
+				pinctrl-names = "default";
+				pinctrl-0 = <&mmc0_8bit_pins_a
+					&mmc0_cd_cfg &mmc0_sck_cfg>;
+				bus-width = <8>;
+				wp-gpios = <&gpio2 12 0>;
+				status = "okay";
+			};
+
+			ssp1: ssp@80012000 {
+				compatible = "fsl,imx28-mmc";
+				bus-width = <8>;
+				wp-gpios = <&gpio0 28 0>;
+				status = "okay";
+			};
+		};
+
+		apbx@80040000 {
+			saif0: saif@80042000 {
+				pinctrl-names = "default";
+				pinctrl-0 = <&saif0_pins_a>;
+				status = "okay";
+			};
+
+			saif1: saif@80046000 {
+				pinctrl-names = "default";
+				pinctrl-0 = <&saif1_pins_a>;
+				fsl,saif-master = <&saif0>;
+				status = "okay";
+			};
+
+			i2c0: i2c@80058000 {
+				pinctrl-names = "default";
+				pinctrl-0 = <&i2c0_pins_a>;
+				status = "okay";
+
+				sgtl5000: codec@0a {
+					compatible = "fsl,sgtl5000";
+					reg = <0x0a>;
+					VDDA-supply = <&reg_3p3v>;
+					VDDIO-supply = <&reg_3p3v>;
+
+				};
+			};
+
+			duart: serial@80074000 {
+				pinctrl-names = "default";
+				pinctrl-0 = <&duart_pins_a>;
+				status = "okay";
+			};
+		};
+	};
+
+	ahb@80080000 {
+		mac0: ethernet@800f0000 {
+			phy-mode = "rmii";
+			pinctrl-names = "default";
+			pinctrl-0 = <&mac0_pins_a>;
+			status = "okay";
+		};
+
+		mac1: ethernet@800f4000 {
+			phy-mode = "rmii";
+			pinctrl-names = "default";
+			pinctrl-0 = <&mac1_pins_a>;
+			status = "okay";
+		};
+	};
+
+	regulators {
+		compatible = "simple-bus";
+
+		reg_3p3v: 3p3v {
+			compatible = "regulator-fixed";
+			regulator-name = "3P3V";
+			regulator-min-microvolt = <3300000>;
+			regulator-max-microvolt = <3300000>;
+			regulator-always-on;
+		};
+	};
+
+	sound {
+		compatible = "fsl,imx28-evk-sgtl5000",
+			     "fsl,mxs-audio-sgtl5000";
+		model = "imx28-evk-sgtl5000";
+		saif-controllers = <&saif0 &saif1>;
+		audio-codec = <&sgtl5000>;
+	};
+};
diff --git a/arch/arm/boot/dts/imx28.dtsi b/arch/arm/boot/dts/imx28.dtsi
new file mode 100644
index 0000000..4634cb8
--- /dev/null
+++ b/arch/arm/boot/dts/imx28.dtsi
@@ -0,0 +1,497 @@
+/*
+ * Copyright 2012 Freescale Semiconductor, Inc.
+ *
+ * The code contained herein is licensed under the GNU General Public
+ * License. You may obtain a copy of the GNU General Public License
+ * Version 2 or later at the following locations:
+ *
+ * http://www.opensource.org/licenses/gpl-license.html
+ * http://www.gnu.org/copyleft/gpl.html
+ */
+
+/include/ "skeleton.dtsi"
+
+/ {
+	interrupt-parent = <&icoll>;
+
+	aliases {
+		gpio0 = &gpio0;
+		gpio1 = &gpio1;
+		gpio2 = &gpio2;
+		gpio3 = &gpio3;
+		gpio4 = &gpio4;
+		saif0 = &saif0;
+		saif1 = &saif1;
+	};
+
+	cpus {
+		cpu@0 {
+			compatible = "arm,arm926ejs";
+		};
+	};
+
+	apb@80000000 {
+		compatible = "simple-bus";
+		#address-cells = <1>;
+		#size-cells = <1>;
+		reg = <0x80000000 0x80000>;
+		ranges;
+
+		apbh@80000000 {
+			compatible = "simple-bus";
+			#address-cells = <1>;
+			#size-cells = <1>;
+			reg = <0x80000000 0x3c900>;
+			ranges;
+
+			icoll: interrupt-controller@80000000 {
+				compatible = "fsl,imx28-icoll", "fsl,mxs-icoll";
+				interrupt-controller;
+				#interrupt-cells = <1>;
+				reg = <0x80000000 0x2000>;
+			};
+
+			hsadc@80002000 {
+				reg = <0x80002000 2000>;
+				interrupts = <13 87>;
+				status = "disabled";
+			};
+
+			dma-apbh@80004000 {
+				compatible = "fsl,imx28-dma-apbh";
+				reg = <0x80004000 2000>;
+			};
+
+			perfmon@80006000 {
+				reg = <0x80006000 800>;
+				interrupts = <27>;
+				status = "disabled";
+			};
+
+			bch@8000a000 {
+				reg = <0x8000a000 2000>;
+				interrupts = <41>;
+				status = "disabled";
+			};
+
+			gpmi@8000c000 {
+				reg = <0x8000c000 2000>;
+				interrupts = <42 88>;
+				status = "disabled";
+			};
+
+			ssp0: ssp@80010000 {
+				reg = <0x80010000 2000>;
+				interrupts = <96 82>;
+				fsl,ssp-dma-channel = <0>;
+				status = "disabled";
+			};
+
+			ssp1: ssp@80012000 {
+				reg = <0x80012000 2000>;
+				interrupts = <97 83>;
+				fsl,ssp-dma-channel = <1>;
+				status = "disabled";
+			};
+
+			ssp2: ssp@80014000 {
+				reg = <0x80014000 2000>;
+				interrupts = <98 84>;
+				fsl,ssp-dma-channel = <2>;
+				status = "disabled";
+			};
+
+			ssp3: ssp@80016000 {
+				reg = <0x80016000 2000>;
+				interrupts = <99 85>;
+				fsl,ssp-dma-channel = <3>;
+				status = "disabled";
+			};
+
+			pinctrl@80018000 {
+				#address-cells = <1>;
+				#size-cells = <0>;
+				compatible = "fsl,imx28-pinctrl", "simple-bus";
+				reg = <0x80018000 2000>;
+
+				gpio0: gpio@0 {
+					compatible = "fsl,imx28-gpio", "fsl,mxs-gpio";
+					interrupts = <127>;
+					gpio-controller;
+					#gpio-cells = <2>;
+					interrupt-controller;
+					#interrupt-cells = <2>;
+				};
+
+				gpio1: gpio@1 {
+					compatible = "fsl,imx28-gpio", "fsl,mxs-gpio";
+					interrupts = <126>;
+					gpio-controller;
+					#gpio-cells = <2>;
+					interrupt-controller;
+					#interrupt-cells = <2>;
+				};
+
+				gpio2: gpio@2 {
+					compatible = "fsl,imx28-gpio", "fsl,mxs-gpio";
+					interrupts = <125>;
+					gpio-controller;
+					#gpio-cells = <2>;
+					interrupt-controller;
+					#interrupt-cells = <2>;
+				};
+
+				gpio3: gpio@3 {
+					compatible = "fsl,imx28-gpio", "fsl,mxs-gpio";
+					interrupts = <124>;
+					gpio-controller;
+					#gpio-cells = <2>;
+					interrupt-controller;
+					#interrupt-cells = <2>;
+				};
+
+				gpio4: gpio@4 {
+					compatible = "fsl,imx28-gpio", "fsl,mxs-gpio";
+					interrupts = <123>;
+					gpio-controller;
+					#gpio-cells = <2>;
+					interrupt-controller;
+					#interrupt-cells = <2>;
+				};
+
+				duart_pins_a: duart@0 {
+					reg = <0>;
+					fsl,pinmux-ids = <0x3102 0x3112>;
+					fsl,drive-strength = <0>;
+					fsl,voltage = <1>;
+					fsl,pull-up = <0>;
+				};
+
+				mac0_pins_a: mac0@0 {
+					reg = <0>;
+					fsl,pinmux-ids = <0x4000 0x4010 0x4020
+						0x4030 0x4040 0x4060 0x4070
+						0x4080 0x4100>;
+					fsl,drive-strength = <1>;
+					fsl,voltage = <1>;
+					fsl,pull-up = <1>;
+				};
+
+				mac1_pins_a: mac1@0 {
+					reg = <0>;
+					fsl,pinmux-ids = <0x40f1 0x4091 0x40a1
+						0x40e1 0x40b1 0x40c1>;
+					fsl,drive-strength = <1>;
+					fsl,voltage = <1>;
+					fsl,pull-up = <1>;
+				};
+
+				mmc0_8bit_pins_a: mmc0-8bit@0 {
+					reg = <0>;
+					fsl,pinmux-ids = <0x2000 0x2010 0x2020
+						0x2030 0x2040 0x2050 0x2060
+						0x2070 0x2080 0x2090 0x20a0>;
+					fsl,drive-strength = <1>;
+					fsl,voltage = <1>;
+					fsl,pull-up = <1>;
+				};
+
+				mmc0_cd_cfg: mmc0-cd-cfg {
+					fsl,pinmux-ids = <0x2090>;
+					fsl,pull-up = <0>;
+				};
+
+				mmc0_sck_cfg: mmc0-sck-cfg {
+					fsl,pinmux-ids = <0x20a0>;
+					fsl,drive-strength = <2>;
+					fsl,pull-up = <0>;
+				};
+
+				i2c0_pins_a: i2c0@0 {
+					reg = <0>;
+					fsl,pinmux-ids = <0x3180 0x3190>;
+					fsl,drive-strength = <1>;
+					fsl,voltage = <1>;
+					fsl,pull-up = <1>;
+				};
+
+				saif0_pins_a: saif0@0 {
+					reg = <0>;
+					fsl,pinmux-ids =
+						<0x3140 0x3150 0x3160 0x3170>;
+					fsl,drive-strength = <2>;
+					fsl,voltage = <1>;
+					fsl,pull-up = <1>;
+				};
+
+				saif1_pins_a: saif1@0 {
+					reg = <0>;
+					fsl,pinmux-ids = <0x31a0>;
+					fsl,drive-strength = <2>;
+					fsl,voltage = <1>;
+					fsl,pull-up = <1>;
+				};
+			};
+
+			digctl@8001c000 {
+				reg = <0x8001c000 2000>;
+				interrupts = <89>;
+				status = "disabled";
+			};
+
+			etm@80022000 {
+				reg = <0x80022000 2000>;
+				status = "disabled";
+			};
+
+			dma-apbx@80024000 {
+				compatible = "fsl,imx28-dma-apbx";
+				reg = <0x80024000 2000>;
+			};
+
+			dcp@80028000 {
+				reg = <0x80028000 2000>;
+				interrupts = <52 53 54>;
+				status = "disabled";
+			};
+
+			pxp@8002a000 {
+				reg = <0x8002a000 2000>;
+				interrupts = <39>;
+				status = "disabled";
+			};
+
+			ocotp@8002c000 {
+				reg = <0x8002c000 2000>;
+				status = "disabled";
+			};
+
+			axi-ahb@8002e000 {
+				reg = <0x8002e000 2000>;
+				status = "disabled";
+			};
+
+			lcdif@80030000 {
+				reg = <0x80030000 2000>;
+				interrupts = <38 86>;
+				status = "disabled";
+			};
+
+			can0: can@80032000 {
+				reg = <0x80032000 2000>;
+				interrupts = <8>;
+				status = "disabled";
+			};
+
+			can1: can@80034000 {
+				reg = <0x80034000 2000>;
+				interrupts = <9>;
+				status = "disabled";
+			};
+
+			simdbg@8003c000 {
+				reg = <0x8003c000 200>;
+				status = "disabled";
+			};
+
+			simgpmisel@8003c200 {
+				reg = <0x8003c200 100>;
+				status = "disabled";
+			};
+
+			simsspsel@8003c300 {
+				reg = <0x8003c300 100>;
+				status = "disabled";
+			};
+
+			simmemsel@8003c400 {
+				reg = <0x8003c400 100>;
+				status = "disabled";
+			};
+
+			gpiomon@8003c500 {
+				reg = <0x8003c500 100>;
+				status = "disabled";
+			};
+
+			simenet@8003c700 {
+				reg = <0x8003c700 100>;
+				status = "disabled";
+			};
+
+			armjtag@8003c800 {
+				reg = <0x8003c800 100>;
+				status = "disabled";
+			};
+		};
+
+		apbx@80040000 {
+			compatible = "simple-bus";
+			#address-cells = <1>;
+			#size-cells = <1>;
+			reg = <0x80040000 0x40000>;
+			ranges;
+
+			clkctl@80040000 {
+				reg = <0x80040000 2000>;
+				status = "disabled";
+			};
+
+			saif0: saif@80042000 {
+				compatible = "fsl,imx28-saif";
+				reg = <0x80042000 2000>;
+				interrupts = <59 80>;
+				fsl,saif-dma-channel = <4>;
+				status = "disabled";
+			};
+
+			power@80044000 {
+				reg = <0x80044000 2000>;
+				status = "disabled";
+			};
+
+			saif1: saif@80046000 {
+				compatible = "fsl,imx28-saif";
+				reg = <0x80046000 2000>;
+				interrupts = <58 81>;
+				fsl,saif-dma-channel = <5>;
+				status = "disabled";
+			};
+
+			lradc@80050000 {
+				reg = <0x80050000 2000>;
+				status = "disabled";
+			};
+
+			spdif@80054000 {
+				reg = <0x80054000 2000>;
+				interrupts = <45 66>;
+				status = "disabled";
+			};
+
+			rtc@80056000 {
+				reg = <0x80056000 2000>;
+				interrupts = <28 29>;
+				status = "disabled";
+			};
+
+			i2c0: i2c@80058000 {
+				#address-cells = <1>;
+				#size-cells = <0>;
+				compatible = "fsl,imx28-i2c";
+				reg = <0x80058000 2000>;
+				interrupts = <111 68>;
+				status = "disabled";
+			};
+
+			i2c1: i2c@8005a000 {
+				#address-cells = <1>;
+				#size-cells = <0>;
+				compatible = "fsl,imx28-i2c";
+				reg = <0x8005a000 2000>;
+				interrupts = <110 69>;
+				status = "disabled";
+			};
+
+			pwm@80064000 {
+				reg = <0x80064000 2000>;
+				status = "disabled";
+			};
+
+			timrot@80068000 {
+				reg = <0x80068000 2000>;
+				status = "disabled";
+			};
+
+			auart0: serial@8006a000 {
+				reg = <0x8006a000 0x2000>;
+				interrupts = <112 70 71>;
+				status = "disabled";
+			};
+
+			auart1: serial@8006c000 {
+				reg = <0x8006c000 0x2000>;
+				interrupts = <113 72 73>;
+				status = "disabled";
+			};
+
+			auart2: serial@8006e000 {
+				reg = <0x8006e000 0x2000>;
+				interrupts = <114 74 75>;
+				status = "disabled";
+			};
+
+			auart3: serial@80070000 {
+				reg = <0x80070000 0x2000>;
+				interrupts = <115 76 77>;
+				status = "disabled";
+			};
+
+			auart4: serial@80072000 {
+				reg = <0x80072000 0x2000>;
+				interrupts = <116 78 79>;
+				status = "disabled";
+			};
+
+			duart: serial@80074000 {
+				compatible = "arm,pl011", "arm,primecell";
+				reg = <0x80074000 0x1000>;
+				interrupts = <47>;
+				status = "disabled";
+			};
+
+			usbphy0: usbphy@8007c000 {
+				reg = <0x8007c000 0x2000>;
+				status = "disabled";
+			};
+
+			usbphy1: usbphy@8007e000 {
+				reg = <0x8007e000 0x2000>;
+				status = "disabled";
+			};
+		};
+	};
+
+	ahb@80080000 {
+		compatible = "simple-bus";
+		#address-cells = <1>;
+		#size-cells = <1>;
+		reg = <0x80080000 0x80000>;
+		ranges;
+
+		usbctrl0: usbctrl@80080000 {
+			reg = <0x80080000 0x10000>;
+			status = "disabled";
+		};
+
+		usbctrl1: usbctrl@80090000 {
+			reg = <0x80090000 0x10000>;
+			status = "disabled";
+		};
+
+		dflpt@800c0000 {
+			reg = <0x800c0000 0x10000>;
+			status = "disabled";
+		};
+
+		mac0: ethernet@800f0000 {
+			compatible = "fsl,imx28-fec";
+			reg = <0x800f0000 0x4000>;
+			interrupts = <101>;
+			status = "disabled";
+		};
+
+		mac1: ethernet@800f4000 {
+			compatible = "fsl,imx28-fec";
+			reg = <0x800f4000 0x4000>;
+			interrupts = <102>;
+			status = "disabled";
+		};
+
+		switch@800f8000 {
+			reg = <0x800f8000 0x8000>;
+			status = "disabled";
+		};
+
+	};
+};
diff --git a/arch/arm/boot/dts/imx51-babbage.dts b/arch/arm/boot/dts/imx51-babbage.dts
index 9949e60..de065b5 100644
--- a/arch/arm/boot/dts/imx51-babbage.dts
+++ b/arch/arm/boot/dts/imx51-babbage.dts
@@ -17,10 +17,6 @@
 	model = "Freescale i.MX51 Babbage Board";
 	compatible = "fsl,imx51-babbage", "fsl,imx51";
 
-	chosen {
-		bootargs = "console=ttymxc0,115200 root=/dev/mmcblk0p3 rootwait";
-	};
-
 	memory {
 		reg = <0x90000000 0x20000000>;
 	};
@@ -40,7 +36,7 @@
 					status = "okay";
 				};
 
-				uart3: uart@7000c000 {
+				uart3: serial@7000c000 {
 					fsl,uart-has-rtscts;
 					status = "okay";
 				};
@@ -166,6 +162,11 @@
 						};
 					};
 				};
+
+				ssi2: ssi@70014000 {
+					fsl,mode = "i2s-slave";
+					status = "okay";
+				};
 			};
 
 			wdog@73f98000 { /* WDOG1 */
@@ -177,12 +178,12 @@
 				reg = <0x73fa8000 0x4000>;
 			};
 
-			uart1: uart@73fbc000 {
+			uart1: serial@73fbc000 {
 				fsl,uart-has-rtscts;
 				status = "okay";
 			};
 
-			uart2: uart@73fc0000 {
+			uart2: serial@73fc0000 {
 				status = "okay";
 			};
 		};
@@ -195,13 +196,20 @@
 			i2c@83fc4000 { /* I2C2 */
 				status = "okay";
 
-				codec: sgtl5000@0a {
+				sgtl5000: codec@0a {
 					compatible = "fsl,sgtl5000";
 					reg = <0x0a>;
+					clock-frequency = <26000000>;
+					VDDA-supply = <&vdig_reg>;
+					VDDIO-supply = <&vvideo_reg>;
 				};
 			};
 
-			fec@83fec000 {
+			audmux@83fd0000 {
+				status = "okay";
+			};
+
+			ethernet@83fec000 {
 				phy-mode = "mii";
 				status = "okay";
 			};
@@ -218,4 +226,18 @@
 			gpio-key,wakeup;
 		};
 	};
+
+	sound {
+		compatible = "fsl,imx51-babbage-sgtl5000",
+			     "fsl,imx-audio-sgtl5000";
+		model = "imx51-babbage-sgtl5000";
+		ssi-controller = <&ssi2>;
+		audio-codec = <&sgtl5000>;
+		audio-routing =
+			"MIC_IN", "Mic Jack",
+			"Mic Jack", "Mic Bias",
+			"Headphone Jack", "HP_OUT";
+		mux-int-port = <2>;
+		mux-ext-port = <3>;
+	};
 };
diff --git a/arch/arm/boot/dts/imx51.dtsi b/arch/arm/boot/dts/imx51.dtsi
index 6663986..bfa65ab 100644
--- a/arch/arm/boot/dts/imx51.dtsi
+++ b/arch/arm/boot/dts/imx51.dtsi
@@ -86,7 +86,7 @@
 					status = "disabled";
 				};
 
-				uart3: uart@7000c000 {
+				uart3: serial@7000c000 {
 					compatible = "fsl,imx51-uart", "fsl,imx21-uart";
 					reg = <0x7000c000 0x4000>;
 					interrupts = <33>;
@@ -102,6 +102,15 @@
 					status = "disabled";
 				};
 
+				ssi2: ssi@70014000 {
+					compatible = "fsl,imx51-ssi", "fsl,imx21-ssi";
+					reg = <0x70014000 0x4000>;
+					interrupts = <30>;
+					fsl,fifo-depth = <15>;
+					fsl,ssi-dma-events = <25 24 23 22>; /* TX0 RX0 TX1 RX1 */
+					status = "disabled";
+				};
+
 				esdhc@70020000 { /* ESDHC3 */
 					compatible = "fsl,imx51-esdhc";
 					reg = <0x70020000 0x4000>;
@@ -171,14 +180,14 @@
 				status = "disabled";
 			};
 
-			uart1: uart@73fbc000 {
+			uart1: serial@73fbc000 {
 				compatible = "fsl,imx51-uart", "fsl,imx21-uart";
 				reg = <0x73fbc000 0x4000>;
 				interrupts = <31>;
 				status = "disabled";
 			};
 
-			uart2: uart@73fc0000 {
+			uart2: serial@73fc0000 {
 				compatible = "fsl,imx51-uart", "fsl,imx21-uart";
 				reg = <0x73fc0000 0x4000>;
 				interrupts = <32>;
@@ -235,7 +244,31 @@
 				status = "disabled";
 			};
 
-			fec@83fec000 {
+			ssi1: ssi@83fcc000 {
+				compatible = "fsl,imx51-ssi", "fsl,imx21-ssi";
+				reg = <0x83fcc000 0x4000>;
+				interrupts = <29>;
+				fsl,fifo-depth = <15>;
+				fsl,ssi-dma-events = <29 28 27 26>; /* TX0 RX0 TX1 RX1 */
+				status = "disabled";
+			};
+
+			audmux@83fd0000 {
+				compatible = "fsl,imx51-audmux", "fsl,imx31-audmux";
+				reg = <0x83fd0000 0x4000>;
+				status = "disabled";
+			};
+
+			ssi3: ssi@83fe8000 {
+				compatible = "fsl,imx51-ssi", "fsl,imx21-ssi";
+				reg = <0x83fe8000 0x4000>;
+				interrupts = <96>;
+				fsl,fifo-depth = <15>;
+				fsl,ssi-dma-events = <47 46 37 35>; /* TX0 RX0 TX1 RX1 */
+				status = "disabled";
+			};
+
+			ethernet@83fec000 {
 				compatible = "fsl,imx51-fec", "fsl,imx27-fec";
 				reg = <0x83fec000 0x4000>;
 				interrupts = <87>;
diff --git a/arch/arm/boot/dts/imx53-ard.dts b/arch/arm/boot/dts/imx53-ard.dts
index 2dccce4..5b8eafc 100644
--- a/arch/arm/boot/dts/imx53-ard.dts
+++ b/arch/arm/boot/dts/imx53-ard.dts
@@ -17,10 +17,6 @@
 	model = "Freescale i.MX53 Automotive Reference Design Board";
 	compatible = "fsl,imx53-ard", "fsl,imx53";
 
-	chosen {
-		bootargs = "console=ttymxc0,115200 root=/dev/mmcblk0p3 rootwait";
-	};
-
 	memory {
 		reg = <0x70000000 0x40000000>;
 	};
@@ -44,7 +40,7 @@
 				reg = <0x53fa8000 0x4000>;
 			};
 
-			uart1: uart@53fbc000 {
+			uart1: serial@53fbc000 {
 				status = "okay";
 			};
 		};
diff --git a/arch/arm/boot/dts/imx53-evk.dts b/arch/arm/boot/dts/imx53-evk.dts
index 5bac4aa..9c79803 100644
--- a/arch/arm/boot/dts/imx53-evk.dts
+++ b/arch/arm/boot/dts/imx53-evk.dts
@@ -17,10 +17,6 @@
 	model = "Freescale i.MX53 Evaluation Kit";
 	compatible = "fsl,imx53-evk", "fsl,imx53";
 
-	chosen {
-		bootargs = "console=ttymxc0,115200 root=/dev/mmcblk0p3 rootwait";
-	};
-
 	memory {
 		reg = <0x70000000 0x80000000>;
 	};
@@ -75,7 +71,7 @@
 				reg = <0x53fa8000 0x4000>;
 			};
 
-			uart1: uart@53fbc000 {
+			uart1: serial@53fbc000 {
 				status = "okay";
 			};
 		};
@@ -99,7 +95,7 @@
 				};
 			};
 
-			fec@63fec000 {
+			ethernet@63fec000 {
 				phy-mode = "rmii";
 				phy-reset-gpios = <&gpio7 6 0>;
 				status = "okay";
diff --git a/arch/arm/boot/dts/imx53-qsb.dts b/arch/arm/boot/dts/imx53-qsb.dts
index 5c57c86..2d803a9 100644
--- a/arch/arm/boot/dts/imx53-qsb.dts
+++ b/arch/arm/boot/dts/imx53-qsb.dts
@@ -17,10 +17,6 @@
 	model = "Freescale i.MX53 Quick Start Board";
 	compatible = "fsl,imx53-qsb", "fsl,imx53";
 
-	chosen {
-		bootargs = "console=ttymxc0,115200 root=/dev/mmcblk0p3 rootwait";
-	};
-
 	memory {
 		reg = <0x70000000 0x40000000>;
 	};
@@ -33,6 +29,11 @@
 					status = "okay";
 				};
 
+				ssi2: ssi@50014000 {
+					fsl,mode = "i2s-slave";
+					status = "okay";
+				};
+
 				esdhc@50020000 { /* ESDHC3 */
 					cd-gpios = <&gpio3 11 0>;
 					wp-gpios = <&gpio3 12 0>;
@@ -49,7 +50,7 @@
 				reg = <0x53fa8000 0x4000>;
 			};
 
-			uart1: uart@53fbc000 {
+			uart1: serial@53fbc000 {
 				status = "okay";
 			};
 		};
@@ -62,9 +63,11 @@
 			i2c@63fc4000 { /* I2C2 */
 				status = "okay";
 
-				codec: sgtl5000@0a {
+				sgtl5000: codec@0a {
 					compatible = "fsl,sgtl5000";
 					reg = <0x0a>;
+					VDDA-supply = <&reg_3p2v>;
+					VDDIO-supply = <&reg_3p2v>;
 				};
 			};
 
@@ -77,12 +80,88 @@
 				};
 
 				pmic: dialog@48 {
-					compatible = "dialog,da9053", "dialog,da9052";
+					compatible = "dlg,da9053-aa", "dlg,da9052";
 					reg = <0x48>;
+
+					regulators {
+						buck0 {
+							regulator-min-microvolt = <500000>;
+							regulator-max-microvolt = <2075000>;
+						};
+
+						buck1 {
+							regulator-min-microvolt = <500000>;
+							regulator-max-microvolt = <2075000>;
+						};
+
+						buck2 {
+							regulator-min-microvolt = <925000>;
+							regulator-max-microvolt = <2500000>;
+						};
+
+						buck3 {
+							regulator-min-microvolt = <925000>;
+							regulator-max-microvolt = <2500000>;
+						};
+
+						ldo4 {
+							regulator-min-microvolt = <600000>;
+							regulator-max-microvolt = <1800000>;
+						};
+
+						ldo5 {
+							regulator-min-microvolt = <600000>;
+							regulator-max-microvolt = <1800000>;
+						};
+
+						ldo6 {
+							regulator-min-microvolt = <1725000>;
+							regulator-max-microvolt = <3300000>;
+						};
+
+						ldo7 {
+							regulator-min-microvolt = <1725000>;
+							regulator-max-microvolt = <3300000>;
+						};
+
+						ldo8 {
+							regulator-min-microvolt = <1200000>;
+							regulator-max-microvolt = <3600000>;
+						};
+
+						ldo9 {
+							regulator-min-microvolt = <1200000>;
+							regulator-max-microvolt = <3600000>;
+						};
+
+						ldo10 {
+							regulator-min-microvolt = <1200000>;
+							regulator-max-microvolt = <3600000>;
+						};
+
+						ldo11 {
+							regulator-min-microvolt = <1200000>;
+							regulator-max-microvolt = <3600000>;
+						};
+
+						ldo12 {
+							regulator-min-microvolt = <1250000>;
+							regulator-max-microvolt = <3650000>;
+						};
+
+						ldo13 {
+							regulator-min-microvolt = <1200000>;
+							regulator-max-microvolt = <3600000>;
+						};
+					};
 				};
 			};
 
-			fec@63fec000 {
+			audmux@63fd0000 {
+				status = "okay";
+			};
+
+			ethernet@63fec000 {
 				phy-mode = "rmii";
 				phy-reset-gpios = <&gpio7 6 0>;
 				status = "okay";
@@ -122,4 +201,30 @@
 			linux,default-trigger = "heartbeat";
 		};
 	};
+
+	regulators {
+		compatible = "simple-bus";
+
+		reg_3p2v: 3p2v {
+			compatible = "regulator-fixed";
+			regulator-name = "3P2V";
+			regulator-min-microvolt = <3200000>;
+			regulator-max-microvolt = <3200000>;
+			regulator-always-on;
+		};
+	};
+
+	sound {
+		compatible = "fsl,imx53-qsb-sgtl5000",
+			     "fsl,imx-audio-sgtl5000";
+		model = "imx53-qsb-sgtl5000";
+		ssi-controller = <&ssi2>;
+		audio-codec = <&sgtl5000>;
+		audio-routing =
+			"MIC_IN", "Mic Jack",
+			"Mic Jack", "Mic Bias",
+			"Headphone Jack", "HP_OUT";
+		mux-int-port = <2>;
+		mux-ext-port = <5>;
+	};
 };
diff --git a/arch/arm/boot/dts/imx53-smd.dts b/arch/arm/boot/dts/imx53-smd.dts
index c7ee86c..0809102 100644
--- a/arch/arm/boot/dts/imx53-smd.dts
+++ b/arch/arm/boot/dts/imx53-smd.dts
@@ -17,10 +17,6 @@
 	model = "Freescale i.MX53 Smart Mobile Reference Design Board";
 	compatible = "fsl,imx53-smd", "fsl,imx53";
 
-	chosen {
-		bootargs = "console=ttymxc0,115200 root=/dev/mmcblk0p3 rootwait";
-	};
-
 	memory {
 		reg = <0x70000000 0x40000000>;
 	};
@@ -35,11 +31,11 @@
 				};
 
 				esdhc@50008000 { /* ESDHC2 */
-					fsl,card-wired;
+					non-removable;
 					status = "okay";
 				};
 
-				uart3: uart@5000c000 {
+				uart3: serial@5000c000 {
 					fsl,uart-has-rtscts;
 					status = "okay";
 				};
@@ -76,7 +72,7 @@
 				};
 
 				esdhc@50020000 { /* ESDHC3 */
-					fsl,card-wired;
+					non-removable;
 					status = "okay";
 				};
 			};
@@ -90,11 +86,11 @@
 				reg = <0x53fa8000 0x4000>;
 			};
 
-			uart1: uart@53fbc000 {
+			uart1: serial@53fbc000 {
 				status = "okay";
 			};
 
-			uart2: uart@53fc0000 {
+			uart2: serial@53fc0000 {
 				status = "okay";
 			};
 		};
@@ -142,7 +138,7 @@
 				};
 			};
 
-			fec@63fec000 {
+			ethernet@63fec000 {
 				phy-mode = "rmii";
 				phy-reset-gpios = <&gpio7 6 0>;
 				status = "okay";
diff --git a/arch/arm/boot/dts/imx53.dtsi b/arch/arm/boot/dts/imx53.dtsi
index 5dd91b9..e3e8694 100644
--- a/arch/arm/boot/dts/imx53.dtsi
+++ b/arch/arm/boot/dts/imx53.dtsi
@@ -88,7 +88,7 @@
 					status = "disabled";
 				};
 
-				uart3: uart@5000c000 {
+				uart3: serial@5000c000 {
 					compatible = "fsl,imx53-uart", "fsl,imx21-uart";
 					reg = <0x5000c000 0x4000>;
 					interrupts = <33>;
@@ -104,6 +104,15 @@
 					status = "disabled";
 				};
 
+				ssi2: ssi@50014000 {
+					compatible = "fsl,imx53-ssi", "fsl,imx21-ssi";
+					reg = <0x50014000 0x4000>;
+					interrupts = <30>;
+					fsl,fifo-depth = <15>;
+					fsl,ssi-dma-events = <25 24 23 22>; /* TX0 RX0 TX1 RX1 */
+					status = "disabled";
+				};
+
 				esdhc@50020000 { /* ESDHC3 */
 					compatible = "fsl,imx53-esdhc";
 					reg = <0x50020000 0x4000>;
@@ -173,14 +182,14 @@
 				status = "disabled";
 			};
 
-			uart1: uart@53fbc000 {
+			uart1: serial@53fbc000 {
 				compatible = "fsl,imx53-uart", "fsl,imx21-uart";
 				reg = <0x53fbc000 0x4000>;
 				interrupts = <31>;
 				status = "disabled";
 			};
 
-			uart2: uart@53fc0000 {
+			uart2: serial@53fc0000 {
 				compatible = "fsl,imx53-uart", "fsl,imx21-uart";
 				reg = <0x53fc0000 0x4000>;
 				interrupts = <32>;
@@ -226,7 +235,7 @@
 				status = "disabled";
 			};
 
-			uart4: uart@53ff0000 {
+			uart4: serial@53ff0000 {
 				compatible = "fsl,imx53-uart", "fsl,imx21-uart";
 				reg = <0x53ff0000 0x4000>;
 				interrupts = <13>;
@@ -241,7 +250,7 @@
 			reg = <0x60000000 0x10000000>;
 			ranges;
 
-			uart5: uart@63f90000 {
+			uart5: serial@63f90000 {
 				compatible = "fsl,imx53-uart", "fsl,imx21-uart";
 				reg = <0x63f90000 0x4000>;
 				interrupts = <86>;
@@ -290,7 +299,31 @@
 				status = "disabled";
 			};
 
-			fec@63fec000 {
+			ssi1: ssi@63fcc000 {
+				compatible = "fsl,imx53-ssi", "fsl,imx21-ssi";
+				reg = <0x63fcc000 0x4000>;
+				interrupts = <29>;
+				fsl,fifo-depth = <15>;
+				fsl,ssi-dma-events = <29 28 27 26>; /* TX0 RX0 TX1 RX1 */
+				status = "disabled";
+			};
+
+			audmux@63fd0000 {
+				compatible = "fsl,imx53-audmux", "fsl,imx31-audmux";
+				reg = <0x63fd0000 0x4000>;
+				status = "disabled";
+			};
+
+			ssi3: ssi@63fe8000 {
+				compatible = "fsl,imx53-ssi", "fsl,imx21-ssi";
+				reg = <0x63fe8000 0x4000>;
+				interrupts = <96>;
+				fsl,fifo-depth = <15>;
+				fsl,ssi-dma-events = <47 46 45 44>; /* TX0 RX0 TX1 RX1 */
+				status = "disabled";
+			};
+
+			ethernet@63fec000 {
 				compatible = "fsl,imx53-fec", "fsl,imx25-fec";
 				reg = <0x63fec000 0x4000>;
 				interrupts = <87>;
diff --git a/arch/arm/boot/dts/imx6q-arm2.dts b/arch/arm/boot/dts/imx6q-arm2.dts
index ce1c823..db4c609 100644
--- a/arch/arm/boot/dts/imx6q-arm2.dts
+++ b/arch/arm/boot/dts/imx6q-arm2.dts
@@ -17,19 +17,14 @@
 	model = "Freescale i.MX6 Quad Armadillo2 Board";
 	compatible = "fsl,imx6q-arm2", "fsl,imx6q";
 
-	chosen {
-		bootargs = "console=ttymxc0,115200 root=/dev/mmcblk3p3 rootwait";
-	};
-
 	memory {
 		reg = <0x10000000 0x80000000>;
 	};
 
 	soc {
 		aips-bus@02100000 { /* AIPS2 */
-			enet@02188000 {
+			ethernet@02188000 {
 				phy-mode = "rgmii";
-				local-mac-address = [00 04 9F 01 1B 61];
 				status = "okay";
 			};
 
@@ -37,16 +32,20 @@
 				cd-gpios = <&gpio6 11 0>;
 				wp-gpios = <&gpio6 14 0>;
 				vmmc-supply = <&reg_3p3v>;
+				pinctrl-names = "default";
+				pinctrl-0 = <&pinctrl_usdhc3_1>;
 				status = "okay";
 			};
 
 			usdhc@0219c000 { /* uSDHC4 */
-				fsl,card-wired;
+				non-removable;
 				vmmc-supply = <&reg_3p3v>;
+				pinctrl-names = "default";
+				pinctrl-0 = <&pinctrl_usdhc4_1>;
 				status = "okay";
 			};
 
-			uart4: uart@021f0000 {
+			uart4: serial@021f0000 {
 				status = "okay";
 			};
 		};
diff --git a/arch/arm/boot/dts/imx6q-sabrelite.dts b/arch/arm/boot/dts/imx6q-sabrelite.dts
index 4663a4e..e0ec929 100644
--- a/arch/arm/boot/dts/imx6q-sabrelite.dts
+++ b/arch/arm/boot/dts/imx6q-sabrelite.dts
@@ -22,8 +22,30 @@
 	};
 
 	soc {
+		aips-bus@02000000 { /* AIPS1 */
+			spba-bus@02000000 {
+				ecspi@02008000 { /* eCSPI1 */
+					fsl,spi-num-chipselects = <1>;
+					cs-gpios = <&gpio3 19 0>;
+					status = "okay";
+
+					flash: m25p80@0 {
+						compatible = "sst,sst25vf016b";
+						spi-max-frequency = <20000000>;
+						reg = <0>;
+					};
+				};
+
+				ssi1: ssi@02028000 {
+					fsl,mode = "i2s-slave";
+					status = "okay";
+				};
+			};
+
+		};
+
 		aips-bus@02100000 { /* AIPS2 */
-			enet@02188000 {
+			ethernet@02188000 {
 				phy-mode = "rgmii";
 				phy-reset-gpios = <&gpio3 23 0>;
 				status = "okay";
@@ -43,13 +65,23 @@
 				status = "okay";
 			};
 
-			uart2: uart@021e8000 {
+			audmux@021d8000 {
 				status = "okay";
+				pinctrl-names = "default";
+				pinctrl-0 = <&pinctrl_audmux_1>;
+			};
+
+			uart2: serial@021e8000 {
+				status = "okay";
+				pinctrl-names = "default";
+				pinctrl-0 = <&pinctrl_serial2_1>;
 			};
 
 			i2c@021a0000 { /* I2C1 */
 				status = "okay";
 				clock-frequency = <100000>;
+				pinctrl-names = "default";
+				pinctrl-0 = <&pinctrl_i2c1_1>;
 
 				codec: sgtl5000@0a {
 					compatible = "fsl,sgtl5000";
@@ -80,4 +112,18 @@
 			regulator-always-on;
 		};
 	};
+
+	sound {
+		compatible = "fsl,imx6q-sabrelite-sgtl5000",
+			     "fsl,imx-audio-sgtl5000";
+		model = "imx6q-sabrelite-sgtl5000";
+		ssi-controller = <&ssi1>;
+		audio-codec = <&codec>;
+		audio-routing =
+			"MIC_IN", "Mic Jack",
+			"Mic Jack", "Mic Bias",
+			"Headphone Jack", "HP_OUT";
+		mux-int-port = <1>;
+		mux-ext-port = <4>;
+	};
 };
diff --git a/arch/arm/boot/dts/imx6q-sabresd.dts b/arch/arm/boot/dts/imx6q-sabresd.dts
new file mode 100644
index 0000000..07509a1
--- /dev/null
+++ b/arch/arm/boot/dts/imx6q-sabresd.dts
@@ -0,0 +1,53 @@
+/*
+ * Copyright 2012 Freescale Semiconductor, Inc.
+ * Copyright 2011 Linaro Ltd.
+ *
+ * The code contained herein is licensed under the GNU General Public
+ * License. You may obtain a copy of the GNU General Public License
+ * Version 2 or later at the following locations:
+ *
+ * http://www.opensource.org/licenses/gpl-license.html
+ * http://www.gnu.org/copyleft/gpl.html
+ */
+
+/dts-v1/;
+/include/ "imx6q.dtsi"
+
+/ {
+	model = "Freescale i.MX6Q SABRE Smart Device Board";
+	compatible = "fsl,imx6q-sabresd", "fsl,imx6q";
+
+	memory {
+		reg = <0x10000000 0x40000000>;
+	};
+
+	soc {
+
+		aips-bus@02000000 { /* AIPS1 */
+			spba-bus@02000000 {
+				uart1: serial@02020000 {
+					status = "okay";
+				};
+			};
+		};
+
+		aips-bus@02100000 { /* AIPS2 */
+			ethernet@02188000 {
+				phy-mode = "rgmii";
+				status = "okay";
+			};
+
+			usdhc@02194000 { /* uSDHC2 */
+				cd-gpios = <&gpio2 2 0>;
+				wp-gpios = <&gpio2 3 0>;
+				status = "okay";
+			};
+
+			usdhc@02198000 { /* uSDHC3 */
+				cd-gpios = <&gpio2 0 0>;
+				wp-gpios = <&gpio2 1 0>;
+				status = "okay";
+			};
+		};
+	};
+};
diff --git a/arch/arm/boot/dts/imx6q.dtsi b/arch/arm/boot/dts/imx6q.dtsi
index 4905f51..8c90cba 100644
--- a/arch/arm/boot/dts/imx6q.dtsi
+++ b/arch/arm/boot/dts/imx6q.dtsi
@@ -165,7 +165,7 @@
 					status = "disabled";
 				};
 
-				uart1: uart@02020000 {
+				uart1: serial@02020000 {
 					compatible = "fsl,imx6q-uart", "fsl,imx21-uart";
 					reg = <0x02020000 0x4000>;
 					interrupts = <0 26 0x04>;
@@ -177,19 +177,31 @@
 					interrupts = <0 51 0x04>;
 				};
 
-				ssi@02028000 { /* SSI1 */
+				ssi1: ssi@02028000 {
+					compatible = "fsl,imx6q-ssi", "fsl,imx21-ssi";
 					reg = <0x02028000 0x4000>;
 					interrupts = <0 46 0x04>;
+					fsl,fifo-depth = <15>;
+					fsl,ssi-dma-events = <38 37>;
+					status = "disabled";
 				};
 
-				ssi@0202c000 { /* SSI2 */
+				ssi2: ssi@0202c000 {
+					compatible = "fsl,imx6q-ssi", "fsl,imx21-ssi";
 					reg = <0x0202c000 0x4000>;
 					interrupts = <0 47 0x04>;
+					fsl,fifo-depth = <15>;
+					fsl,ssi-dma-events = <42 41>;
+					status = "disabled";
 				};
 
-				ssi@02030000 { /* SSI3 */
+				ssi3: ssi@02030000 {
+					compatible = "fsl,imx6q-ssi", "fsl,imx21-ssi";
 					reg = <0x02030000 0x4000>;
 					interrupts = <0 48 0x04>;
+					fsl,fifo-depth = <15>;
+					fsl,ssi-dma-events = <46 45>;
+					status = "disabled";
 				};
 
 				asrc@02034000 {
@@ -346,6 +358,90 @@
 				compatible = "fsl,imx6q-anatop";
 				reg = <0x020c8000 0x1000>;
 				interrupts = <0 49 0x04 0 54 0x04 0 127 0x04>;
+
+				regulator-1p1@110 {
+					compatible = "fsl,anatop-regulator";
+					regulator-name = "vdd1p1";
+					regulator-min-microvolt = <800000>;
+					regulator-max-microvolt = <1375000>;
+					regulator-always-on;
+					anatop-reg-offset = <0x110>;
+					anatop-vol-bit-shift = <8>;
+					anatop-vol-bit-width = <5>;
+					anatop-min-bit-val = <4>;
+					anatop-min-voltage = <800000>;
+					anatop-max-voltage = <1375000>;
+				};
+
+				regulator-3p0@120 {
+					compatible = "fsl,anatop-regulator";
+					regulator-name = "vdd3p0";
+					regulator-min-microvolt = <2800000>;
+					regulator-max-microvolt = <3150000>;
+					regulator-always-on;
+					anatop-reg-offset = <0x120>;
+					anatop-vol-bit-shift = <8>;
+					anatop-vol-bit-width = <5>;
+					anatop-min-bit-val = <0>;
+					anatop-min-voltage = <2625000>;
+					anatop-max-voltage = <3400000>;
+				};
+
+				regulator-2p5@130 {
+					compatible = "fsl,anatop-regulator";
+					regulator-name = "vdd2p5";
+					regulator-min-microvolt = <2000000>;
+					regulator-max-microvolt = <2750000>;
+					regulator-always-on;
+					anatop-reg-offset = <0x130>;
+					anatop-vol-bit-shift = <8>;
+					anatop-vol-bit-width = <5>;
+					anatop-min-bit-val = <0>;
+					anatop-min-voltage = <2000000>;
+					anatop-max-voltage = <2750000>;
+				};
+
+				regulator-vddcore@140 {
+					compatible = "fsl,anatop-regulator";
+					regulator-name = "cpu";
+					regulator-min-microvolt = <725000>;
+					regulator-max-microvolt = <1450000>;
+					regulator-always-on;
+					anatop-reg-offset = <0x140>;
+					anatop-vol-bit-shift = <0>;
+					anatop-vol-bit-width = <5>;
+					anatop-min-bit-val = <1>;
+					anatop-min-voltage = <725000>;
+					anatop-max-voltage = <1450000>;
+				};
+
+				regulator-vddpu@140 {
+					compatible = "fsl,anatop-regulator";
+					regulator-name = "vddpu";
+					regulator-min-microvolt = <725000>;
+					regulator-max-microvolt = <1450000>;
+					regulator-always-on;
+					anatop-reg-offset = <0x140>;
+					anatop-vol-bit-shift = <9>;
+					anatop-vol-bit-width = <5>;
+					anatop-min-bit-val = <1>;
+					anatop-min-voltage = <725000>;
+					anatop-max-voltage = <1450000>;
+				};
+
+				regulator-vddsoc@140 {
+					compatible = "fsl,anatop-regulator";
+					regulator-name = "vddsoc";
+					regulator-min-microvolt = <725000>;
+					regulator-max-microvolt = <1450000>;
+					regulator-always-on;
+					anatop-reg-offset = <0x140>;
+					anatop-vol-bit-shift = <18>;
+					anatop-vol-bit-width = <5>;
+					anatop-min-bit-val = <1>;
+					anatop-min-voltage = <725000>;
+					anatop-max-voltage = <1450000>;
+				};
 			};
 
 			usbphy@020c9000 { /* USBPHY1 */
@@ -386,7 +482,62 @@
 			};
 
 			iomuxc@020e0000 {
+				compatible = "fsl,imx6q-iomuxc";
 				reg = <0x020e0000 0x4000>;
+
+				/* shared pinctrl settings */
+				audmux {
+					pinctrl_audmux_1: audmux-1 {
+						fsl,pins = <18   0x80000000	/* MX6Q_PAD_SD2_DAT0__AUDMUX_AUD4_RXD */
+							    1586 0x80000000	/* MX6Q_PAD_SD2_DAT3__AUDMUX_AUD4_TXC */
+							    11   0x80000000	/* MX6Q_PAD_SD2_DAT2__AUDMUX_AUD4_TXD */
+							    3    0x80000000>;	/* MX6Q_PAD_SD2_DAT1__AUDMUX_AUD4_TXFS */
+					};
+				};
+
+				i2c1 {
+					pinctrl_i2c1_1: i2c1grp-1 {
+						fsl,pins = <137 0x4001b8b1	/* MX6Q_PAD_EIM_D21__I2C1_SCL */
+							    196 0x4001b8b1>;	/* MX6Q_PAD_EIM_D28__I2C1_SDA */
+					};
+				};
+
+				serial2 {
+					pinctrl_serial2_1: serial2grp-1 {
+						fsl,pins = <183 0x1b0b1		/* MX6Q_PAD_EIM_D26__UART2_TXD */
+							    191 0x1b0b1>;	/* MX6Q_PAD_EIM_D27__UART2_RXD */
+					};
+				};
+
+				usdhc3 {
+					pinctrl_usdhc3_1: usdhc3grp-1 {
+						fsl,pins = <1273 0x17059	/* MX6Q_PAD_SD3_CMD__USDHC3_CMD */
+							    1281 0x10059	/* MX6Q_PAD_SD3_CLK__USDHC3_CLK	*/
+							    1289 0x17059	/* MX6Q_PAD_SD3_DAT0__USDHC3_DAT0 */
+							    1297 0x17059	/* MX6Q_PAD_SD3_DAT1__USDHC3_DAT1 */
+							    1305 0x17059	/* MX6Q_PAD_SD3_DAT2__USDHC3_DAT2 */
+							    1312 0x17059	/* MX6Q_PAD_SD3_DAT3__USDHC3_DAT3 */
+							    1265 0x17059	/* MX6Q_PAD_SD3_DAT4__USDHC3_DAT4 */
+							    1257 0x17059	/* MX6Q_PAD_SD3_DAT5__USDHC3_DAT5 */
+							    1249 0x17059	/* MX6Q_PAD_SD3_DAT6__USDHC3_DAT6 */
+							    1241 0x17059>;	/* MX6Q_PAD_SD3_DAT7__USDHC3_DAT7 */
+					};
+				};
+
+				usdhc4 {
+					pinctrl_usdhc4_1: usdhc4grp-1 {
+						fsl,pins = <1386 0x17059	/* MX6Q_PAD_SD4_CMD__USDHC4_CMD */
+							    1392 0x10059	/* MX6Q_PAD_SD4_CLK__USDHC4_CLK	*/
+							    1462 0x17059	/* MX6Q_PAD_SD4_DAT0__USDHC4_DAT0 */
+							    1470 0x17059	/* MX6Q_PAD_SD4_DAT1__USDHC4_DAT1 */
+							    1478 0x17059	/* MX6Q_PAD_SD4_DAT2__USDHC4_DAT2 */
+							    1486 0x17059	/* MX6Q_PAD_SD4_DAT3__USDHC4_DAT3 */
+							    1493 0x17059	/* MX6Q_PAD_SD4_DAT4__USDHC4_DAT4 */
+							    1501 0x17059	/* MX6Q_PAD_SD4_DAT5__USDHC4_DAT5 */
+							    1509 0x17059	/* MX6Q_PAD_SD4_DAT6__USDHC4_DAT6 */
+							    1517 0x17059>;	/* MX6Q_PAD_SD4_DAT7__USDHC4_DAT7 */
+					};
+				};
 			};
 
 			dcic@020e4000 { /* DCIC1 */
@@ -422,7 +573,7 @@
 				reg = <0x0217c000 0x4000>;
 			};
 
-			enet@02188000 {
+			ethernet@02188000 {
 				compatible = "fsl,imx6q-fec";
 				reg = <0x02188000 0x4000>;
 				interrupts = <0 118 0x04 0 119 0x04>;
@@ -527,7 +678,9 @@
 			};
 
 			audmux@021d8000 {
+				compatible = "fsl,imx6q-audmux", "fsl,imx31-audmux";
 				reg = <0x021d8000 0x4000>;
+				status = "disabled";
 			};
 
 			mipi@021dc000 { /* MIPI-CSI */
@@ -543,28 +696,28 @@
 				interrupts = <0 18 0x04>;
 			};
 
-			uart2: uart@021e8000 {
+			uart2: serial@021e8000 {
 				compatible = "fsl,imx6q-uart", "fsl,imx21-uart";
 				reg = <0x021e8000 0x4000>;
 				interrupts = <0 27 0x04>;
 				status = "disabled";
 			};
 
-			uart3: uart@021ec000 {
+			uart3: serial@021ec000 {
 				compatible = "fsl,imx6q-uart", "fsl,imx21-uart";
 				reg = <0x021ec000 0x4000>;
 				interrupts = <0 28 0x04>;
 				status = "disabled";
 			};
 
-			uart4: uart@021f0000 {
+			uart4: serial@021f0000 {
 				compatible = "fsl,imx6q-uart", "fsl,imx21-uart";
 				reg = <0x021f0000 0x4000>;
 				interrupts = <0 29 0x04>;
 				status = "disabled";
 			};
 
-			uart5: uart@021f4000 {
+			uart5: serial@021f4000 {
 				compatible = "fsl,imx6q-uart", "fsl,imx21-uart";
 				reg = <0x021f4000 0x4000>;
 				interrupts = <0 30 0x04>;
diff --git a/arch/arm/boot/dts/lpc32xx.dtsi b/arch/arm/boot/dts/lpc32xx.dtsi
index 2d69686..3f5dad8 100644
--- a/arch/arm/boot/dts/lpc32xx.dtsi
+++ b/arch/arm/boot/dts/lpc32xx.dtsi
@@ -215,45 +215,8 @@
 			gpio: gpio@40028000 {
 				compatible = "nxp,lpc3220-gpio";
 				reg = <0x40028000 0x1000>;
-				/* create a private address space for enumeration */
-				#address-cells = <1>;
-				#size-cells = <0>;
-
-				gpio_p0: gpio-bank@0 {
-					gpio-controller;
-					#gpio-cells = <2>;
-					reg = <0>;
-				};
-
-				gpio_p1: gpio-bank@1 {
-					gpio-controller;
-					#gpio-cells = <2>;
-					reg = <1>;
-				};
-
-				gpio_p2: gpio-bank@2 {
-					gpio-controller;
-					#gpio-cells = <2>;
-					reg = <2>;
-				};
-
-				gpio_p3: gpio-bank@3 {
-					gpio-controller;
-					#gpio-cells = <2>;
-					reg = <3>;
-				};
-
-				gpi_p3: gpio-bank@4 {
-					gpio-controller;
-					#gpio-cells = <2>;
-					reg = <4>;
-				};
-
-				gpo_p3: gpio-bank@5 {
-					gpio-controller;
-					#gpio-cells = <2>;
-					reg = <5>;
-				};
+				gpio-controller;
+				#gpio-cells = <3>; /* bank, pin, flags */
 			};
 
 			watchdog@4003C000 {
diff --git a/arch/arm/boot/dts/mmp2-brownstone.dts b/arch/arm/boot/dts/mmp2-brownstone.dts
index 153a4b2..c9b4f27 100644
--- a/arch/arm/boot/dts/mmp2-brownstone.dts
+++ b/arch/arm/boot/dts/mmp2-brownstone.dts
@@ -11,7 +11,7 @@
 /include/ "mmp2.dtsi"
 
 / {
-	model = "Marvell MMP2 Aspenite Development Board";
+	model = "Marvell MMP2 Brownstone Development Board";
 	compatible = "mrvl,mmp2-brownstone", "mrvl,mmp2";
 
 	chosen {
@@ -19,7 +19,7 @@
 	};
 
 	memory {
-		reg = <0x00000000 0x04000000>;
+		reg = <0x00000000 0x08000000>;
 	};
 
 	soc {
diff --git a/arch/arm/boot/dts/omap2.dtsi b/arch/arm/boot/dts/omap2.dtsi
index f2ab4ea..581cb08 100644
--- a/arch/arm/boot/dts/omap2.dtsi
+++ b/arch/arm/boot/dts/omap2.dtsi
@@ -44,6 +44,8 @@
 			compatible = "ti,omap2-intc";
 			interrupt-controller;
 			#interrupt-cells = <1>;
+			ti,intc-size = <96>;
+			reg = <0x480FE000 0x1000>;
 		};
 
 		uart1: serial@4806a000 {
diff --git a/arch/arm/boot/dts/omap3-beagle.dts b/arch/arm/boot/dts/omap3-beagle.dts
index 8c756be..5b4506c 100644
--- a/arch/arm/boot/dts/omap3-beagle.dts
+++ b/arch/arm/boot/dts/omap3-beagle.dts
@@ -57,7 +57,7 @@
 &mmc1 {
 	vmmc-supply = <&vmmc1>;
 	vmmc_aux-supply = <&vsim>;
-	ti,bus-width = <8>;
+	bus-width = <8>;
 };
 
 &mmc2 {
diff --git a/arch/arm/boot/dts/omap4-panda.dts b/arch/arm/boot/dts/omap4-panda.dts
index e671361..1efe0c5 100644
--- a/arch/arm/boot/dts/omap4-panda.dts
+++ b/arch/arm/boot/dts/omap4-panda.dts
@@ -70,7 +70,7 @@
 
 &mmc1 {
 	vmmc-supply = <&vmmc>;
-	ti,bus-width = <8>;
+	bus-width = <8>;
 };
 
 &mmc2 {
@@ -87,5 +87,5 @@
 
 &mmc5 {
 	ti,non-removable;
-	ti,bus-width = <4>;
+	bus-width = <4>;
 };
diff --git a/arch/arm/boot/dts/omap4-sdp.dts b/arch/arm/boot/dts/omap4-sdp.dts
index e5eeb6f..d08c4d1 100644
--- a/arch/arm/boot/dts/omap4-sdp.dts
+++ b/arch/arm/boot/dts/omap4-sdp.dts
@@ -137,12 +137,12 @@
 
 &mmc1 {
 	vmmc-supply = <&vmmc>;
-	ti,bus-width = <8>;
+	bus-width = <8>;
 };
 
 &mmc2 {
 	vmmc-supply = <&vaux1>;
-	ti,bus-width = <8>;
+	bus-width = <8>;
 	ti,non-removable;
 };
 
@@ -155,6 +155,6 @@
 };
 
 &mmc5 {
-	ti,bus-width = <4>;
+	bus-width = <4>;
 	ti,non-removable;
 };
diff --git a/arch/arm/boot/dts/phy3250.dts b/arch/arm/boot/dts/phy3250.dts
index 0167e86..c4ff6d1 100644
--- a/arch/arm/boot/dts/phy3250.dts
+++ b/arch/arm/boot/dts/phy3250.dts
@@ -131,13 +131,13 @@
 		compatible = "gpio-leds";
 
 		led0 {
-			gpios = <&gpo_p3 1 1>; /* GPO_P3 1, GPIO 80, active low */
+			gpios = <&gpio 5 1 1>; /* GPO_P3 1, GPIO 80, active low */
 			linux,default-trigger = "heartbeat";
 			default-state = "off";
 		};
 
 		led1 {
-			gpios = <&gpo_p3 14 1>; /* GPO_P3 14, GPIO 93, active low */
+			gpios = <&gpio 5 14 1>; /* GPO_P3 14, GPIO 93, active low */
 			linux,default-trigger = "timer";
 			default-state = "off";
 		};
diff --git a/arch/arm/boot/dts/snowball.dts b/arch/arm/boot/dts/snowball.dts
index d99dc04..ec3c339 100644
--- a/arch/arm/boot/dts/snowball.dts
+++ b/arch/arm/boot/dts/snowball.dts
@@ -20,6 +20,16 @@
 		reg = <0x00000000 0x20000000>;
 	};
 
+	en_3v3_reg: en_3v3 {
+		compatible = "regulator-fixed";
+		regulator-name = "en-3v3-fixed-supply";
+		regulator-min-microvolt = <3300000>;
+		regulator-max-microvolt = <3300000>;
+		gpios = <&gpio0 26 0x4>; /* 26 */
+		startup-delay-us = <5000>;
+		enable-active-high;
+	};
+
 	gpio_keys {
 		compatible = "gpio-keys";
 		#address-cells = <1>;
@@ -30,35 +40,35 @@
 			wakeup = <1>;
 			linux,code = <2>;
 			label = "userpb";
-			gpios = <&gpio1 0 0>;
+			gpios = <&gpio1 0 0x4>;
 		};
 		button@2 {
 			debounce_interval = <50>;
 			wakeup = <1>;
 			linux,code = <3>;
 			label = "extkb1";
-			gpios = <&gpio4 23 0>;
+			gpios = <&gpio4 23 0x4>;
 		};
 		button@3 {
 			debounce_interval = <50>;
 			wakeup = <1>;
 			linux,code = <4>;
 			label = "extkb2";
-			gpios = <&gpio4 24 0>;
+			gpios = <&gpio4 24 0x4>;
 		};
 		button@4 {
 			debounce_interval = <50>;
 			wakeup = <1>;
 			linux,code = <5>;
 			label = "extkb3";
-			gpios = <&gpio5 1 0>;
+			gpios = <&gpio5 1 0x4>;
 		};
 		button@5 {
 			debounce_interval = <50>;
 			wakeup = <1>;
 			linux,code = <6>;
 			label = "extkb4";
-			gpios = <&gpio5 2 0>;
+			gpios = <&gpio5 2 0x4>;
 		};
 	};
 
@@ -66,12 +76,11 @@
 		compatible = "gpio-leds";
 		used-led {
 			label = "user_led";
-			gpios = <&gpio4 14>;
+			gpios = <&gpio4 14 0x4>;
 		};
 	};
 
 	soc-u9500 {
-
 		external-bus@50000000 {
 			status = "okay";
 
@@ -80,6 +89,9 @@
 				reg = <0 0x10000>;
 				interrupts = <12 0x1>;
 				interrupt-parent = <&gpio4>;
+				vdd33a-supply = <&en_3v3_reg>;
+				vddvario-supply = <&db8500_vape_reg>;
+
 
 				reg-shift = <1>;
 				reg-io-width = <2>;
@@ -91,11 +103,13 @@
 
 		sdi@80126000 {
 			status = "enabled";
-			cd-gpios = <&gpio6 26>;
+			vmmc-supply = <&ab8500_ldo_aux3_reg>;
+			cd-gpios = <&gpio6 26 0x4>; /* 218 */
 		};
 
 		sdi@80114000 {
 			status = "enabled";
+			vmmc-supply = <&ab8500_ldo_aux2_reg>;
 		};
 
 		uart@80120000 {
@@ -114,7 +128,7 @@
 			tc3589x@42 {
 				//compatible = "tc3589x";
 				reg = <0x42>;
-				interrupts = <25>;
+				gpios = <&gpio6 25 0x4>;
 				interrupt-parent = <&gpio6>;
 			};
 			tps61052@33 {
diff --git a/arch/arm/boot/dts/spear1310-evb.dts b/arch/arm/boot/dts/spear1310-evb.dts
new file mode 100644
index 0000000..dd4358b
--- /dev/null
+++ b/arch/arm/boot/dts/spear1310-evb.dts
@@ -0,0 +1,292 @@
+/*
+ * DTS file for SPEAr1310 Evaluation Board
+ *
+ * Copyright 2012 Viresh Kumar <viresh.linux@gmail.com>
+ *
+ * The code contained herein is licensed under the GNU General Public
+ * License. You may obtain a copy of the GNU General Public License
+ * Version 2 or later at the following locations:
+ *
+ * http://www.opensource.org/licenses/gpl-license.html
+ * http://www.gnu.org/copyleft/gpl.html
+ */
+
+/dts-v1/;
+/include/ "spear1310.dtsi"
+
+/ {
+	model = "ST SPEAr1310 Evaluation Board";
+	compatible = "st,spear1310-evb", "st,spear1310";
+	#address-cells = <1>;
+	#size-cells = <1>;
+
+	memory {
+		reg = <0 0x40000000>;
+	};
+
+	ahb {
+		pinmux@e0700000 {
+			pinctrl-names = "default";
+			pinctrl-0 = <&state_default>;
+
+			state_default: pinmux {
+				i2c0-pmx {
+					st,pins = "i2c0_grp";
+					st,function = "i2c0";
+				};
+				i2s1 {
+					st,pins = "i2s1_grp";
+					st,function = "i2s1";
+				};
+				gpio {
+					st,pins = "arm_gpio_grp";
+					st,function = "arm_gpio";
+				};
+				eth {
+					st,pins = "gmii_grp";
+					st,function = "gmii";
+				};
+				ssp0 {
+					st,pins = "ssp0_grp";
+					st,function = "ssp0";
+				};
+				kbd {
+					st,pins = "keyboard_6x6_grp";
+					st,function = "keyboard";
+				};
+				sdhci {
+					st,pins = "sdhci_grp";
+					st,function = "sdhci";
+				};
+				smi-pmx {
+					st,pins = "smi_2_chips_grp";
+					st,function = "smi";
+				};
+				uart0 {
+					st,pins = "uart0_grp";
+					st,function = "uart0";
+				};
+				rs485 {
+					st,pins = "rs485_0_1_tdm_0_1_grp";
+					st,function = "rs485_0_1_tdm_0_1";
+				};
+				i2c1_2 {
+					st,pins = "i2c_1_2_grp";
+					st,function = "i2c_1_2";
+				};
+				pci {
+					st,pins = "pcie0_grp","pcie1_grp",
+						"pcie2_grp";
+					st,function = "pci";
+				};
+				smii {
+					st,pins = "smii_0_1_2_grp";
+					st,function = "smii_0_1_2";
+				};
+				nand {
+					st,pins = "nand_8bit_grp",
+						"nand_16bit_grp";
+					st,function = "nand";
+				};
+			};
+		};
+
+		ahci@b1000000 {
+			status = "okay";
+		};
+
+		cf@b2800000 {
+			status = "okay";
+		};
+
+		dma@ea800000 {
+			status = "okay";
+		};
+
+		dma@eb000000 {
+			status = "okay";
+		};
+
+		fsmc: flash@b0000000 {
+			status = "okay";
+		};
+
+		gmac0: eth@e2000000 {
+			status = "okay";
+		};
+
+		sdhci@b3000000 {
+			status = "okay";
+		};
+
+		smi: flash@ea000000 {
+			status = "okay";
+			clock-rate = <50000000>;
+
+			flash@e6000000 {
+				#address-cells = <1>;
+				#size-cells = <1>;
+				reg = <0xe6000000 0x800000>;
+				st,smi-fast-mode;
+
+				partition@0 {
+					label = "xloader";
+					reg = <0x0 0x10000>;
+				};
+				partition@10000 {
+					label = "u-boot";
+					reg = <0x10000 0x40000>;
+				};
+				partition@50000 {
+					label = "linux";
+					reg = <0x50000 0x2c0000>;
+				};
+				partition@310000 {
+					label = "rootfs";
+					reg = <0x310000 0x4f0000>;
+				};
+			};
+		};
+
+		spi0: spi@e0100000 {
+			status = "okay";
+		};
+
+		ehci@e4800000 {
+			status = "okay";
+		};
+
+		ehci@e5800000 {
+			status = "okay";
+		};
+
+		ohci@e4000000 {
+			status = "okay";
+		};
+
+		ohci@e5000000 {
+			status = "okay";
+		};
+
+		apb {
+			adc@e0080000 {
+				status = "okay";
+			};
+
+			gpio0: gpio@e0600000 {
+				status = "okay";
+			};
+
+			gpio1: gpio@e0680000 {
+				status = "okay";
+			};
+
+			i2c0: i2c@e0280000 {
+				status = "okay";
+			};
+
+			i2c1: i2c@5cd00000 {
+				status = "okay";
+			};
+
+			kbd@e0300000 {
+				linux,keymap = < 0x00000001
+						 0x00010002
+						 0x00020003
+						 0x00030004
+						 0x00040005
+						 0x00050006
+						 0x00060007
+						 0x00070008
+						 0x00080009
+						 0x0100000a
+						 0x0101000c
+						 0x0102000d
+						 0x0103000e
+						 0x0104000f
+						 0x01050010
+						 0x01060011
+						 0x01070012
+						 0x01080013
+						 0x02000014
+						 0x02010015
+						 0x02020016
+						 0x02030017
+						 0x02040018
+						 0x02050019
+						 0x0206001a
+						 0x0207001b
+						 0x0208001c
+						 0x0300001d
+						 0x0301001e
+						 0x0302001f
+						 0x03030020
+						 0x03040021
+						 0x03050022
+						 0x03060023
+						 0x03070024
+						 0x03080025
+						 0x04000026
+						 0x04010027
+						 0x04020028
+						 0x04030029
+						 0x0404002a
+						 0x0405002b
+						 0x0406002c
+						 0x0407002d
+						 0x0408002e
+						 0x0500002f
+						 0x05010030
+						 0x05020031
+						 0x05030032
+						 0x05040033
+						 0x05050034
+						 0x05060035
+						 0x05070036
+						 0x05080037
+						 0x06000038
+						 0x06010039
+						 0x0602003a
+						 0x0603003b
+						 0x0604003c
+						 0x0605003d
+						 0x0606003e
+						 0x0607003f
+						 0x06080040
+						 0x07000041
+						 0x07010042
+						 0x07020043
+						 0x07030044
+						 0x07040045
+						 0x07050046
+						 0x07060047
+						 0x07070048
+						 0x07080049
+						 0x0800004a
+						 0x0801004b
+						 0x0802004c
+						 0x0803004d
+						 0x0804004e
+						 0x0805004f
+						 0x08060050
+						 0x08070051
+						 0x08080052 >;
+				autorepeat;
+				st,mode = <0>;
+				status = "okay";
+			};
+
+			rtc@e0580000 {
+				status = "okay";
+			};
+
+			serial@e0000000 {
+				status = "okay";
+			};
+
+			wdt@ec800620 {
+				status = "okay";
+			};
+		};
+	};
+};
diff --git a/arch/arm/boot/dts/spear1310.dtsi b/arch/arm/boot/dts/spear1310.dtsi
new file mode 100644
index 0000000..419ea74
--- /dev/null
+++ b/arch/arm/boot/dts/spear1310.dtsi
@@ -0,0 +1,184 @@
+/*
+ * DTS file for all SPEAr1310 SoCs
+ *
+ * Copyright 2012 Viresh Kumar <viresh.linux@gmail.com>
+ *
+ * The code contained herein is licensed under the GNU General Public
+ * License. You may obtain a copy of the GNU General Public License
+ * Version 2 or later at the following locations:
+ *
+ * http://www.opensource.org/licenses/gpl-license.html
+ * http://www.gnu.org/copyleft/gpl.html
+ */
+
+/include/ "spear13xx.dtsi"
+
+/ {
+	compatible = "st,spear1310";
+
+	ahb {
+		ahci@b1000000 {
+			compatible = "snps,spear-ahci";
+			reg = <0xb1000000 0x10000>;
+			interrupts = <0 68 0x4>;
+			status = "disabled";
+		};
+
+		ahci@b1800000 {
+			compatible = "snps,spear-ahci";
+			reg = <0xb1800000 0x10000>;
+			interrupts = <0 69 0x4>;
+			status = "disabled";
+		};
+
+		ahci@b4000000 {
+			compatible = "snps,spear-ahci";
+			reg = <0xb4000000 0x10000>;
+			interrupts = <0 70 0x4>;
+			status = "disabled";
+		};
+
+		gmac1: eth@5c400000 {
+			compatible = "st,spear600-gmac";
+			reg = <0x5c400000 0x8000>;
+			interrupts = <0 95 0x4>;
+			interrupt-names = "macirq";
+			status = "disabled";
+		};
+
+		gmac2: eth@5c500000 {
+			compatible = "st,spear600-gmac";
+			reg = <0x5c500000 0x8000>;
+			interrupts = <0 96 0x4>;
+			interrupt-names = "macirq";
+			status = "disabled";
+		};
+
+		gmac3: eth@5c600000 {
+			compatible = "st,spear600-gmac";
+			reg = <0x5c600000 0x8000>;
+			interrupts = <0 97 0x4>;
+			interrupt-names = "macirq";
+			status = "disabled";
+		};
+
+		gmac4: eth@5c700000 {
+			compatible = "st,spear600-gmac";
+			reg = <0x5c700000 0x8000>;
+			interrupts = <0 98 0x4>;
+			interrupt-names = "macirq";
+			status = "disabled";
+		};
+
+		spi1: spi@5d400000 {
+			compatible = "arm,pl022", "arm,primecell";
+			reg = <0x5d400000 0x1000>;
+			interrupts = <0 99 0x4>;
+			status = "disabled";
+		};
+
+		apb {
+			i2c1: i2c@5cd00000 {
+				#address-cells = <1>;
+				#size-cells = <0>;
+				compatible = "snps,designware-i2c";
+				reg = <0x5cd00000 0x1000>;
+				interrupts = <0 87 0x4>;
+				status = "disabled";
+			};
+
+			i2c2: i2c@5ce00000 {
+				#address-cells = <1>;
+				#size-cells = <0>;
+				compatible = "snps,designware-i2c";
+				reg = <0x5ce00000 0x1000>;
+				interrupts = <0 88 0x4>;
+				status = "disabled";
+			};
+
+			i2c3: i2c@5cf00000 {
+				#address-cells = <1>;
+				#size-cells = <0>;
+				compatible = "snps,designware-i2c";
+				reg = <0x5cf00000 0x1000>;
+				interrupts = <0 89 0x4>;
+				status = "disabled";
+			};
+
+			i2c4: i2c@5d000000 {
+				#address-cells = <1>;
+				#size-cells = <0>;
+				compatible = "snps,designware-i2c";
+				reg = <0x5d000000 0x1000>;
+				interrupts = <0 90 0x4>;
+				status = "disabled";
+			};
+
+			i2c5: i2c@5d100000 {
+				#address-cells = <1>;
+				#size-cells = <0>;
+				compatible = "snps,designware-i2c";
+				reg = <0x5d100000 0x1000>;
+				interrupts = <0 91 0x4>;
+				status = "disabled";
+			};
+
+			i2c6: i2c@5d200000 {
+				#address-cells = <1>;
+				#size-cells = <0>;
+				compatible = "snps,designware-i2c";
+				reg = <0x5d200000 0x1000>;
+				interrupts = <0 92 0x4>;
+				status = "disabled";
+			};
+
+			i2c7: i2c@5d300000 {
+				#address-cells = <1>;
+				#size-cells = <0>;
+				compatible = "snps,designware-i2c";
+				reg = <0x5d300000 0x1000>;
+				interrupts = <0 93 0x4>;
+				status = "disabled";
+			};
+
+			serial@5c800000 {
+				compatible = "arm,pl011", "arm,primecell";
+				reg = <0x5c800000 0x1000>;
+				interrupts = <0 82 0x4>;
+				status = "disabled";
+			};
+
+			serial@5c900000 {
+				compatible = "arm,pl011", "arm,primecell";
+				reg = <0x5c900000 0x1000>;
+				interrupts = <0 83 0x4>;
+				status = "disabled";
+			};
+
+			serial@5ca00000 {
+				compatible = "arm,pl011", "arm,primecell";
+				reg = <0x5ca00000 0x1000>;
+				interrupts = <0 84 0x4>;
+				status = "disabled";
+			};
+
+			serial@5cb00000 {
+				compatible = "arm,pl011", "arm,primecell";
+				reg = <0x5cb00000 0x1000>;
+				interrupts = <0 85 0x4>;
+				status = "disabled";
+			};
+
+			serial@5cc00000 {
+				compatible = "arm,pl011", "arm,primecell";
+				reg = <0x5cc00000 0x1000>;
+				interrupts = <0 86 0x4>;
+				status = "disabled";
+			};
+
+			thermal@e07008c4 {
+				st,thermal-flags = <0x7000>;
+			};
+		};
+	};
+};
diff --git a/arch/arm/boot/dts/spear1340-evb.dts b/arch/arm/boot/dts/spear1340-evb.dts
new file mode 100644
index 0000000..c9a54e0
--- /dev/null
+++ b/arch/arm/boot/dts/spear1340-evb.dts
@@ -0,0 +1,308 @@
+/*
+ * DTS file for SPEAr1340 Evaluation Board
+ *
+ * Copyright 2012 Viresh Kumar <viresh.linux@gmail.com>
+ *
+ * The code contained herein is licensed under the GNU General Public
+ * License. You may obtain a copy of the GNU General Public License
+ * Version 2 or later at the following locations:
+ *
+ * http://www.opensource.org/licenses/gpl-license.html
+ * http://www.gnu.org/copyleft/gpl.html
+ */
+
+/dts-v1/;
+/include/ "spear1340.dtsi"
+
+/ {
+	model = "ST SPEAr1340 Evaluation Board";
+	compatible = "st,spear1340-evb", "st,spear1340";
+	#address-cells = <1>;
+	#size-cells = <1>;
+
+	memory {
+		reg = <0 0x40000000>;
+	};
+
+	ahb {
+		pinmux@e0700000 {
+			pinctrl-names = "default";
+			pinctrl-0 = <&state_default>;
+
+			state_default: pinmux {
+				pads_as_gpio {
+					st,pins = "pads_as_gpio_grp";
+					st,function = "pads_as_gpio";
+				};
+				fsmc {
+					st,pins = "fsmc_8bit_grp";
+					st,function = "fsmc";
+				};
+				kbd {
+					st,pins = "keyboard_row_col_grp",
+						"keyboard_col5_grp";
+					st,function = "keyboard";
+				};
+				uart0 {
+					st,pins = "uart0_grp", "uart0_enh_grp";
+					st,function = "uart0";
+				};
+				i2c0-pmx {
+					st,pins = "i2c0_grp";
+					st,function = "i2c0";
+				};
+				i2c1-pmx {
+					st,pins = "i2c1_grp";
+					st,function = "i2c1";
+				};
+				spdif-in {
+					st,pins = "spdif_in_grp";
+					st,function = "spdif_in";
+				};
+				spdif-out {
+					st,pins = "spdif_out_grp";
+					st,function = "spdif_out";
+				};
+				ssp0 {
+					st,pins = "ssp0_grp", "ssp0_cs1_grp",
+						"ssp0_cs3_grp";
+					st,function = "ssp0";
+				};
+				pwm {
+					st,pins = "pwm2_grp", "pwm3_grp";
+					st,function = "pwm";
+				};
+				smi-pmx {
+					st,pins = "smi_grp";
+					st,function = "smi";
+				};
+				i2s {
+					st,pins = "i2s_in_grp", "i2s_out_grp";
+					st,function = "i2s";
+				};
+				gmac {
+					st,pins = "gmii_grp", "rgmii_grp";
+					st,function = "gmac";
+				};
+				cam3 {
+					st,pins = "cam3_grp";
+					st,function = "cam3";
+				};
+				cec0 {
+					st,pins = "cec0_grp";
+					st,function = "cec0";
+				};
+				cec1 {
+					st,pins = "cec1_grp";
+					st,function = "cec1";
+				};
+				sdhci {
+					st,pins = "sdhci_grp";
+					st,function = "sdhci";
+				};
+				clcd {
+					st,pins = "clcd_grp";
+					st,function = "clcd";
+				};
+				sata {
+					st,pins = "sata_grp";
+					st,function = "sata";
+				};
+			};
+		};
+
+		dma@ea800000 {
+			status = "okay";
+		};
+
+		dma@eb000000 {
+			status = "okay";
+		};
+
+		fsmc: flash@b0000000 {
+			status = "okay";
+		};
+
+		gmac0: eth@e2000000 {
+			status = "okay";
+		};
+
+		sdhci@b3000000 {
+			status = "okay";
+		};
+
+		smi: flash@ea000000 {
+			status = "okay";
+			clock-rate = <50000000>;
+
+			flash@e6000000 {
+				#address-cells = <1>;
+				#size-cells = <1>;
+				reg = <0xe6000000 0x800000>;
+				st,smi-fast-mode;
+
+				partition@0 {
+					label = "xloader";
+					reg = <0x0 0x10000>;
+				};
+				partition@10000 {
+					label = "u-boot";
+					reg = <0x10000 0x40000>;
+				};
+				partition@50000 {
+					label = "linux";
+					reg = <0x50000 0x2c0000>;
+				};
+				partition@310000 {
+					label = "rootfs";
+					reg = <0x310000 0x4f0000>;
+				};
+			};
+		};
+
+		spi0: spi@e0100000 {
+			status = "okay";
+		};
+
+		ehci@e4800000 {
+			status = "okay";
+		};
+
+		ehci@e5800000 {
+			status = "okay";
+		};
+
+		ohci@e4000000 {
+			status = "okay";
+		};
+
+		ohci@e5000000 {
+			status = "okay";
+		};
+
+		apb {
+			adc@e0080000 {
+				status = "okay";
+			};
+
+			gpio0: gpio@e0600000 {
+				status = "okay";
+			};
+
+			gpio1: gpio@e0680000 {
+				status = "okay";
+			};
+
+			i2c0: i2c@e0280000 {
+				status = "okay";
+			};
+
+			i2c1: i2c@b4000000 {
+				status = "okay";
+			};
+
+			kbd@e0300000 {
+				linux,keymap = < 0x00000001
+						 0x00010002
+						 0x00020003
+						 0x00030004
+						 0x00040005
+						 0x00050006
+						 0x00060007
+						 0x00070008
+						 0x00080009
+						 0x0100000a
+						 0x0101000c
+						 0x0102000d
+						 0x0103000e
+						 0x0104000f
+						 0x01050010
+						 0x01060011
+						 0x01070012
+						 0x01080013
+						 0x02000014
+						 0x02010015
+						 0x02020016
+						 0x02030017
+						 0x02040018
+						 0x02050019
+						 0x0206001a
+						 0x0207001b
+						 0x0208001c
+						 0x0300001d
+						 0x0301001e
+						 0x0302001f
+						 0x03030020
+						 0x03040021
+						 0x03050022
+						 0x03060023
+						 0x03070024
+						 0x03080025
+						 0x04000026
+						 0x04010027
+						 0x04020028
+						 0x04030029
+						 0x0404002a
+						 0x0405002b
+						 0x0406002c
+						 0x0407002d
+						 0x0408002e
+						 0x0500002f
+						 0x05010030
+						 0x05020031
+						 0x05030032
+						 0x05040033
+						 0x05050034
+						 0x05060035
+						 0x05070036
+						 0x05080037
+						 0x06000038
+						 0x06010039
+						 0x0602003a
+						 0x0603003b
+						 0x0604003c
+						 0x0605003d
+						 0x0606003e
+						 0x0607003f
+						 0x06080040
+						 0x07000041
+						 0x07010042
+						 0x07020043
+						 0x07030044
+						 0x07040045
+						 0x07050046
+						 0x07060047
+						 0x07070048
+						 0x07080049
+						 0x0800004a
+						 0x0801004b
+						 0x0802004c
+						 0x0803004d
+						 0x0804004e
+						 0x0805004f
+						 0x08060050
+						 0x08070051
+						 0x08080052 >;
+				autorepeat;
+				st,mode = <0>;
+				status = "okay";
+			};
+
+			rtc@e0580000 {
+				status = "okay";
+			};
+
+			serial@e0000000 {
+				status = "okay";
+			};
+
+			serial@b4100000 {
+				status = "okay";
+			};
+
+			wdt@ec800620 {
+				status = "okay";
+			};
+		};
+	};
+};
diff --git a/arch/arm/boot/dts/spear1340.dtsi b/arch/arm/boot/dts/spear1340.dtsi
new file mode 100644
index 0000000..d71fe2a
--- /dev/null
+++ b/arch/arm/boot/dts/spear1340.dtsi
@@ -0,0 +1,56 @@
+/*
+ * DTS file for all SPEAr1340 SoCs
+ *
+ * Copyright 2012 Viresh Kumar <viresh.linux@gmail.com>
+ *
+ * The code contained herein is licensed under the GNU General Public
+ * License. You may obtain a copy of the GNU General Public License
+ * Version 2 or later at the following locations:
+ *
+ * http://www.opensource.org/licenses/gpl-license.html
+ * http://www.gnu.org/copyleft/gpl.html
+ */
+
+/include/ "spear13xx.dtsi"
+
+/ {
+	compatible = "st,spear1340";
+
+	ahb {
+		ahci@b1000000 {
+			compatible = "snps,spear-ahci";
+			reg = <0xb1000000 0x10000>;
+			interrupts = <0 72 0x4>;
+			status = "disabled";
+		};
+
+		spi1: spi@5d400000 {
+			compatible = "arm,pl022", "arm,primecell";
+			reg = <0x5d400000 0x1000>;
+			interrupts = <0 99 0x4>;
+			status = "disabled";
+		};
+
+		apb {
+			i2c1: i2c@b4000000 {
+				#address-cells = <1>;
+				#size-cells = <0>;
+				compatible = "snps,designware-i2c";
+				reg = <0xb4000000 0x1000>;
+				interrupts = <0 104 0x4>;
+				status = "disabled";
+			};
+
+			serial@b4100000 {
+				compatible = "arm,pl011", "arm,primecell";
+				reg = <0xb4100000 0x1000>;
+				interrupts = <0 105 0x4>;
+				status = "disabled";
+			};
+
+			thermal@e07008c4 {
+				st,thermal-flags = <0x2a00>;
+			};
+		};
+	};
+};
diff --git a/arch/arm/boot/dts/spear13xx.dtsi b/arch/arm/boot/dts/spear13xx.dtsi
new file mode 100644
index 0000000..10dcec7
--- /dev/null
+++ b/arch/arm/boot/dts/spear13xx.dtsi
@@ -0,0 +1,262 @@
+/*
+ * DTS file for all SPEAr13xx SoCs
+ *
+ * Copyright 2012 Viresh Kumar <viresh.linux@gmail.com>
+ *
+ * The code contained herein is licensed under the GNU General Public
+ * License. You may obtain a copy of the GNU General Public License
+ * Version 2 or later at the following locations:
+ *
+ * http://www.opensource.org/licenses/gpl-license.html
+ * http://www.gnu.org/copyleft/gpl.html
+ */
+
+/include/ "skeleton.dtsi"
+
+/ {
+	interrupt-parent = <&gic>;
+
+	cpus {
+		#address-cells = <1>;
+		#size-cells = <0>;
+
+		cpu@0 {
+			compatible = "arm,cortex-a9";
+			reg = <0>;
+			next-level-cache = <&L2>;
+		};
+
+		cpu@1 {
+			compatible = "arm,cortex-a9";
+			reg = <1>;
+			next-level-cache = <&L2>;
+		};
+	};
+
+	gic: interrupt-controller@ec801000 {
+		compatible = "arm,cortex-a9-gic";
+		interrupt-controller;
+		#interrupt-cells = <3>;
+		reg = <0xec801000 0x1000>,
+		      <0xec800100 0x0100>;
+	};
+
+	pmu {
+		compatible = "arm,cortex-a9-pmu";
+		interrupts = <0 8 0x04
+			      0 9 0x04>;
+	};
+
+	L2: l2-cache {
+		compatible = "arm,pl310-cache";
+		reg = <0xed000000 0x1000>;
+		cache-unified;
+		cache-level = <2>;
+	};
+
+	memory {
+		name = "memory";
+		device_type = "memory";
+		reg = <0 0x40000000>;
+	};
+
+	chosen {
+		bootargs = "console=ttyAMA0,115200";
+	};
+
+	ahb {
+		#address-cells = <1>;
+		#size-cells = <1>;
+		compatible = "simple-bus";
+		ranges = <0x50000000 0x50000000 0x10000000
+			  0xb0000000 0xb0000000 0x10000000
+			  0xe0000000 0xe0000000 0x10000000>;
+
+		sdhci@b3000000 {
+			compatible = "st,sdhci-spear";
+			reg = <0xb3000000 0x100>;
+			interrupts = <0 28 0x4>;
+			status = "disabled";
+		};
+
+		cf@b2800000 {
+			compatible = "arasan,cf-spear1340";
+			reg = <0xb2800000 0x100>;
+			interrupts = <0 29 0x4>;
+			status = "disabled";
+		};
+
+		dma@ea800000 {
+			compatible = "snps,dma-spear1340";
+			reg = <0xea800000 0x1000>;
+			interrupts = <0 19 0x4>;
+			status = "disabled";
+		};
+
+		dma@eb000000 {
+			compatible = "snps,dma-spear1340";
+			reg = <0xeb000000 0x1000>;
+			interrupts = <0 59 0x4>;
+			status = "disabled";
+		};
+
+		fsmc: flash@b0000000 {
+			compatible = "st,spear600-fsmc-nand";
+			#address-cells = <1>;
+			#size-cells = <1>;
+			reg = <0xb0000000 0x1000	/* FSMC Register */
+			       0xb0800000 0x0010>;	/* NAND Base */
+			reg-names = "fsmc_regs", "nand_data";
+			interrupts = <0 20 0x4
+				      0 21 0x4
+				      0 22 0x4
+				      0 23 0x4>;
+			st,ale-off = <0x20000>;
+			st,cle-off = <0x10000>;
+			status = "disabled";
+		};
+
+		gmac0: eth@e2000000 {
+			compatible = "st,spear600-gmac";
+			reg = <0xe2000000 0x8000>;
+			interrupts = <0 23 0x4
+				      0 24 0x4>;
+			interrupt-names = "macirq", "eth_wake_irq";
+			status = "disabled";
+		};
+
+		smi: flash@ea000000 {
+			compatible = "st,spear600-smi";
+			#address-cells = <1>;
+			#size-cells = <1>;
+			reg = <0xea000000 0x1000>;
+			interrupts = <0 30 0x4>;
+			status = "disabled";
+		};
+
+		spi0: spi@e0100000 {
+			compatible = "arm,pl022", "arm,primecell";
+			reg = <0xe0100000 0x1000>;
+			interrupts = <0 31 0x4>;
+			status = "disabled";
+		};
+
+		ehci@e4800000 {
+			compatible = "st,spear600-ehci", "usb-ehci";
+			reg = <0xe4800000 0x1000>;
+			interrupts = <0 64 0x4>;
+			status = "disabled";
+		};
+
+		ehci@e5800000 {
+			compatible = "st,spear600-ehci", "usb-ehci";
+			reg = <0xe5800000 0x1000>;
+			interrupts = <0 66 0x4>;
+			status = "disabled";
+		};
+
+		ohci@e4000000 {
+			compatible = "st,spear600-ohci", "usb-ohci";
+			reg = <0xe4000000 0x1000>;
+			interrupts = <0 65 0x4>;
+			status = "disabled";
+		};
+
+		ohci@e5000000 {
+			compatible = "st,spear600-ohci", "usb-ohci";
+			reg = <0xe5000000 0x1000>;
+			interrupts = <0 67 0x4>;
+			status = "disabled";
+		};
+
+		apb {
+			#address-cells = <1>;
+			#size-cells = <1>;
+			compatible = "simple-bus";
+			ranges = <0x50000000 0x50000000 0x10000000
+				  0xb0000000 0xb0000000 0x10000000
+				  0xe0000000 0xe0000000 0x10000000>;
+
+			gpio0: gpio@e0600000 {
+				compatible = "arm,pl061", "arm,primecell";
+				reg = <0xe0600000 0x1000>;
+				interrupts = <0 24 0x4>;
+				gpio-controller;
+				#gpio-cells = <2>;
+				interrupt-controller;
+				#interrupt-cells = <2>;
+				status = "disabled";
+			};
+
+			gpio1: gpio@e0680000 {
+				compatible = "arm,pl061", "arm,primecell";
+				reg = <0xe0680000 0x1000>;
+				interrupts = <0 25 0x4>;
+				gpio-controller;
+				#gpio-cells = <2>;
+				interrupt-controller;
+				#interrupt-cells = <2>;
+				status = "disabled";
+			};
+
+			kbd@e0300000 {
+				compatible = "st,spear300-kbd";
+				reg = <0xe0300000 0x1000>;
+				status = "disabled";
+			};
+
+			i2c0: i2c@e0280000 {
+				#address-cells = <1>;
+				#size-cells = <0>;
+				compatible = "snps,designware-i2c";
+				reg = <0xe0280000 0x1000>;
+				interrupts = <0 41 0x4>;
+				status = "disabled";
+			};
+
+			rtc@e0580000 {
+				compatible = "st,spear-rtc";
+				reg = <0xe0580000 0x1000>;
+				interrupts = <0 36 0x4>;
+				status = "disabled";
+			};
+
+			serial@e0000000 {
+				compatible = "arm,pl011", "arm,primecell";
+				reg = <0xe0000000 0x1000>;
+				interrupts = <0 36 0x4>;
+				status = "disabled";
+			};
+
+			adc@e0080000 {
+				compatible = "st,spear600-adc";
+				reg = <0xe0080000 0x1000>;
+				interrupts = <0 44 0x4>;
+				status = "disabled";
+			};
+
+			timer@e0380000 {
+				compatible = "st,spear-timer";
+				reg = <0xe0380000 0x400>;
+				interrupts = <0 37 0x4>;
+			};
+
+			timer@ec800600 {
+				compatible = "arm,cortex-a9-twd-timer";
+				reg = <0xec800600 0x20>;
+				interrupts = <1 13 0x301>;
+			};
+
+			wdt@ec800620 {
+				compatible = "arm,cortex-a9-twd-wdt";
+				reg = <0xec800620 0x20>;
+				status = "disabled";
+			};
+
+			thermal@e07008c4 {
+				compatible = "st,thermal-spear1340";
+				reg = <0xe07008c4 0x4>;
+			};
+		};
+	};
+};
diff --git a/arch/arm/boot/dts/spear300-evb.dts b/arch/arm/boot/dts/spear300-evb.dts
index 910e264..d71b8d5 100644
--- a/arch/arm/boot/dts/spear300-evb.dts
+++ b/arch/arm/boot/dts/spear300-evb.dts
@@ -1,7 +1,7 @@
 /*
  * DTS file for SPEAr300 Evaluation Baord
  *
- * Copyright 2012 Viresh Kumar <viresh.kumar@st.com>
+ * Copyright 2012 Viresh Kumar <viresh.linux@gmail.com>
  *
  * The code contained herein is licensed under the GNU General Public
  * License. You may obtain a copy of the GNU General Public License
@@ -87,6 +87,31 @@
 
 		smi: flash@fc000000 {
 			status = "okay";
+			clock-rate = <50000000>;
+
+			flash@f8000000 {
+				#address-cells = <1>;
+				#size-cells = <1>;
+				reg = <0xf8000000 0x800000>;
+				st,smi-fast-mode;
+
+				partition@0 {
+					label = "xloader";
+					reg = <0x0 0x10000>;
+				};
+				partition@10000 {
+					label = "u-boot";
+					reg = <0x10000 0x40000>;
+				};
+				partition@50000 {
+					label = "linux";
+					reg = <0x50000 0x2c0000>;
+				};
+				partition@310000 {
+					label = "rootfs";
+					reg = <0x310000 0x4f0000>;
+				};
+			};
 		};
 
 		spi0: spi@d0100000 {
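
The flash child node added above carves the 8 MiB (0x800000 byte) SMI flash into four partitions whose reg = <offset size> pairs tile it back to back: 0x0 + 0x10000 = 0x10000, 0x10000 + 0x40000 = 0x50000, 0x50000 + 0x2c0000 = 0x310000, and 0x310000 + 0x4f0000 = 0x800000. The same layout is repeated for the SPEAr310, SPEAr320 and SPEAr600 boards below. A small standalone C check of that arithmetic (illustrative only, not part of the patch):

#include <stdio.h>

struct part { const char *label; unsigned int offset, size; };

/* Mirrors the reg = <offset size> pairs from the partitions above. */
static const struct part parts[] = {
	{ "xloader", 0x000000, 0x010000 },
	{ "u-boot",  0x010000, 0x040000 },
	{ "linux",   0x050000, 0x2c0000 },
	{ "rootfs",  0x310000, 0x4f0000 },
};

int main(void)
{
	unsigned int i, next = 0;

	for (i = 0; i < sizeof(parts) / sizeof(parts[0]); i++) {
		if (parts[i].offset != next)
			printf("gap or overlap before %s\n", parts[i].label);
		next = parts[i].offset + parts[i].size;
	}
	printf("partitions end at 0x%x (flash size is 0x800000)\n", next);
	return 0;
}
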
diff --git a/arch/arm/boot/dts/spear300.dtsi b/arch/arm/boot/dts/spear300.dtsi
index 01c5e35..ed3627c 100644
--- a/arch/arm/boot/dts/spear300.dtsi
+++ b/arch/arm/boot/dts/spear300.dtsi
@@ -1,7 +1,7 @@
 /*
  * DTS file for SPEAr300 SoC
  *
- * Copyright 2012 Viresh Kumar <viresh.kumar@st.com>
+ * Copyright 2012 Viresh Kumar <viresh.linux@gmail.com>
  *
  * The code contained herein is licensed under the GNU General Public
  * License. You may obtain a copy of the GNU General Public License
diff --git a/arch/arm/boot/dts/spear310-evb.dts b/arch/arm/boot/dts/spear310-evb.dts
index 6d95317..b00544e 100644
--- a/arch/arm/boot/dts/spear310-evb.dts
+++ b/arch/arm/boot/dts/spear310-evb.dts
@@ -1,7 +1,7 @@
 /*
  * DTS file for SPEAr310 Evaluation Board
  *
- * Copyright 2012 Viresh Kumar <viresh.kumar@st.com>
+ * Copyright 2012 Viresh Kumar <viresh.linux@gmail.com>
  *
  * The code contained herein is licensed under the GNU General Public
  * License. You may obtain a copy of the GNU General Public License
@@ -103,11 +103,27 @@
 			clock-rate=<50000000>;
 
 			flash@f8000000 {
-				label = "m25p64";
-				reg = <0xf8000000 0x800000>;
 				#address-cells = <1>;
 				#size-cells = <1>;
+				reg = <0xf8000000 0x800000>;
 				st,smi-fast-mode;
+
+				partition@0 {
+					label = "xloader";
+					reg = <0x0 0x10000>;
+				};
+				partition@10000 {
+					label = "u-boot";
+					reg = <0x10000 0x40000>;
+				};
+				partition@50000 {
+					label = "linux";
+					reg = <0x50000 0x2c0000>;
+				};
+				partition@310000 {
+					label = "rootfs";
+					reg = <0x310000 0x4f0000>;
+				};
 			};
 		};
 
diff --git a/arch/arm/boot/dts/spear310.dtsi b/arch/arm/boot/dts/spear310.dtsi
index e47081c..62fc4fb 100644
--- a/arch/arm/boot/dts/spear310.dtsi
+++ b/arch/arm/boot/dts/spear310.dtsi
@@ -1,7 +1,7 @@
 /*
  * DTS file for SPEAr310 SoC
  *
- * Copyright 2012 Viresh Kumar <viresh.kumar@st.com>
+ * Copyright 2012 Viresh Kumar <viresh.linux@gmail.com>
  *
  * The code contained herein is licensed under the GNU General Public
  * License. You may obtain a copy of the GNU General Public License
diff --git a/arch/arm/boot/dts/spear320-evb.dts b/arch/arm/boot/dts/spear320-evb.dts
index 0c6463b..c13fd1f 100644
--- a/arch/arm/boot/dts/spear320-evb.dts
+++ b/arch/arm/boot/dts/spear320-evb.dts
@@ -1,7 +1,7 @@
 /*
  * DTS file for SPEAr320 Evaluation Board
  *
- * Copyright 2012 Viresh Kumar <viresh.kumar@st.com>
+ * Copyright 2012 Viresh Kumar <viresh.linux@gmail.com>
  *
  * The code contained herein is licensed under the GNU General Public
  * License. You may obtain a copy of the GNU General Public License
@@ -110,6 +110,31 @@
 
 		smi: flash@fc000000 {
 			status = "okay";
+			clock-rate=<50000000>;
+
+			flash@f8000000 {
+				#address-cells = <1>;
+				#size-cells = <1>;
+				reg = <0xf8000000 0x800000>;
+				st,smi-fast-mode;
+
+				partition@0 {
+					label = "xloader";
+					reg = <0x0 0x10000>;
+				};
+				partition@10000 {
+					label = "u-boot";
+					reg = <0x10000 0x40000>;
+				};
+				partition@50000 {
+					label = "linux";
+					reg = <0x50000 0x2c0000>;
+				};
+				partition@310000 {
+					label = "rootfs";
+					reg = <0x310000 0x4f0000>;
+				};
+			};
 		};
 
 		spi0: spi@d0100000 {
diff --git a/arch/arm/boot/dts/spear320.dtsi b/arch/arm/boot/dts/spear320.dtsi
index 5372ca3..1f49d69 100644
--- a/arch/arm/boot/dts/spear320.dtsi
+++ b/arch/arm/boot/dts/spear320.dtsi
@@ -1,7 +1,7 @@
 /*
  * DTS file for SPEAr320 SoC
  *
- * Copyright 2012 Viresh Kumar <viresh.kumar@st.com>
+ * Copyright 2012 Viresh Kumar <viresh.linux@gmail.com>
  *
  * The code contained herein is licensed under the GNU General Public
  * License. You may obtain a copy of the GNU General Public License
diff --git a/arch/arm/boot/dts/spear3xx.dtsi b/arch/arm/boot/dts/spear3xx.dtsi
index 0ae7c8e..3a8bb57 100644
--- a/arch/arm/boot/dts/spear3xx.dtsi
+++ b/arch/arm/boot/dts/spear3xx.dtsi
@@ -1,7 +1,7 @@
 /*
  * DTS file for all SPEAr3xx SoCs
  *
- * Copyright 2012 Viresh Kumar <viresh.kumar@st.com>
+ * Copyright 2012 Viresh Kumar <viresh.linux@gmail.com>
  *
  * The code contained herein is licensed under the GNU General Public
  * License. You may obtain a copy of the GNU General Public License
@@ -139,6 +139,12 @@
 				interrupts = <12>;
 				status = "disabled";
 			};
+
+			timer@f0000000 {
+				compatible = "st,spear-timer";
+				reg = <0xf0000000 0x400>;
+				interrupts = <2>;
+			};
 		};
 	};
 };
diff --git a/arch/arm/boot/dts/spear600-evb.dts b/arch/arm/boot/dts/spear600-evb.dts
index 790a7a8..1119c22 100644
--- a/arch/arm/boot/dts/spear600-evb.dts
+++ b/arch/arm/boot/dts/spear600-evb.dts
@@ -33,6 +33,35 @@
 			status = "okay";
 		};
 
+		smi: flash@fc000000 {
+			status = "okay";
+			clock-rate=<50000000>;
+
+			flash@f8000000 {
+				#address-cells = <1>;
+				#size-cells = <1>;
+				reg = <0xf8000000 0x800000>;
+				st,smi-fast-mode;
+
+				partition@0 {
+					label = "xloader";
+					reg = <0x0 0x10000>;
+				};
+				partition@10000 {
+					label = "u-boot";
+					reg = <0x10000 0x40000>;
+				};
+				partition@50000 {
+					label = "linux";
+					reg = <0x50000 0x2c0000>;
+				};
+				partition@310000 {
+					label = "rootfs";
+					reg = <0x310000 0x4f0000>;
+				};
+			};
+		};
+
 		apb {
 			serial@d0000000 {
 				status = "okay";
diff --git a/arch/arm/boot/dts/spear600.dtsi b/arch/arm/boot/dts/spear600.dtsi
index d777e3a..089f0a4 100644
--- a/arch/arm/boot/dts/spear600.dtsi
+++ b/arch/arm/boot/dts/spear600.dtsi
@@ -177,6 +177,12 @@
 				interrupts = <28>;
 				status = "disabled";
 			};
+
+			timer@f0000000 {
+				compatible = "st,spear-timer";
+				reg = <0xf0000000 0x400>;
+				interrupts = <16>;
+			};
 		};
 	};
 };
diff --git a/arch/arm/boot/dts/tegra-cardhu.dts b/arch/arm/boot/dts/tegra-cardhu.dts
index 0a9f34a..36321bc 100644
--- a/arch/arm/boot/dts/tegra-cardhu.dts
+++ b/arch/arm/boot/dts/tegra-cardhu.dts
@@ -7,10 +7,10 @@
 	compatible = "nvidia,cardhu", "nvidia,tegra30";
 
 	memory {
-		reg = < 0x80000000 0x40000000 >;
+		reg = <0x80000000 0x40000000>;
 	};
 
-	pinmux@70000000 {
+	pinmux {
 		pinctrl-names = "default";
 		pinctrl-0 = <&state_default>;
 
@@ -51,64 +51,122 @@
 				nvidia,pull = <2>;
 				nvidia,tristate = <0>;
 			};
+			dap2_fs_pa2 {
+				nvidia,pins =	"dap2_fs_pa2",
+						"dap2_sclk_pa3",
+						"dap2_din_pa4",
+						"dap2_dout_pa5";
+				nvidia,function = "i2s1";
+				nvidia,pull = <0>;
+				nvidia,tristate = <0>;
+			};
 		};
 	};
 
 	serial@70006000 {
-		clock-frequency = < 408000000 >;
-	};
-
-	serial@70006040 {
-		status = "disable";
-	};
-
-	serial@70006200 {
-		status = "disable";
-	};
-
-	serial@70006300 {
-		status = "disable";
-	};
-
-	serial@70006400 {
-		status = "disable";
+		status = "okay";
+		clock-frequency = <408000000>;
 	};
 
 	i2c@7000c000 {
+		status = "okay";
 		clock-frequency = <100000>;
 	};
 
 	i2c@7000c400 {
+		status = "okay";
 		clock-frequency = <100000>;
 	};
 
 	i2c@7000c500 {
+		status = "okay";
 		clock-frequency = <100000>;
+
+		/* ALS and Proximity sensor */
+		isl29028@44 {
+			compatible = "isil,isl29028";
+			reg = <0x44>;
+			interrupt-parent = <&gpio>;
+			interrupts = <88 0x04>; /* gpio PL0 */
+		};
 	};
 
 	i2c@7000c700 {
+		status = "okay";
 		clock-frequency = <100000>;
 	};
 
 	i2c@7000d000 {
+		status = "okay";
 		clock-frequency = <100000>;
+
+		wm8903: wm8903@1a {
+			compatible = "wlf,wm8903";
+			reg = <0x1a>;
+			interrupt-parent = <&gpio>;
+			interrupts = <179 0x04>; /* gpio PW3 */
+
+			gpio-controller;
+			#gpio-cells = <2>;
+
+			micdet-cfg = <0>;
+			micdet-delay = <100>;
+			gpio-cfg = <0xffffffff 0xffffffff 0 0xffffffff 0xffffffff>;
+		};
+
+		tps62361 {
+			compatible = "ti,tps62361";
+			reg = <0x60>;
+
+			regulator-name = "tps62361-vout";
+			regulator-min-microvolt = <500000>;
+			regulator-max-microvolt = <1500000>;
+			regulator-boot-on;
+			regulator-always-on;
+			ti,vsel0-state-high;
+			ti,vsel1-state-high;
+		};
+	};
+
+	ahub {
+		i2s@70080400 {
+			status = "okay";
+		};
 	};
 
 	sdhci@78000000 {
+		status = "okay";
 		cd-gpios = <&gpio 69 0>; /* gpio PI5 */
 		wp-gpios = <&gpio 155 0>; /* gpio PT3 */
 		power-gpios = <&gpio 31 0>; /* gpio PD7 */
+		bus-width = <4>;
 	};
 
-	sdhci@78000200 {
-		status = "disable";
-	};
-
-	sdhci@78000400 {
-		status = "disable";
-	};
-
-	sdhci@78000400 {
+	sdhci@78000600 {
+		status = "okay";
 		support-8bit;
+		bus-width = <8>;
+	};
+
+	sound {
+		compatible = "nvidia,tegra-audio-wm8903-cardhu",
+			     "nvidia,tegra-audio-wm8903";
+		nvidia,model = "NVIDIA Tegra Cardhu";
+
+		nvidia,audio-routing =
+			"Headphone Jack", "HPOUTR",
+			"Headphone Jack", "HPOUTL",
+			"Int Spk", "ROP",
+			"Int Spk", "RON",
+			"Int Spk", "LOP",
+			"Int Spk", "LON",
+			"Mic Jack", "MICBIAS",
+			"IN1L", "Mic Jack";
+
+		nvidia,i2s-controller = <&tegra_i2s1>;
+		nvidia,audio-codec = <&wm8903>;
+
+		nvidia,spkr-en-gpios = <&wm8903 2 0>;
+		nvidia,hp-det-gpios = <&gpio 178 0>; /* gpio PW2 */
 	};
 };
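
The cd-gpios, wp-gpios and power-gpios properties above refer to Tegra GPIOs by a flat number; for the single-letter ports used here the mapping to the port/pin names in the comments is simply (port index from 'A') * 8 + pin. A standalone sketch of that arithmetic, reproducing the PI5, PT3 and PD7 values in the sdhci node:

#include <stdio.h>

/* Flat Tegra GPIO number for single-letter ports: PA0 = 0, PB0 = 8, ... */
static int tegra_gpio_number(char port, int pin)
{
	return (port - 'A') * 8 + pin;
}

int main(void)
{
	printf("PI5 = %d\n", tegra_gpio_number('I', 5));	/* 69, cd-gpios */
	printf("PT3 = %d\n", tegra_gpio_number('T', 3));	/* 155, wp-gpios */
	printf("PD7 = %d\n", tegra_gpio_number('D', 7));	/* 31, power-gpios */
	return 0;
}
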
diff --git a/arch/arm/boot/dts/tegra-harmony.dts b/arch/arm/boot/dts/tegra-harmony.dts
index 1a0b1f1..7de7013 100644
--- a/arch/arm/boot/dts/tegra-harmony.dts
+++ b/arch/arm/boot/dts/tegra-harmony.dts
@@ -6,11 +6,11 @@
 	model = "NVIDIA Tegra2 Harmony evaluation board";
 	compatible = "nvidia,harmony", "nvidia,tegra20";
 
-	memory@0 {
-		reg = < 0x00000000 0x40000000 >;
+	memory {
+		reg = <0x00000000 0x40000000>;
 	};
 
-	pinmux@70000000 {
+	pinmux {
 		pinctrl-names = "default";
 		pinctrl-0 = <&state_default>;
 
@@ -167,28 +167,28 @@
 			};
 			conf_ata {
 				nvidia,pins = "ata", "atb", "atc", "atd", "ate",
-					"cdev1", "dap1", "dtb", "gma", "gmb",
-					"gmc", "gmd", "gme", "gpu7", "gpv",
-					"i2cp", "pta", "rm", "slxa", "slxk",
-					"spia", "spib";
+					"cdev1", "cdev2", "dap1", "dtb", "gma",
+					"gmb", "gmc", "gmd", "gme", "gpu7",
+					"gpv", "i2cp", "pta", "rm", "slxa",
+					"slxk", "spia", "spib", "uac";
 				nvidia,pull = <0>;
 				nvidia,tristate = <0>;
 			};
-			conf_cdev2 {
-				nvidia,pins = "cdev2", "csus", "spid", "spif";
-				nvidia,pull = <1>;
-				nvidia,tristate = <1>;
-			};
 			conf_ck32 {
 				nvidia,pins = "ck32", "ddrc", "pmca", "pmcb",
 					"pmcc", "pmcd", "pmce", "xm2c", "xm2d";
 				nvidia,pull = <0>;
 			};
+			conf_csus {
+				nvidia,pins = "csus", "spid", "spif";
+				nvidia,pull = <1>;
+				nvidia,tristate = <1>;
+			};
 			conf_crtp {
 				nvidia,pins = "crtp", "dap2", "dap3", "dap4",
 					"dtc", "dte", "dtf", "gpu", "sdio1",
 					"slxc", "slxd", "spdi", "spdo", "spig",
-					"uac", "uda";
+					"uda";
 				nvidia,pull = <0>;
 				nvidia,tristate = <1>;
 			};
@@ -234,42 +234,81 @@
 		};
 	};
 
-	pmc@7000f400 {
-		nvidia,invert-interrupt;
+	i2s@70002800 {
+		status = "okay";
+	};
+
+	serial@70006300 {
+		status = "okay";
+		clock-frequency = <216000000>;
 	};
 
 	i2c@7000c000 {
+		status = "okay";
 		clock-frequency = <400000>;
 
 		wm8903: wm8903@1a {
 			compatible = "wlf,wm8903";
 			reg = <0x1a>;
 			interrupt-parent = <&gpio>;
-			interrupts = < 187 0x04 >;
+			interrupts = <187 0x04>;
 
 			gpio-controller;
 			#gpio-cells = <2>;
 
 			micdet-cfg = <0>;
 			micdet-delay = <100>;
-			gpio-cfg = < 0xffffffff 0xffffffff 0 0xffffffff 0xffffffff >;
+			gpio-cfg = <0xffffffff 0xffffffff 0 0xffffffff 0xffffffff>;
 		};
 	};
 
 	i2c@7000c400 {
+		status = "okay";
 		clock-frequency = <400000>;
 	};
 
 	i2c@7000c500 {
+		status = "okay";
 		clock-frequency = <400000>;
 	};
 
 	i2c@7000d000 {
+		status = "okay";
 		clock-frequency = <400000>;
 	};
 
-	i2s@70002a00 {
-		status = "disable";
+	pmc {
+		nvidia,invert-interrupt;
+	};
+
+	usb@c5000000 {
+		status = "okay";
+	};
+
+	usb@c5004000 {
+		status = "okay";
+		nvidia,phy-reset-gpio = <&gpio 169 0>; /* gpio PV1 */
+	};
+
+	usb@c5008000 {
+		status = "okay";
+	};
+
+	sdhci@c8000200 {
+		status = "okay";
+		cd-gpios = <&gpio 69 0>; /* gpio PI5 */
+		wp-gpios = <&gpio 57 0>; /* gpio PH1 */
+		power-gpios = <&gpio 155 0>; /* gpio PT3 */
+		bus-width = <4>;
+	};
+
+	sdhci@c8000600 {
+		status = "okay";
+		cd-gpios = <&gpio 58 0>; /* gpio PH2 */
+		wp-gpios = <&gpio 59 0>; /* gpio PH3 */
+		power-gpios = <&gpio 70 0>; /* gpio PI6 */
+		support-8bit;
+		bus-width = <8>;
 	};
 
 	sound {
@@ -295,45 +334,4 @@
 		nvidia,int-mic-en-gpios = <&gpio 184 0>; /*gpio PX0 */
 		nvidia,ext-mic-en-gpios = <&gpio 185 0>; /* gpio PX1 */
 	};
-
-	serial@70006000 {
-		status = "disable";
-	};
-
-	serial@70006040 {
-		status = "disable";
-	};
-
-	serial@70006200 {
-		status = "disable";
-	};
-
-	serial@70006300 {
-		clock-frequency = < 216000000 >;
-	};
-
-	serial@70006400 {
-		status = "disable";
-	};
-
-	sdhci@c8000000 {
-		status = "disable";
-	};
-
-	sdhci@c8000200 {
-		cd-gpios = <&gpio 69 0>; /* gpio PI5 */
-		wp-gpios = <&gpio 57 0>; /* gpio PH1 */
-		power-gpios = <&gpio 155 0>; /* gpio PT3 */
-	};
-
-	sdhci@c8000400 {
-		status = "disable";
-	};
-
-	sdhci@c8000600 {
-		cd-gpios = <&gpio 58 0>; /* gpio PH2 */
-		wp-gpios = <&gpio 59 0>; /* gpio PH3 */
-		power-gpios = <&gpio 70 0>; /* gpio PI6 */
-		support-8bit;
-	};
 };
diff --git a/arch/arm/boot/dts/tegra-paz00.dts b/arch/arm/boot/dts/tegra-paz00.dts
index 10943fb..bfeb117 100644
--- a/arch/arm/boot/dts/tegra-paz00.dts
+++ b/arch/arm/boot/dts/tegra-paz00.dts
@@ -6,11 +6,11 @@
 	model = "Toshiba AC100 / Dynabook AZ";
 	compatible = "compal,paz00", "nvidia,tegra20";
 
-	memory@0 {
+	memory {
 		reg = <0x00000000 0x20000000>;
 	};
 
-	pinmux@70000000 {
+	pinmux {
 		pinctrl-names = "default";
 		pinctrl-0 = <&state_default>;
 
@@ -159,18 +159,14 @@
 			};
 			conf_ata {
 				nvidia,pins = "ata", "atb", "atc", "atd", "ate",
-					"cdev1", "dap1", "dap2", "dtf", "gma",
-					"gmb", "gmc", "gmd", "gme", "gpu",
-					"gpu7", "gpv", "i2cp", "pta", "rm",
-					"sdio1", "slxk", "spdo", "uac", "uda";
+					"cdev1", "cdev2", "dap1", "dap2", "dtf",
+					"gma", "gmb", "gmc", "gmd", "gme",
+					"gpu", "gpu7", "gpv", "i2cp", "pta",
+					"rm", "sdio1", "slxk", "spdo", "uac",
+					"uda";
 				nvidia,pull = <0>;
 				nvidia,tristate = <0>;
 			};
-			conf_cdev2 {
-				nvidia,pins = "cdev2";
-				nvidia,pull = <1>;
-				nvidia,tristate = <0>;
-			};
 			conf_ck32 {
 				nvidia,pins = "ck32", "ddrc", "pmca", "pmcb",
 					"pmcc", "pmcd", "pmce", "xm2c", "xm2d";
@@ -230,7 +226,22 @@
 		};
 	};
 
+	i2s@70002800 {
+		status = "okay";
+	};
+
+	serial@70006000 {
+		status = "okay";
+		clock-frequency = <216000000>;
+	};
+
+	serial@70006200 {
+		status = "okay";
+		clock-frequency = <216000000>;
+	};
+
 	i2c@7000c000 {
+		status = "okay";
 		clock-frequency = <400000>;
 
 		alc5632: alc5632@1e {
@@ -242,25 +253,23 @@
 	};
 
 	i2c@7000c400 {
+		status = "okay";
 		clock-frequency = <400000>;
 	};
 
-	i2c@7000c500 {
-		status = "disable";
-	};
-
-	nvec@7000c500 {
+	nvec {
+		compatible = "nvidia,nvec";
+		reg = <0x7000c500 0x100>;
+		interrupts = <0 92 0x04>;
 		#address-cells = <1>;
 		#size-cells = <0>;
-		compatible = "nvidia,nvec";
-		reg = <0x7000C500 0x100>;
-		interrupts = <0 92 0x04>;
 		clock-frequency = <80000>;
-		request-gpios = <&gpio 170 0>;
+		request-gpios = <&gpio 170 0>; /* gpio PV2 */
 		slave-addr = <138>;
 	};
 
 	i2c@7000d000 {
+		status = "okay";
 		clock-frequency = <400000>;
 
 		adt7461@4c {
@@ -269,8 +278,52 @@
 		};
 	};
 
-	i2s@70002a00 {
-		status = "disable";
+	usb@c5000000 {
+		status = "okay";
+	};
+
+	usb@c5004000 {
+		status = "okay";
+		nvidia,phy-reset-gpio = <&gpio 168 0>; /* gpio PV0 */
+	};
+
+	usb@c5008000 {
+		status = "okay";
+	};
+
+	sdhci@c8000000 {
+		status = "okay";
+		cd-gpios = <&gpio 173 0>; /* gpio PV5 */
+		wp-gpios = <&gpio 57 0>;  /* gpio PH1 */
+		power-gpios = <&gpio 169 0>; /* gpio PV1 */
+		bus-width = <4>;
+	};
+
+	sdhci@c8000600 {
+		status = "okay";
+		support-8bit;
+		bus-width = <8>;
+	};
+
+	gpio-keys {
+		compatible = "gpio-keys";
+
+		power {
+			label = "Power";
+			gpios = <&gpio 79 1>; /* gpio PJ7, active low */
+			linux,code = <116>; /* KEY_POWER */
+			gpio-key,wakeup;
+		};
+	};
+
+	gpio-leds {
+		compatible = "gpio-leds";
+
+		wifi {
+			label = "wifi-led";
+			gpios = <&gpio 24 0>; /* gpio PD0 */
+			linux,default-trigger = "rfkill0";
+		};
 	};
 
 	sound {
@@ -292,63 +345,4 @@
 		nvidia,i2s-controller = <&tegra_i2s1>;
 		nvidia,hp-det-gpios = <&gpio 178 0>; /* gpio PW2 */
 	};
-
-	serial@70006000 {
-		clock-frequency = <216000000>;
-	};
-
-	serial@70006040 {
-		status = "disable";
-	};
-
-	serial@70006200 {
-		clock-frequency = <216000000>;
-	};
-
-	serial@70006300 {
-		status = "disable";
-	};
-
-	serial@70006400 {
-		status = "disable";
-	};
-
-	sdhci@c8000000 {
-		cd-gpios = <&gpio 173 0>; /* gpio PV5 */
-		wp-gpios = <&gpio 57 0>;  /* gpio PH1 */
-		power-gpios = <&gpio 169 0>; /* gpio PV1 */
-	};
-
-	sdhci@c8000200 {
-		status = "disable";
-	};
-
-	sdhci@c8000400 {
-		status = "disable";
-	};
-
-	sdhci@c8000600 {
-		support-8bit;
-	};
-
-	gpio-keys {
-		compatible = "gpio-keys";
-
-		power {
-			label = "Power";
-			gpios = <&gpio 79 1>; /* gpio PJ7, active low */
-			linux,code = <116>; /* KEY_POWER */
-			gpio-key,wakeup;
-		};
-	};
-
-	gpio-leds {
-		compatible = "gpio-leds";
-
-		wifi {
-			label = "wifi-led";
-			gpios = <&gpio 24 0>;
-			linux,default-trigger = "rfkill0";
-		};
-	};
 };
diff --git a/arch/arm/boot/dts/tegra-seaboard.dts b/arch/arm/boot/dts/tegra-seaboard.dts
index ec33116f..89cb7f2 100644
--- a/arch/arm/boot/dts/tegra-seaboard.dts
+++ b/arch/arm/boot/dts/tegra-seaboard.dts
@@ -7,11 +7,10 @@
 	compatible = "nvidia,seaboard", "nvidia,tegra20";
 
 	memory {
-		device_type = "memory";
-		reg = < 0x00000000 0x40000000 >;
+		reg = <0x00000000 0x40000000>;
 	};
 
-	pinmux@70000000 {
+	pinmux {
 		pinctrl-names = "default";
 		pinctrl-0 = <&state_default>;
 
@@ -100,7 +99,7 @@
 			};
 			hdint {
 				nvidia,pins = "hdint", "lpw0", "lpw2", "lsc1",
-					"lsck", "lsda", "pta";
+					"lsck", "lsda";
 				nvidia,function = "hdmi";
 			};
 			i2cp {
@@ -134,6 +133,10 @@
 				nvidia,pins = "pmc";
 				nvidia,function = "pwr_on";
 			};
+			pta {
+				nvidia,pins = "pta";
+				nvidia,function = "i2c2";
+			};
 			rm {
 				nvidia,pins = "rm";
 				nvidia,function = "i2c1";
@@ -254,110 +257,150 @@
 		};
 	};
 
+	i2s@70002800 {
+		status = "okay";
+	};
+
+	serial@70006300 {
+		status = "okay";
+		clock-frequency = <216000000>;
+	};
+
 	i2c@7000c000 {
+		status = "okay";
 		clock-frequency = <400000>;
 
 		wm8903: wm8903@1a {
 			compatible = "wlf,wm8903";
 			reg = <0x1a>;
 			interrupt-parent = <&gpio>;
-			interrupts = < 187 0x04 >;
+			interrupts = <187 0x04>;
 
 			gpio-controller;
 			#gpio-cells = <2>;
 
 			micdet-cfg = <0>;
 			micdet-delay = <100>;
-			gpio-cfg = < 0xffffffff 0xffffffff 0 0xffffffff 0xffffffff >;
+			gpio-cfg = <0xffffffff 0xffffffff 0 0xffffffff 0xffffffff>;
+		};
+
+		/* ALS and proximity sensor */
+		isl29018@44 {
+			compatible = "isil,isl29018";
+			reg = <0x44>;
+			interrupt-parent = <&gpio>;
+			interrupts = <202 0x04>; /* GPIO PZ2 */
+		};
+
+		gyrometer@68 {
+			compatible = "invn,mpu3050";
+			reg = <0x68>;
+			interrupt-parent = <&gpio>;
+			interrupts = <204 0x04>; /* gpio PZ4 */
 		};
 	};
 
 	i2c@7000c400 {
-		clock-frequency = <400000>;
+		status = "okay";
+		clock-frequency = <100000>;
+
+		smart-battery@b {
+			compatible = "ti,bq20z75", "smart-battery-1.1";
+			reg = <0xb>;
+			ti,i2c-retry-count = <2>;
+			ti,poll-retry-count = <10>;
+		};
 	};
 
 	i2c@7000c500 {
+		status = "okay";
 		clock-frequency = <400000>;
 	};
 
 	i2c@7000d000 {
+		status = "okay";
 		clock-frequency = <400000>;
 
-		adt7461@4c {
-			compatible = "adt7461";
+		temperature-sensor@4c {
+			compatible = "nct1008";
 			reg = <0x4c>;
 		};
+
+		magnetometer@c {
+			compatible = "ak8975";
+			reg = <0xc>;
+			interrupt-parent = <&gpio>;
+			interrupts = <109 0x04>; /* gpio PN5 */
+		};
 	};
 
-	i2s@70002a00 {
-		status = "disable";
-	};
+	emc {
+		emc-table@190000 {
+			reg = <190000>;
+			compatible = "nvidia,tegra20-emc-table";
+			clock-frequency = <190000>;
+			nvidia,emc-registers = <0x0000000c 0x00000026
+				0x00000009 0x00000003 0x00000004 0x00000004
+				0x00000002 0x0000000c 0x00000003 0x00000003
+				0x00000002 0x00000001 0x00000004 0x00000005
+				0x00000004 0x00000009 0x0000000d 0x0000059f
+				0x00000000 0x00000003 0x00000003 0x00000003
+				0x00000003 0x00000001 0x0000000b 0x000000c8
+				0x00000003 0x00000007 0x00000004 0x0000000f
+				0x00000002 0x00000000 0x00000000 0x00000002
+				0x00000000 0x00000000 0x00000083 0xa06204ae
+				0x007dc010 0x00000000 0x00000000 0x00000000
+				0x00000000 0x00000000 0x00000000 0x00000000>;
+		};
 
-	sound {
-		compatible = "nvidia,tegra-audio-wm8903-seaboard",
-			     "nvidia,tegra-audio-wm8903";
-		nvidia,model = "NVIDIA Tegra Seaboard";
-
-		nvidia,audio-routing =
-			"Headphone Jack", "HPOUTR",
-			"Headphone Jack", "HPOUTL",
-			"Int Spk", "ROP",
-			"Int Spk", "RON",
-			"Int Spk", "LOP",
-			"Int Spk", "LON",
-			"Mic Jack", "MICBIAS",
-			"IN1R", "Mic Jack";
-
-		nvidia,i2s-controller = <&tegra_i2s1>;
-		nvidia,audio-codec = <&wm8903>;
-
-		nvidia,spkr-en-gpios = <&wm8903 2 0>;
-		nvidia,hp-det-gpios = <&gpio 185 0>; /* gpio PX1 */
-	};
-
-	serial@70006000 {
-		status = "disable";
-	};
-
-	serial@70006040 {
-		status = "disable";
-	};
-
-	serial@70006200 {
-		status = "disable";
-	};
-
-	serial@70006300 {
-		clock-frequency = < 216000000 >;
-	};
-
-	serial@70006400 {
-		status = "disable";
-	};
-
-	sdhci@c8000000 {
-		status = "disable";
-	};
-
-	sdhci@c8000200 {
-		status = "disable";
-	};
-
-	sdhci@c8000400 {
-		cd-gpios = <&gpio 69 0>; /* gpio PI5 */
-		wp-gpios = <&gpio 57 0>; /* gpio PH1 */
-		power-gpios = <&gpio 70 0>; /* gpio PI6 */
-	};
-
-	sdhci@c8000600 {
-		support-8bit;
+		emc-table@380000 {
+			reg = <380000>;
+			compatible = "nvidia,tegra20-emc-table";
+			clock-frequency = <380000>;
+			nvidia,emc-registers = <0x00000017 0x0000004b
+				0x00000012 0x00000006 0x00000004 0x00000005
+				0x00000003 0x0000000c 0x00000006 0x00000006
+				0x00000003 0x00000001 0x00000004 0x00000005
+				0x00000004 0x00000009 0x0000000d 0x00000b5f
+				0x00000000 0x00000003 0x00000003 0x00000006
+				0x00000006 0x00000001 0x00000011 0x000000c8
+				0x00000003 0x0000000e 0x00000007 0x0000000f
+				0x00000002 0x00000000 0x00000000 0x00000002
+				0x00000000 0x00000000 0x00000083 0xe044048b
+				0x007d8010 0x00000000 0x00000000 0x00000000
+				0x00000000 0x00000000 0x00000000 0x00000000>;
+		};
 	};
 
 	usb@c5000000 {
+		status = "okay";
 		nvidia,vbus-gpio = <&gpio 24 0>; /* PD0 */
 		dr_mode = "otg";
 	};
 
+	usb@c5004000 {
+		status = "okay";
+		nvidia,phy-reset-gpio = <&gpio 169 0>; /* gpio PV1 */
+	};
+
+	usb@c5008000 {
+		status = "okay";
+	};
+
+	sdhci@c8000400 {
+		status = "okay";
+		cd-gpios = <&gpio 69 0>; /* gpio PI5 */
+		wp-gpios = <&gpio 57 0>; /* gpio PH1 */
+		power-gpios = <&gpio 70 0>; /* gpio PI6 */
+		bus-width = <4>;
+	};
+
+	sdhci@c8000600 {
+		status = "okay";
+		support-8bit;
+		bus-width = <8>;
+	};
+
 	gpio-keys {
 		compatible = "gpio-keys";
 
@@ -378,41 +421,25 @@
 		};
 	};
 
-	emc@7000f400 {
-		emc-table@190000 {
-			reg = < 190000 >;
-			compatible = "nvidia,tegra20-emc-table";
-			clock-frequency = < 190000 >;
-			nvidia,emc-registers = < 0x0000000c 0x00000026
-				0x00000009 0x00000003 0x00000004 0x00000004
-				0x00000002 0x0000000c 0x00000003 0x00000003
-				0x00000002 0x00000001 0x00000004 0x00000005
-				0x00000004 0x00000009 0x0000000d 0x0000059f
-				0x00000000 0x00000003 0x00000003 0x00000003
-				0x00000003 0x00000001 0x0000000b 0x000000c8
-				0x00000003 0x00000007 0x00000004 0x0000000f
-				0x00000002 0x00000000 0x00000000 0x00000002
-				0x00000000 0x00000000 0x00000083 0xa06204ae
-				0x007dc010 0x00000000 0x00000000 0x00000000
-				0x00000000 0x00000000 0x00000000 0x00000000 >;
-		};
+	sound {
+		compatible = "nvidia,tegra-audio-wm8903-seaboard",
+			     "nvidia,tegra-audio-wm8903";
+		nvidia,model = "NVIDIA Tegra Seaboard";
 
-		emc-table@380000 {
-			reg = < 380000 >;
-			compatible = "nvidia,tegra20-emc-table";
-			clock-frequency = < 380000 >;
-			nvidia,emc-registers = < 0x00000017 0x0000004b
-				0x00000012 0x00000006 0x00000004 0x00000005
-				0x00000003 0x0000000c 0x00000006 0x00000006
-				0x00000003 0x00000001 0x00000004 0x00000005
-				0x00000004 0x00000009 0x0000000d 0x00000b5f
-				0x00000000 0x00000003 0x00000003 0x00000006
-				0x00000006 0x00000001 0x00000011 0x000000c8
-				0x00000003 0x0000000e 0x00000007 0x0000000f
-				0x00000002 0x00000000 0x00000000 0x00000002
-				0x00000000 0x00000000 0x00000083 0xe044048b
-				0x007d8010 0x00000000 0x00000000 0x00000000
-				0x00000000 0x00000000 0x00000000 0x00000000 >;
-		};
+		nvidia,audio-routing =
+			"Headphone Jack", "HPOUTR",
+			"Headphone Jack", "HPOUTL",
+			"Int Spk", "ROP",
+			"Int Spk", "RON",
+			"Int Spk", "LOP",
+			"Int Spk", "LON",
+			"Mic Jack", "MICBIAS",
+			"IN1R", "Mic Jack";
+
+		nvidia,i2s-controller = <&tegra_i2s1>;
+		nvidia,audio-codec = <&wm8903>;
+
+		nvidia,spkr-en-gpios = <&wm8903 2 0>;
+		nvidia,hp-det-gpios = <&gpio 185 0>; /* gpio PX1 */
 	};
 };
diff --git a/arch/arm/boot/dts/tegra-trimslice.dts b/arch/arm/boot/dts/tegra-trimslice.dts
index 98efd5b..9de5636 100644
--- a/arch/arm/boot/dts/tegra-trimslice.dts
+++ b/arch/arm/boot/dts/tegra-trimslice.dts
@@ -6,11 +6,11 @@
 	model = "Compulab TrimSlice board";
 	compatible = "compulab,trimslice", "nvidia,tegra20";
 
-	memory@0 {
-		reg = < 0x00000000 0x40000000 >;
+	memory {
+		reg = <0x00000000 0x40000000>;
 	};
 
-	pinmux@70000000 {
+	pinmux {
 		pinctrl-names = "default";
 		pinctrl-0 = <&state_default>;
 
@@ -182,23 +182,23 @@
 				nvidia,tristate = <1>;
 			};
 			conf_atb {
-				nvidia,pins = "atb", "cdev1", "dap1", "gma",
-					"gmc", "gmd", "gpu", "gpu7", "gpv",
-					"sdio1", "slxa", "slxk", "uac";
+				nvidia,pins = "atb", "cdev1", "cdev2", "dap1",
+					"gma", "gmc", "gmd", "gpu", "gpu7",
+					"gpv", "sdio1", "slxa", "slxk", "uac";
 				nvidia,pull = <0>;
 				nvidia,tristate = <0>;
 			};
-			conf_cdev2 {
-				nvidia,pins = "cdev2", "csus", "spia", "spib",
-					"spid", "spif";
-				nvidia,pull = <1>;
-				nvidia,tristate = <1>;
-			};
 			conf_ck32 {
 				nvidia,pins = "ck32", "ddrc", "pmca", "pmcb",
 					"pmcc", "pmcd", "pmce", "xm2c", "xm2d";
 				nvidia,pull = <0>;
 			};
+			conf_csus {
+				nvidia,pins = "csus", "spia", "spib",
+					"spid", "spif";
+				nvidia,pull = <1>;
+				nvidia,tristate = <1>;
+			};
 			conf_ddc {
 				nvidia,pins = "ddc", "dtf", "rm", "sdc", "sdd";
 				nvidia,pull = <2>;
@@ -240,68 +240,67 @@
 		};
 	};
 
+	i2s@70002800 {
+		status = "okay";
+	};
+
+	serial@70006000 {
+		status = "okay";
+		clock-frequency = <216000000>;
+	};
+
 	i2c@7000c000 {
+		status = "okay";
 		clock-frequency = <400000>;
 	};
 
 	i2c@7000c400 {
+		status = "okay";
 		clock-frequency = <400000>;
 	};
 
 	i2c@7000c500 {
+		status = "okay";
 		clock-frequency = <400000>;
+
+		codec: codec@1a {
+			compatible = "ti,tlv320aic23";
+			reg = <0x1a>;
+		};
+
+		rtc@56 {
+			compatible = "emmicro,em3027";
+			reg = <0x56>;
+		};
 	};
 
-	i2c@7000d000 {
-		status = "disable";
+	usb@c5000000 {
+		status = "okay";
 	};
 
-	i2s@70002800 {
-		status = "disable";
+	usb@c5004000 {
+		nvidia,phy-reset-gpio = <&gpio 168 0>; /* gpio PV0 */
 	};
 
-	i2s@70002a00 {
-		status = "disable";
-	};
-
-	das@70000c00 {
-		status = "disable";
-	};
-
-	serial@70006000 {
-		clock-frequency = < 216000000 >;
-	};
-
-	serial@70006040 {
-		status = "disable";
-	};
-
-	serial@70006200 {
-		status = "disable";
-	};
-
-	serial@70006300 {
-		status = "disable";
-	};
-
-	serial@70006400 {
-		status = "disable";
+	usb@c5008000 {
+		status = "okay";
 	};
 
 	sdhci@c8000000 {
-		status = "disable";
-	};
-
-	sdhci@c8000200 {
-		status = "disable";
-	};
-
-	sdhci@c8000400 {
-		status = "disable";
+		status = "okay";
+		bus-width = <4>;
 	};
 
 	sdhci@c8000600 {
-		cd-gpios = <&gpio 121 0>;
-		wp-gpios = <&gpio 122 0>;
+		status = "okay";
+		cd-gpios = <&gpio 121 0>; /* gpio PP1 */
+		wp-gpios = <&gpio 122 0>; /* gpio PP2 */
+		bus-width = <4>;
+	};
+
+	sound {
+		compatible = "nvidia,tegra-audio-trimslice";
+		nvidia,i2s-controller = <&tegra_i2s1>;
+		nvidia,audio-codec = <&codec>;
 	};
 };
diff --git a/arch/arm/boot/dts/tegra-ventana.dts b/arch/arm/boot/dts/tegra-ventana.dts
index 71eb2e5..445343b 100644
--- a/arch/arm/boot/dts/tegra-ventana.dts
+++ b/arch/arm/boot/dts/tegra-ventana.dts
@@ -7,10 +7,10 @@
 	compatible = "nvidia,ventana", "nvidia,tegra20";
 
 	memory {
-		reg = < 0x00000000 0x40000000 >;
+		reg = <0x00000000 0x40000000>;
 	};
 
-	pinmux@70000000 {
+	pinmux {
 		pinctrl-names = "default";
 		pinctrl-0 = <&state_default>;
 
@@ -240,38 +240,82 @@
 		};
 	};
 
+	i2s@70002800 {
+		status = "okay";
+	};
+
+	serial@70006300 {
+		status = "okay";
+		clock-frequency = <216000000>;
+	};
+
 	i2c@7000c000 {
+		status = "okay";
 		clock-frequency = <400000>;
 
 		wm8903: wm8903@1a {
 			compatible = "wlf,wm8903";
 			reg = <0x1a>;
 			interrupt-parent = <&gpio>;
-			interrupts = < 187 0x04 >;
+			interrupts = <187 0x04>;
 
 			gpio-controller;
 			#gpio-cells = <2>;
 
 			micdet-cfg = <0>;
 			micdet-delay = <100>;
-			gpio-cfg = < 0xffffffff 0xffffffff 0 0xffffffff 0xffffffff >;
+			gpio-cfg = <0xffffffff 0xffffffff 0 0xffffffff 0xffffffff>;
+		};
+
+		/* ALS and proximity sensor */
+		isl29018@44 {
+			compatible = "isil,isl29018";
+			reg = <0x44>;
+			interrupt-parent = <&gpio>;
+			interrupts = <202 0x04>; /* gpio PZ2 */
 		};
 	};
 
 	i2c@7000c400 {
+		status = "okay";
 		clock-frequency = <400000>;
 	};
 
 	i2c@7000c500 {
+		status = "okay";
 		clock-frequency = <400000>;
 	};
 
 	i2c@7000d000 {
+		status = "okay";
 		clock-frequency = <400000>;
 	};
 
-	i2s@70002a00 {
-		status = "disable";
+	usb@c5000000 {
+		status = "okay";
+	};
+
+	usb@c5004000 {
+		status = "okay";
+		nvidia,phy-reset-gpio = <&gpio 169 0>; /* gpio PV1 */
+	};
+
+	usb@c5008000 {
+		status = "okay";
+	};
+
+	sdhci@c8000400 {
+		status = "okay";
+		cd-gpios = <&gpio 69 0>; /* gpio PI5 */
+		wp-gpios = <&gpio 57 0>; /* gpio PH1 */
+		power-gpios = <&gpio 70 0>; /* gpio PI6 */
+		bus-width = <4>;
+	};
+
+	sdhci@c8000600 {
+		status = "okay";
+		support-8bit;
+		bus-width = <8>;
 	};
 
 	sound {
@@ -294,45 +338,7 @@
 
 		nvidia,spkr-en-gpios = <&wm8903 2 0>;
 		nvidia,hp-det-gpios = <&gpio 178 0>; /* gpio PW2 */
-		nvidia,int-mic-en-gpios = <&gpio 184 0>; /*gpio PX0 */
+		nvidia,int-mic-en-gpios = <&gpio 184 0>; /* gpio PX0 */
 		nvidia,ext-mic-en-gpios = <&gpio 185 0>; /* gpio PX1 */
 	};
-
-	serial@70006000 {
-		status = "disable";
-	};
-
-	serial@70006040 {
-		status = "disable";
-	};
-
-	serial@70006200 {
-		status = "disable";
-	};
-
-	serial@70006300 {
-		clock-frequency = < 216000000 >;
-	};
-
-	serial@70006400 {
-		status = "disable";
-	};
-
-	sdhci@c8000000 {
-		status = "disable";
-	};
-
-	sdhci@c8000200 {
-		status = "disable";
-	};
-
-	sdhci@c8000400 {
-		cd-gpios = <&gpio 69 0>; /* gpio PI5 */
-		wp-gpios = <&gpio 57 0>; /* gpio PH1 */
-		power-gpios = <&gpio 70 0>; /* gpio PI6 */
-	};
-
-	sdhci@c8000600 {
-		support-8bit;
-	};
 };
diff --git a/arch/arm/boot/dts/tegra20.dtsi b/arch/arm/boot/dts/tegra20.dtsi
index 108e894..c417d67 100644
--- a/arch/arm/boot/dts/tegra20.dtsi
+++ b/arch/arm/boot/dts/tegra20.dtsi
@@ -4,17 +4,237 @@
 	compatible = "nvidia,tegra20";
 	interrupt-parent = <&intc>;
 
-	pmc@7000f400 {
+	intc: interrupt-controller {
+		compatible = "arm,cortex-a9-gic";
+		reg = <0x50041000 0x1000
+		       0x50040100 0x0100>;
+		interrupt-controller;
+		#interrupt-cells = <3>;
+	};
+
+	apbdma: dma {
+		compatible = "nvidia,tegra20-apbdma";
+		reg = <0x6000a000 0x1200>;
+		interrupts = <0 104 0x04
+			      0 105 0x04
+			      0 106 0x04
+			      0 107 0x04
+			      0 108 0x04
+			      0 109 0x04
+			      0 110 0x04
+			      0 111 0x04
+			      0 112 0x04
+			      0 113 0x04
+			      0 114 0x04
+			      0 115 0x04
+			      0 116 0x04
+			      0 117 0x04
+			      0 118 0x04
+			      0 119 0x04>;
+	};
+
+	ahb {
+		compatible = "nvidia,tegra20-ahb";
+		reg = <0x6000c004 0x10c>; /* AHB Arbitration + Gizmo Controller */
+	};
+
+	gpio: gpio {
+		compatible = "nvidia,tegra20-gpio";
+		reg = <0x6000d000 0x1000>;
+		interrupts = <0 32 0x04
+			      0 33 0x04
+			      0 34 0x04
+			      0 35 0x04
+			      0 55 0x04
+			      0 87 0x04
+			      0 89 0x04>;
+		#gpio-cells = <2>;
+		gpio-controller;
+		#interrupt-cells = <2>;
+		interrupt-controller;
+	};
+
+	pinmux: pinmux {
+		compatible = "nvidia,tegra20-pinmux";
+		reg = <0x70000014 0x10   /* Tri-state registers */
+		       0x70000080 0x20   /* Mux registers */
+		       0x700000a0 0x14   /* Pull-up/down registers */
+		       0x70000868 0xa8>; /* Pad control registers */
+	};
+
+	das {
+		compatible = "nvidia,tegra20-das";
+		reg = <0x70000c00 0x80>;
+	};
+
+	tegra_i2s1: i2s@70002800 {
+		compatible = "nvidia,tegra20-i2s";
+		reg = <0x70002800 0x200>;
+		interrupts = <0 13 0x04>;
+		nvidia,dma-request-selector = <&apbdma 2>;
+		status = "disable";
+	};
+
+	tegra_i2s2: i2s@70002a00 {
+		compatible = "nvidia,tegra20-i2s";
+		reg = <0x70002a00 0x200>;
+		interrupts = <0 3 0x04>;
+		nvidia,dma-request-selector = <&apbdma 1>;
+		status = "disable";
+	};
+
+	serial@70006000 {
+		compatible = "nvidia,tegra20-uart";
+		reg = <0x70006000 0x40>;
+		reg-shift = <2>;
+		interrupts = <0 36 0x04>;
+		status = "disable";
+	};
+
+	serial@70006040 {
+		compatible = "nvidia,tegra20-uart";
+		reg = <0x70006040 0x40>;
+		reg-shift = <2>;
+		interrupts = <0 37 0x04>;
+		status = "disable";
+	};
+
+	serial@70006200 {
+		compatible = "nvidia,tegra20-uart";
+		reg = <0x70006200 0x100>;
+		reg-shift = <2>;
+		interrupts = <0 46 0x04>;
+		status = "disable";
+	};
+
+	serial@70006300 {
+		compatible = "nvidia,tegra20-uart";
+		reg = <0x70006300 0x100>;
+		reg-shift = <2>;
+		interrupts = <0 90 0x04>;
+		status = "disable";
+	};
+
+	serial@70006400 {
+		compatible = "nvidia,tegra20-uart";
+		reg = <0x70006400 0x100>;
+		reg-shift = <2>;
+		interrupts = <0 91 0x04>;
+		status = "disable";
+	};
+
+	i2c@7000c000 {
+		compatible = "nvidia,tegra20-i2c";
+		reg = <0x7000c000 0x100>;
+		interrupts = <0 38 0x04>;
+		#address-cells = <1>;
+		#size-cells = <0>;
+		status = "disable";
+	};
+
+	i2c@7000c400 {
+		compatible = "nvidia,tegra20-i2c";
+		reg = <0x7000c400 0x100>;
+		interrupts = <0 84 0x04>;
+		#address-cells = <1>;
+		#size-cells = <0>;
+		status = "disable";
+	};
+
+	i2c@7000c500 {
+		compatible = "nvidia,tegra20-i2c";
+		reg = <0x7000c500 0x100>;
+		interrupts = <0 92 0x04>;
+		#address-cells = <1>;
+		#size-cells = <0>;
+		status = "disable";
+	};
+
+	i2c@7000d000 {
+		compatible = "nvidia,tegra20-i2c-dvc";
+		reg = <0x7000d000 0x200>;
+		interrupts = <0 53 0x04>;
+		#address-cells = <1>;
+		#size-cells = <0>;
+		status = "disable";
+	};
+
+	pmc {
 		compatible = "nvidia,tegra20-pmc";
 		reg = <0x7000e400 0x400>;
 	};
 
-	intc: interrupt-controller@50041000 {
-		compatible = "arm,cortex-a9-gic";
-		interrupt-controller;
-		#interrupt-cells = <3>;
-		reg = < 0x50041000 0x1000 >,
-		      < 0x50040100 0x0100 >;
+	mc {
+		compatible = "nvidia,tegra20-mc";
+		reg = <0x7000f000 0x024
+		       0x7000f03c 0x3c4>;
+		interrupts = <0 77 0x04>;
+	};
+
+	gart {
+		compatible = "nvidia,tegra20-gart";
+		reg = <0x7000f024 0x00000018	/* controller registers */
+		       0x58000000 0x02000000>;	/* GART aperture */
+	};
+
+	emc {
+		compatible = "nvidia,tegra20-emc";
+		reg = <0x7000f400 0x200>;
+		#address-cells = <1>;
+		#size-cells = <0>;
+	};
+
+	usb@c5000000 {
+		compatible = "nvidia,tegra20-ehci", "usb-ehci";
+		reg = <0xc5000000 0x4000>;
+		interrupts = <0 20 0x04>;
+		phy_type = "utmi";
+		nvidia,has-legacy-mode;
+		status = "disable";
+	};
+
+	usb@c5004000 {
+		compatible = "nvidia,tegra20-ehci", "usb-ehci";
+		reg = <0xc5004000 0x4000>;
+		interrupts = <0 21 0x04>;
+		phy_type = "ulpi";
+		status = "disable";
+	};
+
+	usb@c5008000 {
+		compatible = "nvidia,tegra20-ehci", "usb-ehci";
+		reg = <0xc5008000 0x4000>;
+		interrupts = <0 97 0x04>;
+		phy_type = "utmi";
+		status = "disable";
+	};
+
+	sdhci@c8000000 {
+		compatible = "nvidia,tegra20-sdhci";
+		reg = <0xc8000000 0x200>;
+		interrupts = <0 14 0x04>;
+		status = "disable";
+	};
+
+	sdhci@c8000200 {
+		compatible = "nvidia,tegra20-sdhci";
+		reg = <0xc8000200 0x200>;
+		interrupts = <0 15 0x04>;
+		status = "disable";
+	};
+
+	sdhci@c8000400 {
+		compatible = "nvidia,tegra20-sdhci";
+		reg = <0xc8000400 0x200>;
+		interrupts = <0 19 0x04>;
+		status = "disable";
+	};
+
+	sdhci@c8000600 {
+		compatible = "nvidia,tegra20-sdhci";
+		reg = <0xc8000600 0x200>;
+		interrupts = <0 31 0x04>;
+		status = "disable";
 	};
 
 	pmu {
@@ -22,189 +242,4 @@
 		interrupts = <0 56 0x04
 			      0 57 0x04>;
 	};
-
-	apbdma: dma@6000a000 {
-		compatible = "nvidia,tegra20-apbdma";
-		reg = <0x6000a000 0x1200>;
-		interrupts = < 0 104 0x04
-			       0 105 0x04
-			       0 106 0x04
-			       0 107 0x04
-			       0 108 0x04
-			       0 109 0x04
-			       0 110 0x04
-			       0 111 0x04
-			       0 112 0x04
-			       0 113 0x04
-			       0 114 0x04
-			       0 115 0x04
-			       0 116 0x04
-			       0 117 0x04
-			       0 118 0x04
-			       0 119 0x04 >;
-	};
-
-	i2c@7000c000 {
-		#address-cells = <1>;
-		#size-cells = <0>;
-		compatible = "nvidia,tegra20-i2c";
-		reg = <0x7000C000 0x100>;
-		interrupts = < 0 38 0x04 >;
-	};
-
-	i2c@7000c400 {
-		#address-cells = <1>;
-		#size-cells = <0>;
-		compatible = "nvidia,tegra20-i2c";
-		reg = <0x7000C400 0x100>;
-		interrupts = < 0 84 0x04 >;
-	};
-
-	i2c@7000c500 {
-		#address-cells = <1>;
-		#size-cells = <0>;
-		compatible = "nvidia,tegra20-i2c";
-		reg = <0x7000C500 0x100>;
-		interrupts = < 0 92 0x04 >;
-	};
-
-	i2c@7000d000 {
-		#address-cells = <1>;
-		#size-cells = <0>;
-		compatible = "nvidia,tegra20-i2c-dvc";
-		reg = <0x7000D000 0x200>;
-		interrupts = < 0 53 0x04 >;
-	};
-
-	tegra_i2s1: i2s@70002800 {
-		compatible = "nvidia,tegra20-i2s";
-		reg = <0x70002800 0x200>;
-		interrupts = < 0 13 0x04 >;
-		nvidia,dma-request-selector = < &apbdma 2 >;
-	};
-
-	tegra_i2s2: i2s@70002a00 {
-		compatible = "nvidia,tegra20-i2s";
-		reg = <0x70002a00 0x200>;
-		interrupts = < 0 3 0x04 >;
-		nvidia,dma-request-selector = < &apbdma 1 >;
-	};
-
-	das@70000c00 {
-		compatible = "nvidia,tegra20-das";
-		reg = <0x70000c00 0x80>;
-	};
-
-	gpio: gpio@6000d000 {
-		compatible = "nvidia,tegra20-gpio";
-		reg = < 0x6000d000 0x1000 >;
-		interrupts = < 0 32 0x04
-			       0 33 0x04
-			       0 34 0x04
-			       0 35 0x04
-			       0 55 0x04
-			       0 87 0x04
-			       0 89 0x04 >;
-		#gpio-cells = <2>;
-		gpio-controller;
-		#interrupt-cells = <2>;
-		interrupt-controller;
-	};
-
-	pinmux: pinmux@70000000 {
-		compatible = "nvidia,tegra20-pinmux";
-		reg = < 0x70000014 0x10    /* Tri-state registers */
-			0x70000080 0x20    /* Mux registers */
-			0x700000a0 0x14    /* Pull-up/down registers */
-			0x70000868 0xa8 >; /* Pad control registers */
-	};
-
-	serial@70006000 {
-		compatible = "nvidia,tegra20-uart";
-		reg = <0x70006000 0x40>;
-		reg-shift = <2>;
-		interrupts = < 0 36 0x04 >;
-	};
-
-	serial@70006040 {
-		compatible = "nvidia,tegra20-uart";
-		reg = <0x70006040 0x40>;
-		reg-shift = <2>;
-		interrupts = < 0 37 0x04 >;
-	};
-
-	serial@70006200 {
-		compatible = "nvidia,tegra20-uart";
-		reg = <0x70006200 0x100>;
-		reg-shift = <2>;
-		interrupts = < 0 46 0x04 >;
-	};
-
-	serial@70006300 {
-		compatible = "nvidia,tegra20-uart";
-		reg = <0x70006300 0x100>;
-		reg-shift = <2>;
-		interrupts = < 0 90 0x04 >;
-	};
-
-	serial@70006400 {
-		compatible = "nvidia,tegra20-uart";
-		reg = <0x70006400 0x100>;
-		reg-shift = <2>;
-		interrupts = < 0 91 0x04 >;
-	};
-
-	emc@7000f400 {
-		#address-cells = <1>;
-		#size-cells = <0>;
-		compatible = "nvidia,tegra20-emc";
-		reg = <0x7000f400 0x200>;
-	};
-
-	sdhci@c8000000 {
-		compatible = "nvidia,tegra20-sdhci";
-		reg = <0xc8000000 0x200>;
-		interrupts = < 0 14 0x04 >;
-	};
-
-	sdhci@c8000200 {
-		compatible = "nvidia,tegra20-sdhci";
-		reg = <0xc8000200 0x200>;
-		interrupts = < 0 15 0x04 >;
-	};
-
-	sdhci@c8000400 {
-		compatible = "nvidia,tegra20-sdhci";
-		reg = <0xc8000400 0x200>;
-		interrupts = < 0 19 0x04 >;
-	};
-
-	sdhci@c8000600 {
-		compatible = "nvidia,tegra20-sdhci";
-		reg = <0xc8000600 0x200>;
-		interrupts = < 0 31 0x04 >;
-	};
-
-	usb@c5000000 {
-		compatible = "nvidia,tegra20-ehci", "usb-ehci";
-		reg = <0xc5000000 0x4000>;
-		interrupts = < 0 20 0x04 >;
-		phy_type = "utmi";
-		nvidia,has-legacy-mode;
-	};
-
-	usb@c5004000 {
-		compatible = "nvidia,tegra20-ehci", "usb-ehci";
-		reg = <0xc5004000 0x4000>;
-		interrupts = < 0 21 0x04 >;
-		phy_type = "ulpi";
-	};
-
-	usb@c5008000 {
-		compatible = "nvidia,tegra20-ehci", "usb-ehci";
-		reg = <0xc5008000 0x4000>;
-		interrupts = < 0 97 0x04 >;
-		phy_type = "utmi";
-	};
 };
-
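
The interrupts properties in the tegra20.dtsi nodes above use the three-cell specifier of the arm,cortex-a9-gic binding: cell 0 selects a shared (0) or private (1) peripheral interrupt, cell 1 is the interrupt number within that space, and cell 2 carries flags whose low nibble is the trigger type (1 = rising edge, 4 = level high) and, for private interrupts, whose bits 15:8 are a CPU mask. A standalone decode sketch (illustrative only, not kernel code):

#include <stdio.h>

static void decode_gic_irq(unsigned int type, unsigned int nr, unsigned int flags)
{
	printf("%s %u, trigger 0x%x, cpu mask 0x%x\n",
	       type ? "PPI" : "SPI", nr, flags & 0xf, (flags >> 8) & 0xff);
}

int main(void)
{
	decode_gic_irq(0, 14, 0x04);	/* sdhci@c8000000 above */
	decode_gic_irq(1, 13, 0x301);	/* cortex-a9 twd timer earlier */
	return 0;
}
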
diff --git a/arch/arm/boot/dts/tegra30.dtsi b/arch/arm/boot/dts/tegra30.dtsi
index 62a7b39..2dcc09e 100644
--- a/arch/arm/boot/dts/tegra30.dtsi
+++ b/arch/arm/boot/dts/tegra30.dtsi
@@ -4,17 +4,261 @@
 	compatible = "nvidia,tegra30";
 	interrupt-parent = <&intc>;
 
-	pmc@7000f400 {
+	intc: interrupt-controller {
+		compatible = "arm,cortex-a9-gic";
+		reg = <0x50041000 0x1000
+		       0x50040100 0x0100>;
+		interrupt-controller;
+		#interrupt-cells = <3>;
+	};
+
+	apbdma: dma {
+		compatible = "nvidia,tegra30-apbdma", "nvidia,tegra20-apbdma";
+		reg = <0x6000a000 0x1400>;
+		interrupts = <0 104 0x04
+			      0 105 0x04
+			      0 106 0x04
+			      0 107 0x04
+			      0 108 0x04
+			      0 109 0x04
+			      0 110 0x04
+			      0 111 0x04
+			      0 112 0x04
+			      0 113 0x04
+			      0 114 0x04
+			      0 115 0x04
+			      0 116 0x04
+			      0 117 0x04
+			      0 118 0x04
+			      0 119 0x04
+			      0 128 0x04
+			      0 129 0x04
+			      0 130 0x04
+			      0 131 0x04
+			      0 132 0x04
+			      0 133 0x04
+			      0 134 0x04
+			      0 135 0x04
+			      0 136 0x04
+			      0 137 0x04
+			      0 138 0x04
+			      0 139 0x04
+			      0 140 0x04
+			      0 141 0x04
+			      0 142 0x04
+			      0 143 0x04>;
+	};
+
+	ahb: ahb {
+		compatible = "nvidia,tegra30-ahb";
+		reg = <0x6000c004 0x14c>; /* AHB Arbitration + Gizmo Controller */
+	};
+
+	gpio: gpio {
+		compatible = "nvidia,tegra30-gpio", "nvidia,tegra20-gpio";
+		reg = <0x6000d000 0x1000>;
+		interrupts = <0 32 0x04
+			      0 33 0x04
+			      0 34 0x04
+			      0 35 0x04
+			      0 55 0x04
+			      0 87 0x04
+			      0 89 0x04
+			      0 125 0x04>;
+		#gpio-cells = <2>;
+		gpio-controller;
+		#interrupt-cells = <2>;
+		interrupt-controller;
+	};
+
+	pinmux: pinmux {
+		compatible = "nvidia,tegra30-pinmux";
+		reg = <0x70000868 0xd0    /* Pad control registers */
+		       0x70003000 0x3e0>; /* Mux registers */
+	};
+
+	serial@70006000 {
+		compatible = "nvidia,tegra30-uart", "nvidia,tegra20-uart";
+		reg = <0x70006000 0x40>;
+		reg-shift = <2>;
+		interrupts = <0 36 0x04>;
+		status = "disable";
+	};
+
+	serial@70006040 {
+		compatible = "nvidia,tegra30-uart", "nvidia,tegra20-uart";
+		reg = <0x70006040 0x40>;
+		reg-shift = <2>;
+		interrupts = <0 37 0x04>;
+		status = "disable";
+	};
+
+	serial@70006200 {
+		compatible = "nvidia,tegra30-uart", "nvidia,tegra20-uart";
+		reg = <0x70006200 0x100>;
+		reg-shift = <2>;
+		interrupts = <0 46 0x04>;
+		status = "disable";
+	};
+
+	serial@70006300 {
+		compatible = "nvidia,tegra30-uart", "nvidia,tegra20-uart";
+		reg = <0x70006300 0x100>;
+		reg-shift = <2>;
+		interrupts = <0 90 0x04>;
+		status = "disable";
+	};
+
+	serial@70006400 {
+		compatible = "nvidia,tegra30-uart", "nvidia,tegra20-uart";
+		reg = <0x70006400 0x100>;
+		reg-shift = <2>;
+		interrupts = <0 91 0x04>;
+		status = "disable";
+	};
+
+	i2c@7000c000 {
+		compatible =  "nvidia,tegra30-i2c", "nvidia,tegra20-i2c";
+		reg = <0x7000c000 0x100>;
+		interrupts = <0 38 0x04>;
+		#address-cells = <1>;
+		#size-cells = <0>;
+		status = "disable";
+	};
+
+	i2c@7000c400 {
+		compatible = "nvidia,tegra30-i2c", "nvidia,tegra20-i2c";
+		reg = <0x7000c400 0x100>;
+		interrupts = <0 84 0x04>;
+		#address-cells = <1>;
+		#size-cells = <0>;
+		status = "disable";
+	};
+
+	i2c@7000c500 {
+		compatible = "nvidia,tegra30-i2c", "nvidia,tegra20-i2c";
+		reg = <0x7000c500 0x100>;
+		interrupts = <0 92 0x04>;
+		#address-cells = <1>;
+		#size-cells = <0>;
+		status = "disable";
+	};
+
+	i2c@7000c700 {
+		compatible = "nvidia,tegra30-i2c", "nvidia,tegra20-i2c";
+		reg = <0x7000c700 0x100>;
+		interrupts = <0 120 0x04>;
+		#address-cells = <1>;
+		#size-cells = <0>;
+		status = "disable";
+	};
+
+	i2c@7000d000 {
+		compatible = "nvidia,tegra30-i2c", "nvidia,tegra20-i2c";
+		reg = <0x7000d000 0x100>;
+		interrupts = <0 53 0x04>;
+		#address-cells = <1>;
+		#size-cells = <0>;
+		status = "disable";
+	};
+
+	pmc {
 		compatible = "nvidia,tegra20-pmc", "nvidia,tegra30-pmc";
 		reg = <0x7000e400 0x400>;
 	};
 
-	intc: interrupt-controller@50041000 {
-		compatible = "arm,cortex-a9-gic";
-		interrupt-controller;
-		#interrupt-cells = <3>;
-		reg = < 0x50041000 0x1000 >,
-		      < 0x50040100 0x0100 >;
+	mc {
+		compatible = "nvidia,tegra30-mc";
+		reg = <0x7000f000 0x010
+		       0x7000f03c 0x1b4
+		       0x7000f200 0x028
+		       0x7000f284 0x17c>;
+		interrupts = <0 77 0x04>;
+	};
+
+	smmu {
+		compatible = "nvidia,tegra30-smmu";
+		reg = <0x7000f010 0x02c
+		       0x7000f1f0 0x010
+		       0x7000f228 0x05c>;
+		nvidia,#asids = <4>;		/* # of ASIDs */
+		dma-window = <0 0x40000000>;	/* IOVA start & length */
+		nvidia,ahb = <&ahb>;
+	};
+
+	ahub {
+		compatible = "nvidia,tegra30-ahub";
+		reg = <0x70080000 0x200
+		       0x70080200 0x100>;
+		interrupts = <0 103 0x04>;
+		nvidia,dma-request-selector = <&apbdma 1>;
+
+		ranges;
+		#address-cells = <1>;
+		#size-cells = <1>;
+
+		tegra_i2s0: i2s@70080300 {
+			compatible = "nvidia,tegra30-i2s";
+			reg = <0x70080300 0x100>;
+			nvidia,ahub-cif-ids = <4 4>;
+			status = "disable";
+		};
+
+		tegra_i2s1: i2s@70080400 {
+			compatible = "nvidia,tegra30-i2s";
+			reg = <0x70080400 0x100>;
+			nvidia,ahub-cif-ids = <5 5>;
+			status = "disable";
+		};
+
+		tegra_i2s2: i2s@70080500 {
+			compatible = "nvidia,tegra30-i2s";
+			reg = <0x70080500 0x100>;
+			nvidia,ahub-cif-ids = <6 6>;
+			status = "disable";
+		};
+
+		tegra_i2s3: i2s@70080600 {
+			compatible = "nvidia,tegra30-i2s";
+			reg = <0x70080600 0x100>;
+			nvidia,ahub-cif-ids = <7 7>;
+			status = "disable";
+		};
+
+		tegra_i2s4: i2s@70080700 {
+			compatible = "nvidia,tegra30-i2s";
+			reg = <0x70080700 0x100>;
+			nvidia,ahub-cif-ids = <8 8>;
+			status = "disable";
+		};
+	};
+
+	sdhci@78000000 {
+		compatible = "nvidia,tegra30-sdhci", "nvidia,tegra20-sdhci";
+		reg = <0x78000000 0x200>;
+		interrupts = <0 14 0x04>;
+		status = "disable";
+	};
+
+	sdhci@78000200 {
+		compatible = "nvidia,tegra30-sdhci", "nvidia,tegra20-sdhci";
+		reg = <0x78000200 0x200>;
+		interrupts = <0 15 0x04>;
+		status = "disable";
+	};
+
+	sdhci@78000400 {
+		compatible = "nvidia,tegra30-sdhci", "nvidia,tegra20-sdhci";
+		reg = <0x78000400 0x200>;
+		interrupts = <0 19 0x04>;
+		status = "disable";
+	};
+
+	sdhci@78000600 {
+		compatible = "nvidia,tegra30-sdhci", "nvidia,tegra20-sdhci";
+		reg = <0x78000600 0x200>;
+		interrupts = <0 31 0x04>;
+		status = "disable";
 	};
 
 	pmu {
@@ -24,163 +268,4 @@
 			      0 146 0x04
 			      0 147 0x04>;
 	};
-
-	apbdma: dma@6000a000 {
-		compatible = "nvidia,tegra30-apbdma", "nvidia,tegra20-apbdma";
-		reg = <0x6000a000 0x1400>;
-		interrupts = < 0 104 0x04
-			       0 105 0x04
-			       0 106 0x04
-			       0 107 0x04
-			       0 108 0x04
-			       0 109 0x04
-			       0 110 0x04
-			       0 111 0x04
-			       0 112 0x04
-			       0 113 0x04
-			       0 114 0x04
-			       0 115 0x04
-			       0 116 0x04
-			       0 117 0x04
-			       0 118 0x04
-			       0 119 0x04
-			       0 128 0x04
-			       0 129 0x04
-			       0 130 0x04
-			       0 131 0x04
-			       0 132 0x04
-			       0 133 0x04
-			       0 134 0x04
-			       0 135 0x04
-			       0 136 0x04
-			       0 137 0x04
-			       0 138 0x04
-			       0 139 0x04
-			       0 140 0x04
-			       0 141 0x04
-			       0 142 0x04
-			       0 143 0x04 >;
-	};
-
-	i2c@7000c000 {
-		#address-cells = <1>;
-		#size-cells = <0>;
-		compatible =  "nvidia,tegra30-i2c", "nvidia,tegra20-i2c";
-		reg = <0x7000C000 0x100>;
-		interrupts = < 0 38 0x04 >;
-	};
-
-	i2c@7000c400 {
-		#address-cells = <1>;
-		#size-cells = <0>;
-		compatible = "nvidia,tegra30-i2c", "nvidia,tegra20-i2c";
-		reg = <0x7000C400 0x100>;
-		interrupts = < 0 84 0x04 >;
-	};
-
-	i2c@7000c500 {
-		#address-cells = <1>;
-		#size-cells = <0>;
-		compatible = "nvidia,tegra30-i2c", "nvidia,tegra20-i2c";
-		reg = <0x7000C500 0x100>;
-		interrupts = < 0 92 0x04 >;
-	};
-
-	i2c@7000c700 {
-		#address-cells = <1>;
-		#size-cells = <0>;
-		compatible = "nvidia,tegra30-i2c", "nvidia,tegra20-i2c";
-		reg = <0x7000c700 0x100>;
-		interrupts = < 0 120 0x04 >;
-	};
-
-	i2c@7000d000 {
-		#address-cells = <1>;
-		#size-cells = <0>;
-		compatible = "nvidia,tegra30-i2c", "nvidia,tegra20-i2c";
-		reg = <0x7000D000 0x100>;
-		interrupts = < 0 53 0x04 >;
-	};
-
-	gpio: gpio@6000d000 {
-		compatible = "nvidia,tegra30-gpio", "nvidia,tegra20-gpio";
-		reg = < 0x6000d000 0x1000 >;
-		interrupts = < 0 32 0x04
-			       0 33 0x04
-			       0 34 0x04
-			       0 35 0x04
-			       0 55 0x04
-			       0 87 0x04
-			       0 89 0x04
-			       0 125 0x04 >;
-		#gpio-cells = <2>;
-		gpio-controller;
-		#interrupt-cells = <2>;
-		interrupt-controller;
-	};
-
-	serial@70006000 {
-		compatible = "nvidia,tegra30-uart", "nvidia,tegra20-uart";
-		reg = <0x70006000 0x40>;
-		reg-shift = <2>;
-		interrupts = < 0 36 0x04 >;
-	};
-
-	serial@70006040 {
-		compatible = "nvidia,tegra30-uart", "nvidia,tegra20-uart";
-		reg = <0x70006040 0x40>;
-		reg-shift = <2>;
-		interrupts = < 0 37 0x04 >;
-	};
-
-	serial@70006200 {
-		compatible = "nvidia,tegra30-uart", "nvidia,tegra20-uart";
-		reg = <0x70006200 0x100>;
-		reg-shift = <2>;
-		interrupts = < 0 46 0x04 >;
-	};
-
-	serial@70006300 {
-		compatible = "nvidia,tegra30-uart", "nvidia,tegra20-uart";
-		reg = <0x70006300 0x100>;
-		reg-shift = <2>;
-		interrupts = < 0 90 0x04 >;
-	};
-
-	serial@70006400 {
-		compatible = "nvidia,tegra30-uart", "nvidia,tegra20-uart";
-		reg = <0x70006400 0x100>;
-		reg-shift = <2>;
-		interrupts = < 0 91 0x04 >;
-	};
-
-	sdhci@78000000 {
-		compatible = "nvidia,tegra30-sdhci", "nvidia,tegra20-sdhci";
-		reg = <0x78000000 0x200>;
-		interrupts = < 0 14 0x04 >;
-	};
-
-	sdhci@78000200 {
-		compatible = "nvidia,tegra30-sdhci", "nvidia,tegra20-sdhci";
-		reg = <0x78000200 0x200>;
-		interrupts = < 0 15 0x04 >;
-	};
-
-	sdhci@78000400 {
-		compatible = "nvidia,tegra30-sdhci", "nvidia,tegra20-sdhci";
-		reg = <0x78000400 0x200>;
-		interrupts = < 0 19 0x04 >;
-	};
-
-	sdhci@78000600 {
-		compatible = "nvidia,tegra30-sdhci", "nvidia,tegra20-sdhci";
-		reg = <0x78000600 0x200>;
-		interrupts = < 0 31 0x04 >;
-	};
-
-	pinmux: pinmux@70000000 {
-		compatible = "nvidia,tegra30-pinmux";
-		reg = < 0x70000868 0xd0     /* Pad control registers */
-			0x70003000 0x3e0 >; /* Mux registers */
-	};
 };
diff --git a/arch/arm/boot/dts/vexpress-v2p-ca15-tc1.dts b/arch/arm/boot/dts/vexpress-v2p-ca15-tc1.dts
index 941b161..7e1091d 100644
--- a/arch/arm/boot/dts/vexpress-v2p-ca15-tc1.dts
+++ b/arch/arm/boot/dts/vexpress-v2p-ca15-tc1.dts
@@ -73,7 +73,10 @@
 		#address-cells = <0>;
 		interrupt-controller;
 		reg = <0x2c001000 0x1000>,
-		      <0x2c002000 0x100>;
+		      <0x2c002000 0x1000>,
+		      <0x2c004000 0x2000>,
+		      <0x2c006000 0x2000>;
+		interrupts = <1 9 0xf04>;
 	};
 
 	memory-controller@7ffd0000 {
@@ -93,6 +96,14 @@
 			     <0 91 4>;
 	};
 
+	timer {
+		compatible = "arm,armv7-timer";
+		interrupts = <1 13 0xf08>,
+			     <1 14 0xf08>,
+			     <1 11 0xf08>,
+			     <1 10 0xf08>;
+	};
+
 	pmu {
 		compatible = "arm,cortex-a15-pmu", "arm,cortex-a9-pmu";
 		interrupts = <0 68 4>,
diff --git a/arch/arm/boot/dts/vexpress-v2p-ca5s.dts b/arch/arm/boot/dts/vexpress-v2p-ca5s.dts
index 6905e66d..18917a0 100644
--- a/arch/arm/boot/dts/vexpress-v2p-ca5s.dts
+++ b/arch/arm/boot/dts/vexpress-v2p-ca5s.dts
@@ -77,13 +77,18 @@
 
 	timer@2c000600 {
 		compatible = "arm,cortex-a5-twd-timer";
-		reg = <0x2c000600 0x38>;
-		interrupts = <1 2 0x304>,
-			     <1 3 0x304>;
+		reg = <0x2c000600 0x20>;
+		interrupts = <1 13 0x304>;
+	};
+
+	watchdog@2c000620 {
+		compatible = "arm,cortex-a5-twd-wdt";
+		reg = <0x2c000620 0x20>;
+		interrupts = <1 14 0x304>;
 	};
 
 	gic: interrupt-controller@2c001000 {
-		compatible = "arm,corex-a5-gic", "arm,cortex-a9-gic";
+		compatible = "arm,cortex-a5-gic", "arm,cortex-a9-gic";
 		#interrupt-cells = <3>;
 		#address-cells = <0>;
 		interrupt-controller;
diff --git a/arch/arm/boot/dts/vexpress-v2p-ca9.dts b/arch/arm/boot/dts/vexpress-v2p-ca9.dts
index da77869..3f0c736 100644
--- a/arch/arm/boot/dts/vexpress-v2p-ca9.dts
+++ b/arch/arm/boot/dts/vexpress-v2p-ca9.dts
@@ -105,8 +105,13 @@
 	timer@1e000600 {
 		compatible = "arm,cortex-a9-twd-timer";
 		reg = <0x1e000600 0x20>;
-		interrupts = <1 2 0xf04>,
-			     <1 3 0xf04>;
+		interrupts = <1 13 0xf04>;
+	};
+
+	watchdog@1e000620 {
+		compatible = "arm,cortex-a9-twd-wdt";
+		reg = <0x1e000620 0x20>;
+		interrupts = <1 14 0xf04>;
 	};
 
 	gic: interrupt-controller@1e001000 {
diff --git a/arch/arm/common/dmabounce.c b/arch/arm/common/dmabounce.c
index 595ecd29..aa07f59 100644
--- a/arch/arm/common/dmabounce.c
+++ b/arch/arm/common/dmabounce.c
@@ -173,7 +173,8 @@
 	read_lock_irqsave(&device_info->lock, flags);
 
 	list_for_each_entry(b, &device_info->safe_buffers, node)
-		if (b->safe_dma_addr == safe_dma_addr) {
+		if (b->safe_dma_addr <= safe_dma_addr &&
+		    b->safe_dma_addr + b->size > safe_dma_addr) {
 			rb = b;
 			break;
 		}
@@ -254,7 +255,7 @@
 	if (buf == NULL) {
 		dev_err(dev, "%s: unable to map unsafe buffer %p!\n",
 		       __func__, ptr);
-		return ~0;
+		return DMA_ERROR_CODE;
 	}
 
 	dev_dbg(dev, "%s: unsafe buffer %p (dma=%#x) mapped to %p (dma=%#x)\n",
@@ -307,8 +308,9 @@
  * substitute the safe buffer for the unsafe one.
  * (basically move the buffer from an unsafe area to a safe one)
  */
-dma_addr_t __dma_map_page(struct device *dev, struct page *page,
-		unsigned long offset, size_t size, enum dma_data_direction dir)
+static dma_addr_t dmabounce_map_page(struct device *dev, struct page *page,
+		unsigned long offset, size_t size, enum dma_data_direction dir,
+		struct dma_attrs *attrs)
 {
 	dma_addr_t dma_addr;
 	int ret;
@@ -320,21 +322,20 @@
 
 	ret = needs_bounce(dev, dma_addr, size);
 	if (ret < 0)
-		return ~0;
+		return DMA_ERROR_CODE;
 
 	if (ret == 0) {
-		__dma_page_cpu_to_dev(page, offset, size, dir);
+		arm_dma_ops.sync_single_for_device(dev, dma_addr, size, dir);
 		return dma_addr;
 	}
 
 	if (PageHighMem(page)) {
 		dev_err(dev, "DMA buffer bouncing of HIGHMEM pages is not supported\n");
-		return ~0;
+		return DMA_ERROR_CODE;
 	}
 
 	return map_single(dev, page_address(page) + offset, size, dir);
 }
-EXPORT_SYMBOL(__dma_map_page);
 
 /*
  * see if a mapped address was really a "safe" buffer and if so, copy
@@ -342,8 +343,8 @@
  * the safe buffer.  (basically return things back to the way they
  * should be)
  */
-void __dma_unmap_page(struct device *dev, dma_addr_t dma_addr, size_t size,
-		enum dma_data_direction dir)
+static void dmabounce_unmap_page(struct device *dev, dma_addr_t dma_addr, size_t size,
+		enum dma_data_direction dir, struct dma_attrs *attrs)
 {
 	struct safe_buffer *buf;
 
@@ -352,31 +353,32 @@
 
 	buf = find_safe_buffer_dev(dev, dma_addr, __func__);
 	if (!buf) {
-		__dma_page_dev_to_cpu(pfn_to_page(dma_to_pfn(dev, dma_addr)),
-			dma_addr & ~PAGE_MASK, size, dir);
+		arm_dma_ops.sync_single_for_cpu(dev, dma_addr, size, dir);
 		return;
 	}
 
 	unmap_single(dev, buf, size, dir);
 }
-EXPORT_SYMBOL(__dma_unmap_page);
 
-int dmabounce_sync_for_cpu(struct device *dev, dma_addr_t addr,
-		unsigned long off, size_t sz, enum dma_data_direction dir)
+static int __dmabounce_sync_for_cpu(struct device *dev, dma_addr_t addr,
+		size_t sz, enum dma_data_direction dir)
 {
 	struct safe_buffer *buf;
+	unsigned long off;
 
-	dev_dbg(dev, "%s(dma=%#x,off=%#lx,sz=%zx,dir=%x)\n",
-		__func__, addr, off, sz, dir);
+	dev_dbg(dev, "%s(dma=%#x,sz=%zx,dir=%x)\n",
+		__func__, addr, sz, dir);
 
 	buf = find_safe_buffer_dev(dev, addr, __func__);
 	if (!buf)
 		return 1;
 
+	off = addr - buf->safe_dma_addr;
+
 	BUG_ON(buf->direction != dir);
 
-	dev_dbg(dev, "%s: unsafe buffer %p (dma=%#x) mapped to %p (dma=%#x)\n",
-		__func__, buf->ptr, virt_to_dma(dev, buf->ptr),
+	dev_dbg(dev, "%s: unsafe buffer %p (dma=%#x off=%#lx) mapped to %p (dma=%#x)\n",
+		__func__, buf->ptr, virt_to_dma(dev, buf->ptr), off,
 		buf->safe, buf->safe_dma_addr);
 
 	DO_STATS(dev->archdata.dmabounce->bounce_count++);
@@ -388,24 +390,35 @@
 	}
 	return 0;
 }
-EXPORT_SYMBOL(dmabounce_sync_for_cpu);
 
-int dmabounce_sync_for_device(struct device *dev, dma_addr_t addr,
-		unsigned long off, size_t sz, enum dma_data_direction dir)
+static void dmabounce_sync_for_cpu(struct device *dev,
+		dma_addr_t handle, size_t size, enum dma_data_direction dir)
+{
+	if (!__dmabounce_sync_for_cpu(dev, handle, size, dir))
+		return;
+
+	arm_dma_ops.sync_single_for_cpu(dev, handle, size, dir);
+}
+
+static int __dmabounce_sync_for_device(struct device *dev, dma_addr_t addr,
+		size_t sz, enum dma_data_direction dir)
 {
 	struct safe_buffer *buf;
+	unsigned long off;
 
-	dev_dbg(dev, "%s(dma=%#x,off=%#lx,sz=%zx,dir=%x)\n",
-		__func__, addr, off, sz, dir);
+	dev_dbg(dev, "%s(dma=%#x,sz=%zx,dir=%x)\n",
+		__func__, addr, sz, dir);
 
 	buf = find_safe_buffer_dev(dev, addr, __func__);
 	if (!buf)
 		return 1;
 
+	off = addr - buf->safe_dma_addr;
+
 	BUG_ON(buf->direction != dir);
 
-	dev_dbg(dev, "%s: unsafe buffer %p (dma=%#x) mapped to %p (dma=%#x)\n",
-		__func__, buf->ptr, virt_to_dma(dev, buf->ptr),
+	dev_dbg(dev, "%s: unsafe buffer %p (dma=%#x off=%#lx) mapped to %p (dma=%#x)\n",
+		__func__, buf->ptr, virt_to_dma(dev, buf->ptr), off,
 		buf->safe, buf->safe_dma_addr);
 
 	DO_STATS(dev->archdata.dmabounce->bounce_count++);
@@ -417,7 +430,38 @@
 	}
 	return 0;
 }
-EXPORT_SYMBOL(dmabounce_sync_for_device);
+
+static void dmabounce_sync_for_device(struct device *dev,
+		dma_addr_t handle, size_t size, enum dma_data_direction dir)
+{
+	if (!__dmabounce_sync_for_device(dev, handle, size, dir))
+		return;
+
+	arm_dma_ops.sync_single_for_device(dev, handle, size, dir);
+}
+
+static int dmabounce_set_mask(struct device *dev, u64 dma_mask)
+{
+	if (dev->archdata.dmabounce)
+		return 0;
+
+	return arm_dma_ops.set_dma_mask(dev, dma_mask);
+}
+
+static struct dma_map_ops dmabounce_ops = {
+	.alloc			= arm_dma_alloc,
+	.free			= arm_dma_free,
+	.mmap			= arm_dma_mmap,
+	.map_page		= dmabounce_map_page,
+	.unmap_page		= dmabounce_unmap_page,
+	.sync_single_for_cpu	= dmabounce_sync_for_cpu,
+	.sync_single_for_device	= dmabounce_sync_for_device,
+	.map_sg			= arm_dma_map_sg,
+	.unmap_sg		= arm_dma_unmap_sg,
+	.sync_sg_for_cpu	= arm_dma_sync_sg_for_cpu,
+	.sync_sg_for_device	= arm_dma_sync_sg_for_device,
+	.set_dma_mask		= dmabounce_set_mask,
+};
 
 static int dmabounce_init_pool(struct dmabounce_pool *pool, struct device *dev,
 		const char *name, unsigned long size)
@@ -479,6 +523,7 @@
 #endif
 
 	dev->archdata.dmabounce = device_info;
+	set_dma_ops(dev, &dmabounce_ops);
 
 	dev_info(dev, "dmabounce: registered device\n");
 
@@ -497,6 +542,7 @@
 	struct dmabounce_device_info *device_info = dev->archdata.dmabounce;
 
 	dev->archdata.dmabounce = NULL;
+	set_dma_ops(dev, NULL);
 
 	if (!device_info) {
 		dev_warn(dev,
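The dmabounce hunks above drop the exported __dma_map_page()/__dma_unmap_page() helpers in favour of a private dmabounce_ops table installed per device via set_dma_ops(). A minimal sketch of what a caller now sees, assuming only what these hunks define (get_dma_ops() falling back to arm_dma_ops, and dmabounce_ops supplying .map_page); the function name and direction are illustrative, not part of the patch:

#include <linux/dma-mapping.h>

/*
 * Illustrative only: a driver-level mapping now dispatches through the
 * per-device dma_map_ops, so a device registered with dmabounce gets the
 * bouncing map_page implementation while every other device keeps
 * arm_dma_ops.
 */
static dma_addr_t example_map(struct device *dev, struct page *page,
			      unsigned long offset, size_t size)
{
	struct dma_map_ops *ops = get_dma_ops(dev); /* dmabounce_ops or arm_dma_ops */

	/* roughly what the generic dma_map_page() wrapper expands to (debug hooks omitted) */
	return ops->map_page(dev, page, offset, size, DMA_TO_DEVICE, NULL);
}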
diff --git a/arch/arm/configs/imx_v4_v5_defconfig b/arch/arm/configs/imx_v4_v5_defconfig
index 09a0296..e05a2f1 100644
--- a/arch/arm/configs/imx_v4_v5_defconfig
+++ b/arch/arm/configs/imx_v4_v5_defconfig
@@ -33,6 +33,7 @@
 CONFIG_MACH_PCA100=y
 CONFIG_MACH_MXT_TD60=y
 CONFIG_MACH_IMX27IPCAM=y
+CONFIG_MACH_IMX27_DT=y
 CONFIG_MXC_IRQ_PRIOR=y
 CONFIG_MXC_PWM=y
 CONFIG_NO_HZ=y
@@ -172,7 +173,7 @@
 CONFIG_RTC_CLASS=y
 CONFIG_RTC_DRV_PCF8563=y
 CONFIG_RTC_DRV_IMXDI=y
-CONFIG_RTC_MXC=y
+CONFIG_RTC_DRV_MXC=y
 CONFIG_DMADEVICES=y
 CONFIG_IMX_SDMA=y
 CONFIG_IMX_DMA=y
diff --git a/arch/arm/configs/imx_v6_v7_defconfig b/arch/arm/configs/imx_v6_v7_defconfig
index dc6f641..b1d3675 100644
--- a/arch/arm/configs/imx_v6_v7_defconfig
+++ b/arch/arm/configs/imx_v6_v7_defconfig
@@ -64,6 +64,12 @@
 # CONFIG_WIRELESS is not set
 CONFIG_DEVTMPFS=y
 CONFIG_DEVTMPFS_MOUNT=y
+CONFIG_MTD=y
+CONFIG_MTD_OF_PARTS=y
+CONFIG_MTD_CHAR=y
+CONFIG_MTD_DATAFLASH=y
+CONFIG_MTD_M25P80=y
+CONFIG_MTD_SST25L=y
 # CONFIG_STANDALONE is not set
 CONFIG_CONNECTOR=y
 CONFIG_BLK_DEV_LOOP=y
@@ -172,7 +178,7 @@
 CONFIG_LEDS_CLASS=y
 CONFIG_RTC_CLASS=y
 CONFIG_RTC_INTF_DEV_UIE_EMUL=y
-CONFIG_RTC_MXC=y
+CONFIG_RTC_DRV_MXC=y
 CONFIG_DMADEVICES=y
 CONFIG_IMX_SDMA=y
 CONFIG_EXT2_FS=y
diff --git a/arch/arm/configs/mxs_defconfig b/arch/arm/configs/mxs_defconfig
index 1ebbf45..5406c23 100644
--- a/arch/arm/configs/mxs_defconfig
+++ b/arch/arm/configs/mxs_defconfig
@@ -22,6 +22,7 @@
 # CONFIG_IOSCHED_DEADLINE is not set
 # CONFIG_IOSCHED_CFQ is not set
 CONFIG_ARCH_MXS=y
+CONFIG_MACH_MXS_DT=y
 CONFIG_MACH_MX23EVK=y
 CONFIG_MACH_MX28EVK=y
 CONFIG_MACH_STMP378X_DEVB=y
diff --git a/arch/arm/configs/prima2_defconfig b/arch/arm/configs/prima2_defconfig
new file mode 100644
index 0000000..c328ac6
--- /dev/null
+++ b/arch/arm/configs/prima2_defconfig
@@ -0,0 +1,69 @@
+CONFIG_EXPERIMENTAL=y
+CONFIG_RELAY=y
+CONFIG_BLK_DEV_INITRD=y
+CONFIG_KALLSYMS_ALL=y
+CONFIG_MODULES=y
+CONFIG_MODULE_UNLOAD=y
+# CONFIG_BLK_DEV_BSG is not set
+CONFIG_PARTITION_ADVANCED=y
+CONFIG_BSD_DISKLABEL=y
+CONFIG_SOLARIS_X86_PARTITION=y
+CONFIG_ARCH_PRIMA2=y
+CONFIG_NO_HZ=y
+CONFIG_HIGH_RES_TIMERS=y
+CONFIG_PREEMPT=y
+CONFIG_AEABI=y
+CONFIG_KEXEC=y
+CONFIG_BINFMT_MISC=y
+CONFIG_PM_RUNTIME=y
+CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug"
+CONFIG_BLK_DEV_LOOP=y
+CONFIG_BLK_DEV_RAM=y
+CONFIG_BLK_DEV_RAM_SIZE=8192
+CONFIG_SCSI=y
+CONFIG_BLK_DEV_SD=y
+CONFIG_CHR_DEV_SG=y
+CONFIG_INPUT_EVDEV=y
+# CONFIG_INPUT_MOUSE is not set
+CONFIG_INPUT_TOUCHSCREEN=y
+CONFIG_SERIAL_SIRFSOC=y
+CONFIG_SERIAL_SIRFSOC_CONSOLE=y
+CONFIG_HW_RANDOM=y
+CONFIG_I2C=y
+CONFIG_I2C_CHARDEV=y
+CONFIG_I2C_SIRF=y
+CONFIG_SPI=y
+CONFIG_SPI_SIRF=y
+CONFIG_SPI_SPIDEV=y
+# CONFIG_HWMON is not set
+# CONFIG_HID_SUPPORT is not set
+CONFIG_USB_GADGET=y
+CONFIG_USB_FILE_STORAGE=m
+CONFIG_USB_MASS_STORAGE=m
+CONFIG_MMC=y
+CONFIG_MMC_SDHCI=y
+CONFIG_MMC_SDHCI_PLTFM=y
+CONFIG_DMADEVICES=y
+CONFIG_DMADEVICES_DEBUG=y
+CONFIG_DMADEVICES_VDEBUG=y
+CONFIG_SIRF_DMA=y
+# CONFIG_IOMMU_SUPPORT is not set
+CONFIG_EXT2_FS=y
+CONFIG_MSDOS_FS=y
+CONFIG_VFAT_FS=y
+CONFIG_TMPFS=y
+CONFIG_TMPFS_POSIX_ACL=y
+CONFIG_CRAMFS=y
+CONFIG_ROMFS_FS=y
+CONFIG_NLS_CODEPAGE_437=y
+CONFIG_NLS_ASCII=y
+CONFIG_NLS_ISO8859_1=y
+CONFIG_MAGIC_SYSRQ=y
+CONFIG_DEBUG_SECTION_MISMATCH=y
+CONFIG_DEBUG_KERNEL=y
+# CONFIG_DEBUG_PREEMPT is not set
+CONFIG_DEBUG_RT_MUTEXES=y
+CONFIG_DEBUG_SPINLOCK=y
+CONFIG_DEBUG_MUTEXES=y
+CONFIG_DEBUG_INFO=y
+CONFIG_CRC_CCITT=y
diff --git a/arch/arm/configs/spear13xx_defconfig b/arch/arm/configs/spear13xx_defconfig
new file mode 100644
index 0000000..1fdb826
--- /dev/null
+++ b/arch/arm/configs/spear13xx_defconfig
@@ -0,0 +1,95 @@
+CONFIG_EXPERIMENTAL=y
+CONFIG_SYSVIPC=y
+CONFIG_BSD_PROCESS_ACCT=y
+CONFIG_BLK_DEV_INITRD=y
+CONFIG_MODULES=y
+CONFIG_MODULE_UNLOAD=y
+CONFIG_MODVERSIONS=y
+CONFIG_PARTITION_ADVANCED=y
+CONFIG_PLAT_SPEAR=y
+CONFIG_ARCH_SPEAR13XX=y
+CONFIG_MACH_SPEAR1310=y
+CONFIG_MACH_SPEAR1340=y
+# CONFIG_SWP_EMULATE is not set
+CONFIG_SMP=y
+# CONFIG_SMP_ON_UP is not set
+# CONFIG_ARM_CPU_TOPOLOGY is not set
+CONFIG_ARM_APPENDED_DTB=y
+CONFIG_ARM_ATAG_DTB_COMPAT=y
+CONFIG_BINFMT_MISC=y
+CONFIG_NET=y
+CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug"
+CONFIG_MTD=y
+CONFIG_MTD_OF_PARTS=y
+CONFIG_MTD_CHAR=y
+CONFIG_MTD_BLOCK=y
+CONFIG_MTD_NAND=y
+CONFIG_MTD_NAND_FSMC=y
+CONFIG_BLK_DEV_RAM=y
+CONFIG_BLK_DEV_RAM_SIZE=16384
+CONFIG_ATA=y
+# CONFIG_SATA_PMP is not set
+CONFIG_SATA_AHCI_PLATFORM=y
+CONFIG_PATA_ARASAN_CF=y
+CONFIG_NETDEVICES=y
+# CONFIG_NET_VENDOR_BROADCOM is not set
+# CONFIG_NET_VENDOR_CIRRUS is not set
+# CONFIG_NET_VENDOR_FARADAY is not set
+# CONFIG_NET_VENDOR_INTEL is not set
+# CONFIG_NET_VENDOR_MICREL is not set
+# CONFIG_NET_VENDOR_NATSEMI is not set
+# CONFIG_NET_VENDOR_SEEQ is not set
+# CONFIG_NET_VENDOR_SMSC is not set
+CONFIG_STMMAC_ETH=y
+# CONFIG_WLAN is not set
+CONFIG_INPUT_FF_MEMLESS=y
+# CONFIG_INPUT_MOUSEDEV_PSAUX is not set
+# CONFIG_KEYBOARD_ATKBD is not set
+CONFIG_KEYBOARD_SPEAR=y
+# CONFIG_INPUT_MOUSE is not set
+# CONFIG_LEGACY_PTYS is not set
+CONFIG_SERIAL_AMBA_PL011=y
+CONFIG_SERIAL_AMBA_PL011_CONSOLE=y
+# CONFIG_HW_RANDOM is not set
+CONFIG_RAW_DRIVER=y
+CONFIG_MAX_RAW_DEVS=8192
+CONFIG_I2C=y
+CONFIG_I2C_DESIGNWARE_PLATFORM=y
+CONFIG_SPI=y
+CONFIG_SPI_PL022=y
+CONFIG_GPIO_SYSFS=y
+CONFIG_GPIO_PL061=y
+# CONFIG_HWMON is not set
+CONFIG_WATCHDOG=y
+CONFIG_MPCORE_WATCHDOG=y
+# CONFIG_HID_SUPPORT is not set
+CONFIG_USB=y
+# CONFIG_USB_DEVICE_CLASS is not set
+CONFIG_USB_EHCI_HCD=y
+CONFIG_USB_OHCI_HCD=y
+CONFIG_MMC=y
+CONFIG_MMC_SDHCI=y
+CONFIG_MMC_SDHCI_SPEAR=y
+CONFIG_RTC_CLASS=y
+CONFIG_DMADEVICES=y
+CONFIG_DW_DMAC=y
+CONFIG_DMATEST=m
+CONFIG_EXT2_FS=y
+CONFIG_EXT2_FS_XATTR=y
+CONFIG_EXT2_FS_SECURITY=y
+CONFIG_EXT3_FS=y
+CONFIG_EXT3_FS_SECURITY=y
+CONFIG_AUTOFS4_FS=m
+CONFIG_MSDOS_FS=m
+CONFIG_VFAT_FS=m
+CONFIG_FAT_DEFAULT_IOCHARSET="ascii"
+CONFIG_TMPFS=y
+CONFIG_JFFS2_FS=y
+CONFIG_NLS_DEFAULT="utf8"
+CONFIG_NLS_CODEPAGE_437=y
+CONFIG_NLS_ASCII=m
+CONFIG_MAGIC_SYSRQ=y
+CONFIG_DEBUG_FS=y
+CONFIG_DEBUG_KERNEL=y
+CONFIG_DEBUG_SPINLOCK=y
+CONFIG_DEBUG_INFO=y
diff --git a/arch/arm/configs/spear3xx_defconfig b/arch/arm/configs/spear3xx_defconfig
index 7ed4291..865980c 100644
--- a/arch/arm/configs/spear3xx_defconfig
+++ b/arch/arm/configs/spear3xx_defconfig
@@ -14,6 +14,9 @@
 CONFIG_NET=y
 CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug"
 CONFIG_MTD=y
+CONFIG_MTD_OF_PARTS=y
+CONFIG_MTD_CHAR=y
+CONFIG_MTD_BLOCK=y
 CONFIG_MTD_NAND=y
 CONFIG_MTD_NAND_FSMC=y
 CONFIG_BLK_DEV_RAM=y
@@ -73,6 +76,7 @@
 CONFIG_VFAT_FS=m
 CONFIG_FAT_DEFAULT_IOCHARSET="ascii"
 CONFIG_TMPFS=y
+CONFIG_JFFS2_FS=y
 CONFIG_NLS_DEFAULT="utf8"
 CONFIG_NLS_CODEPAGE_437=y
 CONFIG_NLS_ASCII=m
diff --git a/arch/arm/configs/spear6xx_defconfig b/arch/arm/configs/spear6xx_defconfig
index cf94bc7..a2a1265 100644
--- a/arch/arm/configs/spear6xx_defconfig
+++ b/arch/arm/configs/spear6xx_defconfig
@@ -8,11 +8,13 @@
 CONFIG_PARTITION_ADVANCED=y
 CONFIG_PLAT_SPEAR=y
 CONFIG_ARCH_SPEAR6XX=y
-CONFIG_BOARD_SPEAR600_DT=y
 CONFIG_BINFMT_MISC=y
 CONFIG_NET=y
 CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug"
 CONFIG_MTD=y
+CONFIG_MTD_OF_PARTS=y
+CONFIG_MTD_CHAR=y
+CONFIG_MTD_BLOCK=y
 CONFIG_MTD_NAND=y
 CONFIG_MTD_NAND_FSMC=y
 CONFIG_BLK_DEV_RAM=y
@@ -64,6 +66,7 @@
 CONFIG_VFAT_FS=m
 CONFIG_FAT_DEFAULT_IOCHARSET="ascii"
 CONFIG_TMPFS=y
+CONFIG_JFFS2_FS=y
 CONFIG_NLS_DEFAULT="utf8"
 CONFIG_NLS_CODEPAGE_437=y
 CONFIG_NLS_ASCII=m
diff --git a/arch/arm/configs/tegra_defconfig b/arch/arm/configs/tegra_defconfig
index 351d670..1198dd6 100644
--- a/arch/arm/configs/tegra_defconfig
+++ b/arch/arm/configs/tegra_defconfig
@@ -45,6 +45,7 @@
 CONFIG_CPU_FREQ_DEFAULT_GOV_ONDEMAND=y
 CONFIG_CPU_IDLE=y
 CONFIG_VFP=y
+CONFIG_PM_RUNTIME=y
 CONFIG_NET=y
 CONFIG_PACKET=y
 CONFIG_UNIX=y
@@ -91,6 +92,8 @@
 CONFIG_USB_NET_SMSC95XX=y
 # CONFIG_WLAN is not set
 CONFIG_INPUT_EVDEV=y
+CONFIG_INPUT_MISC=y
+CONFIG_INPUT_MPU3050=y
 # CONFIG_VT is not set
 # CONFIG_LEGACY_PTYS is not set
 # CONFIG_DEVKMEM is not set
@@ -103,12 +106,15 @@
 CONFIG_I2C_TEGRA=y
 CONFIG_SPI=y
 CONFIG_SPI_TEGRA=y
+CONFIG_POWER_SUPPLY=y
+CONFIG_BATTERY_SBS=y
 CONFIG_SENSORS_LM90=y
 CONFIG_MFD_TPS6586X=y
 CONFIG_REGULATOR=y
 CONFIG_REGULATOR_FIXED_VOLTAGE=y
 CONFIG_REGULATOR_VIRTUAL_CONSUMER=y
 CONFIG_REGULATOR_GPIO=y
+CONFIG_REGULATOR_TPS62360=y
 CONFIG_REGULATOR_TPS6586X=y
 CONFIG_SOUND=y
 CONFIG_SND=y
@@ -133,16 +139,19 @@
 CONFIG_MMC_SDHCI_PLTFM=y
 CONFIG_MMC_SDHCI_TEGRA=y
 CONFIG_RTC_CLASS=y
+CONFIG_RTC_DRV_EM3027=y
 CONFIG_RTC_DRV_TEGRA=y
 CONFIG_STAGING=y
-CONFIG_IIO=y
 CONFIG_SENSORS_ISL29018=y
+CONFIG_SENSORS_ISL29028=y
 CONFIG_SENSORS_AK8975=y
 CONFIG_MFD_NVEC=y
 CONFIG_KEYBOARD_NVEC=y
 CONFIG_SERIO_NVEC_PS2=y
 CONFIG_TEGRA_IOMMU_GART=y
 CONFIG_TEGRA_IOMMU_SMMU=y
+CONFIG_MEMORY=y
+CONFIG_IIO=y
 CONFIG_EXT2_FS=y
 CONFIG_EXT2_FS_XATTR=y
 CONFIG_EXT2_FS_POSIX_ACL=y
diff --git a/arch/arm/configs/u8500_defconfig b/arch/arm/configs/u8500_defconfig
index 7e84f45..2d4f661 100644
--- a/arch/arm/configs/u8500_defconfig
+++ b/arch/arm/configs/u8500_defconfig
@@ -75,6 +75,7 @@
 CONFIG_AB8500_CORE=y
 CONFIG_REGULATOR=y
 CONFIG_REGULATOR_AB8500=y
+CONFIG_REGULATOR_FIXED_VOLTAGE=y
 # CONFIG_HID_SUPPORT is not set
 CONFIG_USB_GADGET=y
 CONFIG_AB8500_USB=y
diff --git a/arch/arm/include/asm/device.h b/arch/arm/include/asm/device.h
index 7aa3680..b69c0d3 100644
--- a/arch/arm/include/asm/device.h
+++ b/arch/arm/include/asm/device.h
@@ -7,12 +7,16 @@
 #define ASMARM_DEVICE_H
 
 struct dev_archdata {
+	struct dma_map_ops	*dma_ops;
 #ifdef CONFIG_DMABOUNCE
 	struct dmabounce_device_info *dmabounce;
 #endif
 #ifdef CONFIG_IOMMU_API
 	void *iommu; /* private IOMMU data */
 #endif
+#ifdef CONFIG_ARM_DMA_USE_IOMMU
+	struct dma_iommu_mapping	*mapping;
+#endif
 };
 
 struct omap_device;
diff --git a/arch/arm/include/asm/dma-contiguous.h b/arch/arm/include/asm/dma-contiguous.h
new file mode 100644
index 0000000..3ed37b4
--- /dev/null
+++ b/arch/arm/include/asm/dma-contiguous.h
@@ -0,0 +1,15 @@
+#ifndef ASMARM_DMA_CONTIGUOUS_H
+#define ASMARM_DMA_CONTIGUOUS_H
+
+#ifdef __KERNEL__
+#ifdef CONFIG_CMA
+
+#include <linux/types.h>
+#include <asm-generic/dma-contiguous.h>
+
+void dma_contiguous_early_fixup(phys_addr_t base, unsigned long size);
+
+#endif
+#endif
+
+#endif
diff --git a/arch/arm/include/asm/dma-iommu.h b/arch/arm/include/asm/dma-iommu.h
new file mode 100644
index 0000000..799b094
--- /dev/null
+++ b/arch/arm/include/asm/dma-iommu.h
@@ -0,0 +1,34 @@
+#ifndef ASMARM_DMA_IOMMU_H
+#define ASMARM_DMA_IOMMU_H
+
+#ifdef __KERNEL__
+
+#include <linux/mm_types.h>
+#include <linux/scatterlist.h>
+#include <linux/dma-debug.h>
+#include <linux/kmemcheck.h>
+
+struct dma_iommu_mapping {
+	/* iommu specific data */
+	struct iommu_domain	*domain;
+
+	void			*bitmap;
+	size_t			bits;
+	unsigned int		order;
+	dma_addr_t		base;
+
+	spinlock_t		lock;
+	struct kref		kref;
+};
+
+struct dma_iommu_mapping *
+arm_iommu_create_mapping(struct bus_type *bus, dma_addr_t base, size_t size,
+			 int order);
+
+void arm_iommu_release_mapping(struct dma_iommu_mapping *mapping);
+
+int arm_iommu_attach_device(struct device *dev,
+					struct dma_iommu_mapping *mapping);
+
+#endif /* __KERNEL__ */
+#endif
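The header above only declares the IOMMU mapping API; a hedged sketch of how a platform might consume it follows. The device pointer, the 128 MiB window at bus address 0x80000000, and the assumption that creation failures follow the usual ERR_PTR convention are all illustrative, not taken from this patch:

#include <linux/err.h>
#include <linux/platform_device.h>
#include <asm/dma-iommu.h>

static int example_attach_iommu(struct device *dev)
{
	struct dma_iommu_mapping *mapping;

	/* one IOVA window for this device: 128 MiB at 0x80000000, order 0 */
	mapping = arm_iommu_create_mapping(&platform_bus_type,
					   0x80000000, 0x08000000, 0);
	if (IS_ERR(mapping))
		return PTR_ERR(mapping);

	/* from here on, dev's DMA mappings go through the IOMMU */
	if (arm_iommu_attach_device(dev, mapping)) {
		arm_iommu_release_mapping(mapping);
		return -ENODEV;
	}

	return 0;
}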
diff --git a/arch/arm/include/asm/dma-mapping.h b/arch/arm/include/asm/dma-mapping.h
index cb3b7c9..bbef15d 100644
--- a/arch/arm/include/asm/dma-mapping.h
+++ b/arch/arm/include/asm/dma-mapping.h
@@ -5,11 +5,35 @@
 
 #include <linux/mm_types.h>
 #include <linux/scatterlist.h>
+#include <linux/dma-attrs.h>
 #include <linux/dma-debug.h>
 
 #include <asm-generic/dma-coherent.h>
 #include <asm/memory.h>
 
+#define DMA_ERROR_CODE	(~0)
+extern struct dma_map_ops arm_dma_ops;
+
+static inline struct dma_map_ops *get_dma_ops(struct device *dev)
+{
+	if (dev && dev->archdata.dma_ops)
+		return dev->archdata.dma_ops;
+	return &arm_dma_ops;
+}
+
+static inline void set_dma_ops(struct device *dev, struct dma_map_ops *ops)
+{
+	BUG_ON(!dev);
+	dev->archdata.dma_ops = ops;
+}
+
+#include <asm-generic/dma-mapping-common.h>
+
+static inline int dma_set_mask(struct device *dev, u64 mask)
+{
+	return get_dma_ops(dev)->set_dma_mask(dev, mask);
+}
+
 #ifdef __arch_page_to_dma
 #error Please update to __arch_pfn_to_dma
 #endif
@@ -62,68 +86,11 @@
 #endif
 
 /*
- * The DMA API is built upon the notion of "buffer ownership".  A buffer
- * is either exclusively owned by the CPU (and therefore may be accessed
- * by it) or exclusively owned by the DMA device.  These helper functions
- * represent the transitions between these two ownership states.
- *
- * Note, however, that on later ARMs, this notion does not work due to
- * speculative prefetches.  We model our approach on the assumption that
- * the CPU does do speculative prefetches, which means we clean caches
- * before transfers and delay cache invalidation until transfer completion.
- *
- * Private support functions: these are not part of the API and are
- * liable to change.  Drivers must not use these.
- */
-static inline void __dma_single_cpu_to_dev(const void *kaddr, size_t size,
-	enum dma_data_direction dir)
-{
-	extern void ___dma_single_cpu_to_dev(const void *, size_t,
-		enum dma_data_direction);
-
-	if (!arch_is_coherent())
-		___dma_single_cpu_to_dev(kaddr, size, dir);
-}
-
-static inline void __dma_single_dev_to_cpu(const void *kaddr, size_t size,
-	enum dma_data_direction dir)
-{
-	extern void ___dma_single_dev_to_cpu(const void *, size_t,
-		enum dma_data_direction);
-
-	if (!arch_is_coherent())
-		___dma_single_dev_to_cpu(kaddr, size, dir);
-}
-
-static inline void __dma_page_cpu_to_dev(struct page *page, unsigned long off,
-	size_t size, enum dma_data_direction dir)
-{
-	extern void ___dma_page_cpu_to_dev(struct page *, unsigned long,
-		size_t, enum dma_data_direction);
-
-	if (!arch_is_coherent())
-		___dma_page_cpu_to_dev(page, off, size, dir);
-}
-
-static inline void __dma_page_dev_to_cpu(struct page *page, unsigned long off,
-	size_t size, enum dma_data_direction dir)
-{
-	extern void ___dma_page_dev_to_cpu(struct page *, unsigned long,
-		size_t, enum dma_data_direction);
-
-	if (!arch_is_coherent())
-		___dma_page_dev_to_cpu(page, off, size, dir);
-}
-
-extern int dma_supported(struct device *, u64);
-extern int dma_set_mask(struct device *, u64);
-
-/*
  * DMA errors are defined by all-bits-set in the DMA address.
  */
 static inline int dma_mapping_error(struct device *dev, dma_addr_t dma_addr)
 {
-	return dma_addr == ~0;
+	return dma_addr == DMA_ERROR_CODE;
 }
 
 /*
@@ -141,69 +108,118 @@
 {
 }
 
+extern int dma_supported(struct device *dev, u64 mask);
+
 /**
- * dma_alloc_coherent - allocate consistent memory for DMA
+ * arm_dma_alloc - allocate consistent memory for DMA
  * @dev: valid struct device pointer, or NULL for ISA and EISA-like devices
  * @size: required memory size
  * @handle: bus-specific DMA address
+ * @attrs: optional attributes that specify mapping properties
  *
- * Allocate some uncached, unbuffered memory for a device for
- * performing DMA.  This function allocates pages, and will
- * return the CPU-viewed address, and sets @handle to be the
- * device-viewed address.
+ * Allocate some memory for a device for performing DMA.  This function
+ * allocates pages, and will return the CPU-viewed address, and sets @handle
+ * to be the device-viewed address.
  */
-extern void *dma_alloc_coherent(struct device *, size_t, dma_addr_t *, gfp_t);
+extern void *arm_dma_alloc(struct device *dev, size_t size, dma_addr_t *handle,
+			   gfp_t gfp, struct dma_attrs *attrs);
+
+#define dma_alloc_coherent(d, s, h, f) dma_alloc_attrs(d, s, h, f, NULL)
+
+static inline void *dma_alloc_attrs(struct device *dev, size_t size,
+				       dma_addr_t *dma_handle, gfp_t flag,
+				       struct dma_attrs *attrs)
+{
+	struct dma_map_ops *ops = get_dma_ops(dev);
+	void *cpu_addr;
+	BUG_ON(!ops);
+
+	cpu_addr = ops->alloc(dev, size, dma_handle, flag, attrs);
+	debug_dma_alloc_coherent(dev, size, *dma_handle, cpu_addr);
+	return cpu_addr;
+}
 
 /**
- * dma_free_coherent - free memory allocated by dma_alloc_coherent
+ * arm_dma_free - free memory allocated by arm_dma_alloc
  * @dev: valid struct device pointer, or NULL for ISA and EISA-like devices
  * @size: size of memory originally requested in dma_alloc_coherent
  * @cpu_addr: CPU-view address returned from dma_alloc_coherent
  * @handle: device-view address returned from dma_alloc_coherent
+ * @attrs: optional attributes that specify mapping properties
  *
  * Free (and unmap) a DMA buffer previously allocated by
- * dma_alloc_coherent().
+ * arm_dma_alloc().
  *
  * References to memory and mappings associated with cpu_addr/handle
  * during and after this call executing are illegal.
  */
-extern void dma_free_coherent(struct device *, size_t, void *, dma_addr_t);
+extern void arm_dma_free(struct device *dev, size_t size, void *cpu_addr,
+			 dma_addr_t handle, struct dma_attrs *attrs);
+
+#define dma_free_coherent(d, s, c, h) dma_free_attrs(d, s, c, h, NULL)
+
+static inline void dma_free_attrs(struct device *dev, size_t size,
+				     void *cpu_addr, dma_addr_t dma_handle,
+				     struct dma_attrs *attrs)
+{
+	struct dma_map_ops *ops = get_dma_ops(dev);
+	BUG_ON(!ops);
+
+	debug_dma_free_coherent(dev, size, cpu_addr, dma_handle);
+	ops->free(dev, size, cpu_addr, dma_handle, attrs);
+}
 
 /**
- * dma_mmap_coherent - map a coherent DMA allocation into user space
+ * arm_dma_mmap - map a coherent DMA allocation into user space
  * @dev: valid struct device pointer, or NULL for ISA and EISA-like devices
  * @vma: vm_area_struct describing requested user mapping
  * @cpu_addr: kernel CPU-view address returned from dma_alloc_coherent
  * @handle: device-view address returned from dma_alloc_coherent
  * @size: size of memory originally requested in dma_alloc_coherent
+ * @attrs: optional attributes that specify mapping properties
  *
  * Map a coherent DMA buffer previously allocated by dma_alloc_coherent
  * into user space.  The coherent DMA buffer must not be freed by the
  * driver until the user space mapping has been released.
  */
-int dma_mmap_coherent(struct device *, struct vm_area_struct *,
-		void *, dma_addr_t, size_t);
+extern int arm_dma_mmap(struct device *dev, struct vm_area_struct *vma,
+			void *cpu_addr, dma_addr_t dma_addr, size_t size,
+			struct dma_attrs *attrs);
 
+#define dma_mmap_coherent(d, v, c, h, s) dma_mmap_attrs(d, v, c, h, s, NULL)
 
-/**
- * dma_alloc_writecombine - allocate writecombining memory for DMA
- * @dev: valid struct device pointer, or NULL for ISA and EISA-like devices
- * @size: required memory size
- * @handle: bus-specific DMA address
- *
- * Allocate some uncached, buffered memory for a device for
- * performing DMA.  This function allocates pages, and will
- * return the CPU-viewed address, and sets @handle to be the
- * device-viewed address.
- */
-extern void *dma_alloc_writecombine(struct device *, size_t, dma_addr_t *,
-		gfp_t);
+static inline int dma_mmap_attrs(struct device *dev, struct vm_area_struct *vma,
+				  void *cpu_addr, dma_addr_t dma_addr,
+				  size_t size, struct dma_attrs *attrs)
+{
+	struct dma_map_ops *ops = get_dma_ops(dev);
+	BUG_ON(!ops);
+	return ops->mmap(dev, vma, cpu_addr, dma_addr, size, attrs);
+}
 
-#define dma_free_writecombine(dev,size,cpu_addr,handle) \
-	dma_free_coherent(dev,size,cpu_addr,handle)
+static inline void *dma_alloc_writecombine(struct device *dev, size_t size,
+				       dma_addr_t *dma_handle, gfp_t flag)
+{
+	DEFINE_DMA_ATTRS(attrs);
+	dma_set_attr(DMA_ATTR_WRITE_COMBINE, &attrs);
+	return dma_alloc_attrs(dev, size, dma_handle, flag, &attrs);
+}
 
-int dma_mmap_writecombine(struct device *, struct vm_area_struct *,
-		void *, dma_addr_t, size_t);
+static inline void dma_free_writecombine(struct device *dev, size_t size,
+				     void *cpu_addr, dma_addr_t dma_handle)
+{
+	DEFINE_DMA_ATTRS(attrs);
+	dma_set_attr(DMA_ATTR_WRITE_COMBINE, &attrs);
+	return dma_free_attrs(dev, size, cpu_addr, dma_handle, &attrs);
+}
+
+static inline int dma_mmap_writecombine(struct device *dev, struct vm_area_struct *vma,
+		      void *cpu_addr, dma_addr_t dma_addr, size_t size)
+{
+	DEFINE_DMA_ATTRS(attrs);
+	dma_set_attr(DMA_ATTR_WRITE_COMBINE, &attrs);
+	return dma_mmap_attrs(dev, vma, cpu_addr, dma_addr, size, &attrs);
+}
 
 /*
  * This can be called during boot to increase the size of the consistent
@@ -212,8 +228,6 @@
  */
 extern void __init init_consistent_dma_size(unsigned long size);
 
-
-#ifdef CONFIG_DMABOUNCE
 /*
  * For SA-1111, IXP425, and ADI systems  the dma-mapping functions are "magic"
  * and utilize bounce buffers as needed to work around limited DMA windows.
@@ -253,222 +267,19 @@
  */
 extern void dmabounce_unregister_dev(struct device *);
 
-/*
- * The DMA API, implemented by dmabounce.c.  See below for descriptions.
- */
-extern dma_addr_t __dma_map_page(struct device *, struct page *,
-		unsigned long, size_t, enum dma_data_direction);
-extern void __dma_unmap_page(struct device *, dma_addr_t, size_t,
-		enum dma_data_direction);
 
-/*
- * Private functions
- */
-int dmabounce_sync_for_cpu(struct device *, dma_addr_t, unsigned long,
-		size_t, enum dma_data_direction);
-int dmabounce_sync_for_device(struct device *, dma_addr_t, unsigned long,
-		size_t, enum dma_data_direction);
-#else
-static inline int dmabounce_sync_for_cpu(struct device *d, dma_addr_t addr,
-	unsigned long offset, size_t size, enum dma_data_direction dir)
-{
-	return 1;
-}
-
-static inline int dmabounce_sync_for_device(struct device *d, dma_addr_t addr,
-	unsigned long offset, size_t size, enum dma_data_direction dir)
-{
-	return 1;
-}
-
-
-static inline dma_addr_t __dma_map_page(struct device *dev, struct page *page,
-	     unsigned long offset, size_t size, enum dma_data_direction dir)
-{
-	__dma_page_cpu_to_dev(page, offset, size, dir);
-	return pfn_to_dma(dev, page_to_pfn(page)) + offset;
-}
-
-static inline void __dma_unmap_page(struct device *dev, dma_addr_t handle,
-		size_t size, enum dma_data_direction dir)
-{
-	__dma_page_dev_to_cpu(pfn_to_page(dma_to_pfn(dev, handle)),
-		handle & ~PAGE_MASK, size, dir);
-}
-#endif /* CONFIG_DMABOUNCE */
-
-/**
- * dma_map_single - map a single buffer for streaming DMA
- * @dev: valid struct device pointer, or NULL for ISA and EISA-like devices
- * @cpu_addr: CPU direct mapped address of buffer
- * @size: size of buffer to map
- * @dir: DMA transfer direction
- *
- * Ensure that any data held in the cache is appropriately discarded
- * or written back.
- *
- * The device owns this memory once this call has completed.  The CPU
- * can regain ownership by calling dma_unmap_single() or
- * dma_sync_single_for_cpu().
- */
-static inline dma_addr_t dma_map_single(struct device *dev, void *cpu_addr,
-		size_t size, enum dma_data_direction dir)
-{
-	unsigned long offset;
-	struct page *page;
-	dma_addr_t addr;
-
-	BUG_ON(!virt_addr_valid(cpu_addr));
-	BUG_ON(!virt_addr_valid(cpu_addr + size - 1));
-	BUG_ON(!valid_dma_direction(dir));
-
-	page = virt_to_page(cpu_addr);
-	offset = (unsigned long)cpu_addr & ~PAGE_MASK;
-	addr = __dma_map_page(dev, page, offset, size, dir);
-	debug_dma_map_page(dev, page, offset, size, dir, addr, true);
-
-	return addr;
-}
-
-/**
- * dma_map_page - map a portion of a page for streaming DMA
- * @dev: valid struct device pointer, or NULL for ISA and EISA-like devices
- * @page: page that buffer resides in
- * @offset: offset into page for start of buffer
- * @size: size of buffer to map
- * @dir: DMA transfer direction
- *
- * Ensure that any data held in the cache is appropriately discarded
- * or written back.
- *
- * The device owns this memory once this call has completed.  The CPU
- * can regain ownership by calling dma_unmap_page().
- */
-static inline dma_addr_t dma_map_page(struct device *dev, struct page *page,
-	     unsigned long offset, size_t size, enum dma_data_direction dir)
-{
-	dma_addr_t addr;
-
-	BUG_ON(!valid_dma_direction(dir));
-
-	addr = __dma_map_page(dev, page, offset, size, dir);
-	debug_dma_map_page(dev, page, offset, size, dir, addr, false);
-
-	return addr;
-}
-
-/**
- * dma_unmap_single - unmap a single buffer previously mapped
- * @dev: valid struct device pointer, or NULL for ISA and EISA-like devices
- * @handle: DMA address of buffer
- * @size: size of buffer (same as passed to dma_map_single)
- * @dir: DMA transfer direction (same as passed to dma_map_single)
- *
- * Unmap a single streaming mode DMA translation.  The handle and size
- * must match what was provided in the previous dma_map_single() call.
- * All other usages are undefined.
- *
- * After this call, reads by the CPU to the buffer are guaranteed to see
- * whatever the device wrote there.
- */
-static inline void dma_unmap_single(struct device *dev, dma_addr_t handle,
-		size_t size, enum dma_data_direction dir)
-{
-	debug_dma_unmap_page(dev, handle, size, dir, true);
-	__dma_unmap_page(dev, handle, size, dir);
-}
-
-/**
- * dma_unmap_page - unmap a buffer previously mapped through dma_map_page()
- * @dev: valid struct device pointer, or NULL for ISA and EISA-like devices
- * @handle: DMA address of buffer
- * @size: size of buffer (same as passed to dma_map_page)
- * @dir: DMA transfer direction (same as passed to dma_map_page)
- *
- * Unmap a page streaming mode DMA translation.  The handle and size
- * must match what was provided in the previous dma_map_page() call.
- * All other usages are undefined.
- *
- * After this call, reads by the CPU to the buffer are guaranteed to see
- * whatever the device wrote there.
- */
-static inline void dma_unmap_page(struct device *dev, dma_addr_t handle,
-		size_t size, enum dma_data_direction dir)
-{
-	debug_dma_unmap_page(dev, handle, size, dir, false);
-	__dma_unmap_page(dev, handle, size, dir);
-}
-
-/**
- * dma_sync_single_range_for_cpu
- * @dev: valid struct device pointer, or NULL for ISA and EISA-like devices
- * @handle: DMA address of buffer
- * @offset: offset of region to start sync
- * @size: size of region to sync
- * @dir: DMA transfer direction (same as passed to dma_map_single)
- *
- * Make physical memory consistent for a single streaming mode DMA
- * translation after a transfer.
- *
- * If you perform a dma_map_single() but wish to interrogate the
- * buffer using the cpu, yet do not wish to teardown the PCI dma
- * mapping, you must call this function before doing so.  At the
- * next point you give the PCI dma address back to the card, you
- * must first the perform a dma_sync_for_device, and then the
- * device again owns the buffer.
- */
-static inline void dma_sync_single_range_for_cpu(struct device *dev,
-		dma_addr_t handle, unsigned long offset, size_t size,
-		enum dma_data_direction dir)
-{
-	BUG_ON(!valid_dma_direction(dir));
-
-	debug_dma_sync_single_for_cpu(dev, handle + offset, size, dir);
-
-	if (!dmabounce_sync_for_cpu(dev, handle, offset, size, dir))
-		return;
-
-	__dma_single_dev_to_cpu(dma_to_virt(dev, handle) + offset, size, dir);
-}
-
-static inline void dma_sync_single_range_for_device(struct device *dev,
-		dma_addr_t handle, unsigned long offset, size_t size,
-		enum dma_data_direction dir)
-{
-	BUG_ON(!valid_dma_direction(dir));
-
-	debug_dma_sync_single_for_device(dev, handle + offset, size, dir);
-
-	if (!dmabounce_sync_for_device(dev, handle, offset, size, dir))
-		return;
-
-	__dma_single_cpu_to_dev(dma_to_virt(dev, handle) + offset, size, dir);
-}
-
-static inline void dma_sync_single_for_cpu(struct device *dev,
-		dma_addr_t handle, size_t size, enum dma_data_direction dir)
-{
-	dma_sync_single_range_for_cpu(dev, handle, 0, size, dir);
-}
-
-static inline void dma_sync_single_for_device(struct device *dev,
-		dma_addr_t handle, size_t size, enum dma_data_direction dir)
-{
-	dma_sync_single_range_for_device(dev, handle, 0, size, dir);
-}
 
 /*
  * The scatter list versions of the above methods.
  */
-extern int dma_map_sg(struct device *, struct scatterlist *, int,
+extern int arm_dma_map_sg(struct device *, struct scatterlist *, int,
+		enum dma_data_direction, struct dma_attrs *attrs);
+extern void arm_dma_unmap_sg(struct device *, struct scatterlist *, int,
+		enum dma_data_direction, struct dma_attrs *attrs);
+extern void arm_dma_sync_sg_for_cpu(struct device *, struct scatterlist *, int,
 		enum dma_data_direction);
-extern void dma_unmap_sg(struct device *, struct scatterlist *, int,
+extern void arm_dma_sync_sg_for_device(struct device *, struct scatterlist *, int,
 		enum dma_data_direction);
-extern void dma_sync_sg_for_cpu(struct device *, struct scatterlist *, int,
-		enum dma_data_direction);
-extern void dma_sync_sg_for_device(struct device *, struct scatterlist *, int,
-		enum dma_data_direction);
-
 
 #endif /* __KERNEL__ */
 #endif
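With the writecombine variants reduced to attribute-setting wrappers in the header above, every allocation path ends up in dma_alloc_attrs() and is dispatched through get_dma_ops(). A small sketch, assuming a placeholder device pointer and a one-page buffer (illustrative names only):

#include <linux/dma-mapping.h>

static void *example_alloc_wc(struct device *dev, dma_addr_t *handle)
{
	/*
	 * Equivalent to dma_alloc_attrs() with DMA_ATTR_WRITE_COMBINE set,
	 * per the inline wrapper defined in the header above.
	 */
	return dma_alloc_writecombine(dev, PAGE_SIZE, handle, GFP_KERNEL);
}

static void example_free_wc(struct device *dev, void *cpu, dma_addr_t handle)
{
	/* mirrors the allocation: sets the same attribute, calls dma_free_attrs() */
	dma_free_writecombine(dev, PAGE_SIZE, cpu, handle);
}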
diff --git a/arch/arm/include/asm/futex.h b/arch/arm/include/asm/futex.h
index 7be5469..e42cf59 100644
--- a/arch/arm/include/asm/futex.h
+++ b/arch/arm/include/asm/futex.h
@@ -19,6 +19,7 @@
 	"	.long	1b, 4f, 2b, 4f\n"			\
 	"	.popsection\n"					\
 	"	.pushsection .fixup,\"ax\"\n"			\
+	"	.align	2\n"					\
 	"4:	mov	%0, " err_reg "\n"			\
 	"	b	3b\n"					\
 	"	.popsection"
diff --git a/arch/arm/include/asm/hardware/pl080.h b/arch/arm/include/asm/hardware/pl080.h
index 33c78d7..4eea210 100644
--- a/arch/arm/include/asm/hardware/pl080.h
+++ b/arch/arm/include/asm/hardware/pl080.h
@@ -102,6 +102,8 @@
 #define PL080_WIDTH_16BIT			(0x1)
 #define PL080_WIDTH_32BIT			(0x2)
 
+#define PL080N_CONFIG_ITPROT			(1 << 20)
+#define PL080N_CONFIG_SECPROT			(1 << 19)
 #define PL080_CONFIG_HALT			(1 << 18)
 #define PL080_CONFIG_ACTIVE			(1 << 17)  /* RO */
 #define PL080_CONFIG_LOCK			(1 << 16)
diff --git a/arch/arm/include/asm/hardware/sp810.h b/arch/arm/include/asm/hardware/sp810.h
index e0d1c0c..6b9b077 100644
--- a/arch/arm/include/asm/hardware/sp810.h
+++ b/arch/arm/include/asm/hardware/sp810.h
@@ -4,7 +4,7 @@
  * ARM PrimeXsys System Controller SP810 header file
  *
  * Copyright (C) 2009 ST Microelectronics
- * Viresh Kumar<viresh.kumar@st.com>
+ * Viresh Kumar <viresh.linux@gmail.com>
  *
  * This file is licensed under the terms of the GNU General Public
  * License version 2. This program is licensed "as is" without any
diff --git a/arch/arm/include/asm/io.h b/arch/arm/include/asm/io.h
index 9af5563..815c669 100644
--- a/arch/arm/include/asm/io.h
+++ b/arch/arm/include/asm/io.h
@@ -47,9 +47,9 @@
 extern void __raw_readsw(const void __iomem *addr, void *data, int wordlen);
 extern void __raw_readsl(const void __iomem *addr, void *data, int longlen);
 
-#define __raw_writeb(v,a)	(__chk_io_ptr(a), *(volatile unsigned char __force  *)(a) = (v))
-#define __raw_writew(v,a)	(__chk_io_ptr(a), *(volatile unsigned short __force *)(a) = (v))
-#define __raw_writel(v,a)	(__chk_io_ptr(a), *(volatile unsigned int __force   *)(a) = (v))
+#define __raw_writeb(v,a)	((void)(__chk_io_ptr(a), *(volatile unsigned char __force  *)(a) = (v)))
+#define __raw_writew(v,a)	((void)(__chk_io_ptr(a), *(volatile unsigned short __force *)(a) = (v)))
+#define __raw_writel(v,a)	((void)(__chk_io_ptr(a), *(volatile unsigned int __force   *)(a) = (v)))
 
 #define __raw_readb(a)		(__chk_io_ptr(a), *(volatile unsigned char __force  *)(a))
 #define __raw_readw(a)		(__chk_io_ptr(a), *(volatile unsigned short __force *)(a))
@@ -229,11 +229,9 @@
 #define readl_relaxed(c) ({ u32 __r = le32_to_cpu((__force __le32) \
 					__raw_readl(c)); __r; })
 
-#define writeb_relaxed(v,c)	((void)__raw_writeb(v,c))
-#define writew_relaxed(v,c)	((void)__raw_writew((__force u16) \
-					cpu_to_le16(v),c))
-#define writel_relaxed(v,c)	((void)__raw_writel((__force u32) \
-					cpu_to_le32(v),c))
+#define writeb_relaxed(v,c)	__raw_writeb(v,c)
+#define writew_relaxed(v,c)	__raw_writew((__force u16) cpu_to_le16(v),c)
+#define writel_relaxed(v,c)	__raw_writel((__force u32) cpu_to_le32(v),c)
 
 #define readb(c)		({ u8  __v = readb_relaxed(c); __iormb(); __v; })
 #define readw(c)		({ u16 __v = readw_relaxed(c); __iormb(); __v; })
@@ -281,12 +279,12 @@
 #define ioread16be(p)	({ unsigned int __v = be16_to_cpu((__force __be16)__raw_readw(p)); __iormb(); __v; })
 #define ioread32be(p)	({ unsigned int __v = be32_to_cpu((__force __be32)__raw_readl(p)); __iormb(); __v; })
 
-#define iowrite8(v,p)	({ __iowmb(); (void)__raw_writeb(v, p); })
-#define iowrite16(v,p)	({ __iowmb(); (void)__raw_writew((__force __u16)cpu_to_le16(v), p); })
-#define iowrite32(v,p)	({ __iowmb(); (void)__raw_writel((__force __u32)cpu_to_le32(v), p); })
+#define iowrite8(v,p)	({ __iowmb(); __raw_writeb(v, p); })
+#define iowrite16(v,p)	({ __iowmb(); __raw_writew((__force __u16)cpu_to_le16(v), p); })
+#define iowrite32(v,p)	({ __iowmb(); __raw_writel((__force __u32)cpu_to_le32(v), p); })
 
-#define iowrite16be(v,p) ({ __iowmb(); (void)__raw_writew((__force __u16)cpu_to_be16(v), p); })
-#define iowrite32be(v,p) ({ __iowmb(); (void)__raw_writel((__force __u32)cpu_to_be32(v), p); })
+#define iowrite16be(v,p) ({ __iowmb(); __raw_writew((__force __u16)cpu_to_be16(v), p); })
+#define iowrite32be(v,p) ({ __iowmb(); __raw_writel((__force __u32)cpu_to_be32(v), p); })
 
 #define ioread8_rep(p,d,c)	__raw_readsb(p,d,c)
 #define ioread16_rep(p,d,c)	__raw_readsw(p,d,c)
diff --git a/arch/arm/include/asm/mach/arch.h b/arch/arm/include/asm/mach/arch.h
index d7692ca..0b1c94b 100644
--- a/arch/arm/include/asm/mach/arch.h
+++ b/arch/arm/include/asm/mach/arch.h
@@ -43,6 +43,7 @@
 	void			(*init_irq)(void);
 	struct sys_timer	*timer;		/* system tick timer	*/
 	void			(*init_machine)(void);
+	void			(*init_late)(void);
 #ifdef CONFIG_MULTI_IRQ_HANDLER
 	void			(*handle_irq)(struct pt_regs *);
 #endif
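The new init_late hook added above is invoked from init_machine_late() in arch/arm/kernel/setup.c (see the setup.c hunk later in this patch), which runs as a late_initcall. Board usage mirrors what the davinci machine descriptors below do; the names in this sketch are made up:

/* illustrative only: the davinci boards below wire davinci_init_late() this way */
MACHINE_START(EXAMPLE_BOARD, "Example board")
	.map_io		= example_map_io,
	.init_irq	= example_init_irq,
	.timer		= &example_timer,
	.init_machine	= example_init_machine,
	.init_late	= example_init_late,	/* called from late_initcall(init_machine_late) */
MACHINE_END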
diff --git a/arch/arm/include/asm/mach/map.h b/arch/arm/include/asm/mach/map.h
index b36f365..a6efcdd 100644
--- a/arch/arm/include/asm/mach/map.h
+++ b/arch/arm/include/asm/mach/map.h
@@ -30,6 +30,7 @@
 #define MT_MEMORY_DTCM		12
 #define MT_MEMORY_ITCM		13
 #define MT_MEMORY_SO		14
+#define MT_MEMORY_DMA_READY	15
 
 #ifdef CONFIG_MMU
 extern void iotable_init(struct map_desc *, int);
diff --git a/arch/arm/include/asm/posix_types.h b/arch/arm/include/asm/posix_types.h
index efdf990..d2de9cb 100644
--- a/arch/arm/include/asm/posix_types.h
+++ b/arch/arm/include/asm/posix_types.h
@@ -22,9 +22,6 @@
 typedef unsigned short		__kernel_mode_t;
 #define __kernel_mode_t __kernel_mode_t
 
-typedef unsigned short		__kernel_nlink_t;
-#define __kernel_nlink_t __kernel_nlink_t
-
 typedef unsigned short		__kernel_ipc_pid_t;
 #define __kernel_ipc_pid_t __kernel_ipc_pid_t
 
diff --git a/arch/arm/include/asm/thread_info.h b/arch/arm/include/asm/thread_info.h
index 68388eb..b79f8e9 100644
--- a/arch/arm/include/asm/thread_info.h
+++ b/arch/arm/include/asm/thread_info.h
@@ -148,6 +148,7 @@
 #define TIF_NOTIFY_RESUME	2	/* callback before returning to user */
 #define TIF_SYSCALL_TRACE	8
 #define TIF_SYSCALL_AUDIT	9
+#define TIF_SYSCALL_RESTARTSYS	10
 #define TIF_POLLING_NRFLAG	16
 #define TIF_USING_IWMMXT	17
 #define TIF_MEMDIE		18	/* is terminating due to OOM killer */
@@ -162,16 +163,17 @@
 #define _TIF_SYSCALL_AUDIT	(1 << TIF_SYSCALL_AUDIT)
 #define _TIF_POLLING_NRFLAG	(1 << TIF_POLLING_NRFLAG)
 #define _TIF_USING_IWMMXT	(1 << TIF_USING_IWMMXT)
-#define _TIF_RESTORE_SIGMASK	(1 << TIF_RESTORE_SIGMASK)
 #define _TIF_SECCOMP		(1 << TIF_SECCOMP)
+#define _TIF_SYSCALL_RESTARTSYS	(1 << TIF_SYSCALL_RESTARTSYS)
 
 /* Checks for any syscall work in entry-common.S */
-#define _TIF_SYSCALL_WORK (_TIF_SYSCALL_TRACE | _TIF_SYSCALL_AUDIT)
+#define _TIF_SYSCALL_WORK (_TIF_SYSCALL_TRACE | _TIF_SYSCALL_AUDIT | \
+			   _TIF_SYSCALL_RESTARTSYS)
 
 /*
  * Change these and you break ASM code in entry-common.S
  */
-#define _TIF_WORK_MASK		0x000000ff
+#define _TIF_WORK_MASK		(_TIF_NEED_RESCHED | _TIF_SIGPENDING | _TIF_NOTIFY_RESUME)
 
 #endif /* __KERNEL__ */
 #endif /* __ASM_ARM_THREAD_INFO_H */
diff --git a/arch/arm/kernel/entry-armv.S b/arch/arm/kernel/entry-armv.S
index 437f0c4..0d1851c 100644
--- a/arch/arm/kernel/entry-armv.S
+++ b/arch/arm/kernel/entry-armv.S
@@ -495,6 +495,7 @@
  * The out of line fixup for the ldrt above.
  */
 	.pushsection .fixup, "ax"
+	.align	2
 4:	mov	pc, r9
 	.popsection
 	.pushsection __ex_table,"a"
diff --git a/arch/arm/kernel/entry-common.S b/arch/arm/kernel/entry-common.S
index 7bd2d3c..4afed88 100644
--- a/arch/arm/kernel/entry-common.S
+++ b/arch/arm/kernel/entry-common.S
@@ -53,9 +53,13 @@
 work_pending:
 	tst	r1, #_TIF_NEED_RESCHED
 	bne	work_resched
-	tst	r1, #_TIF_SIGPENDING|_TIF_NOTIFY_RESUME
-	beq	no_work_pending
+	/*
+	 * TIF_SIGPENDING or TIF_NOTIFY_RESUME must've been set if we got here
+	 */
+	ldr	r2, [sp, #S_PSR]
 	mov	r0, sp				@ 'regs'
+	tst	r2, #15				@ are we returning to user mode?
+	bne	no_work_pending			@ no?  just leave, then...
 	mov	r2, why				@ 'syscall'
 	tst	r1, #_TIF_SIGPENDING		@ delivering a signal?
 	movne	why, #0				@ prevent further restarts
diff --git a/arch/arm/kernel/kprobes-thumb.c b/arch/arm/kernel/kprobes-thumb.c
index 8f96ec7..6123daf 100644
--- a/arch/arm/kernel/kprobes-thumb.c
+++ b/arch/arm/kernel/kprobes-thumb.c
@@ -660,7 +660,7 @@
 	/* LDRSB (literal)	1111 1001 x001 1111 xxxx xxxx xxxx xxxx */
 	/* LDRH (literal)	1111 1000 x011 1111 xxxx xxxx xxxx xxxx */
 	/* LDRSH (literal)	1111 1001 x011 1111 xxxx xxxx xxxx xxxx */
-	DECODE_EMULATEX	(0xfe5f0000, 0xf81f0000, t32_simulate_ldr_literal,
+	DECODE_SIMULATEX(0xfe5f0000, 0xf81f0000, t32_simulate_ldr_literal,
 						 REGS(PC, NOSPPCX, 0, 0, 0)),
 
 	/* STRB (immediate)	1111 1000 0000 xxxx xxxx 1xxx xxxx xxxx */
diff --git a/arch/arm/kernel/ptrace.c b/arch/arm/kernel/ptrace.c
index 14e3826..5700a7a 100644
--- a/arch/arm/kernel/ptrace.c
+++ b/arch/arm/kernel/ptrace.c
@@ -25,6 +25,7 @@
 #include <linux/regset.h>
 #include <linux/audit.h>
 #include <linux/tracehook.h>
+#include <linux/unistd.h>
 
 #include <asm/pgtable.h>
 #include <asm/traps.h>
@@ -917,6 +918,8 @@
 		audit_syscall_entry(AUDIT_ARCH_ARM, scno, regs->ARM_r0,
 				    regs->ARM_r1, regs->ARM_r2, regs->ARM_r3);
 
+	if (why == 0 && test_and_clear_thread_flag(TIF_SYSCALL_RESTARTSYS))
+		scno = __NR_restart_syscall - __NR_SYSCALL_BASE;
 	if (!test_thread_flag(TIF_SYSCALL_TRACE))
 		return scno;
 
diff --git a/arch/arm/kernel/setup.c b/arch/arm/kernel/setup.c
index ebfac78..e15d83b 100644
--- a/arch/arm/kernel/setup.c
+++ b/arch/arm/kernel/setup.c
@@ -81,6 +81,7 @@
 extern void paging_init(struct machine_desc *desc);
 extern void sanity_check_meminfo(void);
 extern void reboot_setup(char *str);
+extern void setup_dma_zone(struct machine_desc *desc);
 
 unsigned int processor_id;
 EXPORT_SYMBOL(processor_id);
@@ -800,6 +801,14 @@
 }
 arch_initcall(customize_machine);
 
+static int __init init_machine_late(void)
+{
+	if (machine_desc->init_late)
+		machine_desc->init_late();
+	return 0;
+}
+late_initcall(init_machine_late);
+
 #ifdef CONFIG_KEXEC
 static inline unsigned long long get_total_mem(void)
 {
@@ -939,12 +948,8 @@
 	machine_desc = mdesc;
 	machine_name = mdesc->name;
 
-#ifdef CONFIG_ZONE_DMA
-	if (mdesc->dma_zone_size) {
-		extern unsigned long arm_dma_zone_size;
-		arm_dma_zone_size = mdesc->dma_zone_size;
-	}
-#endif
+	setup_dma_zone(mdesc);
+
 	if (mdesc->restart_mode)
 		reboot_setup(&mdesc->restart_mode);
 
diff --git a/arch/arm/kernel/signal.c b/arch/arm/kernel/signal.c
index 4e5fdd9..fd2392a 100644
--- a/arch/arm/kernel/signal.c
+++ b/arch/arm/kernel/signal.c
@@ -22,14 +22,11 @@
 
 #include "signal.h"
 
-#define _BLOCKABLE (~(sigmask(SIGKILL) | sigmask(SIGSTOP)))
-
 /*
  * For ARM syscalls, we encode the syscall number into the instruction.
  */
 #define SWI_SYS_SIGRETURN	(0xef000000|(__NR_sigreturn)|(__NR_OABI_SYSCALL_BASE))
 #define SWI_SYS_RT_SIGRETURN	(0xef000000|(__NR_rt_sigreturn)|(__NR_OABI_SYSCALL_BASE))
-#define SWI_SYS_RESTART		(0xef000000|__NR_restart_syscall|__NR_OABI_SYSCALL_BASE)
 
 /*
  * With EABI, the syscall number has to be loaded into r7.
@@ -50,18 +47,6 @@
 };
 
 /*
- * Either we support OABI only, or we have EABI with the OABI
- * compat layer enabled.  In the later case we don't know if
- * user space is EABI or not, and if not we must not clobber r7.
- * Always using the OABI syscall solves that issue and works for
- * all those cases.
- */
-const unsigned long syscall_restart_code[2] = {
-	SWI_SYS_RESTART,	/* swi	__NR_restart_syscall */
-	0xe49df004,		/* ldr	pc, [sp], #4 */
-};
-
-/*
  * atomically swap in the new signal mask, and wait for a signal.
  */
 asmlinkage int sys_sigsuspend(int restart, unsigned long oldmask, old_sigset_t mask)
@@ -82,10 +67,10 @@
 		old_sigset_t mask;
 		if (!access_ok(VERIFY_READ, act, sizeof(*act)) ||
 		    __get_user(new_ka.sa.sa_handler, &act->sa_handler) ||
-		    __get_user(new_ka.sa.sa_restorer, &act->sa_restorer))
+		    __get_user(new_ka.sa.sa_restorer, &act->sa_restorer) ||
+		    __get_user(new_ka.sa.sa_flags, &act->sa_flags) ||
+		    __get_user(mask, &act->sa_mask))
 			return -EFAULT;
-		__get_user(new_ka.sa.sa_flags, &act->sa_flags);
-		__get_user(mask, &act->sa_mask);
 		siginitset(&new_ka.sa.sa_mask, mask);
 	}
 
@@ -94,10 +79,10 @@
 	if (!ret && oact) {
 		if (!access_ok(VERIFY_WRITE, oact, sizeof(*oact)) ||
 		    __put_user(old_ka.sa.sa_handler, &oact->sa_handler) ||
-		    __put_user(old_ka.sa.sa_restorer, &oact->sa_restorer))
+		    __put_user(old_ka.sa.sa_restorer, &oact->sa_restorer) ||
+		    __put_user(old_ka.sa.sa_flags, &oact->sa_flags) ||
+		    __put_user(old_ka.sa.sa_mask.sig[0], &oact->sa_mask))
 			return -EFAULT;
-		__put_user(old_ka.sa.sa_flags, &oact->sa_flags);
-		__put_user(old_ka.sa.sa_mask.sig[0], &oact->sa_mask);
 	}
 
 	return ret;
@@ -223,10 +208,8 @@
 	int err;
 
 	err = __copy_from_user(&set, &sf->uc.uc_sigmask, sizeof(set));
-	if (err == 0) {
-		sigdelsetmask(&set, ~_BLOCKABLE);
+	if (err == 0)
 		set_current_blocked(&set);
-	}
 
 	__get_user_error(regs->ARM_r0, &sf->uc.uc_mcontext.arm_r0, err);
 	__get_user_error(regs->ARM_r1, &sf->uc.uc_mcontext.arm_r1, err);
@@ -541,13 +524,13 @@
 /*
  * OK, we're invoking a handler
  */	
-static int
+static void
 handle_signal(unsigned long sig, struct k_sigaction *ka,
-	      siginfo_t *info, sigset_t *oldset,
-	      struct pt_regs * regs)
+	      siginfo_t *info, struct pt_regs *regs)
 {
 	struct thread_info *thread = current_thread_info();
 	struct task_struct *tsk = current;
+	sigset_t *oldset = sigmask_to_save();
 	int usig = sig;
 	int ret;
 
@@ -572,17 +555,9 @@
 
 	if (ret != 0) {
 		force_sigsegv(sig, tsk);
-		return ret;
+		return;
 	}
-
-	/*
-	 * Block the signal if we were successful.
-	 */
-	block_sigmask(ka, sig);
-
-	tracehook_signal_handler(sig, info, ka, regs, 0);
-
-	return 0;
+	signal_delivered(sig, info, ka, regs, 0);
 }
 
 /*
@@ -602,15 +577,6 @@
 	int signr;
 
 	/*
-	 * We want the common case to go fast, which
-	 * is why we may in certain cases get here from
-	 * kernel mode. Just return without doing anything
-	 * if so.
-	 */
-	if (!user_mode(regs))
-		return;
-
-	/*
 	 * If we were from a system call, check for system call restarting...
 	 */
 	if (syscall) {
@@ -626,58 +592,39 @@
 		case -ERESTARTNOHAND:
 		case -ERESTARTSYS:
 		case -ERESTARTNOINTR:
+		case -ERESTART_RESTARTBLOCK:
 			regs->ARM_r0 = regs->ARM_ORIG_r0;
 			regs->ARM_pc = restart_addr;
 			break;
-		case -ERESTART_RESTARTBLOCK:
-			regs->ARM_r0 = -EINTR;
-			break;
 		}
 	}
 
-	if (try_to_freeze())
-		goto no_signal;
-
 	/*
 	 * Get the signal to deliver.  When running under ptrace, at this
 	 * point the debugger may change all our registers ...
 	 */
 	signr = get_signal_to_deliver(&info, &ka, regs, NULL);
 	if (signr > 0) {
-		sigset_t *oldset;
-
 		/*
 		 * Depending on the signal settings we may need to revert the
 		 * decision to restart the system call.  But skip this if a
 		 * debugger has chosen to restart at a different PC.
 		 */
 		if (regs->ARM_pc == restart_addr) {
-			if (retval == -ERESTARTNOHAND
+			if (retval == -ERESTARTNOHAND ||
+			    retval == -ERESTART_RESTARTBLOCK
 			    || (retval == -ERESTARTSYS
 				&& !(ka.sa.sa_flags & SA_RESTART))) {
 				regs->ARM_r0 = -EINTR;
 				regs->ARM_pc = continue_addr;
 			}
+			clear_thread_flag(TIF_SYSCALL_RESTARTSYS);
 		}
 
-		if (test_thread_flag(TIF_RESTORE_SIGMASK))
-			oldset = &current->saved_sigmask;
-		else
-			oldset = &current->blocked;
-		if (handle_signal(signr, &ka, &info, oldset, regs) == 0) {
-			/*
-			 * A signal was successfully delivered; the saved
-			 * sigmask will have been stored in the signal frame,
-			 * and will be restored by sigreturn, so we can simply
-			 * clear the TIF_RESTORE_SIGMASK flag.
-			 */
-			if (test_thread_flag(TIF_RESTORE_SIGMASK))
-				clear_thread_flag(TIF_RESTORE_SIGMASK);
-		}
+		handle_signal(signr, &ka, &info, regs);
 		return;
 	}
 
- no_signal:
 	if (syscall) {
 		/*
 		 * Handle restarting a different system call.  As above,
@@ -685,38 +632,11 @@
 		 * ignore the restart.
 		 */
 		if (retval == -ERESTART_RESTARTBLOCK
-		    && regs->ARM_pc == continue_addr) {
-			if (thumb_mode(regs)) {
-				regs->ARM_r7 = __NR_restart_syscall - __NR_SYSCALL_BASE;
-				regs->ARM_pc -= 2;
-			} else {
-#if defined(CONFIG_AEABI) && !defined(CONFIG_OABI_COMPAT)
-				regs->ARM_r7 = __NR_restart_syscall;
-				regs->ARM_pc -= 4;
-#else
-				u32 __user *usp;
-
-				regs->ARM_sp -= 4;
-				usp = (u32 __user *)regs->ARM_sp;
-
-				if (put_user(regs->ARM_pc, usp) == 0) {
-					regs->ARM_pc = KERN_RESTART_CODE;
-				} else {
-					regs->ARM_sp += 4;
-					force_sigsegv(0, current);
-				}
-#endif
-			}
-		}
-
-		/* If there's no signal to deliver, we just put the saved sigmask
-		 * back.
-		 */
-		if (test_thread_flag(TIF_RESTORE_SIGMASK)) {
-			clear_thread_flag(TIF_RESTORE_SIGMASK);
-			sigprocmask(SIG_SETMASK, &current->saved_sigmask, NULL);
-		}
+		    && regs->ARM_pc == restart_addr)
+			set_thread_flag(TIF_SYSCALL_RESTARTSYS);
 	}
+
+	restore_saved_sigmask();
 }
 
 asmlinkage void
@@ -728,7 +648,5 @@
 	if (thread_flags & _TIF_NOTIFY_RESUME) {
 		clear_thread_flag(TIF_NOTIFY_RESUME);
 		tracehook_notify_resume(regs);
-		if (current->replacement_session_keyring)
-			key_replace_session_keyring();
 	}
 }
diff --git a/arch/arm/kernel/signal.h b/arch/arm/kernel/signal.h
index 6fcfe83..5ff067b7 100644
--- a/arch/arm/kernel/signal.h
+++ b/arch/arm/kernel/signal.h
@@ -8,7 +8,5 @@
  * published by the Free Software Foundation.
  */
 #define KERN_SIGRETURN_CODE	(CONFIG_VECTORS_BASE + 0x00000500)
-#define KERN_RESTART_CODE	(KERN_SIGRETURN_CODE + sizeof(sigreturn_codes))
 
 extern const unsigned long sigreturn_codes[7];
-extern const unsigned long syscall_restart_code[2];
diff --git a/arch/arm/kernel/smp.c b/arch/arm/kernel/smp.c
index b735521..2c7217d 100644
--- a/arch/arm/kernel/smp.c
+++ b/arch/arm/kernel/smp.c
@@ -109,7 +109,6 @@
 int __cpu_disable(void)
 {
 	unsigned int cpu = smp_processor_id();
-	struct task_struct *p;
 	int ret;
 
 	ret = platform_cpu_disable(cpu);
@@ -139,12 +138,7 @@
 	flush_cache_all();
 	local_flush_tlb_all();
 
-	read_lock(&tasklist_lock);
-	for_each_process(p) {
-		if (p->mm)
-			cpumask_clear_cpu(cpu, mm_cpumask(p->mm));
-	}
-	read_unlock(&tasklist_lock);
+	clear_tasks_mm_cpumask(cpu);
 
 	return 0;
 }
diff --git a/arch/arm/kernel/traps.c b/arch/arm/kernel/traps.c
index 3647170..4928d89 100644
--- a/arch/arm/kernel/traps.c
+++ b/arch/arm/kernel/traps.c
@@ -820,8 +820,6 @@
 	 */
 	memcpy((void *)(vectors + KERN_SIGRETURN_CODE - CONFIG_VECTORS_BASE),
 	       sigreturn_codes, sizeof(sigreturn_codes));
-	memcpy((void *)(vectors + KERN_RESTART_CODE - CONFIG_VECTORS_BASE),
-	       syscall_restart_code, sizeof(syscall_restart_code));
 
 	flush_icache_range(vectors, vectors + PAGE_SIZE);
 	modify_domain(DOMAIN_USER, DOMAIN_CLIENT);
diff --git a/arch/arm/mach-at91/at91sam9g45_devices.c b/arch/arm/mach-at91/at91sam9g45_devices.c
index f674724..933fc9a 100644
--- a/arch/arm/mach-at91/at91sam9g45_devices.c
+++ b/arch/arm/mach-at91/at91sam9g45_devices.c
@@ -436,7 +436,6 @@
 	atslave->dma_dev = &at_hdmac_device.dev;
 	atslave->cfg = ATC_FIFOCFG_HALFFIFO
 			| ATC_SRC_H2SEL_HW | ATC_DST_H2SEL_HW;
-	atslave->ctrla = ATC_SCSIZE_16 | ATC_DCSIZE_16;
 	if (mmc_id == 0)	/* MCI0 */
 		atslave->cfg |= ATC_SRC_PER(AT_DMA_ID_MCI0)
 			      | ATC_DST_PER(AT_DMA_ID_MCI0);
diff --git a/arch/arm/mach-at91/include/mach/at_hdmac.h b/arch/arm/mach-at91/include/mach/at_hdmac.h
index fff48d1..cab0997 100644
--- a/arch/arm/mach-at91/include/mach/at_hdmac.h
+++ b/arch/arm/mach-at91/include/mach/at_hdmac.h
@@ -26,18 +26,11 @@
 /**
  * struct at_dma_slave - Controller-specific information about a slave
  * @dma_dev: required DMA master device
- * @tx_reg: physical address of data register used for
- *	memory-to-peripheral transfers
- * @rx_reg: physical address of data register used for
- *	peripheral-to-memory transfers
- * @reg_width: peripheral register width
  * @cfg: Platform-specific initializer for the CFG register
- * @ctrla: Platform-specific initializer for the CTRLA register
  */
 struct at_dma_slave {
 	struct device		*dma_dev;
 	u32			cfg;
-	u32			ctrla;
 };
 
 
@@ -64,24 +57,5 @@
 #define		ATC_FIFOCFG_HALFFIFO		(0x1 << 28)
 #define		ATC_FIFOCFG_ENOUGHSPACE		(0x2 << 28)
 
-/* Platform-configurable bits in CTRLA */
-#define	ATC_SCSIZE_MASK		(0x7 << 16)	/* Source Chunk Transfer Size */
-#define		ATC_SCSIZE_1		(0x0 << 16)
-#define		ATC_SCSIZE_4		(0x1 << 16)
-#define		ATC_SCSIZE_8		(0x2 << 16)
-#define		ATC_SCSIZE_16		(0x3 << 16)
-#define		ATC_SCSIZE_32		(0x4 << 16)
-#define		ATC_SCSIZE_64		(0x5 << 16)
-#define		ATC_SCSIZE_128		(0x6 << 16)
-#define		ATC_SCSIZE_256		(0x7 << 16)
-#define	ATC_DCSIZE_MASK		(0x7 << 20)	/* Destination Chunk Transfer Size */
-#define		ATC_DCSIZE_1		(0x0 << 20)
-#define		ATC_DCSIZE_4		(0x1 << 20)
-#define		ATC_DCSIZE_8		(0x2 << 20)
-#define		ATC_DCSIZE_16		(0x3 << 20)
-#define		ATC_DCSIZE_32		(0x4 << 20)
-#define		ATC_DCSIZE_64		(0x5 << 20)
-#define		ATC_DCSIZE_128		(0x6 << 20)
-#define		ATC_DCSIZE_256		(0x7 << 20)
 
 #endif /* AT_HDMAC_H */
diff --git a/arch/arm/mach-davinci/board-da830-evm.c b/arch/arm/mach-davinci/board-da830-evm.c
index dc1afe5..0031864 100644
--- a/arch/arm/mach-davinci/board-da830-evm.c
+++ b/arch/arm/mach-davinci/board-da830-evm.c
@@ -681,6 +681,7 @@
 	.init_irq	= cp_intc_init,
 	.timer		= &davinci_timer,
 	.init_machine	= da830_evm_init,
+	.init_late	= davinci_init_late,
 	.dma_zone_size	= SZ_128M,
 	.restart	= da8xx_restart,
 MACHINE_END
diff --git a/arch/arm/mach-davinci/board-da850-evm.c b/arch/arm/mach-davinci/board-da850-evm.c
index 09f6107..0149fb4 100644
--- a/arch/arm/mach-davinci/board-da850-evm.c
+++ b/arch/arm/mach-davinci/board-da850-evm.c
@@ -1411,6 +1411,7 @@
 	.init_irq	= cp_intc_init,
 	.timer		= &davinci_timer,
 	.init_machine	= da850_evm_init,
+	.init_late	= davinci_init_late,
 	.dma_zone_size	= SZ_128M,
 	.restart	= da8xx_restart,
 MACHINE_END
diff --git a/arch/arm/mach-davinci/board-dm355-evm.c b/arch/arm/mach-davinci/board-dm355-evm.c
index 82ed753..1c7b1f4 100644
--- a/arch/arm/mach-davinci/board-dm355-evm.c
+++ b/arch/arm/mach-davinci/board-dm355-evm.c
@@ -357,6 +357,7 @@
 	.init_irq     = davinci_irq_init,
 	.timer	      = &davinci_timer,
 	.init_machine = dm355_evm_init,
+	.init_late	= davinci_init_late,
 	.dma_zone_size	= SZ_128M,
 	.restart	= davinci_restart,
 MACHINE_END
diff --git a/arch/arm/mach-davinci/board-dm355-leopard.c b/arch/arm/mach-davinci/board-dm355-leopard.c
index d74a8b3..8e77032 100644
--- a/arch/arm/mach-davinci/board-dm355-leopard.c
+++ b/arch/arm/mach-davinci/board-dm355-leopard.c
@@ -276,6 +276,7 @@
 	.init_irq     = davinci_irq_init,
 	.timer	      = &davinci_timer,
 	.init_machine = dm355_leopard_init,
+	.init_late	= davinci_init_late,
 	.dma_zone_size	= SZ_128M,
 	.restart	= davinci_restart,
 MACHINE_END
diff --git a/arch/arm/mach-davinci/board-dm365-evm.c b/arch/arm/mach-davinci/board-dm365-evm.c
index 5bce2b8..688a9c5 100644
--- a/arch/arm/mach-davinci/board-dm365-evm.c
+++ b/arch/arm/mach-davinci/board-dm365-evm.c
@@ -618,6 +618,7 @@
 	.init_irq	= davinci_irq_init,
 	.timer		= &davinci_timer,
 	.init_machine	= dm365_evm_init,
+	.init_late	= davinci_init_late,
 	.dma_zone_size	= SZ_128M,
 	.restart	= davinci_restart,
 MACHINE_END
diff --git a/arch/arm/mach-davinci/board-dm644x-evm.c b/arch/arm/mach-davinci/board-dm644x-evm.c
index 3683306..d34ed55 100644
--- a/arch/arm/mach-davinci/board-dm644x-evm.c
+++ b/arch/arm/mach-davinci/board-dm644x-evm.c
@@ -825,6 +825,7 @@
 	.init_irq     = davinci_irq_init,
 	.timer	      = &davinci_timer,
 	.init_machine = davinci_evm_init,
+	.init_late	= davinci_init_late,
 	.dma_zone_size	= SZ_128M,
 	.restart	= davinci_restart,
 MACHINE_END
diff --git a/arch/arm/mach-davinci/board-dm646x-evm.c b/arch/arm/mach-davinci/board-dm646x-evm.c
index d72ab94..958679a 100644
--- a/arch/arm/mach-davinci/board-dm646x-evm.c
+++ b/arch/arm/mach-davinci/board-dm646x-evm.c
@@ -788,6 +788,7 @@
 	.init_irq     = davinci_irq_init,
 	.timer        = &davinci_timer,
 	.init_machine = evm_init,
+	.init_late	= davinci_init_late,
 	.dma_zone_size	= SZ_128M,
 	.restart	= davinci_restart,
 MACHINE_END
@@ -798,6 +799,7 @@
 	.init_irq     = davinci_irq_init,
 	.timer        = &davinci_timer,
 	.init_machine = evm_init,
+	.init_late	= davinci_init_late,
 	.dma_zone_size	= SZ_128M,
 	.restart	= davinci_restart,
 MACHINE_END
diff --git a/arch/arm/mach-davinci/board-mityomapl138.c b/arch/arm/mach-davinci/board-mityomapl138.c
index 672d820..beecde3 100644
--- a/arch/arm/mach-davinci/board-mityomapl138.c
+++ b/arch/arm/mach-davinci/board-mityomapl138.c
@@ -572,6 +572,7 @@
 	.init_irq	= cp_intc_init,
 	.timer		= &davinci_timer,
 	.init_machine	= mityomapl138_init,
+	.init_late	= davinci_init_late,
 	.dma_zone_size	= SZ_128M,
 	.restart	= da8xx_restart,
 MACHINE_END
diff --git a/arch/arm/mach-davinci/board-neuros-osd2.c b/arch/arm/mach-davinci/board-neuros-osd2.c
index a772bb4..5de69f2 100644
--- a/arch/arm/mach-davinci/board-neuros-osd2.c
+++ b/arch/arm/mach-davinci/board-neuros-osd2.c
@@ -278,6 +278,7 @@
 	.init_irq	= davinci_irq_init,
 	.timer		= &davinci_timer,
 	.init_machine = davinci_ntosd2_init,
+	.init_late	= davinci_init_late,
 	.dma_zone_size	= SZ_128M,
 	.restart	= davinci_restart,
 MACHINE_END
diff --git a/arch/arm/mach-davinci/board-omapl138-hawk.c b/arch/arm/mach-davinci/board-omapl138-hawk.c
index 45e8157..dc1208e 100644
--- a/arch/arm/mach-davinci/board-omapl138-hawk.c
+++ b/arch/arm/mach-davinci/board-omapl138-hawk.c
@@ -343,6 +343,7 @@
 	.init_irq	= cp_intc_init,
 	.timer		= &davinci_timer,
 	.init_machine	= omapl138_hawk_init,
+	.init_late	= davinci_init_late,
 	.dma_zone_size	= SZ_128M,
 	.restart	= da8xx_restart,
 MACHINE_END
diff --git a/arch/arm/mach-davinci/board-sffsdr.c b/arch/arm/mach-davinci/board-sffsdr.c
index 76e67509..9078acf 100644
--- a/arch/arm/mach-davinci/board-sffsdr.c
+++ b/arch/arm/mach-davinci/board-sffsdr.c
@@ -157,6 +157,7 @@
 	.init_irq     = davinci_irq_init,
 	.timer	      = &davinci_timer,
 	.init_machine = davinci_sffsdr_init,
+	.init_late	= davinci_init_late,
 	.dma_zone_size	= SZ_128M,
 	.restart	= davinci_restart,
 MACHINE_END
diff --git a/arch/arm/mach-davinci/board-tnetv107x-evm.c b/arch/arm/mach-davinci/board-tnetv107x-evm.c
index 5f14e30..ac4e003 100644
--- a/arch/arm/mach-davinci/board-tnetv107x-evm.c
+++ b/arch/arm/mach-davinci/board-tnetv107x-evm.c
@@ -282,6 +282,7 @@
 	.init_irq	= cp_intc_init,
 	.timer		= &davinci_timer,
 	.init_machine	= tnetv107x_evm_board_init,
+	.init_late	= davinci_init_late,
 	.dma_zone_size	= SZ_128M,
 	.restart	= tnetv107x_restart,
 MACHINE_END
diff --git a/arch/arm/mach-davinci/clock.c b/arch/arm/mach-davinci/clock.c
index 008772e..34668ea 100644
--- a/arch/arm/mach-davinci/clock.c
+++ b/arch/arm/mach-davinci/clock.c
@@ -213,7 +213,7 @@
 /*
  * Disable any unused clocks left on by the bootloader
  */
-static int __init clk_disable_unused(void)
+int __init davinci_clk_disable_unused(void)
 {
 	struct clk *ck;
 
@@ -237,7 +237,6 @@
 
 	return 0;
 }
-late_initcall(clk_disable_unused);
 #endif
 
 static unsigned long clk_sysclk_recalc(struct clk *clk)
diff --git a/arch/arm/mach-davinci/common.c b/arch/arm/mach-davinci/common.c
index cb9b2e4..64b0f65 100644
--- a/arch/arm/mach-davinci/common.c
+++ b/arch/arm/mach-davinci/common.c
@@ -117,3 +117,10 @@
 err:
 	panic("davinci_common_init: SoC Initialization failed\n");
 }
+
+void __init davinci_init_late(void)
+{
+	davinci_cpufreq_init();
+	davinci_pm_init();
+	davinci_clk_disable_unused();
+}
diff --git a/arch/arm/mach-davinci/cpufreq.c b/arch/arm/mach-davinci/cpufreq.c
index 031048f..4729eaa 100644
--- a/arch/arm/mach-davinci/cpufreq.c
+++ b/arch/arm/mach-davinci/cpufreq.c
@@ -240,10 +240,9 @@
 	.remove = __exit_p(davinci_cpufreq_remove),
 };
 
-static int __init davinci_cpufreq_init(void)
+int __init davinci_cpufreq_init(void)
 {
 	return platform_driver_probe(&davinci_cpufreq_driver,
 							davinci_cpufreq_probe);
 }
-late_initcall(davinci_cpufreq_init);
 
diff --git a/arch/arm/mach-davinci/dma.c b/arch/arm/mach-davinci/dma.c
index 95ce019..a685e97 100644
--- a/arch/arm/mach-davinci/dma.c
+++ b/arch/arm/mach-davinci/dma.c
@@ -353,9 +353,10 @@
  *****************************************************************************/
 static irqreturn_t dma_irq_handler(int irq, void *data)
 {
-	int i;
 	int ctlr;
-	unsigned int cnt = 0;
+	u32 sh_ier;
+	u32 sh_ipr;
+	u32 bank;
 
 	ctlr = irq2ctlr(irq);
 	if (ctlr < 0)
@@ -363,41 +364,39 @@
 
 	dev_dbg(data, "dma_irq_handler\n");
 
-	if ((edma_shadow0_read_array(ctlr, SH_IPR, 0) == 0) &&
-	    (edma_shadow0_read_array(ctlr, SH_IPR, 1) == 0))
-		return IRQ_NONE;
-
-	while (1) {
-		int j;
-		if (edma_shadow0_read_array(ctlr, SH_IPR, 0) &
-				edma_shadow0_read_array(ctlr, SH_IER, 0))
-			j = 0;
-		else if (edma_shadow0_read_array(ctlr, SH_IPR, 1) &
-				edma_shadow0_read_array(ctlr, SH_IER, 1))
-			j = 1;
-		else
-			break;
-		dev_dbg(data, "IPR%d %08x\n", j,
-				edma_shadow0_read_array(ctlr, SH_IPR, j));
-		for (i = 0; i < 32; i++) {
-			int k = (j << 5) + i;
-			if ((edma_shadow0_read_array(ctlr, SH_IPR, j) & BIT(i))
-					&& (edma_shadow0_read_array(ctlr,
-							SH_IER, j) & BIT(i))) {
-				/* Clear the corresponding IPR bits */
-				edma_shadow0_write_array(ctlr, SH_ICR, j,
-							BIT(i));
-				if (edma_cc[ctlr]->intr_data[k].callback)
-					edma_cc[ctlr]->intr_data[k].callback(
-						k, DMA_COMPLETE,
-						edma_cc[ctlr]->intr_data[k].
-						data);
-			}
-		}
-		cnt++;
-		if (cnt > 10)
-			break;
+	sh_ipr = edma_shadow0_read_array(ctlr, SH_IPR, 0);
+	if (!sh_ipr) {
+		sh_ipr = edma_shadow0_read_array(ctlr, SH_IPR, 1);
+		if (!sh_ipr)
+			return IRQ_NONE;
+		sh_ier = edma_shadow0_read_array(ctlr, SH_IER, 1);
+		bank = 1;
+	} else {
+		sh_ier = edma_shadow0_read_array(ctlr, SH_IER, 0);
+		bank = 0;
 	}
+
+	do {
+		u32 slot;
+		u32 channel;
+
+		dev_dbg(data, "IPR%d %08x\n", bank, sh_ipr);
+
+		slot = __ffs(sh_ipr);
+		sh_ipr &= ~(BIT(slot));
+
+		if (sh_ier & BIT(slot)) {
+			channel = (bank << 5) | slot;
+			/* Clear the corresponding IPR bits */
+			edma_shadow0_write_array(ctlr, SH_ICR, bank,
+					BIT(slot));
+			if (edma_cc[ctlr]->intr_data[channel].callback)
+				edma_cc[ctlr]->intr_data[channel].callback(
+					channel, DMA_COMPLETE,
+					edma_cc[ctlr]->intr_data[channel].data);
+		}
+	} while (sh_ipr);
+
 	edma_shadow0_write(ctlr, SH_IEVAL, 1);
 	return IRQ_HANDLED;
 }
diff --git a/arch/arm/mach-davinci/include/mach/common.h b/arch/arm/mach-davinci/include/mach/common.h
index 5cd39a4..bdc4aa8 100644
--- a/arch/arm/mach-davinci/include/mach/common.h
+++ b/arch/arm/mach-davinci/include/mach/common.h
@@ -84,6 +84,25 @@
 extern void davinci_common_init(struct davinci_soc_info *soc_info);
 extern void davinci_init_ide(void);
 void davinci_restart(char mode, const char *cmd);
+void davinci_init_late(void);
+
+#ifdef CONFIG_DAVINCI_RESET_CLOCKS
+int davinci_clk_disable_unused(void);
+#else
+static inline int davinci_clk_disable_unused(void) { return 0; }
+#endif
+
+#ifdef CONFIG_CPU_FREQ
+int davinci_cpufreq_init(void);
+#else
+static inline int davinci_cpufreq_init(void) { return 0; }
+#endif
+
+#ifdef CONFIG_SUSPEND
+int davinci_pm_init(void);
+#else
+static inline int davinci_pm_init(void) { return 0; }
+#endif
 
 /* standard place to map on-chip SRAMs; they *may* support DMA */
 #define SRAM_VIRT	0xfffe0000
diff --git a/arch/arm/mach-davinci/include/mach/debug-macro.S b/arch/arm/mach-davinci/include/mach/debug-macro.S
index cf94552..34290d1 100644
--- a/arch/arm/mach-davinci/include/mach/debug-macro.S
+++ b/arch/arm/mach-davinci/include/mach/debug-macro.S
@@ -22,46 +22,28 @@
 
 #define UART_SHIFT	2
 
-		.pushsection .data
-davinci_uart_phys:	.word	0
-davinci_uart_virt:	.word	0
-		.popsection
+#if defined(CONFIG_DEBUG_DAVINCI_DMx_UART0)
+#define UART_BASE	DAVINCI_UART0_BASE
+#elif defined(CONFIG_DEBUG_DAVINCI_DA8XX_UART0)
+#define UART_BASE	DA8XX_UART0_BASE
+#elif defined(CONFIG_DEBUG_DAVINCI_DA8XX_UART1)
+#define UART_BASE	DA8XX_UART1_BASE
+#elif defined(CONFIG_DEBUG_DAVINCI_DA8XX_UART2)
+#define UART_BASE	DA8XX_UART2_BASE
+#elif defined(CONFIG_DEBUG_DAVINCI_TNETV107X_UART1)
+#define UART_BASE	TNETV107X_UART2_BASE
+#define UART_VIRTBASE	TNETV107X_UART2_VIRT
+#else
+#error "Select a specifc port for DEBUG_LL"
+#endif
+
+#ifndef UART_VIRTBASE
+#define UART_VIRTBASE	IO_ADDRESS(UART_BASE)
+#endif
 
 		.macro addruart, rp, rv, tmp
-
-		/* Use davinci_uart_phys/virt if already configured */
-10:		adr	\rp, 99f		@ get effective addr of 99f
-		ldr	\rv, [\rp]		@ get absolute addr of 99f
-		sub	\rv, \rv, \rp		@ offset between the two
-		ldr	\rp, [\rp, #4]		@ abs addr of omap_uart_phys
-		sub	\tmp, \rp, \rv		@ make it effective
-		ldr	\rp, [\tmp, #0]		@ davinci_uart_phys
-		ldr	\rv, [\tmp, #4]		@ davinci_uart_virt
-		cmp	\rp, #0			@ is port configured?
-		cmpne	\rv, #0
-		bne	100f			@ already configured
-
-		/* Check the debug UART address set in uncompress.h */
-		and	\rp, pc, #0xff000000
-		ldr	\rv, =DAVINCI_UART_INFO_OFS
-		add	\rp, \rp, \rv
-
-		/* Copy uart phys address from decompressor uart info */
-		ldr	\rv, [\rp, #0]
-		str	\rv, [\tmp, #0]
-
-		/* Copy uart virt address from decompressor uart info */
-		ldr	\rv, [\rp, #4]
-		str	\rv, [\tmp, #4]
-
-		b	10b
-
-		.align
-99:		.word	.
-		.word	davinci_uart_phys
-		.ltorg
-
-100:
+		ldr	\rp, =UART_BASE
+		ldr	\rv, =UART_VIRTBASE
 		.endm
 
 		.macro	senduart,rd,rx
diff --git a/arch/arm/mach-davinci/include/mach/hardware.h b/arch/arm/mach-davinci/include/mach/hardware.h
index 2184691..16bb422 100644
--- a/arch/arm/mach-davinci/include/mach/hardware.h
+++ b/arch/arm/mach-davinci/include/mach/hardware.h
@@ -22,7 +22,7 @@
 /*
  * I/O mapping
  */
-#define IO_PHYS				0x01c00000UL
+#define IO_PHYS				UL(0x01c00000)
 #define IO_OFFSET			0xfd000000 /* Virtual IO = 0xfec00000 */
 #define IO_SIZE				0x00400000
 #define IO_VIRT				(IO_PHYS + IO_OFFSET)
diff --git a/arch/arm/mach-davinci/include/mach/serial.h b/arch/arm/mach-davinci/include/mach/serial.h
index e347d88..46b3cd1 100644
--- a/arch/arm/mach-davinci/include/mach/serial.h
+++ b/arch/arm/mach-davinci/include/mach/serial.h
@@ -15,16 +15,6 @@
 
 #include <mach/hardware.h>
 
-/*
- * Stolen area that contains debug uart physical and virtual addresses.  These
- * addresses are filled in by the uncompress.h code, and are used by the debug
- * macros in debug-macro.S.
- *
- * This area sits just below the page tables (see arch/arm/kernel/head.S).
- * We define it as a relative offset from start of usable RAM.
- */
-#define DAVINCI_UART_INFO_OFS	0x3ff8
-
 #define DAVINCI_UART0_BASE	(IO_PHYS + 0x20000)
 #define DAVINCI_UART1_BASE	(IO_PHYS + 0x20400)
 #define DAVINCI_UART2_BASE	(IO_PHYS + 0x20800)
diff --git a/arch/arm/mach-davinci/include/mach/uncompress.h b/arch/arm/mach-davinci/include/mach/uncompress.h
index da2fb2c..18cfd49 100644
--- a/arch/arm/mach-davinci/include/mach/uncompress.h
+++ b/arch/arm/mach-davinci/include/mach/uncompress.h
@@ -43,37 +43,27 @@
 		barrier();
 }
 
-static inline void set_uart_info(u32 phys, void * __iomem virt)
+static inline void set_uart_info(u32 phys)
 {
-	/*
-	 * Get address of some.bss variable and round it down
-	 * a la CONFIG_AUTO_ZRELADDR.
-	 */
-	u32 ram_start = (u32)&uart & 0xf8000000;
-	u32 *uart_info = (u32 *)(ram_start + DAVINCI_UART_INFO_OFS);
-
 	uart = (u32 *)phys;
-	uart_info[0] = phys;
-	uart_info[1] = (u32)virt;
 }
 
-#define _DEBUG_LL_ENTRY(machine, phys, virt)			\
-	if (machine_is_##machine()) {				\
-		set_uart_info(phys, virt);			\
-		break;						\
+#define _DEBUG_LL_ENTRY(machine, phys)				\
+	{							\
+		if (machine_is_##machine()) {			\
+			set_uart_info(phys);			\
+			break;					\
+		}						\
 	}
 
 #define DEBUG_LL_DAVINCI(machine, port)				\
-	_DEBUG_LL_ENTRY(machine, DAVINCI_UART##port##_BASE,	\
-			IO_ADDRESS(DAVINCI_UART##port##_BASE))
+	_DEBUG_LL_ENTRY(machine, DAVINCI_UART##port##_BASE)
 
 #define DEBUG_LL_DA8XX(machine, port)				\
-	_DEBUG_LL_ENTRY(machine, DA8XX_UART##port##_BASE,	\
-			IO_ADDRESS(DA8XX_UART##port##_BASE))
+	_DEBUG_LL_ENTRY(machine, DA8XX_UART##port##_BASE)
 
 #define DEBUG_LL_TNETV107X(machine, port)			\
-	_DEBUG_LL_ENTRY(machine, TNETV107X_UART##port##_BASE,	\
-			TNETV107X_UART##port##_VIRT)
+	_DEBUG_LL_ENTRY(machine, TNETV107X_UART##port##_BASE)
 
 static inline void __arch_decomp_setup(unsigned long arch_id)
 {
diff --git a/arch/arm/mach-davinci/pm.c b/arch/arm/mach-davinci/pm.c
index 04c49f7..eb8360b 100644
--- a/arch/arm/mach-davinci/pm.c
+++ b/arch/arm/mach-davinci/pm.c
@@ -152,8 +152,7 @@
 	.remove = __exit_p(davinci_pm_remove),
 };
 
-static int __init davinci_pm_init(void)
+int __init davinci_pm_init(void)
 {
 	return platform_driver_probe(&davinci_pm_driver, davinci_pm_probe);
 }
-late_initcall(davinci_pm_init);
diff --git a/arch/arm/mach-dove/common.c b/arch/arm/mach-dove/common.c
index 42ab1e7..9493076 100644
--- a/arch/arm/mach-dove/common.c
+++ b/arch/arm/mach-dove/common.c
@@ -13,7 +13,7 @@
 #include <linux/init.h>
 #include <linux/platform_device.h>
 #include <linux/pci.h>
-#include <linux/clk.h>
+#include <linux/clk-provider.h>
 #include <linux/ata_platform.h>
 #include <linux/gpio.h>
 #include <asm/page.h>
@@ -68,6 +68,19 @@
 }
 
 /*****************************************************************************
+ * CLK tree
+ ****************************************************************************/
+static struct clk *tclk;
+
+static void __init clk_init(void)
+{
+	tclk = clk_register_fixed_rate(NULL, "tclk", NULL, CLK_IS_ROOT,
+				       get_tclk());
+
+	orion_clkdev_init(tclk);
+}
+
+/*****************************************************************************
  * EHCI0
  ****************************************************************************/
 void __init dove_ehci0_init(void)
@@ -89,8 +102,7 @@
 void __init dove_ge00_init(struct mv643xx_eth_platform_data *eth_data)
 {
 	orion_ge00_init(eth_data,
-			DOVE_GE00_PHYS_BASE, IRQ_DOVE_GE00_SUM,
-			0, get_tclk());
+			DOVE_GE00_PHYS_BASE, IRQ_DOVE_GE00_SUM, 0);
 }
 
 /*****************************************************************************
@@ -116,7 +128,7 @@
 void __init dove_uart0_init(void)
 {
 	orion_uart0_init(DOVE_UART0_VIRT_BASE, DOVE_UART0_PHYS_BASE,
-			 IRQ_DOVE_UART_0, get_tclk());
+			 IRQ_DOVE_UART_0, tclk);
 }
 
 /*****************************************************************************
@@ -125,7 +137,7 @@
 void __init dove_uart1_init(void)
 {
 	orion_uart1_init(DOVE_UART1_VIRT_BASE, DOVE_UART1_PHYS_BASE,
-			 IRQ_DOVE_UART_1, get_tclk());
+			 IRQ_DOVE_UART_1, tclk);
 }
 
 /*****************************************************************************
@@ -134,7 +146,7 @@
 void __init dove_uart2_init(void)
 {
 	orion_uart2_init(DOVE_UART2_VIRT_BASE, DOVE_UART2_PHYS_BASE,
-			 IRQ_DOVE_UART_2, get_tclk());
+			 IRQ_DOVE_UART_2, tclk);
 }
 
 /*****************************************************************************
@@ -143,7 +155,7 @@
 void __init dove_uart3_init(void)
 {
 	orion_uart3_init(DOVE_UART3_VIRT_BASE, DOVE_UART3_PHYS_BASE,
-			 IRQ_DOVE_UART_3, get_tclk());
+			 IRQ_DOVE_UART_3, tclk);
 }
 
 /*****************************************************************************
@@ -151,12 +163,12 @@
  ****************************************************************************/
 void __init dove_spi0_init(void)
 {
-	orion_spi_init(DOVE_SPI0_PHYS_BASE, get_tclk());
+	orion_spi_init(DOVE_SPI0_PHYS_BASE);
 }
 
 void __init dove_spi1_init(void)
 {
-	orion_spi_1_init(DOVE_SPI1_PHYS_BASE, get_tclk());
+	orion_spi_1_init(DOVE_SPI1_PHYS_BASE);
 }
 
 /*****************************************************************************
@@ -272,18 +284,17 @@
 
 void __init dove_init(void)
 {
-	int tclk;
-
-	tclk = get_tclk();
-
 	printk(KERN_INFO "Dove 88AP510 SoC, ");
-	printk(KERN_INFO "TCLK = %dMHz\n", (tclk + 499999) / 1000000);
+	printk(KERN_INFO "TCLK = %dMHz\n", (get_tclk() + 499999) / 1000000);
 
 #ifdef CONFIG_CACHE_TAUROS2
 	tauros2_init();
 #endif
 	dove_setup_cpu_mbus();
 
+	/* Setup root of clk tree */
+	clk_init();
+
 	/* internal devices that every board has */
 	dove_rtc_init();
 	dove_xor0_init();
diff --git a/arch/arm/mach-dove/dove-db-setup.c b/arch/arm/mach-dove/dove-db-setup.c
index ea77ae4..bc2867f 100644
--- a/arch/arm/mach-dove/dove-db-setup.c
+++ b/arch/arm/mach-dove/dove-db-setup.c
@@ -20,7 +20,6 @@
 #include <linux/i2c.h>
 #include <linux/pci.h>
 #include <linux/spi/spi.h>
-#include <linux/spi/orion_spi.h>
 #include <linux/spi/flash.h>
 #include <linux/gpio.h>
 #include <asm/mach-types.h>
diff --git a/arch/arm/mach-ep93xx/adssphere.c b/arch/arm/mach-ep93xx/adssphere.c
index 2d45947..a472777 100644
--- a/arch/arm/mach-ep93xx/adssphere.c
+++ b/arch/arm/mach-ep93xx/adssphere.c
@@ -41,5 +41,6 @@
 	.handle_irq	= vic_handle_irq,
 	.timer		= &ep93xx_timer,
 	.init_machine	= adssphere_init_machine,
+	.init_late	= ep93xx_init_late,
 	.restart	= ep93xx_restart,
 MACHINE_END
diff --git a/arch/arm/mach-ep93xx/core.c b/arch/arm/mach-ep93xx/core.c
index 66b1494..4dd07a0 100644
--- a/arch/arm/mach-ep93xx/core.c
+++ b/arch/arm/mach-ep93xx/core.c
@@ -675,7 +675,7 @@
 fail_gpio_d:
 	gpio_free(EP93XX_GPIO_LINE_C(i));
 fail_gpio_c:
-	for ( ; i >= 0; --i) {
+	for (--i; i >= 0; --i) {
 		gpio_free(EP93XX_GPIO_LINE_C(i));
 		gpio_free(EP93XX_GPIO_LINE_D(i));
 	}
@@ -834,3 +834,8 @@
 	while (1)
 		;
 }
+
+void __init ep93xx_init_late(void)
+{
+	crunch_init();
+}
diff --git a/arch/arm/mach-ep93xx/crunch.c b/arch/arm/mach-ep93xx/crunch.c
index 74753e2..a4a2ab9 100644
--- a/arch/arm/mach-ep93xx/crunch.c
+++ b/arch/arm/mach-ep93xx/crunch.c
@@ -79,12 +79,10 @@
 	.notifier_call	= crunch_do,
 };
 
-static int __init crunch_init(void)
+int __init crunch_init(void)
 {
 	thread_register_notifier(&crunch_notifier_block);
 	elf_hwcap |= HWCAP_CRUNCH;
 
 	return 0;
 }
-
-late_initcall(crunch_init);
diff --git a/arch/arm/mach-ep93xx/edb93xx.c b/arch/arm/mach-ep93xx/edb93xx.c
index da9047d..d74c5cd 100644
--- a/arch/arm/mach-ep93xx/edb93xx.c
+++ b/arch/arm/mach-ep93xx/edb93xx.c
@@ -255,6 +255,7 @@
 	.handle_irq	= vic_handle_irq,
 	.timer		= &ep93xx_timer,
 	.init_machine	= edb93xx_init_machine,
+	.init_late	= ep93xx_init_late,
 	.restart	= ep93xx_restart,
 MACHINE_END
 #endif
@@ -268,6 +269,7 @@
 	.handle_irq	= vic_handle_irq,
 	.timer		= &ep93xx_timer,
 	.init_machine	= edb93xx_init_machine,
+	.init_late	= ep93xx_init_late,
 	.restart	= ep93xx_restart,
 MACHINE_END
 #endif
@@ -281,6 +283,7 @@
 	.handle_irq	= vic_handle_irq,
 	.timer		= &ep93xx_timer,
 	.init_machine	= edb93xx_init_machine,
+	.init_late	= ep93xx_init_late,
 	.restart	= ep93xx_restart,
 MACHINE_END
 #endif
@@ -294,6 +297,7 @@
 	.handle_irq	= vic_handle_irq,
 	.timer		= &ep93xx_timer,
 	.init_machine	= edb93xx_init_machine,
+	.init_late	= ep93xx_init_late,
 	.restart	= ep93xx_restart,
 MACHINE_END
 #endif
@@ -307,6 +311,7 @@
 	.handle_irq	= vic_handle_irq,
 	.timer		= &ep93xx_timer,
 	.init_machine	= edb93xx_init_machine,
+	.init_late	= ep93xx_init_late,
 	.restart	= ep93xx_restart,
 MACHINE_END
 #endif
@@ -320,6 +325,7 @@
 	.handle_irq	= vic_handle_irq,
 	.timer		= &ep93xx_timer,
 	.init_machine	= edb93xx_init_machine,
+	.init_late	= ep93xx_init_late,
 	.restart	= ep93xx_restart,
 MACHINE_END
 #endif
@@ -333,6 +339,7 @@
 	.handle_irq	= vic_handle_irq,
 	.timer		= &ep93xx_timer,
 	.init_machine	= edb93xx_init_machine,
+	.init_late	= ep93xx_init_late,
 	.restart	= ep93xx_restart,
 MACHINE_END
 #endif
@@ -346,6 +353,7 @@
 	.handle_irq	= vic_handle_irq,
 	.timer		= &ep93xx_timer,
 	.init_machine	= edb93xx_init_machine,
+	.init_late	= ep93xx_init_late,
 	.restart	= ep93xx_restart,
 MACHINE_END
 #endif
diff --git a/arch/arm/mach-ep93xx/gesbc9312.c b/arch/arm/mach-ep93xx/gesbc9312.c
index fcdffbe49..437c341 100644
--- a/arch/arm/mach-ep93xx/gesbc9312.c
+++ b/arch/arm/mach-ep93xx/gesbc9312.c
@@ -41,5 +41,6 @@
 	.handle_irq	= vic_handle_irq,
 	.timer		= &ep93xx_timer,
 	.init_machine	= gesbc9312_init_machine,
+	.init_late	= ep93xx_init_late,
 	.restart	= ep93xx_restart,
 MACHINE_END
diff --git a/arch/arm/mach-ep93xx/include/mach/platform.h b/arch/arm/mach-ep93xx/include/mach/platform.h
index 602bd87..1ecb040 100644
--- a/arch/arm/mach-ep93xx/include/mach/platform.h
+++ b/arch/arm/mach-ep93xx/include/mach/platform.h
@@ -53,5 +53,12 @@
 extern struct sys_timer ep93xx_timer;
 
 void ep93xx_restart(char, const char *);
+void ep93xx_init_late(void);
+
+#ifdef CONFIG_CRUNCH
+int crunch_init(void);
+#else
+static inline int crunch_init(void) { return 0; }
+#endif
 
 #endif
diff --git a/arch/arm/mach-ep93xx/micro9.c b/arch/arm/mach-ep93xx/micro9.c
index dc431c5..3d7cdab 100644
--- a/arch/arm/mach-ep93xx/micro9.c
+++ b/arch/arm/mach-ep93xx/micro9.c
@@ -85,6 +85,7 @@
 	.handle_irq	= vic_handle_irq,
 	.timer		= &ep93xx_timer,
 	.init_machine	= micro9_init_machine,
+	.init_late	= ep93xx_init_late,
 	.restart	= ep93xx_restart,
 MACHINE_END
 #endif
@@ -98,6 +99,7 @@
 	.handle_irq	= vic_handle_irq,
 	.timer		= &ep93xx_timer,
 	.init_machine	= micro9_init_machine,
+	.init_late	= ep93xx_init_late,
 	.restart	= ep93xx_restart,
 MACHINE_END
 #endif
@@ -111,6 +113,7 @@
 	.handle_irq	= vic_handle_irq,
 	.timer		= &ep93xx_timer,
 	.init_machine	= micro9_init_machine,
+	.init_late	= ep93xx_init_late,
 	.restart	= ep93xx_restart,
 MACHINE_END
 #endif
@@ -124,6 +127,7 @@
 	.handle_irq	= vic_handle_irq,
 	.timer		= &ep93xx_timer,
 	.init_machine	= micro9_init_machine,
+	.init_late	= ep93xx_init_late,
 	.restart	= ep93xx_restart,
 MACHINE_END
 #endif
diff --git a/arch/arm/mach-ep93xx/simone.c b/arch/arm/mach-ep93xx/simone.c
index f40c298..33dc079 100644
--- a/arch/arm/mach-ep93xx/simone.c
+++ b/arch/arm/mach-ep93xx/simone.c
@@ -86,5 +86,6 @@
 	.handle_irq	= vic_handle_irq,
 	.timer		= &ep93xx_timer,
 	.init_machine	= simone_init_machine,
+	.init_late	= ep93xx_init_late,
 	.restart	= ep93xx_restart,
 MACHINE_END
diff --git a/arch/arm/mach-ep93xx/snappercl15.c b/arch/arm/mach-ep93xx/snappercl15.c
index 0c00852..01abd35 100644
--- a/arch/arm/mach-ep93xx/snappercl15.c
+++ b/arch/arm/mach-ep93xx/snappercl15.c
@@ -82,8 +82,6 @@
 	return !!(__raw_readw(NAND_CTRL_ADDR(chip)) & SNAPPERCL15_NAND_RDY);
 }
 
-static const char *snappercl15_nand_part_probes[] = {"cmdlinepart", NULL};
-
 static struct mtd_partition snappercl15_nand_parts[] = {
 	{
 		.name		= "Kernel",
@@ -100,10 +98,8 @@
 static struct platform_nand_data snappercl15_nand_data = {
 	.chip = {
 		.nr_chips		= 1,
-		.part_probe_types	= snappercl15_nand_part_probes,
 		.partitions		= snappercl15_nand_parts,
 		.nr_partitions		= ARRAY_SIZE(snappercl15_nand_parts),
-		.options		= NAND_NO_AUTOINCR,
 		.chip_delay		= 25,
 	},
 	.ctrl = {
@@ -183,5 +179,6 @@
 	.handle_irq	= vic_handle_irq,
 	.timer 		= &ep93xx_timer,
 	.init_machine	= snappercl15_init_machine,
+	.init_late	= ep93xx_init_late,
 	.restart	= ep93xx_restart,
 MACHINE_END
diff --git a/arch/arm/mach-ep93xx/ts72xx.c b/arch/arm/mach-ep93xx/ts72xx.c
index 5ea7909..75cab2d 100644
--- a/arch/arm/mach-ep93xx/ts72xx.c
+++ b/arch/arm/mach-ep93xx/ts72xx.c
@@ -105,8 +105,6 @@
 	return !!(__raw_readb(addr) & 0x20);
 }
 
-static const char *ts72xx_nand_part_probes[] = { "cmdlinepart", NULL };
-
 #define TS72XX_BOOTROM_PART_SIZE	(SZ_16K)
 #define TS72XX_REDBOOT_PART_SIZE	(SZ_2M + SZ_1M)
 
@@ -134,7 +132,6 @@
 		.nr_chips	= 1,
 		.chip_offset	= 0,
 		.chip_delay	= 15,
-		.part_probe_types = ts72xx_nand_part_probes,
 		.partitions	= ts72xx_nand_parts,
 		.nr_partitions	= ARRAY_SIZE(ts72xx_nand_parts),
 	},
@@ -252,5 +249,6 @@
 	.handle_irq	= vic_handle_irq,
 	.timer		= &ep93xx_timer,
 	.init_machine	= ts72xx_init_machine,
+	.init_late	= ep93xx_init_late,
 	.restart	= ep93xx_restart,
 MACHINE_END
diff --git a/arch/arm/mach-ep93xx/vision_ep9307.c b/arch/arm/mach-ep93xx/vision_ep9307.c
index ba156eb..2905a49 100644
--- a/arch/arm/mach-ep93xx/vision_ep9307.c
+++ b/arch/arm/mach-ep93xx/vision_ep9307.c
@@ -367,5 +367,6 @@
 	.handle_irq	= vic_handle_irq,
 	.timer		= &ep93xx_timer,
 	.init_machine	= vision_init_machine,
+	.init_late	= ep93xx_init_late,
 	.restart	= ep93xx_restart,
 MACHINE_END
diff --git a/arch/arm/mach-exynos/Kconfig b/arch/arm/mach-exynos/Kconfig
index 15b05b8..6f6d13f 100644
--- a/arch/arm/mach-exynos/Kconfig
+++ b/arch/arm/mach-exynos/Kconfig
@@ -61,6 +61,9 @@
 	bool "SAMSUNG EXYNOS5250"
 	default y
 	depends on ARCH_EXYNOS5
+	select SAMSUNG_DMADEV
+	select S5P_PM if PM
+	select S5P_SLEEP if PM
 	help
 	  Enable EXYNOS5250 SoC support
 
@@ -70,7 +73,7 @@
 	help
 	  Use MCT (Multi Core Timer) as kernel timers
 
-config EXYNOS4_DEV_DMA
+config EXYNOS_DEV_DMA
 	bool
 	help
 	  Compile in amba device definitions for DMA controller
@@ -80,15 +83,20 @@
 	help
 	  Compile in platform device definitions for AHCI
 
+config EXYNOS_DEV_DRM
+	bool
+	help
+	  Compile in platform device definitions for core DRM device
+
 config EXYNOS4_SETUP_FIMD0
 	bool
 	help
 	  Common setup code for FIMD0.
 
-config EXYNOS4_DEV_SYSMMU
+config EXYNOS_DEV_SYSMMU
 	bool
 	help
-	  Common setup code for SYSTEM MMU in EXYNOS4
+	  Common setup code for SYSTEM MMU in EXYNOS platforms
 
 config EXYNOS4_DEV_DWMCI
 	bool
@@ -161,7 +169,7 @@
 	help
 	  Common setup code for USB PHY controller
 
-config EXYNOS4_SETUP_SPI
+config EXYNOS_SETUP_SPI
 	bool
 	help
 	  Common setup code for SPI GPIO configurations.
@@ -201,12 +209,12 @@
 	select S3C_DEV_HSMMC3
 	select SAMSUNG_DEV_BACKLIGHT
 	select EXYNOS_DEV_DRM
+	select EXYNOS_DEV_SYSMMU
 	select EXYNOS4_DEV_AHCI
 	select SAMSUNG_DEV_KEYPAD
-	select EXYNOS4_DEV_DMA
+	select EXYNOS_DEV_DMA
 	select SAMSUNG_DEV_PWM
 	select EXYNOS4_DEV_USB_OHCI
-	select EXYNOS4_DEV_SYSMMU
 	select EXYNOS4_SETUP_FIMD0
 	select EXYNOS4_SETUP_I2C1
 	select EXYNOS4_SETUP_KEYPAD
@@ -224,8 +232,7 @@
 	select S3C_DEV_HSMMC2
 	select S3C_DEV_HSMMC3
 	select EXYNOS4_DEV_AHCI
-	select EXYNOS4_DEV_DMA
-	select EXYNOS4_DEV_SYSMMU
+	select EXYNOS_DEV_DMA
 	select EXYNOS4_SETUP_SDHCI
 	help
 	  Machine support for Samsung ARMLEX4210 based on EXYNOS4210
@@ -256,7 +263,8 @@
 	select S5P_DEV_MFC
 	select S5P_DEV_ONENAND
 	select S5P_DEV_TV
-	select EXYNOS4_DEV_DMA
+	select EXYNOS_DEV_SYSMMU
+	select EXYNOS_DEV_DMA
 	select EXYNOS_DEV_DRM
 	select EXYNOS4_SETUP_FIMD0
 	select EXYNOS4_SETUP_I2C1
@@ -295,7 +303,7 @@
 	select S5P_DEV_MFC
 	select S5P_DEV_USB_EHCI
 	select S5P_SETUP_MIPIPHY
-	select EXYNOS4_DEV_DMA
+	select EXYNOS_DEV_DMA
 	select EXYNOS_DEV_DRM
 	select EXYNOS4_SETUP_FIMC
 	select EXYNOS4_SETUP_FIMD0
@@ -332,7 +340,8 @@
 	select SAMSUNG_DEV_BACKLIGHT
 	select SAMSUNG_DEV_PWM
 	select EXYNOS_DEV_DRM
-	select EXYNOS4_DEV_DMA
+	select EXYNOS_DEV_SYSMMU
+	select EXYNOS_DEV_DMA
 	select EXYNOS4_DEV_USB_OHCI
 	select EXYNOS4_SETUP_FIMD0
 	select EXYNOS4_SETUP_SDHCI
@@ -360,7 +369,8 @@
 	select SAMSUNG_DEV_BACKLIGHT
 	select SAMSUNG_DEV_KEYPAD
 	select SAMSUNG_DEV_PWM
-	select EXYNOS4_DEV_DMA
+	select EXYNOS_DEV_SYSMMU
+	select EXYNOS_DEV_DMA
 	select EXYNOS4_SETUP_I2C1
 	select EXYNOS4_SETUP_I2C3
 	select EXYNOS4_SETUP_I2C7
diff --git a/arch/arm/mach-exynos/Makefile b/arch/arm/mach-exynos/Makefile
index 8631840..9b58024 100644
--- a/arch/arm/mach-exynos/Makefile
+++ b/arch/arm/mach-exynos/Makefile
@@ -22,7 +22,7 @@
 obj-$(CONFIG_PM_GENERIC_DOMAINS) += pm_domains.o
 obj-$(CONFIG_CPU_IDLE)		+= cpuidle.o
 
-obj-$(CONFIG_ARCH_EXYNOS4)	+= pmu.o
+obj-$(CONFIG_ARCH_EXYNOS)	+= pmu.o
 
 obj-$(CONFIG_SMP)		+= platsmp.o headsmp.o
 
@@ -50,10 +50,11 @@
 obj-y					+= dev-uart.o
 obj-$(CONFIG_ARCH_EXYNOS4)		+= dev-audio.o
 obj-$(CONFIG_EXYNOS4_DEV_AHCI)		+= dev-ahci.o
-obj-$(CONFIG_EXYNOS4_DEV_SYSMMU)	+= dev-sysmmu.o
 obj-$(CONFIG_EXYNOS4_DEV_DWMCI)		+= dev-dwmci.o
-obj-$(CONFIG_EXYNOS4_DEV_DMA)		+= dma.o
+obj-$(CONFIG_EXYNOS_DEV_DMA)		+= dma.o
 obj-$(CONFIG_EXYNOS4_DEV_USB_OHCI)	+= dev-ohci.o
+obj-$(CONFIG_EXYNOS_DEV_DRM)		+= dev-drm.o
+obj-$(CONFIG_EXYNOS_DEV_SYSMMU)		+= dev-sysmmu.o
 
 obj-$(CONFIG_ARCH_EXYNOS)		+= setup-i2c0.o
 obj-$(CONFIG_EXYNOS4_SETUP_FIMC)	+= setup-fimc.o
@@ -68,4 +69,4 @@
 obj-$(CONFIG_EXYNOS4_SETUP_KEYPAD)	+= setup-keypad.o
 obj-$(CONFIG_EXYNOS4_SETUP_SDHCI_GPIO)	+= setup-sdhci-gpio.o
 obj-$(CONFIG_EXYNOS4_SETUP_USB_PHY)	+= setup-usb-phy.o
-obj-$(CONFIG_EXYNOS4_SETUP_SPI)		+= setup-spi.o
+obj-$(CONFIG_EXYNOS_SETUP_SPI)		+= setup-spi.o
diff --git a/arch/arm/mach-exynos/Makefile.boot b/arch/arm/mach-exynos/Makefile.boot
index b9862e2..31bd181 100644
--- a/arch/arm/mach-exynos/Makefile.boot
+++ b/arch/arm/mach-exynos/Makefile.boot
@@ -1,2 +1,5 @@
    zreladdr-y	+= 0x40008000
 params_phys-y	:= 0x40000100
+
+dtb-$(CONFIG_MACH_EXYNOS4_DT) += exynos4210-origen.dtb exynos4210-smdkv310.dtb
+dtb-$(CONFIG_MACH_EXYNOS5_DT) += exynos5250-smdk5250.dtb
diff --git a/arch/arm/mach-exynos/clock-exynos4.c b/arch/arm/mach-exynos/clock-exynos4.c
index 6efd1e5..bcb7db4 100644
--- a/arch/arm/mach-exynos/clock-exynos4.c
+++ b/arch/arm/mach-exynos/clock-exynos4.c
@@ -168,7 +168,7 @@
 	return s5p_gatectrl(EXYNOS4_CLKGATE_IP_TV, clk, enable);
 }
 
-static int exynos4_clk_ip_image_ctrl(struct clk *clk, int enable)
+int exynos4_clk_ip_image_ctrl(struct clk *clk, int enable)
 {
 	return s5p_gatectrl(EXYNOS4_CLKGATE_IP_IMAGE, clk, enable);
 }
@@ -198,6 +198,11 @@
 	return s5p_gatectrl(EXYNOS4_CLKGATE_IP_PERIR, clk, enable);
 }
 
+int exynos4_clk_ip_dmc_ctrl(struct clk *clk, int enable)
+{
+	return s5p_gatectrl(EXYNOS4_CLKGATE_IP_DMC, clk, enable);
+}
+
 static int exynos4_clk_hdmiphy_ctrl(struct clk *clk, int enable)
 {
 	return s5p_gatectrl(S5P_HDMI_PHY_CONTROL, clk, enable);
@@ -678,61 +683,55 @@
 		.enable		= exynos4_clk_ip_peril_ctrl,
 		.ctrlbit	= (1 << 14),
 	}, {
-		.name		= "SYSMMU_MDMA",
-		.enable		= exynos4_clk_ip_image_ctrl,
-		.ctrlbit	= (1 << 5),
-	}, {
-		.name		= "SYSMMU_FIMC0",
-		.enable		= exynos4_clk_ip_cam_ctrl,
-		.ctrlbit	= (1 << 7),
-	}, {
-		.name		= "SYSMMU_FIMC1",
-		.enable		= exynos4_clk_ip_cam_ctrl,
-		.ctrlbit	= (1 << 8),
-	}, {
-		.name		= "SYSMMU_FIMC2",
-		.enable		= exynos4_clk_ip_cam_ctrl,
-		.ctrlbit	= (1 << 9),
-	}, {
-		.name		= "SYSMMU_FIMC3",
-		.enable		= exynos4_clk_ip_cam_ctrl,
-		.ctrlbit	= (1 << 10),
-	}, {
-		.name		= "SYSMMU_JPEG",
-		.enable		= exynos4_clk_ip_cam_ctrl,
-		.ctrlbit	= (1 << 11),
-	}, {
-		.name		= "SYSMMU_FIMD0",
-		.enable		= exynos4_clk_ip_lcd0_ctrl,
-		.ctrlbit	= (1 << 4),
-	}, {
-		.name		= "SYSMMU_FIMD1",
-		.enable		= exynos4_clk_ip_lcd1_ctrl,
-		.ctrlbit	= (1 << 4),
-	}, {
-		.name		= "SYSMMU_PCIe",
-		.enable		= exynos4_clk_ip_fsys_ctrl,
-		.ctrlbit	= (1 << 18),
-	}, {
-		.name		= "SYSMMU_G2D",
-		.enable		= exynos4_clk_ip_image_ctrl,
-		.ctrlbit	= (1 << 3),
-	}, {
-		.name		= "SYSMMU_ROTATOR",
-		.enable		= exynos4_clk_ip_image_ctrl,
-		.ctrlbit	= (1 << 4),
-	}, {
-		.name		= "SYSMMU_TV",
-		.enable		= exynos4_clk_ip_tv_ctrl,
-		.ctrlbit	= (1 << 4),
-	}, {
-		.name		= "SYSMMU_MFC_L",
+		.name		= SYSMMU_CLOCK_NAME,
+		.devname	= SYSMMU_CLOCK_DEVNAME(mfc_l, 0),
 		.enable		= exynos4_clk_ip_mfc_ctrl,
 		.ctrlbit	= (1 << 1),
 	}, {
-		.name		= "SYSMMU_MFC_R",
+		.name		= SYSMMU_CLOCK_NAME,
+		.devname	= SYSMMU_CLOCK_DEVNAME(mfc_r, 1),
 		.enable		= exynos4_clk_ip_mfc_ctrl,
 		.ctrlbit	= (1 << 2),
+	}, {
+		.name		= SYSMMU_CLOCK_NAME,
+		.devname	= SYSMMU_CLOCK_DEVNAME(tv, 2),
+		.enable		= exynos4_clk_ip_tv_ctrl,
+		.ctrlbit	= (1 << 4),
+	}, {
+		.name		= SYSMMU_CLOCK_NAME,
+		.devname	= SYSMMU_CLOCK_DEVNAME(jpeg, 3),
+		.enable		= exynos4_clk_ip_cam_ctrl,
+		.ctrlbit	= (1 << 11),
+	}, {
+		.name		= SYSMMU_CLOCK_NAME,
+		.devname	= SYSMMU_CLOCK_DEVNAME(rot, 4),
+		.enable		= exynos4_clk_ip_image_ctrl,
+		.ctrlbit	= (1 << 4),
+	}, {
+		.name		= SYSMMU_CLOCK_NAME,
+		.devname	= SYSMMU_CLOCK_DEVNAME(fimc0, 5),
+		.enable		= exynos4_clk_ip_cam_ctrl,
+		.ctrlbit	= (1 << 7),
+	}, {
+		.name		= SYSMMU_CLOCK_NAME,
+		.devname	= SYSMMU_CLOCK_DEVNAME(fimc1, 6),
+		.enable		= exynos4_clk_ip_cam_ctrl,
+		.ctrlbit	= (1 << 8),
+	}, {
+		.name		= SYSMMU_CLOCK_NAME,
+		.devname	= SYSMMU_CLOCK_DEVNAME(fimc2, 7),
+		.enable		= exynos4_clk_ip_cam_ctrl,
+		.ctrlbit	= (1 << 9),
+	}, {
+		.name		= SYSMMU_CLOCK_NAME,
+		.devname	= SYSMMU_CLOCK_DEVNAME(fimc3, 8),
+		.enable		= exynos4_clk_ip_cam_ctrl,
+		.ctrlbit	= (1 << 10),
+	}, {
+		.name		= SYSMMU_CLOCK_NAME,
+		.devname	= SYSMMU_CLOCK_DEVNAME(fimd0, 10),
+		.enable		= exynos4_clk_ip_lcd0_ctrl,
+		.ctrlbit	= (1 << 4),
 	}
 };
 
diff --git a/arch/arm/mach-exynos/clock-exynos4.h b/arch/arm/mach-exynos/clock-exynos4.h
index cb71c29..28a1197 100644
--- a/arch/arm/mach-exynos/clock-exynos4.h
+++ b/arch/arm/mach-exynos/clock-exynos4.h
@@ -26,5 +26,7 @@
 extern int exynos4_clksrc_mask_fsys_ctrl(struct clk *clk, int enable);
 extern int exynos4_clk_ip_fsys_ctrl(struct clk *clk, int enable);
 extern int exynos4_clk_ip_lcd1_ctrl(struct clk *clk, int enable);
+extern int exynos4_clk_ip_image_ctrl(struct clk *clk, int enable);
+extern int exynos4_clk_ip_dmc_ctrl(struct clk *clk, int enable);
 
 #endif /* __ASM_ARCH_CLOCK_H */
diff --git a/arch/arm/mach-exynos/clock-exynos4210.c b/arch/arm/mach-exynos/clock-exynos4210.c
index 3b131e4..b8689ff 100644
--- a/arch/arm/mach-exynos/clock-exynos4210.c
+++ b/arch/arm/mach-exynos/clock-exynos4210.c
@@ -26,6 +26,7 @@
 #include <mach/hardware.h>
 #include <mach/map.h>
 #include <mach/regs-clock.h>
+#include <mach/sysmmu.h>
 
 #include "common.h"
 #include "clock-exynos4.h"
@@ -94,6 +95,16 @@
 		.devname	= "exynos4-fb.1",
 		.enable		= exynos4_clk_ip_lcd1_ctrl,
 		.ctrlbit	= (1 << 0),
+	}, {
+		.name		= SYSMMU_CLOCK_NAME,
+		.devname	= SYSMMU_CLOCK_DEVNAME(2d, 14),
+		.enable		= exynos4_clk_ip_image_ctrl,
+		.ctrlbit	= (1 << 3),
+	}, {
+		.name		= SYSMMU_CLOCK_NAME,
+		.devname	= SYSMMU_CLOCK_DEVNAME(fimd1, 11),
+		.enable		= exynos4_clk_ip_lcd1_ctrl,
+		.ctrlbit	= (1 << 4),
 	},
 };
 
diff --git a/arch/arm/mach-exynos/clock-exynos4212.c b/arch/arm/mach-exynos/clock-exynos4212.c
index 3ecc01e..da397d2 100644
--- a/arch/arm/mach-exynos/clock-exynos4212.c
+++ b/arch/arm/mach-exynos/clock-exynos4212.c
@@ -26,6 +26,7 @@
 #include <mach/hardware.h>
 #include <mach/map.h>
 #include <mach/regs-clock.h>
+#include <mach/sysmmu.h>
 
 #include "common.h"
 #include "clock-exynos4.h"
@@ -39,6 +40,16 @@
 };
 #endif
 
+static int exynos4212_clk_ip_isp0_ctrl(struct clk *clk, int enable)
+{
+	return s5p_gatectrl(EXYNOS4_CLKGATE_IP_ISP0, clk, enable);
+}
+
+static int exynos4212_clk_ip_isp1_ctrl(struct clk *clk, int enable)
+{
+	return s5p_gatectrl(EXYNOS4_CLKGATE_IP_ISP1, clk, enable);
+}
+
 static struct clk *clk_src_mpll_user_list[] = {
 	[0] = &clk_fin_mpll,
 	[1] = &exynos4_clk_mout_mpll.clk,
@@ -66,7 +77,32 @@
 };
 
 static struct clk init_clocks_off[] = {
-	/* nothing here yet */
+	{
+		.name		= SYSMMU_CLOCK_NAME,
+		.devname	= SYSMMU_CLOCK_DEVNAME(2d, 14),
+		.enable		= exynos4_clk_ip_dmc_ctrl,
+		.ctrlbit	= (1 << 24),
+	}, {
+		.name		= SYSMMU_CLOCK_NAME,
+		.devname	= SYSMMU_CLOCK_DEVNAME(isp, 9),
+		.enable		= exynos4212_clk_ip_isp0_ctrl,
+		.ctrlbit	= (7 << 8),
+	}, {
+		.name		= SYSMMU_CLOCK_NAME2,
+		.devname	= SYSMMU_CLOCK_DEVNAME(isp, 9),
+		.enable		= exynos4212_clk_ip_isp1_ctrl,
+		.ctrlbit	= (1 << 4),
+	}, {
+		.name		= "flite",
+		.devname	= "exynos-fimc-lite.0",
+		.enable		= exynos4212_clk_ip_isp0_ctrl,
+		.ctrlbit	= (1 << 4),
+	}, {
+		.name		= "flite",
+		.devname	= "exynos-fimc-lite.1",
+		.enable		= exynos4212_clk_ip_isp0_ctrl,
+		.ctrlbit	= (1 << 3),
+	}
 };
 
 #ifdef CONFIG_PM_SLEEP
diff --git a/arch/arm/mach-exynos/clock-exynos5.c b/arch/arm/mach-exynos/clock-exynos5.c
index 7ac6ff4..fefa336 100644
--- a/arch/arm/mach-exynos/clock-exynos5.c
+++ b/arch/arm/mach-exynos/clock-exynos5.c
@@ -30,7 +30,56 @@
 
 #ifdef CONFIG_PM_SLEEP
 static struct sleep_save exynos5_clock_save[] = {
-	/* will be implemented */
+	SAVE_ITEM(EXYNOS5_CLKSRC_MASK_TOP),
+	SAVE_ITEM(EXYNOS5_CLKSRC_MASK_GSCL),
+	SAVE_ITEM(EXYNOS5_CLKSRC_MASK_DISP1_0),
+	SAVE_ITEM(EXYNOS5_CLKSRC_MASK_FSYS),
+	SAVE_ITEM(EXYNOS5_CLKSRC_MASK_MAUDIO),
+	SAVE_ITEM(EXYNOS5_CLKSRC_MASK_PERIC0),
+	SAVE_ITEM(EXYNOS5_CLKSRC_MASK_PERIC1),
+	SAVE_ITEM(EXYNOS5_CLKGATE_IP_GSCL),
+	SAVE_ITEM(EXYNOS5_CLKGATE_IP_DISP1),
+	SAVE_ITEM(EXYNOS5_CLKGATE_IP_MFC),
+	SAVE_ITEM(EXYNOS5_CLKGATE_IP_G3D),
+	SAVE_ITEM(EXYNOS5_CLKGATE_IP_GEN),
+	SAVE_ITEM(EXYNOS5_CLKGATE_IP_FSYS),
+	SAVE_ITEM(EXYNOS5_CLKGATE_IP_PERIC),
+	SAVE_ITEM(EXYNOS5_CLKGATE_IP_PERIS),
+	SAVE_ITEM(EXYNOS5_CLKGATE_BLOCK),
+	SAVE_ITEM(EXYNOS5_CLKDIV_TOP0),
+	SAVE_ITEM(EXYNOS5_CLKDIV_TOP1),
+	SAVE_ITEM(EXYNOS5_CLKDIV_GSCL),
+	SAVE_ITEM(EXYNOS5_CLKDIV_DISP1_0),
+	SAVE_ITEM(EXYNOS5_CLKDIV_GEN),
+	SAVE_ITEM(EXYNOS5_CLKDIV_MAUDIO),
+	SAVE_ITEM(EXYNOS5_CLKDIV_FSYS0),
+	SAVE_ITEM(EXYNOS5_CLKDIV_FSYS1),
+	SAVE_ITEM(EXYNOS5_CLKDIV_FSYS2),
+	SAVE_ITEM(EXYNOS5_CLKDIV_FSYS3),
+	SAVE_ITEM(EXYNOS5_CLKDIV_PERIC0),
+	SAVE_ITEM(EXYNOS5_CLKDIV_PERIC1),
+	SAVE_ITEM(EXYNOS5_CLKDIV_PERIC2),
+	SAVE_ITEM(EXYNOS5_CLKDIV_PERIC3),
+	SAVE_ITEM(EXYNOS5_CLKDIV_PERIC4),
+	SAVE_ITEM(EXYNOS5_CLKDIV_PERIC5),
+	SAVE_ITEM(EXYNOS5_SCLK_DIV_ISP),
+	SAVE_ITEM(EXYNOS5_CLKSRC_TOP0),
+	SAVE_ITEM(EXYNOS5_CLKSRC_TOP1),
+	SAVE_ITEM(EXYNOS5_CLKSRC_TOP2),
+	SAVE_ITEM(EXYNOS5_CLKSRC_TOP3),
+	SAVE_ITEM(EXYNOS5_CLKSRC_GSCL),
+	SAVE_ITEM(EXYNOS5_CLKSRC_DISP1_0),
+	SAVE_ITEM(EXYNOS5_CLKSRC_MAUDIO),
+	SAVE_ITEM(EXYNOS5_CLKSRC_FSYS),
+	SAVE_ITEM(EXYNOS5_CLKSRC_PERIC0),
+	SAVE_ITEM(EXYNOS5_CLKSRC_PERIC1),
+	SAVE_ITEM(EXYNOS5_SCLK_SRC_ISP),
+	SAVE_ITEM(EXYNOS5_EPLL_CON0),
+	SAVE_ITEM(EXYNOS5_EPLL_CON1),
+	SAVE_ITEM(EXYNOS5_EPLL_CON2),
+	SAVE_ITEM(EXYNOS5_VPLL_CON0),
+	SAVE_ITEM(EXYNOS5_VPLL_CON1),
+	SAVE_ITEM(EXYNOS5_VPLL_CON2),
 };
 #endif
 
@@ -82,6 +131,11 @@
 	return s5p_gatectrl(EXYNOS5_CLKSRC_MASK_PERIC0, clk, enable);
 }
 
+static int exynos5_clk_ip_acp_ctrl(struct clk *clk, int enable)
+{
+	return s5p_gatectrl(EXYNOS5_CLKGATE_IP_ACP, clk, enable);
+}
+
 static int exynos5_clk_ip_core_ctrl(struct clk *clk, int enable)
 {
 	return s5p_gatectrl(EXYNOS5_CLKGATE_IP_CORE, clk, enable);
@@ -127,6 +181,21 @@
 	return s5p_gatectrl(EXYNOS5_CLKGATE_IP_PERIS, clk, enable);
 }
 
+static int exynos5_clk_ip_gscl_ctrl(struct clk *clk, int enable)
+{
+	return s5p_gatectrl(EXYNOS5_CLKGATE_IP_GSCL, clk, enable);
+}
+
+static int exynos5_clk_ip_isp0_ctrl(struct clk *clk, int enable)
+{
+	return s5p_gatectrl(EXYNOS5_CLKGATE_IP_ISP0, clk, enable);
+}
+
+static int exynos5_clk_ip_isp1_ctrl(struct clk *clk, int enable)
+{
+	return s5p_gatectrl(EXYNOS5_CLKGATE_IP_ISP1, clk, enable);
+}
+
 /* Core list of CMU_CPU side */
 
 static struct clksrc_clk exynos5_clk_mout_apll = {
@@ -145,11 +214,29 @@
 	.reg_div = { .reg = EXYNOS5_CLKDIV_CPU0, .shift = 24, .size = 3 },
 };
 
+static struct clksrc_clk exynos5_clk_mout_bpll_fout = {
+	.clk	= {
+		.name		= "mout_bpll_fout",
+	},
+	.sources = &clk_src_bpll_fout,
+	.reg_src = { .reg = EXYNOS5_PLL_DIV2_SEL, .shift = 0, .size = 1 },
+};
+
+static struct clk *exynos5_clk_src_bpll_list[] = {
+	[0] = &clk_fin_bpll,
+	[1] = &exynos5_clk_mout_bpll_fout.clk,
+};
+
+static struct clksrc_sources exynos5_clk_src_bpll = {
+	.sources	= exynos5_clk_src_bpll_list,
+	.nr_sources	= ARRAY_SIZE(exynos5_clk_src_bpll_list),
+};
+
 static struct clksrc_clk exynos5_clk_mout_bpll = {
 	.clk	= {
 		.name		= "mout_bpll",
 	},
-	.sources = &clk_src_bpll,
+	.sources = &exynos5_clk_src_bpll,
 	.reg_src = { .reg = EXYNOS5_CLKSRC_CDREX, .shift = 0, .size = 1 },
 };
 
@@ -187,11 +274,29 @@
 	.reg_src = { .reg = EXYNOS5_CLKSRC_TOP2, .shift = 12, .size = 1 },
 };
 
+static struct clksrc_clk exynos5_clk_mout_mpll_fout = {
+	.clk	= {
+		.name		= "mout_mpll_fout",
+	},
+	.sources = &clk_src_mpll_fout,
+	.reg_src = { .reg = EXYNOS5_PLL_DIV2_SEL, .shift = 4, .size = 1 },
+};
+
+static struct clk *exynos5_clk_src_mpll_list[] = {
+	[0] = &clk_fin_mpll,
+	[1] = &exynos5_clk_mout_mpll_fout.clk,
+};
+
+static struct clksrc_sources exynos5_clk_src_mpll = {
+	.sources	= exynos5_clk_src_mpll_list,
+	.nr_sources	= ARRAY_SIZE(exynos5_clk_src_mpll_list),
+};
+
 struct clksrc_clk exynos5_clk_mout_mpll = {
 	.clk = {
 		.name		= "mout_mpll",
 	},
-	.sources = &clk_src_mpll,
+	.sources = &exynos5_clk_src_mpll,
 	.reg_src = { .reg = EXYNOS5_CLKSRC_CORE1, .shift = 8, .size = 1 },
 };
 
@@ -454,6 +559,11 @@
 		.enable		= exynos5_clk_ip_peris_ctrl,
 		.ctrlbit	= (1 << 20),
 	}, {
+		.name		= "watchdog",
+		.parent		= &exynos5_clk_aclk_66.clk,
+		.enable		= exynos5_clk_ip_peris_ctrl,
+		.ctrlbit	= (1 << 19),
+	}, {
 		.name		= "hsmmc",
 		.devname	= "exynos4-sdhci.0",
 		.parent		= &exynos5_clk_aclk_200.clk,
@@ -630,6 +740,76 @@
 		.parent		= &exynos5_clk_aclk_66.clk,
 		.enable		= exynos5_clk_ip_peric_ctrl,
 		.ctrlbit	= (1 << 14),
+	}, {
+		.name		= SYSMMU_CLOCK_NAME,
+		.devname	= SYSMMU_CLOCK_DEVNAME(mfc_l, 0),
+		.enable		= &exynos5_clk_ip_mfc_ctrl,
+		.ctrlbit	= (1 << 1),
+	}, {
+		.name		= SYSMMU_CLOCK_NAME,
+		.devname	= SYSMMU_CLOCK_DEVNAME(mfc_r, 1),
+		.enable		= &exynos5_clk_ip_mfc_ctrl,
+		.ctrlbit	= (1 << 2),
+	}, {
+		.name		= SYSMMU_CLOCK_NAME,
+		.devname	= SYSMMU_CLOCK_DEVNAME(tv, 2),
+		.enable		= &exynos5_clk_ip_disp1_ctrl,
+		.ctrlbit	= (1 << 9)
+	}, {
+		.name		= SYSMMU_CLOCK_NAME,
+		.devname	= SYSMMU_CLOCK_DEVNAME(jpeg, 3),
+		.enable		= &exynos5_clk_ip_gen_ctrl,
+		.ctrlbit	= (1 << 7),
+	}, {
+		.name		= SYSMMU_CLOCK_NAME,
+		.devname	= SYSMMU_CLOCK_DEVNAME(rot, 4),
+		.enable		= &exynos5_clk_ip_gen_ctrl,
+		.ctrlbit	= (1 << 6)
+	}, {
+		.name		= SYSMMU_CLOCK_NAME,
+		.devname	= SYSMMU_CLOCK_DEVNAME(gsc0, 5),
+		.enable		= &exynos5_clk_ip_gscl_ctrl,
+		.ctrlbit	= (1 << 7),
+	}, {
+		.name		= SYSMMU_CLOCK_NAME,
+		.devname	= SYSMMU_CLOCK_DEVNAME(gsc1, 6),
+		.enable		= &exynos5_clk_ip_gscl_ctrl,
+		.ctrlbit	= (1 << 8),
+	}, {
+		.name		= SYSMMU_CLOCK_NAME,
+		.devname	= SYSMMU_CLOCK_DEVNAME(gsc2, 7),
+		.enable		= &exynos5_clk_ip_gscl_ctrl,
+		.ctrlbit	= (1 << 9),
+	}, {
+		.name		= SYSMMU_CLOCK_NAME,
+		.devname	= SYSMMU_CLOCK_DEVNAME(gsc3, 8),
+		.enable		= &exynos5_clk_ip_gscl_ctrl,
+		.ctrlbit	= (1 << 10),
+	}, {
+		.name		= SYSMMU_CLOCK_NAME,
+		.devname	= SYSMMU_CLOCK_DEVNAME(isp, 9),
+		.enable		= &exynos5_clk_ip_isp0_ctrl,
+		.ctrlbit	= (0x3F << 8),
+	}, {
+		.name		= SYSMMU_CLOCK_NAME2,
+		.devname	= SYSMMU_CLOCK_DEVNAME(isp, 9),
+		.enable		= &exynos5_clk_ip_isp1_ctrl,
+		.ctrlbit	= (0xF << 4),
+	}, {
+		.name		= SYSMMU_CLOCK_NAME,
+		.devname	= SYSMMU_CLOCK_DEVNAME(camif0, 12),
+		.enable		= &exynos5_clk_ip_gscl_ctrl,
+		.ctrlbit	= (1 << 11),
+	}, {
+		.name		= SYSMMU_CLOCK_NAME,
+		.devname	= SYSMMU_CLOCK_DEVNAME(camif1, 13),
+		.enable		= &exynos5_clk_ip_gscl_ctrl,
+		.ctrlbit	= (1 << 12),
+	}, {
+		.name		= SYSMMU_CLOCK_NAME,
+		.devname	= SYSMMU_CLOCK_DEVNAME(2d, 14),
+		.enable		= &exynos5_clk_ip_acp_ctrl,
+		.ctrlbit	= (1 << 7)
 	}
 };
 
@@ -941,10 +1121,12 @@
 	&exynos5_clk_mout_apll,
 	&exynos5_clk_sclk_apll,
 	&exynos5_clk_mout_bpll,
+	&exynos5_clk_mout_bpll_fout,
 	&exynos5_clk_mout_bpll_user,
 	&exynos5_clk_mout_cpll,
 	&exynos5_clk_mout_epll,
 	&exynos5_clk_mout_mpll,
+	&exynos5_clk_mout_mpll_fout,
 	&exynos5_clk_mout_mpll_user,
 	&exynos5_clk_vpllsrc,
 	&exynos5_clk_sclk_vpll,
@@ -1008,7 +1190,9 @@
 	&exynos5_clk_sclk_hdmi27m,
 	&exynos5_clk_sclk_hdmiphy,
 	&clk_fout_bpll,
+	&clk_fout_bpll_div2,
 	&clk_fout_cpll,
+	&clk_fout_mpll_div2,
 	&exynos5_clk_armclk,
 };
 
@@ -1173,8 +1357,10 @@
 
 	clk_fout_apll.ops = &exynos5_fout_apll_ops;
 	clk_fout_bpll.rate = bpll;
+	clk_fout_bpll_div2.rate = bpll >> 1;
 	clk_fout_cpll.rate = cpll;
 	clk_fout_mpll.rate = mpll;
+	clk_fout_mpll_div2.rate = mpll >> 1;
 	clk_fout_epll.rate = epll;
 	clk_fout_vpll.rate = vpll;
 
diff --git a/arch/arm/mach-exynos/common.c b/arch/arm/mach-exynos/common.c
index 5ccd6e8..742edd3 100644
--- a/arch/arm/mach-exynos/common.c
+++ b/arch/arm/mach-exynos/common.c
@@ -19,6 +19,9 @@
 #include <linux/serial_core.h>
 #include <linux/of.h>
 #include <linux/of_irq.h>
+#include <linux/export.h>
+#include <linux/irqdomain.h>
+#include <linux/of_address.h>
 
 #include <asm/proc-fns.h>
 #include <asm/exception.h>
@@ -265,12 +268,12 @@
 	}, {
 		.virtual	= (unsigned long)S5P_VA_GIC_CPU,
 		.pfn		= __phys_to_pfn(EXYNOS5_PA_GIC_CPU),
-		.length		= SZ_64K,
+		.length		= SZ_8K,
 		.type		= MT_DEVICE,
 	}, {
 		.virtual	= (unsigned long)S5P_VA_GIC_DIST,
 		.pfn		= __phys_to_pfn(EXYNOS5_PA_GIC_DIST),
-		.length		= SZ_64K,
+		.length		= SZ_4K,
 		.type		= MT_DEVICE,
 	},
 };
@@ -285,6 +288,11 @@
 	__raw_writel(0x1, EXYNOS_SWRESET);
 }
 
+void __init exynos_init_late(void)
+{
+	exynos_pm_late_initcall();
+}
+
 /*
  * exynos_map_io
  *
@@ -399,6 +407,7 @@
 	void __iomem *base;
 };
 
+static struct irq_domain *combiner_irq_domain;
 static struct combiner_chip_data combiner_data[MAX_COMBINER_NR];
 
 static inline void __iomem *combiner_base(struct irq_data *data)
@@ -411,14 +420,14 @@
 
 static void combiner_mask_irq(struct irq_data *data)
 {
-	u32 mask = 1 << (data->irq % 32);
+	u32 mask = 1 << (data->hwirq % 32);
 
 	__raw_writel(mask, combiner_base(data) + COMBINER_ENABLE_CLEAR);
 }
 
 static void combiner_unmask_irq(struct irq_data *data)
 {
-	u32 mask = 1 << (data->irq % 32);
+	u32 mask = 1 << (data->hwirq % 32);
 
 	__raw_writel(mask, combiner_base(data) + COMBINER_ENABLE_SET);
 }
@@ -474,49 +483,131 @@
 	irq_set_chained_handler(irq, combiner_handle_cascade_irq);
 }
 
-static void __init combiner_init(unsigned int combiner_nr, void __iomem *base,
-			  unsigned int irq_start)
+static void __init combiner_init_one(unsigned int combiner_nr,
+				     void __iomem *base)
 {
-	unsigned int i;
-	unsigned int max_nr;
-
-	if (soc_is_exynos5250())
-		max_nr = EXYNOS5_MAX_COMBINER_NR;
-	else
-		max_nr = EXYNOS4_MAX_COMBINER_NR;
-
-	if (combiner_nr >= max_nr)
-		BUG();
-
 	combiner_data[combiner_nr].base = base;
-	combiner_data[combiner_nr].irq_offset = irq_start;
+	combiner_data[combiner_nr].irq_offset = irq_find_mapping(
+		combiner_irq_domain, combiner_nr * MAX_IRQ_IN_COMBINER);
 	combiner_data[combiner_nr].irq_mask = 0xff << ((combiner_nr % 4) << 3);
 
 	/* Disable all interrupts */
-
 	__raw_writel(combiner_data[combiner_nr].irq_mask,
 		     base + COMBINER_ENABLE_CLEAR);
+}
 
-	/* Setup the Linux IRQ subsystem */
+#ifdef CONFIG_OF
+static int combiner_irq_domain_xlate(struct irq_domain *d,
+				     struct device_node *controller,
+				     const u32 *intspec, unsigned int intsize,
+				     unsigned long *out_hwirq,
+				     unsigned int *out_type)
+{
+	if (d->of_node != controller)
+		return -EINVAL;
 
-	for (i = irq_start; i < combiner_data[combiner_nr].irq_offset
-				+ MAX_IRQ_IN_COMBINER; i++) {
-		irq_set_chip_and_handler(i, &combiner_chip, handle_level_irq);
-		irq_set_chip_data(i, &combiner_data[combiner_nr]);
-		set_irq_flags(i, IRQF_VALID | IRQF_PROBE);
+	if (intsize < 2)
+		return -EINVAL;
+
+	*out_hwirq = intspec[0] * MAX_IRQ_IN_COMBINER + intspec[1];
+	*out_type = 0;
+
+	return 0;
+}
+#else
+static int combiner_irq_domain_xlate(struct irq_domain *d,
+				     struct device_node *controller,
+				     const u32 *intspec, unsigned int intsize,
+				     unsigned long *out_hwirq,
+				     unsigned int *out_type)
+{
+	return -EINVAL;
+}
+#endif
+
+static int combiner_irq_domain_map(struct irq_domain *d, unsigned int irq,
+				   irq_hw_number_t hw)
+{
+	irq_set_chip_and_handler(irq, &combiner_chip, handle_level_irq);
+	irq_set_chip_data(irq, &combiner_data[hw >> 3]);
+	set_irq_flags(irq, IRQF_VALID | IRQF_PROBE);
+
+	return 0;
+}
+
+static struct irq_domain_ops combiner_irq_domain_ops = {
+	.xlate	= combiner_irq_domain_xlate,
+	.map	= combiner_irq_domain_map,
+};
+
+void __init combiner_init(void __iomem *combiner_base, struct device_node *np)
+{
+	int i, irq, irq_base;
+	unsigned int max_nr, nr_irq;
+
+	if (np) {
+		if (of_property_read_u32(np, "samsung,combiner-nr", &max_nr)) {
+			pr_warning("%s: number of combiners not specified, "
+				"setting default as %d.\n",
+				__func__, EXYNOS4_MAX_COMBINER_NR);
+			max_nr = EXYNOS4_MAX_COMBINER_NR;
+		}
+	} else {
+		max_nr = soc_is_exynos5250() ? EXYNOS5_MAX_COMBINER_NR :
+						EXYNOS4_MAX_COMBINER_NR;
+	}
+	nr_irq = max_nr * MAX_IRQ_IN_COMBINER;
+
+	irq_base = irq_alloc_descs(COMBINER_IRQ(0, 0), 1, nr_irq, 0);
+	if (IS_ERR_VALUE(irq_base)) {
+		irq_base = COMBINER_IRQ(0, 0);
+		pr_warning("%s: irq desc alloc failed. Continuing with %d as linux irq base\n", __func__, irq_base);
+	}
+
+	combiner_irq_domain = irq_domain_add_legacy(np, nr_irq, irq_base, 0,
+				&combiner_irq_domain_ops, &combiner_data);
+	if (WARN_ON(!combiner_irq_domain)) {
+		pr_warning("%s: irq domain init failed\n", __func__);
+		return;
+	}
+
+	for (i = 0; i < max_nr; i++) {
+		combiner_init_one(i, combiner_base + (i >> 2) * 0x10);
+		irq = IRQ_SPI(i);
+#ifdef CONFIG_OF
+		if (np)
+			irq = irq_of_parse_and_map(np, i);
+#endif
+		combiner_cascade_irq(i, irq);
 	}
 }
 
 #ifdef CONFIG_OF
+int __init combiner_of_init(struct device_node *np, struct device_node *parent)
+{
+	void __iomem *combiner_base;
+
+	combiner_base = of_iomap(np, 0);
+	if (!combiner_base) {
+		pr_err("%s: failed to map combiner registers\n", __func__);
+		return -ENXIO;
+	}
+
+	combiner_init(combiner_base, np);
+
+	return 0;
+}
+
 static const struct of_device_id exynos4_dt_irq_match[] = {
 	{ .compatible = "arm,cortex-a9-gic", .data = gic_of_init, },
+	{ .compatible = "samsung,exynos4210-combiner",
+			.data = combiner_of_init, },
 	{},
 };
 #endif
 
 void __init exynos4_init_irq(void)
 {
-	int irq;
 	unsigned int gic_bank_offset;
 
 	gic_bank_offset = soc_is_exynos4412() ? 0x4000 : 0x8000;
@@ -528,12 +619,8 @@
 		of_irq_init(exynos4_dt_irq_match);
 #endif
 
-	for (irq = 0; irq < EXYNOS4_MAX_COMBINER_NR; irq++) {
-
-		combiner_init(irq, (void __iomem *)S5P_VA_COMBINER(irq),
-				COMBINER_IRQ(irq, 0));
-		combiner_cascade_irq(irq, IRQ_SPI(irq));
-	}
+	if (!of_have_populated_dt())
+		combiner_init(S5P_VA_COMBINER_BASE, NULL);
 
 	/*
 	 * The parameters of s5p_init_irq() are for VIC init.
@@ -545,18 +632,9 @@
 
 void __init exynos5_init_irq(void)
 {
-	int irq;
-
 #ifdef CONFIG_OF
 	of_irq_init(exynos4_dt_irq_match);
 #endif
-
-	for (irq = 0; irq < EXYNOS5_MAX_COMBINER_NR; irq++) {
-		combiner_init(irq, (void __iomem *)S5P_VA_COMBINER(irq),
-				COMBINER_IRQ(irq, 0));
-		combiner_cascade_irq(irq, IRQ_SPI(irq));
-	}
-
 	/*
 	 * The parameters of s5p_init_irq() are for VIC init.
 	 * Theses parameters should be NULL and 0 because EXYNOS4
@@ -565,30 +643,18 @@
 	s5p_init_irq(NULL, 0);
 }
 
-struct bus_type exynos4_subsys = {
-	.name		= "exynos4-core",
-	.dev_name	= "exynos4-core",
-};
-
-struct bus_type exynos5_subsys = {
-	.name		= "exynos5-core",
-	.dev_name	= "exynos5-core",
+struct bus_type exynos_subsys = {
+	.name		= "exynos-core",
+	.dev_name	= "exynos-core",
 };
 
 static struct device exynos4_dev = {
-	.bus	= &exynos4_subsys,
-};
-
-static struct device exynos5_dev = {
-	.bus	= &exynos5_subsys,
+	.bus	= &exynos_subsys,
 };
 
 static int __init exynos_core_init(void)
 {
-	if (soc_is_exynos5250())
-		return subsys_system_register(&exynos5_subsys, NULL);
-	else
-		return subsys_system_register(&exynos4_subsys, NULL);
+	return subsys_system_register(&exynos_subsys, NULL);
 }
 core_initcall(exynos_core_init);
 
@@ -675,10 +741,7 @@
 {
 	printk(KERN_INFO "EXYNOS: Initializing architecture\n");
 
-	if (soc_is_exynos5250())
-		return device_register(&exynos5_dev);
-	else
-		return device_register(&exynos4_dev);
+	return device_register(&exynos4_dev);
 }
 
 /* uart registration process */
diff --git a/arch/arm/mach-exynos/common.h b/arch/arm/mach-exynos/common.h
index 677b546..aed2eeb 100644
--- a/arch/arm/mach-exynos/common.h
+++ b/arch/arm/mach-exynos/common.h
@@ -19,6 +19,13 @@
 void exynos5_init_irq(void);
 void exynos4_restart(char mode, const char *cmd);
 void exynos5_restart(char mode, const char *cmd);
+void exynos_init_late(void);
+
+#ifdef CONFIG_PM_GENERIC_DOMAINS
+int exynos_pm_late_initcall(void);
+#else
+static inline int exynos_pm_late_initcall(void) { return 0; }
+#endif
 
 #ifdef CONFIG_ARCH_EXYNOS4
 void exynos4_register_clocks(void);
diff --git a/arch/arm/mach-exynos/cpuidle.c b/arch/arm/mach-exynos/cpuidle.c
index 26dac28..cff0595 100644
--- a/arch/arm/mach-exynos/cpuidle.c
+++ b/arch/arm/mach-exynos/cpuidle.c
@@ -100,7 +100,7 @@
 	exynos4_set_wakeupmask();
 
 	/* Set value of power down register for aftr mode */
-	exynos4_sys_powerdown_conf(SYS_AFTR);
+	exynos_sys_powerdown_conf(SYS_AFTR);
 
 	__raw_writel(virt_to_phys(s3c_cpu_resume), REG_DIRECTGO_ADDR);
 	__raw_writel(S5P_CHECK_AFTR, REG_DIRECTGO_FLAG);
diff --git a/arch/arm/mach-exynos/dev-drm.c b/arch/arm/mach-exynos/dev-drm.c
new file mode 100644
index 0000000..17c9c6e
--- /dev/null
+++ b/arch/arm/mach-exynos/dev-drm.c
@@ -0,0 +1,29 @@
+/*
+ * linux/arch/arm/mach-exynos/dev-drm.c
+ *
+ * Copyright (c) 2012 Samsung Electronics Co., Ltd.
+ *		http://www.samsung.com
+ *
+ * EXYNOS - core DRM device
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+#include <linux/kernel.h>
+#include <linux/dma-mapping.h>
+#include <linux/platform_device.h>
+
+#include <plat/devs.h>
+
+static u64 exynos_drm_dma_mask = DMA_BIT_MASK(32);
+
+struct platform_device exynos_device_drm = {
+	.name	= "exynos-drm",
+	.dev	= {
+		.dma_mask		= &exynos_drm_dma_mask,
+		.coherent_dma_mask	= DMA_BIT_MASK(32),
+	}
+};
diff --git a/arch/arm/mach-exynos/dev-sysmmu.c b/arch/arm/mach-exynos/dev-sysmmu.c
index 781563f..c5b1ea3 100644
--- a/arch/arm/mach-exynos/dev-sysmmu.c
+++ b/arch/arm/mach-exynos/dev-sysmmu.c
@@ -1,9 +1,9 @@
-/* linux/arch/arm/mach-exynos4/dev-sysmmu.c
+/* linux/arch/arm/mach-exynos/dev-sysmmu.c
  *
- * Copyright (c) 2010 Samsung Electronics Co., Ltd.
+ * Copyright (c) 2010-2012 Samsung Electronics Co., Ltd.
  *		http://www.samsung.com
  *
- * EXYNOS4 - System MMU support
+ * EXYNOS - System MMU support
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 as
@@ -12,222 +12,263 @@
 
 #include <linux/platform_device.h>
 #include <linux/dma-mapping.h>
-#include <linux/export.h>
+
+#include <plat/cpu.h>
 
 #include <mach/map.h>
 #include <mach/irqs.h>
 #include <mach/sysmmu.h>
-#include <plat/s5p-clock.h>
 
-/* These names must be equal to the clock names in mach-exynos4/clock.c */
-const char *sysmmu_ips_name[EXYNOS4_SYSMMU_TOTAL_IPNUM] = {
-	"SYSMMU_MDMA"	,
-	"SYSMMU_SSS"	,
-	"SYSMMU_FIMC0"	,
-	"SYSMMU_FIMC1"	,
-	"SYSMMU_FIMC2"	,
-	"SYSMMU_FIMC3"	,
-	"SYSMMU_JPEG"	,
-	"SYSMMU_FIMD0"	,
-	"SYSMMU_FIMD1"	,
-	"SYSMMU_PCIe"	,
-	"SYSMMU_G2D"	,
-	"SYSMMU_ROTATOR",
-	"SYSMMU_MDMA2"	,
-	"SYSMMU_TV"	,
-	"SYSMMU_MFC_L"	,
-	"SYSMMU_MFC_R"	,
-};
+static u64 exynos_sysmmu_dma_mask = DMA_BIT_MASK(32);
 
-static struct resource exynos4_sysmmu_resource[] = {
-	[0] = {
-		.start	= EXYNOS4_PA_SYSMMU_MDMA,
-		.end	= EXYNOS4_PA_SYSMMU_MDMA + SZ_64K - 1,
-		.flags	= IORESOURCE_MEM,
-	},
-	[1] = {
-		.start	= IRQ_SYSMMU_MDMA0_0,
-		.end	= IRQ_SYSMMU_MDMA0_0,
-		.flags	= IORESOURCE_IRQ,
-	},
-	[2] = {
-		.start	= EXYNOS4_PA_SYSMMU_SSS,
-		.end	= EXYNOS4_PA_SYSMMU_SSS + SZ_64K - 1,
-		.flags	= IORESOURCE_MEM,
-	},
-	[3] = {
-		.start	= IRQ_SYSMMU_SSS_0,
-		.end	= IRQ_SYSMMU_SSS_0,
-		.flags	= IORESOURCE_IRQ,
-	},
-	[4] = {
-		.start	= EXYNOS4_PA_SYSMMU_FIMC0,
-		.end	= EXYNOS4_PA_SYSMMU_FIMC0 + SZ_64K - 1,
-		.flags	= IORESOURCE_MEM,
-	},
-	[5] = {
-		.start	= IRQ_SYSMMU_FIMC0_0,
-		.end	= IRQ_SYSMMU_FIMC0_0,
-		.flags	= IORESOURCE_IRQ,
-	},
-	[6] = {
-		.start	= EXYNOS4_PA_SYSMMU_FIMC1,
-		.end	= EXYNOS4_PA_SYSMMU_FIMC1 + SZ_64K - 1,
-		.flags	= IORESOURCE_MEM,
-	},
-	[7] = {
-		.start	= IRQ_SYSMMU_FIMC1_0,
-		.end	= IRQ_SYSMMU_FIMC1_0,
-		.flags	= IORESOURCE_IRQ,
-	},
-	[8] = {
-		.start	= EXYNOS4_PA_SYSMMU_FIMC2,
-		.end	= EXYNOS4_PA_SYSMMU_FIMC2 + SZ_64K - 1,
-		.flags	= IORESOURCE_MEM,
-	},
-	[9] = {
-		.start	= IRQ_SYSMMU_FIMC2_0,
-		.end	= IRQ_SYSMMU_FIMC2_0,
-		.flags	= IORESOURCE_IRQ,
-	},
-	[10] = {
-		.start	= EXYNOS4_PA_SYSMMU_FIMC3,
-		.end	= EXYNOS4_PA_SYSMMU_FIMC3 + SZ_64K - 1,
-		.flags	= IORESOURCE_MEM,
-	},
-	[11] = {
-		.start	= IRQ_SYSMMU_FIMC3_0,
-		.end	= IRQ_SYSMMU_FIMC3_0,
-		.flags	= IORESOURCE_IRQ,
-	},
-	[12] = {
-		.start	= EXYNOS4_PA_SYSMMU_JPEG,
-		.end	= EXYNOS4_PA_SYSMMU_JPEG + SZ_64K - 1,
-		.flags	= IORESOURCE_MEM,
-	},
-	[13] = {
-		.start	= IRQ_SYSMMU_JPEG_0,
-		.end	= IRQ_SYSMMU_JPEG_0,
-		.flags	= IORESOURCE_IRQ,
-	},
-	[14] = {
-		.start	= EXYNOS4_PA_SYSMMU_FIMD0,
-		.end	= EXYNOS4_PA_SYSMMU_FIMD0 + SZ_64K - 1,
-		.flags	= IORESOURCE_MEM,
-	},
-	[15] = {
-		.start	= IRQ_SYSMMU_LCD0_M0_0,
-		.end	= IRQ_SYSMMU_LCD0_M0_0,
-		.flags	= IORESOURCE_IRQ,
-	},
-	[16] = {
-		.start	= EXYNOS4_PA_SYSMMU_FIMD1,
-		.end	= EXYNOS4_PA_SYSMMU_FIMD1 + SZ_64K - 1,
-		.flags	= IORESOURCE_MEM,
-	},
-	[17] = {
-		.start	= IRQ_SYSMMU_LCD1_M1_0,
-		.end	= IRQ_SYSMMU_LCD1_M1_0,
-		.flags	= IORESOURCE_IRQ,
-	},
-	[18] = {
-		.start	= EXYNOS4_PA_SYSMMU_PCIe,
-		.end	= EXYNOS4_PA_SYSMMU_PCIe + SZ_64K - 1,
-		.flags	= IORESOURCE_MEM,
-	},
-	[19] = {
-		.start	= IRQ_SYSMMU_PCIE_0,
-		.end	= IRQ_SYSMMU_PCIE_0,
-		.flags	= IORESOURCE_IRQ,
-	},
-	[20] = {
-		.start	= EXYNOS4_PA_SYSMMU_G2D,
-		.end	= EXYNOS4_PA_SYSMMU_G2D + SZ_64K - 1,
-		.flags	= IORESOURCE_MEM,
-	},
-	[21] = {
-		.start	= IRQ_SYSMMU_2D_0,
-		.end	= IRQ_SYSMMU_2D_0,
-		.flags	= IORESOURCE_IRQ,
-	},
-	[22] = {
-		.start	= EXYNOS4_PA_SYSMMU_ROTATOR,
-		.end	= EXYNOS4_PA_SYSMMU_ROTATOR + SZ_64K - 1,
-		.flags	= IORESOURCE_MEM,
-	},
-	[23] = {
-		.start	= IRQ_SYSMMU_ROTATOR_0,
-		.end	= IRQ_SYSMMU_ROTATOR_0,
-		.flags	= IORESOURCE_IRQ,
-	},
-	[24] = {
-		.start	= EXYNOS4_PA_SYSMMU_MDMA2,
-		.end	= EXYNOS4_PA_SYSMMU_MDMA2 + SZ_64K - 1,
-		.flags	= IORESOURCE_MEM,
-	},
-	[25] = {
-		.start	= IRQ_SYSMMU_MDMA1_0,
-		.end	= IRQ_SYSMMU_MDMA1_0,
-		.flags	= IORESOURCE_IRQ,
-	},
-	[26] = {
-		.start	= EXYNOS4_PA_SYSMMU_TV,
-		.end	= EXYNOS4_PA_SYSMMU_TV + SZ_64K - 1,
-		.flags	= IORESOURCE_MEM,
-	},
-	[27] = {
-		.start	= IRQ_SYSMMU_TV_M0_0,
-		.end	= IRQ_SYSMMU_TV_M0_0,
-		.flags	= IORESOURCE_IRQ,
-	},
-	[28] = {
-		.start	= EXYNOS4_PA_SYSMMU_MFC_L,
-		.end	= EXYNOS4_PA_SYSMMU_MFC_L + SZ_64K - 1,
-		.flags	= IORESOURCE_MEM,
-	},
-	[29] = {
-		.start	= IRQ_SYSMMU_MFC_M0_0,
-		.end	= IRQ_SYSMMU_MFC_M0_0,
-		.flags	= IORESOURCE_IRQ,
-	},
-	[30] = {
-		.start	= EXYNOS4_PA_SYSMMU_MFC_R,
-		.end	= EXYNOS4_PA_SYSMMU_MFC_R + SZ_64K - 1,
-		.flags	= IORESOURCE_MEM,
-	},
-	[31] = {
-		.start	= IRQ_SYSMMU_MFC_M1_0,
-		.end	= IRQ_SYSMMU_MFC_M1_0,
-		.flags	= IORESOURCE_IRQ,
-	},
-};
-
-struct platform_device exynos4_device_sysmmu = {
-	.name		= "s5p-sysmmu",
-	.id		= 32,
-	.num_resources	= ARRAY_SIZE(exynos4_sysmmu_resource),
-	.resource	= exynos4_sysmmu_resource,
-};
-EXPORT_SYMBOL(exynos4_device_sysmmu);
-
-static struct clk *sysmmu_clk[S5P_SYSMMU_TOTAL_IPNUM];
-void sysmmu_clk_init(struct device *dev, sysmmu_ips ips)
-{
-	sysmmu_clk[ips] = clk_get(dev, sysmmu_ips_name[ips]);
-	if (IS_ERR(sysmmu_clk[ips]))
-		sysmmu_clk[ips] = NULL;
-	else
-		clk_put(sysmmu_clk[ips]);
+#define SYSMMU_PLATFORM_DEVICE(ipname, devid)				\
+static struct sysmmu_platform_data platdata_##ipname = {		\
+	.dbgname = #ipname,						\
+};									\
+struct platform_device SYSMMU_PLATDEV(ipname) =				\
+{									\
+	.name		= SYSMMU_DEVNAME_BASE,				\
+	.id		= devid,					\
+	.dev		= {						\
+		.dma_mask		= &exynos_sysmmu_dma_mask,	\
+		.coherent_dma_mask	= DMA_BIT_MASK(32),		\
+		.platform_data		= &platdata_##ipname,		\
+	},								\
 }
 
-void sysmmu_clk_enable(sysmmu_ips ips)
-{
-	if (sysmmu_clk[ips])
-		clk_enable(sysmmu_clk[ips]);
+SYSMMU_PLATFORM_DEVICE(mfc_l,	0);
+SYSMMU_PLATFORM_DEVICE(mfc_r,	1);
+SYSMMU_PLATFORM_DEVICE(tv,	2);
+SYSMMU_PLATFORM_DEVICE(jpeg,	3);
+SYSMMU_PLATFORM_DEVICE(rot,	4);
+SYSMMU_PLATFORM_DEVICE(fimc0,	5); /* fimc* and gsc* are mutually exclusive, hence shared ids */
+SYSMMU_PLATFORM_DEVICE(fimc1,	6);
+SYSMMU_PLATFORM_DEVICE(fimc2,	7);
+SYSMMU_PLATFORM_DEVICE(fimc3,	8);
+SYSMMU_PLATFORM_DEVICE(gsc0,	5);
+SYSMMU_PLATFORM_DEVICE(gsc1,	6);
+SYSMMU_PLATFORM_DEVICE(gsc2,	7);
+SYSMMU_PLATFORM_DEVICE(gsc3,	8);
+SYSMMU_PLATFORM_DEVICE(isp,	9);
+SYSMMU_PLATFORM_DEVICE(fimd0,	10);
+SYSMMU_PLATFORM_DEVICE(fimd1,	11);
+SYSMMU_PLATFORM_DEVICE(camif0,	12);
+SYSMMU_PLATFORM_DEVICE(camif1,	13);
+SYSMMU_PLATFORM_DEVICE(2d,	14);
+
+#define SYSMMU_RESOURCE_NAME(core, ipname) sysmmures_##core##_##ipname
+
+#define SYSMMU_RESOURCE(core, ipname)					\
+	static struct resource SYSMMU_RESOURCE_NAME(core, ipname)[] __initdata =
+
+#define DEFINE_SYSMMU_RESOURCE(core, mem, irq)				\
+	DEFINE_RES_MEM_NAMED(core##_PA_SYSMMU_##mem, SZ_4K, #mem),	\
+	DEFINE_RES_IRQ_NAMED(core##_IRQ_SYSMMU_##irq##_0, #mem)
+
+#define SYSMMU_RESOURCE_DEFINE(core, ipname, mem, irq)			\
+	SYSMMU_RESOURCE(core, ipname) {					\
+		DEFINE_SYSMMU_RESOURCE(core, mem, irq)			\
+	}
+
+struct sysmmu_resource_map {
+	struct platform_device *pdev;
+	struct resource *res;
+	u32 rnum;
+	struct device *pdd;
+	char *clocknames;
+};
+
+#define SYSMMU_RESOURCE_MAPPING(core, ipname, resname) {		\
+	.pdev = &SYSMMU_PLATDEV(ipname),				\
+	.res = SYSMMU_RESOURCE_NAME(EXYNOS##core, resname),		\
+	.rnum = ARRAY_SIZE(SYSMMU_RESOURCE_NAME(EXYNOS##core, resname)),\
+	.clocknames = SYSMMU_CLOCK_NAME,				\
 }
 
-void sysmmu_clk_disable(sysmmu_ips ips)
-{
-	if (sysmmu_clk[ips])
-		clk_disable(sysmmu_clk[ips]);
+#define SYSMMU_RESOURCE_MAPPING_MC(core, ipname, resname, pdata) {	\
+	.pdev = &SYSMMU_PLATDEV(ipname),				\
+	.res = SYSMMU_RESOURCE_NAME(EXYNOS##core, resname),		\
+	.rnum = ARRAY_SIZE(SYSMMU_RESOURCE_NAME(EXYNOS##core, resname)),\
+	.clocknames = SYSMMU_CLOCK_NAME "," SYSMMU_CLOCK_NAME2,		\
 }
+
+#ifdef CONFIG_EXYNOS_DEV_PD
+#define SYSMMU_RESOURCE_MAPPING_PD(core, ipname, resname, pd) {		\
+	.pdev = &SYSMMU_PLATDEV(ipname),				\
+	.res = SYSMMU_RESOURCE_NAME(EXYNOS##core, resname),		\
+	.rnum = ARRAY_SIZE(SYSMMU_RESOURCE_NAME(EXYNOS##core, resname)),\
+	.clocknames = SYSMMU_CLOCK_NAME,				\
+	.pdd = &exynos##core##_device_pd[pd].dev,			\
+}
+
+#define SYSMMU_RESOURCE_MAPPING_MCPD(core, ipname, resname, pd, pdata) {\
+	.pdev = &SYSMMU_PLATDEV(ipname),				\
+	.res = SYSMMU_RESOURCE_NAME(EXYNOS##core, resname),		\
+	.rnum = ARRAY_SIZE(SYSMMU_RESOURCE_NAME(EXYNOS##core, resname)),\
+	.clocknames = SYSMMU_CLOCK_NAME "," SYSMMU_CLOCK_NAME2,		\
+	.pdd = &exynos##core##_device_pd[pd].dev,			\
+}
+#else
+#define SYSMMU_RESOURCE_MAPPING_PD(core, ipname, resname, pd)		\
+		SYSMMU_RESOURCE_MAPPING(core, ipname, resname)
+#define SYSMMU_RESOURCE_MAPPING_MCPD(core, ipname, resname, pd, pdata)	\
+		SYSMMU_RESOURCE_MAPPING_MC(core, ipname, resname, pdata)
+
+#endif /* CONFIG_EXYNOS_DEV_PD */
+
+#ifdef CONFIG_ARCH_EXYNOS4
+SYSMMU_RESOURCE_DEFINE(EXYNOS4, fimc0,	FIMC0,	FIMC0);
+SYSMMU_RESOURCE_DEFINE(EXYNOS4, fimc1,	FIMC1,	FIMC1);
+SYSMMU_RESOURCE_DEFINE(EXYNOS4, fimc2,	FIMC2,	FIMC2);
+SYSMMU_RESOURCE_DEFINE(EXYNOS4, fimc3,	FIMC3,	FIMC3);
+SYSMMU_RESOURCE_DEFINE(EXYNOS4, jpeg,	JPEG,	JPEG);
+SYSMMU_RESOURCE_DEFINE(EXYNOS4, 2d,	G2D,	2D);
+SYSMMU_RESOURCE_DEFINE(EXYNOS4, tv,	TV,	TV_M0);
+SYSMMU_RESOURCE_DEFINE(EXYNOS4, 2d_acp,	2D_ACP,	2D);
+SYSMMU_RESOURCE_DEFINE(EXYNOS4, rot,	ROTATOR, ROTATOR);
+SYSMMU_RESOURCE_DEFINE(EXYNOS4, fimd0,	FIMD0,	LCD0_M0);
+SYSMMU_RESOURCE_DEFINE(EXYNOS4, fimd1,	FIMD1,	LCD1_M1);
+SYSMMU_RESOURCE_DEFINE(EXYNOS4, flite0,	FIMC_LITE0, FIMC_LITE0);
+SYSMMU_RESOURCE_DEFINE(EXYNOS4, flite1,	FIMC_LITE1, FIMC_LITE1);
+SYSMMU_RESOURCE_DEFINE(EXYNOS4, mfc_r,	MFC_R,	MFC_M0);
+SYSMMU_RESOURCE_DEFINE(EXYNOS4, mfc_l,	MFC_L,	MFC_M1);
+SYSMMU_RESOURCE(EXYNOS4, isp) {
+	DEFINE_SYSMMU_RESOURCE(EXYNOS4, FIMC_ISP, FIMC_ISP),
+	DEFINE_SYSMMU_RESOURCE(EXYNOS4, FIMC_DRC, FIMC_DRC),
+	DEFINE_SYSMMU_RESOURCE(EXYNOS4, FIMC_FD, FIMC_FD),
+	DEFINE_SYSMMU_RESOURCE(EXYNOS4, ISPCPU, FIMC_CX),
+};
+
+static struct sysmmu_resource_map sysmmu_resmap4[] __initdata = {
+	SYSMMU_RESOURCE_MAPPING_PD(4, fimc0,	fimc0,	PD_CAM),
+	SYSMMU_RESOURCE_MAPPING_PD(4, fimc1,	fimc1,	PD_CAM),
+	SYSMMU_RESOURCE_MAPPING_PD(4, fimc2,	fimc2,	PD_CAM),
+	SYSMMU_RESOURCE_MAPPING_PD(4, fimc3,	fimc3,	PD_CAM),
+	SYSMMU_RESOURCE_MAPPING_PD(4, tv,	tv,	PD_TV),
+	SYSMMU_RESOURCE_MAPPING_PD(4, mfc_r,	mfc_r,	PD_MFC),
+	SYSMMU_RESOURCE_MAPPING_PD(4, mfc_l,	mfc_l,	PD_MFC),
+	SYSMMU_RESOURCE_MAPPING_PD(4, rot,	rot,	PD_LCD0),
+	SYSMMU_RESOURCE_MAPPING_PD(4, jpeg,	jpeg,	PD_CAM),
+	SYSMMU_RESOURCE_MAPPING_PD(4, fimd0,	fimd0,	PD_LCD0),
+};
+
+static struct sysmmu_resource_map sysmmu_resmap4210[] __initdata = {
+	SYSMMU_RESOURCE_MAPPING_PD(4, 2d,	2d,	PD_LCD0),
+	SYSMMU_RESOURCE_MAPPING_PD(4, fimd1,	fimd1,	PD_LCD1),
+};
+
+static struct sysmmu_resource_map sysmmu_resmap4212[] __initdata = {
+	SYSMMU_RESOURCE_MAPPING(4,	2d,	2d_acp),
+	SYSMMU_RESOURCE_MAPPING_PD(4,	camif0, flite0,	PD_ISP),
+	SYSMMU_RESOURCE_MAPPING_PD(4,	camif1, flite1,	PD_ISP),
+	SYSMMU_RESOURCE_MAPPING_PD(4,	isp,	isp,	PD_ISP),
+};
+#endif /* CONFIG_ARCH_EXYNOS4 */
+
+#ifdef CONFIG_ARCH_EXYNOS5
+SYSMMU_RESOURCE_DEFINE(EXYNOS5, jpeg,	JPEG,	JPEG);
+SYSMMU_RESOURCE_DEFINE(EXYNOS5, fimd1,	FIMD1,	FIMD1);
+SYSMMU_RESOURCE_DEFINE(EXYNOS5, 2d,	2D,	2D);
+SYSMMU_RESOURCE_DEFINE(EXYNOS5, rot,	ROTATOR, ROTATOR);
+SYSMMU_RESOURCE_DEFINE(EXYNOS5, tv,	TV,	TV);
+SYSMMU_RESOURCE_DEFINE(EXYNOS5, flite0,	LITE0,	LITE0);
+SYSMMU_RESOURCE_DEFINE(EXYNOS5, flite1,	LITE1,	LITE1);
+SYSMMU_RESOURCE_DEFINE(EXYNOS5, gsc0,	GSC0,	GSC0);
+SYSMMU_RESOURCE_DEFINE(EXYNOS5, gsc1,	GSC1,	GSC1);
+SYSMMU_RESOURCE_DEFINE(EXYNOS5, gsc2,	GSC2,	GSC2);
+SYSMMU_RESOURCE_DEFINE(EXYNOS5, gsc3,	GSC3,	GSC3);
+SYSMMU_RESOURCE_DEFINE(EXYNOS5, mfc_r,	MFC_R,	MFC_R);
+SYSMMU_RESOURCE_DEFINE(EXYNOS5, mfc_l,	MFC_L,	MFC_L);
+SYSMMU_RESOURCE(EXYNOS5, isp) {
+	DEFINE_SYSMMU_RESOURCE(EXYNOS5, ISP, ISP),
+	DEFINE_SYSMMU_RESOURCE(EXYNOS5, DRC, DRC),
+	DEFINE_SYSMMU_RESOURCE(EXYNOS5, FD, FD),
+	DEFINE_SYSMMU_RESOURCE(EXYNOS5, ISPCPU, MCUISP),
+	DEFINE_SYSMMU_RESOURCE(EXYNOS5, SCALERC, SCALERCISP),
+	DEFINE_SYSMMU_RESOURCE(EXYNOS5, SCALERP, SCALERPISP),
+	DEFINE_SYSMMU_RESOURCE(EXYNOS5,	ODC, ODC),
+	DEFINE_SYSMMU_RESOURCE(EXYNOS5, DIS0, DIS0),
+	DEFINE_SYSMMU_RESOURCE(EXYNOS5, DIS1, DIS1),
+	DEFINE_SYSMMU_RESOURCE(EXYNOS5, 3DNR, 3DNR),
+};
+
+static struct sysmmu_resource_map sysmmu_resmap5[] __initdata = {
+	SYSMMU_RESOURCE_MAPPING(5,	jpeg,	jpeg),
+	SYSMMU_RESOURCE_MAPPING(5,	fimd1,	fimd1),
+	SYSMMU_RESOURCE_MAPPING(5,	2d,	2d),
+	SYSMMU_RESOURCE_MAPPING(5,	rot,	rot),
+	SYSMMU_RESOURCE_MAPPING_PD(5,	tv,	tv,	PD_DISP1),
+	SYSMMU_RESOURCE_MAPPING_PD(5,	camif0,	flite0,	PD_GSCL),
+	SYSMMU_RESOURCE_MAPPING_PD(5,	camif1,	flite1,	PD_GSCL),
+	SYSMMU_RESOURCE_MAPPING_PD(5,	gsc0,	gsc0,	PD_GSCL),
+	SYSMMU_RESOURCE_MAPPING_PD(5,	gsc1,	gsc1,	PD_GSCL),
+	SYSMMU_RESOURCE_MAPPING_PD(5,	gsc2,	gsc2,	PD_GSCL),
+	SYSMMU_RESOURCE_MAPPING_PD(5,	gsc3,	gsc3,	PD_GSCL),
+	SYSMMU_RESOURCE_MAPPING_PD(5,	mfc_r,	mfc_r,	PD_MFC),
+	SYSMMU_RESOURCE_MAPPING_PD(5,	mfc_l,	mfc_l,	PD_MFC),
+	SYSMMU_RESOURCE_MAPPING_MCPD(5,	isp,	isp,	PD_ISP, mc_platdata),
+};
+#endif /* CONFIG_ARCH_EXYNOS5 */
+
+static int __init init_sysmmu_platform_device(void)
+{
+	int i, j;
+	struct sysmmu_resource_map *resmap[2] = {NULL, NULL};
+	int nmap[2] = {0, 0};
+
+#ifdef CONFIG_ARCH_EXYNOS5
+	if (soc_is_exynos5250()) {
+		resmap[0] = sysmmu_resmap5;
+		nmap[0] = ARRAY_SIZE(sysmmu_resmap5);
+		nmap[1] = 0;
+	}
+#endif
+
+#ifdef CONFIG_ARCH_EXYNOS4
+	if (resmap[0] == NULL) {
+		resmap[0] = sysmmu_resmap4;
+		nmap[0] = ARRAY_SIZE(sysmmu_resmap4);
+	}
+
+	if (soc_is_exynos4210()) {
+		resmap[1] = sysmmu_resmap4210;
+		nmap[1] = ARRAY_SIZE(sysmmu_resmap4210);
+	}
+
+	if (soc_is_exynos4412() || soc_is_exynos4212()) {
+		resmap[1] = sysmmu_resmap4212;
+		nmap[1] = ARRAY_SIZE(sysmmu_resmap4212);
+	}
+#endif
+
+	for (j = 0; j < 2; j++) {
+		for (i = 0; i < nmap[j]; i++) {
+			struct sysmmu_resource_map *map;
+			struct sysmmu_platform_data *platdata;
+
+			map = &resmap[j][i];
+
+			map->pdev->dev.parent = map->pdd;
+
+			platdata = map->pdev->dev.platform_data;
+			platdata->clockname = map->clocknames;
+
+			if (platform_device_add_resources(map->pdev, map->res,
+								map->rnum)) {
+				pr_err("%s: Failed to add device resources for "
+						"%s.%d\n", __func__,
+						map->pdev->name, map->pdev->id);
+				continue;
+			}
+
+			if (platform_device_register(map->pdev)) {
+				pr_err("%s: Failed to register %s.%d\n",
+					__func__, map->pdev->name,
+						map->pdev->id);
+			}
+		}
+	}
+
+	return 0;
+}
+arch_initcall(init_sysmmu_platform_device);
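
For orientation, SYSMMU_PLATFORM_DEVICE(mfc_l, 0) above expands to roughly the
following; SYSMMU_PLATDEV() and SYSMMU_DEVNAME_BASE come from the mach/sysmmu.h
hunk later in this patch, and exynos_sysmmu_dma_mask is assumed to be declared
elsewhere in dev-sysmmu.c. This is an illustrative sketch only:

static struct sysmmu_platform_data platdata_mfc_l = {
	.dbgname = "mfc_l",
};

struct platform_device exynos_device_sysmmu_mfc_l = {	/* SYSMMU_PLATDEV(mfc_l) */
	.name	= "exynos-sysmmu",			/* SYSMMU_DEVNAME_BASE */
	.id	= 0,
	.dev	= {
		.dma_mask		= &exynos_sysmmu_dma_mask,
		.coherent_dma_mask	= DMA_BIT_MASK(32),
		.platform_data		= &platdata_mfc_l,
	},
};
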
diff --git a/arch/arm/mach-exynos/dma.c b/arch/arm/mach-exynos/dma.c
index 69aaa45..f60b66d 100644
--- a/arch/arm/mach-exynos/dma.c
+++ b/arch/arm/mach-exynos/dma.c
@@ -103,10 +103,45 @@
 	DMACH_MIPI_HSI5,
 };
 
-struct dma_pl330_platdata exynos4_pdma0_pdata;
+static u8 exynos5250_pdma0_peri[] = {
+	DMACH_PCM0_RX,
+	DMACH_PCM0_TX,
+	DMACH_PCM2_RX,
+	DMACH_PCM2_TX,
+	DMACH_SPI0_RX,
+	DMACH_SPI0_TX,
+	DMACH_SPI2_RX,
+	DMACH_SPI2_TX,
+	DMACH_I2S0S_TX,
+	DMACH_I2S0_RX,
+	DMACH_I2S0_TX,
+	DMACH_I2S2_RX,
+	DMACH_I2S2_TX,
+	DMACH_UART0_RX,
+	DMACH_UART0_TX,
+	DMACH_UART2_RX,
+	DMACH_UART2_TX,
+	DMACH_UART4_RX,
+	DMACH_UART4_TX,
+	DMACH_SLIMBUS0_RX,
+	DMACH_SLIMBUS0_TX,
+	DMACH_SLIMBUS2_RX,
+	DMACH_SLIMBUS2_TX,
+	DMACH_SLIMBUS4_RX,
+	DMACH_SLIMBUS4_TX,
+	DMACH_AC97_MICIN,
+	DMACH_AC97_PCMIN,
+	DMACH_AC97_PCMOUT,
+	DMACH_MIPI_HSI0,
+	DMACH_MIPI_HSI2,
+	DMACH_MIPI_HSI4,
+	DMACH_MIPI_HSI6,
+};
 
-static AMBA_AHB_DEVICE(exynos4_pdma0, "dma-pl330.0", 0x00041330,
-	EXYNOS4_PA_PDMA0, {EXYNOS4_IRQ_PDMA0}, &exynos4_pdma0_pdata);
+static struct dma_pl330_platdata exynos_pdma0_pdata;
+
+static AMBA_AHB_DEVICE(exynos_pdma0, "dma-pl330.0", 0x00041330,
+	EXYNOS4_PA_PDMA0, {EXYNOS4_IRQ_PDMA0}, &exynos_pdma0_pdata);
 
 static u8 exynos4210_pdma1_peri[] = {
 	DMACH_PCM0_RX,
@@ -169,10 +204,45 @@
 	DMACH_MIPI_HSI7,
 };
 
-static struct dma_pl330_platdata exynos4_pdma1_pdata;
+static u8 exynos5250_pdma1_peri[] = {
+	DMACH_PCM0_RX,
+	DMACH_PCM0_TX,
+	DMACH_PCM1_RX,
+	DMACH_PCM1_TX,
+	DMACH_SPI1_RX,
+	DMACH_SPI1_TX,
+	DMACH_PWM,
+	DMACH_SPDIF,
+	DMACH_I2S0S_TX,
+	DMACH_I2S0_RX,
+	DMACH_I2S0_TX,
+	DMACH_I2S1_RX,
+	DMACH_I2S1_TX,
+	DMACH_UART0_RX,
+	DMACH_UART0_TX,
+	DMACH_UART1_RX,
+	DMACH_UART1_TX,
+	DMACH_UART3_RX,
+	DMACH_UART3_TX,
+	DMACH_SLIMBUS1_RX,
+	DMACH_SLIMBUS1_TX,
+	DMACH_SLIMBUS3_RX,
+	DMACH_SLIMBUS3_TX,
+	DMACH_SLIMBUS5_RX,
+	DMACH_SLIMBUS5_TX,
+	DMACH_SLIMBUS0AUX_RX,
+	DMACH_SLIMBUS0AUX_TX,
+	DMACH_DISP1,
+	DMACH_MIPI_HSI1,
+	DMACH_MIPI_HSI3,
+	DMACH_MIPI_HSI5,
+	DMACH_MIPI_HSI7,
+};
 
-static AMBA_AHB_DEVICE(exynos4_pdma1,  "dma-pl330.1", 0x00041330,
-	EXYNOS4_PA_PDMA1, {EXYNOS4_IRQ_PDMA1}, &exynos4_pdma1_pdata);
+static struct dma_pl330_platdata exynos_pdma1_pdata;
+
+static AMBA_AHB_DEVICE(exynos_pdma1,  "dma-pl330.1", 0x00041330,
+	EXYNOS4_PA_PDMA1, {EXYNOS4_IRQ_PDMA1}, &exynos_pdma1_pdata);
 
 static u8 mdma_peri[] = {
 	DMACH_MTOM_0,
@@ -185,46 +255,63 @@
 	DMACH_MTOM_7,
 };
 
-static struct dma_pl330_platdata exynos4_mdma1_pdata = {
+static struct dma_pl330_platdata exynos_mdma1_pdata = {
 	.nr_valid_peri = ARRAY_SIZE(mdma_peri),
 	.peri_id = mdma_peri,
 };
 
-static AMBA_AHB_DEVICE(exynos4_mdma1,  "dma-pl330.2", 0x00041330,
-	EXYNOS4_PA_MDMA1, {EXYNOS4_IRQ_MDMA1}, &exynos4_mdma1_pdata);
+static AMBA_AHB_DEVICE(exynos_mdma1,  "dma-pl330.2", 0x00041330,
+	EXYNOS4_PA_MDMA1, {EXYNOS4_IRQ_MDMA1}, &exynos_mdma1_pdata);
 
-static int __init exynos4_dma_init(void)
+static int __init exynos_dma_init(void)
 {
 	if (of_have_populated_dt())
 		return 0;
 
 	if (soc_is_exynos4210()) {
-		exynos4_pdma0_pdata.nr_valid_peri =
+		exynos_pdma0_pdata.nr_valid_peri =
 			ARRAY_SIZE(exynos4210_pdma0_peri);
-		exynos4_pdma0_pdata.peri_id = exynos4210_pdma0_peri;
-		exynos4_pdma1_pdata.nr_valid_peri =
+		exynos_pdma0_pdata.peri_id = exynos4210_pdma0_peri;
+		exynos_pdma1_pdata.nr_valid_peri =
 			ARRAY_SIZE(exynos4210_pdma1_peri);
-		exynos4_pdma1_pdata.peri_id = exynos4210_pdma1_peri;
+		exynos_pdma1_pdata.peri_id = exynos4210_pdma1_peri;
 	} else if (soc_is_exynos4212() || soc_is_exynos4412()) {
-		exynos4_pdma0_pdata.nr_valid_peri =
+		exynos_pdma0_pdata.nr_valid_peri =
 			ARRAY_SIZE(exynos4212_pdma0_peri);
-		exynos4_pdma0_pdata.peri_id = exynos4212_pdma0_peri;
-		exynos4_pdma1_pdata.nr_valid_peri =
+		exynos_pdma0_pdata.peri_id = exynos4212_pdma0_peri;
+		exynos_pdma1_pdata.nr_valid_peri =
 			ARRAY_SIZE(exynos4212_pdma1_peri);
-		exynos4_pdma1_pdata.peri_id = exynos4212_pdma1_peri;
+		exynos_pdma1_pdata.peri_id = exynos4212_pdma1_peri;
+	} else if (soc_is_exynos5250()) {
+		exynos_pdma0_pdata.nr_valid_peri =
+			ARRAY_SIZE(exynos5250_pdma0_peri);
+		exynos_pdma0_pdata.peri_id = exynos5250_pdma0_peri;
+		exynos_pdma1_pdata.nr_valid_peri =
+			ARRAY_SIZE(exynos5250_pdma1_peri);
+		exynos_pdma1_pdata.peri_id = exynos5250_pdma1_peri;
+
+		exynos_pdma0_device.res.start = EXYNOS5_PA_PDMA0;
+		exynos_pdma0_device.res.end = EXYNOS5_PA_PDMA0 + SZ_4K;
+		exynos_pdma0_device.irq[0] = EXYNOS5_IRQ_PDMA0;
+		exynos_pdma1_device.res.start = EXYNOS5_PA_PDMA1;
+		exynos_pdma1_device.res.end = EXYNOS5_PA_PDMA1 + SZ_4K;
+		exynos_pdma1_device.irq[0] = EXYNOS5_IRQ_PDMA1;
+		exynos_mdma1_device.res.start = EXYNOS5_PA_MDMA1;
+		exynos_mdma1_device.res.end = EXYNOS5_PA_MDMA1 + SZ_4K;
+		exynos_mdma1_device.irq[0] = EXYNOS5_IRQ_MDMA1;
 	}
 
-	dma_cap_set(DMA_SLAVE, exynos4_pdma0_pdata.cap_mask);
-	dma_cap_set(DMA_CYCLIC, exynos4_pdma0_pdata.cap_mask);
-	amba_device_register(&exynos4_pdma0_device, &iomem_resource);
+	dma_cap_set(DMA_SLAVE, exynos_pdma0_pdata.cap_mask);
+	dma_cap_set(DMA_CYCLIC, exynos_pdma0_pdata.cap_mask);
+	amba_device_register(&exynos_pdma0_device, &iomem_resource);
 
-	dma_cap_set(DMA_SLAVE, exynos4_pdma1_pdata.cap_mask);
-	dma_cap_set(DMA_CYCLIC, exynos4_pdma1_pdata.cap_mask);
-	amba_device_register(&exynos4_pdma1_device, &iomem_resource);
+	dma_cap_set(DMA_SLAVE, exynos_pdma1_pdata.cap_mask);
+	dma_cap_set(DMA_CYCLIC, exynos_pdma1_pdata.cap_mask);
+	amba_device_register(&exynos_pdma1_device, &iomem_resource);
 
-	dma_cap_set(DMA_MEMCPY, exynos4_mdma1_pdata.cap_mask);
-	amba_device_register(&exynos4_mdma1_device, &iomem_resource);
+	dma_cap_set(DMA_MEMCPY, exynos_mdma1_pdata.cap_mask);
+	amba_device_register(&exynos_mdma1_device, &iomem_resource);
 
 	return 0;
 }
-arch_initcall(exynos4_dma_init);
+arch_initcall(exynos_dma_init);
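
For reference, AMBA_AHB_DEVICE() from <linux/amba/bus.h> declares a
struct amba_device named <prefix>_device, which is why exynos_dma_init() can
patch the MMIO resource and IRQ of exynos_pdma0_device and friends in place
before amba_device_register(). A simplified sketch of what the pdma0
declaration amounts to (field layout abbreviated):

struct amba_device exynos_pdma0_device = {
	.dev = {
		.init_name		= "dma-pl330.0",
		.platform_data		= &exynos_pdma0_pdata,
		.coherent_dma_mask	= ~0ULL,
	},
	.res		= DEFINE_RES_MEM(EXYNOS4_PA_PDMA0, SZ_4K),
	.irq		= { EXYNOS4_IRQ_PDMA0 },
	.periphid	= 0x00041330,
};
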
diff --git a/arch/arm/mach-exynos/include/mach/gpio.h b/arch/arm/mach-exynos/include/mach/gpio.h
index d7498af..eb24f1e 100644
--- a/arch/arm/mach-exynos/include/mach/gpio.h
+++ b/arch/arm/mach-exynos/include/mach/gpio.h
@@ -153,10 +153,11 @@
 #define EXYNOS5_GPIO_B2_NR	(4)
 #define EXYNOS5_GPIO_B3_NR	(4)
 #define EXYNOS5_GPIO_C0_NR	(7)
-#define EXYNOS5_GPIO_C1_NR	(7)
+#define EXYNOS5_GPIO_C1_NR	(4)
 #define EXYNOS5_GPIO_C2_NR	(7)
 #define EXYNOS5_GPIO_C3_NR	(7)
-#define EXYNOS5_GPIO_D0_NR	(8)
+#define EXYNOS5_GPIO_C4_NR	(7)
+#define EXYNOS5_GPIO_D0_NR	(4)
 #define EXYNOS5_GPIO_D1_NR	(8)
 #define EXYNOS5_GPIO_Y0_NR	(6)
 #define EXYNOS5_GPIO_Y1_NR	(4)
@@ -199,7 +200,8 @@
 	EXYNOS5_GPIO_C1_START		= EXYNOS_GPIO_NEXT(EXYNOS5_GPIO_C0),
 	EXYNOS5_GPIO_C2_START		= EXYNOS_GPIO_NEXT(EXYNOS5_GPIO_C1),
 	EXYNOS5_GPIO_C3_START		= EXYNOS_GPIO_NEXT(EXYNOS5_GPIO_C2),
-	EXYNOS5_GPIO_D0_START		= EXYNOS_GPIO_NEXT(EXYNOS5_GPIO_C3),
+	EXYNOS5_GPIO_C4_START		= EXYNOS_GPIO_NEXT(EXYNOS5_GPIO_C3),
+	EXYNOS5_GPIO_D0_START		= EXYNOS_GPIO_NEXT(EXYNOS5_GPIO_C4),
 	EXYNOS5_GPIO_D1_START		= EXYNOS_GPIO_NEXT(EXYNOS5_GPIO_D0),
 	EXYNOS5_GPIO_Y0_START		= EXYNOS_GPIO_NEXT(EXYNOS5_GPIO_D1),
 	EXYNOS5_GPIO_Y1_START		= EXYNOS_GPIO_NEXT(EXYNOS5_GPIO_Y0),
@@ -242,6 +244,7 @@
 #define EXYNOS5_GPC1(_nr)	(EXYNOS5_GPIO_C1_START + (_nr))
 #define EXYNOS5_GPC2(_nr)	(EXYNOS5_GPIO_C2_START + (_nr))
 #define EXYNOS5_GPC3(_nr)	(EXYNOS5_GPIO_C3_START + (_nr))
+#define EXYNOS5_GPC4(_nr)	(EXYNOS5_GPIO_C4_START + (_nr))
 #define EXYNOS5_GPD0(_nr)	(EXYNOS5_GPIO_D0_START + (_nr))
 #define EXYNOS5_GPD1(_nr)	(EXYNOS5_GPIO_D1_START + (_nr))
 #define EXYNOS5_GPY0(_nr)	(EXYNOS5_GPIO_Y0_START + (_nr))
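
The new GPC4 bank is addressed exactly like the existing banks once its START
offset is chained in above. A minimal usage sketch (the label "example" and the
chosen pin are made up for illustration):

#include <linux/gpio.h>

static int __init example_claim_gpc4_pin(void)
{
	int gpio = EXYNOS5_GPC4(2);	/* third pin of bank GPC4 */

	if (gpio_request(gpio, "example"))
		return -EBUSY;

	return gpio_direction_output(gpio, 1);
}
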
diff --git a/arch/arm/mach-exynos/include/mach/irqs.h b/arch/arm/mach-exynos/include/mach/irqs.h
index c02dae7..7a4b478 100644
--- a/arch/arm/mach-exynos/include/mach/irqs.h
+++ b/arch/arm/mach-exynos/include/mach/irqs.h
@@ -154,6 +154,13 @@
 #define EXYNOS4_IRQ_SYSMMU_MFC_M1_0	COMBINER_IRQ(5, 6)
 #define EXYNOS4_IRQ_SYSMMU_PCIE_0	COMBINER_IRQ(5, 7)
 
+#define EXYNOS4_IRQ_SYSMMU_FIMC_LITE0_0	COMBINER_IRQ(16, 0)
+#define EXYNOS4_IRQ_SYSMMU_FIMC_LITE1_0	COMBINER_IRQ(16, 1)
+#define EXYNOS4_IRQ_SYSMMU_FIMC_ISP_0	COMBINER_IRQ(16, 2)
+#define EXYNOS4_IRQ_SYSMMU_FIMC_DRC_0	COMBINER_IRQ(16, 3)
+#define EXYNOS4_IRQ_SYSMMU_FIMC_FD_0	COMBINER_IRQ(16, 4)
+#define EXYNOS4_IRQ_SYSMMU_FIMC_CX_0	COMBINER_IRQ(16, 5)
+
 #define EXYNOS4_IRQ_FIMD0_FIFO		COMBINER_IRQ(11, 0)
 #define EXYNOS4_IRQ_FIMD0_VSYNC		COMBINER_IRQ(11, 1)
 #define EXYNOS4_IRQ_FIMD0_SYSTEM	COMBINER_IRQ(11, 2)
@@ -221,24 +228,6 @@
 #define IRQ_KEYPAD			EXYNOS4_IRQ_KEYPAD
 #define IRQ_PMU				EXYNOS4_IRQ_PMU
 
-#define IRQ_SYSMMU_MDMA0_0		EXYNOS4_IRQ_SYSMMU_MDMA0_0
-#define IRQ_SYSMMU_SSS_0                EXYNOS4_IRQ_SYSMMU_SSS_0
-#define IRQ_SYSMMU_FIMC0_0              EXYNOS4_IRQ_SYSMMU_FIMC0_0
-#define IRQ_SYSMMU_FIMC1_0              EXYNOS4_IRQ_SYSMMU_FIMC1_0
-#define IRQ_SYSMMU_FIMC2_0              EXYNOS4_IRQ_SYSMMU_FIMC2_0
-#define IRQ_SYSMMU_FIMC3_0              EXYNOS4_IRQ_SYSMMU_FIMC3_0
-#define IRQ_SYSMMU_JPEG_0               EXYNOS4_IRQ_SYSMMU_JPEG_0
-#define IRQ_SYSMMU_2D_0                 EXYNOS4_IRQ_SYSMMU_2D_0
-
-#define IRQ_SYSMMU_ROTATOR_0            EXYNOS4_IRQ_SYSMMU_ROTATOR_0
-#define IRQ_SYSMMU_MDMA1_0              EXYNOS4_IRQ_SYSMMU_MDMA1_0
-#define IRQ_SYSMMU_LCD0_M0_0            EXYNOS4_IRQ_SYSMMU_LCD0_M0_0
-#define IRQ_SYSMMU_LCD1_M1_0            EXYNOS4_IRQ_SYSMMU_LCD1_M1_0
-#define IRQ_SYSMMU_TV_M0_0              EXYNOS4_IRQ_SYSMMU_TV_M0_0
-#define IRQ_SYSMMU_MFC_M0_0             EXYNOS4_IRQ_SYSMMU_MFC_M0_0
-#define IRQ_SYSMMU_MFC_M1_0             EXYNOS4_IRQ_SYSMMU_MFC_M1_0
-#define IRQ_SYSMMU_PCIE_0               EXYNOS4_IRQ_SYSMMU_PCIE_0
-
 #define IRQ_FIMD0_FIFO			EXYNOS4_IRQ_FIMD0_FIFO
 #define IRQ_FIMD0_VSYNC			EXYNOS4_IRQ_FIMD0_VSYNC
 #define IRQ_FIMD0_SYSTEM		EXYNOS4_IRQ_FIMD0_SYSTEM
@@ -298,6 +287,7 @@
 #define EXYNOS5_IRQ_MIPICSI1		IRQ_SPI(80)
 #define EXYNOS5_IRQ_EFNFCON_DMA_ABORT	IRQ_SPI(81)
 #define EXYNOS5_IRQ_MIPIDSI0		IRQ_SPI(82)
+#define EXYNOS5_IRQ_WDT_IOP		IRQ_SPI(83)
 #define EXYNOS5_IRQ_ROTATOR		IRQ_SPI(84)
 #define EXYNOS5_IRQ_GSC0		IRQ_SPI(85)
 #define EXYNOS5_IRQ_GSC1		IRQ_SPI(86)
@@ -306,8 +296,8 @@
 #define EXYNOS5_IRQ_JPEG		IRQ_SPI(89)
 #define EXYNOS5_IRQ_EFNFCON_DMA		IRQ_SPI(90)
 #define EXYNOS5_IRQ_2D			IRQ_SPI(91)
-#define EXYNOS5_IRQ_SFMC0		IRQ_SPI(92)
-#define EXYNOS5_IRQ_SFMC1		IRQ_SPI(93)
+#define EXYNOS5_IRQ_EFNFCON_0		IRQ_SPI(92)
+#define EXYNOS5_IRQ_EFNFCON_1		IRQ_SPI(93)
 #define EXYNOS5_IRQ_MIXER		IRQ_SPI(94)
 #define EXYNOS5_IRQ_HDMI		IRQ_SPI(95)
 #define EXYNOS5_IRQ_MFC			IRQ_SPI(96)
@@ -321,7 +311,7 @@
 #define EXYNOS5_IRQ_PCM2		IRQ_SPI(104)
 #define EXYNOS5_IRQ_SPDIF		IRQ_SPI(105)
 #define EXYNOS5_IRQ_ADC0		IRQ_SPI(106)
-
+#define EXYNOS5_IRQ_ADC1		IRQ_SPI(107)
 #define EXYNOS5_IRQ_SATA_PHY		IRQ_SPI(108)
 #define EXYNOS5_IRQ_SATA_PMEMREQ	IRQ_SPI(109)
 #define EXYNOS5_IRQ_CAM_C		IRQ_SPI(110)
@@ -330,8 +320,9 @@
 #define EXYNOS5_IRQ_DP1_INTP1		IRQ_SPI(113)
 #define EXYNOS5_IRQ_CEC			IRQ_SPI(114)
 #define EXYNOS5_IRQ_SATA		IRQ_SPI(115)
-#define EXYNOS5_IRQ_NFCON		IRQ_SPI(116)
 
+#define EXYNOS5_IRQ_MCT_L0		IRQ_SPI(120)
+#define EXYNOS5_IRQ_MCT_L1		IRQ_SPI(121)
 #define EXYNOS5_IRQ_MMC44		IRQ_SPI(123)
 #define EXYNOS5_IRQ_MDMA1		IRQ_SPI(124)
 #define EXYNOS5_IRQ_FIMC_LITE0		IRQ_SPI(125)
@@ -339,7 +330,6 @@
 #define EXYNOS5_IRQ_RP_TIMER		IRQ_SPI(127)
 
 #define EXYNOS5_IRQ_PMU			COMBINER_IRQ(1, 2)
-#define EXYNOS5_IRQ_PMU_CPU1		COMBINER_IRQ(1, 6)
 
 #define EXYNOS5_IRQ_SYSMMU_GSC0_0	COMBINER_IRQ(2, 0)
 #define EXYNOS5_IRQ_SYSMMU_GSC0_1	COMBINER_IRQ(2, 1)
@@ -350,6 +340,8 @@
 #define EXYNOS5_IRQ_SYSMMU_GSC3_0	COMBINER_IRQ(2, 6)
 #define EXYNOS5_IRQ_SYSMMU_GSC3_1	COMBINER_IRQ(2, 7)
 
+#define EXYNOS5_IRQ_SYSMMU_LITE2_0	COMBINER_IRQ(3, 0)
+#define EXYNOS5_IRQ_SYSMMU_LITE2_1	COMBINER_IRQ(3, 1)
 #define EXYNOS5_IRQ_SYSMMU_FIMD1_0	COMBINER_IRQ(3, 2)
 #define EXYNOS5_IRQ_SYSMMU_FIMD1_1	COMBINER_IRQ(3, 3)
 #define EXYNOS5_IRQ_SYSMMU_LITE0_0	COMBINER_IRQ(3, 4)
@@ -373,8 +365,8 @@
 
 #define EXYNOS5_IRQ_SYSMMU_ARM_0	COMBINER_IRQ(6, 0)
 #define EXYNOS5_IRQ_SYSMMU_ARM_1	COMBINER_IRQ(6, 1)
-#define EXYNOS5_IRQ_SYSMMU_MFC_L_0	COMBINER_IRQ(6, 2)
-#define EXYNOS5_IRQ_SYSMMU_MFC_L_1	COMBINER_IRQ(6, 3)
+#define EXYNOS5_IRQ_SYSMMU_MFC_R_0	COMBINER_IRQ(6, 2)
+#define EXYNOS5_IRQ_SYSMMU_MFC_R_1	COMBINER_IRQ(6, 3)
 #define EXYNOS5_IRQ_SYSMMU_RTIC_0	COMBINER_IRQ(6, 4)
 #define EXYNOS5_IRQ_SYSMMU_RTIC_1	COMBINER_IRQ(6, 5)
 #define EXYNOS5_IRQ_SYSMMU_SSS_0	COMBINER_IRQ(6, 6)
@@ -386,11 +378,9 @@
 #define EXYNOS5_IRQ_SYSMMU_MDMA1_1	COMBINER_IRQ(7, 3)
 #define EXYNOS5_IRQ_SYSMMU_TV_0		COMBINER_IRQ(7, 4)
 #define EXYNOS5_IRQ_SYSMMU_TV_1		COMBINER_IRQ(7, 5)
-#define EXYNOS5_IRQ_SYSMMU_GPSX_0	COMBINER_IRQ(7, 6)
-#define EXYNOS5_IRQ_SYSMMU_GPSX_1	COMBINER_IRQ(7, 7)
 
-#define EXYNOS5_IRQ_SYSMMU_MFC_R_0	COMBINER_IRQ(8, 5)
-#define EXYNOS5_IRQ_SYSMMU_MFC_R_1	COMBINER_IRQ(8, 6)
+#define EXYNOS5_IRQ_SYSMMU_MFC_L_0	COMBINER_IRQ(8, 5)
+#define EXYNOS5_IRQ_SYSMMU_MFC_L_1	COMBINER_IRQ(8, 6)
 
 #define EXYNOS5_IRQ_SYSMMU_DIS1_0	COMBINER_IRQ(9, 4)
 #define EXYNOS5_IRQ_SYSMMU_DIS1_1	COMBINER_IRQ(9, 5)
@@ -406,17 +396,24 @@
 #define EXYNOS5_IRQ_SYSMMU_DRC_0	COMBINER_IRQ(11, 6)
 #define EXYNOS5_IRQ_SYSMMU_DRC_1	COMBINER_IRQ(11, 7)
 
+#define EXYNOS5_IRQ_MDMA1_ABORT		COMBINER_IRQ(13, 1)
+
+#define EXYNOS5_IRQ_MDMA0_ABORT		COMBINER_IRQ(15, 3)
+
 #define EXYNOS5_IRQ_FIMD1_FIFO		COMBINER_IRQ(18, 4)
 #define EXYNOS5_IRQ_FIMD1_VSYNC		COMBINER_IRQ(18, 5)
 #define EXYNOS5_IRQ_FIMD1_SYSTEM	COMBINER_IRQ(18, 6)
 
+#define EXYNOS5_IRQ_ARMIOP_GIC		COMBINER_IRQ(19, 0)
+#define EXYNOS5_IRQ_ARMISP_GIC		COMBINER_IRQ(19, 1)
+#define EXYNOS5_IRQ_IOP_GIC		COMBINER_IRQ(19, 3)
+#define EXYNOS5_IRQ_ISP_GIC		COMBINER_IRQ(19, 4)
+
+#define EXYNOS5_IRQ_PMU_CPU1		COMBINER_IRQ(22, 4)
+
 #define EXYNOS5_IRQ_EINT0		COMBINER_IRQ(23, 0)
-#define EXYNOS5_IRQ_MCT_L0		COMBINER_IRQ(23, 1)
-#define EXYNOS5_IRQ_MCT_L1		COMBINER_IRQ(23, 2)
 #define EXYNOS5_IRQ_MCT_G0		COMBINER_IRQ(23, 3)
 #define EXYNOS5_IRQ_MCT_G1		COMBINER_IRQ(23, 4)
-#define EXYNOS5_IRQ_MCT_G2		COMBINER_IRQ(23, 5)
-#define EXYNOS5_IRQ_MCT_G3		COMBINER_IRQ(23, 6)
 
 #define EXYNOS5_IRQ_EINT1		COMBINER_IRQ(24, 0)
 #define EXYNOS5_IRQ_SYSMMU_LITE1_0	COMBINER_IRQ(24, 1)
@@ -447,7 +444,7 @@
 
 #define EXYNOS5_MAX_COMBINER_NR		32
 
-#define EXYNOS5_IRQ_GPIO1_NR_GROUPS	13
+#define EXYNOS5_IRQ_GPIO1_NR_GROUPS	14
 #define EXYNOS5_IRQ_GPIO2_NR_GROUPS	9
 #define EXYNOS5_IRQ_GPIO3_NR_GROUPS	5
 #define EXYNOS5_IRQ_GPIO4_NR_GROUPS	1
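
EXYNOS5_IRQ_MCT_L0/L1 move from combiner inputs to dedicated SPIs because the
mct.c change later in this patch drives the EXYNOS5250 local timers as SPI
interrupts; their consumption looks roughly like this (simplified from that
hunk):

	evt->irq = EXYNOS5_IRQ_MCT_L0;
	setup_irq(EXYNOS5_IRQ_MCT_L0, &mct_tick0_event_irq);
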
diff --git a/arch/arm/mach-exynos/include/mach/map.h b/arch/arm/mach-exynos/include/mach/map.h
index e009a66..ca4aa89 100644
--- a/arch/arm/mach-exynos/include/mach/map.h
+++ b/arch/arm/mach-exynos/include/mach/map.h
@@ -34,6 +34,9 @@
 
 #define EXYNOS4_PA_JPEG			0x11840000
 
+/* x = 0...1 */
+#define EXYNOS4_PA_FIMC_LITE(x)		(0x12390000 + ((x) * 0x10000))
+
 #define EXYNOS4_PA_G2D			0x12800000
 
 #define EXYNOS4_PA_I2S0			0x03830000
@@ -78,8 +81,8 @@
 
 #define EXYNOS4_PA_GIC_CPU		0x10480000
 #define EXYNOS4_PA_GIC_DIST		0x10490000
-#define EXYNOS5_PA_GIC_CPU		0x10480000
-#define EXYNOS5_PA_GIC_DIST		0x10490000
+#define EXYNOS5_PA_GIC_CPU		0x10482000
+#define EXYNOS5_PA_GIC_DIST		0x10481000
 
 #define EXYNOS4_PA_COREPERI		0x10500000
 #define EXYNOS4_PA_TWD			0x10500600
@@ -95,6 +98,7 @@
 #define EXYNOS5_PA_PDMA1		0x121B0000
 
 #define EXYNOS4_PA_SYSMMU_MDMA		0x10A40000
+#define EXYNOS4_PA_SYSMMU_2D_ACP	0x10A40000
 #define EXYNOS4_PA_SYSMMU_SSS		0x10A50000
 #define EXYNOS4_PA_SYSMMU_FIMC0		0x11A20000
 #define EXYNOS4_PA_SYSMMU_FIMC1		0x11A30000
@@ -103,6 +107,12 @@
 #define EXYNOS4_PA_SYSMMU_JPEG		0x11A60000
 #define EXYNOS4_PA_SYSMMU_FIMD0		0x11E20000
 #define EXYNOS4_PA_SYSMMU_FIMD1		0x12220000
+#define EXYNOS4_PA_SYSMMU_FIMC_ISP	0x12260000
+#define EXYNOS4_PA_SYSMMU_FIMC_DRC	0x12270000
+#define EXYNOS4_PA_SYSMMU_FIMC_FD	0x122A0000
+#define EXYNOS4_PA_SYSMMU_ISPCPU	0x122B0000
+#define EXYNOS4_PA_SYSMMU_FIMC_LITE0	0x123B0000
+#define EXYNOS4_PA_SYSMMU_FIMC_LITE1	0x123C0000
 #define EXYNOS4_PA_SYSMMU_PCIe		0x12620000
 #define EXYNOS4_PA_SYSMMU_G2D		0x12A20000
 #define EXYNOS4_PA_SYSMMU_ROTATOR	0x12A30000
@@ -110,6 +120,37 @@
 #define EXYNOS4_PA_SYSMMU_TV		0x12E20000
 #define EXYNOS4_PA_SYSMMU_MFC_L		0x13620000
 #define EXYNOS4_PA_SYSMMU_MFC_R		0x13630000
+
+#define EXYNOS5_PA_SYSMMU_MDMA1		0x10A40000
+#define EXYNOS5_PA_SYSMMU_SSS		0x10A50000
+#define EXYNOS5_PA_SYSMMU_2D		0x10A60000
+#define EXYNOS5_PA_SYSMMU_MFC_L		0x11200000
+#define EXYNOS5_PA_SYSMMU_MFC_R		0x11210000
+#define EXYNOS5_PA_SYSMMU_ROTATOR	0x11D40000
+#define EXYNOS5_PA_SYSMMU_MDMA2		0x11D50000
+#define EXYNOS5_PA_SYSMMU_JPEG		0x11F20000
+#define EXYNOS5_PA_SYSMMU_IOP		0x12360000
+#define EXYNOS5_PA_SYSMMU_RTIC		0x12370000
+#define EXYNOS5_PA_SYSMMU_GPS		0x12630000
+#define EXYNOS5_PA_SYSMMU_ISP		0x13260000
+#define EXYNOS5_PA_SYSMMU_DRC		0x12370000
+#define EXYNOS5_PA_SYSMMU_SCALERC	0x13280000
+#define EXYNOS5_PA_SYSMMU_SCALERP	0x13290000
+#define EXYNOS5_PA_SYSMMU_FD		0x132A0000
+#define EXYNOS5_PA_SYSMMU_ISPCPU	0x132B0000
+#define EXYNOS5_PA_SYSMMU_ODC		0x132C0000
+#define EXYNOS5_PA_SYSMMU_DIS0		0x132D0000
+#define EXYNOS5_PA_SYSMMU_DIS1		0x132E0000
+#define EXYNOS5_PA_SYSMMU_3DNR		0x132F0000
+#define EXYNOS5_PA_SYSMMU_LITE0		0x13C40000
+#define EXYNOS5_PA_SYSMMU_LITE1		0x13C50000
+#define EXYNOS5_PA_SYSMMU_GSC0		0x13E80000
+#define EXYNOS5_PA_SYSMMU_GSC1		0x13E90000
+#define EXYNOS5_PA_SYSMMU_GSC2		0x13EA0000
+#define EXYNOS5_PA_SYSMMU_GSC3		0x13EB0000
+#define EXYNOS5_PA_SYSMMU_FIMD1		0x14640000
+#define EXYNOS5_PA_SYSMMU_TV		0x14650000
+
 #define EXYNOS4_PA_SPI0			0x13920000
 #define EXYNOS4_PA_SPI1			0x13930000
 #define EXYNOS4_PA_SPI2			0x13940000
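
The parameterised FIMC-LITE base resolves to 0x12390000 for instance 0 and
0x123A0000 for instance 1. A sketch of a resource entry built from it (the
SZ_4K window size is an assumption, not taken from this patch):

static struct resource exynos4_fimc_lite1_resource[] = {
	DEFINE_RES_MEM(EXYNOS4_PA_FIMC_LITE(1), SZ_4K),	/* 0x123A0000 */
};
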
diff --git a/arch/arm/mach-exynos/include/mach/pm-core.h b/arch/arm/mach-exynos/include/mach/pm-core.h
index 9d8da51e3..a67ecfa 100644
--- a/arch/arm/mach-exynos/include/mach/pm-core.h
+++ b/arch/arm/mach-exynos/include/mach/pm-core.h
@@ -33,7 +33,7 @@
 	__raw_writel(tmp, S5P_WAKEUP_MASK);
 
 	__raw_writel(s3c_irqwake_intmask, S5P_WAKEUP_MASK);
-	__raw_writel(s3c_irqwake_eintmask, S5P_EINT_WAKEUP_MASK);
+	__raw_writel(s3c_irqwake_eintmask & 0xFFFFFFFE, S5P_EINT_WAKEUP_MASK);
 }
 
 static inline void s3c_pm_arch_stop_clocks(void)
diff --git a/arch/arm/mach-exynos/include/mach/pmu.h b/arch/arm/mach-exynos/include/mach/pmu.h
index e76b7fa..7c27c2d 100644
--- a/arch/arm/mach-exynos/include/mach/pmu.h
+++ b/arch/arm/mach-exynos/include/mach/pmu.h
@@ -23,12 +23,12 @@
 };
 
 extern unsigned long l2x0_regs_phys;
-struct exynos4_pmu_conf {
+struct exynos_pmu_conf {
 	void __iomem *reg;
 	unsigned int val[NUM_SYS_POWERDOWN];
 };
 
-extern void exynos4_sys_powerdown_conf(enum sys_powerdown mode);
+extern void exynos_sys_powerdown_conf(enum sys_powerdown mode);
 extern void s3c_cpu_resume(void);
 
 #endif /* __ASM_ARCH_PMU_H */
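
Each exynos_pmu_conf entry pairs a PMU register with one value per entry of
enum sys_powerdown (assumed here to be AFTR, LPA and SLEEP). A sketch of a
table entry; the values shown are placeholders, not settings from this patch:

static const struct exynos_pmu_conf example_pmu_config[] = {
	/* { .reg,			{ AFTR, LPA, SLEEP } } */
	{ S5P_ARM_CORE0_LOWPWR,		{ 0x0,  0x0,  0x2 } },
};
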
diff --git a/arch/arm/mach-exynos/include/mach/regs-clock.h b/arch/arm/mach-exynos/include/mach/regs-clock.h
index d9578a5..8c9b38c 100644
--- a/arch/arm/mach-exynos/include/mach/regs-clock.h
+++ b/arch/arm/mach-exynos/include/mach/regs-clock.h
@@ -135,6 +135,9 @@
 #define EXYNOS4_CLKGATE_SCLKCPU			EXYNOS_CLKREG(0x14800)
 #define EXYNOS4_CLKGATE_IP_CPU			EXYNOS_CLKREG(0x14900)
 
+#define EXYNOS4_CLKGATE_IP_ISP0			EXYNOS_CLKREG(0x18800)
+#define EXYNOS4_CLKGATE_IP_ISP1			EXYNOS_CLKREG(0x18804)
+
 #define EXYNOS4_APLL_LOCKTIME			(0x1C20)	/* 300us */
 
 #define EXYNOS4_APLLCON0_ENABLE_SHIFT		(31)
@@ -271,41 +274,59 @@
 
 #define EXYNOS5_CLKDIV_ACP			EXYNOS_CLKREG(0x08500)
 
-#define EXYNOS5_CLKSRC_TOP2			EXYNOS_CLKREG(0x10218)
 #define EXYNOS5_EPLL_CON0			EXYNOS_CLKREG(0x10130)
 #define EXYNOS5_EPLL_CON1			EXYNOS_CLKREG(0x10134)
+#define EXYNOS5_EPLL_CON2			EXYNOS_CLKREG(0x10138)
 #define EXYNOS5_VPLL_CON0			EXYNOS_CLKREG(0x10140)
 #define EXYNOS5_VPLL_CON1			EXYNOS_CLKREG(0x10144)
+#define EXYNOS5_VPLL_CON2			EXYNOS_CLKREG(0x10148)
 #define EXYNOS5_CPLL_CON0			EXYNOS_CLKREG(0x10120)
 
 #define EXYNOS5_CLKSRC_TOP0			EXYNOS_CLKREG(0x10210)
+#define EXYNOS5_CLKSRC_TOP1			EXYNOS_CLKREG(0x10214)
+#define EXYNOS5_CLKSRC_TOP2			EXYNOS_CLKREG(0x10218)
 #define EXYNOS5_CLKSRC_TOP3			EXYNOS_CLKREG(0x1021C)
 #define EXYNOS5_CLKSRC_GSCL			EXYNOS_CLKREG(0x10220)
 #define EXYNOS5_CLKSRC_DISP1_0			EXYNOS_CLKREG(0x1022C)
+#define EXYNOS5_CLKSRC_MAUDIO			EXYNOS_CLKREG(0x10240)
 #define EXYNOS5_CLKSRC_FSYS			EXYNOS_CLKREG(0x10244)
 #define EXYNOS5_CLKSRC_PERIC0			EXYNOS_CLKREG(0x10250)
+#define EXYNOS5_CLKSRC_PERIC1			EXYNOS_CLKREG(0x10254)
+#define EXYNOS5_SCLK_SRC_ISP			EXYNOS_CLKREG(0x10270)
 
 #define EXYNOS5_CLKSRC_MASK_TOP			EXYNOS_CLKREG(0x10310)
 #define EXYNOS5_CLKSRC_MASK_GSCL		EXYNOS_CLKREG(0x10320)
 #define EXYNOS5_CLKSRC_MASK_DISP1_0		EXYNOS_CLKREG(0x1032C)
+#define EXYNOS5_CLKSRC_MASK_MAUDIO		EXYNOS_CLKREG(0x10334)
 #define EXYNOS5_CLKSRC_MASK_FSYS		EXYNOS_CLKREG(0x10340)
 #define EXYNOS5_CLKSRC_MASK_PERIC0		EXYNOS_CLKREG(0x10350)
+#define EXYNOS5_CLKSRC_MASK_PERIC1		EXYNOS_CLKREG(0x10354)
 
 #define EXYNOS5_CLKDIV_TOP0			EXYNOS_CLKREG(0x10510)
 #define EXYNOS5_CLKDIV_TOP1			EXYNOS_CLKREG(0x10514)
 #define EXYNOS5_CLKDIV_GSCL			EXYNOS_CLKREG(0x10520)
 #define EXYNOS5_CLKDIV_DISP1_0			EXYNOS_CLKREG(0x1052C)
 #define EXYNOS5_CLKDIV_GEN			EXYNOS_CLKREG(0x1053C)
+#define EXYNOS5_CLKDIV_MAUDIO			EXYNOS_CLKREG(0x10544)
 #define EXYNOS5_CLKDIV_FSYS0			EXYNOS_CLKREG(0x10548)
 #define EXYNOS5_CLKDIV_FSYS1			EXYNOS_CLKREG(0x1054C)
 #define EXYNOS5_CLKDIV_FSYS2			EXYNOS_CLKREG(0x10550)
 #define EXYNOS5_CLKDIV_FSYS3			EXYNOS_CLKREG(0x10554)
 #define EXYNOS5_CLKDIV_PERIC0			EXYNOS_CLKREG(0x10558)
+#define EXYNOS5_CLKDIV_PERIC1			EXYNOS_CLKREG(0x1055C)
+#define EXYNOS5_CLKDIV_PERIC2			EXYNOS_CLKREG(0x10560)
+#define EXYNOS5_CLKDIV_PERIC3			EXYNOS_CLKREG(0x10564)
+#define EXYNOS5_CLKDIV_PERIC4			EXYNOS_CLKREG(0x10568)
+#define EXYNOS5_CLKDIV_PERIC5			EXYNOS_CLKREG(0x1056C)
+#define EXYNOS5_SCLK_DIV_ISP			EXYNOS_CLKREG(0x10580)
 
 #define EXYNOS5_CLKGATE_IP_ACP			EXYNOS_CLKREG(0x08800)
+#define EXYNOS5_CLKGATE_IP_ISP0			EXYNOS_CLKREG(0x0C800)
+#define EXYNOS5_CLKGATE_IP_ISP1			EXYNOS_CLKREG(0x0C804)
 #define EXYNOS5_CLKGATE_IP_GSCL			EXYNOS_CLKREG(0x10920)
 #define EXYNOS5_CLKGATE_IP_DISP1		EXYNOS_CLKREG(0x10928)
 #define EXYNOS5_CLKGATE_IP_MFC			EXYNOS_CLKREG(0x1092C)
+#define EXYNOS5_CLKGATE_IP_G3D			EXYNOS_CLKREG(0x10930)
 #define EXYNOS5_CLKGATE_IP_GEN			EXYNOS_CLKREG(0x10934)
 #define EXYNOS5_CLKGATE_IP_FSYS			EXYNOS_CLKREG(0x10944)
 #define EXYNOS5_CLKGATE_IP_GPS			EXYNOS_CLKREG(0x1094C)
@@ -317,6 +338,8 @@
 #define EXYNOS5_CLKSRC_CDREX			EXYNOS_CLKREG(0x20200)
 #define EXYNOS5_CLKDIV_CDREX			EXYNOS_CLKREG(0x20500)
 
+#define EXYNOS5_PLL_DIV2_SEL			EXYNOS_CLKREG(0x20A24)
+
 #define EXYNOS5_EPLL_LOCK			EXYNOS_CLKREG(0x10030)
 
 #define EXYNOS5_EPLLCON0_LOCKED_SHIFT		(29)
diff --git a/arch/arm/mach-exynos/include/mach/regs-pmu.h b/arch/arm/mach-exynos/include/mach/regs-pmu.h
index d457d05..43a99e6 100644
--- a/arch/arm/mach-exynos/include/mach/regs-pmu.h
+++ b/arch/arm/mach-exynos/include/mach/regs-pmu.h
@@ -1,9 +1,8 @@
-/* linux/arch/arm/mach-exynos4/include/mach/regs-pmu.h
- *
- * Copyright (c) 2010-2011 Samsung Electronics Co., Ltd.
+/*
+ * Copyright (c) 2010-2012 Samsung Electronics Co., Ltd.
  *		http://www.samsung.com
  *
- * EXYNOS4 - Power management unit definition
+ * EXYNOS - Power management unit definition
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 as
@@ -180,7 +179,7 @@
 
 #define S5P_PMU_LCD1_CONF		S5P_PMUREG(0x3CA0)
 
-/* Only for EXYNOS4212 */
+/* Only for EXYNOS4x12 */
 #define S5P_ISP_ARM_LOWPWR			S5P_PMUREG(0x1050)
 #define S5P_DIS_IRQ_ISP_ARM_LOCAL_LOWPWR	S5P_PMUREG(0x1054)
 #define S5P_DIS_IRQ_ISP_ARM_CENTRAL_LOWPWR	S5P_PMUREG(0x1058)
@@ -221,4 +220,146 @@
 #define S5P_SECSS_MEM_OPTION			S5P_PMUREG(0x2EC8)
 #define S5P_ROTATOR_MEM_OPTION			S5P_PMUREG(0x2F48)
 
+/* Only for EXYNOS4412 */
+#define S5P_ARM_CORE2_LOWPWR			S5P_PMUREG(0x1020)
+#define S5P_DIS_IRQ_CORE2			S5P_PMUREG(0x1024)
+#define S5P_DIS_IRQ_CENTRAL2			S5P_PMUREG(0x1028)
+#define S5P_ARM_CORE3_LOWPWR			S5P_PMUREG(0x1030)
+#define S5P_DIS_IRQ_CORE3			S5P_PMUREG(0x1034)
+#define S5P_DIS_IRQ_CENTRAL3			S5P_PMUREG(0x1038)
+
+/* For EXYNOS5 */
+
+#define EXYNOS5_USB_CFG						S5P_PMUREG(0x0230)
+
+#define EXYNOS5_ARM_CORE0_SYS_PWR_REG				S5P_PMUREG(0x1000)
+#define EXYNOS5_DIS_IRQ_ARM_CORE0_LOCAL_SYS_PWR_REG		S5P_PMUREG(0x1004)
+#define EXYNOS5_DIS_IRQ_ARM_CORE0_CENTRAL_SYS_PWR_REG		S5P_PMUREG(0x1008)
+#define EXYNOS5_ARM_CORE1_SYS_PWR_REG				S5P_PMUREG(0x1010)
+#define EXYNOS5_DIS_IRQ_ARM_CORE1_LOCAL_SYS_PWR_REG		S5P_PMUREG(0x1014)
+#define EXYNOS5_DIS_IRQ_ARM_CORE1_CENTRAL_SYS_PWR_REG		S5P_PMUREG(0x1018)
+#define EXYNOS5_FSYS_ARM_SYS_PWR_REG				S5P_PMUREG(0x1040)
+#define EXYNOS5_DIS_IRQ_FSYS_ARM_CENTRAL_SYS_PWR_REG		S5P_PMUREG(0x1048)
+#define EXYNOS5_ISP_ARM_SYS_PWR_REG				S5P_PMUREG(0x1050)
+#define EXYNOS5_DIS_IRQ_ISP_ARM_LOCAL_SYS_PWR_REG		S5P_PMUREG(0x1054)
+#define EXYNOS5_DIS_IRQ_ISP_ARM_CENTRAL_SYS_PWR_REG		S5P_PMUREG(0x1058)
+#define EXYNOS5_ARM_COMMON_SYS_PWR_REG				S5P_PMUREG(0x1080)
+#define EXYNOS5_ARM_L2_SYS_PWR_REG				S5P_PMUREG(0x10C0)
+#define EXYNOS5_CMU_ACLKSTOP_SYS_PWR_REG			S5P_PMUREG(0x1100)
+#define EXYNOS5_CMU_SCLKSTOP_SYS_PWR_REG			S5P_PMUREG(0x1104)
+#define EXYNOS5_CMU_RESET_SYS_PWR_REG				S5P_PMUREG(0x110C)
+#define EXYNOS5_CMU_ACLKSTOP_SYSMEM_SYS_PWR_REG			S5P_PMUREG(0x1120)
+#define EXYNOS5_CMU_SCLKSTOP_SYSMEM_SYS_PWR_REG			S5P_PMUREG(0x1124)
+#define EXYNOS5_CMU_RESET_SYSMEM_SYS_PWR_REG			S5P_PMUREG(0x112C)
+#define EXYNOS5_DRAM_FREQ_DOWN_SYS_PWR_REG			S5P_PMUREG(0x1130)
+#define EXYNOS5_DDRPHY_DLLOFF_SYS_PWR_REG			S5P_PMUREG(0x1134)
+#define EXYNOS5_DDRPHY_DLLLOCK_SYS_PWR_REG			S5P_PMUREG(0x1138)
+#define EXYNOS5_APLL_SYSCLK_SYS_PWR_REG				S5P_PMUREG(0x1140)
+#define EXYNOS5_MPLL_SYSCLK_SYS_PWR_REG				S5P_PMUREG(0x1144)
+#define EXYNOS5_VPLL_SYSCLK_SYS_PWR_REG				S5P_PMUREG(0x1148)
+#define EXYNOS5_EPLL_SYSCLK_SYS_PWR_REG				S5P_PMUREG(0x114C)
+#define EXYNOS5_BPLL_SYSCLK_SYS_PWR_REG				S5P_PMUREG(0x1150)
+#define EXYNOS5_CPLL_SYSCLK_SYS_PWR_REG				S5P_PMUREG(0x1154)
+#define EXYNOS5_MPLLUSER_SYSCLK_SYS_PWR_REG			S5P_PMUREG(0x1164)
+#define EXYNOS5_BPLLUSER_SYSCLK_SYS_PWR_REG			S5P_PMUREG(0x1170)
+#define EXYNOS5_TOP_BUS_SYS_PWR_REG				S5P_PMUREG(0x1180)
+#define EXYNOS5_TOP_RETENTION_SYS_PWR_REG			S5P_PMUREG(0x1184)
+#define EXYNOS5_TOP_PWR_SYS_PWR_REG				S5P_PMUREG(0x1188)
+#define EXYNOS5_TOP_BUS_SYSMEM_SYS_PWR_REG			S5P_PMUREG(0x1190)
+#define EXYNOS5_TOP_RETENTION_SYSMEM_SYS_PWR_REG		S5P_PMUREG(0x1194)
+#define EXYNOS5_TOP_PWR_SYSMEM_SYS_PWR_REG			S5P_PMUREG(0x1198)
+#define EXYNOS5_LOGIC_RESET_SYS_PWR_REG				S5P_PMUREG(0x11A0)
+#define EXYNOS5_OSCCLK_GATE_SYS_PWR_REG				S5P_PMUREG(0x11A4)
+#define EXYNOS5_LOGIC_RESET_SYSMEM_SYS_PWR_REG			S5P_PMUREG(0x11B0)
+#define EXYNOS5_OSCCLK_GATE_SYSMEM_SYS_PWR_REG			S5P_PMUREG(0x11B4)
+#define EXYNOS5_USBOTG_MEM_SYS_PWR_REG				S5P_PMUREG(0x11C0)
+#define EXYNOS5_G2D_MEM_SYS_PWR_REG				S5P_PMUREG(0x11C8)
+#define EXYNOS5_USBDRD_MEM_SYS_PWR_REG				S5P_PMUREG(0x11CC)
+#define EXYNOS5_SDMMC_MEM_SYS_PWR_REG				S5P_PMUREG(0x11D0)
+#define EXYNOS5_CSSYS_MEM_SYS_PWR_REG				S5P_PMUREG(0x11D4)
+#define EXYNOS5_SECSS_MEM_SYS_PWR_REG				S5P_PMUREG(0x11D8)
+#define EXYNOS5_ROTATOR_MEM_SYS_PWR_REG				S5P_PMUREG(0x11DC)
+#define EXYNOS5_INTRAM_MEM_SYS_PWR_REG				S5P_PMUREG(0x11E0)
+#define EXYNOS5_INTROM_MEM_SYS_PWR_REG				S5P_PMUREG(0x11E4)
+#define EXYNOS5_JPEG_MEM_SYS_PWR_REG				S5P_PMUREG(0x11E8)
+#define EXYNOS5_HSI_MEM_SYS_PWR_REG				S5P_PMUREG(0x11EC)
+#define EXYNOS5_MCUIOP_MEM_SYS_PWR_REG				S5P_PMUREG(0x11F4)
+#define EXYNOS5_SATA_MEM_SYS_PWR_REG				S5P_PMUREG(0x11FC)
+#define EXYNOS5_PAD_RETENTION_DRAM_SYS_PWR_REG			S5P_PMUREG(0x1200)
+#define EXYNOS5_PAD_RETENTION_MAU_SYS_PWR_REG			S5P_PMUREG(0x1204)
+#define EXYNOS5_PAD_RETENTION_EFNAND_SYS_PWR_REG		S5P_PMUREG(0x1208)
+#define EXYNOS5_PAD_RETENTION_GPIO_SYS_PWR_REG			S5P_PMUREG(0x1220)
+#define EXYNOS5_PAD_RETENTION_UART_SYS_PWR_REG			S5P_PMUREG(0x1224)
+#define EXYNOS5_PAD_RETENTION_MMCA_SYS_PWR_REG			S5P_PMUREG(0x1228)
+#define EXYNOS5_PAD_RETENTION_MMCB_SYS_PWR_REG			S5P_PMUREG(0x122C)
+#define EXYNOS5_PAD_RETENTION_EBIA_SYS_PWR_REG			S5P_PMUREG(0x1230)
+#define EXYNOS5_PAD_RETENTION_EBIB_SYS_PWR_REG			S5P_PMUREG(0x1234)
+#define EXYNOS5_PAD_RETENTION_SPI_SYS_PWR_REG			S5P_PMUREG(0x1238)
+#define EXYNOS5_PAD_RETENTION_GPIO_SYSMEM_SYS_PWR_REG		S5P_PMUREG(0x123C)
+#define EXYNOS5_PAD_ISOLATION_SYS_PWR_REG			S5P_PMUREG(0x1240)
+#define EXYNOS5_PAD_ISOLATION_SYSMEM_SYS_PWR_REG		S5P_PMUREG(0x1250)
+#define EXYNOS5_PAD_ALV_SEL_SYS_PWR_REG				S5P_PMUREG(0x1260)
+#define EXYNOS5_XUSBXTI_SYS_PWR_REG				S5P_PMUREG(0x1280)
+#define EXYNOS5_XXTI_SYS_PWR_REG				S5P_PMUREG(0x1284)
+#define EXYNOS5_EXT_REGULATOR_SYS_PWR_REG			S5P_PMUREG(0x12C0)
+#define EXYNOS5_GPIO_MODE_SYS_PWR_REG				S5P_PMUREG(0x1300)
+#define EXYNOS5_GPIO_MODE_SYSMEM_SYS_PWR_REG			S5P_PMUREG(0x1320)
+#define EXYNOS5_GPIO_MODE_MAU_SYS_PWR_REG			S5P_PMUREG(0x1340)
+#define EXYNOS5_TOP_ASB_RESET_SYS_PWR_REG			S5P_PMUREG(0x1344)
+#define EXYNOS5_TOP_ASB_ISOLATION_SYS_PWR_REG			S5P_PMUREG(0x1348)
+#define EXYNOS5_GSCL_SYS_PWR_REG				S5P_PMUREG(0x1400)
+#define EXYNOS5_ISP_SYS_PWR_REG					S5P_PMUREG(0x1404)
+#define EXYNOS5_MFC_SYS_PWR_REG					S5P_PMUREG(0x1408)
+#define EXYNOS5_G3D_SYS_PWR_REG					S5P_PMUREG(0x140C)
+#define EXYNOS5_DISP1_SYS_PWR_REG				S5P_PMUREG(0x1414)
+#define EXYNOS5_MAU_SYS_PWR_REG					S5P_PMUREG(0x1418)
+#define EXYNOS5_CMU_CLKSTOP_GSCL_SYS_PWR_REG			S5P_PMUREG(0x1480)
+#define EXYNOS5_CMU_CLKSTOP_ISP_SYS_PWR_REG			S5P_PMUREG(0x1484)
+#define EXYNOS5_CMU_CLKSTOP_MFC_SYS_PWR_REG			S5P_PMUREG(0x1488)
+#define EXYNOS5_CMU_CLKSTOP_G3D_SYS_PWR_REG			S5P_PMUREG(0x148C)
+#define EXYNOS5_CMU_CLKSTOP_DISP1_SYS_PWR_REG			S5P_PMUREG(0x1494)
+#define EXYNOS5_CMU_CLKSTOP_MAU_SYS_PWR_REG			S5P_PMUREG(0x1498)
+#define EXYNOS5_CMU_SYSCLK_GSCL_SYS_PWR_REG			S5P_PMUREG(0x14C0)
+#define EXYNOS5_CMU_SYSCLK_ISP_SYS_PWR_REG			S5P_PMUREG(0x14C4)
+#define EXYNOS5_CMU_SYSCLK_MFC_SYS_PWR_REG			S5P_PMUREG(0x14C8)
+#define EXYNOS5_CMU_SYSCLK_G3D_SYS_PWR_REG			S5P_PMUREG(0x14CC)
+#define EXYNOS5_CMU_SYSCLK_DISP1_SYS_PWR_REG			S5P_PMUREG(0x14D4)
+#define EXYNOS5_CMU_SYSCLK_MAU_SYS_PWR_REG			S5P_PMUREG(0x14D8)
+#define EXYNOS5_CMU_RESET_GSCL_SYS_PWR_REG			S5P_PMUREG(0x1580)
+#define EXYNOS5_CMU_RESET_ISP_SYS_PWR_REG			S5P_PMUREG(0x1584)
+#define EXYNOS5_CMU_RESET_MFC_SYS_PWR_REG			S5P_PMUREG(0x1588)
+#define EXYNOS5_CMU_RESET_G3D_SYS_PWR_REG			S5P_PMUREG(0x158C)
+#define EXYNOS5_CMU_RESET_DISP1_SYS_PWR_REG			S5P_PMUREG(0x1594)
+#define EXYNOS5_CMU_RESET_MAU_SYS_PWR_REG			S5P_PMUREG(0x1598)
+
+#define EXYNOS5_ARM_CORE0_OPTION				S5P_PMUREG(0x2008)
+#define EXYNOS5_ARM_CORE1_OPTION				S5P_PMUREG(0x2088)
+#define EXYNOS5_FSYS_ARM_OPTION					S5P_PMUREG(0x2208)
+#define EXYNOS5_ISP_ARM_OPTION					S5P_PMUREG(0x2288)
+#define EXYNOS5_ARM_COMMON_OPTION				S5P_PMUREG(0x2408)
+#define EXYNOS5_TOP_PWR_OPTION					S5P_PMUREG(0x2C48)
+#define EXYNOS5_TOP_PWR_SYSMEM_OPTION				S5P_PMUREG(0x2CC8)
+#define EXYNOS5_JPEG_MEM_OPTION					S5P_PMUREG(0x2F48)
+#define EXYNOS5_GSCL_STATUS					S5P_PMUREG(0x4004)
+#define EXYNOS5_ISP_STATUS					S5P_PMUREG(0x4024)
+#define EXYNOS5_GSCL_OPTION					S5P_PMUREG(0x4008)
+#define EXYNOS5_ISP_OPTION					S5P_PMUREG(0x4028)
+#define EXYNOS5_MFC_OPTION					S5P_PMUREG(0x4048)
+#define EXYNOS5_G3D_CONFIGURATION				S5P_PMUREG(0x4060)
+#define EXYNOS5_G3D_STATUS					S5P_PMUREG(0x4064)
+#define EXYNOS5_G3D_OPTION					S5P_PMUREG(0x4068)
+#define EXYNOS5_DISP1_OPTION					S5P_PMUREG(0x40A8)
+#define EXYNOS5_MAU_OPTION					S5P_PMUREG(0x40C8)
+
+#define EXYNOS5_USE_SC_FEEDBACK					(1 << 1)
+#define EXYNOS5_USE_SC_COUNTER					(1 << 0)
+
+#define EXYNOS5_MANUAL_L2RSTDISABLE_CONTROL			(1 << 2)
+#define EXYNOS5_SKIP_DEACTIVATE_ACEACP_IN_PWDN			(1 << 7)
+
+#define EXYNOS5_OPTION_USE_STANDBYWFE				(1 << 24)
+#define EXYNOS5_OPTION_USE_STANDBYWFI				(1 << 16)
+
+#define EXYNOS5_OPTION_USE_RETENTION				(1 << 4)
+
 #endif /* __ASM_ARCH_REGS_PMU_H */
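
A sketch of how the new EXYNOS5 power-option bits are typically applied with a
read-modify-write on one of the *_OPTION registers; the particular register and
bit combination below are illustrative only:

	unsigned int tmp;

	tmp = __raw_readl(EXYNOS5_ARM_CORE0_OPTION);
	tmp &= ~EXYNOS5_OPTION_USE_STANDBYWFE;
	tmp |= EXYNOS5_OPTION_USE_STANDBYWFI;
	__raw_writel(tmp, EXYNOS5_ARM_CORE0_OPTION);
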
diff --git a/arch/arm/mach-exynos/include/mach/regs-sysmmu.h b/arch/arm/mach-exynos/include/mach/regs-sysmmu.h
deleted file mode 100644
index 68ff6ad..0000000
--- a/arch/arm/mach-exynos/include/mach/regs-sysmmu.h
+++ /dev/null
@@ -1,28 +0,0 @@
-/* linux/arch/arm/mach-exynos4/include/mach/regs-sysmmu.h
- *
- * Copyright (c) 2010-2011 Samsung Electronics Co., Ltd.
- *		http://www.samsung.com
- *
- * EXYNOS4 - System MMU register
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
-*/
-
-#ifndef __ASM_ARCH_REGS_SYSMMU_H
-#define __ASM_ARCH_REGS_SYSMMU_H __FILE__
-
-#define S5P_MMU_CTRL			0x000
-#define S5P_MMU_CFG			0x004
-#define S5P_MMU_STATUS			0x008
-#define S5P_MMU_FLUSH			0x00C
-#define S5P_PT_BASE_ADDR		0x014
-#define S5P_INT_STATUS			0x018
-#define S5P_INT_CLEAR			0x01C
-#define S5P_PAGE_FAULT_ADDR		0x024
-#define S5P_AW_FAULT_ADDR		0x028
-#define S5P_AR_FAULT_ADDR		0x02C
-#define S5P_DEFAULT_SLAVE_ADDR		0x030
-
-#endif /* __ASM_ARCH_REGS_SYSMMU_H */
diff --git a/arch/arm/mach-exynos/include/mach/spi-clocks.h b/arch/arm/mach-exynos/include/mach/spi-clocks.h
index 576efdf..c71a5fb 100644
--- a/arch/arm/mach-exynos/include/mach/spi-clocks.h
+++ b/arch/arm/mach-exynos/include/mach/spi-clocks.h
@@ -11,6 +11,6 @@
 #define __ASM_ARCH_SPI_CLKS_H __FILE__
 
 /* Must source from SCLK_SPI */
-#define EXYNOS4_SPI_SRCCLK_SCLK		0
+#define EXYNOS_SPI_SRCCLK_SCLK		0
 
 #endif /* __ASM_ARCH_SPI_CLKS_H */
diff --git a/arch/arm/mach-exynos/include/mach/sysmmu.h b/arch/arm/mach-exynos/include/mach/sysmmu.h
index 6a5fbb5..998daf2 100644
--- a/arch/arm/mach-exynos/include/mach/sysmmu.h
+++ b/arch/arm/mach-exynos/include/mach/sysmmu.h
@@ -1,46 +1,66 @@
-/* linux/arch/arm/mach-exynos4/include/mach/sysmmu.h
- *
- * Copyright (c) 2010-2011 Samsung Electronics Co., Ltd.
+/*
+ * Copyright (c) 2011-2012 Samsung Electronics Co., Ltd.
  *		http://www.samsung.com
  *
- * Samsung sysmmu driver for EXYNOS4
+ * EXYNOS - System MMU support
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 as
  * published by the Free Software Foundation.
-*/
+ */
 
-#ifndef __ASM_ARM_ARCH_SYSMMU_H
-#define __ASM_ARM_ARCH_SYSMMU_H __FILE__
+#ifndef _ARM_MACH_EXYNOS_SYSMMU_H_
+#define _ARM_MACH_EXYNOS_SYSMMU_H_
 
-enum exynos4_sysmmu_ips {
-	SYSMMU_MDMA,
-	SYSMMU_SSS,
-	SYSMMU_FIMC0,
-	SYSMMU_FIMC1,
-	SYSMMU_FIMC2,
-	SYSMMU_FIMC3,
-	SYSMMU_JPEG,
-	SYSMMU_FIMD0,
-	SYSMMU_FIMD1,
-	SYSMMU_PCIe,
-	SYSMMU_G2D,
-	SYSMMU_ROTATOR,
-	SYSMMU_MDMA2,
-	SYSMMU_TV,
-	SYSMMU_MFC_L,
-	SYSMMU_MFC_R,
-	EXYNOS4_SYSMMU_TOTAL_IPNUM,
+struct sysmmu_platform_data {
+	char *dbgname;
+	/* comma-separated list of clock names for clock gating */
+	char *clockname;
 };
 
-#define S5P_SYSMMU_TOTAL_IPNUM		EXYNOS4_SYSMMU_TOTAL_IPNUM
+#define SYSMMU_DEVNAME_BASE "exynos-sysmmu"
 
-extern const char *sysmmu_ips_name[EXYNOS4_SYSMMU_TOTAL_IPNUM];
+#define SYSMMU_CLOCK_NAME "sysmmu"
+#define SYSMMU_CLOCK_NAME2 "sysmmu_mc"
 
-typedef enum exynos4_sysmmu_ips sysmmu_ips;
+#ifdef CONFIG_EXYNOS_DEV_SYSMMU
+#include <linux/device.h>
+struct platform_device;
 
-void sysmmu_clk_init(struct device *dev, sysmmu_ips ips);
-void sysmmu_clk_enable(sysmmu_ips ips);
-void sysmmu_clk_disable(sysmmu_ips ips);
+#define SYSMMU_PLATDEV(ipname) exynos_device_sysmmu_##ipname
 
-#endif /* __ASM_ARM_ARCH_SYSMMU_H */
+extern struct platform_device SYSMMU_PLATDEV(mfc_l);
+extern struct platform_device SYSMMU_PLATDEV(mfc_r);
+extern struct platform_device SYSMMU_PLATDEV(tv);
+extern struct platform_device SYSMMU_PLATDEV(jpeg);
+extern struct platform_device SYSMMU_PLATDEV(rot);
+extern struct platform_device SYSMMU_PLATDEV(fimc0);
+extern struct platform_device SYSMMU_PLATDEV(fimc1);
+extern struct platform_device SYSMMU_PLATDEV(fimc2);
+extern struct platform_device SYSMMU_PLATDEV(fimc3);
+extern struct platform_device SYSMMU_PLATDEV(gsc0);
+extern struct platform_device SYSMMU_PLATDEV(gsc1);
+extern struct platform_device SYSMMU_PLATDEV(gsc2);
+extern struct platform_device SYSMMU_PLATDEV(gsc3);
+extern struct platform_device SYSMMU_PLATDEV(isp);
+extern struct platform_device SYSMMU_PLATDEV(fimd0);
+extern struct platform_device SYSMMU_PLATDEV(fimd1);
+extern struct platform_device SYSMMU_PLATDEV(camif0);
+extern struct platform_device SYSMMU_PLATDEV(camif1);
+extern struct platform_device SYSMMU_PLATDEV(2d);
+
+#ifdef CONFIG_IOMMU_API
+static inline void platform_set_sysmmu(
+				struct device *sysmmu, struct device *dev)
+{
+	dev->archdata.iommu = sysmmu;
+}
+#endif
+
+#else /* !CONFIG_EXYNOS_DEV_SYSMMU */
+#define platform_set_sysmmu(sysmmu, dev) do { } while (0)
+#endif
+
+#define SYSMMU_CLOCK_DEVNAME(ipname, id) (SYSMMU_DEVNAME_BASE "." #id)
+
+#endif /* _ARM_MACH_EXYNOS_SYSMMU_H_ */
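
A sketch of how a machine file ties a System MMU instance to the device it
translates for, using the helpers declared above (MFC's left memory port is
picked as the example; s5p_device_mfc_l is the existing MFC platform device):

	platform_set_sysmmu(&SYSMMU_PLATDEV(mfc_l).dev, &s5p_device_mfc_l.dev);
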
diff --git a/arch/arm/mach-exynos/mach-armlex4210.c b/arch/arm/mach-exynos/mach-armlex4210.c
index fed7116..5a3daa0 100644
--- a/arch/arm/mach-exynos/mach-armlex4210.c
+++ b/arch/arm/mach-exynos/mach-armlex4210.c
@@ -147,7 +147,6 @@
 	&s3c_device_hsmmc3,
 	&s3c_device_rtc,
 	&s3c_device_wdt,
-	&exynos4_device_sysmmu,
 	&samsung_asoc_dma,
 	&armlex4210_smsc911x,
 	&exynos4_device_ahci,
@@ -204,6 +203,7 @@
 	.map_io		= armlex4210_map_io,
 	.handle_irq	= gic_handle_irq,
 	.init_machine	= armlex4210_machine_init,
+	.init_late	= exynos_init_late,
 	.timer		= &exynos4_timer,
 	.restart	= exynos4_restart,
 MACHINE_END
diff --git a/arch/arm/mach-exynos/mach-exynos4-dt.c b/arch/arm/mach-exynos/mach-exynos4-dt.c
index 8245f1c..e7e9743 100644
--- a/arch/arm/mach-exynos/mach-exynos4-dt.c
+++ b/arch/arm/mach-exynos/mach-exynos4-dt.c
@@ -83,6 +83,7 @@
 	.map_io		= exynos4210_dt_map_io,
 	.handle_irq	= gic_handle_irq,
 	.init_machine	= exynos4210_dt_machine_init,
+	.init_late	= exynos_init_late,
 	.timer		= &exynos4_timer,
 	.dt_compat	= exynos4210_dt_compat,
 	.restart        = exynos4_restart,
diff --git a/arch/arm/mach-exynos/mach-exynos5-dt.c b/arch/arm/mach-exynos/mach-exynos5-dt.c
index 4711c89..7b1e11a 100644
--- a/arch/arm/mach-exynos/mach-exynos5-dt.c
+++ b/arch/arm/mach-exynos/mach-exynos5-dt.c
@@ -43,6 +43,10 @@
 				"exynos4210-uart.2", NULL),
 	OF_DEV_AUXDATA("samsung,exynos4210-uart", EXYNOS5_PA_UART3,
 				"exynos4210-uart.3", NULL),
+	OF_DEV_AUXDATA("samsung,s3c2440-i2c", EXYNOS5_PA_IIC(0),
+				"s3c2440-i2c.0", NULL),
+	OF_DEV_AUXDATA("samsung,s3c2440-i2c", EXYNOS5_PA_IIC(1),
+				"s3c2440-i2c.1", NULL),
 	OF_DEV_AUXDATA("arm,pl330", EXYNOS5_PA_PDMA0, "dma-pl330.0", NULL),
 	OF_DEV_AUXDATA("arm,pl330", EXYNOS5_PA_PDMA1, "dma-pl330.1", NULL),
 	OF_DEV_AUXDATA("arm,pl330", EXYNOS5_PA_MDMA1, "dma-pl330.2", NULL),
@@ -72,6 +76,7 @@
 	.map_io		= exynos5250_dt_map_io,
 	.handle_irq	= gic_handle_irq,
 	.init_machine	= exynos5250_dt_machine_init,
+	.init_late	= exynos_init_late,
 	.timer		= &exynos4_timer,
 	.dt_compat	= exynos5250_dt_compat,
 	.restart        = exynos5_restart,
diff --git a/arch/arm/mach-exynos/mach-nuri.c b/arch/arm/mach-exynos/mach-nuri.c
index 6c31f2a..656f8fc 100644
--- a/arch/arm/mach-exynos/mach-nuri.c
+++ b/arch/arm/mach-exynos/mach-nuri.c
@@ -237,25 +237,29 @@
 #else
 /* Frame Buffer */
 static struct s3c_fb_pd_win nuri_fb_win0 = {
-	.win_mode = {
-		.left_margin	= 64,
-		.right_margin	= 16,
-		.upper_margin	= 64,
-		.lower_margin	= 1,
-		.hsync_len	= 48,
-		.vsync_len	= 3,
-		.xres		= 1024,
-		.yres		= 600,
-		.refresh	= 60,
-	},
 	.max_bpp	= 24,
 	.default_bpp	= 16,
+	.xres		= 1024,
+	.yres		= 600,
 	.virtual_x	= 1024,
 	.virtual_y	= 2 * 600,
 };
 
+static struct fb_videomode nuri_lcd_timing = {
+	.left_margin	= 64,
+	.right_margin	= 16,
+	.upper_margin	= 64,
+	.lower_margin	= 1,
+	.hsync_len	= 48,
+	.vsync_len	= 3,
+	.xres		= 1024,
+	.yres		= 600,
+	.refresh	= 60,
+};
+
 static struct s3c_fb_platdata nuri_fb_pdata __initdata = {
 	.win[0]		= &nuri_fb_win0,
+	.vtiming	= &nuri_lcd_timing,
 	.vidcon0	= VIDCON0_VIDOUT_RGB | VIDCON0_PNRMODE_RGB |
 			  VIDCON0_CLKSEL_LCD,
 	.vidcon1	= VIDCON1_INV_HSYNC | VIDCON1_INV_VSYNC,
@@ -1389,6 +1393,7 @@
 	.map_io		= nuri_map_io,
 	.handle_irq	= gic_handle_irq,
 	.init_machine	= nuri_machine_init,
+	.init_late	= exynos_init_late,
 	.timer		= &exynos4_timer,
 	.reserve        = &nuri_reserve,
 	.restart	= exynos4_restart,
diff --git a/arch/arm/mach-exynos/mach-origen.c b/arch/arm/mach-exynos/mach-origen.c
index 26124a3..f5572be 100644
--- a/arch/arm/mach-exynos/mach-origen.c
+++ b/arch/arm/mach-exynos/mach-origen.c
@@ -604,24 +604,28 @@
 };
 #else
 static struct s3c_fb_pd_win origen_fb_win0 = {
-	.win_mode = {
-		.left_margin	= 64,
-		.right_margin	= 16,
-		.upper_margin	= 64,
-		.lower_margin	= 16,
-		.hsync_len	= 48,
-		.vsync_len	= 3,
-		.xres		= 1024,
-		.yres		= 600,
-	},
+	.xres			= 1024,
+	.yres			= 600,
 	.max_bpp		= 32,
 	.default_bpp		= 24,
 	.virtual_x		= 1024,
 	.virtual_y		= 2 * 600,
 };
 
+static struct fb_videomode origen_lcd_timing = {
+	.left_margin	= 64,
+	.right_margin	= 16,
+	.upper_margin	= 64,
+	.lower_margin	= 16,
+	.hsync_len	= 48,
+	.vsync_len	= 3,
+	.xres		= 1024,
+	.yres		= 600,
+};
+
 static struct s3c_fb_platdata origen_lcd_pdata __initdata = {
 	.win[0]		= &origen_fb_win0,
+	.vtiming	= &origen_lcd_timing,
 	.vidcon0	= VIDCON0_VIDOUT_RGB | VIDCON0_PNRMODE_RGB,
 	.vidcon1	= VIDCON1_INV_HSYNC | VIDCON1_INV_VSYNC |
 				VIDCON1_INV_VCLK,
@@ -766,6 +770,7 @@
 	.map_io		= origen_map_io,
 	.handle_irq	= gic_handle_irq,
 	.init_machine	= origen_machine_init,
+	.init_late	= exynos_init_late,
 	.timer		= &exynos4_timer,
 	.reserve	= &origen_reserve,
 	.restart	= exynos4_restart,
diff --git a/arch/arm/mach-exynos/mach-smdk4x12.c b/arch/arm/mach-exynos/mach-smdk4x12.c
index fe772d8..fb09c70 100644
--- a/arch/arm/mach-exynos/mach-smdk4x12.c
+++ b/arch/arm/mach-exynos/mach-smdk4x12.c
@@ -316,6 +316,7 @@
 	.map_io		= smdk4x12_map_io,
 	.handle_irq	= gic_handle_irq,
 	.init_machine	= smdk4x12_machine_init,
+	.init_late	= exynos_init_late,
 	.timer		= &exynos4_timer,
 	.restart	= exynos4_restart,
 	.reserve	= &smdk4x12_reserve,
diff --git a/arch/arm/mach-exynos/mach-smdkv310.c b/arch/arm/mach-exynos/mach-smdkv310.c
index 5af9606..262e9e4 100644
--- a/arch/arm/mach-exynos/mach-smdkv310.c
+++ b/arch/arm/mach-exynos/mach-smdkv310.c
@@ -178,22 +178,26 @@
 };
 #else
 static struct s3c_fb_pd_win smdkv310_fb_win0 = {
-	.win_mode = {
-		.left_margin	= 13,
-		.right_margin	= 8,
-		.upper_margin	= 7,
-		.lower_margin	= 5,
-		.hsync_len	= 3,
-		.vsync_len	= 1,
-		.xres		= 800,
-		.yres		= 480,
-	},
-	.max_bpp		= 32,
-	.default_bpp		= 24,
+	.max_bpp	= 32,
+	.default_bpp	= 24,
+	.xres		= 800,
+	.yres		= 480,
+};
+
+static struct fb_videomode smdkv310_lcd_timing = {
+	.left_margin	= 13,
+	.right_margin	= 8,
+	.upper_margin	= 7,
+	.lower_margin	= 5,
+	.hsync_len	= 3,
+	.vsync_len	= 1,
+	.xres		= 800,
+	.yres		= 480,
 };
 
 static struct s3c_fb_platdata smdkv310_lcd0_pdata __initdata = {
 	.win[0]		= &smdkv310_fb_win0,
+	.vtiming	= &smdkv310_lcd_timing,
 	.vidcon0	= VIDCON0_VIDOUT_RGB | VIDCON0_PNRMODE_RGB,
 	.vidcon1	= VIDCON1_INV_HSYNC | VIDCON1_INV_VSYNC,
 	.setup_gpio	= exynos4_fimd0_gpio_setup_24bpp,
@@ -295,7 +299,6 @@
 	&s5p_device_mfc_l,
 	&s5p_device_mfc_r,
 	&exynos4_device_spdif,
-	&exynos4_device_sysmmu,
 	&samsung_asoc_dma,
 	&samsung_asoc_idma,
 	&s5p_device_fimd0,
@@ -412,6 +415,7 @@
 	.map_io		= smdkv310_map_io,
 	.handle_irq	= gic_handle_irq,
 	.init_machine	= smdkv310_machine_init,
+	.init_late	= exynos_init_late,
 	.timer		= &exynos4_timer,
 	.restart	= exynos4_restart,
 MACHINE_END
diff --git a/arch/arm/mach-exynos/mach-universal_c210.c b/arch/arm/mach-exynos/mach-universal_c210.c
index 6b731b8..cd92fa8 100644
--- a/arch/arm/mach-exynos/mach-universal_c210.c
+++ b/arch/arm/mach-exynos/mach-universal_c210.c
@@ -843,25 +843,29 @@
 #else
 /* Frame Buffer */
 static struct s3c_fb_pd_win universal_fb_win0 = {
-	.win_mode = {
-		.left_margin	= 16,
-		.right_margin	= 16,
-		.upper_margin	= 2,
-		.lower_margin	= 28,
-		.hsync_len	= 2,
-		.vsync_len	= 1,
-		.xres		= 480,
-		.yres		= 800,
-		.refresh	= 55,
-	},
 	.max_bpp	= 32,
 	.default_bpp	= 16,
+	.xres		= 480,
+	.yres		= 800,
 	.virtual_x	= 480,
 	.virtual_y	= 2 * 800,
 };
 
+static struct fb_videomode universal_lcd_timing = {
+	.left_margin	= 16,
+	.right_margin	= 16,
+	.upper_margin	= 2,
+	.lower_margin	= 28,
+	.hsync_len	= 2,
+	.vsync_len	= 1,
+	.xres		= 480,
+	.yres		= 800,
+	.refresh	= 55,
+};
+
 static struct s3c_fb_platdata universal_lcd_pdata __initdata = {
 	.win[0]		= &universal_fb_win0,
+	.vtiming	= &universal_lcd_timing,
 	.vidcon0	= VIDCON0_VIDOUT_RGB | VIDCON0_PNRMODE_RGB |
 			  VIDCON0_CLKSEL_LCD,
 	.vidcon1	= VIDCON1_INV_VCLK | VIDCON1_INV_VDEN
@@ -1157,6 +1161,7 @@
 	.map_io		= universal_map_io,
 	.handle_irq	= gic_handle_irq,
 	.init_machine	= universal_machine_init,
+	.init_late	= exynos_init_late,
 	.timer		= &s5p_timer,
 	.reserve        = &universal_reserve,
 	.restart	= exynos4_restart,
diff --git a/arch/arm/mach-exynos/mct.c b/arch/arm/mach-exynos/mct.c
index 897d9a9..b601fb8 100644
--- a/arch/arm/mach-exynos/mct.c
+++ b/arch/arm/mach-exynos/mct.c
@@ -388,6 +388,7 @@
 {
 	struct mct_clock_event_device *mevt;
 	unsigned int cpu = smp_processor_id();
+	int mct_lx_irq;
 
 	mevt = this_cpu_ptr(&percpu_mct_tick);
 	mevt->evt = evt;
@@ -414,14 +415,18 @@
 
 	if (mct_int_type == MCT_INT_SPI) {
 		if (cpu == 0) {
+			mct_lx_irq = soc_is_exynos4210() ? EXYNOS4_IRQ_MCT_L0 :
+						EXYNOS5_IRQ_MCT_L0;
 			mct_tick0_event_irq.dev_id = mevt;
-			evt->irq = EXYNOS4_IRQ_MCT_L0;
-			setup_irq(EXYNOS4_IRQ_MCT_L0, &mct_tick0_event_irq);
+			evt->irq = mct_lx_irq;
+			setup_irq(mct_lx_irq, &mct_tick0_event_irq);
 		} else {
+			mct_lx_irq = soc_is_exynos4210() ? EXYNOS4_IRQ_MCT_L1 :
+						EXYNOS5_IRQ_MCT_L1;
 			mct_tick1_event_irq.dev_id = mevt;
-			evt->irq = EXYNOS4_IRQ_MCT_L1;
-			setup_irq(EXYNOS4_IRQ_MCT_L1, &mct_tick1_event_irq);
-			irq_set_affinity(EXYNOS4_IRQ_MCT_L1, cpumask_of(1));
+			evt->irq = mct_lx_irq;
+			setup_irq(mct_lx_irq, &mct_tick1_event_irq);
+			irq_set_affinity(mct_lx_irq, cpumask_of(1));
 		}
 	} else {
 		enable_percpu_irq(EXYNOS_IRQ_MCT_LOCALTIMER, 0);
@@ -473,7 +478,7 @@
 
 static void __init exynos4_timer_init(void)
 {
-	if (soc_is_exynos4210())
+	if (soc_is_exynos4210() || soc_is_exynos5250())
 		mct_int_type = MCT_INT_SPI;
 	else
 		mct_int_type = MCT_INT_PPI;
diff --git a/arch/arm/mach-exynos/pm.c b/arch/arm/mach-exynos/pm.c
index 428cfeb..c06c992 100644
--- a/arch/arm/mach-exynos/pm.c
+++ b/arch/arm/mach-exynos/pm.c
@@ -1,9 +1,8 @@
-/* linux/arch/arm/mach-exynos4/pm.c
- *
- * Copyright (c) 2011 Samsung Electronics Co., Ltd.
+/*
+ * Copyright (c) 2011-2012 Samsung Electronics Co., Ltd.
  *		http://www.samsung.com
  *
- * EXYNOS4210 - Power Management support
+ * EXYNOS - Power Management support
  *
  * Based on arch/arm/mach-s3c2410/pm.c
  * Copyright (c) 2006 Simtec Electronics
@@ -63,90 +62,7 @@
 	SAVE_ITEM(EXYNOS4_VPLL_CON1),
 };
 
-static struct sleep_save exynos4_core_save[] = {
-	/* GIC side */
-	SAVE_ITEM(S5P_VA_GIC_CPU + 0x000),
-	SAVE_ITEM(S5P_VA_GIC_CPU + 0x004),
-	SAVE_ITEM(S5P_VA_GIC_CPU + 0x008),
-	SAVE_ITEM(S5P_VA_GIC_CPU + 0x00C),
-	SAVE_ITEM(S5P_VA_GIC_CPU + 0x014),
-	SAVE_ITEM(S5P_VA_GIC_CPU + 0x018),
-	SAVE_ITEM(S5P_VA_GIC_DIST + 0x000),
-	SAVE_ITEM(S5P_VA_GIC_DIST + 0x004),
-	SAVE_ITEM(S5P_VA_GIC_DIST + 0x100),
-	SAVE_ITEM(S5P_VA_GIC_DIST + 0x104),
-	SAVE_ITEM(S5P_VA_GIC_DIST + 0x108),
-	SAVE_ITEM(S5P_VA_GIC_DIST + 0x300),
-	SAVE_ITEM(S5P_VA_GIC_DIST + 0x304),
-	SAVE_ITEM(S5P_VA_GIC_DIST + 0x308),
-	SAVE_ITEM(S5P_VA_GIC_DIST + 0x400),
-	SAVE_ITEM(S5P_VA_GIC_DIST + 0x404),
-	SAVE_ITEM(S5P_VA_GIC_DIST + 0x408),
-	SAVE_ITEM(S5P_VA_GIC_DIST + 0x40C),
-	SAVE_ITEM(S5P_VA_GIC_DIST + 0x410),
-	SAVE_ITEM(S5P_VA_GIC_DIST + 0x414),
-	SAVE_ITEM(S5P_VA_GIC_DIST + 0x418),
-	SAVE_ITEM(S5P_VA_GIC_DIST + 0x41C),
-	SAVE_ITEM(S5P_VA_GIC_DIST + 0x420),
-	SAVE_ITEM(S5P_VA_GIC_DIST + 0x424),
-	SAVE_ITEM(S5P_VA_GIC_DIST + 0x428),
-	SAVE_ITEM(S5P_VA_GIC_DIST + 0x42C),
-	SAVE_ITEM(S5P_VA_GIC_DIST + 0x430),
-	SAVE_ITEM(S5P_VA_GIC_DIST + 0x434),
-	SAVE_ITEM(S5P_VA_GIC_DIST + 0x438),
-	SAVE_ITEM(S5P_VA_GIC_DIST + 0x43C),
-	SAVE_ITEM(S5P_VA_GIC_DIST + 0x440),
-	SAVE_ITEM(S5P_VA_GIC_DIST + 0x444),
-	SAVE_ITEM(S5P_VA_GIC_DIST + 0x448),
-	SAVE_ITEM(S5P_VA_GIC_DIST + 0x44C),
-	SAVE_ITEM(S5P_VA_GIC_DIST + 0x450),
-	SAVE_ITEM(S5P_VA_GIC_DIST + 0x454),
-	SAVE_ITEM(S5P_VA_GIC_DIST + 0x458),
-	SAVE_ITEM(S5P_VA_GIC_DIST + 0x45C),
-
-	SAVE_ITEM(S5P_VA_GIC_DIST + 0x800),
-	SAVE_ITEM(S5P_VA_GIC_DIST + 0x804),
-	SAVE_ITEM(S5P_VA_GIC_DIST + 0x808),
-	SAVE_ITEM(S5P_VA_GIC_DIST + 0x80C),
-	SAVE_ITEM(S5P_VA_GIC_DIST + 0x810),
-	SAVE_ITEM(S5P_VA_GIC_DIST + 0x814),
-	SAVE_ITEM(S5P_VA_GIC_DIST + 0x818),
-	SAVE_ITEM(S5P_VA_GIC_DIST + 0x81C),
-	SAVE_ITEM(S5P_VA_GIC_DIST + 0x820),
-	SAVE_ITEM(S5P_VA_GIC_DIST + 0x824),
-	SAVE_ITEM(S5P_VA_GIC_DIST + 0x828),
-	SAVE_ITEM(S5P_VA_GIC_DIST + 0x82C),
-	SAVE_ITEM(S5P_VA_GIC_DIST + 0x830),
-	SAVE_ITEM(S5P_VA_GIC_DIST + 0x834),
-	SAVE_ITEM(S5P_VA_GIC_DIST + 0x838),
-	SAVE_ITEM(S5P_VA_GIC_DIST + 0x83C),
-	SAVE_ITEM(S5P_VA_GIC_DIST + 0x840),
-	SAVE_ITEM(S5P_VA_GIC_DIST + 0x844),
-	SAVE_ITEM(S5P_VA_GIC_DIST + 0x848),
-	SAVE_ITEM(S5P_VA_GIC_DIST + 0x84C),
-	SAVE_ITEM(S5P_VA_GIC_DIST + 0x850),
-	SAVE_ITEM(S5P_VA_GIC_DIST + 0x854),
-	SAVE_ITEM(S5P_VA_GIC_DIST + 0x858),
-	SAVE_ITEM(S5P_VA_GIC_DIST + 0x85C),
-
-	SAVE_ITEM(S5P_VA_GIC_DIST + 0xC00),
-	SAVE_ITEM(S5P_VA_GIC_DIST + 0xC04),
-	SAVE_ITEM(S5P_VA_GIC_DIST + 0xC08),
-	SAVE_ITEM(S5P_VA_GIC_DIST + 0xC0C),
-	SAVE_ITEM(S5P_VA_GIC_DIST + 0xC10),
-	SAVE_ITEM(S5P_VA_GIC_DIST + 0xC14),
-
-	SAVE_ITEM(S5P_VA_COMBINER_BASE + 0x000),
-	SAVE_ITEM(S5P_VA_COMBINER_BASE + 0x010),
-	SAVE_ITEM(S5P_VA_COMBINER_BASE + 0x020),
-	SAVE_ITEM(S5P_VA_COMBINER_BASE + 0x030),
-	SAVE_ITEM(S5P_VA_COMBINER_BASE + 0x040),
-	SAVE_ITEM(S5P_VA_COMBINER_BASE + 0x050),
-	SAVE_ITEM(S5P_VA_COMBINER_BASE + 0x060),
-	SAVE_ITEM(S5P_VA_COMBINER_BASE + 0x070),
-	SAVE_ITEM(S5P_VA_COMBINER_BASE + 0x080),
-	SAVE_ITEM(S5P_VA_COMBINER_BASE + 0x090),
-
+static struct sleep_save exynos_core_save[] = {
 	/* SROM side */
 	SAVE_ITEM(S5P_SROM_BW),
 	SAVE_ITEM(S5P_SROM_BC0),
@@ -159,9 +75,11 @@
 /* For Cortex-A9 Diagnostic and Power control register */
 static unsigned int save_arm_register[2];
 
-static int exynos4_cpu_suspend(unsigned long arg)
+static int exynos_cpu_suspend(unsigned long arg)
 {
+#ifdef CONFIG_CACHE_L2X0
 	outer_flush_all();
+#endif
 
 	/* issue the standby signal into the pm unit. */
 	cpu_do_idle();
@@ -170,19 +88,25 @@
 	panic("sleep resumed to originator?");
 }
 
-static void exynos4_pm_prepare(void)
+static void exynos_pm_prepare(void)
 {
-	u32 tmp;
+	unsigned int tmp;
 
-	s3c_pm_do_save(exynos4_core_save, ARRAY_SIZE(exynos4_core_save));
-	s3c_pm_do_save(exynos4_epll_save, ARRAY_SIZE(exynos4_epll_save));
-	s3c_pm_do_save(exynos4_vpll_save, ARRAY_SIZE(exynos4_vpll_save));
+	s3c_pm_do_save(exynos_core_save, ARRAY_SIZE(exynos_core_save));
 
-	tmp = __raw_readl(S5P_INFORM1);
+	if (!soc_is_exynos5250()) {
+		s3c_pm_do_save(exynos4_epll_save, ARRAY_SIZE(exynos4_epll_save));
+		s3c_pm_do_save(exynos4_vpll_save, ARRAY_SIZE(exynos4_vpll_save));
+	} else {
+		/* Disable USE_RETENTION of JPEG_MEM_OPTION */
+		tmp = __raw_readl(EXYNOS5_JPEG_MEM_OPTION);
+		tmp &= ~EXYNOS5_OPTION_USE_RETENTION;
+		__raw_writel(tmp, EXYNOS5_JPEG_MEM_OPTION);
+	}
 
 	/* Set value of power down register for sleep mode */
 
-	exynos4_sys_powerdown_conf(SYS_SLEEP);
+	exynos_sys_powerdown_conf(SYS_SLEEP);
 	__raw_writel(S5P_CHECK_SLEEP, S5P_INFORM1);
 
 	/* ensure at least INFORM0 has the resume address */
@@ -191,17 +115,18 @@
 
 	/* Before enter central sequence mode, clock src register have to set */
 
-	s3c_pm_do_restore_core(exynos4_set_clksrc, ARRAY_SIZE(exynos4_set_clksrc));
+	if (!soc_is_exynos5250())
+		s3c_pm_do_restore_core(exynos4_set_clksrc, ARRAY_SIZE(exynos4_set_clksrc));
 
 	if (soc_is_exynos4210())
 		s3c_pm_do_restore_core(exynos4210_set_clksrc, ARRAY_SIZE(exynos4210_set_clksrc));
 
 }
 
-static int exynos4_pm_add(struct device *dev, struct subsys_interface *sif)
+static int exynos_pm_add(struct device *dev, struct subsys_interface *sif)
 {
-	pm_cpu_prep = exynos4_pm_prepare;
-	pm_cpu_sleep = exynos4_cpu_suspend;
+	pm_cpu_prep = exynos_pm_prepare;
+	pm_cpu_sleep = exynos_cpu_suspend;
 
 	return 0;
 }
@@ -273,13 +198,13 @@
 	} while (epll_wait || vpll_wait);
 }
 
-static struct subsys_interface exynos4_pm_interface = {
-	.name		= "exynos4_pm",
-	.subsys		= &exynos4_subsys,
-	.add_dev	= exynos4_pm_add,
+static struct subsys_interface exynos_pm_interface = {
+	.name		= "exynos_pm",
+	.subsys		= &exynos_subsys,
+	.add_dev	= exynos_pm_add,
 };
 
-static __init int exynos4_pm_drvinit(void)
+static __init int exynos_pm_drvinit(void)
 {
 	struct clk *pll_base;
 	unsigned int tmp;
@@ -292,18 +217,20 @@
 	tmp |= ((0xFF << 8) | (0x1F << 1));
 	__raw_writel(tmp, S5P_WAKEUP_MASK);
 
-	pll_base = clk_get(NULL, "xtal");
+	if (!soc_is_exynos5250()) {
+		pll_base = clk_get(NULL, "xtal");
 
-	if (!IS_ERR(pll_base)) {
-		pll_base_rate = clk_get_rate(pll_base);
-		clk_put(pll_base);
+		if (!IS_ERR(pll_base)) {
+			pll_base_rate = clk_get_rate(pll_base);
+			clk_put(pll_base);
+		}
 	}
 
-	return subsys_interface_register(&exynos4_pm_interface);
+	return subsys_interface_register(&exynos_pm_interface);
 }
-arch_initcall(exynos4_pm_drvinit);
+arch_initcall(exynos_pm_drvinit);
 
-static int exynos4_pm_suspend(void)
+static int exynos_pm_suspend(void)
 {
 	unsigned long tmp;
 
@@ -313,27 +240,27 @@
 	tmp &= ~S5P_CENTRAL_LOWPWR_CFG;
 	__raw_writel(tmp, S5P_CENTRAL_SEQ_CONFIGURATION);
 
-	if (soc_is_exynos4212()) {
-		tmp = __raw_readl(S5P_CENTRAL_SEQ_OPTION);
-		tmp &= ~(S5P_USE_STANDBYWFI_ISP_ARM |
-			 S5P_USE_STANDBYWFE_ISP_ARM);
-		__raw_writel(tmp, S5P_CENTRAL_SEQ_OPTION);
+	/* Setting SEQ_OPTION register */
+
+	tmp = (S5P_USE_STANDBY_WFI0 | S5P_USE_STANDBY_WFE0);
+	__raw_writel(tmp, S5P_CENTRAL_SEQ_OPTION);
+
+	if (!soc_is_exynos5250()) {
+		/* Save Power control register */
+		asm ("mrc p15, 0, %0, c15, c0, 0"
+		     : "=r" (tmp) : : "cc");
+		save_arm_register[0] = tmp;
+
+		/* Save Diagnostic register */
+		asm ("mrc p15, 0, %0, c15, c0, 1"
+		     : "=r" (tmp) : : "cc");
+		save_arm_register[1] = tmp;
 	}
 
-	/* Save Power control register */
-	asm ("mrc p15, 0, %0, c15, c0, 0"
-	     : "=r" (tmp) : : "cc");
-	save_arm_register[0] = tmp;
-
-	/* Save Diagnostic register */
-	asm ("mrc p15, 0, %0, c15, c0, 1"
-	     : "=r" (tmp) : : "cc");
-	save_arm_register[1] = tmp;
-
 	return 0;
 }
 
-static void exynos4_pm_resume(void)
+static void exynos_pm_resume(void)
 {
 	unsigned long tmp;
 
@@ -350,17 +277,19 @@
 		/* No need to perform below restore code */
 		goto early_wakeup;
 	}
-	/* Restore Power control register */
-	tmp = save_arm_register[0];
-	asm volatile ("mcr p15, 0, %0, c15, c0, 0"
-		      : : "r" (tmp)
-		      : "cc");
+	if (!soc_is_exynos5250()) {
+		/* Restore Power control register */
+		tmp = save_arm_register[0];
+		asm volatile ("mcr p15, 0, %0, c15, c0, 0"
+			      : : "r" (tmp)
+			      : "cc");
 
-	/* Restore Diagnostic register */
-	tmp = save_arm_register[1];
-	asm volatile ("mcr p15, 0, %0, c15, c0, 1"
-		      : : "r" (tmp)
-		      : "cc");
+		/* Restore Diagnostic register */
+		tmp = save_arm_register[1];
+		asm volatile ("mcr p15, 0, %0, c15, c0, 1"
+			      : : "r" (tmp)
+			      : "cc");
+	}
 
 	/* For release retention */
 
@@ -372,26 +301,28 @@
 	__raw_writel((1 << 28), S5P_PAD_RET_EBIA_OPTION);
 	__raw_writel((1 << 28), S5P_PAD_RET_EBIB_OPTION);
 
-	s3c_pm_do_restore_core(exynos4_core_save, ARRAY_SIZE(exynos4_core_save));
+	s3c_pm_do_restore_core(exynos_core_save, ARRAY_SIZE(exynos_core_save));
 
-	exynos4_restore_pll();
+	if (!soc_is_exynos5250()) {
+		exynos4_restore_pll();
 
 #ifdef CONFIG_SMP
-	scu_enable(S5P_VA_SCU);
+		scu_enable(S5P_VA_SCU);
 #endif
+	}
 
 early_wakeup:
 	return;
 }
 
-static struct syscore_ops exynos4_pm_syscore_ops = {
-	.suspend	= exynos4_pm_suspend,
-	.resume		= exynos4_pm_resume,
+static struct syscore_ops exynos_pm_syscore_ops = {
+	.suspend	= exynos_pm_suspend,
+	.resume		= exynos_pm_resume,
 };
 
-static __init int exynos4_pm_syscore_init(void)
+static __init int exynos_pm_syscore_init(void)
 {
-	register_syscore_ops(&exynos4_pm_syscore_ops);
+	register_syscore_ops(&exynos_pm_syscore_ops);
 	return 0;
 }
-arch_initcall(exynos4_pm_syscore_init);
+arch_initcall(exynos_pm_syscore_init);
diff --git a/arch/arm/mach-exynos/pm_domains.c b/arch/arm/mach-exynos/pm_domains.c
index 13b3068..e9fafcf 100644
--- a/arch/arm/mach-exynos/pm_domains.c
+++ b/arch/arm/mach-exynos/pm_domains.c
@@ -193,9 +193,8 @@
 }
 arch_initcall(exynos4_pm_init_power_domain);
 
-static __init int exynos_pm_late_initcall(void)
+int __init exynos_pm_late_initcall(void)
 {
 	pm_genpd_poweroff_unused();
 	return 0;
 }
-late_initcall(exynos_pm_late_initcall);
diff --git a/arch/arm/mach-exynos/pmu.c b/arch/arm/mach-exynos/pmu.c
index bba48f5..4aacb66 100644
--- a/arch/arm/mach-exynos/pmu.c
+++ b/arch/arm/mach-exynos/pmu.c
@@ -1,9 +1,8 @@
-/* linux/arch/arm/mach-exynos4/pmu.c
- *
- * Copyright (c) 2011 Samsung Electronics Co., Ltd.
+/*
+ * Copyright (c) 2011-2012 Samsung Electronics Co., Ltd.
  *		http://www.samsung.com/
  *
- * EXYNOS4210 - CPU PMU(Power Management Unit) support
+ * EXYNOS - CPU PMU(Power Management Unit) support
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 as
@@ -12,13 +11,14 @@
 
 #include <linux/io.h>
 #include <linux/kernel.h>
+#include <linux/bug.h>
 
 #include <mach/regs-clock.h>
 #include <mach/pmu.h>
 
-static struct exynos4_pmu_conf *exynos4_pmu_config;
+static struct exynos_pmu_conf *exynos_pmu_config;
 
-static struct exynos4_pmu_conf exynos4210_pmu_config[] = {
+static struct exynos_pmu_conf exynos4210_pmu_config[] = {
 	/* { .reg = address, .val = { AFTR, LPA, SLEEP } */
 	{ S5P_ARM_CORE0_LOWPWR,			{ 0x0, 0x0, 0x2 } },
 	{ S5P_DIS_IRQ_CORE0,			{ 0x0, 0x0, 0x0 } },
@@ -94,7 +94,7 @@
 	{ PMU_TABLE_END,},
 };
 
-static struct exynos4_pmu_conf exynos4212_pmu_config[] = {
+static struct exynos_pmu_conf exynos4x12_pmu_config[] = {
 	{ S5P_ARM_CORE0_LOWPWR,			{ 0x0, 0x0, 0x2 } },
 	{ S5P_DIS_IRQ_CORE0,			{ 0x0, 0x0, 0x0 } },
 	{ S5P_DIS_IRQ_CENTRAL0,			{ 0x0, 0x0, 0x0 } },
@@ -202,29 +202,209 @@
 	{ PMU_TABLE_END,},
 };
 
-void exynos4_sys_powerdown_conf(enum sys_powerdown mode)
+static struct exynos_pmu_conf exynos4412_pmu_config[] = {
+	{ S5P_ARM_CORE2_LOWPWR,			{ 0x0, 0x0, 0x2 } },
+	{ S5P_DIS_IRQ_CORE2,			{ 0x0, 0x0, 0x0 } },
+	{ S5P_DIS_IRQ_CENTRAL2,			{ 0x0, 0x0, 0x0 } },
+	{ S5P_ARM_CORE3_LOWPWR,			{ 0x0, 0x0, 0x2 } },
+	{ S5P_DIS_IRQ_CORE3,			{ 0x0, 0x0, 0x0 } },
+	{ S5P_DIS_IRQ_CENTRAL3,			{ 0x0, 0x0, 0x0 } },
+	{ PMU_TABLE_END,},
+};
+
+static struct exynos_pmu_conf exynos5250_pmu_config[] = {
+	/* { .reg = address, .val = { AFTR, LPA, SLEEP } */
+	{ EXYNOS5_ARM_CORE0_SYS_PWR_REG,		{ 0x0, 0x0, 0x2} },
+	{ EXYNOS5_DIS_IRQ_ARM_CORE0_LOCAL_SYS_PWR_REG,	{ 0x0, 0x0, 0x0} },
+	{ EXYNOS5_DIS_IRQ_ARM_CORE0_CENTRAL_SYS_PWR_REG,	{ 0x0, 0x0, 0x0} },
+	{ EXYNOS5_ARM_CORE1_SYS_PWR_REG,		{ 0x0, 0x0, 0x2} },
+	{ EXYNOS5_DIS_IRQ_ARM_CORE1_LOCAL_SYS_PWR_REG,	{ 0x0, 0x0, 0x0} },
+	{ EXYNOS5_DIS_IRQ_ARM_CORE1_CENTRAL_SYS_PWR_REG,	{ 0x0, 0x0, 0x0} },
+	{ EXYNOS5_FSYS_ARM_SYS_PWR_REG,			{ 0x1, 0x0, 0x0} },
+	{ EXYNOS5_DIS_IRQ_FSYS_ARM_CENTRAL_SYS_PWR_REG,	{ 0x1, 0x1, 0x1} },
+	{ EXYNOS5_ISP_ARM_SYS_PWR_REG,			{ 0x1, 0x0, 0x0} },
+	{ EXYNOS5_DIS_IRQ_ISP_ARM_LOCAL_SYS_PWR_REG,	{ 0x0, 0x0, 0x0} },
+	{ EXYNOS5_DIS_IRQ_ISP_ARM_CENTRAL_SYS_PWR_REG,	{ 0x0, 0x0, 0x0} },
+	{ EXYNOS5_ARM_COMMON_SYS_PWR_REG,		{ 0x0, 0x0, 0x2} },
+	{ EXYNOS5_ARM_L2_SYS_PWR_REG,			{ 0x3, 0x3, 0x3} },
+	{ EXYNOS5_CMU_ACLKSTOP_SYS_PWR_REG,		{ 0x1, 0x0, 0x1} },
+	{ EXYNOS5_CMU_SCLKSTOP_SYS_PWR_REG,		{ 0x1, 0x0, 0x1} },
+	{ EXYNOS5_CMU_RESET_SYS_PWR_REG,		{ 0x1, 0x1, 0x0} },
+	{ EXYNOS5_CMU_ACLKSTOP_SYSMEM_SYS_PWR_REG,	{ 0x1, 0x0, 0x1} },
+	{ EXYNOS5_CMU_SCLKSTOP_SYSMEM_SYS_PWR_REG,	{ 0x1, 0x0, 0x1} },
+	{ EXYNOS5_CMU_RESET_SYSMEM_SYS_PWR_REG,		{ 0x1, 0x1, 0x0} },
+	{ EXYNOS5_DRAM_FREQ_DOWN_SYS_PWR_REG,		{ 0x1, 0x1, 0x1} },
+	{ EXYNOS5_DDRPHY_DLLOFF_SYS_PWR_REG,		{ 0x1, 0x1, 0x1} },
+	{ EXYNOS5_DDRPHY_DLLLOCK_SYS_PWR_REG,		{ 0x1, 0x1, 0x1} },
+	{ EXYNOS5_APLL_SYSCLK_SYS_PWR_REG,		{ 0x1, 0x0, 0x0} },
+	{ EXYNOS5_MPLL_SYSCLK_SYS_PWR_REG,		{ 0x1, 0x0, 0x0} },
+	{ EXYNOS5_VPLL_SYSCLK_SYS_PWR_REG,		{ 0x1, 0x0, 0x0} },
+	{ EXYNOS5_EPLL_SYSCLK_SYS_PWR_REG,		{ 0x1, 0x1, 0x0} },
+	{ EXYNOS5_BPLL_SYSCLK_SYS_PWR_REG,		{ 0x1, 0x0, 0x0} },
+	{ EXYNOS5_CPLL_SYSCLK_SYS_PWR_REG,		{ 0x1, 0x0, 0x0} },
+	{ EXYNOS5_MPLLUSER_SYSCLK_SYS_PWR_REG,		{ 0x1, 0x0, 0x0} },
+	{ EXYNOS5_BPLLUSER_SYSCLK_SYS_PWR_REG,		{ 0x1, 0x0, 0x0} },
+	{ EXYNOS5_TOP_BUS_SYS_PWR_REG,			{ 0x3, 0x0, 0x0} },
+	{ EXYNOS5_TOP_RETENTION_SYS_PWR_REG,		{ 0x1, 0x0, 0x1} },
+	{ EXYNOS5_TOP_PWR_SYS_PWR_REG,			{ 0x3, 0x0, 0x3} },
+	{ EXYNOS5_TOP_BUS_SYSMEM_SYS_PWR_REG,		{ 0x3, 0x0, 0x0} },
+	{ EXYNOS5_TOP_RETENTION_SYSMEM_SYS_PWR_REG,	{ 0x1, 0x0, 0x1} },
+	{ EXYNOS5_TOP_PWR_SYSMEM_SYS_PWR_REG,		{ 0x3, 0x0, 0x3} },
+	{ EXYNOS5_LOGIC_RESET_SYS_PWR_REG,		{ 0x1, 0x1, 0x0} },
+	{ EXYNOS5_OSCCLK_GATE_SYS_PWR_REG,		{ 0x1, 0x0, 0x1} },
+	{ EXYNOS5_LOGIC_RESET_SYSMEM_SYS_PWR_REG,	{ 0x1, 0x1, 0x0} },
+	{ EXYNOS5_OSCCLK_GATE_SYSMEM_SYS_PWR_REG,	{ 0x1, 0x0, 0x1} },
+	{ EXYNOS5_USBOTG_MEM_SYS_PWR_REG,		{ 0x3, 0x0, 0x0} },
+	{ EXYNOS5_G2D_MEM_SYS_PWR_REG,			{ 0x3, 0x0, 0x0} },
+	{ EXYNOS5_USBDRD_MEM_SYS_PWR_REG,		{ 0x3, 0x0, 0x0} },
+	{ EXYNOS5_SDMMC_MEM_SYS_PWR_REG,		{ 0x3, 0x0, 0x0} },
+	{ EXYNOS5_CSSYS_MEM_SYS_PWR_REG,		{ 0x3, 0x0, 0x0} },
+	{ EXYNOS5_SECSS_MEM_SYS_PWR_REG,		{ 0x3, 0x0, 0x0} },
+	{ EXYNOS5_ROTATOR_MEM_SYS_PWR_REG,		{ 0x3, 0x0, 0x0} },
+	{ EXYNOS5_INTRAM_MEM_SYS_PWR_REG,		{ 0x3, 0x0, 0x0} },
+	{ EXYNOS5_INTROM_MEM_SYS_PWR_REG,		{ 0x3, 0x0, 0x0} },
+	{ EXYNOS5_JPEG_MEM_SYS_PWR_REG,			{ 0x3, 0x0, 0x0} },
+	{ EXYNOS5_HSI_MEM_SYS_PWR_REG,			{ 0x3, 0x0, 0x0} },
+	{ EXYNOS5_MCUIOP_MEM_SYS_PWR_REG,		{ 0x3, 0x0, 0x0} },
+	{ EXYNOS5_SATA_MEM_SYS_PWR_REG,			{ 0x3, 0x0, 0x0} },
+	{ EXYNOS5_PAD_RETENTION_DRAM_SYS_PWR_REG,	{ 0x1, 0x0, 0x0} },
+	{ EXYNOS5_PAD_RETENTION_MAU_SYS_PWR_REG,	{ 0x1, 0x1, 0x0} },
+	{ EXYNOS5_PAD_RETENTION_GPIO_SYS_PWR_REG,	{ 0x1, 0x0, 0x0} },
+	{ EXYNOS5_PAD_RETENTION_UART_SYS_PWR_REG,	{ 0x1, 0x0, 0x0} },
+	{ EXYNOS5_PAD_RETENTION_MMCA_SYS_PWR_REG,	{ 0x1, 0x0, 0x0} },
+	{ EXYNOS5_PAD_RETENTION_MMCB_SYS_PWR_REG,	{ 0x1, 0x0, 0x0} },
+	{ EXYNOS5_PAD_RETENTION_EBIA_SYS_PWR_REG,	{ 0x1, 0x0, 0x0} },
+	{ EXYNOS5_PAD_RETENTION_EBIB_SYS_PWR_REG,	{ 0x1, 0x0, 0x0} },
+	{ EXYNOS5_PAD_RETENTION_SPI_SYS_PWR_REG,	{ 0x1, 0x0, 0x0} },
+	{ EXYNOS5_PAD_RETENTION_GPIO_SYSMEM_SYS_PWR_REG,	{ 0x1, 0x0, 0x0} },
+	{ EXYNOS5_PAD_ISOLATION_SYS_PWR_REG,		{ 0x1, 0x0, 0x0} },
+	{ EXYNOS5_PAD_ISOLATION_SYSMEM_SYS_PWR_REG,	{ 0x1, 0x0, 0x0} },
+	{ EXYNOS5_PAD_ALV_SEL_SYS_PWR_REG,		{ 0x1, 0x0, 0x0} },
+	{ EXYNOS5_XUSBXTI_SYS_PWR_REG,			{ 0x1, 0x1, 0x1} },
+	{ EXYNOS5_XXTI_SYS_PWR_REG,			{ 0x1, 0x1, 0x0} },
+	{ EXYNOS5_EXT_REGULATOR_SYS_PWR_REG,		{ 0x1, 0x1, 0x0} },
+	{ EXYNOS5_GPIO_MODE_SYS_PWR_REG,		{ 0x1, 0x0, 0x0} },
+	{ EXYNOS5_GPIO_MODE_SYSMEM_SYS_PWR_REG,		{ 0x1, 0x0, 0x0} },
+	{ EXYNOS5_GPIO_MODE_MAU_SYS_PWR_REG,		{ 0x1, 0x1, 0x0} },
+	{ EXYNOS5_TOP_ASB_RESET_SYS_PWR_REG,		{ 0x1, 0x1, 0x1} },
+	{ EXYNOS5_TOP_ASB_ISOLATION_SYS_PWR_REG,	{ 0x1, 0x0, 0x1} },
+	{ EXYNOS5_GSCL_SYS_PWR_REG,			{ 0x7, 0x0, 0x0} },
+	{ EXYNOS5_ISP_SYS_PWR_REG,			{ 0x7, 0x0, 0x0} },
+	{ EXYNOS5_MFC_SYS_PWR_REG,			{ 0x7, 0x0, 0x0} },
+	{ EXYNOS5_G3D_SYS_PWR_REG,			{ 0x7, 0x0, 0x0} },
+	{ EXYNOS5_DISP1_SYS_PWR_REG,			{ 0x7, 0x0, 0x0} },
+	{ EXYNOS5_MAU_SYS_PWR_REG,			{ 0x7, 0x7, 0x0} },
+	{ EXYNOS5_CMU_CLKSTOP_GSCL_SYS_PWR_REG,		{ 0x1, 0x0, 0x0} },
+	{ EXYNOS5_CMU_CLKSTOP_ISP_SYS_PWR_REG,		{ 0x1, 0x0, 0x0} },
+	{ EXYNOS5_CMU_CLKSTOP_MFC_SYS_PWR_REG,		{ 0x1, 0x0, 0x0} },
+	{ EXYNOS5_CMU_CLKSTOP_G3D_SYS_PWR_REG,		{ 0x1, 0x0, 0x0} },
+	{ EXYNOS5_CMU_CLKSTOP_DISP1_SYS_PWR_REG,	{ 0x1, 0x0, 0x0} },
+	{ EXYNOS5_CMU_CLKSTOP_MAU_SYS_PWR_REG,		{ 0x1, 0x1, 0x0} },
+	{ EXYNOS5_CMU_SYSCLK_GSCL_SYS_PWR_REG,		{ 0x1, 0x0, 0x0} },
+	{ EXYNOS5_CMU_SYSCLK_ISP_SYS_PWR_REG,		{ 0x1, 0x0, 0x0} },
+	{ EXYNOS5_CMU_SYSCLK_MFC_SYS_PWR_REG,		{ 0x1, 0x0, 0x0} },
+	{ EXYNOS5_CMU_SYSCLK_G3D_SYS_PWR_REG,		{ 0x1, 0x0, 0x0} },
+	{ EXYNOS5_CMU_SYSCLK_DISP1_SYS_PWR_REG,		{ 0x1, 0x0, 0x0} },
+	{ EXYNOS5_CMU_SYSCLK_MAU_SYS_PWR_REG,		{ 0x1, 0x1, 0x0} },
+	{ EXYNOS5_CMU_RESET_GSCL_SYS_PWR_REG,		{ 0x1, 0x0, 0x0} },
+	{ EXYNOS5_CMU_RESET_ISP_SYS_PWR_REG,		{ 0x1, 0x0, 0x0} },
+	{ EXYNOS5_CMU_RESET_MFC_SYS_PWR_REG,		{ 0x1, 0x0, 0x0} },
+	{ EXYNOS5_CMU_RESET_G3D_SYS_PWR_REG,		{ 0x1, 0x0, 0x0} },
+	{ EXYNOS5_CMU_RESET_DISP1_SYS_PWR_REG,		{ 0x1, 0x0, 0x0} },
+	{ EXYNOS5_CMU_RESET_MAU_SYS_PWR_REG,		{ 0x1, 0x1, 0x0} },
+	{ PMU_TABLE_END,},
+};
+
+void __iomem *exynos5_list_both_cnt_feed[] = {
+	EXYNOS5_ARM_CORE0_OPTION,
+	EXYNOS5_ARM_CORE1_OPTION,
+	EXYNOS5_ARM_COMMON_OPTION,
+	EXYNOS5_GSCL_OPTION,
+	EXYNOS5_ISP_OPTION,
+	EXYNOS5_MFC_OPTION,
+	EXYNOS5_G3D_OPTION,
+	EXYNOS5_DISP1_OPTION,
+	EXYNOS5_MAU_OPTION,
+	EXYNOS5_TOP_PWR_OPTION,
+	EXYNOS5_TOP_PWR_SYSMEM_OPTION,
+};
+
+void __iomem *exynos5_list_disable_wfi_wfe[] = {
+	EXYNOS5_ARM_CORE1_OPTION,
+	EXYNOS5_FSYS_ARM_OPTION,
+	EXYNOS5_ISP_ARM_OPTION,
+};
+
+static void exynos5_init_pmu(void)
+{
+	unsigned int i;
+	unsigned int tmp;
+
+	/*
+	 * Enable both SC_FEEDBACK and SC_COUNTER
+	 */
+	for (i = 0 ; i < ARRAY_SIZE(exynos5_list_both_cnt_feed) ; i++) {
+		tmp = __raw_readl(exynos5_list_both_cnt_feed[i]);
+		tmp |= (EXYNOS5_USE_SC_FEEDBACK |
+			EXYNOS5_USE_SC_COUNTER);
+		__raw_writel(tmp, exynos5_list_both_cnt_feed[i]);
+	}
+
+	/*
+	 * SKIP_DEACTIVATE_ACEACP_IN_PWDN_BITFIELD Enable
+	 * MANUAL_L2RSTDISABLE_CONTROL_BITFIELD Enable
+	 */
+	tmp = __raw_readl(EXYNOS5_ARM_COMMON_OPTION);
+	tmp |= (EXYNOS5_MANUAL_L2RSTDISABLE_CONTROL |
+		EXYNOS5_SKIP_DEACTIVATE_ACEACP_IN_PWDN);
+	__raw_writel(tmp, EXYNOS5_ARM_COMMON_OPTION);
+
+	/*
+	 * Disable WFI/WFE on XXX_OPTION
+	 */
+	for (i = 0 ; i < ARRAY_SIZE(exynos5_list_disable_wfi_wfe) ; i++) {
+		tmp = __raw_readl(exynos5_list_disable_wfi_wfe[i]);
+		tmp &= ~(EXYNOS5_OPTION_USE_STANDBYWFE |
+			 EXYNOS5_OPTION_USE_STANDBYWFI);
+		__raw_writel(tmp, exynos5_list_disable_wfi_wfe[i]);
+	}
+}
+
+void exynos_sys_powerdown_conf(enum sys_powerdown mode)
 {
 	unsigned int i;
 
-	for (i = 0; (exynos4_pmu_config[i].reg != PMU_TABLE_END) ; i++)
-		__raw_writel(exynos4_pmu_config[i].val[mode],
-				exynos4_pmu_config[i].reg);
+	if (soc_is_exynos5250())
+		exynos5_init_pmu();
+
+	for (i = 0; (exynos_pmu_config[i].reg != PMU_TABLE_END) ; i++)
+		__raw_writel(exynos_pmu_config[i].val[mode],
+				exynos_pmu_config[i].reg);
+
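+	/*
+	 * EXYNOS4412 carries two extra Cortex-A9 cores; their CORE2/CORE3
+	 * low-power settings live in a separate table applied on top of
+	 * the common EXYNOS4x12 configuration.
+	 */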
+	if (soc_is_exynos4412()) {
+		for (i = 0; exynos4412_pmu_config[i].reg != PMU_TABLE_END ; i++)
+			__raw_writel(exynos4412_pmu_config[i].val[mode],
+				exynos4412_pmu_config[i].reg);
+	}
 }
 
-static int __init exynos4_pmu_init(void)
+static int __init exynos_pmu_init(void)
 {
-	exynos4_pmu_config = exynos4210_pmu_config;
+	exynos_pmu_config = exynos4210_pmu_config;
 
 	if (soc_is_exynos4210()) {
-		exynos4_pmu_config = exynos4210_pmu_config;
+		exynos_pmu_config = exynos4210_pmu_config;
 		pr_info("EXYNOS4210 PMU Initialize\n");
-	} else if (soc_is_exynos4212()) {
-		exynos4_pmu_config = exynos4212_pmu_config;
-		pr_info("EXYNOS4212 PMU Initialize\n");
+	} else if (soc_is_exynos4212() || soc_is_exynos4412()) {
+		exynos_pmu_config = exynos4x12_pmu_config;
+		pr_info("EXYNOS4x12 PMU Initialize\n");
+	} else if (soc_is_exynos5250()) {
+		exynos_pmu_config = exynos5250_pmu_config;
+		pr_info("EXYNOS5250 PMU Initialize\n");
 	} else {
-		pr_info("EXYNOS4: PMU not supported\n");
+		pr_info("EXYNOS: PMU not supported\n");
 	}
 
 	return 0;
 }
-arch_initcall(exynos4_pmu_init);
+arch_initcall(exynos_pmu_init);
diff --git a/arch/arm/mach-highbank/Makefile b/arch/arm/mach-highbank/Makefile
index f8437dd2..ded4652 100644
--- a/arch/arm/mach-highbank/Makefile
+++ b/arch/arm/mach-highbank/Makefile
@@ -1,4 +1,8 @@
-obj-y					:= clock.o highbank.o system.o
+obj-y					:= clock.o highbank.o system.o smc.o
+
+plus_sec := $(call as-instr,.arch_extension sec,+sec)
+AFLAGS_smc.o				:=-Wa,-march=armv7-a$(plus_sec)
+
 obj-$(CONFIG_DEBUG_HIGHBANK_UART)	+= lluart.o
 obj-$(CONFIG_SMP)			+= platsmp.o
 obj-$(CONFIG_HOTPLUG_CPU)		+= hotplug.o
diff --git a/arch/arm/mach-highbank/core.h b/arch/arm/mach-highbank/core.h
index d8e2d0b..141ed51 100644
--- a/arch/arm/mach-highbank/core.h
+++ b/arch/arm/mach-highbank/core.h
@@ -8,3 +8,4 @@
 static inline void highbank_lluart_map_io(void) {}
 #endif
 
+extern void highbank_smc1(int fn, int arg);
diff --git a/arch/arm/mach-highbank/highbank.c b/arch/arm/mach-highbank/highbank.c
index 410a112..8777612 100644
--- a/arch/arm/mach-highbank/highbank.c
+++ b/arch/arm/mach-highbank/highbank.c
@@ -85,10 +85,24 @@
 	{}
 };
 
+#ifdef CONFIG_CACHE_L2X0
+static void highbank_l2x0_disable(void)
+{
+	/* Disable PL310 L2 Cache controller */
+	highbank_smc1(0x102, 0x0);
+}
+#endif
+
 static void __init highbank_init_irq(void)
 {
 	of_irq_init(irq_match);
+
+#ifdef CONFIG_CACHE_L2X0
+	/* Enable PL310 L2 Cache controller */
+	highbank_smc1(0x102, 0x1);
 	l2x0_of_init(0, ~0UL);
+	outer_cache.disable = highbank_l2x0_disable;
+#endif
 }
 
 static void __init highbank_timer_init(void)
diff --git a/arch/arm/mach-highbank/smc.S b/arch/arm/mach-highbank/smc.S
new file mode 100644
index 0000000..407d17b
--- /dev/null
+++ b/arch/arm/mach-highbank/smc.S
@@ -0,0 +1,27 @@
+/*
+ * Copied from omap44xx-smc.S Copyright (C) 2010 Texas Instruments, Inc.
+ * Copyright 2012 Calxeda, Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/linkage.h>
+
+/*
+ * This is a common routine to manage the secure monitor API
+ * used to modify the PL310 secure registers.
+ * 'r0' contains the value to be modified and 'r12' contains
+ * the monitor API number.
+ * Function signature : void highbank_smc1(u32 fn, u32 arg)
+ */
+
+ENTRY(highbank_smc1)
+	stmfd   sp!, {r4-r11, lr}
+	mov	r12, r0
+	mov 	r0, r1
+	dsb
+	smc	#0
+	ldmfd   sp!, {r4-r11, pc}
+ENDPROC(highbank_smc1)
diff --git a/arch/arm/mach-imx/Kconfig b/arch/arm/mach-imx/Kconfig
index cca8c0c..eff4db5 100644
--- a/arch/arm/mach-imx/Kconfig
+++ b/arch/arm/mach-imx/Kconfig
@@ -34,6 +34,7 @@
 config SOC_IMX1
 	bool
 	select ARCH_MX1
+	select COMMON_CLK
 	select CPU_ARM920T
 	select IMX_HAVE_IOMUX_V1
 	select MXC_AVIC
@@ -42,12 +43,14 @@
 	bool
 	select MACH_MX21
 	select CPU_ARM926T
+	select COMMON_CLK
 	select IMX_HAVE_IOMUX_V1
 	select MXC_AVIC
 
 config SOC_IMX25
 	bool
 	select ARCH_MX25
+	select COMMON_CLK
 	select CPU_ARM926T
 	select ARCH_MXC_IOMUX_V3
 	select MXC_AVIC
@@ -56,6 +59,7 @@
 	bool
 	select MACH_MX27
 	select CPU_ARM926T
+	select COMMON_CLK
 	select IMX_HAVE_IOMUX_V1
 	select MXC_AVIC
 
@@ -64,12 +68,14 @@
 	select CPU_V6
 	select IMX_HAVE_PLATFORM_MXC_RNGA
 	select MXC_AVIC
+	select COMMON_CLK
 	select SMP_ON_UP if SMP
 
 config SOC_IMX35
 	bool
 	select CPU_V6
 	select ARCH_MXC_IOMUX_V3
+	select COMMON_CLK
 	select HAVE_EPIT
 	select MXC_AVIC
 	select SMP_ON_UP if SMP
@@ -77,6 +83,7 @@
 config SOC_IMX5
 	select CPU_V7
 	select MXC_TZIC
+	select COMMON_CLK
 	select ARCH_MXC_IOMUX_V3
 	select ARCH_HAS_CPUFREQ
 	select ARCH_MX5
@@ -470,6 +477,7 @@
 	select IMX_HAVE_PLATFORM_IMX2_WDT
 	select IMX_HAVE_PLATFORM_IMX_I2C
 	select IMX_HAVE_PLATFORM_IMX_KEYPAD
+	select IMX_HAVE_PLATFORM_IMX_SSI
 	select IMX_HAVE_PLATFORM_IMX_UART
 	select IMX_HAVE_PLATFORM_IPU_CORE
 	select IMX_HAVE_PLATFORM_MXC_EHCI
@@ -815,6 +823,7 @@
 	bool "i.MX6 Quad support"
 	select ARM_CPU_SUSPEND if PM
 	select ARM_GIC
+	select COMMON_CLK
 	select CPU_V7
 	select HAVE_ARM_SCU
 	select HAVE_IMX_GPC
diff --git a/arch/arm/mach-imx/Makefile b/arch/arm/mach-imx/Makefile
index 4937c07..ff29421 100644
--- a/arch/arm/mach-imx/Makefile
+++ b/arch/arm/mach-imx/Makefile
@@ -1,15 +1,18 @@
-obj-$(CONFIG_SOC_IMX1) += clock-imx1.o mm-imx1.o
-obj-$(CONFIG_SOC_IMX21) += clock-imx21.o mm-imx21.o
+obj-$(CONFIG_SOC_IMX1) += clk-imx1.o mm-imx1.o
+obj-$(CONFIG_SOC_IMX21) += clk-imx21.o mm-imx21.o
 
-obj-$(CONFIG_SOC_IMX25) += clock-imx25.o mm-imx25.o ehci-imx25.o cpu-imx25.o
+obj-$(CONFIG_SOC_IMX25) += clk-imx25.o mm-imx25.o ehci-imx25.o cpu-imx25.o
 
 obj-$(CONFIG_SOC_IMX27) += cpu-imx27.o pm-imx27.o
-obj-$(CONFIG_SOC_IMX27) += clock-imx27.o mm-imx27.o ehci-imx27.o
+obj-$(CONFIG_SOC_IMX27) += clk-imx27.o mm-imx27.o ehci-imx27.o
 
-obj-$(CONFIG_SOC_IMX31) += mm-imx3.o cpu-imx31.o clock-imx31.o iomux-imx31.o ehci-imx31.o pm-imx3.o
-obj-$(CONFIG_SOC_IMX35) += mm-imx3.o cpu-imx35.o clock-imx35.o ehci-imx35.o pm-imx3.o
+obj-$(CONFIG_SOC_IMX31) += mm-imx3.o cpu-imx31.o clk-imx31.o iomux-imx31.o ehci-imx31.o pm-imx3.o
+obj-$(CONFIG_SOC_IMX35) += mm-imx3.o cpu-imx35.o clk-imx35.o ehci-imx35.o pm-imx3.o
 
-obj-$(CONFIG_SOC_IMX5) += cpu-imx5.o mm-imx5.o clock-mx51-mx53.o ehci-imx5.o pm-imx5.o cpu_op-mx51.o
+obj-$(CONFIG_SOC_IMX5) += cpu-imx5.o mm-imx5.o clk-imx51-imx53.o ehci-imx5.o pm-imx5.o cpu_op-mx51.o
+
+obj-$(CONFIG_COMMON_CLK) += clk-pllv1.o clk-pllv2.o clk-pllv3.o clk-gate2.o \
+			    clk-pfd.o clk-busy.o
 
 # Support for CMOS sensor interface
 obj-$(CONFIG_MX1_VIDEO) += mx1-camera-fiq.o mx1-camera-fiq-ksym.o
@@ -70,7 +73,7 @@
 AFLAGS_head-v7.o :=-Wa,-march=armv7-a
 obj-$(CONFIG_SMP) += platsmp.o
 obj-$(CONFIG_HOTPLUG_CPU) += hotplug.o
-obj-$(CONFIG_SOC_IMX6Q) += clock-imx6q.o mach-imx6q.o
+obj-$(CONFIG_SOC_IMX6Q) += clk-imx6q.o mach-imx6q.o
 
 ifeq ($(CONFIG_PM),y)
 obj-$(CONFIG_SOC_IMX6Q) += pm-imx6q.o
diff --git a/arch/arm/mach-imx/Makefile.boot b/arch/arm/mach-imx/Makefile.boot
index 3851d8a..05541cf 100644
--- a/arch/arm/mach-imx/Makefile.boot
+++ b/arch/arm/mach-imx/Makefile.boot
@@ -42,4 +42,5 @@
 dtb-$(CONFIG_MACH_IMX53_DT) += imx53-ard.dtb imx53-evk.dtb \
 			       imx53-qsb.dtb imx53-smd.dtb
 dtb-$(CONFIG_SOC_IMX6Q)	+= imx6q-arm2.dtb \
-			   imx6q-sabrelite.dtb
+			   imx6q-sabrelite.dtb \
+			   imx6q-sabresd.dtb \
diff --git a/arch/arm/mach-imx/clk-busy.c b/arch/arm/mach-imx/clk-busy.c
new file mode 100644
index 0000000..1a7a8dd
--- /dev/null
+++ b/arch/arm/mach-imx/clk-busy.c
@@ -0,0 +1,189 @@
+/*
+ * Copyright 2012 Freescale Semiconductor, Inc.
+ * Copyright 2012 Linaro Ltd.
+ *
+ * The code contained herein is licensed under the GNU General Public
+ * License. You may obtain a copy of the GNU General Public License
+ * Version 2 or later at the following locations:
+ *
+ * http://www.opensource.org/licenses/gpl-license.html
+ * http://www.gnu.org/copyleft/gpl.html
+ */
+
+#include <linux/clk.h>
+#include <linux/clk-provider.h>
+#include <linux/io.h>
+#include <linux/slab.h>
+#include <linux/jiffies.h>
+#include <linux/err.h>
+#include "clk.h"
+
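+/*
+ * Poll the busy flag at bit @shift of @reg until the hardware clears it,
+ * giving up with -ETIMEDOUT after roughly 10ms.
+ */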
+static int clk_busy_wait(void __iomem *reg, u8 shift)
+{
+	unsigned long timeout = jiffies + msecs_to_jiffies(10);
+
+	while (readl_relaxed(reg) & (1 << shift))
+		if (time_after(jiffies, timeout))
+			return -ETIMEDOUT;
+
+	return 0;
+}
+
+struct clk_busy_divider {
+	struct clk_divider div;
+	const struct clk_ops *div_ops;
+	void __iomem *reg;
+	u8 shift;
+};
+
+static inline struct clk_busy_divider *to_clk_busy_divider(struct clk_hw *hw)
+{
+	struct clk_divider *div = container_of(hw, struct clk_divider, hw);
+
+	return container_of(div, struct clk_busy_divider, div);
+}
+
+static unsigned long clk_busy_divider_recalc_rate(struct clk_hw *hw,
+						  unsigned long parent_rate)
+{
+	struct clk_busy_divider *busy = to_clk_busy_divider(hw);
+
+	return busy->div_ops->recalc_rate(&busy->div.hw, parent_rate);
+}
+
+static long clk_busy_divider_round_rate(struct clk_hw *hw, unsigned long rate,
+					unsigned long *prate)
+{
+	struct clk_busy_divider *busy = to_clk_busy_divider(hw);
+
+	return busy->div_ops->round_rate(&busy->div.hw, rate, prate);
+}
+
+static int clk_busy_divider_set_rate(struct clk_hw *hw, unsigned long rate,
+		unsigned long parent_rate)
+{
+	struct clk_busy_divider *busy = to_clk_busy_divider(hw);
+	int ret;
+
+	ret = busy->div_ops->set_rate(&busy->div.hw, rate, parent_rate);
+	if (!ret)
+		ret = clk_busy_wait(busy->reg, busy->shift);
+
+	return ret;
+}
+
+static struct clk_ops clk_busy_divider_ops = {
+	.recalc_rate = clk_busy_divider_recalc_rate,
+	.round_rate = clk_busy_divider_round_rate,
+	.set_rate = clk_busy_divider_set_rate,
+};
+
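+/*
+ * Register a divider clock that, after each rate change, waits for the
+ * associated busy flag to clear before returning.
+ */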
+struct clk *imx_clk_busy_divider(const char *name, const char *parent_name,
+				 void __iomem *reg, u8 shift, u8 width,
+				 void __iomem *busy_reg, u8 busy_shift)
+{
+	struct clk_busy_divider *busy;
+	struct clk *clk;
+	struct clk_init_data init;
+
+	busy = kzalloc(sizeof(*busy), GFP_KERNEL);
+	if (!busy)
+		return ERR_PTR(-ENOMEM);
+
+	busy->reg = busy_reg;
+	busy->shift = busy_shift;
+
+	busy->div.reg = reg;
+	busy->div.shift = shift;
+	busy->div.width = width;
+	busy->div.lock = &imx_ccm_lock;
+	busy->div_ops = &clk_divider_ops;
+
+	init.name = name;
+	init.ops = &clk_busy_divider_ops;
+	init.flags = CLK_SET_RATE_PARENT;
+	init.parent_names = &parent_name;
+	init.num_parents = 1;
+
+	busy->div.hw.init = &init;
+
+	clk = clk_register(NULL, &busy->div.hw);
+	if (IS_ERR(clk))
+		kfree(busy);
+
+	return clk;
+}
+
+struct clk_busy_mux {
+	struct clk_mux mux;
+	const struct clk_ops *mux_ops;
+	void __iomem *reg;
+	u8 shift;
+};
+
+static inline struct clk_busy_mux *to_clk_busy_mux(struct clk_hw *hw)
+{
+	struct clk_mux *mux = container_of(hw, struct clk_mux, hw);
+
+	return container_of(mux, struct clk_busy_mux, mux);
+}
+
+static u8 clk_busy_mux_get_parent(struct clk_hw *hw)
+{
+	struct clk_busy_mux *busy = to_clk_busy_mux(hw);
+
+	return busy->mux_ops->get_parent(&busy->mux.hw);
+}
+
+static int clk_busy_mux_set_parent(struct clk_hw *hw, u8 index)
+{
+	struct clk_busy_mux *busy = to_clk_busy_mux(hw);
+	int ret;
+
+	ret = busy->mux_ops->set_parent(&busy->mux.hw, index);
+	if (!ret)
+		ret = clk_busy_wait(busy->reg, busy->shift);
+
+	return ret;
+}
+
+static struct clk_ops clk_busy_mux_ops = {
+	.get_parent = clk_busy_mux_get_parent,
+	.set_parent = clk_busy_mux_set_parent,
+};
+
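+/*
+ * Register a mux clock that waits for the associated busy flag to clear
+ * after every parent switch.
+ */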
+struct clk *imx_clk_busy_mux(const char *name, void __iomem *reg, u8 shift,
+			     u8 width, void __iomem *busy_reg, u8 busy_shift,
+			     const char **parent_names, int num_parents)
+{
+	struct clk_busy_mux *busy;
+	struct clk *clk;
+	struct clk_init_data init;
+
+	busy = kzalloc(sizeof(*busy), GFP_KERNEL);
+	if (!busy)
+		return ERR_PTR(-ENOMEM);
+
+	busy->reg = busy_reg;
+	busy->shift = busy_shift;
+
+	busy->mux.reg = reg;
+	busy->mux.shift = shift;
+	busy->mux.width = width;
+	busy->mux.lock = &imx_ccm_lock;
+	busy->mux_ops = &clk_mux_ops;
+
+	init.name = name;
+	init.ops = &clk_busy_mux_ops;
+	init.flags = 0;
+	init.parent_names = parent_names;
+	init.num_parents = num_parents;
+
+	busy->mux.hw.init = &init;
+
+	clk = clk_register(NULL, &busy->mux.hw);
+	if (IS_ERR(clk))
+		kfree(busy);
+
+	return clk;
+}
diff --git a/arch/arm/mach-imx/clk-gate2.c b/arch/arm/mach-imx/clk-gate2.c
new file mode 100644
index 0000000..3c1b8ff
--- /dev/null
+++ b/arch/arm/mach-imx/clk-gate2.c
@@ -0,0 +1,118 @@
+/*
+ * Copyright (C) 2010-2011 Canonical Ltd <jeremy.kerr@canonical.com>
+ * Copyright (C) 2011-2012 Mike Turquette, Linaro Ltd <mturquette@linaro.org>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * Gated clock implementation
+ */
+
+#include <linux/clk-provider.h>
+#include <linux/module.h>
+#include <linux/slab.h>
+#include <linux/io.h>
+#include <linux/err.h>
+#include <linux/string.h>
+
+/**
+ * DOC: basic gateable clock which can gate and ungate its output
+ *
+ * Traits of this clock:
+ * prepare - clk_(un)prepare only ensures parent is (un)prepared
+ * enable - clk_enable and clk_disable are functional & control gating
+ * rate - inherits rate from parent.  No clk_set_rate support
+ * parent - fixed parent.  No clk_set_parent support
+ */
+
+#define to_clk_gate(_hw) container_of(_hw, struct clk_gate, hw)
+
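+/*
+ * Unlike the basic clk_gate, this gate controls a 2-bit field: both bits
+ * at @bit_idx must be set (0x3) for the clock to be ungated.
+ */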
+static int clk_gate2_enable(struct clk_hw *hw)
+{
+	struct clk_gate *gate = to_clk_gate(hw);
+	u32 reg;
+	unsigned long flags = 0;
+
+	if (gate->lock)
+		spin_lock_irqsave(gate->lock, flags);
+
+	reg = readl(gate->reg);
+	reg |= 3 << gate->bit_idx;
+	writel(reg, gate->reg);
+
+	if (gate->lock)
+		spin_unlock_irqrestore(gate->lock, flags);
+
+	return 0;
+}
+
+static void clk_gate2_disable(struct clk_hw *hw)
+{
+	struct clk_gate *gate = to_clk_gate(hw);
+	u32 reg;
+	unsigned long flags = 0;
+
+	if (gate->lock)
+		spin_lock_irqsave(gate->lock, flags);
+
+	reg = readl(gate->reg);
+	reg &= ~(3 << gate->bit_idx);
+	writel(reg, gate->reg);
+
+	if (gate->lock)
+		spin_unlock_irqrestore(gate->lock, flags);
+}
+
+static int clk_gate2_is_enabled(struct clk_hw *hw)
+{
+	u32 reg;
+	struct clk_gate *gate = to_clk_gate(hw);
+
+	reg = readl(gate->reg);
+
+	if (((reg >> gate->bit_idx) & 3) == 3)
+		return 1;
+
+	return 0;
+}
+
+static struct clk_ops clk_gate2_ops = {
+	.enable = clk_gate2_enable,
+	.disable = clk_gate2_disable,
+	.is_enabled = clk_gate2_is_enabled,
+};
+
+struct clk *clk_register_gate2(struct device *dev, const char *name,
+		const char *parent_name, unsigned long flags,
+		void __iomem *reg, u8 bit_idx,
+		u8 clk_gate2_flags, spinlock_t *lock)
+{
+	struct clk_gate *gate;
+	struct clk *clk;
+	struct clk_init_data init;
+
+	gate = kzalloc(sizeof(struct clk_gate), GFP_KERNEL);
+	if (!gate)
+		return ERR_PTR(-ENOMEM);
+
+	/* struct clk_gate assignments */
+	gate->reg = reg;
+	gate->bit_idx = bit_idx;
+	gate->flags = clk_gate2_flags;
+	gate->lock = lock;
+
+	init.name = name;
+	init.ops = &clk_gate2_ops;
+	init.flags = flags;
+	init.parent_names = parent_name ? &parent_name : NULL;
+	init.num_parents = parent_name ? 1 : 0;
+
+	gate->hw.init = &init;
+
+	clk = clk_register(dev, &gate->hw);
+	if (IS_ERR(clk))
+		kfree(gate);
+
+	return clk;
+}
diff --git a/arch/arm/mach-imx/clk-imx1.c b/arch/arm/mach-imx/clk-imx1.c
new file mode 100644
index 0000000..516ddee
--- /dev/null
+++ b/arch/arm/mach-imx/clk-imx1.c
@@ -0,0 +1,114 @@
+/*
+ *  Copyright (C) 2008 Sascha Hauer <s.hauer@pengutronix.de>, Pengutronix
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin St, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/clk.h>
+#include <linux/io.h>
+#include <linux/clkdev.h>
+#include <linux/err.h>
+
+#include <mach/hardware.h>
+#include <mach/common.h>
+#include "clk.h"
+
+/* CCM register addresses */
+#define IO_ADDR_CCM(off)	(MX1_IO_ADDRESS(MX1_CCM_BASE_ADDR + (off)))
+
+#define CCM_CSCR	IO_ADDR_CCM(0x0)
+#define CCM_MPCTL0	IO_ADDR_CCM(0x4)
+#define CCM_SPCTL0	IO_ADDR_CCM(0xc)
+#define CCM_PCDR	IO_ADDR_CCM(0x20)
+
+/* SCM register addresses */
+#define IO_ADDR_SCM(off)	(MX1_IO_ADDRESS(MX1_SCM_BASE_ADDR + (off)))
+
+#define SCM_GCCR	IO_ADDR_SCM(0xc)
+
+static const char *prem_sel_clks[] = { "clk32_premult", "clk16m", };
+static const char *clko_sel_clks[] = { "per1", "hclk", "clk48m", "clk16m", "prem",
+				"fclk", };
+enum imx1_clks {
+	dummy, clk32, clk16m_ext, clk16m, clk32_premult, prem, mpll, spll, mcu,
+	fclk, hclk, clk48m, per1, per2, per3, clko, dma_gate, csi_gate,
+	mma_gate, usbd_gate, clk_max
+};
+
+static struct clk *clk[clk_max];
+
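+/*
+ * Register the i.MX1 clock tree and clkdev lookups. @fref is the rate of
+ * the external low-frequency reference that feeds clk32.
+ */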
+int __init mx1_clocks_init(unsigned long fref)
+{
+	int i;
+
+	clk[dummy] = imx_clk_fixed("dummy", 0);
+	clk[clk32] = imx_clk_fixed("clk32", fref);
+	clk[clk16m_ext] = imx_clk_fixed("clk16m_ext", 16000000);
+	clk[clk16m] = imx_clk_gate("clk16m", "clk16m_ext", CCM_CSCR, 17);
+	clk[clk32_premult] = imx_clk_fixed_factor("clk32_premult", "clk32", 512, 1);
+	clk[prem] = imx_clk_mux("prem", CCM_CSCR, 16, 1, prem_sel_clks,
+			ARRAY_SIZE(prem_sel_clks));
+	clk[mpll] = imx_clk_pllv1("mpll", "clk32_premult", CCM_MPCTL0);
+	clk[spll] = imx_clk_pllv1("spll", "prem", CCM_SPCTL0);
+	clk[mcu] = imx_clk_divider("mcu", "clk32_premult", CCM_CSCR, 15, 1);
+	clk[fclk] = imx_clk_divider("fclk", "mpll", CCM_CSCR, 15, 1);
+	clk[hclk] = imx_clk_divider("hclk", "spll", CCM_CSCR, 10, 4);
+	clk[clk48m] = imx_clk_divider("clk48m", "spll", CCM_CSCR, 26, 3);
+	clk[per1] = imx_clk_divider("per1", "spll", CCM_PCDR, 0, 4);
+	clk[per2] = imx_clk_divider("per2", "spll", CCM_PCDR, 4, 4);
+	clk[per3] = imx_clk_divider("per3", "spll", CCM_PCDR, 16, 7);
+	clk[clko] = imx_clk_mux("clko", CCM_CSCR, 29, 3, clko_sel_clks,
+			ARRAY_SIZE(clko_sel_clks));
+	clk[dma_gate] = imx_clk_gate("dma_gate", "hclk", SCM_GCCR, 4);
+	clk[csi_gate] = imx_clk_gate("csi_gate", "hclk", SCM_GCCR, 2);
+	clk[mma_gate] = imx_clk_gate("mma_gate", "hclk", SCM_GCCR, 1);
+	clk[usbd_gate] = imx_clk_gate("usbd_gate", "clk48m", SCM_GCCR, 0);
+
+	for (i = 0; i < ARRAY_SIZE(clk); i++)
+		if (IS_ERR(clk[i]))
+			pr_err("imx1 clk %d: register failed with %ld\n",
+				i, PTR_ERR(clk[i]));
+
+	clk_register_clkdev(clk[dma_gate], "ahb", "imx-dma");
+	clk_register_clkdev(clk[csi_gate], NULL, "mx1-camera.0");
+	clk_register_clkdev(clk[mma_gate], "mma", NULL);
+	clk_register_clkdev(clk[usbd_gate], NULL, "imx_udc.0");
+	clk_register_clkdev(clk[per1], "per", "imx-gpt.0");
+	clk_register_clkdev(clk[hclk], "ipg", "imx-gpt.0");
+	clk_register_clkdev(clk[per1], "per", "imx1-uart.0");
+	clk_register_clkdev(clk[hclk], "ipg", "imx1-uart.0");
+	clk_register_clkdev(clk[per1], "per", "imx1-uart.1");
+	clk_register_clkdev(clk[hclk], "ipg", "imx1-uart.1");
+	clk_register_clkdev(clk[per1], "per", "imx1-uart.2");
+	clk_register_clkdev(clk[hclk], "ipg", "imx1-uart.2");
+	clk_register_clkdev(clk[hclk], NULL, "imx-i2c.0");
+	clk_register_clkdev(clk[per2], "per", "imx1-cspi.0");
+	clk_register_clkdev(clk[dummy], "ipg", "imx1-cspi.0");
+	clk_register_clkdev(clk[per2], "per", "imx1-cspi.1");
+	clk_register_clkdev(clk[dummy], "ipg", "imx1-cspi.1");
+	clk_register_clkdev(clk[per2], NULL, "imx-mmc.0");
+	clk_register_clkdev(clk[per2], "per", "imx-fb.0");
+	clk_register_clkdev(clk[dummy], "ipg", "imx-fb.0");
+	clk_register_clkdev(clk[dummy], "ahb", "imx-fb.0");
+	clk_register_clkdev(clk[hclk], "mshc", NULL);
+	clk_register_clkdev(clk[per3], "ssi", NULL);
+	clk_register_clkdev(clk[clk32], NULL, "mxc_rtc.0");
+	clk_register_clkdev(clk[clko], "clko", NULL);
+
+	mxc_timer_init(MX1_IO_ADDRESS(MX1_TIM1_BASE_ADDR), MX1_TIM1_INT);
+
+	return 0;
+}
diff --git a/arch/arm/mach-imx/clk-imx21.c b/arch/arm/mach-imx/clk-imx21.c
new file mode 100644
index 0000000..ea13e61
--- /dev/null
+++ b/arch/arm/mach-imx/clk-imx21.c
@@ -0,0 +1,186 @@
+/*
+ * Copyright 2004-2007 Freescale Semiconductor, Inc. All Rights Reserved.
+ * Copyright 2008 Juergen Beisert, kernel@pengutronix.de
+ * Copyright 2008 Martin Fuzzey, mfuzzey@gmail.com
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version 2
+ * of the License, or (at your option) any later version.
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston,
+ * MA 02110-1301, USA.
+ */
+
+#include <linux/clk.h>
+#include <linux/clkdev.h>
+#include <linux/clk-provider.h>
+#include <linux/io.h>
+#include <linux/module.h>
+#include <linux/err.h>
+
+#include <mach/hardware.h>
+#include <mach/common.h>
+#include "clk.h"
+
+#define IO_ADDR_CCM(off)	(MX21_IO_ADDRESS(MX21_CCM_BASE_ADDR + (off)))
+
+/* Register offsets */
+#define CCM_CSCR		IO_ADDR_CCM(0x0)
+#define CCM_MPCTL0		IO_ADDR_CCM(0x4)
+#define CCM_MPCTL1		IO_ADDR_CCM(0x8)
+#define CCM_SPCTL0		IO_ADDR_CCM(0xc)
+#define CCM_SPCTL1		IO_ADDR_CCM(0x10)
+#define CCM_OSC26MCTL		IO_ADDR_CCM(0x14)
+#define CCM_PCDR0		IO_ADDR_CCM(0x18)
+#define CCM_PCDR1		IO_ADDR_CCM(0x1c)
+#define CCM_PCCR0		IO_ADDR_CCM(0x20)
+#define CCM_PCCR1		IO_ADDR_CCM(0x24)
+#define CCM_CCSR		IO_ADDR_CCM(0x28)
+#define CCM_PMCTL		IO_ADDR_CCM(0x2c)
+#define CCM_PMCOUNT		IO_ADDR_CCM(0x30)
+#define CCM_WKGDCTL		IO_ADDR_CCM(0x34)
+
+static const char *mpll_sel_clks[] = { "fpm", "ckih", };
+static const char *spll_sel_clks[] = { "fpm", "ckih", };
+
+enum imx21_clks {
+	ckil, ckih, fpm, mpll_sel, spll_sel, mpll, spll, fclk, hclk, ipg, per1,
+	per2, per3, per4, uart1_ipg_gate, uart2_ipg_gate, uart3_ipg_gate,
+	uart4_ipg_gate, gpt1_ipg_gate, gpt2_ipg_gate, gpt3_ipg_gate,
+	pwm_ipg_gate, sdhc1_ipg_gate, sdhc2_ipg_gate, lcdc_ipg_gate,
+	lcdc_hclk_gate, cspi3_ipg_gate, cspi2_ipg_gate, cspi1_ipg_gate,
+	per4_gate, csi_hclk_gate, usb_div, usb_gate, usb_hclk_gate, ssi1_gate,
+	ssi2_gate, nfc_div, nfc_gate, dma_gate, dma_hclk_gate, brom_gate,
+	emma_gate, emma_hclk_gate, slcdc_gate, slcdc_hclk_gate, wdog_gate,
+	gpio_gate, i2c_gate, kpp_gate, owire_gate, rtc_gate, clk_max
+};
+
+static struct clk *clk[clk_max];
+
+/*
+ * must be called very early to get information about the
+ * available clock rate when the timer framework starts
+ */
+int __init mx21_clocks_init(unsigned long lref, unsigned long href)
+{
+	int i;
+
+	clk[ckil] = imx_clk_fixed("ckil", lref);
+	clk[ckih] = imx_clk_fixed("ckih", href);
+	clk[fpm] = imx_clk_fixed_factor("fpm", "ckil", 512, 1);
+	clk[mpll_sel] = imx_clk_mux("mpll_sel", CCM_CSCR, 16, 1, mpll_sel_clks,
+			ARRAY_SIZE(mpll_sel_clks));
+	clk[spll_sel] = imx_clk_mux("spll_sel", CCM_CSCR, 17, 1, spll_sel_clks,
+			ARRAY_SIZE(spll_sel_clks));
+	clk[mpll] = imx_clk_pllv1("mpll", "mpll_sel", CCM_MPCTL0);
+	clk[spll] = imx_clk_pllv1("spll", "spll_sel", CCM_SPCTL0);
+	clk[fclk] = imx_clk_divider("fclk", "mpll", CCM_CSCR, 29, 3);
+	clk[hclk] = imx_clk_divider("hclk", "fclk", CCM_CSCR, 10, 4);
+	clk[ipg] = imx_clk_divider("ipg", "hclk", CCM_CSCR, 9, 1);
+	clk[per1] = imx_clk_divider("per1", "mpll", CCM_PCDR1, 0, 6);
+	clk[per2] = imx_clk_divider("per2", "mpll", CCM_PCDR1, 8, 6);
+	clk[per3] = imx_clk_divider("per3", "mpll", CCM_PCDR1, 16, 6);
+	clk[per4] = imx_clk_divider("per4", "mpll", CCM_PCDR1, 24, 6);
+	clk[uart1_ipg_gate] = imx_clk_gate("uart1_ipg_gate", "ipg", CCM_PCCR0, 0);
+	clk[uart2_ipg_gate] = imx_clk_gate("uart2_ipg_gate", "ipg", CCM_PCCR0, 1);
+	clk[uart3_ipg_gate] = imx_clk_gate("uart3_ipg_gate", "ipg", CCM_PCCR0, 2);
+	clk[uart4_ipg_gate] = imx_clk_gate("uart4_ipg_gate", "ipg", CCM_PCCR0, 3);
+	clk[gpt1_ipg_gate] = imx_clk_gate("gpt1_ipg_gate", "ipg", CCM_PCCR1, 25);
+	clk[gpt2_ipg_gate] = imx_clk_gate("gpt2_ipg_gate", "ipg", CCM_PCCR1, 26);
+	clk[gpt3_ipg_gate] = imx_clk_gate("gpt3_ipg_gate", "ipg", CCM_PCCR1, 27);
+	clk[pwm_ipg_gate] = imx_clk_gate("pwm_ipg_gate", "ipg", CCM_PCCR1, 28);
+	clk[sdhc1_ipg_gate] = imx_clk_gate("sdhc1_ipg_gate", "ipg", CCM_PCCR0, 9);
+	clk[sdhc2_ipg_gate] = imx_clk_gate("sdhc2_ipg_gate", "ipg", CCM_PCCR0, 10);
+	clk[lcdc_ipg_gate] = imx_clk_gate("lcdc_ipg_gate", "ipg", CCM_PCCR0, 18);
+	clk[lcdc_hclk_gate] = imx_clk_gate("lcdc_hclk_gate", "hclk", CCM_PCCR0, 26);
+	clk[cspi3_ipg_gate] = imx_clk_gate("cspi3_ipg_gate", "ipg", CCM_PCCR1, 23);
+	clk[cspi2_ipg_gate] = imx_clk_gate("cspi2_ipg_gate", "ipg", CCM_PCCR0, 5);
+	clk[cspi1_ipg_gate] = imx_clk_gate("cspi1_ipg_gate", "ipg", CCM_PCCR0, 4);
+	clk[per4_gate] = imx_clk_gate("per4_gate", "per4", CCM_PCCR0, 22);
+	clk[csi_hclk_gate] = imx_clk_gate("csi_hclk_gate", "hclk", CCM_PCCR0, 31);
+	clk[usb_div] = imx_clk_divider("usb_div", "spll", CCM_CSCR, 26, 3);
+	clk[usb_gate] = imx_clk_gate("usb_gate", "usb_div", CCM_PCCR0, 14);
+	clk[usb_hclk_gate] = imx_clk_gate("usb_hclk_gate", "hclk", CCM_PCCR0, 24);
+	clk[ssi1_gate] = imx_clk_gate("ssi1_gate", "ipg", CCM_PCCR0, 6);
+	clk[ssi2_gate] = imx_clk_gate("ssi2_gate", "ipg", CCM_PCCR0, 7);
+	clk[nfc_div] = imx_clk_divider("nfc_div", "ipg", CCM_PCDR0, 12, 4);
+	clk[nfc_gate] = imx_clk_gate("nfc_gate", "nfc_div", CCM_PCCR0, 19);
+	clk[dma_gate] = imx_clk_gate("dma_gate", "ipg", CCM_PCCR0, 13);
+	clk[dma_hclk_gate] = imx_clk_gate("dma_hclk_gate", "hclk", CCM_PCCR0, 30);
+	clk[brom_gate] = imx_clk_gate("brom_gate", "hclk", CCM_PCCR0, 28);
+	clk[emma_gate] = imx_clk_gate("emma_gate", "ipg", CCM_PCCR0, 15);
+	clk[emma_hclk_gate] = imx_clk_gate("emma_hclk_gate", "hclk", CCM_PCCR0, 27);
+	clk[slcdc_gate] = imx_clk_gate("slcdc_gate", "ipg", CCM_PCCR0, 25);
+	clk[slcdc_hclk_gate] = imx_clk_gate("slcdc_hclk_gate", "hclk", CCM_PCCR0, 21);
+	clk[wdog_gate] = imx_clk_gate("wdog_gate", "ipg", CCM_PCCR1, 24);
+	clk[gpio_gate] = imx_clk_gate("gpio_gate", "ipg", CCM_PCCR0, 11);
+	clk[i2c_gate] = imx_clk_gate("i2c_gate", "ipg", CCM_PCCR0, 12);
+	clk[kpp_gate] = imx_clk_gate("kpp_gate", "ipg", CCM_PCCR1, 30);
+	clk[owire_gate] = imx_clk_gate("owire_gate", "ipg", CCM_PCCR1, 31);
+	clk[rtc_gate] = imx_clk_gate("rtc_gate", "ipg", CCM_PCCR1, 29);
+
+	for (i = 0; i < ARRAY_SIZE(clk); i++)
+		if (IS_ERR(clk[i]))
+			pr_err("i.MX21 clk %d: register failed with %ld\n",
+				i, PTR_ERR(clk[i]));
+
+	clk_register_clkdev(clk[per1], "per1", NULL);
+	clk_register_clkdev(clk[per2], "per2", NULL);
+	clk_register_clkdev(clk[per3], "per3", NULL);
+	clk_register_clkdev(clk[per4], "per4", NULL);
+	clk_register_clkdev(clk[per1], "per", "imx21-uart.0");
+	clk_register_clkdev(clk[uart1_ipg_gate], "ipg", "imx21-uart.0");
+	clk_register_clkdev(clk[per1], "per", "imx21-uart.1");
+	clk_register_clkdev(clk[uart2_ipg_gate], "ipg", "imx21-uart.1");
+	clk_register_clkdev(clk[per1], "per", "imx21-uart.2");
+	clk_register_clkdev(clk[uart3_ipg_gate], "ipg", "imx21-uart.2");
+	clk_register_clkdev(clk[per1], "per", "imx21-uart.3");
+	clk_register_clkdev(clk[uart4_ipg_gate], "ipg", "imx21-uart.3");
+	clk_register_clkdev(clk[gpt1_ipg_gate], "ipg", "imx-gpt.0");
+	clk_register_clkdev(clk[per1], "per", "imx-gpt.0");
+	clk_register_clkdev(clk[gpt2_ipg_gate], "ipg", "imx-gpt.1");
+	clk_register_clkdev(clk[per1], "per", "imx-gpt.1");
+	clk_register_clkdev(clk[gpt3_ipg_gate], "ipg", "imx-gpt.2");
+	clk_register_clkdev(clk[per1], "per", "imx-gpt.2");
+	clk_register_clkdev(clk[pwm_ipg_gate], "pwm", "mxc_pwm.0");
+	clk_register_clkdev(clk[per2], "per", "imx21-cspi.0");
+	clk_register_clkdev(clk[cspi1_ipg_gate], "ipg", "imx21-cspi.0");
+	clk_register_clkdev(clk[per2], "per", "imx21-cspi.1");
+	clk_register_clkdev(clk[cspi2_ipg_gate], "ipg", "imx21-cspi.1");
+	clk_register_clkdev(clk[per2], "per", "imx21-cspi.2");
+	clk_register_clkdev(clk[cspi3_ipg_gate], "ipg", "imx21-cspi.2");
+	clk_register_clkdev(clk[per3], "per", "imx-fb.0");
+	clk_register_clkdev(clk[lcdc_ipg_gate], "ipg", "imx-fb.0");
+	clk_register_clkdev(clk[lcdc_hclk_gate], "ahb", "imx-fb.0");
+	clk_register_clkdev(clk[usb_gate], "per", "imx21-hcd.0");
+	clk_register_clkdev(clk[usb_hclk_gate], "ahb", "imx21-hcd.0");
+	clk_register_clkdev(clk[nfc_gate], NULL, "mxc_nand.0");
+	clk_register_clkdev(clk[dma_hclk_gate], "ahb", "imx-dma");
+	clk_register_clkdev(clk[dma_gate], "ipg", "imx-dma");
+	clk_register_clkdev(clk[wdog_gate], NULL, "imx2-wdt.0");
+	clk_register_clkdev(clk[i2c_gate], NULL, "imx-i2c.0");
+	clk_register_clkdev(clk[kpp_gate], NULL, "mxc-keypad");
+	clk_register_clkdev(clk[owire_gate], NULL, "mxc_w1.0");
+	clk_register_clkdev(clk[brom_gate], "brom", NULL);
+	clk_register_clkdev(clk[emma_gate], "emma", NULL);
+	clk_register_clkdev(clk[slcdc_gate], "slcdc", NULL);
+	clk_register_clkdev(clk[gpio_gate], "gpio", NULL);
+	clk_register_clkdev(clk[rtc_gate], "rtc", NULL);
+	clk_register_clkdev(clk[csi_hclk_gate], "csi", NULL);
+	clk_register_clkdev(clk[ssi1_gate], "ssi1", NULL);
+	clk_register_clkdev(clk[ssi2_gate], "ssi2", NULL);
+	clk_register_clkdev(clk[sdhc1_ipg_gate], "sdhc1", NULL);
+	clk_register_clkdev(clk[sdhc2_ipg_gate], "sdhc2", NULL);
+
+	mxc_timer_init(MX21_IO_ADDRESS(MX21_GPT1_BASE_ADDR), MX21_INT_GPT1);
+
+	return 0;
+}
diff --git a/arch/arm/mach-imx/clk-imx25.c b/arch/arm/mach-imx/clk-imx25.c
new file mode 100644
index 0000000..fdd8cc8
--- /dev/null
+++ b/arch/arm/mach-imx/clk-imx25.c
@@ -0,0 +1,248 @@
+/*
+ * Copyright (C) 2009 by Sascha Hauer, Pengutronix
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version 2
+ * of the License, or (at your option) any later version.
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston,
+ * MA 02110-1301, USA.
+ */
+
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/list.h>
+#include <linux/clk.h>
+#include <linux/io.h>
+#include <linux/clkdev.h>
+#include <linux/err.h>
+
+#include <mach/hardware.h>
+#include <mach/common.h>
+#include <mach/mx25.h>
+#include "clk.h"
+
+#define CRM_BASE	MX25_IO_ADDRESS(MX25_CRM_BASE_ADDR)
+
+#define CCM_MPCTL	0x00
+#define CCM_UPCTL	0x04
+#define CCM_CCTL	0x08
+#define CCM_CGCR0	0x0C
+#define CCM_CGCR1	0x10
+#define CCM_CGCR2	0x14
+#define CCM_PCDR0	0x18
+#define CCM_PCDR1	0x1C
+#define CCM_PCDR2	0x20
+#define CCM_PCDR3	0x24
+#define CCM_RCSR	0x28
+#define CCM_CRDR	0x2C
+#define CCM_DCVR0	0x30
+#define CCM_DCVR1	0x34
+#define CCM_DCVR2	0x38
+#define CCM_DCVR3	0x3c
+#define CCM_LTR0	0x40
+#define CCM_LTR1	0x44
+#define CCM_LTR2	0x48
+#define CCM_LTR3	0x4c
+#define CCM_MCR		0x64
+
+#define ccm(x)	(CRM_BASE + (x))
+
+static const char *cpu_sel_clks[] = { "mpll", "mpll_cpu_3_4", };
+static const char *per_sel_clks[] = { "ahb", "upll", };
+
+enum mx25_clks {
+	dummy, osc, mpll, upll, mpll_cpu_3_4, cpu_sel, cpu, ahb, usb_div, ipg,
+	per0_sel, per1_sel, per2_sel, per3_sel, per4_sel, per5_sel, per6_sel,
+	per7_sel, per8_sel, per9_sel, per10_sel, per11_sel, per12_sel,
+	per13_sel, per14_sel, per15_sel, per0, per1, per2, per3, per4, per5,
+	per6, per7, per8, per9, per10, per11, per12, per13, per14, per15,
+	csi_ipg_per, esdhc1_ipg_per, esdhc2_ipg_per, gpt_ipg_per, i2c_ipg_per,
+	lcdc_ipg_per, nfc_ipg_per, ssi1_ipg_per, ssi2_ipg_per, uart_ipg_per,
+	csi_ahb, esdhc1_ahb, esdhc2_ahb, fec_ahb, lcdc_ahb, sdma_ahb,
+	usbotg_ahb, can1_ipg, can2_ipg, csi_ipg, cspi1_ipg, cspi2_ipg,
+	cspi3_ipg, dryice_ipg, esdhc1_ipg, esdhc2_ipg, fec_ipg, iim_ipg,
+	kpp_ipg, lcdc_ipg, pwm1_ipg, pwm2_ipg, pwm3_ipg, pwm4_ipg, sdma_ipg,
+	ssi1_ipg, ssi2_ipg, tsc_ipg, uart1_ipg, uart2_ipg, uart3_ipg,
+	uart4_ipg, uart5_ipg, wdt_ipg, clk_max
+};
+
+static struct clk *clk[clk_max];
+
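+/*
+ * Register the i.MX25 clock tree: the fixed 24MHz oscillator, MPLL/UPLL,
+ * the per-clock muxes and dividers, the ipg/ahb clock gates, and the
+ * clkdev lookups used by the platform devices.
+ */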
+int __init mx25_clocks_init(void)
+{
+	int i;
+
+	clk[dummy] = imx_clk_fixed("dummy", 0);
+	clk[osc] = imx_clk_fixed("osc", 24000000);
+	clk[mpll] = imx_clk_pllv1("mpll", "osc", ccm(CCM_MPCTL));
+	clk[upll] = imx_clk_pllv1("upll", "osc", ccm(CCM_UPCTL));
+	clk[mpll_cpu_3_4] = imx_clk_fixed_factor("mpll_cpu_3_4", "mpll", 3, 4);
+	clk[cpu_sel] = imx_clk_mux("cpu_sel", ccm(CCM_CCTL), 14, 1, cpu_sel_clks, ARRAY_SIZE(cpu_sel_clks));
+	clk[cpu] = imx_clk_divider("cpu", "cpu_sel", ccm(CCM_CCTL), 30, 2);
+	clk[ahb] = imx_clk_divider("ahb", "cpu", ccm(CCM_CCTL), 28, 2);
+	clk[usb_div] = imx_clk_divider("usb_div", "upll", ccm(CCM_CCTL), 16, 6);
+	clk[ipg] = imx_clk_fixed_factor("ipg", "ahb", 1, 2);
+	clk[per0_sel] = imx_clk_mux("per0_sel", ccm(CCM_MCR), 0, 1, per_sel_clks, ARRAY_SIZE(per_sel_clks));
+	clk[per1_sel] = imx_clk_mux("per1_sel", ccm(CCM_MCR), 1, 1, per_sel_clks, ARRAY_SIZE(per_sel_clks));
+	clk[per2_sel] = imx_clk_mux("per2_sel", ccm(CCM_MCR), 2, 1, per_sel_clks, ARRAY_SIZE(per_sel_clks));
+	clk[per3_sel] = imx_clk_mux("per3_sel", ccm(CCM_MCR), 3, 1, per_sel_clks, ARRAY_SIZE(per_sel_clks));
+	clk[per4_sel] = imx_clk_mux("per4_sel", ccm(CCM_MCR), 4, 1, per_sel_clks, ARRAY_SIZE(per_sel_clks));
+	clk[per5_sel] = imx_clk_mux("per5_sel", ccm(CCM_MCR), 5, 1, per_sel_clks, ARRAY_SIZE(per_sel_clks));
+	clk[per6_sel] = imx_clk_mux("per6_sel", ccm(CCM_MCR), 6, 1, per_sel_clks, ARRAY_SIZE(per_sel_clks));
+	clk[per7_sel] = imx_clk_mux("per7_sel", ccm(CCM_MCR), 7, 1, per_sel_clks, ARRAY_SIZE(per_sel_clks));
+	clk[per8_sel] = imx_clk_mux("per8_sel", ccm(CCM_MCR), 8, 1, per_sel_clks, ARRAY_SIZE(per_sel_clks));
+	clk[per9_sel] = imx_clk_mux("per9_sel", ccm(CCM_MCR), 9, 1, per_sel_clks, ARRAY_SIZE(per_sel_clks));
+	clk[per10_sel] = imx_clk_mux("per10_sel", ccm(CCM_MCR), 10, 1, per_sel_clks, ARRAY_SIZE(per_sel_clks));
+	clk[per11_sel] = imx_clk_mux("per11_sel", ccm(CCM_MCR), 11, 1, per_sel_clks, ARRAY_SIZE(per_sel_clks));
+	clk[per12_sel] = imx_clk_mux("per12_sel", ccm(CCM_MCR), 12, 1, per_sel_clks, ARRAY_SIZE(per_sel_clks));
+	clk[per13_sel] = imx_clk_mux("per13_sel", ccm(CCM_MCR), 13, 1, per_sel_clks, ARRAY_SIZE(per_sel_clks));
+	clk[per14_sel] = imx_clk_mux("per14_sel", ccm(CCM_MCR), 14, 1, per_sel_clks, ARRAY_SIZE(per_sel_clks));
+	clk[per15_sel] = imx_clk_mux("per15_sel", ccm(CCM_MCR), 15, 1, per_sel_clks, ARRAY_SIZE(per_sel_clks));
+	clk[per0] = imx_clk_divider("per0", "per0_sel", ccm(CCM_PCDR0), 0, 6);
+	clk[per1] = imx_clk_divider("per1", "per1_sel", ccm(CCM_PCDR0), 8, 6);
+	clk[per2] = imx_clk_divider("per2", "per2_sel", ccm(CCM_PCDR0), 16, 6);
+	clk[per3] = imx_clk_divider("per3", "per3_sel", ccm(CCM_PCDR0), 24, 6);
+	clk[per4] = imx_clk_divider("per4", "per4_sel", ccm(CCM_PCDR1), 0, 6);
+	clk[per5] = imx_clk_divider("per5", "per5_sel", ccm(CCM_PCDR1), 8, 6);
+	clk[per6] = imx_clk_divider("per6", "per6_sel", ccm(CCM_PCDR1), 16, 6);
+	clk[per7] = imx_clk_divider("per7", "per7_sel", ccm(CCM_PCDR1), 24, 6);
+	clk[per8] = imx_clk_divider("per8", "per8_sel", ccm(CCM_PCDR2), 0, 6);
+	clk[per9] = imx_clk_divider("per9", "per9_sel", ccm(CCM_PCDR2), 8, 6);
+	clk[per10] = imx_clk_divider("per10", "per10_sel", ccm(CCM_PCDR2), 16, 6);
+	clk[per11] = imx_clk_divider("per11", "per11_sel", ccm(CCM_PCDR2), 24, 6);
+	clk[per12] = imx_clk_divider("per12", "per12_sel", ccm(CCM_PCDR3), 0, 6);
+	clk[per13] = imx_clk_divider("per13", "per13_sel", ccm(CCM_PCDR3), 8, 6);
+	clk[per14] = imx_clk_divider("per14", "per14_sel", ccm(CCM_PCDR3), 16, 6);
+	clk[per15] = imx_clk_divider("per15", "per15_sel", ccm(CCM_PCDR3), 24, 6);
+	clk[csi_ipg_per] = imx_clk_gate("csi_ipg_per", "per0", ccm(CCM_CGCR0), 0);
+	clk[esdhc1_ipg_per] = imx_clk_gate("esdhc1_ipg_per", "per3", ccm(CCM_CGCR0),  3);
+	clk[esdhc2_ipg_per] = imx_clk_gate("esdhc2_ipg_per", "per4", ccm(CCM_CGCR0),  4);
+	clk[gpt_ipg_per] = imx_clk_gate("gpt_ipg_per", "per5", ccm(CCM_CGCR0),  5);
+	clk[i2c_ipg_per] = imx_clk_gate("i2c_ipg_per", "per6", ccm(CCM_CGCR0),  6);
+	clk[lcdc_ipg_per] = imx_clk_gate("lcdc_ipg_per", "per8", ccm(CCM_CGCR0),  7);
+	clk[nfc_ipg_per] = imx_clk_gate("nfc_ipg_per", "ipg_per", ccm(CCM_CGCR0),  8);
+	clk[ssi1_ipg_per] = imx_clk_gate("ssi1_ipg_per", "per13", ccm(CCM_CGCR0), 13);
+	clk[ssi2_ipg_per] = imx_clk_gate("ssi2_ipg_per", "per14", ccm(CCM_CGCR0), 14);
+	clk[uart_ipg_per] = imx_clk_gate("uart_ipg_per", "per15", ccm(CCM_CGCR0), 15);
+	clk[csi_ahb] = imx_clk_gate("csi_ahb", "ahb", ccm(CCM_CGCR0), 18);
+	clk[esdhc1_ahb] = imx_clk_gate("esdhc1_ahb", "ahb", ccm(CCM_CGCR0), 21);
+	clk[esdhc2_ahb] = imx_clk_gate("esdhc2_ahb", "ahb", ccm(CCM_CGCR0), 22);
+	clk[fec_ahb] = imx_clk_gate("fec_ahb", "ahb", ccm(CCM_CGCR0), 23);
+	clk[lcdc_ahb] = imx_clk_gate("lcdc_ahb", "ahb", ccm(CCM_CGCR0), 24);
+	clk[sdma_ahb] = imx_clk_gate("sdma_ahb", "ahb", ccm(CCM_CGCR0), 26);
+	clk[usbotg_ahb] = imx_clk_gate("usbotg_ahb", "ahb", ccm(CCM_CGCR0), 28);
+	clk[can1_ipg] = imx_clk_gate("can1_ipg", "ipg", ccm(CCM_CGCR1),  2);
+	clk[can2_ipg] = imx_clk_gate("can2_ipg", "ipg", ccm(CCM_CGCR1),  3);
+	clk[csi_ipg] = imx_clk_gate("csi_ipg", "ipg", ccm(CCM_CGCR1),  4);
+	clk[cspi1_ipg] = imx_clk_gate("cspi1_ipg", "ipg", ccm(CCM_CGCR1),  5);
+	clk[cspi2_ipg] = imx_clk_gate("cspi2_ipg", "ipg", ccm(CCM_CGCR1),  6);
+	clk[cspi3_ipg] = imx_clk_gate("cspi3_ipg", "ipg", ccm(CCM_CGCR1),  7);
+	clk[dryice_ipg] = imx_clk_gate("dryice_ipg", "ipg", ccm(CCM_CGCR1),  8);
+	clk[esdhc1_ipg] = imx_clk_gate("esdhc1_ipg", "ipg", ccm(CCM_CGCR1), 13);
+	clk[esdhc2_ipg] = imx_clk_gate("esdhc2_ipg", "ipg", ccm(CCM_CGCR1), 14);
+	clk[fec_ipg] = imx_clk_gate("fec_ipg", "ipg", ccm(CCM_CGCR1), 15);
+	clk[iim_ipg] = imx_clk_gate("iim_ipg", "ipg", ccm(CCM_CGCR1), 26);
+	clk[kpp_ipg] = imx_clk_gate("kpp_ipg", "ipg", ccm(CCM_CGCR1), 28);
+	clk[lcdc_ipg] = imx_clk_gate("lcdc_ipg", "ipg", ccm(CCM_CGCR1), 29);
+	clk[pwm1_ipg] = imx_clk_gate("pwm1_ipg", "ipg", ccm(CCM_CGCR1), 31);
+	clk[pwm2_ipg] = imx_clk_gate("pwm2_ipg", "ipg", ccm(CCM_CGCR2),  0);
+	clk[pwm3_ipg] = imx_clk_gate("pwm3_ipg", "ipg", ccm(CCM_CGCR2),  1);
+	clk[pwm4_ipg] = imx_clk_gate("pwm4_ipg", "ipg", ccm(CCM_CGCR2),  2);
+	clk[sdma_ipg] = imx_clk_gate("sdma_ipg", "ipg", ccm(CCM_CGCR2),  6);
+	clk[ssi1_ipg] = imx_clk_gate("ssi1_ipg", "ipg", ccm(CCM_CGCR2), 11);
+	clk[ssi2_ipg] = imx_clk_gate("ssi2_ipg", "ipg", ccm(CCM_CGCR2), 12);
+	clk[tsc_ipg] = imx_clk_gate("tsc_ipg", "ipg", ccm(CCM_CGCR2), 13);
+	clk[uart1_ipg] = imx_clk_gate("uart1_ipg", "ipg", ccm(CCM_CGCR2), 14);
+	clk[uart2_ipg] = imx_clk_gate("uart2_ipg", "ipg", ccm(CCM_CGCR2), 15);
+	clk[uart3_ipg] = imx_clk_gate("uart3_ipg", "ipg", ccm(CCM_CGCR2), 16);
+	clk[uart4_ipg] = imx_clk_gate("uart4_ipg", "ipg", ccm(CCM_CGCR2), 17);
+	clk[uart5_ipg] = imx_clk_gate("uart5_ipg", "ipg", ccm(CCM_CGCR2), 18);
+	clk[wdt_ipg] = imx_clk_gate("wdt_ipg", "ipg", ccm(CCM_CGCR2), 19);
+
+	for (i = 0; i < ARRAY_SIZE(clk); i++)
+		if (IS_ERR(clk[i]))
+			pr_err("i.MX25 clk %d: register failed with %ld\n",
+				i, PTR_ERR(clk[i]));
+
+	/* i.mx25 has the i.mx21 type uart */
+	clk_register_clkdev(clk[uart1_ipg], "ipg", "imx21-uart.0");
+	clk_register_clkdev(clk[uart_ipg_per], "per", "imx21-uart.0");
+	clk_register_clkdev(clk[uart2_ipg], "ipg", "imx21-uart.1");
+	clk_register_clkdev(clk[uart_ipg_per], "per", "imx21-uart.1");
+	clk_register_clkdev(clk[uart3_ipg], "ipg", "imx21-uart.2");
+	clk_register_clkdev(clk[uart_ipg_per], "per", "imx21-uart.2");
+	clk_register_clkdev(clk[uart4_ipg], "ipg", "imx21-uart.3");
+	clk_register_clkdev(clk[uart_ipg_per], "per", "imx21-uart.3");
+	clk_register_clkdev(clk[uart5_ipg], "ipg", "imx21-uart.4");
+	clk_register_clkdev(clk[uart_ipg_per], "per", "imx21-uart.4");
+	clk_register_clkdev(clk[ipg], "ipg", "imx-gpt.0");
+	clk_register_clkdev(clk[gpt_ipg_per], "per", "imx-gpt.0");
+	clk_register_clkdev(clk[ipg], "ipg", "mxc-ehci.0");
+	clk_register_clkdev(clk[usbotg_ahb], "ahb", "mxc-ehci.0");
+	clk_register_clkdev(clk[usb_div], "per", "mxc-ehci.0");
+	clk_register_clkdev(clk[ipg], "ipg", "mxc-ehci.1");
+	clk_register_clkdev(clk[usbotg_ahb], "ahb", "mxc-ehci.1");
+	clk_register_clkdev(clk[usb_div], "per", "mxc-ehci.1");
+	clk_register_clkdev(clk[ipg], "ipg", "mxc-ehci.2");
+	clk_register_clkdev(clk[usbotg_ahb], "ahb", "mxc-ehci.2");
+	clk_register_clkdev(clk[usb_div], "per", "mxc-ehci.2");
+	clk_register_clkdev(clk[ipg], "ipg", "fsl-usb2-udc");
+	clk_register_clkdev(clk[usbotg_ahb], "ahb", "fsl-usb2-udc");
+	clk_register_clkdev(clk[usb_div], "per", "fsl-usb2-udc");
+	clk_register_clkdev(clk[nfc_ipg_per], NULL, "mxc_nand.0");
+	/* i.mx25 has the i.mx35 type cspi */
+	clk_register_clkdev(clk[cspi1_ipg], NULL, "imx35-cspi.0");
+	clk_register_clkdev(clk[cspi2_ipg], NULL, "imx35-cspi.1");
+	clk_register_clkdev(clk[cspi3_ipg], NULL, "imx35-cspi.2");
+	clk_register_clkdev(clk[pwm1_ipg], "ipg", "mxc_pwm.0");
+	clk_register_clkdev(clk[per10], "per", "mxc_pwm.0");
+	clk_register_clkdev(clk[pwm1_ipg], "ipg", "mxc_pwm.1");
+	clk_register_clkdev(clk[per10], "per", "mxc_pwm.1");
+	clk_register_clkdev(clk[pwm1_ipg], "ipg", "mxc_pwm.2");
+	clk_register_clkdev(clk[per10], "per", "mxc_pwm.2");
+	clk_register_clkdev(clk[pwm1_ipg], "ipg", "mxc_pwm.3");
+	clk_register_clkdev(clk[per10], "per", "mxc_pwm.3");
+	clk_register_clkdev(clk[kpp_ipg], NULL, "imx-keypad");
+	clk_register_clkdev(clk[tsc_ipg], NULL, "mx25-adc");
+	clk_register_clkdev(clk[i2c_ipg_per], NULL, "imx-i2c.0");
+	clk_register_clkdev(clk[i2c_ipg_per], NULL, "imx-i2c.1");
+	clk_register_clkdev(clk[i2c_ipg_per], NULL, "imx-i2c.2");
+	clk_register_clkdev(clk[fec_ipg], "ipg", "imx25-fec.0");
+	clk_register_clkdev(clk[fec_ahb], "ahb", "imx25-fec.0");
+	clk_register_clkdev(clk[dryice_ipg], NULL, "imxdi_rtc.0");
+	clk_register_clkdev(clk[lcdc_ipg_per], "per", "imx-fb.0");
+	clk_register_clkdev(clk[lcdc_ipg], "ipg", "imx-fb.0");
+	clk_register_clkdev(clk[lcdc_ahb], "ahb", "imx-fb.0");
+	clk_register_clkdev(clk[wdt_ipg], NULL, "imx2-wdt.0");
+	clk_register_clkdev(clk[ssi1_ipg_per], "per", "imx-ssi.0");
+	clk_register_clkdev(clk[ssi1_ipg], "ipg", "imx-ssi.0");
+	clk_register_clkdev(clk[ssi2_ipg_per], "per", "imx-ssi.1");
+	clk_register_clkdev(clk[ssi2_ipg], "ipg", "imx-ssi.1");
+	clk_register_clkdev(clk[esdhc1_ipg_per], "per", "sdhci-esdhc-imx25.0");
+	clk_register_clkdev(clk[esdhc1_ipg], "ipg", "sdhci-esdhc-imx25.0");
+	clk_register_clkdev(clk[esdhc1_ahb], "ahb", "sdhci-esdhc-imx25.0");
+	clk_register_clkdev(clk[esdhc2_ipg_per], "per", "sdhci-esdhc-imx25.1");
+	clk_register_clkdev(clk[esdhc2_ipg], "ipg", "sdhci-esdhc-imx25.1");
+	clk_register_clkdev(clk[esdhc2_ahb], "ahb", "sdhci-esdhc-imx25.1");
+	clk_register_clkdev(clk[csi_ipg_per], "per", "mx2-camera.0");
+	clk_register_clkdev(clk[csi_ipg], "ipg", "mx2-camera.0");
+	clk_register_clkdev(clk[csi_ahb], "ahb", "mx2-camera.0");
+	clk_register_clkdev(clk[dummy], "audmux", NULL);
+	clk_register_clkdev(clk[can1_ipg], NULL, "flexcan.0");
+	clk_register_clkdev(clk[can2_ipg], NULL, "flexcan.1");
+	/* i.mx25 has the i.mx35 type sdma */
+	clk_register_clkdev(clk[sdma_ipg], "ipg", "imx35-sdma");
+	clk_register_clkdev(clk[sdma_ahb], "ahb", "imx35-sdma");
+	clk_register_clkdev(clk[iim_ipg], "iim", NULL);
+
+	mxc_timer_init(MX25_IO_ADDRESS(MX25_GPT1_BASE_ADDR), 54);
+	return 0;
+}
diff --git a/arch/arm/mach-imx/clk-imx27.c b/arch/arm/mach-imx/clk-imx27.c
new file mode 100644
index 0000000..295cbd7
--- /dev/null
+++ b/arch/arm/mach-imx/clk-imx27.c
@@ -0,0 +1,289 @@
+#include <linux/clk.h>
+#include <linux/io.h>
+#include <linux/module.h>
+#include <linux/clkdev.h>
+#include <linux/err.h>
+#include <linux/clk-provider.h>
+#include <linux/of.h>
+
+#include <mach/common.h>
+#include <mach/hardware.h>
+#include "clk.h"
+
+#define IO_ADDR_CCM(off)	(MX27_IO_ADDRESS(MX27_CCM_BASE_ADDR + (off)))
+
+/* Register offsets */
+#define CCM_CSCR		IO_ADDR_CCM(0x0)
+#define CCM_MPCTL0		IO_ADDR_CCM(0x4)
+#define CCM_MPCTL1		IO_ADDR_CCM(0x8)
+#define CCM_SPCTL0		IO_ADDR_CCM(0xc)
+#define CCM_SPCTL1		IO_ADDR_CCM(0x10)
+#define CCM_OSC26MCTL		IO_ADDR_CCM(0x14)
+#define CCM_PCDR0		IO_ADDR_CCM(0x18)
+#define CCM_PCDR1		IO_ADDR_CCM(0x1c)
+#define CCM_PCCR0		IO_ADDR_CCM(0x20)
+#define CCM_PCCR1		IO_ADDR_CCM(0x24)
+#define CCM_CCSR		IO_ADDR_CCM(0x28)
+#define CCM_PMCTL		IO_ADDR_CCM(0x2c)
+#define CCM_PMCOUNT		IO_ADDR_CCM(0x30)
+#define CCM_WKGDCTL		IO_ADDR_CCM(0x34)
+
+#define CCM_CSCR_UPDATE_DIS	(1 << 31)
+#define CCM_CSCR_SSI2		(1 << 23)
+#define CCM_CSCR_SSI1		(1 << 22)
+#define CCM_CSCR_VPU		(1 << 21)
+#define CCM_CSCR_MSHC           (1 << 20)
+#define CCM_CSCR_SPLLRES        (1 << 19)
+#define CCM_CSCR_MPLLRES        (1 << 18)
+#define CCM_CSCR_SP             (1 << 17)
+#define CCM_CSCR_MCU            (1 << 16)
+#define CCM_CSCR_OSC26MDIV      (1 << 4)
+#define CCM_CSCR_OSC26M         (1 << 3)
+#define CCM_CSCR_FPM            (1 << 2)
+#define CCM_CSCR_SPEN           (1 << 1)
+#define CCM_CSCR_MPEN           (1 << 0)
+
+/* i.MX27 TO 2+ */
+#define CCM_CSCR_ARM_SRC        (1 << 15)
+
+#define CCM_SPCTL1_LF           (1 << 15)
+#define CCM_SPCTL1_BRMO         (1 << 6)
+
+static const char *vpu_sel_clks[] = { "spll", "mpll_main2", };
+static const char *cpu_sel_clks[] = { "mpll_main2", "mpll", };
+static const char *clko_sel_clks[] = {
+	"ckil", "prem", "ckih", "ckih",
+	"ckih", "mpll", "spll", "cpu_div",
+	"ahb", "ipg", "per1_div", "per2_div",
+	"per3_div", "per4_div", "ssi1_div", "ssi2_div",
+	"nfc_div", "mshc_div", "vpu_div", "60m",
+	"32k", "usb_div", "dptc",
+};
+
+static const char *ssi_sel_clks[] = { "spll", "mpll", };
+
+enum mx27_clks {
+	dummy, ckih, ckil, mpll, spll, mpll_main2, ahb, ipg, nfc_div, per1_div,
+	per2_div, per3_div, per4_div, vpu_sel, vpu_div, usb_div, cpu_sel,
+	clko_sel, cpu_div, clko_div, ssi1_sel, ssi2_sel, ssi1_div, ssi2_div,
+	clko_en, ssi2_ipg_gate, ssi1_ipg_gate, slcdc_ipg_gate, sdhc3_ipg_gate,
+	sdhc2_ipg_gate, sdhc1_ipg_gate, scc_ipg_gate, sahara_ipg_gate,
+	rtc_ipg_gate, pwm_ipg_gate, owire_ipg_gate, lcdc_ipg_gate,
+	kpp_ipg_gate, iim_ipg_gate, i2c2_ipg_gate, i2c1_ipg_gate,
+	gpt6_ipg_gate, gpt5_ipg_gate, gpt4_ipg_gate, gpt3_ipg_gate,
+	gpt2_ipg_gate, gpt1_ipg_gate, gpio_ipg_gate, fec_ipg_gate,
+	emma_ipg_gate, dma_ipg_gate, cspi3_ipg_gate, cspi2_ipg_gate,
+	cspi1_ipg_gate, nfc_baud_gate, ssi2_baud_gate, ssi1_baud_gate,
+	vpu_baud_gate, per4_gate, per3_gate, per2_gate, per1_gate,
+	usb_ahb_gate, slcdc_ahb_gate, sahara_ahb_gate, lcdc_ahb_gate,
+	vpu_ahb_gate, fec_ahb_gate, emma_ahb_gate, emi_ahb_gate, dma_ahb_gate,
+	csi_ahb_gate, brom_ahb_gate, ata_ahb_gate, wdog_ipg_gate, usb_ipg_gate,
+	uart6_ipg_gate, uart5_ipg_gate, uart4_ipg_gate, uart3_ipg_gate,
+	uart2_ipg_gate, uart1_ipg_gate, clk_max
+};
+
+static struct clk *clk[clk_max];
+
+int __init mx27_clocks_init(unsigned long fref)
+{
+	int i;
+
+	clk[dummy] = imx_clk_fixed("dummy", 0);
+	clk[ckih] = imx_clk_fixed("ckih", fref);
+	clk[ckil] = imx_clk_fixed("ckil", 32768);
+	clk[mpll] = imx_clk_pllv1("mpll", "ckih", CCM_MPCTL0);
+	clk[spll] = imx_clk_pllv1("spll", "ckih", CCM_SPCTL0);
+	clk[mpll_main2] = imx_clk_fixed_factor("mpll_main2", "mpll", 2, 3);
+
+	if (mx27_revision() >= IMX_CHIP_REVISION_2_0) {
+		clk[ahb] = imx_clk_divider("ahb", "mpll_main2", CCM_CSCR, 8, 2);
+		clk[ipg] = imx_clk_fixed_factor("ipg", "ahb", 1, 2);
+	} else {
+		clk[ahb] = imx_clk_divider("ahb", "mpll_main2", CCM_CSCR, 9, 4);
+		clk[ipg] = imx_clk_divider("ipg", "ahb", CCM_CSCR, 8, 1);
+	}
+
+	clk[nfc_div] = imx_clk_divider("nfc_div", "ahb", CCM_PCDR0, 6, 4);
+	clk[per1_div] = imx_clk_divider("per1_div", "mpll_main2", CCM_PCDR1, 0, 6);
+	clk[per2_div] = imx_clk_divider("per2_div", "mpll_main2", CCM_PCDR1, 8, 6);
+	clk[per3_div] = imx_clk_divider("per3_div", "mpll_main2", CCM_PCDR1, 16, 6);
+	clk[per4_div] = imx_clk_divider("per4_div", "mpll_main2", CCM_PCDR1, 24, 6);
+	clk[vpu_sel] = imx_clk_mux("vpu_sel", CCM_CSCR, 21, 1, vpu_sel_clks, ARRAY_SIZE(vpu_sel_clks));
+	clk[vpu_div] = imx_clk_divider("vpu_div", "vpu_sel", CCM_PCDR0, 10, 3);
+	clk[usb_div] = imx_clk_divider("usb_div", "spll", CCM_CSCR, 28, 3);
+	clk[cpu_sel] = imx_clk_mux("cpu_sel", CCM_CSCR, 15, 1, cpu_sel_clks, ARRAY_SIZE(cpu_sel_clks));
+	clk[clko_sel] = imx_clk_mux("clko_sel", CCM_CCSR, 0, 5, clko_sel_clks, ARRAY_SIZE(clko_sel_clks));
+	if (mx27_revision() >= IMX_CHIP_REVISION_2_0)
+		clk[cpu_div] = imx_clk_divider("cpu_div", "cpu_sel", CCM_CSCR, 12, 2);
+	else
+		clk[cpu_div] = imx_clk_divider("cpu_div", "cpu_sel", CCM_CSCR, 13, 3);
+	clk[clko_div] = imx_clk_divider("clko_div", "clko_sel", CCM_PCDR0, 22, 3);
+	clk[ssi1_sel] = imx_clk_mux("ssi1_sel", CCM_CSCR, 22, 1, ssi_sel_clks, ARRAY_SIZE(ssi_sel_clks));
+	clk[ssi2_sel] = imx_clk_mux("ssi2_sel", CCM_CSCR, 23, 1, ssi_sel_clks, ARRAY_SIZE(ssi_sel_clks));
+	clk[ssi1_div] = imx_clk_divider("ssi1_div", "ssi1_sel", CCM_PCDR0, 16, 6);
+	clk[ssi2_div] = imx_clk_divider("ssi2_div", "ssi2_sel", CCM_PCDR0, 26, 3);
+	clk[clko_en] = imx_clk_gate("clko_en", "clko_div", CCM_PCCR0, 0);
+	clk[ssi2_ipg_gate] = imx_clk_gate("ssi2_ipg_gate", "ipg", CCM_PCCR0, 0);
+	clk[ssi1_ipg_gate] = imx_clk_gate("ssi1_ipg_gate", "ipg", CCM_PCCR0, 1);
+	clk[slcdc_ipg_gate] = imx_clk_gate("slcdc_ipg_gate", "ipg", CCM_PCCR0, 2);
+	clk[sdhc3_ipg_gate] = imx_clk_gate("sdhc3_ipg_gate", "ipg", CCM_PCCR0, 3);
+	clk[sdhc2_ipg_gate] = imx_clk_gate("sdhc2_ipg_gate", "ipg", CCM_PCCR0, 4);
+	clk[sdhc1_ipg_gate] = imx_clk_gate("sdhc1_ipg_gate", "ipg", CCM_PCCR0, 5);
+	clk[scc_ipg_gate] = imx_clk_gate("scc_ipg_gate", "ipg", CCM_PCCR0, 6);
+	clk[sahara_ipg_gate] = imx_clk_gate("sahara_ipg_gate", "ipg", CCM_PCCR0, 7);
+	clk[rtc_ipg_gate] = imx_clk_gate("rtc_ipg_gate", "ipg", CCM_PCCR0, 9);
+	clk[pwm_ipg_gate] = imx_clk_gate("pwm_ipg_gate", "ipg", CCM_PCCR0, 11);
+	clk[owire_ipg_gate] = imx_clk_gate("owire_ipg_gate", "ipg", CCM_PCCR0, 12);
+	clk[lcdc_ipg_gate] = imx_clk_gate("lcdc_ipg_gate", "ipg", CCM_PCCR0, 14);
+	clk[kpp_ipg_gate] = imx_clk_gate("kpp_ipg_gate", "ipg", CCM_PCCR0, 15);
+	clk[iim_ipg_gate] = imx_clk_gate("iim_ipg_gate", "ipg", CCM_PCCR0, 16);
+	clk[i2c2_ipg_gate] = imx_clk_gate("i2c2_ipg_gate", "ipg", CCM_PCCR0, 17);
+	clk[i2c1_ipg_gate] = imx_clk_gate("i2c1_ipg_gate", "ipg", CCM_PCCR0, 18);
+	clk[gpt6_ipg_gate] = imx_clk_gate("gpt6_ipg_gate", "ipg", CCM_PCCR0, 19);
+	clk[gpt5_ipg_gate] = imx_clk_gate("gpt5_ipg_gate", "ipg", CCM_PCCR0, 20);
+	clk[gpt4_ipg_gate] = imx_clk_gate("gpt4_ipg_gate", "ipg", CCM_PCCR0, 21);
+	clk[gpt3_ipg_gate] = imx_clk_gate("gpt3_ipg_gate", "ipg", CCM_PCCR0, 22);
+	clk[gpt2_ipg_gate] = imx_clk_gate("gpt2_ipg_gate", "ipg", CCM_PCCR0, 23);
+	clk[gpt1_ipg_gate] = imx_clk_gate("gpt1_ipg_gate", "ipg", CCM_PCCR0, 24);
+	clk[gpio_ipg_gate] = imx_clk_gate("gpio_ipg_gate", "ipg", CCM_PCCR0, 25);
+	clk[fec_ipg_gate] = imx_clk_gate("fec_ipg_gate", "ipg", CCM_PCCR0, 26);
+	clk[emma_ipg_gate] = imx_clk_gate("emma_ipg_gate", "ipg", CCM_PCCR0, 27);
+	clk[dma_ipg_gate] = imx_clk_gate("dma_ipg_gate", "ipg", CCM_PCCR0, 28);
+	clk[cspi3_ipg_gate] = imx_clk_gate("cspi3_ipg_gate", "ipg", CCM_PCCR0, 29);
+	clk[cspi2_ipg_gate] = imx_clk_gate("cspi2_ipg_gate", "ipg", CCM_PCCR0, 30);
+	clk[cspi1_ipg_gate] = imx_clk_gate("cspi1_ipg_gate", "ipg", CCM_PCCR0, 31);
+	clk[nfc_baud_gate] = imx_clk_gate("nfc_baud_gate", "nfc_div", CCM_PCCR1,  3);
+	clk[ssi2_baud_gate] = imx_clk_gate("ssi2_baud_gate", "ssi2_div", CCM_PCCR1,  4);
+	clk[ssi1_baud_gate] = imx_clk_gate("ssi1_baud_gate", "ssi1_div", CCM_PCCR1,  5);
+	clk[vpu_baud_gate] = imx_clk_gate("vpu_baud_gate", "vpu_div", CCM_PCCR1,  6);
+	clk[per4_gate] = imx_clk_gate("per4_gate", "per4_div", CCM_PCCR1,  7);
+	clk[per3_gate] = imx_clk_gate("per3_gate", "per3_div", CCM_PCCR1,  8);
+	clk[per2_gate] = imx_clk_gate("per2_gate", "per2_div", CCM_PCCR1,  9);
+	clk[per1_gate] = imx_clk_gate("per1_gate", "per1_div", CCM_PCCR1, 10);
+	clk[usb_ahb_gate] = imx_clk_gate("usb_ahb_gate", "ahb", CCM_PCCR1, 11);
+	clk[slcdc_ahb_gate] = imx_clk_gate("slcdc_ahb_gate", "ahb", CCM_PCCR1, 12);
+	clk[sahara_ahb_gate] = imx_clk_gate("sahara_ahb_gate", "ahb", CCM_PCCR1, 13);
+	clk[lcdc_ahb_gate] = imx_clk_gate("lcdc_ahb_gate", "ahb", CCM_PCCR1, 15);
+	clk[vpu_ahb_gate] = imx_clk_gate("vpu_ahb_gate", "ahb", CCM_PCCR1, 16);
+	clk[fec_ahb_gate] = imx_clk_gate("fec_ahb_gate", "ahb", CCM_PCCR1, 17);
+	clk[emma_ahb_gate] = imx_clk_gate("emma_ahb_gate", "ahb", CCM_PCCR1, 18);
+	clk[emi_ahb_gate] = imx_clk_gate("emi_ahb_gate", "ahb", CCM_PCCR1, 19);
+	clk[dma_ahb_gate] = imx_clk_gate("dma_ahb_gate", "ahb", CCM_PCCR1, 20);
+	clk[csi_ahb_gate] = imx_clk_gate("csi_ahb_gate", "ahb", CCM_PCCR1, 21);
+	clk[brom_ahb_gate] = imx_clk_gate("brom_ahb_gate", "ahb", CCM_PCCR1, 22);
+	clk[ata_ahb_gate] = imx_clk_gate("ata_ahb_gate", "ahb", CCM_PCCR1, 23);
+	clk[wdog_ipg_gate] = imx_clk_gate("wdog_ipg_gate", "ipg", CCM_PCCR1, 24);
+	clk[usb_ipg_gate] = imx_clk_gate("usb_ipg_gate", "ipg", CCM_PCCR1, 25);
+	clk[uart6_ipg_gate] = imx_clk_gate("uart6_ipg_gate", "ipg", CCM_PCCR1, 26);
+	clk[uart5_ipg_gate] = imx_clk_gate("uart5_ipg_gate", "ipg", CCM_PCCR1, 27);
+	clk[uart4_ipg_gate] = imx_clk_gate("uart4_ipg_gate", "ipg", CCM_PCCR1, 28);
+	clk[uart3_ipg_gate] = imx_clk_gate("uart3_ipg_gate", "ipg", CCM_PCCR1, 29);
+	clk[uart2_ipg_gate] = imx_clk_gate("uart2_ipg_gate", "ipg", CCM_PCCR1, 30);
+	clk[uart1_ipg_gate] = imx_clk_gate("uart1_ipg_gate", "ipg", CCM_PCCR1, 31);
+
+	for (i = 0; i < ARRAY_SIZE(clk); i++)
+		if (IS_ERR(clk[i]))
+			pr_err("i.MX27 clk %d: register failed with %ld\n",
+				i, PTR_ERR(clk[i]));
+
+	clk_register_clkdev(clk[uart1_ipg_gate], "ipg", "imx21-uart.0");
+	clk_register_clkdev(clk[per1_gate], "per", "imx21-uart.0");
+	clk_register_clkdev(clk[uart2_ipg_gate], "ipg", "imx21-uart.1");
+	clk_register_clkdev(clk[per1_gate], "per", "imx21-uart.1");
+	clk_register_clkdev(clk[uart3_ipg_gate], "ipg", "imx21-uart.2");
+	clk_register_clkdev(clk[per1_gate], "per", "imx21-uart.2");
+	clk_register_clkdev(clk[uart4_ipg_gate], "ipg", "imx21-uart.3");
+	clk_register_clkdev(clk[per1_gate], "per", "imx21-uart.3");
+	clk_register_clkdev(clk[uart5_ipg_gate], "ipg", "imx21-uart.4");
+	clk_register_clkdev(clk[per1_gate], "per", "imx21-uart.4");
+	clk_register_clkdev(clk[uart6_ipg_gate], "ipg", "imx21-uart.5");
+	clk_register_clkdev(clk[per1_gate], "per", "imx21-uart.5");
+	clk_register_clkdev(clk[gpt1_ipg_gate], "ipg", "imx-gpt.0");
+	clk_register_clkdev(clk[per1_gate], "per", "imx-gpt.0");
+	clk_register_clkdev(clk[gpt2_ipg_gate], "ipg", "imx-gpt.1");
+	clk_register_clkdev(clk[per1_gate], "per", "imx-gpt.1");
+	clk_register_clkdev(clk[gpt3_ipg_gate], "ipg", "imx-gpt.2");
+	clk_register_clkdev(clk[per1_gate], "per", "imx-gpt.2");
+	clk_register_clkdev(clk[gpt4_ipg_gate], "ipg", "imx-gpt.3");
+	clk_register_clkdev(clk[per1_gate], "per", "imx-gpt.3");
+	clk_register_clkdev(clk[gpt5_ipg_gate], "ipg", "imx-gpt.4");
+	clk_register_clkdev(clk[per1_gate], "per", "imx-gpt.4");
+	clk_register_clkdev(clk[gpt6_ipg_gate], "ipg", "imx-gpt.5");
+	clk_register_clkdev(clk[per1_gate], "per", "imx-gpt.5");
+	clk_register_clkdev(clk[pwm_ipg_gate], NULL, "mxc_pwm.0");
+	clk_register_clkdev(clk[per2_gate], "per", "mxc-mmc.0");
+	clk_register_clkdev(clk[sdhc1_ipg_gate], "ipg", "mxc-mmc.0");
+	clk_register_clkdev(clk[per2_gate], "per", "mxc-mmc.1");
+	clk_register_clkdev(clk[sdhc2_ipg_gate], "ipg", "mxc-mmc.1");
+	clk_register_clkdev(clk[per2_gate], "per", "mxc-mmc.2");
+	clk_register_clkdev(clk[sdhc2_ipg_gate], "ipg", "mxc-mmc.2");
+	clk_register_clkdev(clk[cspi1_ipg_gate], NULL, "imx27-cspi.0");
+	clk_register_clkdev(clk[cspi2_ipg_gate], NULL, "imx27-cspi.1");
+	clk_register_clkdev(clk[cspi3_ipg_gate], NULL, "imx27-cspi.2");
+	clk_register_clkdev(clk[per3_gate], "per", "imx-fb.0");
+	clk_register_clkdev(clk[lcdc_ipg_gate], "ipg", "imx-fb.0");
+	clk_register_clkdev(clk[lcdc_ahb_gate], "ahb", "imx-fb.0");
+	clk_register_clkdev(clk[csi_ahb_gate], NULL, "mx2-camera.0");
+	clk_register_clkdev(clk[usb_div], "per", "fsl-usb2-udc");
+	clk_register_clkdev(clk[usb_ipg_gate], "ipg", "fsl-usb2-udc");
+	clk_register_clkdev(clk[usb_ahb_gate], "ahb", "fsl-usb2-udc");
+	clk_register_clkdev(clk[usb_div], "per", "mxc-ehci.0");
+	clk_register_clkdev(clk[usb_ipg_gate], "ipg", "mxc-ehci.0");
+	clk_register_clkdev(clk[usb_ahb_gate], "ahb", "mxc-ehci.0");
+	clk_register_clkdev(clk[usb_div], "per", "mxc-ehci.1");
+	clk_register_clkdev(clk[usb_ipg_gate], "ipg", "mxc-ehci.1");
+	clk_register_clkdev(clk[usb_ahb_gate], "ahb", "mxc-ehci.1");
+	clk_register_clkdev(clk[usb_div], "per", "mxc-ehci.2");
+	clk_register_clkdev(clk[usb_ipg_gate], "ipg", "mxc-ehci.2");
+	clk_register_clkdev(clk[usb_ahb_gate], "ahb", "mxc-ehci.2");
+	clk_register_clkdev(clk[ssi1_ipg_gate], NULL, "imx-ssi.0");
+	clk_register_clkdev(clk[ssi2_ipg_gate], NULL, "imx-ssi.1");
+	clk_register_clkdev(clk[nfc_baud_gate], NULL, "mxc_nand.0");
+	clk_register_clkdev(clk[vpu_baud_gate], "per", "imx-vpu");
+	clk_register_clkdev(clk[vpu_ahb_gate], "ahb", "imx-vpu");
+	clk_register_clkdev(clk[dma_ahb_gate], "ahb", "imx-dma");
+	clk_register_clkdev(clk[dma_ipg_gate], "ipg", "imx-dma");
+	clk_register_clkdev(clk[fec_ipg_gate], "ipg", "imx27-fec.0");
+	clk_register_clkdev(clk[fec_ahb_gate], "ahb", "imx27-fec.0");
+	clk_register_clkdev(clk[wdog_ipg_gate], NULL, "imx2-wdt.0");
+	clk_register_clkdev(clk[i2c1_ipg_gate], NULL, "imx-i2c.0");
+	clk_register_clkdev(clk[i2c2_ipg_gate], NULL, "imx-i2c.1");
+	clk_register_clkdev(clk[owire_ipg_gate], NULL, "mxc_w1.0");
+	clk_register_clkdev(clk[kpp_ipg_gate], NULL, "imx-keypad");
+	clk_register_clkdev(clk[emma_ahb_gate], "ahb", "imx-emma");
+	clk_register_clkdev(clk[emma_ipg_gate], "ipg", "imx-emma");
+	clk_register_clkdev(clk[iim_ipg_gate], "iim", NULL);
+	clk_register_clkdev(clk[gpio_ipg_gate], "gpio", NULL);
+	clk_register_clkdev(clk[brom_ahb_gate], "brom", NULL);
+	clk_register_clkdev(clk[ata_ahb_gate], "ata", NULL);
+	clk_register_clkdev(clk[rtc_ipg_gate], "rtc", NULL);
+	clk_register_clkdev(clk[scc_ipg_gate], "scc", NULL);
+	clk_register_clkdev(clk[cpu_div], "cpu", NULL);
+	clk_register_clkdev(clk[emi_ahb_gate], "emi_ahb", NULL);
+	clk_register_clkdev(clk[ssi1_baud_gate], "bitrate", "imx-ssi.0");
+	clk_register_clkdev(clk[ssi2_baud_gate], "bitrate", "imx-ssi.1");
+
+	mxc_timer_init(MX27_IO_ADDRESS(MX27_GPT1_BASE_ADDR), MX27_INT_GPT1);
+
+	clk_prepare_enable(clk[emi_ahb_gate]);
+
+	return 0;
+}
+
+#ifdef CONFIG_OF
+int __init mx27_clocks_init_dt(void)
+{
+	struct device_node *np;
+	u32 fref = 26000000; /* default */
+
+	for_each_compatible_node(np, NULL, "fixed-clock") {
+		if (!of_device_is_compatible(np, "fsl,imx-osc26m"))
+			continue;
+
+		if (!of_property_read_u32(np, "clock-frequency", &fref))
+			break;
+	}
+
+	return mx27_clocks_init(fref);
+}
+#endif
diff --git a/arch/arm/mach-imx/clk-imx31.c b/arch/arm/mach-imx/clk-imx31.c
new file mode 100644
index 0000000..c9a06d8
--- /dev/null
+++ b/arch/arm/mach-imx/clk-imx31.c
@@ -0,0 +1,181 @@
+/*
+ * Copyright (C) 2012 Sascha Hauer <kernel@pengutronix.de>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version 2
+ * of the License, or (at your option) any later version.
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation.
+ */
+
+#include <linux/module.h>
+#include <linux/clk.h>
+#include <linux/clkdev.h>
+#include <linux/io.h>
+#include <linux/err.h>
+
+#include <mach/hardware.h>
+#include <mach/mx31.h>
+#include <mach/common.h>
+
+#include "clk.h"
+#include "crmregs-imx3.h"
+
+static const char *mcu_main_sel[] = { "spll", "mpll", };
+static const char *per_sel[] = { "per_div", "ipg", };
+static const char *csi_sel[] = { "upll", "spll", };
+static const char *fir_sel[] = { "mcu_main", "upll", "spll" };
+
+enum mx31_clks {
+	ckih, ckil, mpll, spll, upll, mcu_main, hsp, ahb, nfc, ipg, per_div,
+	per, csi, fir, csi_div, usb_div_pre, usb_div_post, fir_div_pre,
+	fir_div_post, sdhc1_gate, sdhc2_gate, gpt_gate, epit1_gate, epit2_gate,
+	iim_gate, ata_gate, sdma_gate, cspi3_gate, rng_gate, uart1_gate,
+	uart2_gate, ssi1_gate, i2c1_gate, i2c2_gate, i2c3_gate, hantro_gate,
+	mstick1_gate, mstick2_gate, csi_gate, rtc_gate, wdog_gate, pwm_gate,
+	sim_gate, ect_gate, usb_gate, kpp_gate, ipu_gate, uart3_gate,
+	uart4_gate, uart5_gate, owire_gate, ssi2_gate, cspi1_gate, cspi2_gate,
+	gacc_gate, emi_gate, rtic_gate, firi_gate, clk_max
+};
+
+static struct clk *clk[clk_max];
+
+int __init mx31_clocks_init(unsigned long fref)
+{
+	void __iomem *base = MX31_IO_ADDRESS(MX31_CCM_BASE_ADDR);
+	int i;
+
+	clk[ckih] = imx_clk_fixed("ckih", fref);
+	clk[ckil] = imx_clk_fixed("ckil", 32768);
+	clk[mpll] = imx_clk_pllv1("mpll", "ckih", base + MXC_CCM_MPCTL);
+	clk[spll] = imx_clk_pllv1("spll", "ckih", base + MXC_CCM_SRPCTL);
+	clk[upll] = imx_clk_pllv1("upll", "ckih", base + MXC_CCM_UPCTL);
+	clk[mcu_main] = imx_clk_mux("mcu_main", base + MXC_CCM_PMCR0, 31, 1, mcu_main_sel, ARRAY_SIZE(mcu_main_sel));
+	clk[hsp] = imx_clk_divider("hsp", "mcu_main", base + MXC_CCM_PDR0, 11, 3);
+	clk[ahb] = imx_clk_divider("ahb", "mcu_main", base + MXC_CCM_PDR0, 3, 3);
+	clk[nfc] = imx_clk_divider("nfc", "ahb", base + MXC_CCM_PDR0, 8, 3);
+	clk[ipg] = imx_clk_divider("ipg", "ahb", base + MXC_CCM_PDR0, 6, 2);
+	clk[per_div] = imx_clk_divider("per_div", "upll", base + MXC_CCM_PDR0, 16, 5);
+	clk[per] = imx_clk_mux("per", base + MXC_CCM_CCMR, 24, 1, per_sel, ARRAY_SIZE(per_sel));
+	clk[csi] = imx_clk_mux("csi_sel", base + MXC_CCM_CCMR, 25, 1, csi_sel, ARRAY_SIZE(csi_sel));
+	clk[fir] = imx_clk_mux("fir_sel", base + MXC_CCM_CCMR, 11, 2, fir_sel, ARRAY_SIZE(fir_sel));
+	clk[csi_div] = imx_clk_divider("csi_div", "csi_sel", base + MXC_CCM_PDR0, 23, 9);
+	clk[usb_div_pre] = imx_clk_divider("usb_div_pre", "upll", base + MXC_CCM_PDR1, 30, 2);
+	clk[usb_div_post] = imx_clk_divider("usb_div_post", "usb_div_pre", base + MXC_CCM_PDR1, 27, 3);
+	clk[fir_div_pre] = imx_clk_divider("fir_div_pre", "fir_sel", base + MXC_CCM_PDR1, 24, 3);
+	clk[fir_div_post] = imx_clk_divider("fir_div_post", "fir_div_pre", base + MXC_CCM_PDR1, 23, 6);
+	clk[sdhc1_gate] = imx_clk_gate2("sdhc1_gate", "per", base + MXC_CCM_CGR0, 0);
+	clk[sdhc2_gate] = imx_clk_gate2("sdhc2_gate", "per", base + MXC_CCM_CGR0, 2);
+	clk[gpt_gate] = imx_clk_gate2("gpt_gate", "per", base + MXC_CCM_CGR0, 4);
+	clk[epit1_gate] = imx_clk_gate2("epit1_gate", "per", base + MXC_CCM_CGR0, 6);
+	clk[epit2_gate] = imx_clk_gate2("epit2_gate", "per", base + MXC_CCM_CGR0, 8);
+	clk[iim_gate] = imx_clk_gate2("iim_gate", "ipg", base + MXC_CCM_CGR0, 10);
+	clk[ata_gate] = imx_clk_gate2("ata_gate", "ipg", base + MXC_CCM_CGR0, 12);
+	clk[sdma_gate] = imx_clk_gate2("sdma_gate", "ahb", base + MXC_CCM_CGR0, 14);
+	clk[cspi3_gate] = imx_clk_gate2("cspi3_gate", "ipg", base + MXC_CCM_CGR0, 16);
+	clk[rng_gate] = imx_clk_gate2("rng_gate", "ipg", base + MXC_CCM_CGR0, 18);
+	clk[uart1_gate] = imx_clk_gate2("uart1_gate", "per", base + MXC_CCM_CGR0, 20);
+	clk[uart2_gate] = imx_clk_gate2("uart2_gate", "per", base + MXC_CCM_CGR0, 22);
+	clk[ssi1_gate] = imx_clk_gate2("ssi1_gate", "spll", base + MXC_CCM_CGR0, 24);
+	clk[i2c1_gate] = imx_clk_gate2("i2c1_gate", "per", base + MXC_CCM_CGR0, 26);
+	clk[i2c2_gate] = imx_clk_gate2("i2c2_gate", "per", base + MXC_CCM_CGR0, 28);
+	clk[i2c3_gate] = imx_clk_gate2("i2c3_gate", "per", base + MXC_CCM_CGR0, 30);
+	clk[hantro_gate] = imx_clk_gate2("hantro_gate", "per", base + MXC_CCM_CGR1, 0);
+	clk[mstick1_gate] = imx_clk_gate2("mstick1_gate", "per", base + MXC_CCM_CGR1, 2);
+	clk[mstick2_gate] = imx_clk_gate2("mstick2_gate", "per", base + MXC_CCM_CGR1, 4);
+	clk[csi_gate] = imx_clk_gate2("csi_gate", "csi_div", base + MXC_CCM_CGR1, 6);
+	clk[rtc_gate] = imx_clk_gate2("rtc_gate", "ipg", base + MXC_CCM_CGR1, 8);
+	clk[wdog_gate] = imx_clk_gate2("wdog_gate", "ipg", base + MXC_CCM_CGR1, 10);
+	clk[pwm_gate] = imx_clk_gate2("pwm_gate", "per", base + MXC_CCM_CGR1, 12);
+	clk[sim_gate] = imx_clk_gate2("sim_gate", "per", base + MXC_CCM_CGR1, 14);
+	clk[ect_gate] = imx_clk_gate2("ect_gate", "per", base + MXC_CCM_CGR1, 16);
+	clk[usb_gate] = imx_clk_gate2("usb_gate", "ahb", base + MXC_CCM_CGR1, 18);
+	clk[kpp_gate] = imx_clk_gate2("kpp_gate", "ipg", base + MXC_CCM_CGR1, 20);
+	clk[ipu_gate] = imx_clk_gate2("ipu_gate", "hsp", base + MXC_CCM_CGR1, 22);
+	clk[uart3_gate] = imx_clk_gate2("uart3_gate", "per", base + MXC_CCM_CGR1, 24);
+	clk[uart4_gate] = imx_clk_gate2("uart4_gate", "per", base + MXC_CCM_CGR1, 26);
+	clk[uart5_gate] = imx_clk_gate2("uart5_gate", "per", base + MXC_CCM_CGR1, 28);
+	clk[owire_gate] = imx_clk_gate2("owire_gate", "per", base + MXC_CCM_CGR1, 30);
+	clk[ssi2_gate] = imx_clk_gate2("ssi2_gate", "spll", base + MXC_CCM_CGR2, 0);
+	clk[cspi1_gate] = imx_clk_gate2("cspi1_gate", "ipg", base + MXC_CCM_CGR2, 2);
+	clk[cspi2_gate] = imx_clk_gate2("cspi2_gate", "ipg", base + MXC_CCM_CGR2, 4);
+	clk[gacc_gate] = imx_clk_gate2("gacc_gate", "per", base + MXC_CCM_CGR2, 6);
+	clk[emi_gate] = imx_clk_gate2("emi_gate", "ahb", base + MXC_CCM_CGR2, 8);
+	clk[rtic_gate] = imx_clk_gate2("rtic_gate", "ahb", base + MXC_CCM_CGR2, 10);
+	clk[firi_gate] = imx_clk_gate2("firi_gate", "upll", base + MXC_CCM_CGR2, 12);
+
+	for (i = 0; i < ARRAY_SIZE(clk); i++)
+		if (IS_ERR(clk[i]))
+			pr_err("imx31 clk %d: register failed with %ld\n",
+				i, PTR_ERR(clk[i]));
+
+	clk_register_clkdev(clk[gpt_gate], "per", "imx-gpt.0");
+	clk_register_clkdev(clk[ipg], "ipg", "imx-gpt.0");
+	clk_register_clkdev(clk[cspi1_gate], NULL, "imx31-cspi.0");
+	clk_register_clkdev(clk[cspi2_gate], NULL, "imx31-cspi.1");
+	clk_register_clkdev(clk[cspi3_gate], NULL, "imx31-cspi.2");
+	clk_register_clkdev(clk[pwm_gate], "pwm", NULL);
+	clk_register_clkdev(clk[wdog_gate], NULL, "imx2-wdt.0");
+	clk_register_clkdev(clk[rtc_gate], "rtc", NULL);
+	clk_register_clkdev(clk[epit1_gate], "epit", NULL);
+	clk_register_clkdev(clk[epit2_gate], "epit", NULL);
+	clk_register_clkdev(clk[nfc], NULL, "mxc_nand.0");
+	clk_register_clkdev(clk[ipu_gate], NULL, "ipu-core");
+	clk_register_clkdev(clk[ipu_gate], NULL, "mx3_sdc_fb");
+	clk_register_clkdev(clk[kpp_gate], "kpp", NULL);
+	clk_register_clkdev(clk[usb_div_post], "per", "mxc-ehci.0");
+	clk_register_clkdev(clk[usb_gate], "ahb", "mxc-ehci.0");
+	clk_register_clkdev(clk[ipg], "ipg", "mxc-ehci.0");
+	clk_register_clkdev(clk[usb_div_post], "per", "mxc-ehci.1");
+	clk_register_clkdev(clk[usb_gate], "ahb", "mxc-ehci.1");
+	clk_register_clkdev(clk[ipg], "ipg", "mxc-ehci.1");
+	clk_register_clkdev(clk[usb_div_post], "per", "mxc-ehci.2");
+	clk_register_clkdev(clk[usb_gate], "ahb", "mxc-ehci.2");
+	clk_register_clkdev(clk[ipg], "ipg", "mxc-ehci.2");
+	clk_register_clkdev(clk[usb_div_post], "per", "fsl-usb2-udc");
+	clk_register_clkdev(clk[usb_gate], "ahb", "fsl-usb2-udc");
+	clk_register_clkdev(clk[ipg], "ipg", "fsl-usb2-udc");
+	clk_register_clkdev(clk[csi_gate], NULL, "mx3-camera.0");
+	/* i.mx31 has the i.mx21 type uart */
+	clk_register_clkdev(clk[uart1_gate], "per", "imx21-uart.0");
+	clk_register_clkdev(clk[ipg], "ipg", "imx21-uart.0");
+	clk_register_clkdev(clk[uart2_gate], "per", "imx21-uart.1");
+	clk_register_clkdev(clk[ipg], "ipg", "imx21-uart.1");
+	clk_register_clkdev(clk[uart3_gate], "per", "imx21-uart.2");
+	clk_register_clkdev(clk[ipg], "ipg", "imx21-uart.2");
+	clk_register_clkdev(clk[uart4_gate], "per", "imx21-uart.3");
+	clk_register_clkdev(clk[ipg], "ipg", "imx21-uart.3");
+	clk_register_clkdev(clk[uart5_gate], "per", "imx21-uart.4");
+	clk_register_clkdev(clk[ipg], "ipg", "imx21-uart.4");
+	clk_register_clkdev(clk[i2c1_gate], NULL, "imx-i2c.0");
+	clk_register_clkdev(clk[i2c2_gate], NULL, "imx-i2c.1");
+	clk_register_clkdev(clk[i2c3_gate], NULL, "imx-i2c.2");
+	clk_register_clkdev(clk[owire_gate], NULL, "mxc_w1.0");
+	clk_register_clkdev(clk[sdhc1_gate], NULL, "mxc-mmc.0");
+	clk_register_clkdev(clk[sdhc2_gate], NULL, "mxc-mmc.1");
+	clk_register_clkdev(clk[ssi1_gate], NULL, "imx-ssi.0");
+	clk_register_clkdev(clk[ssi2_gate], NULL, "imx-ssi.1");
+	clk_register_clkdev(clk[firi_gate], "firi", NULL);
+	clk_register_clkdev(clk[ata_gate], NULL, "pata_imx");
+	clk_register_clkdev(clk[rtic_gate], "rtic", NULL);
+	clk_register_clkdev(clk[rng_gate], "rng", NULL);
+	clk_register_clkdev(clk[sdma_gate], NULL, "imx31-sdma");
+	clk_register_clkdev(clk[iim_gate], "iim", NULL);
+
+	clk_set_parent(clk[csi], clk[upll]);
+	clk_prepare_enable(clk[emi_gate]);
+	clk_prepare_enable(clk[iim_gate]);
+	mx31_revision();
+	clk_disable_unprepare(clk[iim_gate]);
+
+	mxc_timer_init(MX31_IO_ADDRESS(MX31_GPT1_BASE_ADDR), MX31_INT_GPT);
+
+	return 0;
+}
diff --git a/arch/arm/mach-imx/clk-imx35.c b/arch/arm/mach-imx/clk-imx35.c
new file mode 100644
index 0000000..920a8cc
--- /dev/null
+++ b/arch/arm/mach-imx/clk-imx35.c
@@ -0,0 +1,276 @@
+/*
+ * Copyright (C) 2012 Sascha Hauer, Pengutronix <s.hauer@pengutronix.de>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ */
+#include <linux/mm.h>
+#include <linux/delay.h>
+#include <linux/clk.h>
+#include <linux/io.h>
+#include <linux/clkdev.h>
+#include <linux/of.h>
+#include <linux/err.h>
+
+#include <mach/hardware.h>
+#include <mach/common.h>
+
+#include "crmregs-imx3.h"
+#include "clk.h"
+
+struct arm_ahb_div {
+	unsigned char arm, ahb, sel;
+};
+
+static struct arm_ahb_div clk_consumer[] = {
+	{ .arm = 1, .ahb = 4, .sel = 0},
+	{ .arm = 1, .ahb = 3, .sel = 1},
+	{ .arm = 2, .ahb = 2, .sel = 0},
+	{ .arm = 0, .ahb = 0, .sel = 0},
+	{ .arm = 0, .ahb = 0, .sel = 0},
+	{ .arm = 0, .ahb = 0, .sel = 0},
+	{ .arm = 4, .ahb = 1, .sel = 0},
+	{ .arm = 1, .ahb = 5, .sel = 0},
+	{ .arm = 1, .ahb = 8, .sel = 0},
+	{ .arm = 1, .ahb = 6, .sel = 1},
+	{ .arm = 2, .ahb = 4, .sel = 0},
+	{ .arm = 0, .ahb = 0, .sel = 0},
+	{ .arm = 0, .ahb = 0, .sel = 0},
+	{ .arm = 0, .ahb = 0, .sel = 0},
+	{ .arm = 4, .ahb = 2, .sel = 0},
+	{ .arm = 0, .ahb = 0, .sel = 0},
+};
+
+static char hsp_div_532[] = { 4, 8, 3, 0 };
+static char hsp_div_400[] = { 3, 6, 3, 0 };
+
+static const char *std_sel[] = {"ppll", "arm"};
+static const char *ipg_per_sel[] = {"ahb_per_div", "arm_per_div"};
+
+enum mx35_clks {
+	ckih, mpll, ppll, mpll_075, arm, hsp, hsp_div, hsp_sel, ahb, ipg,
+	arm_per_div, ahb_per_div, ipg_per, uart_sel, uart_div, esdhc_sel,
+	esdhc1_div, esdhc2_div, esdhc3_div, spdif_sel, spdif_div_pre,
+	spdif_div_post, ssi_sel, ssi1_div_pre, ssi1_div_post, ssi2_div_pre,
+	ssi2_div_post, usb_sel, usb_div, nfc_div, asrc_gate, pata_gate,
+	audmux_gate, can1_gate, can2_gate, cspi1_gate, cspi2_gate, ect_gate,
+	edio_gate, emi_gate, epit1_gate, epit2_gate, esai_gate, esdhc1_gate,
+	esdhc2_gate, esdhc3_gate, fec_gate, gpio1_gate, gpio2_gate, gpio3_gate,
+	gpt_gate, i2c1_gate, i2c2_gate, i2c3_gate, iomuxc_gate, ipu_gate,
+	kpp_gate, mlb_gate, mshc_gate, owire_gate, pwm_gate, rngc_gate,
+	rtc_gate, rtic_gate, scc_gate, sdma_gate, spba_gate, spdif_gate,
+	ssi1_gate, ssi2_gate, uart1_gate, uart2_gate, uart3_gate, usbotg_gate,
+	wdog_gate, max_gate, admux_gate, csi_gate, iim_gate, gpu2d_gate,
+	clk_max
+};
+
+static struct clk *clk[clk_max];
+
+int __init mx35_clocks_init(void)
+{
+	void __iomem *base = MX35_IO_ADDRESS(MX35_CCM_BASE_ADDR);
+	u32 pdr0, consumer_sel, hsp_sel;
+	struct arm_ahb_div *aad;
+	char *hsp_div;
+	int i;
+
+	pdr0 = __raw_readl(base + MXC_CCM_PDR0);
+	consumer_sel = (pdr0 >> 16) & 0xf;
+	aad = &clk_consumer[consumer_sel];
+	if (!aad->arm) {
+		pr_err("i.MX35 clk: illegal consumer mux selection 0x%x\n", consumer_sel);
+		/*
+		 * We are basically stuck. Continue with a default entry and hope we
+		 * get far enough to actually show the above message
+		 */
+		aad = &clk_consumer[0];
+	}
+
+	clk[ckih] = imx_clk_fixed("ckih", 24000000);
+	clk[mpll] = imx_clk_pllv1("mpll", "ckih", base + MX35_CCM_MPCTL);
+	clk[ppll] = imx_clk_pllv1("ppll", "ckih", base + MX35_CCM_PPCTL);
+
+	clk[mpll_075] = imx_clk_fixed_factor("mpll_075", "mpll", 3, 4);
+
+	if (aad->sel)
+		clk[arm] = imx_clk_fixed_factor("arm", "mpll_075", 1, aad->arm);
+	else
+		clk[arm] = imx_clk_fixed_factor("arm", "mpll", 1, aad->arm);
+
+	if (clk_get_rate(clk[arm]) > 400000000)
+		hsp_div = hsp_div_532;
+	else
+		hsp_div = hsp_div_400;
+
+	hsp_sel = (pdr0 >> 20) & 0x3;
+	if (!hsp_div[hsp_sel]) {
+		pr_err("i.MX35 clk: illegal hsp clk selection 0x%x\n", hsp_sel);
+		hsp_sel = 0;
+	}
+
+	clk[hsp] = imx_clk_fixed_factor("hsp", "arm", 1, hsp_div[hsp_sel]);
+
+	clk[ahb] = imx_clk_fixed_factor("ahb", "arm", 1, aad->ahb);
+	clk[ipg] = imx_clk_fixed_factor("ipg", "ahb", 1, 2);
+
+	clk[arm_per_div] = imx_clk_divider("arm_per_div", "arm", base + MX35_CCM_PDR4, 16, 6);
+	clk[ahb_per_div] = imx_clk_divider("ahb_per_div", "ahb", base + MXC_CCM_PDR0, 12, 3);
+	clk[ipg_per] = imx_clk_mux("ipg_per", base + MXC_CCM_PDR0, 26, 1, ipg_per_sel, ARRAY_SIZE(ipg_per_sel));
+
+	clk[uart_sel] = imx_clk_mux("uart_sel", base + MX35_CCM_PDR3, 14, 1, std_sel, ARRAY_SIZE(std_sel));
+	clk[uart_div] = imx_clk_divider("uart_div", "uart_sel", base + MX35_CCM_PDR4, 10, 6);
+
+	clk[esdhc_sel] = imx_clk_mux("esdhc_sel", base + MX35_CCM_PDR4, 9, 1, std_sel, ARRAY_SIZE(std_sel));
+	clk[esdhc1_div] = imx_clk_divider("esdhc1_div", "esdhc_sel", base + MX35_CCM_PDR3, 0, 6);
+	clk[esdhc2_div] = imx_clk_divider("esdhc2_div", "esdhc_sel", base + MX35_CCM_PDR3, 8, 6);
+	clk[esdhc3_div] = imx_clk_divider("esdhc3_div", "esdhc_sel", base + MX35_CCM_PDR3, 16, 6);
+
+	clk[spdif_sel] = imx_clk_mux("spdif_sel", base + MX35_CCM_PDR3, 22, 1, std_sel, ARRAY_SIZE(std_sel));
+	clk[spdif_div_pre] = imx_clk_divider("spdif_div_pre", "spdif_sel", base + MX35_CCM_PDR3, 29, 3); /* divide by 1 not allowed */
+	clk[spdif_div_post] = imx_clk_divider("spdif_div_post", "spdif_div_pre", base + MX35_CCM_PDR3, 23, 6);
+
+	clk[ssi_sel] = imx_clk_mux("ssi_sel", base + MX35_CCM_PDR2, 6, 1, std_sel, ARRAY_SIZE(std_sel));
+	clk[ssi1_div_pre] = imx_clk_divider("ssi1_div_pre", "ssi_sel", base + MX35_CCM_PDR2, 24, 3);
+	clk[ssi1_div_post] = imx_clk_divider("ssi1_div_post", "ssi1_div_pre", base + MX35_CCM_PDR2, 0, 6);
+	clk[ssi2_div_pre] = imx_clk_divider("ssi2_div_pre", "ssi_sel", base + MX35_CCM_PDR2, 27, 3);
+	clk[ssi2_div_post] = imx_clk_divider("ssi2_div_post", "ssi2_div_pre", base + MX35_CCM_PDR2, 8, 6);
+
+	clk[usb_sel] = imx_clk_mux("usb_sel", base + MX35_CCM_PDR4, 9, 1, std_sel, ARRAY_SIZE(std_sel));
+	clk[usb_div] = imx_clk_divider("usb_div", "usb_sel", base + MX35_CCM_PDR4, 22, 6);
+
+	clk[nfc_div] = imx_clk_divider("nfc_div", "ahb", base + MX35_CCM_PDR4, 28, 4);
+
+	clk[asrc_gate] = imx_clk_gate2("asrc_gate", "ipg", base + MX35_CCM_CGR0,  0);
+	clk[pata_gate] = imx_clk_gate2("pata_gate", "ipg", base + MX35_CCM_CGR0,  2);
+	clk[audmux_gate] = imx_clk_gate2("audmux_gate", "ipg", base + MX35_CCM_CGR0,  4);
+	clk[can1_gate] = imx_clk_gate2("can1_gate", "ipg", base + MX35_CCM_CGR0,  6);
+	clk[can2_gate] = imx_clk_gate2("can2_gate", "ipg", base + MX35_CCM_CGR0,  8);
+	clk[cspi1_gate] = imx_clk_gate2("cspi1_gate", "ipg", base + MX35_CCM_CGR0, 10);
+	clk[cspi2_gate] = imx_clk_gate2("cspi2_gate", "ipg", base + MX35_CCM_CGR0, 12);
+	clk[ect_gate] = imx_clk_gate2("ect_gate", "ipg", base + MX35_CCM_CGR0, 14);
+	clk[edio_gate] = imx_clk_gate2("edio_gate",   "ipg", base + MX35_CCM_CGR0, 16);
+	clk[emi_gate] = imx_clk_gate2("emi_gate", "ipg", base + MX35_CCM_CGR0, 18);
+	clk[epit1_gate] = imx_clk_gate2("epit1_gate", "ipg", base + MX35_CCM_CGR0, 20);
+	clk[epit2_gate] = imx_clk_gate2("epit2_gate", "ipg", base + MX35_CCM_CGR0, 22);
+	clk[esai_gate] = imx_clk_gate2("esai_gate",   "ipg", base + MX35_CCM_CGR0, 24);
+	clk[esdhc1_gate] = imx_clk_gate2("esdhc1_gate", "esdhc1_div", base + MX35_CCM_CGR0, 26);
+	clk[esdhc2_gate] = imx_clk_gate2("esdhc2_gate", "esdhc2_div", base + MX35_CCM_CGR0, 28);
+	clk[esdhc3_gate] = imx_clk_gate2("esdhc3_gate", "esdhc3_div", base + MX35_CCM_CGR0, 30);
+
+	clk[fec_gate] = imx_clk_gate2("fec_gate", "ipg", base + MX35_CCM_CGR1,  0);
+	clk[gpio1_gate] = imx_clk_gate2("gpio1_gate", "ipg", base + MX35_CCM_CGR1,  2);
+	clk[gpio2_gate] = imx_clk_gate2("gpio2_gate", "ipg", base + MX35_CCM_CGR1,  4);
+	clk[gpio3_gate] = imx_clk_gate2("gpio3_gate", "ipg", base + MX35_CCM_CGR1,  6);
+	clk[gpt_gate] = imx_clk_gate2("gpt_gate", "ipg", base + MX35_CCM_CGR1,  8);
+	clk[i2c1_gate] = imx_clk_gate2("i2c1_gate", "ipg_per", base + MX35_CCM_CGR1, 10);
+	clk[i2c2_gate] = imx_clk_gate2("i2c2_gate", "ipg_per", base + MX35_CCM_CGR1, 12);
+	clk[i2c3_gate] = imx_clk_gate2("i2c3_gate", "ipg_per", base + MX35_CCM_CGR1, 14);
+	clk[iomuxc_gate] = imx_clk_gate2("iomuxc_gate", "ipg", base + MX35_CCM_CGR1, 16);
+	clk[ipu_gate] = imx_clk_gate2("ipu_gate", "hsp", base + MX35_CCM_CGR1, 18);
+	clk[kpp_gate] = imx_clk_gate2("kpp_gate", "ipg", base + MX35_CCM_CGR1, 20);
+	clk[mlb_gate] = imx_clk_gate2("mlb_gate", "ahb", base + MX35_CCM_CGR1, 22);
+	clk[mshc_gate] = imx_clk_gate2("mshc_gate", "dummy", base + MX35_CCM_CGR1, 24);
+	clk[owire_gate] = imx_clk_gate2("owire_gate", "ipg_per", base + MX35_CCM_CGR1, 26);
+	clk[pwm_gate] = imx_clk_gate2("pwm_gate", "ipg_per", base + MX35_CCM_CGR1, 28);
+	clk[rngc_gate] = imx_clk_gate2("rngc_gate", "ipg", base + MX35_CCM_CGR1, 30);
+
+	clk[rtc_gate] = imx_clk_gate2("rtc_gate", "ipg", base + MX35_CCM_CGR2,  0);
+	clk[rtic_gate] = imx_clk_gate2("rtic_gate", "ahb", base + MX35_CCM_CGR2,  2);
+	clk[scc_gate] = imx_clk_gate2("scc_gate", "ipg", base + MX35_CCM_CGR2,  4);
+	clk[sdma_gate] = imx_clk_gate2("sdma_gate", "ahb", base + MX35_CCM_CGR2,  6);
+	clk[spba_gate] = imx_clk_gate2("spba_gate", "ipg", base + MX35_CCM_CGR2,  8);
+	clk[spdif_gate] = imx_clk_gate2("spdif_gate", "spdif_div_post", base + MX35_CCM_CGR2, 10);
+	clk[ssi1_gate] = imx_clk_gate2("ssi1_gate", "ssi1_div_post", base + MX35_CCM_CGR2, 12);
+	clk[ssi2_gate] = imx_clk_gate2("ssi2_gate", "ssi2_div_post", base + MX35_CCM_CGR2, 14);
+	clk[uart1_gate] = imx_clk_gate2("uart1_gate", "uart_div", base + MX35_CCM_CGR2, 16);
+	clk[uart2_gate] = imx_clk_gate2("uart2_gate", "uart_div", base + MX35_CCM_CGR2, 18);
+	clk[uart3_gate] = imx_clk_gate2("uart3_gate", "uart_div", base + MX35_CCM_CGR2, 20);
+	clk[usbotg_gate] = imx_clk_gate2("usbotg_gate", "ahb", base + MX35_CCM_CGR2, 22);
+	clk[wdog_gate] = imx_clk_gate2("wdog_gate", "ipg", base + MX35_CCM_CGR2, 24);
+	clk[max_gate] = imx_clk_gate2("max_gate", "dummy", base + MX35_CCM_CGR2, 26);
+	clk[admux_gate] = imx_clk_gate2("admux_gate", "ipg", base + MX35_CCM_CGR2, 30);
+
+	clk[csi_gate] = imx_clk_gate2("csi_gate", "ipg", base + MX35_CCM_CGR3,  0);
+	clk[iim_gate] = imx_clk_gate2("iim_gate", "ipg", base + MX35_CCM_CGR3,  2);
+	clk[gpu2d_gate] = imx_clk_gate2("gpu2d_gate", "ahb", base + MX35_CCM_CGR3,  4);
+
+	for (i = 0; i < ARRAY_SIZE(clk); i++)
+		if (IS_ERR(clk[i]))
+			pr_err("i.MX35 clk %d: register failed with %ld\n",
+				i, PTR_ERR(clk[i]));
+
+
+	clk_register_clkdev(clk[pata_gate], NULL, "pata_imx");
+	clk_register_clkdev(clk[can1_gate], NULL, "flexcan.0");
+	clk_register_clkdev(clk[can2_gate], NULL, "flexcan.1");
+	clk_register_clkdev(clk[cspi1_gate], "per", "imx35-cspi.0");
+	clk_register_clkdev(clk[cspi1_gate], "ipg", "imx35-cspi.0");
+	clk_register_clkdev(clk[cspi2_gate], "per", "imx35-cspi.1");
+	clk_register_clkdev(clk[cspi2_gate], "ipg", "imx35-cspi.1");
+	clk_register_clkdev(clk[epit1_gate], NULL, "imx-epit.0");
+	clk_register_clkdev(clk[epit2_gate], NULL, "imx-epit.1");
+	clk_register_clkdev(clk[esdhc1_gate], "per", "sdhci-esdhc-imx35.0");
+	clk_register_clkdev(clk[ipg], "ipg", "sdhci-esdhc-imx35.0");
+	clk_register_clkdev(clk[ahb], "ahb", "sdhci-esdhc-imx35.0");
+	clk_register_clkdev(clk[esdhc2_gate], "per", "sdhci-esdhc-imx35.1");
+	clk_register_clkdev(clk[ipg], "ipg", "sdhci-esdhc-imx35.1");
+	clk_register_clkdev(clk[ahb], "ahb", "sdhci-esdhc-imx35.1");
+	clk_register_clkdev(clk[esdhc3_gate], "per", "sdhci-esdhc-imx35.2");
+	clk_register_clkdev(clk[ipg], "ipg", "sdhci-esdhc-imx35.2");
+	clk_register_clkdev(clk[ahb], "ahb", "sdhci-esdhc-imx35.2");
+	/* i.mx35 has the i.mx27 type fec */
+	clk_register_clkdev(clk[fec_gate], NULL, "imx27-fec.0");
+	clk_register_clkdev(clk[gpt_gate], "per", "imx-gpt.0");
+	clk_register_clkdev(clk[ipg], "ipg", "imx-gpt.0");
+	clk_register_clkdev(clk[i2c1_gate], NULL, "imx-i2c.0");
+	clk_register_clkdev(clk[i2c2_gate], NULL, "imx-i2c.1");
+	clk_register_clkdev(clk[i2c3_gate], NULL, "imx-i2c.2");
+	clk_register_clkdev(clk[ipu_gate], NULL, "ipu-core");
+	clk_register_clkdev(clk[ipu_gate], NULL, "mx3_sdc_fb");
+	clk_register_clkdev(clk[owire_gate], NULL, "mxc_w1");
+	clk_register_clkdev(clk[sdma_gate], NULL, "imx35-sdma");
+	clk_register_clkdev(clk[ipg], "ipg", "imx-ssi.0");
+	clk_register_clkdev(clk[ssi1_div_post], "per", "imx-ssi.0");
+	clk_register_clkdev(clk[ipg], "ipg", "imx-ssi.1");
+	clk_register_clkdev(clk[ssi2_div_post], "per", "imx-ssi.1");
+	/* i.mx35 has the i.mx21 type uart */
+	clk_register_clkdev(clk[uart1_gate], "per", "imx21-uart.0");
+	clk_register_clkdev(clk[ipg], "ipg", "imx21-uart.0");
+	clk_register_clkdev(clk[uart2_gate], "per", "imx21-uart.1");
+	clk_register_clkdev(clk[ipg], "ipg", "imx21-uart.1");
+	clk_register_clkdev(clk[uart3_gate], "per", "imx21-uart.2");
+	clk_register_clkdev(clk[ipg], "ipg", "imx21-uart.2");
+	clk_register_clkdev(clk[usb_div], "per", "mxc-ehci.0");
+	clk_register_clkdev(clk[ipg], "ipg", "mxc-ehci.0");
+	clk_register_clkdev(clk[usbotg_gate], "ahb", "mxc-ehci.0");
+	clk_register_clkdev(clk[usb_div], "per", "mxc-ehci.1");
+	clk_register_clkdev(clk[ipg], "ipg", "mxc-ehci.1");
+	clk_register_clkdev(clk[usbotg_gate], "ahb", "mxc-ehci.1");
+	clk_register_clkdev(clk[usb_div], "per", "mxc-ehci.2");
+	clk_register_clkdev(clk[ipg], "ipg", "mxc-ehci.2");
+	clk_register_clkdev(clk[usbotg_gate], "ahb", "mxc-ehci.2");
+	clk_register_clkdev(clk[usb_div], "per", "fsl-usb2-udc");
+	clk_register_clkdev(clk[ipg], "ipg", "fsl-usb2-udc");
+	clk_register_clkdev(clk[usbotg_gate], "ahb", "fsl-usb2-udc");
+	clk_register_clkdev(clk[wdog_gate], NULL, "imx2-wdt.0");
+	clk_register_clkdev(clk[nfc_div], NULL, "mxc_nand.0");
+
+	clk_prepare_enable(clk[spba_gate]);
+	clk_prepare_enable(clk[gpio1_gate]);
+	clk_prepare_enable(clk[gpio2_gate]);
+	clk_prepare_enable(clk[gpio3_gate]);
+	clk_prepare_enable(clk[iim_gate]);
+	clk_prepare_enable(clk[emi_gate]);
+
+	imx_print_silicon_rev("i.MX35", mx35_revision());
+
+#ifdef CONFIG_MXC_USE_EPIT
+	epit_timer_init(MX35_IO_ADDRESS(MX35_EPIT1_BASE_ADDR), MX35_INT_EPIT1);
+#else
+	mxc_timer_init(MX35_IO_ADDRESS(MX35_GPT1_BASE_ADDR), MX35_INT_GPT);
+#endif
+
+	return 0;
+}
diff --git a/arch/arm/mach-imx/clk-imx51-imx53.c b/arch/arm/mach-imx/clk-imx51-imx53.c
new file mode 100644
index 0000000..a2200c7
--- /dev/null
+++ b/arch/arm/mach-imx/clk-imx51-imx53.c
@@ -0,0 +1,504 @@
+/*
+ * Copyright (C) 2011 Sascha Hauer, Pengutronix <s.hauer@pengutronix.de>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ */
+#include <linux/mm.h>
+#include <linux/delay.h>
+#include <linux/clk.h>
+#include <linux/io.h>
+#include <linux/clkdev.h>
+#include <linux/of.h>
+#include <linux/err.h>
+
+#include <mach/hardware.h>
+#include <mach/common.h>
+
+#include "crm-regs-imx5.h"
+#include "clk.h"
+
+/* Low-power Audio Playback Mode clock */
+static const char *lp_apm_sel[] = { "osc", };
+
+/* This is used multiple times */
+static const char *standard_pll_sel[] = { "pll1_sw", "pll2_sw", "pll3_sw", "lp_apm", };
+static const char *periph_apm_sel[] = { "pll1_sw", "pll3_sw", "lp_apm", };
+static const char *main_bus_sel[] = { "pll2_sw", "periph_apm", };
+static const char *per_lp_apm_sel[] = { "main_bus", "lp_apm", };
+static const char *per_root_sel[] = { "per_podf", "ipg", };
+static const char *esdhc_c_sel[] = { "esdhc_a_podf", "esdhc_b_podf", };
+static const char *esdhc_d_sel[] = { "esdhc_a_podf", "esdhc_b_podf", };
+static const char *ssi_apm_sels[] = { "ckih1", "lp_apm", "ckih2", };
+static const char *ssi_clk_sels[] = { "pll1_sw", "pll2_sw", "pll3_sw", "ssi_apm", };
+static const char *ssi3_clk_sels[] = { "ssi1_root_gate", "ssi2_root_gate", };
+static const char *ssi_ext1_com_sels[] = { "ssi_ext1_podf", "ssi1_root_gate", };
+static const char *ssi_ext2_com_sels[] = { "ssi_ext2_podf", "ssi2_root_gate", };
+static const char *emi_slow_sel[] = { "main_bus", "ahb", };
+static const char *usb_phy_sel_str[] = { "osc", "usb_phy_podf", };
+static const char *mx51_ipu_di0_sel[] = { "di_pred", "osc", "ckih1", "tve_di", };
+static const char *mx53_ipu_di0_sel[] = { "di_pred", "osc", "ckih1", "di_pll4_podf", "dummy", "ldb_di0", };
+static const char *mx53_ldb_di0_sel[] = { "pll3_sw", "pll4_sw", };
+static const char *mx51_ipu_di1_sel[] = { "di_pred", "osc", "ckih1", "tve_di", "ipp_di1", };
+static const char *mx53_ipu_di1_sel[] = { "di_pred", "osc", "ckih1", "tve_di", "ipp_di1", "ldb_di1", };
+static const char *mx53_ldb_di1_sel[] = { "pll3_sw", "pll4_sw", };
+static const char *mx51_tve_ext_sel[] = { "osc", "ckih1", };
+static const char *mx53_tve_ext_sel[] = { "pll4_sw", "ckih1", };
+static const char *tve_sel[] = { "tve_pred", "tve_ext_sel", };
+static const char *ipu_sel[] = { "axi_a", "axi_b", "emi_slow_gate", "ahb", };
+static const char *vpu_sel[] = { "axi_a", "axi_b", "emi_slow_gate", "ahb", };
+
+enum imx5_clks {
+	dummy, ckil, osc, ckih1, ckih2, ahb, ipg, axi_a, axi_b, uart_pred,
+	uart_root, esdhc_a_pred, esdhc_b_pred, esdhc_c_s, esdhc_d_s,
+	emi_sel, emi_slow_podf, nfc_podf, ecspi_pred, ecspi_podf, usboh3_pred,
+	usboh3_podf, usb_phy_pred, usb_phy_podf, cpu_podf, di_pred, tve_di,
+	tve_s, uart1_ipg_gate, uart1_per_gate, uart2_ipg_gate,
+	uart2_per_gate, uart3_ipg_gate, uart3_per_gate, i2c1_gate, i2c2_gate,
+	gpt_ipg_gate, pwm1_ipg_gate, pwm1_hf_gate, pwm2_ipg_gate, pwm2_hf_gate,
+	gpt_gate, fec_gate, usboh3_per_gate, esdhc1_ipg_gate, esdhc2_ipg_gate,
+	esdhc3_ipg_gate, esdhc4_ipg_gate, ssi1_ipg_gate, ssi2_ipg_gate,
+	ssi3_ipg_gate, ecspi1_ipg_gate, ecspi1_per_gate, ecspi2_ipg_gate,
+	ecspi2_per_gate, cspi_ipg_gate, sdma_gate, emi_slow_gate, ipu_s,
+	ipu_gate, nfc_gate, ipu_di1_gate, vpu_s, vpu_gate,
+	vpu_reference_gate, uart4_ipg_gate, uart4_per_gate, uart5_ipg_gate,
+	uart5_per_gate, tve_gate, tve_pred, esdhc1_per_gate, esdhc2_per_gate,
+	esdhc3_per_gate, esdhc4_per_gate, usb_phy_gate, hsi2c_gate,
+	mipi_hsc1_gate, mipi_hsc2_gate, mipi_esc_gate, mipi_hsp_gate,
+	ldb_di1_div_3_5, ldb_di1_div, ldb_di0_div_3_5, ldb_di0_div,
+	ldb_di1_gate, can2_serial_gate, can2_ipg_gate, i2c3_gate, lp_apm,
+	periph_apm, main_bus, ahb_max, aips_tz1, aips_tz2, tmax1, tmax2,
+	tmax3, spba, uart_sel, esdhc_a_sel, esdhc_b_sel, esdhc_a_podf,
+	esdhc_b_podf, ecspi_sel, usboh3_sel, usb_phy_sel, iim_gate,
+	usboh3_gate, emi_fast_gate, ipu_di0_gate, gpc_dvfs, pll1_sw, pll2_sw,
+	pll3_sw, ipu_di0_sel, ipu_di1_sel, tve_ext_sel, mx51_mipi, pll4_sw,
+	ldb_di1_sel, di_pll4_podf, ldb_di0_sel, ldb_di0_gate, usb_phy1_gate,
+	usb_phy2_gate, per_lp_apm, per_pred1, per_pred2, per_podf, per_root,
+	ssi_apm, ssi1_root_sel, ssi2_root_sel, ssi3_root_sel, ssi_ext1_sel,
+	ssi_ext2_sel, ssi_ext1_com_sel, ssi_ext2_com_sel, ssi1_root_pred,
+	ssi1_root_podf, ssi2_root_pred, ssi2_root_podf, ssi_ext1_pred,
+	ssi_ext1_podf, ssi_ext2_pred, ssi_ext2_podf, ssi1_root_gate,
+	ssi2_root_gate, ssi3_root_gate, ssi_ext1_gate, ssi_ext2_gate,
+	clk_max
+};
+
+static struct clk *clk[clk_max];
+
+static void __init mx5_clocks_common_init(unsigned long rate_ckil,
+		unsigned long rate_osc, unsigned long rate_ckih1,
+		unsigned long rate_ckih2)
+{
+	int i;
+
+	clk[dummy] = imx_clk_fixed("dummy", 0);
+	clk[ckil] = imx_clk_fixed("ckil", rate_ckil);
+	clk[osc] = imx_clk_fixed("osc", rate_osc);
+	clk[ckih1] = imx_clk_fixed("ckih1", rate_ckih1);
+	clk[ckih2] = imx_clk_fixed("ckih2", rate_ckih2);
+
+	clk[lp_apm] = imx_clk_mux("lp_apm", MXC_CCM_CCSR, 9, 1,
+				lp_apm_sel, ARRAY_SIZE(lp_apm_sel));
+	clk[periph_apm] = imx_clk_mux("periph_apm", MXC_CCM_CBCMR, 12, 2,
+				periph_apm_sel, ARRAY_SIZE(periph_apm_sel));
+	clk[main_bus] = imx_clk_mux("main_bus", MXC_CCM_CBCDR, 25, 1,
+				main_bus_sel, ARRAY_SIZE(main_bus_sel));
+	clk[per_lp_apm] = imx_clk_mux("per_lp_apm", MXC_CCM_CBCMR, 1, 1,
+				per_lp_apm_sel, ARRAY_SIZE(per_lp_apm_sel));
+	clk[per_pred1] = imx_clk_divider("per_pred1", "per_lp_apm", MXC_CCM_CBCDR, 6, 2);
+	clk[per_pred2] = imx_clk_divider("per_pred2", "per_pred1", MXC_CCM_CBCDR, 3, 3);
+	clk[per_podf] = imx_clk_divider("per_podf", "per_pred2", MXC_CCM_CBCDR, 0, 3);
+	clk[per_root] = imx_clk_mux("per_root", MXC_CCM_CBCMR, 0, 1,
+				per_root_sel, ARRAY_SIZE(per_root_sel));
+	clk[ahb] = imx_clk_divider("ahb", "main_bus", MXC_CCM_CBCDR, 10, 3);
+	clk[ahb_max] = imx_clk_gate2("ahb_max", "ahb", MXC_CCM_CCGR0, 28);
+	clk[aips_tz1] = imx_clk_gate2("aips_tz1", "ahb", MXC_CCM_CCGR0, 24);
+	clk[aips_tz2] = imx_clk_gate2("aips_tz2", "ahb", MXC_CCM_CCGR0, 26);
+	clk[tmax1] = imx_clk_gate2("tmax1", "ahb", MXC_CCM_CCGR1, 0);
+	clk[tmax2] = imx_clk_gate2("tmax2", "ahb", MXC_CCM_CCGR1, 2);
+	clk[tmax3] = imx_clk_gate2("tmax3", "ahb", MXC_CCM_CCGR1, 4);
+	clk[spba] = imx_clk_gate2("spba", "ipg", MXC_CCM_CCGR5, 0);
+	clk[ipg] = imx_clk_divider("ipg", "ahb", MXC_CCM_CBCDR, 8, 2);
+	clk[axi_a] = imx_clk_divider("axi_a", "main_bus", MXC_CCM_CBCDR, 16, 3);
+	clk[axi_b] = imx_clk_divider("axi_b", "main_bus", MXC_CCM_CBCDR, 19, 3);
+	clk[uart_sel] = imx_clk_mux("uart_sel", MXC_CCM_CSCMR1, 24, 2,
+				standard_pll_sel, ARRAY_SIZE(standard_pll_sel));
+	clk[uart_pred] = imx_clk_divider("uart_pred", "uart_sel", MXC_CCM_CSCDR1, 3, 3);
+	clk[uart_root] = imx_clk_divider("uart_root", "uart_pred", MXC_CCM_CSCDR1, 0, 3);
+
+	clk[esdhc_a_sel] = imx_clk_mux("esdhc_a_sel", MXC_CCM_CSCMR1, 20, 2,
+				standard_pll_sel, ARRAY_SIZE(standard_pll_sel));
+	clk[esdhc_b_sel] = imx_clk_mux("esdhc_b_sel", MXC_CCM_CSCMR1, 16, 2,
+				standard_pll_sel, ARRAY_SIZE(standard_pll_sel));
+	clk[esdhc_a_pred] = imx_clk_divider("esdhc_a_pred", "esdhc_a_sel", MXC_CCM_CSCDR1, 16, 3);
+	clk[esdhc_a_podf] = imx_clk_divider("esdhc_a_podf", "esdhc_a_pred", MXC_CCM_CSCDR1, 11, 3);
+	clk[esdhc_b_pred] = imx_clk_divider("esdhc_b_pred", "esdhc_b_sel", MXC_CCM_CSCDR1, 22, 3);
+	clk[esdhc_b_podf] = imx_clk_divider("esdhc_b_podf", "esdhc_b_pred", MXC_CCM_CSCDR1, 19, 3);
+	clk[esdhc_c_s] = imx_clk_mux("esdhc_c_sel", MXC_CCM_CSCMR1, 19, 1, esdhc_c_sel, ARRAY_SIZE(esdhc_c_sel));
+	clk[esdhc_d_s] = imx_clk_mux("esdhc_d_sel", MXC_CCM_CSCMR1, 18, 1, esdhc_d_sel, ARRAY_SIZE(esdhc_d_sel));
+
+	clk[emi_sel] = imx_clk_mux("emi_sel", MXC_CCM_CBCDR, 26, 1,
+				emi_slow_sel, ARRAY_SIZE(emi_slow_sel));
+	clk[emi_slow_podf] = imx_clk_divider("emi_slow_podf", "emi_sel", MXC_CCM_CBCDR, 22, 3);
+	clk[nfc_podf] = imx_clk_divider("nfc_podf", "emi_slow_podf", MXC_CCM_CBCDR, 13, 3);
+	clk[ecspi_sel] = imx_clk_mux("ecspi_sel", MXC_CCM_CSCMR1, 4, 2,
+				standard_pll_sel, ARRAY_SIZE(standard_pll_sel));
+	clk[ecspi_pred] = imx_clk_divider("ecspi_pred", "ecspi_sel", MXC_CCM_CSCDR2, 25, 3);
+	clk[ecspi_podf] = imx_clk_divider("ecspi_podf", "ecspi_pred", MXC_CCM_CSCDR2, 19, 6);
+	clk[usboh3_sel] = imx_clk_mux("usboh3_sel", MXC_CCM_CSCMR1, 22, 2,
+				standard_pll_sel, ARRAY_SIZE(standard_pll_sel));
+	clk[usboh3_pred] = imx_clk_divider("usboh3_pred", "usboh3_sel", MXC_CCM_CSCDR1, 8, 3);
+	clk[usboh3_podf] = imx_clk_divider("usboh3_podf", "usboh3_pred", MXC_CCM_CSCDR1, 6, 2);
+	clk[usb_phy_pred] = imx_clk_divider("usb_phy_pred", "pll3_sw", MXC_CCM_CDCDR, 3, 3);
+	clk[usb_phy_podf] = imx_clk_divider("usb_phy_podf", "usb_phy_pred", MXC_CCM_CDCDR, 0, 3);
+	clk[usb_phy_sel] = imx_clk_mux("usb_phy_sel", MXC_CCM_CSCMR1, 26, 1,
+				usb_phy_sel_str, ARRAY_SIZE(usb_phy_sel_str));
+	clk[cpu_podf] = imx_clk_divider("cpu_podf", "pll1_sw", MXC_CCM_CACRR, 0, 3);
+	clk[di_pred] = imx_clk_divider("di_pred", "pll3_sw", MXC_CCM_CDCDR, 6, 3);
+	clk[tve_di] = imx_clk_fixed("tve_di", 65000000); /* FIXME */
+	clk[tve_s] = imx_clk_mux("tve_sel", MXC_CCM_CSCMR1, 7, 1, tve_sel, ARRAY_SIZE(tve_sel));
+	clk[iim_gate] = imx_clk_gate2("iim_gate", "ipg", MXC_CCM_CCGR0, 30);
+	clk[uart1_ipg_gate] = imx_clk_gate2("uart1_ipg_gate", "ipg", MXC_CCM_CCGR1, 6);
+	clk[uart1_per_gate] = imx_clk_gate2("uart1_per_gate", "uart_root", MXC_CCM_CCGR1, 8);
+	clk[uart2_ipg_gate] = imx_clk_gate2("uart2_ipg_gate", "ipg", MXC_CCM_CCGR1, 10);
+	clk[uart2_per_gate] = imx_clk_gate2("uart2_per_gate", "uart_root", MXC_CCM_CCGR1, 12);
+	clk[uart3_ipg_gate] = imx_clk_gate2("uart3_ipg_gate", "ipg", MXC_CCM_CCGR1, 14);
+	clk[uart3_per_gate] = imx_clk_gate2("uart3_per_gate", "uart_root", MXC_CCM_CCGR1, 16);
+	clk[i2c1_gate] = imx_clk_gate2("i2c1_gate", "per_root", MXC_CCM_CCGR1, 18);
+	clk[i2c2_gate] = imx_clk_gate2("i2c2_gate", "per_root", MXC_CCM_CCGR1, 20);
+	clk[gpt_ipg_gate] = imx_clk_gate2("gpt_ipg_gate", "ipg", MXC_CCM_CCGR2, 20);
+	clk[pwm1_ipg_gate] = imx_clk_gate2("pwm1_ipg_gate", "ipg", MXC_CCM_CCGR2, 10);
+	clk[pwm1_hf_gate] = imx_clk_gate2("pwm1_hf_gate", "ipg", MXC_CCM_CCGR2, 12);
+	clk[pwm2_ipg_gate] = imx_clk_gate2("pwm2_ipg_gate", "ipg", MXC_CCM_CCGR2, 14);
+	clk[pwm2_hf_gate] = imx_clk_gate2("pwm2_hf_gate", "ipg", MXC_CCM_CCGR2, 16);
+	clk[gpt_gate] = imx_clk_gate2("gpt_gate", "per_root", MXC_CCM_CCGR2, 18);
+	clk[fec_gate] = imx_clk_gate2("fec_gate", "ipg", MXC_CCM_CCGR2, 24);
+	clk[usboh3_gate] = imx_clk_gate2("usboh3_gate", "ipg", MXC_CCM_CCGR2, 26);
+	clk[usboh3_per_gate] = imx_clk_gate2("usboh3_per_gate", "usboh3_podf", MXC_CCM_CCGR2, 28);
+	clk[esdhc1_ipg_gate] = imx_clk_gate2("esdhc1_ipg_gate", "ipg", MXC_CCM_CCGR3, 0);
+	clk[esdhc2_ipg_gate] = imx_clk_gate2("esdhc2_ipg_gate", "ipg", MXC_CCM_CCGR3, 4);
+	clk[esdhc3_ipg_gate] = imx_clk_gate2("esdhc3_ipg_gate", "ipg", MXC_CCM_CCGR3, 8);
+	clk[esdhc4_ipg_gate] = imx_clk_gate2("esdhc4_ipg_gate", "ipg", MXC_CCM_CCGR3, 12);
+	clk[ssi1_ipg_gate] = imx_clk_gate2("ssi1_ipg_gate", "ipg", MXC_CCM_CCGR3, 16);
+	clk[ssi2_ipg_gate] = imx_clk_gate2("ssi2_ipg_gate", "ipg", MXC_CCM_CCGR3, 20);
+	clk[ssi3_ipg_gate] = imx_clk_gate2("ssi3_ipg_gate", "ipg", MXC_CCM_CCGR3, 24);
+	clk[ecspi1_ipg_gate] = imx_clk_gate2("ecspi1_ipg_gate", "ipg", MXC_CCM_CCGR4, 18);
+	clk[ecspi1_per_gate] = imx_clk_gate2("ecspi1_per_gate", "ecspi_podf", MXC_CCM_CCGR4, 20);
+	clk[ecspi2_ipg_gate] = imx_clk_gate2("ecspi2_ipg_gate", "ipg", MXC_CCM_CCGR4, 22);
+	clk[ecspi2_per_gate] = imx_clk_gate2("ecspi2_per_gate", "ecspi_podf", MXC_CCM_CCGR4, 24);
+	clk[cspi_ipg_gate] = imx_clk_gate2("cspi_ipg_gate", "ipg", MXC_CCM_CCGR4, 26);
+	clk[sdma_gate] = imx_clk_gate2("sdma_gate", "ipg", MXC_CCM_CCGR4, 30);
+	clk[emi_fast_gate] = imx_clk_gate2("emi_fast_gate", "dummy", MXC_CCM_CCGR5, 14);
+	clk[emi_slow_gate] = imx_clk_gate2("emi_slow_gate", "emi_slow_podf", MXC_CCM_CCGR5, 16);
+	clk[ipu_s] = imx_clk_mux("ipu_sel", MXC_CCM_CBCMR, 6, 2, ipu_sel, ARRAY_SIZE(ipu_sel));
+	clk[ipu_gate] = imx_clk_gate2("ipu_gate", "ipu_sel", MXC_CCM_CCGR5, 10);
+	clk[nfc_gate] = imx_clk_gate2("nfc_gate", "nfc_podf", MXC_CCM_CCGR5, 20);
+	clk[ipu_di0_gate] = imx_clk_gate2("ipu_di0_gate", "ipu_di0_sel", MXC_CCM_CCGR6, 10);
+	clk[ipu_di1_gate] = imx_clk_gate2("ipu_di1_gate", "ipu_di1_sel", MXC_CCM_CCGR6, 12);
+	clk[vpu_s] = imx_clk_mux("vpu_sel", MXC_CCM_CBCMR, 14, 2, vpu_sel, ARRAY_SIZE(vpu_sel));
+	clk[vpu_gate] = imx_clk_gate2("vpu_gate", "vpu_sel", MXC_CCM_CCGR5, 6);
+	clk[vpu_reference_gate] = imx_clk_gate2("vpu_reference_gate", "osc", MXC_CCM_CCGR5, 8);
+	clk[uart4_ipg_gate] = imx_clk_gate2("uart4_ipg_gate", "ipg", MXC_CCM_CCGR7, 8);
+	clk[uart4_per_gate] = imx_clk_gate2("uart4_per_gate", "uart_root", MXC_CCM_CCGR7, 10);
+	clk[uart5_ipg_gate] = imx_clk_gate2("uart5_ipg_gate", "ipg", MXC_CCM_CCGR7, 12);
+	clk[uart5_per_gate] = imx_clk_gate2("uart5_per_gate", "uart_root", MXC_CCM_CCGR7, 14);
+	clk[gpc_dvfs] = imx_clk_gate2("gpc_dvfs", "dummy", MXC_CCM_CCGR5, 24);
+
+	clk[ssi_apm] = imx_clk_mux("ssi_apm", MXC_CCM_CSCMR1, 8, 2, ssi_apm_sels, ARRAY_SIZE(ssi_apm_sels));
+	clk[ssi1_root_sel] = imx_clk_mux("ssi1_root_sel", MXC_CCM_CSCMR1, 14, 2, ssi_clk_sels, ARRAY_SIZE(ssi_clk_sels));
+	clk[ssi2_root_sel] = imx_clk_mux("ssi2_root_sel", MXC_CCM_CSCMR1, 12, 2, ssi_clk_sels, ARRAY_SIZE(ssi_clk_sels));
+	clk[ssi3_root_sel] = imx_clk_mux("ssi3_root_sel", MXC_CCM_CSCMR1, 11, 1, ssi3_clk_sels, ARRAY_SIZE(ssi3_clk_sels));
+	clk[ssi_ext1_sel] = imx_clk_mux("ssi_ext1_sel", MXC_CCM_CSCMR1, 28, 2, ssi_clk_sels, ARRAY_SIZE(ssi_clk_sels));
+	clk[ssi_ext2_sel] = imx_clk_mux("ssi_ext2_sel", MXC_CCM_CSCMR1, 30, 2, ssi_clk_sels, ARRAY_SIZE(ssi_clk_sels));
+	clk[ssi_ext1_com_sel] = imx_clk_mux("ssi_ext1_com_sel", MXC_CCM_CSCMR1, 0, 1, ssi_ext1_com_sels, ARRAY_SIZE(ssi_ext1_com_sels));
+	clk[ssi_ext2_com_sel] = imx_clk_mux("ssi_ext2_com_sel", MXC_CCM_CSCMR1, 1, 1, ssi_ext2_com_sels, ARRAY_SIZE(ssi_ext2_com_sels));
+	clk[ssi1_root_pred] = imx_clk_divider("ssi1_root_pred", "ssi1_root_sel", MXC_CCM_CS1CDR, 6, 3);
+	clk[ssi1_root_podf] = imx_clk_divider("ssi1_root_podf", "ssi1_root_pred", MXC_CCM_CS1CDR, 0, 6);
+	clk[ssi2_root_pred] = imx_clk_divider("ssi2_root_pred", "ssi2_root_sel", MXC_CCM_CS2CDR, 6, 3);
+	clk[ssi2_root_podf] = imx_clk_divider("ssi2_root_podf", "ssi2_root_pred", MXC_CCM_CS2CDR, 0, 6);
+	clk[ssi_ext1_pred] = imx_clk_divider("ssi_ext1_pred", "ssi_ext1_sel", MXC_CCM_CS1CDR, 22, 3);
+	clk[ssi_ext1_podf] = imx_clk_divider("ssi_ext1_podf", "ssi_ext1_pred", MXC_CCM_CS1CDR, 16, 6);
+	clk[ssi_ext2_pred] = imx_clk_divider("ssi_ext2_pred", "ssi_ext2_sel", MXC_CCM_CS2CDR, 22, 3);
+	clk[ssi_ext2_podf] = imx_clk_divider("ssi_ext2_podf", "ssi_ext2_pred", MXC_CCM_CS2CDR, 16, 6);
+	clk[ssi1_root_gate] = imx_clk_gate2("ssi1_root_gate", "ssi1_root_podf", MXC_CCM_CCGR3, 18);
+	clk[ssi2_root_gate] = imx_clk_gate2("ssi2_root_gate", "ssi2_root_podf", MXC_CCM_CCGR3, 22);
+	clk[ssi3_root_gate] = imx_clk_gate2("ssi3_root_gate", "ssi3_root_sel", MXC_CCM_CCGR3, 26);
+	clk[ssi_ext1_gate] = imx_clk_gate2("ssi_ext1_gate", "ssi_ext1_com_sel", MXC_CCM_CCGR3, 28);
+	clk[ssi_ext2_gate] = imx_clk_gate2("ssi_ext2_gate", "ssi_ext2_com_sel", MXC_CCM_CCGR3, 30);
+
+	for (i = 0; i < ARRAY_SIZE(clk); i++)
+		if (IS_ERR(clk[i]))
+			pr_err("i.MX5 clk %d: register failed with %ld\n",
+				i, PTR_ERR(clk[i]));
+
+	clk_register_clkdev(clk[gpt_gate], "per", "imx-gpt.0");
+	clk_register_clkdev(clk[gpt_ipg_gate], "ipg", "imx-gpt.0");
+	clk_register_clkdev(clk[uart1_per_gate], "per", "imx21-uart.0");
+	clk_register_clkdev(clk[uart1_ipg_gate], "ipg", "imx21-uart.0");
+	clk_register_clkdev(clk[uart2_per_gate], "per", "imx21-uart.1");
+	clk_register_clkdev(clk[uart2_ipg_gate], "ipg", "imx21-uart.1");
+	clk_register_clkdev(clk[uart3_per_gate], "per", "imx21-uart.2");
+	clk_register_clkdev(clk[uart3_ipg_gate], "ipg", "imx21-uart.2");
+	clk_register_clkdev(clk[uart4_per_gate], "per", "imx21-uart.3");
+	clk_register_clkdev(clk[uart4_ipg_gate], "ipg", "imx21-uart.3");
+	clk_register_clkdev(clk[uart5_per_gate], "per", "imx21-uart.4");
+	clk_register_clkdev(clk[uart5_ipg_gate], "ipg", "imx21-uart.4");
+	clk_register_clkdev(clk[ecspi1_per_gate], "per", "imx51-ecspi.0");
+	clk_register_clkdev(clk[ecspi1_ipg_gate], "ipg", "imx51-ecspi.0");
+	clk_register_clkdev(clk[ecspi2_per_gate], "per", "imx51-ecspi.1");
+	clk_register_clkdev(clk[ecspi2_ipg_gate], "ipg", "imx51-ecspi.1");
+	clk_register_clkdev(clk[cspi_ipg_gate], NULL, "imx51-cspi.0");
+	clk_register_clkdev(clk[pwm1_ipg_gate], "pwm", "mxc_pwm.0");
+	clk_register_clkdev(clk[pwm2_ipg_gate], "pwm", "mxc_pwm.1");
+	clk_register_clkdev(clk[i2c1_gate], NULL, "imx-i2c.0");
+	clk_register_clkdev(clk[i2c2_gate], NULL, "imx-i2c.1");
+	clk_register_clkdev(clk[usboh3_per_gate], "per", "mxc-ehci.0");
+	clk_register_clkdev(clk[usboh3_gate], "ipg", "mxc-ehci.0");
+	clk_register_clkdev(clk[usboh3_gate], "ahb", "mxc-ehci.0");
+	clk_register_clkdev(clk[usboh3_per_gate], "per", "mxc-ehci.1");
+	clk_register_clkdev(clk[usboh3_gate], "ipg", "mxc-ehci.1");
+	clk_register_clkdev(clk[usboh3_gate], "ahb", "mxc-ehci.1");
+	clk_register_clkdev(clk[usboh3_per_gate], "per", "mxc-ehci.2");
+	clk_register_clkdev(clk[usboh3_gate], "ipg", "mxc-ehci.2");
+	clk_register_clkdev(clk[usboh3_gate], "ahb", "mxc-ehci.2");
+	clk_register_clkdev(clk[usboh3_per_gate], "per", "fsl-usb2-udc");
+	clk_register_clkdev(clk[usboh3_gate], "ipg", "fsl-usb2-udc");
+	clk_register_clkdev(clk[usboh3_gate], "ahb", "fsl-usb2-udc");
+	clk_register_clkdev(clk[nfc_gate], NULL, "mxc_nand");
+	clk_register_clkdev(clk[ssi1_ipg_gate], NULL, "imx-ssi.0");
+	clk_register_clkdev(clk[ssi2_ipg_gate], NULL, "imx-ssi.1");
+	clk_register_clkdev(clk[ssi3_ipg_gate], NULL, "imx-ssi.2");
+	clk_register_clkdev(clk[ssi_ext1_gate], "ssi_ext1", NULL);
+	clk_register_clkdev(clk[ssi_ext2_gate], "ssi_ext2", NULL);
+	clk_register_clkdev(clk[sdma_gate], NULL, "imx35-sdma");
+	clk_register_clkdev(clk[cpu_podf], "cpu", NULL);
+	clk_register_clkdev(clk[iim_gate], "iim", NULL);
+	clk_register_clkdev(clk[dummy], NULL, "imx2-wdt.0");
+	clk_register_clkdev(clk[dummy], NULL, "imx2-wdt.1");
+	clk_register_clkdev(clk[dummy], NULL, "imx-keypad");
+	clk_register_clkdev(clk[tve_gate], NULL, "imx-tve.0");
+	clk_register_clkdev(clk[ipu_di1_gate], "di1", "imx-tve.0");
+
+	/* Set SDHC parents to be PLL2 */
+	clk_set_parent(clk[esdhc_a_sel], clk[pll2_sw]);
+	clk_set_parent(clk[esdhc_b_sel], clk[pll2_sw]);
+
+	/* move usb phy clk to 24 MHz */
+	clk_set_parent(clk[usb_phy_sel], clk[osc]);
+
+	clk_prepare_enable(clk[gpc_dvfs]);
+	clk_prepare_enable(clk[ahb_max]); /* esdhc3 */
+	clk_prepare_enable(clk[aips_tz1]);
+	clk_prepare_enable(clk[aips_tz2]); /* fec */
+	clk_prepare_enable(clk[spba]);
+	clk_prepare_enable(clk[emi_fast_gate]); /* fec */
+	clk_prepare_enable(clk[tmax1]);
+	clk_prepare_enable(clk[tmax2]); /* esdhc2, fec */
+	clk_prepare_enable(clk[tmax3]); /* esdhc1, esdhc4 */
+}
+
+int __init mx51_clocks_init(unsigned long rate_ckil, unsigned long rate_osc,
+			unsigned long rate_ckih1, unsigned long rate_ckih2)
+{
+	int i;
+
+	clk[pll1_sw] = imx_clk_pllv2("pll1_sw", "osc", MX51_DPLL1_BASE);
+	clk[pll2_sw] = imx_clk_pllv2("pll2_sw", "osc", MX51_DPLL2_BASE);
+	clk[pll3_sw] = imx_clk_pllv2("pll3_sw", "osc", MX51_DPLL3_BASE);
+	clk[ipu_di0_sel] = imx_clk_mux("ipu_di0_sel", MXC_CCM_CSCMR2, 26, 3,
+				mx51_ipu_di0_sel, ARRAY_SIZE(mx51_ipu_di0_sel));
+	clk[ipu_di1_sel] = imx_clk_mux("ipu_di1_sel", MXC_CCM_CSCMR2, 29, 3,
+				mx51_ipu_di1_sel, ARRAY_SIZE(mx51_ipu_di1_sel));
+	clk[tve_ext_sel] = imx_clk_mux("tve_ext_sel", MXC_CCM_CSCMR1, 6, 1,
+				mx51_tve_ext_sel, ARRAY_SIZE(mx51_tve_ext_sel));
+	clk[tve_gate] = imx_clk_gate2("tve_gate", "tve_sel", MXC_CCM_CCGR2, 30);
+	clk[tve_pred] = imx_clk_divider("tve_pred", "pll3_sw", MXC_CCM_CDCDR, 28, 3);
+	clk[esdhc1_per_gate] = imx_clk_gate2("esdhc1_per_gate", "esdhc_a_podf", MXC_CCM_CCGR3, 2);
+	clk[esdhc2_per_gate] = imx_clk_gate2("esdhc2_per_gate", "esdhc_b_podf", MXC_CCM_CCGR3, 6);
+	clk[esdhc3_per_gate] = imx_clk_gate2("esdhc3_per_gate", "esdhc_c_sel", MXC_CCM_CCGR3, 10);
+	clk[esdhc4_per_gate] = imx_clk_gate2("esdhc4_per_gate", "esdhc_d_sel", MXC_CCM_CCGR3, 14);
+	clk[usb_phy_gate] = imx_clk_gate2("usb_phy_gate", "usb_phy_sel", MXC_CCM_CCGR2, 0);
+	clk[hsi2c_gate] = imx_clk_gate2("hsi2c_gate", "ipg", MXC_CCM_CCGR1, 22);
+	clk[mipi_hsc1_gate] = imx_clk_gate2("mipi_hsc1_gate", "ipg", MXC_CCM_CCGR4, 6);
+	clk[mipi_hsc2_gate] = imx_clk_gate2("mipi_hsc2_gate", "ipg", MXC_CCM_CCGR4, 8);
+	clk[mipi_esc_gate] = imx_clk_gate2("mipi_esc_gate", "ipg", MXC_CCM_CCGR4, 10);
+	clk[mipi_hsp_gate] = imx_clk_gate2("mipi_hsp_gate", "ipg", MXC_CCM_CCGR4, 12);
+
+	for (i = 0; i < ARRAY_SIZE(clk); i++)
+		if (IS_ERR(clk[i]))
+			pr_err("i.MX51 clk %d: register failed with %ld\n",
+				i, PTR_ERR(clk[i]));
+
+	mx5_clocks_common_init(rate_ckil, rate_osc, rate_ckih1, rate_ckih2);
+
+	clk_register_clkdev(clk[hsi2c_gate], NULL, "imx-i2c.2");
+	clk_register_clkdev(clk[mx51_mipi], "mipi_hsp", NULL);
+	clk_register_clkdev(clk[vpu_gate], NULL, "imx51-vpu.0");
+	clk_register_clkdev(clk[fec_gate], NULL, "imx27-fec.0");
+	clk_register_clkdev(clk[gpc_dvfs], "gpc_dvfs", NULL);
+	clk_register_clkdev(clk[ipu_gate], "bus", "imx51-ipu");
+	clk_register_clkdev(clk[ipu_di0_gate], "di0", "imx51-ipu");
+	clk_register_clkdev(clk[ipu_di1_gate], "di1", "imx51-ipu");
+	clk_register_clkdev(clk[ipu_gate], "hsp", "imx51-ipu");
+	clk_register_clkdev(clk[usb_phy_gate], "phy", "mxc-ehci.0");
+	clk_register_clkdev(clk[esdhc1_ipg_gate], "ipg", "sdhci-esdhc-imx51.0");
+	clk_register_clkdev(clk[dummy], "ahb", "sdhci-esdhc-imx51.0");
+	clk_register_clkdev(clk[esdhc1_per_gate], "per", "sdhci-esdhc-imx51.0");
+	clk_register_clkdev(clk[esdhc2_ipg_gate], "ipg", "sdhci-esdhc-imx51.1");
+	clk_register_clkdev(clk[dummy], "ahb", "sdhci-esdhc-imx51.1");
+	clk_register_clkdev(clk[esdhc2_per_gate], "per", "sdhci-esdhc-imx51.1");
+	clk_register_clkdev(clk[esdhc3_ipg_gate], "ipg", "sdhci-esdhc-imx51.2");
+	clk_register_clkdev(clk[dummy], "ahb", "sdhci-esdhc-imx51.2");
+	clk_register_clkdev(clk[esdhc3_per_gate], "per", "sdhci-esdhc-imx51.2");
+	clk_register_clkdev(clk[esdhc4_ipg_gate], "ipg", "sdhci-esdhc-imx51.3");
+	clk_register_clkdev(clk[dummy], "ahb", "sdhci-esdhc-imx51.3");
+	clk_register_clkdev(clk[esdhc4_per_gate], "per", "sdhci-esdhc-imx51.3");
+	clk_register_clkdev(clk[ssi1_ipg_gate], NULL, "83fcc000.ssi");
+	clk_register_clkdev(clk[ssi2_ipg_gate], NULL, "70014000.ssi");
+	clk_register_clkdev(clk[ssi3_ipg_gate], NULL, "83fe8000.ssi");
+
+	/* set the usboh3 parent to pll2_sw */
+	clk_set_parent(clk[usboh3_sel], clk[pll2_sw]);
+
+	/* set SDHC root clock to 166.25 MHz */
+	clk_set_rate(clk[esdhc_a_podf], 166250000);
+	clk_set_rate(clk[esdhc_b_podf], 166250000);
+
+	/* System timer */
+	mxc_timer_init(MX51_IO_ADDRESS(MX51_GPT1_BASE_ADDR), MX51_INT_GPT);
+
+	clk_prepare_enable(clk[iim_gate]);
+	imx_print_silicon_rev("i.MX51", mx51_revision());
+	clk_disable_unprepare(clk[iim_gate]);
+
+	return 0;
+}
+
+int __init mx53_clocks_init(unsigned long rate_ckil, unsigned long rate_osc,
+			unsigned long rate_ckih1, unsigned long rate_ckih2)
+{
+	int i;
+	unsigned long r;
+
+	clk[pll1_sw] = imx_clk_pllv2("pll1_sw", "osc", MX53_DPLL1_BASE);
+	clk[pll2_sw] = imx_clk_pllv2("pll2_sw", "osc", MX53_DPLL2_BASE);
+	clk[pll3_sw] = imx_clk_pllv2("pll3_sw", "osc", MX53_DPLL3_BASE);
+	clk[pll4_sw] = imx_clk_pllv2("pll4_sw", "osc", MX53_DPLL4_BASE);
+
+	clk[ldb_di1_sel] = imx_clk_mux("ldb_di1_sel", MXC_CCM_CSCMR2, 9, 1,
+				mx53_ldb_di1_sel, ARRAY_SIZE(mx53_ldb_di1_sel));
+	clk[ldb_di1_div_3_5] = imx_clk_fixed_factor("ldb_di1_div_3_5", "ldb_di1_sel", 2, 7);
+	clk[ldb_di1_div] = imx_clk_divider("ldb_di1_div", "ldb_di1_div_3_5", MXC_CCM_CSCMR2, 11, 1);
+	clk[di_pll4_podf] = imx_clk_divider("di_pll4_podf", "pll4_sw", MXC_CCM_CDCDR, 16, 3);
+	clk[ldb_di0_sel] = imx_clk_mux("ldb_di0_sel", MXC_CCM_CSCMR2, 8, 1,
+				mx53_ldb_di0_sel, ARRAY_SIZE(mx53_ldb_di0_sel));
+	clk[ldb_di0_div_3_5] = imx_clk_fixed_factor("ldb_di0_div_3_5", "ldb_di0_sel", 2, 7);
+	clk[ldb_di0_div] = imx_clk_divider("ldb_di0_div", "ldb_di0_div_3_5", MXC_CCM_CSCMR2, 10, 1);
+	clk[ldb_di0_gate] = imx_clk_gate2("ldb_di0_gate", "ldb_di0_div", MXC_CCM_CCGR6, 28);
+	clk[ldb_di1_gate] = imx_clk_gate2("ldb_di1_gate", "ldb_di1_div", MXC_CCM_CCGR6, 30);
+	clk[ipu_di0_sel] = imx_clk_mux("ipu_di0_sel", MXC_CCM_CSCMR2, 26, 3,
+				mx53_ipu_di0_sel, ARRAY_SIZE(mx53_ipu_di0_sel));
+	clk[ipu_di1_sel] = imx_clk_mux("ipu_di1_sel", MXC_CCM_CSCMR2, 29, 3,
+				mx53_ipu_di1_sel, ARRAY_SIZE(mx53_ipu_di1_sel));
+	clk[tve_ext_sel] = imx_clk_mux("tve_ext_sel", MXC_CCM_CSCMR1, 6, 1,
+				mx53_tve_ext_sel, ARRAY_SIZE(mx53_tve_ext_sel));
+	clk[tve_gate] = imx_clk_gate2("tve_gate", "tve_pred", MXC_CCM_CCGR2, 30);
+	clk[tve_pred] = imx_clk_divider("tve_pred", "tve_ext_sel", MXC_CCM_CDCDR, 28, 3);
+	clk[esdhc1_per_gate] = imx_clk_gate2("esdhc1_per_gate", "esdhc_a_podf", MXC_CCM_CCGR3, 2);
+	clk[esdhc2_per_gate] = imx_clk_gate2("esdhc2_per_gate", "esdhc_c_sel", MXC_CCM_CCGR3, 6);
+	clk[esdhc3_per_gate] = imx_clk_gate2("esdhc3_per_gate", "esdhc_b_podf", MXC_CCM_CCGR3, 10);
+	clk[esdhc4_per_gate] = imx_clk_gate2("esdhc4_per_gate", "esdhc_d_sel", MXC_CCM_CCGR3, 14);
+	clk[usb_phy1_gate] = imx_clk_gate2("usb_phy1_gate", "usb_phy_sel", MXC_CCM_CCGR4, 10);
+	clk[usb_phy2_gate] = imx_clk_gate2("usb_phy2_gate", "usb_phy_sel", MXC_CCM_CCGR4, 12);
+	clk[can2_serial_gate] = imx_clk_gate2("can2_serial_gate", "ipg", MXC_CCM_CCGR4, 6);
+	clk[can2_ipg_gate] = imx_clk_gate2("can2_ipg_gate", "ipg", MXC_CCM_CCGR4, 8);
+	clk[i2c3_gate] = imx_clk_gate2("i2c3_gate", "per_root", MXC_CCM_CCGR1, 22);
+
+	for (i = 0; i < ARRAY_SIZE(clk); i++)
+		if (IS_ERR(clk[i]))
+			pr_err("i.MX53 clk %d: register failed with %ld\n",
+				i, PTR_ERR(clk[i]));
+
+	mx5_clocks_common_init(rate_ckil, rate_osc, rate_ckih1, rate_ckih2);
+
+	clk_register_clkdev(clk[vpu_gate], NULL, "imx53-vpu.0");
+	clk_register_clkdev(clk[i2c3_gate], NULL, "imx-i2c.2");
+	clk_register_clkdev(clk[fec_gate], NULL, "imx25-fec.0");
+	clk_register_clkdev(clk[ipu_gate], "bus", "imx53-ipu");
+	clk_register_clkdev(clk[ipu_di0_gate], "di0", "imx53-ipu");
+	clk_register_clkdev(clk[ipu_di1_gate], "di1", "imx53-ipu");
+	clk_register_clkdev(clk[ipu_gate], "hsp", "imx53-ipu");
+	clk_register_clkdev(clk[usb_phy1_gate], "usb_phy1", "mxc-ehci.0");
+	clk_register_clkdev(clk[esdhc1_ipg_gate], "ipg", "sdhci-esdhc-imx53.0");
+	clk_register_clkdev(clk[dummy], "ahb", "sdhci-esdhc-imx53.0");
+	clk_register_clkdev(clk[esdhc1_per_gate], "per", "sdhci-esdhc-imx53.0");
+	clk_register_clkdev(clk[esdhc2_ipg_gate], "ipg", "sdhci-esdhc-imx53.1");
+	clk_register_clkdev(clk[dummy], "ahb", "sdhci-esdhc-imx53.1");
+	clk_register_clkdev(clk[esdhc2_per_gate], "per", "sdhci-esdhc-imx53.1");
+	clk_register_clkdev(clk[esdhc3_ipg_gate], "ipg", "sdhci-esdhc-imx53.2");
+	clk_register_clkdev(clk[dummy], "ahb", "sdhci-esdhc-imx53.2");
+	clk_register_clkdev(clk[esdhc3_per_gate], "per", "sdhci-esdhc-imx53.2");
+	clk_register_clkdev(clk[esdhc4_ipg_gate], "ipg", "sdhci-esdhc-imx53.3");
+	clk_register_clkdev(clk[dummy], "ahb", "sdhci-esdhc-imx53.3");
+	clk_register_clkdev(clk[esdhc4_per_gate], "per", "sdhci-esdhc-imx53.3");
+	clk_register_clkdev(clk[ssi1_ipg_gate], NULL, "63fcc000.ssi");
+	clk_register_clkdev(clk[ssi2_ipg_gate], NULL, "50014000.ssi");
+	clk_register_clkdev(clk[ssi3_ipg_gate], NULL, "63fd0000.ssi");
+
+	/* set SDHC root clock to 200 MHz */
+	clk_set_rate(clk[esdhc_a_podf], 200000000);
+	clk_set_rate(clk[esdhc_b_podf], 200000000);
+
+	/* System timer */
+	mxc_timer_init(MX53_IO_ADDRESS(MX53_GPT1_BASE_ADDR), MX53_INT_GPT);
+
+	clk_prepare_enable(clk[iim_gate]);
+	imx_print_silicon_rev("i.MX53", mx53_revision());
+	clk_disable_unprepare(clk[iim_gate]);
+
+	r = clk_round_rate(clk[usboh3_per_gate], 54000000);
+	clk_set_rate(clk[usboh3_per_gate], r);
+
+	return 0;
+}
+
+#ifdef CONFIG_OF
+static void __init clk_get_freq_dt(unsigned long *ckil, unsigned long *osc,
+				   unsigned long *ckih1, unsigned long *ckih2)
+{
+	struct device_node *np;
+
+	/* retrieve the frequency of fixed clocks from device tree */
+	for_each_compatible_node(np, NULL, "fixed-clock") {
+		u32 rate;
+		if (of_property_read_u32(np, "clock-frequency", &rate))
+			continue;
+
+		if (of_device_is_compatible(np, "fsl,imx-ckil"))
+			*ckil = rate;
+		else if (of_device_is_compatible(np, "fsl,imx-osc"))
+			*osc = rate;
+		else if (of_device_is_compatible(np, "fsl,imx-ckih1"))
+			*ckih1 = rate;
+		else if (of_device_is_compatible(np, "fsl,imx-ckih2"))
+			*ckih2 = rate;
+	}
+}
+
+int __init mx51_clocks_init_dt(void)
+{
+	unsigned long ckil, osc, ckih1, ckih2;
+
+	clk_get_freq_dt(&ckil, &osc, &ckih1, &ckih2);
+	return mx51_clocks_init(ckil, osc, ckih1, ckih2);
+}
+
+int __init mx53_clocks_init_dt(void)
+{
+	unsigned long ckil, osc, ckih1, ckih2;
+
+	clk_get_freq_dt(&ckil, &osc, &ckih1, &ckih2);
+	return mx53_clocks_init(ckil, osc, ckih1, ckih2);
+}
+#endif
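The clkdev lookups registered above are how i.MX51/53 platform drivers find their clocks by connection id. As a rough sketch, assuming a driver bound to one of the device names listed above (the probe function itself is illustrative, not part of this patch):

	static int example_probe(struct platform_device *pdev)
	{
		/* "ipg" is the bus clock, "per" the peripheral clock. */
		struct clk *ipg = clk_get(&pdev->dev, "ipg");
		struct clk *per = clk_get(&pdev->dev, "per");

		if (IS_ERR(ipg) || IS_ERR(per))
			return -ENODEV;

		clk_prepare_enable(ipg);
		clk_prepare_enable(per);
		dev_info(&pdev->dev, "per clock at %lu Hz\n", clk_get_rate(per));
		return 0;
	}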
diff --git a/arch/arm/mach-imx/clk-imx6q.c b/arch/arm/mach-imx/clk-imx6q.c
new file mode 100644
index 0000000..e1a17ac
--- /dev/null
+++ b/arch/arm/mach-imx/clk-imx6q.c
@@ -0,0 +1,438 @@
+/*
+ * Copyright 2011 Freescale Semiconductor, Inc.
+ * Copyright 2011 Linaro Ltd.
+ *
+ * The code contained herein is licensed under the GNU General Public
+ * License. You may obtain a copy of the GNU General Public License
+ * Version 2 or later at the following locations:
+ *
+ * http://www.opensource.org/licenses/gpl-license.html
+ * http://www.gnu.org/copyleft/gpl.html
+ */
+
+#include <linux/init.h>
+#include <linux/types.h>
+#include <linux/clk.h>
+#include <linux/clkdev.h>
+#include <linux/err.h>
+#include <linux/io.h>
+#include <linux/of.h>
+#include <linux/of_address.h>
+#include <linux/of_irq.h>
+#include <mach/common.h>
+#include "clk.h"
+
+#define CCGR0				0x68
+#define CCGR1				0x6c
+#define CCGR2				0x70
+#define CCGR3				0x74
+#define CCGR4				0x78
+#define CCGR5				0x7c
+#define CCGR6				0x80
+#define CCGR7				0x84
+
+#define CLPCR				0x54
+#define BP_CLPCR_LPM			0
+#define BM_CLPCR_LPM			(0x3 << 0)
+#define BM_CLPCR_BYPASS_PMIC_READY	(0x1 << 2)
+#define BM_CLPCR_ARM_CLK_DIS_ON_LPM	(0x1 << 5)
+#define BM_CLPCR_SBYOS			(0x1 << 6)
+#define BM_CLPCR_DIS_REF_OSC		(0x1 << 7)
+#define BM_CLPCR_VSTBY			(0x1 << 8)
+#define BP_CLPCR_STBY_COUNT		9
+#define BM_CLPCR_STBY_COUNT		(0x3 << 9)
+#define BM_CLPCR_COSC_PWRDOWN		(0x1 << 11)
+#define BM_CLPCR_WB_PER_AT_LPM		(0x1 << 16)
+#define BM_CLPCR_WB_CORE_AT_LPM		(0x1 << 17)
+#define BM_CLPCR_BYP_MMDC_CH0_LPM_HS	(0x1 << 19)
+#define BM_CLPCR_BYP_MMDC_CH1_LPM_HS	(0x1 << 21)
+#define BM_CLPCR_MASK_CORE0_WFI		(0x1 << 22)
+#define BM_CLPCR_MASK_CORE1_WFI		(0x1 << 23)
+#define BM_CLPCR_MASK_CORE2_WFI		(0x1 << 24)
+#define BM_CLPCR_MASK_CORE3_WFI		(0x1 << 25)
+#define BM_CLPCR_MASK_SCU_IDLE		(0x1 << 26)
+#define BM_CLPCR_MASK_L2CC_IDLE		(0x1 << 27)
+
+static void __iomem *ccm_base;
+
+void __init imx6q_clock_map_io(void) { }
+
+int imx6q_set_lpm(enum mxc_cpu_pwr_mode mode)
+{
+	u32 val = readl_relaxed(ccm_base + CLPCR);
+
+	val &= ~BM_CLPCR_LPM;
+	switch (mode) {
+	case WAIT_CLOCKED:
+		break;
+	case WAIT_UNCLOCKED:
+		val |= 0x1 << BP_CLPCR_LPM;
+		break;
+	case STOP_POWER_ON:
+		val |= 0x2 << BP_CLPCR_LPM;
+		break;
+	case WAIT_UNCLOCKED_POWER_OFF:
+		val |= 0x1 << BP_CLPCR_LPM;
+		val &= ~BM_CLPCR_VSTBY;
+		val &= ~BM_CLPCR_SBYOS;
+		break;
+	case STOP_POWER_OFF:
+		val |= 0x2 << BP_CLPCR_LPM;
+		val |= 0x3 << BP_CLPCR_STBY_COUNT;
+		val |= BM_CLPCR_VSTBY;
+		val |= BM_CLPCR_SBYOS;
+		break;
+	default:
+		return -EINVAL;
+	}
+
+	writel_relaxed(val, ccm_base + CLPCR);
+
+	return 0;
+}
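imx6q_set_lpm() only programs the CLPCR mode field; the caller still has to execute WFI to actually enter the selected state. A rough sketch of an idle path using it (the surrounding calls are illustrative, not part of this patch):

	imx6q_set_lpm(WAIT_UNCLOCKED);	/* gate the ARM clock on the next WFI */
	cpu_do_idle();			/* WFI */
	imx6q_set_lpm(WAIT_CLOCKED);	/* restore normal RUN behaviour */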
+
+static const char *step_sels[]	= { "osc", "pll2_pfd2_396m", };
+static const char *pll1_sw_sels[]	= { "pll1_sys", "step", };
+static const char *periph_pre_sels[]	= { "pll2_bus", "pll2_pfd2_396m", "pll2_pfd0_352m", "pll2_198m", };
+static const char *periph_clk2_sels[]	= { "pll3_usb_otg", "osc", };
+static const char *periph_sels[]	= { "periph_pre", "periph_clk2", };
+static const char *periph2_sels[]	= { "periph2_pre", "periph2_clk2", };
+static const char *axi_sels[]		= { "periph", "pll2_pfd2_396m", "pll3_pfd1_540m", };
+static const char *audio_sels[]	= { "pll4_audio", "pll3_pfd2_508m", "pll3_pfd3_454m", "pll3_usb_otg", };
+static const char *gpu_axi_sels[]	= { "axi", "ahb", };
+static const char *gpu2d_core_sels[]	= { "axi", "pll3_usb_otg", "pll2_pfd0_352m", "pll2_pfd2_396m", };
+static const char *gpu3d_core_sels[]	= { "mmdc_ch0_axi", "pll3_usb_otg", "pll2_pfd1_594m", "pll2_pfd2_396m", };
+static const char *gpu3d_shader_sels[] = { "mmdc_ch0_axi", "pll3_usb_otg", "pll2_pfd1_594m", "pll2_pfd9_720m", };
+static const char *ipu_sels[]		= { "mmdc_ch0_axi", "pll2_pfd2_396m", "pll3_120m", "pll3_pfd1_540m", };
+static const char *ldb_di_sels[]	= { "pll5_video", "pll2_pfd0_352m", "pll2_pfd2_396m", "pll3_pfd1_540m", };
+static const char *ipu_di_pre_sels[]	= { "mmdc_ch0_axi", "pll3_usb_otg", "pll5_video", "pll2_pfd0_352m", "pll2_pfd2_396m", "pll3_pfd1_540m", };
+static const char *ipu1_di0_sels[]	= { "ipu1_di0_pre", "dummy", "dummy", "ldb_di0", "ldb_di1", };
+static const char *ipu1_di1_sels[]	= { "ipu1_di1_pre", "dummy", "dummy", "ldb_di0", "ldb_di1", };
+static const char *ipu2_di0_sels[]	= { "ipu2_di0_pre", "dummy", "dummy", "ldb_di0", "ldb_di1", };
+static const char *ipu2_di1_sels[]	= { "ipu2_di1_pre", "dummy", "dummy", "ldb_di0", "ldb_di1", };
+static const char *hsi_tx_sels[]	= { "pll3_120m", "pll2_pfd2_396m", };
+static const char *pcie_axi_sels[]	= { "axi", "ahb", };
+static const char *ssi_sels[]		= { "pll3_pfd2_508m", "pll3_pfd3_454m", "pll4_audio", };
+static const char *usdhc_sels[]	= { "pll2_pfd2_396m", "pll2_pfd0_352m", };
+static const char *enfc_sels[]	= { "pll2_pfd0_352m", "pll2_bus", "pll3_usb_otg", "pll2_pfd2_396m", };
+static const char *emi_sels[]		= { "axi", "pll3_usb_otg", "pll2_pfd2_396m", "pll2_pfd0_352m", };
+static const char *vdo_axi_sels[]	= { "axi", "ahb", };
+static const char *vpu_axi_sels[]	= { "axi", "pll2_pfd2_396m", "pll2_pfd0_352m", };
+static const char *cko1_sels[]	= { "pll3_usb_otg", "pll2_bus", "pll1_sys", "pll5_video",
+				    "dummy", "axi", "enfc", "ipu1_di0", "ipu1_di1", "ipu2_di0",
+				    "ipu2_di1", "ahb", "ipg", "ipg_per", "ckil", "pll4_audio", };
+
+enum mx6q_clks {
+	dummy, ckil, ckih, osc, pll2_pfd0_352m, pll2_pfd1_594m, pll2_pfd2_396m,
+	pll3_pfd0_720m, pll3_pfd1_540m, pll3_pfd2_508m, pll3_pfd3_454m,
+	pll2_198m, pll3_120m, pll3_80m, pll3_60m, twd, step, pll1_sw,
+	periph_pre, periph2_pre, periph_clk2_sel, periph2_clk2_sel, axi_sel,
+	esai_sel, asrc_sel, spdif_sel, gpu2d_axi, gpu3d_axi, gpu2d_core_sel,
+	gpu3d_core_sel, gpu3d_shader_sel, ipu1_sel, ipu2_sel, ldb_di0_sel,
+	ldb_di1_sel, ipu1_di0_pre_sel, ipu1_di1_pre_sel, ipu2_di0_pre_sel,
+	ipu2_di1_pre_sel, ipu1_di0_sel, ipu1_di1_sel, ipu2_di0_sel,
+	ipu2_di1_sel, hsi_tx_sel, pcie_axi_sel, ssi1_sel, ssi2_sel, ssi3_sel,
+	usdhc1_sel, usdhc2_sel, usdhc3_sel, usdhc4_sel, enfc_sel, emi_sel,
+	emi_slow_sel, vdo_axi_sel, vpu_axi_sel, cko1_sel, periph, periph2,
+	periph_clk2, periph2_clk2, ipg, ipg_per, esai_pred, esai_podf,
+	asrc_pred, asrc_podf, spdif_pred, spdif_podf, can_root, ecspi_root,
+	gpu2d_core_podf, gpu3d_core_podf, gpu3d_shader, ipu1_podf, ipu2_podf,
+	ldb_di0_podf, ldb_di1_podf, ipu1_di0_pre, ipu1_di1_pre, ipu2_di0_pre,
+	ipu2_di1_pre, hsi_tx_podf, ssi1_pred, ssi1_podf, ssi2_pred, ssi2_podf,
+	ssi3_pred, ssi3_podf, uart_serial_podf, usdhc1_podf, usdhc2_podf,
+	usdhc3_podf, usdhc4_podf, enfc_pred, enfc_podf, emi_podf,
+	emi_slow_podf, vpu_axi_podf, cko1_podf, axi, mmdc_ch0_axi_podf,
+	mmdc_ch1_axi_podf, arm, ahb, apbh_dma, asrc, can1_ipg, can1_serial,
+	can2_ipg, can2_serial, ecspi1, ecspi2, ecspi3, ecspi4, ecspi5, enet,
+	esai, gpt_ipg, gpt_ipg_per, gpu2d_core, gpu3d_core, hdmi_iahb,
+	hdmi_isfr, i2c1, i2c2, i2c3, iim, enfc, ipu1, ipu1_di0, ipu1_di1, ipu2,
+	ipu2_di0, ldb_di0, ldb_di1, ipu2_di1, hsi_tx, mlb, mmdc_ch0_axi,
+	mmdc_ch1_axi, ocram, openvg_axi, pcie_axi, pwm1, pwm2, pwm3, pwm4,
+	gpmi_bch_apb, gpmi_bch, gpmi_io, gpmi_apb, sata, sdma, spba, ssi1,
+	ssi2, ssi3, uart_ipg, uart_serial, usboh3, usdhc1, usdhc2, usdhc3,
+	usdhc4, vdo_axi, vpu_axi, cko1, pll1_sys, pll2_bus, pll3_usb_otg,
+	pll4_audio, pll5_video, pll6_mlb, pll7_usb_host, pll8_enet, ssi1_ipg,
+	ssi2_ipg, ssi3_ipg, rom,
+	clk_max
+};
+
+static struct clk *clk[clk_max];
+
+static enum mx6q_clks const clks_init_on[] __initconst = {
+	mmdc_ch0_axi, rom,
+};
+
+int __init mx6q_clocks_init(void)
+{
+	struct device_node *np;
+	void __iomem *base;
+	int i, irq;
+
+	clk[dummy] = imx_clk_fixed("dummy", 0);
+
+	/* retrieve the frequency of fixed clocks from device tree */
+	for_each_compatible_node(np, NULL, "fixed-clock") {
+		u32 rate;
+		if (of_property_read_u32(np, "clock-frequency", &rate))
+			continue;
+
+		if (of_device_is_compatible(np, "fsl,imx-ckil"))
+			clk[ckil] = imx_clk_fixed("ckil", rate);
+		else if (of_device_is_compatible(np, "fsl,imx-ckih1"))
+			clk[ckih] = imx_clk_fixed("ckih", rate);
+		else if (of_device_is_compatible(np, "fsl,imx-osc"))
+			clk[osc] = imx_clk_fixed("osc", rate);
+	}
+
+	np = of_find_compatible_node(NULL, NULL, "fsl,imx6q-anatop");
+	base = of_iomap(np, 0);
+	WARN_ON(!base);
+
+	/*                   type                               name         parent_name  base     gate_mask div_mask */
+	clk[pll1_sys]      = imx_clk_pllv3(IMX_PLLV3_SYS,	"pll1_sys",	"osc", base,        0x2000,   0x7f);
+	clk[pll2_bus]      = imx_clk_pllv3(IMX_PLLV3_GENERIC,	"pll2_bus",	"osc", base + 0x30, 0x2000,   0x1);
+	clk[pll3_usb_otg]  = imx_clk_pllv3(IMX_PLLV3_USB,	"pll3_usb_otg",	"osc", base + 0x10, 0x2000,   0x3);
+	clk[pll4_audio]    = imx_clk_pllv3(IMX_PLLV3_AV,	"pll4_audio",	"osc", base + 0x70, 0x2000,   0x7f);
+	clk[pll5_video]    = imx_clk_pllv3(IMX_PLLV3_AV,	"pll5_video",	"osc", base + 0xa0, 0x2000,   0x7f);
+	clk[pll6_mlb]      = imx_clk_pllv3(IMX_PLLV3_MLB,	"pll6_mlb",	"osc", base + 0xd0, 0x2000,   0x0);
+	clk[pll7_usb_host] = imx_clk_pllv3(IMX_PLLV3_USB,	"pll7_usb_host","osc", base + 0x20, 0x2000,   0x3);
+	clk[pll8_enet]     = imx_clk_pllv3(IMX_PLLV3_ENET,	"pll8_enet",	"osc", base + 0xe0, 0x182000, 0x3);
+
+	/*                                name              parent_name        reg       idx */
+	clk[pll2_pfd0_352m] = imx_clk_pfd("pll2_pfd0_352m", "pll2_bus",     base + 0x100, 0);
+	clk[pll2_pfd1_594m] = imx_clk_pfd("pll2_pfd1_594m", "pll2_bus",     base + 0x100, 1);
+	clk[pll2_pfd2_396m] = imx_clk_pfd("pll2_pfd2_396m", "pll2_bus",     base + 0x100, 2);
+	clk[pll3_pfd0_720m] = imx_clk_pfd("pll3_pfd0_720m", "pll3_usb_otg", base + 0xf0,  0);
+	clk[pll3_pfd1_540m] = imx_clk_pfd("pll3_pfd1_540m", "pll3_usb_otg", base + 0xf0,  1);
+	clk[pll3_pfd2_508m] = imx_clk_pfd("pll3_pfd2_508m", "pll3_usb_otg", base + 0xf0,  2);
+	clk[pll3_pfd3_454m] = imx_clk_pfd("pll3_pfd3_454m", "pll3_usb_otg", base + 0xf0,  3);
+
+	/*                                    name         parent_name     mult div */
+	clk[pll2_198m] = imx_clk_fixed_factor("pll2_198m", "pll2_pfd2_396m", 1, 2);
+	clk[pll3_120m] = imx_clk_fixed_factor("pll3_120m", "pll3_usb_otg",   1, 4);
+	clk[pll3_80m]  = imx_clk_fixed_factor("pll3_80m",  "pll3_usb_otg",   1, 6);
+	clk[pll3_60m]  = imx_clk_fixed_factor("pll3_60m",  "pll3_usb_otg",   1, 8);
+	clk[twd]       = imx_clk_fixed_factor("twd",       "arm",            1, 2);
+
+	np = of_find_compatible_node(NULL, NULL, "fsl,imx6q-ccm");
+	base = of_iomap(np, 0);
+	WARN_ON(!base);
+	ccm_base = base;
+
+	/*                                  name                reg       shift width parent_names     num_parents */
+	clk[step]             = imx_clk_mux("step",	        base + 0xc,  8,  1, step_sels,	       ARRAY_SIZE(step_sels));
+	clk[pll1_sw]          = imx_clk_mux("pll1_sw",	        base + 0xc,  2,  1, pll1_sw_sels,      ARRAY_SIZE(pll1_sw_sels));
+	clk[periph_pre]       = imx_clk_mux("periph_pre",       base + 0x18, 18, 2, periph_pre_sels,   ARRAY_SIZE(periph_pre_sels));
+	clk[periph2_pre]      = imx_clk_mux("periph2_pre",      base + 0x18, 21, 2, periph_pre_sels,   ARRAY_SIZE(periph_pre_sels));
+	clk[periph_clk2_sel]  = imx_clk_mux("periph_clk2_sel",  base + 0x18, 12, 1, periph_clk2_sels,  ARRAY_SIZE(periph_clk2_sels));
+	clk[periph2_clk2_sel] = imx_clk_mux("periph2_clk2_sel", base + 0x18, 20, 1, periph_clk2_sels,  ARRAY_SIZE(periph_clk2_sels));
+	clk[axi_sel]          = imx_clk_mux("axi_sel",          base + 0x14, 6,  2, axi_sels,          ARRAY_SIZE(axi_sels));
+	clk[esai_sel]         = imx_clk_mux("esai_sel",         base + 0x20, 19, 2, audio_sels,        ARRAY_SIZE(audio_sels));
+	clk[asrc_sel]         = imx_clk_mux("asrc_sel",         base + 0x30, 7,  2, audio_sels,        ARRAY_SIZE(audio_sels));
+	clk[spdif_sel]        = imx_clk_mux("spdif_sel",        base + 0x30, 20, 2, audio_sels,        ARRAY_SIZE(audio_sels));
+	clk[gpu2d_axi]        = imx_clk_mux("gpu2d_axi",        base + 0x18, 0,  1, gpu_axi_sels,      ARRAY_SIZE(gpu_axi_sels));
+	clk[gpu3d_axi]        = imx_clk_mux("gpu3d_axi",        base + 0x18, 1,  1, gpu_axi_sels,      ARRAY_SIZE(gpu_axi_sels));
+	clk[gpu2d_core_sel]   = imx_clk_mux("gpu2d_core_sel",   base + 0x18, 16, 2, gpu2d_core_sels,   ARRAY_SIZE(gpu2d_core_sels));
+	clk[gpu3d_core_sel]   = imx_clk_mux("gpu3d_core_sel",   base + 0x18, 4,  2, gpu3d_core_sels,   ARRAY_SIZE(gpu3d_core_sels));
+	clk[gpu3d_shader_sel] = imx_clk_mux("gpu3d_shader_sel", base + 0x18, 8,  2, gpu3d_shader_sels, ARRAY_SIZE(gpu3d_shader_sels));
+	clk[ipu1_sel]         = imx_clk_mux("ipu1_sel",         base + 0x3c, 9,  2, ipu_sels,          ARRAY_SIZE(ipu_sels));
+	clk[ipu2_sel]         = imx_clk_mux("ipu2_sel",         base + 0x3c, 14, 2, ipu_sels,          ARRAY_SIZE(ipu_sels));
+	clk[ldb_di0_sel]      = imx_clk_mux("ldb_di0_sel",      base + 0x2c, 9,  3, ldb_di_sels,       ARRAY_SIZE(ldb_di_sels));
+	clk[ldb_di1_sel]      = imx_clk_mux("ldb_di1_sel",      base + 0x2c, 12, 3, ldb_di_sels,       ARRAY_SIZE(ldb_di_sels));
+	clk[ipu1_di0_pre_sel] = imx_clk_mux("ipu1_di0_pre_sel", base + 0x34, 6,  3, ipu_di_pre_sels,   ARRAY_SIZE(ipu_di_pre_sels));
+	clk[ipu1_di1_pre_sel] = imx_clk_mux("ipu1_di1_pre_sel", base + 0x34, 15, 3, ipu_di_pre_sels,   ARRAY_SIZE(ipu_di_pre_sels));
+	clk[ipu2_di0_pre_sel] = imx_clk_mux("ipu2_di0_pre_sel", base + 0x38, 6,  3, ipu_di_pre_sels,   ARRAY_SIZE(ipu_di_pre_sels));
+	clk[ipu2_di1_pre_sel] = imx_clk_mux("ipu2_di1_pre_sel", base + 0x38, 15, 3, ipu_di_pre_sels,   ARRAY_SIZE(ipu_di_pre_sels));
+	clk[ipu1_di0_sel]     = imx_clk_mux("ipu1_di0_sel",     base + 0x34, 0,  3, ipu1_di0_sels,     ARRAY_SIZE(ipu1_di0_sels));
+	clk[ipu1_di1_sel]     = imx_clk_mux("ipu1_di1_sel",     base + 0x34, 9,  3, ipu1_di1_sels,     ARRAY_SIZE(ipu1_di1_sels));
+	clk[ipu2_di0_sel]     = imx_clk_mux("ipu2_di0_sel",     base + 0x38, 0,  3, ipu2_di0_sels,     ARRAY_SIZE(ipu2_di0_sels));
+	clk[ipu2_di1_sel]     = imx_clk_mux("ipu2_di1_sel",     base + 0x38, 9,  3, ipu2_di1_sels,     ARRAY_SIZE(ipu2_di1_sels));
+	clk[hsi_tx_sel]       = imx_clk_mux("hsi_tx_sel",       base + 0x30, 28, 1, hsi_tx_sels,       ARRAY_SIZE(hsi_tx_sels));
+	clk[pcie_axi_sel]     = imx_clk_mux("pcie_axi_sel",     base + 0x18, 10, 1, pcie_axi_sels,     ARRAY_SIZE(pcie_axi_sels));
+	clk[ssi1_sel]         = imx_clk_mux("ssi1_sel",         base + 0x1c, 10, 2, ssi_sels,          ARRAY_SIZE(ssi_sels));
+	clk[ssi2_sel]         = imx_clk_mux("ssi2_sel",         base + 0x1c, 12, 2, ssi_sels,          ARRAY_SIZE(ssi_sels));
+	clk[ssi3_sel]         = imx_clk_mux("ssi3_sel",         base + 0x1c, 14, 2, ssi_sels,          ARRAY_SIZE(ssi_sels));
+	clk[usdhc1_sel]       = imx_clk_mux("usdhc1_sel",       base + 0x1c, 16, 1, usdhc_sels,        ARRAY_SIZE(usdhc_sels));
+	clk[usdhc2_sel]       = imx_clk_mux("usdhc2_sel",       base + 0x1c, 17, 1, usdhc_sels,        ARRAY_SIZE(usdhc_sels));
+	clk[usdhc3_sel]       = imx_clk_mux("usdhc3_sel",       base + 0x1c, 18, 1, usdhc_sels,        ARRAY_SIZE(usdhc_sels));
+	clk[usdhc4_sel]       = imx_clk_mux("usdhc4_sel",       base + 0x1c, 19, 1, usdhc_sels,        ARRAY_SIZE(usdhc_sels));
+	clk[enfc_sel]         = imx_clk_mux("enfc_sel",         base + 0x2c, 16, 2, enfc_sels,         ARRAY_SIZE(enfc_sels));
+	clk[emi_sel]          = imx_clk_mux("emi_sel",          base + 0x1c, 27, 2, emi_sels,          ARRAY_SIZE(emi_sels));
+	clk[emi_slow_sel]     = imx_clk_mux("emi_slow_sel",     base + 0x1c, 29, 2, emi_sels,          ARRAY_SIZE(emi_sels));
+	clk[vdo_axi_sel]      = imx_clk_mux("vdo_axi_sel",      base + 0x18, 11, 1, vdo_axi_sels,      ARRAY_SIZE(vdo_axi_sels));
+	clk[vpu_axi_sel]      = imx_clk_mux("vpu_axi_sel",      base + 0x18, 14, 2, vpu_axi_sels,      ARRAY_SIZE(vpu_axi_sels));
+	clk[cko1_sel]         = imx_clk_mux("cko1_sel",         base + 0x60, 0,  4, cko1_sels,         ARRAY_SIZE(cko1_sels));
+
+	/*                              name         reg      shift width busy: reg, shift parent_names  num_parents */
+	clk[periph]  = imx_clk_busy_mux("periph",  base + 0x14, 25,  1,   base + 0x48, 5,  periph_sels,  ARRAY_SIZE(periph_sels));
+	clk[periph2] = imx_clk_busy_mux("periph2", base + 0x14, 26,  1,   base + 0x48, 3,  periph2_sels, ARRAY_SIZE(periph2_sels));
+
+	/*                                      name                parent_name          reg       shift width */
+	clk[periph_clk2]      = imx_clk_divider("periph_clk2",      "periph_clk2_sel",   base + 0x14, 27, 3);
+	clk[periph2_clk2]     = imx_clk_divider("periph2_clk2",     "periph2_clk2_sel",  base + 0x14, 0,  3);
+	clk[ipg]              = imx_clk_divider("ipg",              "ahb",               base + 0x14, 8,  2);
+	clk[ipg_per]          = imx_clk_divider("ipg_per",          "ipg",               base + 0x1c, 0,  6);
+	clk[esai_pred]        = imx_clk_divider("esai_pred",        "esai_sel",          base + 0x28, 9,  3);
+	clk[esai_podf]        = imx_clk_divider("esai_podf",        "esai_pred",         base + 0x28, 25, 3);
+	clk[asrc_pred]        = imx_clk_divider("asrc_pred",        "asrc_sel",          base + 0x30, 12, 3);
+	clk[asrc_podf]        = imx_clk_divider("asrc_podf",        "asrc_pred",         base + 0x30, 9,  3);
+	clk[spdif_pred]       = imx_clk_divider("spdif_pred",       "spdif_sel",         base + 0x30, 25, 3);
+	clk[spdif_podf]       = imx_clk_divider("spdif_podf",       "spdif_pred",        base + 0x30, 22, 3);
+	clk[can_root]         = imx_clk_divider("can_root",         "pll3_usb_otg",      base + 0x20, 2,  6);
+	clk[ecspi_root]       = imx_clk_divider("ecspi_root",       "pll3_60m",          base + 0x38, 19, 6);
+	clk[gpu2d_core_podf]  = imx_clk_divider("gpu2d_core_podf",  "gpu2d_core_sel",    base + 0x18, 23, 3);
+	clk[gpu3d_core_podf]  = imx_clk_divider("gpu3d_core_podf",  "gpu3d_core_sel",    base + 0x18, 26, 3);
+	clk[gpu3d_shader]     = imx_clk_divider("gpu3d_shader",     "gpu3d_shader_sel",  base + 0x18, 29, 3);
+	clk[ipu1_podf]        = imx_clk_divider("ipu1_podf",        "ipu1_sel",          base + 0x3c, 11, 3);
+	clk[ipu2_podf]        = imx_clk_divider("ipu2_podf",        "ipu2_sel",          base + 0x3c, 16, 3);
+	clk[ldb_di0_podf]     = imx_clk_divider("ldb_di0_podf",     "ldb_di0_sel",       base + 0x20, 10, 1);
+	clk[ldb_di1_podf]     = imx_clk_divider("ldb_di1_podf",     "ldb_di1_sel",       base + 0x20, 11, 1);
+	clk[ipu1_di0_pre]     = imx_clk_divider("ipu1_di0_pre",     "ipu1_di0_pre_sel",  base + 0x34, 3,  3);
+	clk[ipu1_di1_pre]     = imx_clk_divider("ipu1_di1_pre",     "ipu1_di1_pre_sel",  base + 0x34, 12, 3);
+	clk[ipu2_di0_pre]     = imx_clk_divider("ipu2_di0_pre",     "ipu2_di0_pre_sel",  base + 0x38, 3,  3);
+	clk[ipu2_di1_pre]     = imx_clk_divider("ipu2_di1_pre",     "ipu2_di1_pre_sel",  base + 0x38, 12, 3);
+	clk[hsi_tx_podf]      = imx_clk_divider("hsi_tx_podf",      "hsi_tx_sel",        base + 0x30, 29, 3);
+	clk[ssi1_pred]        = imx_clk_divider("ssi1_pred",        "ssi1_sel",          base + 0x28, 6,  3);
+	clk[ssi1_podf]        = imx_clk_divider("ssi1_podf",        "ssi1_pred",         base + 0x28, 0,  6);
+	clk[ssi2_pred]        = imx_clk_divider("ssi2_pred",        "ssi2_sel",          base + 0x2c, 6,  3);
+	clk[ssi2_podf]        = imx_clk_divider("ssi2_podf",        "ssi2_pred",         base + 0x2c, 0,  6);
+	clk[ssi3_pred]        = imx_clk_divider("ssi3_pred",        "ssi3_sel",          base + 0x28, 22, 3);
+	clk[ssi3_podf]        = imx_clk_divider("ssi3_podf",        "ssi3_pred",         base + 0x28, 16, 6);
+	clk[uart_serial_podf] = imx_clk_divider("uart_serial_podf", "pll3_80m",          base + 0x24, 0,  6);
+	clk[usdhc1_podf]      = imx_clk_divider("usdhc1_podf",      "usdhc1_sel",        base + 0x24, 11, 3);
+	clk[usdhc2_podf]      = imx_clk_divider("usdhc2_podf",      "usdhc2_sel",        base + 0x24, 16, 3);
+	clk[usdhc3_podf]      = imx_clk_divider("usdhc3_podf",      "usdhc3_sel",        base + 0x24, 19, 3);
+	clk[usdhc4_podf]      = imx_clk_divider("usdhc4_podf",      "usdhc4_sel",        base + 0x24, 22, 3);
+	clk[enfc_pred]        = imx_clk_divider("enfc_pred",        "enfc_sel",          base + 0x2c, 18, 3);
+	clk[enfc_podf]        = imx_clk_divider("enfc_podf",        "enfc_pred",         base + 0x2c, 21, 6);
+	clk[emi_podf]         = imx_clk_divider("emi_podf",         "emi_sel",           base + 0x1c, 20, 3);
+	clk[emi_slow_podf]    = imx_clk_divider("emi_slow_podf",    "emi_slow_sel",      base + 0x1c, 23, 3);
+	clk[vpu_axi_podf]     = imx_clk_divider("vpu_axi_podf",     "vpu_axi_sel",       base + 0x24, 25, 3);
+	clk[cko1_podf]        = imx_clk_divider("cko1_podf",        "cko1_sel",          base + 0x60, 4,  3);
+
+	/*                                            name                 parent_name    reg        shift width busy: reg, shift */
+	clk[axi]               = imx_clk_busy_divider("axi",               "axi_sel",     base + 0x14, 16,  3,   base + 0x48, 0);
+	clk[mmdc_ch0_axi_podf] = imx_clk_busy_divider("mmdc_ch0_axi_podf", "periph",      base + 0x14, 19,  3,   base + 0x48, 4);
+	clk[mmdc_ch1_axi_podf] = imx_clk_busy_divider("mmdc_ch1_axi_podf", "periph2",     base + 0x14, 3,   3,   base + 0x48, 2);
+	clk[arm]               = imx_clk_busy_divider("arm",               "pll1_sw",     base + 0x10, 0,   3,   base + 0x48, 16);
+	clk[ahb]               = imx_clk_busy_divider("ahb",               "periph",      base + 0x14, 10,  3,   base + 0x48, 1);
+
+	/*                                name             parent_name          reg         shift */
+	clk[apbh_dma]     = imx_clk_gate2("apbh_dma",      "ahb",               base + 0x68, 4);
+	clk[asrc]         = imx_clk_gate2("asrc",          "asrc_podf",         base + 0x68, 6);
+	clk[can1_ipg]     = imx_clk_gate2("can1_ipg",      "ipg",               base + 0x68, 14);
+	clk[can1_serial]  = imx_clk_gate2("can1_serial",   "can_root",          base + 0x68, 16);
+	clk[can2_ipg]     = imx_clk_gate2("can2_ipg",      "ipg",               base + 0x68, 18);
+	clk[can2_serial]  = imx_clk_gate2("can2_serial",   "can_root",          base + 0x68, 20);
+	clk[ecspi1]       = imx_clk_gate2("ecspi1",        "ecspi_root",        base + 0x6c, 0);
+	clk[ecspi2]       = imx_clk_gate2("ecspi2",        "ecspi_root",        base + 0x6c, 2);
+	clk[ecspi3]       = imx_clk_gate2("ecspi3",        "ecspi_root",        base + 0x6c, 4);
+	clk[ecspi4]       = imx_clk_gate2("ecspi4",        "ecspi_root",        base + 0x6c, 6);
+	clk[ecspi5]       = imx_clk_gate2("ecspi5",        "ecspi_root",        base + 0x6c, 8);
+	clk[enet]         = imx_clk_gate2("enet",          "ipg",               base + 0x6c, 10);
+	clk[esai]         = imx_clk_gate2("esai",          "esai_podf",         base + 0x6c, 16);
+	clk[gpt_ipg]      = imx_clk_gate2("gpt_ipg",       "ipg",               base + 0x6c, 20);
+	clk[gpt_ipg_per]  = imx_clk_gate2("gpt_ipg_per",   "ipg_per",           base + 0x6c, 22);
+	clk[gpu2d_core]   = imx_clk_gate2("gpu2d_core",    "gpu2d_core_podf",   base + 0x6c, 24);
+	clk[gpu3d_core]   = imx_clk_gate2("gpu3d_core",    "gpu3d_core_podf",   base + 0x6c, 26);
+	clk[hdmi_iahb]    = imx_clk_gate2("hdmi_iahb",     "ahb",               base + 0x70, 0);
+	clk[hdmi_isfr]    = imx_clk_gate2("hdmi_isfr",     "pll3_pfd1_540m",    base + 0x70, 4);
+	clk[i2c1]         = imx_clk_gate2("i2c1",          "ipg_per",           base + 0x70, 6);
+	clk[i2c2]         = imx_clk_gate2("i2c2",          "ipg_per",           base + 0x70, 8);
+	clk[i2c3]         = imx_clk_gate2("i2c3",          "ipg_per",           base + 0x70, 10);
+	clk[iim]          = imx_clk_gate2("iim",           "ipg",               base + 0x70, 12);
+	clk[enfc]         = imx_clk_gate2("enfc",          "enfc_podf",         base + 0x70, 14);
+	clk[ipu1]         = imx_clk_gate2("ipu1",          "ipu1_podf",         base + 0x74, 0);
+	clk[ipu1_di0]     = imx_clk_gate2("ipu1_di0",      "ipu1_di0_sel",      base + 0x74, 2);
+	clk[ipu1_di1]     = imx_clk_gate2("ipu1_di1",      "ipu1_di1_sel",      base + 0x74, 4);
+	clk[ipu2]         = imx_clk_gate2("ipu2",          "ipu2_podf",         base + 0x74, 6);
+	clk[ipu2_di0]     = imx_clk_gate2("ipu2_di0",      "ipu2_di0_sel",      base + 0x74, 8);
+	clk[ldb_di0]      = imx_clk_gate2("ldb_di0",       "ldb_di0_podf",      base + 0x74, 12);
+	clk[ldb_di1]      = imx_clk_gate2("ldb_di1",       "ldb_di1_podf",      base + 0x74, 14);
+	clk[ipu2_di1]     = imx_clk_gate2("ipu2_di1",      "ipu2_di1_sel",      base + 0x74, 10);
+	clk[hsi_tx]       = imx_clk_gate2("hsi_tx",        "hsi_tx_podf",       base + 0x74, 16);
+	clk[mlb]          = imx_clk_gate2("mlb",           "pll6_mlb",          base + 0x74, 18);
+	clk[mmdc_ch0_axi] = imx_clk_gate2("mmdc_ch0_axi",  "mmdc_ch0_axi_podf", base + 0x74, 20);
+	clk[mmdc_ch1_axi] = imx_clk_gate2("mmdc_ch1_axi",  "mmdc_ch1_axi_podf", base + 0x74, 22);
+	clk[ocram]        = imx_clk_gate2("ocram",         "ahb",               base + 0x74, 28);
+	clk[openvg_axi]   = imx_clk_gate2("openvg_axi",    "axi",               base + 0x74, 30);
+	clk[pcie_axi]     = imx_clk_gate2("pcie_axi",      "pcie_axi_sel",      base + 0x78, 0);
+	clk[pwm1]         = imx_clk_gate2("pwm1",          "ipg_per",           base + 0x78, 16);
+	clk[pwm2]         = imx_clk_gate2("pwm2",          "ipg_per",           base + 0x78, 18);
+	clk[pwm3]         = imx_clk_gate2("pwm3",          "ipg_per",           base + 0x78, 20);
+	clk[pwm4]         = imx_clk_gate2("pwm4",          "ipg_per",           base + 0x78, 22);
+	clk[gpmi_bch_apb] = imx_clk_gate2("gpmi_bch_apb",  "usdhc3",            base + 0x78, 24);
+	clk[gpmi_bch]     = imx_clk_gate2("gpmi_bch",      "usdhc4",            base + 0x78, 26);
+	clk[gpmi_io]      = imx_clk_gate2("gpmi_io",       "enfc",              base + 0x78, 28);
+	clk[gpmi_apb]     = imx_clk_gate2("gpmi_apb",      "usdhc3",            base + 0x78, 30);
+	clk[rom]          = imx_clk_gate2("rom",           "ahb",               base + 0x7c, 0);
+	clk[sata]         = imx_clk_gate2("sata",          "ipg",               base + 0x7c, 4);
+	clk[sdma]         = imx_clk_gate2("sdma",          "ahb",               base + 0x7c, 6);
+	clk[spba]         = imx_clk_gate2("spba",          "ipg",               base + 0x7c, 12);
+	clk[ssi1_ipg]     = imx_clk_gate2("ssi1_ipg",      "ipg",               base + 0x7c, 18);
+	clk[ssi2_ipg]     = imx_clk_gate2("ssi2_ipg",      "ipg",               base + 0x7c, 20);
+	clk[ssi3_ipg]     = imx_clk_gate2("ssi3_ipg",      "ipg",               base + 0x7c, 22);
+	clk[uart_ipg]     = imx_clk_gate2("uart_ipg",      "ipg",               base + 0x7c, 24);
+	clk[uart_serial]  = imx_clk_gate2("uart_serial",   "uart_serial_podf",  base + 0x7c, 26);
+	clk[usboh3]       = imx_clk_gate2("usboh3",        "ipg",               base + 0x80, 0);
+	clk[usdhc1]       = imx_clk_gate2("usdhc1",        "usdhc1_podf",       base + 0x80, 2);
+	clk[usdhc2]       = imx_clk_gate2("usdhc2",        "usdhc2_podf",       base + 0x80, 4);
+	clk[usdhc3]       = imx_clk_gate2("usdhc3",        "usdhc3_podf",       base + 0x80, 6);
+	clk[usdhc4]       = imx_clk_gate2("usdhc4",        "usdhc4_podf",       base + 0x80, 8);
+	clk[vdo_axi]      = imx_clk_gate2("vdo_axi",       "vdo_axi_sel",       base + 0x80, 12);
+	clk[vpu_axi]      = imx_clk_gate2("vpu_axi",       "vpu_axi_podf",      base + 0x80, 14);
+	clk[cko1]         = imx_clk_gate("cko1",           "cko1_podf",         base + 0x60, 7);
+
+	for (i = 0; i < ARRAY_SIZE(clk); i++)
+		if (IS_ERR(clk[i]))
+			pr_err("i.MX6q clk %d: register failed with %ld\n",
+				i, PTR_ERR(clk[i]));
+
+	clk_register_clkdev(clk[mmdc_ch0_axi], NULL, "mmdc_ch0_axi");
+	clk_register_clkdev(clk[mmdc_ch1_axi], NULL, "mmdc_ch1_axi");
+	clk_register_clkdev(clk[gpt_ipg], "ipg", "imx-gpt.0");
+	clk_register_clkdev(clk[gpt_ipg_per], "per", "imx-gpt.0");
+	clk_register_clkdev(clk[twd], NULL, "smp_twd");
+	clk_register_clkdev(clk[usboh3], NULL, "usboh3");
+	clk_register_clkdev(clk[uart_serial], "per", "2020000.serial");
+	clk_register_clkdev(clk[uart_ipg], "ipg", "2020000.serial");
+	clk_register_clkdev(clk[uart_serial], "per", "21e8000.serial");
+	clk_register_clkdev(clk[uart_ipg], "ipg", "21e8000.serial");
+	clk_register_clkdev(clk[uart_serial], "per", "21ec000.serial");
+	clk_register_clkdev(clk[uart_ipg], "ipg", "21ec000.serial");
+	clk_register_clkdev(clk[uart_serial], "per", "21f0000.serial");
+	clk_register_clkdev(clk[uart_ipg], "ipg", "21f0000.serial");
+	clk_register_clkdev(clk[uart_serial], "per", "21f4000.serial");
+	clk_register_clkdev(clk[uart_ipg], "ipg", "21f4000.serial");
+	clk_register_clkdev(clk[enet], NULL, "2188000.ethernet");
+	clk_register_clkdev(clk[usdhc1], NULL, "2190000.usdhc");
+	clk_register_clkdev(clk[usdhc2], NULL, "2194000.usdhc");
+	clk_register_clkdev(clk[usdhc3], NULL, "2198000.usdhc");
+	clk_register_clkdev(clk[usdhc4], NULL, "219c000.usdhc");
+	clk_register_clkdev(clk[i2c1], NULL, "21a0000.i2c");
+	clk_register_clkdev(clk[i2c2], NULL, "21a4000.i2c");
+	clk_register_clkdev(clk[i2c3], NULL, "21a8000.i2c");
+	clk_register_clkdev(clk[ecspi1], NULL, "2008000.ecspi");
+	clk_register_clkdev(clk[ecspi2], NULL, "200c000.ecspi");
+	clk_register_clkdev(clk[ecspi3], NULL, "2010000.ecspi");
+	clk_register_clkdev(clk[ecspi4], NULL, "2014000.ecspi");
+	clk_register_clkdev(clk[ecspi5], NULL, "2018000.ecspi");
+	clk_register_clkdev(clk[sdma], NULL, "20ec000.sdma");
+	clk_register_clkdev(clk[dummy], NULL, "20bc000.wdog");
+	clk_register_clkdev(clk[dummy], NULL, "20c0000.wdog");
+	clk_register_clkdev(clk[ssi1_ipg], NULL, "2028000.ssi");
+	clk_register_clkdev(clk[cko1_sel], "cko1_sel", NULL);
+	clk_register_clkdev(clk[ahb], "ahb", NULL);
+	clk_register_clkdev(clk[cko1], "cko1", NULL);
+
+	for (i = 0; i < ARRAY_SIZE(clks_init_on); i++)
+		clk_prepare_enable(clk[clks_init_on[i]]);
+
+	np = of_find_compatible_node(NULL, NULL, "fsl,imx6q-gpt");
+	base = of_iomap(np, 0);
+	WARN_ON(!base);
+	irq = irq_of_parse_and_map(np, 0);
+	mxc_timer_init(base, irq);
+
+	return 0;
+}
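Since "cko1" and "cko1_sel" are registered against a NULL device, the CKO1 clock output can be looked up by name alone, which is handy for checking clock rates on a scope during bring-up. A hedged sketch, assuming the CKO1 pad is already muxed out (this code is illustrative, not part of this patch):

	struct clk *sel = clk_get(NULL, "cko1_sel");
	struct clk *cko = clk_get(NULL, "cko1");
	struct clk *ahb = clk_get(NULL, "ahb");

	if (!IS_ERR(sel) && !IS_ERR(cko) && !IS_ERR(ahb)) {
		clk_set_parent(sel, ahb);	/* route the AHB clock to CKO1 */
		clk_prepare_enable(cko);
		pr_info("CKO1 at %lu Hz\n", clk_get_rate(cko));
	}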
diff --git a/arch/arm/mach-imx/clk-pfd.c b/arch/arm/mach-imx/clk-pfd.c
new file mode 100644
index 0000000..e2ed416
--- /dev/null
+++ b/arch/arm/mach-imx/clk-pfd.c
@@ -0,0 +1,147 @@
+/*
+ * Copyright 2012 Freescale Semiconductor, Inc.
+ * Copyright 2012 Linaro Ltd.
+ *
+ * The code contained herein is licensed under the GNU General Public
+ * License. You may obtain a copy of the GNU General Public License
+ * Version 2 or later at the following locations:
+ *
+ * http://www.opensource.org/licenses/gpl-license.html
+ * http://www.gnu.org/copyleft/gpl.html
+ */
+
+#include <linux/clk.h>
+#include <linux/clk-provider.h>
+#include <linux/io.h>
+#include <linux/slab.h>
+#include <linux/err.h>
+#include "clk.h"
+
+/**
+ * struct clk_pfd - i.MX PFD clock
+ * @hw:		clock source
+ * @reg:	PFD register address
+ * @idx:	index of this PFD within the register
+ *
+ * PFD clock found on the i.MX6 series.  Each PFD register encodes four
+ * PFD outputs, and @idx selects which one this clock controls.  Each
+ * register also has SET, CLR and TOG variants at offsets 0x4, 0x8 and 0xc.
+ */
+struct clk_pfd {
+	struct clk_hw	hw;
+	void __iomem	*reg;
+	u8		idx;
+};
+
+#define to_clk_pfd(_hw) container_of(_hw, struct clk_pfd, hw)
+
+#define SET	0x4
+#define CLR	0x8
+#define TOG	0xc
+
+static int clk_pfd_enable(struct clk_hw *hw)
+{
+	struct clk_pfd *pfd = to_clk_pfd(hw);
+
+	writel_relaxed(1 << ((pfd->idx + 1) * 8 - 1), pfd->reg + CLR);
+
+	return 0;
+}
+
+static void clk_pfd_disable(struct clk_hw *hw)
+{
+	struct clk_pfd *pfd = to_clk_pfd(hw);
+
+	writel_relaxed(1 << ((pfd->idx + 1) * 8 - 1), pfd->reg + SET);
+}
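The mask written to the SET/CLR aliases above is the clock-gate bit of the 8-bit field belonging to PFD idx, i.e. bit 7, 15, 23 or 31; writing it to the CLR alias ungates the output and writing it to the SET alias gates it, so no read-modify-write of the register is needed. A hypothetical helper, shown only to spell the bit position out (not part of this patch):

	static inline u32 pfd_gate_mask(u8 idx)
	{
		return 1U << (idx * 8 + 7);	/* same as 1 << ((idx + 1) * 8 - 1) */
	}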
+
+static unsigned long clk_pfd_recalc_rate(struct clk_hw *hw,
+					 unsigned long parent_rate)
+{
+	struct clk_pfd *pfd = to_clk_pfd(hw);
+	u64 tmp = parent_rate;
+	u8 frac = (readl_relaxed(pfd->reg) >> (pfd->idx * 8)) & 0x3f;
+
+	tmp *= 18;
+	do_div(tmp, frac);
+
+	return tmp;
+}
+
+static long clk_pfd_round_rate(struct clk_hw *hw, unsigned long rate,
+			       unsigned long *prate)
+{
+	u64 tmp = *prate;
+	u8 frac;
+
+	tmp = tmp * 18 + rate / 2;
+	do_div(tmp, rate);
+	frac = tmp;
+	if (frac < 12)
+		frac = 12;
+	else if (frac > 35)
+		frac = 35;
+	tmp = *prate;
+	tmp *= 18;
+	do_div(tmp, frac);
+
+	return tmp;
+}
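The rounding above (and the set_rate path that follows) reduces to rate = parent * 18 / frac with frac clamped to 12..35. Assuming the nominal i.MX6 parent rates of 528 MHz (pll2_bus) and 480 MHz (pll3_usb_otg) — an assumption about the SoC, not something this patch sets — the fixed-rate names used in clk-imx6q.c fall straight out of that formula. A small standalone sketch of the same arithmetic:

	#include <stdio.h>

	/* Same relation clk_pfd_recalc_rate() implements. */
	static unsigned long pfd_rate(unsigned long parent, unsigned int frac)
	{
		return (unsigned long)((unsigned long long)parent * 18 / frac);
	}

	int main(void)
	{
		printf("%lu\n", pfd_rate(528000000UL, 24));	/* pll2_pfd2: 396 MHz */
		printf("%lu\n", pfd_rate(528000000UL, 27));	/* pll2_pfd0: 352 MHz */
		printf("%lu\n", pfd_rate(480000000UL, 16));	/* pll3_pfd1: 540 MHz */
		return 0;
	}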
+
+static int clk_pfd_set_rate(struct clk_hw *hw, unsigned long rate,
+		unsigned long parent_rate)
+{
+	struct clk_pfd *pfd = to_clk_pfd(hw);
+	u64 tmp = parent_rate;
+	u8 frac;
+
+	tmp = tmp * 18 + rate / 2;
+	do_div(tmp, rate);
+	frac = tmp;
+	if (frac < 12)
+		frac = 12;
+	else if (frac > 35)
+		frac = 35;
+
+	writel_relaxed(0x3f << (pfd->idx * 8), pfd->reg + CLR);
+	writel_relaxed(frac << (pfd->idx * 8), pfd->reg + SET);
+
+	return 0;
+}
+
+static const struct clk_ops clk_pfd_ops = {
+	.enable		= clk_pfd_enable,
+	.disable	= clk_pfd_disable,
+	.recalc_rate	= clk_pfd_recalc_rate,
+	.round_rate	= clk_pfd_round_rate,
+	.set_rate	= clk_pfd_set_rate,
+};
+
+struct clk *imx_clk_pfd(const char *name, const char *parent_name,
+			void __iomem *reg, u8 idx)
+{
+	struct clk_pfd *pfd;
+	struct clk *clk;
+	struct clk_init_data init;
+
+	pfd = kzalloc(sizeof(*pfd), GFP_KERNEL);
+	if (!pfd)
+		return ERR_PTR(-ENOMEM);
+
+	pfd->reg = reg;
+	pfd->idx = idx;
+
+	init.name = name;
+	init.ops = &clk_pfd_ops;
+	init.flags = 0;
+	init.parent_names = &parent_name;
+	init.num_parents = 1;
+
+	pfd->hw.init = &init;
+
+	clk = clk_register(NULL, &pfd->hw);
+	if (IS_ERR(clk))
+		kfree(pfd);
+
+	return clk;
+}
diff --git a/arch/arm/mach-imx/clk-pllv1.c b/arch/arm/mach-imx/clk-pllv1.c
new file mode 100644
index 0000000..2d856f9c
--- /dev/null
+++ b/arch/arm/mach-imx/clk-pllv1.c
@@ -0,0 +1,66 @@
+#include <linux/clk.h>
+#include <linux/clk-provider.h>
+#include <linux/io.h>
+#include <linux/slab.h>
+#include <linux/kernel.h>
+#include <linux/err.h>
+#include <mach/common.h>
+#include <mach/hardware.h>
+#include <mach/clock.h>
+#include "clk.h"
+
+/**
+ * struct clk_pllv1 - i.MX PLL v1 clock
+ * @hw:		clock source
+ * @base:	base address of the PLL registers
+ *
+ * PLL clock version 1, found on i.MX1/21/25/27/31/35.  The parent clock
+ * name is supplied at registration time via imx_clk_pllv1().
+ */
+struct clk_pllv1 {
+	struct clk_hw	hw;
+	void __iomem	*base;
+};
+
+#define to_clk_pllv1(_hw) container_of(_hw, struct clk_pllv1, hw)
+
+static unsigned long clk_pllv1_recalc_rate(struct clk_hw *hw,
+		unsigned long parent_rate)
+{
+	struct clk_pllv1 *pll = to_clk_pllv1(hw);
+
+	return mxc_decode_pll(readl(pll->base), parent_rate);
+}
+
+struct clk_ops clk_pllv1_ops = {
+	.recalc_rate = clk_pllv1_recalc_rate,
+};
+
+struct clk *imx_clk_pllv1(const char *name, const char *parent,
+		void __iomem *base)
+{
+	struct clk_pllv1 *pll;
+	struct clk *clk;
+	struct clk_init_data init;
+
+	pll = kmalloc(sizeof(*pll), GFP_KERNEL);
+	if (!pll)
+		return ERR_PTR(-ENOMEM);
+
+	pll->base = base;
+
+	init.name = name;
+	init.ops = &clk_pllv1_ops;
+	init.flags = 0;
+	init.parent_names = &parent;
+	init.num_parents = 1;
+
+	pll->hw.init = &init;
+
+	clk = clk_register(NULL, &pll->hw);
+	if (IS_ERR(clk))
+		kfree(pll);
+
+	return clk;
+}
diff --git a/arch/arm/mach-imx/clk-pllv2.c b/arch/arm/mach-imx/clk-pllv2.c
new file mode 100644
index 0000000..0440379
--- /dev/null
+++ b/arch/arm/mach-imx/clk-pllv2.c
@@ -0,0 +1,266 @@
+#include <linux/kernel.h>
+#include <linux/clk.h>
+#include <linux/io.h>
+#include <linux/errno.h>
+#include <linux/delay.h>
+#include <linux/slab.h>
+#include <linux/err.h>
+
+#include <asm/div64.h>
+
+#include "clk.h"
+
+#define to_clk_pllv2(_hw) container_of(_hw, struct clk_pllv2, hw)
+
+/* PLL Register Offsets */
+#define MXC_PLL_DP_CTL			0x00
+#define MXC_PLL_DP_CONFIG		0x04
+#define MXC_PLL_DP_OP			0x08
+#define MXC_PLL_DP_MFD			0x0C
+#define MXC_PLL_DP_MFN			0x10
+#define MXC_PLL_DP_MFNMINUS		0x14
+#define MXC_PLL_DP_MFNPLUS		0x18
+#define MXC_PLL_DP_HFS_OP		0x1C
+#define MXC_PLL_DP_HFS_MFD		0x20
+#define MXC_PLL_DP_HFS_MFN		0x24
+#define MXC_PLL_DP_MFN_TOGC		0x28
+#define MXC_PLL_DP_DESTAT		0x2c
+
+/* PLL Register Bit definitions */
+#define MXC_PLL_DP_CTL_MUL_CTRL		0x2000
+#define MXC_PLL_DP_CTL_DPDCK0_2_EN	0x1000
+#define MXC_PLL_DP_CTL_DPDCK0_2_OFFSET	12
+#define MXC_PLL_DP_CTL_ADE		0x800
+#define MXC_PLL_DP_CTL_REF_CLK_DIV	0x400
+#define MXC_PLL_DP_CTL_REF_CLK_SEL_MASK	(3 << 8)
+#define MXC_PLL_DP_CTL_REF_CLK_SEL_OFFSET	8
+#define MXC_PLL_DP_CTL_HFSM		0x80
+#define MXC_PLL_DP_CTL_PRE		0x40
+#define MXC_PLL_DP_CTL_UPEN		0x20
+#define MXC_PLL_DP_CTL_RST		0x10
+#define MXC_PLL_DP_CTL_RCP		0x8
+#define MXC_PLL_DP_CTL_PLM		0x4
+#define MXC_PLL_DP_CTL_BRM0		0x2
+#define MXC_PLL_DP_CTL_LRF		0x1
+
+#define MXC_PLL_DP_CONFIG_BIST		0x8
+#define MXC_PLL_DP_CONFIG_SJC_CE	0x4
+#define MXC_PLL_DP_CONFIG_AREN		0x2
+#define MXC_PLL_DP_CONFIG_LDREQ		0x1
+
+#define MXC_PLL_DP_OP_MFI_OFFSET	4
+#define MXC_PLL_DP_OP_MFI_MASK		(0xF << 4)
+#define MXC_PLL_DP_OP_PDF_OFFSET	0
+#define MXC_PLL_DP_OP_PDF_MASK		0xF
+
+#define MXC_PLL_DP_MFD_OFFSET		0
+#define MXC_PLL_DP_MFD_MASK		0x07FFFFFF
+
+#define MXC_PLL_DP_MFN_OFFSET		0x0
+#define MXC_PLL_DP_MFN_MASK		0x07FFFFFF
+
+#define MXC_PLL_DP_MFN_TOGC_TOG_DIS	(1 << 17)
+#define MXC_PLL_DP_MFN_TOGC_TOG_EN	(1 << 16)
+#define MXC_PLL_DP_MFN_TOGC_CNT_OFFSET	0x0
+#define MXC_PLL_DP_MFN_TOGC_CNT_MASK	0xFFFF
+
+#define MXC_PLL_DP_DESTAT_TOG_SEL	(1 << 31)
+#define MXC_PLL_DP_DESTAT_MFN		0x07FFFFFF
+
+#define MAX_DPLL_WAIT_TRIES	1000 /* 1000 * udelay(1) = 1ms */
+
+struct clk_pllv2 {
+	struct clk_hw	hw;
+	void __iomem	*base;
+};
+
+static unsigned long __clk_pllv2_recalc_rate(unsigned long parent_rate,
+		u32 dp_ctl, u32 dp_op, u32 dp_mfd, u32 dp_mfn)
+{
+	long mfi, mfn, mfd, pdf, ref_clk, mfn_abs;
+	unsigned long dbl;
+	s64 temp;
+
+	dbl = dp_ctl & MXC_PLL_DP_CTL_DPDCK0_2_EN;
+
+	pdf = dp_op & MXC_PLL_DP_OP_PDF_MASK;
+	mfi = (dp_op & MXC_PLL_DP_OP_MFI_MASK) >> MXC_PLL_DP_OP_MFI_OFFSET;
+	mfi = (mfi <= 5) ? 5 : mfi;
+	mfd = dp_mfd & MXC_PLL_DP_MFD_MASK;
+	mfn = mfn_abs = dp_mfn & MXC_PLL_DP_MFN_MASK;
+	/* Sign extend to 32-bits */
+	if (mfn >= 0x04000000) {
+		mfn |= 0xFC000000;
+		mfn_abs = -mfn;
+	}
+
+	ref_clk = 2 * parent_rate;
+	if (dbl != 0)
+		ref_clk *= 2;
+
+	ref_clk /= (pdf + 1);
+	temp = (u64) ref_clk * mfn_abs;
+	do_div(temp, mfd + 1);
+	if (mfn < 0)
+		temp = -temp;
+	temp = (ref_clk * mfi) + temp;
+
+	return temp;
+}
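Put differently, the DPLL output is ref * (mfi + mfn / (mfd + 1)) with ref = 2 * parent / (pdf + 1), doubled again when DPDCK0_2 is set. A standalone sketch of the positive-mfn case; the 24 MHz / 800 MHz numbers are the usual i.MX51 PLL1 values and are illustrative, not read from hardware:

	#include <stdio.h>

	/* Positive-mfn subset of __clk_pllv2_recalc_rate(), DPDCK0_2 assumed set. */
	static unsigned long long pllv2_rate(unsigned long parent, unsigned int pdf,
					     unsigned int mfi, unsigned int mfd,
					     unsigned int mfn)
	{
		unsigned long long ref = 4ULL * parent / (pdf + 1);

		return ref * mfi + ref * mfn / (mfd + 1);
	}

	int main(void)
	{
		/* pdf = 0, mfi = 8, mfd = 2, mfn = 1 -> 800000000 */
		printf("%llu\n", pllv2_rate(24000000, 0, 8, 2, 1));
		return 0;
	}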
+
+static unsigned long clk_pllv2_recalc_rate(struct clk_hw *hw,
+		unsigned long parent_rate)
+{
+	u32 dp_op, dp_mfd, dp_mfn, dp_ctl;
+	void __iomem *pllbase;
+	struct clk_pllv2 *pll = to_clk_pllv2(hw);
+
+	pllbase = pll->base;
+
+	dp_ctl = __raw_readl(pllbase + MXC_PLL_DP_CTL);
+	dp_op = __raw_readl(pllbase + MXC_PLL_DP_OP);
+	dp_mfd = __raw_readl(pllbase + MXC_PLL_DP_MFD);
+	dp_mfn = __raw_readl(pllbase + MXC_PLL_DP_MFN);
+
+	return __clk_pllv2_recalc_rate(parent_rate, dp_ctl, dp_op, dp_mfd, dp_mfn);
+}
+
+static int __clk_pllv2_set_rate(unsigned long rate, unsigned long parent_rate,
+		u32 *dp_op, u32 *dp_mfd, u32 *dp_mfn)
+{
+	u32 reg;
+	long mfi, pdf, mfn, mfd = 999999;
+	s64 temp64;
+	unsigned long quad_parent_rate;
+
+	quad_parent_rate = 4 * parent_rate;
+	pdf = mfi = -1;
+	while (++pdf < 16 && mfi < 5)
+		mfi = rate * (pdf+1) / quad_parent_rate;
+	if (mfi > 15)
+		return -EINVAL;
+	pdf--;
+
+	temp64 = rate * (pdf + 1) - quad_parent_rate * mfi;
+	do_div(temp64, quad_parent_rate / 1000000);
+	mfn = (long)temp64;
+
+	reg = mfi << 4 | pdf;
+
+	*dp_op = reg;
+	*dp_mfd = mfd;
+	*dp_mfn = mfn;
+
+	return 0;
+}
+
+static int clk_pllv2_set_rate(struct clk_hw *hw, unsigned long rate,
+		unsigned long parent_rate)
+{
+	struct clk_pllv2 *pll = to_clk_pllv2(hw);
+	void __iomem *pllbase;
+	u32 dp_ctl, dp_op, dp_mfd, dp_mfn;
+	int ret;
+
+	pllbase = pll->base;
+
+
+	ret = __clk_pllv2_set_rate(rate, parent_rate, &dp_op, &dp_mfd, &dp_mfn);
+	if (ret)
+		return ret;
+
+	dp_ctl = __raw_readl(pllbase + MXC_PLL_DP_CTL);
+	/* use dpdck0_2 */
+	__raw_writel(dp_ctl | 0x1000L, pllbase + MXC_PLL_DP_CTL);
+
+	__raw_writel(dp_op, pllbase + MXC_PLL_DP_OP);
+	__raw_writel(dp_mfd, pllbase + MXC_PLL_DP_MFD);
+	__raw_writel(dp_mfn, pllbase + MXC_PLL_DP_MFN);
+
+	return 0;
+}
+
+static long clk_pllv2_round_rate(struct clk_hw *hw, unsigned long rate,
+		unsigned long *prate)
+{
+	u32 dp_op, dp_mfd, dp_mfn;
+
+	__clk_pllv2_set_rate(rate, *prate, &dp_op, &dp_mfd, &dp_mfn);
+	return __clk_pllv2_recalc_rate(*prate, MXC_PLL_DP_CTL_DPDCK0_2_EN,
+			dp_op, dp_mfd, dp_mfn);
+}
+
+static int clk_pllv2_prepare(struct clk_hw *hw)
+{
+	struct clk_pllv2 *pll = to_clk_pllv2(hw);
+	u32 reg;
+	void __iomem *pllbase;
+	int i = 0;
+
+	pllbase = pll->base;
+	reg = __raw_readl(pllbase + MXC_PLL_DP_CTL) | MXC_PLL_DP_CTL_UPEN;
+	__raw_writel(reg, pllbase + MXC_PLL_DP_CTL);
+
+	/* Wait for lock */
+	do {
+		reg = __raw_readl(pllbase + MXC_PLL_DP_CTL);
+		if (reg & MXC_PLL_DP_CTL_LRF)
+			break;
+
+		udelay(1);
+	} while (++i < MAX_DPLL_WAIT_TRIES);
+
+	if (i == MAX_DPLL_WAIT_TRIES) {
+		pr_err("MX5: pll locking failed\n");
+		return -EINVAL;
+	}
+
+	return 0;
+}
+
+static void clk_pllv2_unprepare(struct clk_hw *hw)
+{
+	struct clk_pllv2 *pll = to_clk_pllv2(hw);
+	u32 reg;
+	void __iomem *pllbase;
+
+	pllbase = pll->base;
+	reg = __raw_readl(pllbase + MXC_PLL_DP_CTL) & ~MXC_PLL_DP_CTL_UPEN;
+	__raw_writel(reg, pllbase + MXC_PLL_DP_CTL);
+}
+
+struct clk_ops clk_pllv2_ops = {
+	.prepare = clk_pllv2_prepare,
+	.unprepare = clk_pllv2_unprepare,
+	.recalc_rate = clk_pllv2_recalc_rate,
+	.round_rate = clk_pllv2_round_rate,
+	.set_rate = clk_pllv2_set_rate,
+};
+
+struct clk *imx_clk_pllv2(const char *name, const char *parent,
+		void __iomem *base)
+{
+	struct clk_pllv2 *pll;
+	struct clk *clk;
+	struct clk_init_data init;
+
+	pll = kzalloc(sizeof(*pll), GFP_KERNEL);
+	if (!pll)
+		return ERR_PTR(-ENOMEM);
+
+	pll->base = base;
+
+	init.name = name;
+	init.ops = &clk_pllv2_ops;
+	init.flags = 0;
+	init.parent_names = &parent;
+	init.num_parents = 1;
+
+	pll->hw.init = &init;
+
+	clk = clk_register(NULL, &pll->hw);
+	if (IS_ERR(clk))
+		kfree(pll);
+
+	return clk;
+}
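
For reference, the rate computed by __clk_pllv2_recalc_rate() above follows
rate = ref * mfi + ref * mfn / (mfd + 1), where ref is twice the parent rate
(doubled again when DPDCK0_2 is enabled) divided by (pdf + 1).  A standalone
user-space sketch of that arithmetic, with illustrative register values that
work out to 800 MHz from a 24 MHz reference (negative-MFN handling omitted):

	#include <stdio.h>

	/* Mirror of the PLLv2 rate equation; not kernel code, just the math. */
	static unsigned long long pllv2_rate(unsigned long long parent, int dbl,
					     int pdf, int mfi, long mfn, long mfd)
	{
		unsigned long long ref = 2 * parent * (dbl ? 2 : 1) / (pdf + 1);

		return ref * mfi + ref * mfn / (mfd + 1);
	}

	int main(void)
	{
		/* illustrative values: dbl set, pdf=0, mfi=8, mfn=1, mfd=2 */
		printf("%llu Hz\n", pllv2_rate(24000000, 1, 0, 8, 1, 2));
		return 0;
	}
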
diff --git a/arch/arm/mach-imx/clk-pllv3.c b/arch/arm/mach-imx/clk-pllv3.c
new file mode 100644
index 0000000..36aac94
--- /dev/null
+++ b/arch/arm/mach-imx/clk-pllv3.c
@@ -0,0 +1,419 @@
+/*
+ * Copyright 2012 Freescale Semiconductor, Inc.
+ * Copyright 2012 Linaro Ltd.
+ *
+ * The code contained herein is licensed under the GNU General Public
+ * License. You may obtain a copy of the GNU General Public License
+ * Version 2 or later at the following locations:
+ *
+ * http://www.opensource.org/licenses/gpl-license.html
+ * http://www.gnu.org/copyleft/gpl.html
+ */
+
+#include <linux/clk.h>
+#include <linux/clk-provider.h>
+#include <linux/io.h>
+#include <linux/slab.h>
+#include <linux/jiffies.h>
+#include <linux/err.h>
+#include "clk.h"
+
+#define PLL_NUM_OFFSET		0x10
+#define PLL_DENOM_OFFSET	0x20
+
+#define BM_PLL_POWER		(0x1 << 12)
+#define BM_PLL_ENABLE		(0x1 << 13)
+#define BM_PLL_BYPASS		(0x1 << 16)
+#define BM_PLL_LOCK		(0x1 << 31)
+
+/**
+ * struct clk_pllv3 - IMX PLL clock version 3
+ * @hw:		 clock source
+ * @base:	 base address of PLL registers
+ * @powerup_set: set POWER bit to power up the PLL
+ * @gate_mask:	 mask of gate bits
+ * @div_mask:	 mask of divider bits
+ *
+ * IMX PLL clock version 3, found on i.MX6 series.  The divider for
+ * pllv3 is actually a multiplier, and it always sits at bit 0.
+ */
+struct clk_pllv3 {
+	struct clk_hw	hw;
+	void __iomem	*base;
+	bool		powerup_set;
+	u32		gate_mask;
+	u32		div_mask;
+};
+
+#define to_clk_pllv3(_hw) container_of(_hw, struct clk_pllv3, hw)
+
+static int clk_pllv3_prepare(struct clk_hw *hw)
+{
+	struct clk_pllv3 *pll = to_clk_pllv3(hw);
+	unsigned long timeout = jiffies + msecs_to_jiffies(10);
+	u32 val;
+
+	val = readl_relaxed(pll->base);
+	val &= ~BM_PLL_BYPASS;
+	if (pll->powerup_set)
+		val |= BM_PLL_POWER;
+	else
+		val &= ~BM_PLL_POWER;
+	writel_relaxed(val, pll->base);
+
+	/* Wait for PLL to lock */
+	while (!(readl_relaxed(pll->base) & BM_PLL_LOCK))
+		if (time_after(jiffies, timeout))
+			return -ETIMEDOUT;
+
+	return 0;
+}
+
+static void clk_pllv3_unprepare(struct clk_hw *hw)
+{
+	struct clk_pllv3 *pll = to_clk_pllv3(hw);
+	u32 val;
+
+	val = readl_relaxed(pll->base);
+	val |= BM_PLL_BYPASS;
+	if (pll->powerup_set)
+		val &= ~BM_PLL_POWER;
+	else
+		val |= BM_PLL_POWER;
+	writel_relaxed(val, pll->base);
+}
+
+static int clk_pllv3_enable(struct clk_hw *hw)
+{
+	struct clk_pllv3 *pll = to_clk_pllv3(hw);
+	u32 val;
+
+	val = readl_relaxed(pll->base);
+	val |= pll->gate_mask;
+	writel_relaxed(val, pll->base);
+
+	return 0;
+}
+
+static void clk_pllv3_disable(struct clk_hw *hw)
+{
+	struct clk_pllv3 *pll = to_clk_pllv3(hw);
+	u32 val;
+
+	val = readl_relaxed(pll->base);
+	val &= ~pll->gate_mask;
+	writel_relaxed(val, pll->base);
+}
+
+static unsigned long clk_pllv3_recalc_rate(struct clk_hw *hw,
+					   unsigned long parent_rate)
+{
+	struct clk_pllv3 *pll = to_clk_pllv3(hw);
+	u32 div = readl_relaxed(pll->base) & pll->div_mask;
+
+	return (div == 1) ? parent_rate * 22 : parent_rate * 20;
+}
+
+static long clk_pllv3_round_rate(struct clk_hw *hw, unsigned long rate,
+				 unsigned long *prate)
+{
+	unsigned long parent_rate = *prate;
+
+	return (rate >= parent_rate * 22) ? parent_rate * 22 :
+					    parent_rate * 20;
+}
+
+static int clk_pllv3_set_rate(struct clk_hw *hw, unsigned long rate,
+		unsigned long parent_rate)
+{
+	struct clk_pllv3 *pll = to_clk_pllv3(hw);
+	u32 val, div;
+
+	if (rate == parent_rate * 22)
+		div = 1;
+	else if (rate == parent_rate * 20)
+		div = 0;
+	else
+		return -EINVAL;
+
+	val = readl_relaxed(pll->base);
+	val &= ~pll->div_mask;
+	val |= div;
+	writel_relaxed(val, pll->base);
+
+	return 0;
+}
+
+static const struct clk_ops clk_pllv3_ops = {
+	.prepare	= clk_pllv3_prepare,
+	.unprepare	= clk_pllv3_unprepare,
+	.enable		= clk_pllv3_enable,
+	.disable	= clk_pllv3_disable,
+	.recalc_rate	= clk_pllv3_recalc_rate,
+	.round_rate	= clk_pllv3_round_rate,
+	.set_rate	= clk_pllv3_set_rate,
+};
+
+static unsigned long clk_pllv3_sys_recalc_rate(struct clk_hw *hw,
+					       unsigned long parent_rate)
+{
+	struct clk_pllv3 *pll = to_clk_pllv3(hw);
+	u32 div = readl_relaxed(pll->base) & pll->div_mask;
+
+	return parent_rate * div / 2;
+}
+
+static long clk_pllv3_sys_round_rate(struct clk_hw *hw, unsigned long rate,
+				     unsigned long *prate)
+{
+	unsigned long parent_rate = *prate;
+	unsigned long min_rate = parent_rate * 54 / 2;
+	unsigned long max_rate = parent_rate * 108 / 2;
+	u32 div;
+
+	if (rate > max_rate)
+		rate = max_rate;
+	else if (rate < min_rate)
+		rate = min_rate;
+	div = rate * 2 / parent_rate;
+
+	return parent_rate * div / 2;
+}
+
+static int clk_pllv3_sys_set_rate(struct clk_hw *hw, unsigned long rate,
+		unsigned long parent_rate)
+{
+	struct clk_pllv3 *pll = to_clk_pllv3(hw);
+	unsigned long min_rate = parent_rate * 54 / 2;
+	unsigned long max_rate = parent_rate * 108 / 2;
+	u32 val, div;
+
+	if (rate < min_rate || rate > max_rate)
+		return -EINVAL;
+
+	div = rate * 2 / parent_rate;
+	val = readl_relaxed(pll->base);
+	val &= ~pll->div_mask;
+	val |= div;
+	writel_relaxed(val, pll->base);
+
+	return 0;
+}
+
+static const struct clk_ops clk_pllv3_sys_ops = {
+	.prepare	= clk_pllv3_prepare,
+	.unprepare	= clk_pllv3_unprepare,
+	.enable		= clk_pllv3_enable,
+	.disable	= clk_pllv3_disable,
+	.recalc_rate	= clk_pllv3_sys_recalc_rate,
+	.round_rate	= clk_pllv3_sys_round_rate,
+	.set_rate	= clk_pllv3_sys_set_rate,
+};
+
+static unsigned long clk_pllv3_av_recalc_rate(struct clk_hw *hw,
+					      unsigned long parent_rate)
+{
+	struct clk_pllv3 *pll = to_clk_pllv3(hw);
+	u32 mfn = readl_relaxed(pll->base + PLL_NUM_OFFSET);
+	u32 mfd = readl_relaxed(pll->base + PLL_DENOM_OFFSET);
+	u32 div = readl_relaxed(pll->base) & pll->div_mask;
+
+	return (parent_rate * div) + ((parent_rate / mfd) * mfn);
+}
+
+static long clk_pllv3_av_round_rate(struct clk_hw *hw, unsigned long rate,
+				    unsigned long *prate)
+{
+	unsigned long parent_rate = *prate;
+	unsigned long min_rate = parent_rate * 27;
+	unsigned long max_rate = parent_rate * 54;
+	u32 div;
+	u32 mfn, mfd = 1000000;
+	s64 temp64;
+
+	if (rate > max_rate)
+		rate = max_rate;
+	else if (rate < min_rate)
+		rate = min_rate;
+
+	div = rate / parent_rate;
+	temp64 = (u64) (rate - div * parent_rate);
+	temp64 *= mfd;
+	do_div(temp64, parent_rate);
+	mfn = temp64;
+
+	return parent_rate * div + parent_rate / mfd * mfn;
+}
+
+static int clk_pllv3_av_set_rate(struct clk_hw *hw, unsigned long rate,
+		unsigned long parent_rate)
+{
+	struct clk_pllv3 *pll = to_clk_pllv3(hw);
+	unsigned long min_rate = parent_rate * 27;
+	unsigned long max_rate = parent_rate * 54;
+	u32 val, div;
+	u32 mfn, mfd = 1000000;
+	s64 temp64;
+
+	if (rate < min_rate || rate > max_rate)
+		return -EINVAL;
+
+	div = rate / parent_rate;
+	temp64 = (u64) (rate - div * parent_rate);
+	temp64 *= mfd;
+	do_div(temp64, parent_rate);
+	mfn = temp64;
+
+	val = readl_relaxed(pll->base);
+	val &= ~pll->div_mask;
+	val |= div;
+	writel_relaxed(val, pll->base);
+	writel_relaxed(mfn, pll->base + PLL_NUM_OFFSET);
+	writel_relaxed(mfd, pll->base + PLL_DENOM_OFFSET);
+
+	return 0;
+}
+
+static const struct clk_ops clk_pllv3_av_ops = {
+	.prepare	= clk_pllv3_prepare,
+	.unprepare	= clk_pllv3_unprepare,
+	.enable		= clk_pllv3_enable,
+	.disable	= clk_pllv3_disable,
+	.recalc_rate	= clk_pllv3_av_recalc_rate,
+	.round_rate	= clk_pllv3_av_round_rate,
+	.set_rate	= clk_pllv3_av_set_rate,
+};
+
+static unsigned long clk_pllv3_enet_recalc_rate(struct clk_hw *hw,
+						unsigned long parent_rate)
+{
+	struct clk_pllv3 *pll = to_clk_pllv3(hw);
+	u32 div = readl_relaxed(pll->base) & pll->div_mask;
+
+	switch (div) {
+	case 0:
+		return 25000000;
+	case 1:
+		return 50000000;
+	case 2:
+		return 100000000;
+	case 3:
+		return 125000000;
+	}
+
+	return 0;
+}
+
+static long clk_pllv3_enet_round_rate(struct clk_hw *hw, unsigned long rate,
+				      unsigned long *prate)
+{
+	if (rate >= 125000000)
+		rate = 125000000;
+	else if (rate >= 100000000)
+		rate = 100000000;
+	else if (rate >= 50000000)
+		rate = 50000000;
+	else
+		rate = 25000000;
+	return rate;
+}
+
+static int clk_pllv3_enet_set_rate(struct clk_hw *hw, unsigned long rate,
+		unsigned long parent_rate)
+{
+	struct clk_pllv3 *pll = to_clk_pllv3(hw);
+	u32 val, div;
+
+	switch (rate) {
+	case 25000000:
+		div = 0;
+		break;
+	case 50000000:
+		div = 1;
+		break;
+	case 100000000:
+		div = 2;
+		break;
+	case 125000000:
+		div = 3;
+		break;
+	default:
+		return -EINVAL;
+	}
+
+	val = readl_relaxed(pll->base);
+	val &= ~pll->div_mask;
+	val |= div;
+	writel_relaxed(val, pll->base);
+
+	return 0;
+}
+
+static const struct clk_ops clk_pllv3_enet_ops = {
+	.prepare	= clk_pllv3_prepare,
+	.unprepare	= clk_pllv3_unprepare,
+	.enable		= clk_pllv3_enable,
+	.disable	= clk_pllv3_disable,
+	.recalc_rate	= clk_pllv3_enet_recalc_rate,
+	.round_rate	= clk_pllv3_enet_round_rate,
+	.set_rate	= clk_pllv3_enet_set_rate,
+};
+
+static const struct clk_ops clk_pllv3_mlb_ops = {
+	.prepare	= clk_pllv3_prepare,
+	.unprepare	= clk_pllv3_unprepare,
+	.enable		= clk_pllv3_enable,
+	.disable	= clk_pllv3_disable,
+};
+
+struct clk *imx_clk_pllv3(enum imx_pllv3_type type, const char *name,
+			  const char *parent_name, void __iomem *base,
+			  u32 gate_mask, u32 div_mask)
+{
+	struct clk_pllv3 *pll;
+	const struct clk_ops *ops;
+	struct clk *clk;
+	struct clk_init_data init;
+
+	pll = kzalloc(sizeof(*pll), GFP_KERNEL);
+	if (!pll)
+		return ERR_PTR(-ENOMEM);
+
+	switch (type) {
+	case IMX_PLLV3_SYS:
+		ops = &clk_pllv3_sys_ops;
+		break;
+	case IMX_PLLV3_USB:
+		ops = &clk_pllv3_ops;
+		pll->powerup_set = true;
+		break;
+	case IMX_PLLV3_AV:
+		ops = &clk_pllv3_av_ops;
+		break;
+	case IMX_PLLV3_ENET:
+		ops = &clk_pllv3_enet_ops;
+		break;
+	case IMX_PLLV3_MLB:
+		ops = &clk_pllv3_mlb_ops;
+		break;
+	default:
+		ops = &clk_pllv3_ops;
+	}
+	pll->base = base;
+	pll->gate_mask = gate_mask;
+	pll->div_mask = div_mask;
+
+	init.name = name;
+	init.ops = ops;
+	init.flags = 0;
+	init.parent_names = &parent_name;
+	init.num_parents = 1;
+
+	pll->hw.init = &init;
+
+	clk = clk_register(NULL, &pll->hw);
+	if (IS_ERR(clk))
+		kfree(pll);
+
+	return clk;
+}
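
The pllv3 variants above differ only in their rate math: the generic/USB type
multiplies the parent rate by 20 or 22, the SYS type by div/2 with div between
54 and 108, the ENET type ignores the parent and selects one of four fixed
rates, and the AV type adds an mfn/mfd fraction (mfd fixed at 1000000) on top
of an integer multiplier.  A small user-space sketch of the split performed by
clk_pllv3_av_round_rate(), with made-up numbers:

	#include <stdio.h>
	#include <stdint.h>

	int main(void)
	{
		uint64_t parent = 24000000, rate = 650000000, mfd = 1000000;
		uint64_t div = rate / parent;				/* integer multiplier */
		uint64_t mfn = (rate - div * parent) * mfd / parent;	/* fractional part */

		printf("div=%llu mfn=%llu -> %llu Hz\n",
		       (unsigned long long)div, (unsigned long long)mfn,
		       (unsigned long long)(parent * div + parent * mfn / mfd));
		return 0;
	}
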
diff --git a/arch/arm/mach-imx/clk.h b/arch/arm/mach-imx/clk.h
new file mode 100644
index 0000000..1bf64fe
--- /dev/null
+++ b/arch/arm/mach-imx/clk.h
@@ -0,0 +1,83 @@
+#ifndef __MACH_IMX_CLK_H
+#define __MACH_IMX_CLK_H
+
+#include <linux/spinlock.h>
+#include <linux/clk-provider.h>
+#include <mach/clock.h>
+
+struct clk *imx_clk_pllv1(const char *name, const char *parent,
+		void __iomem *base);
+
+struct clk *imx_clk_pllv2(const char *name, const char *parent,
+		void __iomem *base);
+
+enum imx_pllv3_type {
+	IMX_PLLV3_GENERIC,
+	IMX_PLLV3_SYS,
+	IMX_PLLV3_USB,
+	IMX_PLLV3_AV,
+	IMX_PLLV3_ENET,
+	IMX_PLLV3_MLB,
+};
+
+struct clk *imx_clk_pllv3(enum imx_pllv3_type type, const char *name,
+		const char *parent_name, void __iomem *base, u32 gate_mask,
+		u32 div_mask);
+
+struct clk *clk_register_gate2(struct device *dev, const char *name,
+		const char *parent_name, unsigned long flags,
+		void __iomem *reg, u8 bit_idx,
+		u8 clk_gate_flags, spinlock_t *lock);
+
+static inline struct clk *imx_clk_gate2(const char *name, const char *parent,
+		void __iomem *reg, u8 shift)
+{
+	return clk_register_gate2(NULL, name, parent, CLK_SET_RATE_PARENT, reg,
+			shift, 0, &imx_ccm_lock);
+}
+
+struct clk *imx_clk_pfd(const char *name, const char *parent_name,
+		void __iomem *reg, u8 idx);
+
+struct clk *imx_clk_busy_divider(const char *name, const char *parent_name,
+				 void __iomem *reg, u8 shift, u8 width,
+				 void __iomem *busy_reg, u8 busy_shift);
+
+struct clk *imx_clk_busy_mux(const char *name, void __iomem *reg, u8 shift,
+			     u8 width, void __iomem *busy_reg, u8 busy_shift,
+			     const char **parent_names, int num_parents);
+
+static inline struct clk *imx_clk_fixed(const char *name, int rate)
+{
+	return clk_register_fixed_rate(NULL, name, NULL, CLK_IS_ROOT, rate);
+}
+
+static inline struct clk *imx_clk_divider(const char *name, const char *parent,
+		void __iomem *reg, u8 shift, u8 width)
+{
+	return clk_register_divider(NULL, name, parent, CLK_SET_RATE_PARENT,
+			reg, shift, width, 0, &imx_ccm_lock);
+}
+
+static inline struct clk *imx_clk_gate(const char *name, const char *parent,
+		void __iomem *reg, u8 shift)
+{
+	return clk_register_gate(NULL, name, parent, CLK_SET_RATE_PARENT, reg,
+			shift, 0, &imx_ccm_lock);
+}
+
+static inline struct clk *imx_clk_mux(const char *name, void __iomem *reg,
+		u8 shift, u8 width, const char **parents, int num_parents)
+{
+	return clk_register_mux(NULL, name, parents, num_parents, 0, reg, shift,
+			width, 0, &imx_ccm_lock);
+}
+
+static inline struct clk *imx_clk_fixed_factor(const char *name,
+		const char *parent, unsigned int mult, unsigned int div)
+{
+	return clk_register_fixed_factor(NULL, name, parent,
+			CLK_SET_RATE_PARENT, mult, div);
+}
+
+#endif
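
These inlines are thin wrappers around the generic clk_register_* helpers, all
serialized by the shared imx_ccm_lock spinlock.  A hedged sketch of how a SoC
clock-init function might compose them (the clock names, register offsets and
bit positions below are hypothetical, chosen only to show the call pattern):

	static void __init example_clocks_init(void __iomem *ccm)
	{
		struct clk *osc, *pll, *ahb, *uart_gate;

		osc = imx_clk_fixed("osc", 24000000);
		pll = imx_clk_pllv3(IMX_PLLV3_GENERIC, "pll_usb", "osc",
				    ccm + 0x10, 0x2000, 0x3);
		ahb = imx_clk_divider("ahb", "pll_usb", ccm + 0x14, 4, 3);
		uart_gate = imx_clk_gate2("uart", "ahb", ccm + 0x6c, 26);

		/* real code would add clkdev lookups and error checking here */
	}
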
diff --git a/arch/arm/mach-imx/clock-imx1.c b/arch/arm/mach-imx/clock-imx1.c
deleted file mode 100644
index 4aabeb2..0000000
--- a/arch/arm/mach-imx/clock-imx1.c
+++ /dev/null
@@ -1,636 +0,0 @@
-/*
- *  Copyright (C) 2008 Sascha Hauer <s.hauer@pengutronix.de>, Pengutronix
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License along
- * with this program; if not, write to the Free Software Foundation, Inc.,
- * 51 Franklin St, Fifth Floor, Boston, MA 02110-1301, USA.
- */
-
-#include <linux/kernel.h>
-#include <linux/init.h>
-#include <linux/list.h>
-#include <linux/math64.h>
-#include <linux/err.h>
-#include <linux/clk.h>
-#include <linux/io.h>
-#include <linux/clkdev.h>
-
-#include <mach/clock.h>
-#include <mach/hardware.h>
-#include <mach/common.h>
-
-#define IO_ADDR_CCM(off)	(MX1_IO_ADDRESS(MX1_CCM_BASE_ADDR + (off)))
-
-/* CCM register addresses */
-#define CCM_CSCR	IO_ADDR_CCM(0x0)
-#define CCM_MPCTL0	IO_ADDR_CCM(0x4)
-#define CCM_SPCTL0	IO_ADDR_CCM(0xc)
-#define CCM_PCDR	IO_ADDR_CCM(0x20)
-
-#define CCM_CSCR_CLKO_OFFSET	29
-#define CCM_CSCR_CLKO_MASK	(0x7 << 29)
-#define CCM_CSCR_USB_OFFSET	26
-#define CCM_CSCR_USB_MASK	(0x7 << 26)
-#define CCM_CSCR_OSC_EN_SHIFT	17
-#define CCM_CSCR_SYSTEM_SEL	(1 << 16)
-#define CCM_CSCR_BCLK_OFFSET	10
-#define CCM_CSCR_BCLK_MASK	(0xf << 10)
-#define CCM_CSCR_PRESC		(1 << 15)
-
-#define CCM_PCDR_PCLK3_OFFSET	16
-#define CCM_PCDR_PCLK3_MASK	(0x7f << 16)
-#define CCM_PCDR_PCLK2_OFFSET	4
-#define CCM_PCDR_PCLK2_MASK	(0xf << 4)
-#define CCM_PCDR_PCLK1_OFFSET	0
-#define CCM_PCDR_PCLK1_MASK	0xf
-
-#define IO_ADDR_SCM(off)	(MX1_IO_ADDRESS(MX1_SCM_BASE_ADDR + (off)))
-
-/* SCM register addresses */
-#define SCM_GCCR	IO_ADDR_SCM(0xc)
-
-#define SCM_GCCR_DMA_CLK_EN_OFFSET	3
-#define SCM_GCCR_CSI_CLK_EN_OFFSET	2
-#define SCM_GCCR_MMA_CLK_EN_OFFSET	1
-#define SCM_GCCR_USBD_CLK_EN_OFFSET	0
-
-static int _clk_enable(struct clk *clk)
-{
-	unsigned int reg;
-
-	reg = __raw_readl(clk->enable_reg);
-	reg |= 1 << clk->enable_shift;
-	__raw_writel(reg, clk->enable_reg);
-
-	return 0;
-}
-
-static void _clk_disable(struct clk *clk)
-{
-	unsigned int reg;
-
-	reg = __raw_readl(clk->enable_reg);
-	reg &= ~(1 << clk->enable_shift);
-	__raw_writel(reg, clk->enable_reg);
-}
-
-static int _clk_can_use_parent(const struct clk *clk_arr[], unsigned int size,
-			       struct clk *parent)
-{
-	int i;
-
-	for (i = 0; i < size; i++)
-		if (parent == clk_arr[i])
-			return i;
-
-	return -EINVAL;
-}
-
-static unsigned long
-_clk_simple_round_rate(struct clk *clk, unsigned long rate, unsigned int limit)
-{
-	int div;
-	unsigned long parent_rate;
-
-	parent_rate = clk_get_rate(clk->parent);
-
-	div = parent_rate / rate;
-	if (parent_rate % rate)
-		div++;
-
-	if (div > limit)
-		div = limit;
-
-	return parent_rate / div;
-}
-
-static unsigned long _clk_parent_round_rate(struct clk *clk, unsigned long rate)
-{
-	return clk->parent->round_rate(clk->parent, rate);
-}
-
-static int _clk_parent_set_rate(struct clk *clk, unsigned long rate)
-{
-	return clk->parent->set_rate(clk->parent, rate);
-}
-
-static unsigned long clk16m_get_rate(struct clk *clk)
-{
-	return 16000000;
-}
-
-static struct clk clk16m = {
-	.get_rate = clk16m_get_rate,
-	.enable = _clk_enable,
-	.enable_reg = CCM_CSCR,
-	.enable_shift = CCM_CSCR_OSC_EN_SHIFT,
-	.disable = _clk_disable,
-};
-
-/* in Hz */
-static unsigned long clk32_rate;
-
-static unsigned long clk32_get_rate(struct clk *clk)
-{
-	return clk32_rate;
-}
-
-static struct clk clk32 = {
-	.get_rate = clk32_get_rate,
-};
-
-static unsigned long clk32_premult_get_rate(struct clk *clk)
-{
-	return clk_get_rate(clk->parent) * 512;
-}
-
-static struct clk clk32_premult = {
-	.parent = &clk32,
-	.get_rate = clk32_premult_get_rate,
-};
-
-static const struct clk *prem_clk_clocks[] = {
-	&clk32_premult,
-	&clk16m,
-};
-
-static int prem_clk_set_parent(struct clk *clk, struct clk *parent)
-{
-	int i;
-	unsigned int reg = __raw_readl(CCM_CSCR);
-
-	i = _clk_can_use_parent(prem_clk_clocks, ARRAY_SIZE(prem_clk_clocks),
-				parent);
-
-	switch (i) {
-	case 0:
-		reg &= ~CCM_CSCR_SYSTEM_SEL;
-		break;
-	case 1:
-		reg |= CCM_CSCR_SYSTEM_SEL;
-		break;
-	default:
-		return i;
-	}
-
-	__raw_writel(reg, CCM_CSCR);
-
-	return 0;
-}
-
-static struct clk prem_clk = {
-	.set_parent = prem_clk_set_parent,
-};
-
-static unsigned long system_clk_get_rate(struct clk *clk)
-{
-	return mxc_decode_pll(__raw_readl(CCM_SPCTL0),
-			      clk_get_rate(clk->parent));
-}
-
-static struct clk system_clk = {
-	.parent = &prem_clk,
-	.get_rate = system_clk_get_rate,
-};
-
-static unsigned long mcu_clk_get_rate(struct clk *clk)
-{
-	return mxc_decode_pll(__raw_readl(CCM_MPCTL0),
-			      clk_get_rate(clk->parent));
-}
-
-static struct clk mcu_clk = {
-	.parent = &clk32_premult,
-	.get_rate = mcu_clk_get_rate,
-};
-
-static unsigned long fclk_get_rate(struct clk *clk)
-{
-	unsigned long fclk = clk_get_rate(clk->parent);
-
-	if (__raw_readl(CCM_CSCR) & CCM_CSCR_PRESC)
-		fclk /= 2;
-
-	return fclk;
-}
-
-static struct clk fclk = {
-	.parent = &mcu_clk,
-	.get_rate = fclk_get_rate,
-};
-
-/*
- *  get hclk ( SDRAM, CSI, Memory Stick, I2C, DMA )
- */
-static unsigned long hclk_get_rate(struct clk *clk)
-{
-	return clk_get_rate(clk->parent) / (((__raw_readl(CCM_CSCR) &
-			CCM_CSCR_BCLK_MASK) >> CCM_CSCR_BCLK_OFFSET) + 1);
-}
-
-static unsigned long hclk_round_rate(struct clk *clk, unsigned long rate)
-{
-	return _clk_simple_round_rate(clk, rate, 16);
-}
-
-static int hclk_set_rate(struct clk *clk, unsigned long rate)
-{
-	unsigned int div;
-	unsigned int reg;
-	unsigned long parent_rate;
-
-	parent_rate = clk_get_rate(clk->parent);
-
-	div = parent_rate / rate;
-
-	if (div > 16 || div < 1 || ((parent_rate / div) != rate))
-		return -EINVAL;
-
-	div--;
-
-	reg = __raw_readl(CCM_CSCR);
-	reg &= ~CCM_CSCR_BCLK_MASK;
-	reg |= div << CCM_CSCR_BCLK_OFFSET;
-	__raw_writel(reg, CCM_CSCR);
-
-	return 0;
-}
-
-static struct clk hclk = {
-	.parent = &system_clk,
-	.get_rate = hclk_get_rate,
-	.round_rate = hclk_round_rate,
-	.set_rate = hclk_set_rate,
-};
-
-static unsigned long clk48m_get_rate(struct clk *clk)
-{
-	return clk_get_rate(clk->parent) / (((__raw_readl(CCM_CSCR) &
-			CCM_CSCR_USB_MASK) >> CCM_CSCR_USB_OFFSET) + 1);
-}
-
-static unsigned long clk48m_round_rate(struct clk *clk, unsigned long rate)
-{
-	return _clk_simple_round_rate(clk, rate, 8);
-}
-
-static int clk48m_set_rate(struct clk *clk, unsigned long rate)
-{
-	unsigned int div;
-	unsigned int reg;
-	unsigned long parent_rate;
-
-	parent_rate = clk_get_rate(clk->parent);
-
-	div = parent_rate / rate;
-
-	if (div > 8 || div < 1 || ((parent_rate / div) != rate))
-		return -EINVAL;
-
-	div--;
-
-	reg = __raw_readl(CCM_CSCR);
-	reg &= ~CCM_CSCR_USB_MASK;
-	reg |= div << CCM_CSCR_USB_OFFSET;
-	__raw_writel(reg, CCM_CSCR);
-
-	return 0;
-}
-
-static struct clk clk48m = {
-	.parent = &system_clk,
-	.get_rate = clk48m_get_rate,
-	.round_rate = clk48m_round_rate,
-	.set_rate = clk48m_set_rate,
-};
-
-/*
- *  get peripheral clock 1 ( UART[12], Timer[12], PWM )
- */
-static unsigned long perclk1_get_rate(struct clk *clk)
-{
-	return clk_get_rate(clk->parent) / (((__raw_readl(CCM_PCDR) &
-			CCM_PCDR_PCLK1_MASK) >> CCM_PCDR_PCLK1_OFFSET) + 1);
-}
-
-static unsigned long perclk1_round_rate(struct clk *clk, unsigned long rate)
-{
-	return _clk_simple_round_rate(clk, rate, 16);
-}
-
-static int perclk1_set_rate(struct clk *clk, unsigned long rate)
-{
-	unsigned int div;
-	unsigned int reg;
-	unsigned long parent_rate;
-
-	parent_rate = clk_get_rate(clk->parent);
-
-	div = parent_rate / rate;
-
-	if (div > 16 || div < 1 || ((parent_rate / div) != rate))
-		return -EINVAL;
-
-	div--;
-
-	reg = __raw_readl(CCM_PCDR);
-	reg &= ~CCM_PCDR_PCLK1_MASK;
-	reg |= div << CCM_PCDR_PCLK1_OFFSET;
-	__raw_writel(reg, CCM_PCDR);
-
-	return 0;
-}
-
-/*
- *  get peripheral clock 2 ( LCD, SD, SPI[12] )
- */
-static unsigned long perclk2_get_rate(struct clk *clk)
-{
-	return clk_get_rate(clk->parent) / (((__raw_readl(CCM_PCDR) &
-			CCM_PCDR_PCLK2_MASK) >> CCM_PCDR_PCLK2_OFFSET) + 1);
-}
-
-static unsigned long perclk2_round_rate(struct clk *clk, unsigned long rate)
-{
-	return _clk_simple_round_rate(clk, rate, 16);
-}
-
-static int perclk2_set_rate(struct clk *clk, unsigned long rate)
-{
-	unsigned int div;
-	unsigned int reg;
-	unsigned long parent_rate;
-
-	parent_rate = clk_get_rate(clk->parent);
-
-	div = parent_rate / rate;
-
-	if (div > 16 || div < 1 || ((parent_rate / div) != rate))
-		return -EINVAL;
-
-	div--;
-
-	reg = __raw_readl(CCM_PCDR);
-	reg &= ~CCM_PCDR_PCLK2_MASK;
-	reg |= div << CCM_PCDR_PCLK2_OFFSET;
-	__raw_writel(reg, CCM_PCDR);
-
-	return 0;
-}
-
-/*
- *  get peripheral clock 3 ( SSI )
- */
-static unsigned long perclk3_get_rate(struct clk *clk)
-{
-	return clk_get_rate(clk->parent) / (((__raw_readl(CCM_PCDR) &
-			CCM_PCDR_PCLK3_MASK) >> CCM_PCDR_PCLK3_OFFSET) + 1);
-}
-
-static unsigned long perclk3_round_rate(struct clk *clk, unsigned long rate)
-{
-	return _clk_simple_round_rate(clk, rate, 128);
-}
-
-static int perclk3_set_rate(struct clk *clk, unsigned long rate)
-{
-	unsigned int div;
-	unsigned int reg;
-	unsigned long parent_rate;
-
-	parent_rate = clk_get_rate(clk->parent);
-
-	div = parent_rate / rate;
-
-	if (div > 128 || div < 1 || ((parent_rate / div) != rate))
-		return -EINVAL;
-
-	div--;
-
-	reg = __raw_readl(CCM_PCDR);
-	reg &= ~CCM_PCDR_PCLK3_MASK;
-	reg |= div << CCM_PCDR_PCLK3_OFFSET;
-	__raw_writel(reg, CCM_PCDR);
-
-	return 0;
-}
-
-static struct clk perclk[] = {
-	{
-		.id = 0,
-		.parent = &system_clk,
-		.get_rate = perclk1_get_rate,
-		.round_rate = perclk1_round_rate,
-		.set_rate = perclk1_set_rate,
-	}, {
-		.id = 1,
-		.parent = &system_clk,
-		.get_rate = perclk2_get_rate,
-		.round_rate = perclk2_round_rate,
-		.set_rate = perclk2_set_rate,
-	}, {
-		.id = 2,
-		.parent = &system_clk,
-		.get_rate = perclk3_get_rate,
-		.round_rate = perclk3_round_rate,
-		.set_rate = perclk3_set_rate,
-	}
-};
-
-static const struct clk *clko_clocks[] = {
-	&perclk[0],
-	&hclk,
-	&clk48m,
-	&clk16m,
-	&prem_clk,
-	&fclk,
-};
-
-static int clko_set_parent(struct clk *clk, struct clk *parent)
-{
-	int i;
-	unsigned int reg;
-
-	i = _clk_can_use_parent(clko_clocks, ARRAY_SIZE(clko_clocks), parent);
-	if (i < 0)
-		return i;
-
-	reg = __raw_readl(CCM_CSCR) & ~CCM_CSCR_CLKO_MASK;
-	reg |= i << CCM_CSCR_CLKO_OFFSET;
-	__raw_writel(reg, CCM_CSCR);
-
-	if (clko_clocks[i]->set_rate && clko_clocks[i]->round_rate) {
-		clk->set_rate = _clk_parent_set_rate;
-		clk->round_rate = _clk_parent_round_rate;
-	} else {
-		clk->set_rate = NULL;
-		clk->round_rate = NULL;
-	}
-
-	return 0;
-}
-
-static struct clk clko_clk = {
-	.set_parent = clko_set_parent,
-};
-
-static struct clk dma_clk = {
-	.parent = &hclk,
-	.round_rate = _clk_parent_round_rate,
-	.set_rate = _clk_parent_set_rate,
-	.enable = _clk_enable,
-	.enable_reg = SCM_GCCR,
-	.enable_shift = SCM_GCCR_DMA_CLK_EN_OFFSET,
-	.disable = _clk_disable,
-};
-
-static struct clk csi_clk = {
-	.parent = &hclk,
-	.round_rate = _clk_parent_round_rate,
-	.set_rate = _clk_parent_set_rate,
-	.enable = _clk_enable,
-	.enable_reg = SCM_GCCR,
-	.enable_shift = SCM_GCCR_CSI_CLK_EN_OFFSET,
-	.disable = _clk_disable,
-};
-
-static struct clk mma_clk = {
-	.parent = &hclk,
-	.round_rate = _clk_parent_round_rate,
-	.set_rate = _clk_parent_set_rate,
-	.enable = _clk_enable,
-	.enable_reg = SCM_GCCR,
-	.enable_shift = SCM_GCCR_MMA_CLK_EN_OFFSET,
-	.disable = _clk_disable,
-};
-
-static struct clk usbd_clk = {
-	.parent = &clk48m,
-	.round_rate = _clk_parent_round_rate,
-	.set_rate = _clk_parent_set_rate,
-	.enable = _clk_enable,
-	.enable_reg = SCM_GCCR,
-	.enable_shift = SCM_GCCR_USBD_CLK_EN_OFFSET,
-	.disable = _clk_disable,
-};
-
-static struct clk gpt_clk = {
-	.parent = &perclk[0],
-	.round_rate = _clk_parent_round_rate,
-	.set_rate = _clk_parent_set_rate,
-};
-
-static struct clk uart_clk = {
-	.parent = &perclk[0],
-	.round_rate = _clk_parent_round_rate,
-	.set_rate = _clk_parent_set_rate,
-};
-
-static struct clk i2c_clk = {
-	.parent = &hclk,
-	.round_rate = _clk_parent_round_rate,
-	.set_rate = _clk_parent_set_rate,
-};
-
-static struct clk spi_clk = {
-	.parent = &perclk[1],
-	.round_rate = _clk_parent_round_rate,
-	.set_rate = _clk_parent_set_rate,
-};
-
-static struct clk sdhc_clk = {
-	.parent = &perclk[1],
-	.round_rate = _clk_parent_round_rate,
-	.set_rate = _clk_parent_set_rate,
-};
-
-static struct clk lcdc_clk = {
-	.parent = &perclk[1],
-	.round_rate = _clk_parent_round_rate,
-	.set_rate = _clk_parent_set_rate,
-};
-
-static struct clk mshc_clk = {
-	.parent = &hclk,
-	.round_rate = _clk_parent_round_rate,
-	.set_rate = _clk_parent_set_rate,
-};
-
-static struct clk ssi_clk = {
-	.parent = &perclk[2],
-	.round_rate = _clk_parent_round_rate,
-	.set_rate = _clk_parent_set_rate,
-};
-
-static struct clk rtc_clk = {
-	.parent = &clk32,
-};
-
-#define _REGISTER_CLOCK(d, n, c) \
-	{ \
-		.dev_id = d, \
-		.con_id = n, \
-		.clk = &c, \
-	},
-static struct clk_lookup lookups[] __initdata = {
-	_REGISTER_CLOCK(NULL, "dma", dma_clk)
-	_REGISTER_CLOCK("mx1-camera.0", NULL, csi_clk)
-	_REGISTER_CLOCK(NULL, "mma", mma_clk)
-	_REGISTER_CLOCK("imx_udc.0", NULL, usbd_clk)
-	_REGISTER_CLOCK(NULL, "gpt", gpt_clk)
-	_REGISTER_CLOCK("imx1-uart.0", NULL, uart_clk)
-	_REGISTER_CLOCK("imx1-uart.1", NULL, uart_clk)
-	_REGISTER_CLOCK("imx1-uart.2", NULL, uart_clk)
-	_REGISTER_CLOCK("imx-i2c.0", NULL, i2c_clk)
-	_REGISTER_CLOCK("imx1-cspi.0", NULL, spi_clk)
-	_REGISTER_CLOCK("imx1-cspi.1", NULL, spi_clk)
-	_REGISTER_CLOCK("imx-mmc.0", NULL, sdhc_clk)
-	_REGISTER_CLOCK("imx-fb.0", NULL, lcdc_clk)
-	_REGISTER_CLOCK(NULL, "mshc", mshc_clk)
-	_REGISTER_CLOCK(NULL, "ssi", ssi_clk)
-	_REGISTER_CLOCK("mxc_rtc.0", NULL, rtc_clk)
-};
-
-int __init mx1_clocks_init(unsigned long fref)
-{
-	unsigned int reg;
-
-	/* disable clocks we are able to */
-	__raw_writel(0, SCM_GCCR);
-
-	clk32_rate = fref;
-	reg = __raw_readl(CCM_CSCR);
-
-	/* detect clock reference for system PLL */
-	if (reg & CCM_CSCR_SYSTEM_SEL) {
-		prem_clk.parent = &clk16m;
-	} else {
-		/* ensure that oscillator is disabled */
-		reg &= ~(1 << CCM_CSCR_OSC_EN_SHIFT);
-		__raw_writel(reg, CCM_CSCR);
-		prem_clk.parent = &clk32_premult;
-	}
-
-	/* detect reference for CLKO */
-	reg = (reg & CCM_CSCR_CLKO_MASK) >> CCM_CSCR_CLKO_OFFSET;
-	clko_clk.parent = (struct clk *)clko_clocks[reg];
-
-	clkdev_add_table(lookups, ARRAY_SIZE(lookups));
-
-	clk_enable(&hclk);
-	clk_enable(&fclk);
-
-	mxc_timer_init(&gpt_clk, MX1_IO_ADDRESS(MX1_TIM1_BASE_ADDR),
-			MX1_TIM1_INT);
-
-	return 0;
-}
diff --git a/arch/arm/mach-imx/clock-imx21.c b/arch/arm/mach-imx/clock-imx21.c
deleted file mode 100644
index ee15d8c..0000000
--- a/arch/arm/mach-imx/clock-imx21.c
+++ /dev/null
@@ -1,1239 +0,0 @@
-/*
- * Copyright 2004-2007 Freescale Semiconductor, Inc. All Rights Reserved.
- * Copyright 2008 Juergen Beisert, kernel@pengutronix.de
- * Copyright 2008 Martin Fuzzey, mfuzzey@gmail.com
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * as published by the Free Software Foundation; either version 2
- * of the License, or (at your option) any later version.
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston,
- * MA 02110-1301, USA.
- */
-
-#include <linux/clk.h>
-#include <linux/io.h>
-#include <linux/module.h>
-#include <linux/clkdev.h>
-
-#include <mach/clock.h>
-#include <mach/hardware.h>
-#include <mach/common.h>
-#include <asm/div64.h>
-
-#define IO_ADDR_CCM(off)	(MX21_IO_ADDRESS(MX21_CCM_BASE_ADDR + (off)))
-
-/* Register offsets */
-#define CCM_CSCR		IO_ADDR_CCM(0x0)
-#define CCM_MPCTL0		IO_ADDR_CCM(0x4)
-#define CCM_MPCTL1		IO_ADDR_CCM(0x8)
-#define CCM_SPCTL0		IO_ADDR_CCM(0xc)
-#define CCM_SPCTL1		IO_ADDR_CCM(0x10)
-#define CCM_OSC26MCTL		IO_ADDR_CCM(0x14)
-#define CCM_PCDR0		IO_ADDR_CCM(0x18)
-#define CCM_PCDR1		IO_ADDR_CCM(0x1c)
-#define CCM_PCCR0		IO_ADDR_CCM(0x20)
-#define CCM_PCCR1		IO_ADDR_CCM(0x24)
-#define CCM_CCSR		IO_ADDR_CCM(0x28)
-#define CCM_PMCTL		IO_ADDR_CCM(0x2c)
-#define CCM_PMCOUNT		IO_ADDR_CCM(0x30)
-#define CCM_WKGDCTL		IO_ADDR_CCM(0x34)
-
-#define CCM_CSCR_PRESC_OFFSET	29
-#define CCM_CSCR_PRESC_MASK	(0x7 << CCM_CSCR_PRESC_OFFSET)
-
-#define CCM_CSCR_USB_OFFSET	26
-#define CCM_CSCR_USB_MASK	(0x7 << CCM_CSCR_USB_OFFSET)
-#define CCM_CSCR_SD_OFFSET	24
-#define CCM_CSCR_SD_MASK	(0x3 << CCM_CSCR_SD_OFFSET)
-#define CCM_CSCR_SPLLRES	(1 << 22)
-#define CCM_CSCR_MPLLRES	(1 << 21)
-#define CCM_CSCR_SSI2_OFFSET	20
-#define CCM_CSCR_SSI2		(1 << CCM_CSCR_SSI2_OFFSET)
-#define CCM_CSCR_SSI1_OFFSET	19
-#define CCM_CSCR_SSI1		(1 << CCM_CSCR_SSI1_OFFSET)
-#define CCM_CSCR_FIR_OFFSET	18
-#define CCM_CSCR_FIR		(1 << CCM_CSCR_FIR_OFFSET)
-#define CCM_CSCR_SP		(1 << 17)
-#define CCM_CSCR_MCU		(1 << 16)
-#define CCM_CSCR_BCLK_OFFSET	10
-#define CCM_CSCR_BCLK_MASK	(0xf << CCM_CSCR_BCLK_OFFSET)
-#define CCM_CSCR_IPDIV_OFFSET	9
-#define CCM_CSCR_IPDIV		(1 << CCM_CSCR_IPDIV_OFFSET)
-
-#define CCM_CSCR_OSC26MDIV	(1 << 4)
-#define CCM_CSCR_OSC26M		(1 << 3)
-#define CCM_CSCR_FPM		(1 << 2)
-#define CCM_CSCR_SPEN		(1 << 1)
-#define CCM_CSCR_MPEN		1
-
-#define CCM_MPCTL0_CPLM		(1 << 31)
-#define CCM_MPCTL0_PD_OFFSET	26
-#define CCM_MPCTL0_PD_MASK	(0xf << 26)
-#define CCM_MPCTL0_MFD_OFFSET	16
-#define CCM_MPCTL0_MFD_MASK	(0x3ff << 16)
-#define CCM_MPCTL0_MFI_OFFSET	10
-#define CCM_MPCTL0_MFI_MASK	(0xf << 10)
-#define CCM_MPCTL0_MFN_OFFSET	0
-#define CCM_MPCTL0_MFN_MASK	0x3ff
-
-#define CCM_MPCTL1_LF		(1 << 15)
-#define CCM_MPCTL1_BRMO		(1 << 6)
-
-#define CCM_SPCTL0_CPLM		(1 << 31)
-#define CCM_SPCTL0_PD_OFFSET	26
-#define CCM_SPCTL0_PD_MASK	(0xf << 26)
-#define CCM_SPCTL0_MFD_OFFSET	16
-#define CCM_SPCTL0_MFD_MASK	(0x3ff << 16)
-#define CCM_SPCTL0_MFI_OFFSET	10
-#define CCM_SPCTL0_MFI_MASK	(0xf << 10)
-#define CCM_SPCTL0_MFN_OFFSET	0
-#define CCM_SPCTL0_MFN_MASK	0x3ff
-
-#define CCM_SPCTL1_LF		(1 << 15)
-#define CCM_SPCTL1_BRMO		(1 << 6)
-
-#define CCM_OSC26MCTL_PEAK_OFFSET	16
-#define CCM_OSC26MCTL_PEAK_MASK		(0x3 << 16)
-#define CCM_OSC26MCTL_AGC_OFFSET	8
-#define CCM_OSC26MCTL_AGC_MASK		(0x3f << 8)
-#define CCM_OSC26MCTL_ANATEST_OFFSET	0
-#define CCM_OSC26MCTL_ANATEST_MASK	0x3f
-
-#define CCM_PCDR0_SSI2BAUDDIV_OFFSET	26
-#define CCM_PCDR0_SSI2BAUDDIV_MASK	(0x3f << 26)
-#define CCM_PCDR0_SSI1BAUDDIV_OFFSET	16
-#define CCM_PCDR0_SSI1BAUDDIV_MASK	(0x3f << 16)
-#define CCM_PCDR0_NFCDIV_OFFSET		12
-#define CCM_PCDR0_NFCDIV_MASK		(0xf << 12)
-#define CCM_PCDR0_48MDIV_OFFSET		5
-#define CCM_PCDR0_48MDIV_MASK		(0x7 << CCM_PCDR0_48MDIV_OFFSET)
-#define CCM_PCDR0_FIRIDIV_OFFSET	0
-#define CCM_PCDR0_FIRIDIV_MASK		0x1f
-#define CCM_PCDR1_PERDIV4_OFFSET	24
-#define CCM_PCDR1_PERDIV4_MASK		(0x3f << 24)
-#define CCM_PCDR1_PERDIV3_OFFSET	16
-#define CCM_PCDR1_PERDIV3_MASK		(0x3f << 16)
-#define CCM_PCDR1_PERDIV2_OFFSET	8
-#define CCM_PCDR1_PERDIV2_MASK		(0x3f << 8)
-#define CCM_PCDR1_PERDIV1_OFFSET	0
-#define CCM_PCDR1_PERDIV1_MASK		0x3f
-
-#define CCM_PCCR_HCLK_CSI_OFFSET	31
-#define CCM_PCCR_HCLK_CSI_REG		CCM_PCCR0
-#define CCM_PCCR_HCLK_DMA_OFFSET	30
-#define CCM_PCCR_HCLK_DMA_REG		CCM_PCCR0
-#define CCM_PCCR_HCLK_BROM_OFFSET	28
-#define CCM_PCCR_HCLK_BROM_REG		CCM_PCCR0
-#define CCM_PCCR_HCLK_EMMA_OFFSET	27
-#define CCM_PCCR_HCLK_EMMA_REG		CCM_PCCR0
-#define CCM_PCCR_HCLK_LCDC_OFFSET	26
-#define CCM_PCCR_HCLK_LCDC_REG		CCM_PCCR0
-#define CCM_PCCR_HCLK_SLCDC_OFFSET	25
-#define CCM_PCCR_HCLK_SLCDC_REG		CCM_PCCR0
-#define CCM_PCCR_HCLK_USBOTG_OFFSET	24
-#define CCM_PCCR_HCLK_USBOTG_REG	CCM_PCCR0
-#define CCM_PCCR_HCLK_BMI_OFFSET	23
-#define CCM_PCCR_BMI_MASK		(1 << CCM_PCCR_BMI_MASK)
-#define CCM_PCCR_HCLK_BMI_REG		CCM_PCCR0
-#define CCM_PCCR_PERCLK4_OFFSET		22
-#define CCM_PCCR_PERCLK4_REG		CCM_PCCR0
-#define CCM_PCCR_SLCDC_OFFSET		21
-#define CCM_PCCR_SLCDC_REG		CCM_PCCR0
-#define CCM_PCCR_FIRI_BAUD_OFFSET	20
-#define CCM_PCCR_FIRI_BAUD_MASK		(1 << CCM_PCCR_FIRI_BAUD_MASK)
-#define CCM_PCCR_FIRI_BAUD_REG		CCM_PCCR0
-#define CCM_PCCR_NFC_OFFSET		19
-#define CCM_PCCR_NFC_REG		CCM_PCCR0
-#define CCM_PCCR_LCDC_OFFSET		18
-#define CCM_PCCR_LCDC_REG		CCM_PCCR0
-#define CCM_PCCR_SSI1_BAUD_OFFSET	17
-#define CCM_PCCR_SSI1_BAUD_REG		CCM_PCCR0
-#define CCM_PCCR_SSI2_BAUD_OFFSET	16
-#define CCM_PCCR_SSI2_BAUD_REG		CCM_PCCR0
-#define CCM_PCCR_EMMA_OFFSET		15
-#define CCM_PCCR_EMMA_REG		CCM_PCCR0
-#define CCM_PCCR_USBOTG_OFFSET		14
-#define CCM_PCCR_USBOTG_REG		CCM_PCCR0
-#define CCM_PCCR_DMA_OFFSET		13
-#define CCM_PCCR_DMA_REG		CCM_PCCR0
-#define CCM_PCCR_I2C1_OFFSET		12
-#define CCM_PCCR_I2C1_REG		CCM_PCCR0
-#define CCM_PCCR_GPIO_OFFSET		11
-#define CCM_PCCR_GPIO_REG		CCM_PCCR0
-#define CCM_PCCR_SDHC2_OFFSET		10
-#define CCM_PCCR_SDHC2_REG		CCM_PCCR0
-#define CCM_PCCR_SDHC1_OFFSET		9
-#define CCM_PCCR_SDHC1_REG		CCM_PCCR0
-#define CCM_PCCR_FIRI_OFFSET		8
-#define CCM_PCCR_FIRI_MASK		(1 << CCM_PCCR_BAUD_MASK)
-#define CCM_PCCR_FIRI_REG		CCM_PCCR0
-#define CCM_PCCR_SSI2_IPG_OFFSET	7
-#define CCM_PCCR_SSI2_REG		CCM_PCCR0
-#define CCM_PCCR_SSI1_IPG_OFFSET	6
-#define CCM_PCCR_SSI1_REG		CCM_PCCR0
-#define CCM_PCCR_CSPI2_OFFSET		5
-#define	CCM_PCCR_CSPI2_REG		CCM_PCCR0
-#define CCM_PCCR_CSPI1_OFFSET		4
-#define	CCM_PCCR_CSPI1_REG		CCM_PCCR0
-#define CCM_PCCR_UART4_OFFSET		3
-#define CCM_PCCR_UART4_REG		CCM_PCCR0
-#define CCM_PCCR_UART3_OFFSET		2
-#define CCM_PCCR_UART3_REG		CCM_PCCR0
-#define CCM_PCCR_UART2_OFFSET		1
-#define CCM_PCCR_UART2_REG		CCM_PCCR0
-#define CCM_PCCR_UART1_OFFSET		0
-#define CCM_PCCR_UART1_REG		CCM_PCCR0
-
-#define CCM_PCCR_OWIRE_OFFSET		31
-#define CCM_PCCR_OWIRE_REG		CCM_PCCR1
-#define CCM_PCCR_KPP_OFFSET		30
-#define CCM_PCCR_KPP_REG		CCM_PCCR1
-#define CCM_PCCR_RTC_OFFSET		29
-#define CCM_PCCR_RTC_REG		CCM_PCCR1
-#define CCM_PCCR_PWM_OFFSET		28
-#define CCM_PCCR_PWM_REG		CCM_PCCR1
-#define CCM_PCCR_GPT3_OFFSET		27
-#define CCM_PCCR_GPT3_REG		CCM_PCCR1
-#define CCM_PCCR_GPT2_OFFSET		26
-#define CCM_PCCR_GPT2_REG		CCM_PCCR1
-#define CCM_PCCR_GPT1_OFFSET		25
-#define CCM_PCCR_GPT1_REG		CCM_PCCR1
-#define CCM_PCCR_WDT_OFFSET		24
-#define CCM_PCCR_WDT_REG		CCM_PCCR1
-#define CCM_PCCR_CSPI3_OFFSET		23
-#define	CCM_PCCR_CSPI3_REG		CCM_PCCR1
-
-#define CCM_PCCR_CSPI1_MASK		(1 << CCM_PCCR_CSPI1_OFFSET)
-#define CCM_PCCR_CSPI2_MASK		(1 << CCM_PCCR_CSPI2_OFFSET)
-#define CCM_PCCR_CSPI3_MASK		(1 << CCM_PCCR_CSPI3_OFFSET)
-#define CCM_PCCR_DMA_MASK		(1 << CCM_PCCR_DMA_OFFSET)
-#define CCM_PCCR_EMMA_MASK		(1 << CCM_PCCR_EMMA_OFFSET)
-#define CCM_PCCR_GPIO_MASK		(1 << CCM_PCCR_GPIO_OFFSET)
-#define CCM_PCCR_GPT1_MASK		(1 << CCM_PCCR_GPT1_OFFSET)
-#define CCM_PCCR_GPT2_MASK		(1 << CCM_PCCR_GPT2_OFFSET)
-#define CCM_PCCR_GPT3_MASK		(1 << CCM_PCCR_GPT3_OFFSET)
-#define CCM_PCCR_HCLK_BROM_MASK		(1 << CCM_PCCR_HCLK_BROM_OFFSET)
-#define CCM_PCCR_HCLK_CSI_MASK		(1 << CCM_PCCR_HCLK_CSI_OFFSET)
-#define CCM_PCCR_HCLK_DMA_MASK		(1 << CCM_PCCR_HCLK_DMA_OFFSET)
-#define CCM_PCCR_HCLK_EMMA_MASK		(1 << CCM_PCCR_HCLK_EMMA_OFFSET)
-#define CCM_PCCR_HCLK_LCDC_MASK		(1 << CCM_PCCR_HCLK_LCDC_OFFSET)
-#define CCM_PCCR_HCLK_SLCDC_MASK	(1 << CCM_PCCR_HCLK_SLCDC_OFFSET)
-#define CCM_PCCR_HCLK_USBOTG_MASK	(1 << CCM_PCCR_HCLK_USBOTG_OFFSET)
-#define CCM_PCCR_I2C1_MASK		(1 << CCM_PCCR_I2C1_OFFSET)
-#define CCM_PCCR_KPP_MASK		(1 << CCM_PCCR_KPP_OFFSET)
-#define CCM_PCCR_LCDC_MASK		(1 << CCM_PCCR_LCDC_OFFSET)
-#define CCM_PCCR_NFC_MASK		(1 << CCM_PCCR_NFC_OFFSET)
-#define CCM_PCCR_OWIRE_MASK		(1 << CCM_PCCR_OWIRE_OFFSET)
-#define CCM_PCCR_PERCLK4_MASK		(1 << CCM_PCCR_PERCLK4_OFFSET)
-#define CCM_PCCR_PWM_MASK		(1 << CCM_PCCR_PWM_OFFSET)
-#define CCM_PCCR_RTC_MASK		(1 << CCM_PCCR_RTC_OFFSET)
-#define CCM_PCCR_SDHC1_MASK		(1 << CCM_PCCR_SDHC1_OFFSET)
-#define CCM_PCCR_SDHC2_MASK		(1 << CCM_PCCR_SDHC2_OFFSET)
-#define CCM_PCCR_SLCDC_MASK		(1 << CCM_PCCR_SLCDC_OFFSET)
-#define CCM_PCCR_SSI1_BAUD_MASK		(1 << CCM_PCCR_SSI1_BAUD_OFFSET)
-#define CCM_PCCR_SSI1_IPG_MASK		(1 << CCM_PCCR_SSI1_IPG_OFFSET)
-#define CCM_PCCR_SSI2_BAUD_MASK		(1 << CCM_PCCR_SSI2_BAUD_OFFSET)
-#define CCM_PCCR_SSI2_IPG_MASK		(1 << CCM_PCCR_SSI2_IPG_OFFSET)
-#define CCM_PCCR_UART1_MASK		(1 << CCM_PCCR_UART1_OFFSET)
-#define CCM_PCCR_UART2_MASK		(1 << CCM_PCCR_UART2_OFFSET)
-#define CCM_PCCR_UART3_MASK		(1 << CCM_PCCR_UART3_OFFSET)
-#define CCM_PCCR_UART4_MASK		(1 << CCM_PCCR_UART4_OFFSET)
-#define CCM_PCCR_USBOTG_MASK		(1 << CCM_PCCR_USBOTG_OFFSET)
-#define CCM_PCCR_WDT_MASK		(1 << CCM_PCCR_WDT_OFFSET)
-
-#define CCM_CCSR_32KSR		(1 << 15)
-
-#define CCM_CCSR_CLKMODE1	(1 << 9)
-#define CCM_CCSR_CLKMODE0	(1 << 8)
-
-#define CCM_CCSR_CLKOSEL_OFFSET 0
-#define CCM_CCSR_CLKOSEL_MASK	0x1f
-
-#define SYS_FMCR		0x14	/* Functional Muxing Control Reg */
-#define SYS_CHIP_ID		0x00	/* The offset of CHIP ID register */
-
-static int _clk_enable(struct clk *clk)
-{
-	u32 reg;
-
-	reg = __raw_readl(clk->enable_reg);
-	reg |= 1 << clk->enable_shift;
-	__raw_writel(reg, clk->enable_reg);
-	return 0;
-}
-
-static void _clk_disable(struct clk *clk)
-{
-	u32 reg;
-
-	reg = __raw_readl(clk->enable_reg);
-	reg &= ~(1 << clk->enable_shift);
-	__raw_writel(reg, clk->enable_reg);
-}
-
-static unsigned long _clk_generic_round_rate(struct clk *clk,
-			unsigned long rate,
-			u32 max_divisor)
-{
-	u32 div;
-	unsigned long parent_rate;
-
-	parent_rate = clk_get_rate(clk->parent);
-
-	div = parent_rate / rate;
-	if (parent_rate % rate)
-		div++;
-
-	if (div > max_divisor)
-		div = max_divisor;
-
-	return parent_rate / div;
-}
-
-static int _clk_spll_enable(struct clk *clk)
-{
-	u32 reg;
-
-	reg = __raw_readl(CCM_CSCR);
-	reg |= CCM_CSCR_SPEN;
-	__raw_writel(reg, CCM_CSCR);
-
-	while ((__raw_readl(CCM_SPCTL1) & CCM_SPCTL1_LF) == 0)
-		;
-	return 0;
-}
-
-static void _clk_spll_disable(struct clk *clk)
-{
-	u32 reg;
-
-	reg = __raw_readl(CCM_CSCR);
-	reg &= ~CCM_CSCR_SPEN;
-	__raw_writel(reg, CCM_CSCR);
-}
-
-
-#define CSCR() (__raw_readl(CCM_CSCR))
-#define PCDR0() (__raw_readl(CCM_PCDR0))
-#define PCDR1() (__raw_readl(CCM_PCDR1))
-
-static unsigned long _clk_perclkx_round_rate(struct clk *clk,
-					     unsigned long rate)
-{
-	return _clk_generic_round_rate(clk, rate, 64);
-}
-
-static int _clk_perclkx_set_rate(struct clk *clk, unsigned long rate)
-{
-	u32 reg;
-	u32 div;
-	unsigned long parent_rate;
-
-	parent_rate = clk_get_rate(clk->parent);
-
-	if (clk->id < 0 || clk->id > 3)
-		return -EINVAL;
-
-	div = parent_rate / rate;
-	if (div > 64 || div < 1 || ((parent_rate / div) != rate))
-		return -EINVAL;
-	div--;
-
-	reg =
-	    __raw_readl(CCM_PCDR1) & ~(CCM_PCDR1_PERDIV1_MASK <<
-				       (clk->id << 3));
-	reg |= div << (clk->id << 3);
-	__raw_writel(reg, CCM_PCDR1);
-
-	return 0;
-}
-
-static unsigned long _clk_usb_recalc(struct clk *clk)
-{
-	unsigned long usb_pdf;
-	unsigned long parent_rate;
-
-	parent_rate = clk_get_rate(clk->parent);
-
-	usb_pdf = (CSCR() & CCM_CSCR_USB_MASK) >> CCM_CSCR_USB_OFFSET;
-
-	return parent_rate / (usb_pdf + 1U);
-}
-
-static unsigned long _clk_usb_round_rate(struct clk *clk,
-					     unsigned long rate)
-{
-	return _clk_generic_round_rate(clk, rate, 8);
-}
-
-static int _clk_usb_set_rate(struct clk *clk, unsigned long rate)
-{
-	u32 reg;
-	u32 div;
-	unsigned long parent_rate;
-
-	parent_rate = clk_get_rate(clk->parent);
-
-	div = parent_rate / rate;
-	if (div > 8 || div < 1 || ((parent_rate / div) != rate))
-		return -EINVAL;
-	div--;
-
-	reg = CSCR() & ~CCM_CSCR_USB_MASK;
-	reg |= div << CCM_CSCR_USB_OFFSET;
-	__raw_writel(reg, CCM_CSCR);
-
-	return 0;
-}
-
-static unsigned long _clk_ssix_recalc(struct clk *clk, unsigned long pdf)
-{
-	unsigned long parent_rate;
-
-	parent_rate = clk_get_rate(clk->parent);
-
-	pdf = (pdf < 2) ? 124UL : pdf;  /* MX21 & MX27 TO1 */
-
-	return 2UL * parent_rate / pdf;
-}
-
-static unsigned long _clk_ssi1_recalc(struct clk *clk)
-{
-	return _clk_ssix_recalc(clk,
-		(PCDR0() & CCM_PCDR0_SSI1BAUDDIV_MASK)
-		>> CCM_PCDR0_SSI1BAUDDIV_OFFSET);
-}
-
-static unsigned long _clk_ssi2_recalc(struct clk *clk)
-{
-	return _clk_ssix_recalc(clk,
-		(PCDR0() & CCM_PCDR0_SSI2BAUDDIV_MASK) >>
-		CCM_PCDR0_SSI2BAUDDIV_OFFSET);
-}
-
-static unsigned long _clk_nfc_recalc(struct clk *clk)
-{
-	unsigned long nfc_pdf;
-	unsigned long parent_rate;
-
-	parent_rate = clk_get_rate(clk->parent);
-
-	nfc_pdf = (PCDR0() & CCM_PCDR0_NFCDIV_MASK)
-		>> CCM_PCDR0_NFCDIV_OFFSET;
-
-	return parent_rate / (nfc_pdf + 1);
-}
-
-static unsigned long _clk_parent_round_rate(struct clk *clk, unsigned long rate)
-{
-	return clk->parent->round_rate(clk->parent, rate);
-}
-
-static int _clk_parent_set_rate(struct clk *clk, unsigned long rate)
-{
-	return clk->parent->set_rate(clk->parent, rate);
-}
-
-static unsigned long external_high_reference; /* in Hz */
-
-static unsigned long get_high_reference_clock_rate(struct clk *clk)
-{
-	return external_high_reference;
-}
-
-/*
- * the high frequency external clock reference
- * Default case is 26MHz.
- */
-static struct clk ckih_clk = {
-	.get_rate = get_high_reference_clock_rate,
-};
-
-static unsigned long external_low_reference; /* in Hz */
-
-static unsigned long get_low_reference_clock_rate(struct clk *clk)
-{
-	return external_low_reference;
-}
-
-/*
- * the low frequency external clock reference
- * Default case is 32.768kHz.
- */
-static struct clk ckil_clk = {
-	.get_rate = get_low_reference_clock_rate,
-};
-
-
-static unsigned long _clk_fpm_recalc(struct clk *clk)
-{
-	return clk_get_rate(clk->parent) * 512;
-}
-
-/* Output of frequency pre multiplier */
-static struct clk fpm_clk = {
-	.parent = &ckil_clk,
-	.get_rate = _clk_fpm_recalc,
-};
-
-static unsigned long get_mpll_clk(struct clk *clk)
-{
-	uint32_t reg;
-	unsigned long ref_clk;
-	unsigned long mfi = 0, mfn = 0, mfd = 0, pdf = 0;
-	unsigned long long temp;
-
-	ref_clk = clk_get_rate(clk->parent);
-
-	reg = __raw_readl(CCM_MPCTL0);
-	pdf = (reg & CCM_MPCTL0_PD_MASK)  >> CCM_MPCTL0_PD_OFFSET;
-	mfd = (reg & CCM_MPCTL0_MFD_MASK) >> CCM_MPCTL0_MFD_OFFSET;
-	mfi = (reg & CCM_MPCTL0_MFI_MASK) >> CCM_MPCTL0_MFI_OFFSET;
-	mfn = (reg & CCM_MPCTL0_MFN_MASK) >> CCM_MPCTL0_MFN_OFFSET;
-
-	mfi = (mfi <= 5) ? 5 : mfi;
-	temp = 2LL * ref_clk * mfn;
-	do_div(temp, mfd + 1);
-	temp = 2LL * ref_clk * mfi + temp;
-	do_div(temp, pdf + 1);
-
-	return (unsigned long)temp;
-}
-
-static struct clk mpll_clk = {
-	.parent = &ckih_clk,
-	.get_rate = get_mpll_clk,
-};
-
-static unsigned long _clk_fclk_get_rate(struct clk *clk)
-{
-	unsigned long parent_rate;
-	u32 div;
-
-	div = (CSCR() & CCM_CSCR_PRESC_MASK) >> CCM_CSCR_PRESC_OFFSET;
-	parent_rate = clk_get_rate(clk->parent);
-
-	return parent_rate / (div+1);
-}
-
-static struct clk fclk_clk = {
-	.parent = &mpll_clk,
-	.get_rate = _clk_fclk_get_rate
-};
-
-static unsigned long get_spll_clk(struct clk *clk)
-{
-	uint32_t reg;
-	unsigned long ref_clk;
-	unsigned long mfi = 0, mfn = 0, mfd = 0, pdf = 0;
-	unsigned long long temp;
-
-	ref_clk = clk_get_rate(clk->parent);
-
-	reg = __raw_readl(CCM_SPCTL0);
-	pdf = (reg & CCM_SPCTL0_PD_MASK) >> CCM_SPCTL0_PD_OFFSET;
-	mfd = (reg & CCM_SPCTL0_MFD_MASK) >> CCM_SPCTL0_MFD_OFFSET;
-	mfi = (reg & CCM_SPCTL0_MFI_MASK) >> CCM_SPCTL0_MFI_OFFSET;
-	mfn = (reg & CCM_SPCTL0_MFN_MASK) >> CCM_SPCTL0_MFN_OFFSET;
-
-	mfi = (mfi <= 5) ? 5 : mfi;
-	temp = 2LL * ref_clk * mfn;
-	do_div(temp, mfd + 1);
-	temp = 2LL * ref_clk * mfi + temp;
-	do_div(temp, pdf + 1);
-
-	return (unsigned long)temp;
-}
-
-static struct clk spll_clk = {
-	.parent = &ckih_clk,
-	.get_rate = get_spll_clk,
-	.enable = _clk_spll_enable,
-	.disable = _clk_spll_disable,
-};
-
-static unsigned long get_hclk_clk(struct clk *clk)
-{
-	unsigned long rate;
-	unsigned long bclk_pdf;
-
-	bclk_pdf = (CSCR() & CCM_CSCR_BCLK_MASK)
-		>> CCM_CSCR_BCLK_OFFSET;
-
-	rate = clk_get_rate(clk->parent);
-	return rate / (bclk_pdf + 1);
-}
-
-static struct clk hclk_clk = {
-	.parent = &fclk_clk,
-	.get_rate = get_hclk_clk,
-};
-
-static unsigned long get_ipg_clk(struct clk *clk)
-{
-	unsigned long rate;
-	unsigned long ipg_pdf;
-
-	ipg_pdf = (CSCR() & CCM_CSCR_IPDIV) >> CCM_CSCR_IPDIV_OFFSET;
-
-	rate = clk_get_rate(clk->parent);
-	return rate / (ipg_pdf + 1);
-}
-
-static struct clk ipg_clk = {
-	.parent = &hclk_clk,
-	.get_rate = get_ipg_clk,
-};
-
-static unsigned long _clk_perclkx_recalc(struct clk *clk)
-{
-	unsigned long perclk_pdf;
-	unsigned long parent_rate;
-
-	parent_rate = clk_get_rate(clk->parent);
-
-	if (clk->id < 0 || clk->id > 3)
-		return 0;
-
-	perclk_pdf = (PCDR1() >> (clk->id << 3)) & CCM_PCDR1_PERDIV1_MASK;
-
-	return parent_rate / (perclk_pdf + 1);
-}
-
-static struct clk per_clk[] = {
-	{
-		.id = 0,
-		.parent = &mpll_clk,
-		.get_rate = _clk_perclkx_recalc,
-	}, {
-		.id = 1,
-		.parent = &mpll_clk,
-		.get_rate = _clk_perclkx_recalc,
-	}, {
-		.id = 2,
-		.parent = &mpll_clk,
-		.round_rate = _clk_perclkx_round_rate,
-		.set_rate = _clk_perclkx_set_rate,
-		.get_rate = _clk_perclkx_recalc,
-		/* Enable/Disable done via lcd_clkc[1] */
-	}, {
-		.id = 3,
-		.parent = &mpll_clk,
-		.round_rate = _clk_perclkx_round_rate,
-		.set_rate = _clk_perclkx_set_rate,
-		.get_rate = _clk_perclkx_recalc,
-		/* Enable/Disable done via csi_clk[1] */
-	},
-};
-
-static struct clk uart_ipg_clk[];
-
-static struct clk uart_clk[] = {
-	{
-		.id = 0,
-		.parent = &per_clk[0],
-		.secondary = &uart_ipg_clk[0],
-	}, {
-		.id = 1,
-		.parent = &per_clk[0],
-		.secondary = &uart_ipg_clk[1],
-	}, {
-		.id = 2,
-		.parent = &per_clk[0],
-		.secondary = &uart_ipg_clk[2],
-	}, {
-		.id = 3,
-		.parent = &per_clk[0],
-		.secondary = &uart_ipg_clk[3],
-	},
-};
-
-static struct clk uart_ipg_clk[] = {
-	{
-		.id = 0,
-		.parent = &ipg_clk,
-		.enable = _clk_enable,
-		.enable_reg = CCM_PCCR_UART1_REG,
-		.enable_shift = CCM_PCCR_UART1_OFFSET,
-		.disable = _clk_disable,
-	}, {
-		.id = 1,
-		.parent = &ipg_clk,
-		.enable = _clk_enable,
-		.enable_reg = CCM_PCCR_UART2_REG,
-		.enable_shift = CCM_PCCR_UART2_OFFSET,
-		.disable = _clk_disable,
-	}, {
-		.id = 2,
-		.parent = &ipg_clk,
-		.enable = _clk_enable,
-		.enable_reg = CCM_PCCR_UART3_REG,
-		.enable_shift = CCM_PCCR_UART3_OFFSET,
-		.disable = _clk_disable,
-	}, {
-		.id = 3,
-		.parent = &ipg_clk,
-		.enable = _clk_enable,
-		.enable_reg = CCM_PCCR_UART4_REG,
-		.enable_shift = CCM_PCCR_UART4_OFFSET,
-		.disable = _clk_disable,
-	},
-};
-
-static struct clk gpt_ipg_clk[];
-
-static struct clk gpt_clk[] = {
-	{
-		.id = 0,
-		.parent = &per_clk[0],
-		.secondary = &gpt_ipg_clk[0],
-	}, {
-		.id = 1,
-		.parent = &per_clk[0],
-		.secondary = &gpt_ipg_clk[1],
-	}, {
-		.id = 2,
-		.parent = &per_clk[0],
-		.secondary = &gpt_ipg_clk[2],
-	},
-};
-
-static struct clk gpt_ipg_clk[] = {
-	{
-		.id = 0,
-		.parent = &ipg_clk,
-		.enable = _clk_enable,
-		.enable_reg = CCM_PCCR_GPT1_REG,
-		.enable_shift = CCM_PCCR_GPT1_OFFSET,
-		.disable = _clk_disable,
-	}, {
-		.id = 1,
-		.parent = &ipg_clk,
-		.enable = _clk_enable,
-		.enable_reg = CCM_PCCR_GPT2_REG,
-		.enable_shift = CCM_PCCR_GPT2_OFFSET,
-		.disable = _clk_disable,
-	}, {
-		.id = 2,
-		.parent = &ipg_clk,
-		.enable = _clk_enable,
-		.enable_reg = CCM_PCCR_GPT3_REG,
-		.enable_shift = CCM_PCCR_GPT3_OFFSET,
-		.disable = _clk_disable,
-	},
-};
-
-static struct clk pwm_clk[] = {
-	{
-		.parent = &per_clk[0],
-		.secondary = &pwm_clk[1],
-	}, {
-		.parent = &ipg_clk,
-		.enable = _clk_enable,
-		.enable_reg = CCM_PCCR_PWM_REG,
-		.enable_shift = CCM_PCCR_PWM_OFFSET,
-		.disable = _clk_disable,
-	},
-};
-
-static struct clk sdhc_ipg_clk[];
-
-static struct clk sdhc_clk[] = {
-	{
-		.id = 0,
-		.parent = &per_clk[1],
-		.secondary = &sdhc_ipg_clk[0],
-	}, {
-		.id = 1,
-		.parent = &per_clk[1],
-		.secondary = &sdhc_ipg_clk[1],
-	},
-};
-
-static struct clk sdhc_ipg_clk[] = {
-	{
-		.id = 0,
-		.parent = &ipg_clk,
-		.enable = _clk_enable,
-		.enable_reg = CCM_PCCR_SDHC1_REG,
-		.enable_shift = CCM_PCCR_SDHC1_OFFSET,
-		.disable = _clk_disable,
-	}, {
-		.id = 1,
-		.parent = &ipg_clk,
-		.enable = _clk_enable,
-		.enable_reg = CCM_PCCR_SDHC2_REG,
-		.enable_shift = CCM_PCCR_SDHC2_OFFSET,
-		.disable = _clk_disable,
-	},
-};
-
-static struct clk cspi_ipg_clk[];
-
-static struct clk cspi_clk[] = {
-	{
-		.id = 0,
-		.parent = &per_clk[1],
-		.secondary = &cspi_ipg_clk[0],
-	}, {
-		.id = 1,
-		.parent = &per_clk[1],
-		.secondary = &cspi_ipg_clk[1],
-	}, {
-		.id = 2,
-		.parent = &per_clk[1],
-		.secondary = &cspi_ipg_clk[2],
-	},
-};
-
-static struct clk cspi_ipg_clk[] = {
-	{
-		.id = 0,
-		.parent = &ipg_clk,
-		.enable = _clk_enable,
-		.enable_reg = CCM_PCCR_CSPI1_REG,
-		.enable_shift = CCM_PCCR_CSPI1_OFFSET,
-		.disable = _clk_disable,
-	}, {
-		.id = 1,
-		.parent = &ipg_clk,
-		.enable = _clk_enable,
-		.enable_reg = CCM_PCCR_CSPI2_REG,
-		.enable_shift = CCM_PCCR_CSPI2_OFFSET,
-		.disable = _clk_disable,
-	}, {
-		.id = 3,
-		.parent = &ipg_clk,
-		.enable = _clk_enable,
-		.enable_reg = CCM_PCCR_CSPI3_REG,
-		.enable_shift = CCM_PCCR_CSPI3_OFFSET,
-		.disable = _clk_disable,
-	},
-};
-
-static struct clk lcdc_clk[] = {
-	{
-		.parent = &per_clk[2],
-		.secondary = &lcdc_clk[1],
-		.round_rate = _clk_parent_round_rate,
-		.set_rate = _clk_parent_set_rate,
-	}, {
-		.parent = &ipg_clk,
-		.secondary = &lcdc_clk[2],
-		.enable = _clk_enable,
-		.enable_reg = CCM_PCCR_LCDC_REG,
-		.enable_shift = CCM_PCCR_LCDC_OFFSET,
-		.disable = _clk_disable,
-	}, {
-		.parent = &hclk_clk,
-		.enable = _clk_enable,
-		.enable_reg = CCM_PCCR_HCLK_LCDC_REG,
-		.enable_shift = CCM_PCCR_HCLK_LCDC_OFFSET,
-		.disable = _clk_disable,
-	},
-};
-
-static struct clk csi_clk[] = {
-	{
-		.parent = &per_clk[3],
-		.secondary = &csi_clk[1],
-		.round_rate = _clk_parent_round_rate,
-		.set_rate = _clk_parent_set_rate,
-	}, {
-		.parent = &hclk_clk,
-		.enable = _clk_enable,
-		.enable_reg = CCM_PCCR_HCLK_CSI_REG,
-		.enable_shift = CCM_PCCR_HCLK_CSI_OFFSET,
-		.disable = _clk_disable,
-	},
-};
-
-static struct clk usb_clk[] = {
-	{
-		.parent = &spll_clk,
-		.secondary = &usb_clk[1],
-		.get_rate = _clk_usb_recalc,
-		.enable = _clk_enable,
-		.enable_reg = CCM_PCCR_USBOTG_REG,
-		.enable_shift = CCM_PCCR_USBOTG_OFFSET,
-		.disable = _clk_disable,
-		.round_rate = _clk_usb_round_rate,
-		.set_rate = _clk_usb_set_rate,
-	}, {
-		.parent = &hclk_clk,
-		.enable = _clk_enable,
-		.enable_reg = CCM_PCCR_HCLK_USBOTG_REG,
-		.enable_shift = CCM_PCCR_HCLK_USBOTG_OFFSET,
-		.disable = _clk_disable,
-	}
-};
-
-static struct clk ssi_ipg_clk[];
-
-static struct clk ssi_clk[] = {
-	{
-		.id = 0,
-		.parent = &mpll_clk,
-		.secondary = &ssi_ipg_clk[0],
-		.get_rate = _clk_ssi1_recalc,
-		.enable = _clk_enable,
-		.enable_reg = CCM_PCCR_SSI1_BAUD_REG,
-		.enable_shift = CCM_PCCR_SSI1_BAUD_OFFSET,
-		.disable = _clk_disable,
-	}, {
-		.id = 1,
-		.parent = &mpll_clk,
-		.secondary = &ssi_ipg_clk[1],
-		.get_rate = _clk_ssi2_recalc,
-		.enable = _clk_enable,
-		.enable_reg = CCM_PCCR_SSI2_BAUD_REG,
-		.enable_shift = CCM_PCCR_SSI2_BAUD_OFFSET,
-		.disable = _clk_disable,
-	},
-};
-
-static struct clk ssi_ipg_clk[] = {
-	{
-		.id = 0,
-		.parent = &ipg_clk,
-		.enable = _clk_enable,
-		.enable_reg = CCM_PCCR_SSI1_REG,
-		.enable_shift = CCM_PCCR_SSI1_IPG_OFFSET,
-		.disable = _clk_disable,
-	}, {
-		.id = 1,
-		.parent = &ipg_clk,
-		.enable = _clk_enable,
-		.enable_reg = CCM_PCCR_SSI2_REG,
-		.enable_shift = CCM_PCCR_SSI2_IPG_OFFSET,
-		.disable = _clk_disable,
-	},
-};
-
-
-static struct clk nfc_clk = {
-	.parent = &fclk_clk,
-	.get_rate = _clk_nfc_recalc,
-	.enable = _clk_enable,
-	.enable_reg = CCM_PCCR_NFC_REG,
-	.enable_shift = CCM_PCCR_NFC_OFFSET,
-	.disable = _clk_disable,
-};
-
-static struct clk dma_clk[] = {
-	{
-		.parent = &hclk_clk,
-		.enable = _clk_enable,
-		.enable_reg = CCM_PCCR_DMA_REG,
-		.enable_shift = CCM_PCCR_DMA_OFFSET,
-		.disable = _clk_disable,
-		.secondary = &dma_clk[1],
-	},  {
-		.enable = _clk_enable,
-		.enable_reg = CCM_PCCR_HCLK_DMA_REG,
-		.enable_shift = CCM_PCCR_HCLK_DMA_OFFSET,
-		.disable = _clk_disable,
-	},
-};
-
-static struct clk brom_clk = {
-	.parent = &hclk_clk,
-	.enable = _clk_enable,
-	.enable_reg = CCM_PCCR_HCLK_BROM_REG,
-	.enable_shift = CCM_PCCR_HCLK_BROM_OFFSET,
-	.disable = _clk_disable,
-};
-
-static struct clk emma_clk[] = {
-	{
-		.parent = &hclk_clk,
-		.enable = _clk_enable,
-		.enable_reg = CCM_PCCR_EMMA_REG,
-		.enable_shift = CCM_PCCR_EMMA_OFFSET,
-		.disable = _clk_disable,
-		.secondary = &emma_clk[1],
-	}, {
-		.enable = _clk_enable,
-		.enable_reg = CCM_PCCR_HCLK_EMMA_REG,
-		.enable_shift = CCM_PCCR_HCLK_EMMA_OFFSET,
-		.disable = _clk_disable,
-	}
-};
-
-static struct clk slcdc_clk[] = {
-	{
-		.parent = &hclk_clk,
-		.enable = _clk_enable,
-		.enable_reg = CCM_PCCR_SLCDC_REG,
-		.enable_shift = CCM_PCCR_SLCDC_OFFSET,
-		.disable = _clk_disable,
-		.secondary = &slcdc_clk[1],
-	}, {
-		.enable = _clk_enable,
-		.enable_reg = CCM_PCCR_HCLK_SLCDC_REG,
-		.enable_shift = CCM_PCCR_HCLK_SLCDC_OFFSET,
-		.disable = _clk_disable,
-	}
-};
-
-static struct clk wdog_clk = {
-	.parent = &ipg_clk,
-	.enable = _clk_enable,
-	.enable_reg = CCM_PCCR_WDT_REG,
-	.enable_shift = CCM_PCCR_WDT_OFFSET,
-	.disable = _clk_disable,
-};
-
-static struct clk gpio_clk = {
-	.parent = &ipg_clk,
-	.enable = _clk_enable,
-	.enable_reg = CCM_PCCR_GPIO_REG,
-	.enable_shift = CCM_PCCR_GPIO_OFFSET,
-	.disable = _clk_disable,
-};
-
-static struct clk i2c_clk = {
-	.id = 0,
-	.parent = &ipg_clk,
-	.enable = _clk_enable,
-	.enable_reg = CCM_PCCR_I2C1_REG,
-	.enable_shift = CCM_PCCR_I2C1_OFFSET,
-	.disable = _clk_disable,
-};
-
-static struct clk kpp_clk = {
-	.parent = &ipg_clk,
-	.enable = _clk_enable,
-	.enable_reg = CCM_PCCR_KPP_REG,
-	.enable_shift = CCM_PCCR_KPP_OFFSET,
-	.disable = _clk_disable,
-};
-
-static struct clk owire_clk = {
-	.parent = &ipg_clk,
-	.enable = _clk_enable,
-	.enable_reg = CCM_PCCR_OWIRE_REG,
-	.enable_shift = CCM_PCCR_OWIRE_OFFSET,
-	.disable = _clk_disable,
-};
-
-static struct clk rtc_clk = {
-	.parent = &ipg_clk,
-	.enable = _clk_enable,
-	.enable_reg = CCM_PCCR_RTC_REG,
-	.enable_shift = CCM_PCCR_RTC_OFFSET,
-	.disable = _clk_disable,
-};
-
-static unsigned long _clk_clko_round_rate(struct clk *clk, unsigned long rate)
-{
-	return _clk_generic_round_rate(clk, rate, 8);
-}
-
-static int _clk_clko_set_rate(struct clk *clk, unsigned long rate)
-{
-	u32 reg;
-	u32 div;
-	unsigned long parent_rate;
-
-	parent_rate = clk_get_rate(clk->parent);
-
-	div = parent_rate / rate;
-
-	if (div > 8 || div < 1 || ((parent_rate / div) != rate))
-		return -EINVAL;
-	div--;
-
-	reg = __raw_readl(CCM_PCDR0);
-
-	if (clk->parent == &usb_clk[0]) {
-		reg &= ~CCM_PCDR0_48MDIV_MASK;
-		reg |= div << CCM_PCDR0_48MDIV_OFFSET;
-	}
-	__raw_writel(reg, CCM_PCDR0);
-
-	return 0;
-}
-
-static unsigned long _clk_clko_recalc(struct clk *clk)
-{
-	u32 div = 0;
-	unsigned long parent_rate;
-
-	parent_rate = clk_get_rate(clk->parent);
-
-	if (clk->parent == &usb_clk[0]) /* 48M */
-		div = (__raw_readl(CCM_PCDR0) & CCM_PCDR0_48MDIV_MASK)
-			 >> CCM_PCDR0_48MDIV_OFFSET;
-	div++;
-
-	return parent_rate / div;
-}
-
-static struct clk clko_clk;
-
-static int _clk_clko_set_parent(struct clk *clk, struct clk *parent)
-{
-	u32 reg;
-
-	reg = __raw_readl(CCM_CCSR) & ~CCM_CCSR_CLKOSEL_MASK;
-
-	if (parent == &ckil_clk)
-		reg |= 0 << CCM_CCSR_CLKOSEL_OFFSET;
-	else if (parent == &fpm_clk)
-		reg |= 1 << CCM_CCSR_CLKOSEL_OFFSET;
-	else if (parent == &ckih_clk)
-		reg |= 2 << CCM_CCSR_CLKOSEL_OFFSET;
-	else if (parent == mpll_clk.parent)
-		reg |= 3 << CCM_CCSR_CLKOSEL_OFFSET;
-	else if (parent == spll_clk.parent)
-		reg |= 4 << CCM_CCSR_CLKOSEL_OFFSET;
-	else if (parent == &mpll_clk)
-		reg |= 5 << CCM_CCSR_CLKOSEL_OFFSET;
-	else if (parent == &spll_clk)
-		reg |= 6 << CCM_CCSR_CLKOSEL_OFFSET;
-	else if (parent == &fclk_clk)
-		reg |= 7 << CCM_CCSR_CLKOSEL_OFFSET;
-	else if (parent == &hclk_clk)
-		reg |= 8 << CCM_CCSR_CLKOSEL_OFFSET;
-	else if (parent == &ipg_clk)
-		reg |= 9 << CCM_CCSR_CLKOSEL_OFFSET;
-	else if (parent == &per_clk[0])
-		reg |= 0xA << CCM_CCSR_CLKOSEL_OFFSET;
-	else if (parent == &per_clk[1])
-		reg |= 0xB << CCM_CCSR_CLKOSEL_OFFSET;
-	else if (parent == &per_clk[2])
-		reg |= 0xC << CCM_CCSR_CLKOSEL_OFFSET;
-	else if (parent == &per_clk[3])
-		reg |= 0xD << CCM_CCSR_CLKOSEL_OFFSET;
-	else if (parent == &ssi_clk[0])
-		reg |= 0xE << CCM_CCSR_CLKOSEL_OFFSET;
-	else if (parent == &ssi_clk[1])
-		reg |= 0xF << CCM_CCSR_CLKOSEL_OFFSET;
-	else if (parent == &nfc_clk)
-		reg |= 0x10 << CCM_CCSR_CLKOSEL_OFFSET;
-	else if (parent == &usb_clk[0])
-		reg |= 0x14 << CCM_CCSR_CLKOSEL_OFFSET;
-	else if (parent == &clko_clk)
-		reg |= 0x15 << CCM_CCSR_CLKOSEL_OFFSET;
-	else
-		return -EINVAL;
-
-	__raw_writel(reg, CCM_CCSR);
-
-	return 0;
-}
-
-static struct clk clko_clk = {
-	.get_rate = _clk_clko_recalc,
-	.set_rate = _clk_clko_set_rate,
-	.round_rate = _clk_clko_round_rate,
-	.set_parent = _clk_clko_set_parent,
-};
-
-
-#define _REGISTER_CLOCK(d, n, c) \
-	{ \
-		.dev_id = d, \
-		.con_id = n, \
-		.clk = &c, \
-	},
-static struct clk_lookup lookups[] = {
-/* It's unlikely that any driver wants one of them directly:
-	_REGISTER_CLOCK(NULL, "ckih", ckih_clk)
-	_REGISTER_CLOCK(NULL, "ckil", ckil_clk)
-	_REGISTER_CLOCK(NULL, "fpm", fpm_clk)
-	_REGISTER_CLOCK(NULL, "mpll", mpll_clk)
-	_REGISTER_CLOCK(NULL, "spll", spll_clk)
-	_REGISTER_CLOCK(NULL, "fclk", fclk_clk)
-	_REGISTER_CLOCK(NULL, "hclk", hclk_clk)
-	_REGISTER_CLOCK(NULL, "ipg", ipg_clk)
-*/
-	_REGISTER_CLOCK(NULL, "perclk1", per_clk[0])
-	_REGISTER_CLOCK(NULL, "perclk2", per_clk[1])
-	_REGISTER_CLOCK(NULL, "perclk3", per_clk[2])
-	_REGISTER_CLOCK(NULL, "perclk4", per_clk[3])
-	_REGISTER_CLOCK(NULL, "clko", clko_clk)
-	_REGISTER_CLOCK("imx21-uart.0", NULL, uart_clk[0])
-	_REGISTER_CLOCK("imx21-uart.1", NULL, uart_clk[1])
-	_REGISTER_CLOCK("imx21-uart.2", NULL, uart_clk[2])
-	_REGISTER_CLOCK("imx21-uart.3", NULL, uart_clk[3])
-	_REGISTER_CLOCK(NULL, "gpt1", gpt_clk[0])
-	_REGISTER_CLOCK(NULL, "gpt2", gpt_clk[1])
-	_REGISTER_CLOCK(NULL, "gpt3", gpt_clk[2])
-	_REGISTER_CLOCK(NULL, "pwm", pwm_clk[0])
-	_REGISTER_CLOCK(NULL, "sdhc1", sdhc_clk[0])
-	_REGISTER_CLOCK(NULL, "sdhc2", sdhc_clk[1])
-	_REGISTER_CLOCK("imx21-cspi.0", NULL, cspi_clk[0])
-	_REGISTER_CLOCK("imx21-cspi.1", NULL, cspi_clk[1])
-	_REGISTER_CLOCK("imx21-cspi.2", NULL, cspi_clk[2])
-	_REGISTER_CLOCK("imx-fb.0", NULL, lcdc_clk[0])
-	_REGISTER_CLOCK(NULL, "csi", csi_clk[0])
-	_REGISTER_CLOCK("imx21-hcd.0", NULL, usb_clk[0])
-	_REGISTER_CLOCK(NULL, "ssi1", ssi_clk[0])
-	_REGISTER_CLOCK(NULL, "ssi2", ssi_clk[1])
-	_REGISTER_CLOCK("mxc_nand.0", NULL, nfc_clk)
-	_REGISTER_CLOCK(NULL, "dma", dma_clk[0])
-	_REGISTER_CLOCK(NULL, "brom", brom_clk)
-	_REGISTER_CLOCK(NULL, "emma", emma_clk[0])
-	_REGISTER_CLOCK(NULL, "slcdc", slcdc_clk[0])
-	_REGISTER_CLOCK("imx2-wdt.0", NULL, wdog_clk)
-	_REGISTER_CLOCK(NULL, "gpio", gpio_clk)
-	_REGISTER_CLOCK("imx-i2c.0", NULL, i2c_clk)
-	_REGISTER_CLOCK("mxc-keypad", NULL, kpp_clk)
-	_REGISTER_CLOCK(NULL, "owire", owire_clk)
-	_REGISTER_CLOCK(NULL, "rtc", rtc_clk)
-};
-
-/*
- * must be called very early to get information about the
- * available clock rate when the timer framework starts
- */
-int __init mx21_clocks_init(unsigned long lref, unsigned long href)
-{
-	u32 cscr;
-
-	external_low_reference = lref;
-	external_high_reference = href;
-
-	/* detect clock reference for both system PLLs */
-	cscr = CSCR();
-	if (cscr & CCM_CSCR_MCU)
-		mpll_clk.parent = &ckih_clk;
-	else
-		mpll_clk.parent = &fpm_clk;
-
-	if (cscr & CCM_CSCR_SP)
-		spll_clk.parent = &ckih_clk;
-	else
-		spll_clk.parent = &fpm_clk;
-
-	clkdev_add_table(lookups, ARRAY_SIZE(lookups));
-
-	/* Turn off all clock gates */
-	__raw_writel(0, CCM_PCCR0);
-	__raw_writel(CCM_PCCR_GPT1_MASK, CCM_PCCR1);
-
-	/* This turns off the serial PLL as well */
-	spll_clk.disable(&spll_clk);
-
-	/* This will propagate to all children and init all the clock rates. */
-	clk_enable(&per_clk[0]);
-	clk_enable(&gpio_clk);
-
-#if defined(CONFIG_DEBUG_LL) && !defined(CONFIG_DEBUG_ICEDCC)
-	clk_enable(&uart_clk[0]);
-#endif
-
-	mxc_timer_init(&gpt_clk[0], MX21_IO_ADDRESS(MX21_GPT1_BASE_ADDR),
-			MX21_INT_GPT1);
-	return 0;
-}
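
These per-SoC clock tables are being removed as i.MX clock support moves over to the common clock framework, where the same gates end up re-registered through the clk-provider helpers. As a rough, hedged sketch only (the function name, register pointer, bit index and lock below are illustrative, not taken from this merge), a single gated leaf clock plus its clkdev entry would look roughly like this:

#include <linux/clk-provider.h>
#include <linux/clkdev.h>
#include <linux/init.h>
#include <linux/spinlock.h>

/* assumed lock serializing read-modify-write of the shared gate register */
static DEFINE_SPINLOCK(example_ccm_lock);

static void __init example_register_wdog_gate(void __iomem *gate_reg)
{
	struct clk *clk;

	/* one enable bit in a shared register, parented on an "ipg" clock */
	clk = clk_register_gate(NULL, "wdog", "ipg", 0,
				gate_reg, 24, 0, &example_ccm_lock);

	/* plays the role of _REGISTER_CLOCK("imx2-wdt.0", NULL, wdog_clk) */
	clk_register_clkdev(clk, NULL, "imx2-wdt.0");
}
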
diff --git a/arch/arm/mach-imx/clock-imx25.c b/arch/arm/mach-imx/clock-imx25.c
deleted file mode 100644
index b0fec74c..0000000
--- a/arch/arm/mach-imx/clock-imx25.c
+++ /dev/null
@@ -1,346 +0,0 @@
-/*
- * Copyright (C) 2009 by Sascha Hauer, Pengutronix
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * as published by the Free Software Foundation; either version 2
- * of the License, or (at your option) any later version.
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston,
- * MA 02110-1301, USA.
- */
-
-#include <linux/kernel.h>
-#include <linux/init.h>
-#include <linux/list.h>
-#include <linux/clk.h>
-#include <linux/io.h>
-#include <linux/clkdev.h>
-
-#include <mach/clock.h>
-#include <mach/hardware.h>
-#include <mach/common.h>
-#include <mach/mx25.h>
-
-#define CRM_BASE	MX25_IO_ADDRESS(MX25_CRM_BASE_ADDR)
-
-#define CCM_MPCTL	0x00
-#define CCM_UPCTL	0x04
-#define CCM_CCTL	0x08
-#define CCM_CGCR0	0x0C
-#define CCM_CGCR1	0x10
-#define CCM_CGCR2	0x14
-#define CCM_PCDR0	0x18
-#define CCM_PCDR1	0x1C
-#define CCM_PCDR2	0x20
-#define CCM_PCDR3	0x24
-#define CCM_RCSR	0x28
-#define CCM_CRDR	0x2C
-#define CCM_DCVR0	0x30
-#define CCM_DCVR1	0x34
-#define CCM_DCVR2	0x38
-#define CCM_DCVR3	0x3c
-#define CCM_LTR0	0x40
-#define CCM_LTR1	0x44
-#define CCM_LTR2	0x48
-#define CCM_LTR3	0x4c
-
-static unsigned long get_rate_mpll(void)
-{
-	ulong mpctl = __raw_readl(CRM_BASE + CCM_MPCTL);
-
-	return mxc_decode_pll(mpctl, 24000000);
-}
-
-static unsigned long get_rate_upll(void)
-{
-	ulong mpctl = __raw_readl(CRM_BASE + CCM_UPCTL);
-
-	return mxc_decode_pll(mpctl, 24000000);
-}
-
-unsigned long get_rate_arm(struct clk *clk)
-{
-	unsigned long cctl = readl(CRM_BASE + CCM_CCTL);
-	unsigned long rate = get_rate_mpll();
-
-	if (cctl & (1 << 14))
-		rate = (rate * 3) >> 2;
-
-	return rate / ((cctl >> 30) + 1);
-}
-
-static unsigned long get_rate_ahb(struct clk *clk)
-{
-	unsigned long cctl = readl(CRM_BASE + CCM_CCTL);
-
-	return get_rate_arm(NULL) / (((cctl >> 28) & 0x3) + 1);
-}
-
-static unsigned long get_rate_ipg(struct clk *clk)
-{
-	return get_rate_ahb(NULL) >> 1;
-}
-
-static unsigned long get_rate_per(int per)
-{
-	unsigned long ofs = (per & 0x3) * 8;
-	unsigned long reg = per & ~0x3;
-	unsigned long val = (readl(CRM_BASE + CCM_PCDR0 + reg) >> ofs) & 0x3f;
-	unsigned long fref;
-
-	if (readl(CRM_BASE + 0x64) & (1 << per))
-		fref = get_rate_upll();
-	else
-		fref = get_rate_ahb(NULL);
-
-	return fref / (val + 1);
-}
-
-static unsigned long get_rate_uart(struct clk *clk)
-{
-	return get_rate_per(15);
-}
-
-static unsigned long get_rate_ssi2(struct clk *clk)
-{
-	return get_rate_per(14);
-}
-
-static unsigned long get_rate_ssi1(struct clk *clk)
-{
-	return get_rate_per(13);
-}
-
-static unsigned long get_rate_i2c(struct clk *clk)
-{
-	return get_rate_per(6);
-}
-
-static unsigned long get_rate_nfc(struct clk *clk)
-{
-	return get_rate_per(8);
-}
-
-static unsigned long get_rate_gpt(struct clk *clk)
-{
-	return get_rate_per(5);
-}
-
-static unsigned long get_rate_lcdc(struct clk *clk)
-{
-	return get_rate_per(7);
-}
-
-static unsigned long get_rate_esdhc1(struct clk *clk)
-{
-	return get_rate_per(3);
-}
-
-static unsigned long get_rate_esdhc2(struct clk *clk)
-{
-	return get_rate_per(4);
-}
-
-static unsigned long get_rate_csi(struct clk *clk)
-{
-	return get_rate_per(0);
-}
-
-static unsigned long get_rate_otg(struct clk *clk)
-{
-	unsigned long cctl = readl(CRM_BASE + CCM_CCTL);
-	unsigned long rate = get_rate_upll();
-
-	return (cctl & (1 << 23)) ? 0 : rate / ((0x3F & (cctl >> 16)) + 1);
-}
-
-static int clk_cgcr_enable(struct clk *clk)
-{
-	u32 reg;
-
-	reg = __raw_readl(clk->enable_reg);
-	reg |= 1 << clk->enable_shift;
-	__raw_writel(reg, clk->enable_reg);
-
-	return 0;
-}
-
-static void clk_cgcr_disable(struct clk *clk)
-{
-	u32 reg;
-
-	reg = __raw_readl(clk->enable_reg);
-	reg &= ~(1 << clk->enable_shift);
-	__raw_writel(reg, clk->enable_reg);
-}
-
-#define DEFINE_CLOCK(name, i, er, es, gr, sr, s)	\
-	static struct clk name = {			\
-		.id		= i,			\
-		.enable_reg	= CRM_BASE + er,	\
-		.enable_shift	= es,			\
-		.get_rate	= gr,			\
-		.set_rate	= sr,			\
-		.enable		= clk_cgcr_enable,	\
-		.disable	= clk_cgcr_disable,	\
-		.secondary	= s,			\
-	}
-
-/*
- * Note: the following IPG clock gating bits are wrongly marked "Reserved" in
- * the i.MX25 Reference Manual Rev 1, table 15-13. The information below is
- * taken from the Freescale released BSP.
- *
- * bit	reg	offset	clock
- *
- * 0	CGCR1	0	AUDMUX
- * 12	CGCR1	12	ESAI
- * 16	CGCR1	16	GPIO1
- * 17	CGCR1	17	GPIO2
- * 18	CGCR1	18	GPIO3
- * 23	CGCR1	23	I2C1
- * 24	CGCR1	24	I2C2
- * 25	CGCR1	25	I2C3
- * 27	CGCR1	27	IOMUXC
- * 28	CGCR1	28	KPP
- * 30	CGCR1	30	OWIRE
- * 36	CGCR2	4	RTIC
- * 51	CGCR2	19	WDOG
- */
-
-DEFINE_CLOCK(gpt_clk,    0, CCM_CGCR0,  5, get_rate_gpt, NULL, NULL);
-DEFINE_CLOCK(uart_per_clk, 0, CCM_CGCR0, 15, get_rate_uart, NULL, NULL);
-DEFINE_CLOCK(ssi1_per_clk, 0, CCM_CGCR0, 13, get_rate_ipg, NULL, NULL);
-DEFINE_CLOCK(ssi2_per_clk, 0, CCM_CGCR0, 14, get_rate_ipg, NULL, NULL);
-DEFINE_CLOCK(cspi1_clk,  0, CCM_CGCR1,  5, get_rate_ipg, NULL, NULL);
-DEFINE_CLOCK(cspi2_clk,  0, CCM_CGCR1,  6, get_rate_ipg, NULL, NULL);
-DEFINE_CLOCK(cspi3_clk,  0, CCM_CGCR1,  7, get_rate_ipg, NULL, NULL);
-DEFINE_CLOCK(esdhc1_ahb_clk, 0, CCM_CGCR0, 21, get_rate_esdhc1,	 NULL, NULL);
-DEFINE_CLOCK(esdhc1_per_clk, 0, CCM_CGCR0,  3, get_rate_esdhc1,	 NULL,
-		&esdhc1_ahb_clk);
-DEFINE_CLOCK(esdhc2_ahb_clk, 0, CCM_CGCR0, 22, get_rate_esdhc2,	 NULL, NULL);
-DEFINE_CLOCK(esdhc2_per_clk, 0, CCM_CGCR0,  4, get_rate_esdhc2,	 NULL,
-		&esdhc2_ahb_clk);
-DEFINE_CLOCK(sdma_ahb_clk, 0, CCM_CGCR0, 26, NULL,	 NULL, NULL);
-DEFINE_CLOCK(fec_ahb_clk, 0, CCM_CGCR0, 23, NULL,	 NULL, NULL);
-DEFINE_CLOCK(lcdc_ahb_clk, 0, CCM_CGCR0, 24, NULL,	 NULL, NULL);
-DEFINE_CLOCK(lcdc_per_clk, 0, CCM_CGCR0,  7, NULL,	 NULL, &lcdc_ahb_clk);
-DEFINE_CLOCK(csi_ahb_clk, 0, CCM_CGCR0, 18, get_rate_csi, NULL, NULL);
-DEFINE_CLOCK(csi_per_clk, 0, CCM_CGCR0, 0, get_rate_csi, NULL, &csi_ahb_clk);
-DEFINE_CLOCK(uart1_clk,  0, CCM_CGCR2, 14, get_rate_uart, NULL, &uart_per_clk);
-DEFINE_CLOCK(uart2_clk,  0, CCM_CGCR2, 15, get_rate_uart, NULL, &uart_per_clk);
-DEFINE_CLOCK(uart3_clk,  0, CCM_CGCR2, 16, get_rate_uart, NULL, &uart_per_clk);
-DEFINE_CLOCK(uart4_clk,  0, CCM_CGCR2, 17, get_rate_uart, NULL, &uart_per_clk);
-DEFINE_CLOCK(uart5_clk,  0, CCM_CGCR2, 18, get_rate_uart, NULL, &uart_per_clk);
-DEFINE_CLOCK(nfc_clk,    0, CCM_CGCR0,  8, get_rate_nfc, NULL, NULL);
-DEFINE_CLOCK(usbotg_clk, 0, CCM_CGCR0, 28, get_rate_otg, NULL, NULL);
-DEFINE_CLOCK(pwm1_clk,	 0, CCM_CGCR1, 31, get_rate_ipg, NULL, NULL);
-DEFINE_CLOCK(pwm2_clk,	 0, CCM_CGCR2,  0, get_rate_ipg, NULL, NULL);
-DEFINE_CLOCK(pwm3_clk,	 0, CCM_CGCR2,  1, get_rate_ipg, NULL, NULL);
-DEFINE_CLOCK(pwm4_clk,	 0, CCM_CGCR2,  2, get_rate_ipg, NULL, NULL);
-DEFINE_CLOCK(kpp_clk,	 0, CCM_CGCR1, 28, get_rate_ipg, NULL, NULL);
-DEFINE_CLOCK(tsc_clk,	 0, CCM_CGCR2, 13, get_rate_ipg, NULL, NULL);
-DEFINE_CLOCK(i2c_clk,	 0, CCM_CGCR0,  6, get_rate_i2c, NULL, NULL);
-DEFINE_CLOCK(fec_clk,	 0, CCM_CGCR1, 15, get_rate_ipg, NULL, &fec_ahb_clk);
-DEFINE_CLOCK(dryice_clk, 0, CCM_CGCR1,  8, get_rate_ipg, NULL, NULL);
-DEFINE_CLOCK(lcdc_clk,	 0, CCM_CGCR1, 29, get_rate_lcdc, NULL, &lcdc_per_clk);
-DEFINE_CLOCK(wdt_clk,    0, CCM_CGCR2, 19, get_rate_ipg, NULL,  NULL);
-DEFINE_CLOCK(ssi1_clk,  0, CCM_CGCR2, 11, get_rate_ssi1, NULL, &ssi1_per_clk);
-DEFINE_CLOCK(ssi2_clk,  1, CCM_CGCR2, 12, get_rate_ssi2, NULL, &ssi2_per_clk);
-DEFINE_CLOCK(sdma_clk, 0, CCM_CGCR2,  6, get_rate_ipg, NULL, &sdma_ahb_clk);
-DEFINE_CLOCK(esdhc1_clk,  0, CCM_CGCR1, 13, get_rate_esdhc1, NULL,
-		&esdhc1_per_clk);
-DEFINE_CLOCK(esdhc2_clk,  1, CCM_CGCR1, 14, get_rate_esdhc2, NULL,
-		&esdhc2_per_clk);
-DEFINE_CLOCK(audmux_clk, 0, CCM_CGCR1, 0, NULL, NULL, NULL);
-DEFINE_CLOCK(csi_clk,    0, CCM_CGCR1,  4, get_rate_csi, NULL,  &csi_per_clk);
-DEFINE_CLOCK(can1_clk,	 0, CCM_CGCR1,  2, get_rate_ipg, NULL, NULL);
-DEFINE_CLOCK(can2_clk,	 1, CCM_CGCR1,  3, get_rate_ipg, NULL, NULL);
-DEFINE_CLOCK(iim_clk,    0, CCM_CGCR1, 26, NULL, NULL, NULL);
-
-#define _REGISTER_CLOCK(d, n, c)	\
-	{				\
-		.dev_id = d,		\
-		.con_id = n,		\
-		.clk = &c,		\
-	},
-
-static struct clk_lookup lookups[] = {
-	/* i.mx25 has the i.mx21 type uart */
-	_REGISTER_CLOCK("imx21-uart.0", NULL, uart1_clk)
-	_REGISTER_CLOCK("imx21-uart.1", NULL, uart2_clk)
-	_REGISTER_CLOCK("imx21-uart.2", NULL, uart3_clk)
-	_REGISTER_CLOCK("imx21-uart.3", NULL, uart4_clk)
-	_REGISTER_CLOCK("imx21-uart.4", NULL, uart5_clk)
-	_REGISTER_CLOCK("mxc-ehci.0", "usb", usbotg_clk)
-	_REGISTER_CLOCK("mxc-ehci.1", "usb", usbotg_clk)
-	_REGISTER_CLOCK("mxc-ehci.2", "usb", usbotg_clk)
-	_REGISTER_CLOCK("fsl-usb2-udc", "usb", usbotg_clk)
-	_REGISTER_CLOCK("mxc_nand.0", NULL, nfc_clk)
-	/* i.mx25 has the i.mx35 type cspi */
-	_REGISTER_CLOCK("imx35-cspi.0", NULL, cspi1_clk)
-	_REGISTER_CLOCK("imx35-cspi.1", NULL, cspi2_clk)
-	_REGISTER_CLOCK("imx35-cspi.2", NULL, cspi3_clk)
-	_REGISTER_CLOCK("mxc_pwm.0", NULL, pwm1_clk)
-	_REGISTER_CLOCK("mxc_pwm.1", NULL, pwm2_clk)
-	_REGISTER_CLOCK("mxc_pwm.2", NULL, pwm3_clk)
-	_REGISTER_CLOCK("mxc_pwm.3", NULL, pwm4_clk)
-	_REGISTER_CLOCK("imx-keypad", NULL, kpp_clk)
-	_REGISTER_CLOCK("mx25-adc", NULL, tsc_clk)
-	_REGISTER_CLOCK("imx-i2c.0", NULL, i2c_clk)
-	_REGISTER_CLOCK("imx-i2c.1", NULL, i2c_clk)
-	_REGISTER_CLOCK("imx-i2c.2", NULL, i2c_clk)
-	_REGISTER_CLOCK("imx25-fec.0", NULL, fec_clk)
-	_REGISTER_CLOCK("imxdi_rtc.0", NULL, dryice_clk)
-	_REGISTER_CLOCK("imx-fb.0", NULL, lcdc_clk)
-	_REGISTER_CLOCK("imx2-wdt.0", NULL, wdt_clk)
-	_REGISTER_CLOCK("imx-ssi.0", NULL, ssi1_clk)
-	_REGISTER_CLOCK("imx-ssi.1", NULL, ssi2_clk)
-	_REGISTER_CLOCK("sdhci-esdhc-imx25.0", NULL, esdhc1_clk)
-	_REGISTER_CLOCK("sdhci-esdhc-imx25.1", NULL, esdhc2_clk)
-	_REGISTER_CLOCK("mx2-camera.0", NULL, csi_clk)
-	_REGISTER_CLOCK(NULL, "audmux", audmux_clk)
-	_REGISTER_CLOCK("flexcan.0", NULL, can1_clk)
-	_REGISTER_CLOCK("flexcan.1", NULL, can2_clk)
-	/* i.mx25 has the i.mx35 type sdma */
-	_REGISTER_CLOCK("imx35-sdma", NULL, sdma_clk)
-	_REGISTER_CLOCK(NULL, "iim", iim_clk)
-};
-
-int __init mx25_clocks_init(void)
-{
-	clkdev_add_table(lookups, ARRAY_SIZE(lookups));
-
-	/* Turn off all clocks except the ones we need to survive, namely:
-	 * EMI, GPIO1-3 (CCM_CGCR1[18:16]), GPT1, IOMUXC (CCM_CGCR1[27]), IIM,
-	 * SCC
-	 */
-	__raw_writel((1 << 19), CRM_BASE + CCM_CGCR0);
-	__raw_writel((0xf << 16) | (3 << 26), CRM_BASE + CCM_CGCR1);
-	__raw_writel((1 << 5), CRM_BASE + CCM_CGCR2);
-#if defined(CONFIG_DEBUG_LL) && !defined(CONFIG_DEBUG_ICEDCC)
-	clk_enable(&uart1_clk);
-#endif
-
-	/* Clock source for lcdc and csi is upll */
-	__raw_writel(__raw_readl(CRM_BASE+0x64) | (1 << 7) | (1 << 0),
-			CRM_BASE + 0x64);
-
-	/* Clock source for gpt is ahb_div */
-	__raw_writel(__raw_readl(CRM_BASE+0x64) & ~(1 << 5), CRM_BASE + 0x64);
-
-	clk_enable(&iim_clk);
-	imx_print_silicon_rev("i.MX25", mx25_revision());
-	clk_disable(&iim_clk);
-
-	mxc_timer_init(&gpt_clk, MX25_IO_ADDRESS(MX25_GPT1_BASE_ADDR), 54);
-
-	return 0;
-}
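
For orientation while reading the DEFINE_CLOCK() tables above and below: the macro only fills in a static struct clk. Expanding one imx25 line by hand (no new information, just this file's macro applied to gpt_clk):

/* DEFINE_CLOCK(gpt_clk, 0, CCM_CGCR0, 5, get_rate_gpt, NULL, NULL) expands to: */
static struct clk gpt_clk = {
	.id		= 0,
	.enable_reg	= CRM_BASE + CCM_CGCR0,	/* gate bit lives in CGCR0 */
	.enable_shift	= 5,			/* bit 5 gates the GPT clock */
	.get_rate	= get_rate_gpt,
	.set_rate	= NULL,
	.enable		= clk_cgcr_enable,
	.disable	= clk_cgcr_disable,
	.secondary	= NULL,
};
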
diff --git a/arch/arm/mach-imx/clock-imx27.c b/arch/arm/mach-imx/clock-imx27.c
deleted file mode 100644
index 98e04f5..0000000
--- a/arch/arm/mach-imx/clock-imx27.c
+++ /dev/null
@@ -1,785 +0,0 @@
-/*
- * Copyright 2004-2007 Freescale Semiconductor, Inc. All Rights Reserved.
- * Copyright 2008 Juergen Beisert, kernel@pengutronix.de
- * Copyright 2008 Martin Fuzzey, mfuzzey@gmail.com
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * as published by the Free Software Foundation; either version 2
- * of the License, or (at your option) any later version.
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston,
- * MA 02110-1301, USA.
- */
-
-#include <linux/clk.h>
-#include <linux/io.h>
-#include <linux/module.h>
-#include <linux/clkdev.h>
-#include <linux/of.h>
-
-#include <asm/div64.h>
-
-#include <mach/clock.h>
-#include <mach/common.h>
-#include <mach/hardware.h>
-
-#define IO_ADDR_CCM(off)	(MX27_IO_ADDRESS(MX27_CCM_BASE_ADDR + (off)))
-
-/* Register offsets */
-#define CCM_CSCR		IO_ADDR_CCM(0x0)
-#define CCM_MPCTL0		IO_ADDR_CCM(0x4)
-#define CCM_MPCTL1		IO_ADDR_CCM(0x8)
-#define CCM_SPCTL0		IO_ADDR_CCM(0xc)
-#define CCM_SPCTL1		IO_ADDR_CCM(0x10)
-#define CCM_OSC26MCTL		IO_ADDR_CCM(0x14)
-#define CCM_PCDR0		IO_ADDR_CCM(0x18)
-#define CCM_PCDR1		IO_ADDR_CCM(0x1c)
-#define CCM_PCCR0		IO_ADDR_CCM(0x20)
-#define CCM_PCCR1		IO_ADDR_CCM(0x24)
-#define CCM_CCSR		IO_ADDR_CCM(0x28)
-#define CCM_PMCTL		IO_ADDR_CCM(0x2c)
-#define CCM_PMCOUNT		IO_ADDR_CCM(0x30)
-#define CCM_WKGDCTL		IO_ADDR_CCM(0x34)
-
-#define CCM_CSCR_UPDATE_DIS	(1 << 31)
-#define CCM_CSCR_SSI2		(1 << 23)
-#define CCM_CSCR_SSI1		(1 << 22)
-#define CCM_CSCR_VPU		(1 << 21)
-#define CCM_CSCR_MSHC           (1 << 20)
-#define CCM_CSCR_SPLLRES        (1 << 19)
-#define CCM_CSCR_MPLLRES        (1 << 18)
-#define CCM_CSCR_SP             (1 << 17)
-#define CCM_CSCR_MCU            (1 << 16)
-#define CCM_CSCR_OSC26MDIV      (1 << 4)
-#define CCM_CSCR_OSC26M         (1 << 3)
-#define CCM_CSCR_FPM            (1 << 2)
-#define CCM_CSCR_SPEN           (1 << 1)
-#define CCM_CSCR_MPEN           (1 << 0)
-
-/* i.MX27 TO 2+ */
-#define CCM_CSCR_ARM_SRC        (1 << 15)
-
-#define CCM_SPCTL1_LF           (1 << 15)
-#define CCM_SPCTL1_BRMO         (1 << 6)
-
-static struct clk mpll_main1_clk, mpll_main2_clk;
-
-static int clk_pccr_enable(struct clk *clk)
-{
-	unsigned long reg;
-
-	if (!clk->enable_reg)
-		return 0;
-
-	reg = __raw_readl(clk->enable_reg);
-	reg |= 1 << clk->enable_shift;
-	__raw_writel(reg, clk->enable_reg);
-
-	return 0;
-}
-
-static void clk_pccr_disable(struct clk *clk)
-{
-	unsigned long reg;
-
-	if (!clk->enable_reg)
-		return;
-
-	reg = __raw_readl(clk->enable_reg);
-	reg &= ~(1 << clk->enable_shift);
-	__raw_writel(reg, clk->enable_reg);
-}
-
-static int clk_spll_enable(struct clk *clk)
-{
-	unsigned long reg;
-
-	reg = __raw_readl(CCM_CSCR);
-	reg |= CCM_CSCR_SPEN;
-	__raw_writel(reg, CCM_CSCR);
-
-	while (!(__raw_readl(CCM_SPCTL1) & CCM_SPCTL1_LF));
-
-	return 0;
-}
-
-static void clk_spll_disable(struct clk *clk)
-{
-	unsigned long reg;
-
-	reg = __raw_readl(CCM_CSCR);
-	reg &= ~CCM_CSCR_SPEN;
-	__raw_writel(reg, CCM_CSCR);
-}
-
-static int clk_cpu_set_parent(struct clk *clk, struct clk *parent)
-{
-	int cscr = __raw_readl(CCM_CSCR);
-
-	if (clk->parent == parent)
-		return 0;
-
-	if (mx27_revision() >= IMX_CHIP_REVISION_2_0) {
-		if (parent == &mpll_main1_clk) {
-			cscr |= CCM_CSCR_ARM_SRC;
-		} else {
-			if (parent == &mpll_main2_clk)
-				cscr &= ~CCM_CSCR_ARM_SRC;
-			else
-				return -EINVAL;
-		}
-		__raw_writel(cscr, CCM_CSCR);
-		clk->parent = parent;
-		return 0;
-	}
-	return -ENODEV;
-}
-
-static unsigned long round_rate_cpu(struct clk *clk, unsigned long rate)
-{
-	int div;
-	unsigned long parent_rate;
-
-	parent_rate = clk_get_rate(clk->parent);
-
-	div = parent_rate / rate;
-	if (parent_rate % rate)
-		div++;
-
-	if (div > 4)
-		div = 4;
-
-	return parent_rate / div;
-}
-
-static int set_rate_cpu(struct clk *clk, unsigned long rate)
-{
-	unsigned int div;
-	uint32_t reg;
-	unsigned long parent_rate;
-
-	parent_rate = clk_get_rate(clk->parent);
-
-	div = parent_rate / rate;
-
-	if (div > 4 || div < 1 || ((parent_rate / div) != rate))
-		return -EINVAL;
-
-	div--;
-
-	reg = __raw_readl(CCM_CSCR);
-	if (mx27_revision() >= IMX_CHIP_REVISION_2_0) {
-		reg &= ~(3 << 12);
-		reg |= div << 12;
-		reg &= ~(CCM_CSCR_FPM | CCM_CSCR_SPEN);
-		__raw_writel(reg | CCM_CSCR_UPDATE_DIS, CCM_CSCR);
-	} else {
-		printk(KERN_ERR "Can't set CPU frequency!\n");
-	}
-
-	return 0;
-}
-
-static unsigned long round_rate_per(struct clk *clk, unsigned long rate)
-{
-	u32 div;
-	unsigned long parent_rate;
-
-	parent_rate = clk_get_rate(clk->parent);
-
-	div = parent_rate / rate;
-	if (parent_rate % rate)
-		div++;
-
-	if (div > 64)
-		div = 64;
-
-	return parent_rate / div;
-}
-
-static int set_rate_per(struct clk *clk, unsigned long rate)
-{
-	u32 reg;
-	u32 div;
-	unsigned long parent_rate;
-
-	parent_rate = clk_get_rate(clk->parent);
-
-	if (clk->id < 0 || clk->id > 3)
-		return -EINVAL;
-
-	div = parent_rate / rate;
-	if (div > 64 || div < 1 || ((parent_rate / div) != rate))
-		return -EINVAL;
-	div--;
-
-	reg = __raw_readl(CCM_PCDR1) & ~(0x3f << (clk->id << 3));
-	reg |= div << (clk->id << 3);
-	__raw_writel(reg, CCM_PCDR1);
-
-	return 0;
-}
-
-static unsigned long get_rate_usb(struct clk *clk)
-{
-	unsigned long usb_pdf;
-	unsigned long parent_rate;
-
-	parent_rate = clk_get_rate(clk->parent);
-
-	usb_pdf = (__raw_readl(CCM_CSCR) >> 28) & 0x7;
-
-	return parent_rate / (usb_pdf + 1U);
-}
-
-static unsigned long get_rate_ssix(struct clk *clk, unsigned long pdf)
-{
-	unsigned long parent_rate;
-
-	parent_rate = clk_get_rate(clk->parent);
-
-	if (mx27_revision() >= IMX_CHIP_REVISION_2_0)
-		pdf += 4;  /* MX27 TO2+ */
-	else
-		pdf = (pdf < 2) ? 124UL : pdf;  /* MX21 & MX27 TO1 */
-
-	return 2UL * parent_rate / pdf;
-}
-
-static unsigned long get_rate_ssi1(struct clk *clk)
-{
-	return get_rate_ssix(clk, (__raw_readl(CCM_PCDR0) >> 16) & 0x3f);
-}
-
-static unsigned long get_rate_ssi2(struct clk *clk)
-{
-	return get_rate_ssix(clk, (__raw_readl(CCM_PCDR0) >> 26) & 0x3f);
-}
-
-static unsigned long get_rate_nfc(struct clk *clk)
-{
-	unsigned long nfc_pdf;
-	unsigned long parent_rate;
-
-	parent_rate = clk_get_rate(clk->parent);
-
-	if (mx27_revision() >= IMX_CHIP_REVISION_2_0)
-		nfc_pdf = (__raw_readl(CCM_PCDR0) >> 6) & 0xf;
-	else
-		nfc_pdf = (__raw_readl(CCM_PCDR0) >> 12) & 0xf;
-
-	return parent_rate / (nfc_pdf + 1);
-}
-
-static unsigned long get_rate_vpu(struct clk *clk)
-{
-	unsigned long vpu_pdf;
-	unsigned long parent_rate;
-
-	parent_rate = clk_get_rate(clk->parent);
-
-	if (mx27_revision() >= IMX_CHIP_REVISION_2_0) {
-		vpu_pdf = (__raw_readl(CCM_PCDR0) >> 10) & 0x3f;
-		vpu_pdf += 4;
-	} else {
-		vpu_pdf = (__raw_readl(CCM_PCDR0) >> 8) & 0xf;
-		vpu_pdf = (vpu_pdf < 2) ? 124 : vpu_pdf;
-	}
-
-	return 2UL * parent_rate / vpu_pdf;
-}
-
-static unsigned long round_rate_parent(struct clk *clk, unsigned long rate)
-{
-	return clk->parent->round_rate(clk->parent, rate);
-}
-
-static unsigned long get_rate_parent(struct clk *clk)
-{
-	return clk_get_rate(clk->parent);
-}
-
-static int set_rate_parent(struct clk *clk, unsigned long rate)
-{
-	return clk->parent->set_rate(clk->parent, rate);
-}
-
-/* in Hz */
-static unsigned long external_high_reference = 26000000;
-
-static unsigned long get_rate_high_reference(struct clk *clk)
-{
-	return external_high_reference;
-}
-
-/* in Hz */
-static unsigned long external_low_reference = 32768;
-
-static unsigned long get_rate_low_reference(struct clk *clk)
-{
-	return external_low_reference;
-}
-
-static unsigned long get_rate_fpm(struct clk *clk)
-{
-	return clk_get_rate(clk->parent) * 1024;
-}
-
-static unsigned long get_rate_mpll(struct clk *clk)
-{
-	return mxc_decode_pll(__raw_readl(CCM_MPCTL0),
-			clk_get_rate(clk->parent));
-}
-
-static unsigned long get_rate_mpll_main(struct clk *clk)
-{
-	unsigned long parent_rate;
-
-	parent_rate = clk_get_rate(clk->parent);
-
-	/* i.MX27 TO2:
-	 * clk->id == 0: arm clock source path 1 which is from 2 * MPLL / 2
-	 * clk->id == 1: arm clock source path 2 which is from 2 * MPLL / 3
-	 */
-	if (mx27_revision() >= IMX_CHIP_REVISION_2_0 && clk->id == 1)
-		return 2UL * parent_rate / 3UL;
-
-	return parent_rate;
-}
-
-static unsigned long get_rate_spll(struct clk *clk)
-{
-	uint32_t reg;
-	unsigned long rate;
-
-	rate = clk_get_rate(clk->parent);
-
-	reg = __raw_readl(CCM_SPCTL0);
-
-	/* On TO2 we have to write the value back. Otherwise we
-	 * read 0 from this register the next time.
-	 */
-	if (mx27_revision() >= IMX_CHIP_REVISION_2_0)
-		__raw_writel(reg, CCM_SPCTL0);
-
-	return mxc_decode_pll(reg, rate);
-}
-
-static unsigned long get_rate_cpu(struct clk *clk)
-{
-	u32 div;
-	unsigned long rate;
-
-	if (mx27_revision() >= IMX_CHIP_REVISION_2_0)
-		div = (__raw_readl(CCM_CSCR) >> 12) & 0x3;
-	else
-		div = (__raw_readl(CCM_CSCR) >> 13) & 0x7;
-
-	rate = clk_get_rate(clk->parent);
-	return rate / (div + 1);
-}
-
-static unsigned long get_rate_ahb(struct clk *clk)
-{
-	unsigned long rate, bclk_pdf;
-
-	if (mx27_revision() >= IMX_CHIP_REVISION_2_0)
-		bclk_pdf = (__raw_readl(CCM_CSCR) >> 8) & 0x3;
-	else
-		bclk_pdf = (__raw_readl(CCM_CSCR) >> 9) & 0xf;
-
-	rate = clk_get_rate(clk->parent);
-	return rate / (bclk_pdf + 1);
-}
-
-static unsigned long get_rate_ipg(struct clk *clk)
-{
-	unsigned long rate, ipg_pdf;
-
-	if (mx27_revision() >= IMX_CHIP_REVISION_2_0)
-		return clk_get_rate(clk->parent);
-	else
-		ipg_pdf = (__raw_readl(CCM_CSCR) >> 8) & 1;
-
-	rate = clk_get_rate(clk->parent);
-	return rate / (ipg_pdf + 1);
-}
-
-static unsigned long get_rate_per(struct clk *clk)
-{
-	unsigned long perclk_pdf, parent_rate;
-
-	parent_rate = clk_get_rate(clk->parent);
-
-	if (clk->id < 0 || clk->id > 3)
-		return 0;
-
-	perclk_pdf = (__raw_readl(CCM_PCDR1) >> (clk->id << 3)) & 0x3f;
-
-	return parent_rate / (perclk_pdf + 1);
-}
-
-/*
- * the high frequency external clock reference
- * Default case is 26MHz. Could be changed at runtime
- * with a call to change_external_high_reference()
- */
-static struct clk ckih_clk = {
-	.get_rate	= get_rate_high_reference,
-};
-
-static struct clk mpll_clk = {
-	.parent		= &ckih_clk,
-	.get_rate	= get_rate_mpll,
-};
-
-/* For i.MX27 TO2, it is the MPLL path 1 of ARM core
- * It provides the clock source whose rate is the same as MPLL
- */
-static struct clk mpll_main1_clk = {
-	.id		= 0,
-	.parent		= &mpll_clk,
-	.get_rate	= get_rate_mpll_main,
-};
-
-/* For i.MX27 TO2, it is the MPLL path 2 of ARM core
- * It provides the clock source whose rate is the same as MPLL * 2 / 3
- */
-static struct clk mpll_main2_clk = {
-	.id		= 1,
-	.parent		= &mpll_clk,
-	.get_rate	= get_rate_mpll_main,
-};
-
-static struct clk ahb_clk = {
-	.parent		= &mpll_main2_clk,
-	.get_rate	= get_rate_ahb,
-};
-
-static struct clk ipg_clk = {
-	.parent		= &ahb_clk,
-	.get_rate	= get_rate_ipg,
-};
-
-static struct clk cpu_clk = {
-	.parent = &mpll_main2_clk,
-	.set_parent = clk_cpu_set_parent,
-	.round_rate = round_rate_cpu,
-	.get_rate = get_rate_cpu,
-	.set_rate = set_rate_cpu,
-};
-
-static struct clk spll_clk = {
-	.parent = &ckih_clk,
-	.get_rate = get_rate_spll,
-	.enable = clk_spll_enable,
-	.disable = clk_spll_disable,
-};
-
-/*
- * the low frequency external clock reference
- * Default case is 32.768kHz.
- */
-static struct clk ckil_clk = {
-	.get_rate = get_rate_low_reference,
-};
-
-/* Output of frequency pre multiplier */
-static struct clk fpm_clk = {
-	.parent = &ckil_clk,
-	.get_rate = get_rate_fpm,
-};
-
-#define PCCR0 CCM_PCCR0
-#define PCCR1 CCM_PCCR1
-
-#define DEFINE_CLOCK(name, i, er, es, gr, s, p)		\
-	static struct clk name = {			\
-		.id		= i,			\
-		.enable_reg	= er,			\
-		.enable_shift	= es,			\
-		.get_rate	= gr,			\
-		.enable		= clk_pccr_enable,	\
-		.disable	= clk_pccr_disable,	\
-		.secondary	= s,			\
-		.parent		= p,			\
-	}
-
-#define DEFINE_CLOCK1(name, i, er, es, getsetround, s, p)	\
-	static struct clk name = {				\
-		.id		= i,				\
-		.enable_reg	= er,				\
-		.enable_shift	= es,				\
-		.get_rate	= get_rate_##getsetround,	\
-		.set_rate	= set_rate_##getsetround,	\
-		.round_rate	= round_rate_##getsetround,	\
-		.enable		= clk_pccr_enable,		\
-		.disable	= clk_pccr_disable,		\
-		.secondary	= s,				\
-		.parent		= p,				\
-	}
-
-/* Forward declaration to keep the following list in order */
-static struct clk slcdc_clk1, sahara2_clk1, rtic_clk1, fec_clk1, emma_clk1,
-		  dma_clk1, lcdc_clk2, vpu_clk1;
-
-/* All clocks we can gate through PCCRx in the order of PCCRx bits */
-DEFINE_CLOCK(ssi2_clk1,    1, PCCR0,  0, NULL, NULL, &ipg_clk);
-DEFINE_CLOCK(ssi1_clk1,    0, PCCR0,  1, NULL, NULL, &ipg_clk);
-DEFINE_CLOCK(slcdc_clk,    0, PCCR0,  2, NULL, &slcdc_clk1, &ahb_clk);
-DEFINE_CLOCK(sdhc3_clk1,   0, PCCR0,  3, NULL, NULL, &ipg_clk);
-DEFINE_CLOCK(sdhc2_clk1,   0, PCCR0,  4, NULL, NULL, &ipg_clk);
-DEFINE_CLOCK(sdhc1_clk1,   0, PCCR0,  5, NULL, NULL, &ipg_clk);
-DEFINE_CLOCK(scc_clk,      0, PCCR0,  6, NULL, NULL, &ipg_clk);
-DEFINE_CLOCK(sahara2_clk,  0, PCCR0,  7, NULL, &sahara2_clk1, &ahb_clk);
-DEFINE_CLOCK(rtic_clk,     0, PCCR0,  8, NULL, &rtic_clk1, &ahb_clk);
-DEFINE_CLOCK(rtc_clk,      0, PCCR0,  9, NULL, NULL, &ipg_clk);
-DEFINE_CLOCK(pwm_clk1,     0, PCCR0, 11, NULL, NULL, &ipg_clk);
-DEFINE_CLOCK(owire_clk,    0, PCCR0, 12, NULL, NULL, &ipg_clk);
-DEFINE_CLOCK(mstick_clk1,  0, PCCR0, 13, NULL, NULL, &ipg_clk);
-DEFINE_CLOCK(lcdc_clk1,    0, PCCR0, 14, NULL, &lcdc_clk2, &ipg_clk);
-DEFINE_CLOCK(kpp_clk,      0, PCCR0, 15, NULL, NULL, &ipg_clk);
-DEFINE_CLOCK(iim_clk,      0, PCCR0, 16, NULL, NULL, &ipg_clk);
-DEFINE_CLOCK(i2c2_clk,     1, PCCR0, 17, NULL, NULL, &ipg_clk);
-DEFINE_CLOCK(i2c1_clk,     0, PCCR0, 18, NULL, NULL, &ipg_clk);
-DEFINE_CLOCK(gpt6_clk1,    0, PCCR0, 19, NULL, NULL, &ipg_clk);
-DEFINE_CLOCK(gpt5_clk1,    0, PCCR0, 20, NULL, NULL, &ipg_clk);
-DEFINE_CLOCK(gpt4_clk1,    0, PCCR0, 21, NULL, NULL, &ipg_clk);
-DEFINE_CLOCK(gpt3_clk1,    0, PCCR0, 22, NULL, NULL, &ipg_clk);
-DEFINE_CLOCK(gpt2_clk1,    0, PCCR0, 23, NULL, NULL, &ipg_clk);
-DEFINE_CLOCK(gpt1_clk1,    0, PCCR0, 24, NULL, NULL, &ipg_clk);
-DEFINE_CLOCK(gpio_clk,     0, PCCR0, 25, NULL, NULL, &ipg_clk);
-DEFINE_CLOCK(fec_clk,      0, PCCR0, 26, NULL, &fec_clk1, &ahb_clk);
-DEFINE_CLOCK(emma_clk,     0, PCCR0, 27, NULL, &emma_clk1, &ahb_clk);
-DEFINE_CLOCK(dma_clk,      0, PCCR0, 28, NULL, &dma_clk1, &ahb_clk);
-DEFINE_CLOCK(cspi13_clk1,  0, PCCR0, 29, NULL, NULL, &ipg_clk);
-DEFINE_CLOCK(cspi2_clk1,   0, PCCR0, 30, NULL, NULL, &ipg_clk);
-DEFINE_CLOCK(cspi1_clk1,   0, PCCR0, 31, NULL, NULL, &ipg_clk);
-
-DEFINE_CLOCK(mstick_clk,   0, PCCR1,  2, NULL, &mstick_clk1, &ipg_clk);
-DEFINE_CLOCK(nfc_clk,      0, PCCR1,  3, get_rate_nfc, NULL, &cpu_clk);
-DEFINE_CLOCK(ssi2_clk,     1, PCCR1,  4, get_rate_ssi2, &ssi2_clk1, &mpll_main2_clk);
-DEFINE_CLOCK(ssi1_clk,     0, PCCR1,  5, get_rate_ssi1, &ssi1_clk1, &mpll_main2_clk);
-DEFINE_CLOCK(vpu_clk,      0, PCCR1,  6, get_rate_vpu, &vpu_clk1, &mpll_main2_clk);
-DEFINE_CLOCK1(per4_clk,    3, PCCR1,  7, per, NULL, &mpll_main2_clk);
-DEFINE_CLOCK1(per3_clk,    2, PCCR1,  8, per, NULL, &mpll_main2_clk);
-DEFINE_CLOCK1(per2_clk,    1, PCCR1,  9, per, NULL, &mpll_main2_clk);
-DEFINE_CLOCK1(per1_clk,    0, PCCR1, 10, per, NULL, &mpll_main2_clk);
-DEFINE_CLOCK(usb_clk1,     0, PCCR1, 11, NULL, NULL, &ahb_clk);
-DEFINE_CLOCK(slcdc_clk1,   0, PCCR1, 12, NULL, NULL, &ahb_clk);
-DEFINE_CLOCK(sahara2_clk1, 0, PCCR1, 13, NULL, NULL, &ahb_clk);
-DEFINE_CLOCK(rtic_clk1,    0, PCCR1, 14, NULL, NULL, &ahb_clk);
-DEFINE_CLOCK(lcdc_clk2,    0, PCCR1, 15, NULL, NULL, &ahb_clk);
-DEFINE_CLOCK(vpu_clk1,     0, PCCR1, 16, NULL, NULL, &ahb_clk);
-DEFINE_CLOCK(fec_clk1,     0, PCCR1, 17, NULL, NULL, &ahb_clk);
-DEFINE_CLOCK(emma_clk1,    0, PCCR1, 18, NULL, NULL, &ahb_clk);
-DEFINE_CLOCK(emi_clk,      0, PCCR1, 19, NULL, NULL, &ahb_clk);
-DEFINE_CLOCK(dma_clk1,     0, PCCR1, 20, NULL, NULL, &ahb_clk);
-DEFINE_CLOCK(csi_clk1,     0, PCCR1, 21, NULL, NULL, &ahb_clk);
-DEFINE_CLOCK(brom_clk,     0, PCCR1, 22, NULL, NULL, &ahb_clk);
-DEFINE_CLOCK(pata_clk,      0, PCCR1, 23, NULL, NULL, &ahb_clk);
-DEFINE_CLOCK(wdog_clk,     0, PCCR1, 24, NULL, NULL, &ipg_clk);
-DEFINE_CLOCK(usb_clk,      0, PCCR1, 25, get_rate_usb, &usb_clk1, &spll_clk);
-DEFINE_CLOCK(uart6_clk1,   0, PCCR1, 26, NULL, NULL, &ipg_clk);
-DEFINE_CLOCK(uart5_clk1,   0, PCCR1, 27, NULL, NULL, &ipg_clk);
-DEFINE_CLOCK(uart4_clk1,   0, PCCR1, 28, NULL, NULL, &ipg_clk);
-DEFINE_CLOCK(uart3_clk1,   0, PCCR1, 29, NULL, NULL, &ipg_clk);
-DEFINE_CLOCK(uart2_clk1,   0, PCCR1, 30, NULL, NULL, &ipg_clk);
-DEFINE_CLOCK(uart1_clk1,   0, PCCR1, 31, NULL, NULL, &ipg_clk);
-
-/* Clocks we cannot directly gate, but drivers need their rates */
-DEFINE_CLOCK(cspi1_clk,    0, NULL,   0, NULL, &cspi1_clk1, &per2_clk);
-DEFINE_CLOCK(cspi2_clk,    1, NULL,   0, NULL, &cspi2_clk1, &per2_clk);
-DEFINE_CLOCK(cspi3_clk,    2, NULL,   0, NULL, &cspi13_clk1, &per2_clk);
-DEFINE_CLOCK(sdhc1_clk,    0, NULL,   0, NULL, &sdhc1_clk1, &per2_clk);
-DEFINE_CLOCK(sdhc2_clk,    1, NULL,   0, NULL, &sdhc2_clk1, &per2_clk);
-DEFINE_CLOCK(sdhc3_clk,    2, NULL,   0, NULL, &sdhc3_clk1, &per2_clk);
-DEFINE_CLOCK(pwm_clk,      0, NULL,   0, NULL, &pwm_clk1, &per1_clk);
-DEFINE_CLOCK(gpt1_clk,     0, NULL,   0, NULL, &gpt1_clk1, &per1_clk);
-DEFINE_CLOCK(gpt2_clk,     1, NULL,   0, NULL, &gpt2_clk1, &per1_clk);
-DEFINE_CLOCK(gpt3_clk,     2, NULL,   0, NULL, &gpt3_clk1, &per1_clk);
-DEFINE_CLOCK(gpt4_clk,     3, NULL,   0, NULL, &gpt4_clk1, &per1_clk);
-DEFINE_CLOCK(gpt5_clk,     4, NULL,   0, NULL, &gpt5_clk1, &per1_clk);
-DEFINE_CLOCK(gpt6_clk,     5, NULL,   0, NULL, &gpt6_clk1, &per1_clk);
-DEFINE_CLOCK(uart1_clk,    0, NULL,   0, NULL, &uart1_clk1, &per1_clk);
-DEFINE_CLOCK(uart2_clk,    1, NULL,   0, NULL, &uart2_clk1, &per1_clk);
-DEFINE_CLOCK(uart3_clk,    2, NULL,   0, NULL, &uart3_clk1, &per1_clk);
-DEFINE_CLOCK(uart4_clk,    3, NULL,   0, NULL, &uart4_clk1, &per1_clk);
-DEFINE_CLOCK(uart5_clk,    4, NULL,   0, NULL, &uart5_clk1, &per1_clk);
-DEFINE_CLOCK(uart6_clk,    5, NULL,   0, NULL, &uart6_clk1, &per1_clk);
-DEFINE_CLOCK1(lcdc_clk,    0, NULL,   0, parent, &lcdc_clk1, &per3_clk);
-DEFINE_CLOCK1(csi_clk,     0, NULL,   0, parent, &csi_clk1, &per4_clk);
-
-#define _REGISTER_CLOCK(d, n, c) \
-	{ \
-		.dev_id = d, \
-		.con_id = n, \
-		.clk = &c, \
-	},
-
-static struct clk_lookup lookups[] = {
-	/* i.mx27 has the i.mx21 type uart */
-	_REGISTER_CLOCK("imx21-uart.0", NULL, uart1_clk)
-	_REGISTER_CLOCK("imx21-uart.1", NULL, uart2_clk)
-	_REGISTER_CLOCK("imx21-uart.2", NULL, uart3_clk)
-	_REGISTER_CLOCK("imx21-uart.3", NULL, uart4_clk)
-	_REGISTER_CLOCK("imx21-uart.4", NULL, uart5_clk)
-	_REGISTER_CLOCK("imx21-uart.5", NULL, uart6_clk)
-	_REGISTER_CLOCK(NULL, "gpt1", gpt1_clk)
-	_REGISTER_CLOCK(NULL, "gpt2", gpt2_clk)
-	_REGISTER_CLOCK(NULL, "gpt3", gpt3_clk)
-	_REGISTER_CLOCK(NULL, "gpt4", gpt4_clk)
-	_REGISTER_CLOCK(NULL, "gpt5", gpt5_clk)
-	_REGISTER_CLOCK(NULL, "gpt6", gpt6_clk)
-	_REGISTER_CLOCK("mxc_pwm.0", NULL, pwm_clk)
-	_REGISTER_CLOCK("mxc-mmc.0", NULL, sdhc1_clk)
-	_REGISTER_CLOCK("mxc-mmc.1", NULL, sdhc2_clk)
-	_REGISTER_CLOCK("mxc-mmc.2", NULL, sdhc3_clk)
-	_REGISTER_CLOCK("imx27-cspi.0", NULL, cspi1_clk)
-	_REGISTER_CLOCK("imx27-cspi.1", NULL, cspi2_clk)
-	_REGISTER_CLOCK("imx27-cspi.2", NULL, cspi3_clk)
-	_REGISTER_CLOCK("imx-fb.0", NULL, lcdc_clk)
-	_REGISTER_CLOCK("mx2-camera.0", NULL, csi_clk)
-	_REGISTER_CLOCK("fsl-usb2-udc", "usb", usb_clk)
-	_REGISTER_CLOCK("fsl-usb2-udc", "usb_ahb", usb_clk1)
-	_REGISTER_CLOCK("mxc-ehci.0", "usb", usb_clk)
-	_REGISTER_CLOCK("mxc-ehci.0", "usb_ahb", usb_clk1)
-	_REGISTER_CLOCK("mxc-ehci.1", "usb", usb_clk)
-	_REGISTER_CLOCK("mxc-ehci.1", "usb_ahb", usb_clk1)
-	_REGISTER_CLOCK("mxc-ehci.2", "usb", usb_clk)
-	_REGISTER_CLOCK("mxc-ehci.2", "usb_ahb", usb_clk1)
-	_REGISTER_CLOCK("imx-ssi.0", NULL, ssi1_clk)
-	_REGISTER_CLOCK("imx-ssi.1", NULL, ssi2_clk)
-	_REGISTER_CLOCK("mxc_nand.0", NULL, nfc_clk)
-	_REGISTER_CLOCK(NULL, "vpu", vpu_clk)
-	_REGISTER_CLOCK(NULL, "dma", dma_clk)
-	_REGISTER_CLOCK(NULL, "rtic", rtic_clk)
-	_REGISTER_CLOCK(NULL, "brom", brom_clk)
-	_REGISTER_CLOCK(NULL, "emma", emma_clk)
-	_REGISTER_CLOCK("m2m-emmaprp.0", NULL, emma_clk)
-	_REGISTER_CLOCK(NULL, "slcdc", slcdc_clk)
-	_REGISTER_CLOCK("imx27-fec.0", NULL, fec_clk)
-	_REGISTER_CLOCK(NULL, "emi", emi_clk)
-	_REGISTER_CLOCK(NULL, "sahara2", sahara2_clk)
-	_REGISTER_CLOCK("pata_imx", NULL, pata_clk)
-	_REGISTER_CLOCK(NULL, "mstick", mstick_clk)
-	_REGISTER_CLOCK("imx2-wdt.0", NULL, wdog_clk)
-	_REGISTER_CLOCK(NULL, "gpio", gpio_clk)
-	_REGISTER_CLOCK("imx-i2c.0", NULL, i2c1_clk)
-	_REGISTER_CLOCK("imx-i2c.1", NULL, i2c2_clk)
-	_REGISTER_CLOCK(NULL, "iim", iim_clk)
-	_REGISTER_CLOCK(NULL, "kpp", kpp_clk)
-	_REGISTER_CLOCK("mxc_w1.0", NULL, owire_clk)
-	_REGISTER_CLOCK(NULL, "rtc", rtc_clk)
-	_REGISTER_CLOCK(NULL, "scc", scc_clk)
-};
-
-/* Adjust the clock path for TO2 and later */
-static void __init to2_adjust_clocks(void)
-{
-	unsigned long cscr = __raw_readl(CCM_CSCR);
-
-	if (mx27_revision() >= IMX_CHIP_REVISION_2_0) {
-		if (cscr & CCM_CSCR_ARM_SRC)
-			cpu_clk.parent = &mpll_main1_clk;
-
-		if (!(cscr & CCM_CSCR_SSI2))
-			ssi2_clk.parent = &spll_clk;
-
-		if (!(cscr & CCM_CSCR_SSI1))
-			ssi1_clk.parent = &spll_clk;
-
-		if (!(cscr & CCM_CSCR_VPU))
-			vpu_clk.parent = &spll_clk;
-	} else {
-		cpu_clk.parent = &mpll_clk;
-		cpu_clk.set_parent = NULL;
-		cpu_clk.round_rate = NULL;
-		cpu_clk.set_rate = NULL;
-		ahb_clk.parent = &mpll_clk;
-
-		per1_clk.parent = &mpll_clk;
-		per2_clk.parent = &mpll_clk;
-		per3_clk.parent = &mpll_clk;
-		per4_clk.parent = &mpll_clk;
-
-		ssi1_clk.parent = &mpll_clk;
-		ssi2_clk.parent = &mpll_clk;
-
-		vpu_clk.parent = &mpll_clk;
-	}
-}
-
-/*
- * must be called very early to get information about the
- * available clock rate when the timer framework starts
- */
-int __init mx27_clocks_init(unsigned long fref)
-{
-	u32 cscr = __raw_readl(CCM_CSCR);
-
-	external_high_reference = fref;
-
-	/* detect clock reference for both system PLLs */
-	if (cscr & CCM_CSCR_MCU)
-		mpll_clk.parent = &ckih_clk;
-	else
-		mpll_clk.parent = &fpm_clk;
-
-	if (cscr & CCM_CSCR_SP)
-		spll_clk.parent = &ckih_clk;
-	else
-		spll_clk.parent = &fpm_clk;
-
-	to2_adjust_clocks();
-
-	clkdev_add_table(lookups, ARRAY_SIZE(lookups));
-
-	/* Turn off all clocks we do not need */
-	__raw_writel(0, CCM_PCCR0);
-	__raw_writel((1 << 10) | (1 << 19), CCM_PCCR1);
-
-	spll_clk.disable(&spll_clk);
-
-	/* enable basic clocks */
-	clk_enable(&per1_clk);
-	clk_enable(&gpio_clk);
-	clk_enable(&emi_clk);
-	clk_enable(&iim_clk);
-	imx_print_silicon_rev("i.MX27", mx27_revision());
-	clk_disable(&iim_clk);
-
-#if defined(CONFIG_DEBUG_LL) && !defined(CONFIG_DEBUG_ICEDCC)
-	clk_enable(&uart1_clk);
-#endif
-
-	mxc_timer_init(&gpt1_clk, MX27_IO_ADDRESS(MX27_GPT1_BASE_ADDR),
-			MX27_INT_GPT1);
-
-	return 0;
-}
-
-#ifdef CONFIG_OF
-int __init mx27_clocks_init_dt(void)
-{
-	struct device_node *np;
-	u32 fref = 26000000; /* default */
-
-	for_each_compatible_node(np, NULL, "fixed-clock") {
-		if (!of_device_is_compatible(np, "fsl,imx-osc26m"))
-			continue;
-
-		if (!of_property_read_u32(np, "clock-frequency", &fref))
-			break;
-	}
-
-	return mx27_clocks_init(fref);
-}
-#endif
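
The per1..per4 dividers handled by round_rate_per(), set_rate_per() and get_rate_per() above all follow the same PCDR1 layout: clock id n owns a 6-bit divide-by-(field + 1) field at bit offset n * 8. A minimal standalone sketch of that arithmetic (the example rate in the comment is made up, not measured):

/*
 * Sketch of the PCDR1 field arithmetic used by the per clocks above;
 * mirrors get_rate_per(), nothing hardware-specific is assumed.
 */
static unsigned long example_per_rate(unsigned long parent_rate,
				      unsigned int pcdr1, unsigned int id)
{
	unsigned int pdf = (pcdr1 >> (id << 3)) & 0x3f;	/* 6-bit field at id * 8 */

	/* e.g. a 266 MHz parent with field value 5 divides down to 266000000 / 6 */
	return parent_rate / (pdf + 1);
}
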
diff --git a/arch/arm/mach-imx/clock-imx31.c b/arch/arm/mach-imx/clock-imx31.c
deleted file mode 100644
index 3a943cd..0000000
--- a/arch/arm/mach-imx/clock-imx31.c
+++ /dev/null
@@ -1,630 +0,0 @@
-/*
- * Copyright 2005-2007 Freescale Semiconductor, Inc. All Rights Reserved.
- * Copyright (C) 2008 by Sascha Hauer <kernel@pengutronix.de>
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * as published by the Free Software Foundation; either version 2
- * of the License, or (at your option) any later version.
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston,
- * MA 02110-1301, USA.
- */
-
-#include <linux/module.h>
-#include <linux/spinlock.h>
-#include <linux/delay.h>
-#include <linux/clk.h>
-#include <linux/err.h>
-#include <linux/io.h>
-#include <linux/clkdev.h>
-
-#include <asm/div64.h>
-
-#include <mach/clock.h>
-#include <mach/hardware.h>
-#include <mach/mx31.h>
-#include <mach/common.h>
-
-#include "crmregs-imx3.h"
-
-#define PRE_DIV_MIN_FREQ    10000000 /* Minimum Frequency after Predivider */
-
-static void __calc_pre_post_dividers(u32 div, u32 *pre, u32 *post)
-{
-	u32 min_pre, temp_pre, old_err, err;
-
-	if (div >= 512) {
-		*pre = 8;
-		*post = 64;
-	} else if (div >= 64) {
-		min_pre = (div - 1) / 64 + 1;
-		old_err = 8;
-		for (temp_pre = 8; temp_pre >= min_pre; temp_pre--) {
-			err = div % temp_pre;
-			if (err == 0) {
-				*pre = temp_pre;
-				break;
-			}
-			err = temp_pre - err;
-			if (err < old_err) {
-				old_err = err;
-				*pre = temp_pre;
-			}
-		}
-		*post = (div + *pre - 1) / *pre;
-	} else if (div <= 8) {
-		*pre = div;
-		*post = 1;
-	} else {
-		*pre = 1;
-		*post = div;
-	}
-}
-
-static struct clk mcu_pll_clk;
-static struct clk serial_pll_clk;
-static struct clk ipg_clk;
-static struct clk ckih_clk;
-
-static int cgr_enable(struct clk *clk)
-{
-	u32 reg;
-
-	if (!clk->enable_reg)
-		return 0;
-
-	reg = __raw_readl(clk->enable_reg);
-	reg |= 3 << clk->enable_shift;
-	__raw_writel(reg, clk->enable_reg);
-
-	return 0;
-}
-
-static void cgr_disable(struct clk *clk)
-{
-	u32 reg;
-
-	if (!clk->enable_reg)
-		return;
-
-	reg = __raw_readl(clk->enable_reg);
-	reg &= ~(3 << clk->enable_shift);
-
-	/* special case for EMI clock */
-	if (clk->enable_reg == MXC_CCM_CGR2 && clk->enable_shift == 8)
-		reg |= (1 << clk->enable_shift);
-
-	__raw_writel(reg, clk->enable_reg);
-}
-
-static unsigned long pll_ref_get_rate(void)
-{
-	unsigned long ccmr;
-	unsigned int prcs;
-
-	ccmr = __raw_readl(MXC_CCM_CCMR);
-	prcs = (ccmr & MXC_CCM_CCMR_PRCS_MASK) >> MXC_CCM_CCMR_PRCS_OFFSET;
-	if (prcs == 0x1)
-		return CKIL_CLK_FREQ * 1024;
-	else
-		return clk_get_rate(&ckih_clk);
-}
-
-static unsigned long usb_pll_get_rate(struct clk *clk)
-{
-	unsigned long reg;
-
-	reg = __raw_readl(MXC_CCM_UPCTL);
-
-	return mxc_decode_pll(reg, pll_ref_get_rate());
-}
-
-static unsigned long serial_pll_get_rate(struct clk *clk)
-{
-	unsigned long reg;
-
-	reg = __raw_readl(MXC_CCM_SRPCTL);
-
-	return mxc_decode_pll(reg, pll_ref_get_rate());
-}
-
-static unsigned long mcu_pll_get_rate(struct clk *clk)
-{
-	unsigned long reg, ccmr;
-
-	ccmr = __raw_readl(MXC_CCM_CCMR);
-
-	if (!(ccmr & MXC_CCM_CCMR_MPE) || (ccmr & MXC_CCM_CCMR_MDS))
-		return clk_get_rate(&ckih_clk);
-
-	reg = __raw_readl(MXC_CCM_MPCTL);
-
-	return mxc_decode_pll(reg, pll_ref_get_rate());
-}
-
-static int usb_pll_enable(struct clk *clk)
-{
-	u32 reg;
-
-	reg = __raw_readl(MXC_CCM_CCMR);
-	reg |= MXC_CCM_CCMR_UPE;
-	__raw_writel(reg, MXC_CCM_CCMR);
-
-	/* No lock bit on MX31, so using max time from spec */
-	udelay(80);
-
-	return 0;
-}
-
-static void usb_pll_disable(struct clk *clk)
-{
-	u32 reg;
-
-	reg = __raw_readl(MXC_CCM_CCMR);
-	reg &= ~MXC_CCM_CCMR_UPE;
-	__raw_writel(reg, MXC_CCM_CCMR);
-}
-
-static int serial_pll_enable(struct clk *clk)
-{
-	u32 reg;
-
-	reg = __raw_readl(MXC_CCM_CCMR);
-	reg |= MXC_CCM_CCMR_SPE;
-	__raw_writel(reg, MXC_CCM_CCMR);
-
-	/* No lock bit on MX31, so using max time from spec */
-	udelay(80);
-
-	return 0;
-}
-
-static void serial_pll_disable(struct clk *clk)
-{
-	u32 reg;
-
-	reg = __raw_readl(MXC_CCM_CCMR);
-	reg &= ~MXC_CCM_CCMR_SPE;
-	__raw_writel(reg, MXC_CCM_CCMR);
-}
-
-#define PDR0(mask, off) ((__raw_readl(MXC_CCM_PDR0) & mask) >> off)
-#define PDR1(mask, off) ((__raw_readl(MXC_CCM_PDR1) & mask) >> off)
-#define PDR2(mask, off) ((__raw_readl(MXC_CCM_PDR2) & mask) >> off)
-
-static unsigned long mcu_main_get_rate(struct clk *clk)
-{
-	u32 pmcr0 = __raw_readl(MXC_CCM_PMCR0);
-
-	if ((pmcr0 & MXC_CCM_PMCR0_DFSUP1) == MXC_CCM_PMCR0_DFSUP1_SPLL)
-		return clk_get_rate(&serial_pll_clk);
-	else
-		return clk_get_rate(&mcu_pll_clk);
-}
-
-static unsigned long ahb_get_rate(struct clk *clk)
-{
-	unsigned long max_pdf;
-
-	max_pdf = PDR0(MXC_CCM_PDR0_MAX_PODF_MASK,
-		       MXC_CCM_PDR0_MAX_PODF_OFFSET);
-	return clk_get_rate(clk->parent) / (max_pdf + 1);
-}
-
-static unsigned long ipg_get_rate(struct clk *clk)
-{
-	unsigned long ipg_pdf;
-
-	ipg_pdf = PDR0(MXC_CCM_PDR0_IPG_PODF_MASK,
-		       MXC_CCM_PDR0_IPG_PODF_OFFSET);
-	return clk_get_rate(clk->parent) / (ipg_pdf + 1);
-}
-
-static unsigned long nfc_get_rate(struct clk *clk)
-{
-	unsigned long nfc_pdf;
-
-	nfc_pdf = PDR0(MXC_CCM_PDR0_NFC_PODF_MASK,
-		       MXC_CCM_PDR0_NFC_PODF_OFFSET);
-	return clk_get_rate(clk->parent) / (nfc_pdf + 1);
-}
-
-static unsigned long hsp_get_rate(struct clk *clk)
-{
-	unsigned long hsp_pdf;
-
-	hsp_pdf = PDR0(MXC_CCM_PDR0_HSP_PODF_MASK,
-		       MXC_CCM_PDR0_HSP_PODF_OFFSET);
-	return clk_get_rate(clk->parent) / (hsp_pdf + 1);
-}
-
-static unsigned long usb_get_rate(struct clk *clk)
-{
-	unsigned long usb_pdf, usb_prepdf;
-
-	usb_pdf = PDR1(MXC_CCM_PDR1_USB_PODF_MASK,
-		       MXC_CCM_PDR1_USB_PODF_OFFSET);
-	usb_prepdf = PDR1(MXC_CCM_PDR1_USB_PRDF_MASK,
-			  MXC_CCM_PDR1_USB_PRDF_OFFSET);
-	return clk_get_rate(clk->parent) / (usb_prepdf + 1) / (usb_pdf + 1);
-}
-
-static unsigned long csi_get_rate(struct clk *clk)
-{
-	u32 reg, pre, post;
-
-	reg = __raw_readl(MXC_CCM_PDR0);
-	pre = (reg & MXC_CCM_PDR0_CSI_PRDF_MASK) >>
-	    MXC_CCM_PDR0_CSI_PRDF_OFFSET;
-	pre++;
-	post = (reg & MXC_CCM_PDR0_CSI_PODF_MASK) >>
-	    MXC_CCM_PDR0_CSI_PODF_OFFSET;
-	post++;
-	return clk_get_rate(clk->parent) / (pre * post);
-}
-
-static unsigned long csi_round_rate(struct clk *clk, unsigned long rate)
-{
-	u32 pre, post, parent = clk_get_rate(clk->parent);
-	u32 div = parent / rate;
-
-	if (parent % rate)
-		div++;
-
-	__calc_pre_post_dividers(div, &pre, &post);
-
-	return parent / (pre * post);
-}
-
-static int csi_set_rate(struct clk *clk, unsigned long rate)
-{
-	u32 reg, div, pre, post, parent = clk_get_rate(clk->parent);
-
-	div = parent / rate;
-
-	if ((parent / div) != rate)
-		return -EINVAL;
-
-	__calc_pre_post_dividers(div, &pre, &post);
-
-	/* Set CSI clock divider */
-	reg = __raw_readl(MXC_CCM_PDR0) &
-	    ~(MXC_CCM_PDR0_CSI_PODF_MASK | MXC_CCM_PDR0_CSI_PRDF_MASK);
-	reg |= (post - 1) << MXC_CCM_PDR0_CSI_PODF_OFFSET;
-	reg |= (pre - 1) << MXC_CCM_PDR0_CSI_PRDF_OFFSET;
-	__raw_writel(reg, MXC_CCM_PDR0);
-
-	return 0;
-}
-
-static unsigned long ssi1_get_rate(struct clk *clk)
-{
-	unsigned long ssi1_pdf, ssi1_prepdf;
-
-	ssi1_pdf = PDR1(MXC_CCM_PDR1_SSI1_PODF_MASK,
-			MXC_CCM_PDR1_SSI1_PODF_OFFSET);
-	ssi1_prepdf = PDR1(MXC_CCM_PDR1_SSI1_PRE_PODF_MASK,
-			   MXC_CCM_PDR1_SSI1_PRE_PODF_OFFSET);
-	return clk_get_rate(clk->parent) / (ssi1_prepdf + 1) / (ssi1_pdf + 1);
-}
-
-static unsigned long ssi2_get_rate(struct clk *clk)
-{
-	unsigned long ssi2_pdf, ssi2_prepdf;
-
-	ssi2_pdf = PDR1(MXC_CCM_PDR1_SSI2_PODF_MASK,
-			MXC_CCM_PDR1_SSI2_PODF_OFFSET);
-	ssi2_prepdf = PDR1(MXC_CCM_PDR1_SSI2_PRE_PODF_MASK,
-			   MXC_CCM_PDR1_SSI2_PRE_PODF_OFFSET);
-	return clk_get_rate(clk->parent) / (ssi2_prepdf + 1) / (ssi2_pdf + 1);
-}
-
-static unsigned long firi_get_rate(struct clk *clk)
-{
-	unsigned long firi_pdf, firi_prepdf;
-
-	firi_pdf = PDR1(MXC_CCM_PDR1_FIRI_PODF_MASK,
-			MXC_CCM_PDR1_FIRI_PODF_OFFSET);
-	firi_prepdf = PDR1(MXC_CCM_PDR1_FIRI_PRE_PODF_MASK,
-			   MXC_CCM_PDR1_FIRI_PRE_PODF_OFFSET);
-	return clk_get_rate(clk->parent) / (firi_prepdf + 1) / (firi_pdf + 1);
-}
-
-static unsigned long firi_round_rate(struct clk *clk, unsigned long rate)
-{
-	u32 pre, post;
-	u32 parent = clk_get_rate(clk->parent);
-	u32 div = parent / rate;
-
-	if (parent % rate)
-		div++;
-
-	__calc_pre_post_dividers(div, &pre, &post);
-
-	return parent / (pre * post);
-
-}
-
-static int firi_set_rate(struct clk *clk, unsigned long rate)
-{
-	u32 reg, div, pre, post, parent = clk_get_rate(clk->parent);
-
-	div = parent / rate;
-
-	if ((parent / div) != rate)
-		return -EINVAL;
-
-	__calc_pre_post_dividers(div, &pre, &post);
-
-	/* Set FIRI clock divider */
-	reg = __raw_readl(MXC_CCM_PDR1) &
-	    ~(MXC_CCM_PDR1_FIRI_PODF_MASK | MXC_CCM_PDR1_FIRI_PRE_PODF_MASK);
-	reg |= (pre - 1) << MXC_CCM_PDR1_FIRI_PRE_PODF_OFFSET;
-	reg |= (post - 1) << MXC_CCM_PDR1_FIRI_PODF_OFFSET;
-	__raw_writel(reg, MXC_CCM_PDR1);
-
-	return 0;
-}
-
-static unsigned long mbx_get_rate(struct clk *clk)
-{
-	return clk_get_rate(clk->parent) / 2;
-}
-
-static unsigned long mstick1_get_rate(struct clk *clk)
-{
-	unsigned long msti_pdf;
-
-	msti_pdf = PDR2(MXC_CCM_PDR2_MST1_PDF_MASK,
-			MXC_CCM_PDR2_MST1_PDF_OFFSET);
-	return clk_get_rate(clk->parent) / (msti_pdf + 1);
-}
-
-static unsigned long mstick2_get_rate(struct clk *clk)
-{
-	unsigned long msti_pdf;
-
-	msti_pdf = PDR2(MXC_CCM_PDR2_MST2_PDF_MASK,
-			MXC_CCM_PDR2_MST2_PDF_OFFSET);
-	return clk_get_rate(clk->parent) / (msti_pdf + 1);
-}
-
-static unsigned long ckih_rate;
-
-static unsigned long clk_ckih_get_rate(struct clk *clk)
-{
-	return ckih_rate;
-}
-
-static unsigned long clk_ckil_get_rate(struct clk *clk)
-{
-	return CKIL_CLK_FREQ;
-}
-
-static struct clk ckih_clk = {
-	.get_rate = clk_ckih_get_rate,
-};
-
-static struct clk mcu_pll_clk = {
-	.parent = &ckih_clk,
-	.get_rate = mcu_pll_get_rate,
-};
-
-static struct clk mcu_main_clk = {
-	.parent = &mcu_pll_clk,
-	.get_rate = mcu_main_get_rate,
-};
-
-static struct clk serial_pll_clk = {
-	.parent = &ckih_clk,
-	.get_rate = serial_pll_get_rate,
-	.enable = serial_pll_enable,
-	.disable = serial_pll_disable,
-};
-
-static struct clk usb_pll_clk = {
-	.parent = &ckih_clk,
-	.get_rate = usb_pll_get_rate,
-	.enable = usb_pll_enable,
-	.disable = usb_pll_disable,
-};
-
-static struct clk ahb_clk = {
-	.parent = &mcu_main_clk,
-	.get_rate = ahb_get_rate,
-};
-
-#define DEFINE_CLOCK(name, i, er, es, gr, s, p)		\
-	static struct clk name = {			\
-		.id		= i,			\
-		.enable_reg	= er,			\
-		.enable_shift	= es,			\
-		.get_rate	= gr,			\
-		.enable		= cgr_enable,		\
-		.disable	= cgr_disable,		\
-		.secondary	= s,			\
-		.parent		= p,			\
-	}
-
-#define DEFINE_CLOCK1(name, i, er, es, getsetround, s, p)	\
-	static struct clk name = {				\
-		.id		= i,				\
-		.enable_reg	= er,				\
-		.enable_shift	= es,				\
-		.get_rate	= getsetround##_get_rate,	\
-		.set_rate	= getsetround##_set_rate,	\
-		.round_rate	= getsetround##_round_rate,	\
-		.enable		= cgr_enable,			\
-		.disable	= cgr_disable,			\
-		.secondary	= s,				\
-		.parent		= p,				\
-	}
-
-DEFINE_CLOCK(perclk_clk,  0, NULL,          0, NULL, NULL, &ipg_clk);
-DEFINE_CLOCK(ckil_clk,    0, NULL,          0, clk_ckil_get_rate, NULL, NULL);
-
-DEFINE_CLOCK(sdhc1_clk,   0, MXC_CCM_CGR0,  0, NULL, NULL, &perclk_clk);
-DEFINE_CLOCK(sdhc2_clk,   1, MXC_CCM_CGR0,  2, NULL, NULL, &perclk_clk);
-DEFINE_CLOCK(gpt_clk,     0, MXC_CCM_CGR0,  4, NULL, NULL, &perclk_clk);
-DEFINE_CLOCK(epit1_clk,   0, MXC_CCM_CGR0,  6, NULL, NULL, &perclk_clk);
-DEFINE_CLOCK(epit2_clk,   1, MXC_CCM_CGR0,  8, NULL, NULL, &perclk_clk);
-DEFINE_CLOCK(iim_clk,     0, MXC_CCM_CGR0, 10, NULL, NULL, &ipg_clk);
-DEFINE_CLOCK(pata_clk,     0, MXC_CCM_CGR0, 12, NULL, NULL, &ipg_clk);
-DEFINE_CLOCK(sdma_clk1,   0, MXC_CCM_CGR0, 14, NULL, NULL, &ahb_clk);
-DEFINE_CLOCK(cspi3_clk,   2, MXC_CCM_CGR0, 16, NULL, NULL, &ipg_clk);
-DEFINE_CLOCK(rng_clk,     0, MXC_CCM_CGR0, 18, NULL, NULL, &ipg_clk);
-DEFINE_CLOCK(uart1_clk,   0, MXC_CCM_CGR0, 20, NULL, NULL, &perclk_clk);
-DEFINE_CLOCK(uart2_clk,   1, MXC_CCM_CGR0, 22, NULL, NULL, &perclk_clk);
-DEFINE_CLOCK(ssi1_clk,    0, MXC_CCM_CGR0, 24, ssi1_get_rate, NULL, &serial_pll_clk);
-DEFINE_CLOCK(i2c1_clk,    0, MXC_CCM_CGR0, 26, NULL, NULL, &perclk_clk);
-DEFINE_CLOCK(i2c2_clk,    1, MXC_CCM_CGR0, 28, NULL, NULL, &perclk_clk);
-DEFINE_CLOCK(i2c3_clk,    2, MXC_CCM_CGR0, 30, NULL, NULL, &perclk_clk);
-
-DEFINE_CLOCK(mpeg4_clk,   0, MXC_CCM_CGR1,  0, NULL, NULL, &ahb_clk);
-DEFINE_CLOCK(mstick1_clk, 0, MXC_CCM_CGR1,  2, mstick1_get_rate, NULL, &usb_pll_clk);
-DEFINE_CLOCK(mstick2_clk, 1, MXC_CCM_CGR1,  4, mstick2_get_rate, NULL, &usb_pll_clk);
-DEFINE_CLOCK1(csi_clk,    0, MXC_CCM_CGR1,  6, csi, NULL, &serial_pll_clk);
-DEFINE_CLOCK(rtc_clk,     0, MXC_CCM_CGR1,  8, NULL, NULL, &ckil_clk);
-DEFINE_CLOCK(wdog_clk,    0, MXC_CCM_CGR1, 10, NULL, NULL, &ipg_clk);
-DEFINE_CLOCK(pwm_clk,     0, MXC_CCM_CGR1, 12, NULL, NULL, &perclk_clk);
-DEFINE_CLOCK(usb_clk2,    0, MXC_CCM_CGR1, 18, usb_get_rate, NULL, &ahb_clk);
-DEFINE_CLOCK(kpp_clk,     0, MXC_CCM_CGR1, 20, NULL, NULL, &ipg_clk);
-DEFINE_CLOCK(ipu_clk,     0, MXC_CCM_CGR1, 22, hsp_get_rate, NULL, &mcu_main_clk);
-DEFINE_CLOCK(uart3_clk,   2, MXC_CCM_CGR1, 24, NULL, NULL, &perclk_clk);
-DEFINE_CLOCK(uart4_clk,   3, MXC_CCM_CGR1, 26, NULL, NULL, &perclk_clk);
-DEFINE_CLOCK(uart5_clk,   4, MXC_CCM_CGR1, 28, NULL, NULL, &perclk_clk);
-DEFINE_CLOCK(owire_clk,   0, MXC_CCM_CGR1, 30, NULL, NULL, &perclk_clk);
-
-DEFINE_CLOCK(ssi2_clk,    1, MXC_CCM_CGR2,  0, ssi2_get_rate, NULL, &serial_pll_clk);
-DEFINE_CLOCK(cspi1_clk,   0, MXC_CCM_CGR2,  2, NULL, NULL, &ipg_clk);
-DEFINE_CLOCK(cspi2_clk,   1, MXC_CCM_CGR2,  4, NULL, NULL, &ipg_clk);
-DEFINE_CLOCK(mbx_clk,     0, MXC_CCM_CGR2,  6, mbx_get_rate, NULL, &ahb_clk);
-DEFINE_CLOCK(emi_clk,     0, MXC_CCM_CGR2,  8, NULL, NULL, &ahb_clk);
-DEFINE_CLOCK(rtic_clk,    0, MXC_CCM_CGR2, 10, NULL, NULL, &ahb_clk);
-DEFINE_CLOCK1(firi_clk,   0, MXC_CCM_CGR2, 12, firi, NULL, &usb_pll_clk);
-
-DEFINE_CLOCK(sdma_clk2,   0, NULL,          0, NULL, NULL, &ipg_clk);
-DEFINE_CLOCK(usb_clk1,    0, NULL,          0, usb_get_rate, NULL, &usb_pll_clk);
-DEFINE_CLOCK(nfc_clk,     0, NULL,          0, nfc_get_rate, NULL, &ahb_clk);
-DEFINE_CLOCK(scc_clk,     0, NULL,          0, NULL, NULL, &ipg_clk);
-DEFINE_CLOCK(ipg_clk,     0, NULL,          0, ipg_get_rate, NULL, &ahb_clk);
-
-#define _REGISTER_CLOCK(d, n, c) \
-	{ \
-		.dev_id = d, \
-		.con_id = n, \
-		.clk = &c, \
-	},
-
-static struct clk_lookup lookups[] = {
-	_REGISTER_CLOCK(NULL, "emi", emi_clk)
-	_REGISTER_CLOCK("imx31-cspi.0", NULL, cspi1_clk)
-	_REGISTER_CLOCK("imx31-cspi.1", NULL, cspi2_clk)
-	_REGISTER_CLOCK("imx31-cspi.2", NULL, cspi3_clk)
-	_REGISTER_CLOCK(NULL, "gpt", gpt_clk)
-	_REGISTER_CLOCK(NULL, "pwm", pwm_clk)
-	_REGISTER_CLOCK("imx2-wdt.0", NULL, wdog_clk)
-	_REGISTER_CLOCK(NULL, "rtc", rtc_clk)
-	_REGISTER_CLOCK(NULL, "epit", epit1_clk)
-	_REGISTER_CLOCK(NULL, "epit", epit2_clk)
-	_REGISTER_CLOCK("mxc_nand.0", NULL, nfc_clk)
-	_REGISTER_CLOCK("ipu-core", NULL, ipu_clk)
-	_REGISTER_CLOCK("mx3_sdc_fb", NULL, ipu_clk)
-	_REGISTER_CLOCK(NULL, "kpp", kpp_clk)
-	_REGISTER_CLOCK("mxc-ehci.0", "usb", usb_clk1)
-	_REGISTER_CLOCK("mxc-ehci.0", "usb_ahb", usb_clk2)
-	_REGISTER_CLOCK("mxc-ehci.1", "usb", usb_clk1)
-	_REGISTER_CLOCK("mxc-ehci.1", "usb_ahb", usb_clk2)
-	_REGISTER_CLOCK("mxc-ehci.2", "usb", usb_clk1)
-	_REGISTER_CLOCK("mxc-ehci.2", "usb_ahb", usb_clk2)
-	_REGISTER_CLOCK("fsl-usb2-udc", "usb", usb_clk1)
-	_REGISTER_CLOCK("fsl-usb2-udc", "usb_ahb", usb_clk2)
-	_REGISTER_CLOCK("mx3-camera.0", NULL, csi_clk)
-	/* i.mx31 has the i.mx21 type uart */
-	_REGISTER_CLOCK("imx21-uart.0", NULL, uart1_clk)
-	_REGISTER_CLOCK("imx21-uart.1", NULL, uart2_clk)
-	_REGISTER_CLOCK("imx21-uart.2", NULL, uart3_clk)
-	_REGISTER_CLOCK("imx21-uart.3", NULL, uart4_clk)
-	_REGISTER_CLOCK("imx21-uart.4", NULL, uart5_clk)
-	_REGISTER_CLOCK("imx-i2c.0", NULL, i2c1_clk)
-	_REGISTER_CLOCK("imx-i2c.1", NULL, i2c2_clk)
-	_REGISTER_CLOCK("imx-i2c.2", NULL, i2c3_clk)
-	_REGISTER_CLOCK("mxc_w1.0", NULL, owire_clk)
-	_REGISTER_CLOCK("mxc-mmc.0", NULL, sdhc1_clk)
-	_REGISTER_CLOCK("mxc-mmc.1", NULL, sdhc2_clk)
-	_REGISTER_CLOCK("imx-ssi.0", NULL, ssi1_clk)
-	_REGISTER_CLOCK("imx-ssi.1", NULL, ssi2_clk)
-	_REGISTER_CLOCK(NULL, "firi", firi_clk)
-	_REGISTER_CLOCK("pata_imx", NULL, pata_clk)
-	_REGISTER_CLOCK(NULL, "rtic", rtic_clk)
-	_REGISTER_CLOCK(NULL, "rng", rng_clk)
-	_REGISTER_CLOCK("imx31-sdma", NULL, sdma_clk1)
-	_REGISTER_CLOCK(NULL, "sdma_ipg", sdma_clk2)
-	_REGISTER_CLOCK(NULL, "mstick", mstick1_clk)
-	_REGISTER_CLOCK(NULL, "mstick", mstick2_clk)
-	_REGISTER_CLOCK(NULL, "scc", scc_clk)
-	_REGISTER_CLOCK(NULL, "iim", iim_clk)
-	_REGISTER_CLOCK(NULL, "mpeg4", mpeg4_clk)
-	_REGISTER_CLOCK(NULL, "mbx", mbx_clk)
-};
-
-int __init mx31_clocks_init(unsigned long fref)
-{
-	u32 reg;
-
-	ckih_rate = fref;
-
-	clkdev_add_table(lookups, ARRAY_SIZE(lookups));
-
-	/* change the csi_clk parent if necessary */
-	reg = __raw_readl(MXC_CCM_CCMR);
-	if (!(reg & MXC_CCM_CCMR_CSCS))
-		if (clk_set_parent(&csi_clk, &usb_pll_clk))
-			pr_err("%s: error changing csi_clk parent\n", __func__);
-
-
-	/* Turn off all possible clocks */
-	__raw_writel((3 << 4), MXC_CCM_CGR0);
-	__raw_writel(0, MXC_CCM_CGR1);
-	__raw_writel((3 << 8) | (3 << 14) | (3 << 16)|
-		     1 << 27 | 1 << 28, /* Bits 27 and 28 are not defined for
-					   MX32, but are still required to be set */
-		     MXC_CCM_CGR2);
-
-	/*
-	 * Before turning off usb_pll make sure ipg_per_clk is generated
-	 * by ipg_clk and not usb_pll.
-	 */
-	__raw_writel(__raw_readl(MXC_CCM_CCMR) | (1 << 24), MXC_CCM_CCMR);
-
-	usb_pll_disable(&usb_pll_clk);
-
-	pr_info("Clock input source is %ld\n", clk_get_rate(&ckih_clk));
-
-	clk_enable(&gpt_clk);
-	clk_enable(&emi_clk);
-	clk_enable(&iim_clk);
-	mx31_revision();
-	clk_disable(&iim_clk);
-
-	clk_enable(&serial_pll_clk);
-
-	if (mx31_revision() >= IMX_CHIP_REVISION_2_0) {
-		reg = __raw_readl(MXC_CCM_PMCR1);
-		/* No PLL restart on DVFS switch; enable auto EMI handshake */
-		reg |= MXC_CCM_PMCR1_PLLRDIS | MXC_CCM_PMCR1_EMIRQ_EN;
-		__raw_writel(reg, MXC_CCM_PMCR1);
-	}
-
-	mxc_timer_init(&ipg_clk, MX31_IO_ADDRESS(MX31_GPT1_BASE_ADDR),
-			MX31_INT_GPT);
-
-	return 0;
-}
diff --git a/arch/arm/mach-imx/clock-imx35.c b/arch/arm/mach-imx/clock-imx35.c
deleted file mode 100644
index e56c1a8..0000000
--- a/arch/arm/mach-imx/clock-imx35.c
+++ /dev/null
@@ -1,536 +0,0 @@
-/*
- * Copyright (C) 2009 by Sascha Hauer, Pengutronix
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * as published by the Free Software Foundation; either version 2
- * of the License, or (at your option) any later version.
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston,
- * MA 02110-1301, USA.
- */
-
-#include <linux/kernel.h>
-#include <linux/init.h>
-#include <linux/list.h>
-#include <linux/clk.h>
-#include <linux/io.h>
-#include <linux/clkdev.h>
-
-#include <mach/clock.h>
-#include <mach/hardware.h>
-#include <mach/common.h>
-
-#include "crmregs-imx3.h"
-
-#ifdef HAVE_SET_RATE_SUPPORT
-static void calc_dividers(u32 div, u32 *pre, u32 *post, u32 maxpost)
-{
-	u32 min_pre, temp_pre, old_err, err;
-
-	min_pre = (div - 1) / maxpost + 1;
-	old_err = 8;
-
-	for (temp_pre = 8; temp_pre >= min_pre; temp_pre--) {
-		if (div > (temp_pre * maxpost))
-			break;
-
-		if (div < (temp_pre * temp_pre))
-			continue;
-
-		err = div % temp_pre;
-
-		if (err == 0) {
-			*pre = temp_pre;
-			break;
-		}
-
-		err = temp_pre - err;
-
-		if (err < old_err) {
-			old_err = err;
-			*pre = temp_pre;
-		}
-	}
-
-	*post = (div + *pre - 1) / *pre;
-}
-
-/* get the best values for a 3-bit divider combined with a 6-bit divider */
-static void calc_dividers_3_6(u32 div, u32 *pre, u32 *post)
-{
-	if (div >= 512) {
-		*pre = 8;
-		*post = 64;
-	} else if (div >= 64) {
-		calc_dividers(div, pre, post, 64);
-	} else if (div <= 8) {
-		*pre = div;
-		*post = 1;
-	} else {
-		*pre = 1;
-		*post = div;
-	}
-}
-
-/* get the best values for two cascaded 3-bit dividers */
-static void calc_dividers_3_3(u32 div, u32 *pre, u32 *post)
-{
-	if (div >= 64) {
-		*pre = *post = 8;
-	} else if (div > 8) {
-		calc_dividers(div, pre, post, 8);
-	} else {
-		*pre = 1;
-		*post = div;
-	}
-}
-#endif
-
-static unsigned long get_rate_mpll(void)
-{
-	ulong mpctl = __raw_readl(MX35_CCM_MPCTL);
-
-	return mxc_decode_pll(mpctl, 24000000);
-}
-
-static unsigned long get_rate_ppll(void)
-{
-	ulong ppctl = __raw_readl(MX35_CCM_PPCTL);
-
-	return mxc_decode_pll(ppctl, 24000000);
-}
-
-struct arm_ahb_div {
-	unsigned char arm, ahb, sel;
-};
-
-static struct arm_ahb_div clk_consumer[] = {
-	{ .arm = 1, .ahb = 4, .sel = 0},
-	{ .arm = 1, .ahb = 3, .sel = 1},
-	{ .arm = 2, .ahb = 2, .sel = 0},
-	{ .arm = 0, .ahb = 0, .sel = 0},
-	{ .arm = 0, .ahb = 0, .sel = 0},
-	{ .arm = 0, .ahb = 0, .sel = 0},
-	{ .arm = 4, .ahb = 1, .sel = 0},
-	{ .arm = 1, .ahb = 5, .sel = 0},
-	{ .arm = 1, .ahb = 8, .sel = 0},
-	{ .arm = 1, .ahb = 6, .sel = 1},
-	{ .arm = 2, .ahb = 4, .sel = 0},
-	{ .arm = 0, .ahb = 0, .sel = 0},
-	{ .arm = 0, .ahb = 0, .sel = 0},
-	{ .arm = 0, .ahb = 0, .sel = 0},
-	{ .arm = 4, .ahb = 2, .sel = 0},
-	{ .arm = 0, .ahb = 0, .sel = 0},
-};
-
-static unsigned long get_rate_arm(void)
-{
-	unsigned long pdr0 = __raw_readl(MXC_CCM_PDR0);
-	struct arm_ahb_div *aad;
-	unsigned long fref = get_rate_mpll();
-
-	aad = &clk_consumer[(pdr0 >> 16) & 0xf];
-	if (aad->sel)
-		fref = fref * 3 / 4;
-
-	return fref / aad->arm;
-}
-
-static unsigned long get_rate_ahb(struct clk *clk)
-{
-	unsigned long pdr0 = __raw_readl(MXC_CCM_PDR0);
-	struct arm_ahb_div *aad;
-	unsigned long fref = get_rate_arm();
-
-	aad = &clk_consumer[(pdr0 >> 16) & 0xf];
-
-	return fref / aad->ahb;
-}
-
-static unsigned long get_rate_ipg(struct clk *clk)
-{
-	return get_rate_ahb(NULL) >> 1;
-}
-
-static unsigned long get_rate_uart(struct clk *clk)
-{
-	unsigned long pdr3 = __raw_readl(MX35_CCM_PDR3);
-	unsigned long pdr4 = __raw_readl(MX35_CCM_PDR4);
-	unsigned long div = ((pdr4 >> 10) & 0x3f) + 1;
-
-	if (pdr3 & (1 << 14))
-		return get_rate_arm() / div;
-	else
-		return get_rate_ppll() / div;
-}
-
-static unsigned long get_rate_sdhc(struct clk *clk)
-{
-	unsigned long pdr3 = __raw_readl(MX35_CCM_PDR3);
-	unsigned long div, rate;
-
-	if (pdr3 & (1 << 6))
-		rate = get_rate_arm();
-	else
-		rate = get_rate_ppll();
-
-	switch (clk->id) {
-	default:
-	case 0:
-		div = pdr3 & 0x3f;
-		break;
-	case 1:
-		div = (pdr3 >> 8) & 0x3f;
-		break;
-	case 2:
-		div = (pdr3 >> 16) & 0x3f;
-		break;
-	}
-
-	return rate / (div + 1);
-}
-
-static unsigned long get_rate_mshc(struct clk *clk)
-{
-	unsigned long pdr1 = __raw_readl(MXC_CCM_PDR1);
-	unsigned long div1, div2, rate;
-
-	if (pdr1 & (1 << 7))
-		rate = get_rate_arm();
-	else
-		rate = get_rate_ppll();
-
-	div1 = (pdr1 >> 29) & 0x7;
-	div2 = (pdr1 >> 22) & 0x3f;
-
-	return rate / ((div1 + 1) * (div2 + 1));
-}
-
-static unsigned long get_rate_ssi(struct clk *clk)
-{
-	unsigned long pdr2 = __raw_readl(MX35_CCM_PDR2);
-	unsigned long div1, div2, rate;
-
-	if (pdr2 & (1 << 6))
-		rate = get_rate_arm();
-	else
-		rate = get_rate_ppll();
-
-	switch (clk->id) {
-	default:
-	case 0:
-		div1 = pdr2 & 0x3f;
-		div2 = (pdr2 >> 24) & 0x7;
-		break;
-	case 1:
-		div1 = (pdr2 >> 8) & 0x3f;
-		div2 = (pdr2 >> 27) & 0x7;
-		break;
-	}
-
-	return rate / ((div1 + 1) * (div2 + 1));
-}
-
-static unsigned long get_rate_csi(struct clk *clk)
-{
-	unsigned long pdr2 = __raw_readl(MX35_CCM_PDR2);
-	unsigned long rate;
-
-	if (pdr2 & (1 << 7))
-		rate = get_rate_arm();
-	else
-		rate = get_rate_ppll();
-
-	return rate / (((pdr2 >> 16) & 0x3f) + 1);
-}
-
-static unsigned long get_rate_otg(struct clk *clk)
-{
-	unsigned long pdr4 = __raw_readl(MX35_CCM_PDR4);
-	unsigned long rate;
-
-	if (pdr4 & (1 << 9))
-		rate = get_rate_arm();
-	else
-		rate = get_rate_ppll();
-
-	return rate / (((pdr4 >> 22) & 0x3f) + 1);
-}
-
-static unsigned long get_rate_ipg_per(struct clk *clk)
-{
-	unsigned long pdr0 = __raw_readl(MXC_CCM_PDR0);
-	unsigned long pdr4 = __raw_readl(MX35_CCM_PDR4);
-	unsigned long div;
-
-	if (pdr0 & (1 << 26)) {
-		div = (pdr4 >> 16) & 0x3f;
-		return get_rate_arm() / (div + 1);
-	} else {
-		div = (pdr0 >> 12) & 0x7;
-		return get_rate_ahb(NULL) / (div + 1);
-	}
-}
-
-static unsigned long get_rate_hsp(struct clk *clk)
-{
-	unsigned long hsp_podf = (__raw_readl(MXC_CCM_PDR0) >> 20) & 0x03;
-	unsigned long fref = get_rate_mpll();
-
-	if (fref > 400 * 1000 * 1000) {
-		switch (hsp_podf) {
-		case 0:
-			return fref >> 2;
-		case 1:
-			return fref >> 3;
-		case 2:
-			return fref / 3;
-		}
-	} else {
-		switch (hsp_podf) {
-		case 0:
-		case 2:
-			return fref / 3;
-		case 1:
-			return fref / 6;
-		}
-	}
-
-	return 0;
-}
-
-static int clk_cgr_enable(struct clk *clk)
-{
-	u32 reg;
-
-	reg = __raw_readl(clk->enable_reg);
-	reg |= 3 << clk->enable_shift;
-	__raw_writel(reg, clk->enable_reg);
-
-	return 0;
-}
-
-static void clk_cgr_disable(struct clk *clk)
-{
-	u32 reg;
-
-	reg = __raw_readl(clk->enable_reg);
-	reg &= ~(3 << clk->enable_shift);
-	__raw_writel(reg, clk->enable_reg);
-}
-
-#define DEFINE_CLOCK(name, i, er, es, gr, sr)		\
-	static struct clk name = {			\
-		.id		= i,			\
-		.enable_reg	= er,			\
-		.enable_shift	= es,			\
-		.get_rate	= gr,			\
-		.set_rate	= sr,			\
-		.enable		= clk_cgr_enable,	\
-		.disable	= clk_cgr_disable,	\
-	}
-
-DEFINE_CLOCK(asrc_clk,   0, MX35_CCM_CGR0,  0, NULL, NULL);
-DEFINE_CLOCK(pata_clk,   0, MX35_CCM_CGR0,  2, get_rate_ipg, NULL);
-/* DEFINE_CLOCK(audmux_clk, 0, MX35_CCM_CGR0,  4, NULL, NULL); */
-DEFINE_CLOCK(can1_clk,   0, MX35_CCM_CGR0,  6, get_rate_ipg, NULL);
-DEFINE_CLOCK(can2_clk,   1, MX35_CCM_CGR0,  8, get_rate_ipg, NULL);
-DEFINE_CLOCK(cspi1_clk,  0, MX35_CCM_CGR0, 10, get_rate_ipg, NULL);
-DEFINE_CLOCK(cspi2_clk,  1, MX35_CCM_CGR0, 12, get_rate_ipg, NULL);
-DEFINE_CLOCK(ect_clk,    0, MX35_CCM_CGR0, 14, get_rate_ipg, NULL);
-DEFINE_CLOCK(edio_clk,   0, MX35_CCM_CGR0, 16, NULL, NULL);
-DEFINE_CLOCK(emi_clk,    0, MX35_CCM_CGR0, 18, get_rate_ipg, NULL);
-DEFINE_CLOCK(epit1_clk,  0, MX35_CCM_CGR0, 20, get_rate_ipg, NULL);
-DEFINE_CLOCK(epit2_clk,  1, MX35_CCM_CGR0, 22, get_rate_ipg, NULL);
-DEFINE_CLOCK(esai_clk,   0, MX35_CCM_CGR0, 24, NULL, NULL);
-DEFINE_CLOCK(esdhc1_clk, 0, MX35_CCM_CGR0, 26, get_rate_sdhc, NULL);
-DEFINE_CLOCK(esdhc2_clk, 1, MX35_CCM_CGR0, 28, get_rate_sdhc, NULL);
-DEFINE_CLOCK(esdhc3_clk, 2, MX35_CCM_CGR0, 30, get_rate_sdhc, NULL);
-
-DEFINE_CLOCK(fec_clk,    0, MX35_CCM_CGR1,  0, get_rate_ipg, NULL);
-DEFINE_CLOCK(gpio1_clk,  0, MX35_CCM_CGR1,  2, NULL, NULL);
-DEFINE_CLOCK(gpio2_clk,  1, MX35_CCM_CGR1,  4, NULL, NULL);
-DEFINE_CLOCK(gpio3_clk,  2, MX35_CCM_CGR1,  6, NULL, NULL);
-DEFINE_CLOCK(gpt_clk,    0, MX35_CCM_CGR1,  8, get_rate_ipg, NULL);
-DEFINE_CLOCK(i2c1_clk,   0, MX35_CCM_CGR1, 10, get_rate_ipg_per, NULL);
-DEFINE_CLOCK(i2c2_clk,   1, MX35_CCM_CGR1, 12, get_rate_ipg_per, NULL);
-DEFINE_CLOCK(i2c3_clk,   2, MX35_CCM_CGR1, 14, get_rate_ipg_per, NULL);
-DEFINE_CLOCK(iomuxc_clk, 0, MX35_CCM_CGR1, 16, NULL, NULL);
-DEFINE_CLOCK(ipu_clk,    0, MX35_CCM_CGR1, 18, get_rate_hsp, NULL);
-DEFINE_CLOCK(kpp_clk,    0, MX35_CCM_CGR1, 20, get_rate_ipg, NULL);
-DEFINE_CLOCK(mlb_clk,    0, MX35_CCM_CGR1, 22, get_rate_ahb, NULL);
-DEFINE_CLOCK(mshc_clk,   0, MX35_CCM_CGR1, 24, get_rate_mshc, NULL);
-DEFINE_CLOCK(owire_clk,  0, MX35_CCM_CGR1, 26, get_rate_ipg_per, NULL);
-DEFINE_CLOCK(pwm_clk,    0, MX35_CCM_CGR1, 28, get_rate_ipg_per, NULL);
-DEFINE_CLOCK(rngc_clk,   0, MX35_CCM_CGR1, 30, get_rate_ipg, NULL);
-
-DEFINE_CLOCK(rtc_clk,    0, MX35_CCM_CGR2,  0, get_rate_ipg, NULL);
-DEFINE_CLOCK(rtic_clk,   0, MX35_CCM_CGR2,  2, get_rate_ahb, NULL);
-DEFINE_CLOCK(scc_clk,    0, MX35_CCM_CGR2,  4, get_rate_ipg, NULL);
-DEFINE_CLOCK(sdma_clk,   0, MX35_CCM_CGR2,  6, NULL, NULL);
-DEFINE_CLOCK(spba_clk,   0, MX35_CCM_CGR2,  8, get_rate_ipg, NULL);
-DEFINE_CLOCK(spdif_clk,  0, MX35_CCM_CGR2, 10, NULL, NULL);
-DEFINE_CLOCK(ssi1_clk,   0, MX35_CCM_CGR2, 12, get_rate_ssi, NULL);
-DEFINE_CLOCK(ssi2_clk,   1, MX35_CCM_CGR2, 14, get_rate_ssi, NULL);
-DEFINE_CLOCK(uart1_clk,  0, MX35_CCM_CGR2, 16, get_rate_uart, NULL);
-DEFINE_CLOCK(uart2_clk,  1, MX35_CCM_CGR2, 18, get_rate_uart, NULL);
-DEFINE_CLOCK(uart3_clk,  2, MX35_CCM_CGR2, 20, get_rate_uart, NULL);
-DEFINE_CLOCK(usbotg_clk, 0, MX35_CCM_CGR2, 22, get_rate_otg, NULL);
-DEFINE_CLOCK(wdog_clk,   0, MX35_CCM_CGR2, 24, NULL, NULL);
-DEFINE_CLOCK(max_clk,    0, MX35_CCM_CGR2, 26, NULL, NULL);
-DEFINE_CLOCK(audmux_clk, 0, MX35_CCM_CGR2, 30, NULL, NULL);
-
-DEFINE_CLOCK(csi_clk,    0, MX35_CCM_CGR3,  0, get_rate_csi, NULL);
-DEFINE_CLOCK(iim_clk,    0, MX35_CCM_CGR3,  2, NULL, NULL);
-DEFINE_CLOCK(gpu2d_clk,  0, MX35_CCM_CGR3,  4, NULL, NULL);
-
-DEFINE_CLOCK(usbahb_clk, 0, 0,         0, get_rate_ahb, NULL);
-
-static int clk_dummy_enable(struct clk *clk)
-{
-	return 0;
-}
-
-static void clk_dummy_disable(struct clk *clk)
-{
-}
-
-static unsigned long get_rate_nfc(struct clk *clk)
-{
-	unsigned long div1;
-
-	div1 = (__raw_readl(MX35_CCM_PDR4) >> 28) + 1;
-
-	return get_rate_ahb(NULL) / div1;
-}
-
-/* NAND Controller: It seems it can't be disabled */
-static struct clk nfc_clk = {
-	.id		= 0,
-	.enable_reg	= 0,
-	.enable_shift	= 0,
-	.get_rate	= get_rate_nfc,
-	.set_rate	= NULL, /* set_rate_nfc, */
-	.enable		= clk_dummy_enable,
-	.disable	= clk_dummy_disable
-};
-
-#define _REGISTER_CLOCK(d, n, c)	\
-	{				\
-		.dev_id = d,		\
-		.con_id = n,		\
-		.clk = &c,		\
-	},
-
-static struct clk_lookup lookups[] = {
-	_REGISTER_CLOCK(NULL, "asrc", asrc_clk)
-	_REGISTER_CLOCK("pata_imx", NULL, pata_clk)
-	_REGISTER_CLOCK("flexcan.0", NULL, can1_clk)
-	_REGISTER_CLOCK("flexcan.1", NULL, can2_clk)
-	_REGISTER_CLOCK("imx35-cspi.0", NULL, cspi1_clk)
-	_REGISTER_CLOCK("imx35-cspi.1", NULL, cspi2_clk)
-	_REGISTER_CLOCK(NULL, "ect", ect_clk)
-	_REGISTER_CLOCK(NULL, "edio", edio_clk)
-	_REGISTER_CLOCK(NULL, "emi", emi_clk)
-	_REGISTER_CLOCK("imx-epit.0", NULL, epit1_clk)
-	_REGISTER_CLOCK("imx-epit.1", NULL, epit2_clk)
-	_REGISTER_CLOCK(NULL, "esai", esai_clk)
-	_REGISTER_CLOCK("sdhci-esdhc-imx35.0", NULL, esdhc1_clk)
-	_REGISTER_CLOCK("sdhci-esdhc-imx35.1", NULL, esdhc2_clk)
-	_REGISTER_CLOCK("sdhci-esdhc-imx35.2", NULL, esdhc3_clk)
-	/* i.mx35 has the i.mx27 type fec */
-	_REGISTER_CLOCK("imx27-fec.0", NULL, fec_clk)
-	_REGISTER_CLOCK(NULL, "gpio", gpio1_clk)
-	_REGISTER_CLOCK(NULL, "gpio", gpio2_clk)
-	_REGISTER_CLOCK(NULL, "gpio", gpio3_clk)
-	_REGISTER_CLOCK("gpt.0", NULL, gpt_clk)
-	_REGISTER_CLOCK("imx-i2c.0", NULL, i2c1_clk)
-	_REGISTER_CLOCK("imx-i2c.1", NULL, i2c2_clk)
-	_REGISTER_CLOCK("imx-i2c.2", NULL, i2c3_clk)
-	_REGISTER_CLOCK(NULL, "iomuxc", iomuxc_clk)
-	_REGISTER_CLOCK("ipu-core", NULL, ipu_clk)
-	_REGISTER_CLOCK("mx3_sdc_fb", NULL, ipu_clk)
-	_REGISTER_CLOCK(NULL, "kpp", kpp_clk)
-	_REGISTER_CLOCK(NULL, "mlb", mlb_clk)
-	_REGISTER_CLOCK(NULL, "mshc", mshc_clk)
-	_REGISTER_CLOCK("mxc_w1", NULL, owire_clk)
-	_REGISTER_CLOCK(NULL, "pwm", pwm_clk)
-	_REGISTER_CLOCK(NULL, "rngc", rngc_clk)
-	_REGISTER_CLOCK(NULL, "rtc", rtc_clk)
-	_REGISTER_CLOCK(NULL, "rtic", rtic_clk)
-	_REGISTER_CLOCK(NULL, "scc", scc_clk)
-	_REGISTER_CLOCK("imx35-sdma", NULL, sdma_clk)
-	_REGISTER_CLOCK(NULL, "spba", spba_clk)
-	_REGISTER_CLOCK(NULL, "spdif", spdif_clk)
-	_REGISTER_CLOCK("imx-ssi.0", NULL, ssi1_clk)
-	_REGISTER_CLOCK("imx-ssi.1", NULL, ssi2_clk)
-	/* i.mx35 has the i.mx21 type uart */
-	_REGISTER_CLOCK("imx21-uart.0", NULL, uart1_clk)
-	_REGISTER_CLOCK("imx21-uart.1", NULL, uart2_clk)
-	_REGISTER_CLOCK("imx21-uart.2", NULL, uart3_clk)
-	_REGISTER_CLOCK("mxc-ehci.0", "usb", usbotg_clk)
-	_REGISTER_CLOCK("mxc-ehci.1", "usb", usbotg_clk)
-	_REGISTER_CLOCK("mxc-ehci.2", "usb", usbotg_clk)
-	_REGISTER_CLOCK("fsl-usb2-udc", "usb", usbotg_clk)
-	_REGISTER_CLOCK("fsl-usb2-udc", "usb_ahb", usbahb_clk)
-	_REGISTER_CLOCK("imx2-wdt.0", NULL, wdog_clk)
-	_REGISTER_CLOCK(NULL, "max", max_clk)
-	_REGISTER_CLOCK(NULL, "audmux", audmux_clk)
-	_REGISTER_CLOCK("mx3-camera.0", NULL, csi_clk)
-	_REGISTER_CLOCK(NULL, "iim", iim_clk)
-	_REGISTER_CLOCK(NULL, "gpu2d", gpu2d_clk)
-	_REGISTER_CLOCK("mxc_nand.0", NULL, nfc_clk)
-};
-
-int __init mx35_clocks_init(void)
-{
-	unsigned int cgr2 = 3 << 26;
-
-#if defined(CONFIG_DEBUG_LL) && !defined(CONFIG_DEBUG_ICEDCC)
-	cgr2 |= 3 << 16;
-#endif
-
-	clkdev_add_table(lookups, ARRAY_SIZE(lookups));
-
-	/* Turn off all clocks except the ones we need to survive, namely:
-	 * EMI, GPIO1/2/3, GPT, IOMUX, MAX and possibly the UART
-	 */
-	__raw_writel((3 << 18), MX35_CCM_CGR0);
-	__raw_writel((3 << 2) | (3 << 4) | (3 << 6) | (3 << 8) | (3 << 16),
-			MX35_CCM_CGR1);
-	__raw_writel(cgr2, MX35_CCM_CGR2);
-	__raw_writel(0, MX35_CCM_CGR3);
-
-	clk_enable(&iim_clk);
-	imx_print_silicon_rev("i.MX35", mx35_revision());
-	clk_disable(&iim_clk);
-
-	/*
-	 * Check if we came up in internal boot mode. If yes, we need some
-	 * extra clocks turned on, otherwise the MX35 boot ROM code will
-	 * hang after a watchdog reset.
-	 */
-	if (!(__raw_readl(MX35_CCM_RCSR) & (3 << 10))) {
-		/* Additionally turn on UART1, SCC, and IIM clocks */
-		clk_enable(&iim_clk);
-		clk_enable(&uart1_clk);
-		clk_enable(&scc_clk);
-	}
-
-#ifdef CONFIG_MXC_USE_EPIT
-	epit_timer_init(&epit1_clk,
-			MX35_IO_ADDRESS(MX35_EPIT1_BASE_ADDR), MX35_INT_EPIT1);
-#else
-	mxc_timer_init(&gpt_clk,
-			MX35_IO_ADDRESS(MX35_GPT1_BASE_ADDR), MX35_INT_GPT);
-#endif
-
-	return 0;
-}
diff --git a/arch/arm/mach-imx/clock-imx6q.c b/arch/arm/mach-imx/clock-imx6q.c
deleted file mode 100644
index 111c328..0000000
--- a/arch/arm/mach-imx/clock-imx6q.c
+++ /dev/null
@@ -1,2111 +0,0 @@
-/*
- * Copyright 2011 Freescale Semiconductor, Inc.
- * Copyright 2011 Linaro Ltd.
- *
- * The code contained herein is licensed under the GNU General Public
- * License. You may obtain a copy of the GNU General Public License
- * Version 2 or later at the following locations:
- *
- * http://www.opensource.org/licenses/gpl-license.html
- * http://www.gnu.org/copyleft/gpl.html
- */
-
-#include <linux/init.h>
-#include <linux/types.h>
-#include <linux/clk.h>
-#include <linux/clkdev.h>
-#include <linux/io.h>
-#include <linux/of.h>
-#include <linux/of_address.h>
-#include <linux/of_irq.h>
-#include <asm/div64.h>
-#include <asm/mach/map.h>
-#include <mach/clock.h>
-#include <mach/common.h>
-#include <mach/hardware.h>
-
-#define PLL_BASE		IMX_IO_ADDRESS(MX6Q_ANATOP_BASE_ADDR)
-#define PLL1_SYS		(PLL_BASE + 0x000)
-#define PLL2_BUS		(PLL_BASE + 0x030)
-#define PLL3_USB_OTG		(PLL_BASE + 0x010)
-#define PLL4_AUDIO		(PLL_BASE + 0x070)
-#define PLL5_VIDEO		(PLL_BASE + 0x0a0)
-#define PLL6_MLB		(PLL_BASE + 0x0d0)
-#define PLL7_USB_HOST		(PLL_BASE + 0x020)
-#define PLL8_ENET		(PLL_BASE + 0x0e0)
-#define PFD_480			(PLL_BASE + 0x0f0)
-#define PFD_528			(PLL_BASE + 0x100)
-#define PLL_NUM_OFFSET		0x010
-#define PLL_DENOM_OFFSET	0x020
-
-#define PFD0			7
-#define PFD1			15
-#define PFD2			23
-#define PFD3			31
-#define PFD_FRAC_MASK		0x3f
-
-#define BM_PLL_BYPASS			(0x1 << 16)
-#define BM_PLL_ENABLE			(0x1 << 13)
-#define BM_PLL_POWER_DOWN		(0x1 << 12)
-#define BM_PLL_LOCK			(0x1 << 31)
-#define BP_PLL_SYS_DIV_SELECT		0
-#define BM_PLL_SYS_DIV_SELECT		(0x7f << 0)
-#define BP_PLL_BUS_DIV_SELECT		0
-#define BM_PLL_BUS_DIV_SELECT		(0x1 << 0)
-#define BP_PLL_USB_DIV_SELECT		0
-#define BM_PLL_USB_DIV_SELECT		(0x3 << 0)
-#define BP_PLL_AV_DIV_SELECT		0
-#define BM_PLL_AV_DIV_SELECT		(0x7f << 0)
-#define BP_PLL_ENET_DIV_SELECT		0
-#define BM_PLL_ENET_DIV_SELECT		(0x3 << 0)
-#define BM_PLL_ENET_EN_PCIE		(0x1 << 19)
-#define BM_PLL_ENET_EN_SATA		(0x1 << 20)
-
-#define CCM_BASE	IMX_IO_ADDRESS(MX6Q_CCM_BASE_ADDR)
-#define CCR		(CCM_BASE + 0x00)
-#define CCDR		(CCM_BASE + 0x04)
-#define CSR		(CCM_BASE + 0x08)
-#define CCSR		(CCM_BASE + 0x0c)
-#define CACRR		(CCM_BASE + 0x10)
-#define CBCDR		(CCM_BASE + 0x14)
-#define CBCMR		(CCM_BASE + 0x18)
-#define CSCMR1		(CCM_BASE + 0x1c)
-#define CSCMR2		(CCM_BASE + 0x20)
-#define CSCDR1		(CCM_BASE + 0x24)
-#define CS1CDR		(CCM_BASE + 0x28)
-#define CS2CDR		(CCM_BASE + 0x2c)
-#define CDCDR		(CCM_BASE + 0x30)
-#define CHSCCDR		(CCM_BASE + 0x34)
-#define CSCDR2		(CCM_BASE + 0x38)
-#define CSCDR3		(CCM_BASE + 0x3c)
-#define CSCDR4		(CCM_BASE + 0x40)
-#define CWDR		(CCM_BASE + 0x44)
-#define CDHIPR		(CCM_BASE + 0x48)
-#define CDCR		(CCM_BASE + 0x4c)
-#define CTOR		(CCM_BASE + 0x50)
-#define CLPCR		(CCM_BASE + 0x54)
-#define CISR		(CCM_BASE + 0x58)
-#define CIMR		(CCM_BASE + 0x5c)
-#define CCOSR		(CCM_BASE + 0x60)
-#define CGPR		(CCM_BASE + 0x64)
-#define CCGR0		(CCM_BASE + 0x68)
-#define CCGR1		(CCM_BASE + 0x6c)
-#define CCGR2		(CCM_BASE + 0x70)
-#define CCGR3		(CCM_BASE + 0x74)
-#define CCGR4		(CCM_BASE + 0x78)
-#define CCGR5		(CCM_BASE + 0x7c)
-#define CCGR6		(CCM_BASE + 0x80)
-#define CCGR7		(CCM_BASE + 0x84)
-#define CMEOR		(CCM_BASE + 0x88)
-
-#define CG0		0
-#define CG1		2
-#define CG2		4
-#define CG3		6
-#define CG4		8
-#define CG5		10
-#define CG6		12
-#define CG7		14
-#define CG8		16
-#define CG9		18
-#define CG10		20
-#define CG11		22
-#define CG12		24
-#define CG13		26
-#define CG14		28
-#define CG15		30
-
-#define BM_CCSR_PLL1_SW_SEL		(0x1 << 2)
-#define BM_CCSR_STEP_SEL		(0x1 << 8)
-
-#define BP_CACRR_ARM_PODF		0
-#define BM_CACRR_ARM_PODF		(0x7 << 0)
-
-#define BP_CBCDR_PERIPH2_CLK2_PODF	0
-#define BM_CBCDR_PERIPH2_CLK2_PODF	(0x7 << 0)
-#define BP_CBCDR_MMDC_CH1_AXI_PODF	3
-#define BM_CBCDR_MMDC_CH1_AXI_PODF	(0x7 << 3)
-#define BP_CBCDR_AXI_SEL		6
-#define BM_CBCDR_AXI_SEL		(0x3 << 6)
-#define BP_CBCDR_IPG_PODF		8
-#define BM_CBCDR_IPG_PODF		(0x3 << 8)
-#define BP_CBCDR_AHB_PODF		10
-#define BM_CBCDR_AHB_PODF		(0x7 << 10)
-#define BP_CBCDR_AXI_PODF		16
-#define BM_CBCDR_AXI_PODF		(0x7 << 16)
-#define BP_CBCDR_MMDC_CH0_AXI_PODF	19
-#define BM_CBCDR_MMDC_CH0_AXI_PODF	(0x7 << 19)
-#define BP_CBCDR_PERIPH_CLK_SEL		25
-#define BM_CBCDR_PERIPH_CLK_SEL		(0x1 << 25)
-#define BP_CBCDR_PERIPH2_CLK_SEL	26
-#define BM_CBCDR_PERIPH2_CLK_SEL	(0x1 << 26)
-#define BP_CBCDR_PERIPH_CLK2_PODF	27
-#define BM_CBCDR_PERIPH_CLK2_PODF	(0x7 << 27)
-
-#define BP_CBCMR_GPU2D_AXI_SEL		0
-#define BM_CBCMR_GPU2D_AXI_SEL		(0x1 << 0)
-#define BP_CBCMR_GPU3D_AXI_SEL		1
-#define BM_CBCMR_GPU3D_AXI_SEL		(0x1 << 1)
-#define BP_CBCMR_GPU3D_CORE_SEL		4
-#define BM_CBCMR_GPU3D_CORE_SEL		(0x3 << 4)
-#define BP_CBCMR_GPU3D_SHADER_SEL	8
-#define BM_CBCMR_GPU3D_SHADER_SEL	(0x3 << 8)
-#define BP_CBCMR_PCIE_AXI_SEL		10
-#define BM_CBCMR_PCIE_AXI_SEL		(0x1 << 10)
-#define BP_CBCMR_VDO_AXI_SEL		11
-#define BM_CBCMR_VDO_AXI_SEL		(0x1 << 11)
-#define BP_CBCMR_PERIPH_CLK2_SEL	12
-#define BM_CBCMR_PERIPH_CLK2_SEL	(0x3 << 12)
-#define BP_CBCMR_VPU_AXI_SEL		14
-#define BM_CBCMR_VPU_AXI_SEL		(0x3 << 14)
-#define BP_CBCMR_GPU2D_CORE_SEL		16
-#define BM_CBCMR_GPU2D_CORE_SEL		(0x3 << 16)
-#define BP_CBCMR_PRE_PERIPH_CLK_SEL	18
-#define BM_CBCMR_PRE_PERIPH_CLK_SEL	(0x3 << 18)
-#define BP_CBCMR_PERIPH2_CLK2_SEL	20
-#define BM_CBCMR_PERIPH2_CLK2_SEL	(0x1 << 20)
-#define BP_CBCMR_PRE_PERIPH2_CLK_SEL	21
-#define BM_CBCMR_PRE_PERIPH2_CLK_SEL	(0x3 << 21)
-#define BP_CBCMR_GPU2D_CORE_PODF	23
-#define BM_CBCMR_GPU2D_CORE_PODF	(0x7 << 23)
-#define BP_CBCMR_GPU3D_CORE_PODF	26
-#define BM_CBCMR_GPU3D_CORE_PODF	(0x7 << 26)
-#define BP_CBCMR_GPU3D_SHADER_PODF	29
-#define BM_CBCMR_GPU3D_SHADER_PODF	(0x7 << 29)
-
-#define BP_CSCMR1_PERCLK_PODF		0
-#define BM_CSCMR1_PERCLK_PODF		(0x3f << 0)
-#define BP_CSCMR1_SSI1_SEL		10
-#define BM_CSCMR1_SSI1_SEL		(0x3 << 10)
-#define BP_CSCMR1_SSI2_SEL		12
-#define BM_CSCMR1_SSI2_SEL		(0x3 << 12)
-#define BP_CSCMR1_SSI3_SEL		14
-#define BM_CSCMR1_SSI3_SEL		(0x3 << 14)
-#define BP_CSCMR1_USDHC1_SEL		16
-#define BM_CSCMR1_USDHC1_SEL		(0x1 << 16)
-#define BP_CSCMR1_USDHC2_SEL		17
-#define BM_CSCMR1_USDHC2_SEL		(0x1 << 17)
-#define BP_CSCMR1_USDHC3_SEL		18
-#define BM_CSCMR1_USDHC3_SEL		(0x1 << 18)
-#define BP_CSCMR1_USDHC4_SEL		19
-#define BM_CSCMR1_USDHC4_SEL		(0x1 << 19)
-#define BP_CSCMR1_EMI_PODF		20
-#define BM_CSCMR1_EMI_PODF		(0x7 << 20)
-#define BP_CSCMR1_EMI_SLOW_PODF		23
-#define BM_CSCMR1_EMI_SLOW_PODF		(0x7 << 23)
-#define BP_CSCMR1_EMI_SEL		27
-#define BM_CSCMR1_EMI_SEL		(0x3 << 27)
-#define BP_CSCMR1_EMI_SLOW_SEL		29
-#define BM_CSCMR1_EMI_SLOW_SEL		(0x3 << 29)
-
-#define BP_CSCMR2_CAN_PODF		2
-#define BM_CSCMR2_CAN_PODF		(0x3f << 2)
-#define BM_CSCMR2_LDB_DI0_IPU_DIV	(0x1 << 10)
-#define BM_CSCMR2_LDB_DI1_IPU_DIV	(0x1 << 11)
-#define BP_CSCMR2_ESAI_SEL		19
-#define BM_CSCMR2_ESAI_SEL		(0x3 << 19)
-
-#define BP_CSCDR1_UART_PODF		0
-#define BM_CSCDR1_UART_PODF		(0x3f << 0)
-#define BP_CSCDR1_USDHC1_PODF		11
-#define BM_CSCDR1_USDHC1_PODF		(0x7 << 11)
-#define BP_CSCDR1_USDHC2_PODF		16
-#define BM_CSCDR1_USDHC2_PODF		(0x7 << 16)
-#define BP_CSCDR1_USDHC3_PODF		19
-#define BM_CSCDR1_USDHC3_PODF		(0x7 << 19)
-#define BP_CSCDR1_USDHC4_PODF		22
-#define BM_CSCDR1_USDHC4_PODF		(0x7 << 22)
-#define BP_CSCDR1_VPU_AXI_PODF		25
-#define BM_CSCDR1_VPU_AXI_PODF		(0x7 << 25)
-
-#define BP_CS1CDR_SSI1_PODF		0
-#define BM_CS1CDR_SSI1_PODF		(0x3f << 0)
-#define BP_CS1CDR_SSI1_PRED		6
-#define BM_CS1CDR_SSI1_PRED		(0x7 << 6)
-#define BP_CS1CDR_ESAI_PRED		9
-#define BM_CS1CDR_ESAI_PRED		(0x7 << 9)
-#define BP_CS1CDR_SSI3_PODF		16
-#define BM_CS1CDR_SSI3_PODF		(0x3f << 16)
-#define BP_CS1CDR_SSI3_PRED		22
-#define BM_CS1CDR_SSI3_PRED		(0x7 << 22)
-#define BP_CS1CDR_ESAI_PODF		25
-#define BM_CS1CDR_ESAI_PODF		(0x7 << 25)
-
-#define BP_CS2CDR_SSI2_PODF		0
-#define BM_CS2CDR_SSI2_PODF		(0x3f << 0)
-#define BP_CS2CDR_SSI2_PRED		6
-#define BM_CS2CDR_SSI2_PRED		(0x7 << 6)
-#define BP_CS2CDR_LDB_DI0_SEL		9
-#define BM_CS2CDR_LDB_DI0_SEL		(0x7 << 9)
-#define BP_CS2CDR_LDB_DI1_SEL		12
-#define BM_CS2CDR_LDB_DI1_SEL		(0x7 << 12)
-#define BP_CS2CDR_ENFC_SEL		16
-#define BM_CS2CDR_ENFC_SEL		(0x3 << 16)
-#define BP_CS2CDR_ENFC_PRED		18
-#define BM_CS2CDR_ENFC_PRED		(0x7 << 18)
-#define BP_CS2CDR_ENFC_PODF		21
-#define BM_CS2CDR_ENFC_PODF		(0x3f << 21)
-
-#define BP_CDCDR_ASRC_SERIAL_SEL	7
-#define BM_CDCDR_ASRC_SERIAL_SEL	(0x3 << 7)
-#define BP_CDCDR_ASRC_SERIAL_PODF	9
-#define BM_CDCDR_ASRC_SERIAL_PODF	(0x7 << 9)
-#define BP_CDCDR_ASRC_SERIAL_PRED	12
-#define BM_CDCDR_ASRC_SERIAL_PRED	(0x7 << 12)
-#define BP_CDCDR_SPDIF_SEL		20
-#define BM_CDCDR_SPDIF_SEL		(0x3 << 20)
-#define BP_CDCDR_SPDIF_PODF		22
-#define BM_CDCDR_SPDIF_PODF		(0x7 << 22)
-#define BP_CDCDR_SPDIF_PRED		25
-#define BM_CDCDR_SPDIF_PRED		(0x7 << 25)
-#define BP_CDCDR_HSI_TX_PODF		29
-#define BM_CDCDR_HSI_TX_PODF		(0x7 << 29)
-#define BP_CDCDR_HSI_TX_SEL		28
-#define BM_CDCDR_HSI_TX_SEL		(0x1 << 28)
-
-#define BP_CHSCCDR_IPU1_DI0_SEL		0
-#define BM_CHSCCDR_IPU1_DI0_SEL		(0x7 << 0)
-#define BP_CHSCCDR_IPU1_DI0_PRE_PODF	3
-#define BM_CHSCCDR_IPU1_DI0_PRE_PODF	(0x7 << 3)
-#define BP_CHSCCDR_IPU1_DI0_PRE_SEL	6
-#define BM_CHSCCDR_IPU1_DI0_PRE_SEL	(0x7 << 6)
-#define BP_CHSCCDR_IPU1_DI1_SEL		9
-#define BM_CHSCCDR_IPU1_DI1_SEL		(0x7 << 9)
-#define BP_CHSCCDR_IPU1_DI1_PRE_PODF	12
-#define BM_CHSCCDR_IPU1_DI1_PRE_PODF	(0x7 << 12)
-#define BP_CHSCCDR_IPU1_DI1_PRE_SEL	15
-#define BM_CHSCCDR_IPU1_DI1_PRE_SEL	(0x7 << 15)
-
-#define BP_CSCDR2_IPU2_DI0_SEL		0
-#define BM_CSCDR2_IPU2_DI0_SEL		(0x7)
-#define BP_CSCDR2_IPU2_DI0_PRE_PODF	3
-#define BM_CSCDR2_IPU2_DI0_PRE_PODF	(0x7 << 3)
-#define BP_CSCDR2_IPU2_DI0_PRE_SEL	6
-#define BM_CSCDR2_IPU2_DI0_PRE_SEL	(0x7 << 6)
-#define BP_CSCDR2_IPU2_DI1_SEL		9
-#define BM_CSCDR2_IPU2_DI1_SEL		(0x7 << 9)
-#define BP_CSCDR2_IPU2_DI1_PRE_PODF	12
-#define BM_CSCDR2_IPU2_DI1_PRE_PODF	(0x7 << 12)
-#define BP_CSCDR2_IPU2_DI1_PRE_SEL	15
-#define BM_CSCDR2_IPU2_DI1_PRE_SEL	(0x7 << 15)
-#define BP_CSCDR2_ECSPI_CLK_PODF	19
-#define BM_CSCDR2_ECSPI_CLK_PODF	(0x3f << 19)
-
-#define BP_CSCDR3_IPU1_HSP_SEL		9
-#define BM_CSCDR3_IPU1_HSP_SEL		(0x3 << 9)
-#define BP_CSCDR3_IPU1_HSP_PODF		11
-#define BM_CSCDR3_IPU1_HSP_PODF		(0x7 << 11)
-#define BP_CSCDR3_IPU2_HSP_SEL		14
-#define BM_CSCDR3_IPU2_HSP_SEL		(0x3 << 14)
-#define BP_CSCDR3_IPU2_HSP_PODF		16
-#define BM_CSCDR3_IPU2_HSP_PODF		(0x7 << 16)
-
-#define BM_CDHIPR_AXI_PODF_BUSY		(0x1 << 0)
-#define BM_CDHIPR_AHB_PODF_BUSY		(0x1 << 1)
-#define BM_CDHIPR_MMDC_CH1_PODF_BUSY	(0x1 << 2)
-#define BM_CDHIPR_PERIPH2_SEL_BUSY	(0x1 << 3)
-#define BM_CDHIPR_MMDC_CH0_PODF_BUSY	(0x1 << 4)
-#define BM_CDHIPR_PERIPH_SEL_BUSY	(0x1 << 5)
-#define BM_CDHIPR_ARM_PODF_BUSY		(0x1 << 16)
-
-#define BP_CLPCR_LPM			0
-#define BM_CLPCR_LPM			(0x3 << 0)
-#define BM_CLPCR_BYPASS_PMIC_READY	(0x1 << 2)
-#define BM_CLPCR_ARM_CLK_DIS_ON_LPM	(0x1 << 5)
-#define BM_CLPCR_SBYOS			(0x1 << 6)
-#define BM_CLPCR_DIS_REF_OSC		(0x1 << 7)
-#define BM_CLPCR_VSTBY			(0x1 << 8)
-#define BP_CLPCR_STBY_COUNT		9
-#define BM_CLPCR_STBY_COUNT		(0x3 << 9)
-#define BM_CLPCR_COSC_PWRDOWN		(0x1 << 11)
-#define BM_CLPCR_WB_PER_AT_LPM		(0x1 << 16)
-#define BM_CLPCR_WB_CORE_AT_LPM		(0x1 << 17)
-#define BM_CLPCR_BYP_MMDC_CH0_LPM_HS	(0x1 << 19)
-#define BM_CLPCR_BYP_MMDC_CH1_LPM_HS	(0x1 << 21)
-#define BM_CLPCR_MASK_CORE0_WFI		(0x1 << 22)
-#define BM_CLPCR_MASK_CORE1_WFI		(0x1 << 23)
-#define BM_CLPCR_MASK_CORE2_WFI		(0x1 << 24)
-#define BM_CLPCR_MASK_CORE3_WFI		(0x1 << 25)
-#define BM_CLPCR_MASK_SCU_IDLE		(0x1 << 26)
-#define BM_CLPCR_MASK_L2CC_IDLE		(0x1 << 27)
-
-#define BP_CCOSR_CKO1_EN		7
-#define BP_CCOSR_CKO1_PODF		4
-#define BM_CCOSR_CKO1_PODF		(0x7 << 4)
-#define BP_CCOSR_CKO1_SEL		0
-#define BM_CCOSR_CKO1_SEL		(0xf << 0)
-
-#define FREQ_480M	480000000
-#define FREQ_528M	528000000
-#define FREQ_594M	594000000
-#define FREQ_650M	650000000
-#define FREQ_1300M	1300000000
-
-static struct clk pll1_sys;
-static struct clk pll2_bus;
-static struct clk pll3_usb_otg;
-static struct clk pll4_audio;
-static struct clk pll5_video;
-static struct clk pll6_mlb;
-static struct clk pll7_usb_host;
-static struct clk pll8_enet;
-static struct clk apbh_dma_clk;
-static struct clk arm_clk;
-static struct clk ipg_clk;
-static struct clk ahb_clk;
-static struct clk axi_clk;
-static struct clk mmdc_ch0_axi_clk;
-static struct clk mmdc_ch1_axi_clk;
-static struct clk periph_clk;
-static struct clk periph_pre_clk;
-static struct clk periph_clk2_clk;
-static struct clk periph2_clk;
-static struct clk periph2_pre_clk;
-static struct clk periph2_clk2_clk;
-static struct clk gpu2d_core_clk;
-static struct clk gpu3d_core_clk;
-static struct clk gpu3d_shader_clk;
-static struct clk ipg_perclk;
-static struct clk emi_clk;
-static struct clk emi_slow_clk;
-static struct clk can1_clk;
-static struct clk uart_clk;
-static struct clk usdhc1_clk;
-static struct clk usdhc2_clk;
-static struct clk usdhc3_clk;
-static struct clk usdhc4_clk;
-static struct clk vpu_clk;
-static struct clk hsi_tx_clk;
-static struct clk ipu1_di0_pre_clk;
-static struct clk ipu1_di1_pre_clk;
-static struct clk ipu2_di0_pre_clk;
-static struct clk ipu2_di1_pre_clk;
-static struct clk ipu1_clk;
-static struct clk ipu2_clk;
-static struct clk ssi1_clk;
-static struct clk ssi3_clk;
-static struct clk esai_clk;
-static struct clk ssi2_clk;
-static struct clk spdif_clk;
-static struct clk asrc_serial_clk;
-static struct clk gpu2d_axi_clk;
-static struct clk gpu3d_axi_clk;
-static struct clk pcie_clk;
-static struct clk vdo_axi_clk;
-static struct clk ldb_di0_clk;
-static struct clk ldb_di1_clk;
-static struct clk ipu1_di0_clk;
-static struct clk ipu1_di1_clk;
-static struct clk ipu2_di0_clk;
-static struct clk ipu2_di1_clk;
-static struct clk enfc_clk;
-static struct clk cko1_clk;
-static struct clk dummy_clk = {};
-
-static unsigned long external_high_reference;
-static unsigned long external_low_reference;
-static unsigned long oscillator_reference;
-
-static unsigned long get_oscillator_reference_clock_rate(struct clk *clk)
-{
-	return oscillator_reference;
-}
-
-static unsigned long get_high_reference_clock_rate(struct clk *clk)
-{
-	return external_high_reference;
-}
-
-static unsigned long get_low_reference_clock_rate(struct clk *clk)
-{
-	return external_low_reference;
-}
-
-static struct clk ckil_clk = {
-	.get_rate = get_low_reference_clock_rate,
-};
-
-static struct clk ckih_clk = {
-	.get_rate = get_high_reference_clock_rate,
-};
-
-static struct clk osc_clk = {
-	.get_rate = get_oscillator_reference_clock_rate,
-};
-
-static inline void __iomem *pll_get_reg_addr(struct clk *pll)
-{
-	if (pll == &pll1_sys)
-		return PLL1_SYS;
-	else if (pll == &pll2_bus)
-		return PLL2_BUS;
-	else if (pll == &pll3_usb_otg)
-		return PLL3_USB_OTG;
-	else if (pll == &pll4_audio)
-		return PLL4_AUDIO;
-	else if (pll == &pll5_video)
-		return PLL5_VIDEO;
-	else if (pll == &pll6_mlb)
-		return PLL6_MLB;
-	else if (pll == &pll7_usb_host)
-		return PLL7_USB_HOST;
-	else if (pll == &pll8_enet)
-		return PLL8_ENET;
-	else
-		BUG();
-
-	return NULL;
-}
-
-static int pll_enable(struct clk *clk)
-{
-	int timeout = 0x100000;
-	void __iomem *reg;
-	u32 val;
-
-	reg = pll_get_reg_addr(clk);
-	val = readl_relaxed(reg);
-	val &= ~BM_PLL_BYPASS;
-	val &= ~BM_PLL_POWER_DOWN;
-	/* 480MHz PLLs have the opposite definition for power bit */
-	if (clk == &pll3_usb_otg || clk == &pll7_usb_host)
-		val |= BM_PLL_POWER_DOWN;
-	writel_relaxed(val, reg);
-
-	/* Wait for PLL to lock */
-	while (!(readl_relaxed(reg) & BM_PLL_LOCK) && --timeout)
-		cpu_relax();
-
-	if (unlikely(!timeout))
-		return -EBUSY;
-
-	/* Enable the PLL output now */
-	val = readl_relaxed(reg);
-	val |= BM_PLL_ENABLE;
-	writel_relaxed(val, reg);
-
-	return 0;
-}
-
-static void pll_disable(struct clk *clk)
-{
-	void __iomem *reg;
-	u32 val;
-
-	reg = pll_get_reg_addr(clk);
-	val = readl_relaxed(reg);
-	val &= ~BM_PLL_ENABLE;
-	val |= BM_PLL_BYPASS;
-	val |= BM_PLL_POWER_DOWN;
-	if (clk == &pll3_usb_otg || clk == &pll7_usb_host)
-		val &= ~BM_PLL_POWER_DOWN;
-	writel_relaxed(val, reg);
-}
-
-static unsigned long pll1_sys_get_rate(struct clk *clk)
-{
-	u32 div = (readl_relaxed(PLL1_SYS) & BM_PLL_SYS_DIV_SELECT) >>
-		  BP_PLL_SYS_DIV_SELECT;
-
-	return clk_get_rate(clk->parent) * div / 2;
-}
-
-static int pll1_sys_set_rate(struct clk *clk, unsigned long rate)
-{
-	u32 val, div;
-
-	if (rate < FREQ_650M || rate > FREQ_1300M)
-		return -EINVAL;
-
-	div = rate * 2 / clk_get_rate(clk->parent);
-	val = readl_relaxed(PLL1_SYS);
-	val &= ~BM_PLL_SYS_DIV_SELECT;
-	val |= div << BP_PLL_SYS_DIV_SELECT;
-	writel_relaxed(val, PLL1_SYS);
-
-	return 0;
-}
-
-static unsigned long pll8_enet_get_rate(struct clk *clk)
-{
-	u32 div = (readl_relaxed(PLL8_ENET) & BM_PLL_ENET_DIV_SELECT) >>
-		  BP_PLL_ENET_DIV_SELECT;
-
-	switch (div) {
-	case 0:
-		return 25000000;
-	case 1:
-		return 50000000;
-	case 2:
-		return 100000000;
-	case 3:
-		return 125000000;
-	}
-
-	return 0;
-}
-
-static int pll8_enet_set_rate(struct clk *clk, unsigned long rate)
-{
-	u32 val, div;
-
-	switch (rate) {
-	case 25000000:
-		div = 0;
-		break;
-	case 50000000:
-		div = 1;
-		break;
-	case 100000000:
-		div = 2;
-		break;
-	case 125000000:
-		div = 3;
-		break;
-	default:
-		return -EINVAL;
-	}
-
-	val = readl_relaxed(PLL8_ENET);
-	val &= ~BM_PLL_ENET_DIV_SELECT;
-	val |= div << BP_PLL_ENET_DIV_SELECT;
-	writel_relaxed(val, PLL8_ENET);
-
-	return 0;
-}
-
-static unsigned long pll_av_get_rate(struct clk *clk)
-{
-	void __iomem *reg = (clk == &pll4_audio) ? PLL4_AUDIO : PLL5_VIDEO;
-	unsigned long parent_rate = clk_get_rate(clk->parent);
-	u32 mfn = readl_relaxed(reg + PLL_NUM_OFFSET);
-	u32 mfd = readl_relaxed(reg + PLL_DENOM_OFFSET);
-	u32 div = (readl_relaxed(reg) & BM_PLL_AV_DIV_SELECT) >>
-		  BP_PLL_AV_DIV_SELECT;
-
-	return (parent_rate * div) + ((parent_rate / mfd) * mfn);
-}
-
-static int pll_av_set_rate(struct clk *clk, unsigned long rate)
-{
-	void __iomem *reg = (clk == &pll4_audio) ? PLL4_AUDIO : PLL5_VIDEO;
-	unsigned int parent_rate = clk_get_rate(clk->parent);
-	u32 val, div;
-	u32 mfn, mfd = 1000000;
-	s64 temp64;
-
-	if (rate < FREQ_650M || rate > FREQ_1300M)
-		return -EINVAL;
-
-	div = rate / parent_rate;
-	temp64 = (u64) (rate - div * parent_rate);
-	temp64 *= mfd;
-	do_div(temp64, parent_rate);
-	mfn = temp64;
-
-	val = readl_relaxed(reg);
-	val &= ~BM_PLL_AV_DIV_SELECT;
-	val |= div << BP_PLL_AV_DIV_SELECT;
-	writel_relaxed(val, reg);
-	writel_relaxed(mfn, reg + PLL_NUM_OFFSET);
-	writel_relaxed(mfd, reg + PLL_DENOM_OFFSET);
-
-	return 0;
-}
-
-static void __iomem *pll_get_div_reg_bit(struct clk *clk, u32 *bp, u32 *bm)
-{
-	void __iomem *reg;
-
-	if (clk == &pll2_bus) {
-		reg = PLL2_BUS;
-		*bp = BP_PLL_BUS_DIV_SELECT;
-		*bm = BM_PLL_BUS_DIV_SELECT;
-	} else if (clk == &pll3_usb_otg) {
-		reg = PLL3_USB_OTG;
-		*bp = BP_PLL_USB_DIV_SELECT;
-		*bm = BM_PLL_USB_DIV_SELECT;
-	} else if (clk == &pll7_usb_host) {
-		reg = PLL7_USB_HOST;
-		*bp = BP_PLL_USB_DIV_SELECT;
-		*bm = BM_PLL_USB_DIV_SELECT;
-	} else {
-		BUG();
-	}
-
-	return reg;
-}
-
-static unsigned long pll_get_rate(struct clk *clk)
-{
-	void __iomem *reg;
-	u32 div, bp, bm;
-
-	reg = pll_get_div_reg_bit(clk, &bp, &bm);
-	div = (readl_relaxed(reg) & bm) >> bp;
-
-	return (div == 1) ? clk_get_rate(clk->parent) * 22 :
-			    clk_get_rate(clk->parent) * 20;
-}
-
-static int pll_set_rate(struct clk *clk, unsigned long rate)
-{
-	void __iomem *reg;
-	u32 val, div, bp, bm;
-
-	if (rate == FREQ_528M)
-		div = 1;
-	else if (rate == FREQ_480M)
-		div = 0;
-	else
-		return -EINVAL;
-
-	reg = pll_get_div_reg_bit(clk, &bp, &bm);
-	val = readl_relaxed(reg);
-	val &= ~bm;
-	val |= div << bp;
-	writel_relaxed(val, reg);
-
-	return 0;
-}
-
-#define pll2_bus_get_rate	pll_get_rate
-#define pll2_bus_set_rate	pll_set_rate
-#define pll3_usb_otg_get_rate	pll_get_rate
-#define pll3_usb_otg_set_rate	pll_set_rate
-#define pll7_usb_host_get_rate	pll_get_rate
-#define pll7_usb_host_set_rate	pll_set_rate
-#define pll4_audio_get_rate	pll_av_get_rate
-#define pll4_audio_set_rate	pll_av_set_rate
-#define pll5_video_get_rate	pll_av_get_rate
-#define pll5_video_set_rate	pll_av_set_rate
-#define pll6_mlb_get_rate	NULL
-#define pll6_mlb_set_rate	NULL
-
-#define DEF_PLL(name)					\
-	static struct clk name = {			\
-		.enable		= pll_enable,		\
-		.disable	= pll_disable,		\
-		.get_rate	= name##_get_rate,	\
-		.set_rate	= name##_set_rate,	\
-		.parent		= &osc_clk,		\
-	}
-
-DEF_PLL(pll1_sys);
-DEF_PLL(pll2_bus);
-DEF_PLL(pll3_usb_otg);
-DEF_PLL(pll4_audio);
-DEF_PLL(pll5_video);
-DEF_PLL(pll6_mlb);
-DEF_PLL(pll7_usb_host);
-DEF_PLL(pll8_enet);
-
-static unsigned long pfd_get_rate(struct clk *clk)
-{
-	u64 tmp = (u64) clk_get_rate(clk->parent) * 18;
-	u32 frac, bp_frac;
-
-	if (apbh_dma_clk.usecount == 0)
-		apbh_dma_clk.enable(&apbh_dma_clk);
-
-	bp_frac = clk->enable_shift - 7;
-	frac = readl_relaxed(clk->enable_reg) >> bp_frac & PFD_FRAC_MASK;
-	do_div(tmp, frac);
-
-	return tmp;
-}
-
-static int pfd_set_rate(struct clk *clk, unsigned long rate)
-{
-	u32 val, frac, bp_frac;
-	u64 tmp = (u64) clk_get_rate(clk->parent) * 18;
-
-	if (apbh_dma_clk.usecount == 0)
-		apbh_dma_clk.enable(&apbh_dma_clk);
-
-	/*
-	 * Round up the divider so that we don't set a rate
-	 * higher than what is requested
-	 */
-	tmp += rate / 2;
-	do_div(tmp, rate);
-	frac = tmp;
-	frac = (frac < 12) ? 12 : frac;
-	frac = (frac > 35) ? 35 : frac;
-
-	/*
-	 * The frac field always starts 7 bits below the
-	 * enable bit position
-	 */
-	bp_frac = clk->enable_shift - 7;
-	val = readl_relaxed(clk->enable_reg);
-	val &= ~(PFD_FRAC_MASK << bp_frac);
-	val |= frac << bp_frac;
-	writel_relaxed(val, clk->enable_reg);
-
-	tmp = (u64) clk_get_rate(clk->parent) * 18;
-	do_div(tmp, frac);
-
-	if (apbh_dma_clk.usecount == 0)
-		apbh_dma_clk.disable(&apbh_dma_clk);
-
-	return 0;
-}
-
-static unsigned long pfd_round_rate(struct clk *clk, unsigned long rate)
-{
-	u32 frac;
-	u64 tmp;
-
-	tmp = (u64) clk_get_rate(clk->parent) * 18;
-	tmp += rate / 2;
-	do_div(tmp, rate);
-	frac = tmp;
-	frac = (frac < 12) ? 12 : frac;
-	frac = (frac > 35) ? 35 : frac;
-	tmp = (u64) clk_get_rate(clk->parent) * 18;
-	do_div(tmp, frac);
-
-	return tmp;
-}
-
-static int pfd_enable(struct clk *clk)
-{
-	u32 val;
-
-	if (apbh_dma_clk.usecount == 0)
-		apbh_dma_clk.enable(&apbh_dma_clk);
-
-	val = readl_relaxed(clk->enable_reg);
-	val &= ~(1 << clk->enable_shift);
-	writel_relaxed(val, clk->enable_reg);
-
-	if (apbh_dma_clk.usecount == 0)
-		apbh_dma_clk.disable(&apbh_dma_clk);
-
-	return 0;
-}
-
-static void pfd_disable(struct clk *clk)
-{
-	u32 val;
-
-	if (apbh_dma_clk.usecount == 0)
-		apbh_dma_clk.enable(&apbh_dma_clk);
-
-	val = readl_relaxed(clk->enable_reg);
-	val |= 1 << clk->enable_shift;
-	writel_relaxed(val, clk->enable_reg);
-
-	if (apbh_dma_clk.usecount == 0)
-		apbh_dma_clk.disable(&apbh_dma_clk);
-}
-
-#define DEF_PFD(name, er, es, p)			\
-	static struct clk name = {			\
-		.enable_reg	= er,			\
-		.enable_shift	= es,			\
-		.enable		= pfd_enable,		\
-		.disable	= pfd_disable,		\
-		.get_rate	= pfd_get_rate,		\
-		.set_rate	= pfd_set_rate,		\
-		.round_rate	= pfd_round_rate,	\
-		.parent		= p,			\
-	}
-
-DEF_PFD(pll2_pfd_352m, PFD_528, PFD0, &pll2_bus);
-DEF_PFD(pll2_pfd_594m, PFD_528, PFD1, &pll2_bus);
-DEF_PFD(pll2_pfd_400m, PFD_528, PFD2, &pll2_bus);
-DEF_PFD(pll3_pfd_720m, PFD_480, PFD0, &pll3_usb_otg);
-DEF_PFD(pll3_pfd_540m, PFD_480, PFD1, &pll3_usb_otg);
-DEF_PFD(pll3_pfd_508m, PFD_480, PFD2, &pll3_usb_otg);
-DEF_PFD(pll3_pfd_454m, PFD_480, PFD3, &pll3_usb_otg);
-
-static unsigned long twd_clk_get_rate(struct clk *clk)
-{
-	return clk_get_rate(clk->parent) / 2;
-}
-
-static struct clk twd_clk = {
-	.parent = &arm_clk,
-	.get_rate = twd_clk_get_rate,
-};
-
-static unsigned long pll2_200m_get_rate(struct clk *clk)
-{
-	return clk_get_rate(clk->parent) / 2;
-}
-
-static struct clk pll2_200m = {
-	.parent = &pll2_pfd_400m,
-	.get_rate = pll2_200m_get_rate,
-};
-
-static unsigned long pll3_120m_get_rate(struct clk *clk)
-{
-	return clk_get_rate(clk->parent) / 4;
-}
-
-static struct clk pll3_120m = {
-	.parent = &pll3_usb_otg,
-	.get_rate = pll3_120m_get_rate,
-};
-
-static unsigned long pll3_80m_get_rate(struct clk *clk)
-{
-	return clk_get_rate(clk->parent) / 6;
-}
-
-static struct clk pll3_80m = {
-	.parent = &pll3_usb_otg,
-	.get_rate = pll3_80m_get_rate,
-};
-
-static unsigned long pll3_60m_get_rate(struct clk *clk)
-{
-	return clk_get_rate(clk->parent) / 8;
-}
-
-static struct clk pll3_60m = {
-	.parent = &pll3_usb_otg,
-	.get_rate = pll3_60m_get_rate,
-};
-
-static int pll1_sw_clk_set_parent(struct clk *clk, struct clk *parent)
-{
-	u32 val = readl_relaxed(CCSR);
-
-	if (parent == &pll1_sys) {
-		val &= ~BM_CCSR_PLL1_SW_SEL;
-		val &= ~BM_CCSR_STEP_SEL;
-	} else if (parent == &osc_clk) {
-		val |= BM_CCSR_PLL1_SW_SEL;
-		val &= ~BM_CCSR_STEP_SEL;
-	} else if (parent == &pll2_pfd_400m) {
-		val |= BM_CCSR_PLL1_SW_SEL;
-		val |= BM_CCSR_STEP_SEL;
-	} else {
-		return -EINVAL;
-	}
-
-	writel_relaxed(val, CCSR);
-
-	return 0;
-}
-
-static struct clk pll1_sw_clk = {
-	.parent = &pll1_sys,
-	.set_parent = pll1_sw_clk_set_parent,
-};
-
-static void calc_pred_podf_dividers(u32 div, u32 *pred, u32 *podf)
-{
-	u32 min_pred, temp_pred, old_err, err;
-
-	if (div >= 512) {
-		*pred = 8;
-		*podf = 64;
-	} else if (div >= 8) {
-		min_pred = (div - 1) / 64 + 1;
-		old_err = 8;
-		for (temp_pred = 8; temp_pred >= min_pred; temp_pred--) {
-			err = div % temp_pred;
-			if (err == 0) {
-				*pred = temp_pred;
-				break;
-			}
-			err = temp_pred - err;
-			if (err < old_err) {
-				old_err = err;
-				*pred = temp_pred;
-			}
-		}
-		*podf = (div + *pred - 1) / *pred;
-	} else if (div < 8) {
-		*pred = div;
-		*podf = 1;
-	}
-}
-
-static int _clk_enable(struct clk *clk)
-{
-	u32 reg;
-	reg = readl_relaxed(clk->enable_reg);
-	reg |= 0x3 << clk->enable_shift;
-	writel_relaxed(reg, clk->enable_reg);
-
-	return 0;
-}
-
-static void _clk_disable(struct clk *clk)
-{
-	u32 reg;
-	reg = readl_relaxed(clk->enable_reg);
-	reg &= ~(0x3 << clk->enable_shift);
-	writel_relaxed(reg, clk->enable_reg);
-}
-
-static int _clk_enable_1b(struct clk *clk)
-{
-	u32 reg;
-	reg = readl_relaxed(clk->enable_reg);
-	reg |= 0x1 << clk->enable_shift;
-	writel_relaxed(reg, clk->enable_reg);
-
-	return 0;
-}
-
-static void _clk_disable_1b(struct clk *clk)
-{
-	u32 reg;
-	reg = readl_relaxed(clk->enable_reg);
-	reg &= ~(0x1 << clk->enable_shift);
-	writel_relaxed(reg, clk->enable_reg);
-}
-
-struct divider {
-	struct clk *clk;
-	void __iomem *reg;
-	u32 bp_pred;
-	u32 bm_pred;
-	u32 bp_podf;
-	u32 bm_podf;
-};
-
-#define DEF_CLK_DIV1(d, c, r, b)				\
-	static struct divider d = {				\
-		.clk = c,					\
-		.reg = r,					\
-		.bp_podf = BP_##r##_##b##_PODF,			\
-		.bm_podf = BM_##r##_##b##_PODF,			\
-	}
-
-DEF_CLK_DIV1(arm_div,		&arm_clk,		CACRR,	ARM);
-DEF_CLK_DIV1(ipg_div,		&ipg_clk,		CBCDR,	IPG);
-DEF_CLK_DIV1(ahb_div,		&ahb_clk,		CBCDR,	AHB);
-DEF_CLK_DIV1(axi_div,		&axi_clk,		CBCDR,	AXI);
-DEF_CLK_DIV1(mmdc_ch0_axi_div,	&mmdc_ch0_axi_clk,	CBCDR,	MMDC_CH0_AXI);
-DEF_CLK_DIV1(mmdc_ch1_axi_div,	&mmdc_ch1_axi_clk,	CBCDR,	MMDC_CH1_AXI);
-DEF_CLK_DIV1(periph_clk2_div,	&periph_clk2_clk,	CBCDR,	PERIPH_CLK2);
-DEF_CLK_DIV1(periph2_clk2_div,	&periph2_clk2_clk,	CBCDR,	PERIPH2_CLK2);
-DEF_CLK_DIV1(gpu2d_core_div,	&gpu2d_core_clk,	CBCMR,	GPU2D_CORE);
-DEF_CLK_DIV1(gpu3d_core_div,	&gpu3d_core_clk,	CBCMR,	GPU3D_CORE);
-DEF_CLK_DIV1(gpu3d_shader_div,	&gpu3d_shader_clk,	CBCMR,	GPU3D_SHADER);
-DEF_CLK_DIV1(ipg_perclk_div,	&ipg_perclk,		CSCMR1,	PERCLK);
-DEF_CLK_DIV1(emi_div,		&emi_clk,		CSCMR1,	EMI);
-DEF_CLK_DIV1(emi_slow_div,	&emi_slow_clk,		CSCMR1,	EMI_SLOW);
-DEF_CLK_DIV1(can_div,		&can1_clk,		CSCMR2,	CAN);
-DEF_CLK_DIV1(uart_div,		&uart_clk,		CSCDR1,	UART);
-DEF_CLK_DIV1(usdhc1_div,	&usdhc1_clk,		CSCDR1,	USDHC1);
-DEF_CLK_DIV1(usdhc2_div,	&usdhc2_clk,		CSCDR1,	USDHC2);
-DEF_CLK_DIV1(usdhc3_div,	&usdhc3_clk,		CSCDR1,	USDHC3);
-DEF_CLK_DIV1(usdhc4_div,	&usdhc4_clk,		CSCDR1,	USDHC4);
-DEF_CLK_DIV1(vpu_div,		&vpu_clk,		CSCDR1,	VPU_AXI);
-DEF_CLK_DIV1(hsi_tx_div,	&hsi_tx_clk,		CDCDR,	HSI_TX);
-DEF_CLK_DIV1(ipu1_di0_pre_div,	&ipu1_di0_pre_clk,	CHSCCDR, IPU1_DI0_PRE);
-DEF_CLK_DIV1(ipu1_di1_pre_div,	&ipu1_di1_pre_clk,	CHSCCDR, IPU1_DI1_PRE);
-DEF_CLK_DIV1(ipu2_di0_pre_div,	&ipu2_di0_pre_clk,	CSCDR2,	IPU2_DI0_PRE);
-DEF_CLK_DIV1(ipu2_di1_pre_div,	&ipu2_di1_pre_clk,	CSCDR2,	IPU2_DI1_PRE);
-DEF_CLK_DIV1(ipu1_div,		&ipu1_clk,		CSCDR3,	IPU1_HSP);
-DEF_CLK_DIV1(ipu2_div,		&ipu2_clk,		CSCDR3,	IPU2_HSP);
-DEF_CLK_DIV1(cko1_div,		&cko1_clk,		CCOSR, CKO1);
-
-#define DEF_CLK_DIV2(d, c, r, b)				\
-	static struct divider d = {				\
-		.clk = c,					\
-		.reg = r,					\
-		.bp_pred = BP_##r##_##b##_PRED,			\
-		.bm_pred = BM_##r##_##b##_PRED,			\
-		.bp_podf = BP_##r##_##b##_PODF,			\
-		.bm_podf = BM_##r##_##b##_PODF,			\
-	}
-
-DEF_CLK_DIV2(ssi1_div,		&ssi1_clk,		CS1CDR,	SSI1);
-DEF_CLK_DIV2(ssi3_div,		&ssi3_clk,		CS1CDR,	SSI3);
-DEF_CLK_DIV2(esai_div,		&esai_clk,		CS1CDR,	ESAI);
-DEF_CLK_DIV2(ssi2_div,		&ssi2_clk,		CS2CDR,	SSI2);
-DEF_CLK_DIV2(enfc_div,		&enfc_clk,		CS2CDR,	ENFC);
-DEF_CLK_DIV2(spdif_div,		&spdif_clk,		CDCDR,	SPDIF);
-DEF_CLK_DIV2(asrc_serial_div,	&asrc_serial_clk,	CDCDR,	ASRC_SERIAL);
-
-static struct divider *dividers[] = {
-	&arm_div,
-	&ipg_div,
-	&ahb_div,
-	&axi_div,
-	&mmdc_ch0_axi_div,
-	&mmdc_ch1_axi_div,
-	&periph_clk2_div,
-	&periph2_clk2_div,
-	&gpu2d_core_div,
-	&gpu3d_core_div,
-	&gpu3d_shader_div,
-	&ipg_perclk_div,
-	&emi_div,
-	&emi_slow_div,
-	&can_div,
-	&uart_div,
-	&usdhc1_div,
-	&usdhc2_div,
-	&usdhc3_div,
-	&usdhc4_div,
-	&vpu_div,
-	&hsi_tx_div,
-	&ipu1_di0_pre_div,
-	&ipu1_di1_pre_div,
-	&ipu2_di0_pre_div,
-	&ipu2_di1_pre_div,
-	&ipu1_div,
-	&ipu2_div,
-	&ssi1_div,
-	&ssi3_div,
-	&esai_div,
-	&ssi2_div,
-	&enfc_div,
-	&spdif_div,
-	&asrc_serial_div,
-	&cko1_div,
-};
-
-static unsigned long ldb_di_clk_get_rate(struct clk *clk)
-{
-	u32 val = readl_relaxed(CSCMR2);
-
-	val &= (clk == &ldb_di0_clk) ? BM_CSCMR2_LDB_DI0_IPU_DIV :
-				       BM_CSCMR2_LDB_DI1_IPU_DIV;
-	if (val)
-		return clk_get_rate(clk->parent) / 7;
-	else
-		return clk_get_rate(clk->parent) * 2 / 7;
-}
-
-static int ldb_di_clk_set_rate(struct clk *clk, unsigned long rate)
-{
-	unsigned long parent_rate = clk_get_rate(clk->parent);
-	u32 mask = (clk == &ldb_di0_clk) ? BM_CSCMR2_LDB_DI0_IPU_DIV :
-					   BM_CSCMR2_LDB_DI1_IPU_DIV;
-	u32 val = readl_relaxed(CSCMR2);
-
-	if (rate * 7 <= parent_rate + parent_rate / 20)
-		val |= mask;
-	else
-		val &= ~mask;
-
-	writel_relaxed(val, CSCMR2);
-
-	return 0;
-}
-
-static unsigned long ldb_di_clk_round_rate(struct clk *clk, unsigned long rate)
-{
-	unsigned long parent_rate = clk_get_rate(clk->parent);
-
-	if (rate * 7 <= parent_rate + parent_rate / 20)
-		return parent_rate / 7;
-	else
-		return 2 * parent_rate / 7;
-}
-
-static unsigned long _clk_get_rate(struct clk *clk)
-{
-	struct divider *d;
-	u32 val, pred, podf;
-	int i, num;
-
-	if (clk == &ldb_di0_clk || clk == &ldb_di1_clk)
-		return ldb_di_clk_get_rate(clk);
-
-	num = ARRAY_SIZE(dividers);
-	for (i = 0; i < num; i++)
-		if (dividers[i]->clk == clk) {
-			d = dividers[i];
-			break;
-		}
-	if (i == num)
-		return clk_get_rate(clk->parent);
-
-	val = readl_relaxed(d->reg);
-	pred = ((val & d->bm_pred) >> d->bp_pred) + 1;
-	podf = ((val & d->bm_podf) >> d->bp_podf) + 1;
-
-	return clk_get_rate(clk->parent) / (pred * podf);
-}
-
-static int clk_busy_wait(struct clk *clk)
-{
-	int timeout = 0x100000;
-	u32 bm;
-
-	if (clk == &axi_clk)
-		bm = BM_CDHIPR_AXI_PODF_BUSY;
-	else if (clk == &ahb_clk)
-		bm = BM_CDHIPR_AHB_PODF_BUSY;
-	else if (clk == &mmdc_ch0_axi_clk)
-		bm = BM_CDHIPR_MMDC_CH0_PODF_BUSY;
-	else if (clk == &periph_clk)
-		bm = BM_CDHIPR_PERIPH_SEL_BUSY;
-	else if (clk == &arm_clk)
-		bm = BM_CDHIPR_ARM_PODF_BUSY;
-	else
-		return -EINVAL;
-
-	while ((readl_relaxed(CDHIPR) & bm) && --timeout)
-		cpu_relax();
-
-	if (unlikely(!timeout))
-		return -EBUSY;
-
-	return 0;
-}
-
-static int _clk_set_rate(struct clk *clk, unsigned long rate)
-{
-	unsigned long parent_rate = clk_get_rate(clk->parent);
-	struct divider *d;
-	u32 val, div, max_div, pred = 0, podf;
-	int i, num;
-
-	if (clk == &ldb_di0_clk || clk == &ldb_di1_clk)
-		return ldb_di_clk_set_rate(clk, rate);
-
-	num = ARRAY_SIZE(dividers);
-	for (i = 0; i < num; i++)
-		if (dividers[i]->clk == clk) {
-			d = dividers[i];
-			break;
-		}
-	if (i == num)
-		return -EINVAL;
-
-	max_div = ((d->bm_pred >> d->bp_pred) + 1) *
-		  ((d->bm_podf >> d->bp_podf) + 1);
-
-	div = parent_rate / rate;
-	if (div == 0)
-		div++;
-
-	if ((parent_rate / div != rate) || div > max_div)
-		return -EINVAL;
-
-	if (d->bm_pred) {
-		calc_pred_podf_dividers(div, &pred, &podf);
-	} else {
-		pred = 1;
-		podf = div;
-	}
-
-	val = readl_relaxed(d->reg);
-	val &= ~(d->bm_pred | d->bm_podf);
-	val |= (pred - 1) << d->bp_pred | (podf - 1) << d->bp_podf;
-	writel_relaxed(val, d->reg);
-
-	if (clk == &axi_clk || clk == &ahb_clk ||
-	    clk == &mmdc_ch0_axi_clk || clk == &arm_clk)
-		return clk_busy_wait(clk);
-
-	return 0;
-}
-
-static unsigned long _clk_round_rate(struct clk *clk, unsigned long rate)
-{
-	unsigned long parent_rate = clk_get_rate(clk->parent);
-	u32 div = parent_rate / rate;
-	u32 div_max, pred = 0, podf;
-	struct divider *d;
-	int i, num;
-
-	if (clk == &ldb_di0_clk || clk == &ldb_di1_clk)
-		return ldb_di_clk_round_rate(clk, rate);
-
-	num = ARRAY_SIZE(dividers);
-	for (i = 0; i < num; i++)
-		if (dividers[i]->clk == clk) {
-			d = dividers[i];
-			break;
-		}
-	if (i == num)
-		return -EINVAL;
-
-	if (div == 0 || parent_rate % rate)
-		div++;
-
-	if (d->bm_pred) {
-		calc_pred_podf_dividers(div, &pred, &podf);
-		div = pred * podf;
-	} else {
-		div_max = (d->bm_podf >> d->bp_podf) + 1;
-		if (div > div_max)
-			div = div_max;
-	}
-
-	return parent_rate / div;
-}
-
-struct multiplexer {
-	struct clk *clk;
-	void __iomem *reg;
-	u32 bp;
-	u32 bm;
-	int pnum;
-	struct clk *parents[];
-};
-
-static struct multiplexer axi_mux = {
-	.clk = &axi_clk,
-	.reg = CBCDR,
-	.bp = BP_CBCDR_AXI_SEL,
-	.bm = BM_CBCDR_AXI_SEL,
-	.parents = {
-		&periph_clk,
-		&pll2_pfd_400m,
-		&pll3_pfd_540m,
-		NULL
-	},
-};
-
-static struct multiplexer periph_mux = {
-	.clk = &periph_clk,
-	.reg = CBCDR,
-	.bp = BP_CBCDR_PERIPH_CLK_SEL,
-	.bm = BM_CBCDR_PERIPH_CLK_SEL,
-	.parents = {
-		&periph_pre_clk,
-		&periph_clk2_clk,
-		NULL
-	},
-};
-
-static struct multiplexer periph_pre_mux = {
-	.clk = &periph_pre_clk,
-	.reg = CBCMR,
-	.bp = BP_CBCMR_PRE_PERIPH_CLK_SEL,
-	.bm = BM_CBCMR_PRE_PERIPH_CLK_SEL,
-	.parents = {
-		&pll2_bus,
-		&pll2_pfd_400m,
-		&pll2_pfd_352m,
-		&pll2_200m,
-		NULL
-	},
-};
-
-static struct multiplexer periph_clk2_mux = {
-	.clk = &periph_clk2_clk,
-	.reg = CBCMR,
-	.bp = BP_CBCMR_PERIPH_CLK2_SEL,
-	.bm = BM_CBCMR_PERIPH_CLK2_SEL,
-	.parents = {
-		&pll3_usb_otg,
-		&osc_clk,
-		NULL
-	},
-};
-
-static struct multiplexer periph2_mux = {
-	.clk = &periph2_clk,
-	.reg = CBCDR,
-	.bp = BP_CBCDR_PERIPH2_CLK_SEL,
-	.bm = BM_CBCDR_PERIPH2_CLK_SEL,
-	.parents = {
-		&periph2_pre_clk,
-		&periph2_clk2_clk,
-		NULL
-	},
-};
-
-static struct multiplexer periph2_pre_mux = {
-	.clk = &periph2_pre_clk,
-	.reg = CBCMR,
-	.bp = BP_CBCMR_PRE_PERIPH2_CLK_SEL,
-	.bm = BM_CBCMR_PRE_PERIPH2_CLK_SEL,
-	.parents = {
-		&pll2_bus,
-		&pll2_pfd_400m,
-		&pll2_pfd_352m,
-		&pll2_200m,
-		NULL
-	},
-};
-
-static struct multiplexer periph2_clk2_mux = {
-	.clk = &periph2_clk2_clk,
-	.reg = CBCMR,
-	.bp = BP_CBCMR_PERIPH2_CLK2_SEL,
-	.bm = BM_CBCMR_PERIPH2_CLK2_SEL,
-	.parents = {
-		&pll3_usb_otg,
-		&osc_clk,
-		NULL
-	},
-};
-
-static struct multiplexer gpu2d_axi_mux = {
-	.clk = &gpu2d_axi_clk,
-	.reg = CBCMR,
-	.bp = BP_CBCMR_GPU2D_AXI_SEL,
-	.bm = BM_CBCMR_GPU2D_AXI_SEL,
-	.parents = {
-		&axi_clk,
-		&ahb_clk,
-		NULL
-	},
-};
-
-static struct multiplexer gpu3d_axi_mux = {
-	.clk = &gpu3d_axi_clk,
-	.reg = CBCMR,
-	.bp = BP_CBCMR_GPU3D_AXI_SEL,
-	.bm = BM_CBCMR_GPU3D_AXI_SEL,
-	.parents = {
-		&axi_clk,
-		&ahb_clk,
-		NULL
-	},
-};
-
-static struct multiplexer gpu3d_core_mux = {
-	.clk = &gpu3d_core_clk,
-	.reg = CBCMR,
-	.bp = BP_CBCMR_GPU3D_CORE_SEL,
-	.bm = BM_CBCMR_GPU3D_CORE_SEL,
-	.parents = {
-		&mmdc_ch0_axi_clk,
-		&pll3_usb_otg,
-		&pll2_pfd_594m,
-		&pll2_pfd_400m,
-		NULL
-	},
-};
-
-static struct multiplexer gpu3d_shader_mux = {
-	.clk = &gpu3d_shader_clk,
-	.reg = CBCMR,
-	.bp = BP_CBCMR_GPU3D_SHADER_SEL,
-	.bm = BM_CBCMR_GPU3D_SHADER_SEL,
-	.parents = {
-		&mmdc_ch0_axi_clk,
-		&pll3_usb_otg,
-		&pll2_pfd_594m,
-		&pll3_pfd_720m,
-		NULL
-	},
-};
-
-static struct multiplexer pcie_axi_mux = {
-	.clk = &pcie_clk,
-	.reg = CBCMR,
-	.bp = BP_CBCMR_PCIE_AXI_SEL,
-	.bm = BM_CBCMR_PCIE_AXI_SEL,
-	.parents = {
-		&axi_clk,
-		&ahb_clk,
-		NULL
-	},
-};
-
-static struct multiplexer vdo_axi_mux = {
-	.clk = &vdo_axi_clk,
-	.reg = CBCMR,
-	.bp = BP_CBCMR_VDO_AXI_SEL,
-	.bm = BM_CBCMR_VDO_AXI_SEL,
-	.parents = {
-		&axi_clk,
-		&ahb_clk,
-		NULL
-	},
-};
-
-static struct multiplexer vpu_axi_mux = {
-	.clk = &vpu_clk,
-	.reg = CBCMR,
-	.bp = BP_CBCMR_VPU_AXI_SEL,
-	.bm = BM_CBCMR_VPU_AXI_SEL,
-	.parents = {
-		&axi_clk,
-		&pll2_pfd_400m,
-		&pll2_pfd_352m,
-		NULL
-	},
-};
-
-static struct multiplexer gpu2d_core_mux = {
-	.clk = &gpu2d_core_clk,
-	.reg = CBCMR,
-	.bp = BP_CBCMR_GPU2D_CORE_SEL,
-	.bm = BM_CBCMR_GPU2D_CORE_SEL,
-	.parents = {
-		&axi_clk,
-		&pll3_usb_otg,
-		&pll2_pfd_352m,
-		&pll2_pfd_400m,
-		NULL
-	},
-};
-
-#define DEF_SSI_MUX(id)							\
-	static struct multiplexer ssi##id##_mux = {			\
-		.clk = &ssi##id##_clk,					\
-		.reg = CSCMR1,						\
-		.bp = BP_CSCMR1_SSI##id##_SEL,				\
-		.bm = BM_CSCMR1_SSI##id##_SEL,				\
-		.parents = {						\
-			&pll3_pfd_508m,					\
-			&pll3_pfd_454m,					\
-			&pll4_audio,					\
-			NULL						\
-		},							\
-	}
-
-DEF_SSI_MUX(1);
-DEF_SSI_MUX(2);
-DEF_SSI_MUX(3);
-
-#define DEF_USDHC_MUX(id)						\
-	static struct multiplexer usdhc##id##_mux = {			\
-		.clk = &usdhc##id##_clk,				\
-		.reg = CSCMR1,						\
-		.bp = BP_CSCMR1_USDHC##id##_SEL,			\
-		.bm = BM_CSCMR1_USDHC##id##_SEL,			\
-		.parents = {						\
-			&pll2_pfd_400m,					\
-			&pll2_pfd_352m,					\
-			NULL						\
-		},							\
-	}
-
-DEF_USDHC_MUX(1);
-DEF_USDHC_MUX(2);
-DEF_USDHC_MUX(3);
-DEF_USDHC_MUX(4);
-
-static struct multiplexer emi_mux = {
-	.clk = &emi_clk,
-	.reg = CSCMR1,
-	.bp = BP_CSCMR1_EMI_SEL,
-	.bm = BM_CSCMR1_EMI_SEL,
-	.parents = {
-		&axi_clk,
-		&pll3_usb_otg,
-		&pll2_pfd_400m,
-		&pll2_pfd_352m,
-		NULL
-	},
-};
-
-static struct multiplexer emi_slow_mux = {
-	.clk = &emi_slow_clk,
-	.reg = CSCMR1,
-	.bp = BP_CSCMR1_EMI_SLOW_SEL,
-	.bm = BM_CSCMR1_EMI_SLOW_SEL,
-	.parents = {
-		&axi_clk,
-		&pll3_usb_otg,
-		&pll2_pfd_400m,
-		&pll2_pfd_352m,
-		NULL
-	},
-};
-
-static struct multiplexer esai_mux = {
-	.clk = &esai_clk,
-	.reg = CSCMR2,
-	.bp = BP_CSCMR2_ESAI_SEL,
-	.bm = BM_CSCMR2_ESAI_SEL,
-	.parents = {
-		&pll4_audio,
-		&pll3_pfd_508m,
-		&pll3_pfd_454m,
-		&pll3_usb_otg,
-		NULL
-	},
-};
-
-#define DEF_LDB_DI_MUX(id)						\
-	static struct multiplexer ldb_di##id##_mux = {			\
-		.clk = &ldb_di##id##_clk,				\
-		.reg = CS2CDR,						\
-		.bp = BP_CS2CDR_LDB_DI##id##_SEL,			\
-		.bm = BM_CS2CDR_LDB_DI##id##_SEL,			\
-		.parents = {						\
-			&pll5_video,					\
-			&pll2_pfd_352m,					\
-			&pll2_pfd_400m,					\
-			&pll3_pfd_540m,					\
-			&pll3_usb_otg,					\
-			NULL						\
-		},							\
-	}
-
-DEF_LDB_DI_MUX(0);
-DEF_LDB_DI_MUX(1);
-
-static struct multiplexer enfc_mux = {
-	.clk = &enfc_clk,
-	.reg = CS2CDR,
-	.bp = BP_CS2CDR_ENFC_SEL,
-	.bm = BM_CS2CDR_ENFC_SEL,
-	.parents = {
-		&pll2_pfd_352m,
-		&pll2_bus,
-		&pll3_usb_otg,
-		&pll2_pfd_400m,
-		NULL
-	},
-};
-
-static struct multiplexer spdif_mux = {
-	.clk = &spdif_clk,
-	.reg = CDCDR,
-	.bp = BP_CDCDR_SPDIF_SEL,
-	.bm = BM_CDCDR_SPDIF_SEL,
-	.parents = {
-		&pll4_audio,
-		&pll3_pfd_508m,
-		&pll3_pfd_454m,
-		&pll3_usb_otg,
-		NULL
-	},
-};
-
-static struct multiplexer asrc_serial_mux = {
-	.clk = &asrc_serial_clk,
-	.reg = CDCDR,
-	.bp = BP_CDCDR_ASRC_SERIAL_SEL,
-	.bm = BM_CDCDR_ASRC_SERIAL_SEL,
-	.parents = {
-		&pll4_audio,
-		&pll3_pfd_508m,
-		&pll3_pfd_454m,
-		&pll3_usb_otg,
-		NULL
-	},
-};
-
-static struct multiplexer hsi_tx_mux = {
-	.clk = &hsi_tx_clk,
-	.reg = CDCDR,
-	.bp = BP_CDCDR_HSI_TX_SEL,
-	.bm = BM_CDCDR_HSI_TX_SEL,
-	.parents = {
-		&pll3_120m,
-		&pll2_pfd_400m,
-		NULL
-	},
-};
-
-#define DEF_IPU_DI_PRE_MUX(r, i, d)					\
-	static struct multiplexer ipu##i##_di##d##_pre_mux = {		\
-		.clk = &ipu##i##_di##d##_pre_clk,			\
-		.reg = r,						\
-		.bp = BP_##r##_IPU##i##_DI##d##_PRE_SEL,		\
-		.bm = BM_##r##_IPU##i##_DI##d##_PRE_SEL,		\
-		.parents = {						\
-			&mmdc_ch0_axi_clk,				\
-			&pll3_usb_otg,					\
-			&pll5_video,					\
-			&pll2_pfd_352m,					\
-			&pll2_pfd_400m,					\
-			&pll3_pfd_540m,					\
-			NULL						\
-		},							\
-	}
-
-DEF_IPU_DI_PRE_MUX(CHSCCDR, 1, 0);
-DEF_IPU_DI_PRE_MUX(CHSCCDR, 1, 1);
-DEF_IPU_DI_PRE_MUX(CSCDR2, 2, 0);
-DEF_IPU_DI_PRE_MUX(CSCDR2, 2, 1);
-
-#define DEF_IPU_DI_MUX(r, i, d)						\
-	static struct multiplexer ipu##i##_di##d##_mux = {		\
-		.clk = &ipu##i##_di##d##_clk,				\
-		.reg = r,						\
-		.bp = BP_##r##_IPU##i##_DI##d##_SEL,			\
-		.bm = BM_##r##_IPU##i##_DI##d##_SEL,			\
-		.parents = {						\
-			&ipu##i##_di##d##_pre_clk,			\
-			&dummy_clk,					\
-			&dummy_clk,					\
-			&ldb_di0_clk,					\
-			&ldb_di1_clk,					\
-			NULL						\
-		},							\
-	}
-
-DEF_IPU_DI_MUX(CHSCCDR, 1, 0);
-DEF_IPU_DI_MUX(CHSCCDR, 1, 1);
-DEF_IPU_DI_MUX(CSCDR2, 2, 0);
-DEF_IPU_DI_MUX(CSCDR2, 2, 1);
-
-#define DEF_IPU_MUX(id)							\
-	static struct multiplexer ipu##id##_mux = {			\
-		.clk = &ipu##id##_clk,					\
-		.reg = CSCDR3,						\
-		.bp = BP_CSCDR3_IPU##id##_HSP_SEL,			\
-		.bm = BM_CSCDR3_IPU##id##_HSP_SEL,			\
-		.parents = {						\
-			&mmdc_ch0_axi_clk,				\
-			&pll2_pfd_400m,					\
-			&pll3_120m,					\
-			&pll3_pfd_540m,					\
-			NULL						\
-		},							\
-	}
-
-DEF_IPU_MUX(1);
-DEF_IPU_MUX(2);
-
-static struct multiplexer cko1_mux = {
-	.clk = &cko1_clk,
-	.reg = CCOSR,
-	.bp = BP_CCOSR_CKO1_SEL,
-	.bm = BM_CCOSR_CKO1_SEL,
-	.parents = {
-		&pll3_usb_otg,
-		&pll2_bus,
-		&pll1_sys,
-		&pll5_video,
-		&dummy_clk,
-		&axi_clk,
-		&enfc_clk,
-		&ipu1_di0_clk,
-		&ipu1_di1_clk,
-		&ipu2_di0_clk,
-		&ipu2_di1_clk,
-		&ahb_clk,
-		&ipg_clk,
-		&ipg_perclk,
-		&ckil_clk,
-		&pll4_audio,
-		NULL
-	},
-};
-
-static struct multiplexer *multiplexers[] = {
-	&axi_mux,
-	&periph_mux,
-	&periph_pre_mux,
-	&periph_clk2_mux,
-	&periph2_mux,
-	&periph2_pre_mux,
-	&periph2_clk2_mux,
-	&gpu2d_axi_mux,
-	&gpu3d_axi_mux,
-	&gpu3d_core_mux,
-	&gpu3d_shader_mux,
-	&pcie_axi_mux,
-	&vdo_axi_mux,
-	&vpu_axi_mux,
-	&gpu2d_core_mux,
-	&ssi1_mux,
-	&ssi2_mux,
-	&ssi3_mux,
-	&usdhc1_mux,
-	&usdhc2_mux,
-	&usdhc3_mux,
-	&usdhc4_mux,
-	&emi_mux,
-	&emi_slow_mux,
-	&esai_mux,
-	&ldb_di0_mux,
-	&ldb_di1_mux,
-	&enfc_mux,
-	&spdif_mux,
-	&asrc_serial_mux,
-	&hsi_tx_mux,
-	&ipu1_di0_pre_mux,
-	&ipu1_di0_mux,
-	&ipu1_di1_pre_mux,
-	&ipu1_di1_mux,
-	&ipu2_di0_pre_mux,
-	&ipu2_di0_mux,
-	&ipu2_di1_pre_mux,
-	&ipu2_di1_mux,
-	&ipu1_mux,
-	&ipu2_mux,
-	&cko1_mux,
-};
-
-static int _clk_set_parent(struct clk *clk, struct clk *parent)
-{
-	struct multiplexer *m;
-	int i, num;
-	u32 val;
-
-	num = ARRAY_SIZE(multiplexers);
-	for (i = 0; i < num; i++)
-		if (multiplexers[i]->clk == clk) {
-			m = multiplexers[i];
-			break;
-		}
-	if (i == num)
-		return -EINVAL;
-
-	i = 0;
-	while (m->parents[i]) {
-		if (parent == m->parents[i])
-			break;
-		i++;
-	}
-	if (!m->parents[i] || m->parents[i] == &dummy_clk)
-		return -EINVAL;
-
-	val = readl_relaxed(m->reg);
-	val &= ~m->bm;
-	val |= i << m->bp;
-	writel_relaxed(val, m->reg);
-
-	if (clk == &periph_clk)
-		return clk_busy_wait(clk);
-
-	return 0;
-}
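As a concrete instance of the table lookup above: periph_pre_mux lists pll2_bus, pll2_pfd_400m, pll2_pfd_352m and pll2_200m in that order, so reparenting periph_pre_clk to pll2_pfd_400m resolves to index 1 and writes 1 into the PRE_PERIPH_CLK_SEL field of CBCMR.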
-
-#define DEF_NG_CLK(name, p)				\
-	static struct clk name = {			\
-		.get_rate	= _clk_get_rate,	\
-		.set_rate	= _clk_set_rate,	\
-		.round_rate	= _clk_round_rate,	\
-		.set_parent	= _clk_set_parent,	\
-		.parent		= p,			\
-	}
-
-DEF_NG_CLK(periph_clk2_clk,	&osc_clk);
-DEF_NG_CLK(periph_pre_clk,	&pll2_bus);
-DEF_NG_CLK(periph_clk,		&periph_pre_clk);
-DEF_NG_CLK(periph2_clk2_clk,	&osc_clk);
-DEF_NG_CLK(periph2_pre_clk,	&pll2_bus);
-DEF_NG_CLK(periph2_clk,		&periph2_pre_clk);
-DEF_NG_CLK(axi_clk,		&periph_clk);
-DEF_NG_CLK(emi_clk,		&axi_clk);
-DEF_NG_CLK(arm_clk,		&pll1_sw_clk);
-DEF_NG_CLK(ahb_clk,		&periph_clk);
-DEF_NG_CLK(ipg_clk,		&ahb_clk);
-DEF_NG_CLK(ipg_perclk,		&ipg_clk);
-DEF_NG_CLK(ipu1_di0_pre_clk,	&pll3_pfd_540m);
-DEF_NG_CLK(ipu1_di1_pre_clk,	&pll3_pfd_540m);
-DEF_NG_CLK(ipu2_di0_pre_clk,	&pll3_pfd_540m);
-DEF_NG_CLK(ipu2_di1_pre_clk,	&pll3_pfd_540m);
-DEF_NG_CLK(asrc_serial_clk,	&pll3_usb_otg);
-
-#define DEF_CLK(name, er, es, p, s)			\
-	static struct clk name = {			\
-		.enable_reg	= er,			\
-		.enable_shift	= es,			\
-		.enable		= _clk_enable,		\
-		.disable	= _clk_disable,		\
-		.get_rate	= _clk_get_rate,	\
-		.set_rate	= _clk_set_rate,	\
-		.round_rate	= _clk_round_rate,	\
-		.set_parent	= _clk_set_parent,	\
-		.parent		= p,			\
-		.secondary	= s,			\
-	}
-
-#define DEF_CLK_1B(name, er, es, p, s)			\
-	static struct clk name = {			\
-		.enable_reg	= er,			\
-		.enable_shift	= es,			\
-		.enable		= _clk_enable_1b,	\
-		.disable	= _clk_disable_1b,	\
-		.get_rate	= _clk_get_rate,	\
-		.set_rate	= _clk_set_rate,	\
-		.round_rate	= _clk_round_rate,	\
-		.set_parent	= _clk_set_parent,	\
-		.parent		= p,			\
-		.secondary	= s,			\
-	}
-
-DEF_CLK(aips_tz1_clk,	  CCGR0, CG0,  &ahb_clk,	  NULL);
-DEF_CLK(aips_tz2_clk,	  CCGR0, CG1,  &ahb_clk,	  NULL);
-DEF_CLK(apbh_dma_clk,	  CCGR0, CG2,  &ahb_clk,	  NULL);
-DEF_CLK(asrc_clk,	  CCGR0, CG3,  &pll4_audio,	  NULL);
-DEF_CLK(can1_serial_clk,  CCGR0, CG8,  &pll3_usb_otg,	  NULL);
-DEF_CLK(can1_clk,	  CCGR0, CG7,  &pll3_usb_otg,	  &can1_serial_clk);
-DEF_CLK(can2_serial_clk,  CCGR0, CG10, &pll3_usb_otg,	  NULL);
-DEF_CLK(can2_clk,	  CCGR0, CG9,  &pll3_usb_otg,	  &can2_serial_clk);
-DEF_CLK(ecspi1_clk,	  CCGR1, CG0,  &pll3_60m,	  NULL);
-DEF_CLK(ecspi2_clk,	  CCGR1, CG1,  &pll3_60m,	  NULL);
-DEF_CLK(ecspi3_clk,	  CCGR1, CG2,  &pll3_60m,	  NULL);
-DEF_CLK(ecspi4_clk,	  CCGR1, CG3,  &pll3_60m,	  NULL);
-DEF_CLK(ecspi5_clk,	  CCGR1, CG4,  &pll3_60m,	  NULL);
-DEF_CLK(enet_clk,	  CCGR1, CG5,  &ipg_clk,	  NULL);
-DEF_CLK(esai_clk,	  CCGR1, CG8,  &pll3_usb_otg,	  NULL);
-DEF_CLK(gpt_serial_clk,	  CCGR1, CG11, &ipg_perclk,	  NULL);
-DEF_CLK(gpt_clk,	  CCGR1, CG10, &ipg_perclk,	  &gpt_serial_clk);
-DEF_CLK(gpu2d_core_clk,	  CCGR1, CG12, &pll2_pfd_352m,	  &gpu2d_axi_clk);
-DEF_CLK(gpu3d_core_clk,	  CCGR1, CG13, &pll2_pfd_594m,	  &gpu3d_axi_clk);
-DEF_CLK(gpu3d_shader_clk, CCGR1, CG13, &pll3_pfd_720m,	  &gpu3d_axi_clk);
-DEF_CLK(hdmi_iahb_clk,	  CCGR2, CG0,  &ahb_clk,	  NULL);
-DEF_CLK(hdmi_isfr_clk,	  CCGR2, CG2,  &pll3_pfd_540m,	  &hdmi_iahb_clk);
-DEF_CLK(i2c1_clk,	  CCGR2, CG3,  &ipg_perclk,	  NULL);
-DEF_CLK(i2c2_clk,	  CCGR2, CG4,  &ipg_perclk,	  NULL);
-DEF_CLK(i2c3_clk,	  CCGR2, CG5,  &ipg_perclk,	  NULL);
-DEF_CLK(iim_clk,	  CCGR2, CG6,  &ipg_clk,	  NULL);
-DEF_CLK(enfc_clk,	  CCGR2, CG7,  &pll2_pfd_352m,	  NULL);
-DEF_CLK(ipu1_clk,	  CCGR3, CG0,  &mmdc_ch0_axi_clk, NULL);
-DEF_CLK(ipu1_di0_clk,	  CCGR3, CG1,  &ipu1_di0_pre_clk, NULL);
-DEF_CLK(ipu1_di1_clk,	  CCGR3, CG2,  &ipu1_di1_pre_clk, NULL);
-DEF_CLK(ipu2_clk,	  CCGR3, CG3,  &mmdc_ch0_axi_clk, NULL);
-DEF_CLK(ipu2_di0_clk,	  CCGR3, CG4,  &ipu2_di0_pre_clk, NULL);
-DEF_CLK(ipu2_di1_clk,	  CCGR3, CG5,  &ipu2_di1_pre_clk, NULL);
-DEF_CLK(ldb_di0_clk,	  CCGR3, CG6,  &pll3_pfd_540m,	  NULL);
-DEF_CLK(ldb_di1_clk,	  CCGR3, CG7,  &pll3_pfd_540m,	  NULL);
-DEF_CLK(hsi_tx_clk,	  CCGR3, CG8,  &pll2_pfd_400m,	  NULL);
-DEF_CLK(mlb_clk,	  CCGR3, CG9,  &pll6_mlb,	  NULL);
-DEF_CLK(mmdc_ch0_ipg_clk, CCGR3, CG12, &ipg_clk,	  NULL);
-DEF_CLK(mmdc_ch0_axi_clk, CCGR3, CG10, &periph_clk,	  &mmdc_ch0_ipg_clk);
-DEF_CLK(mmdc_ch1_ipg_clk, CCGR3, CG13, &ipg_clk,	  NULL);
-DEF_CLK(mmdc_ch1_axi_clk, CCGR3, CG11, &periph2_clk,	  &mmdc_ch1_ipg_clk);
-DEF_CLK(openvg_axi_clk,   CCGR3, CG13, &axi_clk,	  NULL);
-DEF_CLK(pwm1_clk,	  CCGR4, CG8,  &ipg_perclk,	  NULL);
-DEF_CLK(pwm2_clk,	  CCGR4, CG9,  &ipg_perclk,	  NULL);
-DEF_CLK(pwm3_clk,	  CCGR4, CG10, &ipg_perclk,	  NULL);
-DEF_CLK(pwm4_clk,	  CCGR4, CG11, &ipg_perclk,	  NULL);
-DEF_CLK(gpmi_bch_apb_clk, CCGR4, CG12, &usdhc3_clk,	  NULL);
-DEF_CLK(gpmi_bch_clk,	  CCGR4, CG13, &usdhc4_clk,	  &gpmi_bch_apb_clk);
-DEF_CLK(gpmi_apb_clk,	  CCGR4, CG15, &usdhc3_clk,	  &gpmi_bch_clk);
-DEF_CLK(gpmi_io_clk,	  CCGR4, CG14, &enfc_clk,	  &gpmi_apb_clk);
-DEF_CLK(sdma_clk,	  CCGR5, CG3,  &ahb_clk,	  NULL);
-DEF_CLK(spba_clk,	  CCGR5, CG6,  &ipg_clk,	  NULL);
-DEF_CLK(spdif_clk,	  CCGR5, CG7,  &pll3_usb_otg,	  &spba_clk);
-DEF_CLK(ssi1_clk,	  CCGR5, CG9,  &pll3_pfd_508m,	  NULL);
-DEF_CLK(ssi2_clk,	  CCGR5, CG10, &pll3_pfd_508m,	  NULL);
-DEF_CLK(ssi3_clk,	  CCGR5, CG11, &pll3_pfd_508m,	  NULL);
-DEF_CLK(uart_serial_clk,  CCGR5, CG13, &pll3_usb_otg,	  NULL);
-DEF_CLK(uart_clk,	  CCGR5, CG12, &pll3_80m,	  &uart_serial_clk);
-DEF_CLK(usboh3_clk,	  CCGR6, CG0,  &ipg_clk,	  NULL);
-DEF_CLK(usdhc1_clk,	  CCGR6, CG1,  &pll2_pfd_400m,	  NULL);
-DEF_CLK(usdhc2_clk,	  CCGR6, CG2,  &pll2_pfd_400m,	  NULL);
-DEF_CLK(usdhc3_clk,	  CCGR6, CG3,  &pll2_pfd_400m,	  NULL);
-DEF_CLK(usdhc4_clk,	  CCGR6, CG4,  &pll2_pfd_400m,	  NULL);
-DEF_CLK(emi_slow_clk,	  CCGR6, CG5,  &axi_clk,	  NULL);
-DEF_CLK(vdo_axi_clk,	  CCGR6, CG6,  &axi_clk,	  NULL);
-DEF_CLK(vpu_clk,	  CCGR6, CG7,  &axi_clk,	  NULL);
-DEF_CLK_1B(cko1_clk,	  CCOSR, BP_CCOSR_CKO1_EN, &pll2_bus, NULL);
-
-static int pcie_clk_enable(struct clk *clk)
-{
-	u32 val;
-
-	val = readl_relaxed(PLL8_ENET);
-	val |= BM_PLL_ENET_EN_PCIE;
-	writel_relaxed(val, PLL8_ENET);
-
-	return _clk_enable(clk);
-}
-
-static void pcie_clk_disable(struct clk *clk)
-{
-	u32 val;
-
-	_clk_disable(clk);
-
-	val = readl_relaxed(PLL8_ENET);
-	val &= ~BM_PLL_ENET_EN_PCIE;
-	writel_relaxed(val, PLL8_ENET);
-}
-
-static struct clk pcie_clk = {
-	.enable_reg = CCGR4,
-	.enable_shift = CG0,
-	.enable = pcie_clk_enable,
-	.disable = pcie_clk_disable,
-	.set_parent = _clk_set_parent,
-	.parent = &axi_clk,
-	.secondary = &pll8_enet,
-};
-
-static int sata_clk_enable(struct clk *clk)
-{
-	u32 val;
-
-	val = readl_relaxed(PLL8_ENET);
-	val |= BM_PLL_ENET_EN_SATA;
-	writel_relaxed(val, PLL8_ENET);
-
-	return _clk_enable(clk);
-}
-
-static void sata_clk_disable(struct clk *clk)
-{
-	u32 val;
-
-	_clk_disable(clk);
-
-	val = readl_relaxed(PLL8_ENET);
-	val &= ~BM_PLL_ENET_EN_SATA;
-	writel_relaxed(val, PLL8_ENET);
-}
-
-static struct clk sata_clk = {
-	.enable_reg = CCGR5,
-	.enable_shift = CG2,
-	.enable = sata_clk_enable,
-	.disable = sata_clk_disable,
-	.parent = &ipg_clk,
-	.secondary = &pll8_enet,
-};
-
-#define _REGISTER_CLOCK(d, n, c) \
-	{ \
-		.dev_id = d, \
-		.con_id = n, \
-		.clk = &c, \
-	}
-
-static struct clk_lookup lookups[] = {
-	_REGISTER_CLOCK("2020000.uart", NULL, uart_clk),
-	_REGISTER_CLOCK("21e8000.uart", NULL, uart_clk),
-	_REGISTER_CLOCK("21ec000.uart", NULL, uart_clk),
-	_REGISTER_CLOCK("21f0000.uart", NULL, uart_clk),
-	_REGISTER_CLOCK("21f4000.uart", NULL, uart_clk),
-	_REGISTER_CLOCK("2188000.enet", NULL, enet_clk),
-	_REGISTER_CLOCK("2190000.usdhc", NULL, usdhc1_clk),
-	_REGISTER_CLOCK("2194000.usdhc", NULL, usdhc2_clk),
-	_REGISTER_CLOCK("2198000.usdhc", NULL, usdhc3_clk),
-	_REGISTER_CLOCK("219c000.usdhc", NULL, usdhc4_clk),
-	_REGISTER_CLOCK("21a0000.i2c", NULL, i2c1_clk),
-	_REGISTER_CLOCK("21a4000.i2c", NULL, i2c2_clk),
-	_REGISTER_CLOCK("21a8000.i2c", NULL, i2c3_clk),
-	_REGISTER_CLOCK("2008000.ecspi", NULL, ecspi1_clk),
-	_REGISTER_CLOCK("200c000.ecspi", NULL, ecspi2_clk),
-	_REGISTER_CLOCK("2010000.ecspi", NULL, ecspi3_clk),
-	_REGISTER_CLOCK("2014000.ecspi", NULL, ecspi4_clk),
-	_REGISTER_CLOCK("2018000.ecspi", NULL, ecspi5_clk),
-	_REGISTER_CLOCK("20ec000.sdma", NULL, sdma_clk),
-	_REGISTER_CLOCK("20bc000.wdog", NULL, dummy_clk),
-	_REGISTER_CLOCK("20c0000.wdog", NULL, dummy_clk),
-	_REGISTER_CLOCK("smp_twd", NULL, twd_clk),
-	_REGISTER_CLOCK(NULL, "ckih", ckih_clk),
-	_REGISTER_CLOCK(NULL, "ckil_clk", ckil_clk),
-	_REGISTER_CLOCK(NULL, "aips_tz1_clk", aips_tz1_clk),
-	_REGISTER_CLOCK(NULL, "aips_tz2_clk", aips_tz2_clk),
-	_REGISTER_CLOCK(NULL, "asrc_clk", asrc_clk),
-	_REGISTER_CLOCK(NULL, "can2_clk", can2_clk),
-	_REGISTER_CLOCK(NULL, "hdmi_isfr_clk", hdmi_isfr_clk),
-	_REGISTER_CLOCK(NULL, "iim_clk", iim_clk),
-	_REGISTER_CLOCK(NULL, "mlb_clk", mlb_clk),
-	_REGISTER_CLOCK(NULL, "openvg_axi_clk", openvg_axi_clk),
-	_REGISTER_CLOCK(NULL, "pwm1_clk", pwm1_clk),
-	_REGISTER_CLOCK(NULL, "pwm2_clk", pwm2_clk),
-	_REGISTER_CLOCK(NULL, "pwm3_clk", pwm3_clk),
-	_REGISTER_CLOCK(NULL, "pwm4_clk", pwm4_clk),
-	_REGISTER_CLOCK(NULL, "gpmi_io_clk", gpmi_io_clk),
-	_REGISTER_CLOCK(NULL, "usboh3_clk", usboh3_clk),
-	_REGISTER_CLOCK(NULL, "sata_clk", sata_clk),
-	_REGISTER_CLOCK(NULL, "cko1_clk", cko1_clk),
-};
-
-int imx6q_set_lpm(enum mxc_cpu_pwr_mode mode)
-{
-	u32 val = readl_relaxed(CLPCR);
-
-	val &= ~BM_CLPCR_LPM;
-	switch (mode) {
-	case WAIT_CLOCKED:
-		break;
-	case WAIT_UNCLOCKED:
-		val |= 0x1 << BP_CLPCR_LPM;
-		break;
-	case STOP_POWER_ON:
-		val |= 0x2 << BP_CLPCR_LPM;
-		break;
-	case WAIT_UNCLOCKED_POWER_OFF:
-		val |= 0x1 << BP_CLPCR_LPM;
-		val &= ~BM_CLPCR_VSTBY;
-		val &= ~BM_CLPCR_SBYOS;
-		break;
-	case STOP_POWER_OFF:
-		val |= 0x2 << BP_CLPCR_LPM;
-		val |= 0x3 << BP_CLPCR_STBY_COUNT;
-		val |= BM_CLPCR_VSTBY;
-		val |= BM_CLPCR_SBYOS;
-		break;
-	default:
-		return -EINVAL;
-	}
-	writel_relaxed(val, CLPCR);
-
-	return 0;
-}
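A caller in the platform idle or suspend path would typically select the mode just before WFI; a minimal sketch of the consumer side (the surrounding pm code is not part of this file):

	imx6q_set_lpm(WAIT_UNCLOCKED);	/* or STOP_POWER_OFF for suspend */
	cpu_do_idle();			/* WFI; the CCM applies the chosen LPM mode */
	imx6q_set_lpm(WAIT_CLOCKED);	/* back to run mode after wakeup */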
-
-static struct map_desc imx6q_clock_desc[] = {
-	imx_map_entry(MX6Q, CCM, MT_DEVICE),
-	imx_map_entry(MX6Q, ANATOP, MT_DEVICE),
-};
-
-void __init imx6q_clock_map_io(void)
-{
-	iotable_init(imx6q_clock_desc, ARRAY_SIZE(imx6q_clock_desc));
-}
-
-int __init mx6q_clocks_init(void)
-{
-	struct device_node *np;
-	void __iomem *base;
-	int i, irq;
-
-	/* retrieve the frequency of fixed clocks from device tree */
-	for_each_compatible_node(np, NULL, "fixed-clock") {
-		u32 rate;
-		if (of_property_read_u32(np, "clock-frequency", &rate))
-			continue;
-
-		if (of_device_is_compatible(np, "fsl,imx-ckil"))
-			external_low_reference = rate;
-		else if (of_device_is_compatible(np, "fsl,imx-ckih1"))
-			external_high_reference = rate;
-		else if (of_device_is_compatible(np, "fsl,imx-osc"))
-			oscillator_reference = rate;
-	}
-
-	for (i = 0; i < ARRAY_SIZE(lookups); i++)
-		clkdev_add(&lookups[i]);
-
-	/* only keep necessary clocks on */
-	writel_relaxed(0x3 << CG0  | 0x3 << CG1  | 0x3 << CG2,	CCGR0);
-	writel_relaxed(0x3 << CG8  | 0x3 << CG9  | 0x3 << CG10,	CCGR2);
-	writel_relaxed(0x3 << CG10 | 0x3 << CG12,		CCGR3);
-	writel_relaxed(0x3 << CG4  | 0x3 << CG6  | 0x3 << CG7,	CCGR4);
-	writel_relaxed(0x3 << CG0,				CCGR5);
-	writel_relaxed(0,					CCGR6);
-	writel_relaxed(0,					CCGR7);
-
-	clk_enable(&uart_clk);
-	clk_enable(&mmdc_ch0_axi_clk);
-
-	clk_set_rate(&pll4_audio, FREQ_650M);
-	clk_set_rate(&pll5_video, FREQ_650M);
-	clk_set_parent(&ipu1_di0_clk, &ipu1_di0_pre_clk);
-	clk_set_parent(&ipu1_di0_pre_clk, &pll5_video);
-	clk_set_parent(&gpu3d_shader_clk, &pll2_pfd_594m);
-	clk_set_rate(&gpu3d_shader_clk, FREQ_594M);
-	clk_set_parent(&gpu3d_core_clk, &mmdc_ch0_axi_clk);
-	clk_set_rate(&gpu3d_core_clk, FREQ_528M);
-	clk_set_parent(&asrc_serial_clk, &pll3_usb_otg);
-	clk_set_rate(&asrc_serial_clk, 1500000);
-	clk_set_rate(&enfc_clk, 11000000);
-
-	/*
-	 * Until the pinctrl API is available, we have to rely on the pad
-	 * configuration set up by the bootloader.  For the usdhc case here,
-	 * u-boot sets up the pads for 49.5 MHz, so we have to lower the
-	 * usdhc clock from 198 to 49.5 MHz to match that pad configuration.
-	 *
-	 * FIXME: This should be removed once the pinctrl API is available.
-	 * At that point, the usdhc driver can call the pinctrl API to change
-	 * the pad configuration dynamically for different usdhc clock settings.
-	 */
-	clk_set_rate(&usdhc1_clk, 49500000);
-	clk_set_rate(&usdhc2_clk, 49500000);
-	clk_set_rate(&usdhc3_clk, 49500000);
-	clk_set_rate(&usdhc4_clk, 49500000);
-
-	clk_set_parent(&cko1_clk, &ahb_clk);
-
-	np = of_find_compatible_node(NULL, NULL, "fsl,imx6q-gpt");
-	base = of_iomap(np, 0);
-	WARN_ON(!base);
-	irq = irq_of_parse_and_map(np, 0);
-	mxc_timer_init(&gpt_clk, base, irq);
-
-	return 0;
-}
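With the lookups[] table registered through clkdev_add() above, a driver obtains its clock by device name via the usual clk API; a minimal sketch of the consumer side (error handling trimmed; pdev is the probing platform device and is not part of this file):

	struct clk *clk = clk_get(&pdev->dev, NULL);	/* matched by dev_id, e.g. "2020000.uart" */

	if (IS_ERR(clk))
		return PTR_ERR(clk);
	clk_enable(clk);
	dev_info(&pdev->dev, "clock runs at %lu Hz\n", clk_get_rate(clk));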
diff --git a/arch/arm/mach-imx/clock-mx51-mx53.c b/arch/arm/mach-imx/clock-mx51-mx53.c
deleted file mode 100644
index 0847050..0000000
--- a/arch/arm/mach-imx/clock-mx51-mx53.c
+++ /dev/null
@@ -1,1675 +0,0 @@
-/*
- * Copyright 2008-2010 Freescale Semiconductor, Inc. All Rights Reserved.
- * Copyright (C) 2009-2010 Amit Kucheria <amit.kucheria@canonical.com>
- *
- * The code contained herein is licensed under the GNU General Public
- * License. You may obtain a copy of the GNU General Public License
- * Version 2 or later at the following locations:
- *
- * http://www.opensource.org/licenses/gpl-license.html
- * http://www.gnu.org/copyleft/gpl.html
- */
-
-#include <linux/mm.h>
-#include <linux/delay.h>
-#include <linux/clk.h>
-#include <linux/io.h>
-#include <linux/clkdev.h>
-#include <linux/of.h>
-
-#include <asm/div64.h>
-
-#include <mach/hardware.h>
-#include <mach/common.h>
-#include <mach/clock.h>
-
-#include "crm-regs-imx5.h"
-
-/* External clock values passed-in by the board code */
-static unsigned long external_high_reference, external_low_reference;
-static unsigned long oscillator_reference, ckih2_reference;
-
-static struct clk osc_clk;
-static struct clk pll1_main_clk;
-static struct clk pll1_sw_clk;
-static struct clk pll2_sw_clk;
-static struct clk pll3_sw_clk;
-static struct clk mx53_pll4_sw_clk;
-static struct clk lp_apm_clk;
-static struct clk periph_apm_clk;
-static struct clk ahb_clk;
-static struct clk ipg_clk;
-static struct clk usboh3_clk;
-static struct clk emi_fast_clk;
-static struct clk ipu_clk;
-static struct clk mipi_hsc1_clk;
-static struct clk esdhc1_clk;
-static struct clk esdhc2_clk;
-static struct clk esdhc3_mx53_clk;
-
-#define MAX_DPLL_WAIT_TRIES	1000 /* 1000 * udelay(1) = 1ms */
-
-/* calculate best pre and post dividers to get the required divider */
-static void __calc_pre_post_dividers(u32 div, u32 *pre, u32 *post,
-	u32 max_pre, u32 max_post)
-{
-	if (div >= max_pre * max_post) {
-		*pre = max_pre;
-		*post = max_post;
-	} else if (div >= max_pre) {
-		u32 min_pre, temp_pre, old_err, err;
-		min_pre = DIV_ROUND_UP(div, max_post);
-		old_err = max_pre;
-		for (temp_pre = max_pre; temp_pre >= min_pre; temp_pre--) {
-			err = div % temp_pre;
-			if (err == 0) {
-				*pre = temp_pre;
-				break;
-			}
-			err = temp_pre - err;
-			if (err < old_err) {
-				old_err = err;
-				*pre = temp_pre;
-			}
-		}
-		*post = DIV_ROUND_UP(div, *pre);
-	} else {
-		*pre = div;
-		*post = 1;
-	}
-}
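A quick worked example of the helper, assuming 3-bit pre and post fields (max_pre = max_post = 8):

	u32 pre, post;

	__calc_pre_post_dividers(12, &pre, &post, 8, 8);
	/*
	 * 12 >= max_pre, so the middle branch runs: min_pre = 2, the scan
	 * from 8 downwards stops at 6 (the largest exact divisor of 12),
	 * and post = DIV_ROUND_UP(12, 6) = 2, so pre * post = 12 exactly.
	 */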
-
-static void _clk_ccgr_setclk(struct clk *clk, unsigned mode)
-{
-	u32 reg = __raw_readl(clk->enable_reg);
-
-	reg &= ~(MXC_CCM_CCGRx_CG_MASK << clk->enable_shift);
-	reg |= mode << clk->enable_shift;
-
-	__raw_writel(reg, clk->enable_reg);
-}
-
-static int _clk_ccgr_enable(struct clk *clk)
-{
-	_clk_ccgr_setclk(clk, MXC_CCM_CCGRx_MOD_ON);
-	return 0;
-}
-
-static void _clk_ccgr_disable(struct clk *clk)
-{
-	_clk_ccgr_setclk(clk, MXC_CCM_CCGRx_MOD_OFF);
-}
-
-static int _clk_ccgr_enable_inrun(struct clk *clk)
-{
-	_clk_ccgr_setclk(clk, MXC_CCM_CCGRx_MOD_IDLE);
-	return 0;
-}
-
-static void _clk_ccgr_disable_inwait(struct clk *clk)
-{
-	_clk_ccgr_setclk(clk, MXC_CCM_CCGRx_MOD_IDLE);
-}
-
-/*
- * For the 4-to-1 muxed input clock
- */
-static inline u32 _get_mux(struct clk *parent, struct clk *m0,
-			   struct clk *m1, struct clk *m2, struct clk *m3)
-{
-	if (parent == m0)
-		return 0;
-	else if (parent == m1)
-		return 1;
-	else if (parent == m2)
-		return 2;
-	else if (parent == m3)
-		return 3;
-	else
-		BUG();
-
-	return -EINVAL;
-}
-
-static inline void __iomem *_mx51_get_pll_base(struct clk *pll)
-{
-	if (pll == &pll1_main_clk)
-		return MX51_DPLL1_BASE;
-	else if (pll == &pll2_sw_clk)
-		return MX51_DPLL2_BASE;
-	else if (pll == &pll3_sw_clk)
-		return MX51_DPLL3_BASE;
-	else
-		BUG();
-
-	return NULL;
-}
-
-static inline void __iomem *_mx53_get_pll_base(struct clk *pll)
-{
-	if (pll == &pll1_main_clk)
-		return MX53_DPLL1_BASE;
-	else if (pll == &pll2_sw_clk)
-		return MX53_DPLL2_BASE;
-	else if (pll == &pll3_sw_clk)
-		return MX53_DPLL3_BASE;
-	else if (pll == &mx53_pll4_sw_clk)
-		return MX53_DPLL4_BASE;
-	else
-		BUG();
-
-	return NULL;
-}
-
-static inline void __iomem *_get_pll_base(struct clk *pll)
-{
-	if (cpu_is_mx51())
-		return _mx51_get_pll_base(pll);
-	else
-		return _mx53_get_pll_base(pll);
-}
-
-static unsigned long clk_pll_get_rate(struct clk *clk)
-{
-	long mfi, mfn, mfd, pdf, ref_clk, mfn_abs;
-	unsigned long dp_op, dp_mfd, dp_mfn, dp_ctl, pll_hfsm, dbl;
-	void __iomem *pllbase;
-	s64 temp;
-	unsigned long parent_rate;
-
-	parent_rate = clk_get_rate(clk->parent);
-
-	pllbase = _get_pll_base(clk);
-
-	dp_ctl = __raw_readl(pllbase + MXC_PLL_DP_CTL);
-	pll_hfsm = dp_ctl & MXC_PLL_DP_CTL_HFSM;
-	dbl = dp_ctl & MXC_PLL_DP_CTL_DPDCK0_2_EN;
-
-	if (pll_hfsm == 0) {
-		dp_op = __raw_readl(pllbase + MXC_PLL_DP_OP);
-		dp_mfd = __raw_readl(pllbase + MXC_PLL_DP_MFD);
-		dp_mfn = __raw_readl(pllbase + MXC_PLL_DP_MFN);
-	} else {
-		dp_op = __raw_readl(pllbase + MXC_PLL_DP_HFS_OP);
-		dp_mfd = __raw_readl(pllbase + MXC_PLL_DP_HFS_MFD);
-		dp_mfn = __raw_readl(pllbase + MXC_PLL_DP_HFS_MFN);
-	}
-	pdf = dp_op & MXC_PLL_DP_OP_PDF_MASK;
-	mfi = (dp_op & MXC_PLL_DP_OP_MFI_MASK) >> MXC_PLL_DP_OP_MFI_OFFSET;
-	mfi = (mfi <= 5) ? 5 : mfi;
-	mfd = dp_mfd & MXC_PLL_DP_MFD_MASK;
-	mfn = mfn_abs = dp_mfn & MXC_PLL_DP_MFN_MASK;
-	/* Sign extend to 32-bits */
-	if (mfn >= 0x04000000) {
-		mfn |= 0xFC000000;
-		mfn_abs = -mfn;
-	}
-
-	ref_clk = 2 * parent_rate;
-	if (dbl != 0)
-		ref_clk *= 2;
-
-	ref_clk /= (pdf + 1);
-	temp = (u64) ref_clk * mfn_abs;
-	do_div(temp, mfd + 1);
-	if (mfn < 0)
-		temp = -temp;
-	temp = (ref_clk * mfi) + temp;
-
-	return temp;
-}
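In short, the function computes rate = ref * (mfi + mfn / (mfd + 1)) with ref = 2 * parent_rate * (dbl ? 2 : 1) / (pdf + 1). As a purely numeric illustration (not a particular board setting): a 24 MHz parent with dpdck0_2 enabled, pdf = 0, mfi = 8, mfd = 2 and mfn = 1 gives ref = 96 MHz and a PLL rate of 96 MHz * (8 + 1/3) = 800 MHz.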
-
-static int _clk_pll_set_rate(struct clk *clk, unsigned long rate)
-{
-	u32 reg;
-	void __iomem *pllbase;
-
-	long mfi, pdf, mfn, mfd = 999999;
-	s64 temp64;
-	unsigned long quad_parent_rate;
-	unsigned long pll_hfsm, dp_ctl;
-	unsigned long parent_rate;
-
-	parent_rate = clk_get_rate(clk->parent);
-
-	pllbase = _get_pll_base(clk);
-
-	quad_parent_rate = 4 * parent_rate;
-	pdf = mfi = -1;
-	while (++pdf < 16 && mfi < 5)
-		mfi = rate * (pdf+1) / quad_parent_rate;
-	if (mfi > 15)
-		return -EINVAL;
-	pdf--;
-
-	temp64 = rate * (pdf+1) - quad_parent_rate * mfi;
-	do_div(temp64, quad_parent_rate/1000000);
-	mfn = (long)temp64;
-
-	dp_ctl = __raw_readl(pllbase + MXC_PLL_DP_CTL);
-	/* use dpdck0_2 */
-	__raw_writel(dp_ctl | 0x1000L, pllbase + MXC_PLL_DP_CTL);
-	pll_hfsm = dp_ctl & MXC_PLL_DP_CTL_HFSM;
-	if (pll_hfsm == 0) {
-		reg = mfi << 4 | pdf;
-		__raw_writel(reg, pllbase + MXC_PLL_DP_OP);
-		__raw_writel(mfd, pllbase + MXC_PLL_DP_MFD);
-		__raw_writel(mfn, pllbase + MXC_PLL_DP_MFN);
-	} else {
-		reg = mfi << 4 | pdf;
-		__raw_writel(reg, pllbase + MXC_PLL_DP_HFS_OP);
-		__raw_writel(mfd, pllbase + MXC_PLL_DP_HFS_MFD);
-		__raw_writel(mfn, pllbase + MXC_PLL_DP_HFS_MFN);
-	}
-
-	return 0;
-}
-
-static int _clk_pll_enable(struct clk *clk)
-{
-	u32 reg;
-	void __iomem *pllbase;
-	int i = 0;
-
-	pllbase = _get_pll_base(clk);
-	reg = __raw_readl(pllbase + MXC_PLL_DP_CTL);
-	if (reg & MXC_PLL_DP_CTL_UPEN)
-		return 0;
-
-	reg |= MXC_PLL_DP_CTL_UPEN;
-	__raw_writel(reg, pllbase + MXC_PLL_DP_CTL);
-
-	/* Wait for lock */
-	do {
-		reg = __raw_readl(pllbase + MXC_PLL_DP_CTL);
-		if (reg & MXC_PLL_DP_CTL_LRF)
-			break;
-
-		udelay(1);
-	} while (++i < MAX_DPLL_WAIT_TRIES);
-
-	if (i == MAX_DPLL_WAIT_TRIES) {
-		pr_err("MX5: pll locking failed\n");
-		return -EINVAL;
-	}
-
-	return 0;
-}
-
-static void _clk_pll_disable(struct clk *clk)
-{
-	u32 reg;
-	void __iomem *pllbase;
-
-	pllbase = _get_pll_base(clk);
-	reg = __raw_readl(pllbase + MXC_PLL_DP_CTL) & ~MXC_PLL_DP_CTL_UPEN;
-	__raw_writel(reg, pllbase + MXC_PLL_DP_CTL);
-}
-
-static int _clk_pll1_sw_set_parent(struct clk *clk, struct clk *parent)
-{
-	u32 reg, step;
-
-	reg = __raw_readl(MXC_CCM_CCSR);
-
-	/* When switching from pll1_main_clk to a bypass clock, first select
-	 * the bypass source in 'step_sel', then switch the glitchless mux
-	 * 'pll1_sw_clk_sel'.
-	 *
-	 * When switching back, do it in the reverse order.
-	 */
-	if (parent == &pll1_main_clk) {
-		/* Switch to pll1_main_clk */
-		reg &= ~MXC_CCM_CCSR_PLL1_SW_CLK_SEL;
-		__raw_writel(reg, MXC_CCM_CCSR);
-		/* step_clk mux switched to lp_apm, to save power. */
-		reg = __raw_readl(MXC_CCM_CCSR);
-		reg &= ~MXC_CCM_CCSR_STEP_SEL_MASK;
-		reg |= (MXC_CCM_CCSR_STEP_SEL_LP_APM <<
-				MXC_CCM_CCSR_STEP_SEL_OFFSET);
-	} else {
-		if (parent == &lp_apm_clk) {
-			step = MXC_CCM_CCSR_STEP_SEL_LP_APM;
-		} else  if (parent == &pll2_sw_clk) {
-			step = MXC_CCM_CCSR_STEP_SEL_PLL2_DIVIDED;
-		} else  if (parent == &pll3_sw_clk) {
-			step = MXC_CCM_CCSR_STEP_SEL_PLL3_DIVIDED;
-		} else
-			return -EINVAL;
-
-		reg &= ~MXC_CCM_CCSR_STEP_SEL_MASK;
-		reg |= (step << MXC_CCM_CCSR_STEP_SEL_OFFSET);
-
-		__raw_writel(reg, MXC_CCM_CCSR);
-		/* Switch to step_clk */
-		reg = __raw_readl(MXC_CCM_CCSR);
-		reg |= MXC_CCM_CCSR_PLL1_SW_CLK_SEL;
-	}
-	__raw_writel(reg, MXC_CCM_CCSR);
-	return 0;
-}
-
-static unsigned long clk_pll1_sw_get_rate(struct clk *clk)
-{
-	u32 reg, div;
-	unsigned long parent_rate;
-
-	parent_rate = clk_get_rate(clk->parent);
-
-	reg = __raw_readl(MXC_CCM_CCSR);
-
-	if (clk->parent == &pll2_sw_clk) {
-		div = ((reg & MXC_CCM_CCSR_PLL2_PODF_MASK) >>
-		       MXC_CCM_CCSR_PLL2_PODF_OFFSET) + 1;
-	} else if (clk->parent == &pll3_sw_clk) {
-		div = ((reg & MXC_CCM_CCSR_PLL3_PODF_MASK) >>
-		       MXC_CCM_CCSR_PLL3_PODF_OFFSET) + 1;
-	} else
-		div = 1;
-	return parent_rate / div;
-}
-
-static int _clk_pll2_sw_set_parent(struct clk *clk, struct clk *parent)
-{
-	u32 reg;
-
-	reg = __raw_readl(MXC_CCM_CCSR);
-
-	if (parent == &pll2_sw_clk)
-		reg &= ~MXC_CCM_CCSR_PLL2_SW_CLK_SEL;
-	else
-		reg |= MXC_CCM_CCSR_PLL2_SW_CLK_SEL;
-
-	__raw_writel(reg, MXC_CCM_CCSR);
-	return 0;
-}
-
-static int _clk_lp_apm_set_parent(struct clk *clk, struct clk *parent)
-{
-	u32 reg;
-
-	if (parent == &osc_clk)
-		reg = __raw_readl(MXC_CCM_CCSR) & ~MXC_CCM_CCSR_LP_APM_SEL;
-	else
-		return -EINVAL;
-
-	__raw_writel(reg, MXC_CCM_CCSR);
-
-	return 0;
-}
-
-static unsigned long clk_cpu_get_rate(struct clk *clk)
-{
-	u32 cacrr, div;
-	unsigned long parent_rate;
-
-	parent_rate = clk_get_rate(clk->parent);
-	cacrr = __raw_readl(MXC_CCM_CACRR);
-	div = (cacrr & MXC_CCM_CACRR_ARM_PODF_MASK) + 1;
-
-	return parent_rate / div;
-}
-
-static int clk_cpu_set_rate(struct clk *clk, unsigned long rate)
-{
-	u32 reg, cpu_podf;
-	unsigned long parent_rate;
-
-	parent_rate = clk_get_rate(clk->parent);
-	cpu_podf = parent_rate / rate - 1;
-	/* use post divider to change freq */
-	reg = __raw_readl(MXC_CCM_CACRR);
-	reg &= ~MXC_CCM_CACRR_ARM_PODF_MASK;
-	reg |= cpu_podf << MXC_CCM_CACRR_ARM_PODF_OFFSET;
-	__raw_writel(reg, MXC_CCM_CACRR);
-
-	return 0;
-}
-
-static int _clk_periph_apm_set_parent(struct clk *clk, struct clk *parent)
-{
-	u32 reg, mux;
-	int i = 0;
-
-	mux = _get_mux(parent, &pll1_sw_clk, &pll3_sw_clk, &lp_apm_clk, NULL);
-
-	reg = __raw_readl(MXC_CCM_CBCMR) & ~MXC_CCM_CBCMR_PERIPH_CLK_SEL_MASK;
-	reg |= mux << MXC_CCM_CBCMR_PERIPH_CLK_SEL_OFFSET;
-	__raw_writel(reg, MXC_CCM_CBCMR);
-
-	/* Wait for lock */
-	do {
-		reg = __raw_readl(MXC_CCM_CDHIPR);
-		if (!(reg &  MXC_CCM_CDHIPR_PERIPH_CLK_SEL_BUSY))
-			break;
-
-		udelay(1);
-	} while (++i < MAX_DPLL_WAIT_TRIES);
-
-	if (i == MAX_DPLL_WAIT_TRIES) {
-		pr_err("MX5: Set parent for periph_apm clock failed\n");
-		return -EINVAL;
-	}
-
-	return 0;
-}
-
-static int _clk_main_bus_set_parent(struct clk *clk, struct clk *parent)
-{
-	u32 reg;
-
-	reg = __raw_readl(MXC_CCM_CBCDR);
-
-	if (parent == &pll2_sw_clk)
-		reg &= ~MXC_CCM_CBCDR_PERIPH_CLK_SEL;
-	else if (parent == &periph_apm_clk)
-		reg |= MXC_CCM_CBCDR_PERIPH_CLK_SEL;
-	else
-		return -EINVAL;
-
-	__raw_writel(reg, MXC_CCM_CBCDR);
-
-	return 0;
-}
-
-static struct clk main_bus_clk = {
-	.parent = &pll2_sw_clk,
-	.set_parent = _clk_main_bus_set_parent,
-};
-
-static unsigned long clk_ahb_get_rate(struct clk *clk)
-{
-	u32 reg, div;
-	unsigned long parent_rate;
-
-	parent_rate = clk_get_rate(clk->parent);
-
-	reg = __raw_readl(MXC_CCM_CBCDR);
-	div = ((reg & MXC_CCM_CBCDR_AHB_PODF_MASK) >>
-	       MXC_CCM_CBCDR_AHB_PODF_OFFSET) + 1;
-	return parent_rate / div;
-}
-
-
-static int _clk_ahb_set_rate(struct clk *clk, unsigned long rate)
-{
-	u32 reg, div;
-	unsigned long parent_rate;
-	int i = 0;
-
-	parent_rate = clk_get_rate(clk->parent);
-
-	div = parent_rate / rate;
-	if (div > 8 || div < 1 || ((parent_rate / div) != rate))
-		return -EINVAL;
-
-	reg = __raw_readl(MXC_CCM_CBCDR);
-	reg &= ~MXC_CCM_CBCDR_AHB_PODF_MASK;
-	reg |= (div - 1) << MXC_CCM_CBCDR_AHB_PODF_OFFSET;
-	__raw_writel(reg, MXC_CCM_CBCDR);
-
-	/* Wait for lock */
-	do {
-		reg = __raw_readl(MXC_CCM_CDHIPR);
-		if (!(reg & MXC_CCM_CDHIPR_AHB_PODF_BUSY))
-			break;
-
-		udelay(1);
-	} while (++i < MAX_DPLL_WAIT_TRIES);
-
-	if (i == MAX_DPLL_WAIT_TRIES) {
-		pr_err("MX5: clk_ahb_set_rate failed\n");
-		return -EINVAL;
-	}
-
-	return 0;
-}
-
-static unsigned long _clk_ahb_round_rate(struct clk *clk,
-						unsigned long rate)
-{
-	u32 div;
-	unsigned long parent_rate;
-
-	parent_rate = clk_get_rate(clk->parent);
-
-	div = parent_rate / rate;
-	if (div > 8)
-		div = 8;
-	else if (div == 0)
-		div++;
-	return parent_rate / div;
-}
-
-
-static int _clk_max_enable(struct clk *clk)
-{
-	u32 reg;
-
-	_clk_ccgr_enable(clk);
-
-	/* Handshake with MAX when LPM is entered. */
-	reg = __raw_readl(MXC_CCM_CLPCR);
-	if (cpu_is_mx51())
-		reg &= ~MX51_CCM_CLPCR_BYPASS_MAX_LPM_HS;
-	else if (cpu_is_mx53())
-		reg &= ~MX53_CCM_CLPCR_BYPASS_MAX_LPM_HS;
-	__raw_writel(reg, MXC_CCM_CLPCR);
-
-	return 0;
-}
-
-static void _clk_max_disable(struct clk *clk)
-{
-	u32 reg;
-
-	_clk_ccgr_disable_inwait(clk);
-
-	/* No handshake with MAX when LPM is entered, as it is disabled. */
-	reg = __raw_readl(MXC_CCM_CLPCR);
-	if (cpu_is_mx51())
-		reg |= MX51_CCM_CLPCR_BYPASS_MAX_LPM_HS;
-	else if (cpu_is_mx53())
-		reg |= MX53_CCM_CLPCR_BYPASS_MAX_LPM_HS;
-	__raw_writel(reg, MXC_CCM_CLPCR);
-}
-
-static unsigned long clk_ipg_get_rate(struct clk *clk)
-{
-	u32 reg, div;
-	unsigned long parent_rate;
-
-	parent_rate = clk_get_rate(clk->parent);
-
-	reg = __raw_readl(MXC_CCM_CBCDR);
-	div = ((reg & MXC_CCM_CBCDR_IPG_PODF_MASK) >>
-	       MXC_CCM_CBCDR_IPG_PODF_OFFSET) + 1;
-
-	return parent_rate / div;
-}
-
-static unsigned long clk_ipg_per_get_rate(struct clk *clk)
-{
-	u32 reg, prediv1, prediv2, podf;
-	unsigned long parent_rate;
-
-	parent_rate = clk_get_rate(clk->parent);
-
-	if (clk->parent == &main_bus_clk || clk->parent == &lp_apm_clk) {
-		/* the main_bus_clk is the one before the DVFS engine */
-		reg = __raw_readl(MXC_CCM_CBCDR);
-		prediv1 = ((reg & MXC_CCM_CBCDR_PERCLK_PRED1_MASK) >>
-			   MXC_CCM_CBCDR_PERCLK_PRED1_OFFSET) + 1;
-		prediv2 = ((reg & MXC_CCM_CBCDR_PERCLK_PRED2_MASK) >>
-			   MXC_CCM_CBCDR_PERCLK_PRED2_OFFSET) + 1;
-		podf = ((reg & MXC_CCM_CBCDR_PERCLK_PODF_MASK) >>
-			MXC_CCM_CBCDR_PERCLK_PODF_OFFSET) + 1;
-		return parent_rate / (prediv1 * prediv2 * podf);
-	} else if (clk->parent == &ipg_clk)
-		return parent_rate;
-	else
-		BUG();
-}
-
-static int _clk_ipg_per_set_parent(struct clk *clk, struct clk *parent)
-{
-	u32 reg;
-
-	reg = __raw_readl(MXC_CCM_CBCMR);
-
-	reg &= ~MXC_CCM_CBCMR_PERCLK_LP_APM_CLK_SEL;
-	reg &= ~MXC_CCM_CBCMR_PERCLK_IPG_CLK_SEL;
-
-	if (parent == &ipg_clk)
-		reg |= MXC_CCM_CBCMR_PERCLK_IPG_CLK_SEL;
-	else if (parent == &lp_apm_clk)
-		reg |= MXC_CCM_CBCMR_PERCLK_LP_APM_CLK_SEL;
-	else if (parent != &main_bus_clk)
-		return -EINVAL;
-
-	__raw_writel(reg, MXC_CCM_CBCMR);
-
-	return 0;
-}
-
-#define clk_nfc_set_parent	NULL
-
-static unsigned long clk_nfc_get_rate(struct clk *clk)
-{
-	unsigned long rate;
-	u32 reg, div;
-
-	reg = __raw_readl(MXC_CCM_CBCDR);
-	div = ((reg & MXC_CCM_CBCDR_NFC_PODF_MASK) >>
-	       MXC_CCM_CBCDR_NFC_PODF_OFFSET) + 1;
-	rate = clk_get_rate(clk->parent) / div;
-	WARN_ON(rate == 0);
-	return rate;
-}
-
-static unsigned long clk_nfc_round_rate(struct clk *clk,
-						unsigned long rate)
-{
-	u32 div;
-	unsigned long parent_rate = clk_get_rate(clk->parent);
-
-	if (!rate)
-		return -EINVAL;
-
-	div = parent_rate / rate;
-
-	if (parent_rate % rate)
-		div++;
-
-	if (div > 8)
-		return -EINVAL;
-
-	return parent_rate / div;
-
-}
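For example, with a hypothetical 200 MHz parent and a requested 66 MHz, div starts at 3, the non-zero remainder bumps it to 4, and the function returns 50 MHz; any request that would need a divider larger than 8 is rejected with -EINVAL.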
-
-static int clk_nfc_set_rate(struct clk *clk, unsigned long rate)
-{
-	u32 reg, div;
-
-	div = clk_get_rate(clk->parent) / rate;
-	if (div == 0)
-		div++;
-	if (((clk_get_rate(clk->parent) / div) != rate) || (div > 8))
-		return -EINVAL;
-
-	reg = __raw_readl(MXC_CCM_CBCDR);
-	reg &= ~MXC_CCM_CBCDR_NFC_PODF_MASK;
-	reg |= (div - 1) << MXC_CCM_CBCDR_NFC_PODF_OFFSET;
-	__raw_writel(reg, MXC_CCM_CBCDR);
-
-	while (__raw_readl(MXC_CCM_CDHIPR) &
-			MXC_CCM_CDHIPR_NFC_IPG_INT_MEM_PODF_BUSY){
-	}
-
-	return 0;
-}
-
-static unsigned long get_high_reference_clock_rate(struct clk *clk)
-{
-	return external_high_reference;
-}
-
-static unsigned long get_low_reference_clock_rate(struct clk *clk)
-{
-	return external_low_reference;
-}
-
-static unsigned long get_oscillator_reference_clock_rate(struct clk *clk)
-{
-	return oscillator_reference;
-}
-
-static unsigned long get_ckih2_reference_clock_rate(struct clk *clk)
-{
-	return ckih2_reference;
-}
-
-static unsigned long clk_emi_slow_get_rate(struct clk *clk)
-{
-	u32 reg, div;
-
-	reg = __raw_readl(MXC_CCM_CBCDR);
-	div = ((reg & MXC_CCM_CBCDR_EMI_PODF_MASK) >>
-	       MXC_CCM_CBCDR_EMI_PODF_OFFSET) + 1;
-
-	return clk_get_rate(clk->parent) / div;
-}
-
-static unsigned long _clk_ddr_hf_get_rate(struct clk *clk)
-{
-	unsigned long rate;
-	u32 reg, div;
-
-	reg = __raw_readl(MXC_CCM_CBCDR);
-	div = ((reg & MXC_CCM_CBCDR_DDR_PODF_MASK) >>
-		MXC_CCM_CBCDR_DDR_PODF_OFFSET) + 1;
-	rate = clk_get_rate(clk->parent) / div;
-
-	return rate;
-}
-
-/* External high frequency clock */
-static struct clk ckih_clk = {
-	.get_rate = get_high_reference_clock_rate,
-};
-
-static struct clk ckih2_clk = {
-	.get_rate = get_ckih2_reference_clock_rate,
-};
-
-static struct clk osc_clk = {
-	.get_rate = get_oscillator_reference_clock_rate,
-};
-
-/* External low frequency (32kHz) clock */
-static struct clk ckil_clk = {
-	.get_rate = get_low_reference_clock_rate,
-};
-
-static struct clk pll1_main_clk = {
-	.parent = &osc_clk,
-	.get_rate = clk_pll_get_rate,
-	.enable = _clk_pll_enable,
-	.disable = _clk_pll_disable,
-};
-
-/* Clock tree block diagram (WIP):
- * 	CCM: Clock Controller Module
- *
- * PLL output -> |
- *               | CCM Switcher -> CCM_CLK_ROOT_GEN ->
- * PLL bypass -> |
- *
- */
-
-/* PLL1 SW supplies to ARM core */
-static struct clk pll1_sw_clk = {
-	.parent = &pll1_main_clk,
-	.set_parent = _clk_pll1_sw_set_parent,
-	.get_rate = clk_pll1_sw_get_rate,
-};
-
-/* PLL2 SW supplies to AXI/AHB/IP buses */
-static struct clk pll2_sw_clk = {
-	.parent = &osc_clk,
-	.get_rate = clk_pll_get_rate,
-	.set_rate = _clk_pll_set_rate,
-	.set_parent = _clk_pll2_sw_set_parent,
-	.enable = _clk_pll_enable,
-	.disable = _clk_pll_disable,
-};
-
-/* PLL3 SW supplies to serial clocks like USB, SSI, etc. */
-static struct clk pll3_sw_clk = {
-	.parent = &osc_clk,
-	.set_rate = _clk_pll_set_rate,
-	.get_rate = clk_pll_get_rate,
-	.enable = _clk_pll_enable,
-	.disable = _clk_pll_disable,
-};
-
-/* PLL4 SW supplies to the LVDS Display Bridge (LDB) */
-static struct clk mx53_pll4_sw_clk = {
-	.parent = &osc_clk,
-	.set_rate = _clk_pll_set_rate,
-	.enable = _clk_pll_enable,
-	.disable = _clk_pll_disable,
-};
-
-/* Low-power Audio Playback Mode clock */
-static struct clk lp_apm_clk = {
-	.parent = &osc_clk,
-	.set_parent = _clk_lp_apm_set_parent,
-};
-
-static struct clk periph_apm_clk = {
-	.parent = &pll1_sw_clk,
-	.set_parent = _clk_periph_apm_set_parent,
-};
-
-static struct clk cpu_clk = {
-	.parent = &pll1_sw_clk,
-	.get_rate = clk_cpu_get_rate,
-	.set_rate = clk_cpu_set_rate,
-};
-
-static struct clk ahb_clk = {
-	.parent = &main_bus_clk,
-	.get_rate = clk_ahb_get_rate,
-	.set_rate = _clk_ahb_set_rate,
-	.round_rate = _clk_ahb_round_rate,
-};
-
-static struct clk iim_clk = {
-	.parent = &ipg_clk,
-	.enable_reg = MXC_CCM_CCGR0,
-	.enable_shift = MXC_CCM_CCGRx_CG15_OFFSET,
-};
-
-/* Main IP interface clock for access to registers */
-static struct clk ipg_clk = {
-	.parent = &ahb_clk,
-	.get_rate = clk_ipg_get_rate,
-};
-
-static struct clk ipg_perclk = {
-	.parent = &lp_apm_clk,
-	.get_rate = clk_ipg_per_get_rate,
-	.set_parent = _clk_ipg_per_set_parent,
-};
-
-static struct clk ahb_max_clk = {
-	.parent = &ahb_clk,
-	.enable_reg = MXC_CCM_CCGR0,
-	.enable_shift = MXC_CCM_CCGRx_CG14_OFFSET,
-	.enable = _clk_max_enable,
-	.disable = _clk_max_disable,
-};
-
-static struct clk aips_tz1_clk = {
-	.parent = &ahb_clk,
-	.secondary = &ahb_max_clk,
-	.enable_reg = MXC_CCM_CCGR0,
-	.enable_shift = MXC_CCM_CCGRx_CG12_OFFSET,
-	.enable = _clk_ccgr_enable,
-	.disable = _clk_ccgr_disable_inwait,
-};
-
-static struct clk aips_tz2_clk = {
-	.parent = &ahb_clk,
-	.secondary = &ahb_max_clk,
-	.enable_reg = MXC_CCM_CCGR0,
-	.enable_shift = MXC_CCM_CCGRx_CG13_OFFSET,
-	.enable = _clk_ccgr_enable,
-	.disable = _clk_ccgr_disable_inwait,
-};
-
-static struct clk gpc_dvfs_clk = {
-	.enable_reg = MXC_CCM_CCGR5,
-	.enable_shift = MXC_CCM_CCGRx_CG12_OFFSET,
-	.enable = _clk_ccgr_enable,
-	.disable = _clk_ccgr_disable,
-};
-
-static struct clk gpt_32k_clk = {
-	.id = 0,
-	.parent = &ckil_clk,
-};
-
-static struct clk dummy_clk = {
-	.id = 0,
-};
-
-static struct clk emi_slow_clk = {
-	.parent = &pll2_sw_clk,
-	.enable_reg = MXC_CCM_CCGR5,
-	.enable_shift = MXC_CCM_CCGRx_CG8_OFFSET,
-	.enable = _clk_ccgr_enable,
-	.disable = _clk_ccgr_disable_inwait,
-	.get_rate = clk_emi_slow_get_rate,
-};
-
-static int clk_ipu_enable(struct clk *clk)
-{
-	u32 reg;
-
-	_clk_ccgr_enable(clk);
-
-	/* Enable handshake with IPU when certain clock rates are changed */
-	reg = __raw_readl(MXC_CCM_CCDR);
-	reg &= ~MXC_CCM_CCDR_IPU_HS_MASK;
-	__raw_writel(reg, MXC_CCM_CCDR);
-
-	/* Enable handshake with IPU when LPM is entered */
-	reg = __raw_readl(MXC_CCM_CLPCR);
-	reg &= ~MXC_CCM_CLPCR_BYPASS_IPU_LPM_HS;
-	__raw_writel(reg, MXC_CCM_CLPCR);
-
-	return 0;
-}
-
-static void clk_ipu_disable(struct clk *clk)
-{
-	u32 reg;
-
-	_clk_ccgr_disable(clk);
-
-	/* Disable handshake with IPU when dividers are changed */
-	reg = __raw_readl(MXC_CCM_CCDR);
-	reg |= MXC_CCM_CCDR_IPU_HS_MASK;
-	__raw_writel(reg, MXC_CCM_CCDR);
-
-	/* Disable handshake with IPU when LPM is entered */
-	reg = __raw_readl(MXC_CCM_CLPCR);
-	reg |= MXC_CCM_CLPCR_BYPASS_IPU_LPM_HS;
-	__raw_writel(reg, MXC_CCM_CLPCR);
-}
-
-static struct clk ahbmux1_clk = {
-	.parent = &ahb_clk,
-	.secondary = &ahb_max_clk,
-	.enable_reg = MXC_CCM_CCGR0,
-	.enable_shift = MXC_CCM_CCGRx_CG8_OFFSET,
-	.enable = _clk_ccgr_enable,
-	.disable = _clk_ccgr_disable_inwait,
-};
-
-static struct clk ipu_sec_clk = {
-	.parent = &emi_fast_clk,
-	.secondary = &ahbmux1_clk,
-};
-
-static struct clk ddr_hf_clk = {
-	.parent = &pll1_sw_clk,
-	.get_rate = _clk_ddr_hf_get_rate,
-};
-
-static struct clk ddr_clk = {
-	.parent = &ddr_hf_clk,
-};
-
-/* Clock definitions for the MIPI HSC unit, which has been removed
- * from the documentation but not from the hardware.
- */
-static int _clk_hsc_enable(struct clk *clk)
-{
-	u32 reg;
-
-	_clk_ccgr_enable(clk);
-	/* Handshake with IPU when certain clock rates are changed. */
-	reg = __raw_readl(MXC_CCM_CCDR);
-	reg &= ~MXC_CCM_CCDR_HSC_HS_MASK;
-	__raw_writel(reg, MXC_CCM_CCDR);
-
-	reg = __raw_readl(MXC_CCM_CLPCR);
-	reg &= ~MXC_CCM_CLPCR_BYPASS_HSC_LPM_HS;
-	__raw_writel(reg, MXC_CCM_CLPCR);
-
-	return 0;
-}
-
-static void _clk_hsc_disable(struct clk *clk)
-{
-	u32 reg;
-
-	_clk_ccgr_disable(clk);
-	/* No handshake with HSC, as it is not enabled. */
-	reg = __raw_readl(MXC_CCM_CCDR);
-	reg |= MXC_CCM_CCDR_HSC_HS_MASK;
-	__raw_writel(reg, MXC_CCM_CCDR);
-
-	reg = __raw_readl(MXC_CCM_CLPCR);
-	reg |= MXC_CCM_CLPCR_BYPASS_HSC_LPM_HS;
-	__raw_writel(reg, MXC_CCM_CLPCR);
-}
-
-static struct clk mipi_hsp_clk = {
-	.parent = &ipu_clk,
-	.enable_reg = MXC_CCM_CCGR4,
-	.enable_shift = MXC_CCM_CCGRx_CG6_OFFSET,
-	.enable = _clk_hsc_enable,
-	.disable = _clk_hsc_disable,
-	.secondary = &mipi_hsc1_clk,
-};
-
-#define DEFINE_CLOCK_CCGR(name, i, er, es, pfx, p, s)	\
-	static struct clk name = {			\
-		.id		= i,			\
-		.enable_reg	= er,			\
-		.enable_shift	= es,			\
-		.get_rate	= pfx##_get_rate,	\
-		.set_rate	= pfx##_set_rate,	\
-		.round_rate	= pfx##_round_rate,	\
-		.set_parent	= pfx##_set_parent,	\
-		.enable		= _clk_ccgr_enable,	\
-		.disable	= _clk_ccgr_disable,	\
-		.parent		= p,			\
-		.secondary	= s,			\
-	}
-
-#define DEFINE_CLOCK_MAX(name, i, er, es, pfx, p, s)	\
-	static struct clk name = {			\
-		.id		= i,			\
-		.enable_reg	= er,			\
-		.enable_shift	= es,			\
-		.get_rate	= pfx##_get_rate,	\
-		.set_rate	= pfx##_set_rate,	\
-		.set_parent	= pfx##_set_parent,	\
-		.enable		= _clk_max_enable,	\
-		.disable	= _clk_max_disable,	\
-		.parent		= p,			\
-		.secondary	= s,			\
-	}
-
-#define CLK_GET_RATE(name, nr, bitsname)				\
-static unsigned long clk_##name##_get_rate(struct clk *clk)		\
-{									\
-	u32 reg, pred, podf;						\
-									\
-	reg = __raw_readl(MXC_CCM_CSCDR##nr);				\
-	pred = (reg & MXC_CCM_CSCDR##nr##_##bitsname##_CLK_PRED_MASK)	\
-		>> MXC_CCM_CSCDR##nr##_##bitsname##_CLK_PRED_OFFSET;	\
-	podf = (reg & MXC_CCM_CSCDR##nr##_##bitsname##_CLK_PODF_MASK)	\
-		>> MXC_CCM_CSCDR##nr##_##bitsname##_CLK_PODF_OFFSET;	\
-									\
-	return DIV_ROUND_CLOSEST(clk_get_rate(clk->parent),		\
-			(pred + 1) * (podf + 1));			\
-}
-
-#define CLK_SET_PARENT(name, nr, bitsname)				\
-static int clk_##name##_set_parent(struct clk *clk, struct clk *parent)	\
-{									\
-	u32 reg, mux;							\
-									\
-	mux = _get_mux(parent, &pll1_sw_clk, &pll2_sw_clk,		\
-			&pll3_sw_clk, &lp_apm_clk);			\
-	reg = __raw_readl(MXC_CCM_CSCMR##nr) &				\
-		~MXC_CCM_CSCMR##nr##_##bitsname##_CLK_SEL_MASK;		\
-	reg |= mux << MXC_CCM_CSCMR##nr##_##bitsname##_CLK_SEL_OFFSET;	\
-	__raw_writel(reg, MXC_CCM_CSCMR##nr);				\
-									\
-	return 0;							\
-}
-
-#define CLK_SET_RATE(name, nr, bitsname)				\
-static int clk_##name##_set_rate(struct clk *clk, unsigned long rate)	\
-{									\
-	u32 reg, div, parent_rate;					\
-	u32 pre = 0, post = 0;						\
-									\
-	parent_rate = clk_get_rate(clk->parent);			\
-	div = parent_rate / rate;					\
-									\
-	if ((parent_rate / div) != rate)				\
-		return -EINVAL;						\
-									\
-	__calc_pre_post_dividers(div, &pre, &post,			\
-		(MXC_CCM_CSCDR##nr##_##bitsname##_CLK_PRED_MASK >>	\
-		MXC_CCM_CSCDR##nr##_##bitsname##_CLK_PRED_OFFSET) + 1,	\
-		(MXC_CCM_CSCDR##nr##_##bitsname##_CLK_PODF_MASK >>	\
-		MXC_CCM_CSCDR##nr##_##bitsname##_CLK_PODF_OFFSET) + 1);\
-									\
-	/* Set the clock pre and post dividers */			\
-	reg = __raw_readl(MXC_CCM_CSCDR##nr) &				\
-		~(MXC_CCM_CSCDR##nr##_##bitsname##_CLK_PRED_MASK	\
-		| MXC_CCM_CSCDR##nr##_##bitsname##_CLK_PODF_MASK);	\
-	reg |= (post - 1) <<						\
-		MXC_CCM_CSCDR##nr##_##bitsname##_CLK_PODF_OFFSET;	\
-	reg |= (pre - 1) <<						\
-		MXC_CCM_CSCDR##nr##_##bitsname##_CLK_PRED_OFFSET;	\
-	__raw_writel(reg, MXC_CCM_CSCDR##nr);				\
-									\
-	return 0;							\
-}
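For instance, the CLK_GET_RATE(uart, 1, UART) instantiation just below expands to clk_uart_get_rate(), which reads the UART_CLK_PRED and UART_CLK_PODF fields of CSCDR1 and returns DIV_ROUND_CLOSEST(parent_rate, (pred + 1) * (podf + 1)); CLK_SET_RATE likewise splits the requested divider with __calc_pre_post_dividers() and writes the two fields back.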
-
-/* UART */
-CLK_GET_RATE(uart, 1, UART)
-CLK_SET_PARENT(uart, 1, UART)
-
-static struct clk uart_root_clk = {
-	.parent = &pll2_sw_clk,
-	.get_rate = clk_uart_get_rate,
-	.set_parent = clk_uart_set_parent,
-};
-
-/* USBOH3 */
-CLK_GET_RATE(usboh3, 1, USBOH3)
-CLK_SET_PARENT(usboh3, 1, USBOH3)
-
-static struct clk usboh3_clk = {
-	.parent = &pll2_sw_clk,
-	.get_rate = clk_usboh3_get_rate,
-	.set_parent = clk_usboh3_set_parent,
-	.enable = _clk_ccgr_enable,
-	.disable = _clk_ccgr_disable,
-	.enable_reg = MXC_CCM_CCGR2,
-	.enable_shift = MXC_CCM_CCGRx_CG14_OFFSET,
-};
-
-static struct clk usb_ahb_clk = {
-	.parent = &ipg_clk,
-	.enable = _clk_ccgr_enable,
-	.disable = _clk_ccgr_disable,
-	.enable_reg = MXC_CCM_CCGR2,
-	.enable_shift = MXC_CCM_CCGRx_CG13_OFFSET,
-};
-
-static int clk_usb_phy1_set_parent(struct clk *clk, struct clk *parent)
-{
-	u32 reg;
-
-	reg = __raw_readl(MXC_CCM_CSCMR1) & ~MXC_CCM_CSCMR1_USB_PHY_CLK_SEL;
-
-	if (parent == &pll3_sw_clk)
-		reg |= 1 << MXC_CCM_CSCMR1_USB_PHY_CLK_SEL_OFFSET;
-
-	__raw_writel(reg, MXC_CCM_CSCMR1);
-
-	return 0;
-}
-
-static struct clk usb_phy1_clk = {
-	.parent = &pll3_sw_clk,
-	.set_parent = clk_usb_phy1_set_parent,
-	.enable = _clk_ccgr_enable,
-	.enable_reg = MXC_CCM_CCGR2,
-	.enable_shift = MXC_CCM_CCGRx_CG0_OFFSET,
-	.disable = _clk_ccgr_disable,
-};
-
-/* eCSPI */
-CLK_GET_RATE(ecspi, 2, CSPI)
-CLK_SET_PARENT(ecspi, 1, CSPI)
-
-static struct clk ecspi_main_clk = {
-	.parent = &pll3_sw_clk,
-	.get_rate = clk_ecspi_get_rate,
-	.set_parent = clk_ecspi_set_parent,
-};
-
-/* eSDHC */
-CLK_GET_RATE(esdhc1, 1, ESDHC1_MSHC1)
-CLK_SET_PARENT(esdhc1, 1, ESDHC1_MSHC1)
-CLK_SET_RATE(esdhc1, 1, ESDHC1_MSHC1)
-
-/* mx51 specific */
-CLK_GET_RATE(esdhc2, 1, ESDHC2_MSHC2)
-CLK_SET_PARENT(esdhc2, 1, ESDHC2_MSHC2)
-CLK_SET_RATE(esdhc2, 1, ESDHC2_MSHC2)
-
-static int clk_esdhc3_set_parent(struct clk *clk, struct clk *parent)
-{
-	u32 reg;
-
-	reg = __raw_readl(MXC_CCM_CSCMR1);
-	if (parent == &esdhc1_clk)
-		reg &= ~MXC_CCM_CSCMR1_ESDHC3_CLK_SEL;
-	else if (parent == &esdhc2_clk)
-		reg |= MXC_CCM_CSCMR1_ESDHC3_CLK_SEL;
-	else
-		return -EINVAL;
-	__raw_writel(reg, MXC_CCM_CSCMR1);
-
-	return 0;
-}
-
-static int clk_esdhc4_set_parent(struct clk *clk, struct clk *parent)
-{
-	u32 reg;
-
-	reg = __raw_readl(MXC_CCM_CSCMR1);
-	if (parent == &esdhc1_clk)
-		reg &= ~MXC_CCM_CSCMR1_ESDHC4_CLK_SEL;
-	else if (parent == &esdhc2_clk)
-		reg |= MXC_CCM_CSCMR1_ESDHC4_CLK_SEL;
-	else
-		return -EINVAL;
-	__raw_writel(reg, MXC_CCM_CSCMR1);
-
-	return 0;
-}
-
-/* mx53 specific */
-static int clk_esdhc2_mx53_set_parent(struct clk *clk, struct clk *parent)
-{
-	u32 reg;
-
-	reg = __raw_readl(MXC_CCM_CSCMR1);
-	if (parent == &esdhc1_clk)
-		reg &= ~MXC_CCM_CSCMR1_ESDHC2_MSHC2_MX53_CLK_SEL;
-	else if (parent == &esdhc3_mx53_clk)
-		reg |= MXC_CCM_CSCMR1_ESDHC2_MSHC2_MX53_CLK_SEL;
-	else
-		return -EINVAL;
-	__raw_writel(reg, MXC_CCM_CSCMR1);
-
-	return 0;
-}
-
-CLK_GET_RATE(esdhc3_mx53, 1, ESDHC3_MX53)
-CLK_SET_PARENT(esdhc3_mx53, 1, ESDHC3_MX53)
-CLK_SET_RATE(esdhc3_mx53, 1, ESDHC3_MX53)
-
-static int clk_esdhc4_mx53_set_parent(struct clk *clk, struct clk *parent)
-{
-	u32 reg;
-
-	reg = __raw_readl(MXC_CCM_CSCMR1);
-	if (parent == &esdhc1_clk)
-		reg &= ~MXC_CCM_CSCMR1_ESDHC4_CLK_SEL;
-	else if (parent == &esdhc3_mx53_clk)
-		reg |= MXC_CCM_CSCMR1_ESDHC4_CLK_SEL;
-	else
-		return -EINVAL;
-	__raw_writel(reg, MXC_CCM_CSCMR1);
-
-	return 0;
-}
-
-#define DEFINE_CLOCK_FULL(name, i, er, es, gr, sr, e, d, p, s)		\
-	static struct clk name = {					\
-		.id		= i,					\
-		.enable_reg	= er,					\
-		.enable_shift	= es,					\
-		.get_rate	= gr,					\
-		.set_rate	= sr,					\
-		.enable		= e,					\
-		.disable	= d,					\
-		.parent		= p,					\
-		.secondary	= s,					\
-	}
-
-#define DEFINE_CLOCK(name, i, er, es, gr, sr, p, s)			\
-	DEFINE_CLOCK_FULL(name, i, er, es, gr, sr, _clk_ccgr_enable, _clk_ccgr_disable, p, s)
-
-/* Shared peripheral bus arbiter */
-DEFINE_CLOCK(spba_clk, 0, MXC_CCM_CCGR5, MXC_CCM_CCGRx_CG0_OFFSET,
-	NULL,  NULL, &ipg_clk, NULL);
-
-/* UART */
-DEFINE_CLOCK(uart1_ipg_clk, 0, MXC_CCM_CCGR1, MXC_CCM_CCGRx_CG3_OFFSET,
-	NULL,  NULL, &ipg_clk, &aips_tz1_clk);
-DEFINE_CLOCK(uart2_ipg_clk, 1, MXC_CCM_CCGR1, MXC_CCM_CCGRx_CG5_OFFSET,
-	NULL,  NULL, &ipg_clk, &aips_tz1_clk);
-DEFINE_CLOCK(uart3_ipg_clk, 2, MXC_CCM_CCGR1, MXC_CCM_CCGRx_CG7_OFFSET,
-	NULL,  NULL, &ipg_clk, &spba_clk);
-DEFINE_CLOCK(uart4_ipg_clk, 3, MXC_CCM_CCGR7, MXC_CCM_CCGRx_CG4_OFFSET,
-	NULL,  NULL, &ipg_clk, &spba_clk);
-DEFINE_CLOCK(uart5_ipg_clk, 4, MXC_CCM_CCGR7, MXC_CCM_CCGRx_CG6_OFFSET,
-	NULL,  NULL, &ipg_clk, &spba_clk);
-DEFINE_CLOCK(uart1_clk, 0, MXC_CCM_CCGR1, MXC_CCM_CCGRx_CG4_OFFSET,
-	NULL,  NULL, &uart_root_clk, &uart1_ipg_clk);
-DEFINE_CLOCK(uart2_clk, 1, MXC_CCM_CCGR1, MXC_CCM_CCGRx_CG6_OFFSET,
-	NULL,  NULL, &uart_root_clk, &uart2_ipg_clk);
-DEFINE_CLOCK(uart3_clk, 2, MXC_CCM_CCGR1, MXC_CCM_CCGRx_CG8_OFFSET,
-	NULL,  NULL, &uart_root_clk, &uart3_ipg_clk);
-DEFINE_CLOCK(uart4_clk, 3, MXC_CCM_CCGR7, MXC_CCM_CCGRx_CG5_OFFSET,
-	NULL,  NULL, &uart_root_clk, &uart4_ipg_clk);
-DEFINE_CLOCK(uart5_clk, 4, MXC_CCM_CCGR7, MXC_CCM_CCGRx_CG7_OFFSET,
-	NULL,  NULL, &uart_root_clk, &uart5_ipg_clk);
-
-/* GPT */
-DEFINE_CLOCK(gpt_ipg_clk, 0, MXC_CCM_CCGR2, MXC_CCM_CCGRx_CG10_OFFSET,
-	NULL,  NULL, &ipg_clk, NULL);
-DEFINE_CLOCK(gpt_clk, 0, MXC_CCM_CCGR2, MXC_CCM_CCGRx_CG9_OFFSET,
-	NULL,  NULL, &ipg_clk, &gpt_ipg_clk);
-
-DEFINE_CLOCK(pwm1_clk, 0, MXC_CCM_CCGR2, MXC_CCM_CCGRx_CG6_OFFSET,
-	NULL, NULL, &ipg_perclk, NULL);
-DEFINE_CLOCK(pwm2_clk, 0, MXC_CCM_CCGR2, MXC_CCM_CCGRx_CG8_OFFSET,
-	NULL, NULL, &ipg_perclk, NULL);
-
-/* I2C */
-DEFINE_CLOCK(i2c1_clk, 0, MXC_CCM_CCGR1, MXC_CCM_CCGRx_CG9_OFFSET,
-	NULL, NULL, &ipg_perclk, NULL);
-DEFINE_CLOCK(i2c2_clk, 1, MXC_CCM_CCGR1, MXC_CCM_CCGRx_CG10_OFFSET,
-	NULL, NULL, &ipg_perclk, NULL);
-DEFINE_CLOCK(hsi2c_clk, 0, MXC_CCM_CCGR1, MXC_CCM_CCGRx_CG11_OFFSET,
-	NULL, NULL, &ipg_clk, NULL);
-DEFINE_CLOCK(i2c3_mx53_clk, 0, MXC_CCM_CCGR1, MXC_CCM_CCGRx_CG11_OFFSET,
-	NULL, NULL, &ipg_perclk, NULL);
-
-/* FEC */
-DEFINE_CLOCK(fec_clk, 0, MXC_CCM_CCGR2, MXC_CCM_CCGRx_CG12_OFFSET,
-	NULL,  NULL, &ipg_clk, NULL);
-
-/* NFC */
-DEFINE_CLOCK_CCGR(nfc_clk, 0, MXC_CCM_CCGR5, MXC_CCM_CCGRx_CG10_OFFSET,
-	clk_nfc, &emi_slow_clk, NULL);
-
-/* SSI */
-DEFINE_CLOCK(ssi1_ipg_clk, 0, MXC_CCM_CCGR3, MXC_CCM_CCGRx_CG8_OFFSET,
-	NULL, NULL, &ipg_clk, NULL);
-DEFINE_CLOCK(ssi1_clk, 0, MXC_CCM_CCGR3, MXC_CCM_CCGRx_CG9_OFFSET,
-	NULL, NULL, &pll3_sw_clk, &ssi1_ipg_clk);
-DEFINE_CLOCK(ssi2_ipg_clk, 1, MXC_CCM_CCGR3, MXC_CCM_CCGRx_CG10_OFFSET,
-	NULL, NULL, &ipg_clk, NULL);
-DEFINE_CLOCK(ssi2_clk, 1, MXC_CCM_CCGR3, MXC_CCM_CCGRx_CG11_OFFSET,
-	NULL, NULL, &pll3_sw_clk, &ssi2_ipg_clk);
-DEFINE_CLOCK(ssi3_ipg_clk, 2, MXC_CCM_CCGR3, MXC_CCM_CCGRx_CG12_OFFSET,
-	NULL, NULL, &ipg_clk, NULL);
-DEFINE_CLOCK(ssi3_clk, 2, MXC_CCM_CCGR3, MXC_CCM_CCGRx_CG13_OFFSET,
-	NULL, NULL, &pll3_sw_clk, &ssi3_ipg_clk);
-
-/* eCSPI */
-DEFINE_CLOCK_FULL(ecspi1_ipg_clk, 0, MXC_CCM_CCGR4, MXC_CCM_CCGRx_CG9_OFFSET,
-		NULL, NULL, _clk_ccgr_enable_inrun, _clk_ccgr_disable,
-		&ipg_clk, &spba_clk);
-DEFINE_CLOCK(ecspi1_clk, 0, MXC_CCM_CCGR4, MXC_CCM_CCGRx_CG10_OFFSET,
-		NULL, NULL, &ecspi_main_clk, &ecspi1_ipg_clk);
-DEFINE_CLOCK_FULL(ecspi2_ipg_clk, 0, MXC_CCM_CCGR4, MXC_CCM_CCGRx_CG11_OFFSET,
-		NULL, NULL, _clk_ccgr_enable_inrun, _clk_ccgr_disable,
-		&ipg_clk, &aips_tz2_clk);
-DEFINE_CLOCK(ecspi2_clk, 0, MXC_CCM_CCGR4, MXC_CCM_CCGRx_CG12_OFFSET,
-		NULL, NULL, &ecspi_main_clk, &ecspi2_ipg_clk);
-
-/* CSPI */
-DEFINE_CLOCK(cspi_ipg_clk, 0, MXC_CCM_CCGR4, MXC_CCM_CCGRx_CG9_OFFSET,
-		NULL, NULL, &ipg_clk, &aips_tz2_clk);
-DEFINE_CLOCK(cspi_clk, 0, MXC_CCM_CCGR4, MXC_CCM_CCGRx_CG13_OFFSET,
-		NULL, NULL, &ipg_clk, &cspi_ipg_clk);
-
-/* SDMA */
-DEFINE_CLOCK(sdma_clk, 1, MXC_CCM_CCGR4, MXC_CCM_CCGRx_CG15_OFFSET,
-		NULL, NULL, &ahb_clk, NULL);
-
-/* eSDHC */
-DEFINE_CLOCK_FULL(esdhc1_ipg_clk, 0, MXC_CCM_CCGR3, MXC_CCM_CCGRx_CG0_OFFSET,
-	NULL,  NULL, _clk_max_enable, _clk_max_disable, &ipg_clk, NULL);
-DEFINE_CLOCK_MAX(esdhc1_clk, 0, MXC_CCM_CCGR3, MXC_CCM_CCGRx_CG1_OFFSET,
-	clk_esdhc1, &pll2_sw_clk, &esdhc1_ipg_clk);
-DEFINE_CLOCK_FULL(esdhc2_ipg_clk, 1, MXC_CCM_CCGR3, MXC_CCM_CCGRx_CG2_OFFSET,
-	NULL,  NULL, _clk_max_enable, _clk_max_disable, &ipg_clk, NULL);
-DEFINE_CLOCK_FULL(esdhc3_ipg_clk, 2, MXC_CCM_CCGR3, MXC_CCM_CCGRx_CG4_OFFSET,
-	NULL,  NULL, _clk_max_enable, _clk_max_disable, &ipg_clk, NULL);
-DEFINE_CLOCK_FULL(esdhc4_ipg_clk, 3, MXC_CCM_CCGR3, MXC_CCM_CCGRx_CG6_OFFSET,
-	NULL,  NULL, _clk_max_enable, _clk_max_disable, &ipg_clk, NULL);
-
-/* mx51 specific */
-DEFINE_CLOCK_MAX(esdhc2_clk, 1, MXC_CCM_CCGR3, MXC_CCM_CCGRx_CG3_OFFSET,
-	clk_esdhc2, &pll2_sw_clk, &esdhc2_ipg_clk);
-
-static struct clk esdhc3_clk = {
-	.id = 2,
-	.parent = &esdhc1_clk,
-	.set_parent = clk_esdhc3_set_parent,
-	.enable_reg = MXC_CCM_CCGR3,
-	.enable_shift = MXC_CCM_CCGRx_CG5_OFFSET,
-	.enable  = _clk_max_enable,
-	.disable = _clk_max_disable,
-	.secondary = &esdhc3_ipg_clk,
-};
-static struct clk esdhc4_clk = {
-	.id = 3,
-	.parent = &esdhc1_clk,
-	.set_parent = clk_esdhc4_set_parent,
-	.enable_reg = MXC_CCM_CCGR3,
-	.enable_shift = MXC_CCM_CCGRx_CG7_OFFSET,
-	.enable  = _clk_max_enable,
-	.disable = _clk_max_disable,
-	.secondary = &esdhc4_ipg_clk,
-};
-
-/* mx53 specific */
-static struct clk esdhc2_mx53_clk = {
-	.id = 2,
-	.parent = &esdhc1_clk,
-	.set_parent = clk_esdhc2_mx53_set_parent,
-	.enable_reg = MXC_CCM_CCGR3,
-	.enable_shift = MXC_CCM_CCGRx_CG3_OFFSET,
-	.enable  = _clk_max_enable,
-	.disable = _clk_max_disable,
-	.secondary = &esdhc3_ipg_clk,
-};
-
-DEFINE_CLOCK_MAX(esdhc3_mx53_clk, 2, MXC_CCM_CCGR3, MXC_CCM_CCGRx_CG5_OFFSET,
-	clk_esdhc3_mx53, &pll2_sw_clk, &esdhc2_ipg_clk);
-
-static struct clk esdhc4_mx53_clk = {
-	.id = 3,
-	.parent = &esdhc1_clk,
-	.set_parent = clk_esdhc4_mx53_set_parent,
-	.enable_reg = MXC_CCM_CCGR3,
-	.enable_shift = MXC_CCM_CCGRx_CG7_OFFSET,
-	.enable  = _clk_max_enable,
-	.disable = _clk_max_disable,
-	.secondary = &esdhc4_ipg_clk,
-};
-
-static struct clk sata_clk = {
-	.parent = &ipg_clk,
-	.enable = _clk_max_enable,
-	.enable_reg = MXC_CCM_CCGR4,
-	.enable_shift = MXC_CCM_CCGRx_CG1_OFFSET,
-	.disable = _clk_max_disable,
-};
-
-static struct clk ahci_phy_clk = {
-	.parent = &usb_phy1_clk,
-};
-
-static struct clk ahci_dma_clk = {
-	.parent = &ahb_clk,
-};
-
-DEFINE_CLOCK(mipi_esc_clk, 0, MXC_CCM_CCGR4, MXC_CCM_CCGRx_CG5_OFFSET, NULL, NULL, NULL, &pll2_sw_clk);
-DEFINE_CLOCK(mipi_hsc2_clk, 0, MXC_CCM_CCGR4, MXC_CCM_CCGRx_CG4_OFFSET, NULL, NULL, &mipi_esc_clk, &pll2_sw_clk);
-DEFINE_CLOCK(mipi_hsc1_clk, 0, MXC_CCM_CCGR4, MXC_CCM_CCGRx_CG3_OFFSET, NULL, NULL, &mipi_hsc2_clk, &pll2_sw_clk);
-
-/* IPU */
-DEFINE_CLOCK_FULL(ipu_clk, 0, MXC_CCM_CCGR5, MXC_CCM_CCGRx_CG5_OFFSET,
-	NULL,  NULL, clk_ipu_enable, clk_ipu_disable, &ahb_clk, &ipu_sec_clk);
-
-DEFINE_CLOCK_FULL(emi_fast_clk, 0, MXC_CCM_CCGR5, MXC_CCM_CCGRx_CG7_OFFSET,
-		NULL, NULL, _clk_ccgr_enable, _clk_ccgr_disable_inwait,
-		&ddr_clk, NULL);
-
-DEFINE_CLOCK(ipu_di0_clk, 0, MXC_CCM_CCGR6, MXC_CCM_CCGRx_CG5_OFFSET,
-		NULL, NULL, &pll3_sw_clk, NULL);
-DEFINE_CLOCK(ipu_di1_clk, 0, MXC_CCM_CCGR6, MXC_CCM_CCGRx_CG6_OFFSET,
-		NULL, NULL, &pll3_sw_clk, NULL);
-
-/* PATA */
-DEFINE_CLOCK(pata_clk, 0, MXC_CCM_CCGR4, MXC_CCM_CCGRx_CG0_OFFSET,
-		NULL, NULL, &ipg_clk, &spba_clk);
-
-#define _REGISTER_CLOCK(d, n, c) \
-       { \
-		.dev_id = d, \
-		.con_id = n, \
-		.clk = &c,   \
-       },
-
-static struct clk_lookup mx51_lookups[] = {
-	/* i.mx51 has the i.mx21 type uart */
-	_REGISTER_CLOCK("imx21-uart.0", NULL, uart1_clk)
-	_REGISTER_CLOCK("imx21-uart.1", NULL, uart2_clk)
-	_REGISTER_CLOCK("imx21-uart.2", NULL, uart3_clk)
-	_REGISTER_CLOCK(NULL, "gpt", gpt_clk)
-	/* i.mx51 has the i.mx27 type fec */
-	_REGISTER_CLOCK("imx27-fec.0", NULL, fec_clk)
-	_REGISTER_CLOCK("mxc_pwm.0", "pwm", pwm1_clk)
-	_REGISTER_CLOCK("mxc_pwm.1", "pwm", pwm2_clk)
-	_REGISTER_CLOCK("imx-i2c.0", NULL, i2c1_clk)
-	_REGISTER_CLOCK("imx-i2c.1", NULL, i2c2_clk)
-	_REGISTER_CLOCK("imx-i2c.2", NULL, hsi2c_clk)
-	_REGISTER_CLOCK("mxc-ehci.0", "usb", usboh3_clk)
-	_REGISTER_CLOCK("mxc-ehci.0", "usb_ahb", usb_ahb_clk)
-	_REGISTER_CLOCK("mxc-ehci.0", "usb_phy1", usb_phy1_clk)
-	_REGISTER_CLOCK("mxc-ehci.1", "usb", usboh3_clk)
-	_REGISTER_CLOCK("mxc-ehci.1", "usb_ahb", usb_ahb_clk)
-	_REGISTER_CLOCK("mxc-ehci.2", "usb", usboh3_clk)
-	_REGISTER_CLOCK("mxc-ehci.2", "usb_ahb", usb_ahb_clk)
-	_REGISTER_CLOCK("fsl-usb2-udc", "usb", usboh3_clk)
-	_REGISTER_CLOCK("fsl-usb2-udc", "usb_ahb", ahb_clk)
-	_REGISTER_CLOCK("imx-keypad", NULL, dummy_clk)
-	_REGISTER_CLOCK("mxc_nand", NULL, nfc_clk)
-	_REGISTER_CLOCK("imx-ssi.0", NULL, ssi1_clk)
-	_REGISTER_CLOCK("imx-ssi.1", NULL, ssi2_clk)
-	_REGISTER_CLOCK("imx-ssi.2", NULL, ssi3_clk)
-	/* i.mx51 has the i.mx35 type sdma */
-	_REGISTER_CLOCK("imx35-sdma", NULL, sdma_clk)
-	_REGISTER_CLOCK(NULL, "ckih", ckih_clk)
-	_REGISTER_CLOCK(NULL, "ckih2", ckih2_clk)
-	_REGISTER_CLOCK(NULL, "gpt_32k", gpt_32k_clk)
-	_REGISTER_CLOCK("imx51-ecspi.0", NULL, ecspi1_clk)
-	_REGISTER_CLOCK("imx51-ecspi.1", NULL, ecspi2_clk)
-	/* i.mx51 has the i.mx35 type cspi */
-	_REGISTER_CLOCK("imx35-cspi.0", NULL, cspi_clk)
-	_REGISTER_CLOCK("sdhci-esdhc-imx51.0", NULL, esdhc1_clk)
-	_REGISTER_CLOCK("sdhci-esdhc-imx51.1", NULL, esdhc2_clk)
-	_REGISTER_CLOCK("sdhci-esdhc-imx51.2", NULL, esdhc3_clk)
-	_REGISTER_CLOCK("sdhci-esdhc-imx51.3", NULL, esdhc4_clk)
-	_REGISTER_CLOCK(NULL, "cpu_clk", cpu_clk)
-	_REGISTER_CLOCK(NULL, "iim_clk", iim_clk)
-	_REGISTER_CLOCK("imx2-wdt.0", NULL, dummy_clk)
-	_REGISTER_CLOCK("imx2-wdt.1", NULL, dummy_clk)
-	_REGISTER_CLOCK(NULL, "mipi_hsp", mipi_hsp_clk)
-	_REGISTER_CLOCK("imx-ipuv3", NULL, ipu_clk)
-	_REGISTER_CLOCK("imx-ipuv3", "di0", ipu_di0_clk)
-	_REGISTER_CLOCK("imx-ipuv3", "di1", ipu_di1_clk)
-	_REGISTER_CLOCK(NULL, "gpc_dvfs", gpc_dvfs_clk)
-	_REGISTER_CLOCK("pata_imx", NULL, pata_clk)
-};
-
-static struct clk_lookup mx53_lookups[] = {
-	/* i.mx53 has the i.mx21 type uart */
-	_REGISTER_CLOCK("imx21-uart.0", NULL, uart1_clk)
-	_REGISTER_CLOCK("imx21-uart.1", NULL, uart2_clk)
-	_REGISTER_CLOCK("imx21-uart.2", NULL, uart3_clk)
-	_REGISTER_CLOCK("imx21-uart.3", NULL, uart4_clk)
-	_REGISTER_CLOCK("imx21-uart.4", NULL, uart5_clk)
-	_REGISTER_CLOCK(NULL, "gpt", gpt_clk)
-	/* i.mx53 has the i.mx25 type fec */
-	_REGISTER_CLOCK("imx25-fec.0", NULL, fec_clk)
-	_REGISTER_CLOCK(NULL, "iim_clk", iim_clk)
-	_REGISTER_CLOCK("imx-i2c.0", NULL, i2c1_clk)
-	_REGISTER_CLOCK("imx-i2c.1", NULL, i2c2_clk)
-	_REGISTER_CLOCK("imx-i2c.2", NULL, i2c3_mx53_clk)
-	/* i.mx53 has the i.mx51 type ecspi */
-	_REGISTER_CLOCK("imx51-ecspi.0", NULL, ecspi1_clk)
-	_REGISTER_CLOCK("imx51-ecspi.1", NULL, ecspi2_clk)
-	/* i.mx53 has the i.mx25 type cspi */
-	_REGISTER_CLOCK("imx35-cspi.0", NULL, cspi_clk)
-	_REGISTER_CLOCK("sdhci-esdhc-imx53.0", NULL, esdhc1_clk)
-	_REGISTER_CLOCK("sdhci-esdhc-imx53.1", NULL, esdhc2_mx53_clk)
-	_REGISTER_CLOCK("sdhci-esdhc-imx53.2", NULL, esdhc3_mx53_clk)
-	_REGISTER_CLOCK("sdhci-esdhc-imx53.3", NULL, esdhc4_mx53_clk)
-	_REGISTER_CLOCK("imx2-wdt.0", NULL, dummy_clk)
-	_REGISTER_CLOCK("imx2-wdt.1", NULL, dummy_clk)
-	/* i.mx53 has the i.mx35 type sdma */
-	_REGISTER_CLOCK("imx35-sdma", NULL, sdma_clk)
-	_REGISTER_CLOCK("imx-ssi.0", NULL, ssi1_clk)
-	_REGISTER_CLOCK("imx-ssi.1", NULL, ssi2_clk)
-	_REGISTER_CLOCK("imx-ssi.2", NULL, ssi3_clk)
-	_REGISTER_CLOCK("imx-keypad", NULL, dummy_clk)
-	_REGISTER_CLOCK("pata_imx", NULL, pata_clk)
-	_REGISTER_CLOCK("imx53-ahci.0", "ahci", sata_clk)
-	_REGISTER_CLOCK("imx53-ahci.0", "ahci_phy", ahci_phy_clk)
-	_REGISTER_CLOCK("imx53-ahci.0", "ahci_dma", ahci_dma_clk)
-};
-
-static void clk_tree_init(void)
-{
-	u32 reg;
-
-	ipg_perclk.set_parent(&ipg_perclk, &lp_apm_clk);
-
-	/*
-	 * Initialise the IPG PER CLK dividers to 3. IPG_PER_CLK should be at
-	 * 8MHz, its derived from lp_apm.
-	 *
-	 * FIXME: Verify if true for all boards
-	 */
-	reg = __raw_readl(MXC_CCM_CBCDR);
-	reg &= ~MXC_CCM_CBCDR_PERCLK_PRED1_MASK;
-	reg &= ~MXC_CCM_CBCDR_PERCLK_PRED2_MASK;
-	reg &= ~MXC_CCM_CBCDR_PERCLK_PODF_MASK;
-	reg |= (2 << MXC_CCM_CBCDR_PERCLK_PRED1_OFFSET);
-	__raw_writel(reg, MXC_CCM_CBCDR);
-}
-
-int __init mx51_clocks_init(unsigned long ckil, unsigned long osc,
-			unsigned long ckih1, unsigned long ckih2)
-{
-	int i;
-
-	external_low_reference = ckil;
-	external_high_reference = ckih1;
-	ckih2_reference = ckih2;
-	oscillator_reference = osc;
-
-	for (i = 0; i < ARRAY_SIZE(mx51_lookups); i++)
-		clkdev_add(&mx51_lookups[i]);
-
-	clk_tree_init();
-
-	clk_enable(&cpu_clk);
-	clk_enable(&main_bus_clk);
-
-	clk_enable(&iim_clk);
-	imx_print_silicon_rev("i.MX51", mx51_revision());
-	clk_disable(&iim_clk);
-
-	/* move usb_phy_clk to 24MHz */
-	clk_set_parent(&usb_phy1_clk, &osc_clk);
-
-	/* set the usboh3_clk parent to pll2_sw_clk */
-	clk_set_parent(&usboh3_clk, &pll2_sw_clk);
-
-	/* Set SDHC parents to be PLL2 */
-	clk_set_parent(&esdhc1_clk, &pll2_sw_clk);
-	clk_set_parent(&esdhc2_clk, &pll2_sw_clk);
-
-	/* set SDHC root clock as 166.25MHZ*/
-	clk_set_rate(&esdhc1_clk, 166250000);
-	clk_set_rate(&esdhc2_clk, 166250000);
-
-	/* System timer */
-	mxc_timer_init(&gpt_clk, MX51_IO_ADDRESS(MX51_GPT1_BASE_ADDR),
-		MX51_INT_GPT);
-	return 0;
-}
-
-int __init mx53_clocks_init(unsigned long ckil, unsigned long osc,
-			unsigned long ckih1, unsigned long ckih2)
-{
-	int i;
-
-	external_low_reference = ckil;
-	external_high_reference = ckih1;
-	ckih2_reference = ckih2;
-	oscillator_reference = osc;
-
-	for (i = 0; i < ARRAY_SIZE(mx53_lookups); i++)
-		clkdev_add(&mx53_lookups[i]);
-
-	clk_tree_init();
-
-	clk_set_parent(&uart_root_clk, &pll3_sw_clk);
-	clk_enable(&cpu_clk);
-	clk_enable(&main_bus_clk);
-
-	clk_enable(&iim_clk);
-	imx_print_silicon_rev("i.MX53", mx53_revision());
-	clk_disable(&iim_clk);
-
-	/* Set SDHC parents to be PLL2 */
-	clk_set_parent(&esdhc1_clk, &pll2_sw_clk);
-	clk_set_parent(&esdhc3_mx53_clk, &pll2_sw_clk);
-
-	/* set SDHC root clock as 200MHZ*/
-	clk_set_rate(&esdhc1_clk, 200000000);
-	clk_set_rate(&esdhc3_mx53_clk, 200000000);
-
-	/* System timer */
-	mxc_timer_init(&gpt_clk, MX53_IO_ADDRESS(MX53_GPT1_BASE_ADDR),
-		MX53_INT_GPT);
-	return 0;
-}
-
-#ifdef CONFIG_OF
-static void __init clk_get_freq_dt(unsigned long *ckil, unsigned long *osc,
-				   unsigned long *ckih1, unsigned long *ckih2)
-{
-	struct device_node *np;
-
-	/* retrieve the freqency of fixed clocks from device tree */
-	for_each_compatible_node(np, NULL, "fixed-clock") {
-		u32 rate;
-		if (of_property_read_u32(np, "clock-frequency", &rate))
-			continue;
-
-		if (of_device_is_compatible(np, "fsl,imx-ckil"))
-			*ckil = rate;
-		else if (of_device_is_compatible(np, "fsl,imx-osc"))
-			*osc = rate;
-		else if (of_device_is_compatible(np, "fsl,imx-ckih1"))
-			*ckih1 = rate;
-		else if (of_device_is_compatible(np, "fsl,imx-ckih2"))
-			*ckih2 = rate;
-	}
-}
-
-int __init mx51_clocks_init_dt(void)
-{
-	unsigned long ckil, osc, ckih1, ckih2;
-
-	clk_get_freq_dt(&ckil, &osc, &ckih1, &ckih2);
-	return mx51_clocks_init(ckil, osc, ckih1, ckih2);
-}
-
-int __init mx53_clocks_init_dt(void)
-{
-	unsigned long ckil, osc, ckih1, ckih2;
-
-	clk_get_freq_dt(&ckil, &osc, &ckih1, &ckih2);
-	return mx53_clocks_init(ckil, osc, ckih1, ckih2);
-}
-#endif
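/*
 * Illustrative consumer-side sketch, not part of this patch: the clkdev
 * lookups removed above map a (dev_id, con_id) pair to a struct clk, so a
 * platform driver would pick up its gate roughly like this (error handling
 * trimmed; assumes dev_name(dev) is "imx-i2c.0", matching the
 * _REGISTER_CLOCK("imx-i2c.0", NULL, i2c1_clk) entry).
 */
#include <linux/clk.h>
#include <linux/err.h>

static int example_grab_i2c1_clk(struct device *dev)
{
	struct clk *clk = clk_get(dev, NULL);	/* matched against "imx-i2c.0" */

	if (IS_ERR(clk))
		return PTR_ERR(clk);

	clk_prepare(clk);	/* mirrors the clk_prepare() added in mm-imx5.c below */
	clk_enable(clk);	/* ungates CG9 in MXC_CCM_CCGR1 per the DEFINE_CLOCK above */
	return 0;
}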
diff --git a/arch/arm/mach-imx/cpu-imx5.c b/arch/arm/mach-imx/cpu-imx5.c
index aa15c51..8eb15a2 100644
--- a/arch/arm/mach-imx/cpu-imx5.c
+++ b/arch/arm/mach-imx/cpu-imx5.c
@@ -62,11 +62,8 @@
  * Dependent on link order - so the assumption is that vfp_init is called
  * before us.
  */
-static int __init mx51_neon_fixup(void)
+int __init mx51_neon_fixup(void)
 {
-	if (!cpu_is_mx51())
-		return 0;
-
 	if (mx51_revision() < IMX_CHIP_REVISION_3_0 &&
 			(elf_hwcap & HWCAP_NEON)) {
 		elf_hwcap &= ~HWCAP_NEON;
@@ -75,7 +72,6 @@
 	return 0;
 }
 
-late_initcall(mx51_neon_fixup);
 #endif
 
 static int get_mx53_srev(void)
diff --git a/arch/arm/mach-imx/crm-regs-imx5.h b/arch/arm/mach-imx/crm-regs-imx5.h
index 5e11ba7..5e3f1f0 100644
--- a/arch/arm/mach-imx/crm-regs-imx5.h
+++ b/arch/arm/mach-imx/crm-regs-imx5.h
@@ -23,7 +23,7 @@
 #define MX53_DPLL1_BASE		MX53_IO_ADDRESS(MX53_PLL1_BASE_ADDR)
 #define MX53_DPLL2_BASE		MX53_IO_ADDRESS(MX53_PLL2_BASE_ADDR)
 #define MX53_DPLL3_BASE		MX53_IO_ADDRESS(MX53_PLL3_BASE_ADDR)
-#define MX53_DPLL4_BASE		MX53_IO_ADDRESS(MX53_PLL3_BASE_ADDR)
+#define MX53_DPLL4_BASE		MX53_IO_ADDRESS(MX53_PLL4_BASE_ADDR)
 
 /* PLL Register Offsets */
 #define MXC_PLL_DP_CTL			0x00
diff --git a/arch/arm/mach-imx/crmregs-imx3.h b/arch/arm/mach-imx/crmregs-imx3.h
index 5314127..a1dfde5 100644
--- a/arch/arm/mach-imx/crmregs-imx3.h
+++ b/arch/arm/mach-imx/crmregs-imx3.h
@@ -24,48 +24,47 @@
 #define CKIH_CLK_FREQ_27MHZ     27000000
 #define CKIL_CLK_FREQ           32768
 
-#define MXC_CCM_BASE		(cpu_is_mx31() ? \
-MX31_IO_ADDRESS(MX31_CCM_BASE_ADDR) : MX35_IO_ADDRESS(MX35_CCM_BASE_ADDR))
+extern void __iomem *mx3_ccm_base;
 
 /* Register addresses */
-#define MXC_CCM_CCMR		(MXC_CCM_BASE + 0x00)
-#define MXC_CCM_PDR0		(MXC_CCM_BASE + 0x04)
-#define MXC_CCM_PDR1		(MXC_CCM_BASE + 0x08)
-#define MX35_CCM_PDR2		(MXC_CCM_BASE + 0x0C)
-#define MXC_CCM_RCSR		(MXC_CCM_BASE + 0x0C)
-#define MX35_CCM_PDR3		(MXC_CCM_BASE + 0x10)
-#define MXC_CCM_MPCTL		(MXC_CCM_BASE + 0x10)
-#define MX35_CCM_PDR4		(MXC_CCM_BASE + 0x14)
-#define MXC_CCM_UPCTL		(MXC_CCM_BASE + 0x14)
-#define MX35_CCM_RCSR		(MXC_CCM_BASE + 0x18)
-#define MXC_CCM_SRPCTL		(MXC_CCM_BASE + 0x18)
-#define MX35_CCM_MPCTL		(MXC_CCM_BASE + 0x1C)
-#define MXC_CCM_COSR		(MXC_CCM_BASE + 0x1C)
-#define MX35_CCM_PPCTL		(MXC_CCM_BASE + 0x20)
-#define MXC_CCM_CGR0		(MXC_CCM_BASE + 0x20)
-#define MX35_CCM_ACMR		(MXC_CCM_BASE + 0x24)
-#define MXC_CCM_CGR1		(MXC_CCM_BASE + 0x24)
-#define MX35_CCM_COSR		(MXC_CCM_BASE + 0x28)
-#define MXC_CCM_CGR2		(MXC_CCM_BASE + 0x28)
-#define MX35_CCM_CGR0		(MXC_CCM_BASE + 0x2C)
-#define MXC_CCM_WIMR		(MXC_CCM_BASE + 0x2C)
-#define MX35_CCM_CGR1		(MXC_CCM_BASE + 0x30)
-#define MXC_CCM_LDC		(MXC_CCM_BASE + 0x30)
-#define MX35_CCM_CGR2		(MXC_CCM_BASE + 0x34)
-#define MXC_CCM_DCVR0		(MXC_CCM_BASE + 0x34)
-#define MX35_CCM_CGR3		(MXC_CCM_BASE + 0x38)
-#define MXC_CCM_DCVR1		(MXC_CCM_BASE + 0x38)
-#define MXC_CCM_DCVR2		(MXC_CCM_BASE + 0x3C)
-#define MXC_CCM_DCVR3		(MXC_CCM_BASE + 0x40)
-#define MXC_CCM_LTR0		(MXC_CCM_BASE + 0x44)
-#define MXC_CCM_LTR1		(MXC_CCM_BASE + 0x48)
-#define MXC_CCM_LTR2		(MXC_CCM_BASE + 0x4C)
-#define MXC_CCM_LTR3		(MXC_CCM_BASE + 0x50)
-#define MXC_CCM_LTBR0		(MXC_CCM_BASE + 0x54)
-#define MXC_CCM_LTBR1		(MXC_CCM_BASE + 0x58)
-#define MXC_CCM_PMCR0		(MXC_CCM_BASE + 0x5C)
-#define MXC_CCM_PMCR1		(MXC_CCM_BASE + 0x60)
-#define MXC_CCM_PDR2		(MXC_CCM_BASE + 0x64)
+#define MXC_CCM_CCMR		0x00
+#define MXC_CCM_PDR0		0x04
+#define MXC_CCM_PDR1		0x08
+#define MX35_CCM_PDR2		0x0C
+#define MXC_CCM_RCSR		0x0C
+#define MX35_CCM_PDR3		0x10
+#define MXC_CCM_MPCTL		0x10
+#define MX35_CCM_PDR4		0x14
+#define MXC_CCM_UPCTL		0x14
+#define MX35_CCM_RCSR		0x18
+#define MXC_CCM_SRPCTL		0x18
+#define MX35_CCM_MPCTL		0x1C
+#define MXC_CCM_COSR		0x1C
+#define MX35_CCM_PPCTL		0x20
+#define MXC_CCM_CGR0		0x20
+#define MX35_CCM_ACMR		0x24
+#define MXC_CCM_CGR1		0x24
+#define MX35_CCM_COSR		0x28
+#define MXC_CCM_CGR2		0x28
+#define MX35_CCM_CGR0		0x2C
+#define MXC_CCM_WIMR		0x2C
+#define MX35_CCM_CGR1		0x30
+#define MXC_CCM_LDC		0x30
+#define MX35_CCM_CGR2		0x34
+#define MXC_CCM_DCVR0		0x34
+#define MX35_CCM_CGR3		0x38
+#define MXC_CCM_DCVR1		0x38
+#define MXC_CCM_DCVR2		0x3C
+#define MXC_CCM_DCVR3		0x40
+#define MXC_CCM_LTR0		0x44
+#define MXC_CCM_LTR1		0x48
+#define MXC_CCM_LTR2		0x4C
+#define MXC_CCM_LTR3		0x50
+#define MXC_CCM_LTBR0		0x54
+#define MXC_CCM_LTBR1		0x58
+#define MXC_CCM_PMCR0		0x5C
+#define MXC_CCM_PMCR1		0x60
+#define MXC_CCM_PDR2		0x64
 
 /* Register bit definitions */
 #define MXC_CCM_CCMR_WBEN                       (1 << 27)
diff --git a/arch/arm/mach-imx/hotplug.c b/arch/arm/mach-imx/hotplug.c
index 89493ab..20ed2d5 100644
--- a/arch/arm/mach-imx/hotplug.c
+++ b/arch/arm/mach-imx/hotplug.c
@@ -12,6 +12,7 @@
 
 #include <linux/errno.h>
 #include <asm/cacheflush.h>
+#include <asm/cp15.h>
 #include <mach/common.h>
 
 int platform_cpu_kill(unsigned int cpu)
@@ -19,6 +20,44 @@
 	return 1;
 }
 
+static inline void cpu_enter_lowpower(void)
+{
+	unsigned int v;
+
+	flush_cache_all();
+	asm volatile(
+		"mcr	p15, 0, %1, c7, c5, 0\n"
+	"	mcr	p15, 0, %1, c7, c10, 4\n"
+	/*
+	 * Turn off coherency
+	 */
+	"	mrc	p15, 0, %0, c1, c0, 1\n"
+	"	bic	%0, %0, %3\n"
+	"	mcr	p15, 0, %0, c1, c0, 1\n"
+	"	mrc	p15, 0, %0, c1, c0, 0\n"
+	"	bic	%0, %0, %2\n"
+	"	mcr	p15, 0, %0, c1, c0, 0\n"
+	  : "=&r" (v)
+	  : "r" (0), "Ir" (CR_C), "Ir" (0x40)
+	  : "cc");
+}
+
+static inline void cpu_leave_lowpower(void)
+{
+	unsigned int v;
+
+	asm volatile(
+		"mrc	p15, 0, %0, c1, c0, 0\n"
+	"	orr	%0, %0, %1\n"
+	"	mcr	p15, 0, %0, c1, c0, 0\n"
+	"	mrc	p15, 0, %0, c1, c0, 1\n"
+	"	orr	%0, %0, %2\n"
+	"	mcr	p15, 0, %0, c1, c0, 1\n"
+	  : "=&r" (v)
+	  : "Ir" (CR_C), "Ir" (0x40)
+	  : "cc");
+}
+
 /*
  * platform-specific code to shutdown a CPU
  *
@@ -26,9 +65,10 @@
  */
 void platform_cpu_die(unsigned int cpu)
 {
-	flush_cache_all();
+	cpu_enter_lowpower();
 	imx_enable_cpu(cpu, false);
 	cpu_do_idle();
+	cpu_leave_lowpower();
 
 	/* We should never return from idle */
 	panic("cpu %d unexpectedly exit from shutdown\n", cpu);
diff --git a/arch/arm/mach-imx/imx27-dt.c b/arch/arm/mach-imx/imx27-dt.c
index ed38d03..eee0cc8 100644
--- a/arch/arm/mach-imx/imx27-dt.c
+++ b/arch/arm/mach-imx/imx27-dt.c
@@ -29,6 +29,7 @@
 	OF_DEV_AUXDATA("fsl,imx27-cspi", MX27_CSPI2_BASE_ADDR, "imx27-cspi.1", NULL),
 	OF_DEV_AUXDATA("fsl,imx27-cspi", MX27_CSPI3_BASE_ADDR, "imx27-cspi.2", NULL),
 	OF_DEV_AUXDATA("fsl,imx27-wdt", MX27_WDOG_BASE_ADDR, "imx2-wdt.0", NULL),
+	OF_DEV_AUXDATA("fsl,imx27-nand", MX27_NFC_BASE_ADDR, "mxc_nand.0", NULL),
 	{ /* sentinel */ }
 };
 
diff --git a/arch/arm/mach-imx/imx51-dt.c b/arch/arm/mach-imx/imx51-dt.c
index 5f577fb..18e78db 100644
--- a/arch/arm/mach-imx/imx51-dt.c
+++ b/arch/arm/mach-imx/imx51-dt.c
@@ -118,6 +118,7 @@
 	.handle_irq	= imx51_handle_irq,
 	.timer		= &imx51_timer,
 	.init_machine	= imx51_dt_init,
+	.init_late	= imx51_init_late,
 	.dt_compat	= imx51_dt_board_compat,
 	.restart	= mxc_restart,
 MACHINE_END
diff --git a/arch/arm/mach-imx/imx53-dt.c b/arch/arm/mach-imx/imx53-dt.c
index 574eca4..eb04b62 100644
--- a/arch/arm/mach-imx/imx53-dt.c
+++ b/arch/arm/mach-imx/imx53-dt.c
@@ -10,6 +10,9 @@
  * http://www.gnu.org/copyleft/gpl.html
  */
 
+#include <linux/clk.h>
+#include <linux/clkdev.h>
+#include <linux/err.h>
 #include <linux/io.h>
 #include <linux/irq.h>
 #include <linux/irqdomain.h>
@@ -81,6 +84,19 @@
 	{ /* sentinel */ }
 };
 
+static void __init imx53_qsb_init(void)
+{
+	struct clk *clk;
+
+	clk = clk_get_sys(NULL, "ssi_ext1");
+	if (IS_ERR(clk)) {
+		pr_err("failed to get clk ssi_ext1\n");
+		return;
+	}
+
+	clk_register_clkdev(clk, NULL, "0-000a");
+}
+
 static void __init imx53_dt_init(void)
 {
 	struct device_node *node;
@@ -99,6 +115,9 @@
 		of_node_put(node);
 	}
 
+	if (of_machine_is_compatible("fsl,imx53-qsb"))
+		imx53_qsb_init();
+
 	of_platform_populate(NULL, of_default_bus_match_table,
 			     imx53_auxdata_lookup, NULL);
 }
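/*
 * Illustrative sketch, not part of this patch: clk_register_clkdev() above
 * aliases the existing "ssi_ext1" clock to the device name "0-000a" (I2C bus
 * 0, address 0x0a -- presumably the board's audio codec), so that device's
 * driver can find it with a NULL con_id.
 */
#include <linux/clk.h>
#include <linux/err.h>
#include <linux/i2c.h>

static int example_codec_get_mclk(struct i2c_client *client)
{
	struct clk *mclk = clk_get(&client->dev, NULL);	/* matches "0-000a" */

	if (IS_ERR(mclk))
		return PTR_ERR(mclk);

	return clk_prepare_enable(mclk);
}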
diff --git a/arch/arm/mach-imx/lluart.c b/arch/arm/mach-imx/lluart.c
index 0213f8d..c40a34c 100644
--- a/arch/arm/mach-imx/lluart.c
+++ b/arch/arm/mach-imx/lluart.c
@@ -17,6 +17,12 @@
 #include <mach/hardware.h>
 
 static struct map_desc imx_lluart_desc = {
+#ifdef CONFIG_DEBUG_IMX6Q_UART2
+	.virtual	= MX6Q_IO_P2V(MX6Q_UART2_BASE_ADDR),
+	.pfn		= __phys_to_pfn(MX6Q_UART2_BASE_ADDR),
+	.length		= MX6Q_UART2_SIZE,
+	.type		= MT_DEVICE,
+#endif
 #ifdef CONFIG_DEBUG_IMX6Q_UART4
 	.virtual	= MX6Q_IO_P2V(MX6Q_UART4_BASE_ADDR),
 	.pfn		= __phys_to_pfn(MX6Q_UART4_BASE_ADDR),
diff --git a/arch/arm/mach-imx/mach-cpuimx35.c b/arch/arm/mach-imx/mach-cpuimx35.c
index c515f8e..6450303 100644
--- a/arch/arm/mach-imx/mach-cpuimx35.c
+++ b/arch/arm/mach-imx/mach-cpuimx35.c
@@ -70,7 +70,6 @@
 		I2C_BOARD_INFO("pcf8563", 0x51),
 	}, {
 		I2C_BOARD_INFO("tsc2007", 0x48),
-		.type		= "tsc2007",
 		.platform_data	= &tsc2007_info,
 		.irq		= IMX_GPIO_TO_IRQ(TSC2007_IRQGPIO),
 	},
diff --git a/arch/arm/mach-imx/mach-cpuimx51sd.c b/arch/arm/mach-imx/mach-cpuimx51sd.c
index ce341a6..1e09de5 100644
--- a/arch/arm/mach-imx/mach-cpuimx51sd.c
+++ b/arch/arm/mach-imx/mach-cpuimx51sd.c
@@ -142,7 +142,6 @@
 		I2C_BOARD_INFO("pcf8563", 0x51),
 	}, {
 		I2C_BOARD_INFO("tsc2007", 0x49),
-		.type		= "tsc2007",
 		.platform_data	= &tsc2007_info,
 	},
 };
@@ -369,5 +368,6 @@
 	.handle_irq = imx51_handle_irq,
 	.timer = &mxc_timer,
 	.init_machine = eukrea_cpuimx51sd_init,
+	.init_late	= imx51_init_late,
 	.restart	= mxc_restart,
 MACHINE_END
diff --git a/arch/arm/mach-imx/mach-imx27_visstrim_m10.c b/arch/arm/mach-imx/mach-imx27_visstrim_m10.c
index dff82eb..f76edb9 100644
--- a/arch/arm/mach-imx/mach-imx27_visstrim_m10.c
+++ b/arch/arm/mach-imx/mach-imx27_visstrim_m10.c
@@ -116,6 +116,8 @@
 	PB23_PF_USB_PWR,
 	PB24_PF_USB_OC,
 	/* CSI */
+	TVP5150_RSTN | GPIO_GPIO | GPIO_OUT,
+	TVP5150_PWDN | GPIO_GPIO | GPIO_OUT,
 	PB10_PF_CSI_D0,
 	PB11_PF_CSI_D1,
 	PB12_PF_CSI_D2,
@@ -147,6 +149,24 @@
 	{ MOTHERBOARD_BIT2, GPIOF_IN, "mother-version-2" },
 };
 
+static const struct gpio visstrim_m10_gpios[] __initconst = {
+	{
+		.gpio = TVP5150_RSTN,
+		.flags = GPIOF_DIR_OUT | GPIOF_INIT_HIGH,
+		.label = "tvp5150_rstn",
+	},
+	{
+		.gpio = TVP5150_PWDN,
+		.flags = GPIOF_DIR_OUT | GPIOF_INIT_LOW,
+		.label = "tvp5150_pwdn",
+	},
+	{
+		.gpio = OTG_PHY_CS_GPIO,
+		.flags = GPIOF_DIR_OUT | GPIOF_INIT_LOW,
+		.label = "usbotg_cs",
+	},
+};
+
 /* Camera */
 static int visstrim_camera_power(struct device *dev, int on)
 {
@@ -190,13 +210,6 @@
 	struct platform_device *pdev;
 	int dma;
 
-	/* Initialize tvp5150 gpios */
-	mxc_gpio_mode(TVP5150_RSTN | GPIO_GPIO | GPIO_OUT);
-	mxc_gpio_mode(TVP5150_PWDN | GPIO_GPIO | GPIO_OUT);
-	gpio_set_value(TVP5150_RSTN, 1);
-	gpio_set_value(TVP5150_PWDN, 0);
-	ndelay(1);
-
 	gpio_set_value(TVP5150_PWDN, 1);
 	ndelay(1);
 	gpio_set_value(TVP5150_RSTN, 0);
@@ -377,10 +390,6 @@
 /* USB OTG */
 static int otg_phy_init(struct platform_device *pdev)
 {
-	gpio_set_value(OTG_PHY_CS_GPIO, 0);
-
-	mdelay(10);
-
 	return mx27_initialize_usb_hw(pdev->id, MXC_EHCI_POWER_PINS_ENABLED);
 }
 
@@ -435,6 +444,11 @@
 	if (ret)
 		pr_err("Failed to setup pins (%d)\n", ret);
 
+	ret = gpio_request_array(visstrim_m10_gpios,
+				ARRAY_SIZE(visstrim_m10_gpios));
+	if (ret)
+		pr_err("Failed to request gpios (%d)\n", ret);
+
 	imx27_add_imx_ssi(0, &visstrim_m10_ssi_pdata);
 	imx27_add_imx_uart0(&uart_pdata);
 
diff --git a/arch/arm/mach-imx/mach-imx6q.c b/arch/arm/mach-imx/mach-imx6q.c
index 3df360a..b47e98b 100644
--- a/arch/arm/mach-imx/mach-imx6q.c
+++ b/arch/arm/mach-imx/mach-imx6q.c
@@ -10,6 +10,8 @@
  * http://www.gnu.org/copyleft/gpl.html
  */
 
+#include <linux/clk.h>
+#include <linux/clkdev.h>
 #include <linux/delay.h>
 #include <linux/init.h>
 #include <linux/io.h>
@@ -64,18 +66,53 @@
 /* For imx6q sabrelite board: set KSZ9021RN RGMII pad skew */
 static int ksz9021rn_phy_fixup(struct phy_device *phydev)
 {
-	/* min rx data delay */
-	phy_write(phydev, 0x0b, 0x8105);
-	phy_write(phydev, 0x0c, 0x0000);
+	if (IS_ENABLED(CONFIG_PHYLIB)) {
+		/* min rx data delay */
+		phy_write(phydev, 0x0b, 0x8105);
+		phy_write(phydev, 0x0c, 0x0000);
 
-	/* max rx/tx clock delay, min rx/tx control delay */
-	phy_write(phydev, 0x0b, 0x8104);
-	phy_write(phydev, 0x0c, 0xf0f0);
-	phy_write(phydev, 0x0b, 0x104);
+		/* max rx/tx clock delay, min rx/tx control delay */
+		phy_write(phydev, 0x0b, 0x8104);
+		phy_write(phydev, 0x0c, 0xf0f0);
+		phy_write(phydev, 0x0b, 0x104);
+	}
 
 	return 0;
 }
 
+static void __init imx6q_sabrelite_cko1_setup(void)
+{
+	struct clk *cko1_sel, *ahb, *cko1;
+	unsigned long rate;
+
+	cko1_sel = clk_get_sys(NULL, "cko1_sel");
+	ahb = clk_get_sys(NULL, "ahb");
+	cko1 = clk_get_sys(NULL, "cko1");
+	if (IS_ERR(cko1_sel) || IS_ERR(ahb) || IS_ERR(cko1)) {
+		pr_err("cko1 setup failed!\n");
+		goto put_clk;
+	}
+	clk_set_parent(cko1_sel, ahb);
+	rate = clk_round_rate(cko1, 16000000);
+	clk_set_rate(cko1, rate);
+	clk_register_clkdev(cko1, NULL, "0-000a");
+put_clk:
+	if (!IS_ERR(cko1_sel))
+		clk_put(cko1_sel);
+	if (!IS_ERR(ahb))
+		clk_put(ahb);
+	if (!IS_ERR(cko1))
+		clk_put(cko1);
+}
+
+static void __init imx6q_sabrelite_init(void)
+{
+	if (IS_ENABLED(CONFIG_PHYLIB))
+		phy_register_fixup_for_uid(PHY_ID_KSZ9021, MICREL_PHY_ID_MASK,
+				ksz9021rn_phy_fixup);
+	imx6q_sabrelite_cko1_setup();
+}
+
 static void __init imx6q_init_machine(void)
 {
 	/*
@@ -85,8 +122,7 @@
 	pinctrl_provide_dummies();
 
 	if (of_machine_is_compatible("fsl,imx6q-sabrelite"))
-		phy_register_fixup_for_uid(PHY_ID_KSZ9021, MICREL_PHY_ID_MASK,
-					   ksz9021rn_phy_fixup);
+		imx6q_sabrelite_init();
 
 	of_platform_populate(NULL, of_default_bus_match_table, NULL, NULL);
 
@@ -139,6 +175,7 @@
 static const char *imx6q_dt_compat[] __initdata = {
 	"fsl,imx6q-arm2",
 	"fsl,imx6q-sabrelite",
+	"fsl,imx6q-sabresd",
 	"fsl,imx6q",
 	NULL,
 };
diff --git a/arch/arm/mach-imx/mach-mx21ads.c b/arch/arm/mach-imx/mach-mx21ads.c
index d14bbe9..3e7401f 100644
--- a/arch/arm/mach-imx/mach-mx21ads.c
+++ b/arch/arm/mach-imx/mach-mx21ads.c
@@ -32,7 +32,7 @@
  * Memory-mapped I/O on MX21ADS base board
  */
 #define MX21ADS_MMIO_BASE_ADDR   0xf5000000
-#define MX21ADS_MMIO_SIZE        SZ_16M
+#define MX21ADS_MMIO_SIZE        0xc00000
 
 #define MX21ADS_REG_ADDR(offset)    (void __force __iomem *) \
 		(MX21ADS_MMIO_BASE_ADDR + (offset))
diff --git a/arch/arm/mach-imx/mach-mx51_3ds.c b/arch/arm/mach-imx/mach-mx51_3ds.c
index 83eab41..3c5b163 100644
--- a/arch/arm/mach-imx/mach-mx51_3ds.c
+++ b/arch/arm/mach-imx/mach-mx51_3ds.c
@@ -175,5 +175,6 @@
 	.handle_irq = imx51_handle_irq,
 	.timer = &mx51_3ds_timer,
 	.init_machine = mx51_3ds_init,
+	.init_late	= imx51_init_late,
 	.restart	= mxc_restart,
 MACHINE_END
diff --git a/arch/arm/mach-imx/mach-mx51_babbage.c b/arch/arm/mach-imx/mach-mx51_babbage.c
index e4b822e..dde3970 100644
--- a/arch/arm/mach-imx/mach-mx51_babbage.c
+++ b/arch/arm/mach-imx/mach-mx51_babbage.c
@@ -163,6 +163,12 @@
 	MX51_PAD_CSPI1_SCLK__ECSPI1_SCLK,
 	MX51_PAD_CSPI1_SS0__GPIO4_24,
 	MX51_PAD_CSPI1_SS1__GPIO4_25,
+
+	/* Audio */
+	MX51_PAD_AUD3_BB_TXD__AUD3_TXD,
+	MX51_PAD_AUD3_BB_RXD__AUD3_RXD,
+	MX51_PAD_AUD3_BB_CK__AUD3_TXC,
+	MX51_PAD_AUD3_BB_FS__AUD3_TXFS,
 };
 
 /* Serial ports */
@@ -426,5 +432,6 @@
 	.handle_irq = imx51_handle_irq,
 	.timer = &mx51_babbage_timer,
 	.init_machine = mx51_babbage_init,
+	.init_late	= imx51_init_late,
 	.restart	= mxc_restart,
 MACHINE_END
diff --git a/arch/arm/mach-imx/mach-mx51_efikamx.c b/arch/arm/mach-imx/mach-mx51_efikamx.c
index 86e96ef..8d09c01 100644
--- a/arch/arm/mach-imx/mach-mx51_efikamx.c
+++ b/arch/arm/mach-imx/mach-mx51_efikamx.c
@@ -207,29 +207,32 @@
 
 static int __init mx51_efikamx_power_init(void)
 {
-	if (machine_is_mx51_efikamx()) {
-		pwgt1 = regulator_get(NULL, "pwgt1");
-		pwgt2 = regulator_get(NULL, "pwgt2");
-		if (!IS_ERR(pwgt1) && !IS_ERR(pwgt2)) {
-			regulator_enable(pwgt1);
-			regulator_enable(pwgt2);
-		}
-		gpio_request(EFIKAMX_POWEROFF, "poweroff");
-		pm_power_off = mx51_efikamx_power_off;
-
-		/* enable coincell charger. maybe need a small power driver ? */
-		coincell = regulator_get(NULL, "coincell");
-		if (!IS_ERR(coincell)) {
-			regulator_set_voltage(coincell, 3000000, 3000000);
-			regulator_enable(coincell);
-		}
-
-		regulator_has_full_constraints();
+	pwgt1 = regulator_get(NULL, "pwgt1");
+	pwgt2 = regulator_get(NULL, "pwgt2");
+	if (!IS_ERR(pwgt1) && !IS_ERR(pwgt2)) {
+		regulator_enable(pwgt1);
+		regulator_enable(pwgt2);
 	}
+	gpio_request(EFIKAMX_POWEROFF, "poweroff");
+	pm_power_off = mx51_efikamx_power_off;
+
+	/* Enable the coincell charger. Maybe this needs a small power driver? */
+	coincell = regulator_get(NULL, "coincell");
+	if (!IS_ERR(coincell)) {
+		regulator_set_voltage(coincell, 3000000, 3000000);
+		regulator_enable(coincell);
+	}
+
+	regulator_has_full_constraints();
 
 	return 0;
 }
-late_initcall(mx51_efikamx_power_init);
+
+static void __init mx51_efikamx_init_late(void)
+{
+	imx51_init_late();
+	mx51_efikamx_power_init();
+}
 
 static void __init mx51_efikamx_init(void)
 {
@@ -292,5 +295,6 @@
 	.handle_irq = imx51_handle_irq,
 	.timer = &mx51_efikamx_timer,
 	.init_machine = mx51_efikamx_init,
+	.init_late = mx51_efikamx_init_late,
 	.restart = mx51_efikamx_restart,
 MACHINE_END
diff --git a/arch/arm/mach-imx/mach-mx51_efikasb.c b/arch/arm/mach-imx/mach-mx51_efikasb.c
index 88f837a..fdbd181 100644
--- a/arch/arm/mach-imx/mach-mx51_efikasb.c
+++ b/arch/arm/mach-imx/mach-mx51_efikasb.c
@@ -211,22 +211,25 @@
 
 static int __init mx51_efikasb_power_init(void)
 {
-	if (machine_is_mx51_efikasb()) {
-		pwgt1 = regulator_get(NULL, "pwgt1");
-		pwgt2 = regulator_get(NULL, "pwgt2");
-		if (!IS_ERR(pwgt1) && !IS_ERR(pwgt2)) {
-			regulator_enable(pwgt1);
-			regulator_enable(pwgt2);
-		}
-		gpio_request(EFIKASB_POWEROFF, "poweroff");
-		pm_power_off = mx51_efikasb_power_off;
-
-		regulator_has_full_constraints();
+	pwgt1 = regulator_get(NULL, "pwgt1");
+	pwgt2 = regulator_get(NULL, "pwgt2");
+	if (!IS_ERR(pwgt1) && !IS_ERR(pwgt2)) {
+		regulator_enable(pwgt1);
+		regulator_enable(pwgt2);
 	}
+	gpio_request(EFIKASB_POWEROFF, "poweroff");
+	pm_power_off = mx51_efikasb_power_off;
+
+	regulator_has_full_constraints();
 
 	return 0;
 }
-late_initcall(mx51_efikasb_power_init);
+
+static void __init mx51_efikasb_init_late(void)
+{
+	imx51_init_late();
+	mx51_efikasb_power_init();
+}
 
 /* 01     R1.3 board
    10     R2.0 board */
@@ -287,6 +290,7 @@
 	.init_irq = mx51_init_irq,
 	.handle_irq = imx51_handle_irq,
 	.init_machine =  efikasb_board_init,
+	.init_late = mx51_efikasb_init_late,
 	.timer = &mx51_efikasb_timer,
 	.restart	= mxc_restart,
 MACHINE_END
diff --git a/arch/arm/mach-imx/mach-pcm037.c b/arch/arm/mach-imx/mach-pcm037.c
index 10c9795..0a40004 100644
--- a/arch/arm/mach-imx/mach-pcm037.c
+++ b/arch/arm/mach-imx/mach-pcm037.c
@@ -694,6 +694,11 @@
 			MX3_CAMERA_BUF_SIZE);
 }
 
+static void __init pcm037_init_late(void)
+{
+	pcm037_eet_init_devices();
+}
+
 MACHINE_START(PCM037, "Phytec Phycore pcm037")
 	/* Maintainer: Pengutronix */
 	.atag_offset = 0x100,
@@ -704,5 +709,6 @@
 	.handle_irq = imx31_handle_irq,
 	.timer = &pcm037_timer,
 	.init_machine = pcm037_init,
+	.init_late = pcm037_init_late,
 	.restart	= mxc_restart,
 MACHINE_END
diff --git a/arch/arm/mach-imx/mach-pcm037_eet.c b/arch/arm/mach-imx/mach-pcm037_eet.c
index 1b7606b..11ffa81 100644
--- a/arch/arm/mach-imx/mach-pcm037_eet.c
+++ b/arch/arm/mach-imx/mach-pcm037_eet.c
@@ -160,9 +160,9 @@
 	.rep		= 0, /* No auto-repeat */
 };
 
-static int __init eet_init_devices(void)
+int __init pcm037_eet_init_devices(void)
 {
-	if (!machine_is_pcm037() || pcm037_variant() != PCM037_EET)
+	if (pcm037_variant() != PCM037_EET)
 		return 0;
 
 	mxc_iomux_setup_multiple_pins(pcm037_eet_pins,
@@ -176,4 +176,3 @@
 
 	return 0;
 }
-late_initcall(eet_init_devices);
diff --git a/arch/arm/mach-imx/mm-imx3.c b/arch/arm/mach-imx/mm-imx3.c
index 9128d15..a8983b9 100644
--- a/arch/arm/mach-imx/mm-imx3.c
+++ b/arch/arm/mach-imx/mm-imx3.c
@@ -32,6 +32,10 @@
 #include <mach/iomux-v3.h>
 #include <mach/irqs.h>
 
+#include "crmregs-imx3.h"
+
+void __iomem *mx3_ccm_base;
+
 static void imx3_idle(void)
 {
 	unsigned long reg = 0;
@@ -82,6 +86,7 @@
 
 void __init imx3_init_l2x0(void)
 {
+#ifdef CONFIG_CACHE_L2X0
 	void __iomem *l2x0_base;
 	void __iomem *clkctl_base;
 
@@ -111,6 +116,7 @@
 	}
 
 	l2x0_init(l2x0_base, 0x00030024, 0x00000000);
+#endif
 }
 
 #ifdef CONFIG_SOC_IMX31
@@ -138,6 +144,7 @@
 	mxc_arch_reset_init(MX31_IO_ADDRESS(MX31_WDOG_BASE_ADDR));
 	arch_ioremap_caller = imx3_ioremap_caller;
 	arm_pm_idle = imx3_idle;
+	mx3_ccm_base = MX31_IO_ADDRESS(MX31_CCM_BASE_ADDR);
 }
 
 void __init mx31_init_irq(void)
@@ -174,6 +181,8 @@
 	mxc_register_gpio("imx31-gpio", 1, MX31_GPIO2_BASE_ADDR, SZ_16K, MX31_INT_GPIO2, 0);
 	mxc_register_gpio("imx31-gpio", 2, MX31_GPIO3_BASE_ADDR, SZ_16K, MX31_INT_GPIO3, 0);
 
+	pinctrl_provide_dummies();
+
 	if (to_version == 1) {
 		strncpy(imx31_sdma_pdata.fw_name, "sdma-imx31-to1.bin",
 			strlen(imx31_sdma_pdata.fw_name));
@@ -211,6 +220,7 @@
 	mxc_arch_reset_init(MX35_IO_ADDRESS(MX35_WDOG_BASE_ADDR));
 	arm_pm_idle = imx3_idle;
 	arch_ioremap_caller = imx3_ioremap_caller;
+	mx3_ccm_base = MX35_IO_ADDRESS(MX35_CCM_BASE_ADDR);
 }
 
 void __init mx35_init_irq(void)
diff --git a/arch/arm/mach-imx/mm-imx5.c b/arch/arm/mach-imx/mm-imx5.c
index ba91e6b..1d00305 100644
--- a/arch/arm/mach-imx/mm-imx5.c
+++ b/arch/arm/mach-imx/mm-imx5.c
@@ -33,6 +33,7 @@
 		gpc_dvfs_clk = clk_get(NULL, "gpc_dvfs");
 		if (IS_ERR(gpc_dvfs_clk))
 			return;
+		clk_prepare(gpc_dvfs_clk);
 	}
 	clk_enable(gpc_dvfs_clk);
 	mx5_cpu_lp_set(WAIT_UNCLOCKED_POWER_OFF);
@@ -201,6 +202,8 @@
 	mxc_register_gpio("imx31-gpio", 2, MX51_GPIO3_BASE_ADDR, SZ_16K, MX51_INT_GPIO3_LOW, MX51_INT_GPIO3_HIGH);
 	mxc_register_gpio("imx31-gpio", 3, MX51_GPIO4_BASE_ADDR, SZ_16K, MX51_INT_GPIO4_LOW, MX51_INT_GPIO4_HIGH);
 
+	pinctrl_provide_dummies();
+
 	/* i.mx51 has the i.mx35 type sdma */
 	imx_add_imx_sdma("imx35-sdma", MX51_SDMA_BASE_ADDR, MX51_INT_SDMA, &imx51_sdma_pdata);
 
@@ -236,3 +239,8 @@
 	platform_device_register_simple("imx31-audmux", 0, imx53_audmux_res,
 					ARRAY_SIZE(imx53_audmux_res));
 }
+
+void __init imx51_init_late(void)
+{
+	mx51_neon_fixup();
+}
diff --git a/arch/arm/mach-imx/pcm037.h b/arch/arm/mach-imx/pcm037.h
index d692972..7d16769 100644
--- a/arch/arm/mach-imx/pcm037.h
+++ b/arch/arm/mach-imx/pcm037.h
@@ -8,4 +8,10 @@
 
 extern enum pcm037_board_variant pcm037_variant(void);
 
+#ifdef CONFIG_MACH_PCM037_EET
+int pcm037_eet_init_devices(void);
+#else
+static inline int pcm037_eet_init_devices(void) { return 0; }
+#endif
+
 #endif
diff --git a/arch/arm/mach-imx/pm-imx3.c b/arch/arm/mach-imx/pm-imx3.c
index b375243..822103b 100644
--- a/arch/arm/mach-imx/pm-imx3.c
+++ b/arch/arm/mach-imx/pm-imx3.c
@@ -21,14 +21,14 @@
  */
 void mx3_cpu_lp_set(enum mx3_cpu_pwr_mode mode)
 {
-	int reg = __raw_readl(MXC_CCM_CCMR);
+	int reg = __raw_readl(mx3_ccm_base + MXC_CCM_CCMR);
 	reg &= ~MXC_CCM_CCMR_LPM_MASK;
 
 	switch (mode) {
 	case MX3_WAIT:
 		if (cpu_is_mx35())
 			reg |= MXC_CCM_CCMR_LPM_WAIT_MX35;
-		__raw_writel(reg, MXC_CCM_CCMR);
+		__raw_writel(reg, mx3_ccm_base + MXC_CCM_CCMR);
 		break;
 	default:
 		pr_err("Unknown cpu power mode: %d\n", mode);
diff --git a/arch/arm/mach-ixp4xx/common.c b/arch/arm/mach-ixp4xx/common.c
index ebbd7fc..a9f80943 100644
--- a/arch/arm/mach-ixp4xx/common.c
+++ b/arch/arm/mach-ixp4xx/common.c
@@ -28,6 +28,7 @@
 #include <linux/clockchips.h>
 #include <linux/io.h>
 #include <linux/export.h>
+#include <linux/gpio.h>
 
 #include <mach/udc.h>
 #include <mach/hardware.h>
@@ -107,7 +108,7 @@
 	 7,  8,  9, 10, 11, 12, -1, -1,
 };
 
-int gpio_to_irq(int gpio)
+static int ixp4xx_gpio_to_irq(struct gpio_chip *chip, unsigned gpio)
 {
 	int irq;
 
@@ -117,7 +118,6 @@
 	}
 	return -EINVAL;
 }
-EXPORT_SYMBOL(gpio_to_irq);
 
 int irq_to_gpio(unsigned int irq)
 {
@@ -383,12 +383,56 @@
 unsigned long ixp4xx_exp_bus_size;
 EXPORT_SYMBOL(ixp4xx_exp_bus_size);
 
+static int ixp4xx_gpio_direction_input(struct gpio_chip *chip, unsigned gpio)
+{
+	gpio_line_config(gpio, IXP4XX_GPIO_IN);
+
+	return 0;
+}
+
+static int ixp4xx_gpio_direction_output(struct gpio_chip *chip, unsigned gpio,
+					int level)
+{
+	gpio_line_set(gpio, level);
+	gpio_line_config(gpio, IXP4XX_GPIO_OUT);
+
+	return 0;
+}
+
+static int ixp4xx_gpio_get_value(struct gpio_chip *chip, unsigned gpio)
+{
+	int value;
+
+	gpio_line_get(gpio, &value);
+
+	return value;
+}
+
+static void ixp4xx_gpio_set_value(struct gpio_chip *chip, unsigned gpio,
+				  int value)
+{
+	gpio_line_set(gpio, value);
+}
+
+static struct gpio_chip ixp4xx_gpio_chip = {
+	.label			= "IXP4XX_GPIO_CHIP",
+	.direction_input	= ixp4xx_gpio_direction_input,
+	.direction_output	= ixp4xx_gpio_direction_output,
+	.get			= ixp4xx_gpio_get_value,
+	.set			= ixp4xx_gpio_set_value,
+	.to_irq			= ixp4xx_gpio_to_irq,
+	.base			= 0,
+	.ngpio			= 16,
+};
+
 void __init ixp4xx_sys_init(void)
 {
 	ixp4xx_exp_bus_size = SZ_16M;
 
 	platform_add_devices(ixp4xx_devices, ARRAY_SIZE(ixp4xx_devices));
 
+	gpiochip_add(&ixp4xx_gpio_chip);
+
 	if (cpu_is_ixp46x()) {
 		int region;
 
diff --git a/arch/arm/mach-ixp4xx/include/mach/gpio.h b/arch/arm/mach-ixp4xx/include/mach/gpio.h
index 83d6b4e..ef37f26 100644
--- a/arch/arm/mach-ixp4xx/include/mach/gpio.h
+++ b/arch/arm/mach-ixp4xx/include/mach/gpio.h
@@ -1,79 +1,2 @@
-/*
- * arch/arm/mach-ixp4xx/include/mach/gpio.h
- *
- * IXP4XX GPIO wrappers for arch-neutral GPIO calls
- *
- * Written by Milan Svoboda <msvoboda@ra.rockwell.com>
- * Based on PXA implementation by Philipp Zabel <philipp.zabel@gmail.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
- *
- */
-
-#ifndef __ASM_ARCH_IXP4XX_GPIO_H
-#define __ASM_ARCH_IXP4XX_GPIO_H
-
-#include <linux/kernel.h>
-#include <mach/hardware.h>
-
-#define __ARM_GPIOLIB_COMPLEX
-
-static inline int gpio_request(unsigned gpio, const char *label)
-{
-	return 0;
-}
-
-static inline void gpio_free(unsigned gpio)
-{
-	might_sleep();
-
-	return;
-}
-
-static inline int gpio_direction_input(unsigned gpio)
-{
-	gpio_line_config(gpio, IXP4XX_GPIO_IN);
-	return 0;
-}
-
-static inline int gpio_direction_output(unsigned gpio, int level)
-{
-	gpio_line_set(gpio, level);
-	gpio_line_config(gpio, IXP4XX_GPIO_OUT);
-	return 0;
-}
-
-static inline int gpio_get_value(unsigned gpio)
-{
-	int value;
-
-	gpio_line_get(gpio, &value);
-
-	return value;
-}
-
-static inline void gpio_set_value(unsigned gpio, int value)
-{
-	gpio_line_set(gpio, value);
-}
-
-#include <asm-generic/gpio.h>			/* cansleep wrappers */
-
-extern int gpio_to_irq(int gpio);
-#define gpio_to_irq gpio_to_irq
-extern int irq_to_gpio(unsigned int irq);
-
-#endif
+/* empty */
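/*
 * Illustrative consumer sketch, not part of this patch: with the inline
 * wrappers above removed, the generic gpiolib calls now route through
 * ixp4xx_gpio_chip registered from ixp4xx_sys_init(); the GPIO number and
 * label below are made up for the example.
 */
#include <linux/gpio.h>

static int example_gpio_usage(void)
{
	int ret = gpio_request(3, "example-out");

	if (ret)
		return ret;

	gpio_direction_output(3, 0);	/* ends up in ixp4xx_gpio_direction_output() */
	gpio_set_value(3, 1);		/* ends up in ixp4xx_gpio_set_value() */
	gpio_free(3);
	return 0;
}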
 
diff --git a/arch/arm/mach-ixp4xx/ixdp425-setup.c b/arch/arm/mach-ixp4xx/ixdp425-setup.c
index 3d742ae..108a9d3 100644
--- a/arch/arm/mach-ixp4xx/ixdp425-setup.c
+++ b/arch/arm/mach-ixp4xx/ixdp425-setup.c
@@ -60,8 +60,6 @@
 #if defined(CONFIG_MTD_NAND_PLATFORM) || \
     defined(CONFIG_MTD_NAND_PLATFORM_MODULE)
 
-const char *part_probes[] = { "cmdlinepart", NULL };
-
 static struct mtd_partition ixdp425_partitions[] = {
 	{
 		.name	= "ixp400 NAND FS 0",
@@ -100,8 +98,6 @@
 	.chip = {
 		.nr_chips		= 1,
 		.chip_delay		= 30,
-		.options		= NAND_NO_AUTOINCR,
-		.part_probe_types 	= part_probes,
 		.partitions	 	= ixdp425_partitions,
 		.nr_partitions	 	= ARRAY_SIZE(ixdp425_partitions),
 	},
diff --git a/arch/arm/mach-kirkwood/board-dreamplug.c b/arch/arm/mach-kirkwood/board-dreamplug.c
index 9854539..55e357a 100644
--- a/arch/arm/mach-kirkwood/board-dreamplug.c
+++ b/arch/arm/mach-kirkwood/board-dreamplug.c
@@ -27,7 +27,6 @@
 #include <linux/mtd/physmap.h>
 #include <linux/spi/flash.h>
 #include <linux/spi/spi.h>
-#include <linux/spi/orion_spi.h>
 #include <asm/mach-types.h>
 #include <asm/mach/arch.h>
 #include <asm/mach/map.h>
diff --git a/arch/arm/mach-kirkwood/board-dt.c b/arch/arm/mach-kirkwood/board-dt.c
index 10d1969..edc3f8a 100644
--- a/arch/arm/mach-kirkwood/board-dt.c
+++ b/arch/arm/mach-kirkwood/board-dt.c
@@ -43,6 +43,9 @@
 	kirkwood_l2_init();
 #endif
 
+	/* Set up the root of the clk tree */
+	kirkwood_clk_init();
+
 	/* internal devices that every board has */
 	kirkwood_wdt_init();
 	kirkwood_xor0_init();
diff --git a/arch/arm/mach-kirkwood/board-iconnect.c b/arch/arm/mach-kirkwood/board-iconnect.c
index 2222c57..b0d3cc4 100644
--- a/arch/arm/mach-kirkwood/board-iconnect.c
+++ b/arch/arm/mach-kirkwood/board-iconnect.c
@@ -20,9 +20,6 @@
 #include <linux/mv643xx_eth.h>
 #include <linux/gpio.h>
 #include <linux/leds.h>
-#include <linux/spi/flash.h>
-#include <linux/spi/spi.h>
-#include <linux/spi/orion_spi.h>
 #include <linux/i2c.h>
 #include <linux/input.h>
 #include <linux/gpio_keys.h>
diff --git a/arch/arm/mach-kirkwood/common.c b/arch/arm/mach-kirkwood/common.c
index 3ad0373..f261cd2 100644
--- a/arch/arm/mach-kirkwood/common.c
+++ b/arch/arm/mach-kirkwood/common.c
@@ -15,7 +15,8 @@
 #include <linux/ata_platform.h>
 #include <linux/mtd/nand.h>
 #include <linux/dma-mapping.h>
-#include <linux/of.h>
+#include <linux/clk-provider.h>
+#include <linux/spinlock.h>
 #include <net/dsa.h>
 #include <asm/page.h>
 #include <asm/timex.h>
@@ -32,6 +33,7 @@
 #include <plat/common.h>
 #include <plat/time.h>
 #include <plat/addr-map.h>
+#include <plat/mv_xor.h>
 #include "common.h"
 
 /*****************************************************************************
@@ -61,20 +63,191 @@
 	iotable_init(kirkwood_io_desc, ARRAY_SIZE(kirkwood_io_desc));
 }
 
-/*
- * Default clock control bits.  Any bit _not_ set in this variable
- * will be cleared from the hardware after platform devices have been
- * registered.  Some reserved bits must be set to 1.
- */
-unsigned int kirkwood_clk_ctrl = CGC_DUNIT | CGC_RESERVED;
+/*****************************************************************************
+ * CLK tree
+ ****************************************************************************/
 
+static void disable_sata0(void)
+{
+	/* Disable PLL and IVREF */
+	writel(readl(SATA0_PHY_MODE_2) & ~0xf, SATA0_PHY_MODE_2);
+	/* Disable PHY */
+	writel(readl(SATA0_IF_CTRL) | 0x200, SATA0_IF_CTRL);
+}
+
+static void disable_sata1(void)
+{
+	/* Disable PLL and IVREF */
+	writel(readl(SATA1_PHY_MODE_2) & ~0xf, SATA1_PHY_MODE_2);
+	/* Disable PHY */
+	writel(readl(SATA1_IF_CTRL) | 0x200, SATA1_IF_CTRL);
+}
+
+static void disable_pcie0(void)
+{
+	writel(readl(PCIE_LINK_CTRL) | 0x10, PCIE_LINK_CTRL);
+	while (1)
+		if (readl(PCIE_STATUS) & 0x1)
+			break;
+	writel(readl(PCIE_LINK_CTRL) & ~0x10, PCIE_LINK_CTRL);
+}
+
+static void disable_pcie1(void)
+{
+	u32 dev, rev;
+
+	kirkwood_pcie_id(&dev, &rev);
+
+	if (dev == MV88F6282_DEV_ID) {
+		writel(readl(PCIE1_LINK_CTRL) | 0x10, PCIE1_LINK_CTRL);
+		while (1)
+			if (readl(PCIE1_STATUS) & 0x1)
+				break;
+		writel(readl(PCIE1_LINK_CTRL) & ~0x10, PCIE1_LINK_CTRL);
+	}
+}
+
+/* An extended version of the gated clk. This calls fn() before
+ * disabling the clock. We use this to turn off PHYs etc. */
+struct clk_gate_fn {
+	struct clk_gate gate;
+	void (*fn)(void);
+};
+
+#define to_clk_gate_fn(_gate) container_of(_gate, struct clk_gate_fn, gate)
+#define to_clk_gate(_hw) container_of(_hw, struct clk_gate, hw)
+
+static void clk_gate_fn_disable(struct clk_hw *hw)
+{
+	struct clk_gate *gate = to_clk_gate(hw);
+	struct clk_gate_fn *gate_fn = to_clk_gate_fn(gate);
+
+	if (gate_fn->fn)
+		gate_fn->fn();
+
+	clk_gate_ops.disable(hw);
+}
+
+static struct clk_ops clk_gate_fn_ops;
+
+static struct clk __init *clk_register_gate_fn(struct device *dev,
+		const char *name,
+		const char *parent_name, unsigned long flags,
+		void __iomem *reg, u8 bit_idx,
+		u8 clk_gate_flags, spinlock_t *lock,
+		void (*fn)(void))
+{
+	struct clk_gate_fn *gate_fn;
+	struct clk *clk;
+	struct clk_init_data init;
+
+	gate_fn = kzalloc(sizeof(struct clk_gate_fn), GFP_KERNEL);
+	if (!gate_fn) {
+		pr_err("%s: could not allocate gated clk\n", __func__);
+		return ERR_PTR(-ENOMEM);
+	}
+
+	init.name = name;
+	init.ops = &clk_gate_fn_ops;
+	init.flags = flags;
+	init.parent_names = (parent_name ? &parent_name : NULL);
+	init.num_parents = (parent_name ? 1 : 0);
+
+	/* struct clk_gate assignments */
+	gate_fn->gate.reg = reg;
+	gate_fn->gate.bit_idx = bit_idx;
+	gate_fn->gate.flags = clk_gate_flags;
+	gate_fn->gate.lock = lock;
+	gate_fn->gate.hw.init = &init;
+	gate_fn->fn = fn;
+
+	/* ops is the gate ops, but with our disable function */
+	if (clk_gate_fn_ops.disable != clk_gate_fn_disable) {
+		clk_gate_fn_ops = clk_gate_ops;
+		clk_gate_fn_ops.disable = clk_gate_fn_disable;
+	}
+
+	clk = clk_register(dev, &gate_fn->gate.hw);
+
+	if (IS_ERR(clk))
+		kfree(gate_fn);
+
+	return clk;
+}
+
+static DEFINE_SPINLOCK(gating_lock);
+static struct clk *tclk;
+
+static struct clk __init *kirkwood_register_gate(const char *name, u8 bit_idx)
+{
+	return clk_register_gate(NULL, name, "tclk", 0,
+				 (void __iomem *)CLOCK_GATING_CTRL,
+				 bit_idx, 0, &gating_lock);
+}
+
+static struct clk __init *kirkwood_register_gate_fn(const char *name,
+						    u8 bit_idx,
+						    void (*fn)(void))
+{
+	return clk_register_gate_fn(NULL, name, "tclk", 0,
+				    (void __iomem *)CLOCK_GATING_CTRL,
+				    bit_idx, 0, &gating_lock, fn);
+}
+
+static struct clk *ge0, *ge1;
+
+void __init kirkwood_clk_init(void)
+{
+	struct clk *runit, *sata0, *sata1, *usb0, *sdio;
+	struct clk *crypto, *xor0, *xor1, *pex0, *pex1, *audio;
+
+	tclk = clk_register_fixed_rate(NULL, "tclk", NULL,
+				       CLK_IS_ROOT, kirkwood_tclk);
+
+	runit = kirkwood_register_gate("runit",  CGC_BIT_RUNIT);
+	ge0 = kirkwood_register_gate("ge0",    CGC_BIT_GE0);
+	ge1 = kirkwood_register_gate("ge1",    CGC_BIT_GE1);
+	sata0 = kirkwood_register_gate_fn("sata0",  CGC_BIT_SATA0,
+					  disable_sata0);
+	sata1 = kirkwood_register_gate_fn("sata1",  CGC_BIT_SATA1,
+					  disable_sata1);
+	usb0 = kirkwood_register_gate("usb0",   CGC_BIT_USB0);
+	sdio = kirkwood_register_gate("sdio",   CGC_BIT_SDIO);
+	crypto = kirkwood_register_gate("crypto", CGC_BIT_CRYPTO);
+	xor0 = kirkwood_register_gate("xor0",   CGC_BIT_XOR0);
+	xor1 = kirkwood_register_gate("xor1",   CGC_BIT_XOR1);
+	pex0 = kirkwood_register_gate_fn("pex0",   CGC_BIT_PEX0,
+					 disable_pcie0);
+	pex1 = kirkwood_register_gate_fn("pex1",   CGC_BIT_PEX1,
+					 disable_pcie1);
+	audio = kirkwood_register_gate("audio",  CGC_BIT_AUDIO);
+	kirkwood_register_gate("tdm",    CGC_BIT_TDM);
+	kirkwood_register_gate("tsu",    CGC_BIT_TSU);
+
+	/* clkdev entries, mapping clks to devices */
+	orion_clkdev_add(NULL, "orion_spi.0", runit);
+	orion_clkdev_add(NULL, "orion_spi.1", runit);
+	orion_clkdev_add(NULL, MV643XX_ETH_NAME ".0", ge0);
+	orion_clkdev_add(NULL, MV643XX_ETH_NAME ".1", ge1);
+	orion_clkdev_add(NULL, "orion_wdt", tclk);
+	orion_clkdev_add("0", "sata_mv.0", sata0);
+	orion_clkdev_add("1", "sata_mv.0", sata1);
+	orion_clkdev_add(NULL, "orion-ehci.0", usb0);
+	orion_clkdev_add(NULL, "orion_nand", runit);
+	orion_clkdev_add(NULL, "mvsdio", sdio);
+	orion_clkdev_add(NULL, "mv_crypto", crypto);
+	orion_clkdev_add(NULL, MV_XOR_SHARED_NAME ".0", xor0);
+	orion_clkdev_add(NULL, MV_XOR_SHARED_NAME ".1", xor1);
+	orion_clkdev_add("0", "pcie", pex0);
+	orion_clkdev_add("1", "pcie", pex1);
+	orion_clkdev_add(NULL, "kirkwood-i2s", audio);
+}
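/*
 * Illustrative sketch, not part of this patch: for the "fn" gates registered
 * above, the final clk_disable() runs the board-level hook before the gate
 * bit is cleared, e.g. for the first SATA port (clkdev entry "0" on
 * "sata_mv.0").
 */
#include <linux/clk.h>
#include <linux/err.h>

static void example_sata0_gate_cycle(void)
{
	struct clk *sata0 = clk_get_sys("sata_mv.0", "0");

	if (IS_ERR(sata0))
		return;

	clk_prepare_enable(sata0);	/* sets CGC_BIT_SATA0 in CLOCK_GATING_CTRL */
	/* ... use the port ... */
	clk_disable_unprepare(sata0);	/* clk_gate_fn_disable(): disable_sata0(), then gate */
	clk_put(sata0);
}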
 
 /*****************************************************************************
  * EHCI0
  ****************************************************************************/
 void __init kirkwood_ehci_init(void)
 {
-	kirkwood_clk_ctrl |= CGC_USB0;
 	orion_ehci_init(USB_PHYS_BASE, IRQ_KIRKWOOD_USB, EHCI_PHY_NA);
 }
 
@@ -84,11 +257,12 @@
  ****************************************************************************/
 void __init kirkwood_ge00_init(struct mv643xx_eth_platform_data *eth_data)
 {
-	kirkwood_clk_ctrl |= CGC_GE0;
-
 	orion_ge00_init(eth_data,
 			GE00_PHYS_BASE, IRQ_KIRKWOOD_GE00_SUM,
-			IRQ_KIRKWOOD_GE00_ERR, kirkwood_tclk);
+			IRQ_KIRKWOOD_GE00_ERR);
+	/* The interface forgets the MAC address assigned by u-boot if
+	 * the clock is turned off, so claim the clk now. */
+	clk_prepare_enable(ge0);
 }
 
 
@@ -97,12 +271,10 @@
  ****************************************************************************/
 void __init kirkwood_ge01_init(struct mv643xx_eth_platform_data *eth_data)
 {
-
-	kirkwood_clk_ctrl |= CGC_GE1;
-
 	orion_ge01_init(eth_data,
 			GE01_PHYS_BASE, IRQ_KIRKWOOD_GE01_SUM,
-			IRQ_KIRKWOOD_GE01_ERR, kirkwood_tclk);
+			IRQ_KIRKWOOD_GE01_ERR);
+	clk_prepare_enable(ge1);
 }
 
 
@@ -144,7 +316,6 @@
 void __init kirkwood_nand_init(struct mtd_partition *parts, int nr_parts,
 			       int chip_delay)
 {
-	kirkwood_clk_ctrl |= CGC_RUNIT;
 	kirkwood_nand_data.parts = parts;
 	kirkwood_nand_data.nr_parts = nr_parts;
 	kirkwood_nand_data.chip_delay = chip_delay;
@@ -154,7 +325,6 @@
 void __init kirkwood_nand_init_rnb(struct mtd_partition *parts, int nr_parts,
 				   int (*dev_ready)(struct mtd_info *))
 {
-	kirkwood_clk_ctrl |= CGC_RUNIT;
 	kirkwood_nand_data.parts = parts;
 	kirkwood_nand_data.nr_parts = nr_parts;
 	kirkwood_nand_data.dev_ready = dev_ready;
@@ -175,10 +345,6 @@
  ****************************************************************************/
 void __init kirkwood_sata_init(struct mv_sata_platform_data *sata_data)
 {
-	kirkwood_clk_ctrl |= CGC_SATA0;
-	if (sata_data->n_ports > 1)
-		kirkwood_clk_ctrl |= CGC_SATA1;
-
 	orion_sata_init(sata_data, SATA_PHYS_BASE, IRQ_KIRKWOOD_SATA);
 }
 
@@ -221,7 +387,6 @@
 		mvsdio_data->clock = 100000000;
 	else
 		mvsdio_data->clock = 200000000;
-	kirkwood_clk_ctrl |= CGC_SDIO;
 	kirkwood_sdio.dev.platform_data = mvsdio_data;
 	platform_device_register(&kirkwood_sdio);
 }
@@ -232,8 +397,7 @@
  ****************************************************************************/
 void __init kirkwood_spi_init()
 {
-	kirkwood_clk_ctrl |= CGC_RUNIT;
-	orion_spi_init(SPI_PHYS_BASE, kirkwood_tclk);
+	orion_spi_init(SPI_PHYS_BASE);
 }
 
 
@@ -253,7 +417,7 @@
 void __init kirkwood_uart0_init(void)
 {
 	orion_uart0_init(UART0_VIRT_BASE, UART0_PHYS_BASE,
-			 IRQ_KIRKWOOD_UART_0, kirkwood_tclk);
+			 IRQ_KIRKWOOD_UART_0, tclk);
 }
 
 
@@ -263,7 +427,7 @@
 void __init kirkwood_uart1_init(void)
 {
 	orion_uart1_init(UART1_VIRT_BASE, UART1_PHYS_BASE,
-			 IRQ_KIRKWOOD_UART_1, kirkwood_tclk);
+			 IRQ_KIRKWOOD_UART_1, tclk);
 }
 
 /*****************************************************************************
@@ -271,7 +435,6 @@
  ****************************************************************************/
 void __init kirkwood_crypto_init(void)
 {
-	kirkwood_clk_ctrl |= CGC_CRYPTO;
 	orion_crypto_init(CRYPTO_PHYS_BASE, KIRKWOOD_SRAM_PHYS_BASE,
 			  KIRKWOOD_SRAM_SIZE, IRQ_KIRKWOOD_CRYPTO);
 }
@@ -282,8 +445,6 @@
  ****************************************************************************/
 void __init kirkwood_xor0_init(void)
 {
-	kirkwood_clk_ctrl |= CGC_XOR0;
-
 	orion_xor0_init(XOR0_PHYS_BASE, XOR0_HIGH_PHYS_BASE,
 			IRQ_KIRKWOOD_XOR_00, IRQ_KIRKWOOD_XOR_01);
 }
@@ -294,8 +455,6 @@
  ****************************************************************************/
 void __init kirkwood_xor1_init(void)
 {
-	kirkwood_clk_ctrl |= CGC_XOR1;
-
 	orion_xor1_init(XOR1_PHYS_BASE, XOR1_HIGH_PHYS_BASE,
 			IRQ_KIRKWOOD_XOR_10, IRQ_KIRKWOOD_XOR_11);
 }
@@ -306,7 +465,7 @@
  ****************************************************************************/
 void __init kirkwood_wdt_init(void)
 {
-	orion_wdt_init(kirkwood_tclk);
+	orion_wdt_init();
 }
 
 
@@ -382,7 +541,6 @@
 
 void __init kirkwood_audio_init(void)
 {
-	kirkwood_clk_ctrl |= CGC_AUDIO;
 	platform_device_register(&kirkwood_i2s_device);
 	platform_device_register(&kirkwood_pcm_device);
 }
@@ -466,6 +624,9 @@
 	kirkwood_l2_init();
 #endif
 
+	/* Set up the root of the clk tree */
+	kirkwood_clk_init();
+
 	/* internal devices that every board has */
 	kirkwood_rtc_init();
 	kirkwood_wdt_init();
@@ -478,72 +639,6 @@
 #endif
 }
 
-static int __init kirkwood_clock_gate(void)
-{
-	unsigned int curr = readl(CLOCK_GATING_CTRL);
-	u32 dev, rev;
-
-#ifdef CONFIG_OF
-	struct device_node *np;
-#endif
-	kirkwood_pcie_id(&dev, &rev);
-	printk(KERN_DEBUG "Gating clock of unused units\n");
-	printk(KERN_DEBUG "before: 0x%08x\n", curr);
-
-	/* Make sure those units are accessible */
-	writel(curr | CGC_SATA0 | CGC_SATA1 | CGC_PEX0 | CGC_PEX1, CLOCK_GATING_CTRL);
-
-#ifdef CONFIG_OF
-	np = of_find_compatible_node(NULL, NULL, "mrvl,orion-nand");
-	if (np && of_device_is_available(np)) {
-		kirkwood_clk_ctrl |= CGC_RUNIT;
-		of_node_put(np);
-	}
-#endif
-
-	/* For SATA: first shutdown the phy */
-	if (!(kirkwood_clk_ctrl & CGC_SATA0)) {
-		/* Disable PLL and IVREF */
-		writel(readl(SATA0_PHY_MODE_2) & ~0xf, SATA0_PHY_MODE_2);
-		/* Disable PHY */
-		writel(readl(SATA0_IF_CTRL) | 0x200, SATA0_IF_CTRL);
-	}
-	if (!(kirkwood_clk_ctrl & CGC_SATA1)) {
-		/* Disable PLL and IVREF */
-		writel(readl(SATA1_PHY_MODE_2) & ~0xf, SATA1_PHY_MODE_2);
-		/* Disable PHY */
-		writel(readl(SATA1_IF_CTRL) | 0x200, SATA1_IF_CTRL);
-	}
-	
-	/* For PCIe: first shutdown the phy */
-	if (!(kirkwood_clk_ctrl & CGC_PEX0)) {
-		writel(readl(PCIE_LINK_CTRL) | 0x10, PCIE_LINK_CTRL);
-		while (1)
-			if (readl(PCIE_STATUS) & 0x1)
-				break;
-		writel(readl(PCIE_LINK_CTRL) & ~0x10, PCIE_LINK_CTRL);
-	}
-
-	/* For PCIe 1: first shutdown the phy */
-	if (dev == MV88F6282_DEV_ID) {
-		if (!(kirkwood_clk_ctrl & CGC_PEX1)) {
-			writel(readl(PCIE1_LINK_CTRL) | 0x10, PCIE1_LINK_CTRL);
-			while (1)
-				if (readl(PCIE1_STATUS) & 0x1)
-					break;
-			writel(readl(PCIE1_LINK_CTRL) & ~0x10, PCIE1_LINK_CTRL);
-		}
-	} else  /* keep this bit set for devices that don't have PCIe1 */
-		kirkwood_clk_ctrl |= CGC_PEX1;
-
-	/* Now gate clock the required units */
-	writel(kirkwood_clk_ctrl, CLOCK_GATING_CTRL);
-	printk(KERN_DEBUG " after: 0x%08x\n", readl(CLOCK_GATING_CTRL));
-
-	return 0;
-}
-late_initcall(kirkwood_clock_gate);
-
 void kirkwood_restart(char mode, const char *cmd)
 {
 	/*
diff --git a/arch/arm/mach-kirkwood/common.h b/arch/arm/mach-kirkwood/common.h
index a34c41a..9248fa2 100644
--- a/arch/arm/mach-kirkwood/common.h
+++ b/arch/arm/mach-kirkwood/common.h
@@ -50,6 +50,7 @@
 void kirkwood_nand_init_rnb(struct mtd_partition *parts, int nr_parts, int (*dev_ready)(struct mtd_info *));
 void kirkwood_audio_init(void);
 void kirkwood_restart(char, const char *);
+void kirkwood_clk_init(void);
 
 /* board init functions for boards not fully converted to fdt */
 #ifdef CONFIG_MACH_DREAMPLUG_DT
diff --git a/arch/arm/mach-kirkwood/include/mach/bridge-regs.h b/arch/arm/mach-kirkwood/include/mach/bridge-regs.h
index 957bd79..a115142 100644
--- a/arch/arm/mach-kirkwood/include/mach/bridge-regs.h
+++ b/arch/arm/mach-kirkwood/include/mach/bridge-regs.h
@@ -38,11 +38,28 @@
 #define IRQ_MASK_HIGH_OFF	0x0014
 
 #define TIMER_VIRT_BASE		(BRIDGE_VIRT_BASE | 0x0300)
+#define TIMER_PHYS_BASE		(BRIDGE_PHYS_BASE | 0x0300)
 
 #define L2_CONFIG_REG		(BRIDGE_VIRT_BASE | 0x0128)
 #define L2_WRITETHROUGH		0x00000010
 
 #define CLOCK_GATING_CTRL	(BRIDGE_VIRT_BASE | 0x11c)
+#define CGC_BIT_GE0		(0)
+#define CGC_BIT_PEX0		(2)
+#define CGC_BIT_USB0		(3)
+#define CGC_BIT_SDIO		(4)
+#define CGC_BIT_TSU		(5)
+#define CGC_BIT_DUNIT		(6)
+#define CGC_BIT_RUNIT		(7)
+#define CGC_BIT_XOR0		(8)
+#define CGC_BIT_AUDIO		(9)
+#define CGC_BIT_SATA0		(14)
+#define CGC_BIT_SATA1		(15)
+#define CGC_BIT_XOR1		(16)
+#define CGC_BIT_CRYPTO		(17)
+#define CGC_BIT_PEX1		(18)
+#define CGC_BIT_GE1		(19)
+#define CGC_BIT_TDM		(20)
 #define CGC_GE0			(1 << 0)
 #define CGC_PEX0		(1 << 2)
 #define CGC_USB0		(1 << 3)
diff --git a/arch/arm/mach-kirkwood/include/mach/kirkwood.h b/arch/arm/mach-kirkwood/include/mach/kirkwood.h
index fede3d5..c5b6851 100644
--- a/arch/arm/mach-kirkwood/include/mach/kirkwood.h
+++ b/arch/arm/mach-kirkwood/include/mach/kirkwood.h
@@ -80,6 +80,7 @@
 #define  UART1_VIRT_BASE	(DEV_BUS_VIRT_BASE | 0x2100)
 
 #define BRIDGE_VIRT_BASE	(KIRKWOOD_REGS_VIRT_BASE | 0x20000)
+#define BRIDGE_PHYS_BASE	(KIRKWOOD_REGS_PHYS_BASE | 0x20000)
 
 #define CRYPTO_PHYS_BASE	(KIRKWOOD_REGS_PHYS_BASE | 0x30000)
 
diff --git a/arch/arm/mach-kirkwood/mv88f6281gtw_ge-setup.c b/arch/arm/mach-kirkwood/mv88f6281gtw_ge-setup.c
index 85f6169..6d8364a 100644
--- a/arch/arm/mach-kirkwood/mv88f6281gtw_ge-setup.c
+++ b/arch/arm/mach-kirkwood/mv88f6281gtw_ge-setup.c
@@ -23,7 +23,6 @@
 #include <linux/gpio_keys.h>
 #include <linux/spi/flash.h>
 #include <linux/spi/spi.h>
-#include <linux/spi/orion_spi.h>
 #include <net/dsa.h>
 #include <asm/mach-types.h>
 #include <asm/mach/arch.h>
diff --git a/arch/arm/mach-kirkwood/pcie.c b/arch/arm/mach-kirkwood/pcie.c
index de37317..6e8b2ef 100644
--- a/arch/arm/mach-kirkwood/pcie.c
+++ b/arch/arm/mach-kirkwood/pcie.c
@@ -11,6 +11,7 @@
 #include <linux/kernel.h>
 #include <linux/pci.h>
 #include <linux/slab.h>
+#include <linux/clk.h>
 #include <video/vga.h>
 #include <asm/irq.h>
 #include <asm/mach/pci.h>
@@ -19,6 +20,23 @@
 #include <plat/addr-map.h>
 #include "common.h"
 
+static void kirkwood_enable_pcie_clk(const char *port)
+{
+	struct clk *clk;
+
+	clk = clk_get_sys("pcie", port);
+	if (IS_ERR(clk)) {
+		printk(KERN_ERR "PCIE clock %s missing\n", port);
+		return;
+	}
+	clk_prepare_enable(clk);
+	clk_put(clk);
+}
+
+/* This function is called very early in the boot when probing the
+ * hardware to determine what we actually are, and what rate tclk is
+ * ticking at. Hence calling kirkwood_enable_pcie_clk() is not
+ * possible since the clk tree has not been created yet. */
 void kirkwood_enable_pcie(void)
 {
 	u32 curr = readl(CLOCK_GATING_CTRL);
@@ -26,7 +44,7 @@
 		writel(curr | CGC_PEX0, CLOCK_GATING_CTRL);
 }
 
-void __init kirkwood_pcie_id(u32 *dev, u32 *rev)
+void kirkwood_pcie_id(u32 *dev, u32 *rev)
 {
 	kirkwood_enable_pcie();
 	*dev = orion_pcie_dev_id((void __iomem *)PCIE_VIRT_BASE);
@@ -159,7 +177,6 @@
 
 static int __init kirkwood_pcie_setup(int nr, struct pci_sys_data *sys)
 {
-	extern unsigned int kirkwood_clk_ctrl;
 	struct pcie_port *pp;
 	int index;
 
@@ -178,11 +195,11 @@
 
 	switch (index) {
 	case 0:
-		kirkwood_clk_ctrl |= CGC_PEX0;
+		kirkwood_enable_pcie_clk("0");
 		pcie0_ioresources_init(pp);
 		break;
 	case 1:
-		kirkwood_clk_ctrl |= CGC_PEX1;
+		kirkwood_enable_pcie_clk("1");
 		pcie1_ioresources_init(pp);
 		break;
 	default:
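
The port string passed to kirkwood_enable_pcie_clk() above is resolved through clk_get_sys("pcie", port), so the platform must have registered matching clkdev entries; that registration lives in the Kirkwood common clock setup, which is not part of this hunk. Below is a minimal, hedged sketch of what such a registration could look like, reusing the CGC_BIT_PEX0 and CLOCK_GATING_CTRL definitions added in bridge-regs.h above; the helper name kirkwood_register_pex0_clk() is hypothetical.

#include <linux/clk-provider.h>
#include <linux/clkdev.h>
#include <linux/err.h>

/* Hypothetical sketch, not code from this merge: register the PEX0
 * gate so that clk_get_sys("pcie", "0") can find it. */
static void __init kirkwood_register_pex0_clk(void)
{
	struct clk *clk;

	/* Gate controlled by bit CGC_BIT_PEX0 of CLOCK_GATING_CTRL,
	 * parented on the fixed-rate "tclk". */
	clk = clk_register_gate(NULL, "pex0", "tclk", 0,
				(void __iomem *)CLOCK_GATING_CTRL,
				CGC_BIT_PEX0, 0, NULL);
	if (!IS_ERR(clk))
		clk_register_clkdev(clk, "0", "pcie");
}

With such an entry in place, the switch in kirkwood_pcie_setup() simply enables the right gate per port instead of poking kirkwood_clk_ctrl directly.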
diff --git a/arch/arm/mach-kirkwood/rd88f6192-nas-setup.c b/arch/arm/mach-kirkwood/rd88f6192-nas-setup.c
index fd2c9c8..f742a66 100644
--- a/arch/arm/mach-kirkwood/rd88f6192-nas-setup.c
+++ b/arch/arm/mach-kirkwood/rd88f6192-nas-setup.c
@@ -16,7 +16,6 @@
 #include <linux/gpio.h>
 #include <linux/spi/flash.h>
 #include <linux/spi/spi.h>
-#include <linux/spi/orion_spi.h>
 #include <asm/mach-types.h>
 #include <asm/mach/arch.h>
 #include <mach/kirkwood.h>
diff --git a/arch/arm/mach-kirkwood/t5325-setup.c b/arch/arm/mach-kirkwood/t5325-setup.c
index f9d2a11..bad738e 100644
--- a/arch/arm/mach-kirkwood/t5325-setup.c
+++ b/arch/arm/mach-kirkwood/t5325-setup.c
@@ -16,7 +16,6 @@
 #include <linux/mtd/physmap.h>
 #include <linux/spi/flash.h>
 #include <linux/spi/spi.h>
-#include <linux/spi/orion_spi.h>
 #include <linux/i2c.h>
 #include <linux/mv643xx_eth.h>
 #include <linux/ata_platform.h>
diff --git a/arch/arm/mach-kirkwood/tsx1x-common.c b/arch/arm/mach-kirkwood/tsx1x-common.c
index 24294b2..8943ede 100644
--- a/arch/arm/mach-kirkwood/tsx1x-common.c
+++ b/arch/arm/mach-kirkwood/tsx1x-common.c
@@ -4,7 +4,6 @@
 #include <linux/mtd/physmap.h>
 #include <linux/spi/flash.h>
 #include <linux/spi/spi.h>
-#include <linux/spi/orion_spi.h>
 #include <linux/serial_reg.h>
 #include <mach/kirkwood.h>
 #include "common.h"
diff --git a/arch/arm/mach-mmp/irq.c b/arch/arm/mach-mmp/irq.c
index fcfe0e3..e60c7d9 100644
--- a/arch/arm/mach-mmp/irq.c
+++ b/arch/arm/mach-mmp/irq.c
@@ -241,6 +241,7 @@
 	icu_data[1].clr_mfp_irq_base = IRQ_MMP2_PMIC_BASE;
 	icu_data[1].clr_mfp_hwirq = IRQ_MMP2_PMIC - IRQ_MMP2_PMIC_BASE;
 	icu_data[1].nr_irqs = 2;
+	icu_data[1].cascade_irq = 4;
 	icu_data[1].virq_base = IRQ_MMP2_PMIC_BASE;
 	icu_data[1].domain = irq_domain_add_legacy(NULL, icu_data[1].nr_irqs,
 						   icu_data[1].virq_base, 0,
@@ -249,6 +250,7 @@
 	icu_data[2].reg_status = mmp_icu_base + 0x154;
 	icu_data[2].reg_mask = mmp_icu_base + 0x16c;
 	icu_data[2].nr_irqs = 2;
+	icu_data[2].cascade_irq = 5;
 	icu_data[2].virq_base = IRQ_MMP2_RTC_BASE;
 	icu_data[2].domain = irq_domain_add_legacy(NULL, icu_data[2].nr_irqs,
 						   icu_data[2].virq_base, 0,
@@ -257,6 +259,7 @@
 	icu_data[3].reg_status = mmp_icu_base + 0x180;
 	icu_data[3].reg_mask = mmp_icu_base + 0x17c;
 	icu_data[3].nr_irqs = 3;
+	icu_data[3].cascade_irq = 9;
 	icu_data[3].virq_base = IRQ_MMP2_KEYPAD_BASE;
 	icu_data[3].domain = irq_domain_add_legacy(NULL, icu_data[3].nr_irqs,
 						   icu_data[3].virq_base, 0,
@@ -265,6 +268,7 @@
 	icu_data[4].reg_status = mmp_icu_base + 0x158;
 	icu_data[4].reg_mask = mmp_icu_base + 0x170;
 	icu_data[4].nr_irqs = 5;
+	icu_data[4].cascade_irq = 17;
 	icu_data[4].virq_base = IRQ_MMP2_TWSI_BASE;
 	icu_data[4].domain = irq_domain_add_legacy(NULL, icu_data[4].nr_irqs,
 						   icu_data[4].virq_base, 0,
@@ -273,6 +277,7 @@
 	icu_data[5].reg_status = mmp_icu_base + 0x15c;
 	icu_data[5].reg_mask = mmp_icu_base + 0x174;
 	icu_data[5].nr_irqs = 15;
+	icu_data[5].cascade_irq = 35;
 	icu_data[5].virq_base = IRQ_MMP2_MISC_BASE;
 	icu_data[5].domain = irq_domain_add_legacy(NULL, icu_data[5].nr_irqs,
 						   icu_data[5].virq_base, 0,
@@ -281,6 +286,7 @@
 	icu_data[6].reg_status = mmp_icu_base + 0x160;
 	icu_data[6].reg_mask = mmp_icu_base + 0x178;
 	icu_data[6].nr_irqs = 2;
+	icu_data[6].cascade_irq = 51;
 	icu_data[6].virq_base = IRQ_MMP2_MIPI_HSI1_BASE;
 	icu_data[6].domain = irq_domain_add_legacy(NULL, icu_data[6].nr_irqs,
 						   icu_data[6].virq_base, 0,
@@ -289,6 +295,7 @@
 	icu_data[7].reg_status = mmp_icu_base + 0x188;
 	icu_data[7].reg_mask = mmp_icu_base + 0x184;
 	icu_data[7].nr_irqs = 2;
+	icu_data[7].cascade_irq = 55;
 	icu_data[7].virq_base = IRQ_MMP2_MIPI_HSI0_BASE;
 	icu_data[7].domain = irq_domain_add_legacy(NULL, icu_data[7].nr_irqs,
 						   icu_data[7].virq_base, 0,
diff --git a/arch/arm/mach-msm/board-halibut.c b/arch/arm/mach-msm/board-halibut.c
index 26aac36..4fa3e99 100644
--- a/arch/arm/mach-msm/board-halibut.c
+++ b/arch/arm/mach-msm/board-halibut.c
@@ -94,6 +94,11 @@
 	msm_clock_init(msm_clocks_7x01a, msm_num_clocks_7x01a);
 }
 
+static void __init halibut_init_late(void)
+{
+	smd_debugfs_init();
+}
+
 MACHINE_START(HALIBUT, "Halibut Board (QCT SURF7200A)")
 	.atag_offset	= 0x100,
 	.fixup		= halibut_fixup,
@@ -101,5 +106,6 @@
 	.init_early	= halibut_init_early,
 	.init_irq	= halibut_init_irq,
 	.init_machine	= halibut_init,
+	.init_late	= halibut_init_late,
 	.timer		= &msm_timer,
 MACHINE_END
diff --git a/arch/arm/mach-msm/board-mahimahi.c b/arch/arm/mach-msm/board-mahimahi.c
index 5a4882f..cf1f89a 100644
--- a/arch/arm/mach-msm/board-mahimahi.c
+++ b/arch/arm/mach-msm/board-mahimahi.c
@@ -71,6 +71,11 @@
 	msm_clock_init();
 }
 
+static void __init mahimahi_init_late(void)
+{
+	smd_debugfs_init();
+}
+
 extern struct sys_timer msm_timer;
 
 MACHINE_START(MAHIMAHI, "mahimahi")
@@ -79,5 +84,6 @@
 	.map_io		= mahimahi_map_io,
 	.init_irq	= msm_init_irq,
 	.init_machine	= mahimahi_init,
+	.init_late	= mahimahi_init_late,
 	.timer		= &msm_timer,
 MACHINE_END
diff --git a/arch/arm/mach-msm/board-msm7x27.c b/arch/arm/mach-msm/board-msm7x27.c
index 6d84ee7..451ab1d 100644
--- a/arch/arm/mach-msm/board-msm7x27.c
+++ b/arch/arm/mach-msm/board-msm7x27.c
@@ -128,11 +128,17 @@
 #endif
 }
 
+static void __init msm7x2x_init_late(void)
+{
+	smd_debugfs_init();
+}
+
 MACHINE_START(MSM7X27_SURF, "QCT MSM7x27 SURF")
 	.atag_offset	= 0x100,
 	.map_io		= msm7x2x_map_io,
 	.init_irq	= msm7x2x_init_irq,
 	.init_machine	= msm7x2x_init,
+	.init_late	= msm7x2x_init_late,
 	.timer		= &msm_timer,
 MACHINE_END
 
@@ -141,6 +147,7 @@
 	.map_io		= msm7x2x_map_io,
 	.init_irq	= msm7x2x_init_irq,
 	.init_machine	= msm7x2x_init,
+	.init_late	= msm7x2x_init_late,
 	.timer		= &msm_timer,
 MACHINE_END
 
@@ -149,6 +156,7 @@
 	.map_io		= msm7x2x_map_io,
 	.init_irq	= msm7x2x_init_irq,
 	.init_machine	= msm7x2x_init,
+	.init_late	= msm7x2x_init_late,
 	.timer		= &msm_timer,
 MACHINE_END
 
@@ -157,5 +165,6 @@
 	.map_io		= msm7x2x_map_io,
 	.init_irq	= msm7x2x_init_irq,
 	.init_machine	= msm7x2x_init,
+	.init_late	= msm7x2x_init_late,
 	.timer		= &msm_timer,
 MACHINE_END
diff --git a/arch/arm/mach-msm/board-msm7x30.c b/arch/arm/mach-msm/board-msm7x30.c
index 75b3cfc..a500137 100644
--- a/arch/arm/mach-msm/board-msm7x30.c
+++ b/arch/arm/mach-msm/board-msm7x30.c
@@ -119,6 +119,11 @@
 	msm_clock_init(msm_clocks_7x30, msm_num_clocks_7x30);
 }
 
+static void __init msm7x30_init_late(void)
+{
+	smd_debugfs_init();
+}
+
 MACHINE_START(MSM7X30_SURF, "QCT MSM7X30 SURF")
 	.atag_offset = 0x100,
 	.fixup = msm7x30_fixup,
@@ -126,6 +131,7 @@
 	.map_io = msm7x30_map_io,
 	.init_irq = msm7x30_init_irq,
 	.init_machine = msm7x30_init,
+	.init_late = msm7x30_init_late,
 	.timer = &msm_timer,
 MACHINE_END
 
@@ -136,6 +142,7 @@
 	.map_io = msm7x30_map_io,
 	.init_irq = msm7x30_init_irq,
 	.init_machine = msm7x30_init,
+	.init_late = msm7x30_init_late,
 	.timer = &msm_timer,
 MACHINE_END
 
@@ -146,5 +153,6 @@
 	.map_io = msm7x30_map_io,
 	.init_irq = msm7x30_init_irq,
 	.init_machine = msm7x30_init,
+	.init_late = msm7x30_init_late,
 	.timer = &msm_timer,
 MACHINE_END
diff --git a/arch/arm/mach-msm/board-msm8960.c b/arch/arm/mach-msm/board-msm8960.c
index ed35981..65f4a1d 100644
--- a/arch/arm/mach-msm/board-msm8960.c
+++ b/arch/arm/mach-msm/board-msm8960.c
@@ -93,6 +93,11 @@
 	platform_add_devices(rumi3_devices, ARRAY_SIZE(rumi3_devices));
 }
 
+static void __init msm8960_init_late(void)
+{
+	smd_debugfs_init();
+}
+
 MACHINE_START(MSM8960_SIM, "QCT MSM8960 SIMULATOR")
 	.fixup = msm8960_fixup,
 	.reserve = msm8960_reserve,
@@ -101,6 +106,7 @@
 	.timer = &msm_timer,
 	.handle_irq = gic_handle_irq,
 	.init_machine = msm8960_sim_init,
+	.init_late = msm8960_init_late,
 MACHINE_END
 
 MACHINE_START(MSM8960_RUMI3, "QCT MSM8960 RUMI3")
@@ -111,5 +117,6 @@
 	.timer = &msm_timer,
 	.handle_irq = gic_handle_irq,
 	.init_machine = msm8960_rumi3_init,
+	.init_late = msm8960_init_late,
 MACHINE_END
 
diff --git a/arch/arm/mach-msm/board-msm8x60.c b/arch/arm/mach-msm/board-msm8x60.c
index fb3496a..e37a724 100644
--- a/arch/arm/mach-msm/board-msm8x60.c
+++ b/arch/arm/mach-msm/board-msm8x60.c
@@ -81,6 +81,11 @@
 {
 }
 
+static void __init msm8x60_init_late(void)
+{
+	smd_debugfs_init();
+}
+
 #ifdef CONFIG_OF
 static struct of_dev_auxdata msm_auxdata_lookup[] __initdata = {
 	{}
@@ -111,6 +116,7 @@
 	.init_irq = msm8x60_init_irq,
 	.handle_irq = gic_handle_irq,
 	.init_machine = msm8x60_init,
+	.init_late = msm8x60_init_late,
 	.timer = &msm_timer,
 MACHINE_END
 
@@ -121,6 +127,7 @@
 	.init_irq = msm8x60_init_irq,
 	.handle_irq = gic_handle_irq,
 	.init_machine = msm8x60_init,
+	.init_late = msm8x60_init_late,
 	.timer = &msm_timer,
 MACHINE_END
 
@@ -131,6 +138,7 @@
 	.init_irq = msm8x60_init_irq,
 	.handle_irq = gic_handle_irq,
 	.init_machine = msm8x60_init,
+	.init_late = msm8x60_init_late,
 	.timer = &msm_timer,
 MACHINE_END
 
@@ -141,6 +149,7 @@
 	.init_irq = msm8x60_init_irq,
 	.handle_irq = gic_handle_irq,
 	.init_machine = msm8x60_init,
+	.init_late = msm8x60_init_late,
 	.timer = &msm_timer,
 MACHINE_END
 
@@ -150,6 +159,7 @@
 	.map_io = msm8x60_map_io,
 	.init_irq = msm8x60_init_irq,
 	.init_machine = msm8x60_dt_init,
+	.init_late = msm8x60_init_late,
 	.timer = &msm_timer,
 	.dt_compat = msm8x60_fluid_match,
 MACHINE_END
diff --git a/arch/arm/mach-msm/board-qsd8x50.c b/arch/arm/mach-msm/board-qsd8x50.c
index fbaa4ed..c8fe0ed 100644
--- a/arch/arm/mach-msm/board-qsd8x50.c
+++ b/arch/arm/mach-msm/board-qsd8x50.c
@@ -190,11 +190,17 @@
 	qsd8x50_init_mmc();
 }
 
+static void __init qsd8x50_init_late(void)
+{
+	smd_debugfs_init();
+}
+
 MACHINE_START(QSD8X50_SURF, "QCT QSD8X50 SURF")
 	.atag_offset = 0x100,
 	.map_io = qsd8x50_map_io,
 	.init_irq = qsd8x50_init_irq,
 	.init_machine = qsd8x50_init,
+	.init_late = qsd8x50_init_late,
 	.timer = &msm_timer,
 MACHINE_END
 
@@ -203,5 +209,6 @@
 	.map_io = qsd8x50_map_io,
 	.init_irq = qsd8x50_init_irq,
 	.init_machine = qsd8x50_init,
+	.init_late = qsd8x50_init_late,
 	.timer = &msm_timer,
 MACHINE_END
diff --git a/arch/arm/mach-msm/board-sapphire.c b/arch/arm/mach-msm/board-sapphire.c
index 4a8ea0d..2e569ab 100644
--- a/arch/arm/mach-msm/board-sapphire.c
+++ b/arch/arm/mach-msm/board-sapphire.c
@@ -101,6 +101,11 @@
 	msm_clock_init();
 }
 
+static void __init sapphire_init_late(void)
+{
+	smd_debugfs_init();
+}
+
 MACHINE_START(SAPPHIRE, "sapphire")
 /* Maintainer: Brian Swetland <swetland@google.com> */
 	.atag_offset    = 0x100,
@@ -108,5 +113,6 @@
 	.map_io         = sapphire_map_io,
 	.init_irq       = sapphire_init_irq,
 	.init_machine   = sapphire_init,
+	.init_late      = sapphire_init_late,
 	.timer          = &msm_timer,
 MACHINE_END
diff --git a/arch/arm/mach-msm/board-trout.c b/arch/arm/mach-msm/board-trout.c
index d4060a3..bbe13f1 100644
--- a/arch/arm/mach-msm/board-trout.c
+++ b/arch/arm/mach-msm/board-trout.c
@@ -98,6 +98,11 @@
 	msm_clock_init(msm_clocks_7x01a, msm_num_clocks_7x01a);
 }
 
+static void __init trout_init_late(void)
+{
+	smd_debugfs_init();
+}
+
 MACHINE_START(TROUT, "HTC Dream")
 	.atag_offset	= 0x100,
 	.fixup		= trout_fixup,
@@ -105,5 +110,6 @@
 	.init_early	= trout_init_early,
 	.init_irq	= trout_init_irq,
 	.init_machine	= trout_init,
+	.init_late	= trout_init_late,
 	.timer		= &msm_timer,
 MACHINE_END
diff --git a/arch/arm/mach-msm/include/mach/board.h b/arch/arm/mach-msm/include/mach/board.h
index 2ce8f1f..435f8ed 100644
--- a/arch/arm/mach-msm/include/mach/board.h
+++ b/arch/arm/mach-msm/include/mach/board.h
@@ -47,4 +47,10 @@
 			struct msm_mmc_platform_data *plat,
 			unsigned int stat_irq, unsigned long stat_irq_flags);
 
+#if defined(CONFIG_MSM_SMD) && defined(CONFIG_DEBUG_FS)
+int smd_debugfs_init(void);
+#else
+static inline int smd_debugfs_init(void) { return 0; }
+#endif
+
 #endif
diff --git a/arch/arm/mach-msm/smd_debug.c b/arch/arm/mach-msm/smd_debug.c
index c56df9e..8056b3e 100644
--- a/arch/arm/mach-msm/smd_debug.c
+++ b/arch/arm/mach-msm/smd_debug.c
@@ -216,7 +216,7 @@
 	debugfs_create_file(name, mode, dent, fill, &debug_ops);
 }
 
-static int smd_debugfs_init(void)
+int __init smd_debugfs_init(void)
 {
 	struct dentry *dent;
 
@@ -234,7 +234,6 @@
 	return 0;
 }
 
-late_initcall(smd_debugfs_init);
 #endif
 
 
diff --git a/arch/arm/mach-mv78xx0/common.c b/arch/arm/mach-mv78xx0/common.c
index a5dcf766..b4c53b8 100644
--- a/arch/arm/mach-mv78xx0/common.c
+++ b/arch/arm/mach-mv78xx0/common.c
@@ -13,6 +13,7 @@
 #include <linux/platform_device.h>
 #include <linux/serial_8250.h>
 #include <linux/ata_platform.h>
+#include <linux/clk-provider.h>
 #include <linux/ethtool.h>
 #include <asm/mach/map.h>
 #include <asm/mach/time.h>
@@ -103,24 +104,24 @@
 
 static int get_tclk(void)
 {
-	int tclk;
+	int tclk_freq;
 
 	/*
 	 * TCLK tick rate is configured by DEV_A[2:0] strap pins.
 	 */
 	switch ((readl(SAMPLE_AT_RESET_HIGH) >> 6) & 7) {
 	case 1:
-		tclk = 166666667;
+		tclk_freq = 166666667;
 		break;
 	case 3:
-		tclk = 200000000;
+		tclk_freq = 200000000;
 		break;
 	default:
 		panic("unknown TCLK PLL setting: %.8x\n",
 			readl(SAMPLE_AT_RESET_HIGH));
 	}
 
-	return tclk;
+	return tclk_freq;
 }
 
 
@@ -166,6 +167,19 @@
 
 
 /*****************************************************************************
+ * CLK tree
+ ****************************************************************************/
+static struct clk *tclk;
+
+static void __init clk_init(void)
+{
+	tclk = clk_register_fixed_rate(NULL, "tclk", NULL, CLK_IS_ROOT,
+				       get_tclk());
+
+	orion_clkdev_init(tclk);
+}
+
+/*****************************************************************************
  * EHCI
  ****************************************************************************/
 void __init mv78xx0_ehci0_init(void)
@@ -199,7 +213,7 @@
 {
 	orion_ge00_init(eth_data,
 			GE00_PHYS_BASE, IRQ_MV78XX0_GE00_SUM,
-			IRQ_MV78XX0_GE_ERR, get_tclk());
+			IRQ_MV78XX0_GE_ERR);
 }
 
 
@@ -210,7 +224,7 @@
 {
 	orion_ge01_init(eth_data,
 			GE01_PHYS_BASE, IRQ_MV78XX0_GE01_SUM,
-			NO_IRQ, get_tclk());
+			NO_IRQ);
 }
 
 
@@ -234,7 +248,7 @@
 
 	orion_ge10_init(eth_data,
 			GE10_PHYS_BASE, IRQ_MV78XX0_GE10_SUM,
-			NO_IRQ, get_tclk());
+			NO_IRQ);
 }
 
 
@@ -258,7 +272,7 @@
 
 	orion_ge11_init(eth_data,
 			GE11_PHYS_BASE, IRQ_MV78XX0_GE11_SUM,
-			NO_IRQ, get_tclk());
+			NO_IRQ);
 }
 
 /*****************************************************************************
@@ -285,7 +299,7 @@
 void __init mv78xx0_uart0_init(void)
 {
 	orion_uart0_init(UART0_VIRT_BASE, UART0_PHYS_BASE,
-			 IRQ_MV78XX0_UART_0, get_tclk());
+			 IRQ_MV78XX0_UART_0, tclk);
 }
 
 
@@ -295,7 +309,7 @@
 void __init mv78xx0_uart1_init(void)
 {
 	orion_uart1_init(UART1_VIRT_BASE, UART1_PHYS_BASE,
-			 IRQ_MV78XX0_UART_1, get_tclk());
+			 IRQ_MV78XX0_UART_1, tclk);
 }
 
 
@@ -305,7 +319,7 @@
 void __init mv78xx0_uart2_init(void)
 {
 	orion_uart2_init(UART2_VIRT_BASE, UART2_PHYS_BASE,
-			 IRQ_MV78XX0_UART_2, get_tclk());
+			 IRQ_MV78XX0_UART_2, tclk);
 }
 
 /*****************************************************************************
@@ -314,7 +328,7 @@
 void __init mv78xx0_uart3_init(void)
 {
 	orion_uart3_init(UART3_VIRT_BASE, UART3_PHYS_BASE,
-			 IRQ_MV78XX0_UART_3, get_tclk());
+			 IRQ_MV78XX0_UART_3, tclk);
 }
 
 /*****************************************************************************
@@ -378,25 +392,26 @@
 	int hclk;
 	int pclk;
 	int l2clk;
-	int tclk;
 
 	core_index = mv78xx0_core_index();
 	hclk = get_hclk();
 	get_pclk_l2clk(hclk, core_index, &pclk, &l2clk);
-	tclk = get_tclk();
 
 	printk(KERN_INFO "%s ", mv78xx0_id());
 	printk("core #%d, ", core_index);
 	printk("PCLK = %dMHz, ", (pclk + 499999) / 1000000);
 	printk("L2 = %dMHz, ", (l2clk + 499999) / 1000000);
 	printk("HCLK = %dMHz, ", (hclk + 499999) / 1000000);
-	printk("TCLK = %dMHz\n", (tclk + 499999) / 1000000);
+	printk("TCLK = %dMHz\n", (get_tclk() + 499999) / 1000000);
 
 	mv78xx0_setup_cpu_mbus();
 
 #ifdef CONFIG_CACHE_FEROCEON_L2
 	feroceon_l2_init(is_l2_writethrough());
 #endif
+
+	/* Set up the root of the clk tree */
+	clk_init();
 }
 
 void mv78xx0_restart(char mode, const char *cmd)
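
With the fixed-rate "tclk" registered by clk_init() and handed to orion_clkdev_init(), peripheral drivers obtain their rate through the common clk API rather than the per-call get_tclk() value that was dropped from the orion_ge*_init() calls above. A hedged illustration of that consumer side follows; the probe function and its device are assumptions for illustration, not code from this merge.

#include <linux/clk.h>
#include <linux/err.h>
#include <linux/platform_device.h>

/* Illustrative consumer only: pick up the clock that
 * orion_clkdev_init() associated with this device. */
static int __devinit example_probe(struct platform_device *pdev)
{
	struct clk *clk = clk_get(&pdev->dev, NULL);

	if (IS_ERR(clk))
		return PTR_ERR(clk);

	clk_prepare_enable(clk);
	dev_info(&pdev->dev, "tclk rate: %lu Hz\n", clk_get_rate(clk));
	return 0;
}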
diff --git a/arch/arm/mach-mxs/Kconfig b/arch/arm/mach-mxs/Kconfig
index 07d5383..91cf062 100644
--- a/arch/arm/mach-mxs/Kconfig
+++ b/arch/arm/mach-mxs/Kconfig
@@ -7,18 +7,28 @@
 
 config SOC_IMX23
 	bool
+	select ARM_AMBA
 	select CPU_ARM926T
 	select HAVE_PWM
 	select PINCTRL_IMX23
 
 config SOC_IMX28
 	bool
+	select ARM_AMBA
 	select CPU_ARM926T
 	select HAVE_PWM
 	select PINCTRL_IMX28
 
 comment "MXS platforms:"
 
+config MACH_MXS_DT
+	bool "Support MXS platforms from device tree"
+	select SOC_IMX23
+	select SOC_IMX28
+	help
+	  Include support for Freescale MXS platforms (i.MX23 and i.MX28)
+	  using the device tree for discovery.
+
 config MACH_STMP378X_DEVB
 	bool "Support STMP378x_devb Platform"
 	select SOC_IMX23
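
The new MACH_MXS_DT option above selects both SoCs and builds mach-mxs.c, which is not shown in this diff. As a rough, hedged sketch of the shape such a device-tree board description takes (mirroring the .dt_compat usage visible in the msm8x60 hunk earlier), with every callback name below a placeholder rather than the real mach-mxs.c symbols:

/* Hypothetical sketch only; callback names are placeholders. */
static const char *mxs_dt_compat[] __initdata = {
	"fsl,imx23",
	"fsl,imx28",
	NULL,
};

DT_MACHINE_START(MXS_DT, "Freescale MXS (Device Tree)")
	.map_io		= mxs_map_io,		/* placeholder */
	.init_irq	= icoll_init_irq,	/* placeholder */
	.timer		= &mxs_timer,		/* placeholder */
	.init_machine	= mxs_dt_machine_init,	/* placeholder */
	.dt_compat	= mxs_dt_compat,
MACHINE_END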
diff --git a/arch/arm/mach-mxs/Makefile b/arch/arm/mach-mxs/Makefile
index 908bf9a..e41590c 100644
--- a/arch/arm/mach-mxs/Makefile
+++ b/arch/arm/mach-mxs/Makefile
@@ -1,12 +1,10 @@
 # Common support
-obj-y := clock.o devices.o icoll.o iomux.o system.o timer.o mm.o
+obj-y := devices.o icoll.o iomux.o system.o timer.o mm.o
 
 obj-$(CONFIG_MXS_OCOTP) += ocotp.o
 obj-$(CONFIG_PM) += pm.o
 
-obj-$(CONFIG_SOC_IMX23) += clock-mx23.o
-obj-$(CONFIG_SOC_IMX28) += clock-mx28.o
-
+obj-$(CONFIG_MACH_MXS_DT) += mach-mxs.o
 obj-$(CONFIG_MACH_STMP378X_DEVB) += mach-stmp378x_devb.o
 obj-$(CONFIG_MACH_MX23EVK) += mach-mx23evk.o
 obj-$(CONFIG_MACH_MX28EVK) += mach-mx28evk.o
diff --git a/arch/arm/mach-mxs/clock-mx23.c b/arch/arm/mach-mxs/clock-mx23.c
deleted file mode 100644
index e3ac52c..0000000
--- a/arch/arm/mach-mxs/clock-mx23.c
+++ /dev/null
@@ -1,536 +0,0 @@
-/*
- * Copyright (C) 2009-2010 Freescale Semiconductor, Inc. All Rights Reserved.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License along
- * with this program; if not, write to the Free Software Foundation, Inc.,
- * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
- */
-
-#include <linux/mm.h>
-#include <linux/delay.h>
-#include <linux/clk.h>
-#include <linux/io.h>
-#include <linux/jiffies.h>
-#include <linux/clkdev.h>
-
-#include <asm/clkdev.h>
-#include <asm/div64.h>
-
-#include <mach/mx23.h>
-#include <mach/common.h>
-#include <mach/clock.h>
-
-#include "regs-clkctrl-mx23.h"
-
-#define CLKCTRL_BASE_ADDR	MX23_IO_ADDRESS(MX23_CLKCTRL_BASE_ADDR)
-#define DIGCTRL_BASE_ADDR	MX23_IO_ADDRESS(MX23_DIGCTL_BASE_ADDR)
-
-#define PARENT_RATE_SHIFT	8
-
-static int _raw_clk_enable(struct clk *clk)
-{
-	u32 reg;
-
-	if (clk->enable_reg) {
-		reg = __raw_readl(clk->enable_reg);
-		reg &= ~(1 << clk->enable_shift);
-		__raw_writel(reg, clk->enable_reg);
-	}
-
-	return 0;
-}
-
-static void _raw_clk_disable(struct clk *clk)
-{
-	u32 reg;
-
-	if (clk->enable_reg) {
-		reg = __raw_readl(clk->enable_reg);
-		reg |= 1 << clk->enable_shift;
-		__raw_writel(reg, clk->enable_reg);
-	}
-}
-
-/*
- * ref_xtal_clk
- */
-static unsigned long ref_xtal_clk_get_rate(struct clk *clk)
-{
-	return 24000000;
-}
-
-static struct clk ref_xtal_clk = {
-	.get_rate = ref_xtal_clk_get_rate,
-};
-
-/*
- * pll_clk
- */
-static unsigned long pll_clk_get_rate(struct clk *clk)
-{
-	return 480000000;
-}
-
-static int pll_clk_enable(struct clk *clk)
-{
-	__raw_writel(BM_CLKCTRL_PLLCTRL0_POWER |
-			BM_CLKCTRL_PLLCTRL0_EN_USB_CLKS,
-			CLKCTRL_BASE_ADDR + HW_CLKCTRL_PLLCTRL0_SET);
-
-	/* Only a 10us delay is need. PLLCTRL1 LOCK bitfied is only a timer
-	 * and is incorrect (excessive). Per definition of the PLLCTRL0
-	 * POWER field, waiting at least 10us.
-	 */
-	udelay(10);
-
-	return 0;
-}
-
-static void pll_clk_disable(struct clk *clk)
-{
-	__raw_writel(BM_CLKCTRL_PLLCTRL0_POWER |
-			BM_CLKCTRL_PLLCTRL0_EN_USB_CLKS,
-			CLKCTRL_BASE_ADDR + HW_CLKCTRL_PLLCTRL0_CLR);
-}
-
-static struct clk pll_clk = {
-	 .get_rate = pll_clk_get_rate,
-	 .enable = pll_clk_enable,
-	 .disable = pll_clk_disable,
-	 .parent = &ref_xtal_clk,
-};
-
-/*
- * ref_clk
- */
-#define _CLK_GET_RATE_REF(name, sr, ss)					\
-static unsigned long name##_get_rate(struct clk *clk)			\
-{									\
-	unsigned long parent_rate;					\
-	u32 reg, div;							\
-									\
-	reg = __raw_readl(CLKCTRL_BASE_ADDR + HW_CLKCTRL_##sr);		\
-	div = (reg >> BP_CLKCTRL_##sr##_##ss##FRAC) & 0x3f;		\
-	parent_rate = clk_get_rate(clk->parent);			\
-									\
-	return SH_DIV((parent_rate >> PARENT_RATE_SHIFT) * 18,		\
-			div, PARENT_RATE_SHIFT);			\
-}
-
-_CLK_GET_RATE_REF(ref_cpu_clk, FRAC, CPU)
-_CLK_GET_RATE_REF(ref_emi_clk, FRAC, EMI)
-_CLK_GET_RATE_REF(ref_pix_clk, FRAC, PIX)
-_CLK_GET_RATE_REF(ref_io_clk, FRAC, IO)
-
-#define _DEFINE_CLOCK_REF(name, er, es)					\
-	static struct clk name = {					\
-		.enable_reg	= CLKCTRL_BASE_ADDR + HW_CLKCTRL_##er,	\
-		.enable_shift	= BP_CLKCTRL_##er##_CLKGATE##es,	\
-		.get_rate	= name##_get_rate,			\
-		.enable		= _raw_clk_enable,			\
-		.disable	= _raw_clk_disable,			\
-		.parent		= &pll_clk,				\
-	}
-
-_DEFINE_CLOCK_REF(ref_cpu_clk, FRAC, CPU);
-_DEFINE_CLOCK_REF(ref_emi_clk, FRAC, EMI);
-_DEFINE_CLOCK_REF(ref_pix_clk, FRAC, PIX);
-_DEFINE_CLOCK_REF(ref_io_clk, FRAC, IO);
-
-/*
- * General clocks
- *
- * clk_get_rate
- */
-static unsigned long rtc_clk_get_rate(struct clk *clk)
-{
-	/* ref_xtal_clk is implemented as the only parent */
-	return clk_get_rate(clk->parent) / 768;
-}
-
-static unsigned long clk32k_clk_get_rate(struct clk *clk)
-{
-	return clk->parent->get_rate(clk->parent) / 750;
-}
-
-#define _CLK_GET_RATE(name, rs)						\
-static unsigned long name##_get_rate(struct clk *clk)			\
-{									\
-	u32 reg, div;							\
-									\
-	reg = __raw_readl(CLKCTRL_BASE_ADDR + HW_CLKCTRL_##rs);		\
-									\
-	if (clk->parent == &ref_xtal_clk)				\
-		div = (reg & BM_CLKCTRL_##rs##_DIV_XTAL) >>		\
-			BP_CLKCTRL_##rs##_DIV_XTAL;			\
-	else								\
-		div = (reg & BM_CLKCTRL_##rs##_DIV_##rs) >>		\
-			BP_CLKCTRL_##rs##_DIV_##rs;			\
-									\
-	if (!div)							\
-		return -EINVAL;						\
-									\
-	return clk_get_rate(clk->parent) / div;				\
-}
-
-_CLK_GET_RATE(cpu_clk, CPU)
-_CLK_GET_RATE(emi_clk, EMI)
-
-#define _CLK_GET_RATE1(name, rs)					\
-static unsigned long name##_get_rate(struct clk *clk)			\
-{									\
-	u32 reg, div;							\
-									\
-	reg = __raw_readl(CLKCTRL_BASE_ADDR + HW_CLKCTRL_##rs);		\
-	div = (reg & BM_CLKCTRL_##rs##_DIV) >> BP_CLKCTRL_##rs##_DIV;	\
-									\
-	if (!div)							\
-		return -EINVAL;						\
-									\
-	return clk_get_rate(clk->parent) / div;				\
-}
-
-_CLK_GET_RATE1(hbus_clk, HBUS)
-_CLK_GET_RATE1(xbus_clk, XBUS)
-_CLK_GET_RATE1(ssp_clk, SSP)
-_CLK_GET_RATE1(gpmi_clk, GPMI)
-_CLK_GET_RATE1(lcdif_clk, PIX)
-
-#define _CLK_GET_RATE_STUB(name)					\
-static unsigned long name##_get_rate(struct clk *clk)			\
-{									\
-	return clk_get_rate(clk->parent);				\
-}
-
-_CLK_GET_RATE_STUB(uart_clk)
-_CLK_GET_RATE_STUB(audio_clk)
-_CLK_GET_RATE_STUB(pwm_clk)
-
-/*
- * clk_set_rate
- */
-static int cpu_clk_set_rate(struct clk *clk, unsigned long rate)
-{
-	u32 reg, bm_busy, div_max, d, f, div, frac;
-	unsigned long diff, parent_rate, calc_rate;
-
-	parent_rate = clk_get_rate(clk->parent);
-
-	if (clk->parent == &ref_xtal_clk) {
-		div_max = BM_CLKCTRL_CPU_DIV_XTAL >> BP_CLKCTRL_CPU_DIV_XTAL;
-		bm_busy = BM_CLKCTRL_CPU_BUSY_REF_XTAL;
-		div = DIV_ROUND_UP(parent_rate, rate);
-		if (div == 0 || div > div_max)
-			return -EINVAL;
-	} else {
-		div_max = BM_CLKCTRL_CPU_DIV_CPU >> BP_CLKCTRL_CPU_DIV_CPU;
-		bm_busy = BM_CLKCTRL_CPU_BUSY_REF_CPU;
-		rate >>= PARENT_RATE_SHIFT;
-		parent_rate >>= PARENT_RATE_SHIFT;
-		diff = parent_rate;
-		div = frac = 1;
-		for (d = 1; d <= div_max; d++) {
-			f = parent_rate * 18 / d / rate;
-			if ((parent_rate * 18 / d) % rate)
-				f++;
-			if (f < 18 || f > 35)
-				continue;
-
-			calc_rate = parent_rate * 18 / f / d;
-			if (calc_rate > rate)
-				continue;
-
-			if (rate - calc_rate < diff) {
-				frac = f;
-				div = d;
-				diff = rate - calc_rate;
-			}
-
-			if (diff == 0)
-				break;
-		}
-
-		if (diff == parent_rate)
-			return -EINVAL;
-
-		reg = __raw_readl(CLKCTRL_BASE_ADDR + HW_CLKCTRL_FRAC);
-		reg &= ~BM_CLKCTRL_FRAC_CPUFRAC;
-		reg |= frac;
-		__raw_writel(reg, CLKCTRL_BASE_ADDR + HW_CLKCTRL_FRAC);
-	}
-
-	reg = __raw_readl(CLKCTRL_BASE_ADDR + HW_CLKCTRL_CPU);
-	reg &= ~BM_CLKCTRL_CPU_DIV_CPU;
-	reg |= div << BP_CLKCTRL_CPU_DIV_CPU;
-	__raw_writel(reg, CLKCTRL_BASE_ADDR + HW_CLKCTRL_CPU);
-
-	mxs_clkctrl_timeout(HW_CLKCTRL_CPU, bm_busy);
-
-	return 0;
-}
-
-#define _CLK_SET_RATE(name, dr)						\
-static int name##_set_rate(struct clk *clk, unsigned long rate)		\
-{									\
-	u32 reg, div_max, div;						\
-	unsigned long parent_rate;					\
-									\
-	parent_rate = clk_get_rate(clk->parent);			\
-	div_max = BM_CLKCTRL_##dr##_DIV >> BP_CLKCTRL_##dr##_DIV;	\
-									\
-	div = DIV_ROUND_UP(parent_rate, rate);				\
-	if (div == 0 || div > div_max)					\
-		return -EINVAL;						\
-									\
-	reg = __raw_readl(CLKCTRL_BASE_ADDR + HW_CLKCTRL_##dr);		\
-	reg &= ~BM_CLKCTRL_##dr##_DIV;					\
-	reg |= div << BP_CLKCTRL_##dr##_DIV;				\
-	if (reg & (1 << clk->enable_shift)) {				\
-		pr_err("%s: clock is gated\n", __func__);		\
-		return -EINVAL;						\
-	}								\
-	__raw_writel(reg, CLKCTRL_BASE_ADDR + HW_CLKCTRL_##dr);		\
-									\
-	mxs_clkctrl_timeout(HW_CLKCTRL_##dr, BM_CLKCTRL_##dr##_BUSY);	\
-	return 0;							\
-}
-
-_CLK_SET_RATE(xbus_clk, XBUS)
-_CLK_SET_RATE(ssp_clk, SSP)
-_CLK_SET_RATE(gpmi_clk, GPMI)
-_CLK_SET_RATE(lcdif_clk, PIX)
-
-#define _CLK_SET_RATE_STUB(name)					\
-static int name##_set_rate(struct clk *clk, unsigned long rate)		\
-{									\
-	return -EINVAL;							\
-}
-
-_CLK_SET_RATE_STUB(emi_clk)
-_CLK_SET_RATE_STUB(uart_clk)
-_CLK_SET_RATE_STUB(audio_clk)
-_CLK_SET_RATE_STUB(pwm_clk)
-_CLK_SET_RATE_STUB(clk32k_clk)
-
-/*
- * clk_set_parent
- */
-#define _CLK_SET_PARENT(name, bit)					\
-static int name##_set_parent(struct clk *clk, struct clk *parent)	\
-{									\
-	if (parent != clk->parent) {					\
-		__raw_writel(BM_CLKCTRL_CLKSEQ_BYPASS_##bit,		\
-			 CLKCTRL_BASE_ADDR + HW_CLKCTRL_CLKSEQ_TOG);	\
-		clk->parent = parent;					\
-	}								\
-									\
-	return 0;							\
-}
-
-_CLK_SET_PARENT(cpu_clk, CPU)
-_CLK_SET_PARENT(emi_clk, EMI)
-_CLK_SET_PARENT(ssp_clk, SSP)
-_CLK_SET_PARENT(gpmi_clk, GPMI)
-_CLK_SET_PARENT(lcdif_clk, PIX)
-
-#define _CLK_SET_PARENT_STUB(name)					\
-static int name##_set_parent(struct clk *clk, struct clk *parent)	\
-{									\
-	if (parent != clk->parent)					\
-		return -EINVAL;						\
-	else								\
-		return 0;						\
-}
-
-_CLK_SET_PARENT_STUB(uart_clk)
-_CLK_SET_PARENT_STUB(audio_clk)
-_CLK_SET_PARENT_STUB(pwm_clk)
-_CLK_SET_PARENT_STUB(clk32k_clk)
-
-/*
- * clk definition
- */
-static struct clk cpu_clk = {
-	.get_rate = cpu_clk_get_rate,
-	.set_rate = cpu_clk_set_rate,
-	.set_parent = cpu_clk_set_parent,
-	.parent = &ref_cpu_clk,
-};
-
-static struct clk hbus_clk = {
-	.get_rate = hbus_clk_get_rate,
-	.parent = &cpu_clk,
-};
-
-static struct clk xbus_clk = {
-	.get_rate = xbus_clk_get_rate,
-	.set_rate = xbus_clk_set_rate,
-	.parent = &ref_xtal_clk,
-};
-
-static struct clk rtc_clk = {
-	.get_rate = rtc_clk_get_rate,
-	.parent = &ref_xtal_clk,
-};
-
-/* usb_clk gate is controlled in DIGCTRL other than CLKCTRL */
-static struct clk usb_clk = {
-	.enable_reg = DIGCTRL_BASE_ADDR,
-	.enable_shift = 2,
-	.enable = _raw_clk_enable,
-	.disable = _raw_clk_disable,
-	.parent = &pll_clk,
-};
-
-#define _DEFINE_CLOCK(name, er, es, p)					\
-	static struct clk name = {					\
-		.enable_reg	= CLKCTRL_BASE_ADDR + HW_CLKCTRL_##er,	\
-		.enable_shift	= BP_CLKCTRL_##er##_##es,		\
-		.get_rate	= name##_get_rate,			\
-		.set_rate	= name##_set_rate,			\
-		.set_parent	= name##_set_parent,			\
-		.enable		= _raw_clk_enable,			\
-		.disable	= _raw_clk_disable,			\
-		.parent		= p,					\
-	}
-
-_DEFINE_CLOCK(emi_clk, EMI, CLKGATE, &ref_xtal_clk);
-_DEFINE_CLOCK(ssp_clk, SSP, CLKGATE, &ref_xtal_clk);
-_DEFINE_CLOCK(gpmi_clk, GPMI, CLKGATE, &ref_xtal_clk);
-_DEFINE_CLOCK(lcdif_clk, PIX, CLKGATE, &ref_xtal_clk);
-_DEFINE_CLOCK(uart_clk, XTAL, UART_CLK_GATE, &ref_xtal_clk);
-_DEFINE_CLOCK(audio_clk, XTAL, FILT_CLK24M_GATE, &ref_xtal_clk);
-_DEFINE_CLOCK(pwm_clk, XTAL, PWM_CLK24M_GATE, &ref_xtal_clk);
-_DEFINE_CLOCK(clk32k_clk, XTAL, TIMROT_CLK32K_GATE, &ref_xtal_clk);
-
-#define _REGISTER_CLOCK(d, n, c) \
-	{ \
-		.dev_id = d, \
-		.con_id = n, \
-		.clk = &c, \
-	},
-
-static struct clk_lookup lookups[] = {
-	/* for amba bus driver */
-	_REGISTER_CLOCK("duart", "apb_pclk", xbus_clk)
-	/* for amba-pl011 driver */
-	_REGISTER_CLOCK("duart", NULL, uart_clk)
-	_REGISTER_CLOCK("mxs-auart.0", NULL, uart_clk)
-	_REGISTER_CLOCK("rtc", NULL, rtc_clk)
-	_REGISTER_CLOCK("mxs-dma-apbh", NULL, hbus_clk)
-	_REGISTER_CLOCK("mxs-dma-apbx", NULL, xbus_clk)
-	_REGISTER_CLOCK("mxs-mmc.0", NULL, ssp_clk)
-	_REGISTER_CLOCK("mxs-mmc.1", NULL, ssp_clk)
-	_REGISTER_CLOCK(NULL, "usb", usb_clk)
-	_REGISTER_CLOCK(NULL, "audio", audio_clk)
-	_REGISTER_CLOCK("mxs-pwm.0", NULL, pwm_clk)
-	_REGISTER_CLOCK("mxs-pwm.1", NULL, pwm_clk)
-	_REGISTER_CLOCK("mxs-pwm.2", NULL, pwm_clk)
-	_REGISTER_CLOCK("mxs-pwm.3", NULL, pwm_clk)
-	_REGISTER_CLOCK("mxs-pwm.4", NULL, pwm_clk)
-	_REGISTER_CLOCK("imx23-fb", NULL, lcdif_clk)
-	_REGISTER_CLOCK("imx23-gpmi-nand", NULL, gpmi_clk)
-};
-
-static int clk_misc_init(void)
-{
-	u32 reg;
-	int ret;
-
-	/* Fix up parent per register setting */
-	reg = __raw_readl(CLKCTRL_BASE_ADDR + HW_CLKCTRL_CLKSEQ);
-	cpu_clk.parent = (reg & BM_CLKCTRL_CLKSEQ_BYPASS_CPU) ?
-			&ref_xtal_clk : &ref_cpu_clk;
-	emi_clk.parent = (reg & BM_CLKCTRL_CLKSEQ_BYPASS_EMI) ?
-			&ref_xtal_clk : &ref_emi_clk;
-	ssp_clk.parent = (reg & BM_CLKCTRL_CLKSEQ_BYPASS_SSP) ?
-			&ref_xtal_clk : &ref_io_clk;
-	gpmi_clk.parent = (reg & BM_CLKCTRL_CLKSEQ_BYPASS_GPMI) ?
-			&ref_xtal_clk : &ref_io_clk;
-	lcdif_clk.parent = (reg & BM_CLKCTRL_CLKSEQ_BYPASS_PIX) ?
-			&ref_xtal_clk : &ref_pix_clk;
-
-	/* Use int div over frac when both are available */
-	__raw_writel(BM_CLKCTRL_CPU_DIV_XTAL_FRAC_EN,
-			CLKCTRL_BASE_ADDR + HW_CLKCTRL_CPU_CLR);
-	__raw_writel(BM_CLKCTRL_CPU_DIV_CPU_FRAC_EN,
-			CLKCTRL_BASE_ADDR + HW_CLKCTRL_CPU_CLR);
-	__raw_writel(BM_CLKCTRL_HBUS_DIV_FRAC_EN,
-			CLKCTRL_BASE_ADDR + HW_CLKCTRL_HBUS_CLR);
-
-	reg = __raw_readl(CLKCTRL_BASE_ADDR + HW_CLKCTRL_XBUS);
-	reg &= ~BM_CLKCTRL_XBUS_DIV_FRAC_EN;
-	__raw_writel(reg, CLKCTRL_BASE_ADDR + HW_CLKCTRL_XBUS);
-
-	reg = __raw_readl(CLKCTRL_BASE_ADDR + HW_CLKCTRL_SSP);
-	reg &= ~BM_CLKCTRL_SSP_DIV_FRAC_EN;
-	__raw_writel(reg, CLKCTRL_BASE_ADDR + HW_CLKCTRL_SSP);
-
-	reg = __raw_readl(CLKCTRL_BASE_ADDR + HW_CLKCTRL_GPMI);
-	reg &= ~BM_CLKCTRL_GPMI_DIV_FRAC_EN;
-	__raw_writel(reg, CLKCTRL_BASE_ADDR + HW_CLKCTRL_GPMI);
-
-	reg = __raw_readl(CLKCTRL_BASE_ADDR + HW_CLKCTRL_PIX);
-	reg &= ~BM_CLKCTRL_PIX_DIV_FRAC_EN;
-	__raw_writel(reg, CLKCTRL_BASE_ADDR + HW_CLKCTRL_PIX);
-
-	/*
-	 * Set safe hbus clock divider. A divider of 3 ensure that
-	 * the Vddd voltage required for the cpu clock is sufficiently
-	 * high for the hbus clock.
-	 */
-	reg = __raw_readl(CLKCTRL_BASE_ADDR + HW_CLKCTRL_HBUS);
-	reg &= BM_CLKCTRL_HBUS_DIV;
-	reg |= 3 << BP_CLKCTRL_HBUS_DIV;
-	__raw_writel(reg, CLKCTRL_BASE_ADDR + HW_CLKCTRL_HBUS);
-
-	ret = mxs_clkctrl_timeout(HW_CLKCTRL_HBUS, BM_CLKCTRL_HBUS_BUSY);
-
-	/* Gate off cpu clock in WFI for power saving */
-	__raw_writel(BM_CLKCTRL_CPU_INTERRUPT_WAIT,
-			CLKCTRL_BASE_ADDR + HW_CLKCTRL_CPU_SET);
-
-	/*
-	 * 480 MHz seems too high to be ssp clock source directly,
-	 * so set frac to get a 288 MHz ref_io.
-	 */
-	reg = __raw_readl(CLKCTRL_BASE_ADDR + HW_CLKCTRL_FRAC);
-	reg &= ~BM_CLKCTRL_FRAC_IOFRAC;
-	reg |= 30 << BP_CLKCTRL_FRAC_IOFRAC;
-	__raw_writel(reg, CLKCTRL_BASE_ADDR + HW_CLKCTRL_FRAC);
-
-	return ret;
-}
-
-int __init mx23_clocks_init(void)
-{
-	clk_misc_init();
-
-	/*
-	 * source ssp clock from ref_io than ref_xtal,
-	 * as ref_xtal only provides 24 MHz as maximum.
-	 */
-	clk_set_parent(&ssp_clk, &ref_io_clk);
-
-	clk_prepare_enable(&cpu_clk);
-	clk_prepare_enable(&hbus_clk);
-	clk_prepare_enable(&xbus_clk);
-	clk_prepare_enable(&emi_clk);
-	clk_prepare_enable(&uart_clk);
-
-	clkdev_add_table(lookups, ARRAY_SIZE(lookups));
-
-	mxs_timer_init(&clk32k_clk, MX23_INT_TIMER0);
-
-	return 0;
-}
diff --git a/arch/arm/mach-mxs/clock-mx28.c b/arch/arm/mach-mxs/clock-mx28.c
deleted file mode 100644
index cea29c9..0000000
--- a/arch/arm/mach-mxs/clock-mx28.c
+++ /dev/null
@@ -1,803 +0,0 @@
-/*
- * Copyright (C) 2009-2010 Freescale Semiconductor, Inc. All Rights Reserved.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License along
- * with this program; if not, write to the Free Software Foundation, Inc.,
- * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
- */
-
-#include <linux/mm.h>
-#include <linux/delay.h>
-#include <linux/clk.h>
-#include <linux/io.h>
-#include <linux/jiffies.h>
-#include <linux/clkdev.h>
-#include <linux/spinlock.h>
-
-#include <asm/clkdev.h>
-#include <asm/div64.h>
-
-#include <mach/mx28.h>
-#include <mach/common.h>
-#include <mach/clock.h>
-#include <mach/digctl.h>
-
-#include "regs-clkctrl-mx28.h"
-
-#define CLKCTRL_BASE_ADDR	MX28_IO_ADDRESS(MX28_CLKCTRL_BASE_ADDR)
-#define DIGCTRL_BASE_ADDR	MX28_IO_ADDRESS(MX28_DIGCTL_BASE_ADDR)
-
-#define PARENT_RATE_SHIFT	8
-
-static struct clk pll2_clk;
-static struct clk cpu_clk;
-static struct clk emi_clk;
-static struct clk saif0_clk;
-static struct clk saif1_clk;
-static struct clk clk32k_clk;
-static DEFINE_SPINLOCK(clkmux_lock);
-
-/*
- * HW_SAIF_CLKMUX_SEL:
- *  DIRECT(0x0): SAIF0 clock pins selected for SAIF0 input clocks, and SAIF1
- *		clock pins selected for SAIF1 input clocks.
- *  CROSSINPUT(0x1): SAIF1 clock inputs selected for SAIF0 input clocks, and
- *		SAIF0 clock inputs selected for SAIF1 input clocks.
- *  EXTMSTR0(0x2): SAIF0 clock pin selected for both SAIF0 and SAIF1 input
- *		clocks.
- *  EXTMSTR1(0x3): SAIF1 clock pin selected for both SAIF0 and SAIF1 input
- *		clocks.
- */
-int mxs_saif_clkmux_select(unsigned int clkmux)
-{
-	if (clkmux > 0x3)
-		return -EINVAL;
-
-	spin_lock(&clkmux_lock);
-	__raw_writel(BM_DIGCTL_CTRL_SAIF_CLKMUX,
-			DIGCTRL_BASE_ADDR + HW_DIGCTL_CTRL + MXS_CLR_ADDR);
-	__raw_writel(clkmux << BP_DIGCTL_CTRL_SAIF_CLKMUX,
-			DIGCTRL_BASE_ADDR + HW_DIGCTL_CTRL + MXS_SET_ADDR);
-	spin_unlock(&clkmux_lock);
-
-	return 0;
-}
-
-static int _raw_clk_enable(struct clk *clk)
-{
-	u32 reg;
-
-	if (clk->enable_reg) {
-		reg = __raw_readl(clk->enable_reg);
-		reg &= ~(1 << clk->enable_shift);
-		__raw_writel(reg, clk->enable_reg);
-	}
-
-	return 0;
-}
-
-static void _raw_clk_disable(struct clk *clk)
-{
-	u32 reg;
-
-	if (clk->enable_reg) {
-		reg = __raw_readl(clk->enable_reg);
-		reg |= 1 << clk->enable_shift;
-		__raw_writel(reg, clk->enable_reg);
-	}
-}
-
-/*
- * ref_xtal_clk
- */
-static unsigned long ref_xtal_clk_get_rate(struct clk *clk)
-{
-	return 24000000;
-}
-
-static struct clk ref_xtal_clk = {
-	.get_rate = ref_xtal_clk_get_rate,
-};
-
-/*
- * pll_clk
- */
-static unsigned long pll0_clk_get_rate(struct clk *clk)
-{
-	return 480000000;
-}
-
-static unsigned long pll1_clk_get_rate(struct clk *clk)
-{
-	return 480000000;
-}
-
-static unsigned long pll2_clk_get_rate(struct clk *clk)
-{
-	return 50000000;
-}
-
-#define _CLK_ENABLE_PLL(name, r, g)					\
-static int name##_enable(struct clk *clk)				\
-{									\
-	__raw_writel(BM_CLKCTRL_##r##CTRL0_POWER,			\
-		     CLKCTRL_BASE_ADDR + HW_CLKCTRL_##r##CTRL0_SET);	\
-	udelay(10);							\
-									\
-	if (clk == &pll2_clk)						\
-		__raw_writel(BM_CLKCTRL_##r##CTRL0_##g,			\
-			CLKCTRL_BASE_ADDR + HW_CLKCTRL_##r##CTRL0_CLR);	\
-	else								\
-		__raw_writel(BM_CLKCTRL_##r##CTRL0_##g,			\
-			CLKCTRL_BASE_ADDR + HW_CLKCTRL_##r##CTRL0_SET);	\
-									\
-	return 0;							\
-}
-
-_CLK_ENABLE_PLL(pll0_clk, PLL0, EN_USB_CLKS)
-_CLK_ENABLE_PLL(pll1_clk, PLL1, EN_USB_CLKS)
-_CLK_ENABLE_PLL(pll2_clk, PLL2, CLKGATE)
-
-#define _CLK_DISABLE_PLL(name, r, g)					\
-static void name##_disable(struct clk *clk)				\
-{									\
-	__raw_writel(BM_CLKCTRL_##r##CTRL0_POWER,			\
-		     CLKCTRL_BASE_ADDR + HW_CLKCTRL_##r##CTRL0_CLR);	\
-									\
-	if (clk == &pll2_clk)						\
-		__raw_writel(BM_CLKCTRL_##r##CTRL0_##g,			\
-			CLKCTRL_BASE_ADDR + HW_CLKCTRL_##r##CTRL0_SET);	\
-	else								\
-		__raw_writel(BM_CLKCTRL_##r##CTRL0_##g,			\
-			CLKCTRL_BASE_ADDR + HW_CLKCTRL_##r##CTRL0_CLR);	\
-									\
-}
-
-_CLK_DISABLE_PLL(pll0_clk, PLL0, EN_USB_CLKS)
-_CLK_DISABLE_PLL(pll1_clk, PLL1, EN_USB_CLKS)
-_CLK_DISABLE_PLL(pll2_clk, PLL2, CLKGATE)
-
-#define _DEFINE_CLOCK_PLL(name)						\
-	static struct clk name = {					\
-		.get_rate	= name##_get_rate,			\
-		.enable		= name##_enable,			\
-		.disable	= name##_disable,			\
-		.parent		= &ref_xtal_clk,			\
-	}
-
-_DEFINE_CLOCK_PLL(pll0_clk);
-_DEFINE_CLOCK_PLL(pll1_clk);
-_DEFINE_CLOCK_PLL(pll2_clk);
-
-/*
- * ref_clk
- */
-#define _CLK_GET_RATE_REF(name, sr, ss)					\
-static unsigned long name##_get_rate(struct clk *clk)			\
-{									\
-	unsigned long parent_rate;					\
-	u32 reg, div;							\
-									\
-	reg = __raw_readl(CLKCTRL_BASE_ADDR + HW_CLKCTRL_##sr);		\
-	div = (reg >> BP_CLKCTRL_##sr##_##ss##FRAC) & 0x3f;		\
-	parent_rate = clk_get_rate(clk->parent);			\
-									\
-	return SH_DIV((parent_rate >> PARENT_RATE_SHIFT) * 18,		\
-			div, PARENT_RATE_SHIFT);			\
-}
-
-_CLK_GET_RATE_REF(ref_cpu_clk, FRAC0, CPU)
-_CLK_GET_RATE_REF(ref_emi_clk, FRAC0, EMI)
-_CLK_GET_RATE_REF(ref_io0_clk, FRAC0, IO0)
-_CLK_GET_RATE_REF(ref_io1_clk, FRAC0, IO1)
-_CLK_GET_RATE_REF(ref_pix_clk, FRAC1, PIX)
-_CLK_GET_RATE_REF(ref_gpmi_clk, FRAC1, GPMI)
-
-#define _DEFINE_CLOCK_REF(name, er, es)					\
-	static struct clk name = {					\
-		.enable_reg	= CLKCTRL_BASE_ADDR + HW_CLKCTRL_##er,	\
-		.enable_shift	= BP_CLKCTRL_##er##_CLKGATE##es,	\
-		.get_rate	= name##_get_rate,			\
-		.enable		= _raw_clk_enable,			\
-		.disable	= _raw_clk_disable,			\
-		.parent		= &pll0_clk,				\
-	}
-
-_DEFINE_CLOCK_REF(ref_cpu_clk, FRAC0, CPU);
-_DEFINE_CLOCK_REF(ref_emi_clk, FRAC0, EMI);
-_DEFINE_CLOCK_REF(ref_io0_clk, FRAC0, IO0);
-_DEFINE_CLOCK_REF(ref_io1_clk, FRAC0, IO1);
-_DEFINE_CLOCK_REF(ref_pix_clk, FRAC1, PIX);
-_DEFINE_CLOCK_REF(ref_gpmi_clk, FRAC1, GPMI);
-
-/*
- * General clocks
- *
- * clk_get_rate
- */
-static unsigned long lradc_clk_get_rate(struct clk *clk)
-{
-	return clk_get_rate(clk->parent) / 16;
-}
-
-static unsigned long rtc_clk_get_rate(struct clk *clk)
-{
-	/* ref_xtal_clk is implemented as the only parent */
-	return clk_get_rate(clk->parent) / 768;
-}
-
-static unsigned long clk32k_clk_get_rate(struct clk *clk)
-{
-	return clk->parent->get_rate(clk->parent) / 750;
-}
-
-static unsigned long spdif_clk_get_rate(struct clk *clk)
-{
-	return clk_get_rate(clk->parent) / 4;
-}
-
-#define _CLK_GET_RATE(name, rs)						\
-static unsigned long name##_get_rate(struct clk *clk)			\
-{									\
-	u32 reg, div;							\
-									\
-	reg = __raw_readl(CLKCTRL_BASE_ADDR + HW_CLKCTRL_##rs);		\
-									\
-	if (clk->parent == &ref_xtal_clk)				\
-		div = (reg & BM_CLKCTRL_##rs##_DIV_XTAL) >>		\
-			BP_CLKCTRL_##rs##_DIV_XTAL;			\
-	else								\
-		div = (reg & BM_CLKCTRL_##rs##_DIV_##rs) >>		\
-			BP_CLKCTRL_##rs##_DIV_##rs;			\
-									\
-	if (!div)							\
-		return -EINVAL;						\
-									\
-	return clk_get_rate(clk->parent) / div;				\
-}
-
-_CLK_GET_RATE(cpu_clk, CPU)
-_CLK_GET_RATE(emi_clk, EMI)
-
-#define _CLK_GET_RATE1(name, rs)					\
-static unsigned long name##_get_rate(struct clk *clk)			\
-{									\
-	u32 reg, div;							\
-									\
-	reg = __raw_readl(CLKCTRL_BASE_ADDR + HW_CLKCTRL_##rs);		\
-	div = (reg & BM_CLKCTRL_##rs##_DIV) >> BP_CLKCTRL_##rs##_DIV;	\
-									\
-	if (!div)							\
-		return -EINVAL;						\
-									\
-	if (clk == &saif0_clk || clk == &saif1_clk)			\
-		return clk_get_rate(clk->parent) >> 16 * div;		\
-	else								\
-		return clk_get_rate(clk->parent) / div;			\
-}
-
-_CLK_GET_RATE1(hbus_clk, HBUS)
-_CLK_GET_RATE1(xbus_clk, XBUS)
-_CLK_GET_RATE1(ssp0_clk, SSP0)
-_CLK_GET_RATE1(ssp1_clk, SSP1)
-_CLK_GET_RATE1(ssp2_clk, SSP2)
-_CLK_GET_RATE1(ssp3_clk, SSP3)
-_CLK_GET_RATE1(gpmi_clk, GPMI)
-_CLK_GET_RATE1(lcdif_clk, DIS_LCDIF)
-_CLK_GET_RATE1(saif0_clk, SAIF0)
-_CLK_GET_RATE1(saif1_clk, SAIF1)
-
-#define _CLK_GET_RATE_STUB(name)					\
-static unsigned long name##_get_rate(struct clk *clk)			\
-{									\
-	return clk_get_rate(clk->parent);				\
-}
-
-_CLK_GET_RATE_STUB(uart_clk)
-_CLK_GET_RATE_STUB(pwm_clk)
-_CLK_GET_RATE_STUB(can0_clk)
-_CLK_GET_RATE_STUB(can1_clk)
-_CLK_GET_RATE_STUB(fec_clk)
-
-/*
- * clk_set_rate
- */
-/* fool compiler */
-#define BM_CLKCTRL_CPU_DIV	0
-#define BP_CLKCTRL_CPU_DIV	0
-#define BM_CLKCTRL_CPU_BUSY	0
-
-#define _CLK_SET_RATE(name, dr, fr, fs)					\
-static int name##_set_rate(struct clk *clk, unsigned long rate)		\
-{									\
-	u32 reg, bm_busy, div_max, d, f, div, frac;			\
-	unsigned long diff, parent_rate, calc_rate;			\
-									\
-	div_max = BM_CLKCTRL_##dr##_DIV >> BP_CLKCTRL_##dr##_DIV;	\
-	bm_busy = BM_CLKCTRL_##dr##_BUSY;				\
-									\
-	if (clk->parent == &ref_xtal_clk) {				\
-		parent_rate = clk_get_rate(clk->parent);		\
-		div = DIV_ROUND_UP(parent_rate, rate);			\
-		if (clk == &cpu_clk) {					\
-			div_max = BM_CLKCTRL_CPU_DIV_XTAL >>		\
-				BP_CLKCTRL_CPU_DIV_XTAL;		\
-			bm_busy = BM_CLKCTRL_CPU_BUSY_REF_XTAL;		\
-		}							\
-		if (div == 0 || div > div_max)				\
-			return -EINVAL;					\
-	} else {							\
-		/*							\
-		 * hack alert: this block modifies clk->parent, too,	\
-		 * so the base to use it the grand parent.		\
-		 */							\
-		parent_rate = clk_get_rate(clk->parent->parent);	\
-		rate >>= PARENT_RATE_SHIFT;				\
-		parent_rate >>= PARENT_RATE_SHIFT;			\
-		diff = parent_rate;					\
-		div = frac = 1;						\
-		if (clk == &cpu_clk) {					\
-			div_max = BM_CLKCTRL_CPU_DIV_CPU >>		\
-				BP_CLKCTRL_CPU_DIV_CPU;			\
-			bm_busy = BM_CLKCTRL_CPU_BUSY_REF_CPU;		\
-		}							\
-		for (d = 1; d <= div_max; d++) {			\
-			f = parent_rate * 18 / d / rate;		\
-			if ((parent_rate * 18 / d) % rate)		\
-				f++;					\
-			if (f < 18 || f > 35)				\
-				continue;				\
-									\
-			calc_rate = parent_rate * 18 / f / d;		\
-			if (calc_rate > rate)				\
-				continue;				\
-									\
-			if (rate - calc_rate < diff) {			\
-				frac = f;				\
-				div = d;				\
-				diff = rate - calc_rate;		\
-			}						\
-									\
-			if (diff == 0)					\
-				break;					\
-		}							\
-									\
-		if (diff == parent_rate)				\
-			return -EINVAL;					\
-									\
-		reg = __raw_readl(CLKCTRL_BASE_ADDR + HW_CLKCTRL_##fr);	\
-		reg &= ~BM_CLKCTRL_##fr##_##fs##FRAC;			\
-		reg |= frac << BP_CLKCTRL_##fr##_##fs##FRAC;		\
-		__raw_writel(reg, CLKCTRL_BASE_ADDR + HW_CLKCTRL_##fr);	\
-	}								\
-									\
-	reg = __raw_readl(CLKCTRL_BASE_ADDR + HW_CLKCTRL_##dr);		\
-	if (clk == &cpu_clk) {						\
-		reg &= ~BM_CLKCTRL_CPU_DIV_CPU;				\
-		reg |= div << BP_CLKCTRL_CPU_DIV_CPU;			\
-	} else {							\
-		reg &= ~BM_CLKCTRL_##dr##_DIV;				\
-		reg |= div << BP_CLKCTRL_##dr##_DIV;			\
-		if (reg & (1 << clk->enable_shift)) {			\
-			pr_err("%s: clock is gated\n", __func__);	\
-			return -EINVAL;					\
-		}							\
-	}								\
-	__raw_writel(reg, CLKCTRL_BASE_ADDR + HW_CLKCTRL_##dr);		\
-									\
-	return mxs_clkctrl_timeout(HW_CLKCTRL_##dr, bm_busy);		\
-}
-
-_CLK_SET_RATE(cpu_clk, CPU, FRAC0, CPU)
-_CLK_SET_RATE(ssp0_clk, SSP0, FRAC0, IO0)
-_CLK_SET_RATE(ssp1_clk, SSP1, FRAC0, IO0)
-_CLK_SET_RATE(ssp2_clk, SSP2, FRAC0, IO1)
-_CLK_SET_RATE(ssp3_clk, SSP3, FRAC0, IO1)
-_CLK_SET_RATE(lcdif_clk, DIS_LCDIF, FRAC1, PIX)
-_CLK_SET_RATE(gpmi_clk, GPMI, FRAC1, GPMI)
-
-#define _CLK_SET_RATE1(name, dr)					\
-static int name##_set_rate(struct clk *clk, unsigned long rate)		\
-{									\
-	u32 reg, div_max, div;						\
-	unsigned long parent_rate;					\
-									\
-	parent_rate = clk_get_rate(clk->parent);			\
-	div_max = BM_CLKCTRL_##dr##_DIV >> BP_CLKCTRL_##dr##_DIV;	\
-									\
-	div = DIV_ROUND_UP(parent_rate, rate);				\
-	if (div == 0 || div > div_max)					\
-		return -EINVAL;						\
-									\
-	reg = __raw_readl(CLKCTRL_BASE_ADDR + HW_CLKCTRL_##dr);		\
-	reg &= ~BM_CLKCTRL_##dr##_DIV;					\
-	reg |= div << BP_CLKCTRL_##dr##_DIV;				\
-	if (reg & (1 << clk->enable_shift)) {				\
-		pr_err("%s: clock is gated\n", __func__);		\
-		return -EINVAL;						\
-	}								\
-	__raw_writel(reg, CLKCTRL_BASE_ADDR + HW_CLKCTRL_##dr);		\
-									\
-	return mxs_clkctrl_timeout(HW_CLKCTRL_##dr, BM_CLKCTRL_##dr##_BUSY);\
-}
-
-_CLK_SET_RATE1(xbus_clk, XBUS)
-
-/* saif clock uses 16 bits frac div */
-#define _CLK_SET_RATE_SAIF(name, rs)					\
-static int name##_set_rate(struct clk *clk, unsigned long rate)		\
-{									\
-	u16 div;							\
-	u32 reg;							\
-	u64 lrate;							\
-	unsigned long parent_rate;					\
-									\
-	parent_rate = clk_get_rate(clk->parent);			\
-	if (rate > parent_rate)						\
-		return -EINVAL;						\
-									\
-	lrate = (u64)rate << 16;					\
-	do_div(lrate, parent_rate);					\
-	div = (u16)lrate;						\
-									\
-	if (!div)							\
-		return -EINVAL;						\
-									\
-	reg = __raw_readl(CLKCTRL_BASE_ADDR + HW_CLKCTRL_##rs);		\
-	reg &= ~BM_CLKCTRL_##rs##_DIV;					\
-	reg |= div << BP_CLKCTRL_##rs##_DIV;				\
-	if (reg & (1 << clk->enable_shift)) {				\
-		pr_err("%s: clock is gated\n", __func__);		\
-		return -EINVAL;						\
-	}								\
-	__raw_writel(reg, CLKCTRL_BASE_ADDR + HW_CLKCTRL_##rs);		\
-									\
-	return mxs_clkctrl_timeout(HW_CLKCTRL_##rs, BM_CLKCTRL_##rs##_BUSY);\
-}
-
-_CLK_SET_RATE_SAIF(saif0_clk, SAIF0)
-_CLK_SET_RATE_SAIF(saif1_clk, SAIF1)
-
-#define _CLK_SET_RATE_STUB(name)					\
-static int name##_set_rate(struct clk *clk, unsigned long rate)		\
-{									\
-	return -EINVAL;							\
-}
-
-_CLK_SET_RATE_STUB(emi_clk)
-_CLK_SET_RATE_STUB(uart_clk)
-_CLK_SET_RATE_STUB(pwm_clk)
-_CLK_SET_RATE_STUB(spdif_clk)
-_CLK_SET_RATE_STUB(clk32k_clk)
-_CLK_SET_RATE_STUB(can0_clk)
-_CLK_SET_RATE_STUB(can1_clk)
-_CLK_SET_RATE_STUB(fec_clk)
-
-/*
- * clk_set_parent
- */
-#define _CLK_SET_PARENT(name, bit)					\
-static int name##_set_parent(struct clk *clk, struct clk *parent)	\
-{									\
-	if (parent != clk->parent) {					\
-		__raw_writel(BM_CLKCTRL_CLKSEQ_BYPASS_##bit,		\
-			 CLKCTRL_BASE_ADDR + HW_CLKCTRL_CLKSEQ_TOG);	\
-		clk->parent = parent;					\
-	}								\
-									\
-	return 0;							\
-}
-
-_CLK_SET_PARENT(cpu_clk, CPU)
-_CLK_SET_PARENT(emi_clk, EMI)
-_CLK_SET_PARENT(ssp0_clk, SSP0)
-_CLK_SET_PARENT(ssp1_clk, SSP1)
-_CLK_SET_PARENT(ssp2_clk, SSP2)
-_CLK_SET_PARENT(ssp3_clk, SSP3)
-_CLK_SET_PARENT(lcdif_clk, DIS_LCDIF)
-_CLK_SET_PARENT(gpmi_clk, GPMI)
-_CLK_SET_PARENT(saif0_clk, SAIF0)
-_CLK_SET_PARENT(saif1_clk, SAIF1)
-
-#define _CLK_SET_PARENT_STUB(name)					\
-static int name##_set_parent(struct clk *clk, struct clk *parent)	\
-{									\
-	if (parent != clk->parent)					\
-		return -EINVAL;						\
-	else								\
-		return 0;						\
-}
-
-_CLK_SET_PARENT_STUB(pwm_clk)
-_CLK_SET_PARENT_STUB(uart_clk)
-_CLK_SET_PARENT_STUB(clk32k_clk)
-_CLK_SET_PARENT_STUB(spdif_clk)
-_CLK_SET_PARENT_STUB(fec_clk)
-_CLK_SET_PARENT_STUB(can0_clk)
-_CLK_SET_PARENT_STUB(can1_clk)
-
-/*
- * clk definition
- */
-static struct clk cpu_clk = {
-	.get_rate = cpu_clk_get_rate,
-	.set_rate = cpu_clk_set_rate,
-	.set_parent = cpu_clk_set_parent,
-	.parent = &ref_cpu_clk,
-};
-
-static struct clk hbus_clk = {
-	.get_rate = hbus_clk_get_rate,
-	.parent = &cpu_clk,
-};
-
-static struct clk xbus_clk = {
-	.get_rate = xbus_clk_get_rate,
-	.set_rate = xbus_clk_set_rate,
-	.parent = &ref_xtal_clk,
-};
-
-static struct clk lradc_clk = {
-	.get_rate = lradc_clk_get_rate,
-	.parent = &clk32k_clk,
-};
-
-static struct clk rtc_clk = {
-	.get_rate = rtc_clk_get_rate,
-	.parent = &ref_xtal_clk,
-};
-
-/* usb_clk gate is controlled in DIGCTRL other than CLKCTRL */
-static struct clk usb0_clk = {
-	.enable_reg = DIGCTRL_BASE_ADDR,
-	.enable_shift = 2,
-	.enable = _raw_clk_enable,
-	.disable = _raw_clk_disable,
-	.parent = &pll0_clk,
-};
-
-static struct clk usb1_clk = {
-	.enable_reg = DIGCTRL_BASE_ADDR,
-	.enable_shift = 16,
-	.enable = _raw_clk_enable,
-	.disable = _raw_clk_disable,
-	.parent = &pll1_clk,
-};
-
-#define _DEFINE_CLOCK(name, er, es, p)					\
-	static struct clk name = {					\
-		.enable_reg	= CLKCTRL_BASE_ADDR + HW_CLKCTRL_##er,	\
-		.enable_shift	= BP_CLKCTRL_##er##_##es,		\
-		.get_rate	= name##_get_rate,			\
-		.set_rate	= name##_set_rate,			\
-		.set_parent	= name##_set_parent,			\
-		.enable		= _raw_clk_enable,			\
-		.disable	= _raw_clk_disable,			\
-		.parent		= p,					\
-	}
-
-_DEFINE_CLOCK(emi_clk, EMI, CLKGATE, &ref_xtal_clk);
-_DEFINE_CLOCK(ssp0_clk, SSP0, CLKGATE, &ref_xtal_clk);
-_DEFINE_CLOCK(ssp1_clk, SSP1, CLKGATE, &ref_xtal_clk);
-_DEFINE_CLOCK(ssp2_clk, SSP2, CLKGATE, &ref_xtal_clk);
-_DEFINE_CLOCK(ssp3_clk, SSP3, CLKGATE, &ref_xtal_clk);
-_DEFINE_CLOCK(lcdif_clk, DIS_LCDIF, CLKGATE, &ref_xtal_clk);
-_DEFINE_CLOCK(gpmi_clk, GPMI, CLKGATE, &ref_xtal_clk);
-_DEFINE_CLOCK(saif0_clk, SAIF0, CLKGATE, &ref_xtal_clk);
-_DEFINE_CLOCK(saif1_clk, SAIF1, CLKGATE, &ref_xtal_clk);
-_DEFINE_CLOCK(can0_clk, FLEXCAN, STOP_CAN0, &ref_xtal_clk);
-_DEFINE_CLOCK(can1_clk, FLEXCAN, STOP_CAN1, &ref_xtal_clk);
-_DEFINE_CLOCK(pwm_clk, XTAL, PWM_CLK24M_GATE, &ref_xtal_clk);
-_DEFINE_CLOCK(uart_clk, XTAL, UART_CLK_GATE, &ref_xtal_clk);
-_DEFINE_CLOCK(clk32k_clk, XTAL, TIMROT_CLK32K_GATE, &ref_xtal_clk);
-_DEFINE_CLOCK(spdif_clk, SPDIF, CLKGATE, &pll0_clk);
-_DEFINE_CLOCK(fec_clk, ENET, DISABLE, &hbus_clk);
-
-#define _REGISTER_CLOCK(d, n, c) \
-	{ \
-		.dev_id = d, \
-		.con_id = n, \
-		.clk = &c, \
-	},
-
-static struct clk_lookup lookups[] = {
-	/* for amba bus driver */
-	_REGISTER_CLOCK("duart", "apb_pclk", xbus_clk)
-	/* for amba-pl011 driver */
-	_REGISTER_CLOCK("duart", NULL, uart_clk)
-	_REGISTER_CLOCK("imx28-fec.0", NULL, fec_clk)
-	_REGISTER_CLOCK("imx28-fec.1", NULL, fec_clk)
-	_REGISTER_CLOCK("imx28-gpmi-nand", NULL, gpmi_clk)
-	_REGISTER_CLOCK("mxs-auart.0", NULL, uart_clk)
-	_REGISTER_CLOCK("mxs-auart.1", NULL, uart_clk)
-	_REGISTER_CLOCK("mxs-auart.2", NULL, uart_clk)
-	_REGISTER_CLOCK("mxs-auart.3", NULL, uart_clk)
-	_REGISTER_CLOCK("mxs-auart.4", NULL, uart_clk)
-	_REGISTER_CLOCK("rtc", NULL, rtc_clk)
-	_REGISTER_CLOCK("pll2", NULL, pll2_clk)
-	_REGISTER_CLOCK("mxs-dma-apbh", NULL, hbus_clk)
-	_REGISTER_CLOCK("mxs-dma-apbx", NULL, xbus_clk)
-	_REGISTER_CLOCK("mxs-mmc.0", NULL, ssp0_clk)
-	_REGISTER_CLOCK("mxs-mmc.1", NULL, ssp1_clk)
-	_REGISTER_CLOCK("mxs-mmc.2", NULL, ssp2_clk)
-	_REGISTER_CLOCK("mxs-mmc.3", NULL, ssp3_clk)
-	_REGISTER_CLOCK("flexcan.0", NULL, can0_clk)
-	_REGISTER_CLOCK("flexcan.1", NULL, can1_clk)
-	_REGISTER_CLOCK(NULL, "usb0", usb0_clk)
-	_REGISTER_CLOCK(NULL, "usb1", usb1_clk)
-	_REGISTER_CLOCK("mxs-pwm.0", NULL, pwm_clk)
-	_REGISTER_CLOCK("mxs-pwm.1", NULL, pwm_clk)
-	_REGISTER_CLOCK("mxs-pwm.2", NULL, pwm_clk)
-	_REGISTER_CLOCK("mxs-pwm.3", NULL, pwm_clk)
-	_REGISTER_CLOCK("mxs-pwm.4", NULL, pwm_clk)
-	_REGISTER_CLOCK("mxs-pwm.5", NULL, pwm_clk)
-	_REGISTER_CLOCK("mxs-pwm.6", NULL, pwm_clk)
-	_REGISTER_CLOCK("mxs-pwm.7", NULL, pwm_clk)
-	_REGISTER_CLOCK(NULL, "lradc", lradc_clk)
-	_REGISTER_CLOCK(NULL, "spdif", spdif_clk)
-	_REGISTER_CLOCK("imx28-fb", NULL, lcdif_clk)
-	_REGISTER_CLOCK("mxs-saif.0", NULL, saif0_clk)
-	_REGISTER_CLOCK("mxs-saif.1", NULL, saif1_clk)
-};
-
-static int clk_misc_init(void)
-{
-	u32 reg;
-	int ret;
-
-	/* Fix up parent per register setting */
-	reg = __raw_readl(CLKCTRL_BASE_ADDR + HW_CLKCTRL_CLKSEQ);
-	cpu_clk.parent = (reg & BM_CLKCTRL_CLKSEQ_BYPASS_CPU) ?
-			&ref_xtal_clk : &ref_cpu_clk;
-	emi_clk.parent = (reg & BM_CLKCTRL_CLKSEQ_BYPASS_EMI) ?
-			&ref_xtal_clk : &ref_emi_clk;
-	ssp0_clk.parent = (reg & BM_CLKCTRL_CLKSEQ_BYPASS_SSP0) ?
-			&ref_xtal_clk : &ref_io0_clk;
-	ssp1_clk.parent = (reg & BM_CLKCTRL_CLKSEQ_BYPASS_SSP1) ?
-			&ref_xtal_clk : &ref_io0_clk;
-	ssp2_clk.parent = (reg & BM_CLKCTRL_CLKSEQ_BYPASS_SSP2) ?
-			&ref_xtal_clk : &ref_io1_clk;
-	ssp3_clk.parent = (reg & BM_CLKCTRL_CLKSEQ_BYPASS_SSP3) ?
-			&ref_xtal_clk : &ref_io1_clk;
-	lcdif_clk.parent = (reg & BM_CLKCTRL_CLKSEQ_BYPASS_DIS_LCDIF) ?
-			&ref_xtal_clk : &ref_pix_clk;
-	gpmi_clk.parent = (reg & BM_CLKCTRL_CLKSEQ_BYPASS_GPMI) ?
-			&ref_xtal_clk : &ref_gpmi_clk;
-	saif0_clk.parent = (reg & BM_CLKCTRL_CLKSEQ_BYPASS_SAIF0) ?
-			&ref_xtal_clk : &pll0_clk;
-	saif1_clk.parent = (reg & BM_CLKCTRL_CLKSEQ_BYPASS_SAIF1) ?
-			&ref_xtal_clk : &pll0_clk;
-
-	/* Use int div over frac when both are available */
-	__raw_writel(BM_CLKCTRL_CPU_DIV_XTAL_FRAC_EN,
-			CLKCTRL_BASE_ADDR + HW_CLKCTRL_CPU_CLR);
-	__raw_writel(BM_CLKCTRL_CPU_DIV_CPU_FRAC_EN,
-			CLKCTRL_BASE_ADDR + HW_CLKCTRL_CPU_CLR);
-	__raw_writel(BM_CLKCTRL_HBUS_DIV_FRAC_EN,
-			CLKCTRL_BASE_ADDR + HW_CLKCTRL_HBUS_CLR);
-
-	reg = __raw_readl(CLKCTRL_BASE_ADDR + HW_CLKCTRL_XBUS);
-	reg &= ~BM_CLKCTRL_XBUS_DIV_FRAC_EN;
-	__raw_writel(reg, CLKCTRL_BASE_ADDR + HW_CLKCTRL_XBUS);
-
-	reg = __raw_readl(CLKCTRL_BASE_ADDR + HW_CLKCTRL_SSP0);
-	reg &= ~BM_CLKCTRL_SSP0_DIV_FRAC_EN;
-	__raw_writel(reg, CLKCTRL_BASE_ADDR + HW_CLKCTRL_SSP0);
-
-	reg = __raw_readl(CLKCTRL_BASE_ADDR + HW_CLKCTRL_SSP1);
-	reg &= ~BM_CLKCTRL_SSP1_DIV_FRAC_EN;
-	__raw_writel(reg, CLKCTRL_BASE_ADDR + HW_CLKCTRL_SSP1);
-
-	reg = __raw_readl(CLKCTRL_BASE_ADDR + HW_CLKCTRL_SSP2);
-	reg &= ~BM_CLKCTRL_SSP2_DIV_FRAC_EN;
-	__raw_writel(reg, CLKCTRL_BASE_ADDR + HW_CLKCTRL_SSP2);
-
-	reg = __raw_readl(CLKCTRL_BASE_ADDR + HW_CLKCTRL_SSP3);
-	reg &= ~BM_CLKCTRL_SSP3_DIV_FRAC_EN;
-	__raw_writel(reg, CLKCTRL_BASE_ADDR + HW_CLKCTRL_SSP3);
-
-	reg = __raw_readl(CLKCTRL_BASE_ADDR + HW_CLKCTRL_GPMI);
-	reg &= ~BM_CLKCTRL_GPMI_DIV_FRAC_EN;
-	__raw_writel(reg, CLKCTRL_BASE_ADDR + HW_CLKCTRL_GPMI);
-
-	reg = __raw_readl(CLKCTRL_BASE_ADDR + HW_CLKCTRL_DIS_LCDIF);
-	reg &= ~BM_CLKCTRL_DIS_LCDIF_DIV_FRAC_EN;
-	__raw_writel(reg, CLKCTRL_BASE_ADDR + HW_CLKCTRL_DIS_LCDIF);
-
-	/* SAIF has to use frac div for functional operation */
-	reg = __raw_readl(CLKCTRL_BASE_ADDR + HW_CLKCTRL_SAIF0);
-	reg |= BM_CLKCTRL_SAIF0_DIV_FRAC_EN;
-	__raw_writel(reg, CLKCTRL_BASE_ADDR + HW_CLKCTRL_SAIF0);
-
-	reg = __raw_readl(CLKCTRL_BASE_ADDR + HW_CLKCTRL_SAIF1);
-	reg |= BM_CLKCTRL_SAIF1_DIV_FRAC_EN;
-	__raw_writel(reg, CLKCTRL_BASE_ADDR + HW_CLKCTRL_SAIF1);
-
-	/*
-	 * Set safe hbus clock divider. A divider of 3 ensure that
-	 * the Vddd voltage required for the cpu clock is sufficiently
-	 * high for the hbus clock.
-	 */
-	reg = __raw_readl(CLKCTRL_BASE_ADDR + HW_CLKCTRL_HBUS);
-	reg &= BM_CLKCTRL_HBUS_DIV;
-	reg |= 3 << BP_CLKCTRL_HBUS_DIV;
-	__raw_writel(reg, CLKCTRL_BASE_ADDR + HW_CLKCTRL_HBUS);
-
-	ret = mxs_clkctrl_timeout(HW_CLKCTRL_HBUS, BM_CLKCTRL_HBUS_ASM_BUSY);
-
-	/* Gate off cpu clock in WFI for power saving */
-	__raw_writel(BM_CLKCTRL_CPU_INTERRUPT_WAIT,
-			CLKCTRL_BASE_ADDR + HW_CLKCTRL_CPU_SET);
-
-	/*
-	 * Extra fec clock setting
-	 * The DENX M28 uses an external clock source
-	 * and the clock output must not be enabled
-	 */
-	if (!machine_is_m28evk()) {
-		reg = __raw_readl(CLKCTRL_BASE_ADDR + HW_CLKCTRL_ENET);
-		reg &= ~BM_CLKCTRL_ENET_SLEEP;
-		reg |= BM_CLKCTRL_ENET_CLK_OUT_EN;
-		__raw_writel(reg, CLKCTRL_BASE_ADDR + HW_CLKCTRL_ENET);
-	}
-
-	/*
-	 * 480 MHz seems too high to be ssp clock source directly,
-	 * so set frac0 to get a 288 MHz ref_io0.
-	 */
-	reg = __raw_readl(CLKCTRL_BASE_ADDR + HW_CLKCTRL_FRAC0);
-	reg &= ~BM_CLKCTRL_FRAC0_IO0FRAC;
-	reg |= 30 << BP_CLKCTRL_FRAC0_IO0FRAC;
-	__raw_writel(reg, CLKCTRL_BASE_ADDR + HW_CLKCTRL_FRAC0);
-
-	return ret;
-}
-
-int __init mx28_clocks_init(void)
-{
-	clk_misc_init();
-
-	/*
-	 * Source the ssp clock from ref_io0 rather than ref_xtal,
-	 * as ref_xtal provides at most 24 MHz.
-	 */
-	clk_set_parent(&ssp0_clk, &ref_io0_clk);
-	clk_set_parent(&ssp1_clk, &ref_io0_clk);
-	clk_set_parent(&ssp2_clk, &ref_io1_clk);
-	clk_set_parent(&ssp3_clk, &ref_io1_clk);
-
-	clk_prepare_enable(&cpu_clk);
-	clk_prepare_enable(&hbus_clk);
-	clk_prepare_enable(&xbus_clk);
-	clk_prepare_enable(&emi_clk);
-	clk_prepare_enable(&uart_clk);
-
-	clk_set_parent(&lcdif_clk, &ref_pix_clk);
-	clk_set_parent(&saif0_clk, &pll0_clk);
-	clk_set_parent(&saif1_clk, &pll0_clk);
-
-	/*
-	 * Set an initial clock rate so that the saif internal logic works
-	 * properly. This is important in EXTMASTER mode, where one saif uses
-	 * the other saif's BITCLK & LRCLK but still needs a basic clock
-	 * that is fast enough for its internal logic.
-	 */
-	clk_set_rate(&saif0_clk, 24000000);
-	clk_set_rate(&saif1_clk, 24000000);
-
-	clkdev_add_table(lookups, ARRAY_SIZE(lookups));
-
-	mxs_timer_init(&clk32k_clk, MX28_INT_TIMER0);
-
-	return 0;
-}
diff --git a/arch/arm/mach-mxs/clock.c b/arch/arm/mach-mxs/clock.c
deleted file mode 100644
index 97a6f4a..0000000
--- a/arch/arm/mach-mxs/clock.c
+++ /dev/null
@@ -1,211 +0,0 @@
-/*
- * Based on arch/arm/plat-omap/clock.c
- *
- * Copyright (C) 2004 - 2005 Nokia corporation
- * Written by Tuukka Tikkanen <tuukka.tikkanen@elektrobit.com>
- * Modified for omap shared clock framework by Tony Lindgren <tony@atomide.com>
- * Copyright 2007 Freescale Semiconductor, Inc. All Rights Reserved.
- * Copyright 2008 Juergen Beisert, kernel@pengutronix.de
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * as published by the Free Software Foundation; either version 2
- * of the License, or (at your option) any later version.
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston,
- * MA  02110-1301, USA.
- */
-
-/* #define DEBUG */
-
-#include <linux/clk.h>
-#include <linux/err.h>
-#include <linux/errno.h>
-#include <linux/init.h>
-#include <linux/io.h>
-#include <linux/kernel.h>
-#include <linux/list.h>
-#include <linux/module.h>
-#include <linux/mutex.h>
-#include <linux/platform_device.h>
-#include <linux/proc_fs.h>
-#include <linux/semaphore.h>
-#include <linux/string.h>
-
-#include <mach/clock.h>
-
-static LIST_HEAD(clocks);
-static DEFINE_MUTEX(clocks_mutex);
-
-/*-------------------------------------------------------------------------
- * Standard clock functions defined in include/linux/clk.h
- *-------------------------------------------------------------------------*/
-
-static void __clk_disable(struct clk *clk)
-{
-	if (clk == NULL || IS_ERR(clk))
-		return;
-	WARN_ON(!clk->usecount);
-
-	if (!(--clk->usecount)) {
-		if (clk->disable)
-			clk->disable(clk);
-		__clk_disable(clk->parent);
-	}
-}
-
-static int __clk_enable(struct clk *clk)
-{
-	if (clk == NULL || IS_ERR(clk))
-		return -EINVAL;
-
-	if (clk->usecount++ == 0) {
-		__clk_enable(clk->parent);
-
-		if (clk->enable)
-			clk->enable(clk);
-	}
-	return 0;
-}
-
-/*
- * clk_enable/clk_disable may be called by drivers in atomic context,
- * so they must not hold a mutex.  Instead, clk_prepare/clk_unprepare
- * can hold a mutex, as that pair is only called in non-atomic context.
- * Before migrating to the common clk framework, we have __clk_enable and
- * __clk_disable called from clk_prepare/clk_unprepare with the mutex held
- * and leave clk_enable/clk_disable as dummy functions.
- */
-int clk_prepare(struct clk *clk)
-{
-	int ret = 0;
-
-	if (clk == NULL || IS_ERR(clk))
-		return -EINVAL;
-
-	mutex_lock(&clocks_mutex);
-	ret = __clk_enable(clk);
-	mutex_unlock(&clocks_mutex);
-
-	return ret;
-}
-EXPORT_SYMBOL(clk_prepare);
-
-void clk_unprepare(struct clk *clk)
-{
-	if (clk == NULL || IS_ERR(clk))
-		return;
-
-	mutex_lock(&clocks_mutex);
-	__clk_disable(clk);
-	mutex_unlock(&clocks_mutex);
-}
-EXPORT_SYMBOL(clk_unprepare);
-
-int clk_enable(struct clk *clk)
-{
-	return 0;
-}
-EXPORT_SYMBOL(clk_enable);
-
-void clk_disable(struct clk *clk)
-{
-	/* nothing to do */
-}
-EXPORT_SYMBOL(clk_disable);
-
-/* Retrieve the *current* clock rate. If the clock itself
- * does not provide a special calculation routine, ask
- * its parent and so on, until one is able to return
- * a valid clock rate
- */
-unsigned long clk_get_rate(struct clk *clk)
-{
-	if (clk == NULL || IS_ERR(clk))
-		return 0UL;
-
-	if (clk->get_rate)
-		return clk->get_rate(clk);
-
-	return clk_get_rate(clk->parent);
-}
-EXPORT_SYMBOL(clk_get_rate);
-
-/* Round the requested clock rate to the nearest supported
- * rate that is less than or equal to the requested rate.
- * This is dependent on the clock's current parent.
- */
-long clk_round_rate(struct clk *clk, unsigned long rate)
-{
-	if (clk == NULL || IS_ERR(clk) || !clk->round_rate)
-		return 0;
-
-	return clk->round_rate(clk, rate);
-}
-EXPORT_SYMBOL(clk_round_rate);
-
-/* Set the clock to the requested clock rate. The rate must
- * match a supported rate exactly based on what clk_round_rate returns
- */
-int clk_set_rate(struct clk *clk, unsigned long rate)
-{
-	int ret = -EINVAL;
-
-	if (clk == NULL || IS_ERR(clk) || clk->set_rate == NULL || rate == 0)
-		return ret;
-
-	mutex_lock(&clocks_mutex);
-	ret = clk->set_rate(clk, rate);
-	mutex_unlock(&clocks_mutex);
-
-	return ret;
-}
-EXPORT_SYMBOL(clk_set_rate);
-
-/* Set the clock's parent to another clock source */
-int clk_set_parent(struct clk *clk, struct clk *parent)
-{
-	int ret = -EINVAL;
-	struct clk *old;
-
-	if (clk == NULL || IS_ERR(clk) || parent == NULL ||
-	    IS_ERR(parent) || clk->set_parent == NULL)
-		return ret;
-
-	if (clk->usecount)
-		clk_prepare_enable(parent);
-
-	mutex_lock(&clocks_mutex);
-	ret = clk->set_parent(clk, parent);
-	if (ret == 0) {
-		old = clk->parent;
-		clk->parent = parent;
-	} else {
-		old = parent;
-	}
-	mutex_unlock(&clocks_mutex);
-
-	if (clk->usecount)
-		clk_disable(old);
-
-	return ret;
-}
-EXPORT_SYMBOL(clk_set_parent);
-
-/* Retrieve the clock's parent clock source */
-struct clk *clk_get_parent(struct clk *clk)
-{
-	struct clk *ret = NULL;
-
-	if (clk == NULL || IS_ERR(clk))
-		return ret;
-
-	return clk->parent;
-}
-EXPORT_SYMBOL(clk_get_parent);
diff --git a/arch/arm/mach-mxs/devices/Kconfig b/arch/arm/mach-mxs/devices/Kconfig
index b8913df..19659de 100644
--- a/arch/arm/mach-mxs/devices/Kconfig
+++ b/arch/arm/mach-mxs/devices/Kconfig
@@ -1,6 +1,5 @@
 config MXS_HAVE_AMBA_DUART
 	bool
-	select ARM_AMBA
 
 config MXS_HAVE_PLATFORM_AUART
 	bool
diff --git a/arch/arm/mach-mxs/devices/platform-dma.c b/arch/arm/mach-mxs/devices/platform-dma.c
index 6a0202b1..4682450 100644
--- a/arch/arm/mach-mxs/devices/platform-dma.c
+++ b/arch/arm/mach-mxs/devices/platform-dma.c
@@ -14,7 +14,7 @@
 #include <mach/mx28.h>
 #include <mach/devices-common.h>
 
-static struct platform_device *__init mxs_add_dma(const char *devid,
+struct platform_device *__init mxs_add_dma(const char *devid,
 						resource_size_t base)
 {
 	struct resource res[] = {
@@ -29,22 +29,3 @@
 				res, ARRAY_SIZE(res), NULL, 0,
 				DMA_BIT_MASK(32));
 }
-
-static int __init mxs_add_mxs_dma(void)
-{
-	char *apbh = "mxs-dma-apbh";
-	char *apbx = "mxs-dma-apbx";
-
-	if (cpu_is_mx23()) {
-		mxs_add_dma(apbh, MX23_APBH_DMA_BASE_ADDR);
-		mxs_add_dma(apbx, MX23_APBX_DMA_BASE_ADDR);
-	}
-
-	if (cpu_is_mx28()) {
-		mxs_add_dma(apbh, MX28_APBH_DMA_BASE_ADDR);
-		mxs_add_dma(apbx, MX28_APBX_DMA_BASE_ADDR);
-	}
-
-	return 0;
-}
-arch_initcall(mxs_add_mxs_dma);
diff --git a/arch/arm/mach-mxs/devices/platform-gpio-mxs.c b/arch/arm/mach-mxs/devices/platform-gpio-mxs.c
index ed0885e..cd99f19 100644
--- a/arch/arm/mach-mxs/devices/platform-gpio-mxs.c
+++ b/arch/arm/mach-mxs/devices/platform-gpio-mxs.c
@@ -14,7 +14,7 @@
 #include <mach/devices-common.h>
 
 struct platform_device *__init mxs_add_gpio(
-	int id, resource_size_t iobase, int irq)
+	char *name, int id, resource_size_t iobase, int irq)
 {
 	struct resource res[] = {
 		{
@@ -29,25 +29,5 @@
 	};
 
 	return platform_device_register_resndata(&mxs_apbh_bus,
-			"gpio-mxs", id, res, ARRAY_SIZE(res), NULL, 0);
+			name, id, res, ARRAY_SIZE(res), NULL, 0);
 }
-
-static int __init mxs_add_mxs_gpio(void)
-{
-	if (cpu_is_mx23()) {
-		mxs_add_gpio(0, MX23_PINCTRL_BASE_ADDR, MX23_INT_GPIO0);
-		mxs_add_gpio(1, MX23_PINCTRL_BASE_ADDR, MX23_INT_GPIO1);
-		mxs_add_gpio(2, MX23_PINCTRL_BASE_ADDR, MX23_INT_GPIO2);
-	}
-
-	if (cpu_is_mx28()) {
-		mxs_add_gpio(0, MX28_PINCTRL_BASE_ADDR, MX28_INT_GPIO0);
-		mxs_add_gpio(1, MX28_PINCTRL_BASE_ADDR, MX28_INT_GPIO1);
-		mxs_add_gpio(2, MX28_PINCTRL_BASE_ADDR, MX28_INT_GPIO2);
-		mxs_add_gpio(3, MX28_PINCTRL_BASE_ADDR, MX28_INT_GPIO3);
-		mxs_add_gpio(4, MX28_PINCTRL_BASE_ADDR, MX28_INT_GPIO4);
-	}
-
-	return 0;
-}
-postcore_initcall(mxs_add_mxs_gpio);
diff --git a/arch/arm/mach-mxs/devices/platform-mxs-mmc.c b/arch/arm/mach-mxs/devices/platform-mxs-mmc.c
index bef9d92..b33c9d0 100644
--- a/arch/arm/mach-mxs/devices/platform-mxs-mmc.c
+++ b/arch/arm/mach-mxs/devices/platform-mxs-mmc.c
@@ -17,8 +17,9 @@
 #include <mach/mx28.h>
 #include <mach/devices-common.h>
 
-#define mxs_mxs_mmc_data_entry_single(soc, _id, hwid)			\
+#define mxs_mxs_mmc_data_entry_single(soc, _devid, _id, hwid)		\
 	{								\
+		.devid = _devid,					\
 		.id = _id,						\
 		.iobase = soc ## _SSP ## hwid ## _BASE_ADDR,		\
 		.dma = soc ## _DMA_SSP ## hwid,				\
@@ -26,23 +27,23 @@
 		.irq_dma = soc ## _INT_SSP ## hwid ## _DMA,		\
 	}
 
-#define mxs_mxs_mmc_data_entry(soc, _id, hwid)				\
-	[_id] = mxs_mxs_mmc_data_entry_single(soc, _id, hwid)
+#define mxs_mxs_mmc_data_entry(soc, _devid, _id, hwid)			\
+	[_id] = mxs_mxs_mmc_data_entry_single(soc, _devid, _id, hwid)
 
 
 #ifdef CONFIG_SOC_IMX23
 const struct mxs_mxs_mmc_data mx23_mxs_mmc_data[] __initconst = {
-	mxs_mxs_mmc_data_entry(MX23, 0, 1),
-	mxs_mxs_mmc_data_entry(MX23, 1, 2),
+	mxs_mxs_mmc_data_entry(MX23, "imx23-mmc", 0, 1),
+	mxs_mxs_mmc_data_entry(MX23, "imx23-mmc", 1, 2),
 };
 #endif
 
 #ifdef CONFIG_SOC_IMX28
 const struct mxs_mxs_mmc_data mx28_mxs_mmc_data[] __initconst = {
-	mxs_mxs_mmc_data_entry(MX28, 0, 0),
-	mxs_mxs_mmc_data_entry(MX28, 1, 1),
-	mxs_mxs_mmc_data_entry(MX28, 2, 2),
-	mxs_mxs_mmc_data_entry(MX28, 3, 3),
+	mxs_mxs_mmc_data_entry(MX28, "imx28-mmc", 0, 0),
+	mxs_mxs_mmc_data_entry(MX28, "imx28-mmc", 1, 1),
+	mxs_mxs_mmc_data_entry(MX28, "imx28-mmc", 2, 2),
+	mxs_mxs_mmc_data_entry(MX28, "imx28-mmc", 3, 3),
 };
 #endif
 
@@ -70,6 +71,6 @@
 		},
 	};
 
-	return mxs_add_platform_device("mxs-mmc", data->id,
+	return mxs_add_platform_device(data->devid, data->id,
 			res, ARRAY_SIZE(res), pdata, sizeof(*pdata));
 }
diff --git a/arch/arm/mach-mxs/include/mach/clock.h b/arch/arm/mach-mxs/include/mach/clock.h
deleted file mode 100644
index 592c9ab..0000000
--- a/arch/arm/mach-mxs/include/mach/clock.h
+++ /dev/null
@@ -1,62 +0,0 @@
-/*
- * Copyright 2005-2007 Freescale Semiconductor, Inc. All Rights Reserved.
- * Copyright 2008 Juergen Beisert, kernel@pengutronix.de
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * as published by the Free Software Foundation; either version 2
- * of the License, or (at your option) any later version.
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston,
- * MA  02110-1301, USA.
- */
-
-#ifndef __MACH_MXS_CLOCK_H__
-#define __MACH_MXS_CLOCK_H__
-
-#ifndef __ASSEMBLY__
-#include <linux/list.h>
-
-struct module;
-
-struct clk {
-	int id;
-	/* Source clock this clk depends on */
-	struct clk *parent;
-	/* Reference count of clock enable/disable */
-	__s8 usecount;
-	/* Register bit position for clock's enable/disable control. */
-	u8 enable_shift;
-	/* Register address for clock's enable/disable control. */
-	void __iomem *enable_reg;
-	u32 flags;
-	/* get the current clock rate (always a fresh value) */
-	unsigned long (*get_rate) (struct clk *);
-	/* Function ptr to set the clock to a new rate. The rate must match a
-	   supported rate returned from round_rate. Leave blank if clock is not
-	   programmable */
-	int (*set_rate) (struct clk *, unsigned long);
-	/* Function ptr to round the requested clock rate to the nearest
-	   supported rate that is less than or equal to the requested rate. */
-	unsigned long (*round_rate) (struct clk *, unsigned long);
-	/* Function ptr to enable the clock. Leave blank if clock can not
-	   be gated. */
-	int (*enable) (struct clk *);
-	/* Function ptr to disable the clock. Leave blank if clock can not
-	   be gated. */
-	void (*disable) (struct clk *);
-	/* Function ptr to set the parent clock of the clock. */
-	int (*set_parent) (struct clk *, struct clk *);
-};
-
-int clk_register(struct clk *clk);
-void clk_unregister(struct clk *clk);
-
-#endif /* __ASSEMBLY__ */
-#endif /* __MACH_MXS_CLOCK_H__ */
diff --git a/arch/arm/mach-mxs/include/mach/common.h b/arch/arm/mach-mxs/include/mach/common.h
index 8d88399..de6c7ba 100644
--- a/arch/arm/mach-mxs/include/mach/common.h
+++ b/arch/arm/mach-mxs/include/mach/common.h
@@ -11,28 +11,27 @@
 #ifndef __MACH_MXS_COMMON_H__
 #define __MACH_MXS_COMMON_H__
 
-struct clk;
-
 extern const u32 *mxs_get_ocotp(void);
 extern int mxs_reset_block(void __iomem *);
-extern void mxs_timer_init(struct clk *, int);
+extern void mxs_timer_init(int);
 extern void mxs_restart(char, const char *);
 extern int mxs_saif_clkmux_select(unsigned int clkmux);
 
 extern void mx23_soc_init(void);
-extern int mx23_register_gpios(void);
 extern int mx23_clocks_init(void);
 extern void mx23_map_io(void);
 extern void mx23_init_irq(void);
 
 extern void mx28_soc_init(void);
-extern int mx28_register_gpios(void);
 extern int mx28_clocks_init(void);
 extern void mx28_map_io(void);
 extern void mx28_init_irq(void);
 
 extern void icoll_init_irq(void);
 
-extern int mxs_clkctrl_timeout(unsigned int reg_offset, unsigned int mask);
+extern struct platform_device *mxs_add_dma(const char *devid,
+						resource_size_t base);
+extern struct platform_device *mxs_add_gpio(char *name, int id,
+					    resource_size_t iobase, int irq);
 
 #endif /* __MACH_MXS_COMMON_H__ */
diff --git a/arch/arm/mach-mxs/include/mach/devices-common.h b/arch/arm/mach-mxs/include/mach/devices-common.h
index 21e45a7..e8b1d95 100644
--- a/arch/arm/mach-mxs/include/mach/devices-common.h
+++ b/arch/arm/mach-mxs/include/mach/devices-common.h
@@ -82,8 +82,9 @@
 		const struct mxs_mxs_i2c_data *data);
 
 /* mmc */
-#include <mach/mmc.h>
+#include <linux/mmc/mxs-mmc.h>
 struct mxs_mxs_mmc_data {
+	const char *devid;
 	int id;
 	resource_size_t iobase;
 	resource_size_t dma;
diff --git a/arch/arm/mach-mxs/mach-mx28evk.c b/arch/arm/mach-mxs/mach-mx28evk.c
index da4610e..dafd48e 100644
--- a/arch/arm/mach-mxs/mach-mx28evk.c
+++ b/arch/arm/mach-mxs/mach-mx28evk.c
@@ -226,7 +226,7 @@
 	struct clk *clk;
 
 	/* Enable fec phy clock */
-	clk = clk_get_sys("pll2", NULL);
+	clk = clk_get_sys("enet_out", NULL);
 	if (!IS_ERR(clk))
 		clk_prepare_enable(clk);
 
diff --git a/arch/arm/mach-mxs/mach-mxs.c b/arch/arm/mach-mxs/mach-mxs.c
new file mode 100644
index 0000000..8cac94b
--- /dev/null
+++ b/arch/arm/mach-mxs/mach-mxs.c
@@ -0,0 +1,121 @@
+/*
+ * Copyright 2012 Freescale Semiconductor, Inc.
+ * Copyright 2012 Linaro Ltd.
+ *
+ * The code contained herein is licensed under the GNU General Public
+ * License. You may obtain a copy of the GNU General Public License
+ * Version 2 or later at the following locations:
+ *
+ * http://www.opensource.org/licenses/gpl-license.html
+ * http://www.gnu.org/copyleft/gpl.html
+ */
+
+#include <linux/clk.h>
+#include <linux/clkdev.h>
+#include <linux/err.h>
+#include <linux/init.h>
+#include <linux/init.h>
+#include <linux/irqdomain.h>
+#include <linux/of_irq.h>
+#include <linux/of_platform.h>
+#include <asm/mach/arch.h>
+#include <asm/mach/time.h>
+#include <mach/common.h>
+
+static int __init mxs_icoll_add_irq_domain(struct device_node *np,
+				struct device_node *interrupt_parent)
+{
+	irq_domain_add_legacy(np, 128, 0, 0, &irq_domain_simple_ops, NULL);
+
+	return 0;
+}
+
+static int __init mxs_gpio_add_irq_domain(struct device_node *np,
+				struct device_node *interrupt_parent)
+{
+	static int gpio_irq_base = MXS_GPIO_IRQ_START;
+
+	irq_domain_add_legacy(np, 32, gpio_irq_base, 0, &irq_domain_simple_ops, NULL);
+	gpio_irq_base += 32;
+
+	return 0;
+}
+
+static const struct of_device_id mxs_irq_match[] __initconst = {
+	{ .compatible = "fsl,mxs-icoll", .data = mxs_icoll_add_irq_domain, },
+	{ .compatible = "fsl,mxs-gpio", .data = mxs_gpio_add_irq_domain, },
+	{ /* sentinel */ }
+};
+
+static void __init mxs_dt_init_irq(void)
+{
+	icoll_init_irq();
+	of_irq_init(mxs_irq_match);
+}
+
+static void __init imx23_timer_init(void)
+{
+	mx23_clocks_init();
+}
+
+static struct sys_timer imx23_timer = {
+	.init = imx23_timer_init,
+};
+
+static void __init imx28_timer_init(void)
+{
+	mx28_clocks_init();
+}
+
+static struct sys_timer imx28_timer = {
+	.init = imx28_timer_init,
+};
+
+static void __init imx28_evk_init(void)
+{
+	struct clk *clk;
+
+	/* Enable fec phy clock */
+	clk = clk_get_sys("enet_out", NULL);
+	if (!IS_ERR(clk))
+		clk_prepare_enable(clk);
+}
+
+static void __init mxs_machine_init(void)
+{
+	if (of_machine_is_compatible("fsl,imx28-evk"))
+		imx28_evk_init();
+
+	of_platform_populate(NULL, of_default_bus_match_table,
+				NULL, NULL);
+}
+
+static const char *imx23_dt_compat[] __initdata = {
+	"fsl,imx23-evk",
+	"fsl,imx23",
+	NULL,
+};
+
+static const char *imx28_dt_compat[] __initdata = {
+	"fsl,imx28-evk",
+	"fsl,imx28",
+	NULL,
+};
+
+DT_MACHINE_START(IMX23, "Freescale i.MX23 (Device Tree)")
+	.map_io		= mx23_map_io,
+	.init_irq	= mxs_dt_init_irq,
+	.timer		= &imx23_timer,
+	.init_machine	= mxs_machine_init,
+	.dt_compat	= imx23_dt_compat,
+	.restart	= mxs_restart,
+MACHINE_END
+
+DT_MACHINE_START(IMX28, "Freescale i.MX28 (Device Tree)")
+	.map_io		= mx28_map_io,
+	.init_irq	= mxs_dt_init_irq,
+	.timer		= &imx28_timer,
+	.init_machine	= mxs_machine_init,
+	.dt_compat	= imx28_dt_compat,
+	.restart	= mxs_restart,
+MACHINE_END
diff --git a/arch/arm/mach-mxs/mm.c b/arch/arm/mach-mxs/mm.c
index 67a384e..dccb67a 100644
--- a/arch/arm/mach-mxs/mm.c
+++ b/arch/arm/mach-mxs/mm.c
@@ -66,9 +66,25 @@
 void __init mx23_soc_init(void)
 {
 	pinctrl_provide_dummies();
+
+	mxs_add_dma("imx23-dma-apbh", MX23_APBH_DMA_BASE_ADDR);
+	mxs_add_dma("imx23-dma-apbx", MX23_APBX_DMA_BASE_ADDR);
+
+	mxs_add_gpio("imx23-gpio", 0, MX23_PINCTRL_BASE_ADDR, MX23_INT_GPIO0);
+	mxs_add_gpio("imx23-gpio", 1, MX23_PINCTRL_BASE_ADDR, MX23_INT_GPIO1);
+	mxs_add_gpio("imx23-gpio", 2, MX23_PINCTRL_BASE_ADDR, MX23_INT_GPIO2);
 }
 
 void __init mx28_soc_init(void)
 {
 	pinctrl_provide_dummies();
+
+	mxs_add_dma("imx28-dma-apbh", MX23_APBH_DMA_BASE_ADDR);
+	mxs_add_dma("imx28-dma-apbx", MX23_APBX_DMA_BASE_ADDR);
+
+	mxs_add_gpio("imx28-gpio", 0, MX28_PINCTRL_BASE_ADDR, MX28_INT_GPIO0);
+	mxs_add_gpio("imx28-gpio", 1, MX28_PINCTRL_BASE_ADDR, MX28_INT_GPIO1);
+	mxs_add_gpio("imx28-gpio", 2, MX28_PINCTRL_BASE_ADDR, MX28_INT_GPIO2);
+	mxs_add_gpio("imx28-gpio", 3, MX28_PINCTRL_BASE_ADDR, MX28_INT_GPIO3);
+	mxs_add_gpio("imx28-gpio", 4, MX28_PINCTRL_BASE_ADDR, MX28_INT_GPIO4);
 }
diff --git a/arch/arm/mach-mxs/regs-clkctrl-mx23.h b/arch/arm/mach-mxs/regs-clkctrl-mx23.h
deleted file mode 100644
index 0ea5c9d..0000000
--- a/arch/arm/mach-mxs/regs-clkctrl-mx23.h
+++ /dev/null
@@ -1,331 +0,0 @@
-/*
- * Freescale CLKCTRL Register Definitions
- *
- * Copyright 2008 Embedded Alley Solutions, Inc All Rights Reserved.
- * Copyright 2008-2010 Freescale Semiconductor, Inc.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307 USA
- *
- * This file is generated from an XML file. Do not edit it.
- *
- * Xml Revision: 1.48
- * Template revision: 26195
- */
-
-#ifndef __REGS_CLKCTRL_MX23_H__
-#define __REGS_CLKCTRL_MX23_H__
-
-
-#define HW_CLKCTRL_PLLCTRL0	(0x00000000)
-#define HW_CLKCTRL_PLLCTRL0_SET	(0x00000004)
-#define HW_CLKCTRL_PLLCTRL0_CLR	(0x00000008)
-#define HW_CLKCTRL_PLLCTRL0_TOG	(0x0000000c)
-
-#define BP_CLKCTRL_PLLCTRL0_LFR_SEL	28
-#define BM_CLKCTRL_PLLCTRL0_LFR_SEL	0x30000000
-#define BF_CLKCTRL_PLLCTRL0_LFR_SEL(v)  \
-		(((v) << 28) & BM_CLKCTRL_PLLCTRL0_LFR_SEL)
-#define BV_CLKCTRL_PLLCTRL0_LFR_SEL__DEFAULT   0x0
-#define BV_CLKCTRL_PLLCTRL0_LFR_SEL__TIMES_2   0x1
-#define BV_CLKCTRL_PLLCTRL0_LFR_SEL__TIMES_05  0x2
-#define BV_CLKCTRL_PLLCTRL0_LFR_SEL__UNDEFINED 0x3
-#define BP_CLKCTRL_PLLCTRL0_CP_SEL	24
-#define BM_CLKCTRL_PLLCTRL0_CP_SEL	0x03000000
-#define BF_CLKCTRL_PLLCTRL0_CP_SEL(v)  \
-		(((v) << 24) & BM_CLKCTRL_PLLCTRL0_CP_SEL)
-#define BV_CLKCTRL_PLLCTRL0_CP_SEL__DEFAULT   0x0
-#define BV_CLKCTRL_PLLCTRL0_CP_SEL__TIMES_2   0x1
-#define BV_CLKCTRL_PLLCTRL0_CP_SEL__TIMES_05  0x2
-#define BV_CLKCTRL_PLLCTRL0_CP_SEL__UNDEFINED 0x3
-#define BP_CLKCTRL_PLLCTRL0_DIV_SEL	20
-#define BM_CLKCTRL_PLLCTRL0_DIV_SEL	0x00300000
-#define BF_CLKCTRL_PLLCTRL0_DIV_SEL(v)  \
-		(((v) << 20) & BM_CLKCTRL_PLLCTRL0_DIV_SEL)
-#define BV_CLKCTRL_PLLCTRL0_DIV_SEL__DEFAULT   0x0
-#define BV_CLKCTRL_PLLCTRL0_DIV_SEL__LOWER     0x1
-#define BV_CLKCTRL_PLLCTRL0_DIV_SEL__LOWEST    0x2
-#define BV_CLKCTRL_PLLCTRL0_DIV_SEL__UNDEFINED 0x3
-#define BM_CLKCTRL_PLLCTRL0_EN_USB_CLKS	0x00040000
-#define BM_CLKCTRL_PLLCTRL0_POWER	0x00010000
-
-#define HW_CLKCTRL_PLLCTRL1	(0x00000010)
-
-#define BM_CLKCTRL_PLLCTRL1_LOCK	0x80000000
-#define BM_CLKCTRL_PLLCTRL1_FORCE_LOCK	0x40000000
-#define BP_CLKCTRL_PLLCTRL1_LOCK_COUNT	0
-#define BM_CLKCTRL_PLLCTRL1_LOCK_COUNT	0x0000FFFF
-#define BF_CLKCTRL_PLLCTRL1_LOCK_COUNT(v)  \
-		(((v) << 0) & BM_CLKCTRL_PLLCTRL1_LOCK_COUNT)
-
-#define HW_CLKCTRL_CPU	(0x00000020)
-#define HW_CLKCTRL_CPU_SET	(0x00000024)
-#define HW_CLKCTRL_CPU_CLR	(0x00000028)
-#define HW_CLKCTRL_CPU_TOG	(0x0000002c)
-
-#define BM_CLKCTRL_CPU_BUSY_REF_XTAL	0x20000000
-#define BM_CLKCTRL_CPU_BUSY_REF_CPU	0x10000000
-#define BM_CLKCTRL_CPU_DIV_XTAL_FRAC_EN	0x04000000
-#define BP_CLKCTRL_CPU_DIV_XTAL	16
-#define BM_CLKCTRL_CPU_DIV_XTAL	0x03FF0000
-#define BF_CLKCTRL_CPU_DIV_XTAL(v)  \
-		(((v) << 16) & BM_CLKCTRL_CPU_DIV_XTAL)
-#define BM_CLKCTRL_CPU_INTERRUPT_WAIT	0x00001000
-#define BM_CLKCTRL_CPU_DIV_CPU_FRAC_EN	0x00000400
-#define BP_CLKCTRL_CPU_DIV_CPU	0
-#define BM_CLKCTRL_CPU_DIV_CPU	0x0000003F
-#define BF_CLKCTRL_CPU_DIV_CPU(v)  \
-		(((v) << 0) & BM_CLKCTRL_CPU_DIV_CPU)
-
-#define HW_CLKCTRL_HBUS	(0x00000030)
-#define HW_CLKCTRL_HBUS_SET	(0x00000034)
-#define HW_CLKCTRL_HBUS_CLR	(0x00000038)
-#define HW_CLKCTRL_HBUS_TOG	(0x0000003c)
-
-#define BM_CLKCTRL_HBUS_BUSY	0x20000000
-#define BM_CLKCTRL_HBUS_DCP_AS_ENABLE	0x10000000
-#define BM_CLKCTRL_HBUS_PXP_AS_ENABLE	0x08000000
-#define BM_CLKCTRL_HBUS_APBHDMA_AS_ENABLE	0x04000000
-#define BM_CLKCTRL_HBUS_APBXDMA_AS_ENABLE	0x02000000
-#define BM_CLKCTRL_HBUS_TRAFFIC_JAM_AS_ENABLE	0x01000000
-#define BM_CLKCTRL_HBUS_TRAFFIC_AS_ENABLE	0x00800000
-#define BM_CLKCTRL_HBUS_CPU_DATA_AS_ENABLE	0x00400000
-#define BM_CLKCTRL_HBUS_CPU_INSTR_AS_ENABLE	0x00200000
-#define BM_CLKCTRL_HBUS_AUTO_SLOW_MODE	0x00100000
-#define BP_CLKCTRL_HBUS_SLOW_DIV	16
-#define BM_CLKCTRL_HBUS_SLOW_DIV	0x00070000
-#define BF_CLKCTRL_HBUS_SLOW_DIV(v)  \
-		(((v) << 16) & BM_CLKCTRL_HBUS_SLOW_DIV)
-#define BV_CLKCTRL_HBUS_SLOW_DIV__BY1  0x0
-#define BV_CLKCTRL_HBUS_SLOW_DIV__BY2  0x1
-#define BV_CLKCTRL_HBUS_SLOW_DIV__BY4  0x2
-#define BV_CLKCTRL_HBUS_SLOW_DIV__BY8  0x3
-#define BV_CLKCTRL_HBUS_SLOW_DIV__BY16 0x4
-#define BV_CLKCTRL_HBUS_SLOW_DIV__BY32 0x5
-#define BM_CLKCTRL_HBUS_DIV_FRAC_EN	0x00000020
-#define BP_CLKCTRL_HBUS_DIV	0
-#define BM_CLKCTRL_HBUS_DIV	0x0000001F
-#define BF_CLKCTRL_HBUS_DIV(v)  \
-		(((v) << 0) & BM_CLKCTRL_HBUS_DIV)
-
-#define HW_CLKCTRL_XBUS	(0x00000040)
-
-#define BM_CLKCTRL_XBUS_BUSY	0x80000000
-#define BM_CLKCTRL_XBUS_DIV_FRAC_EN	0x00000400
-#define BP_CLKCTRL_XBUS_DIV	0
-#define BM_CLKCTRL_XBUS_DIV	0x000003FF
-#define BF_CLKCTRL_XBUS_DIV(v)  \
-		(((v) << 0) & BM_CLKCTRL_XBUS_DIV)
-
-#define HW_CLKCTRL_XTAL	(0x00000050)
-#define HW_CLKCTRL_XTAL_SET	(0x00000054)
-#define HW_CLKCTRL_XTAL_CLR	(0x00000058)
-#define HW_CLKCTRL_XTAL_TOG	(0x0000005c)
-
-#define BP_CLKCTRL_XTAL_UART_CLK_GATE	31
-#define BM_CLKCTRL_XTAL_UART_CLK_GATE	0x80000000
-#define BP_CLKCTRL_XTAL_FILT_CLK24M_GATE	30
-#define BM_CLKCTRL_XTAL_FILT_CLK24M_GATE	0x40000000
-#define BP_CLKCTRL_XTAL_PWM_CLK24M_GATE	29
-#define BM_CLKCTRL_XTAL_PWM_CLK24M_GATE	0x20000000
-#define BM_CLKCTRL_XTAL_DRI_CLK24M_GATE	0x10000000
-#define BM_CLKCTRL_XTAL_DIGCTRL_CLK1M_GATE	0x08000000
-#define BP_CLKCTRL_XTAL_TIMROT_CLK32K_GATE	26
-#define BM_CLKCTRL_XTAL_TIMROT_CLK32K_GATE	0x04000000
-#define BP_CLKCTRL_XTAL_DIV_UART	0
-#define BM_CLKCTRL_XTAL_DIV_UART	0x00000003
-#define BF_CLKCTRL_XTAL_DIV_UART(v)  \
-		(((v) << 0) & BM_CLKCTRL_XTAL_DIV_UART)
-
-#define HW_CLKCTRL_PIX	(0x00000060)
-
-#define BP_CLKCTRL_PIX_CLKGATE	31
-#define BM_CLKCTRL_PIX_CLKGATE	0x80000000
-#define BM_CLKCTRL_PIX_BUSY	0x20000000
-#define BM_CLKCTRL_PIX_DIV_FRAC_EN	0x00001000
-#define BP_CLKCTRL_PIX_DIV	0
-#define BM_CLKCTRL_PIX_DIV	0x00000FFF
-#define BF_CLKCTRL_PIX_DIV(v)  \
-		(((v) << 0) & BM_CLKCTRL_PIX_DIV)
-
-#define HW_CLKCTRL_SSP	(0x00000070)
-
-#define BP_CLKCTRL_SSP_CLKGATE	31
-#define BM_CLKCTRL_SSP_CLKGATE	0x80000000
-#define BM_CLKCTRL_SSP_BUSY	0x20000000
-#define BM_CLKCTRL_SSP_DIV_FRAC_EN	0x00000200
-#define BP_CLKCTRL_SSP_DIV	0
-#define BM_CLKCTRL_SSP_DIV	0x000001FF
-#define BF_CLKCTRL_SSP_DIV(v)  \
-		(((v) << 0) & BM_CLKCTRL_SSP_DIV)
-
-#define HW_CLKCTRL_GPMI	(0x00000080)
-
-#define BP_CLKCTRL_GPMI_CLKGATE	31
-#define BM_CLKCTRL_GPMI_CLKGATE	0x80000000
-#define BM_CLKCTRL_GPMI_BUSY	0x20000000
-#define BM_CLKCTRL_GPMI_DIV_FRAC_EN	0x00000400
-#define BP_CLKCTRL_GPMI_DIV	0
-#define BM_CLKCTRL_GPMI_DIV	0x000003FF
-#define BF_CLKCTRL_GPMI_DIV(v)  \
-		(((v) << 0) & BM_CLKCTRL_GPMI_DIV)
-
-#define HW_CLKCTRL_SPDIF	(0x00000090)
-
-#define BM_CLKCTRL_SPDIF_CLKGATE	0x80000000
-
-#define HW_CLKCTRL_EMI	(0x000000a0)
-
-#define BP_CLKCTRL_EMI_CLKGATE	31
-#define BM_CLKCTRL_EMI_CLKGATE	0x80000000
-#define BM_CLKCTRL_EMI_SYNC_MODE_EN	0x40000000
-#define BM_CLKCTRL_EMI_BUSY_REF_XTAL	0x20000000
-#define BM_CLKCTRL_EMI_BUSY_REF_EMI	0x10000000
-#define BM_CLKCTRL_EMI_BUSY_REF_CPU	0x08000000
-#define BM_CLKCTRL_EMI_BUSY_SYNC_MODE	0x04000000
-#define BM_CLKCTRL_EMI_BUSY_DCC_RESYNC	0x00020000
-#define BM_CLKCTRL_EMI_DCC_RESYNC_ENABLE	0x00010000
-#define BP_CLKCTRL_EMI_DIV_XTAL	8
-#define BM_CLKCTRL_EMI_DIV_XTAL	0x00000F00
-#define BF_CLKCTRL_EMI_DIV_XTAL(v)  \
-		(((v) << 8) & BM_CLKCTRL_EMI_DIV_XTAL)
-#define BP_CLKCTRL_EMI_DIV_EMI	0
-#define BM_CLKCTRL_EMI_DIV_EMI	0x0000003F
-#define BF_CLKCTRL_EMI_DIV_EMI(v)  \
-		(((v) << 0) & BM_CLKCTRL_EMI_DIV_EMI)
-
-#define HW_CLKCTRL_IR	(0x000000b0)
-
-#define BM_CLKCTRL_IR_CLKGATE	0x80000000
-#define BM_CLKCTRL_IR_AUTO_DIV	0x20000000
-#define BM_CLKCTRL_IR_IR_BUSY	0x10000000
-#define BM_CLKCTRL_IR_IROV_BUSY	0x08000000
-#define BP_CLKCTRL_IR_IROV_DIV	16
-#define BM_CLKCTRL_IR_IROV_DIV	0x01FF0000
-#define BF_CLKCTRL_IR_IROV_DIV(v)  \
-		(((v) << 16) & BM_CLKCTRL_IR_IROV_DIV)
-#define BP_CLKCTRL_IR_IR_DIV	0
-#define BM_CLKCTRL_IR_IR_DIV	0x000003FF
-#define BF_CLKCTRL_IR_IR_DIV(v)  \
-		(((v) << 0) & BM_CLKCTRL_IR_IR_DIV)
-
-#define HW_CLKCTRL_SAIF	(0x000000c0)
-
-#define BM_CLKCTRL_SAIF_CLKGATE	0x80000000
-#define BM_CLKCTRL_SAIF_BUSY	0x20000000
-#define BM_CLKCTRL_SAIF_DIV_FRAC_EN	0x00010000
-#define BP_CLKCTRL_SAIF_DIV	0
-#define BM_CLKCTRL_SAIF_DIV	0x0000FFFF
-#define BF_CLKCTRL_SAIF_DIV(v)  \
-		(((v) << 0) & BM_CLKCTRL_SAIF_DIV)
-
-#define HW_CLKCTRL_TV	(0x000000d0)
-
-#define BM_CLKCTRL_TV_CLK_TV108M_GATE	0x80000000
-#define BM_CLKCTRL_TV_CLK_TV_GATE	0x40000000
-
-#define HW_CLKCTRL_ETM	(0x000000e0)
-
-#define BM_CLKCTRL_ETM_CLKGATE	0x80000000
-#define BM_CLKCTRL_ETM_BUSY	0x20000000
-#define BM_CLKCTRL_ETM_DIV_FRAC_EN	0x00000040
-#define BP_CLKCTRL_ETM_DIV	0
-#define BM_CLKCTRL_ETM_DIV	0x0000003F
-#define BF_CLKCTRL_ETM_DIV(v)  \
-		(((v) << 0) & BM_CLKCTRL_ETM_DIV)
-
-#define HW_CLKCTRL_FRAC	(0x000000f0)
-#define HW_CLKCTRL_FRAC_SET	(0x000000f4)
-#define HW_CLKCTRL_FRAC_CLR	(0x000000f8)
-#define HW_CLKCTRL_FRAC_TOG	(0x000000fc)
-
-#define BP_CLKCTRL_FRAC_CLKGATEIO	31
-#define BM_CLKCTRL_FRAC_CLKGATEIO	0x80000000
-#define BM_CLKCTRL_FRAC_IO_STABLE	0x40000000
-#define BP_CLKCTRL_FRAC_IOFRAC	24
-#define BM_CLKCTRL_FRAC_IOFRAC	0x3F000000
-#define BF_CLKCTRL_FRAC_IOFRAC(v)  \
-		(((v) << 24) & BM_CLKCTRL_FRAC_IOFRAC)
-#define BP_CLKCTRL_FRAC_CLKGATEPIX	23
-#define BM_CLKCTRL_FRAC_CLKGATEPIX	0x00800000
-#define BM_CLKCTRL_FRAC_PIX_STABLE	0x00400000
-#define BP_CLKCTRL_FRAC_PIXFRAC	16
-#define BM_CLKCTRL_FRAC_PIXFRAC	0x003F0000
-#define BF_CLKCTRL_FRAC_PIXFRAC(v)  \
-		(((v) << 16) & BM_CLKCTRL_FRAC_PIXFRAC)
-#define BP_CLKCTRL_FRAC_CLKGATEEMI	15
-#define BM_CLKCTRL_FRAC_CLKGATEEMI	0x00008000
-#define BM_CLKCTRL_FRAC_EMI_STABLE	0x00004000
-#define BP_CLKCTRL_FRAC_EMIFRAC	8
-#define BM_CLKCTRL_FRAC_EMIFRAC	0x00003F00
-#define BF_CLKCTRL_FRAC_EMIFRAC(v)  \
-		(((v) << 8) & BM_CLKCTRL_FRAC_EMIFRAC)
-#define BP_CLKCTRL_FRAC_CLKGATECPU	7
-#define BM_CLKCTRL_FRAC_CLKGATECPU	0x00000080
-#define BM_CLKCTRL_FRAC_CPU_STABLE	0x00000040
-#define BP_CLKCTRL_FRAC_CPUFRAC	0
-#define BM_CLKCTRL_FRAC_CPUFRAC	0x0000003F
-#define BF_CLKCTRL_FRAC_CPUFRAC(v)  \
-		(((v) << 0) & BM_CLKCTRL_FRAC_CPUFRAC)
-
-#define HW_CLKCTRL_FRAC1	(0x00000100)
-#define HW_CLKCTRL_FRAC1_SET	(0x00000104)
-#define HW_CLKCTRL_FRAC1_CLR	(0x00000108)
-#define HW_CLKCTRL_FRAC1_TOG	(0x0000010c)
-
-#define BM_CLKCTRL_FRAC1_CLKGATEVID	0x80000000
-#define BM_CLKCTRL_FRAC1_VID_STABLE	0x40000000
-
-#define HW_CLKCTRL_CLKSEQ	(0x00000110)
-#define HW_CLKCTRL_CLKSEQ_SET	(0x00000114)
-#define HW_CLKCTRL_CLKSEQ_CLR	(0x00000118)
-#define HW_CLKCTRL_CLKSEQ_TOG	(0x0000011c)
-
-#define BM_CLKCTRL_CLKSEQ_BYPASS_ETM	0x00000100
-#define BM_CLKCTRL_CLKSEQ_BYPASS_CPU	0x00000080
-#define BM_CLKCTRL_CLKSEQ_BYPASS_EMI	0x00000040
-#define BM_CLKCTRL_CLKSEQ_BYPASS_SSP	0x00000020
-#define BM_CLKCTRL_CLKSEQ_BYPASS_GPMI	0x00000010
-#define BM_CLKCTRL_CLKSEQ_BYPASS_IR	0x00000008
-#define BM_CLKCTRL_CLKSEQ_BYPASS_PIX	0x00000002
-#define BM_CLKCTRL_CLKSEQ_BYPASS_SAIF	0x00000001
-
-#define HW_CLKCTRL_RESET	(0x00000120)
-
-#define BM_CLKCTRL_RESET_CHIP	0x00000002
-#define BM_CLKCTRL_RESET_DIG	0x00000001
-
-#define HW_CLKCTRL_STATUS	(0x00000130)
-
-#define BP_CLKCTRL_STATUS_CPU_LIMIT	30
-#define BM_CLKCTRL_STATUS_CPU_LIMIT	0xC0000000
-#define BF_CLKCTRL_STATUS_CPU_LIMIT(v) \
-		(((v) << 30) & BM_CLKCTRL_STATUS_CPU_LIMIT)
-
-#define HW_CLKCTRL_VERSION	(0x00000140)
-
-#define BP_CLKCTRL_VERSION_MAJOR	24
-#define BM_CLKCTRL_VERSION_MAJOR	0xFF000000
-#define BF_CLKCTRL_VERSION_MAJOR(v) \
-		(((v) << 24) & BM_CLKCTRL_VERSION_MAJOR)
-#define BP_CLKCTRL_VERSION_MINOR	16
-#define BM_CLKCTRL_VERSION_MINOR	0x00FF0000
-#define BF_CLKCTRL_VERSION_MINOR(v)  \
-		(((v) << 16) & BM_CLKCTRL_VERSION_MINOR)
-#define BP_CLKCTRL_VERSION_STEP	0
-#define BM_CLKCTRL_VERSION_STEP	0x0000FFFF
-#define BF_CLKCTRL_VERSION_STEP(v)  \
-		(((v) << 0) & BM_CLKCTRL_VERSION_STEP)
-
-#endif /* __REGS_CLKCTRL_MX23_H__ */
diff --git a/arch/arm/mach-mxs/regs-clkctrl-mx28.h b/arch/arm/mach-mxs/regs-clkctrl-mx28.h
deleted file mode 100644
index 7d1b061..0000000
--- a/arch/arm/mach-mxs/regs-clkctrl-mx28.h
+++ /dev/null
@@ -1,486 +0,0 @@
-/*
- * Freescale CLKCTRL Register Definitions
- *
- * Copyright 2009-2010 Freescale Semiconductor, Inc. All Rights Reserved.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307 USA
- *
- * This file is generated from an XML file. Do not edit it.
- *
- * Xml Revision: 1.48
- * Template revision: 26195
- */
-
-#ifndef __REGS_CLKCTRL_MX28_H__
-#define __REGS_CLKCTRL_MX28_H__
-
-#define HW_CLKCTRL_PLL0CTRL0	(0x00000000)
-#define HW_CLKCTRL_PLL0CTRL0_SET	(0x00000004)
-#define HW_CLKCTRL_PLL0CTRL0_CLR	(0x00000008)
-#define HW_CLKCTRL_PLL0CTRL0_TOG	(0x0000000c)
-
-#define BP_CLKCTRL_PLL0CTRL0_LFR_SEL	28
-#define BM_CLKCTRL_PLL0CTRL0_LFR_SEL	0x30000000
-#define BF_CLKCTRL_PLL0CTRL0_LFR_SEL(v)  \
-		(((v) << 28) & BM_CLKCTRL_PLL0CTRL0_LFR_SEL)
-#define BV_CLKCTRL_PLL0CTRL0_LFR_SEL__DEFAULT   0x0
-#define BV_CLKCTRL_PLL0CTRL0_LFR_SEL__TIMES_2   0x1
-#define BV_CLKCTRL_PLL0CTRL0_LFR_SEL__TIMES_05  0x2
-#define BV_CLKCTRL_PLL0CTRL0_LFR_SEL__UNDEFINED 0x3
-#define BP_CLKCTRL_PLL0CTRL0_CP_SEL	24
-#define BM_CLKCTRL_PLL0CTRL0_CP_SEL	0x03000000
-#define BF_CLKCTRL_PLL0CTRL0_CP_SEL(v)  \
-		(((v) << 24) & BM_CLKCTRL_PLL0CTRL0_CP_SEL)
-#define BV_CLKCTRL_PLL0CTRL0_CP_SEL__DEFAULT   0x0
-#define BV_CLKCTRL_PLL0CTRL0_CP_SEL__TIMES_2   0x1
-#define BV_CLKCTRL_PLL0CTRL0_CP_SEL__TIMES_05  0x2
-#define BV_CLKCTRL_PLL0CTRL0_CP_SEL__UNDEFINED 0x3
-#define BP_CLKCTRL_PLL0CTRL0_DIV_SEL	20
-#define BM_CLKCTRL_PLL0CTRL0_DIV_SEL	0x00300000
-#define BF_CLKCTRL_PLL0CTRL0_DIV_SEL(v)  \
-		(((v) << 20) & BM_CLKCTRL_PLL0CTRL0_DIV_SEL)
-#define BV_CLKCTRL_PLL0CTRL0_DIV_SEL__DEFAULT   0x0
-#define BV_CLKCTRL_PLL0CTRL0_DIV_SEL__LOWER     0x1
-#define BV_CLKCTRL_PLL0CTRL0_DIV_SEL__LOWEST    0x2
-#define BV_CLKCTRL_PLL0CTRL0_DIV_SEL__UNDEFINED 0x3
-#define BM_CLKCTRL_PLL0CTRL0_EN_USB_CLKS	0x00040000
-#define BM_CLKCTRL_PLL0CTRL0_POWER	0x00020000
-
-#define HW_CLKCTRL_PLL0CTRL1	(0x00000010)
-
-#define BM_CLKCTRL_PLL0CTRL1_LOCK	0x80000000
-#define BM_CLKCTRL_PLL0CTRL1_FORCE_LOCK	0x40000000
-#define BP_CLKCTRL_PLL0CTRL1_LOCK_COUNT	0
-#define BM_CLKCTRL_PLL0CTRL1_LOCK_COUNT	0x0000FFFF
-#define BF_CLKCTRL_PLL0CTRL1_LOCK_COUNT(v)  \
-		(((v) << 0) & BM_CLKCTRL_PLL0CTRL1_LOCK_COUNT)
-
-#define HW_CLKCTRL_PLL1CTRL0	(0x00000020)
-#define HW_CLKCTRL_PLL1CTRL0_SET	(0x00000024)
-#define HW_CLKCTRL_PLL1CTRL0_CLR	(0x00000028)
-#define HW_CLKCTRL_PLL1CTRL0_TOG	(0x0000002c)
-
-#define BM_CLKCTRL_PLL1CTRL0_CLKGATEEMI	0x80000000
-#define BP_CLKCTRL_PLL1CTRL0_LFR_SEL	28
-#define BM_CLKCTRL_PLL1CTRL0_LFR_SEL	0x30000000
-#define BF_CLKCTRL_PLL1CTRL0_LFR_SEL(v)  \
-		(((v) << 28) & BM_CLKCTRL_PLL1CTRL0_LFR_SEL)
-#define BV_CLKCTRL_PLL1CTRL0_LFR_SEL__DEFAULT   0x0
-#define BV_CLKCTRL_PLL1CTRL0_LFR_SEL__TIMES_2   0x1
-#define BV_CLKCTRL_PLL1CTRL0_LFR_SEL__TIMES_05  0x2
-#define BV_CLKCTRL_PLL1CTRL0_LFR_SEL__UNDEFINED 0x3
-#define BP_CLKCTRL_PLL1CTRL0_CP_SEL	24
-#define BM_CLKCTRL_PLL1CTRL0_CP_SEL	0x03000000
-#define BF_CLKCTRL_PLL1CTRL0_CP_SEL(v)  \
-		(((v) << 24) & BM_CLKCTRL_PLL1CTRL0_CP_SEL)
-#define BV_CLKCTRL_PLL1CTRL0_CP_SEL__DEFAULT   0x0
-#define BV_CLKCTRL_PLL1CTRL0_CP_SEL__TIMES_2   0x1
-#define BV_CLKCTRL_PLL1CTRL0_CP_SEL__TIMES_05  0x2
-#define BV_CLKCTRL_PLL1CTRL0_CP_SEL__UNDEFINED 0x3
-#define BP_CLKCTRL_PLL1CTRL0_DIV_SEL	20
-#define BM_CLKCTRL_PLL1CTRL0_DIV_SEL	0x00300000
-#define BF_CLKCTRL_PLL1CTRL0_DIV_SEL(v)  \
-		(((v) << 20) & BM_CLKCTRL_PLL1CTRL0_DIV_SEL)
-#define BV_CLKCTRL_PLL1CTRL0_DIV_SEL__DEFAULT   0x0
-#define BV_CLKCTRL_PLL1CTRL0_DIV_SEL__LOWER     0x1
-#define BV_CLKCTRL_PLL1CTRL0_DIV_SEL__LOWEST    0x2
-#define BV_CLKCTRL_PLL1CTRL0_DIV_SEL__UNDEFINED 0x3
-#define BM_CLKCTRL_PLL1CTRL0_EN_USB_CLKS	0x00040000
-#define BM_CLKCTRL_PLL1CTRL0_POWER	0x00020000
-
-#define HW_CLKCTRL_PLL1CTRL1	(0x00000030)
-
-#define BM_CLKCTRL_PLL1CTRL1_LOCK	0x80000000
-#define BM_CLKCTRL_PLL1CTRL1_FORCE_LOCK	0x40000000
-#define BP_CLKCTRL_PLL1CTRL1_LOCK_COUNT	0
-#define BM_CLKCTRL_PLL1CTRL1_LOCK_COUNT	0x0000FFFF
-#define BF_CLKCTRL_PLL1CTRL1_LOCK_COUNT(v)  \
-		(((v) << 0) & BM_CLKCTRL_PLL1CTRL1_LOCK_COUNT)
-
-#define HW_CLKCTRL_PLL2CTRL0	(0x00000040)
-#define HW_CLKCTRL_PLL2CTRL0_SET	(0x00000044)
-#define HW_CLKCTRL_PLL2CTRL0_CLR	(0x00000048)
-#define HW_CLKCTRL_PLL2CTRL0_TOG	(0x0000004c)
-
-#define BM_CLKCTRL_PLL2CTRL0_CLKGATE	0x80000000
-#define BP_CLKCTRL_PLL2CTRL0_LFR_SEL	28
-#define BM_CLKCTRL_PLL2CTRL0_LFR_SEL	0x30000000
-#define BF_CLKCTRL_PLL2CTRL0_LFR_SEL(v)  \
-		(((v) << 28) & BM_CLKCTRL_PLL2CTRL0_LFR_SEL)
-#define BM_CLKCTRL_PLL2CTRL0_HOLD_RING_OFF_B	0x04000000
-#define BP_CLKCTRL_PLL2CTRL0_CP_SEL	24
-#define BM_CLKCTRL_PLL2CTRL0_CP_SEL	0x03000000
-#define BF_CLKCTRL_PLL2CTRL0_CP_SEL(v)  \
-		(((v) << 24) & BM_CLKCTRL_PLL2CTRL0_CP_SEL)
-#define BM_CLKCTRL_PLL2CTRL0_POWER	0x00800000
-
-#define HW_CLKCTRL_CPU	(0x00000050)
-#define HW_CLKCTRL_CPU_SET	(0x00000054)
-#define HW_CLKCTRL_CPU_CLR	(0x00000058)
-#define HW_CLKCTRL_CPU_TOG	(0x0000005c)
-
-#define BM_CLKCTRL_CPU_BUSY_REF_XTAL	0x20000000
-#define BM_CLKCTRL_CPU_BUSY_REF_CPU	0x10000000
-#define BM_CLKCTRL_CPU_DIV_XTAL_FRAC_EN	0x04000000
-#define BP_CLKCTRL_CPU_DIV_XTAL	16
-#define BM_CLKCTRL_CPU_DIV_XTAL	0x03FF0000
-#define BF_CLKCTRL_CPU_DIV_XTAL(v)  \
-		(((v) << 16) & BM_CLKCTRL_CPU_DIV_XTAL)
-#define BM_CLKCTRL_CPU_INTERRUPT_WAIT	0x00001000
-#define BM_CLKCTRL_CPU_DIV_CPU_FRAC_EN	0x00000400
-#define BP_CLKCTRL_CPU_DIV_CPU	0
-#define BM_CLKCTRL_CPU_DIV_CPU	0x0000003F
-#define BF_CLKCTRL_CPU_DIV_CPU(v)  \
-		(((v) << 0) & BM_CLKCTRL_CPU_DIV_CPU)
-
-#define HW_CLKCTRL_HBUS	(0x00000060)
-#define HW_CLKCTRL_HBUS_SET	(0x00000064)
-#define HW_CLKCTRL_HBUS_CLR	(0x00000068)
-#define HW_CLKCTRL_HBUS_TOG	(0x0000006c)
-
-#define BM_CLKCTRL_HBUS_ASM_BUSY	0x80000000
-#define BM_CLKCTRL_HBUS_DCP_AS_ENABLE	0x40000000
-#define BM_CLKCTRL_HBUS_PXP_AS_ENABLE	0x20000000
-#define BM_CLKCTRL_HBUS_ASM_EMIPORT_AS_ENABLE	0x08000000
-#define BM_CLKCTRL_HBUS_APBHDMA_AS_ENABLE	0x04000000
-#define BM_CLKCTRL_HBUS_APBXDMA_AS_ENABLE	0x02000000
-#define BM_CLKCTRL_HBUS_TRAFFIC_JAM_AS_ENABLE	0x01000000
-#define BM_CLKCTRL_HBUS_TRAFFIC_AS_ENABLE	0x00800000
-#define BM_CLKCTRL_HBUS_CPU_DATA_AS_ENABLE	0x00400000
-#define BM_CLKCTRL_HBUS_CPU_INSTR_AS_ENABLE	0x00200000
-#define BM_CLKCTRL_HBUS_ASM_ENABLE	0x00100000
-#define BM_CLKCTRL_HBUS_AUTO_CLEAR_DIV_ENABLE	0x00080000
-#define BP_CLKCTRL_HBUS_SLOW_DIV	16
-#define BM_CLKCTRL_HBUS_SLOW_DIV	0x00070000
-#define BF_CLKCTRL_HBUS_SLOW_DIV(v)  \
-		(((v) << 16) & BM_CLKCTRL_HBUS_SLOW_DIV)
-#define BV_CLKCTRL_HBUS_SLOW_DIV__BY1  0x0
-#define BV_CLKCTRL_HBUS_SLOW_DIV__BY2  0x1
-#define BV_CLKCTRL_HBUS_SLOW_DIV__BY4  0x2
-#define BV_CLKCTRL_HBUS_SLOW_DIV__BY8  0x3
-#define BV_CLKCTRL_HBUS_SLOW_DIV__BY16 0x4
-#define BV_CLKCTRL_HBUS_SLOW_DIV__BY32 0x5
-#define BM_CLKCTRL_HBUS_DIV_FRAC_EN	0x00000020
-#define BP_CLKCTRL_HBUS_DIV	0
-#define BM_CLKCTRL_HBUS_DIV	0x0000001F
-#define BF_CLKCTRL_HBUS_DIV(v)  \
-		(((v) << 0) & BM_CLKCTRL_HBUS_DIV)
-
-#define HW_CLKCTRL_XBUS	(0x00000070)
-
-#define BM_CLKCTRL_XBUS_BUSY	0x80000000
-#define BM_CLKCTRL_XBUS_AUTO_CLEAR_DIV_ENABLE	0x00000800
-#define BM_CLKCTRL_XBUS_DIV_FRAC_EN	0x00000400
-#define BP_CLKCTRL_XBUS_DIV	0
-#define BM_CLKCTRL_XBUS_DIV	0x000003FF
-#define BF_CLKCTRL_XBUS_DIV(v)  \
-		(((v) << 0) & BM_CLKCTRL_XBUS_DIV)
-
-#define HW_CLKCTRL_XTAL	(0x00000080)
-#define HW_CLKCTRL_XTAL_SET	(0x00000084)
-#define HW_CLKCTRL_XTAL_CLR	(0x00000088)
-#define HW_CLKCTRL_XTAL_TOG	(0x0000008c)
-
-#define BP_CLKCTRL_XTAL_UART_CLK_GATE	31
-#define BM_CLKCTRL_XTAL_UART_CLK_GATE	0x80000000
-#define BP_CLKCTRL_XTAL_PWM_CLK24M_GATE	29
-#define BM_CLKCTRL_XTAL_PWM_CLK24M_GATE	0x20000000
-#define BP_CLKCTRL_XTAL_TIMROT_CLK32K_GATE	26
-#define BM_CLKCTRL_XTAL_TIMROT_CLK32K_GATE	0x04000000
-#define BP_CLKCTRL_XTAL_DIV_UART	0
-#define BM_CLKCTRL_XTAL_DIV_UART	0x00000003
-#define BF_CLKCTRL_XTAL_DIV_UART(v)  \
-		(((v) << 0) & BM_CLKCTRL_XTAL_DIV_UART)
-
-#define HW_CLKCTRL_SSP0	(0x00000090)
-
-#define BP_CLKCTRL_SSP0_CLKGATE	31
-#define BM_CLKCTRL_SSP0_CLKGATE	0x80000000
-#define BM_CLKCTRL_SSP0_BUSY	0x20000000
-#define BM_CLKCTRL_SSP0_DIV_FRAC_EN	0x00000200
-#define BP_CLKCTRL_SSP0_DIV	0
-#define BM_CLKCTRL_SSP0_DIV	0x000001FF
-#define BF_CLKCTRL_SSP0_DIV(v)  \
-		(((v) << 0) & BM_CLKCTRL_SSP0_DIV)
-
-#define HW_CLKCTRL_SSP1	(0x000000a0)
-
-#define BP_CLKCTRL_SSP1_CLKGATE	31
-#define BM_CLKCTRL_SSP1_CLKGATE	0x80000000
-#define BM_CLKCTRL_SSP1_BUSY	0x20000000
-#define BM_CLKCTRL_SSP1_DIV_FRAC_EN	0x00000200
-#define BP_CLKCTRL_SSP1_DIV	0
-#define BM_CLKCTRL_SSP1_DIV	0x000001FF
-#define BF_CLKCTRL_SSP1_DIV(v)  \
-		(((v) << 0) & BM_CLKCTRL_SSP1_DIV)
-
-#define HW_CLKCTRL_SSP2	(0x000000b0)
-
-#define BP_CLKCTRL_SSP2_CLKGATE	31
-#define BM_CLKCTRL_SSP2_CLKGATE	0x80000000
-#define BM_CLKCTRL_SSP2_BUSY	0x20000000
-#define BM_CLKCTRL_SSP2_DIV_FRAC_EN	0x00000200
-#define BP_CLKCTRL_SSP2_DIV	0
-#define BM_CLKCTRL_SSP2_DIV	0x000001FF
-#define BF_CLKCTRL_SSP2_DIV(v)  \
-		(((v) << 0) & BM_CLKCTRL_SSP2_DIV)
-
-#define HW_CLKCTRL_SSP3	(0x000000c0)
-
-#define BP_CLKCTRL_SSP3_CLKGATE	31
-#define BM_CLKCTRL_SSP3_CLKGATE	0x80000000
-#define BM_CLKCTRL_SSP3_BUSY	0x20000000
-#define BM_CLKCTRL_SSP3_DIV_FRAC_EN	0x00000200
-#define BP_CLKCTRL_SSP3_DIV	0
-#define BM_CLKCTRL_SSP3_DIV	0x000001FF
-#define BF_CLKCTRL_SSP3_DIV(v)  \
-		(((v) << 0) & BM_CLKCTRL_SSP3_DIV)
-
-#define HW_CLKCTRL_GPMI	(0x000000d0)
-
-#define BP_CLKCTRL_GPMI_CLKGATE	31
-#define BM_CLKCTRL_GPMI_CLKGATE	0x80000000
-#define BM_CLKCTRL_GPMI_BUSY	0x20000000
-#define BM_CLKCTRL_GPMI_DIV_FRAC_EN	0x00000400
-#define BP_CLKCTRL_GPMI_DIV	0
-#define BM_CLKCTRL_GPMI_DIV	0x000003FF
-#define BF_CLKCTRL_GPMI_DIV(v)  \
-		(((v) << 0) & BM_CLKCTRL_GPMI_DIV)
-
-#define HW_CLKCTRL_SPDIF	(0x000000e0)
-
-#define BP_CLKCTRL_SPDIF_CLKGATE	31
-#define BM_CLKCTRL_SPDIF_CLKGATE	0x80000000
-
-#define HW_CLKCTRL_EMI	(0x000000f0)
-
-#define BP_CLKCTRL_EMI_CLKGATE	31
-#define BM_CLKCTRL_EMI_CLKGATE	0x80000000
-#define BM_CLKCTRL_EMI_SYNC_MODE_EN	0x40000000
-#define BM_CLKCTRL_EMI_BUSY_REF_XTAL	0x20000000
-#define BM_CLKCTRL_EMI_BUSY_REF_EMI	0x10000000
-#define BM_CLKCTRL_EMI_BUSY_REF_CPU	0x08000000
-#define BM_CLKCTRL_EMI_BUSY_SYNC_MODE	0x04000000
-#define BM_CLKCTRL_EMI_BUSY_DCC_RESYNC	0x00020000
-#define BM_CLKCTRL_EMI_DCC_RESYNC_ENABLE	0x00010000
-#define BP_CLKCTRL_EMI_DIV_XTAL	8
-#define BM_CLKCTRL_EMI_DIV_XTAL	0x00000F00
-#define BF_CLKCTRL_EMI_DIV_XTAL(v)  \
-		(((v) << 8) & BM_CLKCTRL_EMI_DIV_XTAL)
-#define BP_CLKCTRL_EMI_DIV_EMI	0
-#define BM_CLKCTRL_EMI_DIV_EMI	0x0000003F
-#define BF_CLKCTRL_EMI_DIV_EMI(v)  \
-		(((v) << 0) & BM_CLKCTRL_EMI_DIV_EMI)
-
-#define HW_CLKCTRL_SAIF0	(0x00000100)
-
-#define BP_CLKCTRL_SAIF0_CLKGATE	31
-#define BM_CLKCTRL_SAIF0_CLKGATE	0x80000000
-#define BM_CLKCTRL_SAIF0_BUSY	0x20000000
-#define BM_CLKCTRL_SAIF0_DIV_FRAC_EN	0x00010000
-#define BP_CLKCTRL_SAIF0_DIV	0
-#define BM_CLKCTRL_SAIF0_DIV	0x0000FFFF
-#define BF_CLKCTRL_SAIF0_DIV(v)  \
-		(((v) << 0) & BM_CLKCTRL_SAIF0_DIV)
-
-#define HW_CLKCTRL_SAIF1	(0x00000110)
-
-#define BP_CLKCTRL_SAIF1_CLKGATE	31
-#define BM_CLKCTRL_SAIF1_CLKGATE	0x80000000
-#define BM_CLKCTRL_SAIF1_BUSY	0x20000000
-#define BM_CLKCTRL_SAIF1_DIV_FRAC_EN	0x00010000
-#define BP_CLKCTRL_SAIF1_DIV	0
-#define BM_CLKCTRL_SAIF1_DIV	0x0000FFFF
-#define BF_CLKCTRL_SAIF1_DIV(v)  \
-		(((v) << 0) & BM_CLKCTRL_SAIF1_DIV)
-
-#define HW_CLKCTRL_DIS_LCDIF	(0x00000120)
-
-#define BP_CLKCTRL_DIS_LCDIF_CLKGATE	31
-#define BM_CLKCTRL_DIS_LCDIF_CLKGATE	0x80000000
-#define BM_CLKCTRL_DIS_LCDIF_BUSY	0x20000000
-#define BM_CLKCTRL_DIS_LCDIF_DIV_FRAC_EN	0x00002000
-#define BP_CLKCTRL_DIS_LCDIF_DIV	0
-#define BM_CLKCTRL_DIS_LCDIF_DIV	0x00001FFF
-#define BF_CLKCTRL_DIS_LCDIF_DIV(v)  \
-		(((v) << 0) & BM_CLKCTRL_DIS_LCDIF_DIV)
-
-#define HW_CLKCTRL_ETM	(0x00000130)
-
-#define BM_CLKCTRL_ETM_CLKGATE	0x80000000
-#define BM_CLKCTRL_ETM_BUSY	0x20000000
-#define BM_CLKCTRL_ETM_DIV_FRAC_EN	0x00000080
-#define BP_CLKCTRL_ETM_DIV	0
-#define BM_CLKCTRL_ETM_DIV	0x0000007F
-#define BF_CLKCTRL_ETM_DIV(v)  \
-		(((v) << 0) & BM_CLKCTRL_ETM_DIV)
-
-#define HW_CLKCTRL_ENET	(0x00000140)
-
-#define BM_CLKCTRL_ENET_SLEEP	0x80000000
-#define BP_CLKCTRL_ENET_DISABLE	30
-#define BM_CLKCTRL_ENET_DISABLE	0x40000000
-#define BM_CLKCTRL_ENET_STATUS	0x20000000
-#define BM_CLKCTRL_ENET_BUSY_TIME	0x08000000
-#define BP_CLKCTRL_ENET_DIV_TIME	21
-#define BM_CLKCTRL_ENET_DIV_TIME	0x07E00000
-#define BF_CLKCTRL_ENET_DIV_TIME(v)  \
-		(((v) << 21) & BM_CLKCTRL_ENET_DIV_TIME)
-#define BM_CLKCTRL_ENET_BUSY	0x08000000
-#define BP_CLKCTRL_ENET_DIV	21
-#define BM_CLKCTRL_ENET_DIV	0x07E00000
-#define BF_CLKCTRL_ENET_DIV(v)  \
-		(((v) << 21) & BM_CLKCTRL_ENET_DIV)
-#define BP_CLKCTRL_ENET_TIME_SEL	19
-#define BM_CLKCTRL_ENET_TIME_SEL	0x00180000
-#define BF_CLKCTRL_ENET_TIME_SEL(v)  \
-		(((v) << 19) & BM_CLKCTRL_ENET_TIME_SEL)
-#define BV_CLKCTRL_ENET_TIME_SEL__XTAL      0x0
-#define BV_CLKCTRL_ENET_TIME_SEL__PLL       0x1
-#define BV_CLKCTRL_ENET_TIME_SEL__RMII_CLK  0x2
-#define BV_CLKCTRL_ENET_TIME_SEL__UNDEFINED 0x3
-#define BM_CLKCTRL_ENET_CLK_OUT_EN	0x00040000
-#define BM_CLKCTRL_ENET_RESET_BY_SW_CHIP	0x00020000
-#define BM_CLKCTRL_ENET_RESET_BY_SW	0x00010000
-
-#define HW_CLKCTRL_HSADC	(0x00000150)
-
-#define BM_CLKCTRL_HSADC_RESETB	0x40000000
-#define BP_CLKCTRL_HSADC_FREQDIV	28
-#define BM_CLKCTRL_HSADC_FREQDIV	0x30000000
-#define BF_CLKCTRL_HSADC_FREQDIV(v)  \
-		(((v) << 28) & BM_CLKCTRL_HSADC_FREQDIV)
-
-#define HW_CLKCTRL_FLEXCAN	(0x00000160)
-
-#define BP_CLKCTRL_FLEXCAN_STOP_CAN0	30
-#define BM_CLKCTRL_FLEXCAN_STOP_CAN0	0x40000000
-#define BM_CLKCTRL_FLEXCAN_CAN0_STATUS	0x20000000
-#define BP_CLKCTRL_FLEXCAN_STOP_CAN1	28
-#define BM_CLKCTRL_FLEXCAN_STOP_CAN1	0x10000000
-#define BM_CLKCTRL_FLEXCAN_CAN1_STATUS	0x08000000
-
-#define HW_CLKCTRL_FRAC0	(0x000001b0)
-#define HW_CLKCTRL_FRAC0_SET	(0x000001b4)
-#define HW_CLKCTRL_FRAC0_CLR	(0x000001b8)
-#define HW_CLKCTRL_FRAC0_TOG	(0x000001bc)
-
-#define BP_CLKCTRL_FRAC0_CLKGATEIO0	31
-#define BM_CLKCTRL_FRAC0_CLKGATEIO0	0x80000000
-#define BM_CLKCTRL_FRAC0_IO0_STABLE	0x40000000
-#define BP_CLKCTRL_FRAC0_IO0FRAC	24
-#define BM_CLKCTRL_FRAC0_IO0FRAC	0x3F000000
-#define BF_CLKCTRL_FRAC0_IO0FRAC(v)  \
-		(((v) << 24) & BM_CLKCTRL_FRAC0_IO0FRAC)
-#define BP_CLKCTRL_FRAC0_CLKGATEIO1	23
-#define BM_CLKCTRL_FRAC0_CLKGATEIO1	0x00800000
-#define BM_CLKCTRL_FRAC0_IO1_STABLE	0x00400000
-#define BP_CLKCTRL_FRAC0_IO1FRAC	16
-#define BM_CLKCTRL_FRAC0_IO1FRAC	0x003F0000
-#define BF_CLKCTRL_FRAC0_IO1FRAC(v)  \
-		(((v) << 16) & BM_CLKCTRL_FRAC0_IO1FRAC)
-#define BP_CLKCTRL_FRAC0_CLKGATEEMI	15
-#define BM_CLKCTRL_FRAC0_CLKGATEEMI	0x00008000
-#define BM_CLKCTRL_FRAC0_EMI_STABLE	0x00004000
-#define BP_CLKCTRL_FRAC0_EMIFRAC	8
-#define BM_CLKCTRL_FRAC0_EMIFRAC	0x00003F00
-#define BF_CLKCTRL_FRAC0_EMIFRAC(v)  \
-		(((v) << 8) & BM_CLKCTRL_FRAC0_EMIFRAC)
-#define BP_CLKCTRL_FRAC0_CLKGATECPU	7
-#define BM_CLKCTRL_FRAC0_CLKGATECPU	0x00000080
-#define BM_CLKCTRL_FRAC0_CPU_STABLE	0x00000040
-#define BP_CLKCTRL_FRAC0_CPUFRAC	0
-#define BM_CLKCTRL_FRAC0_CPUFRAC	0x0000003F
-#define BF_CLKCTRL_FRAC0_CPUFRAC(v)  \
-		(((v) << 0) & BM_CLKCTRL_FRAC0_CPUFRAC)
-
-#define HW_CLKCTRL_FRAC1	(0x000001c0)
-#define HW_CLKCTRL_FRAC1_SET	(0x000001c4)
-#define HW_CLKCTRL_FRAC1_CLR	(0x000001c8)
-#define HW_CLKCTRL_FRAC1_TOG	(0x000001cc)
-
-#define BP_CLKCTRL_FRAC1_CLKGATEGPMI	23
-#define BM_CLKCTRL_FRAC1_CLKGATEGPMI	0x00800000
-#define BM_CLKCTRL_FRAC1_GPMI_STABLE	0x00400000
-#define BP_CLKCTRL_FRAC1_GPMIFRAC	16
-#define BM_CLKCTRL_FRAC1_GPMIFRAC	0x003F0000
-#define BF_CLKCTRL_FRAC1_GPMIFRAC(v)  \
-		(((v) << 16) & BM_CLKCTRL_FRAC1_GPMIFRAC)
-#define BP_CLKCTRL_FRAC1_CLKGATEHSADC	15
-#define BM_CLKCTRL_FRAC1_CLKGATEHSADC	0x00008000
-#define BM_CLKCTRL_FRAC1_HSADC_STABLE	0x00004000
-#define BP_CLKCTRL_FRAC1_HSADCFRAC	8
-#define BM_CLKCTRL_FRAC1_HSADCFRAC	0x00003F00
-#define BF_CLKCTRL_FRAC1_HSADCFRAC(v)  \
-		(((v) << 8) & BM_CLKCTRL_FRAC1_HSADCFRAC)
-#define BP_CLKCTRL_FRAC1_CLKGATEPIX	7
-#define BM_CLKCTRL_FRAC1_CLKGATEPIX	0x00000080
-#define BM_CLKCTRL_FRAC1_PIX_STABLE	0x00000040
-#define BP_CLKCTRL_FRAC1_PIXFRAC	0
-#define BM_CLKCTRL_FRAC1_PIXFRAC	0x0000003F
-#define BF_CLKCTRL_FRAC1_PIXFRAC(v)  \
-		(((v) << 0) & BM_CLKCTRL_FRAC1_PIXFRAC)
-
-#define HW_CLKCTRL_CLKSEQ	(0x000001d0)
-#define HW_CLKCTRL_CLKSEQ_SET	(0x000001d4)
-#define HW_CLKCTRL_CLKSEQ_CLR	(0x000001d8)
-#define HW_CLKCTRL_CLKSEQ_TOG	(0x000001dc)
-
-#define BM_CLKCTRL_CLKSEQ_BYPASS_CPU	0x00040000
-#define BM_CLKCTRL_CLKSEQ_BYPASS_DIS_LCDIF	0x00004000
-#define BV_CLKCTRL_CLKSEQ_BYPASS_DIS_LCDIF__BYPASS 0x1
-#define BV_CLKCTRL_CLKSEQ_BYPASS_DIS_LCDIF__PFD    0x0
-#define BM_CLKCTRL_CLKSEQ_BYPASS_ETM	0x00000100
-#define BM_CLKCTRL_CLKSEQ_BYPASS_EMI	0x00000080
-#define BM_CLKCTRL_CLKSEQ_BYPASS_SSP3	0x00000040
-#define BM_CLKCTRL_CLKSEQ_BYPASS_SSP2	0x00000020
-#define BM_CLKCTRL_CLKSEQ_BYPASS_SSP1	0x00000010
-#define BM_CLKCTRL_CLKSEQ_BYPASS_SSP0	0x00000008
-#define BM_CLKCTRL_CLKSEQ_BYPASS_GPMI	0x00000004
-#define BM_CLKCTRL_CLKSEQ_BYPASS_SAIF1	0x00000002
-#define BM_CLKCTRL_CLKSEQ_BYPASS_SAIF0	0x00000001
-
-#define HW_CLKCTRL_RESET	(0x000001e0)
-
-#define BM_CLKCTRL_RESET_WDOG_POR_DISABLE	0x00000020
-#define BM_CLKCTRL_RESET_EXTERNAL_RESET_ENABLE	0x00000010
-#define BM_CLKCTRL_RESET_THERMAL_RESET_ENABLE	0x00000008
-#define BM_CLKCTRL_RESET_THERMAL_RESET_DEFAULT	0x00000004
-#define BM_CLKCTRL_RESET_CHIP	0x00000002
-#define BM_CLKCTRL_RESET_DIG	0x00000001
-
-#define HW_CLKCTRL_STATUS	(0x000001f0)
-
-#define BP_CLKCTRL_STATUS_CPU_LIMIT	30
-#define BM_CLKCTRL_STATUS_CPU_LIMIT	0xC0000000
-#define BF_CLKCTRL_STATUS_CPU_LIMIT(v) \
-		(((v) << 30) & BM_CLKCTRL_STATUS_CPU_LIMIT)
-
-#define HW_CLKCTRL_VERSION	(0x00000200)
-
-#define BP_CLKCTRL_VERSION_MAJOR	24
-#define BM_CLKCTRL_VERSION_MAJOR	0xFF000000
-#define BF_CLKCTRL_VERSION_MAJOR(v) \
-		(((v) << 24) & BM_CLKCTRL_VERSION_MAJOR)
-#define BP_CLKCTRL_VERSION_MINOR	16
-#define BM_CLKCTRL_VERSION_MINOR	0x00FF0000
-#define BF_CLKCTRL_VERSION_MINOR(v)  \
-		(((v) << 16) & BM_CLKCTRL_VERSION_MINOR)
-#define BP_CLKCTRL_VERSION_STEP	0
-#define BM_CLKCTRL_VERSION_STEP	0x0000FFFF
-#define BF_CLKCTRL_VERSION_STEP(v)  \
-		(((v) << 0) & BM_CLKCTRL_VERSION_STEP)
-
-#endif /* __REGS_CLKCTRL_MX28_H__ */
diff --git a/arch/arm/mach-mxs/system.c b/arch/arm/mach-mxs/system.c
index 80ac1fc..30042e2 100644
--- a/arch/arm/mach-mxs/system.c
+++ b/arch/arm/mach-mxs/system.c
@@ -37,8 +37,6 @@
 #define MXS_MODULE_CLKGATE		(1 << 30)
 #define MXS_MODULE_SFTRST		(1 << 31)
 
-#define CLKCTRL_TIMEOUT		10	/* 10 ms */
-
 static void __iomem *mxs_clkctrl_reset_addr;
 
 /*
@@ -139,17 +137,3 @@
 	return -ETIMEDOUT;
 }
 EXPORT_SYMBOL(mxs_reset_block);
-
-int mxs_clkctrl_timeout(unsigned int reg_offset, unsigned int mask)
-{
-	unsigned long timeout = jiffies + msecs_to_jiffies(CLKCTRL_TIMEOUT);
-	while (readl_relaxed(MXS_IO_ADDRESS(MXS_CLKCTRL_BASE_ADDR)
-						+ reg_offset) & mask) {
-		if (time_after(jiffies, timeout)) {
-			pr_err("Timeout at CLKCTRL + 0x%x\n", reg_offset);
-			return -ETIMEDOUT;
-		}
-	}
-
-	return 0;
-}
diff --git a/arch/arm/mach-mxs/timer.c b/arch/arm/mach-mxs/timer.c
index 564a632..02d36de 100644
--- a/arch/arm/mach-mxs/timer.c
+++ b/arch/arm/mach-mxs/timer.c
@@ -20,6 +20,7 @@
  * MA 02110-1301, USA.
  */
 
+#include <linux/err.h>
 #include <linux/interrupt.h>
 #include <linux/irq.h>
 #include <linux/clockchips.h>
@@ -243,8 +244,16 @@
 	return 0;
 }
 
-void __init mxs_timer_init(struct clk *timer_clk, int irq)
+void __init mxs_timer_init(int irq)
 {
+	struct clk *timer_clk;
+
+	timer_clk = clk_get_sys("timrot", NULL);
+	if (IS_ERR(timer_clk)) {
+		pr_err("%s: failed to get clk\n", __func__);
+		return;
+	}
+
 	clk_prepare_enable(timer_clk);
 
 	/*
diff --git a/arch/arm/mach-nomadik/board-nhk8815.c b/arch/arm/mach-nomadik/board-nhk8815.c
index 58cacaf..2e8d3e1 100644
--- a/arch/arm/mach-nomadik/board-nhk8815.c
+++ b/arch/arm/mach-nomadik/board-nhk8815.c
@@ -111,7 +111,7 @@
 	.parts		= nhk8815_partitions,
 	.nparts		= ARRAY_SIZE(nhk8815_partitions),
 	.options	= NAND_COPYBACK | NAND_CACHEPRG | NAND_NO_PADDING \
-			| NAND_NO_READRDY | NAND_NO_AUTOINCR,
+			| NAND_NO_READRDY,
 	.init		= nhk8815_nand_init,
 };
 
diff --git a/arch/arm/mach-omap1/board-ams-delta.c b/arch/arm/mach-omap1/board-ams-delta.c
index c1b681e..f2f8a58 100644
--- a/arch/arm/mach-omap1/board-ams-delta.c
+++ b/arch/arm/mach-omap1/board-ams-delta.c
@@ -595,7 +595,12 @@
 	gpio_free(AMS_DELTA_GPIO_PIN_MODEM_IRQ);
 	return err;
 }
-late_initcall(late_init);
+
+static void __init ams_delta_init_late(void)
+{
+	omap1_init_late();
+	late_init();
+}
 
 static void __init ams_delta_map_io(void)
 {
@@ -611,6 +616,7 @@
 	.reserve	= omap_reserve,
 	.init_irq	= omap1_init_irq,
 	.init_machine	= ams_delta_init,
+	.init_late	= ams_delta_init_late,
 	.timer		= &omap1_timer,
 	.restart	= omap1_restart,
 MACHINE_END
diff --git a/arch/arm/mach-omap1/board-fsample.c b/arch/arm/mach-omap1/board-fsample.c
index 4a4afb3..6872f3f 100644
--- a/arch/arm/mach-omap1/board-fsample.c
+++ b/arch/arm/mach-omap1/board-fsample.c
@@ -192,14 +192,11 @@
 	return gpio_get_value(FSAMPLE_NAND_RB_GPIO_PIN);
 }
 
-static const char *part_probes[] = { "cmdlinepart", NULL };
-
 static struct platform_nand_data nand_data = {
 	.chip	= {
 		.nr_chips		= 1,
 		.chip_offset		= 0,
 		.options		= NAND_SAMSUNG_LP_OPTIONS,
-		.part_probe_types	= part_probes,
 	},
 	.ctrl	= {
 		.cmd_ctrl	= omap1_nand_cmd_ctl,
@@ -369,6 +366,7 @@
 	.reserve	= omap_reserve,
 	.init_irq	= omap1_init_irq,
 	.init_machine	= omap_fsample_init,
+	.init_late	= omap1_init_late,
 	.timer		= &omap1_timer,
 	.restart	= omap1_restart,
 MACHINE_END
diff --git a/arch/arm/mach-omap1/board-generic.c b/arch/arm/mach-omap1/board-generic.c
index 9a5fe58..e75e2d5 100644
--- a/arch/arm/mach-omap1/board-generic.c
+++ b/arch/arm/mach-omap1/board-generic.c
@@ -88,6 +88,7 @@
 	.reserve	= omap_reserve,
 	.init_irq	= omap1_init_irq,
 	.init_machine	= omap_generic_init,
+	.init_late	= omap1_init_late,
 	.timer		= &omap1_timer,
 	.restart	= omap1_restart,
 MACHINE_END
diff --git a/arch/arm/mach-omap1/board-h2.c b/arch/arm/mach-omap1/board-h2.c
index 057ec13..a28e989 100644
--- a/arch/arm/mach-omap1/board-h2.c
+++ b/arch/arm/mach-omap1/board-h2.c
@@ -186,8 +186,6 @@
 	return gpio_get_value(H2_NAND_RB_GPIO_PIN);
 }
 
-static const char *h2_part_probes[] = { "cmdlinepart", NULL };
-
 static struct platform_nand_data h2_nand_platdata = {
 	.chip	= {
 		.nr_chips		= 1,
@@ -195,7 +193,6 @@
 		.nr_partitions		= ARRAY_SIZE(h2_nand_partitions),
 		.partitions		= h2_nand_partitions,
 		.options		= NAND_SAMSUNG_LP_OPTIONS,
-		.part_probe_types	= h2_part_probes,
 	},
 	.ctrl	= {
 		.cmd_ctrl	= omap1_nand_cmd_ctl,
@@ -431,6 +428,7 @@
 	.reserve	= omap_reserve,
 	.init_irq	= omap1_init_irq,
 	.init_machine	= h2_init,
+	.init_late	= omap1_init_late,
 	.timer		= &omap1_timer,
 	.restart	= omap1_restart,
 MACHINE_END
diff --git a/arch/arm/mach-omap1/board-h3.c b/arch/arm/mach-omap1/board-h3.c
index f6ddf87..108a864 100644
--- a/arch/arm/mach-omap1/board-h3.c
+++ b/arch/arm/mach-omap1/board-h3.c
@@ -188,8 +188,6 @@
 	return gpio_get_value(H3_NAND_RB_GPIO_PIN);
 }
 
-static const char *part_probes[] = { "cmdlinepart", NULL };
-
 static struct platform_nand_data nand_platdata = {
 	.chip	= {
 		.nr_chips		= 1,
@@ -197,7 +195,6 @@
 		.nr_partitions		= ARRAY_SIZE(nand_partitions),
 		.partitions		= nand_partitions,
 		.options		= NAND_SAMSUNG_LP_OPTIONS,
-		.part_probe_types	= part_probes,
 	},
 	.ctrl	= {
 		.cmd_ctrl	= omap1_nand_cmd_ctl,
@@ -425,6 +422,7 @@
 	.reserve	= omap_reserve,
 	.init_irq	= omap1_init_irq,
 	.init_machine	= h3_init,
+	.init_late	= omap1_init_late,
 	.timer		= &omap1_timer,
 	.restart	= omap1_restart,
 MACHINE_END
diff --git a/arch/arm/mach-omap1/board-htcherald.c b/arch/arm/mach-omap1/board-htcherald.c
index 60c06ee..118a9d4 100644
--- a/arch/arm/mach-omap1/board-htcherald.c
+++ b/arch/arm/mach-omap1/board-htcherald.c
@@ -605,6 +605,7 @@
 	.reserve	= omap_reserve,
 	.init_irq       = omap1_init_irq,
 	.init_machine   = htcherald_init,
+	.init_late	= omap1_init_late,
 	.timer          = &omap1_timer,
 	.restart	= omap1_restart,
 MACHINE_END
diff --git a/arch/arm/mach-omap1/board-innovator.c b/arch/arm/mach-omap1/board-innovator.c
index 67d7fd5..7970223 100644
--- a/arch/arm/mach-omap1/board-innovator.c
+++ b/arch/arm/mach-omap1/board-innovator.c
@@ -457,6 +457,7 @@
 	.reserve	= omap_reserve,
 	.init_irq	= omap1_init_irq,
 	.init_machine	= innovator_init,
+	.init_late	= omap1_init_late,
 	.timer		= &omap1_timer,
 	.restart	= omap1_restart,
 MACHINE_END
diff --git a/arch/arm/mach-omap1/board-nokia770.c b/arch/arm/mach-omap1/board-nokia770.c
index d21dcc2..7212ae9 100644
--- a/arch/arm/mach-omap1/board-nokia770.c
+++ b/arch/arm/mach-omap1/board-nokia770.c
@@ -255,6 +255,7 @@
 	.reserve	= omap_reserve,
 	.init_irq	= omap1_init_irq,
 	.init_machine	= omap_nokia770_init,
+	.init_late	= omap1_init_late,
 	.timer		= &omap1_timer,
 	.restart	= omap1_restart,
 MACHINE_END
diff --git a/arch/arm/mach-omap1/board-osk.c b/arch/arm/mach-omap1/board-osk.c
index a5f85dd..da8d872 100644
--- a/arch/arm/mach-omap1/board-osk.c
+++ b/arch/arm/mach-omap1/board-osk.c
@@ -574,6 +574,7 @@
 	.reserve	= omap_reserve,
 	.init_irq	= omap1_init_irq,
 	.init_machine	= osk_init,
+	.init_late	= omap1_init_late,
 	.timer		= &omap1_timer,
 	.restart	= omap1_restart,
 MACHINE_END
diff --git a/arch/arm/mach-omap1/board-palmte.c b/arch/arm/mach-omap1/board-palmte.c
index a60e6c2..949b62a 100644
--- a/arch/arm/mach-omap1/board-palmte.c
+++ b/arch/arm/mach-omap1/board-palmte.c
@@ -267,6 +267,7 @@
 	.reserve	= omap_reserve,
 	.init_irq	= omap1_init_irq,
 	.init_machine	= omap_palmte_init,
+	.init_late	= omap1_init_late,
 	.timer		= &omap1_timer,
 	.restart	= omap1_restart,
 MACHINE_END
diff --git a/arch/arm/mach-omap1/board-palmtt.c b/arch/arm/mach-omap1/board-palmtt.c
index 8d85487..7f1e1cf 100644
--- a/arch/arm/mach-omap1/board-palmtt.c
+++ b/arch/arm/mach-omap1/board-palmtt.c
@@ -313,6 +313,7 @@
 	.reserve	= omap_reserve,
 	.init_irq	= omap1_init_irq,
 	.init_machine	= omap_palmtt_init,
+	.init_late	= omap1_init_late,
 	.timer		= &omap1_timer,
 	.restart	= omap1_restart,
 MACHINE_END
diff --git a/arch/arm/mach-omap1/board-palmz71.c b/arch/arm/mach-omap1/board-palmz71.c
index 61ed4f0..3c71c6b 100644
--- a/arch/arm/mach-omap1/board-palmz71.c
+++ b/arch/arm/mach-omap1/board-palmz71.c
@@ -330,6 +330,7 @@
 	.reserve	= omap_reserve,
 	.init_irq	= omap1_init_irq,
 	.init_machine	= omap_palmz71_init,
+	.init_late	= omap1_init_late,
 	.timer		= &omap1_timer,
 	.restart	= omap1_restart,
 MACHINE_END
diff --git a/arch/arm/mach-omap1/board-perseus2.c b/arch/arm/mach-omap1/board-perseus2.c
index a2c8889..703d55e 100644
--- a/arch/arm/mach-omap1/board-perseus2.c
+++ b/arch/arm/mach-omap1/board-perseus2.c
@@ -150,14 +150,11 @@
 	return gpio_get_value(P2_NAND_RB_GPIO_PIN);
 }
 
-static const char *part_probes[] = { "cmdlinepart", NULL };
-
 static struct platform_nand_data nand_data = {
 	.chip	= {
 		.nr_chips		= 1,
 		.chip_offset		= 0,
 		.options		= NAND_SAMSUNG_LP_OPTIONS,
-		.part_probe_types	= part_probes,
 	},
 	.ctrl	= {
 		.cmd_ctrl	= omap1_nand_cmd_ctl,
@@ -331,6 +328,7 @@
 	.reserve	= omap_reserve,
 	.init_irq	= omap1_init_irq,
 	.init_machine	= omap_perseus2_init,
+	.init_late	= omap1_init_late,
 	.timer		= &omap1_timer,
 	.restart	= omap1_restart,
 MACHINE_END
diff --git a/arch/arm/mach-omap1/board-sx1.c b/arch/arm/mach-omap1/board-sx1.c
index f34cb74..3b7b82b 100644
--- a/arch/arm/mach-omap1/board-sx1.c
+++ b/arch/arm/mach-omap1/board-sx1.c
@@ -407,6 +407,7 @@
 	.reserve	= omap_reserve,
 	.init_irq	= omap1_init_irq,
 	.init_machine	= omap_sx1_init,
+	.init_late	= omap1_init_late,
 	.timer		= &omap1_timer,
 	.restart	= omap1_restart,
 MACHINE_END
diff --git a/arch/arm/mach-omap1/board-voiceblue.c b/arch/arm/mach-omap1/board-voiceblue.c
index 37232d0..afd67f0 100644
--- a/arch/arm/mach-omap1/board-voiceblue.c
+++ b/arch/arm/mach-omap1/board-voiceblue.c
@@ -294,6 +294,7 @@
 	.reserve	= omap_reserve,
 	.init_irq	= omap1_init_irq,
 	.init_machine	= voiceblue_init,
+	.init_late	= omap1_init_late,
 	.timer		= &omap1_timer,
 	.restart	= voiceblue_restart,
 MACHINE_END
diff --git a/arch/arm/mach-omap1/common.h b/arch/arm/mach-omap1/common.h
index bb7779b..c2552b2 100644
--- a/arch/arm/mach-omap1/common.h
+++ b/arch/arm/mach-omap1/common.h
@@ -53,8 +53,18 @@
 }
 #endif
 
+#ifdef CONFIG_OMAP_SERIAL_WAKE
+int omap_serial_wakeup_init(void);
+#else
+static inline int omap_serial_wakeup_init(void)
+{
+	return 0;
+}
+#endif
+
 void omap1_init_early(void);
 void omap1_init_irq(void);
+void omap1_init_late(void);
 void omap1_restart(char, const char *);
 
 extern void __init omap_check_revision(void);
@@ -63,7 +73,14 @@
 			       unsigned int ctrl);
 
 extern struct sys_timer omap1_timer;
-extern bool omap_32k_timer_init(void);
+#ifdef CONFIG_OMAP_32K_TIMER
+extern int omap_32k_timer_init(void);
+#else
+static inline int __init omap_32k_timer_init(void)
+{
+	return -ENODEV;
+}
+#endif
 
 extern u32 omap_irq_flags;
 
diff --git a/arch/arm/mach-omap1/devices.c b/arch/arm/mach-omap1/devices.c
index dcd8ddb..fa1fa4d 100644
--- a/arch/arm/mach-omap1/devices.c
+++ b/arch/arm/mach-omap1/devices.c
@@ -22,6 +22,7 @@
 #include <plat/tc.h>
 #include <plat/board.h>
 #include <plat/mux.h>
+#include <plat/dma.h>
 #include <plat/mmc.h>
 #include <plat/omap7xx.h>
 
@@ -31,6 +32,22 @@
 #include "common.h"
 #include "clock.h"
 
+#if defined(CONFIG_SND_SOC) || defined(CONFIG_SND_SOC_MODULE)
+
+static struct platform_device omap_pcm = {
+	.name	= "omap-pcm-audio",
+	.id	= -1,
+};
+
+static void omap_init_audio(void)
+{
+	platform_device_register(&omap_pcm);
+}
+
+#else
+static inline void omap_init_audio(void) {}
+#endif
+
 /*-------------------------------------------------------------------------*/
 
 #if defined(CONFIG_RTC_DRV_OMAP) || defined(CONFIG_RTC_DRV_OMAP_MODULE)
@@ -128,6 +145,56 @@
 	}
 }
 
+#define OMAP_MMC_NR_RES		4
+
+/*
+ * Register MMC devices.
+ */
+static int __init omap_mmc_add(const char *name, int id, unsigned long base,
+				unsigned long size, unsigned int irq,
+				unsigned rx_req, unsigned tx_req,
+				struct omap_mmc_platform_data *data)
+{
+	struct platform_device *pdev;
+	struct resource res[OMAP_MMC_NR_RES];
+	int ret;
+
+	pdev = platform_device_alloc(name, id);
+	if (!pdev)
+		return -ENOMEM;
+
+	memset(res, 0, OMAP_MMC_NR_RES * sizeof(struct resource));
+	res[0].start = base;
+	res[0].end = base + size - 1;
+	res[0].flags = IORESOURCE_MEM;
+	res[1].start = res[1].end = irq;
+	res[1].flags = IORESOURCE_IRQ;
+	res[2].start = rx_req;
+	res[2].name = "rx";
+	res[2].flags = IORESOURCE_DMA;
+	res[3].start = tx_req;
+	res[3].name = "tx";
+	res[3].flags = IORESOURCE_DMA;
+
+	ret = platform_device_add_resources(pdev, res, ARRAY_SIZE(res));
+	if (ret == 0)
+		ret = platform_device_add_data(pdev, data, sizeof(*data));
+	if (ret)
+		goto fail;
+
+	ret = platform_device_add(pdev);
+	if (ret)
+		goto fail;
+
+	/* return device handle to board setup code */
+	data->dev = &pdev->dev;
+	return 0;
+
+fail:
+	platform_device_put(pdev);
+	return ret;
+}
+
 void __init omap1_init_mmc(struct omap_mmc_platform_data **mmc_data,
 			int nr_controllers)
 {
@@ -135,6 +202,7 @@
 
 	for (i = 0; i < nr_controllers; i++) {
 		unsigned long base, size;
+		unsigned rx_req, tx_req;
 		unsigned int irq = 0;
 
 		if (!mmc_data[i])
@@ -146,19 +214,24 @@
 		case 0:
 			base = OMAP1_MMC1_BASE;
 			irq = INT_MMC;
+			rx_req = OMAP_DMA_MMC_RX;
+			tx_req = OMAP_DMA_MMC_TX;
 			break;
 		case 1:
 			if (!cpu_is_omap16xx())
 				return;
 			base = OMAP1_MMC2_BASE;
 			irq = INT_1610_MMC2;
+			rx_req = OMAP_DMA_MMC2_RX;
+			tx_req = OMAP_DMA_MMC2_TX;
 			break;
 		default:
 			continue;
 		}
 		size = OMAP1_MMC_SIZE;
 
-		omap_mmc_add("mmci-omap", i, base, size, irq, mmc_data[i]);
+		omap_mmc_add("mmci-omap", i, base, size, irq,
+				rx_req, tx_req, mmc_data[i]);
 	};
 }
 
@@ -242,23 +315,48 @@
 
 static inline void omap_init_sti(void) {}
 
-#if defined(CONFIG_SND_SOC) || defined(CONFIG_SND_SOC_MODULE)
+/* Numbering for the SPI-capable controllers when used for SPI:
+ * spi		= 1
+ * uwire	= 2
+ * mmc1..2	= 3..4
+ * mcbsp1..3	= 5..7
+ */
 
-static struct platform_device omap_pcm = {
-	.name	= "omap-pcm-audio",
-	.id	= -1,
+#if defined(CONFIG_SPI_OMAP_UWIRE) || defined(CONFIG_SPI_OMAP_UWIRE_MODULE)
+
+#define	OMAP_UWIRE_BASE		0xfffb3000
+
+static struct resource uwire_resources[] = {
+	{
+		.start		= OMAP_UWIRE_BASE,
+		.end		= OMAP_UWIRE_BASE + 0x20,
+		.flags		= IORESOURCE_MEM,
+	},
 };
 
-static void omap_init_audio(void)
-{
-	platform_device_register(&omap_pcm);
-}
+static struct platform_device omap_uwire_device = {
+	.name	   = "omap_uwire",
+	.id	     = -1,
+	.num_resources	= ARRAY_SIZE(uwire_resources),
+	.resource	= uwire_resources,
+};
 
+static void omap_init_uwire(void)
+{
+	/* FIXME define and use a boot tag; not all boards will be hooking
+	 * up devices to the microwire controller, and multi-board configs
+	 * mean that CONFIG_SPI_OMAP_UWIRE may be configured anyway...
+	 */
+
+	/* board-specific code must configure chipselects (only a few
+	 * are normally used) and SCLK/SDI/SDO (each has two choices).
+	 */
+	(void) platform_device_register(&omap_uwire_device);
+}
 #else
-static inline void omap_init_audio(void) {}
+static inline void omap_init_uwire(void) {}
 #endif
 
-/*-------------------------------------------------------------------------*/
 
 /*
  * This gets called after board-specific INIT_MACHINE, and initializes most
@@ -292,11 +390,12 @@
 	 * in alphabetical order so they're easier to sort through.
 	 */
 
+	omap_init_audio();
 	omap_init_mbox();
 	omap_init_rtc();
 	omap_init_spi100k();
 	omap_init_sti();
-	omap_init_audio();
+	omap_init_uwire();
 
 	return 0;
 }
diff --git a/arch/arm/mach-omap1/io.c b/arch/arm/mach-omap1/io.c
index 71ce017..6c95a59 100644
--- a/arch/arm/mach-omap1/io.c
+++ b/arch/arm/mach-omap1/io.c
@@ -137,6 +137,11 @@
 	omap_init_consistent_dma_size();
 }
 
+void __init omap1_init_late(void)
+{
+	omap_serial_wakeup_init();
+}
+
 /*
  * NOTE: Please use ioremap + __raw_read/write where possible instead of these
  */
diff --git a/arch/arm/mach-omap1/serial.c b/arch/arm/mach-omap1/serial.c
index 93ae8f2..6809c9e 100644
--- a/arch/arm/mach-omap1/serial.c
+++ b/arch/arm/mach-omap1/serial.c
@@ -237,7 +237,7 @@
 	enable_irq_wake(gpio_to_irq(gpio_nr));
 }
 
-static int __init omap_serial_wakeup_init(void)
+int __init omap_serial_wakeup_init(void)
 {
 	if (!cpu_is_omap16xx())
 		return 0;
@@ -251,7 +251,6 @@
 
 	return 0;
 }
-late_initcall(omap_serial_wakeup_init);
 
 #endif	/* CONFIG_OMAP_SERIAL_WAKE */
 
diff --git a/arch/arm/mach-omap1/time.c b/arch/arm/mach-omap1/time.c
index 4d8dd9a..4062480 100644
--- a/arch/arm/mach-omap1/time.c
+++ b/arch/arm/mach-omap1/time.c
@@ -232,20 +232,6 @@
 }
 #endif	/* CONFIG_OMAP_MPU_TIMER */
 
-static inline int omap_32k_timer_usable(void)
-{
-	int res = false;
-
-	if (cpu_is_omap730() || cpu_is_omap15xx())
-		return res;
-
-#ifdef CONFIG_OMAP_32K_TIMER
-	res = omap_32k_timer_init();
-#endif
-
-	return res;
-}
-
 /*
  * ---------------------------------------------------------------------------
  * Timer initialization
@@ -253,7 +239,7 @@
  */
 static void __init omap1_timer_init(void)
 {
-	if (!omap_32k_timer_usable())
+	if (omap_32k_timer_init() != 0)
 		omap_mpu_timer_init();
 }
 
diff --git a/arch/arm/mach-omap1/timer32k.c b/arch/arm/mach-omap1/timer32k.c
index 325b9a0..eae49c3 100644
--- a/arch/arm/mach-omap1/timer32k.c
+++ b/arch/arm/mach-omap1/timer32k.c
@@ -71,6 +71,7 @@
 
 /* 16xx specific defines */
 #define OMAP1_32K_TIMER_BASE		0xfffb9000
+#define OMAP1_32KSYNC_TIMER_BASE	0xfffbc400
 #define OMAP1_32K_TIMER_CR		0x08
 #define OMAP1_32K_TIMER_TVR		0x00
 #define OMAP1_32K_TIMER_TCR		0x04
@@ -182,10 +183,29 @@
  * Timer initialization
  * ---------------------------------------------------------------------------
  */
-bool __init omap_32k_timer_init(void)
+int __init omap_32k_timer_init(void)
 {
-	omap_init_clocksource_32k();
-	omap_init_32k_timer();
+	int ret = -ENODEV;
 
-	return true;
+	if (cpu_is_omap16xx()) {
+		void __iomem *base;
+		struct clk *sync32k_ick;
+
+		base = ioremap(OMAP1_32KSYNC_TIMER_BASE, SZ_1K);
+		if (!base) {
+			pr_err("32k_counter: failed to map base addr\n");
+			return -ENODEV;
+		}
+
+		sync32k_ick = clk_get(NULL, "omap_32ksync_ick");
+		if (!IS_ERR(sync32k_ick))
+			clk_enable(sync32k_ick);
+
+		ret = omap_init_clocksource_32k(base);
+	}
+
+	if (!ret)
+		omap_init_32k_timer();
+
+	return ret;
 }
diff --git a/arch/arm/mach-omap2/Kconfig b/arch/arm/mach-omap2/Kconfig
index 964ee67..4cf5142 100644
--- a/arch/arm/mach-omap2/Kconfig
+++ b/arch/arm/mach-omap2/Kconfig
@@ -78,12 +78,12 @@
 	default y
 	select ARCH_OMAP_OTG
 
-config SOC_OMAPTI81XX
+config SOC_TI81XX
 	bool "TI81XX support"
 	depends on ARCH_OMAP3
 	default y
 
-config SOC_OMAPAM33XX
+config SOC_AM33XX
 	bool "AM33XX support"
 	depends on ARCH_OMAP3
 	default y
@@ -320,12 +320,12 @@
 
 config MACH_TI8168EVM
 	bool "TI8168 Evaluation Module"
-	depends on SOC_OMAPTI81XX
+	depends on SOC_TI81XX
 	default y
 
 config MACH_TI8148EVM
 	bool "TI8148 Evaluation Module"
-	depends on SOC_OMAPTI81XX
+	depends on SOC_TI81XX
 	default y
 
 config MACH_OMAP_4430SDP
diff --git a/arch/arm/mach-omap2/Makefile b/arch/arm/mach-omap2/Makefile
index 385c083..fa742f3 100644
--- a/arch/arm/mach-omap2/Makefile
+++ b/arch/arm/mach-omap2/Makefile
@@ -24,10 +24,11 @@
 obj-$(CONFIG_TWL4030_CORE) += omap_twl.o
 
 # SMP support ONLY available for OMAP4
+
 obj-$(CONFIG_SMP)			+= omap-smp.o omap-headsmp.o
 obj-$(CONFIG_HOTPLUG_CPU)		+= omap-hotplug.o
-obj-$(CONFIG_ARCH_OMAP4)		+= omap4-common.o omap-wakeupgen.o \
-					   sleep44xx.o
+obj-$(CONFIG_ARCH_OMAP4)		+= omap4-common.o omap-wakeupgen.o
+obj-$(CONFIG_ARCH_OMAP4)		+= sleep44xx.o
 
 plus_sec := $(call as-instr,.arch_extension sec,+sec)
 AFLAGS_omap-headsmp.o			:=-Wa,-march=armv7-a$(plus_sec)
@@ -64,10 +65,10 @@
 ifeq ($(CONFIG_PM),y)
 obj-$(CONFIG_ARCH_OMAP2)		+= pm24xx.o
 obj-$(CONFIG_ARCH_OMAP2)		+= sleep24xx.o
-obj-$(CONFIG_ARCH_OMAP3)		+= pm34xx.o sleep34xx.o \
-					   cpuidle34xx.o
-obj-$(CONFIG_ARCH_OMAP4)		+= pm44xx.o omap-mpuss-lowpower.o \
-					   cpuidle44xx.o
+obj-$(CONFIG_ARCH_OMAP3)		+= pm34xx.o sleep34xx.o
+obj-$(CONFIG_ARCH_OMAP3)		+= cpuidle34xx.o
+obj-$(CONFIG_ARCH_OMAP4)		+= pm44xx.o omap-mpuss-lowpower.o
+obj-$(CONFIG_ARCH_OMAP4)		+= cpuidle44xx.o
 obj-$(CONFIG_PM_DEBUG)			+= pm-debug.o
 obj-$(CONFIG_OMAP_SMARTREFLEX)          += sr_device.o smartreflex.o
 obj-$(CONFIG_OMAP_SMARTREFLEX_CLASS3)	+= smartreflex-class3.o
@@ -84,90 +85,86 @@
 # PRCM
 obj-y					+= prm_common.o
 obj-$(CONFIG_ARCH_OMAP2)		+= prcm.o cm2xxx_3xxx.o prm2xxx_3xxx.o
-obj-$(CONFIG_ARCH_OMAP3)		+= prcm.o cm2xxx_3xxx.o prm2xxx_3xxx.o \
-					   vc3xxx_data.o vp3xxx_data.o
-# XXX The presence of cm2xxx_3xxx.o on the line below is temporary and
-# will be removed once the OMAP4 part of the codebase is converted to
-# use OMAP4-specific PRCM functions.
-obj-$(CONFIG_ARCH_OMAP4)		+= prcm.o cm2xxx_3xxx.o cminst44xx.o \
-					   cm44xx.o prcm_mpu44xx.o \
-					   prminst44xx.o vc44xx_data.o \
-					   vp44xx_data.o prm44xx.o
+obj-$(CONFIG_ARCH_OMAP3)		+= prcm.o cm2xxx_3xxx.o prm2xxx_3xxx.o
+obj-$(CONFIG_ARCH_OMAP3)		+= vc3xxx_data.o vp3xxx_data.o
+obj-$(CONFIG_ARCH_OMAP4)		+= prcm.o cminst44xx.o cm44xx.o
+obj-$(CONFIG_ARCH_OMAP4)		+= prcm_mpu44xx.o prminst44xx.o
+obj-$(CONFIG_ARCH_OMAP4)		+= vc44xx_data.o vp44xx_data.o prm44xx.o
 
 # OMAP voltage domains
 voltagedomain-common			:= voltage.o vc.o vp.o
-obj-$(CONFIG_ARCH_OMAP2)		+= $(voltagedomain-common) \
-					   voltagedomains2xxx_data.o
-obj-$(CONFIG_ARCH_OMAP3)		+= $(voltagedomain-common) \
-					   voltagedomains3xxx_data.o
-obj-$(CONFIG_ARCH_OMAP4)		+= $(voltagedomain-common) \
-					   voltagedomains44xx_data.o
+obj-$(CONFIG_ARCH_OMAP2)		+= $(voltagedomain-common)
+obj-$(CONFIG_ARCH_OMAP2)		+= voltagedomains2xxx_data.o
+obj-$(CONFIG_ARCH_OMAP3)		+= $(voltagedomain-common)
+obj-$(CONFIG_ARCH_OMAP3)		+= voltagedomains3xxx_data.o
+obj-$(CONFIG_ARCH_OMAP4)		+= $(voltagedomain-common)
+obj-$(CONFIG_ARCH_OMAP4)		+= voltagedomains44xx_data.o
 
 # OMAP powerdomain framework
 powerdomain-common			+= powerdomain.o powerdomain-common.o
-obj-$(CONFIG_ARCH_OMAP2)		+= $(powerdomain-common) \
-					   powerdomain2xxx_3xxx.o \
-					   powerdomains2xxx_data.o \
-					   powerdomains2xxx_3xxx_data.o
-obj-$(CONFIG_ARCH_OMAP3)		+= $(powerdomain-common) \
-					   powerdomain2xxx_3xxx.o \
-					   powerdomains3xxx_data.o \
-					   powerdomains2xxx_3xxx_data.o
-obj-$(CONFIG_ARCH_OMAP4)		+= $(powerdomain-common) \
-					   powerdomain44xx.o \
-					   powerdomains44xx_data.o
+obj-$(CONFIG_ARCH_OMAP2)		+= $(powerdomain-common)
+obj-$(CONFIG_ARCH_OMAP2)		+= powerdomains2xxx_data.o
+obj-$(CONFIG_ARCH_OMAP2)		+= powerdomain2xxx_3xxx.o
+obj-$(CONFIG_ARCH_OMAP2)		+= powerdomains2xxx_3xxx_data.o
+obj-$(CONFIG_ARCH_OMAP3)		+= $(powerdomain-common)
+obj-$(CONFIG_ARCH_OMAP3)		+= powerdomain2xxx_3xxx.o
+obj-$(CONFIG_ARCH_OMAP3)		+= powerdomains3xxx_data.o
+obj-$(CONFIG_ARCH_OMAP3)		+= powerdomains2xxx_3xxx_data.o
+obj-$(CONFIG_ARCH_OMAP4)		+= $(powerdomain-common)
+obj-$(CONFIG_ARCH_OMAP4)		+= powerdomain44xx.o
+obj-$(CONFIG_ARCH_OMAP4)		+= powerdomains44xx_data.o
 
 # PRCM clockdomain control
-clockdomain-common			+= clockdomain.o \
-					   clockdomains_common_data.o
-obj-$(CONFIG_ARCH_OMAP2)		+= $(clockdomain-common) \
-					   clockdomain2xxx_3xxx.o \
-					   clockdomains2xxx_3xxx_data.o
+clockdomain-common			+= clockdomain.o
+clockdomain-common			+= clockdomains_common_data.o
+obj-$(CONFIG_ARCH_OMAP2)		+= $(clockdomain-common)
+obj-$(CONFIG_ARCH_OMAP2)		+= clockdomain2xxx_3xxx.o
+obj-$(CONFIG_ARCH_OMAP2)		+= clockdomains2xxx_3xxx_data.o
 obj-$(CONFIG_SOC_OMAP2420)		+= clockdomains2420_data.o
 obj-$(CONFIG_SOC_OMAP2430)		+= clockdomains2430_data.o
-obj-$(CONFIG_ARCH_OMAP3)		+= $(clockdomain-common) \
-					   clockdomain2xxx_3xxx.o \
-					   clockdomains2xxx_3xxx_data.o \
-					   clockdomains3xxx_data.o
-obj-$(CONFIG_ARCH_OMAP4)		+= $(clockdomain-common) \
-					   clockdomain44xx.o \
-					   clockdomains44xx_data.o
+obj-$(CONFIG_ARCH_OMAP3)		+= $(clockdomain-common)
+obj-$(CONFIG_ARCH_OMAP3)		+= clockdomain2xxx_3xxx.o
+obj-$(CONFIG_ARCH_OMAP3)		+= clockdomains2xxx_3xxx_data.o
+obj-$(CONFIG_ARCH_OMAP3)		+= clockdomains3xxx_data.o
+obj-$(CONFIG_ARCH_OMAP4)		+= $(clockdomain-common)
+obj-$(CONFIG_ARCH_OMAP4)		+= clockdomain44xx.o
+obj-$(CONFIG_ARCH_OMAP4)		+= clockdomains44xx_data.o
 
 # Clock framework
-obj-$(CONFIG_ARCH_OMAP2)		+= $(clock-common) clock2xxx.o \
-					   clkt2xxx_sys.o \
-					   clkt2xxx_dpllcore.o \
-					   clkt2xxx_virt_prcm_set.o \
-					   clkt2xxx_apll.o clkt2xxx_osc.o \
-					   clkt2xxx_dpll.o clkt_iclk.o
+obj-$(CONFIG_ARCH_OMAP2)		+= $(clock-common) clock2xxx.o
+obj-$(CONFIG_ARCH_OMAP2)		+= clkt2xxx_sys.o
+obj-$(CONFIG_ARCH_OMAP2)		+= clkt2xxx_dpllcore.o
+obj-$(CONFIG_ARCH_OMAP2)		+= clkt2xxx_virt_prcm_set.o
+obj-$(CONFIG_ARCH_OMAP2)		+= clkt2xxx_apll.o clkt2xxx_osc.o
+obj-$(CONFIG_ARCH_OMAP2)		+= clkt2xxx_dpll.o clkt_iclk.o
 obj-$(CONFIG_SOC_OMAP2420)		+= clock2420_data.o
 obj-$(CONFIG_SOC_OMAP2430)		+= clock2430.o clock2430_data.o
-obj-$(CONFIG_ARCH_OMAP3)		+= $(clock-common) clock3xxx.o \
-					   clock34xx.o clkt34xx_dpll3m2.o \
-					   clock3517.o clock36xx.o \
-					   dpll3xxx.o clock3xxx_data.o \
-					   clkt_iclk.o
-obj-$(CONFIG_ARCH_OMAP4)		+= $(clock-common) clock44xx_data.o \
-					   dpll3xxx.o dpll44xx.o
+obj-$(CONFIG_ARCH_OMAP3)		+= $(clock-common) clock3xxx.o
+obj-$(CONFIG_ARCH_OMAP3)		+= clock34xx.o clkt34xx_dpll3m2.o
+obj-$(CONFIG_ARCH_OMAP3)		+= clock3517.o clock36xx.o
+obj-$(CONFIG_ARCH_OMAP3)		+= dpll3xxx.o clock3xxx_data.o
+obj-$(CONFIG_ARCH_OMAP3)		+= clkt_iclk.o
+obj-$(CONFIG_ARCH_OMAP4)		+= $(clock-common) clock44xx_data.o
+obj-$(CONFIG_ARCH_OMAP4)		+= dpll3xxx.o dpll44xx.o
 
 # OMAP2 clock rate set data (old "OPP" data)
 obj-$(CONFIG_SOC_OMAP2420)		+= opp2420_data.o
 obj-$(CONFIG_SOC_OMAP2430)		+= opp2430_data.o
 
 # hwmod data
-obj-$(CONFIG_SOC_OMAP2420)		+= omap_hwmod_2xxx_ipblock_data.o \
-					   omap_hwmod_2xxx_3xxx_ipblock_data.o \
-					   omap_hwmod_2xxx_interconnect_data.o \
-					   omap_hwmod_2xxx_3xxx_interconnect_data.o \
-					   omap_hwmod_2420_data.o
-obj-$(CONFIG_SOC_OMAP2430)		+= omap_hwmod_2xxx_ipblock_data.o \
-					   omap_hwmod_2xxx_3xxx_ipblock_data.o \
-					   omap_hwmod_2xxx_interconnect_data.o \
-					   omap_hwmod_2xxx_3xxx_interconnect_data.o \
-					   omap_hwmod_2430_data.o
-obj-$(CONFIG_ARCH_OMAP3)		+= omap_hwmod_2xxx_3xxx_ipblock_data.o \
-					   omap_hwmod_2xxx_3xxx_interconnect_data.o \
-					   omap_hwmod_3xxx_data.o
+obj-$(CONFIG_SOC_OMAP2420)		+= omap_hwmod_2xxx_ipblock_data.o
+obj-$(CONFIG_SOC_OMAP2420)		+= omap_hwmod_2xxx_3xxx_ipblock_data.o
+obj-$(CONFIG_SOC_OMAP2420)		+= omap_hwmod_2xxx_interconnect_data.o
+obj-$(CONFIG_SOC_OMAP2420)		+= omap_hwmod_2xxx_3xxx_interconnect_data.o
+obj-$(CONFIG_SOC_OMAP2420)		+= omap_hwmod_2420_data.o
+obj-$(CONFIG_SOC_OMAP2430)		+= omap_hwmod_2xxx_ipblock_data.o
+obj-$(CONFIG_SOC_OMAP2430)		+= omap_hwmod_2xxx_3xxx_ipblock_data.o
+obj-$(CONFIG_SOC_OMAP2430)		+= omap_hwmod_2xxx_interconnect_data.o
+obj-$(CONFIG_SOC_OMAP2430)		+= omap_hwmod_2xxx_3xxx_interconnect_data.o
+obj-$(CONFIG_SOC_OMAP2430)		+= omap_hwmod_2430_data.o
+obj-$(CONFIG_ARCH_OMAP3)		+= omap_hwmod_2xxx_3xxx_ipblock_data.o
+obj-$(CONFIG_ARCH_OMAP3)		+= omap_hwmod_2xxx_3xxx_interconnect_data.o
+obj-$(CONFIG_ARCH_OMAP3)		+= omap_hwmod_3xxx_data.o
 obj-$(CONFIG_ARCH_OMAP4)		+= omap_hwmod_44xx_data.o
 
 # EMU peripherals
@@ -208,23 +205,19 @@
 obj-$(CONFIG_MACH_OMAP3_PANDORA)	+= board-omap3pandora.o
 obj-$(CONFIG_MACH_OMAP_3430SDP)		+= board-3430sdp.o
 obj-$(CONFIG_MACH_NOKIA_N8X0)		+= board-n8x0.o
-obj-$(CONFIG_MACH_NOKIA_RM680)		+= board-rm680.o \
-					   sdram-nokia.o
-obj-$(CONFIG_MACH_NOKIA_RX51)		+= board-rx51.o \
-					   sdram-nokia.o \
-					   board-rx51-peripherals.o \
-					   board-rx51-video.o
-obj-$(CONFIG_MACH_OMAP_ZOOM2)		+= board-zoom.o \
-					   board-zoom-peripherals.o \
-					   board-zoom-display.o \
-					   board-zoom-debugboard.o
-obj-$(CONFIG_MACH_OMAP_ZOOM3)		+= board-zoom.o \
-					   board-zoom-peripherals.o \
-					   board-zoom-display.o \
-					   board-zoom-debugboard.o
-obj-$(CONFIG_MACH_OMAP_3630SDP)		+= board-3630sdp.o \
-					   board-zoom-peripherals.o \
-					   board-zoom-display.o
+obj-$(CONFIG_MACH_NOKIA_RM680)		+= board-rm680.o sdram-nokia.o
+obj-$(CONFIG_MACH_NOKIA_RX51)		+= board-rx51.o sdram-nokia.o
+obj-$(CONFIG_MACH_NOKIA_RX51)		+= board-rx51-peripherals.o
+obj-$(CONFIG_MACH_NOKIA_RX51)		+= board-rx51-video.o
+obj-$(CONFIG_MACH_OMAP_ZOOM2)		+= board-zoom.o board-zoom-peripherals.o
+obj-$(CONFIG_MACH_OMAP_ZOOM2)		+= board-zoom-display.o
+obj-$(CONFIG_MACH_OMAP_ZOOM2)		+= board-zoom-debugboard.o
+obj-$(CONFIG_MACH_OMAP_ZOOM3)		+= board-zoom.o board-zoom-peripherals.o
+obj-$(CONFIG_MACH_OMAP_ZOOM3)		+= board-zoom-display.o
+obj-$(CONFIG_MACH_OMAP_ZOOM3)		+= board-zoom-debugboard.o
+obj-$(CONFIG_MACH_OMAP_3630SDP)		+= board-3630sdp.o
+obj-$(CONFIG_MACH_OMAP_3630SDP)		+= board-zoom-peripherals.o
+obj-$(CONFIG_MACH_OMAP_3630SDP)		+= board-zoom-display.o
 obj-$(CONFIG_MACH_CM_T35)		+= board-cm-t35.o
 obj-$(CONFIG_MACH_CM_T3517)		+= board-cm-t3517.o
 obj-$(CONFIG_MACH_IGEP0020)		+= board-igep0020.o
diff --git a/arch/arm/mach-omap2/board-2430sdp.c b/arch/arm/mach-omap2/board-2430sdp.c
index e658f83..99ca6ba 100644
--- a/arch/arm/mach-omap2/board-2430sdp.c
+++ b/arch/arm/mach-omap2/board-2430sdp.c
@@ -303,6 +303,7 @@
 	.init_irq	= omap2_init_irq,
 	.handle_irq	= omap2_intc_handle_irq,
 	.init_machine	= omap_2430sdp_init,
+	.init_late	= omap2430_init_late,
 	.timer		= &omap2_timer,
 	.restart	= omap_prcm_restart,
 MACHINE_END
diff --git a/arch/arm/mach-omap2/board-3430sdp.c b/arch/arm/mach-omap2/board-3430sdp.c
index 37abb0d..a98c688 100644
--- a/arch/arm/mach-omap2/board-3430sdp.c
+++ b/arch/arm/mach-omap2/board-3430sdp.c
@@ -605,6 +605,7 @@
 	.init_irq	= omap3_init_irq,
 	.handle_irq	= omap3_intc_handle_irq,
 	.init_machine	= omap_3430sdp_init,
+	.init_late	= omap3430_init_late,
 	.timer		= &omap3_timer,
 	.restart	= omap_prcm_restart,
 MACHINE_END
diff --git a/arch/arm/mach-omap2/board-3630sdp.c b/arch/arm/mach-omap2/board-3630sdp.c
index 6ef350d..2dc9ba5 100644
--- a/arch/arm/mach-omap2/board-3630sdp.c
+++ b/arch/arm/mach-omap2/board-3630sdp.c
@@ -217,6 +217,7 @@
 	.init_irq	= omap3_init_irq,
 	.handle_irq	= omap3_intc_handle_irq,
 	.init_machine	= omap_sdp_init,
+	.init_late	= omap3630_init_late,
 	.timer		= &omap3_timer,
 	.restart	= omap_prcm_restart,
 MACHINE_END
diff --git a/arch/arm/mach-omap2/board-4430sdp.c b/arch/arm/mach-omap2/board-4430sdp.c
index 94af6cd..8e17284 100644
--- a/arch/arm/mach-omap2/board-4430sdp.c
+++ b/arch/arm/mach-omap2/board-4430sdp.c
@@ -912,6 +912,7 @@
 	.init_irq	= gic_init_irq,
 	.handle_irq	= gic_handle_irq,
 	.init_machine	= omap_4430sdp_init,
+	.init_late	= omap4430_init_late,
 	.timer		= &omap4_timer,
 	.restart	= omap_prcm_restart,
 MACHINE_END
diff --git a/arch/arm/mach-omap2/board-am3517crane.c b/arch/arm/mach-omap2/board-am3517crane.c
index 3b8a53c..92432c2 100644
--- a/arch/arm/mach-omap2/board-am3517crane.c
+++ b/arch/arm/mach-omap2/board-am3517crane.c
@@ -102,6 +102,7 @@
 	.init_irq	= omap3_init_irq,
 	.handle_irq	= omap3_intc_handle_irq,
 	.init_machine	= am3517_crane_init,
+	.init_late	= am35xx_init_late,
 	.timer		= &omap3_timer,
 	.restart	= omap_prcm_restart,
 MACHINE_END
diff --git a/arch/arm/mach-omap2/board-am3517evm.c b/arch/arm/mach-omap2/board-am3517evm.c
index 99790eb..18f6010 100644
--- a/arch/arm/mach-omap2/board-am3517evm.c
+++ b/arch/arm/mach-omap2/board-am3517evm.c
@@ -385,6 +385,7 @@
 	.init_irq	= omap3_init_irq,
 	.handle_irq	= omap3_intc_handle_irq,
 	.init_machine	= am3517_evm_init,
+	.init_late	= am35xx_init_late,
 	.timer		= &omap3_timer,
 	.restart	= omap_prcm_restart,
 MACHINE_END
diff --git a/arch/arm/mach-omap2/board-apollon.c b/arch/arm/mach-omap2/board-apollon.c
index 768ece2..502c31e 100644
--- a/arch/arm/mach-omap2/board-apollon.c
+++ b/arch/arm/mach-omap2/board-apollon.c
@@ -356,6 +356,7 @@
 	.init_irq	= omap2_init_irq,
 	.handle_irq	= omap2_intc_handle_irq,
 	.init_machine	= omap_apollon_init,
+	.init_late	= omap2420_init_late,
 	.timer		= &omap2_timer,
 	.restart	= omap_prcm_restart,
 MACHINE_END
diff --git a/arch/arm/mach-omap2/board-cm-t35.c b/arch/arm/mach-omap2/board-cm-t35.c
index c03df14..ded100c 100644
--- a/arch/arm/mach-omap2/board-cm-t35.c
+++ b/arch/arm/mach-omap2/board-cm-t35.c
@@ -669,6 +669,7 @@
 	.init_irq	= omap3_init_irq,
 	.handle_irq	= omap3_intc_handle_irq,
 	.init_machine	= cm_t35_init,
+	.init_late	= omap35xx_init_late,
 	.timer		= &omap3_timer,
 	.restart	= omap_prcm_restart,
 MACHINE_END
@@ -681,6 +682,7 @@
 	.init_irq       = omap3_init_irq,
 	.handle_irq	= omap3_intc_handle_irq,
 	.init_machine   = cm_t3730_init,
+	.init_late     = omap3630_init_late,
 	.timer          = &omap3_timer,
 	.restart	= omap_prcm_restart,
 MACHINE_END
diff --git a/arch/arm/mach-omap2/board-cm-t3517.c b/arch/arm/mach-omap2/board-cm-t3517.c
index 9e66e16..a33ad46 100644
--- a/arch/arm/mach-omap2/board-cm-t3517.c
+++ b/arch/arm/mach-omap2/board-cm-t3517.c
@@ -303,6 +303,7 @@
 	.init_irq	= omap3_init_irq,
 	.handle_irq	= omap3_intc_handle_irq,
 	.init_machine	= cm_t3517_init,
+	.init_late	= am35xx_init_late,
 	.timer		= &omap3_timer,
 	.restart	= omap_prcm_restart,
 MACHINE_END
diff --git a/arch/arm/mach-omap2/board-devkit8000.c b/arch/arm/mach-omap2/board-devkit8000.c
index b063f0d..6567c1c 100644
--- a/arch/arm/mach-omap2/board-devkit8000.c
+++ b/arch/arm/mach-omap2/board-devkit8000.c
@@ -644,6 +644,7 @@
 	.init_irq	= omap3_init_irq,
 	.handle_irq	= omap3_intc_handle_irq,
 	.init_machine	= devkit8000_init,
+	.init_late	= omap35xx_init_late,
 	.timer		= &omap3_secure_timer,
 	.restart	= omap_prcm_restart,
 MACHINE_END
diff --git a/arch/arm/mach-omap2/board-flash.c b/arch/arm/mach-omap2/board-flash.c
index 70a81f9..53c39d2 100644
--- a/arch/arm/mach-omap2/board-flash.c
+++ b/arch/arm/mach-omap2/board-flash.c
@@ -97,11 +97,6 @@
 
 	gpmc_onenand_init(&board_onenand_data);
 }
-#else
-void
-__init board_onenand_init(struct mtd_partition *nor_parts, u8 nr_parts, u8 cs)
-{
-}
 #endif /* CONFIG_MTD_ONENAND_OMAP2 || CONFIG_MTD_ONENAND_OMAP2_MODULE */
 
 #if defined(CONFIG_MTD_NAND_OMAP2) || \
diff --git a/arch/arm/mach-omap2/board-generic.c b/arch/arm/mach-omap2/board-generic.c
index 7302ba7..2029346 100644
--- a/arch/arm/mach-omap2/board-generic.c
+++ b/arch/arm/mach-omap2/board-generic.c
@@ -125,6 +125,7 @@
 	.init_irq	= omap_init_irq,
 	.handle_irq	= gic_handle_irq,
 	.init_machine	= omap_generic_init,
+	.init_late	= omap4430_init_late,
 	.timer		= &omap4_timer,
 	.dt_compat	= omap4_boards_compat,
 	.restart	= omap_prcm_restart,
diff --git a/arch/arm/mach-omap2/board-h4.c b/arch/arm/mach-omap2/board-h4.c
index 0bbbabe..876becf 100644
--- a/arch/arm/mach-omap2/board-h4.c
+++ b/arch/arm/mach-omap2/board-h4.c
@@ -398,6 +398,7 @@
 	.init_irq	= omap2_init_irq,
 	.handle_irq	= omap2_intc_handle_irq,
 	.init_machine	= omap_h4_init,
+	.init_late	= omap2420_init_late,
 	.timer		= &omap2_timer,
 	.restart	= omap_prcm_restart,
 MACHINE_END
diff --git a/arch/arm/mach-omap2/board-igep0020.c b/arch/arm/mach-omap2/board-igep0020.c
index 7a27409..7491529 100644
--- a/arch/arm/mach-omap2/board-igep0020.c
+++ b/arch/arm/mach-omap2/board-igep0020.c
@@ -650,6 +650,7 @@
 	.init_irq	= omap3_init_irq,
 	.handle_irq	= omap3_intc_handle_irq,
 	.init_machine	= igep_init,
+	.init_late	= omap35xx_init_late,
 	.timer		= &omap3_timer,
 	.restart	= omap_prcm_restart,
 MACHINE_END
@@ -662,6 +663,7 @@
 	.init_irq	= omap3_init_irq,
 	.handle_irq	= omap3_intc_handle_irq,
 	.init_machine	= igep_init,
+	.init_late	= omap35xx_init_late,
 	.timer		= &omap3_timer,
 	.restart	= omap_prcm_restart,
 MACHINE_END
diff --git a/arch/arm/mach-omap2/board-ldp.c b/arch/arm/mach-omap2/board-ldp.c
index 1b60495..ef9e829 100644
--- a/arch/arm/mach-omap2/board-ldp.c
+++ b/arch/arm/mach-omap2/board-ldp.c
@@ -442,6 +442,7 @@
 	.init_irq	= omap3_init_irq,
 	.handle_irq	= omap3_intc_handle_irq,
 	.init_machine	= omap_ldp_init,
+	.init_late	= omap3430_init_late,
 	.timer		= &omap3_timer,
 	.restart	= omap_prcm_restart,
 MACHINE_END
diff --git a/arch/arm/mach-omap2/board-n8x0.c b/arch/arm/mach-omap2/board-n8x0.c
index 518091c..2c5d0ed 100644
--- a/arch/arm/mach-omap2/board-n8x0.c
+++ b/arch/arm/mach-omap2/board-n8x0.c
@@ -83,11 +83,9 @@
 };
 
 static struct musb_hdrc_platform_data tusb_data = {
-#if defined(CONFIG_USB_MUSB_OTG)
+#ifdef CONFIG_USB_GADGET_MUSB_HDRC
 	.mode		= MUSB_OTG,
-#elif defined(CONFIG_USB_MUSB_PERIPHERAL)
-	.mode		= MUSB_PERIPHERAL,
-#else /* defined(CONFIG_USB_MUSB_HOST) */
+#else
 	.mode		= MUSB_HOST,
 #endif
 	.set_power	= tusb_set_power,
@@ -694,6 +692,7 @@
 	.init_irq	= omap2_init_irq,
 	.handle_irq	= omap2_intc_handle_irq,
 	.init_machine	= n8x0_init_machine,
+	.init_late	= omap2420_init_late,
 	.timer		= &omap2_timer,
 	.restart	= omap_prcm_restart,
 MACHINE_END
@@ -706,6 +705,7 @@
 	.init_irq	= omap2_init_irq,
 	.handle_irq	= omap2_intc_handle_irq,
 	.init_machine	= n8x0_init_machine,
+	.init_late	= omap2420_init_late,
 	.timer		= &omap2_timer,
 	.restart	= omap_prcm_restart,
 MACHINE_END
@@ -718,6 +718,7 @@
 	.init_irq	= omap2_init_irq,
 	.handle_irq	= omap2_intc_handle_irq,
 	.init_machine	= n8x0_init_machine,
+	.init_late	= omap2420_init_late,
 	.timer		= &omap2_timer,
 	.restart	= omap_prcm_restart,
 MACHINE_END
diff --git a/arch/arm/mach-omap2/board-omap3beagle.c b/arch/arm/mach-omap2/board-omap3beagle.c
index 2a7b9a9..580fd17 100644
--- a/arch/arm/mach-omap2/board-omap3beagle.c
+++ b/arch/arm/mach-omap2/board-omap3beagle.c
@@ -81,13 +81,13 @@
 static struct {
 	int mmc1_gpio_wp;
 	int usb_pwr_level;
-	int reset_gpio;
+	int dvi_pd_gpio;
 	int usr_button_gpio;
 	int mmc_caps;
 } beagle_config = {
 	.mmc1_gpio_wp = -EINVAL,
 	.usb_pwr_level = GPIOF_OUT_INIT_LOW,
-	.reset_gpio = 129,
+	.dvi_pd_gpio = -EINVAL,
 	.usr_button_gpio = 4,
 	.mmc_caps = MMC_CAP_4_BIT_DATA | MMC_CAP_8_BIT_DATA,
 };
@@ -126,21 +126,21 @@
 		printk(KERN_INFO "OMAP3 Beagle Rev: Ax/Bx\n");
 		omap3_beagle_version = OMAP3BEAGLE_BOARD_AXBX;
 		beagle_config.mmc1_gpio_wp = 29;
-		beagle_config.reset_gpio = 170;
+		beagle_config.dvi_pd_gpio = 170;
 		beagle_config.usr_button_gpio = 7;
 		break;
 	case 6:
 		printk(KERN_INFO "OMAP3 Beagle Rev: C1/C2/C3\n");
 		omap3_beagle_version = OMAP3BEAGLE_BOARD_C1_3;
 		beagle_config.mmc1_gpio_wp = 23;
-		beagle_config.reset_gpio = 170;
+		beagle_config.dvi_pd_gpio = 170;
 		beagle_config.usr_button_gpio = 7;
 		break;
 	case 5:
 		printk(KERN_INFO "OMAP3 Beagle Rev: C4\n");
 		omap3_beagle_version = OMAP3BEAGLE_BOARD_C4;
 		beagle_config.mmc1_gpio_wp = 23;
-		beagle_config.reset_gpio = 170;
+		beagle_config.dvi_pd_gpio = 170;
 		beagle_config.usr_button_gpio = 7;
 		break;
 	case 0:
@@ -274,11 +274,9 @@
 		if (r)
 			pr_err("%s: unable to configure nDVI_PWR_EN\n",
 				__func__);
-		r = gpio_request_one(gpio + 2, GPIOF_OUT_INIT_HIGH,
-				     "DVI_LDO_EN");
-		if (r)
-			pr_err("%s: unable to configure DVI_LDO_EN\n",
-				__func__);
+
+		beagle_config.dvi_pd_gpio = gpio + 2;
+
 	} else {
 		/*
 		 * REVISIT: need ehci-omap hooks for external VBUS
@@ -287,7 +285,7 @@
 		if (gpio_request_one(gpio + 1, GPIOF_IN, "EHCI_nOC"))
 			pr_err("%s: unable to configure EHCI_nOC\n", __func__);
 	}
-	dvi_panel.power_down_gpio = beagle_config.reset_gpio;
+	dvi_panel.power_down_gpio = beagle_config.dvi_pd_gpio;
 
 	gpio_request_one(gpio + TWL4030_GPIO_MAX, beagle_config.usb_pwr_level,
 			"nEN_USB_PWR");
@@ -499,7 +497,7 @@
 	omap3_mux_init(board_mux, OMAP_PACKAGE_CBB);
 	omap3_beagle_init_rev();
 
-	if (beagle_config.mmc1_gpio_wp != -EINVAL)
+	if (gpio_is_valid(beagle_config.mmc1_gpio_wp))
 		omap_mux_init_gpio(beagle_config.mmc1_gpio_wp, OMAP_PIN_INPUT);
 	mmc[0].caps = beagle_config.mmc_caps;
 	omap_hsmmc_init(mmc);
@@ -510,15 +508,13 @@
 
 	platform_add_devices(omap3_beagle_devices,
 			ARRAY_SIZE(omap3_beagle_devices));
+	if (gpio_is_valid(beagle_config.dvi_pd_gpio))
+		omap_mux_init_gpio(beagle_config.dvi_pd_gpio, OMAP_PIN_OUTPUT);
 	omap_display_init(&beagle_dss_data);
 	omap_serial_init();
 	omap_sdrc_init(mt46h32m32lf6_sdrc_params,
 				  mt46h32m32lf6_sdrc_params);
 
-	omap_mux_init_gpio(170, OMAP_PIN_INPUT);
-	/* REVISIT leave DVI powered down until it's needed ... */
-	gpio_request_one(170, GPIOF_OUT_INIT_HIGH, "DVI_nPD");
-
 	usb_musb_init(NULL);
 	usbhs_init(&usbhs_bdata);
 	omap_nand_flash_init(NAND_BUSWIDTH_16, omap3beagle_nand_partitions,
@@ -543,6 +539,7 @@
 	.init_irq	= omap3_init_irq,
 	.handle_irq	= omap3_intc_handle_irq,
 	.init_machine	= omap3_beagle_init,
+	.init_late	= omap3_init_late,
 	.timer		= &omap3_secure_timer,
 	.restart	= omap_prcm_restart,
 MACHINE_END
diff --git a/arch/arm/mach-omap2/board-omap3evm.c b/arch/arm/mach-omap2/board-omap3evm.c
index ace3c67..639bd07 100644
--- a/arch/arm/mach-omap2/board-omap3evm.c
+++ b/arch/arm/mach-omap2/board-omap3evm.c
@@ -671,6 +671,7 @@
 	.init_irq	= omap3_init_irq,
 	.handle_irq	= omap3_intc_handle_irq,
 	.init_machine	= omap3_evm_init,
+	.init_late	= omap35xx_init_late,
 	.timer		= &omap3_timer,
 	.restart	= omap_prcm_restart,
 MACHINE_END
diff --git a/arch/arm/mach-omap2/board-omap3logic.c b/arch/arm/mach-omap2/board-omap3logic.c
index c008bf8..932e177 100644
--- a/arch/arm/mach-omap2/board-omap3logic.c
+++ b/arch/arm/mach-omap2/board-omap3logic.c
@@ -242,6 +242,7 @@
 	.init_irq	= omap3_init_irq,
 	.handle_irq	= omap3_intc_handle_irq,
 	.init_machine	= omap3logic_init,
+	.init_late	= omap35xx_init_late,
 	.timer		= &omap3_timer,
 	.restart	= omap_prcm_restart,
 MACHINE_END
@@ -254,6 +255,7 @@
 	.init_irq	= omap3_init_irq,
 	.handle_irq	= omap3_intc_handle_irq,
 	.init_machine	= omap3logic_init,
+	.init_late	= omap35xx_init_late,
 	.timer		= &omap3_timer,
 	.restart	= omap_prcm_restart,
 MACHINE_END
diff --git a/arch/arm/mach-omap2/board-omap3pandora.c b/arch/arm/mach-omap2/board-omap3pandora.c
index 33d995d..57aebee 100644
--- a/arch/arm/mach-omap2/board-omap3pandora.c
+++ b/arch/arm/mach-omap2/board-omap3pandora.c
@@ -622,6 +622,7 @@
 	.init_irq	= omap3_init_irq,
 	.handle_irq	= omap3_intc_handle_irq,
 	.init_machine	= omap3pandora_init,
+	.init_late	= omap35xx_init_late,
 	.timer		= &omap3_timer,
 	.restart	= omap_prcm_restart,
 MACHINE_END
diff --git a/arch/arm/mach-omap2/board-omap3stalker.c b/arch/arm/mach-omap2/board-omap3stalker.c
index 4396bae..b318f56 100644
--- a/arch/arm/mach-omap2/board-omap3stalker.c
+++ b/arch/arm/mach-omap2/board-omap3stalker.c
@@ -436,6 +436,7 @@
 	.init_irq		= omap3_init_irq,
 	.handle_irq		= omap3_intc_handle_irq,
 	.init_machine		= omap3_stalker_init,
+	.init_late		= omap35xx_init_late,
 	.timer			= &omap3_secure_timer,
 	.restart		= omap_prcm_restart,
 MACHINE_END
diff --git a/arch/arm/mach-omap2/board-omap3touchbook.c b/arch/arm/mach-omap2/board-omap3touchbook.c
index ae2251f..485d14d 100644
--- a/arch/arm/mach-omap2/board-omap3touchbook.c
+++ b/arch/arm/mach-omap2/board-omap3touchbook.c
@@ -387,6 +387,7 @@
 	.init_irq	= omap3_init_irq,
 	.handle_irq	= omap3_intc_handle_irq,
 	.init_machine	= omap3_touchbook_init,
+	.init_late	= omap3430_init_late,
 	.timer		= &omap3_secure_timer,
 	.restart	= omap_prcm_restart,
 MACHINE_END
diff --git a/arch/arm/mach-omap2/board-omap4panda.c b/arch/arm/mach-omap2/board-omap4panda.c
index 68b8fc9..982fb26 100644
--- a/arch/arm/mach-omap2/board-omap4panda.c
+++ b/arch/arm/mach-omap2/board-omap4panda.c
@@ -521,6 +521,7 @@
 	.init_irq	= gic_init_irq,
 	.handle_irq	= gic_handle_irq,
 	.init_machine	= omap4_panda_init,
+	.init_late	= omap4430_init_late,
 	.timer		= &omap4_timer,
 	.restart	= omap_prcm_restart,
 MACHINE_END
diff --git a/arch/arm/mach-omap2/board-overo.c b/arch/arm/mach-omap2/board-overo.c
index 5527c19..8fa2fc3 100644
--- a/arch/arm/mach-omap2/board-overo.c
+++ b/arch/arm/mach-omap2/board-overo.c
@@ -554,6 +554,7 @@
 	.init_irq	= omap3_init_irq,
 	.handle_irq	= omap3_intc_handle_irq,
 	.init_machine	= overo_init,
+	.init_late	= omap35xx_init_late,
 	.timer		= &omap3_timer,
 	.restart	= omap_prcm_restart,
 MACHINE_END
diff --git a/arch/arm/mach-omap2/board-rm680.c b/arch/arm/mach-omap2/board-rm680.c
index ae53d71..0ad1bb3b 100644
--- a/arch/arm/mach-omap2/board-rm680.c
+++ b/arch/arm/mach-omap2/board-rm680.c
@@ -151,6 +151,7 @@
 	.init_irq	= omap3_init_irq,
 	.handle_irq	= omap3_intc_handle_irq,
 	.init_machine	= rm680_init,
+	.init_late	= omap3630_init_late,
 	.timer		= &omap3_timer,
 	.restart	= omap_prcm_restart,
 MACHINE_END
@@ -163,6 +164,7 @@
 	.init_irq	= omap3_init_irq,
 	.handle_irq	= omap3_intc_handle_irq,
 	.init_machine	= rm680_init,
+	.init_late	= omap3630_init_late,
 	.timer		= &omap3_timer,
 	.restart	= omap_prcm_restart,
 MACHINE_END
diff --git a/arch/arm/mach-omap2/board-rx51-peripherals.c b/arch/arm/mach-omap2/board-rx51-peripherals.c
index ff53dec..df2534d 100644
--- a/arch/arm/mach-omap2/board-rx51-peripherals.c
+++ b/arch/arm/mach-omap2/board-rx51-peripherals.c
@@ -144,7 +144,6 @@
 	.release_resources = lis302_release,
 	.st_min_limits = {-32, 3, 3},
 	.st_max_limits = {-3, 32, 32},
-	.irq2 = OMAP_GPIO_IRQ(LIS302_IRQ2_GPIO),
 };
 #endif
 
@@ -1030,7 +1029,6 @@
 	{
 		I2C_BOARD_INFO("lis3lv02d", 0x1d),
 		.platform_data = &rx51_lis3lv02d_data,
-		.irq = OMAP_GPIO_IRQ(LIS302_IRQ1_GPIO),
 	},
 #endif
 };
@@ -1056,6 +1054,10 @@
 	omap_pmic_init(1, 2200, "twl5030", INT_34XX_SYS_NIRQ, &rx51_twldata);
 	omap_register_i2c_bus(2, 100, rx51_peripherals_i2c_board_info_2,
 			      ARRAY_SIZE(rx51_peripherals_i2c_board_info_2));
+#if defined(CONFIG_SENSORS_LIS3_I2C) || defined(CONFIG_SENSORS_LIS3_I2C_MODULE)
+	rx51_lis3lv02d_data.irq2 = gpio_to_irq(LIS302_IRQ2_GPIO);
+	rx51_peripherals_i2c_board_info_3[0].irq = gpio_to_irq(LIS302_IRQ1_GPIO);
+#endif
 	omap_register_i2c_bus(3, 400, rx51_peripherals_i2c_board_info_3,
 			      ARRAY_SIZE(rx51_peripherals_i2c_board_info_3));
 	return 0;
diff --git a/arch/arm/mach-omap2/board-rx51.c b/arch/arm/mach-omap2/board-rx51.c
index 2da92a6..345dd93 100644
--- a/arch/arm/mach-omap2/board-rx51.c
+++ b/arch/arm/mach-omap2/board-rx51.c
@@ -127,6 +127,7 @@
 	.init_irq	= omap3_init_irq,
 	.handle_irq	= omap3_intc_handle_irq,
 	.init_machine	= rx51_init,
+	.init_late	= omap3430_init_late,
 	.timer		= &omap3_timer,
 	.restart	= omap_prcm_restart,
 MACHINE_END
diff --git a/arch/arm/mach-omap2/board-ti8168evm.c b/arch/arm/mach-omap2/board-ti8168evm.c
index ab9a7a9..d4c8392 100644
--- a/arch/arm/mach-omap2/board-ti8168evm.c
+++ b/arch/arm/mach-omap2/board-ti8168evm.c
@@ -52,6 +52,7 @@
 	.init_irq	= ti81xx_init_irq,
 	.timer		= &omap3_timer,
 	.init_machine	= ti81xx_evm_init,
+	.init_late	= ti81xx_init_late,
 	.restart	= omap_prcm_restart,
 MACHINE_END
 
@@ -63,5 +64,6 @@
 	.init_irq	= ti81xx_init_irq,
 	.timer		= &omap3_timer,
 	.init_machine	= ti81xx_evm_init,
+	.init_late	= ti81xx_init_late,
 	.restart	= omap_prcm_restart,
 MACHINE_END
diff --git a/arch/arm/mach-omap2/board-zoom.c b/arch/arm/mach-omap2/board-zoom.c
index 5c20bcc..4e7e561 100644
--- a/arch/arm/mach-omap2/board-zoom.c
+++ b/arch/arm/mach-omap2/board-zoom.c
@@ -137,6 +137,7 @@
 	.init_irq	= omap3_init_irq,
 	.handle_irq	= omap3_intc_handle_irq,
 	.init_machine	= omap_zoom_init,
+	.init_late	= omap3430_init_late,
 	.timer		= &omap3_timer,
 	.restart	= omap_prcm_restart,
 MACHINE_END
@@ -149,6 +150,7 @@
 	.init_irq	= omap3_init_irq,
 	.handle_irq	= omap3_intc_handle_irq,
 	.init_machine	= omap_zoom_init,
+	.init_late	= omap3630_init_late,
 	.timer		= &omap3_timer,
 	.restart	= omap_prcm_restart,
 MACHINE_END
diff --git a/arch/arm/mach-omap2/clock3xxx_data.c b/arch/arm/mach-omap2/clock3xxx_data.c
index 4e1a3b0..1efdec2 100644
--- a/arch/arm/mach-omap2/clock3xxx_data.c
+++ b/arch/arm/mach-omap2/clock3xxx_data.c
@@ -3514,7 +3514,7 @@
 	struct omap_clk *c;
 	u32 cpu_clkflg = 0;
 
-	if (cpu_is_omap3517()) {
+	if (soc_is_am35xx()) {
 		cpu_mask = RATE_IN_34XX;
 		cpu_clkflg = CK_AM35XX;
 	} else if (cpu_is_omap3630()) {
diff --git a/arch/arm/mach-omap2/clock44xx_data.c b/arch/arm/mach-omap2/clock44xx_data.c
index 2172f66..ba6f9a0 100644
--- a/arch/arm/mach-omap2/clock44xx_data.c
+++ b/arch/arm/mach-omap2/clock44xx_data.c
@@ -84,6 +84,7 @@
 
 static struct clk sys_32k_ck = {
 	.name		= "sys_32k_ck",
+	.clkdm_name	= "prm_clkdm",
 	.rate		= 32768,
 	.ops		= &clkops_null,
 };
@@ -512,6 +513,7 @@
 	.name		= "ddrphy_ck",
 	.parent		= &dpll_core_m2_ck,
 	.ops		= &clkops_null,
+	.clkdm_name	= "l3_emif_clkdm",
 	.fixed_div	= 2,
 	.recalc		= &omap_fixed_divisor_recalc,
 };
@@ -769,6 +771,7 @@
 static struct clk dpll_mpu_m2_ck = {
 	.name		= "dpll_mpu_m2_ck",
 	.parent		= &dpll_mpu_ck,
+	.clkdm_name	= "cm_clkdm",
 	.clksel		= dpll_mpu_m2_div,
 	.clksel_reg	= OMAP4430_CM_DIV_M2_DPLL_MPU,
 	.clksel_mask	= OMAP4430_DPLL_CLKOUT_DIV_MASK,
@@ -1149,6 +1152,7 @@
 static struct clk l3_div_ck = {
 	.name		= "l3_div_ck",
 	.parent		= &div_core_ck,
+	.clkdm_name	= "cm_clkdm",
 	.clksel		= l3_div_div,
 	.clksel_reg	= OMAP4430_CM_CLKSEL_CORE,
 	.clksel_mask	= OMAP4430_CLKSEL_L3_MASK,
@@ -2824,6 +2828,7 @@
 static struct clk trace_clk_div_ck = {
 	.name		= "trace_clk_div_ck",
 	.parent		= &pmd_trace_clk_mux_ck,
+	.clkdm_name	= "emu_sys_clkdm",
 	.clksel		= trace_clk_div_div,
 	.clksel_reg	= OMAP4430_CM_EMU_DEBUGSS_CLKCTRL,
 	.clksel_mask	= OMAP4430_CLKSEL_PMD_TRACE_CLK_MASK,
@@ -3412,9 +3417,12 @@
 	if (cpu_is_omap443x()) {
 		cpu_mask = RATE_IN_4430;
 		cpu_clkflg = CK_443X;
-	} else if (cpu_is_omap446x()) {
+	} else if (cpu_is_omap446x() || cpu_is_omap447x()) {
 		cpu_mask = RATE_IN_4460 | RATE_IN_4430;
 		cpu_clkflg = CK_446X | CK_443X;
+
+		if (cpu_is_omap447x())
+			pr_warn("WARNING: OMAP4470 clock data incomplete!\n");
 	} else {
 		return 0;
 	}
diff --git a/arch/arm/mach-omap2/cm.h b/arch/arm/mach-omap2/cm.h
index a7bc096..f24e3f7 100644
--- a/arch/arm/mach-omap2/cm.h
+++ b/arch/arm/mach-omap2/cm.h
@@ -22,4 +22,15 @@
  */
 #define MAX_MODULE_READY_TIME		2000
 
+/*
+ * MAX_MODULE_DISABLE_TIME: max duration in microseconds to wait for
+ * the PRCM to request that a module enter the inactive state in the
+ * case of OMAP2 & 3.  In the case of OMAP4 this is the max duration
+ * in microseconds for the module to reach the inactive state from
+ * a functional state.
+ * XXX FSUSB on OMAP4430 takes ~4ms to idle after reset during
+ * kernel init.
+ */
+#define MAX_MODULE_DISABLE_TIME		5000
+
 #endif
diff --git a/arch/arm/mach-omap2/cminst44xx.c b/arch/arm/mach-omap2/cminst44xx.c
index 8c86d29..1a39945 100644
--- a/arch/arm/mach-omap2/cminst44xx.c
+++ b/arch/arm/mach-omap2/cminst44xx.c
@@ -313,9 +313,9 @@
 
 	omap_test_timeout((_clkctrl_idlest(part, inst, cdoffs, clkctrl_offs) ==
 			   CLKCTRL_IDLEST_DISABLED),
-			  MAX_MODULE_READY_TIME, i);
+			  MAX_MODULE_DISABLE_TIME, i);
 
-	return (i < MAX_MODULE_READY_TIME) ? 0 : -EBUSY;
+	return (i < MAX_MODULE_DISABLE_TIME) ? 0 : -EBUSY;
 }
 
 /**
diff --git a/arch/arm/mach-omap2/common.h b/arch/arm/mach-omap2/common.h
index d6c9e61..be9dfd1 100644
--- a/arch/arm/mach-omap2/common.h
+++ b/arch/arm/mach-omap2/common.h
@@ -55,7 +55,7 @@
 }
 #endif
 
-#ifdef CONFIG_SOC_OMAPTI81XX
+#ifdef CONFIG_SOC_TI81XX
 extern void omapti81xx_map_common_io(void);
 #else
 static inline void omapti81xx_map_common_io(void)
@@ -63,7 +63,7 @@
 }
 #endif
 
-#ifdef CONFIG_SOC_OMAPAM33XX
+#ifdef CONFIG_SOC_AM33XX
 extern void omapam33xx_map_common_io(void);
 #else
 static inline void omapam33xx_map_common_io(void)
@@ -79,6 +79,42 @@
 }
 #endif
 
+#if defined(CONFIG_PM) && defined(CONFIG_ARCH_OMAP2)
+int omap2_pm_init(void);
+#else
+static inline int omap2_pm_init(void)
+{
+	return 0;
+}
+#endif
+
+#if defined(CONFIG_PM) && defined(CONFIG_ARCH_OMAP3)
+int omap3_pm_init(void);
+#else
+static inline int omap3_pm_init(void)
+{
+	return 0;
+}
+#endif
+
+#if defined(CONFIG_PM) && defined(CONFIG_ARCH_OMAP4)
+int omap4_pm_init(void);
+#else
+static inline int omap4_pm_init(void)
+{
+	return 0;
+}
+#endif
+
+#ifdef CONFIG_OMAP_MUX
+int omap_mux_late_init(void);
+#else
+static inline int omap_mux_late_init(void)
+{
+	return 0;
+}
+#endif
+
 extern void omap2_init_common_infrastructure(void);
 
 extern struct sys_timer omap2_timer;
@@ -95,6 +131,17 @@
 void am35xx_init_early(void);
 void ti81xx_init_early(void);
 void omap4430_init_early(void);
+void omap3_init_late(void);	/* Do not use this one */
+void omap4430_init_late(void);
+void omap2420_init_late(void);
+void omap2430_init_late(void);
+void omap3430_init_late(void);
+void omap35xx_init_late(void);
+void omap3630_init_late(void);
+void am35xx_init_late(void);
+void ti81xx_init_late(void);
+void omap4430_init_late(void);
+int omap2_common_pm_late_init(void);
 void omap_prcm_restart(char, const char *);
 
 /*
diff --git a/arch/arm/mach-omap2/devices.c b/arch/arm/mach-omap2/devices.c
index ae62ece..7b4b932 100644
--- a/arch/arm/mach-omap2/devices.c
+++ b/arch/arm/mach-omap2/devices.c
@@ -645,7 +645,11 @@
 
 void __init omap242x_init_mmc(struct omap_mmc_platform_data **mmc_data)
 {
-	char *name = "mmci-omap";
+	struct platform_device *pdev;
+	struct omap_hwmod *oh;
+	int id = 0;
+	char *oh_name = "msdi1";
+	char *dev_name = "mmci-omap";
 
 	if (!mmc_data[0]) {
 		pr_err("%s fails: Incomplete platform data\n", __func__);
@@ -653,8 +657,17 @@
 	}
 
 	omap242x_mmc_mux(mmc_data[0]);
-	omap_mmc_add(name, 0, OMAP2_MMC1_BASE, OMAP2420_MMC_SIZE,
-					INT_24XX_MMC_IRQ, mmc_data[0]);
+
+	oh = omap_hwmod_lookup(oh_name);
+	if (!oh) {
+		pr_err("Could not look up %s\n", oh_name);
+		return;
+	}
+	pdev = omap_device_build(dev_name, id, oh, mmc_data[0],
+				 sizeof(struct omap_mmc_platform_data), NULL, 0, 0);
+	if (IS_ERR(pdev))
+		WARN(1, "Can't build omap_device for %s:%s.\n",
+					dev_name, oh->name);
 }
 
 #endif
diff --git a/arch/arm/mach-omap2/display.c b/arch/arm/mach-omap2/display.c
index db5a88a..5fb47a1 100644
--- a/arch/arm/mach-omap2/display.c
+++ b/arch/arm/mach-omap2/display.c
@@ -180,16 +180,133 @@
 		omap4_dsi_mux_pads(dsi_id, 0);
 }
 
+static int omap_dss_set_min_bus_tput(struct device *dev, unsigned long tput)
+{
+	return omap_pm_set_min_bus_tput(dev, OCP_INITIATOR_AGENT, tput);
+}
+
+static struct platform_device *create_dss_pdev(const char *pdev_name,
+		int pdev_id, const char *oh_name, void *pdata, int pdata_len,
+		struct platform_device *parent)
+{
+	struct platform_device *pdev;
+	struct omap_device *od;
+	struct omap_hwmod *ohs[1];
+	struct omap_hwmod *oh;
+	int r;
+
+	oh = omap_hwmod_lookup(oh_name);
+	if (!oh) {
+		pr_err("Could not look up %s\n", oh_name);
+		r = -ENODEV;
+		goto err;
+	}
+
+	pdev = platform_device_alloc(pdev_name, pdev_id);
+	if (!pdev) {
+		pr_err("Could not create pdev for %s\n", pdev_name);
+		r = -ENOMEM;
+		goto err;
+	}
+
+	if (parent != NULL)
+		pdev->dev.parent = &parent->dev;
+
+	if (pdev->id != -1)
+		dev_set_name(&pdev->dev, "%s.%d", pdev->name, pdev->id);
+	else
+		dev_set_name(&pdev->dev, "%s", pdev->name);
+
+	ohs[0] = oh;
+	od = omap_device_alloc(pdev, ohs, 1, NULL, 0);
+	if (!od) {
+		pr_err("Could not alloc omap_device for %s\n", pdev_name);
+		r = -ENOMEM;
+		goto err;
+	}
+
+	r = platform_device_add_data(pdev, pdata, pdata_len);
+	if (r) {
+		pr_err("Could not set pdata for %s\n", pdev_name);
+		goto err;
+	}
+
+	r = omap_device_register(pdev);
+	if (r) {
+		pr_err("Could not register omap_device for %s\n", pdev_name);
+		goto err;
+	}
+
+	return pdev;
+
+err:
+	return ERR_PTR(r);
+}
+
+static struct platform_device *create_simple_dss_pdev(const char *pdev_name,
+		int pdev_id, void *pdata, int pdata_len,
+		struct platform_device *parent)
+{
+	struct platform_device *pdev;
+	int r;
+
+	pdev = platform_device_alloc(pdev_name, pdev_id);
+	if (!pdev) {
+		pr_err("Could not create pdev for %s\n", pdev_name);
+		r = -ENOMEM;
+		goto err;
+	}
+
+	if (parent != NULL)
+		pdev->dev.parent = &parent->dev;
+
+	if (pdev->id != -1)
+		dev_set_name(&pdev->dev, "%s.%d", pdev->name, pdev->id);
+	else
+		dev_set_name(&pdev->dev, "%s", pdev->name);
+
+	r = platform_device_add_data(pdev, pdata, pdata_len);
+	if (r) {
+		pr_err("Could not set pdata for %s\n", pdev_name);
+		goto err;
+	}
+
+	r = platform_device_add(pdev);
+	if (r) {
+		pr_err("Could not register platform_device for %s\n", pdev_name);
+		goto err;
+	}
+
+	return pdev;
+
+err:
+	return ERR_PTR(r);
+}
+
 int __init omap_display_init(struct omap_dss_board_info *board_data)
 {
 	int r = 0;
-	struct omap_hwmod *oh;
 	struct platform_device *pdev;
 	int i, oh_count;
-	struct omap_display_platform_data pdata;
 	const struct omap_dss_hwmod_data *curr_dss_hwmod;
+	struct platform_device *dss_pdev;
 
-	memset(&pdata, 0, sizeof(pdata));
+	/* create omapdss device */
+
+	board_data->dsi_enable_pads = omap_dsi_enable_pads;
+	board_data->dsi_disable_pads = omap_dsi_disable_pads;
+	board_data->get_context_loss_count = omap_pm_get_dev_context_loss_count;
+	board_data->set_min_bus_tput = omap_dss_set_min_bus_tput;
+
+	omap_display_device.dev.platform_data = board_data;
+
+	r = platform_device_register(&omap_display_device);
+	if (r < 0) {
+		pr_err("Unable to register omapdss device\n");
+		return r;
+	}
+
+	/* create devices for dss hwmods */
 
 	if (cpu_is_omap24xx()) {
 		curr_dss_hwmod = omap2_dss_hwmod_data;
@@ -202,39 +319,58 @@
 		oh_count = ARRAY_SIZE(omap4_dss_hwmod_data);
 	}
 
-	if (board_data->dsi_enable_pads == NULL)
-		board_data->dsi_enable_pads = omap_dsi_enable_pads;
-	if (board_data->dsi_disable_pads == NULL)
-		board_data->dsi_disable_pads = omap_dsi_disable_pads;
+	/*
+	 * First create the pdev for dss_core, which is used as a parent device
+	 * by the other dss pdevs. Note: dss_core has to be the first item in
+	 * the hwmod list.
+	 */
+	dss_pdev = create_dss_pdev(curr_dss_hwmod[0].dev_name,
+			curr_dss_hwmod[0].id,
+			curr_dss_hwmod[0].oh_name,
+			board_data, sizeof(*board_data),
+			NULL);
 
-	pdata.board_data = board_data;
-	pdata.board_data->get_context_loss_count =
-		omap_pm_get_dev_context_loss_count;
+	if (IS_ERR(dss_pdev)) {
+		pr_err("Could not build omap_device for %s\n",
+				curr_dss_hwmod[0].oh_name);
 
-	for (i = 0; i < oh_count; i++) {
-		oh = omap_hwmod_lookup(curr_dss_hwmod[i].oh_name);
-		if (!oh) {
-			pr_err("Could not look up %s\n",
-				curr_dss_hwmod[i].oh_name);
-			return -ENODEV;
-		}
-
-		pdev = omap_device_build(curr_dss_hwmod[i].dev_name,
-				curr_dss_hwmod[i].id, oh, &pdata,
-				sizeof(struct omap_display_platform_data),
-				NULL, 0, 0);
-
-		if (WARN((IS_ERR(pdev)), "Could not build omap_device for %s\n",
-				curr_dss_hwmod[i].oh_name))
-			return -ENODEV;
+		return PTR_ERR(dss_pdev);
 	}
-	omap_display_device.dev.platform_data = board_data;
 
-	r = platform_device_register(&omap_display_device);
-	if (r < 0)
-		printk(KERN_ERR "Unable to register OMAP-Display device\n");
+	for (i = 1; i < oh_count; i++) {
+		pdev = create_dss_pdev(curr_dss_hwmod[i].dev_name,
+				curr_dss_hwmod[i].id,
+				curr_dss_hwmod[i].oh_name,
+				board_data, sizeof(*board_data),
+				dss_pdev);
 
-	return r;
+		if (IS_ERR(pdev)) {
+			pr_err("Could not build omap_device for %s\n",
+					curr_dss_hwmod[i].oh_name);
+
+			return PTR_ERR(pdev);
+		}
+	}
+
+	/* Create devices for DPI and SDI */
+
+	pdev = create_simple_dss_pdev("omapdss_dpi", -1,
+			board_data, sizeof(*board_data), dss_pdev);
+	if (IS_ERR(pdev)) {
+		pr_err("Could not build platform_device for omapdss_dpi\n");
+		return PTR_ERR(pdev);
+	}
+
+	if (cpu_is_omap34xx()) {
+		pdev = create_simple_dss_pdev("omapdss_sdi", -1,
+				board_data, sizeof(*board_data), dss_pdev);
+		if (IS_ERR(pdev)) {
+			pr_err("Could not build platform_device for omapdss_sdi\n");
+			return PTR_ERR(pdev);
+		}
+	}
+
+	return 0;
 }
 
 static void dispc_disable_outputs(void)
diff --git a/arch/arm/mach-omap2/dma.c b/arch/arm/mach-omap2/dma.c
index b19d849..ff75abe 100644
--- a/arch/arm/mach-omap2/dma.c
+++ b/arch/arm/mach-omap2/dma.c
@@ -227,10 +227,6 @@
 
 	dma_stride		= OMAP2_DMA_STRIDE;
 	dma_common_ch_start	= CSDP;
-	if (cpu_is_omap3630() || cpu_is_omap44xx())
-		dma_common_ch_end = CCDN;
-	else
-		dma_common_ch_end = CCFN;
 
 	p = kzalloc(sizeof(struct omap_system_dma_plat_info), GFP_KERNEL);
 	if (!p) {
@@ -277,6 +273,13 @@
 		dev_err(&pdev->dev, "%s: kzalloc fail\n", __func__);
 		return -ENOMEM;
 	}
+
+	/* Check the capabilities register for descriptor loading feature */
+	if (dma_read(CAPS_0, 0) & DMA_HAS_DESCRIPTOR_CAPS)
+		dma_common_ch_end = CCDN;
+	else
+		dma_common_ch_end = CCFN;
+
 	return 0;
 }
 
diff --git a/arch/arm/mach-omap2/dsp.c b/arch/arm/mach-omap2/dsp.c
index 3376388..88ffa1e 100644
--- a/arch/arm/mach-omap2/dsp.c
+++ b/arch/arm/mach-omap2/dsp.c
@@ -20,6 +20,9 @@
 
 #include <linux/module.h>
 #include <linux/platform_device.h>
+
+#include <asm/memblock.h>
+
 #include "cm2xxx_3xxx.h"
 #include "prm2xxx_3xxx.h"
 #ifdef CONFIG_BRIDGE_DVFS
@@ -28,8 +31,6 @@
 
 #include <plat/dsp.h>
 
-extern phys_addr_t omap_dsp_get_mempool_base(void);
-
 static struct platform_device *omap_dsp_pdev;
 
 static struct omap_dsp_platform_data omap_dsp_pdata __initdata = {
@@ -47,6 +48,31 @@
 	.dsp_cm_rmw_bits = omap2_cm_rmw_mod_reg_bits,
 };
 
+static phys_addr_t omap_dsp_phys_mempool_base;
+
+void __init omap_dsp_reserve_sdram_memblock(void)
+{
+	phys_addr_t size = CONFIG_TIDSPBRIDGE_MEMPOOL_SIZE;
+	phys_addr_t paddr;
+
+	if (!size)
+		return;
+
+	paddr = arm_memblock_steal(size, SZ_1M);
+	if (!paddr) {
+		pr_err("%s: failed to reserve %llx bytes\n",
+				__func__, (unsigned long long)size);
+		return;
+	}
+
+	omap_dsp_phys_mempool_base = paddr;
+}
+
+static phys_addr_t omap_dsp_get_mempool_base(void)
+{
+	return omap_dsp_phys_mempool_base;
+}
+
 static int __init omap_dsp_init(void)
 {
 	struct platform_device *pdev;
diff --git a/arch/arm/mach-omap2/gpmc.c b/arch/arm/mach-omap2/gpmc.c
index 580e684..2286410 100644
--- a/arch/arm/mach-omap2/gpmc.c
+++ b/arch/arm/mach-omap2/gpmc.c
@@ -49,6 +49,20 @@
 #define GPMC_ECC_CONTROL	0x1f8
 #define GPMC_ECC_SIZE_CONFIG	0x1fc
 #define GPMC_ECC1_RESULT        0x200
+#define GPMC_ECC_BCH_RESULT_0   0x240   /* not available on OMAP2 */
+
+/* GPMC ECC control settings */
+#define GPMC_ECC_CTRL_ECCCLEAR		0x100
+#define GPMC_ECC_CTRL_ECCDISABLE	0x000
+#define GPMC_ECC_CTRL_ECCREG1		0x001
+#define GPMC_ECC_CTRL_ECCREG2		0x002
+#define GPMC_ECC_CTRL_ECCREG3		0x003
+#define GPMC_ECC_CTRL_ECCREG4		0x004
+#define GPMC_ECC_CTRL_ECCREG5		0x005
+#define GPMC_ECC_CTRL_ECCREG6		0x006
+#define GPMC_ECC_CTRL_ECCREG7		0x007
+#define GPMC_ECC_CTRL_ECCREG8		0x008
+#define GPMC_ECC_CTRL_ECCREG9		0x009
 
 #define GPMC_CS0_OFFSET		0x60
 #define GPMC_CS_SIZE		0x30
@@ -860,8 +874,9 @@
 	gpmc_ecc_used = cs;
 
 	/* clear ecc and enable bits */
-	val = ((0x00000001<<8) | 0x00000001);
-	gpmc_write_reg(GPMC_ECC_CONTROL, val);
+	gpmc_write_reg(GPMC_ECC_CONTROL,
+			GPMC_ECC_CTRL_ECCCLEAR |
+			GPMC_ECC_CTRL_ECCREG1);
 
 	/* program ecc and result sizes */
 	val = ((((ecc_size >> 1) - 1) << 22) | (0x0000000F));
@@ -869,13 +884,15 @@
 
 	switch (mode) {
 	case GPMC_ECC_READ:
-		gpmc_write_reg(GPMC_ECC_CONTROL, 0x101);
+	case GPMC_ECC_WRITE:
+		gpmc_write_reg(GPMC_ECC_CONTROL,
+				GPMC_ECC_CTRL_ECCCLEAR |
+				GPMC_ECC_CTRL_ECCREG1);
 		break;
 	case GPMC_ECC_READSYN:
-		 gpmc_write_reg(GPMC_ECC_CONTROL, 0x100);
-		break;
-	case GPMC_ECC_WRITE:
-		gpmc_write_reg(GPMC_ECC_CONTROL, 0x101);
+		gpmc_write_reg(GPMC_ECC_CONTROL,
+				GPMC_ECC_CTRL_ECCCLEAR |
+				GPMC_ECC_CTRL_ECCDISABLE);
 		break;
 	default:
 		printk(KERN_INFO "Error: Unrecognized Mode[%d]!\n", mode);
@@ -919,3 +936,186 @@
 	return 0;
 }
 EXPORT_SYMBOL_GPL(gpmc_calculate_ecc);
+
+#ifdef CONFIG_ARCH_OMAP3
+
+/**
+ * gpmc_init_hwecc_bch - initialize hardware BCH ecc functionality
+ * @cs: chip select number
+ * @nsectors: how many 512-byte sectors to process
+ * @nerrors: how many errors to correct per sector (4 or 8)
+ *
+ * This function must be executed before any call to gpmc_enable_hwecc_bch.
+ */
+int gpmc_init_hwecc_bch(int cs, int nsectors, int nerrors)
+{
+	/* check if ecc module is in use */
+	if (gpmc_ecc_used != -EINVAL)
+		return -EINVAL;
+
+	/* support only OMAP3 class */
+	if (!cpu_is_omap34xx()) {
+		printk(KERN_ERR "BCH ecc is not supported on this CPU\n");
+		return -EINVAL;
+	}
+
+	/*
+	 * For now, assume 4-bit mode is only supported on OMAP3630 ES1.x, x>=1.
+	 * Other chips may be added if confirmed to work.
+	 */
+	if ((nerrors == 4) &&
+	    (!cpu_is_omap3630() || (GET_OMAP_REVISION() == 0))) {
+		printk(KERN_ERR "BCH 4-bit mode is not supported on this CPU\n");
+		return -EINVAL;
+	}
+
+	/* sanity check */
+	if (nsectors > 8) {
+		printk(KERN_ERR "BCH cannot process %d sectors (max is 8)\n",
+		       nsectors);
+		return -EINVAL;
+	}
+
+	return 0;
+}
+EXPORT_SYMBOL_GPL(gpmc_init_hwecc_bch);
+
+/**
+ * gpmc_enable_hwecc_bch - enable hardware BCH ecc functionality
+ * @cs: chip select number
+ * @mode: read/write mode
+ * @dev_width: device bus width(1 for x16, 0 for x8)
+ * @nsectors: how many 512-byte sectors to process
+ * @nerrors: how many errors to correct per sector (4 or 8)
+ */
+int gpmc_enable_hwecc_bch(int cs, int mode, int dev_width, int nsectors,
+			  int nerrors)
+{
+	unsigned int val;
+
+	/* check if ecc module is in use */
+	if (gpmc_ecc_used != -EINVAL)
+		return -EINVAL;
+
+	gpmc_ecc_used = cs;
+
+	/* clear ecc and enable bits */
+	gpmc_write_reg(GPMC_ECC_CONTROL, 0x1);
+
+	/*
+	 * When using BCH, sector size is hardcoded to 512 bytes.
+	 * Here we are using wrapping mode 6 both for reading and writing, with:
+	 *  size0 = 0  (no additional protected byte in spare area)
+	 *  size1 = 32 (skip 32 nibbles = 16 bytes per sector in spare area)
+	 */
+	gpmc_write_reg(GPMC_ECC_SIZE_CONFIG, (32 << 22) | (0 << 12));
+
+	/* BCH configuration */
+	val = ((1                        << 16) | /* enable BCH */
+	       (((nerrors == 8) ? 1 : 0) << 12) | /* 8 or 4 bits */
+	       (0x06                     <<  8) | /* wrap mode = 6 */
+	       (dev_width                <<  7) | /* bus width */
+	       (((nsectors-1) & 0x7)     <<  4) | /* number of sectors */
+	       (cs                       <<  1) | /* ECC CS */
+	       (0x1));                            /* enable ECC */
+
+	gpmc_write_reg(GPMC_ECC_CONFIG, val);
+	gpmc_write_reg(GPMC_ECC_CONTROL, 0x101);
+	return 0;
+}
+EXPORT_SYMBOL_GPL(gpmc_enable_hwecc_bch);
+
+/**
+ * gpmc_calculate_ecc_bch4 - Generate 7 ecc bytes per sector of 512 data bytes
+ * @cs:  chip select number
+ * @dat: The pointer to data on which ecc is computed
+ * @ecc: The ecc output buffer
+ */
+int gpmc_calculate_ecc_bch4(int cs, const u_char *dat, u_char *ecc)
+{
+	int i;
+	unsigned long nsectors, reg, val1, val2;
+
+	if (gpmc_ecc_used != cs)
+		return -EINVAL;
+
+	nsectors = ((gpmc_read_reg(GPMC_ECC_CONFIG) >> 4) & 0x7) + 1;
+
+	for (i = 0; i < nsectors; i++) {
+
+		reg = GPMC_ECC_BCH_RESULT_0 + 16*i;
+
+		/* Read hw-computed remainder */
+		val1 = gpmc_read_reg(reg + 0);
+		val2 = gpmc_read_reg(reg + 4);
+
+		/*
+		 * Add constant polynomial to remainder, in order to get an ecc
+		 * sequence of 0xFFs for a buffer filled with 0xFFs; and
+		 * left-justify the resulting polynomial.
+		 */
+		*ecc++ = 0x28 ^ ((val2 >> 12) & 0xFF);
+		*ecc++ = 0x13 ^ ((val2 >>  4) & 0xFF);
+		*ecc++ = 0xcc ^ (((val2 & 0xF) << 4)|((val1 >> 28) & 0xF));
+		*ecc++ = 0x39 ^ ((val1 >> 20) & 0xFF);
+		*ecc++ = 0x96 ^ ((val1 >> 12) & 0xFF);
+		*ecc++ = 0xac ^ ((val1 >> 4) & 0xFF);
+		*ecc++ = 0x7f ^ ((val1 & 0xF) << 4);
+	}
+
+	gpmc_ecc_used = -EINVAL;
+	return 0;
+}
+EXPORT_SYMBOL_GPL(gpmc_calculate_ecc_bch4);
+
+/**
+ * gpmc_calculate_ecc_bch8 - Generate 13 ecc bytes per block of 512 data bytes
+ * @cs:  chip select number
+ * @dat: The pointer to data on which ecc is computed
+ * @ecc: The ecc output buffer
+ */
+int gpmc_calculate_ecc_bch8(int cs, const u_char *dat, u_char *ecc)
+{
+	int i;
+	unsigned long nsectors, reg, val1, val2, val3, val4;
+
+	if (gpmc_ecc_used != cs)
+		return -EINVAL;
+
+	nsectors = ((gpmc_read_reg(GPMC_ECC_CONFIG) >> 4) & 0x7) + 1;
+
+	for (i = 0; i < nsectors; i++) {
+
+		reg = GPMC_ECC_BCH_RESULT_0 + 16*i;
+
+		/* Read hw-computed remainder */
+		val1 = gpmc_read_reg(reg + 0);
+		val2 = gpmc_read_reg(reg + 4);
+		val3 = gpmc_read_reg(reg + 8);
+		val4 = gpmc_read_reg(reg + 12);
+
+		/*
+		 * Add constant polynomial to remainder, in order to get an ecc
+		 * sequence of 0xFFs for a buffer filled with 0xFFs.
+		 */
+		*ecc++ = 0xef ^ (val4 & 0xFF);
+		*ecc++ = 0x51 ^ ((val3 >> 24) & 0xFF);
+		*ecc++ = 0x2e ^ ((val3 >> 16) & 0xFF);
+		*ecc++ = 0x09 ^ ((val3 >> 8) & 0xFF);
+		*ecc++ = 0xed ^ (val3 & 0xFF);
+		*ecc++ = 0x93 ^ ((val2 >> 24) & 0xFF);
+		*ecc++ = 0x9a ^ ((val2 >> 16) & 0xFF);
+		*ecc++ = 0xc2 ^ ((val2 >> 8) & 0xFF);
+		*ecc++ = 0x97 ^ (val2 & 0xFF);
+		*ecc++ = 0x79 ^ ((val1 >> 24) & 0xFF);
+		*ecc++ = 0xe5 ^ ((val1 >> 16) & 0xFF);
+		*ecc++ = 0x24 ^ ((val1 >> 8) & 0xFF);
+		*ecc++ = 0xb5 ^ (val1 & 0xFF);
+	}
+
+	gpmc_ecc_used = -EINVAL;
+	return 0;
+}
+EXPORT_SYMBOL_GPL(gpmc_calculate_ecc_bch8);
+
+#endif /* CONFIG_ARCH_OMAP3 */
diff --git a/arch/arm/mach-omap2/hsmmc.c b/arch/arm/mach-omap2/hsmmc.c
index b0268ea..be697d4 100644
--- a/arch/arm/mach-omap2/hsmmc.c
+++ b/arch/arm/mach-omap2/hsmmc.c
@@ -355,7 +355,7 @@
 	 *
 	 * temporary HACK: ocr_mask instead of fixed supply
 	 */
-	if (cpu_is_omap3505() || cpu_is_omap3517())
+	if (soc_is_am35xx())
 		mmc->slots[0].ocr_mask = MMC_VDD_165_195 |
 					 MMC_VDD_26_27 |
 					 MMC_VDD_27_28 |
@@ -365,7 +365,7 @@
 	else
 		mmc->slots[0].ocr_mask = c->ocr_mask;
 
-	if (!cpu_is_omap3517() && !cpu_is_omap3505())
+	if (!soc_is_am35xx())
 		mmc->slots[0].features |= HSMMC_HAS_PBIAS;
 
 	if (cpu_is_omap44xx() && (omap_rev() > OMAP4430_REV_ES1_0))
@@ -388,7 +388,7 @@
 			}
 		}
 
-		if (cpu_is_omap3517() || cpu_is_omap3505())
+		if (soc_is_am35xx())
 			mmc->slots[0].set_power = nop_mmc_set_power;
 
 		/* OMAP3630 HSMMC1 supports only 4-bit */
@@ -400,7 +400,7 @@
 		}
 		break;
 	case 2:
-		if (cpu_is_omap3517() || cpu_is_omap3505())
+		if (soc_is_am35xx())
 			mmc->slots[0].set_power = am35x_hsmmc2_set_power;
 
 		if (c->ext_clock)
diff --git a/arch/arm/mach-omap2/id.c b/arch/arm/mach-omap2/id.c
index f139817..00486a8 100644
--- a/arch/arm/mach-omap2/id.c
+++ b/arch/arm/mach-omap2/id.c
@@ -185,8 +185,7 @@
 	 */
 	if (cpu_is_omap3630()) {
 		cpu_name = "OMAP3630";
-	} else if (cpu_is_omap3517()) {
-		/* AM35xx devices */
+	} else if (soc_is_am35xx()) {
 		cpu_name = (omap3_has_sgx()) ? "AM3517" : "AM3505";
 	} else if (cpu_is_ti816x()) {
 		cpu_name = "TI816X";
@@ -248,6 +247,17 @@
 	omap_features |= OMAP3_HAS_SDRC;
 
 	/*
+	 * am35x fixups:
+	 * - The am35x Chip ID register has bits 12, 7:5, and 3:2 marked as
+	 *   reserved and therefore return 0 when read.  Unfortunately,
+	 *   OMAP3_CHECK_FEATURE() will interpret some of those zeroes to
+	 *   mean that a feature is present even though it isn't, so clear
+	 *   the incorrectly set feature bits.
+	 */
+	if (soc_is_am35xx())
+		omap_features &= ~(OMAP3_HAS_IVA | OMAP3_HAS_ISP);
+
+	/*
 	 * TODO: Get additional info (where applicable)
 	 *       e.g. Size of L2 cache.
 	 */
@@ -352,13 +362,13 @@
 		 */
 		switch (rev) {
 		case 0:
-			omap_revision = OMAP3517_REV_ES1_0;
+			omap_revision = AM35XX_REV_ES1_0;
 			cpu_rev = "1.0";
 			break;
 		case 1:
 		/* FALLTHROUGH */
 		default:
-			omap_revision = OMAP3517_REV_ES1_1;
+			omap_revision = AM35XX_REV_ES1_1;
 			cpu_rev = "1.1";
 		}
 		break;
diff --git a/arch/arm/mach-omap2/include/mach/omap-wakeupgen.h b/arch/arm/mach-omap2/include/mach/omap-wakeupgen.h
index d79321b..548de90b 100644
--- a/arch/arm/mach-omap2/include/mach/omap-wakeupgen.h
+++ b/arch/arm/mach-omap2/include/mach/omap-wakeupgen.h
@@ -16,18 +16,10 @@
 #define OMAP_WKG_ENB_B_0			0x14
 #define OMAP_WKG_ENB_C_0			0x18
 #define OMAP_WKG_ENB_D_0			0x1c
-#define OMAP_WKG_ENB_SECURE_A_0			0x20
-#define OMAP_WKG_ENB_SECURE_B_0			0x24
-#define OMAP_WKG_ENB_SECURE_C_0			0x28
-#define OMAP_WKG_ENB_SECURE_D_0			0x2c
 #define OMAP_WKG_ENB_A_1			0x410
 #define OMAP_WKG_ENB_B_1			0x414
 #define OMAP_WKG_ENB_C_1			0x418
 #define OMAP_WKG_ENB_D_1			0x41c
-#define OMAP_WKG_ENB_SECURE_A_1			0x420
-#define OMAP_WKG_ENB_SECURE_B_1			0x424
-#define OMAP_WKG_ENB_SECURE_C_1			0x428
-#define OMAP_WKG_ENB_SECURE_D_1			0x42c
 #define OMAP_AUX_CORE_BOOT_0			0x800
 #define OMAP_AUX_CORE_BOOT_1			0x804
 #define OMAP_PTMSYNCREQ_MASK			0xc00
diff --git a/arch/arm/mach-omap2/io.c b/arch/arm/mach-omap2/io.c
index 4b9491a..8d014ba 100644
--- a/arch/arm/mach-omap2/io.c
+++ b/arch/arm/mach-omap2/io.c
@@ -173,7 +173,7 @@
 };
 #endif
 
-#ifdef CONFIG_SOC_OMAPTI81XX
+#ifdef CONFIG_SOC_TI81XX
 static struct map_desc omapti81xx_io_desc[] __initdata = {
 	{
 		.virtual	= L4_34XX_VIRT,
@@ -184,7 +184,7 @@
 };
 #endif
 
-#ifdef CONFIG_SOC_OMAPAM33XX
+#ifdef CONFIG_SOC_AM33XX
 static struct map_desc omapam33xx_io_desc[] __initdata = {
 	{
 		.virtual	= L4_34XX_VIRT,
@@ -216,41 +216,11 @@
 		.type		= MT_DEVICE,
 	},
 	{
-		.virtual	= OMAP44XX_GPMC_VIRT,
-		.pfn		= __phys_to_pfn(OMAP44XX_GPMC_PHYS),
-		.length		= OMAP44XX_GPMC_SIZE,
-		.type		= MT_DEVICE,
-	},
-	{
-		.virtual	= OMAP44XX_EMIF1_VIRT,
-		.pfn		= __phys_to_pfn(OMAP44XX_EMIF1_PHYS),
-		.length		= OMAP44XX_EMIF1_SIZE,
-		.type		= MT_DEVICE,
-	},
-	{
-		.virtual	= OMAP44XX_EMIF2_VIRT,
-		.pfn		= __phys_to_pfn(OMAP44XX_EMIF2_PHYS),
-		.length		= OMAP44XX_EMIF2_SIZE,
-		.type		= MT_DEVICE,
-	},
-	{
-		.virtual	= OMAP44XX_DMM_VIRT,
-		.pfn		= __phys_to_pfn(OMAP44XX_DMM_PHYS),
-		.length		= OMAP44XX_DMM_SIZE,
-		.type		= MT_DEVICE,
-	},
-	{
 		.virtual	= L4_PER_44XX_VIRT,
 		.pfn		= __phys_to_pfn(L4_PER_44XX_PHYS),
 		.length		= L4_PER_44XX_SIZE,
 		.type		= MT_DEVICE,
 	},
-	{
-		.virtual	= L4_EMU_44XX_VIRT,
-		.pfn		= __phys_to_pfn(L4_EMU_44XX_PHYS),
-		.length		= L4_EMU_44XX_SIZE,
-		.type		= MT_DEVICE,
-	},
 #ifdef CONFIG_OMAP4_ERRATA_I688
 	{
 		.virtual	= OMAP4_SRAM_VA,
@@ -286,14 +256,14 @@
 }
 #endif
 
-#ifdef CONFIG_SOC_OMAPTI81XX
+#ifdef CONFIG_SOC_TI81XX
 void __init omapti81xx_map_common_io(void)
 {
 	iotable_init(omapti81xx_io_desc, ARRAY_SIZE(omapti81xx_io_desc));
 }
 #endif
 
-#ifdef CONFIG_SOC_OMAPAM33XX
+#ifdef CONFIG_SOC_AM33XX
 void __init omapam33xx_map_common_io(void)
 {
 	iotable_init(omapam33xx_io_desc, ARRAY_SIZE(omapam33xx_io_desc));
@@ -380,6 +350,13 @@
 	omap_hwmod_init_postsetup();
 	omap2420_clk_init();
 }
+
+void __init omap2420_init_late(void)
+{
+	omap_mux_late_init();
+	omap2_common_pm_late_init();
+	omap2_pm_init();
+}
 #endif
 
 #ifdef CONFIG_SOC_OMAP2430
@@ -395,6 +372,13 @@
 	omap_hwmod_init_postsetup();
 	omap2430_clk_init();
 }
+
+void __init omap2430_init_late(void)
+{
+	omap_mux_late_init();
+	omap2_common_pm_late_init();
+	omap2_pm_init();
+}
 #endif
 
 /*
@@ -449,6 +433,48 @@
 	omap_hwmod_init_postsetup();
 	omap3xxx_clk_init();
 }
+
+void __init omap3_init_late(void)
+{
+	omap_mux_late_init();
+	omap2_common_pm_late_init();
+	omap3_pm_init();
+}
+
+void __init omap3430_init_late(void)
+{
+	omap_mux_late_init();
+	omap2_common_pm_late_init();
+	omap3_pm_init();
+}
+
+void __init omap35xx_init_late(void)
+{
+	omap_mux_late_init();
+	omap2_common_pm_late_init();
+	omap3_pm_init();
+}
+
+void __init omap3630_init_late(void)
+{
+	omap_mux_late_init();
+	omap2_common_pm_late_init();
+	omap3_pm_init();
+}
+
+void __init am35xx_init_late(void)
+{
+	omap_mux_late_init();
+	omap2_common_pm_late_init();
+	omap3_pm_init();
+}
+
+void __init ti81xx_init_late(void)
+{
+	omap_mux_late_init();
+	omap2_common_pm_late_init();
+	omap3_pm_init();
+}
 #endif
 
 #ifdef CONFIG_ARCH_OMAP4
@@ -465,6 +491,13 @@
 	omap_hwmod_init_postsetup();
 	omap4xxx_clk_init();
 }
+
+void __init omap4430_init_late(void)
+{
+	omap_mux_late_init();
+	omap2_common_pm_late_init();
+	omap4_pm_init();
+}
 #endif
 
 void __init omap_sdrc_init(struct omap_sdrc_params *sdrc_cs0,
diff --git a/arch/arm/mach-omap2/iomap.h b/arch/arm/mach-omap2/iomap.h
index 0812b15..80b8892 100644
--- a/arch/arm/mach-omap2/iomap.h
+++ b/arch/arm/mach-omap2/iomap.h
@@ -37,9 +37,6 @@
 #define OMAP4_L3_PER_IO_OFFSET	0xb1100000
 #define OMAP4_L3_PER_IO_ADDRESS(pa)	IOMEM((pa) + OMAP4_L3_PER_IO_OFFSET)
 
-#define OMAP4_GPMC_IO_OFFSET		0xa9000000
-#define OMAP4_GPMC_IO_ADDRESS(pa)	IOMEM((pa) + OMAP4_GPMC_IO_OFFSET)
-
 #define OMAP2_EMU_IO_OFFSET		0xaa800000	/* Emulation */
 #define OMAP2_EMU_IO_ADDRESS(pa)	IOMEM((pa) + OMAP2_EMU_IO_OFFSET)
 
@@ -170,28 +167,3 @@
 #define L4_ABE_44XX_VIRT	(L4_ABE_44XX_PHYS + OMAP2_L4_IO_OFFSET)
 #define L4_ABE_44XX_SIZE	SZ_1M
 
-#define L4_EMU_44XX_PHYS	L4_EMU_44XX_BASE
-						/* 0x54000000 --> 0xfe800000 */
-#define L4_EMU_44XX_VIRT	(L4_EMU_44XX_PHYS + OMAP2_EMU_IO_OFFSET)
-#define L4_EMU_44XX_SIZE	SZ_8M
-
-#define OMAP44XX_GPMC_PHYS	OMAP44XX_GPMC_BASE
-						/* 0x50000000 --> 0xf9000000 */
-#define OMAP44XX_GPMC_VIRT	(OMAP44XX_GPMC_PHYS + OMAP4_GPMC_IO_OFFSET)
-#define OMAP44XX_GPMC_SIZE	SZ_1M
-
-
-#define OMAP44XX_EMIF1_PHYS	OMAP44XX_EMIF1_BASE
-						/* 0x4c000000 --> 0xfd100000 */
-#define OMAP44XX_EMIF1_VIRT	(OMAP44XX_EMIF1_PHYS + OMAP4_L3_PER_IO_OFFSET)
-#define OMAP44XX_EMIF1_SIZE	SZ_1M
-
-#define OMAP44XX_EMIF2_PHYS	OMAP44XX_EMIF2_BASE
-						/* 0x4d000000 --> 0xfd200000 */
-#define OMAP44XX_EMIF2_SIZE	SZ_1M
-#define OMAP44XX_EMIF2_VIRT	(OMAP44XX_EMIF1_VIRT + OMAP44XX_EMIF1_SIZE)
-
-#define OMAP44XX_DMM_PHYS	OMAP44XX_DMM_BASE
-						/* 0x4e000000 --> 0xfd300000 */
-#define OMAP44XX_DMM_SIZE	SZ_1M
-#define OMAP44XX_DMM_VIRT	(OMAP44XX_EMIF2_VIRT + OMAP44XX_EMIF2_SIZE)
diff --git a/arch/arm/mach-omap2/irq.c b/arch/arm/mach-omap2/irq.c
index 1ecf545..6038a8c 100644
--- a/arch/arm/mach-omap2/irq.c
+++ b/arch/arm/mach-omap2/irq.c
@@ -149,6 +149,7 @@
 	ct->chip.irq_ack = omap_mask_ack_irq;
 	ct->chip.irq_mask = irq_gc_mask_disable_reg;
 	ct->chip.irq_unmask = irq_gc_unmask_enable_reg;
+	ct->chip.flags |= IRQCHIP_SKIP_SET_WAKE;
 
 	ct->regs.enable = INTC_MIR_CLEAR0;
 	ct->regs.disable = INTC_MIR_SET0;
@@ -231,7 +232,7 @@
 			goto out;
 
 		irqnr = readl_relaxed(base_addr + 0xd8);
-#ifdef CONFIG_SOC_OMAPTI81XX
+#ifdef CONFIG_SOC_TI81XX
 		if (irqnr)
 			goto out;
 		irqnr = readl_relaxed(base_addr + 0xf8);
diff --git a/arch/arm/mach-omap2/mux.c b/arch/arm/mach-omap2/mux.c
index 3268ee2..9fe6829 100644
--- a/arch/arm/mach-omap2/mux.c
+++ b/arch/arm/mach-omap2/mux.c
@@ -41,6 +41,7 @@
 #include "control.h"
 #include "mux.h"
 #include "prm.h"
+#include "common.h"
 
 #define OMAP_MUX_BASE_OFFSET		0x30	/* Offset from CTRL_BASE */
 #define OMAP_MUX_BASE_SZ		0x5ca
@@ -217,8 +218,7 @@
 	return -ENODEV;
 }
 
-static int __init
-omap_mux_get_by_name(const char *muxname,
+int __init omap_mux_get_by_name(const char *muxname,
 			struct omap_mux_partition **found_partition,
 			struct omap_mux **found_mux)
 {
@@ -788,7 +788,7 @@
 }
 
 /* Free all data except for GPIO pins unless CONFIG_DEBUG_FS is set */
-static int __init omap_mux_late_init(void)
+int __init omap_mux_late_init(void)
 {
 	struct omap_mux_partition *partition;
 	int ret;
@@ -823,7 +823,6 @@
 
 	return 0;
 }
-late_initcall(omap_mux_late_init);
 
 static void __init omap_mux_package_fixup(struct omap_mux *p,
 					struct omap_mux *superset)
diff --git a/arch/arm/mach-omap2/mux.h b/arch/arm/mach-omap2/mux.h
index 69fe060..471e62a7 100644
--- a/arch/arm/mach-omap2/mux.h
+++ b/arch/arm/mach-omap2/mux.h
@@ -59,6 +59,7 @@
 #define OMAP_PIN_OFF_WAKEUPENABLE	OMAP_WAKEUP_EN
 
 #define OMAP_MODE_GPIO(x)	(((x) & OMAP_MUX_MODE7) == OMAP_MUX_MODE4)
+#define OMAP_MODE_UART(x)	(((x) & OMAP_MUX_MODE7) == OMAP_MUX_MODE0)
 
 /* Flags for omapX_mux_init */
 #define OMAP_PACKAGE_MASK		0xffff
@@ -225,8 +226,18 @@
  */
 void omap_hwmod_mux(struct omap_hwmod_mux_info *hmux, u8 state);
 
+int omap_mux_get_by_name(const char *muxname,
+		struct omap_mux_partition **found_partition,
+		struct omap_mux **found_mux);
 #else
 
+static inline int omap_mux_get_by_name(const char *muxname,
+		struct omap_mux_partition **found_partition,
+		struct omap_mux **found_mux)
+{
+	return 0;
+}
+
 static inline int omap_mux_init_gpio(int gpio, int val)
 {
 	return 0;
diff --git a/arch/arm/mach-omap2/omap_hwmod.c b/arch/arm/mach-omap2/omap_hwmod.c
index bf86f7e..7731936 100644
--- a/arch/arm/mach-omap2/omap_hwmod.c
+++ b/arch/arm/mach-omap2/omap_hwmod.c
@@ -530,7 +530,7 @@
 	if (oh->class->sysc->idlemodes & SIDLE_SMART_WKUP)
 		_set_slave_idlemode(oh, HWMOD_IDLEMODE_SMART, v);
 	if (oh->class->sysc->idlemodes & MSTANDBY_SMART_WKUP)
-		_set_master_standbymode(oh, HWMOD_IDLEMODE_SMART_WKUP, v);
+		_set_master_standbymode(oh, HWMOD_IDLEMODE_SMART, v);
 
 	/* XXX test pwrdm_get_wken for this hwmod's subsystem */
 
diff --git a/arch/arm/mach-omap2/omap_hwmod_3xxx_data.c b/arch/arm/mach-omap2/omap_hwmod_3xxx_data.c
index fd48797..b26d3c9 100644
--- a/arch/arm/mach-omap2/omap_hwmod_3xxx_data.c
+++ b/arch/arm/mach-omap2/omap_hwmod_3xxx_data.c
@@ -3306,7 +3306,7 @@
 	    rev == OMAP3430_REV_ES2_1 || rev == OMAP3430_REV_ES3_0 ||
 	    rev == OMAP3430_REV_ES3_1 || rev == OMAP3430_REV_ES3_1_2) {
 		h = omap34xx_hwmod_ocp_ifs;
-	} else if (rev == OMAP3517_REV_ES1_0 || rev == OMAP3517_REV_ES1_1) {
+	} else if (rev == AM35XX_REV_ES1_0 || rev == AM35XX_REV_ES1_1) {
 		h = am35xx_hwmod_ocp_ifs;
 	} else if (rev == OMAP3630_REV_ES1_0 || rev == OMAP3630_REV_ES1_1 ||
 		   rev == OMAP3630_REV_ES1_2) {
diff --git a/arch/arm/mach-omap2/omap_hwmod_44xx_data.c b/arch/arm/mach-omap2/omap_hwmod_44xx_data.c
index 950454a..f30e861 100644
--- a/arch/arm/mach-omap2/omap_hwmod_44xx_data.c
+++ b/arch/arm/mach-omap2/omap_hwmod_44xx_data.c
@@ -393,8 +393,7 @@
 	.rev_offs	= 0x0000,
 	.sysc_offs	= 0x0004,
 	.sysc_flags	= SYSC_HAS_SIDLEMODE,
-	.idlemodes	= (SIDLE_FORCE | SIDLE_NO | SIDLE_SMART |
-			   SIDLE_SMART_WKUP),
+	.idlemodes	= (SIDLE_FORCE | SIDLE_NO),
 	.sysc_fields	= &omap_hwmod_sysc_type1,
 };
 
@@ -854,6 +853,11 @@
 	.name		= "dss_hdmi",
 	.class		= &omap44xx_hdmi_hwmod_class,
 	.clkdm_name	= "l3_dss_clkdm",
+	/*
+	 * HDMI audio requires the use of no-idle mode. Hence,
+	 * the idle mode is set by software.
+	 */
+	.flags		= HWMOD_SWSUP_SIDLE,
 	.mpu_irqs	= omap44xx_dss_hdmi_irqs,
 	.sdma_reqs	= omap44xx_dss_hdmi_sdma_reqs,
 	.main_clk	= "dss_48mhz_clk",
diff --git a/arch/arm/mach-omap2/omap_l3_smx.c b/arch/arm/mach-omap2/omap_l3_smx.c
index a05a62f..acc2164 100644
--- a/arch/arm/mach-omap2/omap_l3_smx.c
+++ b/arch/arm/mach-omap2/omap_l3_smx.c
@@ -155,10 +155,11 @@
 	u8 multi = error & L3_ERROR_LOG_MULTI;
 	u32 address = omap3_l3_decode_addr(error_addr);
 
-	WARN(true, "%s seen by %s %s at address %x\n",
+	pr_err("%s seen by %s %s at address %x\n",
 			omap3_l3_code_string(code),
 			omap3_l3_initiator_string(initid),
 			multi ? "Multiple Errors" : "", address);
+	WARN_ON(1);
 
 	return IRQ_HANDLED;
 }
diff --git a/arch/arm/mach-omap2/omap_phy_internal.c b/arch/arm/mach-omap2/omap_phy_internal.c
index 4c90477..d52651a 100644
--- a/arch/arm/mach-omap2/omap_phy_internal.c
+++ b/arch/arm/mach-omap2/omap_phy_internal.c
@@ -239,21 +239,15 @@
 
 	devconf2 &= ~CONF2_OTGMODE;
 	switch (musb_mode) {
-#ifdef	CONFIG_USB_MUSB_HDRC_HCD
 	case MUSB_HOST:		/* Force VBUS valid, ID = 0 */
 		devconf2 |= CONF2_FORCE_HOST;
 		break;
-#endif
-#ifdef	CONFIG_USB_GADGET_MUSB_HDRC
 	case MUSB_PERIPHERAL:	/* Force VBUS valid, ID = 1 */
 		devconf2 |= CONF2_FORCE_DEVICE;
 		break;
-#endif
-#ifdef	CONFIG_USB_MUSB_OTG
 	case MUSB_OTG:		/* Don't override the VBUS/ID comparators */
 		devconf2 |= CONF2_NO_OVERRIDE;
 		break;
-#endif
 	default:
 		pr_info(KERN_INFO "Unsupported mode %u\n", musb_mode);
 	}
diff --git a/arch/arm/mach-omap2/pm.c b/arch/arm/mach-omap2/pm.c
index d0c1c96..9cb5ced 100644
--- a/arch/arm/mach-omap2/pm.c
+++ b/arch/arm/mach-omap2/pm.c
@@ -295,7 +295,7 @@
 }
 postcore_initcall(omap2_common_pm_init);
 
-static int __init omap2_common_pm_late_init(void)
+int __init omap2_common_pm_late_init(void)
 {
 	/*
 	 * In the case of DT, the PMIC and SR initialization will be done using
@@ -322,4 +322,3 @@
 
 	return 0;
 }
-late_initcall(omap2_common_pm_late_init);
diff --git a/arch/arm/mach-omap2/pm24xx.c b/arch/arm/mach-omap2/pm24xx.c
index facfffc..2edeffc 100644
--- a/arch/arm/mach-omap2/pm24xx.c
+++ b/arch/arm/mach-omap2/pm24xx.c
@@ -298,13 +298,10 @@
 				WKUP_MOD, PM_WKEN);
 }
 
-static int __init omap2_pm_init(void)
+int __init omap2_pm_init(void)
 {
 	u32 l;
 
-	if (!cpu_is_omap24xx())
-		return -ENODEV;
-
 	printk(KERN_INFO "Power Management for OMAP2 initializing\n");
 	l = omap2_prm_read_mod_reg(OCP_MOD, OMAP2_PRCM_REVISION_OFFSET);
 	printk(KERN_INFO "PRCM revision %d.%d\n", (l >> 4) & 0x0f, l & 0x0f);
@@ -370,17 +367,13 @@
 	 * These routines need to be in SRAM as that's the only
 	 * memory the MPU can see when it wakes up.
 	 */
-	if (cpu_is_omap24xx()) {
-		omap2_sram_idle = omap_sram_push(omap24xx_idle_loop_suspend,
-						 omap24xx_idle_loop_suspend_sz);
+	omap2_sram_idle = omap_sram_push(omap24xx_idle_loop_suspend,
+					 omap24xx_idle_loop_suspend_sz);
 
-		omap2_sram_suspend = omap_sram_push(omap24xx_cpu_suspend,
-						    omap24xx_cpu_suspend_sz);
-	}
+	omap2_sram_suspend = omap_sram_push(omap24xx_cpu_suspend,
+					    omap24xx_cpu_suspend_sz);
 
 	arm_pm_idle = omap2_pm_idle;
 
 	return 0;
 }
-
-late_initcall(omap2_pm_init);
diff --git a/arch/arm/mach-omap2/pm34xx.c b/arch/arm/mach-omap2/pm34xx.c
index 8b43aef..3a595e8 100644
--- a/arch/arm/mach-omap2/pm34xx.c
+++ b/arch/arm/mach-omap2/pm34xx.c
@@ -697,15 +697,12 @@
 	}
 }
 
-static int __init omap3_pm_init(void)
+int __init omap3_pm_init(void)
 {
 	struct power_state *pwrst, *tmp;
 	struct clockdomain *neon_clkdm, *mpu_clkdm;
 	int ret;
 
-	if (!cpu_is_omap34xx())
-		return -ENODEV;
-
 	if (!omap3_has_io_chain_ctrl())
 		pr_warning("PM: no software I/O chain control; some wakeups may be lost\n");
 
@@ -727,6 +724,7 @@
 	ret = request_irq(omap_prcm_event_to_irq("io"),
 		_prcm_int_handle_io, IRQF_SHARED | IRQF_NO_SUSPEND, "pm_io",
 		omap3_pm_init);
+	enable_irq(omap_prcm_event_to_irq("io"));
 
 	if (ret) {
 		pr_err("pm: Failed to request pm_io irq\n");
@@ -804,5 +802,3 @@
 err1:
 	return ret;
 }
-
-late_initcall(omap3_pm_init);
diff --git a/arch/arm/mach-omap2/pm44xx.c b/arch/arm/mach-omap2/pm44xx.c
index 8856253..ea24174 100644
--- a/arch/arm/mach-omap2/pm44xx.c
+++ b/arch/arm/mach-omap2/pm44xx.c
@@ -141,15 +141,12 @@
  * Initializes all powerdomain and clockdomain target states
  * and all PRCM settings.
  */
-static int __init omap4_pm_init(void)
+int __init omap4_pm_init(void)
 {
 	int ret;
 	struct clockdomain *emif_clkdm, *mpuss_clkdm, *l3_1_clkdm, *l4wkup;
 	struct clockdomain *ducati_clkdm, *l3_2_clkdm, *l4_per_clkdm;
 
-	if (!cpu_is_omap44xx())
-		return -ENODEV;
-
 	if (omap_rev() == OMAP4430_REV_ES1_0) {
 		WARN(1, "Power Management not supported on OMAP4430 ES1.0\n");
 		return -ENODEV;
@@ -217,4 +214,3 @@
 err2:
 	return ret;
 }
-late_initcall(omap4_pm_init);
diff --git a/arch/arm/mach-omap2/powerdomains3xxx_data.c b/arch/arm/mach-omap2/powerdomains3xxx_data.c
index b7ea468..fb0a0a6 100644
--- a/arch/arm/mach-omap2/powerdomains3xxx_data.c
+++ b/arch/arm/mach-omap2/powerdomains3xxx_data.c
@@ -311,7 +311,7 @@
 		 rev == OMAP3430_REV_ES3_0 || rev == OMAP3630_REV_ES1_0)
 		pwrdm_register_pwrdms(powerdomains_omap3430es2_es3_0);
 	else if (rev == OMAP3430_REV_ES3_1 || rev == OMAP3430_REV_ES3_1_2 ||
-		 rev == OMAP3517_REV_ES1_0 || rev == OMAP3517_REV_ES1_1 ||
+		 rev == AM35XX_REV_ES1_0 || rev == AM35XX_REV_ES1_1 ||
 		 rev == OMAP3630_REV_ES1_1 || rev == OMAP3630_REV_ES1_2)
 		pwrdm_register_pwrdms(powerdomains_omap3430es3_1plus);
 	else
diff --git a/arch/arm/mach-omap2/prm2xxx_3xxx.c b/arch/arm/mach-omap2/prm2xxx_3xxx.c
index 9ce7654..21cb740 100644
--- a/arch/arm/mach-omap2/prm2xxx_3xxx.c
+++ b/arch/arm/mach-omap2/prm2xxx_3xxx.c
@@ -15,6 +15,7 @@
 #include <linux/errno.h>
 #include <linux/err.h>
 #include <linux/io.h>
+#include <linux/irq.h>
 
 #include "common.h"
 #include <plat/cpu.h>
@@ -303,8 +304,15 @@
 
 static int __init omap3xxx_prcm_init(void)
 {
-	if (cpu_is_omap34xx())
-		return omap_prcm_register_chain_handler(&omap3_prcm_irq_setup);
-	return 0;
+	int ret = 0;
+
+	if (cpu_is_omap34xx()) {
+		ret = omap_prcm_register_chain_handler(&omap3_prcm_irq_setup);
+		if (!ret)
+			irq_set_status_flags(omap_prcm_event_to_irq("io"),
+					     IRQ_NOAUTOEN);
+	}
+
+	return ret;
 }
 subsys_initcall(omap3xxx_prcm_init);
diff --git a/arch/arm/mach-omap2/serial.c b/arch/arm/mach-omap2/serial.c
index 292d4aa..c1b93c7 100644
--- a/arch/arm/mach-omap2/serial.c
+++ b/arch/arm/mach-omap2/serial.c
@@ -57,6 +57,7 @@
 
 	struct list_head node;
 	struct omap_hwmod *oh;
+	struct omap_device_pad default_omap_uart_pads[2];
 };
 
 static LIST_HEAD(uart_list);
@@ -126,11 +127,70 @@
 #endif /* CONFIG_PM */
 
 #ifdef CONFIG_OMAP_MUX
-static void omap_serial_fill_default_pads(struct omap_board_data *bdata)
+
+#define OMAP_UART_DEFAULT_PAD_NAME_LEN	28
+static char rx_pad_name[OMAP_UART_DEFAULT_PAD_NAME_LEN],
+		tx_pad_name[OMAP_UART_DEFAULT_PAD_NAME_LEN] __initdata;
+
+static void  __init
+omap_serial_fill_uart_tx_rx_pads(struct omap_board_data *bdata,
+				struct omap_uart_state *uart)
 {
+	uart->default_omap_uart_pads[0].name = rx_pad_name;
+	uart->default_omap_uart_pads[0].flags = OMAP_DEVICE_PAD_REMUX |
+							OMAP_DEVICE_PAD_WAKEUP;
+	uart->default_omap_uart_pads[0].enable = OMAP_PIN_INPUT |
+							OMAP_MUX_MODE0;
+	uart->default_omap_uart_pads[0].idle = OMAP_PIN_INPUT | OMAP_MUX_MODE0;
+	uart->default_omap_uart_pads[1].name = tx_pad_name;
+	uart->default_omap_uart_pads[1].enable = OMAP_PIN_OUTPUT |
+							OMAP_MUX_MODE0;
+	bdata->pads = uart->default_omap_uart_pads;
+	bdata->pads_cnt = ARRAY_SIZE(uart->default_omap_uart_pads);
+}
+
+static void  __init omap_serial_check_wakeup(struct omap_board_data *bdata,
+						struct omap_uart_state *uart)
+{
+	struct omap_mux_partition *tx_partition = NULL, *rx_partition = NULL;
+	struct omap_mux *rx_mux = NULL, *tx_mux = NULL;
+	char *rx_fmt, *tx_fmt;
+	int uart_nr = bdata->id + 1;
+
+	if (bdata->id != 2) {
+		rx_fmt = "uart%d_rx.uart%d_rx";
+		tx_fmt = "uart%d_tx.uart%d_tx";
+	} else {
+		rx_fmt = "uart%d_rx_irrx.uart%d_rx_irrx";
+		tx_fmt = "uart%d_tx_irtx.uart%d_tx_irtx";
+	}
+
+	snprintf(rx_pad_name, OMAP_UART_DEFAULT_PAD_NAME_LEN, rx_fmt,
+			uart_nr, uart_nr);
+	snprintf(tx_pad_name, OMAP_UART_DEFAULT_PAD_NAME_LEN, tx_fmt,
+			uart_nr, uart_nr);
+
+	if (omap_mux_get_by_name(rx_pad_name, &rx_partition, &rx_mux) >= 0 &&
+			omap_mux_get_by_name
+				(tx_pad_name, &tx_partition, &tx_mux) >= 0) {
+		u16 tx_mode, rx_mode;
+
+		tx_mode = omap_mux_read(tx_partition, tx_mux->reg_offset);
+		rx_mode = omap_mux_read(rx_partition, rx_mux->reg_offset);
+
+		/*
+		 * Check if the UART is used in the default TX/RX mode, i.e. in
+		 * mux mode0; if so, configure the RX pin for wakeup capability.
+		 */
+		if (OMAP_MODE_UART(rx_mode) && OMAP_MODE_UART(tx_mode))
+			omap_serial_fill_uart_tx_rx_pads(bdata, uart);
+	}
 }
 #else
-static void omap_serial_fill_default_pads(struct omap_board_data *bdata) {}
+static void __init omap_serial_check_wakeup(struct omap_board_data *bdata,
+		struct omap_uart_state *uart)
+{
+}
 #endif
 
 static char *cmdline_find_option(char *str)
@@ -287,8 +347,7 @@
 		bdata.pads = NULL;
 		bdata.pads_cnt = 0;
 
-		if (cpu_is_omap44xx() || cpu_is_omap34xx())
-			omap_serial_fill_default_pads(&bdata);
+		omap_serial_check_wakeup(&bdata, uart);
 
 		if (!info)
 			omap_serial_init_port(&bdata, NULL);
diff --git a/arch/arm/mach-omap2/timer.c b/arch/arm/mach-omap2/timer.c
index 1b78358..840929b 100644
--- a/arch/arm/mach-omap2/timer.c
+++ b/arch/arm/mach-omap2/timer.c
@@ -90,7 +90,7 @@
 }
 
 static struct irqaction omap2_gp_timer_irq = {
-	.name		= "gp timer",
+	.name		= "gp_timer",
 	.flags		= IRQF_DISABLED | IRQF_TIMER | IRQF_IRQPOLL,
 	.handler	= omap2_gp_timer_interrupt,
 };
@@ -132,7 +132,7 @@
 }
 
 static struct clock_event_device clockevent_gpt = {
-	.name		= "gp timer",
+	.name		= "gp_timer",
 	.features       = CLOCK_EVT_FEAT_PERIODIC | CLOCK_EVT_FEAT_ONESHOT,
 	.shift		= 32,
 	.set_next_event	= omap2_gp_timer_set_next_event,
@@ -236,22 +236,8 @@
 }
 
 /* Clocksource code */
-
-#ifdef CONFIG_OMAP_32K_TIMER
-/*
- * When 32k-timer is enabled, don't use GPTimer for clocksource
- * instead, just leave default clocksource which uses the 32k
- * sync counter.  See clocksource setup in plat-omap/counter_32k.c
- */
-
-static void __init omap2_gp_clocksource_init(int unused, const char *dummy)
-{
-	omap_init_clocksource_32k();
-}
-
-#else
-
 static struct omap_dm_timer clksrc;
+static bool use_gptimer_clksrc;
 
 /*
  * clocksource
@@ -262,7 +248,7 @@
 }
 
 static struct clocksource clocksource_gpt = {
-	.name		= "gp timer",
+	.name		= "gp_timer",
 	.rating		= 300,
 	.read		= clocksource_read_cycles,
 	.mask		= CLOCKSOURCE_MASK(32),
@@ -278,7 +264,46 @@
 }
 
 /* Setup free-running counter for clocksource */
-static void __init omap2_gp_clocksource_init(int gptimer_id,
+static int __init omap2_sync32k_clocksource_init(void)
+{
+	int ret;
+	struct omap_hwmod *oh;
+	void __iomem *vbase;
+	const char *oh_name = "counter_32k";
+
+	/*
+	 * First check that hwmod data is available for the sync32k counter
+	 */
+	oh = omap_hwmod_lookup(oh_name);
+	if (!oh || oh->slaves_cnt == 0)
+		return -ENODEV;
+
+	omap_hwmod_setup_one(oh_name);
+
+	vbase = omap_hwmod_get_mpu_rt_va(oh);
+	if (!vbase) {
+		pr_warn("%s: failed to get counter_32k resource\n", __func__);
+		return -ENXIO;
+	}
+
+	ret = omap_hwmod_enable(oh);
+	if (ret) {
+		pr_warn("%s: failed to enable counter_32k module (%d)\n",
+							__func__, ret);
+		return ret;
+	}
+
+	ret = omap_init_clocksource_32k(vbase);
+	if (ret) {
+		pr_warn("%s: failed to initialize counter_32k as a clocksource (%d)\n",
+							__func__, ret);
+		omap_hwmod_idle(oh);
+	}
+
+	return ret;
+}
+
+static void __init omap2_gptimer_clocksource_init(int gptimer_id,
 						const char *fck_source)
 {
 	int res;
@@ -286,9 +311,6 @@
 	res = omap_dm_timer_init_one(&clksrc, gptimer_id, fck_source);
 	BUG_ON(res);
 
-	pr_info("OMAP clocksource: GPTIMER%d at %lu Hz\n",
-		gptimer_id, clksrc.rate);
-
 	__omap_dm_timer_load_start(&clksrc,
 			OMAP_TIMER_CTRL_ST | OMAP_TIMER_CTRL_AR, 0, 1);
 	setup_sched_clock(dmtimer_read_sched_clock, 32, clksrc.rate);
@@ -296,15 +318,36 @@
 	if (clocksource_register_hz(&clocksource_gpt, clksrc.rate))
 		pr_err("Could not register clocksource %s\n",
 			clocksource_gpt.name);
+	else
+		pr_info("OMAP clocksource: GPTIMER%d at %lu Hz\n",
+			gptimer_id, clksrc.rate);
 }
-#endif
+
+static void __init omap2_clocksource_init(int gptimer_id,
+						const char *fck_source)
+{
+	/*
+	 * Give preference first to the clocksource selected by the user
+	 * on the kernel command line (clocksource="gp_timer").
+	 *
+	 * If no clocksource kernel parameter is given, check whether the
+	 * 32k-sync timer is available; if the 32k_counter module cannot be
+	 * found or registered as a clocksource, execution falls back to
+	 * the gp-timer.
+	 */
+	if (use_gptimer_clksrc == true)
+		omap2_gptimer_clocksource_init(gptimer_id, fck_source);
+	else if (omap2_sync32k_clocksource_init())
+		/* Fall back to gp-timer code */
+		omap2_gptimer_clocksource_init(gptimer_id, fck_source);
+}
 
 #define OMAP_SYS_TIMER_INIT(name, clkev_nr, clkev_src,			\
 				clksrc_nr, clksrc_src)			\
 static void __init omap##name##_timer_init(void)			\
 {									\
 	omap2_gp_clockevent_init((clkev_nr), clkev_src);		\
-	omap2_gp_clocksource_init((clksrc_nr), clksrc_src);		\
+	omap2_clocksource_init((clksrc_nr), clksrc_src);		\
 }
 
 #define OMAP_SYS_TIMER(name)						\
@@ -335,7 +378,7 @@
 static void __init omap4_timer_init(void)
 {
 	omap2_gp_clockevent_init(1, OMAP4_CLKEV_SOURCE);
-	omap2_gp_clocksource_init(2, OMAP4_MPU_SOURCE);
+	omap2_clocksource_init(2, OMAP4_MPU_SOURCE);
 #ifdef CONFIG_LOCAL_TIMERS
 	/* Local timers are not supported on OMAP4430 ES1.0 */
 	if (omap_rev() != OMAP4430_REV_ES1_0) {
@@ -503,3 +546,28 @@
 	return 0;
 }
 arch_initcall(omap2_dm_timer_init);
+
+/**
+ * omap2_override_clocksource - clocksource override with user configuration
+ *
+ * Allows the user to override the default clocksource, using the kernel
+ * parameter clocksource="gp_timer" (for all OMAP2PLUS architectures).
+ *
+ * Note that this uses the standard kernel parameter "clocksource=" and
+ * does not introduce any OMAP-specific interface.
+ */
+static int __init omap2_override_clocksource(char *str)
+{
+	if (!str)
+		return 0;
+	/*
+	 * For the OMAP architecture there are only two options:
+	 *    - sync_32k (default)
+	 *    - gp_timer (sys_clk based)
+	 */
+	if (!strcmp(str, "gp_timer"))
+		use_gptimer_clksrc = true;
+
+	return 0;
+}
+early_param("clocksource", omap2_override_clocksource);
diff --git a/arch/arm/mach-omap2/usb-musb.c b/arch/arm/mach-omap2/usb-musb.c
index 8d5ed77..c4a5768 100644
--- a/arch/arm/mach-omap2/usb-musb.c
+++ b/arch/arm/mach-omap2/usb-musb.c
@@ -41,12 +41,10 @@
 };
 
 static struct musb_hdrc_platform_data musb_plat = {
-#ifdef CONFIG_USB_MUSB_OTG
+#ifdef CONFIG_USB_GADGET_MUSB_HDRC
 	.mode		= MUSB_OTG,
-#elif defined(CONFIG_USB_MUSB_HDRC_HCD)
+#else
 	.mode		= MUSB_HOST,
-#elif defined(CONFIG_USB_GADGET_MUSB_HDRC)
-	.mode		= MUSB_PERIPHERAL,
 #endif
 	/* .clock is set dynamically */
 	.config		= &musb_config,
@@ -90,7 +88,7 @@
 	musb_plat.mode = board_data->mode;
 	musb_plat.extvbus = board_data->extvbus;
 
-	if (cpu_is_omap3517() || cpu_is_omap3505()) {
+	if (soc_is_am35xx()) {
 		oh_name = "am35x_otg_hs";
 		name = "musb-am35x";
 	} else if (cpu_is_ti81xx()) {
diff --git a/arch/arm/mach-omap2/usb-tusb6010.c b/arch/arm/mach-omap2/usb-tusb6010.c
index db84a46..805bea6 100644
--- a/arch/arm/mach-omap2/usb-tusb6010.c
+++ b/arch/arm/mach-omap2/usb-tusb6010.c
@@ -300,7 +300,7 @@
 		printk(error, 3, status);
 		return status;
 	}
-	tusb_resources[2].start = irq + IH_GPIO_BASE;
+	tusb_resources[2].start = gpio_to_irq(irq);
 
 	/* set up memory timings ... can speed them up later */
 	if (!ps_refclk) {
diff --git a/arch/arm/mach-omap2/voltagedomains3xxx_data.c b/arch/arm/mach-omap2/voltagedomains3xxx_data.c
index 57db203..d0103c8 100644
--- a/arch/arm/mach-omap2/voltagedomains3xxx_data.c
+++ b/arch/arm/mach-omap2/voltagedomains3xxx_data.c
@@ -118,7 +118,7 @@
 	}
 #endif
 
-	if (cpu_is_omap3517() || cpu_is_omap3505())
+	if (soc_is_am35xx())
 		voltdms = voltagedomains_am35xx;
 	else
 		voltdms = voltagedomains_omap3;
diff --git a/arch/arm/mach-orion5x/common.c b/arch/arm/mach-orion5x/common.c
index e2e9db4..9148b22 100644
--- a/arch/arm/mach-orion5x/common.c
+++ b/arch/arm/mach-orion5x/common.c
@@ -18,6 +18,7 @@
 #include <linux/mv643xx_i2c.h>
 #include <linux/ata_platform.h>
 #include <linux/delay.h>
+#include <linux/clk-provider.h>
 #include <net/dsa.h>
 #include <asm/page.h>
 #include <asm/setup.h>
@@ -70,6 +71,19 @@
 
 
 /*****************************************************************************
+ * CLK tree
+ ****************************************************************************/
+static struct clk *tclk;
+
+static void __init clk_init(void)
+{
+	tclk = clk_register_fixed_rate(NULL, "tclk", NULL, CLK_IS_ROOT,
+				       orion5x_tclk);
+
+	orion_clkdev_init(tclk);
+}
+
+/*****************************************************************************
  * EHCI0
  ****************************************************************************/
 void __init orion5x_ehci0_init(void)
@@ -95,7 +109,7 @@
 {
 	orion_ge00_init(eth_data,
 			ORION5X_ETH_PHYS_BASE, IRQ_ORION5X_ETH_SUM,
-			IRQ_ORION5X_ETH_ERR, orion5x_tclk);
+			IRQ_ORION5X_ETH_ERR);
 }
 
 
@@ -132,7 +146,7 @@
  ****************************************************************************/
 void __init orion5x_spi_init()
 {
-	orion_spi_init(SPI_PHYS_BASE, orion5x_tclk);
+	orion_spi_init(SPI_PHYS_BASE);
 }
 
 
@@ -142,7 +156,7 @@
 void __init orion5x_uart0_init(void)
 {
 	orion_uart0_init(UART0_VIRT_BASE, UART0_PHYS_BASE,
-			 IRQ_ORION5X_UART0, orion5x_tclk);
+			 IRQ_ORION5X_UART0, tclk);
 }
 
 /*****************************************************************************
@@ -151,7 +165,7 @@
 void __init orion5x_uart1_init(void)
 {
 	orion_uart1_init(UART1_VIRT_BASE, UART1_PHYS_BASE,
-			 IRQ_ORION5X_UART1, orion5x_tclk);
+			 IRQ_ORION5X_UART1, tclk);
 }
 
 /*****************************************************************************
@@ -179,7 +193,7 @@
  ****************************************************************************/
 void __init orion5x_wdt_init(void)
 {
-	orion_wdt_init(orion5x_tclk);
+	orion_wdt_init();
 }
 
 
@@ -276,6 +290,9 @@
 	 */
 	orion5x_setup_cpu_mbus_bridge();
 
+	/* Setup root of clk tree */
+	clk_init();
+
 	/*
 	 * Don't issue "Wait for Interrupt" instruction if we are
 	 * running on D0 5281 silicon.
diff --git a/arch/arm/mach-orion5x/include/mach/bridge-regs.h b/arch/arm/mach-orion5x/include/mach/bridge-regs.h
index 96484bc..11a3c1e 100644
--- a/arch/arm/mach-orion5x/include/mach/bridge-regs.h
+++ b/arch/arm/mach-orion5x/include/mach/bridge-regs.h
@@ -35,5 +35,5 @@
 #define MAIN_IRQ_MASK		(ORION5X_BRIDGE_VIRT_BASE | 0x204)
 
 #define TIMER_VIRT_BASE		(ORION5X_BRIDGE_VIRT_BASE | 0x300)
-
+#define TIMER_PHYS_BASE		(ORION5X_BRIDGE_PHYS_BASE | 0x300)
 #endif
diff --git a/arch/arm/mach-orion5x/include/mach/io.h b/arch/arm/mach-orion5x/include/mach/io.h
new file mode 100644
index 0000000..1aa5d0a
--- /dev/null
+++ b/arch/arm/mach-orion5x/include/mach/io.h
@@ -0,0 +1,22 @@
+/*
+ * arch/arm/mach-orion5x/include/mach/io.h
+ *
+ * This file is licensed under the terms of the GNU General Public
+ * License version 2.  This program is licensed "as is" without any
+ * warranty of any kind, whether express or implied.
+ */
+
+#ifndef __ASM_ARCH_IO_H
+#define __ASM_ARCH_IO_H
+
+#include <mach/orion5x.h>
+#include <asm/sizes.h>
+
+#define IO_SPACE_LIMIT		SZ_2M
+static inline void __iomem *__io(unsigned long addr)
+{
+	return (void __iomem *)(addr + ORION5X_PCIE_IO_VIRT_BASE);
+}
+
+#define __io(a)			 __io(a)
+#endif
diff --git a/arch/arm/mach-orion5x/include/mach/orion5x.h b/arch/arm/mach-orion5x/include/mach/orion5x.h
index 2745f5d..683e085 100644
--- a/arch/arm/mach-orion5x/include/mach/orion5x.h
+++ b/arch/arm/mach-orion5x/include/mach/orion5x.h
@@ -82,6 +82,7 @@
 #define  UART1_VIRT_BASE		(ORION5X_DEV_BUS_VIRT_BASE | 0x2100)
 
 #define ORION5X_BRIDGE_VIRT_BASE	(ORION5X_REGS_VIRT_BASE | 0x20000)
+#define ORION5X_BRIDGE_PHYS_BASE	(ORION5X_REGS_PHYS_BASE | 0x20000)
 
 #define ORION5X_PCI_VIRT_BASE		(ORION5X_REGS_VIRT_BASE | 0x30000)
 
diff --git a/arch/arm/mach-orion5x/rd88f6183ap-ge-setup.c b/arch/arm/mach-orion5x/rd88f6183ap-ge-setup.c
index e91bf0b..92df49c 100644
--- a/arch/arm/mach-orion5x/rd88f6183ap-ge-setup.c
+++ b/arch/arm/mach-orion5x/rd88f6183ap-ge-setup.c
@@ -16,7 +16,6 @@
 #include <linux/mtd/physmap.h>
 #include <linux/mv643xx_eth.h>
 #include <linux/spi/spi.h>
-#include <linux/spi/orion_spi.h>
 #include <linux/spi/flash.h>
 #include <linux/ethtool.h>
 #include <net/dsa.h>
diff --git a/arch/arm/mach-orion5x/ts78xx-setup.c b/arch/arm/mach-orion5x/ts78xx-setup.c
index a74f3cf..b420327 100644
--- a/arch/arm/mach-orion5x/ts78xx-setup.c
+++ b/arch/arm/mach-orion5x/ts78xx-setup.c
@@ -251,8 +251,6 @@
 		readsb(io_base, buf, len);
 }
 
-const char *ts_nand_part_probes[] = { "cmdlinepart", NULL };
-
 static struct mtd_partition ts78xx_ts_nand_parts[] = {
 	{
 		.name		= "mbr",
@@ -277,7 +275,6 @@
 static struct platform_nand_data ts78xx_ts_nand_data = {
 	.chip	= {
 		.nr_chips		= 1,
-		.part_probe_types	= ts_nand_part_probes,
 		.partitions		= ts78xx_ts_nand_parts,
 		.nr_partitions		= ARRAY_SIZE(ts78xx_ts_nand_parts),
 		.chip_delay		= 15,
diff --git a/arch/arm/mach-pnx4008/core.c b/arch/arm/mach-pnx4008/core.c
index be4c928..a00d2f1 100644
--- a/arch/arm/mach-pnx4008/core.c
+++ b/arch/arm/mach-pnx4008/core.c
@@ -265,6 +265,17 @@
 	soft_restart(0);
 }
 
+#ifdef CONFIG_PM
+extern int pnx4008_pm_init(void);
+#else
+static inline int pnx4008_pm_init(void) { return 0; }
+#endif
+
+void __init pnx4008_init_late(void)
+{
+	pnx4008_pm_init();
+}
+
 extern struct sys_timer pnx4008_timer;
 
 MACHINE_START(PNX4008, "Philips PNX4008")
@@ -273,6 +284,7 @@
 	.map_io 		= pnx4008_map_io,
 	.init_irq 		= pnx4008_init_irq,
 	.init_machine 		= pnx4008_init,
+	.init_late		= pnx4008_init_late,
 	.timer 			= &pnx4008_timer,
 	.restart		= pnx4008_restart,
 MACHINE_END
diff --git a/arch/arm/mach-pnx4008/pm.c b/arch/arm/mach-pnx4008/pm.c
index f3e60a0..26f8d06 100644
--- a/arch/arm/mach-pnx4008/pm.c
+++ b/arch/arm/mach-pnx4008/pm.c
@@ -124,7 +124,7 @@
 	.valid = pnx4008_pm_valid,
 };
 
-static int __init pnx4008_pm_init(void)
+int __init pnx4008_pm_init(void)
 {
 	u32 sram_size_to_allocate;
 
@@ -151,5 +151,3 @@
 	suspend_set_ops(&pnx4008_pm_ops);
 	return 0;
 }
-
-late_initcall(pnx4008_pm_init);
diff --git a/arch/arm/mach-prima2/common.h b/arch/arm/mach-prima2/common.h
index b28a930..60d826f 100644
--- a/arch/arm/mach-prima2/common.h
+++ b/arch/arm/mach-prima2/common.h
@@ -24,4 +24,10 @@
 extern void __init sirfsoc_map_lluart(void);
 #endif
 
+#ifdef CONFIG_SUSPEND
+extern int sirfsoc_pm_init(void);
+#else
+static inline int sirfsoc_pm_init(void) { return 0; }
+#endif
+
 #endif
diff --git a/arch/arm/mach-prima2/pm.c b/arch/arm/mach-prima2/pm.c
index 26ebb57..fb5a791 100644
--- a/arch/arm/mach-prima2/pm.c
+++ b/arch/arm/mach-prima2/pm.c
@@ -85,12 +85,11 @@
 	.valid = suspend_valid_only_mem,
 };
 
-static int __init sirfsoc_pm_init(void)
+int __init sirfsoc_pm_init(void)
 {
 	suspend_set_ops(&sirfsoc_pm_ops);
 	return 0;
 }
-late_initcall(sirfsoc_pm_init);
 
 static const struct of_device_id pwrc_ids[] = {
 	{ .compatible = "sirf,prima2-pwrc" },
diff --git a/arch/arm/mach-prima2/prima2.c b/arch/arm/mach-prima2/prima2.c
index 02b9c05..8f0429d 100644
--- a/arch/arm/mach-prima2/prima2.c
+++ b/arch/arm/mach-prima2/prima2.c
@@ -25,6 +25,11 @@
 	of_platform_bus_probe(NULL, sirfsoc_of_bus_ids, NULL);
 }
 
+void __init sirfsoc_init_late(void)
+{
+	sirfsoc_pm_init();
+}
+
 static const char *prima2cb_dt_match[] __initdata = {
        "sirf,prima2-cb",
        NULL
@@ -39,6 +44,7 @@
 	.timer		= &sirfsoc_timer,
 	.dma_zone_size	= SZ_256M,
 	.init_machine	= sirfsoc_mach_init,
+	.init_late	= sirfsoc_init_late,
 	.dt_compat      = prima2cb_dt_match,
 	.restart	= sirfsoc_restart,
 MACHINE_END
diff --git a/arch/arm/mach-pxa/balloon3.c b/arch/arm/mach-pxa/balloon3.c
index 56e8ceb..9244493 100644
--- a/arch/arm/mach-pxa/balloon3.c
+++ b/arch/arm/mach-pxa/balloon3.c
@@ -679,8 +679,6 @@
 	},
 };
 
-static const char *balloon3_part_probes[] = { "cmdlinepart", NULL };
-
 struct platform_nand_data balloon3_nand_pdata = {
 	.chip = {
 		.nr_chips	= 4,
@@ -688,7 +686,6 @@
 		.nr_partitions	= ARRAY_SIZE(balloon3_partition_info),
 		.partitions	= balloon3_partition_info,
 		.chip_delay	= 50,
-		.part_probe_types = balloon3_part_probes,
 	},
 	.ctrl = {
 		.hwcontrol	= 0,
diff --git a/arch/arm/mach-pxa/em-x270.c b/arch/arm/mach-pxa/em-x270.c
index a3a4a38..97f82ad 100644
--- a/arch/arm/mach-pxa/em-x270.c
+++ b/arch/arm/mach-pxa/em-x270.c
@@ -338,8 +338,6 @@
 	},
 };
 
-static const char *em_x270_part_probes[] = { "cmdlinepart", NULL };
-
 struct platform_nand_data em_x270_nand_platdata = {
 	.chip = {
 		.nr_chips = 1,
@@ -347,7 +345,6 @@
 		.nr_partitions = ARRAY_SIZE(em_x270_partition_info),
 		.partitions = em_x270_partition_info,
 		.chip_delay = 20,
-		.part_probe_types = em_x270_part_probes,
 	},
 	.ctrl = {
 		.hwcontrol = 0,
diff --git a/arch/arm/mach-pxa/palmtx.c b/arch/arm/mach-pxa/palmtx.c
index 9507605..0da35dc 100644
--- a/arch/arm/mach-pxa/palmtx.c
+++ b/arch/arm/mach-pxa/palmtx.c
@@ -268,8 +268,6 @@
 	},
 };
 
-static const char *palmtx_part_probes[] = { "cmdlinepart", NULL };
-
 struct platform_nand_data palmtx_nand_platdata = {
 	.chip	= {
 		.nr_chips		= 1,
@@ -277,7 +275,6 @@
 		.nr_partitions		= ARRAY_SIZE(palmtx_partition_info),
 		.partitions		= palmtx_partition_info,
 		.chip_delay		= 20,
-		.part_probe_types 	= palmtx_part_probes,
 	},
 	.ctrl	= {
 		.cmd_ctrl	= palmtx_nand_cmd_ctl,
diff --git a/arch/arm/mach-s3c24xx/Kconfig b/arch/arm/mach-s3c24xx/Kconfig
index b34287a..e249611 100644
--- a/arch/arm/mach-s3c24xx/Kconfig
+++ b/arch/arm/mach-s3c24xx/Kconfig
@@ -518,6 +518,11 @@
 	help
 	  Internal config node for S3C2443 DMA support
 
+config S3C2443_SETUP_SPI
+	bool
+	help
+	  Common setup code for SPI GPIO configurations
+
 endif	# CPU_S3C2443 || CPU_S3C2416
 
 if CPU_S3C2443
diff --git a/arch/arm/mach-s3c24xx/Makefile b/arch/arm/mach-s3c24xx/Makefile
index 3518fe8..0ab6ab1 100644
--- a/arch/arm/mach-s3c24xx/Makefile
+++ b/arch/arm/mach-s3c24xx/Makefile
@@ -14,6 +14,8 @@
 
 # core
 
+obj-y				+= common.o
+
 obj-$(CONFIG_CPU_S3C2410)	+= s3c2410.o
 obj-$(CONFIG_S3C2410_DMA)	+= dma-s3c2410.o
 obj-$(CONFIG_S3C2410_PM)	+= pm-s3c2410.o sleep-s3c2410.o
@@ -33,6 +35,10 @@
 
 obj-$(CONFIG_CPU_S3C2443)	+= s3c2443.o irq-s3c2443.o clock-s3c2443.o
 
+# PM
+
+obj-$(CONFIG_PM)		+= pm.o irq-pm.o sleep.o
+
 # common code
 
 obj-$(CONFIG_S3C2443_COMMON)	+= common-s3c2443.o
@@ -91,5 +97,6 @@
 # device setup
 
 obj-$(CONFIG_S3C2416_SETUP_SDHCI_GPIO)	+= setup-sdhci-gpio.o
+obj-$(CONFIG_S3C2443_SETUP_SPI)		+= setup-spi.o
 obj-$(CONFIG_ARCH_S3C24XX)		+= setup-i2c.o
 obj-$(CONFIG_S3C24XX_SETUP_TS)		+= setup-ts.o
diff --git a/arch/arm/mach-s3c24xx/clock-s3c2416.c b/arch/arm/mach-s3c24xx/clock-s3c2416.c
index dbc9ab4..8702ecf 100644
--- a/arch/arm/mach-s3c24xx/clock-s3c2416.c
+++ b/arch/arm/mach-s3c24xx/clock-s3c2416.c
@@ -144,6 +144,7 @@
 	CLKDEV_INIT("s3c-sdhci.0", "mmc_busclk.0", &hsmmc0_clk),
 	CLKDEV_INIT("s3c-sdhci.0", "mmc_busclk.2", &hsmmc_mux0.clk),
 	CLKDEV_INIT("s3c-sdhci.1", "mmc_busclk.2", &hsmmc_mux1.clk),
+	CLKDEV_INIT("s3c64xx-spi.0", "spi_busclk2", &hsspi_mux.clk),
 };
 
 void __init s3c2416_init_clocks(int xtal)
diff --git a/arch/arm/mach-s3c24xx/clock-s3c2443.c b/arch/arm/mach-s3c24xx/clock-s3c2443.c
index efb3ac3..a4c5a52 100644
--- a/arch/arm/mach-s3c24xx/clock-s3c2443.c
+++ b/arch/arm/mach-s3c24xx/clock-s3c2443.c
@@ -179,6 +179,11 @@
 	&clk_hsmmc,
 };
 
+static struct clk_lookup s3c2443_clk_lookup[] = {
+	CLKDEV_INIT("s3c-sdhci.1", "mmc_busclk.2", &clk_hsmmc),
+	CLKDEV_INIT("s3c64xx-spi.0", "spi_busclk2", &clk_hsspi.clk),
+};
+
 void __init s3c2443_init_clocks(int xtal)
 {
 	unsigned long epllcon = __raw_readl(S3C2443_EPLLCON);
@@ -210,6 +215,7 @@
 
 	s3c_register_clocks(init_clocks_off, ARRAY_SIZE(init_clocks_off));
 	s3c_disable_clocks(init_clocks_off, ARRAY_SIZE(init_clocks_off));
+	clkdev_add_table(s3c2443_clk_lookup, ARRAY_SIZE(s3c2443_clk_lookup));
 
 	s3c_pwmclk_init();
 }
diff --git a/arch/arm/mach-s3c24xx/common-s3c2443.c b/arch/arm/mach-s3c24xx/common-s3c2443.c
index 4604315..aeeb2be 100644
--- a/arch/arm/mach-s3c24xx/common-s3c2443.c
+++ b/arch/arm/mach-s3c24xx/common-s3c2443.c
@@ -424,11 +424,6 @@
 		.enable		= s3c2443_clkcon_enable_p,
 		.ctrlbit	= S3C2443_PCLKCON_IIS,
 	}, {
-		.name		= "hsspi",
-		.parent		= &clk_p,
-		.enable		= s3c2443_clkcon_enable_p,
-		.ctrlbit	= S3C2443_PCLKCON_HSSPI,
-	}, {
 		.name		= "adc",
 		.parent		= &clk_p,
 		.enable		= s3c2443_clkcon_enable_p,
@@ -562,6 +557,14 @@
 	.ctrlbit	= S3C2443_HCLKCON_HSMMC,
 };
 
+static struct clk hsspi_clk = {
+	.name		= "spi",
+	.devname	= "s3c64xx-spi.0",
+	.parent		= &clk_p,
+	.enable		= s3c2443_clkcon_enable_p,
+	.ctrlbit	= S3C2443_PCLKCON_HSSPI,
+};
+
 /* EPLLCON compatible enough to get on/off information */
 
 void __init_or_cpufreq s3c2443_common_setup_clocks(pll_fn get_mpll)
@@ -612,6 +615,7 @@
 	&clk_usb_bus,
 	&clk_armdiv,
 	&hsmmc1_clk,
+	&hsspi_clk,
 };
 
 static struct clksrc_clk *clksrcs[] __initdata = {
@@ -629,6 +633,7 @@
 	CLKDEV_INIT(NULL, "clk_uart_baud2", &clk_p),
 	CLKDEV_INIT(NULL, "clk_uart_baud3", &clk_esys_uart.clk),
 	CLKDEV_INIT("s3c-sdhci.1", "mmc_busclk.0", &hsmmc1_clk),
+	CLKDEV_INIT("s3c64xx-spi.0", "spi_busclk0", &hsspi_clk),
 };
 
 void __init s3c2443_common_init_clocks(int xtal, pll_fn get_mpll,
diff --git a/arch/arm/plat-s3c24xx/cpu.c b/arch/arm/mach-s3c24xx/common.c
similarity index 76%
rename from arch/arm/plat-s3c24xx/cpu.c
rename to arch/arm/mach-s3c24xx/common.c
index 290942d..56cdd34 100644
--- a/arch/arm/plat-s3c24xx/cpu.c
+++ b/arch/arm/mach-s3c24xx/common.c
@@ -4,7 +4,7 @@
  *	http://www.simtec.co.uk/products/SWLINUX/
  *	Ben Dooks <ben@simtec.co.uk>
  *
- * S3C24XX CPU Support
+ * Common code for S3C24XX machines
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License as published by
@@ -41,6 +41,7 @@
 #include <asm/mach/arch.h>
 #include <asm/mach/map.h>
 
+#include <mach/regs-clock.h>
 #include <mach/regs-gpio.h>
 #include <plat/regs-serial.h>
 
@@ -52,6 +53,8 @@
 #include <plat/s3c2416.h>
 #include <plat/s3c244x.h>
 #include <plat/s3c2443.h>
+#include <plat/cpu-freq.h>
+#include <plat/pll.h>
 
 /* table of supported CPUs */
 
@@ -234,3 +237,67 @@
 
 	s3c_init_cpu(samsung_cpu_id, cpu_ids, ARRAY_SIZE(cpu_ids));
 }
+
+/* Serial port registrations */
+
+static struct resource s3c2410_uart0_resource[] = {
+	[0] = DEFINE_RES_MEM(S3C2410_PA_UART0, SZ_16K),
+	[1] = DEFINE_RES_NAMED(IRQ_S3CUART_RX0, \
+			IRQ_S3CUART_ERR0 - IRQ_S3CUART_RX0 + 1, \
+			NULL, IORESOURCE_IRQ)
+};
+
+static struct resource s3c2410_uart1_resource[] = {
+	[0] = DEFINE_RES_MEM(S3C2410_PA_UART1, SZ_16K),
+	[1] = DEFINE_RES_NAMED(IRQ_S3CUART_RX1, \
+			IRQ_S3CUART_ERR1 - IRQ_S3CUART_RX1 + 1, \
+			NULL, IORESOURCE_IRQ)
+};
+
+static struct resource s3c2410_uart2_resource[] = {
+	[0] = DEFINE_RES_MEM(S3C2410_PA_UART2, SZ_16K),
+	[1] = DEFINE_RES_NAMED(IRQ_S3CUART_RX2, \
+			IRQ_S3CUART_ERR2 - IRQ_S3CUART_RX2 + 1, \
+			NULL, IORESOURCE_IRQ)
+};
+
+static struct resource s3c2410_uart3_resource[] = {
+	[0] = DEFINE_RES_MEM(S3C2443_PA_UART3, SZ_16K),
+	[1] = DEFINE_RES_NAMED(IRQ_S3CUART_RX3, \
+			IRQ_S3CUART_ERR3 - IRQ_S3CUART_RX3 + 1, \
+			NULL, IORESOURCE_IRQ)
+};
+
+struct s3c24xx_uart_resources s3c2410_uart_resources[] __initdata = {
+	[0] = {
+		.resources	= s3c2410_uart0_resource,
+		.nr_resources	= ARRAY_SIZE(s3c2410_uart0_resource),
+	},
+	[1] = {
+		.resources	= s3c2410_uart1_resource,
+		.nr_resources	= ARRAY_SIZE(s3c2410_uart1_resource),
+	},
+	[2] = {
+		.resources	= s3c2410_uart2_resource,
+		.nr_resources	= ARRAY_SIZE(s3c2410_uart2_resource),
+	},
+	[3] = {
+		.resources	= s3c2410_uart3_resource,
+		.nr_resources	= ARRAY_SIZE(s3c2410_uart3_resource),
+	},
+};
+
+/* initialise all the clocks */
+
+void __init_or_cpufreq s3c24xx_setup_clocks(unsigned long fclk,
+					   unsigned long hclk,
+					   unsigned long pclk)
+{
+	clk_upll.rate = s3c24xx_get_pll(__raw_readl(S3C2410_UPLLCON),
+					clk_xtal.rate);
+
+	clk_mpll.rate = fclk;
+	clk_h.rate = hclk;
+	clk_p.rate = pclk;
+	clk_f.rate = fclk;
+}
diff --git a/arch/arm/mach-s3c24xx/dma-s3c2443.c b/arch/arm/mach-s3c24xx/dma-s3c2443.c
index e227c47..2d94228 100644
--- a/arch/arm/mach-s3c24xx/dma-s3c2443.c
+++ b/arch/arm/mach-s3c24xx/dma-s3c2443.c
@@ -55,12 +55,20 @@
 		.name		= "sdi",
 		.channels	= MAP(S3C2443_DMAREQSEL_SDI),
 	},
-	[DMACH_SPI0] = {
-		.name		= "spi0",
+	[DMACH_SPI0_RX] = {
+		.name		= "spi0-rx",
+		.channels	= MAP(S3C2443_DMAREQSEL_SPI0RX),
+	},
+	[DMACH_SPI0_TX] = {
+		.name		= "spi0-tx",
 		.channels	= MAP(S3C2443_DMAREQSEL_SPI0TX),
 	},
-	[DMACH_SPI1] = { /* only on S3C2443/S3C2450 */
-		.name		= "spi1",
+	[DMACH_SPI1_RX] = { /* only on S3C2443/S3C2450 */
+		.name		= "spi1-rx",
+		.channels	= MAP(S3C2443_DMAREQSEL_SPI1RX),
+	},
+	[DMACH_SPI1_TX] = { /* only on S3C2443/S3C2450 */
+		.name		= "spi1-tx",
 		.channels	= MAP(S3C2443_DMAREQSEL_SPI1TX),
 	},
 	[DMACH_UART0] = {
diff --git a/arch/arm/mach-s3c24xx/include/mach/dma.h b/arch/arm/mach-s3c24xx/include/mach/dma.h
index acbdfec..454831b 100644
--- a/arch/arm/mach-s3c24xx/include/mach/dma.h
+++ b/arch/arm/mach-s3c24xx/include/mach/dma.h
@@ -47,6 +47,10 @@
 	DMACH_UART2_SRC2,
 	DMACH_UART3,		/* s3c2443 has extra uart */
 	DMACH_UART3_SRC2,
+	DMACH_SPI0_TX,		/* s3c2443/2416/2450 hsspi0 */
+	DMACH_SPI0_RX,		/* s3c2443/2416/2450 hsspi0 */
+	DMACH_SPI1_TX,		/* s3c2443/2450 hsspi1 */
+	DMACH_SPI1_RX,		/* s3c2443/2450 hsspi1 */
 	DMACH_MAX,		/* the end entry */
 };
 
diff --git a/arch/arm/mach-s3c24xx/include/mach/irqs.h b/arch/arm/mach-s3c24xx/include/mach/irqs.h
index e53b217..b7a9f4d 100644
--- a/arch/arm/mach-s3c24xx/include/mach/irqs.h
+++ b/arch/arm/mach-s3c24xx/include/mach/irqs.h
@@ -134,6 +134,17 @@
 #define IRQ_S32416_WDT		S3C2410_IRQSUB(27)
 #define IRQ_S32416_AC97		S3C2410_IRQSUB(28)
 
+/* second interrupt-register of s3c2416/s3c2450 */
+
+#define S3C2416_IRQ(x)		S3C2410_IRQ((x) + 54 + 29)
+#define IRQ_S3C2416_2D		S3C2416_IRQ(0)
+#define IRQ_S3C2416_IIC1	S3C2416_IRQ(1)
+#define IRQ_S3C2416_RESERVED2	S3C2416_IRQ(2)
+#define IRQ_S3C2416_RESERVED3	S3C2416_IRQ(3)
+#define IRQ_S3C2416_PCM0	S3C2416_IRQ(4)
+#define IRQ_S3C2416_PCM1	S3C2416_IRQ(5)
+#define IRQ_S3C2416_I2S0	S3C2416_IRQ(6)
+#define IRQ_S3C2416_I2S1	S3C2416_IRQ(7)
 
 /* extra irqs for s3c2440 */
 
@@ -175,7 +186,9 @@
 #define IRQ_S3C2443_WDT		S3C2410_IRQSUB(27)
 #define IRQ_S3C2443_AC97	S3C2410_IRQSUB(28)
 
-#if defined(CONFIG_CPU_S3C2443) || defined(CONFIG_CPU_S3C2416)
+#if defined(CONFIG_CPU_S3C2416)
+#define NR_IRQS (IRQ_S3C2416_I2S1 + 1)
+#elif defined(CONFIG_CPU_S3C2443)
 #define NR_IRQS (IRQ_S3C2443_AC97+1)
 #else
 #define NR_IRQS (IRQ_S3C2440_AC97+1)
diff --git a/arch/arm/mach-s3c24xx/include/mach/map.h b/arch/arm/mach-s3c24xx/include/mach/map.h
index 78ae807..8ba381f 100644
--- a/arch/arm/mach-s3c24xx/include/mach/map.h
+++ b/arch/arm/mach-s3c24xx/include/mach/map.h
@@ -98,6 +98,8 @@
 
 /* SPI */
 #define S3C2410_PA_SPI	   (0x59000000)
+#define S3C2443_PA_SPI0		(0x52000000)
+#define S3C2443_PA_SPI1		S3C2410_PA_SPI
 
 /* SDI */
 #define S3C2410_PA_SDI	   (0x5A000000)
@@ -162,4 +164,7 @@
 #define S3C_PA_WDT	    S3C2410_PA_WATCHDOG
 #define S3C_PA_NAND	    S3C24XX_PA_NAND
 
+#define S3C_PA_SPI0		S3C2443_PA_SPI0
+#define S3C_PA_SPI1		S3C2443_PA_SPI1
+
 #endif /* __ASM_ARCH_MAP_H */
diff --git a/arch/arm/plat-s3c24xx/irq-pm.c b/arch/arm/mach-s3c24xx/irq-pm.c
similarity index 100%
rename from arch/arm/plat-s3c24xx/irq-pm.c
rename to arch/arm/mach-s3c24xx/irq-pm.c
diff --git a/arch/arm/mach-s3c24xx/irq-s3c2416.c b/arch/arm/mach-s3c24xx/irq-s3c2416.c
index fd49f35..23ec973 100644
--- a/arch/arm/mach-s3c24xx/irq-s3c2416.c
+++ b/arch/arm/mach-s3c24xx/irq-s3c2416.c
@@ -27,6 +27,7 @@
 #include <linux/ioport.h>
 #include <linux/device.h>
 #include <linux/io.h>
+#include <linux/syscore_ops.h>
 
 #include <mach/hardware.h>
 #include <asm/irq.h>
@@ -192,6 +193,43 @@
 	.irq_ack	= s3c2416_irq_uart3_ack,
 };
 
+/* second interrupt register */
+
+static inline void s3c2416_irq_ack_second(struct irq_data *data)
+{
+	unsigned long bitval = 1UL << (data->irq - IRQ_S3C2416_2D);
+
+	__raw_writel(bitval, S3C2416_SRCPND2);
+	__raw_writel(bitval, S3C2416_INTPND2);
+}
+
+static void s3c2416_irq_mask_second(struct irq_data *data)
+{
+	unsigned long bitval = 1UL << (data->irq - IRQ_S3C2416_2D);
+	unsigned long mask;
+
+	mask = __raw_readl(S3C2416_INTMSK2);
+	mask |= bitval;
+	__raw_writel(mask, S3C2416_INTMSK2);
+}
+
+static void s3c2416_irq_unmask_second(struct irq_data *data)
+{
+	unsigned long bitval = 1UL << (data->irq - IRQ_S3C2416_2D);
+	unsigned long mask;
+
+	mask = __raw_readl(S3C2416_INTMSK2);
+	mask &= ~bitval;
+	__raw_writel(mask, S3C2416_INTMSK2);
+}
+
+struct irq_chip s3c2416_irq_second = {
+	.irq_ack	= s3c2416_irq_ack_second,
+	.irq_mask	= s3c2416_irq_mask_second,
+	.irq_unmask	= s3c2416_irq_unmask_second,
+};
+
+
 /* IRQ initialisation code */
 
 static int __init s3c2416_add_sub(unsigned int base,
@@ -213,6 +251,42 @@
 	return 0;
 }
 
+static void __init s3c2416_irq_add_second(void)
+{
+	unsigned long pend;
+	unsigned long last;
+	int irqno;
+	int i;
+
+	/* first, clear all interrupts pending... */
+	last = 0;
+	for (i = 0; i < 4; i++) {
+		pend = __raw_readl(S3C2416_INTPND2);
+
+		if (pend == 0 || pend == last)
+			break;
+
+		__raw_writel(pend, S3C2416_SRCPND2);
+		__raw_writel(pend, S3C2416_INTPND2);
+		printk(KERN_INFO "irq: clearing pending status %08x\n",
+		       (int)pend);
+		last = pend;
+	}
+
+	for (irqno = IRQ_S3C2416_2D; irqno <= IRQ_S3C2416_I2S1; irqno++) {
+		switch (irqno) {
+		case IRQ_S3C2416_RESERVED2:
+		case IRQ_S3C2416_RESERVED3:
+			/* no IRQ here */
+			break;
+		default:
+			irq_set_chip_and_handler(irqno, &s3c2416_irq_second,
+						 handle_edge_irq);
+			set_irq_flags(irqno, IRQF_VALID);
+		}
+	}
+}
+
 static int __init s3c2416_irq_add(struct device *dev,
 				  struct subsys_interface *sif)
 {
@@ -232,6 +306,8 @@
 			&s3c2416_irq_wdtac97,
 			IRQ_S3C2443_WDT, IRQ_S3C2443_AC97);
 
+	s3c2416_irq_add_second();
+
 	return 0;
 }
 
@@ -248,3 +324,25 @@
 
 arch_initcall(s3c2416_irq_init);
 
+#ifdef CONFIG_PM
+static struct sleep_save irq_save[] = {
+	SAVE_ITEM(S3C2416_INTMSK2),
+};
+
+int s3c2416_irq_suspend(void)
+{
+	s3c_pm_do_save(irq_save, ARRAY_SIZE(irq_save));
+
+	return 0;
+}
+
+void s3c2416_irq_resume(void)
+{
+	s3c_pm_do_restore(irq_save, ARRAY_SIZE(irq_save));
+}
+
+struct syscore_ops s3c2416_irq_syscore_ops = {
+	.suspend	= s3c2416_irq_suspend,
+	.resume		= s3c2416_irq_resume,
+};
+#endif
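With the second bank registered as edge IRQs and only INTMSK2 saved/restored over suspend (SRCPND2/INTPND2 are write-to-clear status registers), consumers use the new numbers through the normal genirq API; the flow handler drives the ack/mask callbacks above on their behalf. A hedged sketch with placeholder handler and device names:

	#include <linux/interrupt.h>

	static irqreturn_t demo_i2s0_isr(int irq, void *dev_id)
	{
		/* handle_edge_irq() has already acked via the second-bank chip */
		return IRQ_HANDLED;
	}

	static int demo_claim_i2s0_irq(void *dev_id)
	{
		/* IRQ_S3C2416_I2S0 is one of the second-bank IRQs added above */
		return request_irq(IRQ_S3C2416_I2S0, demo_i2s0_isr, 0,
				   "demo-i2s0", dev_id);
	}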
diff --git a/arch/arm/mach-s3c24xx/mach-smdk2416.c b/arch/arm/mach-s3c24xx/mach-smdk2416.c
index 30a44f8..c3100a0 100644
--- a/arch/arm/mach-s3c24xx/mach-smdk2416.c
+++ b/arch/arm/mach-s3c24xx/mach-smdk2416.c
@@ -148,23 +148,25 @@
 
 static struct s3c_fb_pd_win smdk2416_fb_win[] = {
 	[0] = {
-		/* think this is the same as the smdk6410 */
-		.win_mode	= {
-			.pixclock	= 41094,
-			.left_margin	= 8,
-			.right_margin	= 13,
-			.upper_margin	= 7,
-			.lower_margin	= 5,
-			.hsync_len	= 3,
-			.vsync_len	= 1,
-			.xres           = 800,
-			.yres           = 480,
-		},
 		.default_bpp	= 16,
 		.max_bpp	= 32,
+		.xres           = 800,
+		.yres           = 480,
 	},
 };
 
+static struct fb_videomode smdk2416_lcd_timing = {
+	.pixclock	= 41094,
+	.left_margin	= 8,
+	.right_margin	= 13,
+	.upper_margin	= 7,
+	.lower_margin	= 5,
+	.hsync_len	= 3,
+	.vsync_len	= 1,
+	.xres           = 800,
+	.yres           = 480,
+};
+
 static void s3c2416_fb_gpio_setup_24bpp(void)
 {
 	unsigned int gpio;
@@ -187,6 +189,7 @@
 
 static struct s3c_fb_platdata smdk2416_fb_platdata = {
 	.win[0]		= &smdk2416_fb_win[0],
+	.vtiming	= &smdk2416_lcd_timing,
 	.setup_gpio	= s3c2416_fb_gpio_setup_24bpp,
 	.vidcon0	= VIDCON0_VIDOUT_RGB | VIDCON0_PNRMODE_RGB,
 	.vidcon1	= VIDCON1_INV_HSYNC | VIDCON1_INV_VSYNC,
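This board, and the ones converted below, all follow the same split: struct s3c_fb_pd_win now carries only window geometry and depth, while the display timing lives in a standalone struct fb_videomode referenced through s3c_fb_platdata.vtiming. A hedged sketch of the consuming side using the generic fbdev helper (not the actual s3c-fb driver code):

	#include <linux/fb.h>

	/* Sketch only: translate board-supplied vtiming plus per-window
	 * data into a struct fb_var_screeninfo.
	 */
	static void demo_apply_vtiming(struct fb_var_screeninfo *var,
				       const struct s3c_fb_platdata *pd,
				       const struct s3c_fb_pd_win *win)
	{
		fb_videomode_to_var(var, pd->vtiming);	/* timing is now shared */
		var->xres = win->xres;			/* geometry stays per-window */
		var->yres = win->yres;
		var->bits_per_pixel = win->default_bpp;
	}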
diff --git a/arch/arm/plat-s3c24xx/pm.c b/arch/arm/mach-s3c24xx/pm.c
similarity index 100%
rename from arch/arm/plat-s3c24xx/pm.c
rename to arch/arm/mach-s3c24xx/pm.c
diff --git a/arch/arm/mach-s3c24xx/s3c2416.c b/arch/arm/mach-s3c24xx/s3c2416.c
index 7743fad..ed5a95ec 100644
--- a/arch/arm/mach-s3c24xx/s3c2416.c
+++ b/arch/arm/mach-s3c24xx/s3c2416.c
@@ -106,6 +106,7 @@
 	register_syscore_ops(&s3c2416_pm_syscore_ops);
 #endif
 	register_syscore_ops(&s3c24xx_irq_syscore_ops);
+	register_syscore_ops(&s3c2416_irq_syscore_ops);
 
 	return device_register(&s3c2416_dev);
 }
diff --git a/arch/arm/mach-s3c24xx/setup-spi.c b/arch/arm/mach-s3c24xx/setup-spi.c
new file mode 100644
index 0000000..5712c85
--- /dev/null
+++ b/arch/arm/mach-s3c24xx/setup-spi.c
@@ -0,0 +1,39 @@
+/*
+ * HS-SPI device setup for S3C2443/S3C2416
+ *
+ * Copyright (C) 2011 Samsung Electronics Ltd.
+ *		http://www.samsung.com/
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/gpio.h>
+#include <linux/platform_device.h>
+
+#include <plat/gpio-cfg.h>
+#include <plat/s3c64xx-spi.h>
+
+#include <mach/hardware.h>
+#include <mach/regs-gpio.h>
+
+#ifdef CONFIG_S3C64XX_DEV_SPI0
+struct s3c64xx_spi_info s3c64xx_spi0_pdata __initdata = {
+	.fifo_lvl_mask	= 0x7f,
+	.rx_lvl_offset	= 13,
+	.tx_st_done	= 21,
+	.high_speed	= 1,
+};
+
+int s3c64xx_spi0_cfg_gpio(struct platform_device *pdev)
+{
+	/* enable hsspi bit in misccr */
+	s3c2410_modify_misccr(S3C2416_MISCCR_HSSPI_EN2, 1);
+
+	s3c_gpio_cfgall_range(S3C2410_GPE(11), 3,
+			      S3C_GPIO_SFN(2), S3C_GPIO_PULL_UP);
+
+	return 0;
+}
+#endif
diff --git a/arch/arm/plat-s3c24xx/sleep.S b/arch/arm/mach-s3c24xx/sleep.S
similarity index 100%
rename from arch/arm/plat-s3c24xx/sleep.S
rename to arch/arm/mach-s3c24xx/sleep.S
diff --git a/arch/arm/mach-s3c64xx/common.c b/arch/arm/mach-s3c64xx/common.c
index b313380..be746e3 100644
--- a/arch/arm/mach-s3c64xx/common.c
+++ b/arch/arm/mach-s3c64xx/common.c
@@ -384,3 +384,8 @@
 	/* if all else fails, or mode was for soft, jump to 0 */
 	soft_restart(0);
 }
+
+void __init s3c64xx_init_late(void)
+{
+	s3c64xx_pm_late_initcall();
+}
diff --git a/arch/arm/mach-s3c64xx/common.h b/arch/arm/mach-s3c64xx/common.h
index 7a10be6..6cfc99b 100644
--- a/arch/arm/mach-s3c64xx/common.h
+++ b/arch/arm/mach-s3c64xx/common.h
@@ -24,6 +24,7 @@
 void s3c64xx_setup_clocks(void);
 
 void s3c64xx_restart(char mode, const char *cmd);
+void s3c64xx_init_late(void);
 
 #ifdef CONFIG_CPU_S3C6400
 
@@ -51,4 +52,10 @@
 #define s3c6410_init NULL
 #endif
 
+#ifdef CONFIG_PM
+int __init s3c64xx_pm_late_initcall(void);
+#else
+static inline int s3c64xx_pm_late_initcall(void) { return 0; }
+#endif
+
 #endif /* __ARCH_ARM_MACH_S3C64XX_COMMON_H */
diff --git a/arch/arm/mach-s3c64xx/cpuidle.c b/arch/arm/mach-s3c64xx/cpuidle.c
index 179460f..acb197c 100644
--- a/arch/arm/mach-s3c64xx/cpuidle.c
+++ b/arch/arm/mach-s3c64xx/cpuidle.c
@@ -27,12 +27,7 @@
 			      struct cpuidle_driver *drv,
 			      int index)
 {
-	struct timeval before, after;
 	unsigned long tmp;
-	int idle_time;
-
-	local_irq_disable();
-	do_gettimeofday(&before);
 
 	/* Setup PWRCFG to enter idle mode */
 	tmp = __raw_readl(S3C64XX_PWR_CFG);
@@ -42,42 +37,32 @@
 
 	cpu_do_idle();
 
-	do_gettimeofday(&after);
-	local_irq_enable();
-	idle_time = (after.tv_sec - before.tv_sec) * USEC_PER_SEC +
-		    (after.tv_usec - before.tv_usec);
-
-	dev->last_residency = idle_time;
 	return index;
 }
 
-static struct cpuidle_state s3c64xx_cpuidle_set[] = {
-	[0] = {
-		.enter			= s3c64xx_enter_idle,
-		.exit_latency		= 1,
-		.target_residency	= 1,
-		.flags			= CPUIDLE_FLAG_TIME_VALID,
-		.name			= "IDLE",
-		.desc			= "System active, ARM gated",
-	},
-};
+static DEFINE_PER_CPU(struct cpuidle_device, s3c64xx_cpuidle_device);
 
 static struct cpuidle_driver s3c64xx_cpuidle_driver = {
-	.name		= "s3c64xx_cpuidle",
-	.owner		= THIS_MODULE,
-	.state_count	= ARRAY_SIZE(s3c64xx_cpuidle_set),
-};
-
-static struct cpuidle_device s3c64xx_cpuidle_device = {
-	.state_count	= ARRAY_SIZE(s3c64xx_cpuidle_set),
+	.name	= "s3c64xx_cpuidle",
+	.owner  = THIS_MODULE,
+	.en_core_tk_irqen = 1,
+	.states = {
+		{
+			.enter            = s3c64xx_enter_idle,
+			.exit_latency     = 1,
+			.target_residency = 1,
+			.flags            = CPUIDLE_FLAG_TIME_VALID,
+			.name             = "IDLE",
+			.desc             = "System active, ARM gated",
+		},
+	},
+	.state_count = 1,
 };
 
 static int __init s3c64xx_init_cpuidle(void)
 {
 	int ret;
 
-	memcpy(s3c64xx_cpuidle_driver.states, s3c64xx_cpuidle_set,
-	       sizeof(s3c64xx_cpuidle_set));
 	cpuidle_register_driver(&s3c64xx_cpuidle_driver);
 
 	ret = cpuidle_register_device(&s3c64xx_cpuidle_device);
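Setting en_core_tk_irqen lets the cpuidle core disable interrupts and account the idle residency itself, which is why the enter hook above loses its gettimeofday bookkeeping and the states can be declared directly in the driver. A generic, hedged illustration of the same pattern (not the s3c64xx code itself):

	#include <linux/cpuidle.h>

	/* Sketch of a minimal driver relying on core timekeeping/irq handling. */
	static int demo_enter_idle(struct cpuidle_device *dev,
				   struct cpuidle_driver *drv, int index)
	{
		cpu_do_idle();		/* the core fills dev->last_residency */
		return index;
	}

	static struct cpuidle_driver demo_cpuidle_driver = {
		.name			= "demo_cpuidle",
		.owner			= THIS_MODULE,
		.en_core_tk_irqen	= 1,
		.states[0] = {
			.enter			= demo_enter_idle,
			.exit_latency		= 1,
			.target_residency	= 1,
			.flags			= CPUIDLE_FLAG_TIME_VALID,
			.name			= "IDLE",
			.desc			= "demo idle state",
		},
		.state_count = 1,
	};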
diff --git a/arch/arm/mach-s3c64xx/mach-anw6410.c b/arch/arm/mach-s3c64xx/mach-anw6410.c
index f252691..ffa29dd 100644
--- a/arch/arm/mach-s3c64xx/mach-anw6410.c
+++ b/arch/arm/mach-s3c64xx/mach-anw6410.c
@@ -134,24 +134,27 @@
 };
 
 static struct s3c_fb_pd_win anw6410_fb_win0 = {
-	/* this is to ensure we use win0 */
-	.win_mode	= {
-		.left_margin	= 8,
-		.right_margin	= 13,
-		.upper_margin	= 7,
-		.lower_margin	= 5,
-		.hsync_len	= 3,
-		.vsync_len	= 1,
-		.xres		= 800,
-		.yres		= 480,
-	},
 	.max_bpp	= 32,
 	.default_bpp	= 16,
+	.xres		= 800,
+	.yres		= 480,
+};
+
+static struct fb_videomode anw6410_lcd_timing = {
+	.left_margin	= 8,
+	.right_margin	= 13,
+	.upper_margin	= 7,
+	.lower_margin	= 5,
+	.hsync_len	= 3,
+	.vsync_len	= 1,
+	.xres		= 800,
+	.yres		= 480,
 };
 
 /* 405566 clocks per frame => 60Hz refresh requires 24333960Hz clock */
 static struct s3c_fb_platdata anw6410_lcd_pdata __initdata = {
 	.setup_gpio	= s3c64xx_fb_gpio_setup_24bpp,
+	.vtiming	= &anw6410_lcd_timing,
 	.win[0]		= &anw6410_fb_win0,
 	.vidcon0	= VIDCON0_VIDOUT_RGB | VIDCON0_PNRMODE_RGB,
 	.vidcon1	= VIDCON1_INV_HSYNC | VIDCON1_INV_VSYNC,
@@ -230,6 +233,7 @@
 	.handle_irq	= vic_handle_irq,
 	.map_io		= anw6410_map_io,
 	.init_machine	= anw6410_machine_init,
+	.init_late	= s3c64xx_init_late,
 	.timer		= &s3c24xx_timer,
 	.restart	= s3c64xx_restart,
 MACHINE_END
diff --git a/arch/arm/mach-s3c64xx/mach-crag6410-module.c b/arch/arm/mach-s3c64xx/mach-crag6410-module.c
index 0ace108..7a27f56 100644
--- a/arch/arm/mach-s3c64xx/mach-crag6410-module.c
+++ b/arch/arm/mach-s3c64xx/mach-crag6410-module.c
@@ -182,6 +182,11 @@
 	},
 };
 
+static const struct i2c_board_info wm6230_i2c_devs[] = {
+	{ I2C_BOARD_INFO("wm9081", 0x6c),
+	  .platform_data = &wm9081_pdata, },
+};
+
 static __devinitdata const struct {
 	u8 id;
 	const char *name;
@@ -195,7 +200,9 @@
 	{ .id = 0x03, .name = "1252-EV1 Glenlivet" },
 	{ .id = 0x11, .name = "6249-EV2 Glenfarclas", },
 	{ .id = 0x14, .name = "6271-EV1 Lochnagar" },
-	{ .id = 0x15, .name = "XXXX-EV1 Bells" },
+	{ .id = 0x15, .name = "6320-EV1 Bells",
+	  .i2c_devs = wm6230_i2c_devs,
+	  .num_i2c_devs = ARRAY_SIZE(wm6230_i2c_devs) },
 	{ .id = 0x21, .name = "1275-EV1 Mortlach" },
 	{ .id = 0x25, .name = "1274-EV1 Glencadam" },
 	{ .id = 0x31, .name = "1253-EV1 Tomatin",
diff --git a/arch/arm/mach-s3c64xx/mach-crag6410.c b/arch/arm/mach-s3c64xx/mach-crag6410.c
index aa1137f..d0c352d 100644
--- a/arch/arm/mach-s3c64xx/mach-crag6410.c
+++ b/arch/arm/mach-s3c64xx/mach-crag6410.c
@@ -151,26 +151,29 @@
 
 /* 640x480 URT */
 static struct s3c_fb_pd_win crag6410_fb_win0 = {
-	/* this is to ensure we use win0 */
-	.win_mode	= {
-		.left_margin	= 150,
-		.right_margin	= 80,
-		.upper_margin	= 40,
-		.lower_margin	= 5,
-		.hsync_len	= 40,
-		.vsync_len	= 5,
-		.xres		= 640,
-		.yres		= 480,
-	},
 	.max_bpp	= 32,
 	.default_bpp	= 16,
+	.xres		= 640,
+	.yres		= 480,
 	.virtual_y	= 480 * 2,
 	.virtual_x	= 640,
 };
 
+static struct fb_videomode crag6410_lcd_timing = {
+	.left_margin	= 150,
+	.right_margin	= 80,
+	.upper_margin	= 40,
+	.lower_margin	= 5,
+	.hsync_len	= 40,
+	.vsync_len	= 5,
+	.xres		= 640,
+	.yres		= 480,
+};
+
 /* 405566 clocks per frame => 60Hz refresh requires 24333960Hz clock */
 static struct s3c_fb_platdata crag6410_lcd_pdata __initdata = {
 	.setup_gpio	= s3c64xx_fb_gpio_setup_24bpp,
+	.vtiming	= &crag6410_lcd_timing,
 	.win[0]		= &crag6410_fb_win0,
 	.vidcon0	= VIDCON0_VIDOUT_RGB | VIDCON0_PNRMODE_RGB,
 	.vidcon1	= VIDCON1_INV_HSYNC | VIDCON1_INV_VSYNC,
@@ -671,6 +674,7 @@
 	  .irq = S3C_EINT(0),
 	  .platform_data = &glenfarclas_pmic_pdata },
 
+	{ I2C_BOARD_INFO("wlf-gf-module", 0x22) },
 	{ I2C_BOARD_INFO("wlf-gf-module", 0x24) },
 	{ I2C_BOARD_INFO("wlf-gf-module", 0x25) },
 	{ I2C_BOARD_INFO("wlf-gf-module", 0x26) },
@@ -813,6 +817,7 @@
 	.handle_irq	= vic_handle_irq,
 	.map_io		= crag6410_map_io,
 	.init_machine	= crag6410_machine_init,
+	.init_late	= s3c64xx_init_late,
 	.timer		= &s3c24xx_timer,
 	.restart	= s3c64xx_restart,
 MACHINE_END
diff --git a/arch/arm/mach-s3c64xx/mach-hmt.c b/arch/arm/mach-s3c64xx/mach-hmt.c
index 521e07b..6890881 100644
--- a/arch/arm/mach-s3c64xx/mach-hmt.c
+++ b/arch/arm/mach-s3c64xx/mach-hmt.c
@@ -129,23 +129,27 @@
 };
 
 static struct s3c_fb_pd_win hmt_fb_win0 = {
-	.win_mode	= {
-		.left_margin	= 8,
-		.right_margin	= 13,
-		.upper_margin	= 7,
-		.lower_margin	= 5,
-		.hsync_len	= 3,
-		.vsync_len	= 1,
-		.xres		= 800,
-		.yres		= 480,
-	},
 	.max_bpp	= 32,
 	.default_bpp	= 16,
+	.xres		= 800,
+	.yres		= 480,
+};
+
+static struct fb_videomode hmt_lcd_timing = {
+	.left_margin	= 8,
+	.right_margin	= 13,
+	.upper_margin	= 7,
+	.lower_margin	= 5,
+	.hsync_len	= 3,
+	.vsync_len	= 1,
+	.xres		= 800,
+	.yres		= 480,
 };
 
 /* 405566 clocks per frame => 60Hz refresh requires 24333960Hz clock */
 static struct s3c_fb_platdata hmt_lcd_pdata __initdata = {
 	.setup_gpio	= s3c64xx_fb_gpio_setup_24bpp,
+	.vtiming	= &hmt_lcd_timing,
 	.win[0]		= &hmt_fb_win0,
 	.vidcon0	= VIDCON0_VIDOUT_RGB | VIDCON0_PNRMODE_RGB,
 	.vidcon1	= VIDCON1_INV_HSYNC | VIDCON1_INV_VSYNC,
@@ -272,6 +276,7 @@
 	.handle_irq	= vic_handle_irq,
 	.map_io		= hmt_map_io,
 	.init_machine	= hmt_machine_init,
+	.init_late	= s3c64xx_init_late,
 	.timer		= &s3c24xx_timer,
 	.restart	= s3c64xx_restart,
 MACHINE_END
diff --git a/arch/arm/mach-s3c64xx/mach-mini6410.c b/arch/arm/mach-s3c64xx/mach-mini6410.c
index b2166d4..5539a255 100644
--- a/arch/arm/mach-s3c64xx/mach-mini6410.c
+++ b/arch/arm/mach-s3c64xx/mach-mini6410.c
@@ -140,41 +140,59 @@
 	.sets		= mini6410_nand_sets,
 };
 
-static struct s3c_fb_pd_win mini6410_fb_win[] = {
-	{
-		.win_mode	= {	/* 4.3" 480x272 */
-			.left_margin	= 3,
-			.right_margin	= 2,
-			.upper_margin	= 1,
-			.lower_margin	= 1,
-			.hsync_len	= 40,
-			.vsync_len	= 1,
-			.xres		= 480,
-			.yres		= 272,
-		},
-		.max_bpp	= 32,
-		.default_bpp	= 16,
-	}, {
-		.win_mode	= {	/* 7.0" 800x480 */
-			.left_margin	= 8,
-			.right_margin	= 13,
-			.upper_margin	= 7,
-			.lower_margin	= 5,
-			.hsync_len	= 3,
-			.vsync_len	= 1,
-			.xres		= 800,
-			.yres		= 480,
-		},
-		.max_bpp	= 32,
-		.default_bpp	= 16,
-	},
+static struct s3c_fb_pd_win mini6410_lcd_type0_fb_win = {
+	.max_bpp	= 32,
+	.default_bpp	= 16,
+	.xres		= 480,
+	.yres		= 272,
 };
 
-static struct s3c_fb_platdata mini6410_lcd_pdata __initdata = {
-	.setup_gpio	= s3c64xx_fb_gpio_setup_24bpp,
-	.win[0]		= &mini6410_fb_win[0],
-	.vidcon0	= VIDCON0_VIDOUT_RGB | VIDCON0_PNRMODE_RGB,
-	.vidcon1	= VIDCON1_INV_HSYNC | VIDCON1_INV_VSYNC,
+static struct fb_videomode mini6410_lcd_type0_timing = {
+	/* 4.3" 480x272 */
+	.left_margin	= 3,
+	.right_margin	= 2,
+	.upper_margin	= 1,
+	.lower_margin	= 1,
+	.hsync_len	= 40,
+	.vsync_len	= 1,
+	.xres		= 480,
+	.yres		= 272,
+};
+
+static struct s3c_fb_pd_win mini6410_lcd_type1_fb_win = {
+	.max_bpp	= 32,
+	.default_bpp	= 16,
+	.xres		= 800,
+	.yres		= 480,
+};
+
+static struct fb_videomode mini6410_lcd_type1_timing = {
+	/* 7.0" 800x480 */
+	.left_margin	= 8,
+	.right_margin	= 13,
+	.upper_margin	= 7,
+	.lower_margin	= 5,
+	.hsync_len	= 3,
+	.vsync_len	= 1,
+	.xres		= 800,
+	.yres		= 480,
+};
+
+static struct s3c_fb_platdata mini6410_lcd_pdata[] __initdata = {
+	{
+		.setup_gpio	= s3c64xx_fb_gpio_setup_24bpp,
+		.vtiming	= &mini6410_lcd_type0_timing,
+		.win[0]		= &mini6410_lcd_type0_fb_win,
+		.vidcon0	= VIDCON0_VIDOUT_RGB | VIDCON0_PNRMODE_RGB,
+		.vidcon1	= VIDCON1_INV_HSYNC | VIDCON1_INV_VSYNC,
+	}, {
+		.setup_gpio	= s3c64xx_fb_gpio_setup_24bpp,
+		.vtiming	= &mini6410_lcd_type1_timing,
+		.win[0]		= &mini6410_lcd_type1_fb_win,
+		.vidcon0	= VIDCON0_VIDOUT_RGB | VIDCON0_PNRMODE_RGB,
+		.vidcon1	= VIDCON1_INV_HSYNC | VIDCON1_INV_VSYNC,
+	},
+	{ },
 };
 
 static void mini6410_lcd_power_set(struct plat_lcd_data *pd,
@@ -272,7 +290,7 @@
 					"screen type already set\n", f);
 			} else {
 				int li = f - '0';
-				if (li >= ARRAY_SIZE(mini6410_fb_win))
+				if (li >= ARRAY_SIZE(mini6410_lcd_pdata))
 					printk(KERN_INFO "MINI6410: '%c' out "
 						"of range LCD mode\n", f);
 				else {
@@ -296,14 +314,12 @@
 	/* Parse the feature string */
 	mini6410_parse_features(&features, mini6410_features_str);
 
-	mini6410_lcd_pdata.win[0] = &mini6410_fb_win[features.lcd_index];
-
 	printk(KERN_INFO "MINI6410: selected LCD display is %dx%d\n",
-		mini6410_lcd_pdata.win[0]->win_mode.xres,
-		mini6410_lcd_pdata.win[0]->win_mode.yres);
+		mini6410_lcd_pdata[features.lcd_index].win[0]->xres,
+		mini6410_lcd_pdata[features.lcd_index].win[0]->yres);
 
 	s3c_nand_set_platdata(&mini6410_nand_info);
-	s3c_fb_set_platdata(&mini6410_lcd_pdata);
+	s3c_fb_set_platdata(&mini6410_lcd_pdata[features.lcd_index]);
 	s3c24xx_ts_set_platdata(NULL);
 
 	/* configure nCS1 width to 16 bits */
@@ -339,6 +355,7 @@
 	.handle_irq	= vic_handle_irq,
 	.map_io		= mini6410_map_io,
 	.init_machine	= mini6410_machine_init,
+	.init_late	= s3c64xx_init_late,
 	.timer		= &s3c24xx_timer,
 	.restart	= s3c64xx_restart,
 MACHINE_END
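Because each panel type now needs its own vtiming, the board keeps one complete s3c_fb_platdata per panel and selects the whole entry by the parsed feature index, rather than swapping only win[0]. A hedged sketch of that selection step (note the trailing empty { } entry counts toward ARRAY_SIZE, so a stricter bound could subtract one for the sentinel):

	/* Sketch: map the '0'/'1' screen-type character onto the pdata array. */
	static void __init demo_select_lcd(char f)
	{
		int li = f - '0';

		if (li < 0 || li >= ARRAY_SIZE(mini6410_lcd_pdata) - 1)
			pr_info("MINI6410: '%c' out of range LCD mode\n", f);
		else
			s3c_fb_set_platdata(&mini6410_lcd_pdata[li]);
	}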
diff --git a/arch/arm/mach-s3c64xx/mach-ncp.c b/arch/arm/mach-s3c64xx/mach-ncp.c
index 0efa2ba..cad2e05 100644
--- a/arch/arm/mach-s3c64xx/mach-ncp.c
+++ b/arch/arm/mach-s3c64xx/mach-ncp.c
@@ -104,6 +104,7 @@
 	.handle_irq	= vic_handle_irq,
 	.map_io		= ncp_map_io,
 	.init_machine	= ncp_machine_init,
+	.init_late	= s3c64xx_init_late,
 	.timer		= &s3c24xx_timer,
 	.restart	= s3c64xx_restart,
 MACHINE_END
diff --git a/arch/arm/mach-s3c64xx/mach-real6410.c b/arch/arm/mach-s3c64xx/mach-real6410.c
index 5c08266..326b216 100644
--- a/arch/arm/mach-s3c64xx/mach-real6410.c
+++ b/arch/arm/mach-s3c64xx/mach-real6410.c
@@ -106,41 +106,57 @@
 	},
 };
 
-static struct s3c_fb_pd_win real6410_fb_win[] = {
-	{
-		.win_mode	= {	/* 4.3" 480x272 */
-			.left_margin	= 3,
-			.right_margin	= 2,
-			.upper_margin	= 1,
-			.lower_margin	= 1,
-			.hsync_len	= 40,
-			.vsync_len	= 1,
-			.xres		= 480,
-			.yres		= 272,
-		},
-		.max_bpp	= 32,
-		.default_bpp	= 16,
-	}, {
-		.win_mode	= {	/* 7.0" 800x480 */
-			.left_margin	= 8,
-			.right_margin	= 13,
-			.upper_margin	= 7,
-			.lower_margin	= 5,
-			.hsync_len	= 3,
-			.vsync_len	= 1,
-			.xres		= 800,
-			.yres		= 480,
-		},
-		.max_bpp	= 32,
-		.default_bpp	= 16,
-	},
+static struct s3c_fb_pd_win real6410_lcd_type0_fb_win = {
+	.max_bpp	= 32,
+	.default_bpp	= 16,
+	.xres		= 480,
+	.yres		= 272,
 };
 
-static struct s3c_fb_platdata real6410_lcd_pdata __initdata = {
-	.setup_gpio	= s3c64xx_fb_gpio_setup_24bpp,
-	.win[0]		= &real6410_fb_win[0],
-	.vidcon0	= VIDCON0_VIDOUT_RGB | VIDCON0_PNRMODE_RGB,
-	.vidcon1	= VIDCON1_INV_HSYNC | VIDCON1_INV_VSYNC,
+static struct fb_videomode real6410_lcd_type0_timing = {
+	/* 4.3" 480x272 */
+	.left_margin	= 3,
+	.right_margin	= 2,
+	.upper_margin	= 1,
+	.lower_margin	= 1,
+	.hsync_len	= 40,
+	.vsync_len	= 1,
+};
+
+static struct s3c_fb_pd_win real6410_lcd_type1_fb_win = {
+	.max_bpp	= 32,
+	.default_bpp	= 16,
+	.xres		= 800,
+	.yres		= 480,
+};
+
+static struct fb_videomode real6410_lcd_type1_timing = {
+	/* 7.0" 800x480 */
+	.left_margin	= 8,
+	.right_margin	= 13,
+	.upper_margin	= 7,
+	.lower_margin	= 5,
+	.hsync_len	= 3,
+	.vsync_len	= 1,
+	.xres		= 800,
+	.yres		= 480,
+};
+
+static struct s3c_fb_platdata real6410_lcd_pdata[] __initdata = {
+	{
+		.setup_gpio	= s3c64xx_fb_gpio_setup_24bpp,
+		.vtiming	= &real6410_lcd_type0_timing,
+		.win[0]		= &real6410_lcd_type0_fb_win,
+		.vidcon0	= VIDCON0_VIDOUT_RGB | VIDCON0_PNRMODE_RGB,
+		.vidcon1	= VIDCON1_INV_HSYNC | VIDCON1_INV_VSYNC,
+	}, {
+		.setup_gpio	= s3c64xx_fb_gpio_setup_24bpp,
+		.vtiming	= &real6410_lcd_type1_timing,
+		.win[0]		= &real6410_lcd_type1_fb_win,
+		.vidcon0	= VIDCON0_VIDOUT_RGB | VIDCON0_PNRMODE_RGB,
+		.vidcon1	= VIDCON1_INV_HSYNC | VIDCON1_INV_VSYNC,
+	},
+	{ },
 };
 
 static struct mtd_partition real6410_nand_part[] = {
@@ -253,7 +269,7 @@
 					"screen type already set\n", f);
 			} else {
 				int li = f - '0';
-				if (li >= ARRAY_SIZE(real6410_fb_win))
+				if (li >= ARRAY_SIZE(real6410_lcd_pdata))
 					printk(KERN_INFO "REAL6410: '%c' out "
 						"of range LCD mode\n", f);
 				else {
@@ -277,13 +293,11 @@
 	/* Parse the feature string */
 	real6410_parse_features(&features, real6410_features_str);
 
-	real6410_lcd_pdata.win[0] = &real6410_fb_win[features.lcd_index];
-
 	printk(KERN_INFO "REAL6410: selected LCD display is %dx%d\n",
-		real6410_lcd_pdata.win[0]->win_mode.xres,
-		real6410_lcd_pdata.win[0]->win_mode.yres);
+		real6410_lcd_pdata[features.lcd_index].win[0]->xres,
+		real6410_lcd_pdata[features.lcd_index].win[0]->yres);
 
-	s3c_fb_set_platdata(&real6410_lcd_pdata);
+	s3c_fb_set_platdata(&real6410_lcd_pdata[features.lcd_index]);
 	s3c_nand_set_platdata(&real6410_nand_info);
 	s3c24xx_ts_set_platdata(NULL);
 
@@ -320,6 +334,7 @@
 	.handle_irq	= vic_handle_irq,
 	.map_io		= real6410_map_io,
 	.init_machine	= real6410_machine_init,
+	.init_late	= s3c64xx_init_late,
 	.timer		= &s3c24xx_timer,
 	.restart	= s3c64xx_restart,
 MACHINE_END
diff --git a/arch/arm/mach-s3c64xx/mach-smartq5.c b/arch/arm/mach-s3c64xx/mach-smartq5.c
index 3f42431..d6266d8 100644
--- a/arch/arm/mach-s3c64xx/mach-smartq5.c
+++ b/arch/arm/mach-s3c64xx/mach-smartq5.c
@@ -108,23 +108,27 @@
 };
 
 static struct s3c_fb_pd_win smartq5_fb_win0 = {
-	.win_mode	= {
-		.left_margin	= 216,
-		.right_margin	= 40,
-		.upper_margin	= 35,
-		.lower_margin	= 10,
-		.hsync_len	= 1,
-		.vsync_len	= 1,
-		.xres		= 800,
-		.yres		= 480,
-		.refresh	= 80,
-	},
 	.max_bpp	= 32,
 	.default_bpp	= 16,
+	.xres		= 800,
+	.yres		= 480,
+};
+
+static struct fb_videomode smartq5_lcd_timing = {
+	.left_margin	= 216,
+	.right_margin	= 40,
+	.upper_margin	= 35,
+	.lower_margin	= 10,
+	.hsync_len	= 1,
+	.vsync_len	= 1,
+	.xres		= 800,
+	.yres		= 480,
+	.refresh	= 80,
 };
 
 static struct s3c_fb_platdata smartq5_lcd_pdata __initdata = {
 	.setup_gpio	= s3c64xx_fb_gpio_setup_24bpp,
+	.vtiming	= &smartq5_lcd_timing,
 	.win[0]		= &smartq5_fb_win0,
 	.vidcon0	= VIDCON0_VIDOUT_RGB | VIDCON0_PNRMODE_RGB,
 	.vidcon1	= VIDCON1_INV_HSYNC | VIDCON1_INV_VSYNC |
@@ -152,6 +156,7 @@
 	.handle_irq	= vic_handle_irq,
 	.map_io		= smartq_map_io,
 	.init_machine	= smartq5_machine_init,
+	.init_late	= s3c64xx_init_late,
 	.timer		= &s3c24xx_timer,
 	.restart	= s3c64xx_restart,
 MACHINE_END
diff --git a/arch/arm/mach-s3c64xx/mach-smartq7.c b/arch/arm/mach-s3c64xx/mach-smartq7.c
index e5c09b6..0957d2a 100644
--- a/arch/arm/mach-s3c64xx/mach-smartq7.c
+++ b/arch/arm/mach-s3c64xx/mach-smartq7.c
@@ -124,23 +124,27 @@
 };
 
 static struct s3c_fb_pd_win smartq7_fb_win0 = {
-	.win_mode	= {
-		.left_margin	= 3,
-		.right_margin	= 5,
-		.upper_margin	= 1,
-		.lower_margin	= 20,
-		.hsync_len	= 10,
-		.vsync_len	= 3,
-		.xres		= 800,
-		.yres		= 480,
-		.refresh	= 80,
-	},
 	.max_bpp	= 32,
 	.default_bpp	= 16,
+	.xres		= 800,
+	.yres		= 480,
+};
+
+static struct fb_videomode smartq7_lcd_timing = {
+	.left_margin	= 3,
+	.right_margin	= 5,
+	.upper_margin	= 1,
+	.lower_margin	= 20,
+	.hsync_len	= 10,
+	.vsync_len	= 3,
+	.xres		= 800,
+	.yres		= 480,
+	.refresh	= 80,
 };
 
 static struct s3c_fb_platdata smartq7_lcd_pdata __initdata = {
 	.setup_gpio	= s3c64xx_fb_gpio_setup_24bpp,
+	.vtiming	= &smartq7_lcd_timing,
 	.win[0]		= &smartq7_fb_win0,
 	.vidcon0	= VIDCON0_VIDOUT_RGB | VIDCON0_PNRMODE_RGB,
 	.vidcon1	= VIDCON1_INV_HSYNC | VIDCON1_INV_VSYNC |
@@ -168,6 +172,7 @@
 	.handle_irq	= vic_handle_irq,
 	.map_io		= smartq_map_io,
 	.init_machine	= smartq7_machine_init,
+	.init_late	= s3c64xx_init_late,
 	.timer		= &s3c24xx_timer,
 	.restart	= s3c64xx_restart,
 MACHINE_END
diff --git a/arch/arm/mach-s3c64xx/mach-smdk6400.c b/arch/arm/mach-s3c64xx/mach-smdk6400.c
index 5f09653..b0f4525 100644
--- a/arch/arm/mach-s3c64xx/mach-smdk6400.c
+++ b/arch/arm/mach-s3c64xx/mach-smdk6400.c
@@ -93,6 +93,7 @@
 	.handle_irq	= vic_handle_irq,
 	.map_io		= smdk6400_map_io,
 	.init_machine	= smdk6400_machine_init,
+	.init_late	= s3c64xx_init_late,
 	.timer		= &s3c24xx_timer,
 	.restart	= s3c64xx_restart,
 MACHINE_END
diff --git a/arch/arm/mach-s3c64xx/mach-smdk6410.c b/arch/arm/mach-s3c64xx/mach-smdk6410.c
index 7da044f..df3103d 100644
--- a/arch/arm/mach-s3c64xx/mach-smdk6410.c
+++ b/arch/arm/mach-s3c64xx/mach-smdk6410.c
@@ -146,26 +146,29 @@
 };
 
 static struct s3c_fb_pd_win smdk6410_fb_win0 = {
-	/* this is to ensure we use win0 */
-	.win_mode	= {
-		.left_margin	= 8,
-		.right_margin	= 13,
-		.upper_margin	= 7,
-		.lower_margin	= 5,
-		.hsync_len	= 3,
-		.vsync_len	= 1,
-		.xres		= 800,
-		.yres		= 480,
-	},
 	.max_bpp	= 32,
 	.default_bpp	= 16,
+	.xres		= 800,
+	.yres		= 480,
 	.virtual_y	= 480 * 2,
 	.virtual_x	= 800,
 };
 
+static struct fb_videomode smdk6410_lcd_timing = {
+	.left_margin	= 8,
+	.right_margin	= 13,
+	.upper_margin	= 7,
+	.lower_margin	= 5,
+	.hsync_len	= 3,
+	.vsync_len	= 1,
+	.xres		= 800,
+	.yres		= 480,
+};
+
 /* 405566 clocks per frame => 60Hz refresh requires 24333960Hz clock */
 static struct s3c_fb_platdata smdk6410_lcd_pdata __initdata = {
 	.setup_gpio	= s3c64xx_fb_gpio_setup_24bpp,
+	.vtiming	= &smdk6410_lcd_timing,
 	.win[0]		= &smdk6410_fb_win0,
 	.vidcon0	= VIDCON0_VIDOUT_RGB | VIDCON0_PNRMODE_RGB,
 	.vidcon1	= VIDCON1_INV_HSYNC | VIDCON1_INV_VSYNC,
@@ -702,6 +705,7 @@
 	.handle_irq	= vic_handle_irq,
 	.map_io		= smdk6410_map_io,
 	.init_machine	= smdk6410_machine_init,
+	.init_late	= s3c64xx_init_late,
 	.timer		= &s3c24xx_timer,
 	.restart	= s3c64xx_restart,
 MACHINE_END
diff --git a/arch/arm/mach-s3c64xx/pm.c b/arch/arm/mach-s3c64xx/pm.c
index 7d3e81b..7feb426 100644
--- a/arch/arm/mach-s3c64xx/pm.c
+++ b/arch/arm/mach-s3c64xx/pm.c
@@ -365,10 +365,9 @@
 }
 arch_initcall(s3c64xx_pm_initcall);
 
-static __init int s3c64xx_pm_late_initcall(void)
+int __init s3c64xx_pm_late_initcall(void)
 {
 	pm_genpd_poweroff_unused();
 
 	return 0;
 }
-late_initcall(s3c64xx_pm_late_initcall);
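Dropping the bare late_initcall() in favour of an exported s3c64xx_pm_late_initcall() ties the genpd power-off to boards that explicitly declare .init_late, instead of running it in any kernel that merely builds this file. The pattern, sketched with hypothetical names:

	/* Sketch: hook a former late_initcall into the machine's init_late. */
	int __init demo_pm_late_initcall(void);	/* was: late_initcall(...) */

	static void __init demo_init_late(void)
	{
		demo_pm_late_initcall();
	}

	/* in the board file:
	 *	MACHINE_START(DEMO, "demo board")
	 *		...
	 *		.init_late	= demo_init_late,
	 *	MACHINE_END
	 */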
diff --git a/arch/arm/mach-s5p64x0/mach-smdk6440.c b/arch/arm/mach-s5p64x0/mach-smdk6440.c
index a40e325..92fefad 100644
--- a/arch/arm/mach-s5p64x0/mach-smdk6440.c
+++ b/arch/arm/mach-s5p64x0/mach-smdk6440.c
@@ -103,22 +103,26 @@
 
 /* Frame Buffer */
 static struct s3c_fb_pd_win smdk6440_fb_win0 = {
-	.win_mode = {
-		.left_margin	= 8,
-		.right_margin	= 13,
-		.upper_margin	= 7,
-		.lower_margin	= 5,
-		.hsync_len	= 3,
-		.vsync_len	= 1,
-		.xres		= 800,
-		.yres		= 480,
-	},
 	.max_bpp	= 32,
 	.default_bpp	= 24,
+	.xres		= 800,
+	.yres		= 480,
+};
+
+static struct fb_videomode smdk6440_lcd_timing = {
+	.left_margin	= 8,
+	.right_margin	= 13,
+	.upper_margin	= 7,
+	.lower_margin	= 5,
+	.hsync_len	= 3,
+	.vsync_len	= 1,
+	.xres		= 800,
+	.yres		= 480,
 };
 
 static struct s3c_fb_platdata smdk6440_lcd_pdata __initdata = {
 	.win[0]		= &smdk6440_fb_win0,
+	.vtiming	= &smdk6440_lcd_timing,
 	.vidcon0	= VIDCON0_VIDOUT_RGB | VIDCON0_PNRMODE_RGB,
 	.vidcon1	= VIDCON1_INV_HSYNC | VIDCON1_INV_VSYNC,
 	.setup_gpio	= s5p64x0_fb_gpio_setup_24bpp,
diff --git a/arch/arm/mach-s5p64x0/mach-smdk6450.c b/arch/arm/mach-s5p64x0/mach-smdk6450.c
index efb69e2..e2335ec 100644
--- a/arch/arm/mach-s5p64x0/mach-smdk6450.c
+++ b/arch/arm/mach-s5p64x0/mach-smdk6450.c
@@ -121,22 +121,26 @@
 
 /* Frame Buffer */
 static struct s3c_fb_pd_win smdk6450_fb_win0 = {
-	.win_mode	= {
-		.left_margin	= 8,
-		.right_margin	= 13,
-		.upper_margin	= 7,
-		.lower_margin	= 5,
-		.hsync_len	= 3,
-		.vsync_len	= 1,
-		.xres		= 800,
-		.yres		= 480,
-	},
 	.max_bpp	= 32,
 	.default_bpp	= 24,
+	.xres		= 800,
+	.yres		= 480,
+};
+
+static struct fb_videomode smdk6450_lcd_timing = {
+	.left_margin	= 8,
+	.right_margin	= 13,
+	.upper_margin	= 7,
+	.lower_margin	= 5,
+	.hsync_len	= 3,
+	.vsync_len	= 1,
+	.xres		= 800,
+	.yres		= 480,
 };
 
 static struct s3c_fb_platdata smdk6450_lcd_pdata __initdata = {
 	.win[0]		= &smdk6450_fb_win0,
+	.vtiming	= &smdk6450_lcd_timing,
 	.vidcon0	= VIDCON0_VIDOUT_RGB | VIDCON0_PNRMODE_RGB,
 	.vidcon1	= VIDCON1_INV_HSYNC | VIDCON1_INV_VSYNC,
 	.setup_gpio	= s5p64x0_fb_gpio_setup_24bpp,
diff --git a/arch/arm/mach-s5pc100/mach-smdkc100.c b/arch/arm/mach-s5pc100/mach-smdkc100.c
index 674d229..0c3ae38 100644
--- a/arch/arm/mach-s5pc100/mach-smdkc100.c
+++ b/arch/arm/mach-s5pc100/mach-smdkc100.c
@@ -136,24 +136,27 @@
 
 /* Frame Buffer */
 static struct s3c_fb_pd_win smdkc100_fb_win0 = {
-	/* this is to ensure we use win0 */
-	.win_mode	= {
-		.left_margin	= 8,
-		.right_margin	= 13,
-		.upper_margin	= 7,
-		.lower_margin	= 5,
-		.hsync_len	= 3,
-		.vsync_len	= 1,
-		.xres		= 800,
-		.yres		= 480,
-		.refresh	= 80,
-	},
 	.max_bpp	= 32,
 	.default_bpp	= 16,
+	.xres		= 800,
+	.yres		= 480,
+};
+
+static struct fb_videomode smdkc100_lcd_timing = {
+	.left_margin	= 8,
+	.right_margin	= 13,
+	.upper_margin	= 7,
+	.lower_margin	= 5,
+	.hsync_len	= 3,
+	.vsync_len	= 1,
+	.xres		= 800,
+	.yres		= 480,
+	.refresh	= 80,
 };
 
 static struct s3c_fb_platdata smdkc100_lcd_pdata __initdata = {
 	.win[0]		= &smdkc100_fb_win0,
+	.vtiming	= &smdkc100_lcd_timing,
 	.vidcon0	= VIDCON0_VIDOUT_RGB | VIDCON0_PNRMODE_RGB,
 	.vidcon1	= VIDCON1_INV_HSYNC | VIDCON1_INV_VSYNC,
 	.setup_gpio	= s5pc100_fb_gpio_setup_24bpp,
diff --git a/arch/arm/mach-s5pv210/mach-aquila.c b/arch/arm/mach-s5pv210/mach-aquila.c
index 48d018f..af528f9 100644
--- a/arch/arm/mach-s5pv210/mach-aquila.c
+++ b/arch/arm/mach-s5pv210/mach-aquila.c
@@ -96,38 +96,34 @@
 
 /* Frame Buffer */
 static struct s3c_fb_pd_win aquila_fb_win0 = {
-	.win_mode = {
-		.left_margin = 16,
-		.right_margin = 16,
-		.upper_margin = 3,
-		.lower_margin = 28,
-		.hsync_len = 2,
-		.vsync_len = 2,
-		.xres = 480,
-		.yres = 800,
-	},
 	.max_bpp = 32,
 	.default_bpp = 16,
+	.xres = 480,
+	.yres = 800,
 };
 
 static struct s3c_fb_pd_win aquila_fb_win1 = {
-	.win_mode = {
-		.left_margin = 16,
-		.right_margin = 16,
-		.upper_margin = 3,
-		.lower_margin = 28,
-		.hsync_len = 2,
-		.vsync_len = 2,
-		.xres = 480,
-		.yres = 800,
-	},
 	.max_bpp = 32,
 	.default_bpp = 16,
+	.xres = 480,
+	.yres = 800,
+};
+
+static struct fb_videomode aquila_lcd_timing = {
+	.left_margin = 16,
+	.right_margin = 16,
+	.upper_margin = 3,
+	.lower_margin = 28,
+	.hsync_len = 2,
+	.vsync_len = 2,
+	.xres = 480,
+	.yres = 800,
 };
 
 static struct s3c_fb_platdata aquila_lcd_pdata __initdata = {
 	.win[0]		= &aquila_fb_win0,
 	.win[1]		= &aquila_fb_win1,
+	.vtiming	= &aquila_lcd_timing,
 	.vidcon0	= VIDCON0_VIDOUT_RGB | VIDCON0_PNRMODE_RGB,
 	.vidcon1	= VIDCON1_INV_HSYNC | VIDCON1_INV_VSYNC |
 			  VIDCON1_INV_VCLK | VIDCON1_INV_VDEN,
diff --git a/arch/arm/mach-s5pv210/mach-goni.c b/arch/arm/mach-s5pv210/mach-goni.c
index f20a97c..bf5087c 100644
--- a/arch/arm/mach-s5pv210/mach-goni.c
+++ b/arch/arm/mach-s5pv210/mach-goni.c
@@ -107,25 +107,29 @@
 
 /* Frame Buffer */
 static struct s3c_fb_pd_win goni_fb_win0 = {
-	.win_mode = {
-		.left_margin	= 16,
-		.right_margin	= 16,
-		.upper_margin	= 2,
-		.lower_margin	= 28,
-		.hsync_len	= 2,
-		.vsync_len	= 1,
-		.xres		= 480,
-		.yres		= 800,
-		.refresh	= 55,
-	},
 	.max_bpp	= 32,
 	.default_bpp	= 16,
+	.xres		= 480,
+	.yres		= 800,
 	.virtual_x	= 480,
 	.virtual_y	= 2 * 800,
 };
 
+static struct fb_videomode goni_lcd_timing = {
+	.left_margin	= 16,
+	.right_margin	= 16,
+	.upper_margin	= 2,
+	.lower_margin	= 28,
+	.hsync_len	= 2,
+	.vsync_len	= 1,
+	.xres		= 480,
+	.yres		= 800,
+	.refresh	= 55,
+};
+
 static struct s3c_fb_platdata goni_lcd_pdata __initdata = {
 	.win[0]		= &goni_fb_win0,
+	.vtiming	= &goni_lcd_timing,
 	.vidcon0	= VIDCON0_VIDOUT_RGB | VIDCON0_PNRMODE_RGB |
 			  VIDCON0_CLKSEL_LCD,
 	.vidcon1	= VIDCON1_INV_VCLK | VIDCON1_INV_VDEN
diff --git a/arch/arm/mach-s5pv210/mach-smdkv210.c b/arch/arm/mach-s5pv210/mach-smdkv210.c
index fa1b612..0d7ddec 100644
--- a/arch/arm/mach-s5pv210/mach-smdkv210.c
+++ b/arch/arm/mach-s5pv210/mach-smdkv210.c
@@ -178,22 +178,26 @@
 };
 
 static struct s3c_fb_pd_win smdkv210_fb_win0 = {
-	.win_mode = {
-		.left_margin	= 13,
-		.right_margin	= 8,
-		.upper_margin	= 7,
-		.lower_margin	= 5,
-		.hsync_len	= 3,
-		.vsync_len	= 1,
-		.xres		= 800,
-		.yres		= 480,
-	},
 	.max_bpp	= 32,
 	.default_bpp	= 24,
+	.xres		= 800,
+	.yres		= 480,
+};
+
+static struct fb_videomode smdkv210_lcd_timing = {
+	.left_margin	= 13,
+	.right_margin	= 8,
+	.upper_margin	= 7,
+	.lower_margin	= 5,
+	.hsync_len	= 3,
+	.vsync_len	= 1,
+	.xres		= 800,
+	.yres		= 480,
 };
 
 static struct s3c_fb_platdata smdkv210_lcd0_pdata __initdata = {
 	.win[0]		= &smdkv210_fb_win0,
+	.vtiming	= &smdkv210_lcd_timing,
 	.vidcon0	= VIDCON0_VIDOUT_RGB | VIDCON0_PNRMODE_RGB,
 	.vidcon1	= VIDCON1_INV_HSYNC | VIDCON1_INV_VSYNC,
 	.setup_gpio	= s5pv210_fb_gpio_setup_24bpp,
diff --git a/arch/arm/mach-sa1100/assabet.c b/arch/arm/mach-sa1100/assabet.c
index 375d3f7..d1dc7f1 100644
--- a/arch/arm/mach-sa1100/assabet.c
+++ b/arch/arm/mach-sa1100/assabet.c
@@ -538,6 +538,7 @@
 	.init_irq	= sa1100_init_irq,
 	.timer		= &sa1100_timer,
 	.init_machine	= assabet_init,
+	.init_late	= sa11x0_init_late,
 #ifdef CONFIG_SA1111
 	.dma_zone_size	= SZ_1M,
 #endif
diff --git a/arch/arm/mach-sa1100/badge4.c b/arch/arm/mach-sa1100/badge4.c
index e0f0c03..b30fb99 100644
--- a/arch/arm/mach-sa1100/badge4.c
+++ b/arch/arm/mach-sa1100/badge4.c
@@ -305,6 +305,7 @@
 	.map_io		= badge4_map_io,
 	.nr_irqs	= SA1100_NR_IRQS,
 	.init_irq	= sa1100_init_irq,
+	.init_late	= sa11x0_init_late,
 	.timer		= &sa1100_timer,
 #ifdef CONFIG_SA1111
 	.dma_zone_size	= SZ_1M,
diff --git a/arch/arm/mach-sa1100/cerf.c b/arch/arm/mach-sa1100/cerf.c
index 4a61f60..09d7f4b 100644
--- a/arch/arm/mach-sa1100/cerf.c
+++ b/arch/arm/mach-sa1100/cerf.c
@@ -134,5 +134,6 @@
 	.init_irq	= cerf_init_irq,
 	.timer		= &sa1100_timer,
 	.init_machine	= cerf_init,
+	.init_late	= sa11x0_init_late,
 	.restart	= sa11x0_restart,
 MACHINE_END
diff --git a/arch/arm/mach-sa1100/collie.c b/arch/arm/mach-sa1100/collie.c
index c7f418b..ea5cff3 100644
--- a/arch/arm/mach-sa1100/collie.c
+++ b/arch/arm/mach-sa1100/collie.c
@@ -401,5 +401,6 @@
 	.init_irq	= sa1100_init_irq,
 	.timer		= &sa1100_timer,
 	.init_machine	= collie_init,
+	.init_late	= sa11x0_init_late,
 	.restart	= sa11x0_restart,
 MACHINE_END
diff --git a/arch/arm/mach-sa1100/generic.c b/arch/arm/mach-sa1100/generic.c
index 16be4c5..9db3e98 100644
--- a/arch/arm/mach-sa1100/generic.c
+++ b/arch/arm/mach-sa1100/generic.c
@@ -359,6 +359,10 @@
 
 arch_initcall(sa1100_init);
 
+void __init sa11x0_init_late(void)
+{
+	sa11x0_pm_init();
+}
 
 /*
  * Common I/O mapping:
diff --git a/arch/arm/mach-sa1100/generic.h b/arch/arm/mach-sa1100/generic.h
index 9eb3b3c..a5b7c13 100644
--- a/arch/arm/mach-sa1100/generic.h
+++ b/arch/arm/mach-sa1100/generic.h
@@ -11,6 +11,7 @@
 extern void __init sa1100_init_irq(void);
 extern void __init sa1100_init_gpio(void);
 extern void sa11x0_restart(char, const char *);
+extern void sa11x0_init_late(void);
 
 #define SET_BANK(__nr,__start,__size) \
 	mi->bank[__nr].start = (__start), \
@@ -41,3 +42,9 @@
 
 struct sa1100fb_mach_info;
 void sa11x0_register_lcd(struct sa1100fb_mach_info *inf);
+
+#ifdef CONFIG_PM
+int sa11x0_pm_init(void);
+#else
+static inline int sa11x0_pm_init(void) { return 0; }
+#endif
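Exposing sa11x0_pm_init() with a static inline stub for !CONFIG_PM lets sa11x0_init_late() call it unconditionally; the compiler simply discards the call on PM-less builds. The same idiom recurs below for the shmobile suspend and cpuidle hooks. A generic sketch:

	/* Sketch of the conditional-stub idiom for optional init hooks. */
	#ifdef CONFIG_DEMO_FEATURE
	int demo_feature_init(void);			/* real version elsewhere */
	#else
	static inline int demo_feature_init(void) { return 0; }
	#endif

	void __init demo_soc_init_late(void)
	{
		demo_feature_init();	/* no #ifdef needed at the call site */
	}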
diff --git a/arch/arm/mach-sa1100/h3100.c b/arch/arm/mach-sa1100/h3100.c
index b2e8d0f..e1571ea 100644
--- a/arch/arm/mach-sa1100/h3100.c
+++ b/arch/arm/mach-sa1100/h3100.c
@@ -110,6 +110,7 @@
 	.init_irq	= sa1100_init_irq,
 	.timer		= &sa1100_timer,
 	.init_machine	= h3100_mach_init,
+	.init_late	= sa11x0_init_late,
 	.restart	= sa11x0_restart,
 MACHINE_END
 
diff --git a/arch/arm/mach-sa1100/h3600.c b/arch/arm/mach-sa1100/h3600.c
index cb6659f..ba7a290 100644
--- a/arch/arm/mach-sa1100/h3600.c
+++ b/arch/arm/mach-sa1100/h3600.c
@@ -160,6 +160,7 @@
 	.init_irq	= sa1100_init_irq,
 	.timer		= &sa1100_timer,
 	.init_machine	= h3600_mach_init,
+	.init_late	= sa11x0_init_late,
 	.restart	= sa11x0_restart,
 MACHINE_END
 
diff --git a/arch/arm/mach-sa1100/hackkit.c b/arch/arm/mach-sa1100/hackkit.c
index 5535475..7f86bd9 100644
--- a/arch/arm/mach-sa1100/hackkit.c
+++ b/arch/arm/mach-sa1100/hackkit.c
@@ -199,5 +199,6 @@
 	.init_irq	= sa1100_init_irq,
 	.timer		= &sa1100_timer,
 	.init_machine	= hackkit_init,
+	.init_late	= sa11x0_init_late,
 	.restart	= sa11x0_restart,
 MACHINE_END
diff --git a/arch/arm/mach-sa1100/jornada720.c b/arch/arm/mach-sa1100/jornada720.c
index ca7a7e8..e3084f4 100644
--- a/arch/arm/mach-sa1100/jornada720.c
+++ b/arch/arm/mach-sa1100/jornada720.c
@@ -348,6 +348,7 @@
 	.init_irq	= sa1100_init_irq,
 	.timer		= &sa1100_timer,
 	.init_machine	= jornada720_mach_init,
+	.init_late	= sa11x0_init_late,
 #ifdef CONFIG_SA1111
 	.dma_zone_size	= SZ_1M,
 #endif
diff --git a/arch/arm/mach-sa1100/lart.c b/arch/arm/mach-sa1100/lart.c
index eb6534e..b775a0a 100644
--- a/arch/arm/mach-sa1100/lart.c
+++ b/arch/arm/mach-sa1100/lart.c
@@ -147,6 +147,7 @@
 	.nr_irqs	= SA1100_NR_IRQS,
 	.init_irq	= sa1100_init_irq,
 	.init_machine	= lart_init,
+	.init_late	= sa11x0_init_late,
 	.timer		= &sa1100_timer,
 	.restart	= sa11x0_restart,
 MACHINE_END
diff --git a/arch/arm/mach-sa1100/nanoengine.c b/arch/arm/mach-sa1100/nanoengine.c
index 8f6446b..41f69d9 100644
--- a/arch/arm/mach-sa1100/nanoengine.c
+++ b/arch/arm/mach-sa1100/nanoengine.c
@@ -112,5 +112,6 @@
 	.init_irq	= sa1100_init_irq,
 	.timer		= &sa1100_timer,
 	.init_machine	= nanoengine_init,
+	.init_late	= sa11x0_init_late,
 	.restart	= sa11x0_restart,
 MACHINE_END
diff --git a/arch/arm/mach-sa1100/neponset.c b/arch/arm/mach-sa1100/neponset.c
index 6c58f01..266db87 100644
--- a/arch/arm/mach-sa1100/neponset.c
+++ b/arch/arm/mach-sa1100/neponset.c
@@ -89,6 +89,7 @@
 		WARN(1, "nep_base unset\n");
 	}
 }
+EXPORT_SYMBOL(neponset_ncr_frob);
 
 static void neponset_set_mctrl(struct uart_port *port, u_int mctrl)
 {
diff --git a/arch/arm/mach-sa1100/pleb.c b/arch/arm/mach-sa1100/pleb.c
index 1602575..37fe0a0 100644
--- a/arch/arm/mach-sa1100/pleb.c
+++ b/arch/arm/mach-sa1100/pleb.c
@@ -135,5 +135,6 @@
 	.init_irq	= sa1100_init_irq,
 	.timer		= &sa1100_timer,
 	.init_machine   = pleb_init,
+	.init_late	= sa11x0_init_late,
 	.restart	= sa11x0_restart,
 MACHINE_END
diff --git a/arch/arm/mach-sa1100/pm.c b/arch/arm/mach-sa1100/pm.c
index 2fa499e..690cf0c 100644
--- a/arch/arm/mach-sa1100/pm.c
+++ b/arch/arm/mach-sa1100/pm.c
@@ -117,10 +117,8 @@
 	.valid		= suspend_valid_only_mem,
 };
 
-static int __init sa11x0_pm_init(void)
+int __init sa11x0_pm_init(void)
 {
 	suspend_set_ops(&sa11x0_pm_ops);
 	return 0;
 }
-
-late_initcall(sa11x0_pm_init);
diff --git a/arch/arm/mach-sa1100/shannon.c b/arch/arm/mach-sa1100/shannon.c
index ca8bf59..5d33fc3 100644
--- a/arch/arm/mach-sa1100/shannon.c
+++ b/arch/arm/mach-sa1100/shannon.c
@@ -104,5 +104,6 @@
 	.init_irq	= sa1100_init_irq,
 	.timer		= &sa1100_timer,
 	.init_machine	= shannon_init,
+	.init_late	= sa11x0_init_late,
 	.restart	= sa11x0_restart,
 MACHINE_END
diff --git a/arch/arm/mach-sa1100/simpad.c b/arch/arm/mach-sa1100/simpad.c
index 3efae03..fbd5359 100644
--- a/arch/arm/mach-sa1100/simpad.c
+++ b/arch/arm/mach-sa1100/simpad.c
@@ -395,6 +395,7 @@
 	.map_io		= simpad_map_io,
 	.nr_irqs	= SA1100_NR_IRQS,
 	.init_irq	= sa1100_init_irq,
+	.init_late	= sa11x0_init_late,
 	.timer		= &sa1100_timer,
 	.restart	= sa11x0_restart,
 MACHINE_END
diff --git a/arch/arm/mach-shmobile/Kconfig b/arch/arm/mach-shmobile/Kconfig
index f31383c..df33909 100644
--- a/arch/arm/mach-shmobile/Kconfig
+++ b/arch/arm/mach-shmobile/Kconfig
@@ -186,6 +186,12 @@
 	help
 	  This enables build of the TMU timer driver.
 
+config EM_TIMER_STI
+	bool "STI timer driver"
+	default y
+	help
+	  This enables build of the STI timer driver.
+
 endmenu
 
 config SH_CLK_CPG
diff --git a/arch/arm/mach-shmobile/Makefile b/arch/arm/mach-shmobile/Makefile
index e6b177b..8aa1962 100644
--- a/arch/arm/mach-shmobile/Makefile
+++ b/arch/arm/mach-shmobile/Makefile
@@ -3,7 +3,7 @@
 #
 
 # Common objects
-obj-y				:= timer.o console.o clock.o
+obj-y				:= timer.o console.o clock.o common.o
 
 # CPU objects
 obj-$(CONFIG_ARCH_SH7367)	+= setup-sh7367.o clock-sh7367.o intc-sh7367.o
diff --git a/arch/arm/mach-shmobile/board-ag5evm.c b/arch/arm/mach-shmobile/board-ag5evm.c
index 0891ec6..5a6f22f 100644
--- a/arch/arm/mach-shmobile/board-ag5evm.c
+++ b/arch/arm/mach-shmobile/board-ag5evm.c
@@ -580,5 +580,6 @@
 	.init_irq	= sh73a0_init_irq,
 	.handle_irq	= gic_handle_irq,
 	.init_machine	= ag5evm_init,
+	.init_late	= shmobile_init_late,
 	.timer		= &shmobile_timer,
 MACHINE_END
diff --git a/arch/arm/mach-shmobile/board-ap4evb.c b/arch/arm/mach-shmobile/board-ap4evb.c
index b540b8e..ace6024 100644
--- a/arch/arm/mach-shmobile/board-ap4evb.c
+++ b/arch/arm/mach-shmobile/board-ap4evb.c
@@ -1469,5 +1469,6 @@
 	.init_irq	= sh7372_init_irq,
 	.handle_irq	= shmobile_handle_irq_intc,
 	.init_machine	= ap4evb_init,
+	.init_late	= shmobile_init_late,
 	.timer		= &shmobile_timer,
 MACHINE_END
diff --git a/arch/arm/mach-shmobile/board-armadillo800eva.c b/arch/arm/mach-shmobile/board-armadillo800eva.c
index 9e37026..9bd1355 100644
--- a/arch/arm/mach-shmobile/board-armadillo800eva.c
+++ b/arch/arm/mach-shmobile/board-armadillo800eva.c
@@ -779,6 +779,7 @@
 	.init_irq	= r8a7740_init_irq,
 	.handle_irq	= shmobile_handle_irq_intc,
 	.init_machine	= eva_init,
+	.init_late	= shmobile_init_late,
 	.timer		= &shmobile_timer,
 	.dt_compat	= eva_boards_compat_dt,
 MACHINE_END
diff --git a/arch/arm/mach-shmobile/board-bonito.c b/arch/arm/mach-shmobile/board-bonito.c
index 63ab706..e9b32cf 100644
--- a/arch/arm/mach-shmobile/board-bonito.c
+++ b/arch/arm/mach-shmobile/board-bonito.c
@@ -500,5 +500,6 @@
 	.init_irq	= r8a7740_init_irq,
 	.handle_irq	= shmobile_handle_irq_intc,
 	.init_machine	= bonito_init,
+	.init_late	= shmobile_init_late,
 	.timer		= &shmobile_timer,
 MACHINE_END
diff --git a/arch/arm/mach-shmobile/board-g3evm.c b/arch/arm/mach-shmobile/board-g3evm.c
index 39b6cf8..796fa00 100644
--- a/arch/arm/mach-shmobile/board-g3evm.c
+++ b/arch/arm/mach-shmobile/board-g3evm.c
@@ -338,5 +338,6 @@
 	.init_irq	= sh7367_init_irq,
 	.handle_irq	= shmobile_handle_irq_intc,
 	.init_machine	= g3evm_init,
+	.init_late	= shmobile_init_late,
 	.timer		= &shmobile_timer,
 MACHINE_END
diff --git a/arch/arm/mach-shmobile/board-g4evm.c b/arch/arm/mach-shmobile/board-g4evm.c
index 0e5a39c..f125732 100644
--- a/arch/arm/mach-shmobile/board-g4evm.c
+++ b/arch/arm/mach-shmobile/board-g4evm.c
@@ -381,5 +381,6 @@
 	.init_irq	= sh7377_init_irq,
 	.handle_irq	= shmobile_handle_irq_intc,
 	.init_machine	= g4evm_init,
+	.init_late	= shmobile_init_late,
 	.timer		= &shmobile_timer,
 MACHINE_END
diff --git a/arch/arm/mach-shmobile/board-kota2.c b/arch/arm/mach-shmobile/board-kota2.c
index 200dcd4..f60f1b2 100644
--- a/arch/arm/mach-shmobile/board-kota2.c
+++ b/arch/arm/mach-shmobile/board-kota2.c
@@ -521,5 +521,6 @@
 	.init_irq	= sh73a0_init_irq,
 	.handle_irq	= gic_handle_irq,
 	.init_machine	= kota2_init,
+	.init_late	= shmobile_init_late,
 	.timer		= &shmobile_timer,
 MACHINE_END
diff --git a/arch/arm/mach-shmobile/board-kzm9d.c b/arch/arm/mach-shmobile/board-kzm9d.c
index 7bc5e7d..6a33cf3 100644
--- a/arch/arm/mach-shmobile/board-kzm9d.c
+++ b/arch/arm/mach-shmobile/board-kzm9d.c
@@ -80,6 +80,7 @@
 	.init_irq	= emev2_init_irq,
 	.handle_irq	= gic_handle_irq,
 	.init_machine	= kzm9d_add_standard_devices,
+	.init_late	= shmobile_init_late,
 	.timer		= &shmobile_timer,
 	.dt_compat	= kzm9d_boards_compat_dt,
 MACHINE_END
diff --git a/arch/arm/mach-shmobile/board-kzm9g.c b/arch/arm/mach-shmobile/board-kzm9g.c
index d8e33b6..c0ae815 100644
--- a/arch/arm/mach-shmobile/board-kzm9g.c
+++ b/arch/arm/mach-shmobile/board-kzm9g.c
@@ -455,6 +455,7 @@
 	.init_irq	= sh73a0_init_irq,
 	.handle_irq	= gic_handle_irq,
 	.init_machine	= kzm_init,
+	.init_late	= shmobile_init_late,
 	.timer		= &shmobile_timer,
 	.dt_compat	= kzm9g_boards_compat_dt,
 MACHINE_END
diff --git a/arch/arm/mach-shmobile/board-mackerel.c b/arch/arm/mach-shmobile/board-mackerel.c
index 50c67b2..150122a 100644
--- a/arch/arm/mach-shmobile/board-mackerel.c
+++ b/arch/arm/mach-shmobile/board-mackerel.c
@@ -1512,6 +1512,9 @@
 	gpio_request(GPIO_FN_SDHID0_1, NULL);
 	gpio_request(GPIO_FN_SDHID0_0, NULL);
 
+	/* SDHI0 PORT172 card-detect IRQ26 */
+	gpio_request(GPIO_FN_IRQ26_172, NULL);
+
 #if !defined(CONFIG_MMC_SH_MMCIF) && !defined(CONFIG_MMC_SH_MMCIF_MODULE)
 	/* enable SDHI1 */
 	gpio_request(GPIO_FN_SDHICMD1, NULL);
@@ -1638,5 +1641,6 @@
 	.init_irq	= sh7372_init_irq,
 	.handle_irq	= shmobile_handle_irq_intc,
 	.init_machine	= mackerel_init,
+	.init_late	= shmobile_init_late,
 	.timer		= &shmobile_timer,
 MACHINE_END
diff --git a/arch/arm/mach-shmobile/board-marzen.c b/arch/arm/mach-shmobile/board-marzen.c
index ef0e13b..14de378 100644
--- a/arch/arm/mach-shmobile/board-marzen.c
+++ b/arch/arm/mach-shmobile/board-marzen.c
@@ -98,5 +98,6 @@
 	.init_irq	= r8a7779_init_irq,
 	.handle_irq	= gic_handle_irq,
 	.init_machine	= marzen_init,
+	.init_late	= shmobile_init_late,
 	.timer		= &shmobile_timer,
 MACHINE_END
diff --git a/arch/arm/mach-shmobile/clock-sh73a0.c b/arch/arm/mach-shmobile/clock-sh73a0.c
index 472d1f5..3946c4b 100644
--- a/arch/arm/mach-shmobile/clock-sh73a0.c
+++ b/arch/arm/mach-shmobile/clock-sh73a0.c
@@ -475,9 +475,9 @@
 
 enum { MSTP001,
 	MSTP129, MSTP128, MSTP127, MSTP126, MSTP125, MSTP118, MSTP116, MSTP100,
-	MSTP219,
+	MSTP219, MSTP218,
 	MSTP207, MSTP206, MSTP204, MSTP203, MSTP202, MSTP201, MSTP200,
-	MSTP331, MSTP329, MSTP325, MSTP323, MSTP318,
+	MSTP331, MSTP329, MSTP325, MSTP323,
 	MSTP314, MSTP313, MSTP312, MSTP311,
 	MSTP303, MSTP302, MSTP301, MSTP300,
 	MSTP411, MSTP410, MSTP403,
@@ -497,6 +497,7 @@
 	[MSTP116] = MSTP(&div4_clks[DIV4_HP], SMSTPCR1, 16, 0), /* IIC0 */
 	[MSTP100] = MSTP(&div4_clks[DIV4_B], SMSTPCR1, 0, 0), /* LCDC0 */
 	[MSTP219] = MSTP(&div6_clks[DIV6_SUB], SMSTPCR2, 19, 0), /* SCIFA7 */
+	[MSTP218] = MSTP(&div4_clks[DIV4_HP], SMSTPCR2, 18, 0), /* SY-DMAC */
 	[MSTP207] = MSTP(&div6_clks[DIV6_SUB], SMSTPCR2, 7, 0), /* SCIFA5 */
 	[MSTP206] = MSTP(&div6_clks[DIV6_SUB], SMSTPCR2, 6, 0), /* SCIFB */
 	[MSTP204] = MSTP(&div6_clks[DIV6_SUB], SMSTPCR2, 4, 0), /* SCIFA0 */
@@ -508,7 +509,6 @@
 	[MSTP329] = MSTP(&r_clk, SMSTPCR3, 29, 0), /* CMT10 */
 	[MSTP325] = MSTP(&div6_clks[DIV6_SUB], SMSTPCR3, 25, 0), /* IrDA */
 	[MSTP323] = MSTP(&div4_clks[DIV4_HP], SMSTPCR3, 23, 0), /* IIC1 */
-	[MSTP318] = MSTP(&div4_clks[DIV4_HP], SMSTPCR3, 18, 0), /* SY-DMAC */
 	[MSTP314] = MSTP(&div6_clks[DIV6_SDHI0], SMSTPCR3, 14, 0), /* SDHI0 */
 	[MSTP313] = MSTP(&div6_clks[DIV6_SDHI1], SMSTPCR3, 13, 0), /* SDHI1 */
 	[MSTP312] = MSTP(&div4_clks[DIV4_HP], SMSTPCR3, 12, 0), /* MMCIF0 */
@@ -552,6 +552,7 @@
 	CLKDEV_DEV_ID("i2c-sh_mobile.0", &mstp_clks[MSTP116]), /* I2C0 */
 	CLKDEV_DEV_ID("sh_mobile_lcdc_fb.0", &mstp_clks[MSTP100]), /* LCDC0 */
 	CLKDEV_DEV_ID("sh-sci.7", &mstp_clks[MSTP219]), /* SCIFA7 */
+	CLKDEV_DEV_ID("sh-dma-engine.0", &mstp_clks[MSTP218]), /* SY-DMAC */
 	CLKDEV_DEV_ID("sh-sci.5", &mstp_clks[MSTP207]), /* SCIFA5 */
 	CLKDEV_DEV_ID("sh-sci.8", &mstp_clks[MSTP206]), /* SCIFB */
 	CLKDEV_DEV_ID("sh-sci.0", &mstp_clks[MSTP204]), /* SCIFA0 */
@@ -563,7 +564,6 @@
 	CLKDEV_DEV_ID("sh_cmt.10", &mstp_clks[MSTP329]), /* CMT10 */
 	CLKDEV_DEV_ID("sh_irda.0", &mstp_clks[MSTP325]), /* IrDA */
 	CLKDEV_DEV_ID("i2c-sh_mobile.1", &mstp_clks[MSTP323]), /* I2C1 */
-	CLKDEV_DEV_ID("sh-dma-engine.0", &mstp_clks[MSTP318]), /* SY-DMAC */
 	CLKDEV_DEV_ID("sh_mobile_sdhi.0", &mstp_clks[MSTP314]), /* SDHI0 */
 	CLKDEV_DEV_ID("sh_mobile_sdhi.1", &mstp_clks[MSTP313]), /* SDHI1 */
 	CLKDEV_DEV_ID("sh_mmcif.0", &mstp_clks[MSTP312]), /* MMCIF0 */
diff --git a/arch/arm/mach-shmobile/common.c b/arch/arm/mach-shmobile/common.c
new file mode 100644
index 0000000..608aba9
--- /dev/null
+++ b/arch/arm/mach-shmobile/common.c
@@ -0,0 +1,24 @@
+/*
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; version 2 of the License.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
+ *
+ */
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <mach/common.h>
+
+void __init shmobile_init_late(void)
+{
+	shmobile_suspend_init();
+	shmobile_cpuidle_init();
+}
diff --git a/arch/arm/mach-shmobile/cpuidle.c b/arch/arm/mach-shmobile/cpuidle.c
index 7e65591..7b541e91 100644
--- a/arch/arm/mach-shmobile/cpuidle.c
+++ b/arch/arm/mach-shmobile/cpuidle.c
@@ -46,7 +46,7 @@
 
 void (*shmobile_cpuidle_setup)(struct cpuidle_driver *drv);
 
-static int shmobile_cpuidle_init(void)
+int shmobile_cpuidle_init(void)
 {
 	struct cpuidle_device *dev = &shmobile_cpuidle_dev;
 	struct cpuidle_driver *drv = &shmobile_cpuidle_driver;
@@ -65,4 +65,3 @@
 
 	return 0;
 }
-late_initcall(shmobile_cpuidle_init);
diff --git a/arch/arm/mach-shmobile/include/mach/common.h b/arch/arm/mach-shmobile/include/mach/common.h
index ff5f12f..01e2bc0 100644
--- a/arch/arm/mach-shmobile/include/mach/common.h
+++ b/arch/arm/mach-shmobile/include/mach/common.h
@@ -85,4 +85,18 @@
 extern void r8a7779_smp_prepare_cpus(void);
 extern void r8a7779_register_twd(void);
 
+extern void shmobile_init_late(void);
+
+#ifdef CONFIG_SUSPEND
+int shmobile_suspend_init(void);
+#else
+static inline int shmobile_suspend_init(void) { return 0; }
+#endif
+
+#ifdef CONFIG_CPU_IDLE
+int shmobile_cpuidle_init(void);
+#else
+static inline int shmobile_cpuidle_init(void) { return 0; }
+#endif
+
 #endif /* __ARCH_MACH_COMMON_H */
diff --git a/arch/arm/mach-shmobile/intc-r8a7779.c b/arch/arm/mach-shmobile/intc-r8a7779.c
index 550b23d..f04fad4 100644
--- a/arch/arm/mach-shmobile/intc-r8a7779.c
+++ b/arch/arm/mach-shmobile/intc-r8a7779.c
@@ -35,6 +35,9 @@
 #define INT2SMSKCR3 0xfe7822ac
 #define INT2SMSKCR4 0xfe7822b0
 
+#define INT2NTSR0 0xfe700060
+#define INT2NTSR1 0xfe700064
+
 static int r8a7779_set_wake(struct irq_data *data, unsigned int on)
 {
 	return 0; /* always allow wakeup */
@@ -49,6 +52,10 @@
 	gic_init(0, 29, gic_dist_base, gic_cpu_base);
 	gic_arch_extn.irq_set_wake = r8a7779_set_wake;
 
+	/* route all interrupts to ARM */
+	__raw_writel(0xffffffff, INT2NTSR0);
+	__raw_writel(0x3fffffff, INT2NTSR1);
+
 	/* unmask all known interrupts in INTCS2 */
 	__raw_writel(0xfffffff0, INT2SMSKCR0);
 	__raw_writel(0xfff7ffff, INT2SMSKCR1);
diff --git a/arch/arm/mach-shmobile/platsmp.c b/arch/arm/mach-shmobile/platsmp.c
index bacdd66..e859fcd 100644
--- a/arch/arm/mach-shmobile/platsmp.c
+++ b/arch/arm/mach-shmobile/platsmp.c
@@ -25,7 +25,12 @@
 #define is_sh73a0() (machine_is_ag5evm() || machine_is_kota2() || \
 			of_machine_is_compatible("renesas,sh73a0"))
 #define is_r8a7779() machine_is_marzen()
+
+#ifdef CONFIG_ARCH_EMEV2
 #define is_emev2() of_machine_is_compatible("renesas,emev2")
+#else
+#define is_emev2() (0)
+#endif
 
 static unsigned int __init shmobile_smp_get_core_count(void)
 {
diff --git a/arch/arm/mach-shmobile/setup-sh7372.c b/arch/arm/mach-shmobile/setup-sh7372.c
index 6a4bd58..fafce9c 100644
--- a/arch/arm/mach-shmobile/setup-sh7372.c
+++ b/arch/arm/mach-shmobile/setup-sh7372.c
@@ -484,7 +484,7 @@
 	},
 };
 
-#define SH7372_CHCLR 0x220
+#define SH7372_CHCLR (0x220 - 0x20)
 
 static const struct sh_dmae_channel sh7372_dmae_channels[] = {
 	{
diff --git a/arch/arm/mach-shmobile/suspend.c b/arch/arm/mach-shmobile/suspend.c
index 4d1b86a..47d83f7 100644
--- a/arch/arm/mach-shmobile/suspend.c
+++ b/arch/arm/mach-shmobile/suspend.c
@@ -39,9 +39,8 @@
 	.valid		= suspend_valid_only_mem,
 };
 
-static int __init shmobile_suspend_init(void)
+int __init shmobile_suspend_init(void)
 {
 	suspend_set_ops(&shmobile_suspend_ops);
 	return 0;
 }
-late_initcall(shmobile_suspend_init);
diff --git a/arch/arm/mach-spear13xx/Kconfig b/arch/arm/mach-spear13xx/Kconfig
new file mode 100644
index 0000000..eaadc66
--- /dev/null
+++ b/arch/arm/mach-spear13xx/Kconfig
@@ -0,0 +1,20 @@
+#
+# SPEAr13XX Machine configuration file
+#
+
+if ARCH_SPEAR13XX
+
+menu "SPEAr13xx Implementations"
+config MACH_SPEAR1310
+	bool "SPEAr1310 Machine support with Device Tree"
+	select PINCTRL_SPEAR1310
+	help
+	  Supports ST SPEAr1310 machine configured via the device-tree
+
+config MACH_SPEAR1340
+	bool "SPEAr1340 Machine support with Device Tree"
+	select PINCTRL_SPEAR1340
+	help
+	  Supports ST SPEAr1340 machine configured via the device-tree
+endmenu
+endif #ARCH_SPEAR13XX
diff --git a/arch/arm/mach-spear13xx/Makefile b/arch/arm/mach-spear13xx/Makefile
new file mode 100644
index 0000000..3435ea7
--- /dev/null
+++ b/arch/arm/mach-spear13xx/Makefile
@@ -0,0 +1,10 @@
+#
+# Makefile for SPEAr13XX machine series
+#
+
+obj-$(CONFIG_SMP)		+= headsmp.o platsmp.o
+obj-$(CONFIG_HOTPLUG_CPU)	+= hotplug.o
+
+obj-$(CONFIG_ARCH_SPEAR13XX)	+= spear13xx.o
+obj-$(CONFIG_MACH_SPEAR1310)	+= spear1310.o
+obj-$(CONFIG_MACH_SPEAR1340)	+= spear1340.o
diff --git a/arch/arm/mach-spear13xx/Makefile.boot b/arch/arm/mach-spear13xx/Makefile.boot
new file mode 100644
index 0000000..403efd7
--- /dev/null
+++ b/arch/arm/mach-spear13xx/Makefile.boot
@@ -0,0 +1,6 @@
+zreladdr-y	+= 0x00008000
+params_phys-y	:= 0x00000100
+initrd_phys-y	:= 0x00800000
+
+dtb-$(CONFIG_MACH_SPEAR1310)	+= spear1310-evb.dtb
+dtb-$(CONFIG_MACH_SPEAR1340)	+= spear1340-evb.dtb
diff --git a/arch/arm/mach-spear13xx/headsmp.S b/arch/arm/mach-spear13xx/headsmp.S
new file mode 100644
index 0000000..ed85473
--- /dev/null
+++ b/arch/arm/mach-spear13xx/headsmp.S
@@ -0,0 +1,47 @@
+/*
+ * arch/arm/mach-spear13XX/headsmp.S
+ *
+ * Picked from realview
+ * Copyright (c) 2012 ST Microelectronics Limited
+ * Shiraz Hashim <shiraz.hashim@st.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/linkage.h>
+#include <linux/init.h>
+
+	__INIT
+
+/*
+ * spear13xx specific entry point for secondary CPUs. This provides
+ * a "holding pen" into which all secondary cores are held until we're
+ * ready for them to initialise.
+ */
+ENTRY(spear13xx_secondary_startup)
+	mrc	p15, 0, r0, c0, c0, 5
+	and	r0, r0, #15
+	adr	r4, 1f
+	ldmia	r4, {r5, r6}
+	sub	r4, r4, r5
+	add	r6, r6, r4
+pen:	ldr	r7, [r6]
+	cmp	r7, r0
+	bne	pen
+
+	/* re-enable coherency */
+	mrc	p15, 0, r0, c1, c0, 1
+	orr	r0, r0, #(1 << 6) | (1 << 0)
+	mcr	p15, 0, r0, c1, c0, 1
+	/*
+	 * we've been released from the holding pen: secondary_stack
+	 * should now contain the SVC stack for this core
+	 */
+	b	secondary_startup
+
+	.align
+1:	.long	.
+	.long	pen_release
+ENDPROC(spear13xx_secondary_startup)
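Each secondary core spins in the pen comparing pen_release against its own MPIDR-derived id; the boot CPU releases it by writing that id and pushing the update out to memory, since the waiting core is not yet cache-coherent. A hedged sketch of the release side — the actual spear13xx platsmp.c should be consulted for the precise cache-maintenance sequence:

	#include <asm/cacheflush.h>
	#include <asm/outercache.h>

	extern volatile int pen_release;

	/* Sketch only: how the boot CPU lets a core out of the holding pen. */
	static void demo_write_pen_release(int val)
	{
		pen_release = val;
		smp_wmb();
		/* make the store visible to the still-incoherent secondary */
		__cpuc_flush_dcache_area((void *)&pen_release, sizeof(pen_release));
		outer_clean_range(__pa(&pen_release), __pa(&pen_release + 1));
	}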
diff --git a/arch/arm/mach-spear13xx/hotplug.c b/arch/arm/mach-spear13xx/hotplug.c
new file mode 100644
index 0000000..5c6867b
--- /dev/null
+++ b/arch/arm/mach-spear13xx/hotplug.c
@@ -0,0 +1,119 @@
+/*
+ * linux/arch/arm/mach-spear13xx/hotplug.c
+ *
+ * Copyright (C) 2012 ST Microelectronics Ltd.
+ * Deepak Sikri <deepak.sikri@st.com>
+ *
+ * based upon linux/arch/arm/mach-realview/hotplug.c
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+#include <linux/kernel.h>
+#include <linux/errno.h>
+#include <linux/smp.h>
+#include <asm/cacheflush.h>
+#include <asm/cp15.h>
+#include <asm/smp_plat.h>
+
+extern volatile int pen_release;
+
+static inline void cpu_enter_lowpower(void)
+{
+	unsigned int v;
+
+	flush_cache_all();
+	asm volatile(
+	"	mcr	p15, 0, %1, c7, c5, 0\n"
+	"	dsb\n"
+	/*
+	 * Turn off coherency
+	 */
+	"	mrc	p15, 0, %0, c1, c0, 1\n"
+	"	bic	%0, %0, #0x20\n"
+	"	mcr	p15, 0, %0, c1, c0, 1\n"
+	"	mrc	p15, 0, %0, c1, c0, 0\n"
+	"	bic	%0, %0, %2\n"
+	"	mcr	p15, 0, %0, c1, c0, 0\n"
+	: "=&r" (v)
+	: "r" (0), "Ir" (CR_C)
+	: "cc", "memory");
+}
+
+static inline void cpu_leave_lowpower(void)
+{
+	unsigned int v;
+
+	asm volatile("mrc	p15, 0, %0, c1, c0, 0\n"
+	"	orr	%0, %0, %1\n"
+	"	mcr	p15, 0, %0, c1, c0, 0\n"
+	"	mrc	p15, 0, %0, c1, c0, 1\n"
+	"	orr	%0, %0, #0x20\n"
+	"	mcr	p15, 0, %0, c1, c0, 1\n"
+	: "=&r" (v)
+	: "Ir" (CR_C)
+	: "cc");
+}
+
+static inline void platform_do_lowpower(unsigned int cpu, int *spurious)
+{
+	for (;;) {
+		wfi();
+
+		if (pen_release == cpu) {
+			/*
+			 * OK, proper wakeup, we're done
+			 */
+			break;
+		}
+
+		/*
+		 * Getting here, means that we have come out of WFI without
+		 * having been woken up - this shouldn't happen
+		 *
+		 * Just note it happening - when we're woken, we can report
+		 * its occurrence.
+		 */
+		(*spurious)++;
+	}
+}
+
+int platform_cpu_kill(unsigned int cpu)
+{
+	return 1;
+}
+
+/*
+ * platform-specific code to shutdown a CPU
+ *
+ * Called with IRQs disabled
+ */
+void __cpuinit platform_cpu_die(unsigned int cpu)
+{
+	int spurious = 0;
+
+	/*
+	 * we're ready for shutdown now, so do it
+	 */
+	cpu_enter_lowpower();
+	platform_do_lowpower(cpu, &spurious);
+
+	/*
+	 * bring this CPU back into the world of cache
+	 * coherency, and then restore interrupts
+	 */
+	cpu_leave_lowpower();
+
+	if (spurious)
+		pr_warn("CPU%u: %u spurious wakeup calls\n", cpu, spurious);
+}
+
+int platform_cpu_disable(unsigned int cpu)
+{
+	/*
+	 * we don't allow CPU 0 to be shut down (it is still too special,
+	 * e.g. for clock tick interrupts)
+	 */
+	return cpu == 0 ? -EPERM : 0;
+}
diff --git a/arch/arm/mach-spear13xx/include/mach/debug-macro.S b/arch/arm/mach-spear13xx/include/mach/debug-macro.S
new file mode 100644
index 0000000..9e3ae6b
--- /dev/null
+++ b/arch/arm/mach-spear13xx/include/mach/debug-macro.S
@@ -0,0 +1,14 @@
+/*
+ * arch/arm/mach-spear13xx/include/mach/debug-macro.S
+ *
+ * Debugging macro include header for the spear13xx machine family
+ *
+ * Copyright (C) 2012 ST Microelectronics
+ * Viresh Kumar <viresh.linux@gmail.com>
+ *
+ * This file is licensed under the terms of the GNU General Public
+ * License version 2. This program is licensed "as is" without any
+ * warranty of any kind, whether express or implied.
+ */
+
+#include <plat/debug-macro.S>
diff --git a/arch/arm/mach-spear13xx/include/mach/dma.h b/arch/arm/mach-spear13xx/include/mach/dma.h
new file mode 100644
index 0000000..d50bdb6
--- /dev/null
+++ b/arch/arm/mach-spear13xx/include/mach/dma.h
@@ -0,0 +1,128 @@
+/*
+ * arch/arm/mach-spear13xx/include/mach/dma.h
+ *
+ * DMA information for SPEAr13xx machine family
+ *
+ * Copyright (C) 2012 ST Microelectronics
+ * Viresh Kumar <viresh.linux@gmail.com>
+ *
+ * This file is licensed under the terms of the GNU General Public
+ * License version 2. This program is licensed "as is" without any
+ * warranty of any kind, whether express or implied.
+ */
+
+#ifndef __MACH_DMA_H
+#define __MACH_DMA_H
+
+/* dma master and request ids of all the peripherals */
+enum dma_master_info {
+	/* Accessible from only one master */
+	DMA_MASTER_MCIF = 0,
+	DMA_MASTER_FSMC = 1,
+	/* Accessible from both 0 & 1 */
+	DMA_MASTER_MEMORY = 0,
+	DMA_MASTER_ADC = 0,
+	DMA_MASTER_UART0 = 0,
+	DMA_MASTER_SSP0 = 0,
+	DMA_MASTER_I2C0 = 0,
+
+#ifdef CONFIG_MACH_SPEAR1310
+	/* Accessible from only one master */
+	SPEAR1310_DMA_MASTER_JPEG = 1,
+
+	/* Accessible from both 0 & 1 */
+	SPEAR1310_DMA_MASTER_I2S = 0,
+	SPEAR1310_DMA_MASTER_UART1 = 0,
+	SPEAR1310_DMA_MASTER_UART2 = 0,
+	SPEAR1310_DMA_MASTER_UART3 = 0,
+	SPEAR1310_DMA_MASTER_UART4 = 0,
+	SPEAR1310_DMA_MASTER_UART5 = 0,
+	SPEAR1310_DMA_MASTER_I2C1 = 0,
+	SPEAR1310_DMA_MASTER_I2C2 = 0,
+	SPEAR1310_DMA_MASTER_I2C3 = 0,
+	SPEAR1310_DMA_MASTER_I2C4 = 0,
+	SPEAR1310_DMA_MASTER_I2C5 = 0,
+	SPEAR1310_DMA_MASTER_I2C6 = 0,
+	SPEAR1310_DMA_MASTER_I2C7 = 0,
+	SPEAR1310_DMA_MASTER_SSP1 = 0,
+#endif
+
+#ifdef CONFIG_MACH_SPEAR1340
+	/* Accessible from only one master */
+	SPEAR1340_DMA_MASTER_I2S_PLAY = 1,
+	SPEAR1340_DMA_MASTER_I2S_REC = 1,
+	SPEAR1340_DMA_MASTER_I2C1 = 1,
+	SPEAR1340_DMA_MASTER_UART1 = 1,
+
+	/* following are accessible from both master 0 & 1 */
+	SPEAR1340_DMA_MASTER_SPDIF = 0,
+	SPEAR1340_DMA_MASTER_CAM = 1,
+	SPEAR1340_DMA_MASTER_VIDEO_IN = 0,
+	SPEAR1340_DMA_MASTER_MALI = 0,
+#endif
+};
+
+enum request_id {
+	DMA_REQ_ADC = 0,
+	DMA_REQ_SSP0_TX = 4,
+	DMA_REQ_SSP0_RX = 5,
+	DMA_REQ_UART0_TX = 6,
+	DMA_REQ_UART0_RX = 7,
+	DMA_REQ_I2C0_TX = 8,
+	DMA_REQ_I2C0_RX = 9,
+
+#ifdef CONFIG_MACH_SPEAR1310
+	SPEAR1310_DMA_REQ_FROM_JPEG = 2,
+	SPEAR1310_DMA_REQ_TO_JPEG = 3,
+	SPEAR1310_DMA_REQ_I2S_TX = 10,
+	SPEAR1310_DMA_REQ_I2S_RX = 11,
+
+	SPEAR1310_DMA_REQ_I2C1_RX = 0,
+	SPEAR1310_DMA_REQ_I2C1_TX = 1,
+	SPEAR1310_DMA_REQ_I2C2_RX = 2,
+	SPEAR1310_DMA_REQ_I2C2_TX = 3,
+	SPEAR1310_DMA_REQ_I2C3_RX = 4,
+	SPEAR1310_DMA_REQ_I2C3_TX = 5,
+	SPEAR1310_DMA_REQ_I2C4_RX = 6,
+	SPEAR1310_DMA_REQ_I2C4_TX = 7,
+	SPEAR1310_DMA_REQ_I2C5_RX = 8,
+	SPEAR1310_DMA_REQ_I2C5_TX = 9,
+	SPEAR1310_DMA_REQ_I2C6_RX = 10,
+	SPEAR1310_DMA_REQ_I2C6_TX = 11,
+	SPEAR1310_DMA_REQ_UART1_RX = 12,
+	SPEAR1310_DMA_REQ_UART1_TX = 13,
+	SPEAR1310_DMA_REQ_UART2_RX = 14,
+	SPEAR1310_DMA_REQ_UART2_TX = 15,
+	SPEAR1310_DMA_REQ_UART5_RX = 16,
+	SPEAR1310_DMA_REQ_UART5_TX = 17,
+	SPEAR1310_DMA_REQ_SSP1_RX = 18,
+	SPEAR1310_DMA_REQ_SSP1_TX = 19,
+	SPEAR1310_DMA_REQ_I2C7_RX = 20,
+	SPEAR1310_DMA_REQ_I2C7_TX = 21,
+	SPEAR1310_DMA_REQ_UART3_RX = 28,
+	SPEAR1310_DMA_REQ_UART3_TX = 29,
+	SPEAR1310_DMA_REQ_UART4_RX = 30,
+	SPEAR1310_DMA_REQ_UART4_TX = 31,
+#endif
+
+#ifdef CONFIG_MACH_SPEAR1340
+	SPEAR1340_DMA_REQ_SPDIF_TX = 2,
+	SPEAR1340_DMA_REQ_SPDIF_RX = 3,
+	SPEAR1340_DMA_REQ_I2S_TX = 10,
+	SPEAR1340_DMA_REQ_I2S_RX = 11,
+	SPEAR1340_DMA_REQ_UART1_TX = 12,
+	SPEAR1340_DMA_REQ_UART1_RX = 13,
+	SPEAR1340_DMA_REQ_I2C1_TX = 14,
+	SPEAR1340_DMA_REQ_I2C1_RX = 15,
+	SPEAR1340_DMA_REQ_CAM0_EVEN = 0,
+	SPEAR1340_DMA_REQ_CAM0_ODD = 1,
+	SPEAR1340_DMA_REQ_CAM1_EVEN = 2,
+	SPEAR1340_DMA_REQ_CAM1_ODD = 3,
+	SPEAR1340_DMA_REQ_CAM2_EVEN = 4,
+	SPEAR1340_DMA_REQ_CAM2_ODD = 5,
+	SPEAR1340_DMA_REQ_CAM3_EVEN = 6,
+	SPEAR1340_DMA_REQ_CAM3_ODD = 7,
+#endif
+};
+
+#endif /* __MACH_DMA_H */
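These master and request ids end up in the struct dw_dma_slave that a peripheral hands to the DesignWare DMA driver (see the UART1 and SSP0 wiring in spear1340.c and spear13xx.c later in this series). A minimal sketch for a receive channel, using the ADC ids purely as an illustration; the ADC is not actually wired up this way in this series:

	#include <linux/dw_dmac.h>
	#include <mach/dma.h>

	/* Rx: data flows from the ADC request line into memory */
	static struct dw_dma_slave adc_dma_param = {
		.cfg_hi = DWC_CFGH_SRC_PER(DMA_REQ_ADC),
		.cfg_lo = 0,
		.src_master = DMA_MASTER_ADC,
		.dst_master = DMA_MASTER_MEMORY,
	};
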
diff --git a/arch/arm/mach-spear13xx/include/mach/generic.h b/arch/arm/mach-spear13xx/include/mach/generic.h
new file mode 100644
index 0000000..dac57fd
--- /dev/null
+++ b/arch/arm/mach-spear13xx/include/mach/generic.h
@@ -0,0 +1,49 @@
+/*
+ * arch/arm/mach-spear13xx/include/mach/generic.h
+ *
+ * spear13xx machine family generic header file
+ *
+ * Copyright (C) 2012 ST Microelectronics
+ * Viresh Kumar <viresh.linux@gmail.com>
+ *
+ * This file is licensed under the terms of the GNU General Public
+ * License version 2. This program is licensed "as is" without any
+ * warranty of any kind, whether express or implied.
+ */
+
+#ifndef __MACH_GENERIC_H
+#define __MACH_GENERIC_H
+
+#include <linux/dmaengine.h>
+#include <asm/mach/time.h>
+
+/* Add spear13xx structure declarations here */
+extern struct sys_timer spear13xx_timer;
+extern struct pl022_ssp_controller pl022_plat_data;
+extern struct dw_dma_platform_data dmac_plat_data;
+extern struct dw_dma_slave cf_dma_priv;
+extern struct dw_dma_slave nand_read_dma_priv;
+extern struct dw_dma_slave nand_write_dma_priv;
+
+/* Add spear13xx family function declarations here */
+void __init spear_setup_of_timer(void);
+void __init spear13xx_map_io(void);
+void __init spear13xx_dt_init_irq(void);
+void __init spear13xx_l2x0_init(void);
+bool dw_dma_filter(struct dma_chan *chan, void *slave);
+void spear_restart(char, const char *);
+void spear13xx_secondary_startup(void);
+
+#ifdef CONFIG_MACH_SPEAR1310
+void __init spear1310_clk_init(void);
+#else
+static inline void spear1310_clk_init(void) {}
+#endif
+
+#ifdef CONFIG_MACH_SPEAR1340
+void __init spear1340_clk_init(void);
+#else
+static inline void spear1340_clk_init(void) {}
+#endif
+
+#endif /* __MACH_GENERIC_H */
diff --git a/arch/arm/mach-spear13xx/include/mach/gpio.h b/arch/arm/mach-spear13xx/include/mach/gpio.h
new file mode 100644
index 0000000..85f1763
--- /dev/null
+++ b/arch/arm/mach-spear13xx/include/mach/gpio.h
@@ -0,0 +1,19 @@
+/*
+ * arch/arm/mach-spear13xx/include/mach/gpio.h
+ *
+ * GPIO macros for SPEAr13xx machine family
+ *
+ * Copyright (C) 2012 ST Microelectronics
+ * Viresh Kumar <viresh.linux@gmail.com>
+ *
+ * This file is licensed under the terms of the GNU General Public
+ * License version 2. This program is licensed "as is" without any
+ * warranty of any kind, whether express or implied.
+ */
+
+#ifndef __MACH_GPIO_H
+#define __MACH_GPIO_H
+
+#include <plat/gpio.h>
+
+#endif /* __MACH_GPIO_H */
diff --git a/arch/arm/mach-spear13xx/include/mach/hardware.h b/arch/arm/mach-spear13xx/include/mach/hardware.h
new file mode 100644
index 0000000..40a8c178
--- /dev/null
+++ b/arch/arm/mach-spear13xx/include/mach/hardware.h
@@ -0,0 +1 @@
+/* empty */
diff --git a/arch/arm/mach-spear13xx/include/mach/irqs.h b/arch/arm/mach-spear13xx/include/mach/irqs.h
new file mode 100644
index 0000000..271a62b
--- /dev/null
+++ b/arch/arm/mach-spear13xx/include/mach/irqs.h
@@ -0,0 +1,20 @@
+/*
+ * arch/arm/mach-spear13xx/include/mach/irqs.h
+ *
+ * IRQ helper macros for spear13xx machine family
+ *
+ * Copyright (C) 2012 ST Microelectronics
+ * Viresh Kumar <viresh.linux@gmail.com>
+ *
+ * This file is licensed under the terms of the GNU General Public
+ * License version 2. This program is licensed "as is" without any
+ * warranty of any kind, whether express or implied.
+ */
+
+#ifndef __MACH_IRQS_H
+#define __MACH_IRQS_H
+
+#define IRQ_GIC_END			160
+#define NR_IRQS				IRQ_GIC_END
+
+#endif /* __MACH_IRQS_H */
diff --git a/arch/arm/mach-spear13xx/include/mach/spear.h b/arch/arm/mach-spear13xx/include/mach/spear.h
new file mode 100644
index 0000000..65f27de
--- /dev/null
+++ b/arch/arm/mach-spear13xx/include/mach/spear.h
@@ -0,0 +1,62 @@
+/*
+ * arch/arm/mach-spear13xx/include/mach/spear.h
+ *
+ * spear13xx Machine family specific definition
+ *
+ * Copyright (C) 2012 ST Microelectronics
+ * Viresh Kumar <viresh.linux@gmail.com>
+ *
+ * This file is licensed under the terms of the GNU General Public
+ * License version 2. This program is licensed "as is" without any
+ * warranty of any kind, whether express or implied.
+ */
+
+#ifndef __MACH_SPEAR13XX_H
+#define __MACH_SPEAR13XX_H
+
+#include <asm/memory.h>
+
+#define PERIP_GRP2_BASE				UL(0xB3000000)
+#define VA_PERIP_GRP2_BASE			UL(0xFE000000)
+#define MCIF_SDHCI_BASE				UL(0xB3000000)
+#define SYSRAM0_BASE				UL(0xB3800000)
+#define VA_SYSRAM0_BASE				UL(0xFE800000)
+#define SYS_LOCATION				(VA_SYSRAM0_BASE + 0x600)
+
+#define PERIP_GRP1_BASE				UL(0xE0000000)
+#define VA_PERIP_GRP1_BASE			UL(0xFD000000)
+#define UART_BASE				UL(0xE0000000)
+#define VA_UART_BASE				UL(0xFD000000)
+#define SSP_BASE				UL(0xE0100000)
+#define MISC_BASE				UL(0xE0700000)
+#define VA_MISC_BASE				IOMEM(UL(0xFD700000))
+
+#define A9SM_AND_MPMC_BASE			UL(0xEC000000)
+#define VA_A9SM_AND_MPMC_BASE			UL(0xFC000000)
+
+/* A9SM peripheral offsets */
+#define A9SM_PERIP_BASE				UL(0xEC800000)
+#define VA_A9SM_PERIP_BASE			UL(0xFC800000)
+#define VA_SCU_BASE				(VA_A9SM_PERIP_BASE + 0x00)
+
+#define L2CC_BASE				UL(0xED000000)
+#define VA_L2CC_BASE				IOMEM(UL(0xFB000000))
+
+/* others */
+#define DMAC0_BASE				UL(0xEA800000)
+#define DMAC1_BASE				UL(0xEB000000)
+#define MCIF_CF_BASE				UL(0xB2800000)
+
+/* Devices present in SPEAr1310 */
+#ifdef CONFIG_MACH_SPEAR1310
+#define SPEAR1310_RAS_GRP1_BASE			UL(0xD8000000)
+#define VA_SPEAR1310_RAS_GRP1_BASE		UL(0xFA000000)
+#define SPEAR1310_RAS_BASE			UL(0xD8400000)
+#define VA_SPEAR1310_RAS_BASE			IOMEM(UL(0xFA400000))
+#endif /* CONFIG_MACH_SPEAR1310 */
+
+/* Debug UART for Linux, used for debug and uncompress messages */
+#define SPEAR_DBG_UART_BASE			UART_BASE
+#define VA_SPEAR_DBG_UART_BASE			VA_UART_BASE
+
+#endif /* __MACH_SPEAR13XX_H */
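The PERIP_GRP*/VA_PERIP_GRP* pairs describe 16MB static sections that spear13xx_map_io() installs later in the series, so a physical peripheral address inside one of these windows translates by a fixed offset. A hypothetical helper, only valid inside the group-2 window and shown purely as a sketch:

	/* illustrative only: assumes pa lies inside the 16MB PERIP_GRP2 window */
	static inline void __iomem *grp2_io_address(unsigned long pa)
	{
		return (void __iomem *)(VA_PERIP_GRP2_BASE + (pa - PERIP_GRP2_BASE));
	}
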
diff --git a/arch/arm/mach-spear13xx/include/mach/spear1310_misc_regs.h b/arch/arm/mach-spear13xx/include/mach/spear1310_misc_regs.h
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/arch/arm/mach-spear13xx/include/mach/spear1310_misc_regs.h
diff --git a/arch/arm/mach-spear13xx/include/mach/spear1340_misc_regs.h b/arch/arm/mach-spear13xx/include/mach/spear1340_misc_regs.h
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/arch/arm/mach-spear13xx/include/mach/spear1340_misc_regs.h
diff --git a/arch/arm/mach-spear13xx/include/mach/timex.h b/arch/arm/mach-spear13xx/include/mach/timex.h
new file mode 100644
index 0000000..3a58b82
--- /dev/null
+++ b/arch/arm/mach-spear13xx/include/mach/timex.h
@@ -0,0 +1,19 @@
+/*
+ * arch/arm/mach-spear13xx/include/mach/timex.h
+ *
+ * SPEAr13xx machine family specific timex definitions
+ *
+ * Copyright (C) 2012 ST Microelectronics
+ * Viresh Kumar <viresh.linux@gmail.com>
+ *
+ * This file is licensed under the terms of the GNU General Public
+ * License version 2. This program is licensed "as is" without any
+ * warranty of any kind, whether express or implied.
+ */
+
+#ifndef __MACH_TIMEX_H
+#define __MACH_TIMEX_H
+
+#include <plat/timex.h>
+
+#endif /* __MACH_TIMEX_H */
diff --git a/arch/arm/mach-spear13xx/include/mach/uncompress.h b/arch/arm/mach-spear13xx/include/mach/uncompress.h
new file mode 100644
index 0000000..70fe72f
--- /dev/null
+++ b/arch/arm/mach-spear13xx/include/mach/uncompress.h
@@ -0,0 +1,19 @@
+/*
+ * arch/arm/mach-spear13xx/include/mach/uncompress.h
+ *
+ * Serial port stubs for kernel decompress status messages
+ *
+ * Copyright (C) 2012 ST Microelectronics
+ * Viresh Kumar <viresh.linux@gmail.com>
+ *
+ * This file is licensed under the terms of the GNU General Public
+ * License version 2. This program is licensed "as is" without any
+ * warranty of any kind, whether express or implied.
+ */
+
+#ifndef __MACH_UNCOMPRESS_H
+#define __MACH_UNCOMPRESS_H
+
+#include <plat/uncompress.h>
+
+#endif /* __MACH_UNCOMPRESS_H */
diff --git a/arch/arm/mach-spear13xx/platsmp.c b/arch/arm/mach-spear13xx/platsmp.c
new file mode 100644
index 0000000..f5d07f2
--- /dev/null
+++ b/arch/arm/mach-spear13xx/platsmp.c
@@ -0,0 +1,127 @@
+/*
+ * arch/arm/mach-spear13xx/platsmp.c
+ *
+ * based upon linux/arch/arm/mach-realview/platsmp.c
+ *
+ * Copyright (C) 2012 ST Microelectronics Ltd.
+ * Shiraz Hashim <shiraz.hashim@st.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/delay.h>
+#include <linux/jiffies.h>
+#include <linux/io.h>
+#include <linux/smp.h>
+#include <asm/cacheflush.h>
+#include <asm/hardware/gic.h>
+#include <asm/smp_scu.h>
+#include <mach/spear.h>
+
+/*
+ * control for which core is the next to come out of the secondary
+ * boot "holding pen"
+ */
+volatile int __cpuinitdata pen_release = -1;
+static DEFINE_SPINLOCK(boot_lock);
+
+static void __iomem *scu_base = IOMEM(VA_SCU_BASE);
+extern void spear13xx_secondary_startup(void);
+
+void __cpuinit platform_secondary_init(unsigned int cpu)
+{
+	/*
+	 * if any interrupts are already enabled for the primary
+	 * core (e.g. timer irq), then they will not have been enabled
+	 * for us: do so
+	 */
+	gic_secondary_init(0);
+
+	/*
+	 * let the primary processor know we're out of the
+	 * pen, then head off into the C entry point
+	 */
+	pen_release = -1;
+	smp_wmb();
+
+	/*
+	 * Synchronise with the boot thread.
+	 */
+	spin_lock(&boot_lock);
+	spin_unlock(&boot_lock);
+}
+
+int __cpuinit boot_secondary(unsigned int cpu, struct task_struct *idle)
+{
+	unsigned long timeout;
+
+	/*
+	 * set synchronisation state between this boot processor
+	 * and the secondary one
+	 */
+	spin_lock(&boot_lock);
+
+	/*
+	 * The secondary processor is waiting to be released from
+	 * the holding pen - release it, then wait for it to flag
+	 * that it has been released by resetting pen_release.
+	 *
+	 * Note that "pen_release" is the hardware CPU ID, whereas
+	 * "cpu" is Linux's internal ID.
+	 */
+	pen_release = cpu;
+	flush_cache_all();
+	outer_flush_all();
+
+	timeout = jiffies + (1 * HZ);
+	while (time_before(jiffies, timeout)) {
+		smp_rmb();
+		if (pen_release == -1)
+			break;
+
+		udelay(10);
+	}
+
+	/*
+	 * now the secondary core is starting up, let it run its
+	 * calibrations, then wait for it to finish
+	 */
+	spin_unlock(&boot_lock);
+
+	return pen_release != -1 ? -ENOSYS : 0;
+}
+
+/*
+ * Initialise the CPU possible map early - this describes the CPUs
+ * which may be present or become present in the system.
+ */
+void __init smp_init_cpus(void)
+{
+	unsigned int i, ncores = scu_get_core_count(scu_base);
+
+	if (ncores > nr_cpu_ids) {
+		pr_warn("SMP: %u cores greater than maximum (%u), clipping\n",
+			ncores, nr_cpu_ids);
+		ncores = nr_cpu_ids;
+	}
+
+	for (i = 0; i < ncores; i++)
+		set_cpu_possible(i, true);
+
+	set_smp_cross_call(gic_raise_softirq);
+}
+
+void __init platform_smp_prepare_cpus(unsigned int max_cpus)
+{
+
+	scu_enable(scu_base);
+
+	/*
+	 * Write the address of secondary startup into the system-wide location
+	 * (presently it is in SRAM). The BootMonitor waits until it receives a
+	 * soft interrupt, and then the secondary CPU branches to this address.
+	 */
+	__raw_writel(virt_to_phys(spear13xx_secondary_startup), SYS_LOCATION);
+}
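boot_secondary() makes the hardware CPU id visible to a core that is not yet cache coherent by open-coding flush_cache_all() and outer_flush_all() after the pen_release write. Several other ARM platforms of this vintage wrap the same idea in a small helper so the maintenance is scoped to the variable itself; a sketch of that alternative, stated as an assumption rather than as part of this patch:

	/* hypothetical helper, mirroring plat-versatile's write_pen_release() */
	static void write_pen_release(int val)
	{
		pen_release = val;
		smp_wmb();
		/* push the store past L1 and the outer cache */
		__cpuc_flush_dcache_area((void *)&pen_release, sizeof(pen_release));
		outer_clean_range(__pa(&pen_release), __pa(&pen_release + 1));
	}
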
diff --git a/arch/arm/mach-spear13xx/spear1310.c b/arch/arm/mach-spear13xx/spear1310.c
new file mode 100644
index 0000000..732d29b
--- /dev/null
+++ b/arch/arm/mach-spear13xx/spear1310.c
@@ -0,0 +1,88 @@
+/*
+ * arch/arm/mach-spear13xx/spear1310.c
+ *
+ * SPEAr1310 machine source file
+ *
+ * Copyright (C) 2012 ST Microelectronics
+ * Viresh Kumar <viresh.linux@gmail.com>
+ *
+ * This file is licensed under the terms of the GNU General Public
+ * License version 2. This program is licensed "as is" without any
+ * warranty of any kind, whether express or implied.
+ */
+
+#define pr_fmt(fmt) "SPEAr1310: " fmt
+
+#include <linux/amba/pl022.h>
+#include <linux/of_platform.h>
+#include <asm/hardware/gic.h>
+#include <asm/mach/arch.h>
+#include <asm/mach/map.h>
+#include <mach/generic.h>
+#include <mach/spear.h>
+
+/* Base addresses */
+#define SPEAR1310_SSP1_BASE			UL(0x5D400000)
+#define SPEAR1310_SATA0_BASE			UL(0xB1000000)
+#define SPEAR1310_SATA1_BASE			UL(0xB1800000)
+#define SPEAR1310_SATA2_BASE			UL(0xB4000000)
+
+/* ssp device registration */
+static struct pl022_ssp_controller ssp1_plat_data = {
+	.bus_id = 0,
+	.enable_dma = 0,
+	.num_chipselect = 3,
+};
+
+/* Add SPEAr1310 auxdata to pass platform data */
+static struct of_dev_auxdata spear1310_auxdata_lookup[] __initdata = {
+	OF_DEV_AUXDATA("arasan,cf-spear1340", MCIF_CF_BASE, NULL, &cf_dma_priv),
+	OF_DEV_AUXDATA("snps,dma-spear1340", DMAC0_BASE, NULL, &dmac_plat_data),
+	OF_DEV_AUXDATA("snps,dma-spear1340", DMAC1_BASE, NULL, &dmac_plat_data),
+	OF_DEV_AUXDATA("arm,pl022", SSP_BASE, NULL, &pl022_plat_data),
+
+	OF_DEV_AUXDATA("arm,pl022", SPEAR1310_SSP1_BASE, NULL, &ssp1_plat_data),
+	{}
+};
+
+static void __init spear1310_dt_init(void)
+{
+	of_platform_populate(NULL, of_default_bus_match_table,
+			spear1310_auxdata_lookup, NULL);
+}
+
+static const char * const spear1310_dt_board_compat[] = {
+	"st,spear1310",
+	"st,spear1310-evb",
+	NULL,
+};
+
+/*
+ * Following will create 16MB static virtual/physical mappings
+ * PHYSICAL		VIRTUAL
+ * 0xD8000000		0xFA000000
+ */
+struct map_desc spear1310_io_desc[] __initdata = {
+	{
+		.virtual	= VA_SPEAR1310_RAS_GRP1_BASE,
+		.pfn		= __phys_to_pfn(SPEAR1310_RAS_GRP1_BASE),
+		.length		= SZ_16M,
+		.type		= MT_DEVICE
+	},
+};
+
+static void __init spear1310_map_io(void)
+{
+	iotable_init(spear1310_io_desc, ARRAY_SIZE(spear1310_io_desc));
+	spear13xx_map_io();
+}
+
+DT_MACHINE_START(SPEAR1310_DT, "ST SPEAr1310 SoC with Flattened Device Tree")
+	.map_io		=	spear1310_map_io,
+	.init_irq	=	spear13xx_dt_init_irq,
+	.handle_irq	=	gic_handle_irq,
+	.timer		=	&spear13xx_timer,
+	.init_machine	=	spear1310_dt_init,
+	.restart	=	spear_restart,
+	.dt_compat	=	spear1310_dt_board_compat,
+MACHINE_END
diff --git a/arch/arm/mach-spear13xx/spear1340.c b/arch/arm/mach-spear13xx/spear1340.c
new file mode 100644
index 0000000..81e4ed7
--- /dev/null
+++ b/arch/arm/mach-spear13xx/spear1340.c
@@ -0,0 +1,192 @@
+/*
+ * arch/arm/mach-spear13xx/spear1340.c
+ *
+ * SPEAr1340 machine source file
+ *
+ * Copyright (C) 2012 ST Microelectronics
+ * Viresh Kumar <viresh.linux@gmail.com>
+ *
+ * This file is licensed under the terms of the GNU General Public
+ * License version 2. This program is licensed "as is" without any
+ * warranty of any kind, whether express or implied.
+ */
+
+#define pr_fmt(fmt) "SPEAr1340: " fmt
+
+#include <linux/ahci_platform.h>
+#include <linux/amba/serial.h>
+#include <linux/delay.h>
+#include <linux/dw_dmac.h>
+#include <linux/of_platform.h>
+#include <asm/hardware/gic.h>
+#include <asm/mach/arch.h>
+#include <mach/dma.h>
+#include <mach/generic.h>
+#include <mach/spear.h>
+
+/* Base addresses */
+#define SPEAR1340_SATA_BASE			UL(0xB1000000)
+#define SPEAR1340_UART1_BASE			UL(0xB4100000)
+
+/* Power Management Registers */
+#define SPEAR1340_PCM_CFG			(VA_MISC_BASE + 0x100)
+#define SPEAR1340_PCM_WKUP_CFG			(VA_MISC_BASE + 0x104)
+#define SPEAR1340_SWITCH_CTR			(VA_MISC_BASE + 0x108)
+
+#define SPEAR1340_PERIP1_SW_RST			(VA_MISC_BASE + 0x318)
+#define SPEAR1340_PERIP2_SW_RST			(VA_MISC_BASE + 0x31C)
+#define SPEAR1340_PERIP3_SW_RST			(VA_MISC_BASE + 0x320)
+
+/* PCIE - SATA configuration registers */
+#define SPEAR1340_PCIE_SATA_CFG			(VA_MISC_BASE + 0x424)
+	/* PCIE CFG masks */
+	#define SPEAR1340_PCIE_CFG_DEVICE_PRESENT	(1 << 11)
+	#define SPEAR1340_PCIE_CFG_POWERUP_RESET	(1 << 10)
+	#define SPEAR1340_PCIE_CFG_CORE_CLK_EN		(1 << 9)
+	#define SPEAR1340_PCIE_CFG_AUX_CLK_EN		(1 << 8)
+	#define SPEAR1340_SATA_CFG_TX_CLK_EN		(1 << 4)
+	#define SPEAR1340_SATA_CFG_RX_CLK_EN		(1 << 3)
+	#define SPEAR1340_SATA_CFG_POWERUP_RESET	(1 << 2)
+	#define SPEAR1340_SATA_CFG_PM_CLK_EN		(1 << 1)
+	#define SPEAR1340_PCIE_SATA_SEL_PCIE		(0)
+	#define SPEAR1340_PCIE_SATA_SEL_SATA		(1)
+	#define SPEAR1340_SATA_PCIE_CFG_MASK		0xF1F
+	#define SPEAR1340_PCIE_CFG_VAL	(SPEAR1340_PCIE_SATA_SEL_PCIE | \
+			SPEAR1340_PCIE_CFG_AUX_CLK_EN | \
+			SPEAR1340_PCIE_CFG_CORE_CLK_EN | \
+			SPEAR1340_PCIE_CFG_POWERUP_RESET | \
+			SPEAR1340_PCIE_CFG_DEVICE_PRESENT)
+	#define SPEAR1340_SATA_CFG_VAL	(SPEAR1340_PCIE_SATA_SEL_SATA | \
+			SPEAR1340_SATA_CFG_PM_CLK_EN | \
+			SPEAR1340_SATA_CFG_POWERUP_RESET | \
+			SPEAR1340_SATA_CFG_RX_CLK_EN | \
+			SPEAR1340_SATA_CFG_TX_CLK_EN)
+
+#define SPEAR1340_PCIE_MIPHY_CFG		(VA_MISC_BASE + 0x428)
+	#define SPEAR1340_MIPHY_OSC_BYPASS_EXT		(1 << 31)
+	#define SPEAR1340_MIPHY_CLK_REF_DIV2		(1 << 27)
+	#define SPEAR1340_MIPHY_CLK_REF_DIV4		(2 << 27)
+	#define SPEAR1340_MIPHY_CLK_REF_DIV8		(3 << 27)
+	#define SPEAR1340_MIPHY_PLL_RATIO_TOP(x)	(x << 0)
+	#define SPEAR1340_PCIE_SATA_MIPHY_CFG_SATA \
+			(SPEAR1340_MIPHY_OSC_BYPASS_EXT | \
+			SPEAR1340_MIPHY_CLK_REF_DIV2 | \
+			SPEAR1340_MIPHY_PLL_RATIO_TOP(60))
+	#define SPEAR1340_PCIE_SATA_MIPHY_CFG_SATA_25M_CRYSTAL_CLK \
+			(SPEAR1340_MIPHY_PLL_RATIO_TOP(120))
+	#define SPEAR1340_PCIE_SATA_MIPHY_CFG_PCIE \
+			(SPEAR1340_MIPHY_OSC_BYPASS_EXT | \
+			SPEAR1340_MIPHY_PLL_RATIO_TOP(25))
+
+static struct dw_dma_slave uart1_dma_param[] = {
+	{
+		/* Tx */
+		.cfg_hi = DWC_CFGH_DST_PER(SPEAR1340_DMA_REQ_UART1_TX),
+		.cfg_lo = 0,
+		.src_master = DMA_MASTER_MEMORY,
+		.dst_master = SPEAR1340_DMA_MASTER_UART1,
+	}, {
+		/* Rx */
+		.cfg_hi = DWC_CFGH_SRC_PER(SPEAR1340_DMA_REQ_UART1_RX),
+		.cfg_lo = 0,
+		.src_master = SPEAR1340_DMA_MASTER_UART1,
+		.dst_master = DMA_MASTER_MEMORY,
+	}
+};
+
+static struct amba_pl011_data uart1_data = {
+	.dma_filter = dw_dma_filter,
+	.dma_tx_param = &uart1_dma_param[0],
+	.dma_rx_param = &uart1_dma_param[1],
+};
+
+/* SATA device registration */
+static int sata_miphy_init(struct device *dev, void __iomem *addr)
+{
+	writel(SPEAR1340_SATA_CFG_VAL, SPEAR1340_PCIE_SATA_CFG);
+	writel(SPEAR1340_PCIE_SATA_MIPHY_CFG_SATA_25M_CRYSTAL_CLK,
+			SPEAR1340_PCIE_MIPHY_CFG);
+	/* Switch on sata power domain */
+	writel((readl(SPEAR1340_PCM_CFG) | (0x800)), SPEAR1340_PCM_CFG);
+	msleep(20);
+	/* Disable PCIE SATA Controller reset */
+	writel((readl(SPEAR1340_PERIP1_SW_RST) & (~0x1000)),
+			SPEAR1340_PERIP1_SW_RST);
+	msleep(20);
+
+	return 0;
+}
+
+void sata_miphy_exit(struct device *dev)
+{
+	writel(0, SPEAR1340_PCIE_SATA_CFG);
+	writel(0, SPEAR1340_PCIE_MIPHY_CFG);
+
+	/* Enable PCIE SATA Controller reset */
+	writel((readl(SPEAR1340_PERIP1_SW_RST) | (0x1000)),
+			SPEAR1340_PERIP1_SW_RST);
+	msleep(20);
+	/* Switch off sata power domain */
+	writel((readl(SPEAR1340_PCM_CFG) & (~0x800)), SPEAR1340_PCM_CFG);
+	msleep(20);
+}
+
+int sata_suspend(struct device *dev)
+{
+	if (dev->power.power_state.event == PM_EVENT_FREEZE)
+		return 0;
+
+	sata_miphy_exit(dev);
+
+	return 0;
+}
+
+int sata_resume(struct device *dev)
+{
+	if (dev->power.power_state.event == PM_EVENT_THAW)
+		return 0;
+
+	return sata_miphy_init(dev, NULL);
+}
+
+static struct ahci_platform_data sata_pdata = {
+	.init = sata_miphy_init,
+	.exit = sata_miphy_exit,
+	.suspend = sata_suspend,
+	.resume = sata_resume,
+};
+
+/* Add SPEAr1340 auxdata to pass platform data */
+static struct of_dev_auxdata spear1340_auxdata_lookup[] __initdata = {
+	OF_DEV_AUXDATA("arasan,cf-spear1340", MCIF_CF_BASE, NULL, &cf_dma_priv),
+	OF_DEV_AUXDATA("snps,dma-spear1340", DMAC0_BASE, NULL, &dmac_plat_data),
+	OF_DEV_AUXDATA("snps,dma-spear1340", DMAC1_BASE, NULL, &dmac_plat_data),
+	OF_DEV_AUXDATA("arm,pl022", SSP_BASE, NULL, &pl022_plat_data),
+
+	OF_DEV_AUXDATA("snps,spear-ahci", SPEAR1340_SATA_BASE, NULL,
+			&sata_pdata),
+	OF_DEV_AUXDATA("arm,pl011", SPEAR1340_UART1_BASE, NULL, &uart1_data),
+	{}
+};
+
+static void __init spear1340_dt_init(void)
+{
+	of_platform_populate(NULL, of_default_bus_match_table,
+			spear1340_auxdata_lookup, NULL);
+}
+
+static const char * const spear1340_dt_board_compat[] = {
+	"st,spear1340",
+	"st,spear1340-evb",
+	NULL,
+};
+
+DT_MACHINE_START(SPEAR1340_DT, "ST SPEAr1340 SoC with Flattened Device Tree")
+	.map_io		=	spear13xx_map_io,
+	.init_irq	=	spear13xx_dt_init_irq,
+	.handle_irq	=	gic_handle_irq,
+	.timer		=	&spear13xx_timer,
+	.init_machine	=	spear1340_dt_init,
+	.restart	=	spear_restart,
+	.dt_compat	=	spear1340_dt_board_compat,
+MACHINE_END
diff --git a/arch/arm/mach-spear13xx/spear13xx.c b/arch/arm/mach-spear13xx/spear13xx.c
new file mode 100644
index 0000000..cf936b1
--- /dev/null
+++ b/arch/arm/mach-spear13xx/spear13xx.c
@@ -0,0 +1,197 @@
+/*
+ * arch/arm/mach-spear13xx/spear13xx.c
+ *
+ * SPEAr13XX machines common source file
+ *
+ * Copyright (C) 2012 ST Microelectronics
+ * Viresh Kumar <viresh.linux@gmail.com>
+ *
+ * This file is licensed under the terms of the GNU General Public
+ * License version 2. This program is licensed "as is" without any
+ * warranty of any kind, whether express or implied.
+ */
+
+#define pr_fmt(fmt) "SPEAr13xx: " fmt
+
+#include <linux/amba/pl022.h>
+#include <linux/clk.h>
+#include <linux/dw_dmac.h>
+#include <linux/err.h>
+#include <linux/of_irq.h>
+#include <asm/hardware/cache-l2x0.h>
+#include <asm/hardware/gic.h>
+#include <asm/mach/map.h>
+#include <asm/smp_twd.h>
+#include <mach/dma.h>
+#include <mach/generic.h>
+#include <mach/spear.h>
+
+/* common dw_dma filter routine to be used by peripherals */
+bool dw_dma_filter(struct dma_chan *chan, void *slave)
+{
+	struct dw_dma_slave *dws = (struct dw_dma_slave *)slave;
+
+	if (chan->device->dev == dws->dma_dev) {
+		chan->private = slave;
+		return true;
+	} else {
+		return false;
+	}
+}
+
+/* ssp device registration */
+static struct dw_dma_slave ssp_dma_param[] = {
+	{
+		/* Tx */
+		.cfg_hi = DWC_CFGH_DST_PER(DMA_REQ_SSP0_TX),
+		.cfg_lo = 0,
+		.src_master = DMA_MASTER_MEMORY,
+		.dst_master = DMA_MASTER_SSP0,
+	}, {
+		/* Rx */
+		.cfg_hi = DWC_CFGH_SRC_PER(DMA_REQ_SSP0_RX),
+		.cfg_lo = 0,
+		.src_master = DMA_MASTER_SSP0,
+		.dst_master = DMA_MASTER_MEMORY,
+	}
+};
+
+struct pl022_ssp_controller pl022_plat_data = {
+	.bus_id = 0,
+	.enable_dma = 1,
+	.dma_filter = dw_dma_filter,
+	.dma_rx_param = &ssp_dma_param[1],
+	.dma_tx_param = &ssp_dma_param[0],
+	.num_chipselect = 3,
+};
+
+/* CF device registration */
+struct dw_dma_slave cf_dma_priv = {
+	.cfg_hi = 0,
+	.cfg_lo = 0,
+	.src_master = 0,
+	.dst_master = 0,
+};
+
+/* dmac device registration */
+struct dw_dma_platform_data dmac_plat_data = {
+	.nr_channels = 8,
+	.chan_allocation_order = CHAN_ALLOCATION_DESCENDING,
+	.chan_priority = CHAN_PRIORITY_DESCENDING,
+};
+
+void __init spear13xx_l2x0_init(void)
+{
+	/*
+	 * 512KB (64KB/way), 8-way associativity, parity supported
+	 *
+	 * FIXME: the 9th bit of the Auxiliary Control register must be set
+	 * for some spear13xx devices for stable L2 operation.
+	 *
+	 * Enable Early BRESP, L2 prefetch for Instruction and Data,
+	 * write alloc and 'Full line of zero' options
+	 *
+	 */
+
+	writel_relaxed(0x06, VA_L2CC_BASE + L2X0_PREFETCH_CTRL);
+
+	/*
+	 * Program following latencies in order to make
+	 * SPEAr1340 work at 600 MHz
+	 */
+	writel_relaxed(0x221, VA_L2CC_BASE + L2X0_TAG_LATENCY_CTRL);
+	writel_relaxed(0x441, VA_L2CC_BASE + L2X0_DATA_LATENCY_CTRL);
+	l2x0_init(VA_L2CC_BASE, 0x70A60001, 0xfe00ffff);
+}
+
+/*
+ * Following will create 16MB static virtual/physical mappings
+ * PHYSICAL		VIRTUAL
+ * 0xB3000000		0xFE000000
+ * 0xE0000000		0xFD000000
+ * 0xEC000000		0xFC000000
+ * 0xED000000		0xFB000000
+ */
+struct map_desc spear13xx_io_desc[] __initdata = {
+	{
+		.virtual	= VA_PERIP_GRP2_BASE,
+		.pfn		= __phys_to_pfn(PERIP_GRP2_BASE),
+		.length		= SZ_16M,
+		.type		= MT_DEVICE
+	}, {
+		.virtual	= VA_PERIP_GRP1_BASE,
+		.pfn		= __phys_to_pfn(PERIP_GRP1_BASE),
+		.length		= SZ_16M,
+		.type		= MT_DEVICE
+	}, {
+		.virtual	= VA_A9SM_AND_MPMC_BASE,
+		.pfn		= __phys_to_pfn(A9SM_AND_MPMC_BASE),
+		.length		= SZ_16M,
+		.type		= MT_DEVICE
+	}, {
+		.virtual	= (unsigned long)VA_L2CC_BASE,
+		.pfn		= __phys_to_pfn(L2CC_BASE),
+		.length		= SZ_4K,
+		.type		= MT_DEVICE
+	},
+};
+
+/* This will create static memory mapping for selected devices */
+void __init spear13xx_map_io(void)
+{
+	iotable_init(spear13xx_io_desc, ARRAY_SIZE(spear13xx_io_desc));
+}
+
+static void __init spear13xx_clk_init(void)
+{
+	if (of_machine_is_compatible("st,spear1310"))
+		spear1310_clk_init();
+	else if (of_machine_is_compatible("st,spear1340"))
+		spear1340_clk_init();
+	else
+		pr_err("%s: Unknown machine\n", __func__);
+}
+
+static void __init spear13xx_timer_init(void)
+{
+	char pclk_name[] = "osc_24m_clk";
+	struct clk *gpt_clk, *pclk;
+
+	spear13xx_clk_init();
+
+	/* get the system timer clock */
+	gpt_clk = clk_get_sys("gpt0", NULL);
+	if (IS_ERR(gpt_clk)) {
+		pr_err("%s: couldn't get clk for gpt\n", __func__);
+		BUG();
+	}
+
+	/* get a suitable parent clock for the timer */
+	pclk = clk_get(NULL, pclk_name);
+	if (IS_ERR(pclk)) {
+		pr_err("%s: couldn't get %s as parent for gpt\n", __func__,
+				pclk_name);
+		BUG();
+	}
+
+	clk_set_parent(gpt_clk, pclk);
+	clk_put(gpt_clk);
+	clk_put(pclk);
+
+	spear_setup_of_timer();
+	twd_local_timer_of_register();
+}
+
+struct sys_timer spear13xx_timer = {
+	.init = spear13xx_timer_init,
+};
+
+static const struct of_device_id gic_of_match[] __initconst = {
+	{ .compatible = "arm,cortex-a9-gic", .data = gic_of_init },
+	{ /* Sentinel */ }
+};
+
+void __init spear13xx_dt_init_irq(void)
+{
+	of_irq_init(gic_of_match);
+}
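dw_dma_filter() above is the dmaengine filter callback that the pl022 and pl011 drivers invoke through the dma_filter/dma_*_param fields of their platform data. A sketch of the equivalent direct usage, with a hypothetical wrapper name, just to show what the filter matches against:

	#include <linux/dmaengine.h>

	static struct dma_chan *spear13xx_request_ssp_tx_chan(void)
	{
		dma_cap_mask_t mask;

		dma_cap_zero(mask);
		dma_cap_set(DMA_SLAVE, mask);

		/* dw_dma_filter() compares the channel's device against
		 * ssp_dma_param's dma_dev and stashes the slave data in
		 * chan->private on a match */
		return dma_request_channel(mask, dw_dma_filter, &ssp_dma_param[0]);
	}
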
diff --git a/arch/arm/mach-spear3xx/Makefile b/arch/arm/mach-spear3xx/Makefile
index 17b5d83..8d12faa 100644
--- a/arch/arm/mach-spear3xx/Makefile
+++ b/arch/arm/mach-spear3xx/Makefile
@@ -3,7 +3,7 @@
 #
 
 # common files
-obj-$(CONFIG_ARCH_SPEAR3XX)	+= spear3xx.o clock.o
+obj-$(CONFIG_ARCH_SPEAR3XX)	+= spear3xx.o
 
 # spear300 specific files
 obj-$(CONFIG_MACH_SPEAR300) += spear300.o
diff --git a/arch/arm/mach-spear3xx/clock.c b/arch/arm/mach-spear3xx/clock.c
deleted file mode 100644
index cd6c110..0000000
--- a/arch/arm/mach-spear3xx/clock.c
+++ /dev/null
@@ -1,892 +0,0 @@
-/*
- * arch/arm/mach-spear3xx/clock.c
- *
- * SPEAr3xx machines clock framework source file
- *
- * Copyright (C) 2009 ST Microelectronics
- * Viresh Kumar<viresh.kumar@st.com>
- *
- * This file is licensed under the terms of the GNU General Public
- * License version 2. This program is licensed "as is" without any
- * warranty of any kind, whether express or implied.
- */
-
-#include <linux/clkdev.h>
-#include <linux/init.h>
-#include <linux/io.h>
-#include <linux/kernel.h>
-#include <linux/of_platform.h>
-#include <asm/mach-types.h>
-#include <plat/clock.h>
-#include <mach/misc_regs.h>
-#include <mach/spear.h>
-
-#define PLL1_CTR		(MISC_BASE + 0x008)
-#define PLL1_FRQ		(MISC_BASE + 0x00C)
-#define PLL1_MOD		(MISC_BASE + 0x010)
-#define PLL2_CTR		(MISC_BASE + 0x014)
-/* PLL_CTR register masks */
-#define PLL_ENABLE		2
-#define PLL_MODE_SHIFT		4
-#define PLL_MODE_MASK		0x3
-#define PLL_MODE_NORMAL		0
-#define PLL_MODE_FRACTION	1
-#define PLL_MODE_DITH_DSB	2
-#define PLL_MODE_DITH_SSB	3
-
-#define PLL2_FRQ		(MISC_BASE + 0x018)
-/* PLL FRQ register masks */
-#define PLL_DIV_N_SHIFT		0
-#define PLL_DIV_N_MASK		0xFF
-#define PLL_DIV_P_SHIFT		8
-#define PLL_DIV_P_MASK		0x7
-#define PLL_NORM_FDBK_M_SHIFT	24
-#define PLL_NORM_FDBK_M_MASK	0xFF
-#define PLL_DITH_FDBK_M_SHIFT	16
-#define PLL_DITH_FDBK_M_MASK	0xFFFF
-
-#define PLL2_MOD		(MISC_BASE + 0x01C)
-#define PLL_CLK_CFG		(MISC_BASE + 0x020)
-#define CORE_CLK_CFG		(MISC_BASE + 0x024)
-/* CORE CLK CFG register masks */
-#define PLL_HCLK_RATIO_SHIFT	10
-#define PLL_HCLK_RATIO_MASK	0x3
-#define HCLK_PCLK_RATIO_SHIFT	8
-#define HCLK_PCLK_RATIO_MASK	0x3
-
-#define PERIP_CLK_CFG		(MISC_BASE + 0x028)
-/* PERIP_CLK_CFG register masks */
-#define UART_CLK_SHIFT		4
-#define UART_CLK_MASK		0x1
-#define FIRDA_CLK_SHIFT		5
-#define FIRDA_CLK_MASK		0x3
-#define GPT0_CLK_SHIFT		8
-#define GPT1_CLK_SHIFT		11
-#define GPT2_CLK_SHIFT		12
-#define GPT_CLK_MASK		0x1
-#define AUX_CLK_PLL3_VAL	0
-#define AUX_CLK_PLL1_VAL	1
-
-#define PERIP1_CLK_ENB		(MISC_BASE + 0x02C)
-/* PERIP1_CLK_ENB register masks */
-#define UART_CLK_ENB		3
-#define SSP_CLK_ENB		5
-#define I2C_CLK_ENB		7
-#define JPEG_CLK_ENB		8
-#define FIRDA_CLK_ENB		10
-#define GPT1_CLK_ENB		11
-#define GPT2_CLK_ENB		12
-#define ADC_CLK_ENB		15
-#define RTC_CLK_ENB		17
-#define GPIO_CLK_ENB		18
-#define DMA_CLK_ENB		19
-#define SMI_CLK_ENB		21
-#define GMAC_CLK_ENB		23
-#define USBD_CLK_ENB		24
-#define USBH_CLK_ENB		25
-#define C3_CLK_ENB		31
-
-#define RAS_CLK_ENB		(MISC_BASE + 0x034)
-
-#define PRSC1_CLK_CFG		(MISC_BASE + 0x044)
-#define PRSC2_CLK_CFG		(MISC_BASE + 0x048)
-#define PRSC3_CLK_CFG		(MISC_BASE + 0x04C)
-/* gpt synthesizer register masks */
-#define GPT_MSCALE_SHIFT	0
-#define GPT_MSCALE_MASK		0xFFF
-#define GPT_NSCALE_SHIFT	12
-#define GPT_NSCALE_MASK		0xF
-
-#define AMEM_CLK_CFG		(MISC_BASE + 0x050)
-#define EXPI_CLK_CFG		(MISC_BASE + 0x054)
-#define CLCD_CLK_SYNT		(MISC_BASE + 0x05C)
-#define FIRDA_CLK_SYNT		(MISC_BASE + 0x060)
-#define UART_CLK_SYNT		(MISC_BASE + 0x064)
-#define GMAC_CLK_SYNT		(MISC_BASE + 0x068)
-#define RAS1_CLK_SYNT		(MISC_BASE + 0x06C)
-#define RAS2_CLK_SYNT		(MISC_BASE + 0x070)
-#define RAS3_CLK_SYNT		(MISC_BASE + 0x074)
-#define RAS4_CLK_SYNT		(MISC_BASE + 0x078)
-/* aux clk synthesiser register masks for irda to ras4 */
-#define AUX_SYNT_ENB		31
-#define AUX_EQ_SEL_SHIFT	30
-#define AUX_EQ_SEL_MASK		1
-#define AUX_EQ1_SEL		0
-#define AUX_EQ2_SEL		1
-#define AUX_XSCALE_SHIFT	16
-#define AUX_XSCALE_MASK		0xFFF
-#define AUX_YSCALE_SHIFT	0
-#define AUX_YSCALE_MASK		0xFFF
-
-/* root clks */
-/* 32 KHz oscillator clock */
-static struct clk osc_32k_clk = {
-	.flags = ALWAYS_ENABLED,
-	.rate = 32000,
-};
-
-/* 24 MHz oscillator clock */
-static struct clk osc_24m_clk = {
-	.flags = ALWAYS_ENABLED,
-	.rate = 24000000,
-};
-
-/* clock derived from 32 KHz osc clk */
-/* rtc clock */
-static struct clk rtc_clk = {
-	.pclk = &osc_32k_clk,
-	.en_reg = PERIP1_CLK_ENB,
-	.en_reg_bit = RTC_CLK_ENB,
-	.recalc = &follow_parent,
-};
-
-/* clock derived from 24 MHz osc clk */
-/* pll masks structure */
-static struct pll_clk_masks pll1_masks = {
-	.mode_mask = PLL_MODE_MASK,
-	.mode_shift = PLL_MODE_SHIFT,
-	.norm_fdbk_m_mask = PLL_NORM_FDBK_M_MASK,
-	.norm_fdbk_m_shift = PLL_NORM_FDBK_M_SHIFT,
-	.dith_fdbk_m_mask = PLL_DITH_FDBK_M_MASK,
-	.dith_fdbk_m_shift = PLL_DITH_FDBK_M_SHIFT,
-	.div_p_mask = PLL_DIV_P_MASK,
-	.div_p_shift = PLL_DIV_P_SHIFT,
-	.div_n_mask = PLL_DIV_N_MASK,
-	.div_n_shift = PLL_DIV_N_SHIFT,
-};
-
-/* pll1 configuration structure */
-static struct pll_clk_config pll1_config = {
-	.mode_reg = PLL1_CTR,
-	.cfg_reg = PLL1_FRQ,
-	.masks = &pll1_masks,
-};
-
-/* pll rate configuration table, in ascending order of rates */
-struct pll_rate_tbl pll_rtbl[] = {
-	{.mode = 0, .m = 0x85, .n = 0x0C, .p = 0x1}, /* 266 MHz */
-	{.mode = 0, .m = 0xA6, .n = 0x0C, .p = 0x1}, /* 332 MHz */
-};
-
-/* PLL1 clock */
-static struct clk pll1_clk = {
-	.flags = ENABLED_ON_INIT,
-	.pclk = &osc_24m_clk,
-	.en_reg = PLL1_CTR,
-	.en_reg_bit = PLL_ENABLE,
-	.calc_rate = &pll_calc_rate,
-	.recalc = &pll_clk_recalc,
-	.set_rate = &pll_clk_set_rate,
-	.rate_config = {pll_rtbl, ARRAY_SIZE(pll_rtbl), 1},
-	.private_data = &pll1_config,
-};
-
-/* PLL3 48 MHz clock */
-static struct clk pll3_48m_clk = {
-	.flags = ALWAYS_ENABLED,
-	.pclk = &osc_24m_clk,
-	.rate = 48000000,
-};
-
-/* watch dog timer clock */
-static struct clk wdt_clk = {
-	.flags = ALWAYS_ENABLED,
-	.pclk = &osc_24m_clk,
-	.recalc = &follow_parent,
-};
-
-/* clock derived from pll1 clk */
-/* cpu clock */
-static struct clk cpu_clk = {
-	.flags = ALWAYS_ENABLED,
-	.pclk = &pll1_clk,
-	.recalc = &follow_parent,
-};
-
-/* ahb masks structure */
-static struct bus_clk_masks ahb_masks = {
-	.mask = PLL_HCLK_RATIO_MASK,
-	.shift = PLL_HCLK_RATIO_SHIFT,
-};
-
-/* ahb configuration structure */
-static struct bus_clk_config ahb_config = {
-	.reg = CORE_CLK_CFG,
-	.masks = &ahb_masks,
-};
-
-/* ahb rate configuration table, in ascending order of rates */
-struct bus_rate_tbl bus_rtbl[] = {
-	{.div = 3}, /* == parent divided by 4 */
-	{.div = 2}, /* == parent divided by 3 */
-	{.div = 1}, /* == parent divided by 2 */
-	{.div = 0}, /* == parent divided by 1 */
-};
-
-/* ahb clock */
-static struct clk ahb_clk = {
-	.flags = ALWAYS_ENABLED,
-	.pclk = &pll1_clk,
-	.calc_rate = &bus_calc_rate,
-	.recalc = &bus_clk_recalc,
-	.set_rate = &bus_clk_set_rate,
-	.rate_config = {bus_rtbl, ARRAY_SIZE(bus_rtbl), 2},
-	.private_data = &ahb_config,
-};
-
-/* auxiliary synthesizers masks */
-static struct aux_clk_masks aux_masks = {
-	.eq_sel_mask = AUX_EQ_SEL_MASK,
-	.eq_sel_shift = AUX_EQ_SEL_SHIFT,
-	.eq1_mask = AUX_EQ1_SEL,
-	.eq2_mask = AUX_EQ2_SEL,
-	.xscale_sel_mask = AUX_XSCALE_MASK,
-	.xscale_sel_shift = AUX_XSCALE_SHIFT,
-	.yscale_sel_mask = AUX_YSCALE_MASK,
-	.yscale_sel_shift = AUX_YSCALE_SHIFT,
-};
-
-/* uart synth configurations */
-static struct aux_clk_config uart_synth_config = {
-	.synth_reg = UART_CLK_SYNT,
-	.masks = &aux_masks,
-};
-
-/* aux rate configuration table, in ascending order of rates */
-struct aux_rate_tbl aux_rtbl[] = {
-	/* For PLL1 = 332 MHz */
-	{.xscale = 1, .yscale = 8, .eq = 1}, /* 41.5 MHz */
-	{.xscale = 1, .yscale = 4, .eq = 1}, /* 83 MHz */
-	{.xscale = 1, .yscale = 2, .eq = 1}, /* 166 MHz */
-};
-
-/* uart synth clock */
-static struct clk uart_synth_clk = {
-	.en_reg = UART_CLK_SYNT,
-	.en_reg_bit = AUX_SYNT_ENB,
-	.pclk = &pll1_clk,
-	.calc_rate = &aux_calc_rate,
-	.recalc = &aux_clk_recalc,
-	.set_rate = &aux_clk_set_rate,
-	.rate_config = {aux_rtbl, ARRAY_SIZE(aux_rtbl), 1},
-	.private_data = &uart_synth_config,
-};
-
-/* uart parents */
-static struct pclk_info uart_pclk_info[] = {
-	{
-		.pclk = &uart_synth_clk,
-		.pclk_val = AUX_CLK_PLL1_VAL,
-	}, {
-		.pclk = &pll3_48m_clk,
-		.pclk_val = AUX_CLK_PLL3_VAL,
-	},
-};
-
-/* uart parent select structure */
-static struct pclk_sel uart_pclk_sel = {
-	.pclk_info = uart_pclk_info,
-	.pclk_count = ARRAY_SIZE(uart_pclk_info),
-	.pclk_sel_reg = PERIP_CLK_CFG,
-	.pclk_sel_mask = UART_CLK_MASK,
-};
-
-/* uart clock */
-static struct clk uart_clk = {
-	.en_reg = PERIP1_CLK_ENB,
-	.en_reg_bit = UART_CLK_ENB,
-	.pclk_sel = &uart_pclk_sel,
-	.pclk_sel_shift = UART_CLK_SHIFT,
-	.recalc = &follow_parent,
-};
-
-/* firda configurations */
-static struct aux_clk_config firda_synth_config = {
-	.synth_reg = FIRDA_CLK_SYNT,
-	.masks = &aux_masks,
-};
-
-/* firda synth clock */
-static struct clk firda_synth_clk = {
-	.en_reg = FIRDA_CLK_SYNT,
-	.en_reg_bit = AUX_SYNT_ENB,
-	.pclk = &pll1_clk,
-	.calc_rate = &aux_calc_rate,
-	.recalc = &aux_clk_recalc,
-	.set_rate = &aux_clk_set_rate,
-	.rate_config = {aux_rtbl, ARRAY_SIZE(aux_rtbl), 1},
-	.private_data = &firda_synth_config,
-};
-
-/* firda parents */
-static struct pclk_info firda_pclk_info[] = {
-	{
-		.pclk = &firda_synth_clk,
-		.pclk_val = AUX_CLK_PLL1_VAL,
-	}, {
-		.pclk = &pll3_48m_clk,
-		.pclk_val = AUX_CLK_PLL3_VAL,
-	},
-};
-
-/* firda parent select structure */
-static struct pclk_sel firda_pclk_sel = {
-	.pclk_info = firda_pclk_info,
-	.pclk_count = ARRAY_SIZE(firda_pclk_info),
-	.pclk_sel_reg = PERIP_CLK_CFG,
-	.pclk_sel_mask = FIRDA_CLK_MASK,
-};
-
-/* firda clock */
-static struct clk firda_clk = {
-	.en_reg = PERIP1_CLK_ENB,
-	.en_reg_bit = FIRDA_CLK_ENB,
-	.pclk_sel = &firda_pclk_sel,
-	.pclk_sel_shift = FIRDA_CLK_SHIFT,
-	.recalc = &follow_parent,
-};
-
-/* gpt synthesizer masks */
-static struct gpt_clk_masks gpt_masks = {
-	.mscale_sel_mask = GPT_MSCALE_MASK,
-	.mscale_sel_shift = GPT_MSCALE_SHIFT,
-	.nscale_sel_mask = GPT_NSCALE_MASK,
-	.nscale_sel_shift = GPT_NSCALE_SHIFT,
-};
-
-/* gpt rate configuration table, in ascending order of rates */
-struct gpt_rate_tbl gpt_rtbl[] = {
-	/* For pll1 = 332 MHz */
-	{.mscale = 4, .nscale = 0}, /* 41.5 MHz */
-	{.mscale = 2, .nscale = 0}, /* 55.3 MHz */
-	{.mscale = 1, .nscale = 0}, /* 83 MHz */
-};
-
-/* gpt0 synth clk config*/
-static struct gpt_clk_config gpt0_synth_config = {
-	.synth_reg = PRSC1_CLK_CFG,
-	.masks = &gpt_masks,
-};
-
-/* gpt synth clock */
-static struct clk gpt0_synth_clk = {
-	.flags = ALWAYS_ENABLED,
-	.pclk = &pll1_clk,
-	.calc_rate = &gpt_calc_rate,
-	.recalc = &gpt_clk_recalc,
-	.set_rate = &gpt_clk_set_rate,
-	.rate_config = {gpt_rtbl, ARRAY_SIZE(gpt_rtbl), 2},
-	.private_data = &gpt0_synth_config,
-};
-
-/* gpt parents */
-static struct pclk_info gpt0_pclk_info[] = {
-	{
-		.pclk = &gpt0_synth_clk,
-		.pclk_val = AUX_CLK_PLL1_VAL,
-	}, {
-		.pclk = &pll3_48m_clk,
-		.pclk_val = AUX_CLK_PLL3_VAL,
-	},
-};
-
-/* gpt parent select structure */
-static struct pclk_sel gpt0_pclk_sel = {
-	.pclk_info = gpt0_pclk_info,
-	.pclk_count = ARRAY_SIZE(gpt0_pclk_info),
-	.pclk_sel_reg = PERIP_CLK_CFG,
-	.pclk_sel_mask = GPT_CLK_MASK,
-};
-
-/* gpt0 timer clock */
-static struct clk gpt0_clk = {
-	.flags = ALWAYS_ENABLED,
-	.pclk_sel = &gpt0_pclk_sel,
-	.pclk_sel_shift = GPT0_CLK_SHIFT,
-	.recalc = &follow_parent,
-};
-
-/* gpt1 synth clk configurations */
-static struct gpt_clk_config gpt1_synth_config = {
-	.synth_reg = PRSC2_CLK_CFG,
-	.masks = &gpt_masks,
-};
-
-/* gpt1 synth clock */
-static struct clk gpt1_synth_clk = {
-	.flags = ALWAYS_ENABLED,
-	.pclk = &pll1_clk,
-	.calc_rate = &gpt_calc_rate,
-	.recalc = &gpt_clk_recalc,
-	.set_rate = &gpt_clk_set_rate,
-	.rate_config = {gpt_rtbl, ARRAY_SIZE(gpt_rtbl), 2},
-	.private_data = &gpt1_synth_config,
-};
-
-static struct pclk_info gpt1_pclk_info[] = {
-	{
-		.pclk = &gpt1_synth_clk,
-		.pclk_val = AUX_CLK_PLL1_VAL,
-	}, {
-		.pclk = &pll3_48m_clk,
-		.pclk_val = AUX_CLK_PLL3_VAL,
-	},
-};
-
-/* gpt parent select structure */
-static struct pclk_sel gpt1_pclk_sel = {
-	.pclk_info = gpt1_pclk_info,
-	.pclk_count = ARRAY_SIZE(gpt1_pclk_info),
-	.pclk_sel_reg = PERIP_CLK_CFG,
-	.pclk_sel_mask = GPT_CLK_MASK,
-};
-
-/* gpt1 timer clock */
-static struct clk gpt1_clk = {
-	.en_reg = PERIP1_CLK_ENB,
-	.en_reg_bit = GPT1_CLK_ENB,
-	.pclk_sel = &gpt1_pclk_sel,
-	.pclk_sel_shift = GPT1_CLK_SHIFT,
-	.recalc = &follow_parent,
-};
-
-/* gpt2 synth clk configurations */
-static struct gpt_clk_config gpt2_synth_config = {
-	.synth_reg = PRSC3_CLK_CFG,
-	.masks = &gpt_masks,
-};
-
-/* gpt1 synth clock */
-static struct clk gpt2_synth_clk = {
-	.flags = ALWAYS_ENABLED,
-	.pclk = &pll1_clk,
-	.calc_rate = &gpt_calc_rate,
-	.recalc = &gpt_clk_recalc,
-	.set_rate = &gpt_clk_set_rate,
-	.rate_config = {gpt_rtbl, ARRAY_SIZE(gpt_rtbl), 2},
-	.private_data = &gpt2_synth_config,
-};
-
-static struct pclk_info gpt2_pclk_info[] = {
-	{
-		.pclk = &gpt2_synth_clk,
-		.pclk_val = AUX_CLK_PLL1_VAL,
-	}, {
-		.pclk = &pll3_48m_clk,
-		.pclk_val = AUX_CLK_PLL3_VAL,
-	},
-};
-
-/* gpt parent select structure */
-static struct pclk_sel gpt2_pclk_sel = {
-	.pclk_info = gpt2_pclk_info,
-	.pclk_count = ARRAY_SIZE(gpt2_pclk_info),
-	.pclk_sel_reg = PERIP_CLK_CFG,
-	.pclk_sel_mask = GPT_CLK_MASK,
-};
-
-/* gpt2 timer clock */
-static struct clk gpt2_clk = {
-	.en_reg = PERIP1_CLK_ENB,
-	.en_reg_bit = GPT2_CLK_ENB,
-	.pclk_sel = &gpt2_pclk_sel,
-	.pclk_sel_shift = GPT2_CLK_SHIFT,
-	.recalc = &follow_parent,
-};
-
-/* clock derived from pll3 clk */
-/* usbh clock */
-static struct clk usbh_clk = {
-	.pclk = &pll3_48m_clk,
-	.en_reg = PERIP1_CLK_ENB,
-	.en_reg_bit = USBH_CLK_ENB,
-	.recalc = &follow_parent,
-};
-
-/* usbd clock */
-static struct clk usbd_clk = {
-	.pclk = &pll3_48m_clk,
-	.en_reg = PERIP1_CLK_ENB,
-	.en_reg_bit = USBD_CLK_ENB,
-	.recalc = &follow_parent,
-};
-
-/* clock derived from usbh clk */
-/* usbh0 clock */
-static struct clk usbh0_clk = {
-	.flags = ALWAYS_ENABLED,
-	.pclk = &usbh_clk,
-	.recalc = &follow_parent,
-};
-
-/* usbh1 clock */
-static struct clk usbh1_clk = {
-	.flags = ALWAYS_ENABLED,
-	.pclk = &usbh_clk,
-	.recalc = &follow_parent,
-};
-
-/* clock derived from ahb clk */
-/* apb masks structure */
-static struct bus_clk_masks apb_masks = {
-	.mask = HCLK_PCLK_RATIO_MASK,
-	.shift = HCLK_PCLK_RATIO_SHIFT,
-};
-
-/* apb configuration structure */
-static struct bus_clk_config apb_config = {
-	.reg = CORE_CLK_CFG,
-	.masks = &apb_masks,
-};
-
-/* apb clock */
-static struct clk apb_clk = {
-	.flags = ALWAYS_ENABLED,
-	.pclk = &ahb_clk,
-	.calc_rate = &bus_calc_rate,
-	.recalc = &bus_clk_recalc,
-	.set_rate = &bus_clk_set_rate,
-	.rate_config = {bus_rtbl, ARRAY_SIZE(bus_rtbl), 2},
-	.private_data = &apb_config,
-};
-
-/* i2c clock */
-static struct clk i2c_clk = {
-	.pclk = &ahb_clk,
-	.en_reg = PERIP1_CLK_ENB,
-	.en_reg_bit = I2C_CLK_ENB,
-	.recalc = &follow_parent,
-};
-
-/* dma clock */
-static struct clk dma_clk = {
-	.pclk = &ahb_clk,
-	.en_reg = PERIP1_CLK_ENB,
-	.en_reg_bit = DMA_CLK_ENB,
-	.recalc = &follow_parent,
-};
-
-/* jpeg clock */
-static struct clk jpeg_clk = {
-	.pclk = &ahb_clk,
-	.en_reg = PERIP1_CLK_ENB,
-	.en_reg_bit = JPEG_CLK_ENB,
-	.recalc = &follow_parent,
-};
-
-/* gmac clock */
-static struct clk gmac_clk = {
-	.pclk = &ahb_clk,
-	.en_reg = PERIP1_CLK_ENB,
-	.en_reg_bit = GMAC_CLK_ENB,
-	.recalc = &follow_parent,
-};
-
-/* smi clock */
-static struct clk smi_clk = {
-	.pclk = &ahb_clk,
-	.en_reg = PERIP1_CLK_ENB,
-	.en_reg_bit = SMI_CLK_ENB,
-	.recalc = &follow_parent,
-};
-
-/* c3 clock */
-static struct clk c3_clk = {
-	.pclk = &ahb_clk,
-	.en_reg = PERIP1_CLK_ENB,
-	.en_reg_bit = C3_CLK_ENB,
-	.recalc = &follow_parent,
-};
-
-/* clock derived from apb clk */
-/* adc clock */
-static struct clk adc_clk = {
-	.pclk = &apb_clk,
-	.en_reg = PERIP1_CLK_ENB,
-	.en_reg_bit = ADC_CLK_ENB,
-	.recalc = &follow_parent,
-};
-
-#if defined(CONFIG_MACH_SPEAR310) || defined(CONFIG_MACH_SPEAR320)
-/* emi clock */
-static struct clk emi_clk = {
-	.flags = ALWAYS_ENABLED,
-	.pclk = &ahb_clk,
-	.recalc = &follow_parent,
-};
-#endif
-
-/* ssp clock */
-static struct clk ssp0_clk = {
-	.pclk = &apb_clk,
-	.en_reg = PERIP1_CLK_ENB,
-	.en_reg_bit = SSP_CLK_ENB,
-	.recalc = &follow_parent,
-};
-
-/* gpio clock */
-static struct clk gpio_clk = {
-	.pclk = &apb_clk,
-	.en_reg = PERIP1_CLK_ENB,
-	.en_reg_bit = GPIO_CLK_ENB,
-	.recalc = &follow_parent,
-};
-
-static struct clk dummy_apb_pclk;
-
-#if defined(CONFIG_MACH_SPEAR300) || defined(CONFIG_MACH_SPEAR310) || \
-	defined(CONFIG_MACH_SPEAR320)
-/* fsmc clock */
-static struct clk fsmc_clk = {
-	.flags = ALWAYS_ENABLED,
-	.pclk = &ahb_clk,
-	.recalc = &follow_parent,
-};
-#endif
-
-/* common clocks to spear310 and spear320 */
-#if defined(CONFIG_MACH_SPEAR310) || defined(CONFIG_MACH_SPEAR320)
-/* uart1 clock */
-static struct clk uart1_clk = {
-	.flags = ALWAYS_ENABLED,
-	.pclk = &apb_clk,
-	.recalc = &follow_parent,
-};
-
-/* uart2 clock */
-static struct clk uart2_clk = {
-	.flags = ALWAYS_ENABLED,
-	.pclk = &apb_clk,
-	.recalc = &follow_parent,
-};
-#endif /* CONFIG_MACH_SPEAR310 || CONFIG_MACH_SPEAR320 */
-
-/* common clocks to spear300 and spear320 */
-#if defined(CONFIG_MACH_SPEAR300) || defined(CONFIG_MACH_SPEAR320)
-/* clcd clock */
-static struct clk clcd_clk = {
-	.flags = ALWAYS_ENABLED,
-	.pclk = &pll3_48m_clk,
-	.recalc = &follow_parent,
-};
-
-/* sdhci clock */
-static struct clk sdhci_clk = {
-	.flags = ALWAYS_ENABLED,
-	.pclk = &ahb_clk,
-	.recalc = &follow_parent,
-};
-#endif /* CONFIG_MACH_SPEAR300 || CONFIG_MACH_SPEAR320 */
-
-/* spear300 machine specific clock structures */
-#ifdef CONFIG_MACH_SPEAR300
-/* gpio1 clock */
-static struct clk gpio1_clk = {
-	.flags = ALWAYS_ENABLED,
-	.pclk = &apb_clk,
-	.recalc = &follow_parent,
-};
-
-/* keyboard clock */
-static struct clk kbd_clk = {
-	.flags = ALWAYS_ENABLED,
-	.pclk = &apb_clk,
-	.recalc = &follow_parent,
-};
-
-#endif
-
-/* spear310 machine specific clock structures */
-#ifdef CONFIG_MACH_SPEAR310
-/* uart3 clock */
-static struct clk uart3_clk = {
-	.flags = ALWAYS_ENABLED,
-	.pclk = &apb_clk,
-	.recalc = &follow_parent,
-};
-
-/* uart4 clock */
-static struct clk uart4_clk = {
-	.flags = ALWAYS_ENABLED,
-	.pclk = &apb_clk,
-	.recalc = &follow_parent,
-};
-
-/* uart5 clock */
-static struct clk uart5_clk = {
-	.flags = ALWAYS_ENABLED,
-	.pclk = &apb_clk,
-	.recalc = &follow_parent,
-};
-#endif
-
-/* spear320 machine specific clock structures */
-#ifdef CONFIG_MACH_SPEAR320
-/* can0 clock */
-static struct clk can0_clk = {
-	.flags = ALWAYS_ENABLED,
-	.pclk = &apb_clk,
-	.recalc = &follow_parent,
-};
-
-/* can1 clock */
-static struct clk can1_clk = {
-	.flags = ALWAYS_ENABLED,
-	.pclk = &apb_clk,
-	.recalc = &follow_parent,
-};
-
-/* i2c1 clock */
-static struct clk i2c1_clk = {
-	.flags = ALWAYS_ENABLED,
-	.pclk = &ahb_clk,
-	.recalc = &follow_parent,
-};
-
-/* ssp1 clock */
-static struct clk ssp1_clk = {
-	.flags = ALWAYS_ENABLED,
-	.pclk = &apb_clk,
-	.recalc = &follow_parent,
-};
-
-/* ssp2 clock */
-static struct clk ssp2_clk = {
-	.flags = ALWAYS_ENABLED,
-	.pclk = &apb_clk,
-	.recalc = &follow_parent,
-};
-
-/* pwm clock */
-static struct clk pwm_clk = {
-	.flags = ALWAYS_ENABLED,
-	.pclk = &apb_clk,
-	.recalc = &follow_parent,
-};
-#endif
-
-/* array of all spear 3xx clock lookups */
-static struct clk_lookup spear_clk_lookups[] = {
-	CLKDEV_INIT(NULL, "apb_pclk", &dummy_apb_pclk),
-	/* root clks */
-	CLKDEV_INIT(NULL, "osc_32k_clk", &osc_32k_clk),
-	CLKDEV_INIT(NULL, "osc_24m_clk", &osc_24m_clk),
-	/* clock derived from 32 KHz osc clk */
-	CLKDEV_INIT("fc900000.rtc", NULL, &rtc_clk),
-	/* clock derived from 24 MHz osc clk */
-	CLKDEV_INIT(NULL, "pll1_clk", &pll1_clk),
-	CLKDEV_INIT(NULL, "pll3_48m_clk", &pll3_48m_clk),
-	CLKDEV_INIT("fc880000.wdt", NULL, &wdt_clk),
-	/* clock derived from pll1 clk */
-	CLKDEV_INIT(NULL, "cpu_clk", &cpu_clk),
-	CLKDEV_INIT(NULL, "ahb_clk", &ahb_clk),
-	CLKDEV_INIT(NULL, "uart_synth_clk", &uart_synth_clk),
-	CLKDEV_INIT(NULL, "firda_synth_clk", &firda_synth_clk),
-	CLKDEV_INIT(NULL, "gpt0_synth_clk", &gpt0_synth_clk),
-	CLKDEV_INIT(NULL, "gpt1_synth_clk", &gpt1_synth_clk),
-	CLKDEV_INIT(NULL, "gpt2_synth_clk", &gpt2_synth_clk),
-	CLKDEV_INIT("d0000000.serial", NULL, &uart_clk),
-	CLKDEV_INIT("firda", NULL, &firda_clk),
-	CLKDEV_INIT("gpt0", NULL, &gpt0_clk),
-	CLKDEV_INIT("gpt1", NULL, &gpt1_clk),
-	CLKDEV_INIT("gpt2", NULL, &gpt2_clk),
-	/* clock derived from pll3 clk */
-	CLKDEV_INIT("designware_udc", NULL, &usbd_clk),
-	CLKDEV_INIT(NULL, "usbh_clk", &usbh_clk),
-	/* clock derived from usbh clk */
-	CLKDEV_INIT(NULL, "usbh.0_clk", &usbh0_clk),
-	CLKDEV_INIT(NULL, "usbh.1_clk", &usbh1_clk),
-	/* clock derived from ahb clk */
-	CLKDEV_INIT(NULL, "apb_clk", &apb_clk),
-	CLKDEV_INIT("d0180000.i2c", NULL, &i2c_clk),
-	CLKDEV_INIT("fc400000.dma", NULL, &dma_clk),
-	CLKDEV_INIT("jpeg", NULL, &jpeg_clk),
-	CLKDEV_INIT("e0800000.eth", NULL, &gmac_clk),
-	CLKDEV_INIT("fc000000.flash", NULL, &smi_clk),
-	CLKDEV_INIT("c3", NULL, &c3_clk),
-	/* clock derived from apb clk */
-	CLKDEV_INIT("adc", NULL, &adc_clk),
-	CLKDEV_INIT("d0100000.spi", NULL, &ssp0_clk),
-	CLKDEV_INIT("fc980000.gpio", NULL, &gpio_clk),
-};
-
-/* array of all spear 300 clock lookups */
-#ifdef CONFIG_MACH_SPEAR300
-static struct clk_lookup spear300_clk_lookups[] = {
-	CLKDEV_INIT("60000000.clcd", NULL, &clcd_clk),
-	CLKDEV_INIT("94000000.flash", NULL, &fsmc_clk),
-	CLKDEV_INIT("a9000000.gpio", NULL, &gpio1_clk),
-	CLKDEV_INIT("a0000000.kbd", NULL, &kbd_clk),
-	CLKDEV_INIT("70000000.sdhci", NULL, &sdhci_clk),
-};
-
-void __init spear300_clk_init(void)
-{
-	int i;
-
-	for (i = 0; i < ARRAY_SIZE(spear_clk_lookups); i++)
-		clk_register(&spear_clk_lookups[i]);
-
-	for (i = 0; i < ARRAY_SIZE(spear300_clk_lookups); i++)
-		clk_register(&spear300_clk_lookups[i]);
-
-	clk_init();
-}
-#endif
-
-/* array of all spear 310 clock lookups */
-#ifdef CONFIG_MACH_SPEAR310
-static struct clk_lookup spear310_clk_lookups[] = {
-	CLKDEV_INIT("44000000.flash", NULL, &fsmc_clk),
-	CLKDEV_INIT(NULL, "emi", &emi_clk),
-	CLKDEV_INIT("b2000000.serial", NULL, &uart1_clk),
-	CLKDEV_INIT("b2080000.serial", NULL, &uart2_clk),
-	CLKDEV_INIT("b2100000.serial", NULL, &uart3_clk),
-	CLKDEV_INIT("b2180000.serial", NULL, &uart4_clk),
-	CLKDEV_INIT("b2200000.serial", NULL, &uart5_clk),
-};
-
-void __init spear310_clk_init(void)
-{
-	int i;
-
-	for (i = 0; i < ARRAY_SIZE(spear_clk_lookups); i++)
-		clk_register(&spear_clk_lookups[i]);
-
-	for (i = 0; i < ARRAY_SIZE(spear310_clk_lookups); i++)
-		clk_register(&spear310_clk_lookups[i]);
-
-	clk_init();
-}
-#endif
-
-/* array of all spear 320 clock lookups */
-#ifdef CONFIG_MACH_SPEAR320
-static struct clk_lookup spear320_clk_lookups[] = {
-	CLKDEV_INIT("90000000.clcd", NULL, &clcd_clk),
-	CLKDEV_INIT("4c000000.flash", NULL, &fsmc_clk),
-	CLKDEV_INIT("a7000000.i2c", NULL, &i2c1_clk),
-	CLKDEV_INIT(NULL, "emi", &emi_clk),
-	CLKDEV_INIT("pwm", NULL, &pwm_clk),
-	CLKDEV_INIT("70000000.sdhci", NULL, &sdhci_clk),
-	CLKDEV_INIT("c_can_platform.0", NULL, &can0_clk),
-	CLKDEV_INIT("c_can_platform.1", NULL, &can1_clk),
-	CLKDEV_INIT("a5000000.spi", NULL, &ssp1_clk),
-	CLKDEV_INIT("a6000000.spi", NULL, &ssp2_clk),
-	CLKDEV_INIT("a3000000.serial", NULL, &uart1_clk),
-	CLKDEV_INIT("a4000000.serial", NULL, &uart2_clk),
-};
-
-void __init spear320_clk_init(void)
-{
-	int i;
-
-	for (i = 0; i < ARRAY_SIZE(spear_clk_lookups); i++)
-		clk_register(&spear_clk_lookups[i]);
-
-	for (i = 0; i < ARRAY_SIZE(spear320_clk_lookups); i++)
-		clk_register(&spear320_clk_lookups[i]);
-
-	clk_init();
-}
-#endif
diff --git a/arch/arm/mach-spear3xx/include/mach/debug-macro.S b/arch/arm/mach-spear3xx/include/mach/debug-macro.S
index 590519f..0a6381f 100644
--- a/arch/arm/mach-spear3xx/include/mach/debug-macro.S
+++ b/arch/arm/mach-spear3xx/include/mach/debug-macro.S
@@ -4,7 +4,7 @@
  * Debugging macro include header spear3xx machine family
  *
  * Copyright (C) 2009 ST Microelectronics
- * Viresh Kumar<viresh.kumar@st.com>
+ * Viresh Kumar<viresh.linux@gmail.com>
  *
  * This file is licensed under the terms of the GNU General Public
  * License version 2. This program is licensed "as is" without any
diff --git a/arch/arm/mach-spear3xx/include/mach/generic.h b/arch/arm/mach-spear3xx/include/mach/generic.h
index bdb3045..ce19113 100644
--- a/arch/arm/mach-spear3xx/include/mach/generic.h
+++ b/arch/arm/mach-spear3xx/include/mach/generic.h
@@ -4,7 +4,7 @@
  * SPEAr3XX machine family generic header file
  *
  * Copyright (C) 2009 ST Microelectronics
- * Viresh Kumar<viresh.kumar@st.com>
+ * Viresh Kumar<viresh.linux@gmail.com>
  *
  * This file is licensed under the terms of the GNU General Public
  * License version 2. This program is licensed "as is" without any
@@ -27,28 +27,11 @@
 extern struct pl08x_platform_data pl080_plat_data;
 
 /* Add spear3xx family function declarations here */
-void __init spear_setup_timer(resource_size_t base, int irq);
+void __init spear_setup_of_timer(void);
+void __init spear3xx_clk_init(void);
 void __init spear3xx_map_io(void);
 void __init spear3xx_dt_init_irq(void);
 
 void spear_restart(char, const char *);
 
-/* spear300 declarations */
-#ifdef CONFIG_MACH_SPEAR300
-void __init spear300_clk_init(void);
-
-#endif /* CONFIG_MACH_SPEAR300 */
-
-/* spear310 declarations */
-#ifdef CONFIG_MACH_SPEAR310
-void __init spear310_clk_init(void);
-
-#endif /* CONFIG_MACH_SPEAR310 */
-
-/* spear320 declarations */
-#ifdef CONFIG_MACH_SPEAR320
-void __init spear320_clk_init(void);
-
-#endif /* CONFIG_MACH_SPEAR320 */
-
 #endif /* __MACH_GENERIC_H */
diff --git a/arch/arm/mach-spear3xx/include/mach/gpio.h b/arch/arm/mach-spear3xx/include/mach/gpio.h
index 451b208..2ac74c6 100644
--- a/arch/arm/mach-spear3xx/include/mach/gpio.h
+++ b/arch/arm/mach-spear3xx/include/mach/gpio.h
@@ -4,7 +4,7 @@
  * GPIO macros for SPEAr3xx machine family
  *
  * Copyright (C) 2009 ST Microelectronics
- * Viresh Kumar<viresh.kumar@st.com>
+ * Viresh Kumar<viresh.linux@gmail.com>
  *
  * This file is licensed under the terms of the GNU General Public
  * License version 2. This program is licensed "as is" without any
diff --git a/arch/arm/mach-spear3xx/include/mach/irqs.h b/arch/arm/mach-spear3xx/include/mach/irqs.h
index 319620a..803de76 100644
--- a/arch/arm/mach-spear3xx/include/mach/irqs.h
+++ b/arch/arm/mach-spear3xx/include/mach/irqs.h
@@ -4,7 +4,7 @@
  * IRQ helper macros for SPEAr3xx machine family
  *
  * Copyright (C) 2009 ST Microelectronics
- * Viresh Kumar<viresh.kumar@st.com>
+ * Viresh Kumar <viresh.linux@gmail.com>
  *
  * This file is licensed under the terms of the GNU General Public
  * License version 2. This program is licensed "as is" without any
@@ -16,7 +16,6 @@
 
 /* FIXME: probe all these from DT */
 #define SPEAR3XX_IRQ_INTRCOMM_RAS_ARM		1
-#define SPEAR3XX_IRQ_CPU_GPT1_1			2
 #define SPEAR3XX_IRQ_GEN_RAS_1			28
 #define SPEAR3XX_IRQ_GEN_RAS_2			29
 #define SPEAR3XX_IRQ_GEN_RAS_3			30
diff --git a/arch/arm/mach-spear3xx/include/mach/misc_regs.h b/arch/arm/mach-spear3xx/include/mach/misc_regs.h
index e0ab72e..6309bf6 100644
--- a/arch/arm/mach-spear3xx/include/mach/misc_regs.h
+++ b/arch/arm/mach-spear3xx/include/mach/misc_regs.h
@@ -4,7 +4,7 @@
  * Miscellaneous registers definitions for SPEAr3xx machine family
  *
  * Copyright (C) 2009 ST Microelectronics
- * Viresh Kumar<viresh.kumar@st.com>
+ * Viresh Kumar <viresh.linux@gmail.com>
  *
  * This file is licensed under the terms of the GNU General Public
  * License version 2. This program is licensed "as is" without any
@@ -14,6 +14,8 @@
 #ifndef __MACH_MISC_REGS_H
 #define __MACH_MISC_REGS_H
 
+#include <mach/spear.h>
+
 #define MISC_BASE		IOMEM(VA_SPEAR3XX_ICM3_MISC_REG_BASE)
 #define DMA_CHN_CFG		(MISC_BASE + 0x0A0)
 
diff --git a/arch/arm/mach-spear3xx/include/mach/spear.h b/arch/arm/mach-spear3xx/include/mach/spear.h
index 6d4dadc..8cca951 100644
--- a/arch/arm/mach-spear3xx/include/mach/spear.h
+++ b/arch/arm/mach-spear3xx/include/mach/spear.h
@@ -4,7 +4,7 @@
  * SPEAr3xx Machine family specific definition
  *
  * Copyright (C) 2009 ST Microelectronics
- * Viresh Kumar<viresh.kumar@st.com>
+ * Viresh Kumar <viresh.linux@gmail.com>
  *
  * This file is licensed under the terms of the GNU General Public
  * License version 2. This program is licensed "as is" without any
@@ -26,7 +26,6 @@
 /* ML1 - Multi Layer CPU Subsystem */
 #define SPEAR3XX_ICM3_ML1_2_BASE	UL(0xF0000000)
 #define VA_SPEAR6XX_ML_CPU_BASE		UL(0xF0000000)
-#define SPEAR3XX_CPU_TMR_BASE		UL(0xF0000000)
 
 /* ICM3 - Basic Subsystem */
 #define SPEAR3XX_ICM3_SMI_CTRL_BASE	UL(0xFC000000)
@@ -45,4 +44,17 @@
 #define SPEAR_SYS_CTRL_BASE		SPEAR3XX_ICM3_SYS_CTRL_BASE
 #define VA_SPEAR_SYS_CTRL_BASE		VA_SPEAR3XX_ICM3_SYS_CTRL_BASE
 
+/* SPEAr320 Macros */
+#define SPEAR320_SOC_CONFIG_BASE	UL(0xB3000000)
+#define VA_SPEAR320_SOC_CONFIG_BASE	UL(0xFE000000)
+#define SPEAR320_CONTROL_REG		IOMEM(VA_SPEAR320_SOC_CONFIG_BASE)
+#define SPEAR320_EXT_CTRL_REG		IOMEM(VA_SPEAR320_SOC_CONFIG_BASE + 0x0018)
+	#define SPEAR320_UARTX_PCLK_MASK		0x1
+	#define SPEAR320_UART2_PCLK_SHIFT		8
+	#define SPEAR320_UART3_PCLK_SHIFT		9
+	#define SPEAR320_UART4_PCLK_SHIFT		10
+	#define SPEAR320_UART5_PCLK_SHIFT		11
+	#define SPEAR320_UART6_PCLK_SHIFT		12
+	#define SPEAR320_RS485_PCLK_SHIFT		13
+
 #endif /* __MACH_SPEAR3XX_H */
diff --git a/arch/arm/mach-spear3xx/include/mach/timex.h b/arch/arm/mach-spear3xx/include/mach/timex.h
index a38cc9d..9f5d08b 100644
--- a/arch/arm/mach-spear3xx/include/mach/timex.h
+++ b/arch/arm/mach-spear3xx/include/mach/timex.h
@@ -4,7 +4,7 @@
  * SPEAr3XX machine family specific timex definitions
  *
  * Copyright (C) 2009 ST Microelectronics
- * Viresh Kumar<viresh.kumar@st.com>
+ * Viresh Kumar <viresh.linux@gmail.com>
  *
  * This file is licensed under the terms of the GNU General Public
  * License version 2. This program is licensed "as is" without any
diff --git a/arch/arm/mach-spear3xx/include/mach/uncompress.h b/arch/arm/mach-spear3xx/include/mach/uncompress.h
index 53ba8bb..b909b01 100644
--- a/arch/arm/mach-spear3xx/include/mach/uncompress.h
+++ b/arch/arm/mach-spear3xx/include/mach/uncompress.h
@@ -4,7 +4,7 @@
  * Serial port stubs for kernel decompress status messages
  *
  * Copyright (C) 2009 ST Microelectronics
- * Viresh Kumar<viresh.kumar@st.com>
+ * Viresh Kumar <viresh.linux@gmail.com>
  *
  * This file is licensed under the terms of the GNU General Public
  * License version 2. This program is licensed "as is" without any
diff --git a/arch/arm/mach-spear3xx/spear300.c b/arch/arm/mach-spear3xx/spear300.c
index f75fe25..0f882ec 100644
--- a/arch/arm/mach-spear3xx/spear300.c
+++ b/arch/arm/mach-spear3xx/spear300.c
@@ -4,7 +4,7 @@
  * SPEAr300 machine source file
  *
  * Copyright (C) 2009-2012 ST Microelectronics
- * Viresh Kumar <viresh.kumar@st.com>
+ * Viresh Kumar <viresh.linux@gmail.com>
  *
  * This file is licensed under the terms of the GNU General Public
  * License version 2. This program is licensed "as is" without any
@@ -337,7 +337,6 @@
 static void __init spear300_map_io(void)
 {
 	spear3xx_map_io();
-	spear300_clk_init();
 }
 
 DT_MACHINE_START(SPEAR300_DT, "ST SPEAr300 SoC with Flattened Device Tree")
diff --git a/arch/arm/mach-spear3xx/spear310.c b/arch/arm/mach-spear3xx/spear310.c
index f0842a5..bbcf457 100644
--- a/arch/arm/mach-spear3xx/spear310.c
+++ b/arch/arm/mach-spear3xx/spear310.c
@@ -4,7 +4,7 @@
  * SPEAr310 machine source file
  *
  * Copyright (C) 2009-2012 ST Microelectronics
- * Viresh Kumar <viresh.kumar@st.com>
+ * Viresh Kumar <viresh.linux@gmail.com>
  *
  * This file is licensed under the terms of the GNU General Public
  * License version 2. This program is licensed "as is" without any
@@ -478,7 +478,6 @@
 static void __init spear310_map_io(void)
 {
 	spear3xx_map_io();
-	spear310_clk_init();
 }
 
 DT_MACHINE_START(SPEAR310_DT, "ST SPEAr310 SoC with Flattened Device Tree")
diff --git a/arch/arm/mach-spear3xx/spear320.c b/arch/arm/mach-spear3xx/spear320.c
index e8caeef..88d483b 100644
--- a/arch/arm/mach-spear3xx/spear320.c
+++ b/arch/arm/mach-spear3xx/spear320.c
@@ -4,7 +4,7 @@
  * SPEAr320 machine source file
  *
  * Copyright (C) 2009-2012 ST Microelectronics
- * Viresh Kumar <viresh.kumar@st.com>
+ * Viresh Kumar <viresh.linux@gmail.com>
  *
  * This file is licensed under the terms of the GNU General Public
  * License version 2. This program is licensed "as is" without any
@@ -27,7 +27,6 @@
 #define SPEAR320_UART2_BASE		UL(0xA4000000)
 #define SPEAR320_SSP0_BASE		UL(0xA5000000)
 #define SPEAR320_SSP1_BASE		UL(0xA6000000)
-#define SPEAR320_SOC_CONFIG_BASE	UL(0xB3000000)
 
 /* Interrupt registers offsets and masks */
 #define SPEAR320_INT_STS_MASK_REG		0x04
@@ -481,10 +480,19 @@
 	NULL,
 };
 
+struct map_desc spear320_io_desc[] __initdata = {
+	{
+		.virtual	= VA_SPEAR320_SOC_CONFIG_BASE,
+		.pfn		= __phys_to_pfn(SPEAR320_SOC_CONFIG_BASE),
+		.length		= SZ_16M,
+		.type		= MT_DEVICE
+	},
+};
+
 static void __init spear320_map_io(void)
 {
+	iotable_init(spear320_io_desc, ARRAY_SIZE(spear320_io_desc));
 	spear3xx_map_io();
-	spear320_clk_init();
 }
 
 DT_MACHINE_START(SPEAR320_DT, "ST SPEAr320 SoC with Flattened Device Tree")
diff --git a/arch/arm/mach-spear3xx/spear3xx.c b/arch/arm/mach-spear3xx/spear3xx.c
index 826ac20..0f41bd1 100644
--- a/arch/arm/mach-spear3xx/spear3xx.c
+++ b/arch/arm/mach-spear3xx/spear3xx.c
@@ -4,7 +4,7 @@
  * SPEAr3XX machines common source file
  *
  * Copyright (C) 2009-2012 ST Microelectronics
- * Viresh Kumar <viresh.kumar@st.com>
+ * Viresh Kumar <viresh.linux@gmail.com>
  *
  * This file is licensed under the terms of the GNU General Public
  * License version 2. This program is licensed "as is" without any
@@ -90,6 +90,8 @@
 	char pclk_name[] = "pll3_48m_clk";
 	struct clk *gpt_clk, *pclk;
 
+	spear3xx_clk_init();
+
 	/* get the system timer clock */
 	gpt_clk = clk_get_sys("gpt0", NULL);
 	if (IS_ERR(gpt_clk)) {
@@ -109,7 +111,7 @@
 	clk_put(gpt_clk);
 	clk_put(pclk);
 
-	spear_setup_timer(SPEAR3XX_CPU_TMR_BASE, SPEAR3XX_IRQ_CPU_GPT1_1);
+	spear_setup_of_timer();
 }
 
 struct sys_timer spear3xx_timer = {
diff --git a/arch/arm/mach-spear6xx/Makefile b/arch/arm/mach-spear6xx/Makefile
index 76e5750..898831d 100644
--- a/arch/arm/mach-spear6xx/Makefile
+++ b/arch/arm/mach-spear6xx/Makefile
@@ -3,4 +3,4 @@
 #
 
 # common files
-obj-y	+= clock.o spear6xx.o
+obj-y	+= spear6xx.o
diff --git a/arch/arm/mach-spear6xx/clock.c b/arch/arm/mach-spear6xx/clock.c
deleted file mode 100644
index bef77d4..0000000
--- a/arch/arm/mach-spear6xx/clock.c
+++ /dev/null
@@ -1,789 +0,0 @@
-/*
- * arch/arm/mach-spear6xx/clock.c
- *
- * SPEAr6xx machines clock framework source file
- *
- * Copyright (C) 2009 ST Microelectronics
- * Viresh Kumar<viresh.kumar@st.com>
- *
- * This file is licensed under the terms of the GNU General Public
- * License version 2. This program is licensed "as is" without any
- * warranty of any kind, whether express or implied.
- */
-
-#include <linux/init.h>
-#include <linux/io.h>
-#include <linux/kernel.h>
-#include <plat/clock.h>
-#include <mach/misc_regs.h>
-#include <mach/spear.h>
-
-#define PLL1_CTR		(MISC_BASE + 0x008)
-#define PLL1_FRQ		(MISC_BASE + 0x00C)
-#define PLL1_MOD		(MISC_BASE + 0x010)
-#define PLL2_CTR		(MISC_BASE + 0x014)
-/* PLL_CTR register masks */
-#define PLL_ENABLE		2
-#define PLL_MODE_SHIFT		4
-#define PLL_MODE_MASK		0x3
-#define PLL_MODE_NORMAL		0
-#define PLL_MODE_FRACTION	1
-#define PLL_MODE_DITH_DSB	2
-#define PLL_MODE_DITH_SSB	3
-
-#define PLL2_FRQ		(MISC_BASE + 0x018)
-/* PLL FRQ register masks */
-#define PLL_DIV_N_SHIFT		0
-#define PLL_DIV_N_MASK		0xFF
-#define PLL_DIV_P_SHIFT		8
-#define PLL_DIV_P_MASK		0x7
-#define PLL_NORM_FDBK_M_SHIFT	24
-#define PLL_NORM_FDBK_M_MASK	0xFF
-#define PLL_DITH_FDBK_M_SHIFT	16
-#define PLL_DITH_FDBK_M_MASK	0xFFFF
-
-#define PLL2_MOD		(MISC_BASE + 0x01C)
-#define PLL_CLK_CFG		(MISC_BASE + 0x020)
-#define CORE_CLK_CFG		(MISC_BASE + 0x024)
-/* CORE CLK CFG register masks */
-#define PLL_HCLK_RATIO_SHIFT	10
-#define PLL_HCLK_RATIO_MASK	0x3
-#define HCLK_PCLK_RATIO_SHIFT	8
-#define HCLK_PCLK_RATIO_MASK	0x3
-
-#define PERIP_CLK_CFG		(MISC_BASE + 0x028)
-/* PERIP_CLK_CFG register masks */
-#define CLCD_CLK_SHIFT		2
-#define CLCD_CLK_MASK		0x3
-#define UART_CLK_SHIFT		4
-#define UART_CLK_MASK		0x1
-#define FIRDA_CLK_SHIFT		5
-#define FIRDA_CLK_MASK		0x3
-#define GPT0_CLK_SHIFT		8
-#define GPT1_CLK_SHIFT		10
-#define GPT2_CLK_SHIFT		11
-#define GPT3_CLK_SHIFT		12
-#define GPT_CLK_MASK		0x1
-#define AUX_CLK_PLL3_VAL	0
-#define AUX_CLK_PLL1_VAL	1
-
-#define PERIP1_CLK_ENB		(MISC_BASE + 0x02C)
-/* PERIP1_CLK_ENB register masks */
-#define UART0_CLK_ENB		3
-#define UART1_CLK_ENB		4
-#define SSP0_CLK_ENB		5
-#define SSP1_CLK_ENB		6
-#define I2C_CLK_ENB		7
-#define JPEG_CLK_ENB		8
-#define FSMC_CLK_ENB		9
-#define FIRDA_CLK_ENB		10
-#define GPT2_CLK_ENB		11
-#define GPT3_CLK_ENB		12
-#define GPIO2_CLK_ENB		13
-#define SSP2_CLK_ENB		14
-#define ADC_CLK_ENB		15
-#define GPT1_CLK_ENB		11
-#define RTC_CLK_ENB		17
-#define GPIO1_CLK_ENB		18
-#define DMA_CLK_ENB		19
-#define SMI_CLK_ENB		21
-#define CLCD_CLK_ENB		22
-#define GMAC_CLK_ENB		23
-#define USBD_CLK_ENB		24
-#define USBH0_CLK_ENB		25
-#define USBH1_CLK_ENB		26
-
-#define PRSC1_CLK_CFG		(MISC_BASE + 0x044)
-#define PRSC2_CLK_CFG		(MISC_BASE + 0x048)
-#define PRSC3_CLK_CFG		(MISC_BASE + 0x04C)
-/* gpt synthesizer register masks */
-#define GPT_MSCALE_SHIFT	0
-#define GPT_MSCALE_MASK		0xFFF
-#define GPT_NSCALE_SHIFT	12
-#define GPT_NSCALE_MASK		0xF
-
-#define AMEM_CLK_CFG		(MISC_BASE + 0x050)
-#define EXPI_CLK_CFG		(MISC_BASE + 0x054)
-#define CLCD_CLK_SYNT		(MISC_BASE + 0x05C)
-#define FIRDA_CLK_SYNT		(MISC_BASE + 0x060)
-#define UART_CLK_SYNT		(MISC_BASE + 0x064)
-#define GMAC_CLK_SYNT		(MISC_BASE + 0x068)
-#define RAS1_CLK_SYNT		(MISC_BASE + 0x06C)
-#define RAS2_CLK_SYNT		(MISC_BASE + 0x070)
-#define RAS3_CLK_SYNT		(MISC_BASE + 0x074)
-#define RAS4_CLK_SYNT		(MISC_BASE + 0x078)
-/* aux clk synthesiser register masks for irda to ras4 */
-#define AUX_SYNT_ENB		31
-#define AUX_EQ_SEL_SHIFT	30
-#define AUX_EQ_SEL_MASK		1
-#define AUX_EQ1_SEL		0
-#define AUX_EQ2_SEL		1
-#define AUX_XSCALE_SHIFT	16
-#define AUX_XSCALE_MASK		0xFFF
-#define AUX_YSCALE_SHIFT	0
-#define AUX_YSCALE_MASK		0xFFF
-
-/* root clks */
-/* 32 KHz oscillator clock */
-static struct clk osc_32k_clk = {
-	.flags = ALWAYS_ENABLED,
-	.rate = 32000,
-};
-
-/* 30 MHz oscillator clock */
-static struct clk osc_30m_clk = {
-	.flags = ALWAYS_ENABLED,
-	.rate = 30000000,
-};
-
-/* clock derived from 32 KHz osc clk */
-/* rtc clock */
-static struct clk rtc_clk = {
-	.pclk = &osc_32k_clk,
-	.en_reg = PERIP1_CLK_ENB,
-	.en_reg_bit = RTC_CLK_ENB,
-	.recalc = &follow_parent,
-};
-
-/* clock derived from 30 MHz osc clk */
-/* pll masks structure */
-static struct pll_clk_masks pll1_masks = {
-	.mode_mask = PLL_MODE_MASK,
-	.mode_shift = PLL_MODE_SHIFT,
-	.norm_fdbk_m_mask = PLL_NORM_FDBK_M_MASK,
-	.norm_fdbk_m_shift = PLL_NORM_FDBK_M_SHIFT,
-	.dith_fdbk_m_mask = PLL_DITH_FDBK_M_MASK,
-	.dith_fdbk_m_shift = PLL_DITH_FDBK_M_SHIFT,
-	.div_p_mask = PLL_DIV_P_MASK,
-	.div_p_shift = PLL_DIV_P_SHIFT,
-	.div_n_mask = PLL_DIV_N_MASK,
-	.div_n_shift = PLL_DIV_N_SHIFT,
-};
-
-/* pll1 configuration structure */
-static struct pll_clk_config pll1_config = {
-	.mode_reg = PLL1_CTR,
-	.cfg_reg = PLL1_FRQ,
-	.masks = &pll1_masks,
-};
-
-/* pll rate configuration table, in ascending order of rates */
-struct pll_rate_tbl pll_rtbl[] = {
-	{.mode = 0, .m = 0x85, .n = 0x0C, .p = 0x1}, /* 266 MHz */
-	{.mode = 0, .m = 0xA6, .n = 0x0C, .p = 0x1}, /* 332 MHz */
-};
-
-/* PLL1 clock */
-static struct clk pll1_clk = {
-	.flags = ENABLED_ON_INIT,
-	.pclk = &osc_30m_clk,
-	.en_reg = PLL1_CTR,
-	.en_reg_bit = PLL_ENABLE,
-	.calc_rate = &pll_calc_rate,
-	.recalc = &pll_clk_recalc,
-	.set_rate = &pll_clk_set_rate,
-	.rate_config = {pll_rtbl, ARRAY_SIZE(pll_rtbl), 1},
-	.private_data = &pll1_config,
-};
-
-/* PLL3 48 MHz clock */
-static struct clk pll3_48m_clk = {
-	.flags = ALWAYS_ENABLED,
-	.pclk = &osc_30m_clk,
-	.rate = 48000000,
-};
-
-/* watch dog timer clock */
-static struct clk wdt_clk = {
-	.flags = ALWAYS_ENABLED,
-	.pclk = &osc_30m_clk,
-	.recalc = &follow_parent,
-};
-
-/* clock derived from pll1 clk */
-/* cpu clock */
-static struct clk cpu_clk = {
-	.flags = ALWAYS_ENABLED,
-	.pclk = &pll1_clk,
-	.recalc = &follow_parent,
-};
-
-/* ahb masks structure */
-static struct bus_clk_masks ahb_masks = {
-	.mask = PLL_HCLK_RATIO_MASK,
-	.shift = PLL_HCLK_RATIO_SHIFT,
-};
-
-/* ahb configuration structure */
-static struct bus_clk_config ahb_config = {
-	.reg = CORE_CLK_CFG,
-	.masks = &ahb_masks,
-};
-
-/* ahb rate configuration table, in ascending order of rates */
-struct bus_rate_tbl bus_rtbl[] = {
-	{.div = 3}, /* == parent divided by 4 */
-	{.div = 2}, /* == parent divided by 3 */
-	{.div = 1}, /* == parent divided by 2 */
-	{.div = 0}, /* == parent divided by 1 */
-};
-
-/* ahb clock */
-static struct clk ahb_clk = {
-	.flags = ALWAYS_ENABLED,
-	.pclk = &pll1_clk,
-	.calc_rate = &bus_calc_rate,
-	.recalc = &bus_clk_recalc,
-	.set_rate = &bus_clk_set_rate,
-	.rate_config = {bus_rtbl, ARRAY_SIZE(bus_rtbl), 2},
-	.private_data = &ahb_config,
-};
-
-/* auxiliary synthesizers masks */
-static struct aux_clk_masks aux_masks = {
-	.eq_sel_mask = AUX_EQ_SEL_MASK,
-	.eq_sel_shift = AUX_EQ_SEL_SHIFT,
-	.eq1_mask = AUX_EQ1_SEL,
-	.eq2_mask = AUX_EQ2_SEL,
-	.xscale_sel_mask = AUX_XSCALE_MASK,
-	.xscale_sel_shift = AUX_XSCALE_SHIFT,
-	.yscale_sel_mask = AUX_YSCALE_MASK,
-	.yscale_sel_shift = AUX_YSCALE_SHIFT,
-};
-
-/* uart configurations */
-static struct aux_clk_config uart_synth_config = {
-	.synth_reg = UART_CLK_SYNT,
-	.masks = &aux_masks,
-};
-
-/* aux rate configuration table, in ascending order of rates */
-struct aux_rate_tbl aux_rtbl[] = {
-	/* For PLL1 = 332 MHz */
-	{.xscale = 1, .yscale = 8, .eq = 1}, /* 41.5 MHz */
-	{.xscale = 1, .yscale = 4, .eq = 1}, /* 83 MHz */
-	{.xscale = 1, .yscale = 2, .eq = 1}, /* 166 MHz */
-};
-
-/* uart synth clock */
-static struct clk uart_synth_clk = {
-	.en_reg = UART_CLK_SYNT,
-	.en_reg_bit = AUX_SYNT_ENB,
-	.pclk = &pll1_clk,
-	.calc_rate = &aux_calc_rate,
-	.recalc = &aux_clk_recalc,
-	.set_rate = &aux_clk_set_rate,
-	.rate_config = {aux_rtbl, ARRAY_SIZE(aux_rtbl), 2},
-	.private_data = &uart_synth_config,
-};
-
-/* uart parents */
-static struct pclk_info uart_pclk_info[] = {
-	{
-		.pclk = &uart_synth_clk,
-		.pclk_val = AUX_CLK_PLL1_VAL,
-	}, {
-		.pclk = &pll3_48m_clk,
-		.pclk_val = AUX_CLK_PLL3_VAL,
-	},
-};
-
-/* uart parent select structure */
-static struct pclk_sel uart_pclk_sel = {
-	.pclk_info = uart_pclk_info,
-	.pclk_count = ARRAY_SIZE(uart_pclk_info),
-	.pclk_sel_reg = PERIP_CLK_CFG,
-	.pclk_sel_mask = UART_CLK_MASK,
-};
-
-/* uart0 clock */
-static struct clk uart0_clk = {
-	.en_reg = PERIP1_CLK_ENB,
-	.en_reg_bit = UART0_CLK_ENB,
-	.pclk_sel = &uart_pclk_sel,
-	.pclk_sel_shift = UART_CLK_SHIFT,
-	.recalc = &follow_parent,
-};
-
-/* uart1 clock */
-static struct clk uart1_clk = {
-	.en_reg = PERIP1_CLK_ENB,
-	.en_reg_bit = UART1_CLK_ENB,
-	.pclk_sel = &uart_pclk_sel,
-	.pclk_sel_shift = UART_CLK_SHIFT,
-	.recalc = &follow_parent,
-};
-
-/* firda configurations */
-static struct aux_clk_config firda_synth_config = {
-	.synth_reg = FIRDA_CLK_SYNT,
-	.masks = &aux_masks,
-};
-
-/* firda synth clock */
-static struct clk firda_synth_clk = {
-	.en_reg = FIRDA_CLK_SYNT,
-	.en_reg_bit = AUX_SYNT_ENB,
-	.pclk = &pll1_clk,
-	.calc_rate = &aux_calc_rate,
-	.recalc = &aux_clk_recalc,
-	.set_rate = &aux_clk_set_rate,
-	.rate_config = {aux_rtbl, ARRAY_SIZE(aux_rtbl), 2},
-	.private_data = &firda_synth_config,
-};
-
-/* firda parents */
-static struct pclk_info firda_pclk_info[] = {
-	{
-		.pclk = &firda_synth_clk,
-		.pclk_val = AUX_CLK_PLL1_VAL,
-	}, {
-		.pclk = &pll3_48m_clk,
-		.pclk_val = AUX_CLK_PLL3_VAL,
-	},
-};
-
-/* firda parent select structure */
-static struct pclk_sel firda_pclk_sel = {
-	.pclk_info = firda_pclk_info,
-	.pclk_count = ARRAY_SIZE(firda_pclk_info),
-	.pclk_sel_reg = PERIP_CLK_CFG,
-	.pclk_sel_mask = FIRDA_CLK_MASK,
-};
-
-/* firda clock */
-static struct clk firda_clk = {
-	.en_reg = PERIP1_CLK_ENB,
-	.en_reg_bit = FIRDA_CLK_ENB,
-	.pclk_sel = &firda_pclk_sel,
-	.pclk_sel_shift = FIRDA_CLK_SHIFT,
-	.recalc = &follow_parent,
-};
-
-/* clcd configurations */
-static struct aux_clk_config clcd_synth_config = {
-	.synth_reg = CLCD_CLK_SYNT,
-	.masks = &aux_masks,
-};
-
-/* firda synth clock */
-static struct clk clcd_synth_clk = {
-	.en_reg = CLCD_CLK_SYNT,
-	.en_reg_bit = AUX_SYNT_ENB,
-	.pclk = &pll1_clk,
-	.calc_rate = &aux_calc_rate,
-	.recalc = &aux_clk_recalc,
-	.set_rate = &aux_clk_set_rate,
-	.rate_config = {aux_rtbl, ARRAY_SIZE(aux_rtbl), 2},
-	.private_data = &clcd_synth_config,
-};
-
-/* clcd parents */
-static struct pclk_info clcd_pclk_info[] = {
-	{
-		.pclk = &clcd_synth_clk,
-		.pclk_val = AUX_CLK_PLL1_VAL,
-	}, {
-		.pclk = &pll3_48m_clk,
-		.pclk_val = AUX_CLK_PLL3_VAL,
-	},
-};
-
-/* clcd parent select structure */
-static struct pclk_sel clcd_pclk_sel = {
-	.pclk_info = clcd_pclk_info,
-	.pclk_count = ARRAY_SIZE(clcd_pclk_info),
-	.pclk_sel_reg = PERIP_CLK_CFG,
-	.pclk_sel_mask = CLCD_CLK_MASK,
-};
-
-/* clcd clock */
-static struct clk clcd_clk = {
-	.en_reg = PERIP1_CLK_ENB,
-	.en_reg_bit = CLCD_CLK_ENB,
-	.pclk_sel = &clcd_pclk_sel,
-	.pclk_sel_shift = CLCD_CLK_SHIFT,
-	.recalc = &follow_parent,
-};
-
-/* gpt synthesizer masks */
-static struct gpt_clk_masks gpt_masks = {
-	.mscale_sel_mask = GPT_MSCALE_MASK,
-	.mscale_sel_shift = GPT_MSCALE_SHIFT,
-	.nscale_sel_mask = GPT_NSCALE_MASK,
-	.nscale_sel_shift = GPT_NSCALE_SHIFT,
-};
-
-/* gpt rate configuration table, in ascending order of rates */
-struct gpt_rate_tbl gpt_rtbl[] = {
-	/* For pll1 = 332 MHz */
-	{.mscale = 4, .nscale = 0}, /* 41.5 MHz */
-	{.mscale = 2, .nscale = 0}, /* 55.3 MHz */
-	{.mscale = 1, .nscale = 0}, /* 83 MHz */
-};
-
-/* gpt0 synth clk config*/
-static struct gpt_clk_config gpt0_synth_config = {
-	.synth_reg = PRSC1_CLK_CFG,
-	.masks = &gpt_masks,
-};
-
-/* gpt synth clock */
-static struct clk gpt0_synth_clk = {
-	.flags = ALWAYS_ENABLED,
-	.pclk = &pll1_clk,
-	.calc_rate = &gpt_calc_rate,
-	.recalc = &gpt_clk_recalc,
-	.set_rate = &gpt_clk_set_rate,
-	.rate_config = {gpt_rtbl, ARRAY_SIZE(gpt_rtbl), 2},
-	.private_data = &gpt0_synth_config,
-};
-
-/* gpt parents */
-static struct pclk_info gpt0_pclk_info[] = {
-	{
-		.pclk = &gpt0_synth_clk,
-		.pclk_val = AUX_CLK_PLL1_VAL,
-	}, {
-		.pclk = &pll3_48m_clk,
-		.pclk_val = AUX_CLK_PLL3_VAL,
-	},
-};
-
-/* gpt parent select structure */
-static struct pclk_sel gpt0_pclk_sel = {
-	.pclk_info = gpt0_pclk_info,
-	.pclk_count = ARRAY_SIZE(gpt0_pclk_info),
-	.pclk_sel_reg = PERIP_CLK_CFG,
-	.pclk_sel_mask = GPT_CLK_MASK,
-};
-
-/* gpt0 ARM1 subsystem timer clock */
-static struct clk gpt0_clk = {
-	.flags = ALWAYS_ENABLED,
-	.pclk_sel = &gpt0_pclk_sel,
-	.pclk_sel_shift = GPT0_CLK_SHIFT,
-	.recalc = &follow_parent,
-};
-
-
-/* Note: gpt0 and gpt1 share same parent clocks */
-/* gpt parent select structure */
-static struct pclk_sel gpt1_pclk_sel = {
-	.pclk_info = gpt0_pclk_info,
-	.pclk_count = ARRAY_SIZE(gpt0_pclk_info),
-	.pclk_sel_reg = PERIP_CLK_CFG,
-	.pclk_sel_mask = GPT_CLK_MASK,
-};
-
-/* gpt1 timer clock */
-static struct clk gpt1_clk = {
-	.flags = ALWAYS_ENABLED,
-	.pclk_sel = &gpt1_pclk_sel,
-	.pclk_sel_shift = GPT1_CLK_SHIFT,
-	.recalc = &follow_parent,
-};
-
-/* gpt2 synth clk config*/
-static struct gpt_clk_config gpt2_synth_config = {
-	.synth_reg = PRSC2_CLK_CFG,
-	.masks = &gpt_masks,
-};
-
-/* gpt synth clock */
-static struct clk gpt2_synth_clk = {
-	.flags = ALWAYS_ENABLED,
-	.pclk = &pll1_clk,
-	.calc_rate = &gpt_calc_rate,
-	.recalc = &gpt_clk_recalc,
-	.set_rate = &gpt_clk_set_rate,
-	.rate_config = {gpt_rtbl, ARRAY_SIZE(gpt_rtbl), 2},
-	.private_data = &gpt2_synth_config,
-};
-
-/* gpt parents */
-static struct pclk_info gpt2_pclk_info[] = {
-	{
-		.pclk = &gpt2_synth_clk,
-		.pclk_val = AUX_CLK_PLL1_VAL,
-	}, {
-		.pclk = &pll3_48m_clk,
-		.pclk_val = AUX_CLK_PLL3_VAL,
-	},
-};
-
-/* gpt parent select structure */
-static struct pclk_sel gpt2_pclk_sel = {
-	.pclk_info = gpt2_pclk_info,
-	.pclk_count = ARRAY_SIZE(gpt2_pclk_info),
-	.pclk_sel_reg = PERIP_CLK_CFG,
-	.pclk_sel_mask = GPT_CLK_MASK,
-};
-
-/* gpt2 timer clock */
-static struct clk gpt2_clk = {
-	.flags = ALWAYS_ENABLED,
-	.pclk_sel = &gpt2_pclk_sel,
-	.pclk_sel_shift = GPT2_CLK_SHIFT,
-	.recalc = &follow_parent,
-};
-
-/* gpt3 synth clk config*/
-static struct gpt_clk_config gpt3_synth_config = {
-	.synth_reg = PRSC3_CLK_CFG,
-	.masks = &gpt_masks,
-};
-
-/* gpt synth clock */
-static struct clk gpt3_synth_clk = {
-	.flags = ALWAYS_ENABLED,
-	.pclk = &pll1_clk,
-	.calc_rate = &gpt_calc_rate,
-	.recalc = &gpt_clk_recalc,
-	.set_rate = &gpt_clk_set_rate,
-	.rate_config = {gpt_rtbl, ARRAY_SIZE(gpt_rtbl), 2},
-	.private_data = &gpt3_synth_config,
-};
-
-/* gpt parents */
-static struct pclk_info gpt3_pclk_info[] = {
-	{
-		.pclk = &gpt3_synth_clk,
-		.pclk_val = AUX_CLK_PLL1_VAL,
-	}, {
-		.pclk = &pll3_48m_clk,
-		.pclk_val = AUX_CLK_PLL3_VAL,
-	},
-};
-
-/* gpt parent select structure */
-static struct pclk_sel gpt3_pclk_sel = {
-	.pclk_info = gpt3_pclk_info,
-	.pclk_count = ARRAY_SIZE(gpt3_pclk_info),
-	.pclk_sel_reg = PERIP_CLK_CFG,
-	.pclk_sel_mask = GPT_CLK_MASK,
-};
-
-/* gpt3 timer clock */
-static struct clk gpt3_clk = {
-	.flags = ALWAYS_ENABLED,
-	.pclk_sel = &gpt3_pclk_sel,
-	.pclk_sel_shift = GPT3_CLK_SHIFT,
-	.recalc = &follow_parent,
-};
-
-/* clock derived from pll3 clk */
-/* usbh0 clock */
-static struct clk usbh0_clk = {
-	.pclk = &pll3_48m_clk,
-	.en_reg = PERIP1_CLK_ENB,
-	.en_reg_bit = USBH0_CLK_ENB,
-	.recalc = &follow_parent,
-};
-
-/* usbh1 clock */
-static struct clk usbh1_clk = {
-	.pclk = &pll3_48m_clk,
-	.en_reg = PERIP1_CLK_ENB,
-	.en_reg_bit = USBH1_CLK_ENB,
-	.recalc = &follow_parent,
-};
-
-/* usbd clock */
-static struct clk usbd_clk = {
-	.pclk = &pll3_48m_clk,
-	.en_reg = PERIP1_CLK_ENB,
-	.en_reg_bit = USBD_CLK_ENB,
-	.recalc = &follow_parent,
-};
-
-/* clock derived from ahb clk */
-/* apb masks structure */
-static struct bus_clk_masks apb_masks = {
-	.mask = HCLK_PCLK_RATIO_MASK,
-	.shift = HCLK_PCLK_RATIO_SHIFT,
-};
-
-/* apb configuration structure */
-static struct bus_clk_config apb_config = {
-	.reg = CORE_CLK_CFG,
-	.masks = &apb_masks,
-};
-
-/* apb clock */
-static struct clk apb_clk = {
-	.flags = ALWAYS_ENABLED,
-	.pclk = &ahb_clk,
-	.calc_rate = &bus_calc_rate,
-	.recalc = &bus_clk_recalc,
-	.set_rate = &bus_clk_set_rate,
-	.rate_config = {bus_rtbl, ARRAY_SIZE(bus_rtbl), 2},
-	.private_data = &apb_config,
-};
-
-/* i2c clock */
-static struct clk i2c_clk = {
-	.pclk = &ahb_clk,
-	.en_reg = PERIP1_CLK_ENB,
-	.en_reg_bit = I2C_CLK_ENB,
-	.recalc = &follow_parent,
-};
-
-/* dma clock */
-static struct clk dma_clk = {
-	.pclk = &ahb_clk,
-	.en_reg = PERIP1_CLK_ENB,
-	.en_reg_bit = DMA_CLK_ENB,
-	.recalc = &follow_parent,
-};
-
-/* jpeg clock */
-static struct clk jpeg_clk = {
-	.pclk = &ahb_clk,
-	.en_reg = PERIP1_CLK_ENB,
-	.en_reg_bit = JPEG_CLK_ENB,
-	.recalc = &follow_parent,
-};
-
-/* gmac clock */
-static struct clk gmac_clk = {
-	.pclk = &ahb_clk,
-	.en_reg = PERIP1_CLK_ENB,
-	.en_reg_bit = GMAC_CLK_ENB,
-	.recalc = &follow_parent,
-};
-
-/* smi clock */
-static struct clk smi_clk = {
-	.pclk = &ahb_clk,
-	.en_reg = PERIP1_CLK_ENB,
-	.en_reg_bit = SMI_CLK_ENB,
-	.recalc = &follow_parent,
-};
-
-/* fsmc clock */
-static struct clk fsmc_clk = {
-	.pclk = &ahb_clk,
-	.en_reg = PERIP1_CLK_ENB,
-	.en_reg_bit = FSMC_CLK_ENB,
-	.recalc = &follow_parent,
-};
-
-/* clock derived from apb clk */
-/* adc clock */
-static struct clk adc_clk = {
-	.pclk = &apb_clk,
-	.en_reg = PERIP1_CLK_ENB,
-	.en_reg_bit = ADC_CLK_ENB,
-	.recalc = &follow_parent,
-};
-
-/* ssp0 clock */
-static struct clk ssp0_clk = {
-	.pclk = &apb_clk,
-	.en_reg = PERIP1_CLK_ENB,
-	.en_reg_bit = SSP0_CLK_ENB,
-	.recalc = &follow_parent,
-};
-
-/* ssp1 clock */
-static struct clk ssp1_clk = {
-	.pclk = &apb_clk,
-	.en_reg = PERIP1_CLK_ENB,
-	.en_reg_bit = SSP1_CLK_ENB,
-	.recalc = &follow_parent,
-};
-
-/* ssp2 clock */
-static struct clk ssp2_clk = {
-	.pclk = &apb_clk,
-	.en_reg = PERIP1_CLK_ENB,
-	.en_reg_bit = SSP2_CLK_ENB,
-	.recalc = &follow_parent,
-};
-
-/* gpio0 ARM subsystem clock */
-static struct clk gpio0_clk = {
-	.flags = ALWAYS_ENABLED,
-	.pclk = &apb_clk,
-	.recalc = &follow_parent,
-};
-
-/* gpio1 clock */
-static struct clk gpio1_clk = {
-	.pclk = &apb_clk,
-	.en_reg = PERIP1_CLK_ENB,
-	.en_reg_bit = GPIO1_CLK_ENB,
-	.recalc = &follow_parent,
-};
-
-/* gpio2 clock */
-static struct clk gpio2_clk = {
-	.pclk = &apb_clk,
-	.en_reg = PERIP1_CLK_ENB,
-	.en_reg_bit = GPIO2_CLK_ENB,
-	.recalc = &follow_parent,
-};
-
-static struct clk dummy_apb_pclk;
-
-/* array of all spear 6xx clock lookups */
-static struct clk_lookup spear_clk_lookups[] = {
-	CLKDEV_INIT(NULL, "apb_pclk", &dummy_apb_pclk),
-	/* root clks */
-	CLKDEV_INIT(NULL, "osc_32k_clk", &osc_32k_clk),
-	CLKDEV_INIT(NULL, "osc_30m_clk", &osc_30m_clk),
-	/* clock derived from 32 KHz osc clk */
-	CLKDEV_INIT("rtc-spear", NULL, &rtc_clk),
-	/* clock derived from 30 MHz osc clk */
-	CLKDEV_INIT(NULL, "pll1_clk", &pll1_clk),
-	CLKDEV_INIT(NULL, "pll3_48m_clk", &pll3_48m_clk),
-	CLKDEV_INIT("wdt", NULL, &wdt_clk),
-	/* clock derived from pll1 clk */
-	CLKDEV_INIT(NULL, "cpu_clk", &cpu_clk),
-	CLKDEV_INIT(NULL, "ahb_clk", &ahb_clk),
-	CLKDEV_INIT(NULL, "uart_synth_clk", &uart_synth_clk),
-	CLKDEV_INIT(NULL, "firda_synth_clk", &firda_synth_clk),
-	CLKDEV_INIT(NULL, "clcd_synth_clk", &clcd_synth_clk),
-	CLKDEV_INIT(NULL, "gpt0_synth_clk", &gpt0_synth_clk),
-	CLKDEV_INIT(NULL, "gpt2_synth_clk", &gpt2_synth_clk),
-	CLKDEV_INIT(NULL, "gpt3_synth_clk", &gpt3_synth_clk),
-	CLKDEV_INIT("d0000000.serial", NULL, &uart0_clk),
-	CLKDEV_INIT("d0080000.serial", NULL, &uart1_clk),
-	CLKDEV_INIT("firda", NULL, &firda_clk),
-	CLKDEV_INIT("clcd", NULL, &clcd_clk),
-	CLKDEV_INIT("gpt0", NULL, &gpt0_clk),
-	CLKDEV_INIT("gpt1", NULL, &gpt1_clk),
-	CLKDEV_INIT("gpt2", NULL, &gpt2_clk),
-	CLKDEV_INIT("gpt3", NULL, &gpt3_clk),
-	/* clock derived from pll3 clk */
-	CLKDEV_INIT("designware_udc", NULL, &usbd_clk),
-	CLKDEV_INIT(NULL, "usbh.0_clk", &usbh0_clk),
-	CLKDEV_INIT(NULL, "usbh.1_clk", &usbh1_clk),
-	/* clock derived from ahb clk */
-	CLKDEV_INIT(NULL, "apb_clk", &apb_clk),
-	CLKDEV_INIT("d0200000.i2c", NULL, &i2c_clk),
-	CLKDEV_INIT("fc400000.dma", NULL, &dma_clk),
-	CLKDEV_INIT("jpeg", NULL, &jpeg_clk),
-	CLKDEV_INIT("gmac", NULL, &gmac_clk),
-	CLKDEV_INIT("fc000000.flash", NULL, &smi_clk),
-	CLKDEV_INIT("d1800000.flash", NULL, &fsmc_clk),
-	/* clock derived from apb clk */
-	CLKDEV_INIT("adc", NULL, &adc_clk),
-	CLKDEV_INIT("ssp-pl022.0", NULL, &ssp0_clk),
-	CLKDEV_INIT("ssp-pl022.1", NULL, &ssp1_clk),
-	CLKDEV_INIT("ssp-pl022.2", NULL, &ssp2_clk),
-	CLKDEV_INIT("f0100000.gpio", NULL, &gpio0_clk),
-	CLKDEV_INIT("fc980000.gpio", NULL, &gpio1_clk),
-	CLKDEV_INIT("d8100000.gpio", NULL, &gpio2_clk),
-};
-
-void __init spear6xx_clk_init(void)
-{
-	int i;
-
-	for (i = 0; i < ARRAY_SIZE(spear_clk_lookups); i++)
-		clk_register(&spear_clk_lookups[i]);
-
-	clk_init();
-}
diff --git a/arch/arm/mach-spear6xx/include/mach/generic.h b/arch/arm/mach-spear6xx/include/mach/generic.h
index 7167fd3..65514b15 100644
--- a/arch/arm/mach-spear6xx/include/mach/generic.h
+++ b/arch/arm/mach-spear6xx/include/mach/generic.h
@@ -16,7 +16,7 @@
 
 #include <linux/init.h>
 
-void __init spear_setup_timer(resource_size_t base, int irq);
+void __init spear_setup_of_timer(void);
 void spear_restart(char, const char *);
 void __init spear6xx_clk_init(void);
 
diff --git a/arch/arm/mach-spear6xx/include/mach/gpio.h b/arch/arm/mach-spear6xx/include/mach/gpio.h
index 3a789db..d42cefc 100644
--- a/arch/arm/mach-spear6xx/include/mach/gpio.h
+++ b/arch/arm/mach-spear6xx/include/mach/gpio.h
@@ -4,7 +4,7 @@
  * GPIO macros for SPEAr6xx machine family
  *
  * Copyright (C) 2009 ST Microelectronics
- * Viresh Kumar<viresh.kumar@st.com>
+ * Viresh Kumar <viresh.linux@gmail.com>
  *
  * This file is licensed under the terms of the GNU General Public
  * License version 2. This program is licensed "as is" without any
diff --git a/arch/arm/mach-spear6xx/include/mach/irqs.h b/arch/arm/mach-spear6xx/include/mach/irqs.h
index 2b73538..37a5c41 100644
--- a/arch/arm/mach-spear6xx/include/mach/irqs.h
+++ b/arch/arm/mach-spear6xx/include/mach/irqs.h
@@ -16,9 +16,6 @@
 
 /* IRQ definitions */
 /* VIC 1 */
-/* FIXME: probe this from DT */
-#define IRQ_CPU_GPT1_1				16
-
 #define IRQ_VIC_END				64
 
 /* GPIO pins virtual irqs */
diff --git a/arch/arm/mach-spear6xx/include/mach/misc_regs.h b/arch/arm/mach-spear6xx/include/mach/misc_regs.h
index 2b9aaa6..c34acc2 100644
--- a/arch/arm/mach-spear6xx/include/mach/misc_regs.h
+++ b/arch/arm/mach-spear6xx/include/mach/misc_regs.h
@@ -4,7 +4,7 @@
  * Miscellaneous registers definitions for SPEAr6xx machine family
  *
  * Copyright (C) 2009 ST Microelectronics
- * Viresh Kumar<viresh.kumar@st.com>
+ * Viresh Kumar <viresh.linux@gmail.com>
  *
  * This file is licensed under the terms of the GNU General Public
  * License version 2. This program is licensed "as is" without any
@@ -14,6 +14,8 @@
 #ifndef __MACH_MISC_REGS_H
 #define __MACH_MISC_REGS_H
 
+#include <mach/spear.h>
+
 #define MISC_BASE		IOMEM(VA_SPEAR6XX_ICM3_MISC_REG_BASE)
 #define DMA_CHN_CFG		(MISC_BASE + 0x0A0)
 
diff --git a/arch/arm/mach-spear6xx/include/mach/spear.h b/arch/arm/mach-spear6xx/include/mach/spear.h
index d278ed0..cb8ed2f 100644
--- a/arch/arm/mach-spear6xx/include/mach/spear.h
+++ b/arch/arm/mach-spear6xx/include/mach/spear.h
@@ -25,7 +25,6 @@
 /* ML-1, 2 - Multi Layer CPU Subsystem */
 #define SPEAR6XX_ML_CPU_BASE		UL(0xF0000000)
 #define VA_SPEAR6XX_ML_CPU_BASE		UL(0xF0000000)
-#define SPEAR6XX_CPU_TMR_BASE		UL(0xF0000000)
 
 /* ICM3 - Basic Subsystem */
 #define SPEAR6XX_ICM3_SMI_CTRL_BASE	UL(0xFC000000)
diff --git a/arch/arm/mach-spear6xx/spear6xx.c b/arch/arm/mach-spear6xx/spear6xx.c
index de194db..2e2e359 100644
--- a/arch/arm/mach-spear6xx/spear6xx.c
+++ b/arch/arm/mach-spear6xx/spear6xx.c
@@ -419,9 +419,6 @@
 void __init spear6xx_map_io(void)
 {
 	iotable_init(spear6xx_io_desc, ARRAY_SIZE(spear6xx_io_desc));
-
-	/* This will initialize clock framework */
-	spear6xx_clk_init();
 }
 
 static void __init spear6xx_timer_init(void)
@@ -429,6 +426,8 @@
 	char pclk_name[] = "pll3_48m_clk";
 	struct clk *gpt_clk, *pclk;
 
+	spear6xx_clk_init();
+
 	/* get the system timer clock */
 	gpt_clk = clk_get_sys("gpt0", NULL);
 	if (IS_ERR(gpt_clk)) {
@@ -448,7 +447,7 @@
 	clk_put(gpt_clk);
 	clk_put(pclk);
 
-	spear_setup_timer(SPEAR6XX_CPU_TMR_BASE, IRQ_CPU_GPT1_1);
+	spear_setup_of_timer();
 }
 
 struct sys_timer spear6xx_timer = {
diff --git a/arch/arm/mach-tegra/Kconfig b/arch/arm/mach-tegra/Kconfig
index d0f2546..6a113a9 100644
--- a/arch/arm/mach-tegra/Kconfig
+++ b/arch/arm/mach-tegra/Kconfig
@@ -50,6 +50,14 @@
 	depends on ARCH_TEGRA_2x_SOC
 	select PCI
 
+config TEGRA_AHB
+	bool "Enable AHB driver for NVIDIA Tegra SoCs"
+	default y
+	help
+	  Adds AHB configuration functionality for NVIDIA Tegra SoCs,
+	  which controls AHB bus master arbitration and some
+	  performance parameters (priority, prefetch size).
+
 comment "Tegra board type"
 
 config MACH_HARMONY
@@ -111,7 +119,7 @@
          Support for the nVidia Ventana development platform
 
 choice
-        prompt "Low-level debug console UART"
+        prompt "Default low-level debug console UART"
         default TEGRA_DEBUG_UART_NONE
 
 config TEGRA_DEBUG_UART_NONE
@@ -134,6 +142,33 @@
 
 endchoice
 
+choice
+	prompt "Automatic low-level debug console UART"
+	default TEGRA_DEBUG_UART_AUTO_NONE
+
+config TEGRA_DEBUG_UART_AUTO_NONE
+	bool "None"
+
+config TEGRA_DEBUG_UART_AUTO_ODMDATA
+	bool "Via ODMDATA"
+	help
+	  Automatically determines which UART to use for low-level debug based
+	  on the ODMDATA value. This value is part of the BCT, and is written
+	  to the boot memory device using nvflash, or other flashing tool.
+	  to the boot memory device using nvflash or another flashing tool.
+	  0/1/2/3/4 are UART A/B/C/D/E.
+
+config TEGRA_DEBUG_UART_AUTO_SCRATCH
+	bool "Via UART scratch register"
+	help
+	  Automatically determines which UART to use for low-level debug based
+	  on the UART scratch register value. Some bootloaders put ASCII 'D'
+	  in this register when they initialize their own console UART output.
+	  Using this option allows the kernel to automatically pick the same
+	  UART.
+
+endchoice
+
 config TEGRA_SYSTEM_DMA
 	bool "Enable system DMA driver for NVIDIA Tegra SoCs"
 	default y
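
(Aside: the ODMDATA-based selection described in the help text above boils down to a little bit twiddling. The sketch below is illustrative only — the helper name is invented, and it mirrors the auto_odmdata() logic added to uncompress.h later in this patch, which accepts console type 2 as well as 3 because some boards swap those two values.)

	/*
	 * Illustrative only: decode the debug UART index from an ODMDATA
	 * word using the layout described above. Returns 0..4 for UART
	 * A..E, or -1 when ODMDATA does not select a UART console. Callers
	 * still need to range-check the result against the UART table, as
	 * arch_decomp_setup() does.
	 */
	static int odmdata_uart_index(u32 odmdata)
	{
		/* bits 19:18: console type; 0=default, 1=none, 2/3=UART */
		if (((odmdata >> 18) & 3) < 2)
			return -1;

		/* bits 17:15: UART index, 0/1/2/3/4 = UART A/B/C/D/E */
		return (odmdata >> 15) & 7;
	}
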
diff --git a/arch/arm/mach-tegra/board-dt-tegra20.c b/arch/arm/mach-tegra/board-dt-tegra20.c
index fac3eb1..eb7249d 100644
--- a/arch/arm/mach-tegra/board-dt-tegra20.c
+++ b/arch/arm/mach-tegra/board-dt-tegra20.c
@@ -110,6 +110,7 @@
 	.handle_irq	= gic_handle_irq,
 	.timer		= &tegra_timer,
 	.init_machine	= tegra_dt_init,
+	.init_late	= tegra_init_late,
 	.restart	= tegra_assert_system_reset,
 	.dt_compat	= tegra20_dt_board_compat,
 MACHINE_END
diff --git a/arch/arm/mach-tegra/board-dt-tegra30.c b/arch/arm/mach-tegra/board-dt-tegra30.c
index 5f7c03e..4f76fa7 100644
--- a/arch/arm/mach-tegra/board-dt-tegra30.c
+++ b/arch/arm/mach-tegra/board-dt-tegra30.c
@@ -51,12 +51,22 @@
 	OF_DEV_AUXDATA("nvidia,tegra20-i2c", 0x7000C500, "tegra-i2c.2", NULL),
 	OF_DEV_AUXDATA("nvidia,tegra20-i2c", 0x7000C700, "tegra-i2c.3", NULL),
 	OF_DEV_AUXDATA("nvidia,tegra20-i2c", 0x7000D000, "tegra-i2c.4", NULL),
+	OF_DEV_AUXDATA("nvidia,tegra30-ahub", 0x70080000, "tegra30-ahub", NULL),
 	{}
 };
 
 static __initdata struct tegra_clk_init_table tegra_dt_clk_init_table[] = {
 	/* name		parent		rate		enabled */
 	{ "uarta",	"pll_p",	408000000,	true },
+	{ "pll_a",	"pll_p_out1",	564480000,	true },
+	{ "pll_a_out0",	"pll_a",	11289600,	true },
+	{ "extern1",	"pll_a_out0",	0,		true },
+	{ "clk_out_1",	"extern1",	0,		true },
+	{ "i2s0",	"pll_a_out0",	11289600,	false},
+	{ "i2s1",	"pll_a_out0",	11289600,	false},
+	{ "i2s2",	"pll_a_out0",	11289600,	false},
+	{ "i2s3",	"pll_a_out0",	11289600,	false},
+	{ "i2s4",	"pll_a_out0",	11289600,	false},
 	{ NULL,		NULL,		0,		0},
 };
 
@@ -80,6 +90,7 @@
 	.handle_irq	= gic_handle_irq,
 	.timer		= &tegra_timer,
 	.init_machine	= tegra30_dt_init,
+	.init_late	= tegra_init_late,
 	.restart	= tegra_assert_system_reset,
 	.dt_compat	= tegra30_dt_board_compat,
 MACHINE_END
diff --git a/arch/arm/mach-tegra/board-harmony.c b/arch/arm/mach-tegra/board-harmony.c
index b906b3b..e65e837 100644
--- a/arch/arm/mach-tegra/board-harmony.c
+++ b/arch/arm/mach-tegra/board-harmony.c
@@ -192,5 +192,6 @@
 	.handle_irq	= gic_handle_irq,
 	.timer          = &tegra_timer,
 	.init_machine   = tegra_harmony_init,
+	.init_late	= tegra_init_late,
 	.restart	= tegra_assert_system_reset,
 MACHINE_END
diff --git a/arch/arm/mach-tegra/board-paz00.c b/arch/arm/mach-tegra/board-paz00.c
index d0735c7..bbc1907 100644
--- a/arch/arm/mach-tegra/board-paz00.c
+++ b/arch/arm/mach-tegra/board-paz00.c
@@ -162,6 +162,8 @@
 
 static void paz00_usb_init(void)
 {
+	tegra_ehci2_ulpi_phy_config.reset_gpio = TEGRA_ULPI_RST;
+
 	platform_device_register(&tegra_ehci2_device);
 	platform_device_register(&tegra_ehci3_device);
 }
@@ -179,7 +181,6 @@
 	{ "uarta",	"pll_p",	216000000,	true },
 	{ "uartc",	"pll_p",	216000000,	true },
 
-	{ "pll_p_out4",	"pll_p",	24000000,	true },
 	{ "usbd",	"clk_m",	12000000,	false },
 	{ "usb2",	"clk_m",	12000000,	false },
 	{ "usb3",	"clk_m",	12000000,	false },
@@ -224,5 +225,6 @@
 	.handle_irq	= gic_handle_irq,
 	.timer          = &tegra_timer,
 	.init_machine   = tegra_paz00_init,
+	.init_late	= tegra_init_late,
 	.restart	= tegra_assert_system_reset,
 MACHINE_END
diff --git a/arch/arm/mach-tegra/board-seaboard.c b/arch/arm/mach-tegra/board-seaboard.c
index 79064c7..71e9f3f 100644
--- a/arch/arm/mach-tegra/board-seaboard.c
+++ b/arch/arm/mach-tegra/board-seaboard.c
@@ -277,6 +277,7 @@
 	.handle_irq	= gic_handle_irq,
 	.timer          = &tegra_timer,
 	.init_machine   = tegra_seaboard_init,
+	.init_late	= tegra_init_late,
 	.restart	= tegra_assert_system_reset,
 MACHINE_END
 
@@ -288,6 +289,7 @@
 	.handle_irq	= gic_handle_irq,
 	.timer          = &tegra_timer,
 	.init_machine   = tegra_kaen_init,
+	.init_late	= tegra_init_late,
 	.restart	= tegra_assert_system_reset,
 MACHINE_END
 
@@ -299,5 +301,6 @@
 	.handle_irq	= gic_handle_irq,
 	.timer          = &tegra_timer,
 	.init_machine   = tegra_wario_init,
+	.init_late	= tegra_init_late,
 	.restart	= tegra_assert_system_reset,
 MACHINE_END
diff --git a/arch/arm/mach-tegra/board-trimslice.c b/arch/arm/mach-tegra/board-trimslice.c
index bc59b37..776aa95 100644
--- a/arch/arm/mach-tegra/board-trimslice.c
+++ b/arch/arm/mach-tegra/board-trimslice.c
@@ -118,6 +118,8 @@
 	pdata = tegra_ehci1_device.dev.platform_data;
 	pdata->vbus_gpio = TRIMSLICE_GPIO_USB1_MODE;
 
+	tegra_ehci2_ulpi_phy_config.reset_gpio = TEGRA_GPIO_PV0;
+
 	platform_device_register(&tegra_ehci3_device);
 	platform_device_register(&tegra_ehci2_device);
 	platform_device_register(&tegra_ehci1_device);
@@ -176,5 +178,6 @@
 	.handle_irq	= gic_handle_irq,
 	.timer          = &tegra_timer,
 	.init_machine   = tegra_trimslice_init,
+	.init_late	= tegra_init_late,
 	.restart	= tegra_assert_system_reset,
 MACHINE_END
diff --git a/arch/arm/mach-tegra/board.h b/arch/arm/mach-tegra/board.h
index 75d1543..6501496 100644
--- a/arch/arm/mach-tegra/board.h
+++ b/arch/arm/mach-tegra/board.h
@@ -32,5 +32,19 @@
 void __init tegra_dt_init_irq(void);
 int __init tegra_pcie_init(bool init_port0, bool init_port1);
 
+void tegra_init_late(void);
+
+#ifdef CONFIG_DEBUG_FS
+int tegra_clk_debugfs_init(void);
+#else
+static inline int tegra_clk_debugfs_init(void) { return 0; }
+#endif
+
+#if defined(CONFIG_ARCH_TEGRA_2x_SOC) && defined(CONFIG_DEBUG_FS)
+int __init tegra_powergate_debugfs_init(void);
+#else
+static inline int tegra_powergate_debugfs_init(void) { return 0; }
+#endif
+
 extern struct sys_timer tegra_timer;
 #endif
diff --git a/arch/arm/mach-tegra/clock.c b/arch/arm/mach-tegra/clock.c
index 8dad8d1..58f981c 100644
--- a/arch/arm/mach-tegra/clock.c
+++ b/arch/arm/mach-tegra/clock.c
@@ -642,7 +642,7 @@
 	return 0;
 }
 
-static int __init clk_debugfs_init(void)
+int __init tegra_clk_debugfs_init(void)
 {
 	struct clk *c;
 	struct dentry *d;
@@ -669,5 +669,4 @@
 	return err;
 }
 
-late_initcall(clk_debugfs_init);
 #endif
diff --git a/arch/arm/mach-tegra/common.c b/arch/arm/mach-tegra/common.c
index 22df10f..204a5c8 100644
--- a/arch/arm/mach-tegra/common.c
+++ b/arch/arm/mach-tegra/common.c
@@ -82,10 +82,12 @@
 	{ "pll_p_out1",	"pll_p",	28800000,	true },
 	{ "pll_p_out2",	"pll_p",	48000000,	true },
 	{ "pll_p_out3",	"pll_p",	72000000,	true },
-	{ "pll_p_out4",	"pll_p",	108000000,	true },
-	{ "sclk",	"pll_p_out4",	108000000,	true },
-	{ "hclk",	"sclk",		108000000,	true },
-	{ "pclk",	"hclk",		54000000,	true },
+	{ "pll_p_out4",	"pll_p",	24000000,	true },
+	{ "pll_c",	"clk_m",	600000000,	true },
+	{ "pll_c_out1",	"pll_c",	120000000,	true },
+	{ "sclk",	"pll_c_out1",	120000000,	true },
+	{ "hclk",	"sclk",		120000000,	true },
+	{ "pclk",	"hclk",		60000000,	true },
 	{ "csite",	NULL,		0,		true },
 	{ "emc",	NULL,		0,		true },
 	{ "cpu",	NULL,		0,		true },
@@ -93,6 +95,17 @@
 };
 #endif
 
+#ifdef CONFIG_ARCH_TEGRA_3x_SOC
+static __initdata struct tegra_clk_init_table tegra30_clk_init_table[] = {
+	/* name		parent		rate		enabled */
+	{ "clk_m",	NULL,		0,		true },
+	{ "pll_p",	"clk_m",	408000000,	true },
+	{ "pll_p_out1",	"pll_p",	9600000,	true },
+	{ NULL,		NULL,		0,		0},
+};
+#endif
+
+
 static void __init tegra_init_cache(u32 tag_latency, u32 data_latency)
 {
 #ifdef CONFIG_CACHE_L2X0
@@ -127,8 +140,15 @@
 {
 	tegra_init_fuse();
 	tegra30_init_clocks();
+	tegra_clk_init_from_table(tegra30_clk_init_table);
 	tegra_init_cache(0x441, 0x551);
 	tegra_pmc_init();
 	tegra_powergate_init();
 }
 #endif
+
+void __init tegra_init_late(void)
+{
+	tegra_clk_debugfs_init();
+	tegra_powergate_debugfs_init();
+}
diff --git a/arch/arm/mach-tegra/devices.c b/arch/arm/mach-tegra/devices.c
index 2d8dfa2..c70e65f 100644
--- a/arch/arm/mach-tegra/devices.c
+++ b/arch/arm/mach-tegra/devices.c
@@ -439,9 +439,8 @@
 	},
 };
 
-static struct tegra_ulpi_config tegra_ehci2_ulpi_phy_config = {
-	/* All existing boards use GPIO PV0 for phy reset */
-	.reset_gpio = TEGRA_GPIO_PV0,
+struct tegra_ulpi_config tegra_ehci2_ulpi_phy_config = {
+	.reset_gpio = -1,
 	.clk = "cdev2",
 };
 
diff --git a/arch/arm/mach-tegra/devices.h b/arch/arm/mach-tegra/devices.h
index 138c642..4f50527 100644
--- a/arch/arm/mach-tegra/devices.h
+++ b/arch/arm/mach-tegra/devices.h
@@ -22,6 +22,10 @@
 #include <linux/platform_device.h>
 #include <linux/platform_data/tegra_usb.h>
 
+#include <mach/usb_phy.h>
+
+extern struct tegra_ulpi_config tegra_ehci2_ulpi_phy_config;
+
 extern struct tegra_ehci_platform_data tegra_ehci1_pdata;
 extern struct tegra_ehci_platform_data tegra_ehci2_pdata;
 extern struct tegra_ehci_platform_data tegra_ehci3_pdata;
diff --git a/arch/arm/mach-tegra/include/mach/tegra-ahb.h b/arch/arm/mach-tegra/include/mach/tegra-ahb.h
new file mode 100644
index 0000000..e0f8c84
--- /dev/null
+++ b/arch/arm/mach-tegra/include/mach/tegra-ahb.h
@@ -0,0 +1,19 @@
+/*
+ * Copyright (c) 2012, NVIDIA CORPORATION.  All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ */
+
+#ifndef __MACH_TEGRA_AHB_H__
+#define __MACH_TEGRA_AHB_H__
+
+extern int tegra_ahb_enable_smmu(struct device_node *ahb);
+
+#endif	/* __MACH_TEGRA_AHB_H__ */
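
(The new header only exports tegra_ahb_enable_smmu(); this patch does not show a caller. The snippet below is a guess at a typical consumer — the "nvidia,ahb" phandle property and the function and variable names are assumptions, not taken from this series, and it presumes <linux/of.h> and <mach/tegra-ahb.h> are included.)

	/*
	 * Hypothetical consumer, e.g. an SMMU driver's probe(): look up the
	 * AHB node the device points at and ask the AHB driver to enable
	 * SMMU translation before programming the SMMU itself.
	 */
	static int example_enable_smmu(struct device *dev)
	{
		struct device_node *ahb;
		int err;

		/* assumed binding: the caller's node references the AHB by phandle */
		ahb = of_parse_phandle(dev->of_node, "nvidia,ahb", 0);
		if (!ahb)
			return -ENODEV;

		err = tegra_ahb_enable_smmu(ahb);
		of_node_put(ahb);

		return err;
	}
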
diff --git a/arch/arm/mach-tegra/include/mach/uncompress.h b/arch/arm/mach-tegra/include/mach/uncompress.h
index 5a440f3..937c4c5 100644
--- a/arch/arm/mach-tegra/include/mach/uncompress.h
+++ b/arch/arm/mach-tegra/include/mach/uncompress.h
@@ -63,52 +63,86 @@
 		buf[0] = 0;
 }
 
-/*
- * Setup before decompression.  This is where we do UART selection for
- * earlyprintk and init the uart_base register.
- */
-static inline void arch_decomp_setup(void)
+static const struct {
+	u32 base;
+	u32 reset_reg;
+	u32 clock_reg;
+	u32 bit;
+} uarts[] = {
+	{
+		TEGRA_UARTA_BASE,
+		TEGRA_CLK_RESET_BASE + 0x04,
+		TEGRA_CLK_RESET_BASE + 0x10,
+		6,
+	},
+	{
+		TEGRA_UARTB_BASE,
+		TEGRA_CLK_RESET_BASE + 0x04,
+		TEGRA_CLK_RESET_BASE + 0x10,
+		7,
+	},
+	{
+		TEGRA_UARTC_BASE,
+		TEGRA_CLK_RESET_BASE + 0x08,
+		TEGRA_CLK_RESET_BASE + 0x14,
+		23,
+	},
+	{
+		TEGRA_UARTD_BASE,
+		TEGRA_CLK_RESET_BASE + 0x0c,
+		TEGRA_CLK_RESET_BASE + 0x18,
+		1,
+	},
+	{
+		TEGRA_UARTE_BASE,
+		TEGRA_CLK_RESET_BASE + 0x0c,
+		TEGRA_CLK_RESET_BASE + 0x18,
+		2,
+	},
+};
+
+static inline bool uart_clocked(int i)
 {
-	static const struct {
-		u32 base;
-		u32 reset_reg;
-		u32 clock_reg;
-		u32 bit;
-	} uarts[] = {
-		{
-			TEGRA_UARTA_BASE,
-			TEGRA_CLK_RESET_BASE + 0x04,
-			TEGRA_CLK_RESET_BASE + 0x10,
-			6,
-		},
-		{
-			TEGRA_UARTB_BASE,
-			TEGRA_CLK_RESET_BASE + 0x04,
-			TEGRA_CLK_RESET_BASE + 0x10,
-			7,
-		},
-		{
-			TEGRA_UARTC_BASE,
-			TEGRA_CLK_RESET_BASE + 0x08,
-			TEGRA_CLK_RESET_BASE + 0x14,
-			23,
-		},
-		{
-			TEGRA_UARTD_BASE,
-			TEGRA_CLK_RESET_BASE + 0x0c,
-			TEGRA_CLK_RESET_BASE + 0x18,
-			1,
-		},
-		{
-			TEGRA_UARTE_BASE,
-			TEGRA_CLK_RESET_BASE + 0x0c,
-			TEGRA_CLK_RESET_BASE + 0x18,
-			2,
-		},
-	};
+	if (*(u8 *)uarts[i].reset_reg & BIT(uarts[i].bit))
+		return false;
+
+	if (!(*(u8 *)uarts[i].clock_reg & BIT(uarts[i].bit)))
+		return false;
+
+	return true;
+}
+
+#ifdef CONFIG_TEGRA_DEBUG_UART_AUTO_ODMDATA
+int auto_odmdata(void)
+{
+	volatile u32 *pmc = (volatile u32 *)TEGRA_PMC_BASE;
+	u32 odmdata = pmc[0xa0 / 4];
+
+	/*
+	 * Bits 19:18 are the console type: 0=default, 1=none, 2=DCC, 3=UART
+	 * Some boards apparently swap the last two values, but we don't have
+	 * any way of catering for that here, so we just accept either. If this
+	 * doesn't make sense for your board, just don't enable this feature.
+	 *
+	 * Bits 17:15 indicate the UART to use, 0/1/2/3/4 are UART A/B/C/D/E.
+	 */
+
+	switch ((odmdata >> 18) & 3) {
+	case 2:
+	case 3:
+		break;
+	default:
+		return -1;
+	}
+
+	return (odmdata >> 15) & 7;
+}
+#endif
+
+#ifdef CONFIG_TEGRA_DEBUG_UART_AUTO_SCRATCH
+int auto_scratch(void)
+{
 	int i;
-	volatile u32 *apb_misc = (volatile u32 *)TEGRA_APB_MISC_BASE;
-	u32 chip, div;
 
 	/*
 	 * Look for the first UART that:
@@ -125,20 +159,60 @@
 	 * back to what's specified in TEGRA_DEBUG_UART_BASE.
 	 */
 	for (i = 0; i < ARRAY_SIZE(uarts); i++) {
-		if (*(u8 *)uarts[i].reset_reg & BIT(uarts[i].bit))
-			continue;
-
-		if (!(*(u8 *)uarts[i].clock_reg & BIT(uarts[i].bit)))
+		if (!uart_clocked(i))
 			continue;
 
 		uart = (volatile u8 *)uarts[i].base;
 		if (uart[UART_SCR << DEBUG_UART_SHIFT] != 'D')
 			continue;
 
-		break;
+		return i;
 	}
-	if (i == ARRAY_SIZE(uarts))
-		uart = (volatile u8 *)TEGRA_DEBUG_UART_BASE;
+
+	return -1;
+}
+#endif
+
+/*
+ * Setup before decompression.  This is where we do UART selection for
+ * earlyprintk and init the uart_base register.
+ */
+static inline void arch_decomp_setup(void)
+{
+	int uart_id, auto_uart_id;
+	volatile u32 *apb_misc = (volatile u32 *)TEGRA_APB_MISC_BASE;
+	u32 chip, div;
+
+#if defined(CONFIG_TEGRA_DEBUG_UARTA)
+	uart_id = 0;
+#elif defined(CONFIG_TEGRA_DEBUG_UARTB)
+	uart_id = 1;
+#elif defined(CONFIG_TEGRA_DEBUG_UARTC)
+	uart_id = 2;
+#elif defined(CONFIG_TEGRA_DEBUG_UARTD)
+	uart_id = 3;
+#elif defined(CONFIG_TEGRA_DEBUG_UARTE)
+	uart_id = 4;
+#else
+	uart_id = -1;
+#endif
+
+#if defined(CONFIG_TEGRA_DEBUG_UART_AUTO_ODMDATA)
+	auto_uart_id = auto_odmdata();
+#elif defined(CONFIG_TEGRA_DEBUG_UART_AUTO_SCRATCH)
+	auto_uart_id = auto_scratch();
+#else
+	auto_uart_id = -1;
+#endif
+	if (auto_uart_id != -1)
+		uart_id = auto_uart_id;
+
+	if (uart_id < 0 || uart_id >= ARRAY_SIZE(uarts) ||
+	    !uart_clocked(uart_id))
+		uart = NULL;
+	else
+		uart = (volatile u8 *)uarts[uart_id].base;
+
 	save_uart_address();
 	if (uart == NULL)
 		return;
diff --git a/arch/arm/mach-tegra/include/mach/usb_phy.h b/arch/arm/mach-tegra/include/mach/usb_phy.h
index de1a0f6..935ce9f 100644
--- a/arch/arm/mach-tegra/include/mach/usb_phy.h
+++ b/arch/arm/mach-tegra/include/mach/usb_phy.h
@@ -61,8 +61,8 @@
 	struct usb_phy *ulpi;
 };
 
-struct tegra_usb_phy *tegra_usb_phy_open(int instance, void __iomem *regs,
-			void *config, enum tegra_usb_phy_mode phy_mode);
+struct tegra_usb_phy *tegra_usb_phy_open(struct device *dev, int instance,
+	void __iomem *regs, void *config, enum tegra_usb_phy_mode phy_mode);
 
 int tegra_usb_phy_power_on(struct tegra_usb_phy *phy);
 
diff --git a/arch/arm/mach-tegra/powergate.c b/arch/arm/mach-tegra/powergate.c
index c238699..f5b12fb 100644
--- a/arch/arm/mach-tegra/powergate.c
+++ b/arch/arm/mach-tegra/powergate.c
@@ -234,7 +234,7 @@
 	.release	= single_release,
 };
 
-static int __init powergate_debugfs_init(void)
+int __init tegra_powergate_debugfs_init(void)
 {
 	struct dentry *d;
 	int err = -ENOMEM;
@@ -247,6 +247,4 @@
 	return err;
 }
 
-late_initcall(powergate_debugfs_init);
-
 #endif
diff --git a/arch/arm/mach-tegra/reset.c b/arch/arm/mach-tegra/reset.c
index 4d6a2ee..5beb7eb 100644
--- a/arch/arm/mach-tegra/reset.c
+++ b/arch/arm/mach-tegra/reset.c
@@ -33,7 +33,7 @@
 
 static bool is_enabled;
 
-static void tegra_cpu_reset_handler_enable(void)
+static void __init tegra_cpu_reset_handler_enable(void)
 {
 	void __iomem *iram_base = IO_ADDRESS(TEGRA_IRAM_RESET_BASE);
 	void __iomem *evp_cpu_reset =
diff --git a/arch/arm/mach-tegra/tegra2_clocks.c b/arch/arm/mach-tegra/tegra2_clocks.c
index bae09b8..b59315c 100644
--- a/arch/arm/mach-tegra/tegra2_clocks.c
+++ b/arch/arm/mach-tegra/tegra2_clocks.c
@@ -1486,6 +1486,10 @@
 };
 
 static struct clk_pll_freq_table tegra_pll_c_freq_table[] = {
+	{ 12000000, 600000000, 600, 12, 1, 8 },
+	{ 13000000, 600000000, 600, 13, 1, 8 },
+	{ 19200000, 600000000, 500, 16, 1, 6 },
+	{ 26000000, 600000000, 600, 26, 1, 8 },
 	{ 0, 0, 0, 0, 0, 0 },
 };
 
diff --git a/arch/arm/mach-tegra/tegra30_clocks.c b/arch/arm/mach-tegra/tegra30_clocks.c
index 6d08b53..e33fe4b 100644
--- a/arch/arm/mach-tegra/tegra30_clocks.c
+++ b/arch/arm/mach-tegra/tegra30_clocks.c
@@ -3015,6 +3015,15 @@
 	CLK_DUPLICATE("sbc6", "spi_slave_tegra.5", NULL),
 	CLK_DUPLICATE("twd", "smp_twd", NULL),
 	CLK_DUPLICATE("vcp", "nvavp", "vcp"),
+	CLK_DUPLICATE("i2s0", NULL, "i2s0"),
+	CLK_DUPLICATE("i2s1", NULL, "i2s1"),
+	CLK_DUPLICATE("i2s2", NULL, "i2s2"),
+	CLK_DUPLICATE("i2s3", NULL, "i2s3"),
+	CLK_DUPLICATE("i2s4", NULL, "i2s4"),
+	CLK_DUPLICATE("dam0", NULL, "dam0"),
+	CLK_DUPLICATE("dam1", NULL, "dam1"),
+	CLK_DUPLICATE("dam2", NULL, "dam2"),
+	CLK_DUPLICATE("spdif_in", NULL, "spdif_in"),
 };
 
 struct clk *tegra_ptr_clks[] = {
diff --git a/arch/arm/mach-tegra/usb_phy.c b/arch/arm/mach-tegra/usb_phy.c
index d71d2fe..54e353c 100644
--- a/arch/arm/mach-tegra/usb_phy.c
+++ b/arch/arm/mach-tegra/usb_phy.c
@@ -26,6 +26,7 @@
 #include <linux/platform_device.h>
 #include <linux/io.h>
 #include <linux/gpio.h>
+#include <linux/of_gpio.h>
 #include <linux/usb/otg.h>
 #include <linux/usb/ulpi.h>
 #include <asm/mach-types.h>
@@ -654,8 +655,8 @@
 	clk_disable(phy->clk);
 }
 
-struct tegra_usb_phy *tegra_usb_phy_open(int instance, void __iomem *regs,
-			void *config, enum tegra_usb_phy_mode phy_mode)
+struct tegra_usb_phy *tegra_usb_phy_open(struct device *dev, int instance,
+	void __iomem *regs, void *config, enum tegra_usb_phy_mode phy_mode)
 {
 	struct tegra_usb_phy *phy;
 	struct tegra_ulpi_config *ulpi_config;
@@ -711,6 +712,16 @@
 			err = -ENXIO;
 			goto err1;
 		}
+		if (!gpio_is_valid(ulpi_config->reset_gpio))
+			ulpi_config->reset_gpio =
+				of_get_named_gpio(dev->of_node,
+						  "nvidia,phy-reset-gpio", 0);
+		if (!gpio_is_valid(ulpi_config->reset_gpio)) {
+			pr_err("%s: invalid reset gpio: %d\n", __func__,
+			       ulpi_config->reset_gpio);
+			err = -EINVAL;
+			goto err1;
+		}
 		gpio_request(ulpi_config->reset_gpio, "ulpi_phy_reset_b");
 		gpio_direction_output(ulpi_config->reset_gpio, 0);
 		phy->ulpi = otg_ulpi_create(&ulpi_viewport_access_ops, 0);
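
(For context, the signature change above means callers now hand in their struct device so the PHY code can consult the device's DT node. A minimal sketch of an updated call site follows; the variable names, the TEGRA_USB_PHY_MODE_HOST mode and the ERR_PTR error convention are assumptions here, not lifted from this hunk.)

	/*
	 * Sketch of a caller (e.g. the EHCI host glue): with the new first
	 * argument, tegra_usb_phy_open() can fall back to the
	 * "nvidia,phy-reset-gpio" DT property when platform data does not
	 * provide a valid reset GPIO.
	 */
	phy = tegra_usb_phy_open(&pdev->dev, pdev->id, hcd->regs,
				 pdata->phy_config, TEGRA_USB_PHY_MODE_HOST);
	if (IS_ERR(phy))
		return PTR_ERR(phy);
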
diff --git a/arch/arm/mach-ux500/board-mop500-uib.c b/arch/arm/mach-ux500/board-mop500-uib.c
index b29a788..1f47d96 100644
--- a/arch/arm/mach-ux500/board-mop500-uib.c
+++ b/arch/arm/mach-ux500/board-mop500-uib.c
@@ -96,7 +96,7 @@
 /*
  * Detect the UIB attached based on the presence or absence of i2c devices.
  */
-static int __init mop500_uib_init(void)
+int __init mop500_uib_init(void)
 {
 	struct uib *uib = mop500_uib;
 	struct i2c_adapter *i2c0;
@@ -131,5 +131,3 @@
 
 	return 0;
 }
-
-module_init(mop500_uib_init);
diff --git a/arch/arm/mach-ux500/board-mop500.c b/arch/arm/mach-ux500/board-mop500.c
index f943687..1509a3c 100644
--- a/arch/arm/mach-ux500/board-mop500.c
+++ b/arch/arm/mach-ux500/board-mop500.c
@@ -206,7 +206,7 @@
 };
 
 struct platform_device ab8500_device = {
-	.name = "ab8500-i2c",
+	.name = "ab8500-core",
 	.id = 0,
 	.dev = {
 		.platform_data = &ab8500_platdata,
@@ -580,43 +580,12 @@
 	udelay(1);
 }
 
-/* This needs to be referenced by callbacks */
-struct pinctrl *u0_p;
-struct pinctrl_state *u0_def;
-struct pinctrl_state *u0_sleep;
-
-static void ux500_uart0_init(void)
-{
-	int ret;
-
-	if (IS_ERR(u0_p) || IS_ERR(u0_def))
-		return;
-
-	ret = pinctrl_select_state(u0_p, u0_def);
-	if (ret)
-		pr_err("could not set UART0 defstate\n");
-}
-
-static void ux500_uart0_exit(void)
-{
-	int ret;
-
-	if (IS_ERR(u0_p) || IS_ERR(u0_sleep))
-		return;
-
-	ret = pinctrl_select_state(u0_p, u0_sleep);
-	if (ret)
-		pr_err("could not set UART0 idlestate\n");
-}
-
 static struct amba_pl011_data uart0_plat = {
 #ifdef CONFIG_STE_DMA40
 	.dma_filter = stedma40_filter,
 	.dma_rx_param = &uart0_dma_cfg_rx,
 	.dma_tx_param = &uart0_dma_cfg_tx,
 #endif
-	.init = ux500_uart0_init,
-	.exit = ux500_uart0_exit,
 	.reset = ux500_uart0_reset,
 };
 
@@ -638,28 +607,7 @@
 
 static void __init mop500_uart_init(struct device *parent)
 {
-	struct amba_device *uart0_device;
-
-	uart0_device = db8500_add_uart0(parent, &uart0_plat);
-	if (uart0_device) {
-		u0_p = pinctrl_get(&uart0_device->dev);
-		if (IS_ERR(u0_p))
-			dev_err(&uart0_device->dev,
-				"could not get UART0 pinctrl\n");
-		else {
-			u0_def = pinctrl_lookup_state(u0_p,
-						      PINCTRL_STATE_DEFAULT);
-			if (IS_ERR(u0_def)) {
-				dev_err(&uart0_device->dev,
-					"could not get UART0 defstate\n");
-			}
-			u0_sleep = pinctrl_lookup_state(u0_p,
-							PINCTRL_STATE_SLEEP);
-			if (IS_ERR(u0_sleep))
-				dev_err(&uart0_device->dev,
-					"could not get UART0 idlestate\n");
-		}
-	}
+	db8500_add_uart0(parent, &uart0_plat);
 	db8500_add_uart1(parent, &uart1_plat);
 	db8500_add_uart2(parent, &uart2_plat);
 }
@@ -673,9 +621,15 @@
 static struct platform_device *snowball_platform_devs[] __initdata = {
 	&snowball_led_dev,
 	&snowball_key_dev,
+	&snowball_sbnet_dev,
 	&ab8500_device,
 };
 
+static struct platform_device *snowball_of_platform_devs[] __initdata = {
+	&snowball_led_dev,
+	&snowball_key_dev,
+};
+
 static void __init mop500_init_machine(void)
 {
 	struct device *parent = NULL;
@@ -710,6 +664,8 @@
 
 	/* This board has full regulator constraints */
 	regulator_has_full_constraints();
+
+	mop500_uib_init();
 }
 
 static void __init snowball_init_machine(void)
@@ -774,6 +730,8 @@
 
 	/* This board has full regulator constraints */
 	regulator_has_full_constraints();
+
+	mop500_uib_init();
 }
 
 MACHINE_START(U8500, "ST-Ericsson MOP500 platform")
@@ -785,6 +743,7 @@
 	.timer		= &ux500_timer,
 	.handle_irq	= gic_handle_irq,
 	.init_machine	= mop500_init_machine,
+	.init_late	= ux500_init_late,
 MACHINE_END
 
 MACHINE_START(HREFV60, "ST-Ericsson U8500 Platform HREFv60+")
@@ -794,6 +753,7 @@
 	.timer		= &ux500_timer,
 	.handle_irq	= gic_handle_irq,
 	.init_machine	= hrefv60_init_machine,
+	.init_late	= ux500_init_late,
 MACHINE_END
 
 MACHINE_START(SNOWBALL, "Calao Systems Snowball platform")
@@ -804,6 +764,7 @@
 	.timer		= &ux500_timer,
 	.handle_irq	= gic_handle_irq,
 	.init_machine	= snowball_init_machine,
+	.init_late	= ux500_init_late,
 MACHINE_END
 
 #ifdef CONFIG_MACH_UX500_DT
@@ -831,6 +792,10 @@
 static const struct of_device_id u8500_local_bus_nodes[] = {
 	/* only create devices below soc node */
 	{ .compatible = "stericsson,db8500", },
+	{ .compatible = "stericsson,db8500-prcmu", },
+	{ .compatible = "stericsson,db8500-prcmu-regulator", },
+	{ .compatible = "stericsson,ab8500", },
+	{ .compatible = "stericsson,ab8500-regulator", },
 	{ .compatible = "simple-bus"},
 	{ },
 };
@@ -849,7 +814,7 @@
 	else if (of_machine_is_compatible("st-ericsson,hrefv60+"))
 		hrefv60_pinmaps_init();
 
-	parent = u8500_init_devices();
+	parent = u8500_of_init_devices();
 
 	for (i = 0; i < ARRAY_SIZE(mop500_platform_devs); i++)
 		mop500_platform_devs[i]->dev.parent = parent;
@@ -866,15 +831,23 @@
 				ARRAY_SIZE(mop500_platform_devs));
 
 		mop500_sdi_init(parent);
-
 		i2c0_devs = ARRAY_SIZE(mop500_i2c0_devices);
 		i2c_register_board_info(0, mop500_i2c0_devices, i2c0_devs);
 		i2c_register_board_info(2, mop500_i2c2_devices,
 					ARRAY_SIZE(mop500_i2c2_devices));
 
+		mop500_uib_init();
+
 	} else if (of_machine_is_compatible("calaosystems,snowball-a9500")) {
-		platform_add_devices(snowball_platform_devs,
-				ARRAY_SIZE(snowball_platform_devs));
+		/*
+		 * Devices to be DT:ed:
+		 *   snowball_led_dev   = todo
+		 *   snowball_key_dev   = todo
+		 *   snowball_sbnet_dev = done
+		 *   ab8500_device      = done
+		 */
+		platform_add_devices(snowball_of_platform_devs,
+				ARRAY_SIZE(snowball_of_platform_devs));
 
 		snowball_sdi_init(parent);
 	} else if (of_machine_is_compatible("st-ericsson,hrefv60+")) {
@@ -895,6 +868,8 @@
 		i2c_register_board_info(0, mop500_i2c0_devices, i2c0_devs);
 		i2c_register_board_info(2, mop500_i2c2_devices,
 					ARRAY_SIZE(mop500_i2c2_devices));
+
+		mop500_uib_init();
 	}
 	mop500_i2c_init(parent);
 
@@ -918,6 +893,7 @@
 	.timer		= &ux500_timer,
 	.handle_irq	= gic_handle_irq,
 	.init_machine	= u8500_init_machine,
+	.init_late	= ux500_init_late,
 	.dt_compat      = u8500_dt_board_compat,
 MACHINE_END
 #endif
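
For reference, the ux500_uart0_init()/ux500_uart0_exit() callbacks removed above implemented the standard pinctrl consumer sequence: get a handle, look up the named "default" and "sleep" states, and select one around runtime transitions. A minimal sketch of that sequence, with illustrative function names and assuming the device already has a pinctrl mapping, looks like this:

	#include <linux/pinctrl/consumer.h>
	#include <linux/err.h>

	static struct pinctrl *p;
	static struct pinctrl_state *s_def, *s_sleep;

	/* Hypothetical helper; mirrors what the removed board code did */
	static int uartx_pins_setup(struct device *dev)
	{
		p = pinctrl_get(dev);
		if (IS_ERR(p))
			return PTR_ERR(p);

		s_def = pinctrl_lookup_state(p, PINCTRL_STATE_DEFAULT);
		s_sleep = pinctrl_lookup_state(p, PINCTRL_STATE_SLEEP);
		if (IS_ERR(s_def) || IS_ERR(s_sleep))
			return -ENODEV;

		/* Mux the pins for normal operation */
		return pinctrl_select_state(p, s_def);
	}

	static void uartx_pins_idle(void)
	{
		/* Park the pins in their low-power configuration */
		if (!IS_ERR(p) && !IS_ERR(s_sleep))
			pinctrl_select_state(p, s_sleep);
	}
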
diff --git a/arch/arm/mach-ux500/board-mop500.h b/arch/arm/mach-ux500/board-mop500.h
index bc44c07..2f87b25 100644
--- a/arch/arm/mach-ux500/board-mop500.h
+++ b/arch/arm/mach-ux500/board-mop500.h
@@ -89,7 +89,11 @@
 void __init snowball_pinmaps_init(void);
 void __init hrefv60_pinmaps_init(void);
 
+int __init mop500_uib_init(void);
 void mop500_uib_i2c_add(int busnum, struct i2c_board_info *info,
 		unsigned n);
 
+/* TODO: Once all pieces are DT:ed, remove completely. */
+struct device * __init u8500_of_init_devices(void);
+
 #endif
diff --git a/arch/arm/mach-ux500/clock.c b/arch/arm/mach-ux500/clock.c
index 1762c47..8d73b06 100644
--- a/arch/arm/mach-ux500/clock.c
+++ b/arch/arm/mach-ux500/clock.c
@@ -635,7 +635,7 @@
 	return 0;
 }
 
-static int __init clk_debugfs_init(void)
+int __init clk_debugfs_init(void)
 {
 	struct clk *c;
 	struct dentry *d;
@@ -657,7 +657,6 @@
 	return err;
 }
 
-late_initcall(clk_debugfs_init);
 #endif /* defined(CONFIG_DEBUG_FS) */
 
 unsigned long clk_smp_twd_rate = 500000000;
@@ -696,12 +695,11 @@
 	.notifier_call = clk_twd_cpufreq_transition,
 };
 
-static int clk_init_smp_twd_cpufreq(void)
+int clk_init_smp_twd_cpufreq(void)
 {
 	return cpufreq_register_notifier(&clk_twd_cpufreq_nb,
 				  CPUFREQ_TRANSITION_NOTIFIER);
 }
-late_initcall(clk_init_smp_twd_cpufreq);
 
 #endif
 
diff --git a/arch/arm/mach-ux500/clock.h b/arch/arm/mach-ux500/clock.h
index d776ada..65d27a1 100644
--- a/arch/arm/mach-ux500/clock.h
+++ b/arch/arm/mach-ux500/clock.h
@@ -150,3 +150,15 @@
 
 int __init clk_db8500_ed_fixup(void);
 int __init clk_init(void);
+
+#ifdef CONFIG_DEBUG_FS
+int clk_debugfs_init(void);
+#else
+static inline int clk_debugfs_init(void) { return 0; }
+#endif
+
+#ifdef CONFIG_CPU_FREQ
+int clk_init_smp_twd_cpufreq(void);
+#else
+static inline int clk_init_smp_twd_cpufreq(void) { return 0; }
+#endif
diff --git a/arch/arm/mach-ux500/cpu-db8500.c b/arch/arm/mach-ux500/cpu-db8500.c
index 16169c4..33275eb 100644
--- a/arch/arm/mach-ux500/cpu-db8500.c
+++ b/arch/arm/mach-ux500/cpu-db8500.c
@@ -140,7 +140,6 @@
 static struct platform_device *of_platform_devs[] __initdata = {
 	&u8500_dma40_device,
 	&db8500_pmu_device,
-	&db8500_prcmu_device,
 };
 
 static resource_size_t __initdata db8500_gpio_base[] = {
@@ -222,6 +221,28 @@
 	platform_device_register_data(parent,
 		"cpufreq-u8500", -1, NULL, 0);
 
+	for (i = 0; i < ARRAY_SIZE(platform_devs); i++)
+		platform_devs[i]->dev.parent = parent;
+
+	platform_add_devices(platform_devs, ARRAY_SIZE(platform_devs));
+
+	return parent;
+}
+
+/* TODO: Once all pieces are DT:ed, remove completely. */
+struct device * __init u8500_of_init_devices(void)
+{
+	struct device *parent;
+	int i;
+
+	parent = db8500_soc_device_init();
+
+	db8500_add_rtc(parent);
+	db8500_add_usb(parent, usb_db8500_rx_dma_cfg, usb_db8500_tx_dma_cfg);
+
+	platform_device_register_data(parent,
+		"cpufreq-u8500", -1, NULL, 0);
+
 	for (i = 0; i < ARRAY_SIZE(of_platform_devs); i++)
 		of_platform_devs[i]->dev.parent = parent;
 
@@ -229,7 +250,7 @@
 	 * Devices to be DT:ed:
 	 *   u8500_dma40_device  = todo
 	 *   db8500_pmu_device   = todo
-	 *   db8500_prcmu_device = todo
+	 *   db8500_prcmu_device = done
 	 */
 	platform_add_devices(of_platform_devs, ARRAY_SIZE(of_platform_devs));
 
diff --git a/arch/arm/mach-ux500/cpu.c b/arch/arm/mach-ux500/cpu.c
index a29a0e3..e2360e7 100644
--- a/arch/arm/mach-ux500/cpu.c
+++ b/arch/arm/mach-ux500/cpu.c
@@ -73,6 +73,12 @@
 	clk_init();
 }
 
+void __init ux500_init_late(void)
+{
+	clk_debugfs_init();
+	clk_init_smp_twd_cpufreq();
+}
+
 static const char * __init ux500_get_machine(void)
 {
 	return kasprintf(GFP_KERNEL, "DB%4x", dbx500_partnumber());
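
The clk_debugfs_init() and clk_init_smp_twd_cpufreq() calls used to be registered as late_initcalls in shared code, where they would run regardless of which machine actually booted. They are now driven from the machine descriptor's .init_late hook, which the ARM core invokes from a single late initcall; the core-side mechanism (not part of this patch, shown only as a rough sketch) boils down to:

	static int __init init_machine_late(void)
	{
		if (machine_desc->init_late)
			machine_desc->init_late();
		return 0;
	}
	late_initcall(init_machine_late);

So ux500_init_late() only runs when one of the ux500 machine descriptors was matched at boot.
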
diff --git a/arch/arm/mach-ux500/include/mach/setup.h b/arch/arm/mach-ux500/include/mach/setup.h
index 4e369f1..8b7ed82 100644
--- a/arch/arm/mach-ux500/include/mach/setup.h
+++ b/arch/arm/mach-ux500/include/mach/setup.h
@@ -20,6 +20,7 @@
 extern struct device * __init u8500_init_devices(void);
 
 extern void __init ux500_init_irq(void);
+extern void __init ux500_init_late(void);
 
 extern struct device *ux500_soc_device_init(const char *soc_id);
 
diff --git a/arch/arm/mach-versatile/core.c b/arch/arm/mach-versatile/core.c
index cf4687e..cd8ea35 100644
--- a/arch/arm/mach-versatile/core.c
+++ b/arch/arm/mach-versatile/core.c
@@ -169,26 +169,13 @@
 		.pfn		= __phys_to_pfn(VERSATILE_PCI_CFG_BASE),
 		.length		= VERSATILE_PCI_CFG_BASE_SIZE,
 		.type		= MT_DEVICE
-	},
-#if 0
- 	{
-		.virtual	=  VERSATILE_PCI_VIRT_MEM_BASE0,
+	}, {
+		.virtual	=  (unsigned long)VERSATILE_PCI_VIRT_MEM_BASE0,
 		.pfn		= __phys_to_pfn(VERSATILE_PCI_MEM_BASE0),
-		.length		= SZ_16M,
-		.type		= MT_DEVICE
-	}, {
-		.virtual	=  VERSATILE_PCI_VIRT_MEM_BASE1,
-		.pfn		= __phys_to_pfn(VERSATILE_PCI_MEM_BASE1),
-		.length		= SZ_16M,
-		.type		= MT_DEVICE
-	}, {
-		.virtual	=  VERSATILE_PCI_VIRT_MEM_BASE2,
-		.pfn		= __phys_to_pfn(VERSATILE_PCI_MEM_BASE2),
-		.length		= SZ_16M,
+		.length		= IO_SPACE_LIMIT,
 		.type		= MT_DEVICE
 	},
 #endif
-#endif
 };
 
 void __init versatile_map_io(void)
diff --git a/arch/arm/mach-versatile/include/mach/hardware.h b/arch/arm/mach-versatile/include/mach/hardware.h
index 4d4973d..408e58d 100644
--- a/arch/arm/mach-versatile/include/mach/hardware.h
+++ b/arch/arm/mach-versatile/include/mach/hardware.h
@@ -29,8 +29,9 @@
  */
 #define VERSATILE_PCI_VIRT_BASE		(void __iomem *)0xe8000000ul
 #define VERSATILE_PCI_CFG_VIRT_BASE	(void __iomem *)0xe9000000ul
+#define VERSATILE_PCI_VIRT_MEM_BASE0	(void __iomem *)PCIO_BASE
 
-/* macro to get at IO space when running virtually */
+/* macro to get at MMIO space when running virtually */
 #define IO_ADDRESS(x)		(((x) & 0x0fffffff) + (((x) >> 4) & 0x0f000000) + 0xf0000000)
 
 #define __io_address(n)		((void __iomem __force *)IO_ADDRESS(n))
diff --git a/arch/arm/mach-versatile/include/mach/io.h b/arch/arm/mach-versatile/include/mach/io.h
new file mode 100644
index 0000000..0406513
--- /dev/null
+++ b/arch/arm/mach-versatile/include/mach/io.h
@@ -0,0 +1,27 @@
+/*
+ *  arch/arm/mach-versatile/include/mach/io.h
+ *
+ *  Copyright (C) 2003 ARM Limited
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
+ */
+#ifndef __ASM_ARM_ARCH_IO_H
+#define __ASM_ARM_ARCH_IO_H
+
+#define PCIO_BASE	0xeb000000ul
+
+#define __io(a)		((a) + PCIO_BASE)
+
+#endif
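
With this header in place the generic port accessors work again on Versatile: __io() turns a PCI port number into a virtual address inside the statically mapped PCIO_BASE window, the same window the map_desc entry above now covers through VERSATILE_PCI_VIRT_MEM_BASE0. Conceptually, with an arbitrary port number chosen purely for illustration:

	/* inb(port) boils down to a byte load from __io(port), so: */
	u8 status = inb(0x1f7);		/* reads  0xeb000000 + 0x1f7 */
	outb(0x01, 0x1f6);		/* writes 0xeb000000 + 0x1f6 */
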
diff --git a/arch/arm/mach-versatile/pci.c b/arch/arm/mach-versatile/pci.c
index 15c6a00..bec933b 100644
--- a/arch/arm/mach-versatile/pci.c
+++ b/arch/arm/mach-versatile/pci.c
@@ -169,11 +169,18 @@
 	.write	= versatile_write_config,
 };
 
+static struct resource io_port = {
+	.name	= "PCI",
+	.start	= 0,
+	.end	= IO_SPACE_LIMIT,
+	.flags	= IORESOURCE_IO,
+};
+
 static struct resource io_mem = {
 	.name	= "PCI I/O space",
 	.start	= VERSATILE_PCI_MEM_BASE0,
 	.end	= VERSATILE_PCI_MEM_BASE0+VERSATILE_PCI_MEM_BASE0_SIZE-1,
-	.flags	= IORESOURCE_IO,
+	.flags	= IORESOURCE_MEM,
 };
 
 static struct resource non_mem = {
@@ -200,6 +207,12 @@
 		       "memory region (%d)\n", ret);
 		goto out;
 	}
+	ret = request_resource(&ioport_resource, &io_port);
+	if (ret) {
+		printk(KERN_ERR "PCI: unable to allocate I/O "
+		       "port region (%d)\n", ret);
+		goto out;
+	}
 	ret = request_resource(&iomem_resource, &non_mem);
 	if (ret) {
 		printk(KERN_ERR "PCI: unable to allocate non-prefetchable "
@@ -218,7 +231,7 @@
 	 * the mem resource for this bus
 	 * the prefetch mem resource for this bus
 	 */
-	pci_add_resource_offset(&sys->resources, &io_mem, sys->io_offset);
+	pci_add_resource_offset(&sys->resources, &io_port, sys->io_offset);
 	pci_add_resource_offset(&sys->resources, &non_mem, sys->mem_offset);
 	pci_add_resource_offset(&sys->resources, &pre_mem, sys->mem_offset);
 
@@ -249,6 +262,7 @@
 
 	if (nr == 0) {
 		sys->mem_offset = 0;
+		sys->io_offset = 0;
 		ret = pci_versatile_setup_resources(sys);
 		if (ret < 0) {
 			printk("pci_versatile_setup: resources... oops?\n");
diff --git a/arch/arm/mach-vexpress/v2m.c b/arch/arm/mach-vexpress/v2m.c
index 04dd092..fde26ad 100644
--- a/arch/arm/mach-vexpress/v2m.c
+++ b/arch/arm/mach-vexpress/v2m.c
@@ -14,7 +14,6 @@
 #include <linux/ata_platform.h>
 #include <linux/smsc911x.h>
 #include <linux/spinlock.h>
-#include <linux/device.h>
 #include <linux/usb/isp1760.h>
 #include <linux/clkdev.h>
 #include <linux/mtd/physmap.h>
@@ -31,7 +30,6 @@
 #include <asm/hardware/gic.h>
 #include <asm/hardware/timer-sp.h>
 #include <asm/hardware/sp810.h>
-#include <asm/hardware/gic.h>
 
 #include <mach/ct-ca9x4.h>
 #include <mach/motherboard.h>
diff --git a/arch/arm/mm/dma-mapping.c b/arch/arm/mm/dma-mapping.c
index db23ae4..4044abc 100644
--- a/arch/arm/mm/dma-mapping.c
+++ b/arch/arm/mm/dma-mapping.c
@@ -17,8 +17,12 @@
 #include <linux/init.h>
 #include <linux/device.h>
 #include <linux/dma-mapping.h>
+#include <linux/dma-contiguous.h>
 #include <linux/highmem.h>
+#include <linux/memblock.h>
 #include <linux/slab.h>
+#include <linux/iommu.h>
+#include <linux/vmalloc.h>
 
 #include <asm/memory.h>
 #include <asm/highmem.h>
@@ -26,9 +30,112 @@
 #include <asm/tlbflush.h>
 #include <asm/sizes.h>
 #include <asm/mach/arch.h>
+#include <asm/dma-iommu.h>
+#include <asm/mach/map.h>
+#include <asm/system_info.h>
+#include <asm/dma-contiguous.h>
 
 #include "mm.h"
 
+/*
+ * The DMA API is built upon the notion of "buffer ownership".  A buffer
+ * is either exclusively owned by the CPU (and therefore may be accessed
+ * by it) or exclusively owned by the DMA device.  These helper functions
+ * represent the transitions between these two ownership states.
+ *
+ * Note, however, that on later ARMs, this notion does not work due to
+ * speculative prefetches.  We model our approach on the assumption that
+ * the CPU does do speculative prefetches, which means we clean caches
+ * before transfers and delay cache invalidation until transfer completion.
+ *
+ */
+static void __dma_page_cpu_to_dev(struct page *, unsigned long,
+		size_t, enum dma_data_direction);
+static void __dma_page_dev_to_cpu(struct page *, unsigned long,
+		size_t, enum dma_data_direction);
+
+/**
+ * arm_dma_map_page - map a portion of a page for streaming DMA
+ * @dev: valid struct device pointer, or NULL for ISA and EISA-like devices
+ * @page: page that buffer resides in
+ * @offset: offset into page for start of buffer
+ * @size: size of buffer to map
+ * @dir: DMA transfer direction
+ *
+ * Ensure that any data held in the cache is appropriately discarded
+ * or written back.
+ *
+ * The device owns this memory once this call has completed.  The CPU
+ * can regain ownership by calling dma_unmap_page().
+ */
+static dma_addr_t arm_dma_map_page(struct device *dev, struct page *page,
+	     unsigned long offset, size_t size, enum dma_data_direction dir,
+	     struct dma_attrs *attrs)
+{
+	if (!arch_is_coherent())
+		__dma_page_cpu_to_dev(page, offset, size, dir);
+	return pfn_to_dma(dev, page_to_pfn(page)) + offset;
+}
+
+/**
+ * arm_dma_unmap_page - unmap a buffer previously mapped through dma_map_page()
+ * @dev: valid struct device pointer, or NULL for ISA and EISA-like devices
+ * @handle: DMA address of buffer
+ * @size: size of buffer (same as passed to dma_map_page)
+ * @dir: DMA transfer direction (same as passed to dma_map_page)
+ *
+ * Unmap a page streaming mode DMA translation.  The handle and size
+ * must match what was provided in the previous dma_map_page() call.
+ * All other usages are undefined.
+ *
+ * After this call, reads by the CPU to the buffer are guaranteed to see
+ * whatever the device wrote there.
+ */
+static void arm_dma_unmap_page(struct device *dev, dma_addr_t handle,
+		size_t size, enum dma_data_direction dir,
+		struct dma_attrs *attrs)
+{
+	if (!arch_is_coherent())
+		__dma_page_dev_to_cpu(pfn_to_page(dma_to_pfn(dev, handle)),
+				      handle & ~PAGE_MASK, size, dir);
+}
+
+static void arm_dma_sync_single_for_cpu(struct device *dev,
+		dma_addr_t handle, size_t size, enum dma_data_direction dir)
+{
+	unsigned int offset = handle & (PAGE_SIZE - 1);
+	struct page *page = pfn_to_page(dma_to_pfn(dev, handle-offset));
+	if (!arch_is_coherent())
+		__dma_page_dev_to_cpu(page, offset, size, dir);
+}
+
+static void arm_dma_sync_single_for_device(struct device *dev,
+		dma_addr_t handle, size_t size, enum dma_data_direction dir)
+{
+	unsigned int offset = handle & (PAGE_SIZE - 1);
+	struct page *page = pfn_to_page(dma_to_pfn(dev, handle-offset));
+	if (!arch_is_coherent())
+		__dma_page_cpu_to_dev(page, offset, size, dir);
+}
+
+static int arm_dma_set_mask(struct device *dev, u64 dma_mask);
+
+struct dma_map_ops arm_dma_ops = {
+	.alloc			= arm_dma_alloc,
+	.free			= arm_dma_free,
+	.mmap			= arm_dma_mmap,
+	.map_page		= arm_dma_map_page,
+	.unmap_page		= arm_dma_unmap_page,
+	.map_sg			= arm_dma_map_sg,
+	.unmap_sg		= arm_dma_unmap_sg,
+	.sync_single_for_cpu	= arm_dma_sync_single_for_cpu,
+	.sync_single_for_device	= arm_dma_sync_single_for_device,
+	.sync_sg_for_cpu	= arm_dma_sync_sg_for_cpu,
+	.sync_sg_for_device	= arm_dma_sync_sg_for_device,
+	.set_dma_mask		= arm_dma_set_mask,
+};
+EXPORT_SYMBOL(arm_dma_ops);
+
 static u64 get_coherent_dma_mask(struct device *dev)
 {
 	u64 mask = (u64)arm_dma_limit;
@@ -56,6 +163,21 @@
 	return mask;
 }
 
+static void __dma_clear_buffer(struct page *page, size_t size)
+{
+	void *ptr;
+	/*
+	 * Ensure that the allocated pages are zeroed, and that any data
+	 * lurking in the kernel direct-mapped region is invalidated.
+	 */
+	ptr = page_address(page);
+	if (ptr) {
+		memset(ptr, 0, size);
+		dmac_flush_range(ptr, ptr + size);
+		outer_flush_range(__pa(ptr), __pa(ptr) + size);
+	}
+}
+
 /*
  * Allocate a DMA buffer for 'dev' of size 'size' using the
  * specified gfp mask.  Note that 'size' must be page aligned.
@@ -64,23 +186,6 @@
 {
 	unsigned long order = get_order(size);
 	struct page *page, *p, *e;
-	void *ptr;
-	u64 mask = get_coherent_dma_mask(dev);
-
-#ifdef CONFIG_DMA_API_DEBUG
-	u64 limit = (mask + 1) & ~mask;
-	if (limit && size >= limit) {
-		dev_warn(dev, "coherent allocation too big (requested %#x mask %#llx)\n",
-			size, mask);
-		return NULL;
-	}
-#endif
-
-	if (!mask)
-		return NULL;
-
-	if (mask < 0xffffffffULL)
-		gfp |= GFP_DMA;
 
 	page = alloc_pages(gfp, order);
 	if (!page)
@@ -93,14 +198,7 @@
 	for (p = page + (size >> PAGE_SHIFT), e = page + (1 << order); p < e; p++)
 		__free_page(p);
 
-	/*
-	 * Ensure that the allocated pages are zeroed, and that any data
-	 * lurking in the kernel direct-mapped region is invalidated.
-	 */
-	ptr = page_address(page);
-	memset(ptr, 0, size);
-	dmac_flush_range(ptr, ptr + size);
-	outer_flush_range(__pa(ptr), __pa(ptr) + size);
+	__dma_clear_buffer(page, size);
 
 	return page;
 }
@@ -130,7 +228,7 @@
 
 #define DEFAULT_CONSISTENT_DMA_SIZE SZ_2M
 
-unsigned long consistent_base = CONSISTENT_END - DEFAULT_CONSISTENT_DMA_SIZE;
+static unsigned long consistent_base = CONSISTENT_END - DEFAULT_CONSISTENT_DMA_SIZE;
 
 void __init init_consistent_dma_size(unsigned long size)
 {
@@ -170,6 +268,9 @@
 	unsigned long base = consistent_base;
 	unsigned long num_ptes = (CONSISTENT_END - base) >> PMD_SHIFT;
 
+	if (IS_ENABLED(CONFIG_CMA) && !IS_ENABLED(CONFIG_ARM_DMA_USE_IOMMU))
+		return 0;
+
 	consistent_pte = kmalloc(num_ptes * sizeof(pte_t), GFP_KERNEL);
 	if (!consistent_pte) {
 		pr_err("%s: no memory\n", __func__);
@@ -184,14 +285,14 @@
 
 		pud = pud_alloc(&init_mm, pgd, base);
 		if (!pud) {
-			printk(KERN_ERR "%s: no pud tables\n", __func__);
+			pr_err("%s: no pud tables\n", __func__);
 			ret = -ENOMEM;
 			break;
 		}
 
 		pmd = pmd_alloc(&init_mm, pud, base);
 		if (!pmd) {
-			printk(KERN_ERR "%s: no pmd tables\n", __func__);
+			pr_err("%s: no pmd tables\n", __func__);
 			ret = -ENOMEM;
 			break;
 		}
@@ -199,7 +300,7 @@
 
 		pte = pte_alloc_kernel(pmd, base);
 		if (!pte) {
-			printk(KERN_ERR "%s: no pte tables\n", __func__);
+			pr_err("%s: no pte tables\n", __func__);
 			ret = -ENOMEM;
 			break;
 		}
@@ -210,9 +311,101 @@
 
 	return ret;
 }
-
 core_initcall(consistent_init);
 
+static void *__alloc_from_contiguous(struct device *dev, size_t size,
+				     pgprot_t prot, struct page **ret_page);
+
+static struct arm_vmregion_head coherent_head = {
+	.vm_lock	= __SPIN_LOCK_UNLOCKED(&coherent_head.vm_lock),
+	.vm_list	= LIST_HEAD_INIT(coherent_head.vm_list),
+};
+
+static size_t coherent_pool_size = DEFAULT_CONSISTENT_DMA_SIZE / 8;
+
+static int __init early_coherent_pool(char *p)
+{
+	coherent_pool_size = memparse(p, &p);
+	return 0;
+}
+early_param("coherent_pool", early_coherent_pool);
+
+/*
+ * Initialise the coherent pool for atomic allocations.
+ */
+static int __init coherent_init(void)
+{
+	pgprot_t prot = pgprot_dmacoherent(pgprot_kernel);
+	size_t size = coherent_pool_size;
+	struct page *page;
+	void *ptr;
+
+	if (!IS_ENABLED(CONFIG_CMA))
+		return 0;
+
+	ptr = __alloc_from_contiguous(NULL, size, prot, &page);
+	if (ptr) {
+		coherent_head.vm_start = (unsigned long) ptr;
+		coherent_head.vm_end = (unsigned long) ptr + size;
+		printk(KERN_INFO "DMA: preallocated %u KiB pool for atomic coherent allocations\n",
+		       (unsigned)size / 1024);
+		return 0;
+	}
+	printk(KERN_ERR "DMA: failed to allocate %u KiB pool for atomic coherent allocation\n",
+	       (unsigned)size / 1024);
+	return -ENOMEM;
+}
+/*
+ * CMA is activated by core_initcall, so we must be called after it.
+ */
+postcore_initcall(coherent_init);
+
+struct dma_contig_early_reserve {
+	phys_addr_t base;
+	unsigned long size;
+};
+
+static struct dma_contig_early_reserve dma_mmu_remap[MAX_CMA_AREAS] __initdata;
+
+static int dma_mmu_remap_num __initdata;
+
+void __init dma_contiguous_early_fixup(phys_addr_t base, unsigned long size)
+{
+	dma_mmu_remap[dma_mmu_remap_num].base = base;
+	dma_mmu_remap[dma_mmu_remap_num].size = size;
+	dma_mmu_remap_num++;
+}
+
+void __init dma_contiguous_remap(void)
+{
+	int i;
+	for (i = 0; i < dma_mmu_remap_num; i++) {
+		phys_addr_t start = dma_mmu_remap[i].base;
+		phys_addr_t end = start + dma_mmu_remap[i].size;
+		struct map_desc map;
+		unsigned long addr;
+
+		if (end > arm_lowmem_limit)
+			end = arm_lowmem_limit;
+		if (start >= end)
+			return;
+
+		map.pfn = __phys_to_pfn(start);
+		map.virtual = __phys_to_virt(start);
+		map.length = end - start;
+		map.type = MT_MEMORY_DMA_READY;
+
+		/*
+		 * Clear previous low-memory mapping
+		 */
+		for (addr = __phys_to_virt(start); addr < __phys_to_virt(end);
+		     addr += PMD_SIZE)
+			pmd_clear(pmd_off_k(addr));
+
+		iotable_init(&map, 1);
+	}
+}
+
 static void *
 __dma_alloc_remap(struct page *page, size_t size, gfp_t gfp, pgprot_t prot,
 	const void *caller)
@@ -222,7 +415,7 @@
 	int bit;
 
 	if (!consistent_pte) {
-		printk(KERN_ERR "%s: not initialised\n", __func__);
+		pr_err("%s: not initialised\n", __func__);
 		dump_stack();
 		return NULL;
 	}
@@ -249,7 +442,7 @@
 		u32 off = CONSISTENT_OFFSET(c->vm_start) & (PTRS_PER_PTE-1);
 
 		pte = consistent_pte[idx] + off;
-		c->vm_pages = page;
+		c->priv = page;
 
 		do {
 			BUG_ON(!pte_none(*pte));
@@ -281,14 +474,14 @@
 
 	c = arm_vmregion_find_remove(&consistent_head, (unsigned long)cpu_addr);
 	if (!c) {
-		printk(KERN_ERR "%s: trying to free invalid coherent area: %p\n",
+		pr_err("%s: trying to free invalid coherent area: %p\n",
 		       __func__, cpu_addr);
 		dump_stack();
 		return;
 	}
 
 	if ((c->vm_end - c->vm_start) != size) {
-		printk(KERN_ERR "%s: freeing wrong coherent size (%ld != %d)\n",
+		pr_err("%s: freeing wrong coherent size (%ld != %d)\n",
 		       __func__, c->vm_end - c->vm_start, size);
 		dump_stack();
 		size = c->vm_end - c->vm_start;
@@ -310,8 +503,8 @@
 		}
 
 		if (pte_none(pte) || !pte_present(pte))
-			printk(KERN_CRIT "%s: bad page in kernel page table\n",
-			       __func__);
+			pr_crit("%s: bad page in kernel page table\n",
+				__func__);
 	} while (size -= PAGE_SIZE);
 
 	flush_tlb_kernel_range(c->vm_start, c->vm_end);
@@ -319,20 +512,182 @@
 	arm_vmregion_free(&consistent_head, c);
 }
 
+static int __dma_update_pte(pte_t *pte, pgtable_t token, unsigned long addr,
+			    void *data)
+{
+	struct page *page = virt_to_page(addr);
+	pgprot_t prot = *(pgprot_t *)data;
+
+	set_pte_ext(pte, mk_pte(page, prot), 0);
+	return 0;
+}
+
+static void __dma_remap(struct page *page, size_t size, pgprot_t prot)
+{
+	unsigned long start = (unsigned long) page_address(page);
+	unsigned end = start + size;
+
+	apply_to_page_range(&init_mm, start, size, __dma_update_pte, &prot);
+	dsb();
+	flush_tlb_kernel_range(start, end);
+}
+
+static void *__alloc_remap_buffer(struct device *dev, size_t size, gfp_t gfp,
+				 pgprot_t prot, struct page **ret_page,
+				 const void *caller)
+{
+	struct page *page;
+	void *ptr;
+	page = __dma_alloc_buffer(dev, size, gfp);
+	if (!page)
+		return NULL;
+
+	ptr = __dma_alloc_remap(page, size, gfp, prot, caller);
+	if (!ptr) {
+		__dma_free_buffer(page, size);
+		return NULL;
+	}
+
+	*ret_page = page;
+	return ptr;
+}
+
+static void *__alloc_from_pool(struct device *dev, size_t size,
+			       struct page **ret_page, const void *caller)
+{
+	struct arm_vmregion *c;
+	size_t align;
+
+	if (!coherent_head.vm_start) {
+		printk(KERN_ERR "%s: coherent pool not initialised!\n",
+		       __func__);
+		dump_stack();
+		return NULL;
+	}
+
+	/*
+	 * Align the region allocation - allocations from pool are rather
+	 * small, so align them to their order in pages, minimum is a page
+	 * size. This helps reduce fragmentation of the DMA space.
+	 */
+	align = PAGE_SIZE << get_order(size);
+	c = arm_vmregion_alloc(&coherent_head, align, size, 0, caller);
+	if (c) {
+		void *ptr = (void *)c->vm_start;
+		struct page *page = virt_to_page(ptr);
+		*ret_page = page;
+		return ptr;
+	}
+	return NULL;
+}
+
+static int __free_from_pool(void *cpu_addr, size_t size)
+{
+	unsigned long start = (unsigned long)cpu_addr;
+	unsigned long end = start + size;
+	struct arm_vmregion *c;
+
+	if (start < coherent_head.vm_start || end > coherent_head.vm_end)
+		return 0;
+
+	c = arm_vmregion_find_remove(&coherent_head, (unsigned long)start);
+
+	if ((c->vm_end - c->vm_start) != size) {
+		printk(KERN_ERR "%s: freeing wrong coherent size (%ld != %d)\n",
+		       __func__, c->vm_end - c->vm_start, size);
+		dump_stack();
+		size = c->vm_end - c->vm_start;
+	}
+
+	arm_vmregion_free(&coherent_head, c);
+	return 1;
+}
+
+static void *__alloc_from_contiguous(struct device *dev, size_t size,
+				     pgprot_t prot, struct page **ret_page)
+{
+	unsigned long order = get_order(size);
+	size_t count = size >> PAGE_SHIFT;
+	struct page *page;
+
+	page = dma_alloc_from_contiguous(dev, count, order);
+	if (!page)
+		return NULL;
+
+	__dma_clear_buffer(page, size);
+	__dma_remap(page, size, prot);
+
+	*ret_page = page;
+	return page_address(page);
+}
+
+static void __free_from_contiguous(struct device *dev, struct page *page,
+				   size_t size)
+{
+	__dma_remap(page, size, pgprot_kernel);
+	dma_release_from_contiguous(dev, page, size >> PAGE_SHIFT);
+}
+
+static inline pgprot_t __get_dma_pgprot(struct dma_attrs *attrs, pgprot_t prot)
+{
+	prot = dma_get_attr(DMA_ATTR_WRITE_COMBINE, attrs) ?
+			    pgprot_writecombine(prot) :
+			    pgprot_dmacoherent(prot);
+	return prot;
+}
+
+#define nommu() 0
+
 #else	/* !CONFIG_MMU */
 
-#define __dma_alloc_remap(page, size, gfp, prot, c)	page_address(page)
-#define __dma_free_remap(addr, size)			do { } while (0)
+#define nommu() 1
+
+#define __get_dma_pgprot(attrs, prot)	__pgprot(0)
+#define __alloc_remap_buffer(dev, size, gfp, prot, ret, c)	NULL
+#define __alloc_from_pool(dev, size, ret_page, c)		NULL
+#define __alloc_from_contiguous(dev, size, prot, ret)		NULL
+#define __free_from_pool(cpu_addr, size)			0
+#define __free_from_contiguous(dev, page, size)			do { } while (0)
+#define __dma_free_remap(cpu_addr, size)			do { } while (0)
 
 #endif	/* CONFIG_MMU */
 
-static void *
-__dma_alloc(struct device *dev, size_t size, dma_addr_t *handle, gfp_t gfp,
-	    pgprot_t prot, const void *caller)
+static void *__alloc_simple_buffer(struct device *dev, size_t size, gfp_t gfp,
+				   struct page **ret_page)
 {
 	struct page *page;
+	page = __dma_alloc_buffer(dev, size, gfp);
+	if (!page)
+		return NULL;
+
+	*ret_page = page;
+	return page_address(page);
+}
+
+
+
+static void *__dma_alloc(struct device *dev, size_t size, dma_addr_t *handle,
+			 gfp_t gfp, pgprot_t prot, const void *caller)
+{
+	u64 mask = get_coherent_dma_mask(dev);
+	struct page *page;
 	void *addr;
 
+#ifdef CONFIG_DMA_API_DEBUG
+	u64 limit = (mask + 1) & ~mask;
+	if (limit && size >= limit) {
+		dev_warn(dev, "coherent allocation too big (requested %#x mask %#llx)\n",
+			size, mask);
+		return NULL;
+	}
+#endif
+
+	if (!mask)
+		return NULL;
+
+	if (mask < 0xffffffffULL)
+		gfp |= GFP_DMA;
+
 	/*
 	 * Following is a work-around (a.k.a. hack) to prevent pages
 	 * with __GFP_COMP being passed to split_page() which cannot
@@ -342,22 +697,20 @@
 	 */
 	gfp &= ~(__GFP_COMP);
 
-	*handle = ~0;
+	*handle = DMA_ERROR_CODE;
 	size = PAGE_ALIGN(size);
 
-	page = __dma_alloc_buffer(dev, size, gfp);
-	if (!page)
-		return NULL;
-
-	if (!arch_is_coherent())
-		addr = __dma_alloc_remap(page, size, gfp, prot, caller);
+	if (arch_is_coherent() || nommu())
+		addr = __alloc_simple_buffer(dev, size, gfp, &page);
+	else if (!IS_ENABLED(CONFIG_CMA))
+		addr = __alloc_remap_buffer(dev, size, gfp, prot, &page, caller);
+	else if (gfp & GFP_ATOMIC)
+		addr = __alloc_from_pool(dev, size, &page, caller);
 	else
-		addr = page_address(page);
+		addr = __alloc_from_contiguous(dev, size, prot, &page);
 
 	if (addr)
 		*handle = pfn_to_dma(dev, page_to_pfn(page));
-	else
-		__dma_free_buffer(page, size);
 
 	return addr;
 }
@@ -366,138 +719,71 @@
  * Allocate DMA-coherent memory space and return both the kernel remapped
  * virtual and bus address for that space.
  */
-void *
-dma_alloc_coherent(struct device *dev, size_t size, dma_addr_t *handle, gfp_t gfp)
+void *arm_dma_alloc(struct device *dev, size_t size, dma_addr_t *handle,
+		    gfp_t gfp, struct dma_attrs *attrs)
 {
+	pgprot_t prot = __get_dma_pgprot(attrs, pgprot_kernel);
 	void *memory;
 
 	if (dma_alloc_from_coherent(dev, size, handle, &memory))
 		return memory;
 
-	return __dma_alloc(dev, size, handle, gfp,
-			   pgprot_dmacoherent(pgprot_kernel),
+	return __dma_alloc(dev, size, handle, gfp, prot,
 			   __builtin_return_address(0));
 }
-EXPORT_SYMBOL(dma_alloc_coherent);
 
 /*
- * Allocate a writecombining region, in much the same way as
- * dma_alloc_coherent above.
+ * Create userspace mapping for the DMA-coherent memory.
  */
-void *
-dma_alloc_writecombine(struct device *dev, size_t size, dma_addr_t *handle, gfp_t gfp)
-{
-	return __dma_alloc(dev, size, handle, gfp,
-			   pgprot_writecombine(pgprot_kernel),
-			   __builtin_return_address(0));
-}
-EXPORT_SYMBOL(dma_alloc_writecombine);
-
-static int dma_mmap(struct device *dev, struct vm_area_struct *vma,
-		    void *cpu_addr, dma_addr_t dma_addr, size_t size)
+int arm_dma_mmap(struct device *dev, struct vm_area_struct *vma,
+		 void *cpu_addr, dma_addr_t dma_addr, size_t size,
+		 struct dma_attrs *attrs)
 {
 	int ret = -ENXIO;
 #ifdef CONFIG_MMU
-	unsigned long user_size, kern_size;
-	struct arm_vmregion *c;
+	unsigned long pfn = dma_to_pfn(dev, dma_addr);
+	vma->vm_page_prot = __get_dma_pgprot(attrs, vma->vm_page_prot);
 
-	user_size = (vma->vm_end - vma->vm_start) >> PAGE_SHIFT;
+	if (dma_mmap_from_coherent(dev, vma, cpu_addr, size, &ret))
+		return ret;
 
-	c = arm_vmregion_find(&consistent_head, (unsigned long)cpu_addr);
-	if (c) {
-		unsigned long off = vma->vm_pgoff;
-
-		kern_size = (c->vm_end - c->vm_start) >> PAGE_SHIFT;
-
-		if (off < kern_size &&
-		    user_size <= (kern_size - off)) {
-			ret = remap_pfn_range(vma, vma->vm_start,
-					      page_to_pfn(c->vm_pages) + off,
-					      user_size << PAGE_SHIFT,
-					      vma->vm_page_prot);
-		}
-	}
+	ret = remap_pfn_range(vma, vma->vm_start,
+			      pfn + vma->vm_pgoff,
+			      vma->vm_end - vma->vm_start,
+			      vma->vm_page_prot);
 #endif	/* CONFIG_MMU */
 
 	return ret;
 }
 
-int dma_mmap_coherent(struct device *dev, struct vm_area_struct *vma,
-		      void *cpu_addr, dma_addr_t dma_addr, size_t size)
-{
-	vma->vm_page_prot = pgprot_dmacoherent(vma->vm_page_prot);
-	return dma_mmap(dev, vma, cpu_addr, dma_addr, size);
-}
-EXPORT_SYMBOL(dma_mmap_coherent);
-
-int dma_mmap_writecombine(struct device *dev, struct vm_area_struct *vma,
-			  void *cpu_addr, dma_addr_t dma_addr, size_t size)
-{
-	vma->vm_page_prot = pgprot_writecombine(vma->vm_page_prot);
-	return dma_mmap(dev, vma, cpu_addr, dma_addr, size);
-}
-EXPORT_SYMBOL(dma_mmap_writecombine);
-
 /*
- * free a page as defined by the above mapping.
- * Must not be called with IRQs disabled.
+ * Free a buffer as defined by the above mapping.
  */
-void dma_free_coherent(struct device *dev, size_t size, void *cpu_addr, dma_addr_t handle)
+void arm_dma_free(struct device *dev, size_t size, void *cpu_addr,
+		  dma_addr_t handle, struct dma_attrs *attrs)
 {
-	WARN_ON(irqs_disabled());
+	struct page *page = pfn_to_page(dma_to_pfn(dev, handle));
 
 	if (dma_release_from_coherent(dev, get_order(size), cpu_addr))
 		return;
 
 	size = PAGE_ALIGN(size);
 
-	if (!arch_is_coherent())
+	if (arch_is_coherent() || nommu()) {
+		__dma_free_buffer(page, size);
+	} else if (!IS_ENABLED(CONFIG_CMA)) {
 		__dma_free_remap(cpu_addr, size);
-
-	__dma_free_buffer(pfn_to_page(dma_to_pfn(dev, handle)), size);
-}
-EXPORT_SYMBOL(dma_free_coherent);
-
-/*
- * Make an area consistent for devices.
- * Note: Drivers should NOT use this function directly, as it will break
- * platforms with CONFIG_DMABOUNCE.
- * Use the driver DMA support - see dma-mapping.h (dma_sync_*)
- */
-void ___dma_single_cpu_to_dev(const void *kaddr, size_t size,
-	enum dma_data_direction dir)
-{
-	unsigned long paddr;
-
-	BUG_ON(!virt_addr_valid(kaddr) || !virt_addr_valid(kaddr + size - 1));
-
-	dmac_map_area(kaddr, size, dir);
-
-	paddr = __pa(kaddr);
-	if (dir == DMA_FROM_DEVICE) {
-		outer_inv_range(paddr, paddr + size);
+		__dma_free_buffer(page, size);
 	} else {
-		outer_clean_range(paddr, paddr + size);
+		if (__free_from_pool(cpu_addr, size))
+			return;
+		/*
+		 * Non-atomic allocations cannot be freed with IRQs disabled
+		 */
+		WARN_ON(irqs_disabled());
+		__free_from_contiguous(dev, page, size);
 	}
-	/* FIXME: non-speculating: flush on bidirectional mappings? */
 }
-EXPORT_SYMBOL(___dma_single_cpu_to_dev);
-
-void ___dma_single_dev_to_cpu(const void *kaddr, size_t size,
-	enum dma_data_direction dir)
-{
-	BUG_ON(!virt_addr_valid(kaddr) || !virt_addr_valid(kaddr + size - 1));
-
-	/* FIXME: non-speculating: not required */
-	/* don't bother invalidating if DMA to device */
-	if (dir != DMA_TO_DEVICE) {
-		unsigned long paddr = __pa(kaddr);
-		outer_inv_range(paddr, paddr + size);
-	}
-
-	dmac_unmap_area(kaddr, size, dir);
-}
-EXPORT_SYMBOL(___dma_single_dev_to_cpu);
 
 static void dma_cache_maint_page(struct page *page, unsigned long offset,
 	size_t size, enum dma_data_direction dir,
@@ -543,7 +829,13 @@
 	} while (left);
 }
 
-void ___dma_page_cpu_to_dev(struct page *page, unsigned long off,
+/*
+ * Make an area consistent for devices.
+ * Note: Drivers should NOT use this function directly, as it will break
+ * platforms with CONFIG_DMABOUNCE.
+ * Use the driver DMA support - see dma-mapping.h (dma_sync_*)
+ */
+static void __dma_page_cpu_to_dev(struct page *page, unsigned long off,
 	size_t size, enum dma_data_direction dir)
 {
 	unsigned long paddr;
@@ -558,9 +850,8 @@
 	}
 	/* FIXME: non-speculating: flush on bidirectional mappings? */
 }
-EXPORT_SYMBOL(___dma_page_cpu_to_dev);
 
-void ___dma_page_dev_to_cpu(struct page *page, unsigned long off,
+static void __dma_page_dev_to_cpu(struct page *page, unsigned long off,
 	size_t size, enum dma_data_direction dir)
 {
 	unsigned long paddr = page_to_phys(page) + off;
@@ -578,10 +869,9 @@
 	if (dir != DMA_TO_DEVICE && off == 0 && size >= PAGE_SIZE)
 		set_bit(PG_dcache_clean, &page->flags);
 }
-EXPORT_SYMBOL(___dma_page_dev_to_cpu);
 
 /**
- * dma_map_sg - map a set of SG buffers for streaming mode DMA
+ * arm_dma_map_sg - map a set of SG buffers for streaming mode DMA
  * @dev: valid struct device pointer, or NULL for ISA and EISA-like devices
  * @sg: list of buffers
  * @nents: number of buffers to map
@@ -596,32 +886,32 @@
  * Device ownership issues as mentioned for dma_map_single are the same
  * here.
  */
-int dma_map_sg(struct device *dev, struct scatterlist *sg, int nents,
-		enum dma_data_direction dir)
+int arm_dma_map_sg(struct device *dev, struct scatterlist *sg, int nents,
+		enum dma_data_direction dir, struct dma_attrs *attrs)
 {
+	struct dma_map_ops *ops = get_dma_ops(dev);
 	struct scatterlist *s;
 	int i, j;
 
-	BUG_ON(!valid_dma_direction(dir));
-
 	for_each_sg(sg, s, nents, i) {
-		s->dma_address = __dma_map_page(dev, sg_page(s), s->offset,
-						s->length, dir);
+#ifdef CONFIG_NEED_SG_DMA_LENGTH
+		s->dma_length = s->length;
+#endif
+		s->dma_address = ops->map_page(dev, sg_page(s), s->offset,
+						s->length, dir, attrs);
 		if (dma_mapping_error(dev, s->dma_address))
 			goto bad_mapping;
 	}
-	debug_dma_map_sg(dev, sg, nents, nents, dir);
 	return nents;
 
  bad_mapping:
 	for_each_sg(sg, s, i, j)
-		__dma_unmap_page(dev, sg_dma_address(s), sg_dma_len(s), dir);
+		ops->unmap_page(dev, sg_dma_address(s), sg_dma_len(s), dir, attrs);
 	return 0;
 }
-EXPORT_SYMBOL(dma_map_sg);
 
 /**
- * dma_unmap_sg - unmap a set of SG buffers mapped by dma_map_sg
+ * arm_dma_unmap_sg - unmap a set of SG buffers mapped by dma_map_sg
  * @dev: valid struct device pointer, or NULL for ISA and EISA-like devices
  * @sg: list of buffers
  * @nents: number of buffers to unmap (same as was passed to dma_map_sg)
@@ -630,70 +920,55 @@
  * Unmap a set of streaming mode DMA translations.  Again, CPU access
  * rules concerning calls here are the same as for dma_unmap_single().
  */
-void dma_unmap_sg(struct device *dev, struct scatterlist *sg, int nents,
-		enum dma_data_direction dir)
+void arm_dma_unmap_sg(struct device *dev, struct scatterlist *sg, int nents,
+		enum dma_data_direction dir, struct dma_attrs *attrs)
 {
+	struct dma_map_ops *ops = get_dma_ops(dev);
 	struct scatterlist *s;
-	int i;
 
-	debug_dma_unmap_sg(dev, sg, nents, dir);
+	int i;
 
 	for_each_sg(sg, s, nents, i)
-		__dma_unmap_page(dev, sg_dma_address(s), sg_dma_len(s), dir);
+		ops->unmap_page(dev, sg_dma_address(s), sg_dma_len(s), dir, attrs);
 }
-EXPORT_SYMBOL(dma_unmap_sg);
 
 /**
- * dma_sync_sg_for_cpu
+ * arm_dma_sync_sg_for_cpu
  * @dev: valid struct device pointer, or NULL for ISA and EISA-like devices
  * @sg: list of buffers
  * @nents: number of buffers to map (returned from dma_map_sg)
  * @dir: DMA transfer direction (same as was passed to dma_map_sg)
  */
-void dma_sync_sg_for_cpu(struct device *dev, struct scatterlist *sg,
+void arm_dma_sync_sg_for_cpu(struct device *dev, struct scatterlist *sg,
 			int nents, enum dma_data_direction dir)
 {
+	struct dma_map_ops *ops = get_dma_ops(dev);
 	struct scatterlist *s;
 	int i;
 
-	for_each_sg(sg, s, nents, i) {
-		if (!dmabounce_sync_for_cpu(dev, sg_dma_address(s), 0,
-					    sg_dma_len(s), dir))
-			continue;
-
-		__dma_page_dev_to_cpu(sg_page(s), s->offset,
-				      s->length, dir);
-	}
-
-	debug_dma_sync_sg_for_cpu(dev, sg, nents, dir);
+	for_each_sg(sg, s, nents, i)
+		ops->sync_single_for_cpu(dev, sg_dma_address(s), s->length,
+					 dir);
 }
-EXPORT_SYMBOL(dma_sync_sg_for_cpu);
 
 /**
- * dma_sync_sg_for_device
+ * arm_dma_sync_sg_for_device
  * @dev: valid struct device pointer, or NULL for ISA and EISA-like devices
  * @sg: list of buffers
  * @nents: number of buffers to map (returned from dma_map_sg)
  * @dir: DMA transfer direction (same as was passed to dma_map_sg)
  */
-void dma_sync_sg_for_device(struct device *dev, struct scatterlist *sg,
+void arm_dma_sync_sg_for_device(struct device *dev, struct scatterlist *sg,
 			int nents, enum dma_data_direction dir)
 {
+	struct dma_map_ops *ops = get_dma_ops(dev);
 	struct scatterlist *s;
 	int i;
 
-	for_each_sg(sg, s, nents, i) {
-		if (!dmabounce_sync_for_device(dev, sg_dma_address(s), 0,
-					sg_dma_len(s), dir))
-			continue;
-
-		__dma_page_cpu_to_dev(sg_page(s), s->offset,
-				      s->length, dir);
-	}
-
-	debug_dma_sync_sg_for_device(dev, sg, nents, dir);
+	for_each_sg(sg, s, nents, i)
+		ops->sync_single_for_device(dev, sg_dma_address(s), s->length,
+					    dir);
 }
-EXPORT_SYMBOL(dma_sync_sg_for_device);
 
 /*
  * Return whether the given device DMA address mask can be supported
@@ -709,18 +984,15 @@
 }
 EXPORT_SYMBOL(dma_supported);
 
-int dma_set_mask(struct device *dev, u64 dma_mask)
+static int arm_dma_set_mask(struct device *dev, u64 dma_mask)
 {
 	if (!dev->dma_mask || !dma_supported(dev, dma_mask))
 		return -EIO;
 
-#ifndef CONFIG_DMABOUNCE
 	*dev->dma_mask = dma_mask;
-#endif
 
 	return 0;
 }
-EXPORT_SYMBOL(dma_set_mask);
 
 #define PREALLOC_DMA_DEBUG_ENTRIES	4096
 
@@ -733,3 +1005,679 @@
 	return 0;
 }
 fs_initcall(dma_debug_do_init);
+
+#ifdef CONFIG_ARM_DMA_USE_IOMMU
+
+/* IOMMU */
+
+static inline dma_addr_t __alloc_iova(struct dma_iommu_mapping *mapping,
+				      size_t size)
+{
+	unsigned int order = get_order(size);
+	unsigned int align = 0;
+	unsigned int count, start;
+	unsigned long flags;
+
+	count = ((PAGE_ALIGN(size) >> PAGE_SHIFT) +
+		 (1 << mapping->order) - 1) >> mapping->order;
+
+	if (order > mapping->order)
+		align = (1 << (order - mapping->order)) - 1;
+
+	spin_lock_irqsave(&mapping->lock, flags);
+	start = bitmap_find_next_zero_area(mapping->bitmap, mapping->bits, 0,
+					   count, align);
+	if (start > mapping->bits) {
+		spin_unlock_irqrestore(&mapping->lock, flags);
+		return DMA_ERROR_CODE;
+	}
+
+	bitmap_set(mapping->bitmap, start, count);
+	spin_unlock_irqrestore(&mapping->lock, flags);
+
+	return mapping->base + (start << (mapping->order + PAGE_SHIFT));
+}
+
+static inline void __free_iova(struct dma_iommu_mapping *mapping,
+			       dma_addr_t addr, size_t size)
+{
+	unsigned int start = (addr - mapping->base) >>
+			     (mapping->order + PAGE_SHIFT);
+	unsigned int count = ((size >> PAGE_SHIFT) +
+			      (1 << mapping->order) - 1) >> mapping->order;
+	unsigned long flags;
+
+	spin_lock_irqsave(&mapping->lock, flags);
+	bitmap_clear(mapping->bitmap, start, count);
+	spin_unlock_irqrestore(&mapping->lock, flags);
+}
+
+static struct page **__iommu_alloc_buffer(struct device *dev, size_t size, gfp_t gfp)
+{
+	struct page **pages;
+	int count = size >> PAGE_SHIFT;
+	int array_size = count * sizeof(struct page *);
+	int i = 0;
+
+	if (array_size <= PAGE_SIZE)
+		pages = kzalloc(array_size, gfp);
+	else
+		pages = vzalloc(array_size);
+	if (!pages)
+		return NULL;
+
+	while (count) {
+		int j, order = __fls(count);
+
+		pages[i] = alloc_pages(gfp | __GFP_NOWARN, order);
+		while (!pages[i] && order)
+			pages[i] = alloc_pages(gfp | __GFP_NOWARN, --order);
+		if (!pages[i])
+			goto error;
+
+		if (order)
+			split_page(pages[i], order);
+		j = 1 << order;
+		while (--j)
+			pages[i + j] = pages[i] + j;
+
+		__dma_clear_buffer(pages[i], PAGE_SIZE << order);
+		i += 1 << order;
+		count -= 1 << order;
+	}
+
+	return pages;
+error:
+	while (--i)
+		if (pages[i])
+			__free_pages(pages[i], 0);
+	if (array_size < PAGE_SIZE)
+		kfree(pages);
+	else
+		vfree(pages);
+	return NULL;
+}
+
+static int __iommu_free_buffer(struct device *dev, struct page **pages, size_t size)
+{
+	int count = size >> PAGE_SHIFT;
+	int array_size = count * sizeof(struct page *);
+	int i;
+	for (i = 0; i < count; i++)
+		if (pages[i])
+			__free_pages(pages[i], 0);
+	if (array_size < PAGE_SIZE)
+		kfree(pages);
+	else
+		vfree(pages);
+	return 0;
+}
+
+/*
+ * Create a CPU mapping for the specified pages
+ */
+static void *
+__iommu_alloc_remap(struct page **pages, size_t size, gfp_t gfp, pgprot_t prot)
+{
+	struct arm_vmregion *c;
+	size_t align;
+	size_t count = size >> PAGE_SHIFT;
+	int bit;
+
+	if (!consistent_pte[0]) {
+		pr_err("%s: not initialised\n", __func__);
+		dump_stack();
+		return NULL;
+	}
+
+	/*
+	 * Align the virtual region allocation - maximum alignment is
+	 * a section size, minimum is a page size.  This helps reduce
+	 * fragmentation of the DMA space, and also prevents allocations
+	 * smaller than a section from crossing a section boundary.
+	 */
+	bit = fls(size - 1);
+	if (bit > SECTION_SHIFT)
+		bit = SECTION_SHIFT;
+	align = 1 << bit;
+
+	/*
+	 * Allocate a virtual address in the consistent mapping region.
+	 */
+	c = arm_vmregion_alloc(&consistent_head, align, size,
+			    gfp & ~(__GFP_DMA | __GFP_HIGHMEM), NULL);
+	if (c) {
+		pte_t *pte;
+		int idx = CONSISTENT_PTE_INDEX(c->vm_start);
+		int i = 0;
+		u32 off = CONSISTENT_OFFSET(c->vm_start) & (PTRS_PER_PTE-1);
+
+		pte = consistent_pte[idx] + off;
+		c->priv = pages;
+
+		do {
+			BUG_ON(!pte_none(*pte));
+
+			set_pte_ext(pte, mk_pte(pages[i], prot), 0);
+			pte++;
+			off++;
+			i++;
+			if (off >= PTRS_PER_PTE) {
+				off = 0;
+				pte = consistent_pte[++idx];
+			}
+		} while (i < count);
+
+		dsb();
+
+		return (void *)c->vm_start;
+	}
+	return NULL;
+}
+
+/*
+ * Create a mapping in device IO address space for specified pages
+ */
+static dma_addr_t
+__iommu_create_mapping(struct device *dev, struct page **pages, size_t size)
+{
+	struct dma_iommu_mapping *mapping = dev->archdata.mapping;
+	unsigned int count = PAGE_ALIGN(size) >> PAGE_SHIFT;
+	dma_addr_t dma_addr, iova;
+	int i, ret = DMA_ERROR_CODE;
+
+	dma_addr = __alloc_iova(mapping, size);
+	if (dma_addr == DMA_ERROR_CODE)
+		return dma_addr;
+
+	iova = dma_addr;
+	for (i = 0; i < count; ) {
+		unsigned int next_pfn = page_to_pfn(pages[i]) + 1;
+		phys_addr_t phys = page_to_phys(pages[i]);
+		unsigned int len, j;
+
+		for (j = i + 1; j < count; j++, next_pfn++)
+			if (page_to_pfn(pages[j]) != next_pfn)
+				break;
+
+		len = (j - i) << PAGE_SHIFT;
+		ret = iommu_map(mapping->domain, iova, phys, len, 0);
+		if (ret < 0)
+			goto fail;
+		iova += len;
+		i = j;
+	}
+	return dma_addr;
+fail:
+	iommu_unmap(mapping->domain, dma_addr, iova-dma_addr);
+	__free_iova(mapping, dma_addr, size);
+	return DMA_ERROR_CODE;
+}
+
+static int __iommu_remove_mapping(struct device *dev, dma_addr_t iova, size_t size)
+{
+	struct dma_iommu_mapping *mapping = dev->archdata.mapping;
+
+	/*
+	 * add optional in-page offset from iova to size and align
+	 * result to page size
+	 */
+	size = PAGE_ALIGN((iova & ~PAGE_MASK) + size);
+	iova &= PAGE_MASK;
+
+	iommu_unmap(mapping->domain, iova, size);
+	__free_iova(mapping, iova, size);
+	return 0;
+}
+
+static void *arm_iommu_alloc_attrs(struct device *dev, size_t size,
+	    dma_addr_t *handle, gfp_t gfp, struct dma_attrs *attrs)
+{
+	pgprot_t prot = __get_dma_pgprot(attrs, pgprot_kernel);
+	struct page **pages;
+	void *addr = NULL;
+
+	*handle = DMA_ERROR_CODE;
+	size = PAGE_ALIGN(size);
+
+	pages = __iommu_alloc_buffer(dev, size, gfp);
+	if (!pages)
+		return NULL;
+
+	*handle = __iommu_create_mapping(dev, pages, size);
+	if (*handle == DMA_ERROR_CODE)
+		goto err_buffer;
+
+	addr = __iommu_alloc_remap(pages, size, gfp, prot);
+	if (!addr)
+		goto err_mapping;
+
+	return addr;
+
+err_mapping:
+	__iommu_remove_mapping(dev, *handle, size);
+err_buffer:
+	__iommu_free_buffer(dev, pages, size);
+	return NULL;
+}
+
+static int arm_iommu_mmap_attrs(struct device *dev, struct vm_area_struct *vma,
+		    void *cpu_addr, dma_addr_t dma_addr, size_t size,
+		    struct dma_attrs *attrs)
+{
+	struct arm_vmregion *c;
+
+	vma->vm_page_prot = __get_dma_pgprot(attrs, vma->vm_page_prot);
+	c = arm_vmregion_find(&consistent_head, (unsigned long)cpu_addr);
+
+	if (c) {
+		struct page **pages = c->priv;
+
+		unsigned long uaddr = vma->vm_start;
+		unsigned long usize = vma->vm_end - vma->vm_start;
+		int i = 0;
+
+		do {
+			int ret;
+
+			ret = vm_insert_page(vma, uaddr, pages[i++]);
+			if (ret) {
+				pr_err("Remapping memory, error: %d\n", ret);
+				return ret;
+			}
+
+			uaddr += PAGE_SIZE;
+			usize -= PAGE_SIZE;
+		} while (usize > 0);
+	}
+	return 0;
+}
+
+/*
+ * free a page as defined by the above mapping.
+ * Must not be called with IRQs disabled.
+ */
+void arm_iommu_free_attrs(struct device *dev, size_t size, void *cpu_addr,
+			  dma_addr_t handle, struct dma_attrs *attrs)
+{
+	struct arm_vmregion *c;
+	size = PAGE_ALIGN(size);
+
+	c = arm_vmregion_find(&consistent_head, (unsigned long)cpu_addr);
+	if (c) {
+		struct page **pages = c->priv;
+		__dma_free_remap(cpu_addr, size);
+		__iommu_remove_mapping(dev, handle, size);
+		__iommu_free_buffer(dev, pages, size);
+	}
+}
+
+/*
+ * Map a part of the scatter-gather list into contiguous io address space
+ */
+static int __map_sg_chunk(struct device *dev, struct scatterlist *sg,
+			  size_t size, dma_addr_t *handle,
+			  enum dma_data_direction dir)
+{
+	struct dma_iommu_mapping *mapping = dev->archdata.mapping;
+	dma_addr_t iova, iova_base;
+	int ret = 0;
+	unsigned int count;
+	struct scatterlist *s;
+
+	size = PAGE_ALIGN(size);
+	*handle = DMA_ERROR_CODE;
+
+	iova_base = iova = __alloc_iova(mapping, size);
+	if (iova == DMA_ERROR_CODE)
+		return -ENOMEM;
+
+	for (count = 0, s = sg; count < (size >> PAGE_SHIFT); s = sg_next(s)) {
+		phys_addr_t phys = page_to_phys(sg_page(s));
+		unsigned int len = PAGE_ALIGN(s->offset + s->length);
+
+		if (!arch_is_coherent())
+			__dma_page_cpu_to_dev(sg_page(s), s->offset, s->length, dir);
+
+		ret = iommu_map(mapping->domain, iova, phys, len, 0);
+		if (ret < 0)
+			goto fail;
+		count += len >> PAGE_SHIFT;
+		iova += len;
+	}
+	*handle = iova_base;
+
+	return 0;
+fail:
+	iommu_unmap(mapping->domain, iova_base, count * PAGE_SIZE);
+	__free_iova(mapping, iova_base, size);
+	return ret;
+}
+
+/**
+ * arm_iommu_map_sg - map a set of SG buffers for streaming mode DMA
+ * @dev: valid struct device pointer
+ * @sg: list of buffers
+ * @nents: number of buffers to map
+ * @dir: DMA transfer direction
+ *
+ * Map a set of buffers described by scatterlist in streaming mode for DMA.
+ * The scatter gather list elements are merged together (if possible) and
+ * tagged with the appropriate dma address and length. They are obtained via
+ * sg_dma_{address,length}.
+ */
+int arm_iommu_map_sg(struct device *dev, struct scatterlist *sg, int nents,
+		     enum dma_data_direction dir, struct dma_attrs *attrs)
+{
+	struct scatterlist *s = sg, *dma = sg, *start = sg;
+	int i, count = 0;
+	unsigned int offset = s->offset;
+	unsigned int size = s->offset + s->length;
+	unsigned int max = dma_get_max_seg_size(dev);
+
+	for (i = 1; i < nents; i++) {
+		s = sg_next(s);
+
+		s->dma_address = DMA_ERROR_CODE;
+		s->dma_length = 0;
+
+		if (s->offset || (size & ~PAGE_MASK) || size + s->length > max) {
+			if (__map_sg_chunk(dev, start, size, &dma->dma_address,
+			    dir) < 0)
+				goto bad_mapping;
+
+			dma->dma_address += offset;
+			dma->dma_length = size - offset;
+
+			size = offset = s->offset;
+			start = s;
+			dma = sg_next(dma);
+			count += 1;
+		}
+		size += s->length;
+	}
+	if (__map_sg_chunk(dev, start, size, &dma->dma_address, dir) < 0)
+		goto bad_mapping;
+
+	dma->dma_address += offset;
+	dma->dma_length = size - offset;
+
+	return count+1;
+
+bad_mapping:
+	for_each_sg(sg, s, count, i)
+		__iommu_remove_mapping(dev, sg_dma_address(s), sg_dma_len(s));
+	return 0;
+}
+
+/**
+ * arm_iommu_unmap_sg - unmap a set of SG buffers mapped by dma_map_sg
+ * @dev: valid struct device pointer
+ * @sg: list of buffers
+ * @nents: number of buffers to unmap (same as was passed to dma_map_sg)
+ * @dir: DMA transfer direction (same as was passed to dma_map_sg)
+ *
+ * Unmap a set of streaming mode DMA translations.  Again, CPU access
+ * rules concerning calls here are the same as for dma_unmap_single().
+ */
+void arm_iommu_unmap_sg(struct device *dev, struct scatterlist *sg, int nents,
+			enum dma_data_direction dir, struct dma_attrs *attrs)
+{
+	struct scatterlist *s;
+	int i;
+
+	for_each_sg(sg, s, nents, i) {
+		if (sg_dma_len(s))
+			__iommu_remove_mapping(dev, sg_dma_address(s),
+					       sg_dma_len(s));
+		if (!arch_is_coherent())
+			__dma_page_dev_to_cpu(sg_page(s), s->offset,
+					      s->length, dir);
+	}
+}
+
+/**
+ * arm_iommu_sync_sg_for_cpu
+ * @dev: valid struct device pointer
+ * @sg: list of buffers
+ * @nents: number of buffers to map (returned from dma_map_sg)
+ * @dir: DMA transfer direction (same as was passed to dma_map_sg)
+ */
+void arm_iommu_sync_sg_for_cpu(struct device *dev, struct scatterlist *sg,
+			int nents, enum dma_data_direction dir)
+{
+	struct scatterlist *s;
+	int i;
+
+	for_each_sg(sg, s, nents, i)
+		if (!arch_is_coherent())
+			__dma_page_dev_to_cpu(sg_page(s), s->offset, s->length, dir);
+
+}
+
+/**
+ * arm_iommu_sync_sg_for_device
+ * @dev: valid struct device pointer
+ * @sg: list of buffers
+ * @nents: number of buffers to map (returned from dma_map_sg)
+ * @dir: DMA transfer direction (same as was passed to dma_map_sg)
+ */
+void arm_iommu_sync_sg_for_device(struct device *dev, struct scatterlist *sg,
+			int nents, enum dma_data_direction dir)
+{
+	struct scatterlist *s;
+	int i;
+
+	for_each_sg(sg, s, nents, i)
+		if (!arch_is_coherent())
+			__dma_page_cpu_to_dev(sg_page(s), s->offset, s->length, dir);
+}
+
+
+/**
+ * arm_iommu_map_page
+ * @dev: valid struct device pointer
+ * @page: page that buffer resides in
+ * @offset: offset into page for start of buffer
+ * @size: size of buffer to map
+ * @dir: DMA transfer direction
+ *
+ * IOMMU aware version of arm_dma_map_page()
+ */
+static dma_addr_t arm_iommu_map_page(struct device *dev, struct page *page,
+	     unsigned long offset, size_t size, enum dma_data_direction dir,
+	     struct dma_attrs *attrs)
+{
+	struct dma_iommu_mapping *mapping = dev->archdata.mapping;
+	dma_addr_t dma_addr;
+	int ret, len = PAGE_ALIGN(size + offset);
+
+	if (!arch_is_coherent())
+		__dma_page_cpu_to_dev(page, offset, size, dir);
+
+	dma_addr = __alloc_iova(mapping, len);
+	if (dma_addr == DMA_ERROR_CODE)
+		return dma_addr;
+
+	ret = iommu_map(mapping->domain, dma_addr, page_to_phys(page), len, 0);
+	if (ret < 0)
+		goto fail;
+
+	return dma_addr + offset;
+fail:
+	__free_iova(mapping, dma_addr, len);
+	return DMA_ERROR_CODE;
+}
+
+/**
+ * arm_iommu_unmap_page
+ * @dev: valid struct device pointer
+ * @handle: DMA address of buffer
+ * @size: size of buffer (same as passed to dma_map_page)
+ * @dir: DMA transfer direction (same as passed to dma_map_page)
+ *
+ * IOMMU aware version of arm_dma_unmap_page()
+ */
+static void arm_iommu_unmap_page(struct device *dev, dma_addr_t handle,
+		size_t size, enum dma_data_direction dir,
+		struct dma_attrs *attrs)
+{
+	struct dma_iommu_mapping *mapping = dev->archdata.mapping;
+	dma_addr_t iova = handle & PAGE_MASK;
+	struct page *page = phys_to_page(iommu_iova_to_phys(mapping->domain, iova));
+	int offset = handle & ~PAGE_MASK;
+	int len = PAGE_ALIGN(size + offset);
+
+	if (!iova)
+		return;
+
+	if (!arch_is_coherent())
+		__dma_page_dev_to_cpu(page, offset, size, dir);
+
+	iommu_unmap(mapping->domain, iova, len);
+	__free_iova(mapping, iova, len);
+}
+
+static void arm_iommu_sync_single_for_cpu(struct device *dev,
+		dma_addr_t handle, size_t size, enum dma_data_direction dir)
+{
+	struct dma_iommu_mapping *mapping = dev->archdata.mapping;
+	dma_addr_t iova = handle & PAGE_MASK;
+	struct page *page = phys_to_page(iommu_iova_to_phys(mapping->domain, iova));
+	unsigned int offset = handle & ~PAGE_MASK;
+
+	if (!iova)
+		return;
+
+	if (!arch_is_coherent())
+		__dma_page_dev_to_cpu(page, offset, size, dir);
+}
+
+static void arm_iommu_sync_single_for_device(struct device *dev,
+		dma_addr_t handle, size_t size, enum dma_data_direction dir)
+{
+	struct dma_iommu_mapping *mapping = dev->archdata.mapping;
+	dma_addr_t iova = handle & PAGE_MASK;
+	struct page *page = phys_to_page(iommu_iova_to_phys(mapping->domain, iova));
+	unsigned int offset = handle & ~PAGE_MASK;
+
+	if (!iova)
+		return;
+
+	__dma_page_cpu_to_dev(page, offset, size, dir);
+}
+
+struct dma_map_ops iommu_ops = {
+	.alloc		= arm_iommu_alloc_attrs,
+	.free		= arm_iommu_free_attrs,
+	.mmap		= arm_iommu_mmap_attrs,
+
+	.map_page		= arm_iommu_map_page,
+	.unmap_page		= arm_iommu_unmap_page,
+	.sync_single_for_cpu	= arm_iommu_sync_single_for_cpu,
+	.sync_single_for_device	= arm_iommu_sync_single_for_device,
+
+	.map_sg			= arm_iommu_map_sg,
+	.unmap_sg		= arm_iommu_unmap_sg,
+	.sync_sg_for_cpu	= arm_iommu_sync_sg_for_cpu,
+	.sync_sg_for_device	= arm_iommu_sync_sg_for_device,
+};
+
+/**
+ * arm_iommu_create_mapping
+ * @bus: pointer to the bus holding the client device (for IOMMU calls)
+ * @base: start address of the valid IO address space
+ * @size: size of the valid IO address space
+ * @order: accuracy of the IO addresses allocations
+ *
+ * Creates a mapping structure which holds information about used/unused
+ * IO address ranges, which is required to perform memory allocation and
+ * mapping with IOMMU aware functions.
+ *
+ * The client device needs to be attached to the mapping with
+ * arm_iommu_attach_device function.
+ */
+struct dma_iommu_mapping *
+arm_iommu_create_mapping(struct bus_type *bus, dma_addr_t base, size_t size,
+			 int order)
+{
+	unsigned int count = size >> (PAGE_SHIFT + order);
+	unsigned int bitmap_size = BITS_TO_LONGS(count) * sizeof(long);
+	struct dma_iommu_mapping *mapping;
+	int err = -ENOMEM;
+
+	if (!count)
+		return ERR_PTR(-EINVAL);
+
+	mapping = kzalloc(sizeof(struct dma_iommu_mapping), GFP_KERNEL);
+	if (!mapping)
+		goto err;
+
+	mapping->bitmap = kzalloc(bitmap_size, GFP_KERNEL);
+	if (!mapping->bitmap)
+		goto err2;
+
+	mapping->base = base;
+	mapping->bits = BITS_PER_BYTE * bitmap_size;
+	mapping->order = order;
+	spin_lock_init(&mapping->lock);
+
+	mapping->domain = iommu_domain_alloc(bus);
+	if (!mapping->domain)
+		goto err3;
+
+	kref_init(&mapping->kref);
+	return mapping;
+err3:
+	kfree(mapping->bitmap);
+err2:
+	kfree(mapping);
+err:
+	return ERR_PTR(err);
+}
+
+static void release_iommu_mapping(struct kref *kref)
+{
+	struct dma_iommu_mapping *mapping =
+		container_of(kref, struct dma_iommu_mapping, kref);
+
+	iommu_domain_free(mapping->domain);
+	kfree(mapping->bitmap);
+	kfree(mapping);
+}
+
+void arm_iommu_release_mapping(struct dma_iommu_mapping *mapping)
+{
+	if (mapping)
+		kref_put(&mapping->kref, release_iommu_mapping);
+}
+
+/**
+ * arm_iommu_attach_device
+ * @dev: valid struct device pointer
+ * @mapping: io address space mapping structure (returned from
+ *	arm_iommu_create_mapping)
+ *
+ * Attaches specified io address space mapping to the provided device,
+ * this replaces the dma operations (dma_map_ops pointer) with the
+ * IOMMU aware version. More than one client might be attached to
+ * the same io address space mapping.
+ */
+int arm_iommu_attach_device(struct device *dev,
+			    struct dma_iommu_mapping *mapping)
+{
+	int err;
+
+	err = iommu_attach_device(mapping->domain, dev);
+	if (err)
+		return err;
+
+	kref_get(&mapping->kref);
+	dev->archdata.mapping = mapping;
+	set_dma_ops(dev, &iommu_ops);
+
+	pr_info("Attached IOMMU controller to %s device.\n", dev_name(dev));
+	return 0;
+}
+
+#endif
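
A minimal usage sketch of the IOMMU-aware DMA mapping API added above, not part of the patch itself; the bus type, IO window (base, size, order) and error handling are illustrative assumptions:

/* Illustrative sketch only: the window placement and size are made up. */
#include <linux/device.h>
#include <linux/err.h>
#include <linux/platform_device.h>
#include <asm/dma-iommu.h>

static int example_setup_iommu_dma(struct device *dev)
{
	struct dma_iommu_mapping *mapping;
	int ret;

	/* 128 MiB IO window at 0x80000000; order 0 = one page per bitmap bit */
	mapping = arm_iommu_create_mapping(&platform_bus_type,
					   0x80000000, 0x08000000, 0);
	if (IS_ERR(mapping))
		return PTR_ERR(mapping);

	ret = arm_iommu_attach_device(dev, mapping);
	if (ret) {
		arm_iommu_release_mapping(mapping);
		return ret;
	}

	/* dma_alloc_coherent()/dma_map_page() on dev now go through iommu_ops */
	return 0;
}
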
diff --git a/arch/arm/mm/init.c b/arch/arm/mm/init.c
index 8f5813b..f54d592 100644
--- a/arch/arm/mm/init.c
+++ b/arch/arm/mm/init.c
@@ -20,6 +20,7 @@
 #include <linux/highmem.h>
 #include <linux/gfp.h>
 #include <linux/memblock.h>
+#include <linux/dma-contiguous.h>
 
 #include <asm/mach-types.h>
 #include <asm/memblock.h>
@@ -211,7 +212,7 @@
  * allocations.  This must be the smallest DMA mask in the system,
  * so a successful GFP_DMA allocation will always satisfy this.
  */
-u32 arm_dma_limit;
+phys_addr_t arm_dma_limit;
 
 static void __init arm_adjust_dma_zone(unsigned long *size, unsigned long *hole,
 	unsigned long dma_size)
@@ -226,6 +227,17 @@
 }
 #endif
 
+void __init setup_dma_zone(struct machine_desc *mdesc)
+{
+#ifdef CONFIG_ZONE_DMA
+	if (mdesc->dma_zone_size) {
+		arm_dma_zone_size = mdesc->dma_zone_size;
+		arm_dma_limit = PHYS_OFFSET + arm_dma_zone_size - 1;
+	} else
+		arm_dma_limit = 0xffffffff;
+#endif
+}
+
 static void __init arm_bootmem_free(unsigned long min, unsigned long max_low,
 	unsigned long max_high)
 {
@@ -273,12 +285,9 @@
 	 * Adjust the sizes according to any special requirements for
 	 * this machine type.
 	 */
-	if (arm_dma_zone_size) {
+	if (arm_dma_zone_size)
 		arm_adjust_dma_zone(zone_size, zhole_size,
 			arm_dma_zone_size >> PAGE_SHIFT);
-		arm_dma_limit = PHYS_OFFSET + arm_dma_zone_size - 1;
-	} else
-		arm_dma_limit = 0xffffffff;
 #endif
 
 	free_area_init_node(0, zone_size, min, zhole_size);
@@ -364,6 +373,12 @@
 	if (mdesc->reserve)
 		mdesc->reserve();
 
+	/*
+	 * Reserve memory for DMA contiguous allocations; it must come
+	 * from the DMA area inside low memory.
+	 */
+	dma_contiguous_reserve(min(arm_dma_limit, arm_lowmem_limit));
+
 	arm_memblock_steal_permitted = false;
 	memblock_allow_resize();
 	memblock_dump_all();
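
For reference, a hedged sketch of the board-side counterpart: a machine descriptor that sets dma_zone_size, which setup_dma_zone() turns into arm_dma_limit and dma_contiguous_reserve() then respects. The board name, callbacks and the 64 MiB figure are hypothetical:

/* Hypothetical board file -- every name and size here is illustrative. */
#include <asm/mach/arch.h>

MACHINE_START(EXAMPLE_BOARD, "Example board with a 64 MiB DMA zone")
	.atag_offset	= 0x100,
	.dma_zone_size	= SZ_64M,	/* arm_dma_limit = PHYS_OFFSET + SZ_64M - 1 */
	.map_io		= example_map_io,
	.init_irq	= example_init_irq,
	.timer		= &example_timer,
	.init_machine	= example_init_machine,
MACHINE_END
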
diff --git a/arch/arm/mm/mm.h b/arch/arm/mm/mm.h
index 27f4a61..c471436 100644
--- a/arch/arm/mm/mm.h
+++ b/arch/arm/mm/mm.h
@@ -62,10 +62,13 @@
 #endif
 
 #ifdef CONFIG_ZONE_DMA
-extern u32 arm_dma_limit;
+extern phys_addr_t arm_dma_limit;
 #else
 #define arm_dma_limit ((u32)~0)
 #endif
 
+extern phys_addr_t arm_lowmem_limit;
+
 void __init bootmem_init(void);
 void arm_mm_memblock_reserve(void);
+void dma_contiguous_remap(void);
diff --git a/arch/arm/mm/mmu.c b/arch/arm/mm/mmu.c
index aa78de8..e5dad60 100644
--- a/arch/arm/mm/mmu.c
+++ b/arch/arm/mm/mmu.c
@@ -288,6 +288,11 @@
 				PMD_SECT_UNCACHED | PMD_SECT_XN,
 		.domain    = DOMAIN_KERNEL,
 	},
+	[MT_MEMORY_DMA_READY] = {
+		.prot_pte  = L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_DIRTY,
+		.prot_l1   = PMD_TYPE_TABLE,
+		.domain    = DOMAIN_KERNEL,
+	},
 };
 
 const struct mem_type *get_mem_type(unsigned int type)
@@ -429,6 +434,7 @@
 	if (arch_is_coherent() && cpu_is_xsc3()) {
 		mem_types[MT_MEMORY].prot_sect |= PMD_SECT_S;
 		mem_types[MT_MEMORY].prot_pte |= L_PTE_SHARED;
+		mem_types[MT_MEMORY_DMA_READY].prot_pte |= L_PTE_SHARED;
 		mem_types[MT_MEMORY_NONCACHED].prot_sect |= PMD_SECT_S;
 		mem_types[MT_MEMORY_NONCACHED].prot_pte |= L_PTE_SHARED;
 	}
@@ -460,6 +466,7 @@
 			mem_types[MT_DEVICE_CACHED].prot_pte |= L_PTE_SHARED;
 			mem_types[MT_MEMORY].prot_sect |= PMD_SECT_S;
 			mem_types[MT_MEMORY].prot_pte |= L_PTE_SHARED;
+			mem_types[MT_MEMORY_DMA_READY].prot_pte |= L_PTE_SHARED;
 			mem_types[MT_MEMORY_NONCACHED].prot_sect |= PMD_SECT_S;
 			mem_types[MT_MEMORY_NONCACHED].prot_pte |= L_PTE_SHARED;
 		}
@@ -512,6 +519,7 @@
 	mem_types[MT_HIGH_VECTORS].prot_l1 |= ecc_mask;
 	mem_types[MT_MEMORY].prot_sect |= ecc_mask | cp->pmd;
 	mem_types[MT_MEMORY].prot_pte |= kern_pgprot;
+	mem_types[MT_MEMORY_DMA_READY].prot_pte |= kern_pgprot;
 	mem_types[MT_MEMORY_NONCACHED].prot_sect |= ecc_mask;
 	mem_types[MT_ROM].prot_sect |= cp->pmd;
 
@@ -596,7 +604,7 @@
 	 * L1 entries, whereas PGDs refer to a group of L1 entries making
 	 * up one logical pointer to an L2 table.
 	 */
-	if (((addr | end | phys) & ~SECTION_MASK) == 0) {
+	if (type->prot_sect && ((addr | end | phys) & ~SECTION_MASK) == 0) {
 		pmd_t *p = pmd;
 
 #ifndef CONFIG_ARM_LPAE
@@ -814,7 +822,7 @@
 }
 early_param("vmalloc", early_vmalloc);
 
-static phys_addr_t lowmem_limit __initdata = 0;
+phys_addr_t arm_lowmem_limit __initdata = 0;
 
 void __init sanity_check_meminfo(void)
 {
@@ -897,8 +905,8 @@
 			bank->size = newsize;
 		}
 #endif
-		if (!bank->highmem && bank->start + bank->size > lowmem_limit)
-			lowmem_limit = bank->start + bank->size;
+		if (!bank->highmem && bank->start + bank->size > arm_lowmem_limit)
+			arm_lowmem_limit = bank->start + bank->size;
 
 		j++;
 	}
@@ -923,8 +931,8 @@
 	}
 #endif
 	meminfo.nr_banks = j;
-	high_memory = __va(lowmem_limit - 1) + 1;
-	memblock_set_current_limit(lowmem_limit);
+	high_memory = __va(arm_lowmem_limit - 1) + 1;
+	memblock_set_current_limit(arm_lowmem_limit);
 }
 
 static inline void prepare_page_table(void)
@@ -949,8 +957,8 @@
 	 * Find the end of the first block of lowmem.
 	 */
 	end = memblock.memory.regions[0].base + memblock.memory.regions[0].size;
-	if (end >= lowmem_limit)
-		end = lowmem_limit;
+	if (end >= arm_lowmem_limit)
+		end = arm_lowmem_limit;
 
 	/*
 	 * Clear out all the kernel space mappings, except for the first
@@ -1093,8 +1101,8 @@
 		phys_addr_t end = start + reg->size;
 		struct map_desc map;
 
-		if (end > lowmem_limit)
-			end = lowmem_limit;
+		if (end > arm_lowmem_limit)
+			end = arm_lowmem_limit;
 		if (start >= end)
 			break;
 
@@ -1115,11 +1123,12 @@
 {
 	void *zero_page;
 
-	memblock_set_current_limit(lowmem_limit);
+	memblock_set_current_limit(arm_lowmem_limit);
 
 	build_mem_type_table();
 	prepare_page_table();
 	map_lowmem();
+	dma_contiguous_remap();
 	devicemaps_init(mdesc);
 	kmap_init();
 
diff --git a/arch/arm/mm/vmregion.h b/arch/arm/mm/vmregion.h
index 162be66..bf312c3 100644
--- a/arch/arm/mm/vmregion.h
+++ b/arch/arm/mm/vmregion.h
@@ -17,7 +17,7 @@
 	struct list_head	vm_list;
 	unsigned long		vm_start;
 	unsigned long		vm_end;
-	struct page		*vm_pages;
+	void			*priv;
 	int			vm_active;
 	const void		*caller;
 };
diff --git a/arch/arm/net/bpf_jit_32.c b/arch/arm/net/bpf_jit_32.c
index 62135849..c641fb6 100644
--- a/arch/arm/net/bpf_jit_32.c
+++ b/arch/arm/net/bpf_jit_32.c
@@ -762,6 +762,11 @@
 			update_on_xread(ctx);
 			emit(ARM_MOV_R(r_A, r_X), ctx);
 			break;
+		case BPF_S_ANC_ALU_XOR_X:
+			/* A ^= X */
+			update_on_xread(ctx);
+			emit(ARM_EOR_R(r_A, r_A, r_X), ctx);
+			break;
 		case BPF_S_ANC_PROTOCOL:
 			/* A = ntohs(skb->protocol) */
 			ctx->seen |= SEEN_SKB;
diff --git a/arch/arm/net/bpf_jit_32.h b/arch/arm/net/bpf_jit_32.h
index 99ae5e3..7fa2f7d 100644
--- a/arch/arm/net/bpf_jit_32.h
+++ b/arch/arm/net/bpf_jit_32.h
@@ -68,6 +68,8 @@
 #define ARM_INST_CMP_R		0x01500000
 #define ARM_INST_CMP_I		0x03500000
 
+#define ARM_INST_EOR_R		0x00200000
+
 #define ARM_INST_LDRB_I		0x05d00000
 #define ARM_INST_LDRB_R		0x07d00000
 #define ARM_INST_LDRH_I		0x01d000b0
@@ -132,6 +134,8 @@
 #define ARM_CMP_R(rn, rm)	_AL3_R(ARM_INST_CMP, 0, rn, rm)
 #define ARM_CMP_I(rn, imm)	_AL3_I(ARM_INST_CMP, 0, rn, imm)
 
+#define ARM_EOR_R(rd, rn, rm)	_AL3_R(ARM_INST_EOR, rd, rn, rm)
+
 #define ARM_LDR_I(rt, rn, off)	(ARM_INST_LDR_I | (rt) << 12 | (rn) << 16 \
 				 | (off))
 #define ARM_LDRB_I(rt, rn, off)	(ARM_INST_LDRB_I | (rt) << 12 | (rn) << 16 \
diff --git a/arch/arm/plat-mxc/clock.c b/arch/arm/plat-mxc/clock.c
index 2ed3ab1..5079787 100644
--- a/arch/arm/plat-mxc/clock.c
+++ b/arch/arm/plat-mxc/clock.c
@@ -41,6 +41,7 @@
 #include <mach/clock.h>
 #include <mach/hardware.h>
 
+#ifndef CONFIG_COMMON_CLK
 static LIST_HEAD(clocks);
 static DEFINE_MUTEX(clocks_mutex);
 
@@ -200,6 +201,16 @@
 }
 EXPORT_SYMBOL(clk_get_parent);
 
+#else
+
+/*
+ * Lock to protect the clock module (ccm) registers. Used
+ * on all i.MXs
+ */
+DEFINE_SPINLOCK(imx_ccm_lock);
+
+#endif /* CONFIG_COMMON_CLK */
+
 /*
  * Get the resulting clock rate from a PLL register value and the input
  * frequency. PLLs with this register layout can at least be found on
diff --git a/arch/arm/plat-mxc/epit.c b/arch/arm/plat-mxc/epit.c
index 9129c9e..88726f4 100644
--- a/arch/arm/plat-mxc/epit.c
+++ b/arch/arm/plat-mxc/epit.c
@@ -50,6 +50,7 @@
 #include <linux/irq.h>
 #include <linux/clockchips.h>
 #include <linux/clk.h>
+#include <linux/err.h>
 
 #include <mach/hardware.h>
 #include <asm/mach/time.h>
@@ -201,8 +202,16 @@
 	return 0;
 }
 
-void __init epit_timer_init(struct clk *timer_clk, void __iomem *base, int irq)
+void __init epit_timer_init(void __iomem *base, int irq)
 {
+	struct clk *timer_clk;
+
+	timer_clk = clk_get_sys("imx-epit.0", NULL);
+	if (IS_ERR(timer_clk)) {
+		pr_err("i.MX epit: unable to get clk\n");
+		return;
+	}
+
 	clk_prepare_enable(timer_clk);
 
 	timer_base = base;
diff --git a/arch/arm/plat-mxc/include/mach/clock.h b/arch/arm/plat-mxc/include/mach/clock.h
index 753a598..bd940c7 100644
--- a/arch/arm/plat-mxc/include/mach/clock.h
+++ b/arch/arm/plat-mxc/include/mach/clock.h
@@ -23,6 +23,7 @@
 #ifndef __ASSEMBLY__
 #include <linux/list.h>
 
+#ifndef CONFIG_COMMON_CLK
 struct module;
 
 struct clk {
@@ -59,6 +60,9 @@
 
 int clk_register(struct clk *clk);
 void clk_unregister(struct clk *clk);
+#endif /* CONFIG_COMMON_CLK */
+
+extern spinlock_t imx_ccm_lock;
 
 unsigned long mxc_decode_pll(unsigned int pll, u32 f_ref);
 
diff --git a/arch/arm/plat-mxc/include/mach/common.h b/arch/arm/plat-mxc/include/mach/common.h
index 0319c4a..e429ca1 100644
--- a/arch/arm/plat-mxc/include/mach/common.h
+++ b/arch/arm/plat-mxc/include/mach/common.h
@@ -53,8 +53,9 @@
 extern void imx50_soc_init(void);
 extern void imx51_soc_init(void);
 extern void imx53_soc_init(void);
-extern void epit_timer_init(struct clk *timer_clk, void __iomem *base, int irq);
-extern void mxc_timer_init(struct clk *timer_clk, void __iomem *, int);
+extern void imx51_init_late(void);
+extern void epit_timer_init(void __iomem *base, int irq);
+extern void mxc_timer_init(void __iomem *, int);
 extern int mx1_clocks_init(unsigned long fref);
 extern int mx21_clocks_init(unsigned long lref, unsigned long fref);
 extern int mx25_clocks_init(void);
@@ -149,4 +150,10 @@
 static inline void imx6q_pm_init(void) {}
 #endif
 
+#ifdef CONFIG_NEON
+extern int mx51_neon_fixup(void);
+#else
+static inline int mx51_neon_fixup(void) { return 0; }
+#endif
+
 #endif
diff --git a/arch/arm/plat-mxc/include/mach/debug-macro.S b/arch/arm/plat-mxc/include/mach/debug-macro.S
index 8ddda36..761e45f 100644
--- a/arch/arm/plat-mxc/include/mach/debug-macro.S
+++ b/arch/arm/plat-mxc/include/mach/debug-macro.S
@@ -24,6 +24,8 @@
 #define UART_PADDR	MX51_UART1_BASE_ADDR
 #elif defined (CONFIG_DEBUG_IMX50_IMX53_UART)
 #define UART_PADDR	MX53_UART1_BASE_ADDR
+#elif defined (CONFIG_DEBUG_IMX6Q_UART2)
+#define UART_PADDR	MX6Q_UART2_BASE_ADDR
 #elif defined (CONFIG_DEBUG_IMX6Q_UART4)
 #define UART_PADDR	MX6Q_UART4_BASE_ADDR
 #endif
diff --git a/arch/arm/plat-mxc/include/mach/mx2_cam.h b/arch/arm/plat-mxc/include/mach/mx2_cam.h
index 7ded6f1..3c080a3 100644
--- a/arch/arm/plat-mxc/include/mach/mx2_cam.h
+++ b/arch/arm/plat-mxc/include/mach/mx2_cam.h
@@ -23,6 +23,7 @@
 #ifndef __MACH_MX2_CAM_H_
 #define __MACH_MX2_CAM_H_
 
+#define MX2_CAMERA_SWAP16		(1 << 0)
 #define MX2_CAMERA_EXT_VSYNC		(1 << 1)
 #define MX2_CAMERA_CCIR			(1 << 2)
 #define MX2_CAMERA_CCIR_INTERLACE	(1 << 3)
@@ -30,6 +31,7 @@
 #define MX2_CAMERA_GATED_CLOCK		(1 << 5)
 #define MX2_CAMERA_INV_DATA		(1 << 6)
 #define MX2_CAMERA_PCLK_SAMPLE_RISING	(1 << 7)
+#define MX2_CAMERA_PACK_DIR_MSB		(1 << 8)
 
 /**
  * struct mx2_camera_platform_data - optional platform data for mx2_camera
diff --git a/arch/arm/plat-mxc/include/mach/mx6q.h b/arch/arm/plat-mxc/include/mach/mx6q.h
index 254a561..f7e7dba 100644
--- a/arch/arm/plat-mxc/include/mach/mx6q.h
+++ b/arch/arm/plat-mxc/include/mach/mx6q.h
@@ -27,6 +27,8 @@
 #define MX6Q_CCM_SIZE			0x4000
 #define MX6Q_ANATOP_BASE_ADDR		0x020c8000
 #define MX6Q_ANATOP_SIZE		0x1000
+#define MX6Q_UART2_BASE_ADDR		0x021e8000
+#define MX6Q_UART2_SIZE			0x4000
 #define MX6Q_UART4_BASE_ADDR		0x021f0000
 #define MX6Q_UART4_SIZE			0x4000
 
diff --git a/arch/arm/plat-mxc/time.c b/arch/arm/plat-mxc/time.c
index 7daf7c9..00e8e659 100644
--- a/arch/arm/plat-mxc/time.c
+++ b/arch/arm/plat-mxc/time.c
@@ -25,6 +25,7 @@
 #include <linux/irq.h>
 #include <linux/clockchips.h>
 #include <linux/clk.h>
+#include <linux/err.h>
 
 #include <mach/hardware.h>
 #include <asm/sched_clock.h>
@@ -57,6 +58,7 @@
 /* MX31, MX35, MX25, MX5 */
 #define V2_TCTL_WAITEN		(1 << 3) /* Wait enable mode */
 #define V2_TCTL_CLK_IPG		(1 << 6)
+#define V2_TCTL_CLK_PER		(2 << 6)
 #define V2_TCTL_FRR		(1 << 9)
 #define V2_IR			0x0c
 #define V2_TSTAT		0x08
@@ -279,9 +281,21 @@
 	return 0;
 }
 
-void __init mxc_timer_init(struct clk *timer_clk, void __iomem *base, int irq)
+void __init mxc_timer_init(void __iomem *base, int irq)
 {
 	uint32_t tctl_val;
+	struct clk *timer_clk;
+	struct clk *timer_ipg_clk;
+
+	timer_clk = clk_get_sys("imx-gpt.0", "per");
+	if (IS_ERR(timer_clk)) {
+		pr_err("i.MX timer: unable to get clk\n");
+		return;
+	}
+
+	timer_ipg_clk = clk_get_sys("imx-gpt.0", "ipg");
+	if (!IS_ERR(timer_ipg_clk))
+		clk_prepare_enable(timer_ipg_clk);
 
 	clk_prepare_enable(timer_clk);
 
@@ -295,7 +309,7 @@
 	__raw_writel(0, timer_base + MXC_TPRER); /* see datasheet note */
 
 	if (timer_is_v2())
-		tctl_val = V2_TCTL_CLK_IPG | V2_TCTL_FRR | V2_TCTL_WAITEN | MXC_TCTL_TEN;
+		tctl_val = V2_TCTL_CLK_PER | V2_TCTL_FRR | V2_TCTL_WAITEN | MXC_TCTL_TEN;
 	else
 		tctl_val = MX1_2_TCTL_FRR | MX1_2_TCTL_CLK_PCLK1 | MXC_TCTL_TEN;
 
diff --git a/arch/arm/plat-omap/clock.c b/arch/arm/plat-omap/clock.c
index 62ec5c4..706b7e2 100644
--- a/arch/arm/plat-omap/clock.c
+++ b/arch/arm/plat-omap/clock.c
@@ -461,6 +461,7 @@
 	struct clk *c;
 	struct clk *pa;
 
+	mutex_lock(&clocks_mutex);
 	seq_printf(s, "%-30s %-30s %-10s %s\n",
 		"clock-name", "parent-name", "rate", "use-count");
 
@@ -469,6 +470,7 @@
 		seq_printf(s, "%-30s %-30s %-10lu %d\n",
 			c->name, pa ? pa->name : "none", c->rate, c->usecount);
 	}
+	mutex_unlock(&clocks_mutex);
 
 	return 0;
 }
diff --git a/arch/arm/plat-omap/counter_32k.c b/arch/arm/plat-omap/counter_32k.c
index 44ae077..2132c4f 100644
--- a/arch/arm/plat-omap/counter_32k.c
+++ b/arch/arm/plat-omap/counter_32k.c
@@ -28,19 +28,20 @@
 
 #include <plat/clock.h>
 
+/* OMAP2_32KSYNCNT_CR_OFF: offset of 32ksync counter register */
+#define OMAP2_32KSYNCNT_CR_OFF		0x10
+
 /*
  * 32KHz clocksource ... always available, on pretty most chips except
  * OMAP 730 and 1510.  Other timers could be used as clocksources, with
  * higher resolution in free-running counter modes (e.g. 12 MHz xtal),
  * but systems won't necessarily want to spend resources that way.
  */
-static void __iomem *timer_32k_base;
-
-#define OMAP16XX_TIMER_32K_SYNCHRONIZED		0xfffbc410
+static void __iomem *sync32k_cnt_reg;
 
 static u32 notrace omap_32k_read_sched_clock(void)
 {
-	return timer_32k_base ? __raw_readl(timer_32k_base) : 0;
+	return sync32k_cnt_reg ? __raw_readl(sync32k_cnt_reg) : 0;
 }
 
 /**
@@ -60,7 +61,7 @@
 	struct timespec *tsp = &persistent_ts;
 
 	last_cycles = cycles;
-	cycles = timer_32k_base ? __raw_readl(timer_32k_base) : 0;
+	cycles = sync32k_cnt_reg ? __raw_readl(sync32k_cnt_reg) : 0;
 	delta = cycles - last_cycles;
 
 	nsecs = clocksource_cyc2ns(delta, persistent_mult, persistent_shift);
@@ -69,55 +70,41 @@
 	*ts = *tsp;
 }
 
-int __init omap_init_clocksource_32k(void)
+/**
+ * omap_init_clocksource_32k - setup and register counter 32k as a
+ * kernel clocksource
+ * @vbase: ioremapped (virtual) base address of the counter_32k module
+ *
+ * Returns 0 upon success or negative error code upon failure.
+ *
+ */
+int __init omap_init_clocksource_32k(void __iomem *vbase)
 {
-	static char err[] __initdata = KERN_ERR
-			"%s: can't register clocksource!\n";
+	int ret;
 
-	if (cpu_is_omap16xx() || cpu_class_is_omap2()) {
-		u32 pbase;
-		unsigned long size = SZ_4K;
-		void __iomem *base;
-		struct clk *sync_32k_ick;
+	/*
+	 * The 32k sync counter register lives at offset OMAP2_32KSYNCNT_CR_OFF (0x10)
+	 */
+	sync32k_cnt_reg = vbase + OMAP2_32KSYNCNT_CR_OFF;
 
-		if (cpu_is_omap16xx()) {
-			pbase = OMAP16XX_TIMER_32K_SYNCHRONIZED;
-			size = SZ_1K;
-		} else if (cpu_is_omap2420())
-			pbase = OMAP2420_32KSYNCT_BASE + 0x10;
-		else if (cpu_is_omap2430())
-			pbase = OMAP2430_32KSYNCT_BASE + 0x10;
-		else if (cpu_is_omap34xx())
-			pbase = OMAP3430_32KSYNCT_BASE + 0x10;
-		else if (cpu_is_omap44xx())
-			pbase = OMAP4430_32KSYNCT_BASE + 0x10;
-		else
-			return -ENODEV;
+	/*
+	 * 120000 rough estimate from the calculations in
+	 * __clocksource_updatefreq_scale.
+	 */
+	clocks_calc_mult_shift(&persistent_mult, &persistent_shift,
+			32768, NSEC_PER_SEC, 120000);
 
-		/* For this to work we must have a static mapping in io.c for this area */
-		base = ioremap(pbase, size);
-		if (!base)
-			return -ENODEV;
-
-		sync_32k_ick = clk_get(NULL, "omap_32ksync_ick");
-		if (!IS_ERR(sync_32k_ick))
-			clk_enable(sync_32k_ick);
-
-		timer_32k_base = base;
-
-		/*
-		 * 120000 rough estimate from the calculations in
-		 * __clocksource_updatefreq_scale.
-		 */
-		clocks_calc_mult_shift(&persistent_mult, &persistent_shift,
-				32768, NSEC_PER_SEC, 120000);
-
-		if (clocksource_mmio_init(base, "32k_counter", 32768, 250, 32,
-					  clocksource_mmio_readl_up))
-			printk(err, "32k_counter");
-
-		setup_sched_clock(omap_32k_read_sched_clock, 32, 32768);
-		register_persistent_clock(NULL, omap_read_persistent_clock);
+	ret = clocksource_mmio_init(sync32k_cnt_reg, "32k_counter", 32768,
+				250, 32, clocksource_mmio_readl_up);
+	if (ret) {
+		pr_err("32k_counter: can't register clocksource\n");
+		return ret;
 	}
+
+	setup_sched_clock(omap_32k_read_sched_clock, 32, 32768);
+	register_persistent_clock(NULL, omap_read_persistent_clock);
+	pr_info("OMAP clocksource: 32k_counter at 32768 Hz\n");
+
 	return 0;
 }
diff --git a/arch/arm/plat-omap/devices.c b/arch/arm/plat-omap/devices.c
index 09b07d2..1cba927 100644
--- a/arch/arm/plat-omap/devices.c
+++ b/arch/arm/plat-omap/devices.c
@@ -28,54 +28,6 @@
 #include <plat/menelaus.h>
 #include <plat/omap44xx.h>
 
-#if defined(CONFIG_MMC_OMAP) || defined(CONFIG_MMC_OMAP_MODULE) || \
-	defined(CONFIG_MMC_OMAP_HS) || defined(CONFIG_MMC_OMAP_HS_MODULE)
-
-#define OMAP_MMC_NR_RES		2
-
-/*
- * Register MMC devices. Called from mach-omap1 and mach-omap2 device init.
- */
-int __init omap_mmc_add(const char *name, int id, unsigned long base,
-				unsigned long size, unsigned int irq,
-				struct omap_mmc_platform_data *data)
-{
-	struct platform_device *pdev;
-	struct resource res[OMAP_MMC_NR_RES];
-	int ret;
-
-	pdev = platform_device_alloc(name, id);
-	if (!pdev)
-		return -ENOMEM;
-
-	memset(res, 0, OMAP_MMC_NR_RES * sizeof(struct resource));
-	res[0].start = base;
-	res[0].end = base + size - 1;
-	res[0].flags = IORESOURCE_MEM;
-	res[1].start = res[1].end = irq;
-	res[1].flags = IORESOURCE_IRQ;
-
-	ret = platform_device_add_resources(pdev, res, ARRAY_SIZE(res));
-	if (ret == 0)
-		ret = platform_device_add_data(pdev, data, sizeof(*data));
-	if (ret)
-		goto fail;
-
-	ret = platform_device_add(pdev);
-	if (ret)
-		goto fail;
-
-	/* return device handle to board setup code */
-	data->dev = &pdev->dev;
-	return 0;
-
-fail:
-	platform_device_put(pdev);
-	return ret;
-}
-
-#endif
-
 /*-------------------------------------------------------------------------*/
 
 #if defined(CONFIG_HW_RANDOM_OMAP) || defined(CONFIG_HW_RANDOM_OMAP_MODULE)
@@ -109,79 +61,6 @@
 static inline void omap_init_rng(void) {}
 #endif
 
-/*-------------------------------------------------------------------------*/
-
-/* Numbering for the SPI-capable controllers when used for SPI:
- * spi		= 1
- * uwire	= 2
- * mmc1..2	= 3..4
- * mcbsp1..3	= 5..7
- */
-
-#if defined(CONFIG_SPI_OMAP_UWIRE) || defined(CONFIG_SPI_OMAP_UWIRE_MODULE)
-
-#define	OMAP_UWIRE_BASE		0xfffb3000
-
-static struct resource uwire_resources[] = {
-	{
-		.start		= OMAP_UWIRE_BASE,
-		.end		= OMAP_UWIRE_BASE + 0x20,
-		.flags		= IORESOURCE_MEM,
-	},
-};
-
-static struct platform_device omap_uwire_device = {
-	.name	   = "omap_uwire",
-	.id	     = -1,
-	.num_resources	= ARRAY_SIZE(uwire_resources),
-	.resource	= uwire_resources,
-};
-
-static void omap_init_uwire(void)
-{
-	/* FIXME define and use a boot tag; not all boards will be hooking
-	 * up devices to the microwire controller, and multi-board configs
-	 * mean that CONFIG_SPI_OMAP_UWIRE may be configured anyway...
-	 */
-
-	/* board-specific code must configure chipselects (only a few
-	 * are normally used) and SCLK/SDI/SDO (each has two choices).
-	 */
-	(void) platform_device_register(&omap_uwire_device);
-}
-#else
-static inline void omap_init_uwire(void) {}
-#endif
-
-#if defined(CONFIG_TIDSPBRIDGE) || defined(CONFIG_TIDSPBRIDGE_MODULE)
-
-static phys_addr_t omap_dsp_phys_mempool_base;
-
-void __init omap_dsp_reserve_sdram_memblock(void)
-{
-	phys_addr_t size = CONFIG_TIDSPBRIDGE_MEMPOOL_SIZE;
-	phys_addr_t paddr;
-
-	if (!size)
-		return;
-
-	paddr = arm_memblock_steal(size, SZ_1M);
-	if (!paddr) {
-		pr_err("%s: failed to reserve %llx bytes\n",
-				__func__, (unsigned long long)size);
-		return;
-	}
-
-	omap_dsp_phys_mempool_base = paddr;
-}
-
-phys_addr_t omap_dsp_get_mempool_base(void)
-{
-	return omap_dsp_phys_mempool_base;
-}
-EXPORT_SYMBOL(omap_dsp_get_mempool_base);
-#endif
-
 /*
  * This gets called after board-specific INIT_MACHINE, and initializes most
  * on-chip peripherals accessible on this board (except for few like USB):
@@ -208,7 +87,6 @@
 	 * in alphabetical order so they're easier to sort through.
 	 */
 	omap_init_rng();
-	omap_init_uwire();
 	return 0;
 }
 arch_initcall(omap_init_devices);
diff --git a/arch/arm/plat-omap/dma.c b/arch/arm/plat-omap/dma.c
index 987e610..cb16ade 100644
--- a/arch/arm/plat-omap/dma.c
+++ b/arch/arm/plat-omap/dma.c
@@ -852,7 +852,7 @@
 	}
 	l = p->dma_read(CCR, lch);
 	l &= ~((1 << 6) | (1 << 26));
-	if (cpu_is_omap2430() || cpu_is_omap34xx() ||  cpu_is_omap44xx())
+	if (cpu_class_is_omap2() && !cpu_is_omap242x())
 		l |= ((read_prio & 0x1) << 6) | ((write_prio & 0x1) << 26);
 	else
 		l |= ((read_prio & 0x1) << 6);
@@ -2080,7 +2080,7 @@
 		}
 	}
 
-	if (cpu_is_omap2430() || cpu_is_omap34xx() || cpu_is_omap44xx())
+	if (cpu_class_is_omap2() && !cpu_is_omap242x())
 		omap_dma_set_global_params(DMA_DEFAULT_ARB_RATE,
 				DMA_DEFAULT_FIFO_DEPTH, 0);
 
diff --git a/arch/arm/plat-omap/dmtimer.c b/arch/arm/plat-omap/dmtimer.c
index c4ed35e..3b0cfeb 100644
--- a/arch/arm/plat-omap/dmtimer.c
+++ b/arch/arm/plat-omap/dmtimer.c
@@ -82,8 +82,6 @@
 
 static void omap_timer_restore_context(struct omap_dm_timer *timer)
 {
-	__raw_writel(timer->context.tiocp_cfg,
-			timer->io_base + OMAP_TIMER_OCP_CFG_OFFSET);
 	if (timer->revision == 1)
 		__raw_writel(timer->context.tistat, timer->sys_stat);
 
diff --git a/arch/arm/plat-omap/include/plat/common.h b/arch/arm/plat-omap/include/plat/common.h
index a557b84..d1cb6f5 100644
--- a/arch/arm/plat-omap/include/plat/common.h
+++ b/arch/arm/plat-omap/include/plat/common.h
@@ -30,7 +30,7 @@
 #include <plat/i2c.h>
 #include <plat/omap_hwmod.h>
 
-extern int __init omap_init_clocksource_32k(void);
+extern int __init omap_init_clocksource_32k(void __iomem *vbase);
 
 extern void __init omap_check_revision(void);
 
diff --git a/arch/arm/plat-omap/include/plat/cpu.h b/arch/arm/plat-omap/include/plat/cpu.h
index 4bdf14e..de6c0a0 100644
--- a/arch/arm/plat-omap/include/plat/cpu.h
+++ b/arch/arm/plat-omap/include/plat/cpu.h
@@ -121,6 +121,7 @@
 IS_OMAP_CLASS(24xx, 0x24)
 IS_OMAP_CLASS(34xx, 0x34)
 IS_OMAP_CLASS(44xx, 0x44)
+IS_AM_CLASS(35xx, 0x35)
 IS_AM_CLASS(33xx, 0x33)
 
 IS_TI_CLASS(81xx, 0x81)
@@ -148,6 +149,7 @@
 #define cpu_is_ti81xx()			0
 #define cpu_is_ti816x()			0
 #define cpu_is_ti814x()			0
+#define soc_is_am35xx()			0
 #define cpu_is_am33xx()			0
 #define cpu_is_am335x()			0
 #define cpu_is_omap44xx()		0
@@ -250,8 +252,6 @@
  * cpu_is_omap2423():	True for OMAP2423
  * cpu_is_omap2430():	True for OMAP2430
  * cpu_is_omap3430():	True for OMAP3430
- * cpu_is_omap3505():	True for OMAP3505
- * cpu_is_omap3517():	True for OMAP3517
  */
 #define GET_OMAP_TYPE	((omap_rev() >> 16) & 0xffff)
 
@@ -275,8 +275,6 @@
 IS_OMAP_TYPE(2423, 0x2423)
 IS_OMAP_TYPE(2430, 0x2430)
 IS_OMAP_TYPE(3430, 0x3430)
-IS_OMAP_TYPE(3505, 0x3517)
-IS_OMAP_TYPE(3517, 0x3517)
 
 #define cpu_is_omap310()		0
 #define cpu_is_omap730()		0
@@ -291,12 +289,6 @@
 #define cpu_is_omap2422()		0
 #define cpu_is_omap2423()		0
 #define cpu_is_omap2430()		0
-#define cpu_is_omap3503()		0
-#define cpu_is_omap3515()		0
-#define cpu_is_omap3525()		0
-#define cpu_is_omap3530()		0
-#define cpu_is_omap3505()		0
-#define cpu_is_omap3517()		0
 #define cpu_is_omap3430()		0
 #define cpu_is_omap3630()		0
 
@@ -348,36 +340,19 @@
 
 #if defined(CONFIG_ARCH_OMAP3)
 # undef cpu_is_omap3430
-# undef cpu_is_omap3503
-# undef cpu_is_omap3515
-# undef cpu_is_omap3525
-# undef cpu_is_omap3530
-# undef cpu_is_omap3505
-# undef cpu_is_omap3517
 # undef cpu_is_ti81xx
 # undef cpu_is_ti816x
 # undef cpu_is_ti814x
+# undef soc_is_am35xx
 # undef cpu_is_am33xx
 # undef cpu_is_am335x
 # define cpu_is_omap3430()		is_omap3430()
-# define cpu_is_omap3503()		(cpu_is_omap3430() &&		\
-						(!omap3_has_iva()) &&	\
-						(!omap3_has_sgx()))
-# define cpu_is_omap3515()		(cpu_is_omap3430() &&		\
-						(!omap3_has_iva()) &&	\
-						(omap3_has_sgx()))
-# define cpu_is_omap3525()		(cpu_is_omap3430() &&		\
-						(!omap3_has_sgx()) &&	\
-						(omap3_has_iva()))
-# define cpu_is_omap3530()		(cpu_is_omap3430())
-# define cpu_is_omap3517()		is_omap3517()
-# define cpu_is_omap3505()		(cpu_is_omap3517() &&		\
-						!omap3_has_sgx())
 # undef cpu_is_omap3630
 # define cpu_is_omap3630()		is_omap363x()
 # define cpu_is_ti81xx()		is_ti81xx()
 # define cpu_is_ti816x()		is_ti816x()
 # define cpu_is_ti814x()		is_ti814x()
+# define soc_is_am35xx()		is_am35xx()
 # define cpu_is_am33xx()		is_am33xx()
 # define cpu_is_am335x()		is_am335x()
 #endif
@@ -420,10 +395,6 @@
 #define OMAP3630_REV_ES1_1	(OMAP363X_CLASS | (0x1 << 8))
 #define OMAP3630_REV_ES1_2	(OMAP363X_CLASS | (0x2 << 8))
 
-#define OMAP3517_CLASS		0x35170034
-#define OMAP3517_REV_ES1_0	OMAP3517_CLASS
-#define OMAP3517_REV_ES1_1	(OMAP3517_CLASS | (0x1 << 8))
-
 #define TI816X_CLASS		0x81600034
 #define TI8168_REV_ES1_0	TI816X_CLASS
 #define TI8168_REV_ES1_1	(TI816X_CLASS | (0x1 << 8))
@@ -433,6 +404,10 @@
 #define TI8148_REV_ES2_0	(TI814X_CLASS | (0x1 << 8))
 #define TI8148_REV_ES2_1	(TI814X_CLASS | (0x2 << 8))
 
+#define AM35XX_CLASS		0x35170034
+#define AM35XX_REV_ES1_0	AM35XX_CLASS
+#define AM35XX_REV_ES1_1	(AM35XX_CLASS | (0x1 << 8))
+
 #define AM335X_CLASS		0x33500034
 #define AM335X_REV_ES1_0	AM335X_CLASS
 
diff --git a/arch/arm/plat-omap/include/plat/dma.h b/arch/arm/plat-omap/include/plat/dma.h
index 42afb4c..c5811d4 100644
--- a/arch/arm/plat-omap/include/plat/dma.h
+++ b/arch/arm/plat-omap/include/plat/dma.h
@@ -312,6 +312,11 @@
 #define CLEAR_CSR_ON_READ		BIT(0xC)
 #define IS_WORD_16			BIT(0xD)
 
+/* Defines for DMA Capabilities */
+#define DMA_HAS_TRANSPARENT_CAPS	(0x1 << 18)
+#define DMA_HAS_CONSTANT_FILL_CAPS	(0x1 << 19)
+#define DMA_HAS_DESCRIPTOR_CAPS		(0x3 << 20)
+
 enum omap_reg_offsets {
 
 GCR,		GSCR,		GRST1,		HW_ID,
diff --git a/arch/arm/plat-omap/include/plat/dmtimer.h b/arch/arm/plat-omap/include/plat/dmtimer.h
index bdf871a..5da7356 100644
--- a/arch/arm/plat-omap/include/plat/dmtimer.h
+++ b/arch/arm/plat-omap/include/plat/dmtimer.h
@@ -75,7 +75,6 @@
 
 struct timer_regs {
 	u32 tidr;
-	u32 tiocp_cfg;
 	u32 tistat;
 	u32 tisr;
 	u32 tier;
diff --git a/arch/arm/plat-omap/include/plat/gpmc.h b/arch/arm/plat-omap/include/plat/gpmc.h
index 1527929..f37764a 100644
--- a/arch/arm/plat-omap/include/plat/gpmc.h
+++ b/arch/arm/plat-omap/include/plat/gpmc.h
@@ -92,6 +92,8 @@
 	OMAP_ECC_HAMMING_CODE_HW, /* gpmc to detect the error */
 		/* 1-bit ecc: stored at beginning of spare area as romcode */
 	OMAP_ECC_HAMMING_CODE_HW_ROMCODE, /* gpmc method & romcode layout */
+	OMAP_ECC_BCH4_CODE_HW, /* 4-bit BCH ecc code */
+	OMAP_ECC_BCH8_CODE_HW, /* 8-bit BCH ecc code */
 };
 
 /*
@@ -157,4 +159,13 @@
 
 int gpmc_enable_hwecc(int cs, int mode, int dev_width, int ecc_size);
 int gpmc_calculate_ecc(int cs, const u_char *dat, u_char *ecc_code);
+
+#ifdef CONFIG_ARCH_OMAP3
+int gpmc_init_hwecc_bch(int cs, int nsectors, int nerrors);
+int gpmc_enable_hwecc_bch(int cs, int mode, int dev_width, int nsectors,
+			  int nerrors);
+int gpmc_calculate_ecc_bch4(int cs, const u_char *dat, u_char *ecc);
+int gpmc_calculate_ecc_bch8(int cs, const u_char *dat, u_char *ecc);
+#endif /* CONFIG_ARCH_OMAP3 */
+
 #endif
diff --git a/arch/arm/plat-omap/include/plat/mmc.h b/arch/arm/plat-omap/include/plat/mmc.h
index 3e7ae0f..5493bd9 100644
--- a/arch/arm/plat-omap/include/plat/mmc.h
+++ b/arch/arm/plat-omap/include/plat/mmc.h
@@ -172,14 +172,10 @@
 extern void omap_mmc_notify_cover_event(struct device *dev, int slot,
 					int is_closed);
 
-#if	defined(CONFIG_MMC_OMAP) || defined(CONFIG_MMC_OMAP_MODULE) || \
-	defined(CONFIG_MMC_OMAP_HS) || defined(CONFIG_MMC_OMAP_HS_MODULE)
+#if defined(CONFIG_MMC_OMAP) || defined(CONFIG_MMC_OMAP_MODULE)
 void omap1_init_mmc(struct omap_mmc_platform_data **mmc_data,
 				int nr_controllers);
 void omap242x_init_mmc(struct omap_mmc_platform_data **mmc_data);
-int omap_mmc_add(const char *name, int id, unsigned long base,
-				unsigned long size, unsigned int irq,
-				struct omap_mmc_platform_data *data);
 #else
 static inline void omap1_init_mmc(struct omap_mmc_platform_data **mmc_data,
 				int nr_controllers)
@@ -188,13 +184,6 @@
 static inline void omap242x_init_mmc(struct omap_mmc_platform_data **mmc_data)
 {
 }
-static inline int omap_mmc_add(const char *name, int id, unsigned long base,
-				unsigned long size, unsigned int irq,
-				struct omap_mmc_platform_data *data)
-{
-	return 0;
-}
-
 #endif
 
 extern int omap_msdi_reset(struct omap_hwmod *oh);
diff --git a/arch/arm/plat-orion/common.c b/arch/arm/plat-orion/common.c
index 74daf5e..c179378 100644
--- a/arch/arm/plat-orion/common.c
+++ b/arch/arm/plat-orion/common.c
@@ -14,15 +14,41 @@
 #include <linux/dma-mapping.h>
 #include <linux/serial_8250.h>
 #include <linux/ata_platform.h>
+#include <linux/clk.h>
+#include <linux/clkdev.h>
 #include <linux/mv643xx_eth.h>
 #include <linux/mv643xx_i2c.h>
 #include <net/dsa.h>
-#include <linux/spi/orion_spi.h>
-#include <plat/orion_wdt.h>
 #include <plat/mv_xor.h>
 #include <plat/ehci-orion.h>
 #include <mach/bridge-regs.h>
 
+/* Create a clkdev entry for a given device/clk */
+void __init orion_clkdev_add(const char *con_id, const char *dev_id,
+			     struct clk *clk)
+{
+	struct clk_lookup *cl;
+
+	cl = clkdev_alloc(clk, con_id, dev_id);
+	if (cl)
+		clkdev_add(cl);
+}
+
+/* Create clkdev entries for all orion platforms except kirkwood.
+   Kirkwood has gated clocks for some of its peripherals, so it creates
+   its own clkdev entries. For all the other orion devices, create
+   clkdev entries to the tclk. */
+void __init orion_clkdev_init(struct clk *tclk)
+{
+	orion_clkdev_add(NULL, "orion_spi.0", tclk);
+	orion_clkdev_add(NULL, "orion_spi.1", tclk);
+	orion_clkdev_add(NULL, MV643XX_ETH_NAME ".0", tclk);
+	orion_clkdev_add(NULL, MV643XX_ETH_NAME ".1", tclk);
+	orion_clkdev_add(NULL, MV643XX_ETH_NAME ".2", tclk);
+	orion_clkdev_add(NULL, MV643XX_ETH_NAME ".3", tclk);
+	orion_clkdev_add(NULL, "orion_wdt", tclk);
+}
+
 /* Fill in the resources structure and link it into the platform
    device structure. There is always a memory region, and nearly
    always an interrupt.*/
@@ -49,6 +75,12 @@
 /*****************************************************************************
  * UART
  ****************************************************************************/
+static unsigned long __init uart_get_clk_rate(struct clk *clk)
+{
+	clk_prepare_enable(clk);
+	return clk_get_rate(clk);
+}
+
 static void __init uart_complete(
 	struct platform_device *orion_uart,
 	struct plat_serial8250_port *data,
@@ -56,12 +88,12 @@
 	unsigned int membase,
 	resource_size_t mapbase,
 	unsigned int irq,
-	unsigned int uartclk)
+	struct clk *clk)
 {
 	data->mapbase = mapbase;
 	data->membase = (void __iomem *)membase;
 	data->irq = irq;
-	data->uartclk = uartclk;
+	data->uartclk = uart_get_clk_rate(clk);
 	orion_uart->dev.platform_data = data;
 
 	fill_resources(orion_uart, resources, mapbase, 0xff, irq);
@@ -90,10 +122,10 @@
 void __init orion_uart0_init(unsigned int membase,
 			     resource_size_t mapbase,
 			     unsigned int irq,
-			     unsigned int uartclk)
+			     struct clk *clk)
 {
 	uart_complete(&orion_uart0, orion_uart0_data, orion_uart0_resources,
-		      membase, mapbase, irq, uartclk);
+		      membase, mapbase, irq, clk);
 }
 
 /*****************************************************************************
@@ -118,10 +150,10 @@
 void __init orion_uart1_init(unsigned int membase,
 			     resource_size_t mapbase,
 			     unsigned int irq,
-			     unsigned int uartclk)
+			     struct clk *clk)
 {
 	uart_complete(&orion_uart1, orion_uart1_data, orion_uart1_resources,
-		      membase, mapbase, irq, uartclk);
+		      membase, mapbase, irq, clk);
 }
 
 /*****************************************************************************
@@ -146,10 +178,10 @@
 void __init orion_uart2_init(unsigned int membase,
 			     resource_size_t mapbase,
 			     unsigned int irq,
-			     unsigned int uartclk)
+			     struct clk *clk)
 {
 	uart_complete(&orion_uart2, orion_uart2_data, orion_uart2_resources,
-		      membase, mapbase, irq, uartclk);
+		      membase, mapbase, irq, clk);
 }
 
 /*****************************************************************************
@@ -174,10 +206,10 @@
 void __init orion_uart3_init(unsigned int membase,
 			     resource_size_t mapbase,
 			     unsigned int irq,
-			     unsigned int uartclk)
+			     struct clk *clk)
 {
 	uart_complete(&orion_uart3, orion_uart3_data, orion_uart3_resources,
-		      membase, mapbase, irq, uartclk);
+		      membase, mapbase, irq, clk);
 }
 
 /*****************************************************************************
@@ -203,13 +235,11 @@
  ****************************************************************************/
 static __init void ge_complete(
 	struct mv643xx_eth_shared_platform_data *orion_ge_shared_data,
-	int tclk,
 	struct resource *orion_ge_resource, unsigned long irq,
 	struct platform_device *orion_ge_shared,
 	struct mv643xx_eth_platform_data *eth_data,
 	struct platform_device *orion_ge)
 {
-	orion_ge_shared_data->t_clk = tclk;
 	orion_ge_resource->start = irq;
 	orion_ge_resource->end = irq;
 	eth_data->shared = orion_ge_shared;
@@ -260,12 +290,11 @@
 void __init orion_ge00_init(struct mv643xx_eth_platform_data *eth_data,
 			    unsigned long mapbase,
 			    unsigned long irq,
-			    unsigned long irq_err,
-			    int tclk)
+			    unsigned long irq_err)
 {
 	fill_resources(&orion_ge00_shared, orion_ge00_shared_resources,
 		       mapbase + 0x2000, SZ_16K - 1, irq_err);
-	ge_complete(&orion_ge00_shared_data, tclk,
+	ge_complete(&orion_ge00_shared_data,
 		    orion_ge00_resources, irq, &orion_ge00_shared,
 		    eth_data, &orion_ge00);
 }
@@ -313,12 +342,11 @@
 void __init orion_ge01_init(struct mv643xx_eth_platform_data *eth_data,
 			    unsigned long mapbase,
 			    unsigned long irq,
-			    unsigned long irq_err,
-			    int tclk)
+			    unsigned long irq_err)
 {
 	fill_resources(&orion_ge01_shared, orion_ge01_shared_resources,
 		       mapbase + 0x2000, SZ_16K - 1, irq_err);
-	ge_complete(&orion_ge01_shared_data, tclk,
+	ge_complete(&orion_ge01_shared_data,
 		    orion_ge01_resources, irq, &orion_ge01_shared,
 		    eth_data, &orion_ge01);
 }
@@ -366,12 +394,11 @@
 void __init orion_ge10_init(struct mv643xx_eth_platform_data *eth_data,
 			    unsigned long mapbase,
 			    unsigned long irq,
-			    unsigned long irq_err,
-			    int tclk)
+			    unsigned long irq_err)
 {
 	fill_resources(&orion_ge10_shared, orion_ge10_shared_resources,
 		       mapbase + 0x2000, SZ_16K - 1, irq_err);
-	ge_complete(&orion_ge10_shared_data, tclk,
+	ge_complete(&orion_ge10_shared_data,
 		    orion_ge10_resources, irq, &orion_ge10_shared,
 		    eth_data, &orion_ge10);
 }
@@ -419,12 +446,11 @@
 void __init orion_ge11_init(struct mv643xx_eth_platform_data *eth_data,
 			    unsigned long mapbase,
 			    unsigned long irq,
-			    unsigned long irq_err,
-			    int tclk)
+			    unsigned long irq_err)
 {
 	fill_resources(&orion_ge11_shared, orion_ge11_shared_resources,
 		       mapbase + 0x2000, SZ_16K - 1, irq_err);
-	ge_complete(&orion_ge11_shared_data, tclk,
+	ge_complete(&orion_ge11_shared_data,
 		    orion_ge11_resources, irq, &orion_ge11_shared,
 		    eth_data, &orion_ge11);
 }
@@ -521,44 +547,32 @@
 /*****************************************************************************
  * SPI
  ****************************************************************************/
-static struct orion_spi_info orion_spi_plat_data;
 static struct resource orion_spi_resources;
 
 static struct platform_device orion_spi = {
 	.name		= "orion_spi",
 	.id		= 0,
-	.dev		= {
-		.platform_data	= &orion_spi_plat_data,
-	},
 };
 
-static struct orion_spi_info orion_spi_1_plat_data;
 static struct resource orion_spi_1_resources;
 
 static struct platform_device orion_spi_1 = {
 	.name		= "orion_spi",
 	.id		= 1,
-	.dev		= {
-		.platform_data	= &orion_spi_1_plat_data,
-	},
 };
 
 /* Note: The SPI silicon core does have interrupts. However the
  * current Linux software driver does not use interrupts. */
 
-void __init orion_spi_init(unsigned long mapbase,
-			   unsigned long tclk)
+void __init orion_spi_init(unsigned long mapbase)
 {
-	orion_spi_plat_data.tclk = tclk;
 	fill_resources(&orion_spi, &orion_spi_resources,
 		       mapbase, SZ_512 - 1, NO_IRQ);
 	platform_device_register(&orion_spi);
 }
 
-void __init orion_spi_1_init(unsigned long mapbase,
-			     unsigned long tclk)
+void __init orion_spi_1_init(unsigned long mapbase)
 {
-	orion_spi_1_plat_data.tclk = tclk;
 	fill_resources(&orion_spi_1, &orion_spi_1_resources,
 		       mapbase, SZ_512 - 1, NO_IRQ);
 	platform_device_register(&orion_spi_1);
@@ -567,24 +581,18 @@
 /*****************************************************************************
  * Watchdog
  ****************************************************************************/
-static struct orion_wdt_platform_data orion_wdt_data;
-
 static struct resource orion_wdt_resource =
-		DEFINE_RES_MEM(TIMER_VIRT_BASE, 0x28);
+		DEFINE_RES_MEM(TIMER_PHYS_BASE, 0x28);
 
 static struct platform_device orion_wdt_device = {
 	.name		= "orion_wdt",
 	.id		= -1,
-	.dev		= {
-		.platform_data	= &orion_wdt_data,
-	},
-	.resource	= &orion_wdt_resource,
 	.num_resources	= 1,
+	.resource	= &orion_wdt_resource,
 };
 
-void __init orion_wdt_init(unsigned long tclk)
+void __init orion_wdt_init(void)
 {
-	orion_wdt_data.tclk = tclk;
 	platform_device_register(&orion_wdt_device);
 }
 
diff --git a/arch/arm/plat-orion/include/plat/common.h b/arch/arm/plat-orion/include/plat/common.h
index a7fa005..e00fdb2 100644
--- a/arch/arm/plat-orion/include/plat/common.h
+++ b/arch/arm/plat-orion/include/plat/common.h
@@ -16,22 +16,22 @@
 void __init orion_uart0_init(unsigned int membase,
 			     resource_size_t mapbase,
 			     unsigned int irq,
-			     unsigned int uartclk);
+			     struct clk *clk);
 
 void __init orion_uart1_init(unsigned int membase,
 			     resource_size_t mapbase,
 			     unsigned int irq,
-			     unsigned int uartclk);
+			     struct clk *clk);
 
 void __init orion_uart2_init(unsigned int membase,
 			     resource_size_t mapbase,
 			     unsigned int irq,
-			     unsigned int uartclk);
+			     struct clk *clk);
 
 void __init orion_uart3_init(unsigned int membase,
 			     resource_size_t mapbase,
 			     unsigned int irq,
-			     unsigned int uartclk);
+			     struct clk *clk);
 
 void __init orion_rtc_init(unsigned long mapbase,
 			   unsigned long irq);
@@ -39,29 +39,26 @@
 void __init orion_ge00_init(struct mv643xx_eth_platform_data *eth_data,
 			    unsigned long mapbase,
 			    unsigned long irq,
-			    unsigned long irq_err,
-			    int tclk);
+			    unsigned long irq_err);
 
 void __init orion_ge01_init(struct mv643xx_eth_platform_data *eth_data,
 			    unsigned long mapbase,
 			    unsigned long irq,
-			    unsigned long irq_err,
-			    int tclk);
+			    unsigned long irq_err);
 
 void __init orion_ge10_init(struct mv643xx_eth_platform_data *eth_data,
 			    unsigned long mapbase,
 			    unsigned long irq,
-			    unsigned long irq_err,
-			    int tclk);
+			    unsigned long irq_err);
 
 void __init orion_ge11_init(struct mv643xx_eth_platform_data *eth_data,
 			    unsigned long mapbase,
 			    unsigned long irq,
-			    unsigned long irq_err,
-			    int tclk);
+			    unsigned long irq_err);
 
 void __init orion_ge00_switch_init(struct dsa_platform_data *d,
 				   int irq);
+
 void __init orion_i2c_init(unsigned long mapbase,
 			   unsigned long irq,
 			   unsigned long freq_m);
@@ -70,13 +67,11 @@
 			     unsigned long irq,
 			     unsigned long freq_m);
 
-void __init orion_spi_init(unsigned long mapbase,
-			   unsigned long tclk);
+void __init orion_spi_init(unsigned long mapbase);
 
-void __init orion_spi_1_init(unsigned long mapbase,
-			     unsigned long tclk);
+void __init orion_spi_1_init(unsigned long mapbase);
 
-void __init orion_wdt_init(unsigned long tclk);
+void __init orion_wdt_init(void);
 
 void __init orion_xor0_init(unsigned long mapbase_low,
 			    unsigned long mapbase_high,
@@ -106,4 +101,9 @@
 			      unsigned long srambase,
 			      unsigned long sram_size,
 			      unsigned long irq);
+
+void __init orion_clkdev_add(const char *con_id, const char *dev_id,
+			     struct clk *clk);
+
+void __init orion_clkdev_init(struct clk *tclk);
 #endif
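
Since the helpers above no longer take a tclk rate, board/SoC setup code is expected to register a tclk clock, seed the common lookups with orion_clkdev_init() and hand struct clk pointers to the UART helpers. A hedged sketch; the 166 MHz rate, base addresses and IRQ number are made-up placeholders:

/* Illustrative only -- rates, addresses and IRQ numbers are assumptions. */
#include <linux/clk-provider.h>
#include <plat/common.h>

static struct clk *tclk;

static void __init example_orion_clk_init(void)
{
	tclk = clk_register_fixed_rate(NULL, "tclk", NULL, CLK_IS_ROOT,
				       166666667);
	orion_clkdev_init(tclk);	/* spi/ge/wdt lookups now point at tclk */
}

static void __init example_orion_devices_init(void)
{
	example_orion_clk_init();
	orion_uart0_init(EXAMPLE_UART0_VIRT, EXAMPLE_UART0_PHYS,
			 EXAMPLE_IRQ_UART0, tclk);
	orion_wdt_init();
	orion_spi_init(EXAMPLE_SPI_PHYS);
}
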
diff --git a/arch/arm/plat-orion/include/plat/orion_wdt.h b/arch/arm/plat-orion/include/plat/orion_wdt.h
deleted file mode 100644
index 665c362..0000000
--- a/arch/arm/plat-orion/include/plat/orion_wdt.h
+++ /dev/null
@@ -1,18 +0,0 @@
-/*
- * arch/arm/plat-orion/include/plat/orion_wdt.h
- *
- * This file is licensed under the terms of the GNU General Public
- * License version 2. This program is licensed "as is" without any
- * warranty of any kind, whether express or implied.
- */
-
-#ifndef __PLAT_ORION_WDT_H
-#define __PLAT_ORION_WDT_H
-
-struct orion_wdt_platform_data {
-	u32	tclk;		/* no <linux/clk.h> support yet */
-};
-
-
-#endif
-
diff --git a/arch/arm/plat-orion/pcie.c b/arch/arm/plat-orion/pcie.c
index 86dbb5b..f20a321 100644
--- a/arch/arm/plat-orion/pcie.c
+++ b/arch/arm/plat-orion/pcie.c
@@ -52,12 +52,12 @@
 #define  PCIE_DEBUG_SOFT_RESET		(1<<20)
 
 
-u32 __init orion_pcie_dev_id(void __iomem *base)
+u32 orion_pcie_dev_id(void __iomem *base)
 {
 	return readl(base + PCIE_DEV_ID_OFF) >> 16;
 }
 
-u32 __init orion_pcie_rev(void __iomem *base)
+u32 orion_pcie_rev(void __iomem *base)
 {
 	return readl(base + PCIE_DEV_REV_OFF) & 0xff;
 }
diff --git a/arch/arm/plat-pxa/include/plat/pxa27x_keypad.h b/arch/arm/plat-pxa/include/plat/pxa27x_keypad.h
index abcc36e..5ce8d5e6 100644
--- a/arch/arm/plat-pxa/include/plat/pxa27x_keypad.h
+++ b/arch/arm/plat-pxa/include/plat/pxa27x_keypad.h
@@ -44,6 +44,10 @@
 	/* direct keys */
 	int		direct_key_num;
 	unsigned int	direct_key_map[MAX_DIRECT_KEY_NUM];
+	/* the key output may be low active */
+	int		direct_key_low_active;
+	/* give board a chance to choose the start direct key */
+	unsigned int	direct_key_mask;
 
 	/* rotary encoders 0 */
 	int		enable_rotary0;
diff --git a/arch/arm/plat-pxa/ssp.c b/arch/arm/plat-pxa/ssp.c
index 58b7980..584c9bf8 100644
--- a/arch/arm/plat-pxa/ssp.c
+++ b/arch/arm/plat-pxa/ssp.c
@@ -193,6 +193,7 @@
 	{ "pxa25x-nssp",	PXA25x_NSSP },
 	{ "pxa27x-ssp",		PXA27x_SSP },
 	{ "pxa168-ssp",		PXA168_SSP },
+	{ "pxa910-ssp",		PXA910_SSP },
 	{ },
 };
 
diff --git a/arch/arm/plat-s3c24xx/Makefile b/arch/arm/plat-s3c24xx/Makefile
index 2467b80..9f60549c 100644
--- a/arch/arm/plat-s3c24xx/Makefile
+++ b/arch/arm/plat-s3c24xx/Makefile
@@ -12,10 +12,7 @@
 
 # Core files
 
-obj-y				+= cpu.o
 obj-y				+= irq.o
-obj-y				+= dev-uart.o
-obj-y				+= clock.o
 obj-$(CONFIG_S3C24XX_DCLK)	+= clock-dclk.o
 
 obj-$(CONFIG_CPU_FREQ_S3C24XX)	+= cpu-freq.o
@@ -23,9 +20,6 @@
 
 # Architecture dependent builds
 
-obj-$(CONFIG_PM)		+= pm.o
-obj-$(CONFIG_PM)		+= irq-pm.o
-obj-$(CONFIG_PM)		+= sleep.o
 obj-$(CONFIG_S3C2410_CLOCK)	+= s3c2410-clock.o
 obj-$(CONFIG_S3C24XX_DMA)	+= dma.o
 obj-$(CONFIG_S3C2410_IOTIMING)	+= s3c2410-iotiming.o
diff --git a/arch/arm/plat-s3c24xx/clock.c b/arch/arm/plat-s3c24xx/clock.c
deleted file mode 100644
index 931d26d..0000000
--- a/arch/arm/plat-s3c24xx/clock.c
+++ /dev/null
@@ -1,59 +0,0 @@
-/* linux/arch/arm/plat-s3c24xx/clock.c
- *
- * Copyright (c) 2004-2005 Simtec Electronics
- *	Ben Dooks <ben@simtec.co.uk>
- *
- * S3C24XX Core clock control support
- *
- * Based on, and code from linux/arch/arm/mach-versatile/clock.c
- **
- **  Copyright (C) 2004 ARM Limited.
- **  Written by Deep Blue Solutions Limited.
- *
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
-*/
-
-#include <linux/init.h>
-#include <linux/kernel.h>
-#include <linux/clk.h>
-#include <linux/io.h>
-
-#include <mach/hardware.h>
-#include <asm/irq.h>
-
-#include <mach/regs-clock.h>
-#include <mach/regs-gpio.h>
-
-#include <plat/cpu-freq.h>
-
-#include <plat/clock.h>
-#include <plat/cpu.h>
-#include <plat/pll.h>
-
-/* initialise all the clocks */
-
-void __init_or_cpufreq s3c24xx_setup_clocks(unsigned long fclk,
-					   unsigned long hclk,
-					   unsigned long pclk)
-{
-	clk_upll.rate = s3c24xx_get_pll(__raw_readl(S3C2410_UPLLCON),
-					clk_xtal.rate);
-
-	clk_mpll.rate = fclk;
-	clk_h.rate = hclk;
-	clk_p.rate = pclk;
-	clk_f.rate = fclk;
-}
diff --git a/arch/arm/plat-s3c24xx/dev-uart.c b/arch/arm/plat-s3c24xx/dev-uart.c
deleted file mode 100644
index 9ab22e6..0000000
--- a/arch/arm/plat-s3c24xx/dev-uart.c
+++ /dev/null
@@ -1,100 +0,0 @@
-/* linux/arch/arm/plat-s3c24xx/dev-uart.c
- *
- * Copyright (c) 2004 Simtec Electronics
- *	Ben Dooks <ben@simtec.co.uk>
- *
- * Base S3C24XX UART resource and platform device definitions
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
-*/
-
-#include <linux/kernel.h>
-#include <linux/types.h>
-#include <linux/interrupt.h>
-#include <linux/list.h>
-#include <linux/serial_core.h>
-#include <linux/platform_device.h>
-
-#include <asm/mach/arch.h>
-#include <asm/mach/map.h>
-#include <asm/mach/irq.h>
-#include <mach/hardware.h>
-#include <mach/map.h>
-
-#include <plat/devs.h>
-#include <plat/regs-serial.h>
-
-/* Serial port registrations */
-
-static struct resource s3c2410_uart0_resource[] = {
-	[0] = {
-		.start = S3C2410_PA_UART0,
-		.end   = S3C2410_PA_UART0 + 0x3fff,
-		.flags = IORESOURCE_MEM,
-	},
-	[1] = {
-		.start = IRQ_S3CUART_RX0,
-		.end   = IRQ_S3CUART_ERR0,
-		.flags = IORESOURCE_IRQ,
-	}
-};
-
-static struct resource s3c2410_uart1_resource[] = {
-	[0] = {
-		.start = S3C2410_PA_UART1,
-		.end   = S3C2410_PA_UART1 + 0x3fff,
-		.flags = IORESOURCE_MEM,
-	},
-	[1] = {
-		.start = IRQ_S3CUART_RX1,
-		.end   = IRQ_S3CUART_ERR1,
-		.flags = IORESOURCE_IRQ,
-	}
-};
-
-static struct resource s3c2410_uart2_resource[] = {
-	[0] = {
-		.start = S3C2410_PA_UART2,
-		.end   = S3C2410_PA_UART2 + 0x3fff,
-		.flags = IORESOURCE_MEM,
-	},
-	[1] = {
-		.start = IRQ_S3CUART_RX2,
-		.end   = IRQ_S3CUART_ERR2,
-		.flags = IORESOURCE_IRQ,
-	}
-};
-
-static struct resource s3c2410_uart3_resource[] = {
-	[0] = {
-		.start = S3C2443_PA_UART3,
-		.end   = S3C2443_PA_UART3 + 0x3fff,
-		.flags = IORESOURCE_MEM,
-	},
-	[1] = {
-		.start = IRQ_S3CUART_RX3,
-		.end   = IRQ_S3CUART_ERR3,
-		.flags = IORESOURCE_IRQ,
-	},
-};
-
-struct s3c24xx_uart_resources s3c2410_uart_resources[] __initdata = {
-	[0] = {
-		.resources	= s3c2410_uart0_resource,
-		.nr_resources	= ARRAY_SIZE(s3c2410_uart0_resource),
-	},
-	[1] = {
-		.resources	= s3c2410_uart1_resource,
-		.nr_resources	= ARRAY_SIZE(s3c2410_uart1_resource),
-	},
-	[2] = {
-		.resources	= s3c2410_uart2_resource,
-		.nr_resources	= ARRAY_SIZE(s3c2410_uart2_resource),
-	},
-	[3] = {
-		.resources	= s3c2410_uart3_resource,
-		.nr_resources	= ARRAY_SIZE(s3c2410_uart3_resource),
-	},
-};
diff --git a/arch/arm/plat-s5p/Kconfig b/arch/arm/plat-s5p/Kconfig
deleted file mode 100644
index 96bea32..0000000
--- a/arch/arm/plat-s5p/Kconfig
+++ /dev/null
@@ -1,140 +0,0 @@
-# arch/arm/plat-s5p/Kconfig
-#
-# Copyright (c) 2009 Samsung Electronics Co., Ltd.
-#		http://www.samsung.com/
-#
-# Licensed under GPLv2
-
-config PLAT_S5P
-	bool
-	depends on (ARCH_S5P64X0 || ARCH_S5PC100 || ARCH_S5PV210 || ARCH_EXYNOS)
-	default y
-	select ARM_VIC if !ARCH_EXYNOS
-	select ARM_GIC if ARCH_EXYNOS
-	select GIC_NON_BANKED if ARCH_EXYNOS4
-	select NO_IOPORT
-	select ARCH_REQUIRE_GPIOLIB
-	select S3C_GPIO_TRACK
-	select S5P_GPIO_DRVSTR
-	select SAMSUNG_GPIOLIB_4BIT
-	select PLAT_SAMSUNG
-	select SAMSUNG_CLKSRC
-	select SAMSUNG_IRQ_VIC_TIMER
-	help
-	  Base platform code for Samsung's S5P series SoC.
-
-config S5P_EXT_INT
-	bool
-	help
-	  Use the external interrupts (other than GPIO interrupts.)
-	  Note: Do not choose this for S5P6440 and S5P6450.
-
-config S5P_GPIO_INT
-	bool
-	help
-	  Common code for the GPIO interrupts (other than external interrupts.)
-
-config S5P_HRT
-	bool
-	select SAMSUNG_DEV_PWM
-	help
-	  Use the High Resolution timer support
-
-config S5P_DEV_UART
-	def_bool y
-	depends on (ARCH_S5P64X0 || ARCH_S5PC100 || ARCH_S5PV210)
-
-config S5P_PM
-	bool
-	help
-	  Common code for power management support on S5P and newer SoCs
-	  Note: Do not select this for S5P6440 and S5P6450.
-
-comment "System MMU"
-
-config S5P_SYSTEM_MMU
-	bool "S5P SYSTEM MMU"
-	depends on ARCH_EXYNOS4
-	help
-	  Say Y here if you want to enable System MMU
-
-config S5P_SLEEP
-	bool
-	help
-	  Internal config node to apply common S5P sleep management code.
-	  Can be selected by S5P and newer SoCs with similar sleep procedure.
-
-config S5P_DEV_FIMC0
-	bool
-	help
-	  Compile in platform device definitions for FIMC controller 0
-
-config S5P_DEV_FIMC1
-	bool
-	help
-	  Compile in platform device definitions for FIMC controller 1
-
-config S5P_DEV_FIMC2
-	bool
-	help
-	  Compile in platform device definitions for FIMC controller 2
-
-config S5P_DEV_FIMC3
-	bool
-	help
-	  Compile in platform device definitions for FIMC controller 3
-
-config S5P_DEV_JPEG
-	bool
-	help
-	  Compile in platform device definitions for JPEG codec
-
-config S5P_DEV_G2D
-	bool
-	help
-	  Compile in platform device definitions for G2D device
-
-config S5P_DEV_FIMD0
-	bool
-	help
-	  Compile in platform device definitions for FIMD controller 0
-
-config S5P_DEV_I2C_HDMIPHY
-	bool
-	help
-	  Compile in platform device definitions for I2C HDMIPHY controller
-
-config S5P_DEV_MFC
-	bool
-	help
-	  Compile in platform device definitions for MFC
-
-config S5P_DEV_ONENAND
-	bool
-	help
-	  Compile in platform device definition for OneNAND controller
-
-config S5P_DEV_CSIS0
-	bool
-	help
-	  Compile in platform device definitions for MIPI-CSIS channel 0
-
-config S5P_DEV_CSIS1
-	bool
-	help
-	  Compile in platform device definitions for MIPI-CSIS channel 1
-
-config S5P_DEV_TV
-	bool
-	help
-	  Compile in platform device definition for TV interface
-
-config S5P_DEV_USB_EHCI
-	bool
-	help
-	  Compile in platform device definition for USB EHCI
-
-config S5P_SETUP_MIPIPHY
-	bool
-	help
-	  Compile in common setup code for MIPI-CSIS and MIPI-DSIM devices
diff --git a/arch/arm/plat-s5p/Makefile b/arch/arm/plat-s5p/Makefile
deleted file mode 100644
index 4bd8241..0000000
--- a/arch/arm/plat-s5p/Makefile
+++ /dev/null
@@ -1,28 +0,0 @@
-# arch/arm/plat-s5p/Makefile
-#
-# Copyright (c) 2009 Samsung Electronics Co., Ltd.
-# 		http://www.samsung.com/
-#
-# Licensed under GPLv2
-
-obj-y				:=
-obj-m				:=
-obj-n				:= dummy.o
-obj-				:=
-
-# Core files
-
-obj-y				+= clock.o
-obj-y				+= irq.o
-obj-$(CONFIG_S5P_EXT_INT)	+= irq-eint.o
-obj-$(CONFIG_S5P_GPIO_INT)	+= irq-gpioint.o
-obj-$(CONFIG_S5P_SYSTEM_MMU)	+= sysmmu.o
-obj-$(CONFIG_S5P_PM)		+= pm.o irq-pm.o
-obj-$(CONFIG_S5P_SLEEP)		+= sleep.o
-obj-$(CONFIG_S5P_HRT) 		+= s5p-time.o
-
-# devices
-
-obj-$(CONFIG_S5P_DEV_UART)	+= dev-uart.o
-obj-$(CONFIG_S5P_DEV_MFC)	+= dev-mfc.o
-obj-$(CONFIG_S5P_SETUP_MIPIPHY)	+= setup-mipiphy.o
diff --git a/arch/arm/plat-s5p/dev-uart.c b/arch/arm/plat-s5p/dev-uart.c
deleted file mode 100644
index c9308db..0000000
--- a/arch/arm/plat-s5p/dev-uart.c
+++ /dev/null
@@ -1,137 +0,0 @@
-/* linux/arch/arm/plat-s5p/dev-uart.c
- *
- * Copyright (c) 2009 Samsung Electronics Co., Ltd.
- *		http://www.samsung.com/
- *
- * Base S5P UART resource and device definitions
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
-*/
-
-#include <linux/kernel.h>
-#include <linux/types.h>
-#include <linux/interrupt.h>
-#include <linux/list.h>
-#include <linux/platform_device.h>
-
-#include <asm/mach/arch.h>
-#include <asm/mach/irq.h>
-#include <mach/hardware.h>
-#include <mach/map.h>
-
-#include <plat/devs.h>
-
- /* Serial port registrations */
-
-static struct resource s5p_uart0_resource[] = {
-	[0] = {
-		.start	= S5P_PA_UART0,
-		.end	= S5P_PA_UART0 + S5P_SZ_UART - 1,
-		.flags	= IORESOURCE_MEM,
-	},
-	[1] = {
-		.start	= IRQ_UART0,
-		.end	= IRQ_UART0,
-		.flags	= IORESOURCE_IRQ,
-	},
-};
-
-static struct resource s5p_uart1_resource[] = {
-	[0] = {
-		.start	= S5P_PA_UART1,
-		.end	= S5P_PA_UART1 + S5P_SZ_UART - 1,
-		.flags	= IORESOURCE_MEM,
-	},
-	[1] = {
-		.start	= IRQ_UART1,
-		.end	= IRQ_UART1,
-		.flags	= IORESOURCE_IRQ,
-	},
-};
-
-static struct resource s5p_uart2_resource[] = {
-	[0] = {
-		.start	= S5P_PA_UART2,
-		.end	= S5P_PA_UART2 + S5P_SZ_UART - 1,
-		.flags	= IORESOURCE_MEM,
-	},
-	[1] = {
-		.start	= IRQ_UART2,
-		.end	= IRQ_UART2,
-		.flags	= IORESOURCE_IRQ,
-	},
-};
-
-static struct resource s5p_uart3_resource[] = {
-#if CONFIG_SERIAL_SAMSUNG_UARTS > 3
-	[0] = {
-		.start	= S5P_PA_UART3,
-		.end	= S5P_PA_UART3 + S5P_SZ_UART - 1,
-		.flags	= IORESOURCE_MEM,
-	},
-	[1] = {
-		.start	= IRQ_UART3,
-		.end	= IRQ_UART3,
-		.flags	= IORESOURCE_IRQ,
-	},
-#endif
-};
-
-static struct resource s5p_uart4_resource[] = {
-#if CONFIG_SERIAL_SAMSUNG_UARTS > 4
-	[0] = {
-		.start	= S5P_PA_UART4,
-		.end	= S5P_PA_UART4 + S5P_SZ_UART - 1,
-		.flags	= IORESOURCE_MEM,
-	},
-	[1] = {
-		.start	= IRQ_UART4,
-		.end	= IRQ_UART4,
-		.flags	= IORESOURCE_IRQ,
-	},
-#endif
-};
-
-static struct resource s5p_uart5_resource[] = {
-#if CONFIG_SERIAL_SAMSUNG_UARTS > 5
-	[0] = {
-		.start	= S5P_PA_UART5,
-		.end	= S5P_PA_UART5 + S5P_SZ_UART - 1,
-		.flags	= IORESOURCE_MEM,
-	},
-	[1] = {
-		.start	= IRQ_UART5,
-		.end	= IRQ_UART5,
-		.flags	= IORESOURCE_IRQ,
-	},
-#endif
-};
-
-struct s3c24xx_uart_resources s5p_uart_resources[] __initdata = {
-	[0] = {
-		.resources	= s5p_uart0_resource,
-		.nr_resources	= ARRAY_SIZE(s5p_uart0_resource),
-	},
-	[1] = {
-		.resources	= s5p_uart1_resource,
-		.nr_resources	= ARRAY_SIZE(s5p_uart1_resource),
-	},
-	[2] = {
-		.resources	= s5p_uart2_resource,
-		.nr_resources	= ARRAY_SIZE(s5p_uart2_resource),
-	},
-	[3] = {
-		.resources	= s5p_uart3_resource,
-		.nr_resources	= ARRAY_SIZE(s5p_uart3_resource),
-	},
-	[4] = {
-		.resources	= s5p_uart4_resource,
-		.nr_resources	= ARRAY_SIZE(s5p_uart4_resource),
-	},
-	[5] = {
-		.resources	= s5p_uart5_resource,
-		.nr_resources	= ARRAY_SIZE(s5p_uart5_resource),
-	},
-};
diff --git a/arch/arm/plat-s5p/sysmmu.c b/arch/arm/plat-s5p/sysmmu.c
deleted file mode 100644
index c8bec9c..0000000
--- a/arch/arm/plat-s5p/sysmmu.c
+++ /dev/null
@@ -1,313 +0,0 @@
-/* linux/arch/arm/plat-s5p/sysmmu.c
- *
- * Copyright (c) 2010 Samsung Electronics Co., Ltd.
- *		http://www.samsung.com
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- */
-
-#include <linux/io.h>
-#include <linux/interrupt.h>
-#include <linux/platform_device.h>
-#include <linux/export.h>
-
-#include <asm/pgtable.h>
-
-#include <mach/map.h>
-#include <mach/regs-sysmmu.h>
-#include <plat/sysmmu.h>
-
-#define CTRL_ENABLE	0x5
-#define CTRL_BLOCK	0x7
-#define CTRL_DISABLE	0x0
-
-static struct device *dev;
-
-static unsigned short fault_reg_offset[SYSMMU_FAULTS_NUM] = {
-	S5P_PAGE_FAULT_ADDR,
-	S5P_AR_FAULT_ADDR,
-	S5P_AW_FAULT_ADDR,
-	S5P_DEFAULT_SLAVE_ADDR,
-	S5P_AR_FAULT_ADDR,
-	S5P_AR_FAULT_ADDR,
-	S5P_AW_FAULT_ADDR,
-	S5P_AW_FAULT_ADDR
-};
-
-static char *sysmmu_fault_name[SYSMMU_FAULTS_NUM] = {
-	"PAGE FAULT",
-	"AR MULTI-HIT FAULT",
-	"AW MULTI-HIT FAULT",
-	"BUS ERROR",
-	"AR SECURITY PROTECTION FAULT",
-	"AR ACCESS PROTECTION FAULT",
-	"AW SECURITY PROTECTION FAULT",
-	"AW ACCESS PROTECTION FAULT"
-};
-
-static int (*fault_handlers[S5P_SYSMMU_TOTAL_IPNUM])(
-		enum S5P_SYSMMU_INTERRUPT_TYPE itype,
-		unsigned long pgtable_base,
-		unsigned long fault_addr);
-
-/*
- * If adjacent 2 bits are true, the system MMU is enabled.
- * The system MMU is disabled, otherwise.
- */
-static unsigned long sysmmu_states;
-
-static inline void set_sysmmu_active(sysmmu_ips ips)
-{
-	sysmmu_states |= 3 << (ips * 2);
-}
-
-static inline void set_sysmmu_inactive(sysmmu_ips ips)
-{
-	sysmmu_states &= ~(3 << (ips * 2));
-}
-
-static inline int is_sysmmu_active(sysmmu_ips ips)
-{
-	return sysmmu_states & (3 << (ips * 2));
-}
-
-static void __iomem *sysmmusfrs[S5P_SYSMMU_TOTAL_IPNUM];
-
-static inline void sysmmu_block(sysmmu_ips ips)
-{
-	__raw_writel(CTRL_BLOCK, sysmmusfrs[ips] + S5P_MMU_CTRL);
-	dev_dbg(dev, "%s is blocked.\n", sysmmu_ips_name[ips]);
-}
-
-static inline void sysmmu_unblock(sysmmu_ips ips)
-{
-	__raw_writel(CTRL_ENABLE, sysmmusfrs[ips] + S5P_MMU_CTRL);
-	dev_dbg(dev, "%s is unblocked.\n", sysmmu_ips_name[ips]);
-}
-
-static inline void __sysmmu_tlb_invalidate(sysmmu_ips ips)
-{
-	__raw_writel(0x1, sysmmusfrs[ips] + S5P_MMU_FLUSH);
-	dev_dbg(dev, "TLB of %s is invalidated.\n", sysmmu_ips_name[ips]);
-}
-
-static inline void __sysmmu_set_ptbase(sysmmu_ips ips, unsigned long pgd)
-{
-	if (unlikely(pgd == 0)) {
-		pgd = (unsigned long)ZERO_PAGE(0);
-		__raw_writel(0x20, sysmmusfrs[ips] + S5P_MMU_CFG); /* 4KB LV1 */
-	} else {
-		__raw_writel(0x0, sysmmusfrs[ips] + S5P_MMU_CFG); /* 16KB LV1 */
-	}
-
-	__raw_writel(pgd, sysmmusfrs[ips] + S5P_PT_BASE_ADDR);
-
-	dev_dbg(dev, "Page table base of %s is initialized with 0x%08lX.\n",
-						sysmmu_ips_name[ips], pgd);
-	__sysmmu_tlb_invalidate(ips);
-}
-
-void sysmmu_set_fault_handler(sysmmu_ips ips,
-			int (*handler)(enum S5P_SYSMMU_INTERRUPT_TYPE itype,
-					unsigned long pgtable_base,
-					unsigned long fault_addr))
-{
-	BUG_ON(!((ips >= SYSMMU_MDMA) && (ips < S5P_SYSMMU_TOTAL_IPNUM)));
-	fault_handlers[ips] = handler;
-}
-
-static irqreturn_t s5p_sysmmu_irq(int irq, void *dev_id)
-{
-	/* SYSMMU is in blocked when interrupt occurred. */
-	unsigned long base = 0;
-	sysmmu_ips ips = (sysmmu_ips)dev_id;
-	enum S5P_SYSMMU_INTERRUPT_TYPE itype;
-
-	itype = (enum S5P_SYSMMU_INTERRUPT_TYPE)
-		__ffs(__raw_readl(sysmmusfrs[ips] + S5P_INT_STATUS));
-
-	BUG_ON(!((itype >= 0) && (itype < 8)));
-
-	dev_alert(dev, "%s occurred by %s.\n", sysmmu_fault_name[itype],
-							sysmmu_ips_name[ips]);
-
-	if (fault_handlers[ips]) {
-		unsigned long addr;
-
-		base = __raw_readl(sysmmusfrs[ips] + S5P_PT_BASE_ADDR);
-		addr = __raw_readl(sysmmusfrs[ips] + fault_reg_offset[itype]);
-
-		if (fault_handlers[ips](itype, base, addr)) {
-			__raw_writel(1 << itype,
-					sysmmusfrs[ips] + S5P_INT_CLEAR);
-			dev_notice(dev, "%s from %s is resolved."
-					" Retrying translation.\n",
-				sysmmu_fault_name[itype], sysmmu_ips_name[ips]);
-		} else {
-			base = 0;
-		}
-	}
-
-	sysmmu_unblock(ips);
-
-	if (!base)
-		dev_notice(dev, "%s from %s is not handled.\n",
-			sysmmu_fault_name[itype], sysmmu_ips_name[ips]);
-
-	return IRQ_HANDLED;
-}
-
-void s5p_sysmmu_set_tablebase_pgd(sysmmu_ips ips, unsigned long pgd)
-{
-	if (is_sysmmu_active(ips)) {
-		sysmmu_block(ips);
-		__sysmmu_set_ptbase(ips, pgd);
-		sysmmu_unblock(ips);
-	} else {
-		dev_dbg(dev, "%s is disabled. "
-			"Skipping initializing page table base.\n",
-						sysmmu_ips_name[ips]);
-	}
-}
-
-void s5p_sysmmu_enable(sysmmu_ips ips, unsigned long pgd)
-{
-	if (!is_sysmmu_active(ips)) {
-		sysmmu_clk_enable(ips);
-
-		__sysmmu_set_ptbase(ips, pgd);
-
-		__raw_writel(CTRL_ENABLE, sysmmusfrs[ips] + S5P_MMU_CTRL);
-
-		set_sysmmu_active(ips);
-		dev_dbg(dev, "%s is enabled.\n", sysmmu_ips_name[ips]);
-	} else {
-		dev_dbg(dev, "%s is already enabled.\n", sysmmu_ips_name[ips]);
-	}
-}
-
-void s5p_sysmmu_disable(sysmmu_ips ips)
-{
-	if (is_sysmmu_active(ips)) {
-		__raw_writel(CTRL_DISABLE, sysmmusfrs[ips] + S5P_MMU_CTRL);
-		set_sysmmu_inactive(ips);
-		sysmmu_clk_disable(ips);
-		dev_dbg(dev, "%s is disabled.\n", sysmmu_ips_name[ips]);
-	} else {
-		dev_dbg(dev, "%s is already disabled.\n", sysmmu_ips_name[ips]);
-	}
-}
-
-void s5p_sysmmu_tlb_invalidate(sysmmu_ips ips)
-{
-	if (is_sysmmu_active(ips)) {
-		sysmmu_block(ips);
-		__sysmmu_tlb_invalidate(ips);
-		sysmmu_unblock(ips);
-	} else {
-		dev_dbg(dev, "%s is disabled. "
-			"Skipping invalidating TLB.\n", sysmmu_ips_name[ips]);
-	}
-}
-
-static int s5p_sysmmu_probe(struct platform_device *pdev)
-{
-	int i, ret;
-	struct resource *res, *mem;
-
-	dev = &pdev->dev;
-
-	for (i = 0; i < S5P_SYSMMU_TOTAL_IPNUM; i++) {
-		int irq;
-
-		sysmmu_clk_init(dev, i);
-		sysmmu_clk_disable(i);
-
-		res = platform_get_resource(pdev, IORESOURCE_MEM, i);
-		if (!res) {
-			dev_err(dev, "Failed to get the resource of %s.\n",
-							sysmmu_ips_name[i]);
-			ret = -ENODEV;
-			goto err_res;
-		}
-
-		mem = request_mem_region(res->start, resource_size(res),
-					 pdev->name);
-		if (!mem) {
-			dev_err(dev, "Failed to request the memory region of %s.\n",
-							sysmmu_ips_name[i]);
-			ret = -EBUSY;
-			goto err_res;
-		}
-
-		sysmmusfrs[i] = ioremap(res->start, resource_size(res));
-		if (!sysmmusfrs[i]) {
-			dev_err(dev, "Failed to ioremap() for %s.\n",
-							sysmmu_ips_name[i]);
-			ret = -ENXIO;
-			goto err_reg;
-		}
-
-		irq = platform_get_irq(pdev, i);
-		if (irq <= 0) {
-			dev_err(dev, "Failed to get the IRQ resource of %s.\n",
-							sysmmu_ips_name[i]);
-			ret = -ENOENT;
-			goto err_map;
-		}
-
-		if (request_irq(irq, s5p_sysmmu_irq, IRQF_DISABLED,
-						pdev->name, (void *)i)) {
-			dev_err(dev, "Failed to request IRQ for %s.\n",
-							sysmmu_ips_name[i]);
-			ret = -ENOENT;
-			goto err_map;
-		}
-	}
-
-	return 0;
-
-err_map:
-	iounmap(sysmmusfrs[i]);
-err_reg:
-	release_mem_region(mem->start, resource_size(mem));
-err_res:
-	return ret;
-}
-
-static int s5p_sysmmu_remove(struct platform_device *pdev)
-{
-	return 0;
-}
-int s5p_sysmmu_runtime_suspend(struct device *dev)
-{
-	return 0;
-}
-
-int s5p_sysmmu_runtime_resume(struct device *dev)
-{
-	return 0;
-}
-
-const struct dev_pm_ops s5p_sysmmu_pm_ops = {
-	.runtime_suspend	= s5p_sysmmu_runtime_suspend,
-	.runtime_resume		= s5p_sysmmu_runtime_resume,
-};
-
-static struct platform_driver s5p_sysmmu_driver = {
-	.probe		= s5p_sysmmu_probe,
-	.remove		= s5p_sysmmu_remove,
-	.driver		= {
-		.owner		= THIS_MODULE,
-		.name		= "s5p-sysmmu",
-		.pm		= &s5p_sysmmu_pm_ops,
-	}
-};
-
-static int __init s5p_sysmmu_init(void)
-{
-	return platform_driver_register(&s5p_sysmmu_driver);
-}
-arch_initcall(s5p_sysmmu_init);
diff --git a/arch/arm/plat-samsung/Kconfig b/arch/arm/plat-samsung/Kconfig
index a0ffc77d..a2fae4e 100644
--- a/arch/arm/plat-samsung/Kconfig
+++ b/arch/arm/plat-samsung/Kconfig
@@ -13,6 +13,24 @@
 	help
 	  Base platform code for all Samsung SoC based systems
 
+config PLAT_S5P
+	bool
+	depends on (ARCH_S5P64X0 || ARCH_S5PC100 || ARCH_S5PV210 || ARCH_EXYNOS)
+	default y
+	select ARM_VIC if !ARCH_EXYNOS
+	select ARM_GIC if ARCH_EXYNOS
+	select GIC_NON_BANKED if ARCH_EXYNOS4
+	select NO_IOPORT
+	select ARCH_REQUIRE_GPIOLIB
+	select S3C_GPIO_TRACK
+	select S5P_GPIO_DRVSTR
+	select SAMSUNG_GPIOLIB_4BIT
+	select PLAT_SAMSUNG
+	select SAMSUNG_CLKSRC
+	select SAMSUNG_IRQ_VIC_TIMER
+	help
+	  Base platform code for Samsung's S5P series SoCs.
+
 if PLAT_SAMSUNG
 
 # boot configurations
@@ -50,6 +68,14 @@
 	  this configuration should be between zero and two. The port
 	  must have been initialised by the boot-loader before use.
 
+# timer options
+
+config S5P_HRT
+	bool
+	select SAMSUNG_DEV_PWM
+	help
+	  Use the High Resolution timer support
+
 # clock options
 
 config SAMSUNG_CLKSRC
@@ -58,6 +84,11 @@
 	  Select the clock code for the clksrc implementation
 	  used by newer systems such as the S3C64XX.
 
+config S5P_CLOCK
+	def_bool (ARCH_S5P64X0 || ARCH_S5PC100 || ARCH_S5PV210 || ARCH_EXYNOS)
+	help
+	  Support common clock code for ARCH_S5P and ARCH_EXYNOS SoCs
+
 # options for IRQ support
 
 config SAMSUNG_IRQ_VIC_TIMER
@@ -65,6 +96,22 @@
        help
          Internal configuration to build the VIC timer interrupt code.
 
+config S5P_IRQ
+	def_bool (ARCH_S5P64X0 || ARCH_S5PC100 || ARCH_S5PV210 || ARCH_EXYNOS)
+	help
+	  Support common interrupt code for ARCH_S5P and ARCH_EXYNOS SoCs
+
+config S5P_EXT_INT
+	bool
+	help
+	  Use the external interrupts (other than GPIO interrupts).
+	  Note: Do not choose this for S5P6440 and S5P6450.
+
+config S5P_GPIO_INT
+	bool
+	help
+	  Common code for the GPIO interrupts (other than external interrupts).
+
 # options for gpio configuration support
 
 config SAMSUNG_GPIOLIB_4BIT
@@ -117,6 +164,12 @@
 	  Internal configuration option to enable the s3c specific gpio
 	  chip tracking if the platform requires it.
 
+# uart options
+
+config S5P_DEV_UART
+	def_bool y
+	depends on (ARCH_S5P64X0 || ARCH_S5PC100 || ARCH_S5PV210)
+
 # ADC driver
 
 config S3C_ADC
@@ -274,6 +327,76 @@
 	help
 	  Compile in platform device definition LCD backlight with PWM Timer
 
+config S5P_DEV_CSIS0
+	bool
+	help
+	  Compile in platform device definitions for MIPI-CSIS channel 0
+
+config S5P_DEV_CSIS1
+	bool
+	help
+	  Compile in platform device definitions for MIPI-CSIS channel 1
+
+config S5P_DEV_FIMC0
+	bool
+	help
+	  Compile in platform device definitions for FIMC controller 0
+
+config S5P_DEV_FIMC1
+	bool
+	help
+	  Compile in platform device definitions for FIMC controller 1
+
+config S5P_DEV_FIMC2
+	bool
+	help
+	  Compile in platform device definitions for FIMC controller 2
+
+config S5P_DEV_FIMC3
+	bool
+	help
+	  Compile in platform device definitions for FIMC controller 3
+
+config S5P_DEV_FIMD0
+	bool
+	help
+	  Compile in platform device definitions for FIMD controller 0
+
+config S5P_DEV_G2D
+	bool
+	help
+	  Compile in platform device definitions for G2D device
+
+config S5P_DEV_I2C_HDMIPHY
+	bool
+	help
+	  Compile in platform device definitions for I2C HDMIPHY controller
+
+config S5P_DEV_JPEG
+	bool
+	help
+	  Compile in platform device definitions for JPEG codec
+
+config S5P_DEV_MFC
+	bool
+	help
+	  Compile in memory setup (init) code for MFC
+
+config S5P_DEV_ONENAND
+	bool
+	help
+	  Compile in platform device definition for OneNAND controller
+
+config S5P_DEV_TV
+	bool
+	help
+	  Compile in platform device definition for TV interface
+
+config S5P_DEV_USB_EHCI
+	bool
+	help
+	  Compile in platform device definition for USB EHCI
+
 config S3C24XX_PWM
 	bool "PWM device support"
 	select HAVE_PWM
@@ -281,6 +404,11 @@
 	  Support for exporting the PWM timer blocks via the pwm device
 	  system
 
+config S5P_SETUP_MIPIPHY
+	bool
+	help
+	  Compile in common setup code for MIPI-CSIS and MIPI-DSIM devices
+
 # DMA
 
 config S3C_DMA
@@ -291,7 +419,7 @@
 config SAMSUNG_DMADEV
 	bool
 	select DMADEVICES
-	select PL330_DMA if (CPU_EXYNOS4210 || CPU_S5PV210 || CPU_S5PC100 || \
+	select PL330_DMA if (ARCH_EXYNOS5 || ARCH_EXYNOS4 || CPU_S5PV210 || CPU_S5PC100 || \
 					CPU_S5P6450 || CPU_S5P6440)
 	select ARM_AMBA
 	help
@@ -351,6 +479,18 @@
 	  and above. This code allows a set of interrupt to wakeup-mask
 	  mappings. See <plat/wakeup-mask.h>
 
+config S5P_PM
+	bool
+	help
+	  Common code for power management support on S5P and newer SoCs.
+	  Note: Do not select this for S5P6440 and S5P6450.
+
+config S5P_SLEEP
+	bool
+	help
+	  Internal config node to apply common S5P sleep management code.
+	  Can be selected by S5P and newer SoCs with similar sleep procedure.
+
 comment "Power Domain"
 
 config SAMSUNG_PD
diff --git a/arch/arm/plat-samsung/Makefile b/arch/arm/plat-samsung/Makefile
index 6012366..860b2db 100644
--- a/arch/arm/plat-samsung/Makefile
+++ b/arch/arm/plat-samsung/Makefile
@@ -13,12 +13,18 @@
 
 obj-y				+= init.o cpu.o
 obj-$(CONFIG_ARCH_USES_GETTIMEOFFSET)   += time.o
+obj-$(CONFIG_S5P_HRT) 		+= s5p-time.o
+
 obj-y				+= clock.o
 obj-y				+= pwm-clock.o
 
 obj-$(CONFIG_SAMSUNG_CLKSRC)	+= clock-clksrc.o
+obj-$(CONFIG_S5P_CLOCK)		+= s5p-clock.o
 
 obj-$(CONFIG_SAMSUNG_IRQ_VIC_TIMER) += irq-vic-timer.o
+obj-$(CONFIG_S5P_IRQ)		+= s5p-irq.o
+obj-$(CONFIG_S5P_EXT_INT)	+= s5p-irq-eint.o
+obj-$(CONFIG_S5P_GPIO_INT)	+= s5p-irq-gpioint.o
 
 # ADC
 
@@ -30,9 +36,13 @@
 
 obj-y				+= devs.o
 obj-y				+= dev-uart.o
+obj-$(CONFIG_S5P_DEV_MFC)	+= s5p-dev-mfc.o
+obj-$(CONFIG_S5P_DEV_UART)	+= s5p-dev-uart.o
 
 obj-$(CONFIG_SAMSUNG_DEV_BACKLIGHT)	+= dev-backlight.o
 
+obj-$(CONFIG_S5P_SETUP_MIPIPHY)	+= setup-mipiphy.o
+
 # DMA support
 
 obj-$(CONFIG_S3C_DMA)		+= dma.o s3c-dma-ops.o
@@ -47,6 +57,9 @@
 
 obj-$(CONFIG_SAMSUNG_WAKEMASK)	+= wakeup-mask.o
 
+obj-$(CONFIG_S5P_PM)		+= s5p-pm.o s5p-irq-pm.o
+obj-$(CONFIG_S5P_SLEEP)		+= s5p-sleep.o
+
 # PD support
 
 obj-$(CONFIG_SAMSUNG_PD)	+= pd.o
diff --git a/arch/arm/plat-samsung/include/plat/cpu.h b/arch/arm/plat-samsung/include/plat/cpu.h
index 787ceac..0721293 100644
--- a/arch/arm/plat-samsung/include/plat/cpu.h
+++ b/arch/arm/plat-samsung/include/plat/cpu.h
@@ -202,7 +202,7 @@
 extern struct bus_type s3c6410_subsys;
 extern struct bus_type s5p64x0_subsys;
 extern struct bus_type s5pv210_subsys;
-extern struct bus_type exynos4_subsys;
+extern struct bus_type exynos_subsys;
 
 extern void (*s5pc1xx_idle)(void);
 
diff --git a/arch/arm/plat-samsung/include/plat/devs.h b/arch/arm/plat-samsung/include/plat/devs.h
index 2155d4a..61ca2f3 100644
--- a/arch/arm/plat-samsung/include/plat/devs.h
+++ b/arch/arm/plat-samsung/include/plat/devs.h
@@ -133,7 +133,8 @@
 extern struct platform_device exynos4_device_pcm2;
 extern struct platform_device exynos4_device_pd[];
 extern struct platform_device exynos4_device_spdif;
-extern struct platform_device exynos4_device_sysmmu;
+
+extern struct platform_device exynos_device_drm;
 
 extern struct platform_device samsung_asoc_dma;
 extern struct platform_device samsung_asoc_idma;
diff --git a/arch/arm/plat-samsung/include/plat/dma-pl330.h b/arch/arm/plat-samsung/include/plat/dma-pl330.h
index 0670f37..d384a80 100644
--- a/arch/arm/plat-samsung/include/plat/dma-pl330.h
+++ b/arch/arm/plat-samsung/include/plat/dma-pl330.h
@@ -90,6 +90,7 @@
 	DMACH_MIPI_HSI5,
 	DMACH_MIPI_HSI6,
 	DMACH_MIPI_HSI7,
+	DMACH_DISP1,
 	DMACH_MTOM_0,
 	DMACH_MTOM_1,
 	DMACH_MTOM_2,
diff --git a/arch/arm/plat-samsung/include/plat/fb.h b/arch/arm/plat-samsung/include/plat/fb.h
index 0fedf47..536002f 100644
--- a/arch/arm/plat-samsung/include/plat/fb.h
+++ b/arch/arm/plat-samsung/include/plat/fb.h
@@ -24,15 +24,16 @@
 
 /**
  * struct s3c_fb_pd_win - per window setup data
- * @win_mode: The display parameters to initialise (not for window 0)
+ * @xres     : The window X size.
+ * @yres     : The window Y size.
  * @virtual_x: The virtual X size.
  * @virtual_y: The virtual Y size.
  */
 struct s3c_fb_pd_win {
-	struct fb_videomode	win_mode;
-
 	unsigned short		default_bpp;
 	unsigned short		max_bpp;
+	unsigned short		xres;
+	unsigned short		yres;
 	unsigned short		virtual_x;
 	unsigned short		virtual_y;
 };
@@ -45,6 +46,7 @@
  * @default_win: default window layer number to be used for UI layer.
  * @vidcon0: The base vidcon0 values to control the panel data format.
  * @vidcon1: The base vidcon1 values to control the panel data output.
+ * @vtiming: Video timing when connected to a RGB type panel.
  * @win: The setup data for each hardware window, or NULL for unused.
  * @display_mode: The LCD output display mode.
  *
@@ -58,8 +60,7 @@
 	void	(*setup_gpio)(void);
 
 	struct s3c_fb_pd_win	*win[S3C_FB_MAX_WIN];
-
-	u32			 default_win;
+	struct fb_videomode     *vtiming;
 
 	u32			 vidcon0;
 	u32			 vidcon1;
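
The fb.h rework above drops the per-window win_mode and default_win fields: the panel timing now lives in a single struct fb_videomode hung off @vtiming, and each s3c_fb_pd_win carries only its geometry and depth. A hypothetical board-file fragment under the new layout might look like the sketch below; all names and numbers are illustrative, and the VIDCON* bit macros are assumed to come from the existing plat regs-fb headers rather than from this patch.

#include <linux/fb.h>
#include <plat/fb.h>
#include <plat/regs-fb-v4.h>	/* assumed source of the VIDCON* bits */

/* illustrative window geometry only */
static struct s3c_fb_pd_win example_fb_win0 = {
	.xres		= 480,
	.yres		= 800,
	.virtual_x	= 480,
	.virtual_y	= 1600,
	.default_bpp	= 24,
	.max_bpp	= 32,
};

/* panel timing now sits in one fb_videomode shared by all windows */
static struct fb_videomode example_lcd_timing = {
	.left_margin	= 9,
	.right_margin	= 9,
	.upper_margin	= 5,
	.lower_margin	= 5,
	.hsync_len	= 2,
	.vsync_len	= 2,
	.xres		= 480,
	.yres		= 800,
};

static struct s3c_fb_platdata example_fb_pdata __initdata = {
	.win[0]		= &example_fb_win0,
	.vtiming	= &example_lcd_timing,
	.vidcon0	= VIDCON0_VIDOUT_RGB | VIDCON0_PNRMODE_RGB,
	.vidcon1	= VIDCON1_INV_HSYNC | VIDCON1_INV_VSYNC,
};
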
diff --git a/arch/arm/plat-samsung/include/plat/map-s3c.h b/arch/arm/plat-samsung/include/plat/map-s3c.h
index 7d04875..c0c70a8 100644
--- a/arch/arm/plat-samsung/include/plat/map-s3c.h
+++ b/arch/arm/plat-samsung/include/plat/map-s3c.h
@@ -22,7 +22,7 @@
 #define S3C24XX_VA_WATCHDOG	S3C_VA_WATCHDOG
 
 #define S3C2412_VA_SSMC		S3C_ADDR_CPU(0x00000000)
-#define S3C2412_VA_EBI		S3C_ADDR_CPU(0x00010000)
+#define S3C2412_VA_EBI		S3C_ADDR_CPU(0x00100000)
 
 #define S3C2410_PA_UART		(0x50000000)
 #define S3C24XX_PA_UART		S3C2410_PA_UART
diff --git a/arch/arm/plat-samsung/include/plat/s3c2416.h b/arch/arm/plat-samsung/include/plat/s3c2416.h
index de2b5bd..7178e33 100644
--- a/arch/arm/plat-samsung/include/plat/s3c2416.h
+++ b/arch/arm/plat-samsung/include/plat/s3c2416.h
@@ -24,6 +24,9 @@
 extern  int s3c2416_baseclk_add(void);
 
 extern void s3c2416_restart(char mode, const char *cmd);
+
+extern struct syscore_ops s3c2416_irq_syscore_ops;
+
 #else
 #define s3c2416_init_clocks NULL
 #define s3c2416_init_uarts NULL
diff --git a/arch/arm/plat-samsung/include/plat/s5p-clock.h b/arch/arm/plat-samsung/include/plat/s5p-clock.h
index 1de4b32..8364b4b 100644
--- a/arch/arm/plat-samsung/include/plat/s5p-clock.h
+++ b/arch/arm/plat-samsung/include/plat/s5p-clock.h
@@ -32,8 +32,10 @@
 extern struct clk s5p_clk_27m;
 extern struct clk clk_fout_apll;
 extern struct clk clk_fout_bpll;
+extern struct clk clk_fout_bpll_div2;
 extern struct clk clk_fout_cpll;
 extern struct clk clk_fout_mpll;
+extern struct clk clk_fout_mpll_div2;
 extern struct clk clk_fout_epll;
 extern struct clk clk_fout_dpll;
 extern struct clk clk_fout_vpll;
@@ -42,8 +44,10 @@
 
 extern struct clksrc_sources clk_src_apll;
 extern struct clksrc_sources clk_src_bpll;
+extern struct clksrc_sources clk_src_bpll_fout;
 extern struct clksrc_sources clk_src_cpll;
 extern struct clksrc_sources clk_src_mpll;
+extern struct clksrc_sources clk_src_mpll_fout;
 extern struct clksrc_sources clk_src_epll;
 extern struct clksrc_sources clk_src_dpll;
 
diff --git a/arch/arm/plat-samsung/include/plat/sysmmu.h b/arch/arm/plat-samsung/include/plat/sysmmu.h
deleted file mode 100644
index 5fe8ee0..0000000
--- a/arch/arm/plat-samsung/include/plat/sysmmu.h
+++ /dev/null
@@ -1,95 +0,0 @@
-/* linux/arch/arm/plat-samsung/include/plat/sysmmu.h
- *
- * Copyright (c) 2010-2011 Samsung Electronics Co., Ltd.
- *		http://www.samsung.com
- *
- * Samsung System MMU driver for S5P platform
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
-*/
-
-#ifndef __PLAT_SAMSUNG_SYSMMU_H
-#define __PLAT_SAMSUNG_SYSMMU_H __FILE__
-
-enum S5P_SYSMMU_INTERRUPT_TYPE {
-	SYSMMU_PAGEFAULT,
-	SYSMMU_AR_MULTIHIT,
-	SYSMMU_AW_MULTIHIT,
-	SYSMMU_BUSERROR,
-	SYSMMU_AR_SECURITY,
-	SYSMMU_AR_ACCESS,
-	SYSMMU_AW_SECURITY,
-	SYSMMU_AW_PROTECTION, /* 7 */
-	SYSMMU_FAULTS_NUM
-};
-
-#ifdef CONFIG_S5P_SYSTEM_MMU
-
-#include <mach/sysmmu.h>
-
-/**
- * s5p_sysmmu_enable() - enable system mmu of ip
- * @ips: The ip connected system mmu.
- * #pgd: Base physical address of the 1st level page table
- *
- * This function enable system mmu to transfer address
- * from virtual address to physical address
- */
-void s5p_sysmmu_enable(sysmmu_ips ips, unsigned long pgd);
-
-/**
- * s5p_sysmmu_disable() - disable sysmmu mmu of ip
- * @ips: The ip connected system mmu.
- *
- * This function disable system mmu to transfer address
- * from virtual address to physical address
- */
-void s5p_sysmmu_disable(sysmmu_ips ips);
-
-/**
- * s5p_sysmmu_set_tablebase_pgd() - set page table base address to refer page table
- * @ips: The ip connected system mmu.
- * @pgd: The page table base address.
- *
- * This function set page table base address
- * When system mmu transfer address from virtaul address to physical address,
- * system mmu refer address information from page table
- */
-void s5p_sysmmu_set_tablebase_pgd(sysmmu_ips ips, unsigned long pgd);
-
-/**
- * s5p_sysmmu_tlb_invalidate() - flush all TLB entry in system mmu
- * @ips: The ip connected system mmu.
- *
- * This function flush all TLB entry in system mmu
- */
-void s5p_sysmmu_tlb_invalidate(sysmmu_ips ips);
-
-/** s5p_sysmmu_set_fault_handler() - Fault handler for System MMUs
- * @itype: type of fault.
- * @pgtable_base: the physical address of page table base. This is 0 if @ips is
- *               SYSMMU_BUSERROR.
- * @fault_addr: the device (virtual) address that the System MMU tried to
- *             translated. This is 0 if @ips is SYSMMU_BUSERROR.
- * Called when interrupt occurred by the System MMUs
- * The device drivers of peripheral devices that has a System MMU can implement
- * a fault handler to resolve address translation fault by System MMU.
- * The meanings of return value and parameters are described below.
-
- * return value: non-zero if the fault is correctly resolved.
- *         zero if the fault is not handled.
- */
-void s5p_sysmmu_set_fault_handler(sysmmu_ips ips,
-			int (*handler)(enum S5P_SYSMMU_INTERRUPT_TYPE itype,
-					unsigned long pgtable_base,
-					unsigned long fault_addr));
-#else
-#define s5p_sysmmu_enable(ips, pgd) do { } while (0)
-#define s5p_sysmmu_disable(ips) do { } while (0)
-#define s5p_sysmmu_set_tablebase_pgd(ips, pgd) do { } while (0)
-#define s5p_sysmmu_tlb_invalidate(ips) do { } while (0)
-#define s5p_sysmmu_set_fault_handler(ips, handler) do { } while (0)
-#endif
-#endif /* __ASM_PLAT_SYSMMU_H */
diff --git a/arch/arm/plat-samsung/include/plat/watchdog-reset.h b/arch/arm/plat-samsung/include/plat/watchdog-reset.h
index f19aff1..bc4db9b 100644
--- a/arch/arm/plat-samsung/include/plat/watchdog-reset.h
+++ b/arch/arm/plat-samsung/include/plat/watchdog-reset.h
@@ -25,7 +25,7 @@
 
 	__raw_writel(0, S3C2410_WTCON);	  /* disable watchdog, to be safe  */
 
-	if (s3c2410_wdtclk)
+	if (!IS_ERR(s3c2410_wdtclk))
 		clk_enable(s3c2410_wdtclk);
 
 	/* put initial values into count and data */
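
The one-line change above replaces a NULL test with IS_ERR(): clk_get() reports failure with an ERR_PTR() cookie rather than NULL, so the old check would pass for a failed lookup and clk_enable() could be called on an error pointer. A minimal sketch of the intended pattern follows; the "watchdog" clock name and the example_* function names are assumptions for illustration, not taken from this hunk.

#include <linux/kernel.h>
#include <linux/clk.h>
#include <linux/err.h>

static struct clk *s3c2410_wdtclk;

static void example_wdt_clock_setup(void)
{
	/* clk_get() returns an ERR_PTR() value on failure, never NULL */
	s3c2410_wdtclk = clk_get(NULL, "watchdog");
	if (IS_ERR(s3c2410_wdtclk))
		pr_warn("%s: cannot get watchdog clock\n", __func__);
}

static void example_wdt_reset(void)
{
	/* only touch the clock if the earlier lookup actually succeeded */
	if (!IS_ERR(s3c2410_wdtclk))
		clk_enable(s3c2410_wdtclk);
}
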
diff --git a/arch/arm/plat-s5p/clock.c b/arch/arm/plat-samsung/s5p-clock.c
similarity index 88%
rename from arch/arm/plat-s5p/clock.c
rename to arch/arm/plat-samsung/s5p-clock.c
index f68a9bb..031a618 100644
--- a/arch/arm/plat-s5p/clock.c
+++ b/arch/arm/plat-samsung/s5p-clock.c
@@ -1,5 +1,4 @@
-/* linux/arch/arm/plat-s5p/clock.c
- *
+/*
  * Copyright 2009 Samsung Electronics Co., Ltd.
  *		http://www.samsung.com/
  *
@@ -68,6 +67,11 @@
 	.id		= -1,
 };
 
+struct clk clk_fout_bpll_div2 = {
+	.name		= "fout_bpll_div2",
+	.id		= -1,
+};
+
 /* CPLL clock output */
 
 struct clk clk_fout_cpll = {
@@ -83,6 +87,11 @@
 	.id		= -1,
 };
 
+struct clk clk_fout_mpll_div2 = {
+	.name		= "fout_mpll_div2",
+	.id		= -1,
+};
+
 /* EPLL clock output */
 struct clk clk_fout_epll = {
 	.name		= "fout_epll",
@@ -126,6 +135,16 @@
 	.nr_sources	= ARRAY_SIZE(clk_src_bpll_list),
 };
 
+static struct clk *clk_src_bpll_fout_list[] = {
+	[0] = &clk_fout_bpll_div2,
+	[1] = &clk_fout_bpll,
+};
+
+struct clksrc_sources clk_src_bpll_fout = {
+	.sources	= clk_src_bpll_fout_list,
+	.nr_sources	= ARRAY_SIZE(clk_src_bpll_fout_list),
+};
+
 /* Possible clock sources for CPLL Mux */
 static struct clk *clk_src_cpll_list[] = {
 	[0] = &clk_fin_cpll,
@@ -148,6 +167,16 @@
 	.nr_sources	= ARRAY_SIZE(clk_src_mpll_list),
 };
 
+static struct clk *clk_src_mpll_fout_list[] = {
+	[0] = &clk_fout_mpll_div2,
+	[1] = &clk_fout_mpll,
+};
+
+struct clksrc_sources clk_src_mpll_fout = {
+	.sources	= clk_src_mpll_fout_list,
+	.nr_sources	= ARRAY_SIZE(clk_src_mpll_fout_list),
+};
+
 /* Possible clock sources for EPLL Mux */
 static struct clk *clk_src_epll_list[] = {
 	[0] = &clk_fin_epll,
diff --git a/arch/arm/plat-s5p/dev-mfc.c b/arch/arm/plat-samsung/s5p-dev-mfc.c
similarity index 97%
rename from arch/arm/plat-s5p/dev-mfc.c
rename to arch/arm/plat-samsung/s5p-dev-mfc.c
index a30d36b..ad60894 100644
--- a/arch/arm/plat-s5p/dev-mfc.c
+++ b/arch/arm/plat-samsung/s5p-dev-mfc.c
@@ -1,5 +1,4 @@
-/* linux/arch/arm/plat-s5p/dev-mfc.c
- *
+/*
  * Copyright (C) 2010-2011 Samsung Electronics Co.Ltd
  *
  * Base S5P MFC resource and device definitions
@@ -9,7 +8,6 @@
  * published by the Free Software Foundation.
  */
 
-
 #include <linux/kernel.h>
 #include <linux/interrupt.h>
 #include <linux/platform_device.h>
diff --git a/arch/arm/plat-samsung/s5p-dev-uart.c b/arch/arm/plat-samsung/s5p-dev-uart.c
new file mode 100644
index 0000000..cafa3de
--- /dev/null
+++ b/arch/arm/plat-samsung/s5p-dev-uart.c
@@ -0,0 +1,89 @@
+/*
+ * Copyright (c) 2009,2012 Samsung Electronics Co., Ltd.
+ *		http://www.samsung.com/
+ *
+ * Base S5P UART resource and device definitions
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+*/
+
+#include <linux/kernel.h>
+#include <linux/types.h>
+#include <linux/interrupt.h>
+#include <linux/list.h>
+#include <linux/ioport.h>
+#include <linux/platform_device.h>
+
+#include <asm/mach/arch.h>
+#include <asm/mach/irq.h>
+#include <mach/hardware.h>
+#include <mach/map.h>
+
+#include <plat/devs.h>
+
+ /* Serial port registrations */
+
+static struct resource s5p_uart0_resource[] = {
+	[0] = DEFINE_RES_MEM(S5P_PA_UART0, S5P_SZ_UART),
+	[1] = DEFINE_RES_IRQ(IRQ_UART0),
+};
+
+static struct resource s5p_uart1_resource[] = {
+	[0] = DEFINE_RES_MEM(S5P_PA_UART1, S5P_SZ_UART),
+	[1] = DEFINE_RES_IRQ(IRQ_UART1),
+};
+
+static struct resource s5p_uart2_resource[] = {
+	[0] = DEFINE_RES_MEM(S5P_PA_UART2, S5P_SZ_UART),
+	[1] = DEFINE_RES_IRQ(IRQ_UART2),
+};
+
+static struct resource s5p_uart3_resource[] = {
+#if CONFIG_SERIAL_SAMSUNG_UARTS > 3
+	[0] = DEFINE_RES_MEM(S5P_PA_UART3, S5P_SZ_UART),
+	[1] = DEFINE_RES_IRQ(IRQ_UART3),
+#endif
+};
+
+static struct resource s5p_uart4_resource[] = {
+#if CONFIG_SERIAL_SAMSUNG_UARTS > 4
+	[0] = DEFINE_RES_MEM(S5P_PA_UART4, S5P_SZ_UART),
+	[1] = DEFINE_RES_IRQ(IRQ_UART4),
+#endif
+};
+
+static struct resource s5p_uart5_resource[] = {
+#if CONFIG_SERIAL_SAMSUNG_UARTS > 5
+	[0] = DEFINE_RES_MEM(S5P_PA_UART5, S5P_SZ_UART),
+	[1] = DEFINE_RES_IRQ(IRQ_UART5),
+#endif
+};
+
+struct s3c24xx_uart_resources s5p_uart_resources[] __initdata = {
+	[0] = {
+		.resources	= s5p_uart0_resource,
+		.nr_resources	= ARRAY_SIZE(s5p_uart0_resource),
+	},
+	[1] = {
+		.resources	= s5p_uart1_resource,
+		.nr_resources	= ARRAY_SIZE(s5p_uart1_resource),
+	},
+	[2] = {
+		.resources	= s5p_uart2_resource,
+		.nr_resources	= ARRAY_SIZE(s5p_uart2_resource),
+	},
+	[3] = {
+		.resources	= s5p_uart3_resource,
+		.nr_resources	= ARRAY_SIZE(s5p_uart3_resource),
+	},
+	[4] = {
+		.resources	= s5p_uart4_resource,
+		.nr_resources	= ARRAY_SIZE(s5p_uart4_resource),
+	},
+	[5] = {
+		.resources	= s5p_uart5_resource,
+		.nr_resources	= ARRAY_SIZE(s5p_uart5_resource),
+	},
+};
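
The new file above expresses each UART's MEM and IRQ resources with the DEFINE_RES_MEM()/DEFINE_RES_IRQ() helpers instead of the open-coded initialisers that the deleted plat-s5p/dev-uart.c carried. A minimal sketch of the equivalence, using the UART0 values from that file; the example_* array names are made up, and the expansion shown is approximate rather than the verbatim <linux/ioport.h> definition.

#include <linux/ioport.h>

/* S5P_PA_UART0, S5P_SZ_UART and IRQ_UART0 come from the mach headers,
 * as in the files above */

/* helper form, as used in s5p-dev-uart.c */
static struct resource example_uart0_res_new[] = {
	[0] = DEFINE_RES_MEM(S5P_PA_UART0, S5P_SZ_UART),
	[1] = DEFINE_RES_IRQ(IRQ_UART0),
};

/* roughly equivalent open-coded form, as in the removed dev-uart.c */
static struct resource example_uart0_res_old[] = {
	[0] = {
		.start	= S5P_PA_UART0,
		.end	= S5P_PA_UART0 + S5P_SZ_UART - 1,
		.flags	= IORESOURCE_MEM,
	},
	[1] = {
		.start	= IRQ_UART0,
		.end	= IRQ_UART0,
		.flags	= IORESOURCE_IRQ,
	},
};
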
diff --git a/arch/arm/plat-s5p/irq-eint.c b/arch/arm/plat-samsung/s5p-irq-eint.c
similarity index 98%
rename from arch/arm/plat-s5p/irq-eint.c
rename to arch/arm/plat-samsung/s5p-irq-eint.c
index 139c050..33bd3f3 100644
--- a/arch/arm/plat-s5p/irq-eint.c
+++ b/arch/arm/plat-samsung/s5p-irq-eint.c
@@ -1,5 +1,4 @@
-/* linux/arch/arm/plat-s5p/irq-eint.c
- *
+/*
  * Copyright (c) 2010 Samsung Electronics Co., Ltd.
  *		http://www.samsung.com
  *
diff --git a/arch/arm/plat-s5p/irq-gpioint.c b/arch/arm/plat-samsung/s5p-irq-gpioint.c
similarity index 98%
rename from arch/arm/plat-s5p/irq-gpioint.c
rename to arch/arm/plat-samsung/s5p-irq-gpioint.c
index 82c7311..f9431fe 100644
--- a/arch/arm/plat-s5p/irq-gpioint.c
+++ b/arch/arm/plat-samsung/s5p-irq-gpioint.c
@@ -1,5 +1,4 @@
-/* linux/arch/arm/plat-s5p/irq-gpioint.c
- *
+/*
  * Copyright (c) 2010 Samsung Electronics Co., Ltd.
  * Author: Kyungmin Park <kyungmin.park@samsung.com>
  * Author: Joonyoung Shim <jy0922.shim@samsung.com>
diff --git a/arch/arm/plat-s5p/irq-pm.c b/arch/arm/plat-samsung/s5p-irq-pm.c
similarity index 97%
rename from arch/arm/plat-s5p/irq-pm.c
rename to arch/arm/plat-samsung/s5p-irq-pm.c
index d1bfeca..7c1e3b7 100644
--- a/arch/arm/plat-s5p/irq-pm.c
+++ b/arch/arm/plat-samsung/s5p-irq-pm.c
@@ -1,5 +1,4 @@
-/* linux/arch/arm/plat-s5p/irq-pm.c
- *
+/*
  * Copyright (c) 2010 Samsung Electronics Co., Ltd.
  *		http://www.samsung.com
  *
diff --git a/arch/arm/plat-s5p/irq.c b/arch/arm/plat-samsung/s5p-irq.c
similarity index 95%
rename from arch/arm/plat-s5p/irq.c
rename to arch/arm/plat-samsung/s5p-irq.c
index afdaa10..dfb47d6 100644
--- a/arch/arm/plat-s5p/irq.c
+++ b/arch/arm/plat-samsung/s5p-irq.c
@@ -1,5 +1,4 @@
-/* arch/arm/plat-s5p/irq.c
- *
+/*
  * Copyright (c) 2009 Samsung Electronics Co., Ltd.
  *		http://www.samsung.com/
  *
diff --git a/arch/arm/plat-s5p/pm.c b/arch/arm/plat-samsung/s5p-pm.c
similarity index 95%
rename from arch/arm/plat-s5p/pm.c
rename to arch/arm/plat-samsung/s5p-pm.c
index d15dc47..0747468 100644
--- a/arch/arm/plat-s5p/pm.c
+++ b/arch/arm/plat-samsung/s5p-pm.c
@@ -1,5 +1,4 @@
-/* linux/arch/arm/plat-s5p/pm.c
- *
+/*
  * Copyright (c) 2010 Samsung Electronics Co., Ltd.
  *		http://www.samsung.com
  *
diff --git a/arch/arm/plat-s5p/sleep.S b/arch/arm/plat-samsung/s5p-sleep.S
similarity index 97%
rename from arch/arm/plat-s5p/sleep.S
rename to arch/arm/plat-samsung/s5p-sleep.S
index 006bd01..bdf6dad 100644
--- a/arch/arm/plat-s5p/sleep.S
+++ b/arch/arm/plat-samsung/s5p-sleep.S
@@ -1,5 +1,4 @@
-/* linux/arch/arm/plat-s5p/sleep.S
- *
+/*
  * Copyright (c) 2011 Samsung Electronics Co., Ltd.
  *		http://www.samsung.com
  *
diff --git a/arch/arm/plat-s5p/s5p-time.c b/arch/arm/plat-samsung/s5p-time.c
similarity index 99%
rename from arch/arm/plat-s5p/s5p-time.c
rename to arch/arm/plat-samsung/s5p-time.c
index 17c0a2c..028b6e8 100644
--- a/arch/arm/plat-s5p/s5p-time.c
+++ b/arch/arm/plat-samsung/s5p-time.c
@@ -1,5 +1,4 @@
-/* linux/arch/arm/plat-s5p/s5p-time.c
- *
+/*
  * Copyright (c) 2011 Samsung Electronics Co., Ltd.
  *		http://www.samsung.com/
  *
diff --git a/arch/arm/plat-s5p/setup-mipiphy.c b/arch/arm/plat-samsung/setup-mipiphy.c
similarity index 100%
rename from arch/arm/plat-s5p/setup-mipiphy.c
rename to arch/arm/plat-samsung/setup-mipiphy.c
diff --git a/arch/arm/plat-spear/Kconfig b/arch/arm/plat-spear/Kconfig
index 387655b..4404f82 100644
--- a/arch/arm/plat-spear/Kconfig
+++ b/arch/arm/plat-spear/Kconfig
@@ -8,6 +8,17 @@
 	prompt "ST SPEAr Family"
 	default ARCH_SPEAR3XX
 
+config ARCH_SPEAR13XX
+	bool "ST SPEAr13xx with Device Tree"
+	select ARM_GIC
+	select CPU_V7
+	select USE_OF
+	select HAVE_SMP
+	select MIGHT_HAVE_CACHE_L2X0
+	select PINCTRL
+	help
+	  Support for ST's SPEAr13xx family
+
 config ARCH_SPEAR3XX
 	bool "ST SPEAr3xx with Device Tree"
 	select ARM_VIC
@@ -27,6 +38,7 @@
 endchoice
 
 # Adding SPEAr machine specific configuration files
+source "arch/arm/mach-spear13xx/Kconfig"
 source "arch/arm/mach-spear3xx/Kconfig"
 source "arch/arm/mach-spear6xx/Kconfig"
 
diff --git a/arch/arm/plat-spear/Makefile b/arch/arm/plat-spear/Makefile
index 7744802..2607bd0 100644
--- a/arch/arm/plat-spear/Makefile
+++ b/arch/arm/plat-spear/Makefile
@@ -3,6 +3,7 @@
 #
 
 # Common support
-obj-y	:= clock.o restart.o time.o pl080.o
+obj-y	:= restart.o time.o
 
-obj-$(CONFIG_ARCH_SPEAR3XX)	+= shirq.o
+obj-$(CONFIG_ARCH_SPEAR3XX)	+= pl080.o shirq.o
+obj-$(CONFIG_ARCH_SPEAR6XX)	+= pl080.o
diff --git a/arch/arm/plat-spear/clock.c b/arch/arm/plat-spear/clock.c
deleted file mode 100644
index 67dd003..0000000
--- a/arch/arm/plat-spear/clock.c
+++ /dev/null
@@ -1,1005 +0,0 @@
-/*
- * arch/arm/plat-spear/clock.c
- *
- * Clock framework for SPEAr platform
- *
- * Copyright (C) 2009 ST Microelectronics
- * Viresh Kumar<viresh.kumar@st.com>
- *
- * This file is licensed under the terms of the GNU General Public
- * License version 2. This program is licensed "as is" without any
- * warranty of any kind, whether express or implied.
- */
-
-#include <linux/bug.h>
-#include <linux/clk.h>
-#include <linux/debugfs.h>
-#include <linux/err.h>
-#include <linux/io.h>
-#include <linux/list.h>
-#include <linux/module.h>
-#include <linux/spinlock.h>
-#include <plat/clock.h>
-
-static DEFINE_SPINLOCK(clocks_lock);
-static LIST_HEAD(root_clks);
-#ifdef CONFIG_DEBUG_FS
-static LIST_HEAD(clocks);
-#endif
-
-static void propagate_rate(struct clk *, int on_init);
-#ifdef CONFIG_DEBUG_FS
-static int clk_debugfs_reparent(struct clk *);
-#endif
-
-static int generic_clk_enable(struct clk *clk)
-{
-	unsigned int val;
-
-	if (!clk->en_reg)
-		return -EFAULT;
-
-	val = readl(clk->en_reg);
-	if (unlikely(clk->flags & RESET_TO_ENABLE))
-		val &= ~(1 << clk->en_reg_bit);
-	else
-		val |= 1 << clk->en_reg_bit;
-
-	writel(val, clk->en_reg);
-
-	return 0;
-}
-
-static void generic_clk_disable(struct clk *clk)
-{
-	unsigned int val;
-
-	if (!clk->en_reg)
-		return;
-
-	val = readl(clk->en_reg);
-	if (unlikely(clk->flags & RESET_TO_ENABLE))
-		val |= 1 << clk->en_reg_bit;
-	else
-		val &= ~(1 << clk->en_reg_bit);
-
-	writel(val, clk->en_reg);
-}
-
-/* generic clk ops */
-static struct clkops generic_clkops = {
-	.enable = generic_clk_enable,
-	.disable = generic_clk_disable,
-};
-
-/* returns current programmed clocks clock info structure */
-static struct pclk_info *pclk_info_get(struct clk *clk)
-{
-	unsigned int val, i;
-	struct pclk_info *info = NULL;
-
-	val = (readl(clk->pclk_sel->pclk_sel_reg) >> clk->pclk_sel_shift)
-		& clk->pclk_sel->pclk_sel_mask;
-
-	for (i = 0; i < clk->pclk_sel->pclk_count; i++) {
-		if (clk->pclk_sel->pclk_info[i].pclk_val == val)
-			info = &clk->pclk_sel->pclk_info[i];
-	}
-
-	return info;
-}
-
-/*
- * Set Update pclk, and pclk_info of clk and add clock sibling node to current
- * parents children list
- */
-static void clk_reparent(struct clk *clk, struct pclk_info *pclk_info)
-{
-	unsigned long flags;
-
-	spin_lock_irqsave(&clocks_lock, flags);
-	list_del(&clk->sibling);
-	list_add(&clk->sibling, &pclk_info->pclk->children);
-
-	clk->pclk = pclk_info->pclk;
-	spin_unlock_irqrestore(&clocks_lock, flags);
-
-#ifdef CONFIG_DEBUG_FS
-	clk_debugfs_reparent(clk);
-#endif
-}
-
-static void do_clk_disable(struct clk *clk)
-{
-	if (!clk)
-		return;
-
-	if (!clk->usage_count) {
-		WARN_ON(1);
-		return;
-	}
-
-	clk->usage_count--;
-
-	if (clk->usage_count == 0) {
-		/*
-		 * Surely, there are no active childrens or direct users
-		 * of this clock
-		 */
-		if (clk->pclk)
-			do_clk_disable(clk->pclk);
-
-		if (clk->ops && clk->ops->disable)
-			clk->ops->disable(clk);
-	}
-}
-
-static int do_clk_enable(struct clk *clk)
-{
-	int ret = 0;
-
-	if (!clk)
-		return -EFAULT;
-
-	if (clk->usage_count == 0) {
-		if (clk->pclk) {
-			ret = do_clk_enable(clk->pclk);
-			if (ret)
-				goto err;
-		}
-		if (clk->ops && clk->ops->enable) {
-			ret = clk->ops->enable(clk);
-			if (ret) {
-				if (clk->pclk)
-					do_clk_disable(clk->pclk);
-				goto err;
-			}
-		}
-		/*
-		 * Since the clock is going to be used for the first
-		 * time please reclac
-		 */
-		if (clk->recalc) {
-			ret = clk->recalc(clk);
-			if (ret)
-				goto err;
-		}
-	}
-	clk->usage_count++;
-err:
-	return ret;
-}
-
-/*
- * clk_enable - inform the system when the clock source should be running.
- * @clk: clock source
- *
- * If the clock can not be enabled/disabled, this should return success.
- *
- * Returns success (0) or negative errno.
- */
-int clk_enable(struct clk *clk)
-{
-	unsigned long flags;
-	int ret = 0;
-
-	spin_lock_irqsave(&clocks_lock, flags);
-	ret = do_clk_enable(clk);
-	spin_unlock_irqrestore(&clocks_lock, flags);
-	return ret;
-}
-EXPORT_SYMBOL(clk_enable);
-
-/*
- * clk_disable - inform the system when the clock source is no longer required.
- * @clk: clock source
- *
- * Inform the system that a clock source is no longer required by
- * a driver and may be shut down.
- *
- * Implementation detail: if the clock source is shared between
- * multiple drivers, clk_enable() calls must be balanced by the
- * same number of clk_disable() calls for the clock source to be
- * disabled.
- */
-void clk_disable(struct clk *clk)
-{
-	unsigned long flags;
-
-	spin_lock_irqsave(&clocks_lock, flags);
-	do_clk_disable(clk);
-	spin_unlock_irqrestore(&clocks_lock, flags);
-}
-EXPORT_SYMBOL(clk_disable);
-
-/**
- * clk_get_rate - obtain the current clock rate (in Hz) for a clock source.
- *		 This is only valid once the clock source has been enabled.
- * @clk: clock source
- */
-unsigned long clk_get_rate(struct clk *clk)
-{
-	unsigned long flags, rate;
-
-	spin_lock_irqsave(&clocks_lock, flags);
-	rate = clk->rate;
-	spin_unlock_irqrestore(&clocks_lock, flags);
-
-	return rate;
-}
-EXPORT_SYMBOL(clk_get_rate);
-
-/**
- * clk_set_parent - set the parent clock source for this clock
- * @clk: clock source
- * @parent: parent clock source
- *
- * Returns success (0) or negative errno.
- */
-int clk_set_parent(struct clk *clk, struct clk *parent)
-{
-	int i, found = 0, val = 0;
-	unsigned long flags;
-
-	if (!clk || !parent)
-		return -EFAULT;
-	if (clk->pclk == parent)
-		return 0;
-	if (!clk->pclk_sel)
-		return -EPERM;
-
-	/* check if requested parent is in clk parent list */
-	for (i = 0; i < clk->pclk_sel->pclk_count; i++) {
-		if (clk->pclk_sel->pclk_info[i].pclk == parent) {
-			found = 1;
-			break;
-		}
-	}
-
-	if (!found)
-		return -EINVAL;
-
-	spin_lock_irqsave(&clocks_lock, flags);
-	/* reflect parent change in hardware */
-	val = readl(clk->pclk_sel->pclk_sel_reg);
-	val &= ~(clk->pclk_sel->pclk_sel_mask << clk->pclk_sel_shift);
-	val |= clk->pclk_sel->pclk_info[i].pclk_val << clk->pclk_sel_shift;
-	writel(val, clk->pclk_sel->pclk_sel_reg);
-	spin_unlock_irqrestore(&clocks_lock, flags);
-
-	/* reflect parent change in software */
-	clk_reparent(clk, &clk->pclk_sel->pclk_info[i]);
-
-	propagate_rate(clk, 0);
-	return 0;
-}
-EXPORT_SYMBOL(clk_set_parent);
-
-/**
- * clk_set_rate - set the clock rate for a clock source
- * @clk: clock source
- * @rate: desired clock rate in Hz
- *
- * Returns success (0) or negative errno.
- */
-int clk_set_rate(struct clk *clk, unsigned long rate)
-{
-	unsigned long flags;
-	int ret = -EINVAL;
-
-	if (!clk || !rate)
-		return -EFAULT;
-
-	if (clk->set_rate) {
-		spin_lock_irqsave(&clocks_lock, flags);
-		ret = clk->set_rate(clk, rate);
-		if (!ret)
-			/* if successful -> propagate */
-			propagate_rate(clk, 0);
-		spin_unlock_irqrestore(&clocks_lock, flags);
-	} else if (clk->pclk) {
-		u32 mult = clk->div_factor ? clk->div_factor : 1;
-		ret = clk_set_rate(clk->pclk, mult * rate);
-	}
-
-	return ret;
-}
-EXPORT_SYMBOL(clk_set_rate);
-
-/* registers clock in platform clock framework */
-void clk_register(struct clk_lookup *cl)
-{
-	struct clk *clk;
-	unsigned long flags;
-
-	if (!cl || !cl->clk)
-		return;
-	clk = cl->clk;
-
-	spin_lock_irqsave(&clocks_lock, flags);
-
-	INIT_LIST_HEAD(&clk->children);
-	if (clk->flags & ALWAYS_ENABLED)
-		clk->ops = NULL;
-	else if (!clk->ops)
-		clk->ops = &generic_clkops;
-
-	/* root clock don't have any parents */
-	if (!clk->pclk && !clk->pclk_sel) {
-		list_add(&clk->sibling, &root_clks);
-	} else if (clk->pclk && !clk->pclk_sel) {
-		/* add clocks with only one parent to parent's children list */
-		list_add(&clk->sibling, &clk->pclk->children);
-	} else {
-		/* clocks with more than one parent */
-		struct pclk_info *pclk_info;
-
-		pclk_info = pclk_info_get(clk);
-		if (!pclk_info) {
-			pr_err("CLKDEV: invalid pclk info of clk with"
-					" %s dev_id and %s con_id\n",
-					cl->dev_id, cl->con_id);
-		} else {
-			clk->pclk = pclk_info->pclk;
-			list_add(&clk->sibling, &pclk_info->pclk->children);
-		}
-	}
-
-	spin_unlock_irqrestore(&clocks_lock, flags);
-
-	/* debugfs specific */
-#ifdef CONFIG_DEBUG_FS
-	list_add(&clk->node, &clocks);
-	clk->cl = cl;
-#endif
-
-	/* add clock to arm clockdev framework */
-	clkdev_add(cl);
-}
-
-/**
- * propagate_rate - recalculate and propagate all clocks to children
- * @pclk: parent clock required to be propogated
- * @on_init: flag for enabling clocks which are ENABLED_ON_INIT.
- *
- * Recalculates all children clocks
- */
-void propagate_rate(struct clk *pclk, int on_init)
-{
-	struct clk *clk, *_temp;
-	int ret = 0;
-
-	list_for_each_entry_safe(clk, _temp, &pclk->children, sibling) {
-		if (clk->recalc) {
-			ret = clk->recalc(clk);
-			/*
-			 * recalc will return error if clk out is not programmed
-			 * In this case configure default rate.
-			 */
-			if (ret && clk->set_rate)
-				clk->set_rate(clk, 0);
-		}
-		propagate_rate(clk, on_init);
-
-		if (!on_init)
-			continue;
-
-		/* Enable clks enabled on init, in software view */
-		if (clk->flags & ENABLED_ON_INIT)
-			do_clk_enable(clk);
-	}
-}
-
-/**
- * round_rate_index - return closest programmable rate index in rate_config tbl
- * @clk: ptr to clock structure
- * @drate: desired rate
- * @rate: final rate will be returned in this variable only.
- *
- * Finds index in rate_config for highest clk rate which is less than
- * requested rate. If there is no clk rate lesser than requested rate then
- * -EINVAL is returned. This routine assumes that rate_config is written
- * in incrementing order of clk rates.
- * If drate passed is zero then default rate is programmed.
- */
-static int
-round_rate_index(struct clk *clk, unsigned long drate, unsigned long *rate)
-{
-	unsigned long tmp = 0, prev_rate = 0;
-	int index;
-
-	if (!clk->calc_rate)
-		return -EFAULT;
-
-	if (!drate)
-		return -EINVAL;
-
-	/*
-	 * This loops ends on two conditions:
-	 * - as soon as clk is found with rate greater than requested rate.
-	 * - if all clks in rate_config are smaller than requested rate.
-	 */
-	for (index = 0; index < clk->rate_config.count; index++) {
-		prev_rate = tmp;
-		tmp = clk->calc_rate(clk, index);
-		if (drate < tmp) {
-			index--;
-			break;
-		}
-	}
-	/* return if can't find suitable clock */
-	if (index < 0) {
-		index = -EINVAL;
-		*rate = 0;
-	} else if (index == clk->rate_config.count) {
-		/* program with highest clk rate possible */
-		index = clk->rate_config.count - 1;
-		*rate = tmp;
-	} else
-		*rate = prev_rate;
-
-	return index;
-}
-
-/**
- * clk_round_rate - adjust a rate to the exact rate a clock can provide
- * @clk: clock source
- * @rate: desired clock rate in Hz
- *
- * Returns rounded clock rate in Hz, or negative errno.
- */
-long clk_round_rate(struct clk *clk, unsigned long drate)
-{
-	long rate = 0;
-	int index;
-
-	/*
-	 * propagate call to parent who supports calc_rate. Similar approach is
-	 * used in clk_set_rate.
-	 */
-	if (!clk->calc_rate) {
-		u32 mult;
-		if (!clk->pclk)
-			return clk->rate;
-
-		mult = clk->div_factor ? clk->div_factor : 1;
-		return clk_round_rate(clk->pclk, mult * drate) / mult;
-	}
-
-	index = round_rate_index(clk, drate, &rate);
-	if (index >= 0)
-		return rate;
-	else
-		return index;
-}
-EXPORT_SYMBOL(clk_round_rate);
-
-/*All below functions are called with lock held */
-
-/*
- * Calculates pll clk rate for specific value of mode, m, n and p
- *
- * In normal mode
- * rate = (2 * M[15:8] * Fin)/(N * 2^P)
- *
- * In Dithered mode
- * rate = (2 * M[15:0] * Fin)/(256 * N * 2^P)
- */
-unsigned long pll_calc_rate(struct clk *clk, int index)
-{
-	unsigned long rate = clk->pclk->rate;
-	struct pll_rate_tbl *tbls = clk->rate_config.tbls;
-	unsigned int mode;
-
-	mode = tbls[index].mode ? 256 : 1;
-	return (((2 * rate / 10000) * tbls[index].m) /
-			(mode * tbls[index].n * (1 << tbls[index].p))) * 10000;
-}
-
-/*
- * calculates current programmed rate of pll1
- *
- * In normal mode
- * rate = (2 * M[15:8] * Fin)/(N * 2^P)
- *
- * In Dithered mode
- * rate = (2 * M[15:0] * Fin)/(256 * N * 2^P)
- */
-int pll_clk_recalc(struct clk *clk)
-{
-	struct pll_clk_config *config = clk->private_data;
-	unsigned int num = 2, den = 0, val, mode = 0;
-
-	mode = (readl(config->mode_reg) >> config->masks->mode_shift) &
-		config->masks->mode_mask;
-
-	val = readl(config->cfg_reg);
-	/* calculate denominator */
-	den = (val >> config->masks->div_p_shift) & config->masks->div_p_mask;
-	den = 1 << den;
-	den *= (val >> config->masks->div_n_shift) & config->masks->div_n_mask;
-
-	/* calculate numerator & denominator */
-	if (!mode) {
-		/* Normal mode */
-		num *= (val >> config->masks->norm_fdbk_m_shift) &
-			config->masks->norm_fdbk_m_mask;
-	} else {
-		/* Dithered mode */
-		num *= (val >> config->masks->dith_fdbk_m_shift) &
-			config->masks->dith_fdbk_m_mask;
-		den *= 256;
-	}
-
-	if (!den)
-		return -EINVAL;
-
-	clk->rate = (((clk->pclk->rate/10000) * num) / den) * 10000;
-	return 0;
-}
-
-/*
- * Configures new clock rate of pll
- */
-int pll_clk_set_rate(struct clk *clk, unsigned long desired_rate)
-{
-	struct pll_rate_tbl *tbls = clk->rate_config.tbls;
-	struct pll_clk_config *config = clk->private_data;
-	unsigned long val, rate;
-	int i;
-
-	i = round_rate_index(clk, desired_rate, &rate);
-	if (i < 0)
-		return i;
-
-	val = readl(config->mode_reg) &
-		~(config->masks->mode_mask << config->masks->mode_shift);
-	val |= (tbls[i].mode & config->masks->mode_mask) <<
-		config->masks->mode_shift;
-	writel(val, config->mode_reg);
-
-	val = readl(config->cfg_reg) &
-		~(config->masks->div_p_mask << config->masks->div_p_shift);
-	val |= (tbls[i].p & config->masks->div_p_mask) <<
-		config->masks->div_p_shift;
-	val &= ~(config->masks->div_n_mask << config->masks->div_n_shift);
-	val |= (tbls[i].n & config->masks->div_n_mask) <<
-		config->masks->div_n_shift;
-	val &= ~(config->masks->dith_fdbk_m_mask <<
-			config->masks->dith_fdbk_m_shift);
-	if (tbls[i].mode)
-		val |= (tbls[i].m & config->masks->dith_fdbk_m_mask) <<
-			config->masks->dith_fdbk_m_shift;
-	else
-		val |= (tbls[i].m & config->masks->norm_fdbk_m_mask) <<
-			config->masks->norm_fdbk_m_shift;
-
-	writel(val, config->cfg_reg);
-
-	clk->rate = rate;
-
-	return 0;
-}
-
-/*
- * Calculates ahb, apb clk rate for specific value of div
- */
-unsigned long bus_calc_rate(struct clk *clk, int index)
-{
-	unsigned long rate = clk->pclk->rate;
-	struct bus_rate_tbl *tbls = clk->rate_config.tbls;
-
-	return rate / (tbls[index].div + 1);
-}
-
-/* calculates current programmed rate of ahb or apb bus */
-int bus_clk_recalc(struct clk *clk)
-{
-	struct bus_clk_config *config = clk->private_data;
-	unsigned int div;
-
-	div = ((readl(config->reg) >> config->masks->shift) &
-			config->masks->mask) + 1;
-
-	if (!div)
-		return -EINVAL;
-
-	clk->rate = (unsigned long)clk->pclk->rate / div;
-	return 0;
-}
-
-/* Configures new clock rate of AHB OR APB bus */
-int bus_clk_set_rate(struct clk *clk, unsigned long desired_rate)
-{
-	struct bus_rate_tbl *tbls = clk->rate_config.tbls;
-	struct bus_clk_config *config = clk->private_data;
-	unsigned long val, rate;
-	int i;
-
-	i = round_rate_index(clk, desired_rate, &rate);
-	if (i < 0)
-		return i;
-
-	val = readl(config->reg) &
-		~(config->masks->mask << config->masks->shift);
-	val |= (tbls[i].div & config->masks->mask) << config->masks->shift;
-	writel(val, config->reg);
-
-	clk->rate = rate;
-
-	return 0;
-}
-
-/*
- * gives rate for different values of eq, x and y
- *
- * Fout from synthesizer can be given from two equations:
- * Fout1 = (Fin * X/Y)/2		EQ1
- * Fout2 = Fin * X/Y			EQ2
- */
-unsigned long aux_calc_rate(struct clk *clk, int index)
-{
-	unsigned long rate = clk->pclk->rate;
-	struct aux_rate_tbl *tbls = clk->rate_config.tbls;
-	u8 eq = tbls[index].eq ? 1 : 2;
-
-	return (((rate/10000) * tbls[index].xscale) /
-			(tbls[index].yscale * eq)) * 10000;
-}
-
-/*
- * calculates current programmed rate of auxiliary synthesizers
- * used by: UART, FIRDA
- *
- * Fout from synthesizer can be given from two equations:
- * Fout1 = (Fin * X/Y)/2
- * Fout2 = Fin * X/Y
- *
- * Selection of eqn 1 or 2 is programmed in register
- */
-int aux_clk_recalc(struct clk *clk)
-{
-	struct aux_clk_config *config = clk->private_data;
-	unsigned int num = 1, den = 1, val, eqn;
-
-	val = readl(config->synth_reg);
-
-	eqn = (val >> config->masks->eq_sel_shift) &
-		config->masks->eq_sel_mask;
-	if (eqn == config->masks->eq1_mask)
-		den *= 2;
-
-	/* calculate numerator */
-	num = (val >> config->masks->xscale_sel_shift) &
-		config->masks->xscale_sel_mask;
-
-	/* calculate denominator */
-	den *= (val >> config->masks->yscale_sel_shift) &
-		config->masks->yscale_sel_mask;
-
-	if (!den)
-		return -EINVAL;
-
-	clk->rate = (((clk->pclk->rate/10000) * num) / den) * 10000;
-	return 0;
-}
-
-/* Configures new clock rate of auxiliary synthesizers used by: UART, FIRDA*/
-int aux_clk_set_rate(struct clk *clk, unsigned long desired_rate)
-{
-	struct aux_rate_tbl *tbls = clk->rate_config.tbls;
-	struct aux_clk_config *config = clk->private_data;
-	unsigned long val, rate;
-	int i;
-
-	i = round_rate_index(clk, desired_rate, &rate);
-	if (i < 0)
-		return i;
-
-	val = readl(config->synth_reg) &
-		~(config->masks->eq_sel_mask << config->masks->eq_sel_shift);
-	val |= (tbls[i].eq & config->masks->eq_sel_mask) <<
-		config->masks->eq_sel_shift;
-	val &= ~(config->masks->xscale_sel_mask <<
-			config->masks->xscale_sel_shift);
-	val |= (tbls[i].xscale & config->masks->xscale_sel_mask) <<
-		config->masks->xscale_sel_shift;
-	val &= ~(config->masks->yscale_sel_mask <<
-			config->masks->yscale_sel_shift);
-	val |= (tbls[i].yscale & config->masks->yscale_sel_mask) <<
-		config->masks->yscale_sel_shift;
-	writel(val, config->synth_reg);
-
-	clk->rate = rate;
-
-	return 0;
-}
-
-/*
- * Calculates gpt clk rate for different values of mscale and nscale
- *
- * Fout= Fin/((2 ^ (N+1)) * (M+1))
- */
-unsigned long gpt_calc_rate(struct clk *clk, int index)
-{
-	unsigned long rate = clk->pclk->rate;
-	struct gpt_rate_tbl *tbls = clk->rate_config.tbls;
-
-	return rate / ((1 << (tbls[index].nscale + 1)) *
-			(tbls[index].mscale + 1));
-}
-
-/*
- * calculates current programmed rate of gpt synthesizers
- * Fout from synthesizer can be given from below equations:
- * Fout= Fin/((2 ^ (N+1)) * (M+1))
- */
-int gpt_clk_recalc(struct clk *clk)
-{
-	struct gpt_clk_config *config = clk->private_data;
-	unsigned int div = 1, val;
-
-	val = readl(config->synth_reg);
-	div += (val >> config->masks->mscale_sel_shift) &
-		config->masks->mscale_sel_mask;
-	div *= 1 << (((val >> config->masks->nscale_sel_shift) &
-				config->masks->nscale_sel_mask) + 1);
-
-	if (!div)
-		return -EINVAL;
-
-	clk->rate = (unsigned long)clk->pclk->rate / div;
-	return 0;
-}
-
-/* Configures new clock rate of gptiliary synthesizers used by: UART, FIRDA*/
-int gpt_clk_set_rate(struct clk *clk, unsigned long desired_rate)
-{
-	struct gpt_rate_tbl *tbls = clk->rate_config.tbls;
-	struct gpt_clk_config *config = clk->private_data;
-	unsigned long val, rate;
-	int i;
-
-	i = round_rate_index(clk, desired_rate, &rate);
-	if (i < 0)
-		return i;
-
-	val = readl(config->synth_reg) & ~(config->masks->mscale_sel_mask <<
-			config->masks->mscale_sel_shift);
-	val |= (tbls[i].mscale & config->masks->mscale_sel_mask) <<
-		config->masks->mscale_sel_shift;
-	val &= ~(config->masks->nscale_sel_mask <<
-			config->masks->nscale_sel_shift);
-	val |= (tbls[i].nscale & config->masks->nscale_sel_mask) <<
-		config->masks->nscale_sel_shift;
-	writel(val, config->synth_reg);
-
-	clk->rate = rate;
-
-	return 0;
-}
-
-/*
- * Calculates clcd clk rate for different values of div
- *
- * Fout from synthesizer can be given from below equation:
- * Fout= Fin/2*div (division factor)
- * div is 17 bits:-
- *	0-13 (fractional part)
- *	14-16 (integer part)
- * To calculate Fout we left shift val by 14 bits and divide Fin by
- * complete div (including fractional part) and then right shift the
- * result by 14 places.
- */
-unsigned long clcd_calc_rate(struct clk *clk, int index)
-{
-	unsigned long rate = clk->pclk->rate;
-	struct clcd_rate_tbl *tbls = clk->rate_config.tbls;
-
-	rate /= 1000;
-	rate <<= 12;
-	rate /= (2 * tbls[index].div);
-	rate >>= 12;
-	rate *= 1000;
-
-	return rate;
-}
-
-/*
- * calculates current programmed rate of clcd synthesizer
- * Fout from synthesizer can be given from below equation:
- * Fout= Fin/2*div (division factor)
- * div is 17 bits:-
- *	0-13 (fractional part)
- *	14-16 (integer part)
- * To calculate Fout we left shift val by 14 bits and divide Fin by
- * complete div (including fractional part) and then right shift the
- * result by 14 places.
- */
-int clcd_clk_recalc(struct clk *clk)
-{
-	struct clcd_clk_config *config = clk->private_data;
-	unsigned int div = 1;
-	unsigned long prate;
-	unsigned int val;
-
-	val = readl(config->synth_reg);
-	div = (val >> config->masks->div_factor_shift) &
-		config->masks->div_factor_mask;
-
-	if (!div)
-		return -EINVAL;
-
-	prate = clk->pclk->rate / 1000; /* first level division, make it KHz */
-
-	clk->rate = (((unsigned long)prate << 12) / (2 * div)) >> 12;
-	clk->rate *= 1000;
-	return 0;
-}
-
-/* Configures new clock rate of auxiliary synthesizers used by: UART, FIRDA*/
-int clcd_clk_set_rate(struct clk *clk, unsigned long desired_rate)
-{
-	struct clcd_rate_tbl *tbls = clk->rate_config.tbls;
-	struct clcd_clk_config *config = clk->private_data;
-	unsigned long val, rate;
-	int i;
-
-	i = round_rate_index(clk, desired_rate, &rate);
-	if (i < 0)
-		return i;
-
-	val = readl(config->synth_reg) & ~(config->masks->div_factor_mask <<
-			config->masks->div_factor_shift);
-	val |= (tbls[i].div & config->masks->div_factor_mask) <<
-		config->masks->div_factor_shift;
-	writel(val, config->synth_reg);
-
-	clk->rate = rate;
-
-	return 0;
-}
-
-/*
- * Used for clocks that always have value as the parent clock divided by a
- * fixed divisor
- */
-int follow_parent(struct clk *clk)
-{
-	unsigned int div_factor = (clk->div_factor < 1) ? 1 : clk->div_factor;
-
-	clk->rate = clk->pclk->rate/div_factor;
-	return 0;
-}
-
-/**
- * recalc_root_clocks - recalculate and propagate all root clocks
- *
- * Recalculates all root clocks (clocks with no parent), which if the
- * clock's .recalc is set correctly, should also propagate their rates.
- */
-void recalc_root_clocks(void)
-{
-	struct clk *pclk;
-	unsigned long flags;
-	int ret = 0;
-
-	spin_lock_irqsave(&clocks_lock, flags);
-	list_for_each_entry(pclk, &root_clks, sibling) {
-		if (pclk->recalc) {
-			ret = pclk->recalc(pclk);
-			/*
-			 * recalc will return error if clk out is not programmed
-			 * In this case configure default clock.
-			 */
-			if (ret && pclk->set_rate)
-				pclk->set_rate(pclk, 0);
-		}
-		propagate_rate(pclk, 1);
-		/* Enable clks enabled on init, in software view */
-		if (pclk->flags & ENABLED_ON_INIT)
-			do_clk_enable(pclk);
-	}
-	spin_unlock_irqrestore(&clocks_lock, flags);
-}
-
-void __init clk_init(void)
-{
-	recalc_root_clocks();
-}
-
-#ifdef CONFIG_DEBUG_FS
-/*
- *	debugfs support to trace clock tree hierarchy and attributes
- */
-static struct dentry *clk_debugfs_root;
-static int clk_debugfs_register_one(struct clk *c)
-{
-	int err;
-	struct dentry *d;
-	struct clk *pa = c->pclk;
-	char s[255];
-	char *p = s;
-
-	if (c) {
-		if (c->cl->con_id)
-			p += sprintf(p, "%s", c->cl->con_id);
-		if (c->cl->dev_id)
-			p += sprintf(p, "%s", c->cl->dev_id);
-	}
-	d = debugfs_create_dir(s, pa ? pa->dent : clk_debugfs_root);
-	if (!d)
-		return -ENOMEM;
-	c->dent = d;
-
-	d = debugfs_create_u32("usage_count", S_IRUGO, c->dent,
-			(u32 *)&c->usage_count);
-	if (!d) {
-		err = -ENOMEM;
-		goto err_out;
-	}
-	d = debugfs_create_u32("rate", S_IRUGO, c->dent, (u32 *)&c->rate);
-	if (!d) {
-		err = -ENOMEM;
-		goto err_out;
-	}
-	d = debugfs_create_x32("flags", S_IRUGO, c->dent, (u32 *)&c->flags);
-	if (!d) {
-		err = -ENOMEM;
-		goto err_out;
-	}
-	return 0;
-
-err_out:
-	debugfs_remove_recursive(c->dent);
-	return err;
-}
-
-static int clk_debugfs_register(struct clk *c)
-{
-	int err;
-	struct clk *pa = c->pclk;
-
-	if (pa && !pa->dent) {
-		err = clk_debugfs_register(pa);
-		if (err)
-			return err;
-	}
-
-	if (!c->dent) {
-		err = clk_debugfs_register_one(c);
-		if (err)
-			return err;
-	}
-	return 0;
-}
-
-static int __init clk_debugfs_init(void)
-{
-	struct clk *c;
-	struct dentry *d;
-	int err;
-
-	d = debugfs_create_dir("clock", NULL);
-	if (!d)
-		return -ENOMEM;
-	clk_debugfs_root = d;
-
-	list_for_each_entry(c, &clocks, node) {
-		err = clk_debugfs_register(c);
-		if (err)
-			goto err_out;
-	}
-	return 0;
-err_out:
-	debugfs_remove_recursive(clk_debugfs_root);
-	return err;
-}
-late_initcall(clk_debugfs_init);
-
-static int clk_debugfs_reparent(struct clk *c)
-{
-	debugfs_remove(c->dent);
-	return clk_debugfs_register_one(c);
-}
-#endif /* CONFIG_DEBUG_FS */
diff --git a/arch/arm/plat-spear/include/plat/clock.h b/arch/arm/plat-spear/include/plat/clock.h
deleted file mode 100644
index 0062baf..0000000
--- a/arch/arm/plat-spear/include/plat/clock.h
+++ /dev/null
@@ -1,249 +0,0 @@
-/*
- * arch/arm/plat-spear/include/plat/clock.h
- *
- * Clock framework definitions for SPEAr platform
- *
- * Copyright (C) 2009 ST Microelectronics
- * Viresh Kumar<viresh.kumar@st.com>
- *
- * This file is licensed under the terms of the GNU General Public
- * License version 2. This program is licensed "as is" without any
- * warranty of any kind, whether express or implied.
- */
-
-#ifndef __PLAT_CLOCK_H
-#define __PLAT_CLOCK_H
-
-#include <linux/list.h>
-#include <linux/clkdev.h>
-#include <linux/types.h>
-
-/* clk structure flags */
-#define	ALWAYS_ENABLED		(1 << 0) /* clock always enabled */
-#define	RESET_TO_ENABLE		(1 << 1) /* reset register bit to enable clk */
-#define	ENABLED_ON_INIT		(1 << 2) /* clocks enabled at init */
-
-/**
- * struct clkops - clock operations
- * @enable: pointer to clock enable function
- * @disable: pointer to clock disable function
- */
-struct clkops {
-	int (*enable) (struct clk *);
-	void (*disable) (struct clk *);
-};
-
-/**
- * struct pclk_info - parents info
- * @pclk: pointer to parent clk
- * @pclk_val: value to be written for selecting this parent
- */
-struct pclk_info {
-	struct clk *pclk;
-	u8 pclk_val;
-};
-
-/**
- * struct pclk_sel - parents selection configuration
- * @pclk_info: pointer to array of parent clock info
- * @pclk_count: number of parents
- * @pclk_sel_reg: register for selecting a parent
- * @pclk_sel_mask: mask for selecting parent (can be used to clear bits also)
- */
-struct pclk_sel {
-	struct pclk_info *pclk_info;
-	u8 pclk_count;
-	void __iomem *pclk_sel_reg;
-	unsigned int pclk_sel_mask;
-};
-
-/**
- * struct rate_config - clk rate configurations
- * @tbls: array of device specific clk rate tables, in ascending order of rates
- * @count: size of tbls array
- * @default_index: default setting when originally disabled
- */
-struct rate_config {
-	void *tbls;
-	u8 count;
-	u8 default_index;
-};
-
-/**
- * struct clk - clock structure
- * @usage_count: num of users who enabled this clock
- * @flags: flags for clock properties
- * @rate: programmed clock rate in Hz
- * @en_reg: clk enable/disable reg
- * @en_reg_bit: clk enable/disable bit
- * @ops: clk enable/disable ops - generic_clkops selected if NULL
- * @recalc: pointer to clock rate recalculate function
- * @set_rate: pointer to clock set rate function
- * @calc_rate: pointer to clock get rate function for index
- * @rate_config: rate configuration information, used by set_rate
- * @div_factor: division factor to parent clock.
- * @pclk: current parent clk
- * @pclk_sel: pointer to parent selection structure
- * @pclk_sel_shift: register shift for selecting parent of this clock
- * @children: list of children of this clock
- * @sibling: node for list of clocks having same parents
- * @private_data: clock specific private data
- * @node: list to maintain clocks linearly
- * @cl: clock lookup associated with this clock
- * @dent: object for debugfs
- */
-struct clk {
-	unsigned int usage_count;
-	unsigned int flags;
-	unsigned long rate;
-	void __iomem *en_reg;
-	u8 en_reg_bit;
-	const struct clkops *ops;
-	int (*recalc) (struct clk *);
-	int (*set_rate) (struct clk *, unsigned long rate);
-	unsigned long (*calc_rate)(struct clk *, int index);
-	struct rate_config rate_config;
-	unsigned int div_factor;
-
-	struct clk *pclk;
-	struct pclk_sel *pclk_sel;
-	unsigned int pclk_sel_shift;
-
-	struct list_head children;
-	struct list_head sibling;
-	void *private_data;
-#ifdef CONFIG_DEBUG_FS
-	struct list_head node;
-	struct clk_lookup *cl;
-	struct dentry *dent;
-#endif
-};
-
-/* pll configuration structure */
-struct pll_clk_masks {
-	u32 mode_mask;
-	u32 mode_shift;
-
-	u32 norm_fdbk_m_mask;
-	u32 norm_fdbk_m_shift;
-	u32 dith_fdbk_m_mask;
-	u32 dith_fdbk_m_shift;
-	u32 div_p_mask;
-	u32 div_p_shift;
-	u32 div_n_mask;
-	u32 div_n_shift;
-};
-
-struct pll_clk_config {
-	void __iomem *mode_reg;
-	void __iomem *cfg_reg;
-	struct pll_clk_masks *masks;
-};
-
-/* pll clk rate config structure */
-struct pll_rate_tbl {
-	u8 mode;
-	u16 m;
-	u8 n;
-	u8 p;
-};
-
-/* ahb and apb bus configuration structure */
-struct bus_clk_masks {
-	u32 mask;
-	u32 shift;
-};
-
-struct bus_clk_config {
-	void __iomem *reg;
-	struct bus_clk_masks *masks;
-};
-
-/* ahb and apb clk bus rate config structure */
-struct bus_rate_tbl {
-	u8 div;
-};
-
-/* Aux clk configuration structure: applicable to UART and FIRDA */
-struct aux_clk_masks {
-	u32 eq_sel_mask;
-	u32 eq_sel_shift;
-	u32 eq1_mask;
-	u32 eq2_mask;
-	u32 xscale_sel_mask;
-	u32 xscale_sel_shift;
-	u32 yscale_sel_mask;
-	u32 yscale_sel_shift;
-};
-
-struct aux_clk_config {
-	void __iomem *synth_reg;
-	struct aux_clk_masks *masks;
-};
-
-/* aux clk rate config structure */
-struct aux_rate_tbl {
-	u16 xscale;
-	u16 yscale;
-	u8 eq;
-};
-
-/* GPT clk configuration structure */
-struct gpt_clk_masks {
-	u32 mscale_sel_mask;
-	u32 mscale_sel_shift;
-	u32 nscale_sel_mask;
-	u32 nscale_sel_shift;
-};
-
-struct gpt_clk_config {
-	void __iomem *synth_reg;
-	struct gpt_clk_masks *masks;
-};
-
-/* gpt clk rate config structure */
-struct gpt_rate_tbl {
-	u16 mscale;
-	u16 nscale;
-};
-
-/* clcd clk configuration structure */
-struct clcd_synth_masks {
-	u32 div_factor_mask;
-	u32 div_factor_shift;
-};
-
-struct clcd_clk_config {
-	void __iomem *synth_reg;
-	struct clcd_synth_masks *masks;
-};
-
-/* clcd clk rate config structure */
-struct clcd_rate_tbl {
-	u16 div;
-};
-
-/* platform specific clock functions */
-void __init clk_init(void);
-void clk_register(struct clk_lookup *cl);
-void recalc_root_clocks(void);
-
-/* clock recalc & set rate functions */
-int follow_parent(struct clk *clk);
-unsigned long pll_calc_rate(struct clk *clk, int index);
-int pll_clk_recalc(struct clk *clk);
-int pll_clk_set_rate(struct clk *clk, unsigned long desired_rate);
-unsigned long bus_calc_rate(struct clk *clk, int index);
-int bus_clk_recalc(struct clk *clk);
-int bus_clk_set_rate(struct clk *clk, unsigned long desired_rate);
-unsigned long gpt_calc_rate(struct clk *clk, int index);
-int gpt_clk_recalc(struct clk *clk);
-int gpt_clk_set_rate(struct clk *clk, unsigned long desired_rate);
-unsigned long aux_calc_rate(struct clk *clk, int index);
-int aux_clk_recalc(struct clk *clk);
-int aux_clk_set_rate(struct clk *clk, unsigned long desired_rate);
-unsigned long clcd_calc_rate(struct clk *clk, int index);
-int clcd_clk_recalc(struct clk *clk);
-int clcd_clk_set_rate(struct clk *clk, unsigned long desired_rate);
-
-#endif /* __PLAT_CLOCK_H */
diff --git a/arch/arm/plat-spear/include/plat/debug-macro.S b/arch/arm/plat-spear/include/plat/debug-macro.S
index ab3de72..75b05ad 100644
--- a/arch/arm/plat-spear/include/plat/debug-macro.S
+++ b/arch/arm/plat-spear/include/plat/debug-macro.S
@@ -4,7 +4,7 @@
  * Debugging macro include header for spear platform
  *
  * Copyright (C) 2009 ST Microelectronics
- * Viresh Kumar<viresh.kumar@st.com>
+ * Viresh Kumar <viresh.linux@gmail.com>
  *
  * This file is licensed under the terms of the GNU General Public
  * License version 2. This program is licensed "as is" without any
diff --git a/arch/arm/plat-spear/include/plat/pl080.h b/arch/arm/plat-spear/include/plat/pl080.h
index e14a3e4..2bc6b54 100644
--- a/arch/arm/plat-spear/include/plat/pl080.h
+++ b/arch/arm/plat-spear/include/plat/pl080.h
@@ -4,7 +4,7 @@
  * DMAC pl080 definitions for SPEAr platform
  *
  * Copyright (C) 2012 ST Microelectronics
- * Viresh Kumar <viresh.kumar@st.com>
+ * Viresh Kumar <viresh.linux@gmail.com>
  *
  * This file is licensed under the terms of the GNU General Public
  * License version 2. This program is licensed "as is" without any
diff --git a/arch/arm/plat-spear/include/plat/shirq.h b/arch/arm/plat-spear/include/plat/shirq.h
index 03ed8b5..88a7fbd 100644
--- a/arch/arm/plat-spear/include/plat/shirq.h
+++ b/arch/arm/plat-spear/include/plat/shirq.h
@@ -4,7 +4,7 @@
  * SPEAr platform shared irq layer header file
  *
  * Copyright (C) 2009 ST Microelectronics
- * Viresh Kumar<viresh.kumar@st.com>
+ * Viresh Kumar <viresh.linux@gmail.com>
  *
  * This file is licensed under the terms of the GNU General Public
  * License version 2. This program is licensed "as is" without any
diff --git a/arch/arm/plat-spear/include/plat/timex.h b/arch/arm/plat-spear/include/plat/timex.h
index 914d09d..ef95e5b 100644
--- a/arch/arm/plat-spear/include/plat/timex.h
+++ b/arch/arm/plat-spear/include/plat/timex.h
@@ -4,7 +4,7 @@
  * SPEAr platform specific timex definitions
  *
  * Copyright (C) 2009 ST Microelectronics
- * Viresh Kumar<viresh.kumar@st.com>
+ * Viresh Kumar <viresh.linux@gmail.com>
  *
  * This file is licensed under the terms of the GNU General Public
  * License version 2. This program is licensed "as is" without any
diff --git a/arch/arm/plat-spear/include/plat/uncompress.h b/arch/arm/plat-spear/include/plat/uncompress.h
index 6dd455b..2ce6cb1 100644
--- a/arch/arm/plat-spear/include/plat/uncompress.h
+++ b/arch/arm/plat-spear/include/plat/uncompress.h
@@ -4,7 +4,7 @@
  * Serial port stubs for kernel decompress status messages
  *
  * Copyright (C) 2009 ST Microelectronics
- * Viresh Kumar<viresh.kumar@st.com>
+ * Viresh Kumar <viresh.linux@gmail.com>
  *
  * This file is licensed under the terms of the GNU General Public
  * License version 2. This program is licensed "as is" without any
diff --git a/arch/arm/plat-spear/pl080.c b/arch/arm/plat-spear/pl080.c
index a56a067..12cf27f 100644
--- a/arch/arm/plat-spear/pl080.c
+++ b/arch/arm/plat-spear/pl080.c
@@ -4,7 +4,7 @@
  * DMAC pl080 definitions for SPEAr platform
  *
  * Copyright (C) 2012 ST Microelectronics
- * Viresh Kumar <viresh.kumar@st.com>
+ * Viresh Kumar <viresh.linux@gmail.com>
  *
  * This file is licensed under the terms of the GNU General Public
  * License version 2. This program is licensed "as is" without any
diff --git a/arch/arm/plat-spear/restart.c b/arch/arm/plat-spear/restart.c
index 4471a23..4f99011 100644
--- a/arch/arm/plat-spear/restart.c
+++ b/arch/arm/plat-spear/restart.c
@@ -4,7 +4,7 @@
  * SPEAr platform specific restart functions
  *
  * Copyright (C) 2009 ST Microelectronics
- * Viresh Kumar<viresh.kumar@st.com>
+ * Viresh Kumar <viresh.linux@gmail.com>
  *
  * This file is licensed under the terms of the GNU General Public
  * License version 2. This program is licensed "as is" without any
@@ -16,6 +16,7 @@
 #include <mach/spear.h>
 #include <mach/generic.h>
 
+#define SPEAR13XX_SYS_SW_RES			(VA_MISC_BASE + 0x204)
 void spear_restart(char mode, const char *cmd)
 {
 	if (mode == 's') {
@@ -23,6 +24,10 @@
 		soft_restart(0);
 	} else {
 		/* hardware reset, Use on-chip reset capability */
+#ifdef CONFIG_ARCH_SPEAR13XX
+		writel_relaxed(0x01, SPEAR13XX_SYS_SW_RES);
+#else
 		sysctl_soft_reset((void __iomem *)VA_SPEAR_SYS_CTRL_BASE);
+#endif
 	}
 }
diff --git a/arch/arm/plat-spear/shirq.c b/arch/arm/plat-spear/shirq.c
index 961fb72..853e891e 100644
--- a/arch/arm/plat-spear/shirq.c
+++ b/arch/arm/plat-spear/shirq.c
@@ -4,7 +4,7 @@
  * SPEAr platform shared irq layer source file
  *
  * Copyright (C) 2009 ST Microelectronics
- * Viresh Kumar<viresh.kumar@st.com>
+ * Viresh Kumar <viresh.linux@gmail.com>
  *
  * This file is licensed under the terms of the GNU General Public
  * License version 2. This program is licensed "as is" without any
diff --git a/arch/arm/plat-spear/time.c b/arch/arm/plat-spear/time.c
index a3164d1..03321af 100644
--- a/arch/arm/plat-spear/time.c
+++ b/arch/arm/plat-spear/time.c
@@ -18,6 +18,8 @@
 #include <linux/ioport.h>
 #include <linux/io.h>
 #include <linux/kernel.h>
+#include <linux/of_irq.h>
+#include <linux/of_address.h>
 #include <linux/time.h>
 #include <linux/irq.h>
 #include <asm/mach/time.h>
@@ -197,19 +199,32 @@
 	setup_irq(irq, &spear_timer_irq);
 }
 
-void __init spear_setup_timer(resource_size_t base, int irq)
-{
-	int ret;
+static const struct of_device_id timer_of_match[] __initconst = {
+	{ .compatible = "st,spear-timer", },
+	{ },
+};
 
-	if (!request_mem_region(base, SZ_1K, "gpt0")) {
-		pr_err("%s:cannot get IO addr\n", __func__);
+void __init spear_setup_of_timer(void)
+{
+	struct device_node *np;
+	int irq, ret;
+
+	np = of_find_matching_node(NULL, timer_of_match);
+	if (!np) {
+		pr_err("%s: No timer passed via DT\n", __func__);
 		return;
 	}
 
-	gpt_base = ioremap(base, SZ_1K);
+	irq = irq_of_parse_and_map(np, 0);
+	if (!irq) {
+		pr_err("%s: No irq passed for timer via DT\n", __func__);
+		return;
+	}
+
+	gpt_base = of_iomap(np, 0);
 	if (!gpt_base) {
-		pr_err("%s:ioremap failed for gpt\n", __func__);
-		goto err_mem;
+		pr_err("%s: of iomap failed\n", __func__);
+		return;
 	}
 
 	gpt_clk = clk_get_sys("gpt0", NULL);
@@ -218,10 +233,10 @@
 		goto err_iomap;
 	}
 
-	ret = clk_enable(gpt_clk);
+	ret = clk_prepare_enable(gpt_clk);
 	if (ret < 0) {
-		pr_err("%s:couldn't enable gpt clock\n", __func__);
-		goto err_clk;
+		pr_err("%s:couldn't prepare-enable gpt clock\n", __func__);
+		goto err_prepare_enable_clk;
 	}
 
 	spear_clockevent_init(irq);
@@ -229,10 +244,8 @@
 
 	return;
 
-err_clk:
+err_prepare_enable_clk:
 	clk_put(gpt_clk);
 err_iomap:
 	iounmap(gpt_base);
-err_mem:
-	release_mem_region(base, SZ_1K);
 }
diff --git a/arch/avr32/include/asm/posix_types.h b/arch/avr32/include/asm/posix_types.h
index 74667bf..9ba9e74 100644
--- a/arch/avr32/include/asm/posix_types.h
+++ b/arch/avr32/include/asm/posix_types.h
@@ -17,9 +17,6 @@
 typedef unsigned short  __kernel_mode_t;
 #define __kernel_mode_t __kernel_mode_t
 
-typedef unsigned short  __kernel_nlink_t;
-#define __kernel_nlink_t __kernel_nlink_t
-
 typedef unsigned short  __kernel_ipc_pid_t;
 #define __kernel_ipc_pid_t __kernel_ipc_pid_t
 
diff --git a/arch/avr32/kernel/entry-avr32b.S b/arch/avr32/kernel/entry-avr32b.S
index 169268c..df28841 100644
--- a/arch/avr32/kernel/entry-avr32b.S
+++ b/arch/avr32/kernel/entry-avr32b.S
@@ -281,7 +281,7 @@
 	ld.w	r1, r0[TI_flags]
 	rjmp	1b
 
-2:	mov	r2, _TIF_SIGPENDING | _TIF_RESTORE_SIGMASK | _TIF_NOTIFY_RESUME
+2:	mov	r2, _TIF_SIGPENDING | _TIF_NOTIFY_RESUME
 	tst	r1, r2
 	breq	3f
 	unmask_interrupts
@@ -587,7 +587,7 @@
 	ld.w	r1, r0[TI_flags]
 	rjmp	fault_exit_work
 
-1:	mov	r2, _TIF_SIGPENDING | _TIF_RESTORE_SIGMASK
+1:	mov	r2, _TIF_SIGPENDING | _TIF_NOTIFY_RESUME
 	tst	r1, r2
 	breq	2f
 	unmask_interrupts
diff --git a/arch/avr32/kernel/signal.c b/arch/avr32/kernel/signal.c
index ae386c3..d552a85 100644
--- a/arch/avr32/kernel/signal.c
+++ b/arch/avr32/kernel/signal.c
@@ -22,8 +22,6 @@
 #include <asm/ucontext.h>
 #include <asm/syscalls.h>
 
-#define _BLOCKABLE (~(sigmask(SIGKILL) | sigmask(SIGSTOP)))
-
 asmlinkage int sys_sigaltstack(const stack_t __user *uss, stack_t __user *uoss,
 			       struct pt_regs *regs)
 {
@@ -89,7 +87,6 @@
 	if (__copy_from_user(&set, &frame->uc.uc_sigmask, sizeof(set)))
 		goto badframe;
 
-	sigdelsetmask(&set, ~_BLOCKABLE);
 	set_current_blocked(&set);
 
 	if (restore_sigcontext(regs, &frame->uc.uc_mcontext))
@@ -224,30 +221,27 @@
 
 static inline void
 handle_signal(unsigned long sig, struct k_sigaction *ka, siginfo_t *info,
-	      sigset_t *oldset, struct pt_regs *regs, int syscall)
+	      struct pt_regs *regs, int syscall)
 {
 	int ret;
 
 	/*
 	 * Set up the stack frame
 	 */
-	ret = setup_rt_frame(sig, ka, info, oldset, regs);
+	ret = setup_rt_frame(sig, ka, info, sigmask_to_save(), regs);
 
 	/*
 	 * Check that the resulting registers are sane
 	 */
 	ret |= !valid_user_regs(regs);
 
-	if (ret != 0) {
-		force_sigsegv(sig, current);
-		return;
-	}
-
 	/*
 	 * Block the signal if we were successful.
 	 */
-	block_sigmask(ka, sig);
-	clear_thread_flag(TIF_RESTORE_SIGMASK);
+	if (ret != 0)
+		force_sigsegv(sig, current);
+	else
+		signal_delivered(sig, info, ka, regs, 0);
 }
 
 /*
@@ -255,7 +249,7 @@
  * doesn't want to handle. Thus you cannot kill init even with a
  * SIGKILL even by mistake.
  */
-int do_signal(struct pt_regs *regs, sigset_t *oldset, int syscall)
+static void do_signal(struct pt_regs *regs, int syscall)
 {
 	siginfo_t info;
 	int signr;
@@ -267,12 +261,7 @@
 	 * without doing anything if so.
 	 */
 	if (!user_mode(regs))
-		return 0;
-
-	if (test_thread_flag(TIF_RESTORE_SIGMASK))
-		oldset = &current->saved_sigmask;
-	else if (!oldset)
-		oldset = &current->blocked;
+		return;
 
 	signr = get_signal_to_deliver(&info, &ka, regs, NULL);
 	if (syscall) {
@@ -297,15 +286,11 @@
 
 	if (signr == 0) {
 		/* No signal to deliver -- put the saved sigmask back */
-		if (test_thread_flag(TIF_RESTORE_SIGMASK)) {
-			clear_thread_flag(TIF_RESTORE_SIGMASK);
-			sigprocmask(SIG_SETMASK, &current->saved_sigmask, NULL);
-		}
-		return 0;
+		restore_saved_sigmask();
+		return;
 	}
 
-	handle_signal(signr, &ka, &info, oldset, regs, syscall);
-	return 1;
+	handle_signal(signr, &ka, &info, regs, syscall);
 }
 
 asmlinkage void do_notify_resume(struct pt_regs *regs, struct thread_info *ti)
@@ -315,13 +300,11 @@
 	if ((sysreg_read(SR) & MODE_MASK) == MODE_SUPERVISOR)
 		syscall = 1;
 
-	if (ti->flags & (_TIF_SIGPENDING | _TIF_RESTORE_SIGMASK))
-		do_signal(regs, &current->blocked, syscall);
+	if (ti->flags & _TIF_SIGPENDING)
+		do_signal(regs, syscall);
 
 	if (ti->flags & _TIF_NOTIFY_RESUME) {
 		clear_thread_flag(TIF_NOTIFY_RESUME);
 		tracehook_notify_resume(regs);
-		if (current->replacement_session_keyring)
-			key_replace_session_keyring();
 	}
 }
diff --git a/arch/blackfin/include/asm/posix_types.h b/arch/blackfin/include/asm/posix_types.h
index 41bc187..1bd3436 100644
--- a/arch/blackfin/include/asm/posix_types.h
+++ b/arch/blackfin/include/asm/posix_types.h
@@ -10,9 +10,6 @@
 typedef unsigned short __kernel_mode_t;
 #define __kernel_mode_t __kernel_mode_t
 
-typedef unsigned short __kernel_nlink_t;
-#define __kernel_nlink_t __kernel_nlink_t
-
 typedef unsigned int __kernel_ipc_pid_t;
 #define __kernel_ipc_pid_t __kernel_ipc_pid_t
 
diff --git a/arch/blackfin/include/asm/thread_info.h b/arch/blackfin/include/asm/thread_info.h
index 02560fd8..53ad100 100644
--- a/arch/blackfin/include/asm/thread_info.h
+++ b/arch/blackfin/include/asm/thread_info.h
@@ -100,7 +100,6 @@
 					   TIF_NEED_RESCHED */
 #define TIF_MEMDIE		4	/* is terminating due to OOM killer */
 #define TIF_RESTORE_SIGMASK	5	/* restore signal mask in do_signal() */
-#define TIF_FREEZE		6	/* is freezing for suspend */
 #define TIF_IRQ_SYNC		7	/* sync pipeline stage */
 #define TIF_NOTIFY_RESUME	8	/* callback before returning to user */
 #define TIF_SINGLESTEP		9
@@ -111,7 +110,6 @@
 #define _TIF_NEED_RESCHED	(1<<TIF_NEED_RESCHED)
 #define _TIF_POLLING_NRFLAG	(1<<TIF_POLLING_NRFLAG)
 #define _TIF_RESTORE_SIGMASK	(1<<TIF_RESTORE_SIGMASK)
-#define _TIF_FREEZE		(1<<TIF_FREEZE)
 #define _TIF_IRQ_SYNC		(1<<TIF_IRQ_SYNC)
 #define _TIF_NOTIFY_RESUME	(1<<TIF_NOTIFY_RESUME)
 #define _TIF_SINGLESTEP		(1<<TIF_SINGLESTEP)
diff --git a/arch/blackfin/kernel/process.c b/arch/blackfin/kernel/process.c
index 2e3994b..62bcea7 100644
--- a/arch/blackfin/kernel/process.c
+++ b/arch/blackfin/kernel/process.c
@@ -173,7 +173,7 @@
 	unsigned long newsp;
 
 #ifdef __ARCH_SYNC_CORE_DCACHE
-	if (current->rt.nr_cpus_allowed == num_possible_cpus())
+	if (current->nr_cpus_allowed == num_possible_cpus())
 		set_cpus_allowed_ptr(current, cpumask_of(smp_processor_id()));
 #endif
 
diff --git a/arch/blackfin/kernel/signal.c b/arch/blackfin/kernel/signal.c
index e5bbc1a..6682b73 100644
--- a/arch/blackfin/kernel/signal.c
+++ b/arch/blackfin/kernel/signal.c
@@ -19,8 +19,6 @@
 #include <asm/fixed_code.h>
 #include <asm/syscall.h>
 
-#define _BLOCKABLE (~(sigmask(SIGKILL) | sigmask(SIGSTOP)))
-
 /* Location of the trace bit in SYSCFG. */
 #define TRACE_BITS 0x0001
 
@@ -98,7 +96,6 @@
 	if (__copy_from_user(&set, &frame->uc.uc_sigmask, sizeof(set)))
 		goto badframe;
 
-	sigdelsetmask(&set, ~_BLOCKABLE);
 	set_current_blocked(&set);
 
 	if (rt_restore_sigcontext(regs, &frame->uc.uc_mcontext, &r0))
@@ -190,17 +187,22 @@
 	err |= copy_to_user(&frame->uc.uc_sigmask, set, sizeof(*set));
 
 	if (err)
-		goto give_sigsegv;
+		return -EFAULT;
 
 	/* Set up registers for signal handler */
-	wrusp((unsigned long)frame);
 	if (current->personality & FDPIC_FUNCPTRS) {
 		struct fdpic_func_descriptor __user *funcptr =
 			(struct fdpic_func_descriptor *) ka->sa.sa_handler;
-		__get_user(regs->pc, &funcptr->text);
-		__get_user(regs->p3, &funcptr->GOT);
+		u32 pc, p3;
+		err |= __get_user(pc, &funcptr->text);
+		err |= __get_user(p3, &funcptr->GOT);
+		if (err)
+			return -EFAULT;
+		regs->pc = pc;
+		regs->p3 = p3;
 	} else
 		regs->pc = (unsigned long)ka->sa.sa_handler;
+	wrusp((unsigned long)frame);
 	regs->rets = SIGRETURN_STUB;
 
 	regs->r0 = frame->sig;
@@ -208,10 +210,6 @@
 	regs->r2 = (unsigned long)(&frame->uc);
 
 	return 0;
-
- give_sigsegv:
-	force_sigsegv(sig, current);
-	return -EFAULT;
 }
 
 static inline void
@@ -247,24 +245,21 @@
 /*
  * OK, we're invoking a handler
  */
-static int
+static void
 handle_signal(int sig, siginfo_t *info, struct k_sigaction *ka,
-	      sigset_t *oldset, struct pt_regs *regs)
+	      struct pt_regs *regs)
 {
-	int ret;
-
 	/* are we from a system call? to see pt_regs->orig_p0 */
 	if (regs->orig_p0 >= 0)
 		/* If so, check system call restarting.. */
 		handle_restart(regs, ka, 1);
 
 	/* set up the stack frame */
-	ret = setup_rt_frame(sig, ka, info, oldset, regs);
-
-	if (ret == 0)
-		block_sigmask(ka, sig);
-
-	return ret;
+	if (setup_rt_frame(sig, ka, info, sigmask_to_save(), regs) < 0)
+		force_sigsegv(sig, current);
+	else
+		signal_delivered(sig, info, ka, regs,
+				test_thread_flag(TIF_SINGLESTEP));
 }
 
 /*
@@ -281,37 +276,16 @@
 	siginfo_t info;
 	int signr;
 	struct k_sigaction ka;
-	sigset_t *oldset;
 
 	current->thread.esp0 = (unsigned long)regs;
 
-	if (try_to_freeze())
-		goto no_signal;
-
-	if (test_thread_flag(TIF_RESTORE_SIGMASK))
-		oldset = &current->saved_sigmask;
-	else
-		oldset = &current->blocked;
-
 	signr = get_signal_to_deliver(&info, &ka, regs, NULL);
 	if (signr > 0) {
 		/* Whee!  Actually deliver the signal.  */
-		if (handle_signal(signr, &info, &ka, oldset, regs) == 0) {
-			/* a signal was successfully delivered; the saved
-			 * sigmask will have been stored in the signal frame,
-			 * and will be restored by sigreturn, so we can simply
-			 * clear the TIF_RESTORE_SIGMASK flag */
-			if (test_thread_flag(TIF_RESTORE_SIGMASK))
-				clear_thread_flag(TIF_RESTORE_SIGMASK);
-
-			tracehook_signal_handler(signr, &info, &ka, regs,
-				test_thread_flag(TIF_SINGLESTEP));
-		}
-
+		handle_signal(signr, &info, &ka, regs);
 		return;
 	}
 
- no_signal:
 	/* Did we come from a system call? */
 	if (regs->orig_p0 >= 0)
 		/* Restart the system call - no handlers present */
@@ -319,10 +293,7 @@
 
 	/* if there's no signal to deliver, we just put the saved sigmask
 	 * back */
-	if (test_thread_flag(TIF_RESTORE_SIGMASK)) {
-		clear_thread_flag(TIF_RESTORE_SIGMASK);
-		sigprocmask(SIG_SETMASK, &current->saved_sigmask, NULL);
-	}
+	restore_saved_sigmask();
 }
 
 /*
@@ -330,14 +301,12 @@
  */
 asmlinkage void do_notify_resume(struct pt_regs *regs)
 {
-	if (test_thread_flag(TIF_SIGPENDING) || test_thread_flag(TIF_RESTORE_SIGMASK))
+	if (test_thread_flag(TIF_SIGPENDING))
 		do_signal(regs);
 
 	if (test_thread_flag(TIF_NOTIFY_RESUME)) {
 		clear_thread_flag(TIF_NOTIFY_RESUME);
 		tracehook_notify_resume(regs);
-		if (current->replacement_session_keyring)
-			key_replace_session_keyring();
 	}
 }
 
diff --git a/arch/blackfin/kernel/trace.c b/arch/blackfin/kernel/trace.c
index 44bbf2f..f7f7a18 100644
--- a/arch/blackfin/kernel/trace.c
+++ b/arch/blackfin/kernel/trace.c
@@ -10,6 +10,8 @@
 #include <linux/hardirq.h>
 #include <linux/thread_info.h>
 #include <linux/mm.h>
+#include <linux/oom.h>
+#include <linux/sched.h>
 #include <linux/uaccess.h>
 #include <linux/module.h>
 #include <linux/kallsyms.h>
@@ -27,8 +29,7 @@
 {
 	struct task_struct *p;
 	struct mm_struct *mm;
-	unsigned long flags, offset;
-	unsigned char in_atomic = (bfin_read_IPEND() & 0x10) || in_atomic();
+	unsigned long offset;
 	struct rb_node *n;
 
 #ifdef CONFIG_KALLSYMS
@@ -112,17 +113,17 @@
 	 * mappings of all our processes and see if we can't be a whee
 	 * bit more specific
 	 */
-	write_lock_irqsave(&tasklist_lock, flags);
+	read_lock(&tasklist_lock);
 	for_each_process(p) {
-		mm = (in_atomic ? p->mm : get_task_mm(p));
-		if (!mm)
+		struct task_struct *t;
+
+		t = find_lock_task_mm(p);
+		if (!t)
 			continue;
 
-		if (!down_read_trylock(&mm->mmap_sem)) {
-			if (!in_atomic)
-				mmput(mm);
-			continue;
-		}
+		mm = t->mm;
+		if (!down_read_trylock(&mm->mmap_sem))
+			goto __continue;
 
 		for (n = rb_first(&mm->mm_rb); n; n = rb_next(n)) {
 			struct vm_area_struct *vma;
@@ -131,7 +132,7 @@
 
 			if (address >= vma->vm_start && address < vma->vm_end) {
 				char _tmpbuf[256];
-				char *name = p->comm;
+				char *name = t->comm;
 				struct file *file = vma->vm_file;
 
 				if (file) {
@@ -164,8 +165,7 @@
 						name, vma->vm_start, vma->vm_end);
 
 				up_read(&mm->mmap_sem);
-				if (!in_atomic)
-					mmput(mm);
+				task_unlock(t);
 
 				if (buf[0] == '\0')
 					sprintf(buf, "[ %s ] dynamic memory", name);
@@ -175,8 +175,8 @@
 		}
 
 		up_read(&mm->mmap_sem);
-		if (!in_atomic)
-			mmput(mm);
+__continue:
+		task_unlock(t);
 	}
 
 	/*
@@ -186,7 +186,7 @@
 	sprintf(buf, "/* kernel dynamic memory */");
 
 done:
-	write_unlock_irqrestore(&tasklist_lock, flags);
+	read_unlock(&tasklist_lock);
 }
 
 #define EXPAND_LEN ((1 << CONFIG_DEBUG_BFIN_HWTRACE_EXPAND_LEN) * 256 - 1)
diff --git a/arch/blackfin/mach-bf561/boards/acvilon.c b/arch/blackfin/mach-bf561/boards/acvilon.c
index f6ffd6f..0b74218 100644
--- a/arch/blackfin/mach-bf561/boards/acvilon.c
+++ b/arch/blackfin/mach-bf561/boards/acvilon.c
@@ -248,8 +248,6 @@
 
 #if defined(CONFIG_MTD_NAND_PLATFORM) || defined(CONFIG_MTD_NAND_PLATFORM_MODULE)
 
-const char *part_probes[] = { "cmdlinepart", NULL };
-
 static struct mtd_partition bfin_plat_nand_partitions[] = {
 	{
 	 .name = "params(nand)",
@@ -289,7 +287,6 @@
 	.chip = {
 		 .nr_chips = 1,
 		 .chip_delay = 30,
-		 .part_probe_types = part_probes,
 		 .partitions = bfin_plat_nand_partitions,
 		 .nr_partitions = ARRAY_SIZE(bfin_plat_nand_partitions),
 		 },
diff --git a/arch/blackfin/mach-common/entry.S b/arch/blackfin/mach-common/entry.S
index 80aa253..04c2fbe4 100644
--- a/arch/blackfin/mach-common/entry.S
+++ b/arch/blackfin/mach-common/entry.S
@@ -711,8 +711,6 @@
 	jump .Lresume_userspace_1;
 
 .Lsyscall_sigpending:
-	cc = BITTST(r7, TIF_RESTORE_SIGMASK);
-	if cc jump .Lsyscall_do_signals;
 	cc = BITTST(r7, TIF_SIGPENDING);
 	if cc jump .Lsyscall_do_signals;
 	cc = BITTST(r7, TIF_NOTIFY_RESUME);
diff --git a/arch/c6x/kernel/signal.c b/arch/c6x/kernel/signal.c
index cf37478..3d8f3c2 100644
--- a/arch/c6x/kernel/signal.c
+++ b/arch/c6x/kernel/signal.c
@@ -20,8 +20,6 @@
 #include <asm/cacheflush.h>
 
 
-#define _BLOCKABLE (~(sigmask(SIGKILL) | sigmask(SIGSTOP)))
-
 /*
  * Do a signal return, undo the signal stack.
  */
@@ -87,7 +85,6 @@
 	if (__copy_from_user(&set, &frame->uc.uc_sigmask, sizeof(set)))
 		goto badframe;
 
-	sigdelsetmask(&set, ~_BLOCKABLE);
 	set_current_blocked(&set);
 
 	if (restore_sigcontext(regs, &frame->uc.uc_mcontext))
@@ -248,10 +245,9 @@
 /*
  * handle the actual delivery of a signal to userspace
  */
-static int handle_signal(int sig,
+static void handle_signal(int sig,
 			 siginfo_t *info, struct k_sigaction *ka,
-			 sigset_t *oldset, struct pt_regs *regs,
-			 int syscall)
+			 struct pt_regs *regs, int syscall)
 {
 	int ret;
 
@@ -278,11 +274,9 @@
 	}
 
 	/* Set up the stack frame */
-	ret = setup_rt_frame(sig, ka, info, oldset, regs);
-	if (ret == 0)
-		block_sigmask(ka, sig);
-
-	return ret;
+	if (setup_rt_frame(sig, ka, info, sigmask_to_save(), regs) < 0)
+		return;
+	signal_delivered(sig, info, ka, regs, 0);
 }
 
 /*
@@ -292,7 +286,6 @@
 {
 	struct k_sigaction ka;
 	siginfo_t info;
-	sigset_t *oldset;
 	int signr;
 
 	/* we want the common case to go fast, which is why we may in certain
@@ -300,25 +293,9 @@
 	if (!user_mode(regs))
 		return;
 
-	if (test_thread_flag(TIF_RESTORE_SIGMASK))
-		oldset = &current->saved_sigmask;
-	else
-		oldset = &current->blocked;
-
 	signr = get_signal_to_deliver(&info, &ka, regs, NULL);
 	if (signr > 0) {
-		if (handle_signal(signr, &info, &ka, oldset,
-				  regs, syscall) == 0) {
-			/* a signal was successfully delivered; the saved
-			 * sigmask will have been stored in the signal frame,
-			 * and will be restored by sigreturn, so we can simply
-			 * clear the TIF_RESTORE_SIGMASK flag */
-			if (test_thread_flag(TIF_RESTORE_SIGMASK))
-				clear_thread_flag(TIF_RESTORE_SIGMASK);
-
-			tracehook_signal_handler(signr, &info, &ka, regs, 0);
-		}
-
+		handle_signal(signr, &info, &ka, regs, syscall);
 		return;
 	}
 
@@ -343,10 +320,7 @@
 
 	/* if there's no signal to deliver, we just put the saved sigmask
 	 * back */
-	if (test_thread_flag(TIF_RESTORE_SIGMASK)) {
-		clear_thread_flag(TIF_RESTORE_SIGMASK);
-		sigprocmask(SIG_SETMASK, &current->saved_sigmask, NULL);
-	}
+	restore_saved_sigmask();
 }
 
 /*
@@ -357,14 +331,11 @@
 				 int syscall)
 {
 	/* deal with pending signal delivery */
-	if (thread_info_flags & ((1 << TIF_SIGPENDING) |
-				 (1 << TIF_RESTORE_SIGMASK)))
+	if (thread_info_flags & (1 << TIF_SIGPENDING))
 		do_signal(regs, syscall);
 
 	if (thread_info_flags & (1 << TIF_NOTIFY_RESUME)) {
 		clear_thread_flag(TIF_NOTIFY_RESUME);
 		tracehook_notify_resume(regs);
-		if (current->replacement_session_keyring)
-			key_replace_session_keyring();
 	}
 }
diff --git a/arch/cris/Kconfig b/arch/cris/Kconfig
index 22d34d6..bb34465 100644
--- a/arch/cris/Kconfig
+++ b/arch/cris/Kconfig
@@ -40,6 +40,7 @@
 	bool
 	default y
 	select HAVE_IDE
+	select GENERIC_ATOMIC64
 	select HAVE_GENERIC_HARDIRQS
 	select GENERIC_IRQ_SHOW
 	select GENERIC_IOMAP
diff --git a/arch/cris/arch-v10/drivers/ds1302.c b/arch/cris/arch-v10/drivers/ds1302.c
deleted file mode 100644
index 74f99c6..0000000
--- a/arch/cris/arch-v10/drivers/ds1302.c
+++ /dev/null
@@ -1,515 +0,0 @@
-/*!***************************************************************************
-*!
-*! FILE NAME  : ds1302.c
-*!
-*! DESCRIPTION: Implements an interface for the DS1302 RTC through Etrax I/O
-*!
-*! Functions exported: ds1302_readreg, ds1302_writereg, ds1302_init
-*!
-*! ---------------------------------------------------------------------------
-*!
-*! (C) Copyright 1999-2007 Axis Communications AB, LUND, SWEDEN
-*!
-*!***************************************************************************/
-
-
-#include <linux/fs.h>
-#include <linux/init.h>
-#include <linux/mm.h>
-#include <linux/module.h>
-#include <linux/miscdevice.h>
-#include <linux/delay.h>
-#include <linux/mutex.h>
-#include <linux/bcd.h>
-#include <linux/capability.h>
-
-#include <asm/uaccess.h>
-#include <arch/svinto.h>
-#include <asm/io.h>
-#include <asm/rtc.h>
-#include <arch/io_interface_mux.h>
-
-#include "i2c.h"
-
-#define RTC_MAJOR_NR 121 /* local major, change later */
-
-static DEFINE_MUTEX(ds1302_mutex);
-static const char ds1302_name[] = "ds1302";
-
-/* The DS1302 might be connected to different bits on different products. 
- * It has three signals - SDA, SCL and RST. RST and SCL are always outputs,
- * but SDA can have a selected direction.
- * For now, only PORT_PB is hardcoded.
- */
-
-/* The RST bit may be on either the Generic Port or Port PB. */
-#ifdef CONFIG_ETRAX_DS1302_RST_ON_GENERIC_PORT
-#define TK_RST_OUT(x) REG_SHADOW_SET(R_PORT_G_DATA,  port_g_data_shadow,  CONFIG_ETRAX_DS1302_RSTBIT, x)
-#define TK_RST_DIR(x)
-#else
-#define TK_RST_OUT(x) REG_SHADOW_SET(R_PORT_PB_DATA, port_pb_data_shadow, CONFIG_ETRAX_DS1302_RSTBIT, x)
-#define TK_RST_DIR(x) REG_SHADOW_SET(R_PORT_PB_DIR,  port_pb_dir_shadow,  CONFIG_ETRAX_DS1302_RSTBIT, x)
-#endif
-
-
-#define TK_SDA_OUT(x) REG_SHADOW_SET(R_PORT_PB_DATA, port_pb_data_shadow, CONFIG_ETRAX_DS1302_SDABIT, x)
-#define TK_SCL_OUT(x) REG_SHADOW_SET(R_PORT_PB_DATA, port_pb_data_shadow, CONFIG_ETRAX_DS1302_SCLBIT, x)
-
-#define TK_SDA_IN()   ((*R_PORT_PB_READ >> CONFIG_ETRAX_DS1302_SDABIT) & 1)
-/* 1 is out, 0 is in */
-#define TK_SDA_DIR(x) REG_SHADOW_SET(R_PORT_PB_DIR,  port_pb_dir_shadow,  CONFIG_ETRAX_DS1302_SDABIT, x)
-#define TK_SCL_DIR(x) REG_SHADOW_SET(R_PORT_PB_DIR,  port_pb_dir_shadow,  CONFIG_ETRAX_DS1302_SCLBIT, x)
-
-
-/*
- * The reason for tempudelay and not udelay is that loops_per_usec
- * (used in udelay) is not set when functions here are called from time.c 
- */
-
-static void tempudelay(int usecs) 
-{
-	volatile int loops;
-
-	for(loops = usecs * 12; loops > 0; loops--)
-		/* nothing */;	
-}
-
-
-/* Send 8 bits. */
-static void
-out_byte(unsigned char x) 
-{
-	int i;
-	TK_SDA_DIR(1);
-	for (i = 8; i--;) {
-		/* The chip latches incoming bits on the rising edge of SCL. */
-		TK_SCL_OUT(0);
-		TK_SDA_OUT(x & 1);
-		tempudelay(1);
-		TK_SCL_OUT(1);
-		tempudelay(1);
-		x >>= 1;
-	}
-	TK_SDA_DIR(0);
-}
-
-static unsigned char
-in_byte(void) 
-{
-	unsigned char x = 0;
-	int i;
-
-	/* Read byte. Bits come LSB first, on the falling edge of SCL.
-	 * Assume SDA is in input direction already.
-	 */
-	TK_SDA_DIR(0);
-
-	for (i = 8; i--;) {
-		TK_SCL_OUT(0);
-		tempudelay(1);
-		x >>= 1;
-		x |= (TK_SDA_IN() << 7);
-		TK_SCL_OUT(1);
-		tempudelay(1);
-	}
-
-	return x;
-}
-
-/* Prepares for a transaction by de-activating RST (active-low). */
-
-static void
-start(void) 
-{
-	TK_SCL_OUT(0);
-	tempudelay(1);
-	TK_RST_OUT(0);
-	tempudelay(5);
-	TK_RST_OUT(1);	
-}
-
-/* Ends a transaction by taking RST active again. */
-
-static void
-stop(void) 
-{
-	tempudelay(2);
-	TK_RST_OUT(0);
-}
-
-/* Enable writing. */
-
-static void
-ds1302_wenable(void) 
-{
-	start(); 	
-	out_byte(0x8e); /* Write control register  */
-	out_byte(0x00); /* Disable write protect bit 7 = 0 */
-	stop();
-}
-
-/* Disable writing. */
-
-static void
-ds1302_wdisable(void) 
-{
-	start();
-	out_byte(0x8e); /* Write control register  */
-	out_byte(0x80); /* Enable write protect, bit 7 = 1 */
-	stop();
-}
-
-
-
-/* Read a byte from the selected register in the DS1302. */
-
-unsigned char
-ds1302_readreg(int reg) 
-{
-	unsigned char x;
-
-	start();
-	out_byte(0x81 | (reg << 1)); /* read register */
-	x = in_byte();
-	stop();
-
-	return x;
-}
-
-/* Write a byte to the selected register. */
-
-void
-ds1302_writereg(int reg, unsigned char val) 
-{
-#ifndef CONFIG_ETRAX_RTC_READONLY
-	int do_writereg = 1;
-#else
-	int do_writereg = 0;
-
-	if (reg == RTC_TRICKLECHARGER)
-		do_writereg = 1;
-#endif
-
-	if (do_writereg) {
-		ds1302_wenable();
-		start();
-		out_byte(0x80 | (reg << 1)); /* write register */
-		out_byte(val);
-		stop();
-		ds1302_wdisable();
-	}
-}
-
-void
-get_rtc_time(struct rtc_time *rtc_tm) 
-{
-	unsigned long flags;
-
-	local_irq_save(flags);
-
-	rtc_tm->tm_sec = CMOS_READ(RTC_SECONDS);
-	rtc_tm->tm_min = CMOS_READ(RTC_MINUTES);
-	rtc_tm->tm_hour = CMOS_READ(RTC_HOURS);
-	rtc_tm->tm_mday = CMOS_READ(RTC_DAY_OF_MONTH);
-	rtc_tm->tm_mon = CMOS_READ(RTC_MONTH);
-	rtc_tm->tm_year = CMOS_READ(RTC_YEAR);
-
-	local_irq_restore(flags);
-	
-	rtc_tm->tm_sec = bcd2bin(rtc_tm->tm_sec);
-	rtc_tm->tm_min = bcd2bin(rtc_tm->tm_min);
-	rtc_tm->tm_hour = bcd2bin(rtc_tm->tm_hour);
-	rtc_tm->tm_mday = bcd2bin(rtc_tm->tm_mday);
-	rtc_tm->tm_mon = bcd2bin(rtc_tm->tm_mon);
-	rtc_tm->tm_year = bcd2bin(rtc_tm->tm_year);
-
-	/*
-	 * Account for differences between how the RTC uses the values
-	 * and how they are defined in a struct rtc_time;
-	 */
-
-	if (rtc_tm->tm_year <= 69)
-		rtc_tm->tm_year += 100;
-
-	rtc_tm->tm_mon--;
-}
-
-static unsigned char days_in_mo[] = 
-    {0, 31, 28, 31, 30, 31, 30, 31, 31, 30, 31, 30, 31};
-
-/* ioctl that supports RTC_RD_TIME and RTC_SET_TIME (read and set time/date). */
-
-static int rtc_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
-{
-	unsigned long flags;
-
-	switch(cmd) {
-		case RTC_RD_TIME:	/* read the time/date from RTC	*/
-		{
-			struct rtc_time rtc_tm;
-						
-			memset(&rtc_tm, 0, sizeof (struct rtc_time));
-			get_rtc_time(&rtc_tm);						
-			if (copy_to_user((struct rtc_time*)arg, &rtc_tm, sizeof(struct rtc_time)))
-				return -EFAULT;	
-			return 0;
-		}
-
-		case RTC_SET_TIME:	/* set the RTC */
-		{
-			struct rtc_time rtc_tm;
-			unsigned char mon, day, hrs, min, sec, leap_yr;
-			unsigned int yrs;
-
-			if (!capable(CAP_SYS_TIME))
-				return -EPERM;
-
-			if (copy_from_user(&rtc_tm, (struct rtc_time*)arg, sizeof(struct rtc_time)))
-				return -EFAULT;
-
-			yrs = rtc_tm.tm_year + 1900;
-			mon = rtc_tm.tm_mon + 1;   /* tm_mon starts at zero */
-			day = rtc_tm.tm_mday;
-			hrs = rtc_tm.tm_hour;
-			min = rtc_tm.tm_min;
-			sec = rtc_tm.tm_sec;
-			
-			
-			if ((yrs < 1970) || (yrs > 2069))
-				return -EINVAL;
-
-			leap_yr = ((!(yrs % 4) && (yrs % 100)) || !(yrs % 400));
-
-			if ((mon > 12) || (day == 0))
-				return -EINVAL;
-
-			if (day > (days_in_mo[mon] + ((mon == 2) && leap_yr)))
-				return -EINVAL;
-			
-			if ((hrs >= 24) || (min >= 60) || (sec >= 60))
-				return -EINVAL;
-
-			if (yrs >= 2000)
-				yrs -= 2000;	/* RTC (0, 1, ... 69) */
-			else
-				yrs -= 1900;	/* RTC (70, 71, ... 99) */
-
-			sec = bin2bcd(sec);
-			min = bin2bcd(min);
-			hrs = bin2bcd(hrs);
-			day = bin2bcd(day);
-			mon = bin2bcd(mon);
-			yrs = bin2bcd(yrs);
-
-			local_irq_save(flags);
-			CMOS_WRITE(yrs, RTC_YEAR);
-			CMOS_WRITE(mon, RTC_MONTH);
-			CMOS_WRITE(day, RTC_DAY_OF_MONTH);
-			CMOS_WRITE(hrs, RTC_HOURS);
-			CMOS_WRITE(min, RTC_MINUTES);
-			CMOS_WRITE(sec, RTC_SECONDS);
-			local_irq_restore(flags);
-
-			/* Notice that at this point, the RTC is updated but
-			 * the kernel is still running with the old time.
-			 * You need to set that separately with settimeofday
-			 * or adjtimex.
-			 */
-			return 0;
-		}
-
-		case RTC_SET_CHARGE: /* set the RTC TRICKLE CHARGE register */
-		{
-			int tcs_val;
-
-			if (!capable(CAP_SYS_TIME))
-				return -EPERM;
-			
-			if(copy_from_user(&tcs_val, (int*)arg, sizeof(int)))
-				return -EFAULT;
-
-			tcs_val = RTC_TCR_PATTERN | (tcs_val & 0x0F);
-			ds1302_writereg(RTC_TRICKLECHARGER, tcs_val);
-			return 0;
-		}
-		case RTC_VL_READ:
-		{
-			/* TODO:
-			 * Implement voltage low detection support
-			 */
-			printk(KERN_WARNING "DS1302: RTC Voltage Low detection"
-			       " is not supported\n");
-			return 0;
-		}
-		case RTC_VL_CLR:
-		{
-			/* TODO:
-			 * Nothing to do since Voltage Low detection is not supported
-			 */
-			return 0;
-		}
-		default:
-			return -ENOIOCTLCMD;
-	}
-}
-
-static long rtc_unlocked_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
-{
-	int ret;
-
-	mutex_lock(&ds1302_mutex);
-	ret = rtc_ioctl(file, cmd, arg);
-	mutex_unlock(&ds1302_mutex);
-
-	return ret;
-}
-
-static void
-print_rtc_status(void)
-{
-	struct rtc_time tm;
-
-	get_rtc_time(&tm);
-
-	/*
-	 * There is no way to tell if the user has the RTC set for local
-	 * time or for Coordinated Universal Time (UTC). Probably local though.
-	 */
-
-	printk(KERN_INFO "rtc_time\t: %02d:%02d:%02d\n",
-	       tm.tm_hour, tm.tm_min, tm.tm_sec);
-	printk(KERN_INFO "rtc_date\t: %04d-%02d-%02d\n",
-	       tm.tm_year + 1900, tm.tm_mon + 1, tm.tm_mday);
-}
-
-/* The various file operations we support. */
-
-static const struct file_operations rtc_fops = {
-	.owner		= THIS_MODULE,
-	.unlocked_ioctl = rtc_unlocked_ioctl,
-	.llseek		= noop_llseek,
-}; 
-
-/* Probe for the chip by writing something to its RAM and trying to read it back. */
-
-#define MAGIC_PATTERN 0x42
-
-static int __init
-ds1302_probe(void) 
-{
-	int retval, res; 
-
-	TK_RST_DIR(1);
-	TK_SCL_DIR(1);
-	TK_SDA_DIR(0);
-	
-	/* Try to talk to timekeeper. */
-
-	ds1302_wenable();  
-	start();
-	out_byte(0xc0); /* write RAM byte 0 */	
-	out_byte(MAGIC_PATTERN); /* write something magic */
-	start();
-	out_byte(0xc1); /* read RAM byte 0 */
-
-	if((res = in_byte()) == MAGIC_PATTERN) {
-		stop();
-		ds1302_wdisable();
-		printk(KERN_INFO "%s: RTC found.\n", ds1302_name);
-		printk(KERN_INFO "%s: SDA, SCL, RST on PB%i, PB%i, %s%i\n",
-		       ds1302_name,
-		       CONFIG_ETRAX_DS1302_SDABIT,
-		       CONFIG_ETRAX_DS1302_SCLBIT,
-#ifdef CONFIG_ETRAX_DS1302_RST_ON_GENERIC_PORT
-		       "GENIO",
-#else
-		       "PB",
-#endif
-		       CONFIG_ETRAX_DS1302_RSTBIT);
-		       print_rtc_status();
-		retval = 1;
-	} else {
-		stop();
-		retval = 0;
-	}
-
-	return retval;
-}
-
-
-/* Just probe for the RTC and register the device to handle the ioctl needed. */
-
-int __init
-ds1302_init(void) 
-{
-#ifdef CONFIG_ETRAX_I2C
-	i2c_init();
-#endif
-
-	if (!ds1302_probe()) {
-#ifdef CONFIG_ETRAX_DS1302_RST_ON_GENERIC_PORT
-#if CONFIG_ETRAX_DS1302_RSTBIT == 27
-		/*
-		 * The only way to set g27 to output is to enable ATA.
-		 *
-		 * Make sure that R_GEN_CONFIG is setup correct.
-		 */
-		/* Allocating the ATA interface will grab almost all
-		 * pins in I/O groups a, b, c and d.  A consequence of
-		 * allocating the ATA interface is that the fixed
-		 * interfaces shared RAM, parallel port 0, parallel
-		 * port 1, parallel port W, SCSI-8 port 0, SCSI-8 port
-		 * 1, SCSI-W, serial port 2, serial port 3,
-		 * synchronous serial port 3 and USB port 2 and almost
-		 * all GPIO pins on port g cannot be used.
-		 */
-		if (cris_request_io_interface(if_ata, "ds1302/ATA")) {
-			printk(KERN_WARNING "ds1302: Failed to get IO interface\n");
-			return -1;
-		}
-
-#elif CONFIG_ETRAX_DS1302_RSTBIT == 0
-		if (cris_io_interface_allocate_pins(if_gpio_grp_a,
-						    'g',
-						    CONFIG_ETRAX_DS1302_RSTBIT,
-						    CONFIG_ETRAX_DS1302_RSTBIT)) {
-			printk(KERN_WARNING "ds1302: Failed to get IO interface\n");
-			return -1;
-		}
-
-		/* Set the direction of this bit to out. */
-		genconfig_shadow = ((genconfig_shadow &
- 				     ~IO_MASK(R_GEN_CONFIG, g0dir)) |
- 				   (IO_STATE(R_GEN_CONFIG, g0dir, out)));
-		*R_GEN_CONFIG = genconfig_shadow;
-#endif
-		if (!ds1302_probe()) {
-			printk(KERN_WARNING "%s: RTC not found.\n", ds1302_name);
-			return -1;
-		}
-#else
-		printk(KERN_WARNING "%s: RTC not found.\n", ds1302_name);
-		return -1;
-#endif
-  	}
-	/* Initialise trickle charger */
-	ds1302_writereg(RTC_TRICKLECHARGER,
-			RTC_TCR_PATTERN |(CONFIG_ETRAX_DS1302_TRICKLE_CHARGE & 0x0F));
-        /* Start clock by resetting CLOCK_HALT */
-	ds1302_writereg(RTC_SECONDS, (ds1302_readreg(RTC_SECONDS) & 0x7F));
-	return 0;
-}
-
-static int __init ds1302_register(void)
-{
-	ds1302_init();
-	if (register_chrdev(RTC_MAJOR_NR, ds1302_name, &rtc_fops)) {
-		printk(KERN_INFO "%s: unable to get major %d for rtc\n", 
-		       ds1302_name, RTC_MAJOR_NR);
-		return -1;
-	}
-        return 0;
-
-}
-
-module_init(ds1302_register);
diff --git a/arch/cris/arch-v10/drivers/pcf8563.c b/arch/cris/arch-v10/drivers/pcf8563.c
deleted file mode 100644
index 9da0568..0000000
--- a/arch/cris/arch-v10/drivers/pcf8563.c
+++ /dev/null
@@ -1,380 +0,0 @@
-/*
- * PCF8563 RTC
- *
- * From Philips' datasheet:
- *
- * The PCF8563 is a CMOS real-time clock/calendar optimized for low power
- * consumption. A programmable clock output, interrupt output and voltage
- * low detector are also provided. All address and data are transferred
- * serially via two-line bidirectional I2C-bus. Maximum bus speed is
- * 400 kbits/s. The built-in word address register is incremented
- * automatically after each written or read byte.
- *
- * Copyright (c) 2002-2007, Axis Communications AB
- * All rights reserved.
- *
- * Author: Tobias Anderberg <tobiasa@axis.com>.
- *
- */
-
-#include <linux/module.h>
-#include <linux/kernel.h>
-#include <linux/types.h>
-#include <linux/sched.h>
-#include <linux/init.h>
-#include <linux/fs.h>
-#include <linux/ioctl.h>
-#include <linux/delay.h>
-#include <linux/bcd.h>
-#include <linux/mutex.h>
-
-#include <asm/uaccess.h>
-#include <asm/io.h>
-#include <asm/rtc.h>
-
-#include "i2c.h"
-
-#define PCF8563_MAJOR 121	/* Local major number. */
-#define DEVICE_NAME "rtc"	/* Name which is registered in /proc/devices. */
-#define PCF8563_NAME "PCF8563"
-#define DRIVER_VERSION "$Revision: 1.24 $"
-
-/* I2C bus slave registers. */
-#define RTC_I2C_READ		0xa3
-#define RTC_I2C_WRITE		0xa2
-
-/* Two simple wrapper macros, saves a few keystrokes. */
-#define rtc_read(x) i2c_readreg(RTC_I2C_READ, x)
-#define rtc_write(x,y) i2c_writereg(RTC_I2C_WRITE, x, y)
-
-static DEFINE_MUTEX(pcf8563_mutex);
-static DEFINE_MUTEX(rtc_lock); /* Protect state etc */
-
-static const unsigned char days_in_month[] =
-	{ 0, 31, 28, 31, 30, 31, 30, 31, 31, 30, 31, 30, 31 };
-
-static long pcf8563_unlocked_ioctl(struct file *, unsigned int, unsigned long);
-
-/* Cache VL bit value read at driver init since writing the RTC_SECOND
- * register clears the VL status.
- */
-static int voltage_low;
-
-static const struct file_operations pcf8563_fops = {
-	.owner = THIS_MODULE,
-	.unlocked_ioctl = pcf8563_unlocked_ioctl,
-	.llseek		= noop_llseek,
-};
-
-unsigned char
-pcf8563_readreg(int reg)
-{
-	unsigned char res = rtc_read(reg);
-
-	/* The PCF8563 does not return 0 for unimplemented bits. */
-	switch (reg) {
-	case RTC_SECONDS:
-	case RTC_MINUTES:
-		res &= 0x7F;
-		break;
-	case RTC_HOURS:
-	case RTC_DAY_OF_MONTH:
-		res &= 0x3F;
-		break;
-	case RTC_WEEKDAY:
-		res &= 0x07;
-		break;
-	case RTC_MONTH:
-		res &= 0x1F;
-		break;
-	case RTC_CONTROL1:
-		res &= 0xA8;
-		break;
-	case RTC_CONTROL2:
-		res &= 0x1F;
-		break;
-	case RTC_CLOCKOUT_FREQ:
-	case RTC_TIMER_CONTROL:
-		res &= 0x83;
-		break;
-	}
-	return res;
-}
-
-void
-pcf8563_writereg(int reg, unsigned char val)
-{
-	rtc_write(reg, val);
-}
-
-void
-get_rtc_time(struct rtc_time *tm)
-{
-	tm->tm_sec  = rtc_read(RTC_SECONDS);
-	tm->tm_min  = rtc_read(RTC_MINUTES);
-	tm->tm_hour = rtc_read(RTC_HOURS);
-	tm->tm_mday = rtc_read(RTC_DAY_OF_MONTH);
-	tm->tm_wday = rtc_read(RTC_WEEKDAY);
-	tm->tm_mon  = rtc_read(RTC_MONTH);
-	tm->tm_year = rtc_read(RTC_YEAR);
-
-	if (tm->tm_sec & 0x80) {
-		printk(KERN_ERR "%s: RTC Voltage Low - reliable date/time "
-		       "information is no longer guaranteed!\n", PCF8563_NAME);
-	}
-
-	tm->tm_year  = bcd2bin(tm->tm_year) +
-		       ((tm->tm_mon & 0x80) ? 100 : 0);
-	tm->tm_sec  &= 0x7F;
-	tm->tm_min  &= 0x7F;
-	tm->tm_hour &= 0x3F;
-	tm->tm_mday &= 0x3F;
-	tm->tm_wday &= 0x07; /* Not coded in BCD. */
-	tm->tm_mon  &= 0x1F;
-
-	tm->tm_sec = bcd2bin(tm->tm_sec);
-	tm->tm_min = bcd2bin(tm->tm_min);
-	tm->tm_hour = bcd2bin(tm->tm_hour);
-	tm->tm_mday = bcd2bin(tm->tm_mday);
-	tm->tm_mon = bcd2bin(tm->tm_mon);
-	tm->tm_mon--; /* Month is 1..12 in RTC but 0..11 in linux */
-}
-
-int __init
-pcf8563_init(void)
-{
-	static int res;
-	static int first = 1;
-
-	if (!first)
-		return res;
-	first = 0;
-
-	/* Initiate the i2c protocol. */
-	res = i2c_init();
-	if (res < 0) {
-		printk(KERN_CRIT "pcf8563_init: Failed to init i2c.\n");
-		return res;
-	}
-
-	/*
-	 * First of all we need to reset the chip. This is done by
-	 * clearing control1, control2 and clk freq and resetting
-	 * all alarms.
-	 */
-	if (rtc_write(RTC_CONTROL1, 0x00) < 0)
-		goto err;
-
-	if (rtc_write(RTC_CONTROL2, 0x00) < 0)
-		goto err;
-
-	if (rtc_write(RTC_CLOCKOUT_FREQ, 0x00) < 0)
-		goto err;
-
-	if (rtc_write(RTC_TIMER_CONTROL, 0x03) < 0)
-		goto err;
-
-	/* Reset the alarms. */
-	if (rtc_write(RTC_MINUTE_ALARM, 0x80) < 0)
-		goto err;
-
-	if (rtc_write(RTC_HOUR_ALARM, 0x80) < 0)
-		goto err;
-
-	if (rtc_write(RTC_DAY_ALARM, 0x80) < 0)
-		goto err;
-
-	if (rtc_write(RTC_WEEKDAY_ALARM, 0x80) < 0)
-		goto err;
-
-	/* Check for low voltage, and warn about it. */
-	if (rtc_read(RTC_SECONDS) & 0x80) {
-		voltage_low = 1;
-		printk(KERN_WARNING "%s: RTC Voltage Low - reliable "
-		       "date/time information is no longer guaranteed!\n",
-		       PCF8563_NAME);
-	}
-
-	return res;
-
-err:
-	printk(KERN_INFO "%s: Error initializing chip.\n", PCF8563_NAME);
-	res = -1;
-	return res;
-}
-
-void __exit
-pcf8563_exit(void)
-{
-	unregister_chrdev(PCF8563_MAJOR, DEVICE_NAME);
-}
-
-/*
- * ioctl calls for this driver. Why return -ENOTTY upon error? Because
- * POSIX says so!
- */
-static int pcf8563_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
-{
-	/* Some sanity checks. */
-	if (_IOC_TYPE(cmd) != RTC_MAGIC)
-		return -ENOTTY;
-
-	if (_IOC_NR(cmd) > RTC_MAX_IOCTL)
-		return -ENOTTY;
-
-	switch (cmd) {
-	case RTC_RD_TIME:
-	{
-		struct rtc_time tm;
-
-		mutex_lock(&rtc_lock);
-		memset(&tm, 0, sizeof tm);
-		get_rtc_time(&tm);
-
-		if (copy_to_user((struct rtc_time *) arg, &tm,
-				 sizeof tm)) {
-			mutex_unlock(&rtc_lock);
-			return -EFAULT;
-		}
-
-		mutex_unlock(&rtc_lock);
-
-		return 0;
-	}
-	case RTC_SET_TIME:
-	{
-		int leap;
-		int year;
-		int century;
-		struct rtc_time tm;
-
-		memset(&tm, 0, sizeof tm);
-		if (!capable(CAP_SYS_TIME))
-			return -EPERM;
-
-		if (copy_from_user(&tm, (struct rtc_time *) arg, sizeof tm))
-			return -EFAULT;
-
-		/* Convert from struct tm to struct rtc_time. */
-		tm.tm_year += 1900;
-		tm.tm_mon += 1;
-
-		/*
-		 * Check if tm.tm_year is a leap year. A year is a leap
-		 * year if it is divisible by 4 but not 100, except
-		 * that years divisible by 400 _are_ leap years.
-		 */
-		year = tm.tm_year;
-		leap = (tm.tm_mon == 2) &&
-			((year % 4 == 0 && year % 100 != 0) || year % 400 == 0);
-
-		/* Perform some sanity checks. */
-		if ((tm.tm_year < 1970) ||
-		    (tm.tm_mon > 12) ||
-		    (tm.tm_mday == 0) ||
-		    (tm.tm_mday > days_in_month[tm.tm_mon] + leap) ||
-		    (tm.tm_wday >= 7) ||
-		    (tm.tm_hour >= 24) ||
-		    (tm.tm_min >= 60) ||
-		    (tm.tm_sec >= 60))
-			return -EINVAL;
-
-		century = (tm.tm_year >= 2000) ? 0x80 : 0;
-		tm.tm_year = tm.tm_year % 100;
-
-		tm.tm_year = bin2bcd(tm.tm_year);
-		tm.tm_mon = bin2bcd(tm.tm_mon);
-		tm.tm_mday = bin2bcd(tm.tm_mday);
-		tm.tm_hour = bin2bcd(tm.tm_hour);
-		tm.tm_min = bin2bcd(tm.tm_min);
-		tm.tm_sec = bin2bcd(tm.tm_sec);
-		tm.tm_mon |= century;
-
-		mutex_lock(&rtc_lock);
-
-		rtc_write(RTC_YEAR, tm.tm_year);
-		rtc_write(RTC_MONTH, tm.tm_mon);
-		rtc_write(RTC_WEEKDAY, tm.tm_wday); /* Not coded in BCD. */
-		rtc_write(RTC_DAY_OF_MONTH, tm.tm_mday);
-		rtc_write(RTC_HOURS, tm.tm_hour);
-		rtc_write(RTC_MINUTES, tm.tm_min);
-		rtc_write(RTC_SECONDS, tm.tm_sec);
-
-		mutex_unlock(&rtc_lock);
-
-		return 0;
-	}
-	case RTC_VL_READ:
-		if (voltage_low) {
-			printk(KERN_ERR "%s: RTC Voltage Low - "
-			       "reliable date/time information is no "
-			       "longer guaranteed!\n", PCF8563_NAME);
-		}
-
-		if (copy_to_user((int *) arg, &voltage_low, sizeof(int)))
-			return -EFAULT;
-		return 0;
-
-	case RTC_VL_CLR:
-	{
-		/* Clear the VL bit in the seconds register in case
-		 * the time has not been set already (which would
-		 * have cleared it). This does not really matter
-		 * because of the cached voltage_low value but do it
-		 * anyway for consistency. */
-
-		int ret = rtc_read(RTC_SECONDS);
-
-		rtc_write(RTC_SECONDS, (ret & 0x7F));
-
-		/* Clear the cached value. */
-		voltage_low = 0;
-
-		return 0;
-	}
-	default:
-		return -ENOTTY;
-	}
-
-	return 0;
-}
-
-static long pcf8563_unlocked_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
-{
-	int ret;
-
-	mutex_lock(&pcf8563_mutex);
-	ret = pcf8563_ioctl(filp, cmd, arg);
-	mutex_unlock(&pcf8563_mutex);
-
-	return ret;
-}
-
-static int __init pcf8563_register(void)
-{
-	if (pcf8563_init() < 0) {
-		printk(KERN_INFO "%s: Unable to initialize Real-Time Clock "
-		       "Driver, %s\n", PCF8563_NAME, DRIVER_VERSION);
-		return -1;
-	}
-
-	if (register_chrdev(PCF8563_MAJOR, DEVICE_NAME, &pcf8563_fops) < 0) {
-		printk(KERN_INFO "%s: Unable to get major number %d for RTC device.\n",
-		       PCF8563_NAME, PCF8563_MAJOR);
-		return -1;
-	}
-
-	printk(KERN_INFO "%s Real-Time Clock Driver, %s\n", PCF8563_NAME,
-	       DRIVER_VERSION);
-
-	/* Check for low voltage, and warn about it. */
-	if (voltage_low) {
-		printk(KERN_WARNING "%s: RTC Voltage Low - reliable date/time "
-		       "information is no longer guaranteed!\n", PCF8563_NAME);
-	}
-
-	return 0;
-}
-
-module_init(pcf8563_register);
-module_exit(pcf8563_exit);
diff --git a/arch/cris/arch-v10/kernel/fasttimer.c b/arch/cris/arch-v10/kernel/fasttimer.c
index 8a8196e..082f189 100644
--- a/arch/cris/arch-v10/kernel/fasttimer.c
+++ b/arch/cris/arch-v10/kernel/fasttimer.c
@@ -21,8 +21,6 @@
 #include <asm/io.h>
 #include <asm/irq.h>
 #include <asm/delay.h>
-#include <asm/rtc.h>
-
 
 #include <arch/svinto.h>
 #include <asm/fasttimer.h>
diff --git a/arch/cris/arch-v10/kernel/kgdb.c b/arch/cris/arch-v10/kernel/kgdb.c
index b579dd0..37e6d2c 100644
--- a/arch/cris/arch-v10/kernel/kgdb.c
+++ b/arch/cris/arch-v10/kernel/kgdb.c
@@ -264,7 +264,7 @@
 
 /* Write a value to a specified register in the stack of a thread other
    than the current thread. */
-static write_stack_register (int thread_id, int regno, char *valptr);
+static int write_stack_register(int thread_id, int regno, char *valptr);
 
 /* Read a value from a specified register in the register image. Returns the
    status of the read operation. The register value is returned in valptr. */
diff --git a/arch/cris/arch-v10/kernel/signal.c b/arch/cris/arch-v10/kernel/signal.c
index e16f8f2..0bb477c 100644
--- a/arch/cris/arch-v10/kernel/signal.c
+++ b/arch/cris/arch-v10/kernel/signal.c
@@ -31,8 +31,6 @@
 
 #define DEBUG_SIG 0
 
-#define _BLOCKABLE (~(sigmask(SIGKILL) | sigmask(SIGSTOP)))
-
 /* a syscall in Linux/CRIS is a break 13 instruction which is 2 bytes */
 /* manipulate regs so that upon return, it will be re-executed */
 
@@ -176,7 +174,6 @@
 				    sizeof(frame->extramask))))
 		goto badframe;
 
-	sigdelsetmask(&set, ~_BLOCKABLE);
 	set_current_blocked(&set);
 
 	if (restore_sigcontext(regs, &frame->sc))
@@ -212,7 +209,6 @@
 	if (__copy_from_user(&set, &frame->uc.uc_sigmask, sizeof(set)))
 		goto badframe;
 
-	sigdelsetmask(&set, ~_BLOCKABLE);
 	set_current_blocked(&set);
 
 	if (restore_sigcontext(regs, &frame->uc.uc_mcontext))
@@ -415,10 +411,11 @@
  * OK, we're invoking a handler
  */
 
-static inline int handle_signal(int canrestart, unsigned long sig,
+static inline void handle_signal(int canrestart, unsigned long sig,
 	siginfo_t *info, struct k_sigaction *ka,
-	sigset_t *oldset, struct pt_regs *regs)
+	struct pt_regs *regs)
 {
+	sigset_t *oldset = sigmask_to_save();
 	int ret;
 
 	/* Are we from a system call? */
@@ -456,9 +453,7 @@
 		ret = setup_frame(sig, ka, oldset, regs);
 
 	if (ret == 0)
-		block_sigmask(ka, sig);
-
-	return ret;
+		signal_delivered(sig, info, ka, regs, 0);
 }
 
 /*
@@ -478,7 +473,6 @@
 	siginfo_t info;
 	int signr;
         struct k_sigaction ka;
-	sigset_t *oldset;
 
 	/*
 	 * We want the common case to go fast, which
@@ -489,23 +483,10 @@
 	if (!user_mode(regs))
 		return;
 
-	if (test_thread_flag(TIF_RESTORE_SIGMASK))
-		oldset = &current->saved_sigmask;
-	else
-		oldset = &current->blocked;
-
 	signr = get_signal_to_deliver(&info, &ka, regs, NULL);
 	if (signr > 0) {
 		/* Whee!  Actually deliver the signal.  */
-		if (handle_signal(canrestart, signr, &info, &ka,
-				oldset, regs)) {
-			/* a signal was successfully delivered; the saved
-			 * sigmask will have been stored in the signal frame,
-			 * and will be restored by sigreturn, so we can simply
-			 * clear the TIF_RESTORE_SIGMASK flag */
-			if (test_thread_flag(TIF_RESTORE_SIGMASK))
-				clear_thread_flag(TIF_RESTORE_SIGMASK);
-		}
+		handle_signal(canrestart, signr, &info, &ka, regs);
 		return;
 	}
 
@@ -525,8 +506,5 @@
 
 	/* if there's no signal to deliver, we just put the saved sigmask
 	 * back */
-	if (test_thread_flag(TIF_RESTORE_SIGMASK)) {
-		clear_thread_flag(TIF_RESTORE_SIGMASK);
-		sigprocmask(SIG_SETMASK, &current->saved_sigmask, NULL);
-	}
+	restore_saved_sigmask();
 }
diff --git a/arch/cris/arch-v10/kernel/time.c b/arch/cris/arch-v10/kernel/time.c
index 20c85b5..bcffcb6 100644
--- a/arch/cris/arch-v10/kernel/time.c
+++ b/arch/cris/arch-v10/kernel/time.c
@@ -19,16 +19,12 @@
 #include <asm/signal.h>
 #include <asm/io.h>
 #include <asm/delay.h>
-#include <asm/rtc.h>
 #include <asm/irq_regs.h>
 
 /* define this if you need to use print_timestamp */
 /* it will make jiffies at 96 hz instead of 100 hz though */
 #undef USE_CASCADE_TIMERS
 
-extern int set_rtc_mmss(unsigned long nowtime);
-extern int have_rtc;
-
 unsigned long get_ns_in_jiffie(void)
 {
 	unsigned char timer_count, t1;
@@ -203,11 +199,6 @@
 	 */
 	loops_per_usec = 50;
 
-	if(RTC_INIT() < 0)
-		have_rtc = 0;
-	else
-		have_rtc = 1;
-
 	/* Setup the etrax timers
 	 * Base frequency is 25000 hz, divider 250 -> 100 HZ
 	 * In normal mode, we use timer0, so timer1 is free. In cascade
diff --git a/arch/cris/arch-v10/lib/Makefile b/arch/cris/arch-v10/lib/Makefile
index 36e9a9c..725153e 100644
--- a/arch/cris/arch-v10/lib/Makefile
+++ b/arch/cris/arch-v10/lib/Makefile
@@ -2,8 +2,5 @@
 # Makefile for Etrax-specific library files..
 #
 
-
-EXTRA_AFLAGS := -traditional
-
 lib-y  = checksum.o checksumcopy.o string.o usercopy.o memset.o csumcpfruser.o
 
diff --git a/arch/cris/arch-v32/drivers/cryptocop.c b/arch/cris/arch-v32/drivers/cryptocop.c
index 642c6fe..f8476d9 100644
--- a/arch/cris/arch-v32/drivers/cryptocop.c
+++ b/arch/cris/arch-v32/drivers/cryptocop.c
@@ -1394,11 +1394,10 @@
 
 	if (padlen < MD5_MIN_PAD_LENGTH) padlen += MD5_BLOCK_LENGTH;
 
-	p = kmalloc(padlen, alloc_flag);
+	p = kzalloc(padlen, alloc_flag);
 	if (!p) return -ENOMEM;
 
 	*p = 0x80;
-	memset(p+1, 0, padlen - 1);
 
 	DEBUG(printk("create_md5_pad: hashed_length=%lld bits == %lld bytes\n", bit_length, hashed_length));
 
@@ -1426,11 +1425,10 @@
 
 	if (padlen < SHA1_MIN_PAD_LENGTH) padlen += SHA1_BLOCK_LENGTH;
 
-	p = kmalloc(padlen, alloc_flag);
+	p = kzalloc(padlen, alloc_flag);
 	if (!p) return -ENOMEM;
 
 	*p = 0x80;
-	memset(p+1, 0, padlen - 1);
 
 	DEBUG(printk("create_sha1_pad: hashed_length=%lld bits == %lld bytes\n", bit_length, hashed_length));
 
diff --git a/arch/cris/arch-v32/kernel/ptrace.c b/arch/cris/arch-v32/kernel/ptrace.c
index f7ad9e8..f085229 100644
--- a/arch/cris/arch-v32/kernel/ptrace.c
+++ b/arch/cris/arch-v32/kernel/ptrace.c
@@ -114,8 +114,6 @@
 void
 ptrace_disable(struct task_struct *child)
 {
-	unsigned long tmp;
-
 	/* Deconfigure SPC and S-bit. */
 	user_disable_single_step(child);
 	put_reg(child, PT_SPC, 0);
diff --git a/arch/cris/arch-v32/kernel/signal.c b/arch/cris/arch-v32/kernel/signal.c
index b338d8f..b60d1b6 100644
--- a/arch/cris/arch-v32/kernel/signal.c
+++ b/arch/cris/arch-v32/kernel/signal.c
@@ -24,9 +24,6 @@
 
 extern unsigned long cris_signal_return_page;
 
-/* Flag to check if a signal is blockable. */
-#define _BLOCKABLE (~(sigmask(SIGKILL) | sigmask(SIGSTOP)))
-
 /*
  * A syscall in CRIS is really a "break 13" instruction, which is 2
  * bytes. The registers is manipulated so upon return the instruction
@@ -167,7 +164,6 @@
 						 sizeof(frame->extramask))))
 		goto badframe;
 
-	sigdelsetmask(&set, ~_BLOCKABLE);
 	set_current_blocked(&set);
 
 	if (restore_sigcontext(regs, &frame->sc))
@@ -208,7 +204,6 @@
 	if (__copy_from_user(&set, &frame->uc.uc_sigmask, sizeof(set)))
 		goto badframe;
 
-	sigdelsetmask(&set, ~_BLOCKABLE);
 	set_current_blocked(&set);
 
 	if (restore_sigcontext(regs, &frame->uc.uc_mcontext))
@@ -434,11 +429,12 @@
 }
 
 /* Invoke a signal handler to, well, handle the signal. */
-static inline int
+static inline void
 handle_signal(int canrestart, unsigned long sig,
 	      siginfo_t *info, struct k_sigaction *ka,
-              sigset_t *oldset, struct pt_regs * regs)
+              struct pt_regs * regs)
 {
+	sigset_t *oldset = sigmask_to_save();
 	int ret;
 
 	/* Check if this got called from a system call. */
@@ -489,9 +485,7 @@
 		ret = setup_frame(sig, ka, oldset, regs);
 
 	if (ret == 0)
-		block_sigmask(ka, sig);
-
-	return ret;
+		signal_delivered(sig, info, ka, regs, 0);
 }
 
 /*
@@ -511,7 +505,6 @@
 	int signr;
 	siginfo_t info;
         struct k_sigaction ka;
-	sigset_t *oldset;
 
 	/*
 	 * The common case should go fast, which is why this point is
@@ -521,25 +514,11 @@
 	if (!user_mode(regs))
 		return;
 
-	if (test_thread_flag(TIF_RESTORE_SIGMASK))
-		oldset = &current->saved_sigmask;
-	else
-		oldset = &current->blocked;
-
 	signr = get_signal_to_deliver(&info, &ka, regs, NULL);
 
 	if (signr > 0) {
 		/* Whee!  Actually deliver the signal.  */
-		if (handle_signal(canrestart, signr, &info, &ka,
-				oldset, regs)) {
-			/* a signal was successfully delivered; the saved
-			 * sigmask will have been stored in the signal frame,
-			 * and will be restored by sigreturn, so we can simply
-			 * clear the TIF_RESTORE_SIGMASK flag */
-			if (test_thread_flag(TIF_RESTORE_SIGMASK))
-				clear_thread_flag(TIF_RESTORE_SIGMASK);
-		}
-
+		handle_signal(canrestart, signr, &info, &ka, regs);
 		return;
 	}
 
@@ -560,10 +539,7 @@
 
 	/* if there's no signal to deliver, we just put the saved sigmask
 	 * back */
-	if (test_thread_flag(TIF_RESTORE_SIGMASK)) {
-		clear_thread_flag(TIF_RESTORE_SIGMASK);
-		sigprocmask(SIG_SETMASK, &current->saved_sigmask, NULL);
-	}
+	restore_saved_sigmask();
 }
 
 asmlinkage void
diff --git a/arch/cris/arch-v32/kernel/time.c b/arch/cris/arch-v32/kernel/time.c
index 6773fc8..8c4b45efd 100644
--- a/arch/cris/arch-v32/kernel/time.c
+++ b/arch/cris/arch-v32/kernel/time.c
@@ -18,7 +18,6 @@
 #include <asm/signal.h>
 #include <asm/io.h>
 #include <asm/delay.h>
-#include <asm/rtc.h>
 #include <asm/irq.h>
 #include <asm/irq_regs.h>
 
@@ -67,7 +66,6 @@
 };
 
 extern int set_rtc_mmss(unsigned long nowtime);
-extern int have_rtc;
 
 #ifdef CONFIG_CPU_FREQ
 static int
@@ -265,11 +263,6 @@
 	 */
 	loops_per_usec = 50;
 
-	if(RTC_INIT() < 0)
-		have_rtc = 0;
-	else
-		have_rtc = 1;
-
 	/* Start CPU local timer. */
 	cris_timer_init();
 
diff --git a/arch/cris/include/arch-v32/arch/cache.h b/arch/cris/include/arch-v32/arch/cache.h
index 1de779f..7caf25d 100644
--- a/arch/cris/include/arch-v32/arch/cache.h
+++ b/arch/cris/include/arch-v32/arch/cache.h
@@ -7,7 +7,7 @@
 #define L1_CACHE_BYTES 32
 #define L1_CACHE_SHIFT 5
 
-#define __read_mostly __attribute__((__section__(".data.read_mostly")))
+#define __read_mostly __attribute__((__section__(".data..read_mostly")))
 
 void flush_dma_list(dma_descr_data *descr);
 void flush_dma_descr(dma_descr_data *descr, int flush_buf);
diff --git a/arch/cris/include/asm/Kbuild b/arch/cris/include/asm/Kbuild
index 956eea2..04d02a5 100644
--- a/arch/cris/include/asm/Kbuild
+++ b/arch/cris/include/asm/Kbuild
@@ -6,5 +6,4 @@
 header-y += ethernet.h
 header-y += etraxgpio.h
 header-y += rs485.h
-header-y += rtc.h
 header-y += sync_serial.h
diff --git a/arch/cris/include/asm/posix_types.h b/arch/cris/include/asm/posix_types.h
index 72b3cd6..ce4e517 100644
--- a/arch/cris/include/asm/posix_types.h
+++ b/arch/cris/include/asm/posix_types.h
@@ -15,9 +15,6 @@
 typedef unsigned short	__kernel_mode_t;
 #define __kernel_mode_t __kernel_mode_t
 
-typedef unsigned short	__kernel_nlink_t;
-#define __kernel_nlink_t __kernel_nlink_t
-
 typedef unsigned short  __kernel_ipc_pid_t;
 #define __kernel_ipc_pid_t __kernel_ipc_pid_t
 
@@ -33,4 +30,6 @@
 typedef unsigned short	__kernel_old_dev_t;
 #define __kernel_old_dev_t __kernel_old_dev_t
 
+#include <asm-generic/posix_types.h>
+
 #endif /* __ARCH_CRIS_POSIX_TYPES_H */
diff --git a/arch/cris/include/asm/rtc.h b/arch/cris/include/asm/rtc.h
deleted file mode 100644
index 17d3019..0000000
--- a/arch/cris/include/asm/rtc.h
+++ /dev/null
@@ -1,107 +0,0 @@
-
-#ifndef __RTC_H__
-#define __RTC_H__
-
-#ifdef CONFIG_ETRAX_DS1302
-   /* Dallas DS1302 clock/calendar register numbers. */
-#  define RTC_SECONDS      0
-#  define RTC_MINUTES      1
-#  define RTC_HOURS        2
-#  define RTC_DAY_OF_MONTH 3
-#  define RTC_MONTH        4
-#  define RTC_WEEKDAY      5
-#  define RTC_YEAR         6
-#  define RTC_CONTROL      7
-
-   /* Bits in CONTROL register. */
-#  define RTC_CONTROL_WRITEPROTECT	0x80
-#  define RTC_TRICKLECHARGER		8
-
-  /* Bits in TRICKLECHARGER register TCS TCS TCS TCS DS DS RS RS. */
-#  define RTC_TCR_PATTERN	0xA0	/* 1010xxxx */
-#  define RTC_TCR_1DIOD		0x04	/* xxxx01xx */
-#  define RTC_TCR_2DIOD		0x08	/* xxxx10xx */
-#  define RTC_TCR_DISABLED	0x00	/* xxxxxx00 Disabled */
-#  define RTC_TCR_2KOHM		0x01	/* xxxxxx01 2KOhm */
-#  define RTC_TCR_4KOHM		0x02	/* xxxxxx10 4kOhm */
-#  define RTC_TCR_8KOHM		0x03	/* xxxxxx11 8kOhm */
-
-#elif defined(CONFIG_ETRAX_PCF8563)
-   /* I2C bus slave registers. */
-#  define RTC_I2C_READ		0xa3
-#  define RTC_I2C_WRITE		0xa2
-
-   /* Phillips PCF8563 registers. */
-#  define RTC_CONTROL1		0x00		/* Control/Status register 1. */
-#  define RTC_CONTROL2		0x01		/* Control/Status register 2. */
-#  define RTC_CLOCKOUT_FREQ	0x0d		/* CLKOUT frequency. */
-#  define RTC_TIMER_CONTROL	0x0e		/* Timer control. */
-#  define RTC_TIMER_CNTDOWN	0x0f		/* Timer countdown. */
-
-   /* BCD encoded clock registers. */
-#  define RTC_SECONDS		0x02
-#  define RTC_MINUTES		0x03
-#  define RTC_HOURS		0x04
-#  define RTC_DAY_OF_MONTH	0x05
-#  define RTC_WEEKDAY		0x06	/* Not coded in BCD! */
-#  define RTC_MONTH		0x07
-#  define RTC_YEAR		0x08
-#  define RTC_MINUTE_ALARM	0x09
-#  define RTC_HOUR_ALARM	0x0a
-#  define RTC_DAY_ALARM		0x0b
-#  define RTC_WEEKDAY_ALARM 0x0c
-
-#endif
-
-#ifdef CONFIG_ETRAX_DS1302
-extern unsigned char ds1302_readreg(int reg);
-extern void ds1302_writereg(int reg, unsigned char val);
-extern int ds1302_init(void);
-#  define CMOS_READ(x) ds1302_readreg(x)
-#  define CMOS_WRITE(val,reg) ds1302_writereg(reg,val)
-#  define RTC_INIT() ds1302_init()
-#elif defined(CONFIG_ETRAX_PCF8563)
-extern unsigned char pcf8563_readreg(int reg);
-extern void pcf8563_writereg(int reg, unsigned char val);
-extern int pcf8563_init(void);
-#  define CMOS_READ(x) pcf8563_readreg(x)
-#  define CMOS_WRITE(val,reg) pcf8563_writereg(reg,val)
-#  define RTC_INIT() pcf8563_init()
-#else
-  /* No RTC configured so we shouldn't try to access any. */
-#  define CMOS_READ(x) 42
-#  define CMOS_WRITE(x,y)
-#  define RTC_INIT() (-1)
-#endif
-
-/*
- * The struct used to pass data via the following ioctl. Similar to the
- * struct tm in <time.h>, but it needs to be here so that the kernel
- * source is self contained, allowing cross-compiles, etc. etc.
- */
-struct rtc_time {
-	int tm_sec;
-	int tm_min;
-	int tm_hour;
-	int tm_mday;
-	int tm_mon;
-	int tm_year;
-	int tm_wday;
-	int tm_yday;
-	int tm_isdst;
-};
-
-/* ioctl() calls that are permitted to the /dev/rtc interface. */
-#define RTC_MAGIC 'p'
-/* Read RTC time. */
-#define RTC_RD_TIME		_IOR(RTC_MAGIC, 0x09, struct rtc_time)
-/* Set RTC time. */
-#define RTC_SET_TIME		_IOW(RTC_MAGIC, 0x0a, struct rtc_time)
-#define RTC_SET_CHARGE		_IOW(RTC_MAGIC, 0x0b, int)
-/* Voltage low detector */
-#define RTC_VL_READ		_IOR(RTC_MAGIC, 0x13, int)
-/* Clear voltage low information */
-#define RTC_VL_CLR		_IO(RTC_MAGIC, 0x14)
-#define RTC_MAX_IOCTL 0x14
-
-#endif /* __RTC_H__ */
diff --git a/arch/cris/kernel/ptrace.c b/arch/cris/kernel/ptrace.c
index d114ad3..58d44ee 100644
--- a/arch/cris/kernel/ptrace.c
+++ b/arch/cris/kernel/ptrace.c
@@ -40,7 +40,5 @@
 	if (thread_info_flags & _TIF_NOTIFY_RESUME) {
 		clear_thread_flag(TIF_NOTIFY_RESUME);
 		tracehook_notify_resume(regs);
-		if (current->replacement_session_keyring)
-			key_replace_session_keyring();
 	}
 }
diff --git a/arch/cris/kernel/time.c b/arch/cris/kernel/time.c
index 4e73092..277ffc4 100644
--- a/arch/cris/kernel/time.c
+++ b/arch/cris/kernel/time.c
@@ -21,7 +21,6 @@
  *
  */
 
-#include <asm/rtc.h>
 #include <linux/errno.h>
 #include <linux/module.h>
 #include <linux/param.h>
@@ -32,7 +31,8 @@
 #include <linux/profile.h>
 #include <linux/sched.h>	/* just for sched_clock() - funny that */
 
-int have_rtc;  /* used to remember if we have an RTC or not */;
+
+#define D(x)
 
 #define TICK_SIZE tick
 
@@ -50,78 +50,16 @@
 }
 #endif
 
-/*
- * BUG: This routine does not handle hour overflow properly; it just
- *      sets the minutes. Usually you'll only notice that after reboot!
- */
-
 int set_rtc_mmss(unsigned long nowtime)
 {
-	int retval = 0;
-	int real_seconds, real_minutes, cmos_minutes;
-
-	printk(KERN_DEBUG "set_rtc_mmss(%lu)\n", nowtime);
-
-	if(!have_rtc)
-		return 0;
-
-	cmos_minutes = CMOS_READ(RTC_MINUTES);
-	cmos_minutes = bcd2bin(cmos_minutes);
-
-	/*
-	 * since we're only adjusting minutes and seconds,
-	 * don't interfere with hour overflow. This avoids
-	 * messing with unknown time zones but requires your
-	 * RTC not to be off by more than 15 minutes
-	 */
-	real_seconds = nowtime % 60;
-	real_minutes = nowtime / 60;
-	if (((abs(real_minutes - cmos_minutes) + 15)/30) & 1)
-		real_minutes += 30;		/* correct for half hour time zone */
-	real_minutes %= 60;
-
-	if (abs(real_minutes - cmos_minutes) < 30) {
-		real_seconds = bin2bcd(real_seconds);
-		real_minutes = bin2bcd(real_minutes);
-		CMOS_WRITE(real_seconds,RTC_SECONDS);
-		CMOS_WRITE(real_minutes,RTC_MINUTES);
-	} else {
-		printk_once(KERN_NOTICE
-		       "set_rtc_mmss: can't update from %d to %d\n",
-		       cmos_minutes, real_minutes);
-		retval = -1;
-	}
-
-	return retval;
+	D(printk(KERN_DEBUG "set_rtc_mmss(%lu)\n", nowtime));
+	return 0;
 }
 
 /* grab the time from the RTC chip */
-
-unsigned long
-get_cmos_time(void)
+unsigned long get_cmos_time(void)
 {
-	unsigned int year, mon, day, hour, min, sec;
-	if(!have_rtc)
-		return 0;
-
-	sec = CMOS_READ(RTC_SECONDS);
-	min = CMOS_READ(RTC_MINUTES);
-	hour = CMOS_READ(RTC_HOURS);
-	day = CMOS_READ(RTC_DAY_OF_MONTH);
-	mon = CMOS_READ(RTC_MONTH);
-	year = CMOS_READ(RTC_YEAR);
-
-	sec = bcd2bin(sec);
-	min = bcd2bin(min);
-	hour = bcd2bin(hour);
-	day = bcd2bin(day);
-	mon = bcd2bin(mon);
-	year = bcd2bin(year);
-
-	if ((year += 1900) < 1970)
-		year += 100;
-
-	return mktime(year, mon, day, hour, min, sec);
+	return 0;
 }
 
 
@@ -132,7 +70,7 @@
 
 void read_persistent_clock(struct timespec *ts)
 {
-	ts->tv_sec = get_cmos_time();
+	ts->tv_sec = 0;
 	ts->tv_nsec = 0;
 }
 
diff --git a/arch/cris/kernel/vmlinux.lds.S b/arch/cris/kernel/vmlinux.lds.S
index a6990cb..a68b983 100644
--- a/arch/cris/kernel/vmlinux.lds.S
+++ b/arch/cris/kernel/vmlinux.lds.S
@@ -52,6 +52,7 @@
 
 	EXCEPTION_TABLE(4)
 
+	_sdata = .;
 	RODATA
 
 	. = ALIGN (4);
diff --git a/arch/cris/mm/fault.c b/arch/cris/mm/fault.c
index b4760d8..45fd542 100644
--- a/arch/cris/mm/fault.c
+++ b/arch/cris/mm/fault.c
@@ -58,6 +58,8 @@
 	struct vm_area_struct * vma;
 	siginfo_t info;
 	int fault;
+	unsigned int flags = FAULT_FLAG_ALLOW_RETRY | FAULT_FLAG_KILLABLE |
+				((writeaccess & 1) ? FAULT_FLAG_WRITE : 0);
 
 	D(printk(KERN_DEBUG
 		 "Page fault for %lX on %X at %lX, prot %d write %d\n",
@@ -115,6 +117,7 @@
 	if (in_atomic() || !mm)
 		goto no_context;
 
+retry:
 	down_read(&mm->mmap_sem);
 	vma = find_vma(mm, address);
 	if (!vma)
@@ -163,7 +166,11 @@
 	 * the fault.
 	 */
 
-	fault = handle_mm_fault(mm, vma, address, (writeaccess & 1) ? FAULT_FLAG_WRITE : 0);
+	fault = handle_mm_fault(mm, vma, address, flags);
+
+	if ((fault & VM_FAULT_RETRY) && fatal_signal_pending(current))
+		return;
+
 	if (unlikely(fault & VM_FAULT_ERROR)) {
 		if (fault & VM_FAULT_OOM)
 			goto out_of_memory;
@@ -171,10 +178,24 @@
 			goto do_sigbus;
 		BUG();
 	}
-	if (fault & VM_FAULT_MAJOR)
-		tsk->maj_flt++;
-	else
-		tsk->min_flt++;
+
+	if (flags & FAULT_FLAG_ALLOW_RETRY) {
+		if (fault & VM_FAULT_MAJOR)
+			tsk->maj_flt++;
+		else
+			tsk->min_flt++;
+		if (fault & VM_FAULT_RETRY) {
+			flags &= ~FAULT_FLAG_ALLOW_RETRY;
+
+			/*
+			 * No need to up_read(&mm->mmap_sem) as we would
+			 * have already released it in __lock_page_or_retry
+			 * in mm/filemap.c.
+			 */
+
+			goto retry;
+		}
+	}
 
 	up_read(&mm->mmap_sem);
 	return;
diff --git a/arch/frv/include/asm/posix_types.h b/arch/frv/include/asm/posix_types.h
index 3f34cb4..fe512af 100644
--- a/arch/frv/include/asm/posix_types.h
+++ b/arch/frv/include/asm/posix_types.h
@@ -10,9 +10,6 @@
 typedef unsigned short	__kernel_mode_t;
 #define __kernel_mode_t __kernel_mode_t
 
-typedef unsigned short	__kernel_nlink_t;
-#define __kernel_nlink_t __kernel_nlink_t
-
 typedef unsigned short	__kernel_ipc_pid_t;
 #define __kernel_ipc_pid_t __kernel_ipc_pid_t
 
diff --git a/arch/frv/include/asm/thread_info.h b/arch/frv/include/asm/thread_info.h
index 54ab13a..0ff03a3 100644
--- a/arch/frv/include/asm/thread_info.h
+++ b/arch/frv/include/asm/thread_info.h
@@ -94,8 +94,8 @@
 #define TIF_NEED_RESCHED	3	/* rescheduling necessary */
 #define TIF_SINGLESTEP		4	/* restore singlestep on return to user mode */
 #define TIF_RESTORE_SIGMASK	5	/* restore signal mask in do_signal() */
-#define TIF_POLLING_NRFLAG	16	/* true if poll_idle() is polling TIF_NEED_RESCHED */
-#define TIF_MEMDIE		17	/* is terminating due to OOM killer */
+#define TIF_POLLING_NRFLAG	6	/* true if poll_idle() is polling TIF_NEED_RESCHED */
+#define TIF_MEMDIE		7	/* is terminating due to OOM killer */
 
 #define _TIF_SYSCALL_TRACE	(1 << TIF_SYSCALL_TRACE)
 #define _TIF_NOTIFY_RESUME	(1 << TIF_NOTIFY_RESUME)
@@ -105,8 +105,16 @@
 #define _TIF_RESTORE_SIGMASK	(1 << TIF_RESTORE_SIGMASK)
 #define _TIF_POLLING_NRFLAG	(1 << TIF_POLLING_NRFLAG)
 
-#define _TIF_WORK_MASK		0x0000FFFE	/* work to do on interrupt/exception return */
-#define _TIF_ALLWORK_MASK	0x0000FFFF	/* work to do on any return to u-space */
+/* work to do on interrupt/exception return */
+#define _TIF_WORK_MASK		\
+	(_TIF_NOTIFY_RESUME | _TIF_SIGPENDING | _TIF_NEED_RESCHED | _TIF_SINGLESTEP)
+
+/* work to do on any return to u-space */
+#define _TIF_ALLWORK_MASK	(_TIF_WORK_MASK | _TIF_SYSCALL_TRACE)
+
+#if _TIF_ALLWORK_MASK >= 0x2000
+#error "_TIF_ALLWORK_MASK won't fit in an ANDI now (see entry.S)"
+#endif
 
 /*
  * Thread-synchronous status.
diff --git a/arch/frv/kernel/entry.S b/arch/frv/kernel/entry.S
index 5ba23f7..7d5e000 100644
--- a/arch/frv/kernel/entry.S
+++ b/arch/frv/kernel/entry.S
@@ -905,18 +905,19 @@
 __syscall_exit:
 	LEDS		0x6300
 
-	sti		gr8,@(gr28,#REG_GR(8))	; save return value
+	# keep current PSR in GR23
+	movsg		psr,gr23
+
+	ldi		@(gr28,#REG_PSR),gr22
+
+	sti.p		gr8,@(gr28,#REG_GR(8))	; save return value
 
 	# rebuild saved psr - execve will change it for init/main.c
-	ldi		@(gr28,#REG_PSR),gr22
 	srli		gr22,#1,gr5
 	andi.p		gr22,#~PSR_PS,gr22
 	andi		gr5,#PSR_PS,gr5
 	or		gr5,gr22,gr22
-	ori		gr22,#PSR_S,gr22
-
-	# keep current PSR in GR23
-	movsg		psr,gr23
+	ori.p		gr22,#PSR_S,gr22
 
 	# make sure we don't miss an interrupt setting need_resched or sigpending between
 	# sampling and the RETT
@@ -924,9 +925,7 @@
 	movgs		gr23,psr
 
 	ldi		@(gr15,#TI_FLAGS),gr4
-	sethi.p		%hi(_TIF_ALLWORK_MASK),gr5
-	setlo		%lo(_TIF_ALLWORK_MASK),gr5
-	andcc		gr4,gr5,gr0,icc0
+	andicc		gr4,#_TIF_ALLWORK_MASK,gr0,icc0
 	bne		icc0,#0,__syscall_exit_work
 
 	# restore all registers and return
@@ -1111,9 +1110,7 @@
 __entry_return_from_user_interrupt:
 	LEDS		0x6402
 	ldi		@(gr15,#TI_FLAGS),gr4
-	sethi.p		%hi(_TIF_WORK_MASK),gr5
-	setlo		%lo(_TIF_WORK_MASK),gr5
-	andcc		gr4,gr5,gr0,icc0
+	andicc		gr4,#_TIF_WORK_MASK,gr0,icc0
 	beq		icc0,#1,__entry_return_direct
 
 __entry_work_pending:
@@ -1133,9 +1130,7 @@
 
 	LEDS		0x6401
 	ldi		@(gr15,#TI_FLAGS),gr4
-	sethi.p		%hi(_TIF_WORK_MASK),gr5
-	setlo		%lo(_TIF_WORK_MASK),gr5
-	andcc		gr4,gr5,gr0,icc0
+	andicc		gr4,#_TIF_WORK_MASK,gr0,icc0
 	beq		icc0,#1,__entry_return_direct
 	andicc		gr4,#_TIF_NEED_RESCHED,gr0,icc0
 	bne		icc0,#1,__entry_work_resched
@@ -1163,7 +1158,9 @@
 	# perform syscall exit tracing
 __syscall_exit_work:
 	LEDS		0x6340
-	andicc		gr4,#_TIF_SYSCALL_TRACE,gr0,icc0
+	andicc		gr22,#PSR_PS,gr0,icc1	; don't handle on return to kernel mode
+	andicc.p	gr4,#_TIF_SYSCALL_TRACE,gr0,icc0
+	bne		icc1,#0,__entry_return_direct
 	beq		icc0,#1,__entry_work_pending
 
 	movsg		psr,gr23
diff --git a/arch/frv/kernel/signal.c b/arch/frv/kernel/signal.c
index 8cf5dca..864c2f0 100644
--- a/arch/frv/kernel/signal.c
+++ b/arch/frv/kernel/signal.c
@@ -28,8 +28,6 @@
 
 #define DEBUG_SIG 0
 
-#define _BLOCKABLE (~(sigmask(SIGKILL) | sigmask(SIGSTOP)))
-
 struct fdpic_func_descriptor {
 	unsigned long	text;
 	unsigned long	GOT;
@@ -149,7 +147,6 @@
 	    __copy_from_user(&set.sig[1], &frame->extramask, sizeof(frame->extramask)))
 		goto badframe;
 
-	sigdelsetmask(&set, ~_BLOCKABLE);
 	set_current_blocked(&set);
 
 	if (restore_sigcontext(&frame->sc, &gr8))
@@ -172,7 +169,6 @@
 	if (__copy_from_user(&set, &frame->uc.uc_sigmask, sizeof(set)))
 		goto badframe;
 
-	sigdelsetmask(&set, ~_BLOCKABLE);
 	set_current_blocked(&set);
 
 	if (restore_sigcontext(&frame->uc.uc_mcontext, &gr8))
@@ -426,9 +422,10 @@
 /*
  * OK, we're invoking a handler
  */
-static int handle_signal(unsigned long sig, siginfo_t *info,
-			 struct k_sigaction *ka, sigset_t *oldset)
+static void handle_signal(unsigned long sig, siginfo_t *info,
+			 struct k_sigaction *ka)
 {
+	sigset_t *oldset = sigmask_to_save();
 	int ret;
 
 	/* Are we from a system call? */
@@ -460,11 +457,11 @@
 	else
 		ret = setup_frame(sig, ka, oldset);
 
-	if (ret == 0)
-		block_sigmask(ka, sig);
+	if (ret)
+		return;
 
-	return ret;
-
+	signal_delivered(sig, info, ka, __frame,
+				 test_thread_flag(TIF_SINGLESTEP));
 } /* end handle_signal() */
 
 /*****************************************************************************/
@@ -477,44 +474,14 @@
 {
 	struct k_sigaction ka;
 	siginfo_t info;
-	sigset_t *oldset;
 	int signr;
 
-	/*
-	 * We want the common case to go fast, which
-	 * is why we may in certain cases get here from
-	 * kernel mode. Just return without doing anything
-	 * if so.
-	 */
-	if (!user_mode(__frame))
-		return;
-
-	if (try_to_freeze())
-		goto no_signal;
-
-	if (test_thread_flag(TIF_RESTORE_SIGMASK))
-		oldset = &current->saved_sigmask;
-	else
-		oldset = &current->blocked;
-
 	signr = get_signal_to_deliver(&info, &ka, __frame, NULL);
 	if (signr > 0) {
-		if (handle_signal(signr, &info, &ka, oldset) == 0) {
-			/* a signal was successfully delivered; the saved
-			 * sigmask will have been stored in the signal frame,
-			 * and will be restored by sigreturn, so we can simply
-			 * clear the TIF_RESTORE_SIGMASK flag */
-			if (test_thread_flag(TIF_RESTORE_SIGMASK))
-				clear_thread_flag(TIF_RESTORE_SIGMASK);
-
-			tracehook_signal_handler(signr, &info, &ka, __frame,
-						 test_thread_flag(TIF_SINGLESTEP));
-		}
-
+		handle_signal(signr, &info, &ka);
 		return;
 	}
 
-no_signal:
 	/* Did we come from a system call? */
 	if (__frame->syscallno != -1) {
 		/* Restart the system call - no handlers present */
@@ -536,11 +503,7 @@
 
 	/* if there's no signal to deliver, we just put the saved sigmask
 	 * back */
-	if (test_thread_flag(TIF_RESTORE_SIGMASK)) {
-		clear_thread_flag(TIF_RESTORE_SIGMASK);
-		sigprocmask(SIG_SETMASK, &current->saved_sigmask, NULL);
-	}
-
+	restore_saved_sigmask();
 } /* end do_signal() */
 
 /*****************************************************************************/
@@ -555,15 +518,13 @@
 		clear_thread_flag(TIF_SINGLESTEP);
 
 	/* deal with pending signal delivery */
-	if (thread_info_flags & (_TIF_SIGPENDING | _TIF_RESTORE_SIGMASK))
+	if (thread_info_flags & _TIF_SIGPENDING)
 		do_signal();
 
 	/* deal with notification on about to resume userspace execution */
 	if (thread_info_flags & _TIF_NOTIFY_RESUME) {
 		clear_thread_flag(TIF_NOTIFY_RESUME);
 		tracehook_notify_resume(__frame);
-		if (current->replacement_session_keyring)
-			key_replace_session_keyring();
 	}
 
 } /* end do_notify_resume() */
diff --git a/arch/h8300/include/asm/posix_types.h b/arch/h8300/include/asm/posix_types.h
index bc4c34e..91e62ba 100644
--- a/arch/h8300/include/asm/posix_types.h
+++ b/arch/h8300/include/asm/posix_types.h
@@ -10,9 +10,6 @@
 typedef unsigned short	__kernel_mode_t;
 #define __kernel_mode_t __kernel_mode_t
 
-typedef unsigned short	__kernel_nlink_t;
-#define __kernel_nlink_t __kernel_nlink_t
-
 typedef unsigned short	__kernel_ipc_pid_t;
 #define __kernel_ipc_pid_t __kernel_ipc_pid_t
 
diff --git a/arch/h8300/kernel/setup.c b/arch/h8300/kernel/setup.c
index 68d6510..d0b1607f 100644
--- a/arch/h8300/kernel/setup.c
+++ b/arch/h8300/kernel/setup.c
@@ -35,6 +35,7 @@
 #include <asm/setup.h>
 #include <asm/irq.h>
 #include <asm/pgtable.h>
+#include <asm/sections.h>
 
 #if defined(__H8300H__)
 #define CPU "H8/300H"
@@ -54,7 +55,6 @@
 
 char __initdata command_line[COMMAND_LINE_SIZE];
 
-extern int _stext, _etext, _sdata, _edata, _sbss, _ebss, _end;
 extern int _ramstart, _ramend;
 extern char _target_name[];
 extern void h8300_gpio_init(void);
@@ -119,9 +119,9 @@
 	    memory_end = CONFIG_BLKDEV_RESERVE_ADDRESS; 
 #endif
 
-	init_mm.start_code = (unsigned long) &_stext;
-	init_mm.end_code = (unsigned long) &_etext;
-	init_mm.end_data = (unsigned long) &_edata;
+	init_mm.start_code = (unsigned long) _stext;
+	init_mm.end_code = (unsigned long) _etext;
+	init_mm.end_data = (unsigned long) _edata;
 	init_mm.brk = (unsigned long) 0; 
 
 #if (defined(CONFIG_H8300H_SIM) || defined(CONFIG_H8S_SIM)) && defined(CONFIG_GDB_MAGICPRINT)
@@ -134,15 +134,12 @@
 	printk(KERN_INFO "H8/300 series support by Yoshinori Sato <ysato@users.sourceforge.jp>\n");
 
 #ifdef DEBUG
-	printk(KERN_DEBUG "KERNEL -> TEXT=0x%06x-0x%06x DATA=0x%06x-0x%06x "
-		"BSS=0x%06x-0x%06x\n", (int) &_stext, (int) &_etext,
-		(int) &_sdata, (int) &_edata,
-		(int) &_sbss, (int) &_ebss);
-	printk(KERN_DEBUG "KERNEL -> ROMFS=0x%06x-0x%06x MEM=0x%06x-0x%06x "
-		"STACK=0x%06x-0x%06x\n",
-	       (int) &_ebss, (int) memory_start,
-		(int) memory_start, (int) memory_end,
-		(int) memory_end, (int) &_ramend);
+	printk(KERN_DEBUG "KERNEL -> TEXT=0x%p-0x%p DATA=0x%p-0x%p "
+		"BSS=0x%p-0x%p\n", _stext, _etext, _sdata, _edata, __bss_start,
+		__bss_stop);
+	printk(KERN_DEBUG "KERNEL -> ROMFS=0x%p-0x%06lx MEM=0x%06lx-0x%06lx "
+		"STACK=0x%06lx-0x%p\n", __bss_stop, memory_start, memory_start,
+		memory_end, memory_end, &_ramend);
 #endif
 
 #ifdef CONFIG_DEFAULT_CMDLINE
diff --git a/arch/h8300/kernel/signal.c b/arch/h8300/kernel/signal.c
index d4b0555..fca1037 100644
--- a/arch/h8300/kernel/signal.c
+++ b/arch/h8300/kernel/signal.c
@@ -47,8 +47,6 @@
 #include <asm/traps.h>
 #include <asm/ucontext.h>
 
-#define _BLOCKABLE (~(sigmask(SIGKILL) | sigmask(SIGSTOP)))
-
 /*
  * Atomically swap in the new signal mask, and wait for a signal.
  */
@@ -186,7 +184,6 @@
 			      sizeof(frame->extramask))))
 		goto badframe;
 
-	sigdelsetmask(&set, ~_BLOCKABLE);
 	set_current_blocked(&set);
 	
 	if (restore_sigcontext(regs, &frame->sc, &er0))
@@ -211,7 +208,6 @@
 	if (__copy_from_user(&set, &frame->uc.uc_sigmask, sizeof(set)))
 		goto badframe;
 
-	sigdelsetmask(&set, ~_BLOCKABLE);
 	set_current_blocked(&set);
 	
 	if (restore_sigcontext(regs, &frame->uc.uc_mcontext, &er0))
@@ -412,8 +408,9 @@
  */
 static void
 handle_signal(unsigned long sig, siginfo_t *info, struct k_sigaction *ka,
-	      sigset_t *oldset,	struct pt_regs * regs)
+	      struct pt_regs * regs)
 {
+	sigset_t *oldset = sigmask_to_save();
 	int ret;
 	/* are we from a system call? */
 	if (regs->orig_er0 >= 0) {
@@ -441,10 +438,8 @@
 	else
 		ret = setup_frame(sig, ka, oldset, regs);
 
-	if (!ret) {
-		block_sigmask(ka, sig);
-		clear_thread_flag(TIF_RESTORE_SIGMASK);
-	}
+	if (!ret)
+		signal_delivered(sig, info, ka, regs, 0);
 }
 
 /*
@@ -457,7 +452,6 @@
 	siginfo_t info;
 	int signr;
 	struct k_sigaction ka;
-	sigset_t *oldset;
 
 	/*
 	 * We want the common case to go fast, which
@@ -468,23 +462,14 @@
 	if ((regs->ccr & 0x10))
 		return;
 
-	if (try_to_freeze())
-		goto no_signal;
-
 	current->thread.esp0 = (unsigned long) regs;
 
-	if (test_thread_flag(TIF_RESTORE_SIGMASK))
-		oldset = &current->saved_sigmask;
-	else
-		oldset = &current->blocked;
-
 	signr = get_signal_to_deliver(&info, &ka, regs, NULL);
 	if (signr > 0) {
 		/* Whee!  Actually deliver the signal.  */
-		handle_signal(signr, &info, &ka, oldset, regs);
+		handle_signal(signr, &info, &ka, regs);
 		return;
 	}
- no_signal:
 	/* Did we come from a system call? */
 	if (regs->orig_er0 >= 0) {
 		/* Restart the system call - no handlers present */
@@ -501,8 +486,7 @@
 	}
 
 	/* If there's no signal to deliver, we just restore the saved mask.  */
-	if (test_and_clear_thread_flag(TIF_RESTORE_SIGMASK))
-		set_current_blocked(&current->saved_sigmask);
+	restore_saved_sigmask();
 }
 
 asmlinkage void do_notify_resume(struct pt_regs *regs, u32 thread_info_flags)
@@ -513,7 +497,5 @@
 	if (thread_info_flags & _TIF_NOTIFY_RESUME) {
 		clear_thread_flag(TIF_NOTIFY_RESUME);
 		tracehook_notify_resume(regs);
-		if (current->replacement_session_keyring)
-			key_replace_session_keyring();
 	}
 }
diff --git a/arch/h8300/mm/init.c b/arch/h8300/mm/init.c
index 973369c..981e250 100644
--- a/arch/h8300/mm/init.c
+++ b/arch/h8300/mm/init.c
@@ -36,6 +36,7 @@
 #include <asm/segment.h>
 #include <asm/page.h>
 #include <asm/pgtable.h>
+#include <asm/sections.h>
 
 #undef DEBUG
 
@@ -123,7 +124,6 @@
 	int codek = 0, datak = 0, initk = 0;
 	/* DAVIDM look at setup memory map generically with reserved area */
 	unsigned long tmp;
-	extern char _etext, _stext, _sdata, _ebss, __init_begin, __init_end;
 	extern unsigned long  _ramend, _ramstart;
 	unsigned long len = &_ramend - &_ramstart;
 	unsigned long start_mem = memory_start; /* DAVIDM - these must start at end of kernel */
@@ -142,9 +142,9 @@
 	/* this will put all memory onto the freelists */
 	totalram_pages = free_all_bootmem();
 
-	codek = (&_etext - &_stext) >> 10;
-	datak = (&_ebss - &_sdata) >> 10;
-	initk = (&__init_begin - &__init_end) >> 10;
+	codek = (_etext - _stext) >> 10;
+	datak = (__bss_stop - _sdata) >> 10;
+	initk = (__init_begin - __init_end) >> 10;
 
 	tmp = nr_free_pages() << PAGE_SHIFT;
 	printk(KERN_INFO "Memory available: %luk/%luk RAM, %luk/%luk ROM (%dk kernel code, %dk data)\n",
@@ -178,22 +178,21 @@
 {
 #ifdef CONFIG_RAMKERNEL
 	unsigned long addr;
-	extern char __init_begin, __init_end;
 /*
  *	the following code should be cool even if these sections
  *	are not page aligned.
  */
-	addr = PAGE_ALIGN((unsigned long)(&__init_begin));
+	addr = PAGE_ALIGN((unsigned long)(__init_begin));
 	/* next to check that the page we free is not a partial page */
-	for (; addr + PAGE_SIZE < (unsigned long)(&__init_end); addr +=PAGE_SIZE) {
+	for (; addr + PAGE_SIZE < (unsigned long)__init_end; addr +=PAGE_SIZE) {
 		ClearPageReserved(virt_to_page(addr));
 		init_page_count(virt_to_page(addr));
 		free_page(addr);
 		totalram_pages++;
 	}
 	printk(KERN_INFO "Freeing unused kernel memory: %ldk freed (0x%x - 0x%x)\n",
-			(addr - PAGE_ALIGN((long) &__init_begin)) >> 10,
-			(int)(PAGE_ALIGN((unsigned long)(&__init_begin))),
+			(addr - PAGE_ALIGN((long) __init_begin)) >> 10,
+			(int)(PAGE_ALIGN((unsigned long)__init_begin)),
 			(int)(addr - PAGE_SIZE));
 #endif
 }
diff --git a/arch/hexagon/kernel/signal.c b/arch/hexagon/kernel/signal.c
index 434866e..304b0808 100644
--- a/arch/hexagon/kernel/signal.c
+++ b/arch/hexagon/kernel/signal.c
@@ -31,8 +31,6 @@
 #include <asm/signal.h>
 #include <asm/vdso.h>
 
-#define _BLOCKABLE (~(sigmask(SIGKILL) | sigmask(SIGSTOP)))
-
 struct rt_sigframe {
 	unsigned long tramp[2];
 	struct siginfo info;
@@ -149,11 +147,9 @@
 /*
  * Setup invocation of signal handler
  */
-static int handle_signal(int sig, siginfo_t *info, struct k_sigaction *ka,
-			 sigset_t *oldset, struct pt_regs *regs)
+static void handle_signal(int sig, siginfo_t *info, struct k_sigaction *ka,
+			 struct pt_regs *regs)
 {
-	int rc;
-
 	/*
 	 * If we're handling a signal that aborted a system call,
 	 * set up the error return value before adding the signal
@@ -186,15 +182,12 @@
 	 * Set up the stack frame; not doing the SA_SIGINFO thing.  We
 	 * only set up the rt_frame flavor.
 	 */
-	rc = setup_rt_frame(sig, ka, info, oldset, regs);
-
 	/* If there was an error on setup, no signal was delivered. */
-	if (rc)
-		return rc;
+	if (setup_rt_frame(sig, ka, info, sigmask_to_save(), regs) < 0)
+		return;
 
-	block_sigmask(ka, sig);
-
-	return 0;
+	signal_delivered(sig, info, ka, regs,
+			test_thread_flag(TIF_SINGLESTEP));
 }
 
 /*
@@ -209,34 +202,13 @@
 	if (!user_mode(regs))
 		return;
 
-	if (try_to_freeze())
-		goto no_signal;
-
 	signo = get_signal_to_deliver(&info, &sigact, regs, NULL);
 
 	if (signo > 0) {
-		sigset_t *oldset;
-
-		if (test_thread_flag(TIF_RESTORE_SIGMASK))
-			oldset = &current->saved_sigmask;
-		else
-			oldset = &current->blocked;
-
-		if (handle_signal(signo, &info, &sigact, oldset, regs) == 0) {
-			/*
-			 * Successful delivery case.  The saved sigmask is
-			 * stored in the signal frame, and will be restored
-			 * by sigreturn.  We can clear the TIF flag.
-			 */
-			clear_thread_flag(TIF_RESTORE_SIGMASK);
-
-			tracehook_signal_handler(signo, &info, &sigact, regs,
-				test_thread_flag(TIF_SINGLESTEP));
-		}
+		handle_signal(signo, &info, &sigact, regs);
 		return;
 	}
 
-no_signal:
 	/*
 	 * If we came from a system call, handle the restart.
 	 */
@@ -259,10 +231,7 @@
 
 no_restart:
 	/* If there's no signal to deliver, put the saved sigmask back */
-	if (test_thread_flag(TIF_RESTORE_SIGMASK)) {
-		clear_thread_flag(TIF_RESTORE_SIGMASK);
-		sigprocmask(SIG_SETMASK, &current->saved_sigmask, NULL);
-	}
+	restore_saved_sigmask();
 }
 
 void do_notify_resume(struct pt_regs *regs, unsigned long thread_info_flags)
@@ -273,8 +242,6 @@
 	if (thread_info_flags & _TIF_NOTIFY_RESUME) {
 		clear_thread_flag(TIF_NOTIFY_RESUME);
 		tracehook_notify_resume(regs);
-		if (current->replacement_session_keyring)
-			key_replace_session_keyring();
 	}
 }
 
@@ -303,7 +270,6 @@
 	if (__copy_from_user(&blocked, &frame->uc.uc_sigmask, sizeof(blocked)))
 		goto badframe;
 
-	sigdelsetmask(&blocked, ~_BLOCKABLE);
 	set_current_blocked(&blocked);
 
 	if (restore_sigcontext(regs, &frame->uc.uc_mcontext))
diff --git a/arch/ia64/include/asm/posix_types.h b/arch/ia64/include/asm/posix_types.h
index 7323ab9..99ee1d6 100644
--- a/arch/ia64/include/asm/posix_types.h
+++ b/arch/ia64/include/asm/posix_types.h
@@ -1,9 +1,6 @@
 #ifndef _ASM_IA64_POSIX_TYPES_H
 #define _ASM_IA64_POSIX_TYPES_H
 
-typedef unsigned int	__kernel_nlink_t;
-#define __kernel_nlink_t __kernel_nlink_t
-
 typedef unsigned long	__kernel_sigset_t;	/* at least 32 bits */
 
 #include <asm-generic/posix_types.h>
diff --git a/arch/ia64/include/asm/thread_info.h b/arch/ia64/include/asm/thread_info.h
index 310d973..f7ee853 100644
--- a/arch/ia64/include/asm/thread_info.h
+++ b/arch/ia64/include/asm/thread_info.h
@@ -141,7 +141,23 @@
 {
 	struct thread_info *ti = current_thread_info();
 	ti->status |= TS_RESTORE_SIGMASK;
-	set_bit(TIF_SIGPENDING, &ti->flags);
+	WARN_ON(!test_bit(TIF_SIGPENDING, &ti->flags));
+}
+static inline void clear_restore_sigmask(void)
+{
+	current_thread_info()->status &= ~TS_RESTORE_SIGMASK;
+}
+static inline bool test_restore_sigmask(void)
+{
+	return current_thread_info()->status & TS_RESTORE_SIGMASK;
+}
+static inline bool test_and_clear_restore_sigmask(void)
+{
+	struct thread_info *ti = current_thread_info();
+	if (!(ti->status & TS_RESTORE_SIGMASK))
+		return false;
+	ti->status &= ~TS_RESTORE_SIGMASK;
+	return true;
 }
 #endif	/* !__ASSEMBLY__ */
 
diff --git a/arch/ia64/kernel/perfmon.c b/arch/ia64/kernel/perfmon.c
index f00ba02..d7f558c 100644
--- a/arch/ia64/kernel/perfmon.c
+++ b/arch/ia64/kernel/perfmon.c
@@ -604,12 +604,6 @@
 	spin_unlock(&(x)->ctx_lock);
 }
 
-static inline unsigned long 
-pfm_get_unmapped_area(struct file *file, unsigned long addr, unsigned long len, unsigned long pgoff, unsigned long flags, unsigned long exec)
-{
-	return get_unmapped_area(file, addr, len, pgoff, flags);
-}
-
 /* forward declaration */
 static const struct dentry_operations pfmfs_dentry_operations;
 
@@ -2333,8 +2327,8 @@
 	down_write(&task->mm->mmap_sem);
 
 	/* find some free area in address space, must have mmap sem held */
-	vma->vm_start = pfm_get_unmapped_area(NULL, 0, size, 0, MAP_PRIVATE|MAP_ANONYMOUS, 0);
-	if (vma->vm_start == 0UL) {
+	vma->vm_start = get_unmapped_area(NULL, 0, size, 0, MAP_PRIVATE|MAP_ANONYMOUS);
+	if (IS_ERR_VALUE(vma->vm_start)) {
 		DPRINT(("Cannot find unmapped area for size %ld\n", size));
 		up_write(&task->mm->mmap_sem);
 		goto error;
diff --git a/arch/ia64/kernel/process.c b/arch/ia64/kernel/process.c
index 5e0e86d..dd6fc14 100644
--- a/arch/ia64/kernel/process.c
+++ b/arch/ia64/kernel/process.c
@@ -199,8 +199,6 @@
 	if (test_thread_flag(TIF_NOTIFY_RESUME)) {
 		clear_thread_flag(TIF_NOTIFY_RESUME);
 		tracehook_notify_resume(&scr->pt);
-		if (current->replacement_session_keyring)
-			key_replace_session_keyring();
 	}
 
 	/* copy user rbs to kernel rbs */
diff --git a/arch/ia64/kernel/signal.c b/arch/ia64/kernel/signal.c
index 7523501..a199be1 100644
--- a/arch/ia64/kernel/signal.c
+++ b/arch/ia64/kernel/signal.c
@@ -30,7 +30,6 @@
 
 #define DEBUG_SIG	0
 #define STACK_ALIGN	16		/* minimal alignment for stack pointer */
-#define _BLOCKABLE	(~(sigmask(SIGKILL) | sigmask(SIGSTOP)))
 
 #if _NSIG_WORDS > 1
 # define PUT_SIGSET(k,u)	__copy_to_user((u)->sig, (k)->sig, sizeof(sigset_t))
@@ -200,7 +199,6 @@
 	if (GET_SIGSET(&set, &sc->sc_mask))
 		goto give_sigsegv;
 
-	sigdelsetmask(&set, ~_BLOCKABLE);
 	set_current_blocked(&set);
 
 	if (restore_sigcontext(sc, scr))
@@ -415,18 +413,13 @@
 }
 
 static long
-handle_signal (unsigned long sig, struct k_sigaction *ka, siginfo_t *info, sigset_t *oldset,
+handle_signal (unsigned long sig, struct k_sigaction *ka, siginfo_t *info,
 	       struct sigscratch *scr)
 {
-	if (!setup_frame(sig, ka, info, oldset, scr))
+	if (!setup_frame(sig, ka, info, sigmask_to_save(), scr))
 		return 0;
 
-	block_sigmask(ka, sig);
-
-	/*
-	 * Let tracing know that we've done the handler setup.
-	 */
-	tracehook_signal_handler(sig, info, ka, &scr->pt,
+	signal_delivered(sig, info, ka, &scr->pt,
 				 test_thread_flag(TIF_SINGLESTEP));
 
 	return 1;
@@ -440,7 +433,6 @@
 ia64_do_signal (struct sigscratch *scr, long in_syscall)
 {
 	struct k_sigaction ka;
-	sigset_t *oldset;
 	siginfo_t info;
 	long restart = in_syscall;
 	long errno = scr->pt.r8;
@@ -453,11 +445,6 @@
 	if (!user_mode(&scr->pt))
 		return;
 
-	if (current_thread_info()->status & TS_RESTORE_SIGMASK)
-		oldset = &current->saved_sigmask;
-	else
-		oldset = &current->blocked;
-
 	/*
 	 * This only loops in the rare cases of handle_signal() failing, in which case we
 	 * need to push through a forced SIGSEGV.
@@ -507,16 +494,8 @@
 		 * Whee!  Actually deliver the signal.  If the delivery failed, we need to
 		 * continue to iterate in this loop so we can deliver the SIGSEGV...
 		 */
-		if (handle_signal(signr, &ka, &info, oldset, scr)) {
-			/*
-			 * A signal was successfully delivered; the saved
-			 * sigmask will have been stored in the signal frame,
-			 * and will be restored by sigreturn, so we can simply
-			 * clear the TS_RESTORE_SIGMASK flag.
-			 */
-			current_thread_info()->status &= ~TS_RESTORE_SIGMASK;
+		if (handle_signal(signr, &ka, &info, scr))
 			return;
-		}
 	}
 
 	/* Did we come from a system call? */
@@ -538,8 +517,5 @@
 
 	/* if there's no signal to deliver, we just put the saved sigmask
 	 * back */
-	if (current_thread_info()->status & TS_RESTORE_SIGMASK) {
-		current_thread_info()->status &= ~TS_RESTORE_SIGMASK;
-		sigprocmask(SIG_SETMASK, &current->saved_sigmask, NULL);
-	}
+	restore_saved_sigmask();
 }
diff --git a/arch/ia64/kernel/sys_ia64.c b/arch/ia64/kernel/sys_ia64.c
index 609d500..d9439ef 100644
--- a/arch/ia64/kernel/sys_ia64.c
+++ b/arch/ia64/kernel/sys_ia64.c
@@ -171,22 +171,9 @@
 ia64_mremap (unsigned long addr, unsigned long old_len, unsigned long new_len, unsigned long flags,
 	     unsigned long new_addr)
 {
-	extern unsigned long do_mremap (unsigned long addr,
-					unsigned long old_len,
-					unsigned long new_len,
-					unsigned long flags,
-					unsigned long new_addr);
-
-	down_write(&current->mm->mmap_sem);
-	{
-		addr = do_mremap(addr, old_len, new_len, flags, new_addr);
-	}
-	up_write(&current->mm->mmap_sem);
-
-	if (IS_ERR((void *) addr))
-		return addr;
-
-	force_successful_syscall_return();
+	addr = sys_mremap(addr, old_len, new_len, flags, new_addr);
+	if (!IS_ERR((void *) addr))
+		force_successful_syscall_return();
 	return addr;
 }
 
diff --git a/arch/m32r/include/asm/posix_types.h b/arch/m32r/include/asm/posix_types.h
index 0195850..236de26 100644
--- a/arch/m32r/include/asm/posix_types.h
+++ b/arch/m32r/include/asm/posix_types.h
@@ -10,9 +10,6 @@
 typedef unsigned short	__kernel_mode_t;
 #define __kernel_mode_t __kernel_mode_t
 
-typedef unsigned short	__kernel_nlink_t;
-#define __kernel_nlink_t __kernel_nlink_t
-
 typedef unsigned short	__kernel_ipc_pid_t;
 #define __kernel_ipc_pid_t __kernel_ipc_pid_t
 
diff --git a/arch/m32r/kernel/signal.c b/arch/m32r/kernel/signal.c
index f54d969..f3fb2c0 100644
--- a/arch/m32r/kernel/signal.c
+++ b/arch/m32r/kernel/signal.c
@@ -28,8 +28,6 @@
 
 #define DEBUG_SIG 0
 
-#define _BLOCKABLE (~(sigmask(SIGKILL) | sigmask(SIGSTOP)))
-
 asmlinkage int
 sys_sigaltstack(const stack_t __user *uss, stack_t __user *uoss,
 		unsigned long r2, unsigned long r3, unsigned long r4,
@@ -111,7 +109,6 @@
 	if (__copy_from_user(&set, &frame->uc.uc_sigmask, sizeof(set)))
 		goto badframe;
 
-	sigdelsetmask(&set, ~_BLOCKABLE);
 	set_current_blocked(&set);
 
 	if (restore_sigcontext(regs, &frame->uc.uc_mcontext, &result))
@@ -267,9 +264,9 @@
  * OK, we're invoking a handler
  */
 
-static int
+static void
 handle_signal(unsigned long sig, struct k_sigaction *ka, siginfo_t *info,
-	      sigset_t *oldset, struct pt_regs *regs)
+	      struct pt_regs *regs)
 {
 	/* Are we from a system call? */
 	if (regs->syscall_nr >= 0) {
@@ -294,11 +291,10 @@
 	}
 
 	/* Set up the stack frame */
-	if (setup_rt_frame(sig, ka, info, oldset, regs))
-		return -EFAULT;
+	if (setup_rt_frame(sig, ka, info, sigmask_to_save(), regs))
+		return;
 
-	block_sigmask(ka, sig);
-	return 0;
+	signal_delivered(sig, info, ka, regs, 0);
 }
 
 /*
@@ -311,7 +307,6 @@
 	siginfo_t info;
 	int signr;
 	struct k_sigaction ka;
-	sigset_t *oldset;
 
 	/*
 	 * We want the common case to go fast, which
@@ -322,14 +317,6 @@
 	if (!user_mode(regs))
 		return;
 
-	if (try_to_freeze()) 
-		goto no_signal;
-
-	if (test_thread_flag(TIF_RESTORE_SIGMASK))
-		oldset = &current->saved_sigmask;
-	else
-		oldset = &current->blocked;
-
 	signr = get_signal_to_deliver(&info, &ka, regs, NULL);
 	if (signr > 0) {
 		/* Re-enable any watchpoints before delivering the
@@ -339,13 +326,11 @@
 		 */
 
 		/* Whee!  Actually deliver the signal.  */
-		if (handle_signal(signr, &ka, &info, oldset, regs) == 0)
-			clear_thread_flag(TIF_RESTORE_SIGMASK);
+		handle_signal(signr, &ka, &info, regs);
 
 		return;
 	}
 
- no_signal:
 	/* Did we come from a system call? */
 	if (regs->syscall_nr >= 0) {
 		/* Restart the system call - no handlers present */
@@ -360,10 +345,7 @@
 			prev_insn(regs);
 		}
 	}
-	if (test_thread_flag(TIF_RESTORE_SIGMASK)) {
-		clear_thread_flag(TIF_RESTORE_SIGMASK);
-		sigprocmask(SIG_SETMASK, &current->saved_sigmask, NULL);
-	}
+	restore_saved_sigmask();
 }
 
 /*
@@ -383,8 +365,6 @@
 	if (thread_info_flags & _TIF_NOTIFY_RESUME) {
 		clear_thread_flag(TIF_NOTIFY_RESUME);
 		tracehook_notify_resume(regs);
-		if (current->replacement_session_keyring)
-			key_replace_session_keyring();
 	}
 
 	clear_thread_flag(TIF_IRET);
diff --git a/arch/m68k/Kconfig b/arch/m68k/Kconfig
index cac5b6b..1471201 100644
--- a/arch/m68k/Kconfig
+++ b/arch/m68k/Kconfig
@@ -7,6 +7,8 @@
 	select GENERIC_IRQ_SHOW
 	select ARCH_HAVE_NMI_SAFE_CMPXCHG if RMW_INSNS
 	select GENERIC_CPU_DEVICES
+	select GENERIC_STRNCPY_FROM_USER if MMU
+	select GENERIC_STRNLEN_USER if MMU
 	select FPU if MMU
 	select ARCH_USES_GETTIMEOFFSET if MMU && !COLDFIRE
 
diff --git a/arch/m68k/include/asm/Kbuild b/arch/m68k/include/asm/Kbuild
index 1a922fa..eafa253 100644
--- a/arch/m68k/include/asm/Kbuild
+++ b/arch/m68k/include/asm/Kbuild
@@ -1,2 +1,4 @@
 include include/asm-generic/Kbuild.asm
 header-y += cachectl.h
+
+generic-y += word-at-a-time.h
diff --git a/arch/m68k/include/asm/m528xsim.h b/arch/m68k/include/asm/m528xsim.h
index d63b99f..497c31c 100644
--- a/arch/m68k/include/asm/m528xsim.h
+++ b/arch/m68k/include/asm/m528xsim.h
@@ -86,7 +86,7 @@
 /*
  *	QSPI module.
  */
-#define	MCFQSPI_IOBASE		(MCF_IPSBAR + 0x340)
+#define	MCFQSPI_BASE		(MCF_IPSBAR + 0x340)
 #define	MCFQSPI_SIZE		0x40
 
 #define	MCFQSPI_CS0		147
diff --git a/arch/m68k/include/asm/posix_types.h b/arch/m68k/include/asm/posix_types.h
index 6373093..cf4dbf7 100644
--- a/arch/m68k/include/asm/posix_types.h
+++ b/arch/m68k/include/asm/posix_types.h
@@ -10,9 +10,6 @@
 typedef unsigned short	__kernel_mode_t;
 #define __kernel_mode_t __kernel_mode_t
 
-typedef unsigned short	__kernel_nlink_t;
-#define __kernel_nlink_t __kernel_nlink_t
-
 typedef unsigned short	__kernel_ipc_pid_t;
 #define __kernel_ipc_pid_t __kernel_ipc_pid_t
 
diff --git a/arch/m68k/include/asm/uaccess_mm.h b/arch/m68k/include/asm/uaccess_mm.h
index 9c80cd5..472c891 100644
--- a/arch/m68k/include/asm/uaccess_mm.h
+++ b/arch/m68k/include/asm/uaccess_mm.h
@@ -379,12 +379,15 @@
 #define copy_from_user(to, from, n)	__copy_from_user(to, from, n)
 #define copy_to_user(to, from, n)	__copy_to_user(to, from, n)
 
-long strncpy_from_user(char *dst, const char __user *src, long count);
-long strnlen_user(const char __user *src, long n);
+#define user_addr_max() \
+	(segment_eq(get_fs(), USER_DS) ? TASK_SIZE : ~0UL)
+
+extern long strncpy_from_user(char *dst, const char __user *src, long count);
+extern __must_check long strlen_user(const char __user *str);
+extern __must_check long strnlen_user(const char __user *str, long n);
+
 unsigned long __clear_user(void __user *to, unsigned long n);
 
 #define clear_user	__clear_user
 
-#define strlen_user(str) strnlen_user(str, 32767)
-
 #endif /* _M68K_UACCESS_H */
diff --git a/arch/m68k/kernel/ptrace.c b/arch/m68k/kernel/ptrace.c
index 8b4a222..1bc10e6 100644
--- a/arch/m68k/kernel/ptrace.c
+++ b/arch/m68k/kernel/ptrace.c
@@ -286,7 +286,7 @@
 	}
 }
 
-#ifdef CONFIG_COLDFIRE
+#if defined(CONFIG_COLDFIRE) || !defined(CONFIG_MMU)
 asmlinkage int syscall_trace_enter(void)
 {
 	int ret = 0;
diff --git a/arch/m68k/kernel/signal.c b/arch/m68k/kernel/signal.c
index d9f3d19..710a528 100644
--- a/arch/m68k/kernel/signal.c
+++ b/arch/m68k/kernel/signal.c
@@ -51,8 +51,6 @@
 #include <asm/traps.h>
 #include <asm/ucontext.h>
 
-#define _BLOCKABLE (~(sigmask(SIGKILL) | sigmask(SIGSTOP)))
-
 #ifdef CONFIG_MMU
 
 /*
@@ -795,7 +793,6 @@
 			      sizeof(frame->extramask))))
 		goto badframe;
 
-	sigdelsetmask(&set, ~_BLOCKABLE);
 	set_current_blocked(&set);
 
 	if (restore_sigcontext(regs, &frame->sc, frame + 1))
@@ -820,7 +817,6 @@
 	if (__copy_from_user(&set, &frame->uc.uc_sigmask, sizeof(set)))
 		goto badframe;
 
-	sigdelsetmask(&set, ~_BLOCKABLE);
 	set_current_blocked(&set);
 
 	if (rt_restore_ucontext(regs, sw, &frame->uc))
@@ -1123,8 +1119,9 @@
  */
 static void
 handle_signal(int sig, struct k_sigaction *ka, siginfo_t *info,
-	      sigset_t *oldset, struct pt_regs *regs)
+	      struct pt_regs *regs)
 {
+	sigset_t *oldset = sigmask_to_save();
 	int err;
 	/* are we from a system call? */
 	if (regs->orig_d0 >= 0)
@@ -1140,14 +1137,12 @@
 	if (err)
 		return;
 
-	block_sigmask(ka, sig);
+	signal_delivered(sig, info, ka, regs, 0);
 
 	if (test_thread_flag(TIF_DELAYED_TRACE)) {
 		regs->sr &= ~0x8000;
 		send_sig(SIGTRAP, current, 1);
 	}
-
-	clear_thread_flag(TIF_RESTORE_SIGMASK);
 }
 
 /*
@@ -1160,19 +1155,13 @@
 	siginfo_t info;
 	struct k_sigaction ka;
 	int signr;
-	sigset_t *oldset;
 
 	current->thread.esp0 = (unsigned long) regs;
 
-	if (test_thread_flag(TIF_RESTORE_SIGMASK))
-		oldset = &current->saved_sigmask;
-	else
-		oldset = &current->blocked;
-
 	signr = get_signal_to_deliver(&info, &ka, regs, NULL);
 	if (signr > 0) {
 		/* Whee!  Actually deliver the signal.  */
-		handle_signal(signr, &ka, &info, oldset, regs);
+		handle_signal(signr, &ka, &info, regs);
 		return;
 	}
 
@@ -1182,10 +1171,7 @@
 		handle_restart(regs, NULL, 0);
 
 	/* If there's no signal to deliver, we just restore the saved mask.  */
-	if (test_thread_flag(TIF_RESTORE_SIGMASK)) {
-		clear_thread_flag(TIF_RESTORE_SIGMASK);
-		sigprocmask(SIG_SETMASK, &current->saved_sigmask, NULL);
-	}
+	restore_saved_sigmask();
 }
 
 void do_notify_resume(struct pt_regs *regs)
@@ -1193,9 +1179,6 @@
 	if (test_thread_flag(TIF_SIGPENDING))
 		do_signal(regs);
 
-	if (test_and_clear_thread_flag(TIF_NOTIFY_RESUME)) {
+	if (test_and_clear_thread_flag(TIF_NOTIFY_RESUME))
 		tracehook_notify_resume(regs);
-		if (current->replacement_session_keyring)
-			key_replace_session_keyring();
-	}
 }
diff --git a/arch/m68k/kernel/time.c b/arch/m68k/kernel/time.c
index d7deb7f..707f057 100644
--- a/arch/m68k/kernel/time.c
+++ b/arch/m68k/kernel/time.c
@@ -85,7 +85,7 @@
 	mach_sched_init(timer_interrupt);
 }
 
-#ifdef CONFIG_M68KCLASSIC
+#ifdef CONFIG_ARCH_USES_GETTIMEOFFSET
 
 u32 arch_gettimeoffset(void)
 {
@@ -108,4 +108,4 @@
 
 module_init(rtc_init);
 
-#endif /* CONFIG_M68KCLASSIC */
+#endif /* CONFIG_ARCH_USES_GETTIMEOFFSET */
diff --git a/arch/m68k/lib/uaccess.c b/arch/m68k/lib/uaccess.c
index 5664386..5e97f2e 100644
--- a/arch/m68k/lib/uaccess.c
+++ b/arch/m68k/lib/uaccess.c
@@ -104,80 +104,6 @@
 EXPORT_SYMBOL(__generic_copy_to_user);
 
 /*
- * Copy a null terminated string from userspace.
- */
-long strncpy_from_user(char *dst, const char __user *src, long count)
-{
-	long res;
-	char c;
-
-	if (count <= 0)
-		return count;
-
-	asm volatile ("\n"
-		"1:	"MOVES".b	(%2)+,%4\n"
-		"	move.b	%4,(%1)+\n"
-		"	jeq	2f\n"
-		"	subq.l	#1,%3\n"
-		"	jne	1b\n"
-		"2:	sub.l	%3,%0\n"
-		"3:\n"
-		"	.section .fixup,\"ax\"\n"
-		"	.even\n"
-		"10:	move.l	%5,%0\n"
-		"	jra	3b\n"
-		"	.previous\n"
-		"\n"
-		"	.section __ex_table,\"a\"\n"
-		"	.align	4\n"
-		"	.long	1b,10b\n"
-		"	.previous"
-		: "=d" (res), "+a" (dst), "+a" (src), "+r" (count), "=&d" (c)
-		: "i" (-EFAULT), "0" (count));
-
-	return res;
-}
-EXPORT_SYMBOL(strncpy_from_user);
-
-/*
- * Return the size of a string (including the ending 0)
- *
- * Return 0 on exception, a value greater than N if too long
- */
-long strnlen_user(const char __user *src, long n)
-{
-	char c;
-	long res;
-
-	asm volatile ("\n"
-		"1:	subq.l	#1,%1\n"
-		"	jmi	3f\n"
-		"2:	"MOVES".b	(%0)+,%2\n"
-		"	tst.b	%2\n"
-		"	jne	1b\n"
-		"	jra	4f\n"
-		"\n"
-		"3:	addq.l	#1,%0\n"
-		"4:	sub.l	%4,%0\n"
-		"5:\n"
-		"	.section .fixup,\"ax\"\n"
-		"	.even\n"
-		"20:	sub.l	%0,%0\n"
-		"	jra	5b\n"
-		"	.previous\n"
-		"\n"
-		"	.section __ex_table,\"a\"\n"
-		"	.align	4\n"
-		"	.long	2b,20b\n"
-		"	.previous\n"
-		: "=&a" (res), "+d" (n), "=&d" (c)
-		: "0" (src), "r" (src));
-
-	return res;
-}
-EXPORT_SYMBOL(strnlen_user);
-
-/*
  * Zero Userspace
  */
 
diff --git a/arch/m68k/platform/68328/timers.c b/arch/m68k/platform/68328/timers.c
index c801c17..f4dc9b2 100644
--- a/arch/m68k/platform/68328/timers.c
+++ b/arch/m68k/platform/68328/timers.c
@@ -53,6 +53,7 @@
 #endif
 
 static u32 m68328_tick_cnt;
+static irq_handler_t timer_interrupt;
 
 /***************************************************************************/
 
@@ -62,7 +63,7 @@
 	TSTAT &= 0;
 
 	m68328_tick_cnt += TICKS_PER_JIFFY;
-	return arch_timer_interrupt(irq, dummy);
+	return timer_interrupt(irq, dummy);
 }
 
 /***************************************************************************/
@@ -99,7 +100,7 @@
 
 /***************************************************************************/
 
-void hw_timer_init(void)
+void hw_timer_init(irq_handler_t handler)
 {
 	/* disable timer 1 */
 	TCTL = 0;
@@ -115,6 +116,7 @@
 	/* Enable timer 1 */
 	TCTL |= TCTL_TEN;
 	clocksource_register_hz(&m68328_clk, TICKS_PER_JIFFY*HZ);
+	timer_interrupt = handler;
 }
 
 /***************************************************************************/
diff --git a/arch/m68k/platform/68360/config.c b/arch/m68k/platform/68360/config.c
index 255fc03..9877cef 100644
--- a/arch/m68k/platform/68360/config.c
+++ b/arch/m68k/platform/68360/config.c
@@ -35,6 +35,7 @@
 #define OSCILLATOR  (unsigned long int)33000000
 #endif
 
+static irq_handler_t timer_interrupt;
 unsigned long int system_clock;
 
 extern QUICC *pquicc;
@@ -52,7 +53,7 @@
 
   pquicc->timer_ter1 = 0x0002; /* clear timer event */
 
-  return arch_timer_interrupt(irq, dummy);
+  return timer_interrupt(irq, dummy);
 }
 
 static struct irqaction m68360_timer_irq = {
@@ -61,7 +62,7 @@
 	.handler = hw_tick,
 };
 
-void hw_timer_init(void)
+void hw_timer_init(irq_handler_t handler)
 {
   unsigned char prescaler;
   unsigned short tgcr_save;
@@ -94,6 +95,8 @@
 
   pquicc->timer_ter1 = 0x0003; /* clear timer events */
 
+  timer_interrupt = handler;
+
   /* enable timer 1 interrupt in CIMR */
   setup_irq(CPMVEC_TIMER1, &m68360_timer_irq);
 
diff --git a/arch/m68k/platform/coldfire/clk.c b/arch/m68k/platform/coldfire/clk.c
index 9f1260c..44da406 100644
--- a/arch/m68k/platform/coldfire/clk.c
+++ b/arch/m68k/platform/coldfire/clk.c
@@ -42,4 +42,11 @@
 	return MCF_CLK;
 }
 EXPORT_SYMBOL(clk_get_rate);
+
+struct clk *devm_clk_get(struct device *dev, const char *id)
+{
+	return NULL;
+}
+EXPORT_SYMBOL(devm_clk_get);
+
 /***************************************************************************/
diff --git a/arch/microblaze/Kconfig b/arch/microblaze/Kconfig
index 8346046..0bf4423 100644
--- a/arch/microblaze/Kconfig
+++ b/arch/microblaze/Kconfig
@@ -52,7 +52,7 @@
 	def_bool y
 
 config GENERIC_GPIO
-	def_bool y
+	bool
 
 config GENERIC_CSUM
 	def_bool y
diff --git a/arch/microblaze/include/asm/thread_info.h b/arch/microblaze/include/asm/thread_info.h
index 1a8ab6a..6c610234 100644
--- a/arch/microblaze/include/asm/thread_info.h
+++ b/arch/microblaze/include/asm/thread_info.h
@@ -166,7 +166,23 @@
 {
 	struct thread_info *ti = current_thread_info();
 	ti->status |= TS_RESTORE_SIGMASK;
-	set_bit(TIF_SIGPENDING, (unsigned long *)&ti->flags);
+	WARN_ON(!test_bit(TIF_SIGPENDING, (unsigned long *)&ti->flags));
+}
+static inline void clear_restore_sigmask(void)
+{
+	current_thread_info()->status &= ~TS_RESTORE_SIGMASK;
+}
+static inline bool test_restore_sigmask(void)
+{
+	return current_thread_info()->status & TS_RESTORE_SIGMASK;
+}
+static inline bool test_and_clear_restore_sigmask(void)
+{
+	struct thread_info *ti = current_thread_info();
+	if (!(ti->status & TS_RESTORE_SIGMASK))
+		return false;
+	ti->status &= ~TS_RESTORE_SIGMASK;
+	return true;
 }
 #endif
 
diff --git a/arch/microblaze/kernel/entry.S b/arch/microblaze/kernel/entry.S
index daff9e5..03f7b8c 100644
--- a/arch/microblaze/kernel/entry.S
+++ b/arch/microblaze/kernel/entry.S
@@ -492,10 +492,11 @@
 	bnei	r6, 1f;			/* See if child SP arg (arg 1) is 0. */
 	lwi	r6, r1, PT_R1;	/* If so, use paret's stack ptr */
 1:	addik	r7, r1, 0;			/* Arg 2: parent context */
-	add	r8, r0, r0;			/* Arg 3: (unused) */
-	add	r9, r0, r0;			/* Arg 4: (unused) */
+	lwi     r9, r1, PT_R8;          /* parent tid.  */
+	lwi     r10, r1, PT_R9;         /* child tid.  */
+	/* do_fork will pick up TLS from regs->r10.  */
 	brid	do_fork		/* Do real work (tail-call) */
-	add	r10, r0, r0;			/* Arg 5: (unused) */
+	add     r8, r0, r0;             /* Arg 3: (unused) */
 
 C_ENTRY(sys_execve):
 	brid	microblaze_execve;	/* Do real work (tail-call).*/
diff --git a/arch/microblaze/kernel/mcount.S b/arch/microblaze/kernel/mcount.S
index e7eaa7a..fc1e132 100644
--- a/arch/microblaze/kernel/mcount.S
+++ b/arch/microblaze/kernel/mcount.S
@@ -138,7 +138,7 @@
 #endif /* CONFIG_DYNAMIC_FTRACE */
 /* static normal trace */
 	lwi	r6, r1, 120; /* MS: load parent addr */
-	addik	r5, r15, 0; /* MS: load current function addr */
+	addik	r5, r15, -4; /* MS: load current function addr */
 	/* MS: here is dependency on previous code */
 	brald	r15, r20; /* MS: jump to ftrace handler */
 	nop;
diff --git a/arch/microblaze/kernel/process.c b/arch/microblaze/kernel/process.c
index 883b927..1944e00 100644
--- a/arch/microblaze/kernel/process.c
+++ b/arch/microblaze/kernel/process.c
@@ -182,8 +182,12 @@
 #endif
 	ti->cpu_context.r15 = (unsigned long)ret_from_fork - 8;
 
+	/*
+	 *  r21 is the thread reg, r10 is 6th arg to clone
+	 *  which contains TLS area
+	 */
 	if (clone_flags & CLONE_SETTLS)
-		;
+		childregs->r21 = childregs->r10;
 
 	return 0;
 }
diff --git a/arch/microblaze/kernel/signal.c b/arch/microblaze/kernel/signal.c
index 7f4c7be..76b9722 100644
--- a/arch/microblaze/kernel/signal.c
+++ b/arch/microblaze/kernel/signal.c
@@ -41,8 +41,6 @@
 #include <asm/cacheflush.h>
 #include <asm/syscalls.h>
 
-#define _BLOCKABLE (~(sigmask(SIGKILL) | sigmask(SIGSTOP)))
-
 asmlinkage long
 sys_sigaltstack(const stack_t __user *uss, stack_t __user *uoss,
 		struct pt_regs *regs)
@@ -106,7 +104,6 @@
 	if (__copy_from_user(&set, &frame->uc.uc_sigmask, sizeof(set)))
 		goto badframe;
 
-	sigdelsetmask(&set, ~_BLOCKABLE);
 	set_current_blocked(&set);
 
 	if (restore_sigcontext(regs, &frame->uc.uc_mcontext, &rval))
@@ -310,10 +307,11 @@
  * OK, we're invoking a handler
  */
 
-static int
+static void
 handle_signal(unsigned long sig, struct k_sigaction *ka,
-		siginfo_t *info, sigset_t *oldset, struct pt_regs *regs)
+		siginfo_t *info, struct pt_regs *regs)
 {
+	sigset_t *oldset = sigmask_to_save();
 	int ret;
 
 	/* Set up the stack frame */
@@ -323,11 +321,9 @@
 		ret = setup_rt_frame(sig, ka, NULL, oldset, regs);
 
 	if (ret)
-		return ret;
+		return;
 
-	block_sigmask(ka, sig);
-
-	return 0;
+	signal_delivered(sig, info, ka, regs, 0);
 }
 
 /*
@@ -344,33 +340,18 @@
 	siginfo_t info;
 	int signr;
 	struct k_sigaction ka;
-	sigset_t *oldset;
 #ifdef DEBUG_SIG
 	printk(KERN_INFO "do signal: %p %d\n", regs, in_syscall);
 	printk(KERN_INFO "do signal2: %lx %lx %ld [%lx]\n", regs->pc, regs->r1,
 			regs->r12, current_thread_info()->flags);
 #endif
 
-	if (current_thread_info()->status & TS_RESTORE_SIGMASK)
-		oldset = &current->saved_sigmask;
-	else
-		oldset = &current->blocked;
-
 	signr = get_signal_to_deliver(&info, &ka, regs, NULL);
 	if (signr > 0) {
 		/* Whee! Actually deliver the signal. */
 		if (in_syscall)
 			handle_restart(regs, &ka, 1);
-		if (!handle_signal(signr, &ka, &info, oldset, regs)) {
-			/*
-			 * A signal was successfully delivered; the saved
-			 * sigmask will have been stored in the signal frame,
-			 * and will be restored by sigreturn, so we can simply
-			 * clear the TS_RESTORE_SIGMASK flag.
-			 */
-			current_thread_info()->status &=
-			    ~TS_RESTORE_SIGMASK;
-		}
+		handle_signal(signr, &ka, &info, regs);
 		return;
 	}
 
@@ -381,10 +362,7 @@
 	 * If there's no signal to deliver, we just put the saved sigmask
 	 * back.
 	 */
-	if (current_thread_info()->status & TS_RESTORE_SIGMASK) {
-		current_thread_info()->status &= ~TS_RESTORE_SIGMASK;
-		sigprocmask(SIG_SETMASK, &current->saved_sigmask, NULL);
-	}
+	restore_saved_sigmask();
 }
 
 void do_notify_resume(struct pt_regs *regs, int in_syscall)
@@ -401,9 +379,6 @@
 	if (test_thread_flag(TIF_SIGPENDING))
 		do_signal(regs, in_syscall);
 
-	if (test_and_clear_thread_flag(TIF_NOTIFY_RESUME)) {
+	if (test_and_clear_thread_flag(TIF_NOTIFY_RESUME))
 		tracehook_notify_resume(regs);
-		if (current->replacement_session_keyring)
-			key_replace_session_keyring();
-	}
 }
diff --git a/arch/microblaze/mm/fault.c b/arch/microblaze/mm/fault.c
index c38a265..eb365d6 100644
--- a/arch/microblaze/mm/fault.c
+++ b/arch/microblaze/mm/fault.c
@@ -92,6 +92,8 @@
 	int code = SEGV_MAPERR;
 	int is_write = error_code & ESR_S;
 	int fault;
+	unsigned int flags = FAULT_FLAG_ALLOW_RETRY | FAULT_FLAG_KILLABLE |
+					 (is_write ? FAULT_FLAG_WRITE : 0);
 
 	regs->ear = address;
 	regs->esr = error_code;
@@ -138,6 +140,7 @@
 		if (kernel_mode(regs) && !search_exception_tables(regs->pc))
 			goto bad_area_nosemaphore;
 
+retry:
 		down_read(&mm->mmap_sem);
 	}
 
@@ -210,7 +213,11 @@
 	 * make sure we exit gracefully rather than endlessly redo
 	 * the fault.
 	 */
-	fault = handle_mm_fault(mm, vma, address, is_write ? FAULT_FLAG_WRITE : 0);
+	fault = handle_mm_fault(mm, vma, address, flags);
+
+	if ((fault & VM_FAULT_RETRY) && fatal_signal_pending(current))
+		return;
+
 	if (unlikely(fault & VM_FAULT_ERROR)) {
 		if (fault & VM_FAULT_OOM)
 			goto out_of_memory;
@@ -218,11 +225,27 @@
 			goto do_sigbus;
 		BUG();
 	}
-	if (unlikely(fault & VM_FAULT_MAJOR))
-		current->maj_flt++;
-	else
-		current->min_flt++;
+
+	if (flags & FAULT_FLAG_ALLOW_RETRY) {
+		if (unlikely(fault & VM_FAULT_MAJOR))
+			current->maj_flt++;
+		else
+			current->min_flt++;
+		if (fault & VM_FAULT_RETRY) {
+			flags &= ~FAULT_FLAG_ALLOW_RETRY;
+
+			/*
+			 * No need to up_read(&mm->mmap_sem) as we would
+			 * have already released it in __lock_page_or_retry
+			 * in mm/filemap.c.
+			 */
+
+			goto retry;
+		}
+	}
+
 	up_read(&mm->mmap_sem);
+
 	/*
 	 * keep track of tlb+htab misses that are good addrs but
 	 * just need pte's created via handle_mm_fault()
diff --git a/arch/mips/Kconfig b/arch/mips/Kconfig
index 7705067..09ab87e 100644
--- a/arch/mips/Kconfig
+++ b/arch/mips/Kconfig
@@ -233,8 +233,9 @@
 	select ARCH_REQUIRE_GPIOLIB
 	select SWAP_IO_SPACE
 	select BOOT_RAW
-	select HAVE_CLK
-	select MIPS_MACHINE
+	select HAVE_MACH_CLKDEV
+	select CLKDEV_LOOKUP
+	select USE_OF
 
 config LASAT
 	bool "LASAT Networks platforms"
@@ -1783,10 +1784,12 @@
 
 config FORCE_MAX_ZONEORDER
 	int "Maximum zone order"
-	range 13 64 if SYS_SUPPORTS_HUGETLBFS && PAGE_SIZE_32KB
-	default "13" if SYS_SUPPORTS_HUGETLBFS && PAGE_SIZE_32KB
-	range 12 64 if SYS_SUPPORTS_HUGETLBFS && PAGE_SIZE_16KB
-	default "12" if SYS_SUPPORTS_HUGETLBFS && PAGE_SIZE_16KB
+	range 14 64 if HUGETLB_PAGE && PAGE_SIZE_64KB
+	default "14" if HUGETLB_PAGE && PAGE_SIZE_64KB
+	range 13 64 if HUGETLB_PAGE && PAGE_SIZE_32KB
+	default "13" if HUGETLB_PAGE && PAGE_SIZE_32KB
+	range 12 64 if HUGETLB_PAGE && PAGE_SIZE_16KB
+	default "12" if HUGETLB_PAGE && PAGE_SIZE_16KB
 	range 11 64
 	default "11"
 	help
diff --git a/arch/mips/Makefile b/arch/mips/Makefile
index 76017c2..764e37a 100644
--- a/arch/mips/Makefile
+++ b/arch/mips/Makefile
@@ -219,8 +219,8 @@
 
 KBUILD_AFLAGS	+= $(cflags-y)
 KBUILD_CFLAGS	+= $(cflags-y)
-KBUILD_CPPFLAGS += -D"VMLINUX_LOAD_ADDRESS=$(load-y)"
-KBUILD_CPPFLAGS += -D"DATAOFFSET=$(if $(dataoffset-y),$(dataoffset-y),0)"
+KBUILD_CPPFLAGS += -DVMLINUX_LOAD_ADDRESS=$(load-y)
+KBUILD_CPPFLAGS += -DDATAOFFSET=$(if $(dataoffset-y),$(dataoffset-y),0)
 
 LDFLAGS			+= -m $(ld-emul)
 
diff --git a/arch/mips/alchemy/devboards/db1200.c b/arch/mips/alchemy/devboards/db1200.c
index a83302b..bf22484 100644
--- a/arch/mips/alchemy/devboards/db1200.c
+++ b/arch/mips/alchemy/devboards/db1200.c
@@ -22,6 +22,7 @@
 #include <linux/gpio.h>
 #include <linux/i2c.h>
 #include <linux/init.h>
+#include <linux/module.h>
 #include <linux/interrupt.h>
 #include <linux/io.h>
 #include <linux/leds.h>
@@ -212,8 +213,6 @@
 	return __raw_readl((void __iomem *)MEM_STSTAT) & 1;
 }
 
-static const char *db1200_part_probes[] = { "cmdlinepart", NULL };
-
 static struct mtd_partition db1200_nand_parts[] = {
 	{
 		.name	= "NAND FS 0",
@@ -234,7 +233,6 @@
 		.nr_partitions	= ARRAY_SIZE(db1200_nand_parts),
 		.partitions	= db1200_nand_parts,
 		.chip_delay	= 20,
-		.part_probe_types = db1200_part_probes,
 	},
 	.ctrl = {
 		.dev_ready	= au1200_nand_device_ready,
diff --git a/arch/mips/alchemy/devboards/db1300.c b/arch/mips/alchemy/devboards/db1300.c
index 0893f2a..c56e024 100644
--- a/arch/mips/alchemy/devboards/db1300.c
+++ b/arch/mips/alchemy/devboards/db1300.c
@@ -145,8 +145,6 @@
 	return __raw_readl((void __iomem *)MEM_STSTAT) & 1;
 }
 
-static const char *db1300_part_probes[] = { "cmdlinepart", NULL };
-
 static struct mtd_partition db1300_nand_parts[] = {
 	{
 		.name	= "NAND FS 0",
@@ -167,7 +165,6 @@
 		.nr_partitions	= ARRAY_SIZE(db1300_nand_parts),
 		.partitions	= db1300_nand_parts,
 		.chip_delay	= 20,
-		.part_probe_types = db1300_part_probes,
 	},
 	.ctrl = {
 		.dev_ready	= au1300_nand_device_ready,
diff --git a/arch/mips/alchemy/devboards/db1550.c b/arch/mips/alchemy/devboards/db1550.c
index 6815d07..9eb7906 100644
--- a/arch/mips/alchemy/devboards/db1550.c
+++ b/arch/mips/alchemy/devboards/db1550.c
@@ -149,8 +149,6 @@
 	return __raw_readl((void __iomem *)MEM_STSTAT) & 1;
 }
 
-static const char *db1550_part_probes[] = { "cmdlinepart", NULL };
-
 static struct mtd_partition db1550_nand_parts[] = {
 	{
 		.name	= "NAND FS 0",
@@ -171,7 +169,6 @@
 		.nr_partitions	= ARRAY_SIZE(db1550_nand_parts),
 		.partitions	= db1550_nand_parts,
 		.chip_delay	= 20,
-		.part_probe_types = db1550_part_probes,
 	},
 	.ctrl = {
 		.dev_ready	= au1550_nand_device_ready,
diff --git a/arch/mips/ath79/Kconfig b/arch/mips/ath79/Kconfig
index e0fae8f..f44feee 100644
--- a/arch/mips/ath79/Kconfig
+++ b/arch/mips/ath79/Kconfig
@@ -26,6 +26,18 @@
 	  Say 'Y' here if you want your kernel to support the
 	  Atheros AP81 reference board.
 
+config ATH79_MACH_DB120
+	bool "Atheros DB120 reference board"
+	select SOC_AR934X
+	select ATH79_DEV_GPIO_BUTTONS
+	select ATH79_DEV_LEDS_GPIO
+	select ATH79_DEV_SPI
+	select ATH79_DEV_USB
+	select ATH79_DEV_WMAC
+	help
+	  Say 'Y' here if you want your kernel to support the
+	  Atheros DB120 reference board.
+
 config ATH79_MACH_PB44
 	bool "Atheros PB44 reference board"
 	select SOC_AR71XX
@@ -52,12 +64,14 @@
 config SOC_AR71XX
 	select USB_ARCH_HAS_EHCI
 	select USB_ARCH_HAS_OHCI
+	select HW_HAS_PCI
 	def_bool n
 
 config SOC_AR724X
 	select USB_ARCH_HAS_EHCI
 	select USB_ARCH_HAS_OHCI
 	select HW_HAS_PCI
+	select PCI_AR724X if PCI
 	def_bool n
 
 config SOC_AR913X
@@ -68,6 +82,15 @@
 	select USB_ARCH_HAS_EHCI
 	def_bool n
 
+config SOC_AR934X
+	select USB_ARCH_HAS_EHCI
+	select HW_HAS_PCI
+	select PCI_AR724X if PCI
+	def_bool n
+
+config PCI_AR724X
+	def_bool n
+
 config ATH79_DEV_GPIO_BUTTONS
 	def_bool n
 
@@ -81,7 +104,7 @@
 	def_bool n
 
 config ATH79_DEV_WMAC
-	depends on (SOC_AR913X || SOC_AR933X)
+	depends on (SOC_AR913X || SOC_AR933X || SOC_AR934X)
 	def_bool n
 
 endif
diff --git a/arch/mips/ath79/Makefile b/arch/mips/ath79/Makefile
index 3b911e09..2b54d98 100644
--- a/arch/mips/ath79/Makefile
+++ b/arch/mips/ath79/Makefile
@@ -11,6 +11,7 @@
 obj-y	:= prom.o setup.o irq.o common.o clock.o gpio.o
 
 obj-$(CONFIG_EARLY_PRINTK)		+= early_printk.o
+obj-$(CONFIG_PCI)			+= pci.o
 
 #
 # Devices
@@ -27,5 +28,6 @@
 #
 obj-$(CONFIG_ATH79_MACH_AP121)		+= mach-ap121.o
 obj-$(CONFIG_ATH79_MACH_AP81)		+= mach-ap81.o
+obj-$(CONFIG_ATH79_MACH_DB120)		+= mach-db120.o
 obj-$(CONFIG_ATH79_MACH_PB44)		+= mach-pb44.o
 obj-$(CONFIG_ATH79_MACH_UBNT_XM)	+= mach-ubnt-xm.o
diff --git a/arch/mips/ath79/clock.c b/arch/mips/ath79/clock.c
index 54d0eb4..b91ad3e 100644
--- a/arch/mips/ath79/clock.c
+++ b/arch/mips/ath79/clock.c
@@ -1,8 +1,11 @@
 /*
  *  Atheros AR71XX/AR724X/AR913X common routines
  *
+ *  Copyright (C) 2010-2011 Jaiganesh Narayanan <jnarayanan@atheros.com>
  *  Copyright (C) 2011 Gabor Juhos <juhosg@openwrt.org>
  *
+ *  Parts of this file are based on Atheros' 2.6.15/2.6.31 BSP
+ *
  *  This program is free software; you can redistribute it and/or modify it
  *  under the terms of the GNU General Public License version 2 as published
  *  by the Free Software Foundation.
@@ -163,6 +166,82 @@
 	ath79_uart_clk.rate = ath79_ref_clk.rate;
 }
 
+static void __init ar934x_clocks_init(void)
+{
+	u32 pll, out_div, ref_div, nint, frac, clk_ctrl, postdiv;
+	u32 cpu_pll, ddr_pll;
+	u32 bootstrap;
+
+	bootstrap = ath79_reset_rr(AR934X_RESET_REG_BOOTSTRAP);
+	if (bootstrap & AR934X_BOOTSTRAP_REF_CLK_40)
+		ath79_ref_clk.rate = 40 * 1000 * 1000;
+	else
+		ath79_ref_clk.rate = 25 * 1000 * 1000;
+
+	pll = ath79_pll_rr(AR934X_PLL_CPU_CONFIG_REG);
+	out_div = (pll >> AR934X_PLL_CPU_CONFIG_OUTDIV_SHIFT) &
+		  AR934X_PLL_CPU_CONFIG_OUTDIV_MASK;
+	ref_div = (pll >> AR934X_PLL_CPU_CONFIG_REFDIV_SHIFT) &
+		  AR934X_PLL_CPU_CONFIG_REFDIV_MASK;
+	nint = (pll >> AR934X_PLL_CPU_CONFIG_NINT_SHIFT) &
+	       AR934X_PLL_CPU_CONFIG_NINT_MASK;
+	frac = (pll >> AR934X_PLL_CPU_CONFIG_NFRAC_SHIFT) &
+	       AR934X_PLL_CPU_CONFIG_NFRAC_MASK;
+
+	cpu_pll = nint * ath79_ref_clk.rate / ref_div;
+	cpu_pll += frac * ath79_ref_clk.rate / (ref_div * (2 << 6));
+	cpu_pll /= (1 << out_div);
+
+	pll = ath79_pll_rr(AR934X_PLL_DDR_CONFIG_REG);
+	out_div = (pll >> AR934X_PLL_DDR_CONFIG_OUTDIV_SHIFT) &
+		  AR934X_PLL_DDR_CONFIG_OUTDIV_MASK;
+	ref_div = (pll >> AR934X_PLL_DDR_CONFIG_REFDIV_SHIFT) &
+		  AR934X_PLL_DDR_CONFIG_REFDIV_MASK;
+	nint = (pll >> AR934X_PLL_DDR_CONFIG_NINT_SHIFT) &
+	       AR934X_PLL_DDR_CONFIG_NINT_MASK;
+	frac = (pll >> AR934X_PLL_DDR_CONFIG_NFRAC_SHIFT) &
+	       AR934X_PLL_DDR_CONFIG_NFRAC_MASK;
+
+	ddr_pll = nint * ath79_ref_clk.rate / ref_div;
+	ddr_pll += frac * ath79_ref_clk.rate / (ref_div * (2 << 10));
+	ddr_pll /= (1 << out_div);
+
+	clk_ctrl = ath79_pll_rr(AR934X_PLL_CPU_DDR_CLK_CTRL_REG);
+
+	postdiv = (clk_ctrl >> AR934X_PLL_CPU_DDR_CLK_CTRL_CPU_POST_DIV_SHIFT) &
+		  AR934X_PLL_CPU_DDR_CLK_CTRL_CPU_POST_DIV_MASK;
+
+	if (clk_ctrl & AR934X_PLL_CPU_DDR_CLK_CTRL_CPU_PLL_BYPASS)
+		ath79_cpu_clk.rate = ath79_ref_clk.rate;
+	else if (clk_ctrl & AR934X_PLL_CPU_DDR_CLK_CTRL_CPUCLK_FROM_CPUPLL)
+		ath79_cpu_clk.rate = cpu_pll / (postdiv + 1);
+	else
+		ath79_cpu_clk.rate = ddr_pll / (postdiv + 1);
+
+	postdiv = (clk_ctrl >> AR934X_PLL_CPU_DDR_CLK_CTRL_DDR_POST_DIV_SHIFT) &
+		  AR934X_PLL_CPU_DDR_CLK_CTRL_DDR_POST_DIV_MASK;
+
+	if (clk_ctrl & AR934X_PLL_CPU_DDR_CLK_CTRL_DDR_PLL_BYPASS)
+		ath79_ddr_clk.rate = ath79_ref_clk.rate;
+	else if (clk_ctrl & AR934X_PLL_CPU_DDR_CLK_CTRL_DDRCLK_FROM_DDRPLL)
+		ath79_ddr_clk.rate = ddr_pll / (postdiv + 1);
+	else
+		ath79_ddr_clk.rate = cpu_pll / (postdiv + 1);
+
+	postdiv = (clk_ctrl >> AR934X_PLL_CPU_DDR_CLK_CTRL_AHB_POST_DIV_SHIFT) &
+		  AR934X_PLL_CPU_DDR_CLK_CTRL_AHB_POST_DIV_MASK;
+
+	if (clk_ctrl & AR934X_PLL_CPU_DDR_CLK_CTRL_AHB_PLL_BYPASS)
+		ath79_ahb_clk.rate = ath79_ref_clk.rate;
+	else if (clk_ctrl & AR934X_PLL_CPU_DDR_CLK_CTRL_AHBCLK_FROM_DDRPLL)
+		ath79_ahb_clk.rate = ddr_pll / (postdiv + 1);
+	else
+		ath79_ahb_clk.rate = cpu_pll / (postdiv + 1);
+
+	ath79_wdt_clk.rate = ath79_ref_clk.rate;
+	ath79_uart_clk.rate = ath79_ref_clk.rate;
+}
+
 void __init ath79_clocks_init(void)
 {
 	if (soc_is_ar71xx())
@@ -173,6 +252,8 @@
 		ar913x_clocks_init();
 	else if (soc_is_ar933x())
 		ar933x_clocks_init();
+	else if (soc_is_ar934x())
+		ar934x_clocks_init();
 	else
 		BUG();
 
diff --git a/arch/mips/ath79/common.c b/arch/mips/ath79/common.c
index f0fda98..5a4adfc 100644
--- a/arch/mips/ath79/common.c
+++ b/arch/mips/ath79/common.c
@@ -1,9 +1,12 @@
 /*
  *  Atheros AR71XX/AR724X/AR913X common routines
  *
- *  Copyright (C) 2008-2010 Gabor Juhos <juhosg@openwrt.org>
+ *  Copyright (C) 2010-2011 Jaiganesh Narayanan <jnarayanan@atheros.com>
+ *  Copyright (C) 2008-2011 Gabor Juhos <juhosg@openwrt.org>
  *  Copyright (C) 2008 Imre Kaloz <kaloz@openwrt.org>
  *
+ *  Parts of this file are based on Atheros' 2.6.15/2.6.31 BSP
+ *
  *  This program is free software; you can redistribute it and/or modify it
  *  under the terms of the GNU General Public License version 2 as published
  *  by the Free Software Foundation.
@@ -67,6 +70,8 @@
 		reg = AR913X_RESET_REG_RESET_MODULE;
 	else if (soc_is_ar933x())
 		reg = AR933X_RESET_REG_RESET_MODULE;
+	else if (soc_is_ar934x())
+		reg = AR934X_RESET_REG_RESET_MODULE;
 	else
 		BUG();
 
@@ -91,6 +96,8 @@
 		reg = AR913X_RESET_REG_RESET_MODULE;
 	else if (soc_is_ar933x())
 		reg = AR933X_RESET_REG_RESET_MODULE;
+	else if (soc_is_ar934x())
+		reg = AR934X_RESET_REG_RESET_MODULE;
 	else
 		BUG();
 
diff --git a/arch/mips/ath79/dev-common.c b/arch/mips/ath79/dev-common.c
index f4956f8..45efc63 100644
--- a/arch/mips/ath79/dev-common.c
+++ b/arch/mips/ath79/dev-common.c
@@ -89,7 +89,8 @@
 
 	if (soc_is_ar71xx() ||
 	    soc_is_ar724x() ||
-	    soc_is_ar913x()) {
+	    soc_is_ar913x() ||
+	    soc_is_ar934x()) {
 		ath79_uart_data[0].uartclk = clk_get_rate(clk);
 		platform_device_register(&ath79_uart_device);
 	} else if (soc_is_ar933x()) {
diff --git a/arch/mips/ath79/dev-gpio-buttons.c b/arch/mips/ath79/dev-gpio-buttons.c
index 4b0168a..366b35f 100644
--- a/arch/mips/ath79/dev-gpio-buttons.c
+++ b/arch/mips/ath79/dev-gpio-buttons.c
@@ -25,12 +25,10 @@
 	struct gpio_keys_button *p;
 	int err;
 
-	p = kmalloc(nbuttons * sizeof(*p), GFP_KERNEL);
+	p = kmemdup(buttons, nbuttons * sizeof(*p), GFP_KERNEL);
 	if (!p)
 		return;
 
-	memcpy(p, buttons, nbuttons * sizeof(*p));
-
 	pdev = platform_device_alloc("gpio-keys-polled", id);
 	if (!pdev)
 		goto err_free_buttons;
diff --git a/arch/mips/ath79/dev-leds-gpio.c b/arch/mips/ath79/dev-leds-gpio.c
index cdade68..dcb1deb 100644
--- a/arch/mips/ath79/dev-leds-gpio.c
+++ b/arch/mips/ath79/dev-leds-gpio.c
@@ -24,12 +24,10 @@
 	struct gpio_led *p;
 	int err;
 
-	p = kmalloc(num_leds * sizeof(*p), GFP_KERNEL);
+	p = kmemdup(leds, num_leds * sizeof(*p), GFP_KERNEL);
 	if (!p)
 		return;
 
-	memcpy(p, leds, num_leds * sizeof(*p));
-
 	pdev = platform_device_alloc("leds-gpio", id);
 	if (!pdev)
 		goto err_free_leds;
diff --git a/arch/mips/ath79/dev-wmac.c b/arch/mips/ath79/dev-wmac.c
index 9c717bf..d6d893c 100644
--- a/arch/mips/ath79/dev-wmac.c
+++ b/arch/mips/ath79/dev-wmac.c
@@ -1,9 +1,12 @@
 /*
  *  Atheros AR913X/AR933X SoC built-in WMAC device support
  *
+ *  Copyright (C) 2010-2011 Jaiganesh Narayanan <jnarayanan@atheros.com>
  *  Copyright (C) 2008-2011 Gabor Juhos <juhosg@openwrt.org>
  *  Copyright (C) 2008 Imre Kaloz <kaloz@openwrt.org>
  *
+ *  Parts of this file are based on Atheros' 2.6.15/2.6.31 BSP
+ *
  *  This program is free software; you can redistribute it and/or modify it
  *  under the terms of the GNU General Public License version 2 as published
  *  by the Free Software Foundation.
@@ -26,8 +29,7 @@
 		/* .start and .end fields are filled dynamically */
 		.flags	= IORESOURCE_MEM,
 	}, {
-		.start	= ATH79_CPU_IRQ_IP2,
-		.end	= ATH79_CPU_IRQ_IP2,
+		/* .start and .end fields are filled dynamically */
 		.flags	= IORESOURCE_IRQ,
 	},
 };
@@ -53,6 +55,8 @@
 
 	ath79_wmac_resources[0].start = AR913X_WMAC_BASE;
 	ath79_wmac_resources[0].end = AR913X_WMAC_BASE + AR913X_WMAC_SIZE - 1;
+	ath79_wmac_resources[1].start = ATH79_CPU_IRQ_IP2;
+	ath79_wmac_resources[1].end = ATH79_CPU_IRQ_IP2;
 }
 
 
@@ -79,6 +83,8 @@
 
 	ath79_wmac_resources[0].start = AR933X_WMAC_BASE;
 	ath79_wmac_resources[0].end = AR933X_WMAC_BASE + AR933X_WMAC_SIZE - 1;
+	ath79_wmac_resources[1].start = ATH79_CPU_IRQ_IP2;
+	ath79_wmac_resources[1].end = ATH79_CPU_IRQ_IP2;
 
 	t = ath79_reset_rr(AR933X_RESET_REG_BOOTSTRAP);
 	if (t & AR933X_BOOTSTRAP_REF_CLK_40)
@@ -92,12 +98,32 @@
 	ath79_wmac_data.external_reset = ar933x_wmac_reset;
 }
 
+static void ar934x_wmac_setup(void)
+{
+	u32 t;
+
+	ath79_wmac_device.name = "ar934x_wmac";
+
+	ath79_wmac_resources[0].start = AR934X_WMAC_BASE;
+	ath79_wmac_resources[0].end = AR934X_WMAC_BASE + AR934X_WMAC_SIZE - 1;
+	ath79_wmac_resources[1].start = ATH79_IP2_IRQ(1);
+	ath79_wmac_resources[1].end = ATH79_IP2_IRQ(1);
+
+	t = ath79_reset_rr(AR934X_RESET_REG_BOOTSTRAP);
+	if (t & AR934X_BOOTSTRAP_REF_CLK_40)
+		ath79_wmac_data.is_clk_25mhz = false;
+	else
+		ath79_wmac_data.is_clk_25mhz = true;
+}
+
 void __init ath79_register_wmac(u8 *cal_data)
 {
 	if (soc_is_ar913x())
 		ar913x_wmac_setup();
 	else if (soc_is_ar933x())
 		ar933x_wmac_setup();
+	else if (soc_is_ar934x())
+		ar934x_wmac_setup();
 	else
 		BUG();
 
diff --git a/arch/mips/ath79/early_printk.c b/arch/mips/ath79/early_printk.c
index 6a51ced..dc938cb 100644
--- a/arch/mips/ath79/early_printk.c
+++ b/arch/mips/ath79/early_printk.c
@@ -71,6 +71,9 @@
 	case REV_ID_MAJOR_AR7241:
 	case REV_ID_MAJOR_AR7242:
 	case REV_ID_MAJOR_AR913X:
+	case REV_ID_MAJOR_AR9341:
+	case REV_ID_MAJOR_AR9342:
+	case REV_ID_MAJOR_AR9344:
 		_prom_putchar = prom_putchar_ar71xx;
 		break;
 
diff --git a/arch/mips/ath79/gpio.c b/arch/mips/ath79/gpio.c
index a2f8ca6..29054f2 100644
--- a/arch/mips/ath79/gpio.c
+++ b/arch/mips/ath79/gpio.c
@@ -1,9 +1,12 @@
 /*
  *  Atheros AR71XX/AR724X/AR913X GPIO API support
  *
- *  Copyright (C) 2008-2010 Gabor Juhos <juhosg@openwrt.org>
+ *  Copyright (C) 2010-2011 Jaiganesh Narayanan <jnarayanan@atheros.com>
+ *  Copyright (C) 2008-2011 Gabor Juhos <juhosg@openwrt.org>
  *  Copyright (C) 2008 Imre Kaloz <kaloz@openwrt.org>
  *
+ *  Parts of this file are based on Atheros' 2.6.15/2.6.31 BSP
+ *
  *  This program is free software; you can redistribute it and/or modify it
  *  under the terms of the GNU General Public License version 2 as published
  *  by the Free Software Foundation.
@@ -89,6 +92,42 @@
 	return 0;
 }
 
+static int ar934x_gpio_direction_input(struct gpio_chip *chip, unsigned offset)
+{
+	void __iomem *base = ath79_gpio_base;
+	unsigned long flags;
+
+	spin_lock_irqsave(&ath79_gpio_lock, flags);
+
+	__raw_writel(__raw_readl(base + AR71XX_GPIO_REG_OE) | (1 << offset),
+		     base + AR71XX_GPIO_REG_OE);
+
+	spin_unlock_irqrestore(&ath79_gpio_lock, flags);
+
+	return 0;
+}
+
+static int ar934x_gpio_direction_output(struct gpio_chip *chip, unsigned offset,
+					int value)
+{
+	void __iomem *base = ath79_gpio_base;
+	unsigned long flags;
+
+	spin_lock_irqsave(&ath79_gpio_lock, flags);
+
+	if (value)
+		__raw_writel(1 << offset, base + AR71XX_GPIO_REG_SET);
+	else
+		__raw_writel(1 << offset, base + AR71XX_GPIO_REG_CLEAR);
+
+	__raw_writel(__raw_readl(base + AR71XX_GPIO_REG_OE) & ~(1 << offset),
+		     base + AR71XX_GPIO_REG_OE);
+
+	spin_unlock_irqrestore(&ath79_gpio_lock, flags);
+
+	return 0;
+}
+
 static struct gpio_chip ath79_gpio_chip = {
 	.label			= "ath79",
 	.get			= ath79_gpio_get_value,
@@ -155,11 +194,17 @@
 		ath79_gpio_count = AR913X_GPIO_COUNT;
 	else if (soc_is_ar933x())
 		ath79_gpio_count = AR933X_GPIO_COUNT;
+	else if (soc_is_ar934x())
+		ath79_gpio_count = AR934X_GPIO_COUNT;
 	else
 		BUG();
 
 	ath79_gpio_base = ioremap_nocache(AR71XX_GPIO_BASE, AR71XX_GPIO_SIZE);
 	ath79_gpio_chip.ngpio = ath79_gpio_count;
+	if (soc_is_ar934x()) {
+		ath79_gpio_chip.direction_input = ar934x_gpio_direction_input;
+		ath79_gpio_chip.direction_output = ar934x_gpio_direction_output;
+	}
 
 	err = gpiochip_add(&ath79_gpio_chip);
 	if (err)
diff --git a/arch/mips/ath79/irq.c b/arch/mips/ath79/irq.c
index 1b073de..90d09fc 100644
--- a/arch/mips/ath79/irq.c
+++ b/arch/mips/ath79/irq.c
@@ -1,10 +1,11 @@
 /*
  *  Atheros AR71xx/AR724x/AR913x specific interrupt handling
  *
- *  Copyright (C) 2008-2010 Gabor Juhos <juhosg@openwrt.org>
+ *  Copyright (C) 2010-2011 Jaiganesh Narayanan <jnarayanan@atheros.com>
+ *  Copyright (C) 2008-2011 Gabor Juhos <juhosg@openwrt.org>
  *  Copyright (C) 2008 Imre Kaloz <kaloz@openwrt.org>
  *
- *  Parts of this file are based on Atheros' 2.6.15 BSP
+ *  Parts of this file are based on Atheros' 2.6.15/2.6.31 BSP
  *
  *  This program is free software; you can redistribute it and/or modify it
  *  under the terms of the GNU General Public License version 2 as published
@@ -23,8 +24,8 @@
 #include <asm/mach-ath79/ar71xx_regs.h>
 #include "common.h"
 
-static unsigned int ath79_ip2_flush_reg;
-static unsigned int ath79_ip3_flush_reg;
+static void (*ath79_ip2_handler)(void);
+static void (*ath79_ip3_handler)(void);
 
 static void ath79_misc_irq_handler(unsigned int irq, struct irq_desc *desc)
 {
@@ -129,7 +130,7 @@
 
 	if (soc_is_ar71xx() || soc_is_ar913x())
 		ath79_misc_irq_chip.irq_mask_ack = ar71xx_misc_irq_mask;
-	else if (soc_is_ar724x() || soc_is_ar933x())
+	else if (soc_is_ar724x() || soc_is_ar933x() || soc_is_ar934x())
 		ath79_misc_irq_chip.irq_ack = ar724x_misc_irq_ack;
 	else
 		BUG();
@@ -143,6 +144,39 @@
 	irq_set_chained_handler(ATH79_CPU_IRQ_MISC, ath79_misc_irq_handler);
 }
 
+static void ar934x_ip2_irq_dispatch(unsigned int irq, struct irq_desc *desc)
+{
+	u32 status;
+
+	disable_irq_nosync(irq);
+
+	status = ath79_reset_rr(AR934X_RESET_REG_PCIE_WMAC_INT_STATUS);
+
+	if (status & AR934X_PCIE_WMAC_INT_PCIE_ALL) {
+		ath79_ddr_wb_flush(AR934X_DDR_REG_FLUSH_PCIE);
+		generic_handle_irq(ATH79_IP2_IRQ(0));
+	} else if (status & AR934X_PCIE_WMAC_INT_WMAC_ALL) {
+		ath79_ddr_wb_flush(AR934X_DDR_REG_FLUSH_WMAC);
+		generic_handle_irq(ATH79_IP2_IRQ(1));
+	} else {
+		spurious_interrupt();
+	}
+
+	enable_irq(irq);
+}
+
+static void ar934x_ip2_irq_init(void)
+{
+	int i;
+
+	for (i = ATH79_IP2_IRQ_BASE;
+	     i < ATH79_IP2_IRQ_BASE + ATH79_IP2_IRQ_COUNT; i++)
+		irq_set_chip_and_handler(i, &dummy_irq_chip,
+					 handle_level_irq);
+
+	irq_set_chained_handler(ATH79_CPU_IRQ_IP2, ar934x_ip2_irq_dispatch);
+}
+
 asmlinkage void plat_irq_dispatch(void)
 {
 	unsigned long pending;
@@ -152,10 +186,8 @@
 	if (pending & STATUSF_IP7)
 		do_IRQ(ATH79_CPU_IRQ_TIMER);
 
-	else if (pending & STATUSF_IP2) {
-		ath79_ddr_wb_flush(ath79_ip2_flush_reg);
-		do_IRQ(ATH79_CPU_IRQ_IP2);
-	}
+	else if (pending & STATUSF_IP2)
+		ath79_ip2_handler();
 
 	else if (pending & STATUSF_IP4)
 		do_IRQ(ATH79_CPU_IRQ_GE0);
@@ -163,10 +195,8 @@
 	else if (pending & STATUSF_IP5)
 		do_IRQ(ATH79_CPU_IRQ_GE1);
 
-	else if (pending & STATUSF_IP3) {
-		ath79_ddr_wb_flush(ath79_ip3_flush_reg);
-		do_IRQ(ATH79_CPU_IRQ_USB);
-	}
+	else if (pending & STATUSF_IP3)
+		ath79_ip3_handler();
 
 	else if (pending & STATUSF_IP6)
 		do_IRQ(ATH79_CPU_IRQ_MISC);
@@ -175,24 +205,97 @@
 		spurious_interrupt();
 }
 
+/*
+ * The IP2/IP3 lines are tied to a PCI/WMAC/USB device. Drivers for
+ * these devices typically allocate coherent DMA memory, however the
+ * DMA controller may still have some unsynchronized data in the FIFO.
+ * Issue a flush in the handlers to ensure that the driver sees
+ * the update.
+ */
+static void ar71xx_ip2_handler(void)
+{
+	ath79_ddr_wb_flush(AR71XX_DDR_REG_FLUSH_PCI);
+	do_IRQ(ATH79_CPU_IRQ_IP2);
+}
+
+static void ar724x_ip2_handler(void)
+{
+	ath79_ddr_wb_flush(AR724X_DDR_REG_FLUSH_PCIE);
+	do_IRQ(ATH79_CPU_IRQ_IP2);
+}
+
+static void ar913x_ip2_handler(void)
+{
+	ath79_ddr_wb_flush(AR913X_DDR_REG_FLUSH_WMAC);
+	do_IRQ(ATH79_CPU_IRQ_IP2);
+}
+
+static void ar933x_ip2_handler(void)
+{
+	ath79_ddr_wb_flush(AR933X_DDR_REG_FLUSH_WMAC);
+	do_IRQ(ATH79_CPU_IRQ_IP2);
+}
+
+static void ar934x_ip2_handler(void)
+{
+	do_IRQ(ATH79_CPU_IRQ_IP2);
+}
+
+static void ar71xx_ip3_handler(void)
+{
+	ath79_ddr_wb_flush(AR71XX_DDR_REG_FLUSH_USB);
+	do_IRQ(ATH79_CPU_IRQ_USB);
+}
+
+static void ar724x_ip3_handler(void)
+{
+	ath79_ddr_wb_flush(AR724X_DDR_REG_FLUSH_USB);
+	do_IRQ(ATH79_CPU_IRQ_USB);
+}
+
+static void ar913x_ip3_handler(void)
+{
+	ath79_ddr_wb_flush(AR913X_DDR_REG_FLUSH_USB);
+	do_IRQ(ATH79_CPU_IRQ_USB);
+}
+
+static void ar933x_ip3_handler(void)
+{
+	ath79_ddr_wb_flush(AR933X_DDR_REG_FLUSH_USB);
+	do_IRQ(ATH79_CPU_IRQ_USB);
+}
+
+static void ar934x_ip3_handler(void)
+{
+	ath79_ddr_wb_flush(AR934X_DDR_REG_FLUSH_USB);
+	do_IRQ(ATH79_CPU_IRQ_USB);
+}
+
 void __init arch_init_irq(void)
 {
 	if (soc_is_ar71xx()) {
-		ath79_ip2_flush_reg = AR71XX_DDR_REG_FLUSH_PCI;
-		ath79_ip3_flush_reg = AR71XX_DDR_REG_FLUSH_USB;
+		ath79_ip2_handler = ar71xx_ip2_handler;
+		ath79_ip3_handler = ar71xx_ip3_handler;
 	} else if (soc_is_ar724x()) {
-		ath79_ip2_flush_reg = AR724X_DDR_REG_FLUSH_PCIE;
-		ath79_ip3_flush_reg = AR724X_DDR_REG_FLUSH_USB;
+		ath79_ip2_handler = ar724x_ip2_handler;
+		ath79_ip3_handler = ar724x_ip3_handler;
 	} else if (soc_is_ar913x()) {
-		ath79_ip2_flush_reg = AR913X_DDR_REG_FLUSH_WMAC;
-		ath79_ip3_flush_reg = AR913X_DDR_REG_FLUSH_USB;
+		ath79_ip2_handler = ar913x_ip2_handler;
+		ath79_ip3_handler = ar913x_ip3_handler;
 	} else if (soc_is_ar933x()) {
-		ath79_ip2_flush_reg = AR933X_DDR_REG_FLUSH_WMAC;
-		ath79_ip3_flush_reg = AR933X_DDR_REG_FLUSH_USB;
-	} else
+		ath79_ip2_handler = ar933x_ip2_handler;
+		ath79_ip3_handler = ar933x_ip3_handler;
+	} else if (soc_is_ar934x()) {
+		ath79_ip2_handler = ar934x_ip2_handler;
+		ath79_ip3_handler = ar934x_ip3_handler;
+	} else {
 		BUG();
+	}
 
 	cp0_perfcount_irq = ATH79_MISC_IRQ_PERFC;
 	mips_cpu_irq_init();
 	ath79_misc_irq_init();
+
+	if (soc_is_ar934x())
+		ar934x_ip2_irq_init();
 }
diff --git a/arch/mips/ath79/mach-db120.c b/arch/mips/ath79/mach-db120.c
new file mode 100644
index 0000000..1983e4d
--- /dev/null
+++ b/arch/mips/ath79/mach-db120.c
@@ -0,0 +1,134 @@
+/*
+ * Atheros DB120 reference board support
+ *
+ * Copyright (c) 2011 Qualcomm Atheros
+ * Copyright (c) 2011 Gabor Juhos <juhosg@openwrt.org>
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ *
+ */
+
+#include <linux/pci.h>
+#include <linux/ath9k_platform.h>
+
+#include "machtypes.h"
+#include "dev-gpio-buttons.h"
+#include "dev-leds-gpio.h"
+#include "dev-spi.h"
+#include "dev-wmac.h"
+#include "pci.h"
+
+#define DB120_GPIO_LED_WLAN_5G		12
+#define DB120_GPIO_LED_WLAN_2G		13
+#define DB120_GPIO_LED_STATUS		14
+#define DB120_GPIO_LED_WPS		15
+
+#define DB120_GPIO_BTN_WPS		16
+
+#define DB120_KEYS_POLL_INTERVAL	20	/* msecs */
+#define DB120_KEYS_DEBOUNCE_INTERVAL	(3 * DB120_KEYS_POLL_INTERVAL)
+
+#define DB120_WMAC_CALDATA_OFFSET 0x1000
+#define DB120_PCIE_CALDATA_OFFSET 0x5000
+
+static struct gpio_led db120_leds_gpio[] __initdata = {
+	{
+		.name		= "db120:green:status",
+		.gpio		= DB120_GPIO_LED_STATUS,
+		.active_low	= 1,
+	},
+	{
+		.name		= "db120:green:wps",
+		.gpio		= DB120_GPIO_LED_WPS,
+		.active_low	= 1,
+	},
+	{
+		.name		= "db120:green:wlan-5g",
+		.gpio		= DB120_GPIO_LED_WLAN_5G,
+		.active_low	= 1,
+	},
+	{
+		.name		= "db120:green:wlan-2g",
+		.gpio		= DB120_GPIO_LED_WLAN_2G,
+		.active_low	= 1,
+	},
+};
+
+static struct gpio_keys_button db120_gpio_keys[] __initdata = {
+	{
+		.desc		= "WPS button",
+		.type		= EV_KEY,
+		.code		= KEY_WPS_BUTTON,
+		.debounce_interval = DB120_KEYS_DEBOUNCE_INTERVAL,
+		.gpio		= DB120_GPIO_BTN_WPS,
+		.active_low	= 1,
+	},
+};
+
+static struct spi_board_info db120_spi_info[] = {
+	{
+		.bus_num	= 0,
+		.chip_select	= 0,
+		.max_speed_hz	= 25000000,
+		.modalias	= "s25sl064a",
+	}
+};
+
+static struct ath79_spi_platform_data db120_spi_data = {
+	.bus_num	= 0,
+	.num_chipselect	= 1,
+};
+
+#ifdef CONFIG_PCI
+static struct ath9k_platform_data db120_ath9k_data;
+
+static int db120_pci_plat_dev_init(struct pci_dev *dev)
+{
+	switch (PCI_SLOT(dev->devfn)) {
+	case 0:
+		dev->dev.platform_data = &db120_ath9k_data;
+		break;
+	}
+
+	return 0;
+}
+
+static void __init db120_pci_init(u8 *eeprom)
+{
+	memcpy(db120_ath9k_data.eeprom_data, eeprom,
+	       sizeof(db120_ath9k_data.eeprom_data));
+
+	ath79_pci_set_plat_dev_init(db120_pci_plat_dev_init);
+	ath79_register_pci();
+}
+#else
+static inline void db120_pci_init(u8 *eeprom) {}
+#endif /* CONFIG_PCI */
+
+static void __init db120_setup(void)
+{
+	u8 *art = (u8 *) KSEG1ADDR(0x1fff0000);
+
+	ath79_register_leds_gpio(-1, ARRAY_SIZE(db120_leds_gpio),
+				 db120_leds_gpio);
+	ath79_register_gpio_keys_polled(-1, DB120_KEYS_POLL_INTERVAL,
+					ARRAY_SIZE(db120_gpio_keys),
+					db120_gpio_keys);
+	ath79_register_spi(&db120_spi_data, db120_spi_info,
+			   ARRAY_SIZE(db120_spi_info));
+	ath79_register_wmac(art + DB120_WMAC_CALDATA_OFFSET);
+	db120_pci_init(art + DB120_PCIE_CALDATA_OFFSET);
+}
+
+MIPS_MACHINE(ATH79_MACH_DB120, "DB120", "Atheros DB120 reference board",
+	     db120_setup);
diff --git a/arch/mips/ath79/mach-pb44.c b/arch/mips/ath79/mach-pb44.c
index fe9701a..c5f0ea5 100644
--- a/arch/mips/ath79/mach-pb44.c
+++ b/arch/mips/ath79/mach-pb44.c
@@ -19,6 +19,7 @@
 #include "dev-leds-gpio.h"
 #include "dev-spi.h"
 #include "dev-usb.h"
+#include "pci.h"
 
 #define PB44_GPIO_I2C_SCL	0
 #define PB44_GPIO_I2C_SDA	1
@@ -114,6 +115,7 @@
 	ath79_register_spi(&pb44_spi_data, pb44_spi_info,
 			   ARRAY_SIZE(pb44_spi_info));
 	ath79_register_usb();
+	ath79_register_pci();
 }
 
 MIPS_MACHINE(ATH79_MACH_PB44, "PB44", "Atheros PB44 reference board",
diff --git a/arch/mips/ath79/mach-ubnt-xm.c b/arch/mips/ath79/mach-ubnt-xm.c
index 3c311a5..4a3c606 100644
--- a/arch/mips/ath79/mach-ubnt-xm.c
+++ b/arch/mips/ath79/mach-ubnt-xm.c
@@ -12,16 +12,15 @@
 
 #include <linux/init.h>
 #include <linux/pci.h>
-
-#ifdef CONFIG_PCI
 #include <linux/ath9k_platform.h>
-#include <asm/mach-ath79/pci-ath724x.h>
-#endif /* CONFIG_PCI */
+
+#include <asm/mach-ath79/irq.h>
 
 #include "machtypes.h"
 #include "dev-gpio-buttons.h"
 #include "dev-leds-gpio.h"
 #include "dev-spi.h"
+#include "pci.h"
 
 #define UBNT_XM_GPIO_LED_L1		0
 #define UBNT_XM_GPIO_LED_L2		1
@@ -33,7 +32,6 @@
 #define UBNT_XM_KEYS_POLL_INTERVAL	20
 #define UBNT_XM_KEYS_DEBOUNCE_INTERVAL	(3 * UBNT_XM_KEYS_POLL_INTERVAL)
 
-#define UBNT_XM_PCI_IRQ			48
 #define UBNT_XM_EEPROM_ADDR		(u8 *) KSEG1ADDR(0x1fff1000)
 
 static struct gpio_led ubnt_xm_leds_gpio[] __initdata = {
@@ -84,12 +82,27 @@
 #ifdef CONFIG_PCI
 static struct ath9k_platform_data ubnt_xm_eeprom_data;
 
-static struct ath724x_pci_data ubnt_xm_pci_data[] = {
-	{
-		.irq	= UBNT_XM_PCI_IRQ,
-		.pdata	= &ubnt_xm_eeprom_data,
-	},
-};
+static int ubnt_xm_pci_plat_dev_init(struct pci_dev *dev)
+{
+	switch (PCI_SLOT(dev->devfn)) {
+	case 0:
+		dev->dev.platform_data = &ubnt_xm_eeprom_data;
+		break;
+	}
+
+	return 0;
+}
+
+static void __init ubnt_xm_pci_init(void)
+{
+	memcpy(ubnt_xm_eeprom_data.eeprom_data, UBNT_XM_EEPROM_ADDR,
+	       sizeof(ubnt_xm_eeprom_data.eeprom_data));
+
+	ath79_pci_set_plat_dev_init(ubnt_xm_pci_plat_dev_init);
+	ath79_register_pci();
+}
+#else
+static inline void ubnt_xm_pci_init(void) {}
 #endif /* CONFIG_PCI */
 
 static void __init ubnt_xm_init(void)
@@ -104,13 +117,7 @@
 	ath79_register_spi(&ubnt_xm_spi_data, ubnt_xm_spi_info,
 			   ARRAY_SIZE(ubnt_xm_spi_info));
 
-#ifdef CONFIG_PCI
-	memcpy(ubnt_xm_eeprom_data.eeprom_data, UBNT_XM_EEPROM_ADDR,
-	       sizeof(ubnt_xm_eeprom_data.eeprom_data));
-
-	ath724x_pci_add_data(ubnt_xm_pci_data, ARRAY_SIZE(ubnt_xm_pci_data));
-#endif /* CONFIG_PCI */
-
+	ubnt_xm_pci_init();
 }
 
 MIPS_MACHINE(ATH79_MACH_UBNT_XM,
diff --git a/arch/mips/ath79/machtypes.h b/arch/mips/ath79/machtypes.h
index 9a1f382..af92e5c 100644
--- a/arch/mips/ath79/machtypes.h
+++ b/arch/mips/ath79/machtypes.h
@@ -18,6 +18,7 @@
 	ATH79_MACH_GENERIC = 0,
 	ATH79_MACH_AP121,		/* Atheros AP121 reference board */
 	ATH79_MACH_AP81,		/* Atheros AP81 reference board */
+	ATH79_MACH_DB120,		/* Atheros DB120 reference board */
 	ATH79_MACH_PB44,		/* Atheros PB44 reference board */
 	ATH79_MACH_UBNT_XM,		/* Ubiquiti Networks XM board rev 1.0 */
 };
diff --git a/arch/mips/ath79/pci.c b/arch/mips/ath79/pci.c
new file mode 100644
index 0000000..ca83abd
--- /dev/null
+++ b/arch/mips/ath79/pci.c
@@ -0,0 +1,130 @@
+/*
+ *  Atheros AR71XX/AR724X specific PCI setup code
+ *
+ *  Copyright (C) 2011 René Bolldorf <xsecute@googlemail.com>
+ *  Copyright (C) 2008-2011 Gabor Juhos <juhosg@openwrt.org>
+ *  Copyright (C) 2008 Imre Kaloz <kaloz@openwrt.org>
+ *
+ *  Parts of this file are based on Atheros' 2.6.15 BSP
+ *
+ *  This program is free software; you can redistribute it and/or modify it
+ *  under the terms of the GNU General Public License version 2 as published
+ *  by the Free Software Foundation.
+ */
+
+#include <linux/init.h>
+#include <linux/pci.h>
+#include <asm/mach-ath79/ar71xx_regs.h>
+#include <asm/mach-ath79/ath79.h>
+#include <asm/mach-ath79/irq.h>
+#include <asm/mach-ath79/pci.h>
+#include "pci.h"
+
+static int (*ath79_pci_plat_dev_init)(struct pci_dev *dev);
+static const struct ath79_pci_irq *ath79_pci_irq_map __initdata;
+static unsigned ath79_pci_nr_irqs __initdata;
+
+static const struct ath79_pci_irq ar71xx_pci_irq_map[] __initconst = {
+	{
+		.slot	= 17,
+		.pin	= 1,
+		.irq	= ATH79_PCI_IRQ(0),
+	}, {
+		.slot	= 18,
+		.pin	= 1,
+		.irq	= ATH79_PCI_IRQ(1),
+	}, {
+		.slot	= 19,
+		.pin	= 1,
+		.irq	= ATH79_PCI_IRQ(2),
+	}
+};
+
+static const struct ath79_pci_irq ar724x_pci_irq_map[] __initconst = {
+	{
+		.slot	= 0,
+		.pin	= 1,
+		.irq	= ATH79_PCI_IRQ(0),
+	}
+};
+
+int __init pcibios_map_irq(const struct pci_dev *dev, uint8_t slot, uint8_t pin)
+{
+	int irq = -1;
+	int i;
+
+	if (ath79_pci_nr_irqs == 0 ||
+	    ath79_pci_irq_map == NULL) {
+		if (soc_is_ar71xx()) {
+			ath79_pci_irq_map = ar71xx_pci_irq_map;
+			ath79_pci_nr_irqs = ARRAY_SIZE(ar71xx_pci_irq_map);
+		} else if (soc_is_ar724x() ||
+			   soc_is_ar9342() ||
+			   soc_is_ar9344()) {
+			ath79_pci_irq_map = ar724x_pci_irq_map;
+			ath79_pci_nr_irqs = ARRAY_SIZE(ar724x_pci_irq_map);
+		} else {
+			pr_crit("pci %s: invalid irq map\n",
+				pci_name((struct pci_dev *) dev));
+			return irq;
+		}
+	}
+
+	for (i = 0; i < ath79_pci_nr_irqs; i++) {
+		const struct ath79_pci_irq *entry;
+
+		entry = &ath79_pci_irq_map[i];
+		if (entry->slot == slot && entry->pin == pin) {
+			irq = entry->irq;
+			break;
+		}
+	}
+
+	if (irq < 0)
+		pr_crit("pci %s: no irq found for pin %u\n",
+			pci_name((struct pci_dev *) dev), pin);
+	else
+		pr_info("pci %s: using irq %d for pin %u\n",
+			pci_name((struct pci_dev *) dev), irq, pin);
+
+	return irq;
+}
+
+int pcibios_plat_dev_init(struct pci_dev *dev)
+{
+	if (ath79_pci_plat_dev_init)
+		return ath79_pci_plat_dev_init(dev);
+
+	return 0;
+}
+
+void __init ath79_pci_set_irq_map(unsigned nr_irqs,
+				  const struct ath79_pci_irq *map)
+{
+	ath79_pci_nr_irqs = nr_irqs;
+	ath79_pci_irq_map = map;
+}
+
+void __init ath79_pci_set_plat_dev_init(int (*func)(struct pci_dev *dev))
+{
+	ath79_pci_plat_dev_init = func;
+}
+
+int __init ath79_register_pci(void)
+{
+	if (soc_is_ar71xx())
+		return ar71xx_pcibios_init();
+
+	if (soc_is_ar724x())
+		return ar724x_pcibios_init(ATH79_CPU_IRQ_IP2);
+
+	if (soc_is_ar9342() || soc_is_ar9344()) {
+		u32 bootstrap;
+
+		bootstrap = ath79_reset_rr(AR934X_RESET_REG_BOOTSTRAP);
+		if (bootstrap & AR934X_BOOTSTRAP_PCIE_RC)
+			return ar724x_pcibios_init(ATH79_IP2_IRQ(0));
+	}
+
+	return -ENODEV;
+}
diff --git a/arch/mips/ath79/pci.h b/arch/mips/ath79/pci.h
new file mode 100644
index 0000000..51c6625
--- /dev/null
+++ b/arch/mips/ath79/pci.h
@@ -0,0 +1,34 @@
+/*
+ *  Atheros AR71XX/AR724X PCI support
+ *
+ *  Copyright (C) 2011 René Bolldorf <xsecute@googlemail.com>
+ *  Copyright (C) 2008-2011 Gabor Juhos <juhosg@openwrt.org>
+ *  Copyright (C) 2008 Imre Kaloz <kaloz@openwrt.org>
+ *
+ *  This program is free software; you can redistribute it and/or modify it
+ *  under the terms of the GNU General Public License version 2 as published
+ *  by the Free Software Foundation.
+ */
+
+#ifndef _ATH79_PCI_H
+#define _ATH79_PCI_H
+
+struct ath79_pci_irq {
+	u8	slot;
+	u8	pin;
+	int	irq;
+};
+
+#ifdef CONFIG_PCI
+void ath79_pci_set_irq_map(unsigned nr_irqs, const struct ath79_pci_irq *map);
+void ath79_pci_set_plat_dev_init(int (*func)(struct pci_dev *dev));
+int ath79_register_pci(void);
+#else
+static inline void
+ath79_pci_set_irq_map(unsigned nr_irqs, const struct ath79_pci_irq *map) {}
+static inline void
+ath79_pci_set_plat_dev_init(int (*func)(struct pci_dev *)) {}
+static inline int ath79_register_pci(void) { return 0; }
+#endif
+
+#endif /* _ATH79_PCI_H */
diff --git a/arch/mips/ath79/setup.c b/arch/mips/ath79/setup.c
index 80a7d40..60d212e 100644
--- a/arch/mips/ath79/setup.c
+++ b/arch/mips/ath79/setup.c
@@ -1,10 +1,11 @@
 /*
  *  Atheros AR71XX/AR724X/AR913X specific setup
  *
+ *  Copyright (C) 2010-2011 Jaiganesh Narayanan <jnarayanan@atheros.com>
  *  Copyright (C) 2008-2011 Gabor Juhos <juhosg@openwrt.org>
  *  Copyright (C) 2008 Imre Kaloz <kaloz@openwrt.org>
  *
- *  Parts of this file are based on Atheros' 2.6.15 BSP
+ *  Parts of this file are based on Atheros' 2.6.15/2.6.31 BSP
  *
  *  This program is free software; you can redistribute it and/or modify it
  *  under the terms of the GNU General Public License version 2 as published
@@ -116,18 +117,6 @@
 		rev = id & AR724X_REV_ID_REVISION_MASK;
 		break;
 
-	case REV_ID_MAJOR_AR9330:
-		ath79_soc = ATH79_SOC_AR9330;
-		chip = "9330";
-		rev = id & AR933X_REV_ID_REVISION_MASK;
-		break;
-
-	case REV_ID_MAJOR_AR9331:
-		ath79_soc = ATH79_SOC_AR9331;
-		chip = "9331";
-		rev = id & AR933X_REV_ID_REVISION_MASK;
-		break;
-
 	case REV_ID_MAJOR_AR913X:
 		minor = id & AR913X_REV_ID_MINOR_MASK;
 		rev = id >> AR913X_REV_ID_REVISION_SHIFT;
@@ -145,6 +134,36 @@
 		}
 		break;
 
+	case REV_ID_MAJOR_AR9330:
+		ath79_soc = ATH79_SOC_AR9330;
+		chip = "9330";
+		rev = id & AR933X_REV_ID_REVISION_MASK;
+		break;
+
+	case REV_ID_MAJOR_AR9331:
+		ath79_soc = ATH79_SOC_AR9331;
+		chip = "9331";
+		rev = id & AR933X_REV_ID_REVISION_MASK;
+		break;
+
+	case REV_ID_MAJOR_AR9341:
+		ath79_soc = ATH79_SOC_AR9341;
+		chip = "9341";
+		rev = id & AR934X_REV_ID_REVISION_MASK;
+		break;
+
+	case REV_ID_MAJOR_AR9342:
+		ath79_soc = ATH79_SOC_AR9342;
+		chip = "9342";
+		rev = id & AR934X_REV_ID_REVISION_MASK;
+		break;
+
+	case REV_ID_MAJOR_AR9344:
+		ath79_soc = ATH79_SOC_AR9344;
+		chip = "9344";
+		rev = id & AR934X_REV_ID_REVISION_MASK;
+		break;
+
 	default:
 		panic("ath79: unknown SoC, id:0x%08x", id);
 	}
diff --git a/arch/mips/bcm63xx/boards/Makefile b/arch/mips/bcm63xx/boards/Makefile
index 9f64fb4..af07c1a 100644
--- a/arch/mips/bcm63xx/boards/Makefile
+++ b/arch/mips/bcm63xx/boards/Makefile
@@ -1,3 +1 @@
 obj-$(CONFIG_BOARD_BCM963XX)		+= board_bcm963xx.o
-
-ccflags-y := -Werror
diff --git a/arch/mips/cavium-octeon/setup.c b/arch/mips/cavium-octeon/setup.c
index d3a9f012..260dc24 100644
--- a/arch/mips/cavium-octeon/setup.c
+++ b/arch/mips/cavium-octeon/setup.c
@@ -9,6 +9,7 @@
 #include <linux/init.h>
 #include <linux/console.h>
 #include <linux/delay.h>
+#include <linux/export.h>
 #include <linux/interrupt.h>
 #include <linux/io.h>
 #include <linux/serial.h>
diff --git a/arch/mips/cavium-octeon/smp.c b/arch/mips/cavium-octeon/smp.c
index 97e7ce9..4b93048 100644
--- a/arch/mips/cavium-octeon/smp.c
+++ b/arch/mips/cavium-octeon/smp.c
@@ -257,8 +257,6 @@
 
 extern void fixup_irqs(void);
 
-static DEFINE_SPINLOCK(smp_reserve_lock);
-
 static int octeon_cpu_disable(void)
 {
 	unsigned int cpu = smp_processor_id();
@@ -266,8 +264,6 @@
 	if (cpu == 0)
 		return -EBUSY;
 
-	spin_lock(&smp_reserve_lock);
-
 	set_cpu_online(cpu, false);
 	cpu_clear(cpu, cpu_callin_map);
 	local_irq_disable();
@@ -277,8 +273,6 @@
 	flush_cache_all();
 	local_flush_tlb_all();
 
-	spin_unlock(&smp_reserve_lock);
-
 	return 0;
 }
 
diff --git a/arch/mips/fw/arc/Makefile b/arch/mips/fw/arc/Makefile
index 5314b37..4f349ec 100644
--- a/arch/mips/fw/arc/Makefile
+++ b/arch/mips/fw/arc/Makefile
@@ -8,5 +8,3 @@
 lib-$(CONFIG_ARC_MEMORY)	+= memory.o
 lib-$(CONFIG_ARC_CONSOLE)	+= arc_con.o
 lib-$(CONFIG_ARC_PROMLIB)	+= promlib.o
-
-ccflags-y			:= -Werror
diff --git a/arch/mips/include/asm/clkdev.h b/arch/mips/include/asm/clkdev.h
new file mode 100644
index 0000000..2624754
--- /dev/null
+++ b/arch/mips/include/asm/clkdev.h
@@ -0,0 +1,25 @@
+/*
+ *  based on arch/arm/include/asm/clkdev.h
+ *
+ *  Copyright (C) 2008 Russell King.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * Helper for the clk API to assist looking up a struct clk.
+ */
+#ifndef __ASM_CLKDEV_H
+#define __ASM_CLKDEV_H
+
+#include <linux/slab.h>
+
+#define __clk_get(clk)	({ 1; })
+#define __clk_put(clk)	do { } while (0)
+
+static inline struct clk_lookup_alloc *__clkdev_alloc(size_t size)
+{
+	return kzalloc(size, GFP_KERNEL);
+}
+
+#endif
diff --git a/arch/mips/include/asm/mach-ath79/ar71xx_regs.h b/arch/mips/include/asm/mach-ath79/ar71xx_regs.h
index 2f0becb..1caa78a 100644
--- a/arch/mips/include/asm/mach-ath79/ar71xx_regs.h
+++ b/arch/mips/include/asm/mach-ath79/ar71xx_regs.h
@@ -1,10 +1,11 @@
 /*
  *  Atheros AR71XX/AR724X/AR913X SoC register definitions
  *
+ *  Copyright (C) 2010-2011 Jaiganesh Narayanan <jnarayanan@atheros.com>
  *  Copyright (C) 2008-2010 Gabor Juhos <juhosg@openwrt.org>
  *  Copyright (C) 2008 Imre Kaloz <kaloz@openwrt.org>
  *
- *  Parts of this file are based on Atheros' 2.6.15 BSP
+ *  Parts of this file are based on Atheros' 2.6.15/2.6.31 BSP
  *
  *  This program is free software; you can redistribute it and/or modify it
  *  under the terms of the GNU General Public License version 2 as published
@@ -60,6 +61,9 @@
 #define AR933X_EHCI_BASE	0x1b000000
 #define AR933X_EHCI_SIZE	0x1000
 
+#define AR934X_WMAC_BASE	(AR71XX_APB_BASE + 0x00100000)
+#define AR934X_WMAC_SIZE	0x20000
+
 /*
  * DDR_CTRL block
  */
@@ -91,6 +95,12 @@
 #define AR933X_DDR_REG_FLUSH_USB	0x84
 #define AR933X_DDR_REG_FLUSH_WMAC	0x88
 
+#define AR934X_DDR_REG_FLUSH_GE0	0x9c
+#define AR934X_DDR_REG_FLUSH_GE1	0xa0
+#define AR934X_DDR_REG_FLUSH_USB	0xa4
+#define AR934X_DDR_REG_FLUSH_PCIE	0xa8
+#define AR934X_DDR_REG_FLUSH_WMAC	0xac
+
 /*
  * PLL block
  */
@@ -150,6 +160,41 @@
 #define AR933X_PLL_CLOCK_CTRL_AHB_DIV_SHIFT	15
 #define AR933X_PLL_CLOCK_CTRL_AHB_DIV_MASK	0x7
 
+#define AR934X_PLL_CPU_CONFIG_REG		0x00
+#define AR934X_PLL_DDR_CONFIG_REG		0x04
+#define AR934X_PLL_CPU_DDR_CLK_CTRL_REG		0x08
+
+#define AR934X_PLL_CPU_CONFIG_NFRAC_SHIFT	0
+#define AR934X_PLL_CPU_CONFIG_NFRAC_MASK	0x3f
+#define AR934X_PLL_CPU_CONFIG_NINT_SHIFT	6
+#define AR934X_PLL_CPU_CONFIG_NINT_MASK		0x3f
+#define AR934X_PLL_CPU_CONFIG_REFDIV_SHIFT	12
+#define AR934X_PLL_CPU_CONFIG_REFDIV_MASK	0x1f
+#define AR934X_PLL_CPU_CONFIG_OUTDIV_SHIFT	19
+#define AR934X_PLL_CPU_CONFIG_OUTDIV_MASK	0x3
+
+#define AR934X_PLL_DDR_CONFIG_NFRAC_SHIFT	0
+#define AR934X_PLL_DDR_CONFIG_NFRAC_MASK	0x3ff
+#define AR934X_PLL_DDR_CONFIG_NINT_SHIFT	10
+#define AR934X_PLL_DDR_CONFIG_NINT_MASK		0x3f
+#define AR934X_PLL_DDR_CONFIG_REFDIV_SHIFT	16
+#define AR934X_PLL_DDR_CONFIG_REFDIV_MASK	0x1f
+#define AR934X_PLL_DDR_CONFIG_OUTDIV_SHIFT	23
+#define AR934X_PLL_DDR_CONFIG_OUTDIV_MASK	0x7
+
+#define AR934X_PLL_CPU_DDR_CLK_CTRL_CPU_PLL_BYPASS	BIT(2)
+#define AR934X_PLL_CPU_DDR_CLK_CTRL_DDR_PLL_BYPASS	BIT(3)
+#define AR934X_PLL_CPU_DDR_CLK_CTRL_AHB_PLL_BYPASS	BIT(4)
+#define AR934X_PLL_CPU_DDR_CLK_CTRL_CPU_POST_DIV_SHIFT	5
+#define AR934X_PLL_CPU_DDR_CLK_CTRL_CPU_POST_DIV_MASK	0x1f
+#define AR934X_PLL_CPU_DDR_CLK_CTRL_DDR_POST_DIV_SHIFT	10
+#define AR934X_PLL_CPU_DDR_CLK_CTRL_DDR_POST_DIV_MASK	0x1f
+#define AR934X_PLL_CPU_DDR_CLK_CTRL_AHB_POST_DIV_SHIFT	15
+#define AR934X_PLL_CPU_DDR_CLK_CTRL_AHB_POST_DIV_MASK	0x1f
+#define AR934X_PLL_CPU_DDR_CLK_CTRL_CPUCLK_FROM_CPUPLL	BIT(20)
+#define AR934X_PLL_CPU_DDR_CLK_CTRL_DDRCLK_FROM_DDRPLL	BIT(21)
+#define AR934X_PLL_CPU_DDR_CLK_CTRL_AHBCLK_FROM_DDRPLL	BIT(24)
+
 /*
  * USB_CONFIG block
  */
@@ -185,6 +230,10 @@
 #define AR933X_RESET_REG_RESET_MODULE		0x1c
 #define AR933X_RESET_REG_BOOTSTRAP		0xac
 
+#define AR934X_RESET_REG_RESET_MODULE		0x1c
+#define AR934X_RESET_REG_BOOTSTRAP		0xb0
+#define AR934X_RESET_REG_PCIE_WMAC_INT_STATUS	0xac
+
 #define MISC_INT_ETHSW			BIT(12)
 #define MISC_INT_TIMER4			BIT(10)
 #define MISC_INT_TIMER3			BIT(9)
@@ -241,6 +290,40 @@
 
 #define AR933X_BOOTSTRAP_REF_CLK_40	BIT(0)
 
+#define AR934X_BOOTSTRAP_SW_OPTION8	BIT(23)
+#define AR934X_BOOTSTRAP_SW_OPTION7	BIT(22)
+#define AR934X_BOOTSTRAP_SW_OPTION6	BIT(21)
+#define AR934X_BOOTSTRAP_SW_OPTION5	BIT(20)
+#define AR934X_BOOTSTRAP_SW_OPTION4	BIT(19)
+#define AR934X_BOOTSTRAP_SW_OPTION3	BIT(18)
+#define AR934X_BOOTSTRAP_SW_OPTION2	BIT(17)
+#define AR934X_BOOTSTRAP_SW_OPTION1	BIT(16)
+#define AR934X_BOOTSTRAP_USB_MODE_DEVICE BIT(7)
+#define AR934X_BOOTSTRAP_PCIE_RC	BIT(6)
+#define AR934X_BOOTSTRAP_EJTAG_MODE	BIT(5)
+#define AR934X_BOOTSTRAP_REF_CLK_40	BIT(4)
+#define AR934X_BOOTSTRAP_BOOT_FROM_SPI	BIT(2)
+#define AR934X_BOOTSTRAP_SDRAM_DISABLED	BIT(1)
+#define AR934X_BOOTSTRAP_DDR1		BIT(0)
+
+#define AR934X_PCIE_WMAC_INT_WMAC_MISC		BIT(0)
+#define AR934X_PCIE_WMAC_INT_WMAC_TX		BIT(1)
+#define AR934X_PCIE_WMAC_INT_WMAC_RXLP		BIT(2)
+#define AR934X_PCIE_WMAC_INT_WMAC_RXHP		BIT(3)
+#define AR934X_PCIE_WMAC_INT_PCIE_RC		BIT(4)
+#define AR934X_PCIE_WMAC_INT_PCIE_RC0		BIT(5)
+#define AR934X_PCIE_WMAC_INT_PCIE_RC1		BIT(6)
+#define AR934X_PCIE_WMAC_INT_PCIE_RC2		BIT(7)
+#define AR934X_PCIE_WMAC_INT_PCIE_RC3		BIT(8)
+#define AR934X_PCIE_WMAC_INT_WMAC_ALL \
+	(AR934X_PCIE_WMAC_INT_WMAC_MISC | AR934X_PCIE_WMAC_INT_WMAC_TX | \
+	 AR934X_PCIE_WMAC_INT_WMAC_RXLP | AR934X_PCIE_WMAC_INT_WMAC_RXHP)
+
+#define AR934X_PCIE_WMAC_INT_PCIE_ALL \
+	(AR934X_PCIE_WMAC_INT_PCIE_RC | AR934X_PCIE_WMAC_INT_PCIE_RC0 | \
+	 AR934X_PCIE_WMAC_INT_PCIE_RC1 | AR934X_PCIE_WMAC_INT_PCIE_RC2 | \
+	 AR934X_PCIE_WMAC_INT_PCIE_RC3)
+
 #define REV_ID_MAJOR_MASK		0xfff0
 #define REV_ID_MAJOR_AR71XX		0x00a0
 #define REV_ID_MAJOR_AR913X		0x00b0
@@ -249,6 +332,9 @@
 #define REV_ID_MAJOR_AR7242		0x1100
 #define REV_ID_MAJOR_AR9330		0x0110
 #define REV_ID_MAJOR_AR9331		0x1110
+#define REV_ID_MAJOR_AR9341		0x0120
+#define REV_ID_MAJOR_AR9342		0x1120
+#define REV_ID_MAJOR_AR9344		0x2120
 
 #define AR71XX_REV_ID_MINOR_MASK	0x3
 #define AR71XX_REV_ID_MINOR_AR7130	0x0
@@ -267,6 +353,8 @@
 
 #define AR724X_REV_ID_REVISION_MASK	0x3
 
+#define AR934X_REV_ID_REVISION_MASK     0xf
+
 /*
  * SPI block
  */
@@ -308,5 +396,6 @@
 #define AR724X_GPIO_COUNT		18
 #define AR913X_GPIO_COUNT		22
 #define AR933X_GPIO_COUNT		30
+#define AR934X_GPIO_COUNT		23
 
 #endif /* __ASM_MACH_AR71XX_REGS_H */
diff --git a/arch/mips/include/asm/mach-ath79/ath79.h b/arch/mips/include/asm/mach-ath79/ath79.h
index 6d0c6c9..4f248c3 100644
--- a/arch/mips/include/asm/mach-ath79/ath79.h
+++ b/arch/mips/include/asm/mach-ath79/ath79.h
@@ -29,6 +29,9 @@
 	ATH79_SOC_AR9132,
 	ATH79_SOC_AR9330,
 	ATH79_SOC_AR9331,
+	ATH79_SOC_AR9341,
+	ATH79_SOC_AR9342,
+	ATH79_SOC_AR9344,
 };
 
 extern enum ath79_soc_type ath79_soc;
@@ -75,6 +78,26 @@
 		ath79_soc == ATH79_SOC_AR9331);
 }
 
+static inline int soc_is_ar9341(void)
+{
+	return (ath79_soc == ATH79_SOC_AR9341);
+}
+
+static inline int soc_is_ar9342(void)
+{
+	return (ath79_soc == ATH79_SOC_AR9342);
+}
+
+static inline int soc_is_ar9344(void)
+{
+	return (ath79_soc == ATH79_SOC_AR9344);
+}
+
+static inline int soc_is_ar934x(void)
+{
+	return soc_is_ar9341() || soc_is_ar9342() || soc_is_ar9344();
+}
+
 extern void __iomem *ath79_ddr_base;
 extern void __iomem *ath79_pll_base;
 extern void __iomem *ath79_reset_base;
diff --git a/arch/mips/include/asm/mach-ath79/irq.h b/arch/mips/include/asm/mach-ath79/irq.h
index 519958f..0968f69 100644
--- a/arch/mips/include/asm/mach-ath79/irq.h
+++ b/arch/mips/include/asm/mach-ath79/irq.h
@@ -10,11 +10,19 @@
 #define __ASM_MACH_ATH79_IRQ_H
 
 #define MIPS_CPU_IRQ_BASE	0
-#define NR_IRQS			40
+#define NR_IRQS			48
 
 #define ATH79_MISC_IRQ_BASE	8
 #define ATH79_MISC_IRQ_COUNT	32
 
+#define ATH79_PCI_IRQ_BASE	(ATH79_MISC_IRQ_BASE + ATH79_MISC_IRQ_COUNT)
+#define ATH79_PCI_IRQ_COUNT	6
+#define ATH79_PCI_IRQ(_x)	(ATH79_PCI_IRQ_BASE + (_x))
+
+#define ATH79_IP2_IRQ_BASE	(ATH79_PCI_IRQ_BASE + ATH79_PCI_IRQ_COUNT)
+#define ATH79_IP2_IRQ_COUNT	2
+#define ATH79_IP2_IRQ(_x)	(ATH79_IP2_IRQ_BASE + (_x))
+
 #define ATH79_CPU_IRQ_IP2	(MIPS_CPU_IRQ_BASE + 2)
 #define ATH79_CPU_IRQ_USB	(MIPS_CPU_IRQ_BASE + 3)
 #define ATH79_CPU_IRQ_GE0	(MIPS_CPU_IRQ_BASE + 4)
diff --git a/arch/mips/include/asm/mach-ath79/pci-ath724x.h b/arch/mips/include/asm/mach-ath79/pci-ath724x.h
deleted file mode 100644
index 454885f..0000000
--- a/arch/mips/include/asm/mach-ath79/pci-ath724x.h
+++ /dev/null
@@ -1,21 +0,0 @@
-/*
- *  Atheros 724x PCI support
- *
- *  Copyright (C) 2011 René Bolldorf <xsecute@googlemail.com>
- *
- *  This program is free software; you can redistribute it and/or modify it
- *  under the terms of the GNU General Public License version 2 as published
- *  by the Free Software Foundation.
- */
-
-#ifndef __ASM_MACH_ATH79_PCI_ATH724X_H
-#define __ASM_MACH_ATH79_PCI_ATH724X_H
-
-struct ath724x_pci_data {
-	int irq;
-	void *pdata;
-};
-
-void ath724x_pci_add_data(struct ath724x_pci_data *data, int size);
-
-#endif /* __ASM_MACH_ATH79_PCI_ATH724X_H */
diff --git a/arch/mips/include/asm/mach-ath79/pci.h b/arch/mips/include/asm/mach-ath79/pci.h
new file mode 100644
index 0000000..7868f7f
--- /dev/null
+++ b/arch/mips/include/asm/mach-ath79/pci.h
@@ -0,0 +1,28 @@
+/*
+ *  Atheros AR71XX/AR724X PCI support
+ *
+ *  Copyright (C) 2011 René Bolldorf <xsecute@googlemail.com>
+ *  Copyright (C) 2008-2011 Gabor Juhos <juhosg@openwrt.org>
+ *  Copyright (C) 2008 Imre Kaloz <kaloz@openwrt.org>
+ *
+ *  This program is free software; you can redistribute it and/or modify it
+ *  under the terms of the GNU General Public License version 2 as published
+ *  by the Free Software Foundation.
+ */
+
+#ifndef __ASM_MACH_ATH79_PCI_H
+#define __ASM_MACH_ATH79_PCI_H
+
+#if defined(CONFIG_PCI) && defined(CONFIG_SOC_AR71XX)
+int ar71xx_pcibios_init(void);
+#else
+static inline int ar71xx_pcibios_init(void) { return 0; }
+#endif
+
+#if defined(CONFIG_PCI_AR724X)
+int ar724x_pcibios_init(int irq);
+#else
+static inline int ar724x_pcibios_init(int irq) { return 0; }
+#endif
+
+#endif /* __ASM_MACH_ATH79_PCI_H */
diff --git a/arch/mips/include/asm/mach-bcm63xx/bcm63xx_gpio.h b/arch/mips/include/asm/mach-bcm63xx/bcm63xx_gpio.h
index 3d5de96..1d7dd96 100644
--- a/arch/mips/include/asm/mach-bcm63xx/bcm63xx_gpio.h
+++ b/arch/mips/include/asm/mach-bcm63xx/bcm63xx_gpio.h
@@ -2,6 +2,7 @@
 #define BCM63XX_GPIO_H
 
 #include <linux/init.h>
+#include <bcm63xx_cpu.h>
 
 int __init bcm63xx_gpio_init(void);
 
diff --git a/arch/mips/include/asm/mach-lantiq/falcon/falcon_irq.h b/arch/mips/include/asm/mach-lantiq/falcon/falcon_irq.h
new file mode 100644
index 0000000..318f982
--- /dev/null
+++ b/arch/mips/include/asm/mach-lantiq/falcon/falcon_irq.h
@@ -0,0 +1,23 @@
+/*
+ *  This program is free software; you can redistribute it and/or modify it
+ *  under the terms of the GNU General Public License version 2 as published
+ *  by the Free Software Foundation.
+ *
+ *  Copyright (C) 2010 Thomas Langer <thomas.langer@lantiq.com>
+ */
+
+#ifndef _FALCON_IRQ__
+#define _FALCON_IRQ__
+
+#define INT_NUM_IRQ0			8
+#define INT_NUM_IM0_IRL0		(INT_NUM_IRQ0 + 0)
+#define INT_NUM_IM1_IRL0		(INT_NUM_IM0_IRL0 + 32)
+#define INT_NUM_IM2_IRL0		(INT_NUM_IM1_IRL0 + 32)
+#define INT_NUM_IM3_IRL0		(INT_NUM_IM2_IRL0 + 32)
+#define INT_NUM_IM4_IRL0		(INT_NUM_IM3_IRL0 + 32)
+#define INT_NUM_EXTRA_START		(INT_NUM_IM4_IRL0 + 32)
+#define INT_NUM_IM_OFFSET		(INT_NUM_IM1_IRL0 - INT_NUM_IM0_IRL0)
+
+#define MIPS_CPU_TIMER_IRQ			7
+
+#endif /* _FALCON_IRQ__ */
diff --git a/arch/mips/include/asm/mach-lantiq/falcon/irq.h b/arch/mips/include/asm/mach-lantiq/falcon/irq.h
new file mode 100644
index 0000000..2caccd9
--- /dev/null
+++ b/arch/mips/include/asm/mach-lantiq/falcon/irq.h
@@ -0,0 +1,18 @@
+/*
+ *  This program is free software; you can redistribute it and/or modify it
+ *  under the terms of the GNU General Public License version 2 as published
+ *  by the Free Software Foundation.
+ *
+ *  Copyright (C) 2011 Thomas Langer <thomas.langer@lantiq.com>
+ */
+
+#ifndef __FALCON_IRQ_H
+#define __FALCON_IRQ_H
+
+#include <falcon_irq.h>
+
+#define NR_IRQS 328
+
+#include_next <irq.h>
+
+#endif
diff --git a/arch/mips/include/asm/mach-lantiq/falcon/lantiq_soc.h b/arch/mips/include/asm/mach-lantiq/falcon/lantiq_soc.h
new file mode 100644
index 0000000..b385252
--- /dev/null
+++ b/arch/mips/include/asm/mach-lantiq/falcon/lantiq_soc.h
@@ -0,0 +1,67 @@
+/*
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published
+ * by the Free Software Foundation.
+ *
+ * Copyright (C) 2010 John Crispin <blogic@openwrt.org>
+ */
+
+#ifndef _LTQ_FALCON_H__
+#define _LTQ_FALCON_H__
+
+#ifdef CONFIG_SOC_FALCON
+
+#include <linux/pinctrl/pinctrl.h>
+#include <lantiq.h>
+
+/* Chip IDs */
+#define SOC_ID_FALCON		0x01B8
+
+/* SoC Types */
+#define SOC_TYPE_FALCON		0x01
+
+/*
+ * during early_printk no ioremap is possible at this early stage,
+ * so let's use KSEG1 instead
+ */
+#define LTQ_ASC0_BASE_ADDR	0x1E100C00
+#define LTQ_EARLY_ASC		KSEG1ADDR(LTQ_ASC0_BASE_ADDR)
+
+/* WDT */
+#define LTQ_RST_CAUSE_WDTRST	0x0002
+
+/* CHIP ID */
+#define LTQ_STATUS_BASE_ADDR	0x1E802000
+
+#define FALCON_CHIPID		((u32 *)(KSEG1 + LTQ_STATUS_BASE_ADDR + 0x0c))
+#define FALCON_CHIPTYPE		((u32 *)(KSEG1 + LTQ_STATUS_BASE_ADDR + 0x38))
+#define FALCON_CHIPCONF		((u32 *)(KSEG1 + LTQ_STATUS_BASE_ADDR + 0x40))
+
+/* SYSCTL - start/stop/restart/configure/... different parts of the SoC */
+#define SYSCTL_SYS1		0
+#define SYSCTL_SYSETH		1
+#define SYSCTL_SYSGPE		2
+
+/* BOOT_SEL - find what boot media we have */
+#define BS_FLASH		0x1
+#define BS_SPI                  0x4
+
+/* global register ranges */
+extern __iomem void *ltq_ebu_membase;
+extern __iomem void *ltq_sys1_membase;
+#define ltq_ebu_w32(x, y)	ltq_w32((x), ltq_ebu_membase + (y))
+#define ltq_ebu_r32(x)		ltq_r32(ltq_ebu_membase + (x))
+
+#define ltq_sys1_w32(x, y)	ltq_w32((x), ltq_sys1_membase + (y))
+#define ltq_sys1_r32(x)		ltq_r32(ltq_sys1_membase + (x))
+#define ltq_sys1_w32_mask(clear, set, reg)   \
+	ltq_sys1_w32((ltq_sys1_r32(reg) & ~(clear)) | (set), reg)
+
+/*
+ * to keep the irq code generic we need to define this to 0 as falcon
+ * has no EIU/EBU
+ */
+#define LTQ_EBU_PCC_ISTAT	0
+
+#endif /* CONFIG_SOC_FALCON */
+#endif /* _LTQ_FALCON_H__ */
diff --git a/arch/mips/include/asm/mach-lantiq/gpio.h b/arch/mips/include/asm/mach-lantiq/gpio.h
new file mode 100644
index 0000000..f79505b
--- /dev/null
+++ b/arch/mips/include/asm/mach-lantiq/gpio.h
@@ -0,0 +1,16 @@
+#ifndef __ASM_MIPS_MACH_LANTIQ_GPIO_H
+#define __ASM_MIPS_MACH_LANTIQ_GPIO_H
+
+static inline int gpio_to_irq(unsigned int gpio)
+{
+	return -1;
+}
+
+#define gpio_get_value __gpio_get_value
+#define gpio_set_value __gpio_set_value
+
+#define gpio_cansleep __gpio_cansleep
+
+#include <asm-generic/gpio.h>
+
+#endif
diff --git a/arch/mips/include/asm/mach-lantiq/lantiq.h b/arch/mips/include/asm/mach-lantiq/lantiq.h
index ce2f029..5e8a6e9 100644
--- a/arch/mips/include/asm/mach-lantiq/lantiq.h
+++ b/arch/mips/include/asm/mach-lantiq/lantiq.h
@@ -9,6 +9,8 @@
 #define _LANTIQ_H__
 
 #include <linux/irq.h>
+#include <linux/device.h>
+#include <linux/clk.h>
 
 /* generic reg access functions */
 #define ltq_r32(reg)		__raw_readl(reg)
@@ -21,25 +23,9 @@
 /* register access macros for EBU and CGU */
 #define ltq_ebu_w32(x, y)	ltq_w32((x), ltq_ebu_membase + (y))
 #define ltq_ebu_r32(x)		ltq_r32(ltq_ebu_membase + (x))
-#define ltq_cgu_w32(x, y)	ltq_w32((x), ltq_cgu_membase + (y))
-#define ltq_cgu_r32(x)		ltq_r32(ltq_cgu_membase + (x))
-
+#define ltq_ebu_w32_mask(x, y, z) \
+	ltq_w32_mask(x, y, ltq_ebu_membase + (z))
 extern __iomem void *ltq_ebu_membase;
-extern __iomem void *ltq_cgu_membase;
-
-extern unsigned int ltq_get_cpu_ver(void);
-extern unsigned int ltq_get_soc_type(void);
-
-/* clock speeds */
-#define CLOCK_60M	60000000
-#define CLOCK_83M	83333333
-#define CLOCK_111M	111111111
-#define CLOCK_133M	133333333
-#define CLOCK_167M	166666667
-#define CLOCK_200M	200000000
-#define CLOCK_266M	266666666
-#define CLOCK_333M	333333333
-#define CLOCK_400M	400000000
 
 /* spinlock all ebu i/o */
 extern spinlock_t ebu_lock;
@@ -49,15 +35,21 @@
 extern void ltq_mask_and_ack_irq(struct irq_data *data);
 extern void ltq_enable_irq(struct irq_data *data);
 
+/* clock handling */
+extern int clk_activate(struct clk *clk);
+extern void clk_deactivate(struct clk *clk);
+extern struct clk *clk_get_cpu(void);
+extern struct clk *clk_get_fpi(void);
+extern struct clk *clk_get_io(void);
+
+/* find out what boot source we have */
+extern unsigned char ltq_boot_select(void);
 /* find out what caused the last cpu reset */
 extern int ltq_reset_cause(void);
-#define LTQ_RST_CAUSE_WDTRST	0x20
 
 #define IOPORT_RESOURCE_START	0x10000000
 #define IOPORT_RESOURCE_END	0xffffffff
 #define IOMEM_RESOURCE_START	0x10000000
 #define IOMEM_RESOURCE_END	0xffffffff
-#define LTQ_FLASH_START		0x10000000
-#define LTQ_FLASH_MAX		0x04000000
 
 #endif
diff --git a/arch/mips/include/asm/mach-lantiq/lantiq_platform.h b/arch/mips/include/asm/mach-lantiq/lantiq_platform.h
index a305f1d..e23bf7c 100644
--- a/arch/mips/include/asm/mach-lantiq/lantiq_platform.h
+++ b/arch/mips/include/asm/mach-lantiq/lantiq_platform.h
@@ -9,41 +9,8 @@
 #ifndef _LANTIQ_PLATFORM_H__
 #define _LANTIQ_PLATFORM_H__
 
-#include <linux/mtd/partitions.h>
 #include <linux/socket.h>
 
-/* struct used to pass info to the pci core */
-enum {
-	PCI_CLOCK_INT = 0,
-	PCI_CLOCK_EXT
-};
-
-#define PCI_EXIN0	0x0001
-#define PCI_EXIN1	0x0002
-#define PCI_EXIN2	0x0004
-#define PCI_EXIN3	0x0008
-#define PCI_EXIN4	0x0010
-#define PCI_EXIN5	0x0020
-#define PCI_EXIN_MAX	6
-
-#define PCI_GNT1	0x0040
-#define PCI_GNT2	0x0080
-#define PCI_GNT3	0x0100
-#define PCI_GNT4	0x0200
-
-#define PCI_REQ1	0x0400
-#define PCI_REQ2	0x0800
-#define PCI_REQ3	0x1000
-#define PCI_REQ4	0x2000
-#define PCI_REQ_SHIFT	10
-#define PCI_REQ_MASK	0xf
-
-struct ltq_pci_data {
-	int clock;
-	int gpio;
-	int irq[16];
-};
-
 /* struct used to pass info to network drivers */
 struct ltq_eth_data {
 	struct sockaddr mac;
diff --git a/arch/mips/include/asm/mach-lantiq/xway/lantiq_irq.h b/arch/mips/include/asm/mach-lantiq/xway/lantiq_irq.h
index b4465a8..aa0b3b8 100644
--- a/arch/mips/include/asm/mach-lantiq/xway/lantiq_irq.h
+++ b/arch/mips/include/asm/mach-lantiq/xway/lantiq_irq.h
@@ -17,50 +17,8 @@
 #define INT_NUM_IM4_IRL0	(INT_NUM_IRQ0 + 128)
 #define INT_NUM_IM_OFFSET	(INT_NUM_IM1_IRL0 - INT_NUM_IM0_IRL0)
 
-#define LTQ_ASC_TIR(x)		(INT_NUM_IM3_IRL0 + (x * 8))
-#define LTQ_ASC_RIR(x)		(INT_NUM_IM3_IRL0 + (x * 8) + 1)
-#define LTQ_ASC_EIR(x)		(INT_NUM_IM3_IRL0 + (x * 8) + 2)
-
-#define LTQ_ASC_ASE_TIR		INT_NUM_IM2_IRL0
-#define LTQ_ASC_ASE_RIR		(INT_NUM_IM2_IRL0 + 2)
-#define LTQ_ASC_ASE_EIR		(INT_NUM_IM2_IRL0 + 3)
-
-#define LTQ_SSC_TIR		(INT_NUM_IM0_IRL0 + 15)
-#define LTQ_SSC_RIR		(INT_NUM_IM0_IRL0 + 14)
-#define LTQ_SSC_EIR		(INT_NUM_IM0_IRL0 + 16)
-
-#define LTQ_MEI_DYING_GASP_INT	(INT_NUM_IM1_IRL0 + 21)
-#define LTQ_MEI_INT		(INT_NUM_IM1_IRL0 + 23)
-
-#define LTQ_TIMER6_INT		(INT_NUM_IM1_IRL0 + 23)
-#define LTQ_USB_INT		(INT_NUM_IM1_IRL0 + 22)
-#define LTQ_USB_OC_INT		(INT_NUM_IM4_IRL0 + 23)
-
-#define MIPS_CPU_TIMER_IRQ		7
-
 #define LTQ_DMA_CH0_INT		(INT_NUM_IM2_IRL0)
-#define LTQ_DMA_CH1_INT		(INT_NUM_IM2_IRL0 + 1)
-#define LTQ_DMA_CH2_INT		(INT_NUM_IM2_IRL0 + 2)
-#define LTQ_DMA_CH3_INT		(INT_NUM_IM2_IRL0 + 3)
-#define LTQ_DMA_CH4_INT		(INT_NUM_IM2_IRL0 + 4)
-#define LTQ_DMA_CH5_INT		(INT_NUM_IM2_IRL0 + 5)
-#define LTQ_DMA_CH6_INT		(INT_NUM_IM2_IRL0 + 6)
-#define LTQ_DMA_CH7_INT		(INT_NUM_IM2_IRL0 + 7)
-#define LTQ_DMA_CH8_INT		(INT_NUM_IM2_IRL0 + 8)
-#define LTQ_DMA_CH9_INT		(INT_NUM_IM2_IRL0 + 9)
-#define LTQ_DMA_CH10_INT	(INT_NUM_IM2_IRL0 + 10)
-#define LTQ_DMA_CH11_INT	(INT_NUM_IM2_IRL0 + 11)
-#define LTQ_DMA_CH12_INT	(INT_NUM_IM2_IRL0 + 25)
-#define LTQ_DMA_CH13_INT	(INT_NUM_IM2_IRL0 + 26)
-#define LTQ_DMA_CH14_INT	(INT_NUM_IM2_IRL0 + 27)
-#define LTQ_DMA_CH15_INT	(INT_NUM_IM2_IRL0 + 28)
-#define LTQ_DMA_CH16_INT	(INT_NUM_IM2_IRL0 + 29)
-#define LTQ_DMA_CH17_INT	(INT_NUM_IM2_IRL0 + 30)
-#define LTQ_DMA_CH18_INT	(INT_NUM_IM2_IRL0 + 16)
-#define LTQ_DMA_CH19_INT	(INT_NUM_IM2_IRL0 + 21)
 
-#define LTQ_PPE_MBOX_INT	(INT_NUM_IM2_IRL0 + 24)
-
-#define INT_NUM_IM4_IRL14	(INT_NUM_IM4_IRL0 + 14)
+#define MIPS_CPU_TIMER_IRQ	7
 
 #endif
diff --git a/arch/mips/include/asm/mach-lantiq/xway/lantiq_soc.h b/arch/mips/include/asm/mach-lantiq/xway/lantiq_soc.h
index 8a3c6be..6a2df70 100644
--- a/arch/mips/include/asm/mach-lantiq/xway/lantiq_soc.h
+++ b/arch/mips/include/asm/mach-lantiq/xway/lantiq_soc.h
@@ -17,38 +17,56 @@
 #define SOC_ID_DANUBE1		0x129
 #define SOC_ID_DANUBE2		0x12B
 #define SOC_ID_TWINPASS		0x12D
-#define SOC_ID_AMAZON_SE	0x152
+#define SOC_ID_AMAZON_SE_1	0x152 /* 50601 */
+#define SOC_ID_AMAZON_SE_2	0x153 /* 50600 */
 #define SOC_ID_ARX188		0x16C
-#define SOC_ID_ARX168		0x16D
+#define SOC_ID_ARX168_1		0x16D
+#define SOC_ID_ARX168_2		0x16E
 #define SOC_ID_ARX182		0x16F
+#define SOC_ID_GRX188		0x170
+#define SOC_ID_GRX168		0x171
 
-/* SoC Types */
+#define SOC_ID_VRX288		0x1C0 /* v1.1 */
+#define SOC_ID_VRX282		0x1C1 /* v1.1 */
+#define SOC_ID_VRX268		0x1C2 /* v1.1 */
+#define SOC_ID_GRX268		0x1C8 /* v1.1 */
+#define SOC_ID_GRX288		0x1C9 /* v1.1 */
+#define SOC_ID_VRX288_2		0x00B /* v1.2 */
+#define SOC_ID_VRX268_2		0x00C /* v1.2 */
+#define SOC_ID_GRX288_2		0x00D /* v1.2 */
+#define SOC_ID_GRX282_2		0x00E /* v1.2 */
+
+/* SoC Types */
 #define SOC_TYPE_DANUBE		0x01
 #define SOC_TYPE_TWINPASS	0x02
 #define SOC_TYPE_AR9		0x03
-#define SOC_TYPE_VR9		0x04
-#define SOC_TYPE_AMAZON_SE	0x05
+#define SOC_TYPE_VR9		0x04 /* v1.1 */
+#define SOC_TYPE_VR9_2		0x05 /* v1.2 */
+#define SOC_TYPE_AMAZON_SE	0x06
 
-/* ASC0/1 - serial port */
-#define LTQ_ASC0_BASE_ADDR	0x1E100400
+/* BOOT_SEL - find what boot media we have */
+#define BS_EXT_ROM		0x0
+#define BS_FLASH		0x1
+#define BS_MII0			0x2
+#define BS_PCI			0x3
+#define BS_UART1		0x4
+#define BS_SPI			0x5
+#define BS_NAND			0x6
+#define BS_RMII0		0x7
+
+/* helpers used to access the CGU */
+#define ltq_cgu_w32(x, y)	ltq_w32((x), ltq_cgu_membase + (y))
+#define ltq_cgu_r32(x)		ltq_r32(ltq_cgu_membase + (x))
+extern __iomem void *ltq_cgu_membase;
+
+/*
+ * during early_printk no ioremap is possible,
+ * so let's use KSEG1 instead
+ */
 #define LTQ_ASC1_BASE_ADDR	0x1E100C00
-#define LTQ_ASC_SIZE		0x400
-
-/* RCU - reset control unit */
-#define LTQ_RCU_BASE_ADDR	0x1F203000
-#define LTQ_RCU_SIZE		0x1000
-
-/* GPTU - general purpose timer unit */
-#define LTQ_GPTU_BASE_ADDR	0x18000300
-#define LTQ_GPTU_SIZE		0x100
+#define LTQ_EARLY_ASC		KSEG1ADDR(LTQ_ASC1_BASE_ADDR)
 
 /* EBU - external bus unit */
-#define LTQ_EBU_GPIO_START	0x14000000
-#define LTQ_EBU_GPIO_SIZE	0x1000
-
-#define LTQ_EBU_BASE_ADDR	0x1E105300
-#define LTQ_EBU_SIZE		0x100
-
 #define LTQ_EBU_BUSCON0		0x0060
 #define LTQ_EBU_PCC_CON		0x0090
 #define LTQ_EBU_PCC_IEN		0x00A4
@@ -57,85 +75,17 @@
 #define LTQ_EBU_ADDRSEL1	0x0024
 #define EBU_WRDIS		0x80000000
 
-/* CGU - clock generation unit */
-#define LTQ_CGU_BASE_ADDR	0x1F103000
-#define LTQ_CGU_SIZE		0x1000
-
-/* ICU - interrupt control unit */
-#define LTQ_ICU_BASE_ADDR	0x1F880200
-#define LTQ_ICU_SIZE		0x100
-
-/* EIU - external interrupt unit */
-#define LTQ_EIU_BASE_ADDR	0x1F101000
-#define LTQ_EIU_SIZE		0x1000
-
-/* PMU - power management unit */
-#define LTQ_PMU_BASE_ADDR	0x1F102000
-#define LTQ_PMU_SIZE		0x1000
-
-#define PMU_DMA			0x0020
-#define PMU_USB			0x8041
-#define PMU_LED			0x0800
-#define PMU_GPT			0x1000
-#define PMU_PPE			0x2000
-#define PMU_FPI			0x4000
-#define PMU_SWITCH		0x10000000
-
-/* ETOP - ethernet */
-#define LTQ_ETOP_BASE_ADDR	0x1E180000
-#define LTQ_ETOP_SIZE		0x40000
-
-/* DMA */
-#define LTQ_DMA_BASE_ADDR	0x1E104100
-#define LTQ_DMA_SIZE		0x800
-
-/* PCI */
-#define PCI_CR_BASE_ADDR	0x1E105400
-#define PCI_CR_SIZE		0x400
-
 /* WDT */
-#define LTQ_WDT_BASE_ADDR	0x1F8803F0
-#define LTQ_WDT_SIZE		0x10
-
-/* STP - serial to parallel conversion unit */
-#define LTQ_STP_BASE_ADDR	0x1E100BB0
-#define LTQ_STP_SIZE		0x40
-
-/* GPIO */
-#define LTQ_GPIO0_BASE_ADDR	0x1E100B10
-#define LTQ_GPIO1_BASE_ADDR	0x1E100B40
-#define LTQ_GPIO2_BASE_ADDR	0x1E100B70
-#define LTQ_GPIO_SIZE		0x30
-
-/* SSC */
-#define LTQ_SSC_BASE_ADDR	0x1e100800
-#define LTQ_SSC_SIZE		0x100
-
-/* MEI - dsl core */
-#define LTQ_MEI_BASE_ADDR	0x1E116000
-
-/* DEU - data encryption unit */
-#define LTQ_DEU_BASE_ADDR	0x1E103100
+#define LTQ_RST_CAUSE_WDTRST	0x20
 
 /* MPS - multi processor unit (voice) */
 #define LTQ_MPS_BASE_ADDR	(KSEG1 + 0x1F107000)
 #define LTQ_MPS_CHIPID		((u32 *)(LTQ_MPS_BASE_ADDR + 0x0344))
 
 /* request a non-gpio and set the PIO config */
-extern int  ltq_gpio_request(unsigned int pin, unsigned int alt0,
-	unsigned int alt1, unsigned int dir, const char *name);
+#define PMU_PPE			 BIT(13)
 extern void ltq_pmu_enable(unsigned int module);
 extern void ltq_pmu_disable(unsigned int module);
 
-static inline int ltq_is_ar9(void)
-{
-	return (ltq_get_soc_type() == SOC_TYPE_AR9);
-}
-
-static inline int ltq_is_vr9(void)
-{
-	return (ltq_get_soc_type() == SOC_TYPE_VR9);
-}
-
 #endif /* CONFIG_SOC_TYPE_XWAY */
 #endif /* _LTQ_XWAY_H__ */
diff --git a/arch/mips/include/asm/mips-boards/generic.h b/arch/mips/include/asm/mips-boards/generic.h
index 46c0856..6e23ceb 100644
--- a/arch/mips/include/asm/mips-boards/generic.h
+++ b/arch/mips/include/asm/mips-boards/generic.h
@@ -93,8 +93,4 @@
 #define mips_pcibios_init() do { } while (0)
 #endif
 
-#ifdef CONFIG_KGDB
-extern void kgdb_config(void);
-#endif
-
 #endif  /* __ASM_MIPS_BOARDS_GENERIC_H */
diff --git a/arch/mips/include/asm/module.h b/arch/mips/include/asm/module.h
index 7467d1d..5300080 100644
--- a/arch/mips/include/asm/module.h
+++ b/arch/mips/include/asm/module.h
@@ -2,6 +2,7 @@
 #define _ASM_MODULE_H
 
 #include <linux/list.h>
+#include <linux/elf.h>
 #include <asm/uaccess.h>
 
 struct mod_arch_specific {
diff --git a/arch/mips/include/asm/octeon/cvmx-pcieep-defs.h b/arch/mips/include/asm/octeon/cvmx-pcieep-defs.h
deleted file mode 100644
index d553f8e..0000000
--- a/arch/mips/include/asm/octeon/cvmx-pcieep-defs.h
+++ /dev/null
@@ -1,1365 +0,0 @@
-/***********************license start***************
- * Author: Cavium Networks
- *
- * Contact: support@caviumnetworks.com
- * This file is part of the OCTEON SDK
- *
- * Copyright (c) 2003-2008 Cavium Networks
- *
- * This file is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License, Version 2, as
- * published by the Free Software Foundation.
- *
- * This file is distributed in the hope that it will be useful, but
- * AS-IS and WITHOUT ANY WARRANTY; without even the implied warranty
- * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE, TITLE, or
- * NONINFRINGEMENT.  See the GNU General Public License for more
- * details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this file; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
- * or visit http://www.gnu.org/licenses/.
- *
- * This file may also be available under a different license from Cavium.
- * Contact Cavium Networks for more information
- ***********************license end**************************************/
-
-#ifndef __CVMX_PCIEEP_DEFS_H__
-#define __CVMX_PCIEEP_DEFS_H__
-
-#define CVMX_PCIEEP_CFG000 \
-	 (0x0000000000000000ull)
-#define CVMX_PCIEEP_CFG001 \
-	 (0x0000000000000004ull)
-#define CVMX_PCIEEP_CFG002 \
-	 (0x0000000000000008ull)
-#define CVMX_PCIEEP_CFG003 \
-	 (0x000000000000000Cull)
-#define CVMX_PCIEEP_CFG004 \
-	 (0x0000000000000010ull)
-#define CVMX_PCIEEP_CFG004_MASK \
-	 (0x0000000080000010ull)
-#define CVMX_PCIEEP_CFG005 \
-	 (0x0000000000000014ull)
-#define CVMX_PCIEEP_CFG005_MASK \
-	 (0x0000000080000014ull)
-#define CVMX_PCIEEP_CFG006 \
-	 (0x0000000000000018ull)
-#define CVMX_PCIEEP_CFG006_MASK \
-	 (0x0000000080000018ull)
-#define CVMX_PCIEEP_CFG007 \
-	 (0x000000000000001Cull)
-#define CVMX_PCIEEP_CFG007_MASK \
-	 (0x000000008000001Cull)
-#define CVMX_PCIEEP_CFG008 \
-	 (0x0000000000000020ull)
-#define CVMX_PCIEEP_CFG008_MASK \
-	 (0x0000000080000020ull)
-#define CVMX_PCIEEP_CFG009 \
-	 (0x0000000000000024ull)
-#define CVMX_PCIEEP_CFG009_MASK \
-	 (0x0000000080000024ull)
-#define CVMX_PCIEEP_CFG010 \
-	 (0x0000000000000028ull)
-#define CVMX_PCIEEP_CFG011 \
-	 (0x000000000000002Cull)
-#define CVMX_PCIEEP_CFG012 \
-	 (0x0000000000000030ull)
-#define CVMX_PCIEEP_CFG012_MASK \
-	 (0x0000000080000030ull)
-#define CVMX_PCIEEP_CFG013 \
-	 (0x0000000000000034ull)
-#define CVMX_PCIEEP_CFG015 \
-	 (0x000000000000003Cull)
-#define CVMX_PCIEEP_CFG016 \
-	 (0x0000000000000040ull)
-#define CVMX_PCIEEP_CFG017 \
-	 (0x0000000000000044ull)
-#define CVMX_PCIEEP_CFG020 \
-	 (0x0000000000000050ull)
-#define CVMX_PCIEEP_CFG021 \
-	 (0x0000000000000054ull)
-#define CVMX_PCIEEP_CFG022 \
-	 (0x0000000000000058ull)
-#define CVMX_PCIEEP_CFG023 \
-	 (0x000000000000005Cull)
-#define CVMX_PCIEEP_CFG028 \
-	 (0x0000000000000070ull)
-#define CVMX_PCIEEP_CFG029 \
-	 (0x0000000000000074ull)
-#define CVMX_PCIEEP_CFG030 \
-	 (0x0000000000000078ull)
-#define CVMX_PCIEEP_CFG031 \
-	 (0x000000000000007Cull)
-#define CVMX_PCIEEP_CFG032 \
-	 (0x0000000000000080ull)
-#define CVMX_PCIEEP_CFG033 \
-	 (0x0000000000000084ull)
-#define CVMX_PCIEEP_CFG034 \
-	 (0x0000000000000088ull)
-#define CVMX_PCIEEP_CFG037 \
-	 (0x0000000000000094ull)
-#define CVMX_PCIEEP_CFG038 \
-	 (0x0000000000000098ull)
-#define CVMX_PCIEEP_CFG039 \
-	 (0x000000000000009Cull)
-#define CVMX_PCIEEP_CFG040 \
-	 (0x00000000000000A0ull)
-#define CVMX_PCIEEP_CFG041 \
-	 (0x00000000000000A4ull)
-#define CVMX_PCIEEP_CFG042 \
-	 (0x00000000000000A8ull)
-#define CVMX_PCIEEP_CFG064 \
-	 (0x0000000000000100ull)
-#define CVMX_PCIEEP_CFG065 \
-	 (0x0000000000000104ull)
-#define CVMX_PCIEEP_CFG066 \
-	 (0x0000000000000108ull)
-#define CVMX_PCIEEP_CFG067 \
-	 (0x000000000000010Cull)
-#define CVMX_PCIEEP_CFG068 \
-	 (0x0000000000000110ull)
-#define CVMX_PCIEEP_CFG069 \
-	 (0x0000000000000114ull)
-#define CVMX_PCIEEP_CFG070 \
-	 (0x0000000000000118ull)
-#define CVMX_PCIEEP_CFG071 \
-	 (0x000000000000011Cull)
-#define CVMX_PCIEEP_CFG072 \
-	 (0x0000000000000120ull)
-#define CVMX_PCIEEP_CFG073 \
-	 (0x0000000000000124ull)
-#define CVMX_PCIEEP_CFG074 \
-	 (0x0000000000000128ull)
-#define CVMX_PCIEEP_CFG448 \
-	 (0x0000000000000700ull)
-#define CVMX_PCIEEP_CFG449 \
-	 (0x0000000000000704ull)
-#define CVMX_PCIEEP_CFG450 \
-	 (0x0000000000000708ull)
-#define CVMX_PCIEEP_CFG451 \
-	 (0x000000000000070Cull)
-#define CVMX_PCIEEP_CFG452 \
-	 (0x0000000000000710ull)
-#define CVMX_PCIEEP_CFG453 \
-	 (0x0000000000000714ull)
-#define CVMX_PCIEEP_CFG454 \
-	 (0x0000000000000718ull)
-#define CVMX_PCIEEP_CFG455 \
-	 (0x000000000000071Cull)
-#define CVMX_PCIEEP_CFG456 \
-	 (0x0000000000000720ull)
-#define CVMX_PCIEEP_CFG458 \
-	 (0x0000000000000728ull)
-#define CVMX_PCIEEP_CFG459 \
-	 (0x000000000000072Cull)
-#define CVMX_PCIEEP_CFG460 \
-	 (0x0000000000000730ull)
-#define CVMX_PCIEEP_CFG461 \
-	 (0x0000000000000734ull)
-#define CVMX_PCIEEP_CFG462 \
-	 (0x0000000000000738ull)
-#define CVMX_PCIEEP_CFG463 \
-	 (0x000000000000073Cull)
-#define CVMX_PCIEEP_CFG464 \
-	 (0x0000000000000740ull)
-#define CVMX_PCIEEP_CFG465 \
-	 (0x0000000000000744ull)
-#define CVMX_PCIEEP_CFG466 \
-	 (0x0000000000000748ull)
-#define CVMX_PCIEEP_CFG467 \
-	 (0x000000000000074Cull)
-#define CVMX_PCIEEP_CFG468 \
-	 (0x0000000000000750ull)
-#define CVMX_PCIEEP_CFG490 \
-	 (0x00000000000007A8ull)
-#define CVMX_PCIEEP_CFG491 \
-	 (0x00000000000007ACull)
-#define CVMX_PCIEEP_CFG492 \
-	 (0x00000000000007B0ull)
-#define CVMX_PCIEEP_CFG516 \
-	 (0x0000000000000810ull)
-#define CVMX_PCIEEP_CFG517 \
-	 (0x0000000000000814ull)
-
-union cvmx_pcieep_cfg000 {
-	uint32_t u32;
-	struct cvmx_pcieep_cfg000_s {
-		uint32_t devid:16;
-		uint32_t vendid:16;
-	} s;
-	struct cvmx_pcieep_cfg000_s cn52xx;
-	struct cvmx_pcieep_cfg000_s cn52xxp1;
-	struct cvmx_pcieep_cfg000_s cn56xx;
-	struct cvmx_pcieep_cfg000_s cn56xxp1;
-};
-
-union cvmx_pcieep_cfg001 {
-	uint32_t u32;
-	struct cvmx_pcieep_cfg001_s {
-		uint32_t dpe:1;
-		uint32_t sse:1;
-		uint32_t rma:1;
-		uint32_t rta:1;
-		uint32_t sta:1;
-		uint32_t devt:2;
-		uint32_t mdpe:1;
-		uint32_t fbb:1;
-		uint32_t reserved_22_22:1;
-		uint32_t m66:1;
-		uint32_t cl:1;
-		uint32_t i_stat:1;
-		uint32_t reserved_11_18:8;
-		uint32_t i_dis:1;
-		uint32_t fbbe:1;
-		uint32_t see:1;
-		uint32_t ids_wcc:1;
-		uint32_t per:1;
-		uint32_t vps:1;
-		uint32_t mwice:1;
-		uint32_t scse:1;
-		uint32_t me:1;
-		uint32_t msae:1;
-		uint32_t isae:1;
-	} s;
-	struct cvmx_pcieep_cfg001_s cn52xx;
-	struct cvmx_pcieep_cfg001_s cn52xxp1;
-	struct cvmx_pcieep_cfg001_s cn56xx;
-	struct cvmx_pcieep_cfg001_s cn56xxp1;
-};
-
-union cvmx_pcieep_cfg002 {
-	uint32_t u32;
-	struct cvmx_pcieep_cfg002_s {
-		uint32_t bcc:8;
-		uint32_t sc:8;
-		uint32_t pi:8;
-		uint32_t rid:8;
-	} s;
-	struct cvmx_pcieep_cfg002_s cn52xx;
-	struct cvmx_pcieep_cfg002_s cn52xxp1;
-	struct cvmx_pcieep_cfg002_s cn56xx;
-	struct cvmx_pcieep_cfg002_s cn56xxp1;
-};
-
-union cvmx_pcieep_cfg003 {
-	uint32_t u32;
-	struct cvmx_pcieep_cfg003_s {
-		uint32_t bist:8;
-		uint32_t mfd:1;
-		uint32_t chf:7;
-		uint32_t lt:8;
-		uint32_t cls:8;
-	} s;
-	struct cvmx_pcieep_cfg003_s cn52xx;
-	struct cvmx_pcieep_cfg003_s cn52xxp1;
-	struct cvmx_pcieep_cfg003_s cn56xx;
-	struct cvmx_pcieep_cfg003_s cn56xxp1;
-};
-
-union cvmx_pcieep_cfg004 {
-	uint32_t u32;
-	struct cvmx_pcieep_cfg004_s {
-		uint32_t lbab:18;
-		uint32_t reserved_4_13:10;
-		uint32_t pf:1;
-		uint32_t typ:2;
-		uint32_t mspc:1;
-	} s;
-	struct cvmx_pcieep_cfg004_s cn52xx;
-	struct cvmx_pcieep_cfg004_s cn52xxp1;
-	struct cvmx_pcieep_cfg004_s cn56xx;
-	struct cvmx_pcieep_cfg004_s cn56xxp1;
-};
-
-union cvmx_pcieep_cfg004_mask {
-	uint32_t u32;
-	struct cvmx_pcieep_cfg004_mask_s {
-		uint32_t lmask:31;
-		uint32_t enb:1;
-	} s;
-	struct cvmx_pcieep_cfg004_mask_s cn52xx;
-	struct cvmx_pcieep_cfg004_mask_s cn52xxp1;
-	struct cvmx_pcieep_cfg004_mask_s cn56xx;
-	struct cvmx_pcieep_cfg004_mask_s cn56xxp1;
-};
-
-union cvmx_pcieep_cfg005 {
-	uint32_t u32;
-	struct cvmx_pcieep_cfg005_s {
-		uint32_t ubab:32;
-	} s;
-	struct cvmx_pcieep_cfg005_s cn52xx;
-	struct cvmx_pcieep_cfg005_s cn52xxp1;
-	struct cvmx_pcieep_cfg005_s cn56xx;
-	struct cvmx_pcieep_cfg005_s cn56xxp1;
-};
-
-union cvmx_pcieep_cfg005_mask {
-	uint32_t u32;
-	struct cvmx_pcieep_cfg005_mask_s {
-		uint32_t umask:32;
-	} s;
-	struct cvmx_pcieep_cfg005_mask_s cn52xx;
-	struct cvmx_pcieep_cfg005_mask_s cn52xxp1;
-	struct cvmx_pcieep_cfg005_mask_s cn56xx;
-	struct cvmx_pcieep_cfg005_mask_s cn56xxp1;
-};
-
-union cvmx_pcieep_cfg006 {
-	uint32_t u32;
-	struct cvmx_pcieep_cfg006_s {
-		uint32_t lbab:6;
-		uint32_t reserved_4_25:22;
-		uint32_t pf:1;
-		uint32_t typ:2;
-		uint32_t mspc:1;
-	} s;
-	struct cvmx_pcieep_cfg006_s cn52xx;
-	struct cvmx_pcieep_cfg006_s cn52xxp1;
-	struct cvmx_pcieep_cfg006_s cn56xx;
-	struct cvmx_pcieep_cfg006_s cn56xxp1;
-};
-
-union cvmx_pcieep_cfg006_mask {
-	uint32_t u32;
-	struct cvmx_pcieep_cfg006_mask_s {
-		uint32_t lmask:31;
-		uint32_t enb:1;
-	} s;
-	struct cvmx_pcieep_cfg006_mask_s cn52xx;
-	struct cvmx_pcieep_cfg006_mask_s cn52xxp1;
-	struct cvmx_pcieep_cfg006_mask_s cn56xx;
-	struct cvmx_pcieep_cfg006_mask_s cn56xxp1;
-};
-
-union cvmx_pcieep_cfg007 {
-	uint32_t u32;
-	struct cvmx_pcieep_cfg007_s {
-		uint32_t ubab:32;
-	} s;
-	struct cvmx_pcieep_cfg007_s cn52xx;
-	struct cvmx_pcieep_cfg007_s cn52xxp1;
-	struct cvmx_pcieep_cfg007_s cn56xx;
-	struct cvmx_pcieep_cfg007_s cn56xxp1;
-};
-
-union cvmx_pcieep_cfg007_mask {
-	uint32_t u32;
-	struct cvmx_pcieep_cfg007_mask_s {
-		uint32_t umask:32;
-	} s;
-	struct cvmx_pcieep_cfg007_mask_s cn52xx;
-	struct cvmx_pcieep_cfg007_mask_s cn52xxp1;
-	struct cvmx_pcieep_cfg007_mask_s cn56xx;
-	struct cvmx_pcieep_cfg007_mask_s cn56xxp1;
-};
-
-union cvmx_pcieep_cfg008 {
-	uint32_t u32;
-	struct cvmx_pcieep_cfg008_s {
-		uint32_t reserved_4_31:28;
-		uint32_t pf:1;
-		uint32_t typ:2;
-		uint32_t mspc:1;
-	} s;
-	struct cvmx_pcieep_cfg008_s cn52xx;
-	struct cvmx_pcieep_cfg008_s cn52xxp1;
-	struct cvmx_pcieep_cfg008_s cn56xx;
-	struct cvmx_pcieep_cfg008_s cn56xxp1;
-};
-
-union cvmx_pcieep_cfg008_mask {
-	uint32_t u32;
-	struct cvmx_pcieep_cfg008_mask_s {
-		uint32_t lmask:31;
-		uint32_t enb:1;
-	} s;
-	struct cvmx_pcieep_cfg008_mask_s cn52xx;
-	struct cvmx_pcieep_cfg008_mask_s cn52xxp1;
-	struct cvmx_pcieep_cfg008_mask_s cn56xx;
-	struct cvmx_pcieep_cfg008_mask_s cn56xxp1;
-};
-
-union cvmx_pcieep_cfg009 {
-	uint32_t u32;
-	struct cvmx_pcieep_cfg009_s {
-		uint32_t ubab:25;
-		uint32_t reserved_0_6:7;
-	} s;
-	struct cvmx_pcieep_cfg009_s cn52xx;
-	struct cvmx_pcieep_cfg009_s cn52xxp1;
-	struct cvmx_pcieep_cfg009_s cn56xx;
-	struct cvmx_pcieep_cfg009_s cn56xxp1;
-};
-
-union cvmx_pcieep_cfg009_mask {
-	uint32_t u32;
-	struct cvmx_pcieep_cfg009_mask_s {
-		uint32_t umask:32;
-	} s;
-	struct cvmx_pcieep_cfg009_mask_s cn52xx;
-	struct cvmx_pcieep_cfg009_mask_s cn52xxp1;
-	struct cvmx_pcieep_cfg009_mask_s cn56xx;
-	struct cvmx_pcieep_cfg009_mask_s cn56xxp1;
-};
-
-union cvmx_pcieep_cfg010 {
-	uint32_t u32;
-	struct cvmx_pcieep_cfg010_s {
-		uint32_t cisp:32;
-	} s;
-	struct cvmx_pcieep_cfg010_s cn52xx;
-	struct cvmx_pcieep_cfg010_s cn52xxp1;
-	struct cvmx_pcieep_cfg010_s cn56xx;
-	struct cvmx_pcieep_cfg010_s cn56xxp1;
-};
-
-union cvmx_pcieep_cfg011 {
-	uint32_t u32;
-	struct cvmx_pcieep_cfg011_s {
-		uint32_t ssid:16;
-		uint32_t ssvid:16;
-	} s;
-	struct cvmx_pcieep_cfg011_s cn52xx;
-	struct cvmx_pcieep_cfg011_s cn52xxp1;
-	struct cvmx_pcieep_cfg011_s cn56xx;
-	struct cvmx_pcieep_cfg011_s cn56xxp1;
-};
-
-union cvmx_pcieep_cfg012 {
-	uint32_t u32;
-	struct cvmx_pcieep_cfg012_s {
-		uint32_t eraddr:16;
-		uint32_t reserved_1_15:15;
-		uint32_t er_en:1;
-	} s;
-	struct cvmx_pcieep_cfg012_s cn52xx;
-	struct cvmx_pcieep_cfg012_s cn52xxp1;
-	struct cvmx_pcieep_cfg012_s cn56xx;
-	struct cvmx_pcieep_cfg012_s cn56xxp1;
-};
-
-union cvmx_pcieep_cfg012_mask {
-	uint32_t u32;
-	struct cvmx_pcieep_cfg012_mask_s {
-		uint32_t mask:31;
-		uint32_t enb:1;
-	} s;
-	struct cvmx_pcieep_cfg012_mask_s cn52xx;
-	struct cvmx_pcieep_cfg012_mask_s cn52xxp1;
-	struct cvmx_pcieep_cfg012_mask_s cn56xx;
-	struct cvmx_pcieep_cfg012_mask_s cn56xxp1;
-};
-
-union cvmx_pcieep_cfg013 {
-	uint32_t u32;
-	struct cvmx_pcieep_cfg013_s {
-		uint32_t reserved_8_31:24;
-		uint32_t cp:8;
-	} s;
-	struct cvmx_pcieep_cfg013_s cn52xx;
-	struct cvmx_pcieep_cfg013_s cn52xxp1;
-	struct cvmx_pcieep_cfg013_s cn56xx;
-	struct cvmx_pcieep_cfg013_s cn56xxp1;
-};
-
-union cvmx_pcieep_cfg015 {
-	uint32_t u32;
-	struct cvmx_pcieep_cfg015_s {
-		uint32_t ml:8;
-		uint32_t mg:8;
-		uint32_t inta:8;
-		uint32_t il:8;
-	} s;
-	struct cvmx_pcieep_cfg015_s cn52xx;
-	struct cvmx_pcieep_cfg015_s cn52xxp1;
-	struct cvmx_pcieep_cfg015_s cn56xx;
-	struct cvmx_pcieep_cfg015_s cn56xxp1;
-};
-
-union cvmx_pcieep_cfg016 {
-	uint32_t u32;
-	struct cvmx_pcieep_cfg016_s {
-		uint32_t pmes:5;
-		uint32_t d2s:1;
-		uint32_t d1s:1;
-		uint32_t auxc:3;
-		uint32_t dsi:1;
-		uint32_t reserved_20_20:1;
-		uint32_t pme_clock:1;
-		uint32_t pmsv:3;
-		uint32_t ncp:8;
-		uint32_t pmcid:8;
-	} s;
-	struct cvmx_pcieep_cfg016_s cn52xx;
-	struct cvmx_pcieep_cfg016_s cn52xxp1;
-	struct cvmx_pcieep_cfg016_s cn56xx;
-	struct cvmx_pcieep_cfg016_s cn56xxp1;
-};
-
-union cvmx_pcieep_cfg017 {
-	uint32_t u32;
-	struct cvmx_pcieep_cfg017_s {
-		uint32_t pmdia:8;
-		uint32_t bpccee:1;
-		uint32_t bd3h:1;
-		uint32_t reserved_16_21:6;
-		uint32_t pmess:1;
-		uint32_t pmedsia:2;
-		uint32_t pmds:4;
-		uint32_t pmeens:1;
-		uint32_t reserved_4_7:4;
-		uint32_t nsr:1;
-		uint32_t reserved_2_2:1;
-		uint32_t ps:2;
-	} s;
-	struct cvmx_pcieep_cfg017_s cn52xx;
-	struct cvmx_pcieep_cfg017_s cn52xxp1;
-	struct cvmx_pcieep_cfg017_s cn56xx;
-	struct cvmx_pcieep_cfg017_s cn56xxp1;
-};
-
-union cvmx_pcieep_cfg020 {
-	uint32_t u32;
-	struct cvmx_pcieep_cfg020_s {
-		uint32_t reserved_24_31:8;
-		uint32_t m64:1;
-		uint32_t mme:3;
-		uint32_t mmc:3;
-		uint32_t msien:1;
-		uint32_t ncp:8;
-		uint32_t msicid:8;
-	} s;
-	struct cvmx_pcieep_cfg020_s cn52xx;
-	struct cvmx_pcieep_cfg020_s cn52xxp1;
-	struct cvmx_pcieep_cfg020_s cn56xx;
-	struct cvmx_pcieep_cfg020_s cn56xxp1;
-};
-
-union cvmx_pcieep_cfg021 {
-	uint32_t u32;
-	struct cvmx_pcieep_cfg021_s {
-		uint32_t lmsi:30;
-		uint32_t reserved_0_1:2;
-	} s;
-	struct cvmx_pcieep_cfg021_s cn52xx;
-	struct cvmx_pcieep_cfg021_s cn52xxp1;
-	struct cvmx_pcieep_cfg021_s cn56xx;
-	struct cvmx_pcieep_cfg021_s cn56xxp1;
-};
-
-union cvmx_pcieep_cfg022 {
-	uint32_t u32;
-	struct cvmx_pcieep_cfg022_s {
-		uint32_t umsi:32;
-	} s;
-	struct cvmx_pcieep_cfg022_s cn52xx;
-	struct cvmx_pcieep_cfg022_s cn52xxp1;
-	struct cvmx_pcieep_cfg022_s cn56xx;
-	struct cvmx_pcieep_cfg022_s cn56xxp1;
-};
-
-union cvmx_pcieep_cfg023 {
-	uint32_t u32;
-	struct cvmx_pcieep_cfg023_s {
-		uint32_t reserved_16_31:16;
-		uint32_t msimd:16;
-	} s;
-	struct cvmx_pcieep_cfg023_s cn52xx;
-	struct cvmx_pcieep_cfg023_s cn52xxp1;
-	struct cvmx_pcieep_cfg023_s cn56xx;
-	struct cvmx_pcieep_cfg023_s cn56xxp1;
-};
-
-union cvmx_pcieep_cfg028 {
-	uint32_t u32;
-	struct cvmx_pcieep_cfg028_s {
-		uint32_t reserved_30_31:2;
-		uint32_t imn:5;
-		uint32_t si:1;
-		uint32_t dpt:4;
-		uint32_t pciecv:4;
-		uint32_t ncp:8;
-		uint32_t pcieid:8;
-	} s;
-	struct cvmx_pcieep_cfg028_s cn52xx;
-	struct cvmx_pcieep_cfg028_s cn52xxp1;
-	struct cvmx_pcieep_cfg028_s cn56xx;
-	struct cvmx_pcieep_cfg028_s cn56xxp1;
-};
-
-union cvmx_pcieep_cfg029 {
-	uint32_t u32;
-	struct cvmx_pcieep_cfg029_s {
-		uint32_t reserved_28_31:4;
-		uint32_t cspls:2;
-		uint32_t csplv:8;
-		uint32_t reserved_16_17:2;
-		uint32_t rber:1;
-		uint32_t reserved_12_14:3;
-		uint32_t el1al:3;
-		uint32_t el0al:3;
-		uint32_t etfs:1;
-		uint32_t pfs:2;
-		uint32_t mpss:3;
-	} s;
-	struct cvmx_pcieep_cfg029_s cn52xx;
-	struct cvmx_pcieep_cfg029_s cn52xxp1;
-	struct cvmx_pcieep_cfg029_s cn56xx;
-	struct cvmx_pcieep_cfg029_s cn56xxp1;
-};
-
-union cvmx_pcieep_cfg030 {
-	uint32_t u32;
-	struct cvmx_pcieep_cfg030_s {
-		uint32_t reserved_22_31:10;
-		uint32_t tp:1;
-		uint32_t ap_d:1;
-		uint32_t ur_d:1;
-		uint32_t fe_d:1;
-		uint32_t nfe_d:1;
-		uint32_t ce_d:1;
-		uint32_t reserved_15_15:1;
-		uint32_t mrrs:3;
-		uint32_t ns_en:1;
-		uint32_t ap_en:1;
-		uint32_t pf_en:1;
-		uint32_t etf_en:1;
-		uint32_t mps:3;
-		uint32_t ro_en:1;
-		uint32_t ur_en:1;
-		uint32_t fe_en:1;
-		uint32_t nfe_en:1;
-		uint32_t ce_en:1;
-	} s;
-	struct cvmx_pcieep_cfg030_s cn52xx;
-	struct cvmx_pcieep_cfg030_s cn52xxp1;
-	struct cvmx_pcieep_cfg030_s cn56xx;
-	struct cvmx_pcieep_cfg030_s cn56xxp1;
-};
-
-union cvmx_pcieep_cfg031 {
-	uint32_t u32;
-	struct cvmx_pcieep_cfg031_s {
-		uint32_t pnum:8;
-		uint32_t reserved_22_23:2;
-		uint32_t lbnc:1;
-		uint32_t dllarc:1;
-		uint32_t sderc:1;
-		uint32_t cpm:1;
-		uint32_t l1el:3;
-		uint32_t l0el:3;
-		uint32_t aslpms:2;
-		uint32_t mlw:6;
-		uint32_t mls:4;
-	} s;
-	struct cvmx_pcieep_cfg031_s cn52xx;
-	struct cvmx_pcieep_cfg031_s cn52xxp1;
-	struct cvmx_pcieep_cfg031_s cn56xx;
-	struct cvmx_pcieep_cfg031_s cn56xxp1;
-};
-
-union cvmx_pcieep_cfg032 {
-	uint32_t u32;
-	struct cvmx_pcieep_cfg032_s {
-		uint32_t reserved_30_31:2;
-		uint32_t dlla:1;
-		uint32_t scc:1;
-		uint32_t lt:1;
-		uint32_t reserved_26_26:1;
-		uint32_t nlw:6;
-		uint32_t ls:4;
-		uint32_t reserved_10_15:6;
-		uint32_t hawd:1;
-		uint32_t ecpm:1;
-		uint32_t es:1;
-		uint32_t ccc:1;
-		uint32_t rl:1;
-		uint32_t ld:1;
-		uint32_t rcb:1;
-		uint32_t reserved_2_2:1;
-		uint32_t aslpc:2;
-	} s;
-	struct cvmx_pcieep_cfg032_s cn52xx;
-	struct cvmx_pcieep_cfg032_s cn52xxp1;
-	struct cvmx_pcieep_cfg032_s cn56xx;
-	struct cvmx_pcieep_cfg032_s cn56xxp1;
-};
-
-union cvmx_pcieep_cfg033 {
-	uint32_t u32;
-	struct cvmx_pcieep_cfg033_s {
-		uint32_t ps_num:13;
-		uint32_t nccs:1;
-		uint32_t emip:1;
-		uint32_t sp_ls:2;
-		uint32_t sp_lv:8;
-		uint32_t hp_c:1;
-		uint32_t hp_s:1;
-		uint32_t pip:1;
-		uint32_t aip:1;
-		uint32_t mrlsp:1;
-		uint32_t pcp:1;
-		uint32_t abp:1;
-	} s;
-	struct cvmx_pcieep_cfg033_s cn52xx;
-	struct cvmx_pcieep_cfg033_s cn52xxp1;
-	struct cvmx_pcieep_cfg033_s cn56xx;
-	struct cvmx_pcieep_cfg033_s cn56xxp1;
-};
-
-union cvmx_pcieep_cfg034 {
-	uint32_t u32;
-	struct cvmx_pcieep_cfg034_s {
-		uint32_t reserved_25_31:7;
-		uint32_t dlls_c:1;
-		uint32_t emis:1;
-		uint32_t pds:1;
-		uint32_t mrlss:1;
-		uint32_t ccint_d:1;
-		uint32_t pd_c:1;
-		uint32_t mrls_c:1;
-		uint32_t pf_d:1;
-		uint32_t abp_d:1;
-		uint32_t reserved_13_15:3;
-		uint32_t dlls_en:1;
-		uint32_t emic:1;
-		uint32_t pcc:1;
-		uint32_t pic:2;
-		uint32_t aic:2;
-		uint32_t hpint_en:1;
-		uint32_t ccint_en:1;
-		uint32_t pd_en:1;
-		uint32_t mrls_en:1;
-		uint32_t pf_en:1;
-		uint32_t abp_en:1;
-	} s;
-	struct cvmx_pcieep_cfg034_s cn52xx;
-	struct cvmx_pcieep_cfg034_s cn52xxp1;
-	struct cvmx_pcieep_cfg034_s cn56xx;
-	struct cvmx_pcieep_cfg034_s cn56xxp1;
-};
-
-union cvmx_pcieep_cfg037 {
-	uint32_t u32;
-	struct cvmx_pcieep_cfg037_s {
-		uint32_t reserved_5_31:27;
-		uint32_t ctds:1;
-		uint32_t ctrs:4;
-	} s;
-	struct cvmx_pcieep_cfg037_s cn52xx;
-	struct cvmx_pcieep_cfg037_s cn52xxp1;
-	struct cvmx_pcieep_cfg037_s cn56xx;
-	struct cvmx_pcieep_cfg037_s cn56xxp1;
-};
-
-union cvmx_pcieep_cfg038 {
-	uint32_t u32;
-	struct cvmx_pcieep_cfg038_s {
-		uint32_t reserved_5_31:27;
-		uint32_t ctd:1;
-		uint32_t ctv:4;
-	} s;
-	struct cvmx_pcieep_cfg038_s cn52xx;
-	struct cvmx_pcieep_cfg038_s cn52xxp1;
-	struct cvmx_pcieep_cfg038_s cn56xx;
-	struct cvmx_pcieep_cfg038_s cn56xxp1;
-};
-
-union cvmx_pcieep_cfg039 {
-	uint32_t u32;
-	struct cvmx_pcieep_cfg039_s {
-		uint32_t reserved_0_31:32;
-	} s;
-	struct cvmx_pcieep_cfg039_s cn52xx;
-	struct cvmx_pcieep_cfg039_s cn52xxp1;
-	struct cvmx_pcieep_cfg039_s cn56xx;
-	struct cvmx_pcieep_cfg039_s cn56xxp1;
-};
-
-union cvmx_pcieep_cfg040 {
-	uint32_t u32;
-	struct cvmx_pcieep_cfg040_s {
-		uint32_t reserved_0_31:32;
-	} s;
-	struct cvmx_pcieep_cfg040_s cn52xx;
-	struct cvmx_pcieep_cfg040_s cn52xxp1;
-	struct cvmx_pcieep_cfg040_s cn56xx;
-	struct cvmx_pcieep_cfg040_s cn56xxp1;
-};
-
-union cvmx_pcieep_cfg041 {
-	uint32_t u32;
-	struct cvmx_pcieep_cfg041_s {
-		uint32_t reserved_0_31:32;
-	} s;
-	struct cvmx_pcieep_cfg041_s cn52xx;
-	struct cvmx_pcieep_cfg041_s cn52xxp1;
-	struct cvmx_pcieep_cfg041_s cn56xx;
-	struct cvmx_pcieep_cfg041_s cn56xxp1;
-};
-
-union cvmx_pcieep_cfg042 {
-	uint32_t u32;
-	struct cvmx_pcieep_cfg042_s {
-		uint32_t reserved_0_31:32;
-	} s;
-	struct cvmx_pcieep_cfg042_s cn52xx;
-	struct cvmx_pcieep_cfg042_s cn52xxp1;
-	struct cvmx_pcieep_cfg042_s cn56xx;
-	struct cvmx_pcieep_cfg042_s cn56xxp1;
-};
-
-union cvmx_pcieep_cfg064 {
-	uint32_t u32;
-	struct cvmx_pcieep_cfg064_s {
-		uint32_t nco:12;
-		uint32_t cv:4;
-		uint32_t pcieec:16;
-	} s;
-	struct cvmx_pcieep_cfg064_s cn52xx;
-	struct cvmx_pcieep_cfg064_s cn52xxp1;
-	struct cvmx_pcieep_cfg064_s cn56xx;
-	struct cvmx_pcieep_cfg064_s cn56xxp1;
-};
-
-union cvmx_pcieep_cfg065 {
-	uint32_t u32;
-	struct cvmx_pcieep_cfg065_s {
-		uint32_t reserved_21_31:11;
-		uint32_t ures:1;
-		uint32_t ecrces:1;
-		uint32_t mtlps:1;
-		uint32_t ros:1;
-		uint32_t ucs:1;
-		uint32_t cas:1;
-		uint32_t cts:1;
-		uint32_t fcpes:1;
-		uint32_t ptlps:1;
-		uint32_t reserved_6_11:6;
-		uint32_t sdes:1;
-		uint32_t dlpes:1;
-		uint32_t reserved_0_3:4;
-	} s;
-	struct cvmx_pcieep_cfg065_s cn52xx;
-	struct cvmx_pcieep_cfg065_s cn52xxp1;
-	struct cvmx_pcieep_cfg065_s cn56xx;
-	struct cvmx_pcieep_cfg065_s cn56xxp1;
-};
-
-union cvmx_pcieep_cfg066 {
-	uint32_t u32;
-	struct cvmx_pcieep_cfg066_s {
-		uint32_t reserved_21_31:11;
-		uint32_t urem:1;
-		uint32_t ecrcem:1;
-		uint32_t mtlpm:1;
-		uint32_t rom:1;
-		uint32_t ucm:1;
-		uint32_t cam:1;
-		uint32_t ctm:1;
-		uint32_t fcpem:1;
-		uint32_t ptlpm:1;
-		uint32_t reserved_6_11:6;
-		uint32_t sdem:1;
-		uint32_t dlpem:1;
-		uint32_t reserved_0_3:4;
-	} s;
-	struct cvmx_pcieep_cfg066_s cn52xx;
-	struct cvmx_pcieep_cfg066_s cn52xxp1;
-	struct cvmx_pcieep_cfg066_s cn56xx;
-	struct cvmx_pcieep_cfg066_s cn56xxp1;
-};
-
-union cvmx_pcieep_cfg067 {
-	uint32_t u32;
-	struct cvmx_pcieep_cfg067_s {
-		uint32_t reserved_21_31:11;
-		uint32_t ures:1;
-		uint32_t ecrces:1;
-		uint32_t mtlps:1;
-		uint32_t ros:1;
-		uint32_t ucs:1;
-		uint32_t cas:1;
-		uint32_t cts:1;
-		uint32_t fcpes:1;
-		uint32_t ptlps:1;
-		uint32_t reserved_6_11:6;
-		uint32_t sdes:1;
-		uint32_t dlpes:1;
-		uint32_t reserved_0_3:4;
-	} s;
-	struct cvmx_pcieep_cfg067_s cn52xx;
-	struct cvmx_pcieep_cfg067_s cn52xxp1;
-	struct cvmx_pcieep_cfg067_s cn56xx;
-	struct cvmx_pcieep_cfg067_s cn56xxp1;
-};
-
-union cvmx_pcieep_cfg068 {
-	uint32_t u32;
-	struct cvmx_pcieep_cfg068_s {
-		uint32_t reserved_14_31:18;
-		uint32_t anfes:1;
-		uint32_t rtts:1;
-		uint32_t reserved_9_11:3;
-		uint32_t rnrs:1;
-		uint32_t bdllps:1;
-		uint32_t btlps:1;
-		uint32_t reserved_1_5:5;
-		uint32_t res:1;
-	} s;
-	struct cvmx_pcieep_cfg068_s cn52xx;
-	struct cvmx_pcieep_cfg068_s cn52xxp1;
-	struct cvmx_pcieep_cfg068_s cn56xx;
-	struct cvmx_pcieep_cfg068_s cn56xxp1;
-};
-
-union cvmx_pcieep_cfg069 {
-	uint32_t u32;
-	struct cvmx_pcieep_cfg069_s {
-		uint32_t reserved_14_31:18;
-		uint32_t anfem:1;
-		uint32_t rttm:1;
-		uint32_t reserved_9_11:3;
-		uint32_t rnrm:1;
-		uint32_t bdllpm:1;
-		uint32_t btlpm:1;
-		uint32_t reserved_1_5:5;
-		uint32_t rem:1;
-	} s;
-	struct cvmx_pcieep_cfg069_s cn52xx;
-	struct cvmx_pcieep_cfg069_s cn52xxp1;
-	struct cvmx_pcieep_cfg069_s cn56xx;
-	struct cvmx_pcieep_cfg069_s cn56xxp1;
-};
-
-union cvmx_pcieep_cfg070 {
-	uint32_t u32;
-	struct cvmx_pcieep_cfg070_s {
-		uint32_t reserved_9_31:23;
-		uint32_t ce:1;
-		uint32_t cc:1;
-		uint32_t ge:1;
-		uint32_t gc:1;
-		uint32_t fep:5;
-	} s;
-	struct cvmx_pcieep_cfg070_s cn52xx;
-	struct cvmx_pcieep_cfg070_s cn52xxp1;
-	struct cvmx_pcieep_cfg070_s cn56xx;
-	struct cvmx_pcieep_cfg070_s cn56xxp1;
-};
-
-union cvmx_pcieep_cfg071 {
-	uint32_t u32;
-	struct cvmx_pcieep_cfg071_s {
-		uint32_t dword1:32;
-	} s;
-	struct cvmx_pcieep_cfg071_s cn52xx;
-	struct cvmx_pcieep_cfg071_s cn52xxp1;
-	struct cvmx_pcieep_cfg071_s cn56xx;
-	struct cvmx_pcieep_cfg071_s cn56xxp1;
-};
-
-union cvmx_pcieep_cfg072 {
-	uint32_t u32;
-	struct cvmx_pcieep_cfg072_s {
-		uint32_t dword2:32;
-	} s;
-	struct cvmx_pcieep_cfg072_s cn52xx;
-	struct cvmx_pcieep_cfg072_s cn52xxp1;
-	struct cvmx_pcieep_cfg072_s cn56xx;
-	struct cvmx_pcieep_cfg072_s cn56xxp1;
-};
-
-union cvmx_pcieep_cfg073 {
-	uint32_t u32;
-	struct cvmx_pcieep_cfg073_s {
-		uint32_t dword3:32;
-	} s;
-	struct cvmx_pcieep_cfg073_s cn52xx;
-	struct cvmx_pcieep_cfg073_s cn52xxp1;
-	struct cvmx_pcieep_cfg073_s cn56xx;
-	struct cvmx_pcieep_cfg073_s cn56xxp1;
-};
-
-union cvmx_pcieep_cfg074 {
-	uint32_t u32;
-	struct cvmx_pcieep_cfg074_s {
-		uint32_t dword4:32;
-	} s;
-	struct cvmx_pcieep_cfg074_s cn52xx;
-	struct cvmx_pcieep_cfg074_s cn52xxp1;
-	struct cvmx_pcieep_cfg074_s cn56xx;
-	struct cvmx_pcieep_cfg074_s cn56xxp1;
-};
-
-union cvmx_pcieep_cfg448 {
-	uint32_t u32;
-	struct cvmx_pcieep_cfg448_s {
-		uint32_t rtl:16;
-		uint32_t rtltl:16;
-	} s;
-	struct cvmx_pcieep_cfg448_s cn52xx;
-	struct cvmx_pcieep_cfg448_s cn52xxp1;
-	struct cvmx_pcieep_cfg448_s cn56xx;
-	struct cvmx_pcieep_cfg448_s cn56xxp1;
-};
-
-union cvmx_pcieep_cfg449 {
-	uint32_t u32;
-	struct cvmx_pcieep_cfg449_s {
-		uint32_t omr:32;
-	} s;
-	struct cvmx_pcieep_cfg449_s cn52xx;
-	struct cvmx_pcieep_cfg449_s cn52xxp1;
-	struct cvmx_pcieep_cfg449_s cn56xx;
-	struct cvmx_pcieep_cfg449_s cn56xxp1;
-};
-
-union cvmx_pcieep_cfg450 {
-	uint32_t u32;
-	struct cvmx_pcieep_cfg450_s {
-		uint32_t lpec:8;
-		uint32_t reserved_22_23:2;
-		uint32_t link_state:6;
-		uint32_t force_link:1;
-		uint32_t reserved_8_14:7;
-		uint32_t link_num:8;
-	} s;
-	struct cvmx_pcieep_cfg450_s cn52xx;
-	struct cvmx_pcieep_cfg450_s cn52xxp1;
-	struct cvmx_pcieep_cfg450_s cn56xx;
-	struct cvmx_pcieep_cfg450_s cn56xxp1;
-};
-
-union cvmx_pcieep_cfg451 {
-	uint32_t u32;
-	struct cvmx_pcieep_cfg451_s {
-		uint32_t reserved_30_31:2;
-		uint32_t l1el:3;
-		uint32_t l0el:3;
-		uint32_t n_fts_cc:8;
-		uint32_t n_fts:8;
-		uint32_t ack_freq:8;
-	} s;
-	struct cvmx_pcieep_cfg451_s cn52xx;
-	struct cvmx_pcieep_cfg451_s cn52xxp1;
-	struct cvmx_pcieep_cfg451_s cn56xx;
-	struct cvmx_pcieep_cfg451_s cn56xxp1;
-};
-
-union cvmx_pcieep_cfg452 {
-	uint32_t u32;
-	struct cvmx_pcieep_cfg452_s {
-		uint32_t reserved_26_31:6;
-		uint32_t eccrc:1;
-		uint32_t reserved_22_24:3;
-		uint32_t lme:6;
-		uint32_t reserved_8_15:8;
-		uint32_t flm:1;
-		uint32_t reserved_6_6:1;
-		uint32_t dllle:1;
-		uint32_t reserved_4_4:1;
-		uint32_t ra:1;
-		uint32_t le:1;
-		uint32_t sd:1;
-		uint32_t omr:1;
-	} s;
-	struct cvmx_pcieep_cfg452_s cn52xx;
-	struct cvmx_pcieep_cfg452_s cn52xxp1;
-	struct cvmx_pcieep_cfg452_s cn56xx;
-	struct cvmx_pcieep_cfg452_s cn56xxp1;
-};
-
-union cvmx_pcieep_cfg453 {
-	uint32_t u32;
-	struct cvmx_pcieep_cfg453_s {
-		uint32_t dlld:1;
-		uint32_t reserved_26_30:5;
-		uint32_t ack_nak:1;
-		uint32_t fcd:1;
-		uint32_t ilst:24;
-	} s;
-	struct cvmx_pcieep_cfg453_s cn52xx;
-	struct cvmx_pcieep_cfg453_s cn52xxp1;
-	struct cvmx_pcieep_cfg453_s cn56xx;
-	struct cvmx_pcieep_cfg453_s cn56xxp1;
-};
-
-union cvmx_pcieep_cfg454 {
-	uint32_t u32;
-	struct cvmx_pcieep_cfg454_s {
-		uint32_t reserved_29_31:3;
-		uint32_t tmfcwt:5;
-		uint32_t tmanlt:5;
-		uint32_t tmrt:5;
-		uint32_t reserved_11_13:3;
-		uint32_t nskps:3;
-		uint32_t reserved_4_7:4;
-		uint32_t ntss:4;
-	} s;
-	struct cvmx_pcieep_cfg454_s cn52xx;
-	struct cvmx_pcieep_cfg454_s cn52xxp1;
-	struct cvmx_pcieep_cfg454_s cn56xx;
-	struct cvmx_pcieep_cfg454_s cn56xxp1;
-};
-
-union cvmx_pcieep_cfg455 {
-	uint32_t u32;
-	struct cvmx_pcieep_cfg455_s {
-		uint32_t m_cfg0_filt:1;
-		uint32_t m_io_filt:1;
-		uint32_t msg_ctrl:1;
-		uint32_t m_cpl_ecrc_filt:1;
-		uint32_t m_ecrc_filt:1;
-		uint32_t m_cpl_len_err:1;
-		uint32_t m_cpl_attr_err:1;
-		uint32_t m_cpl_tc_err:1;
-		uint32_t m_cpl_fun_err:1;
-		uint32_t m_cpl_rid_err:1;
-		uint32_t m_cpl_tag_err:1;
-		uint32_t m_lk_filt:1;
-		uint32_t m_cfg1_filt:1;
-		uint32_t m_bar_match:1;
-		uint32_t m_pois_filt:1;
-		uint32_t m_fun:1;
-		uint32_t dfcwt:1;
-		uint32_t reserved_11_14:4;
-		uint32_t skpiv:11;
-	} s;
-	struct cvmx_pcieep_cfg455_s cn52xx;
-	struct cvmx_pcieep_cfg455_s cn52xxp1;
-	struct cvmx_pcieep_cfg455_s cn56xx;
-	struct cvmx_pcieep_cfg455_s cn56xxp1;
-};
-
-union cvmx_pcieep_cfg456 {
-	uint32_t u32;
-	struct cvmx_pcieep_cfg456_s {
-		uint32_t reserved_2_31:30;
-		uint32_t m_vend1_drp:1;
-		uint32_t m_vend0_drp:1;
-	} s;
-	struct cvmx_pcieep_cfg456_s cn52xx;
-	struct cvmx_pcieep_cfg456_s cn52xxp1;
-	struct cvmx_pcieep_cfg456_s cn56xx;
-	struct cvmx_pcieep_cfg456_s cn56xxp1;
-};
-
-union cvmx_pcieep_cfg458 {
-	uint32_t u32;
-	struct cvmx_pcieep_cfg458_s {
-		uint32_t dbg_info_l32:32;
-	} s;
-	struct cvmx_pcieep_cfg458_s cn52xx;
-	struct cvmx_pcieep_cfg458_s cn52xxp1;
-	struct cvmx_pcieep_cfg458_s cn56xx;
-	struct cvmx_pcieep_cfg458_s cn56xxp1;
-};
-
-union cvmx_pcieep_cfg459 {
-	uint32_t u32;
-	struct cvmx_pcieep_cfg459_s {
-		uint32_t dbg_info_u32:32;
-	} s;
-	struct cvmx_pcieep_cfg459_s cn52xx;
-	struct cvmx_pcieep_cfg459_s cn52xxp1;
-	struct cvmx_pcieep_cfg459_s cn56xx;
-	struct cvmx_pcieep_cfg459_s cn56xxp1;
-};
-
-union cvmx_pcieep_cfg460 {
-	uint32_t u32;
-	struct cvmx_pcieep_cfg460_s {
-		uint32_t reserved_20_31:12;
-		uint32_t tphfcc:8;
-		uint32_t tpdfcc:12;
-	} s;
-	struct cvmx_pcieep_cfg460_s cn52xx;
-	struct cvmx_pcieep_cfg460_s cn52xxp1;
-	struct cvmx_pcieep_cfg460_s cn56xx;
-	struct cvmx_pcieep_cfg460_s cn56xxp1;
-};
-
-union cvmx_pcieep_cfg461 {
-	uint32_t u32;
-	struct cvmx_pcieep_cfg461_s {
-		uint32_t reserved_20_31:12;
-		uint32_t tchfcc:8;
-		uint32_t tcdfcc:12;
-	} s;
-	struct cvmx_pcieep_cfg461_s cn52xx;
-	struct cvmx_pcieep_cfg461_s cn52xxp1;
-	struct cvmx_pcieep_cfg461_s cn56xx;
-	struct cvmx_pcieep_cfg461_s cn56xxp1;
-};
-
-union cvmx_pcieep_cfg462 {
-	uint32_t u32;
-	struct cvmx_pcieep_cfg462_s {
-		uint32_t reserved_20_31:12;
-		uint32_t tchfcc:8;
-		uint32_t tcdfcc:12;
-	} s;
-	struct cvmx_pcieep_cfg462_s cn52xx;
-	struct cvmx_pcieep_cfg462_s cn52xxp1;
-	struct cvmx_pcieep_cfg462_s cn56xx;
-	struct cvmx_pcieep_cfg462_s cn56xxp1;
-};
-
-union cvmx_pcieep_cfg463 {
-	uint32_t u32;
-	struct cvmx_pcieep_cfg463_s {
-		uint32_t reserved_3_31:29;
-		uint32_t rqne:1;
-		uint32_t trbne:1;
-		uint32_t rtlpfccnr:1;
-	} s;
-	struct cvmx_pcieep_cfg463_s cn52xx;
-	struct cvmx_pcieep_cfg463_s cn52xxp1;
-	struct cvmx_pcieep_cfg463_s cn56xx;
-	struct cvmx_pcieep_cfg463_s cn56xxp1;
-};
-
-union cvmx_pcieep_cfg464 {
-	uint32_t u32;
-	struct cvmx_pcieep_cfg464_s {
-		uint32_t wrr_vc3:8;
-		uint32_t wrr_vc2:8;
-		uint32_t wrr_vc1:8;
-		uint32_t wrr_vc0:8;
-	} s;
-	struct cvmx_pcieep_cfg464_s cn52xx;
-	struct cvmx_pcieep_cfg464_s cn52xxp1;
-	struct cvmx_pcieep_cfg464_s cn56xx;
-	struct cvmx_pcieep_cfg464_s cn56xxp1;
-};
-
-union cvmx_pcieep_cfg465 {
-	uint32_t u32;
-	struct cvmx_pcieep_cfg465_s {
-		uint32_t wrr_vc7:8;
-		uint32_t wrr_vc6:8;
-		uint32_t wrr_vc5:8;
-		uint32_t wrr_vc4:8;
-	} s;
-	struct cvmx_pcieep_cfg465_s cn52xx;
-	struct cvmx_pcieep_cfg465_s cn52xxp1;
-	struct cvmx_pcieep_cfg465_s cn56xx;
-	struct cvmx_pcieep_cfg465_s cn56xxp1;
-};
-
-union cvmx_pcieep_cfg466 {
-	uint32_t u32;
-	struct cvmx_pcieep_cfg466_s {
-		uint32_t rx_queue_order:1;
-		uint32_t type_ordering:1;
-		uint32_t reserved_24_29:6;
-		uint32_t queue_mode:3;
-		uint32_t reserved_20_20:1;
-		uint32_t header_credits:8;
-		uint32_t data_credits:12;
-	} s;
-	struct cvmx_pcieep_cfg466_s cn52xx;
-	struct cvmx_pcieep_cfg466_s cn52xxp1;
-	struct cvmx_pcieep_cfg466_s cn56xx;
-	struct cvmx_pcieep_cfg466_s cn56xxp1;
-};
-
-union cvmx_pcieep_cfg467 {
-	uint32_t u32;
-	struct cvmx_pcieep_cfg467_s {
-		uint32_t reserved_24_31:8;
-		uint32_t queue_mode:3;
-		uint32_t reserved_20_20:1;
-		uint32_t header_credits:8;
-		uint32_t data_credits:12;
-	} s;
-	struct cvmx_pcieep_cfg467_s cn52xx;
-	struct cvmx_pcieep_cfg467_s cn52xxp1;
-	struct cvmx_pcieep_cfg467_s cn56xx;
-	struct cvmx_pcieep_cfg467_s cn56xxp1;
-};
-
-union cvmx_pcieep_cfg468 {
-	uint32_t u32;
-	struct cvmx_pcieep_cfg468_s {
-		uint32_t reserved_24_31:8;
-		uint32_t queue_mode:3;
-		uint32_t reserved_20_20:1;
-		uint32_t header_credits:8;
-		uint32_t data_credits:12;
-	} s;
-	struct cvmx_pcieep_cfg468_s cn52xx;
-	struct cvmx_pcieep_cfg468_s cn52xxp1;
-	struct cvmx_pcieep_cfg468_s cn56xx;
-	struct cvmx_pcieep_cfg468_s cn56xxp1;
-};
-
-union cvmx_pcieep_cfg490 {
-	uint32_t u32;
-	struct cvmx_pcieep_cfg490_s {
-		uint32_t reserved_26_31:6;
-		uint32_t header_depth:10;
-		uint32_t reserved_14_15:2;
-		uint32_t data_depth:14;
-	} s;
-	struct cvmx_pcieep_cfg490_s cn52xx;
-	struct cvmx_pcieep_cfg490_s cn52xxp1;
-	struct cvmx_pcieep_cfg490_s cn56xx;
-	struct cvmx_pcieep_cfg490_s cn56xxp1;
-};
-
-union cvmx_pcieep_cfg491 {
-	uint32_t u32;
-	struct cvmx_pcieep_cfg491_s {
-		uint32_t reserved_26_31:6;
-		uint32_t header_depth:10;
-		uint32_t reserved_14_15:2;
-		uint32_t data_depth:14;
-	} s;
-	struct cvmx_pcieep_cfg491_s cn52xx;
-	struct cvmx_pcieep_cfg491_s cn52xxp1;
-	struct cvmx_pcieep_cfg491_s cn56xx;
-	struct cvmx_pcieep_cfg491_s cn56xxp1;
-};
-
-union cvmx_pcieep_cfg492 {
-	uint32_t u32;
-	struct cvmx_pcieep_cfg492_s {
-		uint32_t reserved_26_31:6;
-		uint32_t header_depth:10;
-		uint32_t reserved_14_15:2;
-		uint32_t data_depth:14;
-	} s;
-	struct cvmx_pcieep_cfg492_s cn52xx;
-	struct cvmx_pcieep_cfg492_s cn52xxp1;
-	struct cvmx_pcieep_cfg492_s cn56xx;
-	struct cvmx_pcieep_cfg492_s cn56xxp1;
-};
-
-union cvmx_pcieep_cfg516 {
-	uint32_t u32;
-	struct cvmx_pcieep_cfg516_s {
-		uint32_t phy_stat:32;
-	} s;
-	struct cvmx_pcieep_cfg516_s cn52xx;
-	struct cvmx_pcieep_cfg516_s cn52xxp1;
-	struct cvmx_pcieep_cfg516_s cn56xx;
-	struct cvmx_pcieep_cfg516_s cn56xxp1;
-};
-
-union cvmx_pcieep_cfg517 {
-	uint32_t u32;
-	struct cvmx_pcieep_cfg517_s {
-		uint32_t phy_ctrl:32;
-	} s;
-	struct cvmx_pcieep_cfg517_s cn52xx;
-	struct cvmx_pcieep_cfg517_s cn52xxp1;
-	struct cvmx_pcieep_cfg517_s cn56xx;
-	struct cvmx_pcieep_cfg517_s cn56xxp1;
-};
-
-#endif
diff --git a/arch/mips/include/asm/pci.h b/arch/mips/include/asm/pci.h
index fcd4060..90bf3b3 100644
--- a/arch/mips/include/asm/pci.h
+++ b/arch/mips/include/asm/pci.h
@@ -17,6 +17,7 @@
  */
 
 #include <linux/ioport.h>
+#include <linux/of.h>
 
 /*
  * Each pci channel is a top-level PCI bus seem by CPU.  A machine  with
@@ -26,6 +27,7 @@
 struct pci_controller {
 	struct pci_controller *next;
 	struct pci_bus *bus;
+	struct device_node *of_node;
 
 	struct pci_ops *pci_ops;
 	struct resource *mem_resource;
@@ -142,4 +144,8 @@
 
 extern char * (*pcibios_plat_setup)(char *str);
 
+/* this function parses memory ranges from a device node */
+extern void __devinit pci_load_of_ranges(struct pci_controller *hose,
+					 struct device_node *node);
+
 #endif /* _ASM_PCI_H */
diff --git a/arch/mips/include/asm/posix_types.h b/arch/mips/include/asm/posix_types.h
index e0308dc..fa03ec3 100644
--- a/arch/mips/include/asm/posix_types.h
+++ b/arch/mips/include/asm/posix_types.h
@@ -17,11 +17,6 @@
  * assume GCC is being used.
  */
 
-#if (_MIPS_SZLONG == 64)
-typedef unsigned int	__kernel_nlink_t;
-#define __kernel_nlink_t __kernel_nlink_t
-#endif
-
 typedef long		__kernel_daddr_t;
 #define __kernel_daddr_t __kernel_daddr_t
 
diff --git a/arch/mips/include/asm/prom.h b/arch/mips/include/asm/prom.h
index 7a6e82e..7206d44 100644
--- a/arch/mips/include/asm/prom.h
+++ b/arch/mips/include/asm/prom.h
@@ -12,6 +12,9 @@
 #define __ASM_PROM_H
 
 #ifdef CONFIG_OF
+#include <linux/bug.h>
+#include <linux/io.h>
+#include <linux/types.h>
 #include <asm/bootinfo.h>
 
 extern int early_init_dt_scan_memory_arch(unsigned long node,
@@ -21,6 +24,29 @@
 extern void free_mem_mach(unsigned long addr, unsigned long size);
 
 extern void device_tree_init(void);
+
+static inline unsigned long pci_address_to_pio(phys_addr_t address)
+{
+	/*
+	 * The ioport address can be directly used by inX() / outX()
+	 */
+	BUG_ON(address > IO_SPACE_LIMIT);
+
+	return (unsigned long) address;
+}
+#define pci_address_to_pio pci_address_to_pio
+
+struct boot_param_header;
+
+extern void __dt_setup_arch(struct boot_param_header *bph);
+
+#define dt_setup_arch(sym)						\
+({									\
+	extern struct boot_param_header __dtb_##sym##_begin;		\
+									\
+	__dt_setup_arch(&__dtb_##sym##_begin);				\
+})
+
 #else /* CONFIG_OF */
 static inline void device_tree_init(void) { }
 #endif /* CONFIG_OF */
diff --git a/arch/mips/include/asm/setup.h b/arch/mips/include/asm/setup.h
index 6dce6d8..2560b6b 100644
--- a/arch/mips/include/asm/setup.h
+++ b/arch/mips/include/asm/setup.h
@@ -14,7 +14,8 @@
 
 extern void *set_except_vector(int n, void *addr);
 extern unsigned long ebase;
-extern void per_cpu_trap_init(void);
+extern void per_cpu_trap_init(bool);
+extern void cpu_cache_init(void);
 
 #endif /* __KERNEL__ */
 
diff --git a/arch/mips/include/asm/sparsemem.h b/arch/mips/include/asm/sparsemem.h
index 7165333..4461198 100644
--- a/arch/mips/include/asm/sparsemem.h
+++ b/arch/mips/include/asm/sparsemem.h
@@ -6,7 +6,11 @@
  * SECTION_SIZE_BITS		2^N: how big each section will be
  * MAX_PHYSMEM_BITS		2^N: how much memory we can have in that space
  */
-#define SECTION_SIZE_BITS       28
+#if defined(CONFIG_HUGETLB_PAGE) && defined(CONFIG_PAGE_SIZE_64KB)
+# define SECTION_SIZE_BITS	29
+#else
+# define SECTION_SIZE_BITS	28
+#endif
 #define MAX_PHYSMEM_BITS        35
 
 #endif /* CONFIG_SPARSEMEM */
diff --git a/arch/mips/include/asm/stat.h b/arch/mips/include/asm/stat.h
index 6e00f75..fe9a4c3 100644
--- a/arch/mips/include/asm/stat.h
+++ b/arch/mips/include/asm/stat.h
@@ -20,7 +20,7 @@
 	long		st_pad1[3];		/* Reserved for network id */
 	ino_t		st_ino;
 	mode_t		st_mode;
-	nlink_t		st_nlink;
+	__u32		st_nlink;
 	uid_t		st_uid;
 	gid_t		st_gid;
 	unsigned 	st_rdev;
@@ -55,7 +55,7 @@
 	unsigned long long	st_ino;
 
 	mode_t		st_mode;
-	nlink_t		st_nlink;
+	__u32		st_nlink;
 
 	uid_t		st_uid;
 	gid_t		st_gid;
@@ -96,7 +96,7 @@
 	unsigned long		st_ino;
 
 	mode_t			st_mode;
-	nlink_t			st_nlink;
+	__u32			st_nlink;
 
 	uid_t			st_uid;
 	gid_t			st_gid;
diff --git a/arch/mips/include/asm/termios.h b/arch/mips/include/asm/termios.h
index 8f77f77..abdd87a 100644
--- a/arch/mips/include/asm/termios.h
+++ b/arch/mips/include/asm/termios.h
@@ -60,7 +60,7 @@
 };
 
 #ifdef __KERNEL__
-#include <linux/module.h>
+#include <asm/uaccess.h>
 
 /*
  *	intr=^C		quit=^\		erase=del	kill=^U
diff --git a/arch/mips/include/asm/traps.h b/arch/mips/include/asm/traps.h
index ff74aec..420ca06 100644
--- a/arch/mips/include/asm/traps.h
+++ b/arch/mips/include/asm/traps.h
@@ -25,6 +25,7 @@
 extern void (*board_ejtag_handler_setup)(void);
 extern void (*board_bind_eic_interrupt)(int irq, int regset);
 extern void (*board_ebase_setup)(void);
+extern void (*board_cache_error_setup)(void);
 
 extern int register_nmi_notifier(struct notifier_block *nb);
 
diff --git a/arch/mips/include/asm/uasm.h b/arch/mips/include/asm/uasm.h
index 504d40a..440a21d 100644
--- a/arch/mips/include/asm/uasm.h
+++ b/arch/mips/include/asm/uasm.h
@@ -11,7 +11,7 @@
 #include <linux/types.h>
 
 #ifdef CONFIG_EXPORT_UASM
-#include <linux/module.h>
+#include <linux/export.h>
 #define __uasminit
 #define __uasminitdata
 #define UASM_EXPORT_SYMBOL(sym) EXPORT_SYMBOL(sym)
diff --git a/arch/mips/jz4740/Makefile b/arch/mips/jz4740/Makefile
index a9dff33..e44abea 100644
--- a/arch/mips/jz4740/Makefile
+++ b/arch/mips/jz4740/Makefile
@@ -16,5 +16,3 @@
 # PM support
 
 obj-$(CONFIG_PM) += pm.o
-
-ccflags-y := -Werror -Wall
diff --git a/arch/mips/kernel/cpu-probe.c b/arch/mips/kernel/cpu-probe.c
index 5099201..6ae7ce4 100644
--- a/arch/mips/kernel/cpu-probe.c
+++ b/arch/mips/kernel/cpu-probe.c
@@ -340,7 +340,7 @@
 		__cpu_name[cpu] = "R2000";
 		c->isa_level = MIPS_CPU_ISA_I;
 		c->options = MIPS_CPU_TLB | MIPS_CPU_3K_CACHE |
-		             MIPS_CPU_NOFPUEX;
+			     MIPS_CPU_NOFPUEX;
 		if (__cpu_has_fpu())
 			c->options |= MIPS_CPU_FPU;
 		c->tlbsize = 64;
@@ -361,7 +361,7 @@
 		}
 		c->isa_level = MIPS_CPU_ISA_I;
 		c->options = MIPS_CPU_TLB | MIPS_CPU_3K_CACHE |
-		             MIPS_CPU_NOFPUEX;
+			     MIPS_CPU_NOFPUEX;
 		if (__cpu_has_fpu())
 			c->options |= MIPS_CPU_FPU;
 		c->tlbsize = 64;
@@ -387,8 +387,8 @@
 
 		c->isa_level = MIPS_CPU_ISA_III;
 		c->options = R4K_OPTS | MIPS_CPU_FPU | MIPS_CPU_32FPR |
-		             MIPS_CPU_WATCH | MIPS_CPU_VCE |
-		             MIPS_CPU_LLSC;
+			     MIPS_CPU_WATCH | MIPS_CPU_VCE |
+			     MIPS_CPU_LLSC;
 		c->tlbsize = 48;
 		break;
 	case PRID_IMP_VR41XX:
@@ -434,7 +434,7 @@
 		__cpu_name[cpu] = "R4300";
 		c->isa_level = MIPS_CPU_ISA_III;
 		c->options = R4K_OPTS | MIPS_CPU_FPU | MIPS_CPU_32FPR |
-		             MIPS_CPU_LLSC;
+			     MIPS_CPU_LLSC;
 		c->tlbsize = 32;
 		break;
 	case PRID_IMP_R4600:
@@ -446,7 +446,7 @@
 		c->tlbsize = 48;
 		break;
 	#if 0
- 	case PRID_IMP_R4650:
+	case PRID_IMP_R4650:
 		/*
 		 * This processor doesn't have an MMU, so it's not
 		 * "real easy" to run Linux on it. It is left purely
@@ -455,9 +455,9 @@
 		 */
 		c->cputype = CPU_R4650;
 		__cpu_name[cpu] = "R4650";
-	 	c->isa_level = MIPS_CPU_ISA_III;
+		c->isa_level = MIPS_CPU_ISA_III;
 		c->options = R4K_OPTS | MIPS_CPU_FPU | MIPS_CPU_LLSC;
-	        c->tlbsize = 48;
+		c->tlbsize = 48;
 		break;
 	#endif
 	case PRID_IMP_TX39:
@@ -488,7 +488,7 @@
 		__cpu_name[cpu] = "R4700";
 		c->isa_level = MIPS_CPU_ISA_III;
 		c->options = R4K_OPTS | MIPS_CPU_FPU | MIPS_CPU_32FPR |
-		             MIPS_CPU_LLSC;
+			     MIPS_CPU_LLSC;
 		c->tlbsize = 48;
 		break;
 	case PRID_IMP_TX49:
@@ -505,7 +505,7 @@
 		__cpu_name[cpu] = "R5000";
 		c->isa_level = MIPS_CPU_ISA_IV;
 		c->options = R4K_OPTS | MIPS_CPU_FPU | MIPS_CPU_32FPR |
-		             MIPS_CPU_LLSC;
+			     MIPS_CPU_LLSC;
 		c->tlbsize = 48;
 		break;
 	case PRID_IMP_R5432:
@@ -513,7 +513,7 @@
 		__cpu_name[cpu] = "R5432";
 		c->isa_level = MIPS_CPU_ISA_IV;
 		c->options = R4K_OPTS | MIPS_CPU_FPU | MIPS_CPU_32FPR |
-		             MIPS_CPU_WATCH | MIPS_CPU_LLSC;
+			     MIPS_CPU_WATCH | MIPS_CPU_LLSC;
 		c->tlbsize = 48;
 		break;
 	case PRID_IMP_R5500:
@@ -521,7 +521,7 @@
 		__cpu_name[cpu] = "R5500";
 		c->isa_level = MIPS_CPU_ISA_IV;
 		c->options = R4K_OPTS | MIPS_CPU_FPU | MIPS_CPU_32FPR |
-		             MIPS_CPU_WATCH | MIPS_CPU_LLSC;
+			     MIPS_CPU_WATCH | MIPS_CPU_LLSC;
 		c->tlbsize = 48;
 		break;
 	case PRID_IMP_NEVADA:
@@ -529,7 +529,7 @@
 		__cpu_name[cpu] = "Nevada";
 		c->isa_level = MIPS_CPU_ISA_IV;
 		c->options = R4K_OPTS | MIPS_CPU_FPU | MIPS_CPU_32FPR |
-		             MIPS_CPU_DIVEC | MIPS_CPU_LLSC;
+			     MIPS_CPU_DIVEC | MIPS_CPU_LLSC;
 		c->tlbsize = 48;
 		break;
 	case PRID_IMP_R6000:
@@ -537,7 +537,7 @@
 		__cpu_name[cpu] = "R6000";
 		c->isa_level = MIPS_CPU_ISA_II;
 		c->options = MIPS_CPU_TLB | MIPS_CPU_FPU |
-		             MIPS_CPU_LLSC;
+			     MIPS_CPU_LLSC;
 		c->tlbsize = 32;
 		break;
 	case PRID_IMP_R6000A:
@@ -545,7 +545,7 @@
 		__cpu_name[cpu] = "R6000A";
 		c->isa_level = MIPS_CPU_ISA_II;
 		c->options = MIPS_CPU_TLB | MIPS_CPU_FPU |
-		             MIPS_CPU_LLSC;
+			     MIPS_CPU_LLSC;
 		c->tlbsize = 32;
 		break;
 	case PRID_IMP_RM7000:
@@ -553,7 +553,7 @@
 		__cpu_name[cpu] = "RM7000";
 		c->isa_level = MIPS_CPU_ISA_IV;
 		c->options = R4K_OPTS | MIPS_CPU_FPU | MIPS_CPU_32FPR |
-		             MIPS_CPU_LLSC;
+			     MIPS_CPU_LLSC;
 		/*
 		 * Undocumented RM7000:  Bit 29 in the info register of
 		 * the RM7000 v2.0 indicates if the TLB has 48 or 64
@@ -569,7 +569,7 @@
 		__cpu_name[cpu] = "RM9000";
 		c->isa_level = MIPS_CPU_ISA_IV;
 		c->options = R4K_OPTS | MIPS_CPU_FPU | MIPS_CPU_32FPR |
-		             MIPS_CPU_LLSC;
+			     MIPS_CPU_LLSC;
 		/*
 		 * Bit 29 in the info register of the RM9000
 		 * indicates if the TLB has 48 or 64 entries.
@@ -584,8 +584,8 @@
 		__cpu_name[cpu] = "RM8000";
 		c->isa_level = MIPS_CPU_ISA_IV;
 		c->options = MIPS_CPU_TLB | MIPS_CPU_4KEX |
-		             MIPS_CPU_FPU | MIPS_CPU_32FPR |
-		             MIPS_CPU_LLSC;
+			     MIPS_CPU_FPU | MIPS_CPU_32FPR |
+			     MIPS_CPU_LLSC;
 		c->tlbsize = 384;      /* has weird TLB: 3-way x 128 */
 		break;
 	case PRID_IMP_R10000:
@@ -593,9 +593,9 @@
 		__cpu_name[cpu] = "R10000";
 		c->isa_level = MIPS_CPU_ISA_IV;
 		c->options = MIPS_CPU_TLB | MIPS_CPU_4K_CACHE | MIPS_CPU_4KEX |
-		             MIPS_CPU_FPU | MIPS_CPU_32FPR |
+			     MIPS_CPU_FPU | MIPS_CPU_32FPR |
 			     MIPS_CPU_COUNTER | MIPS_CPU_WATCH |
-		             MIPS_CPU_LLSC;
+			     MIPS_CPU_LLSC;
 		c->tlbsize = 64;
 		break;
 	case PRID_IMP_R12000:
@@ -603,9 +603,9 @@
 		__cpu_name[cpu] = "R12000";
 		c->isa_level = MIPS_CPU_ISA_IV;
 		c->options = MIPS_CPU_TLB | MIPS_CPU_4K_CACHE | MIPS_CPU_4KEX |
-		             MIPS_CPU_FPU | MIPS_CPU_32FPR |
+			     MIPS_CPU_FPU | MIPS_CPU_32FPR |
 			     MIPS_CPU_COUNTER | MIPS_CPU_WATCH |
-		             MIPS_CPU_LLSC;
+			     MIPS_CPU_LLSC;
 		c->tlbsize = 64;
 		break;
 	case PRID_IMP_R14000:
@@ -613,9 +613,9 @@
 		__cpu_name[cpu] = "R14000";
 		c->isa_level = MIPS_CPU_ISA_IV;
 		c->options = MIPS_CPU_TLB | MIPS_CPU_4K_CACHE | MIPS_CPU_4KEX |
-		             MIPS_CPU_FPU | MIPS_CPU_32FPR |
+			     MIPS_CPU_FPU | MIPS_CPU_32FPR |
 			     MIPS_CPU_COUNTER | MIPS_CPU_WATCH |
-		             MIPS_CPU_LLSC;
+			     MIPS_CPU_LLSC;
 		c->tlbsize = 64;
 		break;
 	case PRID_IMP_LOONGSON2:
@@ -739,7 +739,7 @@
 	if (config3 & MIPS_CONF3_VEIC)
 		c->options |= MIPS_CPU_VEIC;
 	if (config3 & MIPS_CONF3_MT)
-	        c->ases |= MIPS_ASE_MIPSMT;
+		c->ases |= MIPS_ASE_MIPSMT;
 	if (config3 & MIPS_CONF3_ULRI)
 		c->options |= MIPS_CPU_ULRI;
 
@@ -767,7 +767,7 @@
 
 	/* MIPS32 or MIPS64 compliant CPU.  */
 	c->options = MIPS_CPU_4KEX | MIPS_CPU_4K_CACHE | MIPS_CPU_COUNTER |
-	             MIPS_CPU_DIVEC | MIPS_CPU_LLSC | MIPS_CPU_MCHECK;
+		     MIPS_CPU_DIVEC | MIPS_CPU_LLSC | MIPS_CPU_MCHECK;
 
 	c->scache.flags = MIPS_CACHE_NOT_PRESENT;
 
diff --git a/arch/mips/kernel/perf_event_mipsxx.c b/arch/mips/kernel/perf_event_mipsxx.c
index ab73fa2..f29099b 100644
--- a/arch/mips/kernel/perf_event_mipsxx.c
+++ b/arch/mips/kernel/perf_event_mipsxx.c
@@ -1532,7 +1532,8 @@
 		irq = MSC01E_INT_BASE + MSC01E_INT_PERFCTR;
 	} else {
 #endif
-		if (cp0_perfcount_irq >= 0)
+		if ((cp0_perfcount_irq >= 0) &&
+				(cp0_compare_irq != cp0_perfcount_irq))
 			irq = MIPS_CPU_IRQ_BASE + cp0_perfcount_irq;
 		else
 			irq = -1;
diff --git a/arch/mips/kernel/proc.c b/arch/mips/kernel/proc.c
index f8b2c59..5542817 100644
--- a/arch/mips/kernel/proc.c
+++ b/arch/mips/kernel/proc.c
@@ -41,27 +41,27 @@
 
 	seq_printf(m, "processor\t\t: %ld\n", n);
 	sprintf(fmt, "cpu model\t\t: %%s V%%d.%%d%s\n",
-	        cpu_data[n].options & MIPS_CPU_FPU ? "  FPU V%d.%d" : "");
+		      cpu_data[n].options & MIPS_CPU_FPU ? "  FPU V%d.%d" : "");
 	seq_printf(m, fmt, __cpu_name[n],
-	                           (version >> 4) & 0x0f, version & 0x0f,
-	                           (fp_vers >> 4) & 0x0f, fp_vers & 0x0f);
+		      (version >> 4) & 0x0f, version & 0x0f,
+		      (fp_vers >> 4) & 0x0f, fp_vers & 0x0f);
 	seq_printf(m, "BogoMIPS\t\t: %u.%02u\n",
-	              cpu_data[n].udelay_val / (500000/HZ),
-	              (cpu_data[n].udelay_val / (5000/HZ)) % 100);
+		      cpu_data[n].udelay_val / (500000/HZ),
+		      (cpu_data[n].udelay_val / (5000/HZ)) % 100);
 	seq_printf(m, "wait instruction\t: %s\n", cpu_wait ? "yes" : "no");
 	seq_printf(m, "microsecond timers\t: %s\n",
-	              cpu_has_counter ? "yes" : "no");
+		      cpu_has_counter ? "yes" : "no");
 	seq_printf(m, "tlb_entries\t\t: %d\n", cpu_data[n].tlbsize);
 	seq_printf(m, "extra interrupt vector\t: %s\n",
-	              cpu_has_divec ? "yes" : "no");
+		      cpu_has_divec ? "yes" : "no");
 	seq_printf(m, "hardware watchpoint\t: %s",
-		   cpu_has_watch ? "yes, " : "no\n");
+		      cpu_has_watch ? "yes, " : "no\n");
 	if (cpu_has_watch) {
 		seq_printf(m, "count: %d, address/irw mask: [",
-			   cpu_data[n].watch_reg_count);
+		      cpu_data[n].watch_reg_count);
 		for (i = 0; i < cpu_data[n].watch_reg_count; i++)
 			seq_printf(m, "%s0x%04x", i ? ", " : "" ,
-				   cpu_data[n].watch_reg_masks[i]);
+				cpu_data[n].watch_reg_masks[i]);
 		seq_printf(m, "]\n");
 	}
 	seq_printf(m, "ASEs implemented\t:%s%s%s%s%s%s\n",
@@ -73,13 +73,13 @@
 		      cpu_has_mipsmt ? " mt" : ""
 		);
 	seq_printf(m, "shadow register sets\t: %d\n",
-		       cpu_data[n].srsets);
+		      cpu_data[n].srsets);
 	seq_printf(m, "kscratch registers\t: %d\n",
-		   hweight8(cpu_data[n].kscratch_mask));
+		      hweight8(cpu_data[n].kscratch_mask));
 	seq_printf(m, "core\t\t\t: %d\n", cpu_data[n].core);
 
 	sprintf(fmt, "VCE%%c exceptions\t\t: %s\n",
-	        cpu_has_vce ? "%u" : "not available");
+		      cpu_has_vce ? "%u" : "not available");
 	seq_printf(m, fmt, 'D', vced_count);
 	seq_printf(m, fmt, 'I', vcei_count);
 	seq_printf(m, "\n");
diff --git a/arch/mips/kernel/prom.c b/arch/mips/kernel/prom.c
index 558b539..f11b2bb 100644
--- a/arch/mips/kernel/prom.c
+++ b/arch/mips/kernel/prom.c
@@ -95,3 +95,16 @@
 	/* free the space reserved for the dt blob */
 	free_mem_mach(base, size);
 }
+
+void __init __dt_setup_arch(struct boot_param_header *bph)
+{
+	if (be32_to_cpu(bph->magic) != OF_DT_HEADER) {
+		pr_err("DTB has bad magic, ignoring builtin OF DTB\n");
+
+		return;
+	}
+
+	initial_boot_params = bph;
+
+	early_init_devtree(initial_boot_params);
+}
diff --git a/arch/mips/kernel/setup.c b/arch/mips/kernel/setup.c
index c504b21..a53f8ec 100644
--- a/arch/mips/kernel/setup.c
+++ b/arch/mips/kernel/setup.c
@@ -605,6 +605,8 @@
 
 	resource_init();
 	plat_smp_setup();
+
+	cpu_cache_init();
 }
 
 unsigned long kernelsp[NR_CPUS];
diff --git a/arch/mips/kernel/signal-common.h b/arch/mips/kernel/signal-common.h
index 10263b4..9c60d09 100644
--- a/arch/mips/kernel/signal-common.h
+++ b/arch/mips/kernel/signal-common.h
@@ -19,8 +19,6 @@
 #  define DEBUGP(fmt, args...)
 #endif
 
-#define _BLOCKABLE (~(sigmask(SIGKILL) | sigmask(SIGSTOP)))
-
 /*
  * Determine which stack to use..
  */
diff --git a/arch/mips/kernel/signal.c b/arch/mips/kernel/signal.c
index 17f6ee3..f2c09cf 100644
--- a/arch/mips/kernel/signal.c
+++ b/arch/mips/kernel/signal.c
@@ -339,7 +339,6 @@
 	if (__copy_from_user(&blocked, &frame->sf_mask, sizeof(blocked)))
 		goto badframe;
 
-	sigdelsetmask(&blocked, ~_BLOCKABLE);
 	set_current_blocked(&blocked);
 
 	sig = restore_sigcontext(&regs, &frame->sf_sc);
@@ -375,7 +374,6 @@
 	if (__copy_from_user(&set, &frame->rs_uc.uc_sigmask, sizeof(set)))
 		goto badframe;
 
-	sigdelsetmask(&set, ~_BLOCKABLE);
 	set_current_blocked(&set);
 
 	sig = restore_sigcontext(&regs, &frame->rs_uc.uc_mcontext);
@@ -514,9 +512,10 @@
 	.restart	= __NR_restart_syscall
 };
 
-static int handle_signal(unsigned long sig, siginfo_t *info,
-	struct k_sigaction *ka, sigset_t *oldset, struct pt_regs *regs)
+static void handle_signal(unsigned long sig, siginfo_t *info,
+	struct k_sigaction *ka, struct pt_regs *regs)
 {
+	sigset_t *oldset = sigmask_to_save();
 	int ret;
 	struct mips_abi *abi = current->thread.abi;
 	void *vdso = current->mm->context.vdso;
@@ -550,17 +549,14 @@
 				       ka, regs, sig, oldset);
 
 	if (ret)
-		return ret;
+		return;
 
-	block_sigmask(ka, sig);
-
-	return ret;
+	signal_delivered(sig, info, ka, regs, 0);
 }
 
 static void do_signal(struct pt_regs *regs)
 {
 	struct k_sigaction ka;
-	sigset_t *oldset;
 	siginfo_t info;
 	int signr;
 
@@ -572,25 +568,10 @@
 	if (!user_mode(regs))
 		return;
 
-	if (test_thread_flag(TIF_RESTORE_SIGMASK))
-		oldset = &current->saved_sigmask;
-	else
-		oldset = &current->blocked;
-
 	signr = get_signal_to_deliver(&info, &ka, regs, NULL);
 	if (signr > 0) {
 		/* Whee!  Actually deliver the signal.  */
-		if (handle_signal(signr, &info, &ka, oldset, regs) == 0) {
-			/*
-			 * A signal was successfully delivered; the saved
-			 * sigmask will have been stored in the signal frame,
-			 * and will be restored by sigreturn, so we can simply
-			 * clear the TIF_RESTORE_SIGMASK flag.
-			 */
-			if (test_thread_flag(TIF_RESTORE_SIGMASK))
-				clear_thread_flag(TIF_RESTORE_SIGMASK);
-		}
-
+		handle_signal(signr, &info, &ka, regs);
 		return;
 	}
 
@@ -614,10 +595,7 @@
 	 * If there's no signal to deliver, we just put the saved sigmask
 	 * back
 	 */
-	if (test_thread_flag(TIF_RESTORE_SIGMASK)) {
-		clear_thread_flag(TIF_RESTORE_SIGMASK);
-		sigprocmask(SIG_SETMASK, &current->saved_sigmask, NULL);
-	}
+	restore_saved_sigmask();
 }
 
 /*
@@ -630,14 +608,12 @@
 	local_irq_enable();
 
 	/* deal with pending signal delivery */
-	if (thread_info_flags & (_TIF_SIGPENDING | _TIF_RESTORE_SIGMASK))
+	if (thread_info_flags & _TIF_SIGPENDING)
 		do_signal(regs);
 
 	if (thread_info_flags & _TIF_NOTIFY_RESUME) {
 		clear_thread_flag(TIF_NOTIFY_RESUME);
 		tracehook_notify_resume(regs);
-		if (current->replacement_session_keyring)
-			key_replace_session_keyring();
 	}
 }
 
diff --git a/arch/mips/kernel/signal32.c b/arch/mips/kernel/signal32.c
index b4fe2ea..da1b56a 100644
--- a/arch/mips/kernel/signal32.c
+++ b/arch/mips/kernel/signal32.c
@@ -465,7 +465,6 @@
 	if (__copy_conv_sigset_from_user(&blocked, &frame->sf_mask))
 		goto badframe;
 
-	sigdelsetmask(&blocked, ~_BLOCKABLE);
 	set_current_blocked(&blocked);
 
 	sig = restore_sigcontext32(&regs, &frame->sf_sc);
@@ -503,7 +502,6 @@
 	if (__copy_conv_sigset_from_user(&set, &frame->rs_uc.uc_sigmask))
 		goto badframe;
 
-	sigdelsetmask(&set, ~_BLOCKABLE);
 	set_current_blocked(&set);
 
 	sig = restore_sigcontext32(&regs, &frame->rs_uc.uc_mcontext);
diff --git a/arch/mips/kernel/signal_n32.c b/arch/mips/kernel/signal_n32.c
index 63ffac9..3574c14 100644
--- a/arch/mips/kernel/signal_n32.c
+++ b/arch/mips/kernel/signal_n32.c
@@ -109,7 +109,6 @@
 	if (__copy_conv_sigset_from_user(&set, &frame->rs_uc.uc_sigmask))
 		goto badframe;
 
-	sigdelsetmask(&set, ~_BLOCKABLE);
 	set_current_blocked(&set);
 
 	sig = restore_sigcontext(&regs, &frame->rs_uc.uc_mcontext);
diff --git a/arch/mips/kernel/smp.c b/arch/mips/kernel/smp.c
index 71a95f5..48650c8 100644
--- a/arch/mips/kernel/smp.c
+++ b/arch/mips/kernel/smp.c
@@ -106,7 +106,7 @@
 #endif /* CONFIG_MIPS_MT_SMTC */
 	cpu_probe();
 	cpu_report();
-	per_cpu_trap_init();
+	per_cpu_trap_init(false);
 	mips_clockevent_init();
 	mp_ops->init_secondary();
 
diff --git a/arch/mips/kernel/traps.c b/arch/mips/kernel/traps.c
index cfdaaa4..2d0c2a2 100644
--- a/arch/mips/kernel/traps.c
+++ b/arch/mips/kernel/traps.c
@@ -15,6 +15,7 @@
 #include <linux/compiler.h>
 #include <linux/init.h>
 #include <linux/kernel.h>
+#include <linux/module.h>
 #include <linux/mm.h>
 #include <linux/sched.h>
 #include <linux/smp.h>
@@ -91,7 +92,7 @@
 void (*board_ejtag_handler_setup)(void);
 void (*board_bind_eic_interrupt)(int irq, int regset);
 void (*board_ebase_setup)(void);
-
+void __cpuinitdata(*board_cache_error_setup)(void);
 
 static void show_raw_backtrace(unsigned long reg29)
 {
@@ -1490,7 +1491,6 @@
 	return set_vi_srs_handler(n, addr, 0);
 }
 
-extern void cpu_cache_init(void);
 extern void tlb_init(void);
 extern void flush_tlb_handlers(void);
 
@@ -1517,7 +1517,7 @@
 }
 __setup("noulri", ulri_disable);
 
-void __cpuinit per_cpu_trap_init(void)
+void __cpuinit per_cpu_trap_init(bool is_boot_cpu)
 {
 	unsigned int cpu = smp_processor_id();
 	unsigned int status_set = ST0_CU0;
@@ -1616,7 +1616,9 @@
 #ifdef CONFIG_MIPS_MT_SMTC
 	if (bootTC) {
 #endif /* CONFIG_MIPS_MT_SMTC */
-		cpu_cache_init();
+		/* Boot CPU's cache setup in setup_arch(). */
+		if (!is_boot_cpu)
+			cpu_cache_init();
 		tlb_init();
 #ifdef CONFIG_MIPS_MT_SMTC
 	} else if (!secondaryTC) {
@@ -1632,7 +1634,7 @@
 }
 
 /* Install CPU exception handler */
-void __init set_handler(unsigned long offset, void *addr, unsigned long size)
+void __cpuinit set_handler(unsigned long offset, void *addr, unsigned long size)
 {
 	memcpy((void *)(ebase + offset), addr, size);
 	local_flush_icache_range(ebase + offset, ebase + offset + size);
@@ -1693,7 +1695,7 @@
 
 	if (board_ebase_setup)
 		board_ebase_setup();
-	per_cpu_trap_init();
+	per_cpu_trap_init(true);
 
 	/*
 	 * Copy the generic exception handlers to their final destination.
@@ -1797,6 +1799,9 @@
 
 	set_except_vector(26, handle_dsp);
 
+	if (board_cache_error_setup)
+		board_cache_error_setup();
+
 	if (cpu_has_vce)
 		/* Special exception: R4[04]00 uses also the divec space. */
 		memcpy((void *)(ebase + 0x180), &except_vec3_r4000, 0x100);
diff --git a/arch/mips/lantiq/Kconfig b/arch/mips/lantiq/Kconfig
index 3fccf21..20bdf40 100644
--- a/arch/mips/lantiq/Kconfig
+++ b/arch/mips/lantiq/Kconfig
@@ -16,8 +16,22 @@
 	bool "XWAY"
 	select SOC_TYPE_XWAY
 	select HW_HAS_PCI
+
+config SOC_FALCON
+	bool "FALCON"
+
 endchoice
 
-source "arch/mips/lantiq/xway/Kconfig"
+choice
+	prompt "Devicetree"
+
+config DT_EASY50712
+	bool "Easy50712"
+	depends on SOC_XWAY
+endchoice
+
+config PCI_LANTIQ
+	bool "PCI Support"
+	depends on SOC_XWAY && PCI
 
 endif
diff --git a/arch/mips/lantiq/Makefile b/arch/mips/lantiq/Makefile
index e5dae0e..d6bdc57 100644
--- a/arch/mips/lantiq/Makefile
+++ b/arch/mips/lantiq/Makefile
@@ -4,8 +4,11 @@
 # under the terms of the GNU General Public License version 2 as published
 # by the Free Software Foundation.
 
-obj-y := irq.o setup.o clk.o prom.o devices.o
+obj-y := irq.o clk.o prom.o
+
+obj-y += dts/
 
 obj-$(CONFIG_EARLY_PRINTK) += early_printk.o
 
 obj-$(CONFIG_SOC_TYPE_XWAY) += xway/
+obj-$(CONFIG_SOC_FALCON) += falcon/
diff --git a/arch/mips/lantiq/Platform b/arch/mips/lantiq/Platform
index f3dff05..b3ec498 100644
--- a/arch/mips/lantiq/Platform
+++ b/arch/mips/lantiq/Platform
@@ -6,3 +6,4 @@
 cflags-$(CONFIG_LANTIQ)		+= -I$(srctree)/arch/mips/include/asm/mach-lantiq
 load-$(CONFIG_LANTIQ)		= 0xffffffff80002000
 cflags-$(CONFIG_SOC_TYPE_XWAY)	+= -I$(srctree)/arch/mips/include/asm/mach-lantiq/xway
+cflags-$(CONFIG_SOC_FALCON)	+= -I$(srctree)/arch/mips/include/asm/mach-lantiq/falcon
diff --git a/arch/mips/lantiq/clk.c b/arch/mips/lantiq/clk.c
index 412814f..d3bcc33 100644
--- a/arch/mips/lantiq/clk.c
+++ b/arch/mips/lantiq/clk.c
@@ -12,6 +12,7 @@
 #include <linux/kernel.h>
 #include <linux/types.h>
 #include <linux/clk.h>
+#include <linux/clkdev.h>
 #include <linux/err.h>
 #include <linux/list.h>
 
@@ -22,44 +23,32 @@
 #include <lantiq_soc.h>
 
 #include "clk.h"
-
-struct clk {
-	const char *name;
-	unsigned long rate;
-	unsigned long (*get_rate) (void);
-};
-
-static struct clk *cpu_clk;
-static int cpu_clk_cnt;
+#include "prom.h"
 
 /* lantiq socs have 3 static clocks */
-static struct clk cpu_clk_generic[] = {
-	{
-		.name = "cpu",
-		.get_rate = ltq_get_cpu_hz,
-	}, {
-		.name = "fpi",
-		.get_rate = ltq_get_fpi_hz,
-	}, {
-		.name = "io",
-		.get_rate = ltq_get_io_region_clock,
-	},
-};
+static struct clk cpu_clk_generic[3];
 
-static struct resource ltq_cgu_resource = {
-	.name	= "cgu",
-	.start	= LTQ_CGU_BASE_ADDR,
-	.end	= LTQ_CGU_BASE_ADDR + LTQ_CGU_SIZE - 1,
-	.flags	= IORESOURCE_MEM,
-};
-
-/* remapped clock register range */
-void __iomem *ltq_cgu_membase;
-
-void clk_init(void)
+void clkdev_add_static(unsigned long cpu, unsigned long fpi, unsigned long io)
 {
-	cpu_clk = cpu_clk_generic;
-	cpu_clk_cnt = ARRAY_SIZE(cpu_clk_generic);
+	cpu_clk_generic[0].rate = cpu;
+	cpu_clk_generic[1].rate = fpi;
+	cpu_clk_generic[2].rate = io;
+}
+
+struct clk *clk_get_cpu(void)
+{
+	return &cpu_clk_generic[0];
+}
+
+struct clk *clk_get_fpi(void)
+{
+	return &cpu_clk_generic[1];
+}
+EXPORT_SYMBOL_GPL(clk_get_fpi);
+
+struct clk *clk_get_io(void)
+{
+	return &cpu_clk_generic[2];
 }
 
 static inline int clk_good(struct clk *clk)
@@ -82,38 +71,71 @@
 }
 EXPORT_SYMBOL(clk_get_rate);
 
-struct clk *clk_get(struct device *dev, const char *id)
+int clk_set_rate(struct clk *clk, unsigned long rate)
 {
-	int i;
+	if (unlikely(!clk_good(clk)))
+		return 0;
+	if (clk->rates && *clk->rates) {
+		unsigned long *r = clk->rates;
 
-	for (i = 0; i < cpu_clk_cnt; i++)
-		if (!strcmp(id, cpu_clk[i].name))
-			return &cpu_clk[i];
-	BUG();
-	return ERR_PTR(-ENOENT);
+		while (*r && (*r != rate))
+			r++;
+		if (!*r) {
+			pr_err("clk %s.%s: trying to set invalid rate %ld\n",
+				clk->cl.dev_id, clk->cl.con_id, rate);
+			return -1;
+		}
+	}
+	clk->rate = rate;
+	return 0;
 }
-EXPORT_SYMBOL(clk_get);
-
-void clk_put(struct clk *clk)
-{
-	/* not used */
-}
-EXPORT_SYMBOL(clk_put);
+EXPORT_SYMBOL(clk_set_rate);
 
 int clk_enable(struct clk *clk)
 {
-	/* not used */
-	return 0;
+	if (unlikely(!clk_good(clk)))
+		return -1;
+
+	if (clk->enable)
+		return clk->enable(clk);
+
+	return -1;
 }
 EXPORT_SYMBOL(clk_enable);
 
 void clk_disable(struct clk *clk)
 {
-	/* not used */
+	if (unlikely(!clk_good(clk)))
+		return;
+
+	if (clk->disable)
+		clk->disable(clk);
 }
 EXPORT_SYMBOL(clk_disable);
 
-static inline u32 ltq_get_counter_resolution(void)
+int clk_activate(struct clk *clk)
+{
+	if (unlikely(!clk_good(clk)))
+		return -1;
+
+	if (clk->activate)
+		return clk->activate(clk);
+
+	return -1;
+}
+EXPORT_SYMBOL(clk_activate);
+
+void clk_deactivate(struct clk *clk)
+{
+	if (unlikely(!clk_good(clk)))
+		return;
+
+	if (clk->deactivate)
+		clk->deactivate(clk);
+}
+EXPORT_SYMBOL(clk_deactivate);
+
+static inline u32 get_counter_resolution(void)
 {
 	u32 res;
 
@@ -133,21 +155,11 @@
 {
 	struct clk *clk;
 
-	if (insert_resource(&iomem_resource, &ltq_cgu_resource) < 0)
-		panic("Failed to insert cgu memory");
+	ltq_soc_init();
 
-	if (request_mem_region(ltq_cgu_resource.start,
-			resource_size(&ltq_cgu_resource), "cgu") < 0)
-		panic("Failed to request cgu memory");
-
-	ltq_cgu_membase = ioremap_nocache(ltq_cgu_resource.start,
-				resource_size(&ltq_cgu_resource));
-	if (!ltq_cgu_membase) {
-		pr_err("Failed to remap cgu memory\n");
-		unreachable();
-	}
-	clk = clk_get(0, "cpu");
-	mips_hpt_frequency = clk_get_rate(clk) / ltq_get_counter_resolution();
+	clk = clk_get_cpu();
+	mips_hpt_frequency = clk_get_rate(clk) / get_counter_resolution();
 	write_c0_compare(read_c0_count());
+	pr_info("CPU Clock: %ldMHz\n", clk_get_rate(clk) / 1000000);
 	clk_put(clk);
 }
diff --git a/arch/mips/lantiq/clk.h b/arch/mips/lantiq/clk.h
index 3328925..fa67060 100644
--- a/arch/mips/lantiq/clk.h
+++ b/arch/mips/lantiq/clk.h
@@ -9,10 +9,70 @@
 #ifndef _LTQ_CLK_H__
 #define _LTQ_CLK_H__
 
-extern void clk_init(void);
+#include <linux/clkdev.h>
 
-extern unsigned long ltq_get_cpu_hz(void);
-extern unsigned long ltq_get_fpi_hz(void);
-extern unsigned long ltq_get_io_region_clock(void);
+/* clock speeds */
+#define CLOCK_33M	33333333
+#define CLOCK_60M	60000000
+#define CLOCK_62_5M	62500000
+#define CLOCK_83M	83333333
+#define CLOCK_83_5M	83500000
+#define CLOCK_98_304M	98304000
+#define CLOCK_100M	100000000
+#define CLOCK_111M	111111111
+#define CLOCK_125M	125000000
+#define CLOCK_133M	133333333
+#define CLOCK_150M	150000000
+#define CLOCK_166M	166666666
+#define CLOCK_167M	166666667
+#define CLOCK_196_608M	196608000
+#define CLOCK_200M	200000000
+#define CLOCK_250M	250000000
+#define CLOCK_266M	266666666
+#define CLOCK_300M	300000000
+#define CLOCK_333M	333333333
+#define CLOCK_393M	393215332
+#define CLOCK_400M	400000000
+#define CLOCK_500M	500000000
+#define CLOCK_600M	600000000
+
+/* clock out speeds */
+#define CLOCK_32_768K	32768
+#define CLOCK_1_536M	1536000
+#define CLOCK_2_5M	2500000
+#define CLOCK_12M	12000000
+#define CLOCK_24M	24000000
+#define CLOCK_25M	25000000
+#define CLOCK_30M	30000000
+#define CLOCK_40M	40000000
+#define CLOCK_48M	48000000
+#define CLOCK_50M	50000000
+#define CLOCK_60M	60000000
+
+struct clk {
+	struct clk_lookup cl;
+	unsigned long rate;
+	unsigned long *rates;
+	unsigned int module;
+	unsigned int bits;
+	unsigned long (*get_rate) (void);
+	int (*enable) (struct clk *clk);
+	void (*disable) (struct clk *clk);
+	int (*activate) (struct clk *clk);
+	void (*deactivate) (struct clk *clk);
+	void (*reboot) (struct clk *clk);
+};
+
+extern void clkdev_add_static(unsigned long cpu, unsigned long fpi,
+				unsigned long io);
+
+extern unsigned long ltq_danube_cpu_hz(void);
+extern unsigned long ltq_danube_fpi_hz(void);
+
+extern unsigned long ltq_ar9_cpu_hz(void);
+extern unsigned long ltq_ar9_fpi_hz(void);
+
+extern unsigned long ltq_vr9_cpu_hz(void);
+extern unsigned long ltq_vr9_fpi_hz(void);
 
 #endif
diff --git a/arch/mips/lantiq/devices.c b/arch/mips/lantiq/devices.c
deleted file mode 100644
index de1cb2b..0000000
--- a/arch/mips/lantiq/devices.c
+++ /dev/null
@@ -1,120 +0,0 @@
-/*
- *  This program is free software; you can redistribute it and/or modify it
- *  under the terms of the GNU General Public License version 2 as published
- *  by the Free Software Foundation.
- *
- *  Copyright (C) 2010 John Crispin <blogic@openwrt.org>
- */
-
-#include <linux/init.h>
-#include <linux/export.h>
-#include <linux/types.h>
-#include <linux/string.h>
-#include <linux/kernel.h>
-#include <linux/reboot.h>
-#include <linux/platform_device.h>
-#include <linux/leds.h>
-#include <linux/etherdevice.h>
-#include <linux/time.h>
-#include <linux/io.h>
-#include <linux/gpio.h>
-
-#include <asm/bootinfo.h>
-#include <asm/irq.h>
-
-#include <lantiq_soc.h>
-
-#include "devices.h"
-
-/* nor flash */
-static struct resource ltq_nor_resource = {
-	.name	= "nor",
-	.start	= LTQ_FLASH_START,
-	.end	= LTQ_FLASH_START + LTQ_FLASH_MAX - 1,
-	.flags  = IORESOURCE_MEM,
-};
-
-static struct platform_device ltq_nor = {
-	.name		= "ltq_nor",
-	.resource	= &ltq_nor_resource,
-	.num_resources	= 1,
-};
-
-void __init ltq_register_nor(struct physmap_flash_data *data)
-{
-	ltq_nor.dev.platform_data = data;
-	platform_device_register(&ltq_nor);
-}
-
-/* watchdog */
-static struct resource ltq_wdt_resource = {
-	.name	= "watchdog",
-	.start  = LTQ_WDT_BASE_ADDR,
-	.end    = LTQ_WDT_BASE_ADDR + LTQ_WDT_SIZE - 1,
-	.flags  = IORESOURCE_MEM,
-};
-
-void __init ltq_register_wdt(void)
-{
-	platform_device_register_simple("ltq_wdt", 0, &ltq_wdt_resource, 1);
-}
-
-/* asc ports */
-static struct resource ltq_asc0_resources[] = {
-	{
-		.name	= "asc0",
-		.start  = LTQ_ASC0_BASE_ADDR,
-		.end    = LTQ_ASC0_BASE_ADDR + LTQ_ASC_SIZE - 1,
-		.flags  = IORESOURCE_MEM,
-	},
-	IRQ_RES(tx, LTQ_ASC_TIR(0)),
-	IRQ_RES(rx, LTQ_ASC_RIR(0)),
-	IRQ_RES(err, LTQ_ASC_EIR(0)),
-};
-
-static struct resource ltq_asc1_resources[] = {
-	{
-		.name	= "asc1",
-		.start  = LTQ_ASC1_BASE_ADDR,
-		.end    = LTQ_ASC1_BASE_ADDR + LTQ_ASC_SIZE - 1,
-		.flags  = IORESOURCE_MEM,
-	},
-	IRQ_RES(tx, LTQ_ASC_TIR(1)),
-	IRQ_RES(rx, LTQ_ASC_RIR(1)),
-	IRQ_RES(err, LTQ_ASC_EIR(1)),
-};
-
-void __init ltq_register_asc(int port)
-{
-	switch (port) {
-	case 0:
-		platform_device_register_simple("ltq_asc", 0,
-			ltq_asc0_resources, ARRAY_SIZE(ltq_asc0_resources));
-		break;
-	case 1:
-		platform_device_register_simple("ltq_asc", 1,
-			ltq_asc1_resources, ARRAY_SIZE(ltq_asc1_resources));
-		break;
-	default:
-		break;
-	}
-}
-
-#ifdef CONFIG_PCI
-/* pci */
-static struct platform_device ltq_pci = {
-	.name		= "ltq_pci",
-	.num_resources	= 0,
-};
-
-void __init ltq_register_pci(struct ltq_pci_data *data)
-{
-	ltq_pci.dev.platform_data = data;
-	platform_device_register(&ltq_pci);
-}
-#else
-void __init ltq_register_pci(struct ltq_pci_data *data)
-{
-	pr_err("kernel is compiled without PCI support\n");
-}
-#endif
diff --git a/arch/mips/lantiq/devices.h b/arch/mips/lantiq/devices.h
deleted file mode 100644
index 2947bb1..0000000
--- a/arch/mips/lantiq/devices.h
+++ /dev/null
@@ -1,23 +0,0 @@
-/*
- *  This program is free software; you can redistribute it and/or modify it
- *  under the terms of the GNU General Public License version 2 as published
- *  by the Free Software Foundation.
- *
- *  Copyright (C) 2010 John Crispin <blogic@openwrt.org>
- */
-
-#ifndef _LTQ_DEVICES_H__
-#define _LTQ_DEVICES_H__
-
-#include <lantiq_platform.h>
-#include <linux/mtd/physmap.h>
-
-#define IRQ_RES(resname, irq) \
-	{.name = #resname, .start = (irq), .flags = IORESOURCE_IRQ}
-
-extern void ltq_register_nor(struct physmap_flash_data *data);
-extern void ltq_register_wdt(void);
-extern void ltq_register_asc(int port);
-extern void ltq_register_pci(struct ltq_pci_data *data);
-
-#endif
diff --git a/arch/mips/lantiq/dts/Makefile b/arch/mips/lantiq/dts/Makefile
new file mode 100644
index 0000000..674fca4
--- /dev/null
+++ b/arch/mips/lantiq/dts/Makefile
@@ -0,0 +1,4 @@
+obj-$(CONFIG_DT_EASY50712) := easy50712.dtb.o
+
+$(obj)/%.dtb: $(obj)/%.dts
+	$(call if_changed,dtc)
diff --git a/arch/mips/lantiq/dts/danube.dtsi b/arch/mips/lantiq/dts/danube.dtsi
new file mode 100644
index 0000000..3a4520f
--- /dev/null
+++ b/arch/mips/lantiq/dts/danube.dtsi
@@ -0,0 +1,105 @@
+/ {
+	#address-cells = <1>;
+	#size-cells = <1>;
+	compatible = "lantiq,xway", "lantiq,danube";
+
+	cpus {
+		cpu@0 {
+			compatible = "mips,mips24Kc";
+		};
+	};
+
+	biu@1F800000 {
+		#address-cells = <1>;
+		#size-cells = <1>;
+		compatible = "lantiq,biu", "simple-bus";
+		reg = <0x1F800000 0x800000>;
+		ranges = <0x0 0x1F800000 0x7FFFFF>;
+
+		icu0: icu@80200 {
+			#interrupt-cells = <1>;
+			interrupt-controller;
+			compatible = "lantiq,icu";
+			reg = <0x80200 0x120>;
+		};
+
+		watchdog@803F0 {
+			compatible = "lantiq,wdt";
+			reg = <0x803F0 0x10>;
+		};
+	};
+
+	sram@1F000000 {
+		#address-cells = <1>;
+		#size-cells = <1>;
+		compatible = "lantiq,sram";
+		reg = <0x1F000000 0x800000>;
+		ranges = <0x0 0x1F000000 0x7FFFFF>;
+
+		eiu0: eiu@101000 {
+			#interrupt-cells = <1>;
+			interrupt-controller;
+			interrupt-parent;
+			compatible = "lantiq,eiu-xway";
+			reg = <0x101000 0x1000>;
+		};
+
+		pmu0: pmu@102000 {
+			compatible = "lantiq,pmu-xway";
+			reg = <0x102000 0x1000>;
+		};
+
+		cgu0: cgu@103000 {
+			compatible = "lantiq,cgu-xway";
+			reg = <0x103000 0x1000>;
+			#clock-cells = <1>;
+		};
+
+		rcu0: rcu@203000 {
+			compatible = "lantiq,rcu-xway";
+			reg = <0x203000 0x1000>;
+		};
+	};
+
+	fpi@10000000 {
+		#address-cells = <1>;
+		#size-cells = <1>;
+		compatible = "lantiq,fpi", "simple-bus";
+		ranges = <0x0 0x10000000 0xEEFFFFF>;
+		reg = <0x10000000 0xEF00000>;
+
+		gptu@E100A00 {
+			compatible = "lantiq,gptu-xway";
+			reg = <0xE100A00 0x100>;
+		};
+
+		serial@E100C00 {
+			compatible = "lantiq,asc";
+			reg = <0xE100C00 0x400>;
+			interrupt-parent = <&icu0>;
+			interrupts = <112 113 114>;
+		};
+
+		dma0: dma@E104100 {
+			compatible = "lantiq,dma-xway";
+			reg = <0xE104100 0x800>;
+		};
+
+		ebu0: ebu@E105300 {
+			compatible = "lantiq,ebu-xway";
+			reg = <0xE105300 0x100>;
+		};
+
+		pci0: pci@E105400 {
+			#address-cells = <3>;
+			#size-cells = <2>;
+			#interrupt-cells = <1>;
+			compatible = "lantiq,pci-xway";
+			bus-range = <0x0 0x0>;
+			ranges = <0x2000000 0 0x8000000 0x8000000 0 0x2000000	/* pci memory */
+				  0x1000000 0 0x00000000 0xAE00000 0 0x200000>;	/* io space */
+			reg = <0x7000000 0x8000		/* config space */
+				0xE105400 0x400>;	/* pci bridge */
+		};
+	};
+};
diff --git a/arch/mips/lantiq/dts/easy50712.dts b/arch/mips/lantiq/dts/easy50712.dts
new file mode 100644
index 0000000..68c1731
--- /dev/null
+++ b/arch/mips/lantiq/dts/easy50712.dts
@@ -0,0 +1,113 @@
+/dts-v1/;
+
+/include/ "danube.dtsi"
+
+/ {
+	chosen {
+		bootargs = "console=ttyLTQ0,115200 init=/etc/preinit";
+	};
+
+	memory@0 {
+		reg = <0x0 0x2000000>;
+	};
+
+	fpi@10000000 {
+		#address-cells = <1>;
+		#size-cells = <1>;
+		localbus@0 {
+			#address-cells = <2>;
+			#size-cells = <1>;
+			ranges = <0 0 0x0 0x3ffffff /* addrsel0 */
+				1 0 0x4000000 0x4000010>; /* addsel1 */
+			compatible = "lantiq,localbus", "simple-bus";
+
+			nor-boot@0 {
+				compatible = "lantiq,nor";
+				bank-width = <2>;
+				reg = <0 0x0 0x2000000>;
+				#address-cells = <1>;
+				#size-cells = <1>;
+
+				partition@0 {
+					label = "uboot";
+					reg = <0x00000 0x10000>; /* 64 KB */
+				};
+
+				partition@10000 {
+					label = "uboot_env";
+					reg = <0x10000 0x10000>; /* 64 KB */
+				};
+
+				partition@20000 {
+					label = "linux";
+					reg = <0x20000 0x3d0000>;
+				};
+
+				partition@400000 {
+					label = "rootfs";
+					reg = <0x400000 0x400000>;
+				};
+			};
+		};
+
+		gpio: pinmux@E100B10 {
+			compatible = "lantiq,pinctrl-xway";
+			pinctrl-names = "default";
+			pinctrl-0 = <&state_default>;
+
+			#gpio-cells = <2>;
+			gpio-controller;
+			reg = <0xE100B10 0xA0>;
+
+			state_default: pinmux {
+				stp {
+					lantiq,groups = "stp";
+					lantiq,function = "stp";
+				};
+				exin {
+					lantiq,groups = "exin1";
+					lantiq,function = "exin";
+				};
+				pci {
+					lantiq,groups = "gnt1";
+					lantiq,function = "pci";
+				};
+				conf_out {
+					lantiq,pins = "io4", "io5", "io6"; /* stp */
+					lantiq,open-drain;
+					lantiq,pull = <0>;
+				};
+			};
+		};
+
+		etop@E180000 {
+			compatible = "lantiq,etop-xway";
+			reg = <0xE180000 0x40000>;
+			interrupt-parent = <&icu0>;
+			interrupts = <73 78>;
+			phy-mode = "rmii";
+			mac-address = [ 00 11 22 33 44 55 ];
+		};
+
+		stp0: stp@E100BB0 {
+			#gpio-cells = <2>;
+			compatible = "lantiq,gpio-stp-xway";
+			gpio-controller;
+			reg = <0xE100BB0 0x40>;
+
+			lantiq,shadow = <0xfff>;
+			lantiq,groups = <0x3>;
+		};
+
+		pci@E105400 {
+			lantiq,bus-clock = <33333333>;
+			interrupt-map-mask = <0xf800 0x0 0x0 0x7>;
+			interrupt-map = <
+                                0x7000 0 0 1 &icu0 29 1 // slot 14, irq 29
+			>;
+			gpios-reset = <&gpio 21 0>;
+			req-mask = <0x1>;		/* GNT1 */
+		};
+
+	};
+};
diff --git a/arch/mips/lantiq/early_printk.c b/arch/mips/lantiq/early_printk.c
index 972e05f..9b28d09 100644
--- a/arch/mips/lantiq/early_printk.c
+++ b/arch/mips/lantiq/early_printk.c
@@ -6,17 +6,16 @@
  *  Copyright (C) 2010 John Crispin <blogic@openwrt.org>
  */
 
-#include <linux/init.h>
 #include <linux/cpu.h>
-
-#include <lantiq.h>
 #include <lantiq_soc.h>
 
-/* no ioremap possible at this early stage, lets use KSEG1 instead  */
-#define LTQ_ASC_BASE	KSEG1ADDR(LTQ_ASC1_BASE_ADDR)
 #define ASC_BUF		1024
-#define LTQ_ASC_FSTAT	((u32 *)(LTQ_ASC_BASE + 0x0048))
-#define LTQ_ASC_TBUF	((u32 *)(LTQ_ASC_BASE + 0x0020))
+#define LTQ_ASC_FSTAT	((u32 *)(LTQ_EARLY_ASC + 0x0048))
+#ifdef __BIG_ENDIAN
+#define LTQ_ASC_TBUF	((u32 *)(LTQ_EARLY_ASC + 0x0020 + 3))
+#else
+#define LTQ_ASC_TBUF	((u32 *)(LTQ_EARLY_ASC + 0x0020))
+#endif
 #define TXMASK		0x3F00
 #define TXOFFSET	8
 
@@ -27,7 +26,7 @@
 	local_irq_save(flags);
 	do { } while ((ltq_r32(LTQ_ASC_FSTAT) & TXMASK) >> TXOFFSET);
 	if (c == '\n')
-		ltq_w32('\r', LTQ_ASC_TBUF);
-	ltq_w32(c, LTQ_ASC_TBUF);
+		ltq_w8('\r', LTQ_ASC_TBUF);
+	ltq_w8(c, LTQ_ASC_TBUF);
 	local_irq_restore(flags);
 }
diff --git a/arch/mips/lantiq/falcon/Makefile b/arch/mips/lantiq/falcon/Makefile
new file mode 100644
index 0000000..ff220f9
--- /dev/null
+++ b/arch/mips/lantiq/falcon/Makefile
@@ -0,0 +1 @@
+obj-y := prom.o reset.o sysctrl.o
diff --git a/arch/mips/lantiq/falcon/prom.c b/arch/mips/lantiq/falcon/prom.c
new file mode 100644
index 0000000..c1d278f
--- /dev/null
+++ b/arch/mips/lantiq/falcon/prom.c
@@ -0,0 +1,87 @@
+/*
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published
+ * by the Free Software Foundation.
+ *
+ * Copyright (C) 2012 Thomas Langer <thomas.langer@lantiq.com>
+ * Copyright (C) 2012 John Crispin <blogic@openwrt.org>
+ */
+
+#include <linux/kernel.h>
+#include <asm/io.h>
+
+#include <lantiq_soc.h>
+
+#include "../prom.h"
+
+#define SOC_FALCON	"Falcon"
+#define SOC_FALCON_D	"Falcon-D"
+#define SOC_FALCON_V	"Falcon-V"
+#define SOC_FALCON_M	"Falcon-M"
+
+#define COMP_FALCON	"lantiq,falcon"
+
+#define PART_SHIFT	12
+#define PART_MASK	0x0FFFF000
+#define REV_SHIFT	28
+#define REV_MASK	0xF0000000
+#define SREV_SHIFT	22
+#define SREV_MASK	0x03C00000
+#define TYPE_SHIFT	26
+#define TYPE_MASK	0x3C000000
+
+/* reset, nmi and ejtag exception vectors */
+#define BOOT_REG_BASE	(KSEG1 | 0x1F200000)
+#define BOOT_RVEC	(BOOT_REG_BASE | 0x00)
+#define BOOT_NVEC	(BOOT_REG_BASE | 0x04)
+#define BOOT_EVEC	(BOOT_REG_BASE | 0x08)
+
+void __init ltq_soc_nmi_setup(void)
+{
+	extern void (*nmi_handler)(void);
+
+	ltq_w32((unsigned long)&nmi_handler, (void *)BOOT_NVEC);
+}
+
+void __init ltq_soc_ejtag_setup(void)
+{
+	extern void (*ejtag_debug_handler)(void);
+
+	ltq_w32((unsigned long)&ejtag_debug_handler, (void *)BOOT_EVEC);
+}
+
+void __init ltq_soc_detect(struct ltq_soc_info *i)
+{
+	u32 type;
+	i->partnum = (ltq_r32(FALCON_CHIPID) & PART_MASK) >> PART_SHIFT;
+	i->rev = (ltq_r32(FALCON_CHIPID) & REV_MASK) >> REV_SHIFT;
+	i->srev = ((ltq_r32(FALCON_CHIPCONF) & SREV_MASK) >> SREV_SHIFT);
+	i->compatible = COMP_FALCON;
+	i->type = SOC_TYPE_FALCON;
+	sprintf(i->rev_type, "%c%d%d", (i->srev & 0x4) ? ('B') : ('A'),
+		i->rev & 0x7, (i->srev & 0x3) + 1);
+
+	switch (i->partnum) {
+	case SOC_ID_FALCON:
+		type = (ltq_r32(FALCON_CHIPTYPE) & TYPE_MASK) >> TYPE_SHIFT;
+		switch (type) {
+		case 0:
+			i->name = SOC_FALCON_D;
+			break;
+		case 1:
+			i->name = SOC_FALCON_V;
+			break;
+		case 2:
+			i->name = SOC_FALCON_M;
+			break;
+		default:
+			i->name = SOC_FALCON;
+			break;
+		}
+		break;
+
+	default:
+		unreachable();
+		break;
+	}
+}
diff --git a/arch/mips/lantiq/falcon/reset.c b/arch/mips/lantiq/falcon/reset.c
new file mode 100644
index 0000000..5682482
--- /dev/null
+++ b/arch/mips/lantiq/falcon/reset.c
@@ -0,0 +1,90 @@
+/*
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published
+ * by the Free Software Foundation.
+ *
+ * Copyright (C) 2012 Thomas Langer <thomas.langer@lantiq.com>
+ * Copyright (C) 2012 John Crispin <blogic@openwrt.org>
+ */
+
+#include <linux/init.h>
+#include <linux/io.h>
+#include <linux/pm.h>
+#include <asm/reboot.h>
+#include <linux/export.h>
+
+#include <lantiq_soc.h>
+
+/* CPU0 Reset Source Register */
+#define SYS1_CPU0RS		0x0040
+/* reset cause mask */
+#define CPU0RS_MASK		0x0003
+/* CPU0 Boot Mode Register */
+#define SYS1_BM			0x00a0
+/* boot mode mask */
+#define BM_MASK			0x0005
+
+/* allow platform code to find out what source we booted from */
+unsigned char ltq_boot_select(void)
+{
+	return ltq_sys1_r32(SYS1_BM) & BM_MASK;
+}
+
+/* allow the watchdog driver to find out what the boot reason was */
+int ltq_reset_cause(void)
+{
+	return ltq_sys1_r32(SYS1_CPU0RS) & CPU0RS_MASK;
+}
+EXPORT_SYMBOL_GPL(ltq_reset_cause);
+
+#define BOOT_REG_BASE	(KSEG1 | 0x1F200000)
+#define BOOT_PW1_REG	(BOOT_REG_BASE | 0x20)
+#define BOOT_PW2_REG	(BOOT_REG_BASE | 0x24)
+#define BOOT_PW1	0x4C545100
+#define BOOT_PW2	0x0051544C
+
+#define WDT_REG_BASE	(KSEG1 | 0x1F8803F0)
+#define WDT_PW1		0x00BE0000
+#define WDT_PW2		0x00DC0000
+
+static void machine_restart(char *command)
+{
+	local_irq_disable();
+
+	/* reboot magic */
+	ltq_w32(BOOT_PW1, (void *)BOOT_PW1_REG); /* 'LTQ\0' */
+	ltq_w32(BOOT_PW2, (void *)BOOT_PW2_REG); /* '\0QTL' */
+	ltq_w32(0, (void *)BOOT_REG_BASE); /* reset Bootreg RVEC */
+
+	/* watchdog magic */
+	ltq_w32(WDT_PW1, (void *)WDT_REG_BASE);
+	ltq_w32(WDT_PW2 |
+		(0x3 << 26) | /* PWL */
+		(0x2 << 24) | /* CLKDIV */
+		(0x1 << 31) | /* enable */
+		(1), /* reload */
+		(void *)WDT_REG_BASE);
+	unreachable();
+}
+
+static void machine_halt(void)
+{
+	local_irq_disable();
+	unreachable();
+}
+
+static void machine_power_off(void)
+{
+	local_irq_disable();
+	unreachable();
+}
+
+static int __init mips_reboot_setup(void)
+{
+	_machine_restart = machine_restart;
+	_machine_halt = machine_halt;
+	pm_power_off = machine_power_off;
+	return 0;
+}
+
+arch_initcall(mips_reboot_setup);
diff --git a/arch/mips/lantiq/falcon/sysctrl.c b/arch/mips/lantiq/falcon/sysctrl.c
new file mode 100644
index 0000000..ba0123d
--- /dev/null
+++ b/arch/mips/lantiq/falcon/sysctrl.c
@@ -0,0 +1,260 @@
+/*
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published
+ * by the Free Software Foundation.
+ *
+ * Copyright (C) 2011 Thomas Langer <thomas.langer@lantiq.com>
+ * Copyright (C) 2011 John Crispin <blogic@openwrt.org>
+ */
+
+#include <linux/ioport.h>
+#include <linux/export.h>
+#include <linux/clkdev.h>
+#include <linux/of_address.h>
+#include <asm/delay.h>
+
+#include <lantiq_soc.h>
+
+#include "../clk.h"
+
+/* infrastructure control register */
+#define SYS1_INFRAC		0x00bc
+/* Configuration fuses for drivers and pll */
+#define STATUS_CONFIG		0x0040
+
+/* GPE frequency selection */
+#define GPPC_OFFSET		24
+#define GPEFREQ_MASK		0x00000C0
+#define GPEFREQ_OFFSET		10
+/* Clock status register */
+#define SYSCTL_CLKS		0x0000
+/* Clock enable register */
+#define SYSCTL_CLKEN		0x0004
+/* Clock clear register */
+#define SYSCTL_CLKCLR		0x0008
+/* Activation Status Register */
+#define SYSCTL_ACTS		0x0020
+/* Activation Register */
+#define SYSCTL_ACT		0x0024
+/* Deactivation Register */
+#define SYSCTL_DEACT		0x0028
+/* reboot Register */
+#define SYSCTL_RBT		0x002c
+/* CPU0 Clock Control Register */
+#define SYS1_CPU0CC		0x0040
+/* HRST_OUT_N Control Register */
+#define SYS1_HRSTOUTC		0x00c0
+/* clock divider bit */
+#define CPU0CC_CPUDIV		0x0001
+
+/* Activation Status Register */
+#define ACTS_ASC1_ACT	0x00000800
+#define ACTS_I2C_ACT	0x00004000
+#define ACTS_P0		0x00010000
+#define ACTS_P1		0x00010000
+#define ACTS_P2		0x00020000
+#define ACTS_P3		0x00020000
+#define ACTS_P4		0x00040000
+#define ACTS_PADCTRL0	0x00100000
+#define ACTS_PADCTRL1	0x00100000
+#define ACTS_PADCTRL2	0x00200000
+#define ACTS_PADCTRL3	0x00200000
+#define ACTS_PADCTRL4	0x00400000
+
+#define sysctl_w32(m, x, y)	ltq_w32((x), sysctl_membase[m] + (y))
+#define sysctl_r32(m, x)	ltq_r32(sysctl_membase[m] + (x))
+#define sysctl_w32_mask(m, clear, set, reg)	\
+		sysctl_w32(m, (sysctl_r32(m, reg) & ~(clear)) | (set), reg)
+
+#define status_w32(x, y)	ltq_w32((x), status_membase + (y))
+#define status_r32(x)		ltq_r32(status_membase + (x))
+
+static void __iomem *sysctl_membase[3], *status_membase;
+void __iomem *ltq_sys1_membase, *ltq_ebu_membase;
+
+void falcon_trigger_hrst(int level)
+{
+	sysctl_w32(SYSCTL_SYS1, level & 1, SYS1_HRSTOUTC);
+}
+
+static inline void sysctl_wait(struct clk *clk,
+		unsigned int test, unsigned int reg)
+{
+	int err = 1000000;
+
+	do {} while (--err && ((sysctl_r32(clk->module, reg)
+					& clk->bits) != test));
+	if (!err)
+		pr_err("module de/activation failed %d %08X %08X %08X\n",
+			clk->module, clk->bits, test,
+			sysctl_r32(clk->module, reg) & clk->bits);
+}
+
+static int sysctl_activate(struct clk *clk)
+{
+	sysctl_w32(clk->module, clk->bits, SYSCTL_CLKEN);
+	sysctl_w32(clk->module, clk->bits, SYSCTL_ACT);
+	sysctl_wait(clk, clk->bits, SYSCTL_ACTS);
+	return 0;
+}
+
+static void sysctl_deactivate(struct clk *clk)
+{
+	sysctl_w32(clk->module, clk->bits, SYSCTL_CLKCLR);
+	sysctl_w32(clk->module, clk->bits, SYSCTL_DEACT);
+	sysctl_wait(clk, 0, SYSCTL_ACTS);
+}
+
+static int sysctl_clken(struct clk *clk)
+{
+	sysctl_w32(clk->module, clk->bits, SYSCTL_CLKEN);
+	sysctl_wait(clk, clk->bits, SYSCTL_CLKS);
+	return 0;
+}
+
+static void sysctl_clkdis(struct clk *clk)
+{
+	sysctl_w32(clk->module, clk->bits, SYSCTL_CLKCLR);
+	sysctl_wait(clk, 0, SYSCTL_CLKS);
+}
+
+static void sysctl_reboot(struct clk *clk)
+{
+	unsigned int act;
+	unsigned int bits;
+
+	act = sysctl_r32(clk->module, SYSCTL_ACT);
+	bits = ~act & clk->bits;
+	if (bits != 0) {
+		sysctl_w32(clk->module, bits, SYSCTL_CLKEN);
+		sysctl_w32(clk->module, bits, SYSCTL_ACT);
+		sysctl_wait(clk, bits, SYSCTL_ACTS);
+	}
+	sysctl_w32(clk->module, act & clk->bits, SYSCTL_RBT);
+	sysctl_wait(clk, clk->bits, SYSCTL_ACTS);
+}
+
+/* enable the ONU core */
+static void falcon_gpe_enable(void)
+{
+	unsigned int freq;
+	unsigned int status;
+
+	/* check if the clock is already enabled */
+	status = sysctl_r32(SYSCTL_SYS1, SYS1_INFRAC);
+	if (status & (1 << (GPPC_OFFSET + 1)))
+		return;
+
+	if (status_r32(STATUS_CONFIG) == 0)
+		freq = 1; /* use 625MHz on unfused chip */
+	else
+		freq = (status_r32(STATUS_CONFIG) &
+			GPEFREQ_MASK) >>
+			GPEFREQ_OFFSET;
+
+	/* apply new frequency */
+	sysctl_w32_mask(SYSCTL_SYS1, 7 << (GPPC_OFFSET + 1),
+		freq << (GPPC_OFFSET + 2) , SYS1_INFRAC);
+	udelay(1);
+
+	/* enable new frequency */
+	sysctl_w32_mask(SYSCTL_SYS1, 0, 1 << (GPPC_OFFSET + 1), SYS1_INFRAC);
+	udelay(1);
+}
+
+static inline void clkdev_add_sys(const char *dev, unsigned int module,
+					unsigned int bits)
+{
+	struct clk *clk = kzalloc(sizeof(struct clk), GFP_KERNEL);
+
+	clk->cl.dev_id = dev;
+	clk->cl.con_id = NULL;
+	clk->cl.clk = clk;
+	clk->module = module;
+	clk->activate = sysctl_activate;
+	clk->deactivate = sysctl_deactivate;
+	clk->enable = sysctl_clken;
+	clk->disable = sysctl_clkdis;
+	clk->reboot = sysctl_reboot;
+	clkdev_add(&clk->cl);
+}
+
+void __init ltq_soc_init(void)
+{
+	struct device_node *np_status =
+		of_find_compatible_node(NULL, NULL, "lantiq,status-falcon");
+	struct device_node *np_ebu =
+		of_find_compatible_node(NULL, NULL, "lantiq,ebu-falcon");
+	struct device_node *np_sys1 =
+		of_find_compatible_node(NULL, NULL, "lantiq,sys1-falcon");
+	struct device_node *np_syseth =
+		of_find_compatible_node(NULL, NULL, "lantiq,syseth-falcon");
+	struct device_node *np_sysgpe =
+		of_find_compatible_node(NULL, NULL, "lantiq,sysgpe-falcon");
+	struct resource res_status, res_ebu, res_sys[3];
+	int i;
+
+	/* check if all the core register ranges are available */
+	if (!np_status || !np_ebu || !np_sys1 || !np_syseth || !np_sysgpe)
+		panic("Failed to load core nodes from devicetree");
+
+	if (of_address_to_resource(np_status, 0, &res_status) ||
+			of_address_to_resource(np_ebu, 0, &res_ebu) ||
+			of_address_to_resource(np_sys1, 0, &res_sys[0]) ||
+			of_address_to_resource(np_syseth, 0, &res_sys[1]) ||
+			of_address_to_resource(np_sysgpe, 0, &res_sys[2]))
+		panic("Failed to get core resources");
+
+	if ((request_mem_region(res_status.start, resource_size(&res_status),
+				res_status.name) < 0) ||
+		(request_mem_region(res_ebu.start, resource_size(&res_ebu),
+				res_ebu.name) < 0) ||
+		(request_mem_region(res_sys[0].start,
+				resource_size(&res_sys[0]),
+				res_sys[0].name) < 0) ||
+		(request_mem_region(res_sys[1].start,
+				resource_size(&res_sys[1]),
+				res_sys[1].name) < 0) ||
+		(request_mem_region(res_sys[2].start,
+				resource_size(&res_sys[2]),
+				res_sys[2].name) < 0))
+		pr_err("Failed to request core reources");
+
+	status_membase = ioremap_nocache(res_status.start,
+					resource_size(&res_status));
+	ltq_ebu_membase = ioremap_nocache(res_ebu.start,
+					resource_size(&res_ebu));
+
+	if (!status_membase || !ltq_ebu_membase)
+		panic("Failed to remap core resources");
+
+	for (i = 0; i < 3; i++) {
+		sysctl_membase[i] = ioremap_nocache(res_sys[i].start,
+						resource_size(&res_sys[i]));
+		if (!sysctl_membase[i])
+			panic("Failed to remap sysctrl resources");
+	}
+	ltq_sys1_membase = sysctl_membase[0];
+
+	falcon_gpe_enable();
+
+	/* get our 3 static rates for cpu, fpi and io clocks */
+	if (ltq_sys1_r32(SYS1_CPU0CC) & CPU0CC_CPUDIV)
+		clkdev_add_static(CLOCK_200M, CLOCK_100M, CLOCK_200M);
+	else
+		clkdev_add_static(CLOCK_400M, CLOCK_100M, CLOCK_200M);
+
+	/* add our clock domains */
+	clkdev_add_sys("1d810000.gpio", SYSCTL_SYSETH, ACTS_P0);
+	clkdev_add_sys("1d810100.gpio", SYSCTL_SYSETH, ACTS_P2);
+	clkdev_add_sys("1e800100.gpio", SYSCTL_SYS1, ACTS_P1);
+	clkdev_add_sys("1e800200.gpio", SYSCTL_SYS1, ACTS_P3);
+	clkdev_add_sys("1e800300.gpio", SYSCTL_SYS1, ACTS_P4);
+	clkdev_add_sys("1db01000.pad", SYSCTL_SYSETH, ACTS_PADCTRL0);
+	clkdev_add_sys("1db02000.pad", SYSCTL_SYSETH, ACTS_PADCTRL2);
+	clkdev_add_sys("1e800400.pad", SYSCTL_SYS1, ACTS_PADCTRL1);
+	clkdev_add_sys("1e800500.pad", SYSCTL_SYS1, ACTS_PADCTRL3);
+	clkdev_add_sys("1e800600.pad", SYSCTL_SYS1, ACTS_PADCTRL4);
+	clkdev_add_sys("1e100C00.serial", SYSCTL_SYS1, ACTS_ASC1_ACT);
+	clkdev_add_sys("1e200000.i2c", SYSCTL_SYS1, ACTS_I2C_ACT);
+}
diff --git a/arch/mips/lantiq/irq.c b/arch/mips/lantiq/irq.c
index d673731..57c1a4e 100644
--- a/arch/mips/lantiq/irq.c
+++ b/arch/mips/lantiq/irq.c
@@ -9,6 +9,11 @@
 
 #include <linux/interrupt.h>
 #include <linux/ioport.h>
+#include <linux/sched.h>
+#include <linux/irqdomain.h>
+#include <linux/of_platform.h>
+#include <linux/of_address.h>
+#include <linux/of_irq.h>
 
 #include <asm/bootinfo.h>
 #include <asm/irq_cpu.h>
@@ -16,7 +21,7 @@
 #include <lantiq_soc.h>
 #include <irq.h>
 
-/* register definitions */
+/* register definitions - internal irqs */
 #define LTQ_ICU_IM0_ISR		0x0000
 #define LTQ_ICU_IM0_IER		0x0008
 #define LTQ_ICU_IM0_IOSR	0x0010
@@ -25,6 +30,7 @@
 #define LTQ_ICU_IM1_ISR		0x0028
 #define LTQ_ICU_OFFSET		(LTQ_ICU_IM1_ISR - LTQ_ICU_IM0_ISR)
 
+/* register definitions - external irqs */
 #define LTQ_EIU_EXIN_C		0x0000
 #define LTQ_EIU_EXIN_INIC	0x0004
 #define LTQ_EIU_EXIN_INEN	0x000C
@@ -37,10 +43,14 @@
 #define LTQ_EIU_IR4		(INT_NUM_IM1_IRL0 + 1)
 #define LTQ_EIU_IR5		(INT_NUM_IM1_IRL0 + 2)
 #define LTQ_EIU_IR6		(INT_NUM_IM2_IRL0 + 30)
-
+#define XWAY_EXIN_COUNT		3
 #define MAX_EIU			6
 
-/* irqs generated by device attached to the EBU need to be acked in
+/* the performance counter */
+#define LTQ_PERF_IRQ		(INT_NUM_IM4_IRL0 + 31)
+
+/*
+ * irqs generated by devices attached to the EBU need to be acked in
  * a special manner
  */
 #define LTQ_ICU_EBU_IRQ		22
@@ -51,6 +61,17 @@
 #define ltq_eiu_w32(x, y)	ltq_w32((x), ltq_eiu_membase + (y))
 #define ltq_eiu_r32(x)		ltq_r32(ltq_eiu_membase + (x))
 
+/* our 2 ipi interrupts for VSMP */
+#define MIPS_CPU_IPI_RESCHED_IRQ	0
+#define MIPS_CPU_IPI_CALL_IRQ		1
+
+/* we have a cascade of 8 irqs */
+#define MIPS_CPU_IRQ_CASCADE		8
+
+#if defined(CONFIG_MIPS_MT_SMP) || defined(CONFIG_MIPS_MT_SMTC)
+int gic_present;
+#endif
+
 static unsigned short ltq_eiu_irq[MAX_EIU] = {
 	LTQ_EIU_IR0,
 	LTQ_EIU_IR1,
@@ -60,64 +81,51 @@
 	LTQ_EIU_IR5,
 };
 
-static struct resource ltq_icu_resource = {
-	.name	= "icu",
-	.start	= LTQ_ICU_BASE_ADDR,
-	.end	= LTQ_ICU_BASE_ADDR + LTQ_ICU_SIZE - 1,
-	.flags	= IORESOURCE_MEM,
-};
-
-static struct resource ltq_eiu_resource = {
-	.name	= "eiu",
-	.start	= LTQ_EIU_BASE_ADDR,
-	.end	= LTQ_EIU_BASE_ADDR + LTQ_ICU_SIZE - 1,
-	.flags	= IORESOURCE_MEM,
-};
-
+static int exin_avail;
 static void __iomem *ltq_icu_membase;
 static void __iomem *ltq_eiu_membase;
 
 void ltq_disable_irq(struct irq_data *d)
 {
 	u32 ier = LTQ_ICU_IM0_IER;
-	int irq_nr = d->irq - INT_NUM_IRQ0;
+	int offset = d->hwirq - MIPS_CPU_IRQ_CASCADE;
 
-	ier += LTQ_ICU_OFFSET * (irq_nr / INT_NUM_IM_OFFSET);
-	irq_nr %= INT_NUM_IM_OFFSET;
-	ltq_icu_w32(ltq_icu_r32(ier) & ~(1 << irq_nr), ier);
+	ier += LTQ_ICU_OFFSET * (offset / INT_NUM_IM_OFFSET);
+	offset %= INT_NUM_IM_OFFSET;
+	ltq_icu_w32(ltq_icu_r32(ier) & ~BIT(offset), ier);
 }
 
 void ltq_mask_and_ack_irq(struct irq_data *d)
 {
 	u32 ier = LTQ_ICU_IM0_IER;
 	u32 isr = LTQ_ICU_IM0_ISR;
-	int irq_nr = d->irq - INT_NUM_IRQ0;
+	int offset = d->hwirq - MIPS_CPU_IRQ_CASCADE;
 
-	ier += LTQ_ICU_OFFSET * (irq_nr / INT_NUM_IM_OFFSET);
-	isr += LTQ_ICU_OFFSET * (irq_nr / INT_NUM_IM_OFFSET);
-	irq_nr %= INT_NUM_IM_OFFSET;
-	ltq_icu_w32(ltq_icu_r32(ier) & ~(1 << irq_nr), ier);
-	ltq_icu_w32((1 << irq_nr), isr);
+	ier += LTQ_ICU_OFFSET * (offset / INT_NUM_IM_OFFSET);
+	isr += LTQ_ICU_OFFSET * (offset / INT_NUM_IM_OFFSET);
+	offset %= INT_NUM_IM_OFFSET;
+	ltq_icu_w32(ltq_icu_r32(ier) & ~BIT(offset), ier);
+	ltq_icu_w32(BIT(offset), isr);
 }
 
 static void ltq_ack_irq(struct irq_data *d)
 {
 	u32 isr = LTQ_ICU_IM0_ISR;
-	int irq_nr = d->irq - INT_NUM_IRQ0;
+	int offset = d->hwirq - MIPS_CPU_IRQ_CASCADE;
 
-	isr += LTQ_ICU_OFFSET * (irq_nr / INT_NUM_IM_OFFSET);
-	irq_nr %= INT_NUM_IM_OFFSET;
-	ltq_icu_w32((1 << irq_nr), isr);
+	isr += LTQ_ICU_OFFSET * (offset / INT_NUM_IM_OFFSET);
+	offset %= INT_NUM_IM_OFFSET;
+	ltq_icu_w32(BIT(offset), isr);
 }
 
 void ltq_enable_irq(struct irq_data *d)
 {
 	u32 ier = LTQ_ICU_IM0_IER;
-	int irq_nr = d->irq - INT_NUM_IRQ0;
+	int offset = d->hwirq - MIPS_CPU_IRQ_CASCADE;
 
-	ier += LTQ_ICU_OFFSET  * (irq_nr / INT_NUM_IM_OFFSET);
-	irq_nr %= INT_NUM_IM_OFFSET;
-	ltq_icu_w32(ltq_icu_r32(ier) | (1 << irq_nr), ier);
+	ier += LTQ_ICU_OFFSET  * (offset / INT_NUM_IM_OFFSET);
+	offset %= INT_NUM_IM_OFFSET;
+	ltq_icu_w32(ltq_icu_r32(ier) | BIT(offset), ier);
 }
 
 static unsigned int ltq_startup_eiu_irq(struct irq_data *d)
@@ -126,15 +134,15 @@
 
 	ltq_enable_irq(d);
 	for (i = 0; i < MAX_EIU; i++) {
-		if (d->irq == ltq_eiu_irq[i]) {
+		if (d->hwirq == ltq_eiu_irq[i]) {
 			/* low level - we should really handle set_type */
 			ltq_eiu_w32(ltq_eiu_r32(LTQ_EIU_EXIN_C) |
 				(0x6 << (i * 4)), LTQ_EIU_EXIN_C);
 			/* clear all pending */
-			ltq_eiu_w32(ltq_eiu_r32(LTQ_EIU_EXIN_INIC) & ~(1 << i),
+			ltq_eiu_w32(ltq_eiu_r32(LTQ_EIU_EXIN_INIC) & ~BIT(i),
 				LTQ_EIU_EXIN_INIC);
 			/* enable */
-			ltq_eiu_w32(ltq_eiu_r32(LTQ_EIU_EXIN_INEN) | (1 << i),
+			ltq_eiu_w32(ltq_eiu_r32(LTQ_EIU_EXIN_INEN) | BIT(i),
 				LTQ_EIU_EXIN_INEN);
 			break;
 		}
@@ -149,9 +157,9 @@
 
 	ltq_disable_irq(d);
 	for (i = 0; i < MAX_EIU; i++) {
-		if (d->irq == ltq_eiu_irq[i]) {
+		if (d->hwirq == ltq_eiu_irq[i]) {
 			/* disable */
-			ltq_eiu_w32(ltq_eiu_r32(LTQ_EIU_EXIN_INEN) & ~(1 << i),
+			ltq_eiu_w32(ltq_eiu_r32(LTQ_EIU_EXIN_INEN) & ~BIT(i),
 				LTQ_EIU_EXIN_INEN);
 			break;
 		}
@@ -188,14 +196,15 @@
 	if (irq == 0)
 		return;
 
-	/* silicon bug causes only the msb set to 1 to be valid. all
+	/*
+	 * silicon bug causes only the msb set to 1 to be valid. all
 	 * other bits might be bogus
 	 */
 	irq = __fls(irq);
-	do_IRQ((int)irq + INT_NUM_IM0_IRL0 + (INT_NUM_IM_OFFSET * module));
+	do_IRQ((int)irq + MIPS_CPU_IRQ_CASCADE + (INT_NUM_IM_OFFSET * module));
 
 	/* if this is a EBU irq, we need to ack it or get a deadlock */
-	if ((irq == LTQ_ICU_EBU_IRQ) && (module == 0))
+	if ((irq == LTQ_ICU_EBU_IRQ) && (module == 0) && LTQ_EBU_PCC_ISTAT)
 		ltq_ebu_w32(ltq_ebu_r32(LTQ_EBU_PCC_ISTAT) | 0x10,
 			LTQ_EBU_PCC_ISTAT);
 }
@@ -216,6 +225,47 @@
 	do_IRQ(MIPS_CPU_TIMER_IRQ);
 }
 
+#ifdef CONFIG_MIPS_MT_SMP
+void __init arch_init_ipiirq(int irq, struct irqaction *action)
+{
+	setup_irq(irq, action);
+	irq_set_handler(irq, handle_percpu_irq);
+}
+
+static void ltq_sw0_irqdispatch(void)
+{
+	do_IRQ(MIPS_CPU_IRQ_BASE + MIPS_CPU_IPI_RESCHED_IRQ);
+}
+
+static void ltq_sw1_irqdispatch(void)
+{
+	do_IRQ(MIPS_CPU_IRQ_BASE + MIPS_CPU_IPI_CALL_IRQ);
+}
+static irqreturn_t ipi_resched_interrupt(int irq, void *dev_id)
+{
+	scheduler_ipi();
+	return IRQ_HANDLED;
+}
+
+static irqreturn_t ipi_call_interrupt(int irq, void *dev_id)
+{
+	smp_call_function_interrupt();
+	return IRQ_HANDLED;
+}
+
+static struct irqaction irq_resched = {
+	.handler	= ipi_resched_interrupt,
+	.flags		= IRQF_PERCPU,
+	.name		= "IPI_resched"
+};
+
+static struct irqaction irq_call = {
+	.handler	= ipi_call_interrupt,
+	.flags		= IRQF_PERCPU,
+	.name		= "IPI_call"
+};
+#endif
+
 asmlinkage void plat_irq_dispatch(void)
 {
 	unsigned int pending = read_c0_status() & read_c0_cause() & ST0_IM;
@@ -238,45 +288,75 @@
 	return;
 }
 
+static int icu_map(struct irq_domain *d, unsigned int irq, irq_hw_number_t hw)
+{
+	struct irq_chip *chip = &ltq_irq_type;
+	int i;
+
+	for (i = 0; i < exin_avail; i++)
+		if (hw == ltq_eiu_irq[i])
+			chip = &ltq_eiu_type;
+
+	irq_set_chip_and_handler(hw, chip, handle_level_irq);
+
+	return 0;
+}
+
+static const struct irq_domain_ops irq_domain_ops = {
+	.xlate = irq_domain_xlate_onetwocell,
+	.map = icu_map,
+};
+
 static struct irqaction cascade = {
 	.handler = no_action,
 	.name = "cascade",
 };
 
-void __init arch_init_irq(void)
+int __init icu_of_init(struct device_node *node, struct device_node *parent)
 {
+	struct device_node *eiu_node;
+	struct resource res;
 	int i;
 
-	if (insert_resource(&iomem_resource, &ltq_icu_resource) < 0)
-		panic("Failed to insert icu memory");
+	if (of_address_to_resource(node, 0, &res))
+		panic("Failed to get icu memory range");
 
-	if (request_mem_region(ltq_icu_resource.start,
-			resource_size(&ltq_icu_resource), "icu") < 0)
-		panic("Failed to request icu memory");
+	if (request_mem_region(res.start, resource_size(&res), res.name) < 0)
+		pr_err("Failed to request icu memory");
 
-	ltq_icu_membase = ioremap_nocache(ltq_icu_resource.start,
-				resource_size(&ltq_icu_resource));
+	ltq_icu_membase = ioremap_nocache(res.start, resource_size(&res));
 	if (!ltq_icu_membase)
 		panic("Failed to remap icu memory");
 
-	if (insert_resource(&iomem_resource, &ltq_eiu_resource) < 0)
-		panic("Failed to insert eiu memory");
+	/* the external interrupts are optional and xway only */
+	eiu_node = of_find_compatible_node(NULL, NULL, "lantiq,eiu");
+	if (eiu_node && of_address_to_resource(eiu_node, 0, &res)) {
+		/* find out how many external irq sources we have */
+		const __be32 *count = of_get_property(node,
+							"lantiq,count",	NULL);
 
-	if (request_mem_region(ltq_eiu_resource.start,
-			resource_size(&ltq_eiu_resource), "eiu") < 0)
-		panic("Failed to request eiu memory");
+		if (count)
+			exin_avail = *count;
+		if (exin_avail > MAX_EIU)
+			exin_avail = MAX_EIU;
 
-	ltq_eiu_membase = ioremap_nocache(ltq_eiu_resource.start,
-				resource_size(&ltq_eiu_resource));
-	if (!ltq_eiu_membase)
-		panic("Failed to remap eiu memory");
+		if (request_mem_region(res.start, resource_size(&res),
+							res.name) < 0)
+			pr_err("Failed to request eiu memory");
 
-	/* make sure all irqs are turned off by default */
-	for (i = 0; i < 5; i++)
+		ltq_eiu_membase = ioremap_nocache(res.start,
+							resource_size(&res));
+		if (!ltq_eiu_membase)
+			panic("Failed to remap eiu memory");
+	}
+
+	/* turn off all irqs by default */
+	for (i = 0; i < 5; i++) {
+		/* make sure all irqs are turned off by default */
 		ltq_icu_w32(0, LTQ_ICU_IM0_IER + (i * LTQ_ICU_OFFSET));
-
-	/* clear all possibly pending interrupts */
-	ltq_icu_w32(~0, LTQ_ICU_IM0_ISR + (i * LTQ_ICU_OFFSET));
+		/* clear all possibly pending interrupts */
+		ltq_icu_w32(~0, LTQ_ICU_IM0_ISR + (i * LTQ_ICU_OFFSET));
+	}
 
 	mips_cpu_irq_init();
 
@@ -293,20 +373,19 @@
 		set_vi_handler(7, ltq_hw5_irqdispatch);
 	}
 
-	for (i = INT_NUM_IRQ0;
-		i <= (INT_NUM_IRQ0 + (5 * INT_NUM_IM_OFFSET)); i++)
-		if ((i == LTQ_EIU_IR0) || (i == LTQ_EIU_IR1) ||
-			(i == LTQ_EIU_IR2))
-			irq_set_chip_and_handler(i, &ltq_eiu_type,
-				handle_level_irq);
-		/* EIU3-5 only exist on ar9 and vr9 */
-		else if (((i == LTQ_EIU_IR3) || (i == LTQ_EIU_IR4) ||
-			(i == LTQ_EIU_IR5)) && (ltq_is_ar9() || ltq_is_vr9()))
-			irq_set_chip_and_handler(i, &ltq_eiu_type,
-				handle_level_irq);
-		else
-			irq_set_chip_and_handler(i, &ltq_irq_type,
-				handle_level_irq);
+	irq_domain_add_linear(node, 6 * INT_NUM_IM_OFFSET,
+		&irq_domain_ops, 0);
+
+#if defined(CONFIG_MIPS_MT_SMP)
+	if (cpu_has_vint) {
+		pr_info("Setting up IPI vectored interrupts\n");
+		set_vi_handler(MIPS_CPU_IPI_RESCHED_IRQ, ltq_sw0_irqdispatch);
+		set_vi_handler(MIPS_CPU_IPI_CALL_IRQ, ltq_sw1_irqdispatch);
+	}
+	arch_init_ipiirq(MIPS_CPU_IRQ_BASE + MIPS_CPU_IPI_RESCHED_IRQ,
+		&irq_resched);
+	arch_init_ipiirq(MIPS_CPU_IRQ_BASE + MIPS_CPU_IPI_CALL_IRQ, &irq_call);
+#endif
 
 #if !defined(CONFIG_MIPS_MT_SMP) && !defined(CONFIG_MIPS_MT_SMTC)
 	set_c0_status(IE_IRQ0 | IE_IRQ1 | IE_IRQ2 |
@@ -315,9 +394,23 @@
 	set_c0_status(IE_SW0 | IE_SW1 | IE_IRQ0 | IE_IRQ1 |
 		IE_IRQ2 | IE_IRQ3 | IE_IRQ4 | IE_IRQ5);
 #endif
+
+	/* tell oprofile which irq to use */
+	cp0_perfcount_irq = LTQ_PERF_IRQ;
+	return 0;
 }
 
 unsigned int __cpuinit get_c0_compare_int(void)
 {
 	return CP0_LEGACY_COMPARE_IRQ;
 }
+
+static struct of_device_id __initdata of_irq_ids[] = {
+	{ .compatible = "lantiq,icu", .data = icu_of_init },
+	{},
+};
+
+void __init arch_init_irq(void)
+{
+	of_irq_init(of_irq_ids);
+}
diff --git a/arch/mips/lantiq/machtypes.h b/arch/mips/lantiq/machtypes.h
deleted file mode 100644
index 7e01b8c..0000000
--- a/arch/mips/lantiq/machtypes.h
+++ /dev/null
@@ -1,20 +0,0 @@
-/*
- *  This program is free software; you can redistribute it and/or modify it
- *  under the terms of the GNU General Public License version 2 as published
- *  by the Free Software Foundation.
- *
- *  Copyright (C) 2010 John Crispin <blogic@openwrt.org>
- */
-
-#ifndef _LANTIQ_MACH_H__
-#define _LANTIQ_MACH_H__
-
-#include <asm/mips_machine.h>
-
-enum lantiq_mach_type {
-	LTQ_MACH_GENERIC = 0,
-	LTQ_MACH_EASY50712,	/* Danube evaluation board */
-	LTQ_MACH_EASY50601,	/* Amazon SE evaluation board */
-};
-
-#endif
diff --git a/arch/mips/lantiq/prom.c b/arch/mips/lantiq/prom.c
index e34fcfd..d185e84 100644
--- a/arch/mips/lantiq/prom.c
+++ b/arch/mips/lantiq/prom.c
@@ -8,6 +8,7 @@
 
 #include <linux/export.h>
 #include <linux/clk.h>
+#include <linux/of_platform.h>
 #include <asm/bootinfo.h>
 #include <asm/time.h>
 
@@ -16,20 +17,16 @@
 #include "prom.h"
 #include "clk.h"
 
+/* access to the ebu needs to be locked between different drivers */
+DEFINE_SPINLOCK(ebu_lock);
+EXPORT_SYMBOL_GPL(ebu_lock);
+
+/*
+ * this struct is filled by the soc specific detection code and holds
+ * information about the specific soc type, revision and name
+ */
 static struct ltq_soc_info soc_info;
 
-unsigned int ltq_get_cpu_ver(void)
-{
-	return soc_info.rev;
-}
-EXPORT_SYMBOL(ltq_get_cpu_ver);
-
-unsigned int ltq_get_soc_type(void)
-{
-	return soc_info.type;
-}
-EXPORT_SYMBOL(ltq_get_soc_type);
-
 const char *get_system_type(void)
 {
 	return soc_info.sys_type;
@@ -45,27 +42,62 @@
 	char **argv = (char **) KSEG1ADDR(fw_arg1);
 	int i;
 
-	for (i = 0; i < argc; i++) {
-		char *p = (char *)  KSEG1ADDR(argv[i]);
+	arcs_cmdline[0] = '\0';
 
-		if (p && *p) {
+	for (i = 0; i < argc; i++) {
+		char *p = (char *) KSEG1ADDR(argv[i]);
+
+		if (CPHYSADDR(p) && *p) {
 			strlcat(arcs_cmdline, p, sizeof(arcs_cmdline));
 			strlcat(arcs_cmdline, " ", sizeof(arcs_cmdline));
 		}
 	}
 }
 
+void __init plat_mem_setup(void)
+{
+	ioport_resource.start = IOPORT_RESOURCE_START;
+	ioport_resource.end = IOPORT_RESOURCE_END;
+	iomem_resource.start = IOMEM_RESOURCE_START;
+	iomem_resource.end = IOMEM_RESOURCE_END;
+
+	set_io_port_base((unsigned long) KSEG1);
+
+	/*
+	 * Load the builtin devicetree. This causes the chosen node to be
+	 * parsed, resulting in our memory being registered
+	 */
+	__dt_setup_arch(&__dtb_start);
+}
+
 void __init prom_init(void)
 {
-	struct clk *clk;
-
+	/* call the soc specific detection code and get it to fill soc_info */
 	ltq_soc_detect(&soc_info);
-	clk_init();
-	clk = clk_get(0, "cpu");
-	snprintf(soc_info.sys_type, LTQ_SYS_TYPE_LEN - 1, "%s rev1.%d",
-		soc_info.name, soc_info.rev);
-	clk_put(clk);
+	snprintf(soc_info.sys_type, LTQ_SYS_TYPE_LEN - 1, "%s rev %s",
+		soc_info.name, soc_info.rev_type);
 	soc_info.sys_type[LTQ_SYS_TYPE_LEN - 1] = '\0';
 	pr_info("SoC: %s\n", soc_info.sys_type);
 	prom_init_cmdline();
+
+#if defined(CONFIG_MIPS_MT_SMP)
+	if (register_vsmp_smp_ops())
+		panic("failed to register_vsmp_smp_ops()");
+#endif
 }
+
+int __init plat_of_setup(void)
+{
+	static struct of_device_id of_ids[3];
+
+	if (!of_have_populated_dt())
+		panic("device tree not present");
+
+	strncpy(of_ids[0].compatible, soc_info.compatible,
+		sizeof(of_ids[0].compatible));
+	strncpy(of_ids[1].compatible, "simple-bus",
+		sizeof(of_ids[1].compatible));
+	return of_platform_bus_probe(NULL, of_ids, NULL);
+}
+
+arch_initcall(plat_of_setup);
diff --git a/arch/mips/lantiq/prom.h b/arch/mips/lantiq/prom.h
index b4229d9..a3fa1a2 100644
--- a/arch/mips/lantiq/prom.h
+++ b/arch/mips/lantiq/prom.h
@@ -10,16 +10,22 @@
 #define _LTQ_PROM_H__
 
 #define LTQ_SYS_TYPE_LEN	0x100
+#define LTQ_SYS_REV_LEN	0x10
 
 struct ltq_soc_info {
 	unsigned char *name;
 	unsigned int rev;
+	unsigned char rev_type[LTQ_SYS_REV_LEN];
+	unsigned int srev;
 	unsigned int partnum;
 	unsigned int type;
 	unsigned char sys_type[LTQ_SYS_TYPE_LEN];
+	unsigned char *compatible;
 };
 
 extern void ltq_soc_detect(struct ltq_soc_info *i);
-extern void ltq_soc_setup(void);
+extern void ltq_soc_init(void);
+
+extern struct boot_param_header __dtb_start;
 
 #endif
diff --git a/arch/mips/lantiq/setup.c b/arch/mips/lantiq/setup.c
deleted file mode 100644
index 1ff6c9d..0000000
--- a/arch/mips/lantiq/setup.c
+++ /dev/null
@@ -1,66 +0,0 @@
-/*
- *  This program is free software; you can redistribute it and/or modify it
- *  under the terms of the GNU General Public License version 2 as published
- *  by the Free Software Foundation.
- *
- * Copyright (C) 2010 John Crispin <blogic@openwrt.org>
- */
-
-#include <linux/kernel.h>
-#include <linux/export.h>
-#include <linux/io.h>
-#include <linux/ioport.h>
-#include <asm/bootinfo.h>
-
-#include <lantiq_soc.h>
-
-#include "machtypes.h"
-#include "devices.h"
-#include "prom.h"
-
-void __init plat_mem_setup(void)
-{
-	/* assume 16M as default incase uboot fails to pass proper ramsize */
-	unsigned long memsize = 16;
-	char **envp = (char **) KSEG1ADDR(fw_arg2);
-
-	ioport_resource.start = IOPORT_RESOURCE_START;
-	ioport_resource.end = IOPORT_RESOURCE_END;
-	iomem_resource.start = IOMEM_RESOURCE_START;
-	iomem_resource.end = IOMEM_RESOURCE_END;
-
-	set_io_port_base((unsigned long) KSEG1);
-
-	while (*envp) {
-		char *e = (char *)KSEG1ADDR(*envp);
-		if (!strncmp(e, "memsize=", 8)) {
-			e += 8;
-			if (strict_strtoul(e, 0, &memsize))
-				pr_warn("bad memsize specified\n");
-		}
-		envp++;
-	}
-	memsize *= 1024 * 1024;
-	add_memory_region(0x00000000, memsize, BOOT_MEM_RAM);
-}
-
-static int __init
-lantiq_setup(void)
-{
-	ltq_soc_setup();
-	mips_machine_setup();
-	return 0;
-}
-
-arch_initcall(lantiq_setup);
-
-static void __init
-lantiq_generic_init(void)
-{
-	/* Nothing to do */
-}
-
-MIPS_MACHINE(LTQ_MACH_GENERIC,
-	     "Generic",
-	     "Generic Lantiq based board",
-	     lantiq_generic_init);
diff --git a/arch/mips/lantiq/xway/Kconfig b/arch/mips/lantiq/xway/Kconfig
deleted file mode 100644
index 2b857de..0000000
--- a/arch/mips/lantiq/xway/Kconfig
+++ /dev/null
@@ -1,23 +0,0 @@
-if SOC_XWAY
-
-menu "MIPS Machine"
-
-config LANTIQ_MACH_EASY50712
-	bool "Easy50712 - Danube"
-	default y
-
-endmenu
-
-endif
-
-if SOC_AMAZON_SE
-
-menu "MIPS Machine"
-
-config LANTIQ_MACH_EASY50601
-	bool "Easy50601 - Amazon SE"
-	default y
-
-endmenu
-
-endif
diff --git a/arch/mips/lantiq/xway/Makefile b/arch/mips/lantiq/xway/Makefile
index c517f2e..dc3194f 100644
--- a/arch/mips/lantiq/xway/Makefile
+++ b/arch/mips/lantiq/xway/Makefile
@@ -1,7 +1 @@
-obj-y := pmu.o ebu.o reset.o gpio.o gpio_stp.o gpio_ebu.o devices.o dma.o
-
-obj-$(CONFIG_SOC_XWAY) += clk-xway.o prom-xway.o setup-xway.o
-obj-$(CONFIG_SOC_AMAZON_SE) += clk-ase.o prom-ase.o setup-ase.o
-
-obj-$(CONFIG_LANTIQ_MACH_EASY50712) += mach-easy50712.o
-obj-$(CONFIG_LANTIQ_MACH_EASY50601) += mach-easy50601.o
+obj-y := prom.o sysctrl.o clk.o reset.o gpio.o dma.o
diff --git a/arch/mips/lantiq/xway/clk-ase.c b/arch/mips/lantiq/xway/clk-ase.c
deleted file mode 100644
index 6522583..0000000
--- a/arch/mips/lantiq/xway/clk-ase.c
+++ /dev/null
@@ -1,48 +0,0 @@
-/*
- *  This program is free software; you can redistribute it and/or modify it
- *  under the terms of the GNU General Public License version 2 as published
- *  by the Free Software Foundation.
- *
- *  Copyright (C) 2011 John Crispin <blogic@openwrt.org>
- */
-
-#include <linux/io.h>
-#include <linux/export.h>
-#include <linux/init.h>
-#include <linux/clk.h>
-
-#include <asm/time.h>
-#include <asm/irq.h>
-#include <asm/div64.h>
-
-#include <lantiq_soc.h>
-
-/* cgu registers */
-#define LTQ_CGU_SYS	0x0010
-
-unsigned int ltq_get_io_region_clock(void)
-{
-	return CLOCK_133M;
-}
-EXPORT_SYMBOL(ltq_get_io_region_clock);
-
-unsigned int ltq_get_fpi_bus_clock(int fpi)
-{
-	return CLOCK_133M;
-}
-EXPORT_SYMBOL(ltq_get_fpi_bus_clock);
-
-unsigned int ltq_get_cpu_hz(void)
-{
-	if (ltq_cgu_r32(LTQ_CGU_SYS) & (1 << 5))
-		return CLOCK_266M;
-	else
-		return CLOCK_133M;
-}
-EXPORT_SYMBOL(ltq_get_cpu_hz);
-
-unsigned int ltq_get_fpi_hz(void)
-{
-	return CLOCK_133M;
-}
-EXPORT_SYMBOL(ltq_get_fpi_hz);
diff --git a/arch/mips/lantiq/xway/clk-xway.c b/arch/mips/lantiq/xway/clk-xway.c
deleted file mode 100644
index 696b1a3..0000000
--- a/arch/mips/lantiq/xway/clk-xway.c
+++ /dev/null
@@ -1,223 +0,0 @@
-/*
- *  This program is free software; you can redistribute it and/or modify it
- *  under the terms of the GNU General Public License version 2 as published
- *  by the Free Software Foundation.
- *
- *  Copyright (C) 2010 John Crispin <blogic@openwrt.org>
- */
-
-#include <linux/io.h>
-#include <linux/export.h>
-#include <linux/init.h>
-#include <linux/clk.h>
-
-#include <asm/time.h>
-#include <asm/irq.h>
-#include <asm/div64.h>
-
-#include <lantiq_soc.h>
-
-static unsigned int ltq_ram_clocks[] = {
-	CLOCK_167M, CLOCK_133M, CLOCK_111M, CLOCK_83M };
-#define DDR_HZ ltq_ram_clocks[ltq_cgu_r32(LTQ_CGU_SYS) & 0x3]
-
-#define BASIC_FREQUENCY_1	35328000
-#define BASIC_FREQUENCY_2	36000000
-#define BASIS_REQUENCY_USB	12000000
-
-#define GET_BITS(x, msb, lsb) \
-	(((x) & ((1 << ((msb) + 1)) - 1)) >> (lsb))
-
-#define LTQ_CGU_PLL0_CFG	0x0004
-#define LTQ_CGU_PLL1_CFG	0x0008
-#define LTQ_CGU_PLL2_CFG	0x000C
-#define LTQ_CGU_SYS		0x0010
-#define LTQ_CGU_UPDATE		0x0014
-#define LTQ_CGU_IF_CLK		0x0018
-#define LTQ_CGU_OSC_CON		0x001C
-#define LTQ_CGU_SMD		0x0020
-#define LTQ_CGU_CT1SR		0x0028
-#define LTQ_CGU_CT2SR		0x002C
-#define LTQ_CGU_PCMCR		0x0030
-#define LTQ_CGU_PCI_CR		0x0034
-#define LTQ_CGU_PD_PC		0x0038
-#define LTQ_CGU_FMR		0x003C
-
-#define CGU_PLL0_PHASE_DIVIDER_ENABLE	\
-	(ltq_cgu_r32(LTQ_CGU_PLL0_CFG) & (1 << 31))
-#define CGU_PLL0_BYPASS			\
-	(ltq_cgu_r32(LTQ_CGU_PLL0_CFG) & (1 << 30))
-#define CGU_PLL0_CFG_DSMSEL		\
-	(ltq_cgu_r32(LTQ_CGU_PLL0_CFG) & (1 << 28))
-#define CGU_PLL0_CFG_FRAC_EN		\
-	(ltq_cgu_r32(LTQ_CGU_PLL0_CFG) & (1 << 27))
-#define CGU_PLL1_SRC			\
-	(ltq_cgu_r32(LTQ_CGU_PLL1_CFG) & (1 << 31))
-#define CGU_PLL2_PHASE_DIVIDER_ENABLE	\
-	(ltq_cgu_r32(LTQ_CGU_PLL2_CFG) & (1 << 20))
-#define CGU_SYS_FPI_SEL			(1 << 6)
-#define CGU_SYS_DDR_SEL			0x3
-#define CGU_PLL0_SRC			(1 << 29)
-
-#define CGU_PLL0_CFG_PLLK	GET_BITS(ltq_cgu_r32(LTQ_CGU_PLL0_CFG), 26, 17)
-#define CGU_PLL0_CFG_PLLN	GET_BITS(ltq_cgu_r32(LTQ_CGU_PLL0_CFG), 12, 6)
-#define CGU_PLL0_CFG_PLLM	GET_BITS(ltq_cgu_r32(LTQ_CGU_PLL0_CFG), 5, 2)
-#define CGU_PLL2_SRC		GET_BITS(ltq_cgu_r32(LTQ_CGU_PLL2_CFG), 18, 17)
-#define CGU_PLL2_CFG_INPUT_DIV	GET_BITS(ltq_cgu_r32(LTQ_CGU_PLL2_CFG), 16, 13)
-
-static unsigned int ltq_get_pll0_fdiv(void);
-
-static inline unsigned int get_input_clock(int pll)
-{
-	switch (pll) {
-	case 0:
-		if (ltq_cgu_r32(LTQ_CGU_PLL0_CFG) & CGU_PLL0_SRC)
-			return BASIS_REQUENCY_USB;
-		else if (CGU_PLL0_PHASE_DIVIDER_ENABLE)
-			return BASIC_FREQUENCY_1;
-		else
-			return BASIC_FREQUENCY_2;
-	case 1:
-		if (CGU_PLL1_SRC)
-			return BASIS_REQUENCY_USB;
-		else if (CGU_PLL0_PHASE_DIVIDER_ENABLE)
-			return BASIC_FREQUENCY_1;
-		else
-			return BASIC_FREQUENCY_2;
-	case 2:
-		switch (CGU_PLL2_SRC) {
-		case 0:
-			return ltq_get_pll0_fdiv();
-		case 1:
-			return CGU_PLL2_PHASE_DIVIDER_ENABLE ?
-				BASIC_FREQUENCY_1 :
-				BASIC_FREQUENCY_2;
-		case 2:
-			return BASIS_REQUENCY_USB;
-		}
-	default:
-		return 0;
-	}
-}
-
-static inline unsigned int cal_dsm(int pll, unsigned int num, unsigned int den)
-{
-	u64 res, clock = get_input_clock(pll);
-
-	res = num * clock;
-	do_div(res, den);
-	return res;
-}
-
-static inline unsigned int mash_dsm(int pll, unsigned int M, unsigned int N,
-	unsigned int K)
-{
-	unsigned int num = ((N + 1) << 10) + K;
-	unsigned int den = (M + 1) << 10;
-
-	return cal_dsm(pll, num, den);
-}
-
-static inline unsigned int ssff_dsm_1(int pll, unsigned int M, unsigned int N,
-	unsigned int K)
-{
-	unsigned int num = ((N + 1) << 11) + K + 512;
-	unsigned int den = (M + 1) << 11;
-
-	return cal_dsm(pll, num, den);
-}
-
-static inline unsigned int ssff_dsm_2(int pll, unsigned int M, unsigned int N,
-	unsigned int K)
-{
-	unsigned int num = K >= 512 ?
-		((N + 1) << 12) + K - 512 : ((N + 1) << 12) + K + 3584;
-	unsigned int den = (M + 1) << 12;
-
-	return cal_dsm(pll, num, den);
-}
-
-static inline unsigned int dsm(int pll, unsigned int M, unsigned int N,
-	unsigned int K, unsigned int dsmsel, unsigned int phase_div_en)
-{
-	if (!dsmsel)
-		return mash_dsm(pll, M, N, K);
-	else if (!phase_div_en)
-		return mash_dsm(pll, M, N, K);
-	else
-		return ssff_dsm_2(pll, M, N, K);
-}
-
-static inline unsigned int ltq_get_pll0_fosc(void)
-{
-	if (CGU_PLL0_BYPASS)
-		return get_input_clock(0);
-	else
-		return !CGU_PLL0_CFG_FRAC_EN
-			? dsm(0, CGU_PLL0_CFG_PLLM, CGU_PLL0_CFG_PLLN, 0,
-				CGU_PLL0_CFG_DSMSEL,
-				CGU_PLL0_PHASE_DIVIDER_ENABLE)
-			: dsm(0, CGU_PLL0_CFG_PLLM, CGU_PLL0_CFG_PLLN,
-				CGU_PLL0_CFG_PLLK, CGU_PLL0_CFG_DSMSEL,
-				CGU_PLL0_PHASE_DIVIDER_ENABLE);
-}
-
-static unsigned int ltq_get_pll0_fdiv(void)
-{
-	unsigned int div = CGU_PLL2_CFG_INPUT_DIV + 1;
-
-	return (ltq_get_pll0_fosc() + (div >> 1)) / div;
-}
-
-unsigned int ltq_get_io_region_clock(void)
-{
-	unsigned int ret = ltq_get_pll0_fosc();
-
-	switch (ltq_cgu_r32(LTQ_CGU_PLL2_CFG) & CGU_SYS_DDR_SEL) {
-	default:
-	case 0:
-		return (ret + 1) / 2;
-	case 1:
-		return (ret * 2 + 2) / 5;
-	case 2:
-		return (ret + 1) / 3;
-	case 3:
-		return (ret + 2) / 4;
-	}
-}
-EXPORT_SYMBOL(ltq_get_io_region_clock);
-
-unsigned int ltq_get_fpi_bus_clock(int fpi)
-{
-	unsigned int ret = ltq_get_io_region_clock();
-
-	if ((fpi == 2) && (ltq_cgu_r32(LTQ_CGU_SYS) & CGU_SYS_FPI_SEL))
-		ret >>= 1;
-	return ret;
-}
-EXPORT_SYMBOL(ltq_get_fpi_bus_clock);
-
-unsigned int ltq_get_cpu_hz(void)
-{
-	switch (ltq_cgu_r32(LTQ_CGU_SYS) & 0xc) {
-	case 0:
-		return CLOCK_333M;
-	case 4:
-		return DDR_HZ;
-	case 8:
-		return DDR_HZ << 1;
-	default:
-		return DDR_HZ >> 1;
-	}
-}
-EXPORT_SYMBOL(ltq_get_cpu_hz);
-
-unsigned int ltq_get_fpi_hz(void)
-{
-	unsigned int ddr_clock = DDR_HZ;
-
-	if (ltq_cgu_r32(LTQ_CGU_SYS) & 0x40)
-		return ddr_clock >> 1;
-	return ddr_clock;
-}
-EXPORT_SYMBOL(ltq_get_fpi_hz);
diff --git a/arch/mips/lantiq/xway/clk.c b/arch/mips/lantiq/xway/clk.c
new file mode 100644
index 0000000..9aa17f7
--- /dev/null
+++ b/arch/mips/lantiq/xway/clk.c
@@ -0,0 +1,151 @@
+/*
+ *  This program is free software; you can redistribute it and/or modify it
+ *  under the terms of the GNU General Public License version 2 as published
+ *  by the Free Software Foundation.
+ *
+ *  Copyright (C) 2010 John Crispin <blogic@openwrt.org>
+ */
+
+#include <linux/io.h>
+#include <linux/export.h>
+#include <linux/init.h>
+#include <linux/clk.h>
+
+#include <asm/time.h>
+#include <asm/irq.h>
+#include <asm/div64.h>
+
+#include <lantiq_soc.h>
+
+#include "../clk.h"
+
+static unsigned int ram_clocks[] = {
+	CLOCK_167M, CLOCK_133M, CLOCK_111M, CLOCK_83M };
+#define DDR_HZ ram_clocks[ltq_cgu_r32(CGU_SYS) & 0x3]
+
+/* legacy xway clock */
+#define CGU_SYS			0x10
+
+/* vr9 clock */
+#define CGU_SYS_VR9		0x0c
+#define CGU_IF_CLK_VR9		0x24
+
+unsigned long ltq_danube_fpi_hz(void)
+{
+	unsigned long ddr_clock = DDR_HZ;
+
+	if (ltq_cgu_r32(CGU_SYS) & 0x40)
+		return ddr_clock >> 1;
+	return ddr_clock;
+}
+
+unsigned long ltq_danube_cpu_hz(void)
+{
+	switch (ltq_cgu_r32(CGU_SYS) & 0xc) {
+	case 0:
+		return CLOCK_333M;
+	case 4:
+		return DDR_HZ;
+	case 8:
+		return DDR_HZ << 1;
+	default:
+		return DDR_HZ >> 1;
+	}
+}
+
+unsigned long ltq_ar9_sys_hz(void)
+{
+	if (((ltq_cgu_r32(CGU_SYS) >> 3) & 0x3) == 0x2)
+		return CLOCK_393M;
+	return CLOCK_333M;
+}
+
+unsigned long ltq_ar9_fpi_hz(void)
+{
+	unsigned long sys = ltq_ar9_sys_hz();
+
+	if (ltq_cgu_r32(CGU_SYS) & BIT(0))
+		return sys;
+	return sys >> 1;
+}
+
+unsigned long ltq_ar9_cpu_hz(void)
+{
+	if (ltq_cgu_r32(CGU_SYS) & BIT(2))
+		return ltq_ar9_fpi_hz();
+	else
+		return ltq_ar9_sys_hz();
+}
+
+unsigned long ltq_vr9_cpu_hz(void)
+{
+	unsigned int cpu_sel;
+	unsigned long clk;
+
+	cpu_sel = (ltq_cgu_r32(CGU_SYS_VR9) >> 4) & 0xf;
+
+	switch (cpu_sel) {
+	case 0:
+		clk = CLOCK_600M;
+		break;
+	case 1:
+		clk = CLOCK_500M;
+		break;
+	case 2:
+		clk = CLOCK_393M;
+		break;
+	case 3:
+		clk = CLOCK_333M;
+		break;
+	case 5:
+	case 6:
+		clk = CLOCK_196_608M;
+		break;
+	case 7:
+		clk = CLOCK_167M;
+		break;
+	case 4:
+	case 8:
+	case 9:
+		clk = CLOCK_125M;
+		break;
+	default:
+		clk = 0;
+		break;
+	}
+
+	return clk;
+}
+
+unsigned long ltq_vr9_fpi_hz(void)
+{
+	unsigned int ocp_sel, cpu_clk;
+	unsigned long clk;
+
+	cpu_clk = ltq_vr9_cpu_hz();
+	ocp_sel = ltq_cgu_r32(CGU_SYS_VR9) & 0x3;
+
+	switch (ocp_sel) {
+	case 0:
+		/* OCP ratio 1 */
+		clk = cpu_clk;
+		break;
+	case 2:
+		/* OCP ratio 2 */
+		clk = cpu_clk / 2;
+		break;
+	case 3:
+		/* OCP ratio 2.5 */
+		clk = (cpu_clk * 2) / 5;
+		break;
+	case 4:
+		/* OCP ratio 3 */
+		clk = cpu_clk / 3;
+		break;
+	default:
+		clk = 0;
+		break;
+	}
+
+	return clk;
+}
diff --git a/arch/mips/lantiq/xway/devices.c b/arch/mips/lantiq/xway/devices.c
deleted file mode 100644
index d614aa7..0000000
--- a/arch/mips/lantiq/xway/devices.c
+++ /dev/null
@@ -1,119 +0,0 @@
-/*
- *  This program is free software; you can redistribute it and/or modify it
- *  under the terms of the GNU General Public License version 2 as published
- *  by the Free Software Foundation.
- *
- *  Copyright (C) 2010 John Crispin <blogic@openwrt.org>
- */
-
-#include <linux/init.h>
-#include <linux/export.h>
-#include <linux/types.h>
-#include <linux/string.h>
-#include <linux/mtd/physmap.h>
-#include <linux/kernel.h>
-#include <linux/reboot.h>
-#include <linux/platform_device.h>
-#include <linux/leds.h>
-#include <linux/etherdevice.h>
-#include <linux/time.h>
-#include <linux/io.h>
-#include <linux/gpio.h>
-
-#include <asm/bootinfo.h>
-#include <asm/irq.h>
-
-#include <lantiq_soc.h>
-#include <lantiq_irq.h>
-#include <lantiq_platform.h>
-
-#include "devices.h"
-
-/* gpio */
-static struct resource ltq_gpio_resource[] = {
-	{
-		.name	= "gpio0",
-		.start  = LTQ_GPIO0_BASE_ADDR,
-		.end    = LTQ_GPIO0_BASE_ADDR + LTQ_GPIO_SIZE - 1,
-		.flags  = IORESOURCE_MEM,
-	}, {
-		.name	= "gpio1",
-		.start  = LTQ_GPIO1_BASE_ADDR,
-		.end    = LTQ_GPIO1_BASE_ADDR + LTQ_GPIO_SIZE - 1,
-		.flags  = IORESOURCE_MEM,
-	}, {
-		.name	= "gpio2",
-		.start  = LTQ_GPIO2_BASE_ADDR,
-		.end    = LTQ_GPIO2_BASE_ADDR + LTQ_GPIO_SIZE - 1,
-		.flags  = IORESOURCE_MEM,
-	}
-};
-
-void __init ltq_register_gpio(void)
-{
-	platform_device_register_simple("ltq_gpio", 0,
-		&ltq_gpio_resource[0], 1);
-	platform_device_register_simple("ltq_gpio", 1,
-		&ltq_gpio_resource[1], 1);
-
-	/* AR9 and VR9 have an extra gpio block */
-	if (ltq_is_ar9() || ltq_is_vr9()) {
-		platform_device_register_simple("ltq_gpio", 2,
-			&ltq_gpio_resource[2], 1);
-	}
-}
-
-/* serial to parallel conversion */
-static struct resource ltq_stp_resource = {
-	.name   = "stp",
-	.start  = LTQ_STP_BASE_ADDR,
-	.end    = LTQ_STP_BASE_ADDR + LTQ_STP_SIZE - 1,
-	.flags  = IORESOURCE_MEM,
-};
-
-void __init ltq_register_gpio_stp(void)
-{
-	platform_device_register_simple("ltq_stp", 0, &ltq_stp_resource, 1);
-}
-
-/* asc ports - amazon se has its own serial mapping */
-static struct resource ltq_ase_asc_resources[] = {
-	{
-		.name	= "asc0",
-		.start  = LTQ_ASC1_BASE_ADDR,
-		.end    = LTQ_ASC1_BASE_ADDR + LTQ_ASC_SIZE - 1,
-		.flags  = IORESOURCE_MEM,
-	},
-	IRQ_RES(tx, LTQ_ASC_ASE_TIR),
-	IRQ_RES(rx, LTQ_ASC_ASE_RIR),
-	IRQ_RES(err, LTQ_ASC_ASE_EIR),
-};
-
-void __init ltq_register_ase_asc(void)
-{
-	platform_device_register_simple("ltq_asc", 0,
-		ltq_ase_asc_resources, ARRAY_SIZE(ltq_ase_asc_resources));
-}
-
-/* ethernet */
-static struct resource ltq_etop_resources = {
-	.name	= "etop",
-	.start	= LTQ_ETOP_BASE_ADDR,
-	.end	= LTQ_ETOP_BASE_ADDR + LTQ_ETOP_SIZE - 1,
-	.flags	= IORESOURCE_MEM,
-};
-
-static struct platform_device ltq_etop = {
-	.name		= "ltq_etop",
-	.resource	= &ltq_etop_resources,
-	.num_resources	= 1,
-};
-
-void __init
-ltq_register_etop(struct ltq_eth_data *eth)
-{
-	if (eth) {
-		ltq_etop.dev.platform_data = eth;
-		platform_device_register(&ltq_etop);
-	}
-}
diff --git a/arch/mips/lantiq/xway/devices.h b/arch/mips/lantiq/xway/devices.h
deleted file mode 100644
index e904934..0000000
--- a/arch/mips/lantiq/xway/devices.h
+++ /dev/null
@@ -1,20 +0,0 @@
-/*
- *  This program is free software; you can redistribute it and/or modify it
- *  under the terms of the GNU General Public License version 2 as published
- *  by the Free Software Foundation.
- *
- *  Copyright (C) 2010 John Crispin <blogic@openwrt.org>
- */
-
-#ifndef _LTQ_DEVICES_XWAY_H__
-#define _LTQ_DEVICES_XWAY_H__
-
-#include "../devices.h"
-#include <linux/phy.h>
-
-extern void ltq_register_gpio(void);
-extern void ltq_register_gpio_stp(void);
-extern void ltq_register_ase_asc(void);
-extern void ltq_register_etop(struct ltq_eth_data *eth);
-
-#endif
diff --git a/arch/mips/lantiq/xway/dma.c b/arch/mips/lantiq/xway/dma.c
index b210e93..55d2c4f 100644
--- a/arch/mips/lantiq/xway/dma.c
+++ b/arch/mips/lantiq/xway/dma.c
@@ -19,7 +19,8 @@
 #include <linux/platform_device.h>
 #include <linux/io.h>
 #include <linux/dma-mapping.h>
-#include <linux/export.h>
+#include <linux/module.h>
+#include <linux/clk.h>
 
 #include <lantiq_soc.h>
 #include <xway_dma.h>
@@ -55,13 +56,6 @@
 #define ltq_dma_w32_mask(x, y, z)	ltq_w32_mask(x, y, \
 						ltq_dma_membase + (z))
 
-static struct resource ltq_dma_resource = {
-	.name	= "dma",
-	.start	= LTQ_DMA_BASE_ADDR,
-	.end	= LTQ_DMA_BASE_ADDR + LTQ_DMA_SIZE - 1,
-	.flags  = IORESOURCE_MEM,
-};
-
 static void __iomem *ltq_dma_membase;
 
 void
@@ -215,27 +209,28 @@
 }
 EXPORT_SYMBOL_GPL(ltq_dma_init_port);
 
-int __init
-ltq_dma_init(void)
+static int __devinit
+ltq_dma_init(struct platform_device *pdev)
 {
+	struct clk *clk;
+	struct resource *res;
 	int i;
 
-	/* insert and request the memory region */
-	if (insert_resource(&iomem_resource, &ltq_dma_resource) < 0)
-		panic("Failed to insert dma memory");
-
-	if (request_mem_region(ltq_dma_resource.start,
-			resource_size(&ltq_dma_resource), "dma") < 0)
-		panic("Failed to request dma memory");
+	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+	if (!res)
+		panic("Failed to get dma resource");
 
 	/* remap dma register range */
-	ltq_dma_membase = ioremap_nocache(ltq_dma_resource.start,
-				resource_size(&ltq_dma_resource));
+	ltq_dma_membase = devm_request_and_ioremap(&pdev->dev, res);
 	if (!ltq_dma_membase)
-		panic("Failed to remap dma memory");
+		panic("Failed to remap dma resource");
 
 	/* power up and reset the dma engine */
-	ltq_pmu_enable(PMU_DMA);
+	clk = clk_get(&pdev->dev, NULL);
+	if (IS_ERR(clk))
+		panic("Failed to get dma clock");
+
+	clk_enable(clk);
 	ltq_dma_w32_mask(0, DMA_RESET, LTQ_DMA_CTRL);
 
 	/* disable all interrupts */
@@ -248,7 +243,29 @@
 		ltq_dma_w32(DMA_POLL | DMA_CLK_DIV4, LTQ_DMA_CPOLL);
 		ltq_dma_w32_mask(DMA_CHAN_ON, 0, LTQ_DMA_CCTRL);
 	}
+	dev_info(&pdev->dev, "init done\n");
 	return 0;
 }
 
-postcore_initcall(ltq_dma_init);
+static const struct of_device_id dma_match[] = {
+	{ .compatible = "lantiq,dma-xway" },
+	{},
+};
+MODULE_DEVICE_TABLE(of, dma_match);
+
+static struct platform_driver dma_driver = {
+	.probe = ltq_dma_init,
+	.driver = {
+		.name = "dma-xway",
+		.owner = THIS_MODULE,
+		.of_match_table = dma_match,
+	},
+};
+
+int __init
+dma_init(void)
+{
+	return platform_driver_register(&dma_driver);
+}
+
+postcore_initcall(dma_init);
diff --git a/arch/mips/lantiq/xway/ebu.c b/arch/mips/lantiq/xway/ebu.c
deleted file mode 100644
index 862e3e8..0000000
--- a/arch/mips/lantiq/xway/ebu.c
+++ /dev/null
@@ -1,52 +0,0 @@
-/*
- *  This program is free software; you can redistribute it and/or modify it
- *  under the terms of the GNU General Public License version 2 as published
- *  by the Free Software Foundation.
- *
- *  EBU - the external bus unit attaches PCI, NOR and NAND
- *
- *  Copyright (C) 2010 John Crispin <blogic@openwrt.org>
- */
-
-#include <linux/kernel.h>
-#include <linux/module.h>
-#include <linux/ioport.h>
-
-#include <lantiq_soc.h>
-
-/* all access to the ebu must be locked */
-DEFINE_SPINLOCK(ebu_lock);
-EXPORT_SYMBOL_GPL(ebu_lock);
-
-static struct resource ltq_ebu_resource = {
-	.name	= "ebu",
-	.start	= LTQ_EBU_BASE_ADDR,
-	.end	= LTQ_EBU_BASE_ADDR + LTQ_EBU_SIZE - 1,
-	.flags	= IORESOURCE_MEM,
-};
-
-/* remapped base addr of the clock unit and external bus unit */
-void __iomem *ltq_ebu_membase;
-
-static int __init lantiq_ebu_init(void)
-{
-	/* insert and request the memory region */
-	if (insert_resource(&iomem_resource, &ltq_ebu_resource) < 0)
-		panic("Failed to insert ebu memory");
-
-	if (request_mem_region(ltq_ebu_resource.start,
-			resource_size(&ltq_ebu_resource), "ebu") < 0)
-		panic("Failed to request ebu memory");
-
-	/* remap ebu register range */
-	ltq_ebu_membase = ioremap_nocache(ltq_ebu_resource.start,
-				resource_size(&ltq_ebu_resource));
-	if (!ltq_ebu_membase)
-		panic("Failed to remap ebu memory");
-
-	/* make sure to unprotect the memory region where flash is located */
-	ltq_ebu_w32(ltq_ebu_r32(LTQ_EBU_BUSCON0) & ~EBU_WRDIS, LTQ_EBU_BUSCON0);
-	return 0;
-}
-
-postcore_initcall(lantiq_ebu_init);
diff --git a/arch/mips/lantiq/xway/gpio.c b/arch/mips/lantiq/xway/gpio.c
index c429a5b..2ab39e9 100644
--- a/arch/mips/lantiq/xway/gpio.c
+++ b/arch/mips/lantiq/xway/gpio.c
@@ -36,18 +36,6 @@
 
 static struct ltq_gpio ltq_gpio_port[MAX_PORTS];
 
-int gpio_to_irq(unsigned int gpio)
-{
-	return -EINVAL;
-}
-EXPORT_SYMBOL(gpio_to_irq);
-
-int irq_to_gpio(unsigned int gpio)
-{
-	return -EINVAL;
-}
-EXPORT_SYMBOL(irq_to_gpio);
-
 int ltq_gpio_request(unsigned int pin, unsigned int alt0,
 	unsigned int alt1, unsigned int dir, const char *name)
 {
diff --git a/arch/mips/lantiq/xway/gpio_ebu.c b/arch/mips/lantiq/xway/gpio_ebu.c
deleted file mode 100644
index aae1717..0000000
--- a/arch/mips/lantiq/xway/gpio_ebu.c
+++ /dev/null
@@ -1,126 +0,0 @@
-/*
- *  This program is free software; you can redistribute it and/or modify it
- *  under the terms of the GNU General Public License version 2 as published
- *  by the Free Software Foundation.
- *
- *  Copyright (C) 2010 John Crispin <blogic@openwrt.org>
- */
-
-#include <linux/init.h>
-#include <linux/export.h>
-#include <linux/types.h>
-#include <linux/platform_device.h>
-#include <linux/mutex.h>
-#include <linux/gpio.h>
-#include <linux/io.h>
-
-#include <lantiq_soc.h>
-
-/*
- * By attaching hardware latches to the EBU it is possible to create output
- * only gpios. This driver configures a special memory address, which when
- * written to outputs 16 bit to the latches.
- */
-
-#define LTQ_EBU_BUSCON	0x1e7ff		/* 16 bit access, slowest timing */
-#define LTQ_EBU_WP	0x80000000	/* write protect bit */
-
-/* we keep a shadow value of the last value written to the ebu */
-static int ltq_ebu_gpio_shadow = 0x0;
-static void __iomem *ltq_ebu_gpio_membase;
-
-static void ltq_ebu_apply(void)
-{
-	unsigned long flags;
-
-	spin_lock_irqsave(&ebu_lock, flags);
-	ltq_ebu_w32(LTQ_EBU_BUSCON, LTQ_EBU_BUSCON1);
-	*((__u16 *)ltq_ebu_gpio_membase) = ltq_ebu_gpio_shadow;
-	ltq_ebu_w32(LTQ_EBU_BUSCON | LTQ_EBU_WP, LTQ_EBU_BUSCON1);
-	spin_unlock_irqrestore(&ebu_lock, flags);
-}
-
-static void ltq_ebu_set(struct gpio_chip *chip, unsigned offset, int value)
-{
-	if (value)
-		ltq_ebu_gpio_shadow |= (1 << offset);
-	else
-		ltq_ebu_gpio_shadow &= ~(1 << offset);
-	ltq_ebu_apply();
-}
-
-static int ltq_ebu_direction_output(struct gpio_chip *chip, unsigned offset,
-	int value)
-{
-	ltq_ebu_set(chip, offset, value);
-
-	return 0;
-}
-
-static struct gpio_chip ltq_ebu_chip = {
-	.label = "ltq_ebu",
-	.direction_output = ltq_ebu_direction_output,
-	.set = ltq_ebu_set,
-	.base = 72,
-	.ngpio = 16,
-	.can_sleep = 1,
-	.owner = THIS_MODULE,
-};
-
-static int ltq_ebu_probe(struct platform_device *pdev)
-{
-	int ret = 0;
-	struct resource *res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
-
-	if (!res) {
-		dev_err(&pdev->dev, "failed to get memory resource\n");
-		return -ENOENT;
-	}
-
-	res = devm_request_mem_region(&pdev->dev, res->start,
-		resource_size(res), dev_name(&pdev->dev));
-	if (!res) {
-		dev_err(&pdev->dev, "failed to request memory resource\n");
-		return -EBUSY;
-	}
-
-	ltq_ebu_gpio_membase = devm_ioremap_nocache(&pdev->dev, res->start,
-		resource_size(res));
-	if (!ltq_ebu_gpio_membase) {
-		dev_err(&pdev->dev, "Failed to ioremap mem region\n");
-		return -ENOMEM;
-	}
-
-	/* grab the default shadow value passed form the platform code */
-	ltq_ebu_gpio_shadow = (unsigned int) pdev->dev.platform_data;
-
-	/* tell the ebu controller which memory address we will be using */
-	ltq_ebu_w32(pdev->resource->start | 0x1, LTQ_EBU_ADDRSEL1);
-
-	/* write protect the region */
-	ltq_ebu_w32(LTQ_EBU_BUSCON | LTQ_EBU_WP, LTQ_EBU_BUSCON1);
-
-	ret = gpiochip_add(&ltq_ebu_chip);
-	if (!ret)
-		ltq_ebu_apply();
-	return ret;
-}
-
-static struct platform_driver ltq_ebu_driver = {
-	.probe = ltq_ebu_probe,
-	.driver = {
-		.name = "ltq_ebu",
-		.owner = THIS_MODULE,
-	},
-};
-
-static int __init ltq_ebu_init(void)
-{
-	int ret = platform_driver_register(&ltq_ebu_driver);
-
-	if (ret)
-		pr_info("ltq_ebu : Error registering platform driver!");
-	return ret;
-}
-
-postcore_initcall(ltq_ebu_init);
diff --git a/arch/mips/lantiq/xway/gpio_stp.c b/arch/mips/lantiq/xway/gpio_stp.c
deleted file mode 100644
index fd07d87..0000000
--- a/arch/mips/lantiq/xway/gpio_stp.c
+++ /dev/null
@@ -1,157 +0,0 @@
-/*
- *  This program is free software; you can redistribute it and/or modify it
- *  under the terms of the GNU General Public License version 2 as published
- *  by the Free Software Foundation.
- *
- *  Copyright (C) 2007 John Crispin <blogic@openwrt.org>
- *
- */
-
-#include <linux/slab.h>
-#include <linux/init.h>
-#include <linux/export.h>
-#include <linux/types.h>
-#include <linux/platform_device.h>
-#include <linux/mutex.h>
-#include <linux/io.h>
-#include <linux/gpio.h>
-
-#include <lantiq_soc.h>
-
-#define LTQ_STP_CON0		0x00
-#define LTQ_STP_CON1		0x04
-#define LTQ_STP_CPU0		0x08
-#define LTQ_STP_CPU1		0x0C
-#define LTQ_STP_AR		0x10
-
-#define LTQ_STP_CON_SWU		(1 << 31)
-#define LTQ_STP_2HZ		0
-#define LTQ_STP_4HZ		(1 << 23)
-#define LTQ_STP_8HZ		(2 << 23)
-#define LTQ_STP_10HZ		(3 << 23)
-#define LTQ_STP_SPEED_MASK	(0xf << 23)
-#define LTQ_STP_UPD_FPI		(1 << 31)
-#define LTQ_STP_UPD_MASK	(3 << 30)
-#define LTQ_STP_ADSL_SRC	(3 << 24)
-
-#define LTQ_STP_GROUP0		(1 << 0)
-
-#define LTQ_STP_RISING		0
-#define LTQ_STP_FALLING		(1 << 26)
-#define LTQ_STP_EDGE_MASK	(1 << 26)
-
-#define ltq_stp_r32(reg)	__raw_readl(ltq_stp_membase + reg)
-#define ltq_stp_w32(val, reg)	__raw_writel(val, ltq_stp_membase + reg)
-#define ltq_stp_w32_mask(clear, set, reg) \
-		ltq_w32((ltq_r32(ltq_stp_membase + reg) & ~(clear)) | (set), \
-		ltq_stp_membase + (reg))
-
-static int ltq_stp_shadow = 0xffff;
-static void __iomem *ltq_stp_membase;
-
-static void ltq_stp_set(struct gpio_chip *chip, unsigned offset, int value)
-{
-	if (value)
-		ltq_stp_shadow |= (1 << offset);
-	else
-		ltq_stp_shadow &= ~(1 << offset);
-	ltq_stp_w32(ltq_stp_shadow, LTQ_STP_CPU0);
-}
-
-static int ltq_stp_direction_output(struct gpio_chip *chip, unsigned offset,
-	int value)
-{
-	ltq_stp_set(chip, offset, value);
-
-	return 0;
-}
-
-static struct gpio_chip ltq_stp_chip = {
-	.label = "ltq_stp",
-	.direction_output = ltq_stp_direction_output,
-	.set = ltq_stp_set,
-	.base = 48,
-	.ngpio = 24,
-	.can_sleep = 1,
-	.owner = THIS_MODULE,
-};
-
-static int ltq_stp_hw_init(void)
-{
-	/* the 3 pins used to control the external stp */
-	ltq_gpio_request(4, 1, 0, 1, "stp-st");
-	ltq_gpio_request(5, 1, 0, 1, "stp-d");
-	ltq_gpio_request(6, 1, 0, 1, "stp-sh");
-
-	/* sane defaults */
-	ltq_stp_w32(0, LTQ_STP_AR);
-	ltq_stp_w32(0, LTQ_STP_CPU0);
-	ltq_stp_w32(0, LTQ_STP_CPU1);
-	ltq_stp_w32(LTQ_STP_CON_SWU, LTQ_STP_CON0);
-	ltq_stp_w32(0, LTQ_STP_CON1);
-
-	/* rising or falling edge */
-	ltq_stp_w32_mask(LTQ_STP_EDGE_MASK, LTQ_STP_FALLING, LTQ_STP_CON0);
-
-	/* per default stp 15-0 are set */
-	ltq_stp_w32_mask(0, LTQ_STP_GROUP0, LTQ_STP_CON1);
-
-	/* stp are update periodically by the FPI bus */
-	ltq_stp_w32_mask(LTQ_STP_UPD_MASK, LTQ_STP_UPD_FPI, LTQ_STP_CON1);
-
-	/* set stp update speed */
-	ltq_stp_w32_mask(LTQ_STP_SPEED_MASK, LTQ_STP_8HZ, LTQ_STP_CON1);
-
-	/* tell the hardware that pin (led) 0 and 1 are controlled
-	 *  by the dsl arc
-	 */
-	ltq_stp_w32_mask(0, LTQ_STP_ADSL_SRC, LTQ_STP_CON0);
-
-	ltq_pmu_enable(PMU_LED);
-	return 0;
-}
-
-static int __devinit ltq_stp_probe(struct platform_device *pdev)
-{
-	struct resource *res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
-	int ret = 0;
-
-	if (!res)
-		return -ENOENT;
-	res = devm_request_mem_region(&pdev->dev, res->start,
-		resource_size(res), dev_name(&pdev->dev));
-	if (!res) {
-		dev_err(&pdev->dev, "failed to request STP memory\n");
-		return -EBUSY;
-	}
-	ltq_stp_membase = devm_ioremap_nocache(&pdev->dev, res->start,
-		resource_size(res));
-	if (!ltq_stp_membase) {
-		dev_err(&pdev->dev, "failed to remap STP memory\n");
-		return -ENOMEM;
-	}
-	ret = gpiochip_add(&ltq_stp_chip);
-	if (!ret)
-		ret = ltq_stp_hw_init();
-
-	return ret;
-}
-
-static struct platform_driver ltq_stp_driver = {
-	.probe = ltq_stp_probe,
-	.driver = {
-		.name = "ltq_stp",
-		.owner = THIS_MODULE,
-	},
-};
-
-int __init ltq_stp_init(void)
-{
-	int ret = platform_driver_register(&ltq_stp_driver);
-
-	if (ret)
-		pr_info("ltq_stp: error registering platform driver");
-	return ret;
-}
-
-postcore_initcall(ltq_stp_init);
diff --git a/arch/mips/lantiq/xway/mach-easy50601.c b/arch/mips/lantiq/xway/mach-easy50601.c
deleted file mode 100644
index d5aaf63..0000000
--- a/arch/mips/lantiq/xway/mach-easy50601.c
+++ /dev/null
@@ -1,57 +0,0 @@
-/*
- *  This program is free software; you can redistribute it and/or modify it
- *  under the terms of the GNU General Public License version 2 as published
- *  by the Free Software Foundation.
- *
- *  Copyright (C) 2010 John Crispin <blogic@openwrt.org>
- */
-
-#include <linux/init.h>
-#include <linux/platform_device.h>
-#include <linux/mtd/mtd.h>
-#include <linux/mtd/partitions.h>
-#include <linux/mtd/physmap.h>
-#include <linux/input.h>
-
-#include <lantiq.h>
-
-#include "../machtypes.h"
-#include "devices.h"
-
-static struct mtd_partition easy50601_partitions[] = {
-	{
-		.name	= "uboot",
-		.offset	= 0x0,
-		.size	= 0x10000,
-	},
-	{
-		.name	= "uboot_env",
-		.offset	= 0x10000,
-		.size	= 0x10000,
-	},
-	{
-		.name	= "linux",
-		.offset	= 0x20000,
-		.size	= 0xE0000,
-	},
-	{
-		.name	= "rootfs",
-		.offset	= 0x100000,
-		.size	= 0x300000,
-	},
-};
-
-static struct physmap_flash_data easy50601_flash_data = {
-	.nr_parts	= ARRAY_SIZE(easy50601_partitions),
-	.parts		= easy50601_partitions,
-};
-
-static void __init easy50601_init(void)
-{
-	ltq_register_nor(&easy50601_flash_data);
-}
-
-MIPS_MACHINE(LTQ_MACH_EASY50601,
-			"EASY50601",
-			"EASY50601 Eval Board",
-			easy50601_init);
diff --git a/arch/mips/lantiq/xway/mach-easy50712.c b/arch/mips/lantiq/xway/mach-easy50712.c
deleted file mode 100644
index ea5027b..0000000
--- a/arch/mips/lantiq/xway/mach-easy50712.c
+++ /dev/null
@@ -1,74 +0,0 @@
-/*
- *  This program is free software; you can redistribute it and/or modify it
- *  under the terms of the GNU General Public License version 2 as published
- *  by the Free Software Foundation.
- *
- *  Copyright (C) 2010 John Crispin <blogic@openwrt.org>
- */
-
-#include <linux/init.h>
-#include <linux/platform_device.h>
-#include <linux/mtd/mtd.h>
-#include <linux/mtd/partitions.h>
-#include <linux/mtd/physmap.h>
-#include <linux/input.h>
-#include <linux/phy.h>
-
-#include <lantiq_soc.h>
-#include <irq.h>
-
-#include "../machtypes.h"
-#include "devices.h"
-
-static struct mtd_partition easy50712_partitions[] = {
-	{
-		.name	= "uboot",
-		.offset	= 0x0,
-		.size	= 0x10000,
-	},
-	{
-		.name	= "uboot_env",
-		.offset	= 0x10000,
-		.size	= 0x10000,
-	},
-	{
-		.name	= "linux",
-		.offset	= 0x20000,
-		.size	= 0xe0000,
-	},
-	{
-		.name	= "rootfs",
-		.offset	= 0x100000,
-		.size	= 0x300000,
-	},
-};
-
-static struct physmap_flash_data easy50712_flash_data = {
-	.nr_parts	= ARRAY_SIZE(easy50712_partitions),
-	.parts		= easy50712_partitions,
-};
-
-static struct ltq_pci_data ltq_pci_data = {
-	.clock	= PCI_CLOCK_INT,
-	.gpio	= PCI_GNT1 | PCI_REQ1,
-	.irq	= {
-		[14] = INT_NUM_IM0_IRL0 + 22,
-	},
-};
-
-static struct ltq_eth_data ltq_eth_data = {
-	.mii_mode = PHY_INTERFACE_MODE_MII,
-};
-
-static void __init easy50712_init(void)
-{
-	ltq_register_gpio_stp();
-	ltq_register_nor(&easy50712_flash_data);
-	ltq_register_pci(&ltq_pci_data);
-	ltq_register_etop(&ltq_eth_data);
-}
-
-MIPS_MACHINE(LTQ_MACH_EASY50712,
-	     "EASY50712",
-	     "EASY50712 Eval Board",
-	      easy50712_init);
diff --git a/arch/mips/lantiq/xway/pmu.c b/arch/mips/lantiq/xway/pmu.c
deleted file mode 100644
index fe85361..0000000
--- a/arch/mips/lantiq/xway/pmu.c
+++ /dev/null
@@ -1,69 +0,0 @@
-/*
- *  This program is free software; you can redistribute it and/or modify it
- *  under the terms of the GNU General Public License version 2 as published
- *  by the Free Software Foundation.
- *
- *  Copyright (C) 2010 John Crispin <blogic@openwrt.org>
- */
-
-#include <linux/kernel.h>
-#include <linux/module.h>
-#include <linux/ioport.h>
-
-#include <lantiq_soc.h>
-
-/* PMU - the power management unit allows us to turn part of the core
- * on and off
- */
-
-/* the enable / disable registers */
-#define LTQ_PMU_PWDCR	0x1C
-#define LTQ_PMU_PWDSR	0x20
-
-#define ltq_pmu_w32(x, y)	ltq_w32((x), ltq_pmu_membase + (y))
-#define ltq_pmu_r32(x)		ltq_r32(ltq_pmu_membase + (x))
-
-static struct resource ltq_pmu_resource = {
-	.name	= "pmu",
-	.start	= LTQ_PMU_BASE_ADDR,
-	.end	= LTQ_PMU_BASE_ADDR + LTQ_PMU_SIZE - 1,
-	.flags	= IORESOURCE_MEM,
-};
-
-static void __iomem *ltq_pmu_membase;
-
-void ltq_pmu_enable(unsigned int module)
-{
-	int err = 1000000;
-
-	ltq_pmu_w32(ltq_pmu_r32(LTQ_PMU_PWDCR) & ~module, LTQ_PMU_PWDCR);
-	do {} while (--err && (ltq_pmu_r32(LTQ_PMU_PWDSR) & module));
-
-	if (!err)
-		panic("activating PMU module failed!");
-}
-EXPORT_SYMBOL(ltq_pmu_enable);
-
-void ltq_pmu_disable(unsigned int module)
-{
-	ltq_pmu_w32(ltq_pmu_r32(LTQ_PMU_PWDCR) | module, LTQ_PMU_PWDCR);
-}
-EXPORT_SYMBOL(ltq_pmu_disable);
-
-int __init ltq_pmu_init(void)
-{
-	if (insert_resource(&iomem_resource, &ltq_pmu_resource) < 0)
-		panic("Failed to insert pmu memory");
-
-	if (request_mem_region(ltq_pmu_resource.start,
-			resource_size(&ltq_pmu_resource), "pmu") < 0)
-		panic("Failed to request pmu memory");
-
-	ltq_pmu_membase = ioremap_nocache(ltq_pmu_resource.start,
-				resource_size(&ltq_pmu_resource));
-	if (!ltq_pmu_membase)
-		panic("Failed to remap pmu memory");
-	return 0;
-}
-
-core_initcall(ltq_pmu_init);
diff --git a/arch/mips/lantiq/xway/prom-ase.c b/arch/mips/lantiq/xway/prom-ase.c
deleted file mode 100644
index ae4959a..0000000
--- a/arch/mips/lantiq/xway/prom-ase.c
+++ /dev/null
@@ -1,39 +0,0 @@
-/*
- *  This program is free software; you can redistribute it and/or modify it
- *  under the terms of the GNU General Public License version 2 as published
- *  by the Free Software Foundation.
- *
- *  Copyright (C) 2010 John Crispin <blogic@openwrt.org>
- */
-
-#include <linux/export.h>
-#include <linux/clk.h>
-#include <asm/bootinfo.h>
-#include <asm/time.h>
-
-#include <lantiq_soc.h>
-
-#include "../prom.h"
-
-#define SOC_AMAZON_SE	"Amazon_SE"
-
-#define PART_SHIFT	12
-#define PART_MASK	0x0FFFFFFF
-#define REV_SHIFT	28
-#define REV_MASK	0xF0000000
-
-void __init ltq_soc_detect(struct ltq_soc_info *i)
-{
-	i->partnum = (ltq_r32(LTQ_MPS_CHIPID) & PART_MASK) >> PART_SHIFT;
-	i->rev = (ltq_r32(LTQ_MPS_CHIPID) & REV_MASK) >> REV_SHIFT;
-	switch (i->partnum) {
-	case SOC_ID_AMAZON_SE:
-		i->name = SOC_AMAZON_SE;
-		i->type = SOC_TYPE_AMAZON_SE;
-		break;
-
-	default:
-		unreachable();
-		break;
-	}
-}
diff --git a/arch/mips/lantiq/xway/prom-xway.c b/arch/mips/lantiq/xway/prom-xway.c
deleted file mode 100644
index 2228133..0000000
--- a/arch/mips/lantiq/xway/prom-xway.c
+++ /dev/null
@@ -1,54 +0,0 @@
-/*
- *  This program is free software; you can redistribute it and/or modify it
- *  under the terms of the GNU General Public License version 2 as published
- *  by the Free Software Foundation.
- *
- *  Copyright (C) 2010 John Crispin <blogic@openwrt.org>
- */
-
-#include <linux/export.h>
-#include <linux/clk.h>
-#include <asm/bootinfo.h>
-#include <asm/time.h>
-
-#include <lantiq_soc.h>
-
-#include "../prom.h"
-
-#define SOC_DANUBE	"Danube"
-#define SOC_TWINPASS	"Twinpass"
-#define SOC_AR9		"AR9"
-
-#define PART_SHIFT	12
-#define PART_MASK	0x0FFFFFFF
-#define REV_SHIFT	28
-#define REV_MASK	0xF0000000
-
-void __init ltq_soc_detect(struct ltq_soc_info *i)
-{
-	i->partnum = (ltq_r32(LTQ_MPS_CHIPID) & PART_MASK) >> PART_SHIFT;
-	i->rev = (ltq_r32(LTQ_MPS_CHIPID) & REV_MASK) >> REV_SHIFT;
-	switch (i->partnum) {
-	case SOC_ID_DANUBE1:
-	case SOC_ID_DANUBE2:
-		i->name = SOC_DANUBE;
-		i->type = SOC_TYPE_DANUBE;
-		break;
-
-	case SOC_ID_TWINPASS:
-		i->name = SOC_TWINPASS;
-		i->type = SOC_TYPE_DANUBE;
-		break;
-
-	case SOC_ID_ARX188:
-	case SOC_ID_ARX168:
-	case SOC_ID_ARX182:
-		i->name = SOC_AR9;
-		i->type = SOC_TYPE_AR9;
-		break;
-
-	default:
-		unreachable();
-		break;
-	}
-}
diff --git a/arch/mips/lantiq/xway/prom.c b/arch/mips/lantiq/xway/prom.c
new file mode 100644
index 0000000..248429a
--- /dev/null
+++ b/arch/mips/lantiq/xway/prom.c
@@ -0,0 +1,115 @@
+/*
+ *  This program is free software; you can redistribute it and/or modify it
+ *  under the terms of the GNU General Public License version 2 as published
+ *  by the Free Software Foundation.
+ *
+ *  Copyright (C) 2010 John Crispin <blogic@openwrt.org>
+ */
+
+#include <linux/export.h>
+#include <linux/clk.h>
+#include <asm/bootinfo.h>
+#include <asm/time.h>
+
+#include <lantiq_soc.h>
+
+#include "../prom.h"
+
+#define SOC_DANUBE	"Danube"
+#define SOC_TWINPASS	"Twinpass"
+#define SOC_AMAZON_SE	"Amazon_SE"
+#define SOC_AR9		"AR9"
+#define SOC_GR9		"GR9"
+#define SOC_VR9		"VR9"
+
+#define COMP_DANUBE	"lantiq,danube"
+#define COMP_TWINPASS	"lantiq,twinpass"
+#define COMP_AMAZON_SE	"lantiq,ase"
+#define COMP_AR9	"lantiq,ar9"
+#define COMP_GR9	"lantiq,gr9"
+#define COMP_VR9	"lantiq,vr9"
+
+#define PART_SHIFT	12
+#define PART_MASK	0x0FFFFFFF
+#define REV_SHIFT	28
+#define REV_MASK	0xF0000000
+
+void __init ltq_soc_detect(struct ltq_soc_info *i)
+{
+	i->partnum = (ltq_r32(LTQ_MPS_CHIPID) & PART_MASK) >> PART_SHIFT;
+	i->rev = (ltq_r32(LTQ_MPS_CHIPID) & REV_MASK) >> REV_SHIFT;
+	sprintf(i->rev_type, "1.%d", i->rev);
+	switch (i->partnum) {
+	case SOC_ID_DANUBE1:
+	case SOC_ID_DANUBE2:
+		i->name = SOC_DANUBE;
+		i->type = SOC_TYPE_DANUBE;
+		i->compatible = COMP_DANUBE;
+		break;
+
+	case SOC_ID_TWINPASS:
+		i->name = SOC_TWINPASS;
+		i->type = SOC_TYPE_DANUBE;
+		i->compatible = COMP_TWINPASS;
+		break;
+
+	case SOC_ID_ARX188:
+	case SOC_ID_ARX168_1:
+	case SOC_ID_ARX168_2:
+	case SOC_ID_ARX182:
+		i->name = SOC_AR9;
+		i->type = SOC_TYPE_AR9;
+		i->compatible = COMP_AR9;
+		break;
+
+	case SOC_ID_GRX188:
+	case SOC_ID_GRX168:
+		i->name = SOC_GR9;
+		i->type = SOC_TYPE_AR9;
+		i->compatible = COMP_GR9;
+		break;
+
+	case SOC_ID_AMAZON_SE_1:
+	case SOC_ID_AMAZON_SE_2:
+#ifdef CONFIG_PCI
+		panic("ase is only supported for non pci kernels");
+#endif
+		i->name = SOC_AMAZON_SE;
+		i->type = SOC_TYPE_AMAZON_SE;
+		i->compatible = COMP_AMAZON_SE;
+		break;
+
+	case SOC_ID_VRX282:
+	case SOC_ID_VRX268:
+	case SOC_ID_VRX288:
+		i->name = SOC_VR9;
+		i->type = SOC_TYPE_VR9;
+		i->compatible = COMP_VR9;
+		break;
+
+	case SOC_ID_GRX268:
+	case SOC_ID_GRX288:
+		i->name = SOC_GR9;
+		i->type = SOC_TYPE_VR9;
+		i->compatible = COMP_GR9;
+		break;
+
+	case SOC_ID_VRX268_2:
+	case SOC_ID_VRX288_2:
+		i->name = SOC_VR9;
+		i->type = SOC_TYPE_VR9_2;
+		i->compatible = COMP_VR9;
+		break;
+
+	case SOC_ID_GRX282_2:
+	case SOC_ID_GRX288_2:
+		i->name = SOC_GR9;
+		i->type = SOC_TYPE_VR9_2;
+		i->compatible = COMP_GR9;
+		break;
+
+	default:
+		unreachable();
+		break;
+	}
+}
diff --git a/arch/mips/lantiq/xway/reset.c b/arch/mips/lantiq/xway/reset.c
index 8b66bd8..22c55f7 100644
--- a/arch/mips/lantiq/xway/reset.c
+++ b/arch/mips/lantiq/xway/reset.c
@@ -11,26 +11,31 @@
 #include <linux/ioport.h>
 #include <linux/pm.h>
 #include <linux/export.h>
+#include <linux/delay.h>
+#include <linux/of_address.h>
+#include <linux/of_platform.h>
+
 #include <asm/reboot.h>
 
 #include <lantiq_soc.h>
 
+#include "../prom.h"
+
 #define ltq_rcu_w32(x, y)	ltq_w32((x), ltq_rcu_membase + (y))
 #define ltq_rcu_r32(x)		ltq_r32(ltq_rcu_membase + (x))
 
-/* register definitions */
-#define LTQ_RCU_RST		0x0010
-#define LTQ_RCU_RST_ALL		0x40000000
+/* reset request register */
+#define RCU_RST_REQ		0x0010
+/* reset status register */
+#define RCU_RST_STAT		0x0014
 
-#define LTQ_RCU_RST_STAT	0x0014
-#define LTQ_RCU_STAT_SHIFT	26
-
-static struct resource ltq_rcu_resource = {
-	.name   = "rcu",
-	.start  = LTQ_RCU_BASE_ADDR,
-	.end    = LTQ_RCU_BASE_ADDR + LTQ_RCU_SIZE - 1,
-	.flags  = IORESOURCE_MEM,
-};
+/* reboot bit */
+#define RCU_RD_SRST		BIT(30)
+/* reset cause */
+#define RCU_STAT_SHIFT		26
+/* boot selection */
+#define RCU_BOOT_SEL_SHIFT	26
+#define RCU_BOOT_SEL_MASK	0x7
 
 /* remapped base addr of the reset control unit */
 static void __iomem *ltq_rcu_membase;
@@ -38,48 +43,64 @@
 /* This function is used by the watchdog driver */
 int ltq_reset_cause(void)
 {
-	u32 val = ltq_rcu_r32(LTQ_RCU_RST_STAT);
-	return val >> LTQ_RCU_STAT_SHIFT;
+	u32 val = ltq_rcu_r32(RCU_RST_STAT);
+	return val >> RCU_STAT_SHIFT;
 }
 EXPORT_SYMBOL_GPL(ltq_reset_cause);
 
+/* allow platform code to find out what source we booted from */
+unsigned char ltq_boot_select(void)
+{
+	u32 val = ltq_rcu_r32(RCU_RST_STAT);
+	return (val >> RCU_BOOT_SEL_SHIFT) & RCU_BOOT_SEL_MASK;
+}
+
+/* reset an io domain for u microseconds */
+void ltq_reset_once(unsigned int module, ulong u)
+{
+	ltq_rcu_w32(ltq_rcu_r32(RCU_RST_REQ) | module, RCU_RST_REQ);
+	udelay(u);
+	ltq_rcu_w32(ltq_rcu_r32(RCU_RST_REQ) & ~module, RCU_RST_REQ);
+}
+
 static void ltq_machine_restart(char *command)
 {
-	pr_notice("System restart\n");
 	local_irq_disable();
-	ltq_rcu_w32(ltq_rcu_r32(LTQ_RCU_RST) | LTQ_RCU_RST_ALL, LTQ_RCU_RST);
+	ltq_rcu_w32(ltq_rcu_r32(RCU_RST_REQ) | RCU_RD_SRST, RCU_RST_REQ);
 	unreachable();
 }
 
 static void ltq_machine_halt(void)
 {
-	pr_notice("System halted.\n");
 	local_irq_disable();
 	unreachable();
 }
 
 static void ltq_machine_power_off(void)
 {
-	pr_notice("Please turn off the power now.\n");
 	local_irq_disable();
 	unreachable();
 }
 
 static int __init mips_reboot_setup(void)
 {
-	/* insert and request the memory region */
-	if (insert_resource(&iomem_resource, &ltq_rcu_resource) < 0)
-		panic("Failed to insert rcu memory");
+	struct resource res;
+	struct device_node *np =
+		of_find_compatible_node(NULL, NULL, "lantiq,rcu-xway");
 
-	if (request_mem_region(ltq_rcu_resource.start,
-			resource_size(&ltq_rcu_resource), "rcu") < 0)
-		panic("Failed to request rcu memory");
+	/* check if the reset register range is available */
+	if (!np)
+		panic("Failed to load reset resources from devicetree");
 
-	/* remap rcu register range */
-	ltq_rcu_membase = ioremap_nocache(ltq_rcu_resource.start,
-				resource_size(&ltq_rcu_resource));
+	if (of_address_to_resource(np, 0, &res))
+		panic("Failed to get rcu memory range");
+
+	if (request_mem_region(res.start, resource_size(&res), res.name) < 0)
+		pr_err("Failed to request rcu memory");
+
+	ltq_rcu_membase = ioremap_nocache(res.start, resource_size(&res));
 	if (!ltq_rcu_membase)
-		panic("Failed to remap rcu memory");
+		panic("Failed to remap core memory");
 
 	_machine_restart = ltq_machine_restart;
 	_machine_halt = ltq_machine_halt;
diff --git a/arch/mips/lantiq/xway/setup-ase.c b/arch/mips/lantiq/xway/setup-ase.c
deleted file mode 100644
index f6f3267..0000000
--- a/arch/mips/lantiq/xway/setup-ase.c
+++ /dev/null
@@ -1,19 +0,0 @@
-/*
- *  This program is free software; you can redistribute it and/or modify it
- *  under the terms of the GNU General Public License version 2 as published
- *  by the Free Software Foundation.
- *
- *  Copyright (C) 2011 John Crispin <blogic@openwrt.org>
- */
-
-#include <lantiq_soc.h>
-
-#include "../prom.h"
-#include "devices.h"
-
-void __init ltq_soc_setup(void)
-{
-	ltq_register_ase_asc();
-	ltq_register_gpio();
-	ltq_register_wdt();
-}
diff --git a/arch/mips/lantiq/xway/setup-xway.c b/arch/mips/lantiq/xway/setup-xway.c
deleted file mode 100644
index c292f64..0000000
--- a/arch/mips/lantiq/xway/setup-xway.c
+++ /dev/null
@@ -1,20 +0,0 @@
-/*
- *  This program is free software; you can redistribute it and/or modify it
- *  under the terms of the GNU General Public License version 2 as published
- *  by the Free Software Foundation.
- *
- *  Copyright (C) 2011 John Crispin <blogic@openwrt.org>
- */
-
-#include <lantiq_soc.h>
-
-#include "../prom.h"
-#include "devices.h"
-
-void __init ltq_soc_setup(void)
-{
-	ltq_register_asc(0);
-	ltq_register_asc(1);
-	ltq_register_gpio();
-	ltq_register_wdt();
-}
diff --git a/arch/mips/lantiq/xway/sysctrl.c b/arch/mips/lantiq/xway/sysctrl.c
new file mode 100644
index 0000000..83780f7
--- /dev/null
+++ b/arch/mips/lantiq/xway/sysctrl.c
@@ -0,0 +1,371 @@
+/*
+ *  This program is free software; you can redistribute it and/or modify it
+ *  under the terms of the GNU General Public License version 2 as published
+ *  by the Free Software Foundation.
+ *
+ *  Copyright (C) 2011-2012 John Crispin <blogic@openwrt.org>
+ */
+
+#include <linux/ioport.h>
+#include <linux/export.h>
+#include <linux/clkdev.h>
+#include <linux/of.h>
+#include <linux/of_platform.h>
+#include <linux/of_address.h>
+
+#include <lantiq_soc.h>
+
+#include "../clk.h"
+#include "../prom.h"
+
+/* clock control register */
+#define CGU_IFCCR	0x0018
+/* system clock register */
+#define CGU_SYS		0x0010
+/* pci control register */
+#define CGU_PCICR	0x0034
+/* ephy configuration register */
+#define CGU_EPHY	0x10
+/* power control register */
+#define PMU_PWDCR	0x1C
+/* power status register */
+#define PMU_PWDSR	0x20
+/* power control register */
+#define PMU_PWDCR1	0x24
+/* power status register */
+#define PMU_PWDSR1	0x28
+/* power control register */
+#define PWDCR(x) ((x) ? (PMU_PWDCR1) : (PMU_PWDCR))
+/* power status register */
+#define PWDSR(x) ((x) ? (PMU_PWDSR1) : (PMU_PWDSR))
+
+/* clock gates that we can en/disable */
+#define PMU_USB0_P	BIT(0)
+#define PMU_PCI		BIT(4)
+#define PMU_DMA		BIT(5)
+#define PMU_USB0	BIT(6)
+#define PMU_ASC0	BIT(7)
+#define PMU_EPHY	BIT(7)	/* ase */
+#define PMU_SPI		BIT(8)
+#define PMU_DFE		BIT(9)
+#define PMU_EBU		BIT(10)
+#define PMU_STP		BIT(11)
+#define PMU_GPT		BIT(12)
+#define PMU_AHBS	BIT(13) /* vr9 */
+#define PMU_FPI		BIT(14)
+#define PMU_AHBM	BIT(15)
+#define PMU_ASC1	BIT(17)
+#define PMU_PPE_QSB	BIT(18)
+#define PMU_PPE_SLL01	BIT(19)
+#define PMU_PPE_TC	BIT(21)
+#define PMU_PPE_EMA	BIT(22)
+#define PMU_PPE_DPLUM	BIT(23)
+#define PMU_PPE_DPLUS	BIT(24)
+#define PMU_USB1_P	BIT(26)
+#define PMU_USB1	BIT(27)
+#define PMU_SWITCH	BIT(28)
+#define PMU_PPE_TOP	BIT(29)
+#define PMU_GPHY	BIT(30)
+#define PMU_PCIE_CLK	BIT(31)
+
+#define PMU1_PCIE_PHY	BIT(0)
+#define PMU1_PCIE_CTL	BIT(1)
+#define PMU1_PCIE_PDI	BIT(4)
+#define PMU1_PCIE_MSI	BIT(5)
+
+#define pmu_w32(x, y)	ltq_w32((x), pmu_membase + (y))
+#define pmu_r32(x)	ltq_r32(pmu_membase + (x))
+
+static void __iomem *pmu_membase;
+void __iomem *ltq_cgu_membase;
+void __iomem *ltq_ebu_membase;
+
+/* legacy function kept alive to ease clkdev transition */
+void ltq_pmu_enable(unsigned int module)
+{
+	int err = 1000000;
+
+	pmu_w32(pmu_r32(PMU_PWDCR) & ~module, PMU_PWDCR);
+	do {} while (--err && (pmu_r32(PMU_PWDSR) & module));
+
+	if (!err)
+		panic("activating PMU module failed!");
+}
+EXPORT_SYMBOL(ltq_pmu_enable);
+
+/* legacy function kept alive to ease clkdev transition */
+void ltq_pmu_disable(unsigned int module)
+{
+	pmu_w32(pmu_r32(PMU_PWDCR) | module, PMU_PWDCR);
+}
+EXPORT_SYMBOL(ltq_pmu_disable);
+
+/* enable a hw clock */
+static int cgu_enable(struct clk *clk)
+{
+	ltq_cgu_w32(ltq_cgu_r32(CGU_IFCCR) | clk->bits, CGU_IFCCR);
+	return 0;
+}
+
+/* disable a hw clock */
+static void cgu_disable(struct clk *clk)
+{
+	ltq_cgu_w32(ltq_cgu_r32(CGU_IFCCR) & ~clk->bits, CGU_IFCCR);
+}
+
+/* enable a clock gate */
+static int pmu_enable(struct clk *clk)
+{
+	int retry = 1000000;
+
+	pmu_w32(pmu_r32(PWDCR(clk->module)) & ~clk->bits,
+		PWDCR(clk->module));
+	do {} while (--retry && (pmu_r32(PWDSR(clk->module)) & clk->bits));
+
+	if (!retry)
+		panic("activating PMU module failed!\n");
+
+	return 0;
+}
+
+/* disable a clock gate */
+static void pmu_disable(struct clk *clk)
+{
+	pmu_w32(pmu_r32(PWDCR(clk->module)) | clk->bits,
+		PWDCR(clk->module));
+}
+
+/* the pci enable helper */
+static int pci_enable(struct clk *clk)
+{
+	unsigned int ifccr = ltq_cgu_r32(CGU_IFCCR);
+	/* set bus clock speed */
+	if (of_machine_is_compatible("lantiq,ar9")) {
+		ifccr &= ~0x1f00000;
+		if (clk->rate == CLOCK_33M)
+			ifccr |= 0xe00000;
+		else
+			ifccr |= 0x700000; /* 62.5M */
+	} else {
+		ifccr &= ~0xf00000;
+		if (clk->rate == CLOCK_33M)
+			ifccr |= 0x800000;
+		else
+			ifccr |= 0x400000; /* 62.5M */
+	}
+	ltq_cgu_w32(ifccr, CGU_IFCCR);
+	pmu_enable(clk);
+	return 0;
+}
+
+/* enable the external clock as a source */
+static int pci_ext_enable(struct clk *clk)
+{
+	ltq_cgu_w32(ltq_cgu_r32(CGU_IFCCR) & ~(1 << 16),
+		CGU_IFCCR);
+	ltq_cgu_w32((1 << 30), CGU_PCICR);
+	return 0;
+}
+
+/* disable the external clock as a source */
+static void pci_ext_disable(struct clk *clk)
+{
+	ltq_cgu_w32(ltq_cgu_r32(CGU_IFCCR) | (1 << 16),
+		CGU_IFCCR);
+	ltq_cgu_w32((1 << 31) | (1 << 30), CGU_PCICR);
+}
+
+/* enable a clockout source */
+static int clkout_enable(struct clk *clk)
+{
+	int i;
+
+	/* get the correct rate */
+	for (i = 0; i < 4; i++) {
+		if (clk->rates[i] == clk->rate) {
+			int shift = 14 - (2 * clk->module);
+			unsigned int ifccr = ltq_cgu_r32(CGU_IFCCR);
+
+			ifccr &= ~(3 << shift);
+			ifccr |= i << shift;
+			ltq_cgu_w32(ifccr, CGU_IFCCR);
+			return 0;
+		}
+	}
+	return -1;
+}
+
+/* manage the clock gates via PMU */
+static void clkdev_add_pmu(const char *dev, const char *con,
+					unsigned int module, unsigned int bits)
+{
+	struct clk *clk = kzalloc(sizeof(struct clk), GFP_KERNEL);
+
+	clk->cl.dev_id = dev;
+	clk->cl.con_id = con;
+	clk->cl.clk = clk;
+	clk->enable = pmu_enable;
+	clk->disable = pmu_disable;
+	clk->module = module;
+	clk->bits = bits;
+	clkdev_add(&clk->cl);
+}
+
+/* manage the clock generator */
+static void clkdev_add_cgu(const char *dev, const char *con,
+					unsigned int bits)
+{
+	struct clk *clk = kzalloc(sizeof(struct clk), GFP_KERNEL);
+
+	clk->cl.dev_id = dev;
+	clk->cl.con_id = con;
+	clk->cl.clk = clk;
+	clk->enable = cgu_enable;
+	clk->disable = cgu_disable;
+	clk->bits = bits;
+	clkdev_add(&clk->cl);
+}
+
+/* pci needs its own enable function as the setup is a bit more complex */
+static unsigned long valid_pci_rates[] = {CLOCK_33M, CLOCK_62_5M, 0};
+
+static void clkdev_add_pci(void)
+{
+	struct clk *clk = kzalloc(sizeof(struct clk), GFP_KERNEL);
+	struct clk *clk_ext = kzalloc(sizeof(struct clk), GFP_KERNEL);
+
+	/* main pci clock */
+	clk->cl.dev_id = "17000000.pci";
+	clk->cl.con_id = NULL;
+	clk->cl.clk = clk;
+	clk->rate = CLOCK_33M;
+	clk->rates = valid_pci_rates;
+	clk->enable = pci_enable;
+	clk->disable = pmu_disable;
+	clk->module = 0;
+	clk->bits = PMU_PCI;
+	clkdev_add(&clk->cl);
+
+	/* use internal/external bus clock */
+	clk_ext->cl.dev_id = "17000000.pci";
+	clk_ext->cl.con_id = "external";
+	clk_ext->cl.clk = clk_ext;
+	clk_ext->enable = pci_ext_enable;
+	clk_ext->disable = pci_ext_disable;
+	clkdev_add(&clk_ext->cl);
+}
+
+/* xway socs can generate clocks on gpio pins */
+static unsigned long valid_clkout_rates[4][5] = {
+	{CLOCK_32_768K, CLOCK_1_536M, CLOCK_2_5M, CLOCK_12M, 0},
+	{CLOCK_40M, CLOCK_12M, CLOCK_24M, CLOCK_48M, 0},
+	{CLOCK_25M, CLOCK_40M, CLOCK_30M, CLOCK_60M, 0},
+	{CLOCK_12M, CLOCK_50M, CLOCK_32_768K, CLOCK_25M, 0},
+};
+
+static void clkdev_add_clkout(void)
+{
+	int i;
+
+	for (i = 0; i < 4; i++) {
+		struct clk *clk;
+		char *name;
+
+		name = kzalloc(sizeof("clkout0"), GFP_KERNEL);
+		sprintf(name, "clkout%d", i);
+
+		clk = kzalloc(sizeof(struct clk), GFP_KERNEL);
+		clk->cl.dev_id = "1f103000.cgu";
+		clk->cl.con_id = name;
+		clk->cl.clk = clk;
+		clk->rate = 0;
+		clk->rates = valid_clkout_rates[i];
+		clk->enable = clkout_enable;
+		clk->module = i;
+		clkdev_add(&clk->cl);
+	}
+}
+
+/* bring up all register ranges that we need for basic system control */
+void __init ltq_soc_init(void)
+{
+	struct resource res_pmu, res_cgu, res_ebu;
+	struct device_node *np_pmu =
+			of_find_compatible_node(NULL, NULL, "lantiq,pmu-xway");
+	struct device_node *np_cgu =
+			of_find_compatible_node(NULL, NULL, "lantiq,cgu-xway");
+	struct device_node *np_ebu =
+			of_find_compatible_node(NULL, NULL, "lantiq,ebu-xway");
+
+	/* check if all the core register ranges are available */
+	if (!np_pmu || !np_cgu || !np_ebu)
+		panic("Failed to load core nodes from devicetree");
+
+	if (of_address_to_resource(np_pmu, 0, &res_pmu) ||
+			of_address_to_resource(np_cgu, 0, &res_cgu) ||
+			of_address_to_resource(np_ebu, 0, &res_ebu))
+		panic("Failed to get core resources");
+
+	if ((request_mem_region(res_pmu.start, resource_size(&res_pmu),
+				res_pmu.name) < 0) ||
+		(request_mem_region(res_cgu.start, resource_size(&res_cgu),
+				res_cgu.name) < 0) ||
+		(request_mem_region(res_ebu.start, resource_size(&res_ebu),
+				res_ebu.name) < 0))
+		pr_err("Failed to request core resources");
+
+	pmu_membase = ioremap_nocache(res_pmu.start, resource_size(&res_pmu));
+	ltq_cgu_membase = ioremap_nocache(res_cgu.start,
+						resource_size(&res_cgu));
+	ltq_ebu_membase = ioremap_nocache(res_ebu.start,
+						resource_size(&res_ebu));
+	if (!pmu_membase || !ltq_cgu_membase || !ltq_ebu_membase)
+		panic("Failed to remap core resources");
+
+	/* make sure to unprotect the memory region where flash is located */
+	ltq_ebu_w32(ltq_ebu_r32(LTQ_EBU_BUSCON0) & ~EBU_WRDIS, LTQ_EBU_BUSCON0);
+
+	/* add our generic xway clocks */
+	clkdev_add_pmu("10000000.fpi", NULL, 0, PMU_FPI);
+	clkdev_add_pmu("1e100400.serial", NULL, 0, PMU_ASC0);
+	clkdev_add_pmu("1e100a00.gptu", NULL, 0, PMU_GPT);
+	clkdev_add_pmu("1e100bb0.stp", NULL, 0, PMU_STP);
+	clkdev_add_pmu("1e104100.dma", NULL, 0, PMU_DMA);
+	clkdev_add_pmu("1e100800.spi", NULL, 0, PMU_SPI);
+	clkdev_add_pmu("1e105300.ebu", NULL, 0, PMU_EBU);
+	clkdev_add_clkout();
+
+	/* add the soc dependent clocks */
+	if (!of_machine_is_compatible("lantiq,vr9"))
+		clkdev_add_pmu("1e180000.etop", NULL, 0, PMU_PPE);
+
+	if (!of_machine_is_compatible("lantiq,ase")) {
+		clkdev_add_pmu("1e100c00.serial", NULL, 0, PMU_ASC1);
+		clkdev_add_pci();
+	}
+
+	if (of_machine_is_compatible("lantiq,ase")) {
+		if (ltq_cgu_r32(CGU_SYS) & (1 << 5))
+			clkdev_add_static(CLOCK_266M, CLOCK_133M, CLOCK_133M);
+		else
+			clkdev_add_static(CLOCK_133M, CLOCK_133M, CLOCK_133M);
+		clkdev_add_cgu("1e180000.etop", "ephycgu", CGU_EPHY),
+		clkdev_add_pmu("1e180000.etop", "ephy", 0, PMU_EPHY);
+	} else if (of_machine_is_compatible("lantiq,vr9")) {
+		clkdev_add_static(ltq_vr9_cpu_hz(), ltq_vr9_fpi_hz(),
+				ltq_vr9_fpi_hz());
+		clkdev_add_pmu("1d900000.pcie", "phy", 1, PMU1_PCIE_PHY);
+		clkdev_add_pmu("1d900000.pcie", "bus", 0, PMU_PCIE_CLK);
+		clkdev_add_pmu("1d900000.pcie", "msi", 1, PMU1_PCIE_MSI);
+		clkdev_add_pmu("1d900000.pcie", "pdi", 1, PMU1_PCIE_PDI);
+		clkdev_add_pmu("1d900000.pcie", "ctl", 1, PMU1_PCIE_CTL);
+		clkdev_add_pmu("1d900000.pcie", "ahb", 0, PMU_AHBM | PMU_AHBS);
+	} else if (of_machine_is_compatible("lantiq,ar9")) {
+		clkdev_add_static(ltq_ar9_cpu_hz(), ltq_ar9_fpi_hz(),
+				ltq_ar9_fpi_hz());
+		clkdev_add_pmu("1e180000.etop", "switch", 0, PMU_SWITCH);
+	} else {
+		clkdev_add_static(ltq_danube_cpu_hz(), ltq_danube_fpi_hz(),
+				ltq_danube_fpi_hz());
+	}
+}
diff --git a/arch/mips/mm/c-octeon.c b/arch/mips/mm/c-octeon.c
index 47037ec..44e69e7 100644
--- a/arch/mips/mm/c-octeon.c
+++ b/arch/mips/mm/c-octeon.c
@@ -21,6 +21,7 @@
 #include <asm/page.h>
 #include <asm/pgtable.h>
 #include <asm/r4kcache.h>
+#include <asm/traps.h>
 #include <asm/mmu_context.h>
 #include <asm/war.h>
 
@@ -248,6 +249,11 @@
 	}
 }
 
+static void __cpuinit octeon_cache_error_setup(void)
+{
+	extern char except_vec2_octeon;
+	set_handler(0x100, &except_vec2_octeon, 0x80);
+}
 
 /**
  * Setup the Octeon cache flush routines
@@ -255,12 +261,6 @@
  */
 void __cpuinit octeon_cache_init(void)
 {
-	extern unsigned long ebase;
-	extern char except_vec2_octeon;
-
-	memcpy((void *)(ebase + 0x100), &except_vec2_octeon, 0x80);
-	octeon_flush_cache_sigtramp(ebase + 0x100);
-
 	probe_octeon();
 
 	shm_align_mask = PAGE_SIZE - 1;
@@ -280,6 +280,8 @@
 
 	build_clear_page();
 	build_copy_page();
+
+	board_cache_error_setup = octeon_cache_error_setup;
 }
 
 /**
diff --git a/arch/mips/mm/c-r4k.c b/arch/mips/mm/c-r4k.c
index bda8eb2..5109be9 100644
--- a/arch/mips/mm/c-r4k.c
+++ b/arch/mips/mm/c-r4k.c
@@ -32,7 +32,7 @@
 #include <asm/mmu_context.h>
 #include <asm/war.h>
 #include <asm/cacheflush.h> /* for run_uncached() */
-
+#include <asm/traps.h>
 
 /*
  * Special Variant of smp_call_function for use by cache functions:
@@ -1385,10 +1385,8 @@
 __setup("coherentio", setcoherentio);
 #endif
 
-void __cpuinit r4k_cache_init(void)
+static void __cpuinit r4k_cache_error_setup(void)
 {
-	extern void build_clear_page(void);
-	extern void build_copy_page(void);
 	extern char __weak except_vec2_generic;
 	extern char __weak except_vec2_sb1;
 	struct cpuinfo_mips *c = &current_cpu_data;
@@ -1403,6 +1401,13 @@
 		set_uncached_handler(0x100, &except_vec2_generic, 0x80);
 		break;
 	}
+}
+
+void __cpuinit r4k_cache_init(void)
+{
+	extern void build_clear_page(void);
+	extern void build_copy_page(void);
+	struct cpuinfo_mips *c = &current_cpu_data;
 
 	probe_pcache();
 	setup_scache();
@@ -1465,4 +1470,5 @@
 	local_r4k___flush_cache_all(NULL);
 #endif
 	coherency_setup();
+	board_cache_error_setup = r4k_cache_error_setup;
 }
diff --git a/arch/mips/oprofile/Makefile b/arch/mips/oprofile/Makefile
index 29f2f13..1208c28 100644
--- a/arch/mips/oprofile/Makefile
+++ b/arch/mips/oprofile/Makefile
@@ -1,5 +1,3 @@
-ccflags-y := -Werror
-
 obj-$(CONFIG_OPROFILE) += oprofile.o
 
 DRIVER_OBJS = $(addprefix ../../../drivers/oprofile/, \
diff --git a/arch/mips/oprofile/op_model_mipsxx.c b/arch/mips/oprofile/op_model_mipsxx.c
index 54759f1..baba3bc 100644
--- a/arch/mips/oprofile/op_model_mipsxx.c
+++ b/arch/mips/oprofile/op_model_mipsxx.c
@@ -298,6 +298,11 @@
 	}
 }
 
+static irqreturn_t mipsxx_perfcount_int(int irq, void *dev_id)
+{
+	return mipsxx_perfcount_handler();
+}
+
 static int __init mipsxx_init(void)
 {
 	int counters;
@@ -374,6 +379,10 @@
 	save_perf_irq = perf_irq;
 	perf_irq = mipsxx_perfcount_handler;
 
+	if ((cp0_perfcount_irq >= 0) && (cp0_compare_irq != cp0_perfcount_irq))
+		return request_irq(cp0_perfcount_irq, mipsxx_perfcount_int,
+			0, "Perfcounter", save_perf_irq);
+
 	return 0;
 }
 
@@ -381,6 +390,9 @@
 {
 	int counters = op_model_mipsxx_ops.num_counters;
 
+	if ((cp0_perfcount_irq >= 0) && (cp0_compare_irq != cp0_perfcount_irq))
+		free_irq(cp0_perfcount_irq, save_perf_irq);
+
 	counters = counters_per_cpu_to_total(counters);
 	on_each_cpu(reset_counters, (void *)(long)counters, 1);
 
diff --git a/arch/mips/pci/Makefile b/arch/mips/pci/Makefile
index c3ac4b0..c703f43 100644
--- a/arch/mips/pci/Makefile
+++ b/arch/mips/pci/Makefile
@@ -19,7 +19,8 @@
 obj-$(CONFIG_BCM63XX)		+= pci-bcm63xx.o fixup-bcm63xx.o \
 					ops-bcm63xx.o
 obj-$(CONFIG_MIPS_ALCHEMY)	+= pci-alchemy.o
-obj-$(CONFIG_SOC_AR724X)	+= pci-ath724x.o
+obj-$(CONFIG_SOC_AR71XX)	+= pci-ar71xx.o
+obj-$(CONFIG_PCI_AR724X)	+= pci-ar724x.o
 
 #
 # These are still pretty much in the old state, watch, go blind.
@@ -41,7 +42,8 @@
 obj-$(CONFIG_SIBYTE_BCM112X)	+= fixup-sb1250.o pci-sb1250.o
 obj-$(CONFIG_SIBYTE_BCM1x80)	+= pci-bcm1480.o pci-bcm1480ht.o
 obj-$(CONFIG_SNI_RM)		+= fixup-sni.o ops-sni.o
-obj-$(CONFIG_SOC_XWAY)		+= pci-lantiq.o ops-lantiq.o
+obj-$(CONFIG_LANTIQ)		+= fixup-lantiq.o
+obj-$(CONFIG_PCI_LANTIQ)	+= pci-lantiq.o ops-lantiq.o
 obj-$(CONFIG_TANBAC_TB0219)	+= fixup-tb0219.o
 obj-$(CONFIG_TANBAC_TB0226)	+= fixup-tb0226.o
 obj-$(CONFIG_TANBAC_TB0287)	+= fixup-tb0287.o
diff --git a/arch/mips/pci/fixup-lantiq.c b/arch/mips/pci/fixup-lantiq.c
new file mode 100644
index 0000000..6c829df
--- /dev/null
+++ b/arch/mips/pci/fixup-lantiq.c
@@ -0,0 +1,40 @@
+/*
+ *  This program is free software; you can redistribute it and/or modify it
+ *  under the terms of the GNU General Public License version 2 as published
+ *  by the Free Software Foundation.
+ *
+ *  Copyright (C) 2012 John Crispin <blogic@openwrt.org>
+ */
+
+#include <linux/of_irq.h>
+#include <linux/of_pci.h>
+
+int (*ltq_pci_plat_arch_init)(struct pci_dev *dev) = NULL;
+int (*ltq_pci_plat_dev_init)(struct pci_dev *dev) = NULL;
+
+int pcibios_plat_dev_init(struct pci_dev *dev)
+{
+	if (ltq_pci_plat_arch_init)
+		return ltq_pci_plat_arch_init(dev);
+
+	if (ltq_pci_plat_dev_init)
+		return ltq_pci_plat_dev_init(dev);
+
+	return 0;
+}
+
+int __init pcibios_map_irq(const struct pci_dev *dev, u8 slot, u8 pin)
+{
+	struct of_irq dev_irq;
+	int irq;
+
+	if (of_irq_map_pci(dev, &dev_irq)) {
+		dev_err(&dev->dev, "trying to map irq for unknown slot:%d pin:%d\n",
+			slot, pin);
+		return 0;
+	}
+	irq = irq_create_of_mapping(dev_irq.controller, dev_irq.specifier,
+					dev_irq.size);
+	dev_info(&dev->dev, "SLOT:%d PIN:%d IRQ:%d\n", slot, pin, irq);
+	return irq;
+}
diff --git a/arch/mips/pci/ops-loongson2.c b/arch/mips/pci/ops-loongson2.c
index d657ee0..afd2211 100644
--- a/arch/mips/pci/ops-loongson2.c
+++ b/arch/mips/pci/ops-loongson2.c
@@ -15,6 +15,7 @@
 #include <linux/pci.h>
 #include <linux/kernel.h>
 #include <linux/init.h>
+#include <linux/export.h>
 
 #include <loongson.h>
 
diff --git a/arch/mips/pci/pci-ar71xx.c b/arch/mips/pci/pci-ar71xx.c
new file mode 100644
index 0000000..1552522
--- /dev/null
+++ b/arch/mips/pci/pci-ar71xx.c
@@ -0,0 +1,375 @@
+/*
+ *  Atheros AR71xx PCI host controller driver
+ *
+ *  Copyright (C) 2008-2011 Gabor Juhos <juhosg@openwrt.org>
+ *  Copyright (C) 2008 Imre Kaloz <kaloz@openwrt.org>
+ *
+ *  Parts of this file are based on Atheros' 2.6.15 BSP
+ *
+ *  This program is free software; you can redistribute it and/or modify it
+ *  under the terms of the GNU General Public License version 2 as published
+ *  by the Free Software Foundation.
+ */
+
+#include <linux/resource.h>
+#include <linux/types.h>
+#include <linux/delay.h>
+#include <linux/bitops.h>
+#include <linux/pci.h>
+#include <linux/pci_regs.h>
+#include <linux/interrupt.h>
+
+#include <asm/mach-ath79/ar71xx_regs.h>
+#include <asm/mach-ath79/ath79.h>
+#include <asm/mach-ath79/pci.h>
+
+#define AR71XX_PCI_MEM_BASE	0x10000000
+#define AR71XX_PCI_MEM_SIZE	0x08000000
+
+#define AR71XX_PCI_WIN0_OFFS		0x10000000
+#define AR71XX_PCI_WIN1_OFFS		0x11000000
+#define AR71XX_PCI_WIN2_OFFS		0x12000000
+#define AR71XX_PCI_WIN3_OFFS		0x13000000
+#define AR71XX_PCI_WIN4_OFFS		0x14000000
+#define AR71XX_PCI_WIN5_OFFS		0x15000000
+#define AR71XX_PCI_WIN6_OFFS		0x16000000
+#define AR71XX_PCI_WIN7_OFFS		0x07000000
+
+#define AR71XX_PCI_CFG_BASE		\
+	(AR71XX_PCI_MEM_BASE + AR71XX_PCI_WIN7_OFFS + 0x10000)
+#define AR71XX_PCI_CFG_SIZE		0x100
+
+#define AR71XX_PCI_REG_CRP_AD_CBE	0x00
+#define AR71XX_PCI_REG_CRP_WRDATA	0x04
+#define AR71XX_PCI_REG_CRP_RDDATA	0x08
+#define AR71XX_PCI_REG_CFG_AD		0x0c
+#define AR71XX_PCI_REG_CFG_CBE		0x10
+#define AR71XX_PCI_REG_CFG_WRDATA	0x14
+#define AR71XX_PCI_REG_CFG_RDDATA	0x18
+#define AR71XX_PCI_REG_PCI_ERR		0x1c
+#define AR71XX_PCI_REG_PCI_ERR_ADDR	0x20
+#define AR71XX_PCI_REG_AHB_ERR		0x24
+#define AR71XX_PCI_REG_AHB_ERR_ADDR	0x28
+
+#define AR71XX_PCI_CRP_CMD_WRITE	0x00010000
+#define AR71XX_PCI_CRP_CMD_READ		0x00000000
+#define AR71XX_PCI_CFG_CMD_READ		0x0000000a
+#define AR71XX_PCI_CFG_CMD_WRITE	0x0000000b
+
+#define AR71XX_PCI_INT_CORE		BIT(4)
+#define AR71XX_PCI_INT_DEV2		BIT(2)
+#define AR71XX_PCI_INT_DEV1		BIT(1)
+#define AR71XX_PCI_INT_DEV0		BIT(0)
+
+#define AR71XX_PCI_IRQ_COUNT		5
+
+static DEFINE_SPINLOCK(ar71xx_pci_lock);
+static void __iomem *ar71xx_pcicfg_base;
+
+/* Byte lane enable bits */
+static const u8 ar71xx_pci_ble_table[4][4] = {
+	{0x0, 0xf, 0xf, 0xf},
+	{0xe, 0xd, 0xb, 0x7},
+	{0xc, 0xf, 0x3, 0xf},
+	{0xf, 0xf, 0xf, 0xf},
+};
+
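+/* mask applied to config read data, indexed by the access size in bytes */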
+static const u32 ar71xx_pci_read_mask[8] = {
+	0, 0xff, 0xffff, 0, 0xffffffff, 0, 0, 0
+};
+
+static inline u32 ar71xx_pci_get_ble(int where, int size, int local)
+{
+	u32 t;
+
+	t = ar71xx_pci_ble_table[size & 3][where & 3];
+	BUG_ON(t == 0xf);
+	t <<= (local) ? 20 : 4;
+
+	return t;
+}
+
+static inline u32 ar71xx_pci_bus_addr(struct pci_bus *bus, unsigned int devfn,
+				      int where)
+{
+	u32 ret;
+
+	if (!bus->number) {
+		/* type 0 */
+		ret = (1 << PCI_SLOT(devfn)) | (PCI_FUNC(devfn) << 8) |
+		      (where & ~3);
+	} else {
+		/* type 1 */
+		ret = (bus->number << 16) | (PCI_SLOT(devfn) << 11) |
+		      (PCI_FUNC(devfn) << 8) | (where & ~3) | 1;
+	}
+
+	return ret;
+}
+
+static int ar71xx_pci_check_error(int quiet)
+{
+	void __iomem *base = ar71xx_pcicfg_base;
+	u32 pci_err;
+	u32 ahb_err;
+
+	pci_err = __raw_readl(base + AR71XX_PCI_REG_PCI_ERR) & 3;
+	if (pci_err) {
+		if (!quiet) {
+			u32 addr;
+
+			addr = __raw_readl(base + AR71XX_PCI_REG_PCI_ERR_ADDR);
+			pr_crit("ar71xx: %s bus error %d at addr 0x%x\n",
+				"PCI", pci_err, addr);
+		}
+
+		/* clear PCI error status */
+		__raw_writel(pci_err, base + AR71XX_PCI_REG_PCI_ERR);
+	}
+
+	ahb_err = __raw_readl(base + AR71XX_PCI_REG_AHB_ERR) & 1;
+	if (ahb_err) {
+		if (!quiet) {
+			u32 addr;
+
+			addr = __raw_readl(base + AR71XX_PCI_REG_AHB_ERR_ADDR);
+			pr_crit("ar71xx: %s bus error %d at addr 0x%x\n",
+				"AHB", ahb_err, addr);
+		}
+
+		/* clear AHB error status */
+		__raw_writel(ahb_err, base + AR71XX_PCI_REG_AHB_ERR);
+	}
+
+	return !!(ahb_err | pci_err);
+}
+
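+/* write a value into the controller's own (local) config space via the CRP registers */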
+static inline void ar71xx_pci_local_write(int where, int size, u32 value)
+{
+	void __iomem *base = ar71xx_pcicfg_base;
+	u32 ad_cbe;
+
+	value = value << (8 * (where & 3));
+
+	ad_cbe = AR71XX_PCI_CRP_CMD_WRITE | (where & ~3);
+	ad_cbe |= ar71xx_pci_get_ble(where, size, 1);
+
+	__raw_writel(ad_cbe, base + AR71XX_PCI_REG_CRP_AD_CBE);
+	__raw_writel(value, base + AR71XX_PCI_REG_CRP_WRDATA);
+}
+
+static inline int ar71xx_pci_set_cfgaddr(struct pci_bus *bus,
+					 unsigned int devfn,
+					 int where, int size, u32 cmd)
+{
+	void __iomem *base = ar71xx_pcicfg_base;
+	u32 addr;
+
+	addr = ar71xx_pci_bus_addr(bus, devfn, where);
+
+	__raw_writel(addr, base + AR71XX_PCI_REG_CFG_AD);
+	__raw_writel(cmd | ar71xx_pci_get_ble(where, size, 0),
+		     base + AR71XX_PCI_REG_CFG_CBE);
+
+	return ar71xx_pci_check_error(1);
+}
+
+static int ar71xx_pci_read_config(struct pci_bus *bus, unsigned int devfn,
+				  int where, int size, u32 *value)
+{
+	void __iomem *base = ar71xx_pcicfg_base;
+	unsigned long flags;
+	u32 data;
+	int err;
+	int ret;
+
+	ret = PCIBIOS_SUCCESSFUL;
+	data = ~0;
+
+	spin_lock_irqsave(&ar71xx_pci_lock, flags);
+
+	err = ar71xx_pci_set_cfgaddr(bus, devfn, where, size,
+				     AR71XX_PCI_CFG_CMD_READ);
+	if (err)
+		ret = PCIBIOS_DEVICE_NOT_FOUND;
+	else
+		data = __raw_readl(base + AR71XX_PCI_REG_CFG_RDDATA);
+
+	spin_unlock_irqrestore(&ar71xx_pci_lock, flags);
+
+	*value = (data >> (8 * (where & 3))) & ar71xx_pci_read_mask[size & 7];
+
+	return ret;
+}
+
+static int ar71xx_pci_write_config(struct pci_bus *bus, unsigned int devfn,
+				   int where, int size, u32 value)
+{
+	void __iomem *base = ar71xx_pcicfg_base;
+	unsigned long flags;
+	int err;
+	int ret;
+
+	value = value << (8 * (where & 3));
+	ret = PCIBIOS_SUCCESSFUL;
+
+	spin_lock_irqsave(&ar71xx_pci_lock, flags);
+
+	err = ar71xx_pci_set_cfgaddr(bus, devfn, where, size,
+				     AR71XX_PCI_CFG_CMD_WRITE);
+	if (err)
+		ret = PCIBIOS_DEVICE_NOT_FOUND;
+	else
+		__raw_writel(value, base + AR71XX_PCI_REG_CFG_WRDATA);
+
+	spin_unlock_irqrestore(&ar71xx_pci_lock, flags);
+
+	return ret;
+}
+
+static struct pci_ops ar71xx_pci_ops = {
+	.read	= ar71xx_pci_read_config,
+	.write	= ar71xx_pci_write_config,
+};
+
+static struct resource ar71xx_pci_io_resource = {
+	.name		= "PCI IO space",
+	.start		= 0,
+	.end		= 0,
+	.flags		= IORESOURCE_IO,
+};
+
+static struct resource ar71xx_pci_mem_resource = {
+	.name		= "PCI memory space",
+	.start		= AR71XX_PCI_MEM_BASE,
+	.end		= AR71XX_PCI_MEM_BASE + AR71XX_PCI_MEM_SIZE - 1,
+	.flags		= IORESOURCE_MEM
+};
+
+static struct pci_controller ar71xx_pci_controller = {
+	.pci_ops	= &ar71xx_pci_ops,
+	.mem_resource	= &ar71xx_pci_mem_resource,
+	.io_resource	= &ar71xx_pci_io_resource,
+};
+
+static void ar71xx_pci_irq_handler(unsigned int irq, struct irq_desc *desc)
+{
+	void __iomem *base = ath79_reset_base;
+	u32 pending;
+
+	pending = __raw_readl(base + AR71XX_RESET_REG_PCI_INT_STATUS) &
+		  __raw_readl(base + AR71XX_RESET_REG_PCI_INT_ENABLE);
+
+	if (pending & AR71XX_PCI_INT_DEV0)
+		generic_handle_irq(ATH79_PCI_IRQ(0));
+
+	else if (pending & AR71XX_PCI_INT_DEV1)
+		generic_handle_irq(ATH79_PCI_IRQ(1));
+
+	else if (pending & AR71XX_PCI_INT_DEV2)
+		generic_handle_irq(ATH79_PCI_IRQ(2));
+
+	else if (pending & AR71XX_PCI_INT_CORE)
+		generic_handle_irq(ATH79_PCI_IRQ(4));
+
+	else
+		spurious_interrupt();
+}
+
+static void ar71xx_pci_irq_unmask(struct irq_data *d)
+{
+	unsigned int irq = d->irq - ATH79_PCI_IRQ_BASE;
+	void __iomem *base = ath79_reset_base;
+	u32 t;
+
+	t = __raw_readl(base + AR71XX_RESET_REG_PCI_INT_ENABLE);
+	__raw_writel(t | (1 << irq), base + AR71XX_RESET_REG_PCI_INT_ENABLE);
+
+	/* flush write */
+	__raw_readl(base + AR71XX_RESET_REG_PCI_INT_ENABLE);
+}
+
+static void ar71xx_pci_irq_mask(struct irq_data *d)
+{
+	unsigned int irq = d->irq - ATH79_PCI_IRQ_BASE;
+	void __iomem *base = ath79_reset_base;
+	u32 t;
+
+	t = __raw_readl(base + AR71XX_RESET_REG_PCI_INT_ENABLE);
+	__raw_writel(t & ~(1 << irq), base + AR71XX_RESET_REG_PCI_INT_ENABLE);
+
+	/* flush write */
+	__raw_readl(base + AR71XX_RESET_REG_PCI_INT_ENABLE);
+}
+
+static struct irq_chip ar71xx_pci_irq_chip = {
+	.name		= "AR71XX PCI",
+	.irq_mask	= ar71xx_pci_irq_mask,
+	.irq_unmask	= ar71xx_pci_irq_unmask,
+	.irq_mask_ack	= ar71xx_pci_irq_mask,
+};
+
+static __init void ar71xx_pci_irq_init(void)
+{
+	void __iomem *base = ath79_reset_base;
+	int i;
+
+	__raw_writel(0, base + AR71XX_RESET_REG_PCI_INT_ENABLE);
+	__raw_writel(0, base + AR71XX_RESET_REG_PCI_INT_STATUS);
+
+	BUILD_BUG_ON(ATH79_PCI_IRQ_COUNT < AR71XX_PCI_IRQ_COUNT);
+
+	for (i = ATH79_PCI_IRQ_BASE;
+	     i < ATH79_PCI_IRQ_BASE + AR71XX_PCI_IRQ_COUNT; i++)
+		irq_set_chip_and_handler(i, &ar71xx_pci_irq_chip,
+					 handle_level_irq);
+
+	irq_set_chained_handler(ATH79_CPU_IRQ_IP2, ar71xx_pci_irq_handler);
+}
+
+static __init void ar71xx_pci_reset(void)
+{
+	void __iomem *ddr_base = ath79_ddr_base;
+
+	ath79_device_reset_set(AR71XX_RESET_PCI_BUS | AR71XX_RESET_PCI_CORE);
+	mdelay(100);
+
+	ath79_device_reset_clear(AR71XX_RESET_PCI_BUS | AR71XX_RESET_PCI_CORE);
+	mdelay(100);
+
+	__raw_writel(AR71XX_PCI_WIN0_OFFS, ddr_base + AR71XX_DDR_REG_PCI_WIN0);
+	__raw_writel(AR71XX_PCI_WIN1_OFFS, ddr_base + AR71XX_DDR_REG_PCI_WIN1);
+	__raw_writel(AR71XX_PCI_WIN2_OFFS, ddr_base + AR71XX_DDR_REG_PCI_WIN2);
+	__raw_writel(AR71XX_PCI_WIN3_OFFS, ddr_base + AR71XX_DDR_REG_PCI_WIN3);
+	__raw_writel(AR71XX_PCI_WIN4_OFFS, ddr_base + AR71XX_DDR_REG_PCI_WIN4);
+	__raw_writel(AR71XX_PCI_WIN5_OFFS, ddr_base + AR71XX_DDR_REG_PCI_WIN5);
+	__raw_writel(AR71XX_PCI_WIN6_OFFS, ddr_base + AR71XX_DDR_REG_PCI_WIN6);
+	__raw_writel(AR71XX_PCI_WIN7_OFFS, ddr_base + AR71XX_DDR_REG_PCI_WIN7);
+
+	mdelay(100);
+}
+
+__init int ar71xx_pcibios_init(void)
+{
+	u32 t;
+
+	ar71xx_pcicfg_base = ioremap(AR71XX_PCI_CFG_BASE, AR71XX_PCI_CFG_SIZE);
+	if (ar71xx_pcicfg_base == NULL)
+		return -ENOMEM;
+
+	ar71xx_pci_reset();
+
+	/* setup COMMAND register */
+	t = PCI_COMMAND_MEMORY | PCI_COMMAND_MASTER | PCI_COMMAND_INVALIDATE
+	  | PCI_COMMAND_PARITY | PCI_COMMAND_SERR | PCI_COMMAND_FAST_BACK;
+	ar71xx_pci_local_write(PCI_COMMAND, 4, t);
+
+	/* clear bus errors */
+	ar71xx_pci_check_error(1);
+
+	ar71xx_pci_irq_init();
+
+	register_pci_controller(&ar71xx_pci_controller);
+
+	return 0;
+}
diff --git a/arch/mips/pci/pci-ar724x.c b/arch/mips/pci/pci-ar724x.c
new file mode 100644
index 0000000..414a745
--- /dev/null
+++ b/arch/mips/pci/pci-ar724x.c
@@ -0,0 +1,292 @@
+/*
+ *  Atheros AR724X PCI host controller driver
+ *
+ *  Copyright (C) 2011 René Bolldorf <xsecute@googlemail.com>
+ *  Copyright (C) 2009-2011 Gabor Juhos <juhosg@openwrt.org>
+ *
+ *  This program is free software; you can redistribute it and/or modify it
+ *  under the terms of the GNU General Public License version 2 as published
+ *  by the Free Software Foundation.
+ */
+
+#include <linux/irq.h>
+#include <linux/pci.h>
+#include <asm/mach-ath79/ath79.h>
+#include <asm/mach-ath79/ar71xx_regs.h>
+#include <asm/mach-ath79/pci.h>
+
+#define AR724X_PCI_CFG_BASE	0x14000000
+#define AR724X_PCI_CFG_SIZE	0x1000
+#define AR724X_PCI_CTRL_BASE	(AR71XX_APB_BASE + 0x000f0000)
+#define AR724X_PCI_CTRL_SIZE	0x100
+
+#define AR724X_PCI_MEM_BASE	0x10000000
+#define AR724X_PCI_MEM_SIZE	0x08000000
+
+#define AR724X_PCI_REG_INT_STATUS	0x4c
+#define AR724X_PCI_REG_INT_MASK		0x50
+
+#define AR724X_PCI_INT_DEV0		BIT(14)
+
+#define AR724X_PCI_IRQ_COUNT		1
+
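+/* SoC specific value written to BAR0 on AR7240 as part of the workaround in ar724x_pci_write() */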
+#define AR7240_BAR0_WAR_VALUE	0xffff
+
+static DEFINE_SPINLOCK(ar724x_pci_lock);
+static void __iomem *ar724x_pci_devcfg_base;
+static void __iomem *ar724x_pci_ctrl_base;
+
+static u32 ar724x_pci_bar0_value;
+static bool ar724x_pci_bar0_is_cached;
+
+static int ar724x_pci_read(struct pci_bus *bus, unsigned int devfn, int where,
+			    int size, uint32_t *value)
+{
+	unsigned long flags;
+	void __iomem *base;
+	u32 data;
+
+	if (devfn)
+		return PCIBIOS_DEVICE_NOT_FOUND;
+
+	base = ar724x_pci_devcfg_base;
+
+	spin_lock_irqsave(&ar724x_pci_lock, flags);
+	data = __raw_readl(base + (where & ~3));
+
+	switch (size) {
+	case 1:
+		if (where & 1)
+			data >>= 8;
+		if (where & 2)
+			data >>= 16;
+		data &= 0xff;
+		break;
+	case 2:
+		if (where & 2)
+			data >>= 16;
+		data &= 0xffff;
+		break;
+	case 4:
+		break;
+	default:
+		spin_unlock_irqrestore(&ar724x_pci_lock, flags);
+
+		return PCIBIOS_BAD_REGISTER_NUMBER;
+	}
+
+	spin_unlock_irqrestore(&ar724x_pci_lock, flags);
+
+	if (where == PCI_BASE_ADDRESS_0 && size == 4 &&
+	    ar724x_pci_bar0_is_cached) {
+		/* use the cached value */
+		*value = ar724x_pci_bar0_value;
+	} else {
+		*value = data;
+	}
+
+	return PCIBIOS_SUCCESSFUL;
+}
+
+static int ar724x_pci_write(struct pci_bus *bus, unsigned int devfn, int where,
+			     int size, uint32_t value)
+{
+	unsigned long flags;
+	void __iomem *base;
+	u32 data;
+	int s;
+
+	if (devfn)
+		return PCIBIOS_DEVICE_NOT_FOUND;
+
+	if (soc_is_ar7240() && where == PCI_BASE_ADDRESS_0 && size == 4) {
+		if (value != 0xffffffff) {
+			/*
+			 * WAR for a hw issue. If the BAR0 register of the
+			 * device is set to the proper base address, the
+			 * memory space of the device is not accessible.
+			 *
+			 * Cache the intended value so it can be read back,
+			 * and write a SoC specific constant value to the
+			 * BAR0 register in order to make the device memory
+			 * accessible.
+			 */
+			ar724x_pci_bar0_is_cached = true;
+			ar724x_pci_bar0_value = value;
+
+			value = AR7240_BAR0_WAR_VALUE;
+		} else {
+			ar724x_pci_bar0_is_cached = false;
+		}
+	}
+
+	base = ar724x_pci_devcfg_base;
+
+	spin_lock_irqsave(&ar724x_pci_lock, flags);
+	data = __raw_readl(base + (where & ~3));
+
+	switch (size) {
+	case 1:
+		s = ((where & 3) * 8);
+		data &= ~(0xff << s);
+		data |= ((value & 0xff) << s);
+		break;
+	case 2:
+		s = ((where & 2) * 8);
+		data &= ~(0xffff << s);
+		data |= ((value & 0xffff) << s);
+		break;
+	case 4:
+		data = value;
+		break;
+	default:
+		spin_unlock_irqrestore(&ar724x_pci_lock, flags);
+
+		return PCIBIOS_BAD_REGISTER_NUMBER;
+	}
+
+	__raw_writel(data, base + (where & ~3));
+	/* flush write */
+	__raw_readl(base + (where & ~3));
+	spin_unlock_irqrestore(&ar724x_pci_lock, flags);
+
+	return PCIBIOS_SUCCESSFUL;
+}
+
+static struct pci_ops ar724x_pci_ops = {
+	.read	= ar724x_pci_read,
+	.write	= ar724x_pci_write,
+};
+
+static struct resource ar724x_io_resource = {
+	.name   = "PCI IO space",
+	.start  = 0,
+	.end    = 0,
+	.flags  = IORESOURCE_IO,
+};
+
+static struct resource ar724x_mem_resource = {
+	.name   = "PCI memory space",
+	.start  = AR724X_PCI_MEM_BASE,
+	.end    = AR724X_PCI_MEM_BASE + AR724X_PCI_MEM_SIZE - 1,
+	.flags  = IORESOURCE_MEM,
+};
+
+static struct pci_controller ar724x_pci_controller = {
+	.pci_ops        = &ar724x_pci_ops,
+	.io_resource    = &ar724x_io_resource,
+	.mem_resource	= &ar724x_mem_resource,
+};
+
+static void ar724x_pci_irq_handler(unsigned int irq, struct irq_desc *desc)
+{
+	void __iomem *base;
+	u32 pending;
+
+	base = ar724x_pci_ctrl_base;
+
+	pending = __raw_readl(base + AR724X_PCI_REG_INT_STATUS) &
+		  __raw_readl(base + AR724X_PCI_REG_INT_MASK);
+
+	if (pending & AR724X_PCI_INT_DEV0)
+		generic_handle_irq(ATH79_PCI_IRQ(0));
+
+	else
+		spurious_interrupt();
+}
+
+static void ar724x_pci_irq_unmask(struct irq_data *d)
+{
+	void __iomem *base;
+	u32 t;
+
+	base = ar724x_pci_ctrl_base;
+
+	switch (d->irq) {
+	case ATH79_PCI_IRQ(0):
+		t = __raw_readl(base + AR724X_PCI_REG_INT_MASK);
+		__raw_writel(t | AR724X_PCI_INT_DEV0,
+			     base + AR724X_PCI_REG_INT_MASK);
+		/* flush write */
+		__raw_readl(base + AR724X_PCI_REG_INT_MASK);
+	}
+}
+
+static void ar724x_pci_irq_mask(struct irq_data *d)
+{
+	void __iomem *base;
+	u32 t;
+
+	base = ar724x_pci_ctrl_base;
+
+	switch (d->irq) {
+	case ATH79_PCI_IRQ(0):
+		t = __raw_readl(base + AR724X_PCI_REG_INT_MASK);
+		__raw_writel(t & ~AR724X_PCI_INT_DEV0,
+			     base + AR724X_PCI_REG_INT_MASK);
+
+		/* flush write */
+		__raw_readl(base + AR724X_PCI_REG_INT_MASK);
+
+		t = __raw_readl(base + AR724X_PCI_REG_INT_STATUS);
+		__raw_writel(t | AR724X_PCI_INT_DEV0,
+			     base + AR724X_PCI_REG_INT_STATUS);
+
+		/* flush write */
+		__raw_readl(base + AR724X_PCI_REG_INT_STATUS);
+	}
+}
+
+static struct irq_chip ar724x_pci_irq_chip = {
+	.name		= "AR724X PCI",
+	.irq_mask	= ar724x_pci_irq_mask,
+	.irq_unmask	= ar724x_pci_irq_unmask,
+	.irq_mask_ack	= ar724x_pci_irq_mask,
+};
+
+static void __init ar724x_pci_irq_init(int irq)
+{
+	void __iomem *base;
+	int i;
+
+	base = ar724x_pci_ctrl_base;
+
+	__raw_writel(0, base + AR724X_PCI_REG_INT_MASK);
+	__raw_writel(0, base + AR724X_PCI_REG_INT_STATUS);
+
+	BUILD_BUG_ON(ATH79_PCI_IRQ_COUNT < AR724X_PCI_IRQ_COUNT);
+
+	for (i = ATH79_PCI_IRQ_BASE;
+	     i < ATH79_PCI_IRQ_BASE + AR724X_PCI_IRQ_COUNT; i++)
+		irq_set_chip_and_handler(i, &ar724x_pci_irq_chip,
+					 handle_level_irq);
+
+	irq_set_chained_handler(irq, ar724x_pci_irq_handler);
+}
+
+int __init ar724x_pcibios_init(int irq)
+{
+	int ret;
+
+	ret = -ENOMEM;
+
+	ar724x_pci_devcfg_base = ioremap(AR724X_PCI_CFG_BASE,
+					 AR724X_PCI_CFG_SIZE);
+	if (ar724x_pci_devcfg_base == NULL)
+		goto err;
+
+	ar724x_pci_ctrl_base = ioremap(AR724X_PCI_CTRL_BASE,
+				       AR724X_PCI_CTRL_SIZE);
+	if (ar724x_pci_ctrl_base == NULL)
+		goto err_unmap_devcfg;
+
+	ar724x_pci_irq_init(irq);
+	register_pci_controller(&ar724x_pci_controller);
+
+	return PCIBIOS_SUCCESSFUL;
+
+err_unmap_devcfg:
+	iounmap(ar724x_pci_devcfg_base);
+err:
+	return ret;
+}
diff --git a/arch/mips/pci/pci-ath724x.c b/arch/mips/pci/pci-ath724x.c
deleted file mode 100644
index a4dd24a..0000000
--- a/arch/mips/pci/pci-ath724x.c
+++ /dev/null
@@ -1,174 +0,0 @@
-/*
- *  Atheros 724x PCI support
- *
- *  Copyright (C) 2011 René Bolldorf <xsecute@googlemail.com>
- *
- *  This program is free software; you can redistribute it and/or modify it
- *  under the terms of the GNU General Public License version 2 as published
- *  by the Free Software Foundation.
- */
-
-#include <linux/pci.h>
-#include <asm/mach-ath79/pci-ath724x.h>
-
-#define reg_read(_phys)		(*(unsigned int *) KSEG1ADDR(_phys))
-#define reg_write(_phys, _val)	((*(unsigned int *) KSEG1ADDR(_phys)) = (_val))
-
-#define ATH724X_PCI_DEV_BASE	0x14000000
-#define ATH724X_PCI_MEM_BASE	0x10000000
-#define ATH724X_PCI_MEM_SIZE	0x08000000
-
-static DEFINE_SPINLOCK(ath724x_pci_lock);
-static struct ath724x_pci_data *pci_data;
-static int pci_data_size;
-
-static int ath724x_pci_read(struct pci_bus *bus, unsigned int devfn, int where,
-			    int size, uint32_t *value)
-{
-	unsigned long flags, addr, tval, mask;
-
-	if (devfn)
-		return PCIBIOS_DEVICE_NOT_FOUND;
-
-	if (where & (size - 1))
-		return PCIBIOS_BAD_REGISTER_NUMBER;
-
-	spin_lock_irqsave(&ath724x_pci_lock, flags);
-
-	switch (size) {
-	case 1:
-		addr = where & ~3;
-		mask = 0xff000000 >> ((where % 4) * 8);
-		tval = reg_read(ATH724X_PCI_DEV_BASE + addr);
-		tval = tval & ~mask;
-		*value = (tval >> ((4 - (where % 4))*8));
-		break;
-	case 2:
-		addr = where & ~3;
-		mask = 0xffff0000 >> ((where % 4)*8);
-		tval = reg_read(ATH724X_PCI_DEV_BASE + addr);
-		tval = tval & ~mask;
-		*value = (tval >> ((4 - (where % 4))*8));
-		break;
-	case 4:
-		*value = reg_read(ATH724X_PCI_DEV_BASE + where);
-		break;
-	default:
-		spin_unlock_irqrestore(&ath724x_pci_lock, flags);
-
-		return PCIBIOS_BAD_REGISTER_NUMBER;
-	}
-
-	spin_unlock_irqrestore(&ath724x_pci_lock, flags);
-
-	return PCIBIOS_SUCCESSFUL;
-}
-
-static int ath724x_pci_write(struct pci_bus *bus, unsigned int devfn, int where,
-			     int size, uint32_t value)
-{
-	unsigned long flags, tval, addr, mask;
-
-	if (devfn)
-		return PCIBIOS_DEVICE_NOT_FOUND;
-
-	if (where & (size - 1))
-		return PCIBIOS_BAD_REGISTER_NUMBER;
-
-	spin_lock_irqsave(&ath724x_pci_lock, flags);
-
-	switch (size) {
-	case 1:
-		addr = (ATH724X_PCI_DEV_BASE + where) & ~3;
-		mask = 0xff000000 >> ((where % 4)*8);
-		tval = reg_read(addr);
-		tval = tval & ~mask;
-		tval |= (value << ((4 - (where % 4))*8)) & mask;
-		reg_write(addr, tval);
-		break;
-	case 2:
-		addr = (ATH724X_PCI_DEV_BASE + where) & ~3;
-		mask = 0xffff0000 >> ((where % 4)*8);
-		tval = reg_read(addr);
-		tval = tval & ~mask;
-		tval |= (value << ((4 - (where % 4))*8)) & mask;
-		reg_write(addr, tval);
-		break;
-	case 4:
-		reg_write((ATH724X_PCI_DEV_BASE + where), value);
-		break;
-	default:
-		spin_unlock_irqrestore(&ath724x_pci_lock, flags);
-
-		return PCIBIOS_BAD_REGISTER_NUMBER;
-	}
-
-	spin_unlock_irqrestore(&ath724x_pci_lock, flags);
-
-	return PCIBIOS_SUCCESSFUL;
-}
-
-static struct pci_ops ath724x_pci_ops = {
-	.read	= ath724x_pci_read,
-	.write	= ath724x_pci_write,
-};
-
-static struct resource ath724x_io_resource = {
-	.name   = "PCI IO space",
-	.start  = 0,
-	.end    = 0,
-	.flags  = IORESOURCE_IO,
-};
-
-static struct resource ath724x_mem_resource = {
-	.name   = "PCI memory space",
-	.start  = ATH724X_PCI_MEM_BASE,
-	.end    = ATH724X_PCI_MEM_BASE + ATH724X_PCI_MEM_SIZE - 1,
-	.flags  = IORESOURCE_MEM,
-};
-
-static struct pci_controller ath724x_pci_controller = {
-	.pci_ops        = &ath724x_pci_ops,
-	.io_resource    = &ath724x_io_resource,
-	.mem_resource	= &ath724x_mem_resource,
-};
-
-void ath724x_pci_add_data(struct ath724x_pci_data *data, int size)
-{
-	pci_data	= data;
-	pci_data_size	= size;
-}
-
-int __init pcibios_map_irq(const struct pci_dev *dev, uint8_t slot, uint8_t pin)
-{
-	unsigned int devfn = dev->devfn;
-	int irq = -1;
-
-	if (devfn > pci_data_size - 1)
-		return irq;
-
-	irq = pci_data[devfn].irq;
-
-	return irq;
-}
-
-int pcibios_plat_dev_init(struct pci_dev *dev)
-{
-	unsigned int devfn = dev->devfn;
-
-	if (devfn > pci_data_size - 1)
-		return PCIBIOS_DEVICE_NOT_FOUND;
-
-	dev->dev.platform_data = pci_data[devfn].pdata;
-
-	return PCIBIOS_SUCCESSFUL;
-}
-
-static int __init ath724x_pcibios_init(void)
-{
-	register_pci_controller(&ath724x_pci_controller);
-
-	return PCIBIOS_SUCCESSFUL;
-}
-
-arch_initcall(ath724x_pcibios_init);
diff --git a/arch/mips/pci/pci-lantiq.c b/arch/mips/pci/pci-lantiq.c
index 030c77e..ea45353 100644
--- a/arch/mips/pci/pci-lantiq.c
+++ b/arch/mips/pci/pci-lantiq.c
@@ -13,8 +13,12 @@
 #include <linux/delay.h>
 #include <linux/mm.h>
 #include <linux/vmalloc.h>
-#include <linux/export.h>
-#include <linux/platform_device.h>
+#include <linux/module.h>
+#include <linux/clk.h>
+#include <linux/of_platform.h>
+#include <linux/of_gpio.h>
+#include <linux/of_irq.h>
+#include <linux/of_pci.h>
 
 #include <asm/pci.h>
 #include <asm/gpio.h>
@@ -22,17 +26,9 @@
 
 #include <lantiq_soc.h>
 #include <lantiq_irq.h>
-#include <lantiq_platform.h>
 
 #include "pci-lantiq.h"
 
-#define LTQ_PCI_CFG_BASE		0x17000000
-#define LTQ_PCI_CFG_SIZE		0x00008000
-#define LTQ_PCI_MEM_BASE		0x18000000
-#define LTQ_PCI_MEM_SIZE		0x02000000
-#define LTQ_PCI_IO_BASE			0x1AE00000
-#define LTQ_PCI_IO_SIZE			0x00200000
-
 #define PCI_CR_FCI_ADDR_MAP0		0x00C0
 #define PCI_CR_FCI_ADDR_MAP1		0x00C4
 #define PCI_CR_FCI_ADDR_MAP2		0x00C8
@@ -68,79 +64,27 @@
 #define ltq_pci_cfg_w32(x, y)	ltq_w32((x), ltq_pci_mapped_cfg + (y))
 #define ltq_pci_cfg_r32(x)	ltq_r32(ltq_pci_mapped_cfg + (x))
 
-struct ltq_pci_gpio_map {
-	int pin;
-	int alt0;
-	int alt1;
-	int dir;
-	char *name;
-};
-
-/* the pci core can make use of the following gpios */
-static struct ltq_pci_gpio_map ltq_pci_gpio_map[] = {
-	{ 0, 1, 0, 0, "pci-exin0" },
-	{ 1, 1, 0, 0, "pci-exin1" },
-	{ 2, 1, 0, 0, "pci-exin2" },
-	{ 39, 1, 0, 0, "pci-exin3" },
-	{ 10, 1, 0, 0, "pci-exin4" },
-	{ 9, 1, 0, 0, "pci-exin5" },
-	{ 30, 1, 0, 1, "pci-gnt1" },
-	{ 23, 1, 0, 1, "pci-gnt2" },
-	{ 19, 1, 0, 1, "pci-gnt3" },
-	{ 38, 1, 0, 1, "pci-gnt4" },
-	{ 29, 1, 0, 0, "pci-req1" },
-	{ 31, 1, 0, 0, "pci-req2" },
-	{ 3, 1, 0, 0, "pci-req3" },
-	{ 37, 1, 0, 0, "pci-req4" },
-};
-
 __iomem void *ltq_pci_mapped_cfg;
 static __iomem void *ltq_pci_membase;
 
-int (*ltqpci_plat_dev_init)(struct pci_dev *dev) = NULL;
-
-/* Since the PCI REQ pins can be reused for other functionality, make it
-   possible to exclude those from interpretation by the PCI controller */
-static int ltq_pci_req_mask = 0xf;
-
-static int *ltq_pci_irq_map;
-
-struct pci_ops ltq_pci_ops = {
+static int reset_gpio;
+static struct clk *clk_pci, *clk_external;
+static struct resource pci_io_resource;
+static struct resource pci_mem_resource;
+static struct pci_ops pci_ops = {
 	.read	= ltq_pci_read_config_dword,
 	.write	= ltq_pci_write_config_dword
 };
 
-static struct resource pci_io_resource = {
-	.name	= "pci io space",
-	.start	= LTQ_PCI_IO_BASE,
-	.end	= LTQ_PCI_IO_BASE + LTQ_PCI_IO_SIZE - 1,
-	.flags	= IORESOURCE_IO
-};
-
-static struct resource pci_mem_resource = {
-	.name	= "pci memory space",
-	.start	= LTQ_PCI_MEM_BASE,
-	.end	= LTQ_PCI_MEM_BASE + LTQ_PCI_MEM_SIZE - 1,
-	.flags	= IORESOURCE_MEM
-};
-
-static struct pci_controller ltq_pci_controller = {
-	.pci_ops	= &ltq_pci_ops,
+static struct pci_controller pci_controller = {
+	.pci_ops	= &pci_ops,
 	.mem_resource	= &pci_mem_resource,
 	.mem_offset	= 0x00000000UL,
 	.io_resource	= &pci_io_resource,
 	.io_offset	= 0x00000000UL,
 };
 
-int pcibios_plat_dev_init(struct pci_dev *dev)
-{
-	if (ltqpci_plat_dev_init)
-		return ltqpci_plat_dev_init(dev);
-
-	return 0;
-}
-
-static u32 ltq_calc_bar11mask(void)
+static inline u32 ltq_calc_bar11mask(void)
 {
 	u32 mem, bar11mask;
 
@@ -151,48 +95,42 @@
 	return bar11mask;
 }
 
-static void ltq_pci_setup_gpio(int gpio)
+static int __devinit ltq_pci_startup(struct platform_device *pdev)
 {
-	int i;
-	for (i = 0; i < ARRAY_SIZE(ltq_pci_gpio_map); i++) {
-		if (gpio & (1 << i)) {
-			ltq_gpio_request(ltq_pci_gpio_map[i].pin,
-				ltq_pci_gpio_map[i].alt0,
-				ltq_pci_gpio_map[i].alt1,
-				ltq_pci_gpio_map[i].dir,
-				ltq_pci_gpio_map[i].name);
-		}
-	}
-	ltq_gpio_request(21, 0, 0, 1, "pci-reset");
-	ltq_pci_req_mask = (gpio >> PCI_REQ_SHIFT) & PCI_REQ_MASK;
-}
-
-static int __devinit ltq_pci_startup(struct ltq_pci_data *conf)
-{
+	struct device_node *node = pdev->dev.of_node;
+	const __be32 *req_mask, *bus_clk;
 	u32 temp_buffer;
 
-	/* set clock to 33Mhz */
-	if (ltq_is_ar9()) {
-		ltq_cgu_w32(ltq_cgu_r32(LTQ_CGU_IFCCR) & ~0x1f00000, LTQ_CGU_IFCCR);
-		ltq_cgu_w32(ltq_cgu_r32(LTQ_CGU_IFCCR) | 0xe00000, LTQ_CGU_IFCCR);
-	} else {
-		ltq_cgu_w32(ltq_cgu_r32(LTQ_CGU_IFCCR) & ~0xf00000, LTQ_CGU_IFCCR);
-		ltq_cgu_w32(ltq_cgu_r32(LTQ_CGU_IFCCR) | 0x800000, LTQ_CGU_IFCCR);
+	/* get our clocks */
+	clk_pci = clk_get(&pdev->dev, NULL);
+	if (IS_ERR(clk_pci)) {
+		dev_err(&pdev->dev, "failed to get pci clock\n");
+		return PTR_ERR(clk_pci);
 	}
 
-	/* external or internal clock ? */
-	if (conf->clock) {
-		ltq_cgu_w32(ltq_cgu_r32(LTQ_CGU_IFCCR) & ~(1 << 16),
-			LTQ_CGU_IFCCR);
-		ltq_cgu_w32((1 << 30), LTQ_CGU_PCICR);
-	} else {
-		ltq_cgu_w32(ltq_cgu_r32(LTQ_CGU_IFCCR) | (1 << 16),
-			LTQ_CGU_IFCCR);
-		ltq_cgu_w32((1 << 31) | (1 << 30), LTQ_CGU_PCICR);
+	clk_external = clk_get(&pdev->dev, "external");
+	if (IS_ERR(clk_external)) {
+		clk_put(clk_pci);
+		dev_err(&pdev->dev, "failed to get external pci clock\n");
+		return PTR_ERR(clk_external);
 	}
 
-	/* setup pci clock and gpis used by pci */
-	ltq_pci_setup_gpio(conf->gpio);
+	/* read the bus speed that we want */
+	bus_clk = of_get_property(node, "lantiq,bus-clock", NULL);
+	if (bus_clk)
+		clk_set_rate(clk_pci, *bus_clk);
+
+	/* and enable the clocks */
+	clk_enable(clk_pci);
+	if (of_find_property(node, "lantiq,external-clock", NULL))
+		clk_enable(clk_external);
+	else
+		clk_disable(clk_external);
+
+	/* setup reset gpio used by pci */
+	reset_gpio = of_get_named_gpio(node, "gpio-reset", 0);
+	if (reset_gpio > 0)
+		devm_gpio_request(&pdev->dev, reset_gpio, "pci-reset");
 
 	/* enable auto-switching between PCI and EBU */
 	ltq_pci_w32(0xa, PCI_CR_CLK_CTRL);
@@ -205,7 +143,12 @@
 
 	/* enable external 2 PCI masters */
 	temp_buffer = ltq_pci_r32(PCI_CR_PC_ARB);
-	temp_buffer &= (~(ltq_pci_req_mask << 16));
+	/* setup the request mask */
+	req_mask = of_get_property(node, "req-mask", NULL);
+	if (req_mask)
+		temp_buffer &= ~((*req_mask & 0xf) << 16);
+	else
+		temp_buffer &= ~0xf0000;
 	/* enable internal arbiter */
 	temp_buffer |= (1 << INTERNAL_ARB_ENABLE_BIT);
 	/* enable internal PCI master request */
@@ -249,47 +192,55 @@
 	ltq_ebu_w32(ltq_ebu_r32(LTQ_EBU_PCC_IEN) | 0x10, LTQ_EBU_PCC_IEN);
 
 	/* toggle reset pin */
-	__gpio_set_value(21, 0);
-	wmb();
-	mdelay(1);
-	__gpio_set_value(21, 1);
-	return 0;
-}
-
-int __init pcibios_map_irq(const struct pci_dev *dev, u8 slot, u8 pin)
-{
-	if (ltq_pci_irq_map[slot])
-		return ltq_pci_irq_map[slot];
-	printk(KERN_ERR "lq_pci: trying to map irq for unknown slot %d\n",
-		slot);
-
+	if (reset_gpio > 0) {
+		__gpio_set_value(reset_gpio, 0);
+		wmb();
+		mdelay(1);
+		__gpio_set_value(reset_gpio, 1);
+	}
 	return 0;
 }
 
 static int __devinit ltq_pci_probe(struct platform_device *pdev)
 {
-	struct ltq_pci_data *ltq_pci_data =
-		(struct ltq_pci_data *) pdev->dev.platform_data;
+	struct resource *res_cfg, *res_bridge;
 
 	pci_clear_flags(PCI_PROBE_ONLY);
-	ltq_pci_irq_map = ltq_pci_data->irq;
-	ltq_pci_membase = ioremap_nocache(PCI_CR_BASE_ADDR, PCI_CR_SIZE);
-	ltq_pci_mapped_cfg =
-		ioremap_nocache(LTQ_PCI_CFG_BASE, LTQ_PCI_CFG_BASE);
-	ltq_pci_controller.io_map_base =
-		(unsigned long)ioremap(LTQ_PCI_IO_BASE, LTQ_PCI_IO_SIZE - 1);
-	ltq_pci_startup(ltq_pci_data);
-	register_pci_controller(&ltq_pci_controller);
 
+	res_cfg = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+	res_bridge = platform_get_resource(pdev, IORESOURCE_MEM, 1);
+	if (!res_cfg || !res_bridge) {
+		dev_err(&pdev->dev, "missing memory reources\n");
+		return -EINVAL;
+	}
+
+	ltq_pci_membase = devm_request_and_ioremap(&pdev->dev, res_bridge);
+	ltq_pci_mapped_cfg = devm_request_and_ioremap(&pdev->dev, res_cfg);
+
+	if (!ltq_pci_membase || !ltq_pci_mapped_cfg) {
+		dev_err(&pdev->dev, "failed to remap resources\n");
+		return -ENOMEM;
+	}
+
+	ltq_pci_startup(pdev);
+
+	pci_load_of_ranges(&pci_controller, pdev->dev.of_node);
+	register_pci_controller(&pci_controller);
 	return 0;
 }
 
-static struct platform_driver
-ltq_pci_driver = {
+static const struct of_device_id ltq_pci_match[] = {
+	{ .compatible = "lantiq,pci-xway" },
+	{},
+};
+MODULE_DEVICE_TABLE(of, ltq_pci_match);
+
+static struct platform_driver ltq_pci_driver = {
 	.probe = ltq_pci_probe,
 	.driver = {
-		.name = "ltq_pci",
+		.name = "pci-xway",
 		.owner = THIS_MODULE,
+		.of_match_table = ltq_pci_match,
 	},
 };
 
@@ -297,7 +248,7 @@
 {
 	int ret = platform_driver_register(&ltq_pci_driver);
 	if (ret)
-		printk(KERN_INFO "ltq_pci: Error registering platfom driver!");
+		pr_info("pci-xway: Error registering platform driver!");
 	return ret;
 }
 
diff --git a/arch/mips/pci/pci.c b/arch/mips/pci/pci.c
index 0514866..271e8c4a 100644
--- a/arch/mips/pci/pci.c
+++ b/arch/mips/pci/pci.c
@@ -16,6 +16,7 @@
 #include <linux/init.h>
 #include <linux/types.h>
 #include <linux/pci.h>
+#include <linux/of_address.h>
 
 #include <asm/cpu-info.h>
 
@@ -114,9 +115,63 @@
 			pci_bus_assign_resources(bus);
 			pci_enable_bridges(bus);
 		}
+		bus->dev.of_node = hose->of_node;
 	}
 }
 
+#ifdef CONFIG_OF
+void __devinit pci_load_of_ranges(struct pci_controller *hose,
+				struct device_node *node)
+{
+	const __be32 *ranges;
+	int rlen;
+	int pna = of_n_addr_cells(node);
+	int np = pna + 5;
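+	/* each "ranges" entry is 3 PCI address cells + pna parent cells + 2 size cells */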
+
+	pr_info("PCI host bridge %s ranges:\n", node->full_name);
+	ranges = of_get_property(node, "ranges", &rlen);
+	if (ranges == NULL)
+		return;
+	hose->of_node = node;
+
+	while ((rlen -= np * 4) >= 0) {
+		u32 pci_space;
+		struct resource *res = NULL;
+		u64 addr, size;
+
+		pci_space = be32_to_cpup(&ranges[0]);
+		addr = of_translate_address(node, ranges + 3);
+		size = of_read_number(ranges + pna + 3, 2);
+		ranges += np;
+		switch ((pci_space >> 24) & 0x3) {
+		case 1:		/* PCI IO space */
+			pr_info("  IO 0x%016llx..0x%016llx\n",
+					addr, addr + size - 1);
+			hose->io_map_base =
+				(unsigned long)ioremap(addr, size);
+			res = hose->io_resource;
+			res->flags = IORESOURCE_IO;
+			break;
+		case 2:		/* PCI Memory space */
+		case 3:		/* PCI 64 bits Memory space */
+			pr_info(" MEM 0x%016llx..0x%016llx\n",
+					addr, addr + size - 1);
+			res = hose->mem_resource;
+			res->flags = IORESOURCE_MEM;
+			break;
+		}
+		if (res != NULL) {
+			res->start = addr;
+			res->name = node->full_name;
+			res->end = res->start + size - 1;
+			res->parent = NULL;
+			res->sibling = NULL;
+			res->child = NULL;
+		}
+	}
+}
+#endif
+
 static DEFINE_MUTEX(pci_scan_mutex);
 
 void __devinit register_pci_controller(struct pci_controller *hose)
diff --git a/arch/mips/pmc-sierra/yosemite/Makefile b/arch/mips/pmc-sierra/yosemite/Makefile
index 02f5fb9..5af95ec 100644
--- a/arch/mips/pmc-sierra/yosemite/Makefile
+++ b/arch/mips/pmc-sierra/yosemite/Makefile
@@ -5,5 +5,3 @@
 obj-y    += irq.o prom.o py-console.o setup.o
 
 obj-$(CONFIG_SMP)		+= smp.o
-
-ccflags-y := -Werror
diff --git a/arch/mips/pmc-sierra/yosemite/setup.c b/arch/mips/pmc-sierra/yosemite/setup.c
index 3498ac9..b6472fc 100644
--- a/arch/mips/pmc-sierra/yosemite/setup.c
+++ b/arch/mips/pmc-sierra/yosemite/setup.c
@@ -27,6 +27,7 @@
 #include <linux/bcd.h>
 #include <linux/init.h>
 #include <linux/kernel.h>
+#include <linux/export.h>
 #include <linux/types.h>
 #include <linux/mm.h>
 #include <linux/bootmem.h>
diff --git a/arch/mips/pnx833x/common/platform.c b/arch/mips/pnx833x/common/platform.c
index 87167dc..05a1d92 100644
--- a/arch/mips/pnx833x/common/platform.c
+++ b/arch/mips/pnx833x/common/platform.c
@@ -244,11 +244,6 @@
 	.resource      = pnx833x_sata_resources,
 };
 
-static const char *part_probes[] = {
-	"cmdlinepart",
-	NULL
-};
-
 static void
 pnx833x_flash_nand_cmd_ctrl(struct mtd_info *mtd, int cmd, unsigned int ctrl)
 {
@@ -268,7 +263,6 @@
 	.chip = {
 		.nr_chips		= 1,
 		.chip_delay		= 25,
-		.part_probe_types 	= part_probes,
 	},
 	.ctrl = {
 		.cmd_ctrl 		= pnx833x_flash_nand_cmd_ctrl
diff --git a/arch/mips/powertv/Makefile b/arch/mips/powertv/Makefile
index 348d2e8..39ca9f8 100644
--- a/arch/mips/powertv/Makefile
+++ b/arch/mips/powertv/Makefile
@@ -27,5 +27,3 @@
 	asic/ pci/
 
 obj-$(CONFIG_USB) += powertv-usb.o
-
-ccflags-y := -Wall
diff --git a/arch/mips/powertv/asic/Makefile b/arch/mips/powertv/asic/Makefile
index d810a33..35dcc53 100644
--- a/arch/mips/powertv/asic/Makefile
+++ b/arch/mips/powertv/asic/Makefile
@@ -19,5 +19,3 @@
 obj-y += asic-calliope.o asic-cronus.o asic-gaia.o asic-zeus.o \
 	asic_devices.o asic_int.o irq_asic.o prealloc-calliope.o \
 	prealloc-cronus.o prealloc-cronuslite.o prealloc-gaia.o prealloc-zeus.o
-
-ccflags-y := -Wall -Werror
diff --git a/arch/mips/powertv/pci/Makefile b/arch/mips/powertv/pci/Makefile
index 5783201..2610a6a 100644
--- a/arch/mips/powertv/pci/Makefile
+++ b/arch/mips/powertv/pci/Makefile
@@ -17,5 +17,3 @@
 #
 
 obj-$(CONFIG_PCI)	+= fixup-powertv.o
-
-ccflags-y := -Wall -Werror
diff --git a/arch/mips/rb532/devices.c b/arch/mips/rb532/devices.c
index a969eb82..716e9a1 100644
--- a/arch/mips/rb532/devices.c
+++ b/arch/mips/rb532/devices.c
@@ -15,6 +15,7 @@
  *  GNU General Public License for more details.
  */
 #include <linux/kernel.h>
+#include <linux/export.h>
 #include <linux/init.h>
 #include <linux/ctype.h>
 #include <linux/string.h>
@@ -292,7 +293,6 @@
 	rb532_nand_data.chip.nr_partitions = ARRAY_SIZE(rb532_partition_info);
 	rb532_nand_data.chip.partitions = rb532_partition_info;
 	rb532_nand_data.chip.chip_delay = NAND_CHIP_DELAY;
-	rb532_nand_data.chip.options = NAND_NO_AUTOINCR;
 }
 
 
diff --git a/arch/mips/sni/setup.c b/arch/mips/sni/setup.c
index d16b462..413f17f 100644
--- a/arch/mips/sni/setup.c
+++ b/arch/mips/sni/setup.c
@@ -10,6 +10,7 @@
  */
 #include <linux/eisa.h>
 #include <linux/init.h>
+#include <linux/export.h>
 #include <linux/console.h>
 #include <linux/fb.h>
 #include <linux/screen_info.h>
diff --git a/arch/mn10300/include/asm/posix_types.h b/arch/mn10300/include/asm/posix_types.h
index ab50618..d31eeea 100644
--- a/arch/mn10300/include/asm/posix_types.h
+++ b/arch/mn10300/include/asm/posix_types.h
@@ -20,9 +20,6 @@
 typedef unsigned short	__kernel_mode_t;
 #define __kernel_mode_t __kernel_mode_t
 
-typedef unsigned short	__kernel_nlink_t;
-#define __kernel_nlink_t __kernel_nlink_t
-
 typedef unsigned short	__kernel_ipc_pid_t;
 #define __kernel_ipc_pid_t __kernel_ipc_pid_t
 
diff --git a/arch/mn10300/kernel/signal.c b/arch/mn10300/kernel/signal.c
index 890cf91..6ab0bee 100644
--- a/arch/mn10300/kernel/signal.c
+++ b/arch/mn10300/kernel/signal.c
@@ -31,8 +31,6 @@
 
 #define DEBUG_SIG 0
 
-#define _BLOCKABLE (~(sigmask(SIGKILL) | sigmask(SIGSTOP)))
-
 /*
  * atomically swap in the new signal mask, and wait for a signal.
  */
@@ -163,7 +161,6 @@
 			     sizeof(frame->extramask)))
 		goto badframe;
 
-	sigdelsetmask(&set, ~_BLOCKABLE);
 	set_current_blocked(&set);
 
 	if (restore_sigcontext(current_frame(), &frame->sc, &d0))
@@ -191,7 +188,6 @@
 	if (__copy_from_user(&set, &frame->uc.uc_sigmask, sizeof(set)))
 		goto badframe;
 
-	sigdelsetmask(&set, ~_BLOCKABLE);
 	set_current_blocked(&set);
 
 	if (restore_sigcontext(current_frame(), &frame->uc.uc_mcontext, &d0))
@@ -430,8 +426,9 @@
  */
 static int handle_signal(int sig,
 			 siginfo_t *info, struct k_sigaction *ka,
-			 sigset_t *oldset, struct pt_regs *regs)
+			 struct pt_regs *regs)
 {
+	sigset_t *oldset = sigmask_to_save();
 	int ret;
 
 	/* Are we from a system call? */
@@ -461,11 +458,11 @@
 		ret = setup_rt_frame(sig, ka, info, oldset, regs);
 	else
 		ret = setup_frame(sig, ka, oldset, regs);
+	if (ret)
+		return;
 
-	if (ret == 0)
-		block_sigmask(ka, sig);
-
-	return ret;
+	signal_delivered(sig, info, ka, regs,
+				 test_thread_flag(TIF_SINGLESTEP));
 }
 
 /*
@@ -475,7 +472,6 @@
 {
 	struct k_sigaction ka;
 	siginfo_t info;
-	sigset_t *oldset;
 	int signr;
 
 	/* we want the common case to go fast, which is why we may in certain
@@ -483,23 +479,9 @@
 	if (!user_mode(regs))
 		return;
 
-	if (test_thread_flag(TIF_RESTORE_SIGMASK))
-		oldset = &current->saved_sigmask;
-	else
-		oldset = &current->blocked;
-
 	signr = get_signal_to_deliver(&info, &ka, regs, NULL);
 	if (signr > 0) {
-		if (handle_signal(signr, &info, &ka, oldset, regs) == 0) {
-			/* a signal was successfully delivered; the saved
-			 * sigmask will have been stored in the signal frame,
-			 * and will be restored by sigreturn, so we can simply
-			 * clear the TIF_RESTORE_SIGMASK flag */
-			if (test_thread_flag(TIF_RESTORE_SIGMASK))
-				clear_thread_flag(TIF_RESTORE_SIGMASK);
-
-			tracehook_signal_handler(signr, &info, &ka, regs,
-						 test_thread_flag(TIF_SINGLESTEP));
+		if (handle_signal(signr, &info, &ka, regs) == 0) {
 		}
 
 		return;
@@ -525,10 +507,7 @@
 
 	/* if there's no signal to deliver, we just put the saved sigmask
 	 * back */
-	if (test_thread_flag(TIF_RESTORE_SIGMASK)) {
-		clear_thread_flag(TIF_RESTORE_SIGMASK);
-		sigprocmask(SIG_SETMASK, &current->saved_sigmask, NULL);
-	}
+	restore_saved_sigmask();
 }
 
 /*
@@ -548,13 +527,11 @@
 	}
 
 	/* deal with pending signal delivery */
-	if (thread_info_flags & (_TIF_SIGPENDING | _TIF_RESTORE_SIGMASK))
+	if (thread_info_flags & _TIF_SIGPENDING)
 		do_signal(regs);
 
 	if (thread_info_flags & _TIF_NOTIFY_RESUME) {
 		clear_thread_flag(TIF_NOTIFY_RESUME);
 		tracehook_notify_resume(current_frame());
-		if (current->replacement_session_keyring)
-			key_replace_session_keyring();
 	}
 }
diff --git a/arch/openrisc/Kconfig b/arch/openrisc/Kconfig
index 4932247..49765b5 100644
--- a/arch/openrisc/Kconfig
+++ b/arch/openrisc/Kconfig
@@ -19,6 +19,8 @@
 	select GENERIC_CPU_DEVICES
 	select GENERIC_ATOMIC64
 	select GENERIC_CLOCKEVENTS
+	select GENERIC_STRNCPY_FROM_USER
+	select GENERIC_STRNLEN_USER
 
 config MMU
 	def_bool y
diff --git a/arch/openrisc/include/asm/Kbuild b/arch/openrisc/include/asm/Kbuild
index c936483..3f35c38 100644
--- a/arch/openrisc/include/asm/Kbuild
+++ b/arch/openrisc/include/asm/Kbuild
@@ -66,3 +66,4 @@
 generic-y += types.h
 generic-y += ucontext.h
 generic-y += user.h
+generic-y += word-at-a-time.h
diff --git a/arch/openrisc/include/asm/uaccess.h b/arch/openrisc/include/asm/uaccess.h
index f5abaa0..ab2e7a1 100644
--- a/arch/openrisc/include/asm/uaccess.h
+++ b/arch/openrisc/include/asm/uaccess.h
@@ -313,42 +313,12 @@
 	return size;
 }
 
-extern int __strncpy_from_user(char *dst, const char *src, long count);
+#define user_addr_max() \
+	(segment_eq(get_fs(), USER_DS) ? TASK_SIZE : ~0UL)
 
-static inline long strncpy_from_user(char *dst, const char *src, long count)
-{
-	if (access_ok(VERIFY_READ, src, 1))
-		return __strncpy_from_user(dst, src, count);
-	return -EFAULT;
-}
+extern long strncpy_from_user(char *dest, const char __user *src, long count);
 
-/*
- * Return the size of a string (including the ending 0)
- *
- * Return 0 for error
- */
-
-extern int __strnlen_user(const char *str, long len, unsigned long top);
-
-/*
- * Returns the length of the string at str (including the null byte),
- * or 0 if we hit a page we can't access,
- * or something > len if we didn't find a null byte.
- *
- * The `top' parameter to __strnlen_user is to make sure that
- * we can never overflow from the user area into kernel space.
- */
-static inline long strnlen_user(const char __user *str, long len)
-{
-	unsigned long top = (unsigned long)get_fs();
-	unsigned long res = 0;
-
-	if (__addr_ok(str))
-		res = __strnlen_user(str, len, top);
-
-	return res;
-}
-
-#define strlen_user(str) strnlen_user(str, TASK_SIZE-1)
+extern __must_check long strlen_user(const char __user *str);
+extern __must_check long strnlen_user(const char __user *str, long n);
 
 #endif /* __ASM_OPENRISC_UACCESS_H */
diff --git a/arch/openrisc/kernel/signal.c b/arch/openrisc/kernel/signal.c
index e970743..3011029 100644
--- a/arch/openrisc/kernel/signal.c
+++ b/arch/openrisc/kernel/signal.c
@@ -33,8 +33,6 @@
 
 #define DEBUG_SIG 0
 
-#define _BLOCKABLE (~(sigmask(SIGKILL) | sigmask(SIGSTOP)))
-
 asmlinkage long
 _sys_sigaltstack(const stack_t *uss, stack_t *uoss, struct pt_regs *regs)
 {
@@ -101,7 +99,6 @@
 	if (__copy_from_user(&set, &frame->uc.uc_sigmask, sizeof(set)))
 		goto badframe;
 
-	sigdelsetmask(&set, ~_BLOCKABLE);
 	set_current_blocked(&set);
 
 	if (restore_sigcontext(regs, &frame->uc.uc_mcontext))
@@ -251,20 +248,19 @@
 	return -EFAULT;
 }
 
-static inline int
+static inline void
 handle_signal(unsigned long sig,
 	      siginfo_t *info, struct k_sigaction *ka,
-	      sigset_t *oldset, struct pt_regs *regs)
+	      struct pt_regs *regs)
 {
 	int ret;
 
-	ret = setup_rt_frame(sig, ka, info, oldset, regs);
+	ret = setup_rt_frame(sig, ka, info, sigmask_to_save(), regs);
 	if (ret)
-		return ret;
+		return;
 
-	block_sigmask(ka, sig);
-
-	return 0;
+	signal_delivered(sig, info, ka, regs,
+				 test_thread_flag(TIF_SINGLESTEP));
 }
 
 /*
@@ -339,30 +335,10 @@
 	if (signr <= 0) {
 		/* no signal to deliver so we just put the saved sigmask
 		 * back */
-		if (test_thread_flag(TIF_RESTORE_SIGMASK)) {
-			clear_thread_flag(TIF_RESTORE_SIGMASK);
-			sigprocmask(SIG_SETMASK, &current->saved_sigmask, NULL);
-		}
-
+		restore_saved_sigmask();
 	} else {		/* signr > 0 */
-		sigset_t *oldset;
-
-		if (current_thread_info()->flags & _TIF_RESTORE_SIGMASK)
-			oldset = &current->saved_sigmask;
-		else
-			oldset = &current->blocked;
-
 		/* Whee!  Actually deliver the signal.  */
-		if (!handle_signal(signr, &info, &ka, oldset, regs)) {
-			/* a signal was successfully delivered; the saved
-			 * sigmask will have been stored in the signal frame,
-			 * and will be restored by sigreturn, so we can simply
-			 * clear the TIF_RESTORE_SIGMASK flag */
-			clear_thread_flag(TIF_RESTORE_SIGMASK);
-		}
-
-		tracehook_signal_handler(signr, &info, &ka, regs,
-					 test_thread_flag(TIF_SINGLESTEP));
+		handle_signal(signr, &info, &ka, regs);
 	}
 
 	return;
@@ -376,7 +352,5 @@
 	if (current_thread_info()->flags & _TIF_NOTIFY_RESUME) {
 		clear_thread_flag(TIF_NOTIFY_RESUME);
 		tracehook_notify_resume(regs);
-		if (current->replacement_session_keyring)
-			key_replace_session_keyring();
 	}
 }
diff --git a/arch/openrisc/lib/string.S b/arch/openrisc/lib/string.S
index 465f04b..c09fee7 100644
--- a/arch/openrisc/lib/string.S
+++ b/arch/openrisc/lib/string.S
@@ -103,102 +103,3 @@
 	.section __ex_table, "a"
 		.long 9b, 99b		// write fault
 	.previous
-
-/*
- * long strncpy_from_user(char *dst, const char *src, long count)
- *
- *
- */
-	.global	__strncpy_from_user
-__strncpy_from_user:
-	l.addi  r1,r1,-16
-	l.sw    0(r1),r6
-	l.sw    4(r1),r5
-	l.sw    8(r1),r4
-	l.sw    12(r1),r3
-
-	l.addi  r11,r5,0
-2:  	l.sfeq  r5,r0
-	l.bf    1f
-	l.addi  r5,r5,-1
-8:    	l.lbz   r6,0(r4)
-	l.sfeq  r6,r0
-	l.bf    1f
-9:    	l.sb    0(r3),r6
-	l.addi  r3,r3,1
-	l.j     2b
-	l.addi  r4,r4,1
-1:
-	l.lwz   r6,0(r1)
-	l.addi  r5,r5,1
-	l.sub   r11,r11,r5              // r11 holds the return value
-
-	l.lwz   r6,0(r1)
-	l.lwz   r5,4(r1)
-	l.lwz   r4,8(r1)
-	l.lwz   r3,12(r1)
-	l.jr    r9
-	l.addi  r1,r1,16
-
-	.section .fixup, "ax"
-99:
-		l.movhi r11,hi(-EFAULT)
-		l.ori   r11,r11,lo(-EFAULT)
-
-		l.lwz   r6,0(r1)
-		l.lwz   r5,4(r1)
-		l.lwz   r4,8(r1)
-		l.lwz   r3,12(r1)
-		l.jr	r9
-		l.addi  r1,r1,16
-	.previous
-
-	.section __ex_table, "a"
-		.long 8b, 99b		// read fault
-	.previous
-
-/*
- * extern int __strnlen_user(const char *str, long len, unsigned long top);
- *
- *
- * RTRN: - length of a string including NUL termination character
- *       - on page fault 0
- */
-
-	.global __strnlen_user
-__strnlen_user:
-	l.addi  r1,r1,-8
-	l.sw    0(r1),r6
-	l.sw    4(r1),r3
-
-	l.addi  r11,r0,0
-2:  	l.sfeq  r11,r4
-	l.bf    1f
-	l.addi  r11,r11,1
-8:    	l.lbz   r6,0(r3)
-	l.sfeq  r6,r0
-	l.bf    1f
-	l.sfgeu r3,r5                  // are we over the top ?
-	l.bf    99f
-	l.j     2b
-	l.addi  r3,r3,1
-
-1:
-	l.lwz   r6,0(r1)
-	l.lwz	r3,4(r1)
-	l.jr    r9
-	l.addi  r1,r1,8
-
-	.section .fixup, "ax"
-99:
-		l.addi  r11,r0,0
-
-		l.lwz   r6,0(r1)
-		l.lwz	r3,4(r1)
-		l.jr	r9
-		l.addi  r1,r1,8
-	.previous
-
-	.section __ex_table, "a"
-		.long 8b, 99b		// read fault
-	.previous
diff --git a/arch/parisc/Kconfig b/arch/parisc/Kconfig
index ddb8b24..3ff21b5 100644
--- a/arch/parisc/Kconfig
+++ b/arch/parisc/Kconfig
@@ -18,6 +18,7 @@
 	select IRQ_PER_CPU
 	select ARCH_HAVE_NMI_SAFE_CMPXCHG
 	select GENERIC_SMP_IDLE_THREAD
+	select GENERIC_STRNCPY_FROM_USER
 
 	help
 	  The PA-RISC microprocessor is designed by Hewlett-Packard and used
diff --git a/arch/parisc/Makefile b/arch/parisc/Makefile
index dbc3850..5707f1a 100644
--- a/arch/parisc/Makefile
+++ b/arch/parisc/Makefile
@@ -21,6 +21,7 @@
 
 NM		= sh $(srctree)/arch/parisc/nm
 CHECKFLAGS	+= -D__hppa__=1
+LIBGCC		= $(shell $(CC) $(KBUILD_CFLAGS) -print-libgcc-file-name)
 
 MACHINE		:= $(shell uname -m)
 ifeq ($(MACHINE),parisc*)
@@ -79,7 +80,7 @@
 kernel-$(CONFIG_HPUX)		+= hpux/
 
 core-y	+= $(addprefix arch/parisc/, $(kernel-y))
-libs-y	+= arch/parisc/lib/ `$(CC) -print-libgcc-file-name`
+libs-y	+= arch/parisc/lib/ $(LIBGCC)
 
 drivers-$(CONFIG_OPROFILE)		+= arch/parisc/oprofile/
 
diff --git a/arch/parisc/include/asm/Kbuild b/arch/parisc/include/asm/Kbuild
index 19a434f..4383707 100644
--- a/arch/parisc/include/asm/Kbuild
+++ b/arch/parisc/include/asm/Kbuild
@@ -1,3 +1,4 @@
 include include/asm-generic/Kbuild.asm
 
 header-y += pdc.h
+generic-y += word-at-a-time.h
diff --git a/arch/parisc/include/asm/bug.h b/arch/parisc/include/asm/bug.h
index 72cfdb0..62a3333 100644
--- a/arch/parisc/include/asm/bug.h
+++ b/arch/parisc/include/asm/bug.h
@@ -1,6 +1,8 @@
 #ifndef _PARISC_BUG_H
 #define _PARISC_BUG_H
 
+#include <linux/kernel.h>	/* for BUGFLAG_TAINT */
+
 /*
  * Tell the user there is some problem.
  * The offending file and line are encoded in the __bug_table section.
diff --git a/arch/parisc/include/asm/posix_types.h b/arch/parisc/include/asm/posix_types.h
index 5212b03..b934425 100644
--- a/arch/parisc/include/asm/posix_types.h
+++ b/arch/parisc/include/asm/posix_types.h
@@ -10,9 +10,6 @@
 typedef unsigned short		__kernel_mode_t;
 #define __kernel_mode_t __kernel_mode_t
 
-typedef unsigned short		__kernel_nlink_t;
-#define __kernel_nlink_t __kernel_nlink_t
-
 typedef unsigned short		__kernel_ipc_pid_t;
 #define __kernel_ipc_pid_t __kernel_ipc_pid_t
 
diff --git a/arch/parisc/include/asm/smp.h b/arch/parisc/include/asm/smp.h
index e8f8037..a5dc906 100644
--- a/arch/parisc/include/asm/smp.h
+++ b/arch/parisc/include/asm/smp.h
@@ -25,7 +25,6 @@
 #define cpu_number_map(cpu)	(cpu)
 #define cpu_logical_map(cpu)	(cpu)
 
-extern void smp_send_reschedule(int cpu);
 extern void smp_send_all_nop(void);
 
 extern void arch_send_call_function_single_ipi(int cpu);
@@ -50,6 +49,5 @@
   while(1)
     ;
 }
-extern int __cpu_up (unsigned int cpu);
 
 #endif /*  __ASM_SMP_H */
diff --git a/arch/parisc/include/asm/stat.h b/arch/parisc/include/asm/stat.h
index 9d5fbbc..d76fbda 100644
--- a/arch/parisc/include/asm/stat.h
+++ b/arch/parisc/include/asm/stat.h
@@ -7,7 +7,7 @@
 	unsigned int	st_dev;		/* dev_t is 32 bits on parisc */
 	ino_t		st_ino;		/* 32 bits */
 	mode_t		st_mode;	/* 16 bits */
-	nlink_t		st_nlink;	/* 16 bits */
+	unsigned short	st_nlink;	/* 16 bits */
 	unsigned short	st_reserved1;	/* old st_uid */
 	unsigned short	st_reserved2;	/* old st_gid */
 	unsigned int	st_rdev;
@@ -42,7 +42,7 @@
 	unsigned int	st_dev;		/* dev_t is 32 bits on parisc */
 	ino_t           st_ino;         /* 32 bits */
 	mode_t		st_mode;	/* 16 bits */
-	nlink_t		st_nlink;	/* 16 bits */
+	unsigned short	st_nlink;	/* 16 bits */
 	unsigned short	st_reserved1;	/* old st_uid */
 	unsigned short	st_reserved2;	/* old st_gid */
 	unsigned int	st_rdev;
diff --git a/arch/parisc/include/asm/thread_info.h b/arch/parisc/include/asm/thread_info.h
index 83ae7dd..22b4726 100644
--- a/arch/parisc/include/asm/thread_info.h
+++ b/arch/parisc/include/asm/thread_info.h
@@ -74,7 +74,7 @@
 #define _TIF_BLOCKSTEP		(1 << TIF_BLOCKSTEP)
 
 #define _TIF_USER_WORK_MASK     (_TIF_SIGPENDING | _TIF_NOTIFY_RESUME | \
-                                 _TIF_NEED_RESCHED | _TIF_RESTORE_SIGMASK)
+                                 _TIF_NEED_RESCHED)
 
 #endif /* __KERNEL__ */
 
diff --git a/arch/parisc/include/asm/uaccess.h b/arch/parisc/include/asm/uaccess.h
index 9ac0660..4ba2c93 100644
--- a/arch/parisc/include/asm/uaccess.h
+++ b/arch/parisc/include/asm/uaccess.h
@@ -218,15 +218,14 @@
 extern unsigned long lcopy_to_user(void __user *, const void *, unsigned long);
 extern unsigned long lcopy_from_user(void *, const void __user *, unsigned long);
 extern unsigned long lcopy_in_user(void __user *, const void __user *, unsigned long);
-extern long lstrncpy_from_user(char *, const char __user *, long);
+extern long strncpy_from_user(char *, const char __user *, long);
 extern unsigned lclear_user(void __user *,unsigned long);
 extern long lstrnlen_user(const char __user *,long);
-
 /*
  * Complex access routines -- macros
  */
+#define user_addr_max() (~0UL)
 
-#define strncpy_from_user lstrncpy_from_user
 #define strnlen_user lstrnlen_user
 #define strlen_user(str) lstrnlen_user(str, 0x7fffffffL)
 #define clear_user lclear_user
diff --git a/arch/parisc/kernel/entry.S b/arch/parisc/kernel/entry.S
index 5350342..18670a0 100644
--- a/arch/parisc/kernel/entry.S
+++ b/arch/parisc/kernel/entry.S
@@ -552,7 +552,7 @@
 	 * entry (identifying the physical page) and %r23 up with
 	 * the from tlb entry (or nothing if only a to entry---for
 	 * clear_user_page_asm) */
-	.macro		do_alias	spc,tmp,tmp1,va,pte,prot,fault
+	.macro		do_alias	spc,tmp,tmp1,va,pte,prot,fault,patype
 	cmpib,COND(<>),n 0,\spc,\fault
 	ldil		L%(TMPALIAS_MAP_START),\tmp
 #if defined(CONFIG_64BIT) && (TMPALIAS_MAP_START >= 0x80000000)
@@ -581,11 +581,15 @@
 	 */
 	cmpiclr,=	0x01,\tmp,%r0
 	ldi		(_PAGE_DIRTY|_PAGE_READ|_PAGE_WRITE),\prot
-#ifdef CONFIG_64BIT
+.ifc \patype,20
 	depd,z		\prot,8,7,\prot
-#else
+.else
+.ifc \patype,11
 	depw,z		\prot,8,7,\prot
-#endif
+.else
+	.error "undefined PA type to do_alias"
+.endif
+.endif
 	/*
 	 * OK, it is in the temp alias region, check whether "from" or "to".
 	 * Check "subtle" note in pacache.S re: r23/r26.
@@ -920,7 +924,7 @@
 	/* As above */
 	mfctl   %cr30,%r1
 	LDREG	TI_FLAGS(%r1),%r19
-	ldi	(_TIF_SIGPENDING|_TIF_RESTORE_SIGMASK|_TIF_NOTIFY_RESUME), %r20
+	ldi	(_TIF_SIGPENDING|_TIF_NOTIFY_RESUME), %r20
 	and,COND(<>)	%r19, %r20, %r0
 	b,n	intr_restore	/* skip past if we've nothing to do */
 
@@ -1189,7 +1193,7 @@
 	nop
 
 dtlb_check_alias_20w:
-	do_alias	spc,t0,t1,va,pte,prot,dtlb_fault
+	do_alias	spc,t0,t1,va,pte,prot,dtlb_fault,20
 
 	idtlbt          pte,prot
 
@@ -1213,7 +1217,7 @@
 	nop
 
 nadtlb_check_alias_20w:
-	do_alias	spc,t0,t1,va,pte,prot,nadtlb_emulate
+	do_alias	spc,t0,t1,va,pte,prot,nadtlb_emulate,20
 
 	idtlbt          pte,prot
 
@@ -1245,7 +1249,7 @@
 	nop
 
 dtlb_check_alias_11:
-	do_alias	spc,t0,t1,va,pte,prot,dtlb_fault
+	do_alias	spc,t0,t1,va,pte,prot,dtlb_fault,11
 
 	idtlba          pte,(va)
 	idtlbp          prot,(va)
@@ -1277,7 +1281,7 @@
 	nop
 
 nadtlb_check_alias_11:
-	do_alias	spc,t0,t1,va,pte,prot,nadtlb_emulate
+	do_alias	spc,t0,t1,va,pte,prot,nadtlb_emulate,11
 
 	idtlba          pte,(va)
 	idtlbp          prot,(va)
@@ -1304,7 +1308,7 @@
 	nop
 
 dtlb_check_alias_20:
-	do_alias	spc,t0,t1,va,pte,prot,dtlb_fault
+	do_alias	spc,t0,t1,va,pte,prot,dtlb_fault,20
 	
 	idtlbt          pte,prot
 
@@ -1330,7 +1334,7 @@
 	nop
 
 nadtlb_check_alias_20:
-	do_alias	spc,t0,t1,va,pte,prot,nadtlb_emulate
+	do_alias	spc,t0,t1,va,pte,prot,nadtlb_emulate,20
 
 	idtlbt          pte,prot
 
@@ -1457,7 +1461,7 @@
 	nop
 
 naitlb_check_alias_20w:
-	do_alias	spc,t0,t1,va,pte,prot,naitlb_fault
+	do_alias	spc,t0,t1,va,pte,prot,naitlb_fault,20
 
 	iitlbt		pte,prot
 
@@ -1511,7 +1515,7 @@
 	nop
 
 naitlb_check_alias_11:
-	do_alias	spc,t0,t1,va,pte,prot,itlb_fault
+	do_alias	spc,t0,t1,va,pte,prot,itlb_fault,11
 
 	iitlba          pte,(%sr0, va)
 	iitlbp          prot,(%sr0, va)
@@ -1557,7 +1561,7 @@
 	nop
 
 naitlb_check_alias_20:
-	do_alias	spc,t0,t1,va,pte,prot,naitlb_fault
+	do_alias	spc,t0,t1,va,pte,prot,naitlb_fault,20
 
 	iitlbt          pte,prot
 
@@ -2028,7 +2032,7 @@
 	.import do_signal,code
 syscall_check_sig:
 	LDREG	TI_FLAGS-THREAD_SZ_ALGN-FRAME_SIZE(%r30),%r19
-	ldi	(_TIF_SIGPENDING|_TIF_RESTORE_SIGMASK), %r26
+	ldi	(_TIF_SIGPENDING|_TIF_NOTIFY_RESUME), %r26
 	and,COND(<>)	%r19, %r26, %r0
 	b,n	syscall_restore	/* skip past if we've nothing to do */
 
diff --git a/arch/parisc/kernel/parisc_ksyms.c b/arch/parisc/kernel/parisc_ksyms.c
index a7bb757..ceec85d 100644
--- a/arch/parisc/kernel/parisc_ksyms.c
+++ b/arch/parisc/kernel/parisc_ksyms.c
@@ -44,7 +44,6 @@
 #endif
 
 #include <asm/uaccess.h>
-EXPORT_SYMBOL(lstrncpy_from_user);
 EXPORT_SYMBOL(lclear_user);
 EXPORT_SYMBOL(lstrnlen_user);
 
diff --git a/arch/parisc/kernel/signal.c b/arch/parisc/kernel/signal.c
index 4b9cb0d..594459b 100644
--- a/arch/parisc/kernel/signal.c
+++ b/arch/parisc/kernel/signal.c
@@ -48,9 +48,6 @@
 #define DBG(LEVEL, ...)
 #endif
 	
-
-#define _BLOCKABLE (~(sigmask(SIGKILL) | sigmask(SIGSTOP)))
-
 /* gcc will complain if a pointer is cast to an integer of different
  * size.  If you really need to do this (and we do for an ELF32 user
  * application in an ELF64 kernel) then you have to do a cast to an
@@ -131,7 +128,6 @@
 			goto give_sigsegv;
 	}
 		
-	sigdelsetmask(&set, ~_BLOCKABLE);
 	set_current_blocked(&set);
 
 	/* Good thing we saved the old gr[30], eh? */
@@ -443,8 +439,9 @@
 
 static long
 handle_signal(unsigned long sig, siginfo_t *info, struct k_sigaction *ka,
-		sigset_t *oldset, struct pt_regs *regs, int in_syscall)
+		struct pt_regs *regs, int in_syscall)
 {
+	sigset_t *oldset = sigmask_to_save();
 	DBG(1,"handle_signal: sig=%ld, ka=%p, info=%p, oldset=%p, regs=%p\n",
 	       sig, ka, info, oldset, regs);
 	
@@ -452,12 +449,13 @@
 	if (!setup_rt_frame(sig, ka, info, oldset, regs, in_syscall))
 		return 0;
 
-	block_sigmask(ka, sig);
-
-	tracehook_signal_handler(sig, info, ka, regs, 
+	signal_delivered(sig, info, ka, regs, 
 		test_thread_flag(TIF_SINGLESTEP) ||
 		test_thread_flag(TIF_BLOCKSTEP));
 
+	DBG(1,KERN_DEBUG "do_signal: Exit (success), regs->gr[28] = %ld\n",
+		regs->gr[28]);
+
 	return 1;
 }
 
@@ -568,28 +566,17 @@
 	siginfo_t info;
 	struct k_sigaction ka;
 	int signr;
-	sigset_t *oldset;
 
-	DBG(1,"\ndo_signal: oldset=0x%p, regs=0x%p, sr7 %#lx, in_syscall=%d\n",
-	       oldset, regs, regs->sr[7], in_syscall);
+	DBG(1,"\ndo_signal: regs=0x%p, sr7 %#lx, in_syscall=%d\n",
+	       regs, regs->sr[7], in_syscall);
 
 	/* Everyone else checks to see if they are in kernel mode at
 	   this point and exits if that's the case.  I'm not sure why
 	   we would be called in that case, but for some reason we
 	   are. */
 
-	if (test_thread_flag(TIF_RESTORE_SIGMASK))
-		oldset = &current->saved_sigmask;
-	else
-		oldset = &current->blocked;
-
-	DBG(1,"do_signal: oldset %08lx / %08lx\n", 
-		oldset->sig[0], oldset->sig[1]);
-
-
 	/* May need to force signal if handle_signal failed to deliver */
 	while (1) {
-	  
 		signr = get_signal_to_deliver(&info, &ka, regs, NULL);
 		DBG(3,"do_signal: signr = %d, regs->gr[28] = %ld\n", signr, regs->gr[28]); 
 	
@@ -603,14 +590,8 @@
 		/* Whee!  Actually deliver the signal.  If the
 		   delivery failed, we need to continue to iterate in
 		   this loop so we can deliver the SIGSEGV... */
-		if (handle_signal(signr, &info, &ka, oldset,
-				  regs, in_syscall)) {
-			DBG(1,KERN_DEBUG "do_signal: Exit (success), regs->gr[28] = %ld\n",
-				regs->gr[28]);
-			if (test_thread_flag(TIF_RESTORE_SIGMASK))
-				clear_thread_flag(TIF_RESTORE_SIGMASK);
+		if (handle_signal(signr, &info, &ka, regs, in_syscall))
 			return;
-		}
 	}
 	/* end of while(1) looping forever if we can't force a signal */
 
@@ -621,24 +602,16 @@
 	DBG(1,"do_signal: Exit (not delivered), regs->gr[28] = %ld\n", 
 		regs->gr[28]);
 
-	if (test_thread_flag(TIF_RESTORE_SIGMASK)) {
-		clear_thread_flag(TIF_RESTORE_SIGMASK);
-		sigprocmask(SIG_SETMASK, &current->saved_sigmask, NULL);
-	}
-
-	return;
+	restore_saved_sigmask();
 }
 
 void do_notify_resume(struct pt_regs *regs, long in_syscall)
 {
-	if (test_thread_flag(TIF_SIGPENDING) ||
-	    test_thread_flag(TIF_RESTORE_SIGMASK))
+	if (test_thread_flag(TIF_SIGPENDING))
 		do_signal(regs, in_syscall);
 
 	if (test_thread_flag(TIF_NOTIFY_RESUME)) {
 		clear_thread_flag(TIF_NOTIFY_RESUME);
 		tracehook_notify_resume(regs);
-		if (current->replacement_session_keyring)
-			key_replace_session_keyring();
 	}
 }
diff --git a/arch/parisc/kernel/signal32.c b/arch/parisc/kernel/signal32.c
index e141324..fd49aed 100644
--- a/arch/parisc/kernel/signal32.c
+++ b/arch/parisc/kernel/signal32.c
@@ -47,8 +47,6 @@
 #define DBG(LEVEL, ...)
 #endif
 
-#define _BLOCKABLE (~(sigmask(SIGKILL) | sigmask(SIGSTOP)))
-
 inline void
 sigset_32to64(sigset_t *s64, compat_sigset_t *s32)
 {
diff --git a/arch/parisc/kernel/vmlinux.lds.S b/arch/parisc/kernel/vmlinux.lds.S
index fa6f2b8..64a9998 100644
--- a/arch/parisc/kernel/vmlinux.lds.S
+++ b/arch/parisc/kernel/vmlinux.lds.S
@@ -50,8 +50,10 @@
 	. = KERNEL_BINARY_TEXT_START;
 
 	_text = .;		/* Text and read-only data */
-	.text ALIGN(16) : {
+	.head ALIGN(16) : {
 		HEAD_TEXT
+	} = 0
+	.text ALIGN(16) : {
 		TEXT_TEXT
 		SCHED_TEXT
 		LOCK_TEXT
@@ -65,7 +67,7 @@
 		*(.fixup)
 		*(.lock.text)		/* out-of-line lock text */
 		*(.gnu.warning)
-	} = 0
+	}
 	/* End of text section */
 	_etext = .;
 
diff --git a/arch/parisc/lib/lusercopy.S b/arch/parisc/lib/lusercopy.S
index 1bd23cc..6f2d935 100644
--- a/arch/parisc/lib/lusercopy.S
+++ b/arch/parisc/lib/lusercopy.S
@@ -61,47 +61,6 @@
 	.endm
 
 	/*
-	 * long lstrncpy_from_user(char *dst, const char *src, long n)
-	 *
-	 * Returns -EFAULT if exception before terminator,
-	 *         N if the entire buffer filled,
-	 *         otherwise strlen (i.e. excludes zero byte)
-	 */
-
-ENTRY(lstrncpy_from_user)
-	.proc
-	.callinfo NO_CALLS
-	.entry
-	comib,=     0,%r24,$lsfu_done
-	copy        %r24,%r23
-	get_sr
-1:      ldbs,ma     1(%sr1,%r25),%r1
-$lsfu_loop:
-	stbs,ma     %r1,1(%r26)
-	comib,=,n   0,%r1,$lsfu_done
-	addib,<>,n  -1,%r24,$lsfu_loop
-2:      ldbs,ma     1(%sr1,%r25),%r1
-$lsfu_done:
-	sub         %r23,%r24,%r28
-$lsfu_exit:
-	bv          %r0(%r2)
-	nop
-	.exit
-ENDPROC(lstrncpy_from_user)
-
-	.section .fixup,"ax"
-3:      fixup_branch $lsfu_exit
-	ldi         -EFAULT,%r28
-	.previous
-
-	.section __ex_table,"aw"
-	ASM_ULONG_INSN 1b,3b
-	ASM_ULONG_INSN 2b,3b
-	.previous
-
-	.procend
-
-	/*
 	 * unsigned long lclear_user(void *to, unsigned long n)
 	 *
 	 * Returns 0 for success.
diff --git a/arch/powerpc/Kconfig b/arch/powerpc/Kconfig
index 00b9874..050cb37 100644
--- a/arch/powerpc/Kconfig
+++ b/arch/powerpc/Kconfig
@@ -135,6 +135,8 @@
 	select GENERIC_CMOS_UPDATE
 	select GENERIC_TIME_VSYSCALL
 	select GENERIC_CLOCKEVENTS
+	select GENERIC_STRNCPY_FROM_USER
+	select GENERIC_STRNLEN_USER
 
 config EARLY_PRINTK
 	bool
diff --git a/arch/powerpc/boot/dts/mpc8569mds.dts b/arch/powerpc/boot/dts/mpc8569mds.dts
index 7e283c8..fe0d609 100644
--- a/arch/powerpc/boot/dts/mpc8569mds.dts
+++ b/arch/powerpc/boot/dts/mpc8569mds.dts
@@ -119,6 +119,7 @@
 		sdhc@2e000 {
 			status = "disabled";
 			sdhci,1-bit-only;
+			bus-width = <1>;
 		};
 
 		par_io@e0100 {
diff --git a/arch/powerpc/include/asm/hw_irq.h b/arch/powerpc/include/asm/hw_irq.h
index c9aac24..6eb75b8 100644
--- a/arch/powerpc/include/asm/hw_irq.h
+++ b/arch/powerpc/include/asm/hw_irq.h
@@ -100,6 +100,14 @@
 	get_paca()->irq_happened |= PACA_IRQ_HARD_DIS;
 }
 
+/* include/linux/interrupt.h needs hard_irq_disable to be a macro */
+#define hard_irq_disable	hard_irq_disable
+
+static inline bool lazy_irq_pending(void)
+{
+	return !!(get_paca()->irq_happened & ~PACA_IRQ_HARD_DIS);
+}
+
 /*
  * This is called by asynchronous interrupts to conditionally
  * re-enable hard interrupts when soft-disabled after having
diff --git a/arch/powerpc/include/asm/posix_types.h b/arch/powerpc/include/asm/posix_types.h
index f139325..2958c5b 100644
--- a/arch/powerpc/include/asm/posix_types.h
+++ b/arch/powerpc/include/asm/posix_types.h
@@ -16,9 +16,6 @@
 typedef long		__kernel_ptrdiff_t;
 #define __kernel_size_t __kernel_size_t
 
-typedef unsigned short	__kernel_nlink_t;
-#define __kernel_nlink_t __kernel_nlink_t
-
 typedef short		__kernel_ipc_pid_t;
 #define __kernel_ipc_pid_t __kernel_ipc_pid_t
 #endif
diff --git a/arch/powerpc/include/asm/stat.h b/arch/powerpc/include/asm/stat.h
index e4edc51..84880b8 100644
--- a/arch/powerpc/include/asm/stat.h
+++ b/arch/powerpc/include/asm/stat.h
@@ -30,11 +30,11 @@
 	unsigned long	st_dev;
 	ino_t		st_ino;
 #ifdef __powerpc64__
-	nlink_t		st_nlink;
+	unsigned long	st_nlink;
 	mode_t		st_mode;
 #else
 	mode_t		st_mode;
-	nlink_t		st_nlink;
+	unsigned short	st_nlink;
 #endif
 	uid_t		st_uid;
 	gid_t		st_gid;
diff --git a/arch/powerpc/include/asm/thread_info.h b/arch/powerpc/include/asm/thread_info.h
index a556ccc..68831e9 100644
--- a/arch/powerpc/include/asm/thread_info.h
+++ b/arch/powerpc/include/asm/thread_info.h
@@ -140,7 +140,23 @@
 {
 	struct thread_info *ti = current_thread_info();
 	ti->local_flags |= _TLF_RESTORE_SIGMASK;
-	set_bit(TIF_SIGPENDING, &ti->flags);
+	WARN_ON(!test_bit(TIF_SIGPENDING, &ti->flags));
+}
+static inline void clear_restore_sigmask(void)
+{
+	current_thread_info()->local_flags &= ~_TLF_RESTORE_SIGMASK;
+}
+static inline bool test_restore_sigmask(void)
+{
+	return current_thread_info()->local_flags & _TLF_RESTORE_SIGMASK;
+}
+static inline bool test_and_clear_restore_sigmask(void)
+{
+	struct thread_info *ti = current_thread_info();
+	if (!(ti->local_flags & _TLF_RESTORE_SIGMASK))
+		return false;
+	ti->local_flags &= ~_TLF_RESTORE_SIGMASK;
+	return true;
 }
 
 static inline bool test_thread_local_flags(unsigned int flags)
diff --git a/arch/powerpc/include/asm/uaccess.h b/arch/powerpc/include/asm/uaccess.h
index bd0fb84..17bb40c 100644
--- a/arch/powerpc/include/asm/uaccess.h
+++ b/arch/powerpc/include/asm/uaccess.h
@@ -40,6 +40,8 @@
 
 #define segment_eq(a, b)	((a).seg == (b).seg)
 
+#define user_addr_max()	(get_fs().seg)
+
 #ifdef __powerpc64__
 /*
  * This check is sufficient because there is a large enough
@@ -453,42 +455,9 @@
 	return size;
 }
 
-extern int __strncpy_from_user(char *dst, const char __user *src, long count);
-
-static inline long strncpy_from_user(char *dst, const char __user *src,
-		long count)
-{
-	might_sleep();
-	if (likely(access_ok(VERIFY_READ, src, 1)))
-		return __strncpy_from_user(dst, src, count);
-	return -EFAULT;
-}
-
-/*
- * Return the size of a string (including the ending 0)
- *
- * Return 0 for error
- */
-extern int __strnlen_user(const char __user *str, long len, unsigned long top);
-
-/*
- * Returns the length of the string at str (including the null byte),
- * or 0 if we hit a page we can't access,
- * or something > len if we didn't find a null byte.
- *
- * The `top' parameter to __strnlen_user is to make sure that
- * we can never overflow from the user area into kernel space.
- */
-static inline int strnlen_user(const char __user *str, long len)
-{
-	unsigned long top = current->thread.fs.seg;
-
-	if ((unsigned long)str > top)
-		return 0;
-	return __strnlen_user(str, len, top);
-}
-
-#define strlen_user(str)	strnlen_user((str), 0x7ffffffe)
+extern long strncpy_from_user(char *dst, const char __user *src, long count);
+extern __must_check long strlen_user(const char __user *str);
+extern __must_check long strnlen_user(const char __user *str, long n);
 
 #endif  /* __ASSEMBLY__ */
 #endif /* __KERNEL__ */
diff --git a/arch/powerpc/include/asm/word-at-a-time.h b/arch/powerpc/include/asm/word-at-a-time.h
new file mode 100644
index 0000000..d0b6d4a
--- /dev/null
+++ b/arch/powerpc/include/asm/word-at-a-time.h
@@ -0,0 +1,41 @@
+#ifndef _ASM_WORD_AT_A_TIME_H
+#define _ASM_WORD_AT_A_TIME_H
+
+/*
+ * Word-at-a-time interfaces for PowerPC.
+ */
+
+#include <linux/kernel.h>
+#include <asm/asm-compat.h>
+
+struct word_at_a_time {
+	const unsigned long high_bits, low_bits;
+};
+
+#define WORD_AT_A_TIME_CONSTANTS { REPEAT_BYTE(0xfe) + 1, REPEAT_BYTE(0x7f) }
+
+/* Bit set in the bytes that have a zero */
+static inline long prep_zero_mask(unsigned long val, unsigned long rhs, const struct word_at_a_time *c)
+{
+	unsigned long mask = (val & c->low_bits) + c->low_bits;
+	return ~(mask | rhs);
+}
+
+#define create_zero_mask(mask) (mask)
+
+static inline long find_zero(unsigned long mask)
+{
+	long leading_zero_bits;
+
+	asm (PPC_CNTLZL "%0,%1" : "=r" (leading_zero_bits) : "r" (mask));
+	return leading_zero_bits >> 3;
+}
+
+static inline bool has_zero(unsigned long val, unsigned long *data, const struct word_at_a_time *c)
+{
+	unsigned long rhs = val | c->low_bits;
+	*data = rhs;
+	return (val + c->high_bits) & ~rhs;
+}
+
+#endif /* _ASM_WORD_AT_A_TIME_H */
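
The helpers in the new header above follow the generic word-at-a-time contract used by the GENERIC_STRNCPY_FROM_USER/GENERIC_STRNLEN_USER code that this series selects for powerpc earlier in the diff. Below is a minimal illustrative sketch (not part of the patch) of how a caller typically combines has_zero(), prep_zero_mask(), create_zero_mask() and find_zero() to locate a NUL byte within one word; first_zero_byte() is a hypothetical helper used here only for illustration:

	/* Hypothetical example, mirroring the generic lib/ users of this API. */
	static inline long first_zero_byte(unsigned long val)
	{
		const struct word_at_a_time constants = WORD_AT_A_TIME_CONSTANTS;
		unsigned long data;

		if (!has_zero(val, &data, &constants))
			return -1;		/* no NUL byte in this word */
		data = prep_zero_mask(val, data, &constants);
		data = create_zero_mask(data);
		return find_zero(data);		/* byte offset of the first NUL */
	}
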
diff --git a/arch/powerpc/kernel/entry_64.S b/arch/powerpc/kernel/entry_64.S
index ed1718f..5971c85 100644
--- a/arch/powerpc/kernel/entry_64.S
+++ b/arch/powerpc/kernel/entry_64.S
@@ -558,27 +558,54 @@
 	mtmsrd	r10,1		  /* Update machine state */
 #endif /* CONFIG_PPC_BOOK3E */
 
-#ifdef CONFIG_PREEMPT
 	clrrdi	r9,r1,THREAD_SHIFT	/* current_thread_info() */
-	li	r0,_TIF_NEED_RESCHED	/* bits to check */
 	ld	r3,_MSR(r1)
 	ld	r4,TI_FLAGS(r9)
-	/* Move MSR_PR bit in r3 to _TIF_SIGPENDING position in r0 */
-	rlwimi	r0,r3,32+TIF_SIGPENDING-MSR_PR_LG,_TIF_SIGPENDING
-	and.	r0,r4,r0	/* check NEED_RESCHED and maybe SIGPENDING */
-	bne	do_work
-
-#else /* !CONFIG_PREEMPT */
-	ld	r3,_MSR(r1)	/* Returning to user mode? */
 	andi.	r3,r3,MSR_PR
-	beq	restore		/* if not, just restore regs and return */
+	beq	resume_kernel
 
 	/* Check current_thread_info()->flags */
+	andi.	r0,r4,_TIF_USER_WORK_MASK
+	beq	restore
+
+	andi.	r0,r4,_TIF_NEED_RESCHED
+	beq	1f
+	bl	.restore_interrupts
+	bl	.schedule
+	b	.ret_from_except_lite
+
+1:	bl	.save_nvgprs
+	bl	.restore_interrupts
+	addi	r3,r1,STACK_FRAME_OVERHEAD
+	bl	.do_notify_resume
+	b	.ret_from_except
+
+resume_kernel:
+#ifdef CONFIG_PREEMPT
+	/* Check if we need to preempt */
+	andi.	r0,r4,_TIF_NEED_RESCHED
+	beq+	restore
+	/* Check that preempt_count() == 0 and interrupts are enabled */
+	lwz	r8,TI_PREEMPT(r9)
+	cmpwi	cr1,r8,0
+	ld	r0,SOFTE(r1)
+	cmpdi	r0,0
+	crandc	eq,cr1*4+eq,eq
+	bne	restore
+
+	/*
+	 * Here we are preempting the current task. We want to make
+	 * sure we are soft-disabled first
+	 */
+	SOFT_DISABLE_INTS(r3,r4)
+1:	bl	.preempt_schedule_irq
+
+	/* Re-test flags and eventually loop */
 	clrrdi	r9,r1,THREAD_SHIFT
 	ld	r4,TI_FLAGS(r9)
-	andi.	r0,r4,_TIF_USER_WORK_MASK
-	bne	do_work
-#endif /* !CONFIG_PREEMPT */
+	andi.	r0,r4,_TIF_NEED_RESCHED
+	bne	1b
+#endif /* CONFIG_PREEMPT */
 
 	.globl	fast_exc_return_irq
 fast_exc_return_irq:
@@ -759,50 +786,6 @@
 #endif /* CONFIG_PPC_BOOK3E */
 1:	b	.ret_from_except /* What else to do here ? */
  
-
-
-3:
-do_work:
-#ifdef CONFIG_PREEMPT
-	andi.	r0,r3,MSR_PR	/* Returning to user mode? */
-	bne	user_work
-	/* Check that preempt_count() == 0 and interrupts are enabled */
-	lwz	r8,TI_PREEMPT(r9)
-	cmpwi	cr1,r8,0
-	ld	r0,SOFTE(r1)
-	cmpdi	r0,0
-	crandc	eq,cr1*4+eq,eq
-	bne	restore
-
-	/*
-	 * Here we are preempting the current task. We want to make
-	 * sure we are soft-disabled first
-	 */
-	SOFT_DISABLE_INTS(r3,r4)
-1:	bl	.preempt_schedule_irq
-
-	/* Re-test flags and eventually loop */
-	clrrdi	r9,r1,THREAD_SHIFT
-	ld	r4,TI_FLAGS(r9)
-	andi.	r0,r4,_TIF_NEED_RESCHED
-	bne	1b
-	b	restore
-
-user_work:
-#endif /* CONFIG_PREEMPT */
-
-	andi.	r0,r4,_TIF_NEED_RESCHED
-	beq	1f
-	bl	.restore_interrupts
-	bl	.schedule
-	b	.ret_from_except_lite
-
-1:	bl	.save_nvgprs
-	bl	.restore_interrupts
-	addi	r3,r1,STACK_FRAME_OVERHEAD
-	bl	.do_notify_resume
-	b	.ret_from_except
-
 unrecov_restore:
 	addi	r3,r1,STACK_FRAME_OVERHEAD
 	bl	.unrecoverable_exception
diff --git a/arch/powerpc/kernel/irq.c b/arch/powerpc/kernel/irq.c
index 7835a5e..1b41502 100644
--- a/arch/powerpc/kernel/irq.c
+++ b/arch/powerpc/kernel/irq.c
@@ -277,7 +277,7 @@
  * NOTE: This is called with interrupts hard disabled but not marked
  * as such in paca->irq_happened, so we need to resync this.
  */
-void restore_interrupts(void)
+void notrace restore_interrupts(void)
 {
 	if (irqs_disabled()) {
 		local_paca->irq_happened |= PACA_IRQ_HARD_DIS;
diff --git a/arch/powerpc/kernel/module_32.c b/arch/powerpc/kernel/module_32.c
index 0b6d796..2e3200c 100644
--- a/arch/powerpc/kernel/module_32.c
+++ b/arch/powerpc/kernel/module_32.c
@@ -176,8 +176,8 @@
 
 static inline int entry_matches(struct ppc_plt_entry *entry, Elf32_Addr val)
 {
-	if (entry->jump[0] == 0x3d600000 + ((val + 0x8000) >> 16)
-	    && entry->jump[1] == 0x396b0000 + (val & 0xffff))
+	if (entry->jump[0] == 0x3d800000 + ((val + 0x8000) >> 16)
+	    && entry->jump[1] == 0x398c0000 + (val & 0xffff))
 		return 1;
 	return 0;
 }
@@ -204,10 +204,9 @@
 		entry++;
 	}
 
-	/* Stolen from Paul Mackerras as well... */
-	entry->jump[0] = 0x3d600000+((val+0x8000)>>16);	/* lis r11,sym@ha */
-	entry->jump[1] = 0x396b0000 + (val&0xffff);	/* addi r11,r11,sym@l*/
-	entry->jump[2] = 0x7d6903a6;			/* mtctr r11 */
+	entry->jump[0] = 0x3d800000+((val+0x8000)>>16); /* lis r12,sym@ha */
+	entry->jump[1] = 0x398c0000 + (val&0xffff);     /* addi r12,r12,sym@l*/
+	entry->jump[2] = 0x7d8903a6;                    /* mtctr r12 */
 	entry->jump[3] = 0x4e800420;			/* bctr */
 
 	DEBUGP("Initialized plt for 0x%x at %p\n", val, entry);
diff --git a/arch/powerpc/kernel/ppc_ksyms.c b/arch/powerpc/kernel/ppc_ksyms.c
index d1f2aaf..3e40315 100644
--- a/arch/powerpc/kernel/ppc_ksyms.c
+++ b/arch/powerpc/kernel/ppc_ksyms.c
@@ -85,8 +85,6 @@
 
 EXPORT_SYMBOL(__copy_tofrom_user);
 EXPORT_SYMBOL(__clear_user);
-EXPORT_SYMBOL(__strncpy_from_user);
-EXPORT_SYMBOL(__strnlen_user);
 EXPORT_SYMBOL(copy_page);
 
 #if defined(CONFIG_PCI) && defined(CONFIG_PPC32)
diff --git a/arch/powerpc/kernel/prom_init.c b/arch/powerpc/kernel/prom_init.c
index 1b488e5..0794a30 100644
--- a/arch/powerpc/kernel/prom_init.c
+++ b/arch/powerpc/kernel/prom_init.c
@@ -1312,7 +1312,7 @@
 
 extern char opal_secondary_entry;
 
-static void prom_query_opal(void)
+static void __init prom_query_opal(void)
 {
 	long rc;
 
@@ -1436,7 +1436,7 @@
 	prom_debug("prom_opal_hold_cpus: end...\n");
 }
 
-static void prom_opal_takeover(void)
+static void __init prom_opal_takeover(void)
 {
 	struct opal_secondary_data *data = &RELOC(opal_secondary_data);
 	struct opal_takeover_args *args = &data->args;
diff --git a/arch/powerpc/kernel/signal.c b/arch/powerpc/kernel/signal.c
index 651c596..5c023c9 100644
--- a/arch/powerpc/kernel/signal.c
+++ b/arch/powerpc/kernel/signal.c
@@ -51,16 +51,6 @@
         return (void __user *)newsp;
 }
 
-
-/*
- * Restore the user process's signal mask
- */
-void restore_sigmask(sigset_t *set)
-{
-	sigdelsetmask(set, ~_BLOCKABLE);
-	set_current_blocked(set);
-}
-
 static void check_syscall_restart(struct pt_regs *regs, struct k_sigaction *ka,
 				  int has_handler)
 {
@@ -114,30 +104,21 @@
 
 static int do_signal(struct pt_regs *regs)
 {
-	sigset_t *oldset;
+	sigset_t *oldset = sigmask_to_save();
 	siginfo_t info;
 	int signr;
 	struct k_sigaction ka;
 	int ret;
 	int is32 = is_32bit_task();
 
-	if (current_thread_info()->local_flags & _TLF_RESTORE_SIGMASK)
-		oldset = &current->saved_sigmask;
-	else
-		oldset = &current->blocked;
-
 	signr = get_signal_to_deliver(&info, &ka, regs, NULL);
 
 	/* Is there any syscall restart business here ? */
 	check_syscall_restart(regs, &ka, signr > 0);
 
 	if (signr <= 0) {
-		struct thread_info *ti = current_thread_info();
 		/* No signal to deliver -- put the saved sigmask back */
-		if (ti->local_flags & _TLF_RESTORE_SIGMASK) {
-			ti->local_flags &= ~_TLF_RESTORE_SIGMASK;
-			sigprocmask(SIG_SETMASK, &current->saved_sigmask, NULL);
-		}
+		restore_saved_sigmask();
 		regs->trap = 0;
 		return 0;               /* no signals delivered */
 	}
@@ -167,18 +148,7 @@
 
 	regs->trap = 0;
 	if (ret) {
-		block_sigmask(&ka, signr);
-
-		/*
-		 * A signal was successfully delivered; the saved sigmask is in
-		 * its frame, and we can clear the TLF_RESTORE_SIGMASK flag.
-		 */
-		current_thread_info()->local_flags &= ~_TLF_RESTORE_SIGMASK;
-
-		/*
-		 * Let tracing know that we've done the handler setup.
-		 */
-		tracehook_signal_handler(signr, &info, &ka, regs,
+		signal_delivered(signr, &info, &ka, regs,
 					 test_thread_flag(TIF_SINGLESTEP));
 	}
 
@@ -193,8 +163,6 @@
 	if (thread_info_flags & _TIF_NOTIFY_RESUME) {
 		clear_thread_flag(TIF_NOTIFY_RESUME);
 		tracehook_notify_resume(regs);
-		if (current->replacement_session_keyring)
-			key_replace_session_keyring();
 	}
 }
 
diff --git a/arch/powerpc/kernel/signal.h b/arch/powerpc/kernel/signal.h
index 8dde973..e00acb4 100644
--- a/arch/powerpc/kernel/signal.h
+++ b/arch/powerpc/kernel/signal.h
@@ -10,13 +10,10 @@
 #ifndef _POWERPC_ARCH_SIGNAL_H
 #define _POWERPC_ARCH_SIGNAL_H
 
-#define _BLOCKABLE (~(sigmask(SIGKILL) | sigmask(SIGSTOP)))
-
 extern void do_notify_resume(struct pt_regs *regs, unsigned long thread_info_flags);
 
 extern void __user * get_sigframe(struct k_sigaction *ka, struct pt_regs *regs,
 				  size_t frame_size, int is_32);
-extern void restore_sigmask(sigset_t *set);
 
 extern int handle_signal32(unsigned long sig, struct k_sigaction *ka,
 			   siginfo_t *info, sigset_t *oldset,
diff --git a/arch/powerpc/kernel/signal_32.c b/arch/powerpc/kernel/signal_32.c
index 61f6aff..8b4c049 100644
--- a/arch/powerpc/kernel/signal_32.c
+++ b/arch/powerpc/kernel/signal_32.c
@@ -919,7 +919,7 @@
 	if (!access_ok(VERIFY_READ, mcp, sizeof(*mcp)))
 		return -EFAULT;
 #endif
-	restore_sigmask(&set);
+	set_current_blocked(&set);
 	if (restore_user_regs(regs, mcp, sig))
 		return -EFAULT;
 
@@ -1273,7 +1273,7 @@
 	set.sig[0] = sigctx.oldmask;
 	set.sig[1] = sigctx._unused[3];
 #endif
-	restore_sigmask(&set);
+	set_current_blocked(&set);
 
 	sr = (struct mcontext __user *)from_user_ptr(sigctx.regs);
 	addr = sr;
diff --git a/arch/powerpc/kernel/signal_64.c b/arch/powerpc/kernel/signal_64.c
index 2692efd..d183f87 100644
--- a/arch/powerpc/kernel/signal_64.c
+++ b/arch/powerpc/kernel/signal_64.c
@@ -335,7 +335,7 @@
 
 	if (__copy_from_user(&set, &new_ctx->uc_sigmask, sizeof(set)))
 		do_exit(SIGSEGV);
-	restore_sigmask(&set);
+	set_current_blocked(&set);
 	if (restore_sigcontext(regs, NULL, 0, &new_ctx->uc_mcontext))
 		do_exit(SIGSEGV);
 
@@ -364,7 +364,7 @@
 
 	if (__copy_from_user(&set, &uc->uc_sigmask, sizeof(set)))
 		goto badframe;
-	restore_sigmask(&set);
+	set_current_blocked(&set);
 	if (restore_sigcontext(regs, NULL, 1, &uc->uc_mcontext))
 		goto badframe;
 
diff --git a/arch/powerpc/kernel/time.c b/arch/powerpc/kernel/time.c
index 99a995c..be171ee 100644
--- a/arch/powerpc/kernel/time.c
+++ b/arch/powerpc/kernel/time.c
@@ -475,6 +475,7 @@
 	struct pt_regs *old_regs;
 	u64 *next_tb = &__get_cpu_var(decrementers_next_tb);
 	struct clock_event_device *evt = &__get_cpu_var(decrementers);
+	u64 now;
 
 	/* Ensure a positive value is written to the decrementer, or else
 	 * some CPUs will continue to take decrementer exceptions.
@@ -509,9 +510,16 @@
 		irq_work_run();
 	}
 
-	*next_tb = ~(u64)0;
-	if (evt->event_handler)
-		evt->event_handler(evt);
+	now = get_tb_or_rtc();
+	if (now >= *next_tb) {
+		*next_tb = ~(u64)0;
+		if (evt->event_handler)
+			evt->event_handler(evt);
+	} else {
+		now = *next_tb - now;
+		if (now <= DECREMENTER_MAX)
+			set_dec((int)now);
+	}
 
 #ifdef CONFIG_PPC64
 	/* collect purr register values often, for accurate calculations */
diff --git a/arch/powerpc/kvm/book3s_hv.c b/arch/powerpc/kvm/book3s_hv.c
index c6af1d6..3abe1b86 100644
--- a/arch/powerpc/kvm/book3s_hv.c
+++ b/arch/powerpc/kvm/book3s_hv.c
@@ -268,24 +268,45 @@
 	return err;
 }
 
-static void kvmppc_update_vpa(struct kvm *kvm, struct kvmppc_vpa *vpap)
+static void kvmppc_update_vpa(struct kvm_vcpu *vcpu, struct kvmppc_vpa *vpap)
 {
+	struct kvm *kvm = vcpu->kvm;
 	void *va;
 	unsigned long nb;
+	unsigned long gpa;
+
+	/*
+	 * We need to pin the page pointed to by vpap->next_gpa,
+	 * but we can't call kvmppc_pin_guest_page under the lock
+	 * as it does get_user_pages() and down_read().  So we
+	 * have to drop the lock, pin the page, then get the lock
+	 * again and check that a new area didn't get registered
+	 * in the meantime.
+	 */
+	for (;;) {
+		gpa = vpap->next_gpa;
+		spin_unlock(&vcpu->arch.vpa_update_lock);
+		va = NULL;
+		nb = 0;
+		if (gpa)
+			va = kvmppc_pin_guest_page(kvm, vpap->next_gpa, &nb);
+		spin_lock(&vcpu->arch.vpa_update_lock);
+		if (gpa == vpap->next_gpa)
+			break;
+		/* sigh... unpin that one and try again */
+		if (va)
+			kvmppc_unpin_guest_page(kvm, va);
+	}
 
 	vpap->update_pending = 0;
-	va = NULL;
-	if (vpap->next_gpa) {
-		va = kvmppc_pin_guest_page(kvm, vpap->next_gpa, &nb);
-		if (nb < vpap->len) {
-			/*
-			 * If it's now too short, it must be that userspace
-			 * has changed the mappings underlying guest memory,
-			 * so unregister the region.
-			 */
-			kvmppc_unpin_guest_page(kvm, va);
-			va = NULL;
-		}
+	if (va && nb < vpap->len) {
+		/*
+		 * If it's now too short, it must be that userspace
+		 * has changed the mappings underlying guest memory,
+		 * so unregister the region.
+		 */
+		kvmppc_unpin_guest_page(kvm, va);
+		va = NULL;
 	}
 	if (vpap->pinned_addr)
 		kvmppc_unpin_guest_page(kvm, vpap->pinned_addr);
@@ -296,20 +317,18 @@
 
 static void kvmppc_update_vpas(struct kvm_vcpu *vcpu)
 {
-	struct kvm *kvm = vcpu->kvm;
-
 	spin_lock(&vcpu->arch.vpa_update_lock);
 	if (vcpu->arch.vpa.update_pending) {
-		kvmppc_update_vpa(kvm, &vcpu->arch.vpa);
+		kvmppc_update_vpa(vcpu, &vcpu->arch.vpa);
 		init_vpa(vcpu, vcpu->arch.vpa.pinned_addr);
 	}
 	if (vcpu->arch.dtl.update_pending) {
-		kvmppc_update_vpa(kvm, &vcpu->arch.dtl);
+		kvmppc_update_vpa(vcpu, &vcpu->arch.dtl);
 		vcpu->arch.dtl_ptr = vcpu->arch.dtl.pinned_addr;
 		vcpu->arch.dtl_index = 0;
 	}
 	if (vcpu->arch.slb_shadow.update_pending)
-		kvmppc_update_vpa(kvm, &vcpu->arch.slb_shadow);
+		kvmppc_update_vpa(vcpu, &vcpu->arch.slb_shadow);
 	spin_unlock(&vcpu->arch.vpa_update_lock);
 }
 
@@ -800,12 +819,39 @@
 	struct kvm_vcpu *vcpu, *vcpu0, *vnext;
 	long ret;
 	u64 now;
-	int ptid, i;
+	int ptid, i, need_vpa_update;
 
 	/* don't start if any threads have a signal pending */
-	list_for_each_entry(vcpu, &vc->runnable_threads, arch.run_list)
+	need_vpa_update = 0;
+	list_for_each_entry(vcpu, &vc->runnable_threads, arch.run_list) {
 		if (signal_pending(vcpu->arch.run_task))
 			return 0;
+		need_vpa_update |= vcpu->arch.vpa.update_pending |
+			vcpu->arch.slb_shadow.update_pending |
+			vcpu->arch.dtl.update_pending;
+	}
+
+	/*
+	 * Initialize *vc, in particular vc->vcore_state, so we can
+	 * drop the vcore lock if necessary.
+	 */
+	vc->n_woken = 0;
+	vc->nap_count = 0;
+	vc->entry_exit_count = 0;
+	vc->vcore_state = VCORE_RUNNING;
+	vc->in_guest = 0;
+	vc->napping_threads = 0;
+
+	/*
+	 * Updating any of the vpas requires calling kvmppc_pin_guest_page,
+	 * which can't be called with any spinlocks held.
+	 */
+	if (need_vpa_update) {
+		spin_unlock(&vc->lock);
+		list_for_each_entry(vcpu, &vc->runnable_threads, arch.run_list)
+			kvmppc_update_vpas(vcpu);
+		spin_lock(&vc->lock);
+	}
 
 	/*
 	 * Make sure we are running on thread 0, and that
@@ -838,20 +884,10 @@
 		if (vcpu->arch.ceded)
 			vcpu->arch.ptid = ptid++;
 
-	vc->n_woken = 0;
-	vc->nap_count = 0;
-	vc->entry_exit_count = 0;
-	vc->vcore_state = VCORE_RUNNING;
 	vc->stolen_tb += mftb() - vc->preempt_tb;
-	vc->in_guest = 0;
 	vc->pcpu = smp_processor_id();
-	vc->napping_threads = 0;
 	list_for_each_entry(vcpu, &vc->runnable_threads, arch.run_list) {
 		kvmppc_start_thread(vcpu);
-		if (vcpu->arch.vpa.update_pending ||
-		    vcpu->arch.slb_shadow.update_pending ||
-		    vcpu->arch.dtl.update_pending)
-			kvmppc_update_vpas(vcpu);
 		kvmppc_create_dtl_entry(vcpu, vc);
 	}
 	/* Grab any remaining hw threads so they can't go into the kernel */
diff --git a/arch/powerpc/lib/string.S b/arch/powerpc/lib/string.S
index 455881a..093d631 100644
--- a/arch/powerpc/lib/string.S
+++ b/arch/powerpc/lib/string.S
@@ -160,48 +160,3 @@
 	PPC_LONG	1b,91b
 	PPC_LONG	8b,92b
 	.text
-
-_GLOBAL(__strncpy_from_user)
-	addi	r6,r3,-1
-	addi	r4,r4,-1
-	cmpwi	0,r5,0
-	beq	2f
-	mtctr	r5
-1:	lbzu	r0,1(r4)
-	cmpwi	0,r0,0
-	stbu	r0,1(r6)
-	bdnzf	2,1b		/* dec ctr, branch if ctr != 0 && !cr0.eq */
-	beq	3f
-2:	addi	r6,r6,1
-3:	subf	r3,r3,r6
-	blr
-99:	li	r3,-EFAULT
-	blr
-
-	.section __ex_table,"a"
-	PPC_LONG	1b,99b
-	.text
-
-/* r3 = str, r4 = len (> 0), r5 = top (highest addr) */
-_GLOBAL(__strnlen_user)
-	addi	r7,r3,-1
-	subf	r6,r7,r5	/* top+1 - str */
-	cmplw	0,r4,r6
-	bge	0f
-	mr	r6,r4
-0:	mtctr	r6		/* ctr = min(len, top - str) */
-1:	lbzu	r0,1(r7)	/* get next byte */
-	cmpwi	0,r0,0
-	bdnzf	2,1b		/* loop if --ctr != 0 && byte != 0 */
-	addi	r7,r7,1
-	subf	r3,r3,r7	/* number of bytes we have looked at */
-	beqlr			/* return if we found a 0 byte */
-	cmpw	0,r3,r4		/* did we look at all len bytes? */
-	blt	99f		/* if not, must have hit top */
-	addi	r3,r4,1		/* return len + 1 to indicate no null found */
-	blr
-99:	li	r3,0		/* bad address, return 0 */
-	blr
-
-	.section __ex_table,"a"
-	PPC_LONG	1b,99b
diff --git a/arch/powerpc/mm/mmu_context_nohash.c b/arch/powerpc/mm/mmu_context_nohash.c
index 5b63bd3..e779642 100644
--- a/arch/powerpc/mm/mmu_context_nohash.c
+++ b/arch/powerpc/mm/mmu_context_nohash.c
@@ -333,9 +333,7 @@
 					    unsigned long action, void *hcpu)
 {
 	unsigned int cpu = (unsigned int)(long)hcpu;
-#ifdef CONFIG_HOTPLUG_CPU
-	struct task_struct *p;
-#endif
+
 	/* We don't touch CPU 0 map, it's allocated at aboot and kept
 	 * around forever
 	 */
@@ -358,12 +356,7 @@
 		stale_map[cpu] = NULL;
 
 		/* We also clear the cpu_vm_mask bits of CPUs going away */
-		read_lock(&tasklist_lock);
-		for_each_process(p) {
-			if (p->mm)
-				cpumask_clear_cpu(cpu, mm_cpumask(p->mm));
-		}
-		read_unlock(&tasklist_lock);
+		clear_tasks_mm_cpumask(cpu);
 	break;
 #endif /* CONFIG_HOTPLUG_CPU */
 	}
diff --git a/arch/powerpc/mm/numa.c b/arch/powerpc/mm/numa.c
index b6edbb3..6e8f677 100644
--- a/arch/powerpc/mm/numa.c
+++ b/arch/powerpc/mm/numa.c
@@ -635,7 +635,7 @@
  */
 static void __init parse_drconf_memory(struct device_node *memory)
 {
-	const u32 *dm, *usm;
+	const u32 *uninitialized_var(dm), *usm;
 	unsigned int n, rc, ranges, is_kexec_kdump = 0;
 	unsigned long lmb_size, base, size, sz;
 	int nid;
diff --git a/arch/powerpc/net/bpf_jit_64.S b/arch/powerpc/net/bpf_jit_64.S
index 55ba385..7d3a3b5 100644
--- a/arch/powerpc/net/bpf_jit_64.S
+++ b/arch/powerpc/net/bpf_jit_64.S
@@ -105,6 +105,7 @@
 	mr	r4, r_addr;					\
 	li	r6, SIZE;					\
 	bl	skb_copy_bits;					\
+	nop;							\
 	/* R3 = 0 on success */					\
 	addi	r1, r1, BPF_PPC_SLOWPATH_FRAME;			\
 	ld	r0, 16(r1);					\
@@ -156,6 +157,7 @@
 	mr	r4, r_addr;					\
 	li	r5, SIZE;					\
 	bl	bpf_internal_load_pointer_neg_helper;		\
+	nop;							\
 	/* R3 != 0 on success */				\
 	addi	r1, r1, BPF_PPC_SLOWPATH_FRAME;			\
 	ld	r0, 16(r1);					\
diff --git a/arch/powerpc/platforms/cell/spufs/inode.c b/arch/powerpc/platforms/cell/spufs/inode.c
index 1d75c92..66519d2 100644
--- a/arch/powerpc/platforms/cell/spufs/inode.c
+++ b/arch/powerpc/platforms/cell/spufs/inode.c
@@ -151,7 +151,7 @@
 spufs_evict_inode(struct inode *inode)
 {
 	struct spufs_inode_info *ei = SPUFS_I(inode);
-	end_writeback(inode);
+	clear_inode(inode);
 	if (ei->i_ctx)
 		put_spu_context(ei->i_ctx);
 	if (ei->i_gang)
diff --git a/arch/powerpc/platforms/pseries/iommu.c b/arch/powerpc/platforms/pseries/iommu.c
index 0915b1a..2d311c0 100644
--- a/arch/powerpc/platforms/pseries/iommu.c
+++ b/arch/powerpc/platforms/pseries/iommu.c
@@ -106,7 +106,7 @@
 		tcep++;
 	}
 
-	if (tbl->it_type == TCE_PCI_SWINV_CREATE)
+	if (tbl->it_type & TCE_PCI_SWINV_CREATE)
 		tce_invalidate_pSeries_sw(tbl, tces, tcep - 1);
 	return 0;
 }
@@ -121,7 +121,7 @@
 	while (npages--)
 		*(tcep++) = 0;
 
-	if (tbl->it_type == TCE_PCI_SWINV_FREE)
+	if (tbl->it_type & TCE_PCI_SWINV_FREE)
 		tce_invalidate_pSeries_sw(tbl, tces, tcep - 1);
 }
 
diff --git a/arch/powerpc/platforms/pseries/nvram.c b/arch/powerpc/platforms/pseries/nvram.c
index 36f957f..8733a86 100644
--- a/arch/powerpc/platforms/pseries/nvram.c
+++ b/arch/powerpc/platforms/pseries/nvram.c
@@ -68,9 +68,7 @@
 };
 
 static void oops_to_nvram(struct kmsg_dumper *dumper,
-		enum kmsg_dump_reason reason,
-		const char *old_msgs, unsigned long old_len,
-		const char *new_msgs, unsigned long new_len);
+			  enum kmsg_dump_reason reason);
 
 static struct kmsg_dumper nvram_kmsg_dumper = {
 	.dump = oops_to_nvram
@@ -504,28 +502,6 @@
 }
 
 /*
- * Try to capture the last capture_len bytes of the printk buffer.  Return
- * the amount actually captured.
- */
-static size_t capture_last_msgs(const char *old_msgs, size_t old_len,
-				const char *new_msgs, size_t new_len,
-				char *captured, size_t capture_len)
-{
-	if (new_len >= capture_len) {
-		memcpy(captured, new_msgs + (new_len - capture_len),
-								capture_len);
-		return capture_len;
-	} else {
-		/* Grab the end of old_msgs. */
-		size_t old_tail_len = min(old_len, capture_len - new_len);
-		memcpy(captured, old_msgs + (old_len - old_tail_len),
-								old_tail_len);
-		memcpy(captured + old_tail_len, new_msgs, new_len);
-		return old_tail_len + new_len;
-	}
-}
-
-/*
  * Are we using the ibm,rtas-log for oops/panic reports?  And if so,
  * would logging this oops/panic overwrite an RTAS event that rtas_errd
  * hasn't had a chance to read and process?  Return 1 if so, else 0.
@@ -541,27 +517,6 @@
 						NVRAM_RTAS_READ_TIMEOUT);
 }
 
-/* Squeeze out each line's <n> severity prefix. */
-static size_t elide_severities(char *buf, size_t len)
-{
-	char *in, *out, *buf_end = buf + len;
-	/* Assume a <n> at the very beginning marks the start of a line. */
-	int newline = 1;
-
-	in = out = buf;
-	while (in < buf_end) {
-		if (newline && in+3 <= buf_end &&
-				*in == '<' && isdigit(in[1]) && in[2] == '>') {
-			in += 3;
-			newline = 0;
-		} else {
-			newline = (*in == '\n');
-			*out++ = *in++;
-		}
-	}
-	return out - buf;
-}
-
 /* Derived from logfs_compress() */
 static int nvram_compress(const void *in, void *out, size_t inlen,
 							size_t outlen)
@@ -619,9 +574,7 @@
  * partition.  If that's too much, go back and capture uncompressed text.
  */
 static void oops_to_nvram(struct kmsg_dumper *dumper,
-		enum kmsg_dump_reason reason,
-		const char *old_msgs, unsigned long old_len,
-		const char *new_msgs, unsigned long new_len)
+			  enum kmsg_dump_reason reason)
 {
 	static unsigned int oops_count = 0;
 	static bool panicking = false;
@@ -660,14 +613,14 @@
 		return;
 
 	if (big_oops_buf) {
-		text_len = capture_last_msgs(old_msgs, old_len,
-			new_msgs, new_len, big_oops_buf, big_oops_buf_sz);
-		text_len = elide_severities(big_oops_buf, text_len);
+		kmsg_dump_get_buffer(dumper, false,
+				     big_oops_buf, big_oops_buf_sz, &text_len);
 		rc = zip_oops(text_len);
 	}
 	if (rc != 0) {
-		text_len = capture_last_msgs(old_msgs, old_len,
-				new_msgs, new_len, oops_data, oops_data_sz);
+		kmsg_dump_rewind(dumper);
+		kmsg_dump_get_buffer(dumper, true,
+				     oops_data, oops_data_sz, &text_len);
 		err_type = ERR_TYPE_KERNEL_PANIC;
 		*oops_len = (u16) text_len;
 	}
diff --git a/arch/powerpc/platforms/pseries/processor_idle.c b/arch/powerpc/platforms/pseries/processor_idle.c
index 41a34bc..e61483e 100644
--- a/arch/powerpc/platforms/pseries/processor_idle.c
+++ b/arch/powerpc/platforms/pseries/processor_idle.c
@@ -106,7 +106,7 @@
 	 * we first hard disable then check.
 	 */
 	hard_irq_disable();
-	if (get_paca()->irq_happened == 0)
+	if (!lazy_irq_pending())
 		cede_processor();
 }
 
diff --git a/arch/s390/Kconfig b/arch/s390/Kconfig
index b403c53..a39b469 100644
--- a/arch/s390/Kconfig
+++ b/arch/s390/Kconfig
@@ -87,6 +87,7 @@
 	select ARCH_SAVE_PAGE_KEYS if HIBERNATION
 	select HAVE_MEMBLOCK
 	select HAVE_MEMBLOCK_NODE_MAP
+	select HAVE_CMPXCHG_LOCAL
 	select ARCH_DISCARD_MEMBLOCK
 	select ARCH_INLINE_SPIN_TRYLOCK
 	select ARCH_INLINE_SPIN_TRYLOCK_BH
diff --git a/arch/s390/hypfs/inode.c b/arch/s390/hypfs/inode.c
index 6a2cb56..73dae8b 100644
--- a/arch/s390/hypfs/inode.c
+++ b/arch/s390/hypfs/inode.c
@@ -115,7 +115,7 @@
 
 static void hypfs_evict_inode(struct inode *inode)
 {
-	end_writeback(inode);
+	clear_inode(inode);
 	kfree(inode->i_private);
 }
 
diff --git a/arch/s390/include/asm/bitops.h b/arch/s390/include/asm/bitops.h
index e5beb49..a6ff5a8 100644
--- a/arch/s390/include/asm/bitops.h
+++ b/arch/s390/include/asm/bitops.h
@@ -13,8 +13,6 @@
  *
  */
 
-#ifdef __KERNEL__
-
 #ifndef _LINUX_BITOPS_H
 #error only <linux/bitops.h> can be included directly
 #endif
@@ -63,7 +61,7 @@
 extern const char _zb_findmap[];
 extern const char _sb_findmap[];
 
-#ifndef __s390x__
+#ifndef CONFIG_64BIT
 
 #define __BITOPS_ALIGN		3
 #define __BITOPS_WORDSIZE	32
@@ -83,7 +81,7 @@
 		: "d" (__val), "Q" (*(unsigned long *) __addr)	\
 		: "cc");
 
-#else /* __s390x__ */
+#else /* CONFIG_64BIT */
 
 #define __BITOPS_ALIGN		7
 #define __BITOPS_WORDSIZE	64
@@ -103,7 +101,7 @@
 		: "d" (__val), "Q" (*(unsigned long *) __addr)	\
 		: "cc");
 
-#endif /* __s390x__ */
+#endif /* CONFIG_64BIT */
 
 #define __BITOPS_WORDS(bits) (((bits)+__BITOPS_WORDSIZE-1)/__BITOPS_WORDSIZE)
 #define __BITOPS_BARRIER() asm volatile("" : : : "memory")
@@ -412,7 +410,7 @@
 	unsigned long bytes = 0;
 
 	asm volatile(
-#ifndef __s390x__
+#ifndef CONFIG_64BIT
 		"	ahi	%1,-1\n"
 		"	sra	%1,5\n"
 		"	jz	1f\n"
@@ -449,7 +447,7 @@
 	unsigned long bytes = 0;
 
 	asm volatile(
-#ifndef __s390x__
+#ifndef CONFIG_64BIT
 		"	ahi	%1,-1\n"
 		"	sra	%1,5\n"
 		"	jz	1f\n"
@@ -481,7 +479,7 @@
  */
 static inline unsigned long __ffz_word(unsigned long nr, unsigned long word)
 {
-#ifdef __s390x__
+#ifdef CONFIG_64BIT
 	if ((word & 0xffffffff) == 0xffffffff) {
 		word >>= 32;
 		nr += 32;
@@ -505,7 +503,7 @@
  */
 static inline unsigned long __ffs_word(unsigned long nr, unsigned long word)
 {
-#ifdef __s390x__
+#ifdef CONFIG_64BIT
 	if ((word & 0xffffffff) == 0) {
 		word >>= 32;
 		nr += 32;
@@ -546,7 +544,7 @@
 	unsigned long word;
 
 	p = (unsigned long *)((unsigned long) p + offset);
-#ifndef __s390x__
+#ifndef CONFIG_64BIT
 	asm volatile(
 		"	ic	%0,%O1(%R1)\n"
 		"	icm	%0,2,%O1+1(%R1)\n"
@@ -834,7 +832,4 @@
 
 #include <asm-generic/bitops/ext2-atomic-setbit.h>
 
-
-#endif /* __KERNEL__ */
-
 #endif /* _S390_BITOPS_H */
diff --git a/arch/s390/include/asm/cio.h b/arch/s390/include/asm/cio.h
index fc50a334..4c8d4d5 100644
--- a/arch/s390/include/asm/cio.h
+++ b/arch/s390/include/asm/cio.h
@@ -10,8 +10,6 @@
 #include <linux/spinlock.h>
 #include <asm/types.h>
 
-#ifdef __KERNEL__
-
 #define LPM_ANYPATH 0xff
 #define __MAX_CSSID 0
 
@@ -291,5 +289,3 @@
 int chsc_sstpi(void *page, void *result, size_t size);
 
 #endif
-
-#endif
diff --git a/arch/s390/include/asm/cmpxchg.h b/arch/s390/include/asm/cmpxchg.h
index 81d7908..8d798e9 100644
--- a/arch/s390/include/asm/cmpxchg.h
+++ b/arch/s390/include/asm/cmpxchg.h
@@ -29,7 +29,7 @@
 			"	cs	%0,0,%4\n"
 			"	jl	0b\n"
 			: "=&d" (old), "=Q" (*(int *) addr)
-			: "d" (x << shift), "d" (~(255 << shift)),
+			: "d" ((x & 0xff) << shift), "d" (~(0xff << shift)),
 			  "Q" (*(int *) addr) : "memory", "cc", "0");
 		return old >> shift;
 	case 2:
@@ -44,7 +44,7 @@
 			"	cs	%0,0,%4\n"
 			"	jl	0b\n"
 			: "=&d" (old), "=Q" (*(int *) addr)
-			: "d" (x << shift), "d" (~(65535 << shift)),
+			: "d" ((x & 0xffff) << shift), "d" (~(0xffff << shift)),
 			  "Q" (*(int *) addr) : "memory", "cc", "0");
 		return old >> shift;
 	case 4:
@@ -113,9 +113,10 @@
 			"	nr	%1,%5\n"
 			"	jnz	0b\n"
 			"1:"
-			: "=&d" (prev), "=&d" (tmp), "=Q" (*(int *) ptr)
-			: "d" (old << shift), "d" (new << shift),
-			  "d" (~(255 << shift)), "Q" (*(int *) ptr)
+			: "=&d" (prev), "=&d" (tmp), "+Q" (*(int *) addr)
+			: "d" ((old & 0xff) << shift),
+			  "d" ((new & 0xff) << shift),
+			  "d" (~(0xff << shift))
 			: "memory", "cc");
 		return prev >> shift;
 	case 2:
@@ -134,9 +135,10 @@
 			"	nr	%1,%5\n"
 			"	jnz	0b\n"
 			"1:"
-			: "=&d" (prev), "=&d" (tmp), "=Q" (*(int *) ptr)
-			: "d" (old << shift), "d" (new << shift),
-			  "d" (~(65535 << shift)), "Q" (*(int *) ptr)
+			: "=&d" (prev), "=&d" (tmp), "+Q" (*(int *) addr)
+			: "d" ((old & 0xffff) << shift),
+			  "d" ((new & 0xffff) << shift),
+			  "d" (~(0xffff << shift))
 			: "memory", "cc");
 		return prev >> shift;
 	case 4:
@@ -160,9 +162,14 @@
 	return old;
 }
 
-#define cmpxchg(ptr, o, n)						\
-	((__typeof__(*(ptr)))__cmpxchg((ptr), (unsigned long)(o),	\
-				       (unsigned long)(n), sizeof(*(ptr))))
+#define cmpxchg(ptr, o, n)						 \
+({									 \
+	__typeof__(*(ptr)) __ret;					 \
+	__ret = (__typeof__(*(ptr)))					 \
+		__cmpxchg((ptr), (unsigned long)(o), (unsigned long)(n), \
+			  sizeof(*(ptr)));				 \
+	__ret;								 \
+})
 
 #ifdef CONFIG_64BIT
 #define cmpxchg64(ptr, o, n)						\
@@ -181,13 +188,19 @@
 		"	cds	%0,%2,%1"
 		: "+&d" (rp_old), "=Q" (ptr)
 		: "d" (rp_new), "Q" (ptr)
-		: "cc");
+		: "memory", "cc");
 	return rp_old.pair;
 }
-#define cmpxchg64(ptr, o, n)						\
-	((__typeof__(*(ptr)))__cmpxchg64((ptr),				\
-					 (unsigned long long)(o),	\
-					 (unsigned long long)(n)))
+
+#define cmpxchg64(ptr, o, n)				\
+({							\
+	__typeof__(*(ptr)) __ret;			\
+	__ret = (__typeof__(*(ptr)))			\
+		__cmpxchg64((ptr),			\
+			    (unsigned long long)(o),	\
+			    (unsigned long long)(n));	\
+	__ret;						\
+})
 #endif /* CONFIG_64BIT */
 
 #include <asm-generic/cmpxchg-local.h>
@@ -216,8 +229,13 @@
  * them available.
  */
 #define cmpxchg_local(ptr, o, n)					\
-	((__typeof__(*(ptr)))__cmpxchg_local((ptr), (unsigned long)(o),	\
-			(unsigned long)(n), sizeof(*(ptr))))
+({									\
+	__typeof__(*(ptr)) __ret;					\
+	__ret = (__typeof__(*(ptr)))					\
+		__cmpxchg_local((ptr), (unsigned long)(o),		\
+				(unsigned long)(n), sizeof(*(ptr)));	\
+	__ret;								\
+})
 
 #define cmpxchg64_local(ptr, o, n)	cmpxchg64((ptr), (o), (n))
 
diff --git a/arch/s390/include/asm/cputime.h b/arch/s390/include/asm/cputime.h
index 24ef186..718374d 100644
--- a/arch/s390/include/asm/cputime.h
+++ b/arch/s390/include/asm/cputime.h
@@ -21,15 +21,15 @@
 
 static inline unsigned long __div(unsigned long long n, unsigned long base)
 {
-#ifndef __s390x__
+#ifndef CONFIG_64BIT
 	register_pair rp;
 
 	rp.pair = n >> 1;
 	asm ("dr %0,%1" : "+d" (rp) : "d" (base >> 1));
 	return rp.subreg.odd;
-#else /* __s390x__ */
+#else /* CONFIG_64BIT */
 	return n / base;
-#endif /* __s390x__ */
+#endif /* CONFIG_64BIT */
 }
 
 #define cputime_one_jiffy		jiffies_to_cputime(1)
@@ -100,7 +100,7 @@
 				       struct timespec *value)
 {
 	unsigned long long __cputime = (__force unsigned long long) cputime;
-#ifndef __s390x__
+#ifndef CONFIG_64BIT
 	register_pair rp;
 
 	rp.pair = __cputime >> 1;
@@ -128,7 +128,7 @@
 				      struct timeval *value)
 {
 	unsigned long long __cputime = (__force unsigned long long) cputime;
-#ifndef __s390x__
+#ifndef CONFIG_64BIT
 	register_pair rp;
 
 	rp.pair = __cputime >> 1;
diff --git a/arch/s390/include/asm/ctl_reg.h b/arch/s390/include/asm/ctl_reg.h
index ecde941..debfda3 100644
--- a/arch/s390/include/asm/ctl_reg.h
+++ b/arch/s390/include/asm/ctl_reg.h
@@ -7,7 +7,7 @@
 #ifndef __ASM_CTL_REG_H
 #define __ASM_CTL_REG_H
 
-#ifdef __s390x__
+#ifdef CONFIG_64BIT
 
 #define __ctl_load(array, low, high) ({				\
 	typedef struct { char _[sizeof(array)]; } addrtype;	\
@@ -25,7 +25,7 @@
 		: "i" (low), "i" (high));			\
 	})
 
-#else /* __s390x__ */
+#else /* CONFIG_64BIT */
 
 #define __ctl_load(array, low, high) ({				\
 	typedef struct { char _[sizeof(array)]; } addrtype;	\
@@ -43,7 +43,7 @@
 		: "i" (low), "i" (high));			\
 	})
 
-#endif /* __s390x__ */
+#endif /* CONFIG_64BIT */
 
 #define __ctl_set_bit(cr, bit) ({	\
 	unsigned long __dummy;		\
diff --git a/arch/s390/include/asm/current.h b/arch/s390/include/asm/current.h
index 83cf36c..7a68084 100644
--- a/arch/s390/include/asm/current.h
+++ b/arch/s390/include/asm/current.h
@@ -11,13 +11,10 @@
 #ifndef _S390_CURRENT_H
 #define _S390_CURRENT_H
 
-#ifdef __KERNEL__
 #include <asm/lowcore.h>
 
 struct task_struct;
 
 #define current ((struct task_struct *const)S390_lowcore.current_task)
 
-#endif
-
 #endif /* !(_S390_CURRENT_H) */
diff --git a/arch/s390/include/asm/elf.h b/arch/s390/include/asm/elf.h
index c4ee39f..06151e6 100644
--- a/arch/s390/include/asm/elf.h
+++ b/arch/s390/include/asm/elf.h
@@ -107,11 +107,11 @@
 /*
  * These are used to set parameters in the core dumps.
  */
-#ifndef __s390x__
+#ifndef CONFIG_64BIT
 #define ELF_CLASS	ELFCLASS32
-#else /* __s390x__ */
+#else /* CONFIG_64BIT */
 #define ELF_CLASS	ELFCLASS64
-#endif /* __s390x__ */
+#endif /* CONFIG_64BIT */
 #define ELF_DATA	ELFDATA2MSB
 #define ELF_ARCH	EM_S390
 
@@ -181,9 +181,9 @@
 extern char elf_platform[];
 #define ELF_PLATFORM (elf_platform)
 
-#ifndef __s390x__
+#ifndef CONFIG_64BIT
 #define SET_PERSONALITY(ex) set_personality(PER_LINUX)
-#else /* __s390x__ */
+#else /* CONFIG_64BIT */
 #define SET_PERSONALITY(ex)					\
 do {								\
 	if (personality(current->personality) != PER_LINUX32)	\
@@ -194,7 +194,7 @@
 	else							\
 		clear_thread_flag(TIF_31BIT);			\
 } while (0)
-#endif /* __s390x__ */
+#endif /* CONFIG_64BIT */
 
 #define STACK_RND_MASK	0x7ffUL
 
diff --git a/arch/s390/include/asm/futex.h b/arch/s390/include/asm/futex.h
index 81cf36b..96bc83e 100644
--- a/arch/s390/include/asm/futex.h
+++ b/arch/s390/include/asm/futex.h
@@ -1,8 +1,6 @@
 #ifndef _ASM_S390_FUTEX_H
 #define _ASM_S390_FUTEX_H
 
-#ifdef __KERNEL__
-
 #include <linux/futex.h>
 #include <linux/uaccess.h>
 #include <asm/errno.h>
@@ -48,5 +46,4 @@
 	return uaccess.futex_atomic_cmpxchg(uval, uaddr, oldval, newval);
 }
 
-#endif /* __KERNEL__ */
 #endif /* _ASM_S390_FUTEX_H */
diff --git a/arch/s390/include/asm/idals.h b/arch/s390/include/asm/idals.h
index aae276d..aef0dde 100644
--- a/arch/s390/include/asm/idals.h
+++ b/arch/s390/include/asm/idals.h
@@ -20,7 +20,7 @@
 #include <asm/cio.h>
 #include <asm/uaccess.h>
 
-#ifdef __s390x__
+#ifdef CONFIG_64BIT
 #define IDA_SIZE_LOG 12 /* 11 for 2k , 12 for 4k */
 #else
 #define IDA_SIZE_LOG 11 /* 11 for 2k , 12 for 4k */
@@ -33,7 +33,7 @@
 static inline int
 idal_is_needed(void *vaddr, unsigned int length)
 {
-#ifdef __s390x__
+#ifdef CONFIG_64BIT
 	return ((__pa(vaddr) + length - 1) >> 31) != 0;
 #else
 	return 0;
@@ -78,7 +78,7 @@
 static inline int
 set_normalized_cda(struct ccw1 * ccw, void *vaddr)
 {
-#ifdef __s390x__
+#ifdef CONFIG_64BIT
 	unsigned int nridaws;
 	unsigned long *idal;
 
@@ -105,7 +105,7 @@
 static inline void
 clear_normalized_cda(struct ccw1 * ccw)
 {
-#ifdef __s390x__
+#ifdef CONFIG_64BIT
 	if (ccw->flags & CCW_FLAG_IDA) {
 		kfree((void *)(unsigned long) ccw->cda);
 		ccw->flags &= ~CCW_FLAG_IDA;
@@ -182,7 +182,7 @@
 static inline int
 __idal_buffer_is_needed(struct idal_buffer *ib)
 {
-#ifdef __s390x__
+#ifdef CONFIG_64BIT
 	return ib->size > (4096ul << ib->page_order) ||
 		idal_is_needed(ib->data[0], ib->size);
 #else
diff --git a/arch/s390/include/asm/io.h b/arch/s390/include/asm/io.h
index 27216d3..f81a097 100644
--- a/arch/s390/include/asm/io.h
+++ b/arch/s390/include/asm/io.h
@@ -11,8 +11,6 @@
 #ifndef _S390_IO_H
 #define _S390_IO_H
 
-#ifdef __KERNEL__
-
 #include <asm/page.h>
 
 #define IO_SPACE_LIMIT 0xffffffff
@@ -46,6 +44,4 @@
  */
 #define xlate_dev_kmem_ptr(p)	p
 
-#endif /* __KERNEL__ */
-
 #endif
diff --git a/arch/s390/include/asm/irq.h b/arch/s390/include/asm/irq.h
index 5289cac..2b9d418 100644
--- a/arch/s390/include/asm/irq.h
+++ b/arch/s390/include/asm/irq.h
@@ -17,7 +17,8 @@
 	EXTINT_VRT,
 	EXTINT_SCP,
 	EXTINT_IUC,
-	EXTINT_CPM,
+	EXTINT_CMS,
+	EXTINT_CMC,
 	IOINT_CIO,
 	IOINT_QAI,
 	IOINT_DAS,
diff --git a/arch/s390/include/asm/kexec.h b/arch/s390/include/asm/kexec.h
index 3f30dac..f4f3882 100644
--- a/arch/s390/include/asm/kexec.h
+++ b/arch/s390/include/asm/kexec.h
@@ -10,10 +10,8 @@
 #ifndef _S390_KEXEC_H
 #define _S390_KEXEC_H
 
-#ifdef __KERNEL__
-#include <asm/page.h>
-#endif
 #include <asm/processor.h>
+#include <asm/page.h>
 /*
  * KEXEC_SOURCE_MEMORY_LIMIT maximum page get_free_page can return.
  * I.e. Maximum page that is mapped directly into kernel memory,
diff --git a/arch/s390/include/asm/kmap_types.h b/arch/s390/include/asm/kmap_types.h
index 94ec3ee..0a88622 100644
--- a/arch/s390/include/asm/kmap_types.h
+++ b/arch/s390/include/asm/kmap_types.h
@@ -1,8 +1,6 @@
-#ifdef __KERNEL__
 #ifndef _ASM_KMAP_TYPES_H
 #define _ASM_KMAP_TYPES_H
 
 #include <asm-generic/kmap_types.h>
 
 #endif
-#endif /* __KERNEL__ */
diff --git a/arch/s390/include/asm/mmu_context.h b/arch/s390/include/asm/mmu_context.h
index 5d09e40..69bdf72 100644
--- a/arch/s390/include/asm/mmu_context.h
+++ b/arch/s390/include/asm/mmu_context.h
@@ -49,7 +49,7 @@
 
 #define destroy_context(mm)             do { } while (0)
 
-#ifndef __s390x__
+#ifndef CONFIG_64BIT
 #define LCTL_OPCODE "lctl"
 #else
 #define LCTL_OPCODE "lctlg"
diff --git a/arch/s390/include/asm/module.h b/arch/s390/include/asm/module.h
index 1cc1c5a..f0b6b26 100644
--- a/arch/s390/include/asm/module.h
+++ b/arch/s390/include/asm/module.h
@@ -28,7 +28,7 @@
 	struct mod_arch_syminfo *syminfo;
 };
 
-#ifdef __s390x__
+#ifdef CONFIG_64BIT
 #define ElfW(x) Elf64_ ## x
 #define ELFW(x) ELF64_ ## x
 #else
diff --git a/arch/s390/include/asm/os_info.h b/arch/s390/include/asm/os_info.h
index d07518a..295f2c4 100644
--- a/arch/s390/include/asm/os_info.h
+++ b/arch/s390/include/asm/os_info.h
@@ -13,7 +13,6 @@
 
 #define OS_INFO_VMCOREINFO	0
 #define OS_INFO_REIPL_BLOCK	1
-#define OS_INFO_INIT_FN		2
 
 struct os_info_entry {
 	u64	addr;
@@ -28,8 +27,8 @@
 	u16	version_minor;
 	u64	crashkernel_addr;
 	u64	crashkernel_size;
-	struct os_info_entry entry[3];
-	u8	reserved[4004];
+	struct os_info_entry entry[2];
+	u8	reserved[4024];
 } __packed;
 
 void os_info_init(void);
diff --git a/arch/s390/include/asm/percpu.h b/arch/s390/include/asm/percpu.h
index 0fbd189..6537e72 100644
--- a/arch/s390/include/asm/percpu.h
+++ b/arch/s390/include/asm/percpu.h
@@ -15,7 +15,7 @@
  * per cpu area, use weak definitions to force the compiler to
  * generate external references.
  */
-#if defined(CONFIG_SMP) && defined(__s390x__) && defined(MODULE)
+#if defined(CONFIG_SMP) && defined(CONFIG_64BIT) && defined(MODULE)
 #define ARCH_NEEDS_WEAK_PER_CPU
 #endif
 
diff --git a/arch/s390/include/asm/pgalloc.h b/arch/s390/include/asm/pgalloc.h
index 78e3041..43078c1 100644
--- a/arch/s390/include/asm/pgalloc.h
+++ b/arch/s390/include/asm/pgalloc.h
@@ -48,7 +48,7 @@
 	clear_table(crst, entry, sizeof(unsigned long)*2048);
 }
 
-#ifndef __s390x__
+#ifndef CONFIG_64BIT
 
 static inline unsigned long pgd_entry_type(struct mm_struct *mm)
 {
@@ -64,7 +64,7 @@
 #define pgd_populate(mm, pgd, pud)		BUG()
 #define pud_populate(mm, pud, pmd)		BUG()
 
-#else /* __s390x__ */
+#else /* CONFIG_64BIT */
 
 static inline unsigned long pgd_entry_type(struct mm_struct *mm)
 {
@@ -106,7 +106,7 @@
 	pud_val(*pud) = _REGION3_ENTRY | __pa(pmd);
 }
 
-#endif /* __s390x__ */
+#endif /* CONFIG_64BIT */
 
 static inline pgd_t *pgd_alloc(struct mm_struct *mm)
 {
diff --git a/arch/s390/include/asm/pgtable.h b/arch/s390/include/asm/pgtable.h
index 011358c..b322741 100644
--- a/arch/s390/include/asm/pgtable.h
+++ b/arch/s390/include/asm/pgtable.h
@@ -74,15 +74,15 @@
  * table can map
  * PGDIR_SHIFT determines what a third-level page table entry can map
  */
-#ifndef __s390x__
+#ifndef CONFIG_64BIT
 # define PMD_SHIFT	20
 # define PUD_SHIFT	20
 # define PGDIR_SHIFT	20
-#else /* __s390x__ */
+#else /* CONFIG_64BIT */
 # define PMD_SHIFT	20
 # define PUD_SHIFT	31
 # define PGDIR_SHIFT	42
-#endif /* __s390x__ */
+#endif /* CONFIG_64BIT */
 
 #define PMD_SIZE        (1UL << PMD_SHIFT)
 #define PMD_MASK        (~(PMD_SIZE-1))
@@ -98,13 +98,13 @@
  * that leads to 1024 pte per pgd
  */
 #define PTRS_PER_PTE	256
-#ifndef __s390x__
+#ifndef CONFIG_64BIT
 #define PTRS_PER_PMD	1
 #define PTRS_PER_PUD	1
-#else /* __s390x__ */
+#else /* CONFIG_64BIT */
 #define PTRS_PER_PMD	2048
 #define PTRS_PER_PUD	2048
-#endif /* __s390x__ */
+#endif /* CONFIG_64BIT */
 #define PTRS_PER_PGD	2048
 
 #define FIRST_USER_ADDRESS  0
@@ -276,7 +276,7 @@
  * swap pte is 1011 and 0001, 0011, 0101, 0111 are invalid.
  */
 
-#ifndef __s390x__
+#ifndef CONFIG_64BIT
 
 /* Bits in the segment table address-space-control-element */
 #define _ASCE_SPACE_SWITCH	0x80000000UL	/* space switch event	    */
@@ -308,7 +308,7 @@
 #define KVM_UR_BIT	0x00008000UL
 #define KVM_UC_BIT	0x00004000UL
 
-#else /* __s390x__ */
+#else /* CONFIG_64BIT */
 
 /* Bits in the segment/region table address-space-control-element */
 #define _ASCE_ORIGIN		~0xfffUL/* segment table origin		    */
@@ -363,7 +363,7 @@
 #define KVM_UR_BIT	0x0000800000000000UL
 #define KVM_UC_BIT	0x0000400000000000UL
 
-#endif /* __s390x__ */
+#endif /* CONFIG_64BIT */
 
 /*
  * A user page table pointer has the space-switch-event bit, the
@@ -424,7 +424,7 @@
 /*
  * pgd/pmd/pte query functions
  */
-#ifndef __s390x__
+#ifndef CONFIG_64BIT
 
 static inline int pgd_present(pgd_t pgd) { return 1; }
 static inline int pgd_none(pgd_t pgd)    { return 0; }
@@ -434,7 +434,7 @@
 static inline int pud_none(pud_t pud)	 { return 0; }
 static inline int pud_bad(pud_t pud)	 { return 0; }
 
-#else /* __s390x__ */
+#else /* CONFIG_64BIT */
 
 static inline int pgd_present(pgd_t pgd)
 {
@@ -490,7 +490,7 @@
 	return (pud_val(pud) & mask) != 0;
 }
 
-#endif /* __s390x__ */
+#endif /* CONFIG_64BIT */
 
 static inline int pmd_present(pmd_t pmd)
 {
@@ -741,7 +741,7 @@
 
 static inline void pgd_clear(pgd_t *pgd)
 {
-#ifdef __s390x__
+#ifdef CONFIG_64BIT
 	if ((pgd_val(*pgd) & _REGION_ENTRY_TYPE_MASK) == _REGION_ENTRY_TYPE_R2)
 		pgd_val(*pgd) = _REGION2_ENTRY_EMPTY;
 #endif
@@ -749,7 +749,7 @@
 
 static inline void pud_clear(pud_t *pud)
 {
-#ifdef __s390x__
+#ifdef CONFIG_64BIT
 	if ((pud_val(*pud) & _REGION_ENTRY_TYPE_MASK) == _REGION_ENTRY_TYPE_R3)
 		pud_val(*pud) = _REGION3_ENTRY_EMPTY;
 #endif
@@ -921,7 +921,7 @@
 static inline void __ptep_ipte(unsigned long address, pte_t *ptep)
 {
 	if (!(pte_val(*ptep) & _PAGE_INVALID)) {
-#ifndef __s390x__
+#ifndef CONFIG_64BIT
 		/* pto must point to the start of the segment table */
 		pte_t *pto = (pte_t *) (((unsigned long) ptep) & 0x7ffffc00);
 #else
@@ -1116,7 +1116,7 @@
 #define pgd_offset(mm, address) ((mm)->pgd + pgd_index(address))
 #define pgd_offset_k(address) pgd_offset(&init_mm, address)
 
-#ifndef __s390x__
+#ifndef CONFIG_64BIT
 
 #define pmd_deref(pmd) (pmd_val(pmd) & _SEGMENT_ENTRY_ORIGIN)
 #define pud_deref(pmd) ({ BUG(); 0UL; })
@@ -1125,7 +1125,7 @@
 #define pud_offset(pgd, address) ((pud_t *) pgd)
 #define pmd_offset(pud, address) ((pmd_t *) pud + pmd_index(address))
 
-#else /* __s390x__ */
+#else /* CONFIG_64BIT */
 
 #define pmd_deref(pmd) (pmd_val(pmd) & _SEGMENT_ENTRY_ORIGIN)
 #define pud_deref(pud) (pud_val(pud) & _REGION_ENTRY_ORIGIN)
@@ -1147,7 +1147,7 @@
 	return pmd + pmd_index(address);
 }
 
-#endif /* __s390x__ */
+#endif /* CONFIG_64BIT */
 
 #define pfn_pte(pfn,pgprot) mk_pte_phys(__pa((pfn) << PAGE_SHIFT),(pgprot))
 #define pte_pfn(x) (pte_val(x) >> PAGE_SHIFT)
@@ -1196,7 +1196,7 @@
  *  0000000000111111111122222222223333333333444444444455 5555 5 55566 66
  *  0123456789012345678901234567890123456789012345678901 2345 6 78901 23
  */
-#ifndef __s390x__
+#ifndef CONFIG_64BIT
 #define __SWP_OFFSET_MASK (~0UL >> 12)
 #else
 #define __SWP_OFFSET_MASK (~0UL >> 11)
@@ -1217,11 +1217,11 @@
 #define __pte_to_swp_entry(pte)	((swp_entry_t) { pte_val(pte) })
 #define __swp_entry_to_pte(x)	((pte_t) { (x).val })
 
-#ifndef __s390x__
+#ifndef CONFIG_64BIT
 # define PTE_FILE_MAX_BITS	26
-#else /* __s390x__ */
+#else /* CONFIG_64BIT */
 # define PTE_FILE_MAX_BITS	59
-#endif /* __s390x__ */
+#endif /* CONFIG_64BIT */
 
 #define pte_to_pgoff(__pte) \
 	((((__pte).pte >> 12) << 7) + (((__pte).pte >> 1) & 0x7f))
diff --git a/arch/s390/include/asm/posix_types.h b/arch/s390/include/asm/posix_types.h
index edf8527..7be104c 100644
--- a/arch/s390/include/asm/posix_types.h
+++ b/arch/s390/include/asm/posix_types.h
@@ -24,7 +24,6 @@
 
 typedef unsigned long   __kernel_ino_t;
 typedef unsigned short  __kernel_mode_t;
-typedef unsigned short  __kernel_nlink_t;
 typedef unsigned short  __kernel_ipc_pid_t;
 typedef unsigned short  __kernel_uid_t;
 typedef unsigned short  __kernel_gid_t;
@@ -35,7 +34,6 @@
 
 typedef unsigned int    __kernel_ino_t;
 typedef unsigned int    __kernel_mode_t;
-typedef unsigned int    __kernel_nlink_t;
 typedef int             __kernel_ipc_pid_t;
 typedef unsigned int    __kernel_uid_t;
 typedef unsigned int    __kernel_gid_t;
@@ -47,7 +45,6 @@
 
 #define __kernel_ino_t  __kernel_ino_t
 #define __kernel_mode_t __kernel_mode_t
-#define __kernel_nlink_t __kernel_nlink_t
 #define __kernel_ipc_pid_t __kernel_ipc_pid_t
 #define __kernel_uid_t __kernel_uid_t
 #define __kernel_gid_t __kernel_gid_t
diff --git a/arch/s390/include/asm/processor.h b/arch/s390/include/asm/processor.h
index 6cbf313..20d0585 100644
--- a/arch/s390/include/asm/processor.h
+++ b/arch/s390/include/asm/processor.h
@@ -20,7 +20,6 @@
 #include <asm/ptrace.h>
 #include <asm/setup.h>
 
-#ifdef __KERNEL__
 /*
  * Default implementation of macro that returns current
  * instruction pointer ("program counter").
@@ -33,39 +32,33 @@
 }
 
 extern void s390_adjust_jiffies(void);
-extern int get_cpu_capability(unsigned int *);
 extern const struct seq_operations cpuinfo_op;
 extern int sysctl_ieee_emulation_warnings;
 
 /*
  * User space process size: 2GB for 31 bit, 4TB or 8PT for 64 bit.
  */
-#ifndef __s390x__
+#ifndef CONFIG_64BIT
 
 #define TASK_SIZE		(1UL << 31)
 #define TASK_UNMAPPED_BASE	(1UL << 30)
 
-#else /* __s390x__ */
+#else /* CONFIG_64BIT */
 
 #define TASK_SIZE_OF(tsk)	((tsk)->mm->context.asce_limit)
 #define TASK_UNMAPPED_BASE	(test_thread_flag(TIF_31BIT) ? \
 					(1UL << 30) : (1UL << 41))
 #define TASK_SIZE		TASK_SIZE_OF(current)
 
-#endif /* __s390x__ */
+#endif /* CONFIG_64BIT */
 
-#ifdef __KERNEL__
-
-#ifndef __s390x__
+#ifndef CONFIG_64BIT
 #define STACK_TOP		(1UL << 31)
 #define STACK_TOP_MAX		(1UL << 31)
-#else /* __s390x__ */
+#else /* CONFIG_64BIT */
 #define STACK_TOP		(1UL << (test_thread_flag(TIF_31BIT) ? 31:42))
 #define STACK_TOP_MAX		(1UL << 42)
-#endif /* __s390x__ */
-
-
-#endif
+#endif /* CONFIG_64BIT */
 
 #define HAVE_ARCH_PICK_MMAP_LAYOUT
 
@@ -182,7 +175,7 @@
  */
 static inline void __load_psw(psw_t psw)
 {
-#ifndef __s390x__
+#ifndef CONFIG_64BIT
 	asm volatile("lpsw  %0" : : "Q" (psw) : "cc");
 #else
 	asm volatile("lpswe %0" : : "Q" (psw) : "cc");
@@ -200,7 +193,7 @@
 
 	psw.mask = mask;
 
-#ifndef __s390x__
+#ifndef CONFIG_64BIT
 	asm volatile(
 		"	basr	%0,0\n"
 		"0:	ahi	%0,1f-0b\n"
@@ -208,14 +201,14 @@
 		"	lpsw	%1\n"
 		"1:"
 		: "=&d" (addr), "=Q" (psw) : "Q" (psw) : "memory", "cc");
-#else /* __s390x__ */
+#else /* CONFIG_64BIT */
 	asm volatile(
 		"	larl	%0,1f\n"
 		"	stg	%0,%O1+8(%R1)\n"
 		"	lpswe	%1\n"
 		"1:"
 		: "=&d" (addr), "=Q" (psw) : "Q" (psw) : "memory", "cc");
-#endif /* __s390x__ */
+#endif /* CONFIG_64BIT */
 }
 
 /*
@@ -223,7 +216,7 @@
  */
 static inline unsigned long __rewind_psw(psw_t psw, unsigned long ilc)
 {
-#ifndef __s390x__
+#ifndef CONFIG_64BIT
 	if (psw.addr & PSW_ADDR_AMODE)
 		/* 31 bit mode */
 		return (psw.addr - ilc) | PSW_ADDR_AMODE;
@@ -253,7 +246,7 @@
          * Store status and then load disabled wait psw,
          * the processor is dead afterwards
          */
-#ifndef __s390x__
+#ifndef CONFIG_64BIT
 	asm volatile(
 		"	stctl	0,0,0(%2)\n"
 		"	ni	0(%2),0xef\n"	/* switch off protection */
@@ -272,7 +265,7 @@
 		"	lpsw	0(%1)"
 		: "=m" (ctl_buf)
 		: "a" (&dw_psw), "a" (&ctl_buf), "m" (dw_psw) : "cc");
-#else /* __s390x__ */
+#else /* CONFIG_64BIT */
 	asm volatile(
 		"	stctg	0,0,0(%2)\n"
 		"	ni	4(%2),0xef\n"	/* switch off protection */
@@ -305,7 +298,7 @@
 		"	lpswe	0(%1)"
 		: "=m" (ctl_buf)
 		: "a" (&dw_psw), "a" (&ctl_buf), "m" (dw_psw) : "cc", "0", "1");
-#endif /* __s390x__ */
+#endif /* CONFIG_64BIT */
 	while (1);
 }
 
@@ -338,12 +331,10 @@
 
 #define ARCH_LOW_ADDRESS_LIMIT	0x7fffffffUL
 
-#endif
-
 /*
  * Helper macro for exception table entries
  */
-#ifndef __s390x__
+#ifndef CONFIG_64BIT
 #define EX_TABLE(_fault,_target)			\
 	".section __ex_table,\"a\"\n"			\
 	"	.align 4\n"				\
diff --git a/arch/s390/include/asm/rwsem.h b/arch/s390/include/asm/rwsem.h
index d0eb465..1ceee10 100644
--- a/arch/s390/include/asm/rwsem.h
+++ b/arch/s390/include/asm/rwsem.h
@@ -41,19 +41,17 @@
 #error "please don't include asm/rwsem.h directly, use linux/rwsem.h instead"
 #endif
 
-#ifdef __KERNEL__
-
-#ifndef __s390x__
+#ifndef CONFIG_64BIT
 #define RWSEM_UNLOCKED_VALUE	0x00000000
 #define RWSEM_ACTIVE_BIAS	0x00000001
 #define RWSEM_ACTIVE_MASK	0x0000ffff
 #define RWSEM_WAITING_BIAS	(-0x00010000)
-#else /* __s390x__ */
+#else /* CONFIG_64BIT */
 #define RWSEM_UNLOCKED_VALUE	0x0000000000000000L
 #define RWSEM_ACTIVE_BIAS	0x0000000000000001L
 #define RWSEM_ACTIVE_MASK	0x00000000ffffffffL
 #define RWSEM_WAITING_BIAS	(-0x0000000100000000L)
-#endif /* __s390x__ */
+#endif /* CONFIG_64BIT */
 #define RWSEM_ACTIVE_READ_BIAS	RWSEM_ACTIVE_BIAS
 #define RWSEM_ACTIVE_WRITE_BIAS	(RWSEM_WAITING_BIAS + RWSEM_ACTIVE_BIAS)
 
@@ -65,19 +63,19 @@
 	signed long old, new;
 
 	asm volatile(
-#ifndef __s390x__
+#ifndef CONFIG_64BIT
 		"	l	%0,%2\n"
 		"0:	lr	%1,%0\n"
 		"	ahi	%1,%4\n"
 		"	cs	%0,%1,%2\n"
 		"	jl	0b"
-#else /* __s390x__ */
+#else /* CONFIG_64BIT */
 		"	lg	%0,%2\n"
 		"0:	lgr	%1,%0\n"
 		"	aghi	%1,%4\n"
 		"	csg	%0,%1,%2\n"
 		"	jl	0b"
-#endif /* __s390x__ */
+#endif /* CONFIG_64BIT */
 		: "=&d" (old), "=&d" (new), "=Q" (sem->count)
 		: "Q" (sem->count), "i" (RWSEM_ACTIVE_READ_BIAS)
 		: "cc", "memory");
@@ -93,7 +91,7 @@
 	signed long old, new;
 
 	asm volatile(
-#ifndef __s390x__
+#ifndef CONFIG_64BIT
 		"	l	%0,%2\n"
 		"0:	ltr	%1,%0\n"
 		"	jm	1f\n"
@@ -101,7 +99,7 @@
 		"	cs	%0,%1,%2\n"
 		"	jl	0b\n"
 		"1:"
-#else /* __s390x__ */
+#else /* CONFIG_64BIT */
 		"	lg	%0,%2\n"
 		"0:	ltgr	%1,%0\n"
 		"	jm	1f\n"
@@ -109,7 +107,7 @@
 		"	csg	%0,%1,%2\n"
 		"	jl	0b\n"
 		"1:"
-#endif /* __s390x__ */
+#endif /* CONFIG_64BIT */
 		: "=&d" (old), "=&d" (new), "=Q" (sem->count)
 		: "Q" (sem->count), "i" (RWSEM_ACTIVE_READ_BIAS)
 		: "cc", "memory");
@@ -125,19 +123,19 @@
 
 	tmp = RWSEM_ACTIVE_WRITE_BIAS;
 	asm volatile(
-#ifndef __s390x__
+#ifndef CONFIG_64BIT
 		"	l	%0,%2\n"
 		"0:	lr	%1,%0\n"
 		"	a	%1,%4\n"
 		"	cs	%0,%1,%2\n"
 		"	jl	0b"
-#else /* __s390x__ */
+#else /* CONFIG_64BIT */
 		"	lg	%0,%2\n"
 		"0:	lgr	%1,%0\n"
 		"	ag	%1,%4\n"
 		"	csg	%0,%1,%2\n"
 		"	jl	0b"
-#endif /* __s390x__ */
+#endif /* CONFIG_64BIT */
 		: "=&d" (old), "=&d" (new), "=Q" (sem->count)
 		: "Q" (sem->count), "m" (tmp)
 		: "cc", "memory");
@@ -158,19 +156,19 @@
 	signed long old;
 
 	asm volatile(
-#ifndef __s390x__
+#ifndef CONFIG_64BIT
 		"	l	%0,%1\n"
 		"0:	ltr	%0,%0\n"
 		"	jnz	1f\n"
 		"	cs	%0,%3,%1\n"
 		"	jl	0b\n"
-#else /* __s390x__ */
+#else /* CONFIG_64BIT */
 		"	lg	%0,%1\n"
 		"0:	ltgr	%0,%0\n"
 		"	jnz	1f\n"
 		"	csg	%0,%3,%1\n"
 		"	jl	0b\n"
-#endif /* __s390x__ */
+#endif /* CONFIG_64BIT */
 		"1:"
 		: "=&d" (old), "=Q" (sem->count)
 		: "Q" (sem->count), "d" (RWSEM_ACTIVE_WRITE_BIAS)
@@ -186,19 +184,19 @@
 	signed long old, new;
 
 	asm volatile(
-#ifndef __s390x__
+#ifndef CONFIG_64BIT
 		"	l	%0,%2\n"
 		"0:	lr	%1,%0\n"
 		"	ahi	%1,%4\n"
 		"	cs	%0,%1,%2\n"
 		"	jl	0b"
-#else /* __s390x__ */
+#else /* CONFIG_64BIT */
 		"	lg	%0,%2\n"
 		"0:	lgr	%1,%0\n"
 		"	aghi	%1,%4\n"
 		"	csg	%0,%1,%2\n"
 		"	jl	0b"
-#endif /* __s390x__ */
+#endif /* CONFIG_64BIT */
 		: "=&d" (old), "=&d" (new), "=Q" (sem->count)
 		: "Q" (sem->count), "i" (-RWSEM_ACTIVE_READ_BIAS)
 		: "cc", "memory");
@@ -216,19 +214,19 @@
 
 	tmp = -RWSEM_ACTIVE_WRITE_BIAS;
 	asm volatile(
-#ifndef __s390x__
+#ifndef CONFIG_64BIT
 		"	l	%0,%2\n"
 		"0:	lr	%1,%0\n"
 		"	a	%1,%4\n"
 		"	cs	%0,%1,%2\n"
 		"	jl	0b"
-#else /* __s390x__ */
+#else /* CONFIG_64BIT */
 		"	lg	%0,%2\n"
 		"0:	lgr	%1,%0\n"
 		"	ag	%1,%4\n"
 		"	csg	%0,%1,%2\n"
 		"	jl	0b"
-#endif /* __s390x__ */
+#endif /* CONFIG_64BIT */
 		: "=&d" (old), "=&d" (new), "=Q" (sem->count)
 		: "Q" (sem->count), "m" (tmp)
 		: "cc", "memory");
@@ -246,19 +244,19 @@
 
 	tmp = -RWSEM_WAITING_BIAS;
 	asm volatile(
-#ifndef __s390x__
+#ifndef CONFIG_64BIT
 		"	l	%0,%2\n"
 		"0:	lr	%1,%0\n"
 		"	a	%1,%4\n"
 		"	cs	%0,%1,%2\n"
 		"	jl	0b"
-#else /* __s390x__ */
+#else /* CONFIG_64BIT */
 		"	lg	%0,%2\n"
 		"0:	lgr	%1,%0\n"
 		"	ag	%1,%4\n"
 		"	csg	%0,%1,%2\n"
 		"	jl	0b"
-#endif /* __s390x__ */
+#endif /* CONFIG_64BIT */
 		: "=&d" (old), "=&d" (new), "=Q" (sem->count)
 		: "Q" (sem->count), "m" (tmp)
 		: "cc", "memory");
@@ -274,19 +272,19 @@
 	signed long old, new;
 
 	asm volatile(
-#ifndef __s390x__
+#ifndef CONFIG_64BIT
 		"	l	%0,%2\n"
 		"0:	lr	%1,%0\n"
 		"	ar	%1,%4\n"
 		"	cs	%0,%1,%2\n"
 		"	jl	0b"
-#else /* __s390x__ */
+#else /* CONFIG_64BIT */
 		"	lg	%0,%2\n"
 		"0:	lgr	%1,%0\n"
 		"	agr	%1,%4\n"
 		"	csg	%0,%1,%2\n"
 		"	jl	0b"
-#endif /* __s390x__ */
+#endif /* CONFIG_64BIT */
 		: "=&d" (old), "=&d" (new), "=Q" (sem->count)
 		: "Q" (sem->count), "d" (delta)
 		: "cc", "memory");
@@ -300,24 +298,23 @@
 	signed long old, new;
 
 	asm volatile(
-#ifndef __s390x__
+#ifndef CONFIG_64BIT
 		"	l	%0,%2\n"
 		"0:	lr	%1,%0\n"
 		"	ar	%1,%4\n"
 		"	cs	%0,%1,%2\n"
 		"	jl	0b"
-#else /* __s390x__ */
+#else /* CONFIG_64BIT */
 		"	lg	%0,%2\n"
 		"0:	lgr	%1,%0\n"
 		"	agr	%1,%4\n"
 		"	csg	%0,%1,%2\n"
 		"	jl	0b"
-#endif /* __s390x__ */
+#endif /* CONFIG_64BIT */
 		: "=&d" (old), "=&d" (new), "=Q" (sem->count)
 		: "Q" (sem->count), "d" (delta)
 		: "cc", "memory");
 	return new;
 }
 
-#endif /* __KERNEL__ */
 #endif /* _S390_RWSEM_H */
diff --git a/arch/s390/include/asm/setup.h b/arch/s390/include/asm/setup.h
index 7244e1f..40eb2ff 100644
--- a/arch/s390/include/asm/setup.h
+++ b/arch/s390/include/asm/setup.h
@@ -22,19 +22,19 @@
 #include <asm/lowcore.h>
 #include <asm/types.h>
 
-#ifndef __s390x__
+#ifndef CONFIG_64BIT
 #define IPL_DEVICE        (*(unsigned long *)  (0x10404))
 #define INITRD_START      (*(unsigned long *)  (0x1040C))
 #define INITRD_SIZE       (*(unsigned long *)  (0x10414))
 #define OLDMEM_BASE	  (*(unsigned long *)  (0x1041C))
 #define OLDMEM_SIZE	  (*(unsigned long *)  (0x10424))
-#else /* __s390x__ */
+#else /* CONFIG_64BIT */
 #define IPL_DEVICE        (*(unsigned long *)  (0x10400))
 #define INITRD_START      (*(unsigned long *)  (0x10408))
 #define INITRD_SIZE       (*(unsigned long *)  (0x10410))
 #define OLDMEM_BASE	  (*(unsigned long *)  (0x10418))
 #define OLDMEM_SIZE	  (*(unsigned long *)  (0x10420))
-#endif /* __s390x__ */
+#endif /* CONFIG_64BIT */
 #define COMMAND_LINE      ((char *)            (0x10480))
 
 #define CHUNK_READ_WRITE 0
@@ -89,7 +89,7 @@
 
 #define MACHINE_HAS_DIAG9C	(S390_lowcore.machine_flags & MACHINE_FLAG_DIAG9C)
 
-#ifndef __s390x__
+#ifndef CONFIG_64BIT
 #define MACHINE_HAS_IEEE	(S390_lowcore.machine_flags & MACHINE_FLAG_IEEE)
 #define MACHINE_HAS_CSP		(S390_lowcore.machine_flags & MACHINE_FLAG_CSP)
 #define MACHINE_HAS_IDTE	(0)
@@ -100,7 +100,7 @@
 #define MACHINE_HAS_PFMF	(0)
 #define MACHINE_HAS_SPP		(0)
 #define MACHINE_HAS_TOPOLOGY	(0)
-#else /* __s390x__ */
+#else /* CONFIG_64BIT */
 #define MACHINE_HAS_IEEE	(1)
 #define MACHINE_HAS_CSP		(1)
 #define MACHINE_HAS_IDTE	(S390_lowcore.machine_flags & MACHINE_FLAG_IDTE)
@@ -111,7 +111,7 @@
 #define MACHINE_HAS_PFMF	(S390_lowcore.machine_flags & MACHINE_FLAG_PFMF)
 #define MACHINE_HAS_SPP		(S390_lowcore.machine_flags & MACHINE_FLAG_SPP)
 #define MACHINE_HAS_TOPOLOGY	(S390_lowcore.machine_flags & MACHINE_FLAG_TOPOLOGY)
-#endif /* __s390x__ */
+#endif /* CONFIG_64BIT */
 
 #define ZFCPDUMP_HSA_SIZE	(32UL<<20)
 #define ZFCPDUMP_HSA_SIZE_MAX	(64UL<<20)
@@ -153,19 +153,19 @@
 
 #else /* __ASSEMBLY__ */
 
-#ifndef __s390x__
+#ifndef CONFIG_64BIT
 #define IPL_DEVICE        0x10404
 #define INITRD_START      0x1040C
 #define INITRD_SIZE       0x10414
 #define OLDMEM_BASE	  0x1041C
 #define OLDMEM_SIZE	  0x10424
-#else /* __s390x__ */
+#else /* CONFIG_64BIT */
 #define IPL_DEVICE        0x10400
 #define INITRD_START      0x10408
 #define INITRD_SIZE       0x10410
 #define OLDMEM_BASE	  0x10418
 #define OLDMEM_SIZE	  0x10420
-#endif /* __s390x__ */
+#endif /* CONFIG_64BIT */
 #define COMMAND_LINE      0x10480
 
 #endif /* __ASSEMBLY__ */
diff --git a/arch/s390/include/asm/sfp-util.h b/arch/s390/include/asm/sfp-util.h
index ca3f881..5959bfb 100644
--- a/arch/s390/include/asm/sfp-util.h
+++ b/arch/s390/include/asm/sfp-util.h
@@ -51,7 +51,7 @@
 	wl = __wl;					\
 })
 
-#ifdef __s390x__
+#ifdef CONFIG_64BIT
 #define udiv_qrnnd(q, r, n1, n0, d)			\
   do { unsigned long __n;				\
        unsigned int __r, __d;				\
diff --git a/arch/s390/include/asm/string.h b/arch/s390/include/asm/string.h
index cd0241d..8cc160c 100644
--- a/arch/s390/include/asm/string.h
+++ b/arch/s390/include/asm/string.h
@@ -9,8 +9,6 @@
 #ifndef _S390_STRING_H_
 #define _S390_STRING_H_
 
-#ifdef __KERNEL__
-
 #ifndef _LINUX_TYPES_H
 #include <linux/types.h>
 #endif
@@ -152,6 +150,4 @@
 size_t strnlen(const char * s, size_t n);
 #endif /* !IN_ARCH_STRING_C */
 
-#endif /* __KERNEL__ */
-
 #endif /* __S390_STRING_H_ */
diff --git a/arch/s390/include/asm/thread_info.h b/arch/s390/include/asm/thread_info.h
index 003b04e..4e40b25 100644
--- a/arch/s390/include/asm/thread_info.h
+++ b/arch/s390/include/asm/thread_info.h
@@ -9,15 +9,13 @@
 #ifndef _ASM_THREAD_INFO_H
 #define _ASM_THREAD_INFO_H
 
-#ifdef __KERNEL__
-
 /*
  * Size of kernel stack for each process
  */
-#ifndef __s390x__
+#ifndef CONFIG_64BIT
 #define THREAD_ORDER 1
 #define ASYNC_ORDER  1
-#else /* __s390x__ */
+#else /* CONFIG_64BIT */
 #ifndef __SMALL_STACK
 #define THREAD_ORDER 2
 #define ASYNC_ORDER  2
@@ -25,7 +23,7 @@
 #define THREAD_ORDER 1
 #define ASYNC_ORDER  1
 #endif
-#endif /* __s390x__ */
+#endif /* CONFIG_64BIT */
 
 #define THREAD_SIZE (PAGE_SIZE << THREAD_ORDER)
 #define ASYNC_SIZE  (PAGE_SIZE << ASYNC_ORDER)
@@ -123,8 +121,6 @@
 #define is_32bit_task()		(1)
 #endif
 
-#endif /* __KERNEL__ */
-
 #define PREEMPT_ACTIVE		0x4000000
 
 #endif /* _ASM_THREAD_INFO_H */
diff --git a/arch/s390/include/asm/timer.h b/arch/s390/include/asm/timer.h
index e63069b..15d6479 100644
--- a/arch/s390/include/asm/timer.h
+++ b/arch/s390/include/asm/timer.h
@@ -10,8 +10,6 @@
 #ifndef _ASM_S390_TIMER_H
 #define _ASM_S390_TIMER_H
 
-#ifdef __KERNEL__
-
 #include <linux/timer.h>
 
 #define VTIMER_MAX_SLICE (0x7ffffffffffff000LL)
@@ -50,6 +48,4 @@
 extern void vtime_stop_cpu(void);
 extern void vtime_start_leave(void);
 
-#endif /* __KERNEL__ */
-
 #endif /* _ASM_S390_TIMER_H */
diff --git a/arch/s390/include/asm/tlb.h b/arch/s390/include/asm/tlb.h
index 775a5ee..06e5acb 100644
--- a/arch/s390/include/asm/tlb.h
+++ b/arch/s390/include/asm/tlb.h
@@ -106,7 +106,7 @@
 static inline void pmd_free_tlb(struct mmu_gather *tlb, pmd_t *pmd,
 				unsigned long address)
 {
-#ifdef __s390x__
+#ifdef CONFIG_64BIT
 	if (tlb->mm->context.asce_limit <= (1UL << 31))
 		return;
 	if (!tlb->fullmm)
@@ -125,7 +125,7 @@
 static inline void pud_free_tlb(struct mmu_gather *tlb, pud_t *pud,
 				unsigned long address)
 {
-#ifdef __s390x__
+#ifdef CONFIG_64BIT
 	if (tlb->mm->context.asce_limit <= (1UL << 42))
 		return;
 	if (!tlb->fullmm)
diff --git a/arch/s390/include/asm/tlbflush.h b/arch/s390/include/asm/tlbflush.h
index 1d8648c..9fde315 100644
--- a/arch/s390/include/asm/tlbflush.h
+++ b/arch/s390/include/asm/tlbflush.h
@@ -27,12 +27,12 @@
 	register unsigned long reg4 asm("4");
 	long dummy;
 
-#ifndef __s390x__
+#ifndef CONFIG_64BIT
 	if (!MACHINE_HAS_CSP) {
 		smp_ptlb_all();
 		return;
 	}
-#endif /* __s390x__ */
+#endif /* CONFIG_64BIT */
 
 	dummy = 0;
 	reg2 = reg3 = 0;
diff --git a/arch/s390/include/asm/types.h b/arch/s390/include/asm/types.h
index 05ebbcd..6c8c35f 100644
--- a/arch/s390/include/asm/types.h
+++ b/arch/s390/include/asm/types.h
@@ -28,7 +28,7 @@
 
 #ifndef __ASSEMBLY__
 
-#ifndef __s390x__
+#ifndef CONFIG_64BIT
 typedef union {
 	unsigned long long pair;
 	struct {
@@ -37,7 +37,7 @@
 	} subreg;
 } register_pair;
 
-#endif /* ! __s390x__   */
+#endif /* ! CONFIG_64BIT   */
 #endif /* __ASSEMBLY__  */
 #endif /* __KERNEL__    */
 #endif /* _S390_TYPES_H */
diff --git a/arch/s390/include/asm/uaccess.h b/arch/s390/include/asm/uaccess.h
index 8f2cada..1f3a79b 100644
--- a/arch/s390/include/asm/uaccess.h
+++ b/arch/s390/include/asm/uaccess.h
@@ -50,10 +50,15 @@
 
 #define segment_eq(a,b) ((a).ar4 == (b).ar4)
 
-#define __access_ok(addr, size)	\
-({				\
-	__chk_user_ptr(addr);	\
-	1;			\
+static inline int __range_ok(unsigned long addr, unsigned long size)
+{
+	return 1;
+}
+
+#define __access_ok(addr, size)				\
+({							\
+	__chk_user_ptr(addr);				\
+	__range_ok((unsigned long)(addr), (size));	\
 })
 
 #define access_ok(type, addr, size) __access_ok(addr, size)
@@ -377,7 +382,7 @@
 }
 
 extern int memcpy_real(void *, void *, size_t);
-extern void copy_to_absolute_zero(void *dest, void *src, size_t count);
+extern void memcpy_absolute(void *, void *, size_t);
 extern int copy_to_user_real(void __user *dest, void *src, size_t count);
 extern int copy_from_user_real(void *dest, void __user *src, size_t count);
 
diff --git a/arch/s390/include/asm/vdso.h b/arch/s390/include/asm/vdso.h
index c4a11cf..a73eb2e 100644
--- a/arch/s390/include/asm/vdso.h
+++ b/arch/s390/include/asm/vdso.h
@@ -1,8 +1,6 @@
 #ifndef __S390_VDSO_H__
 #define __S390_VDSO_H__
 
-#ifdef __KERNEL__
-
 /* Default link addresses for the vDSOs */
 #define VDSO32_LBASE	0
 #define VDSO64_LBASE	0
@@ -45,7 +43,4 @@
 #endif
 
 #endif /* __ASSEMBLY__ */
-
-#endif /* __KERNEL__ */
-
 #endif /* __S390_VDSO_H__ */
diff --git a/arch/s390/kernel/base.S b/arch/s390/kernel/base.S
index 3aa4d00..c880ff7 100644
--- a/arch/s390/kernel/base.S
+++ b/arch/s390/kernel/base.S
@@ -88,6 +88,9 @@
 	stctg	%c0,%c15,0(%r4)
 	larl	%r4,.Lfpctl		# Floating point control register
 	stfpc	0(%r4)
+	larl	%r4,.Lcontinue_psw	# Save PSW flags
+	epsw	%r2,%r3
+	stm	%r2,%r3,0(%r4)
 	larl	%r4,.Lrestart_psw	# Setup restart PSW at absolute 0
 	lghi	%r3,0
 	lg	%r4,0(%r4)		# Save PSW
@@ -103,11 +106,20 @@
 	lctlg	%c0,%c15,0(%r4)
 	larl	%r4,.Lfpctl		# Restore floating point ctl register
 	lfpc	0(%r4)
+	larl	%r4,.Lcontinue_psw	# Restore PSW flags
+	lpswe	0(%r4)
+.Lcontinue:
 	br	%r14
 .align 16
 .Lrestart_psw:
 	.long	0x00080000,0x80000000 + .Lrestart_part2
 
+	.section .data..nosave,"aw",@progbits
+.align 8
+.Lcontinue_psw:
+	.quad	0,.Lcontinue
+	.previous
+
 	.section .bss
 .align 8
 .Lctlregs:
diff --git a/arch/s390/kernel/compat_signal.c b/arch/s390/kernel/compat_signal.c
index 377c096..3c0c198 100644
--- a/arch/s390/kernel/compat_signal.c
+++ b/arch/s390/kernel/compat_signal.c
@@ -32,8 +32,6 @@
 #include "compat_ptrace.h"
 #include "entry.h"
 
-#define _BLOCKABLE (~(sigmask(SIGKILL) | sigmask(SIGSTOP)))
-
 typedef struct 
 {
 	__u8 callee_used_stack[__SIGNAL_FRAMESIZE32];
@@ -364,7 +362,6 @@
 		goto badframe;
 	if (__copy_from_user(&set.sig, &frame->sc.oldmask, _SIGMASK_COPY_SIZE32))
 		goto badframe;
-	sigdelsetmask(&set, ~_BLOCKABLE);
 	set_current_blocked(&set);
 	if (restore_sigregs32(regs, &frame->sregs))
 		goto badframe;
@@ -390,7 +387,6 @@
 		goto badframe;
 	if (__copy_from_user(&set, &frame->uc.uc_sigmask, sizeof(set)))
 		goto badframe;
-	sigdelsetmask(&set, ~_BLOCKABLE);
 	set_current_blocked(&set);
 	if (restore_sigregs32(regs, &frame->uc.uc_mcontext))
 		goto badframe;
@@ -572,7 +568,7 @@
  * OK, we're invoking a handler
  */	
 
-int handle_signal32(unsigned long sig, struct k_sigaction *ka,
+void handle_signal32(unsigned long sig, struct k_sigaction *ka,
 		    siginfo_t *info, sigset_t *oldset, struct pt_regs *regs)
 {
 	int ret;
@@ -583,8 +579,8 @@
 	else
 		ret = setup_frame32(sig, ka, oldset, regs);
 	if (ret)
-		return ret;
-	block_sigmask(ka, sig);
-	return 0;
+		return;
+	signal_delivered(sig, info, ka, regs,
+				 test_thread_flag(TIF_SINGLE_STEP));
 }
 
diff --git a/arch/s390/kernel/early.c b/arch/s390/kernel/early.c
index d84181f..6684fff 100644
--- a/arch/s390/kernel/early.c
+++ b/arch/s390/kernel/early.c
@@ -237,7 +237,7 @@
 		S390_lowcore.machine_flags |= MACHINE_FLAG_VM;
 }
 
-static __init void early_pgm_check_handler(void)
+static void early_pgm_check_handler(void)
 {
 	unsigned long addr;
 	const struct exception_table_entry *fixup;
diff --git a/arch/s390/kernel/entry.h b/arch/s390/kernel/entry.h
index 6cdddac..f66a229 100644
--- a/arch/s390/kernel/entry.h
+++ b/arch/s390/kernel/entry.h
@@ -31,7 +31,7 @@
 void syscall_trace(struct pt_regs *regs, int entryexit);
 void kernel_stack_overflow(struct pt_regs * regs);
 void do_signal(struct pt_regs *regs);
-int handle_signal32(unsigned long sig, struct k_sigaction *ka,
+void handle_signal32(unsigned long sig, struct k_sigaction *ka,
 		    siginfo_t *info, sigset_t *oldset, struct pt_regs *regs);
 void do_notify_resume(struct pt_regs *regs);
 
diff --git a/arch/s390/kernel/head_kdump.S b/arch/s390/kernel/head_kdump.S
index e1ac389..796c976 100644
--- a/arch/s390/kernel/head_kdump.S
+++ b/arch/s390/kernel/head_kdump.S
@@ -85,11 +85,6 @@
 	basr	%r13,0
 0:
 	mvc	0(8,%r0),.Lrestart_psw-0b(%r13)	# Setup restart PSW
-	mvc	464(16,%r0),.Lpgm_psw-0b(%r13)	# Setup pgm check PSW
-	lhi	%r1,1				# Start new kernel
-	diag	%r1,%r1,0x308			# with diag 308
-
-.Lno_diag308:					# No diag 308
 	sam31					# Switch to 31 bit addr mode
 	sr	%r1,%r1				# Erase register r1
 	sr	%r2,%r2				# Erase register r2
@@ -98,8 +93,6 @@
 .align	8
 .Lrestart_psw:
 	.long	0x00080000,0x80000000 + startup
-.Lpgm_psw:
-	.quad	0x0000000180000000,0x0000000000000000 + .Lno_diag308
 #else
 .align 2
 .Lep_startup_kdump:
diff --git a/arch/s390/kernel/ipl.c b/arch/s390/kernel/ipl.c
index 8342e65..2f6cfd4 100644
--- a/arch/s390/kernel/ipl.c
+++ b/arch/s390/kernel/ipl.c
@@ -1528,12 +1528,15 @@
 
 static void dump_reipl_run(struct shutdown_trigger *trigger)
 {
-	u32 csum;
+	struct {
+		void	*addr;
+		__u32	csum;
+	} __packed ipib;
 
-	csum = csum_partial(reipl_block_actual, reipl_block_actual->hdr.len, 0);
-	copy_to_absolute_zero(&S390_lowcore.ipib_checksum, &csum, sizeof(csum));
-	copy_to_absolute_zero(&S390_lowcore.ipib, &reipl_block_actual,
-			      sizeof(reipl_block_actual));
+	ipib.csum = csum_partial(reipl_block_actual,
+				 reipl_block_actual->hdr.len, 0);
+	ipib.addr = reipl_block_actual;
+	memcpy_absolute(&S390_lowcore.ipib, &ipib, sizeof(ipib));
 	dump_run(trigger);
 }
 
@@ -1750,6 +1753,7 @@
 
 static void __do_restart(void *ignore)
 {
+	__arch_local_irq_stosm(0x04); /* enable DAT */
 	smp_send_stop();
 #ifdef CONFIG_CRASH_DUMP
 	crash_kexec(NULL);
diff --git a/arch/s390/kernel/irq.c b/arch/s390/kernel/irq.c
index 8a22c27..b4f4a71 100644
--- a/arch/s390/kernel/irq.c
+++ b/arch/s390/kernel/irq.c
@@ -42,7 +42,8 @@
 	{.name = "VRT", .desc = "[EXT] Virtio" },
 	{.name = "SCP", .desc = "[EXT] Service Call" },
 	{.name = "IUC", .desc = "[EXT] IUCV" },
-	{.name = "CPM", .desc = "[EXT] CPU Measurement" },
+	{.name = "CMS", .desc = "[EXT] CPU-Measurement: Sampling" },
+	{.name = "CMC", .desc = "[EXT] CPU-Measurement: Counter" },
 	{.name = "CIO", .desc = "[I/O] Common I/O Layer Interrupt" },
 	{.name = "QAI", .desc = "[I/O] QDIO Adapter Interrupt" },
 	{.name = "DAS", .desc = "[I/O] DASD" },
diff --git a/arch/s390/kernel/machine_kexec.c b/arch/s390/kernel/machine_kexec.c
index bdad47d..cdacf8f 100644
--- a/arch/s390/kernel/machine_kexec.c
+++ b/arch/s390/kernel/machine_kexec.c
@@ -24,6 +24,7 @@
 #include <asm/ipl.h>
 #include <asm/diag.h>
 #include <asm/asm-offsets.h>
+#include <asm/os_info.h>
 
 typedef void (*relocate_kernel_t)(kimage_entry_t *, unsigned long);
 
@@ -79,8 +80,8 @@
 #ifdef CONFIG_CRASH_DUMP
 	int (*start_kdump)(int) = (void *)((struct kimage *) image)->start;
 
-	__load_psw_mask(PSW_MASK_BASE | PSW_DEFAULT_KEY | PSW_MASK_EA | PSW_MASK_BA);
 	setup_regs();
+	__load_psw_mask(PSW_MASK_BASE | PSW_DEFAULT_KEY | PSW_MASK_EA | PSW_MASK_BA);
 	start_kdump(1);
 #endif
 }
@@ -114,8 +115,13 @@
 	       size % KEXEC_CRASH_MEM_ALIGN);
 	if (enable)
 		vmem_add_mapping(crashk_res.start, size);
-	else
+	else {
 		vmem_remove_mapping(crashk_res.start, size);
+		if (size)
+			os_info_crashkernel_add(crashk_res.start, size);
+		else
+			os_info_crashkernel_add(0, 0);
+	}
 }
 
 /*
@@ -208,6 +214,7 @@
 {
 	struct kimage *image = data;
 
+	__arch_local_irq_stosm(0x04); /* enable DAT */
 	pfault_fini();
 	tracing_off();
 	debug_locks_off();
diff --git a/arch/s390/kernel/os_info.c b/arch/s390/kernel/os_info.c
index e8d6c21..95fa5ac 100644
--- a/arch/s390/kernel/os_info.c
+++ b/arch/s390/kernel/os_info.c
@@ -60,7 +60,7 @@
 	os_info.version_minor = OS_INFO_VERSION_MINOR;
 	os_info.magic = OS_INFO_MAGIC;
 	os_info.csum = os_info_csum(&os_info);
-	copy_to_absolute_zero(&S390_lowcore.os_info, &ptr, sizeof(ptr));
+	memcpy_absolute(&S390_lowcore.os_info, &ptr, sizeof(ptr));
 }
 
 #ifdef CONFIG_CRASH_DUMP
@@ -138,7 +138,6 @@
 		goto fail_free;
 	os_info_old_alloc(OS_INFO_VMCOREINFO, 1);
 	os_info_old_alloc(OS_INFO_REIPL_BLOCK, 1);
-	os_info_old_alloc(OS_INFO_INIT_FN, PAGE_SIZE);
 	pr_info("crashkernel: addr=0x%lx size=%lu\n",
 		(unsigned long) os_info_old->crashkernel_addr,
 		(unsigned long) os_info_old->crashkernel_size);
diff --git a/arch/s390/kernel/perf_cpum_cf.c b/arch/s390/kernel/perf_cpum_cf.c
index cb019f4..9871b19 100644
--- a/arch/s390/kernel/perf_cpum_cf.c
+++ b/arch/s390/kernel/perf_cpum_cf.c
@@ -225,7 +225,7 @@
 	if (!(alert & CPU_MF_INT_CF_MASK))
 		return;
 
-	kstat_cpu(smp_processor_id()).irqs[EXTINT_CPM]++;
+	kstat_cpu(smp_processor_id()).irqs[EXTINT_CMC]++;
 	cpuhw = &__get_cpu_var(cpu_hw_events);
 
 	/* Measurement alerts are shared and might happen when the PMU
diff --git a/arch/s390/kernel/setup.c b/arch/s390/kernel/setup.c
index 06264ae..489d1d8 100644
--- a/arch/s390/kernel/setup.c
+++ b/arch/s390/kernel/setup.c
@@ -428,10 +428,12 @@
 	lc->restart_fn = (unsigned long) do_restart;
 	lc->restart_data = 0;
 	lc->restart_source = -1UL;
-	memcpy(&S390_lowcore.restart_stack, &lc->restart_stack,
-	       4*sizeof(unsigned long));
-	copy_to_absolute_zero(&S390_lowcore.restart_psw,
-			      &lc->restart_psw, sizeof(psw_t));
+
+	/* Setup absolute zero lowcore */
+	memcpy_absolute(&S390_lowcore.restart_stack, &lc->restart_stack,
+			4 * sizeof(unsigned long));
+	memcpy_absolute(&S390_lowcore.restart_psw, &lc->restart_psw,
+			sizeof(lc->restart_psw));
 
 	set_prefix((u32)(unsigned long) lc);
 	lowcore_ptr[0] = lc;
@@ -598,7 +600,7 @@
 #ifdef CONFIG_KEXEC
 	unsigned long ptr = paddr_vmcoreinfo_note();
 
-	copy_to_absolute_zero(&S390_lowcore.vmcore_info, &ptr, sizeof(ptr));
+	memcpy_absolute(&S390_lowcore.vmcore_info, &ptr, sizeof(ptr));
 #endif
 }
 
diff --git a/arch/s390/kernel/signal.c b/arch/s390/kernel/signal.c
index f626232..ac565b4 100644
--- a/arch/s390/kernel/signal.c
+++ b/arch/s390/kernel/signal.c
@@ -33,9 +33,6 @@
 #include <asm/switch_to.h>
 #include "entry.h"
 
-#define _BLOCKABLE (~(sigmask(SIGKILL) | sigmask(SIGSTOP)))
-
-
 typedef struct 
 {
 	__u8 callee_used_stack[__SIGNAL_FRAMESIZE];
@@ -169,7 +166,6 @@
 		goto badframe;
 	if (__copy_from_user(&set.sig, &frame->sc.oldmask, _SIGMASK_COPY_SIZE))
 		goto badframe;
-	sigdelsetmask(&set, ~_BLOCKABLE);
 	set_current_blocked(&set);
 	if (restore_sigregs(regs, &frame->sregs))
 		goto badframe;
@@ -189,7 +185,6 @@
 		goto badframe;
 	if (__copy_from_user(&set.sig, &frame->uc.uc_sigmask, sizeof(set)))
 		goto badframe;
-	sigdelsetmask(&set, ~_BLOCKABLE);
 	set_current_blocked(&set);
 	if (restore_sigregs(regs, &frame->uc.uc_mcontext))
 		goto badframe;
@@ -367,7 +362,7 @@
 	return -EFAULT;
 }
 
-static int handle_signal(unsigned long sig, struct k_sigaction *ka,
+static void handle_signal(unsigned long sig, struct k_sigaction *ka,
 			 siginfo_t *info, sigset_t *oldset,
 			 struct pt_regs *regs)
 {
@@ -379,9 +374,9 @@
 	else
 		ret = setup_frame(sig, ka, oldset, regs);
 	if (ret)
-		return ret;
-	block_sigmask(ka, sig);
-	return 0;
+		return;
+	signal_delivered(sig, info, ka, regs,
+				 test_thread_flag(TIF_SINGLE_STEP));
 }
 
 /*
@@ -398,12 +393,7 @@
 	siginfo_t info;
 	int signr;
 	struct k_sigaction ka;
-	sigset_t *oldset;
-
-	if (test_thread_flag(TIF_RESTORE_SIGMASK))
-		oldset = &current->saved_sigmask;
-	else
-		oldset = &current->blocked;
+	sigset_t *oldset = sigmask_to_save();
 
 	/*
 	 * Get signal to deliver. When running under ptrace, at this point
@@ -441,24 +431,10 @@
 		/* No longer in a system call */
 		clear_thread_flag(TIF_SYSCALL);
 
-		if ((is_compat_task() ?
-		     handle_signal32(signr, &ka, &info, oldset, regs) :
-		     handle_signal(signr, &ka, &info, oldset, regs)) == 0) {
-			/*
-			 * A signal was successfully delivered; the saved
-			 * sigmask will have been stored in the signal frame,
-			 * and will be restored by sigreturn, so we can simply
-			 * clear the TIF_RESTORE_SIGMASK flag.
-			 */
-			if (test_thread_flag(TIF_RESTORE_SIGMASK))
-				clear_thread_flag(TIF_RESTORE_SIGMASK);
-
-			/*
-			 * Let tracing know that we've done the handler setup.
-			 */
-			tracehook_signal_handler(signr, &info, &ka, regs,
-					 test_thread_flag(TIF_SINGLE_STEP));
-		}
+		if (is_compat_task())
+			handle_signal32(signr, &ka, &info, oldset, regs);
+		else
+			handle_signal(signr, &ka, &info, oldset, regs);
 		return;
 	}
 
@@ -484,16 +460,11 @@
 	/*
 	 * If there's no signal to deliver, we just put the saved sigmask back.
 	 */
-	if (test_thread_flag(TIF_RESTORE_SIGMASK)) {
-		clear_thread_flag(TIF_RESTORE_SIGMASK);
-		sigprocmask(SIG_SETMASK, &current->saved_sigmask, NULL);
-	}
+	restore_saved_sigmask();
 }
 
 void do_notify_resume(struct pt_regs *regs)
 {
 	clear_thread_flag(TIF_NOTIFY_RESUME);
 	tracehook_notify_resume(regs);
-	if (current->replacement_session_keyring)
-		key_replace_session_keyring();
 }
diff --git a/arch/s390/kernel/smp.c b/arch/s390/kernel/smp.c
index 647ba94..15cca26 100644
--- a/arch/s390/kernel/smp.c
+++ b/arch/s390/kernel/smp.c
@@ -297,26 +297,27 @@
 static void pcpu_delegate(struct pcpu *pcpu, void (*func)(void *),
 			  void *data, unsigned long stack)
 {
-	struct _lowcore *lc = pcpu->lowcore;
-	unsigned short this_cpu;
+	struct _lowcore *lc = lowcore_ptr[pcpu - pcpu_devices];
+	struct {
+		unsigned long	stack;
+		void		*func;
+		void		*data;
+		unsigned long	source;
+	} restart = { stack, func, data, stap() };
 
 	__load_psw_mask(psw_kernel_bits);
-	this_cpu = stap();
-	if (pcpu->address == this_cpu)
+	if (pcpu->address == restart.source)
 		func(data);	/* should not return */
 	/* Stop target cpu (if func returns this stops the current cpu). */
 	pcpu_sigp_retry(pcpu, sigp_stop, 0);
 	/* Restart func on the target cpu and stop the current cpu. */
-	lc->restart_stack = stack;
-	lc->restart_fn = (unsigned long) func;
-	lc->restart_data = (unsigned long) data;
-	lc->restart_source = (unsigned long) this_cpu;
+	memcpy_absolute(&lc->restart_stack, &restart, sizeof(restart));
 	asm volatile(
 		"0:	sigp	0,%0,6	# sigp restart to target cpu\n"
 		"	brc	2,0b	# busy, try again\n"
 		"1:	sigp	0,%1,5	# sigp stop to current cpu\n"
 		"	brc	2,1b	# busy, try again\n"
-		: : "d" (pcpu->address), "d" (this_cpu) : "0", "1", "cc");
+		: : "d" (pcpu->address), "d" (restart.source) : "0", "1", "cc");
 	for (;;) ;
 }
 
@@ -800,17 +801,6 @@
 
 #endif /* CONFIG_HOTPLUG_CPU */
 
-static void smp_call_os_info_init_fn(void)
-{
-	int (*init_fn)(void);
-	unsigned long size;
-
-	init_fn = os_info_old_entry(OS_INFO_INIT_FN, &size);
-	if (!init_fn)
-		return;
-	init_fn();
-}
-
 void __init smp_prepare_cpus(unsigned int max_cpus)
 {
 	/* request the 0x1201 emergency signal external interrupt */
@@ -819,7 +809,6 @@
 	/* request the 0x1202 external call external interrupt */
 	if (register_external_interrupt(0x1202, do_ext_call_interrupt) != 0)
 		panic("Couldn't request external interrupt 0x1202");
-	smp_call_os_info_init_fn();
 	smp_detect_cpus();
 }
 
@@ -943,19 +932,6 @@
 	.attrs = cpu_common_attrs,
 };
 
-static ssize_t show_capability(struct device *dev,
-				struct device_attribute *attr, char *buf)
-{
-	unsigned int capability;
-	int rc;
-
-	rc = get_cpu_capability(&capability);
-	if (rc)
-		return rc;
-	return sprintf(buf, "%u\n", capability);
-}
-static DEVICE_ATTR(capability, 0444, show_capability, NULL);
-
 static ssize_t show_idle_count(struct device *dev,
 				struct device_attribute *attr, char *buf)
 {
@@ -993,7 +969,6 @@
 static DEVICE_ATTR(idle_time_us, 0444, show_idle_time, NULL);
 
 static struct attribute *cpu_online_attrs[] = {
-	&dev_attr_capability.attr,
 	&dev_attr_idle_count.attr,
 	&dev_attr_idle_time_us.attr,
 	NULL,
diff --git a/arch/s390/kernel/sysinfo.c b/arch/s390/kernel/sysinfo.c
index 2a94b77..fa0eb23 100644
--- a/arch/s390/kernel/sysinfo.c
+++ b/arch/s390/kernel/sysinfo.c
@@ -393,27 +393,6 @@
 subsys_initcall(create_proc_service_level);
 
 /*
- * Bogomips calculation based on cpu capability.
- */
-int get_cpu_capability(unsigned int *capability)
-{
-	struct sysinfo_1_2_2 *info;
-	int rc;
-
-	info = (void *) get_zeroed_page(GFP_KERNEL);
-	if (!info)
-		return -ENOMEM;
-	rc = stsi(info, 1, 2, 2);
-	if (rc == -ENOSYS)
-		goto out;
-	rc = 0;
-	*capability = info->capability;
-out:
-	free_page((unsigned long) info);
-	return rc;
-}
-
-/*
  * CPU capability might have changed. Therefore recalculate loops_per_jiffy.
  */
 void s390_adjust_jiffies(void)
diff --git a/arch/s390/lib/uaccess_mvcos.c b/arch/s390/lib/uaccess_mvcos.c
index 60455f1..58a75a8 100644
--- a/arch/s390/lib/uaccess_mvcos.c
+++ b/arch/s390/lib/uaccess_mvcos.c
@@ -14,7 +14,7 @@
 #include <asm/futex.h>
 #include "uaccess.h"
 
-#ifndef __s390x__
+#ifndef CONFIG_64BIT
 #define AHI	"ahi"
 #define ALR	"alr"
 #define CLR	"clr"
diff --git a/arch/s390/lib/uaccess_std.c b/arch/s390/lib/uaccess_std.c
index bb1a7ee..57e9429 100644
--- a/arch/s390/lib/uaccess_std.c
+++ b/arch/s390/lib/uaccess_std.c
@@ -15,7 +15,7 @@
 #include <asm/futex.h>
 #include "uaccess.h"
 
-#ifndef __s390x__
+#ifndef CONFIG_64BIT
 #define AHI	"ahi"
 #define ALR	"alr"
 #define CLR	"clr"
diff --git a/arch/s390/mm/maccess.c b/arch/s390/mm/maccess.c
index 795a0a9..921fa54 100644
--- a/arch/s390/mm/maccess.c
+++ b/arch/s390/mm/maccess.c
@@ -101,19 +101,27 @@
 }
 
 /*
- * Copy memory to absolute zero
+ * Copy memory in absolute mode (kernel to kernel)
  */
-void copy_to_absolute_zero(void *dest, void *src, size_t count)
+void memcpy_absolute(void *dest, void *src, size_t count)
 {
-	unsigned long cr0;
+	unsigned long cr0, flags, prefix;
 
-	BUG_ON((unsigned long) dest + count >= sizeof(struct _lowcore));
-	preempt_disable();
+	flags = arch_local_irq_save();
 	__ctl_store(cr0, 0, 0);
 	__ctl_clear_bit(0, 28); /* disable lowcore protection */
-	memcpy_real(dest + store_prefix(), src, count);
+	prefix = store_prefix();
+	if (prefix) {
+		local_mcck_disable();
+		set_prefix(0);
+		memcpy(dest, src, count);
+		set_prefix(prefix);
+		local_mcck_enable();
+	} else {
+		memcpy(dest, src, count);
+	}
 	__ctl_load(cr0, 0, 0);
-	preempt_enable();
+	arch_local_irq_restore(flags);
 }
 
 /*
@@ -188,20 +196,6 @@
 }
 
 /*
- * Return swapped prefix or zero page address
- */
-static unsigned long get_swapped(unsigned long addr)
-{
-	unsigned long prefix = store_prefix();
-
-	if (addr < sizeof(struct _lowcore))
-		return addr + prefix;
-	if (addr >= prefix && addr < prefix + sizeof(struct _lowcore))
-		return addr - prefix;
-	return addr;
-}
-
-/*
  * Convert a physical pointer for /dev/mem access
  *
  * For swapped prefix pages a new buffer is returned that contains a copy of
@@ -218,7 +212,7 @@
 		size = PAGE_SIZE - (addr & ~PAGE_MASK);
 		bounce = (void *) __get_free_page(GFP_ATOMIC);
 		if (bounce)
-			memcpy_real(bounce, (void *) get_swapped(addr), size);
+			memcpy_absolute(bounce, (void *) addr, size);
 	}
 	preempt_enable();
 	put_online_cpus();
diff --git a/arch/s390/mm/vmem.c b/arch/s390/mm/vmem.c
index 4799383..71ae20d 100644
--- a/arch/s390/mm/vmem.c
+++ b/arch/s390/mm/vmem.c
@@ -109,7 +109,7 @@
 		pte = mk_pte_phys(address, __pgprot(ro ? _PAGE_RO : 0));
 		pm_dir = pmd_offset(pu_dir, address);
 
-#ifdef __s390x__
+#ifdef CONFIG_64BIT
 		if (MACHINE_HAS_HPAGE && !(address & ~HPAGE_MASK) &&
 		    (address + HPAGE_SIZE <= start + size) &&
 		    (address >= HPAGE_SIZE)) {
diff --git a/arch/s390/oprofile/hwsampler.c b/arch/s390/oprofile/hwsampler.c
index c6646de..a4a89fa 100644
--- a/arch/s390/oprofile/hwsampler.c
+++ b/arch/s390/oprofile/hwsampler.c
@@ -235,7 +235,7 @@
 	if (!(param32 & CPU_MF_INT_SF_MASK))
 		return;
 
-	kstat_cpu(smp_processor_id()).irqs[EXTINT_CPM]++;
+	kstat_cpu(smp_processor_id()).irqs[EXTINT_CMS]++;
 	atomic_xchg(&cb->ext_params, atomic_read(&cb->ext_params) | param32);
 
 	if (hws_wq)
diff --git a/arch/score/kernel/signal.c b/arch/score/kernel/signal.c
index d4a49011..e382c52 100644
--- a/arch/score/kernel/signal.c
+++ b/arch/score/kernel/signal.c
@@ -34,8 +34,6 @@
 #include <asm/syscalls.h>
 #include <asm/ucontext.h>
 
-#define _BLOCKABLE (~(sigmask(SIGKILL) | sigmask(SIGSTOP)))
-
 struct rt_sigframe {
 	u32 rs_ass[4];		/* argument save space */
 	u32 rs_code[2];		/* signal trampoline */
@@ -162,7 +160,6 @@
 	if (__copy_from_user(&set, &frame->rs_uc.uc_sigmask, sizeof(set)))
 		goto badframe;
 
-	sigdelsetmask(&set, ~_BLOCKABLE);
 	set_current_blocked(&set);
 
 	sig = restore_sigcontext(regs, &frame->rs_uc.uc_mcontext);
@@ -241,11 +238,9 @@
 	return -EFAULT;
 }
 
-static int handle_signal(unsigned long sig, siginfo_t *info,
-	struct k_sigaction *ka, sigset_t *oldset, struct pt_regs *regs)
+static void handle_signal(unsigned long sig, siginfo_t *info,
+	struct k_sigaction *ka, struct pt_regs *regs)
 {
-	int ret;
-
 	if (regs->is_syscall) {
 		switch (regs->regs[4]) {
 		case ERESTART_RESTARTBLOCK:
@@ -269,18 +264,15 @@
 	/*
 	 * Set up the stack frame
 	 */
-	ret = setup_rt_frame(ka, regs, sig, oldset, info);
+	if (setup_rt_frame(ka, regs, sig, sigmask_to_save(), info) < 0)
+		return;
 
-	if (ret == 0)
-		block_sigmask(ka, sig);
-
-	return ret;
+	signal_delivered(sig, info, ka, regs, 0);
 }
 
 static void do_signal(struct pt_regs *regs)
 {
 	struct k_sigaction ka;
-	sigset_t *oldset;
 	siginfo_t info;
 	int signr;
 
@@ -292,25 +284,10 @@
 	if (!user_mode(regs))
 		return;
 
-	if (test_thread_flag(TIF_RESTORE_SIGMASK))
-		oldset = &current->saved_sigmask;
-	else
-		oldset = &current->blocked;
-
 	signr = get_signal_to_deliver(&info, &ka, regs, NULL);
 	if (signr > 0) {
 		/* Actually deliver the signal.  */
-		if (handle_signal(signr, &info, &ka, oldset, regs) == 0) {
-			/*
-			 * A signal was successfully delivered; the saved
-			 * sigmask will have been stored in the signal frame,
-			 * and will be restored by sigreturn, so we can simply
-			 * clear the TIF_RESTORE_SIGMASK flag.
-			 */
-			if (test_thread_flag(TIF_RESTORE_SIGMASK))
-				clear_thread_flag(TIF_RESTORE_SIGMASK);
-		}
-
+		handle_signal(signr, &info, &ka, regs);
 		return;
 	}
 
@@ -337,10 +314,7 @@
 	 * If there's no signal to deliver, we just put the saved sigmask
 	 * back
 	 */
-	if (test_thread_flag(TIF_RESTORE_SIGMASK)) {
-		clear_thread_flag(TIF_RESTORE_SIGMASK);
-		sigprocmask(SIG_SETMASK, &current->saved_sigmask, NULL);
-	}
+	restore_saved_sigmask();
 }
 
 /*
@@ -356,7 +330,5 @@
 	if (thread_info_flags & _TIF_NOTIFY_RESUME) {
 		clear_thread_flag(TIF_NOTIFY_RESUME);
 		tracehook_notify_resume(regs);
-		if (current->replacement_session_keyring)
-			key_replace_session_keyring();
 	}
 }
diff --git a/arch/sh/Kconfig b/arch/sh/Kconfig
index 99bcd0e..31d9db7 100644
--- a/arch/sh/Kconfig
+++ b/arch/sh/Kconfig
@@ -32,6 +32,8 @@
 	select GENERIC_SMP_IDLE_THREAD
 	select GENERIC_CLOCKEVENTS
 	select GENERIC_CMOS_UPDATE if SH_SH03 || SH_DREAMCAST
+	select GENERIC_STRNCPY_FROM_USER
+	select GENERIC_STRNLEN_USER
 	help
 	  The SuperH is a RISC processor targeted for use in embedded systems
 	  and consumer electronics; it was also used in the Sega Dreamcast
diff --git a/arch/sh/Makefile b/arch/sh/Makefile
index 46edf07..aed701c 100644
--- a/arch/sh/Makefile
+++ b/arch/sh/Makefile
@@ -9,6 +9,12 @@
 # License.  See the file "COPYING" in the main directory of this archive
 # for more details.
 #
+ifneq ($(SUBARCH),$(ARCH))
+  ifeq ($(CROSS_COMPILE),)
+    CROSS_COMPILE := $(call cc-cross-prefix, $(UTS_MACHINE)-linux-  $(UTS_MACHINE)-linux-gnu-  $(UTS_MACHINE)-unknown-linux-gnu-)
+  endif
+endif
+
 isa-y					:= any
 isa-$(CONFIG_SH_DSP)			:= sh
 isa-$(CONFIG_CPU_SH2)			:= sh2
@@ -106,19 +112,13 @@
 KBUILD_DEFCONFIG	:= cayman_defconfig
 endif
 
-ifneq ($(SUBARCH),$(ARCH))
-  ifeq ($(CROSS_COMPILE),)
-    CROSS_COMPILE := $(call cc-cross-prefix, $(UTS_MACHINE)-linux-  $(UTS_MACHINE)-linux-gnu-  $(UTS_MACHINE)-unknown-linux-gnu-)
-  endif
-endif
-
 ifdef CONFIG_CPU_LITTLE_ENDIAN
 ld-bfd			:= elf32-$(UTS_MACHINE)-linux
-LDFLAGS_vmlinux		+= --defsym 'jiffies=jiffies_64' --oformat $(ld-bfd)
+LDFLAGS_vmlinux		+= --defsym jiffies=jiffies_64 --oformat $(ld-bfd)
 LDFLAGS			+= -EL
 else
 ld-bfd			:= elf32-$(UTS_MACHINE)big-linux
-LDFLAGS_vmlinux		+= --defsym 'jiffies=jiffies_64+4' --oformat $(ld-bfd)
+LDFLAGS_vmlinux		+= --defsym jiffies=jiffies_64+4 --oformat $(ld-bfd)
 LDFLAGS			+= -EB
 endif
 
diff --git a/arch/sh/boards/mach-kfr2r09/setup.c b/arch/sh/boards/mach-kfr2r09/setup.c
index 158c917..43a179c 100644
--- a/arch/sh/boards/mach-kfr2r09/setup.c
+++ b/arch/sh/boards/mach-kfr2r09/setup.c
@@ -201,8 +201,8 @@
 		.flags	= IORESOURCE_MEM,
 	},
 	[1] = {
-		.start	= evtirq(0xa20),
-		.end	= evtirq(0xa20),
+		.start	= evt2irq(0xa20),
+		.end	= evt2irq(0xa20),
 		.flags	= IORESOURCE_IRQ | IRQF_TRIGGER_LOW,
 	},
 };
diff --git a/arch/sh/boards/mach-migor/setup.c b/arch/sh/boards/mach-migor/setup.c
index 34cd0c5..a8a1ca7 100644
--- a/arch/sh/boards/mach-migor/setup.c
+++ b/arch/sh/boards/mach-migor/setup.c
@@ -188,7 +188,6 @@
 		.partitions = migor_nand_flash_partitions,
 		.nr_partitions = ARRAY_SIZE(migor_nand_flash_partitions),
 		.chip_delay = 20,
-		.part_probe_types = (const char *[]) { "cmdlinepart", NULL },
 	},
 	.ctrl = {
 		.dev_ready = migor_nand_flash_ready,
diff --git a/arch/sh/drivers/pci/pcie-sh7786.c b/arch/sh/drivers/pci/pcie-sh7786.c
index c045142..9e702f2 100644
--- a/arch/sh/drivers/pci/pcie-sh7786.c
+++ b/arch/sh/drivers/pci/pcie-sh7786.c
@@ -239,7 +239,7 @@
 	clk->enable_reg = (void __iomem *)(chan->reg_base + SH4A_PCIEPHYCTLR);
 	clk->enable_bit = BITS_CKE;
 
-	ret = sh_clk_mstp32_register(clk, 1);
+	ret = sh_clk_mstp_register(clk, 1);
 	if (unlikely(ret < 0))
 		goto err_phy;
 
diff --git a/arch/sh/include/asm/Kbuild b/arch/sh/include/asm/Kbuild
index 7beb423..7b673dd 100644
--- a/arch/sh/include/asm/Kbuild
+++ b/arch/sh/include/asm/Kbuild
@@ -1,5 +1,39 @@
 include include/asm-generic/Kbuild.asm
 
+generic-y += bitsperlong.h
+generic-y += cputime.h
+generic-y += current.h
+generic-y += delay.h
+generic-y += div64.h
+generic-y += emergency-restart.h
+generic-y += errno.h
+generic-y += fcntl.h
+generic-y += ioctl.h
+generic-y += ipcbuf.h
+generic-y += irq_regs.h
+generic-y += kvm_para.h
+generic-y += local.h
+generic-y += local64.h
+generic-y += param.h
+generic-y += parport.h
+generic-y += percpu.h
+generic-y += poll.h
+generic-y += mman.h
+generic-y += msgbuf.h
+generic-y += resource.h
+generic-y += scatterlist.h
+generic-y += sembuf.h
+generic-y += serial.h
+generic-y += shmbuf.h
+generic-y += siginfo.h
+generic-y += sizes.h
+generic-y += socket.h
+generic-y += statfs.h
+generic-y += termbits.h
+generic-y += termios.h
+generic-y += ucontext.h
+generic-y += xor.h
+
 header-y += cachectl.h
 header-y += cpu-features.h
 header-y += hw_breakpoint.h
diff --git a/arch/sh/include/asm/bitsperlong.h b/arch/sh/include/asm/bitsperlong.h
deleted file mode 100644
index 6dc0bb0..0000000
--- a/arch/sh/include/asm/bitsperlong.h
+++ /dev/null
@@ -1 +0,0 @@
-#include <asm-generic/bitsperlong.h>
diff --git a/arch/sh/include/asm/cputime.h b/arch/sh/include/asm/cputime.h
deleted file mode 100644
index 6ca395d..0000000
--- a/arch/sh/include/asm/cputime.h
+++ /dev/null
@@ -1,6 +0,0 @@
-#ifndef __SH_CPUTIME_H
-#define __SH_CPUTIME_H
-
-#include <asm-generic/cputime.h>
-
-#endif /* __SH_CPUTIME_H */
diff --git a/arch/sh/include/asm/current.h b/arch/sh/include/asm/current.h
deleted file mode 100644
index 4c51401..0000000
--- a/arch/sh/include/asm/current.h
+++ /dev/null
@@ -1 +0,0 @@
-#include <asm-generic/current.h>
diff --git a/arch/sh/include/asm/delay.h b/arch/sh/include/asm/delay.h
deleted file mode 100644
index 9670e12..0000000
--- a/arch/sh/include/asm/delay.h
+++ /dev/null
@@ -1 +0,0 @@
-#include <asm-generic/delay.h>
diff --git a/arch/sh/include/asm/div64.h b/arch/sh/include/asm/div64.h
deleted file mode 100644
index 6cd978c..0000000
--- a/arch/sh/include/asm/div64.h
+++ /dev/null
@@ -1 +0,0 @@
-#include <asm-generic/div64.h>
diff --git a/arch/sh/include/asm/emergency-restart.h b/arch/sh/include/asm/emergency-restart.h
deleted file mode 100644
index 108d8c4..0000000
--- a/arch/sh/include/asm/emergency-restart.h
+++ /dev/null
@@ -1,6 +0,0 @@
-#ifndef _ASM_EMERGENCY_RESTART_H
-#define _ASM_EMERGENCY_RESTART_H
-
-#include <asm-generic/emergency-restart.h>
-
-#endif /* _ASM_EMERGENCY_RESTART_H */
diff --git a/arch/sh/include/asm/errno.h b/arch/sh/include/asm/errno.h
deleted file mode 100644
index 51cf6f9..0000000
--- a/arch/sh/include/asm/errno.h
+++ /dev/null
@@ -1,6 +0,0 @@
-#ifndef __ASM_SH_ERRNO_H
-#define __ASM_SH_ERRNO_H
-
-#include <asm-generic/errno.h>
-
-#endif /* __ASM_SH_ERRNO_H */
diff --git a/arch/sh/include/asm/fcntl.h b/arch/sh/include/asm/fcntl.h
deleted file mode 100644
index 46ab12d..0000000
--- a/arch/sh/include/asm/fcntl.h
+++ /dev/null
@@ -1 +0,0 @@
-#include <asm-generic/fcntl.h>
diff --git a/arch/sh/include/asm/ioctl.h b/arch/sh/include/asm/ioctl.h
deleted file mode 100644
index b279fe0..0000000
--- a/arch/sh/include/asm/ioctl.h
+++ /dev/null
@@ -1 +0,0 @@
-#include <asm-generic/ioctl.h>
diff --git a/arch/sh/include/asm/ipcbuf.h b/arch/sh/include/asm/ipcbuf.h
deleted file mode 100644
index 84c7e51..0000000
--- a/arch/sh/include/asm/ipcbuf.h
+++ /dev/null
@@ -1 +0,0 @@
-#include <asm-generic/ipcbuf.h>
diff --git a/arch/sh/include/asm/irq_regs.h b/arch/sh/include/asm/irq_regs.h
deleted file mode 100644
index 3dd9c0b..0000000
--- a/arch/sh/include/asm/irq_regs.h
+++ /dev/null
@@ -1 +0,0 @@
-#include <asm-generic/irq_regs.h>
diff --git a/arch/sh/include/asm/kvm_para.h b/arch/sh/include/asm/kvm_para.h
deleted file mode 100644
index 14fab8f..0000000
--- a/arch/sh/include/asm/kvm_para.h
+++ /dev/null
@@ -1 +0,0 @@
-#include <asm-generic/kvm_para.h>
diff --git a/arch/sh/include/asm/local.h b/arch/sh/include/asm/local.h
deleted file mode 100644
index 9ed9b9c..0000000
--- a/arch/sh/include/asm/local.h
+++ /dev/null
@@ -1,7 +0,0 @@
-#ifndef __ASM_SH_LOCAL_H
-#define __ASM_SH_LOCAL_H
-
-#include <asm-generic/local.h>
-
-#endif /* __ASM_SH_LOCAL_H */
-
diff --git a/arch/sh/include/asm/local64.h b/arch/sh/include/asm/local64.h
deleted file mode 100644
index 36c93b5..0000000
--- a/arch/sh/include/asm/local64.h
+++ /dev/null
@@ -1 +0,0 @@
-#include <asm-generic/local64.h>
diff --git a/arch/sh/include/asm/mman.h b/arch/sh/include/asm/mman.h
deleted file mode 100644
index 8eebf89f5..0000000
--- a/arch/sh/include/asm/mman.h
+++ /dev/null
@@ -1 +0,0 @@
-#include <asm-generic/mman.h>
diff --git a/arch/sh/include/asm/msgbuf.h b/arch/sh/include/asm/msgbuf.h
deleted file mode 100644
index 809134c..0000000
--- a/arch/sh/include/asm/msgbuf.h
+++ /dev/null
@@ -1 +0,0 @@
-#include <asm-generic/msgbuf.h>
diff --git a/arch/sh/include/asm/param.h b/arch/sh/include/asm/param.h
deleted file mode 100644
index 965d454..0000000
--- a/arch/sh/include/asm/param.h
+++ /dev/null
@@ -1 +0,0 @@
-#include <asm-generic/param.h>
diff --git a/arch/sh/include/asm/parport.h b/arch/sh/include/asm/parport.h
deleted file mode 100644
index cf252af..0000000
--- a/arch/sh/include/asm/parport.h
+++ /dev/null
@@ -1 +0,0 @@
-#include <asm-generic/parport.h>
diff --git a/arch/sh/include/asm/percpu.h b/arch/sh/include/asm/percpu.h
deleted file mode 100644
index 4db4b39..0000000
--- a/arch/sh/include/asm/percpu.h
+++ /dev/null
@@ -1,6 +0,0 @@
-#ifndef __ARCH_SH_PERCPU
-#define __ARCH_SH_PERCPU
-
-#include <asm-generic/percpu.h>
-
-#endif /* __ARCH_SH_PERCPU */
diff --git a/arch/sh/include/asm/poll.h b/arch/sh/include/asm/poll.h
deleted file mode 100644
index c98509d..0000000
--- a/arch/sh/include/asm/poll.h
+++ /dev/null
@@ -1 +0,0 @@
-#include <asm-generic/poll.h>
diff --git a/arch/sh/include/asm/posix_types_32.h b/arch/sh/include/asm/posix_types_32.h
index abda584..ba0bdc4 100644
--- a/arch/sh/include/asm/posix_types_32.h
+++ b/arch/sh/include/asm/posix_types_32.h
@@ -3,8 +3,6 @@
 
 typedef unsigned short	__kernel_mode_t;
 #define __kernel_mode_t __kernel_mode_t
-typedef unsigned short	__kernel_nlink_t;
-#define __kernel_nlink_t __kernel_nlink_t
 typedef unsigned short	__kernel_ipc_pid_t;
 #define __kernel_ipc_pid_t __kernel_ipc_pid_t
 typedef unsigned short	__kernel_uid_t;
diff --git a/arch/sh/include/asm/posix_types_64.h b/arch/sh/include/asm/posix_types_64.h
index fcda07b..244f7e9 100644
--- a/arch/sh/include/asm/posix_types_64.h
+++ b/arch/sh/include/asm/posix_types_64.h
@@ -3,8 +3,6 @@
 
 typedef unsigned short	__kernel_mode_t;
 #define __kernel_mode_t __kernel_mode_t
-typedef unsigned short	__kernel_nlink_t;
-#define __kernel_nlink_t __kernel_nlink_t
 typedef unsigned short	__kernel_ipc_pid_t;
 #define __kernel_ipc_pid_t __kernel_ipc_pid_t
 typedef unsigned short	__kernel_uid_t;
diff --git a/arch/sh/include/asm/resource.h b/arch/sh/include/asm/resource.h
deleted file mode 100644
index 9c2499a..0000000
--- a/arch/sh/include/asm/resource.h
+++ /dev/null
@@ -1,6 +0,0 @@
-#ifndef __ASM_SH_RESOURCE_H
-#define __ASM_SH_RESOURCE_H
-
-#include <asm-generic/resource.h>
-
-#endif /* __ASM_SH_RESOURCE_H */
diff --git a/arch/sh/include/asm/scatterlist.h b/arch/sh/include/asm/scatterlist.h
deleted file mode 100644
index 98dfc35..0000000
--- a/arch/sh/include/asm/scatterlist.h
+++ /dev/null
@@ -1,6 +0,0 @@
-#ifndef __ASM_SH_SCATTERLIST_H
-#define __ASM_SH_SCATTERLIST_H
-
-#include <asm-generic/scatterlist.h>
-
-#endif /* __ASM_SH_SCATTERLIST_H */
diff --git a/arch/sh/include/asm/sembuf.h b/arch/sh/include/asm/sembuf.h
deleted file mode 100644
index 7673b83..0000000
--- a/arch/sh/include/asm/sembuf.h
+++ /dev/null
@@ -1 +0,0 @@
-#include <asm-generic/sembuf.h>
diff --git a/arch/sh/include/asm/serial.h b/arch/sh/include/asm/serial.h
deleted file mode 100644
index a0cb0caf..0000000
--- a/arch/sh/include/asm/serial.h
+++ /dev/null
@@ -1 +0,0 @@
-#include <asm-generic/serial.h>
diff --git a/arch/sh/include/asm/shmbuf.h b/arch/sh/include/asm/shmbuf.h
deleted file mode 100644
index 83c05fc..0000000
--- a/arch/sh/include/asm/shmbuf.h
+++ /dev/null
@@ -1 +0,0 @@
-#include <asm-generic/shmbuf.h>
diff --git a/arch/sh/include/asm/siginfo.h b/arch/sh/include/asm/siginfo.h
deleted file mode 100644
index 813040e..0000000
--- a/arch/sh/include/asm/siginfo.h
+++ /dev/null
@@ -1,6 +0,0 @@
-#ifndef __ASM_SH_SIGINFO_H
-#define __ASM_SH_SIGINFO_H
-
-#include <asm-generic/siginfo.h>
-
-#endif /* __ASM_SH_SIGINFO_H */
diff --git a/arch/sh/include/asm/sizes.h b/arch/sh/include/asm/sizes.h
deleted file mode 100644
index dd248c2..0000000
--- a/arch/sh/include/asm/sizes.h
+++ /dev/null
@@ -1 +0,0 @@
-#include <asm-generic/sizes.h>
diff --git a/arch/sh/include/asm/socket.h b/arch/sh/include/asm/socket.h
deleted file mode 100644
index 6b71384..0000000
--- a/arch/sh/include/asm/socket.h
+++ /dev/null
@@ -1 +0,0 @@
-#include <asm-generic/socket.h>
diff --git a/arch/sh/include/asm/statfs.h b/arch/sh/include/asm/statfs.h
deleted file mode 100644
index 9202a02..0000000
--- a/arch/sh/include/asm/statfs.h
+++ /dev/null
@@ -1,6 +0,0 @@
-#ifndef __ASM_SH_STATFS_H
-#define __ASM_SH_STATFS_H
-
-#include <asm-generic/statfs.h>
-
-#endif /* __ASM_SH_STATFS_H */
diff --git a/arch/sh/include/asm/termbits.h b/arch/sh/include/asm/termbits.h
deleted file mode 100644
index 3935b10..0000000
--- a/arch/sh/include/asm/termbits.h
+++ /dev/null
@@ -1 +0,0 @@
-#include <asm-generic/termbits.h>
diff --git a/arch/sh/include/asm/termios.h b/arch/sh/include/asm/termios.h
deleted file mode 100644
index 280d78a..0000000
--- a/arch/sh/include/asm/termios.h
+++ /dev/null
@@ -1 +0,0 @@
-#include <asm-generic/termios.h>
diff --git a/arch/sh/include/asm/thread_info.h b/arch/sh/include/asm/thread_info.h
index 0c04ffc..bc13b57 100644
--- a/arch/sh/include/asm/thread_info.h
+++ b/arch/sh/include/asm/thread_info.h
@@ -169,7 +169,7 @@
 {
 	struct thread_info *ti = current_thread_info();
 	ti->status |= TS_RESTORE_SIGMASK;
-	set_bit(TIF_SIGPENDING, (unsigned long *)&ti->flags);
+	WARN_ON(!test_bit(TIF_SIGPENDING, (unsigned long *)&ti->flags));
 }
 
 #define TI_FLAG_FAULT_CODE_SHIFT	24
@@ -189,6 +189,23 @@
 	struct thread_info *ti = current_thread_info();
 	return ti->flags >> TI_FLAG_FAULT_CODE_SHIFT;
 }
+
+static inline void clear_restore_sigmask(void)
+{
+	current_thread_info()->status &= ~TS_RESTORE_SIGMASK;
+}
+static inline bool test_restore_sigmask(void)
+{
+	return current_thread_info()->status & TS_RESTORE_SIGMASK;
+}
+static inline bool test_and_clear_restore_sigmask(void)
+{
+	struct thread_info *ti = current_thread_info();
+	if (!(ti->status & TS_RESTORE_SIGMASK))
+		return false;
+	ti->status &= ~TS_RESTORE_SIGMASK;
+	return true;
+}
 #endif	/* !__ASSEMBLY__ */
 
 #endif /* __KERNEL__ */
diff --git a/arch/sh/include/asm/uaccess.h b/arch/sh/include/asm/uaccess.h
index 050f221..8698a80 100644
--- a/arch/sh/include/asm/uaccess.h
+++ b/arch/sh/include/asm/uaccess.h
@@ -25,6 +25,8 @@
 	(__chk_user_ptr(addr),		\
 	 __access_ok((unsigned long __force)(addr), (size)))
 
+#define user_addr_max()	(current_thread_info()->addr_limit.seg)
+
 /*
  * Uh, these should become the main single-value transfer routines ...
  * They automatically use the right size if we just have the right
@@ -100,6 +102,11 @@
 # include "uaccess_64.h"
 #endif
 
+extern long strncpy_from_user(char *dest, const char __user *src, long count);
+
+extern __must_check long strlen_user(const char __user *str);
+extern __must_check long strnlen_user(const char __user *str, long n);
+
 /* Generic arbitrary sized copy.  */
 /* Return the number of bytes NOT copied */
 __kernel_size_t __copy_user(void *to, const void *from, __kernel_size_t n);
@@ -137,37 +144,6 @@
 	__cl_size;							\
 })
 
-/**
- * strncpy_from_user: - Copy a NUL terminated string from userspace.
- * @dst:   Destination address, in kernel space.  This buffer must be at
- *         least @count bytes long.
- * @src:   Source address, in user space.
- * @count: Maximum number of bytes to copy, including the trailing NUL.
- *
- * Copies a NUL-terminated string from userspace to kernel space.
- *
- * On success, returns the length of the string (not including the trailing
- * NUL).
- *
- * If access to userspace fails, returns -EFAULT (some data may have been
- * copied).
- *
- * If @count is smaller than the length of the string, copies @count bytes
- * and returns @count.
- */
-#define strncpy_from_user(dest,src,count)				\
-({									\
-	unsigned long __sfu_src = (unsigned long)(src);			\
-	int __sfu_count = (int)(count);					\
-	long __sfu_res = -EFAULT;					\
-									\
-	if (__access_ok(__sfu_src, __sfu_count))			\
-		__sfu_res = __strncpy_from_user((unsigned long)(dest),	\
-				__sfu_src, __sfu_count);		\
-									\
-	__sfu_res;							\
-})
-
 static inline unsigned long
 copy_from_user(void *to, const void __user *from, unsigned long n)
 {
@@ -192,43 +168,6 @@
 	return __copy_size;
 }
 
-/**
- * strnlen_user: - Get the size of a string in user space.
- * @s: The string to measure.
- * @n: The maximum valid length
- *
- * Context: User context only.  This function may sleep.
- *
- * Get the size of a NUL-terminated string in user space.
- *
- * Returns the size of the string INCLUDING the terminating NUL.
- * On exception, returns 0.
- * If the string is too long, returns a value greater than @n.
- */
-static inline long strnlen_user(const char __user *s, long n)
-{
-	if (!__addr_ok(s))
-		return 0;
-	else
-		return __strnlen_user(s, n);
-}
-
-/**
- * strlen_user: - Get the size of a string in user space.
- * @str: The string to measure.
- *
- * Context: User context only.  This function may sleep.
- *
- * Get the size of a NUL-terminated string in user space.
- *
- * Returns the size of the string INCLUDING the terminating NUL.
- * On exception, returns 0.
- *
- * If there is a limit on the length of a valid string, you may wish to
- * consider using strnlen_user() instead.
- */
-#define strlen_user(str)	strnlen_user(str, ~0UL >> 1)
-
 /*
  * The exception table consists of pairs of addresses: the first is the
  * address of an instruction that is allowed to fault, and the second is
diff --git a/arch/sh/include/asm/uaccess_32.h b/arch/sh/include/asm/uaccess_32.h
index ae0d24f..c0de7ee 100644
--- a/arch/sh/include/asm/uaccess_32.h
+++ b/arch/sh/include/asm/uaccess_32.h
@@ -170,79 +170,4 @@
 
 extern void __put_user_unknown(void);
 
-static inline int
-__strncpy_from_user(unsigned long __dest, unsigned long __user __src, int __count)
-{
-	__kernel_size_t res;
-	unsigned long __dummy, _d, _s, _c;
-
-	__asm__ __volatile__(
-		"9:\n"
-		"mov.b	@%2+, %1\n\t"
-		"cmp/eq	#0, %1\n\t"
-		"bt/s	2f\n"
-		"1:\n"
-		"mov.b	%1, @%3\n\t"
-		"dt	%4\n\t"
-		"bf/s	9b\n\t"
-		" add	#1, %3\n\t"
-		"2:\n\t"
-		"sub	%4, %0\n"
-		"3:\n"
-		".section .fixup,\"ax\"\n"
-		"4:\n\t"
-		"mov.l	5f, %1\n\t"
-		"jmp	@%1\n\t"
-		" mov	%9, %0\n\t"
-		".balign 4\n"
-		"5:	.long 3b\n"
-		".previous\n"
-		".section __ex_table,\"a\"\n"
-		"	.balign 4\n"
-		"	.long 9b,4b\n"
-		".previous"
-		: "=r" (res), "=&z" (__dummy), "=r" (_s), "=r" (_d), "=r"(_c)
-		: "0" (__count), "2" (__src), "3" (__dest), "4" (__count),
-		  "i" (-EFAULT)
-		: "memory", "t");
-
-	return res;
-}
-
-/*
- * Return the size of a string (including the ending 0 even when we have
- * exceeded the maximum string length).
- */
-static inline long __strnlen_user(const char __user *__s, long __n)
-{
-	unsigned long res;
-	unsigned long __dummy;
-
-	__asm__ __volatile__(
-		"1:\t"
-		"mov.b	@(%0,%3), %1\n\t"
-		"cmp/eq	%4, %0\n\t"
-		"bt/s	2f\n\t"
-		" add	#1, %0\n\t"
-		"tst	%1, %1\n\t"
-		"bf	1b\n\t"
-		"2:\n"
-		".section .fixup,\"ax\"\n"
-		"3:\n\t"
-		"mov.l	4f, %1\n\t"
-		"jmp	@%1\n\t"
-		" mov	#0, %0\n"
-		".balign 4\n"
-		"4:	.long 2b\n"
-		".previous\n"
-		".section __ex_table,\"a\"\n"
-		"	.balign 4\n"
-		"	.long 1b,3b\n"
-		".previous"
-		: "=z" (res), "=&r" (__dummy)
-		: "0" (0), "r" (__s), "r" (__n)
-		: "t");
-	return res;
-}
-
 #endif /* __ASM_SH_UACCESS_32_H */
diff --git a/arch/sh/include/asm/uaccess_64.h b/arch/sh/include/asm/uaccess_64.h
index 56fd20b..2e07e0f 100644
--- a/arch/sh/include/asm/uaccess_64.h
+++ b/arch/sh/include/asm/uaccess_64.h
@@ -84,8 +84,4 @@
 extern long __put_user_asm_q(void *, long);
 extern void __put_user_unknown(void);
 
-extern long __strnlen_user(const char *__s, long __n);
-extern int __strncpy_from_user(unsigned long __dest,
-	       unsigned long __user __src, int __count);
-
 #endif /* __ASM_SH_UACCESS_64_H */
diff --git a/arch/sh/include/asm/ucontext.h b/arch/sh/include/asm/ucontext.h
deleted file mode 100644
index 9bc07b9..0000000
--- a/arch/sh/include/asm/ucontext.h
+++ /dev/null
@@ -1 +0,0 @@
-#include <asm-generic/ucontext.h>
diff --git a/arch/sh/include/asm/word-at-a-time.h b/arch/sh/include/asm/word-at-a-time.h
new file mode 100644
index 0000000..6e38953
--- /dev/null
+++ b/arch/sh/include/asm/word-at-a-time.h
@@ -0,0 +1,53 @@
+#ifndef __ASM_SH_WORD_AT_A_TIME_H
+#define __ASM_SH_WORD_AT_A_TIME_H
+
+#ifdef CONFIG_CPU_BIG_ENDIAN
+# include <asm-generic/word-at-a-time.h>
+#else
+/*
+ * Little-endian version cribbed from x86.
+ */
+struct word_at_a_time {
+	const unsigned long one_bits, high_bits;
+};
+
+#define WORD_AT_A_TIME_CONSTANTS { REPEAT_BYTE(0x01), REPEAT_BYTE(0x80) }
+
+/* Carl Chatfield / Jan Achrenius G+ version for 32-bit */
+static inline long count_masked_bytes(long mask)
+{
+	/* (000000 0000ff 00ffff ffffff) -> ( 1 1 2 3 ) */
+	long a = (0x0ff0001+mask) >> 23;
+	/* Fix the 1 for 00 case */
+	return a & mask;
+}
+
+/* Return nonzero if it has a zero */
+static inline unsigned long has_zero(unsigned long a, unsigned long *bits, const struct word_at_a_time *c)
+{
+	unsigned long mask = ((a - c->one_bits) & ~a) & c->high_bits;
+	*bits = mask;
+	return mask;
+}
+
+static inline unsigned long prep_zero_mask(unsigned long a, unsigned long bits, const struct word_at_a_time *c)
+{
+	return bits;
+}
+
+static inline unsigned long create_zero_mask(unsigned long bits)
+{
+	bits = (bits - 1) & ~bits;
+	return bits >> 7;
+}
+
+/* The mask we created is directly usable as a bytemask */
+#define zero_bytemask(mask) (mask)
+
+static inline unsigned long find_zero(unsigned long mask)
+{
+	return count_masked_bytes(mask);
+}
+#endif
+
+#endif
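The little-endian word-at-a-time helpers above compose as has_zero() -> prep_zero_mask() -> create_zero_mask() -> find_zero(). A minimal stand-alone C sketch of that pipeline, assuming 32-bit words and with the REPEAT_BYTE() constants expanded by hand (illustration only, not part of the patch):

	#include <stdint.h>
	#include <stdio.h>

	/* Carl Chatfield / Jan Achrenius trick, as in the header above. */
	static uint32_t count_masked_bytes(uint32_t mask)
	{
		uint32_t a = (0x0ff0001 + mask) >> 23;	/* 0x00ffffff -> 3, 0x00ffff -> 2, ... */
		return a & mask;			/* fix the spurious 1 when mask == 0 */
	}

	int main(void)
	{
		const uint32_t one_bits  = 0x01010101;	/* REPEAT_BYTE(0x01) */
		const uint32_t high_bits = 0x80808080;	/* REPEAT_BYTE(0x80) */
		uint32_t w = 0x00414243;		/* bytes 'C','B','A','\0' in memory */
		uint32_t bits = ((w - one_bits) & ~w) & high_bits;	/* has_zero() */

		if (bits) {
			bits = (bits - 1) & ~bits;	/* create_zero_mask() */
			bits >>= 7;
			printf("first NUL at byte %u\n", (unsigned)count_masked_bytes(bits)); /* prints 3 */
		}
		return 0;
	}
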
diff --git a/arch/sh/include/asm/xor.h b/arch/sh/include/asm/xor.h
deleted file mode 100644
index c82eb12..0000000
--- a/arch/sh/include/asm/xor.h
+++ /dev/null
@@ -1 +0,0 @@
-#include <asm-generic/xor.h>
diff --git a/arch/sh/include/cpu-sh2a/cpu/ubc.h b/arch/sh/include/cpu-sh2a/cpu/ubc.h
deleted file mode 100644
index 1192e1c..0000000
--- a/arch/sh/include/cpu-sh2a/cpu/ubc.h
+++ /dev/null
@@ -1,28 +0,0 @@
-/*
- * SH-2A UBC definitions
- *
- * Copyright (C) 2008 Kieran Bingham
- *
- * This file is subject to the terms and conditions of the GNU General Public
- * License.  See the file "COPYING" in the main directory of this archive
- * for more details.
- */
-
-#ifndef __ASM_CPU_SH2A_UBC_H
-#define __ASM_CPU_SH2A_UBC_H
-
-#define UBC_BARA                0xfffc0400
-#define UBC_BAMRA               0xfffc0404
-#define UBC_BBRA                0xfffc04a0	/* 16 bit access */
-#define UBC_BDRA                0xfffc0408
-#define UBC_BDMRA               0xfffc040c
-
-#define UBC_BARB                0xfffc0410
-#define UBC_BAMRB               0xfffc0414
-#define UBC_BBRB                0xfffc04b0	/* 16 bit access */
-#define UBC_BDRB                0xfffc0418
-#define UBC_BDMRB               0xfffc041c
-
-#define UBC_BRCR                0xfffc04c0
-
-#endif /* __ASM_CPU_SH2A_UBC_H */
diff --git a/arch/sh/kernel/cpu/sh4a/clock-sh7343.c b/arch/sh/kernel/cpu/sh4a/clock-sh7343.c
index ea01a72..53638e2 100644
--- a/arch/sh/kernel/cpu/sh4a/clock-sh7343.c
+++ b/arch/sh/kernel/cpu/sh4a/clock-sh7343.c
@@ -283,7 +283,7 @@
 		ret = sh_clk_div6_register(div6_clks, DIV6_NR);
 
 	if (!ret)
-		ret = sh_clk_mstp32_register(mstp_clks, MSTP_NR);
+		ret = sh_clk_mstp_register(mstp_clks, MSTP_NR);
 
 	return ret;
 }
diff --git a/arch/sh/kernel/cpu/sh4a/clock-sh7366.c b/arch/sh/kernel/cpu/sh4a/clock-sh7366.c
index 7ac07b4..22e485d 100644
--- a/arch/sh/kernel/cpu/sh4a/clock-sh7366.c
+++ b/arch/sh/kernel/cpu/sh4a/clock-sh7366.c
@@ -276,7 +276,7 @@
 		ret = sh_clk_div6_register(div6_clks, DIV6_NR);
 
 	if (!ret)
-		ret = sh_clk_mstp32_register(mstp_clks, MSTP_NR);
+		ret = sh_clk_mstp_register(mstp_clks, MSTP_NR);
 
 	return ret;
 }
diff --git a/arch/sh/kernel/cpu/sh4a/clock-sh7722.c b/arch/sh/kernel/cpu/sh4a/clock-sh7722.c
index 8e1f970..c4cb740 100644
--- a/arch/sh/kernel/cpu/sh4a/clock-sh7722.c
+++ b/arch/sh/kernel/cpu/sh4a/clock-sh7722.c
@@ -261,7 +261,7 @@
 		ret = sh_clk_div6_register(div6_clks, DIV6_NR);
 
 	if (!ret)
-		ret = sh_clk_mstp32_register(mstp_clks, HWBLK_NR);
+		ret = sh_clk_mstp_register(mstp_clks, HWBLK_NR);
 
 	return ret;
 }
diff --git a/arch/sh/kernel/cpu/sh4a/clock-sh7723.c b/arch/sh/kernel/cpu/sh4a/clock-sh7723.c
index 35f75cf..37c41c7 100644
--- a/arch/sh/kernel/cpu/sh4a/clock-sh7723.c
+++ b/arch/sh/kernel/cpu/sh4a/clock-sh7723.c
@@ -311,7 +311,7 @@
 		ret = sh_clk_div6_register(div6_clks, DIV6_NR);
 
 	if (!ret)
-		ret = sh_clk_mstp32_register(mstp_clks, HWBLK_NR);
+		ret = sh_clk_mstp_register(mstp_clks, HWBLK_NR);
 
 	return ret;
 }
diff --git a/arch/sh/kernel/cpu/sh4a/clock-sh7724.c b/arch/sh/kernel/cpu/sh4a/clock-sh7724.c
index 2a87901..c87e78f 100644
--- a/arch/sh/kernel/cpu/sh4a/clock-sh7724.c
+++ b/arch/sh/kernel/cpu/sh4a/clock-sh7724.c
@@ -375,7 +375,7 @@
 		ret = sh_clk_div6_reparent_register(div6_clks, DIV6_NR);
 
 	if (!ret)
-		ret = sh_clk_mstp32_register(mstp_clks, HWBLK_NR);
+		ret = sh_clk_mstp_register(mstp_clks, HWBLK_NR);
 
 	return ret;
 }
diff --git a/arch/sh/kernel/cpu/sh4a/clock-sh7734.c b/arch/sh/kernel/cpu/sh4a/clock-sh7734.c
index 1697642..deb683a 100644
--- a/arch/sh/kernel/cpu/sh4a/clock-sh7734.c
+++ b/arch/sh/kernel/cpu/sh4a/clock-sh7734.c
@@ -260,7 +260,7 @@
 			&div4_table);
 
 	if (!ret)
-		ret = sh_clk_mstp32_register(mstp_clks, MSTP_NR);
+		ret = sh_clk_mstp_register(mstp_clks, MSTP_NR);
 
 	return ret;
 }
diff --git a/arch/sh/kernel/cpu/sh4a/clock-sh7757.c b/arch/sh/kernel/cpu/sh4a/clock-sh7757.c
index 04ab5ae..e84a432 100644
--- a/arch/sh/kernel/cpu/sh4a/clock-sh7757.c
+++ b/arch/sh/kernel/cpu/sh4a/clock-sh7757.c
@@ -148,7 +148,7 @@
 		ret = sh_clk_div4_register(div4_clks, ARRAY_SIZE(div4_clks),
 					   &div4_table);
 	if (!ret)
-		ret = sh_clk_mstp32_register(mstp_clks, MSTP_NR);
+		ret = sh_clk_mstp_register(mstp_clks, MSTP_NR);
 
 	return ret;
 }
diff --git a/arch/sh/kernel/cpu/sh4a/clock-sh7785.c b/arch/sh/kernel/cpu/sh4a/clock-sh7785.c
index ab1c58f..1c83788 100644
--- a/arch/sh/kernel/cpu/sh4a/clock-sh7785.c
+++ b/arch/sh/kernel/cpu/sh4a/clock-sh7785.c
@@ -175,7 +175,7 @@
 		ret = sh_clk_div4_register(div4_clks, ARRAY_SIZE(div4_clks),
 					   &div4_table);
 	if (!ret)
-		ret = sh_clk_mstp32_register(mstp_clks, MSTP_NR);
+		ret = sh_clk_mstp_register(mstp_clks, MSTP_NR);
 
 	return ret;
 }
diff --git a/arch/sh/kernel/cpu/sh4a/clock-sh7786.c b/arch/sh/kernel/cpu/sh4a/clock-sh7786.c
index 4917094..8bba6f1 100644
--- a/arch/sh/kernel/cpu/sh4a/clock-sh7786.c
+++ b/arch/sh/kernel/cpu/sh4a/clock-sh7786.c
@@ -194,7 +194,7 @@
 		ret = sh_clk_div4_register(div4_clks, ARRAY_SIZE(div4_clks),
 					   &div4_table);
 	if (!ret)
-		ret = sh_clk_mstp32_register(mstp_clks, MSTP_NR);
+		ret = sh_clk_mstp_register(mstp_clks, MSTP_NR);
 
 	return ret;
 }
diff --git a/arch/sh/kernel/cpu/sh4a/clock-shx3.c b/arch/sh/kernel/cpu/sh4a/clock-shx3.c
index 0f11b39..a9422da 100644
--- a/arch/sh/kernel/cpu/sh4a/clock-shx3.c
+++ b/arch/sh/kernel/cpu/sh4a/clock-shx3.c
@@ -149,7 +149,7 @@
 		ret = sh_clk_div4_register(div4_clks, ARRAY_SIZE(div4_clks),
 					   &div4_table);
 	if (!ret)
-		ret = sh_clk_mstp32_register(mstp_clks, MSTP_NR);
+		ret = sh_clk_mstp_register(mstp_clks, MSTP_NR);
 
 	return ret;
 }
diff --git a/arch/sh/kernel/cpu/sh5/entry.S b/arch/sh/kernel/cpu/sh5/entry.S
index ff1f0e6..b7cf6a5 100644
--- a/arch/sh/kernel/cpu/sh5/entry.S
+++ b/arch/sh/kernel/cpu/sh5/entry.S
@@ -1569,86 +1569,6 @@
 #endif /* CONFIG_MMU */
 
 /*
- * int __strncpy_from_user(unsigned long __dest, unsigned long __src,
- *			   int __count)
- *
- * Inputs:
- * (r2)  target address
- * (r3)  source address
- * (r4)  maximum size in bytes
- *
- * Ouputs:
- * (*r2) copied data
- * (r2)  -EFAULT (in case of faulting)
- *       copied data (otherwise)
- */
-	.global	__strncpy_from_user
-__strncpy_from_user:
-	pta	___strncpy_from_user1, tr0
-	pta	___strncpy_from_user_done, tr1
-	or	r4, ZERO, r5		/* r5 = original count */
-	beq/u	r4, r63, tr1		/* early exit if r4==0 */
-	movi	-(EFAULT), r6		/* r6 = reply, no real fixup */
-	or	ZERO, ZERO, r7		/* r7 = data, clear top byte of data */
-
-___strncpy_from_user1:
-	ld.b	r3, 0, r7		/* Fault address: only in reading */
-	st.b	r2, 0, r7
-	addi	r2, 1, r2
-	addi	r3, 1, r3
-	beq/u	ZERO, r7, tr1
-	addi	r4, -1, r4		/* return real number of copied bytes */
-	bne/l	ZERO, r4, tr0
-
-___strncpy_from_user_done:
-	sub	r5, r4, r6		/* If done, return copied */
-
-___strncpy_from_user_exit:
-	or	r6, ZERO, r2
-	ptabs	LINK, tr0
-	blink	tr0, ZERO
-
-/*
- * extern long __strnlen_user(const char *__s, long __n)
- *
- * Inputs:
- * (r2)  source address
- * (r3)  source size in bytes
- *
- * Ouputs:
- * (r2)  -EFAULT (in case of faulting)
- *       string length (otherwise)
- */
-	.global	__strnlen_user
-__strnlen_user:
-	pta	___strnlen_user_set_reply, tr0
-	pta	___strnlen_user1, tr1
-	or	ZERO, ZERO, r5		/* r5 = counter */
-	movi	-(EFAULT), r6		/* r6 = reply, no real fixup */
-	or	ZERO, ZERO, r7		/* r7 = data, clear top byte of data */
-	beq	r3, ZERO, tr0
-
-___strnlen_user1:
-	ldx.b	r2, r5, r7		/* Fault address: only in reading */
-	addi	r3, -1, r3		/* No real fixup */
-	addi	r5, 1, r5
-	beq	r3, ZERO, tr0
-	bne	r7, ZERO, tr1
-! The line below used to be active.  This meant led to a junk byte lying between each pair
-! of entries in the argv & envp structures in memory.  Whilst the program saw the right data
-! via the argv and envp arguments to main, it meant the 'flat' representation visible through
-! /proc/$pid/cmdline was corrupt, causing trouble with ps, for example.
-!	addi	r5, 1, r5		/* Include '\0' */
-
-___strnlen_user_set_reply:
-	or	r5, ZERO, r6		/* If done, return counter */
-
-___strnlen_user_exit:
-	or	r6, ZERO, r2
-	ptabs	LINK, tr0
-	blink	tr0, ZERO
-
-/*
  * extern long __get_user_asm_?(void *val, long addr)
  *
  * Inputs:
@@ -1982,8 +1902,6 @@
 	.long	___copy_user2, ___copy_user_exit
 	.long	___clear_user1, ___clear_user_exit
 #endif
-	.long	___strncpy_from_user1, ___strncpy_from_user_exit
-	.long	___strnlen_user1, ___strnlen_user_exit
 	.long	___get_user_asm_b1, ___get_user_asm_b_exit
 	.long	___get_user_asm_w1, ___get_user_asm_w_exit
 	.long	___get_user_asm_l1, ___get_user_asm_l_exit
diff --git a/arch/sh/kernel/process.c b/arch/sh/kernel/process.c
index 9b7a459..055d91b 100644
--- a/arch/sh/kernel/process.c
+++ b/arch/sh/kernel/process.c
@@ -4,6 +4,7 @@
 #include <linux/sched.h>
 #include <linux/export.h>
 #include <linux/stackprotector.h>
+#include <asm/fpu.h>
 
 struct kmem_cache *task_xstate_cachep = NULL;
 unsigned int xstate_size;
diff --git a/arch/sh/kernel/process_64.c b/arch/sh/kernel/process_64.c
index 4264583e..602545b 100644
--- a/arch/sh/kernel/process_64.c
+++ b/arch/sh/kernel/process_64.c
@@ -33,6 +33,7 @@
 #include <asm/switch_to.h>
 
 struct task_struct *last_task_used_math = NULL;
+struct pt_regs fake_swapper_regs = { 0, };
 
 void show_regs(struct pt_regs *regs)
 {
diff --git a/arch/sh/kernel/sh_ksyms_64.c b/arch/sh/kernel/sh_ksyms_64.c
index 45afa5c..26a0774 100644
--- a/arch/sh/kernel/sh_ksyms_64.c
+++ b/arch/sh/kernel/sh_ksyms_64.c
@@ -32,8 +32,6 @@
 EXPORT_SYMBOL(__get_user_asm_w);
 EXPORT_SYMBOL(__get_user_asm_l);
 EXPORT_SYMBOL(__get_user_asm_q);
-EXPORT_SYMBOL(__strnlen_user);
-EXPORT_SYMBOL(__strncpy_from_user);
 EXPORT_SYMBOL(__clear_user);
 EXPORT_SYMBOL(copy_page);
 EXPORT_SYMBOL(__copy_user);
diff --git a/arch/sh/kernel/signal_32.c b/arch/sh/kernel/signal_32.c
index cb4172c..d6b7b61 100644
--- a/arch/sh/kernel/signal_32.c
+++ b/arch/sh/kernel/signal_32.c
@@ -32,8 +32,6 @@
 #include <asm/syscalls.h>
 #include <asm/fpu.h>
 
-#define _BLOCKABLE (~(sigmask(SIGKILL) | sigmask(SIGSTOP)))
-
 struct fdpic_func_descriptor {
 	unsigned long	text;
 	unsigned long	GOT;
@@ -226,7 +224,6 @@
 				    sizeof(frame->extramask))))
 		goto badframe;
 
-	sigdelsetmask(&set, ~_BLOCKABLE);
 	set_current_blocked(&set);
 
 	if (restore_sigcontext(regs, &frame->sc, &r0))
@@ -256,7 +253,6 @@
 	if (__copy_from_user(&set, &frame->uc.uc_sigmask, sizeof(set)))
 		goto badframe;
 
-	sigdelsetmask(&set, ~_BLOCKABLE);
 	set_current_blocked(&set);
 
 	if (restore_sigcontext(regs, &frame->uc.uc_mcontext, &r0))
@@ -522,10 +518,11 @@
 /*
  * OK, we're invoking a handler
  */
-static int
+static void
 handle_signal(unsigned long sig, struct k_sigaction *ka, siginfo_t *info,
-	      sigset_t *oldset, struct pt_regs *regs, unsigned int save_r0)
+	      struct pt_regs *regs, unsigned int save_r0)
 {
+	sigset_t *oldset = sigmask_to_save();
 	int ret;
 
 	/* Set up the stack frame */
@@ -534,10 +531,10 @@
 	else
 		ret = setup_frame(sig, ka, oldset, regs);
 
-	if (ret == 0)
-		block_sigmask(ka, sig);
-
-	return ret;
+	if (ret)
+		return;
+	signal_delivered(sig, info, ka, regs,
+			test_thread_flag(TIF_SINGLESTEP));
 }
 
 /*
@@ -554,7 +551,6 @@
 	siginfo_t info;
 	int signr;
 	struct k_sigaction ka;
-	sigset_t *oldset;
 
 	/*
 	 * We want the common case to go fast, which
@@ -565,30 +561,12 @@
 	if (!user_mode(regs))
 		return;
 
-	if (current_thread_info()->status & TS_RESTORE_SIGMASK)
-		oldset = &current->saved_sigmask;
-	else
-		oldset = &current->blocked;
-
 	signr = get_signal_to_deliver(&info, &ka, regs, NULL);
 	if (signr > 0) {
 		handle_syscall_restart(save_r0, regs, &ka.sa);
 
 		/* Whee!  Actually deliver the signal.  */
-		if (handle_signal(signr, &ka, &info, oldset,
-				  regs, save_r0) == 0) {
-			/*
-			 * A signal was successfully delivered; the saved
-			 * sigmask will have been stored in the signal frame,
-			 * and will be restored by sigreturn, so we can simply
-			 * clear the TS_RESTORE_SIGMASK flag
-			 */
-			current_thread_info()->status &= ~TS_RESTORE_SIGMASK;
-
-			tracehook_signal_handler(signr, &info, &ka, regs,
-					test_thread_flag(TIF_SINGLESTEP));
-		}
-
+		handle_signal(signr, &ka, &info, regs, save_r0);
 		return;
 	}
 
@@ -610,10 +588,7 @@
 	 * If there's no signal to deliver, we just put the saved sigmask
 	 * back.
 	 */
-	if (current_thread_info()->status & TS_RESTORE_SIGMASK) {
-		current_thread_info()->status &= ~TS_RESTORE_SIGMASK;
-		sigprocmask(SIG_SETMASK, &current->saved_sigmask, NULL);
-	}
+	restore_saved_sigmask();
 }
 
 asmlinkage void do_notify_resume(struct pt_regs *regs, unsigned int save_r0,
@@ -626,7 +601,5 @@
 	if (thread_info_flags & _TIF_NOTIFY_RESUME) {
 		clear_thread_flag(TIF_NOTIFY_RESUME);
 		tracehook_notify_resume(regs);
-		if (current->replacement_session_keyring)
-			key_replace_session_keyring();
 	}
 }
diff --git a/arch/sh/kernel/signal_64.c b/arch/sh/kernel/signal_64.c
index b589a35..6b5b3df 100644
--- a/arch/sh/kernel/signal_64.c
+++ b/arch/sh/kernel/signal_64.c
@@ -41,11 +41,9 @@
 
 #define DEBUG_SIG 0
 
-#define _BLOCKABLE (~(sigmask(SIGKILL) | sigmask(SIGSTOP)))
-
-static int
+static void
 handle_signal(unsigned long sig, siginfo_t *info, struct k_sigaction *ka,
-		sigset_t *oldset, struct pt_regs * regs);
+		struct pt_regs * regs);
 
 static inline void
 handle_syscall_restart(struct pt_regs *regs, struct sigaction *sa)
@@ -88,7 +86,6 @@
 	siginfo_t info;
 	int signr;
 	struct k_sigaction ka;
-	sigset_t *oldset;
 
 	/*
 	 * We want the common case to go fast, which
@@ -99,28 +96,13 @@
 	if (!user_mode(regs))
 		return;
 
-	if (current_thread_info()->status & TS_RESTORE_SIGMASK)
-		oldset = &current->saved_sigmask;
-	else
-		oldset = &current->blocked;
-
 	signr = get_signal_to_deliver(&info, &ka, regs, 0);
 	if (signr > 0) {
 		handle_syscall_restart(regs, &ka.sa);
 
 		/* Whee!  Actually deliver the signal.  */
-		if (handle_signal(signr, &info, &ka, oldset, regs) == 0) {
-			/*
-			 * If a signal was successfully delivered, the
-			 * saved sigmask is in its frame, and we can
-			 * clear the TS_RESTORE_SIGMASK flag.
-			 */
-			current_thread_info()->status &= ~TS_RESTORE_SIGMASK;
-
-			tracehook_signal_handler(signr, &info, &ka, regs,
-					test_thread_flag(TIF_SINGLESTEP));
-			return;
-		}
+		handle_signal(signr, &info, &ka, regs);
+		return;
 	}
 
 	/* Did we come from a system call? */
@@ -143,12 +125,7 @@
 	}
 
 	/* No signal to deliver -- put the saved sigmask back */
-	if (current_thread_info()->status & TS_RESTORE_SIGMASK) {
-		current_thread_info()->status &= ~TS_RESTORE_SIGMASK;
-		sigprocmask(SIG_SETMASK, &current->saved_sigmask, NULL);
-	}
-
-	return;
+	restore_saved_sigmask();
 }
 
 /*
@@ -351,7 +328,6 @@
 				    sizeof(frame->extramask))))
 		goto badframe;
 
-	sigdelsetmask(&set, ~_BLOCKABLE);
 	set_current_blocked(&set);
 
 	if (restore_sigcontext(regs, &frame->sc, &ret))
@@ -384,7 +360,6 @@
 	if (__copy_from_user(&set, &frame->uc.uc_sigmask, sizeof(set)))
 		goto badframe;
 
-	sigdelsetmask(&set, ~_BLOCKABLE);
 	set_current_blocked(&set);
 
 	if (restore_sigcontext(regs, &frame->uc.uc_mcontext, &ret))
@@ -659,10 +634,11 @@
 /*
  * OK, we're invoking a handler
  */
-static int
+static void
 handle_signal(unsigned long sig, siginfo_t *info, struct k_sigaction *ka,
-		sigset_t *oldset, struct pt_regs * regs)
+		struct pt_regs * regs)
 {
+	sigset_t *oldset = sigmask_to_save();
 	int ret;
 
 	/* Set up the stack frame */
@@ -671,10 +647,11 @@
 	else
 		ret = setup_frame(sig, ka, oldset, regs);
 
-	if (ret == 0)
-		block_sigmask(ka, sig);
+	if (ret)
+		return;
 
-	return ret;
+	signal_delivered(sig, info, ka, regs,
+			test_thread_flag(TIF_SINGLESTEP));
 }
 
 asmlinkage void do_notify_resume(struct pt_regs *regs, unsigned long thread_info_flags)
@@ -685,7 +662,5 @@
 	if (thread_info_flags & _TIF_NOTIFY_RESUME) {
 		clear_thread_flag(TIF_NOTIFY_RESUME);
 		tracehook_notify_resume(regs);
-		if (current->replacement_session_keyring)
-			key_replace_session_keyring();
 	}
 }
diff --git a/arch/sh/kernel/smp.c b/arch/sh/kernel/smp.c
index b86e9ca..2062aa8 100644
--- a/arch/sh/kernel/smp.c
+++ b/arch/sh/kernel/smp.c
@@ -123,7 +123,6 @@
 int __cpu_disable(void)
 {
 	unsigned int cpu = smp_processor_id();
-	struct task_struct *p;
 	int ret;
 
 	ret = mp_ops->cpu_disable(cpu);
@@ -153,11 +152,7 @@
 	flush_cache_all();
 	local_flush_tlb_all();
 
-	read_lock(&tasklist_lock);
-	for_each_process(p)
-		if (p->mm)
-			cpumask_clear_cpu(cpu, mm_cpumask(p->mm));
-	read_unlock(&tasklist_lock);
+	clear_tasks_mm_cpumask(cpu);
 
 	return 0;
 }
diff --git a/arch/sparc/Kconfig b/arch/sparc/Kconfig
index 15e9e05..e74ff13 100644
--- a/arch/sparc/Kconfig
+++ b/arch/sparc/Kconfig
@@ -35,12 +35,12 @@
 	select GENERIC_CMOS_UPDATE
 	select GENERIC_CLOCKEVENTS
 	select GENERIC_STRNCPY_FROM_USER
+	select GENERIC_STRNLEN_USER
 
 config SPARC32
 	def_bool !64BIT
 	select GENERIC_ATOMIC64
 	select CLZ_TAB
-	select ARCH_USES_GETTIMEOFFSET
 
 config SPARC64
 	def_bool 64BIT
diff --git a/arch/sparc/include/asm/Kbuild b/arch/sparc/include/asm/Kbuild
index 2c2e388..67f83e0 100644
--- a/arch/sparc/include/asm/Kbuild
+++ b/arch/sparc/include/asm/Kbuild
@@ -21,3 +21,4 @@
 generic-y += local64.h
 generic-y += irq_regs.h
 generic-y += local.h
+generic-y += word-at-a-time.h
diff --git a/arch/sparc/include/asm/asi.h b/arch/sparc/include/asm/asi.h
index cbb93e5..61ebe74 100644
--- a/arch/sparc/include/asm/asi.h
+++ b/arch/sparc/include/asm/asi.h
@@ -40,11 +40,7 @@
 #define ASI_M_UNA01         0x01   /* Same here... */
 #define ASI_M_MXCC          0x02   /* Access to TI VIKING MXCC registers */
 #define ASI_M_FLUSH_PROBE   0x03   /* Reference MMU Flush/Probe; rw, ss */
-#ifndef CONFIG_SPARC_LEON
 #define ASI_M_MMUREGS       0x04   /* MMU Registers; rw, ss */
-#else
-#define ASI_M_MMUREGS       0x19
-#endif /* CONFIG_SPARC_LEON */
 #define ASI_M_TLBDIAG       0x05   /* MMU TLB only Diagnostics */
 #define ASI_M_DIAGS         0x06   /* Reference MMU Diagnostics */
 #define ASI_M_IODIAG        0x07   /* MMU I/O TLB only Diagnostics */
diff --git a/arch/sparc/include/asm/asmmacro.h b/arch/sparc/include/asm/asmmacro.h
index 02a172f..a0e28ef 100644
--- a/arch/sparc/include/asm/asmmacro.h
+++ b/arch/sparc/include/asm/asmmacro.h
@@ -20,4 +20,26 @@
 /* All traps low-level code here must end with this macro. */
 #define RESTORE_ALL b ret_trap_entry; clr %l6;
 
+/* Support for run-time patching of single instructions.
+ * This is used to handle the differences in the ASI for
+ * MMUREGS for LEON and SUN.
+ *
+ * Sample:
+ * LEON_PI(lda [%g0] ASI_LEON_MMUREGS, %o0)
+ * SUN_PI_(lda [%g0] ASI_M_MMUREGS, %o0)
+ * PI == Patch Instruction
+ *
+ * For LEON we will use the first variant,
+ * and for all others we will use the SUN variant.
+ * The order is important.
+ */
+#define LEON_PI(...)				\
+662:	__VA_ARGS__
+
+#define SUN_PI_(...)				\
+	.section .leon_1insn_patch, "ax";	\
+	.word 662b;				\
+	__VA_ARGS__;				\
+	.previous
+
 #endif /* !(_SPARC_ASMMACRO_H) */
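With these macros, SUN_PI_() records each patch site as a pair {address of the LEON_PI() instruction, SUN replacement instruction} in the .leon_1insn_patch section, bounded by the __leon_1insn_patch[]/__leon_1insn_patch_end[] symbols declared in asm/sections.h further down. A rough sketch of how such a table can be applied at early boot on non-LEON machines (the entry layout and function name are assumptions for illustration, not the routine added by this series):

	struct leon_1insn_patch_entry {
		unsigned int addr;	/* address recorded by ".word 662b" */
		unsigned int insn;	/* the SUN_PI_() replacement instruction */
	};

	static void __init apply_leon_1insn_patches(void)
	{
		struct leon_1insn_patch_entry *p = (void *)__leon_1insn_patch;
		struct leon_1insn_patch_entry *end = (void *)__leon_1insn_patch_end;

		if (sparc_cpu_model == sparc_leon)
			return;		/* keep the in-line LEON variant */

		for (; p < end; p++) {
			*(unsigned int *)(unsigned long)p->addr = p->insn;
			/* flushing the patched word from the I-cache would follow here */
		}
	}
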
diff --git a/arch/sparc/include/asm/cmt.h b/arch/sparc/include/asm/cmt.h
deleted file mode 100644
index 870db59..0000000
--- a/arch/sparc/include/asm/cmt.h
+++ /dev/null
@@ -1,59 +0,0 @@
-#ifndef _SPARC64_CMT_H
-#define _SPARC64_CMT_H
-
-/* cmt.h: Chip Multi-Threading register definitions
- *
- * Copyright (C) 2004 David S. Miller (davem@redhat.com)
- */
-
-/* ASI_CORE_ID - private */
-#define LP_ID		0x0000000000000010UL
-#define  LP_ID_MAX	0x00000000003f0000UL
-#define  LP_ID_ID	0x000000000000003fUL
-
-/* ASI_INTR_ID - private */
-#define LP_INTR_ID	0x0000000000000000UL
-#define  LP_INTR_ID_ID	0x00000000000003ffUL
-
-/* ASI_CESR_ID - private */
-#define CESR_ID		0x0000000000000040UL
-#define  CESR_ID_ID	0x00000000000000ffUL
-
-/* ASI_CORE_AVAILABLE - shared */
-#define LP_AVAIL	0x0000000000000000UL
-#define  LP_AVAIL_1	0x0000000000000002UL
-#define  LP_AVAIL_0	0x0000000000000001UL
-
-/* ASI_CORE_ENABLE_STATUS - shared */
-#define LP_ENAB_STAT	0x0000000000000010UL
-#define  LP_ENAB_STAT_1	0x0000000000000002UL
-#define  LP_ENAB_STAT_0	0x0000000000000001UL
-
-/* ASI_CORE_ENABLE - shared */
-#define LP_ENAB		0x0000000000000020UL
-#define  LP_ENAB_1	0x0000000000000002UL
-#define  LP_ENAB_0	0x0000000000000001UL
-
-/* ASI_CORE_RUNNING - shared */
-#define LP_RUNNING_RW	0x0000000000000050UL
-#define LP_RUNNING_W1S	0x0000000000000060UL
-#define LP_RUNNING_W1C	0x0000000000000068UL
-#define  LP_RUNNING_1	0x0000000000000002UL
-#define  LP_RUNNING_0	0x0000000000000001UL
-
-/* ASI_CORE_RUNNING_STAT - shared */
-#define LP_RUN_STAT	0x0000000000000058UL
-#define  LP_RUN_STAT_1	0x0000000000000002UL
-#define  LP_RUN_STAT_0	0x0000000000000001UL
-
-/* ASI_XIR_STEERING - shared */
-#define LP_XIR_STEER	0x0000000000000030UL
-#define  LP_XIR_STEER_1	0x0000000000000002UL
-#define  LP_XIR_STEER_0	0x0000000000000001UL
-
-/* ASI_CMT_ERROR_STEERING - shared */
-#define CMT_ER_STEER	0x0000000000000040UL
-#define  CMT_ER_STEER_1	0x0000000000000002UL
-#define  CMT_ER_STEER_0	0x0000000000000001UL
-
-#endif /* _SPARC64_CMT_H */
diff --git a/arch/sparc/include/asm/dma-mapping.h b/arch/sparc/include/asm/dma-mapping.h
index 48a7c65..8493fd3 100644
--- a/arch/sparc/include/asm/dma-mapping.h
+++ b/arch/sparc/include/asm/dma-mapping.h
@@ -12,13 +12,18 @@
 #define dma_alloc_noncoherent(d, s, h, f) dma_alloc_coherent(d, s, h, f)
 #define dma_free_noncoherent(d, s, v, h) dma_free_coherent(d, s, v, h)
 
-extern struct dma_map_ops *dma_ops, pci32_dma_ops;
+extern struct dma_map_ops *dma_ops;
+extern struct dma_map_ops *leon_dma_ops;
+extern struct dma_map_ops pci32_dma_ops;
+
 extern struct bus_type pci_bus_type;
 
 static inline struct dma_map_ops *get_dma_ops(struct device *dev)
 {
 #if defined(CONFIG_SPARC32) && defined(CONFIG_PCI)
-	if (dev->bus == &pci_bus_type)
+	if (sparc_cpu_model == sparc_leon)
+		return leon_dma_ops;
+	else if (dev->bus == &pci_bus_type)
 		return &pci32_dma_ops;
 #endif
 	return dma_ops;
diff --git a/arch/sparc/include/asm/leon.h b/arch/sparc/include/asm/leon.h
index 0765912..3375c62 100644
--- a/arch/sparc/include/asm/leon.h
+++ b/arch/sparc/include/asm/leon.h
@@ -8,8 +8,6 @@
 #ifndef LEON_H_INCLUDE
 #define LEON_H_INCLUDE
 
-#ifdef CONFIG_SPARC_LEON
-
 /* mmu register access, ASI_LEON_MMUREGS */
 #define LEON_CNR_CTRL		0x000
 #define LEON_CNR_CTXP		0x100
@@ -62,15 +60,6 @@
 
 #ifndef __ASSEMBLY__
 
-/* do a virtual address read without cache */
-static inline unsigned long leon_readnobuffer_reg(unsigned long paddr)
-{
-	unsigned long retval;
-	__asm__ __volatile__("lda [%1] %2, %0\n\t" :
-			     "=r"(retval) : "r"(paddr), "i"(ASI_LEON_NOCACHE));
-	return retval;
-}
-
 /* do a physical address bypass write, i.e. for 0x80000000 */
 static inline void leon_store_reg(unsigned long paddr, unsigned long value)
 {
@@ -87,47 +76,16 @@
 	return retval;
 }
 
-static inline void leon_srmmu_disabletlb(void)
-{
-	unsigned int retval;
-	__asm__ __volatile__("lda [%%g0] %2, %0\n\t" : "=r"(retval) : "r"(0),
-			     "i"(ASI_LEON_MMUREGS));
-	retval |= LEON_CNR_CTRL_TLBDIS;
-	__asm__ __volatile__("sta %0, [%%g0] %2\n\t" : : "r"(retval), "r"(0),
-			     "i"(ASI_LEON_MMUREGS) : "memory");
-}
-
-static inline void leon_srmmu_enabletlb(void)
-{
-	unsigned int retval;
-	__asm__ __volatile__("lda [%%g0] %2, %0\n\t" : "=r"(retval) : "r"(0),
-			     "i"(ASI_LEON_MMUREGS));
-	retval = retval & ~LEON_CNR_CTRL_TLBDIS;
-	__asm__ __volatile__("sta %0, [%%g0] %2\n\t" : : "r"(retval), "r"(0),
-			     "i"(ASI_LEON_MMUREGS) : "memory");
-}
-
 /* macro access for leon_load_reg() and leon_store_reg() */
 #define LEON3_BYPASS_LOAD_PA(x)	    (leon_load_reg((unsigned long)(x)))
 #define LEON3_BYPASS_STORE_PA(x, v) (leon_store_reg((unsigned long)(x), (unsigned long)(v)))
-#define LEON3_BYPASS_ANDIN_PA(x, v) LEON3_BYPASS_STORE_PA(x, LEON3_BYPASS_LOAD_PA(x) & v)
-#define LEON3_BYPASS_ORIN_PA(x, v)  LEON3_BYPASS_STORE_PA(x, LEON3_BYPASS_LOAD_PA(x) | v)
 #define LEON_BYPASS_LOAD_PA(x)      leon_load_reg((unsigned long)(x))
 #define LEON_BYPASS_STORE_PA(x, v)  leon_store_reg((unsigned long)(x), (unsigned long)(v))
-#define LEON_REGLOAD_PA(x)          leon_load_reg((unsigned long)(x)+LEON_PREGS)
-#define LEON_REGSTORE_PA(x, v)      leon_store_reg((unsigned long)(x)+LEON_PREGS, (unsigned long)(v))
-#define LEON_REGSTORE_OR_PA(x, v)   LEON_REGSTORE_PA(x, LEON_REGLOAD_PA(x) | (unsigned long)(v))
-#define LEON_REGSTORE_AND_PA(x, v)  LEON_REGSTORE_PA(x, LEON_REGLOAD_PA(x) & (unsigned long)(v))
-
-/* macro access for leon_readnobuffer_reg() */
-#define LEON_BYPASSCACHE_LOAD_VA(x) leon_readnobuffer_reg((unsigned long)(x))
 
 extern void leon_init(void);
 extern void leon_switch_mm(void);
 extern void leon_init_IRQ(void);
 
-extern unsigned long last_valid_pfn;
-
 static inline unsigned long sparc_leon3_get_dcachecfg(void)
 {
 	unsigned int retval;
@@ -230,9 +188,6 @@
 #error cannot determine LEON_PAGE_SIZE_LEON
 #endif
 
-#define PAGE_MIN_SHIFT   (12)
-#define PAGE_MIN_SIZE    (1UL << PAGE_MIN_SHIFT)
-
 #define LEON3_XCCR_SETS_MASK  0x07000000UL
 #define LEON3_XCCR_SSIZE_MASK 0x00f00000UL
 
@@ -242,7 +197,7 @@
 #ifndef __ASSEMBLY__
 struct vm_area_struct;
 
-extern unsigned long srmmu_swprobe(unsigned long vaddr, unsigned long *paddr);
+extern unsigned long leon_swprobe(unsigned long vaddr, unsigned long *paddr);
 extern void leon_flush_icache_all(void);
 extern void leon_flush_dcache_all(void);
 extern void leon_flush_cache_all(void);
@@ -258,15 +213,7 @@
 	unsigned long dccr;	/* 0x0c - Data Cache Configuration Register */
 };
 
-/* struct that hold LEON2 cache configuration register
- * & configuration register
- */
-struct leon2_cacheregs {
-	unsigned long ccr, cfg;
-};
-
-#ifdef __KERNEL__
-
+#include <linux/irq.h>
 #include <linux/interrupt.h>
 
 struct device_node;
@@ -292,24 +239,15 @@
 extern void leon_boot_cpus(void);
 extern int leon_boot_one_cpu(int i, struct task_struct *);
 void leon_init_smp(void);
-extern void cpu_idle(void);
-extern void init_IRQ(void);
-extern void cpu_panic(void);
-extern int __leon_processor_id(void);
 void leon_enable_irq_cpu(unsigned int irq_nr, unsigned int cpu);
 extern irqreturn_t leon_percpu_timer_interrupt(int irq, void *unused);
 
-extern unsigned int real_irq_entry[];
 extern unsigned int smpleon_ipi[];
-extern unsigned int patchme_maybe_smp_msg[];
-extern unsigned int t_nmi[], linux_trap_ipi15_leon[];
-extern unsigned int linux_trap_ipi15_sun4m[];
+extern unsigned int linux_trap_ipi15_leon[];
 extern int leon_ipi_irq;
 
 #endif /* CONFIG_SMP */
 
-#endif /* __KERNEL__ */
-
 #endif /* __ASSEMBLY__ */
 
 /* macros used in leon_mm.c */
@@ -317,18 +255,4 @@
 #define _pfn_valid(pfn)	 ((pfn < last_valid_pfn) && (pfn >= PFN(phys_base)))
 #define _SRMMU_PTE_PMASK_LEON 0xffffffff
 
-#else /* defined(CONFIG_SPARC_LEON) */
-
-/* nop definitions for !LEON case */
-#define leon_init() do {} while (0)
-#define leon_switch_mm() do {} while (0)
-#define leon_init_IRQ() do {} while (0)
-#define init_leon() do {} while (0)
-#define leon_smp_done() do {} while (0)
-#define leon_boot_cpus() do {} while (0)
-#define leon_boot_one_cpu(i, t) 1
-#define leon_init_smp() do {} while (0)
-
-#endif /* !defined(CONFIG_SPARC_LEON) */
-
 #endif
diff --git a/arch/sparc/include/asm/leon_amba.h b/arch/sparc/include/asm/leon_amba.h
index e50f326..f3034ed 100644
--- a/arch/sparc/include/asm/leon_amba.h
+++ b/arch/sparc/include/asm/leon_amba.h
@@ -87,8 +87,6 @@
 #define LEON3_GPTIMER_CONFIG_NRTIMERS(c) ((c)->config & 0x7)
 #define LEON3_GPTIMER_CTRL_ISPENDING(r)  (((r)&LEON3_GPTIMER_CTRL_PENDING) ? 1 : 0)
 
-#ifdef CONFIG_SPARC_LEON
-
 #ifndef __ASSEMBLY__
 
 struct leon3_irqctrl_regs_map {
@@ -264,6 +262,4 @@
 
 #define amba_device(x) (((x) >> 12) & 0xfff)
 
-#endif /* !defined(CONFIG_SPARC_LEON) */
-
 #endif
diff --git a/arch/sparc/include/asm/mpmbox.h b/arch/sparc/include/asm/mpmbox.h
deleted file mode 100644
index f842303..0000000
--- a/arch/sparc/include/asm/mpmbox.h
+++ /dev/null
@@ -1,67 +0,0 @@
-/*
- * mpmbox.h:  Interface and defines for the OpenProm mailbox
- *               facilities for MP machines under Linux.
- *
- * Copyright (C) 1995 David S. Miller (davem@caip.rutgers.edu)
- */
-
-#ifndef _SPARC_MPMBOX_H
-#define _SPARC_MPMBOX_H
-
-/* The prom allocates, for each CPU on the machine an unsigned
- * byte in physical ram.  You probe the device tree prom nodes
- * for these values.  The purpose of this byte is to be able to
- * pass messages from one cpu to another.
- */
-
-/* These are the main message types we have to look for in our
- * Cpu mailboxes, based upon these values we decide what course
- * of action to take.
- */
-
-/* The CPU is executing code in the kernel. */
-#define MAILBOX_ISRUNNING     0xf0
-
-/* Another CPU called romvec->pv_exit(), you should call
- * prom_stopcpu() when you see this in your mailbox.
- */
-#define MAILBOX_EXIT          0xfb
-
-/* Another CPU called romvec->pv_enter(), you should call
- * prom_cpuidle() when this is seen.
- */
-#define MAILBOX_GOSPIN        0xfc
-
-/* Another CPU has hit a breakpoint either into kadb or the prom
- * itself.  Just like MAILBOX_GOSPIN, you should call prom_cpuidle()
- * at this point.
- */
-#define MAILBOX_BPT_SPIN      0xfd
-
-/* Oh geese, some other nitwit got a damn watchdog reset.  The party's
- * over so go call prom_stopcpu().
- */
-#define MAILBOX_WDOG_STOP     0xfe
-
-#ifndef __ASSEMBLY__
-
-/* Handy macro's to determine a cpu's state. */
-
-/* Is the cpu still in Power On Self Test? */
-#define MBOX_POST_P(letter)  ((letter) >= 0x00 && (letter) <= 0x7f)
-
-/* Is the cpu at the 'ok' prompt of the PROM? */
-#define MBOX_PROMPROMPT_P(letter) ((letter) >= 0x80 && (letter) <= 0x8f)
-
-/* Is the cpu spinning in the PROM? */
-#define MBOX_PROMSPIN_P(letter) ((letter) >= 0x90 && (letter) <= 0xef)
-
-/* Sanity check... This is junk mail, throw it out. */
-#define MBOX_BOGON_P(letter) ((letter) >= 0xf1 && (letter) <= 0xfa)
-
-/* Is the cpu actively running an application/kernel-code? */
-#define MBOX_RUNNING_P(letter) ((letter) == MAILBOX_ISRUNNING)
-
-#endif /* !(__ASSEMBLY__) */
-
-#endif /* !(_SPARC_MPMBOX_H) */
diff --git a/arch/sparc/include/asm/pgtsrmmu.h b/arch/sparc/include/asm/pgtsrmmu.h
index cb82870..79da178 100644
--- a/arch/sparc/include/asm/pgtsrmmu.h
+++ b/arch/sparc/include/asm/pgtsrmmu.h
@@ -139,6 +139,7 @@
 	 restore %g0, %g0, %g0;
 
 #ifndef __ASSEMBLY__
+extern unsigned long last_valid_pfn;
 
 /* This makes sense. Honest it does - Anton */
 /* XXX Yes but it's ugly as sin.  FIXME. -KMW */
@@ -148,67 +149,13 @@
 #define __nocache_fix(VADDR) __va(__nocache_pa(VADDR))
 
 /* Accessing the MMU control register. */
-static inline unsigned int srmmu_get_mmureg(void)
-{
-        unsigned int retval;
-	__asm__ __volatile__("lda [%%g0] %1, %0\n\t" :
-			     "=r" (retval) :
-			     "i" (ASI_M_MMUREGS));
-	return retval;
-}
-
-static inline void srmmu_set_mmureg(unsigned long regval)
-{
-	__asm__ __volatile__("sta %0, [%%g0] %1\n\t" : :
-			     "r" (regval), "i" (ASI_M_MMUREGS) : "memory");
-
-}
-
-static inline void srmmu_set_ctable_ptr(unsigned long paddr)
-{
-	paddr = ((paddr >> 4) & SRMMU_CTX_PMASK);
-	__asm__ __volatile__("sta %0, [%1] %2\n\t" : :
-			     "r" (paddr), "r" (SRMMU_CTXTBL_PTR),
-			     "i" (ASI_M_MMUREGS) :
-			     "memory");
-}
-
-static inline void srmmu_set_context(int context)
-{
-	__asm__ __volatile__("sta %0, [%1] %2\n\t" : :
-			     "r" (context), "r" (SRMMU_CTX_REG),
-			     "i" (ASI_M_MMUREGS) : "memory");
-}
-
-static inline int srmmu_get_context(void)
-{
-	register int retval;
-	__asm__ __volatile__("lda [%1] %2, %0\n\t" :
-			     "=r" (retval) :
-			     "r" (SRMMU_CTX_REG),
-			     "i" (ASI_M_MMUREGS));
-	return retval;
-}
-
-static inline unsigned int srmmu_get_fstatus(void)
-{
-	unsigned int retval;
-
-	__asm__ __volatile__("lda [%1] %2, %0\n\t" :
-			     "=r" (retval) :
-			     "r" (SRMMU_FAULT_STATUS), "i" (ASI_M_MMUREGS));
-	return retval;
-}
-
-static inline unsigned int srmmu_get_faddr(void)
-{
-	unsigned int retval;
-
-	__asm__ __volatile__("lda [%1] %2, %0\n\t" :
-			     "=r" (retval) :
-			     "r" (SRMMU_FAULT_ADDR), "i" (ASI_M_MMUREGS));
-	return retval;
-}
+unsigned int srmmu_get_mmureg(void);
+void srmmu_set_mmureg(unsigned long regval);
+void srmmu_set_ctable_ptr(unsigned long paddr);
+void srmmu_set_context(int context);
+int srmmu_get_context(void);
+unsigned int srmmu_get_fstatus(void);
+unsigned int srmmu_get_faddr(void);
 
 /* This is guaranteed on all SRMMU's. */
 static inline void srmmu_flush_whole_tlb(void)
@@ -219,23 +166,6 @@
 
 }
 
-/* These flush types are not available on all chips... */
-#ifndef CONFIG_SPARC_LEON
-static inline unsigned long srmmu_hwprobe(unsigned long vaddr)
-{
-	unsigned long retval;
-
-	vaddr &= PAGE_MASK;
-	__asm__ __volatile__("lda [%1] %2, %0\n\t" :
-			     "=r" (retval) :
-			     "r" (vaddr | 0x400), "i" (ASI_M_FLUSH_PROBE));
-
-	return retval;
-}
-#else
-#define srmmu_hwprobe(addr) srmmu_swprobe(addr, 0)
-#endif
-
 static inline int
 srmmu_get_pte (unsigned long addr)
 {
diff --git a/arch/sparc/include/asm/posix_types.h b/arch/sparc/include/asm/posix_types.h
index 3070f25..156220e 100644
--- a/arch/sparc/include/asm/posix_types.h
+++ b/arch/sparc/include/asm/posix_types.h
@@ -9,8 +9,6 @@
 
 #if defined(__sparc__) && defined(__arch64__)
 /* sparc 64 bit */
-typedef unsigned int           __kernel_nlink_t;
-#define __kernel_nlink_t __kernel_nlink_t
 
 typedef unsigned short 	       __kernel_old_uid_t;
 typedef unsigned short         __kernel_old_gid_t;
@@ -38,9 +36,6 @@
 typedef unsigned short         __kernel_mode_t;
 #define __kernel_mode_t __kernel_mode_t
 
-typedef short                  __kernel_nlink_t;
-#define __kernel_nlink_t __kernel_nlink_t
-
 typedef long                   __kernel_daddr_t;
 #define __kernel_daddr_t __kernel_daddr_t
 
diff --git a/arch/sparc/include/asm/psr.h b/arch/sparc/include/asm/psr.h
index b8c0e5f..cee7ed9 100644
--- a/arch/sparc/include/asm/psr.h
+++ b/arch/sparc/include/asm/psr.h
@@ -35,6 +35,14 @@
 #define PSR_VERS    0x0f000000         /* cpu-version field          */
 #define PSR_IMPL    0xf0000000         /* cpu-implementation field   */
 
+#define PSR_VERS_SHIFT		24
+#define PSR_IMPL_SHIFT		28
+#define PSR_VERS_SHIFTED_MASK	0xf
+#define PSR_IMPL_SHIFTED_MASK	0xf
+
+#define PSR_IMPL_TI		0x4
+#define PSR_IMPL_LEON		0xf
+
 #ifdef __KERNEL__
 
 #ifndef __ASSEMBLY__
diff --git a/arch/sparc/include/asm/sections.h b/arch/sparc/include/asm/sections.h
index 0b0553b..f300d1a 100644
--- a/arch/sparc/include/asm/sections.h
+++ b/arch/sparc/include/asm/sections.h
@@ -7,4 +7,7 @@
 /* sparc entry point */
 extern char _start[];
 
+extern char __leon_1insn_patch[];
+extern char __leon_1insn_patch_end[];
+
 #endif
diff --git a/arch/sparc/include/asm/thread_info_32.h b/arch/sparc/include/asm/thread_info_32.h
index 5af6649..e6cd224 100644
--- a/arch/sparc/include/asm/thread_info_32.h
+++ b/arch/sparc/include/asm/thread_info_32.h
@@ -131,8 +131,7 @@
 #define _TIF_POLLING_NRFLAG	(1<<TIF_POLLING_NRFLAG)
 
 #define _TIF_DO_NOTIFY_RESUME_MASK	(_TIF_NOTIFY_RESUME | \
-					 _TIF_SIGPENDING | \
-					 _TIF_RESTORE_SIGMASK)
+					 _TIF_SIGPENDING)
 
 #endif /* __KERNEL__ */
 
diff --git a/arch/sparc/include/asm/thread_info_64.h b/arch/sparc/include/asm/thread_info_64.h
index 7f0981b..cfa8c38 100644
--- a/arch/sparc/include/asm/thread_info_64.h
+++ b/arch/sparc/include/asm/thread_info_64.h
@@ -238,7 +238,23 @@
 {
 	struct thread_info *ti = current_thread_info();
 	ti->status |= TS_RESTORE_SIGMASK;
-	set_bit(TIF_SIGPENDING, &ti->flags);
+	WARN_ON(!test_bit(TIF_SIGPENDING, &ti->flags));
+}
+static inline void clear_restore_sigmask(void)
+{
+	current_thread_info()->status &= ~TS_RESTORE_SIGMASK;
+}
+static inline bool test_restore_sigmask(void)
+{
+	return current_thread_info()->status & TS_RESTORE_SIGMASK;
+}
+static inline bool test_and_clear_restore_sigmask(void)
+{
+	struct thread_info *ti = current_thread_info();
+	if (!(ti->status & TS_RESTORE_SIGMASK))
+		return false;
+	ti->status &= ~TS_RESTORE_SIGMASK;
+	return true;
 }
 #endif	/* !__ASSEMBLY__ */
 
diff --git a/arch/sparc/include/asm/uaccess_32.h b/arch/sparc/include/asm/uaccess_32.h
index 59586b5..53a28dd 100644
--- a/arch/sparc/include/asm/uaccess_32.h
+++ b/arch/sparc/include/asm/uaccess_32.h
@@ -16,6 +16,8 @@
 
 #ifndef __ASSEMBLY__
 
+#include <asm/processor.h>
+
 #define ARCH_HAS_SORT_EXTABLE
 #define ARCH_HAS_SEARCH_EXTABLE
 
@@ -304,24 +306,8 @@
 		return n;
 }
 
-extern long __strlen_user(const char __user *);
-extern long __strnlen_user(const char __user *, long len);
-
-static inline long strlen_user(const char __user *str)
-{
-	if (!access_ok(VERIFY_READ, str, 0))
-		return 0;
-	else
-		return __strlen_user(str);
-}
-
-static inline long strnlen_user(const char __user *str, long len)
-{
-	if (!access_ok(VERIFY_READ, str, 0))
-		return 0;
-	else
-		return __strnlen_user(str, len);
-}
+extern __must_check long strlen_user(const char __user *str);
+extern __must_check long strnlen_user(const char __user *str, long n);
 
 #endif  /* __ASSEMBLY__ */
 
diff --git a/arch/sparc/include/asm/uaccess_64.h b/arch/sparc/include/asm/uaccess_64.h
index dcdfb89..7c831d8 100644
--- a/arch/sparc/include/asm/uaccess_64.h
+++ b/arch/sparc/include/asm/uaccess_64.h
@@ -17,6 +17,8 @@
 
 #ifndef __ASSEMBLY__
 
+#include <asm/processor.h>
+
 /*
  * Sparc64 is segmented, though more like the M68K than the I386.
  * We use the secondary ASI to address user memory, which references a
@@ -257,11 +259,9 @@
 
 #define clear_user __clear_user
 
-extern long __strlen_user(const char __user *);
-extern long __strnlen_user(const char __user *, long len);
+extern __must_check long strlen_user(const char __user *str);
+extern __must_check long strnlen_user(const char __user *str, long n);
 
-#define strlen_user __strlen_user
-#define strnlen_user __strnlen_user
 #define __copy_to_user_inatomic ___copy_to_user
 #define __copy_from_user_inatomic ___copy_from_user
 
diff --git a/arch/sparc/kernel/Makefile b/arch/sparc/kernel/Makefile
index 72308f9..6cf591b 100644
--- a/arch/sparc/kernel/Makefile
+++ b/arch/sparc/kernel/Makefile
@@ -51,8 +51,8 @@
 obj-y                   += of_device_$(BITS).o
 obj-$(CONFIG_SPARC64)   += prom_irqtrans.o
 
-obj-$(CONFIG_SPARC_LEON)+= leon_kernel.o
-obj-$(CONFIG_SPARC_LEON)+= leon_pmc.o
+obj-$(CONFIG_SPARC32)   += leon_kernel.o
+obj-$(CONFIG_SPARC32)   += leon_pmc.o
 
 obj-$(CONFIG_SPARC64)   += reboot.o
 obj-$(CONFIG_SPARC64)   += sysfs.o
diff --git a/arch/sparc/kernel/cpu.c b/arch/sparc/kernel/cpu.c
index 2d18196..a6c94a2 100644
--- a/arch/sparc/kernel/cpu.c
+++ b/arch/sparc/kernel/cpu.c
@@ -121,7 +121,7 @@
 		FPU(-1, NULL)
 	}
 },{
-	4,
+	PSR_IMPL_TI,
 	.cpu_info = {
 		CPU(0, "Texas Instruments, Inc. - SuperSparc-(II)"),
 		/* SparcClassic  --  borned STP1010TAB-50*/
@@ -191,7 +191,7 @@
 		FPU(-1, NULL)
 	}
 },{
-	0xF,		/* Aeroflex Gaisler */
+	PSR_IMPL_LEON,		/* Aeroflex Gaisler */
 	.cpu_info = {
 		CPU(3, "LEON"),
 		CPU(-1, NULL)
@@ -440,16 +440,16 @@
 	int psr_impl, psr_vers, fpu_vers;
 	int psr;
 
-	psr_impl = ((get_psr() >> 28) & 0xf);
-	psr_vers = ((get_psr() >> 24) & 0xf);
+	psr_impl = ((get_psr() >> PSR_IMPL_SHIFT) & PSR_IMPL_SHIFTED_MASK);
+	psr_vers = ((get_psr() >> PSR_VERS_SHIFT) & PSR_VERS_SHIFTED_MASK);
 
 	psr = get_psr();
 	put_psr(psr | PSR_EF);
-#ifdef CONFIG_SPARC_LEON
-	fpu_vers = get_psr() & PSR_EF ? ((get_fsr() >> 17) & 0x7) : 7;
-#else
-	fpu_vers = ((get_fsr() >> 17) & 0x7);
-#endif
+
+	if (psr_impl == PSR_IMPL_LEON)
+		fpu_vers = get_psr() & PSR_EF ? ((get_fsr() >> 17) & 0x7) : 7;
+	else
+		fpu_vers = ((get_fsr() >> 17) & 0x7);
 
 	put_psr(psr);
 
diff --git a/arch/sparc/kernel/entry.S b/arch/sparc/kernel/entry.S
index 2dbe180..dcaa1cf 100644
--- a/arch/sparc/kernel/entry.S
+++ b/arch/sparc/kernel/entry.S
@@ -393,7 +393,6 @@
 	/* FIXME */
 1:	b,a	1b
 
-#ifdef CONFIG_SPARC_LEON
 	.globl	smpleon_ipi
 	.extern leon_ipi_interrupt
 	/* SMP per-cpu IPI interrupts are handled specially. */
@@ -424,8 +423,6 @@
 	b	ret_trap_lockless_ipi
 	 clr	%l6
 
-#endif /* CONFIG_SPARC_LEON */
-
 #endif /* CONFIG_SMP */
 
 	/* This routine handles illegal instructions and privileged
@@ -770,8 +767,11 @@
 	mov	0x400, %l5
 	mov	0x300, %l4
 
-	lda	[%l5] ASI_M_MMUREGS, %l6	! read sfar first
-	lda	[%l4] ASI_M_MMUREGS, %l5	! read sfsr last
+LEON_PI(lda	[%l5] ASI_LEON_MMUREGS, %l6)	! read sfar first
+SUN_PI_(lda	[%l5] ASI_M_MMUREGS, %l6)	! read sfar first
+
+LEON_PI(lda	[%l4] ASI_LEON_MMUREGS, %l5)	! read sfsr last
+SUN_PI_(lda	[%l4] ASI_M_MMUREGS, %l5)	! read sfsr last
 
 	andn	%l6, 0xfff, %l6
 	srl	%l5, 6, %l5			! and encode all info into l7
diff --git a/arch/sparc/kernel/etrap_32.S b/arch/sparc/kernel/etrap_32.S
index 84b5f0d..e3e80d6 100644
--- a/arch/sparc/kernel/etrap_32.S
+++ b/arch/sparc/kernel/etrap_32.S
@@ -234,7 +234,8 @@
 
 	cmp	%glob_tmp, %sp
 	bleu,a	1f
-	 lda	[%g0] ASI_M_MMUREGS, %glob_tmp		! read MMU control
+LEON_PI( lda	[%g0] ASI_LEON_MMUREGS, %glob_tmp)	! read MMU control
+SUN_PI_( lda	[%g0] ASI_M_MMUREGS, %glob_tmp)		! read MMU control
 
 trap_setup_user_stack_is_bolixed:
 	/* From user/kernel into invalid window w/bad user
@@ -249,18 +250,25 @@
 1:
 	/* Clear the fault status and turn on the no_fault bit. */
 	or	%glob_tmp, 0x2, %glob_tmp		! or in no_fault bit
-	sta	%glob_tmp, [%g0] ASI_M_MMUREGS		! set it
+LEON_PI(sta	%glob_tmp, [%g0] ASI_LEON_MMUREGS)		! set it
+SUN_PI_(sta	%glob_tmp, [%g0] ASI_M_MMUREGS)		! set it
 
 	/* Dump the registers and cross fingers. */
 	STORE_WINDOW(sp)
 
 	/* Clear the no_fault bit and check the status. */
 	andn	%glob_tmp, 0x2, %glob_tmp
-	sta	%glob_tmp, [%g0] ASI_M_MMUREGS
+LEON_PI(sta	%glob_tmp, [%g0] ASI_LEON_MMUREGS)
+SUN_PI_(sta	%glob_tmp, [%g0] ASI_M_MMUREGS)
+
 	mov	AC_M_SFAR, %glob_tmp
-	lda	[%glob_tmp] ASI_M_MMUREGS, %g0
+LEON_PI(lda	[%glob_tmp] ASI_LEON_MMUREGS, %g0)
+SUN_PI_(lda	[%glob_tmp] ASI_M_MMUREGS, %g0)
+
 	mov	AC_M_SFSR, %glob_tmp
-	lda	[%glob_tmp] ASI_M_MMUREGS, %glob_tmp	! save away status of winstore
+LEON_PI(lda	[%glob_tmp] ASI_LEON_MMUREGS, %glob_tmp)! save away status of winstore
+SUN_PI_(lda	[%glob_tmp] ASI_M_MMUREGS, %glob_tmp)	! save away status of winstore
+
 	andcc	%glob_tmp, 0x2, %g0			! did we fault?
 	bne	trap_setup_user_stack_is_bolixed	! failure
 	 nop
diff --git a/arch/sparc/kernel/head_32.S b/arch/sparc/kernel/head_32.S
index a0f5c20..afeb1d7 100644
--- a/arch/sparc/kernel/head_32.S
+++ b/arch/sparc/kernel/head_32.S
@@ -30,10 +30,6 @@
  * the cpu-type
  */
 	.align 4
-cputyp:
-        .word   1
-
-	.align 4
 	.globl cputypval
 cputypval:
 	.asciz "sun4m"
@@ -46,8 +42,8 @@
 
 	.align 4
 
-sun4c_notsup:
-	.asciz	"Sparc-Linux sun4/sun4c support does no longer exist.\n\n"
+notsup:
+	.asciz	"Sparc-Linux sun4/sun4c or MMU-less not supported\n\n"
 	.align 4
 
 sun4e_notsup:
@@ -123,7 +119,7 @@
 		tst	%o0
 		be	no_sun4u_here
 		 mov	%g4, %o7		/* Previous %o7. */
-	
+
 		mov	%o0, %l0		! stash away romvec
 		mov	%o0, %g7		! put it here too
 		mov	%o1, %l1		! stash away debug_vec too
@@ -132,7 +128,7 @@
 		set	current_pc, %g5
 		cmp	%g3, %g5
 		be	already_mapped
-		 nop 
+		 nop
 
 		/* %l6 will hold the offset we have to subtract
 		 * from absolute symbols in order to access areas
@@ -192,9 +188,9 @@
 		bne	not_a_sun4
 		 nop
 
-halt_sun4_or_sun4c:
+halt_notsup:
 		ld	[%g7 + 0x68], %o1
-		set	sun4c_notsup, %o0
+		set	notsup, %o0
 		sub	%o0, %l6, %o0
 		call	%o1
 		 nop
@@ -202,18 +198,31 @@
 		 nop
 
 not_a_sun4:
-		lda	[%g0] ASI_M_MMUREGS, %g1
-		andcc	%g1, 1, %g0
-		be	halt_sun4_or_sun4c
+		/* It looks like this is a machine we support.
+		 * Now find out what MMU we are dealing with
+		 * LEON - identified by the psr.impl field
+		 * Viking - identified by the psr.impl field
+		 * In all other cases a sun4m srmmu.
+		 * We check that the MMU is enabled in all cases.
+		 */
+
+		/* Check if this is a LEON CPU */
+		rd	%psr, %g3
+		srl	%g3, PSR_IMPL_SHIFT, %g3
+		and	%g3, PSR_IMPL_SHIFTED_MASK, %g3
+		cmp	%g3, PSR_IMPL_LEON
+		be	leon_remap		/* It is a LEON - jump */
 		 nop
 
-srmmu_remap:
-		/* First, check for a viking (TI) module. */
-		set	0x40000000, %g2
-		rd	%psr, %g3
-		and	%g2, %g3, %g3
-		subcc	%g3, 0x0, %g0
-		bz	srmmu_nviking
+		/* Sanity-check, is MMU enabled */
+		lda	[%g0] ASI_M_MMUREGS, %g1
+		andcc	%g1, 1, %g0
+		be	halt_notsup
+		 nop
+
+		/* Check for a viking (TI) module. */
+		cmp	%g3, PSR_IMPL_TI
+		bne	srmmu_not_viking
 		 nop
 
 		/* Figure out what kind of viking we are on.
@@ -228,14 +237,14 @@
 		lda	[%g0] ASI_M_MMUREGS, %g3	! peek in the control reg
 		and	%g2, %g3, %g3
 		subcc	%g3, 0x0, %g0
-		bnz	srmmu_nviking			! is in mbus mode
+		bnz	srmmu_not_viking			! is in mbus mode
 		 nop
-		
+
 		rd	%psr, %g3			! DO NOT TOUCH %g3
 		andn	%g3, PSR_ET, %g2
 		wr	%g2, 0x0, %psr
 		WRITE_PAUSE
-		
+
 		/* Get context table pointer, then convert to
 		 * a physical address, which is 36 bits.
 		 */
@@ -258,12 +267,12 @@
 		lda	[%g4] ASI_M_BYPASS, %o1		! This is a level 1 ptr
 		srl	%o1, 0x4, %o1			! Clear low 4 bits
 		sll	%o1, 0x8, %o1			! Make physical
-		
+
 		/* Ok, pull in the PTD. */
 		lda	[%o1] ASI_M_BYPASS, %o2		! This is the 0x0 16MB pgd
 
 		/* Calculate to KERNBASE entry. */
-		add	%o1, KERNBASE >> (SRMMU_PGDIR_SHIFT - 2), %o3		
+		add	%o1, KERNBASE >> (SRMMU_PGDIR_SHIFT - 2), %o3
 
 		/* Poke the entry into the calculated address. */
 		sta	%o2, [%o3] ASI_M_BYPASS
@@ -293,12 +302,12 @@
 		b	go_to_highmem
 		 nop
 
+srmmu_not_viking:
 		/* This works on viking's in Mbus mode and all
 		 * other MBUS modules.  It is virtually the same as
 		 * the above madness sans turning traps off and flipping
 		 * the AC bit.
 		 */
-srmmu_nviking:
 		set	AC_M_CTPR, %g1
 		lda	[%g1] ASI_M_MMUREGS, %g1	! get ctx table ptr
 		sll	%g1, 0x4, %g1			! make physical addr
@@ -313,6 +322,29 @@
 		 nop					! wheee....
 
 
+leon_remap:
+		/* Sanity-check, is MMU enabled */
+		lda	[%g0] ASI_LEON_MMUREGS, %g1
+		andcc	%g1, 1, %g0
+		be	halt_notsup
+		 nop
+
+		/* Same code as in the srmmu_not_viking case,
+		 * with the LEON ASI for mmuregs
+		 */
+		set	AC_M_CTPR, %g1
+		lda	[%g1] ASI_LEON_MMUREGS, %g1	! get ctx table ptr
+		sll	%g1, 0x4, %g1			! make physical addr
+		lda	[%g1] ASI_M_BYPASS, %g1		! ptr to level 1 pg_table
+		srl	%g1, 0x4, %g1
+		sll	%g1, 0x8, %g1			! make phys addr for l1 tbl
+
+		lda	[%g1] ASI_M_BYPASS, %g2		! get level1 entry for 0x0
+		add	%g1, KERNBASE >> (SRMMU_PGDIR_SHIFT - 2), %g3
+		sta	%g2, [%g3] ASI_M_BYPASS		! place at KERNBASE entry
+		b	go_to_highmem
+		 nop					! wheee....
+
 /* Now do a non-relative jump so that PC is in high-memory */
 go_to_highmem:
 		set	execute_in_high_mem, %g1
@@ -336,8 +368,9 @@
 		sethi	%hi(linux_dbvec), %g1
 		st	%o1, [%g1 + %lo(linux_dbvec)]
 
-/* Get the machine type via the mysterious romvec node operations. */
-
+		/* Get the machine type via the romvec
+		 * getprops node operation
+		 */
 		add	%g7, 0x1c, %l1
 		ld	[%l1], %l0
 		ld	[%l0], %l0
@@ -356,9 +389,42 @@
 						! to a buf where above string
 						! will get stored by the prom.
 
-#ifdef CONFIG_SPARC_LEON
-	        /* no cpu-type check is needed, it is a SPARC-LEON */
 
+		/* Check value of "compatible" property.
+		 * "value" => "model"
+		 * leon => sparc_leon
+		 * sun4m => sun4m
+		 * sun4s => sun4m
+		 * sun4d => sun4d
+		 * sun4e => "no_sun4e_here"
+		 * '*'   => "no_sun4u_here"
+		 * Check single letters only
+		 */
+
+		set	cputypval, %o2
+		/* If cputypval[0] == 'l' (lower case letter L) this is leon */
+		ldub	[%o2], %l1
+		cmp	%l1, 'l'
+		be	leon_init
+		 nop
+
+		/* Check cputypval[4] to find the sun model */
+		ldub	[%o2 + 0x4], %l1
+
+		cmp	%l1, 'm'
+		be	sun4m_init
+		 cmp	%l1, 's'
+		be	sun4m_init
+		 cmp	%l1, 'd'
+		be	sun4d_init
+		 cmp	%l1, 'e'
+		be	no_sun4e_here		! Could be a sun4e.
+		 nop
+		b	no_sun4u_here		! AIEEE, a V9 sun4u... Get our BIG BROTHER kernel :))
+		 nop
+
+leon_init:
+		/* LEON CPU - set boot_cpu_id */
 		sethi	%hi(boot_cpu_id), %g2	! boot-cpu index
 
 #ifdef CONFIG_SMP
@@ -376,26 +442,6 @@
 
 		ba continue_boot
 		 nop
-#endif
-
-/* Check to cputype. We may be booted on a sun4u (64 bit box),
- * and sun4d needs special treatment.
- */
-
-		set	cputypval, %o2
-		ldub	[%o2 + 0x4], %l1
-
-		cmp	%l1, 'm'
-		be	sun4m_init
-		 cmp	%l1, 's'
-		be	sun4m_init
-		 cmp	%l1, 'd'
-		be	sun4d_init
-		 cmp	%l1, 'e'
-		be	no_sun4e_here		! Could be a sun4e.
-		 nop
-		b	no_sun4u_here		! AIEEE, a V9 sun4u... Get our BIG BROTHER kernel :))
-		 nop
 
 /* CPUID in bootbus can be found at PA 0xff0140000 */
 #define SUN4D_BOOTBUS_CPUID     0xf0140000
@@ -431,9 +477,9 @@
 /* This sucks, apparently this makes Vikings call prom panic, will fix later */
 2:
 		rd	%psr, %o1
-		srl	%o1, 28, %o1		! Get a type of the CPU
+		srl	%o1, PSR_IMPL_SHIFT, %o1	! Get a type of the CPU
 
-		subcc	%o1, 4, %g0		! TI: Viking or MicroSPARC
+		subcc	%o1, PSR_IMPL_TI, %g0		! TI: Viking or MicroSPARC
 		be	continue_boot
 		 nop
 
@@ -459,10 +505,6 @@
 /* Aieee, now set PC and nPC, enable traps, give ourselves a stack and it's
  * show-time!
  */
-
-		sethi	%hi(cputyp), %o0
-		st	%g4, [%o0 + %lo(cputyp)]
-
 		/* Turn on Supervisor, EnableFloating, and all the PIL bits.
 		 * Also puts us in register window zero with traps off.
 		 */
@@ -480,7 +522,7 @@
 		set	__bss_start , %o0	! First address of BSS
 		set	_end , %o1		! Last address of BSS
 		add	%o0, 0x1, %o0
-1:	
+1:
 		stb	%g0, [%o0]
 		subcc	%o0, %o1, %g0
 		bl	1b
@@ -546,7 +588,7 @@
 		set	dest, %g2; \
 		ld	[%g5], %g4; \
 		st	%g4, [%g2];
-	
+
 		/* Patch for window spills... */
 		PATCH_INSN(spnwin_patch1_7win, spnwin_patch1)
 		PATCH_INSN(spnwin_patch2_7win, spnwin_patch2)
@@ -597,7 +639,7 @@
 		st	%g4, [%g5 + 0x18]
 		st	%g4, [%g5 + 0x1c]
 
-2:		
+2:
 		sethi	%hi(nwindows), %g4
 		st	%g3, [%g4 + %lo(nwindows)]	! store final value
 		sub	%g3, 0x1, %g3
@@ -617,18 +659,12 @@
 		wr	%g3, PSR_ET, %psr
 		WRITE_PAUSE
 
-		/* First we call prom_init() to set up PROMLIB, then
-		 * off to start_kernel().
-		 */
-
+		/* Call sparc32_start_kernel(struct linux_romvec *rp) */
 		sethi	%hi(prom_vector_p), %g5
 		ld	[%g5 + %lo(prom_vector_p)], %o0
-		call	prom_init
+		call	sparc32_start_kernel
 		 nop
 
-		call 	start_kernel
-		 nop
-	
 		/* We should not get here. */
 		call	halt_me
 		 nop
@@ -659,7 +695,7 @@
 		.asciz "write"
 		.align	4
 sun4u_6:
-        	.asciz  "\n\rOn sun4u you have to use UltraLinux (64bit) kernel\n\rand not a 32bit sun4[cdem] version\n\r\n\r"
+		.asciz  "\n\rOn sun4u you have to use sparc64 kernel\n\rand not a sparc32 version\n\r\n\r"
 sun4u_6e:
 		.align	4
 sun4u_7:
diff --git a/arch/sparc/kernel/ioport.c b/arch/sparc/kernel/ioport.c
index a2846f5..0f094db 100644
--- a/arch/sparc/kernel/ioport.c
+++ b/arch/sparc/kernel/ioport.c
@@ -55,17 +55,13 @@
 /* This function must make sure that caches and memory are coherent after DMA
  * On LEON systems without cache snooping it flushes the entire D-CACHE.
  */
-#ifndef CONFIG_SPARC_LEON
 static inline void dma_make_coherent(unsigned long pa, unsigned long len)
 {
+	if (sparc_cpu_model == sparc_leon) {
+		if (!sparc_leon3_snooping_enabled())
+			leon_flush_dcache_all();
+	}
 }
-#else
-static inline void dma_make_coherent(unsigned long pa, unsigned long len)
-{
-	if (!sparc_leon3_snooping_enabled())
-		leon_flush_dcache_all();
-}
-#endif
 
 static void __iomem *_sparc_ioremap(struct resource *res, u32 bus, u32 pa, int sz);
 static void __iomem *_sparc_alloc_io(unsigned int busno, unsigned long phys,
@@ -427,9 +423,6 @@
 #endif /* CONFIG_SBUS */
 
 
-/* LEON reuses PCI DMA ops */
-#if defined(CONFIG_PCI) || defined(CONFIG_SPARC_LEON)
-
 /* Allocate and map kernel buffer using consistent mode DMA for a device.
  * hwdev should be valid struct pci_dev pointer for PCI devices.
  */
@@ -657,14 +650,11 @@
 };
 EXPORT_SYMBOL(pci32_dma_ops);
 
-#endif /* CONFIG_PCI || CONFIG_SPARC_LEON */
+/* leon re-uses pci32_dma_ops */
+struct dma_map_ops *leon_dma_ops = &pci32_dma_ops;
+EXPORT_SYMBOL(leon_dma_ops);
 
-#ifdef CONFIG_SPARC_LEON
-struct dma_map_ops *dma_ops = &pci32_dma_ops;
-#elif defined(CONFIG_SBUS)
 struct dma_map_ops *dma_ops = &sbus_dma_ops;
-#endif
-
 EXPORT_SYMBOL(dma_ops);
 
 
diff --git a/arch/sparc/kernel/irq_32.c b/arch/sparc/kernel/irq_32.c
index ae04914..c145f6f 100644
--- a/arch/sparc/kernel/irq_32.c
+++ b/arch/sparc/kernel/irq_32.c
@@ -241,9 +241,6 @@
 	unsigned int cpu_irq;
 	int err;
 
-#if defined CONFIG_SMP && !defined CONFIG_SPARC_LEON
-	struct tt_entry *trap_table;
-#endif
 
 	err = request_irq(irq, irq_handler, 0, "floppy", NULL);
 	if (err)
@@ -264,13 +261,18 @@
 	table[SP_TRAP_IRQ1+(cpu_irq-1)].inst_four = SPARC_NOP;
 
 	INSTANTIATE(sparc_ttable)
-#if defined CONFIG_SMP && !defined CONFIG_SPARC_LEON
-	trap_table = &trapbase_cpu1;
-	INSTANTIATE(trap_table)
-	trap_table = &trapbase_cpu2;
-	INSTANTIATE(trap_table)
-	trap_table = &trapbase_cpu3;
-	INSTANTIATE(trap_table)
+
+#if defined CONFIG_SMP
+	if (sparc_cpu_model != sparc_leon) {
+		struct tt_entry *trap_table;
+
+		trap_table = &trapbase_cpu1;
+		INSTANTIATE(trap_table)
+		trap_table = &trapbase_cpu2;
+		INSTANTIATE(trap_table)
+		trap_table = &trapbase_cpu3;
+		INSTANTIATE(trap_table)
+	}
 #endif
 #undef INSTANTIATE
 	/*
diff --git a/arch/sparc/kernel/kernel.h b/arch/sparc/kernel/kernel.h
index a86372d..291bb5d 100644
--- a/arch/sparc/kernel/kernel.h
+++ b/arch/sparc/kernel/kernel.h
@@ -26,6 +26,9 @@
 #endif
 
 #ifdef CONFIG_SPARC32
+/* setup_32.c */
+void sparc32_start_kernel(struct linux_romvec *rp);
+
 /* cpu.c */
 extern void cpu_probe(void);
 
diff --git a/arch/sparc/kernel/leon_kernel.c b/arch/sparc/kernel/leon_kernel.c
index 77c1b91..e34e2c4 100644
--- a/arch/sparc/kernel/leon_kernel.c
+++ b/arch/sparc/kernel/leon_kernel.c
@@ -23,6 +23,7 @@
 #include <asm/smp.h>
 #include <asm/setup.h>
 
+#include "kernel.h"
 #include "prom.h"
 #include "irq.h"
 
diff --git a/arch/sparc/kernel/leon_pmc.c b/arch/sparc/kernel/leon_pmc.c
index 519ca92..4e17432 100644
--- a/arch/sparc/kernel/leon_pmc.c
+++ b/arch/sparc/kernel/leon_pmc.c
@@ -7,6 +7,7 @@
 #include <linux/pm.h>
 
 #include <asm/leon_amba.h>
+#include <asm/cpu_type.h>
 #include <asm/leon.h>
 
 /* List of Systems that need fixup instructions around power-down instruction */
@@ -65,13 +66,15 @@
 /* Install LEON Power Down function */
 static int __init leon_pmc_install(void)
 {
-	/* Assign power management IDLE handler */
-	if (pmc_leon_need_fixup())
-		pm_idle = pmc_leon_idle_fixup;
-	else
-		pm_idle = pmc_leon_idle;
+	if (sparc_cpu_model == sparc_leon) {
+		/* Assign power management IDLE handler */
+		if (pmc_leon_need_fixup())
+			pm_idle = pmc_leon_idle_fixup;
+		else
+			pm_idle = pmc_leon_idle;
 
-	printk(KERN_INFO "leon: power management initialized\n");
+		printk(KERN_INFO "leon: power management initialized\n");
+	}
 
 	return 0;
 }
diff --git a/arch/sparc/kernel/leon_smp.c b/arch/sparc/kernel/leon_smp.c
index a469090..0f3fb6d 100644
--- a/arch/sparc/kernel/leon_smp.c
+++ b/arch/sparc/kernel/leon_smp.c
@@ -48,15 +48,13 @@
 
 #include "kernel.h"
 
-#ifdef CONFIG_SPARC_LEON
-
 #include "irq.h"
 
 extern ctxd_t *srmmu_ctx_table_phys;
 static int smp_processors_ready;
 extern volatile unsigned long cpu_callin_map[NR_CPUS];
 extern cpumask_t smp_commenced_mask;
-void __init leon_configure_cache_smp(void);
+void __cpuinit leon_configure_cache_smp(void);
 static void leon_ipi_init(void);
 
 /* IRQ number of LEON IPIs */
@@ -123,7 +121,7 @@
 
 extern struct linux_prom_registers smp_penguin_ctable;
 
-void __init leon_configure_cache_smp(void)
+void __cpuinit leon_configure_cache_smp(void)
 {
 	unsigned long cfg = sparc_leon3_get_dcachecfg();
 	int me = smp_processor_id();
@@ -507,5 +505,3 @@
 
 	sparc32_ipi_ops = &leon_ipi_ops;
 }
-
-#endif /* CONFIG_SPARC_LEON */
diff --git a/arch/sparc/kernel/process_32.c b/arch/sparc/kernel/process_32.c
index fe6787c..cb36e82 100644
--- a/arch/sparc/kernel/process_32.c
+++ b/arch/sparc/kernel/process_32.c
@@ -65,50 +65,25 @@
 struct task_struct *last_task_used_math = NULL;
 struct thread_info *current_set[NR_CPUS];
 
-#ifndef CONFIG_SMP
-
 /*
  * the idle loop on a Sparc... ;)
  */
 void cpu_idle(void)
 {
+	set_thread_flag(TIF_POLLING_NRFLAG);
+
 	/* endless idle loop with no priority at all */
 	for (;;) {
-		if (pm_idle) {
-			while (!need_resched())
+		while (!need_resched()) {
+			if (pm_idle)
 				(*pm_idle)();
-		} else {
-			while (!need_resched())
+			else
 				cpu_relax();
 		}
 		schedule_preempt_disabled();
 	}
 }
 
-#else
-
-/* This is being executed in task 0 'user space'. */
-void cpu_idle(void)
-{
-        set_thread_flag(TIF_POLLING_NRFLAG);
-	/* endless idle loop with no priority at all */
-	while(1) {
-#ifdef CONFIG_SPARC_LEON
-		if (pm_idle) {
-			while (!need_resched())
-				(*pm_idle)();
-		} else
-#endif
-		{
-			while (!need_resched())
-				cpu_relax();
-		}
-		schedule_preempt_disabled();
-	}
-}
-
-#endif
-
 /* XXX cli/sti -> local_irq_xxx here, check this works once SMP is fixed. */
 void machine_halt(void)
 {
diff --git a/arch/sparc/kernel/prom_common.c b/arch/sparc/kernel/prom_common.c
index 741df91..1303021 100644
--- a/arch/sparc/kernel/prom_common.c
+++ b/arch/sparc/kernel/prom_common.c
@@ -23,7 +23,6 @@
 #include <linux/of_pdt.h>
 #include <asm/prom.h>
 #include <asm/oplib.h>
-#include <asm/leon.h>
 
 #include "prom.h"
 
diff --git a/arch/sparc/kernel/rtrap_32.S b/arch/sparc/kernel/rtrap_32.S
index 7abc24e..6c34de0 100644
--- a/arch/sparc/kernel/rtrap_32.S
+++ b/arch/sparc/kernel/rtrap_32.S
@@ -231,11 +231,14 @@
 	cmp	%g1, %fp
 	bleu	ret_trap_user_stack_is_bolixed
 	 mov	AC_M_SFSR, %g1
-	lda	[%g1] ASI_M_MMUREGS, %g0
+LEON_PI(lda	[%g1] ASI_LEON_MMUREGS, %g0)
+SUN_PI_(lda	[%g1] ASI_M_MMUREGS, %g0)
 
-	lda	[%g0] ASI_M_MMUREGS, %g1
+LEON_PI(lda	[%g0] ASI_LEON_MMUREGS, %g1)
+SUN_PI_(lda	[%g0] ASI_M_MMUREGS, %g1)
 	or	%g1, 0x2, %g1
-	sta	%g1, [%g0] ASI_M_MMUREGS
+LEON_PI(sta	%g1, [%g0] ASI_LEON_MMUREGS)
+SUN_PI_(sta	%g1, [%g0] ASI_M_MMUREGS)
 
 	restore	%g0, %g0, %g0
 
@@ -244,13 +247,16 @@
 	save	%g0, %g0, %g0
 
 	andn	%g1, 0x2, %g1
-	sta	%g1, [%g0] ASI_M_MMUREGS
+LEON_PI(sta	%g1, [%g0] ASI_LEON_MMUREGS)
+SUN_PI_(sta	%g1, [%g0] ASI_M_MMUREGS)
 
 	mov	AC_M_SFAR, %g2
-	lda	[%g2] ASI_M_MMUREGS, %g2
+LEON_PI(lda	[%g2] ASI_LEON_MMUREGS, %g2)
+SUN_PI_(lda	[%g2] ASI_M_MMUREGS, %g2)
 
 	mov	AC_M_SFSR, %g1
-	lda	[%g1] ASI_M_MMUREGS, %g1
+LEON_PI(lda	[%g1] ASI_LEON_MMUREGS, %g1)
+SUN_PI_(lda	[%g1] ASI_M_MMUREGS, %g1)
 	andcc	%g1, 0x2, %g0
 	be	ret_trap_userwins_ok
 	 nop
diff --git a/arch/sparc/kernel/setup_32.c b/arch/sparc/kernel/setup_32.c
index c052313..efe3e64 100644
--- a/arch/sparc/kernel/setup_32.c
+++ b/arch/sparc/kernel/setup_32.c
@@ -32,6 +32,7 @@
 #include <linux/cpu.h>
 #include <linux/kdebug.h>
 #include <linux/export.h>
+#include <linux/start_kernel.h>
 
 #include <asm/io.h>
 #include <asm/processor.h>
@@ -45,6 +46,7 @@
 #include <asm/cpudata.h>
 #include <asm/setup.h>
 #include <asm/cacheflush.h>
+#include <asm/sections.h>
 
 #include "kernel.h"
 
@@ -237,13 +239,62 @@
 	}
 }
 
+struct leon_1insn_patch_entry {
+	unsigned int addr;
+	unsigned int insn;
+};
+
 enum sparc_cpu sparc_cpu_model;
 EXPORT_SYMBOL(sparc_cpu_model);
 
-struct tt_entry *sparc_ttable;
+static __init void leon_patch(void)
+{
+	struct leon_1insn_patch_entry *start = (void *)__leon_1insn_patch;
+	struct leon_1insn_patch_entry *end = (void *)__leon_1insn_patch_end;
 
+	/* Default instruction is leon - no patching */
+	if (sparc_cpu_model == sparc_leon)
+		return;
+
+	while (start < end) {
+		unsigned long addr = start->addr;
+
+		*(unsigned int *)(addr) = start->insn;
+		flushi(addr);
+
+		start++;
+	}
+}
+
+struct tt_entry *sparc_ttable;
 struct pt_regs fake_swapper_regs;
 
+/* Called from head_32.S - before we have set up anything
+ * in the kernel. Be very careful with what you do here.
+ */
+void __init sparc32_start_kernel(struct linux_romvec *rp)
+{
+	prom_init(rp);
+
+	/* Set sparc_cpu_model */
+	sparc_cpu_model = sun_unknown;
+	if (!strcmp(&cputypval[0], "sun4m"))
+		sparc_cpu_model = sun4m;
+	if (!strcmp(&cputypval[0], "sun4s"))
+		sparc_cpu_model = sun4m; /* CP-1200 with PROM 2.30 -E */
+	if (!strcmp(&cputypval[0], "sun4d"))
+		sparc_cpu_model = sun4d;
+	if (!strcmp(&cputypval[0], "sun4e"))
+		sparc_cpu_model = sun4e;
+	if (!strcmp(&cputypval[0], "sun4u"))
+		sparc_cpu_model = sun4u;
+	if (!strncmp(&cputypval[0], "leon" , 4))
+		sparc_cpu_model = sparc_leon;
+
+	leon_patch();
+	start_kernel();
+}
+
 void __init setup_arch(char **cmdline_p)
 {
 	int i;
@@ -260,21 +311,6 @@
 
 	register_console(&prom_early_console);
 
-	/* Set sparc_cpu_model */
-	sparc_cpu_model = sun_unknown;
-	if (!strcmp(&cputypval[0], "sun4m"))
-		sparc_cpu_model = sun4m;
-	if (!strcmp(&cputypval[0], "sun4s"))
-		sparc_cpu_model = sun4m; /* CP-1200 with PROM 2.30 -E */
-	if (!strcmp(&cputypval[0], "sun4d"))
-		sparc_cpu_model = sun4d;
-	if (!strcmp(&cputypval[0], "sun4e"))
-		sparc_cpu_model = sun4e;
-	if (!strcmp(&cputypval[0], "sun4u"))
-		sparc_cpu_model = sun4u;
-	if (!strncmp(&cputypval[0], "leon" , 4))
-		sparc_cpu_model = sparc_leon;
-
 	printk("ARCH: ");
 	switch(sparc_cpu_model) {
 	case sun4m:
diff --git a/arch/sparc/kernel/signal32.c b/arch/sparc/kernel/signal32.c
index bb1513e..a53e0a5 100644
--- a/arch/sparc/kernel/signal32.c
+++ b/arch/sparc/kernel/signal32.c
@@ -32,8 +32,6 @@
 
 #include "sigutil.h"
 
-#define _BLOCKABLE (~(sigmask(SIGKILL) | sigmask(SIGSTOP)))
-
 /* This magic should be in g_upper[0] for all upper parts
  * to be valid.
  */
@@ -274,7 +272,6 @@
 		case 2: set.sig[1] = seta[2] + (((long)seta[3]) << 32);
 		case 1: set.sig[0] = seta[0] + (((long)seta[1]) << 32);
 	}
-	sigdelsetmask(&set, ~_BLOCKABLE);
 	set_current_blocked(&set);
 	return;
 
@@ -376,7 +373,6 @@
 		case 2: set.sig[1] = seta.sig[2] + (((long)seta.sig[3]) << 32);
 		case 1: set.sig[0] = seta.sig[0] + (((long)seta.sig[1]) << 32);
 	}
-	sigdelsetmask(&set, ~_BLOCKABLE);
 	set_current_blocked(&set);
 	return;
 segv:
@@ -775,7 +771,7 @@
 	return -EFAULT;
 }
 
-static inline int handle_signal32(unsigned long signr, struct k_sigaction *ka,
+static inline void handle_signal32(unsigned long signr, struct k_sigaction *ka,
 				  siginfo_t *info,
 				  sigset_t *oldset, struct pt_regs *regs)
 {
@@ -787,12 +783,9 @@
 		err = setup_frame32(ka, regs, signr, oldset);
 
 	if (err)
-		return err;
+		return;
 
-	block_sigmask(ka, signr);
-	tracehook_signal_handler(signr, info, ka, regs, 0);
-
-	return 0;
+	signal_delivered(signr, info, ka, regs, 0);
 }
 
 static inline void syscall_restart32(unsigned long orig_i0, struct pt_regs *regs,
@@ -841,14 +834,7 @@
 	if (signr > 0) {
 		if (restart_syscall)
 			syscall_restart32(orig_i0, regs, &ka.sa);
-		if (handle_signal32(signr, &ka, &info, oldset, regs) == 0) {
-			/* A signal was successfully delivered; the saved
-			 * sigmask will have been stored in the signal frame,
-			 * and will be restored by sigreturn, so we can simply
-			 * clear the TS_RESTORE_SIGMASK flag.
-			 */
-			current_thread_info()->status &= ~TS_RESTORE_SIGMASK;
-		}
+		handle_signal32(signr, &ka, &info, oldset, regs);
 		return;
 	}
 	if (restart_syscall &&
@@ -872,10 +858,7 @@
 	/* If there's no signal to deliver, we just put the saved sigmask
 	 * back
 	 */
-	if (current_thread_info()->status & TS_RESTORE_SIGMASK) {
-		current_thread_info()->status &= ~TS_RESTORE_SIGMASK;
-		set_current_blocked(&current->saved_sigmask);
-	}
+	restore_saved_sigmask();
 }
 
 struct sigstack32 {
diff --git a/arch/sparc/kernel/signal_32.c b/arch/sparc/kernel/signal_32.c
index 2b7e849..68f9c86 100644
--- a/arch/sparc/kernel/signal_32.c
+++ b/arch/sparc/kernel/signal_32.c
@@ -29,8 +29,6 @@
 
 #include "sigutil.h"
 
-#define _BLOCKABLE (~(sigmask(SIGKILL) | sigmask(SIGSTOP)))
-
 extern void fpsave(unsigned long *fpregs, unsigned long *fsr,
 		   void *fpqueue, unsigned long *fpqdepth);
 extern void fpload(unsigned long *fpregs, unsigned long *fsr);
@@ -130,7 +128,6 @@
 	if (err)
 		goto segv_and_exit;
 
-	sigdelsetmask(&set, ~_BLOCKABLE);
 	set_current_blocked(&set);
 	return;
 
@@ -197,7 +194,6 @@
 			goto segv;
 	}
 
-	sigdelsetmask(&set, ~_BLOCKABLE);
 	set_current_blocked(&set);
 	return;
 segv:
@@ -449,10 +445,11 @@
 	return -EFAULT;
 }
 
-static inline int
+static inline void
 handle_signal(unsigned long signr, struct k_sigaction *ka,
-	      siginfo_t *info, sigset_t *oldset, struct pt_regs *regs)
+	      siginfo_t *info, struct pt_regs *regs)
 {
+	sigset_t *oldset = sigmask_to_save();
 	int err;
 
 	if (ka->sa.sa_flags & SA_SIGINFO)
@@ -461,12 +458,9 @@
 		err = setup_frame(ka, regs, signr, oldset);
 
 	if (err)
-		return err;
+		return;
 
-	block_sigmask(ka, signr);
-	tracehook_signal_handler(signr, info, ka, regs, 0);
-
-	return 0;
+	signal_delivered(signr, info, ka, regs, 0);
 }
 
 static inline void syscall_restart(unsigned long orig_i0, struct pt_regs *regs,
@@ -498,7 +492,6 @@
 {
 	struct k_sigaction ka;
 	int restart_syscall;
-	sigset_t *oldset;
 	siginfo_t info;
 	int signr;
 
@@ -523,11 +516,6 @@
 	if (pt_regs_is_syscall(regs) && (regs->psr & PSR_C))
 		regs->u_regs[UREG_G6] = orig_i0;
 
-	if (test_thread_flag(TIF_RESTORE_SIGMASK))
-		oldset = &current->saved_sigmask;
-	else
-		oldset = &current->blocked;
-
 	signr = get_signal_to_deliver(&info, &ka, regs, NULL);
 
 	/* If the debugger messes with the program counter, it clears
@@ -544,15 +532,7 @@
 	if (signr > 0) {
 		if (restart_syscall)
 			syscall_restart(orig_i0, regs, &ka.sa);
-		if (handle_signal(signr, &ka, &info, oldset, regs) == 0) {
-			/* a signal was successfully delivered; the saved
-			 * sigmask will have been stored in the signal frame,
-			 * and will be restored by sigreturn, so we can simply
-			 * clear the TIF_RESTORE_SIGMASK flag.
-			 */
-			if (test_thread_flag(TIF_RESTORE_SIGMASK))
-				clear_thread_flag(TIF_RESTORE_SIGMASK);
-		}
+		handle_signal(signr, &ka, &info, regs);
 		return;
 	}
 	if (restart_syscall &&
@@ -576,22 +556,17 @@
 	/* if there's no signal to deliver, we just put the saved sigmask
 	 * back
 	 */
-	if (test_thread_flag(TIF_RESTORE_SIGMASK)) {
-		clear_thread_flag(TIF_RESTORE_SIGMASK);
-		set_current_blocked(&current->saved_sigmask);
-	}
+	restore_saved_sigmask();
 }
 
 void do_notify_resume(struct pt_regs *regs, unsigned long orig_i0,
 		      unsigned long thread_info_flags)
 {
-	if (thread_info_flags & (_TIF_SIGPENDING | _TIF_RESTORE_SIGMASK))
+	if (thread_info_flags & _TIF_SIGPENDING)
 		do_signal(regs, orig_i0);
 	if (thread_info_flags & _TIF_NOTIFY_RESUME) {
 		clear_thread_flag(TIF_NOTIFY_RESUME);
 		tracehook_notify_resume(regs);
-		if (current->replacement_session_keyring)
-			key_replace_session_keyring();
 	}
 }
 
diff --git a/arch/sparc/kernel/signal_64.c b/arch/sparc/kernel/signal_64.c
index eafaab4..867de2f 100644
--- a/arch/sparc/kernel/signal_64.c
+++ b/arch/sparc/kernel/signal_64.c
@@ -38,8 +38,6 @@
 #include "systbls.h"
 #include "sigutil.h"
 
-#define _BLOCKABLE (~(sigmask(SIGKILL) | sigmask(SIGSTOP)))
-
 /* {set, get}context() needed for 64-bit SparcLinux userland. */
 asmlinkage void sparc64_set_context(struct pt_regs *regs)
 {
@@ -71,7 +69,6 @@
 			if (__copy_from_user(&set, &ucp->uc_sigmask, sizeof(sigset_t)))
 				goto do_sigsegv;
 		}
-		sigdelsetmask(&set, ~_BLOCKABLE);
 		set_current_blocked(&set);
 	}
 	if (test_thread_flag(TIF_32BIT)) {
@@ -315,7 +312,6 @@
 	/* Prevent syscall restart.  */
 	pt_regs_clear_syscall(regs);
 
-	sigdelsetmask(&set, ~_BLOCKABLE);
 	set_current_blocked(&set);
 	return;
 segv:
@@ -466,7 +462,7 @@
 	return -EFAULT;
 }
 
-static inline int handle_signal(unsigned long signr, struct k_sigaction *ka,
+static inline void handle_signal(unsigned long signr, struct k_sigaction *ka,
 				siginfo_t *info,
 				sigset_t *oldset, struct pt_regs *regs)
 {
@@ -475,12 +471,9 @@
 	err = setup_rt_frame(ka, regs, signr, oldset,
 			     (ka->sa.sa_flags & SA_SIGINFO) ? info : NULL);
 	if (err)
-		return err;
+		return;
 
-	block_sigmask(ka, signr);
-	tracehook_signal_handler(signr, info, ka, regs, 0);
-
-	return 0;
+	signal_delivered(signr, info, ka, regs, 0);
 }
 
 static inline void syscall_restart(unsigned long orig_i0, struct pt_regs *regs,
@@ -512,7 +505,7 @@
 {
 	struct k_sigaction ka;
 	int restart_syscall;
-	sigset_t *oldset;
+	sigset_t *oldset = sigmask_to_save();
 	siginfo_t info;
 	int signr;
 	
@@ -538,11 +531,6 @@
 	    (regs->tstate & (TSTATE_XCARRY | TSTATE_ICARRY)))
 		regs->u_regs[UREG_G6] = orig_i0;
 
-	if (current_thread_info()->status & TS_RESTORE_SIGMASK)
-		oldset = &current->saved_sigmask;
-	else
-		oldset = &current->blocked;
-
 #ifdef CONFIG_COMPAT
 	if (test_thread_flag(TIF_32BIT)) {
 		extern void do_signal32(sigset_t *, struct pt_regs *);
@@ -563,14 +551,7 @@
 	if (signr > 0) {
 		if (restart_syscall)
 			syscall_restart(orig_i0, regs, &ka.sa);
-		if (handle_signal(signr, &ka, &info, oldset, regs) == 0) {
-			/* A signal was successfully delivered; the saved
-			 * sigmask will have been stored in the signal frame,
-			 * and will be restored by sigreturn, so we can simply
-			 * clear the TS_RESTORE_SIGMASK flag.
-			 */
-			current_thread_info()->status &= ~TS_RESTORE_SIGMASK;
-		}
+		handle_signal(signr, &ka, &info, oldset, regs);
 		return;
 	}
 	if (restart_syscall &&
@@ -594,10 +575,7 @@
 	/* If there's no signal to deliver, we just put the saved sigmask
 	 * back
 	 */
-	if (current_thread_info()->status & TS_RESTORE_SIGMASK) {
-		current_thread_info()->status &= ~TS_RESTORE_SIGMASK;
-		set_current_blocked(&current->saved_sigmask);
-	}
+	restore_saved_sigmask();
 }
 
 void do_notify_resume(struct pt_regs *regs, unsigned long orig_i0, unsigned long thread_info_flags)
@@ -607,8 +585,6 @@
 	if (thread_info_flags & _TIF_NOTIFY_RESUME) {
 		clear_thread_flag(TIF_NOTIFY_RESUME);
 		tracehook_notify_resume(regs);
-		if (current->replacement_session_keyring)
-			key_replace_session_keyring();
 	}
 }
 
diff --git a/arch/sparc/kernel/sys_sparc_64.c b/arch/sparc/kernel/sys_sparc_64.c
index 3ee51f1..275f74f 100644
--- a/arch/sparc/kernel/sys_sparc_64.c
+++ b/arch/sparc/kernel/sys_sparc_64.c
@@ -580,16 +580,9 @@
 		unsigned long, new_len, unsigned long, flags,
 		unsigned long, new_addr)
 {
-	unsigned long ret = -EINVAL;
-
 	if (test_thread_flag(TIF_32BIT))
-		goto out;
-
-	down_write(&current->mm->mmap_sem);
-	ret = do_mremap(addr, old_len, new_len, flags, new_addr);
-	up_write(&current->mm->mmap_sem);
-out:
-	return ret;       
+		return -EINVAL;
+	return sys_mremap(addr, old_len, new_len, flags, new_addr);
 }
 
 /* we come to here via sys_nis_syscall so it can setup the regs argument */
diff --git a/arch/sparc/kernel/trampoline_32.S b/arch/sparc/kernel/trampoline_32.S
index 7364ddc..af27aca 100644
--- a/arch/sparc/kernel/trampoline_32.S
+++ b/arch/sparc/kernel/trampoline_32.S
@@ -149,8 +149,6 @@
 
 	b,a	smp_do_cpu_idle
 
-#ifdef CONFIG_SPARC_LEON
-
 	__CPUINIT
 	.align	4
         .global leon_smp_cpu_startup, smp_penguin_ctable
@@ -161,7 +159,7 @@
         ld [%g1+4],%g1
         srl %g1,4,%g1
         set 0x00000100,%g5 /* SRMMU_CTXTBL_PTR */
-	sta %g1, [%g5] ASI_M_MMUREGS
+	sta %g1, [%g5] ASI_LEON_MMUREGS
 
 	/* Set up a sane %psr -- PIL<0xf> S<0x1> PS<0x1> CWP<0x0> */
 	set	(PSR_PIL | PSR_S | PSR_PS), %g1
@@ -207,5 +205,3 @@
 	 nop
 
 	b,a	smp_do_cpu_idle
-
-#endif
diff --git a/arch/sparc/kernel/traps_64.c b/arch/sparc/kernel/traps_64.c
index c72fdf5..3b05e66 100644
--- a/arch/sparc/kernel/traps_64.c
+++ b/arch/sparc/kernel/traps_64.c
@@ -2054,7 +2054,7 @@
 	do_fpe_common(regs);
 }
 
-extern int do_mathemu(struct pt_regs *, struct fpustate *);
+extern int do_mathemu(struct pt_regs *, struct fpustate *, bool);
 
 void do_fpother(struct pt_regs *regs)
 {
@@ -2068,7 +2068,7 @@
 	switch ((current_thread_info()->xfsr[0] & 0x1c000)) {
 	case (2 << 14): /* unfinished_FPop */
 	case (3 << 14): /* unimplemented_FPop */
-		ret = do_mathemu(regs, f);
+		ret = do_mathemu(regs, f, false);
 		break;
 	}
 	if (ret)
@@ -2308,10 +2308,12 @@
 			} else {
 				struct fpustate *f = FPUSTATE;
 
-				/* XXX maybe verify XFSR bits like
-				 * XXX do_fpother() does?
+				/* On UltraSPARC T2 and later, FPU insns which
+				 * are not implemented in HW signal an illegal
+				 * instruction trap and do not set the FP Trap
+				 * Type in the %fsr to unimplemented_FPop.
 				 */
-				if (do_mathemu(regs, f))
+				if (do_mathemu(regs, f, true))
 					return;
 			}
 		}
diff --git a/arch/sparc/kernel/vio.c b/arch/sparc/kernel/vio.c
index 5cffdc5..3e244f3 100644
--- a/arch/sparc/kernel/vio.c
+++ b/arch/sparc/kernel/vio.c
@@ -443,7 +443,7 @@
 	root_vdev = vio_create_one(hp, root, NULL);
 	err = -ENODEV;
 	if (!root_vdev) {
-		printk(KERN_ERR "VIO: Coult not create root device.\n");
+		printk(KERN_ERR "VIO: Could not create root device.\n");
 		goto out_release;
 	}
 
diff --git a/arch/sparc/kernel/vmlinux.lds.S b/arch/sparc/kernel/vmlinux.lds.S
index 0e16056..89c2c29 100644
--- a/arch/sparc/kernel/vmlinux.lds.S
+++ b/arch/sparc/kernel/vmlinux.lds.S
@@ -107,6 +107,11 @@
 		*(.sun4v_2insn_patch)
 		__sun4v_2insn_patch_end = .;
 	}
+	.leon_1insn_patch : {
+		__leon_1insn_patch = .;
+		*(.leon_1insn_patch)
+		__leon_1insn_patch_end = .;
+	}
 	.swapper_tsb_phys_patch : {
 		__swapper_tsb_phys_patch = .;
 		*(.swapper_tsb_phys_patch)
diff --git a/arch/sparc/kernel/wof.S b/arch/sparc/kernel/wof.S
index 4c2de3c..28a7bc6 100644
--- a/arch/sparc/kernel/wof.S
+++ b/arch/sparc/kernel/wof.S
@@ -332,24 +332,30 @@
 	 mov	AC_M_SFSR, %glob_tmp
 
 	/* Clear the fault status and turn on the no_fault bit. */
-	lda	[%glob_tmp] ASI_M_MMUREGS, %g0		! eat SFSR
+LEON_PI(lda	[%glob_tmp] ASI_LEON_MMUREGS, %g0)	! eat SFSR
+SUN_PI_(lda	[%glob_tmp] ASI_M_MMUREGS, %g0)		! eat SFSR
 
-	lda	[%g0] ASI_M_MMUREGS, %glob_tmp		! read MMU control
+LEON_PI(lda	[%g0] ASI_LEON_MMUREGS, %glob_tmp)	! read MMU control
+SUN_PI_(lda	[%g0] ASI_M_MMUREGS, %glob_tmp)		! read MMU control
 	or	%glob_tmp, 0x2, %glob_tmp		! or in no_fault bit
-	sta	%glob_tmp, [%g0] ASI_M_MMUREGS		! set it
+LEON_PI(sta	%glob_tmp, [%g0] ASI_LEON_MMUREGS)	! set it
+SUN_PI_(sta	%glob_tmp, [%g0] ASI_M_MMUREGS)		! set it
 
 	/* Dump the registers and cross fingers. */
 	STORE_WINDOW(sp)
 
 	/* Clear the no_fault bit and check the status. */
 	andn	%glob_tmp, 0x2, %glob_tmp
-	sta	%glob_tmp, [%g0] ASI_M_MMUREGS
+LEON_PI(sta	%glob_tmp, [%g0] ASI_LEON_MMUREGS)
+SUN_PI_(sta	%glob_tmp, [%g0] ASI_M_MMUREGS)
 
 	mov	AC_M_SFAR, %glob_tmp
-	lda	[%glob_tmp] ASI_M_MMUREGS, %g0
+LEON_PI(lda	[%glob_tmp] ASI_LEON_MMUREGS, %g0)
+SUN_PI_(lda	[%glob_tmp] ASI_M_MMUREGS, %g0)
 
 	mov	AC_M_SFSR, %glob_tmp
-	lda	[%glob_tmp] ASI_M_MMUREGS, %glob_tmp
+LEON_PI(lda	[%glob_tmp] ASI_LEON_MMUREGS, %glob_tmp)
+SUN_PI_(lda	[%glob_tmp] ASI_M_MMUREGS, %glob_tmp)
 	andcc	%glob_tmp, 0x2, %g0			! did we fault?
 	be,a	spwin_finish_up + 0x4			! cool beans, success
 	 restore %g0, %g0, %g0
diff --git a/arch/sparc/kernel/wuf.S b/arch/sparc/kernel/wuf.S
index 9fde91a..2c21cc59 100644
--- a/arch/sparc/kernel/wuf.S
+++ b/arch/sparc/kernel/wuf.S
@@ -254,16 +254,19 @@
 	mov	AC_M_SFSR, %l4
 	cmp	%l5, %sp
 	bleu	fwin_user_stack_is_bolixed
-	 lda	[%l4] ASI_M_MMUREGS, %g0	! clear fault status
+LEON_PI( lda	[%l4] ASI_LEON_MMUREGS, %g0)	! clear fault status
+SUN_PI_( lda	[%l4] ASI_M_MMUREGS, %g0)	! clear fault status
 
 	/* The technique is, turn off faults on this processor,
 	 * just let the load rip, then check the sfsr to see if
 	 * a fault did occur.  Then we turn on fault traps again
 	 * and branch conditionally based upon what happened.
 	 */
-	lda	[%g0] ASI_M_MMUREGS, %l5	! read mmu-ctrl reg
+LEON_PI(lda	[%g0] ASI_LEON_MMUREGS, %l5)	! read mmu-ctrl reg
+SUN_PI_(lda	[%g0] ASI_M_MMUREGS, %l5)	! read mmu-ctrl reg
 	or	%l5, 0x2, %l5			! turn on no-fault bit
-	sta	%l5, [%g0] ASI_M_MMUREGS	! store it
+LEON_PI(sta	%l5, [%g0] ASI_LEON_MMUREGS)	! store it
+SUN_PI_(sta	%l5, [%g0] ASI_M_MMUREGS)	! store it
 
 	/* Cross fingers and go for it. */
 	LOAD_WINDOW(sp)
@@ -275,18 +278,22 @@
 
 	/* LOCATION: Window 'T' */
 
-	lda	[%g0] ASI_M_MMUREGS, %twin_tmp1	! load mmu-ctrl again
-	andn	%twin_tmp1, 0x2, %twin_tmp1	! clear no-fault bit
-	sta	%twin_tmp1, [%g0] ASI_M_MMUREGS	! store it
+LEON_PI(lda	[%g0] ASI_LEON_MMUREGS, %twin_tmp1)	! load mmu-ctrl again
+SUN_PI_(lda	[%g0] ASI_M_MMUREGS, %twin_tmp1)	! load mmu-ctrl again
+	andn	%twin_tmp1, 0x2, %twin_tmp1		! clear no-fault bit
+LEON_PI(sta	%twin_tmp1, [%g0] ASI_LEON_MMUREGS)	! store it
+SUN_PI_(sta	%twin_tmp1, [%g0] ASI_M_MMUREGS)	! store it
 
 	mov	AC_M_SFAR, %twin_tmp2
-	lda	[%twin_tmp2] ASI_M_MMUREGS, %g0	! read fault address
+LEON_PI(lda	[%twin_tmp2] ASI_LEON_MMUREGS, %g0)	! read fault address
+SUN_PI_(lda	[%twin_tmp2] ASI_M_MMUREGS, %g0)	! read fault address
 
 	mov	AC_M_SFSR, %twin_tmp2
-	lda	[%twin_tmp2] ASI_M_MMUREGS, %twin_tmp2	! read fault status
-	andcc	%twin_tmp2, 0x2, %g0			! did fault occur?
+LEON_PI(lda	[%twin_tmp2] ASI_LEON_MMUREGS, %twin_tmp2) ! read fault status
+SUN_PI_(lda	[%twin_tmp2] ASI_M_MMUREGS, %twin_tmp2)	   ! read fault status
+	andcc	%twin_tmp2, 0x2, %g0			   ! did fault occur?
 
-	bne	1f					! yep, cleanup
+	bne	1f					   ! yep, cleanup
 	 nop
 
 	wr	%t_psr, 0x0, %psr
diff --git a/arch/sparc/lib/Makefile b/arch/sparc/lib/Makefile
index 943d98d..dff4096 100644
--- a/arch/sparc/lib/Makefile
+++ b/arch/sparc/lib/Makefile
@@ -10,7 +10,6 @@
 lib-y                 += checksum_$(BITS).o
 lib-$(CONFIG_SPARC32) += blockops.o
 lib-y                 += memscan_$(BITS).o memcmp.o strncmp_$(BITS).o
-lib-y                 += strlen_user_$(BITS).o
 lib-$(CONFIG_SPARC32) += divdi3.o udivdi3.o
 lib-$(CONFIG_SPARC32) += copy_user.o locks.o
 lib-$(CONFIG_SPARC64) += atomic_64.o
diff --git a/arch/sparc/lib/ksyms.c b/arch/sparc/lib/ksyms.c
index 6b278ab..3b31218 100644
--- a/arch/sparc/lib/ksyms.c
+++ b/arch/sparc/lib/ksyms.c
@@ -15,8 +15,6 @@
 
 /* string functions */
 EXPORT_SYMBOL(strlen);
-EXPORT_SYMBOL(__strlen_user);
-EXPORT_SYMBOL(__strnlen_user);
 EXPORT_SYMBOL(strncmp);
 
 /* mem* functions */
diff --git a/arch/sparc/lib/strlen_user_32.S b/arch/sparc/lib/strlen_user_32.S
deleted file mode 100644
index 8c8a371..0000000
--- a/arch/sparc/lib/strlen_user_32.S
+++ /dev/null
@@ -1,109 +0,0 @@
-/* strlen_user.S: Sparc optimized strlen_user code
- *
- * Return length of string in userspace including terminating 0
- * or 0 for error
- *
- * Copyright (C) 1991,1996 Free Software Foundation
- * Copyright (C) 1996 David S. Miller (davem@caip.rutgers.edu)
- * Copyright (C) 1996 Jakub Jelinek (jj@sunsite.mff.cuni.cz)
- */
-
-#define LO_MAGIC 0x01010101
-#define HI_MAGIC 0x80808080
-
-10:
-	ldub	[%o0], %o5
-	cmp	%o5, 0
-	be	1f
-	 add	%o0, 1, %o0
-	andcc	%o0, 3, %g0
-	be	4f
-	 or	%o4, %lo(HI_MAGIC), %o3
-11:
-	ldub	[%o0], %o5
-	cmp	%o5, 0
-	be	2f
-	 add	%o0, 1, %o0
-	andcc	%o0, 3, %g0
-	be	5f
-	 sethi	%hi(LO_MAGIC), %o4
-12:
-	ldub	[%o0], %o5
-	cmp	%o5, 0
-	be	3f
-	 add	%o0, 1, %o0
-	b	13f
-	 or	%o4, %lo(LO_MAGIC), %o2
-1:
-	retl
-	 mov	1, %o0
-2:
-	retl
-	 mov	2, %o0
-3:
-	retl
-	 mov	3, %o0
-
-	.align 4
-	.global __strlen_user, __strnlen_user
-__strlen_user:
-	sethi	%hi(32768), %o1
-__strnlen_user:
-	mov	%o1, %g1
-	mov	%o0, %o1
-	andcc	%o0, 3, %g0
-	bne	10b
-	 sethi	%hi(HI_MAGIC), %o4
-	or	%o4, %lo(HI_MAGIC), %o3
-4:
-	sethi	%hi(LO_MAGIC), %o4
-5:
-	or	%o4, %lo(LO_MAGIC), %o2
-13:
-	ld	[%o0], %o5
-2:
-	sub	%o5, %o2, %o4
-	andcc	%o4, %o3, %g0
-	bne	82f
-	 add	%o0, 4, %o0
-	sub	%o0, %o1, %g2
-81:	cmp	%g2, %g1
-	blu	13b
-	 mov	%o0, %o4
-	ba,a	1f
-
-	/* Check every byte. */
-82:	srl	%o5, 24, %g5
-	andcc	%g5, 0xff, %g0
-	be	1f
-	 add	%o0, -3, %o4
-	srl	%o5, 16, %g5
-	andcc	%g5, 0xff, %g0
-	be	1f
-	 add	%o4, 1, %o4
-	srl	%o5, 8, %g5
-	andcc	%g5, 0xff, %g0
-	be	1f
-	 add	%o4, 1, %o4
-	andcc	%o5, 0xff, %g0
-	bne	81b
-	 sub	%o0, %o1, %g2
-
-	add	%o4, 1, %o4
-1:
-	retl
-	 sub	%o4, %o1, %o0
-
-	.section .fixup,#alloc,#execinstr
-	.align	4
-9:
-	retl
-	 clr	%o0
-
-	.section __ex_table,#alloc
-	.align	4
-
-	.word	10b, 9b
-	.word	11b, 9b
-	.word	12b, 9b
-	.word	13b, 9b
diff --git a/arch/sparc/lib/strlen_user_64.S b/arch/sparc/lib/strlen_user_64.S
deleted file mode 100644
index c3df71f..0000000
--- a/arch/sparc/lib/strlen_user_64.S
+++ /dev/null
@@ -1,97 +0,0 @@
-/* strlen_user.S: Sparc64 optimized strlen_user code
- *
- * Return length of string in userspace including terminating 0
- * or 0 for error
- *
- * Copyright (C) 1991,1996 Free Software Foundation
- * Copyright (C) 1996,1999 David S. Miller (davem@redhat.com)
- * Copyright (C) 1996,1997 Jakub Jelinek (jj@sunsite.mff.cuni.cz)
- */
-
-#include <linux/linkage.h>
-#include <asm/asi.h>
-
-#define LO_MAGIC 0x01010101
-#define HI_MAGIC 0x80808080
-
-	.align 4
-ENTRY(__strlen_user)
-	sethi	%hi(32768), %o1
-ENTRY(__strnlen_user)
-	mov	%o1, %g1
-	mov	%o0, %o1
-	andcc	%o0, 3, %g0
-	be,pt	%icc, 9f
-	 sethi	%hi(HI_MAGIC), %o4
-10:	lduba	[%o0] %asi, %o5
-	brz,pn	%o5, 21f
-	 add	%o0, 1, %o0
-	andcc	%o0, 3, %g0
-	be,pn	%icc, 4f
-	 or	%o4, %lo(HI_MAGIC), %o3
-11:	lduba	[%o0] %asi, %o5
-	brz,pn	%o5, 22f
-	 add	%o0, 1, %o0
-	andcc	%o0, 3, %g0
-	be,pt	%icc, 13f
-	 srl	%o3, 7, %o2
-12:	lduba	[%o0] %asi, %o5
-	brz,pn	%o5, 23f
-	 add	%o0, 1, %o0
-	ba,pt	%icc, 2f
-15:	 lda	[%o0] %asi, %o5
-9:	or	%o4, %lo(HI_MAGIC), %o3
-4:	srl	%o3, 7, %o2
-13:	lda	[%o0] %asi, %o5
-2:	sub	%o5, %o2, %o4
-	andcc	%o4, %o3, %g0
-	bne,pn	%icc, 82f
-	 add	%o0, 4, %o0
-	sub	%o0, %o1, %g2
-81:	cmp	%g2, %g1
-	blu,pt	%icc, 13b
-	 mov	%o0, %o4
-	ba,a,pt	%xcc, 1f
-
-	/* Check every byte. */
-82:	srl	%o5, 24, %g7
-	andcc	%g7, 0xff, %g0
-	be,pn	%icc, 1f
-	 add	%o0, -3, %o4
-	srl	%o5, 16, %g7
-	andcc	%g7, 0xff, %g0
-	be,pn	%icc, 1f
-	 add	%o4, 1, %o4
-	srl	%o5, 8, %g7
-	andcc	%g7, 0xff, %g0
-	be,pn	%icc, 1f
-	 add	%o4, 1, %o4
-	andcc	%o5, 0xff, %g0
-	bne,pt	%icc, 81b
-	 sub	%o0, %o1, %g2
-	add	%o4, 1, %o4
-1:	retl
-	 sub	%o4, %o1, %o0
-21:	retl
-	 mov	1, %o0
-22:	retl
-	 mov	2, %o0
-23:	retl
-	 mov	3, %o0
-ENDPROC(__strlen_user)
-ENDPROC(__strnlen_user)
-
-        .section .fixup,#alloc,#execinstr
-        .align  4
-30:
-        retl
-         clr    %o0
-
-	.section __ex_table,"a"
-	.align	4
-
-	.word	10b, 30b
-	.word	11b, 30b
-	.word	12b, 30b
-	.word	15b, 30b
-	.word	13b, 30b
diff --git a/arch/sparc/math-emu/math_64.c b/arch/sparc/math-emu/math_64.c
index 2bbe2f2..1704068 100644
--- a/arch/sparc/math-emu/math_64.c
+++ b/arch/sparc/math-emu/math_64.c
@@ -163,7 +163,7 @@
 	u64 q[2];
 } *argp;
 
-int do_mathemu(struct pt_regs *regs, struct fpustate *f)
+int do_mathemu(struct pt_regs *regs, struct fpustate *f, bool illegal_insn_trap)
 {
 	unsigned long pc = regs->tpc;
 	unsigned long tstate = regs->tstate;
@@ -218,7 +218,7 @@
 			case FSQRTS: {
 				unsigned long x = current_thread_info()->xfsr[0];
 
-				x = (x >> 14) & 0xf;
+				x = (x >> 14) & 0x7;
 				TYPE(x,1,1,1,1,0,0);
 				break;
 			}
@@ -226,7 +226,7 @@
 			case FSQRTD: {
 				unsigned long x = current_thread_info()->xfsr[0];
 
-				x = (x >> 14) & 0xf;
+				x = (x >> 14) & 0x7;
 				TYPE(x,2,1,2,1,0,0);
 				break;
 			}
@@ -357,9 +357,17 @@
 	if (type) {
 		argp rs1 = NULL, rs2 = NULL, rd = NULL;
 		
-		freg = (current_thread_info()->xfsr[0] >> 14) & 0xf;
-		if (freg != (type >> 9))
-			goto err;
+		/* Starting with UltraSPARC-T2, the cpu does not set the FP Trap
+		 * Type field in the %fsr to unimplemented_FPop.  Nor does it
+		 * use the fp_exception_other trap.  Instead it signals an
+		 * illegal instruction and leaves the FP trap type field of
+		 * the %fsr unchanged.
+		 */
+		if (!illegal_insn_trap) {
+			int ftt = (current_thread_info()->xfsr[0] >> 14) & 0x7;
+			if (ftt != (type >> 9))
+				goto err;
+		}
 		current_thread_info()->xfsr[0] &= ~0x1c000;
 		freg = ((insn >> 14) & 0x1f);
 		switch (type & 0x3) {
diff --git a/arch/sparc/mm/Makefile b/arch/sparc/mm/Makefile
index 69ffd31..30c3ecc 100644
--- a/arch/sparc/mm/Makefile
+++ b/arch/sparc/mm/Makefile
@@ -8,8 +8,9 @@
 obj-y                   += fault_$(BITS).o
 obj-y                   += init_$(BITS).o
 obj-$(CONFIG_SPARC32)   += extable.o srmmu.o iommu.o io-unit.o
+obj-$(CONFIG_SPARC32)   += srmmu_access.o
 obj-$(CONFIG_SPARC32)   += hypersparc.o viking.o tsunami.o swift.o
-obj-$(CONFIG_SPARC_LEON)+= leon_mm.o
+obj-$(CONFIG_SPARC32)   += leon_mm.o
 
 # Only used by sparc64
 obj-$(CONFIG_HUGETLB_PAGE) += hugetlbpage.o
diff --git a/arch/sparc/mm/leon_mm.c b/arch/sparc/mm/leon_mm.c
index 4c67ae6..5bed085 100644
--- a/arch/sparc/mm/leon_mm.c
+++ b/arch/sparc/mm/leon_mm.c
@@ -32,7 +32,7 @@
 }
 
 
-unsigned long srmmu_swprobe(unsigned long vaddr, unsigned long *paddr)
+unsigned long leon_swprobe(unsigned long vaddr, unsigned long *paddr)
 {
 
 	unsigned int ctxtbl;
diff --git a/arch/sparc/mm/srmmu.c b/arch/sparc/mm/srmmu.c
index 256db6b..62e3f57 100644
--- a/arch/sparc/mm/srmmu.c
+++ b/arch/sparc/mm/srmmu.c
@@ -646,6 +646,23 @@
 	}
 }
 
+/* These flush types are not available on all chips... */
+static inline unsigned long srmmu_probe(unsigned long vaddr)
+{
+	unsigned long retval;
+
+	if (sparc_cpu_model != sparc_leon) {
+
+		vaddr &= PAGE_MASK;
+		__asm__ __volatile__("lda [%1] %2, %0\n\t" :
+				     "=r" (retval) :
+				     "r" (vaddr | 0x400), "i" (ASI_M_FLUSH_PROBE));
+	} else {
+		retval = leon_swprobe(vaddr, 0);
+	}
+	return retval;
+}
+
 /*
  * This is much cleaner than poking around physical address space
  * looking at the prom's page table directly which is what most
@@ -665,7 +682,7 @@
 			break; /* probably wrap around */
 		if(start == 0xfef00000)
 			start = KADB_DEBUGGER_BEGVM;
-		if(!(prompte = srmmu_hwprobe(start))) {
+		if(!(prompte = srmmu_probe(start))) {
 			start += PAGE_SIZE;
 			continue;
 		}
@@ -674,12 +691,12 @@
 		what = 0;
     
 		if(!(start & ~(SRMMU_REAL_PMD_MASK))) {
-			if(srmmu_hwprobe((start-PAGE_SIZE) + SRMMU_REAL_PMD_SIZE) == prompte)
+			if(srmmu_probe((start-PAGE_SIZE) + SRMMU_REAL_PMD_SIZE) == prompte)
 				what = 1;
 		}
     
 		if(!(start & ~(SRMMU_PGDIR_MASK))) {
-			if(srmmu_hwprobe((start-PAGE_SIZE) + SRMMU_PGDIR_SIZE) ==
+			if(srmmu_probe((start-PAGE_SIZE) + SRMMU_PGDIR_SIZE) ==
 			   prompte)
 				what = 2;
 		}
@@ -1156,7 +1173,7 @@
 #ifdef TURBOSPARC_WRITEBACK
 	volatile unsigned long clear;
 
-	if (srmmu_hwprobe(page))
+	if (srmmu_probe(page))
 		turbosparc_flush_page_cache(page);
 	clear = srmmu_get_fstatus();
 #endif
diff --git a/arch/sparc/mm/srmmu_access.S b/arch/sparc/mm/srmmu_access.S
new file mode 100644
index 0000000..d0a67b2
--- /dev/null
+++ b/arch/sparc/mm/srmmu_access.S
@@ -0,0 +1,82 @@
+/* Assembler variants of srmmu access functions.
+ * Implemented in assembler to allow run-time patching.
+ * LEON uses a different ASI for MMUREGS than SUN.
+ *
+ * The leon_1insn_patch infrastructure is used
+ * for the run-time patching.
+ */
+
+#include <linux/linkage.h>
+
+#include <asm/asmmacro.h>
+#include <asm/pgtsrmmu.h>
+#include <asm/asi.h>
+
+/* unsigned int srmmu_get_mmureg(void) */
+ENTRY(srmmu_get_mmureg)
+LEON_PI(lda	[%g0] ASI_LEON_MMUREGS, %o0)
+SUN_PI_(lda	[%g0] ASI_M_MMUREGS, %o0)
+	retl
+	 nop
+ENDPROC(srmmu_get_mmureg)
+
+/* void srmmu_set_mmureg(unsigned long regval) */
+ENTRY(srmmu_set_mmureg)
+LEON_PI(sta	%o0, [%g0] ASI_LEON_MMUREGS)
+SUN_PI_(sta	%o0, [%g0] ASI_M_MMUREGS)
+	retl
+	 nop
+ENDPROC(srmmu_set_mmureg)
+
+/* void srmmu_set_ctable_ptr(unsigned long paddr) */
+ENTRY(srmmu_set_ctable_ptr)
+	/* paddr = ((paddr >> 4) & SRMMU_CTX_PMASK); */
+	srl	%o0, 4, %g1
+	and	%g1, SRMMU_CTX_PMASK, %g1
+
+	mov	SRMMU_CTXTBL_PTR, %g2
+LEON_PI(sta	%g1, [%g2] ASI_LEON_MMUREGS)
+SUN_PI_(sta	%g1, [%g2] ASI_M_MMUREGS)
+	retl
+	 nop
+ENDPROC(srmmu_set_ctable_ptr)
+
+
+/* void srmmu_set_context(int context) */
+ENTRY(srmmu_set_context)
+	mov	SRMMU_CTX_REG, %g1
+LEON_PI(sta	%o0, [%g1] ASI_LEON_MMUREGS)
+SUN_PI_(sta	%o0, [%g1] ASI_M_MMUREGS)
+	retl
+	 nop
+ENDPROC(srmmu_set_context)
+
+
+/* int srmmu_get_context(void) */
+ENTRY(srmmu_get_context)
+	mov	SRMMU_CTX_REG, %o0
+LEON_PI(lda     [%o0] ASI_LEON_MMUREGS, %o0)
+SUN_PI_(lda	[%o0] ASI_M_MMUREGS, %o0)
+	retl
+	 nop
+ENDPROC(srmmu_get_context)
+
+
+/* unsigned int srmmu_get_fstatus(void) */
+ENTRY(srmmu_get_fstatus)
+	mov	SRMMU_FAULT_STATUS, %o0
+LEON_PI(lda     [%o0] ASI_LEON_MMUREGS, %o0)
+SUN_PI_(lda	[%o0] ASI_M_MMUREGS, %o0)
+	retl
+	 nop
+ENDPROC(srmmu_get_fstatus)
+
+
+/* unsigned int srmmu_get_faddr(void) */
+ENTRY(srmmu_get_faddr)
+	mov	SRMMU_FAULT_ADDR, %o0
+LEON_PI(lda     [%o0] ASI_LEON_MMUREGS, %o0)
+SUN_PI_(lda	[%o0] ASI_M_MMUREGS, %o0)
+	retl
+	 nop
+ENDPROC(srmmu_get_faddr)
diff --git a/arch/tile/Kconfig b/arch/tile/Kconfig
index 6ad6219..fe12881 100644
--- a/arch/tile/Kconfig
+++ b/arch/tile/Kconfig
@@ -48,6 +48,14 @@
 config SYS_SUPPORTS_HUGETLBFS
 	def_bool y
 
+# Support for additional huge page sizes besides HPAGE_SIZE.
+# The software support is currently only present in the TILE-Gx
+# hypervisor. TILEPro in any case does not support page sizes
+# larger than the default HPAGE_SIZE.
+config HUGETLB_SUPER_PAGES
+	depends on HUGETLB_PAGE && TILEGX
+	def_bool y
+
 # FIXME: tilegx can implement a more efficient rwsem.
 config RWSEM_GENERIC_SPINLOCK
 	def_bool y
@@ -107,16 +115,14 @@
 	select HVC_DRIVER
 	def_bool y
 
-# Please note: TILE-Gx support is not yet finalized; this is
-# the preliminary support.  TILE-Gx drivers are only provided
-# with the alpha or beta test versions for Tilera customers.
 config TILEGX
-	depends on EXPERIMENTAL
 	bool "Building with TILE-Gx (64-bit) compiler and toolchain"
 
+config TILEPRO
+	def_bool !TILEGX
+
 config 64BIT
-	depends on TILEGX
-	def_bool y
+	def_bool TILEGX
 
 config ARCH_DEFCONFIG
 	string
@@ -137,6 +143,31 @@
 	  smaller kernel memory footprint results from using a smaller
 	  value on chips with fewer tiles.
 
+if TILEGX
+
+choice
+	prompt "Kernel page size"
+	default PAGE_SIZE_64KB
+	help
+	  This lets you select the page size of the kernel.  For best
+	  performance on memory-intensive applications, a page size of 64KB
+	  is recommended.  For workloads involving many small files, many
+	  connections, etc., it may be better to select 16KB, which uses
+	  memory more efficiently at some cost in TLB performance.
+
+	  Note that this option is TILE-Gx specific; currently
+	  TILEPro page size is set by rebuilding the hypervisor.
+
+config PAGE_SIZE_16KB
+	bool "16KB"
+
+config PAGE_SIZE_64KB
+	bool "64KB"
+
+endchoice
+
+endif
+
 source "kernel/Kconfig.hz"
 
 config KEXEC
diff --git a/arch/tile/Makefile b/arch/tile/Makefile
index 9520bc5..e20b0a0 100644
--- a/arch/tile/Makefile
+++ b/arch/tile/Makefile
@@ -34,7 +34,12 @@
   $(shell $(CC) $(KBUILD_CFLAGS) $(KCFLAGS) -print-libgcc-file-name)
 
 # Provide the path to use for "make defconfig".
-KBUILD_DEFCONFIG := $(ARCH)_defconfig
+# We default to the newer TILE-Gx architecture if only "tile" is given.
+ifeq ($(ARCH),tile)
+        KBUILD_DEFCONFIG := tilegx_defconfig
+else
+        KBUILD_DEFCONFIG := $(ARCH)_defconfig
+endif
 
 # Used as a file extension when useful, e.g. head_$(BITS).o
 # Not needed for (e.g.) "$(CC) -m32" since the compiler automatically
diff --git a/arch/tile/include/arch/spr_def_32.h b/arch/tile/include/arch/spr_def_32.h
index bbc1f4c..78bbce2 100644
--- a/arch/tile/include/arch/spr_def_32.h
+++ b/arch/tile/include/arch/spr_def_32.h
@@ -65,6 +65,31 @@
 #define SPR_EX_CONTEXT_2_1__ICS_RMASK 0x1
 #define SPR_EX_CONTEXT_2_1__ICS_MASK  0x4
 #define SPR_FAIL 0x4e09
+#define SPR_IDN_AVAIL_EN 0x3e05
+#define SPR_IDN_CA_DATA 0x0b00
+#define SPR_IDN_DATA_AVAIL 0x0b03
+#define SPR_IDN_DEADLOCK_TIMEOUT 0x3406
+#define SPR_IDN_DEMUX_CA_COUNT 0x0a05
+#define SPR_IDN_DEMUX_COUNT_0 0x0a06
+#define SPR_IDN_DEMUX_COUNT_1 0x0a07
+#define SPR_IDN_DEMUX_CTL 0x0a08
+#define SPR_IDN_DEMUX_QUEUE_SEL 0x0a0a
+#define SPR_IDN_DEMUX_STATUS 0x0a0b
+#define SPR_IDN_DEMUX_WRITE_FIFO 0x0a0c
+#define SPR_IDN_DIRECTION_PROTECT 0x2e05
+#define SPR_IDN_PENDING 0x0a0e
+#define SPR_IDN_REFILL_EN 0x0e05
+#define SPR_IDN_SP_FIFO_DATA 0x0a0f
+#define SPR_IDN_SP_FIFO_SEL 0x0a10
+#define SPR_IDN_SP_FREEZE 0x0a11
+#define SPR_IDN_SP_FREEZE__SP_FRZ_MASK  0x1
+#define SPR_IDN_SP_FREEZE__DEMUX_FRZ_MASK  0x2
+#define SPR_IDN_SP_FREEZE__NON_DEST_EXT_MASK  0x4
+#define SPR_IDN_SP_STATE 0x0a12
+#define SPR_IDN_TAG_0 0x0a13
+#define SPR_IDN_TAG_1 0x0a14
+#define SPR_IDN_TAG_VALID 0x0a15
+#define SPR_IDN_TILE_COORD 0x0a16
 #define SPR_INTCTRL_0_STATUS 0x4a07
 #define SPR_INTCTRL_1_STATUS 0x4807
 #define SPR_INTCTRL_2_STATUS 0x4607
@@ -87,12 +112,36 @@
 #define SPR_INTERRUPT_MASK_SET_1_1 0x480e
 #define SPR_INTERRUPT_MASK_SET_2_0 0x460c
 #define SPR_INTERRUPT_MASK_SET_2_1 0x460d
+#define SPR_MPL_AUX_PERF_COUNT_SET_0 0x6000
+#define SPR_MPL_AUX_PERF_COUNT_SET_1 0x6001
+#define SPR_MPL_AUX_PERF_COUNT_SET_2 0x6002
 #define SPR_MPL_DMA_CPL_SET_0 0x5800
 #define SPR_MPL_DMA_CPL_SET_1 0x5801
 #define SPR_MPL_DMA_CPL_SET_2 0x5802
 #define SPR_MPL_DMA_NOTIFY_SET_0 0x3800
 #define SPR_MPL_DMA_NOTIFY_SET_1 0x3801
 #define SPR_MPL_DMA_NOTIFY_SET_2 0x3802
+#define SPR_MPL_IDN_ACCESS_SET_0 0x0a00
+#define SPR_MPL_IDN_ACCESS_SET_1 0x0a01
+#define SPR_MPL_IDN_ACCESS_SET_2 0x0a02
+#define SPR_MPL_IDN_AVAIL_SET_0 0x3e00
+#define SPR_MPL_IDN_AVAIL_SET_1 0x3e01
+#define SPR_MPL_IDN_AVAIL_SET_2 0x3e02
+#define SPR_MPL_IDN_CA_SET_0 0x3a00
+#define SPR_MPL_IDN_CA_SET_1 0x3a01
+#define SPR_MPL_IDN_CA_SET_2 0x3a02
+#define SPR_MPL_IDN_COMPLETE_SET_0 0x1200
+#define SPR_MPL_IDN_COMPLETE_SET_1 0x1201
+#define SPR_MPL_IDN_COMPLETE_SET_2 0x1202
+#define SPR_MPL_IDN_FIREWALL_SET_0 0x2e00
+#define SPR_MPL_IDN_FIREWALL_SET_1 0x2e01
+#define SPR_MPL_IDN_FIREWALL_SET_2 0x2e02
+#define SPR_MPL_IDN_REFILL_SET_0 0x0e00
+#define SPR_MPL_IDN_REFILL_SET_1 0x0e01
+#define SPR_MPL_IDN_REFILL_SET_2 0x0e02
+#define SPR_MPL_IDN_TIMER_SET_0 0x3400
+#define SPR_MPL_IDN_TIMER_SET_1 0x3401
+#define SPR_MPL_IDN_TIMER_SET_2 0x3402
 #define SPR_MPL_INTCTRL_0_SET_0 0x4a00
 #define SPR_MPL_INTCTRL_0_SET_1 0x4a01
 #define SPR_MPL_INTCTRL_0_SET_2 0x4a02
@@ -102,6 +151,9 @@
 #define SPR_MPL_INTCTRL_2_SET_0 0x4600
 #define SPR_MPL_INTCTRL_2_SET_1 0x4601
 #define SPR_MPL_INTCTRL_2_SET_2 0x4602
+#define SPR_MPL_PERF_COUNT_SET_0 0x4200
+#define SPR_MPL_PERF_COUNT_SET_1 0x4201
+#define SPR_MPL_PERF_COUNT_SET_2 0x4202
 #define SPR_MPL_SN_ACCESS_SET_0 0x0800
 #define SPR_MPL_SN_ACCESS_SET_1 0x0801
 #define SPR_MPL_SN_ACCESS_SET_2 0x0802
@@ -181,6 +233,7 @@
 #define SPR_UDN_DEMUX_STATUS 0x0c0d
 #define SPR_UDN_DEMUX_WRITE_FIFO 0x0c0e
 #define SPR_UDN_DIRECTION_PROTECT 0x3005
+#define SPR_UDN_PENDING 0x0c10
 #define SPR_UDN_REFILL_EN 0x1005
 #define SPR_UDN_SP_FIFO_DATA 0x0c11
 #define SPR_UDN_SP_FIFO_SEL 0x0c12
@@ -195,6 +248,9 @@
 #define SPR_UDN_TAG_3 0x0c18
 #define SPR_UDN_TAG_VALID 0x0c19
 #define SPR_UDN_TILE_COORD 0x0c1a
+#define SPR_WATCH_CTL 0x4209
+#define SPR_WATCH_MASK 0x420a
+#define SPR_WATCH_VAL 0x420b
 
 #endif /* !defined(__ARCH_SPR_DEF_H__) */
 
diff --git a/arch/tile/include/arch/spr_def_64.h b/arch/tile/include/arch/spr_def_64.h
index cd3e5f9..0da86fa 100644
--- a/arch/tile/include/arch/spr_def_64.h
+++ b/arch/tile/include/arch/spr_def_64.h
@@ -52,6 +52,13 @@
 #define SPR_EX_CONTEXT_2_1__ICS_RMASK 0x1
 #define SPR_EX_CONTEXT_2_1__ICS_MASK  0x4
 #define SPR_FAIL 0x2707
+#define SPR_IDN_AVAIL_EN 0x1a05
+#define SPR_IDN_DATA_AVAIL 0x0a80
+#define SPR_IDN_DEADLOCK_TIMEOUT 0x1806
+#define SPR_IDN_DEMUX_COUNT_0 0x0a05
+#define SPR_IDN_DEMUX_COUNT_1 0x0a06
+#define SPR_IDN_DIRECTION_PROTECT 0x1405
+#define SPR_IDN_PENDING 0x0a08
 #define SPR_ILL_TRANS_REASON__I_STREAM_VA_RMASK 0x1
 #define SPR_INTCTRL_0_STATUS 0x2505
 #define SPR_INTCTRL_1_STATUS 0x2405
@@ -88,9 +95,27 @@
 #define SPR_IPI_MASK_SET_0 0x1f0a
 #define SPR_IPI_MASK_SET_1 0x1e0a
 #define SPR_IPI_MASK_SET_2 0x1d0a
+#define SPR_MPL_AUX_PERF_COUNT_SET_0 0x2100
+#define SPR_MPL_AUX_PERF_COUNT_SET_1 0x2101
+#define SPR_MPL_AUX_PERF_COUNT_SET_2 0x2102
 #define SPR_MPL_AUX_TILE_TIMER_SET_0 0x1700
 #define SPR_MPL_AUX_TILE_TIMER_SET_1 0x1701
 #define SPR_MPL_AUX_TILE_TIMER_SET_2 0x1702
+#define SPR_MPL_IDN_ACCESS_SET_0 0x0a00
+#define SPR_MPL_IDN_ACCESS_SET_1 0x0a01
+#define SPR_MPL_IDN_ACCESS_SET_2 0x0a02
+#define SPR_MPL_IDN_AVAIL_SET_0 0x1a00
+#define SPR_MPL_IDN_AVAIL_SET_1 0x1a01
+#define SPR_MPL_IDN_AVAIL_SET_2 0x1a02
+#define SPR_MPL_IDN_COMPLETE_SET_0 0x0500
+#define SPR_MPL_IDN_COMPLETE_SET_1 0x0501
+#define SPR_MPL_IDN_COMPLETE_SET_2 0x0502
+#define SPR_MPL_IDN_FIREWALL_SET_0 0x1400
+#define SPR_MPL_IDN_FIREWALL_SET_1 0x1401
+#define SPR_MPL_IDN_FIREWALL_SET_2 0x1402
+#define SPR_MPL_IDN_TIMER_SET_0 0x1800
+#define SPR_MPL_IDN_TIMER_SET_1 0x1801
+#define SPR_MPL_IDN_TIMER_SET_2 0x1802
 #define SPR_MPL_INTCTRL_0_SET_0 0x2500
 #define SPR_MPL_INTCTRL_0_SET_1 0x2501
 #define SPR_MPL_INTCTRL_0_SET_2 0x2502
@@ -100,6 +125,21 @@
 #define SPR_MPL_INTCTRL_2_SET_0 0x2300
 #define SPR_MPL_INTCTRL_2_SET_1 0x2301
 #define SPR_MPL_INTCTRL_2_SET_2 0x2302
+#define SPR_MPL_IPI_0 0x1f04
+#define SPR_MPL_IPI_0_SET_0 0x1f00
+#define SPR_MPL_IPI_0_SET_1 0x1f01
+#define SPR_MPL_IPI_0_SET_2 0x1f02
+#define SPR_MPL_IPI_1 0x1e04
+#define SPR_MPL_IPI_1_SET_0 0x1e00
+#define SPR_MPL_IPI_1_SET_1 0x1e01
+#define SPR_MPL_IPI_1_SET_2 0x1e02
+#define SPR_MPL_IPI_2 0x1d04
+#define SPR_MPL_IPI_2_SET_0 0x1d00
+#define SPR_MPL_IPI_2_SET_1 0x1d01
+#define SPR_MPL_IPI_2_SET_2 0x1d02
+#define SPR_MPL_PERF_COUNT_SET_0 0x2000
+#define SPR_MPL_PERF_COUNT_SET_1 0x2001
+#define SPR_MPL_PERF_COUNT_SET_2 0x2002
 #define SPR_MPL_UDN_ACCESS_SET_0 0x0b00
 #define SPR_MPL_UDN_ACCESS_SET_1 0x0b01
 #define SPR_MPL_UDN_ACCESS_SET_2 0x0b02
@@ -167,6 +207,9 @@
 #define SPR_UDN_DEMUX_COUNT_2 0x0b07
 #define SPR_UDN_DEMUX_COUNT_3 0x0b08
 #define SPR_UDN_DIRECTION_PROTECT 0x1505
+#define SPR_UDN_PENDING 0x0b0a
+#define SPR_WATCH_MASK 0x200a
+#define SPR_WATCH_VAL 0x200b
 
 #endif /* !defined(__ARCH_SPR_DEF_H__) */
 
diff --git a/arch/tile/include/asm/Kbuild b/arch/tile/include/asm/Kbuild
index 0bb4264..143473e 100644
--- a/arch/tile/include/asm/Kbuild
+++ b/arch/tile/include/asm/Kbuild
@@ -2,6 +2,7 @@
 
 header-y += ../arch/
 
+header-y += cachectl.h
 header-y += ucontext.h
 header-y += hardwall.h
 
@@ -21,7 +22,6 @@
 generic-y += irq_regs.h
 generic-y += kdebug.h
 generic-y += local.h
-generic-y += module.h
 generic-y += msgbuf.h
 generic-y += mutex.h
 generic-y += param.h
diff --git a/arch/tile/include/asm/atomic_32.h b/arch/tile/include/asm/atomic_32.h
index 54d1da8..e7fb5cf 100644
--- a/arch/tile/include/asm/atomic_32.h
+++ b/arch/tile/include/asm/atomic_32.h
@@ -303,7 +303,14 @@
 void __atomic_fault_unlock(int *lock_ptr);
 #endif
 
+/* Return a pointer to the lock for the given address. */
+int *__atomic_hashed_lock(volatile void *v);
+
 /* Private helper routines in lib/atomic_asm_32.S */
+struct __get_user {
+	unsigned long val;
+	int err;
+};
 extern struct __get_user __atomic_cmpxchg(volatile int *p,
 					  int *lock, int o, int n);
 extern struct __get_user __atomic_xchg(volatile int *p, int *lock, int n);
@@ -319,6 +326,9 @@
 extern u64 __atomic64_xchg_add_unless(volatile u64 *p,
 				      int *lock, u64 o, u64 n);
 
+/* Return failure from the atomic wrappers. */
+struct __get_user __atomic_bad_address(int __user *addr);
+
 #endif /* !__ASSEMBLY__ */
 
 #endif /* _ASM_TILE_ATOMIC_32_H */
diff --git a/arch/tile/include/asm/bitops.h b/arch/tile/include/asm/bitops.h
index 16f1fa5..bd186c4 100644
--- a/arch/tile/include/asm/bitops.h
+++ b/arch/tile/include/asm/bitops.h
@@ -77,6 +77,11 @@
 	return __builtin_ffs(x);
 }
 
+static inline int fls64(__u64 w)
+{
+	return (sizeof(__u64) * 8) - __builtin_clzll(w);
+}
+
 /**
  * fls - find last set bit in word
  * @x: the word to search
@@ -90,12 +95,7 @@
  */
 static inline int fls(int x)
 {
-	return (sizeof(int) * 8) - __builtin_clz(x);
-}
-
-static inline int fls64(__u64 w)
-{
-	return (sizeof(__u64) * 8) - __builtin_clzll(w);
+	return fls64((unsigned int) x);
 }
 
 static inline unsigned int __arch_hweight32(unsigned int w)
diff --git a/arch/tile/include/asm/byteorder.h b/arch/tile/include/asm/byteorder.h
index 9558416..fb72ecf 100644
--- a/arch/tile/include/asm/byteorder.h
+++ b/arch/tile/include/asm/byteorder.h
@@ -1 +1,21 @@
+/*
+ * Copyright 2011 Tilera Corporation. All Rights Reserved.
+ *
+ *   This program is free software; you can redistribute it and/or
+ *   modify it under the terms of the GNU General Public License
+ *   as published by the Free Software Foundation, version 2.
+ *
+ *   This program is distributed in the hope that it will be useful, but
+ *   WITHOUT ANY WARRANTY; without even the implied warranty of
+ *   MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
+ *   NON INFRINGEMENT.  See the GNU General Public License for
+ *   more details.
+ */
+
+#if defined (__BIG_ENDIAN__)
+#include <linux/byteorder/big_endian.h>
+#elif defined (__LITTLE_ENDIAN__)
 #include <linux/byteorder/little_endian.h>
+#else
+#error "__BIG_ENDIAN__ or __LITTLE_ENDIAN__ must be defined."
+#endif
diff --git a/arch/tile/include/asm/cachectl.h b/arch/tile/include/asm/cachectl.h
new file mode 100644
index 0000000..af4c9f9
--- /dev/null
+++ b/arch/tile/include/asm/cachectl.h
@@ -0,0 +1,42 @@
+/*
+ * Copyright 2011 Tilera Corporation. All Rights Reserved.
+ *
+ *   This program is free software; you can redistribute it and/or
+ *   modify it under the terms of the GNU General Public License
+ *   as published by the Free Software Foundation, version 2.
+ *
+ *   This program is distributed in the hope that it will be useful, but
+ *   WITHOUT ANY WARRANTY; without even the implied warranty of
+ *   MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
+ *   NON INFRINGEMENT.  See the GNU General Public License for
+ *   more details.
+ */
+
+#ifndef _ASM_TILE_CACHECTL_H
+#define _ASM_TILE_CACHECTL_H
+
+/*
+ * Options for cacheflush system call.
+ *
+ * The ICACHE flush is performed on all cores currently running the
+ * current process's address space.  The intent is for user
+ * applications to be able to modify code, invoke the system call,
+ * then allow arbitrary other threads in the same address space to see
+ * the newly-modified code.  Passing a length of CHIP_L1I_CACHE_SIZE()
+ * or more invalidates the entire icache on all cores in the address
+ * space.  (Note: currently this option invalidates the entire icache
+ * regardless of the requested address and length, but we may choose
+ * to honor the arguments at some point.)
+ *
+ * Flush and invalidation of memory can normally be performed with the
+ * __insn_flush(), __insn_inv(), and __insn_finv() instructions from
+ * userspace.  The DCACHE option to the system call allows userspace
+ * to flush the entire L1+L2 data cache from the core.  In this case,
+ * the address and length arguments are not used.  The DCACHE flush is
+ * restricted to the current core, not all cores in the address space.
+ */
+#define	ICACHE	(1<<0)		/* invalidate L1 instruction cache */
+#define	DCACHE	(1<<1)		/* flush and invalidate data cache */
+#define	BCACHE	(ICACHE|DCACHE)	/* flush both caches               */
+
+#endif	/* _ASM_TILE_CACHECTL_H */
diff --git a/arch/tile/include/asm/compat.h b/arch/tile/include/asm/compat.h
index 4b4b289..6e74450 100644
--- a/arch/tile/include/asm/compat.h
+++ b/arch/tile/include/asm/compat.h
@@ -44,7 +44,6 @@
 typedef __kernel_mode_t compat_mode_t;
 typedef __kernel_dev_t compat_dev_t;
 typedef __kernel_loff_t compat_loff_t;
-typedef __kernel_nlink_t compat_nlink_t;
 typedef __kernel_ipc_pid_t compat_ipc_pid_t;
 typedef __kernel_daddr_t compat_daddr_t;
 typedef __kernel_fsid_t	compat_fsid_t;
@@ -242,9 +241,6 @@
 long compat_sys_sched_rr_get_interval(compat_pid_t pid,
 				      struct compat_timespec __user *interval);
 
-/* Tilera Linux syscalls that don't have "compat" versions. */
-#define compat_sys_flush_cache sys_flush_cache
-
 /* These are the intvec_64.S trampolines. */
 long _compat_sys_execve(const char __user *path,
 			const compat_uptr_t __user *argv,
diff --git a/arch/tile/include/asm/elf.h b/arch/tile/include/asm/elf.h
index 623a6bb..d16d006 100644
--- a/arch/tile/include/asm/elf.h
+++ b/arch/tile/include/asm/elf.h
@@ -44,7 +44,11 @@
 #else
 #define ELF_CLASS	ELFCLASS32
 #endif
+#ifdef __BIG_ENDIAN__
+#define ELF_DATA	ELFDATA2MSB
+#else
 #define ELF_DATA	ELFDATA2LSB
+#endif
 
 /*
  * There seems to be a bug in how compat_binfmt_elf.c works: it
@@ -59,6 +63,7 @@
  */
 #define elf_check_arch(x)  \
 	((x)->e_ident[EI_CLASS] == ELF_CLASS && \
+	 (x)->e_ident[EI_DATA] == ELF_DATA && \
 	 (x)->e_machine == CHIP_ELF_TYPE())
 
 /* The module loader only handles a few relocation types. */
diff --git a/arch/tile/include/asm/futex.h b/arch/tile/include/asm/futex.h
index d03ec12..5909ac3 100644
--- a/arch/tile/include/asm/futex.h
+++ b/arch/tile/include/asm/futex.h
@@ -28,29 +28,81 @@
 #include <linux/futex.h>
 #include <linux/uaccess.h>
 #include <linux/errno.h>
+#include <asm/atomic.h>
 
-extern struct __get_user futex_set(u32 __user *v, int i);
-extern struct __get_user futex_add(u32 __user *v, int n);
-extern struct __get_user futex_or(u32 __user *v, int n);
-extern struct __get_user futex_andn(u32 __user *v, int n);
-extern struct __get_user futex_cmpxchg(u32 __user *v, int o, int n);
+/*
+ * Support macros for futex operations.  Do not use these macros directly.
+ * They assume "ret", "val", "oparg", and "uaddr" in the lexical context.
+ * __futex_cmpxchg() additionally assumes "oldval".
+ */
 
-#ifndef __tilegx__
-extern struct __get_user futex_xor(u32 __user *v, int n);
+#ifdef __tilegx__
+
+#define __futex_asm(OP) \
+	asm("1: {" #OP " %1, %3, %4; movei %0, 0 }\n"		\
+	    ".pushsection .fixup,\"ax\"\n"			\
+	    "0: { movei %0, %5; j 9f }\n"			\
+	    ".section __ex_table,\"a\"\n"			\
+	    ".quad 1b, 0b\n"					\
+	    ".popsection\n"					\
+	    "9:"						\
+	    : "=r" (ret), "=r" (val), "+m" (*(uaddr))		\
+	    : "r" (uaddr), "r" (oparg), "i" (-EFAULT))
+
+#define __futex_set() __futex_asm(exch4)
+#define __futex_add() __futex_asm(fetchadd4)
+#define __futex_or() __futex_asm(fetchor4)
+#define __futex_andn() ({ oparg = ~oparg; __futex_asm(fetchand4); })
+#define __futex_cmpxchg() \
+	({ __insn_mtspr(SPR_CMPEXCH_VALUE, oldval); __futex_asm(cmpexch4); })
+
+#define __futex_xor()						\
+	({							\
+		u32 oldval, n = oparg;				\
+		if ((ret = __get_user(oldval, uaddr)) == 0) {	\
+			do {					\
+				oparg = oldval ^ n;		\
+				__futex_cmpxchg();		\
+			} while (ret == 0 && oldval != val);	\
+		}						\
+	})
+
+/* No need to prefetch, since the atomic ops go to the home cache anyway. */
+#define __futex_prolog()
+
 #else
-static inline struct __get_user futex_xor(u32 __user *uaddr, int n)
-{
-	struct __get_user asm_ret = __get_user_4(uaddr);
-	if (!asm_ret.err) {
-		int oldval, newval;
-		do {
-			oldval = asm_ret.val;
-			newval = oldval ^ n;
-			asm_ret = futex_cmpxchg(uaddr, oldval, newval);
-		} while (asm_ret.err == 0 && oldval != asm_ret.val);
+
+#define __futex_call(FN)						\
+	{								\
+		struct __get_user gu = FN((u32 __force *)uaddr, lock, oparg); \
+		val = gu.val;						\
+		ret = gu.err;						\
 	}
-	return asm_ret;
-}
+
+#define __futex_set() __futex_call(__atomic_xchg)
+#define __futex_add() __futex_call(__atomic_xchg_add)
+#define __futex_or() __futex_call(__atomic_or)
+#define __futex_andn() __futex_call(__atomic_andn)
+#define __futex_xor() __futex_call(__atomic_xor)
+
+#define __futex_cmpxchg()						\
+	{								\
+		struct __get_user gu = __atomic_cmpxchg((u32 __force *)uaddr, \
+							lock, oldval, oparg); \
+		val = gu.val;						\
+		ret = gu.err;						\
+	}
+
+/*
+ * Find the lock pointer for the atomic calls to use, and issue a
+ * prefetch to the user address to bring it into cache.  Similar to
+ * __atomic_setup(), but we can't do a read into the L1 since it might
+ * fault; instead we do a prefetch into the L2.
+ */
+#define __futex_prolog()					\
+	int *lock;						\
+	__insn_prefetch(uaddr);					\
+	lock = __atomic_hashed_lock((int __force *)uaddr)
 #endif
 
 static inline int futex_atomic_op_inuser(int encoded_op, u32 __user *uaddr)
@@ -59,8 +111,12 @@
 	int cmp = (encoded_op >> 24) & 15;
 	int oparg = (encoded_op << 8) >> 20;
 	int cmparg = (encoded_op << 20) >> 20;
-	int ret;
-	struct __get_user asm_ret;
+	int uninitialized_var(val), ret;
+
+	__futex_prolog();
+
+	/* The 32-bit futex code makes this assumption, so validate it here. */
+	BUILD_BUG_ON(sizeof(atomic_t) != sizeof(int));
 
 	if (encoded_op & (FUTEX_OP_OPARG_SHIFT << 28))
 		oparg = 1 << oparg;
@@ -71,46 +127,45 @@
 	pagefault_disable();
 	switch (op) {
 	case FUTEX_OP_SET:
-		asm_ret = futex_set(uaddr, oparg);
+		__futex_set();
 		break;
 	case FUTEX_OP_ADD:
-		asm_ret = futex_add(uaddr, oparg);
+		__futex_add();
 		break;
 	case FUTEX_OP_OR:
-		asm_ret = futex_or(uaddr, oparg);
+		__futex_or();
 		break;
 	case FUTEX_OP_ANDN:
-		asm_ret = futex_andn(uaddr, oparg);
+		__futex_andn();
 		break;
 	case FUTEX_OP_XOR:
-		asm_ret = futex_xor(uaddr, oparg);
+		__futex_xor();
 		break;
 	default:
-		asm_ret.err = -ENOSYS;
+		ret = -ENOSYS;
+		break;
 	}
 	pagefault_enable();
 
-	ret = asm_ret.err;
-
 	if (!ret) {
 		switch (cmp) {
 		case FUTEX_OP_CMP_EQ:
-			ret = (asm_ret.val == cmparg);
+			ret = (val == cmparg);
 			break;
 		case FUTEX_OP_CMP_NE:
-			ret = (asm_ret.val != cmparg);
+			ret = (val != cmparg);
 			break;
 		case FUTEX_OP_CMP_LT:
-			ret = (asm_ret.val < cmparg);
+			ret = (val < cmparg);
 			break;
 		case FUTEX_OP_CMP_GE:
-			ret = (asm_ret.val >= cmparg);
+			ret = (val >= cmparg);
 			break;
 		case FUTEX_OP_CMP_LE:
-			ret = (asm_ret.val <= cmparg);
+			ret = (val <= cmparg);
 			break;
 		case FUTEX_OP_CMP_GT:
-			ret = (asm_ret.val > cmparg);
+			ret = (val > cmparg);
 			break;
 		default:
 			ret = -ENOSYS;
@@ -120,22 +175,20 @@
 }
 
 static inline int futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr,
-						u32 oldval, u32 newval)
+						u32 oldval, u32 oparg)
 {
-	struct __get_user asm_ret;
+	int ret, val;
+
+	__futex_prolog();
 
 	if (!access_ok(VERIFY_WRITE, uaddr, sizeof(u32)))
 		return -EFAULT;
 
-	asm_ret = futex_cmpxchg(uaddr, oldval, newval);
-	*uval = asm_ret.val;
-	return asm_ret.err;
-}
+	__futex_cmpxchg();
 
-#ifndef __tilegx__
-/* Return failure from the atomic wrappers. */
-struct __get_user __atomic_bad_address(int __user *addr);
-#endif
+	*uval = val;
+	return ret;
+}
 
 #endif /* !__ASSEMBLY__ */
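Because the helpers above communicate purely through names in the enclosing scope, a caller has to declare exactly those names. A minimal hypothetical caller, just to make the macro protocol explicit (the real and only user is futex_atomic_op_inuser() above; example_futex_add() is made up for illustration):

static inline int example_futex_add(u32 __user *uaddr, int oparg)
{
	int val, ret;
	__futex_prolog();	/* declares "lock" on tilepro; empty on tilegx */

	pagefault_disable();
	__futex_add();		/* fills in "ret" and "val" via the names above */
	pagefault_enable();

	return ret ? ret : val;	/* fetched old value on success */
}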
 
diff --git a/arch/tile/include/asm/hardwall.h b/arch/tile/include/asm/hardwall.h
index 2ac4228..47514a5 100644
--- a/arch/tile/include/asm/hardwall.h
+++ b/arch/tile/include/asm/hardwall.h
@@ -11,12 +11,14 @@
  *   NON INFRINGEMENT.  See the GNU General Public License for
  *   more details.
  *
- * Provide methods for the HARDWALL_FILE for accessing the UDN.
+ * Provide methods for access control of per-cpu resources like
+ * UDN, IDN, or IPI.
  */
 
 #ifndef _ASM_TILE_HARDWALL_H
 #define _ASM_TILE_HARDWALL_H
 
+#include <arch/chip.h>
 #include <linux/ioctl.h>
 
 #define HARDWALL_IOCTL_BASE 0xa2
@@ -24,8 +26,9 @@
 /*
  * The HARDWALL_CREATE() ioctl is a macro with a "size" argument.
  * The resulting ioctl value is passed to the kernel in conjunction
- * with a pointer to a little-endian bitmask of cpus, which must be
- * physically in a rectangular configuration on the chip.
+ * with a pointer to a standard kernel bitmask of cpus.
+ * For network resources (UDN or IDN) the bitmask must physically
+ * represent a rectangular configuration on the chip.
  * The "size" is the number of bytes of cpu mask data.
  */
 #define _HARDWALL_CREATE 1
@@ -44,13 +47,7 @@
 #define HARDWALL_GET_ID \
  _IO(HARDWALL_IOCTL_BASE, _HARDWALL_GET_ID)
 
-#ifndef __KERNEL__
-
-/* This is the canonical name expected by userspace. */
-#define HARDWALL_FILE "/dev/hardwall"
-
-#else
-
+#ifdef __KERNEL__
 /* /proc hooks for hardwall. */
 struct proc_dir_entry;
 #ifdef CONFIG_HARDWALL
@@ -59,7 +56,6 @@
 #else
 static inline void proc_tile_hardwall_init(struct proc_dir_entry *root) {}
 #endif
-
 #endif
 
 #endif /* _ASM_TILE_HARDWALL_H */
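For orientation, a rough user-space sketch of creating a hardwall (illustrative only: the cpu numbering, the 2x2 rectangle, and the use of the /dev/hardwall name this patch drops from the header are all assumptions):

#include <fcntl.h>
#include <string.h>
#include <sys/ioctl.h>

int hardwall_create_example(void)
{
	unsigned char mask[8];		/* little-endian cpu bitmask bytes */
	int fd = open("/dev/hardwall", O_RDWR);

	if (fd < 0)
		return -1;
	memset(mask, 0, sizeof(mask));
	mask[0] = 0x33;	/* cpus 0,1,4,5: a 2x2 rectangle on a 4-wide chip (made up) */
	return ioctl(fd, HARDWALL_CREATE(sizeof(mask)), mask);
}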
diff --git a/arch/tile/include/asm/hugetlb.h b/arch/tile/include/asm/hugetlb.h
index d396d18..b204238 100644
--- a/arch/tile/include/asm/hugetlb.h
+++ b/arch/tile/include/asm/hugetlb.h
@@ -106,4 +106,25 @@
 {
 }
 
+#ifdef CONFIG_HUGETLB_SUPER_PAGES
+static inline pte_t arch_make_huge_pte(pte_t entry, struct vm_area_struct *vma,
+				       struct page *page, int writable)
+{
+	size_t pagesize = huge_page_size(hstate_vma(vma));
+	if (pagesize != PUD_SIZE && pagesize != PMD_SIZE)
+		entry = pte_mksuper(entry);
+	return entry;
+}
+#define arch_make_huge_pte arch_make_huge_pte
+
+/* Sizes to scale up page size for PTEs with HV_PTE_SUPER bit. */
+enum {
+	HUGE_SHIFT_PGDIR = 0,
+	HUGE_SHIFT_PMD = 1,
+	HUGE_SHIFT_PAGE = 2,
+	HUGE_SHIFT_ENTRIES
+};
+extern int huge_shift[HUGE_SHIFT_ENTRIES];
+#endif
+
 #endif /* _ASM_TILE_HUGETLB_H */
diff --git a/arch/tile/include/asm/irqflags.h b/arch/tile/include/asm/irqflags.h
index 5db0ce5..b4e96fe 100644
--- a/arch/tile/include/asm/irqflags.h
+++ b/arch/tile/include/asm/irqflags.h
@@ -28,10 +28,10 @@
  */
 #if CHIP_HAS_AUX_PERF_COUNTERS()
 #define LINUX_MASKABLE_INTERRUPTS_HI \
-       (~(INT_MASK_HI(INT_PERF_COUNT) | INT_MASK_HI(INT_AUX_PERF_COUNT)))
+	(~(INT_MASK_HI(INT_PERF_COUNT) | INT_MASK_HI(INT_AUX_PERF_COUNT)))
 #else
 #define LINUX_MASKABLE_INTERRUPTS_HI \
-       (~(INT_MASK_HI(INT_PERF_COUNT)))
+	(~(INT_MASK_HI(INT_PERF_COUNT)))
 #endif
 
 #else
@@ -90,6 +90,14 @@
 	__insn_mtspr(SPR_INTERRUPT_MASK_RESET_K_0, (unsigned long)(__m)); \
 	__insn_mtspr(SPR_INTERRUPT_MASK_RESET_K_1, (unsigned long)(__m>>32)); \
 } while (0)
+#define interrupt_mask_save_mask() \
+	(__insn_mfspr(SPR_INTERRUPT_MASK_SET_K_0) | \
+	 (((unsigned long long)__insn_mfspr(SPR_INTERRUPT_MASK_SET_K_1))<<32))
+#define interrupt_mask_restore_mask(mask) do { \
+	unsigned long long __m = (mask); \
+	__insn_mtspr(SPR_INTERRUPT_MASK_K_0, (unsigned long)(__m)); \
+	__insn_mtspr(SPR_INTERRUPT_MASK_K_1, (unsigned long)(__m>>32)); \
+} while (0)
 #else
 #define interrupt_mask_set(n) \
 	__insn_mtspr(SPR_INTERRUPT_MASK_SET_K, (1UL << (n)))
@@ -101,6 +109,10 @@
 	__insn_mtspr(SPR_INTERRUPT_MASK_SET_K, (mask))
 #define interrupt_mask_reset_mask(mask) \
 	__insn_mtspr(SPR_INTERRUPT_MASK_RESET_K, (mask))
+#define interrupt_mask_save_mask() \
+	__insn_mfspr(SPR_INTERRUPT_MASK_K)
+#define interrupt_mask_restore_mask(mask) \
+	__insn_mtspr(SPR_INTERRUPT_MASK_K, (mask))
 #endif
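The new save/restore pair lets the complete mask be stashed and later reinstated verbatim; a minimal sketch of the intended pattern (hypothetical caller, not code from this patch):

	unsigned long long saved = interrupt_mask_save_mask();

	interrupt_mask_set_mask(-1ULL);		/* block every maskable interrupt */
	/* ... do work with everything masked ... */
	interrupt_mask_restore_mask(saved);	/* put back exactly what was there */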
 
 /*
@@ -122,7 +134,7 @@
 
 /* Disable all interrupts, including NMIs. */
 #define arch_local_irq_disable_all() \
-	interrupt_mask_set_mask(-1UL)
+	interrupt_mask_set_mask(-1ULL)
 
 /* Re-enable all maskable interrupts. */
 #define arch_local_irq_enable() \
@@ -179,7 +191,7 @@
 #ifdef __tilegx__
 
 #if INT_MEM_ERROR != 0
-# error Fix IRQ_DISABLED() macro
+# error Fix IRQS_DISABLED() macro
 #endif
 
 /* Return 0 or 1 to indicate whether interrupts are currently disabled. */
@@ -207,9 +219,10 @@
 	mtspr   SPR_INTERRUPT_MASK_SET_K, tmp
 
 /* Enable interrupts. */
-#define IRQ_ENABLE(tmp0, tmp1)					\
+#define IRQ_ENABLE_LOAD(tmp0, tmp1)				\
 	GET_INTERRUPTS_ENABLED_MASK_PTR(tmp0);			\
-	ld      tmp0, tmp0;					\
+	ld      tmp0, tmp0
+#define IRQ_ENABLE_APPLY(tmp0, tmp1)				\
 	mtspr   SPR_INTERRUPT_MASK_RESET_K, tmp0
 
 #else /* !__tilegx__ */
@@ -253,17 +266,22 @@
 	mtspr   SPR_INTERRUPT_MASK_SET_K_1, tmp
 
 /* Enable interrupts. */
-#define IRQ_ENABLE(tmp0, tmp1)					\
+#define IRQ_ENABLE_LOAD(tmp0, tmp1)				\
 	GET_INTERRUPTS_ENABLED_MASK_PTR(tmp0);			\
 	{							\
 	 lw     tmp0, tmp0;					\
 	 addi   tmp1, tmp0, 4					\
 	};							\
-	lw      tmp1, tmp1;					\
+	lw      tmp1, tmp1
+#define IRQ_ENABLE_APPLY(tmp0, tmp1)				\
 	mtspr   SPR_INTERRUPT_MASK_RESET_K_0, tmp0;		\
 	mtspr   SPR_INTERRUPT_MASK_RESET_K_1, tmp1
 #endif
 
+#define IRQ_ENABLE(tmp0, tmp1)					\
+	IRQ_ENABLE_LOAD(tmp0, tmp1);				\
+	IRQ_ENABLE_APPLY(tmp0, tmp1)
+
 /*
  * Do the CPU's IRQ-state tracing from assembly code. We call a
  * C function, but almost everywhere we do, we don't mind clobbering
diff --git a/arch/tile/include/asm/kexec.h b/arch/tile/include/asm/kexec.h
index c11a6cc..fc98ccf 100644
--- a/arch/tile/include/asm/kexec.h
+++ b/arch/tile/include/asm/kexec.h
@@ -19,12 +19,24 @@
 
 #include <asm/page.h>
 
+#ifndef __tilegx__
 /* Maximum physical address we can use pages from. */
 #define KEXEC_SOURCE_MEMORY_LIMIT TASK_SIZE
 /* Maximum address we can reach in physical address mode. */
 #define KEXEC_DESTINATION_MEMORY_LIMIT TASK_SIZE
 /* Maximum address we can use for the control code buffer. */
 #define KEXEC_CONTROL_MEMORY_LIMIT TASK_SIZE
+#else
+/* We need to limit the memory below PGDIR_SIZE since
+ * we only set up the page table for [0, PGDIR_SIZE) before the final kexec.
+ */
+/* Maximum physical address we can use pages from. */
+#define KEXEC_SOURCE_MEMORY_LIMIT PGDIR_SIZE
+/* Maximum address we can reach in physical address mode. */
+#define KEXEC_DESTINATION_MEMORY_LIMIT PGDIR_SIZE
+/* Maximum address we can use for the control code buffer. */
+#define KEXEC_CONTROL_MEMORY_LIMIT PGDIR_SIZE
+#endif
 
 #define KEXEC_CONTROL_PAGE_SIZE	PAGE_SIZE
 
diff --git a/arch/tile/include/asm/mmu.h b/arch/tile/include/asm/mmu.h
index 92f94c7..e2c7890 100644
--- a/arch/tile/include/asm/mmu.h
+++ b/arch/tile/include/asm/mmu.h
@@ -21,7 +21,7 @@
 	 * Written under the mmap_sem semaphore; read without the
 	 * semaphore but atomically; it is set conservatively.
 	 */
-	unsigned int priority_cached;
+	unsigned long priority_cached;
 };
 
 typedef struct mm_context mm_context_t;
diff --git a/arch/tile/include/asm/mmu_context.h b/arch/tile/include/asm/mmu_context.h
index 15fb246..37f0b74 100644
--- a/arch/tile/include/asm/mmu_context.h
+++ b/arch/tile/include/asm/mmu_context.h
@@ -30,11 +30,15 @@
 	return 0;
 }
 
-/* Note that arch/tile/kernel/head.S also calls hv_install_context() */
+/*
+ * Note that arch/tile/kernel/head_NN.S and arch/tile/mm/migrate_NN.S
+ * also call hv_install_context().
+ */
 static inline void __install_page_table(pgd_t *pgdir, int asid, pgprot_t prot)
 {
 	/* FIXME: DIRECTIO should not always be set. FIXME. */
-	int rc = hv_install_context(__pa(pgdir), prot, asid, HV_CTX_DIRECTIO);
+	int rc = hv_install_context(__pa(pgdir), prot, asid,
+				    HV_CTX_DIRECTIO | CTX_PAGE_FLAG);
 	if (rc < 0)
 		panic("hv_install_context failed: %d", rc);
 }
diff --git a/arch/tile/include/asm/module.h b/arch/tile/include/asm/module.h
new file mode 100644
index 0000000..44ed07c
--- /dev/null
+++ b/arch/tile/include/asm/module.h
@@ -0,0 +1,40 @@
+/*
+ * Copyright 2011 Tilera Corporation. All Rights Reserved.
+ *
+ *   This program is free software; you can redistribute it and/or
+ *   modify it under the terms of the GNU General Public License
+ *   as published by the Free Software Foundation, version 2.
+ *
+ *   This program is distributed in the hope that it will be useful, but
+ *   WITHOUT ANY WARRANTY; without even the implied warranty of
+ *   MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
+ *   NON INFRINGEMENT.  See the GNU General Public License for
+ *   more details.
+ */
+
+#ifndef _ASM_TILE_MODULE_H
+#define _ASM_TILE_MODULE_H
+
+#include <arch/chip.h>
+
+#include <asm-generic/module.h>
+
+/* We can't use modules built with different page sizes. */
+#if defined(CONFIG_PAGE_SIZE_16KB)
+# define MODULE_PGSZ " 16KB"
+#elif defined(CONFIG_PAGE_SIZE_64KB)
+# define MODULE_PGSZ " 64KB"
+#else
+# define MODULE_PGSZ ""
+#endif
+
+/* We don't really support non-SMP, so tag the vermagic if someone tries. */
+#ifdef CONFIG_SMP
+#define MODULE_NOSMP ""
+#else
+#define MODULE_NOSMP " nosmp"
+#endif
+
+#define MODULE_ARCH_VERMAGIC CHIP_ARCH_NAME MODULE_PGSZ MODULE_NOSMP
+
+#endif /* _ASM_TILE_MODULE_H */
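For concreteness, the arch vermagic this yields under two example configurations (the expansions follow directly from the definitions above; the CHIP_ARCH_NAME literal itself comes from <arch/chip.h>):

/*
 * CONFIG_PAGE_SIZE_64KB, CONFIG_SMP=y:
 *	MODULE_ARCH_VERMAGIC == CHIP_ARCH_NAME " 64KB" ""
 * CONFIG_PAGE_SIZE_16KB, CONFIG_SMP=n:
 *	MODULE_ARCH_VERMAGIC == CHIP_ARCH_NAME " 16KB" " nosmp"
 *
 * so a module built against one page size cannot be loaded into a
 * kernel built with another.
 */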
diff --git a/arch/tile/include/asm/page.h b/arch/tile/include/asm/page.h
index db93518..9d9131e 100644
--- a/arch/tile/include/asm/page.h
+++ b/arch/tile/include/asm/page.h
@@ -20,8 +20,17 @@
 #include <arch/chip.h>
 
 /* PAGE_SHIFT and HPAGE_SHIFT determine the page sizes. */
-#define PAGE_SHIFT	HV_LOG2_PAGE_SIZE_SMALL
-#define HPAGE_SHIFT	HV_LOG2_PAGE_SIZE_LARGE
+#if defined(CONFIG_PAGE_SIZE_16KB)
+#define PAGE_SHIFT	14
+#define CTX_PAGE_FLAG	HV_CTX_PG_SM_16K
+#elif defined(CONFIG_PAGE_SIZE_64KB)
+#define PAGE_SHIFT	16
+#define CTX_PAGE_FLAG	HV_CTX_PG_SM_64K
+#else
+#define PAGE_SHIFT	HV_LOG2_DEFAULT_PAGE_SIZE_SMALL
+#define CTX_PAGE_FLAG	0
+#endif
+#define HPAGE_SHIFT	HV_LOG2_DEFAULT_PAGE_SIZE_LARGE
 
 #define PAGE_SIZE	(_AC(1, UL) << PAGE_SHIFT)
 #define HPAGE_SIZE	(_AC(1, UL) << HPAGE_SHIFT)
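A quick worked check of the new constants (HV_LOG2_DEFAULT_PAGE_SIZE_SMALL/LARGE are 16 and 24, per <hv/hypervisor.h> below):

/*
 * CONFIG_PAGE_SIZE_16KB:  PAGE_SHIFT = 14  ->  PAGE_SIZE  = 16 KB
 * CONFIG_PAGE_SIZE_64KB:  PAGE_SHIFT = 16  ->  PAGE_SIZE  = 64 KB
 * neither selected:       PAGE_SHIFT = 16  ->  64 KB (hypervisor default)
 * in all cases:           HPAGE_SHIFT = 24 ->  HPAGE_SIZE = 16 MB
 */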
@@ -78,8 +87,7 @@
 /*
  * User L2 page tables are managed as one L2 page table per page,
  * because we use the page allocator for them.  This keeps the allocation
- * simple and makes it potentially useful to implement HIGHPTE at some point.
- * However, it's also inefficient, since L2 page tables are much smaller
+ * simple, but it's also inefficient, since L2 page tables are much smaller
  * than pages (currently 2KB vs 64KB).  So we should revisit this.
  */
 typedef struct page *pgtable_t;
@@ -128,7 +136,7 @@
 
 #define HUGETLB_PAGE_ORDER	(HPAGE_SHIFT - PAGE_SHIFT)
 
-#define HUGE_MAX_HSTATE		2
+#define HUGE_MAX_HSTATE		6
 
 #ifdef CONFIG_HUGETLB_PAGE
 #define HAVE_ARCH_HUGETLB_UNMAPPED_AREA
diff --git a/arch/tile/include/asm/pgalloc.h b/arch/tile/include/asm/pgalloc.h
index e919c0b..1b90250 100644
--- a/arch/tile/include/asm/pgalloc.h
+++ b/arch/tile/include/asm/pgalloc.h
@@ -19,24 +19,24 @@
 #include <linux/mm.h>
 #include <linux/mmzone.h>
 #include <asm/fixmap.h>
+#include <asm/page.h>
 #include <hv/hypervisor.h>
 
 /* Bits for the size of the second-level page table. */
-#define L2_KERNEL_PGTABLE_SHIFT \
-  (HV_LOG2_PAGE_SIZE_LARGE - HV_LOG2_PAGE_SIZE_SMALL + HV_LOG2_PTE_SIZE)
+#define L2_KERNEL_PGTABLE_SHIFT _HV_LOG2_L2_SIZE(HPAGE_SHIFT, PAGE_SHIFT)
+
+/* How big is a kernel L2 page table? */
+#define L2_KERNEL_PGTABLE_SIZE (1UL << L2_KERNEL_PGTABLE_SHIFT)
 
 /* We currently allocate user L2 page tables by page (unlike kernel L2s). */
-#if L2_KERNEL_PGTABLE_SHIFT < HV_LOG2_PAGE_SIZE_SMALL
-#define L2_USER_PGTABLE_SHIFT HV_LOG2_PAGE_SIZE_SMALL
+#if L2_KERNEL_PGTABLE_SHIFT < PAGE_SHIFT
+#define L2_USER_PGTABLE_SHIFT PAGE_SHIFT
 #else
 #define L2_USER_PGTABLE_SHIFT L2_KERNEL_PGTABLE_SHIFT
 #endif
 
 /* How many pages do we need, as an "order", for a user L2 page table? */
-#define L2_USER_PGTABLE_ORDER (L2_USER_PGTABLE_SHIFT - HV_LOG2_PAGE_SIZE_SMALL)
-
-/* How big is a kernel L2 page table? */
-#define L2_KERNEL_PGTABLE_SIZE (1 << L2_KERNEL_PGTABLE_SHIFT)
+#define L2_USER_PGTABLE_ORDER (L2_USER_PGTABLE_SHIFT - PAGE_SHIFT)
 
 static inline void set_pmd(pmd_t *pmdp, pmd_t pmd)
 {
@@ -50,14 +50,14 @@
 static inline void pmd_populate_kernel(struct mm_struct *mm,
 				       pmd_t *pmd, pte_t *ptep)
 {
-	set_pmd(pmd, ptfn_pmd(__pa(ptep) >> HV_LOG2_PAGE_TABLE_ALIGN,
+	set_pmd(pmd, ptfn_pmd(HV_CPA_TO_PTFN(__pa(ptep)),
 			      __pgprot(_PAGE_PRESENT)));
 }
 
 static inline void pmd_populate(struct mm_struct *mm, pmd_t *pmd,
 				pgtable_t page)
 {
-	set_pmd(pmd, ptfn_pmd(HV_PFN_TO_PTFN(page_to_pfn(page)),
+	set_pmd(pmd, ptfn_pmd(HV_CPA_TO_PTFN(PFN_PHYS(page_to_pfn(page))),
 			      __pgprot(_PAGE_PRESENT)));
 }
 
@@ -68,8 +68,20 @@
 extern pgd_t *pgd_alloc(struct mm_struct *mm);
 extern void pgd_free(struct mm_struct *mm, pgd_t *pgd);
 
-extern pgtable_t pte_alloc_one(struct mm_struct *mm, unsigned long address);
-extern void pte_free(struct mm_struct *mm, struct page *pte);
+extern pgtable_t pgtable_alloc_one(struct mm_struct *mm, unsigned long address,
+				   int order);
+extern void pgtable_free(struct mm_struct *mm, struct page *pte, int order);
+
+static inline pgtable_t pte_alloc_one(struct mm_struct *mm,
+				      unsigned long address)
+{
+	return pgtable_alloc_one(mm, address, L2_USER_PGTABLE_ORDER);
+}
+
+static inline void pte_free(struct mm_struct *mm, struct page *pte)
+{
+	pgtable_free(mm, pte, L2_USER_PGTABLE_ORDER);
+}
 
 #define pmd_pgtable(pmd) pmd_page(pmd)
 
@@ -85,8 +97,13 @@
 	pte_free(mm, virt_to_page(pte));
 }
 
-extern void __pte_free_tlb(struct mmu_gather *tlb, struct page *pte,
-			   unsigned long address);
+extern void __pgtable_free_tlb(struct mmu_gather *tlb, struct page *pte,
+			       unsigned long address, int order);
+static inline void __pte_free_tlb(struct mmu_gather *tlb, struct page *pte,
+				  unsigned long address)
+{
+	__pgtable_free_tlb(tlb, pte, address, L2_USER_PGTABLE_ORDER);
+}
 
 #define check_pgt_cache()	do { } while (0)
 
@@ -104,19 +121,44 @@
 void shatter_huge_page(unsigned long addr);
 
 #ifdef __tilegx__
-/* We share a single page allocator for both L1 and L2 page tables. */
-#if HV_L1_SIZE != HV_L2_SIZE
-# error Rework assumption that L1 and L2 page tables are same size.
-#endif
-#define L1_USER_PGTABLE_ORDER L2_USER_PGTABLE_ORDER
+
 #define pud_populate(mm, pud, pmd) \
   pmd_populate_kernel((mm), (pmd_t *)(pud), (pte_t *)(pmd))
-#define pmd_alloc_one(mm, addr) \
-  ((pmd_t *)page_to_virt(pte_alloc_one((mm), (addr))))
-#define pmd_free(mm, pmdp) \
-  pte_free((mm), virt_to_page(pmdp))
-#define __pmd_free_tlb(tlb, pmdp, address) \
-  __pte_free_tlb((tlb), virt_to_page(pmdp), (address))
+
+/* Bits for the size of the L1 (intermediate) page table. */
+#define L1_KERNEL_PGTABLE_SHIFT _HV_LOG2_L1_SIZE(HPAGE_SHIFT)
+
+/* How big is a kernel L2 page table? */
+#define L1_KERNEL_PGTABLE_SIZE (1UL << L1_KERNEL_PGTABLE_SHIFT)
+
+/* We currently allocate L1 page tables by page. */
+#if L1_KERNEL_PGTABLE_SHIFT < PAGE_SHIFT
+#define L1_USER_PGTABLE_SHIFT PAGE_SHIFT
+#else
+#define L1_USER_PGTABLE_SHIFT L1_KERNEL_PGTABLE_SHIFT
 #endif
 
+/* How many pages do we need, as an "order", for an L1 page table? */
+#define L1_USER_PGTABLE_ORDER (L1_USER_PGTABLE_SHIFT - PAGE_SHIFT)
+
+static inline pmd_t *pmd_alloc_one(struct mm_struct *mm, unsigned long address)
+{
+	struct page *p = pgtable_alloc_one(mm, address, L1_USER_PGTABLE_ORDER);
+	return (pmd_t *)page_to_virt(p);
+}
+
+static inline void pmd_free(struct mm_struct *mm, pmd_t *pmdp)
+{
+	pgtable_free(mm, virt_to_page(pmdp), L1_USER_PGTABLE_ORDER);
+}
+
+static inline void __pmd_free_tlb(struct mmu_gather *tlb, pmd_t *pmdp,
+				  unsigned long address)
+{
+	__pgtable_free_tlb(tlb, virt_to_page(pmdp), address,
+			   L1_USER_PGTABLE_ORDER);
+}
+
+#endif /* __tilegx__ */
+
 #endif /* _ASM_TILE_PGALLOC_H */
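A worked example of the new shift arithmetic, assuming _HV_LOG2_L2_SIZE(h, p) expands to (h) - (p) plus the log2 of the PTE size, i.e. the same formula the old code spelled out by hand:

/*
 * 64 KB pages, 16 MB huge pages, 8-byte PTEs (assumed sizes):
 *	L2_KERNEL_PGTABLE_SHIFT = 24 - 16 + 3 = 11	-> 2 KB kernel L2 tables
 *	L2_USER_PGTABLE_SHIFT   = max(11, 16)  = 16	-> one 64 KB page per table
 *	L2_USER_PGTABLE_ORDER   = 16 - 16      = 0
 *
 * which matches the "2KB vs 64KB" remark in <asm/page.h>.
 */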
diff --git a/arch/tile/include/asm/pgtable.h b/arch/tile/include/asm/pgtable.h
index 6749091..73b1a4c 100644
--- a/arch/tile/include/asm/pgtable.h
+++ b/arch/tile/include/asm/pgtable.h
@@ -27,8 +27,10 @@
 #include <linux/slab.h>
 #include <linux/list.h>
 #include <linux/spinlock.h>
+#include <linux/pfn.h>
 #include <asm/processor.h>
 #include <asm/fixmap.h>
+#include <asm/page.h>
 
 struct mm_struct;
 struct vm_area_struct;
@@ -69,6 +71,7 @@
 
 #define _PAGE_PRESENT           HV_PTE_PRESENT
 #define _PAGE_HUGE_PAGE         HV_PTE_PAGE
+#define _PAGE_SUPER_PAGE        HV_PTE_SUPER
 #define _PAGE_READABLE          HV_PTE_READABLE
 #define _PAGE_WRITABLE          HV_PTE_WRITABLE
 #define _PAGE_EXECUTABLE        HV_PTE_EXECUTABLE
@@ -85,6 +88,7 @@
 #define _PAGE_ALL (\
   _PAGE_PRESENT | \
   _PAGE_HUGE_PAGE | \
+  _PAGE_SUPER_PAGE | \
   _PAGE_READABLE | \
   _PAGE_WRITABLE | \
   _PAGE_EXECUTABLE | \
@@ -162,7 +166,7 @@
   (pgprot_t) { ((oldprot).val & ~_PAGE_ALL) | (newprot).val }
 
 /* Just setting the PFN to zero suffices. */
-#define pte_pgprot(x) hv_pte_set_pfn((x), 0)
+#define pte_pgprot(x) hv_pte_set_pa((x), 0)
 
 /*
  * For PTEs and PDEs, we must clear the Present bit first when
@@ -187,6 +191,7 @@
  * Undefined behaviour if not..
  */
 #define pte_present hv_pte_get_present
+#define pte_mknotpresent hv_pte_clear_present
 #define pte_user hv_pte_get_user
 #define pte_read hv_pte_get_readable
 #define pte_dirty hv_pte_get_dirty
@@ -194,6 +199,7 @@
 #define pte_write hv_pte_get_writable
 #define pte_exec hv_pte_get_executable
 #define pte_huge hv_pte_get_page
+#define pte_super hv_pte_get_super
 #define pte_rdprotect hv_pte_clear_readable
 #define pte_exprotect hv_pte_clear_executable
 #define pte_mkclean hv_pte_clear_dirty
@@ -206,6 +212,7 @@
 #define pte_mkyoung hv_pte_set_accessed
 #define pte_mkwrite hv_pte_set_writable
 #define pte_mkhuge hv_pte_set_page
+#define pte_mksuper hv_pte_set_super
 
 #define pte_special(pte) 0
 #define pte_mkspecial(pte) (pte)
@@ -261,7 +268,7 @@
 
 static inline unsigned long pte_pfn(pte_t pte)
 {
-	return hv_pte_get_pfn(pte);
+	return PFN_DOWN(hv_pte_get_pa(pte));
 }
 
 /* Set or get the remote cache cpu in a pgprot with remote caching. */
@@ -270,7 +277,7 @@
 
 static inline pte_t pfn_pte(unsigned long pfn, pgprot_t prot)
 {
-	return hv_pte_set_pfn(prot, pfn);
+	return hv_pte_set_pa(prot, PFN_PHYS(pfn));
 }
 
 /* Support for priority mappings. */
@@ -312,7 +319,7 @@
  */
 static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
 {
-	return pfn_pte(hv_pte_get_pfn(pte), newprot);
+	return pfn_pte(pte_pfn(pte), newprot);
 }
 
 /*
@@ -335,13 +342,8 @@
  */
 #define pgd_offset_k(address) pgd_offset(&init_mm, address)
 
-#if defined(CONFIG_HIGHPTE)
-extern pte_t *pte_offset_map(pmd_t *, unsigned long address);
-#define pte_unmap(pte) kunmap_atomic(pte)
-#else
 #define pte_offset_map(dir, address) pte_offset_kernel(dir, address)
 #define pte_unmap(pte) do { } while (0)
-#endif
 
 /* Clear a non-executable kernel PTE and flush it from the TLB. */
 #define kpte_clear_flush(ptep, vaddr)		\
@@ -410,6 +412,46 @@
 	return (address >> PMD_SHIFT) & (PTRS_PER_PMD - 1);
 }
 
+#define __HAVE_ARCH_PMDP_TEST_AND_CLEAR_YOUNG
+static inline int pmdp_test_and_clear_young(struct vm_area_struct *vma,
+					    unsigned long address,
+					    pmd_t *pmdp)
+{
+	return ptep_test_and_clear_young(vma, address, pmdp_ptep(pmdp));
+}
+
+#define __HAVE_ARCH_PMDP_SET_WRPROTECT
+static inline void pmdp_set_wrprotect(struct mm_struct *mm,
+				      unsigned long address, pmd_t *pmdp)
+{
+	ptep_set_wrprotect(mm, address, pmdp_ptep(pmdp));
+}
+
+
+#define __HAVE_ARCH_PMDP_GET_AND_CLEAR
+static inline pmd_t pmdp_get_and_clear(struct mm_struct *mm,
+				       unsigned long address,
+				       pmd_t *pmdp)
+{
+	return pte_pmd(ptep_get_and_clear(mm, address, pmdp_ptep(pmdp)));
+}
+
+static inline void __set_pmd(pmd_t *pmdp, pmd_t pmdval)
+{
+	set_pte(pmdp_ptep(pmdp), pmd_pte(pmdval));
+}
+
+#define set_pmd_at(mm, addr, pmdp, pmdval) __set_pmd(pmdp, pmdval)
+
+/* Create a pmd from a PTFN. */
+static inline pmd_t ptfn_pmd(unsigned long ptfn, pgprot_t prot)
+{
+	return pte_pmd(hv_pte_set_ptfn(prot, ptfn));
+}
+
+/* Return the page-table frame number (ptfn) that a pmd_t points at. */
+#define pmd_ptfn(pmd) hv_pte_get_ptfn(pmd_pte(pmd))
+
 /*
  * A given kernel pmd_t maps to a specific virtual address (either a
  * kernel huge page or a kernel pte_t table).  Since kernel pte_t
@@ -430,7 +472,48 @@
  * OK for pte_lockptr(), since we just end up with potentially one
  * lock being used for several pte_t arrays.
  */
-#define pmd_page(pmd) pfn_to_page(HV_PTFN_TO_PFN(pmd_ptfn(pmd)))
+#define pmd_page(pmd) pfn_to_page(PFN_DOWN(HV_PTFN_TO_CPA(pmd_ptfn(pmd))))
+
+static inline void pmd_clear(pmd_t *pmdp)
+{
+	__pte_clear(pmdp_ptep(pmdp));
+}
+
+#define pmd_mknotpresent(pmd)	pte_pmd(pte_mknotpresent(pmd_pte(pmd)))
+#define pmd_young(pmd)		pte_young(pmd_pte(pmd))
+#define pmd_mkyoung(pmd)	pte_pmd(pte_mkyoung(pmd_pte(pmd)))
+#define pmd_mkold(pmd)		pte_pmd(pte_mkold(pmd_pte(pmd)))
+#define pmd_mkwrite(pmd)	pte_pmd(pte_mkwrite(pmd_pte(pmd)))
+#define pmd_write(pmd)		pte_write(pmd_pte(pmd))
+#define pmd_wrprotect(pmd)	pte_pmd(pte_wrprotect(pmd_pte(pmd)))
+#define pmd_mkdirty(pmd)	pte_pmd(pte_mkdirty(pmd_pte(pmd)))
+#define pmd_huge_page(pmd)	pte_huge(pmd_pte(pmd))
+#define pmd_mkhuge(pmd)		pte_pmd(pte_mkhuge(pmd_pte(pmd)))
+#define __HAVE_ARCH_PMD_WRITE
+
+#define pfn_pmd(pfn, pgprot)	pte_pmd(pfn_pte((pfn), (pgprot)))
+#define pmd_pfn(pmd)		pte_pfn(pmd_pte(pmd))
+#define mk_pmd(page, pgprot)	pfn_pmd(page_to_pfn(page), (pgprot))
+
+static inline pmd_t pmd_modify(pmd_t pmd, pgprot_t newprot)
+{
+	return pfn_pmd(pmd_pfn(pmd), newprot);
+}
+
+#ifdef CONFIG_TRANSPARENT_HUGEPAGE
+#define has_transparent_hugepage() 1
+#define pmd_trans_huge pmd_huge_page
+
+static inline pmd_t pmd_mksplitting(pmd_t pmd)
+{
+	return pte_pmd(hv_pte_set_client2(pmd_pte(pmd)));
+}
+
+static inline int pmd_trans_splitting(pmd_t pmd)
+{
+	return hv_pte_get_client2(pmd_pte(pmd));
+}
+#endif /* CONFIG_TRANSPARENT_HUGEPAGE */
 
 /*
  * The pte page can be thought of as an array like this: pte_t[PTRS_PER_PTE]
@@ -448,17 +531,13 @@
        return (pte_t *)pmd_page_vaddr(*pmd) + pte_index(address);
 }
 
-static inline int pmd_huge_page(pmd_t pmd)
-{
-	return pmd_val(pmd) & _PAGE_HUGE_PAGE;
-}
-
 #include <asm-generic/pgtable.h>
 
 /* Support /proc/NN/pgtable API. */
 struct seq_file;
 int arch_proc_pgtable_show(struct seq_file *m, struct mm_struct *mm,
-			   unsigned long vaddr, pte_t *ptep, void **datap);
+			   unsigned long vaddr, unsigned long pagesize,
+			   pte_t *ptep, void **datap);
 
 #endif /* !__ASSEMBLY__ */
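A small worked example of the PA-based conversions introduced above (64 KB pages assumed for the arithmetic):

/*
 * With PAGE_SHIFT == 16:
 *	pfn_pte(0x1234, prot)	stores PA 0x1234 << 16 = 0x12340000
 *	pte_pfn(pte)		returns PFN_DOWN(0x12340000) = 0x1234
 *
 * The PTE now carries a full client physical address, so these helpers
 * no longer depend on a fixed hypervisor small-page size.
 */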
 
diff --git a/arch/tile/include/asm/pgtable_32.h b/arch/tile/include/asm/pgtable_32.h
index 9f98529..4ce4a7a 100644
--- a/arch/tile/include/asm/pgtable_32.h
+++ b/arch/tile/include/asm/pgtable_32.h
@@ -20,11 +20,12 @@
  * The level-1 index is defined by the huge page size.  A PGD is composed
  * of PTRS_PER_PGD pgd_t's and is the top level of the page table.
  */
-#define PGDIR_SHIFT	HV_LOG2_PAGE_SIZE_LARGE
-#define PGDIR_SIZE	HV_PAGE_SIZE_LARGE
+#define PGDIR_SHIFT	HPAGE_SHIFT
+#define PGDIR_SIZE	HPAGE_SIZE
 #define PGDIR_MASK	(~(PGDIR_SIZE-1))
-#define PTRS_PER_PGD	(1 << (32 - PGDIR_SHIFT))
-#define SIZEOF_PGD	(PTRS_PER_PGD * sizeof(pgd_t))
+#define PTRS_PER_PGD	_HV_L1_ENTRIES(HPAGE_SHIFT)
+#define PGD_INDEX(va)	_HV_L1_INDEX(va, HPAGE_SHIFT)
+#define SIZEOF_PGD	_HV_L1_SIZE(HPAGE_SHIFT)
 
 /*
  * The level-2 index is defined by the difference between the huge
@@ -33,8 +34,9 @@
  * Note that the hypervisor docs use PTE for what we call pte_t, so
  * this nomenclature is somewhat confusing.
  */
-#define PTRS_PER_PTE (1 << (HV_LOG2_PAGE_SIZE_LARGE - HV_LOG2_PAGE_SIZE_SMALL))
-#define SIZEOF_PTE	(PTRS_PER_PTE * sizeof(pte_t))
+#define PTRS_PER_PTE	_HV_L2_ENTRIES(HPAGE_SHIFT, PAGE_SHIFT)
+#define PTE_INDEX(va)	_HV_L2_INDEX(va, HPAGE_SHIFT, PAGE_SHIFT)
+#define SIZEOF_PTE	_HV_L2_SIZE(HPAGE_SHIFT, PAGE_SHIFT)
 
 #ifndef __ASSEMBLY__
 
@@ -111,24 +113,14 @@
 	return pte;
 }
 
-static inline void __set_pmd(pmd_t *pmdp, pmd_t pmdval)
-{
-	set_pte(&pmdp->pud.pgd, pmdval.pud.pgd);
-}
-
-/* Create a pmd from a PTFN. */
-static inline pmd_t ptfn_pmd(unsigned long ptfn, pgprot_t prot)
-{
-	return (pmd_t){ { hv_pte_set_ptfn(prot, ptfn) } };
-}
-
-/* Return the page-table frame number (ptfn) that a pmd_t points at. */
-#define pmd_ptfn(pmd) hv_pte_get_ptfn((pmd).pud.pgd)
-
-static inline void pmd_clear(pmd_t *pmdp)
-{
-	__pte_clear(&pmdp->pud.pgd);
-}
+/*
+ * pmds are wrappers around pgds, which are the same as ptes.
+ * It's often convenient to "cast" back and forth and use the pte methods,
+ * which are the methods supplied by the hypervisor.
+ */
+#define pmd_pte(pmd) ((pmd).pud.pgd)
+#define pmdp_ptep(pmdp) (&(pmdp)->pud.pgd)
+#define pte_pmd(pte) ((pmd_t){ { (pte) } })
 
 #endif /* __ASSEMBLY__ */
 
diff --git a/arch/tile/include/asm/pgtable_64.h b/arch/tile/include/asm/pgtable_64.h
index fd80328..2492fa5 100644
--- a/arch/tile/include/asm/pgtable_64.h
+++ b/arch/tile/include/asm/pgtable_64.h
@@ -21,17 +21,19 @@
 #define PGDIR_SIZE	HV_L1_SPAN
 #define PGDIR_MASK	(~(PGDIR_SIZE-1))
 #define PTRS_PER_PGD	HV_L0_ENTRIES
-#define SIZEOF_PGD	(PTRS_PER_PGD * sizeof(pgd_t))
+#define PGD_INDEX(va)	HV_L0_INDEX(va)
+#define SIZEOF_PGD	HV_L0_SIZE
 
 /*
  * The level-1 index is defined by the huge page size.  A PMD is composed
  * of PTRS_PER_PMD pgd_t's and is the middle level of the page table.
  */
-#define PMD_SHIFT	HV_LOG2_PAGE_SIZE_LARGE
-#define PMD_SIZE	HV_PAGE_SIZE_LARGE
+#define PMD_SHIFT	HPAGE_SHIFT
+#define PMD_SIZE	HPAGE_SIZE
 #define PMD_MASK	(~(PMD_SIZE-1))
-#define PTRS_PER_PMD	(1 << (PGDIR_SHIFT - PMD_SHIFT))
-#define SIZEOF_PMD	(PTRS_PER_PMD * sizeof(pmd_t))
+#define PTRS_PER_PMD	_HV_L1_ENTRIES(HPAGE_SHIFT)
+#define PMD_INDEX(va)	_HV_L1_INDEX(va, HPAGE_SHIFT)
+#define SIZEOF_PMD	_HV_L1_SIZE(HPAGE_SHIFT)
 
 /*
  * The level-2 index is defined by the difference between the huge
@@ -40,17 +42,19 @@
  * Note that the hypervisor docs use PTE for what we call pte_t, so
  * this nomenclature is somewhat confusing.
  */
-#define PTRS_PER_PTE (1 << (HV_LOG2_PAGE_SIZE_LARGE - HV_LOG2_PAGE_SIZE_SMALL))
-#define SIZEOF_PTE	(PTRS_PER_PTE * sizeof(pte_t))
+#define PTRS_PER_PTE	_HV_L2_ENTRIES(HPAGE_SHIFT, PAGE_SHIFT)
+#define PTE_INDEX(va)	_HV_L2_INDEX(va, HPAGE_SHIFT, PAGE_SHIFT)
+#define SIZEOF_PTE	_HV_L2_SIZE(HPAGE_SHIFT, PAGE_SHIFT)
 
 /*
- * Align the vmalloc area to an L2 page table, and leave a guard page
- * at the beginning and end.  The vmalloc code also puts in an internal
+ * Align the vmalloc area to an L2 page table.  Omit guard pages at
+ * the beginning and end for simplicity (particularly in the per-cpu
+ * memory allocation code).  The vmalloc code puts in an internal
  * guard page between each allocation.
  */
 #define _VMALLOC_END	HUGE_VMAP_BASE
-#define VMALLOC_END	(_VMALLOC_END - PAGE_SIZE)
-#define VMALLOC_START	(_VMALLOC_START + PAGE_SIZE)
+#define VMALLOC_END	_VMALLOC_END
+#define VMALLOC_START	_VMALLOC_START
 
 #define HUGE_VMAP_END	(HUGE_VMAP_BASE + PGDIR_SIZE)
 
@@ -98,7 +102,7 @@
  * A pud_t points to a pmd_t array.  Since we can have multiple per
  * page, we don't have a one-to-one mapping of pud_t's to pages.
  */
-#define pud_page(pud) pfn_to_page(HV_PTFN_TO_PFN(pud_ptfn(pud)))
+#define pud_page(pud) pfn_to_page(PFN_DOWN(HV_PTFN_TO_CPA(pud_ptfn(pud))))
 
 static inline unsigned long pud_index(unsigned long address)
 {
@@ -108,28 +112,6 @@
 #define pmd_offset(pud, address) \
 	((pmd_t *)pud_page_vaddr(*(pud)) + pmd_index(address))
 
-static inline void __set_pmd(pmd_t *pmdp, pmd_t pmdval)
-{
-	set_pte(pmdp, pmdval);
-}
-
-/* Create a pmd from a PTFN and pgprot. */
-static inline pmd_t ptfn_pmd(unsigned long ptfn, pgprot_t prot)
-{
-	return hv_pte_set_ptfn(prot, ptfn);
-}
-
-/* Return the page-table frame number (ptfn) that a pmd_t points at. */
-static inline unsigned long pmd_ptfn(pmd_t pmd)
-{
-	return hv_pte_get_ptfn(pmd);
-}
-
-static inline void pmd_clear(pmd_t *pmdp)
-{
-	__pte_clear(pmdp);
-}
-
 /* Normalize an address to having the correct high bits set. */
 #define pgd_addr_normalize pgd_addr_normalize
 static inline unsigned long pgd_addr_normalize(unsigned long addr)
@@ -170,6 +152,13 @@
 	return hv_pte(__insn_exch(&ptep->val, 0UL));
 }
 
+/*
+ * pmds are the same as pgds and ptes, so converting is a no-op.
+ */
+#define pmd_pte(pmd) (pmd)
+#define pmdp_ptep(pmdp) (pmdp)
+#define pte_pmd(pte) (pte)
+
 #endif /* __ASSEMBLY__ */
 
 #endif /* _ASM_TILE_PGTABLE_64_H */
diff --git a/arch/tile/include/asm/processor.h b/arch/tile/include/asm/processor.h
index 15cd8a4..8c4dd9f 100644
--- a/arch/tile/include/asm/processor.h
+++ b/arch/tile/include/asm/processor.h
@@ -76,6 +76,17 @@
 
 #ifdef CONFIG_HARDWALL
 struct hardwall_info;
+struct hardwall_task {
+	/* Which hardwall is this task tied to? (or NULL if none) */
+	struct hardwall_info *info;
+	/* Chains this task into the list at info->task_head. */
+	struct list_head list;
+};
+#ifdef __tilepro__
+#define HARDWALL_TYPES 1   /* udn */
+#else
+#define HARDWALL_TYPES 3   /* udn, idn, and ipi */
+#endif
 #endif
 
 struct thread_struct {
@@ -116,10 +127,8 @@
 	unsigned long dstream_pf;
 #endif
 #ifdef CONFIG_HARDWALL
-	/* Is this task tied to an activated hardwall? */
-	struct hardwall_info *hardwall;
-	/* Chains this task into the list at hardwall->list. */
-	struct list_head hardwall_list;
+	/* Hardwall information for various resources. */
+	struct hardwall_task hardwall[HARDWALL_TYPES];
 #endif
 #if CHIP_HAS_TILE_DMA()
 	/* Async DMA TLB fault information */
diff --git a/arch/tile/include/asm/setup.h b/arch/tile/include/asm/setup.h
index e58613e..c67eb70 100644
--- a/arch/tile/include/asm/setup.h
+++ b/arch/tile/include/asm/setup.h
@@ -41,15 +41,15 @@
 #ifdef CONFIG_HARDWALL
 /* User-level network management functions */
 void reset_network_state(void);
-void grant_network_mpls(void);
-void restrict_network_mpls(void);
 struct task_struct;
-int hardwall_deactivate(struct task_struct *task);
+void hardwall_switch_tasks(struct task_struct *prev, struct task_struct *next);
+void hardwall_deactivate_all(struct task_struct *task);
+int hardwall_ipi_valid(int cpu);
 
 /* Hook hardwall code into changes in affinity. */
 #define arch_set_cpus_allowed(p, new_mask) do { \
-	if (p->thread.hardwall && !cpumask_equal(&p->cpus_allowed, new_mask)) \
-		hardwall_deactivate(p); \
+	if (!cpumask_equal(&p->cpus_allowed, new_mask)) \
+		hardwall_deactivate_all(p); \
 } while (0)
 #endif
 
diff --git a/arch/tile/include/asm/syscalls.h b/arch/tile/include/asm/syscalls.h
index 3b5507c..06f0464 100644
--- a/arch/tile/include/asm/syscalls.h
+++ b/arch/tile/include/asm/syscalls.h
@@ -43,7 +43,8 @@
 		     u32 len, int advice);
 int sys32_fadvise64_64(int fd, u32 offset_lo, u32 offset_hi,
 		       u32 len_lo, u32 len_hi, int advice);
-long sys_flush_cache(void);
+long sys_cacheflush(unsigned long addr, unsigned long len,
+		    unsigned long flags);
 #ifndef __tilegx__  /* No mmap() in the 32-bit kernel. */
 #define sys_mmap sys_mmap
 #endif
diff --git a/arch/tile/include/asm/thread_info.h b/arch/tile/include/asm/thread_info.h
index 656c486..e9c670d 100644
--- a/arch/tile/include/asm/thread_info.h
+++ b/arch/tile/include/asm/thread_info.h
@@ -91,11 +91,6 @@
 /* Enable interrupts racelessly and nap forever: helper for cpu_idle(). */
 extern void _cpu_idle(void);
 
-/* Switch boot idle thread to a freshly-allocated stack and free old stack. */
-extern void cpu_idle_on_new_stack(struct thread_info *old_ti,
-				  unsigned long new_sp,
-				  unsigned long new_ss10);
-
 #else /* __ASSEMBLY__ */
 
 /*
@@ -166,7 +161,23 @@
 {
 	struct thread_info *ti = current_thread_info();
 	ti->status |= TS_RESTORE_SIGMASK;
-	set_bit(TIF_SIGPENDING, &ti->flags);
+	WARN_ON(!test_bit(TIF_SIGPENDING, &ti->flags));
+}
+static inline void clear_restore_sigmask(void)
+{
+	current_thread_info()->status &= ~TS_RESTORE_SIGMASK;
+}
+static inline bool test_restore_sigmask(void)
+{
+	return current_thread_info()->status & TS_RESTORE_SIGMASK;
+}
+static inline bool test_and_clear_restore_sigmask(void)
+{
+	struct thread_info *ti = current_thread_info();
+	if (!(ti->status & TS_RESTORE_SIGMASK))
+		return false;
+	ti->status &= ~TS_RESTORE_SIGMASK;
+	return true;
 }
 #endif	/* !__ASSEMBLY__ */
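These helpers mirror the generic TS_RESTORE_SIGMASK convention; the usual consumer in the signal-return path looks roughly like this (hypothetical sketch, not code from this patch):

	if (test_and_clear_restore_sigmask())
		set_current_blocked(&current->saved_sigmask);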
 
diff --git a/arch/tile/include/asm/tlbflush.h b/arch/tile/include/asm/tlbflush.h
index 96199d2..dcf91b2 100644
--- a/arch/tile/include/asm/tlbflush.h
+++ b/arch/tile/include/asm/tlbflush.h
@@ -38,16 +38,11 @@
 /* The hypervisor tells us what ASIDs are available to us. */
 extern int min_asid, max_asid;
 
-static inline unsigned long hv_page_size(const struct vm_area_struct *vma)
-{
-	return (vma->vm_flags & VM_HUGETLB) ? HPAGE_SIZE : PAGE_SIZE;
-}
-
 /* Pass as vma pointer for non-executable mapping, if no vma available. */
-#define FLUSH_NONEXEC ((const struct vm_area_struct *)-1UL)
+#define FLUSH_NONEXEC ((struct vm_area_struct *)-1UL)
 
 /* Flush a single user page on this cpu. */
-static inline void local_flush_tlb_page(const struct vm_area_struct *vma,
+static inline void local_flush_tlb_page(struct vm_area_struct *vma,
 					unsigned long addr,
 					unsigned long page_size)
 {
@@ -60,7 +55,7 @@
 }
 
 /* Flush range of user pages on this cpu. */
-static inline void local_flush_tlb_pages(const struct vm_area_struct *vma,
+static inline void local_flush_tlb_pages(struct vm_area_struct *vma,
 					 unsigned long addr,
 					 unsigned long page_size,
 					 unsigned long len)
@@ -117,10 +112,10 @@
 extern void flush_tlb_kernel_range(unsigned long start, unsigned long end);
 extern void flush_tlb_current_task(void);
 extern void flush_tlb_mm(struct mm_struct *);
-extern void flush_tlb_page(const struct vm_area_struct *, unsigned long);
-extern void flush_tlb_page_mm(const struct vm_area_struct *,
+extern void flush_tlb_page(struct vm_area_struct *, unsigned long);
+extern void flush_tlb_page_mm(struct vm_area_struct *,
 			      struct mm_struct *, unsigned long);
-extern void flush_tlb_range(const struct vm_area_struct *,
+extern void flush_tlb_range(struct vm_area_struct *,
 			    unsigned long start, unsigned long end);
 
 #define flush_tlb()     flush_tlb_current_task()
diff --git a/arch/tile/include/asm/uaccess.h b/arch/tile/include/asm/uaccess.h
index ef34d2caa..9ab078a 100644
--- a/arch/tile/include/asm/uaccess.h
+++ b/arch/tile/include/asm/uaccess.h
@@ -114,45 +114,75 @@
 extern int fixup_exception(struct pt_regs *regs);
 
 /*
- * We return the __get_user_N function results in a structure,
- * thus in r0 and r1.  If "err" is zero, "val" is the result
- * of the read; otherwise, "err" is -EFAULT.
+ * Support macros for __get_user().
  *
- * We rarely need 8-byte values on a 32-bit architecture, but
- * we size the structure to accommodate.  In practice, for the
- * the smaller reads, we can zero the high word for free, and
- * the caller will ignore it by virtue of casting anyway.
+ * Implementation note: The "case 8" logic of casting to the type of
+ * the result of subtracting the value from itself is basically a way
+ * of keeping all integer types the same, but casting any pointers to
+ * ptrdiff_t, i.e. also an integer type.  This way there are no
+ * questionable casts seen by the compiler on an ILP32 platform.
+ *
+ * Note that __get_user() and __put_user() assume proper alignment.
  */
-struct __get_user {
-	unsigned long long val;
-	int err;
-};
 
-/*
- * FIXME: we should express these as inline extended assembler, since
- * they're fundamentally just a variable dereference and some
- * supporting exception_table gunk.  Note that (a la i386) we can
- * extend the copy_to_user and copy_from_user routines to call into
- * such extended assembler routines, though we will have to use a
- * different return code in that case (1, 2, or 4, rather than -EFAULT).
- */
-extern struct __get_user __get_user_1(const void __user *);
-extern struct __get_user __get_user_2(const void __user *);
-extern struct __get_user __get_user_4(const void __user *);
-extern struct __get_user __get_user_8(const void __user *);
-extern int __put_user_1(long, void __user *);
-extern int __put_user_2(long, void __user *);
-extern int __put_user_4(long, void __user *);
-extern int __put_user_8(long long, void __user *);
+#ifdef __LP64__
+#define _ASM_PTR	".quad"
+#else
+#define _ASM_PTR	".long"
+#endif
 
-/* Unimplemented routines to cause linker failures */
-extern struct __get_user __get_user_bad(void);
-extern int __put_user_bad(void);
+#define __get_user_asm(OP, x, ptr, ret)					\
+	asm volatile("1: {" #OP " %1, %2; movei %0, 0 }\n"		\
+		     ".pushsection .fixup,\"ax\"\n"			\
+		     "0: { movei %1, 0; movei %0, %3 }\n"		\
+		     "j 9f\n"						\
+		     ".section __ex_table,\"a\"\n"			\
+		     _ASM_PTR " 1b, 0b\n"				\
+		     ".popsection\n"					\
+		     "9:"						\
+		     : "=r" (ret), "=r" (x)				\
+		     : "r" (ptr), "i" (-EFAULT))
 
-/*
- * Careful: we have to cast the result to the type of the pointer
- * for sign reasons.
- */
+#ifdef __tilegx__
+#define __get_user_1(x, ptr, ret) __get_user_asm(ld1u, x, ptr, ret)
+#define __get_user_2(x, ptr, ret) __get_user_asm(ld2u, x, ptr, ret)
+#define __get_user_4(x, ptr, ret) __get_user_asm(ld4s, x, ptr, ret)
+#define __get_user_8(x, ptr, ret) __get_user_asm(ld, x, ptr, ret)
+#else
+#define __get_user_1(x, ptr, ret) __get_user_asm(lb_u, x, ptr, ret)
+#define __get_user_2(x, ptr, ret) __get_user_asm(lh_u, x, ptr, ret)
+#define __get_user_4(x, ptr, ret) __get_user_asm(lw, x, ptr, ret)
+#ifdef __LITTLE_ENDIAN
+#define __lo32(a, b) a
+#define __hi32(a, b) b
+#else
+#define __lo32(a, b) b
+#define __hi32(a, b) a
+#endif
+#define __get_user_8(x, ptr, ret)					\
+	({								\
+		unsigned int __a, __b;					\
+		asm volatile("1: { lw %1, %3; addi %2, %3, 4 }\n"	\
+			     "2: { lw %2, %2; movei %0, 0 }\n"		\
+			     ".pushsection .fixup,\"ax\"\n"		\
+			     "0: { movei %1, 0; movei %2, 0 }\n"	\
+			     "{ movei %0, %4; j 9f }\n"			\
+			     ".section __ex_table,\"a\"\n"		\
+			     ".word 1b, 0b\n"				\
+			     ".word 2b, 0b\n"				\
+			     ".popsection\n"				\
+			     "9:"					\
+			     : "=r" (ret), "=r" (__a), "=&r" (__b)	\
+			     : "r" (ptr), "i" (-EFAULT));		\
+		(x) = (__typeof(x))(__typeof((x)-(x)))			\
+			(((u64)__hi32(__a, __b) << 32) |		\
+			 __lo32(__a, __b));				\
+	})
+#endif
+
+extern int __get_user_bad(void)
+  __attribute__((warning("sizeof __get_user argument not 1, 2, 4 or 8")));
+
 /**
  * __get_user: - Get a simple variable from user space, with less checking.
  * @x:   Variable to store result.
@@ -174,30 +204,62 @@
  * function.
  */
 #define __get_user(x, ptr)						\
-({	struct __get_user __ret;					\
-	__typeof__(*(ptr)) const __user *__gu_addr = (ptr);		\
-	__chk_user_ptr(__gu_addr);					\
-	switch (sizeof(*(__gu_addr))) {					\
-	case 1:								\
-		__ret = __get_user_1(__gu_addr);			\
-		break;							\
-	case 2:								\
-		__ret = __get_user_2(__gu_addr);			\
-		break;							\
-	case 4:								\
-		__ret = __get_user_4(__gu_addr);			\
-		break;							\
-	case 8:								\
-		__ret = __get_user_8(__gu_addr);			\
-		break;							\
-	default:							\
-		__ret = __get_user_bad();				\
-		break;							\
-	}								\
-	(x) = (__typeof__(*__gu_addr)) (__typeof__(*__gu_addr - *__gu_addr)) \
-	  __ret.val;			                                \
-	__ret.err;							\
-})
+	({								\
+		int __ret;						\
+		__chk_user_ptr(ptr);					\
+		switch (sizeof(*(ptr))) {				\
+		case 1: __get_user_1(x, ptr, __ret); break;		\
+		case 2: __get_user_2(x, ptr, __ret); break;		\
+		case 4: __get_user_4(x, ptr, __ret); break;		\
+		case 8: __get_user_8(x, ptr, __ret); break;		\
+		default: __ret = __get_user_bad(); break;		\
+		}							\
+		__ret;							\
+	})
+
+/* Support macros for __put_user(). */
+
+#define __put_user_asm(OP, x, ptr, ret)			\
+	asm volatile("1: {" #OP " %1, %2; movei %0, 0 }\n"		\
+		     ".pushsection .fixup,\"ax\"\n"			\
+		     "0: { movei %0, %3; j 9f }\n"			\
+		     ".section __ex_table,\"a\"\n"			\
+		     _ASM_PTR " 1b, 0b\n"				\
+		     ".popsection\n"					\
+		     "9:"						\
+		     : "=r" (ret)					\
+		     : "r" (ptr), "r" (x), "i" (-EFAULT))
+
+#ifdef __tilegx__
+#define __put_user_1(x, ptr, ret) __put_user_asm(st1, x, ptr, ret)
+#define __put_user_2(x, ptr, ret) __put_user_asm(st2, x, ptr, ret)
+#define __put_user_4(x, ptr, ret) __put_user_asm(st4, x, ptr, ret)
+#define __put_user_8(x, ptr, ret) __put_user_asm(st, x, ptr, ret)
+#else
+#define __put_user_1(x, ptr, ret) __put_user_asm(sb, x, ptr, ret)
+#define __put_user_2(x, ptr, ret) __put_user_asm(sh, x, ptr, ret)
+#define __put_user_4(x, ptr, ret) __put_user_asm(sw, x, ptr, ret)
+#define __put_user_8(x, ptr, ret)					\
+	({								\
+		u64 __x = (__typeof((x)-(x)))(x);			\
+		int __lo = (int) __x, __hi = (int) (__x >> 32);		\
+		asm volatile("1: { sw %1, %2; addi %0, %1, 4 }\n"	\
+			     "2: { sw %0, %3; movei %0, 0 }\n"		\
+			     ".pushsection .fixup,\"ax\"\n"		\
+			     "0: { movei %0, %4; j 9f }\n"		\
+			     ".section __ex_table,\"a\"\n"		\
+			     ".word 1b, 0b\n"				\
+			     ".word 2b, 0b\n"				\
+			     ".popsection\n"				\
+			     "9:"					\
+			     : "=&r" (ret)				\
+			     : "r" (ptr), "r" (__lo32(__lo, __hi)),	\
+			     "r" (__hi32(__lo, __hi)), "i" (-EFAULT));	\
+	})
+#endif
+
+extern int __put_user_bad(void)
+  __attribute__((warning("sizeof __put_user argument not 1, 2, 4 or 8")));
 
 /**
  * __put_user: - Write a simple value into user space, with less checking.
@@ -217,39 +279,19 @@
  * function.
  *
  * Returns zero on success, or -EFAULT on error.
- *
- * Implementation note: The "case 8" logic of casting to the type of
- * the result of subtracting the value from itself is basically a way
- * of keeping all integer types the same, but casting any pointers to
- * ptrdiff_t, i.e. also an integer type.  This way there are no
- * questionable casts seen by the compiler on an ILP32 platform.
  */
 #define __put_user(x, ptr)						\
 ({									\
-	int __pu_err = 0;						\
-	__typeof__(*(ptr)) __user *__pu_addr = (ptr);			\
-	typeof(*__pu_addr) __pu_val = (x);				\
-	__chk_user_ptr(__pu_addr);					\
-	switch (sizeof(__pu_val)) {					\
-	case 1:								\
-		__pu_err = __put_user_1((long)__pu_val, __pu_addr);	\
-		break;							\
-	case 2:								\
-		__pu_err = __put_user_2((long)__pu_val, __pu_addr);	\
-		break;							\
-	case 4:								\
-		__pu_err = __put_user_4((long)__pu_val, __pu_addr);	\
-		break;							\
-	case 8:								\
-		__pu_err =						\
-		  __put_user_8((__typeof__(__pu_val - __pu_val))__pu_val,\
-			__pu_addr);					\
-		break;							\
-	default:							\
-		__pu_err = __put_user_bad();				\
-		break;							\
+	int __ret;							\
+	__chk_user_ptr(ptr);						\
+	switch (sizeof(*(ptr))) {					\
+	case 1: __put_user_1(x, ptr, __ret); break;			\
+	case 2: __put_user_2(x, ptr, __ret); break;			\
+	case 4: __put_user_4(x, ptr, __ret); break;			\
+	case 8: __put_user_8(x, ptr, __ret); break;			\
+	default: __ret = __put_user_bad(); break;			\
 	}								\
-	__pu_err;							\
+	__ret;								\
 })
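A minimal sketch of how callers use the rewritten macros (illustrative; as the comments above note, access_ok() checking is the caller's job):

static inline int copy_one_word(u32 __user *dst, const u32 __user *src)
{
	u32 tmp;
	int err = __get_user(tmp, src);

	if (!err)
		err = __put_user(tmp, dst);
	return err;	/* 0 on success, -EFAULT on a faulting access */
}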
 
 /*
@@ -378,7 +420,7 @@
 /**
  * __copy_in_user() - copy data within user space, with less checking.
  * @to:   Destination address, in user space.
- * @from: Source address, in kernel space.
+ * @from: Source address, in user space.
  * @n:    Number of bytes to copy.
  *
  * Context: User context only.  This function may sleep.
diff --git a/arch/tile/include/asm/unistd.h b/arch/tile/include/asm/unistd.h
index f70bf1c..a017246 100644
--- a/arch/tile/include/asm/unistd.h
+++ b/arch/tile/include/asm/unistd.h
@@ -24,8 +24,8 @@
 #include <asm-generic/unistd.h>
 
 /* Additional Tilera-specific syscalls. */
-#define __NR_flush_cache	(__NR_arch_specific_syscall + 1)
-__SYSCALL(__NR_flush_cache, sys_flush_cache)
+#define __NR_cacheflush	(__NR_arch_specific_syscall + 1)
+__SYSCALL(__NR_cacheflush, sys_cacheflush)
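From user space the renamed entry point is reachable via syscall(2); a hedged sketch (the meaning of the flags argument is not defined in this header, so the 0 below is purely illustrative):

#include <unistd.h>
#include <sys/syscall.h>

static int flush_range(void *addr, unsigned long len)
{
	return syscall(__NR_cacheflush, (unsigned long)addr, len, 0UL);
}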
 
 #ifndef __tilegx__
 /* "Fast" syscalls provide atomic support for 32-bit chips. */
diff --git a/arch/tile/include/hv/drv_xgbe_intf.h b/arch/tile/include/hv/drv_xgbe_intf.h
index f13188a..2a20b26 100644
--- a/arch/tile/include/hv/drv_xgbe_intf.h
+++ b/arch/tile/include/hv/drv_xgbe_intf.h
@@ -460,7 +460,7 @@
  *  linux's "MAX_SKB_FRAGS", and presumably over-estimates by one, for
  *  our page size of exactly 65536.  We add one for a "body" fragment.
  */
-#define LEPP_MAX_FRAGS (65536 / HV_PAGE_SIZE_SMALL + 2 + 1)
+#define LEPP_MAX_FRAGS (65536 / HV_DEFAULT_PAGE_SIZE_SMALL + 2 + 1)
 
 /** Total number of bytes needed for an lepp_tso_cmd_t. */
 #define LEPP_TSO_CMD_SIZE(num_frags, header_size) \
diff --git a/arch/tile/include/hv/hypervisor.h b/arch/tile/include/hv/hypervisor.h
index 72ec1e9..ccd847e2 100644
--- a/arch/tile/include/hv/hypervisor.h
+++ b/arch/tile/include/hv/hypervisor.h
@@ -17,8 +17,8 @@
  * The hypervisor's public API.
  */
 
-#ifndef _TILE_HV_H
-#define _TILE_HV_H
+#ifndef _HV_HV_H
+#define _HV_HV_H
 
 #include <arch/chip.h>
 
@@ -42,25 +42,45 @@
  */
 #define HV_L1_SPAN (__HV_SIZE_ONE << HV_LOG2_L1_SPAN)
 
-/** The log2 of the size of small pages, in bytes. This value should
- * be verified at runtime by calling hv_sysconf(HV_SYSCONF_PAGE_SIZE_SMALL).
+/** The log2 of the initial size of small pages, in bytes.
+ * See HV_DEFAULT_PAGE_SIZE_SMALL.
  */
-#define HV_LOG2_PAGE_SIZE_SMALL 16
+#define HV_LOG2_DEFAULT_PAGE_SIZE_SMALL 16
 
-/** The size of small pages, in bytes. This value should be verified
+/** The initial size of small pages, in bytes. This value should be verified
  * at runtime by calling hv_sysconf(HV_SYSCONF_PAGE_SIZE_SMALL).
+ * It may also be modified when installing a new context.
  */
-#define HV_PAGE_SIZE_SMALL (__HV_SIZE_ONE << HV_LOG2_PAGE_SIZE_SMALL)
+#define HV_DEFAULT_PAGE_SIZE_SMALL \
+  (__HV_SIZE_ONE << HV_LOG2_DEFAULT_PAGE_SIZE_SMALL)
 
-/** The log2 of the size of large pages, in bytes. This value should be
- * verified at runtime by calling hv_sysconf(HV_SYSCONF_PAGE_SIZE_LARGE).
+/** The log2 of the initial size of large pages, in bytes.
+ * See HV_DEFAULT_PAGE_SIZE_LARGE.
  */
-#define HV_LOG2_PAGE_SIZE_LARGE 24
+#define HV_LOG2_DEFAULT_PAGE_SIZE_LARGE 24
 
-/** The size of large pages, in bytes. This value should be verified
+/** The initial size of large pages, in bytes. This value should be verified
  * at runtime by calling hv_sysconf(HV_SYSCONF_PAGE_SIZE_LARGE).
+ * It may also be modified when installing a new context.
  */
-#define HV_PAGE_SIZE_LARGE (__HV_SIZE_ONE << HV_LOG2_PAGE_SIZE_LARGE)
+#define HV_DEFAULT_PAGE_SIZE_LARGE \
+  (__HV_SIZE_ONE << HV_LOG2_DEFAULT_PAGE_SIZE_LARGE)
+
+#if CHIP_VA_WIDTH() > 32
+
+/** The log2 of the initial size of jumbo pages, in bytes.
+ * See HV_DEFAULT_PAGE_SIZE_JUMBO.
+ */
+#define HV_LOG2_DEFAULT_PAGE_SIZE_JUMBO 32
+
+/** The initial size of jumbo pages, in bytes. This value should
+ * be verified at runtime by calling hv_sysconf(HV_SYSCONF_PAGE_SIZE_JUMBO).
+ * It may also be modified when installing a new context.
+ */
+#define HV_DEFAULT_PAGE_SIZE_JUMBO \
+  (__HV_SIZE_ONE << HV_LOG2_DEFAULT_PAGE_SIZE_JUMBO)
+
+#endif
 
 /** The log2 of the granularity at which page tables must be aligned;
  *  in other words, the CPA for a page table must have this many zero
@@ -280,8 +300,11 @@
 #define HV_DISPATCH_GET_IPI_PTE                   56
 #endif
 
+/** hv_set_pte_super_shift */
+#define HV_DISPATCH_SET_PTE_SUPER_SHIFT           57
+
 /** One more than the largest dispatch value */
-#define _HV_DISPATCH_END                          57
+#define _HV_DISPATCH_END                          58
 
 
 #ifndef __ASSEMBLER__
@@ -401,7 +424,18 @@
    *  that the temperature has hit an upper limit and is no longer being
    *  accurately tracked.
    */
-  HV_SYSCONF_BOARD_TEMP      = 6
+  HV_SYSCONF_BOARD_TEMP      = 6,
+
+  /** Legal page size bitmask for hv_install_context().
+   * For example, if 16KB and 64KB small pages are supported,
+   * it would return "HV_CTX_PG_SM_16K | HV_CTX_PG_SM_64K".
+   */
+  HV_SYSCONF_VALID_PAGE_SIZES = 7,
+
+  /** The size of jumbo pages, in bytes.
+   * If no jumbo pages are available, zero will be returned.
+   */
+  HV_SYSCONF_PAGE_SIZE_JUMBO = 8,
 
 } HV_SysconfQuery;
 
@@ -474,7 +508,19 @@
   HV_CONFSTR_SWITCH_CONTROL  = 14,
 
   /** Chip revision level. */
-  HV_CONFSTR_CHIP_REV        = 15
+  HV_CONFSTR_CHIP_REV        = 15,
+
+  /** CPU module part number. */
+  HV_CONFSTR_CPUMOD_PART_NUM = 16,
+
+  /** CPU module serial number. */
+  HV_CONFSTR_CPUMOD_SERIAL_NUM = 17,
+
+  /** CPU module revision level. */
+  HV_CONFSTR_CPUMOD_REV      = 18,
+
+  /** Human-readable CPU module description. */
+  HV_CONFSTR_CPUMOD_DESC     = 19
 
 } HV_ConfstrQuery;
 
@@ -494,11 +540,16 @@
 /** Tile coordinate */
 typedef struct
 {
+#ifndef __BIG_ENDIAN__
   /** X coordinate, relative to supervisor's top-left coordinate */
   int x;
 
   /** Y coordinate, relative to supervisor's top-left coordinate */
   int y;
+#else
+  int y;
+  int x;
+#endif
 } HV_Coord;
 
 
@@ -649,6 +700,12 @@
  *  new page table does not need to contain any mapping for the
  *  hv_install_context address itself.
  *
+ *  At most one HV_CTX_PG_SM_* flag may be specified in "flags";
+ *  if multiple flags are specified, HV_EINVAL is returned.
+ *  Specifying none of the flags results in using the default page size.
+ *  All cores participating in a given client must request the same
+ *  page size, or the results are undefined.
+ *
  * @param page_table Root of the page table.
  * @param access PTE providing info on how to read the page table.  This
  *   value must be consistent between multiple tiles sharing a page table,
@@ -667,8 +724,36 @@
 #define HV_CTX_DIRECTIO     0x1   /**< Direct I/O requests are accepted from
                                        PL0. */
 
+#define HV_CTX_PG_SM_4K     0x10  /**< Use 4K small pages, if available. */
+#define HV_CTX_PG_SM_16K    0x20  /**< Use 16K small pages, if available. */
+#define HV_CTX_PG_SM_64K    0x40  /**< Use 64K small pages, if available. */
+#define HV_CTX_PG_SM_MASK   0xf0  /**< Mask of all possible small pages. */
+
 #ifndef __ASSEMBLER__
 
+
+/** Set the number of pages ganged together by HV_PTE_SUPER at a
+ * particular level of the page table.
+ *
+ * The current TILE-Gx hardware only supports powers of four
+ * (i.e. log2_count must be a multiple of two), and the requested
+ * "super" page size must be less than the span of the next level in
+ * the page table.  The largest size that can be requested is 64GB.
+ *
+ * The shift value is initially "0" for all page table levels,
+ * indicating that the HV_PTE_SUPER bit is effectively ignored.
+ *
+ * If you change the count from one non-zero value to another, the
+ * hypervisor will flush the entire TLB and TSB to avoid confusion.
+ *
+ * @param level Page table level (0, 1, or 2)
+ * @param log2_count Base-2 log of the number of pages to gang together,
+ * i.e. how much to shift left the base page size for the super page size.
+ * @return Zero on success, or a hypervisor error code on failure.
+ */
+int hv_set_pte_super_shift(int level, int log2_count);
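Tying the new pieces together, a sketch of how a client might pick a small-page size and enable super pages (illustrative only; page_table, access_pte and asid stand in for the caller's real values, and the 16 KB / 4^2 choices are examples, not requirements):

	int flags = HV_CTX_DIRECTIO;
	int rc;

	if (hv_sysconf(HV_SYSCONF_VALID_PAGE_SIZES) & HV_CTX_PG_SM_16K)
		flags |= HV_CTX_PG_SM_16K;	/* otherwise keep the default size */
	rc = hv_install_context(__pa(page_table), access_pte, asid, flags);
	if (rc == 0)
		rc = hv_set_pte_super_shift(2, 4);  /* gang 2^4 = 16 level-2 pages */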
+
+
 /** Value returned from hv_inquire_context(). */
 typedef struct
 {
@@ -986,8 +1071,13 @@
 /** A range of ASID values. */
 typedef struct
 {
+#ifndef __BIG_ENDIAN__
   HV_ASID start;        /**< First ASID in the range. */
   unsigned int size;    /**< Number of ASIDs. Zero for an invalid range. */
+#else
+  unsigned int size;    /**< Number of ASIDs. Zero for an invalid range. */
+  HV_ASID start;        /**< First ASID in the range. */
+#endif
 } HV_ASIDRange;
 
 /** Returns information about a range of ASIDs.
@@ -1238,11 +1328,14 @@
  * with the existing priority pages) or "red/black" (if they don't).
  * The bitmask provides information on which parts of the cache
  * have been used for pinned pages so far on this tile; if (1 << N)
- * appears in the bitmask, that indicates that a page has been marked
- * "priority" whose PFN equals N, mod 8.
+ * appears in the bitmask, that indicates that a 4KB region of the
+ * cache starting at (N * 4KB) is in use by a "priority" page.
+ * The portion of cache used by a particular page can be computed
+ * by taking the page's PA, modulo CHIP_L2_CACHE_SIZE(), and setting
+ * all the "4KB" bits corresponding to the actual page size.
  * @param bitmask A bitmap of priority page set values
  */
-void hv_set_caching(unsigned int bitmask);
+void hv_set_caching(unsigned long bitmask);
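A hedged sketch of building the bitmask for one priority page, following the 4 KB-granule rule described above; "pa" and "pagesize" stand for the page's client physical address and size, and wrap-around within the cache plus merging with bits for other pages are ignored for brevity:

	unsigned long bits = 0;
	unsigned long first = (pa % CHIP_L2_CACHE_SIZE()) >> 12;	/* 4 KB units */
	unsigned long i;

	for (i = 0; i < (pagesize >> 12); i++)
		bits |= 1UL << (first + i);
	hv_set_caching(bits);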
 
 
 /** Zero out a specified number of pages.
@@ -1308,6 +1401,7 @@
 /** Message recipient. */
 typedef struct
 {
+#ifndef __BIG_ENDIAN__
   /** X coordinate, relative to supervisor's top-left coordinate */
   unsigned int x:11;
 
@@ -1316,6 +1410,11 @@
 
   /** Status of this recipient */
   HV_Recip_State state:10;
+#else //__BIG_ENDIAN__
+  HV_Recip_State state:10;
+  unsigned int y:11;
+  unsigned int x:11;
+#endif
 } HV_Recipient;
 
 /** Send a message to a set of recipients.
@@ -1851,12 +1950,12 @@
 #define HV_PTE_INDEX_USER            10  /**< Page is user-accessible */
 #define HV_PTE_INDEX_ACCESSED        11  /**< Page has been accessed */
 #define HV_PTE_INDEX_DIRTY           12  /**< Page has been written */
-                                         /*   Bits 13-15 are reserved for
+                                         /*   Bits 13-14 are reserved for
                                               future use. */
+#define HV_PTE_INDEX_SUPER           15  /**< Pages ganged together for TLB */
 #define HV_PTE_INDEX_MODE            16  /**< Page mode; see HV_PTE_MODE_xxx */
 #define HV_PTE_MODE_BITS              3  /**< Number of bits in mode */
-                                         /*   Bit 19 is reserved for
-                                              future use. */
+#define HV_PTE_INDEX_CLIENT2         19  /**< Page client state 2 */
 #define HV_PTE_INDEX_LOTAR           20  /**< Page's LOTAR; must be high bits
                                               of word */
 #define HV_PTE_LOTAR_BITS            12  /**< Number of bits in a LOTAR */
@@ -1869,15 +1968,6 @@
                                               of word */
 #define HV_PTE_PTFN_BITS             29  /**< Number of bits in a PTFN */
 
-/** Position of the PFN field within the PTE (subset of the PTFN). */
-#define HV_PTE_INDEX_PFN (HV_PTE_INDEX_PTFN + (HV_LOG2_PAGE_SIZE_SMALL - \
-                                               HV_LOG2_PAGE_TABLE_ALIGN))
-
-/** Length of the PFN field within the PTE (subset of the PTFN). */
-#define HV_PTE_INDEX_PFN_BITS (HV_PTE_INDEX_PTFN_BITS - \
-                               (HV_LOG2_PAGE_SIZE_SMALL - \
-                                HV_LOG2_PAGE_TABLE_ALIGN))
-
 /*
  * Legal values for the PTE's mode field
  */
@@ -1957,7 +2047,10 @@
 
 /** Does this PTE map a page?
  *
- * If this bit is set in the level-1 page table, the entry should be
+ * If this bit is set in a level-0 page table, the entry should be
+ * interpreted as a level-2 page table entry mapping a jumbo page.
+ *
+ * If this bit is set in a level-1 page table, the entry should be
  * interpreted as a level-2 page table entry mapping a large page.
  *
  * This bit should not be modified by the client while PRESENT is set, as
@@ -1967,6 +2060,18 @@
  */
 #define HV_PTE_PAGE                  (__HV_PTE_ONE << HV_PTE_INDEX_PAGE)
 
+/** Does this PTE implicitly reference multiple pages?
+ *
+ * If this bit is set in the page table (either in the level-2 page table,
+ * or in a higher level page table in conjunction with the PAGE bit)
+ * then the PTE specifies a range of contiguous pages, not a single page.
+ * The hv_set_pte_super_shift() call allows you to specify the count for
+ * each level of the page table.
+ *
+ * Note: this bit is not supported on TILEPro systems.
+ */
+#define HV_PTE_SUPER                 (__HV_PTE_ONE << HV_PTE_INDEX_SUPER)
+
 /** Is this a global (non-ASID) mapping?
  *
  * If this bit is set, the translations established by this PTE will
@@ -2046,6 +2151,13 @@
  */
 #define HV_PTE_CLIENT1               (__HV_PTE_ONE << HV_PTE_INDEX_CLIENT1)
 
+/** Client-private bit in PTE.
+ *
+ * This bit is guaranteed not to be inspected or modified by the
+ * hypervisor.
+ */
+#define HV_PTE_CLIENT2               (__HV_PTE_ONE << HV_PTE_INDEX_CLIENT2)
+
 /** Non-coherent (NC) bit in PTE.
  *
  * If this bit is set, the mapping that is set up will be non-coherent
@@ -2178,8 +2290,10 @@
  */
 _HV_BIT(present,         PRESENT)
 _HV_BIT(page,            PAGE)
+_HV_BIT(super,           SUPER)
 _HV_BIT(client0,         CLIENT0)
 _HV_BIT(client1,         CLIENT1)
+_HV_BIT(client2,         CLIENT2)
 _HV_BIT(migrating,       MIGRATING)
 _HV_BIT(nc,              NC)
 _HV_BIT(readable,        READABLE)
@@ -2222,40 +2336,11 @@
  *
  * This field contains the upper bits of the CPA (client physical
  * address) of the target page; the complete CPA is this field with
- * HV_LOG2_PAGE_SIZE_SMALL zero bits appended to it.
+ * HV_LOG2_PAGE_TABLE_ALIGN zero bits appended to it.
  *
- * For PTEs in a level-1 page table where the Page bit is set, the
- * CPA must be aligned modulo the large page size.
- */
-static __inline unsigned int
-hv_pte_get_pfn(const HV_PTE pte)
-{
-  return pte.val >> HV_PTE_INDEX_PFN;
-}
-
-
-/** Set the page frame number into a PTE.  See hv_pte_get_pfn. */
-static __inline HV_PTE
-hv_pte_set_pfn(HV_PTE pte, unsigned int val)
-{
-  /*
-   * Note that the use of "PTFN" in the next line is intentional; we
-   * don't want any garbage lower bits left in that field.
-   */
-  pte.val &= ~(((1ULL << HV_PTE_PTFN_BITS) - 1) << HV_PTE_INDEX_PTFN);
-  pte.val |= (__hv64) val << HV_PTE_INDEX_PFN;
-  return pte;
-}
-
-/** Get the page table frame number from the PTE.
- *
- * This field contains the upper bits of the CPA (client physical
- * address) of the target page table; the complete CPA is this field with
- * with HV_PAGE_TABLE_ALIGN zero bits appended to it.
- *
- * For PTEs in a level-1 page table when the Page bit is not set, the
- * CPA must be aligned modulo the sticter of HV_PAGE_TABLE_ALIGN and
- * the level-2 page table size.
+ * For all PTEs in the lowest-level page table, and for all PTEs with
+ * the Page bit set in all page tables, the CPA must be aligned modulo
+ * the relevant page size.
  */
 static __inline unsigned long
 hv_pte_get_ptfn(const HV_PTE pte)
@@ -2263,7 +2348,6 @@
   return pte.val >> HV_PTE_INDEX_PTFN;
 }
 
-
 /** Set the page table frame number into a PTE.  See hv_pte_get_ptfn. */
 static __inline HV_PTE
 hv_pte_set_ptfn(HV_PTE pte, unsigned long val)
@@ -2273,6 +2357,20 @@
   return pte;
 }
 
+/** Get the client physical address from the PTE.  See hv_pte_set_ptfn. */
+static __inline HV_PhysAddr
+hv_pte_get_pa(const HV_PTE pte)
+{
+  return (__hv64) hv_pte_get_ptfn(pte) << HV_LOG2_PAGE_TABLE_ALIGN;
+}
+
+/** Set the client physical address into a PTE.  See hv_pte_get_ptfn. */
+static __inline HV_PTE
+hv_pte_set_pa(HV_PTE pte, HV_PhysAddr pa)
+{
+  return hv_pte_set_ptfn(pte, pa >> HV_LOG2_PAGE_TABLE_ALIGN);
+}
+
 
 /** Get the remote tile caching this page.
  *
@@ -2308,28 +2406,20 @@
 
 #endif  /* !__ASSEMBLER__ */
 
-/** Converts a client physical address to a pfn. */
-#define HV_CPA_TO_PFN(p) ((p) >> HV_LOG2_PAGE_SIZE_SMALL)
-
-/** Converts a pfn to a client physical address. */
-#define HV_PFN_TO_CPA(p) (((HV_PhysAddr)(p)) << HV_LOG2_PAGE_SIZE_SMALL)
-
 /** Converts a client physical address to a ptfn. */
 #define HV_CPA_TO_PTFN(p) ((p) >> HV_LOG2_PAGE_TABLE_ALIGN)
 
 /** Converts a ptfn to a client physical address. */
 #define HV_PTFN_TO_CPA(p) (((HV_PhysAddr)(p)) << HV_LOG2_PAGE_TABLE_ALIGN)
 
-/** Converts a ptfn to a pfn. */
-#define HV_PTFN_TO_PFN(p) \
-  ((p) >> (HV_LOG2_PAGE_SIZE_SMALL - HV_LOG2_PAGE_TABLE_ALIGN))
-
-/** Converts a pfn to a ptfn. */
-#define HV_PFN_TO_PTFN(p) \
-  ((p) << (HV_LOG2_PAGE_SIZE_SMALL - HV_LOG2_PAGE_TABLE_ALIGN))
-
 #if CHIP_VA_WIDTH() > 32
 
+/*
+ * Note that we currently do not allow customizing the page size
+ * of the L0 pages, but fix them at 4GB, so we do not use the
+ * "_HV_xxx" nomenclature for the L0 macros.
+ */
+
 /** Log number of HV_PTE entries in L0 page table */
 #define HV_LOG2_L0_ENTRIES (CHIP_VA_WIDTH() - HV_LOG2_L1_SPAN)
 
@@ -2359,69 +2449,104 @@
 #endif /* CHIP_VA_WIDTH() > 32 */
 
 /** Log number of HV_PTE entries in L1 page table */
-#define HV_LOG2_L1_ENTRIES (HV_LOG2_L1_SPAN - HV_LOG2_PAGE_SIZE_LARGE)
+#define _HV_LOG2_L1_ENTRIES(log2_page_size_large) \
+  (HV_LOG2_L1_SPAN - log2_page_size_large)
 
 /** Number of HV_PTE entries in L1 page table */
-#define HV_L1_ENTRIES (1 << HV_LOG2_L1_ENTRIES)
+#define _HV_L1_ENTRIES(log2_page_size_large) \
+  (1 << _HV_LOG2_L1_ENTRIES(log2_page_size_large))
 
 /** Log size of L1 page table in bytes */
-#define HV_LOG2_L1_SIZE (HV_LOG2_PTE_SIZE + HV_LOG2_L1_ENTRIES)
+#define _HV_LOG2_L1_SIZE(log2_page_size_large) \
+  (HV_LOG2_PTE_SIZE + _HV_LOG2_L1_ENTRIES(log2_page_size_large))
 
 /** Size of L1 page table in bytes */
-#define HV_L1_SIZE (1 << HV_LOG2_L1_SIZE)
+#define _HV_L1_SIZE(log2_page_size_large) \
+  (1 << _HV_LOG2_L1_SIZE(log2_page_size_large))
 
 /** Log number of HV_PTE entries in level-2 page table */
-#define HV_LOG2_L2_ENTRIES (HV_LOG2_PAGE_SIZE_LARGE - HV_LOG2_PAGE_SIZE_SMALL)
+#define _HV_LOG2_L2_ENTRIES(log2_page_size_large, log2_page_size_small) \
+  (log2_page_size_large - log2_page_size_small)
 
 /** Number of HV_PTE entries in level-2 page table */
-#define HV_L2_ENTRIES (1 << HV_LOG2_L2_ENTRIES)
+#define _HV_L2_ENTRIES(log2_page_size_large, log2_page_size_small) \
+  (1 << _HV_LOG2_L2_ENTRIES(log2_page_size_large, log2_page_size_small))
 
 /** Log size of level-2 page table in bytes */
-#define HV_LOG2_L2_SIZE (HV_LOG2_PTE_SIZE + HV_LOG2_L2_ENTRIES)
+#define _HV_LOG2_L2_SIZE(log2_page_size_large, log2_page_size_small) \
+  (HV_LOG2_PTE_SIZE + \
+   _HV_LOG2_L2_ENTRIES(log2_page_size_large, log2_page_size_small))
 
 /** Size of level-2 page table in bytes */
-#define HV_L2_SIZE (1 << HV_LOG2_L2_SIZE)
+#define _HV_L2_SIZE(log2_page_size_large, log2_page_size_small) \
+  (1 << _HV_LOG2_L2_SIZE(log2_page_size_large, log2_page_size_small))
 
 #ifdef __ASSEMBLER__
 
 #if CHIP_VA_WIDTH() > 32
 
 /** Index in L1 for a specific VA */
-#define HV_L1_INDEX(va) \
-  (((va) >> HV_LOG2_PAGE_SIZE_LARGE) & (HV_L1_ENTRIES - 1))
+#define _HV_L1_INDEX(va, log2_page_size_large) \
+  (((va) >> log2_page_size_large) & (_HV_L1_ENTRIES(log2_page_size_large) - 1))
 
 #else /* CHIP_VA_WIDTH() > 32 */
 
 /** Index in L1 for a specific VA */
-#define HV_L1_INDEX(va) \
-  (((va) >> HV_LOG2_PAGE_SIZE_LARGE))
+#define _HV_L1_INDEX(va, log2_page_size_large) \
+  (((va) >> log2_page_size_large))
 
 #endif /* CHIP_VA_WIDTH() > 32 */
 
 /** Index in level-2 page table for a specific VA */
-#define HV_L2_INDEX(va) \
-  (((va) >> HV_LOG2_PAGE_SIZE_SMALL) & (HV_L2_ENTRIES - 1))
+#define _HV_L2_INDEX(va, log2_page_size_large, log2_page_size_small) \
+  (((va) >> log2_page_size_small) & \
+   (_HV_L2_ENTRIES(log2_page_size_large, log2_page_size_small) - 1))
 
 #else /* __ASSEMBLER __ */
 
 #if CHIP_VA_WIDTH() > 32
 
 /** Index in L1 for a specific VA */
-#define HV_L1_INDEX(va) \
-  (((HV_VirtAddr)(va) >> HV_LOG2_PAGE_SIZE_LARGE) & (HV_L1_ENTRIES - 1))
+#define _HV_L1_INDEX(va, log2_page_size_large) \
+  (((HV_VirtAddr)(va) >> log2_page_size_large) & \
+   (_HV_L1_ENTRIES(log2_page_size_large) - 1))
 
 #else /* CHIP_VA_WIDTH() > 32 */
 
 /** Index in L1 for a specific VA */
-#define HV_L1_INDEX(va) \
-  (((HV_VirtAddr)(va) >> HV_LOG2_PAGE_SIZE_LARGE))
+#define _HV_L1_INDEX(va, log2_page_size_large) \
+  (((HV_VirtAddr)(va) >> log2_page_size_large))
 
 #endif /* CHIP_VA_WIDTH() > 32 */
 
 /** Index in level-2 page table for a specific VA */
-#define HV_L2_INDEX(va) \
-  (((HV_VirtAddr)(va) >> HV_LOG2_PAGE_SIZE_SMALL) & (HV_L2_ENTRIES - 1))
+#define _HV_L2_INDEX(va, log2_page_size_large, log2_page_size_small) \
+  (((HV_VirtAddr)(va) >> log2_page_size_small) & \
+   (_HV_L2_ENTRIES(log2_page_size_large, log2_page_size_small) - 1))
 
 #endif /* __ASSEMBLER __ */
 
-#endif /* _TILE_HV_H */
+/** Position of the PFN field within the PTE (subset of the PTFN). */
+#define _HV_PTE_INDEX_PFN(log2_page_size) \
+  (HV_PTE_INDEX_PTFN + (log2_page_size - HV_LOG2_PAGE_TABLE_ALIGN))
+
+/** Length of the PFN field within the PTE (subset of the PTFN). */
+#define _HV_PTE_INDEX_PFN_BITS(log2_page_size) \
+  (HV_PTE_INDEX_PTFN_BITS - (log2_page_size - HV_LOG2_PAGE_TABLE_ALIGN))
+
+/** Converts a client physical address to a pfn. */
+#define _HV_CPA_TO_PFN(p, log2_page_size) ((p) >> log2_page_size)
+
+/** Converts a pfn to a client physical address. */
+#define _HV_PFN_TO_CPA(p, log2_page_size) \
+  (((HV_PhysAddr)(p)) << log2_page_size)
+
+/** Converts a ptfn to a pfn. */
+#define _HV_PTFN_TO_PFN(p, log2_page_size) \
+  ((p) >> (log2_page_size - HV_LOG2_PAGE_TABLE_ALIGN))
+
+/** Converts a pfn to a ptfn. */
+#define _HV_PFN_TO_PTFN(p, log2_page_size) \
+  ((p) << (log2_page_size - HV_LOG2_PAGE_TABLE_ALIGN))
+
+#endif /* _HV_HV_H */
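
The hv.h changes above replace the fixed-page-size PFN macros with "_HV_"
variants that take log2_page_size as an argument, now that the small-page
size is no longer a single compile-time constant.  As a rough standalone
sketch (not part of the patch), the following mirrors that arithmetic,
assuming HV_LOG2_PAGE_TABLE_ALIGN is 13 and using a hypothetical 16KB page:

#include <stdio.h>

/* Assumed value, for illustration only; the real constant lives in hv.h. */
#define LOG2_PAGE_TABLE_ALIGN	13

/* Mirrors _HV_CPA_TO_PFN, HV_CPA_TO_PTFN, and _HV_PTFN_TO_PFN above. */
#define CPA_TO_PFN(p, log2_pgsz)   ((p) >> (log2_pgsz))
#define CPA_TO_PTFN(p)             ((p) >> LOG2_PAGE_TABLE_ALIGN)
#define PTFN_TO_PFN(p, log2_pgsz)  ((p) >> ((log2_pgsz) - LOG2_PAGE_TABLE_ALIGN))

int main(void)
{
	unsigned long long cpa = 0x123450000ULL;  /* hypothetical client PA */
	int log2_pgsz = 14;                       /* hypothetical 16KB page */
	unsigned long long ptfn = CPA_TO_PTFN(cpa);
	unsigned long long pfn  = CPA_TO_PFN(cpa, log2_pgsz);

	/* The PFN is just the PTFN with the sub-page-size bits dropped. */
	printf("ptfn=%#llx pfn=%#llx ptfn->pfn=%#llx\n",
	       ptfn, pfn, PTFN_TO_PFN(ptfn, log2_pgsz));
	return 0;
}
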
diff --git a/arch/tile/kernel/Makefile b/arch/tile/kernel/Makefile
index 0d826fa..5de9924 100644
--- a/arch/tile/kernel/Makefile
+++ b/arch/tile/kernel/Makefile
@@ -9,10 +9,9 @@
 	intvec_$(BITS).o regs_$(BITS).o tile-desc_$(BITS).o
 
 obj-$(CONFIG_HARDWALL)		+= hardwall.o
-obj-$(CONFIG_TILEGX)		+= futex_64.o
 obj-$(CONFIG_COMPAT)		+= compat.o compat_signal.o
 obj-$(CONFIG_SMP)		+= smpboot.o smp.o tlb.o
 obj-$(CONFIG_MODULES)		+= module.o
 obj-$(CONFIG_EARLY_PRINTK)	+= early_printk.o
-obj-$(CONFIG_KEXEC)		+= machine_kexec.o relocate_kernel.o
+obj-$(CONFIG_KEXEC)		+= machine_kexec.o relocate_kernel_$(BITS).o
 obj-$(CONFIG_PCI)		+= pci.o
diff --git a/arch/tile/kernel/compat_signal.c b/arch/tile/kernel/compat_signal.c
index cdef6e5..474571b8 100644
--- a/arch/tile/kernel/compat_signal.c
+++ b/arch/tile/kernel/compat_signal.c
@@ -118,8 +118,6 @@
 	struct compat_ucontext uc;
 };
 
-#define _BLOCKABLE (~(sigmask(SIGKILL) | sigmask(SIGSTOP)))
-
 long compat_sys_rt_sigaction(int sig, struct compat_sigaction __user *act,
 			     struct compat_sigaction __user *oact,
 			     size_t sigsetsize)
@@ -302,7 +300,6 @@
 	if (__copy_from_user(&set, &frame->uc.uc_sigmask, sizeof(set)))
 		goto badframe;
 
-	sigdelsetmask(&set, ~_BLOCKABLE);
 	set_current_blocked(&set);
 
 	if (restore_sigcontext(regs, &frame->uc.uc_mcontext))
diff --git a/arch/tile/kernel/entry.S b/arch/tile/kernel/entry.S
index ec91568..c31637b 100644
--- a/arch/tile/kernel/entry.S
+++ b/arch/tile/kernel/entry.S
@@ -68,20 +68,6 @@
 	jrp lr   /* keep backtracer happy */
 	STD_ENDPROC(KBacktraceIterator_init_current)
 
-/*
- * Reset our stack to r1/r2 (sp and ksp0+cpu respectively), then
- * free the old stack (passed in r0) and re-invoke cpu_idle().
- * We update sp and ksp0 simultaneously to avoid backtracer warnings.
- */
-STD_ENTRY(cpu_idle_on_new_stack)
-	{
-	 move sp, r1
-	 mtspr SPR_SYSTEM_SAVE_K_0, r2
-	}
-	jal free_thread_info
-	j cpu_idle
-	STD_ENDPROC(cpu_idle_on_new_stack)
-
 /* Loop forever on a nap during SMP boot. */
 STD_ENTRY(smp_nap)
 	nap
@@ -100,8 +86,9 @@
  */
 STD_ENTRY(_cpu_idle)
 	movei r1, 1
+	IRQ_ENABLE_LOAD(r2, r3)
 	mtspr INTERRUPT_CRITICAL_SECTION, r1
-	IRQ_ENABLE(r2, r3)             /* unmask, but still with ICS set */
+	IRQ_ENABLE_APPLY(r2, r3)       /* unmask, but still with ICS set */
 	mtspr INTERRUPT_CRITICAL_SECTION, zero
 	.global _cpu_idle_nap
 _cpu_idle_nap:
diff --git a/arch/tile/kernel/hardwall.c b/arch/tile/kernel/hardwall.c
index 8c41891..20273ee 100644
--- a/arch/tile/kernel/hardwall.c
+++ b/arch/tile/kernel/hardwall.c
@@ -33,59 +33,157 @@
 
 
 /*
- * This data structure tracks the rectangle data, etc., associated
- * one-to-one with a "struct file *" from opening HARDWALL_FILE.
+ * Implement a per-cpu "hardwall" resource class such as UDN or IPI.
+ * We use "hardwall" nomenclature throughout for historical reasons.
+ * The lock here controls access to the list data structure as well as
+ * to the items on the list.
+ */
+struct hardwall_type {
+	int index;
+	int is_xdn;
+	int is_idn;
+	int disabled;
+	const char *name;
+	struct list_head list;
+	spinlock_t lock;
+	struct proc_dir_entry *proc_dir;
+};
+
+enum hardwall_index {
+	HARDWALL_UDN = 0,
+#ifndef __tilepro__
+	HARDWALL_IDN = 1,
+	HARDWALL_IPI = 2,
+#endif
+	_HARDWALL_TYPES
+};
+
+static struct hardwall_type hardwall_types[] = {
+	{  /* user-space access to UDN */
+		0,
+		1,
+		0,
+		0,
+		"udn",
+		LIST_HEAD_INIT(hardwall_types[HARDWALL_UDN].list),
+		__SPIN_LOCK_INITIALIZER(hardwall_types[HARDWALL_UDN].lock),
+		NULL
+	},
+#ifndef __tilepro__
+	{  /* user-space access to IDN */
+		1,
+		1,
+		1,
+		1,  /* disabled pending hypervisor support */
+		"idn",
+		LIST_HEAD_INIT(hardwall_types[HARDWALL_IDN].list),
+		__SPIN_LOCK_INITIALIZER(hardwall_types[HARDWALL_IDN].lock),
+		NULL
+	},
+	{  /* access to user-space IPI */
+		2,
+		0,
+		0,
+		0,
+		"ipi",
+		LIST_HEAD_INIT(hardwall_types[HARDWALL_IPI].list),
+		__SPIN_LOCK_INITIALIZER(hardwall_types[HARDWALL_IPI].lock),
+		NULL
+	},
+#endif
+};
+
+/*
+ * This data structure tracks the cpu data, etc., associated
+ * one-to-one with a "struct file *" from opening a hardwall device file.
  * Note that the file's private data points back to this structure.
  */
 struct hardwall_info {
-	struct list_head list;             /* "rectangles" list */
+	struct list_head list;             /* for hardwall_types.list */
 	struct list_head task_head;        /* head of tasks in this hardwall */
-	struct cpumask cpumask;            /* cpus in the rectangle */
+	struct hardwall_type *type;        /* type of this resource */
+	struct cpumask cpumask;            /* cpus reserved */
+	int id;                            /* integer id for this hardwall */
+	int teardown_in_progress;          /* are we tearing this one down? */
+
+	/* Remaining fields only valid for user-network resources. */
 	int ulhc_x;                        /* upper left hand corner x coord */
 	int ulhc_y;                        /* upper left hand corner y coord */
 	int width;                         /* rectangle width */
 	int height;                        /* rectangle height */
-	int id;                            /* integer id for this hardwall */
-	int teardown_in_progress;          /* are we tearing this one down? */
+#if CHIP_HAS_REV1_XDN()
+	atomic_t xdn_pending_count;        /* cores in phase 1 of drain */
+#endif
 };
 
-/* Currently allocated hardwall rectangles */
-static LIST_HEAD(rectangles);
 
 /* /proc/tile/hardwall */
 static struct proc_dir_entry *hardwall_proc_dir;
 
 /* Functions to manage files in /proc/tile/hardwall. */
-static void hardwall_add_proc(struct hardwall_info *rect);
-static void hardwall_remove_proc(struct hardwall_info *rect);
-
-/*
- * Guard changes to the hardwall data structures.
- * This could be finer grained (e.g. one lock for the list of hardwall
- * rectangles, then separate embedded locks for each one's list of tasks),
- * but there are subtle correctness issues when trying to start with
- * a task's "hardwall" pointer and lock the correct rectangle's embedded
- * lock in the presence of a simultaneous deactivation, so it seems
- * easier to have a single lock, given that none of these data
- * structures are touched very frequently during normal operation.
- */
-static DEFINE_SPINLOCK(hardwall_lock);
+static void hardwall_add_proc(struct hardwall_info *);
+static void hardwall_remove_proc(struct hardwall_info *);
 
 /* Allow disabling UDN access. */
-static int udn_disabled;
 static int __init noudn(char *str)
 {
 	pr_info("User-space UDN access is disabled\n");
-	udn_disabled = 1;
+	hardwall_types[HARDWALL_UDN].disabled = 1;
 	return 0;
 }
 early_param("noudn", noudn);
 
+#ifndef __tilepro__
+/* Allow disabling IDN access. */
+static int __init noidn(char *str)
+{
+	pr_info("User-space IDN access is disabled\n");
+	hardwall_types[HARDWALL_IDN].disabled = 1;
+	return 0;
+}
+early_param("noidn", noidn);
+
+/* Allow disabling IPI access. */
+static int __init noipi(char *str)
+{
+	pr_info("User-space IPI access is disabled\n");
+	hardwall_types[HARDWALL_IPI].disabled = 1;
+	return 0;
+}
+early_param("noipi", noipi);
+#endif
+
 
 /*
- * Low-level primitives
+ * Low-level primitives for UDN/IDN
  */
 
+#ifdef __tilepro__
+#define mtspr_XDN(hwt, name, val) \
+	do { (void)(hwt); __insn_mtspr(SPR_UDN_##name, (val)); } while (0)
+#define mtspr_MPL_XDN(hwt, name, val) \
+	do { (void)(hwt); __insn_mtspr(SPR_MPL_UDN_##name, (val)); } while (0)
+#define mfspr_XDN(hwt, name) \
+	((void)(hwt), __insn_mfspr(SPR_UDN_##name))
+#else
+#define mtspr_XDN(hwt, name, val)					\
+	do {								\
+		if ((hwt)->is_idn)					\
+			__insn_mtspr(SPR_IDN_##name, (val));		\
+		else							\
+			__insn_mtspr(SPR_UDN_##name, (val));		\
+	} while (0)
+#define mtspr_MPL_XDN(hwt, name, val)					\
+	do {								\
+		if ((hwt)->is_idn)					\
+			__insn_mtspr(SPR_MPL_IDN_##name, (val));	\
+		else							\
+			__insn_mtspr(SPR_MPL_UDN_##name, (val));	\
+	} while (0)
+#define mfspr_XDN(hwt, name) \
+  ((hwt)->is_idn ? __insn_mfspr(SPR_IDN_##name) : __insn_mfspr(SPR_UDN_##name))
+#endif
+
 /* Set a CPU bit if the CPU is online. */
 #define cpu_online_set(cpu, dst) do { \
 	if (cpu_online(cpu))          \
@@ -101,7 +199,7 @@
 }
 
 /* Compute the rectangle parameters and validate the cpumask. */
-static int setup_rectangle(struct hardwall_info *r, struct cpumask *mask)
+static int check_rectangle(struct hardwall_info *r, struct cpumask *mask)
 {
 	int x, y, cpu, ulhc, lrhc;
 
@@ -114,8 +212,6 @@
 	r->ulhc_y = cpu_y(ulhc);
 	r->width = cpu_x(lrhc) - r->ulhc_x + 1;
 	r->height = cpu_y(lrhc) - r->ulhc_y + 1;
-	cpumask_copy(&r->cpumask, mask);
-	r->id = ulhc;   /* The ulhc cpu id can be the hardwall id. */
 
 	/* Width and height must be positive */
 	if (r->width <= 0 || r->height <= 0)
@@ -128,7 +224,7 @@
 				return -EINVAL;
 
 	/*
-	 * Note that offline cpus can't be drained when this UDN
+	 * Note that offline cpus can't be drained when this user network
 	 * rectangle eventually closes.  We used to detect this
 	 * situation and print a warning, but it annoyed users and
 	 * they ignored it anyway, so now we just return without a
@@ -137,16 +233,6 @@
 	return 0;
 }
 
-/* Do the two given rectangles overlap on any cpu? */
-static int overlaps(struct hardwall_info *a, struct hardwall_info *b)
-{
-	return a->ulhc_x + a->width > b->ulhc_x &&    /* A not to the left */
-		b->ulhc_x + b->width > a->ulhc_x &&   /* B not to the left */
-		a->ulhc_y + a->height > b->ulhc_y &&  /* A not above */
-		b->ulhc_y + b->height > a->ulhc_y;    /* B not above */
-}
-
-
 /*
  * Hardware management of hardwall setup, teardown, trapping,
  * and enabling/disabling PL0 access to the networks.
@@ -157,23 +243,35 @@
 	N_PROTECT = (1 << 0),
 	E_PROTECT = (1 << 1),
 	S_PROTECT = (1 << 2),
-	W_PROTECT = (1 << 3)
+	W_PROTECT = (1 << 3),
+	C_PROTECT = (1 << 4),
 };
 
-static void enable_firewall_interrupts(void)
+static inline int xdn_which_interrupt(struct hardwall_type *hwt)
 {
-	arch_local_irq_unmask_now(INT_UDN_FIREWALL);
+#ifndef __tilepro__
+	if (hwt->is_idn)
+		return INT_IDN_FIREWALL;
+#endif
+	return INT_UDN_FIREWALL;
 }
 
-static void disable_firewall_interrupts(void)
+static void enable_firewall_interrupts(struct hardwall_type *hwt)
 {
-	arch_local_irq_mask_now(INT_UDN_FIREWALL);
+	arch_local_irq_unmask_now(xdn_which_interrupt(hwt));
+}
+
+static void disable_firewall_interrupts(struct hardwall_type *hwt)
+{
+	arch_local_irq_mask_now(xdn_which_interrupt(hwt));
 }
 
 /* Set up hardwall on this cpu based on the passed hardwall_info. */
-static void hardwall_setup_ipi_func(void *info)
+static void hardwall_setup_func(void *info)
 {
 	struct hardwall_info *r = info;
+	struct hardwall_type *hwt = r->type;
+
 	int cpu = smp_processor_id();
 	int x = cpu % smp_width;
 	int y = cpu / smp_width;
@@ -187,13 +285,12 @@
 	if (y == r->ulhc_y + r->height - 1)
 		bits |= S_PROTECT;
 	BUG_ON(bits == 0);
-	__insn_mtspr(SPR_UDN_DIRECTION_PROTECT, bits);
-	enable_firewall_interrupts();
-
+	mtspr_XDN(hwt, DIRECTION_PROTECT, bits);
+	enable_firewall_interrupts(hwt);
 }
 
 /* Set up all cpus on edge of rectangle to enable/disable hardwall SPRs. */
-static void hardwall_setup(struct hardwall_info *r)
+static void hardwall_protect_rectangle(struct hardwall_info *r)
 {
 	int x, y, cpu, delta;
 	struct cpumask rect_cpus;
@@ -217,37 +314,50 @@
 	}
 
 	/* Then tell all the cpus to set up their protection SPR */
-	on_each_cpu_mask(&rect_cpus, hardwall_setup_ipi_func, r, 1);
+	on_each_cpu_mask(&rect_cpus, hardwall_setup_func, r, 1);
 }
 
 void __kprobes do_hardwall_trap(struct pt_regs* regs, int fault_num)
 {
 	struct hardwall_info *rect;
+	struct hardwall_type *hwt;
 	struct task_struct *p;
 	struct siginfo info;
-	int x, y;
 	int cpu = smp_processor_id();
 	int found_processes;
 	unsigned long flags;
-
 	struct pt_regs *old_regs = set_irq_regs(regs);
+
 	irq_enter();
 
+	/* Figure out which network trapped. */
+	switch (fault_num) {
+#ifndef __tilepro__
+	case INT_IDN_FIREWALL:
+		hwt = &hardwall_types[HARDWALL_IDN];
+		break;
+#endif
+	case INT_UDN_FIREWALL:
+		hwt = &hardwall_types[HARDWALL_UDN];
+		break;
+	default:
+		BUG();
+	}
+	BUG_ON(hwt->disabled);
+
 	/* This tile trapped a network access; find the rectangle. */
-	x = cpu % smp_width;
-	y = cpu / smp_width;
-	spin_lock_irqsave(&hardwall_lock, flags);
-	list_for_each_entry(rect, &rectangles, list) {
-		if (contains(rect, x, y))
+	spin_lock_irqsave(&hwt->lock, flags);
+	list_for_each_entry(rect, &hwt->list, list) {
+		if (cpumask_test_cpu(cpu, &rect->cpumask))
 			break;
 	}
 
 	/*
 	 * It shouldn't be possible not to find this cpu on the
 	 * rectangle list, since only cpus in rectangles get hardwalled.
-	 * The hardwall is only removed after the UDN is drained.
+	 * The hardwall is only removed after the user network is drained.
 	 */
-	BUG_ON(&rect->list == &rectangles);
+	BUG_ON(&rect->list == &hwt->list);
 
 	/*
 	 * If we already started teardown on this hardwall, don't worry;
@@ -255,30 +365,32 @@
 	 * to quiesce.
 	 */
 	if (rect->teardown_in_progress) {
-		pr_notice("cpu %d: detected hardwall violation %#lx"
+		pr_notice("cpu %d: detected %s hardwall violation %#lx"
 		       " while teardown already in progress\n",
-		       cpu, (long) __insn_mfspr(SPR_UDN_DIRECTION_PROTECT));
+			  cpu, hwt->name,
+			  (long)mfspr_XDN(hwt, DIRECTION_PROTECT));
 		goto done;
 	}
 
 	/*
 	 * Kill off any process that is activated in this rectangle.
 	 * We bypass security to deliver the signal, since it must be
-	 * one of the activated processes that generated the UDN
+	 * one of the activated processes that generated the user network
 	 * message that caused this trap, and all the activated
 	 * processes shared a single open file so are pretty tightly
 	 * bound together from a security point of view to begin with.
 	 */
 	rect->teardown_in_progress = 1;
 	wmb(); /* Ensure visibility of rectangle before notifying processes. */
-	pr_notice("cpu %d: detected hardwall violation %#lx...\n",
-	       cpu, (long) __insn_mfspr(SPR_UDN_DIRECTION_PROTECT));
+	pr_notice("cpu %d: detected %s hardwall violation %#lx...\n",
+		  cpu, hwt->name, (long)mfspr_XDN(hwt, DIRECTION_PROTECT));
 	info.si_signo = SIGILL;
 	info.si_errno = 0;
 	info.si_code = ILL_HARDWALL;
 	found_processes = 0;
-	list_for_each_entry(p, &rect->task_head, thread.hardwall_list) {
-		BUG_ON(p->thread.hardwall != rect);
+	list_for_each_entry(p, &rect->task_head,
+			    thread.hardwall[hwt->index].list) {
+		BUG_ON(p->thread.hardwall[hwt->index].info != rect);
 		if (!(p->flags & PF_EXITING)) {
 			found_processes = 1;
 			pr_notice("hardwall: killing %d\n", p->pid);
@@ -289,7 +401,7 @@
 		pr_notice("hardwall: no associated processes!\n");
 
  done:
-	spin_unlock_irqrestore(&hardwall_lock, flags);
+	spin_unlock_irqrestore(&hwt->lock, flags);
 
 	/*
 	 * We have to disable firewall interrupts now, or else when we
@@ -298,48 +410,87 @@
 	 * haven't yet drained the network, and that would allow packets
 	 * to cross out of the hardwall region.
 	 */
-	disable_firewall_interrupts();
+	disable_firewall_interrupts(hwt);
 
 	irq_exit();
 	set_irq_regs(old_regs);
 }
 
-/* Allow access from user space to the UDN. */
-void grant_network_mpls(void)
+/* Allow access from user space to the user network. */
+void grant_hardwall_mpls(struct hardwall_type *hwt)
 {
-	__insn_mtspr(SPR_MPL_UDN_ACCESS_SET_0, 1);
-	__insn_mtspr(SPR_MPL_UDN_AVAIL_SET_0, 1);
-	__insn_mtspr(SPR_MPL_UDN_COMPLETE_SET_0, 1);
-	__insn_mtspr(SPR_MPL_UDN_TIMER_SET_0, 1);
+#ifndef __tilepro__
+	if (!hwt->is_xdn) {
+		__insn_mtspr(SPR_MPL_IPI_0_SET_0, 1);
+		return;
+	}
+#endif
+	mtspr_MPL_XDN(hwt, ACCESS_SET_0, 1);
+	mtspr_MPL_XDN(hwt, AVAIL_SET_0, 1);
+	mtspr_MPL_XDN(hwt, COMPLETE_SET_0, 1);
+	mtspr_MPL_XDN(hwt, TIMER_SET_0, 1);
 #if !CHIP_HAS_REV1_XDN()
-	__insn_mtspr(SPR_MPL_UDN_REFILL_SET_0, 1);
-	__insn_mtspr(SPR_MPL_UDN_CA_SET_0, 1);
+	mtspr_MPL_XDN(hwt, REFILL_SET_0, 1);
+	mtspr_MPL_XDN(hwt, CA_SET_0, 1);
 #endif
 }
 
-/* Deny access from user space to the UDN. */
-void restrict_network_mpls(void)
+/* Deny access from user space to the user network. */
+void restrict_hardwall_mpls(struct hardwall_type *hwt)
 {
-	__insn_mtspr(SPR_MPL_UDN_ACCESS_SET_1, 1);
-	__insn_mtspr(SPR_MPL_UDN_AVAIL_SET_1, 1);
-	__insn_mtspr(SPR_MPL_UDN_COMPLETE_SET_1, 1);
-	__insn_mtspr(SPR_MPL_UDN_TIMER_SET_1, 1);
+#ifndef __tilepro__
+	if (!hwt->is_xdn) {
+		__insn_mtspr(SPR_MPL_IPI_0_SET_1, 1);
+		return;
+	}
+#endif
+	mtspr_MPL_XDN(hwt, ACCESS_SET_1, 1);
+	mtspr_MPL_XDN(hwt, AVAIL_SET_1, 1);
+	mtspr_MPL_XDN(hwt, COMPLETE_SET_1, 1);
+	mtspr_MPL_XDN(hwt, TIMER_SET_1, 1);
 #if !CHIP_HAS_REV1_XDN()
-	__insn_mtspr(SPR_MPL_UDN_REFILL_SET_1, 1);
-	__insn_mtspr(SPR_MPL_UDN_CA_SET_1, 1);
+	mtspr_MPL_XDN(hwt, REFILL_SET_1, 1);
+	mtspr_MPL_XDN(hwt, CA_SET_1, 1);
 #endif
 }
 
+/* Restrict or deny as necessary for the task we're switching to. */
+void hardwall_switch_tasks(struct task_struct *prev,
+			   struct task_struct *next)
+{
+	int i;
+	for (i = 0; i < HARDWALL_TYPES; ++i) {
+		if (prev->thread.hardwall[i].info != NULL) {
+			if (next->thread.hardwall[i].info == NULL)
+				restrict_hardwall_mpls(&hardwall_types[i]);
+		} else if (next->thread.hardwall[i].info != NULL) {
+			grant_hardwall_mpls(&hardwall_types[i]);
+		}
+	}
+}
+
+/* Does this task have the right to IPI the given cpu? */
+int hardwall_ipi_valid(int cpu)
+{
+#ifdef __tilegx__
+	struct hardwall_info *info =
+		current->thread.hardwall[HARDWALL_IPI].info;
+	return info && cpumask_test_cpu(cpu, &info->cpumask);
+#else
+	return 0;
+#endif
+}
 
 /*
- * Code to create, activate, deactivate, and destroy hardwall rectangles.
+ * Code to create, activate, deactivate, and destroy hardwall resources.
  */
 
-/* Create a hardwall for the given rectangle */
-static struct hardwall_info *hardwall_create(
-	size_t size, const unsigned char __user *bits)
+/* Create a hardwall for the given resource */
+static struct hardwall_info *hardwall_create(struct hardwall_type *hwt,
+					     size_t size,
+					     const unsigned char __user *bits)
 {
-	struct hardwall_info *iter, *rect;
+	struct hardwall_info *iter, *info;
 	struct cpumask mask;
 	unsigned long flags;
 	int rc;
@@ -370,55 +521,62 @@
 		}
 	}
 
-	/* Allocate a new rectangle optimistically. */
-	rect = kmalloc(sizeof(struct hardwall_info),
+	/* Allocate a new hardwall_info optimistically. */
+	info = kmalloc(sizeof(struct hardwall_info),
 			GFP_KERNEL | __GFP_ZERO);
-	if (rect == NULL)
+	if (info == NULL)
 		return ERR_PTR(-ENOMEM);
-	INIT_LIST_HEAD(&rect->task_head);
+	INIT_LIST_HEAD(&info->task_head);
+	info->type = hwt;
 
 	/* Compute the rectangle size and validate that it's plausible. */
-	rc = setup_rectangle(rect, &mask);
-	if (rc != 0) {
-		kfree(rect);
-		return ERR_PTR(rc);
+	cpumask_copy(&info->cpumask, &mask);
+	info->id = find_first_bit(cpumask_bits(&mask), nr_cpumask_bits);
+	if (hwt->is_xdn) {
+		rc = check_rectangle(info, &mask);
+		if (rc != 0) {
+			kfree(info);
+			return ERR_PTR(rc);
+		}
 	}
 
 	/* Confirm it doesn't overlap and add it to the list. */
-	spin_lock_irqsave(&hardwall_lock, flags);
-	list_for_each_entry(iter, &rectangles, list) {
-		if (overlaps(iter, rect)) {
-			spin_unlock_irqrestore(&hardwall_lock, flags);
-			kfree(rect);
+	spin_lock_irqsave(&hwt->lock, flags);
+	list_for_each_entry(iter, &hwt->list, list) {
+		if (cpumask_intersects(&iter->cpumask, &info->cpumask)) {
+			spin_unlock_irqrestore(&hwt->lock, flags);
+			kfree(info);
 			return ERR_PTR(-EBUSY);
 		}
 	}
-	list_add_tail(&rect->list, &rectangles);
-	spin_unlock_irqrestore(&hardwall_lock, flags);
+	list_add_tail(&info->list, &hwt->list);
+	spin_unlock_irqrestore(&hwt->lock, flags);
 
 	/* Set up appropriate hardwalling on all affected cpus. */
-	hardwall_setup(rect);
+	if (hwt->is_xdn)
+		hardwall_protect_rectangle(info);
 
 	/* Create a /proc/tile/hardwall entry. */
-	hardwall_add_proc(rect);
+	hardwall_add_proc(info);
 
-	return rect;
+	return info;
 }
 
 /* Activate a given hardwall on this cpu for this process. */
-static int hardwall_activate(struct hardwall_info *rect)
+static int hardwall_activate(struct hardwall_info *info)
 {
-	int cpu, x, y;
+	int cpu;
 	unsigned long flags;
 	struct task_struct *p = current;
 	struct thread_struct *ts = &p->thread;
+	struct hardwall_type *hwt;
 
-	/* Require a rectangle. */
-	if (rect == NULL)
+	/* Require a hardwall. */
+	if (info == NULL)
 		return -ENODATA;
 
-	/* Not allowed to activate a rectangle that is being torn down. */
-	if (rect->teardown_in_progress)
+	/* Not allowed to activate a hardwall that is being torn down. */
+	if (info->teardown_in_progress)
 		return -EINVAL;
 
 	/*
@@ -428,78 +586,87 @@
 	if (cpumask_weight(&p->cpus_allowed) != 1)
 		return -EPERM;
 
-	/* Make sure we are bound to a cpu in this rectangle. */
+	/* Make sure we are bound to a cpu assigned to this resource. */
 	cpu = smp_processor_id();
 	BUG_ON(cpumask_first(&p->cpus_allowed) != cpu);
-	x = cpu_x(cpu);
-	y = cpu_y(cpu);
-	if (!contains(rect, x, y))
+	if (!cpumask_test_cpu(cpu, &info->cpumask))
 		return -EINVAL;
 
 	/* If we are already bound to this hardwall, it's a no-op. */
-	if (ts->hardwall) {
-		BUG_ON(ts->hardwall != rect);
+	hwt = info->type;
+	if (ts->hardwall[hwt->index].info) {
+		BUG_ON(ts->hardwall[hwt->index].info != info);
 		return 0;
 	}
 
-	/* Success!  This process gets to use the user networks on this cpu. */
-	ts->hardwall = rect;
-	spin_lock_irqsave(&hardwall_lock, flags);
-	list_add(&ts->hardwall_list, &rect->task_head);
-	spin_unlock_irqrestore(&hardwall_lock, flags);
-	grant_network_mpls();
-	printk(KERN_DEBUG "Pid %d (%s) activated for hardwall: cpu %d\n",
-	       p->pid, p->comm, cpu);
+	/* Success!  This process gets to use the resource on this cpu. */
+	ts->hardwall[hwt->index].info = info;
+	spin_lock_irqsave(&hwt->lock, flags);
+	list_add(&ts->hardwall[hwt->index].list, &info->task_head);
+	spin_unlock_irqrestore(&hwt->lock, flags);
+	grant_hardwall_mpls(hwt);
+	printk(KERN_DEBUG "Pid %d (%s) activated for %s hardwall: cpu %d\n",
+	       p->pid, p->comm, hwt->name, cpu);
 	return 0;
 }
 
 /*
- * Deactivate a task's hardwall.  Must hold hardwall_lock.
+ * Deactivate a task's hardwall.  Must hold lock for hardwall_type.
  * This method may be called from free_task(), so we don't want to
  * rely on too many fields of struct task_struct still being valid.
  * We assume the cpus_allowed, pid, and comm fields are still valid.
  */
-static void _hardwall_deactivate(struct task_struct *task)
+static void _hardwall_deactivate(struct hardwall_type *hwt,
+				 struct task_struct *task)
 {
 	struct thread_struct *ts = &task->thread;
 
 	if (cpumask_weight(&task->cpus_allowed) != 1) {
-		pr_err("pid %d (%s) releasing networks with"
+		pr_err("pid %d (%s) releasing %s hardwall with"
 		       " an affinity mask containing %d cpus!\n",
-		       task->pid, task->comm,
+		       task->pid, task->comm, hwt->name,
 		       cpumask_weight(&task->cpus_allowed));
 		BUG();
 	}
 
-	BUG_ON(ts->hardwall == NULL);
-	ts->hardwall = NULL;
-	list_del(&ts->hardwall_list);
+	BUG_ON(ts->hardwall[hwt->index].info == NULL);
+	ts->hardwall[hwt->index].info = NULL;
+	list_del(&ts->hardwall[hwt->index].list);
 	if (task == current)
-		restrict_network_mpls();
+		restrict_hardwall_mpls(hwt);
 }
 
 /* Deactivate a task's hardwall. */
-int hardwall_deactivate(struct task_struct *task)
+static int hardwall_deactivate(struct hardwall_type *hwt,
+			       struct task_struct *task)
 {
 	unsigned long flags;
 	int activated;
 
-	spin_lock_irqsave(&hardwall_lock, flags);
-	activated = (task->thread.hardwall != NULL);
+	spin_lock_irqsave(&hwt->lock, flags);
+	activated = (task->thread.hardwall[hwt->index].info != NULL);
 	if (activated)
-		_hardwall_deactivate(task);
-	spin_unlock_irqrestore(&hardwall_lock, flags);
+		_hardwall_deactivate(hwt, task);
+	spin_unlock_irqrestore(&hwt->lock, flags);
 
 	if (!activated)
 		return -EINVAL;
 
-	printk(KERN_DEBUG "Pid %d (%s) deactivated for hardwall: cpu %d\n",
-	       task->pid, task->comm, smp_processor_id());
+	printk(KERN_DEBUG "Pid %d (%s) deactivated for %s hardwall: cpu %d\n",
+	       task->pid, task->comm, hwt->name, smp_processor_id());
 	return 0;
 }
 
-/* Stop a UDN switch before draining the network. */
-static void stop_udn_switch(void *ignored)
+void hardwall_deactivate_all(struct task_struct *task)
+{
+	int i;
+	for (i = 0; i < HARDWALL_TYPES; ++i)
+		if (task->thread.hardwall[i].info)
+			hardwall_deactivate(&hardwall_types[i], task);
+}
+
+/* Stop the switch before draining the network. */
+static void stop_xdn_switch(void *arg)
 {
 #if !CHIP_HAS_REV1_XDN()
 	/* Freeze the switch and the demux. */
@@ -507,13 +674,71 @@
 		     SPR_UDN_SP_FREEZE__SP_FRZ_MASK |
 		     SPR_UDN_SP_FREEZE__DEMUX_FRZ_MASK |
 		     SPR_UDN_SP_FREEZE__NON_DEST_EXT_MASK);
+#else
+	/*
+	 * Drop all packets bound for the core or off the edge.
+	 * We rely on the normal hardwall protection setup code having
+	 * set the low four bits to trigger firewall interrupts; here we
+	 * shift those bits up to request "drop on send" semantics instead,
+	 * and also add "drop on send to core" on every switch.
+	 * In practice it seems the switches latch the DIRECTION_PROTECT
+	 * SPR so they won't start dropping if they're already
+	 * delivering the last message to the core, but it doesn't
+	 * hurt to enable it here.
+	 */
+	struct hardwall_type *hwt = arg;
+	unsigned long protect = mfspr_XDN(hwt, DIRECTION_PROTECT);
+	mtspr_XDN(hwt, DIRECTION_PROTECT, (protect | C_PROTECT) << 5);
 #endif
 }
 
-/* Drain all the state from a stopped switch. */
-static void drain_udn_switch(void *ignored)
+static void empty_xdn_demuxes(struct hardwall_type *hwt)
 {
-#if !CHIP_HAS_REV1_XDN()
+#ifndef __tilepro__
+	if (hwt->is_idn) {
+		while (__insn_mfspr(SPR_IDN_DATA_AVAIL) & (1 << 0))
+			(void) __tile_idn0_receive();
+		while (__insn_mfspr(SPR_IDN_DATA_AVAIL) & (1 << 1))
+			(void) __tile_idn1_receive();
+		return;
+	}
+#endif
+	while (__insn_mfspr(SPR_UDN_DATA_AVAIL) & (1 << 0))
+		(void) __tile_udn0_receive();
+	while (__insn_mfspr(SPR_UDN_DATA_AVAIL) & (1 << 1))
+		(void) __tile_udn1_receive();
+	while (__insn_mfspr(SPR_UDN_DATA_AVAIL) & (1 << 2))
+		(void) __tile_udn2_receive();
+	while (__insn_mfspr(SPR_UDN_DATA_AVAIL) & (1 << 3))
+		(void) __tile_udn3_receive();
+}
+
+/* Drain all the state from a stopped switch. */
+static void drain_xdn_switch(void *arg)
+{
+	struct hardwall_info *info = arg;
+	struct hardwall_type *hwt = info->type;
+
+#if CHIP_HAS_REV1_XDN()
+	/*
+	 * The switches have been configured to drop any messages
+	 * destined for cores (or off the edge of the rectangle).
+	 * But the current message may continue to be delivered,
+	 * so we wait until all the cores have finished any pending
+	 * messages before we stop draining.
+	 */
+	int pending = mfspr_XDN(hwt, PENDING);
+	while (pending--) {
+		empty_xdn_demuxes(hwt);
+		if (hwt->is_idn)
+			__tile_idn_send(0);
+		else
+			__tile_udn_send(0);
+	}
+	atomic_dec(&info->xdn_pending_count);
+	while (atomic_read(&info->xdn_pending_count))
+		empty_xdn_demuxes(hwt);
+#else
 	int i;
 	int from_tile_words, ca_count;
 
@@ -533,15 +758,7 @@
 		(void) __insn_mfspr(SPR_UDN_DEMUX_WRITE_FIFO);
 
 	/* Empty out demuxes. */
-	while (__insn_mfspr(SPR_UDN_DATA_AVAIL) & (1 << 0))
-		(void) __tile_udn0_receive();
-	while (__insn_mfspr(SPR_UDN_DATA_AVAIL) & (1 << 1))
-		(void) __tile_udn1_receive();
-	while (__insn_mfspr(SPR_UDN_DATA_AVAIL) & (1 << 2))
-		(void) __tile_udn2_receive();
-	while (__insn_mfspr(SPR_UDN_DATA_AVAIL) & (1 << 3))
-		(void) __tile_udn3_receive();
-	BUG_ON((__insn_mfspr(SPR_UDN_DATA_AVAIL) & 0xF) != 0);
+	empty_xdn_demuxes(hwt);
 
 	/* Empty out catch all. */
 	ca_count = __insn_mfspr(SPR_UDN_DEMUX_CA_COUNT);
@@ -563,21 +780,25 @@
 #endif
 }
 
-/* Reset random UDN state registers at boot up and during hardwall teardown. */
-void reset_network_state(void)
+/* Reset random XDN state registers at boot up and during hardwall teardown. */
+static void reset_xdn_network_state(struct hardwall_type *hwt)
 {
-#if !CHIP_HAS_REV1_XDN()
-	/* Reset UDN coordinates to their standard value */
-	unsigned int cpu = smp_processor_id();
-	unsigned int x = cpu % smp_width;
-	unsigned int y = cpu / smp_width;
-#endif
-
-	if (udn_disabled)
+	if (hwt->disabled)
 		return;
 
+	/* Clear out other random registers so we have a clean slate. */
+	mtspr_XDN(hwt, DIRECTION_PROTECT, 0);
+	mtspr_XDN(hwt, AVAIL_EN, 0);
+	mtspr_XDN(hwt, DEADLOCK_TIMEOUT, 0);
+
 #if !CHIP_HAS_REV1_XDN()
-	__insn_mtspr(SPR_UDN_TILE_COORD, (x << 18) | (y << 7));
+	/* Reset UDN coordinates to their standard value */
+	{
+		unsigned int cpu = smp_processor_id();
+		unsigned int x = cpu % smp_width;
+		unsigned int y = cpu / smp_width;
+		__insn_mtspr(SPR_UDN_TILE_COORD, (x << 18) | (y << 7));
+	}
 
 	/* Set demux tags to predefined values and enable them. */
 	__insn_mtspr(SPR_UDN_TAG_VALID, 0xf);
@@ -585,56 +806,50 @@
 	__insn_mtspr(SPR_UDN_TAG_1, (1 << 1));
 	__insn_mtspr(SPR_UDN_TAG_2, (1 << 2));
 	__insn_mtspr(SPR_UDN_TAG_3, (1 << 3));
-#endif
 
-	/* Clear out other random registers so we have a clean slate. */
-	__insn_mtspr(SPR_UDN_AVAIL_EN, 0);
-	__insn_mtspr(SPR_UDN_DEADLOCK_TIMEOUT, 0);
-#if !CHIP_HAS_REV1_XDN()
+	/* Set other rev0 random registers to a clean state. */
 	__insn_mtspr(SPR_UDN_REFILL_EN, 0);
 	__insn_mtspr(SPR_UDN_DEMUX_QUEUE_SEL, 0);
 	__insn_mtspr(SPR_UDN_SP_FIFO_SEL, 0);
-#endif
 
 	/* Start the switch and demux. */
-#if !CHIP_HAS_REV1_XDN()
 	__insn_mtspr(SPR_UDN_SP_FREEZE, 0);
 #endif
 }
 
-/* Restart a UDN switch after draining. */
-static void restart_udn_switch(void *ignored)
+void reset_network_state(void)
 {
-	reset_network_state();
-
-	/* Disable firewall interrupts. */
-	__insn_mtspr(SPR_UDN_DIRECTION_PROTECT, 0);
-	disable_firewall_interrupts();
+	reset_xdn_network_state(&hardwall_types[HARDWALL_UDN]);
+#ifndef __tilepro__
+	reset_xdn_network_state(&hardwall_types[HARDWALL_IDN]);
+#endif
 }
 
-/* Build a struct cpumask containing all valid tiles in bounding rectangle. */
-static void fill_mask(struct hardwall_info *r, struct cpumask *result)
+/* Restart an XDN switch after draining. */
+static void restart_xdn_switch(void *arg)
 {
-	int x, y, cpu;
+	struct hardwall_type *hwt = arg;
 
-	cpumask_clear(result);
+#if CHIP_HAS_REV1_XDN()
+	/* One last drain step to avoid races with injection and draining. */
+	empty_xdn_demuxes(hwt);
+#endif
 
-	cpu = r->ulhc_y * smp_width + r->ulhc_x;
-	for (y = 0; y < r->height; ++y, cpu += smp_width - r->width) {
-		for (x = 0; x < r->width; ++x, ++cpu)
-			cpu_online_set(cpu, result);
-	}
+	reset_xdn_network_state(hwt);
+
+	/* Disable firewall interrupts. */
+	disable_firewall_interrupts(hwt);
 }
 
 /* Last reference to a hardwall is gone, so clear the network. */
-static void hardwall_destroy(struct hardwall_info *rect)
+static void hardwall_destroy(struct hardwall_info *info)
 {
 	struct task_struct *task;
+	struct hardwall_type *hwt;
 	unsigned long flags;
-	struct cpumask mask;
 
-	/* Make sure this file actually represents a rectangle. */
-	if (rect == NULL)
+	/* Make sure this file actually represents a hardwall. */
+	if (info == NULL)
 		return;
 
 	/*
@@ -644,39 +859,53 @@
 	 * deactivate any remaining tasks before freeing the
 	 * hardwall_info object itself.
 	 */
-	spin_lock_irqsave(&hardwall_lock, flags);
-	list_for_each_entry(task, &rect->task_head, thread.hardwall_list)
-		_hardwall_deactivate(task);
-	spin_unlock_irqrestore(&hardwall_lock, flags);
+	hwt = info->type;
+	info->teardown_in_progress = 1;
+	spin_lock_irqsave(&hwt->lock, flags);
+	list_for_each_entry(task, &info->task_head,
+			    thread.hardwall[hwt->index].list)
+		_hardwall_deactivate(hwt, task);
+	spin_unlock_irqrestore(&hwt->lock, flags);
 
-	/* Drain the UDN. */
-	printk(KERN_DEBUG "Clearing hardwall rectangle %dx%d %d,%d\n",
-	       rect->width, rect->height, rect->ulhc_x, rect->ulhc_y);
-	fill_mask(rect, &mask);
-	on_each_cpu_mask(&mask, stop_udn_switch, NULL, 1);
-	on_each_cpu_mask(&mask, drain_udn_switch, NULL, 1);
+	if (hwt->is_xdn) {
+		/* Configure the switches for draining the user network. */
+		printk(KERN_DEBUG
+		       "Clearing %s hardwall rectangle %dx%d %d,%d\n",
+		       hwt->name, info->width, info->height,
+		       info->ulhc_x, info->ulhc_y);
+		on_each_cpu_mask(&info->cpumask, stop_xdn_switch, hwt, 1);
 
-	/* Restart switch and disable firewall. */
-	on_each_cpu_mask(&mask, restart_udn_switch, NULL, 1);
+		/* Drain the network. */
+#if CHIP_HAS_REV1_XDN()
+		atomic_set(&info->xdn_pending_count,
+			   cpumask_weight(&info->cpumask));
+		on_each_cpu_mask(&info->cpumask, drain_xdn_switch, info, 0);
+#else
+		on_each_cpu_mask(&info->cpumask, drain_xdn_switch, info, 1);
+#endif
+
+		/* Restart switch and disable firewall. */
+		on_each_cpu_mask(&info->cpumask, restart_xdn_switch, hwt, 1);
+	}
 
 	/* Remove the /proc/tile/hardwall entry. */
-	hardwall_remove_proc(rect);
+	hardwall_remove_proc(info);
 
-	/* Now free the rectangle from the list. */
-	spin_lock_irqsave(&hardwall_lock, flags);
-	BUG_ON(!list_empty(&rect->task_head));
-	list_del(&rect->list);
-	spin_unlock_irqrestore(&hardwall_lock, flags);
-	kfree(rect);
+	/* Now free the hardwall from the list. */
+	spin_lock_irqsave(&hwt->lock, flags);
+	BUG_ON(!list_empty(&info->task_head));
+	list_del(&info->list);
+	spin_unlock_irqrestore(&hwt->lock, flags);
+	kfree(info);
 }
 
 
 static int hardwall_proc_show(struct seq_file *sf, void *v)
 {
-	struct hardwall_info *rect = sf->private;
+	struct hardwall_info *info = sf->private;
 	char buf[256];
 
-	int rc = cpulist_scnprintf(buf, sizeof(buf), &rect->cpumask);
+	int rc = cpulist_scnprintf(buf, sizeof(buf), &info->cpumask);
 	buf[rc++] = '\n';
 	seq_write(sf, buf, rc);
 	return 0;
@@ -695,31 +924,45 @@
 	.release	= single_release,
 };
 
-static void hardwall_add_proc(struct hardwall_info *rect)
+static void hardwall_add_proc(struct hardwall_info *info)
 {
 	char buf[64];
-	snprintf(buf, sizeof(buf), "%d", rect->id);
-	proc_create_data(buf, 0444, hardwall_proc_dir,
-			 &hardwall_proc_fops, rect);
+	snprintf(buf, sizeof(buf), "%d", info->id);
+	proc_create_data(buf, 0444, info->type->proc_dir,
+			 &hardwall_proc_fops, info);
 }
 
-static void hardwall_remove_proc(struct hardwall_info *rect)
+static void hardwall_remove_proc(struct hardwall_info *info)
 {
 	char buf[64];
-	snprintf(buf, sizeof(buf), "%d", rect->id);
-	remove_proc_entry(buf, hardwall_proc_dir);
+	snprintf(buf, sizeof(buf), "%d", info->id);
+	remove_proc_entry(buf, info->type->proc_dir);
 }
 
 int proc_pid_hardwall(struct task_struct *task, char *buffer)
 {
-	struct hardwall_info *rect = task->thread.hardwall;
-	return rect ? sprintf(buffer, "%d\n", rect->id) : 0;
+	int i;
+	int n = 0;
+	for (i = 0; i < HARDWALL_TYPES; ++i) {
+		struct hardwall_info *info = task->thread.hardwall[i].info;
+		if (info)
+			n += sprintf(&buffer[n], "%s: %d\n",
+				     info->type->name, info->id);
+	}
+	return n;
 }
 
 void proc_tile_hardwall_init(struct proc_dir_entry *root)
 {
-	if (!udn_disabled)
-		hardwall_proc_dir = proc_mkdir("hardwall", root);
+	int i;
+	for (i = 0; i < HARDWALL_TYPES; ++i) {
+		struct hardwall_type *hwt = &hardwall_types[i];
+		if (hwt->disabled)
+			continue;
+		if (hardwall_proc_dir == NULL)
+			hardwall_proc_dir = proc_mkdir("hardwall", root);
+		hwt->proc_dir = proc_mkdir(hwt->name, hardwall_proc_dir);
+	}
 }
 
 
@@ -729,34 +972,45 @@
 
 static long hardwall_ioctl(struct file *file, unsigned int a, unsigned long b)
 {
-	struct hardwall_info *rect = file->private_data;
+	struct hardwall_info *info = file->private_data;
+	int minor = iminor(file->f_mapping->host);
+	struct hardwall_type *hwt;
 
 	if (_IOC_TYPE(a) != HARDWALL_IOCTL_BASE)
 		return -EINVAL;
 
+	BUILD_BUG_ON(HARDWALL_TYPES != _HARDWALL_TYPES);
+	BUILD_BUG_ON(HARDWALL_TYPES !=
+		     sizeof(hardwall_types)/sizeof(hardwall_types[0]));
+
+	if (minor < 0 || minor >= HARDWALL_TYPES)
+		return -EINVAL;
+	hwt = &hardwall_types[minor];
+	WARN_ON(info && hwt != info->type);
+
 	switch (_IOC_NR(a)) {
 	case _HARDWALL_CREATE:
-		if (udn_disabled)
+		if (hwt->disabled)
 			return -ENOSYS;
-		if (rect != NULL)
+		if (info != NULL)
 			return -EALREADY;
-		rect = hardwall_create(_IOC_SIZE(a),
-					(const unsigned char __user *)b);
-		if (IS_ERR(rect))
-			return PTR_ERR(rect);
-		file->private_data = rect;
+		info = hardwall_create(hwt, _IOC_SIZE(a),
+				       (const unsigned char __user *)b);
+		if (IS_ERR(info))
+			return PTR_ERR(info);
+		file->private_data = info;
 		return 0;
 
 	case _HARDWALL_ACTIVATE:
-		return hardwall_activate(rect);
+		return hardwall_activate(info);
 
 	case _HARDWALL_DEACTIVATE:
-		if (current->thread.hardwall != rect)
+		if (current->thread.hardwall[hwt->index].info != info)
 			return -EINVAL;
-		return hardwall_deactivate(current);
+		return hardwall_deactivate(hwt, current);
 
 	case _HARDWALL_GET_ID:
-		return rect ? rect->id : -EINVAL;
+		return info ? info->id : -EINVAL;
 
 	default:
 		return -EINVAL;
@@ -775,26 +1029,28 @@
 /* The user process closed the file; revoke access to user networks. */
 static int hardwall_flush(struct file *file, fl_owner_t owner)
 {
-	struct hardwall_info *rect = file->private_data;
+	struct hardwall_info *info = file->private_data;
 	struct task_struct *task, *tmp;
 	unsigned long flags;
 
-	if (rect) {
+	if (info) {
 		/*
 		 * NOTE: if multiple threads are activated on this hardwall
 		 * file, the other threads will continue having access to the
-		 * UDN until they are context-switched out and back in again.
+		 * user network until they are context-switched out and back
+		 * in again.
 		 *
 		 * NOTE: A NULL files pointer means the task is being torn
 		 * down, so in that case we also deactivate it.
 		 */
-		spin_lock_irqsave(&hardwall_lock, flags);
-		list_for_each_entry_safe(task, tmp, &rect->task_head,
-					 thread.hardwall_list) {
+		struct hardwall_type *hwt = info->type;
+		spin_lock_irqsave(&hwt->lock, flags);
+		list_for_each_entry_safe(task, tmp, &info->task_head,
+					 thread.hardwall[hwt->index].list) {
 			if (task->files == owner || task->files == NULL)
-				_hardwall_deactivate(task);
+				_hardwall_deactivate(hwt, task);
 		}
-		spin_unlock_irqrestore(&hardwall_lock, flags);
+		spin_unlock_irqrestore(&hwt->lock, flags);
 	}
 
 	return 0;
@@ -824,11 +1080,11 @@
 	int rc;
 	dev_t dev;
 
-	rc = alloc_chrdev_region(&dev, 0, 1, "hardwall");
+	rc = alloc_chrdev_region(&dev, 0, HARDWALL_TYPES, "hardwall");
 	if (rc < 0)
 		return rc;
 	cdev_init(&hardwall_dev, &dev_hardwall_fops);
-	rc = cdev_add(&hardwall_dev, dev, 1);
+	rc = cdev_add(&hardwall_dev, dev, HARDWALL_TYPES);
 	if (rc < 0)
 		return rc;
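
One detail worth calling out from the hardwall.c rework above: on rev1 XDN
hardware, drain_xdn_switch() uses info->xdn_pending_count as a rendezvous so
that every core keeps emptying its demuxes until all cores in the hardwall
have finished pushing out their pending messages.  The following user-space
sketch (pthreads/stdatomic, not kernel code) shows the same
decrement-then-keep-draining pattern; NCPUS and drain_once() are arbitrary
stand-ins:

#include <pthread.h>
#include <stdatomic.h>
#include <stdio.h>

#define NCPUS 4

static atomic_int pending = NCPUS;

/* Stand-in for empty_xdn_demuxes(): consume anything queued for this cpu. */
static void drain_once(int cpu)
{
	(void)cpu;
}

static void *drain(void *arg)
{
	int cpu = (int)(long)arg;

	drain_once(cpu);                  /* flush our own input first       */
	atomic_fetch_sub(&pending, 1);    /* announce we are done sending    */
	while (atomic_load(&pending) > 0) /* keep servicing incoming traffic */
		drain_once(cpu);          /* until every cpu has finished    */

	printf("cpu %d drained\n", cpu);
	return NULL;
}

int main(void)
{
	pthread_t t[NCPUS];
	for (long i = 0; i < NCPUS; i++)
		pthread_create(&t[i], NULL, drain, (void *)i);
	for (int i = 0; i < NCPUS; i++)
		pthread_join(t[i], NULL);
	return 0;
}
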
 
diff --git a/arch/tile/kernel/head_32.S b/arch/tile/kernel/head_32.S
index 1a39b7c..f71bfee 100644
--- a/arch/tile/kernel/head_32.S
+++ b/arch/tile/kernel/head_32.S
@@ -69,7 +69,7 @@
 	}
 	{
 	  moveli lr, lo16(1f)
-	  move r5, zero
+	  moveli r5, CTX_PAGE_FLAG
 	}
 	{
 	  auli lr, lr, ha16(1f)
@@ -141,11 +141,11 @@
 
 	.macro PTE va, cpa, bits1, no_org=0
 	.ifeq \no_org
-	.org swapper_pg_dir + HV_L1_INDEX(\va) * HV_PTE_SIZE
+	.org swapper_pg_dir + PGD_INDEX(\va) * HV_PTE_SIZE
 	.endif
 	.word HV_PTE_PAGE | HV_PTE_DIRTY | HV_PTE_PRESENT | HV_PTE_ACCESSED | \
 	      (HV_PTE_MODE_CACHE_NO_L3 << HV_PTE_INDEX_MODE)
-	.word (\bits1) | (HV_CPA_TO_PFN(\cpa) << (HV_PTE_INDEX_PFN - 32))
+	.word (\bits1) | (HV_CPA_TO_PTFN(\cpa) << (HV_PTE_INDEX_PTFN - 32))
 	.endm
 
 __PAGE_ALIGNED_DATA
@@ -166,7 +166,7 @@
 	/* The true text VAs are mapped as VA = PA + MEM_SV_INTRPT */
 	PTE MEM_SV_INTRPT, 0, (1 << (HV_PTE_INDEX_READABLE - 32)) | \
 			      (1 << (HV_PTE_INDEX_EXECUTABLE - 32))
-	.org swapper_pg_dir + HV_L1_SIZE
+	.org swapper_pg_dir + PGDIR_SIZE
 	END(swapper_pg_dir)
 
 	/*
diff --git a/arch/tile/kernel/head_64.S b/arch/tile/kernel/head_64.S
index 6bc3a93..f9a2734 100644
--- a/arch/tile/kernel/head_64.S
+++ b/arch/tile/kernel/head_64.S
@@ -114,7 +114,7 @@
 	  shl16insli r0, r0, hw0(swapper_pg_dir - PAGE_OFFSET)
 	}
 	{
-	  move r3, zero
+	  moveli r3, CTX_PAGE_FLAG
 	  j hv_install_context
 	}
 1:
@@ -210,19 +210,19 @@
 	.macro PTE cpa, bits1
 	.quad HV_PTE_PAGE | HV_PTE_DIRTY | HV_PTE_PRESENT | HV_PTE_ACCESSED |\
 	      HV_PTE_GLOBAL | (HV_PTE_MODE_CACHE_NO_L3 << HV_PTE_INDEX_MODE) |\
-	      (\bits1) | (HV_CPA_TO_PFN(\cpa) << HV_PTE_INDEX_PFN)
+	      (\bits1) | (HV_CPA_TO_PTFN(\cpa) << HV_PTE_INDEX_PTFN)
 	.endm
 
 __PAGE_ALIGNED_DATA
 	.align PAGE_SIZE
 ENTRY(swapper_pg_dir)
-	.org swapper_pg_dir + HV_L0_INDEX(PAGE_OFFSET) * HV_PTE_SIZE
+	.org swapper_pg_dir + PGD_INDEX(PAGE_OFFSET) * HV_PTE_SIZE
 .Lsv_data_pmd:
 	.quad 0  /* PTE temp_data_pmd - PAGE_OFFSET, 0 */
-	.org swapper_pg_dir + HV_L0_INDEX(MEM_SV_START) * HV_PTE_SIZE
+	.org swapper_pg_dir + PGD_INDEX(MEM_SV_START) * HV_PTE_SIZE
 .Lsv_code_pmd:
 	.quad 0  /* PTE temp_code_pmd - PAGE_OFFSET, 0 */
-	.org swapper_pg_dir + HV_L0_SIZE
+	.org swapper_pg_dir + SIZEOF_PGD
 	END(swapper_pg_dir)
 
 	.align HV_PAGE_TABLE_ALIGN
@@ -233,11 +233,11 @@
 	 * permissions later.
 	 */
 	.set addr, 0
-	.rept HV_L1_ENTRIES
+	.rept PTRS_PER_PMD
 	PTE addr, HV_PTE_READABLE | HV_PTE_WRITABLE
-	.set addr, addr + HV_PAGE_SIZE_LARGE
+	.set addr, addr + HPAGE_SIZE
 	.endr
-	.org temp_data_pmd + HV_L1_SIZE
+	.org temp_data_pmd + SIZEOF_PMD
 	END(temp_data_pmd)
 
 	.align HV_PAGE_TABLE_ALIGN
@@ -248,11 +248,11 @@
 	 * permissions later.
 	 */
 	.set addr, 0
-	.rept HV_L1_ENTRIES
+	.rept PTRS_PER_PMD
 	PTE addr, HV_PTE_READABLE | HV_PTE_EXECUTABLE
-	.set addr, addr + HV_PAGE_SIZE_LARGE
+	.set addr, addr + HPAGE_SIZE
 	.endr
-	.org temp_code_pmd + HV_L1_SIZE
+	.org temp_code_pmd + SIZEOF_PMD
 	END(temp_code_pmd)
 
 	/*
diff --git a/arch/tile/kernel/hvglue.lds b/arch/tile/kernel/hvglue.lds
index 2b7cd0a..d44c5a6 100644
--- a/arch/tile/kernel/hvglue.lds
+++ b/arch/tile/kernel/hvglue.lds
@@ -55,4 +55,5 @@
 hv_inquire_realpa = TEXT_OFFSET + 0x106c0;
 hv_flush_all = TEXT_OFFSET + 0x106e0;
 hv_get_ipi_pte = TEXT_OFFSET + 0x10700;
-hv_glue_internals = TEXT_OFFSET + 0x10720;
+hv_set_pte_super_shift = TEXT_OFFSET + 0x10720;
+hv_glue_internals = TEXT_OFFSET + 0x10740;
diff --git a/arch/tile/kernel/intvec_64.S b/arch/tile/kernel/intvec_64.S
index 30ae76e..7c06d59 100644
--- a/arch/tile/kernel/intvec_64.S
+++ b/arch/tile/kernel/intvec_64.S
@@ -220,7 +220,9 @@
 	 * This routine saves just the first four registers, plus the
 	 * stack context so we can do proper backtracing right away,
 	 * and defers to handle_interrupt to save the rest.
-	 * The backtracer needs pc, ex1, lr, sp, r52, and faultnum.
+	 * The backtracer needs pc, ex1, lr, sp, r52, and faultnum,
+	 * and needs sp set to its final location at the bottom of
+	 * the stack frame.
 	 */
 	addli   r0, r0, PTREGS_OFFSET_LR - (PTREGS_SIZE + KSTK_PTREGS_GAP)
 	wh64    r0   /* cache line 7 */
@@ -450,23 +452,6 @@
 	push_reg r5, r52
 	st      r52, r4
 
-	/* Load tp with our per-cpu offset. */
-#ifdef CONFIG_SMP
-	{
-	 mfspr  r20, SPR_SYSTEM_SAVE_K_0
-	 moveli r21, hw2_last(__per_cpu_offset)
-	}
-	{
-	 shl16insli r21, r21, hw1(__per_cpu_offset)
-	 bfextu r20, r20, 0, LOG2_THREAD_SIZE-1
-	}
-	shl16insli r21, r21, hw0(__per_cpu_offset)
-	shl3add r20, r20, r21
-	ld      tp, r20
-#else
-	move    tp, zero
-#endif
-
 	/*
 	 * If we will be returning to the kernel, we will need to
 	 * reset the interrupt masks to the state they had before.
@@ -489,6 +474,44 @@
 	.endif
 	st      r21, r32
 
+	/*
+	 * we've captured enough state to the stack (including in
+	 * particular our EX_CONTEXT state) that we can now release
+	 * the interrupt critical section and replace it with our
+	 * standard "interrupts disabled" mask value.  This allows
+	 * synchronous interrupts (and profile interrupts) to punch
+	 * through from this point onwards.
+	 *
+	 * It's important that no code before this point touch memory
+	 * other than our own stack (to keep the invariant that this
+	 * is all that gets touched under ICS), and that no code after
+	 * this point reference any interrupt-specific SPR, in particular
+	 * the EX_CONTEXT_K_ values.
+	 */
+	.ifc \function,handle_nmi
+	IRQ_DISABLE_ALL(r20)
+	.else
+	IRQ_DISABLE(r20, r21)
+	.endif
+	mtspr   INTERRUPT_CRITICAL_SECTION, zero
+
+	/* Load tp with our per-cpu offset. */
+#ifdef CONFIG_SMP
+	{
+	 mfspr  r20, SPR_SYSTEM_SAVE_K_0
+	 moveli r21, hw2_last(__per_cpu_offset)
+	}
+	{
+	 shl16insli r21, r21, hw1(__per_cpu_offset)
+	 bfextu r20, r20, 0, LOG2_THREAD_SIZE-1
+	}
+	shl16insli r21, r21, hw0(__per_cpu_offset)
+	shl3add r20, r20, r21
+	ld      tp, r20
+#else
+	move    tp, zero
+#endif
+
 #ifdef __COLLECT_LINKER_FEEDBACK__
 	/*
 	 * Notify the feedback routines that we were in the
@@ -513,21 +536,6 @@
 #endif
 
 	/*
-	 * we've captured enough state to the stack (including in
-	 * particular our EX_CONTEXT state) that we can now release
-	 * the interrupt critical section and replace it with our
-	 * standard "interrupts disabled" mask value.  This allows
-	 * synchronous interrupts (and profile interrupts) to punch
-	 * through from this point onwards.
-	 */
-	.ifc \function,handle_nmi
-	IRQ_DISABLE_ALL(r20)
-	.else
-	IRQ_DISABLE(r20, r21)
-	.endif
-	mtspr   INTERRUPT_CRITICAL_SECTION, zero
-
-	/*
 	 * Prepare the first 256 stack bytes to be rapidly accessible
 	 * without having to fetch the background data.
 	 */
@@ -736,9 +744,10 @@
 	beqzt   r30, .Lrestore_regs
 	j       3f
 2:	TRACE_IRQS_ON
+	IRQ_ENABLE_LOAD(r20, r21)
 	movei   r0, 1
 	mtspr   INTERRUPT_CRITICAL_SECTION, r0
-	IRQ_ENABLE(r20, r21)
+	IRQ_ENABLE_APPLY(r20, r21)
 	beqzt   r30, .Lrestore_regs
 3:
 
@@ -755,7 +764,6 @@
 	 * that will save some cycles if this turns out to be a syscall.
 	 */
 .Lrestore_regs:
-	FEEDBACK_REENTER(interrupt_return)   /* called from elsewhere */
 
 	/*
 	 * Rotate so we have one high bit and one low bit to test.
@@ -1249,7 +1257,7 @@
 	int_hand     INT_UNALIGN_DATA, UNALIGN_DATA, int_unalign
 	int_hand     INT_DTLB_MISS, DTLB_MISS, do_page_fault
 	int_hand     INT_DTLB_ACCESS, DTLB_ACCESS, do_page_fault
-	int_hand     INT_IDN_FIREWALL, IDN_FIREWALL, bad_intr
+	int_hand     INT_IDN_FIREWALL, IDN_FIREWALL, do_hardwall_trap
 	int_hand     INT_UDN_FIREWALL, UDN_FIREWALL, do_hardwall_trap
 	int_hand     INT_TILE_TIMER, TILE_TIMER, do_timer_interrupt
 	int_hand     INT_IDN_TIMER, IDN_TIMER, bad_intr
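
The interrupt-entry reordering above (and the matching IRQ_ENABLE_LOAD /
IRQ_ENABLE_APPLY split in entry.S) follows one rule: while the
INTERRUPT_CRITICAL_SECTION SPR is set, touch nothing but the current stack,
so anything that must read other memory (loading the interrupt mask, the
per-cpu offset) is hoisted out of, or deferred past, that window.  A loose
user-space analogy, in which the "window" and the mask register are purely
hypothetical stand-ins:

#include <stdint.h>
#include <stdio.h>

static uint64_t irq_mask_in_memory = 0xff;  /* stand-in for the mask table */
static volatile uint64_t mask_register;     /* stand-in for the SPR        */

/* "LOAD" half: may touch memory, so it runs before the window opens. */
static uint64_t irq_enable_load(void)
{
	return irq_mask_in_memory;
}

/* "APPLY" half: register-only work, safe inside the window. */
static void irq_enable_apply(uint64_t mask)
{
	mask_register = mask;
}

int main(void)
{
	uint64_t mask = irq_enable_load();  /* before: memory access allowed */
	/* ... window opens: nothing but the stack may be touched ... */
	irq_enable_apply(mask);             /* inside: registers only        */
	/* ... window closes ... */
	printf("mask applied: %#llx\n", (unsigned long long)mask_register);
	return 0;
}
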
diff --git a/arch/tile/kernel/machine_kexec.c b/arch/tile/kernel/machine_kexec.c
index 6255f2e..f0b54a9 100644
--- a/arch/tile/kernel/machine_kexec.c
+++ b/arch/tile/kernel/machine_kexec.c
@@ -31,6 +31,8 @@
 #include <asm/pgalloc.h>
 #include <asm/cacheflush.h>
 #include <asm/checksum.h>
+#include <asm/tlbflush.h>
+#include <asm/homecache.h>
 #include <hv/hypervisor.h>
 
 
@@ -222,11 +224,22 @@
 	return alloc_pages_node(0, gfp_mask, order);
 }
 
+/*
+ * Address range in which pa=va mapping is set in setup_quasi_va_is_pa().
+ * For tilepro, PAGE_OFFSET is used since this is the largest possible value
+ * for tilepro, while for tilegx, we limit it to the entire middle-level page
+ * table, which we assume has been allocated and is undoubtedly large enough.
+ */
+#ifndef __tilegx__
+#define	QUASI_VA_IS_PA_ADDR_RANGE PAGE_OFFSET
+#else
+#define	QUASI_VA_IS_PA_ADDR_RANGE PGDIR_SIZE
+#endif
+
 static void setup_quasi_va_is_pa(void)
 {
-	HV_PTE *pgtable;
 	HV_PTE pte;
-	int i;
+	unsigned long i;
 
 	/*
 	 * Flush our TLB to prevent conflicts between the previous contents
@@ -234,16 +247,22 @@
 	 */
 	local_flush_tlb_all();
 
-	/* setup VA is PA, at least up to PAGE_OFFSET */
-
-	pgtable = (HV_PTE *)current->mm->pgd;
+	/*
+	 * setup VA is PA, at least up to QUASI_VA_IS_PA_ADDR_RANGE.
+	 * Note here we assume that level-1 page table is defined by
+	 * HPAGE_SIZE.
+	 */
 	pte = hv_pte(_PAGE_KERNEL | _PAGE_HUGE_PAGE);
 	pte = hv_pte_set_mode(pte, HV_PTE_MODE_CACHE_NO_L3);
-
-	for (i = 0; i < pgd_index(PAGE_OFFSET); i++) {
+	for (i = 0; i < (QUASI_VA_IS_PA_ADDR_RANGE >> HPAGE_SHIFT); i++) {
+		unsigned long vaddr = i << HPAGE_SHIFT;
+		pgd_t *pgd = pgd_offset(current->mm, vaddr);
+		pud_t *pud = pud_offset(pgd, vaddr);
+		pte_t *ptep = (pte_t *) pmd_offset(pud, vaddr);
 		unsigned long pfn = i << (HPAGE_SHIFT - PAGE_SHIFT);
+
 		if (pfn_valid(pfn))
-			__set_pte(&pgtable[i], pfn_pte(pfn, pte));
+			__set_pte(ptep, pfn_pte(pfn, pte));
 	}
 }
 
@@ -251,6 +270,7 @@
 void machine_kexec(struct kimage *image)
 {
 	void *reboot_code_buffer;
+	pte_t *ptep;
 	void (*rnk)(unsigned long, void *, unsigned long)
 		__noreturn;
 
@@ -266,8 +286,10 @@
 	 */
 	homecache_change_page_home(image->control_code_page, 0,
 				   smp_processor_id());
-	reboot_code_buffer = vmap(&image->control_code_page, 1, 0,
-				  __pgprot(_PAGE_KERNEL | _PAGE_EXECUTABLE));
+	reboot_code_buffer = page_address(image->control_code_page);
+	BUG_ON(reboot_code_buffer == NULL);
+	ptep = virt_to_pte(NULL, (unsigned long)reboot_code_buffer);
+	__set_pte(ptep, pte_mkexec(*ptep));
 	memcpy(reboot_code_buffer, relocate_new_kernel,
 	       relocate_new_kernel_size);
 	__flush_icache_range(
diff --git a/arch/tile/kernel/module.c b/arch/tile/kernel/module.c
index 98d4769..001cbfa 100644
--- a/arch/tile/kernel/module.c
+++ b/arch/tile/kernel/module.c
@@ -159,7 +159,17 @@
 
 		switch (ELF_R_TYPE(rel[i].r_info)) {
 
-#define MUNGE(func) (*location = ((*location & ~func(-1)) | func(value)))
+#ifdef __LITTLE_ENDIAN
+# define MUNGE(func) \
+	(*location = ((*location & ~func(-1)) | func(value)))
+#else
+/*
+ * Instructions are always little-endian, so when we read them as data,
+ * we have to swap them around before and after modifying them.
+ */
+# define MUNGE(func) \
+	(*location = swab64((swab64(*location) & ~func(-1)) | func(value)))
+#endif
 
 #ifndef __tilegx__
 		case R_TILE_32:
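
The big-endian MUNGE variant above reads a little-endian instruction word, byte-swaps it into CPU order, patches the relocation field, and swaps it back. Here is a minimal user-space sketch of that read-modify-write (illustrative only; create_imm16() is a hypothetical field encoder standing in for the tile relocation helpers, and __builtin_bswap64 stands in for swab64):

#include <stdint.h>

/* Hypothetical field encoder: place a 16-bit immediate at bits [42:27]. */
static uint64_t create_imm16(uint64_t v)
{
	return (v & 0xffffULL) << 27;
}

/* Patch 'value' into the little-endian instruction at 'location' when
 * running on a big-endian host: swap to CPU order, munge, swap back. */
static void munge_big_endian(uint64_t *location, uint64_t value)
{
	uint64_t insn = __builtin_bswap64(*location);

	insn = (insn & ~create_imm16(-1)) | create_imm16(value);
	*location = __builtin_bswap64(insn);
}
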
diff --git a/arch/tile/kernel/proc.c b/arch/tile/kernel/proc.c
index 446a7f5..dafc447 100644
--- a/arch/tile/kernel/proc.c
+++ b/arch/tile/kernel/proc.c
@@ -22,6 +22,7 @@
 #include <linux/proc_fs.h>
 #include <linux/sysctl.h>
 #include <linux/hardirq.h>
+#include <linux/hugetlb.h>
 #include <linux/mman.h>
 #include <asm/unaligned.h>
 #include <asm/pgtable.h>
diff --git a/arch/tile/kernel/process.c b/arch/tile/kernel/process.c
index f572c19..6be7991 100644
--- a/arch/tile/kernel/process.c
+++ b/arch/tile/kernel/process.c
@@ -128,10 +128,10 @@
 	 * Calling deactivate here just frees up the data structures.
 	 * If the task we're freeing held the last reference to a
 	 * hardwall fd, it would have been released prior to this point
-	 * anyway via exit_files(), and "hardwall" would be NULL by now.
+	 * anyway via exit_files(), and the hardwall_task.info pointers
+	 * would be NULL by now.
 	 */
-	if (info->task->thread.hardwall)
-		hardwall_deactivate(info->task);
+	hardwall_deactivate_all(info->task);
 #endif
 
 	if (step_state) {
@@ -245,7 +245,8 @@
 
 #ifdef CONFIG_HARDWALL
 	/* New thread does not own any networks. */
-	p->thread.hardwall = NULL;
+	memset(&p->thread.hardwall[0], 0,
+	       sizeof(struct hardwall_task) * HARDWALL_TYPES);
 #endif
 
 
@@ -515,12 +516,7 @@
 
 #ifdef CONFIG_HARDWALL
 	/* Enable or disable access to the network registers appropriately. */
-	if (prev->thread.hardwall != NULL) {
-		if (next->thread.hardwall == NULL)
-			restrict_network_mpls();
-	} else if (next->thread.hardwall != NULL) {
-		grant_network_mpls();
-	}
+	hardwall_switch_tasks(prev, next);
 #endif
 
 	/*
@@ -569,8 +565,6 @@
 	if (thread_info_flags & _TIF_NOTIFY_RESUME) {
 		clear_thread_flag(TIF_NOTIFY_RESUME);
 		tracehook_notify_resume(regs);
-		if (current->replacement_session_keyring)
-			key_replace_session_keyring();
 		return 1;
 	}
 	if (thread_info_flags & _TIF_SINGLESTEP) {
diff --git a/arch/tile/kernel/relocate_kernel.S b/arch/tile/kernel/relocate_kernel_32.S
similarity index 100%
rename from arch/tile/kernel/relocate_kernel.S
rename to arch/tile/kernel/relocate_kernel_32.S
diff --git a/arch/tile/kernel/relocate_kernel_64.S b/arch/tile/kernel/relocate_kernel_64.S
new file mode 100644
index 0000000..1c09a4f
--- /dev/null
+++ b/arch/tile/kernel/relocate_kernel_64.S
@@ -0,0 +1,260 @@
+/*
+ * Copyright 2011 Tilera Corporation. All Rights Reserved.
+ *
+ *   This program is free software; you can redistribute it and/or
+ *   modify it under the terms of the GNU General Public License
+ *   as published by the Free Software Foundation, version 2.
+ *
+ *   This program is distributed in the hope that it will be useful, but
+ *   WITHOUT ANY WARRANTY; without even the implied warranty of
+ *   MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
+ *   NON INFRINGEMENT.  See the GNU General Public License for
+ *   more details.
+ *
+ * copy new kernel into place and then call hv_reexec
+ *
+ */
+
+#include <linux/linkage.h>
+#include <arch/chip.h>
+#include <asm/page.h>
+#include <hv/hypervisor.h>
+
+#undef RELOCATE_NEW_KERNEL_VERBOSE
+
+STD_ENTRY(relocate_new_kernel)
+
+	move	r30, r0		/* page list */
+	move	r31, r1		/* address of page we are on */
+	move	r32, r2		/* start address of new kernel */
+
+	shrui	r1, r1, PAGE_SHIFT
+	addi	r1, r1, 1
+	shli	sp, r1, PAGE_SHIFT
+	addi	sp, sp, -8
+	/* we now have a stack (whether we need one or not) */
+
+	moveli	r40, hw2_last(hv_console_putc)
+	shl16insli r40, r40, hw1(hv_console_putc)
+	shl16insli r40, r40, hw0(hv_console_putc)
+
+#ifdef RELOCATE_NEW_KERNEL_VERBOSE
+	moveli	r0, 'r'
+	jalr	r40
+
+	moveli	r0, '_'
+	jalr	r40
+
+	moveli	r0, 'n'
+	jalr	r40
+
+	moveli	r0, '_'
+	jalr	r40
+
+	moveli	r0, 'k'
+	jalr	r40
+
+	moveli	r0, '\n'
+	jalr	r40
+#endif
+
+	/*
+	 * Throughout this code r30 is pointer to the element of page
+	 * list we are working on.
+	 *
+	 * Normally we get to the next element of the page list by
+	 * incrementing r30 by eight.  The exception is if the element
+	 * on the page list is an IND_INDIRECTION in which case we use
+	 * the element with the low bits masked off as the new value
+	 * of r30.
+	 *
+	 * To get this started, we need the value passed to us (which
+	 * will always be an IND_INDIRECTION) in memory somewhere with
+	 * r30 pointing at it.  To do that, we push the value passed
+	 * to us on the stack and make r30 point to it.
+	 */
+
+	st	sp, r30
+	move	r30, sp
+	addi	sp, sp, -16
+
+#if CHIP_HAS_CBOX_HOME_MAP()
+	/*
+	 * On TILE-GX, we need to flush all tiles' caches, since we may
+	 * have been doing hash-for-home caching there.  Note that we
+	 * must do this _after_ we're completely done modifying any memory
+	 * other than our output buffer (which we know is locally cached).
+	 * We want the caches to be fully clean when we do the reexec,
+	 * because the hypervisor is going to do this flush again at that
+	 * point, and we don't want that second flush to overwrite any memory.
+	 */
+	{
+	 move	r0, zero	 /* cache_pa */
+	 moveli	r1, hw2_last(HV_FLUSH_EVICT_L2)
+	}
+	{
+	 shl16insli	r1, r1, hw1(HV_FLUSH_EVICT_L2)
+	 movei	r2, -1		 /* cache_cpumask; -1 means all client tiles */
+	}
+	{
+	 shl16insli	r1, r1, hw0(HV_FLUSH_EVICT_L2)  /* cache_control */
+	 move	r3, zero	 /* tlb_va */
+	}
+	{
+	 move	r4, zero	 /* tlb_length */
+	 move	r5, zero	 /* tlb_pgsize */
+	}
+	{
+	 move	r6, zero	 /* tlb_cpumask */
+	 move	r7, zero	 /* asids */
+	}
+	{
+	 moveli	r20, hw2_last(hv_flush_remote)
+	 move	r8, zero	 /* asidcount */
+	}
+	shl16insli	r20, r20, hw1(hv_flush_remote)
+	shl16insli	r20, r20, hw0(hv_flush_remote)
+
+	jalr	r20
+#endif
+
+	/* r33 is destination pointer, default to zero */
+
+	moveli	r33, 0
+
+.Lloop:	ld	r10, r30
+
+	andi	r9, r10, 0xf	/* low 4 bits tell us what type it is */
+	xor	r10, r10, r9	/* r10 is now value with low 4 bits stripped */
+
+	cmpeqi	r0, r9, 0x1	/* IND_DESTINATION */
+	beqzt	r0, .Ltry2
+
+	move	r33, r10
+
+#ifdef RELOCATE_NEW_KERNEL_VERBOSE
+	moveli	r0, 'd'
+	jalr	r40
+#endif
+
+	addi	r30, r30, 8
+	j	.Lloop
+
+.Ltry2:
+	cmpeqi	r0, r9, 0x2	/* IND_INDIRECTION */
+	beqzt	r0, .Ltry4
+
+	move	r30, r10
+
+#ifdef RELOCATE_NEW_KERNEL_VERBOSE
+	moveli	r0, 'i'
+	jalr	r40
+#endif
+
+	j	.Lloop
+
+.Ltry4:
+	cmpeqi	r0, r9, 0x4	/* IND_DONE */
+	beqzt	r0, .Ltry8
+
+	mf
+
+#ifdef RELOCATE_NEW_KERNEL_VERBOSE
+	moveli	r0, 'D'
+	jalr	r40
+	moveli	r0, '\n'
+	jalr	r40
+#endif
+
+	move	r0, r32
+
+	moveli	r41, hw2_last(hv_reexec)
+	shl16insli	r41, r41, hw1(hv_reexec)
+	shl16insli	r41, r41, hw0(hv_reexec)
+
+	jalr	r41
+
+	/* we should not get here */
+
+	moveli	r0, '?'
+	jalr	r40
+	moveli	r0, '\n'
+	jalr	r40
+
+	j	.Lhalt
+
+.Ltry8:	cmpeqi	r0, r9, 0x8	/* IND_SOURCE */
+	beqz	r0, .Lerr	/* unknown type */
+
+	/* copy page at r10 to page at r33 */
+
+	move	r11, r33
+
+	moveli	r0, hw2_last(PAGE_SIZE)
+	shl16insli	r0, r0, hw1(PAGE_SIZE)
+	shl16insli	r0, r0, hw0(PAGE_SIZE)
+	add	r33, r33, r0
+
+	/* copy word at r10 to word at r11 until r11 equals r33 */
+
+	/* We know the page size must be a multiple of 8 words, so we can unroll
+	 * 8 times safely without any edge case checking.
+	 *
+	 * Issue a flush of the destination every 8 words to avoid
+	 * incoherence when starting the new kernel.  (Now this is
+	 * just good paranoia because the hv_reexec call will also
+	 * take care of this.)
+	 */
+
+1:
+	{ ld	r0, r10; addi	r10, r10, 8 }
+	{ st	r11, r0; addi	r11, r11, 8 }
+	{ ld	r0, r10; addi	r10, r10, 8 }
+	{ st	r11, r0; addi	r11, r11, 8 }
+	{ ld	r0, r10; addi	r10, r10, 8 }
+	{ st	r11, r0; addi	r11, r11, 8 }
+	{ ld	r0, r10; addi	r10, r10, 8 }
+	{ st	r11, r0; addi	r11, r11, 8 }
+	{ ld	r0, r10; addi	r10, r10, 8 }
+	{ st	r11, r0; addi	r11, r11, 8 }
+	{ ld	r0, r10; addi	r10, r10, 8 }
+	{ st	r11, r0; addi	r11, r11, 8 }
+	{ ld	r0, r10; addi	r10, r10, 8 }
+	{ st	r11, r0; addi	r11, r11, 8 }
+	{ ld	r0, r10; addi	r10, r10, 8 }
+	{ st	r11, r0 }
+	{ flush r11    ; addi	r11, r11, 8 }
+
+	cmpeq	r0, r33, r11
+	beqzt	r0, 1b
+
+#ifdef RELOCATE_NEW_KERNEL_VERBOSE
+	moveli	r0, 's'
+	jalr	r40
+#endif
+
+	addi	r30, r30, 8
+	j	.Lloop
+
+
+.Lerr:	moveli	r0, 'e'
+	jalr	r40
+	moveli	r0, 'r'
+	jalr	r40
+	moveli	r0, 'r'
+	jalr	r40
+	moveli	r0, '\n'
+	jalr	r40
+.Lhalt:
+	moveli r41, hw2_last(hv_halt)
+	shl16insli r41, r41, hw1(hv_halt)
+	shl16insli r41, r41, hw0(hv_halt)
+
+	jalr	r41
+	STD_ENDPROC(relocate_new_kernel)
+
+	.section .rodata,"a"
+
+	.globl relocate_new_kernel_size
+relocate_new_kernel_size:
+	.long .Lend_relocate_new_kernel - relocate_new_kernel
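
For readers following the assembly, the page-list walk it implements can be sketched in C roughly as follows (a simplified illustration; the 0x1/0x2/0x4/0x8 tags are kexec's IND_DESTINATION, IND_INDIRECTION, IND_DONE and IND_SOURCE, and jump_to_new_kernel() is a hypothetical stand-in for the hv_reexec call):

#include <string.h>

extern void jump_to_new_kernel(unsigned long start_address);

void walk_kexec_page_list(unsigned long *entry, unsigned long start_address,
			  unsigned long page_size)
{
	unsigned long dest = 0;

	for (;;) {
		unsigned long e = *entry;
		unsigned long type = e & 0xf;	/* low 4 bits encode the type */
		unsigned long addr = e & ~0xfUL;

		switch (type) {
		case 0x1:			/* IND_DESTINATION */
			dest = addr;
			entry++;
			break;
		case 0x2:			/* IND_INDIRECTION */
			entry = (unsigned long *)addr;
			break;
		case 0x4:			/* IND_DONE */
			jump_to_new_kernel(start_address);
			return;
		case 0x8:			/* IND_SOURCE */
			memcpy((void *)dest, (void *)addr, page_size);
			dest += page_size;
			entry++;
			break;
		default:
			return;			/* unknown entry type */
		}
	}
}
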
diff --git a/arch/tile/kernel/setup.c b/arch/tile/kernel/setup.c
index 98d80eb..dd87f34 100644
--- a/arch/tile/kernel/setup.c
+++ b/arch/tile/kernel/setup.c
@@ -28,6 +28,8 @@
 #include <linux/highmem.h>
 #include <linux/smp.h>
 #include <linux/timex.h>
+#include <linux/hugetlb.h>
+#include <linux/start_kernel.h>
 #include <asm/setup.h>
 #include <asm/sections.h>
 #include <asm/cacheflush.h>
@@ -49,9 +51,6 @@
 struct pglist_data node_data[MAX_NUMNODES] __read_mostly;
 EXPORT_SYMBOL(node_data);
 
-/* We only create bootmem data on node 0. */
-static bootmem_data_t __initdata node0_bdata;
-
 /* Information on the NUMA nodes that we compute early */
 unsigned long __cpuinitdata node_start_pfn[MAX_NUMNODES];
 unsigned long __cpuinitdata node_end_pfn[MAX_NUMNODES];
@@ -534,37 +533,96 @@
 #endif
 }
 
-static void __init setup_bootmem_allocator(void)
+/*
+ * On 32-bit machines, we only put bootmem on the low controller,
+ * since PAs > 4GB can't be used in bootmem.  In principle one could
+ * imagine, e.g., multiple 1 GB controllers all of which could support
+ * bootmem, but in practice using controllers this small isn't a
+ * particularly interesting scenario, so we just keep it simple and
+ * use only the first controller for bootmem on 32-bit machines.
+ */
+static inline int node_has_bootmem(int nid)
 {
-	unsigned long bootmap_size, first_alloc_pfn, last_alloc_pfn;
-
-	/* Provide a node 0 bdata. */
-	NODE_DATA(0)->bdata = &node0_bdata;
-
-#ifdef CONFIG_PCI
-	/* Don't let boot memory alias the PCI region. */
-	last_alloc_pfn = min(max_low_pfn, pci_reserve_start_pfn);
+#ifdef CONFIG_64BIT
+	return 1;
 #else
-	last_alloc_pfn = max_low_pfn;
+	return nid == 0;
+#endif
+}
+
+static inline unsigned long alloc_bootmem_pfn(int nid,
+					      unsigned long size,
+					      unsigned long goal)
+{
+	void *kva = __alloc_bootmem_node(NODE_DATA(nid), size,
+					 PAGE_SIZE, goal);
+	unsigned long pfn = kaddr_to_pfn(kva);
+	BUG_ON(goal && PFN_PHYS(pfn) != goal);
+	return pfn;
+}
+
+static void __init setup_bootmem_allocator_node(int i)
+{
+	unsigned long start, end, mapsize, mapstart;
+
+	if (node_has_bootmem(i)) {
+		NODE_DATA(i)->bdata = &bootmem_node_data[i];
+	} else {
+		/* Share controller zero's bdata for now. */
+		NODE_DATA(i)->bdata = &bootmem_node_data[0];
+		return;
+	}
+
+	/* Skip up to after the bss in node 0. */
+	start = (i == 0) ? min_low_pfn : node_start_pfn[i];
+
+	/* Only lowmem, if we're a HIGHMEM build. */
+#ifdef CONFIG_HIGHMEM
+	end = node_lowmem_end_pfn[i];
+#else
+	end = node_end_pfn[i];
 #endif
 
-	/*
-	 * Initialize the boot-time allocator (with low memory only):
-	 * The first argument says where to put the bitmap, and the
-	 * second says where the end of allocatable memory is.
-	 */
-	bootmap_size = init_bootmem(min_low_pfn, last_alloc_pfn);
+	/* No memory here. */
+	if (end == start)
+		return;
 
-	/*
-	 * Let the bootmem allocator use all the space we've given it
-	 * except for its own bitmap.
-	 */
-	first_alloc_pfn = min_low_pfn + PFN_UP(bootmap_size);
-	if (first_alloc_pfn >= last_alloc_pfn)
-		early_panic("Not enough memory on controller 0 for bootmem\n");
+	/* Figure out where the bootmem bitmap is located. */
+	mapsize = bootmem_bootmap_pages(end - start);
+	if (i == 0) {
+		/* Use some space right before the heap on node 0. */
+		mapstart = start;
+		start += mapsize;
+	} else {
+		/* Allocate bitmap on node 0 to avoid page table issues. */
+		mapstart = alloc_bootmem_pfn(0, PFN_PHYS(mapsize), 0);
+	}
 
-	free_bootmem(PFN_PHYS(first_alloc_pfn),
-		     PFN_PHYS(last_alloc_pfn - first_alloc_pfn));
+	/* Initialize a node. */
+	init_bootmem_node(NODE_DATA(i), mapstart, start, end);
+
+	/* Free all the space back into the allocator. */
+	free_bootmem(PFN_PHYS(start), PFN_PHYS(end - start));
+
+#if defined(CONFIG_PCI)
+	/*
+	 * Throw away any memory aliased by the PCI region.  FIXME: this
+	 * is a temporary hack to work around bug 10502, and needs to be
+	 * fixed properly.
+	 */
+	if (pci_reserve_start_pfn < end && pci_reserve_end_pfn > start)
+		reserve_bootmem(PFN_PHYS(pci_reserve_start_pfn),
+				PFN_PHYS(pci_reserve_end_pfn -
+					 pci_reserve_start_pfn),
+				BOOTMEM_EXCLUSIVE);
+#endif
+}
+
+static void __init setup_bootmem_allocator(void)
+{
+	int i;
+	for (i = 0; i < MAX_NUMNODES; ++i)
+		setup_bootmem_allocator_node(i);
 
 #ifdef CONFIG_KEXEC
 	if (crashk_res.start != crashk_res.end)
@@ -595,14 +653,6 @@
 	return size;
 }
 
-static inline unsigned long alloc_bootmem_pfn(int size, unsigned long goal)
-{
-	void *kva = __alloc_bootmem(size, PAGE_SIZE, goal);
-	unsigned long pfn = kaddr_to_pfn(kva);
-	BUG_ON(goal && PFN_PHYS(pfn) != goal);
-	return pfn;
-}
-
 static void __init zone_sizes_init(void)
 {
 	unsigned long zones_size[MAX_NR_ZONES] = { 0 };
@@ -640,21 +690,22 @@
 		 * though, there'll be no lowmem, so we just alloc_bootmem
 		 * the memmap.  There will be no percpu memory either.
 		 */
-		if (__pfn_to_highbits(start) == 0) {
-			/* In low PAs, allocate via bootmem. */
+		if (i != 0 && cpu_isset(i, isolnodes)) {
+			node_memmap_pfn[i] =
+				alloc_bootmem_pfn(0, memmap_size, 0);
+			BUG_ON(node_percpu[i] != 0);
+		} else if (node_has_bootmem(start)) {
 			unsigned long goal = 0;
 			node_memmap_pfn[i] =
-				alloc_bootmem_pfn(memmap_size, goal);
+				alloc_bootmem_pfn(i, memmap_size, 0);
 			if (kdata_huge)
 				goal = PFN_PHYS(lowmem_end) - node_percpu[i];
 			if (node_percpu[i])
 				node_percpu_pfn[i] =
-				    alloc_bootmem_pfn(node_percpu[i], goal);
-		} else if (cpu_isset(i, isolnodes)) {
-			node_memmap_pfn[i] = alloc_bootmem_pfn(memmap_size, 0);
-			BUG_ON(node_percpu[i] != 0);
+					alloc_bootmem_pfn(i, node_percpu[i],
+							  goal);
 		} else {
-			/* In high PAs, just reserve some pages. */
+			/* In non-bootmem zones, just reserve some pages. */
 			node_memmap_pfn[i] = node_free_pfn[i];
 			node_free_pfn[i] += PFN_UP(memmap_size);
 			if (!kdata_huge) {
@@ -678,16 +729,9 @@
 		zones_size[ZONE_NORMAL] = end - start;
 #endif
 
-		/*
-		 * Everyone shares node 0's bootmem allocator, but
-		 * we use alloc_remap(), above, to put the actual
-		 * struct page array on the individual controllers,
-		 * which is most of the data that we actually care about.
-		 * We can't place bootmem allocators on the other
-		 * controllers since the bootmem allocator can only
-		 * operate on 32-bit physical addresses.
-		 */
-		NODE_DATA(i)->bdata = NODE_DATA(0)->bdata;
+		/* Take zone metadata from controller 0 if we're isolnode. */
+		if (node_isset(i, isolnodes))
+			NODE_DATA(i)->bdata = &bootmem_node_data[0];
 
 		free_area_init_node(i, zones_size, start, NULL);
 		printk(KERN_DEBUG "  Normal zone: %ld per-cpu pages\n",
@@ -870,6 +914,22 @@
 
 #endif /* CONFIG_NUMA */
 
+/*
+ * Initialize hugepage support on this cpu.  We do this on all cores
+ * early in boot: before argument parsing for the boot cpu, and after
+ * argument parsing but before the init functions run on the secondaries.
+ * So the values we set up here in the hypervisor may be overridden on
+ * the boot cpu as arguments are parsed.
+ */
+static __cpuinit void init_super_pages(void)
+{
+#ifdef CONFIG_HUGETLB_SUPER_PAGES
+	int i;
+	for (i = 0; i < HUGE_SHIFT_ENTRIES; ++i)
+		hv_set_pte_super_shift(i, huge_shift[i]);
+#endif
+}
+
 /**
  * setup_cpu() - Do all necessary per-cpu, tile-specific initialization.
  * @boot: Is this the boot cpu?
@@ -924,6 +984,8 @@
 	/* Reset the network state on this cpu. */
 	reset_network_state();
 #endif
+
+	init_super_pages();
 }
 
 #ifdef CONFIG_BLK_DEV_INITRD
@@ -1412,13 +1474,13 @@
 		for (i = 0; i < size; i += PAGE_SIZE, ++pfn, ++pg) {
 
 			/* Update the vmalloc mapping and page home. */
-			pte_t *ptep =
-				virt_to_pte(NULL, (unsigned long)ptr + i);
+			unsigned long addr = (unsigned long)ptr + i;
+			pte_t *ptep = virt_to_pte(NULL, addr);
 			pte_t pte = *ptep;
 			BUG_ON(pfn != pte_pfn(pte));
 			pte = hv_pte_set_mode(pte, HV_PTE_MODE_CACHE_TILE_L3);
 			pte = set_remote_cache_cpu(pte, cpu);
-			set_pte(ptep, pte);
+			set_pte_at(&init_mm, addr, ptep, pte);
 
 			/* Update the lowmem mapping for consistency. */
 			lowmem_va = (unsigned long)pfn_to_kaddr(pfn);
@@ -1431,7 +1493,7 @@
 				BUG_ON(pte_huge(*ptep));
 			}
 			BUG_ON(pfn != pte_pfn(*ptep));
-			set_pte(ptep, pte);
+			set_pte_at(&init_mm, lowmem_va, ptep, pte);
 		}
 	}
 
diff --git a/arch/tile/kernel/signal.c b/arch/tile/kernel/signal.c
index f79d4b8..e29b055 100644
--- a/arch/tile/kernel/signal.c
+++ b/arch/tile/kernel/signal.c
@@ -37,8 +37,6 @@
 
 #define DEBUG_SIG 0
 
-#define _BLOCKABLE (~(sigmask(SIGKILL) | sigmask(SIGSTOP)))
-
 SYSCALL_DEFINE3(sigaltstack, const stack_t __user *, uss,
 		stack_t __user *, uoss, struct pt_regs *, regs)
 {
@@ -96,7 +94,6 @@
 	if (__copy_from_user(&set, &frame->uc.uc_sigmask, sizeof(set)))
 		goto badframe;
 
-	sigdelsetmask(&set, ~_BLOCKABLE);
 	set_current_blocked(&set);
 
 	if (restore_sigcontext(regs, &frame->uc.uc_mcontext))
@@ -242,10 +239,11 @@
  * OK, we're invoking a handler
  */
 
-static int handle_signal(unsigned long sig, siginfo_t *info,
-			 struct k_sigaction *ka, sigset_t *oldset,
+static void handle_signal(unsigned long sig, siginfo_t *info,
+			 struct k_sigaction *ka,
 			 struct pt_regs *regs)
 {
+	sigset_t *oldset = sigmask_to_save();
 	int ret;
 
 	/* Are we from a system call? */
@@ -278,15 +276,9 @@
 	else
 #endif
 		ret = setup_rt_frame(sig, ka, info, oldset, regs);
-	if (ret == 0) {
-		/* This code is only called from system calls or from
-		 * the work_pending path in the return-to-user code, and
-		 * either way we can re-enable interrupts unconditionally.
-		 */
-		block_sigmask(ka, sig);
-	}
-
-	return ret;
+	if (ret)
+		return;
+	signal_delivered(sig, info, ka, regs, 0);
 }
 
 /*
@@ -299,7 +291,6 @@
 	siginfo_t info;
 	int signr;
 	struct k_sigaction ka;
-	sigset_t *oldset;
 
 	/*
 	 * i386 will check if we're coming from kernel mode and bail out
@@ -308,24 +299,10 @@
 	 * helpful, we can reinstate the check on "!user_mode(regs)".
 	 */
 
-	if (current_thread_info()->status & TS_RESTORE_SIGMASK)
-		oldset = &current->saved_sigmask;
-	else
-		oldset = &current->blocked;
-
 	signr = get_signal_to_deliver(&info, &ka, regs, NULL);
 	if (signr > 0) {
 		/* Whee! Actually deliver the signal.  */
-		if (handle_signal(signr, &info, &ka, oldset, regs) == 0) {
-			/*
-			 * A signal was successfully delivered; the saved
-			 * sigmask will have been stored in the signal frame,
-			 * and will be restored by sigreturn, so we can simply
-			 * clear the TS_RESTORE_SIGMASK flag.
-			 */
-			current_thread_info()->status &= ~TS_RESTORE_SIGMASK;
-		}
-
+		handle_signal(signr, &info, &ka, regs);
 		goto done;
 	}
 
@@ -350,10 +327,7 @@
 	}
 
 	/* If there's no signal to deliver, just put the saved sigmask back. */
-	if (current_thread_info()->status & TS_RESTORE_SIGMASK) {
-		current_thread_info()->status &= ~TS_RESTORE_SIGMASK;
-		sigprocmask(SIG_SETMASK, &current->saved_sigmask, NULL);
-	}
+	restore_saved_sigmask();
 
 done:
 	/* Avoid double syscall restart if there are nested signals. */
diff --git a/arch/tile/kernel/single_step.c b/arch/tile/kernel/single_step.c
index 89529c9..27742e8 100644
--- a/arch/tile/kernel/single_step.c
+++ b/arch/tile/kernel/single_step.c
@@ -172,9 +172,6 @@
 		return (tilepro_bundle_bits) 0;
 	}
 
-#ifndef __LITTLE_ENDIAN
-# error We assume little-endian representation with copy_xx_user size 2 here
-#endif
 	/* Handle unaligned load/store */
 	if (mem_op == MEMOP_LOAD || mem_op == MEMOP_LOAD_POSTINCR) {
 		unsigned short val_16;
@@ -195,8 +192,19 @@
 			state->update = 1;
 		}
 	} else {
+		unsigned short val_16;
 		val = (val_reg == TREG_ZERO) ? 0 : regs->regs[val_reg];
-		err = copy_to_user(addr, &val, size);
+		switch (size) {
+		case 2:
+			val_16 = val;
+			err = copy_to_user(addr, &val_16, sizeof(val_16));
+			break;
+		case 4:
+			err = copy_to_user(addr, &val, sizeof(val));
+			break;
+		default:
+			BUG();
+		}
 	}
 
 	if (err) {
diff --git a/arch/tile/kernel/smp.c b/arch/tile/kernel/smp.c
index 91da0f7..cbc73a8 100644
--- a/arch/tile/kernel/smp.c
+++ b/arch/tile/kernel/smp.c
@@ -203,7 +203,7 @@
 		if (hv_get_ipi_pte(tile, KERNEL_PL, &pte) != 0)
 			panic("Failed to initialize IPI for cpu %d\n", cpu);
 
-		offset = hv_pte_get_pfn(pte) << PAGE_SHIFT;
+		offset = PFN_PHYS(pte_pfn(pte));
 		ipi_mappings[cpu] = ioremap_prot(offset, PAGE_SIZE, pte);
 	}
 #endif
diff --git a/arch/tile/kernel/sys.c b/arch/tile/kernel/sys.c
index cb44ba7..b08095b 100644
--- a/arch/tile/kernel/sys.c
+++ b/arch/tile/kernel/sys.c
@@ -32,11 +32,17 @@
 #include <asm/syscalls.h>
 #include <asm/pgtable.h>
 #include <asm/homecache.h>
+#include <asm/cachectl.h>
 #include <arch/chip.h>
 
-SYSCALL_DEFINE0(flush_cache)
+SYSCALL_DEFINE3(cacheflush, unsigned long, addr, unsigned long, len,
+		unsigned long, flags)
 {
-	homecache_evict(cpumask_of(smp_processor_id()));
+	if (flags & DCACHE)
+		homecache_evict(cpumask_of(smp_processor_id()));
+	if (flags & ICACHE)
+		flush_remote(0, HV_FLUSH_EVICT_L1I, mm_cpumask(current->mm),
+			     0, 0, 0, NULL, NULL, 0);
 	return 0;
 }
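
A user-space sketch of how the new cacheflush syscall might be invoked, for example after writing generated code into a buffer (illustrative only; it assumes __NR_cacheflush and the DCACHE/ICACHE flag bits are exported by the installed tile syscall and <asm/cachectl.h> headers):

#include <unistd.h>
#include <sys/syscall.h>	/* __NR_cacheflush (assumed present) */
#include <asm/cachectl.h>	/* DCACHE, ICACHE (assumed present) */

/* Make freshly written instructions visible: flush the data cache and
 * invalidate the instruction cache after writing code into a buffer. */
static int flush_code_buffer(void *addr, unsigned long len)
{
	return syscall(__NR_cacheflush, addr, len, DCACHE | ICACHE);
}
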
 
diff --git a/arch/tile/kernel/sysfs.c b/arch/tile/kernel/sysfs.c
index 71ae728..e25b0a8 100644
--- a/arch/tile/kernel/sysfs.c
+++ b/arch/tile/kernel/sysfs.c
@@ -93,6 +93,10 @@
 HV_CONF_ATTR(mezz_serial,	HV_CONFSTR_MEZZ_SERIAL_NUM)
 HV_CONF_ATTR(mezz_revision,	HV_CONFSTR_MEZZ_REV)
 HV_CONF_ATTR(mezz_description,	HV_CONFSTR_MEZZ_DESC)
+HV_CONF_ATTR(cpumod_part,	HV_CONFSTR_CPUMOD_PART_NUM)
+HV_CONF_ATTR(cpumod_serial,	HV_CONFSTR_CPUMOD_SERIAL_NUM)
+HV_CONF_ATTR(cpumod_revision,	HV_CONFSTR_CPUMOD_REV)
+HV_CONF_ATTR(cpumod_description,HV_CONFSTR_CPUMOD_DESC)
 HV_CONF_ATTR(switch_control,	HV_CONFSTR_SWITCH_CONTROL)
 
 static struct attribute *board_attrs[] = {
@@ -104,6 +108,10 @@
 	&dev_attr_mezz_serial.attr,
 	&dev_attr_mezz_revision.attr,
 	&dev_attr_mezz_description.attr,
+	&dev_attr_cpumod_part.attr,
+	&dev_attr_cpumod_serial.attr,
+	&dev_attr_cpumod_revision.attr,
+	&dev_attr_cpumod_description.attr,
 	&dev_attr_switch_control.attr,
 	NULL
 };
diff --git a/arch/tile/kernel/tlb.c b/arch/tile/kernel/tlb.c
index a5f241c..3fd54d5 100644
--- a/arch/tile/kernel/tlb.c
+++ b/arch/tile/kernel/tlb.c
@@ -15,6 +15,7 @@
 
 #include <linux/cpumask.h>
 #include <linux/module.h>
+#include <linux/hugetlb.h>
 #include <asm/tlbflush.h>
 #include <asm/homecache.h>
 #include <hv/hypervisor.h>
@@ -49,25 +50,25 @@
 	flush_tlb_mm(current->mm);
 }
 
-void flush_tlb_page_mm(const struct vm_area_struct *vma, struct mm_struct *mm,
+void flush_tlb_page_mm(struct vm_area_struct *vma, struct mm_struct *mm,
 		       unsigned long va)
 {
-	unsigned long size = hv_page_size(vma);
+	unsigned long size = vma_kernel_pagesize(vma);
 	int cache = (vma->vm_flags & VM_EXEC) ? HV_FLUSH_EVICT_L1I : 0;
 	flush_remote(0, cache, mm_cpumask(mm),
 		     va, size, size, mm_cpumask(mm), NULL, 0);
 }
 
-void flush_tlb_page(const struct vm_area_struct *vma, unsigned long va)
+void flush_tlb_page(struct vm_area_struct *vma, unsigned long va)
 {
 	flush_tlb_page_mm(vma, vma->vm_mm, va);
 }
 EXPORT_SYMBOL(flush_tlb_page);
 
-void flush_tlb_range(const struct vm_area_struct *vma,
+void flush_tlb_range(struct vm_area_struct *vma,
 		     unsigned long start, unsigned long end)
 {
-	unsigned long size = hv_page_size(vma);
+	unsigned long size = vma_kernel_pagesize(vma);
 	struct mm_struct *mm = vma->vm_mm;
 	int cache = (vma->vm_flags & VM_EXEC) ? HV_FLUSH_EVICT_L1I : 0;
 	flush_remote(0, cache, mm_cpumask(mm), start, end - start, size,
diff --git a/arch/tile/kernel/traps.c b/arch/tile/kernel/traps.c
index 73cff81..5b19a23 100644
--- a/arch/tile/kernel/traps.c
+++ b/arch/tile/kernel/traps.c
@@ -195,6 +195,25 @@
 	return 1;
 }
 
+static const char *const int_name[] = {
+	[INT_MEM_ERROR] = "Memory error",
+	[INT_ILL] = "Illegal instruction",
+	[INT_GPV] = "General protection violation",
+	[INT_UDN_ACCESS] = "UDN access",
+	[INT_IDN_ACCESS] = "IDN access",
+#if CHIP_HAS_SN()
+	[INT_SN_ACCESS] = "SN access",
+#endif
+	[INT_SWINT_3] = "Software interrupt 3",
+	[INT_SWINT_2] = "Software interrupt 2",
+	[INT_SWINT_0] = "Software interrupt 0",
+	[INT_UNALIGN_DATA] = "Unaligned data",
+	[INT_DOUBLE_FAULT] = "Double fault",
+#ifdef __tilegx__
+	[INT_ILL_TRANS] = "Illegal virtual address",
+#endif
+};
+
 void __kprobes do_trap(struct pt_regs *regs, int fault_num,
 		       unsigned long reason)
 {
@@ -211,10 +230,17 @@
 	 * current process and hope for the best.
 	 */
 	if (!user_mode(regs)) {
+		const char *name;
 		if (fixup_exception(regs))  /* only UNALIGN_DATA in practice */
 			return;
-		pr_alert("Kernel took bad trap %d at PC %#lx\n",
-		       fault_num, regs->pc);
+		if (fault_num >= 0 &&
+		    fault_num < sizeof(int_name)/sizeof(int_name[0]) &&
+		    int_name[fault_num] != NULL)
+			name = int_name[fault_num];
+		else
+			name = "Unknown interrupt";
+		pr_alert("Kernel took bad trap %d (%s) at PC %#lx\n",
+			 fault_num, name, regs->pc);
 		if (fault_num == INT_GPV)
 			pr_alert("GPV_REASON is %#lx\n", reason);
 		show_regs(regs);
diff --git a/arch/tile/lib/atomic_32.c b/arch/tile/lib/atomic_32.c
index 771b251..f5cada7 100644
--- a/arch/tile/lib/atomic_32.c
+++ b/arch/tile/lib/atomic_32.c
@@ -18,7 +18,6 @@
 #include <linux/module.h>
 #include <linux/mm.h>
 #include <linux/atomic.h>
-#include <asm/futex.h>
 #include <arch/chip.h>
 
 /* See <asm/atomic_32.h> */
@@ -50,7 +49,7 @@
 
 #endif /* ATOMIC_LOCKS_FOUND_VIA_TABLE() */
 
-static inline int *__atomic_hashed_lock(volatile void *v)
+int *__atomic_hashed_lock(volatile void *v)
 {
 	/* NOTE: this code must match "sys_cmpxchg" in kernel/intvec_32.S */
 #if ATOMIC_LOCKS_FOUND_VIA_TABLE()
@@ -191,47 +190,6 @@
 EXPORT_SYMBOL(_atomic64_cmpxchg);
 
 
-static inline int *__futex_setup(int __user *v)
-{
-	/*
-	 * Issue a prefetch to the counter to bring it into cache.
-	 * As for __atomic_setup, but we can't do a read into the L1
-	 * since it might fault; instead we do a prefetch into the L2.
-	 */
-	__insn_prefetch(v);
-	return __atomic_hashed_lock((int __force *)v);
-}
-
-struct __get_user futex_set(u32 __user *v, int i)
-{
-	return __atomic_xchg((int __force *)v, __futex_setup(v), i);
-}
-
-struct __get_user futex_add(u32 __user *v, int n)
-{
-	return __atomic_xchg_add((int __force *)v, __futex_setup(v), n);
-}
-
-struct __get_user futex_or(u32 __user *v, int n)
-{
-	return __atomic_or((int __force *)v, __futex_setup(v), n);
-}
-
-struct __get_user futex_andn(u32 __user *v, int n)
-{
-	return __atomic_andn((int __force *)v, __futex_setup(v), n);
-}
-
-struct __get_user futex_xor(u32 __user *v, int n)
-{
-	return __atomic_xor((int __force *)v, __futex_setup(v), n);
-}
-
-struct __get_user futex_cmpxchg(u32 __user *v, int o, int n)
-{
-	return __atomic_cmpxchg((int __force *)v, __futex_setup(v), o, n);
-}
-
 /*
  * If any of the atomic or futex routines hit a bad address (not in
  * the page tables at kernel PL) this routine is called.  The futex
@@ -323,7 +281,4 @@
 	BUILD_BUG_ON((PAGE_SIZE >> 3) > ATOMIC_HASH_SIZE);
 
 #endif /* ATOMIC_LOCKS_FOUND_VIA_TABLE() */
-
-	/* The futex code makes this assumption, so we validate it here. */
-	BUILD_BUG_ON(sizeof(atomic_t) != sizeof(int));
 }
diff --git a/arch/tile/lib/exports.c b/arch/tile/lib/exports.c
index 2a81d32..dd5f0a3 100644
--- a/arch/tile/lib/exports.c
+++ b/arch/tile/lib/exports.c
@@ -18,14 +18,6 @@
 
 /* arch/tile/lib/usercopy.S */
 #include <linux/uaccess.h>
-EXPORT_SYMBOL(__get_user_1);
-EXPORT_SYMBOL(__get_user_2);
-EXPORT_SYMBOL(__get_user_4);
-EXPORT_SYMBOL(__get_user_8);
-EXPORT_SYMBOL(__put_user_1);
-EXPORT_SYMBOL(__put_user_2);
-EXPORT_SYMBOL(__put_user_4);
-EXPORT_SYMBOL(__put_user_8);
 EXPORT_SYMBOL(strnlen_user_asm);
 EXPORT_SYMBOL(strncpy_from_user_asm);
 EXPORT_SYMBOL(clear_user_asm);
diff --git a/arch/tile/lib/memchr_64.c b/arch/tile/lib/memchr_64.c
index 84fdc8d..6f867db 100644
--- a/arch/tile/lib/memchr_64.c
+++ b/arch/tile/lib/memchr_64.c
@@ -15,6 +15,7 @@
 #include <linux/types.h>
 #include <linux/string.h>
 #include <linux/module.h>
+#include "string-endian.h"
 
 void *memchr(const void *s, int c, size_t n)
 {
@@ -39,11 +40,8 @@
 
 	/* Read the first word, but munge it so that bytes before the array
 	 * will not match goal.
-	 *
-	 * Note that this shift count expression works because we know
-	 * shift counts are taken mod 64.
 	 */
-	before_mask = (1ULL << (s_int << 3)) - 1;
+	before_mask = MASK(s_int);
 	v = (*p | before_mask) ^ (goal & before_mask);
 
 	/* Compute the address of the last byte. */
@@ -65,7 +63,7 @@
 	/* We found a match, but it might be in a byte past the end
 	 * of the array.
 	 */
-	ret = ((char *)p) + (__insn_ctz(bits) >> 3);
+	ret = ((char *)p) + (CFZ(bits) >> 3);
 	return (ret <= last_byte_ptr) ? ret : NULL;
 }
 EXPORT_SYMBOL(memchr);
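
The word-at-a-time search above builds a per-byte match mask and then counts zero bits to find the first matching byte. A portable sketch of the little-endian case (illustrative only; bytes_equal() approximates the tile v1cmpeqi instruction and __builtin_ctzll stands in for CFZ):

#include <stdint.h>

/* Rough equivalent of v1cmpeqi: set the low bit of byte i of the result
 * when byte i of 'v' equals 'c'. */
static uint64_t bytes_equal(uint64_t v, uint8_t c)
{
	uint64_t r = 0;
	int i;

	for (i = 0; i < 8; i++)
		if (((v >> (8 * i)) & 0xff) == c)
			r |= 1ULL << (8 * i);
	return r;
}

/* Index of the first matching byte in 'v' (little-endian byte order),
 * or 8 if no byte matches. */
static int first_match(uint64_t v, uint8_t c)
{
	uint64_t bits = bytes_equal(v, c);

	return bits ? __builtin_ctzll(bits) >> 3 : 8;
}
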
diff --git a/arch/tile/lib/memcpy_64.c b/arch/tile/lib/memcpy_64.c
index 3fab9a6..c79b8e7 100644
--- a/arch/tile/lib/memcpy_64.c
+++ b/arch/tile/lib/memcpy_64.c
@@ -15,7 +15,6 @@
 #include <linux/types.h>
 #include <linux/string.h>
 #include <linux/module.h>
-#define __memcpy memcpy
 /* EXPORT_SYMBOL() is in arch/tile/lib/exports.c since this should be asm. */
 
 /* Must be 8 bytes in size. */
@@ -188,6 +187,7 @@
 
 	/* n != 0 if we get here.  Write out any trailing bytes. */
 	dst1 = (char *)dst8;
+#ifndef __BIG_ENDIAN__
 	if (n & 4) {
 		ST4((uint32_t *)dst1, final);
 		dst1 += 4;
@@ -202,11 +202,26 @@
 	}
 	if (n)
 		ST1((uint8_t *)dst1, final);
+#else
+	if (n & 4) {
+		ST4((uint32_t *)dst1, final >> 32);
+		dst1 += 4;
+	} else {
+		final >>= 32;
+	}
+	if (n & 2) {
+		ST2((uint16_t *)dst1, final >> 16);
+		dst1 += 2;
+	} else {
+		final >>= 16;
+	}
+	if (n & 1)
+		ST1((uint8_t *)dst1, final >> 8);
+#endif
 
 	return RETVAL;
 }
 
-
 #ifdef USERCOPY_FUNC
 #undef ST1
 #undef ST2
diff --git a/arch/tile/lib/memcpy_tile64.c b/arch/tile/lib/memcpy_tile64.c
index b2fe15e..3bc4b4e 100644
--- a/arch/tile/lib/memcpy_tile64.c
+++ b/arch/tile/lib/memcpy_tile64.c
@@ -160,7 +160,7 @@
 			break;
 		if (get_remote_cache_cpu(src_pte) == smp_processor_id())
 			break;
-		src_page = pfn_to_page(hv_pte_get_pfn(src_pte));
+		src_page = pfn_to_page(pte_pfn(src_pte));
 		get_page(src_page);
 		if (pte_val(src_pte) != pte_val(*src_ptep)) {
 			put_page(src_page);
@@ -168,7 +168,7 @@
 		}
 		if (pte_huge(src_pte)) {
 			/* Adjust the PTE to correspond to a small page */
-			int pfn = hv_pte_get_pfn(src_pte);
+			int pfn = pte_pfn(src_pte);
 			pfn += (((unsigned long)source & (HPAGE_SIZE-1))
 				>> PAGE_SHIFT);
 			src_pte = pfn_pte(pfn, src_pte);
@@ -188,7 +188,7 @@
 			put_page(src_page);
 			break;
 		}
-		dst_page = pfn_to_page(hv_pte_get_pfn(dst_pte));
+		dst_page = pfn_to_page(pte_pfn(dst_pte));
 		if (dst_page == src_page) {
 			/*
 			 * Source and dest are on the same page; this
@@ -206,7 +206,7 @@
 		}
 		if (pte_huge(dst_pte)) {
 			/* Adjust the PTE to correspond to a small page */
-			int pfn = hv_pte_get_pfn(dst_pte);
+			int pfn = pte_pfn(dst_pte);
 			pfn += (((unsigned long)dest & (HPAGE_SIZE-1))
 				>> PAGE_SHIFT);
 			dst_pte = pfn_pte(pfn, dst_pte);
diff --git a/arch/tile/lib/strchr_64.c b/arch/tile/lib/strchr_64.c
index 617a927..f39f9dc 100644
--- a/arch/tile/lib/strchr_64.c
+++ b/arch/tile/lib/strchr_64.c
@@ -15,8 +15,7 @@
 #include <linux/types.h>
 #include <linux/string.h>
 #include <linux/module.h>
-
-#undef strchr
+#include "string-endian.h"
 
 char *strchr(const char *s, int c)
 {
@@ -33,13 +32,9 @@
 	 * match neither zero nor goal (we make sure the high bit of each
 	 * byte is 1, and the low 7 bits are all the opposite of the goal
 	 * byte).
-	 *
-	 * Note that this shift count expression works because we know shift
-	 * counts are taken mod 64.
 	 */
-	const uint64_t before_mask = (1ULL << (s_int << 3)) - 1;
-	uint64_t v = (*p | before_mask) ^
-		(goal & __insn_v1shrsi(before_mask, 1));
+	const uint64_t before_mask = MASK(s_int);
+	uint64_t v = (*p | before_mask) ^ (goal & __insn_v1shrui(before_mask, 1));
 
 	uint64_t zero_matches, goal_matches;
 	while (1) {
@@ -55,8 +50,8 @@
 		v = *++p;
 	}
 
-	z = __insn_ctz(zero_matches);
-	g = __insn_ctz(goal_matches);
+	z = CFZ(zero_matches);
+	g = CFZ(goal_matches);
 
 	/* If we found c before '\0' we got a match. Note that if c == '\0'
 	 * then g == z, and we correctly return the address of the '\0'
diff --git a/arch/tile/lib/string-endian.h b/arch/tile/lib/string-endian.h
new file mode 100644
index 0000000..c0eed7c
--- /dev/null
+++ b/arch/tile/lib/string-endian.h
@@ -0,0 +1,33 @@
+/*
+ * Copyright 2011 Tilera Corporation. All Rights Reserved.
+ *
+ *   This program is free software; you can redistribute it and/or
+ *   modify it under the terms of the GNU General Public License
+ *   as published by the Free Software Foundation, version 2.
+ *
+ *   This program is distributed in the hope that it will be useful, but
+ *   WITHOUT ANY WARRANTY; without even the implied warranty of
+ *   MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
+ *   NON INFRINGEMENT.  See the GNU General Public License for
+ *   more details.
+ *
+ * Provide a mask based on the pointer alignment that
+ * sets up non-zero bytes before the beginning of the string.
+ * The MASK expression works because shift counts are taken mod 64.
+ * Also, specify how to count "first" and "last" bits
+ * when the bits have been read as a word.
+ */
+
+#include <asm/byteorder.h>
+
+#ifdef __LITTLE_ENDIAN
+#define MASK(x) (__insn_shl(1ULL, (x << 3)) - 1)
+#define NULMASK(x) ((2ULL << x) - 1)
+#define CFZ(x) __insn_ctz(x)
+#define REVCZ(x) __insn_clz(x)
+#else
+#define MASK(x) (__insn_shl(-2LL, ((-x << 3) - 1)))
+#define NULMASK(x) (-2LL << (63 - x))
+#define CFZ(x) __insn_clz(x)
+#define REVCZ(x) __insn_ctz(x)
+#endif
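
As a concrete illustration of the little-endian MASK() expression (a standalone sketch; shl_mod64() mimics the mod-64 shift behaviour of the hardware shift instruction): for a string that starts three bytes into an aligned word, the mask covers exactly the three preceding bytes, so OR-ing it into the first loaded word forces those bytes to be non-zero and they can never look like a terminating NUL.

#include <stdint.h>
#include <stdio.h>

/* Mimic the tile shift instruction: shift counts are taken mod 64. */
static uint64_t shl_mod64(uint64_t v, uint64_t n)
{
	return v << (n & 63);
}

#define MASK(x)	(shl_mod64(1ULL, ((x) << 3)) - 1)

int main(void)
{
	uintptr_t s_int = 0x1003;	/* string starts 3 bytes into a word */

	/* Prints 0000000000ffffff: all-ones in the three bytes that precede
	 * the string within its first aligned 8-byte word. */
	printf("%016llx\n", (unsigned long long)MASK(s_int));
	return 0;
}
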
diff --git a/arch/tile/lib/strlen_64.c b/arch/tile/lib/strlen_64.c
index 1c92d46..9583fc3 100644
--- a/arch/tile/lib/strlen_64.c
+++ b/arch/tile/lib/strlen_64.c
@@ -15,8 +15,7 @@
 #include <linux/types.h>
 #include <linux/string.h>
 #include <linux/module.h>
-
-#undef strlen
+#include "string-endian.h"
 
 size_t strlen(const char *s)
 {
@@ -24,15 +23,13 @@
 	const uintptr_t s_int = (uintptr_t) s;
 	const uint64_t *p = (const uint64_t *)(s_int & -8);
 
-	/* Read the first word, but force bytes before the string to be nonzero.
-	 * This expression works because we know shift counts are taken mod 64.
-	 */
-	uint64_t v = *p | ((1ULL << (s_int << 3)) - 1);
+	/* Read and MASK the first word. */
+	uint64_t v = *p | MASK(s_int);
 
 	uint64_t bits;
 	while ((bits = __insn_v1cmpeqi(v, 0)) == 0)
 		v = *++p;
 
-	return ((const char *)p) + (__insn_ctz(bits) >> 3) - s;
+	return ((const char *)p) + (CFZ(bits) >> 3) - s;
 }
 EXPORT_SYMBOL(strlen);
diff --git a/arch/tile/lib/usercopy_32.S b/arch/tile/lib/usercopy_32.S
index 979f76d..b62d002 100644
--- a/arch/tile/lib/usercopy_32.S
+++ b/arch/tile/lib/usercopy_32.S
@@ -19,82 +19,6 @@
 
 /* Access user memory, but use MMU to avoid propagating kernel exceptions. */
 
-	.pushsection .fixup,"ax"
-
-get_user_fault:
-	{ move r0, zero; move r1, zero }
-	{ movei r2, -EFAULT; jrp lr }
-	ENDPROC(get_user_fault)
-
-put_user_fault:
-	{ movei r0, -EFAULT; jrp lr }
-	ENDPROC(put_user_fault)
-
-	.popsection
-
-/*
- * __get_user_N functions take a pointer in r0, and return 0 in r2
- * on success, with the value in r0; or else -EFAULT in r2.
- */
-#define __get_user_N(bytes, LOAD) \
-	STD_ENTRY(__get_user_##bytes); \
-1:	{ LOAD r0, r0; move r1, zero; move r2, zero }; \
-	jrp lr; \
-	STD_ENDPROC(__get_user_##bytes); \
-	.pushsection __ex_table,"a"; \
-	.word 1b, get_user_fault; \
-	.popsection
-
-__get_user_N(1, lb_u)
-__get_user_N(2, lh_u)
-__get_user_N(4, lw)
-
-/*
- * __get_user_8 takes a pointer in r0, and returns 0 in r2
- * on success, with the value in r0/r1; or else -EFAULT in r2.
- */
-	STD_ENTRY(__get_user_8);
-1:	{ lw r0, r0; addi r1, r0, 4 };
-2:	{ lw r1, r1; move r2, zero };
-	jrp lr;
-	STD_ENDPROC(__get_user_8);
-	.pushsection __ex_table,"a";
-	.word 1b, get_user_fault;
-	.word 2b, get_user_fault;
-	.popsection
-
-/*
- * __put_user_N functions take a value in r0 and a pointer in r1,
- * and return 0 in r0 on success or -EFAULT on failure.
- */
-#define __put_user_N(bytes, STORE) \
-	STD_ENTRY(__put_user_##bytes); \
-1:	{ STORE r1, r0; move r0, zero }; \
-	jrp lr; \
-	STD_ENDPROC(__put_user_##bytes); \
-	.pushsection __ex_table,"a"; \
-	.word 1b, put_user_fault; \
-	.popsection
-
-__put_user_N(1, sb)
-__put_user_N(2, sh)
-__put_user_N(4, sw)
-
-/*
- * __put_user_8 takes a value in r0/r1 and a pointer in r2,
- * and returns 0 in r0 on success or -EFAULT on failure.
- */
-STD_ENTRY(__put_user_8)
-1:      { sw r2, r0; addi r2, r2, 4 }
-2:      { sw r2, r1; move r0, zero }
-	jrp lr
-	STD_ENDPROC(__put_user_8)
-	.pushsection __ex_table,"a"
-	.word 1b, put_user_fault
-	.word 2b, put_user_fault
-	.popsection
-
-
 /*
  * strnlen_user_asm takes the pointer in r0, and the length bound in r1.
  * It returns the length, including the terminating NUL, or zero on exception.
diff --git a/arch/tile/lib/usercopy_64.S b/arch/tile/lib/usercopy_64.S
index 2ff44f8..adb2dbb 100644
--- a/arch/tile/lib/usercopy_64.S
+++ b/arch/tile/lib/usercopy_64.S
@@ -19,55 +19,6 @@
 
 /* Access user memory, but use MMU to avoid propagating kernel exceptions. */
 
-	.pushsection .fixup,"ax"
-
-get_user_fault:
-	{ movei r1, -EFAULT; move r0, zero }
-	jrp lr
-	ENDPROC(get_user_fault)
-
-put_user_fault:
-	{ movei r0, -EFAULT; jrp lr }
-	ENDPROC(put_user_fault)
-
-	.popsection
-
-/*
- * __get_user_N functions take a pointer in r0, and return 0 in r1
- * on success, with the value in r0; or else -EFAULT in r1.
- */
-#define __get_user_N(bytes, LOAD) \
-	STD_ENTRY(__get_user_##bytes); \
-1:	{ LOAD r0, r0; move r1, zero }; \
-	jrp lr; \
-	STD_ENDPROC(__get_user_##bytes); \
-	.pushsection __ex_table,"a"; \
-	.quad 1b, get_user_fault; \
-	.popsection
-
-__get_user_N(1, ld1u)
-__get_user_N(2, ld2u)
-__get_user_N(4, ld4u)
-__get_user_N(8, ld)
-
-/*
- * __put_user_N functions take a value in r0 and a pointer in r1,
- * and return 0 in r0 on success or -EFAULT on failure.
- */
-#define __put_user_N(bytes, STORE) \
-	STD_ENTRY(__put_user_##bytes); \
-1:	{ STORE r1, r0; move r0, zero }; \
-	jrp lr; \
-	STD_ENDPROC(__put_user_##bytes); \
-	.pushsection __ex_table,"a"; \
-	.quad 1b, put_user_fault; \
-	.popsection
-
-__put_user_N(1, st1)
-__put_user_N(2, st2)
-__put_user_N(4, st4)
-__put_user_N(8, st)
-
 /*
  * strnlen_user_asm takes the pointer in r0, and the length bound in r1.
  * It returns the length, including the terminating NUL, or zero on exception.
diff --git a/arch/tile/mm/fault.c b/arch/tile/mm/fault.c
index 22e58f5..84ce7ab 100644
--- a/arch/tile/mm/fault.c
+++ b/arch/tile/mm/fault.c
@@ -187,7 +187,7 @@
 	HV_Context ctx = hv_inquire_context();
 	unsigned long pgd_pfn = ctx.page_table >> PAGE_SHIFT;
 	struct page *pgd_page = pfn_to_page(pgd_pfn);
-	BUG_ON(PageHighMem(pgd_page));   /* oops, HIGHPTE? */
+	BUG_ON(PageHighMem(pgd_page));
 	return (pgd_t *) __va(ctx.page_table);
 }
 
@@ -273,11 +273,15 @@
 	int si_code;
 	int is_kernel_mode;
 	pgd_t *pgd;
+	unsigned int flags;
 
 	/* on TILE, protection faults are always writes */
 	if (!is_page_fault)
 		write = 1;
 
+	flags = (FAULT_FLAG_ALLOW_RETRY | FAULT_FLAG_KILLABLE |
+		 (write ? FAULT_FLAG_WRITE : 0));
+
 	is_kernel_mode = (EX1_PL(regs->ex1) != USER_PL);
 
 	tsk = validate_current();
@@ -382,6 +386,8 @@
 			vma = NULL;  /* happy compiler */
 			goto bad_area_nosemaphore;
 		}
+
+retry:
 		down_read(&mm->mmap_sem);
 	}
 
@@ -429,7 +435,11 @@
 	 * make sure we exit gracefully rather than endlessly redo
 	 * the fault.
 	 */
-	fault = handle_mm_fault(mm, vma, address, write);
+	fault = handle_mm_fault(mm, vma, address, flags);
+
+	if ((fault & VM_FAULT_RETRY) && fatal_signal_pending(current))
+		return 0;
+
 	if (unlikely(fault & VM_FAULT_ERROR)) {
 		if (fault & VM_FAULT_OOM)
 			goto out_of_memory;
@@ -437,10 +447,22 @@
 			goto do_sigbus;
 		BUG();
 	}
-	if (fault & VM_FAULT_MAJOR)
-		tsk->maj_flt++;
-	else
-		tsk->min_flt++;
+	if (flags & FAULT_FLAG_ALLOW_RETRY) {
+		if (fault & VM_FAULT_MAJOR)
+			tsk->maj_flt++;
+		else
+			tsk->min_flt++;
+		if (fault & VM_FAULT_RETRY) {
+			flags &= ~FAULT_FLAG_ALLOW_RETRY;
+
+			 /*
+			  * No need to up_read(&mm->mmap_sem) as we would
+			  * have already released it in __lock_page_or_retry
+			  * in mm/filemap.c.
+			  */
+			goto retry;
+		}
+	}
 
 #if CHIP_HAS_TILE_DMA() || CHIP_HAS_SN_PROC()
 	/*
diff --git a/arch/tile/mm/homecache.c b/arch/tile/mm/homecache.c
index 499f737..dbcbdf7 100644
--- a/arch/tile/mm/homecache.c
+++ b/arch/tile/mm/homecache.c
@@ -30,6 +30,7 @@
 #include <linux/cache.h>
 #include <linux/smp.h>
 #include <linux/module.h>
+#include <linux/hugetlb.h>
 
 #include <asm/page.h>
 #include <asm/sections.h>
diff --git a/arch/tile/mm/hugetlbpage.c b/arch/tile/mm/hugetlbpage.c
index 42cfcba..812e2d0 100644
--- a/arch/tile/mm/hugetlbpage.c
+++ b/arch/tile/mm/hugetlbpage.c
@@ -27,85 +27,161 @@
 #include <linux/mman.h>
 #include <asm/tlb.h>
 #include <asm/tlbflush.h>
+#include <asm/setup.h>
+
+#ifdef CONFIG_HUGETLB_SUPER_PAGES
+
+/*
+ * Provide an additional huge page size (in addition to the regular default
+ * huge page size) if no "hugepagesz" arguments are specified.
+ * Note that it must be smaller than the default huge page size so
+ * that it's possible to allocate them on demand from the buddy allocator.
+ * You can change this to 64K (on a 16K build), 256K, 1M, or 4M,
+ * or not define it at all.
+ */
+#define ADDITIONAL_HUGE_SIZE (1024 * 1024UL)
+
+/* "Extra" page-size multipliers, one per level of the page table. */
+int huge_shift[HUGE_SHIFT_ENTRIES] = {
+#ifdef ADDITIONAL_HUGE_SIZE
+#define ADDITIONAL_HUGE_SHIFT __builtin_ctzl(ADDITIONAL_HUGE_SIZE / PAGE_SIZE)
+	[HUGE_SHIFT_PAGE] = ADDITIONAL_HUGE_SHIFT
+#endif
+};
+
+/*
+ * This routine is a hybrid of pte_alloc_map() and pte_alloc_kernel().
+ * It assumes that L2 PTEs are never in HIGHMEM (we don't support that).
+ * It locks the user page table and bumps up the mm->nr_ptes field,
+ * but otherwise allocates the page table using the kernel versions.
+ */
+static pte_t *pte_alloc_hugetlb(struct mm_struct *mm, pmd_t *pmd,
+				unsigned long address)
+{
+	pte_t *new;
+
+	if (pmd_none(*pmd)) {
+		new = pte_alloc_one_kernel(mm, address);
+		if (!new)
+			return NULL;
+
+		smp_wmb(); /* See comment in __pte_alloc */
+
+		spin_lock(&mm->page_table_lock);
+		if (likely(pmd_none(*pmd))) {  /* Has another populated it ? */
+			mm->nr_ptes++;
+			pmd_populate_kernel(mm, pmd, new);
+			new = NULL;
+		} else
+			VM_BUG_ON(pmd_trans_splitting(*pmd));
+		spin_unlock(&mm->page_table_lock);
+		if (new)
+			pte_free_kernel(mm, new);
+	}
+
+	return pte_offset_kernel(pmd, address);
+}
+#endif
 
 pte_t *huge_pte_alloc(struct mm_struct *mm,
 		      unsigned long addr, unsigned long sz)
 {
 	pgd_t *pgd;
 	pud_t *pud;
-	pte_t *pte = NULL;
 
-	/* We do not yet support multiple huge page sizes. */
-	BUG_ON(sz != PMD_SIZE);
+	addr &= -sz;   /* Mask off any low bits in the address. */
 
 	pgd = pgd_offset(mm, addr);
 	pud = pud_alloc(mm, pgd, addr);
-	if (pud)
-		pte = (pte_t *) pmd_alloc(mm, pud, addr);
-	BUG_ON(pte && !pte_none(*pte) && !pte_huge(*pte));
 
-	return pte;
+#ifdef CONFIG_HUGETLB_SUPER_PAGES
+	if (sz >= PGDIR_SIZE) {
+		BUG_ON(sz != PGDIR_SIZE &&
+		       sz != PGDIR_SIZE << huge_shift[HUGE_SHIFT_PGDIR]);
+		return (pte_t *)pud;
+	} else {
+		pmd_t *pmd = pmd_alloc(mm, pud, addr);
+		if (sz >= PMD_SIZE) {
+			BUG_ON(sz != PMD_SIZE &&
+			       sz != (PMD_SIZE << huge_shift[HUGE_SHIFT_PMD]));
+			return (pte_t *)pmd;
+		}
+		else {
+			if (sz != PAGE_SIZE << huge_shift[HUGE_SHIFT_PAGE])
+				panic("Unexpected page size %#lx\n", sz);
+			return pte_alloc_hugetlb(mm, pmd, addr);
+		}
+	}
+#else
+	BUG_ON(sz != PMD_SIZE);
+	return (pte_t *) pmd_alloc(mm, pud, addr);
+#endif
+}
+
+static pte_t *get_pte(pte_t *base, int index, int level)
+{
+	pte_t *ptep = base + index;
+#ifdef CONFIG_HUGETLB_SUPER_PAGES
+	if (!pte_present(*ptep) && huge_shift[level] != 0) {
+		unsigned long mask = -1UL << huge_shift[level];
+		pte_t *super_ptep = base + (index & mask);
+		pte_t pte = *super_ptep;
+		if (pte_present(pte) && pte_super(pte))
+			ptep = super_ptep;
+	}
+#endif
+	return ptep;
 }
 
 pte_t *huge_pte_offset(struct mm_struct *mm, unsigned long addr)
 {
 	pgd_t *pgd;
 	pud_t *pud;
-	pmd_t *pmd = NULL;
+	pmd_t *pmd;
+#ifdef CONFIG_HUGETLB_SUPER_PAGES
+	pte_t *pte;
+#endif
 
-	pgd = pgd_offset(mm, addr);
-	if (pgd_present(*pgd)) {
-		pud = pud_offset(pgd, addr);
-		if (pud_present(*pud))
-			pmd = pmd_offset(pud, addr);
-	}
-	return (pte_t *) pmd;
-}
+	/* Get the top-level page table entry. */
+	pgd = (pgd_t *)get_pte((pte_t *)mm->pgd, pgd_index(addr), 0);
+	if (!pgd_present(*pgd))
+		return NULL;
 
-#ifdef HUGETLB_TEST
-struct page *follow_huge_addr(struct mm_struct *mm, unsigned long address,
-			      int write)
-{
-	unsigned long start = address;
-	int length = 1;
-	int nr;
-	struct page *page;
-	struct vm_area_struct *vma;
+	/* We don't have four levels. */
+	pud = pud_offset(pgd, addr);
+#ifndef __PAGETABLE_PUD_FOLDED
+# error support fourth page table level
+#endif
 
-	vma = find_vma(mm, addr);
-	if (!vma || !is_vm_hugetlb_page(vma))
-		return ERR_PTR(-EINVAL);
+	/* Check for an L0 huge PTE, if we have three levels. */
+#ifndef __PAGETABLE_PMD_FOLDED
+	if (pud_huge(*pud))
+		return (pte_t *)pud;
 
-	pte = huge_pte_offset(mm, address);
+	pmd = (pmd_t *)get_pte((pte_t *)pud_page_vaddr(*pud),
+			       pmd_index(addr), 1);
+	if (!pmd_present(*pmd))
+		return NULL;
+#else
+	pmd = pmd_offset(pud, addr);
+#endif
 
-	/* hugetlb should be locked, and hence, prefaulted */
-	WARN_ON(!pte || pte_none(*pte));
+	/* Check for an L1 huge PTE. */
+	if (pmd_huge(*pmd))
+		return (pte_t *)pmd;
 
-	page = &pte_page(*pte)[vpfn % (HPAGE_SIZE/PAGE_SIZE)];
+#ifdef CONFIG_HUGETLB_SUPER_PAGES
+	/* Check for an L2 huge PTE. */
+	pte = get_pte((pte_t *)pmd_page_vaddr(*pmd), pte_index(addr), 2);
+	if (!pte_present(*pte))
+		return NULL;
+	if (pte_super(*pte))
+		return pte;
+#endif
 
-	WARN_ON(!PageHead(page));
-
-	return page;
-}
-
-int pmd_huge(pmd_t pmd)
-{
-	return 0;
-}
-
-int pud_huge(pud_t pud)
-{
-	return 0;
-}
-
-struct page *follow_huge_pmd(struct mm_struct *mm, unsigned long address,
-			     pmd_t *pmd, int write)
-{
 	return NULL;
 }
 
-#else
-
 struct page *follow_huge_addr(struct mm_struct *mm, unsigned long address,
 			      int write)
 {
@@ -149,8 +225,6 @@
 	return 0;
 }
 
-#endif
-
 #ifdef HAVE_ARCH_HUGETLB_UNMAPPED_AREA
 static unsigned long hugetlb_get_unmapped_area_bottomup(struct file *file,
 		unsigned long addr, unsigned long len,
@@ -322,21 +396,102 @@
 		return hugetlb_get_unmapped_area_topdown(file, addr, len,
 				pgoff, flags);
 }
+#endif /* HAVE_ARCH_HUGETLB_UNMAPPED_AREA */
+
+#ifdef CONFIG_HUGETLB_SUPER_PAGES
+static __init int __setup_hugepagesz(unsigned long ps)
+{
+	int log_ps = __builtin_ctzl(ps);
+	int level, base_shift;
+
+	if ((1UL << log_ps) != ps || (log_ps & 1) != 0) {
+		pr_warn("Not enabling %ld byte huge pages;"
+			" must be a power of four.\n", ps);
+		return -EINVAL;
+	}
+
+	if (ps > 64*1024*1024*1024UL) {
+		pr_warn("Not enabling %ld MB huge pages;"
+			" largest legal value is 64 GB .\n", ps >> 20);
+		return -EINVAL;
+	} else if (ps >= PUD_SIZE) {
+		static long hv_jpage_size;
+		if (hv_jpage_size == 0)
+			hv_jpage_size = hv_sysconf(HV_SYSCONF_PAGE_SIZE_JUMBO);
+		if (hv_jpage_size != PUD_SIZE) {
+			pr_warn("Not enabling >= %ld MB huge pages:"
+				" hypervisor reports size %ld\n",
+				PUD_SIZE >> 20, hv_jpage_size);
+			return -EINVAL;
+		}
+		level = 0;
+		base_shift = PUD_SHIFT;
+	} else if (ps >= PMD_SIZE) {
+		level = 1;
+		base_shift = PMD_SHIFT;
+	} else if (ps > PAGE_SIZE) {
+		level = 2;
+		base_shift = PAGE_SHIFT;
+	} else {
+		pr_err("hugepagesz: huge page size %ld too small\n", ps);
+		return -EINVAL;
+	}
+
+	if (log_ps != base_shift) {
+		int shift_val = log_ps - base_shift;
+		if (huge_shift[level] != 0) {
+			int old_shift = base_shift + huge_shift[level];
+			pr_warn("Not enabling %ld MB huge pages;"
+				" already have size %ld MB.\n",
+				ps >> 20, (1UL << old_shift) >> 20);
+			return -EINVAL;
+		}
+		if (hv_set_pte_super_shift(level, shift_val) != 0) {
+			pr_warn("Not enabling %ld MB huge pages;"
+				" no hypervisor support.\n", ps >> 20);
+			return -EINVAL;
+		}
+		printk(KERN_DEBUG "Enabled %ld MB huge pages\n", ps >> 20);
+		huge_shift[level] = shift_val;
+	}
+
+	hugetlb_add_hstate(log_ps - PAGE_SHIFT);
+
+	return 0;
+}
+
+static bool saw_hugepagesz;
 
 static __init int setup_hugepagesz(char *opt)
 {
-	unsigned long ps = memparse(opt, &opt);
-	if (ps == PMD_SIZE) {
-		hugetlb_add_hstate(PMD_SHIFT - PAGE_SHIFT);
-	} else if (ps == PUD_SIZE) {
-		hugetlb_add_hstate(PUD_SHIFT - PAGE_SHIFT);
-	} else {
-		pr_err("hugepagesz: Unsupported page size %lu M\n",
-			ps >> 20);
-		return 0;
+	if (!saw_hugepagesz) {
+		saw_hugepagesz = true;
+		memset(huge_shift, 0, sizeof(huge_shift));
 	}
-	return 1;
+	return __setup_hugepagesz(memparse(opt, NULL));
 }
 __setup("hugepagesz=", setup_hugepagesz);
 
-#endif /*HAVE_ARCH_HUGETLB_UNMAPPED_AREA*/
+#ifdef ADDITIONAL_HUGE_SIZE
+/*
+ * Provide an additional huge page size if no "hugepagesz" args are given.
+ * In that case, all the cores have properly set up their hv super_shift
+ * already, but we need to notify the hugetlb code to enable the
+ * new huge page size from the Linux point of view.
+ */
+static __init int add_default_hugepagesz(void)
+{
+	if (!saw_hugepagesz) {
+		BUILD_BUG_ON(ADDITIONAL_HUGE_SIZE >= PMD_SIZE ||
+			     ADDITIONAL_HUGE_SIZE <= PAGE_SIZE);
+		BUILD_BUG_ON((PAGE_SIZE << ADDITIONAL_HUGE_SHIFT) !=
+			     ADDITIONAL_HUGE_SIZE);
+		BUILD_BUG_ON(ADDITIONAL_HUGE_SHIFT & 1);
+		hugetlb_add_hstate(ADDITIONAL_HUGE_SHIFT);
+	}
+	return 0;
+}
+arch_initcall(add_default_hugepagesz);
+#endif
+
+#endif /* CONFIG_HUGETLB_SUPER_PAGES */
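
The get_pte() helper above locates the entry that carries the "super" bit by rounding a page-table index down to the start of its super-page group with a huge_shift-derived mask. A standalone sketch of that index arithmetic (illustrative only):

#include <stdio.h>

/* Round a page-table index down to the leading entry of the super-page
 * group it belongs to, given the extra shift for this level. */
static unsigned long super_index(unsigned long index, int huge_shift)
{
	unsigned long mask = -1UL << huge_shift;

	return index & mask;
}

int main(void)
{
	/* With huge_shift = 4, entries are grouped 16 at a time, so index
	 * 0x1237 maps to the group leader at index 0x1230. */
	printf("%#lx\n", super_index(0x1237, 4));
	return 0;
}
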
diff --git a/arch/tile/mm/init.c b/arch/tile/mm/init.c
index 6a9d20d..630dd2c 100644
--- a/arch/tile/mm/init.c
+++ b/arch/tile/mm/init.c
@@ -82,7 +82,7 @@
 
 static void init_prealloc_ptes(int node, int pages)
 {
-	BUG_ON(pages & (HV_L2_ENTRIES-1));
+	BUG_ON(pages & (PTRS_PER_PTE - 1));
 	if (pages) {
 		num_l2_ptes[node] = pages;
 		l2_ptes[node] = __alloc_bootmem(pages * sizeof(pte_t),
@@ -131,14 +131,9 @@
 
 #ifdef __tilegx__
 
-#if HV_L1_SIZE != HV_L2_SIZE
-# error Rework assumption that L1 and L2 page tables are same size.
-#endif
-
-/* Since pmd_t arrays and pte_t arrays are the same size, just use casts. */
 static inline pmd_t *alloc_pmd(void)
 {
-	return (pmd_t *)alloc_pte();
+	return __alloc_bootmem(L1_KERNEL_PGTABLE_SIZE, HV_PAGE_TABLE_ALIGN, 0);
 }
 
 static inline void assign_pmd(pud_t *pud, pmd_t *pmd)
@@ -444,6 +439,7 @@
  */
 static void __init kernel_physical_mapping_init(pgd_t *pgd_base)
 {
+	unsigned long long irqmask;
 	unsigned long address, pfn;
 	pmd_t *pmd;
 	pte_t *pte;
@@ -633,10 +629,13 @@
 	 *  - install pgtables[] as the real page table
 	 *  - flush the TLB so the new page table takes effect
 	 */
+	irqmask = interrupt_mask_save_mask();
+	interrupt_mask_set_mask(-1ULL);
 	rc = flush_and_install_context(__pa(pgtables),
 				       init_pgprot((unsigned long)pgtables),
 				       __get_cpu_var(current_asid),
 				       cpumask_bits(my_cpu_mask));
+	interrupt_mask_restore_mask(irqmask);
 	BUG_ON(rc != 0);
 
 	/* Copy the page table back to the normal swapper_pg_dir. */
@@ -699,6 +698,7 @@
 #endif /* CONFIG_HIGHMEM */
 
 
+#ifndef CONFIG_64BIT
 static void __init init_free_pfn_range(unsigned long start, unsigned long end)
 {
 	unsigned long pfn;
@@ -771,6 +771,7 @@
 		init_free_pfn_range(start, end);
 	}
 }
+#endif
 
 /*
  * paging_init() sets up the page tables - note that all of lowmem is
@@ -807,7 +808,7 @@
 	 * changing init_mm once we get up and running, and there's no
 	 * need for e.g. vmalloc_sync_all().
 	 */
-	BUILD_BUG_ON(pgd_index(VMALLOC_START) != pgd_index(VMALLOC_END));
+	BUILD_BUG_ON(pgd_index(VMALLOC_START) != pgd_index(VMALLOC_END - 1));
 	pud = pud_offset(pgd_base + pgd_index(VMALLOC_START), VMALLOC_START);
 	assign_pmd(pud, alloc_pmd());
 #endif
@@ -859,8 +860,10 @@
 	/* this will put all bootmem onto the freelists */
 	totalram_pages += free_all_bootmem();
 
+#ifndef CONFIG_64BIT
 	/* count all remaining LOWMEM and give all HIGHMEM to page allocator */
 	set_non_bootmem_pages_init();
+#endif
 
 	codesize =  (unsigned long)&_etext - (unsigned long)&_text;
 	datasize =  (unsigned long)&_end - (unsigned long)&_sdata;
diff --git a/arch/tile/mm/migrate.h b/arch/tile/mm/migrate.h
index cd45a08..91683d9 100644
--- a/arch/tile/mm/migrate.h
+++ b/arch/tile/mm/migrate.h
@@ -24,6 +24,9 @@
 /*
  * This function is used as a helper when setting up the initial
  * page table (swapper_pg_dir).
+ *
+ * You must mask ALL interrupts prior to invoking this code, since
+ * you can't legally touch the stack during the cache flush.
  */
 extern int flush_and_install_context(HV_PhysAddr page_table, HV_PTE access,
 				     HV_ASID asid,
@@ -39,6 +42,9 @@
  *
  * Note that any non-NULL pointers must not point to the page that
  * is handled by the stack_pte itself.
+ *
+ * You must mask ALL interrupts prior to invoking this code, since
+ * you can't legally touch the stack during the cache flush.
  */
 extern int homecache_migrate_stack_and_flush(pte_t stack_pte, unsigned long va,
 				     size_t length, pte_t *stack_ptep,
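
The new comments in migrate.h move responsibility for interrupt masking to the callers, which is exactly what the init.c hunk above now does around flush_and_install_context(). A minimal sketch of that caller-side pattern, assuming the tile interrupt_mask_*() helpers behave as they are used there:

	unsigned long long irqmask;
	int rc;

	irqmask = interrupt_mask_save_mask();	/* remember the current mask */
	interrupt_mask_set_mask(-1ULL);		/* mask ALL interrupts */
	rc = flush_and_install_context(__pa(pgtables),
				       init_pgprot((unsigned long)pgtables),
				       __get_cpu_var(current_asid),
				       cpumask_bits(my_cpu_mask));
	interrupt_mask_restore_mask(irqmask);	/* restore the previous mask */
	BUG_ON(rc != 0);
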
diff --git a/arch/tile/mm/migrate_32.S b/arch/tile/mm/migrate_32.S
index ac01a7c..5305814 100644
--- a/arch/tile/mm/migrate_32.S
+++ b/arch/tile/mm/migrate_32.S
@@ -40,8 +40,7 @@
 #define FRAME_R32	16
 #define FRAME_R33	20
 #define FRAME_R34	24
-#define FRAME_R35	28
-#define FRAME_SIZE	32
+#define FRAME_SIZE	28
 
 
 
@@ -66,12 +65,11 @@
 #define r_my_cpumask	r5
 
 /* Locals (callee-save); must not be more than FRAME_xxx above. */
-#define r_save_ics	r30
-#define r_context_lo	r31
-#define r_context_hi	r32
-#define r_access_lo	r33
-#define r_access_hi	r34
-#define r_asid		r35
+#define r_context_lo	r30
+#define r_context_hi	r31
+#define r_access_lo	r32
+#define r_access_hi	r33
+#define r_asid		r34
 
 STD_ENTRY(flush_and_install_context)
 	/*
@@ -104,11 +102,7 @@
 	 sw r_tmp, r33
 	 addi r_tmp, sp, FRAME_R34
 	}
-	{
-	 sw r_tmp, r34
-	 addi r_tmp, sp, FRAME_R35
-	}
-	sw r_tmp, r35
+	sw r_tmp, r34
 
 	/* Move some arguments to callee-save registers. */
 	{
@@ -121,13 +115,6 @@
 	}
 	move r_asid, r_asid_in
 
-	/* Disable interrupts, since we can't use our stack. */
-	{
-	 mfspr r_save_ics, INTERRUPT_CRITICAL_SECTION
-	 movei r_tmp, 1
-	}
-	mtspr INTERRUPT_CRITICAL_SECTION, r_tmp
-
 	/* First, flush our L2 cache. */
 	{
 	 move r0, zero  /* cache_pa */
@@ -163,7 +150,7 @@
 	}
 	{
 	 move r4, r_asid
-	 movei r5, HV_CTX_DIRECTIO
+	 moveli r5, HV_CTX_DIRECTIO | CTX_PAGE_FLAG
 	}
 	jal hv_install_context
 	bnz r0, .Ldone
@@ -175,9 +162,6 @@
 	}
 
 .Ldone:
-	/* Reset interrupts back how they were before. */
-	mtspr INTERRUPT_CRITICAL_SECTION, r_save_ics
-
 	/* Restore the callee-saved registers and return. */
 	addli lr, sp, FRAME_SIZE
 	{
@@ -202,10 +186,6 @@
 	}
 	{
 	 lw r34, r_tmp
-	 addli r_tmp, sp, FRAME_R35
-	}
-	{
-	 lw r35, r_tmp
 	 addi sp, sp, FRAME_SIZE
 	}
 	jrp lr
diff --git a/arch/tile/mm/migrate_64.S b/arch/tile/mm/migrate_64.S
index e76fea6..1d15b108 100644
--- a/arch/tile/mm/migrate_64.S
+++ b/arch/tile/mm/migrate_64.S
@@ -38,8 +38,7 @@
 #define FRAME_R30	16
 #define FRAME_R31	24
 #define FRAME_R32	32
-#define FRAME_R33	40
-#define FRAME_SIZE	48
+#define FRAME_SIZE	40
 
 
 
@@ -60,10 +59,9 @@
 #define r_my_cpumask	r3
 
 /* Locals (callee-save); must not be more than FRAME_xxx above. */
-#define r_save_ics	r30
-#define r_context	r31
-#define r_access	r32
-#define r_asid		r33
+#define r_context	r30
+#define r_access	r31
+#define r_asid		r32
 
 /*
  * Caller-save locals and frame constants are the same as
@@ -93,11 +91,7 @@
 	 st r_tmp, r31
 	 addi r_tmp, sp, FRAME_R32
 	}
-	{
-	 st r_tmp, r32
-	 addi r_tmp, sp, FRAME_R33
-	}
-	st r_tmp, r33
+	st r_tmp, r32
 
 	/* Move some arguments to callee-save registers. */
 	{
@@ -106,13 +100,6 @@
 	}
 	move r_asid, r_asid_in
 
-	/* Disable interrupts, since we can't use our stack. */
-	{
-	 mfspr r_save_ics, INTERRUPT_CRITICAL_SECTION
-	 movei r_tmp, 1
-	}
-	mtspr INTERRUPT_CRITICAL_SECTION, r_tmp
-
 	/* First, flush our L2 cache. */
 	{
 	 move r0, zero  /* cache_pa */
@@ -147,7 +134,7 @@
 	}
 	{
 	 move r2, r_asid
-	 movei r3, HV_CTX_DIRECTIO
+	 moveli r3, HV_CTX_DIRECTIO | CTX_PAGE_FLAG
 	}
 	jal hv_install_context
 	bnez r0, 1f
@@ -158,10 +145,7 @@
 	 jal hv_flush_all
 	}
 
-1:      /* Reset interrupts back how they were before. */
-	mtspr INTERRUPT_CRITICAL_SECTION, r_save_ics
-
-	/* Restore the callee-saved registers and return. */
+1:	/* Restore the callee-saved registers and return. */
 	addli lr, sp, FRAME_SIZE
 	{
 	 ld lr, lr
@@ -177,10 +161,6 @@
 	}
 	{
 	 ld r32, r_tmp
-	 addli r_tmp, sp, FRAME_R33
-	}
-	{
-	 ld r33, r_tmp
 	 addi sp, sp, FRAME_SIZE
 	}
 	jrp lr
diff --git a/arch/tile/mm/pgtable.c b/arch/tile/mm/pgtable.c
index 2410aa8..345edfe 100644
--- a/arch/tile/mm/pgtable.c
+++ b/arch/tile/mm/pgtable.c
@@ -132,15 +132,6 @@
 	set_pte_pfn(address, phys >> PAGE_SHIFT, flags);
 }
 
-#if defined(CONFIG_HIGHPTE)
-pte_t *_pte_offset_map(pmd_t *dir, unsigned long address)
-{
-	pte_t *pte = kmap_atomic(pmd_page(*dir)) +
-		(pmd_ptfn(*dir) << HV_LOG2_PAGE_TABLE_ALIGN) & ~PAGE_MASK;
-	return &pte[pte_index(address)];
-}
-#endif
-
 /**
  * shatter_huge_page() - ensure a given address is mapped by a small page.
  *
@@ -289,33 +280,26 @@
 
 #define L2_USER_PGTABLE_PAGES (1 << L2_USER_PGTABLE_ORDER)
 
-struct page *pte_alloc_one(struct mm_struct *mm, unsigned long address)
+struct page *pgtable_alloc_one(struct mm_struct *mm, unsigned long address,
+			       int order)
 {
 	gfp_t flags = GFP_KERNEL|__GFP_REPEAT|__GFP_ZERO;
 	struct page *p;
-#if L2_USER_PGTABLE_ORDER > 0
 	int i;
-#endif
-
-#ifdef CONFIG_HIGHPTE
-	flags |= __GFP_HIGHMEM;
-#endif
 
 	p = alloc_pages(flags, L2_USER_PGTABLE_ORDER);
 	if (p == NULL)
 		return NULL;
 
-#if L2_USER_PGTABLE_ORDER > 0
 	/*
 	 * Make every page have a page_count() of one, not just the first.
 	 * We don't use __GFP_COMP since it doesn't look like it works
 	 * correctly with tlb_remove_page().
 	 */
-	for (i = 1; i < L2_USER_PGTABLE_PAGES; ++i) {
+	for (i = 1; i < order; ++i) {
 		init_page_count(p+i);
 		inc_zone_page_state(p+i, NR_PAGETABLE);
 	}
-#endif
 
 	pgtable_page_ctor(p);
 	return p;
@@ -326,28 +310,28 @@
  * process).  We have to correct whatever pte_alloc_one() did before
  * returning the pages to the allocator.
  */
-void pte_free(struct mm_struct *mm, struct page *p)
+void pgtable_free(struct mm_struct *mm, struct page *p, int order)
 {
 	int i;
 
 	pgtable_page_dtor(p);
 	__free_page(p);
 
-	for (i = 1; i < L2_USER_PGTABLE_PAGES; ++i) {
+	for (i = 1; i < order; ++i) {
 		__free_page(p+i);
 		dec_zone_page_state(p+i, NR_PAGETABLE);
 	}
 }
 
-void __pte_free_tlb(struct mmu_gather *tlb, struct page *pte,
-		    unsigned long address)
+void __pgtable_free_tlb(struct mmu_gather *tlb, struct page *pte,
+			unsigned long address, int order)
 {
 	int i;
 
 	pgtable_page_dtor(pte);
 	tlb_remove_page(tlb, pte);
 
-	for (i = 1; i < L2_USER_PGTABLE_PAGES; ++i) {
+	for (i = 1; i < order; ++i) {
 		tlb_remove_page(tlb, pte + i);
 		dec_zone_page_state(pte + i, NR_PAGETABLE);
 	}
@@ -490,7 +474,7 @@
 /* Can this mm load a PTE with cached_priority set? */
 static inline int mm_is_priority_cached(struct mm_struct *mm)
 {
-	return mm->context.priority_cached;
+	return mm->context.priority_cached != 0;
 }
 
 /*
@@ -500,8 +484,8 @@
 void start_mm_caching(struct mm_struct *mm)
 {
 	if (!mm_is_priority_cached(mm)) {
-		mm->context.priority_cached = -1U;
-		hv_set_caching(-1U);
+		mm->context.priority_cached = -1UL;
+		hv_set_caching(-1UL);
 	}
 }
 
@@ -516,7 +500,7 @@
  * Presumably we'll come back later and have more luck and clear
  * the value then; for now we'll just keep the cache marked for priority.
  */
-static unsigned int update_priority_cached(struct mm_struct *mm)
+static unsigned long update_priority_cached(struct mm_struct *mm)
 {
 	if (mm->context.priority_cached && down_write_trylock(&mm->mmap_sem)) {
 		struct vm_area_struct *vm;
diff --git a/arch/um/Makefile b/arch/um/Makefile
index 55c0661..0970910 100644
--- a/arch/um/Makefile
+++ b/arch/um/Makefile
@@ -121,15 +121,8 @@
 
 LD_FLAGS_CMDLINE = $(foreach opt,$(LDFLAGS),-Wl,$(opt))
 
-CFLAGS_vmlinux := $(LINK-y) $(LINK_WRAPS) $(LD_FLAGS_CMDLINE)
-define cmd_vmlinux__
-	$(CC) $(CFLAGS_vmlinux) -o $@ \
-	-Wl,-T,$(vmlinux-lds) $(vmlinux-init) \
-	-Wl,--start-group $(vmlinux-main) -Wl,--end-group \
-	-lutil \
-	$(filter-out $(vmlinux-lds) $(vmlinux-init) $(vmlinux-main) vmlinux.o \
-	FORCE ,$^) ; rm -f linux
-endef
+# Used by link-vmlinux.sh which has special support for um link
+export CFLAGS_vmlinux := $(LINK-y) $(LINK_WRAPS) $(LD_FLAGS_CMDLINE)
 
 # When cleaning we don't include .config, so we don't include
 # TT or skas makefiles and don't clean skas_ptregs.h.
diff --git a/arch/um/include/shared/frame_kern.h b/arch/um/include/shared/frame_kern.h
index 7607849..e584e40 100644
--- a/arch/um/include/shared/frame_kern.h
+++ b/arch/um/include/shared/frame_kern.h
@@ -6,9 +6,6 @@
 #ifndef __FRAME_KERN_H_
 #define __FRAME_KERN_H_
 
-#define _S(nr) (1<<((nr)-1))
-#define _BLOCKABLE (~(_S(SIGKILL) | _S(SIGSTOP)))
-
 extern int setup_signal_stack_sc(unsigned long stack_top, int sig, 
 				 struct k_sigaction *ka,
 				 struct pt_regs *regs, 
diff --git a/arch/um/kernel/process.c b/arch/um/kernel/process.c
index 3a2235e..ccb9a9d 100644
--- a/arch/um/kernel/process.c
+++ b/arch/um/kernel/process.c
@@ -117,11 +117,8 @@
 		schedule();
 	if (test_thread_flag(TIF_SIGPENDING))
 		do_signal();
-	if (test_and_clear_thread_flag(TIF_NOTIFY_RESUME)) {
+	if (test_and_clear_thread_flag(TIF_NOTIFY_RESUME))
 		tracehook_notify_resume(&current->thread.regs);
-		if (current->replacement_session_keyring)
-			key_replace_session_keyring();
-	}
 }
 
 void exit_thread(void)
diff --git a/arch/um/kernel/reboot.c b/arch/um/kernel/reboot.c
index 4d93dff..3d15243 100644
--- a/arch/um/kernel/reboot.c
+++ b/arch/um/kernel/reboot.c
@@ -4,7 +4,9 @@
  */
 
 #include "linux/sched.h"
+#include "linux/spinlock.h"
 #include "linux/slab.h"
+#include "linux/oom.h"
 #include "kern_util.h"
 #include "os.h"
 #include "skas.h"
@@ -22,13 +24,18 @@
 		struct task_struct *p;
 		int pid;
 
+		read_lock(&tasklist_lock);
 		for_each_process(p) {
-			if (p->mm == NULL)
-				continue;
+			struct task_struct *t;
 
-			pid = p->mm->context.id.u.pid;
+			t = find_lock_task_mm(p);
+			if (!t)
+				continue;
+			pid = t->mm->context.id.u.pid;
+			task_unlock(t);
 			os_kill_ptraced_process(pid, 1);
 		}
+		read_unlock(&tasklist_lock);
 	}
 }
 
diff --git a/arch/um/kernel/signal.c b/arch/um/kernel/signal.c
index 292e706..7362d58 100644
--- a/arch/um/kernel/signal.c
+++ b/arch/um/kernel/signal.c
@@ -15,17 +15,13 @@
 EXPORT_SYMBOL(block_signals);
 EXPORT_SYMBOL(unblock_signals);
 
-#define _S(nr) (1<<((nr)-1))
-
-#define _BLOCKABLE (~(_S(SIGKILL) | _S(SIGSTOP)))
-
 /*
  * OK, we're invoking a handler
  */
-static int handle_signal(struct pt_regs *regs, unsigned long signr,
-			 struct k_sigaction *ka, siginfo_t *info,
-			 sigset_t *oldset)
+static void handle_signal(struct pt_regs *regs, unsigned long signr,
+			 struct k_sigaction *ka, siginfo_t *info)
 {
+	sigset_t *oldset = sigmask_to_save();
 	unsigned long sp;
 	int err;
 
@@ -65,9 +61,7 @@
 	if (err)
 		force_sigsegv(signr, current);
 	else
-		block_sigmask(ka, signr);
-
-	return err;
+		signal_delivered(signr, info, ka, regs, 0);
 }
 
 static int kern_do_signal(struct pt_regs *regs)
@@ -77,24 +71,9 @@
 	int sig, handled_sig = 0;
 
 	while ((sig = get_signal_to_deliver(&info, &ka_copy, regs, NULL)) > 0) {
-		sigset_t *oldset;
-		if (test_thread_flag(TIF_RESTORE_SIGMASK))
-			oldset = &current->saved_sigmask;
-		else
-			oldset = &current->blocked;
 		handled_sig = 1;
 		/* Whee!  Actually deliver the signal.  */
-		if (!handle_signal(regs, sig, &ka_copy, &info, oldset)) {
-			/*
-			 * a signal was successfully delivered; the saved
-			 * sigmask will have been stored in the signal frame,
-			 * and will be restored by sigreturn, so we can simply
-			 * clear the TIF_RESTORE_SIGMASK flag
-			 */
-			if (test_thread_flag(TIF_RESTORE_SIGMASK))
-				clear_thread_flag(TIF_RESTORE_SIGMASK);
-			break;
-		}
+		handle_signal(regs, sig, &ka_copy, &info);
 	}
 
 	/* Did we come from a system call? */
@@ -130,10 +109,8 @@
 	 * if there's no signal to deliver, we just put the saved sigmask
 	 * back
 	 */
-	if (!handled_sig && test_thread_flag(TIF_RESTORE_SIGMASK)) {
-		clear_thread_flag(TIF_RESTORE_SIGMASK);
-		sigprocmask(SIG_SETMASK, &current->saved_sigmask, NULL);
-	}
+	if (!handled_sig)
+		restore_saved_sigmask();
 	return handled_sig;
 }
 
diff --git a/arch/um/kernel/trap.c b/arch/um/kernel/trap.c
index dafc947..3be6076 100644
--- a/arch/um/kernel/trap.c
+++ b/arch/um/kernel/trap.c
@@ -30,6 +30,8 @@
 	pmd_t *pmd;
 	pte_t *pte;
 	int err = -EFAULT;
+	unsigned int flags = FAULT_FLAG_ALLOW_RETRY | FAULT_FLAG_KILLABLE |
+				 (is_write ? FAULT_FLAG_WRITE : 0);
 
 	*code_out = SEGV_MAPERR;
 
@@ -40,6 +42,7 @@
 	if (in_atomic())
 		goto out_nosemaphore;
 
+retry:
 	down_read(&mm->mmap_sem);
 	vma = find_vma(mm, address);
 	if (!vma)
@@ -65,7 +68,11 @@
 	do {
 		int fault;
 
-		fault = handle_mm_fault(mm, vma, address, is_write ? FAULT_FLAG_WRITE : 0);
+		fault = handle_mm_fault(mm, vma, address, flags);
+
+		if ((fault & VM_FAULT_RETRY) && fatal_signal_pending(current))
+			goto out_nosemaphore;
+
 		if (unlikely(fault & VM_FAULT_ERROR)) {
 			if (fault & VM_FAULT_OOM) {
 				goto out_of_memory;
@@ -75,10 +82,17 @@
 			}
 			BUG();
 		}
-		if (fault & VM_FAULT_MAJOR)
-			current->maj_flt++;
-		else
-			current->min_flt++;
+		if (flags & FAULT_FLAG_ALLOW_RETRY) {
+			if (fault & VM_FAULT_MAJOR)
+				current->maj_flt++;
+			else
+				current->min_flt++;
+			if (fault & VM_FAULT_RETRY) {
+				flags &= ~FAULT_FLAG_ALLOW_RETRY;
+
+				goto retry;
+			}
+		}
 
 		pgd = pgd_offset(mm, address);
 		pud = pud_offset(pgd, address);
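
The um fault handler now follows the generic retry protocol: handle_mm_fault() is called with FAULT_FLAG_ALLOW_RETRY | FAULT_FLAG_KILLABLE, and when VM_FAULT_RETRY comes back the mmap_sem has already been dropped, so the handler clears ALLOW_RETRY and takes the semaphore again for a single retry. A condensed sketch of that control flow (names taken from the hunk above, error handling elided):

	unsigned int flags = FAULT_FLAG_ALLOW_RETRY | FAULT_FLAG_KILLABLE |
			     (is_write ? FAULT_FLAG_WRITE : 0);
	int fault;

retry:
	down_read(&mm->mmap_sem);
	/* ... look up and validate the vma ... */
	fault = handle_mm_fault(mm, vma, address, flags);

	if ((fault & VM_FAULT_RETRY) && fatal_signal_pending(current))
		goto out_nosemaphore;	/* mmap_sem was already released */

	if (flags & FAULT_FLAG_ALLOW_RETRY) {
		if (fault & VM_FAULT_MAJOR)
			current->maj_flt++;
		else
			current->min_flt++;
		if (fault & VM_FAULT_RETRY) {
			/* only one retry: drop ALLOW_RETRY and go around again */
			flags &= ~FAULT_FLAG_ALLOW_RETRY;
			goto retry;
		}
	}
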
diff --git a/arch/unicore32/kernel/signal.c b/arch/unicore32/kernel/signal.c
index 7754df6..8adedb3 100644
--- a/arch/unicore32/kernel/signal.c
+++ b/arch/unicore32/kernel/signal.c
@@ -21,8 +21,6 @@
 #include <asm/cacheflush.h>
 #include <asm/ucontext.h>
 
-#define _BLOCKABLE (~(sigmask(SIGKILL) | sigmask(SIGSTOP)))
-
 /*
  * For UniCore syscalls, we encode the syscall number into the instruction.
  */
@@ -61,10 +59,8 @@
 	int err;
 
 	err = __copy_from_user(&set, &sf->uc.uc_sigmask, sizeof(set));
-	if (err == 0) {
-		sigdelsetmask(&set, ~_BLOCKABLE);
+	if (err == 0)
 		set_current_blocked(&set);
-	}
 
 	err |= __get_user(regs->UCreg_00, &sf->uc.uc_mcontext.regs.UCreg_00);
 	err |= __get_user(regs->UCreg_01, &sf->uc.uc_mcontext.regs.UCreg_01);
@@ -312,13 +308,12 @@
 /*
  * OK, we're invoking a handler
  */
-static int handle_signal(unsigned long sig, struct k_sigaction *ka,
-	      siginfo_t *info, sigset_t *oldset,
-	      struct pt_regs *regs, int syscall)
+static void handle_signal(unsigned long sig, struct k_sigaction *ka,
+	      siginfo_t *info, struct pt_regs *regs, int syscall)
 {
 	struct thread_info *thread = current_thread_info();
 	struct task_struct *tsk = current;
-	sigset_t blocked;
+	sigset_t *oldset = sigmask_to_save();
 	int usig = sig;
 	int ret;
 
@@ -364,15 +359,10 @@
 
 	if (ret != 0) {
 		force_sigsegv(sig, tsk);
-		return ret;
+		return;
 	}
 
-	/*
-	 * Block the signal if we were successful.
-	 */
-	block_sigmask(ka, sig);
-
-	return 0;
+	signal_delivered(sig, info, ka, regs, 0);
 }
 
 /*
@@ -399,32 +389,12 @@
 	if (!user_mode(regs))
 		return;
 
-	if (try_to_freeze())
-		goto no_signal;
-
 	signr = get_signal_to_deliver(&info, &ka, regs, NULL);
 	if (signr > 0) {
-		sigset_t *oldset;
-
-		if (test_thread_flag(TIF_RESTORE_SIGMASK))
-			oldset = &current->saved_sigmask;
-		else
-			oldset = &current->blocked;
-		if (handle_signal(signr, &ka, &info, oldset, regs, syscall)
-				== 0) {
-			/*
-			 * A signal was successfully delivered; the saved
-			 * sigmask will have been stored in the signal frame,
-			 * and will be restored by sigreturn, so we can simply
-			 * clear the TIF_RESTORE_SIGMASK flag.
-			 */
-			if (test_thread_flag(TIF_RESTORE_SIGMASK))
-				clear_thread_flag(TIF_RESTORE_SIGMASK);
-		}
+		handle_signal(signr, &ka, &info, regs, syscall);
 		return;
 	}
 
- no_signal:
 	/*
 	 * No signal to deliver to the process - restart the syscall.
 	 */
@@ -451,8 +421,7 @@
 	/* If there's no signal to deliver, we just put the saved
 	 * sigmask back.
 	 */
-	if (test_and_clear_thread_flag(TIF_RESTORE_SIGMASK))
-		set_current_blocked(&current->saved_sigmask);
+	restore_saved_sigmask();
 }
 
 asmlinkage void do_notify_resume(struct pt_regs *regs,
@@ -464,8 +433,6 @@
 	if (thread_flags & _TIF_NOTIFY_RESUME) {
 		clear_thread_flag(TIF_NOTIFY_RESUME);
 		tracehook_notify_resume(regs);
-		if (current->replacement_session_keyring)
-			key_replace_session_keyring();
 	}
 }
 
diff --git a/arch/x86/Kbuild b/arch/x86/Kbuild
index 0e9dec6..e5287d8 100644
--- a/arch/x86/Kbuild
+++ b/arch/x86/Kbuild
@@ -1,4 +1,3 @@
-
 obj-$(CONFIG_KVM) += kvm/
 
 # Xen paravirtualization support
@@ -7,6 +6,7 @@
 # lguest paravirtualization support
 obj-$(CONFIG_LGUEST_GUEST) += lguest/
 
+obj-y += realmode/
 obj-y += kernel/
 obj-y += mm/
 
diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig
index 66cc380..c70684f 100644
--- a/arch/x86/Kconfig
+++ b/arch/x86/Kconfig
@@ -32,6 +32,7 @@
 	select ARCH_WANT_OPTIONAL_GPIOLIB
 	select ARCH_WANT_FRAME_POINTERS
 	select HAVE_DMA_ATTRS
+	select HAVE_DMA_CONTIGUOUS if !SWIOTLB
 	select HAVE_KRETPROBES
 	select HAVE_OPTPROBES
 	select HAVE_FTRACE_MCOUNT_RECORD
@@ -92,6 +93,8 @@
 	select GENERIC_CLOCKEVENTS_BROADCAST if X86_64 || (X86_32 && X86_LOCAL_APIC)
 	select GENERIC_TIME_VSYSCALL if X86_64
 	select KTIME_SCALAR if X86_32
+	select GENERIC_STRNCPY_FROM_USER
+	select GENERIC_STRNLEN_USER
 
 config INSTRUCTION_DECODER
 	def_bool (KPROBES || PERF_EVENTS || UPROBES)
@@ -1503,6 +1506,8 @@
           This kernel feature allows a bzImage to be loaded directly
 	  by EFI firmware without the use of a bootloader.
 
+	  See Documentation/x86/efi-stub.txt for more information.
+
 config SECCOMP
 	def_bool y
 	prompt "Enable seccomp to safely compute untrusted bytecode"
diff --git a/arch/x86/boot/compressed/eboot.c b/arch/x86/boot/compressed/eboot.c
index 2c14e76..4e85f5f 100644
--- a/arch/x86/boot/compressed/eboot.c
+++ b/arch/x86/boot/compressed/eboot.c
@@ -16,6 +16,26 @@
 
 static efi_system_table_t *sys_table;
 
+static void efi_printk(char *str)
+{
+	char *s8;
+
+	for (s8 = str; *s8; s8++) {
+		struct efi_simple_text_output_protocol *out;
+		efi_char16_t ch[2] = { 0 };
+
+		ch[0] = *s8;
+		out = (struct efi_simple_text_output_protocol *)sys_table->con_out;
+
+		if (*s8 == '\n') {
+			efi_char16_t nl[2] = { '\r', 0 };
+			efi_call_phys2(out->output_string, out, nl);
+		}
+
+		efi_call_phys2(out->output_string, out, ch);
+	}
+}
+
 static efi_status_t __get_map(efi_memory_desc_t **map, unsigned long *map_size,
 			      unsigned long *desc_size)
 {
@@ -531,8 +551,10 @@
 				EFI_LOADER_DATA,
 				nr_initrds * sizeof(*initrds),
 				&initrds);
-	if (status != EFI_SUCCESS)
+	if (status != EFI_SUCCESS) {
+		efi_printk("Failed to alloc mem for initrds\n");
 		goto fail;
+	}
 
 	str = (char *)(unsigned long)hdr->cmd_line_ptr;
 	for (i = 0; i < nr_initrds; i++) {
@@ -575,32 +597,42 @@
 
 			status = efi_call_phys3(boottime->handle_protocol,
 					image->device_handle, &fs_proto, &io);
-			if (status != EFI_SUCCESS)
+			if (status != EFI_SUCCESS) {
+				efi_printk("Failed to handle fs_proto\n");
 				goto free_initrds;
+			}
 
 			status = efi_call_phys2(io->open_volume, io, &fh);
-			if (status != EFI_SUCCESS)
+			if (status != EFI_SUCCESS) {
+				efi_printk("Failed to open volume\n");
 				goto free_initrds;
+			}
 		}
 
 		status = efi_call_phys5(fh->open, fh, &h, filename_16,
 					EFI_FILE_MODE_READ, (u64)0);
-		if (status != EFI_SUCCESS)
+		if (status != EFI_SUCCESS) {
+			efi_printk("Failed to open initrd file\n");
 			goto close_handles;
+		}
 
 		initrd->handle = h;
 
 		info_sz = 0;
 		status = efi_call_phys4(h->get_info, h, &info_guid,
 					&info_sz, NULL);
-		if (status != EFI_BUFFER_TOO_SMALL)
+		if (status != EFI_BUFFER_TOO_SMALL) {
+			efi_printk("Failed to get initrd info size\n");
 			goto close_handles;
+		}
 
 grow:
 		status = efi_call_phys3(sys_table->boottime->allocate_pool,
 					EFI_LOADER_DATA, info_sz, &info);
-		if (status != EFI_SUCCESS)
+		if (status != EFI_SUCCESS) {
+			efi_printk("Failed to alloc mem for initrd info\n");
 			goto close_handles;
+		}
 
 		status = efi_call_phys4(h->get_info, h, &info_guid,
 					&info_sz, info);
@@ -612,8 +644,10 @@
 		file_sz = info->file_size;
 		efi_call_phys1(sys_table->boottime->free_pool, info);
 
-		if (status != EFI_SUCCESS)
+		if (status != EFI_SUCCESS) {
+			efi_printk("Failed to get initrd info\n");
 			goto close_handles;
+		}
 
 		initrd->size = file_sz;
 		initrd_total += file_sz;
@@ -629,11 +663,14 @@
 		 */
 		status = high_alloc(initrd_total, 0x1000,
 				   &initrd_addr, hdr->initrd_addr_max);
-		if (status != EFI_SUCCESS)
+		if (status != EFI_SUCCESS) {
+			efi_printk("Failed to alloc highmem for initrds\n");
 			goto close_handles;
+		}
 
 		/* We've run out of free low memory. */
 		if (initrd_addr > hdr->initrd_addr_max) {
+			efi_printk("We've run out of free low memory\n");
 			status = EFI_INVALID_PARAMETER;
 			goto free_initrd_total;
 		}
@@ -652,8 +689,10 @@
 				status = efi_call_phys3(fh->read,
 							initrds[j].handle,
 							&chunksize, addr);
-				if (status != EFI_SUCCESS)
+				if (status != EFI_SUCCESS) {
+					efi_printk("Failed to read initrd\n");
 					goto free_initrd_total;
+				}
 				addr += chunksize;
 				size -= chunksize;
 			}
@@ -674,7 +713,7 @@
 	low_free(initrd_total, initrd_addr);
 
 close_handles:
-	for (k = j; k < nr_initrds; k++)
+	for (k = j; k < i; k++)
 		efi_call_phys1(fh->close, initrds[k].handle);
 free_initrds:
 	efi_call_phys1(sys_table->boottime->free_pool, initrds);
@@ -732,8 +771,10 @@
 			options_size++;	/* NUL termination */
 
 			status = low_alloc(options_size, 1, &cmdline);
-			if (status != EFI_SUCCESS)
+			if (status != EFI_SUCCESS) {
+				efi_printk("Failed to alloc mem for cmdline\n");
 				goto fail;
+			}
 
 			s1 = (u8 *)(unsigned long)cmdline;
 			s2 = (u16 *)options;
@@ -895,12 +936,16 @@
 
 	status = efi_call_phys3(sys_table->boottime->handle_protocol,
 				handle, &proto, (void *)&image);
-	if (status != EFI_SUCCESS)
+	if (status != EFI_SUCCESS) {
+		efi_printk("Failed to get handle for LOADED_IMAGE_PROTOCOL\n");
 		goto fail;
+	}
 
 	status = low_alloc(0x4000, 1, (unsigned long *)&boot_params);
-	if (status != EFI_SUCCESS)
+	if (status != EFI_SUCCESS) {
+		efi_printk("Failed to alloc lowmem for boot params\n");
 		goto fail;
+	}
 
 	memset(boot_params, 0x0, 0x4000);
 
@@ -933,8 +978,10 @@
 	if (status != EFI_SUCCESS) {
 		status = low_alloc(hdr->init_size, hdr->kernel_alignment,
 				   &start);
-		if (status != EFI_SUCCESS)
+		if (status != EFI_SUCCESS) {
+			efi_printk("Failed to alloc mem for kernel\n");
 			goto fail;
+		}
 	}
 
 	hdr->code32_start = (__u32)start;
@@ -945,19 +992,25 @@
 	status = efi_call_phys3(sys_table->boottime->allocate_pool,
 				EFI_LOADER_DATA, sizeof(*gdt),
 				(void **)&gdt);
-	if (status != EFI_SUCCESS)
+	if (status != EFI_SUCCESS) {
+		efi_printk("Failed to alloc mem for gdt structure\n");
 		goto fail;
+	}
 
 	gdt->size = 0x800;
 	status = low_alloc(gdt->size, 8, (unsigned long *)&gdt->address);
-	if (status != EFI_SUCCESS)
+	if (status != EFI_SUCCESS) {
+		efi_printk("Failed to alloc mem for gdt\n");
 		goto fail;
+	}
 
 	status = efi_call_phys3(sys_table->boottime->allocate_pool,
 				EFI_LOADER_DATA, sizeof(*idt),
 				(void **)&idt);
-	if (status != EFI_SUCCESS)
+	if (status != EFI_SUCCESS) {
+		efi_printk("Failed to alloc mem for idt structure\n");
 		goto fail;
+	}
 
 	idt->size = 0;
 	idt->address = 0;
diff --git a/arch/x86/boot/compressed/eboot.h b/arch/x86/boot/compressed/eboot.h
index 3925166..3b6e156 100644
--- a/arch/x86/boot/compressed/eboot.h
+++ b/arch/x86/boot/compressed/eboot.h
@@ -58,4 +58,10 @@
 	void *blt;
 };
 
+struct efi_simple_text_output_protocol {
+	void *reset;
+	void *output_string;
+	void *test_string;
+};
+
 #endif /* BOOT_COMPRESSED_EBOOT_H */
diff --git a/arch/x86/boot/header.S b/arch/x86/boot/header.S
index 8bbea6a..efe5acf 100644
--- a/arch/x86/boot/header.S
+++ b/arch/x86/boot/header.S
@@ -94,10 +94,10 @@
 
 	.section ".bsdata", "a"
 bugger_off_msg:
-	.ascii	"Direct booting from floppy is no longer supported.\r\n"
-	.ascii	"Please use a boot loader program instead.\r\n"
+	.ascii	"Direct floppy boot is not supported. "
+	.ascii	"Use a boot loader program instead.\r\n"
 	.ascii	"\n"
-	.ascii	"Remove disk and press any key to reboot . . .\r\n"
+	.ascii	"Remove disk and press any key to reboot ...\r\n"
 	.byte	0
 
 #ifdef CONFIG_EFI_STUB
@@ -111,7 +111,7 @@
 #else
 	.word	0x8664				# x86-64
 #endif
-	.word	2				# nr_sections
+	.word	3				# nr_sections
 	.long	0 				# TimeDateStamp
 	.long	0				# PointerToSymbolTable
 	.long	1				# NumberOfSymbols
@@ -158,8 +158,8 @@
 #else
 	.quad	0				# ImageBase
 #endif
-	.long	0x1000				# SectionAlignment
-	.long	0x200				# FileAlignment
+	.long	0x20				# SectionAlignment
+	.long	0x20				# FileAlignment
 	.word	0				# MajorOperatingSystemVersion
 	.word	0				# MinorOperatingSystemVersion
 	.word	0				# MajorImageVersion
@@ -200,8 +200,10 @@
 
 	# Section table
 section_table:
-	.ascii	".text"
-	.byte	0
+	#
+	# The offset & size fields are filled in by build.c.
+	#
+	.ascii	".setup"
 	.byte	0
 	.byte	0
 	.long	0
@@ -217,9 +219,8 @@
 
 	#
 	# The EFI application loader requires a relocation section
-	# because EFI applications must be relocatable. But since
-	# we don't need the loader to fixup any relocs for us, we
-	# just create an empty (zero-length) .reloc section header.
+	# because EFI applications must be relocatable. The .reloc
+	# offset & size fields are filled in by build.c.
 	#
 	.ascii	".reloc"
 	.byte	0
@@ -233,6 +234,25 @@
 	.word	0				# NumberOfRelocations
 	.word	0				# NumberOfLineNumbers
 	.long	0x42100040			# Characteristics (section flags)
+
+	#
+	# The offset & size fields are filled in by build.c.
+	#
+	.ascii	".text"
+	.byte	0
+	.byte	0
+	.byte	0
+	.long	0
+	.long	0x0				# startup_{32,64}
+	.long	0				# Size of initialized data
+						# on disk
+	.long	0x0				# startup_{32,64}
+	.long	0				# PointerToRelocations
+	.long	0				# PointerToLineNumbers
+	.word	0				# NumberOfRelocations
+	.word	0				# NumberOfLineNumbers
+	.long	0x60500020			# Characteristics (section flags)
+
 #endif /* CONFIG_EFI_STUB */
 
 	# Kernel attributes; used by setup.  This is part 1 of the
diff --git a/arch/x86/boot/tools/build.c b/arch/x86/boot/tools/build.c
index 3f61f6e..4b8e165 100644
--- a/arch/x86/boot/tools/build.c
+++ b/arch/x86/boot/tools/build.c
@@ -50,6 +50,8 @@
 u8 buf[SETUP_SECT_MAX*512];
 int is_big_kernel;
 
+#define PECOFF_RELOC_RESERVE 0x20
+
 /*----------------------------------------------------------------------*/
 
 static const u32 crctab32[] = {
@@ -133,11 +135,103 @@
 	die("Usage: build setup system [> image]");
 }
 
+#ifdef CONFIG_EFI_STUB
+
+static void update_pecoff_section_header(char *section_name, u32 offset, u32 size)
+{
+	unsigned int pe_header;
+	unsigned short num_sections;
+	u8 *section;
+
+	pe_header = get_unaligned_le32(&buf[0x3c]);
+	num_sections = get_unaligned_le16(&buf[pe_header + 6]);
+
+#ifdef CONFIG_X86_32
+	section = &buf[pe_header + 0xa8];
+#else
+	section = &buf[pe_header + 0xb8];
+#endif
+
+	while (num_sections > 0) {
+		if (strncmp((char*)section, section_name, 8) == 0) {
+			/* section header size field */
+			put_unaligned_le32(size, section + 0x8);
+
+			/* section header vma field */
+			put_unaligned_le32(offset, section + 0xc);
+
+			/* section header 'size of initialised data' field */
+			put_unaligned_le32(size, section + 0x10);
+
+			/* section header 'file offset' field */
+			put_unaligned_le32(offset, section + 0x14);
+
+			break;
+		}
+		section += 0x28;
+		num_sections--;
+	}
+}
+
+static void update_pecoff_setup_and_reloc(unsigned int size)
+{
+	u32 setup_offset = 0x200;
+	u32 reloc_offset = size - PECOFF_RELOC_RESERVE;
+	u32 setup_size = reloc_offset - setup_offset;
+
+	update_pecoff_section_header(".setup", setup_offset, setup_size);
+	update_pecoff_section_header(".reloc", reloc_offset, PECOFF_RELOC_RESERVE);
+
+	/*
+	 * Modify .reloc section contents with a single entry. The
+	 * relocation is applied to offset 10 of the relocation section.
+	 */
+	put_unaligned_le32(reloc_offset + 10, &buf[reloc_offset]);
+	put_unaligned_le32(10, &buf[reloc_offset + 4]);
+}
+
+static void update_pecoff_text(unsigned int text_start, unsigned int file_sz)
+{
+	unsigned int pe_header;
+	unsigned int text_sz = file_sz - text_start;
+
+	pe_header = get_unaligned_le32(&buf[0x3c]);
+
+	/* Size of image */
+	put_unaligned_le32(file_sz, &buf[pe_header + 0x50]);
+
+	/*
+	 * Size of code: Subtract the size of the first sector (512 bytes)
+	 * which includes the header.
+	 */
+	put_unaligned_le32(file_sz - 512, &buf[pe_header + 0x1c]);
+
+#ifdef CONFIG_X86_32
+	/*
+	 * Address of entry point.
+	 *
+	 * The EFI stub entry point is +16 bytes from the start of
+	 * the .text section.
+	 */
+	put_unaligned_le32(text_start + 16, &buf[pe_header + 0x28]);
+#else
+	/*
+	 * Address of entry point. startup_32 is at the beginning and
+	 * the 64-bit entry point (startup_64) is always 512 bytes
+	 * after. The EFI stub entry point is 16 bytes after that, as
+	 * the first instruction allows legacy loaders to jump over
+	 * the EFI stub initialisation
+	 */
+	put_unaligned_le32(text_start + 528, &buf[pe_header + 0x28]);
+#endif /* CONFIG_X86_32 */
+
+	update_pecoff_section_header(".text", text_start, text_sz);
+}
+
+#endif /* CONFIG_EFI_STUB */
+
 int main(int argc, char ** argv)
 {
-#ifdef CONFIG_EFI_STUB
-	unsigned int file_sz, pe_header;
-#endif
 	unsigned int i, sz, setup_sectors;
 	int c;
 	u32 sys_size;
@@ -163,6 +257,12 @@
 		die("Boot block hasn't got boot flag (0xAA55)");
 	fclose(file);
 
+#ifdef CONFIG_EFI_STUB
+	/* Reserve 0x20 bytes for .reloc section */
+	memset(buf+c, 0, PECOFF_RELOC_RESERVE);
+	c += PECOFF_RELOC_RESERVE;
+#endif
+
 	/* Pad unused space with zeros */
 	setup_sectors = (c + 511) / 512;
 	if (setup_sectors < SETUP_SECT_MIN)
@@ -170,6 +270,10 @@
 	i = setup_sectors*512;
 	memset(buf+c, 0, i-c);
 
+#ifdef CONFIG_EFI_STUB
+	update_pecoff_setup_and_reloc(i);
+#endif
+
 	/* Set the default root device */
 	put_unaligned_le16(DEFAULT_ROOT_DEV, &buf[508]);
 
@@ -194,66 +298,8 @@
 	put_unaligned_le32(sys_size, &buf[0x1f4]);
 
 #ifdef CONFIG_EFI_STUB
-	file_sz = sz + i + ((sys_size * 16) - sz);
-
-	pe_header = get_unaligned_le32(&buf[0x3c]);
-
-	/* Size of image */
-	put_unaligned_le32(file_sz, &buf[pe_header + 0x50]);
-
-	/*
-	 * Subtract the size of the first section (512 bytes) which
-	 * includes the header and .reloc section. The remaining size
-	 * is that of the .text section.
-	 */
-	file_sz -= 512;
-
-	/* Size of code */
-	put_unaligned_le32(file_sz, &buf[pe_header + 0x1c]);
-
-#ifdef CONFIG_X86_32
-	/*
-	 * Address of entry point.
-	 *
-	 * The EFI stub entry point is +16 bytes from the start of
-	 * the .text section.
-	 */
-	put_unaligned_le32(i + 16, &buf[pe_header + 0x28]);
-
-	/* .text size */
-	put_unaligned_le32(file_sz, &buf[pe_header + 0xb0]);
-
-	/* .text vma */
-	put_unaligned_le32(0x200, &buf[pe_header + 0xb4]);
-
-	/* .text size of initialised data */
-	put_unaligned_le32(file_sz, &buf[pe_header + 0xb8]);
-
-	/* .text file offset */
-	put_unaligned_le32(0x200, &buf[pe_header + 0xbc]);
-#else
-	/*
-	 * Address of entry point. startup_32 is at the beginning and
-	 * the 64-bit entry point (startup_64) is always 512 bytes
-	 * after. The EFI stub entry point is 16 bytes after that, as
-	 * the first instruction allows legacy loaders to jump over
-	 * the EFI stub initialisation
-	 */
-	put_unaligned_le32(i + 528, &buf[pe_header + 0x28]);
-
-	/* .text size */
-	put_unaligned_le32(file_sz, &buf[pe_header + 0xc0]);
-
-	/* .text vma */
-	put_unaligned_le32(0x200, &buf[pe_header + 0xc4]);
-
-	/* .text size of initialised data */
-	put_unaligned_le32(file_sz, &buf[pe_header + 0xc8]);
-
-	/* .text file offset */
-	put_unaligned_le32(0x200, &buf[pe_header + 0xcc]);
-#endif /* CONFIG_X86_32 */
-#endif /* CONFIG_EFI_STUB */
+	update_pecoff_text(setup_sectors * 512, sz + i + ((sys_size * 16) - sz));
+#endif
 
 	crc = partial_crc32(buf, i, crc);
 	if (fwrite(buf, 1, i, stdout) != i)
diff --git a/arch/x86/crypto/aesni-intel_asm.S b/arch/x86/crypto/aesni-intel_asm.S
index be6d9e3..3470624 100644
--- a/arch/x86/crypto/aesni-intel_asm.S
+++ b/arch/x86/crypto/aesni-intel_asm.S
@@ -2460,10 +2460,12 @@
 	pxor IN3, STATE4
 	movaps IN4, IV
 #else
-	pxor (INP), STATE2
-	pxor 0x10(INP), STATE3
 	pxor IN1, STATE4
 	movaps IN2, IV
+	movups (INP), IN1
+	pxor IN1, STATE2
+	movups 0x10(INP), IN2
+	pxor IN2, STATE3
 #endif
 	movups STATE1, (OUTP)
 	movups STATE2, 0x10(OUTP)
diff --git a/arch/x86/ia32/ia32_signal.c b/arch/x86/ia32/ia32_signal.c
index 98bd70f..673ac9b 100644
--- a/arch/x86/ia32/ia32_signal.c
+++ b/arch/x86/ia32/ia32_signal.c
@@ -38,7 +38,7 @@
 int copy_siginfo_to_user32(compat_siginfo_t __user *to, siginfo_t *from)
 {
 	int err = 0;
-	bool ia32 = is_ia32_task();
+	bool ia32 = test_thread_flag(TIF_IA32);
 
 	if (!access_ok(VERIFY_WRITE, to, sizeof(compat_siginfo_t)))
 		return -EFAULT;
@@ -273,7 +273,6 @@
 				    sizeof(frame->extramask))))
 		goto badframe;
 
-	sigdelsetmask(&set, ~_BLOCKABLE);
 	set_current_blocked(&set);
 
 	if (ia32_restore_sigcontext(regs, &frame->sc, &ax))
@@ -299,7 +298,6 @@
 	if (__copy_from_user(&set, &frame->uc.uc_sigmask, sizeof(set)))
 		goto badframe;
 
-	sigdelsetmask(&set, ~_BLOCKABLE);
 	set_current_blocked(&set);
 
 	if (ia32_restore_sigcontext(regs, &frame->uc.uc_mcontext, &ax))
diff --git a/arch/x86/include/asm/acpi.h b/arch/x86/include/asm/acpi.h
index 610001d..0c44630 100644
--- a/arch/x86/include/asm/acpi.h
+++ b/arch/x86/include/asm/acpi.h
@@ -29,7 +29,7 @@
 #include <asm/processor.h>
 #include <asm/mmu.h>
 #include <asm/mpspec.h>
-#include <asm/trampoline.h>
+#include <asm/realmode.h>
 
 #define COMPILER_DEPENDENT_INT64   long long
 #define COMPILER_DEPENDENT_UINT64  unsigned long long
@@ -117,11 +117,8 @@
 /* Low-level suspend routine. */
 extern int acpi_suspend_lowlevel(void);
 
-extern const unsigned char acpi_wakeup_code[];
-#define acpi_wakeup_address (__pa(TRAMPOLINE_SYM(acpi_wakeup_code)))
-
-/* early initialization routine */
-extern void acpi_reserve_wakeup_memory(void);
+/* Physical address to resume after wakeup */
+#define acpi_wakeup_address ((unsigned long)(real_mode_header->wakeup_start))
 
 /*
  * Check if the CPU can handle C2 and deeper
diff --git a/arch/x86/include/asm/bitops.h b/arch/x86/include/asm/bitops.h
index b97596e..a6983b2 100644
--- a/arch/x86/include/asm/bitops.h
+++ b/arch/x86/include/asm/bitops.h
@@ -15,6 +15,8 @@
 #include <linux/compiler.h>
 #include <asm/alternative.h>
 
+#define BIT_64(n)			(U64_C(1) << (n))
+
 /*
  * These have to be done with inline assembly: that way the bit-setting
  * is guaranteed to be atomic. All bit operations return 0 if the bit
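
BIT_64() expands to U64_C(1) << (n), so the shift is performed in 64-bit arithmetic even on 32-bit builds. A small, hedged example of why that matters (a plain 1 << 62 would overflow an int):

	u64 mask = BIT_64(62) | BIT_64(2);	/* well-defined on both 32-bit and 64-bit */
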
diff --git a/arch/x86/include/asm/cpufeature.h b/arch/x86/include/asm/cpufeature.h
index 340ee49..f91e80f 100644
--- a/arch/x86/include/asm/cpufeature.h
+++ b/arch/x86/include/asm/cpufeature.h
@@ -176,7 +176,7 @@
 #define X86_FEATURE_XSAVEOPT	(7*32+ 4) /* Optimized Xsave */
 #define X86_FEATURE_PLN		(7*32+ 5) /* Intel Power Limit Notification */
 #define X86_FEATURE_PTS		(7*32+ 6) /* Intel Package Thermal Status */
-#define X86_FEATURE_DTS		(7*32+ 7) /* Digital Thermal Sensor */
+#define X86_FEATURE_DTHERM	(7*32+ 7) /* Digital Thermal Sensor */
 #define X86_FEATURE_HW_PSTATE	(7*32+ 8) /* AMD HW-PState */
 
 /* Virtualization flags: Linux defined, word 8 */
diff --git a/arch/x86/include/asm/dma-contiguous.h b/arch/x86/include/asm/dma-contiguous.h
new file mode 100644
index 0000000..c092416
--- /dev/null
+++ b/arch/x86/include/asm/dma-contiguous.h
@@ -0,0 +1,13 @@
+#ifndef ASMX86_DMA_CONTIGUOUS_H
+#define ASMX86_DMA_CONTIGUOUS_H
+
+#ifdef __KERNEL__
+
+#include <linux/types.h>
+#include <asm-generic/dma-contiguous.h>
+
+static inline void
+dma_contiguous_early_fixup(phys_addr_t base, unsigned long size) { }
+
+#endif
+#endif
diff --git a/arch/x86/include/asm/dma-mapping.h b/arch/x86/include/asm/dma-mapping.h
index 61c0bd2..f7b4c79 100644
--- a/arch/x86/include/asm/dma-mapping.h
+++ b/arch/x86/include/asm/dma-mapping.h
@@ -13,6 +13,7 @@
 #include <asm/io.h>
 #include <asm/swiotlb.h>
 #include <asm-generic/dma-coherent.h>
+#include <linux/dma-contiguous.h>
 
 #ifdef CONFIG_ISA
 # define ISA_DMA_BIT_MASK DMA_BIT_MASK(24)
@@ -62,6 +63,10 @@
 					dma_addr_t *dma_addr, gfp_t flag,
 					struct dma_attrs *attrs);
 
+extern void dma_generic_free_coherent(struct device *dev, size_t size,
+				      void *vaddr, dma_addr_t dma_addr,
+				      struct dma_attrs *attrs);
+
 #ifdef CONFIG_X86_DMA_REMAP /* Platform code defines bridge-specific code */
 extern bool dma_capable(struct device *dev, dma_addr_t addr, size_t size);
 extern dma_addr_t phys_to_dma(struct device *dev, phys_addr_t paddr);
diff --git a/arch/x86/include/asm/ftrace.h b/arch/x86/include/asm/ftrace.h
index 18d9005..b0767bc 100644
--- a/arch/x86/include/asm/ftrace.h
+++ b/arch/x86/include/asm/ftrace.h
@@ -34,7 +34,7 @@
 
 #ifndef __ASSEMBLY__
 extern void mcount(void);
-extern int modifying_ftrace_code;
+extern atomic_t modifying_ftrace_code;
 
 static inline unsigned long ftrace_call_adjust(unsigned long addr)
 {
diff --git a/arch/x86/include/asm/nmi.h b/arch/x86/include/asm/nmi.h
index 0e3793b..dc580c4 100644
--- a/arch/x86/include/asm/nmi.h
+++ b/arch/x86/include/asm/nmi.h
@@ -54,6 +54,20 @@
 	__register_nmi_handler((t), &fn##_na);	\
 })
 
+/*
+ * For special handlers that register/unregister in the
+ * init section only.  This should be considered rare.
+ */
+#define register_nmi_handler_initonly(t, fn, fg, n)		\
+({							\
+	static struct nmiaction fn##_na __initdata = {		\
+		.handler = (fn),			\
+		.name = (n),				\
+		.flags = (fg),				\
+	};						\
+	__register_nmi_handler((t), &fn##_na);	\
+})
+
 int __register_nmi_handler(unsigned int, struct nmiaction *);
 
 void unregister_nmi_handler(unsigned int, const char *);
diff --git a/arch/x86/include/asm/pgtable-3level.h b/arch/x86/include/asm/pgtable-3level.h
index effff47..cb00ccc 100644
--- a/arch/x86/include/asm/pgtable-3level.h
+++ b/arch/x86/include/asm/pgtable-3level.h
@@ -31,6 +31,60 @@
 	ptep->pte_low = pte.pte_low;
 }
 
+#define pmd_read_atomic pmd_read_atomic
+/*
+ * pte_offset_map_lock on 32bit PAE kernels was reading the pmd_t with
+ * a "*pmdp" dereference done by gcc. Problem is, in certain places
+ * where pte_offset_map_lock is called, concurrent page faults are
+ * allowed if the mmap_sem is held for reading. An example is mincore
+ * vs page faults vs MADV_DONTNEED. On the page fault side
+ * pmd_populate rightfully does a set_64bit, but if we're reading the
+ * pmd_t with a "*pmdp" on the mincore side, an SMP race can happen
+ * because gcc will not read the 64bit of the pmd atomically. To fix
+ * this, all places that run pmd_offset_map_lock() while holding the
+ * mmap_sem in read mode shall read the pmdp pointer using this
+ * function to know if the pmd is null or not, and in turn to know if
+ * they can run pmd_offset_map_lock or pmd_trans_huge or other pmd
+ * operations.
+ *
+ * Without THP, if the mmap_sem is held for reading, the pmd can only
+ * transition from null to not null while pmd_read_atomic runs. So
+ * we can always return atomic pmd values with this function.
+ *
+ * With THP, if the mmap_sem is held for reading, the pmd can become
+ * trans_huge or none or point to a pte (and in turn become "stable")
+ * at any time under pmd_read_atomic. We could read it really
+ * atomically here with an atomic64_read for the THP enabled case (and
+ * it would be a whole lot simpler), but to avoid using cmpxchg8b we
+ * only return an atomic pmdval if the low part of the pmdval is later
+ * found stable (i.e. pointing to a pte). And we're returning a none
+ * pmdval if the low part of the pmd is none. In some cases the high
+ * and low part of the pmdval returned may not be consistent if THP is
+ * enabled (the low part may point to a previously mapped hugepage,
+ * while the high part may point to a more recently mapped hugepage),
+ * but pmd_none_or_trans_huge_or_clear_bad() only needs the low part
+ * of the pmd to be read atomically to decide if the pmd is unstable
+ * or not, with the only exception of when the low part of the pmd is
+ * zero in which case we return a none pmd.
+ */
+static inline pmd_t pmd_read_atomic(pmd_t *pmdp)
+{
+	pmdval_t ret;
+	u32 *tmp = (u32 *)pmdp;
+
+	ret = (pmdval_t) (*tmp);
+	if (ret) {
+		/*
+		 * If the low part is null, we must not read the high part
+		 * or we can end up with a partial pmd.
+		 */
+		smp_rmb();
+		ret |= ((pmdval_t)*(tmp + 1)) << 32;
+	}
+
+	return (pmd_t) { ret };
+}
+
 static inline void native_set_pte_atomic(pte_t *ptep, pte_t pte)
 {
 	set_64bit((unsigned long long *)(ptep), native_pte_val(pte));
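
For readers of the comment above, a hedged sketch of the consumer side: a walker that holds mmap_sem for reading (mincore-style) snapshots the pmd with pmd_read_atomic() and only descends to the pte level once that snapshot is known to be stable, instead of dereferencing *pmdp directly. The helper and do_pte_level() below are hypothetical names used only for illustration:

	static int walk_one_pmd(struct mm_struct *mm, pmd_t *pmdp,
				unsigned long addr)
	{
		pmd_t pmd = pmd_read_atomic(pmdp);	/* low/high read, low part first */

		/* pairs with the set_64bit() done by pmd_populate() on the fault side */
		barrier();

		if (pmd_none(pmd) || pmd_trans_huge(pmd))
			return 0;	/* nothing stable to map at the pte level */

		/* the low part points to a pte page: safe to take the pte lock */
		return do_pte_level(mm, pmdp, addr);	/* hypothetical */
	}
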
diff --git a/arch/x86/include/asm/posix_types_32.h b/arch/x86/include/asm/posix_types_32.h
index 99f262e..8e52505 100644
--- a/arch/x86/include/asm/posix_types_32.h
+++ b/arch/x86/include/asm/posix_types_32.h
@@ -10,9 +10,6 @@
 typedef unsigned short	__kernel_mode_t;
 #define __kernel_mode_t __kernel_mode_t
 
-typedef unsigned short	__kernel_nlink_t;
-#define __kernel_nlink_t __kernel_nlink_t
-
 typedef unsigned short	__kernel_ipc_pid_t;
 #define __kernel_ipc_pid_t __kernel_ipc_pid_t
 
diff --git a/arch/x86/include/asm/processor.h b/arch/x86/include/asm/processor.h
index 7745b257..39bc577 100644
--- a/arch/x86/include/asm/processor.h
+++ b/arch/x86/include/asm/processor.h
@@ -544,13 +544,16 @@
  * enable), so that any CPU's that boot up
  * after us can get the correct flags.
  */
-extern unsigned long		mmu_cr4_features;
+extern unsigned long mmu_cr4_features;
+extern u32 *trampoline_cr4_features;
 
 static inline void set_in_cr4(unsigned long mask)
 {
 	unsigned long cr4;
 
 	mmu_cr4_features |= mask;
+	if (trampoline_cr4_features)
+		*trampoline_cr4_features = mmu_cr4_features;
 	cr4 = read_cr4();
 	cr4 |= mask;
 	write_cr4(cr4);
@@ -561,6 +564,8 @@
 	unsigned long cr4;
 
 	mmu_cr4_features &= ~mask;
+	if (trampoline_cr4_features)
+		*trampoline_cr4_features = mmu_cr4_features;
 	cr4 = read_cr4();
 	cr4 &= ~mask;
 	write_cr4(cr4);
diff --git a/arch/x86/include/asm/realmode.h b/arch/x86/include/asm/realmode.h
new file mode 100644
index 0000000..fce3f4a
--- /dev/null
+++ b/arch/x86/include/asm/realmode.h
@@ -0,0 +1,62 @@
+#ifndef _ARCH_X86_REALMODE_H
+#define _ARCH_X86_REALMODE_H
+
+#include <linux/types.h>
+#include <asm/io.h>
+
+/* This must match data at realmode.S */
+struct real_mode_header {
+	u32	text_start;
+	u32	ro_end;
+	/* SMP trampoline */
+	u32	trampoline_start;
+	u32	trampoline_status;
+	u32	trampoline_header;
+#ifdef CONFIG_X86_64
+	u32	trampoline_pgd;
+#endif
+	/* ACPI S3 wakeup */
+#ifdef CONFIG_ACPI_SLEEP
+	u32	wakeup_start;
+	u32	wakeup_header;
+#endif
+	/* APM/BIOS reboot */
+#ifdef CONFIG_X86_32
+	u32	machine_real_restart_asm;
+#endif
+};
+
+/* This must match data at trampoline_32/64.S */
+struct trampoline_header {
+#ifdef CONFIG_X86_32
+	u32 start;
+	u16 gdt_pad;
+	u16 gdt_limit;
+	u32 gdt_base;
+#else
+	u64 start;
+	u64 efer;
+	u32 cr4;
+#endif
+};
+
+extern struct real_mode_header *real_mode_header;
+extern unsigned char real_mode_blob_end[];
+
+extern unsigned long init_rsp;
+extern unsigned long initial_code;
+extern unsigned long initial_gs;
+
+extern unsigned char real_mode_blob[];
+extern unsigned char real_mode_relocs[];
+
+#ifdef CONFIG_X86_32
+extern unsigned char startup_32_smp[];
+extern unsigned char boot_gdt[];
+#else
+extern unsigned char secondary_startup_64[];
+#endif
+
+extern void __init setup_real_mode(void);
+
+#endif /* _ARCH_X86_REALMODE_H */
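
Once setup_real_mode() has copied and relocated the real-mode blob, the real_mode_header fields read as plain physical addresses; the acpi_wakeup_address change earlier in this series is the canonical example. A minimal, hedged illustration of the same pattern:

	/* physical address the firmware jumps to on S3 resume (see acpi.h above) */
	unsigned long wakeup_pa = (unsigned long)real_mode_header->wakeup_start;
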
diff --git a/arch/x86/include/asm/sighandling.h b/arch/x86/include/asm/sighandling.h
index ada93b3..beff97f 100644
--- a/arch/x86/include/asm/sighandling.h
+++ b/arch/x86/include/asm/sighandling.h
@@ -7,8 +7,6 @@
 
 #include <asm/processor-flags.h>
 
-#define _BLOCKABLE (~(sigmask(SIGKILL) | sigmask(SIGSTOP)))
-
 #define __FIX_EFLAGS	(X86_EFLAGS_AC | X86_EFLAGS_OF | \
 			 X86_EFLAGS_DF | X86_EFLAGS_TF | X86_EFLAGS_SF | \
 			 X86_EFLAGS_ZF | X86_EFLAGS_AF | X86_EFLAGS_PF | \
diff --git a/arch/x86/include/asm/sta2x11.h b/arch/x86/include/asm/sta2x11.h
new file mode 100644
index 0000000..e9d32df
--- /dev/null
+++ b/arch/x86/include/asm/sta2x11.h
@@ -0,0 +1,12 @@
+/*
+ * Header file for STMicroelectronics ConneXt (STA2X11) IOHub
+ */
+#ifndef __ASM_STA2X11_H
+#define __ASM_STA2X11_H
+
+#include <linux/pci.h>
+
+/* This needs to be called from the MFD to configure its sub-devices */
+struct sta2x11_instance *sta2x11_get_instance(struct pci_dev *pdev);
+
+#endif /* __ASM_STA2X11_H */
diff --git a/arch/x86/include/asm/thread_info.h b/arch/x86/include/asm/thread_info.h
index 5c25de0..89f794f 100644
--- a/arch/x86/include/asm/thread_info.h
+++ b/arch/x86/include/asm/thread_info.h
@@ -248,7 +248,23 @@
 {
 	struct thread_info *ti = current_thread_info();
 	ti->status |= TS_RESTORE_SIGMASK;
-	set_bit(TIF_SIGPENDING, (unsigned long *)&ti->flags);
+	WARN_ON(!test_bit(TIF_SIGPENDING, (unsigned long *)&ti->flags));
+}
+static inline void clear_restore_sigmask(void)
+{
+	current_thread_info()->status &= ~TS_RESTORE_SIGMASK;
+}
+static inline bool test_restore_sigmask(void)
+{
+	return current_thread_info()->status & TS_RESTORE_SIGMASK;
+}
+static inline bool test_and_clear_restore_sigmask(void)
+{
+	struct thread_info *ti = current_thread_info();
+	if (!(ti->status & TS_RESTORE_SIGMASK))
+		return false;
+	ti->status &= ~TS_RESTORE_SIGMASK;
+	return true;
 }
 
 static inline bool is_ia32_task(void)
diff --git a/arch/x86/include/asm/trampoline.h b/arch/x86/include/asm/trampoline.h
deleted file mode 100644
index feca311..0000000
--- a/arch/x86/include/asm/trampoline.h
+++ /dev/null
@@ -1,39 +0,0 @@
-#ifndef _ASM_X86_TRAMPOLINE_H
-#define _ASM_X86_TRAMPOLINE_H
-
-#ifndef __ASSEMBLY__
-
-#include <linux/types.h>
-#include <asm/io.h>
-
-/*
- * Trampoline 80x86 program as an array.  These are in the init rodata
- * segment, but that's okay, because we only care about the relative
- * addresses of the symbols.
- */
-extern const unsigned char x86_trampoline_start [];
-extern const unsigned char x86_trampoline_end   [];
-extern unsigned char *x86_trampoline_base;
-
-extern unsigned long init_rsp;
-extern unsigned long initial_code;
-extern unsigned long initial_gs;
-
-extern void __init setup_trampolines(void);
-
-extern const unsigned char trampoline_data[];
-extern const unsigned char trampoline_status[];
-
-#define TRAMPOLINE_SYM(x)						\
-	((void *)(x86_trampoline_base +					\
-		  ((const unsigned char *)(x) - x86_trampoline_start)))
-
-/* Address of the SMP trampoline */
-static inline unsigned long trampoline_address(void)
-{
-	return virt_to_phys(TRAMPOLINE_SYM(trampoline_data));
-}
-
-#endif /* __ASSEMBLY__ */
-
-#endif /* _ASM_X86_TRAMPOLINE_H */
diff --git a/arch/x86/include/asm/uaccess.h b/arch/x86/include/asm/uaccess.h
index 851fe0d..e1f3a17 100644
--- a/arch/x86/include/asm/uaccess.h
+++ b/arch/x86/include/asm/uaccess.h
@@ -32,9 +32,9 @@
 
 #define segment_eq(a, b)	((a).seg == (b).seg)
 
-#define __addr_ok(addr)					\
-	((unsigned long __force)(addr) <		\
-	 (current_thread_info()->addr_limit.seg))
+#define user_addr_max() (current_thread_info()->addr_limit.seg)
+#define __addr_ok(addr) 	\
+	((unsigned long __force)(addr) < user_addr_max())
 
 /*
  * Test whether a block of memory is a valid user space address.
@@ -46,14 +46,14 @@
  * This needs 33-bit (65-bit for x86_64) arithmetic. We have a carry...
  */
 
-#define __range_not_ok(addr, size)					\
+#define __range_not_ok(addr, size, limit)				\
 ({									\
 	unsigned long flag, roksum;					\
 	__chk_user_ptr(addr);						\
 	asm("add %3,%1 ; sbb %0,%0 ; cmp %1,%4 ; sbb $0,%0"		\
 	    : "=&r" (flag), "=r" (roksum)				\
 	    : "1" (addr), "g" ((long)(size)),				\
-	      "rm" (current_thread_info()->addr_limit.seg));		\
+	      "rm" (limit));						\
 	flag;								\
 })
 
@@ -76,7 +76,8 @@
  * checks that the pointer is in the user space range - after calling
  * this function, memory access functions may still return -EFAULT.
  */
-#define access_ok(type, addr, size) (likely(__range_not_ok(addr, size) == 0))
+#define access_ok(type, addr, size) \
+	(likely(__range_not_ok(addr, size, user_addr_max()) == 0))
 
 /*
  * The exception table consists of pairs of addresses relative to the
@@ -565,6 +566,9 @@
 extern __must_check long
 strncpy_from_user(char *dst, const char __user *src, long count);
 
+extern __must_check long strlen_user(const char __user *str);
+extern __must_check long strnlen_user(const char __user *str, long n);
+
 /*
  * movsl can be slow when source and dest are not both 8-byte aligned
  */
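
__range_not_ok() now takes its limit explicitly instead of always reading the current thread's addr_limit, so callers that need a fixed bound can pass one in, while access_ok() keeps its old behaviour via user_addr_max(). A hedged sketch of both uses; the TASK_SIZE bound in the second check is an illustrative assumption, not something taken from this hunk:

	/* normal path: bound by the current thread's address limit */
	if (!access_ok(VERIFY_READ, ubuf, len))
		return -EFAULT;

	/* explicit-limit path: check against a fixed bound such as TASK_SIZE */
	if (__range_not_ok(ubuf, len, TASK_SIZE))
		return -EFAULT;
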
diff --git a/arch/x86/include/asm/uaccess_32.h b/arch/x86/include/asm/uaccess_32.h
index 8084bc7..576e39b 100644
--- a/arch/x86/include/asm/uaccess_32.h
+++ b/arch/x86/include/asm/uaccess_32.h
@@ -213,23 +213,6 @@
 	return n;
 }
 
-/**
- * strlen_user: - Get the size of a string in user space.
- * @str: The string to measure.
- *
- * Context: User context only.  This function may sleep.
- *
- * Get the size of a NUL-terminated string in user space.
- *
- * Returns the size of the string INCLUDING the terminating NUL.
- * On exception, returns 0.
- *
- * If there is a limit on the length of a valid string, you may wish to
- * consider using strnlen_user() instead.
- */
-#define strlen_user(str) strnlen_user(str, LONG_MAX)
-
-long strnlen_user(const char __user *str, long n);
 unsigned long __must_check clear_user(void __user *mem, unsigned long len);
 unsigned long __must_check __clear_user(void __user *mem, unsigned long len);
 
diff --git a/arch/x86/include/asm/uaccess_64.h b/arch/x86/include/asm/uaccess_64.h
index fcd4b6f..8e796fb 100644
--- a/arch/x86/include/asm/uaccess_64.h
+++ b/arch/x86/include/asm/uaccess_64.h
@@ -208,9 +208,6 @@
 	}
 }
 
-__must_check long strnlen_user(const char __user *str, long n);
-__must_check long __strnlen_user(const char __user *str, long n);
-__must_check long strlen_user(const char __user *str);
 __must_check unsigned long clear_user(void __user *mem, unsigned long len);
 __must_check unsigned long __clear_user(void __user *mem, unsigned long len);
 
diff --git a/arch/x86/include/asm/uv/uv_bau.h b/arch/x86/include/asm/uv/uv_bau.h
index becf47b..6149b47 100644
--- a/arch/x86/include/asm/uv/uv_bau.h
+++ b/arch/x86/include/asm/uv/uv_bau.h
@@ -149,7 +149,6 @@
 /* 4 bits of software ack period */
 #define UV2_ACK_MASK			0x7UL
 #define UV2_ACK_UNITS_SHFT		3
-#define UV2_LEG_SHFT UV2H_LB_BAU_MISC_CONTROL_USE_LEGACY_DESCRIPTOR_FORMATS_SHFT
 #define UV2_EXT_SHFT UV2H_LB_BAU_MISC_CONTROL_ENABLE_EXTENDED_SB_STATUS_SHFT
 
 /*
diff --git a/arch/x86/include/asm/word-at-a-time.h b/arch/x86/include/asm/word-at-a-time.h
index ae03fac..5b238981 100644
--- a/arch/x86/include/asm/word-at-a-time.h
+++ b/arch/x86/include/asm/word-at-a-time.h
@@ -10,6 +10,11 @@
  * bit count instruction, that might be better than the multiply
  * and shift, for example.
  */
+struct word_at_a_time {
+	const unsigned long one_bits, high_bits;
+};
+
+#define WORD_AT_A_TIME_CONSTANTS { REPEAT_BYTE(0x01), REPEAT_BYTE(0x80) }
 
 #ifdef CONFIG_64BIT
 
@@ -37,10 +42,31 @@
 
 #endif
 
-/* Return the high bit set in the first byte that is a zero */
-static inline unsigned long has_zero(unsigned long a)
+/* Return nonzero if it has a zero */
+static inline unsigned long has_zero(unsigned long a, unsigned long *bits, const struct word_at_a_time *c)
 {
-	return ((a - REPEAT_BYTE(0x01)) & ~a) & REPEAT_BYTE(0x80);
+	unsigned long mask = ((a - c->one_bits) & ~a) & c->high_bits;
+	*bits = mask;
+	return mask;
+}
+
+static inline unsigned long prep_zero_mask(unsigned long a, unsigned long bits, const struct word_at_a_time *c)
+{
+	return bits;
+}
+
+static inline unsigned long create_zero_mask(unsigned long bits)
+{
+	bits = (bits - 1) & ~bits;
+	return bits >> 7;
+}
+
+/* The mask we created is directly usable as a bytemask */
+#define zero_bytemask(mask) (mask)
+
+static inline unsigned long find_zero(unsigned long mask)
+{
+	return count_masked_bytes(mask);
 }
 
 /*
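
The new word-at-a-time interface splits zero detection into steps: has_zero() cheaply tests a whole word and stashes the raw bits, and only when a zero byte is actually present do prep_zero_mask(), create_zero_mask() and find_zero() turn those bits into the byte index. A hedged sketch of a strnlen-style loop built on it (the function name is hypothetical; alignment and tail handling are omitted):

	static inline long name_len(const unsigned long *words)
	{
		const struct word_at_a_time constants = WORD_AT_A_TIME_CONSTANTS;
		unsigned long bits;
		long len = 0;

		for (;;) {
			unsigned long w = *words++;	/* one word per iteration */

			if (has_zero(w, &bits, &constants)) {
				bits = prep_zero_mask(w, bits, &constants);
				bits = create_zero_mask(bits);
				return len + find_zero(bits);	/* index of the NUL byte */
			}
			len += sizeof(w);
		}
	}
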
diff --git a/arch/x86/kernel/Makefile b/arch/x86/kernel/Makefile
index 9bba5b7..8215e56 100644
--- a/arch/x86/kernel/Makefile
+++ b/arch/x86/kernel/Makefile
@@ -35,7 +35,6 @@
 obj-y			+= pci-iommu_table.o
 obj-y			+= resource.o
 
-obj-y				+= trampoline.o trampoline_$(BITS).o
 obj-y				+= process.o
 obj-y				+= i387.o xsave.o
 obj-y				+= ptrace.o
@@ -48,7 +47,6 @@
 obj-y				+= cpu/
 obj-y				+= acpi/
 obj-y				+= reboot.o
-obj-$(CONFIG_X86_32)		+= reboot_32.o
 obj-$(CONFIG_X86_MSR)		+= msr.o
 obj-$(CONFIG_X86_CPUID)		+= cpuid.o
 obj-$(CONFIG_PCI)		+= early-quirks.o
diff --git a/arch/x86/kernel/acpi/Makefile b/arch/x86/kernel/acpi/Makefile
index 6f35260..163b225 100644
--- a/arch/x86/kernel/acpi/Makefile
+++ b/arch/x86/kernel/acpi/Makefile
@@ -1,14 +1,7 @@
-subdir-				:= realmode
-
 obj-$(CONFIG_ACPI)		+= boot.o
-obj-$(CONFIG_ACPI_SLEEP)	+= sleep.o wakeup_rm.o wakeup_$(BITS).o
+obj-$(CONFIG_ACPI_SLEEP)	+= sleep.o wakeup_$(BITS).o
 
 ifneq ($(CONFIG_ACPI_PROCESSOR),)
 obj-y				+= cstate.o
 endif
 
-$(obj)/wakeup_rm.o:    $(obj)/realmode/wakeup.bin
-
-$(obj)/realmode/wakeup.bin: FORCE
-	$(Q)$(MAKE) $(build)=$(obj)/realmode
-
diff --git a/arch/x86/kernel/acpi/boot.c b/arch/x86/kernel/acpi/boot.c
index 8afb693..b2297e5 100644
--- a/arch/x86/kernel/acpi/boot.c
+++ b/arch/x86/kernel/acpi/boot.c
@@ -422,12 +422,14 @@
 		return 0;
 	}
 
-	if (intsrc->source_irq == 0 && intsrc->global_irq == 2) {
+	if (intsrc->source_irq == 0) {
 		if (acpi_skip_timer_override) {
-			printk(PREFIX "BIOS IRQ0 pin2 override ignored.\n");
+			printk(PREFIX "BIOS IRQ0 override ignored.\n");
 			return 0;
 		}
-		if (acpi_fix_pin2_polarity && (intsrc->inti_flags & ACPI_MADT_POLARITY_MASK)) {
+
+		if ((intsrc->global_irq == 2) && acpi_fix_pin2_polarity
+			&& (intsrc->inti_flags & ACPI_MADT_POLARITY_MASK)) {
 			intsrc->inti_flags &= ~ACPI_MADT_POLARITY_MASK;
 			printk(PREFIX "BIOS IRQ0 pin2 override: forcing polarity to high active.\n");
 		}
@@ -1334,17 +1336,12 @@
 }
 
 /*
- * Force ignoring BIOS IRQ0 pin2 override
+ * Force ignoring BIOS IRQ0 override
  */
 static int __init dmi_ignore_irq0_timer_override(const struct dmi_system_id *d)
 {
-	/*
-	 * The ati_ixp4x0_rev() early PCI quirk should have set
-	 * the acpi_skip_timer_override flag already:
-	 */
 	if (!acpi_skip_timer_override) {
-		WARN(1, KERN_ERR "ati_ixp4x0 quirk not complete.\n");
-		pr_notice("%s detected: Ignoring BIOS IRQ0 pin2 override\n",
+		pr_notice("%s detected: Ignoring BIOS IRQ0 override\n",
 			d->ident);
 		acpi_skip_timer_override = 1;
 	}
@@ -1438,7 +1435,7 @@
 	 * is enabled.  This input is incorrectly designated the
 	 * ISA IRQ 0 via an interrupt source override even though
 	 * it is wired to the output of the master 8259A and INTIN0
-	 * is not connected at all.  Force ignoring BIOS IRQ0 pin2
+	 * is not connected at all.  Force ignoring BIOS IRQ0
 	 * override in that cases.
 	 */
 	{
@@ -1473,6 +1470,14 @@
 		     DMI_MATCH(DMI_PRODUCT_NAME, "HP Compaq 6715b"),
 		     },
 	 },
+	{
+	 .callback = dmi_ignore_irq0_timer_override,
+	 .ident = "FUJITSU SIEMENS",
+	 .matches = {
+		     DMI_MATCH(DMI_SYS_VENDOR, "FUJITSU SIEMENS"),
+		     DMI_MATCH(DMI_PRODUCT_NAME, "AMILO PRO V2030"),
+		     },
+	 },
 	{}
 };
 
diff --git a/arch/x86/kernel/acpi/realmode/.gitignore b/arch/x86/kernel/acpi/realmode/.gitignore
deleted file mode 100644
index 58f1f48..0000000
--- a/arch/x86/kernel/acpi/realmode/.gitignore
+++ /dev/null
@@ -1,3 +0,0 @@
-wakeup.bin
-wakeup.elf
-wakeup.lds
diff --git a/arch/x86/kernel/acpi/realmode/Makefile b/arch/x86/kernel/acpi/realmode/Makefile
deleted file mode 100644
index 6a564ac..0000000
--- a/arch/x86/kernel/acpi/realmode/Makefile
+++ /dev/null
@@ -1,59 +0,0 @@
-#
-# arch/x86/kernel/acpi/realmode/Makefile
-#
-# This file is subject to the terms and conditions of the GNU General Public
-# License.  See the file "COPYING" in the main directory of this archive
-# for more details.
-#
-
-always		:= wakeup.bin
-targets		:= wakeup.elf wakeup.lds
-
-wakeup-y	+= wakeup.o wakemain.o video-mode.o copy.o bioscall.o regs.o
-
-# The link order of the video-*.o modules can matter.  In particular,
-# video-vga.o *must* be listed first, followed by video-vesa.o.
-# Hardware-specific drivers should follow in the order they should be
-# probed, and video-bios.o should typically be last.
-wakeup-y	+= video-vga.o
-wakeup-y	+= video-vesa.o
-wakeup-y	+= video-bios.o
-
-targets		+= $(wakeup-y)
-
-bootsrc		:= $(src)/../../../boot
-
-# ---------------------------------------------------------------------------
-
-# How to compile the 16-bit code.  Note we always compile for -march=i386,
-# that way we can complain to the user if the CPU is insufficient.
-# Compile with _SETUP since this is similar to the boot-time setup code.
-KBUILD_CFLAGS	:= $(LINUXINCLUDE) -g -Os -D_SETUP -D_WAKEUP -D__KERNEL__ \
-		   -I$(srctree)/$(bootsrc) \
-		   $(cflags-y) \
-		   -Wall -Wstrict-prototypes \
-		   -march=i386 -mregparm=3 \
-		   -include $(srctree)/$(bootsrc)/code16gcc.h \
-		   -fno-strict-aliasing -fomit-frame-pointer \
-		   $(call cc-option, -ffreestanding) \
-		   $(call cc-option, -fno-toplevel-reorder,\
-			$(call cc-option, -fno-unit-at-a-time)) \
-		   $(call cc-option, -fno-stack-protector) \
-		   $(call cc-option, -mpreferred-stack-boundary=2)
-KBUILD_CFLAGS	+= $(call cc-option, -m32)
-KBUILD_AFLAGS	:= $(KBUILD_CFLAGS) -D__ASSEMBLY__
-GCOV_PROFILE := n
-
-WAKEUP_OBJS = $(addprefix $(obj)/,$(wakeup-y))
-
-LDFLAGS_wakeup.elf	:= -T
-
-CPPFLAGS_wakeup.lds += -P -C
-
-$(obj)/wakeup.elf: $(obj)/wakeup.lds $(WAKEUP_OBJS) FORCE
-	$(call if_changed,ld)
-
-OBJCOPYFLAGS_wakeup.bin	:= -O binary
-
-$(obj)/wakeup.bin: $(obj)/wakeup.elf FORCE
-	$(call if_changed,objcopy)
diff --git a/arch/x86/kernel/acpi/realmode/bioscall.S b/arch/x86/kernel/acpi/realmode/bioscall.S
deleted file mode 100644
index f51eb0b..0000000
--- a/arch/x86/kernel/acpi/realmode/bioscall.S
+++ /dev/null
@@ -1 +0,0 @@
-#include "../../../boot/bioscall.S"
diff --git a/arch/x86/kernel/acpi/realmode/copy.S b/arch/x86/kernel/acpi/realmode/copy.S
deleted file mode 100644
index dc59ebe..0000000
--- a/arch/x86/kernel/acpi/realmode/copy.S
+++ /dev/null
@@ -1 +0,0 @@
-#include "../../../boot/copy.S"
diff --git a/arch/x86/kernel/acpi/realmode/regs.c b/arch/x86/kernel/acpi/realmode/regs.c
deleted file mode 100644
index 6206033..0000000
--- a/arch/x86/kernel/acpi/realmode/regs.c
+++ /dev/null
@@ -1 +0,0 @@
-#include "../../../boot/regs.c"
diff --git a/arch/x86/kernel/acpi/realmode/video-bios.c b/arch/x86/kernel/acpi/realmode/video-bios.c
deleted file mode 100644
index 7deabc1..0000000
--- a/arch/x86/kernel/acpi/realmode/video-bios.c
+++ /dev/null
@@ -1 +0,0 @@
-#include "../../../boot/video-bios.c"
diff --git a/arch/x86/kernel/acpi/realmode/video-mode.c b/arch/x86/kernel/acpi/realmode/video-mode.c
deleted file mode 100644
index 328ad20..0000000
--- a/arch/x86/kernel/acpi/realmode/video-mode.c
+++ /dev/null
@@ -1 +0,0 @@
-#include "../../../boot/video-mode.c"
diff --git a/arch/x86/kernel/acpi/realmode/video-vesa.c b/arch/x86/kernel/acpi/realmode/video-vesa.c
deleted file mode 100644
index 9dbb967..0000000
--- a/arch/x86/kernel/acpi/realmode/video-vesa.c
+++ /dev/null
@@ -1 +0,0 @@
-#include "../../../boot/video-vesa.c"
diff --git a/arch/x86/kernel/acpi/realmode/video-vga.c b/arch/x86/kernel/acpi/realmode/video-vga.c
deleted file mode 100644
index bcc8125..0000000
--- a/arch/x86/kernel/acpi/realmode/video-vga.c
+++ /dev/null
@@ -1 +0,0 @@
-#include "../../../boot/video-vga.c"
diff --git a/arch/x86/kernel/acpi/realmode/wakeup.S b/arch/x86/kernel/acpi/realmode/wakeup.S
deleted file mode 100644
index b4fd836..0000000
--- a/arch/x86/kernel/acpi/realmode/wakeup.S
+++ /dev/null
@@ -1,170 +0,0 @@
-/*
- * ACPI wakeup real mode startup stub
- */
-#include <asm/segment.h>
-#include <asm/msr-index.h>
-#include <asm/page_types.h>
-#include <asm/pgtable_types.h>
-#include <asm/processor-flags.h>
-#include "wakeup.h"
-
-	.code16
-	.section ".jump", "ax"
-	.globl	_start
-_start:
-	cli
-	jmp	wakeup_code
-
-/* This should match the structure in wakeup.h */
-		.section ".header", "a"
-		.globl	wakeup_header
-wakeup_header:
-video_mode:	.short	0	/* Video mode number */
-pmode_return:	.byte	0x66, 0xea	/* ljmpl */
-		.long	0	/* offset goes here */
-		.short	__KERNEL_CS
-pmode_cr0:	.long	0	/* Saved %cr0 */
-pmode_cr3:	.long	0	/* Saved %cr3 */
-pmode_cr4:	.long	0	/* Saved %cr4 */
-pmode_efer:	.quad	0	/* Saved EFER */
-pmode_gdt:	.quad	0
-pmode_misc_en:	.quad	0	/* Saved MISC_ENABLE MSR */
-pmode_behavior:	.long	0	/* Wakeup behavior flags */
-realmode_flags:	.long	0
-real_magic:	.long	0
-trampoline_segment:	.word 0
-_pad1:		.byte	0
-wakeup_jmp:	.byte	0xea	/* ljmpw */
-wakeup_jmp_off:	.word	3f
-wakeup_jmp_seg:	.word	0
-wakeup_gdt:	.quad	0, 0, 0
-signature:	.long	WAKEUP_HEADER_SIGNATURE
-
-	.text
-	.code16
-wakeup_code:
-	cld
-
-	/* Apparently some dimwit BIOS programmers don't know how to
-	   program a PM to RM transition, and we might end up here with
-	   junk in the data segment descriptor registers.  The only way
-	   to repair that is to go into PM and fix it ourselves... */
-	movw	$16, %cx
-	lgdtl	%cs:wakeup_gdt
-	movl	%cr0, %eax
-	orb	$X86_CR0_PE, %al
-	movl	%eax, %cr0
-	jmp	1f
-1:	ljmpw	$8, $2f
-2:
-	movw	%cx, %ds
-	movw	%cx, %es
-	movw	%cx, %ss
-	movw	%cx, %fs
-	movw	%cx, %gs
-
-	andb	$~X86_CR0_PE, %al
-	movl	%eax, %cr0
-	jmp	wakeup_jmp
-3:
-	/* Set up segments */
-	movw	%cs, %ax
-	movw	%ax, %ds
-	movw	%ax, %es
-	movw	%ax, %ss
-	lidtl	wakeup_idt
-
-	movl	$wakeup_stack_end, %esp
-
-	/* Clear the EFLAGS */
-	pushl	$0
-	popfl
-
-	/* Check header signature... */
-	movl	signature, %eax
-	cmpl	$WAKEUP_HEADER_SIGNATURE, %eax
-	jne	bogus_real_magic
-
-	/* Check we really have everything... */
-	movl	end_signature, %eax
-	cmpl	$WAKEUP_END_SIGNATURE, %eax
-	jne	bogus_real_magic
-
-	/* Call the C code */
-	calll	main
-
-	/* Restore MISC_ENABLE before entering protected mode, in case
-	   BIOS decided to clear XD_DISABLE during S3. */
-	movl	pmode_behavior, %eax
-	btl	$WAKEUP_BEHAVIOR_RESTORE_MISC_ENABLE, %eax
-	jnc	1f
-
-	movl	pmode_misc_en, %eax
-	movl	pmode_misc_en + 4, %edx
-	movl	$MSR_IA32_MISC_ENABLE, %ecx
-	wrmsr
-1:
-
-	/* Do any other stuff... */
-
-#ifndef CONFIG_64BIT
-	/* This could also be done in C code... */
-	movl	pmode_cr3, %eax
-	movl	%eax, %cr3
-
-	movl	pmode_cr4, %ecx
-	jecxz	1f
-	movl	%ecx, %cr4
-1:
-	movl	pmode_efer, %eax
-	movl	pmode_efer + 4, %edx
-	movl	%eax, %ecx
-	orl	%edx, %ecx
-	jz	1f
-	movl	$MSR_EFER, %ecx
-	wrmsr
-1:
-
-	lgdtl	pmode_gdt
-
-	/* This really couldn't... */
-	movl	pmode_cr0, %eax
-	movl	%eax, %cr0
-	jmp	pmode_return
-#else
-	pushw	$0
-	pushw	trampoline_segment
-	pushw	$0
-	lret
-#endif
-
-bogus_real_magic:
-1:
-	hlt
-	jmp	1b
-
-	.data
-	.balign	8
-
-	/* This is the standard real-mode IDT */
-wakeup_idt:
-	.word	0xffff		/* limit */
-	.long	0		/* address */
-	.word	0
-
-	.globl	HEAP, heap_end
-HEAP:
-	.long	wakeup_heap
-heap_end:
-	.long	wakeup_stack
-
-	.bss
-wakeup_heap:
-	.space	2048
-wakeup_stack:
-	.space	2048
-wakeup_stack_end:
-
-	.section ".signature","a"
-end_signature:
-	.long	WAKEUP_END_SIGNATURE
diff --git a/arch/x86/kernel/acpi/realmode/wakeup.lds.S b/arch/x86/kernel/acpi/realmode/wakeup.lds.S
deleted file mode 100644
index d4f8010..0000000
--- a/arch/x86/kernel/acpi/realmode/wakeup.lds.S
+++ /dev/null
@@ -1,62 +0,0 @@
-/*
- * wakeup.ld
- *
- * Linker script for the real-mode wakeup code
- */
-#undef i386
-#include "wakeup.h"
-
-OUTPUT_FORMAT("elf32-i386", "elf32-i386", "elf32-i386")
-OUTPUT_ARCH(i386)
-ENTRY(_start)
-
-SECTIONS
-{
-	. = 0;
-	.jump	: {
-		*(.jump)
-	} = 0x90909090
-
-	. = WAKEUP_HEADER_OFFSET;
-	.header : {
-		*(.header)
-	}
-
-	. = ALIGN(16);
-	.text : {
-		 *(.text*)
-	} = 0x90909090
-
-	. = ALIGN(16);
-	.rodata : {
-		*(.rodata*)
-	}
-
-	.videocards : {
-		video_cards = .;
-		*(.videocards)
-		video_cards_end = .;
-	}
-
-	. = ALIGN(16);
-	.data : {
-		 *(.data*)
-	}
-
-	. = ALIGN(16);
-	.bss :	{
-		__bss_start = .;
-		*(.bss)
-		__bss_end = .;
-	}
-
-	.signature : {
-		*(.signature)
-	}
-
-	_end = .;
-
-	/DISCARD/ : {
-		*(.note*)
-	}
-}
diff --git a/arch/x86/kernel/acpi/sleep.c b/arch/x86/kernel/acpi/sleep.c
index 146a49c..95bf99d 100644
--- a/arch/x86/kernel/acpi/sleep.c
+++ b/arch/x86/kernel/acpi/sleep.c
@@ -14,8 +14,9 @@
 #include <asm/desc.h>
 #include <asm/pgtable.h>
 #include <asm/cacheflush.h>
+#include <asm/realmode.h>
 
-#include "realmode/wakeup.h"
+#include "../../realmode/rm/wakeup.h"
 #include "sleep.h"
 
 unsigned long acpi_realmode_flags;
@@ -36,13 +37,9 @@
  */
 int acpi_suspend_lowlevel(void)
 {
-	struct wakeup_header *header;
-	/* address in low memory of the wakeup routine. */
-	char *acpi_realmode;
+	struct wakeup_header *header =
+		(struct wakeup_header *) __va(real_mode_header->wakeup_header);
 
-	acpi_realmode = TRAMPOLINE_SYM(acpi_wakeup_code);
-
-	header = (struct wakeup_header *)(acpi_realmode + WAKEUP_HEADER_OFFSET);
 	if (header->signature != WAKEUP_HEADER_SIGNATURE) {
 		printk(KERN_ERR "wakeup header does not match\n");
 		return -EINVAL;
@@ -50,27 +47,6 @@
 
 	header->video_mode = saved_video_mode;
 
-	header->wakeup_jmp_seg = acpi_wakeup_address >> 4;
-
-	/*
-	 * Set up the wakeup GDT.  We set these up as Big Real Mode,
-	 * that is, with limits set to 4 GB.  At least the Lenovo
-	 * Thinkpad X61 is known to need this for the video BIOS
-	 * initialization quirk to work; this is likely to also
-	 * be the case for other laptops or integrated video devices.
-	 */
-
-	/* GDT[0]: GDT self-pointer */
-	header->wakeup_gdt[0] =
-		(u64)(sizeof(header->wakeup_gdt) - 1) +
-		((u64)__pa(&header->wakeup_gdt) << 16);
-	/* GDT[1]: big real mode-like code segment */
-	header->wakeup_gdt[1] =
-		GDT_ENTRY(0x809b, acpi_wakeup_address, 0xfffff);
-	/* GDT[2]: big real mode-like data segment */
-	header->wakeup_gdt[2] =
-		GDT_ENTRY(0x8093, acpi_wakeup_address, 0xfffff);
-
 #ifndef CONFIG_64BIT
 	store_gdt((struct desc_ptr *)&header->pmode_gdt);
 
@@ -95,7 +71,6 @@
 	header->pmode_cr3 = (u32)__pa(&initial_page_table);
 	saved_magic = 0x12345678;
 #else /* CONFIG_64BIT */
-	header->trampoline_segment = trampoline_address() >> 4;
 #ifdef CONFIG_SMP
 	stack_start = (unsigned long)temp_stack + sizeof(temp_stack);
 	early_gdt_descr.address =
diff --git a/arch/x86/kernel/acpi/sleep.h b/arch/x86/kernel/acpi/sleep.h
index d68677a..5653a57 100644
--- a/arch/x86/kernel/acpi/sleep.h
+++ b/arch/x86/kernel/acpi/sleep.h
@@ -2,8 +2,8 @@
  *	Variables and functions used by the code in sleep.c
  */
 
-#include <asm/trampoline.h>
 #include <linux/linkage.h>
+#include <asm/realmode.h>
 
 extern unsigned long saved_video_mode;
 extern long saved_magic;
diff --git a/arch/x86/kernel/acpi/wakeup_rm.S b/arch/x86/kernel/acpi/wakeup_rm.S
deleted file mode 100644
index 63b8ab5..0000000
--- a/arch/x86/kernel/acpi/wakeup_rm.S
+++ /dev/null
@@ -1,12 +0,0 @@
-/*
- * Wrapper script for the realmode binary as a transport object
- * before copying to low memory.
- */
-#include <asm/page_types.h>
-
-	.section ".x86_trampoline","a"
-	.balign PAGE_SIZE
-	.globl	acpi_wakeup_code
-acpi_wakeup_code:
-	.incbin	"arch/x86/kernel/acpi/realmode/wakeup.bin"
-	.size	acpi_wakeup_code, .-acpi_wakeup_code
diff --git a/arch/x86/kernel/aperture_64.c b/arch/x86/kernel/aperture_64.c
index 6e76c19..d5fd66f 100644
--- a/arch/x86/kernel/aperture_64.c
+++ b/arch/x86/kernel/aperture_64.c
@@ -20,7 +20,6 @@
 #include <linux/bitops.h>
 #include <linux/ioport.h>
 #include <linux/suspend.h>
-#include <linux/kmemleak.h>
 #include <asm/e820.h>
 #include <asm/io.h>
 #include <asm/iommu.h>
@@ -95,11 +94,6 @@
 		return 0;
 	}
 	memblock_reserve(addr, aper_size);
-	/*
-	 * Kmemleak should not scan this block as it may not be mapped via the
-	 * kernel direct mapping.
-	 */
-	kmemleak_ignore(phys_to_virt(addr));
 	printk(KERN_INFO "Mapping aperture over %d KB of RAM @ %lx\n",
 			aper_size >> 10, addr);
 	insert_aperture_resource((u32)addr, aper_size);
diff --git a/arch/x86/kernel/apic/io_apic.c b/arch/x86/kernel/apic/io_apic.c
index ac96561..5f0ff59 100644
--- a/arch/x86/kernel/apic/io_apic.c
+++ b/arch/x86/kernel/apic/io_apic.c
@@ -1195,7 +1195,7 @@
 	BUG_ON(!cfg->vector);
 
 	vector = cfg->vector;
-	for_each_cpu_and(cpu, cfg->domain, cpu_online_mask)
+	for_each_cpu(cpu, cfg->domain)
 		per_cpu(vector_irq, cpu)[vector] = -1;
 
 	cfg->vector = 0;
@@ -1203,7 +1203,7 @@
 
 	if (likely(!cfg->move_in_progress))
 		return;
-	for_each_cpu_and(cpu, cfg->old_domain, cpu_online_mask) {
+	for_each_cpu(cpu, cfg->old_domain) {
 		for (vector = FIRST_EXTERNAL_VECTOR; vector < NR_VECTORS;
 								vector++) {
 			if (per_cpu(vector_irq, cpu)[vector] != irq)
diff --git a/arch/x86/kernel/cpu/common.c b/arch/x86/kernel/cpu/common.c
index 82f29e7..6b9333b 100644
--- a/arch/x86/kernel/cpu/common.c
+++ b/arch/x86/kernel/cpu/common.c
@@ -1101,14 +1101,20 @@
 		 addr > (__get_cpu_var(debug_stack_addr) - DEBUG_STKSZ));
 }
 
+static DEFINE_PER_CPU(u32, debug_stack_use_ctr);
+
 void debug_stack_set_zero(void)
 {
+	this_cpu_inc(debug_stack_use_ctr);
 	load_idt((const struct desc_ptr *)&nmi_idt_descr);
 }
 
 void debug_stack_reset(void)
 {
-	load_idt((const struct desc_ptr *)&idt_descr);
+	if (WARN_ON(!this_cpu_read(debug_stack_use_ctr)))
+		return;
+	if (this_cpu_dec_return(debug_stack_use_ctr) == 0)
+		load_idt((const struct desc_ptr *)&idt_descr);
 }
 
 #else	/* CONFIG_X86_64 */
diff --git a/arch/x86/kernel/cpu/mcheck/mce-apei.c b/arch/x86/kernel/cpu/mcheck/mce-apei.c
index 507ea58..cd8b166 100644
--- a/arch/x86/kernel/cpu/mcheck/mce-apei.c
+++ b/arch/x86/kernel/cpu/mcheck/mce-apei.c
@@ -42,7 +42,8 @@
 	struct mce m;
 
 	/* Only corrected MC is reported */
-	if (!corrected)
+	if (!corrected || !(mem_err->validation_bits &
+				CPER_MEM_VALID_PHYSICAL_ADDRESS))
 		return;
 
 	mce_setup(&m);
diff --git a/arch/x86/kernel/cpu/mcheck/mce-severity.c b/arch/x86/kernel/cpu/mcheck/mce-severity.c
index 0c82091..413c2ce 100644
--- a/arch/x86/kernel/cpu/mcheck/mce-severity.c
+++ b/arch/x86/kernel/cpu/mcheck/mce-severity.c
@@ -126,6 +126,16 @@
 		SER, MASK(MCI_STATUS_OVER|MCI_UC_SAR|MCI_ADDR|MCACOD, MCI_UC_SAR|MCI_ADDR|MCACOD_DATA),
 		USER
 		),
+	MCESEV(
+		KEEP, "HT thread notices Action required: instruction fetch error",
+		SER, MASK(MCI_STATUS_OVER|MCI_UC_SAR|MCI_ADDR|MCACOD, MCI_UC_SAR|MCI_ADDR|MCACOD_INSTR),
+		MCGMASK(MCG_STATUS_EIPV, 0)
+		),
+	MCESEV(
+		AR, "Action required: instruction fetch error",
+		SER, MASK(MCI_STATUS_OVER|MCI_UC_SAR|MCI_ADDR|MCACOD, MCI_UC_SAR|MCI_ADDR|MCACOD_INSTR),
+		USER
+		),
 #endif
 	MCESEV(
 		PANIC, "Action required: unknown MCACOD",
@@ -165,15 +175,19 @@
 };
 
 /*
- * If the EIPV bit is set, it means the saved IP is the
- * instruction which caused the MCE.
+ * If mcgstatus indicated that ip/cs on the stack were
+ * no good, then "m->cs" will be zero and we will have
+ * to assume the worst case (IN_KERNEL) as we actually
+ * have no idea what we were executing when the machine
+ * check hit.
+ * If we do have a good "m->cs" (or a faked one in the
+ * case we were executing in VM86 mode) we can use it to
+ * distinguish an exception taken in user mode from one
+ * taken in the kernel.
  */
 static int error_context(struct mce *m)
 {
-	if (m->mcgstatus & MCG_STATUS_EIPV)
-		return (m->ip && (m->cs & 3) == 3) ? IN_USER : IN_KERNEL;
-	/* Unknown, assume kernel */
-	return IN_KERNEL;
+	return ((m->cs & 3) == 3) ? IN_USER : IN_KERNEL;
 }
 
 int mce_severity(struct mce *m, int tolerant, char **msg)
diff --git a/arch/x86/kernel/cpu/mcheck/mce.c b/arch/x86/kernel/cpu/mcheck/mce.c
index 2afcbd2..da27c5d 100644
--- a/arch/x86/kernel/cpu/mcheck/mce.c
+++ b/arch/x86/kernel/cpu/mcheck/mce.c
@@ -437,6 +437,14 @@
 		if (m->mcgstatus & (MCG_STATUS_RIPV|MCG_STATUS_EIPV)) {
 			m->ip = regs->ip;
 			m->cs = regs->cs;
+
+			/*
+			 * When in VM86 mode make the cs look like ring 3
+			 * always. This is a lie, but it's better than passing
+			 * the additional vm86 bit around everywhere.
+			 */
+			if (v8086_mode(regs))
+				m->cs |= 3;
 		}
 		/* Use accurate RIP reporting if available. */
 		if (rip_msr)
@@ -641,16 +649,18 @@
  * Do a quick check if any of the events requires a panic.
  * This decides if we keep the events around or clear them.
  */
-static int mce_no_way_out(struct mce *m, char **msg)
+static int mce_no_way_out(struct mce *m, char **msg, unsigned long *validp)
 {
-	int i;
+	int i, ret = 0;
 
 	for (i = 0; i < banks; i++) {
 		m->status = mce_rdmsrl(MSR_IA32_MCx_STATUS(i));
+		if (m->status & MCI_STATUS_VAL)
+			__set_bit(i, validp);
 		if (mce_severity(m, tolerant, msg) >= MCE_PANIC_SEVERITY)
-			return 1;
+			ret = 1;
 	}
-	return 0;
+	return ret;
 }
 
 /*
@@ -1013,6 +1023,7 @@
 	 */
 	int kill_it = 0;
 	DECLARE_BITMAP(toclear, MAX_NR_BANKS);
+	DECLARE_BITMAP(valid_banks, MAX_NR_BANKS);
 	char *msg = "Unknown";
 
 	atomic_inc(&mce_entry);
@@ -1027,7 +1038,8 @@
 	final = &__get_cpu_var(mces_seen);
 	*final = m;
 
-	no_way_out = mce_no_way_out(&m, &msg);
+	memset(valid_banks, 0, sizeof(valid_banks));
+	no_way_out = mce_no_way_out(&m, &msg, valid_banks);
 
 	barrier();
 
@@ -1047,6 +1059,8 @@
 	order = mce_start(&no_way_out);
 	for (i = 0; i < banks; i++) {
 		__clear_bit(i, toclear);
+		if (!test_bit(i, valid_banks))
+			continue;
 		if (!mce_banks[i].ctl)
 			continue;
 
@@ -1237,15 +1251,15 @@
  * poller finds an MCE, poll 2x faster.  When the poller finds no more
  * errors, poll 2x slower (up to check_interval seconds).
  */
-static int check_interval = 5 * 60; /* 5 minutes */
+static unsigned long check_interval = 5 * 60; /* 5 minutes */
 
-static DEFINE_PER_CPU(int, mce_next_interval); /* in jiffies */
+static DEFINE_PER_CPU(unsigned long, mce_next_interval); /* in jiffies */
 static DEFINE_PER_CPU(struct timer_list, mce_timer);
 
-static void mce_start_timer(unsigned long data)
+static void mce_timer_fn(unsigned long data)
 {
-	struct timer_list *t = &per_cpu(mce_timer, data);
-	int *n;
+	struct timer_list *t = &__get_cpu_var(mce_timer);
+	unsigned long iv;
 
 	WARN_ON(smp_processor_id() != data);
 
@@ -1258,13 +1272,14 @@
 	 * Alert userspace if needed.  If we logged an MCE, reduce the
 	 * polling interval, otherwise increase the polling interval.
 	 */
-	n = &__get_cpu_var(mce_next_interval);
+	iv = __this_cpu_read(mce_next_interval);
 	if (mce_notify_irq())
-		*n = max(*n/2, HZ/100);
+		iv = max(iv / 2, (unsigned long) HZ/100);
 	else
-		*n = min(*n*2, (int)round_jiffies_relative(check_interval*HZ));
+		iv = min(iv * 2, round_jiffies_relative(check_interval * HZ));
+	__this_cpu_write(mce_next_interval, iv);
 
-	t->expires = jiffies + *n;
+	t->expires = jiffies + iv;
 	add_timer_on(t, smp_processor_id());
 }
 
@@ -1458,9 +1473,9 @@
 				 rdmsrl(msrs[i], val);
 
 				 /* CntP bit set? */
-				 if (val & BIT(62)) {
-					 val &= ~BIT(62);
-					 wrmsrl(msrs[i], val);
+				 if (val & BIT_64(62)) {
+					val &= ~BIT_64(62);
+					wrmsrl(msrs[i], val);
 				 }
 			 }
 
@@ -1542,17 +1557,17 @@
 static void __mcheck_cpu_init_timer(void)
 {
 	struct timer_list *t = &__get_cpu_var(mce_timer);
-	int *n = &__get_cpu_var(mce_next_interval);
+	unsigned long iv = check_interval * HZ;
 
-	setup_timer(t, mce_start_timer, smp_processor_id());
+	setup_timer(t, mce_timer_fn, smp_processor_id());
 
 	if (mce_ignore_ce)
 		return;
 
-	*n = check_interval * HZ;
-	if (!*n)
+	__this_cpu_write(mce_next_interval, iv);
+	if (!iv)
 		return;
-	t->expires = round_jiffies(jiffies + *n);
+	t->expires = round_jiffies(jiffies + iv);
 	add_timer_on(t, smp_processor_id());
 }
 
@@ -2262,7 +2277,7 @@
 	case CPU_DOWN_FAILED_FROZEN:
 		if (!mce_ignore_ce && check_interval) {
 			t->expires = round_jiffies(jiffies +
-					   __get_cpu_var(mce_next_interval));
+					per_cpu(mce_next_interval, cpu));
 			add_timer_on(t, cpu);
 		}
 		smp_call_function_single(cpu, mce_reenable_cpu, &action, 1);
diff --git a/arch/x86/kernel/cpu/mkcapflags.pl b/arch/x86/kernel/cpu/mkcapflags.pl
index dfea390..c7b3fe2 100644
--- a/arch/x86/kernel/cpu/mkcapflags.pl
+++ b/arch/x86/kernel/cpu/mkcapflags.pl
@@ -1,4 +1,4 @@
-#!/usr/bin/perl
+#!/usr/bin/perl -w
 #
 # Generate the x86_cap_flags[] array from include/asm-x86/cpufeature.h
 #
@@ -11,22 +11,35 @@
 print OUT "#include <asm/cpufeature.h>\n\n";
 print OUT "const char * const x86_cap_flags[NCAPINTS*32] = {\n";
 
+%features = ();
+$err = 0;
+
 while (defined($line = <IN>)) {
 	if ($line =~ /^\s*\#\s*define\s+(X86_FEATURE_(\S+))\s+(.*)$/) {
 		$macro = $1;
-		$feature = $2;
+		$feature = "\L$2";
 		$tail = $3;
 		if ($tail =~ /\/\*\s*\"([^"]*)\".*\*\//) {
-			$feature = $1;
+			$feature = "\L$1";
 		}
 
-		if ($feature ne '') {
-			printf OUT "\t%-32s = \"%s\",\n",
-				"[$macro]", "\L$feature";
+		next if ($feature eq '');
+
+		if ($features{$feature}++) {
+			print STDERR "$in: duplicate feature name: $feature\n";
+			$err++;
 		}
+		printf OUT "\t%-32s = \"%s\",\n", "[$macro]", $feature;
 	}
 }
 print OUT "};\n";
 
 close(IN);
 close(OUT);
+
+if ($err) {
+	unlink($out);
+	exit(1);
+}
+
+exit(0);
diff --git a/arch/x86/kernel/cpu/mtrr/cleanup.c b/arch/x86/kernel/cpu/mtrr/cleanup.c
index ac140c7..bdda2e6 100644
--- a/arch/x86/kernel/cpu/mtrr/cleanup.c
+++ b/arch/x86/kernel/cpu/mtrr/cleanup.c
@@ -266,7 +266,7 @@
 		if (align > max_align)
 			align = max_align;
 
-		sizek = 1 << align;
+		sizek = 1UL << align;
 		if (debug_print) {
 			char start_factor = 'K', size_factor = 'K';
 			unsigned long start_base, size_base;
diff --git a/arch/x86/kernel/cpu/perf_event.c b/arch/x86/kernel/cpu/perf_event.c
index e049d6d..c4706cf 100644
--- a/arch/x86/kernel/cpu/perf_event.c
+++ b/arch/x86/kernel/cpu/perf_event.c
@@ -1496,6 +1496,7 @@
 		if (!cpuc->shared_regs)
 			goto error;
 	}
+	cpuc->is_fake = 1;
 	return cpuc;
 error:
 	free_fake_cpuc(cpuc);
@@ -1756,6 +1757,12 @@
 	dump_trace(NULL, regs, NULL, 0, &backtrace_ops, entry);
 }
 
+static inline int
+valid_user_frame(const void __user *fp, unsigned long size)
+{
+	return (__range_not_ok(fp, size, TASK_SIZE) == 0);
+}
+
 #ifdef CONFIG_COMPAT
 
 #include <asm/compat.h>
@@ -1780,7 +1787,7 @@
 		if (bytes != sizeof(frame))
 			break;
 
-		if (fp < compat_ptr(regs->sp))
+		if (!valid_user_frame(fp, sizeof(frame)))
 			break;
 
 		perf_callchain_store(entry, frame.return_address);
@@ -1826,7 +1833,7 @@
 		if (bytes != sizeof(frame))
 			break;
 
-		if ((unsigned long)fp < regs->sp)
+		if (!valid_user_frame(fp, sizeof(frame)))
 			break;
 
 		perf_callchain_store(entry, frame.return_address);
diff --git a/arch/x86/kernel/cpu/perf_event.h b/arch/x86/kernel/cpu/perf_event.h
index 6638aaf..7241e2f 100644
--- a/arch/x86/kernel/cpu/perf_event.h
+++ b/arch/x86/kernel/cpu/perf_event.h
@@ -117,6 +117,7 @@
 	struct perf_event	*event_list[X86_PMC_IDX_MAX]; /* in enabled order */
 
 	unsigned int		group_flag;
+	int			is_fake;
 
 	/*
 	 * Intel DebugStore bits
@@ -364,6 +365,7 @@
 	int		pebs_record_size;
 	void		(*drain_pebs)(struct pt_regs *regs);
 	struct event_constraint *pebs_constraints;
+	void		(*pebs_aliases)(struct perf_event *event);
 
 	/*
 	 * Intel LBR
diff --git a/arch/x86/kernel/cpu/perf_event_intel.c b/arch/x86/kernel/cpu/perf_event_intel.c
index 166546e..187c294 100644
--- a/arch/x86/kernel/cpu/perf_event_intel.c
+++ b/arch/x86/kernel/cpu/perf_event_intel.c
@@ -1119,27 +1119,33 @@
 	return NULL;
 }
 
-static bool intel_try_alt_er(struct perf_event *event, int orig_idx)
+static int intel_alt_er(int idx)
 {
 	if (!(x86_pmu.er_flags & ERF_HAS_RSP_1))
-		return false;
+		return idx;
 
-	if (event->hw.extra_reg.idx == EXTRA_REG_RSP_0) {
-		event->hw.config &= ~INTEL_ARCH_EVENT_MASK;
-		event->hw.config |= 0x01bb;
-		event->hw.extra_reg.idx = EXTRA_REG_RSP_1;
-		event->hw.extra_reg.reg = MSR_OFFCORE_RSP_1;
-	} else if (event->hw.extra_reg.idx == EXTRA_REG_RSP_1) {
+	if (idx == EXTRA_REG_RSP_0)
+		return EXTRA_REG_RSP_1;
+
+	if (idx == EXTRA_REG_RSP_1)
+		return EXTRA_REG_RSP_0;
+
+	return idx;
+}
+
+static void intel_fixup_er(struct perf_event *event, int idx)
+{
+	event->hw.extra_reg.idx = idx;
+
+	if (idx == EXTRA_REG_RSP_0) {
 		event->hw.config &= ~INTEL_ARCH_EVENT_MASK;
 		event->hw.config |= 0x01b7;
-		event->hw.extra_reg.idx = EXTRA_REG_RSP_0;
 		event->hw.extra_reg.reg = MSR_OFFCORE_RSP_0;
+	} else if (idx == EXTRA_REG_RSP_1) {
+		event->hw.config &= ~INTEL_ARCH_EVENT_MASK;
+		event->hw.config |= 0x01bb;
+		event->hw.extra_reg.reg = MSR_OFFCORE_RSP_1;
 	}
-
-	if (event->hw.extra_reg.idx == orig_idx)
-		return false;
-
-	return true;
 }
 
 /*
@@ -1157,14 +1163,18 @@
 	struct event_constraint *c = &emptyconstraint;
 	struct er_account *era;
 	unsigned long flags;
-	int orig_idx = reg->idx;
+	int idx = reg->idx;
 
-	/* already allocated shared msr */
-	if (reg->alloc)
+	/*
+	 * reg->alloc can be set due to existing state, so for fake cpuc we
+	 * need to ignore this, otherwise we might fail to allocate proper fake
+	 * state for this extra reg constraint. Also see the comment below.
+	 */
+	if (reg->alloc && !cpuc->is_fake)
 		return NULL; /* call x86_get_event_constraint() */
 
 again:
-	era = &cpuc->shared_regs->regs[reg->idx];
+	era = &cpuc->shared_regs->regs[idx];
 	/*
 	 * we use spin_lock_irqsave() to avoid lockdep issues when
 	 * passing a fake cpuc
@@ -1173,6 +1183,29 @@
 
 	if (!atomic_read(&era->ref) || era->config == reg->config) {
 
+		/*
+		 * If it's a fake cpuc -- as per validate_{group,event}() we
+		 * shouldn't touch event state and we can avoid doing so
+		 * since both will only call get_event_constraints() once
+		 * on each event; this avoids the need for reg->alloc.
+		 *
+		 * Not doing the ER fixup will only result in era->reg being
+		 * wrong, but since we won't actually try and program hardware
+		 * this isn't a problem either.
+		 */
+		if (!cpuc->is_fake) {
+			if (idx != reg->idx)
+				intel_fixup_er(event, idx);
+
+			/*
+			 * x86_schedule_events() can call get_event_constraints()
+			 * multiple times on events in the case of incremental
+			 * scheduling. reg->alloc ensures we only do the ER
+			 * allocation once.
+			 */
+			reg->alloc = 1;
+		}
+
 		/* lock in msr value */
 		era->config = reg->config;
 		era->reg = reg->reg;
@@ -1180,17 +1213,17 @@
 		/* one more user */
 		atomic_inc(&era->ref);
 
-		/* no need to reallocate during incremental event scheduling */
-		reg->alloc = 1;
-
 		/*
 		 * need to call x86_get_event_constraint()
 		 * to check if associated event has constraints
 		 */
 		c = NULL;
-	} else if (intel_try_alt_er(event, orig_idx)) {
-		raw_spin_unlock_irqrestore(&era->lock, flags);
-		goto again;
+	} else {
+		idx = intel_alt_er(idx);
+		if (idx != reg->idx) {
+			raw_spin_unlock_irqrestore(&era->lock, flags);
+			goto again;
+		}
 	}
 	raw_spin_unlock_irqrestore(&era->lock, flags);
 
@@ -1204,11 +1237,14 @@
 	struct er_account *era;
 
 	/*
-	 * only put constraint if extra reg was actually
-	 * allocated. Also takes care of event which do
-	 * not use an extra shared reg
+	 * Only put constraint if extra reg was actually allocated. Also takes
+	 * care of events which do not use an extra shared reg.
+	 *
+	 * Also, if this is a fake cpuc we shouldn't touch any event state
+	 * (reg->alloc) and we don't care about leaving inconsistent cpuc state
+	 * either since it'll be thrown out.
 	 */
-	if (!reg->alloc)
+	if (!reg->alloc || cpuc->is_fake)
 		return;
 
 	era = &cpuc->shared_regs->regs[reg->idx];
@@ -1300,15 +1336,9 @@
 	intel_put_shared_regs_event_constraints(cpuc, event);
 }
 
-static int intel_pmu_hw_config(struct perf_event *event)
+static void intel_pebs_aliases_core2(struct perf_event *event)
 {
-	int ret = x86_pmu_hw_config(event);
-
-	if (ret)
-		return ret;
-
-	if (event->attr.precise_ip &&
-	    (event->hw.config & X86_RAW_EVENT_MASK) == 0x003c) {
+	if ((event->hw.config & X86_RAW_EVENT_MASK) == 0x003c) {
 		/*
 		 * Use an alternative encoding for CPU_CLK_UNHALTED.THREAD_P
 		 * (0x003c) so that we can use it with PEBS.
@@ -1329,10 +1359,48 @@
 		 */
 		u64 alt_config = X86_CONFIG(.event=0xc0, .inv=1, .cmask=16);
 
+		alt_config |= (event->hw.config & ~X86_RAW_EVENT_MASK);
+		event->hw.config = alt_config;
+	}
+}
+
+static void intel_pebs_aliases_snb(struct perf_event *event)
+{
+	if ((event->hw.config & X86_RAW_EVENT_MASK) == 0x003c) {
+		/*
+		 * Use an alternative encoding for CPU_CLK_UNHALTED.THREAD_P
+		 * (0x003c) so that we can use it with PEBS.
+		 *
+		 * The regular CPU_CLK_UNHALTED.THREAD_P event (0x003c) isn't
+		 * PEBS capable. However we can use UOPS_RETIRED.ALL
+		 * (0x01c2), which is a PEBS capable event, to get the same
+		 * count.
+		 *
+		 * UOPS_RETIRED.ALL counts the number of cycles that retire
+		 * CNTMASK micro-ops. By setting CNTMASK to a value (16)
+		 * larger than the maximum number of micro-ops that can be
+		 * retired per cycle (4) and then inverting the condition, we
+		 * count all cycles that retire 16 or less micro-ops, which
+		 * is every cycle.
+		 *
+		 * Thereby we gain a PEBS capable cycle counter.
+		 */
+		u64 alt_config = X86_CONFIG(.event=0xc2, .umask=0x01, .inv=1, .cmask=16);
 
 		alt_config |= (event->hw.config & ~X86_RAW_EVENT_MASK);
 		event->hw.config = alt_config;
 	}
+}
+
+static int intel_pmu_hw_config(struct perf_event *event)
+{
+	int ret = x86_pmu_hw_config(event);
+
+	if (ret)
+		return ret;
+
+	if (event->attr.precise_ip && x86_pmu.pebs_aliases)
+		x86_pmu.pebs_aliases(event);
 
 	if (intel_pmu_needs_lbr_smpl(event)) {
 		ret = intel_pmu_setup_lbr_filter(event);
@@ -1607,6 +1675,7 @@
 	.max_period		= (1ULL << 31) - 1,
 	.get_event_constraints	= intel_get_event_constraints,
 	.put_event_constraints	= intel_put_event_constraints,
+	.pebs_aliases		= intel_pebs_aliases_core2,
 
 	.format_attrs		= intel_arch3_formats_attr,
 
@@ -1840,8 +1909,9 @@
 		break;
 
 	case 42: /* SandyBridge */
-		x86_add_quirk(intel_sandybridge_quirk);
 	case 45: /* SandyBridge, "Romely-EP" */
+		x86_add_quirk(intel_sandybridge_quirk);
+	case 58: /* IvyBridge */
 		memcpy(hw_cache_event_ids, snb_hw_cache_event_ids,
 		       sizeof(hw_cache_event_ids));
 
@@ -1849,6 +1919,7 @@
 
 		x86_pmu.event_constraints = intel_snb_event_constraints;
 		x86_pmu.pebs_constraints = intel_snb_pebs_event_constraints;
+		x86_pmu.pebs_aliases = intel_pebs_aliases_snb;
 		x86_pmu.extra_regs = intel_snb_extra_regs;
 		/* all extra regs are per-cpu when HT is on */
 		x86_pmu.er_flags |= ERF_HAS_RSP_1;
diff --git a/arch/x86/kernel/cpu/perf_event_intel_ds.c b/arch/x86/kernel/cpu/perf_event_intel_ds.c
index 5a3edc2..35e2192 100644
--- a/arch/x86/kernel/cpu/perf_event_intel_ds.c
+++ b/arch/x86/kernel/cpu/perf_event_intel_ds.c
@@ -400,14 +400,7 @@
 	INTEL_EVENT_CONSTRAINT(0xc4, 0xf),    /* BR_INST_RETIRED.* */
 	INTEL_EVENT_CONSTRAINT(0xc5, 0xf),    /* BR_MISP_RETIRED.* */
 	INTEL_EVENT_CONSTRAINT(0xcd, 0x8),    /* MEM_TRANS_RETIRED.* */
-	INTEL_UEVENT_CONSTRAINT(0x11d0, 0xf), /* MEM_UOP_RETIRED.STLB_MISS_LOADS */
-	INTEL_UEVENT_CONSTRAINT(0x12d0, 0xf), /* MEM_UOP_RETIRED.STLB_MISS_STORES */
-	INTEL_UEVENT_CONSTRAINT(0x21d0, 0xf), /* MEM_UOP_RETIRED.LOCK_LOADS */
-	INTEL_UEVENT_CONSTRAINT(0x22d0, 0xf), /* MEM_UOP_RETIRED.LOCK_STORES */
-	INTEL_UEVENT_CONSTRAINT(0x41d0, 0xf), /* MEM_UOP_RETIRED.SPLIT_LOADS */
-	INTEL_UEVENT_CONSTRAINT(0x42d0, 0xf), /* MEM_UOP_RETIRED.SPLIT_STORES */
-	INTEL_UEVENT_CONSTRAINT(0x81d0, 0xf), /* MEM_UOP_RETIRED.ANY_LOADS */
-	INTEL_UEVENT_CONSTRAINT(0x82d0, 0xf), /* MEM_UOP_RETIRED.ANY_STORES */
+	INTEL_EVENT_CONSTRAINT(0xd0, 0xf),    /* MEM_UOP_RETIRED.* */
 	INTEL_EVENT_CONSTRAINT(0xd1, 0xf),    /* MEM_LOAD_UOPS_RETIRED.* */
 	INTEL_EVENT_CONSTRAINT(0xd2, 0xf),    /* MEM_LOAD_UOPS_LLC_HIT_RETIRED.* */
 	INTEL_UEVENT_CONSTRAINT(0x02d4, 0xf), /* MEM_LOAD_UOPS_MISC_RETIRED.LLC_MISS */
diff --git a/arch/x86/kernel/cpu/scattered.c b/arch/x86/kernel/cpu/scattered.c
index addf9e8..ee8e9ab 100644
--- a/arch/x86/kernel/cpu/scattered.c
+++ b/arch/x86/kernel/cpu/scattered.c
@@ -31,7 +31,7 @@
 	const struct cpuid_bit *cb;
 
 	static const struct cpuid_bit __cpuinitconst cpuid_bits[] = {
-		{ X86_FEATURE_DTS,		CR_EAX, 0, 0x00000006, 0 },
+		{ X86_FEATURE_DTHERM,		CR_EAX, 0, 0x00000006, 0 },
 		{ X86_FEATURE_IDA,		CR_EAX, 1, 0x00000006, 0 },
 		{ X86_FEATURE_ARAT,		CR_EAX, 2, 0x00000006, 0 },
 		{ X86_FEATURE_PLN,		CR_EAX, 4, 0x00000006, 0 },
diff --git a/arch/x86/kernel/e820.c b/arch/x86/kernel/e820.c
index 62d61e9..4185797 100644
--- a/arch/x86/kernel/e820.c
+++ b/arch/x86/kernel/e820.c
@@ -113,7 +113,9 @@
 	int x = e820x->nr_map;
 
 	if (x >= ARRAY_SIZE(e820x->map)) {
-		printk(KERN_ERR "Ooops! Too many entries in the memory map!\n");
+		printk(KERN_ERR "e820: too many entries; ignoring [mem %#010llx-%#010llx]\n",
+		       (unsigned long long) start,
+		       (unsigned long long) (start + size - 1));
 		return;
 	}
 
@@ -133,19 +135,19 @@
 	switch (type) {
 	case E820_RAM:
 	case E820_RESERVED_KERN:
-		printk(KERN_CONT "(usable)");
+		printk(KERN_CONT "usable");
 		break;
 	case E820_RESERVED:
-		printk(KERN_CONT "(reserved)");
+		printk(KERN_CONT "reserved");
 		break;
 	case E820_ACPI:
-		printk(KERN_CONT "(ACPI data)");
+		printk(KERN_CONT "ACPI data");
 		break;
 	case E820_NVS:
-		printk(KERN_CONT "(ACPI NVS)");
+		printk(KERN_CONT "ACPI NVS");
 		break;
 	case E820_UNUSABLE:
-		printk(KERN_CONT "(unusable)");
+		printk(KERN_CONT "unusable");
 		break;
 	default:
 		printk(KERN_CONT "type %u", type);
@@ -158,10 +160,10 @@
 	int i;
 
 	for (i = 0; i < e820.nr_map; i++) {
-		printk(KERN_INFO " %s: %016Lx - %016Lx ", who,
+		printk(KERN_INFO "%s: [mem %#018Lx-%#018Lx] ", who,
 		       (unsigned long long) e820.map[i].addr,
 		       (unsigned long long)
-		       (e820.map[i].addr + e820.map[i].size));
+		       (e820.map[i].addr + e820.map[i].size - 1));
 		e820_print_type(e820.map[i].type);
 		printk(KERN_CONT "\n");
 	}
@@ -428,9 +430,8 @@
 		size = ULLONG_MAX - start;
 
 	end = start + size;
-	printk(KERN_DEBUG "e820 update range: %016Lx - %016Lx ",
-		       (unsigned long long) start,
-		       (unsigned long long) end);
+	printk(KERN_DEBUG "e820: update [mem %#010Lx-%#010Lx] ",
+	       (unsigned long long) start, (unsigned long long) (end - 1));
 	e820_print_type(old_type);
 	printk(KERN_CONT " ==> ");
 	e820_print_type(new_type);
@@ -509,9 +510,8 @@
 		size = ULLONG_MAX - start;
 
 	end = start + size;
-	printk(KERN_DEBUG "e820 remove range: %016Lx - %016Lx ",
-		       (unsigned long long) start,
-		       (unsigned long long) end);
+	printk(KERN_DEBUG "e820: remove [mem %#010Lx-%#010Lx] ",
+	       (unsigned long long) start, (unsigned long long) (end - 1));
 	if (checktype)
 		e820_print_type(old_type);
 	printk(KERN_CONT "\n");
@@ -567,7 +567,7 @@
 	if (sanitize_e820_map(e820.map, ARRAY_SIZE(e820.map), &nr_map))
 		return;
 	e820.nr_map = nr_map;
-	printk(KERN_INFO "modified physical RAM map:\n");
+	printk(KERN_INFO "e820: modified physical RAM map:\n");
 	e820_print_map("modified");
 }
 static void __init update_e820_saved(void)
@@ -637,8 +637,8 @@
 	if (!found) {
 		gapstart = (max_pfn << PAGE_SHIFT) + 1024*1024;
 		printk(KERN_ERR
-	"PCI: Warning: Cannot find a gap in the 32bit address range\n"
-	"PCI: Unassigned devices with 32bit resource registers may break!\n");
+	"e820: cannot find a gap in the 32bit address range\n"
+	"e820: PCI devices with unassigned 32bit BARs may break!\n");
 	}
 #endif
 
@@ -648,8 +648,8 @@
 	pci_mem_start = gapstart;
 
 	printk(KERN_INFO
-	       "Allocating PCI resources starting at %lx (gap: %lx:%lx)\n",
-	       pci_mem_start, gapstart, gapsize);
+	       "e820: [mem %#010lx-%#010lx] available for PCI devices\n",
+	       gapstart, gapstart + gapsize - 1);
 }
 
 /**
@@ -667,7 +667,7 @@
 	extmap = (struct e820entry *)(sdata->data);
 	__append_e820_map(extmap, entries);
 	sanitize_e820_map(e820.map, ARRAY_SIZE(e820.map), &e820.nr_map);
-	printk(KERN_INFO "extended physical RAM map:\n");
+	printk(KERN_INFO "e820: extended physical RAM map:\n");
 	e820_print_map("extended");
 }
 
@@ -734,7 +734,7 @@
 	addr = __memblock_alloc_base(size, align, MEMBLOCK_ALLOC_ACCESSIBLE);
 	if (addr) {
 		e820_update_range_saved(addr, size, E820_RAM, E820_RESERVED);
-		printk(KERN_INFO "update e820_saved for early_reserve_e820\n");
+		printk(KERN_INFO "e820: update e820_saved for early_reserve_e820\n");
 		update_e820_saved();
 	}
 
@@ -784,7 +784,7 @@
 	if (last_pfn > max_arch_pfn)
 		last_pfn = max_arch_pfn;
 
-	printk(KERN_INFO "last_pfn = %#lx max_arch_pfn = %#lx\n",
+	printk(KERN_INFO "e820: last_pfn = %#lx max_arch_pfn = %#lx\n",
 			 last_pfn, max_arch_pfn);
 	return last_pfn;
 }
@@ -888,7 +888,7 @@
 			early_panic("Invalid user supplied memory map");
 		e820.nr_map = nr;
 
-		printk(KERN_INFO "user-defined physical RAM map:\n");
+		printk(KERN_INFO "e820: user-defined physical RAM map:\n");
 		e820_print_map("user");
 	}
 }
@@ -996,8 +996,9 @@
 			end = MAX_RESOURCE_SIZE;
 		if (start >= end)
 			continue;
-		printk(KERN_DEBUG "reserve RAM buffer: %016llx - %016llx ",
-			       start, end);
+		printk(KERN_DEBUG
+		       "e820: reserve RAM buffer [mem %#010llx-%#010llx]\n",
+		       start, end);
 		reserve_region_with_split(&iomem_resource, start, end,
 					  "RAM buffer");
 	}
@@ -1047,7 +1048,7 @@
 
 	who = x86_init.resources.memory_setup();
 	memcpy(&e820_saved, &e820, sizeof(struct e820map));
-	printk(KERN_INFO "BIOS-provided physical RAM map:\n");
+	printk(KERN_INFO "e820: BIOS-provided physical RAM map:\n");
 	e820_print_map(who);
 }
 
diff --git a/arch/x86/kernel/entry_32.S b/arch/x86/kernel/entry_32.S
index 01ccf9b..623f288 100644
--- a/arch/x86/kernel/entry_32.S
+++ b/arch/x86/kernel/entry_32.S
@@ -316,7 +316,6 @@
 	preempt_stop(CLBR_ANY)
 ret_from_intr:
 	GET_THREAD_INFO(%ebp)
-resume_userspace_sig:
 #ifdef CONFIG_VM86
 	movl PT_EFLAGS(%esp), %eax	# mix EFLAGS and CS
 	movb PT_CS(%esp), %al
@@ -615,9 +614,13 @@
 					# vm86-space
 	TRACE_IRQS_ON
 	ENABLE_INTERRUPTS(CLBR_NONE)
+	movb PT_CS(%esp), %bl
+	andb $SEGMENT_RPL_MASK, %bl
+	cmpb $USER_RPL, %bl
+	jb resume_kernel
 	xorl %edx, %edx
 	call do_notify_resume
-	jmp resume_userspace_sig
+	jmp resume_userspace
 
 	ALIGN
 work_notifysig_v86:
@@ -630,9 +633,13 @@
 #endif
 	TRACE_IRQS_ON
 	ENABLE_INTERRUPTS(CLBR_NONE)
+	movb PT_CS(%esp), %bl
+	andb $SEGMENT_RPL_MASK, %bl
+	cmpb $USER_RPL, %bl
+	jb resume_kernel
 	xorl %edx, %edx
 	call do_notify_resume
-	jmp resume_userspace_sig
+	jmp resume_userspace
 END(work_pending)
 
 	# perform syscall exit tracing
diff --git a/arch/x86/kernel/entry_64.S b/arch/x86/kernel/entry_64.S
index 320852d..7d65133 100644
--- a/arch/x86/kernel/entry_64.S
+++ b/arch/x86/kernel/entry_64.S
@@ -191,6 +191,44 @@
 .endm
 
 /*
+ * When dynamic function tracer is enabled it will add a breakpoint
+ * to all locations that it is about to modify, sync CPUs, update
+ * all the code, sync CPUs, then remove the breakpoints. During this time,
+ * if lockdep is enabled, it might jump back into the debug handler
+ * outside the updating of the IST protection (TRACE_IRQS_ON/OFF).
+ *
+ * We need to change the IDT table before calling TRACE_IRQS_ON/OFF to
+ * make sure the stack pointer does not get reset back to the top
+ * of the debug stack, and instead just reuses the current stack.
+ */
+#if defined(CONFIG_DYNAMIC_FTRACE) && defined(CONFIG_TRACE_IRQFLAGS)
+
+.macro TRACE_IRQS_OFF_DEBUG
+	call debug_stack_set_zero
+	TRACE_IRQS_OFF
+	call debug_stack_reset
+.endm
+
+.macro TRACE_IRQS_ON_DEBUG
+	call debug_stack_set_zero
+	TRACE_IRQS_ON
+	call debug_stack_reset
+.endm
+
+.macro TRACE_IRQS_IRETQ_DEBUG offset=ARGOFFSET
+	bt   $9,EFLAGS-\offset(%rsp)	/* interrupts off? */
+	jnc  1f
+	TRACE_IRQS_ON_DEBUG
+1:
+.endm
+
+#else
+# define TRACE_IRQS_OFF_DEBUG		TRACE_IRQS_OFF
+# define TRACE_IRQS_ON_DEBUG		TRACE_IRQS_ON
+# define TRACE_IRQS_IRETQ_DEBUG		TRACE_IRQS_IRETQ
+#endif
+
+/*
  * C code is not supposed to know about undefined top of stack. Every time
  * a C function with an pt_regs argument is called from the SYSCALL based
  * fast path FIXUP_TOP_OF_STACK is needed.
@@ -1098,7 +1136,7 @@
 	subq $ORIG_RAX-R15, %rsp
 	CFI_ADJUST_CFA_OFFSET ORIG_RAX-R15
 	call save_paranoid
-	TRACE_IRQS_OFF
+	TRACE_IRQS_OFF_DEBUG
 	movq %rsp,%rdi		/* pt_regs pointer */
 	xorl %esi,%esi		/* no error code */
 	subq $EXCEPTION_STKSZ, INIT_TSS_IST(\ist)
@@ -1393,7 +1431,7 @@
 ENTRY(paranoid_exit)
 	DEFAULT_FRAME
 	DISABLE_INTERRUPTS(CLBR_NONE)
-	TRACE_IRQS_OFF
+	TRACE_IRQS_OFF_DEBUG
 	testl %ebx,%ebx				/* swapgs needed? */
 	jnz paranoid_restore
 	testl $3,CS(%rsp)
@@ -1404,7 +1442,7 @@
 	RESTORE_ALL 8
 	jmp irq_return
 paranoid_restore:
-	TRACE_IRQS_IRETQ 0
+	TRACE_IRQS_IRETQ_DEBUG 0
 	RESTORE_ALL 8
 	jmp irq_return
 paranoid_userspace:
diff --git a/arch/x86/kernel/ftrace.c b/arch/x86/kernel/ftrace.c
index 32ff365..c3a7cb4 100644
--- a/arch/x86/kernel/ftrace.c
+++ b/arch/x86/kernel/ftrace.c
@@ -100,7 +100,7 @@
 }
 
 static int
-ftrace_modify_code(unsigned long ip, unsigned const char *old_code,
+ftrace_modify_code_direct(unsigned long ip, unsigned const char *old_code,
 		   unsigned const char *new_code)
 {
 	unsigned char replaced[MCOUNT_INSN_SIZE];
@@ -141,7 +141,20 @@
 	old = ftrace_call_replace(ip, addr);
 	new = ftrace_nop_replace();
 
-	return ftrace_modify_code(rec->ip, old, new);
+	/*
+	 * On boot up, and when modules are loaded, the MCOUNT_ADDR
+	 * is converted to a nop, and will never become MCOUNT_ADDR
+	 * again. This code is either running before SMP (on boot up)
+	 * or before the code will ever be executed (module load).
+	 * We do not want to use the breakpoint version in this case,
+	 * just modify the code directly.
+	 */
+	if (addr == MCOUNT_ADDR)
+		return ftrace_modify_code_direct(rec->ip, old, new);
+
+	/* Normal cases use add_brk_on_nop */
+	WARN_ONCE(1, "invalid use of ftrace_make_nop");
+	return -EINVAL;
 }
 
 int ftrace_make_call(struct dyn_ftrace *rec, unsigned long addr)
@@ -152,9 +165,47 @@
 	old = ftrace_nop_replace();
 	new = ftrace_call_replace(ip, addr);
 
-	return ftrace_modify_code(rec->ip, old, new);
+	/* Should only be called when module is loaded */
+	return ftrace_modify_code_direct(rec->ip, old, new);
 }
 
+/*
+ * The modifying_ftrace_code is used to tell the breakpoint
+ * handler to call ftrace_int3_handler(). If it fails to
+ * call this handler for a breakpoint added by ftrace, then
+ * the kernel may crash.
+ *
+ * As atomic_writes on x86 do not need a barrier, we do not
+ * need to add smp_mb()s for this to work. It is also considered
+ * that the CPU cannot read the modifying_ftrace_code before
+ * executing the breakpoint. That would be quite remarkable if
+ * it could do that. Here's the flow that is required:
+ *
+ *   CPU-0                          CPU-1
+ *
+ * atomic_inc(mfc);
+ * write int3s
+ *				<trap-int3> // implicit (r)mb
+ *				if (atomic_read(mfc))
+ *					call ftrace_int3_handler()
+ *
+ * Then when we are finished:
+ *
+ * atomic_dec(mfc);
+ *
+ * If we hit a breakpoint that was not set by ftrace, it does not
+ * matter if ftrace_int3_handler() is called or not. It will
+ * simply be ignored. But it is crucial that an ftrace nop/caller
+ * breakpoint is handled. No other user should ever place a
+ * breakpoint on an ftrace nop/caller location. It must only
+ * be done by this code.
+ */
+atomic_t modifying_ftrace_code __read_mostly;
+
+static int
+ftrace_modify_code(unsigned long ip, unsigned const char *old_code,
+		   unsigned const char *new_code);
+
 int ftrace_update_ftrace_func(ftrace_func_t func)
 {
 	unsigned long ip = (unsigned long)(&ftrace_call);
@@ -163,13 +214,17 @@
 
 	memcpy(old, &ftrace_call, MCOUNT_INSN_SIZE);
 	new = ftrace_call_replace(ip, (unsigned long)func);
+
+	/* See comment above by declaration of modifying_ftrace_code */
+	atomic_inc(&modifying_ftrace_code);
+
 	ret = ftrace_modify_code(ip, old, new);
 
+	atomic_dec(&modifying_ftrace_code);
+
 	return ret;
 }
 
-int modifying_ftrace_code __read_mostly;
-
 /*
  * A breakpoint was added to the code address we are about to
  * modify, and this is the handle that will just skip over it.
@@ -489,13 +544,46 @@
 	}
 }
 
+static int
+ftrace_modify_code(unsigned long ip, unsigned const char *old_code,
+		   unsigned const char *new_code)
+{
+	int ret;
+
+	ret = add_break(ip, old_code);
+	if (ret)
+		goto out;
+
+	run_sync();
+
+	ret = add_update_code(ip, new_code);
+	if (ret)
+		goto fail_update;
+
+	run_sync();
+
+	ret = ftrace_write(ip, new_code, 1);
+	if (ret) {
+		ret = -EPERM;
+		goto out;
+	}
+	run_sync();
+ out:
+	return ret;
+
+ fail_update:
+	probe_kernel_write((void *)ip, &old_code[0], 1);
+	goto out;
+}
+
 void arch_ftrace_update_code(int command)
 {
-	modifying_ftrace_code++;
+	/* See comment above by declaration of modifying_ftrace_code */
+	atomic_inc(&modifying_ftrace_code);
 
 	ftrace_modify_all_code(command);
 
-	modifying_ftrace_code--;
+	atomic_dec(&modifying_ftrace_code);
 }
 
 int __init ftrace_dyn_arch_init(void *data)
diff --git a/arch/x86/kernel/head32.c b/arch/x86/kernel/head32.c
index 51ff186..c18f59d 100644
--- a/arch/x86/kernel/head32.c
+++ b/arch/x86/kernel/head32.c
@@ -14,7 +14,6 @@
 #include <asm/sections.h>
 #include <asm/e820.h>
 #include <asm/page.h>
-#include <asm/trampoline.h>
 #include <asm/apic.h>
 #include <asm/io_apic.h>
 #include <asm/bios_ebda.h>
diff --git a/arch/x86/kernel/head64.c b/arch/x86/kernel/head64.c
index 3a3b779..037df57 100644
--- a/arch/x86/kernel/head64.c
+++ b/arch/x86/kernel/head64.c
@@ -24,7 +24,6 @@
 #include <asm/sections.h>
 #include <asm/kdebug.h>
 #include <asm/e820.h>
-#include <asm/trampoline.h>
 #include <asm/bios_ebda.h>
 
 static void __init zap_identity_mappings(void)
diff --git a/arch/x86/kernel/head_32.S b/arch/x86/kernel/head_32.S
index 463c979..d42ab17 100644
--- a/arch/x86/kernel/head_32.S
+++ b/arch/x86/kernel/head_32.S
@@ -274,10 +274,7 @@
  * If cpu hotplug is not supported then this code can go in init section
  * which will be freed later
  */
-
 __CPUINIT
-
-#ifdef CONFIG_SMP
 ENTRY(startup_32_smp)
 	cld
 	movl $(__BOOT_DS),%eax
@@ -288,7 +285,7 @@
 	movl pa(stack_start),%ecx
 	movl %eax,%ss
 	leal -__PAGE_OFFSET(%ecx),%esp
-#endif /* CONFIG_SMP */
+
 default_entry:
 
 /*
diff --git a/arch/x86/kernel/head_64.S b/arch/x86/kernel/head_64.S
index 7a40f24..94bf9cc 100644
--- a/arch/x86/kernel/head_64.S
+++ b/arch/x86/kernel/head_64.S
@@ -139,10 +139,6 @@
 	/* Fixup phys_base */
 	addq	%rbp, phys_base(%rip)
 
-	/* Fixup trampoline */
-	addq	%rbp, trampoline_level4_pgt + 0(%rip)
-	addq	%rbp, trampoline_level4_pgt + (511*8)(%rip)
-
 	/* Due to ENTRY(), sometimes the empty space gets filled with
 	 * zeros. Better take a jmp than relying on empty space being
 	 * filled with 0x90 (nop)
diff --git a/arch/x86/kernel/hpet.c b/arch/x86/kernel/hpet.c
index 9cc7b43..1460a5d 100644
--- a/arch/x86/kernel/hpet.c
+++ b/arch/x86/kernel/hpet.c
@@ -870,7 +870,7 @@
 	else
 		pr_warn("HPET initial state will not be saved\n");
 	cfg &= ~(HPET_CFG_ENABLE | HPET_CFG_LEGACY);
-	hpet_writel(cfg, HPET_Tn_CFG(i));
+	hpet_writel(cfg, HPET_CFG);
 	if (cfg)
 		pr_warn("HPET: Unrecognized bits %#x set in global cfg\n",
 			cfg);
diff --git a/arch/x86/kernel/kgdb.c b/arch/x86/kernel/kgdb.c
index 8bfb614..3f61904 100644
--- a/arch/x86/kernel/kgdb.c
+++ b/arch/x86/kernel/kgdb.c
@@ -444,12 +444,12 @@
 
 /**
  *	kgdb_arch_handle_exception - Handle architecture specific GDB packets.
- *	@vector: The error vector of the exception that happened.
+ *	@e_vector: The error vector of the exception that happened.
  *	@signo: The signal number of the exception that happened.
  *	@err_code: The error code of the exception that happened.
- *	@remcom_in_buffer: The buffer of the packet we have read.
- *	@remcom_out_buffer: The buffer of %BUFMAX bytes to write a packet into.
- *	@regs: The &struct pt_regs of the current process.
+ *	@remcomInBuffer: The buffer of the packet we have read.
+ *	@remcomOutBuffer: The buffer of %BUFMAX bytes to write a packet into.
+ *	@linux_regs: The &struct pt_regs of the current process.
  *
  *	This function MUST handle the 'c' and 's' command packets,
  *	as well packets to set / remove a hardware breakpoint, if used.
diff --git a/arch/x86/kernel/kvmclock.c b/arch/x86/kernel/kvmclock.c
index 086eb58..f1b42b3a 100644
--- a/arch/x86/kernel/kvmclock.c
+++ b/arch/x86/kernel/kvmclock.c
@@ -120,11 +120,6 @@
 	bool ret = false;
 	struct pvclock_vcpu_time_info *src;
 
-	/*
-	 * per_cpu() is safe here because this function is only called from
-	 * timer functions where preemption is already disabled.
-	 */
-	WARN_ON(!in_atomic());
 	src = &__get_cpu_var(hv_clock);
 	if ((src->flags & PVCLOCK_GUEST_STOPPED) != 0) {
 		__this_cpu_and(hv_clock.flags, ~PVCLOCK_GUEST_STOPPED);
diff --git a/arch/x86/kernel/mpparse.c b/arch/x86/kernel/mpparse.c
index b02d4dd..d2b5648 100644
--- a/arch/x86/kernel/mpparse.c
+++ b/arch/x86/kernel/mpparse.c
@@ -27,7 +27,6 @@
 #include <asm/proto.h>
 #include <asm/bios_ebda.h>
 #include <asm/e820.h>
-#include <asm/trampoline.h>
 #include <asm/setup.h>
 #include <asm/smp.h>
 
@@ -568,8 +567,8 @@
 	struct mpf_intel *mpf;
 	unsigned long mem;
 
-	apic_printk(APIC_VERBOSE, "Scan SMP from %p for %ld bytes.\n",
-			bp, length);
+	apic_printk(APIC_VERBOSE, "Scan for SMP in [mem %#010lx-%#010lx]\n",
+		    base, base + length - 1);
 	BUILD_BUG_ON(sizeof(*mpf) != 16);
 
 	while (length > 0) {
@@ -584,8 +583,10 @@
 #endif
 			mpf_found = mpf;
 
-			printk(KERN_INFO "found SMP MP-table at [%p] %llx\n",
-			       mpf, (u64)virt_to_phys(mpf));
+			printk(KERN_INFO "found SMP MP-table at [mem %#010llx-%#010llx] mapped at [%p]\n",
+			       (unsigned long long) virt_to_phys(mpf),
+			       (unsigned long long) virt_to_phys(mpf) +
+			       sizeof(*mpf) - 1, mpf);
 
 			mem = virt_to_phys(mpf);
 			memblock_reserve(mem, sizeof(*mpf));
diff --git a/arch/x86/kernel/nmi.c b/arch/x86/kernel/nmi.c
index 9087527..a0b2f84 100644
--- a/arch/x86/kernel/nmi.c
+++ b/arch/x86/kernel/nmi.c
@@ -444,14 +444,16 @@
 	 */
 	if (unlikely(is_debug_stack(regs->sp))) {
 		debug_stack_set_zero();
-		__get_cpu_var(update_debug_stack) = 1;
+		this_cpu_write(update_debug_stack, 1);
 	}
 }
 
 static inline void nmi_nesting_postprocess(void)
 {
-	if (unlikely(__get_cpu_var(update_debug_stack)))
+	if (unlikely(this_cpu_read(update_debug_stack))) {
 		debug_stack_reset();
+		this_cpu_write(update_debug_stack, 0);
+	}
 }
 #endif
 
diff --git a/arch/x86/kernel/nmi_selftest.c b/arch/x86/kernel/nmi_selftest.c
index e31bf8d..149b8d9 100644
--- a/arch/x86/kernel/nmi_selftest.c
+++ b/arch/x86/kernel/nmi_selftest.c
@@ -42,7 +42,7 @@
 static void __init init_nmi_testsuite(void)
 {
 	/* trap all the unknown NMIs we may generate */
-	register_nmi_handler(NMI_UNKNOWN, nmi_unk_cb, 0, "nmi_selftest_unk");
+	register_nmi_handler_initonly(NMI_UNKNOWN, nmi_unk_cb, 0, "nmi_selftest_unk");
 }
 
 static void __init cleanup_nmi_testsuite(void)
@@ -64,7 +64,7 @@
 {
 	unsigned long timeout;
 
-	if (register_nmi_handler(NMI_LOCAL, test_nmi_ipi_callback,
+	if (register_nmi_handler_initonly(NMI_LOCAL, test_nmi_ipi_callback,
 				 NMI_FLAG_FIRST, "nmi_selftest")) {
 		nmi_fail = FAILURE;
 		return;
diff --git a/arch/x86/kernel/pci-dma.c b/arch/x86/kernel/pci-dma.c
index 3003250..c0f420f 100644
--- a/arch/x86/kernel/pci-dma.c
+++ b/arch/x86/kernel/pci-dma.c
@@ -101,13 +101,18 @@
 {
 	unsigned long dma_mask;
 	struct page *page;
+	unsigned int count = PAGE_ALIGN(size) >> PAGE_SHIFT;
 	dma_addr_t addr;
 
 	dma_mask = dma_alloc_coherent_mask(dev, flag);
 
 	flag |= __GFP_ZERO;
 again:
-	page = alloc_pages_node(dev_to_node(dev), flag, get_order(size));
+	page = NULL;
+	if (!(flag & GFP_ATOMIC))
+		page = dma_alloc_from_contiguous(dev, count, get_order(size));
+	if (!page)
+		page = alloc_pages_node(dev_to_node(dev), flag, get_order(size));
 	if (!page)
 		return NULL;
 
@@ -127,6 +132,16 @@
 	return page_address(page);
 }
 
+void dma_generic_free_coherent(struct device *dev, size_t size, void *vaddr,
+			       dma_addr_t dma_addr, struct dma_attrs *attrs)
+{
+	unsigned int count = PAGE_ALIGN(size) >> PAGE_SHIFT;
+	struct page *page = virt_to_page(vaddr);
+
+	if (!dma_release_from_contiguous(dev, page, count))
+		free_pages((unsigned long)vaddr, get_order(size));
+}
+
 /*
  * See <Documentation/x86/x86_64/boot-options.txt> for the iommu kernel
  * parameter documentation.
diff --git a/arch/x86/kernel/pci-nommu.c b/arch/x86/kernel/pci-nommu.c
index f960506..871be4a 100644
--- a/arch/x86/kernel/pci-nommu.c
+++ b/arch/x86/kernel/pci-nommu.c
@@ -74,12 +74,6 @@
 	return nents;
 }
 
-static void nommu_free_coherent(struct device *dev, size_t size, void *vaddr,
-				dma_addr_t dma_addr, struct dma_attrs *attrs)
-{
-	free_pages((unsigned long)vaddr, get_order(size));
-}
-
 static void nommu_sync_single_for_device(struct device *dev,
 			dma_addr_t addr, size_t size,
 			enum dma_data_direction dir)
@@ -97,7 +91,7 @@
 
 struct dma_map_ops nommu_dma_ops = {
 	.alloc			= dma_generic_alloc_coherent,
-	.free			= nommu_free_coherent,
+	.free			= dma_generic_free_coherent,
 	.map_sg			= nommu_map_sg,
 	.map_page		= nommu_map_page,
 	.sync_single_for_device = nommu_sync_single_for_device,
diff --git a/arch/x86/kernel/ptrace.c b/arch/x86/kernel/ptrace.c
index 13b1990..c4c6a5c 100644
--- a/arch/x86/kernel/ptrace.c
+++ b/arch/x86/kernel/ptrace.c
@@ -1211,12 +1211,6 @@
 					     0, sizeof(struct user_i387_struct),
 					     datap);
 
-		/* normal 64bit interface to access TLS data.
-		   Works just like arch_prctl, except that the arguments
-		   are reversed. */
-	case PTRACE_ARCH_PRCTL:
-		return do_arch_prctl(child, data, addr);
-
 	default:
 		return compat_ptrace_request(child, request, addr, data);
 	}
diff --git a/arch/x86/kernel/reboot.c b/arch/x86/kernel/reboot.c
index 77215c2..5de92f1 100644
--- a/arch/x86/kernel/reboot.c
+++ b/arch/x86/kernel/reboot.c
@@ -24,6 +24,7 @@
 #ifdef CONFIG_X86_32
 # include <linux/ctype.h>
 # include <linux/mc146818rtc.h>
+# include <asm/realmode.h>
 #else
 # include <asm/x86_init.h>
 #endif
@@ -156,15 +157,10 @@
 	return 0;
 }
 
-extern const unsigned char machine_real_restart_asm[];
-extern const u64 machine_real_restart_gdt[3];
-
 void machine_real_restart(unsigned int type)
 {
-	void *restart_va;
-	unsigned long restart_pa;
-	void (*restart_lowmem)(unsigned int);
-	u64 *lowmem_gdt;
+	void (*restart_lowmem)(unsigned int) = (void (*)(unsigned int))
+		real_mode_header->machine_real_restart_asm;
 
 	local_irq_disable();
 
@@ -195,21 +191,6 @@
 	 * too. */
 	*((unsigned short *)0x472) = reboot_mode;
 
-	/* Patch the GDT in the low memory trampoline */
-	lowmem_gdt = TRAMPOLINE_SYM(machine_real_restart_gdt);
-
-	restart_va = TRAMPOLINE_SYM(machine_real_restart_asm);
-	restart_pa = virt_to_phys(restart_va);
-	restart_lowmem = (void (*)(unsigned int))restart_pa;
-
-	/* GDT[0]: GDT self-pointer */
-	lowmem_gdt[0] =
-		(u64)(sizeof(machine_real_restart_gdt) - 1) +
-		((u64)virt_to_phys(lowmem_gdt) << 16);
-	/* GDT[1]: 64K real mode code segment */
-	lowmem_gdt[1] =
-		GDT_ENTRY(0x009b, restart_pa, 0xffff);
-
 	/* Jump to the identity-mapped low memory code */
 	restart_lowmem(type);
 }
@@ -470,6 +451,14 @@
 			DMI_MATCH(DMI_PRODUCT_NAME, "OptiPlex 990"),
 		},
 	},
+	{	/* Handle problems with rebooting on the Precision M6600. */
+		.callback = set_pci_reboot,
+		.ident = "Dell Precision M6600",
+		.matches = {
+			DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
+			DMI_MATCH(DMI_PRODUCT_NAME, "Precision M6600"),
+		},
+	},
 	{ }
 };
 
@@ -658,9 +647,11 @@
 	set_cpus_allowed_ptr(current, cpumask_of(reboot_cpu_id));
 
 	/*
-	 * O.K Now that I'm on the appropriate processor,
-	 * stop all of the others.
+	 * O.K. Now that I'm on the appropriate processor, stop all of the
+	 * others. Also disable the local irq so we don't receive the per-cpu
+	 * timer interrupt, which may trigger the scheduler's load balancing.
 	 */
+	local_irq_disable();
 	stop_other_cpus();
 #endif
 
diff --git a/arch/x86/kernel/setup.c b/arch/x86/kernel/setup.c
index 366c688..16be6dc 100644
--- a/arch/x86/kernel/setup.c
+++ b/arch/x86/kernel/setup.c
@@ -49,6 +49,7 @@
 #include <asm/pci-direct.h>
 #include <linux/init_ohci1394_dma.h>
 #include <linux/kvm_para.h>
+#include <linux/dma-contiguous.h>
 
 #include <linux/errno.h>
 #include <linux/kernel.h>
@@ -72,7 +73,7 @@
 
 #include <asm/mtrr.h>
 #include <asm/apic.h>
-#include <asm/trampoline.h>
+#include <asm/realmode.h>
 #include <asm/e820.h>
 #include <asm/mpspec.h>
 #include <asm/setup.h>
@@ -333,8 +334,8 @@
 	memblock_reserve(ramdisk_here, area_size);
 	initrd_start = ramdisk_here + PAGE_OFFSET;
 	initrd_end   = initrd_start + ramdisk_size;
-	printk(KERN_INFO "Allocated new RAMDISK: %08llx - %08llx\n",
-			 ramdisk_here, ramdisk_here + ramdisk_size);
+	printk(KERN_INFO "Allocated new RAMDISK: [mem %#010llx-%#010llx]\n",
+			 ramdisk_here, ramdisk_here + ramdisk_size - 1);
 
 	q = (char *)initrd_start;
 
@@ -365,8 +366,8 @@
 	/* high pages is not converted by early_res_to_bootmem */
 	ramdisk_image = boot_params.hdr.ramdisk_image;
 	ramdisk_size  = boot_params.hdr.ramdisk_size;
-	printk(KERN_INFO "Move RAMDISK from %016llx - %016llx to"
-		" %08llx - %08llx\n",
+	printk(KERN_INFO "Move RAMDISK from [mem %#010llx-%#010llx] to"
+		" [mem %#010llx-%#010llx]\n",
 		ramdisk_image, ramdisk_image + ramdisk_size - 1,
 		ramdisk_here, ramdisk_here + ramdisk_size - 1);
 }
@@ -391,8 +392,8 @@
 		       ramdisk_size, end_of_lowmem>>1);
 	}
 
-	printk(KERN_INFO "RAMDISK: %08llx - %08llx\n", ramdisk_image,
-			ramdisk_end);
+	printk(KERN_INFO "RAMDISK: [mem %#010llx-%#010llx]\n", ramdisk_image,
+			ramdisk_end - 1);
 
 
 	if (ramdisk_end <= end_of_lowmem) {
@@ -905,10 +906,10 @@
 	setup_bios_corruption_check();
 #endif
 
-	printk(KERN_DEBUG "initial memory mapped : 0 - %08lx\n",
-			max_pfn_mapped<<PAGE_SHIFT);
+	printk(KERN_DEBUG "initial memory mapped: [mem 0x00000000-%#010lx]\n",
+			(max_pfn_mapped<<PAGE_SHIFT) - 1);
 
-	setup_trampolines();
+	setup_real_mode();
 
 	init_gbpages();
 
@@ -925,6 +926,7 @@
 	}
 #endif
 	memblock.current_limit = get_max_mapped();
+	dma_contiguous_reserve(0);
 
 	/*
 	 * NOTE: On x86-32, only from this point on, fixmaps are ready for use.
@@ -966,6 +968,8 @@
 	if (boot_cpu_data.cpuid_level >= 0) {
 		/* A CPU has %cr4 if and only if it has CPUID */
 		mmu_cr4_features = read_cr4();
+		if (trampoline_cr4_features)
+			*trampoline_cr4_features = mmu_cr4_features;
 	}
 
 #ifdef CONFIG_X86_32
diff --git a/arch/x86/kernel/signal.c b/arch/x86/kernel/signal.c
index 965dfda..21af737 100644
--- a/arch/x86/kernel/signal.c
+++ b/arch/x86/kernel/signal.c
@@ -555,7 +555,6 @@
 				    sizeof(frame->extramask))))
 		goto badframe;
 
-	sigdelsetmask(&set, ~_BLOCKABLE);
 	set_current_blocked(&set);
 
 	if (restore_sigcontext(regs, &frame->sc, &ax))
@@ -581,7 +580,6 @@
 	if (__copy_from_user(&set, &frame->uc.uc_sigmask, sizeof(set)))
 		goto badframe;
 
-	sigdelsetmask(&set, ~_BLOCKABLE);
 	set_current_blocked(&set);
 
 	if (restore_sigcontext(regs, &frame->uc.uc_mcontext, &ax))
@@ -647,42 +645,28 @@
 		struct pt_regs *regs)
 {
 	int usig = signr_convert(sig);
-	sigset_t *set = &current->blocked;
-	int ret;
-
-	if (current_thread_info()->status & TS_RESTORE_SIGMASK)
-		set = &current->saved_sigmask;
+	sigset_t *set = sigmask_to_save();
 
 	/* Set up the stack frame */
 	if (is_ia32) {
 		if (ka->sa.sa_flags & SA_SIGINFO)
-			ret = ia32_setup_rt_frame(usig, ka, info, set, regs);
+			return ia32_setup_rt_frame(usig, ka, info, set, regs);
 		else
-			ret = ia32_setup_frame(usig, ka, set, regs);
+			return ia32_setup_frame(usig, ka, set, regs);
 #ifdef CONFIG_X86_X32_ABI
 	} else if (is_x32) {
-		ret = x32_setup_rt_frame(usig, ka, info,
+		return x32_setup_rt_frame(usig, ka, info,
 					 (compat_sigset_t *)set, regs);
 #endif
 	} else {
-		ret = __setup_rt_frame(sig, ka, info, set, regs);
+		return __setup_rt_frame(sig, ka, info, set, regs);
 	}
-
-	if (ret) {
-		force_sigsegv(sig, current);
-		return -EFAULT;
-	}
-
-	current_thread_info()->status &= ~TS_RESTORE_SIGMASK;
-	return ret;
 }
 
-static int
+static void
 handle_signal(unsigned long sig, siginfo_t *info, struct k_sigaction *ka,
 		struct pt_regs *regs)
 {
-	int ret;
-
 	/* Are we from a system call? */
 	if (syscall_get_nr(current, regs) >= 0) {
 		/* If so, check system call restarting.. */
@@ -713,10 +697,10 @@
 	    likely(test_and_clear_thread_flag(TIF_FORCED_TF)))
 		regs->flags &= ~X86_EFLAGS_TF;
 
-	ret = setup_rt_frame(sig, ka, info, regs);
-
-	if (ret)
-		return ret;
+	if (setup_rt_frame(sig, ka, info, regs) < 0) {
+		force_sigsegv(sig, current);
+		return;
+	}
 
 	/*
 	 * Clear the direction flag as per the ABI for function entry.
@@ -731,12 +715,8 @@
 	 */
 	regs->flags &= ~X86_EFLAGS_TF;
 
-	block_sigmask(ka, sig);
-
-	tracehook_signal_handler(sig, info, ka, regs,
-				 test_thread_flag(TIF_SINGLESTEP));
-
-	return 0;
+	signal_delivered(sig, info, ka, regs,
+			 test_thread_flag(TIF_SINGLESTEP));
 }
 
 #ifdef CONFIG_X86_32
@@ -757,16 +737,6 @@
 	siginfo_t info;
 	int signr;
 
-	/*
-	 * We want the common case to go fast, which is why we may in certain
-	 * cases get here from kernel mode. Just return without doing anything
-	 * if so.
-	 * X86_32: vm86 regs switched out by assembly code before reaching
-	 * here, so testing against kernel CS suffices.
-	 */
-	if (!user_mode(regs))
-		return;
-
 	signr = get_signal_to_deliver(&info, &ka, regs, NULL);
 	if (signr > 0) {
 		/* Whee! Actually deliver the signal.  */
@@ -796,10 +766,7 @@
 	 * If there's no signal to deliver, we just put the saved sigmask
 	 * back.
 	 */
-	if (current_thread_info()->status & TS_RESTORE_SIGMASK) {
-		current_thread_info()->status &= ~TS_RESTORE_SIGMASK;
-		set_current_blocked(&current->saved_sigmask);
-	}
+	restore_saved_sigmask();
 }
 
 /*
@@ -827,8 +794,6 @@
 	if (thread_info_flags & _TIF_NOTIFY_RESUME) {
 		clear_thread_flag(TIF_NOTIFY_RESUME);
 		tracehook_notify_resume(regs);
-		if (current->replacement_session_keyring)
-			key_replace_session_keyring();
 	}
 	if (thread_info_flags & _TIF_USER_RETURN_NOTIFY)
 		fire_user_return_notifiers();
@@ -936,7 +901,6 @@
 	if (__copy_from_user(&set, &frame->uc.uc_sigmask, sizeof(set)))
 		goto badframe;
 
-	sigdelsetmask(&set, ~_BLOCKABLE);
 	set_current_blocked(&set);
 
 	if (restore_sigcontext(regs, &frame->uc.uc_mcontext, &ax))
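
The sigmask_to_save() and signal_delivered() calls introduced above are generic signal helpers; the open-coded TS_RESTORE_SIGMASK handling they replace is visible in the removed lines. A minimal sketch of what sigmask_to_save() is assumed to do, reconstructed from that removed code (the real definition lives in the generic signal code, not in this patch):

static inline sigset_t *sigmask_to_save_sketch(void)
{
	sigset_t *set = &current->blocked;

	/* If a saved sigmask is pending restoration, deliver with that one. */
	if (current_thread_info()->status & TS_RESTORE_SIGMASK)
		set = &current->saved_sigmask;
	return set;
}
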
diff --git a/arch/x86/kernel/smpboot.c b/arch/x86/kernel/smpboot.c
index 433529e..7bd8a08 100644
--- a/arch/x86/kernel/smpboot.c
+++ b/arch/x86/kernel/smpboot.c
@@ -57,7 +57,7 @@
 #include <asm/nmi.h>
 #include <asm/irq.h>
 #include <asm/idle.h>
-#include <asm/trampoline.h>
+#include <asm/realmode.h>
 #include <asm/cpu.h>
 #include <asm/numa.h>
 #include <asm/pgtable.h>
@@ -73,6 +73,8 @@
 #include <asm/smpboot_hooks.h>
 #include <asm/i8259.h>
 
+#include <asm/realmode.h>
+
 /* State of each CPU */
 DEFINE_PER_CPU(int, cpu_state) = { 0 };
 
@@ -347,9 +349,12 @@
 
 static bool __cpuinit match_mc(struct cpuinfo_x86 *c, struct cpuinfo_x86 *o)
 {
-	if (c->phys_proc_id == o->phys_proc_id)
-		return topology_sane(c, o, "mc");
+	if (c->phys_proc_id == o->phys_proc_id) {
+		if (cpu_has(c, X86_FEATURE_AMD_DCM))
+			return true;
 
+		return topology_sane(c, o, "mc");
+	}
 	return false;
 }
 
@@ -380,6 +385,15 @@
 		if ((i == cpu) || (has_mc && match_llc(c, o)))
 			link_mask(llc_shared, cpu, i);
 
+	}
+
+	/*
+	 * This needs a separate iteration over the cpus because we rely on all
+	 * cpu_sibling_mask links to be set-up.
+	 */
+	for_each_cpu(i, cpu_sibling_setup_mask) {
+		o = &cpu_data(i);
+
 		if ((i == cpu) || (has_mc && match_mc(c, o))) {
 			link_mask(core, cpu, i);
 
@@ -408,15 +422,7 @@
 /* maps the cpu to the sched domain representing multi-core */
 const struct cpumask *cpu_coregroup_mask(int cpu)
 {
-	struct cpuinfo_x86 *c = &cpu_data(cpu);
-	/*
-	 * For perf, we return last level cache shared map.
-	 * And for power savings, we return cpu_core_map
-	 */
-	if (!(cpu_has(c, X86_FEATURE_AMD_DCM)))
-		return cpu_core_mask(cpu);
-	else
-		return cpu_llc_shared_mask(cpu);
+	return cpu_llc_shared_mask(cpu);
 }
 
 static void impress_friends(void)
@@ -660,8 +666,12 @@
  */
 static int __cpuinit do_boot_cpu(int apicid, int cpu, struct task_struct *idle)
 {
+	volatile u32 *trampoline_status =
+		(volatile u32 *) __va(real_mode_header->trampoline_status);
+	/* start_ip had better be page-aligned! */
+	unsigned long start_ip = real_mode_header->trampoline_start;
+
 	unsigned long boot_error = 0;
-	unsigned long start_ip;
 	int timeout;
 
 	alternatives_smp_switch(1);
@@ -684,9 +694,6 @@
 	initial_code = (unsigned long)start_secondary;
 	stack_start  = idle->thread.sp;
 
-	/* start_ip had better be page-aligned! */
-	start_ip = trampoline_address();
-
 	/* So we see what's up */
 	announce_cpu(cpu, apicid);
 
@@ -749,8 +756,7 @@
 			pr_debug("CPU%d: has booted.\n", cpu);
 		} else {
 			boot_error = 1;
-			if (*(volatile u32 *)TRAMPOLINE_SYM(trampoline_status)
-			    == 0xA5A5A5A5)
+			if (*trampoline_status == 0xA5A5A5A5)
 				/* trampoline started but...? */
 				pr_err("CPU%d: Stuck ??\n", cpu);
 			else
@@ -776,7 +782,7 @@
 	}
 
 	/* mark "stuck" area as not stuck */
-	*(volatile u32 *)TRAMPOLINE_SYM(trampoline_status) = 0;
+	*trampoline_status = 0;
 
 	if (get_uv_system_type() != UV_NON_UNIQUE_APIC) {
 		/*
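
With the trampoline now living in the copied real-mode blob, its data is no longer reachable through kernel symbols; do_boot_cpu() above gets at it by translating the physical offsets recorded in real_mode_header. A small sketch of that access pattern (the helper name is made up for illustration):

static u32 read_trampoline_status(void)
{
	/* trampoline_status is a physical address inside the low-memory blob. */
	volatile u32 *status =
		(volatile u32 *) __va(real_mode_header->trampoline_status);

	return *status;		/* 0xA5A5A5A5 once the AP trampoline has started */
}
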
diff --git a/arch/x86/kernel/tboot.c b/arch/x86/kernel/tboot.c
index 6410744..f84fe00 100644
--- a/arch/x86/kernel/tboot.c
+++ b/arch/x86/kernel/tboot.c
@@ -32,7 +32,7 @@
 #include <linux/mm.h>
 #include <linux/tboot.h>
 
-#include <asm/trampoline.h>
+#include <asm/realmode.h>
 #include <asm/processor.h>
 #include <asm/bootparam.h>
 #include <asm/pgtable.h>
@@ -44,7 +44,7 @@
 #include <asm/e820.h>
 #include <asm/io.h>
 
-#include "acpi/realmode/wakeup.h"
+#include "../realmode/rm/wakeup.h"
 
 /* Global pointer to shared data; NULL means no measured launch. */
 struct tboot *tboot __read_mostly;
@@ -201,7 +201,8 @@
 		add_mac_region(e820.map[i].addr, e820.map[i].size);
 	}
 
-	tboot->acpi_sinfo.kernel_s3_resume_vector = acpi_wakeup_address;
+	tboot->acpi_sinfo.kernel_s3_resume_vector =
+		real_mode_header->wakeup_start;
 
 	return 0;
 }
diff --git a/arch/x86/kernel/trampoline.c b/arch/x86/kernel/trampoline.c
deleted file mode 100644
index a73b610..0000000
--- a/arch/x86/kernel/trampoline.c
+++ /dev/null
@@ -1,42 +0,0 @@
-#include <linux/io.h>
-#include <linux/memblock.h>
-
-#include <asm/trampoline.h>
-#include <asm/cacheflush.h>
-#include <asm/pgtable.h>
-
-unsigned char *x86_trampoline_base;
-
-void __init setup_trampolines(void)
-{
-	phys_addr_t mem;
-	size_t size = PAGE_ALIGN(x86_trampoline_end - x86_trampoline_start);
-
-	/* Has to be in very low memory so we can execute real-mode AP code. */
-	mem = memblock_find_in_range(0, 1<<20, size, PAGE_SIZE);
-	if (!mem)
-		panic("Cannot allocate trampoline\n");
-
-	x86_trampoline_base = __va(mem);
-	memblock_reserve(mem, size);
-
-	printk(KERN_DEBUG "Base memory trampoline at [%p] %llx size %zu\n",
-	       x86_trampoline_base, (unsigned long long)mem, size);
-
-	memcpy(x86_trampoline_base, x86_trampoline_start, size);
-}
-
-/*
- * setup_trampolines() gets called very early, to guarantee the
- * availability of low memory.  This is before the proper kernel page
- * tables are set up, so we cannot set page permissions in that
- * function.  Thus, we use an arch_initcall instead.
- */
-static int __init configure_trampolines(void)
-{
-	size_t size = PAGE_ALIGN(x86_trampoline_end - x86_trampoline_start);
-
-	set_memory_x((unsigned long)x86_trampoline_base, size >> PAGE_SHIFT);
-	return 0;
-}
-arch_initcall(configure_trampolines);
diff --git a/arch/x86/kernel/trampoline_32.S b/arch/x86/kernel/trampoline_32.S
deleted file mode 100644
index 451c0a7..0000000
--- a/arch/x86/kernel/trampoline_32.S
+++ /dev/null
@@ -1,83 +0,0 @@
-/*
- *
- *	Trampoline.S	Derived from Setup.S by Linus Torvalds
- *
- *	4 Jan 1997 Michael Chastain: changed to gnu as.
- *
- *	This is only used for booting secondary CPUs in SMP machine
- *
- *	Entry: CS:IP point to the start of our code, we are 
- *	in real mode with no stack, but the rest of the 
- *	trampoline page to make our stack and everything else
- *	is a mystery.
- *
- *	We jump into arch/x86/kernel/head_32.S.
- *
- *	On entry to trampoline_data, the processor is in real mode
- *	with 16-bit addressing and 16-bit data.  CS has some value
- *	and IP is zero.  Thus, data addresses need to be absolute
- *	(no relocation) and are taken with regard to r_base.
- *
- *	If you work on this file, check the object module with
- *	objdump --reloc to make sure there are no relocation
- *	entries except for:
- *
- *	TYPE              VALUE
- *	R_386_32          startup_32_smp
- *	R_386_32          boot_gdt
- */
-
-#include <linux/linkage.h>
-#include <linux/init.h>
-#include <asm/segment.h>
-#include <asm/page_types.h>
-
-#ifdef CONFIG_SMP
-
-	.section ".x86_trampoline","a"
-	.balign PAGE_SIZE
-	.code16
-
-ENTRY(trampoline_data)
-r_base = .
-	wbinvd			# Needed for NUMA-Q should be harmless for others
-	mov	%cs, %ax	# Code and data in the same place
-	mov	%ax, %ds
-
-	cli			# We should be safe anyway
-
-	movl	$0xA5A5A5A5, trampoline_status - r_base
-				# write marker for master knows we're running
-
-	/* GDT tables in non default location kernel can be beyond 16MB and
-	 * lgdt will not be able to load the address as in real mode default
-	 * operand size is 16bit. Use lgdtl instead to force operand size
-	 * to 32 bit.
-	 */
-
-	lidtl	boot_idt_descr - r_base	# load idt with 0, 0
-	lgdtl	boot_gdt_descr - r_base	# load gdt with whatever is appropriate
-
-	xor	%ax, %ax
-	inc	%ax		# protected mode (PE) bit
-	lmsw	%ax		# into protected mode
-	# flush prefetch and jump to startup_32_smp in arch/i386/kernel/head.S
-	ljmpl	$__BOOT_CS, $(startup_32_smp-__PAGE_OFFSET)
-
-	# These need to be in the same 64K segment as the above;
-	# hence we don't use the boot_gdt_descr defined in head.S
-boot_gdt_descr:
-	.word	__BOOT_DS + 7			# gdt limit
-	.long	boot_gdt - __PAGE_OFFSET	# gdt base
-
-boot_idt_descr:
-	.word	0				# idt limit = 0
-	.long	0				# idt base = 0L
-
-ENTRY(trampoline_status)
-	.long	0
-
-.globl trampoline_end
-trampoline_end:
-
-#endif /* CONFIG_SMP */
diff --git a/arch/x86/kernel/trampoline_64.S b/arch/x86/kernel/trampoline_64.S
deleted file mode 100644
index 09ff517..0000000
--- a/arch/x86/kernel/trampoline_64.S
+++ /dev/null
@@ -1,171 +0,0 @@
-/*
- *
- *	Trampoline.S	Derived from Setup.S by Linus Torvalds
- *
- *	4 Jan 1997 Michael Chastain: changed to gnu as.
- *	15 Sept 2005 Eric Biederman: 64bit PIC support
- *
- *	Entry: CS:IP point to the start of our code, we are 
- *	in real mode with no stack, but the rest of the 
- *	trampoline page to make our stack and everything else
- *	is a mystery.
- *
- *	On entry to trampoline_data, the processor is in real mode
- *	with 16-bit addressing and 16-bit data.  CS has some value
- *	and IP is zero.  Thus, data addresses need to be absolute
- *	(no relocation) and are taken with regard to r_base.
- *
- *	With the addition of trampoline_level4_pgt this code can
- *	now enter a 64bit kernel that lives at arbitrary 64bit
- *	physical addresses.
- *
- *	If you work on this file, check the object module with objdump
- *	--full-contents --reloc to make sure there are no relocation
- *	entries.
- */
-
-#include <linux/linkage.h>
-#include <linux/init.h>
-#include <asm/pgtable_types.h>
-#include <asm/page_types.h>
-#include <asm/msr.h>
-#include <asm/segment.h>
-#include <asm/processor-flags.h>
-
-	.section ".x86_trampoline","a"
-	.balign PAGE_SIZE
-	.code16
-
-ENTRY(trampoline_data)
-r_base = .
-	cli			# We should be safe anyway
-	wbinvd
-	mov	%cs, %ax	# Code and data in the same place
-	mov	%ax, %ds
-	mov	%ax, %es
-	mov	%ax, %ss
-
-
-	movl	$0xA5A5A5A5, trampoline_status - r_base
-				# write marker for master knows we're running
-
-					# Setup stack
-	movw	$(trampoline_stack_end - r_base), %sp
-
-	call	verify_cpu		# Verify the cpu supports long mode
-	testl   %eax, %eax		# Check for return code
-	jnz	no_longmode
-
-	mov	%cs, %ax
-	movzx	%ax, %esi		# Find the 32bit trampoline location
-	shll	$4, %esi
-
-					# Fixup the absolute vectors
-	leal	(startup_32 - r_base)(%esi), %eax
-	movl	%eax, startup_32_vector - r_base
-	leal	(startup_64 - r_base)(%esi), %eax
-	movl	%eax, startup_64_vector - r_base
-	leal	(tgdt - r_base)(%esi), %eax
-	movl	%eax, (tgdt + 2 - r_base)
-
-	/*
-	 * GDT tables in non default location kernel can be beyond 16MB and
-	 * lgdt will not be able to load the address as in real mode default
-	 * operand size is 16bit. Use lgdtl instead to force operand size
-	 * to 32 bit.
-	 */
-
-	lidtl	tidt - r_base	# load idt with 0, 0
-	lgdtl	tgdt - r_base	# load gdt with whatever is appropriate
-
-	mov	$X86_CR0_PE, %ax	# protected mode (PE) bit
-	lmsw	%ax			# into protected mode
-
-	# flush prefetch and jump to startup_32
-	ljmpl	*(startup_32_vector - r_base)
-
-	.code32
-	.balign 4
-startup_32:
-	movl	$__KERNEL_DS, %eax	# Initialize the %ds segment register
-	movl	%eax, %ds
-
-	movl	$X86_CR4_PAE, %eax
-	movl	%eax, %cr4		# Enable PAE mode
-
-					# Setup trampoline 4 level pagetables
-	leal	(trampoline_level4_pgt - r_base)(%esi), %eax
-	movl	%eax, %cr3
-
-	movl	$MSR_EFER, %ecx
-	movl	$(1 << _EFER_LME), %eax	# Enable Long Mode
-	xorl	%edx, %edx
-	wrmsr
-
-	# Enable paging and in turn activate Long Mode
-	# Enable protected mode
-	movl	$(X86_CR0_PG | X86_CR0_PE), %eax
-	movl	%eax, %cr0
-
-	/*
-	 * At this point we're in long mode but in 32bit compatibility mode
-	 * with EFER.LME = 1, CS.L = 0, CS.D = 1 (and in turn
-	 * EFER.LMA = 1). Now we want to jump in 64bit mode, to do that we use
-	 * the new gdt/idt that has __KERNEL_CS with CS.L = 1.
-	 */
-	ljmp	*(startup_64_vector - r_base)(%esi)
-
-	.code64
-	.balign 4
-startup_64:
-	# Now jump into the kernel using virtual addresses
-	movq	$secondary_startup_64, %rax
-	jmp	*%rax
-
-	.code16
-no_longmode:
-	hlt
-	jmp no_longmode
-#include "verify_cpu.S"
-
-	.balign 4
-	# Careful these need to be in the same 64K segment as the above;
-tidt:
-	.word	0			# idt limit = 0
-	.word	0, 0			# idt base = 0L
-
-	# Duplicate the global descriptor table
-	# so the kernel can live anywhere
-	.balign 4
-tgdt:
-	.short	tgdt_end - tgdt		# gdt limit
-	.long	tgdt - r_base
-	.short 0
-	.quad	0x00cf9b000000ffff	# __KERNEL32_CS
-	.quad	0x00af9b000000ffff	# __KERNEL_CS
-	.quad	0x00cf93000000ffff	# __KERNEL_DS
-tgdt_end:
-
-	.balign 4
-startup_32_vector:
-	.long	startup_32 - r_base
-	.word	__KERNEL32_CS, 0
-
-	.balign 4
-startup_64_vector:
-	.long	startup_64 - r_base
-	.word	__KERNEL_CS, 0
-
-	.balign 4
-ENTRY(trampoline_status)
-	.long	0
-
-trampoline_stack:
-	.org 0x1000
-trampoline_stack_end:
-ENTRY(trampoline_level4_pgt)
-	.quad	level3_ident_pgt - __START_KERNEL_map + _KERNPG_TABLE
-	.fill	510,8,0
-	.quad	level3_kernel_pgt - __START_KERNEL_map + _KERNPG_TABLE
-
-ENTRY(trampoline_end)
diff --git a/arch/x86/kernel/traps.c b/arch/x86/kernel/traps.c
index ff08457..05b31d9 100644
--- a/arch/x86/kernel/traps.c
+++ b/arch/x86/kernel/traps.c
@@ -303,8 +303,12 @@
 dotraplinkage void __kprobes notrace do_int3(struct pt_regs *regs, long error_code)
 {
 #ifdef CONFIG_DYNAMIC_FTRACE
-	/* ftrace must be first, everything else may cause a recursive crash */
-	if (unlikely(modifying_ftrace_code) && ftrace_int3_handler(regs))
+	/*
+	 * ftrace must be first, everything else may cause a recursive crash.
+	 * See note by declaration of modifying_ftrace_code in ftrace.c
+	 */
+	if (unlikely(atomic_read(&modifying_ftrace_code)) &&
+	    ftrace_int3_handler(regs))
 		return;
 #endif
 #ifdef CONFIG_KGDB_LOW_LEVEL_TRAP
diff --git a/arch/x86/kernel/vmlinux.lds.S b/arch/x86/kernel/vmlinux.lds.S
index 0f703f1..22a1530 100644
--- a/arch/x86/kernel/vmlinux.lds.S
+++ b/arch/x86/kernel/vmlinux.lds.S
@@ -197,18 +197,6 @@
 
 	INIT_DATA_SECTION(16)
 
-	/*
-	 * Code and data for a variety of lowlevel trampolines, to be
-	 * copied into base memory (< 1 MiB) during initialization.
-	 * Since it is copied early, the main copy can be discarded
-	 * afterwards.
-	 */
-	 .x86_trampoline : AT(ADDR(.x86_trampoline) - LOAD_OFFSET) {
-		x86_trampoline_start = .;
-		*(.x86_trampoline)
-		x86_trampoline_end = .;
-	}
-
 	.x86_cpu_dev.init : AT(ADDR(.x86_cpu_dev.init) - LOAD_OFFSET) {
 		__x86_cpu_dev_start = .;
 		*(.x86_cpu_dev.init)
diff --git a/arch/x86/kvm/mmu.c b/arch/x86/kvm/mmu.c
index 72102e0..be3cea4 100644
--- a/arch/x86/kvm/mmu.c
+++ b/arch/x86/kvm/mmu.c
@@ -2595,8 +2595,7 @@
 			*gfnp = gfn;
 			kvm_release_pfn_clean(pfn);
 			pfn &= ~mask;
-			if (!get_page_unless_zero(pfn_to_page(pfn)))
-				BUG();
+			kvm_get_pfn(pfn);
 			*pfnp = pfn;
 		}
 	}
diff --git a/arch/x86/lib/csum-wrappers_64.c b/arch/x86/lib/csum-wrappers_64.c
index 459b58a..25b7ae8 100644
--- a/arch/x86/lib/csum-wrappers_64.c
+++ b/arch/x86/lib/csum-wrappers_64.c
@@ -115,7 +115,7 @@
  * @src: source address
  * @dst: destination address
  * @len: number of bytes to be copied.
- * @isum: initial sum that is added into the result (32bit unfolded)
+ * @sum: initial sum that is added into the result (32bit unfolded)
  *
  * Returns an 32bit unfolded checksum of the buffer.
  */
diff --git a/arch/x86/lib/usercopy.c b/arch/x86/lib/usercopy.c
index 2e4e4b0..4f74d94 100644
--- a/arch/x86/lib/usercopy.c
+++ b/arch/x86/lib/usercopy.c
@@ -8,6 +8,7 @@
 #include <linux/module.h>
 
 #include <asm/word-at-a-time.h>
+#include <linux/sched.h>
 
 /*
  * best effort, GUP based copy_from_user() that is NMI-safe
@@ -21,6 +22,9 @@
 	void *map;
 	int ret;
 
+	if (__range_not_ok(from, n, TASK_SIZE))
+		return len;
+
 	do {
 		ret = __get_user_pages_fast(addr, 1, 0, &page);
 		if (!ret)
@@ -43,100 +47,3 @@
 	return len;
 }
 EXPORT_SYMBOL_GPL(copy_from_user_nmi);
-
-/*
- * Do a strncpy, return length of string without final '\0'.
- * 'count' is the user-supplied count (return 'count' if we
- * hit it), 'max' is the address space maximum (and we return
- * -EFAULT if we hit it).
- */
-static inline long do_strncpy_from_user(char *dst, const char __user *src, long count, unsigned long max)
-{
-	long res = 0;
-
-	/*
-	 * Truncate 'max' to the user-specified limit, so that
-	 * we only have one limit we need to check in the loop
-	 */
-	if (max > count)
-		max = count;
-
-	while (max >= sizeof(unsigned long)) {
-		unsigned long c, mask;
-
-		/* Fall back to byte-at-a-time if we get a page fault */
-		if (unlikely(__get_user(c,(unsigned long __user *)(src+res))))
-			break;
-		mask = has_zero(c);
-		if (mask) {
-			mask = (mask - 1) & ~mask;
-			mask >>= 7;
-			*(unsigned long *)(dst+res) = c & mask;
-			return res + count_masked_bytes(mask);
-		}
-		*(unsigned long *)(dst+res) = c;
-		res += sizeof(unsigned long);
-		max -= sizeof(unsigned long);
-	}
-
-	while (max) {
-		char c;
-
-		if (unlikely(__get_user(c,src+res)))
-			return -EFAULT;
-		dst[res] = c;
-		if (!c)
-			return res;
-		res++;
-		max--;
-	}
-
-	/*
-	 * Uhhuh. We hit 'max'. But was that the user-specified maximum
-	 * too? If so, that's ok - we got as much as the user asked for.
-	 */
-	if (res >= count)
-		return res;
-
-	/*
-	 * Nope: we hit the address space limit, and we still had more
-	 * characters the caller would have wanted. That's an EFAULT.
-	 */
-	return -EFAULT;
-}
-
-/**
- * strncpy_from_user: - Copy a NUL terminated string from userspace.
- * @dst:   Destination address, in kernel space.  This buffer must be at
- *         least @count bytes long.
- * @src:   Source address, in user space.
- * @count: Maximum number of bytes to copy, including the trailing NUL.
- *
- * Copies a NUL-terminated string from userspace to kernel space.
- *
- * On success, returns the length of the string (not including the trailing
- * NUL).
- *
- * If access to userspace fails, returns -EFAULT (some data may have been
- * copied).
- *
- * If @count is smaller than the length of the string, copies @count bytes
- * and returns @count.
- */
-long
-strncpy_from_user(char *dst, const char __user *src, long count)
-{
-	unsigned long max_addr, src_addr;
-
-	if (unlikely(count <= 0))
-		return 0;
-
-	max_addr = current_thread_info()->addr_limit.seg;
-	src_addr = (unsigned long)src;
-	if (likely(src_addr < max_addr)) {
-		unsigned long max = max_addr - src_addr;
-		return do_strncpy_from_user(dst, src, count, max);
-	}
-	return -EFAULT;
-}
-EXPORT_SYMBOL(strncpy_from_user);
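
The removed do_strncpy_from_user() (dropped here in favour of a generic implementation) copies a word at a time and relies on has_zero() from <asm/word-at-a-time.h> to spot the terminating NUL. A self-contained sketch of the underlying bit trick, assuming 64-bit longs:

/* Each 0x00 byte of v yields 0x80 in the corresponding byte of the result. */
static inline unsigned long word_has_zero(unsigned long v)
{
	return (v - 0x0101010101010101UL) & ~v & 0x8080808080808080UL;
}
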
diff --git a/arch/x86/lib/usercopy_32.c b/arch/x86/lib/usercopy_32.c
index 883b216..1781b2f 100644
--- a/arch/x86/lib/usercopy_32.c
+++ b/arch/x86/lib/usercopy_32.c
@@ -95,47 +95,6 @@
 }
 EXPORT_SYMBOL(__clear_user);
 
-/**
- * strnlen_user: - Get the size of a string in user space.
- * @s: The string to measure.
- * @n: The maximum valid length
- *
- * Get the size of a NUL-terminated string in user space.
- *
- * Returns the size of the string INCLUDING the terminating NUL.
- * On exception, returns 0.
- * If the string is too long, returns a value greater than @n.
- */
-long strnlen_user(const char __user *s, long n)
-{
-	unsigned long mask = -__addr_ok(s);
-	unsigned long res, tmp;
-
-	might_fault();
-
-	__asm__ __volatile__(
-		"	testl %0, %0\n"
-		"	jz 3f\n"
-		"	andl %0,%%ecx\n"
-		"0:	repne; scasb\n"
-		"	setne %%al\n"
-		"	subl %%ecx,%0\n"
-		"	addl %0,%%eax\n"
-		"1:\n"
-		".section .fixup,\"ax\"\n"
-		"2:	xorl %%eax,%%eax\n"
-		"	jmp 1b\n"
-		"3:	movb $1,%%al\n"
-		"	jmp 1b\n"
-		".previous\n"
-		_ASM_EXTABLE(0b,2b)
-		:"=&r" (n), "=&D" (s), "=&a" (res), "=&c" (tmp)
-		:"0" (n), "1" (s), "2" (0), "3" (mask)
-		:"cc");
-	return res & mask;
-}
-EXPORT_SYMBOL(strnlen_user);
-
 #ifdef CONFIG_X86_INTEL_USERCOPY
 static unsigned long
 __copy_user_intel(void __user *to, const void *from, unsigned long size)
diff --git a/arch/x86/lib/usercopy_64.c b/arch/x86/lib/usercopy_64.c
index 0d0326f..e5b130b 100644
--- a/arch/x86/lib/usercopy_64.c
+++ b/arch/x86/lib/usercopy_64.c
@@ -52,54 +52,6 @@
 }
 EXPORT_SYMBOL(clear_user);
 
-/*
- * Return the size of a string (including the ending 0)
- *
- * Return 0 on exception, a value greater than N if too long
- */
-
-long __strnlen_user(const char __user *s, long n)
-{
-	long res = 0;
-	char c;
-
-	while (1) {
-		if (res>n)
-			return n+1;
-		if (__get_user(c, s))
-			return 0;
-		if (!c)
-			return res+1;
-		res++;
-		s++;
-	}
-}
-EXPORT_SYMBOL(__strnlen_user);
-
-long strnlen_user(const char __user *s, long n)
-{
-	if (!access_ok(VERIFY_READ, s, 1))
-		return 0;
-	return __strnlen_user(s, n);
-}
-EXPORT_SYMBOL(strnlen_user);
-
-long strlen_user(const char __user *s)
-{
-	long res = 0;
-	char c;
-
-	for (;;) {
-		if (get_user(c, s))
-			return 0;
-		if (!c)
-			return res+1;
-		res++;
-		s++;
-	}
-}
-EXPORT_SYMBOL(strlen_user);
-
 unsigned long copy_in_user(void __user *to, const void __user *from, unsigned len)
 {
 	if (access_ok(VERIFY_WRITE, to, len) && access_ok(VERIFY_READ, from, len)) { 
diff --git a/arch/x86/lib/x86-opcode-map.txt b/arch/x86/lib/x86-opcode-map.txt
index 8191379..5d7e51f 100644
--- a/arch/x86/lib/x86-opcode-map.txt
+++ b/arch/x86/lib/x86-opcode-map.txt
@@ -28,7 +28,7 @@
 #  - (66): the last prefix is 0x66
 #  - (F3): the last prefix is 0xF3
 #  - (F2): the last prefix is 0xF2
-#
+#  - (!F3) : the last prefix is not 0xF3 (including non-last prefix case)
 
 Table: one byte opcode
 Referrer:
@@ -515,12 +515,12 @@
 b5: LGS Gv,Mp
 b6: MOVZX Gv,Eb
 b7: MOVZX Gv,Ew
-b8: JMPE | POPCNT Gv,Ev (F3)
+b8: JMPE (!F3) | POPCNT Gv,Ev (F3)
 b9: Grp10 (1A)
 ba: Grp8 Ev,Ib (1A)
 bb: BTC Ev,Gv
-bc: BSF Gv,Ev | TZCNT Gv,Ev (F3)
-bd: BSR Gv,Ev | LZCNT Gv,Ev (F3)
+bc: BSF Gv,Ev (!F3) | TZCNT Gv,Ev (F3)
+bd: BSR Gv,Ev (!F3) | LZCNT Gv,Ev (F3)
 be: MOVSX Gv,Eb
 bf: MOVSX Gv,Ew
 # 0x0f 0xc0-0xcf
diff --git a/arch/x86/mm/init.c b/arch/x86/mm/init.c
index 319b6f2..bc4e9d8 100644
--- a/arch/x86/mm/init.c
+++ b/arch/x86/mm/init.c
@@ -62,7 +62,8 @@
 		extra += PMD_SIZE;
 #endif
 		/* The first 2/4M doesn't use large pages. */
-		extra += mr->end - mr->start;
+		if (mr->start < PMD_SIZE)
+			extra += mr->end - mr->start;
 
 		ptes = (extra + PAGE_SIZE - 1) >> PAGE_SHIFT;
 	} else
@@ -84,8 +85,9 @@
 	pgt_buf_end = pgt_buf_start;
 	pgt_buf_top = pgt_buf_start + (tables >> PAGE_SHIFT);
 
-	printk(KERN_DEBUG "kernel direct mapping tables up to %lx @ %lx-%lx\n",
-		end, pgt_buf_start << PAGE_SHIFT, pgt_buf_top << PAGE_SHIFT);
+	printk(KERN_DEBUG "kernel direct mapping tables up to %#lx @ [mem %#010lx-%#010lx]\n",
+		end - 1, pgt_buf_start << PAGE_SHIFT,
+		(pgt_buf_top << PAGE_SHIFT) - 1);
 }
 
 void __init native_pagetable_reserve(u64 start, u64 end)
@@ -132,7 +134,8 @@
 	int nr_range, i;
 	int use_pse, use_gbpages;
 
-	printk(KERN_INFO "init_memory_mapping: %016lx-%016lx\n", start, end);
+	printk(KERN_INFO "init_memory_mapping: [mem %#010lx-%#010lx]\n",
+	       start, end - 1);
 
 #if defined(CONFIG_DEBUG_PAGEALLOC) || defined(CONFIG_KMEMCHECK)
 	/*
@@ -251,8 +254,8 @@
 	}
 
 	for (i = 0; i < nr_range; i++)
-		printk(KERN_DEBUG " %010lx - %010lx page %s\n",
-				mr[i].start, mr[i].end,
+		printk(KERN_DEBUG " [mem %#010lx-%#010lx] page %s\n",
+				mr[i].start, mr[i].end - 1,
 			(mr[i].page_size_mask & (1<<PG_LEVEL_1G))?"1G":(
 			 (mr[i].page_size_mask & (1<<PG_LEVEL_2M))?"2M":"4k"));
 
@@ -350,8 +353,8 @@
 	 * create a kernel page fault:
 	 */
 #ifdef CONFIG_DEBUG_PAGEALLOC
-	printk(KERN_INFO "debug: unmapping init memory %08lx..%08lx\n",
-		begin, end);
+	printk(KERN_INFO "debug: unmapping init [mem %#010lx-%#010lx]\n",
+		begin, end - 1);
 	set_memory_np(begin, (end - begin) >> PAGE_SHIFT);
 #else
 	/*
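
The printk conversions in this file (and in the mm/ files that follow) adopt the common "[mem %#010lx-%#010lx]" format, which prints the last byte of the range rather than the exclusive end, hence the "- 1" adjustments. A tiny sketch of the convention:

/* Print a half-open [start, end) range in the inclusive [mem ...] style. */
static void print_mem_range(unsigned long start, unsigned long end)
{
	printk(KERN_INFO "example: [mem %#010lx-%#010lx]\n", start, end - 1);
}
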
diff --git a/arch/x86/mm/ioremap.c b/arch/x86/mm/ioremap.c
index be1ef57..78fe3f1 100644
--- a/arch/x86/mm/ioremap.c
+++ b/arch/x86/mm/ioremap.c
@@ -180,7 +180,7 @@
 
 /**
  * ioremap_nocache     -   map bus memory into CPU space
- * @offset:    bus address of the memory
+ * @phys_addr:    bus address of the memory
  * @size:      size of the resource to map
  *
  * ioremap_nocache performs a platform specific sequence of operations to
@@ -217,7 +217,7 @@
 
 /**
  * ioremap_wc	-	map memory into CPU space write combined
- * @offset:	bus address of the memory
+ * @phys_addr:	bus address of the memory
  * @size:	size of the resource to map
  *
  * This version of ioremap ensures that the memory is marked write combining.
diff --git a/arch/x86/mm/numa.c b/arch/x86/mm/numa.c
index 19d3fa0..2d125be 100644
--- a/arch/x86/mm/numa.c
+++ b/arch/x86/mm/numa.c
@@ -141,8 +141,8 @@
 
 	/* whine about and ignore invalid blks */
 	if (start > end || nid < 0 || nid >= MAX_NUMNODES) {
-		pr_warning("NUMA: Warning: invalid memblk node %d (%Lx-%Lx)\n",
-			   nid, start, end);
+		pr_warning("NUMA: Warning: invalid memblk node %d [mem %#010Lx-%#010Lx]\n",
+			   nid, start, end - 1);
 		return 0;
 	}
 
@@ -210,8 +210,8 @@
 
 	start = roundup(start, ZONE_ALIGN);
 
-	printk(KERN_INFO "Initmem setup node %d %016Lx-%016Lx\n",
-	       nid, start, end);
+	printk(KERN_INFO "Initmem setup node %d [mem %#010Lx-%#010Lx]\n",
+	       nid, start, end - 1);
 
 	/*
 	 * Allocate node data.  Try remap allocator first, node-local
@@ -232,7 +232,7 @@
 	}
 
 	/* report and initialize */
-	printk(KERN_INFO "  NODE_DATA [%016Lx - %016Lx]%s\n",
+	printk(KERN_INFO "  NODE_DATA [mem %#010Lx-%#010Lx]%s\n",
 	       nd_pa, nd_pa + nd_size - 1, remapped ? " (remapped)" : "");
 	tnid = early_pfn_to_nid(nd_pa >> PAGE_SHIFT);
 	if (!remapped && tnid != nid)
@@ -291,14 +291,14 @@
 			 */
 			if (bi->end > bj->start && bi->start < bj->end) {
 				if (bi->nid != bj->nid) {
-					pr_err("NUMA: node %d (%Lx-%Lx) overlaps with node %d (%Lx-%Lx)\n",
-					       bi->nid, bi->start, bi->end,
-					       bj->nid, bj->start, bj->end);
+					pr_err("NUMA: node %d [mem %#010Lx-%#010Lx] overlaps with node %d [mem %#010Lx-%#010Lx]\n",
+					       bi->nid, bi->start, bi->end - 1,
+					       bj->nid, bj->start, bj->end - 1);
 					return -EINVAL;
 				}
-				pr_warning("NUMA: Warning: node %d (%Lx-%Lx) overlaps with itself (%Lx-%Lx)\n",
-					   bi->nid, bi->start, bi->end,
-					   bj->start, bj->end);
+				pr_warning("NUMA: Warning: node %d [mem %#010Lx-%#010Lx] overlaps with itself [mem %#010Lx-%#010Lx]\n",
+					   bi->nid, bi->start, bi->end - 1,
+					   bj->start, bj->end - 1);
 			}
 
 			/*
@@ -320,9 +320,9 @@
 			}
 			if (k < mi->nr_blks)
 				continue;
-			printk(KERN_INFO "NUMA: Node %d [%Lx,%Lx) + [%Lx,%Lx) -> [%Lx,%Lx)\n",
-			       bi->nid, bi->start, bi->end, bj->start, bj->end,
-			       start, end);
+			printk(KERN_INFO "NUMA: Node %d [mem %#010Lx-%#010Lx] + [mem %#010Lx-%#010Lx] -> [mem %#010Lx-%#010Lx]\n",
+			       bi->nid, bi->start, bi->end - 1, bj->start,
+			       bj->end - 1, start, end - 1);
 			bi->start = start;
 			bi->end = end;
 			numa_remove_memblk_from(j--, mi);
@@ -616,8 +616,8 @@
 {
 	printk(KERN_INFO "%s\n",
 	       numa_off ? "NUMA turned off" : "No NUMA configuration found");
-	printk(KERN_INFO "Faking a node at %016Lx-%016Lx\n",
-	       0LLU, PFN_PHYS(max_pfn));
+	printk(KERN_INFO "Faking a node at [mem %#018Lx-%#018Lx]\n",
+	       0LLU, PFN_PHYS(max_pfn) - 1);
 
 	node_set(0, numa_nodes_parsed);
 	numa_add_memblk(0, 0, PFN_PHYS(max_pfn));
diff --git a/arch/x86/mm/numa_emulation.c b/arch/x86/mm/numa_emulation.c
index 871dd88..dbbbb47 100644
--- a/arch/x86/mm/numa_emulation.c
+++ b/arch/x86/mm/numa_emulation.c
@@ -68,8 +68,8 @@
 		numa_remove_memblk_from(phys_blk, pi);
 	}
 
-	printk(KERN_INFO "Faking node %d at %016Lx-%016Lx (%LuMB)\n", nid,
-	       eb->start, eb->end, (eb->end - eb->start) >> 20);
+	printk(KERN_INFO "Faking node %d at [mem %#018Lx-%#018Lx] (%LuMB)\n",
+	       nid, eb->start, eb->end - 1, (eb->end - eb->start) >> 20);
 	return 0;
 }
 
diff --git a/arch/x86/mm/pageattr.c b/arch/x86/mm/pageattr.c
index e1ebde3..a718e0d 100644
--- a/arch/x86/mm/pageattr.c
+++ b/arch/x86/mm/pageattr.c
@@ -122,7 +122,7 @@
 
 /**
  * clflush_cache_range - flush a cache range with clflush
- * @addr:	virtual start address
+ * @vaddr:	virtual start address
  * @size:	number of bytes to flush
  *
  * clflush is an unordered instruction which needs fencing with mfence
diff --git a/arch/x86/mm/pat.c b/arch/x86/mm/pat.c
index f6ff57b..3d68ef6 100644
--- a/arch/x86/mm/pat.c
+++ b/arch/x86/mm/pat.c
@@ -158,31 +158,47 @@
 	return req_type;
 }
 
+struct pagerange_state {
+	unsigned long		cur_pfn;
+	int			ram;
+	int			not_ram;
+};
+
+static int
+pagerange_is_ram_callback(unsigned long initial_pfn, unsigned long total_nr_pages, void *arg)
+{
+	struct pagerange_state *state = arg;
+
+	state->not_ram	|= initial_pfn > state->cur_pfn;
+	state->ram	|= total_nr_pages > 0;
+	state->cur_pfn	 = initial_pfn + total_nr_pages;
+
+	return state->ram && state->not_ram;
+}
+
 static int pat_pagerange_is_ram(resource_size_t start, resource_size_t end)
 {
-	int ram_page = 0, not_rampage = 0;
-	unsigned long page_nr;
+	int ret = 0;
+	unsigned long start_pfn = start >> PAGE_SHIFT;
+	unsigned long end_pfn = (end + PAGE_SIZE - 1) >> PAGE_SHIFT;
+	struct pagerange_state state = {start_pfn, 0, 0};
 
-	for (page_nr = (start >> PAGE_SHIFT); page_nr < (end >> PAGE_SHIFT);
-	     ++page_nr) {
-		/*
-		 * For legacy reasons, physical address range in the legacy ISA
-		 * region is tracked as non-RAM. This will allow users of
-		 * /dev/mem to map portions of legacy ISA region, even when
-		 * some of those portions are listed(or not even listed) with
-		 * different e820 types(RAM/reserved/..)
-		 */
-		if (page_nr >= (ISA_END_ADDRESS >> PAGE_SHIFT) &&
-		    page_is_ram(page_nr))
-			ram_page = 1;
-		else
-			not_rampage = 1;
+	/*
+	 * For legacy reasons, physical address range in the legacy ISA
+	 * region is tracked as non-RAM. This will allow users of
+	 * /dev/mem to map portions of legacy ISA region, even when
+	 * some of those portions are listed (or not even listed) with
+	 * different e820 types (RAM/reserved/..)
+	 */
+	if (start_pfn < ISA_END_ADDRESS >> PAGE_SHIFT)
+		start_pfn = ISA_END_ADDRESS >> PAGE_SHIFT;
 
-		if (ram_page == not_rampage)
-			return -1;
+	if (start_pfn < end_pfn) {
+		ret = walk_system_ram_range(start_pfn, end_pfn - start_pfn,
+				&state, pagerange_is_ram_callback);
 	}
 
-	return ram_page;
+	return (ret > 0) ? -1 : (state.ram ? 1 : 0);
 }
 
 /*
@@ -209,9 +225,8 @@
 		page = pfn_to_page(pfn);
 		type = get_page_memtype(page);
 		if (type != -1) {
-			printk(KERN_INFO "reserve_ram_pages_type failed "
-				"0x%Lx-0x%Lx, track 0x%lx, req 0x%lx\n",
-				start, end, type, req_type);
+			printk(KERN_INFO "reserve_ram_pages_type failed [mem %#010Lx-%#010Lx], track 0x%lx, req 0x%lx\n",
+				start, end - 1, type, req_type);
 			if (new_type)
 				*new_type = type;
 
@@ -314,9 +329,9 @@
 
 	err = rbt_memtype_check_insert(new, new_type);
 	if (err) {
-		printk(KERN_INFO "reserve_memtype failed 0x%Lx-0x%Lx, "
-		       "track %s, req %s\n",
-		       start, end, cattr_name(new->type), cattr_name(req_type));
+		printk(KERN_INFO "reserve_memtype failed [mem %#010Lx-%#010Lx], track %s, req %s\n",
+		       start, end - 1,
+		       cattr_name(new->type), cattr_name(req_type));
 		kfree(new);
 		spin_unlock(&memtype_lock);
 
@@ -325,8 +340,8 @@
 
 	spin_unlock(&memtype_lock);
 
-	dprintk("reserve_memtype added 0x%Lx-0x%Lx, track %s, req %s, ret %s\n",
-		start, end, cattr_name(new->type), cattr_name(req_type),
+	dprintk("reserve_memtype added [mem %#010Lx-%#010Lx], track %s, req %s, ret %s\n",
+		start, end - 1, cattr_name(new->type), cattr_name(req_type),
 		new_type ? cattr_name(*new_type) : "-");
 
 	return err;
@@ -360,14 +375,14 @@
 	spin_unlock(&memtype_lock);
 
 	if (!entry) {
-		printk(KERN_INFO "%s:%d freeing invalid memtype %Lx-%Lx\n",
-			current->comm, current->pid, start, end);
+		printk(KERN_INFO "%s:%d freeing invalid memtype [mem %#010Lx-%#010Lx]\n",
+		       current->comm, current->pid, start, end - 1);
 		return -EINVAL;
 	}
 
 	kfree(entry);
 
-	dprintk("free_memtype request 0x%Lx-0x%Lx\n", start, end);
+	dprintk("free_memtype request [mem %#010Lx-%#010Lx]\n", start, end - 1);
 
 	return 0;
 }
@@ -491,9 +506,8 @@
 
 	while (cursor < to) {
 		if (!devmem_is_allowed(pfn)) {
-			printk(KERN_INFO
-		"Program %s tried to access /dev/mem between %Lx->%Lx.\n",
-				current->comm, from, to);
+			printk(KERN_INFO "Program %s tried to access /dev/mem between [mem %#010Lx-%#010Lx]\n",
+				current->comm, from, to - 1);
 			return 0;
 		}
 		cursor += PAGE_SIZE;
@@ -554,12 +568,11 @@
 				size;
 
 	if (ioremap_change_attr((unsigned long)__va(base), id_sz, flags) < 0) {
-		printk(KERN_INFO
-			"%s:%d ioremap_change_attr failed %s "
-			"for %Lx-%Lx\n",
+		printk(KERN_INFO "%s:%d ioremap_change_attr failed %s "
+			"for [mem %#010Lx-%#010Lx]\n",
 			current->comm, current->pid,
 			cattr_name(flags),
-			base, (unsigned long long)(base + size));
+			base, (unsigned long long)(base + size-1));
 		return -EINVAL;
 	}
 	return 0;
@@ -591,12 +604,11 @@
 
 		flags = lookup_memtype(paddr);
 		if (want_flags != flags) {
-			printk(KERN_WARNING
-			"%s:%d map pfn RAM range req %s for %Lx-%Lx, got %s\n",
+			printk(KERN_WARNING "%s:%d map pfn RAM range req %s for [mem %#010Lx-%#010Lx], got %s\n",
 				current->comm, current->pid,
 				cattr_name(want_flags),
 				(unsigned long long)paddr,
-				(unsigned long long)(paddr + size),
+				(unsigned long long)(paddr + size - 1),
 				cattr_name(flags));
 			*vma_prot = __pgprot((pgprot_val(*vma_prot) &
 					      (~_PAGE_CACHE_MASK)) |
@@ -614,11 +626,11 @@
 		    !is_new_memtype_allowed(paddr, size, want_flags, flags)) {
 			free_memtype(paddr, paddr + size);
 			printk(KERN_ERR "%s:%d map pfn expected mapping type %s"
-				" for %Lx-%Lx, got %s\n",
+				" for [mem %#010Lx-%#010Lx], got %s\n",
 				current->comm, current->pid,
 				cattr_name(want_flags),
 				(unsigned long long)paddr,
-				(unsigned long long)(paddr + size),
+				(unsigned long long)(paddr + size - 1),
 				cattr_name(flags));
 			return -EINVAL;
 		}
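
pat_pagerange_is_ram() above now asks walk_system_ram_range() to enumerate the RAM pieces of the range and lets pagerange_is_ram_callback() track whether both RAM and a gap have been seen; the walk stops as soon as the callback returns non-zero. A hedged worked example of the callback on a range with a hole in it:

static void pagerange_mixed_example(void)
{
	struct pagerange_state st = { .cur_pfn = 0x100, .ram = 0, .not_ram = 0 };

	/* First RAM block starts exactly at cur_pfn: ram = 1, not_ram stays 0. */
	pagerange_is_ram_callback(0x100, 0x10, &st);
	/* Second block starts beyond cur_pfn (hole at 0x110): not_ram = 1, walk stops. */
	pagerange_is_ram_callback(0x120, 0x10, &st);
	/* st.ram && st.not_ram -> pat_pagerange_is_ram() reports -1 (mixed). */
}
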
diff --git a/arch/x86/mm/srat.c b/arch/x86/mm/srat.c
index efb5b4b..4599c3e 100644
--- a/arch/x86/mm/srat.c
+++ b/arch/x86/mm/srat.c
@@ -176,8 +176,11 @@
 		return;
 	}
 
-	printk(KERN_INFO "SRAT: Node %u PXM %u %Lx-%Lx\n", node, pxm,
-	       start, end);
+	node_set(node, numa_nodes_parsed);
+
+	printk(KERN_INFO "SRAT: Node %u PXM %u [mem %#010Lx-%#010Lx]\n",
+	       node, pxm,
+	       (unsigned long long) start, (unsigned long long) end - 1);
 }
 
 void __init acpi_numa_arch_fixup(void) {}
diff --git a/arch/x86/platform/mrst/early_printk_mrst.c b/arch/x86/platform/mrst/early_printk_mrst.c
index 3c6e328..028454f 100644
--- a/arch/x86/platform/mrst/early_printk_mrst.c
+++ b/arch/x86/platform/mrst/early_printk_mrst.c
@@ -110,19 +110,16 @@
 static int dumper_registered;
 
 static void dw_kmsg_dump(struct kmsg_dumper *dumper,
-			enum kmsg_dump_reason reason,
-			const char *s1, unsigned long l1,
-			const char *s2, unsigned long l2)
+			 enum kmsg_dump_reason reason)
 {
-	int i;
+	static char line[1024];
+	size_t len;
 
 	/* When run to this, we'd better re-init the HW */
 	mrst_early_console_init();
 
-	for (i = 0; i < l1; i++)
-		early_mrst_console.write(&early_mrst_console, s1 + i, 1);
-	for (i = 0; i < l2; i++)
-		early_mrst_console.write(&early_mrst_console, s2 + i, 1);
+	while (kmsg_dump_get_line(dumper, true, line, sizeof(line), &len))
+		early_mrst_console.write(&early_mrst_console, line, len);
 }
 
 /* Set the ratio rate to 115200, 8n1, IRQ disabled */
diff --git a/arch/x86/platform/mrst/mrst.c b/arch/x86/platform/mrst/mrst.c
index e31bcd8..fd41a92 100644
--- a/arch/x86/platform/mrst/mrst.c
+++ b/arch/x86/platform/mrst/mrst.c
@@ -782,7 +782,7 @@
 EXPORT_SYMBOL_GPL(intel_scu_notifier);
 
 /* Called by IPC driver */
-void intel_scu_devices_create(void)
+void __devinit intel_scu_devices_create(void)
 {
 	int i;
 
diff --git a/arch/x86/platform/uv/tlb_uv.c b/arch/x86/platform/uv/tlb_uv.c
index 3ae0e61..59880af 100644
--- a/arch/x86/platform/uv/tlb_uv.c
+++ b/arch/x86/platform/uv/tlb_uv.c
@@ -1295,7 +1295,6 @@
 		 */
 		mmr_image |= (1L << SOFTACK_MSHIFT);
 		if (is_uv2_hub()) {
-			mmr_image &= ~(1L << UV2_LEG_SHFT);
 			mmr_image |= (1L << UV2_EXT_SHFT);
 		}
 		write_mmr_misc_control(pnode, mmr_image);
diff --git a/arch/x86/realmode/Makefile b/arch/x86/realmode/Makefile
new file mode 100644
index 0000000..94f7fbe
--- /dev/null
+++ b/arch/x86/realmode/Makefile
@@ -0,0 +1,18 @@
+#
+# arch/x86/realmode/Makefile
+#
+# This file is subject to the terms and conditions of the GNU General Public
+# License.  See the file "COPYING" in the main directory of this archive
+# for more details.
+#
+#
+
+subdir- := rm
+
+obj-y += init.o
+obj-y += rmpiggy.o
+
+$(obj)/rmpiggy.o: $(obj)/rm/realmode.bin
+
+$(obj)/rm/realmode.bin: FORCE
+	$(Q)$(MAKE) $(build)=$(obj)/rm $@
diff --git a/arch/x86/realmode/init.c b/arch/x86/realmode/init.c
new file mode 100644
index 0000000..cbca565
--- /dev/null
+++ b/arch/x86/realmode/init.c
@@ -0,0 +1,115 @@
+#include <linux/io.h>
+#include <linux/memblock.h>
+
+#include <asm/cacheflush.h>
+#include <asm/pgtable.h>
+#include <asm/realmode.h>
+
+struct real_mode_header *real_mode_header;
+u32 *trampoline_cr4_features;
+
+void __init setup_real_mode(void)
+{
+	phys_addr_t mem;
+	u16 real_mode_seg;
+	u32 *rel;
+	u32 count;
+	u32 *ptr;
+	u16 *seg;
+	int i;
+	unsigned char *base;
+	struct trampoline_header *trampoline_header;
+	size_t size = PAGE_ALIGN(real_mode_blob_end - real_mode_blob);
+#ifdef CONFIG_X86_64
+	u64 *trampoline_pgd;
+	u64 efer;
+#endif
+
+	/* Has to be in very low memory so we can execute real-mode AP code. */
+	mem = memblock_find_in_range(0, 1<<20, size, PAGE_SIZE);
+	if (!mem)
+		panic("Cannot allocate trampoline\n");
+
+	base = __va(mem);
+	memblock_reserve(mem, size);
+	real_mode_header = (struct real_mode_header *) base;
+	printk(KERN_DEBUG "Base memory trampoline at [%p] %llx size %zu\n",
+	       base, (unsigned long long)mem, size);
+
+	memcpy(base, real_mode_blob, size);
+
+	real_mode_seg = __pa(base) >> 4;
+	rel = (u32 *) real_mode_relocs;
+
+	/* 16-bit segment relocations. */
+	count = rel[0];
+	rel = &rel[1];
+	for (i = 0; i < count; i++) {
+		seg = (u16 *) (base + rel[i]);
+		*seg = real_mode_seg;
+	}
+
+	/* 32-bit linear relocations. */
+	count = rel[i];
+	rel =  &rel[i + 1];
+	for (i = 0; i < count; i++) {
+		ptr = (u32 *) (base + rel[i]);
+		*ptr += __pa(base);
+	}
+
+	/* Must be performed *after* relocation. */
+	trampoline_header = (struct trampoline_header *)
+		__va(real_mode_header->trampoline_header);
+
+#ifdef CONFIG_X86_32
+	trampoline_header->start = __pa(startup_32_smp);
+	trampoline_header->gdt_limit = __BOOT_DS + 7;
+	trampoline_header->gdt_base = __pa(boot_gdt);
+#else
+	/*
+	 * Some AMD processors will #GP(0) if EFER.LMA is set in WRMSR
+	 * so we need to mask it out.
+	 */
+	rdmsrl(MSR_EFER, efer);
+	trampoline_header->efer = efer & ~EFER_LMA;
+
+	trampoline_header->start = (u64) secondary_startup_64;
+	trampoline_cr4_features = &trampoline_header->cr4;
+	*trampoline_cr4_features = read_cr4();
+
+	trampoline_pgd = (u64 *) __va(real_mode_header->trampoline_pgd);
+	trampoline_pgd[0] = __pa(level3_ident_pgt) + _KERNPG_TABLE;
+	trampoline_pgd[511] = __pa(level3_kernel_pgt) + _KERNPG_TABLE;
+#endif
+}
+
+/*
+ * set_real_mode_permissions() gets called very early, to guarantee the
+ * availability of low memory.  This is before the proper kernel page
+ * tables are set up, so we cannot set page permissions in that
+ * function.  Thus, we use an arch_initcall instead.
+ */
+static int __init set_real_mode_permissions(void)
+{
+	unsigned char *base = (unsigned char *) real_mode_header;
+	size_t size = PAGE_ALIGN(real_mode_blob_end - real_mode_blob);
+
+	size_t ro_size =
+		PAGE_ALIGN(real_mode_header->ro_end) -
+		__pa(base);
+
+	size_t text_size =
+		PAGE_ALIGN(real_mode_header->ro_end) -
+		real_mode_header->text_start;
+
+	unsigned long text_start =
+		(unsigned long) __va(real_mode_header->text_start);
+
+	set_memory_nx((unsigned long) base, size >> PAGE_SHIFT);
+	set_memory_ro((unsigned long) base, ro_size >> PAGE_SHIFT);
+	set_memory_x((unsigned long) text_start, text_size >> PAGE_SHIFT);
+
+	return 0;
+}
+
+arch_initcall(set_real_mode_permissions);
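
The two relocation loops in setup_real_mode() above assume the stream emitted by the relocs tool is laid out as two counted arrays of 32-bit byte offsets into the blob: the 16-bit segment fixups first, then the 32-bit linear fixups. A sketch restating those loops with that layout made explicit (argument names are illustrative):

static void apply_realmode_relocs(unsigned char *base, u32 *rel,
				  u16 real_mode_seg, u32 phys_base)
{
	u32 i, count;

	count = *rel++;				/* 16-bit segment relocations */
	for (i = 0; i < count; i++)
		*(u16 *)(base + rel[i]) = real_mode_seg;
	rel += count;

	count = *rel++;				/* 32-bit linear relocations */
	for (i = 0; i < count; i++)
		*(u32 *)(base + rel[i]) += phys_base;
}
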
diff --git a/arch/x86/realmode/rm/.gitignore b/arch/x86/realmode/rm/.gitignore
new file mode 100644
index 0000000..b6ed3a2
--- /dev/null
+++ b/arch/x86/realmode/rm/.gitignore
@@ -0,0 +1,3 @@
+pasyms.h
+realmode.lds
+realmode.relocs
diff --git a/arch/x86/realmode/rm/Makefile b/arch/x86/realmode/rm/Makefile
new file mode 100644
index 0000000..5b84a2d
--- /dev/null
+++ b/arch/x86/realmode/rm/Makefile
@@ -0,0 +1,82 @@
+#
+# arch/x86/realmode/rm/Makefile
+#
+# This file is subject to the terms and conditions of the GNU General Public
+# License.  See the file "COPYING" in the main directory of this archive
+# for more details.
+#
+#
+
+always := realmode.bin realmode.relocs
+
+wakeup-objs	:= wakeup_asm.o wakemain.o video-mode.o
+wakeup-objs	+= copy.o bioscall.o regs.o
+# The link order of the video-*.o modules can matter.  In particular,
+# video-vga.o *must* be listed first, followed by video-vesa.o.
+# Hardware-specific drivers should follow in the order they should be
+# probed, and video-bios.o should typically be last.
+wakeup-objs	+= video-vga.o
+wakeup-objs	+= video-vesa.o
+wakeup-objs	+= video-bios.o
+
+realmode-y			+= header.o
+realmode-y			+= trampoline_$(BITS).o
+realmode-y			+= stack.o
+realmode-$(CONFIG_X86_32)	+= reboot_32.o
+realmode-$(CONFIG_ACPI_SLEEP)	+= $(wakeup-objs)
+
+targets	+= $(realmode-y)
+
+REALMODE_OBJS = $(addprefix $(obj)/,$(realmode-y))
+
+sed-pasyms := -n -r -e 's/^([0-9a-fA-F]+) [ABCDGRSTVW] (.+)$$/pa_\2 = \2;/p'
+
+quiet_cmd_pasyms = PASYMS  $@
+      cmd_pasyms = $(NM) $(filter-out FORCE,$^) | \
+		   sed $(sed-pasyms) | sort | uniq > $@
+
+targets += pasyms.h
+$(obj)/pasyms.h: $(REALMODE_OBJS) FORCE
+	$(call if_changed,pasyms)
+
+targets += realmode.lds
+$(obj)/realmode.lds: $(obj)/pasyms.h
+
+LDFLAGS_realmode.elf := --emit-relocs -T
+CPPFLAGS_realmode.lds += -P -C -I$(obj)
+
+targets += realmode.elf
+$(obj)/realmode.elf: $(obj)/realmode.lds $(REALMODE_OBJS) FORCE
+	$(call if_changed,ld)
+
+OBJCOPYFLAGS_realmode.bin := -O binary
+
+targets += realmode.bin
+$(obj)/realmode.bin: $(obj)/realmode.elf $(obj)/realmode.relocs
+	$(call if_changed,objcopy)
+
+quiet_cmd_relocs = RELOCS  $@
+      cmd_relocs = arch/x86/tools/relocs --realmode $< > $@
+
+targets += realmode.relocs
+$(obj)/realmode.relocs: $(obj)/realmode.elf FORCE
+	$(call if_changed,relocs)
+
+# ---------------------------------------------------------------------------
+
+# How to compile the 16-bit code.  Note we always compile for -march=i386,
+# that way we can complain to the user if the CPU is insufficient.
+KBUILD_CFLAGS	:= $(LINUXINCLUDE) -m32 -g -Os -D_SETUP -D__KERNEL__ -D_WAKEUP \
+		   -I$(srctree)/arch/x86/boot \
+		   -DDISABLE_BRANCH_PROFILING \
+		   -Wall -Wstrict-prototypes \
+		   -march=i386 -mregparm=3 \
+		   -include $(srctree)/$(src)/../../boot/code16gcc.h \
+		   -fno-strict-aliasing -fomit-frame-pointer \
+		   $(call cc-option, -ffreestanding) \
+		   $(call cc-option, -fno-toplevel-reorder,\
+			$(call cc-option, -fno-unit-at-a-time)) \
+		   $(call cc-option, -fno-stack-protector) \
+		   $(call cc-option, -mpreferred-stack-boundary=2)
+KBUILD_AFLAGS	:= $(KBUILD_CFLAGS) -D__ASSEMBLY__
+GCOV_PROFILE := n
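
The PASYMS rule above feeds the symbol table of the real-mode objects through sed so that every global symbol gains a pa_-prefixed alias; pasyms.h is then pulled into the linker script, which is how names like pa_trampoline_start become visible to header.S and the kernel proper. Per the sed expression, each generated line is a linker-script assignment of the form shown here (the symbol names are only examples):

pa_trampoline_start = trampoline_start;
pa_trampoline_status = trampoline_status;
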
diff --git a/arch/x86/realmode/rm/bioscall.S b/arch/x86/realmode/rm/bioscall.S
new file mode 100644
index 0000000..16162d1
--- /dev/null
+++ b/arch/x86/realmode/rm/bioscall.S
@@ -0,0 +1 @@
+#include "../../boot/bioscall.S"
diff --git a/arch/x86/realmode/rm/copy.S b/arch/x86/realmode/rm/copy.S
new file mode 100644
index 0000000..b785e6f
--- /dev/null
+++ b/arch/x86/realmode/rm/copy.S
@@ -0,0 +1 @@
+#include "../../boot/copy.S"
diff --git a/arch/x86/realmode/rm/header.S b/arch/x86/realmode/rm/header.S
new file mode 100644
index 0000000..fadf483
--- /dev/null
+++ b/arch/x86/realmode/rm/header.S
@@ -0,0 +1,41 @@
+/*
+ * Real-mode blob header; this should match realmode.h and be
+ * readonly; for mutable data instead add pointers into the .data
+ * or .bss sections as appropriate.
+ */
+
+#include <linux/linkage.h>
+#include <asm/page_types.h>
+
+#include "realmode.h"
+	
+	.section ".header", "a"
+
+	.balign	16
+GLOBAL(real_mode_header)
+	.long	pa_text_start
+	.long	pa_ro_end
+	/* SMP trampoline */
+	.long	pa_trampoline_start
+	.long	pa_trampoline_status
+	.long	pa_trampoline_header
+#ifdef CONFIG_X86_64
+	.long	pa_trampoline_pgd;
+#endif
+	/* ACPI S3 wakeup */
+#ifdef CONFIG_ACPI_SLEEP
+	.long	pa_wakeup_start
+	.long	pa_wakeup_header
+#endif
+	/* APM/BIOS reboot */
+#ifdef CONFIG_X86_32
+	.long	pa_machine_real_restart_asm
+#endif
+END(real_mode_header)
+
+	/* End signature, used to verify integrity */
+	.section ".signature","a"
+	.balign 4
+GLOBAL(end_signature)
+	.long	REALMODE_END_SIGNATURE
+END(end_signature)
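
header.S above stresses that its layout must stay in sync with the kernel-side realmode.h; a hedged C sketch of the matching structure, with one 32-bit field per .long emitted above (the real definition in <asm/realmode.h> may carry additional fields):

struct real_mode_header_sketch {
	u32	text_start;
	u32	ro_end;
	/* SMP trampoline */
	u32	trampoline_start;
	u32	trampoline_status;
	u32	trampoline_header;
#ifdef CONFIG_X86_64
	u32	trampoline_pgd;
#endif
	/* ACPI S3 wakeup */
#ifdef CONFIG_ACPI_SLEEP
	u32	wakeup_start;
	u32	wakeup_header;
#endif
	/* APM/BIOS reboot */
#ifdef CONFIG_X86_32
	u32	machine_real_restart_asm;
#endif
};
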
diff --git a/arch/x86/realmode/rm/realmode.h b/arch/x86/realmode/rm/realmode.h
new file mode 100644
index 0000000..d74cff6
--- /dev/null
+++ b/arch/x86/realmode/rm/realmode.h
@@ -0,0 +1,21 @@
+#ifndef ARCH_X86_REALMODE_RM_REALMODE_H
+#define ARCH_X86_REALMODE_RM_REALMODE_H
+
+#ifdef __ASSEMBLY__
+
+/*
+ * 16-bit ljmpw to the real_mode_seg
+ *
+ * This must be open-coded since gas will choke on using a
+ * relocatable symbol for the segment portion.
+ */
+#define LJMPW_RM(to)	.byte 0xea ; .word (to), real_mode_seg
+
+#endif /* __ASSEMBLY__ */
+
+/*
+ * Signature at the end of the realmode region
+ */
+#define REALMODE_END_SIGNATURE	0x65a22c82
+
+#endif /* ARCH_X86_REALMODE_RM_REALMODE_H */
diff --git a/arch/x86/realmode/rm/realmode.lds.S b/arch/x86/realmode/rm/realmode.lds.S
new file mode 100644
index 0000000..86b2e8d
--- /dev/null
+++ b/arch/x86/realmode/rm/realmode.lds.S
@@ -0,0 +1,76 @@
+/*
+ * realmode.lds.S
+ *
+ * Linker script for the real-mode code
+ */
+
+#include <asm/page_types.h>
+
+#undef i386
+
+OUTPUT_FORMAT("elf32-i386", "elf32-i386", "elf32-i386")
+OUTPUT_ARCH(i386)
+
+SECTIONS
+{
+	real_mode_seg = 0;
+
+	. = 0;
+	.header : {
+		pa_real_mode_base = .;
+		*(.header)
+	}
+
+	. = ALIGN(4);
+	.rodata : {
+		*(.rodata)
+		*(.rodata.*)
+		. = ALIGN(16);
+		video_cards = .;
+		*(.videocards)
+		video_cards_end = .;
+	}
+
+	. = ALIGN(PAGE_SIZE);
+	pa_text_start = .;
+	.text : {
+		*(.text)
+		*(.text.*)
+	}
+
+	.text32 : {
+		*(.text32)
+		*(.text32.*)
+	}
+
+	.text64 : {
+		*(.text64)
+		*(.text64.*)
+	}
+	pa_ro_end = .;
+
+	. = ALIGN(PAGE_SIZE);
+	.data : {
+		*(.data)
+		*(.data.*)
+	}
+
+	. = ALIGN(128);
+	.bss : {
+		*(.bss*)
+	}
+
+	/* End signature for integrity checking */
+	. = ALIGN(4);
+	.signature : {
+		*(.signature)
+	}
+
+	/DISCARD/ : {
+		*(.note*)
+		*(.debug*)
+		*(.eh_frame*)
+	}
+
+#include "pasyms.h"
+}
diff --git a/arch/x86/kernel/reboot_32.S b/arch/x86/realmode/rm/reboot_32.S
similarity index 71%
rename from arch/x86/kernel/reboot_32.S
rename to arch/x86/realmode/rm/reboot_32.S
index 1d5c46d..1140448 100644
--- a/arch/x86/kernel/reboot_32.S
+++ b/arch/x86/realmode/rm/reboot_32.S
@@ -2,6 +2,7 @@
 #include <linux/init.h>
 #include <asm/segment.h>
 #include <asm/page_types.h>
+#include "realmode.h"
 
 /*
  * The following code and data reboots the machine by switching to real
@@ -13,34 +14,20 @@
  *
  * This code is called with the restart type (0 = BIOS, 1 = APM) in %eax.
  */
-	.section ".x86_trampoline","a"
-	.balign 16
+	.section ".text32", "ax"
 	.code32
+
+	.balign	16
 ENTRY(machine_real_restart_asm)
-r_base = .
-	/* Get our own relocated address */
-	call	1f
-1:	popl	%ebx
-	subl	$(1b - r_base), %ebx
-
-	/* Compute the equivalent real-mode segment */
-	movl	%ebx, %ecx
-	shrl	$4, %ecx
-	
-	/* Patch post-real-mode segment jump */
-	movw	(dispatch_table - r_base)(%ebx,%eax,2),%ax
-	movw	%ax, (101f - r_base)(%ebx)
-	movw	%cx, (102f - r_base)(%ebx)
-
 	/* Set up the IDT for real mode. */
-	lidtl	(machine_real_restart_idt - r_base)(%ebx)
+	lidtl	pa_machine_real_restart_idt
 
 	/*
 	 * Set up a GDT from which we can load segment descriptors for real
 	 * mode.  The GDT is not used in real mode; it is just needed here to
 	 * prepare the descriptors.
 	 */
-	lgdtl	(machine_real_restart_gdt - r_base)(%ebx)
+	lgdtl	pa_machine_real_restart_gdt
 
 	/*
 	 * Load the data segment registers with 16-bit compatible values
@@ -51,7 +38,7 @@
 	movl	%ecx, %fs
 	movl	%ecx, %gs
 	movl	%ecx, %ss
-	ljmpl	$8, $1f - r_base
+	ljmpw	$8, $1f
 
 /*
  * This is 16-bit protected mode code to disable paging and the cache,
@@ -76,27 +63,29 @@
  *
  * Most of this work is probably excessive, but it is what is tested.
  */
+	.text
 	.code16
+
+	.balign	16
+machine_real_restart_asm16:
 1:
 	xorl	%ecx, %ecx
-	movl	%cr0, %eax
-	andl	$0x00000011, %eax
-	orl	$0x60000000, %eax
-	movl	%eax, %cr0
+	movl	%cr0, %edx
+	andl	$0x00000011, %edx
+	orl	$0x60000000, %edx
+	movl	%edx, %cr0
 	movl	%ecx, %cr3
 	movl	%cr0, %edx
-	andl	$0x60000000, %edx	/* If no cache bits -> no wbinvd */
+	testl	$0x60000000, %edx	/* If no cache bits -> no wbinvd */
 	jz	2f
 	wbinvd
 2:
-	andb	$0x10, %al
-	movl	%eax, %cr0
-	.byte	0xea			/* ljmpw */
-101:	.word	0			/* Offset */
-102:	.word	0			/* Segment */
-
-bios:
-	ljmpw	$0xf000, $0xfff0
+	andb	$0x10, %dl
+	movl	%edx, %cr0
+	LJMPW_RM(3f)
+3:
+	andw	%ax, %ax
+	jz	bios
 
 apm:
 	movw	$0x1000, %ax
@@ -106,26 +95,34 @@
 	movw	$0x0001, %bx
 	movw	$0x0003, %cx
 	int	$0x15
+	/* This should never return... */
 
-END(machine_real_restart_asm)
+bios:
+	ljmpw	$0xf000, $0xfff0
 
-	.balign 16
-	/* These must match <asm/reboot.h */
-dispatch_table:
-	.word	bios - r_base
-	.word	apm - r_base
-END(dispatch_table)
+	.section ".rodata", "a"
 
-	.balign 16
-machine_real_restart_idt:
+	.balign	16
+GLOBAL(machine_real_restart_idt)
 	.word	0xffff		/* Length - real mode default value */
 	.long	0		/* Base - real mode default value */
 END(machine_real_restart_idt)
 
-	.balign 16
-ENTRY(machine_real_restart_gdt)
-	.quad	0		/* Self-pointer, filled in by PM code */
-	.quad	0		/* 16-bit code segment, filled in by PM code */
+	.balign	16
+GLOBAL(machine_real_restart_gdt)
+	/* Self-pointer */
+	.word	0xffff		/* Length - real mode default value */
+	.long	pa_machine_real_restart_gdt
+	.word	0
+
+	/*
+	 * 16-bit code segment pointing to real_mode_seg
+	 * Selector value 8
+	 */
+	.word	0xffff		/* Limit */
+	.long	0x9b000000 + pa_real_mode_base
+	.word	0
+
 	/*
 	 * 16-bit data segment with the selector value 16 = 0x10 and
 	 * base value 0x100; since this is consistent with real mode
diff --git a/arch/x86/realmode/rm/regs.c b/arch/x86/realmode/rm/regs.c
new file mode 100644
index 0000000..fbb15b9
--- /dev/null
+++ b/arch/x86/realmode/rm/regs.c
@@ -0,0 +1 @@
+#include "../../boot/regs.c"
diff --git a/arch/x86/realmode/rm/stack.S b/arch/x86/realmode/rm/stack.S
new file mode 100644
index 0000000..867ae87
--- /dev/null
+++ b/arch/x86/realmode/rm/stack.S
@@ -0,0 +1,19 @@
+/*
+ * Common heap and stack allocations
+ */
+
+#include <linux/linkage.h>
+
+	.data
+GLOBAL(HEAP)
+	.long	rm_heap
+GLOBAL(heap_end)
+	.long	rm_stack
+
+	.bss
+	.balign	16
+GLOBAL(rm_heap)
+	.space	2048
+GLOBAL(rm_stack)
+	.space	2048
+GLOBAL(rm_stack_end)
diff --git a/arch/x86/realmode/rm/trampoline_32.S b/arch/x86/realmode/rm/trampoline_32.S
new file mode 100644
index 0000000..c1b2791
--- /dev/null
+++ b/arch/x86/realmode/rm/trampoline_32.S
@@ -0,0 +1,74 @@
+/*
+ *
+ *	Trampoline.S	Derived from Setup.S by Linus Torvalds
+ *
+ *	4 Jan 1997 Michael Chastain: changed to gnu as.
+ *
+ *	This is only used for booting secondary CPUs in SMP machine
+ *
+ *	Entry: CS:IP point to the start of our code, we are
+ *	in real mode with no stack, but the rest of the
+ *	trampoline page to make our stack and everything else
+ *	is a mystery.
+ *
+ *	We jump into arch/x86/kernel/head_32.S.
+ *
+ *	On entry to trampoline_start, the processor is in real mode
+ *	with 16-bit addressing and 16-bit data.  CS has some value
+ *	and IP is zero.  Thus, we load CS to the physical segment
+ *	of the real mode code before doing anything further.
+ */
+
+#include <linux/linkage.h>
+#include <linux/init.h>
+#include <asm/segment.h>
+#include <asm/page_types.h>
+#include "realmode.h"
+
+	.text
+	.code16
+
+	.balign	PAGE_SIZE
+ENTRY(trampoline_start)
+	wbinvd			# Needed for NUMA-Q; should be harmless for others
+
+	LJMPW_RM(1f)
+1:
+	mov	%cs, %ax	# Code and data in the same place
+	mov	%ax, %ds
+
+	cli			# We should be safe anyway
+
+	movl	tr_start, %eax	# where we need to go
+
+	movl	$0xA5A5A5A5, trampoline_status
+				# write marker so the master knows we're running
+
+	/*
+	 * The GDT may be in a non-default location and the kernel can be
+	 * beyond 16MB; lgdt will not be able to load such an address, as the
+	 * default operand size in real mode is 16 bit. Use lgdtl instead to
+	 * force a 32-bit operand size.
+	 */
+	lidtl	tr_idt			# load idt with 0, 0
+	lgdtl	tr_gdt			# load gdt with whatever is appropriate
+
+	movw	$1, %dx			# protected mode (PE) bit
+	lmsw	%dx			# into protected mode
+
+	ljmpl	$__BOOT_CS, $pa_startup_32
+
+	.section ".text32","ax"
+	.code32
+ENTRY(startup_32)			# note: also used from wakeup_asm.S
+	jmp	*%eax
+
+	.bss
+	.balign 8
+GLOBAL(trampoline_header)
+	tr_start:		.space	4
+	tr_gdt_pad:		.space	2
+	tr_gdt:			.space	6
+END(trampoline_header)
+	
+#include "trampoline_common.S"
diff --git a/arch/x86/realmode/rm/trampoline_64.S b/arch/x86/realmode/rm/trampoline_64.S
new file mode 100644
index 0000000..bb360dc
--- /dev/null
+++ b/arch/x86/realmode/rm/trampoline_64.S
@@ -0,0 +1,153 @@
+/*
+ *
+ *	Trampoline.S	Derived from Setup.S by Linus Torvalds
+ *
+ *	4 Jan 1997 Michael Chastain: changed to gnu as.
+ *	15 Sept 2005 Eric Biederman: 64bit PIC support
+ *
+ *	Entry: CS:IP point to the start of our code, we are
+ *	in real mode with no stack, but the rest of the
+ *	trampoline page to make our stack and everything else
+ *	is a mystery.
+ *
+ *	On entry to trampoline_start, the processor is in real mode
+ *	with 16-bit addressing and 16-bit data.  CS has some value
+ *	and IP is zero.  Thus, data addresses need to be absolute
+ *	(no relocation) and are taken with regard to r_base.
+ *
+ *	With the addition of trampoline_level4_pgt this code can
+ *	now enter a 64bit kernel that lives at arbitrary 64bit
+ *	physical addresses.
+ *
+ *	If you work on this file, check the object module with objdump
+ *	--full-contents --reloc to make sure there are no relocation
+ *	entries.
+ */
+
+#include <linux/linkage.h>
+#include <linux/init.h>
+#include <asm/pgtable_types.h>
+#include <asm/page_types.h>
+#include <asm/msr.h>
+#include <asm/segment.h>
+#include <asm/processor-flags.h>
+#include "realmode.h"
+
+	.text
+	.code16
+
+	.balign	PAGE_SIZE
+ENTRY(trampoline_start)
+	cli			# We should be safe anyway
+	wbinvd
+
+	LJMPW_RM(1f)
+1:
+	mov	%cs, %ax	# Code and data in the same place
+	mov	%ax, %ds
+	mov	%ax, %es
+	mov	%ax, %ss
+
+	movl	$0xA5A5A5A5, trampoline_status
+	# write marker so the master knows we're running
+
+	# Setup stack
+	movl	$rm_stack_end, %esp
+
+	call	verify_cpu		# Verify the cpu supports long mode
+	testl   %eax, %eax		# Check for return code
+	jnz	no_longmode
+
+	/*
+	 * The GDT may be in a non-default location and the kernel may be
+	 * beyond 16MB, so lgdt cannot load the address: the default operand
+	 * size in real mode is 16 bits. Use lgdtl instead to force a 32-bit
+	 * operand size.
+	 */
+
+	lidtl	tr_idt	# load idt with 0, 0
+	lgdtl	tr_gdt	# load gdt with whatever is appropriate
+
+	movw	$__KERNEL_DS, %dx	# Data segment descriptor
+
+	# Enable protected mode
+	movl	$X86_CR0_PE, %eax	# protected mode (PE) bit
+	movl	%eax, %cr0		# into protected mode
+
+	# flush prefetch and jump to startup_32
+	ljmpl	$__KERNEL32_CS, $pa_startup_32
+
+no_longmode:
+	hlt
+	jmp no_longmode
+#include "../kernel/verify_cpu.S"
+
+	.section ".text32","ax"
+	.code32
+	.balign 4
+ENTRY(startup_32)
+	movl	%edx, %ss
+	addl	$pa_real_mode_base, %esp
+	movl	%edx, %ds
+	movl	%edx, %es
+	movl	%edx, %fs
+	movl	%edx, %gs
+
+	movl	pa_tr_cr4, %eax
+	movl	%eax, %cr4		# Enable PAE mode
+
+	# Setup trampoline 4 level pagetables
+	movl	$pa_trampoline_pgd, %eax
+	movl	%eax, %cr3
+
+	# Set up EFER
+	movl	pa_tr_efer, %eax
+	movl	pa_tr_efer + 4, %edx
+	movl	$MSR_EFER, %ecx
+	wrmsr
+
+	# Enable paging and in turn activate Long Mode
+	movl	$(X86_CR0_PG | X86_CR0_WP | X86_CR0_PE), %eax
+	movl	%eax, %cr0
+
+	/*
+	 * At this point we're in long mode but in 32bit compatibility mode
+	 * with EFER.LME = 1, CS.L = 0, CS.D = 1 (and in turn
+	 * EFER.LMA = 1). Now we want to jump in 64bit mode, to do that we use
+	 * the new gdt/idt that has __KERNEL_CS with CS.L = 1.
+	 */
+	ljmpl	$__KERNEL_CS, $pa_startup_64
+
+	.section ".text64","ax"
+	.code64
+	.balign 4
+ENTRY(startup_64)
+	# Now jump into the kernel using virtual addresses
+	jmpq	*tr_start(%rip)
+
+	.section ".rodata","a"
+	# Duplicate the global descriptor table
+	# so the kernel can live anywhere
+	.balign	16
+	.globl tr_gdt
+tr_gdt:
+	.short	tr_gdt_end - tr_gdt - 1	# gdt limit
+	.long	pa_tr_gdt
+	.short	0
+	.quad	0x00cf9b000000ffff	# __KERNEL32_CS
+	.quad	0x00af9b000000ffff	# __KERNEL_CS
+	.quad	0x00cf93000000ffff	# __KERNEL_DS
+tr_gdt_end:
+
+	.bss
+	.balign	PAGE_SIZE
+GLOBAL(trampoline_pgd)		.space	PAGE_SIZE
+
+	.balign	8
+GLOBAL(trampoline_header)
+	tr_start:		.space	8
+	GLOBAL(tr_efer)		.space	8
+	GLOBAL(tr_cr4)		.space	4
+END(trampoline_header)
+
+#include "trampoline_common.S"
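The 64-bit header additionally carries tr_efer and tr_cr4, which startup_32 writes to MSR_EFER and %cr4 before enabling paging. A rough sketch of capturing the boot CPU's values into such a header; the C struct and function are illustrative only:

#include <linux/types.h>
#include <asm/msr.h>

/* Hypothetical C view of the 64-bit trampoline_header above. */
struct trampoline_header_64 {
	u64 start;	/* tr_start: target of "jmpq *tr_start(%rip)"  */
	u64 efer;	/* tr_efer: written to MSR_EFER by startup_32  */
	u32 cr4;	/* tr_cr4: loaded into %cr4 to enable PAE      */
};

/* Sketch only: capture the boot CPU's EFER/CR4 and the 64-bit entry
 * point; read_cr4() is assumed to be available in this context. */
static void sketch_fill_trampoline_64(struct trampoline_header_64 *th,
				      u64 secondary_entry_pa)
{
	th->start = secondary_entry_pa;
	rdmsrl(MSR_EFER, th->efer);	/* preserve NX and friends */
	th->cr4 = read_cr4();		/* PAE must already be set */
}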
diff --git a/arch/x86/realmode/rm/trampoline_common.S b/arch/x86/realmode/rm/trampoline_common.S
new file mode 100644
index 0000000..b1ecdb9
--- /dev/null
+++ b/arch/x86/realmode/rm/trampoline_common.S
@@ -0,0 +1,7 @@
+	.section ".rodata","a"
+	.balign	16
+tr_idt: .fill 1, 6, 0
+
+	.bss
+	.balign	4
+GLOBAL(trampoline_status)	.space	4
diff --git a/arch/x86/realmode/rm/video-bios.c b/arch/x86/realmode/rm/video-bios.c
new file mode 100644
index 0000000..848b25a
--- /dev/null
+++ b/arch/x86/realmode/rm/video-bios.c
@@ -0,0 +1 @@
+#include "../../boot/video-bios.c"
diff --git a/arch/x86/realmode/rm/video-mode.c b/arch/x86/realmode/rm/video-mode.c
new file mode 100644
index 0000000..2a98b7e
--- /dev/null
+++ b/arch/x86/realmode/rm/video-mode.c
@@ -0,0 +1 @@
+#include "../../boot/video-mode.c"
diff --git a/arch/x86/realmode/rm/video-vesa.c b/arch/x86/realmode/rm/video-vesa.c
new file mode 100644
index 0000000..413eddd
--- /dev/null
+++ b/arch/x86/realmode/rm/video-vesa.c
@@ -0,0 +1 @@
+#include "../../boot/video-vesa.c"
diff --git a/arch/x86/realmode/rm/video-vga.c b/arch/x86/realmode/rm/video-vga.c
new file mode 100644
index 0000000..3085f5c
--- /dev/null
+++ b/arch/x86/realmode/rm/video-vga.c
@@ -0,0 +1 @@
+#include "../../boot/video-vga.c"
diff --git a/arch/x86/kernel/acpi/realmode/wakemain.c b/arch/x86/realmode/rm/wakemain.c
similarity index 98%
rename from arch/x86/kernel/acpi/realmode/wakemain.c
rename to arch/x86/realmode/rm/wakemain.c
index 883962d..91405d5 100644
--- a/arch/x86/kernel/acpi/realmode/wakemain.c
+++ b/arch/x86/realmode/rm/wakemain.c
@@ -65,7 +65,8 @@
 {
 	/* Kill machine if structures are wrong */
 	if (wakeup_header.real_magic != 0x12345678)
-		while (1);
+		while (1)
+			;
 
 	if (wakeup_header.realmode_flags & 4)
 		send_morse("...-");
diff --git a/arch/x86/kernel/acpi/realmode/wakeup.h b/arch/x86/realmode/rm/wakeup.h
similarity index 79%
rename from arch/x86/kernel/acpi/realmode/wakeup.h
rename to arch/x86/realmode/rm/wakeup.h
index 97a29e1..9317e00 100644
--- a/arch/x86/kernel/acpi/realmode/wakeup.h
+++ b/arch/x86/realmode/rm/wakeup.h
@@ -12,9 +12,8 @@
 /* This must match data at wakeup.S */
 struct wakeup_header {
 	u16 video_mode;		/* Video mode number */
-	u16 _jmp1;		/* ljmpl opcode, 32-bit only */
 	u32 pmode_entry;	/* Protected mode resume point, 32-bit only */
-	u16 _jmp2;		/* CS value, 32-bit only */
+	u16 pmode_cs;
 	u32 pmode_cr0;		/* Protected mode cr0 */
 	u32 pmode_cr3;		/* Protected mode cr3 */
 	u32 pmode_cr4;		/* Protected mode cr4 */
@@ -26,12 +25,6 @@
 	u32 pmode_behavior;	/* Wakeup routine behavior flags */
 	u32 realmode_flags;
 	u32 real_magic;
-	u16 trampoline_segment;	/* segment with trampoline code, 64-bit only */
-	u8  _pad1;
-	u8  wakeup_jmp;
-	u16 wakeup_jmp_off;
-	u16 wakeup_jmp_seg;
-	u64 wakeup_gdt[3];
 	u32 signature;		/* To check we have correct structure */
 } __attribute__((__packed__));
 
@@ -40,7 +33,6 @@
 
 #define WAKEUP_HEADER_OFFSET	8
 #define WAKEUP_HEADER_SIGNATURE 0x51ee1111
-#define WAKEUP_END_SIGNATURE	0x65a22c82
 
 /* Wakeup behavior bits */
 #define WAKEUP_BEHAVIOR_RESTORE_MISC_ENABLE     0
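With the jump stubs and trampoline segment gone, the sleep path only needs to record a 32-bit resume point and the saved control registers in this header. A rough sketch of filling it before entering S3; the resume_stub_pa and cr3_pa parameters are placeholders for values the real code would compute:

/* Sketch only: a kernel-context fragment; read_cr0()/read_cr4() are
 * assumed to be available, and the 0x12345678 magic is the value
 * checked by wakemain.c. */
static void sketch_fill_wakeup_header(struct wakeup_header *wh,
				      u32 resume_stub_pa, u32 cr3_pa)
{
	wh->video_mode  = 3;			/* e.g. 80x25 text mode */
	wh->pmode_entry = resume_stub_pa;	/* 32-bit resume point  */
	wh->pmode_cr0   = read_cr0();
	wh->pmode_cr3   = cr3_pa;
	wh->pmode_cr4   = read_cr4();
	wh->real_magic  = 0x12345678;		/* wakemain.c spins if wrong */
}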
diff --git a/arch/x86/realmode/rm/wakeup_asm.S b/arch/x86/realmode/rm/wakeup_asm.S
new file mode 100644
index 0000000..8905166
--- /dev/null
+++ b/arch/x86/realmode/rm/wakeup_asm.S
@@ -0,0 +1,177 @@
+/*
+ * ACPI wakeup real mode startup stub
+ */
+#include <linux/linkage.h>
+#include <asm/segment.h>
+#include <asm/msr-index.h>
+#include <asm/page_types.h>
+#include <asm/pgtable_types.h>
+#include <asm/processor-flags.h>
+#include "realmode.h"
+#include "wakeup.h"
+
+	.code16
+
+/* This should match the structure in wakeup.h */
+	.section ".data", "aw"
+
+	.balign	16
+GLOBAL(wakeup_header)
+	video_mode:	.short	0	/* Video mode number */
+	pmode_entry:	.long	0
+	pmode_cs:	.short	__KERNEL_CS
+	pmode_cr0:	.long	0	/* Saved %cr0 */
+	pmode_cr3:	.long	0	/* Saved %cr3 */
+	pmode_cr4:	.long	0	/* Saved %cr4 */
+	pmode_efer:	.quad	0	/* Saved EFER */
+	pmode_gdt:	.quad	0
+	pmode_misc_en:	.quad	0	/* Saved MISC_ENABLE MSR */
+	pmode_behavior:	.long	0	/* Wakeup behavior flags */
+	realmode_flags:	.long	0
+	real_magic:	.long	0
+	signature:	.long	WAKEUP_HEADER_SIGNATURE
+END(wakeup_header)
+
+	.text
+	.code16
+
+	.balign	16
+ENTRY(wakeup_start)
+	cli
+	cld
+
+	LJMPW_RM(3f)
+3:
+	/* Apparently some dimwit BIOS programmers don't know how to
+	   program a PM to RM transition, and we might end up here with
+	   junk in the data segment descriptor registers.  The only way
+	   to repair that is to go into PM and fix it ourselves... */
+	movw	$16, %cx
+	lgdtl	%cs:wakeup_gdt
+	movl	%cr0, %eax
+	orb	$X86_CR0_PE, %al
+	movl	%eax, %cr0
+	ljmpw	$8, $2f
+2:
+	movw	%cx, %ds
+	movw	%cx, %es
+	movw	%cx, %ss
+	movw	%cx, %fs
+	movw	%cx, %gs
+
+	andb	$~X86_CR0_PE, %al
+	movl	%eax, %cr0
+	LJMPW_RM(3f)
+3:
+	/* Set up segments */
+	movw	%cs, %ax
+	movw	%ax, %ss
+	movl	$rm_stack_end, %esp
+	movw	%ax, %ds
+	movw	%ax, %es
+	movw	%ax, %fs
+	movw	%ax, %gs
+
+	lidtl	wakeup_idt
+
+	/* Clear the EFLAGS */
+	pushl	$0
+	popfl
+
+	/* Check header signature... */
+	movl	signature, %eax
+	cmpl	$WAKEUP_HEADER_SIGNATURE, %eax
+	jne	bogus_real_magic
+
+	/* Check we really have everything... */
+	movl	end_signature, %eax
+	cmpl	$REALMODE_END_SIGNATURE, %eax
+	jne	bogus_real_magic
+
+	/* Call the C code */
+	calll	main
+
+	/* Restore MISC_ENABLE before entering protected mode, in case
+	   BIOS decided to clear XD_DISABLE during S3. */
+	movl	pmode_behavior, %eax
+	btl	$WAKEUP_BEHAVIOR_RESTORE_MISC_ENABLE, %eax
+	jnc	1f
+
+	movl	pmode_misc_en, %eax
+	movl	pmode_misc_en + 4, %edx
+	movl	$MSR_IA32_MISC_ENABLE, %ecx
+	wrmsr
+1:
+
+	/* Do any other stuff... */
+
+#ifndef CONFIG_64BIT
+	/* This could also be done in C code... */
+	movl	pmode_cr3, %eax
+	movl	%eax, %cr3
+
+	movl	pmode_cr4, %ecx
+	jecxz	1f
+	movl	%ecx, %cr4
+1:
+	movl	pmode_efer, %eax
+	movl	pmode_efer + 4, %edx
+	movl	%eax, %ecx
+	orl	%edx, %ecx
+	jz	1f
+	movl	$MSR_EFER, %ecx
+	wrmsr
+1:
+
+	lgdtl	pmode_gdt
+
+	/* This really couldn't... */
+	movl	pmode_entry, %eax
+	movl	pmode_cr0, %ecx
+	movl	%ecx, %cr0
+	ljmpl	$__KERNEL_CS, $pa_startup_32
+	/* -> jmp *%eax in trampoline_32.S */
+#else
+	jmp	trampoline_start
+#endif
+
+bogus_real_magic:
+1:
+	hlt
+	jmp	1b
+
+	.section ".rodata","a"
+
+	/*
+	 * Set up the wakeup GDT.  We set these up as Big Real Mode,
+	 * that is, with limits set to 4 GB.  At least the Lenovo
+	 * Thinkpad X61 is known to need this for the video BIOS
+	 * initialization quirk to work; this is likely to also
+	 * be the case for other laptops or integrated video devices.
+	 */
+
+	.balign	16
+GLOBAL(wakeup_gdt)
+	.word	3*8-1		/* Self-descriptor */
+	.long	pa_wakeup_gdt
+	.word	0
+
+	.word	0xffff		/* 16-bit code segment @ real_mode_base */
+	.long	0x9b000000 + pa_real_mode_base
+	.word	0x008f		/* big real mode */
+
+	.word	0xffff		/* 16-bit data segment @ real_mode_base */
+	.long	0x93000000 + pa_real_mode_base
+	.word	0x008f		/* big real mode */
+END(wakeup_gdt)
+
+	.section ".rodata","a"
+	.balign	8
+
+	/* This is the standard real-mode IDT */
+	.balign	16
+GLOBAL(wakeup_idt)
+	.word	0xffff		/* limit */
+	.long	0		/* address */
+	.word	0
+END(wakeup_idt)
diff --git a/arch/x86/realmode/rmpiggy.S b/arch/x86/realmode/rmpiggy.S
new file mode 100644
index 0000000..204c6ec
--- /dev/null
+++ b/arch/x86/realmode/rmpiggy.S
@@ -0,0 +1,20 @@
+/*
+ * Wrapper for the real-mode binary, used as a transport object
+ * before copying to low memory.
+ */
+#include <linux/linkage.h>
+#include <asm/page_types.h>
+
+	.section ".init.data","aw"
+
+	.balign PAGE_SIZE
+
+GLOBAL(real_mode_blob)
+	.incbin	"arch/x86/realmode/rm/realmode.bin"
+END(real_mode_blob)
+
+GLOBAL(real_mode_blob_end);
+
+GLOBAL(real_mode_relocs)
+	.incbin	"arch/x86/realmode/rm/realmode.relocs"
+END(real_mode_relocs)
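real_mode_blob and real_mode_relocs embed realmode.bin and its relocation list into .init.data; at boot the blob is copied below 1 MiB and the recorded 32-bit relocation sites are adjusted by the load address. A sketch of that copy-and-relocate step, assuming a relocation format of a 32-bit count followed by 32-bit offsets (an assumption, not taken from this patch):

#include <linux/types.h>
#include <linux/string.h>

extern unsigned char real_mode_blob[], real_mode_blob_end[];
extern unsigned char real_mode_relocs[];

/* Sketch only: copy the blob to its low-memory home and add the load
 * address to every recorded relocation site. */
static void sketch_copy_realmode(unsigned char *lowmem_base, u32 lowmem_pa)
{
	size_t size = real_mode_blob_end - real_mode_blob;
	u32 *rel = (u32 *)real_mode_relocs;
	u32 count = *rel++;		/* assumed record format */

	memcpy(lowmem_base, real_mode_blob, size);

	while (count--) {
		u32 *site = (u32 *)(lowmem_base + *rel++);
		*site += lowmem_pa;
	}
}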
diff --git a/arch/x86/syscalls/syscall_32.tbl b/arch/x86/syscalls/syscall_32.tbl
index 29f9f05..7a35a6e 100644
--- a/arch/x86/syscalls/syscall_32.tbl
+++ b/arch/x86/syscalls/syscall_32.tbl
@@ -355,3 +355,4 @@
 346	i386	setns			sys_setns
 347	i386	process_vm_readv	sys_process_vm_readv		compat_sys_process_vm_readv
 348	i386	process_vm_writev	sys_process_vm_writev		compat_sys_process_vm_writev
+349	i386	kcmp			sys_kcmp
diff --git a/arch/x86/syscalls/syscall_64.tbl b/arch/x86/syscalls/syscall_64.tbl
index dd29a9e..51171ae 100644
--- a/arch/x86/syscalls/syscall_64.tbl
+++ b/arch/x86/syscalls/syscall_64.tbl
@@ -318,6 +318,8 @@
 309	common	getcpu			sys_getcpu
 310	64	process_vm_readv	sys_process_vm_readv
 311	64	process_vm_writev	sys_process_vm_writev
+312	64	kcmp			sys_kcmp
+
 #
 # x32-specific system call numbers start at 512 to avoid cache impact
 # for native 64-bit operation.
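kcmp is wired up as syscall 349 on i386 and 312 on x86-64. A small userspace sketch invoking it by number on x86-64 to ask whether two tasks share an mm; KCMP_VM == 1 follows the kernel's enum ordering and should be treated as an assumption if <linux/kcmp.h> is not available:

#include <stdio.h>
#include <unistd.h>
#include <sys/syscall.h>

#define KCMP_VM 1	/* follows the kernel's enum ordering (assumption) */

int main(void)
{
	pid_t self = getpid();
	long same = syscall(312, self, self, KCMP_VM, 0, 0);

	/* 0 means the two tasks share the same mm */
	printf("kcmp(self, self, KCMP_VM) = %ld\n", same);
	return 0;
}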
diff --git a/arch/x86/tools/gen-insn-attr-x86.awk b/arch/x86/tools/gen-insn-attr-x86.awk
index 5f6a5b6..ddcf39b 100644
--- a/arch/x86/tools/gen-insn-attr-x86.awk
+++ b/arch/x86/tools/gen-insn-attr-x86.awk
@@ -66,9 +66,10 @@
 	rex_expr = "^REX(\\.[XRWB]+)*"
 	fpu_expr = "^ESC" # TODO
 
-	lprefix1_expr = "\\(66\\)"
+	lprefix1_expr = "\\((66|!F3)\\)"
 	lprefix2_expr = "\\(F3\\)"
-	lprefix3_expr = "\\(F2\\)"
+	lprefix3_expr = "\\((F2|!F3)\\)"
+	lprefix_expr = "\\((66|F2|F3)\\)"
 	max_lprefix = 4
 
 	# All opcodes starting with lower-case 'v' or with (v1) superscript
@@ -333,13 +334,16 @@
 		if (match(ext, lprefix1_expr)) {
 			lptable1[idx] = add_flags(lptable1[idx],flags)
 			variant = "INAT_VARIANT"
-		} else if (match(ext, lprefix2_expr)) {
+		}
+		if (match(ext, lprefix2_expr)) {
 			lptable2[idx] = add_flags(lptable2[idx],flags)
 			variant = "INAT_VARIANT"
-		} else if (match(ext, lprefix3_expr)) {
+		}
+		if (match(ext, lprefix3_expr)) {
 			lptable3[idx] = add_flags(lptable3[idx],flags)
 			variant = "INAT_VARIANT"
-		} else {
+		}
+		if (!match(ext, lprefix_expr)){
 			table[idx] = add_flags(table[idx],flags)
 		}
 	}
diff --git a/arch/x86/tools/relocs.c b/arch/x86/tools/relocs.c
index b685296..5a1847d 100644
--- a/arch/x86/tools/relocs.c
+++ b/arch/x86/tools/relocs.c
@@ -78,6 +78,13 @@
 
 static const char * const sym_regex_realmode[S_NSYMTYPES] = {
 /*
+ * These symbols are known to be relative, even if the linker marks them
+ * as absolute (typically defined outside any section in the linker script.)
+ */
+	[S_REL] =
+	"^pa_",
+
+/*
  * These are 16-bit segment symbols when compiling 16-bit code.
  */
 	[S_SEG] =
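The new [S_REL] "^pa_" entry makes the relocation tool treat pa_* symbols as relative even when the linker marks them absolute. A standalone sketch of that kind of regex-driven classification with POSIX regcomp/regexec; the symbol names below are illustrative, not the tool's actual input:

#include <regex.h>
#include <stdio.h>

int main(void)
{
	const char *syms[] = { "pa_real_mode_base", "real_mode_seg", "tr_start" };
	regex_t rel;
	unsigned int i;

	regcomp(&rel, "^pa_", REG_EXTENDED | REG_NOSUB);

	for (i = 0; i < sizeof(syms) / sizeof(syms[0]); i++)
		printf("%-20s -> %s\n", syms[i],
		       regexec(&rel, syms[i], 0, NULL, 0) == 0 ?
		       "relative (S_REL)" : "other");

	regfree(&rel);
	return 0;
}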
diff --git a/arch/x86/um/signal.c b/arch/x86/um/signal.c
index bb0fb03..a508cea1 100644
--- a/arch/x86/um/signal.c
+++ b/arch/x86/um/signal.c
@@ -486,7 +486,6 @@
 	    copy_from_user(&set.sig[1], extramask, sig_size))
 		goto segfault;
 
-	sigdelsetmask(&set, ~_BLOCKABLE);
 	set_current_blocked(&set);
 
 	if (copy_sc_from_user(&current->thread.regs, sc))
@@ -600,7 +599,6 @@
 	if (copy_from_user(&set, &uc->uc_sigmask, sizeof(set)))
 		goto segfault;
 
-	sigdelsetmask(&set, ~_BLOCKABLE);
 	set_current_blocked(&set);
 
 	if (copy_sc_from_user(&current->thread.regs, &uc->uc_mcontext))
diff --git a/arch/x86/um/sys_call_table_32.c b/arch/x86/um/sys_call_table_32.c
index 416bd40..68d1dc9 100644
--- a/arch/x86/um/sys_call_table_32.c
+++ b/arch/x86/um/sys_call_table_32.c
@@ -39,9 +39,9 @@
 #undef __SYSCALL_I386
 #define __SYSCALL_I386(nr, sym, compat) [ nr ] = sym,
 
-typedef void (*sys_call_ptr_t)(void);
+typedef asmlinkage void (*sys_call_ptr_t)(void);
 
-extern void sys_ni_syscall(void);
+extern asmlinkage void sys_ni_syscall(void);
 
 const sys_call_ptr_t sys_call_table[] __cacheline_aligned = {
 	/*
diff --git a/arch/x86/xen/enlighten.c b/arch/x86/xen/enlighten.c
index 75f33b2..ff962d4 100644
--- a/arch/x86/xen/enlighten.c
+++ b/arch/x86/xen/enlighten.c
@@ -209,6 +209,9 @@
 	       xen_feature(XENFEAT_mmu_pt_update_preserve_ad) ? " (preserve-AD)" : "");
 }
 
+#define CPUID_THERM_POWER_LEAF 6
+#define APERFMPERF_PRESENT 0
+
 static __read_mostly unsigned int cpuid_leaf1_edx_mask = ~0;
 static __read_mostly unsigned int cpuid_leaf1_ecx_mask = ~0;
 
@@ -242,6 +245,11 @@
 		*dx = cpuid_leaf5_edx_val;
 		return;
 
+	case CPUID_THERM_POWER_LEAF:
+		/* Disabling APERFMPERF for kernel usage */
+		maskecx = ~(1 << APERFMPERF_PRESENT);
+		break;
+
 	case 0xb:
 		/* Suppress extended topology stuff */
 		maskebx = 0;
@@ -1116,7 +1124,10 @@
 	.wbinvd = native_wbinvd,
 
 	.read_msr = native_read_msr_safe,
+	.rdmsr_regs = native_rdmsr_safe_regs,
 	.write_msr = xen_write_msr_safe,
+	.wrmsr_regs = native_wrmsr_safe_regs,
+
 	.read_tsc = native_read_tsc,
 	.read_pmc = native_read_pmc,
 
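xen_cpuid() hides APERF/MPERF from guests by clearing bit 0 of ECX in the thermal/power leaf (6). A condensed sketch of the masking pattern used in that function, with the leaf and bit values repeated from the hunk above; it is not the full xen_cpuid() implementation:

#define CPUID_THERM_POWER_LEAF	6	/* values repeated from the hunk above */
#define APERFMPERF_PRESENT	0

/* Sketch only: mask applied to ECX after the CPUID results are fetched. */
static void sketch_filter_cpuid_ecx(unsigned int leaf, unsigned int *cx)
{
	unsigned int maskecx = ~0u;

	if (leaf == CPUID_THERM_POWER_LEAF)
		maskecx = ~(1u << APERFMPERF_PRESENT);	/* hide APERF/MPERF */

	*cx &= maskecx;
}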
diff --git a/arch/x86/xen/p2m.c b/arch/x86/xen/p2m.c
index ffd08c4..64effdc6 100644
--- a/arch/x86/xen/p2m.c
+++ b/arch/x86/xen/p2m.c
@@ -706,6 +706,7 @@
 	unsigned long uninitialized_var(address);
 	unsigned level;
 	pte_t *ptep = NULL;
+	int ret = 0;
 
 	pfn = page_to_pfn(page);
 	if (!PageHighMem(page)) {
@@ -741,6 +742,24 @@
 	list_add(&page->lru,  &m2p_overrides[mfn_hash(mfn)]);
 	spin_unlock_irqrestore(&m2p_override_lock, flags);
 
+	/* p2m(m2p(mfn)) == mfn: the mfn is already present somewhere in
+	 * this domain. Set the FOREIGN_FRAME_BIT in the p2m for the other
+	 * pfn so that the following mfn_to_pfn(mfn) calls will return the
+	 * pfn from the m2p_override (the backend pfn) instead.
+	 * We need to do this because the pages shared by the frontend
+	 * (xen-blkfront) can be already locked (lock_page, called by
+	 * do_read_cache_page); when the userspace backend tries to use them
+	 * with direct_IO, mfn_to_pfn returns the pfn of the frontend, so
+	 * do_blockdev_direct_IO is going to try to lock the same pages
+	 * again resulting in a deadlock.
+	 * As a side effect get_user_pages_fast might not be safe on the
+	 * frontend pages while they are being shared with the backend,
+	 * because mfn_to_pfn (that ends up being called by GUPF) will
+	 * return the backend pfn rather than the frontend pfn. */
+	ret = __get_user(pfn, &machine_to_phys_mapping[mfn]);
+	if (ret == 0 && get_phys_to_machine(pfn) == mfn)
+		set_phys_to_machine(pfn, FOREIGN_FRAME(mfn));
+
 	return 0;
 }
 EXPORT_SYMBOL_GPL(m2p_add_override);
@@ -752,6 +771,7 @@
 	unsigned long uninitialized_var(address);
 	unsigned level;
 	pte_t *ptep = NULL;
+	int ret = 0;
 
 	pfn = page_to_pfn(page);
 	mfn = get_phys_to_machine(pfn);
@@ -821,6 +841,22 @@
 	} else
 		set_phys_to_machine(pfn, page->index);
 
+	/* p2m(m2p(mfn)) == FOREIGN_FRAME(mfn): the mfn is already present
+	 * somewhere in this domain, even before being added to the
+	 * m2p_override (see comment above in m2p_add_override).
+	 * If there are no other entries in the m2p_override corresponding
+	 * to this mfn, then remove the FOREIGN_FRAME_BIT from the p2m for
+	 * the original pfn (the one shared by the frontend): the backend
+	 * cannot do any IO on this page anymore because it has been
+	 * unshared. Removing the FOREIGN_FRAME_BIT from the p2m entry of
+	 * the original pfn causes mfn_to_pfn(mfn) to return the frontend
+	 * pfn again. */
+	mfn &= ~FOREIGN_FRAME_BIT;
+	ret = __get_user(pfn, &machine_to_phys_mapping[mfn]);
+	if (ret == 0 && get_phys_to_machine(pfn) == FOREIGN_FRAME(mfn) &&
+			m2p_find_override(mfn) == NULL)
+		set_phys_to_machine(pfn, mfn);
+
 	return 0;
 }
 EXPORT_SYMBOL_GPL(m2p_remove_override);
diff --git a/arch/x86/xen/setup.c b/arch/x86/xen/setup.c
index 3ebba07..a4790bf 100644
--- a/arch/x86/xen/setup.c
+++ b/arch/x86/xen/setup.c
@@ -371,7 +371,8 @@
 	populated = xen_populate_chunk(map, memmap.nr_entries,
 			max_pfn, &last_pfn, xen_released_pages);
 
-	extra_pages += (xen_released_pages - populated);
+	xen_released_pages -= populated;
+	extra_pages += xen_released_pages;
 
 	if (last_pfn > max_pfn) {
 		max_pfn = min(MAX_DOMAIN_PAGES, last_pfn);
diff --git a/arch/xtensa/Makefile b/arch/xtensa/Makefile
index 7608559..f973754 100644
--- a/arch/xtensa/Makefile
+++ b/arch/xtensa/Makefile
@@ -68,8 +68,8 @@
 
 # Only build variant and/or platform if it includes a Makefile
 
-buildvar := $(shell test -a $(srctree)/arch/xtensa/variants/$(VARIANT)/Makefile && echo arch/xtensa/variants/$(VARIANT)/)
-buildplf := $(shell test -a $(srctree)/arch/xtensa/platforms/$(PLATFORM)/Makefile && echo arch/xtensa/platforms/$(PLATFORM)/)
+buildvar := $(shell test -e $(srctree)/arch/xtensa/variants/$(VARIANT)/Makefile && echo arch/xtensa/variants/$(VARIANT)/)
+buildplf := $(shell test -e $(srctree)/arch/xtensa/platforms/$(PLATFORM)/Makefile && echo arch/xtensa/platforms/$(PLATFORM)/)
 
 # Find libgcc.a
 
diff --git a/arch/xtensa/include/asm/syscall.h b/arch/xtensa/include/asm/syscall.h
index 0b9f2e1..c1dacca 100644
--- a/arch/xtensa/include/asm/syscall.h
+++ b/arch/xtensa/include/asm/syscall.h
@@ -31,5 +31,5 @@
 asmlinkage long sys_ppoll(struct pollfd __user *ufds, unsigned int nfds,
 	struct timespec __user *tsp, const sigset_t __user *sigmask,
 	size_t sigsetsize);
-
-
+asmlinkage long sys_rt_sigsuspend(sigset_t __user *unewset,
+		size_t sigsetsize);
diff --git a/arch/xtensa/kernel/signal.c b/arch/xtensa/kernel/signal.c
index c5e4ec0..efe4e85 100644
--- a/arch/xtensa/kernel/signal.c
+++ b/arch/xtensa/kernel/signal.c
@@ -30,8 +30,6 @@
 
 #define DEBUG_SIG  0
 
-#define _BLOCKABLE (~(sigmask(SIGKILL) | sigmask(SIGSTOP)))
-
 extern struct task_struct *coproc_owners[];
 
 struct rt_sigframe
@@ -261,7 +259,6 @@
 	if (__copy_from_user(&set, &frame->uc.uc_sigmask, sizeof(set)))
 		goto badframe;
 
-	sigdelsetmask(&set, ~_BLOCKABLE);
 	set_current_blocked(&set);
 
 	if (restore_sigcontext(regs, frame))
@@ -452,15 +449,6 @@
 	siginfo_t info;
 	int signr;
 	struct k_sigaction ka;
-	sigset_t oldset;
-
-	if (try_to_freeze())
-		goto no_signal;
-
-	if (test_thread_flag(TIF_RESTORE_SIGMASK))
-		oldset = &current->saved_sigmask;
-	else
-		oldset = &current->blocked;
 
 	task_pt_regs(current)->icountlevel = 0;
 
@@ -501,19 +489,17 @@
 
 		/* Whee!  Actually deliver the signal.  */
 		/* Set up the stack frame */
-		ret = setup_frame(signr, &ka, &info, oldset, regs);
+		ret = setup_frame(signr, &ka, &info, sigmask_to_save(), regs);
 		if (ret)
 			return;
 
-		clear_thread_flag(TIF_RESTORE_SIGMASK);
-		block_sigmask(&ka, signr);
+		signal_delivered(signr, &info, &ka, regs, 0);
 		if (current->ptrace & PT_SINGLESTEP)
 			task_pt_regs(current)->icountlevel = 1;
 
 		return;
 	}
 
-no_signal:
 	/* Did we come from a system call? */
 	if ((signed) regs->syscall >= 0) {
 		/* Restart the system call - no handlers present */
@@ -532,8 +518,7 @@
 	}
 
 	/* If there's no signal to deliver, we just restore the saved mask.  */
-	if (test_and_clear_thread_flag(TIF_RESTORE_SIGMASK))
-		set_current_blocked(&current->saved_sigmask);
+	restore_saved_sigmask();
 
 	if (current->ptrace & PT_SINGLESTEP)
 		task_pt_regs(current)->icountlevel = 1;
@@ -548,9 +533,6 @@
 	if (test_thread_flag(TIF_SIGPENDING))
 		do_signal(regs);
 
-	if (test_and_clear_thread_flag(TIF_NOTIFY_RESUME)) {
+	if (test_and_clear_thread_flag(TIF_NOTIFY_RESUME))
 		tracehook_notify_resume(regs);
-		if (current->replacement_session_keyring)
-			key_replace_session_keyring();
-	}
 }
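The conversion above moves xtensa to the generic signal helpers: deliver with the mask from sigmask_to_save(), let signal_delivered() update the blocked set, and fall back to restore_saved_sigmask() when nothing is pending. A condensed sketch of that pattern, following the calls as they appear in the hunk (setup_frame() is the arch-local frame builder):

/* Sketch of the pattern adopted above; setup_frame() returns non-zero
 * on failure, as in the hunk. */
static void sketch_do_signal(struct pt_regs *regs)
{
	siginfo_t info;
	struct k_sigaction ka;
	int signr = get_signal_to_deliver(&info, &ka, regs, NULL);

	if (signr > 0) {
		if (!setup_frame(signr, &ka, &info, sigmask_to_save(), regs))
			signal_delivered(signr, &info, &ka, regs, 0);
		return;
	}

	/* nothing to deliver: put back any mask saved by e.g. sigsuspend() */
	restore_saved_sigmask();
}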
diff --git a/arch/xtensa/kernel/vmlinux.lds.S b/arch/xtensa/kernel/vmlinux.lds.S
index 88ecea3..ee2e208 100644
--- a/arch/xtensa/kernel/vmlinux.lds.S
+++ b/arch/xtensa/kernel/vmlinux.lds.S
@@ -83,7 +83,6 @@
 
   _text = .;
   _stext = .;
-  _ftext = .;
 
   .text :
   {
@@ -112,7 +111,7 @@
   EXCEPTION_TABLE(16)
   /* Data section */
 
-  _fdata = .;
+  _sdata = .;
   RW_DATA_SECTION(XCHAL_ICACHE_LINESIZE, PAGE_SIZE, THREAD_SIZE)
   _edata = .;
 
diff --git a/arch/xtensa/mm/init.c b/arch/xtensa/mm/init.c
index ba150e5..db955179 100644
--- a/arch/xtensa/mm/init.c
+++ b/arch/xtensa/mm/init.c
@@ -26,11 +26,7 @@
 
 #include <asm/bootparam.h>
 #include <asm/page.h>
-
-/* References to section boundaries */
-
-extern char _ftext, _etext, _fdata, _edata, _rodata_end;
-extern char __init_begin, __init_end;
+#include <asm/sections.h>
 
 /*
  * mem_reserve(start, end, must_exist)
@@ -197,9 +193,9 @@
 			reservedpages++;
 	}
 
-	codesize =  (unsigned long) &_etext - (unsigned long) &_ftext;
-	datasize =  (unsigned long) &_edata - (unsigned long) &_fdata;
-	initsize =  (unsigned long) &__init_end - (unsigned long) &__init_begin;
+	codesize =  (unsigned long) _etext - (unsigned long) _stext;
+	datasize =  (unsigned long) _edata - (unsigned long) _sdata;
+	initsize =  (unsigned long) __init_end - (unsigned long) __init_begin;
 
 	printk("Memory: %luk/%luk available (%ldk kernel code, %ldk reserved, "
 	       "%ldk data, %ldk init %ldk highmem)\n",
@@ -237,7 +233,7 @@
 
 void free_initmem(void)
 {
-	free_reserved_mem(&__init_begin, &__init_end);
-	printk("Freeing unused kernel memory: %dk freed\n",
-	       (&__init_end - &__init_begin) >> 10);
+	free_reserved_mem(__init_begin, __init_end);
+	printk("Freeing unused kernel memory: %zuk freed\n",
+	       (__init_end - __init_begin) >> 10);
 }
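The section boundaries now come from <asm/sections.h>, which declares them as character arrays, so sizes become plain pointer differences without taking the address of single chars. A small sketch of that idiom:

#include <linux/kernel.h>
#include <asm/sections.h>

/* Sketch only: the boundary symbols are char arrays, so their names
 * are already addresses and sizes are pointer differences. */
static void sketch_report_sections(void)
{
	unsigned long code = _etext - _stext;
	unsigned long data = _edata - _sdata;
	unsigned long init = __init_end - __init_begin;

	pr_info("code %luk, data %luk, init %luk\n",
		code >> 10, data >> 10, init >> 10);
}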
diff --git a/block/Kconfig.iosched b/block/Kconfig.iosched
index 3199b76..421bef9 100644
--- a/block/Kconfig.iosched
+++ b/block/Kconfig.iosched
@@ -23,8 +23,6 @@
 
 config IOSCHED_CFQ
 	tristate "CFQ I/O scheduler"
-	# If BLK_CGROUP is a module, CFQ has to be built as module.
-	depends on (BLK_CGROUP=m && m) || !BLK_CGROUP || BLK_CGROUP=y
 	default y
 	---help---
 	  The CFQ I/O scheduler tries to distribute bandwidth equally
@@ -34,8 +32,6 @@
 
 	  This is the default I/O scheduler.
 
-	  Note: If BLK_CGROUP=m, then CFQ can be built only as module.
-
 config CFQ_GROUP_IOSCHED
 	bool "CFQ Group Scheduling support"
 	depends on IOSCHED_CFQ && BLK_CGROUP
diff --git a/block/blk-cgroup.c b/block/blk-cgroup.c
index 126c341..02cf633 100644
--- a/block/blk-cgroup.c
+++ b/block/blk-cgroup.c
@@ -11,1570 +11,612 @@
  * 	              Nauman Rafique <nauman@google.com>
  */
 #include <linux/ioprio.h>
-#include <linux/seq_file.h>
 #include <linux/kdev_t.h>
 #include <linux/module.h>
 #include <linux/err.h>
 #include <linux/blkdev.h>
 #include <linux/slab.h>
-#include "blk-cgroup.h"
 #include <linux/genhd.h>
+#include <linux/delay.h>
+#include <linux/atomic.h>
+#include "blk-cgroup.h"
+#include "blk.h"
 
 #define MAX_KEY_LEN 100
 
-static DEFINE_SPINLOCK(blkio_list_lock);
-static LIST_HEAD(blkio_list);
+static DEFINE_MUTEX(blkcg_pol_mutex);
 
-struct blkio_cgroup blkio_root_cgroup = { .weight = 2*BLKIO_WEIGHT_DEFAULT };
-EXPORT_SYMBOL_GPL(blkio_root_cgroup);
+struct blkcg blkcg_root = { .cfq_weight = 2 * CFQ_WEIGHT_DEFAULT };
+EXPORT_SYMBOL_GPL(blkcg_root);
 
-/* for encoding cft->private value on file */
-#define BLKIOFILE_PRIVATE(x, val)	(((x) << 16) | (val))
-/* What policy owns the file, proportional or throttle */
-#define BLKIOFILE_POLICY(val)		(((val) >> 16) & 0xffff)
-#define BLKIOFILE_ATTR(val)		((val) & 0xffff)
+static struct blkcg_policy *blkcg_policy[BLKCG_MAX_POLS];
 
-static inline void blkio_policy_insert_node(struct blkio_cgroup *blkcg,
-					    struct blkio_policy_node *pn)
-{
-	list_add(&pn->node, &blkcg->policy_list);
-}
-
-static inline bool cftype_blkg_same_policy(struct cftype *cft,
-			struct blkio_group *blkg)
-{
-	enum blkio_policy_id plid = BLKIOFILE_POLICY(cft->private);
-
-	if (blkg->plid == plid)
-		return 1;
-
-	return 0;
-}
-
-/* Determines if policy node matches cgroup file being accessed */
-static inline bool pn_matches_cftype(struct cftype *cft,
-			struct blkio_policy_node *pn)
-{
-	enum blkio_policy_id plid = BLKIOFILE_POLICY(cft->private);
-	int fileid = BLKIOFILE_ATTR(cft->private);
-
-	return (plid == pn->plid && fileid == pn->fileid);
-}
-
-/* Must be called with blkcg->lock held */
-static inline void blkio_policy_delete_node(struct blkio_policy_node *pn)
-{
-	list_del(&pn->node);
-}
-
-/* Must be called with blkcg->lock held */
-static struct blkio_policy_node *
-blkio_policy_search_node(const struct blkio_cgroup *blkcg, dev_t dev,
-		enum blkio_policy_id plid, int fileid)
-{
-	struct blkio_policy_node *pn;
-
-	list_for_each_entry(pn, &blkcg->policy_list, node) {
-		if (pn->dev == dev && pn->plid == plid && pn->fileid == fileid)
-			return pn;
-	}
-
-	return NULL;
-}
-
-struct blkio_cgroup *cgroup_to_blkio_cgroup(struct cgroup *cgroup)
+struct blkcg *cgroup_to_blkcg(struct cgroup *cgroup)
 {
 	return container_of(cgroup_subsys_state(cgroup, blkio_subsys_id),
-			    struct blkio_cgroup, css);
+			    struct blkcg, css);
 }
-EXPORT_SYMBOL_GPL(cgroup_to_blkio_cgroup);
+EXPORT_SYMBOL_GPL(cgroup_to_blkcg);
 
-struct blkio_cgroup *task_blkio_cgroup(struct task_struct *tsk)
+static struct blkcg *task_blkcg(struct task_struct *tsk)
 {
 	return container_of(task_subsys_state(tsk, blkio_subsys_id),
-			    struct blkio_cgroup, css);
-}
-EXPORT_SYMBOL_GPL(task_blkio_cgroup);
-
-static inline void
-blkio_update_group_weight(struct blkio_group *blkg, unsigned int weight)
-{
-	struct blkio_policy_type *blkiop;
-
-	list_for_each_entry(blkiop, &blkio_list, list) {
-		/* If this policy does not own the blkg, do not send updates */
-		if (blkiop->plid != blkg->plid)
-			continue;
-		if (blkiop->ops.blkio_update_group_weight_fn)
-			blkiop->ops.blkio_update_group_weight_fn(blkg->key,
-							blkg, weight);
-	}
+			    struct blkcg, css);
 }
 
-static inline void blkio_update_group_bps(struct blkio_group *blkg, u64 bps,
-				int fileid)
+struct blkcg *bio_blkcg(struct bio *bio)
 {
-	struct blkio_policy_type *blkiop;
+	if (bio && bio->bi_css)
+		return container_of(bio->bi_css, struct blkcg, css);
+	return task_blkcg(current);
+}
+EXPORT_SYMBOL_GPL(bio_blkcg);
 
-	list_for_each_entry(blkiop, &blkio_list, list) {
+static bool blkcg_policy_enabled(struct request_queue *q,
+				 const struct blkcg_policy *pol)
+{
+	return pol && test_bit(pol->plid, q->blkcg_pols);
+}
 
-		/* If this policy does not own the blkg, do not send updates */
-		if (blkiop->plid != blkg->plid)
+/**
+ * blkg_free - free a blkg
+ * @blkg: blkg to free
+ *
+ * Free @blkg which may be partially allocated.
+ */
+static void blkg_free(struct blkcg_gq *blkg)
+{
+	int i;
+
+	if (!blkg)
+		return;
+
+	for (i = 0; i < BLKCG_MAX_POLS; i++) {
+		struct blkcg_policy *pol = blkcg_policy[i];
+		struct blkg_policy_data *pd = blkg->pd[i];
+
+		if (!pd)
 			continue;
 
-		if (fileid == BLKIO_THROTL_read_bps_device
-		    && blkiop->ops.blkio_update_group_read_bps_fn)
-			blkiop->ops.blkio_update_group_read_bps_fn(blkg->key,
-								blkg, bps);
+		if (pol && pol->pd_exit_fn)
+			pol->pd_exit_fn(blkg);
 
-		if (fileid == BLKIO_THROTL_write_bps_device
-		    && blkiop->ops.blkio_update_group_write_bps_fn)
-			blkiop->ops.blkio_update_group_write_bps_fn(blkg->key,
-								blkg, bps);
+		kfree(pd);
 	}
+
+	kfree(blkg);
 }
 
-static inline void blkio_update_group_iops(struct blkio_group *blkg,
-			unsigned int iops, int fileid)
+/**
+ * blkg_alloc - allocate a blkg
+ * @blkcg: block cgroup the new blkg is associated with
+ * @q: request_queue the new blkg is associated with
+ *
+ * Allocate a new blkg associating @blkcg and @q.
+ */
+static struct blkcg_gq *blkg_alloc(struct blkcg *blkcg, struct request_queue *q)
 {
-	struct blkio_policy_type *blkiop;
+	struct blkcg_gq *blkg;
+	int i;
 
-	list_for_each_entry(blkiop, &blkio_list, list) {
+	/* alloc and init base part */
+	blkg = kzalloc_node(sizeof(*blkg), GFP_ATOMIC, q->node);
+	if (!blkg)
+		return NULL;
 
-		/* If this policy does not own the blkg, do not send updates */
-		if (blkiop->plid != blkg->plid)
+	blkg->q = q;
+	INIT_LIST_HEAD(&blkg->q_node);
+	blkg->blkcg = blkcg;
+	blkg->refcnt = 1;
+
+	for (i = 0; i < BLKCG_MAX_POLS; i++) {
+		struct blkcg_policy *pol = blkcg_policy[i];
+		struct blkg_policy_data *pd;
+
+		if (!blkcg_policy_enabled(q, pol))
 			continue;
 
-		if (fileid == BLKIO_THROTL_read_iops_device
-		    && blkiop->ops.blkio_update_group_read_iops_fn)
-			blkiop->ops.blkio_update_group_read_iops_fn(blkg->key,
-								blkg, iops);
-
-		if (fileid == BLKIO_THROTL_write_iops_device
-		    && blkiop->ops.blkio_update_group_write_iops_fn)
-			blkiop->ops.blkio_update_group_write_iops_fn(blkg->key,
-								blkg,iops);
-	}
-}
-
-/*
- * Add to the appropriate stat variable depending on the request type.
- * This should be called with the blkg->stats_lock held.
- */
-static void blkio_add_stat(uint64_t *stat, uint64_t add, bool direction,
-				bool sync)
-{
-	if (direction)
-		stat[BLKIO_STAT_WRITE] += add;
-	else
-		stat[BLKIO_STAT_READ] += add;
-	if (sync)
-		stat[BLKIO_STAT_SYNC] += add;
-	else
-		stat[BLKIO_STAT_ASYNC] += add;
-}
-
-/*
- * Decrements the appropriate stat variable if non-zero depending on the
- * request type. Panics on value being zero.
- * This should be called with the blkg->stats_lock held.
- */
-static void blkio_check_and_dec_stat(uint64_t *stat, bool direction, bool sync)
-{
-	if (direction) {
-		BUG_ON(stat[BLKIO_STAT_WRITE] == 0);
-		stat[BLKIO_STAT_WRITE]--;
-	} else {
-		BUG_ON(stat[BLKIO_STAT_READ] == 0);
-		stat[BLKIO_STAT_READ]--;
-	}
-	if (sync) {
-		BUG_ON(stat[BLKIO_STAT_SYNC] == 0);
-		stat[BLKIO_STAT_SYNC]--;
-	} else {
-		BUG_ON(stat[BLKIO_STAT_ASYNC] == 0);
-		stat[BLKIO_STAT_ASYNC]--;
-	}
-}
-
-#ifdef CONFIG_DEBUG_BLK_CGROUP
-/* This should be called with the blkg->stats_lock held. */
-static void blkio_set_start_group_wait_time(struct blkio_group *blkg,
-						struct blkio_group *curr_blkg)
-{
-	if (blkio_blkg_waiting(&blkg->stats))
-		return;
-	if (blkg == curr_blkg)
-		return;
-	blkg->stats.start_group_wait_time = sched_clock();
-	blkio_mark_blkg_waiting(&blkg->stats);
-}
-
-/* This should be called with the blkg->stats_lock held. */
-static void blkio_update_group_wait_time(struct blkio_group_stats *stats)
-{
-	unsigned long long now;
-
-	if (!blkio_blkg_waiting(stats))
-		return;
-
-	now = sched_clock();
-	if (time_after64(now, stats->start_group_wait_time))
-		stats->group_wait_time += now - stats->start_group_wait_time;
-	blkio_clear_blkg_waiting(stats);
-}
-
-/* This should be called with the blkg->stats_lock held. */
-static void blkio_end_empty_time(struct blkio_group_stats *stats)
-{
-	unsigned long long now;
-
-	if (!blkio_blkg_empty(stats))
-		return;
-
-	now = sched_clock();
-	if (time_after64(now, stats->start_empty_time))
-		stats->empty_time += now - stats->start_empty_time;
-	blkio_clear_blkg_empty(stats);
-}
-
-void blkiocg_update_set_idle_time_stats(struct blkio_group *blkg)
-{
-	unsigned long flags;
-
-	spin_lock_irqsave(&blkg->stats_lock, flags);
-	BUG_ON(blkio_blkg_idling(&blkg->stats));
-	blkg->stats.start_idle_time = sched_clock();
-	blkio_mark_blkg_idling(&blkg->stats);
-	spin_unlock_irqrestore(&blkg->stats_lock, flags);
-}
-EXPORT_SYMBOL_GPL(blkiocg_update_set_idle_time_stats);
-
-void blkiocg_update_idle_time_stats(struct blkio_group *blkg)
-{
-	unsigned long flags;
-	unsigned long long now;
-	struct blkio_group_stats *stats;
-
-	spin_lock_irqsave(&blkg->stats_lock, flags);
-	stats = &blkg->stats;
-	if (blkio_blkg_idling(stats)) {
-		now = sched_clock();
-		if (time_after64(now, stats->start_idle_time))
-			stats->idle_time += now - stats->start_idle_time;
-		blkio_clear_blkg_idling(stats);
-	}
-	spin_unlock_irqrestore(&blkg->stats_lock, flags);
-}
-EXPORT_SYMBOL_GPL(blkiocg_update_idle_time_stats);
-
-void blkiocg_update_avg_queue_size_stats(struct blkio_group *blkg)
-{
-	unsigned long flags;
-	struct blkio_group_stats *stats;
-
-	spin_lock_irqsave(&blkg->stats_lock, flags);
-	stats = &blkg->stats;
-	stats->avg_queue_size_sum +=
-			stats->stat_arr[BLKIO_STAT_QUEUED][BLKIO_STAT_READ] +
-			stats->stat_arr[BLKIO_STAT_QUEUED][BLKIO_STAT_WRITE];
-	stats->avg_queue_size_samples++;
-	blkio_update_group_wait_time(stats);
-	spin_unlock_irqrestore(&blkg->stats_lock, flags);
-}
-EXPORT_SYMBOL_GPL(blkiocg_update_avg_queue_size_stats);
-
-void blkiocg_set_start_empty_time(struct blkio_group *blkg)
-{
-	unsigned long flags;
-	struct blkio_group_stats *stats;
-
-	spin_lock_irqsave(&blkg->stats_lock, flags);
-	stats = &blkg->stats;
-
-	if (stats->stat_arr[BLKIO_STAT_QUEUED][BLKIO_STAT_READ] ||
-			stats->stat_arr[BLKIO_STAT_QUEUED][BLKIO_STAT_WRITE]) {
-		spin_unlock_irqrestore(&blkg->stats_lock, flags);
-		return;
-	}
-
-	/*
-	 * group is already marked empty. This can happen if cfqq got new
-	 * request in parent group and moved to this group while being added
-	 * to service tree. Just ignore the event and move on.
-	 */
-	if(blkio_blkg_empty(stats)) {
-		spin_unlock_irqrestore(&blkg->stats_lock, flags);
-		return;
-	}
-
-	stats->start_empty_time = sched_clock();
-	blkio_mark_blkg_empty(stats);
-	spin_unlock_irqrestore(&blkg->stats_lock, flags);
-}
-EXPORT_SYMBOL_GPL(blkiocg_set_start_empty_time);
-
-void blkiocg_update_dequeue_stats(struct blkio_group *blkg,
-			unsigned long dequeue)
-{
-	blkg->stats.dequeue += dequeue;
-}
-EXPORT_SYMBOL_GPL(blkiocg_update_dequeue_stats);
-#else
-static inline void blkio_set_start_group_wait_time(struct blkio_group *blkg,
-					struct blkio_group *curr_blkg) {}
-static inline void blkio_end_empty_time(struct blkio_group_stats *stats) {}
-#endif
-
-void blkiocg_update_io_add_stats(struct blkio_group *blkg,
-			struct blkio_group *curr_blkg, bool direction,
-			bool sync)
-{
-	unsigned long flags;
-
-	spin_lock_irqsave(&blkg->stats_lock, flags);
-	blkio_add_stat(blkg->stats.stat_arr[BLKIO_STAT_QUEUED], 1, direction,
-			sync);
-	blkio_end_empty_time(&blkg->stats);
-	blkio_set_start_group_wait_time(blkg, curr_blkg);
-	spin_unlock_irqrestore(&blkg->stats_lock, flags);
-}
-EXPORT_SYMBOL_GPL(blkiocg_update_io_add_stats);
-
-void blkiocg_update_io_remove_stats(struct blkio_group *blkg,
-						bool direction, bool sync)
-{
-	unsigned long flags;
-
-	spin_lock_irqsave(&blkg->stats_lock, flags);
-	blkio_check_and_dec_stat(blkg->stats.stat_arr[BLKIO_STAT_QUEUED],
-					direction, sync);
-	spin_unlock_irqrestore(&blkg->stats_lock, flags);
-}
-EXPORT_SYMBOL_GPL(blkiocg_update_io_remove_stats);
-
-void blkiocg_update_timeslice_used(struct blkio_group *blkg, unsigned long time,
-				unsigned long unaccounted_time)
-{
-	unsigned long flags;
-
-	spin_lock_irqsave(&blkg->stats_lock, flags);
-	blkg->stats.time += time;
-#ifdef CONFIG_DEBUG_BLK_CGROUP
-	blkg->stats.unaccounted_time += unaccounted_time;
-#endif
-	spin_unlock_irqrestore(&blkg->stats_lock, flags);
-}
-EXPORT_SYMBOL_GPL(blkiocg_update_timeslice_used);
-
-/*
- * should be called under rcu read lock or queue lock to make sure blkg pointer
- * is valid.
- */
-void blkiocg_update_dispatch_stats(struct blkio_group *blkg,
-				uint64_t bytes, bool direction, bool sync)
-{
-	struct blkio_group_stats_cpu *stats_cpu;
-	unsigned long flags;
-
-	/*
-	 * Disabling interrupts to provide mutual exclusion between two
-	 * writes on same cpu. It probably is not needed for 64bit. Not
-	 * optimizing that case yet.
-	 */
-	local_irq_save(flags);
-
-	stats_cpu = this_cpu_ptr(blkg->stats_cpu);
-
-	u64_stats_update_begin(&stats_cpu->syncp);
-	stats_cpu->sectors += bytes >> 9;
-	blkio_add_stat(stats_cpu->stat_arr_cpu[BLKIO_STAT_CPU_SERVICED],
-			1, direction, sync);
-	blkio_add_stat(stats_cpu->stat_arr_cpu[BLKIO_STAT_CPU_SERVICE_BYTES],
-			bytes, direction, sync);
-	u64_stats_update_end(&stats_cpu->syncp);
-	local_irq_restore(flags);
-}
-EXPORT_SYMBOL_GPL(blkiocg_update_dispatch_stats);
-
-void blkiocg_update_completion_stats(struct blkio_group *blkg,
-	uint64_t start_time, uint64_t io_start_time, bool direction, bool sync)
-{
-	struct blkio_group_stats *stats;
-	unsigned long flags;
-	unsigned long long now = sched_clock();
-
-	spin_lock_irqsave(&blkg->stats_lock, flags);
-	stats = &blkg->stats;
-	if (time_after64(now, io_start_time))
-		blkio_add_stat(stats->stat_arr[BLKIO_STAT_SERVICE_TIME],
-				now - io_start_time, direction, sync);
-	if (time_after64(io_start_time, start_time))
-		blkio_add_stat(stats->stat_arr[BLKIO_STAT_WAIT_TIME],
-				io_start_time - start_time, direction, sync);
-	spin_unlock_irqrestore(&blkg->stats_lock, flags);
-}
-EXPORT_SYMBOL_GPL(blkiocg_update_completion_stats);
-
-/*  Merged stats are per cpu.  */
-void blkiocg_update_io_merged_stats(struct blkio_group *blkg, bool direction,
-					bool sync)
-{
-	struct blkio_group_stats_cpu *stats_cpu;
-	unsigned long flags;
-
-	/*
-	 * Disabling interrupts to provide mutual exclusion between two
-	 * writes on same cpu. It probably is not needed for 64bit. Not
-	 * optimizing that case yet.
-	 */
-	local_irq_save(flags);
-
-	stats_cpu = this_cpu_ptr(blkg->stats_cpu);
-
-	u64_stats_update_begin(&stats_cpu->syncp);
-	blkio_add_stat(stats_cpu->stat_arr_cpu[BLKIO_STAT_CPU_MERGED], 1,
-				direction, sync);
-	u64_stats_update_end(&stats_cpu->syncp);
-	local_irq_restore(flags);
-}
-EXPORT_SYMBOL_GPL(blkiocg_update_io_merged_stats);
-
-/*
- * This function allocates the per cpu stats for blkio_group. Should be called
- * from sleepable context as alloc_per_cpu() requires that.
- */
-int blkio_alloc_blkg_stats(struct blkio_group *blkg)
-{
-	/* Allocate memory for per cpu stats */
-	blkg->stats_cpu = alloc_percpu(struct blkio_group_stats_cpu);
-	if (!blkg->stats_cpu)
-		return -ENOMEM;
-	return 0;
-}
-EXPORT_SYMBOL_GPL(blkio_alloc_blkg_stats);
-
-void blkiocg_add_blkio_group(struct blkio_cgroup *blkcg,
-		struct blkio_group *blkg, void *key, dev_t dev,
-		enum blkio_policy_id plid)
-{
-	unsigned long flags;
-
-	spin_lock_irqsave(&blkcg->lock, flags);
-	spin_lock_init(&blkg->stats_lock);
-	rcu_assign_pointer(blkg->key, key);
-	blkg->blkcg_id = css_id(&blkcg->css);
-	hlist_add_head_rcu(&blkg->blkcg_node, &blkcg->blkg_list);
-	blkg->plid = plid;
-	spin_unlock_irqrestore(&blkcg->lock, flags);
-	/* Need to take css reference ? */
-	cgroup_path(blkcg->css.cgroup, blkg->path, sizeof(blkg->path));
-	blkg->dev = dev;
-}
-EXPORT_SYMBOL_GPL(blkiocg_add_blkio_group);
-
-static void __blkiocg_del_blkio_group(struct blkio_group *blkg)
-{
-	hlist_del_init_rcu(&blkg->blkcg_node);
-	blkg->blkcg_id = 0;
-}
-
-/*
- * returns 0 if blkio_group was still on cgroup list. Otherwise returns 1
- * indicating that blk_group was unhashed by the time we got to it.
- */
-int blkiocg_del_blkio_group(struct blkio_group *blkg)
-{
-	struct blkio_cgroup *blkcg;
-	unsigned long flags;
-	struct cgroup_subsys_state *css;
-	int ret = 1;
-
-	rcu_read_lock();
-	css = css_lookup(&blkio_subsys, blkg->blkcg_id);
-	if (css) {
-		blkcg = container_of(css, struct blkio_cgroup, css);
-		spin_lock_irqsave(&blkcg->lock, flags);
-		if (!hlist_unhashed(&blkg->blkcg_node)) {
-			__blkiocg_del_blkio_group(blkg);
-			ret = 0;
+		/* alloc per-policy data and attach it to blkg */
+		pd = kzalloc_node(pol->pd_size, GFP_ATOMIC, q->node);
+		if (!pd) {
+			blkg_free(blkg);
+			return NULL;
 		}
-		spin_unlock_irqrestore(&blkcg->lock, flags);
+
+		blkg->pd[i] = pd;
+		pd->blkg = blkg;
 	}
 
-	rcu_read_unlock();
-	return ret;
+	/* invoke per-policy init */
+	for (i = 0; i < BLKCG_MAX_POLS; i++) {
+		struct blkcg_policy *pol = blkcg_policy[i];
+
+		if (blkcg_policy_enabled(blkg->q, pol))
+			pol->pd_init_fn(blkg);
+	}
+
+	return blkg;
 }
-EXPORT_SYMBOL_GPL(blkiocg_del_blkio_group);
 
-/* called under rcu_read_lock(). */
-struct blkio_group *blkiocg_lookup_group(struct blkio_cgroup *blkcg, void *key)
+static struct blkcg_gq *__blkg_lookup(struct blkcg *blkcg,
+				      struct request_queue *q)
 {
-	struct blkio_group *blkg;
-	struct hlist_node *n;
-	void *__key;
+	struct blkcg_gq *blkg;
 
-	hlist_for_each_entry_rcu(blkg, n, &blkcg->blkg_list, blkcg_node) {
-		__key = blkg->key;
-		if (__key == key)
-			return blkg;
-	}
+	blkg = rcu_dereference(blkcg->blkg_hint);
+	if (blkg && blkg->q == q)
+		return blkg;
+
+	/*
+	 * Hint didn't match.  Look up from the radix tree.  Note that we
+	 * may not be holding queue_lock and thus are not sure whether
+	 * @blkg from blkg_tree has already been removed or not, so we
+	 * can't update hint to the lookup result.  Leave it to the caller.
+	 */
+	blkg = radix_tree_lookup(&blkcg->blkg_tree, q->id);
+	if (blkg && blkg->q == q)
+		return blkg;
 
 	return NULL;
 }
-EXPORT_SYMBOL_GPL(blkiocg_lookup_group);
 
-static void blkio_reset_stats_cpu(struct blkio_group *blkg)
-{
-	struct blkio_group_stats_cpu *stats_cpu;
-	int i, j, k;
-	/*
-	 * Note: On 64 bit arch this should not be an issue. This has the
-	 * possibility of returning some inconsistent value on 32bit arch
-	 * as 64bit update on 32bit is non atomic. Taking care of this
-	 * corner case makes code very complicated, like sending IPIs to
-	 * cpus, taking care of stats of offline cpus etc.
-	 *
-	 * reset stats is anyway more of a debug feature and this sounds a
-	 * corner case. So I am not complicating the code yet until and
-	 * unless this becomes a real issue.
-	 */
-	for_each_possible_cpu(i) {
-		stats_cpu = per_cpu_ptr(blkg->stats_cpu, i);
-		stats_cpu->sectors = 0;
-		for(j = 0; j < BLKIO_STAT_CPU_NR; j++)
-			for (k = 0; k < BLKIO_STAT_TOTAL; k++)
-				stats_cpu->stat_arr_cpu[j][k] = 0;
-	}
-}
-
-static int
-blkiocg_reset_stats(struct cgroup *cgroup, struct cftype *cftype, u64 val)
-{
-	struct blkio_cgroup *blkcg;
-	struct blkio_group *blkg;
-	struct blkio_group_stats *stats;
-	struct hlist_node *n;
-	uint64_t queued[BLKIO_STAT_TOTAL];
-	int i;
-#ifdef CONFIG_DEBUG_BLK_CGROUP
-	bool idling, waiting, empty;
-	unsigned long long now = sched_clock();
-#endif
-
-	blkcg = cgroup_to_blkio_cgroup(cgroup);
-	spin_lock_irq(&blkcg->lock);
-	hlist_for_each_entry(blkg, n, &blkcg->blkg_list, blkcg_node) {
-		spin_lock(&blkg->stats_lock);
-		stats = &blkg->stats;
-#ifdef CONFIG_DEBUG_BLK_CGROUP
-		idling = blkio_blkg_idling(stats);
-		waiting = blkio_blkg_waiting(stats);
-		empty = blkio_blkg_empty(stats);
-#endif
-		for (i = 0; i < BLKIO_STAT_TOTAL; i++)
-			queued[i] = stats->stat_arr[BLKIO_STAT_QUEUED][i];
-		memset(stats, 0, sizeof(struct blkio_group_stats));
-		for (i = 0; i < BLKIO_STAT_TOTAL; i++)
-			stats->stat_arr[BLKIO_STAT_QUEUED][i] = queued[i];
-#ifdef CONFIG_DEBUG_BLK_CGROUP
-		if (idling) {
-			blkio_mark_blkg_idling(stats);
-			stats->start_idle_time = now;
-		}
-		if (waiting) {
-			blkio_mark_blkg_waiting(stats);
-			stats->start_group_wait_time = now;
-		}
-		if (empty) {
-			blkio_mark_blkg_empty(stats);
-			stats->start_empty_time = now;
-		}
-#endif
-		spin_unlock(&blkg->stats_lock);
-
-		/* Reset Per cpu stats which don't take blkg->stats_lock */
-		blkio_reset_stats_cpu(blkg);
-	}
-
-	spin_unlock_irq(&blkcg->lock);
-	return 0;
-}
-
-static void blkio_get_key_name(enum stat_sub_type type, dev_t dev, char *str,
-				int chars_left, bool diskname_only)
-{
-	snprintf(str, chars_left, "%d:%d", MAJOR(dev), MINOR(dev));
-	chars_left -= strlen(str);
-	if (chars_left <= 0) {
-		printk(KERN_WARNING
-			"Possibly incorrect cgroup stat display format");
-		return;
-	}
-	if (diskname_only)
-		return;
-	switch (type) {
-	case BLKIO_STAT_READ:
-		strlcat(str, " Read", chars_left);
-		break;
-	case BLKIO_STAT_WRITE:
-		strlcat(str, " Write", chars_left);
-		break;
-	case BLKIO_STAT_SYNC:
-		strlcat(str, " Sync", chars_left);
-		break;
-	case BLKIO_STAT_ASYNC:
-		strlcat(str, " Async", chars_left);
-		break;
-	case BLKIO_STAT_TOTAL:
-		strlcat(str, " Total", chars_left);
-		break;
-	default:
-		strlcat(str, " Invalid", chars_left);
-	}
-}
-
-static uint64_t blkio_fill_stat(char *str, int chars_left, uint64_t val,
-				struct cgroup_map_cb *cb, dev_t dev)
-{
-	blkio_get_key_name(0, dev, str, chars_left, true);
-	cb->fill(cb, str, val);
-	return val;
-}
-
-
-static uint64_t blkio_read_stat_cpu(struct blkio_group *blkg,
-			enum stat_type_cpu type, enum stat_sub_type sub_type)
-{
-	int cpu;
-	struct blkio_group_stats_cpu *stats_cpu;
-	u64 val = 0, tval;
-
-	for_each_possible_cpu(cpu) {
-		unsigned int start;
-		stats_cpu  = per_cpu_ptr(blkg->stats_cpu, cpu);
-
-		do {
-			start = u64_stats_fetch_begin(&stats_cpu->syncp);
-			if (type == BLKIO_STAT_CPU_SECTORS)
-				tval = stats_cpu->sectors;
-			else
-				tval = stats_cpu->stat_arr_cpu[type][sub_type];
-		} while(u64_stats_fetch_retry(&stats_cpu->syncp, start));
-
-		val += tval;
-	}
-
-	return val;
-}
-
-static uint64_t blkio_get_stat_cpu(struct blkio_group *blkg,
-		struct cgroup_map_cb *cb, dev_t dev, enum stat_type_cpu type)
-{
-	uint64_t disk_total, val;
-	char key_str[MAX_KEY_LEN];
-	enum stat_sub_type sub_type;
-
-	if (type == BLKIO_STAT_CPU_SECTORS) {
-		val = blkio_read_stat_cpu(blkg, type, 0);
-		return blkio_fill_stat(key_str, MAX_KEY_LEN - 1, val, cb, dev);
-	}
-
-	for (sub_type = BLKIO_STAT_READ; sub_type < BLKIO_STAT_TOTAL;
-			sub_type++) {
-		blkio_get_key_name(sub_type, dev, key_str, MAX_KEY_LEN, false);
-		val = blkio_read_stat_cpu(blkg, type, sub_type);
-		cb->fill(cb, key_str, val);
-	}
-
-	disk_total = blkio_read_stat_cpu(blkg, type, BLKIO_STAT_READ) +
-			blkio_read_stat_cpu(blkg, type, BLKIO_STAT_WRITE);
-
-	blkio_get_key_name(BLKIO_STAT_TOTAL, dev, key_str, MAX_KEY_LEN, false);
-	cb->fill(cb, key_str, disk_total);
-	return disk_total;
-}
-
-/* This should be called with blkg->stats_lock held */
-static uint64_t blkio_get_stat(struct blkio_group *blkg,
-		struct cgroup_map_cb *cb, dev_t dev, enum stat_type type)
-{
-	uint64_t disk_total;
-	char key_str[MAX_KEY_LEN];
-	enum stat_sub_type sub_type;
-
-	if (type == BLKIO_STAT_TIME)
-		return blkio_fill_stat(key_str, MAX_KEY_LEN - 1,
-					blkg->stats.time, cb, dev);
-#ifdef CONFIG_DEBUG_BLK_CGROUP
-	if (type == BLKIO_STAT_UNACCOUNTED_TIME)
-		return blkio_fill_stat(key_str, MAX_KEY_LEN - 1,
-					blkg->stats.unaccounted_time, cb, dev);
-	if (type == BLKIO_STAT_AVG_QUEUE_SIZE) {
-		uint64_t sum = blkg->stats.avg_queue_size_sum;
-		uint64_t samples = blkg->stats.avg_queue_size_samples;
-		if (samples)
-			do_div(sum, samples);
-		else
-			sum = 0;
-		return blkio_fill_stat(key_str, MAX_KEY_LEN - 1, sum, cb, dev);
-	}
-	if (type == BLKIO_STAT_GROUP_WAIT_TIME)
-		return blkio_fill_stat(key_str, MAX_KEY_LEN - 1,
-					blkg->stats.group_wait_time, cb, dev);
-	if (type == BLKIO_STAT_IDLE_TIME)
-		return blkio_fill_stat(key_str, MAX_KEY_LEN - 1,
-					blkg->stats.idle_time, cb, dev);
-	if (type == BLKIO_STAT_EMPTY_TIME)
-		return blkio_fill_stat(key_str, MAX_KEY_LEN - 1,
-					blkg->stats.empty_time, cb, dev);
-	if (type == BLKIO_STAT_DEQUEUE)
-		return blkio_fill_stat(key_str, MAX_KEY_LEN - 1,
-					blkg->stats.dequeue, cb, dev);
-#endif
-
-	for (sub_type = BLKIO_STAT_READ; sub_type < BLKIO_STAT_TOTAL;
-			sub_type++) {
-		blkio_get_key_name(sub_type, dev, key_str, MAX_KEY_LEN, false);
-		cb->fill(cb, key_str, blkg->stats.stat_arr[type][sub_type]);
-	}
-	disk_total = blkg->stats.stat_arr[type][BLKIO_STAT_READ] +
-			blkg->stats.stat_arr[type][BLKIO_STAT_WRITE];
-	blkio_get_key_name(BLKIO_STAT_TOTAL, dev, key_str, MAX_KEY_LEN, false);
-	cb->fill(cb, key_str, disk_total);
-	return disk_total;
-}
-
-static int blkio_policy_parse_and_set(char *buf,
-	struct blkio_policy_node *newpn, enum blkio_policy_id plid, int fileid)
-{
-	struct gendisk *disk = NULL;
-	char *s[4], *p, *major_s = NULL, *minor_s = NULL;
-	unsigned long major, minor;
-	int i = 0, ret = -EINVAL;
-	int part;
-	dev_t dev;
-	u64 temp;
-
-	memset(s, 0, sizeof(s));
-
-	while ((p = strsep(&buf, " ")) != NULL) {
-		if (!*p)
-			continue;
-
-		s[i++] = p;
-
-		/* Prevent from inputing too many things */
-		if (i == 3)
-			break;
-	}
-
-	if (i != 2)
-		goto out;
-
-	p = strsep(&s[0], ":");
-	if (p != NULL)
-		major_s = p;
-	else
-		goto out;
-
-	minor_s = s[0];
-	if (!minor_s)
-		goto out;
-
-	if (strict_strtoul(major_s, 10, &major))
-		goto out;
-
-	if (strict_strtoul(minor_s, 10, &minor))
-		goto out;
-
-	dev = MKDEV(major, minor);
-
-	if (strict_strtoull(s[1], 10, &temp))
-		goto out;
-
-	/* For rule removal, do not check for device presence. */
-	if (temp) {
-		disk = get_gendisk(dev, &part);
-		if (!disk || part) {
-			ret = -ENODEV;
-			goto out;
-		}
-	}
-
-	newpn->dev = dev;
-
-	switch (plid) {
-	case BLKIO_POLICY_PROP:
-		if ((temp < BLKIO_WEIGHT_MIN && temp > 0) ||
-		     temp > BLKIO_WEIGHT_MAX)
-			goto out;
-
-		newpn->plid = plid;
-		newpn->fileid = fileid;
-		newpn->val.weight = temp;
-		break;
-	case BLKIO_POLICY_THROTL:
-		switch(fileid) {
-		case BLKIO_THROTL_read_bps_device:
-		case BLKIO_THROTL_write_bps_device:
-			newpn->plid = plid;
-			newpn->fileid = fileid;
-			newpn->val.bps = temp;
-			break;
-		case BLKIO_THROTL_read_iops_device:
-		case BLKIO_THROTL_write_iops_device:
-			if (temp > THROTL_IOPS_MAX)
-				goto out;
-
-			newpn->plid = plid;
-			newpn->fileid = fileid;
-			newpn->val.iops = (unsigned int)temp;
-			break;
-		}
-		break;
-	default:
-		BUG();
-	}
-	ret = 0;
-out:
-	put_disk(disk);
-	return ret;
-}
-
-unsigned int blkcg_get_weight(struct blkio_cgroup *blkcg,
-			      dev_t dev)
-{
-	struct blkio_policy_node *pn;
-	unsigned long flags;
-	unsigned int weight;
-
-	spin_lock_irqsave(&blkcg->lock, flags);
-
-	pn = blkio_policy_search_node(blkcg, dev, BLKIO_POLICY_PROP,
-				BLKIO_PROP_weight_device);
-	if (pn)
-		weight = pn->val.weight;
-	else
-		weight = blkcg->weight;
-
-	spin_unlock_irqrestore(&blkcg->lock, flags);
-
-	return weight;
-}
-EXPORT_SYMBOL_GPL(blkcg_get_weight);
-
-uint64_t blkcg_get_read_bps(struct blkio_cgroup *blkcg, dev_t dev)
-{
-	struct blkio_policy_node *pn;
-	unsigned long flags;
-	uint64_t bps = -1;
-
-	spin_lock_irqsave(&blkcg->lock, flags);
-	pn = blkio_policy_search_node(blkcg, dev, BLKIO_POLICY_THROTL,
-				BLKIO_THROTL_read_bps_device);
-	if (pn)
-		bps = pn->val.bps;
-	spin_unlock_irqrestore(&blkcg->lock, flags);
-
-	return bps;
-}
-
-uint64_t blkcg_get_write_bps(struct blkio_cgroup *blkcg, dev_t dev)
-{
-	struct blkio_policy_node *pn;
-	unsigned long flags;
-	uint64_t bps = -1;
-
-	spin_lock_irqsave(&blkcg->lock, flags);
-	pn = blkio_policy_search_node(blkcg, dev, BLKIO_POLICY_THROTL,
-				BLKIO_THROTL_write_bps_device);
-	if (pn)
-		bps = pn->val.bps;
-	spin_unlock_irqrestore(&blkcg->lock, flags);
-
-	return bps;
-}
-
-unsigned int blkcg_get_read_iops(struct blkio_cgroup *blkcg, dev_t dev)
-{
-	struct blkio_policy_node *pn;
-	unsigned long flags;
-	unsigned int iops = -1;
-
-	spin_lock_irqsave(&blkcg->lock, flags);
-	pn = blkio_policy_search_node(blkcg, dev, BLKIO_POLICY_THROTL,
-				BLKIO_THROTL_read_iops_device);
-	if (pn)
-		iops = pn->val.iops;
-	spin_unlock_irqrestore(&blkcg->lock, flags);
-
-	return iops;
-}
-
-unsigned int blkcg_get_write_iops(struct blkio_cgroup *blkcg, dev_t dev)
-{
-	struct blkio_policy_node *pn;
-	unsigned long flags;
-	unsigned int iops = -1;
-
-	spin_lock_irqsave(&blkcg->lock, flags);
-	pn = blkio_policy_search_node(blkcg, dev, BLKIO_POLICY_THROTL,
-				BLKIO_THROTL_write_iops_device);
-	if (pn)
-		iops = pn->val.iops;
-	spin_unlock_irqrestore(&blkcg->lock, flags);
-
-	return iops;
-}
-
-/* Checks whether user asked for deleting a policy rule */
-static bool blkio_delete_rule_command(struct blkio_policy_node *pn)
-{
-	switch(pn->plid) {
-	case BLKIO_POLICY_PROP:
-		if (pn->val.weight == 0)
-			return 1;
-		break;
-	case BLKIO_POLICY_THROTL:
-		switch(pn->fileid) {
-		case BLKIO_THROTL_read_bps_device:
-		case BLKIO_THROTL_write_bps_device:
-			if (pn->val.bps == 0)
-				return 1;
-			break;
-		case BLKIO_THROTL_read_iops_device:
-		case BLKIO_THROTL_write_iops_device:
-			if (pn->val.iops == 0)
-				return 1;
-		}
-		break;
-	default:
-		BUG();
-	}
-
-	return 0;
-}
-
-static void blkio_update_policy_rule(struct blkio_policy_node *oldpn,
-					struct blkio_policy_node *newpn)
-{
-	switch(oldpn->plid) {
-	case BLKIO_POLICY_PROP:
-		oldpn->val.weight = newpn->val.weight;
-		break;
-	case BLKIO_POLICY_THROTL:
-		switch(newpn->fileid) {
-		case BLKIO_THROTL_read_bps_device:
-		case BLKIO_THROTL_write_bps_device:
-			oldpn->val.bps = newpn->val.bps;
-			break;
-		case BLKIO_THROTL_read_iops_device:
-		case BLKIO_THROTL_write_iops_device:
-			oldpn->val.iops = newpn->val.iops;
-		}
-		break;
-	default:
-		BUG();
-	}
-}
-
-/*
- * Some rules/values in blkg have changed. Propagate those to respective
- * policies.
+/**
+ * blkg_lookup - lookup blkg for the specified blkcg - q pair
+ * @blkcg: blkcg of interest
+ * @q: request_queue of interest
+ *
+ * Lookup blkg for the @blkcg - @q pair.  This function should be called
+ * under RCU read lock and is guaranteed to return %NULL if @q is bypassing
+ * - see blk_queue_bypass_start() for details.
  */
-static void blkio_update_blkg_policy(struct blkio_cgroup *blkcg,
-		struct blkio_group *blkg, struct blkio_policy_node *pn)
+struct blkcg_gq *blkg_lookup(struct blkcg *blkcg, struct request_queue *q)
 {
-	unsigned int weight, iops;
-	u64 bps;
+	WARN_ON_ONCE(!rcu_read_lock_held());
 
-	switch(pn->plid) {
-	case BLKIO_POLICY_PROP:
-		weight = pn->val.weight ? pn->val.weight :
-				blkcg->weight;
-		blkio_update_group_weight(blkg, weight);
-		break;
-	case BLKIO_POLICY_THROTL:
-		switch(pn->fileid) {
-		case BLKIO_THROTL_read_bps_device:
-		case BLKIO_THROTL_write_bps_device:
-			bps = pn->val.bps ? pn->val.bps : (-1);
-			blkio_update_group_bps(blkg, bps, pn->fileid);
-			break;
-		case BLKIO_THROTL_read_iops_device:
-		case BLKIO_THROTL_write_iops_device:
-			iops = pn->val.iops ? pn->val.iops : (-1);
-			blkio_update_group_iops(blkg, iops, pn->fileid);
-			break;
-		}
-		break;
-	default:
-		BUG();
-	}
+	if (unlikely(blk_queue_bypass(q)))
+		return NULL;
+	return __blkg_lookup(blkcg, q);
 }
+EXPORT_SYMBOL_GPL(blkg_lookup);
 
-/*
- * A policy node rule has been updated. Propagate this update to all the
- * block groups which might be affected by this update.
- */
-static void blkio_update_policy_node_blkg(struct blkio_cgroup *blkcg,
-				struct blkio_policy_node *pn)
+static struct blkcg_gq *__blkg_lookup_create(struct blkcg *blkcg,
+					     struct request_queue *q)
+	__releases(q->queue_lock) __acquires(q->queue_lock)
 {
-	struct blkio_group *blkg;
-	struct hlist_node *n;
+	struct blkcg_gq *blkg;
+	int ret;
 
-	spin_lock(&blkio_list_lock);
-	spin_lock_irq(&blkcg->lock);
+	WARN_ON_ONCE(!rcu_read_lock_held());
+	lockdep_assert_held(q->queue_lock);
 
-	hlist_for_each_entry(blkg, n, &blkcg->blkg_list, blkcg_node) {
-		if (pn->dev != blkg->dev || pn->plid != blkg->plid)
-			continue;
-		blkio_update_blkg_policy(blkcg, blkg, pn);
+	/* lookup and update hint on success, see __blkg_lookup() for details */
+	blkg = __blkg_lookup(blkcg, q);
+	if (blkg) {
+		rcu_assign_pointer(blkcg->blkg_hint, blkg);
+		return blkg;
 	}
 
-	spin_unlock_irq(&blkcg->lock);
-	spin_unlock(&blkio_list_lock);
-}
+	/* blkg holds a reference to blkcg */
+	if (!css_tryget(&blkcg->css))
+		return ERR_PTR(-EINVAL);
 
-static int blkiocg_file_write(struct cgroup *cgrp, struct cftype *cft,
- 				       const char *buffer)
-{
-	int ret = 0;
-	char *buf;
-	struct blkio_policy_node *newpn, *pn;
-	struct blkio_cgroup *blkcg;
-	int keep_newpn = 0;
-	enum blkio_policy_id plid = BLKIOFILE_POLICY(cft->private);
-	int fileid = BLKIOFILE_ATTR(cft->private);
+	/* allocate */
+	ret = -ENOMEM;
+	blkg = blkg_alloc(blkcg, q);
+	if (unlikely(!blkg))
+		goto err_put;
 
-	buf = kstrdup(buffer, GFP_KERNEL);
-	if (!buf)
-		return -ENOMEM;
-
-	newpn = kzalloc(sizeof(*newpn), GFP_KERNEL);
-	if (!newpn) {
-		ret = -ENOMEM;
-		goto free_buf;
-	}
-
-	ret = blkio_policy_parse_and_set(buf, newpn, plid, fileid);
+	/* insert */
+	ret = radix_tree_preload(GFP_ATOMIC);
 	if (ret)
-		goto free_newpn;
+		goto err_free;
 
-	blkcg = cgroup_to_blkio_cgroup(cgrp);
+	spin_lock(&blkcg->lock);
+	ret = radix_tree_insert(&blkcg->blkg_tree, q->id, blkg);
+	if (likely(!ret)) {
+		hlist_add_head_rcu(&blkg->blkcg_node, &blkcg->blkg_list);
+		list_add(&blkg->q_node, &q->blkg_list);
+	}
+	spin_unlock(&blkcg->lock);
 
+	radix_tree_preload_end();
+
+	if (!ret)
+		return blkg;
+err_free:
+	blkg_free(blkg);
+err_put:
+	css_put(&blkcg->css);
+	return ERR_PTR(ret);
+}
+
+struct blkcg_gq *blkg_lookup_create(struct blkcg *blkcg,
+				    struct request_queue *q)
+{
+	/*
+	 * This could be the first entry point of blkcg implementation and
+	 * we shouldn't allow anything to go through for a bypassing queue.
+	 */
+	if (unlikely(blk_queue_bypass(q)))
+		return ERR_PTR(blk_queue_dead(q) ? -EINVAL : -EBUSY);
+	return __blkg_lookup_create(blkcg, q);
+}
+EXPORT_SYMBOL_GPL(blkg_lookup_create);
+
+static void blkg_destroy(struct blkcg_gq *blkg)
+{
+	struct request_queue *q = blkg->q;
+	struct blkcg *blkcg = blkg->blkcg;
+
+	lockdep_assert_held(q->queue_lock);
+	lockdep_assert_held(&blkcg->lock);
+
+	/* Something wrong if we are trying to remove same group twice */
+	WARN_ON_ONCE(list_empty(&blkg->q_node));
+	WARN_ON_ONCE(hlist_unhashed(&blkg->blkcg_node));
+
+	radix_tree_delete(&blkcg->blkg_tree, blkg->q->id);
+	list_del_init(&blkg->q_node);
+	hlist_del_init_rcu(&blkg->blkcg_node);
+
+	/*
+	 * Both setting the lookup hint to and clearing it from @blkg are done
+	 * under queue_lock.  If it's not pointing to @blkg now, it never
+	 * will.  Hint assignment itself can race safely.
+	 */
+	if (rcu_dereference_raw(blkcg->blkg_hint) == blkg)
+		rcu_assign_pointer(blkcg->blkg_hint, NULL);
+
+	/*
+	 * Put the reference taken at the time of creation so that when all
+	 * queues are gone, group can be destroyed.
+	 */
+	blkg_put(blkg);
+}
+
+/**
+ * blkg_destroy_all - destroy all blkgs associated with a request_queue
+ * @q: request_queue of interest
+ *
+ * Destroy all blkgs associated with @q.
+ */
+static void blkg_destroy_all(struct request_queue *q)
+{
+	struct blkcg_gq *blkg, *n;
+
+	lockdep_assert_held(q->queue_lock);
+
+	list_for_each_entry_safe(blkg, n, &q->blkg_list, q_node) {
+		struct blkcg *blkcg = blkg->blkcg;
+
+		spin_lock(&blkcg->lock);
+		blkg_destroy(blkg);
+		spin_unlock(&blkcg->lock);
+	}
+}
+
+static void blkg_rcu_free(struct rcu_head *rcu_head)
+{
+	blkg_free(container_of(rcu_head, struct blkcg_gq, rcu_head));
+}
+
+void __blkg_release(struct blkcg_gq *blkg)
+{
+	/* release the extra blkcg reference this blkg has been holding */
+	css_put(&blkg->blkcg->css);
+
+	/*
+	 * A group is freed in rcu manner. But having an rcu lock does not
+	 * mean that one can access all the fields of blkg and assume these
+	 * are valid. For example, don't try to follow throtl_data and
+	 * request queue links.
+	 *
+	 * Having a reference to blkg under an rcu lock allows access only to
+	 * values local to groups like group stats and group rate limits.
+	 */
+	call_rcu(&blkg->rcu_head, blkg_rcu_free);
+}
+EXPORT_SYMBOL_GPL(__blkg_release);
+
+static int blkcg_reset_stats(struct cgroup *cgroup, struct cftype *cftype,
+			     u64 val)
+{
+	struct blkcg *blkcg = cgroup_to_blkcg(cgroup);
+	struct blkcg_gq *blkg;
+	struct hlist_node *n;
+	int i;
+
+	mutex_lock(&blkcg_pol_mutex);
 	spin_lock_irq(&blkcg->lock);
 
-	pn = blkio_policy_search_node(blkcg, newpn->dev, plid, fileid);
-	if (!pn) {
-		if (!blkio_delete_rule_command(newpn)) {
-			blkio_policy_insert_node(blkcg, newpn);
-			keep_newpn = 1;
+	/*
+	 * Note that stat reset is racy - it doesn't synchronize against
+	 * stat updates.  This is a debug feature which shouldn't exist
+	 * anyway.  If you get hit by a race, retry.
+	 */
+	hlist_for_each_entry(blkg, n, &blkcg->blkg_list, blkcg_node) {
+		for (i = 0; i < BLKCG_MAX_POLS; i++) {
+			struct blkcg_policy *pol = blkcg_policy[i];
+
+			if (blkcg_policy_enabled(blkg->q, pol) &&
+			    pol->pd_reset_stats_fn)
+				pol->pd_reset_stats_fn(blkg);
 		}
-		spin_unlock_irq(&blkcg->lock);
-		goto update_io_group;
 	}
 
-	if (blkio_delete_rule_command(newpn)) {
-		blkio_policy_delete_node(pn);
-		kfree(pn);
-		spin_unlock_irq(&blkcg->lock);
-		goto update_io_group;
-	}
+	spin_unlock_irq(&blkcg->lock);
+	mutex_unlock(&blkcg_pol_mutex);
+	return 0;
+}
+
+static const char *blkg_dev_name(struct blkcg_gq *blkg)
+{
+	/* some drivers (floppy) instantiate a queue w/o disk registered */
+	if (blkg->q->backing_dev_info.dev)
+		return dev_name(blkg->q->backing_dev_info.dev);
+	return NULL;
+}
+
+/**
+ * blkcg_print_blkgs - helper for printing per-blkg data
+ * @sf: seq_file to print to
+ * @blkcg: blkcg of interest
+ * @prfill: fill function to print out a blkg
+ * @pol: policy in question
+ * @data: data to be passed to @prfill
+ * @show_total: to print out sum of prfill return values or not
+ *
+ * This function invokes @prfill on each blkg of @blkcg if pd for the
+ * policy specified by @pol exists.  @prfill is invoked with @sf, the
+ * policy data and @data.  If @show_total is %true, the sum of the return
+ * values from @prfill is printed with "Total" label at the end.
+ *
+ * This is to be used to construct print functions for
+ * cftype->read_seq_string method.
+ */
+void blkcg_print_blkgs(struct seq_file *sf, struct blkcg *blkcg,
+		       u64 (*prfill)(struct seq_file *,
+				     struct blkg_policy_data *, int),
+		       const struct blkcg_policy *pol, int data,
+		       bool show_total)
+{
+	struct blkcg_gq *blkg;
+	struct hlist_node *n;
+	u64 total = 0;
+
+	spin_lock_irq(&blkcg->lock);
+	hlist_for_each_entry(blkg, n, &blkcg->blkg_list, blkcg_node)
+		if (blkcg_policy_enabled(blkg->q, pol))
+			total += prfill(sf, blkg->pd[pol->plid], data);
 	spin_unlock_irq(&blkcg->lock);
 
-	blkio_update_policy_rule(pn, newpn);
-
-update_io_group:
-	blkio_update_policy_node_blkg(blkcg, newpn);
-
-free_newpn:
-	if (!keep_newpn)
-		kfree(newpn);
-free_buf:
-	kfree(buf);
-	return ret;
-}
-
-static void
-blkio_print_policy_node(struct seq_file *m, struct blkio_policy_node *pn)
-{
-	switch(pn->plid) {
-		case BLKIO_POLICY_PROP:
-			if (pn->fileid == BLKIO_PROP_weight_device)
-				seq_printf(m, "%u:%u\t%u\n", MAJOR(pn->dev),
-					MINOR(pn->dev), pn->val.weight);
-			break;
-		case BLKIO_POLICY_THROTL:
-			switch(pn->fileid) {
-			case BLKIO_THROTL_read_bps_device:
-			case BLKIO_THROTL_write_bps_device:
-				seq_printf(m, "%u:%u\t%llu\n", MAJOR(pn->dev),
-					MINOR(pn->dev), pn->val.bps);
-				break;
-			case BLKIO_THROTL_read_iops_device:
-			case BLKIO_THROTL_write_iops_device:
-				seq_printf(m, "%u:%u\t%u\n", MAJOR(pn->dev),
-					MINOR(pn->dev), pn->val.iops);
-				break;
-			}
-			break;
-		default:
-			BUG();
-	}
-}
-
-/* cgroup files which read their data from policy nodes end up here */
-static void blkio_read_policy_node_files(struct cftype *cft,
-			struct blkio_cgroup *blkcg, struct seq_file *m)
-{
-	struct blkio_policy_node *pn;
-
-	if (!list_empty(&blkcg->policy_list)) {
-		spin_lock_irq(&blkcg->lock);
-		list_for_each_entry(pn, &blkcg->policy_list, node) {
-			if (!pn_matches_cftype(cft, pn))
-				continue;
-			blkio_print_policy_node(m, pn);
-		}
-		spin_unlock_irq(&blkcg->lock);
-	}
-}
-
-static int blkiocg_file_read(struct cgroup *cgrp, struct cftype *cft,
-				struct seq_file *m)
-{
-	struct blkio_cgroup *blkcg;
-	enum blkio_policy_id plid = BLKIOFILE_POLICY(cft->private);
-	int name = BLKIOFILE_ATTR(cft->private);
-
-	blkcg = cgroup_to_blkio_cgroup(cgrp);
-
-	switch(plid) {
-	case BLKIO_POLICY_PROP:
-		switch(name) {
-		case BLKIO_PROP_weight_device:
-			blkio_read_policy_node_files(cft, blkcg, m);
-			return 0;
-		default:
-			BUG();
-		}
-		break;
-	case BLKIO_POLICY_THROTL:
-		switch(name){
-		case BLKIO_THROTL_read_bps_device:
-		case BLKIO_THROTL_write_bps_device:
-		case BLKIO_THROTL_read_iops_device:
-		case BLKIO_THROTL_write_iops_device:
-			blkio_read_policy_node_files(cft, blkcg, m);
-			return 0;
-		default:
-			BUG();
-		}
-		break;
-	default:
-		BUG();
-	}
-
-	return 0;
-}
-
-static int blkio_read_blkg_stats(struct blkio_cgroup *blkcg,
-		struct cftype *cft, struct cgroup_map_cb *cb,
-		enum stat_type type, bool show_total, bool pcpu)
-{
-	struct blkio_group *blkg;
-	struct hlist_node *n;
-	uint64_t cgroup_total = 0;
-
-	rcu_read_lock();
-	hlist_for_each_entry_rcu(blkg, n, &blkcg->blkg_list, blkcg_node) {
-		if (blkg->dev) {
-			if (!cftype_blkg_same_policy(cft, blkg))
-				continue;
-			if (pcpu)
-				cgroup_total += blkio_get_stat_cpu(blkg, cb,
-						blkg->dev, type);
-			else {
-				spin_lock_irq(&blkg->stats_lock);
-				cgroup_total += blkio_get_stat(blkg, cb,
-						blkg->dev, type);
-				spin_unlock_irq(&blkg->stats_lock);
-			}
-		}
-	}
 	if (show_total)
-		cb->fill(cb, "Total", cgroup_total);
-	rcu_read_unlock();
-	return 0;
+		seq_printf(sf, "Total %llu\n", (unsigned long long)total);
+}
+EXPORT_SYMBOL_GPL(blkcg_print_blkgs);
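
As a hedged usage sketch (not part of the patch), a policy's cftype->read_seq_string callback would typically look like the following; "example_policy" is a hypothetical struct blkcg_policy and cft->private is assumed to carry the offset of the stat inside the policy's per-blkg struct:

	static int example_print_stat(struct cgroup *cgrp, struct cftype *cft,
				      struct seq_file *sf)
	{
		struct blkcg *blkcg = cgroup_to_blkcg(cgrp);

		/* cft->private == offsetof(struct example_group, time), hypothetically */
		blkcg_print_blkgs(sf, blkcg, blkg_prfill_stat, &example_policy,
				  cft->private, false);
		return 0;
	}
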
+
+/**
+ * __blkg_prfill_u64 - prfill helper for a single u64 value
+ * @sf: seq_file to print to
+ * @pd: policy private data of interest
+ * @v: value to print
+ *
+ * Print @v to @sf for the device associated with @pd.
+ */
+u64 __blkg_prfill_u64(struct seq_file *sf, struct blkg_policy_data *pd, u64 v)
+{
+	const char *dname = blkg_dev_name(pd->blkg);
+
+	if (!dname)
+		return 0;
+
+	seq_printf(sf, "%s %llu\n", dname, (unsigned long long)v);
+	return v;
+}
+EXPORT_SYMBOL_GPL(__blkg_prfill_u64);
+
+/**
+ * __blkg_prfill_rwstat - prfill helper for a blkg_rwstat
+ * @sf: seq_file to print to
+ * @pd: policy private data of interest
+ * @rwstat: rwstat to print
+ *
+ * Print @rwstat to @sf for the device associated with @pd.
+ */
+u64 __blkg_prfill_rwstat(struct seq_file *sf, struct blkg_policy_data *pd,
+			 const struct blkg_rwstat *rwstat)
+{
+	static const char *rwstr[] = {
+		[BLKG_RWSTAT_READ]	= "Read",
+		[BLKG_RWSTAT_WRITE]	= "Write",
+		[BLKG_RWSTAT_SYNC]	= "Sync",
+		[BLKG_RWSTAT_ASYNC]	= "Async",
+	};
+	const char *dname = blkg_dev_name(pd->blkg);
+	u64 v;
+	int i;
+
+	if (!dname)
+		return 0;
+
+	for (i = 0; i < BLKG_RWSTAT_NR; i++)
+		seq_printf(sf, "%s %s %llu\n", dname, rwstr[i],
+			   (unsigned long long)rwstat->cnt[i]);
+
+	v = rwstat->cnt[BLKG_RWSTAT_READ] + rwstat->cnt[BLKG_RWSTAT_WRITE];
+	seq_printf(sf, "%s Total %llu\n", dname, (unsigned long long)v);
+	return v;
 }
 
-/* All map kind of cgroup file get serviced by this function */
-static int blkiocg_file_read_map(struct cgroup *cgrp, struct cftype *cft,
-				struct cgroup_map_cb *cb)
+/**
+ * blkg_prfill_stat - prfill callback for blkg_stat
+ * @sf: seq_file to print to
+ * @pd: policy private data of interest
+ * @off: offset to the blkg_stat in @pd
+ *
+ * prfill callback for printing a blkg_stat.
+ */
+u64 blkg_prfill_stat(struct seq_file *sf, struct blkg_policy_data *pd, int off)
 {
-	struct blkio_cgroup *blkcg;
-	enum blkio_policy_id plid = BLKIOFILE_POLICY(cft->private);
-	int name = BLKIOFILE_ATTR(cft->private);
-
-	blkcg = cgroup_to_blkio_cgroup(cgrp);
-
-	switch(plid) {
-	case BLKIO_POLICY_PROP:
-		switch(name) {
-		case BLKIO_PROP_time:
-			return blkio_read_blkg_stats(blkcg, cft, cb,
-						BLKIO_STAT_TIME, 0, 0);
-		case BLKIO_PROP_sectors:
-			return blkio_read_blkg_stats(blkcg, cft, cb,
-						BLKIO_STAT_CPU_SECTORS, 0, 1);
-		case BLKIO_PROP_io_service_bytes:
-			return blkio_read_blkg_stats(blkcg, cft, cb,
-					BLKIO_STAT_CPU_SERVICE_BYTES, 1, 1);
-		case BLKIO_PROP_io_serviced:
-			return blkio_read_blkg_stats(blkcg, cft, cb,
-						BLKIO_STAT_CPU_SERVICED, 1, 1);
-		case BLKIO_PROP_io_service_time:
-			return blkio_read_blkg_stats(blkcg, cft, cb,
-						BLKIO_STAT_SERVICE_TIME, 1, 0);
-		case BLKIO_PROP_io_wait_time:
-			return blkio_read_blkg_stats(blkcg, cft, cb,
-						BLKIO_STAT_WAIT_TIME, 1, 0);
-		case BLKIO_PROP_io_merged:
-			return blkio_read_blkg_stats(blkcg, cft, cb,
-						BLKIO_STAT_CPU_MERGED, 1, 1);
-		case BLKIO_PROP_io_queued:
-			return blkio_read_blkg_stats(blkcg, cft, cb,
-						BLKIO_STAT_QUEUED, 1, 0);
-#ifdef CONFIG_DEBUG_BLK_CGROUP
-		case BLKIO_PROP_unaccounted_time:
-			return blkio_read_blkg_stats(blkcg, cft, cb,
-					BLKIO_STAT_UNACCOUNTED_TIME, 0, 0);
-		case BLKIO_PROP_dequeue:
-			return blkio_read_blkg_stats(blkcg, cft, cb,
-						BLKIO_STAT_DEQUEUE, 0, 0);
-		case BLKIO_PROP_avg_queue_size:
-			return blkio_read_blkg_stats(blkcg, cft, cb,
-					BLKIO_STAT_AVG_QUEUE_SIZE, 0, 0);
-		case BLKIO_PROP_group_wait_time:
-			return blkio_read_blkg_stats(blkcg, cft, cb,
-					BLKIO_STAT_GROUP_WAIT_TIME, 0, 0);
-		case BLKIO_PROP_idle_time:
-			return blkio_read_blkg_stats(blkcg, cft, cb,
-						BLKIO_STAT_IDLE_TIME, 0, 0);
-		case BLKIO_PROP_empty_time:
-			return blkio_read_blkg_stats(blkcg, cft, cb,
-						BLKIO_STAT_EMPTY_TIME, 0, 0);
-#endif
-		default:
-			BUG();
-		}
-		break;
-	case BLKIO_POLICY_THROTL:
-		switch(name){
-		case BLKIO_THROTL_io_service_bytes:
-			return blkio_read_blkg_stats(blkcg, cft, cb,
-						BLKIO_STAT_CPU_SERVICE_BYTES, 1, 1);
-		case BLKIO_THROTL_io_serviced:
-			return blkio_read_blkg_stats(blkcg, cft, cb,
-						BLKIO_STAT_CPU_SERVICED, 1, 1);
-		default:
-			BUG();
-		}
-		break;
-	default:
-		BUG();
-	}
-
-	return 0;
+	return __blkg_prfill_u64(sf, pd, blkg_stat_read((void *)pd + off));
 }
+EXPORT_SYMBOL_GPL(blkg_prfill_stat);
 
-static int blkio_weight_write(struct blkio_cgroup *blkcg, u64 val)
+/**
+ * blkg_prfill_rwstat - prfill callback for blkg_rwstat
+ * @sf: seq_file to print to
+ * @pd: policy private data of interest
+ * @off: offset to the blkg_rwstat in @pd
+ *
+ * prfill callback for printing a blkg_rwstat.
+ */
+u64 blkg_prfill_rwstat(struct seq_file *sf, struct blkg_policy_data *pd,
+		       int off)
 {
-	struct blkio_group *blkg;
-	struct hlist_node *n;
-	struct blkio_policy_node *pn;
+	struct blkg_rwstat rwstat = blkg_rwstat_read((void *)pd + off);
 
-	if (val < BLKIO_WEIGHT_MIN || val > BLKIO_WEIGHT_MAX)
+	return __blkg_prfill_rwstat(sf, pd, &rwstat);
+}
+EXPORT_SYMBOL_GPL(blkg_prfill_rwstat);
+
+/**
+ * blkg_conf_prep - parse and prepare for per-blkg config update
+ * @blkcg: target block cgroup
+ * @pol: target policy
+ * @input: input string
+ * @ctx: blkg_conf_ctx to be filled
+ *
+ * Parse per-blkg config update from @input and initialize @ctx with the
+ * result.  @ctx->blkg points to the blkg to be updated and @ctx->v the new
+ * value.  This function returns with RCU read lock and queue lock held and
+ * must be paired with blkg_conf_finish().
+ */
+int blkg_conf_prep(struct blkcg *blkcg, const struct blkcg_policy *pol,
+		   const char *input, struct blkg_conf_ctx *ctx)
+	__acquires(rcu) __acquires(disk->queue->queue_lock)
+{
+	struct gendisk *disk;
+	struct blkcg_gq *blkg;
+	unsigned int major, minor;
+	unsigned long long v;
+	int part, ret;
+
+	if (sscanf(input, "%u:%u %llu", &major, &minor, &v) != 3)
 		return -EINVAL;
 
-	spin_lock(&blkio_list_lock);
-	spin_lock_irq(&blkcg->lock);
-	blkcg->weight = (unsigned int)val;
+	disk = get_gendisk(MKDEV(major, minor), &part);
+	if (!disk || part)
+		return -EINVAL;
 
-	hlist_for_each_entry(blkg, n, &blkcg->blkg_list, blkcg_node) {
-		pn = blkio_policy_search_node(blkcg, blkg->dev,
-				BLKIO_POLICY_PROP, BLKIO_PROP_weight_device);
-		if (pn)
-			continue;
+	rcu_read_lock();
+	spin_lock_irq(disk->queue->queue_lock);
 
-		blkio_update_group_weight(blkg, blkcg->weight);
-	}
-	spin_unlock_irq(&blkcg->lock);
-	spin_unlock(&blkio_list_lock);
-	return 0;
-}
+	if (blkcg_policy_enabled(disk->queue, pol))
+		blkg = blkg_lookup_create(blkcg, disk->queue);
+	else
+		blkg = ERR_PTR(-EINVAL);
 
-static u64 blkiocg_file_read_u64 (struct cgroup *cgrp, struct cftype *cft) {
-	struct blkio_cgroup *blkcg;
-	enum blkio_policy_id plid = BLKIOFILE_POLICY(cft->private);
-	int name = BLKIOFILE_ATTR(cft->private);
-
-	blkcg = cgroup_to_blkio_cgroup(cgrp);
-
-	switch(plid) {
-	case BLKIO_POLICY_PROP:
-		switch(name) {
-		case BLKIO_PROP_weight:
-			return (u64)blkcg->weight;
+	if (IS_ERR(blkg)) {
+		ret = PTR_ERR(blkg);
+		rcu_read_unlock();
+		spin_unlock_irq(disk->queue->queue_lock);
+		put_disk(disk);
+		/*
+		 * If queue was bypassing, we should retry.  Do so after a
+		 * short msleep().  It isn't strictly necessary but queue
+		 * can be bypassing for some time and it's always nice to
+		 * avoid busy looping.
+		 */
+		if (ret == -EBUSY) {
+			msleep(10);
+			ret = restart_syscall();
 		}
-		break;
-	default:
-		BUG();
+		return ret;
 	}
+
+	ctx->disk = disk;
+	ctx->blkg = blkg;
+	ctx->v = v;
 	return 0;
 }
+EXPORT_SYMBOL_GPL(blkg_conf_prep);
 
-static int
-blkiocg_file_write_u64(struct cgroup *cgrp, struct cftype *cft, u64 val)
+/**
+ * blkg_conf_finish - finish up per-blkg config update
+ * @ctx: blkg_conf_ctx initialized by blkg_conf_prep()
+ *
+ * Finish up after per-blkg config update.  This function must be paired
+ * with blkg_conf_prep().
+ */
+void blkg_conf_finish(struct blkg_conf_ctx *ctx)
+	__releases(ctx->disk->queue->queue_lock) __releases(rcu)
 {
-	struct blkio_cgroup *blkcg;
-	enum blkio_policy_id plid = BLKIOFILE_POLICY(cft->private);
-	int name = BLKIOFILE_ATTR(cft->private);
-
-	blkcg = cgroup_to_blkio_cgroup(cgrp);
-
-	switch(plid) {
-	case BLKIO_POLICY_PROP:
-		switch(name) {
-		case BLKIO_PROP_weight:
-			return blkio_weight_write(blkcg, val);
-		}
-		break;
-	default:
-		BUG();
-	}
-
-	return 0;
+	spin_unlock_irq(ctx->disk->queue->queue_lock);
+	rcu_read_unlock();
+	put_disk(ctx->disk);
 }
+EXPORT_SYMBOL_GPL(blkg_conf_finish);
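
A hedged sketch (not part of the patch) of how the prep/finish pair is meant to bracket a per-device config write; "example_policy" is hypothetical and the update of the blkg's policy data is elided:

	static int example_set_limit(struct cgroup *cgrp, struct cftype *cft,
				     const char *buf)
	{
		struct blkcg *blkcg = cgroup_to_blkcg(cgrp);
		struct blkg_conf_ctx ctx;
		int ret;

		ret = blkg_conf_prep(blkcg, &example_policy, buf, &ctx);
		if (ret)
			return ret;

		/* queue lock and RCU are held; ctx.blkg and ctx.v are valid */
		/* ... apply ctx.v to ctx.blkg's policy data ... */

		blkg_conf_finish(&ctx);
		return ret;
	}
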
 
-struct cftype blkio_files[] = {
-	{
-		.name = "weight_device",
-		.private = BLKIOFILE_PRIVATE(BLKIO_POLICY_PROP,
-				BLKIO_PROP_weight_device),
-		.read_seq_string = blkiocg_file_read,
-		.write_string = blkiocg_file_write,
-		.max_write_len = 256,
-	},
-	{
-		.name = "weight",
-		.private = BLKIOFILE_PRIVATE(BLKIO_POLICY_PROP,
-				BLKIO_PROP_weight),
-		.read_u64 = blkiocg_file_read_u64,
-		.write_u64 = blkiocg_file_write_u64,
-	},
-	{
-		.name = "time",
-		.private = BLKIOFILE_PRIVATE(BLKIO_POLICY_PROP,
-				BLKIO_PROP_time),
-		.read_map = blkiocg_file_read_map,
-	},
-	{
-		.name = "sectors",
-		.private = BLKIOFILE_PRIVATE(BLKIO_POLICY_PROP,
-				BLKIO_PROP_sectors),
-		.read_map = blkiocg_file_read_map,
-	},
-	{
-		.name = "io_service_bytes",
-		.private = BLKIOFILE_PRIVATE(BLKIO_POLICY_PROP,
-				BLKIO_PROP_io_service_bytes),
-		.read_map = blkiocg_file_read_map,
-	},
-	{
-		.name = "io_serviced",
-		.private = BLKIOFILE_PRIVATE(BLKIO_POLICY_PROP,
-				BLKIO_PROP_io_serviced),
-		.read_map = blkiocg_file_read_map,
-	},
-	{
-		.name = "io_service_time",
-		.private = BLKIOFILE_PRIVATE(BLKIO_POLICY_PROP,
-				BLKIO_PROP_io_service_time),
-		.read_map = blkiocg_file_read_map,
-	},
-	{
-		.name = "io_wait_time",
-		.private = BLKIOFILE_PRIVATE(BLKIO_POLICY_PROP,
-				BLKIO_PROP_io_wait_time),
-		.read_map = blkiocg_file_read_map,
-	},
-	{
-		.name = "io_merged",
-		.private = BLKIOFILE_PRIVATE(BLKIO_POLICY_PROP,
-				BLKIO_PROP_io_merged),
-		.read_map = blkiocg_file_read_map,
-	},
-	{
-		.name = "io_queued",
-		.private = BLKIOFILE_PRIVATE(BLKIO_POLICY_PROP,
-				BLKIO_PROP_io_queued),
-		.read_map = blkiocg_file_read_map,
-	},
+struct cftype blkcg_files[] = {
 	{
 		.name = "reset_stats",
-		.write_u64 = blkiocg_reset_stats,
+		.write_u64 = blkcg_reset_stats,
 	},
-#ifdef CONFIG_BLK_DEV_THROTTLING
-	{
-		.name = "throttle.read_bps_device",
-		.private = BLKIOFILE_PRIVATE(BLKIO_POLICY_THROTL,
-				BLKIO_THROTL_read_bps_device),
-		.read_seq_string = blkiocg_file_read,
-		.write_string = blkiocg_file_write,
-		.max_write_len = 256,
-	},
-
-	{
-		.name = "throttle.write_bps_device",
-		.private = BLKIOFILE_PRIVATE(BLKIO_POLICY_THROTL,
-				BLKIO_THROTL_write_bps_device),
-		.read_seq_string = blkiocg_file_read,
-		.write_string = blkiocg_file_write,
-		.max_write_len = 256,
-	},
-
-	{
-		.name = "throttle.read_iops_device",
-		.private = BLKIOFILE_PRIVATE(BLKIO_POLICY_THROTL,
-				BLKIO_THROTL_read_iops_device),
-		.read_seq_string = blkiocg_file_read,
-		.write_string = blkiocg_file_write,
-		.max_write_len = 256,
-	},
-
-	{
-		.name = "throttle.write_iops_device",
-		.private = BLKIOFILE_PRIVATE(BLKIO_POLICY_THROTL,
-				BLKIO_THROTL_write_iops_device),
-		.read_seq_string = blkiocg_file_read,
-		.write_string = blkiocg_file_write,
-		.max_write_len = 256,
-	},
-	{
-		.name = "throttle.io_service_bytes",
-		.private = BLKIOFILE_PRIVATE(BLKIO_POLICY_THROTL,
-				BLKIO_THROTL_io_service_bytes),
-		.read_map = blkiocg_file_read_map,
-	},
-	{
-		.name = "throttle.io_serviced",
-		.private = BLKIOFILE_PRIVATE(BLKIO_POLICY_THROTL,
-				BLKIO_THROTL_io_serviced),
-		.read_map = blkiocg_file_read_map,
-	},
-#endif /* CONFIG_BLK_DEV_THROTTLING */
-
-#ifdef CONFIG_DEBUG_BLK_CGROUP
-	{
-		.name = "avg_queue_size",
-		.private = BLKIOFILE_PRIVATE(BLKIO_POLICY_PROP,
-				BLKIO_PROP_avg_queue_size),
-		.read_map = blkiocg_file_read_map,
-	},
-	{
-		.name = "group_wait_time",
-		.private = BLKIOFILE_PRIVATE(BLKIO_POLICY_PROP,
-				BLKIO_PROP_group_wait_time),
-		.read_map = blkiocg_file_read_map,
-	},
-	{
-		.name = "idle_time",
-		.private = BLKIOFILE_PRIVATE(BLKIO_POLICY_PROP,
-				BLKIO_PROP_idle_time),
-		.read_map = blkiocg_file_read_map,
-	},
-	{
-		.name = "empty_time",
-		.private = BLKIOFILE_PRIVATE(BLKIO_POLICY_PROP,
-				BLKIO_PROP_empty_time),
-		.read_map = blkiocg_file_read_map,
-	},
-	{
-		.name = "dequeue",
-		.private = BLKIOFILE_PRIVATE(BLKIO_POLICY_PROP,
-				BLKIO_PROP_dequeue),
-		.read_map = blkiocg_file_read_map,
-	},
-	{
-		.name = "unaccounted_time",
-		.private = BLKIOFILE_PRIVATE(BLKIO_POLICY_PROP,
-				BLKIO_PROP_unaccounted_time),
-		.read_map = blkiocg_file_read_map,
-	},
-#endif
 	{ }	/* terminate */
 };
 
-static void blkiocg_destroy(struct cgroup *cgroup)
+/**
+ * blkcg_pre_destroy - cgroup pre_destroy callback
+ * @cgroup: cgroup of interest
+ *
+ * This function is called when @cgroup is about to go away and is
+ * responsible for shooting down all blkgs associated with @cgroup.  blkgs
+ * should be removed while holding both q and blkcg locks.  As the blkcg
+ * lock is nested inside the q lock, this function performs reverse double
+ * lock dancing.
+ *
+ * This is the blkcg counterpart of ioc_release_fn().
+ */
+static int blkcg_pre_destroy(struct cgroup *cgroup)
 {
-	struct blkio_cgroup *blkcg = cgroup_to_blkio_cgroup(cgroup);
-	unsigned long flags;
-	struct blkio_group *blkg;
-	void *key;
-	struct blkio_policy_type *blkiop;
-	struct blkio_policy_node *pn, *pntmp;
+	struct blkcg *blkcg = cgroup_to_blkcg(cgroup);
 
-	rcu_read_lock();
-	do {
-		spin_lock_irqsave(&blkcg->lock, flags);
+	spin_lock_irq(&blkcg->lock);
 
-		if (hlist_empty(&blkcg->blkg_list)) {
-			spin_unlock_irqrestore(&blkcg->lock, flags);
-			break;
+	while (!hlist_empty(&blkcg->blkg_list)) {
+		struct blkcg_gq *blkg = hlist_entry(blkcg->blkg_list.first,
+						struct blkcg_gq, blkcg_node);
+		struct request_queue *q = blkg->q;
+
+		if (spin_trylock(q->queue_lock)) {
+			blkg_destroy(blkg);
+			spin_unlock(q->queue_lock);
+		} else {
+			spin_unlock_irq(&blkcg->lock);
+			cpu_relax();
+			spin_lock_irq(&blkcg->lock);
 		}
-
-		blkg = hlist_entry(blkcg->blkg_list.first, struct blkio_group,
-					blkcg_node);
-		key = rcu_dereference(blkg->key);
-		__blkiocg_del_blkio_group(blkg);
-
-		spin_unlock_irqrestore(&blkcg->lock, flags);
-
-		/*
-		 * This blkio_group is being unlinked as associated cgroup is
-		 * going away. Let all the IO controlling policies know about
-		 * this event.
-		 */
-		spin_lock(&blkio_list_lock);
-		list_for_each_entry(blkiop, &blkio_list, list) {
-			if (blkiop->plid != blkg->plid)
-				continue;
-			blkiop->ops.blkio_unlink_group_fn(key, blkg);
-		}
-		spin_unlock(&blkio_list_lock);
-	} while (1);
-
-	list_for_each_entry_safe(pn, pntmp, &blkcg->policy_list, node) {
-		blkio_policy_delete_node(pn);
-		kfree(pn);
 	}
 
-	free_css_id(&blkio_subsys, &blkcg->css);
-	rcu_read_unlock();
-	if (blkcg != &blkio_root_cgroup)
+	spin_unlock_irq(&blkcg->lock);
+	return 0;
+}
+
+static void blkcg_destroy(struct cgroup *cgroup)
+{
+	struct blkcg *blkcg = cgroup_to_blkcg(cgroup);
+
+	if (blkcg != &blkcg_root)
 		kfree(blkcg);
 }
 
-static struct cgroup_subsys_state *blkiocg_create(struct cgroup *cgroup)
+static struct cgroup_subsys_state *blkcg_create(struct cgroup *cgroup)
 {
-	struct blkio_cgroup *blkcg;
+	static atomic64_t id_seq = ATOMIC64_INIT(0);
+	struct blkcg *blkcg;
 	struct cgroup *parent = cgroup->parent;
 
 	if (!parent) {
-		blkcg = &blkio_root_cgroup;
+		blkcg = &blkcg_root;
 		goto done;
 	}
 
@@ -1582,22 +624,68 @@
 	if (!blkcg)
 		return ERR_PTR(-ENOMEM);
 
-	blkcg->weight = BLKIO_WEIGHT_DEFAULT;
+	blkcg->cfq_weight = CFQ_WEIGHT_DEFAULT;
+	blkcg->id = atomic64_inc_return(&id_seq); /* root is 0, start from 1 */
 done:
 	spin_lock_init(&blkcg->lock);
+	INIT_RADIX_TREE(&blkcg->blkg_tree, GFP_ATOMIC);
 	INIT_HLIST_HEAD(&blkcg->blkg_list);
 
-	INIT_LIST_HEAD(&blkcg->policy_list);
 	return &blkcg->css;
 }
 
+/**
+ * blkcg_init_queue - initialize blkcg part of request queue
+ * @q: request_queue to initialize
+ *
+ * Called from blk_alloc_queue_node(). Responsible for initializing blkcg
+ * part of new request_queue @q.
+ *
+ * RETURNS:
+ * 0 on success, -errno on failure.
+ */
+int blkcg_init_queue(struct request_queue *q)
+{
+	might_sleep();
+
+	return blk_throtl_init(q);
+}
+
+/**
+ * blkcg_drain_queue - drain blkcg part of request_queue
+ * @q: request_queue to drain
+ *
+ * Called from blk_drain_queue().  Responsible for draining blkcg part.
+ */
+void blkcg_drain_queue(struct request_queue *q)
+{
+	lockdep_assert_held(q->queue_lock);
+
+	blk_throtl_drain(q);
+}
+
+/**
+ * blkcg_exit_queue - exit and release blkcg part of request_queue
+ * @q: request_queue being released
+ *
+ * Called from blk_release_queue().  Responsible for exiting blkcg part.
+ */
+void blkcg_exit_queue(struct request_queue *q)
+{
+	spin_lock_irq(q->queue_lock);
+	blkg_destroy_all(q);
+	spin_unlock_irq(q->queue_lock);
+
+	blk_throtl_exit(q);
+}
+
 /*
  * We cannot support shared io contexts, as we have no mean to support
  * two tasks with the same ioc in two different groups without major rework
  * of the main cic data structures.  For now we allow a task to change
  * its cgroup only if it's the only owner of its ioc.
  */
-static int blkiocg_can_attach(struct cgroup *cgrp, struct cgroup_taskset *tset)
+static int blkcg_can_attach(struct cgroup *cgrp, struct cgroup_taskset *tset)
 {
 	struct task_struct *task;
 	struct io_context *ioc;
@@ -1616,63 +704,213 @@
 	return ret;
 }
 
-static void blkiocg_attach(struct cgroup *cgrp, struct cgroup_taskset *tset)
-{
-	struct task_struct *task;
-	struct io_context *ioc;
-
-	cgroup_taskset_for_each(task, cgrp, tset) {
-		/* we don't lose anything even if ioc allocation fails */
-		ioc = get_task_io_context(task, GFP_ATOMIC, NUMA_NO_NODE);
-		if (ioc) {
-			ioc_cgroup_changed(ioc);
-			put_io_context(ioc);
-		}
-	}
-}
-
 struct cgroup_subsys blkio_subsys = {
 	.name = "blkio",
-	.create = blkiocg_create,
-	.can_attach = blkiocg_can_attach,
-	.attach = blkiocg_attach,
-	.destroy = blkiocg_destroy,
-#ifdef CONFIG_BLK_CGROUP
-	/* note: blkio_subsys_id is otherwise defined in blk-cgroup.h */
+	.create = blkcg_create,
+	.can_attach = blkcg_can_attach,
+	.pre_destroy = blkcg_pre_destroy,
+	.destroy = blkcg_destroy,
 	.subsys_id = blkio_subsys_id,
-#endif
-	.base_cftypes = blkio_files,
-	.use_id = 1,
+	.base_cftypes = blkcg_files,
 	.module = THIS_MODULE,
 };
 EXPORT_SYMBOL_GPL(blkio_subsys);
 
-void blkio_policy_register(struct blkio_policy_type *blkiop)
+/**
+ * blkcg_activate_policy - activate a blkcg policy on a request_queue
+ * @q: request_queue of interest
+ * @pol: blkcg policy to activate
+ *
+ * Activate @pol on @q.  Requires %GFP_KERNEL context.  @q goes through
+ * bypass mode to populate its blkgs with policy_data for @pol.
+ *
+ * Activation happens with @q bypassed, so nobody would be accessing blkgs
+ * from the IO path.  Update of each blkg is protected by both queue and blkcg
+ * locks so that holding either lock and testing blkcg_policy_enabled() is
+ * always enough for dereferencing policy data.
+ *
+ * The caller is responsible for synchronizing [de]activations and policy
+ * [un]registrations.  Returns 0 on success, -errno on failure.
+ */
+int blkcg_activate_policy(struct request_queue *q,
+			  const struct blkcg_policy *pol)
 {
-	spin_lock(&blkio_list_lock);
-	list_add_tail(&blkiop->list, &blkio_list);
-	spin_unlock(&blkio_list_lock);
-}
-EXPORT_SYMBOL_GPL(blkio_policy_register);
+	LIST_HEAD(pds);
+	struct blkcg_gq *blkg;
+	struct blkg_policy_data *pd, *n;
+	int cnt = 0, ret;
 
-void blkio_policy_unregister(struct blkio_policy_type *blkiop)
+	if (blkcg_policy_enabled(q, pol))
+		return 0;
+
+	blk_queue_bypass_start(q);
+
+	/* make sure the root blkg exists and count the existing blkgs */
+	spin_lock_irq(q->queue_lock);
+
+	rcu_read_lock();
+	blkg = __blkg_lookup_create(&blkcg_root, q);
+	rcu_read_unlock();
+
+	if (IS_ERR(blkg)) {
+		ret = PTR_ERR(blkg);
+		goto out_unlock;
+	}
+	q->root_blkg = blkg;
+
+	list_for_each_entry(blkg, &q->blkg_list, q_node)
+		cnt++;
+
+	spin_unlock_irq(q->queue_lock);
+
+	/* allocate policy_data for all existing blkgs */
+	while (cnt--) {
+		pd = kzalloc_node(pol->pd_size, GFP_KERNEL, q->node);
+		if (!pd) {
+			ret = -ENOMEM;
+			goto out_free;
+		}
+		list_add_tail(&pd->alloc_node, &pds);
+	}
+
+	/*
+	 * Install the allocated pds.  With @q bypassing, no new blkg
+	 * should have been created while the queue lock was dropped.
+	 */
+	spin_lock_irq(q->queue_lock);
+
+	list_for_each_entry(blkg, &q->blkg_list, q_node) {
+		if (WARN_ON(list_empty(&pds))) {
+			/* umm... this shouldn't happen, just abort */
+			ret = -ENOMEM;
+			goto out_unlock;
+		}
+		pd = list_first_entry(&pds, struct blkg_policy_data, alloc_node);
+		list_del_init(&pd->alloc_node);
+
+		/* grab blkcg lock too while installing @pd on @blkg */
+		spin_lock(&blkg->blkcg->lock);
+
+		blkg->pd[pol->plid] = pd;
+		pd->blkg = blkg;
+		pol->pd_init_fn(blkg);
+
+		spin_unlock(&blkg->blkcg->lock);
+	}
+
+	__set_bit(pol->plid, q->blkcg_pols);
+	ret = 0;
+out_unlock:
+	spin_unlock_irq(q->queue_lock);
+out_free:
+	blk_queue_bypass_end(q);
+	list_for_each_entry_safe(pd, n, &pds, alloc_node)
+		kfree(pd);
+	return ret;
+}
+EXPORT_SYMBOL_GPL(blkcg_activate_policy);
+
+/**
+ * blkcg_deactivate_policy - deactivate a blkcg policy on a request_queue
+ * @q: request_queue of interest
+ * @pol: blkcg policy to deactivate
+ *
+ * Deactivate @pol on @q.  Follows the same synchronization rules as
+ * blkcg_activate_policy().
+ */
+void blkcg_deactivate_policy(struct request_queue *q,
+			     const struct blkcg_policy *pol)
 {
-	spin_lock(&blkio_list_lock);
-	list_del_init(&blkiop->list);
-	spin_unlock(&blkio_list_lock);
-}
-EXPORT_SYMBOL_GPL(blkio_policy_unregister);
+	struct blkcg_gq *blkg;
 
-static int __init init_cgroup_blkio(void)
+	if (!blkcg_policy_enabled(q, pol))
+		return;
+
+	blk_queue_bypass_start(q);
+	spin_lock_irq(q->queue_lock);
+
+	__clear_bit(pol->plid, q->blkcg_pols);
+
+	/* if no policy is left, no need for blkgs - shoot them down */
+	if (bitmap_empty(q->blkcg_pols, BLKCG_MAX_POLS))
+		blkg_destroy_all(q);
+
+	list_for_each_entry(blkg, &q->blkg_list, q_node) {
+		/* grab blkcg lock too while removing @pd from @blkg */
+		spin_lock(&blkg->blkcg->lock);
+
+		if (pol->pd_exit_fn)
+			pol->pd_exit_fn(blkg);
+
+		kfree(blkg->pd[pol->plid]);
+		blkg->pd[pol->plid] = NULL;
+
+		spin_unlock(&blkg->blkcg->lock);
+	}
+
+	spin_unlock_irq(q->queue_lock);
+	blk_queue_bypass_end(q);
+}
+EXPORT_SYMBOL_GPL(blkcg_deactivate_policy);
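
As an illustrative sketch (not part of the patch), a policy owner such as an elevator would activate and deactivate its policy around queue setup and teardown; "example_policy" is hypothetical:

	static int example_init_queue(struct request_queue *q)
	{
		int ret;

		ret = blkcg_activate_policy(q, &example_policy);
		if (ret)
			return ret;

		/* every blkg of @q now carries policy data for example_policy */
		return 0;
	}

	static void example_exit_queue(struct request_queue *q)
	{
		blkcg_deactivate_policy(q, &example_policy);
	}
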
+
+/**
+ * blkcg_policy_register - register a blkcg policy
+ * @pol: blkcg policy to register
+ *
+ * Register @pol with blkcg core.  Might sleep and @pol may be modified on
+ * successful registration.  Returns 0 on success and -errno on failure.
+ */
+int blkcg_policy_register(struct blkcg_policy *pol)
 {
-	return cgroup_load_subsys(&blkio_subsys);
-}
+	int i, ret;
 
-static void __exit exit_cgroup_blkio(void)
+	if (WARN_ON(pol->pd_size < sizeof(struct blkg_policy_data)))
+		return -EINVAL;
+
+	mutex_lock(&blkcg_pol_mutex);
+
+	/* find an empty slot */
+	ret = -ENOSPC;
+	for (i = 0; i < BLKCG_MAX_POLS; i++)
+		if (!blkcg_policy[i])
+			break;
+	if (i >= BLKCG_MAX_POLS)
+		goto out_unlock;
+
+	/* register and update blkgs */
+	pol->plid = i;
+	blkcg_policy[i] = pol;
+
+	/* everything is in place, add intf files for the new policy */
+	if (pol->cftypes)
+		WARN_ON(cgroup_add_cftypes(&blkio_subsys, pol->cftypes));
+	ret = 0;
+out_unlock:
+	mutex_unlock(&blkcg_pol_mutex);
+	return ret;
+}
+EXPORT_SYMBOL_GPL(blkcg_policy_register);
+
+/**
+ * blkcg_policy_unregister - unregister a blkcg policy
+ * @pol: blkcg policy to unregister
+ *
+ * Undo blkcg_policy_register(@pol).  Might sleep.
+ */
+void blkcg_policy_unregister(struct blkcg_policy *pol)
 {
-	cgroup_unload_subsys(&blkio_subsys);
-}
+	mutex_lock(&blkcg_pol_mutex);
 
-module_init(init_cgroup_blkio);
-module_exit(exit_cgroup_blkio);
-MODULE_LICENSE("GPL");
+	if (WARN_ON(blkcg_policy[pol->plid] != pol))
+		goto out_unlock;
+
+	/* kill the intf files first */
+	if (pol->cftypes)
+		cgroup_rm_cftypes(&blkio_subsys, pol->cftypes);
+
+	/* unregister and update blkgs */
+	blkcg_policy[pol->plid] = NULL;
+out_unlock:
+	mutex_unlock(&blkcg_pol_mutex);
+}
+EXPORT_SYMBOL_GPL(blkcg_policy_unregister);
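
A hedged module-level sketch (not part of the patch) tying registration to module init/exit; "example_policy", "example_files", "struct example_group" and the pd callbacks are hypothetical:

	static struct blkcg_policy example_policy = {
		.pd_size	= sizeof(struct example_group),	/* embeds blkg_policy_data first */
		.cftypes	= example_files,
		.pd_init_fn	= example_pd_init,
		.pd_exit_fn	= example_pd_exit,
	};

	static int __init example_init(void)
	{
		return blkcg_policy_register(&example_policy);
	}

	static void __exit example_exit(void)
	{
		blkcg_policy_unregister(&example_policy);
	}

	module_init(example_init);
	module_exit(example_exit);
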
diff --git a/block/blk-cgroup.h b/block/blk-cgroup.h
index 6f3ace7..8ac457c 100644
--- a/block/blk-cgroup.h
+++ b/block/blk-cgroup.h
@@ -15,350 +15,371 @@
 
 #include <linux/cgroup.h>
 #include <linux/u64_stats_sync.h>
-
-enum blkio_policy_id {
-	BLKIO_POLICY_PROP = 0,		/* Proportional Bandwidth division */
-	BLKIO_POLICY_THROTL,		/* Throttling */
-};
+#include <linux/seq_file.h>
+#include <linux/radix-tree.h>
 
 /* Max limits for throttle policy */
 #define THROTL_IOPS_MAX		UINT_MAX
 
-#if defined(CONFIG_BLK_CGROUP) || defined(CONFIG_BLK_CGROUP_MODULE)
+/* CFQ specific, out here for blkcg->cfq_weight */
+#define CFQ_WEIGHT_MIN		10
+#define CFQ_WEIGHT_MAX		1000
+#define CFQ_WEIGHT_DEFAULT	500
 
-#ifndef CONFIG_BLK_CGROUP
-/* When blk-cgroup is a module, its subsys_id isn't a compile-time constant */
-extern struct cgroup_subsys blkio_subsys;
-#define blkio_subsys_id blkio_subsys.subsys_id
-#endif
+#ifdef CONFIG_BLK_CGROUP
 
-enum stat_type {
-	/* Total time spent (in ns) between request dispatch to the driver and
-	 * request completion for IOs doen by this cgroup. This may not be
-	 * accurate when NCQ is turned on. */
-	BLKIO_STAT_SERVICE_TIME = 0,
-	/* Total time spent waiting in scheduler queue in ns */
-	BLKIO_STAT_WAIT_TIME,
-	/* Number of IOs queued up */
-	BLKIO_STAT_QUEUED,
-	/* All the single valued stats go below this */
-	BLKIO_STAT_TIME,
-#ifdef CONFIG_DEBUG_BLK_CGROUP
-	/* Time not charged to this cgroup */
-	BLKIO_STAT_UNACCOUNTED_TIME,
-	BLKIO_STAT_AVG_QUEUE_SIZE,
-	BLKIO_STAT_IDLE_TIME,
-	BLKIO_STAT_EMPTY_TIME,
-	BLKIO_STAT_GROUP_WAIT_TIME,
-	BLKIO_STAT_DEQUEUE
-#endif
+enum blkg_rwstat_type {
+	BLKG_RWSTAT_READ,
+	BLKG_RWSTAT_WRITE,
+	BLKG_RWSTAT_SYNC,
+	BLKG_RWSTAT_ASYNC,
+
+	BLKG_RWSTAT_NR,
+	BLKG_RWSTAT_TOTAL = BLKG_RWSTAT_NR,
 };
 
-/* Per cpu stats */
-enum stat_type_cpu {
-	BLKIO_STAT_CPU_SECTORS,
-	/* Total bytes transferred */
-	BLKIO_STAT_CPU_SERVICE_BYTES,
-	/* Total IOs serviced, post merge */
-	BLKIO_STAT_CPU_SERVICED,
-	/* Number of IOs merged */
-	BLKIO_STAT_CPU_MERGED,
-	BLKIO_STAT_CPU_NR
+struct blkcg_gq;
+
+struct blkcg {
+	struct cgroup_subsys_state	css;
+	spinlock_t			lock;
+
+	struct radix_tree_root		blkg_tree;
+	struct blkcg_gq			*blkg_hint;
+	struct hlist_head		blkg_list;
+
+	/* for policies to test whether associated blkcg has changed */
+	uint64_t			id;
+
+	/* TODO: per-policy storage in blkcg */
+	unsigned int			cfq_weight;	/* belongs to cfq */
 };
 
-enum stat_sub_type {
-	BLKIO_STAT_READ = 0,
-	BLKIO_STAT_WRITE,
-	BLKIO_STAT_SYNC,
-	BLKIO_STAT_ASYNC,
-	BLKIO_STAT_TOTAL
+struct blkg_stat {
+	struct u64_stats_sync		syncp;
+	uint64_t			cnt;
 };
 
-/* blkg state flags */
-enum blkg_state_flags {
-	BLKG_waiting = 0,
-	BLKG_idling,
-	BLKG_empty,
+struct blkg_rwstat {
+	struct u64_stats_sync		syncp;
+	uint64_t			cnt[BLKG_RWSTAT_NR];
 };
 
-/* cgroup files owned by proportional weight policy */
-enum blkcg_file_name_prop {
-	BLKIO_PROP_weight = 1,
-	BLKIO_PROP_weight_device,
-	BLKIO_PROP_io_service_bytes,
-	BLKIO_PROP_io_serviced,
-	BLKIO_PROP_time,
-	BLKIO_PROP_sectors,
-	BLKIO_PROP_unaccounted_time,
-	BLKIO_PROP_io_service_time,
-	BLKIO_PROP_io_wait_time,
-	BLKIO_PROP_io_merged,
-	BLKIO_PROP_io_queued,
-	BLKIO_PROP_avg_queue_size,
-	BLKIO_PROP_group_wait_time,
-	BLKIO_PROP_idle_time,
-	BLKIO_PROP_empty_time,
-	BLKIO_PROP_dequeue,
+/*
+ * A blkcg_gq (blkg) is an association between a block cgroup (blkcg) and a
+ * request_queue (q).  This is used by blkcg policies which need to track
+ * information per blkcg - q pair.
+ *
+ * There can be multiple active blkcg policies and each has its private
+ * data on each blkg, the size of which is determined by
+ * blkcg_policy->pd_size.  blkcg core allocates and frees such areas
+ * together with blkg and invokes pd_init/exit_fn() methods.
+ *
+ * Such private data must embed struct blkg_policy_data (pd) at the
+ * beginning, and pd_size can't be smaller than sizeof(struct blkg_policy_data).
+ */
+struct blkg_policy_data {
+	/* the blkg this per-policy data belongs to */
+	struct blkcg_gq			*blkg;
+
+	/* used during policy activation */
+	struct list_head		alloc_node;
 };
 
-/* cgroup files owned by throttle policy */
-enum blkcg_file_name_throtl {
-	BLKIO_THROTL_read_bps_device,
-	BLKIO_THROTL_write_bps_device,
-	BLKIO_THROTL_read_iops_device,
-	BLKIO_THROTL_write_iops_device,
-	BLKIO_THROTL_io_service_bytes,
-	BLKIO_THROTL_io_serviced,
+/* association between a blk cgroup and a request queue */
+struct blkcg_gq {
+	/* Pointer to the associated request_queue */
+	struct request_queue		*q;
+	struct list_head		q_node;
+	struct hlist_node		blkcg_node;
+	struct blkcg			*blkcg;
+	/* reference count */
+	int				refcnt;
+
+	struct blkg_policy_data		*pd[BLKCG_MAX_POLS];
+
+	struct rcu_head			rcu_head;
 };
 
-struct blkio_cgroup {
-	struct cgroup_subsys_state css;
-	unsigned int weight;
-	spinlock_t lock;
-	struct hlist_head blkg_list;
-	struct list_head policy_list; /* list of blkio_policy_node */
+typedef void (blkcg_pol_init_pd_fn)(struct blkcg_gq *blkg);
+typedef void (blkcg_pol_exit_pd_fn)(struct blkcg_gq *blkg);
+typedef void (blkcg_pol_reset_pd_stats_fn)(struct blkcg_gq *blkg);
+
+struct blkcg_policy {
+	int				plid;
+	/* policy specific private data size */
+	size_t				pd_size;
+	/* cgroup files for the policy */
+	struct cftype			*cftypes;
+
+	/* operations */
+	blkcg_pol_init_pd_fn		*pd_init_fn;
+	blkcg_pol_exit_pd_fn		*pd_exit_fn;
+	blkcg_pol_reset_pd_stats_fn	*pd_reset_stats_fn;
 };
 
-struct blkio_group_stats {
-	/* total disk time and nr sectors dispatched by this group */
-	uint64_t time;
-	uint64_t stat_arr[BLKIO_STAT_QUEUED + 1][BLKIO_STAT_TOTAL];
-#ifdef CONFIG_DEBUG_BLK_CGROUP
-	/* Time not charged to this cgroup */
-	uint64_t unaccounted_time;
+extern struct blkcg blkcg_root;
 
-	/* Sum of number of IOs queued across all samples */
-	uint64_t avg_queue_size_sum;
-	/* Count of samples taken for average */
-	uint64_t avg_queue_size_samples;
-	/* How many times this group has been removed from service tree */
-	unsigned long dequeue;
-
-	/* Total time spent waiting for it to be assigned a timeslice. */
-	uint64_t group_wait_time;
-	uint64_t start_group_wait_time;
-
-	/* Time spent idling for this blkio_group */
-	uint64_t idle_time;
-	uint64_t start_idle_time;
-	/*
-	 * Total time when we have requests queued and do not contain the
-	 * current active queue.
-	 */
-	uint64_t empty_time;
-	uint64_t start_empty_time;
-	uint16_t flags;
-#endif
-};
-
-/* Per cpu blkio group stats */
-struct blkio_group_stats_cpu {
-	uint64_t sectors;
-	uint64_t stat_arr_cpu[BLKIO_STAT_CPU_NR][BLKIO_STAT_TOTAL];
-	struct u64_stats_sync syncp;
-};
-
-struct blkio_group {
-	/* An rcu protected unique identifier for the group */
-	void *key;
-	struct hlist_node blkcg_node;
-	unsigned short blkcg_id;
-	/* Store cgroup path */
-	char path[128];
-	/* The device MKDEV(major, minor), this group has been created for */
-	dev_t dev;
-	/* policy which owns this blk group */
-	enum blkio_policy_id plid;
-
-	/* Need to serialize the stats in the case of reset/update */
-	spinlock_t stats_lock;
-	struct blkio_group_stats stats;
-	/* Per cpu stats pointer */
-	struct blkio_group_stats_cpu __percpu *stats_cpu;
-};
-
-struct blkio_policy_node {
-	struct list_head node;
-	dev_t dev;
-	/* This node belongs to max bw policy or porportional weight policy */
-	enum blkio_policy_id plid;
-	/* cgroup file to which this rule belongs to */
-	int fileid;
-
-	union {
-		unsigned int weight;
-		/*
-		 * Rate read/write in terms of bytes per second
-		 * Whether this rate represents read or write is determined
-		 * by file type "fileid".
-		 */
-		u64 bps;
-		unsigned int iops;
-	} val;
-};
-
-extern unsigned int blkcg_get_weight(struct blkio_cgroup *blkcg,
-				     dev_t dev);
-extern uint64_t blkcg_get_read_bps(struct blkio_cgroup *blkcg,
-				     dev_t dev);
-extern uint64_t blkcg_get_write_bps(struct blkio_cgroup *blkcg,
-				     dev_t dev);
-extern unsigned int blkcg_get_read_iops(struct blkio_cgroup *blkcg,
-				     dev_t dev);
-extern unsigned int blkcg_get_write_iops(struct blkio_cgroup *blkcg,
-				     dev_t dev);
-
-typedef void (blkio_unlink_group_fn) (void *key, struct blkio_group *blkg);
-
-typedef void (blkio_update_group_weight_fn) (void *key,
-			struct blkio_group *blkg, unsigned int weight);
-typedef void (blkio_update_group_read_bps_fn) (void * key,
-			struct blkio_group *blkg, u64 read_bps);
-typedef void (blkio_update_group_write_bps_fn) (void *key,
-			struct blkio_group *blkg, u64 write_bps);
-typedef void (blkio_update_group_read_iops_fn) (void *key,
-			struct blkio_group *blkg, unsigned int read_iops);
-typedef void (blkio_update_group_write_iops_fn) (void *key,
-			struct blkio_group *blkg, unsigned int write_iops);
-
-struct blkio_policy_ops {
-	blkio_unlink_group_fn *blkio_unlink_group_fn;
-	blkio_update_group_weight_fn *blkio_update_group_weight_fn;
-	blkio_update_group_read_bps_fn *blkio_update_group_read_bps_fn;
-	blkio_update_group_write_bps_fn *blkio_update_group_write_bps_fn;
-	blkio_update_group_read_iops_fn *blkio_update_group_read_iops_fn;
-	blkio_update_group_write_iops_fn *blkio_update_group_write_iops_fn;
-};
-
-struct blkio_policy_type {
-	struct list_head list;
-	struct blkio_policy_ops ops;
-	enum blkio_policy_id plid;
-};
+struct blkcg *cgroup_to_blkcg(struct cgroup *cgroup);
+struct blkcg *bio_blkcg(struct bio *bio);
+struct blkcg_gq *blkg_lookup(struct blkcg *blkcg, struct request_queue *q);
+struct blkcg_gq *blkg_lookup_create(struct blkcg *blkcg,
+				    struct request_queue *q);
+int blkcg_init_queue(struct request_queue *q);
+void blkcg_drain_queue(struct request_queue *q);
+void blkcg_exit_queue(struct request_queue *q);
 
 /* Blkio controller policy registration */
-extern void blkio_policy_register(struct blkio_policy_type *);
-extern void blkio_policy_unregister(struct blkio_policy_type *);
+int blkcg_policy_register(struct blkcg_policy *pol);
+void blkcg_policy_unregister(struct blkcg_policy *pol);
+int blkcg_activate_policy(struct request_queue *q,
+			  const struct blkcg_policy *pol);
+void blkcg_deactivate_policy(struct request_queue *q,
+			     const struct blkcg_policy *pol);
 
-static inline char *blkg_path(struct blkio_group *blkg)
+void blkcg_print_blkgs(struct seq_file *sf, struct blkcg *blkcg,
+		       u64 (*prfill)(struct seq_file *,
+				     struct blkg_policy_data *, int),
+		       const struct blkcg_policy *pol, int data,
+		       bool show_total);
+u64 __blkg_prfill_u64(struct seq_file *sf, struct blkg_policy_data *pd, u64 v);
+u64 __blkg_prfill_rwstat(struct seq_file *sf, struct blkg_policy_data *pd,
+			 const struct blkg_rwstat *rwstat);
+u64 blkg_prfill_stat(struct seq_file *sf, struct blkg_policy_data *pd, int off);
+u64 blkg_prfill_rwstat(struct seq_file *sf, struct blkg_policy_data *pd,
+		       int off);
+
+struct blkg_conf_ctx {
+	struct gendisk			*disk;
+	struct blkcg_gq			*blkg;
+	u64				v;
+};
+
+int blkg_conf_prep(struct blkcg *blkcg, const struct blkcg_policy *pol,
+		   const char *input, struct blkg_conf_ctx *ctx);
+void blkg_conf_finish(struct blkg_conf_ctx *ctx);
+
+
+/**
+ * blkg_to_pd - get policy private data
+ * @blkg: blkg of interest
+ * @pol: policy of interest
+ *
+ * Return pointer to private data associated with the @blkg-@pol pair.
+ */
+static inline struct blkg_policy_data *blkg_to_pd(struct blkcg_gq *blkg,
+						  struct blkcg_policy *pol)
 {
-	return blkg->path;
+	return blkg ? blkg->pd[pol->plid] : NULL;
 }
 
-#else
+/**
+ * pd_to_blkg - get blkg associated with policy private data
+ * @pd: policy private data of interest
+ *
+ * @pd is policy private data.  Determine the blkg it's associated with.
+ */
+static inline struct blkcg_gq *pd_to_blkg(struct blkg_policy_data *pd)
+{
+	return pd ? pd->blkg : NULL;
+}
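
A hedged sketch (not part of the patch) of the converters a policy usually layers on top of these helpers; "example_group" and "example_policy" are hypothetical:

	static inline struct example_group *blkg_to_example(struct blkcg_gq *blkg)
	{
		struct blkg_policy_data *pd = blkg_to_pd(blkg, &example_policy);

		return pd ? container_of(pd, struct example_group, pd) : NULL;
	}

	static inline struct blkcg_gq *example_to_blkg(struct example_group *eg)
	{
		return pd_to_blkg(&eg->pd);
	}
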
 
-struct blkio_group {
-};
+/**
+ * blkg_path - format cgroup path of blkg
+ * @blkg: blkg of interest
+ * @buf: target buffer
+ * @buflen: target buffer length
+ *
+ * Format the path of the cgroup of @blkg into @buf.
+ */
+static inline int blkg_path(struct blkcg_gq *blkg, char *buf, int buflen)
+{
+	int ret;
 
-struct blkio_policy_type {
-};
+	rcu_read_lock();
+	ret = cgroup_path(blkg->blkcg->css.cgroup, buf, buflen);
+	rcu_read_unlock();
+	if (ret)
+		strncpy(buf, "<unavailable>", buflen);
+	return ret;
+}
 
-static inline void blkio_policy_register(struct blkio_policy_type *blkiop) { }
-static inline void blkio_policy_unregister(struct blkio_policy_type *blkiop) { }
+/**
+ * blkg_get - get a blkg reference
+ * @blkg: blkg to get
+ *
+ * The caller should be holding queue_lock and an existing reference.
+ */
+static inline void blkg_get(struct blkcg_gq *blkg)
+{
+	lockdep_assert_held(blkg->q->queue_lock);
+	WARN_ON_ONCE(!blkg->refcnt);
+	blkg->refcnt++;
+}
 
-static inline char *blkg_path(struct blkio_group *blkg) { return NULL; }
+void __blkg_release(struct blkcg_gq *blkg);
 
-#endif
+/**
+ * blkg_put - put a blkg reference
+ * @blkg: blkg to put
+ *
+ * The caller should be holding queue_lock.
+ */
+static inline void blkg_put(struct blkcg_gq *blkg)
+{
+	lockdep_assert_held(blkg->q->queue_lock);
+	WARN_ON_ONCE(blkg->refcnt <= 0);
+	if (!--blkg->refcnt)
+		__blkg_release(blkg);
+}
 
-#define BLKIO_WEIGHT_MIN	10
-#define BLKIO_WEIGHT_MAX	1000
-#define BLKIO_WEIGHT_DEFAULT	500
+/**
+ * blkg_stat_add - add a value to a blkg_stat
+ * @stat: target blkg_stat
+ * @val: value to add
+ *
+ * Add @val to @stat.  The caller is responsible for synchronizing calls to
+ * this function.
+ */
+static inline void blkg_stat_add(struct blkg_stat *stat, uint64_t val)
+{
+	u64_stats_update_begin(&stat->syncp);
+	stat->cnt += val;
+	u64_stats_update_end(&stat->syncp);
+}
 
-#ifdef CONFIG_DEBUG_BLK_CGROUP
-void blkiocg_update_avg_queue_size_stats(struct blkio_group *blkg);
-void blkiocg_update_dequeue_stats(struct blkio_group *blkg,
-				unsigned long dequeue);
-void blkiocg_update_set_idle_time_stats(struct blkio_group *blkg);
-void blkiocg_update_idle_time_stats(struct blkio_group *blkg);
-void blkiocg_set_start_empty_time(struct blkio_group *blkg);
+/**
+ * blkg_stat_read - read the current value of a blkg_stat
+ * @stat: blkg_stat to read
+ *
+ * Read the current value of @stat.  This function can be called without
+ * synchronization and takes care of u64 atomicity.
+ */
+static inline uint64_t blkg_stat_read(struct blkg_stat *stat)
+{
+	unsigned int start;
+	uint64_t v;
 
-#define BLKG_FLAG_FNS(name)						\
-static inline void blkio_mark_blkg_##name(				\
-		struct blkio_group_stats *stats)			\
-{									\
-	stats->flags |= (1 << BLKG_##name);				\
-}									\
-static inline void blkio_clear_blkg_##name(				\
-		struct blkio_group_stats *stats)			\
-{									\
-	stats->flags &= ~(1 << BLKG_##name);				\
-}									\
-static inline int blkio_blkg_##name(struct blkio_group_stats *stats)	\
-{									\
-	return (stats->flags & (1 << BLKG_##name)) != 0;		\
-}									\
+	do {
+		start = u64_stats_fetch_begin(&stat->syncp);
+		v = stat->cnt;
+	} while (u64_stats_fetch_retry(&stat->syncp, start));
 
-BLKG_FLAG_FNS(waiting)
-BLKG_FLAG_FNS(idling)
-BLKG_FLAG_FNS(empty)
-#undef BLKG_FLAG_FNS
-#else
-static inline void blkiocg_update_avg_queue_size_stats(
-						struct blkio_group *blkg) {}
-static inline void blkiocg_update_dequeue_stats(struct blkio_group *blkg,
-						unsigned long dequeue) {}
-static inline void blkiocg_update_set_idle_time_stats(struct blkio_group *blkg)
-{}
-static inline void blkiocg_update_idle_time_stats(struct blkio_group *blkg) {}
-static inline void blkiocg_set_start_empty_time(struct blkio_group *blkg) {}
-#endif
+	return v;
+}
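
As an illustrative sketch (not part of the patch), updates are serialized by the caller while reads need no locking; "example_group" and its "time" field are hypothetical:

	static void example_charge_time(struct example_group *eg, uint64_t ns)
	{
		blkg_stat_add(&eg->time, ns);	/* caller serializes, e.g. under queue_lock */
	}

	static uint64_t example_read_time(struct example_group *eg)
	{
		return blkg_stat_read(&eg->time);	/* lockless, u64-safe */
	}
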
 
-#if defined(CONFIG_BLK_CGROUP) || defined(CONFIG_BLK_CGROUP_MODULE)
-extern struct blkio_cgroup blkio_root_cgroup;
-extern struct blkio_cgroup *cgroup_to_blkio_cgroup(struct cgroup *cgroup);
-extern struct blkio_cgroup *task_blkio_cgroup(struct task_struct *tsk);
-extern void blkiocg_add_blkio_group(struct blkio_cgroup *blkcg,
-	struct blkio_group *blkg, void *key, dev_t dev,
-	enum blkio_policy_id plid);
-extern int blkio_alloc_blkg_stats(struct blkio_group *blkg);
-extern int blkiocg_del_blkio_group(struct blkio_group *blkg);
-extern struct blkio_group *blkiocg_lookup_group(struct blkio_cgroup *blkcg,
-						void *key);
-void blkiocg_update_timeslice_used(struct blkio_group *blkg,
-					unsigned long time,
-					unsigned long unaccounted_time);
-void blkiocg_update_dispatch_stats(struct blkio_group *blkg, uint64_t bytes,
-						bool direction, bool sync);
-void blkiocg_update_completion_stats(struct blkio_group *blkg,
-	uint64_t start_time, uint64_t io_start_time, bool direction, bool sync);
-void blkiocg_update_io_merged_stats(struct blkio_group *blkg, bool direction,
-					bool sync);
-void blkiocg_update_io_add_stats(struct blkio_group *blkg,
-		struct blkio_group *curr_blkg, bool direction, bool sync);
-void blkiocg_update_io_remove_stats(struct blkio_group *blkg,
-					bool direction, bool sync);
-#else
+/**
+ * blkg_stat_reset - reset a blkg_stat
+ * @stat: blkg_stat to reset
+ */
+static inline void blkg_stat_reset(struct blkg_stat *stat)
+{
+	stat->cnt = 0;
+}
+
+/**
+ * blkg_rwstat_add - add a value to a blkg_rwstat
+ * @rwstat: target blkg_rwstat
+ * @rw: mask of REQ_{WRITE|SYNC}
+ * @val: value to add
+ *
+ * Add @val to @rwstat.  The counters are chosen according to @rw.  The
+ * caller is responsible for synchronizing calls to this function.
+ */
+static inline void blkg_rwstat_add(struct blkg_rwstat *rwstat,
+				   int rw, uint64_t val)
+{
+	u64_stats_update_begin(&rwstat->syncp);
+
+	if (rw & REQ_WRITE)
+		rwstat->cnt[BLKG_RWSTAT_WRITE] += val;
+	else
+		rwstat->cnt[BLKG_RWSTAT_READ] += val;
+	if (rw & REQ_SYNC)
+		rwstat->cnt[BLKG_RWSTAT_SYNC] += val;
+	else
+		rwstat->cnt[BLKG_RWSTAT_ASYNC] += val;
+
+	u64_stats_update_end(&rwstat->syncp);
+}
+
+/**
+ * blkg_rwstat_read - read the current values of a blkg_rwstat
+ * @rwstat: blkg_rwstat to read
+ *
+ * Read the current snapshot of @rwstat and return it as the return value.
+ * This function can be called without synchronization and takes care of
+ * u64 atomicity.
+ */
+static inline struct blkg_rwstat blkg_rwstat_read(struct blkg_rwstat *rwstat)
+{
+	unsigned int start;
+	struct blkg_rwstat tmp;
+
+	do {
+		start = u64_stats_fetch_begin(&rwstat->syncp);
+		tmp = *rwstat;
+	} while (u64_stats_fetch_retry(&rwstat->syncp, start));
+
+	return tmp;
+}
+
+/**
+ * blkg_rwstat_sum - read the total count of a blkg_rwstat
+ * @rwstat: blkg_rwstat to read
+ *
+ * Return the total count of @rwstat regardless of the IO direction.  This
+ * function can be called without synchronization and takes care of u64
+ * atomicity.
+ */
+static inline uint64_t blkg_rwstat_sum(struct blkg_rwstat *rwstat)
+{
+	struct blkg_rwstat tmp = blkg_rwstat_read(rwstat);
+
+	return tmp.cnt[BLKG_RWSTAT_READ] + tmp.cnt[BLKG_RWSTAT_WRITE];
+}
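
A hedged accounting sketch (not part of the patch); the rw/sync split comes straight from the bio flags, and "example_group" is hypothetical:

	static void example_charge_bio(struct example_group *eg, struct bio *bio)
	{
		blkg_rwstat_add(&eg->serviced, bio->bi_rw, 1);
		/* blkg_rwstat_sum(&eg->serviced) later yields reads + writes */
	}
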
+
+/**
+ * blkg_rwstat_reset - reset a blkg_rwstat
+ * @rwstat: blkg_rwstat to reset
+ */
+static inline void blkg_rwstat_reset(struct blkg_rwstat *rwstat)
+{
+	memset(rwstat->cnt, 0, sizeof(rwstat->cnt));
+}
+
+#else	/* CONFIG_BLK_CGROUP */
+
 struct cgroup;
-static inline struct blkio_cgroup *
-cgroup_to_blkio_cgroup(struct cgroup *cgroup) { return NULL; }
-static inline struct blkio_cgroup *
-task_blkio_cgroup(struct task_struct *tsk) { return NULL; }
 
-static inline void blkiocg_add_blkio_group(struct blkio_cgroup *blkcg,
-		struct blkio_group *blkg, void *key, dev_t dev,
-		enum blkio_policy_id plid) {}
+struct blkg_policy_data {
+};
 
-static inline int blkio_alloc_blkg_stats(struct blkio_group *blkg) { return 0; }
+struct blkcg_gq {
+};
 
-static inline int
-blkiocg_del_blkio_group(struct blkio_group *blkg) { return 0; }
+struct blkcg_policy {
+};
 
-static inline struct blkio_group *
-blkiocg_lookup_group(struct blkio_cgroup *blkcg, void *key) { return NULL; }
-static inline void blkiocg_update_timeslice_used(struct blkio_group *blkg,
-						unsigned long time,
-						unsigned long unaccounted_time)
-{}
-static inline void blkiocg_update_dispatch_stats(struct blkio_group *blkg,
-				uint64_t bytes, bool direction, bool sync) {}
-static inline void blkiocg_update_completion_stats(struct blkio_group *blkg,
-		uint64_t start_time, uint64_t io_start_time, bool direction,
-		bool sync) {}
-static inline void blkiocg_update_io_merged_stats(struct blkio_group *blkg,
-						bool direction, bool sync) {}
-static inline void blkiocg_update_io_add_stats(struct blkio_group *blkg,
-		struct blkio_group *curr_blkg, bool direction, bool sync) {}
-static inline void blkiocg_update_io_remove_stats(struct blkio_group *blkg,
-						bool direction, bool sync) {}
-#endif
-#endif /* _BLK_CGROUP_H */
+static inline struct blkcg *cgroup_to_blkcg(struct cgroup *cgroup) { return NULL; }
+static inline struct blkcg *bio_blkcg(struct bio *bio) { return NULL; }
+static inline struct blkcg_gq *blkg_lookup(struct blkcg *blkcg, void *key) { return NULL; }
+static inline int blkcg_init_queue(struct request_queue *q) { return 0; }
+static inline void blkcg_drain_queue(struct request_queue *q) { }
+static inline void blkcg_exit_queue(struct request_queue *q) { }
+static inline int blkcg_policy_register(struct blkcg_policy *pol) { return 0; }
+static inline void blkcg_policy_unregister(struct blkcg_policy *pol) { }
+static inline int blkcg_activate_policy(struct request_queue *q,
+					const struct blkcg_policy *pol) { return 0; }
+static inline void blkcg_deactivate_policy(struct request_queue *q,
+					   const struct blkcg_policy *pol) { }
+
+static inline struct blkg_policy_data *blkg_to_pd(struct blkcg_gq *blkg,
+						  struct blkcg_policy *pol) { return NULL; }
+static inline struct blkcg_gq *pd_to_blkg(struct blkg_policy_data *pd) { return NULL; }
+static inline char *blkg_path(struct blkcg_gq *blkg) { return NULL; }
+static inline void blkg_get(struct blkcg_gq *blkg) { }
+static inline void blkg_put(struct blkcg_gq *blkg) { }
+
+#endif	/* CONFIG_BLK_CGROUP */
+#endif	/* _BLK_CGROUP_H */
diff --git a/block/blk-core.c b/block/blk-core.c
index 1f61b74..3c923a7 100644
--- a/block/blk-core.c
+++ b/block/blk-core.c
@@ -29,11 +29,13 @@
 #include <linux/fault-inject.h>
 #include <linux/list_sort.h>
 #include <linux/delay.h>
+#include <linux/ratelimit.h>
 
 #define CREATE_TRACE_POINTS
 #include <trace/events/block.h>
 
 #include "blk.h"
+#include "blk-cgroup.h"
 
 EXPORT_TRACEPOINT_SYMBOL_GPL(block_bio_remap);
 EXPORT_TRACEPOINT_SYMBOL_GPL(block_rq_remap);
@@ -280,7 +282,7 @@
  *
  *     This function does not cancel any asynchronous activity arising
 *     out of elevator or throttling code. That would require elevator_exit()
- *     and blk_throtl_exit() to be called with queue lock initialized.
+ *     and blkcg_exit_queue() to be called with queue lock initialized.
  *
  */
 void blk_sync_queue(struct request_queue *q)
@@ -365,17 +367,23 @@
 
 		spin_lock_irq(q->queue_lock);
 
-		elv_drain_elevator(q);
-		if (drain_all)
-			blk_throtl_drain(q);
+		/*
+		 * The caller might be trying to drain @q before its
+		 * elevator is initialized.
+		 */
+		if (q->elevator)
+			elv_drain_elevator(q);
+
+		blkcg_drain_queue(q);
 
 		/*
 		 * This function might be called on a queue which failed
-		 * driver init after queue creation.  Some drivers
-		 * (e.g. fd) get unhappy in such cases.  Kick queue iff
-		 * dispatch queue has something on it.
+		 * driver init after queue creation or is not yet fully
+		 * active.  Some drivers (e.g. fd and loop) get unhappy
+		 * in such cases.  Kick queue iff dispatch queue has
+		 * something on it and @q has request_fn set.
 		 */
-		if (!list_empty(&q->queue_head))
+		if (!list_empty(&q->queue_head) && q->request_fn)
 			__blk_run_queue(q);
 
 		drain |= q->rq.elvpriv;
@@ -403,6 +411,49 @@
 }
 
 /**
+ * blk_queue_bypass_start - enter queue bypass mode
+ * @q: queue of interest
+ *
+ * In bypass mode, only the dispatch FIFO queue of @q is used.  This
+ * function makes @q enter bypass mode and drains all requests which were
+ * throttled or issued before.  On return, it's guaranteed that no request
+ * is being throttled or has ELVPRIV set, and blk_queue_bypass() is %true
+ * inside the queue lock or an RCU read lock.
+ */
+void blk_queue_bypass_start(struct request_queue *q)
+{
+	bool drain;
+
+	spin_lock_irq(q->queue_lock);
+	drain = !q->bypass_depth++;
+	queue_flag_set(QUEUE_FLAG_BYPASS, q);
+	spin_unlock_irq(q->queue_lock);
+
+	if (drain) {
+		blk_drain_queue(q, false);
+		/* ensure blk_queue_bypass() is %true inside RCU read lock */
+		synchronize_rcu();
+	}
+}
+EXPORT_SYMBOL_GPL(blk_queue_bypass_start);
+
+/**
+ * blk_queue_bypass_end - leave queue bypass mode
+ * @q: queue of interest
+ *
+ * Leave bypass mode and restore the normal queueing behavior.
+ */
+void blk_queue_bypass_end(struct request_queue *q)
+{
+	spin_lock_irq(q->queue_lock);
+	if (!--q->bypass_depth)
+		queue_flag_clear(QUEUE_FLAG_BYPASS, q);
+	WARN_ON_ONCE(q->bypass_depth < 0);
+	spin_unlock_irq(q->queue_lock);
+}
+EXPORT_SYMBOL_GPL(blk_queue_bypass_end);
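
A minimal sketch of the intended pairing (the example_* helper is hypothetical, not part of the patch): bypass is entered and drained before an update that must not see ELVPRIV requests, then ended afterwards.

static void example_update_under_bypass(struct request_queue *q)
{
	blk_queue_bypass_start(q);	/* drain and enter bypass mode */
	/* ... modify elevator/blkcg state while only the dispatch FIFO is used ... */
	blk_queue_bypass_end(q);	/* restore normal queueing */
}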
+
+/**
  * blk_cleanup_queue - shutdown a request queue
  * @q: request queue to shutdown
  *
@@ -418,6 +469,19 @@
 	queue_flag_set_unlocked(QUEUE_FLAG_DEAD, q);
 
 	spin_lock_irq(lock);
+
+	/*
+	 * Dead queue is permanently in bypass mode till released.  Note
+	 * that, unlike blk_queue_bypass_start(), we aren't performing
+	 * synchronize_rcu() after entering bypass mode to avoid the delay
+	 * as some drivers create and destroy a lot of queues while
+	 * probing.  This is still safe because blk_release_queue() will be
+	 * called only after the queue refcnt drops to zero and nothing,
+	 * RCU or not, would be traversing the queue by then.
+	 */
+	q->bypass_depth++;
+	queue_flag_set(QUEUE_FLAG_BYPASS, q);
+
 	queue_flag_set(QUEUE_FLAG_NOMERGES, q);
 	queue_flag_set(QUEUE_FLAG_NOXMERGES, q);
 	queue_flag_set(QUEUE_FLAG_DEAD, q);
@@ -428,13 +492,8 @@
 	spin_unlock_irq(lock);
 	mutex_unlock(&q->sysfs_lock);
 
-	/*
-	 * Drain all requests queued before DEAD marking.  The caller might
-	 * be trying to tear down @q before its elevator is initialized, in
-	 * which case we don't want to call into draining.
-	 */
-	if (q->elevator)
-		blk_drain_queue(q, true);
+	/* drain all requests queued before DEAD marking */
+	blk_drain_queue(q, true);
 
 	/* @q won't process any more request, flush async actions */
 	del_timer_sync(&q->backing_dev_info.laptop_mode_wb_timer);
@@ -498,14 +557,15 @@
 	if (err)
 		goto fail_id;
 
-	if (blk_throtl_init(q))
-		goto fail_id;
-
 	setup_timer(&q->backing_dev_info.laptop_mode_wb_timer,
 		    laptop_mode_timer_fn, (unsigned long) q);
 	setup_timer(&q->timeout, blk_rq_timed_out_timer, (unsigned long) q);
+	INIT_LIST_HEAD(&q->queue_head);
 	INIT_LIST_HEAD(&q->timeout_list);
 	INIT_LIST_HEAD(&q->icq_list);
+#ifdef CONFIG_BLK_CGROUP
+	INIT_LIST_HEAD(&q->blkg_list);
+#endif
 	INIT_LIST_HEAD(&q->flush_queue[0]);
 	INIT_LIST_HEAD(&q->flush_queue[1]);
 	INIT_LIST_HEAD(&q->flush_data_in_flight);
@@ -522,6 +582,18 @@
 	 */
 	q->queue_lock = &q->__queue_lock;
 
+	/*
+	 * A queue starts its life with bypass turned on to avoid
+	 * unnecessary bypass on/off overhead and nasty surprises during
+	 * init.  The initial bypass will be finished at the end of
+	 * blk_init_allocated_queue().
+	 */
+	q->bypass_depth = 1;
+	__set_bit(QUEUE_FLAG_BYPASS, &q->queue_flags);
+
+	if (blkcg_init_queue(q))
+		goto fail_id;
+
 	return q;
 
 fail_id:
@@ -614,15 +686,15 @@
 
 	q->sg_reserved_size = INT_MAX;
 
-	/*
-	 * all done
-	 */
-	if (!elevator_init(q, NULL)) {
-		blk_queue_congestion_threshold(q);
-		return q;
-	}
+	/* init elevator */
+	if (elevator_init(q, NULL))
+		return NULL;
 
-	return NULL;
+	blk_queue_congestion_threshold(q);
+
+	/* all done, end the initial bypass */
+	blk_queue_bypass_end(q);
+	return q;
 }
 EXPORT_SYMBOL(blk_init_allocated_queue);
 
@@ -648,33 +720,6 @@
 	mempool_free(rq, q->rq.rq_pool);
 }
 
-static struct request *
-blk_alloc_request(struct request_queue *q, struct io_cq *icq,
-		  unsigned int flags, gfp_t gfp_mask)
-{
-	struct request *rq = mempool_alloc(q->rq.rq_pool, gfp_mask);
-
-	if (!rq)
-		return NULL;
-
-	blk_rq_init(q, rq);
-
-	rq->cmd_flags = flags | REQ_ALLOCED;
-
-	if (flags & REQ_ELVPRIV) {
-		rq->elv.icq = icq;
-		if (unlikely(elv_set_request(q, rq, gfp_mask))) {
-			mempool_free(rq, q->rq.rq_pool);
-			return NULL;
-		}
-		/* @rq->elv.icq holds on to io_context until @rq is freed */
-		if (icq)
-			get_io_context(icq->ioc);
-	}
-
-	return rq;
-}
-
 /*
  * ioc_batching returns true if the ioc is a valid batching request and
  * should be given priority access to a request.
@@ -763,6 +808,22 @@
 }
 
 /**
+ * rq_ioc - determine io_context for request allocation
+ * @bio: bio that the request being allocated is for (can be %NULL)
+ *
+ * Determine io_context to use for request allocation for @bio.  May return
+ * %NULL if %current->io_context doesn't exist.
+ */
+static struct io_context *rq_ioc(struct bio *bio)
+{
+#ifdef CONFIG_BLK_CGROUP
+	if (bio && bio->bi_ioc)
+		return bio->bi_ioc;
+#endif
+	return current->io_context;
+}
+
+/**
  * get_request - get a free request
  * @q: request_queue to allocate request from
  * @rw_flags: RW and SYNC flags
@@ -779,7 +840,7 @@
 static struct request *get_request(struct request_queue *q, int rw_flags,
 				   struct bio *bio, gfp_t gfp_mask)
 {
-	struct request *rq = NULL;
+	struct request *rq;
 	struct request_list *rl = &q->rq;
 	struct elevator_type *et;
 	struct io_context *ioc;
@@ -789,7 +850,7 @@
 	int may_queue;
 retry:
 	et = q->elevator->type;
-	ioc = current->io_context;
+	ioc = rq_ioc(bio);
 
 	if (unlikely(blk_queue_dead(q)))
 		return NULL;
@@ -808,7 +869,7 @@
 			 */
 			if (!ioc && !retried) {
 				spin_unlock_irq(q->queue_lock);
-				create_io_context(current, gfp_mask, q->node);
+				create_io_context(gfp_mask, q->node);
 				spin_lock_irq(q->queue_lock);
 				retried = true;
 				goto retry;
@@ -831,7 +892,7 @@
 					 * process is not a "batcher", and not
 					 * exempted by the IO scheduler
 					 */
-					goto out;
+					return NULL;
 				}
 			}
 		}
@@ -844,7 +905,7 @@
 	 * allocated with any setting of ->nr_requests
 	 */
 	if (rl->count[is_sync] >= (3 * q->nr_requests / 2))
-		goto out;
+		return NULL;
 
 	rl->count[is_sync]++;
 	rl->starved[is_sync] = 0;
@@ -859,8 +920,7 @@
 	 * Also, lookup icq while holding queue_lock.  If it doesn't exist,
 	 * it will be created after releasing queue_lock.
 	 */
-	if (blk_rq_should_init_elevator(bio) &&
-	    !test_bit(QUEUE_FLAG_ELVSWITCH, &q->queue_flags)) {
+	if (blk_rq_should_init_elevator(bio) && !blk_queue_bypass(q)) {
 		rw_flags |= REQ_ELVPRIV;
 		rl->elvpriv++;
 		if (et->icq_cache && ioc)
@@ -871,41 +931,36 @@
 		rw_flags |= REQ_IO_STAT;
 	spin_unlock_irq(q->queue_lock);
 
-	/* create icq if missing */
-	if ((rw_flags & REQ_ELVPRIV) && unlikely(et->icq_cache && !icq)) {
-		icq = ioc_create_icq(q, gfp_mask);
-		if (!icq)
-			goto fail_icq;
+	/* allocate and init request */
+	rq = mempool_alloc(q->rq.rq_pool, gfp_mask);
+	if (!rq)
+		goto fail_alloc;
+
+	blk_rq_init(q, rq);
+	rq->cmd_flags = rw_flags | REQ_ALLOCED;
+
+	/* init elvpriv */
+	if (rw_flags & REQ_ELVPRIV) {
+		if (unlikely(et->icq_cache && !icq)) {
+			create_io_context(gfp_mask, q->node);
+			ioc = rq_ioc(bio);
+			if (!ioc)
+				goto fail_elvpriv;
+
+			icq = ioc_create_icq(ioc, q, gfp_mask);
+			if (!icq)
+				goto fail_elvpriv;
+		}
+
+		rq->elv.icq = icq;
+		if (unlikely(elv_set_request(q, rq, bio, gfp_mask)))
+			goto fail_elvpriv;
+
+		/* @rq->elv.icq holds io_context until @rq is freed */
+		if (icq)
+			get_io_context(icq->ioc);
 	}
-
-	rq = blk_alloc_request(q, icq, rw_flags, gfp_mask);
-
-fail_icq:
-	if (unlikely(!rq)) {
-		/*
-		 * Allocation failed presumably due to memory. Undo anything
-		 * we might have messed up.
-		 *
-		 * Allocating task should really be put onto the front of the
-		 * wait queue, but this is pretty rare.
-		 */
-		spin_lock_irq(q->queue_lock);
-		freed_request(q, rw_flags);
-
-		/*
-		 * in the very unlikely event that allocation failed and no
-		 * requests for this direction was pending, mark us starved
-		 * so that freeing of a request in the other direction will
-		 * notice us. another possible fix would be to split the
-		 * rq mempool into READ and WRITE
-		 */
-rq_starved:
-		if (unlikely(rl->count[is_sync] == 0))
-			rl->starved[is_sync] = 1;
-
-		goto out;
-	}
-
+out:
 	/*
 	 * ioc may be NULL here, and ioc_batching will be false. That's
 	 * OK, if the queue is under the request limit then requests need
@@ -916,8 +971,48 @@
 		ioc->nr_batch_requests--;
 
 	trace_block_getrq(q, bio, rw_flags & 1);
-out:
 	return rq;
+
+fail_elvpriv:
+	/*
+	 * elvpriv init failed.  ioc, icq and elvpriv aren't mempool backed
+	 * and may fail indefinitely under memory pressure and thus
+	 * shouldn't stall IO.  Treat this request as !elvpriv.  This will
+	 * disturb iosched and blkcg but weird is better than dead.
+	 */
+	printk_ratelimited(KERN_WARNING "%s: request aux data allocation failed, iosched may be disturbed\n",
+			   dev_name(q->backing_dev_info.dev));
+
+	rq->cmd_flags &= ~REQ_ELVPRIV;
+	rq->elv.icq = NULL;
+
+	spin_lock_irq(q->queue_lock);
+	rl->elvpriv--;
+	spin_unlock_irq(q->queue_lock);
+	goto out;
+
+fail_alloc:
+	/*
+	 * Allocation failed presumably due to memory. Undo anything we
+	 * might have messed up.
+	 *
+	 * Allocating task should really be put onto the front of the wait
+	 * queue, but this is pretty rare.
+	 */
+	spin_lock_irq(q->queue_lock);
+	freed_request(q, rw_flags);
+
+	/*
+	 * In the very unlikely event that allocation failed and no
+	 * requests for this direction were pending, mark us starved so that
+	 * freeing of a request in the other direction will notice
+	 * us.  Another possible fix would be to split the rq mempool into
+	 * READ and WRITE.
+	 */
+rq_starved:
+	if (unlikely(rl->count[is_sync] == 0))
+		rl->starved[is_sync] = 1;
+	return NULL;
 }
 
 /**
@@ -961,7 +1056,7 @@
 		 * up to a big batch of them for a small period time.
 		 * See ioc_batching, ioc_set_batching
 		 */
-		create_io_context(current, GFP_NOIO, q->node);
+		create_io_context(GFP_NOIO, q->node);
 		ioc_set_batching(q, current->io_context);
 
 		spin_lock_irq(q->queue_lock);
diff --git a/block/blk-ioc.c b/block/blk-ioc.c
index fb95dd2..893b800 100644
--- a/block/blk-ioc.c
+++ b/block/blk-ioc.c
@@ -155,20 +155,20 @@
 }
 EXPORT_SYMBOL(put_io_context);
 
-/* Called by the exiting task */
-void exit_io_context(struct task_struct *task)
+/**
+ * put_io_context_active - put active reference on ioc
+ * @ioc: ioc of interest
+ *
+ * Undo get_io_context_active().  If the active reference reaches zero
+ * after put, @ioc can never issue further IOs and ioscheds are notified.
+ */
+void put_io_context_active(struct io_context *ioc)
 {
-	struct io_context *ioc;
-	struct io_cq *icq;
 	struct hlist_node *n;
 	unsigned long flags;
+	struct io_cq *icq;
 
-	task_lock(task);
-	ioc = task->io_context;
-	task->io_context = NULL;
-	task_unlock(task);
-
-	if (!atomic_dec_and_test(&ioc->nr_tasks)) {
+	if (!atomic_dec_and_test(&ioc->active_ref)) {
 		put_io_context(ioc);
 		return;
 	}
@@ -197,6 +197,20 @@
 	put_io_context(ioc);
 }
 
+/* Called by the exiting task */
+void exit_io_context(struct task_struct *task)
+{
+	struct io_context *ioc;
+
+	task_lock(task);
+	ioc = task->io_context;
+	task->io_context = NULL;
+	task_unlock(task);
+
+	atomic_dec(&ioc->nr_tasks);
+	put_io_context_active(ioc);
+}
+
 /**
  * ioc_clear_queue - break any ioc association with the specified queue
  * @q: request_queue being cleared
@@ -218,19 +232,19 @@
 	}
 }
 
-void create_io_context_slowpath(struct task_struct *task, gfp_t gfp_flags,
-				int node)
+int create_task_io_context(struct task_struct *task, gfp_t gfp_flags, int node)
 {
 	struct io_context *ioc;
+	int ret;
 
 	ioc = kmem_cache_alloc_node(iocontext_cachep, gfp_flags | __GFP_ZERO,
 				    node);
 	if (unlikely(!ioc))
-		return;
+		return -ENOMEM;
 
 	/* initialize */
 	atomic_long_set(&ioc->refcount, 1);
-	atomic_set(&ioc->nr_tasks, 1);
+	atomic_set(&ioc->active_ref, 1);
 	spin_lock_init(&ioc->lock);
 	INIT_RADIX_TREE(&ioc->icq_tree, GFP_ATOMIC | __GFP_HIGH);
 	INIT_HLIST_HEAD(&ioc->icq_list);
@@ -249,7 +263,12 @@
 		task->io_context = ioc;
 	else
 		kmem_cache_free(iocontext_cachep, ioc);
+
+	ret = task->io_context ? 0 : -EBUSY;
+
 	task_unlock(task);
+
+	return ret;
 }
 
 /**
@@ -281,7 +300,7 @@
 			return ioc;
 		}
 		task_unlock(task);
-	} while (create_io_context(task, gfp_flags, node));
+	} while (!create_task_io_context(task, gfp_flags, node));
 
 	return NULL;
 }
@@ -325,26 +344,23 @@
 
 /**
  * ioc_create_icq - create and link io_cq
+ * @ioc: io_context of interest
  * @q: request_queue of interest
  * @gfp_mask: allocation mask
  *
- * Make sure io_cq linking %current->io_context and @q exists.  If either
- * io_context and/or icq don't exist, they will be created using @gfp_mask.
+ * Make sure io_cq linking @ioc and @q exists.  If the icq doesn't exist,
+ * it will be created using @gfp_mask.
  *
  * The caller is responsible for ensuring @ioc won't go away and @q is
  * alive and will stay alive until this function returns.
  */
-struct io_cq *ioc_create_icq(struct request_queue *q, gfp_t gfp_mask)
+struct io_cq *ioc_create_icq(struct io_context *ioc, struct request_queue *q,
+			     gfp_t gfp_mask)
 {
 	struct elevator_type *et = q->elevator->type;
-	struct io_context *ioc;
 	struct io_cq *icq;
 
 	/* allocate stuff */
-	ioc = create_io_context(current, gfp_mask, q->node);
-	if (!ioc)
-		return NULL;
-
 	icq = kmem_cache_alloc_node(et->icq_cache, gfp_mask | __GFP_ZERO,
 				    q->node);
 	if (!icq)
@@ -382,74 +398,6 @@
 	return icq;
 }
 
-void ioc_set_icq_flags(struct io_context *ioc, unsigned int flags)
-{
-	struct io_cq *icq;
-	struct hlist_node *n;
-
-	hlist_for_each_entry(icq, n, &ioc->icq_list, ioc_node)
-		icq->flags |= flags;
-}
-
-/**
- * ioc_ioprio_changed - notify ioprio change
- * @ioc: io_context of interest
- * @ioprio: new ioprio
- *
- * @ioc's ioprio has changed to @ioprio.  Set %ICQ_IOPRIO_CHANGED for all
- * icq's.  iosched is responsible for checking the bit and applying it on
- * request issue path.
- */
-void ioc_ioprio_changed(struct io_context *ioc, int ioprio)
-{
-	unsigned long flags;
-
-	spin_lock_irqsave(&ioc->lock, flags);
-	ioc->ioprio = ioprio;
-	ioc_set_icq_flags(ioc, ICQ_IOPRIO_CHANGED);
-	spin_unlock_irqrestore(&ioc->lock, flags);
-}
-
-/**
- * ioc_cgroup_changed - notify cgroup change
- * @ioc: io_context of interest
- *
- * @ioc's cgroup has changed.  Set %ICQ_CGROUP_CHANGED for all icq's.
- * iosched is responsible for checking the bit and applying it on request
- * issue path.
- */
-void ioc_cgroup_changed(struct io_context *ioc)
-{
-	unsigned long flags;
-
-	spin_lock_irqsave(&ioc->lock, flags);
-	ioc_set_icq_flags(ioc, ICQ_CGROUP_CHANGED);
-	spin_unlock_irqrestore(&ioc->lock, flags);
-}
-EXPORT_SYMBOL(ioc_cgroup_changed);
-
-/**
- * icq_get_changed - fetch and clear icq changed mask
- * @icq: icq of interest
- *
- * Fetch and clear ICQ_*_CHANGED bits from @icq.  Grabs and releases
- * @icq->ioc->lock.
- */
-unsigned icq_get_changed(struct io_cq *icq)
-{
-	unsigned int changed = 0;
-	unsigned long flags;
-
-	if (unlikely(icq->flags & ICQ_CHANGED_MASK)) {
-		spin_lock_irqsave(&icq->ioc->lock, flags);
-		changed = icq->flags & ICQ_CHANGED_MASK;
-		icq->flags &= ~ICQ_CHANGED_MASK;
-		spin_unlock_irqrestore(&icq->ioc->lock, flags);
-	}
-	return changed;
-}
-EXPORT_SYMBOL(icq_get_changed);
-
 static int __init blk_ioc_init(void)
 {
 	iocontext_cachep = kmem_cache_create("blkdev_ioc",
diff --git a/block/blk-sysfs.c b/block/blk-sysfs.c
index cf15001..aa41b47 100644
--- a/block/blk-sysfs.c
+++ b/block/blk-sysfs.c
@@ -9,6 +9,7 @@
 #include <linux/blktrace_api.h>
 
 #include "blk.h"
+#include "blk-cgroup.h"
 
 struct queue_sysfs_entry {
 	struct attribute attr;
@@ -479,6 +480,8 @@
 
 	blk_sync_queue(q);
 
+	blkcg_exit_queue(q);
+
 	if (q->elevator) {
 		spin_lock_irq(q->queue_lock);
 		ioc_clear_queue(q);
@@ -486,15 +489,12 @@
 		elevator_exit(q->elevator);
 	}
 
-	blk_throtl_exit(q);
-
 	if (rl->rq_pool)
 		mempool_destroy(rl->rq_pool);
 
 	if (q->queue_tags)
 		__blk_queue_free_tags(q);
 
-	blk_throtl_release(q);
 	blk_trace_shutdown(q);
 
 	bdi_destroy(&q->backing_dev_info);
diff --git a/block/blk-throttle.c b/block/blk-throttle.c
index f2ddb94..5b06595 100644
--- a/block/blk-throttle.c
+++ b/block/blk-throttle.c
@@ -21,6 +21,8 @@
 /* Throttling is performed over 100ms slice and after that slice is renewed */
 static unsigned long throtl_slice = HZ/10;	/* 100 ms */
 
+static struct blkcg_policy blkcg_policy_throtl;
+
 /* A workqueue to queue throttle related work */
 static struct workqueue_struct *kthrotld_workqueue;
 static void throtl_schedule_delayed_work(struct throtl_data *td,
@@ -38,9 +40,17 @@
 
 #define rb_entry_tg(node)	rb_entry((node), struct throtl_grp, rb_node)
 
+/* Per-cpu group stats */
+struct tg_stats_cpu {
+	/* total bytes transferred */
+	struct blkg_rwstat		service_bytes;
+	/* total IOs serviced, post merge */
+	struct blkg_rwstat		serviced;
+};
+
 struct throtl_grp {
-	/* List of throtl groups on the request queue*/
-	struct hlist_node tg_node;
+	/* must be the first member */
+	struct blkg_policy_data pd;
 
 	/* active throtl group service_tree member */
 	struct rb_node rb_node;
@@ -52,8 +62,6 @@
 	 */
 	unsigned long disptime;
 
-	struct blkio_group blkg;
-	atomic_t ref;
 	unsigned int flags;
 
 	/* Two lists for READ and WRITE */
@@ -80,18 +88,18 @@
 	/* Some throttle limits got updated for the group */
 	int limits_changed;
 
-	struct rcu_head rcu_head;
+	/* Per cpu stats pointer */
+	struct tg_stats_cpu __percpu *stats_cpu;
+
+	/* List of tgs waiting for per cpu stats memory to be allocated */
+	struct list_head stats_alloc_node;
 };
 
 struct throtl_data
 {
-	/* List of throtl groups */
-	struct hlist_head tg_list;
-
 	/* service tree for active throtl groups */
 	struct throtl_rb_root tg_service_tree;
 
-	struct throtl_grp *root_tg;
 	struct request_queue *queue;
 
 	/* Total Number of queued bios on READ and WRITE lists */
@@ -108,6 +116,33 @@
 	int limits_changed;
 };
 
+/* list and work item to allocate percpu group stats */
+static DEFINE_SPINLOCK(tg_stats_alloc_lock);
+static LIST_HEAD(tg_stats_alloc_list);
+
+static void tg_stats_alloc_fn(struct work_struct *);
+static DECLARE_DELAYED_WORK(tg_stats_alloc_work, tg_stats_alloc_fn);
+
+static inline struct throtl_grp *pd_to_tg(struct blkg_policy_data *pd)
+{
+	return pd ? container_of(pd, struct throtl_grp, pd) : NULL;
+}
+
+static inline struct throtl_grp *blkg_to_tg(struct blkcg_gq *blkg)
+{
+	return pd_to_tg(blkg_to_pd(blkg, &blkcg_policy_throtl));
+}
+
+static inline struct blkcg_gq *tg_to_blkg(struct throtl_grp *tg)
+{
+	return pd_to_blkg(&tg->pd);
+}
+
+static inline struct throtl_grp *td_root_tg(struct throtl_data *td)
+{
+	return blkg_to_tg(td->queue->root_blkg);
+}
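
Because pd is the first member of struct throtl_grp, these helpers are simple container_of() round trips. An illustrative check (hypothetical example_* helper, not patch code):

static void example_tg_roundtrip(struct blkcg_gq *blkg)
{
	struct throtl_grp *tg = blkg_to_tg(blkg);

	/* converting back must yield the original blkg */
	if (tg)
		WARN_ON(tg_to_blkg(tg) != blkg);
}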
+
 enum tg_state_flags {
 	THROTL_TG_FLAG_on_rr = 0,	/* on round-robin busy list */
 };
@@ -128,244 +163,150 @@
 
 THROTL_TG_FNS(on_rr);
 
-#define throtl_log_tg(td, tg, fmt, args...)				\
-	blk_add_trace_msg((td)->queue, "throtl %s " fmt,		\
-				blkg_path(&(tg)->blkg), ##args);      	\
+#define throtl_log_tg(td, tg, fmt, args...)	do {			\
+	char __pbuf[128];						\
+									\
+	blkg_path(tg_to_blkg(tg), __pbuf, sizeof(__pbuf));		\
+	blk_add_trace_msg((td)->queue, "throtl %s " fmt, __pbuf, ##args); \
+} while (0)
 
 #define throtl_log(td, fmt, args...)	\
 	blk_add_trace_msg((td)->queue, "throtl " fmt, ##args)
 
-static inline struct throtl_grp *tg_of_blkg(struct blkio_group *blkg)
-{
-	if (blkg)
-		return container_of(blkg, struct throtl_grp, blkg);
-
-	return NULL;
-}
-
 static inline unsigned int total_nr_queued(struct throtl_data *td)
 {
 	return td->nr_queued[0] + td->nr_queued[1];
 }
 
-static inline struct throtl_grp *throtl_ref_get_tg(struct throtl_grp *tg)
+/*
+ * Worker for allocating per-cpu stats for tgs.  This is scheduled on the
+ * system_nrt_wq once there are some groups on the alloc_list waiting for
+ * allocation.
+ */
+static void tg_stats_alloc_fn(struct work_struct *work)
 {
-	atomic_inc(&tg->ref);
-	return tg;
+	static struct tg_stats_cpu *stats_cpu;	/* this fn is non-reentrant */
+	struct delayed_work *dwork = to_delayed_work(work);
+	bool empty = false;
+
+alloc_stats:
+	if (!stats_cpu) {
+		stats_cpu = alloc_percpu(struct tg_stats_cpu);
+		if (!stats_cpu) {
+			/* allocation failed, try again after some time */
+			queue_delayed_work(system_nrt_wq, dwork,
+					   msecs_to_jiffies(10));
+			return;
+		}
+	}
+
+	spin_lock_irq(&tg_stats_alloc_lock);
+
+	if (!list_empty(&tg_stats_alloc_list)) {
+		struct throtl_grp *tg = list_first_entry(&tg_stats_alloc_list,
+							 struct throtl_grp,
+							 stats_alloc_node);
+		swap(tg->stats_cpu, stats_cpu);
+		list_del_init(&tg->stats_alloc_node);
+	}
+
+	empty = list_empty(&tg_stats_alloc_list);
+	spin_unlock_irq(&tg_stats_alloc_lock);
+	if (!empty)
+		goto alloc_stats;
 }
 
-static void throtl_free_tg(struct rcu_head *head)
+static void throtl_pd_init(struct blkcg_gq *blkg)
 {
-	struct throtl_grp *tg;
+	struct throtl_grp *tg = blkg_to_tg(blkg);
+	unsigned long flags;
 
-	tg = container_of(head, struct throtl_grp, rcu_head);
-	free_percpu(tg->blkg.stats_cpu);
-	kfree(tg);
-}
-
-static void throtl_put_tg(struct throtl_grp *tg)
-{
-	BUG_ON(atomic_read(&tg->ref) <= 0);
-	if (!atomic_dec_and_test(&tg->ref))
-		return;
-
-	/*
-	 * A group is freed in rcu manner. But having an rcu lock does not
-	 * mean that one can access all the fields of blkg and assume these
-	 * are valid. For example, don't try to follow throtl_data and
-	 * request queue links.
-	 *
-	 * Having a reference to blkg under an rcu allows acess to only
-	 * values local to groups like group stats and group rate limits
-	 */
-	call_rcu(&tg->rcu_head, throtl_free_tg);
-}
-
-static void throtl_init_group(struct throtl_grp *tg)
-{
-	INIT_HLIST_NODE(&tg->tg_node);
 	RB_CLEAR_NODE(&tg->rb_node);
 	bio_list_init(&tg->bio_lists[0]);
 	bio_list_init(&tg->bio_lists[1]);
 	tg->limits_changed = false;
 
-	/* Practically unlimited BW */
-	tg->bps[0] = tg->bps[1] = -1;
-	tg->iops[0] = tg->iops[1] = -1;
+	tg->bps[READ] = -1;
+	tg->bps[WRITE] = -1;
+	tg->iops[READ] = -1;
+	tg->iops[WRITE] = -1;
 
 	/*
-	 * Take the initial reference that will be released on destroy
-	 * This can be thought of a joint reference by cgroup and
-	 * request queue which will be dropped by either request queue
-	 * exit or cgroup deletion path depending on who is exiting first.
+	 * Ugh... We need to perform per-cpu allocation for tg->stats_cpu
+	 * but the percpu allocator can't be called from the IO path.  Queue
+	 * tg on tg_stats_alloc_list and allocate from a work item.
 	 */
-	atomic_set(&tg->ref, 1);
+	spin_lock_irqsave(&tg_stats_alloc_lock, flags);
+	list_add(&tg->stats_alloc_node, &tg_stats_alloc_list);
+	queue_delayed_work(system_nrt_wq, &tg_stats_alloc_work, 0);
+	spin_unlock_irqrestore(&tg_stats_alloc_lock, flags);
 }
 
-/* Should be called with rcu read lock held (needed for blkcg) */
-static void
-throtl_add_group_to_td_list(struct throtl_data *td, struct throtl_grp *tg)
+static void throtl_pd_exit(struct blkcg_gq *blkg)
 {
-	hlist_add_head(&tg->tg_node, &td->tg_list);
-	td->nr_undestroyed_grps++;
+	struct throtl_grp *tg = blkg_to_tg(blkg);
+	unsigned long flags;
+
+	spin_lock_irqsave(&tg_stats_alloc_lock, flags);
+	list_del_init(&tg->stats_alloc_node);
+	spin_unlock_irqrestore(&tg_stats_alloc_lock, flags);
+
+	free_percpu(tg->stats_cpu);
 }
 
-static void
-__throtl_tg_fill_dev_details(struct throtl_data *td, struct throtl_grp *tg)
+static void throtl_pd_reset_stats(struct blkcg_gq *blkg)
 {
-	struct backing_dev_info *bdi = &td->queue->backing_dev_info;
-	unsigned int major, minor;
+	struct throtl_grp *tg = blkg_to_tg(blkg);
+	int cpu;
 
-	if (!tg || tg->blkg.dev)
+	if (tg->stats_cpu == NULL)
 		return;
 
-	/*
-	 * Fill in device details for a group which might not have been
-	 * filled at group creation time as queue was being instantiated
-	 * and driver had not attached a device yet
-	 */
-	if (bdi->dev && dev_name(bdi->dev)) {
-		sscanf(dev_name(bdi->dev), "%u:%u", &major, &minor);
-		tg->blkg.dev = MKDEV(major, minor);
+	for_each_possible_cpu(cpu) {
+		struct tg_stats_cpu *sc = per_cpu_ptr(tg->stats_cpu, cpu);
+
+		blkg_rwstat_reset(&sc->service_bytes);
+		blkg_rwstat_reset(&sc->serviced);
 	}
 }
 
-/*
- * Should be called with without queue lock held. Here queue lock will be
- * taken rarely. It will be taken only once during life time of a group
- * if need be
- */
-static void
-throtl_tg_fill_dev_details(struct throtl_data *td, struct throtl_grp *tg)
+static struct throtl_grp *throtl_lookup_tg(struct throtl_data *td,
+					   struct blkcg *blkcg)
 {
-	if (!tg || tg->blkg.dev)
-		return;
-
-	spin_lock_irq(td->queue->queue_lock);
-	__throtl_tg_fill_dev_details(td, tg);
-	spin_unlock_irq(td->queue->queue_lock);
-}
-
-static void throtl_init_add_tg_lists(struct throtl_data *td,
-			struct throtl_grp *tg, struct blkio_cgroup *blkcg)
-{
-	__throtl_tg_fill_dev_details(td, tg);
-
-	/* Add group onto cgroup list */
-	blkiocg_add_blkio_group(blkcg, &tg->blkg, (void *)td,
-				tg->blkg.dev, BLKIO_POLICY_THROTL);
-
-	tg->bps[READ] = blkcg_get_read_bps(blkcg, tg->blkg.dev);
-	tg->bps[WRITE] = blkcg_get_write_bps(blkcg, tg->blkg.dev);
-	tg->iops[READ] = blkcg_get_read_iops(blkcg, tg->blkg.dev);
-	tg->iops[WRITE] = blkcg_get_write_iops(blkcg, tg->blkg.dev);
-
-	throtl_add_group_to_td_list(td, tg);
-}
-
-/* Should be called without queue lock and outside of rcu period */
-static struct throtl_grp *throtl_alloc_tg(struct throtl_data *td)
-{
-	struct throtl_grp *tg = NULL;
-	int ret;
-
-	tg = kzalloc_node(sizeof(*tg), GFP_ATOMIC, td->queue->node);
-	if (!tg)
-		return NULL;
-
-	ret = blkio_alloc_blkg_stats(&tg->blkg);
-
-	if (ret) {
-		kfree(tg);
-		return NULL;
-	}
-
-	throtl_init_group(tg);
-	return tg;
-}
-
-static struct
-throtl_grp *throtl_find_tg(struct throtl_data *td, struct blkio_cgroup *blkcg)
-{
-	struct throtl_grp *tg = NULL;
-	void *key = td;
-
 	/*
-	 * This is the common case when there are no blkio cgroups.
- 	 * Avoid lookup in this case
- 	 */
-	if (blkcg == &blkio_root_cgroup)
-		tg = td->root_tg;
-	else
-		tg = tg_of_blkg(blkiocg_lookup_group(blkcg, key));
+	 * This is the common case when there are no blkcgs.  Avoid lookup
+	 * in this case
+	 */
+	if (blkcg == &blkcg_root)
+		return td_root_tg(td);
 
-	__throtl_tg_fill_dev_details(td, tg);
-	return tg;
+	return blkg_to_tg(blkg_lookup(blkcg, td->queue));
 }
 
-static struct throtl_grp * throtl_get_tg(struct throtl_data *td)
+static struct throtl_grp *throtl_lookup_create_tg(struct throtl_data *td,
+						  struct blkcg *blkcg)
 {
-	struct throtl_grp *tg = NULL, *__tg = NULL;
-	struct blkio_cgroup *blkcg;
 	struct request_queue *q = td->queue;
-
-	/* no throttling for dead queue */
-	if (unlikely(blk_queue_dead(q)))
-		return NULL;
-
-	rcu_read_lock();
-	blkcg = task_blkio_cgroup(current);
-	tg = throtl_find_tg(td, blkcg);
-	if (tg) {
-		rcu_read_unlock();
-		return tg;
-	}
+	struct throtl_grp *tg = NULL;
 
 	/*
-	 * Need to allocate a group. Allocation of group also needs allocation
-	 * of per cpu stats which in-turn takes a mutex() and can block. Hence
-	 * we need to drop rcu lock and queue_lock before we call alloc.
+	 * This is the common case when there are no blkcgs.  Avoid lookup
+	 * in this case
 	 */
-	rcu_read_unlock();
-	spin_unlock_irq(q->queue_lock);
+	if (blkcg == &blkcg_root) {
+		tg = td_root_tg(td);
+	} else {
+		struct blkcg_gq *blkg;
 
-	tg = throtl_alloc_tg(td);
+		blkg = blkg_lookup_create(blkcg, q);
 
-	/* Group allocated and queue is still alive. take the lock */
-	spin_lock_irq(q->queue_lock);
-
-	/* Make sure @q is still alive */
-	if (unlikely(blk_queue_dead(q))) {
-		kfree(tg);
-		return NULL;
+		/* if %NULL and @q is alive, fall back to root_tg */
+		if (!IS_ERR(blkg))
+			tg = blkg_to_tg(blkg);
+		else if (!blk_queue_dead(q))
+			tg = td_root_tg(td);
 	}
 
-	/*
-	 * Initialize the new group. After sleeping, read the blkcg again.
-	 */
-	rcu_read_lock();
-	blkcg = task_blkio_cgroup(current);
-
-	/*
-	 * If some other thread already allocated the group while we were
-	 * not holding queue lock, free up the group
-	 */
-	__tg = throtl_find_tg(td, blkcg);
-
-	if (__tg) {
-		kfree(tg);
-		rcu_read_unlock();
-		return __tg;
-	}
-
-	/* Group allocation failed. Account the IO to root group */
-	if (!tg) {
-		tg = td->root_tg;
-		return tg;
-	}
-
-	throtl_init_add_tg_lists(td, tg, blkcg);
-	rcu_read_unlock();
 	return tg;
 }
 
@@ -734,16 +675,41 @@
 	return 0;
 }
 
+static void throtl_update_dispatch_stats(struct blkcg_gq *blkg, u64 bytes,
+					 int rw)
+{
+	struct throtl_grp *tg = blkg_to_tg(blkg);
+	struct tg_stats_cpu *stats_cpu;
+	unsigned long flags;
+
+	/* If per cpu stats are not allocated yet, don't do any accounting. */
+	if (tg->stats_cpu == NULL)
+		return;
+
+	/*
+	 * Disabling interrupts to provide mutual exclusion between two
+	 * writes on same cpu. It probably is not needed for 64bit. Not
+	 * optimizing that case yet.
+	 */
+	local_irq_save(flags);
+
+	stats_cpu = this_cpu_ptr(tg->stats_cpu);
+
+	blkg_rwstat_add(&stats_cpu->serviced, rw, 1);
+	blkg_rwstat_add(&stats_cpu->service_bytes, rw, bytes);
+
+	local_irq_restore(flags);
+}
+
 static void throtl_charge_bio(struct throtl_grp *tg, struct bio *bio)
 {
 	bool rw = bio_data_dir(bio);
-	bool sync = rw_is_sync(bio->bi_rw);
 
 	/* Charge the bio to the group */
 	tg->bytes_disp[rw] += bio->bi_size;
 	tg->io_disp[rw]++;
 
-	blkiocg_update_dispatch_stats(&tg->blkg, bio->bi_size, rw, sync);
+	throtl_update_dispatch_stats(tg_to_blkg(tg), bio->bi_size, bio->bi_rw);
 }
 
 static void throtl_add_bio_tg(struct throtl_data *td, struct throtl_grp *tg,
@@ -753,7 +719,7 @@
 
 	bio_list_add(&tg->bio_lists[rw], bio);
 	/* Take a bio reference on tg */
-	throtl_ref_get_tg(tg);
+	blkg_get(tg_to_blkg(tg));
 	tg->nr_queued[rw]++;
 	td->nr_queued[rw]++;
 	throtl_enqueue_tg(td, tg);
@@ -786,8 +752,8 @@
 
 	bio = bio_list_pop(&tg->bio_lists[rw]);
 	tg->nr_queued[rw]--;
-	/* Drop bio reference on tg */
-	throtl_put_tg(tg);
+	/* Drop bio reference on blkg */
+	blkg_put(tg_to_blkg(tg));
 
 	BUG_ON(td->nr_queued[rw] <= 0);
 	td->nr_queued[rw]--;
@@ -865,8 +831,8 @@
 
 static void throtl_process_limit_change(struct throtl_data *td)
 {
-	struct throtl_grp *tg;
-	struct hlist_node *pos, *n;
+	struct request_queue *q = td->queue;
+	struct blkcg_gq *blkg, *n;
 
 	if (!td->limits_changed)
 		return;
@@ -875,7 +841,9 @@
 
 	throtl_log(td, "limits changed");
 
-	hlist_for_each_entry_safe(tg, pos, n, &td->tg_list, tg_node) {
+	list_for_each_entry_safe(blkg, n, &q->blkg_list, q_node) {
+		struct throtl_grp *tg = blkg_to_tg(blkg);
+
 		if (!tg->limits_changed)
 			continue;
 
@@ -973,119 +941,158 @@
 	}
 }
 
-static void
-throtl_destroy_tg(struct throtl_data *td, struct throtl_grp *tg)
+static u64 tg_prfill_cpu_rwstat(struct seq_file *sf,
+				struct blkg_policy_data *pd, int off)
 {
-	/* Something wrong if we are trying to remove same group twice */
-	BUG_ON(hlist_unhashed(&tg->tg_node));
+	struct throtl_grp *tg = pd_to_tg(pd);
+	struct blkg_rwstat rwstat = { }, tmp;
+	int i, cpu;
 
-	hlist_del_init(&tg->tg_node);
+	for_each_possible_cpu(cpu) {
+		struct tg_stats_cpu *sc = per_cpu_ptr(tg->stats_cpu, cpu);
 
-	/*
-	 * Put the reference taken at the time of creation so that when all
-	 * queues are gone, group can be destroyed.
-	 */
-	throtl_put_tg(tg);
-	td->nr_undestroyed_grps--;
-}
-
-static void throtl_release_tgs(struct throtl_data *td)
-{
-	struct hlist_node *pos, *n;
-	struct throtl_grp *tg;
-
-	hlist_for_each_entry_safe(tg, pos, n, &td->tg_list, tg_node) {
-		/*
-		 * If cgroup removal path got to blk_group first and removed
-		 * it from cgroup list, then it will take care of destroying
-		 * cfqg also.
-		 */
-		if (!blkiocg_del_blkio_group(&tg->blkg))
-			throtl_destroy_tg(td, tg);
+		tmp = blkg_rwstat_read((void *)sc + off);
+		for (i = 0; i < BLKG_RWSTAT_NR; i++)
+			rwstat.cnt[i] += tmp.cnt[i];
 	}
+
+	return __blkg_prfill_rwstat(sf, pd, &rwstat);
 }
 
-/*
- * Blk cgroup controller notification saying that blkio_group object is being
- * delinked as associated cgroup object is going away. That also means that
- * no new IO will come in this group. So get rid of this group as soon as
- * any pending IO in the group is finished.
- *
- * This function is called under rcu_read_lock(). key is the rcu protected
- * pointer. That means "key" is a valid throtl_data pointer as long as we are
- * rcu read lock.
- *
- * "key" was fetched from blkio_group under blkio_cgroup->lock. That means
- * it should not be NULL as even if queue was going away, cgroup deltion
- * path got to it first.
- */
-void throtl_unlink_blkio_group(void *key, struct blkio_group *blkg)
+static int tg_print_cpu_rwstat(struct cgroup *cgrp, struct cftype *cft,
+			       struct seq_file *sf)
 {
-	unsigned long flags;
-	struct throtl_data *td = key;
+	struct blkcg *blkcg = cgroup_to_blkcg(cgrp);
 
-	spin_lock_irqsave(td->queue->queue_lock, flags);
-	throtl_destroy_tg(td, tg_of_blkg(blkg));
-	spin_unlock_irqrestore(td->queue->queue_lock, flags);
+	blkcg_print_blkgs(sf, blkcg, tg_prfill_cpu_rwstat, &blkcg_policy_throtl,
+			  cft->private, true);
+	return 0;
 }
 
-static void throtl_update_blkio_group_common(struct throtl_data *td,
-				struct throtl_grp *tg)
+static u64 tg_prfill_conf_u64(struct seq_file *sf, struct blkg_policy_data *pd,
+			      int off)
 {
+	struct throtl_grp *tg = pd_to_tg(pd);
+	u64 v = *(u64 *)((void *)tg + off);
+
+	if (v == -1)
+		return 0;
+	return __blkg_prfill_u64(sf, pd, v);
+}
+
+static u64 tg_prfill_conf_uint(struct seq_file *sf, struct blkg_policy_data *pd,
+			       int off)
+{
+	struct throtl_grp *tg = pd_to_tg(pd);
+	unsigned int v = *(unsigned int *)((void *)tg + off);
+
+	if (v == -1)
+		return 0;
+	return __blkg_prfill_u64(sf, pd, v);
+}
+
+static int tg_print_conf_u64(struct cgroup *cgrp, struct cftype *cft,
+			     struct seq_file *sf)
+{
+	blkcg_print_blkgs(sf, cgroup_to_blkcg(cgrp), tg_prfill_conf_u64,
+			  &blkcg_policy_throtl, cft->private, false);
+	return 0;
+}
+
+static int tg_print_conf_uint(struct cgroup *cgrp, struct cftype *cft,
+			      struct seq_file *sf)
+{
+	blkcg_print_blkgs(sf, cgroup_to_blkcg(cgrp), tg_prfill_conf_uint,
+			  &blkcg_policy_throtl, cft->private, false);
+	return 0;
+}
+
+static int tg_set_conf(struct cgroup *cgrp, struct cftype *cft, const char *buf,
+		       bool is_u64)
+{
+	struct blkcg *blkcg = cgroup_to_blkcg(cgrp);
+	struct blkg_conf_ctx ctx;
+	struct throtl_grp *tg;
+	struct throtl_data *td;
+	int ret;
+
+	ret = blkg_conf_prep(blkcg, &blkcg_policy_throtl, buf, &ctx);
+	if (ret)
+		return ret;
+
+	tg = blkg_to_tg(ctx.blkg);
+	td = ctx.blkg->q->td;
+
+	if (!ctx.v)
+		ctx.v = -1;
+
+	if (is_u64)
+		*(u64 *)((void *)tg + cft->private) = ctx.v;
+	else
+		*(unsigned int *)((void *)tg + cft->private) = ctx.v;
+
+	/* XXX: we don't need the following deferred processing */
 	xchg(&tg->limits_changed, true);
 	xchg(&td->limits_changed, true);
-	/* Schedule a work now to process the limit change */
 	throtl_schedule_delayed_work(td, 0);
+
+	blkg_conf_finish(&ctx);
+	return 0;
 }
 
-/*
- * For all update functions, key should be a valid pointer because these
- * update functions are called under blkcg_lock, that means, blkg is
- * valid and in turn key is valid. queue exit path can not race because
- * of blkcg_lock
- *
- * Can not take queue lock in update functions as queue lock under blkcg_lock
- * is not allowed. Under other paths we take blkcg_lock under queue_lock.
- */
-static void throtl_update_blkio_group_read_bps(void *key,
-				struct blkio_group *blkg, u64 read_bps)
+static int tg_set_conf_u64(struct cgroup *cgrp, struct cftype *cft,
+			   const char *buf)
 {
-	struct throtl_data *td = key;
-	struct throtl_grp *tg = tg_of_blkg(blkg);
-
-	tg->bps[READ] = read_bps;
-	throtl_update_blkio_group_common(td, tg);
+	return tg_set_conf(cgrp, cft, buf, true);
 }
 
-static void throtl_update_blkio_group_write_bps(void *key,
-				struct blkio_group *blkg, u64 write_bps)
+static int tg_set_conf_uint(struct cgroup *cgrp, struct cftype *cft,
+			    const char *buf)
 {
-	struct throtl_data *td = key;
-	struct throtl_grp *tg = tg_of_blkg(blkg);
-
-	tg->bps[WRITE] = write_bps;
-	throtl_update_blkio_group_common(td, tg);
+	return tg_set_conf(cgrp, cft, buf, false);
 }
 
-static void throtl_update_blkio_group_read_iops(void *key,
-			struct blkio_group *blkg, unsigned int read_iops)
-{
-	struct throtl_data *td = key;
-	struct throtl_grp *tg = tg_of_blkg(blkg);
-
-	tg->iops[READ] = read_iops;
-	throtl_update_blkio_group_common(td, tg);
-}
-
-static void throtl_update_blkio_group_write_iops(void *key,
-			struct blkio_group *blkg, unsigned int write_iops)
-{
-	struct throtl_data *td = key;
-	struct throtl_grp *tg = tg_of_blkg(blkg);
-
-	tg->iops[WRITE] = write_iops;
-	throtl_update_blkio_group_common(td, tg);
-}
+static struct cftype throtl_files[] = {
+	{
+		.name = "throttle.read_bps_device",
+		.private = offsetof(struct throtl_grp, bps[READ]),
+		.read_seq_string = tg_print_conf_u64,
+		.write_string = tg_set_conf_u64,
+		.max_write_len = 256,
+	},
+	{
+		.name = "throttle.write_bps_device",
+		.private = offsetof(struct throtl_grp, bps[WRITE]),
+		.read_seq_string = tg_print_conf_u64,
+		.write_string = tg_set_conf_u64,
+		.max_write_len = 256,
+	},
+	{
+		.name = "throttle.read_iops_device",
+		.private = offsetof(struct throtl_grp, iops[READ]),
+		.read_seq_string = tg_print_conf_uint,
+		.write_string = tg_set_conf_uint,
+		.max_write_len = 256,
+	},
+	{
+		.name = "throttle.write_iops_device",
+		.private = offsetof(struct throtl_grp, iops[WRITE]),
+		.read_seq_string = tg_print_conf_uint,
+		.write_string = tg_set_conf_uint,
+		.max_write_len = 256,
+	},
+	{
+		.name = "throttle.io_service_bytes",
+		.private = offsetof(struct tg_stats_cpu, service_bytes),
+		.read_seq_string = tg_print_cpu_rwstat,
+	},
+	{
+		.name = "throttle.io_serviced",
+		.private = offsetof(struct tg_stats_cpu, serviced),
+		.read_seq_string = tg_print_cpu_rwstat,
+	},
+	{ }	/* terminate */
+};
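
Each cftype stores the byte offset of the field it controls, so one pair of generic handlers serves all four limit files. Roughly, for the bps files whose fields are u64 (hypothetical example_* helper, not patch code):

static u64 example_read_bps_limit(struct throtl_grp *tg, const struct cftype *cft)
{
	/* cft->private == offsetof(struct throtl_grp, bps[READ or WRITE]) */
	return *(u64 *)((void *)tg + cft->private);
}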
 
 static void throtl_shutdown_wq(struct request_queue *q)
 {
@@ -1094,19 +1101,13 @@
 	cancel_delayed_work_sync(&td->throtl_work);
 }
 
-static struct blkio_policy_type blkio_policy_throtl = {
-	.ops = {
-		.blkio_unlink_group_fn = throtl_unlink_blkio_group,
-		.blkio_update_group_read_bps_fn =
-					throtl_update_blkio_group_read_bps,
-		.blkio_update_group_write_bps_fn =
-					throtl_update_blkio_group_write_bps,
-		.blkio_update_group_read_iops_fn =
-					throtl_update_blkio_group_read_iops,
-		.blkio_update_group_write_iops_fn =
-					throtl_update_blkio_group_write_iops,
-	},
-	.plid = BLKIO_POLICY_THROTL,
+static struct blkcg_policy blkcg_policy_throtl = {
+	.pd_size		= sizeof(struct throtl_grp),
+	.cftypes		= throtl_files,
+
+	.pd_init_fn		= throtl_pd_init,
+	.pd_exit_fn		= throtl_pd_exit,
+	.pd_reset_stats_fn	= throtl_pd_reset_stats,
 };
 
 bool blk_throtl_bio(struct request_queue *q, struct bio *bio)
@@ -1114,7 +1115,7 @@
 	struct throtl_data *td = q->td;
 	struct throtl_grp *tg;
 	bool rw = bio_data_dir(bio), update_disptime = true;
-	struct blkio_cgroup *blkcg;
+	struct blkcg *blkcg;
 	bool throttled = false;
 
 	if (bio->bi_rw & REQ_THROTTLED) {
@@ -1122,33 +1123,31 @@
 		goto out;
 	}
 
+	/* bio_associate_current() needs ioc, try creating */
+	create_io_context(GFP_ATOMIC, q->node);
+
 	/*
 	 * A throtl_grp pointer retrieved under rcu can be used to access
 	 * basic fields like stats and io rates. If a group has no rules,
 	 * just update the dispatch stats in lockless manner and return.
 	 */
-
 	rcu_read_lock();
-	blkcg = task_blkio_cgroup(current);
-	tg = throtl_find_tg(td, blkcg);
+	blkcg = bio_blkcg(bio);
+	tg = throtl_lookup_tg(td, blkcg);
 	if (tg) {
-		throtl_tg_fill_dev_details(td, tg);
-
 		if (tg_no_rule_group(tg, rw)) {
-			blkiocg_update_dispatch_stats(&tg->blkg, bio->bi_size,
-					rw, rw_is_sync(bio->bi_rw));
-			rcu_read_unlock();
-			goto out;
+			throtl_update_dispatch_stats(tg_to_blkg(tg),
+						     bio->bi_size, bio->bi_rw);
+			goto out_unlock_rcu;
 		}
 	}
-	rcu_read_unlock();
 
 	/*
 	 * Either group has not been allocated yet or it is not an unlimited
 	 * IO group
 	 */
 	spin_lock_irq(q->queue_lock);
-	tg = throtl_get_tg(td);
+	tg = throtl_lookup_create_tg(td, blkcg);
 	if (unlikely(!tg))
 		goto out_unlock;
 
@@ -1189,6 +1188,7 @@
 			tg->io_disp[rw], tg->iops[rw],
 			tg->nr_queued[READ], tg->nr_queued[WRITE]);
 
+	bio_associate_current(bio);
 	throtl_add_bio_tg(q->td, tg, bio);
 	throttled = true;
 
@@ -1199,6 +1199,8 @@
 
 out_unlock:
 	spin_unlock_irq(q->queue_lock);
+out_unlock_rcu:
+	rcu_read_unlock();
 out:
 	return throttled;
 }
@@ -1241,79 +1243,31 @@
 int blk_throtl_init(struct request_queue *q)
 {
 	struct throtl_data *td;
-	struct throtl_grp *tg;
+	int ret;
 
 	td = kzalloc_node(sizeof(*td), GFP_KERNEL, q->node);
 	if (!td)
 		return -ENOMEM;
 
-	INIT_HLIST_HEAD(&td->tg_list);
 	td->tg_service_tree = THROTL_RB_ROOT;
 	td->limits_changed = false;
 	INIT_DELAYED_WORK(&td->throtl_work, blk_throtl_work);
 
-	/* alloc and Init root group. */
-	td->queue = q;
-	tg = throtl_alloc_tg(td);
-
-	if (!tg) {
-		kfree(td);
-		return -ENOMEM;
-	}
-
-	td->root_tg = tg;
-
-	rcu_read_lock();
-	throtl_init_add_tg_lists(td, tg, &blkio_root_cgroup);
-	rcu_read_unlock();
-
-	/* Attach throtl data to request queue */
 	q->td = td;
-	return 0;
+	td->queue = q;
+
+	/* activate policy */
+	ret = blkcg_activate_policy(q, &blkcg_policy_throtl);
+	if (ret)
+		kfree(td);
+	return ret;
 }
 
 void blk_throtl_exit(struct request_queue *q)
 {
-	struct throtl_data *td = q->td;
-	bool wait = false;
-
-	BUG_ON(!td);
-
+	BUG_ON(!q->td);
 	throtl_shutdown_wq(q);
-
-	spin_lock_irq(q->queue_lock);
-	throtl_release_tgs(td);
-
-	/* If there are other groups */
-	if (td->nr_undestroyed_grps > 0)
-		wait = true;
-
-	spin_unlock_irq(q->queue_lock);
-
-	/*
-	 * Wait for tg->blkg->key accessors to exit their grace periods.
-	 * Do this wait only if there are other undestroyed groups out
-	 * there (other than root group). This can happen if cgroup deletion
-	 * path claimed the responsibility of cleaning up a group before
-	 * queue cleanup code get to the group.
-	 *
-	 * Do not call synchronize_rcu() unconditionally as there are drivers
-	 * which create/delete request queue hundreds of times during scan/boot
-	 * and synchronize_rcu() can take significant time and slow down boot.
-	 */
-	if (wait)
-		synchronize_rcu();
-
-	/*
-	 * Just being safe to make sure after previous flush if some body did
-	 * update limits through cgroup and another work got queued, cancel
-	 * it.
-	 */
-	throtl_shutdown_wq(q);
-}
-
-void blk_throtl_release(struct request_queue *q)
-{
+	blkcg_deactivate_policy(q, &blkcg_policy_throtl);
 	kfree(q->td);
 }
 
@@ -1323,8 +1277,7 @@
 	if (!kthrotld_workqueue)
 		panic("Failed to create kthrotld\n");
 
-	blkio_policy_register(&blkio_policy_throtl);
-	return 0;
+	return blkcg_policy_register(&blkcg_policy_throtl);
 }
 
 module_init(throtl_init);
diff --git a/block/blk.h b/block/blk.h
index d45be87..85f6ae4 100644
--- a/block/blk.h
+++ b/block/blk.h
@@ -23,7 +23,8 @@
 			struct bio *bio);
 int blk_rq_append_bio(struct request_queue *q, struct request *rq,
 		      struct bio *bio);
-void blk_drain_queue(struct request_queue *q, bool drain_all);
+void blk_queue_bypass_start(struct request_queue *q);
+void blk_queue_bypass_end(struct request_queue *q);
 void blk_dequeue_request(struct request *rq);
 void __blk_queue_free_tags(struct request_queue *q);
 bool __blk_end_bidi_request(struct request *rq, int error,
@@ -144,9 +145,6 @@
 
 int blk_dev_init(void);
 
-void elv_quiesce_start(struct request_queue *q);
-void elv_quiesce_end(struct request_queue *q);
-
 
 /*
  * Return the threshold (number of used requests) at which the queue is
@@ -186,32 +184,30 @@
  */
 void get_io_context(struct io_context *ioc);
 struct io_cq *ioc_lookup_icq(struct io_context *ioc, struct request_queue *q);
-struct io_cq *ioc_create_icq(struct request_queue *q, gfp_t gfp_mask);
+struct io_cq *ioc_create_icq(struct io_context *ioc, struct request_queue *q,
+			     gfp_t gfp_mask);
 void ioc_clear_queue(struct request_queue *q);
 
-void create_io_context_slowpath(struct task_struct *task, gfp_t gfp_mask,
-				int node);
+int create_task_io_context(struct task_struct *task, gfp_t gfp_mask, int node);
 
 /**
  * create_io_context - try to create task->io_context
- * @task: target task
  * @gfp_mask: allocation mask
  * @node: allocation node
  *
- * If @task->io_context is %NULL, allocate a new io_context and install it.
- * Returns the current @task->io_context which may be %NULL if allocation
- * failed.
+ * If %current->io_context is %NULL, allocate a new io_context and install
+ * it.  Returns %current->io_context, which may be %NULL if allocation
+ * failed.
  *
  * Note that this function can't be called with IRQ disabled because
- * task_lock which protects @task->io_context is IRQ-unsafe.
+ * task_lock which protects %current->io_context is IRQ-unsafe.
  */
-static inline struct io_context *create_io_context(struct task_struct *task,
-						   gfp_t gfp_mask, int node)
+static inline struct io_context *create_io_context(gfp_t gfp_mask, int node)
 {
 	WARN_ON_ONCE(irqs_disabled());
-	if (unlikely(!task->io_context))
-		create_io_context_slowpath(task, gfp_mask, node);
-	return task->io_context;
+	if (unlikely(!current->io_context))
+		create_task_io_context(current, gfp_mask, node);
+	return current->io_context;
 }
 
 /*
@@ -222,7 +218,6 @@
 extern void blk_throtl_drain(struct request_queue *q);
 extern int blk_throtl_init(struct request_queue *q);
 extern void blk_throtl_exit(struct request_queue *q);
-extern void blk_throtl_release(struct request_queue *q);
 #else /* CONFIG_BLK_DEV_THROTTLING */
 static inline bool blk_throtl_bio(struct request_queue *q, struct bio *bio)
 {
@@ -231,7 +226,6 @@
 static inline void blk_throtl_drain(struct request_queue *q) { }
 static inline int blk_throtl_init(struct request_queue *q) { return 0; }
 static inline void blk_throtl_exit(struct request_queue *q) { }
-static inline void blk_throtl_release(struct request_queue *q) { }
 #endif /* CONFIG_BLK_DEV_THROTTLING */
 
 #endif /* BLK_INTERNAL_H */
diff --git a/block/cfq-iosched.c b/block/cfq-iosched.c
index 3c38536..673c977 100644
--- a/block/cfq-iosched.c
+++ b/block/cfq-iosched.c
@@ -15,7 +15,9 @@
 #include <linux/ioprio.h>
 #include <linux/blktrace_api.h>
 #include "blk.h"
-#include "cfq.h"
+#include "blk-cgroup.h"
+
+static struct blkcg_policy blkcg_policy_cfq __maybe_unused;
 
 /*
  * tunables
@@ -171,8 +173,53 @@
 	SYNC_WORKLOAD = 2
 };
 
+struct cfqg_stats {
+#ifdef CONFIG_CFQ_GROUP_IOSCHED
+	/* total bytes transferred */
+	struct blkg_rwstat		service_bytes;
+	/* total IOs serviced, post merge */
+	struct blkg_rwstat		serviced;
+	/* number of ios merged */
+	struct blkg_rwstat		merged;
+	/* total time spent on device in ns, may not be accurate w/ queueing */
+	struct blkg_rwstat		service_time;
+	/* total time spent waiting in scheduler queue in ns */
+	struct blkg_rwstat		wait_time;
+	/* number of IOs queued up */
+	struct blkg_rwstat		queued;
+	/* total sectors transferred */
+	struct blkg_stat		sectors;
+	/* total disk time and nr sectors dispatched by this group */
+	struct blkg_stat		time;
+#ifdef CONFIG_DEBUG_BLK_CGROUP
+	/* time not charged to this cgroup */
+	struct blkg_stat		unaccounted_time;
+	/* sum of number of ios queued across all samples */
+	struct blkg_stat		avg_queue_size_sum;
+	/* count of samples taken for average */
+	struct blkg_stat		avg_queue_size_samples;
+	/* how many times this group has been removed from service tree */
+	struct blkg_stat		dequeue;
+	/* total time spent waiting for it to be assigned a timeslice. */
+	struct blkg_stat		group_wait_time;
+	/* time spent idling for this blkcg_gq */
+	struct blkg_stat		idle_time;
+	/* total time with empty current active q with other requests queued */
+	struct blkg_stat		empty_time;
+	/* fields after this shouldn't be cleared on stat reset */
+	uint64_t			start_group_wait_time;
+	uint64_t			start_idle_time;
+	uint64_t			start_empty_time;
+	uint16_t			flags;
+#endif	/* CONFIG_DEBUG_BLK_CGROUP */
+#endif	/* CONFIG_CFQ_GROUP_IOSCHED */
+};
+
 /* This is per cgroup per device grouping structure */
 struct cfq_group {
+	/* must be the first member */
+	struct blkg_policy_data pd;
+
 	/* group service_tree member */
 	struct rb_node rb_node;
 
@@ -180,7 +227,7 @@
 	u64 vdisktime;
 	unsigned int weight;
 	unsigned int new_weight;
-	bool needs_update;
+	unsigned int dev_weight;
 
 	/* number of cfqq currently on this group */
 	int nr_cfqq;
@@ -206,20 +253,21 @@
 	unsigned long saved_workload_slice;
 	enum wl_type_t saved_workload;
 	enum wl_prio_t saved_serving_prio;
-	struct blkio_group blkg;
-#ifdef CONFIG_CFQ_GROUP_IOSCHED
-	struct hlist_node cfqd_node;
-	int ref;
-#endif
+
 	/* number of requests that are on the dispatch list or inside driver */
 	int dispatched;
 	struct cfq_ttime ttime;
+	struct cfqg_stats stats;
 };
 
 struct cfq_io_cq {
 	struct io_cq		icq;		/* must be the first member */
 	struct cfq_queue	*cfqq[2];
 	struct cfq_ttime	ttime;
+	int			ioprio;		/* the current ioprio */
+#ifdef CONFIG_CFQ_GROUP_IOSCHED
+	uint64_t		blkcg_id;	/* the current blkcg ID */
+#endif
 };
 
 /*
@@ -229,7 +277,7 @@
 	struct request_queue *queue;
 	/* Root service tree for cfq_groups */
 	struct cfq_rb_root grp_service_tree;
-	struct cfq_group root_group;
+	struct cfq_group *root_group;
 
 	/*
 	 * The priority currently being served
@@ -303,12 +351,6 @@
 	struct cfq_queue oom_cfqq;
 
 	unsigned long last_delayed_sync;
-
-	/* List of cfq groups being managed on this device*/
-	struct hlist_head cfqg_list;
-
-	/* Number of groups which are on blkcg->blkg_list */
-	unsigned int nr_blkcg_linked_grps;
 };
 
 static struct cfq_group *cfq_get_next_cfqg(struct cfq_data *cfqd);
@@ -371,21 +413,284 @@
 CFQ_CFQQ_FNS(wait_busy);
 #undef CFQ_CFQQ_FNS
 
+static inline struct cfq_group *pd_to_cfqg(struct blkg_policy_data *pd)
+{
+	return pd ? container_of(pd, struct cfq_group, pd) : NULL;
+}
+
+static inline struct cfq_group *blkg_to_cfqg(struct blkcg_gq *blkg)
+{
+	return pd_to_cfqg(blkg_to_pd(blkg, &blkcg_policy_cfq));
+}
+
+static inline struct blkcg_gq *cfqg_to_blkg(struct cfq_group *cfqg)
+{
+	return pd_to_blkg(&cfqg->pd);
+}
+
+#if defined(CONFIG_CFQ_GROUP_IOSCHED) && defined(CONFIG_DEBUG_BLK_CGROUP)
+
+/* cfqg stats flags */
+enum cfqg_stats_flags {
+	CFQG_stats_waiting = 0,
+	CFQG_stats_idling,
+	CFQG_stats_empty,
+};
+
+#define CFQG_FLAG_FNS(name)						\
+static inline void cfqg_stats_mark_##name(struct cfqg_stats *stats)	\
+{									\
+	stats->flags |= (1 << CFQG_stats_##name);			\
+}									\
+static inline void cfqg_stats_clear_##name(struct cfqg_stats *stats)	\
+{									\
+	stats->flags &= ~(1 << CFQG_stats_##name);			\
+}									\
+static inline int cfqg_stats_##name(struct cfqg_stats *stats)		\
+{									\
+	return (stats->flags & (1 << CFQG_stats_##name)) != 0;		\
+}									\
+
+CFQG_FLAG_FNS(waiting)
+CFQG_FLAG_FNS(idling)
+CFQG_FLAG_FNS(empty)
+#undef CFQG_FLAG_FNS
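
For reference, CFQG_FLAG_FNS(waiting) above expands to roughly the following helpers (shown with an example_ prefix to mark this as an illustration, not patch code):

static inline void example_cfqg_stats_mark_waiting(struct cfqg_stats *stats)
{
	stats->flags |= (1 << CFQG_stats_waiting);
}

static inline int example_cfqg_stats_waiting(struct cfqg_stats *stats)
{
	return (stats->flags & (1 << CFQG_stats_waiting)) != 0;
}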
+
+/* This should be called with the queue_lock held. */
+static void cfqg_stats_update_group_wait_time(struct cfqg_stats *stats)
+{
+	unsigned long long now;
+
+	if (!cfqg_stats_waiting(stats))
+		return;
+
+	now = sched_clock();
+	if (time_after64(now, stats->start_group_wait_time))
+		blkg_stat_add(&stats->group_wait_time,
+			      now - stats->start_group_wait_time);
+	cfqg_stats_clear_waiting(stats);
+}
+
+/* This should be called with the queue_lock held. */
+static void cfqg_stats_set_start_group_wait_time(struct cfq_group *cfqg,
+						 struct cfq_group *curr_cfqg)
+{
+	struct cfqg_stats *stats = &cfqg->stats;
+
+	if (cfqg_stats_waiting(stats))
+		return;
+	if (cfqg == curr_cfqg)
+		return;
+	stats->start_group_wait_time = sched_clock();
+	cfqg_stats_mark_waiting(stats);
+}
+
+/* This should be called with the queue_lock held. */
+static void cfqg_stats_end_empty_time(struct cfqg_stats *stats)
+{
+	unsigned long long now;
+
+	if (!cfqg_stats_empty(stats))
+		return;
+
+	now = sched_clock();
+	if (time_after64(now, stats->start_empty_time))
+		blkg_stat_add(&stats->empty_time,
+			      now - stats->start_empty_time);
+	cfqg_stats_clear_empty(stats);
+}
+
+static void cfqg_stats_update_dequeue(struct cfq_group *cfqg)
+{
+	blkg_stat_add(&cfqg->stats.dequeue, 1);
+}
+
+static void cfqg_stats_set_start_empty_time(struct cfq_group *cfqg)
+{
+	struct cfqg_stats *stats = &cfqg->stats;
+
+	if (blkg_rwstat_sum(&stats->queued))
+		return;
+
+	/*
+	 * The group is already marked empty.  This can happen if a cfqq got a
+	 * new request in the parent group and moved to this group while being
+	 * added to the service tree.  Just ignore the event and move on.
+	 */
+	if (cfqg_stats_empty(stats))
+		return;
+
+	stats->start_empty_time = sched_clock();
+	cfqg_stats_mark_empty(stats);
+}
+
+static void cfqg_stats_update_idle_time(struct cfq_group *cfqg)
+{
+	struct cfqg_stats *stats = &cfqg->stats;
+
+	if (cfqg_stats_idling(stats)) {
+		unsigned long long now = sched_clock();
+
+		if (time_after64(now, stats->start_idle_time))
+			blkg_stat_add(&stats->idle_time,
+				      now - stats->start_idle_time);
+		cfqg_stats_clear_idling(stats);
+	}
+}
+
+static void cfqg_stats_set_start_idle_time(struct cfq_group *cfqg)
+{
+	struct cfqg_stats *stats = &cfqg->stats;
+
+	BUG_ON(cfqg_stats_idling(stats));
+
+	stats->start_idle_time = sched_clock();
+	cfqg_stats_mark_idling(stats);
+}
+
+static void cfqg_stats_update_avg_queue_size(struct cfq_group *cfqg)
+{
+	struct cfqg_stats *stats = &cfqg->stats;
+
+	blkg_stat_add(&stats->avg_queue_size_sum,
+		      blkg_rwstat_sum(&stats->queued));
+	blkg_stat_add(&stats->avg_queue_size_samples, 1);
+	cfqg_stats_update_group_wait_time(stats);
+}
+
+#else	/* CONFIG_CFQ_GROUP_IOSCHED && CONFIG_DEBUG_BLK_CGROUP */
+
+static inline void cfqg_stats_set_start_group_wait_time(struct cfq_group *cfqg, struct cfq_group *curr_cfqg) { }
+static inline void cfqg_stats_end_empty_time(struct cfqg_stats *stats) { }
+static inline void cfqg_stats_update_dequeue(struct cfq_group *cfqg) { }
+static inline void cfqg_stats_set_start_empty_time(struct cfq_group *cfqg) { }
+static inline void cfqg_stats_update_idle_time(struct cfq_group *cfqg) { }
+static inline void cfqg_stats_set_start_idle_time(struct cfq_group *cfqg) { }
+static inline void cfqg_stats_update_avg_queue_size(struct cfq_group *cfqg) { }
+
+#endif	/* CONFIG_CFQ_GROUP_IOSCHED && CONFIG_DEBUG_BLK_CGROUP */
+
 #ifdef CONFIG_CFQ_GROUP_IOSCHED
-#define cfq_log_cfqq(cfqd, cfqq, fmt, args...)	\
+
+static inline void cfqg_get(struct cfq_group *cfqg)
+{
+	return blkg_get(cfqg_to_blkg(cfqg));
+}
+
+static inline void cfqg_put(struct cfq_group *cfqg)
+{
+	return blkg_put(cfqg_to_blkg(cfqg));
+}
+
+#define cfq_log_cfqq(cfqd, cfqq, fmt, args...)	do {			\
+	char __pbuf[128];						\
+									\
+	blkg_path(cfqg_to_blkg((cfqq)->cfqg), __pbuf, sizeof(__pbuf));	\
 	blk_add_trace_msg((cfqd)->queue, "cfq%d%c %s " fmt, (cfqq)->pid, \
-			cfq_cfqq_sync((cfqq)) ? 'S' : 'A', \
-			blkg_path(&(cfqq)->cfqg->blkg), ##args)
+			  cfq_cfqq_sync((cfqq)) ? 'S' : 'A',		\
+			  __pbuf, ##args);				\
+} while (0)
 
-#define cfq_log_cfqg(cfqd, cfqg, fmt, args...)				\
-	blk_add_trace_msg((cfqd)->queue, "%s " fmt,			\
-				blkg_path(&(cfqg)->blkg), ##args)       \
+#define cfq_log_cfqg(cfqd, cfqg, fmt, args...)	do {			\
+	char __pbuf[128];						\
+									\
+	blkg_path(cfqg_to_blkg(cfqg), __pbuf, sizeof(__pbuf));		\
+	blk_add_trace_msg((cfqd)->queue, "%s " fmt, __pbuf, ##args);	\
+} while (0)
 
-#else
+static inline void cfqg_stats_update_io_add(struct cfq_group *cfqg,
+					    struct cfq_group *curr_cfqg, int rw)
+{
+	blkg_rwstat_add(&cfqg->stats.queued, rw, 1);
+	cfqg_stats_end_empty_time(&cfqg->stats);
+	cfqg_stats_set_start_group_wait_time(cfqg, curr_cfqg);
+}
+
+static inline void cfqg_stats_update_timeslice_used(struct cfq_group *cfqg,
+			unsigned long time, unsigned long unaccounted_time)
+{
+	blkg_stat_add(&cfqg->stats.time, time);
+#ifdef CONFIG_DEBUG_BLK_CGROUP
+	blkg_stat_add(&cfqg->stats.unaccounted_time, unaccounted_time);
+#endif
+}
+
+static inline void cfqg_stats_update_io_remove(struct cfq_group *cfqg, int rw)
+{
+	blkg_rwstat_add(&cfqg->stats.queued, rw, -1);
+}
+
+static inline void cfqg_stats_update_io_merged(struct cfq_group *cfqg, int rw)
+{
+	blkg_rwstat_add(&cfqg->stats.merged, rw, 1);
+}
+
+static inline void cfqg_stats_update_dispatch(struct cfq_group *cfqg,
+					      uint64_t bytes, int rw)
+{
+	blkg_stat_add(&cfqg->stats.sectors, bytes >> 9);
+	blkg_rwstat_add(&cfqg->stats.serviced, rw, 1);
+	blkg_rwstat_add(&cfqg->stats.service_bytes, rw, bytes);
+}
+
+static inline void cfqg_stats_update_completion(struct cfq_group *cfqg,
+			uint64_t start_time, uint64_t io_start_time, int rw)
+{
+	struct cfqg_stats *stats = &cfqg->stats;
+	unsigned long long now = sched_clock();
+
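+	/* service_time: driver time, wait_time: time queued in the scheduler */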
+	if (time_after64(now, io_start_time))
+		blkg_rwstat_add(&stats->service_time, rw, now - io_start_time);
+	if (time_after64(io_start_time, start_time))
+		blkg_rwstat_add(&stats->wait_time, rw,
+				io_start_time - start_time);
+}
+
+static void cfq_pd_reset_stats(struct blkcg_gq *blkg)
+{
+	struct cfq_group *cfqg = blkg_to_cfqg(blkg);
+	struct cfqg_stats *stats = &cfqg->stats;
+
+	/* queued stats shouldn't be cleared */
+	blkg_rwstat_reset(&stats->service_bytes);
+	blkg_rwstat_reset(&stats->serviced);
+	blkg_rwstat_reset(&stats->merged);
+	blkg_rwstat_reset(&stats->service_time);
+	blkg_rwstat_reset(&stats->wait_time);
+	blkg_stat_reset(&stats->time);
+#ifdef CONFIG_DEBUG_BLK_CGROUP
+	blkg_stat_reset(&stats->unaccounted_time);
+	blkg_stat_reset(&stats->avg_queue_size_sum);
+	blkg_stat_reset(&stats->avg_queue_size_samples);
+	blkg_stat_reset(&stats->dequeue);
+	blkg_stat_reset(&stats->group_wait_time);
+	blkg_stat_reset(&stats->idle_time);
+	blkg_stat_reset(&stats->empty_time);
+#endif
+}
+
+#else	/* CONFIG_CFQ_GROUP_IOSCHED */
+
+static inline void cfqg_get(struct cfq_group *cfqg) { }
+static inline void cfqg_put(struct cfq_group *cfqg) { }
+
 #define cfq_log_cfqq(cfqd, cfqq, fmt, args...)	\
 	blk_add_trace_msg((cfqd)->queue, "cfq%d " fmt, (cfqq)->pid, ##args)
 #define cfq_log_cfqg(cfqd, cfqg, fmt, args...)		do {} while (0)
-#endif
+
+static inline void cfqg_stats_update_io_add(struct cfq_group *cfqg,
+			struct cfq_group *curr_cfqg, int rw) { }
+static inline void cfqg_stats_update_timeslice_used(struct cfq_group *cfqg,
+			unsigned long time, unsigned long unaccounted_time) { }
+static inline void cfqg_stats_update_io_remove(struct cfq_group *cfqg, int rw) { }
+static inline void cfqg_stats_update_io_merged(struct cfq_group *cfqg, int rw) { }
+static inline void cfqg_stats_update_dispatch(struct cfq_group *cfqg,
+					      uint64_t bytes, int rw) { }
+static inline void cfqg_stats_update_completion(struct cfq_group *cfqg,
+			uint64_t start_time, uint64_t io_start_time, int rw) { }
+
+#endif	/* CONFIG_CFQ_GROUP_IOSCHED */
+
 #define cfq_log(cfqd, fmt, args...)	\
 	blk_add_trace_msg((cfqd)->queue, "cfq " fmt, ##args)
 
@@ -466,8 +771,9 @@
 }
 
 static void cfq_dispatch_insert(struct request_queue *, struct request *);
-static struct cfq_queue *cfq_get_queue(struct cfq_data *, bool,
-				       struct io_context *, gfp_t);
+static struct cfq_queue *cfq_get_queue(struct cfq_data *cfqd, bool is_sync,
+				       struct cfq_io_cq *cic, struct bio *bio,
+				       gfp_t gfp_mask);
 
 static inline struct cfq_io_cq *icq_to_cic(struct io_cq *icq)
 {
@@ -545,7 +851,7 @@
 {
 	u64 d = delta << CFQ_SERVICE_SHIFT;
 
-	d = d * BLKIO_WEIGHT_DEFAULT;
+	d = d * CFQ_WEIGHT_DEFAULT;
 	do_div(d, cfqg->weight);
 	return d;
 }
@@ -872,9 +1178,9 @@
 cfq_update_group_weight(struct cfq_group *cfqg)
 {
 	BUG_ON(!RB_EMPTY_NODE(&cfqg->rb_node));
-	if (cfqg->needs_update) {
+	if (cfqg->new_weight) {
 		cfqg->weight = cfqg->new_weight;
-		cfqg->needs_update = false;
+		cfqg->new_weight = 0;
 	}
 }
 
@@ -936,7 +1242,7 @@
 	cfq_log_cfqg(cfqd, cfqg, "del_from_rr group");
 	cfq_group_service_tree_del(st, cfqg);
 	cfqg->saved_workload_slice = 0;
-	cfq_blkiocg_update_dequeue_stats(&cfqg->blkg, 1);
+	cfqg_stats_update_dequeue(cfqg);
 }
 
 static inline unsigned int cfq_cfqq_slice_usage(struct cfq_queue *cfqq,
@@ -1008,178 +1314,59 @@
 		     "sl_used=%u disp=%u charge=%u iops=%u sect=%lu",
 		     used_sl, cfqq->slice_dispatch, charge,
 		     iops_mode(cfqd), cfqq->nr_sectors);
-	cfq_blkiocg_update_timeslice_used(&cfqg->blkg, used_sl,
-					  unaccounted_sl);
-	cfq_blkiocg_set_start_empty_time(&cfqg->blkg);
+	cfqg_stats_update_timeslice_used(cfqg, used_sl, unaccounted_sl);
+	cfqg_stats_set_start_empty_time(cfqg);
 }
 
-#ifdef CONFIG_CFQ_GROUP_IOSCHED
-static inline struct cfq_group *cfqg_of_blkg(struct blkio_group *blkg)
-{
-	if (blkg)
-		return container_of(blkg, struct cfq_group, blkg);
-	return NULL;
-}
-
-static void cfq_update_blkio_group_weight(void *key, struct blkio_group *blkg,
-					  unsigned int weight)
-{
-	struct cfq_group *cfqg = cfqg_of_blkg(blkg);
-	cfqg->new_weight = weight;
-	cfqg->needs_update = true;
-}
-
-static void cfq_init_add_cfqg_lists(struct cfq_data *cfqd,
-			struct cfq_group *cfqg, struct blkio_cgroup *blkcg)
-{
-	struct backing_dev_info *bdi = &cfqd->queue->backing_dev_info;
-	unsigned int major, minor;
-
-	/*
-	 * Add group onto cgroup list. It might happen that bdi->dev is
-	 * not initialized yet. Initialize this new group without major
-	 * and minor info and this info will be filled in once a new thread
-	 * comes for IO.
-	 */
-	if (bdi->dev) {
-		sscanf(dev_name(bdi->dev), "%u:%u", &major, &minor);
-		cfq_blkiocg_add_blkio_group(blkcg, &cfqg->blkg,
-					(void *)cfqd, MKDEV(major, minor));
-	} else
-		cfq_blkiocg_add_blkio_group(blkcg, &cfqg->blkg,
-					(void *)cfqd, 0);
-
-	cfqd->nr_blkcg_linked_grps++;
-	cfqg->weight = blkcg_get_weight(blkcg, cfqg->blkg.dev);
-
-	/* Add group on cfqd list */
-	hlist_add_head(&cfqg->cfqd_node, &cfqd->cfqg_list);
-}
-
-/*
- * Should be called from sleepable context. No request queue lock as per
- * cpu stats are allocated dynamically and alloc_percpu needs to be called
- * from sleepable context.
+/**
+ * cfq_init_cfqg_base - initialize base part of a cfq_group
+ * @cfqg: cfq_group to initialize
+ *
+ * Initialize the base part which is used whether %CONFIG_CFQ_GROUP_IOSCHED
+ * is enabled or not.
  */
-static struct cfq_group * cfq_alloc_cfqg(struct cfq_data *cfqd)
+static void cfq_init_cfqg_base(struct cfq_group *cfqg)
 {
-	struct cfq_group *cfqg = NULL;
-	int i, j, ret;
 	struct cfq_rb_root *st;
-
-	cfqg = kzalloc_node(sizeof(*cfqg), GFP_ATOMIC, cfqd->queue->node);
-	if (!cfqg)
-		return NULL;
+	int i, j;
 
 	for_each_cfqg_st(cfqg, i, j, st)
 		*st = CFQ_RB_ROOT;
 	RB_CLEAR_NODE(&cfqg->rb_node);
 
 	cfqg->ttime.last_end_request = jiffies;
-
-	/*
-	 * Take the initial reference that will be released on destroy
-	 * This can be thought of a joint reference by cgroup and
-	 * elevator which will be dropped by either elevator exit
-	 * or cgroup deletion path depending on who is exiting first.
-	 */
-	cfqg->ref = 1;
-
-	ret = blkio_alloc_blkg_stats(&cfqg->blkg);
-	if (ret) {
-		kfree(cfqg);
-		return NULL;
-	}
-
-	return cfqg;
 }
 
-static struct cfq_group *
-cfq_find_cfqg(struct cfq_data *cfqd, struct blkio_cgroup *blkcg)
+#ifdef CONFIG_CFQ_GROUP_IOSCHED
+static void cfq_pd_init(struct blkcg_gq *blkg)
 {
-	struct cfq_group *cfqg = NULL;
-	void *key = cfqd;
-	struct backing_dev_info *bdi = &cfqd->queue->backing_dev_info;
-	unsigned int major, minor;
+	struct cfq_group *cfqg = blkg_to_cfqg(blkg);
 
-	/*
-	 * This is the common case when there are no blkio cgroups.
-	 * Avoid lookup in this case
-	 */
-	if (blkcg == &blkio_root_cgroup)
-		cfqg = &cfqd->root_group;
-	else
-		cfqg = cfqg_of_blkg(blkiocg_lookup_group(blkcg, key));
-
-	if (cfqg && !cfqg->blkg.dev && bdi->dev && dev_name(bdi->dev)) {
-		sscanf(dev_name(bdi->dev), "%u:%u", &major, &minor);
-		cfqg->blkg.dev = MKDEV(major, minor);
-	}
-
-	return cfqg;
+	cfq_init_cfqg_base(cfqg);
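+	/* new groups start out with the blkcg-wide default weight */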
+	cfqg->weight = blkg->blkcg->cfq_weight;
 }
 
 /*
  * Search for the cfq group current task belongs to. request_queue lock must
  * be held.
  */
-static struct cfq_group *cfq_get_cfqg(struct cfq_data *cfqd)
+static struct cfq_group *cfq_lookup_create_cfqg(struct cfq_data *cfqd,
+						struct blkcg *blkcg)
 {
-	struct blkio_cgroup *blkcg;
-	struct cfq_group *cfqg = NULL, *__cfqg = NULL;
 	struct request_queue *q = cfqd->queue;
+	struct cfq_group *cfqg = NULL;
 
-	rcu_read_lock();
-	blkcg = task_blkio_cgroup(current);
-	cfqg = cfq_find_cfqg(cfqd, blkcg);
-	if (cfqg) {
-		rcu_read_unlock();
-		return cfqg;
+	/* avoid lookup for the common case where there's no blkcg */
+	if (blkcg == &blkcg_root) {
+		cfqg = cfqd->root_group;
+	} else {
+		struct blkcg_gq *blkg;
+
+		blkg = blkg_lookup_create(blkcg, q);
+		if (!IS_ERR(blkg))
+			cfqg = blkg_to_cfqg(blkg);
 	}
 
-	/*
-	 * Need to allocate a group. Allocation of group also needs allocation
-	 * of per cpu stats which in-turn takes a mutex() and can block. Hence
-	 * we need to drop rcu lock and queue_lock before we call alloc.
-	 *
-	 * Not taking any queue reference here and assuming that queue is
-	 * around by the time we return. CFQ queue allocation code does
-	 * the same. It might be racy though.
-	 */
-
-	rcu_read_unlock();
-	spin_unlock_irq(q->queue_lock);
-
-	cfqg = cfq_alloc_cfqg(cfqd);
-
-	spin_lock_irq(q->queue_lock);
-
-	rcu_read_lock();
-	blkcg = task_blkio_cgroup(current);
-
-	/*
-	 * If some other thread already allocated the group while we were
-	 * not holding queue lock, free up the group
-	 */
-	__cfqg = cfq_find_cfqg(cfqd, blkcg);
-
-	if (__cfqg) {
-		kfree(cfqg);
-		rcu_read_unlock();
-		return __cfqg;
-	}
-
-	if (!cfqg)
-		cfqg = &cfqd->root_group;
-
-	cfq_init_add_cfqg_lists(cfqd, cfqg, blkcg);
-	rcu_read_unlock();
-	return cfqg;
-}
-
-static inline struct cfq_group *cfq_ref_get_cfqg(struct cfq_group *cfqg)
-{
-	cfqg->ref++;
 	return cfqg;
 }
 
@@ -1187,94 +1374,224 @@
 {
 	/* Currently, all async queues are mapped to root group */
 	if (!cfq_cfqq_sync(cfqq))
-		cfqg = &cfqq->cfqd->root_group;
+		cfqg = cfqq->cfqd->root_group;
 
 	cfqq->cfqg = cfqg;
 	/* cfqq reference on cfqg */
-	cfqq->cfqg->ref++;
+	cfqg_get(cfqg);
 }
 
-static void cfq_put_cfqg(struct cfq_group *cfqg)
+static u64 cfqg_prfill_weight_device(struct seq_file *sf,
+				     struct blkg_policy_data *pd, int off)
 {
-	struct cfq_rb_root *st;
-	int i, j;
+	struct cfq_group *cfqg = pd_to_cfqg(pd);
 
-	BUG_ON(cfqg->ref <= 0);
-	cfqg->ref--;
-	if (cfqg->ref)
-		return;
-	for_each_cfqg_st(cfqg, i, j, st)
-		BUG_ON(!RB_EMPTY_ROOT(&st->rb));
-	free_percpu(cfqg->blkg.stats_cpu);
-	kfree(cfqg);
+	if (!cfqg->dev_weight)
+		return 0;
+	return __blkg_prfill_u64(sf, pd, cfqg->dev_weight);
 }
 
-static void cfq_destroy_cfqg(struct cfq_data *cfqd, struct cfq_group *cfqg)
+static int cfqg_print_weight_device(struct cgroup *cgrp, struct cftype *cft,
+				    struct seq_file *sf)
 {
-	/* Something wrong if we are trying to remove same group twice */
-	BUG_ON(hlist_unhashed(&cfqg->cfqd_node));
-
-	hlist_del_init(&cfqg->cfqd_node);
-
-	BUG_ON(cfqd->nr_blkcg_linked_grps <= 0);
-	cfqd->nr_blkcg_linked_grps--;
-
-	/*
-	 * Put the reference taken at the time of creation so that when all
-	 * queues are gone, group can be destroyed.
-	 */
-	cfq_put_cfqg(cfqg);
+	blkcg_print_blkgs(sf, cgroup_to_blkcg(cgrp),
+			  cfqg_prfill_weight_device, &blkcg_policy_cfq, 0,
+			  false);
+	return 0;
 }
 
-static void cfq_release_cfq_groups(struct cfq_data *cfqd)
+static int cfq_print_weight(struct cgroup *cgrp, struct cftype *cft,
+			    struct seq_file *sf)
 {
-	struct hlist_node *pos, *n;
+	seq_printf(sf, "%u\n", cgroup_to_blkcg(cgrp)->cfq_weight);
+	return 0;
+}
+
+static int cfqg_set_weight_device(struct cgroup *cgrp, struct cftype *cft,
+				  const char *buf)
+{
+	struct blkcg *blkcg = cgroup_to_blkcg(cgrp);
+	struct blkg_conf_ctx ctx;
 	struct cfq_group *cfqg;
+	int ret;
 
-	hlist_for_each_entry_safe(cfqg, pos, n, &cfqd->cfqg_list, cfqd_node) {
-		/*
-		 * If cgroup removal path got to blk_group first and removed
-		 * it from cgroup list, then it will take care of destroying
-		 * cfqg also.
-		 */
-		if (!cfq_blkiocg_del_blkio_group(&cfqg->blkg))
-			cfq_destroy_cfqg(cfqd, cfqg);
+	ret = blkg_conf_prep(blkcg, &blkcg_policy_cfq, buf, &ctx);
+	if (ret)
+		return ret;
+
+	ret = -EINVAL;
+	cfqg = blkg_to_cfqg(ctx.blkg);
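+	/* 0 clears the per-device override, else value must be in range */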
+	if (!ctx.v || (ctx.v >= CFQ_WEIGHT_MIN && ctx.v <= CFQ_WEIGHT_MAX)) {
+		cfqg->dev_weight = ctx.v;
+		cfqg->new_weight = cfqg->dev_weight ?: blkcg->cfq_weight;
+		ret = 0;
 	}
+
+	blkg_conf_finish(&ctx);
+	return ret;
 }
 
-/*
- * Blk cgroup controller notification saying that blkio_group object is being
- * delinked as associated cgroup object is going away. That also means that
- * no new IO will come in this group. So get rid of this group as soon as
- * any pending IO in the group is finished.
- *
- * This function is called under rcu_read_lock(). key is the rcu protected
- * pointer. That means "key" is a valid cfq_data pointer as long as we are rcu
- * read lock.
- *
- * "key" was fetched from blkio_group under blkio_cgroup->lock. That means
- * it should not be NULL as even if elevator was exiting, cgroup deltion
- * path got to it first.
- */
-static void cfq_unlink_blkio_group(void *key, struct blkio_group *blkg)
+static int cfq_set_weight(struct cgroup *cgrp, struct cftype *cft, u64 val)
 {
-	unsigned long  flags;
-	struct cfq_data *cfqd = key;
+	struct blkcg *blkcg = cgroup_to_blkcg(cgrp);
+	struct blkcg_gq *blkg;
+	struct hlist_node *n;
 
-	spin_lock_irqsave(cfqd->queue->queue_lock, flags);
-	cfq_destroy_cfqg(cfqd, cfqg_of_blkg(blkg));
-	spin_unlock_irqrestore(cfqd->queue->queue_lock, flags);
+	if (val < CFQ_WEIGHT_MIN || val > CFQ_WEIGHT_MAX)
+		return -EINVAL;
+
+	spin_lock_irq(&blkcg->lock);
+	blkcg->cfq_weight = (unsigned int)val;
+
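+	/* update groups that do not have a per-device weight override */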
+	hlist_for_each_entry(blkg, n, &blkcg->blkg_list, blkcg_node) {
+		struct cfq_group *cfqg = blkg_to_cfqg(blkg);
+
+		if (cfqg && !cfqg->dev_weight)
+			cfqg->new_weight = blkcg->cfq_weight;
+	}
+
+	spin_unlock_irq(&blkcg->lock);
+	return 0;
 }
 
+static int cfqg_print_stat(struct cgroup *cgrp, struct cftype *cft,
+			   struct seq_file *sf)
+{
+	struct blkcg *blkcg = cgroup_to_blkcg(cgrp);
+
+	blkcg_print_blkgs(sf, blkcg, blkg_prfill_stat, &blkcg_policy_cfq,
+			  cft->private, false);
+	return 0;
+}
+
+static int cfqg_print_rwstat(struct cgroup *cgrp, struct cftype *cft,
+			     struct seq_file *sf)
+{
+	struct blkcg *blkcg = cgroup_to_blkcg(cgrp);
+
+	blkcg_print_blkgs(sf, blkcg, blkg_prfill_rwstat, &blkcg_policy_cfq,
+			  cft->private, true);
+	return 0;
+}
+
+#ifdef CONFIG_DEBUG_BLK_CGROUP
+static u64 cfqg_prfill_avg_queue_size(struct seq_file *sf,
+				      struct blkg_policy_data *pd, int off)
+{
+	struct cfq_group *cfqg = pd_to_cfqg(pd);
+	u64 samples = blkg_stat_read(&cfqg->stats.avg_queue_size_samples);
+	u64 v = 0;
+
+	if (samples) {
+		v = blkg_stat_read(&cfqg->stats.avg_queue_size_sum);
+		do_div(v, samples);
+	}
+	__blkg_prfill_u64(sf, pd, v);
+	return 0;
+}
+
+/* print avg_queue_size */
+static int cfqg_print_avg_queue_size(struct cgroup *cgrp, struct cftype *cft,
+				     struct seq_file *sf)
+{
+	struct blkcg *blkcg = cgroup_to_blkcg(cgrp);
+
+	blkcg_print_blkgs(sf, blkcg, cfqg_prfill_avg_queue_size,
+			  &blkcg_policy_cfq, 0, false);
+	return 0;
+}
+#endif	/* CONFIG_DEBUG_BLK_CGROUP */
+
+static struct cftype cfq_blkcg_files[] = {
+	{
+		.name = "weight_device",
+		.read_seq_string = cfqg_print_weight_device,
+		.write_string = cfqg_set_weight_device,
+		.max_write_len = 256,
+	},
+	{
+		.name = "weight",
+		.read_seq_string = cfq_print_weight,
+		.write_u64 = cfq_set_weight,
+	},
+	{
+		.name = "time",
+		.private = offsetof(struct cfq_group, stats.time),
+		.read_seq_string = cfqg_print_stat,
+	},
+	{
+		.name = "sectors",
+		.private = offsetof(struct cfq_group, stats.sectors),
+		.read_seq_string = cfqg_print_stat,
+	},
+	{
+		.name = "io_service_bytes",
+		.private = offsetof(struct cfq_group, stats.service_bytes),
+		.read_seq_string = cfqg_print_rwstat,
+	},
+	{
+		.name = "io_serviced",
+		.private = offsetof(struct cfq_group, stats.serviced),
+		.read_seq_string = cfqg_print_rwstat,
+	},
+	{
+		.name = "io_service_time",
+		.private = offsetof(struct cfq_group, stats.service_time),
+		.read_seq_string = cfqg_print_rwstat,
+	},
+	{
+		.name = "io_wait_time",
+		.private = offsetof(struct cfq_group, stats.wait_time),
+		.read_seq_string = cfqg_print_rwstat,
+	},
+	{
+		.name = "io_merged",
+		.private = offsetof(struct cfq_group, stats.merged),
+		.read_seq_string = cfqg_print_rwstat,
+	},
+	{
+		.name = "io_queued",
+		.private = offsetof(struct cfq_group, stats.queued),
+		.read_seq_string = cfqg_print_rwstat,
+	},
+#ifdef CONFIG_DEBUG_BLK_CGROUP
+	{
+		.name = "avg_queue_size",
+		.read_seq_string = cfqg_print_avg_queue_size,
+	},
+	{
+		.name = "group_wait_time",
+		.private = offsetof(struct cfq_group, stats.group_wait_time),
+		.read_seq_string = cfqg_print_stat,
+	},
+	{
+		.name = "idle_time",
+		.private = offsetof(struct cfq_group, stats.idle_time),
+		.read_seq_string = cfqg_print_stat,
+	},
+	{
+		.name = "empty_time",
+		.private = offsetof(struct cfq_group, stats.empty_time),
+		.read_seq_string = cfqg_print_stat,
+	},
+	{
+		.name = "dequeue",
+		.private = offsetof(struct cfq_group, stats.dequeue),
+		.read_seq_string = cfqg_print_stat,
+	},
+	{
+		.name = "unaccounted_time",
+		.private = offsetof(struct cfq_group, stats.unaccounted_time),
+		.read_seq_string = cfqg_print_stat,
+	},
+#endif	/* CONFIG_DEBUG_BLK_CGROUP */
+	{ }	/* terminate */
+};
 #else /* GROUP_IOSCHED */
-static struct cfq_group *cfq_get_cfqg(struct cfq_data *cfqd)
+static struct cfq_group *cfq_lookup_create_cfqg(struct cfq_data *cfqd,
+						struct blkcg *blkcg)
 {
-	return &cfqd->root_group;
-}
-
-static inline struct cfq_group *cfq_ref_get_cfqg(struct cfq_group *cfqg)
-{
-	return cfqg;
+	return cfqd->root_group;
 }
 
 static inline void
@@ -1282,9 +1599,6 @@
 	cfqq->cfqg = cfqg;
 }
 
-static void cfq_release_cfq_groups(struct cfq_data *cfqd) {}
-static inline void cfq_put_cfqg(struct cfq_group *cfqg) {}
-
 #endif /* GROUP_IOSCHED */
 
 /*
@@ -1551,12 +1865,10 @@
 {
 	elv_rb_del(&cfqq->sort_list, rq);
 	cfqq->queued[rq_is_sync(rq)]--;
-	cfq_blkiocg_update_io_remove_stats(&(RQ_CFQG(rq))->blkg,
-					rq_data_dir(rq), rq_is_sync(rq));
+	cfqg_stats_update_io_remove(RQ_CFQG(rq), rq->cmd_flags);
 	cfq_add_rq_rb(rq);
-	cfq_blkiocg_update_io_add_stats(&(RQ_CFQG(rq))->blkg,
-			&cfqq->cfqd->serving_group->blkg, rq_data_dir(rq),
-			rq_is_sync(rq));
+	cfqg_stats_update_io_add(RQ_CFQG(rq), cfqq->cfqd->serving_group,
+				 rq->cmd_flags);
 }
 
 static struct request *
@@ -1612,8 +1924,7 @@
 	cfq_del_rq_rb(rq);
 
 	cfqq->cfqd->rq_queued--;
-	cfq_blkiocg_update_io_remove_stats(&(RQ_CFQG(rq))->blkg,
-					rq_data_dir(rq), rq_is_sync(rq));
+	cfqg_stats_update_io_remove(RQ_CFQG(rq), rq->cmd_flags);
 	if (rq->cmd_flags & REQ_PRIO) {
 		WARN_ON(!cfqq->prio_pending);
 		cfqq->prio_pending--;
@@ -1648,8 +1959,7 @@
 static void cfq_bio_merged(struct request_queue *q, struct request *req,
 				struct bio *bio)
 {
-	cfq_blkiocg_update_io_merged_stats(&(RQ_CFQG(req))->blkg,
-					bio_data_dir(bio), cfq_bio_sync(bio));
+	cfqg_stats_update_io_merged(RQ_CFQG(req), bio->bi_rw);
 }
 
 static void
@@ -1671,8 +1981,7 @@
 	if (cfqq->next_rq == next)
 		cfqq->next_rq = rq;
 	cfq_remove_request(next);
-	cfq_blkiocg_update_io_merged_stats(&(RQ_CFQG(rq))->blkg,
-					rq_data_dir(next), rq_is_sync(next));
+	cfqg_stats_update_io_merged(RQ_CFQG(rq), next->cmd_flags);
 
 	cfqq = RQ_CFQQ(next);
 	/*
@@ -1713,7 +2022,7 @@
 static inline void cfq_del_timer(struct cfq_data *cfqd, struct cfq_queue *cfqq)
 {
 	del_timer(&cfqd->idle_slice_timer);
-	cfq_blkiocg_update_idle_time_stats(&cfqq->cfqg->blkg);
+	cfqg_stats_update_idle_time(cfqq->cfqg);
 }
 
 static void __cfq_set_active_queue(struct cfq_data *cfqd,
@@ -1722,7 +2031,7 @@
 	if (cfqq) {
 		cfq_log_cfqq(cfqd, cfqq, "set_active wl_prio:%d wl_type:%d",
 				cfqd->serving_prio, cfqd->serving_type);
-		cfq_blkiocg_update_avg_queue_size_stats(&cfqq->cfqg->blkg);
+		cfqg_stats_update_avg_queue_size(cfqq->cfqg);
 		cfqq->slice_start = 0;
 		cfqq->dispatch_start = jiffies;
 		cfqq->allocated_slice = 0;
@@ -2043,7 +2352,7 @@
 	 * task has exited, don't wait
 	 */
 	cic = cfqd->active_cic;
-	if (!cic || !atomic_read(&cic->icq.ioc->nr_tasks))
+	if (!cic || !atomic_read(&cic->icq.ioc->active_ref))
 		return;
 
 	/*
@@ -2070,7 +2379,7 @@
 		sl = cfqd->cfq_slice_idle;
 
 	mod_timer(&cfqd->idle_slice_timer, jiffies + sl);
-	cfq_blkiocg_update_set_idle_time_stats(&cfqq->cfqg->blkg);
+	cfqg_stats_set_start_idle_time(cfqq->cfqg);
 	cfq_log_cfqq(cfqd, cfqq, "arm_idle: %lu group_idle: %d", sl,
 			group_idle ? 1 : 0);
 }
@@ -2093,8 +2402,7 @@
 
 	cfqd->rq_in_flight[cfq_cfqq_sync(cfqq)]++;
 	cfqq->nr_sectors += blk_rq_sectors(rq);
-	cfq_blkiocg_update_dispatch_stats(&cfqq->cfqg->blkg, blk_rq_bytes(rq),
-					rq_data_dir(rq), rq_is_sync(rq));
+	cfqg_stats_update_dispatch(cfqq->cfqg, blk_rq_bytes(rq), rq->cmd_flags);
 }
 
 /*
@@ -2677,7 +2985,7 @@
 
 	BUG_ON(cfq_cfqq_on_rr(cfqq));
 	kmem_cache_free(cfq_pool, cfqq);
-	cfq_put_cfqg(cfqg);
+	cfqg_put(cfqg);
 }
 
 static void cfq_put_cooperator(struct cfq_queue *cfqq)
@@ -2736,7 +3044,7 @@
 	}
 }
 
-static void cfq_init_prio_data(struct cfq_queue *cfqq, struct io_context *ioc)
+static void cfq_init_prio_data(struct cfq_queue *cfqq, struct cfq_io_cq *cic)
 {
 	struct task_struct *tsk = current;
 	int ioprio_class;
@@ -2744,7 +3052,7 @@
 	if (!cfq_cfqq_prio_changed(cfqq))
 		return;
 
-	ioprio_class = IOPRIO_PRIO_CLASS(ioc->ioprio);
+	ioprio_class = IOPRIO_PRIO_CLASS(cic->ioprio);
 	switch (ioprio_class) {
 	default:
 		printk(KERN_ERR "cfq: bad prio %x\n", ioprio_class);
@@ -2756,11 +3064,11 @@
 		cfqq->ioprio_class = task_nice_ioclass(tsk);
 		break;
 	case IOPRIO_CLASS_RT:
-		cfqq->ioprio = task_ioprio(ioc);
+		cfqq->ioprio = IOPRIO_PRIO_DATA(cic->ioprio);
 		cfqq->ioprio_class = IOPRIO_CLASS_RT;
 		break;
 	case IOPRIO_CLASS_BE:
-		cfqq->ioprio = task_ioprio(ioc);
+		cfqq->ioprio = IOPRIO_PRIO_DATA(cic->ioprio);
 		cfqq->ioprio_class = IOPRIO_CLASS_BE;
 		break;
 	case IOPRIO_CLASS_IDLE:
@@ -2778,19 +3086,24 @@
 	cfq_clear_cfqq_prio_changed(cfqq);
 }
 
-static void changed_ioprio(struct cfq_io_cq *cic)
+static void check_ioprio_changed(struct cfq_io_cq *cic, struct bio *bio)
 {
+	int ioprio = cic->icq.ioc->ioprio;
 	struct cfq_data *cfqd = cic_to_cfqd(cic);
 	struct cfq_queue *cfqq;
 
-	if (unlikely(!cfqd))
+	/*
+	 * Check whether ioprio has changed.  The condition may trigger
+	 * spuriously on a newly created cic but there's no harm.
+	 */
+	if (unlikely(!cfqd) || likely(cic->ioprio == ioprio))
 		return;
 
 	cfqq = cic->cfqq[BLK_RW_ASYNC];
 	if (cfqq) {
 		struct cfq_queue *new_cfqq;
-		new_cfqq = cfq_get_queue(cfqd, BLK_RW_ASYNC, cic->icq.ioc,
-						GFP_ATOMIC);
+		new_cfqq = cfq_get_queue(cfqd, BLK_RW_ASYNC, cic, bio,
+					 GFP_ATOMIC);
 		if (new_cfqq) {
 			cic->cfqq[BLK_RW_ASYNC] = new_cfqq;
 			cfq_put_queue(cfqq);
@@ -2800,6 +3113,8 @@
 	cfqq = cic->cfqq[BLK_RW_SYNC];
 	if (cfqq)
 		cfq_mark_cfqq_prio_changed(cfqq);
+
+	cic->ioprio = ioprio;
 }
 
 static void cfq_init_cfqq(struct cfq_data *cfqd, struct cfq_queue *cfqq,
@@ -2823,17 +3138,24 @@
 }
 
 #ifdef CONFIG_CFQ_GROUP_IOSCHED
-static void changed_cgroup(struct cfq_io_cq *cic)
+static void check_blkcg_changed(struct cfq_io_cq *cic, struct bio *bio)
 {
-	struct cfq_queue *sync_cfqq = cic_to_cfqq(cic, 1);
 	struct cfq_data *cfqd = cic_to_cfqd(cic);
-	struct request_queue *q;
+	struct cfq_queue *sync_cfqq;
+	uint64_t id;
 
-	if (unlikely(!cfqd))
+	rcu_read_lock();
+	id = bio_blkcg(bio)->id;
+	rcu_read_unlock();
+
+	/*
+	 * Check whether blkcg has changed.  The condition may trigger
+	 * spuriously on a newly created cic but there's no harm.
+	 */
+	if (unlikely(!cfqd) || likely(cic->blkcg_id == id))
 		return;
 
-	q = cfqd->queue;
-
+	sync_cfqq = cic_to_cfqq(cic, 1);
 	if (sync_cfqq) {
 		/*
 		 * Drop reference to sync queue. A new sync queue will be
@@ -2843,21 +3165,26 @@
 		cic_set_cfqq(cic, NULL, 1);
 		cfq_put_queue(sync_cfqq);
 	}
+
+	cic->blkcg_id = id;
 }
+#else
+static inline void check_blkcg_changed(struct cfq_io_cq *cic, struct bio *bio) { }
 #endif  /* CONFIG_CFQ_GROUP_IOSCHED */
 
 static struct cfq_queue *
-cfq_find_alloc_queue(struct cfq_data *cfqd, bool is_sync,
-		     struct io_context *ioc, gfp_t gfp_mask)
+cfq_find_alloc_queue(struct cfq_data *cfqd, bool is_sync, struct cfq_io_cq *cic,
+		     struct bio *bio, gfp_t gfp_mask)
 {
+	struct blkcg *blkcg;
 	struct cfq_queue *cfqq, *new_cfqq = NULL;
-	struct cfq_io_cq *cic;
 	struct cfq_group *cfqg;
 
 retry:
-	cfqg = cfq_get_cfqg(cfqd);
-	cic = cfq_cic_lookup(cfqd, ioc);
-	/* cic always exists here */
+	rcu_read_lock();
+
+	blkcg = bio_blkcg(bio);
+	cfqg = cfq_lookup_create_cfqg(cfqd, blkcg);
 	cfqq = cic_to_cfqq(cic, is_sync);
 
 	/*
@@ -2870,6 +3197,7 @@
 			cfqq = new_cfqq;
 			new_cfqq = NULL;
 		} else if (gfp_mask & __GFP_WAIT) {
+			rcu_read_unlock();
 			spin_unlock_irq(cfqd->queue->queue_lock);
 			new_cfqq = kmem_cache_alloc_node(cfq_pool,
 					gfp_mask | __GFP_ZERO,
@@ -2885,7 +3213,7 @@
 
 		if (cfqq) {
 			cfq_init_cfqq(cfqd, cfqq, current->pid, is_sync);
-			cfq_init_prio_data(cfqq, ioc);
+			cfq_init_prio_data(cfqq, cic);
 			cfq_link_cfqq_cfqg(cfqq, cfqg);
 			cfq_log_cfqq(cfqd, cfqq, "alloced");
 		} else
@@ -2895,6 +3223,7 @@
 	if (new_cfqq)
 		kmem_cache_free(cfq_pool, new_cfqq);
 
+	rcu_read_unlock();
 	return cfqq;
 }
 
@@ -2904,6 +3233,9 @@
 	switch (ioprio_class) {
 	case IOPRIO_CLASS_RT:
 		return &cfqd->async_cfqq[0][ioprio];
+	case IOPRIO_CLASS_NONE:
+		ioprio = IOPRIO_NORM;
+		/* fall through */
 	case IOPRIO_CLASS_BE:
 		return &cfqd->async_cfqq[1][ioprio];
 	case IOPRIO_CLASS_IDLE:
@@ -2914,11 +3246,11 @@
 }
 
 static struct cfq_queue *
-cfq_get_queue(struct cfq_data *cfqd, bool is_sync, struct io_context *ioc,
-	      gfp_t gfp_mask)
+cfq_get_queue(struct cfq_data *cfqd, bool is_sync, struct cfq_io_cq *cic,
+	      struct bio *bio, gfp_t gfp_mask)
 {
-	const int ioprio = task_ioprio(ioc);
-	const int ioprio_class = task_ioprio_class(ioc);
+	const int ioprio_class = IOPRIO_PRIO_CLASS(cic->ioprio);
+	const int ioprio = IOPRIO_PRIO_DATA(cic->ioprio);
 	struct cfq_queue **async_cfqq = NULL;
 	struct cfq_queue *cfqq = NULL;
 
@@ -2928,7 +3260,7 @@
 	}
 
 	if (!cfqq)
-		cfqq = cfq_find_alloc_queue(cfqd, is_sync, ioc, gfp_mask);
+		cfqq = cfq_find_alloc_queue(cfqd, is_sync, cic, bio, gfp_mask);
 
 	/*
 	 * pin the queue now that it's allocated, scheduler exit will prune it
@@ -3010,7 +3342,7 @@
 
 	if (cfqq->next_rq && (cfqq->next_rq->cmd_flags & REQ_NOIDLE))
 		enable_idle = 0;
-	else if (!atomic_read(&cic->icq.ioc->nr_tasks) ||
+	else if (!atomic_read(&cic->icq.ioc->active_ref) ||
 		 !cfqd->cfq_slice_idle ||
 		 (!cfq_cfqq_deep(cfqq) && CFQQ_SEEKY(cfqq)))
 		enable_idle = 0;
@@ -3174,8 +3506,7 @@
 				cfq_clear_cfqq_wait_request(cfqq);
 				__blk_run_queue(cfqd->queue);
 			} else {
-				cfq_blkiocg_update_idle_time_stats(
-						&cfqq->cfqg->blkg);
+				cfqg_stats_update_idle_time(cfqq->cfqg);
 				cfq_mark_cfqq_must_dispatch(cfqq);
 			}
 		}
@@ -3197,14 +3528,13 @@
 	struct cfq_queue *cfqq = RQ_CFQQ(rq);
 
 	cfq_log_cfqq(cfqd, cfqq, "insert_request");
-	cfq_init_prio_data(cfqq, RQ_CIC(rq)->icq.ioc);
+	cfq_init_prio_data(cfqq, RQ_CIC(rq));
 
 	rq_set_fifo_time(rq, jiffies + cfqd->cfq_fifo_expire[rq_is_sync(rq)]);
 	list_add_tail(&rq->queuelist, &cfqq->fifo);
 	cfq_add_rq_rb(rq);
-	cfq_blkiocg_update_io_add_stats(&(RQ_CFQG(rq))->blkg,
-			&cfqd->serving_group->blkg, rq_data_dir(rq),
-			rq_is_sync(rq));
+	cfqg_stats_update_io_add(RQ_CFQG(rq), cfqd->serving_group,
+				 rq->cmd_flags);
 	cfq_rq_enqueued(cfqd, cfqq, rq);
 }
 
@@ -3300,9 +3630,8 @@
 	cfqd->rq_in_driver--;
 	cfqq->dispatched--;
 	(RQ_CFQG(rq))->dispatched--;
-	cfq_blkiocg_update_completion_stats(&cfqq->cfqg->blkg,
-			rq_start_time_ns(rq), rq_io_start_time_ns(rq),
-			rq_data_dir(rq), rq_is_sync(rq));
+	cfqg_stats_update_completion(cfqq->cfqg, rq_start_time_ns(rq),
+				     rq_io_start_time_ns(rq), rq->cmd_flags);
 
 	cfqd->rq_in_flight[cfq_cfqq_sync(cfqq)]--;
 
@@ -3399,7 +3728,7 @@
 
 	cfqq = cic_to_cfqq(cic, rw_is_sync(rw));
 	if (cfqq) {
-		cfq_init_prio_data(cfqq, cic->icq.ioc);
+		cfq_init_prio_data(cfqq, cic);
 
 		return __cfq_may_queue(cfqq);
 	}
@@ -3421,7 +3750,7 @@
 		cfqq->allocated[rw]--;
 
 		/* Put down rq reference on cfqg */
-		cfq_put_cfqg(RQ_CFQG(rq));
+		cfqg_put(RQ_CFQG(rq));
 		rq->elv.priv[0] = NULL;
 		rq->elv.priv[1] = NULL;
 
@@ -3465,32 +3794,25 @@
  * Allocate cfq data structures associated with this request.
  */
 static int
-cfq_set_request(struct request_queue *q, struct request *rq, gfp_t gfp_mask)
+cfq_set_request(struct request_queue *q, struct request *rq, struct bio *bio,
+		gfp_t gfp_mask)
 {
 	struct cfq_data *cfqd = q->elevator->elevator_data;
 	struct cfq_io_cq *cic = icq_to_cic(rq->elv.icq);
 	const int rw = rq_data_dir(rq);
 	const bool is_sync = rq_is_sync(rq);
 	struct cfq_queue *cfqq;
-	unsigned int changed;
 
 	might_sleep_if(gfp_mask & __GFP_WAIT);
 
 	spin_lock_irq(q->queue_lock);
 
-	/* handle changed notifications */
-	changed = icq_get_changed(&cic->icq);
-	if (unlikely(changed & ICQ_IOPRIO_CHANGED))
-		changed_ioprio(cic);
-#ifdef CONFIG_CFQ_GROUP_IOSCHED
-	if (unlikely(changed & ICQ_CGROUP_CHANGED))
-		changed_cgroup(cic);
-#endif
-
+	check_ioprio_changed(cic, bio);
+	check_blkcg_changed(cic, bio);
 new_queue:
 	cfqq = cic_to_cfqq(cic, is_sync);
 	if (!cfqq || cfqq == &cfqd->oom_cfqq) {
-		cfqq = cfq_get_queue(cfqd, is_sync, cic->icq.ioc, gfp_mask);
+		cfqq = cfq_get_queue(cfqd, is_sync, cic, bio, gfp_mask);
 		cic_set_cfqq(cic, cfqq, is_sync);
 	} else {
 		/*
@@ -3516,8 +3838,9 @@
 	cfqq->allocated[rw]++;
 
 	cfqq->ref++;
+	cfqg_get(cfqq->cfqg);
 	rq->elv.priv[0] = cfqq;
-	rq->elv.priv[1] = cfq_ref_get_cfqg(cfqq->cfqg);
+	rq->elv.priv[1] = cfqq->cfqg;
 	spin_unlock_irq(q->queue_lock);
 	return 0;
 }
@@ -3614,7 +3937,6 @@
 {
 	struct cfq_data *cfqd = e->elevator_data;
 	struct request_queue *q = cfqd->queue;
-	bool wait = false;
 
 	cfq_shutdown_timer_wq(cfqd);
 
@@ -3624,89 +3946,52 @@
 		__cfq_slice_expired(cfqd, cfqd->active_queue, 0);
 
 	cfq_put_async_queues(cfqd);
-	cfq_release_cfq_groups(cfqd);
-
-	/*
-	 * If there are groups which we could not unlink from blkcg list,
-	 * wait for a rcu period for them to be freed.
-	 */
-	if (cfqd->nr_blkcg_linked_grps)
-		wait = true;
 
 	spin_unlock_irq(q->queue_lock);
 
 	cfq_shutdown_timer_wq(cfqd);
 
-	/*
-	 * Wait for cfqg->blkg->key accessors to exit their grace periods.
-	 * Do this wait only if there are other unlinked groups out
-	 * there. This can happen if cgroup deletion path claimed the
-	 * responsibility of cleaning up a group before queue cleanup code
-	 * get to the group.
-	 *
-	 * Do not call synchronize_rcu() unconditionally as there are drivers
-	 * which create/delete request queue hundreds of times during scan/boot
-	 * and synchronize_rcu() can take significant time and slow down boot.
-	 */
-	if (wait)
-		synchronize_rcu();
-
-#ifdef CONFIG_CFQ_GROUP_IOSCHED
-	/* Free up per cpu stats for root group */
-	free_percpu(cfqd->root_group.blkg.stats_cpu);
+#ifndef CONFIG_CFQ_GROUP_IOSCHED
+	kfree(cfqd->root_group);
 #endif
+	blkcg_deactivate_policy(q, &blkcg_policy_cfq);
 	kfree(cfqd);
 }
 
-static void *cfq_init_queue(struct request_queue *q)
+static int cfq_init_queue(struct request_queue *q)
 {
 	struct cfq_data *cfqd;
-	int i, j;
-	struct cfq_group *cfqg;
-	struct cfq_rb_root *st;
+	struct blkcg_gq *blkg __maybe_unused;
+	int i, ret;
 
 	cfqd = kmalloc_node(sizeof(*cfqd), GFP_KERNEL | __GFP_ZERO, q->node);
 	if (!cfqd)
-		return NULL;
+		return -ENOMEM;
+
+	cfqd->queue = q;
+	q->elevator->elevator_data = cfqd;
 
 	/* Init root service tree */
 	cfqd->grp_service_tree = CFQ_RB_ROOT;
 
-	/* Init root group */
-	cfqg = &cfqd->root_group;
-	for_each_cfqg_st(cfqg, i, j, st)
-		*st = CFQ_RB_ROOT;
-	RB_CLEAR_NODE(&cfqg->rb_node);
-
-	/* Give preference to root group over other groups */
-	cfqg->weight = 2*BLKIO_WEIGHT_DEFAULT;
-
+	/* Init root group and prefer root group over other groups by default */
 #ifdef CONFIG_CFQ_GROUP_IOSCHED
-	/*
-	 * Set root group reference to 2. One reference will be dropped when
-	 * all groups on cfqd->cfqg_list are being deleted during queue exit.
-	 * Other reference will remain there as we don't want to delete this
-	 * group as it is statically allocated and gets destroyed when
-	 * throtl_data goes away.
-	 */
-	cfqg->ref = 2;
+	ret = blkcg_activate_policy(q, &blkcg_policy_cfq);
+	if (ret)
+		goto out_free;
 
-	if (blkio_alloc_blkg_stats(&cfqg->blkg)) {
-		kfree(cfqg);
-		kfree(cfqd);
-		return NULL;
-	}
+	cfqd->root_group = blkg_to_cfqg(q->root_blkg);
+#else
+	ret = -ENOMEM;
+	cfqd->root_group = kzalloc_node(sizeof(*cfqd->root_group),
+					GFP_KERNEL, cfqd->queue->node);
+	if (!cfqd->root_group)
+		goto out_free;
 
-	rcu_read_lock();
-
-	cfq_blkiocg_add_blkio_group(&blkio_root_cgroup, &cfqg->blkg,
-					(void *)cfqd, 0);
-	rcu_read_unlock();
-	cfqd->nr_blkcg_linked_grps++;
-
-	/* Add group on cfqd->cfqg_list */
-	hlist_add_head(&cfqg->cfqd_node, &cfqd->cfqg_list);
+	cfq_init_cfqg_base(cfqd->root_group);
 #endif
+	cfqd->root_group->weight = 2 * CFQ_WEIGHT_DEFAULT;
+
 	/*
 	 * Not strictly needed (since RB_ROOT just clears the node and we
 	 * zeroed cfqd on alloc), but better be safe in case someone decides
@@ -3718,13 +4003,17 @@
 	/*
 	 * Our fallback cfqq if cfq_find_alloc_queue() runs into OOM issues.
 	 * Grab a permanent reference to it, so that the normal code flow
-	 * will not attempt to free it.
+	 * will not attempt to free it.  oom_cfqq is linked to root_group
+	 * but shouldn't hold a reference as it'll never be unlinked.  Lose
+	 * the reference from linking right away.
 	 */
 	cfq_init_cfqq(cfqd, &cfqd->oom_cfqq, 1, 0);
 	cfqd->oom_cfqq.ref++;
-	cfq_link_cfqq_cfqg(&cfqd->oom_cfqq, &cfqd->root_group);
 
-	cfqd->queue = q;
+	spin_lock_irq(q->queue_lock);
+	cfq_link_cfqq_cfqg(&cfqd->oom_cfqq, cfqd->root_group);
+	cfqg_put(cfqd->root_group);
+	spin_unlock_irq(q->queue_lock);
 
 	init_timer(&cfqd->idle_slice_timer);
 	cfqd->idle_slice_timer.function = cfq_idle_slice_timer;
@@ -3750,7 +4039,11 @@
 	 * second, in order to have larger depth for async operations.
 	 */
 	cfqd->last_delayed_sync = jiffies - HZ;
-	return cfqd;
+	return 0;
+
+out_free:
+	kfree(cfqd);
+	return ret;
 }
 
 /*
@@ -3877,15 +4170,13 @@
 };
 
 #ifdef CONFIG_CFQ_GROUP_IOSCHED
-static struct blkio_policy_type blkio_policy_cfq = {
-	.ops = {
-		.blkio_unlink_group_fn =	cfq_unlink_blkio_group,
-		.blkio_update_group_weight_fn =	cfq_update_blkio_group_weight,
-	},
-	.plid = BLKIO_POLICY_PROP,
+static struct blkcg_policy blkcg_policy_cfq = {
+	.pd_size		= sizeof(struct cfq_group),
+	.cftypes		= cfq_blkcg_files,
+
+	.pd_init_fn		= cfq_pd_init,
+	.pd_reset_stats_fn	= cfq_pd_reset_stats,
 };
-#else
-static struct blkio_policy_type blkio_policy_cfq;
 #endif
 
 static int __init cfq_init(void)
@@ -3906,24 +4197,31 @@
 #else
 		cfq_group_idle = 0;
 #endif
+
+	ret = blkcg_policy_register(&blkcg_policy_cfq);
+	if (ret)
+		return ret;
+
 	cfq_pool = KMEM_CACHE(cfq_queue, 0);
 	if (!cfq_pool)
-		return -ENOMEM;
+		goto err_pol_unreg;
 
 	ret = elv_register(&iosched_cfq);
-	if (ret) {
-		kmem_cache_destroy(cfq_pool);
-		return ret;
-	}
-
-	blkio_policy_register(&blkio_policy_cfq);
+	if (ret)
+		goto err_free_pool;
 
 	return 0;
+
+err_free_pool:
+	kmem_cache_destroy(cfq_pool);
+err_pol_unreg:
+	blkcg_policy_unregister(&blkcg_policy_cfq);
+	return ret;
 }
 
 static void __exit cfq_exit(void)
 {
-	blkio_policy_unregister(&blkio_policy_cfq);
+	blkcg_policy_unregister(&blkcg_policy_cfq);
 	elv_unregister(&iosched_cfq);
 	kmem_cache_destroy(cfq_pool);
 }
diff --git a/block/cfq.h b/block/cfq.h
deleted file mode 100644
index 2a15592..0000000
--- a/block/cfq.h
+++ /dev/null
@@ -1,115 +0,0 @@
-#ifndef _CFQ_H
-#define _CFQ_H
-#include "blk-cgroup.h"
-
-#ifdef CONFIG_CFQ_GROUP_IOSCHED
-static inline void cfq_blkiocg_update_io_add_stats(struct blkio_group *blkg,
-	struct blkio_group *curr_blkg, bool direction, bool sync)
-{
-	blkiocg_update_io_add_stats(blkg, curr_blkg, direction, sync);
-}
-
-static inline void cfq_blkiocg_update_dequeue_stats(struct blkio_group *blkg,
-			unsigned long dequeue)
-{
-	blkiocg_update_dequeue_stats(blkg, dequeue);
-}
-
-static inline void cfq_blkiocg_update_timeslice_used(struct blkio_group *blkg,
-			unsigned long time, unsigned long unaccounted_time)
-{
-	blkiocg_update_timeslice_used(blkg, time, unaccounted_time);
-}
-
-static inline void cfq_blkiocg_set_start_empty_time(struct blkio_group *blkg)
-{
-	blkiocg_set_start_empty_time(blkg);
-}
-
-static inline void cfq_blkiocg_update_io_remove_stats(struct blkio_group *blkg,
-				bool direction, bool sync)
-{
-	blkiocg_update_io_remove_stats(blkg, direction, sync);
-}
-
-static inline void cfq_blkiocg_update_io_merged_stats(struct blkio_group *blkg,
-		bool direction, bool sync)
-{
-	blkiocg_update_io_merged_stats(blkg, direction, sync);
-}
-
-static inline void cfq_blkiocg_update_idle_time_stats(struct blkio_group *blkg)
-{
-	blkiocg_update_idle_time_stats(blkg);
-}
-
-static inline void
-cfq_blkiocg_update_avg_queue_size_stats(struct blkio_group *blkg)
-{
-	blkiocg_update_avg_queue_size_stats(blkg);
-}
-
-static inline void
-cfq_blkiocg_update_set_idle_time_stats(struct blkio_group *blkg)
-{
-	blkiocg_update_set_idle_time_stats(blkg);
-}
-
-static inline void cfq_blkiocg_update_dispatch_stats(struct blkio_group *blkg,
-				uint64_t bytes, bool direction, bool sync)
-{
-	blkiocg_update_dispatch_stats(blkg, bytes, direction, sync);
-}
-
-static inline void cfq_blkiocg_update_completion_stats(struct blkio_group *blkg, uint64_t start_time, uint64_t io_start_time, bool direction, bool sync)
-{
-	blkiocg_update_completion_stats(blkg, start_time, io_start_time,
-				direction, sync);
-}
-
-static inline void cfq_blkiocg_add_blkio_group(struct blkio_cgroup *blkcg,
-			struct blkio_group *blkg, void *key, dev_t dev) {
-	blkiocg_add_blkio_group(blkcg, blkg, key, dev, BLKIO_POLICY_PROP);
-}
-
-static inline int cfq_blkiocg_del_blkio_group(struct blkio_group *blkg)
-{
-	return blkiocg_del_blkio_group(blkg);
-}
-
-#else /* CFQ_GROUP_IOSCHED */
-static inline void cfq_blkiocg_update_io_add_stats(struct blkio_group *blkg,
-	struct blkio_group *curr_blkg, bool direction, bool sync) {}
-
-static inline void cfq_blkiocg_update_dequeue_stats(struct blkio_group *blkg,
-			unsigned long dequeue) {}
-
-static inline void cfq_blkiocg_update_timeslice_used(struct blkio_group *blkg,
-			unsigned long time, unsigned long unaccounted_time) {}
-static inline void cfq_blkiocg_set_start_empty_time(struct blkio_group *blkg) {}
-static inline void cfq_blkiocg_update_io_remove_stats(struct blkio_group *blkg,
-				bool direction, bool sync) {}
-static inline void cfq_blkiocg_update_io_merged_stats(struct blkio_group *blkg,
-		bool direction, bool sync) {}
-static inline void cfq_blkiocg_update_idle_time_stats(struct blkio_group *blkg)
-{
-}
-static inline void
-cfq_blkiocg_update_avg_queue_size_stats(struct blkio_group *blkg) {}
-
-static inline void
-cfq_blkiocg_update_set_idle_time_stats(struct blkio_group *blkg) {}
-
-static inline void cfq_blkiocg_update_dispatch_stats(struct blkio_group *blkg,
-				uint64_t bytes, bool direction, bool sync) {}
-static inline void cfq_blkiocg_update_completion_stats(struct blkio_group *blkg, uint64_t start_time, uint64_t io_start_time, bool direction, bool sync) {}
-
-static inline void cfq_blkiocg_add_blkio_group(struct blkio_cgroup *blkcg,
-			struct blkio_group *blkg, void *key, dev_t dev) {}
-static inline int cfq_blkiocg_del_blkio_group(struct blkio_group *blkg)
-{
-	return 0;
-}
-
-#endif /* CFQ_GROUP_IOSCHED */
-#endif
diff --git a/block/deadline-iosched.c b/block/deadline-iosched.c
index 7bf12d7..599b12e 100644
--- a/block/deadline-iosched.c
+++ b/block/deadline-iosched.c
@@ -337,13 +337,13 @@
 /*
  * initialize elevator private data (deadline_data).
  */
-static void *deadline_init_queue(struct request_queue *q)
+static int deadline_init_queue(struct request_queue *q)
 {
 	struct deadline_data *dd;
 
 	dd = kmalloc_node(sizeof(*dd), GFP_KERNEL | __GFP_ZERO, q->node);
 	if (!dd)
-		return NULL;
+		return -ENOMEM;
 
 	INIT_LIST_HEAD(&dd->fifo_list[READ]);
 	INIT_LIST_HEAD(&dd->fifo_list[WRITE]);
@@ -354,7 +354,9 @@
 	dd->writes_starved = writes_starved;
 	dd->front_merges = 1;
 	dd->fifo_batch = fifo_batch;
-	return dd;
+
+	q->elevator->elevator_data = dd;
+	return 0;
 }
 
 /*
diff --git a/block/elevator.c b/block/elevator.c
index f016855..6a55d41 100644
--- a/block/elevator.c
+++ b/block/elevator.c
@@ -38,6 +38,7 @@
 #include <trace/events/block.h>
 
 #include "blk.h"
+#include "blk-cgroup.h"
 
 static DEFINE_SPINLOCK(elv_list_lock);
 static LIST_HEAD(elv_list);
@@ -121,15 +122,6 @@
 	return e;
 }
 
-static int elevator_init_queue(struct request_queue *q,
-			       struct elevator_queue *eq)
-{
-	eq->elevator_data = eq->type->ops.elevator_init_fn(q);
-	if (eq->elevator_data)
-		return 0;
-	return -ENOMEM;
-}
-
 static char chosen_elevator[ELV_NAME_MAX];
 
 static int __init elevator_setup(char *str)
@@ -188,7 +180,6 @@
 int elevator_init(struct request_queue *q, char *name)
 {
 	struct elevator_type *e = NULL;
-	struct elevator_queue *eq;
 	int err;
 
 	if (unlikely(q->elevator))
@@ -222,17 +213,16 @@
 		}
 	}
 
-	eq = elevator_alloc(q, e);
-	if (!eq)
+	q->elevator = elevator_alloc(q, e);
+	if (!q->elevator)
 		return -ENOMEM;
 
-	err = elevator_init_queue(q, eq);
+	err = e->ops.elevator_init_fn(q);
 	if (err) {
-		kobject_put(&eq->kobj);
+		kobject_put(&q->elevator->kobj);
 		return err;
 	}
 
-	q->elevator = eq;
 	return 0;
 }
 EXPORT_SYMBOL(elevator_init);
@@ -564,25 +554,6 @@
 	}
 }
 
-void elv_quiesce_start(struct request_queue *q)
-{
-	if (!q->elevator)
-		return;
-
-	spin_lock_irq(q->queue_lock);
-	queue_flag_set(QUEUE_FLAG_ELVSWITCH, q);
-	spin_unlock_irq(q->queue_lock);
-
-	blk_drain_queue(q, false);
-}
-
-void elv_quiesce_end(struct request_queue *q)
-{
-	spin_lock_irq(q->queue_lock);
-	queue_flag_clear(QUEUE_FLAG_ELVSWITCH, q);
-	spin_unlock_irq(q->queue_lock);
-}
-
 void __elv_add_request(struct request_queue *q, struct request *rq, int where)
 {
 	trace_block_rq_insert(q, rq);
@@ -692,12 +663,13 @@
 	return NULL;
 }
 
-int elv_set_request(struct request_queue *q, struct request *rq, gfp_t gfp_mask)
+int elv_set_request(struct request_queue *q, struct request *rq,
+		    struct bio *bio, gfp_t gfp_mask)
 {
 	struct elevator_queue *e = q->elevator;
 
 	if (e->type->ops.elevator_set_req_fn)
-		return e->type->ops.elevator_set_req_fn(q, rq, gfp_mask);
+		return e->type->ops.elevator_set_req_fn(q, rq, bio, gfp_mask);
 	return 0;
 }
 
@@ -801,8 +773,9 @@
 	.release	= elevator_release,
 };
 
-int __elv_register_queue(struct request_queue *q, struct elevator_queue *e)
+int elv_register_queue(struct request_queue *q)
 {
+	struct elevator_queue *e = q->elevator;
 	int error;
 
 	error = kobject_add(&e->kobj, &q->kobj, "%s", "iosched");
@@ -820,11 +793,6 @@
 	}
 	return error;
 }
-
-int elv_register_queue(struct request_queue *q)
-{
-	return __elv_register_queue(q, q->elevator);
-}
 EXPORT_SYMBOL(elv_register_queue);
 
 void elv_unregister_queue(struct request_queue *q)
@@ -907,53 +875,60 @@
  */
 static int elevator_switch(struct request_queue *q, struct elevator_type *new_e)
 {
-	struct elevator_queue *old_elevator, *e;
+	struct elevator_queue *old = q->elevator;
+	bool registered = old->registered;
 	int err;
 
-	/* allocate new elevator */
-	e = elevator_alloc(q, new_e);
-	if (!e)
-		return -ENOMEM;
+	/*
+	 * Turn on BYPASS and drain all requests w/ elevator private data.
+	 * Block layer doesn't call into a quiesced elevator - all requests
+	 * are directly put on the dispatch list without elevator data
+	 * using INSERT_BACK.  All requests have SOFTBARRIER set and no
+	 * merge happens either.
+	 */
+	blk_queue_bypass_start(q);
 
-	err = elevator_init_queue(q, e);
+	/* unregister and clear all auxiliary data of the old elevator */
+	if (registered)
+		elv_unregister_queue(q);
+
+	spin_lock_irq(q->queue_lock);
+	ioc_clear_queue(q);
+	spin_unlock_irq(q->queue_lock);
+
+	/* allocate, init and register new elevator */
+	err = -ENOMEM;
+	q->elevator = elevator_alloc(q, new_e);
+	if (!q->elevator)
+		goto fail_init;
+
+	err = new_e->ops.elevator_init_fn(q);
 	if (err) {
-		kobject_put(&e->kobj);
-		return err;
+		kobject_put(&q->elevator->kobj);
+		goto fail_init;
 	}
 
-	/* turn on BYPASS and drain all requests w/ elevator private data */
-	elv_quiesce_start(q);
-
-	/* unregister old queue, register new one and kill old elevator */
-	if (q->elevator->registered) {
-		elv_unregister_queue(q);
-		err = __elv_register_queue(q, e);
+	if (registered) {
+		err = elv_register_queue(q);
 		if (err)
 			goto fail_register;
 	}
 
-	/* done, clear io_cq's, switch elevators and turn off BYPASS */
-	spin_lock_irq(q->queue_lock);
-	ioc_clear_queue(q);
-	old_elevator = q->elevator;
-	q->elevator = e;
-	spin_unlock_irq(q->queue_lock);
+	/* done, kill the old one and finish */
+	elevator_exit(old);
+	blk_queue_bypass_end(q);
 
-	elevator_exit(old_elevator);
-	elv_quiesce_end(q);
-
-	blk_add_trace_msg(q, "elv switch: %s", e->type->elevator_name);
+	blk_add_trace_msg(q, "elv switch: %s", new_e->elevator_name);
 
 	return 0;
 
 fail_register:
-	/*
-	 * switch failed, exit the new io scheduler and reattach the old
-	 * one again (along with re-adding the sysfs dir)
-	 */
-	elevator_exit(e);
+	elevator_exit(q->elevator);
+fail_init:
+	/* switch failed, restore and re-register old elevator */
+	q->elevator = old;
 	elv_register_queue(q);
-	elv_quiesce_end(q);
+	blk_queue_bypass_end(q);
 
 	return err;
 }
diff --git a/block/noop-iosched.c b/block/noop-iosched.c
index 413a0b1..5d1bf70 100644
--- a/block/noop-iosched.c
+++ b/block/noop-iosched.c
@@ -59,15 +59,17 @@
 	return list_entry(rq->queuelist.next, struct request, queuelist);
 }
 
-static void *noop_init_queue(struct request_queue *q)
+static int noop_init_queue(struct request_queue *q)
 {
 	struct noop_data *nd;
 
 	nd = kmalloc_node(sizeof(*nd), GFP_KERNEL, q->node);
 	if (!nd)
-		return NULL;
+		return -ENOMEM;
+
 	INIT_LIST_HEAD(&nd->queue);
-	return nd;
+	q->elevator->elevator_data = nd;
+	return 0;
 }
 
 static void noop_exit_queue(struct elevator_queue *e)
diff --git a/drivers/Makefile b/drivers/Makefile
index 0ee98d5..2ba29ff 100644
--- a/drivers/Makefile
+++ b/drivers/Makefile
@@ -18,7 +18,7 @@
 # PnP must come after ACPI since it will eventually need to check if acpi
 # was used and do nothing if so
 obj-$(CONFIG_PNP)		+= pnp/
-obj-$(CONFIG_ARM_AMBA)		+= amba/
+obj-y				+= amba/
 # Many drivers will want to use DMA so this has to be made available
 # really early.
 obj-$(CONFIG_DMA_ENGINE)	+= dma/
diff --git a/drivers/acpi/Kconfig b/drivers/acpi/Kconfig
index 47768ff..8099895 100644
--- a/drivers/acpi/Kconfig
+++ b/drivers/acpi/Kconfig
@@ -208,7 +208,7 @@
 
 config ACPI_HOTPLUG_CPU
 	bool
-	depends on ACPI_PROCESSOR && HOTPLUG_CPU
+	depends on EXPERIMENTAL && ACPI_PROCESSOR && HOTPLUG_CPU
 	select ACPI_CONTAINER
 	default y
 
diff --git a/drivers/acpi/acpi_pad.c b/drivers/acpi/acpi_pad.c
index a43fa1a..1502c502 100644
--- a/drivers/acpi/acpi_pad.c
+++ b/drivers/acpi/acpi_pad.c
@@ -36,6 +36,7 @@
 #define ACPI_PROCESSOR_AGGREGATOR_DEVICE_NAME "Processor Aggregator"
 #define ACPI_PROCESSOR_AGGREGATOR_NOTIFY 0x80
 static DEFINE_MUTEX(isolated_cpus_lock);
+static DEFINE_MUTEX(round_robin_lock);
 
 static unsigned long power_saving_mwait_eax;
 
@@ -107,7 +108,7 @@
 	if (!alloc_cpumask_var(&tmp, GFP_KERNEL))
 		return;
 
-	mutex_lock(&isolated_cpus_lock);
+	mutex_lock(&round_robin_lock);
 	cpumask_clear(tmp);
 	for_each_cpu(cpu, pad_busy_cpus)
 		cpumask_or(tmp, tmp, topology_thread_cpumask(cpu));
@@ -116,7 +117,7 @@
 	if (cpumask_empty(tmp))
 		cpumask_andnot(tmp, cpu_online_mask, pad_busy_cpus);
 	if (cpumask_empty(tmp)) {
-		mutex_unlock(&isolated_cpus_lock);
+		mutex_unlock(&round_robin_lock);
 		return;
 	}
 	for_each_cpu(cpu, tmp) {
@@ -131,7 +132,7 @@
 	tsk_in_cpu[tsk_index] = preferred_cpu;
 	cpumask_set_cpu(preferred_cpu, pad_busy_cpus);
 	cpu_weight[preferred_cpu]++;
-	mutex_unlock(&isolated_cpus_lock);
+	mutex_unlock(&round_robin_lock);
 
 	set_cpus_allowed_ptr(current, cpumask_of(preferred_cpu));
 }
diff --git a/drivers/acpi/apei/apei-base.c b/drivers/acpi/apei/apei-base.c
index 5577762..6686b1e 100644
--- a/drivers/acpi/apei/apei-base.c
+++ b/drivers/acpi/apei/apei-base.c
@@ -243,7 +243,7 @@
 	u8 ins = entry->instruction;
 
 	if (ctx->ins_table[ins].flags & APEI_EXEC_INS_ACCESS_REGISTER)
-		return acpi_os_map_generic_address(&entry->register_region);
+		return apei_map_generic_address(&entry->register_region);
 
 	return 0;
 }
@@ -276,7 +276,7 @@
 	u8 ins = entry->instruction;
 
 	if (ctx->ins_table[ins].flags & APEI_EXEC_INS_ACCESS_REGISTER)
-		acpi_os_unmap_generic_address(&entry->register_region);
+		apei_unmap_generic_address(&entry->register_region);
 
 	return 0;
 }
@@ -606,6 +606,19 @@
 	return 0;
 }
 
+int apei_map_generic_address(struct acpi_generic_address *reg)
+{
+	int rc;
+	u32 access_bit_width;
+	u64 address;
+
+	rc = apei_check_gar(reg, &address, &access_bit_width);
+	if (rc)
+		return rc;
+	return acpi_os_map_generic_address(reg);
+}
+EXPORT_SYMBOL_GPL(apei_map_generic_address);
+
 /* read GAR in interrupt (including NMI) or process context */
 int apei_read(u64 *val, struct acpi_generic_address *reg)
 {
diff --git a/drivers/acpi/apei/apei-internal.h b/drivers/acpi/apei/apei-internal.h
index cca240a..f220d64 100644
--- a/drivers/acpi/apei/apei-internal.h
+++ b/drivers/acpi/apei/apei-internal.h
@@ -7,6 +7,8 @@
 #define APEI_INTERNAL_H
 
 #include <linux/cper.h>
+#include <linux/acpi.h>
+#include <linux/acpi_io.h>
 
 struct apei_exec_context;
 
@@ -68,6 +70,13 @@
 /* IP has been set in instruction function */
 #define APEI_EXEC_SET_IP	1
 
+int apei_map_generic_address(struct acpi_generic_address *reg);
+
+static inline void apei_unmap_generic_address(struct acpi_generic_address *reg)
+{
+	acpi_os_unmap_generic_address(reg);
+}
+
 int apei_read(u64 *val, struct acpi_generic_address *reg);
 int apei_write(u64 val, struct acpi_generic_address *reg);
 
diff --git a/drivers/acpi/apei/ghes.c b/drivers/acpi/apei/ghes.c
index 9b3cac0..1599566 100644
--- a/drivers/acpi/apei/ghes.c
+++ b/drivers/acpi/apei/ghes.c
@@ -301,7 +301,7 @@
 	if (!ghes)
 		return ERR_PTR(-ENOMEM);
 	ghes->generic = generic;
-	rc = acpi_os_map_generic_address(&generic->error_status_address);
+	rc = apei_map_generic_address(&generic->error_status_address);
 	if (rc)
 		goto err_free;
 	error_block_length = generic->error_block_length;
@@ -321,7 +321,7 @@
 	return ghes;
 
 err_unmap:
-	acpi_os_unmap_generic_address(&generic->error_status_address);
+	apei_unmap_generic_address(&generic->error_status_address);
 err_free:
 	kfree(ghes);
 	return ERR_PTR(rc);
@@ -330,7 +330,7 @@
 static void ghes_fini(struct ghes *ghes)
 {
 	kfree(ghes->estatus);
-	acpi_os_unmap_generic_address(&ghes->generic->error_status_address);
+	apei_unmap_generic_address(&ghes->generic->error_status_address);
 }
 
 enum {
diff --git a/drivers/acpi/battery.c b/drivers/acpi/battery.c
index 86933ca..7dd3f9f 100644
--- a/drivers/acpi/battery.c
+++ b/drivers/acpi/battery.c
@@ -643,11 +643,19 @@
 
 static void acpi_battery_refresh(struct acpi_battery *battery)
 {
+	int power_unit;
+
 	if (!battery->bat.dev)
 		return;
 
+	power_unit = battery->power_unit;
+
 	acpi_battery_get_info(battery);
-	/* The battery may have changed its reporting units. */
+
+	if (power_unit == battery->power_unit)
+		return;
+
+	/* The battery has changed its reporting units. */
 	sysfs_remove_battery(battery);
 	sysfs_add_battery(battery);
 }
diff --git a/drivers/acpi/bgrt.c b/drivers/acpi/bgrt.c
index 8cf6c46..6680df3 100644
--- a/drivers/acpi/bgrt.c
+++ b/drivers/acpi/bgrt.c
@@ -11,6 +11,7 @@
 #include <linux/init.h>
 #include <linux/device.h>
 #include <linux/sysfs.h>
+#include <linux/io.h>
 #include <acpi/acpi.h>
 #include <acpi/acpi_bus.h>
 
diff --git a/drivers/acpi/bus.c b/drivers/acpi/bus.c
index 3188da3..adceafd 100644
--- a/drivers/acpi/bus.c
+++ b/drivers/acpi/bus.c
@@ -182,41 +182,66 @@
                                  Power Management
    -------------------------------------------------------------------------- */
 
+static const char *state_string(int state)
+{
+	switch (state) {
+	case ACPI_STATE_D0:
+		return "D0";
+	case ACPI_STATE_D1:
+		return "D1";
+	case ACPI_STATE_D2:
+		return "D2";
+	case ACPI_STATE_D3_HOT:
+		return "D3hot";
+	case ACPI_STATE_D3_COLD:
+		return "D3";
+	default:
+		return "(unknown)";
+	}
+}
+
 static int __acpi_bus_get_power(struct acpi_device *device, int *state)
 {
-	int result = 0;
-	acpi_status status = 0;
-	unsigned long long psc = 0;
+	int result = ACPI_STATE_UNKNOWN;
 
 	if (!device || !state)
 		return -EINVAL;
 
-	*state = ACPI_STATE_UNKNOWN;
-
-	if (device->flags.power_manageable) {
-		/*
-		 * Get the device's power state either directly (via _PSC) or
-		 * indirectly (via power resources).
-		 */
-		if (device->power.flags.power_resources) {
-			result = acpi_power_get_inferred_state(device, state);
-			if (result)
-				return result;
-		} else if (device->power.flags.explicit_get) {
-			status = acpi_evaluate_integer(device->handle, "_PSC",
-						       NULL, &psc);
-			if (ACPI_FAILURE(status))
-				return -ENODEV;
-			*state = (int)psc;
-		}
-	} else {
+	if (!device->flags.power_manageable) {
 		/* TBD: Non-recursive algorithm for walking up hierarchy. */
 		*state = device->parent ?
 			device->parent->power.state : ACPI_STATE_D0;
+		goto out;
 	}
 
-	ACPI_DEBUG_PRINT((ACPI_DB_INFO, "Device [%s] power state is D%d\n",
-			  device->pnp.bus_id, *state));
+	/*
+	 * Get the device's power state either directly (via _PSC) or
+	 * indirectly (via power resources).
+	 */
+	if (device->power.flags.explicit_get) {
+		unsigned long long psc;
+		acpi_status status = acpi_evaluate_integer(device->handle,
+							   "_PSC", NULL, &psc);
+		if (ACPI_FAILURE(status))
+			return -ENODEV;
+
+		result = psc;
+	}
+	/* The test below covers ACPI_STATE_UNKNOWN too. */
+	if (result <= ACPI_STATE_D2) {
+	  ; /* Do nothing. */
+	} else if (device->power.flags.power_resources) {
+		int error = acpi_power_get_inferred_state(device, &result);
+		if (error)
+			return error;
+	} else if (result == ACPI_STATE_D3_HOT) {
+		result = ACPI_STATE_D3;
+	}
+	*state = result;
+
+ out:
+	ACPI_DEBUG_PRINT((ACPI_DB_INFO, "Device [%s] power state is %s\n",
+			  device->pnp.bus_id, state_string(*state)));
 
 	return 0;
 }
@@ -234,13 +259,14 @@
 	/* Make sure this is a valid target state */
 
 	if (state == device->power.state) {
-		ACPI_DEBUG_PRINT((ACPI_DB_INFO, "Device is already at D%d\n",
-				  state));
+		ACPI_DEBUG_PRINT((ACPI_DB_INFO, "Device is already at %s\n",
+				  state_string(state)));
 		return 0;
 	}
 
 	if (!device->power.states[state].flags.valid) {
-		printk(KERN_WARNING PREFIX "Device does not support D%d\n", state);
+		printk(KERN_WARNING PREFIX "Device does not support %s\n",
+		       state_string(state));
 		return -ENODEV;
 	}
 	if (device->parent && (state < device->parent->power.state)) {
@@ -294,13 +320,13 @@
       end:
 	if (result)
 		printk(KERN_WARNING PREFIX
-			      "Device [%s] failed to transition to D%d\n",
-			      device->pnp.bus_id, state);
+			      "Device [%s] failed to transition to %s\n",
+			      device->pnp.bus_id, state_string(state));
 	else {
 		device->power.state = state;
 		ACPI_DEBUG_PRINT((ACPI_DB_INFO,
-				  "Device [%s] transitioned to D%d\n",
-				  device->pnp.bus_id, state));
+				  "Device [%s] transitioned to %s\n",
+				  device->pnp.bus_id, state_string(state)));
 	}
 
 	return result;
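
For readers following the D-state reporting change above, the new state_string() helper is easy to exercise outside the kernel. The sketch below is a minimal user-space reproduction; the numeric values of the ACPI_STATE_* constants are assumptions that mirror <acpi/actypes.h> and are not part of this patch.

#include <stdio.h>

/* Assumed values mirroring ACPI_STATE_D0..D3_COLD; not taken from this patch. */
enum { D0, D1, D2, D3_HOT, D3_COLD };

static const char *state_string(int state)
{
	switch (state) {
	case D0:      return "D0";
	case D1:      return "D1";
	case D2:      return "D2";
	case D3_HOT:  return "D3hot";
	case D3_COLD: return "D3";	/* D3cold is still printed as plain "D3" */
	default:      return "(unknown)";
	}
}

int main(void)
{
	int s;

	for (s = D0; s <= D3_COLD; s++)
		printf("state %d -> %s\n", s, state_string(s));
	return 0;
}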
diff --git a/drivers/acpi/power.c b/drivers/acpi/power.c
index 0500f71..dd6d6a3 100644
--- a/drivers/acpi/power.c
+++ b/drivers/acpi/power.c
@@ -631,7 +631,7 @@
 	 * We know a device's inferred power state when all the resources
 	 * required for a given D-state are 'on'.
 	 */
-	for (i = ACPI_STATE_D0; i < ACPI_STATE_D3_HOT; i++) {
+	for (i = ACPI_STATE_D0; i <= ACPI_STATE_D3_HOT; i++) {
 		list = &device->power.states[i].resources;
 		if (list->count < 1)
 			continue;
diff --git a/drivers/acpi/processor_idle.c b/drivers/acpi/processor_idle.c
index f3decb3..47a8caa 100644
--- a/drivers/acpi/processor_idle.c
+++ b/drivers/acpi/processor_idle.c
@@ -224,6 +224,7 @@
 /*
  * Suspend / resume control
  */
+static int acpi_idle_suspend;
 static u32 saved_bm_rld;
 
 static void acpi_idle_bm_rld_save(void)
@@ -242,13 +243,21 @@
 
 int acpi_processor_suspend(struct acpi_device * device, pm_message_t state)
 {
+	if (acpi_idle_suspend == 1)
+		return 0;
+
 	acpi_idle_bm_rld_save();
+	acpi_idle_suspend = 1;
 	return 0;
 }
 
 int acpi_processor_resume(struct acpi_device * device)
 {
+	if (acpi_idle_suspend == 0)
+		return 0;
+
 	acpi_idle_bm_rld_restore();
+	acpi_idle_suspend = 0;
 	return 0;
 }
 
@@ -754,6 +763,12 @@
 
 	local_irq_disable();
 
+	if (acpi_idle_suspend) {
+		local_irq_enable();
+		cpu_relax();
+		return -EBUSY;
+	}
+
 	lapic_timer_state_broadcast(pr, cx, 1);
 	kt1 = ktime_get_real();
 	acpi_idle_do_entry(cx);
@@ -823,6 +838,12 @@
 
 	local_irq_disable();
 
+	if (acpi_idle_suspend) {
+		local_irq_enable();
+		cpu_relax();
+		return -EBUSY;
+	}
+
 	if (cx->entry_method != ACPI_CSTATE_FFH) {
 		current_thread_info()->status &= ~TS_POLLING;
 		/*
@@ -907,14 +928,21 @@
 						drv, drv->safe_state_index);
 		} else {
 			local_irq_disable();
-			acpi_safe_halt();
+			if (!acpi_idle_suspend)
+				acpi_safe_halt();
 			local_irq_enable();
-			return -EINVAL;
+			return -EBUSY;
 		}
 	}
 
 	local_irq_disable();
 
+	if (acpi_idle_suspend) {
+		local_irq_enable();
+		cpu_relax();
+		return -EBUSY;
+	}
+
 	if (cx->entry_method != ACPI_CSTATE_FFH) {
 		current_thread_info()->status &= ~TS_POLLING;
 		/*
diff --git a/drivers/acpi/processor_perflib.c b/drivers/acpi/processor_perflib.c
index 0af48a8..a093dc1 100644
--- a/drivers/acpi/processor_perflib.c
+++ b/drivers/acpi/processor_perflib.c
@@ -333,6 +333,7 @@
 	struct acpi_buffer state = { 0, NULL };
 	union acpi_object *pss = NULL;
 	int i;
+	int last_invalid = -1;
 
 
 	status = acpi_evaluate_object(pr->handle, "_PSS", NULL, &buffer);
@@ -394,14 +395,33 @@
 		    ((u32)(px->core_frequency * 1000) !=
 		     (px->core_frequency * 1000))) {
 			printk(KERN_ERR FW_BUG PREFIX
-			       "Invalid BIOS _PSS frequency: 0x%llx MHz\n",
-			       px->core_frequency);
-			result = -EFAULT;
-			kfree(pr->performance->states);
-			goto end;
+			       "Invalid BIOS _PSS frequency found for processor %d: 0x%llx MHz\n",
+			       pr->id, px->core_frequency);
+			if (last_invalid == -1)
+				last_invalid = i;
+		} else {
+			if (last_invalid != -1) {
+				/*
+				 * Copy this valid entry over last_invalid entry
+				 */
+				memcpy(&(pr->performance->states[last_invalid]),
+				       px, sizeof(struct acpi_processor_px));
+				++last_invalid;
+			}
 		}
 	}
 
+	if (last_invalid == 0) {
+		printk(KERN_ERR FW_BUG PREFIX
+		       "No valid BIOS _PSS frequency found for processor %d\n", pr->id);
+		result = -EFAULT;
+		kfree(pr->performance->states);
+		pr->performance->states = NULL;
+	}
+
+	if (last_invalid > 0)
+		pr->performance->state_count = last_invalid;
+
       end:
 	kfree(buffer.pointer);
 
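
The _PSS change above keeps scanning after a bogus table entry and compacts the valid entries down over the invalid slots instead of discarding the whole table. Below is a hedged stand-alone sketch of that compaction; struct px and the validity test are simplifications, not the driver's real definitions.

#include <stdio.h>
#include <string.h>

struct px { unsigned long long core_frequency; };	/* simplified stand-in */

/* Returns the number of valid entries after compaction (0 if none valid). */
static int compact_pss(struct px *states, int count)
{
	int i, last_invalid = -1;

	for (i = 0; i < count; i++) {
		if (states[i].core_frequency == 0) {		/* "invalid" entry */
			if (last_invalid == -1)
				last_invalid = i;
		} else if (last_invalid != -1) {
			/* copy this valid entry over the oldest invalid slot */
			memcpy(&states[last_invalid], &states[i], sizeof(*states));
			++last_invalid;
		}
	}
	return last_invalid == -1 ? count : last_invalid;
}

int main(void)
{
	struct px s[] = { {1600}, {0}, {1200}, {0}, {800} };
	int i, n = compact_pss(s, 5);

	for (i = 0; i < n; i++)
		printf("%llu MHz\n", s[i].core_frequency);	/* 1600, 1200, 800 */
	return 0;
}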
diff --git a/drivers/acpi/scan.c b/drivers/acpi/scan.c
index 85cbfdc..c8a1f3b 100644
--- a/drivers/acpi/scan.c
+++ b/drivers/acpi/scan.c
@@ -1567,6 +1567,7 @@
 						ACPI_BUS_TYPE_POWER_BUTTON,
 						ACPI_STA_DEFAULT,
 						&ops);
+		device_init_wakeup(&device->dev, true);
 	}
 
 	if ((acpi_gbl_FADT.flags & ACPI_FADT_SLEEP_BUTTON) == 0) {
diff --git a/drivers/acpi/sleep.c b/drivers/acpi/sleep.c
index 06527c5..88561029 100644
--- a/drivers/acpi/sleep.c
+++ b/drivers/acpi/sleep.c
@@ -57,6 +57,7 @@
 MODULE_PARM_DESC(bfs, "Enable evaluation of _BFS on resume.");
 
 static u8 sleep_states[ACPI_S_STATE_COUNT];
+static bool pwr_btn_event_pending;
 
 static void acpi_sleep_tts_switch(u32 acpi_state)
 {
@@ -93,11 +94,9 @@
 #ifdef CONFIG_ACPI_SLEEP
 	/* do we have a wakeup address for S2 and S3? */
 	if (acpi_state == ACPI_STATE_S3) {
-		if (!acpi_wakeup_address) {
+		if (!acpi_wakeup_address)
 			return -EFAULT;
-		}
-		acpi_set_firmware_waking_vector(
-				(acpi_physical_address)acpi_wakeup_address);
+		acpi_set_firmware_waking_vector(acpi_wakeup_address);
 
 	}
 	ACPI_FLUSH_CPU_CACHE();
@@ -186,6 +185,14 @@
 	return error;
 }
 
+static int find_powerf_dev(struct device *dev, void *data)
+{
+	struct acpi_device *device = to_acpi_device(dev);
+	const char *hid = acpi_device_hid(device);
+
+	return !strcmp(hid, ACPI_BUTTON_HID_POWERF);
+}
+
 /**
  *	acpi_pm_finish - Instruct the platform to leave a sleep state.
  *
@@ -194,6 +201,7 @@
  */
 static void acpi_pm_finish(void)
 {
+	struct device *pwr_btn_dev;
 	u32 acpi_state = acpi_target_sleep_state;
 
 	acpi_ec_unblock_transactions();
@@ -211,6 +219,23 @@
 	acpi_set_firmware_waking_vector((acpi_physical_address) 0);
 
 	acpi_target_sleep_state = ACPI_STATE_S0;
+
+	/* If we were woken with the fixed power button, provide a small
+	 * hint to userspace in the form of a wakeup event on the fixed power
+	 * button device (if it can be found).
+	 *
+	 * We delay the event generation until now, as the PM layer requires
+	 * timekeeping to be running before we generate events. */
+	if (!pwr_btn_event_pending)
+		return;
+
+	pwr_btn_event_pending = false;
+	pwr_btn_dev = bus_find_device(&acpi_bus_type, NULL, NULL,
+				      find_powerf_dev);
+	if (pwr_btn_dev) {
+		pm_wakeup_event(pwr_btn_dev, 0);
+		put_device(pwr_btn_dev);
+	}
 }
 
 /**
@@ -300,9 +325,23 @@
 	/* ACPI 3.0 specs (P62) says that it's the responsibility
 	 * of the OSPM to clear the status bit [ implying that the
 	 * POWER_BUTTON event should not reach userspace ]
+	 *
+	 * However, we do generate a small hint for userspace in the form of
+	 * a wakeup event. We flag this condition for now and generate the
+	 * event later, as we're currently too early in resume to be able to
+	 * generate wakeup events.
 	 */
-	if (ACPI_SUCCESS(status) && (acpi_state == ACPI_STATE_S3))
-		acpi_clear_event(ACPI_EVENT_POWER_BUTTON);
+	if (ACPI_SUCCESS(status) && (acpi_state == ACPI_STATE_S3)) {
+		acpi_event_status pwr_btn_status;
+
+		acpi_get_event_status(ACPI_EVENT_POWER_BUTTON, &pwr_btn_status);
+
+		if (pwr_btn_status & ACPI_EVENT_FLAG_SET) {
+			acpi_clear_event(ACPI_EVENT_POWER_BUTTON);
+			/* Flag for later */
+			pwr_btn_event_pending = true;
+		}
+	}
 
 	/*
 	 * Disable and clear GPE status before interrupt is enabled. Some GPEs
@@ -732,8 +771,8 @@
 	 * can wake the system.  _S0W may be valid, too.
 	 */
 	if (acpi_target_sleep_state == ACPI_STATE_S0 ||
-	    (device_may_wakeup(dev) &&
-	     adev->wakeup.sleep_state <= acpi_target_sleep_state)) {
+	    (device_may_wakeup(dev) && adev->wakeup.flags.valid &&
+	     adev->wakeup.sleep_state >= acpi_target_sleep_state)) {
 		acpi_status status;
 
 		acpi_method[3] = 'W';
diff --git a/drivers/acpi/sysfs.c b/drivers/acpi/sysfs.c
index 9f66181..240a244 100644
--- a/drivers/acpi/sysfs.c
+++ b/drivers/acpi/sysfs.c
@@ -173,7 +173,7 @@
 {
 	int result = 0;
 
-	if (!strncmp(val, "enable", strlen("enable") - 1)) {
+	if (!strncmp(val, "enable", strlen("enable"))) {
 		result = acpi_debug_trace(trace_method_name, trace_debug_level,
 					  trace_debug_layer, 0);
 		if (result)
@@ -181,7 +181,7 @@
 		goto exit;
 	}
 
-	if (!strncmp(val, "disable", strlen("disable") - 1)) {
+	if (!strncmp(val, "disable", strlen("disable"))) {
 		int name = 0;
 		result = acpi_debug_trace((char *)&name, trace_debug_level,
 					  trace_debug_layer, 0);
diff --git a/drivers/acpi/video.c b/drivers/acpi/video.c
index 9577b6f..1e0a9e1 100644
--- a/drivers/acpi/video.c
+++ b/drivers/acpi/video.c
@@ -558,6 +558,8 @@
 	union acpi_object arg0 = { ACPI_TYPE_INTEGER };
 	struct acpi_object_list args = { 1, &arg0 };
 
+	if (!video->cap._DOS)
+		return 0;
 
 	if (bios_flag < 0 || bios_flag > 3 || lcd_flag < 0 || lcd_flag > 1)
 		return -EINVAL;
@@ -1687,10 +1689,6 @@
 	set_bit(KEY_BRIGHTNESS_ZERO, input->keybit);
 	set_bit(KEY_DISPLAY_OFF, input->keybit);
 
-	error = input_register_device(input);
-	if (error)
-		goto err_stop_video;
-
 	printk(KERN_INFO PREFIX "%s [%s] (multi-head: %s  rom: %s  post: %s)\n",
 	       ACPI_VIDEO_DEVICE_NAME, acpi_device_bid(device),
 	       video->flags.multihead ? "yes" : "no",
@@ -1701,12 +1699,16 @@
 	video->pm_nb.priority = 0;
 	error = register_pm_notifier(&video->pm_nb);
 	if (error)
-		goto err_unregister_input_dev;
+		goto err_stop_video;
+
+	error = input_register_device(input);
+	if (error)
+		goto err_unregister_pm_notifier;
 
 	return 0;
 
- err_unregister_input_dev:
-	input_unregister_device(input);
+ err_unregister_pm_notifier:
+	unregister_pm_notifier(&video->pm_nb);
  err_stop_video:
 	acpi_video_bus_stop_devices(video);
  err_free_input_dev:
@@ -1743,9 +1745,18 @@
 	return 0;
 }
 
+static int __init is_i740(struct pci_dev *dev)
+{
+	if (dev->device == 0x00D1)
+		return 1;
+	if (dev->device == 0x7000)
+		return 1;
+	return 0;
+}
+
 static int __init intel_opregion_present(void)
 {
-#if defined(CONFIG_DRM_I915) || defined(CONFIG_DRM_I915_MODULE)
+	int opregion = 0;
 	struct pci_dev *dev = NULL;
 	u32 address;
 
@@ -1754,13 +1765,15 @@
 			continue;
 		if (dev->vendor != PCI_VENDOR_ID_INTEL)
 			continue;
+		/* We don't want to poke around undefined i740 registers */
+		if (is_i740(dev))
+			continue;
 		pci_read_config_dword(dev, 0xfc, &address);
 		if (!address)
 			continue;
-		return 1;
+		opregion = 1;
 	}
-#endif
-	return 0;
+	return opregion;
 }
 
 int acpi_video_register(void)
diff --git a/drivers/amba/Makefile b/drivers/amba/Makefile
index 40fe740..66e81c2 100644
--- a/drivers/amba/Makefile
+++ b/drivers/amba/Makefile
@@ -1,2 +1,2 @@
-obj-y		+= bus.o
-
+obj-$(CONFIG_ARM_AMBA)		+= bus.o
+obj-$(CONFIG_TEGRA_AHB)		+= tegra-ahb.o
diff --git a/drivers/amba/tegra-ahb.c b/drivers/amba/tegra-ahb.c
new file mode 100644
index 0000000..aa0b1f1
--- /dev/null
+++ b/drivers/amba/tegra-ahb.c
@@ -0,0 +1,293 @@
+/*
+ * Copyright (c) 2012, NVIDIA CORPORATION.  All rights reserved.
+ * Copyright (C) 2011 Google, Inc.
+ *
+ * Author:
+ *	Jay Cheng <jacheng@nvidia.com>
+ *	James Wylder <james.wylder@motorola.com>
+ *	Benoit Goby <benoit@android.com>
+ *	Colin Cross <ccross@android.com>
+ *	Hiroshi DOYU <hdoyu@nvidia.com>
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/io.h>
+
+#define DRV_NAME "tegra-ahb"
+
+#define AHB_ARBITRATION_DISABLE		0x00
+#define AHB_ARBITRATION_PRIORITY_CTRL	0x04
+#define   AHB_PRIORITY_WEIGHT(x)	(((x) & 0x7) << 29)
+#define   PRIORITY_SELECT_USB BIT(6)
+#define   PRIORITY_SELECT_USB2 BIT(18)
+#define   PRIORITY_SELECT_USB3 BIT(17)
+
+#define AHB_GIZMO_AHB_MEM		0x0c
+#define   ENB_FAST_REARBITRATE BIT(2)
+#define   DONT_SPLIT_AHB_WR     BIT(7)
+
+#define AHB_GIZMO_APB_DMA		0x10
+#define AHB_GIZMO_IDE			0x18
+#define AHB_GIZMO_USB			0x1c
+#define AHB_GIZMO_AHB_XBAR_BRIDGE	0x20
+#define AHB_GIZMO_CPU_AHB_BRIDGE	0x24
+#define AHB_GIZMO_COP_AHB_BRIDGE	0x28
+#define AHB_GIZMO_XBAR_APB_CTLR		0x2c
+#define AHB_GIZMO_VCP_AHB_BRIDGE	0x30
+#define AHB_GIZMO_NAND			0x3c
+#define AHB_GIZMO_SDMMC4		0x44
+#define AHB_GIZMO_XIO			0x48
+#define AHB_GIZMO_BSEV			0x60
+#define AHB_GIZMO_BSEA			0x70
+#define AHB_GIZMO_NOR			0x74
+#define AHB_GIZMO_USB2			0x78
+#define AHB_GIZMO_USB3			0x7c
+#define   IMMEDIATE	BIT(18)
+
+#define AHB_GIZMO_SDMMC1		0x80
+#define AHB_GIZMO_SDMMC2		0x84
+#define AHB_GIZMO_SDMMC3		0x88
+#define AHB_MEM_PREFETCH_CFG_X		0xd8
+#define AHB_ARBITRATION_XBAR_CTRL	0xdc
+#define AHB_MEM_PREFETCH_CFG3		0xe0
+#define AHB_MEM_PREFETCH_CFG4		0xe4
+#define AHB_MEM_PREFETCH_CFG1		0xec
+#define AHB_MEM_PREFETCH_CFG2		0xf0
+#define   PREFETCH_ENB	BIT(31)
+#define   MST_ID(x)	(((x) & 0x1f) << 26)
+#define   AHBDMA_MST_ID	MST_ID(5)
+#define   USB_MST_ID	MST_ID(6)
+#define   USB2_MST_ID	MST_ID(18)
+#define   USB3_MST_ID	MST_ID(17)
+#define   ADDR_BNDRY(x)	(((x) & 0xf) << 21)
+#define   INACTIVITY_TIMEOUT(x)	(((x) & 0xffff) << 0)
+
+#define AHB_ARBITRATION_AHB_MEM_WRQUE_MST_ID	0xf8
+
+#define AHB_ARBITRATION_XBAR_CTRL_SMMU_INIT_DONE BIT(17)
+
+static struct platform_driver tegra_ahb_driver;
+
+static const u32 tegra_ahb_gizmo[] = {
+	AHB_ARBITRATION_DISABLE,
+	AHB_ARBITRATION_PRIORITY_CTRL,
+	AHB_GIZMO_AHB_MEM,
+	AHB_GIZMO_APB_DMA,
+	AHB_GIZMO_IDE,
+	AHB_GIZMO_USB,
+	AHB_GIZMO_AHB_XBAR_BRIDGE,
+	AHB_GIZMO_CPU_AHB_BRIDGE,
+	AHB_GIZMO_COP_AHB_BRIDGE,
+	AHB_GIZMO_XBAR_APB_CTLR,
+	AHB_GIZMO_VCP_AHB_BRIDGE,
+	AHB_GIZMO_NAND,
+	AHB_GIZMO_SDMMC4,
+	AHB_GIZMO_XIO,
+	AHB_GIZMO_BSEV,
+	AHB_GIZMO_BSEA,
+	AHB_GIZMO_NOR,
+	AHB_GIZMO_USB2,
+	AHB_GIZMO_USB3,
+	AHB_GIZMO_SDMMC1,
+	AHB_GIZMO_SDMMC2,
+	AHB_GIZMO_SDMMC3,
+	AHB_MEM_PREFETCH_CFG_X,
+	AHB_ARBITRATION_XBAR_CTRL,
+	AHB_MEM_PREFETCH_CFG3,
+	AHB_MEM_PREFETCH_CFG4,
+	AHB_MEM_PREFETCH_CFG1,
+	AHB_MEM_PREFETCH_CFG2,
+	AHB_ARBITRATION_AHB_MEM_WRQUE_MST_ID,
+};
+
+struct tegra_ahb {
+	void __iomem	*regs;
+	struct device	*dev;
+	u32		ctx[0];
+};
+
+static inline u32 gizmo_readl(struct tegra_ahb *ahb, u32 offset)
+{
+	return readl(ahb->regs + offset);
+}
+
+static inline void gizmo_writel(struct tegra_ahb *ahb, u32 value, u32 offset)
+{
+	writel(value, ahb->regs + offset);
+}
+
+#ifdef CONFIG_ARCH_TEGRA_3x_SOC
+static int tegra_ahb_match_by_smmu(struct device *dev, void *data)
+{
+	struct tegra_ahb *ahb = dev_get_drvdata(dev);
+	struct device_node *dn = data;
+
+	return (ahb->dev->of_node == dn) ? 1 : 0;
+}
+
+int tegra_ahb_enable_smmu(struct device_node *dn)
+{
+	struct device *dev;
+	u32 val;
+	struct tegra_ahb *ahb;
+
+	dev = driver_find_device(&tegra_ahb_driver.driver, NULL, dn,
+				 tegra_ahb_match_by_smmu);
+	if (!dev)
+		return -EPROBE_DEFER;
+	ahb = dev_get_drvdata(dev);
+	val = gizmo_readl(ahb, AHB_ARBITRATION_XBAR_CTRL);
+	val |= AHB_ARBITRATION_XBAR_CTRL_SMMU_INIT_DONE;
+	gizmo_writel(ahb, val, AHB_ARBITRATION_XBAR_CTRL);
+	return 0;
+}
+EXPORT_SYMBOL(tegra_ahb_enable_smmu);
+#endif
+
+static int tegra_ahb_suspend(struct device *dev)
+{
+	int i;
+	struct tegra_ahb *ahb = dev_get_drvdata(dev);
+
+	for (i = 0; i < ARRAY_SIZE(tegra_ahb_gizmo); i++)
+		ahb->ctx[i] = gizmo_readl(ahb, tegra_ahb_gizmo[i]);
+	return 0;
+}
+
+static int tegra_ahb_resume(struct device *dev)
+{
+	int i;
+	struct tegra_ahb *ahb = dev_get_drvdata(dev);
+
+	for (i = 0; i < ARRAY_SIZE(tegra_ahb_gizmo); i++)
+		gizmo_writel(ahb, ahb->ctx[i], tegra_ahb_gizmo[i]);
+	return 0;
+}
+
+static UNIVERSAL_DEV_PM_OPS(tegra_ahb_pm,
+			    tegra_ahb_suspend,
+			    tegra_ahb_resume, NULL);
+
+static void tegra_ahb_gizmo_init(struct tegra_ahb *ahb)
+{
+	u32 val;
+
+	val = gizmo_readl(ahb, AHB_GIZMO_AHB_MEM);
+	val |= ENB_FAST_REARBITRATE | IMMEDIATE | DONT_SPLIT_AHB_WR;
+	gizmo_writel(ahb, val, AHB_GIZMO_AHB_MEM);
+
+	val = gizmo_readl(ahb, AHB_GIZMO_USB);
+	val |= IMMEDIATE;
+	gizmo_writel(ahb, val, AHB_GIZMO_USB);
+
+	val = gizmo_readl(ahb, AHB_GIZMO_USB2);
+	val |= IMMEDIATE;
+	gizmo_writel(ahb, val, AHB_GIZMO_USB2);
+
+	val = gizmo_readl(ahb, AHB_GIZMO_USB3);
+	val |= IMMEDIATE;
+	gizmo_writel(ahb, val, AHB_GIZMO_USB3);
+
+	val = gizmo_readl(ahb, AHB_ARBITRATION_PRIORITY_CTRL);
+	val |= PRIORITY_SELECT_USB |
+		PRIORITY_SELECT_USB2 |
+		PRIORITY_SELECT_USB3 |
+		AHB_PRIORITY_WEIGHT(7);
+	gizmo_writel(ahb, val, AHB_ARBITRATION_PRIORITY_CTRL);
+
+	val = gizmo_readl(ahb, AHB_MEM_PREFETCH_CFG1);
+	val &= ~MST_ID(~0);
+	val |= PREFETCH_ENB |
+		AHBDMA_MST_ID |
+		ADDR_BNDRY(0xc) |
+		INACTIVITY_TIMEOUT(0x1000);
+	gizmo_writel(ahb, val, AHB_MEM_PREFETCH_CFG1);
+
+	val = gizmo_readl(ahb, AHB_MEM_PREFETCH_CFG2);
+	val &= ~MST_ID(~0);
+	val |= PREFETCH_ENB |
+		USB_MST_ID |
+		ADDR_BNDRY(0xc) |
+		INACTIVITY_TIMEOUT(0x1000);
+	gizmo_writel(ahb, val, AHB_MEM_PREFETCH_CFG2);
+
+	val = gizmo_readl(ahb, AHB_MEM_PREFETCH_CFG3);
+	val &= ~MST_ID(~0);
+	val |= PREFETCH_ENB |
+		USB3_MST_ID |
+		ADDR_BNDRY(0xc) |
+		INACTIVITY_TIMEOUT(0x1000);
+	gizmo_writel(ahb, val, AHB_MEM_PREFETCH_CFG3);
+
+	val = gizmo_readl(ahb, AHB_MEM_PREFETCH_CFG4);
+	val &= ~MST_ID(~0);
+	val |= PREFETCH_ENB |
+		USB2_MST_ID |
+		ADDR_BNDRY(0xc) |
+		INACTIVITY_TIMEOUT(0x1000);
+	gizmo_writel(ahb, val, AHB_MEM_PREFETCH_CFG4);
+}
+
+static int __devinit tegra_ahb_probe(struct platform_device *pdev)
+{
+	struct resource *res;
+	struct tegra_ahb *ahb;
+	size_t bytes;
+
+	bytes = sizeof(*ahb) + sizeof(u32) * ARRAY_SIZE(tegra_ahb_gizmo);
+	ahb = devm_kzalloc(&pdev->dev, bytes, GFP_KERNEL);
+	if (!ahb)
+		return -ENOMEM;
+
+	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+	if (!res)
+		return -ENODEV;
+	ahb->regs = devm_request_and_ioremap(&pdev->dev, res);
+	if (!ahb->regs)
+		return -EBUSY;
+
+	ahb->dev = &pdev->dev;
+	platform_set_drvdata(pdev, ahb);
+	tegra_ahb_gizmo_init(ahb);
+	return 0;
+}
+
+static int __devexit tegra_ahb_remove(struct platform_device *pdev)
+{
+	return 0;
+}
+
+static const struct of_device_id tegra_ahb_of_match[] __devinitconst = {
+	{ .compatible = "nvidia,tegra30-ahb", },
+	{ .compatible = "nvidia,tegra20-ahb", },
+	{},
+};
+
+static struct platform_driver tegra_ahb_driver = {
+	.probe = tegra_ahb_probe,
+	.remove = __devexit_p(tegra_ahb_remove),
+	.driver = {
+		.name = DRV_NAME,
+		.owner = THIS_MODULE,
+		.of_match_table = tegra_ahb_of_match,
+		.pm = &tegra_ahb_pm,
+	},
+};
+module_platform_driver(tegra_ahb_driver);
+
+MODULE_AUTHOR("Hiroshi DOYU <hdoyu@nvidia.com>");
+MODULE_DESCRIPTION("Tegra AHB driver");
+MODULE_LICENSE("GPL v2");
+MODULE_ALIAS("platform:" DRV_NAME);
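
tegra_ahb_enable_smmu() above is exported so the Tegra SMMU driver can flag the AHB crossbar once the IOMMU is ready, and it returns -EPROBE_DEFER until the tegra-ahb device has been bound. A hedged sketch of a consumer's probe path follows; the "nvidia,ahb" property name and the surrounding driver are illustrative assumptions, not part of this patch.

#include <linux/of.h>
#include <linux/platform_device.h>

extern int tegra_ahb_enable_smmu(struct device_node *dn);

static int example_smmu_probe(struct platform_device *pdev)
{
	struct device_node *ahb;
	int err;

	ahb = of_parse_phandle(pdev->dev.of_node, "nvidia,ahb", 0);
	if (!ahb)
		return -ENODEV;

	/* Returns -EPROBE_DEFER until the tegra-ahb device has been bound. */
	err = tegra_ahb_enable_smmu(ahb);
	of_node_put(ahb);
	if (err)
		return err;

	/* ... continue with SMMU setup ... */
	return 0;
}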
diff --git a/drivers/ata/pata_arasan_cf.c b/drivers/ata/pata_arasan_cf.c
index 3239517..ac6a5be 100644
--- a/drivers/ata/pata_arasan_cf.c
+++ b/drivers/ata/pata_arasan_cf.c
@@ -4,7 +4,7 @@
  * Arasan Compact Flash host controller source file
  *
  * Copyright (C) 2011 ST Microelectronics
- * Viresh Kumar <viresh.kumar@st.com>
+ * Viresh Kumar <viresh.linux@gmail.com>
  *
  * This file is licensed under the terms of the GNU General Public
  * License version 2. This program is licensed "as is" without any
@@ -959,7 +959,7 @@
 
 module_platform_driver(arasan_cf_driver);
 
-MODULE_AUTHOR("Viresh Kumar <viresh.kumar@st.com>");
+MODULE_AUTHOR("Viresh Kumar <viresh.linux@gmail.com>");
 MODULE_DESCRIPTION("Arasan ATA Compact Flash driver");
 MODULE_LICENSE("GPL");
 MODULE_ALIAS("platform:" DRIVER_NAME);
diff --git a/drivers/ata/sata_mv.c b/drivers/ata/sata_mv.c
index 7336d4a..24712ad 100644
--- a/drivers/ata/sata_mv.c
+++ b/drivers/ata/sata_mv.c
@@ -553,6 +553,7 @@
 
 #if defined(CONFIG_HAVE_CLK)
 	struct clk		*clk;
+	struct clk              **port_clks;
 #endif
 	/*
 	 * These consistent DMA memory pools give us guaranteed
@@ -4027,6 +4028,9 @@
 	struct resource *res;
 	int n_ports = 0;
 	int rc;
+#if defined(CONFIG_HAVE_CLK)
+	int port;
+#endif
 
 	ata_print_version_once(&pdev->dev, DRV_VERSION);
 
@@ -4054,6 +4058,13 @@
 
 	if (!host || !hpriv)
 		return -ENOMEM;
+#if defined(CONFIG_HAVE_CLK)
+	hpriv->port_clks = devm_kzalloc(&pdev->dev,
+					sizeof(struct clk *) * n_ports,
+					GFP_KERNEL);
+	if (!hpriv->port_clks)
+		return -ENOMEM;
+#endif
 	host->private_data = hpriv;
 	hpriv->n_ports = n_ports;
 	hpriv->board_idx = chip_soc;
@@ -4066,9 +4077,17 @@
 #if defined(CONFIG_HAVE_CLK)
 	hpriv->clk = clk_get(&pdev->dev, NULL);
 	if (IS_ERR(hpriv->clk))
-		dev_notice(&pdev->dev, "cannot get clkdev\n");
+		dev_notice(&pdev->dev, "cannot get optional clkdev\n");
 	else
-		clk_enable(hpriv->clk);
+		clk_prepare_enable(hpriv->clk);
+
+	for (port = 0; port < n_ports; port++) {
+		char port_number[16];
+		sprintf(port_number, "%d", port);
+		hpriv->port_clks[port] = clk_get(&pdev->dev, port_number);
+		if (!IS_ERR(hpriv->port_clks[port]))
+			clk_prepare_enable(hpriv->port_clks[port]);
+	}
 #endif
 
 	/*
@@ -4098,9 +4117,15 @@
 err:
 #if defined(CONFIG_HAVE_CLK)
 	if (!IS_ERR(hpriv->clk)) {
-		clk_disable(hpriv->clk);
+		clk_disable_unprepare(hpriv->clk);
 		clk_put(hpriv->clk);
 	}
+	for (port = 0; port < n_ports; port++) {
+		if (!IS_ERR(hpriv->port_clks[port])) {
+			clk_disable_unprepare(hpriv->port_clks[port]);
+			clk_put(hpriv->port_clks[port]);
+		}
+	}
 #endif
 
 	return rc;
@@ -4119,14 +4144,21 @@
 	struct ata_host *host = platform_get_drvdata(pdev);
 #if defined(CONFIG_HAVE_CLK)
 	struct mv_host_priv *hpriv = host->private_data;
+	int port;
 #endif
 	ata_host_detach(host);
 
 #if defined(CONFIG_HAVE_CLK)
 	if (!IS_ERR(hpriv->clk)) {
-		clk_disable(hpriv->clk);
+		clk_disable_unprepare(hpriv->clk);
 		clk_put(hpriv->clk);
 	}
+	for (port = 0; port < host->n_ports; port++) {
+		if (!IS_ERR(hpriv->port_clks[port])) {
+			clk_disable_unprepare(hpriv->port_clks[port]);
+			clk_put(hpriv->port_clks[port]);
+		}
+	}
 #endif
 	return 0;
 }
diff --git a/drivers/atm/solos-pci.c b/drivers/atm/solos-pci.c
index e8cd652..9851093 100644
--- a/drivers/atm/solos-pci.c
+++ b/drivers/atm/solos-pci.c
@@ -984,6 +984,7 @@
 			} else if (skb && card->using_dma) {
 				SKB_CB(skb)->dma_addr = pci_map_single(card->dev, skb->data,
 								       skb->len, PCI_DMA_TODEVICE);
+				card->tx_skb[port] = skb;
 				iowrite32(SKB_CB(skb)->dma_addr,
 					  card->config_regs + TX_DMA_ADDR(port));
 			}
@@ -1152,7 +1153,8 @@
 		db_fpga_upgrade = db_firmware_upgrade = 0;
 	}
 
-	if (card->fpga_version >= DMA_SUPPORTED){
+	if (card->fpga_version >= DMA_SUPPORTED) {
+		pci_set_master(dev);
 		card->using_dma = 1;
 	} else {
 		card->using_dma = 0;
diff --git a/drivers/base/Kconfig b/drivers/base/Kconfig
index 9aa618a..9b21469 100644
--- a/drivers/base/Kconfig
+++ b/drivers/base/Kconfig
@@ -192,4 +192,93 @@
 	  APIs extension; the file's descriptor can then be passed on to other
 	  driver.
 
+config CMA
+	bool "Contiguous Memory Allocator (EXPERIMENTAL)"
+	depends on HAVE_DMA_CONTIGUOUS && HAVE_MEMBLOCK && EXPERIMENTAL
+	select MIGRATION
+	help
+	  This enables the Contiguous Memory Allocator which allows drivers
+	  to allocate big physically-contiguous blocks of memory for use with
+	  hardware components that support neither I/O mapping nor scatter-gather.
+
+	  For more information see <include/linux/dma-contiguous.h>.
+	  If unsure, say "n".
+
+if CMA
+
+config CMA_DEBUG
+	bool "CMA debug messages (DEVELOPMENT)"
+	depends on DEBUG_KERNEL
+	help
+	  Turns on debug messages in CMA.  This produces KERN_DEBUG
+	  messages for every CMA call as well as various messages while
+	  processing calls such as dma_alloc_from_contiguous().
+	  This option does not affect warning and error messages.
+
+comment "Default contiguous memory area size:"
+
+config CMA_SIZE_MBYTES
+	int "Size in Mega Bytes"
+	depends on !CMA_SIZE_SEL_PERCENTAGE
+	default 16
+	help
+	  Defines the size (in MiB) of the default memory area for Contiguous
+	  Memory Allocator.
+
+config CMA_SIZE_PERCENTAGE
+	int "Percentage of total memory"
+	depends on !CMA_SIZE_SEL_MBYTES
+	default 10
+	help
+	  Defines the size of the default memory area for Contiguous Memory
+	  Allocator as a percentage of the total memory in the system.
+
+choice
+	prompt "Selected region size"
+	default CMA_SIZE_SEL_ABSOLUTE
+
+config CMA_SIZE_SEL_MBYTES
+	bool "Use mega bytes value only"
+
+config CMA_SIZE_SEL_PERCENTAGE
+	bool "Use percentage value only"
+
+config CMA_SIZE_SEL_MIN
+	bool "Use lower value (minimum)"
+
+config CMA_SIZE_SEL_MAX
+	bool "Use higher value (maximum)"
+
+endchoice
+
+config CMA_ALIGNMENT
+	int "Maximum PAGE_SIZE order of alignment for contiguous buffers"
+	range 4 9
+	default 8
+	help
+	  DMA mapping framework by default aligns all buffers to the smallest
+	  PAGE_SIZE order which is greater than or equal to the requested buffer
+	  size. This works well for buffers up to a few hundred kilobytes, but
+	  for larger buffers it is just a waste of memory. With this parameter
+	  you can specify the maximum PAGE_SIZE order for contiguous buffers.
+	  Larger buffers will be aligned only to this specified order. The
+	  order is expressed as a power of two multiplied by the PAGE_SIZE.
+
+	  For example, if your system defaults to 4KiB pages, the order value
+	  of 8 means that the buffers will be aligned up to 1MiB only.
+
+	  If unsure, leave the default value "8".
+
+config CMA_AREAS
+	int "Maximum count of the CMA device-private areas"
+	default 7
+	help
+	  CMA allows creating CMA areas for particular devices. This parameter
+	  sets the maximum number of such device-private CMA areas in the
+	  system.
+
+	  If unsure, leave the default value "7".
+
+endif
+
 endmenu
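
The options above only size the default area; the reservation itself happens early in boot through dma_contiguous_reserve() and, optionally, dma_declare_contiguous() for device-private areas. A minimal sketch under those assumptions (the board hook and camera device are hypothetical):

#include <linux/init.h>
#include <linux/dma-contiguous.h>

extern struct device example_camera_dev;	/* hypothetical device */

void __init example_board_reserve(void)
{
	/* Global area; its size comes from CONFIG_CMA_SIZE_* or the cma= option. */
	dma_contiguous_reserve(512UL << 20);	/* limit: keep it below 512 MiB */

	/* 16 MiB private area for one device, anywhere below the same limit. */
	dma_declare_contiguous(&example_camera_dev, 16UL << 20, 0, 512UL << 20);
}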
diff --git a/drivers/base/Makefile b/drivers/base/Makefile
index b6d1b9c..5aa2d70 100644
--- a/drivers/base/Makefile
+++ b/drivers/base/Makefile
@@ -6,6 +6,7 @@
 			   attribute_container.o transport_class.o \
 			   topology.o
 obj-$(CONFIG_DEVTMPFS)	+= devtmpfs.o
+obj-$(CONFIG_CMA) += dma-contiguous.o
 obj-y			+= power/
 obj-$(CONFIG_HAS_DMA)	+= dma-mapping.o
 obj-$(CONFIG_HAVE_GENERIC_DMA_COHERENT) += dma-coherent.o
diff --git a/drivers/base/dd.c b/drivers/base/dd.c
index 1b1cbb5..dcb8a6e 100644
--- a/drivers/base/dd.c
+++ b/drivers/base/dd.c
@@ -100,7 +100,7 @@
 	mutex_lock(&deferred_probe_mutex);
 	if (list_empty(&dev->p->deferred_probe)) {
 		dev_dbg(dev, "Added to deferred list\n");
-		list_add(&dev->p->deferred_probe, &deferred_probe_pending_list);
+		list_add_tail(&dev->p->deferred_probe, &deferred_probe_pending_list);
 	}
 	mutex_unlock(&deferred_probe_mutex);
 }
diff --git a/drivers/base/dma-buf.c b/drivers/base/dma-buf.c
index 05c64c1..24e88fe 100644
--- a/drivers/base/dma-buf.c
+++ b/drivers/base/dma-buf.c
@@ -44,8 +44,26 @@
 	return 0;
 }
 
+static int dma_buf_mmap_internal(struct file *file, struct vm_area_struct *vma)
+{
+	struct dma_buf *dmabuf;
+
+	if (!is_dma_buf_file(file))
+		return -EINVAL;
+
+	dmabuf = file->private_data;
+
+	/* check for overflowing the buffer's size */
+	if (vma->vm_pgoff + ((vma->vm_end - vma->vm_start) >> PAGE_SHIFT) >
+	    dmabuf->size >> PAGE_SHIFT)
+		return -EINVAL;
+
+	return dmabuf->ops->mmap(dmabuf, vma);
+}
+
 static const struct file_operations dma_buf_fops = {
 	.release	= dma_buf_release,
+	.mmap		= dma_buf_mmap_internal,
 };
 
 /*
@@ -82,7 +100,8 @@
 			  || !ops->unmap_dma_buf
 			  || !ops->release
 			  || !ops->kmap_atomic
-			  || !ops->kmap)) {
+			  || !ops->kmap
+			  || !ops->mmap)) {
 		return ERR_PTR(-EINVAL);
 	}
 
@@ -406,3 +425,81 @@
 		dmabuf->ops->kunmap(dmabuf, page_num, vaddr);
 }
 EXPORT_SYMBOL_GPL(dma_buf_kunmap);
+
+
+/**
+ * dma_buf_mmap - Setup up a userspace mmap with the given vma
+ * @dmabuf:	[in]	buffer that should back the vma
+ * @vma:	[in]	vma for the mmap
+ * @pgoff:	[in]	offset in pages where this mmap should start within the
+ * 			dma-buf buffer.
+ *
+ * This function adjusts the passed in vma so that it points at the file of the
+ * dma_buf operation. It also adjusts the starting pgoff and does bounds
+ * checking on the size of the vma. Then it calls the exporter's mmap function
+ * to set up the mapping.
+ *
+ * Can return negative error values, returns 0 on success.
+ */
+int dma_buf_mmap(struct dma_buf *dmabuf, struct vm_area_struct *vma,
+		 unsigned long pgoff)
+{
+	if (WARN_ON(!dmabuf || !vma))
+		return -EINVAL;
+
+	/* check for offset overflow */
+	if (pgoff + ((vma->vm_end - vma->vm_start) >> PAGE_SHIFT) < pgoff)
+		return -EOVERFLOW;
+
+	/* check for overflowing the buffer's size */
+	if (pgoff + ((vma->vm_end - vma->vm_start) >> PAGE_SHIFT) >
+	    dmabuf->size >> PAGE_SHIFT)
+		return -EINVAL;
+
+	/* readjust the vma */
+	if (vma->vm_file)
+		fput(vma->vm_file);
+
+	vma->vm_file = dmabuf->file;
+	get_file(vma->vm_file);
+
+	vma->vm_pgoff = pgoff;
+
+	return dmabuf->ops->mmap(dmabuf, vma);
+}
+EXPORT_SYMBOL_GPL(dma_buf_mmap);
+
+/**
+ * dma_buf_vmap - Create virtual mapping for the buffer object into kernel
+ * address space. Same restrictions as for vmap and friends apply.
+ * @dmabuf:	[in]	buffer to vmap
+ *
+ * This call may fail due to lack of virtual mapping address space.
+ * These calls are optional in drivers. Their intended use is to map
+ * objects linearly into kernel space for frequently accessed objects.
+ * Please attempt to use kmap/kunmap before thinking about these interfaces.
+ */
+void *dma_buf_vmap(struct dma_buf *dmabuf)
+{
+	if (WARN_ON(!dmabuf))
+		return NULL;
+
+	if (dmabuf->ops->vmap)
+		return dmabuf->ops->vmap(dmabuf);
+	return NULL;
+}
+EXPORT_SYMBOL_GPL(dma_buf_vmap);
+
+/**
+ * dma_buf_vunmap - Unmap a vmap obtained by dma_buf_vmap.
+ * @dmabuf:	[in]	buffer to vunmap
+ */
+void dma_buf_vunmap(struct dma_buf *dmabuf, void *vaddr)
+{
+	if (WARN_ON(!dmabuf))
+		return;
+
+	if (dmabuf->ops->vunmap)
+		dmabuf->ops->vunmap(dmabuf, vaddr);
+}
+EXPORT_SYMBOL_GPL(dma_buf_vunmap);
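
With ->mmap now mandatory for exporters, each exporter maps its own backing storage when dma_buf_mmap() or the new file operation is invoked. A hedged sketch of an exporter-side callback follows; the example_buffer type and its page array are assumptions for illustration, not an existing exporter.

#include <linux/dma-buf.h>
#include <linux/mm.h>

struct example_buffer {			/* hypothetical exporter object */
	struct page **pages;
	unsigned long n_pages;
};

static int example_dmabuf_mmap(struct dma_buf *dmabuf, struct vm_area_struct *vma)
{
	struct example_buffer *buf = dmabuf->priv;
	unsigned long uaddr = vma->vm_start;
	unsigned long i;

	/* dma_buf_mmap()/dma_buf_mmap_internal() have already size-checked the vma. */
	for (i = vma->vm_pgoff; i < buf->n_pages && uaddr < vma->vm_end; i++) {
		int ret = vm_insert_page(vma, uaddr, buf->pages[i]);
		if (ret)
			return ret;
		uaddr += PAGE_SIZE;
	}
	return 0;
}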
diff --git a/drivers/base/dma-coherent.c b/drivers/base/dma-coherent.c
index bb0025c..1b85949 100644
--- a/drivers/base/dma-coherent.c
+++ b/drivers/base/dma-coherent.c
@@ -10,6 +10,7 @@
 struct dma_coherent_mem {
 	void		*virt_base;
 	dma_addr_t	device_base;
+	phys_addr_t	pfn_base;
 	int		size;
 	int		flags;
 	unsigned long	*bitmap;
@@ -44,6 +45,7 @@
 
 	dev->dma_mem->virt_base = mem_base;
 	dev->dma_mem->device_base = device_addr;
+	dev->dma_mem->pfn_base = PFN_DOWN(bus_addr);
 	dev->dma_mem->size = pages;
 	dev->dma_mem->flags = flags;
 
@@ -176,3 +178,43 @@
 	return 0;
 }
 EXPORT_SYMBOL(dma_release_from_coherent);
+
+/**
+ * dma_mmap_from_coherent() - try to mmap the memory allocated from
+ * per-device coherent memory pool to userspace
+ * @dev:	device from which the memory was allocated
+ * @vma:	vm_area for the userspace memory
+ * @vaddr:	cpu address returned by dma_alloc_from_coherent
+ * @size:	size of the memory buffer allocated by dma_alloc_from_coherent
+ *
+ * This checks whether the memory was allocated from the per-device
+ * coherent memory pool and if so, maps that memory to the provided vma.
+ *
+ * Returns 1 if we correctly mapped the memory, or 0 if
+ * dma_release_coherent() should proceed with mapping memory from
+ * generic pools.
+ */
+int dma_mmap_from_coherent(struct device *dev, struct vm_area_struct *vma,
+			   void *vaddr, size_t size, int *ret)
+{
+	struct dma_coherent_mem *mem = dev ? dev->dma_mem : NULL;
+
+	if (mem && vaddr >= mem->virt_base && vaddr + size <=
+		   (mem->virt_base + (mem->size << PAGE_SHIFT))) {
+		unsigned long off = vma->vm_pgoff;
+		int start = (vaddr - mem->virt_base) >> PAGE_SHIFT;
+		int user_count = (vma->vm_end - vma->vm_start) >> PAGE_SHIFT;
+		int count = size >> PAGE_SHIFT;
+
+		*ret = -ENXIO;
+		if (off < count && user_count <= count - off) {
+			unsigned pfn = mem->pfn_base + start + off;
+			*ret = remap_pfn_range(vma, vma->vm_start, pfn,
+					       user_count << PAGE_SHIFT,
+					       vma->vm_page_prot);
+		}
+		return 1;
+	}
+	return 0;
+}
+EXPORT_SYMBOL(dma_mmap_from_coherent);
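
dma_mmap_from_coherent() is meant to be called from an architecture's dma mmap path before it falls back to generic mapping code. A minimal sketch, assuming a flat lowmem fallback; the fallback details are illustrative and not part of the helper's contract.

#include <linux/dma-mapping.h>
#include <linux/mm.h>

int example_arch_dma_mmap(struct device *dev, struct vm_area_struct *vma,
			  void *cpu_addr, dma_addr_t dma_addr, size_t size)
{
	int ret = -ENXIO;

	/* Returns 1 if the buffer came from the per-device pool; *ret is then set. */
	if (dma_mmap_from_coherent(dev, vma, cpu_addr, size, &ret))
		return ret;

	/* Otherwise map the pages backing a generically allocated buffer. */
	return remap_pfn_range(vma, vma->vm_start,
			       page_to_pfn(virt_to_page(cpu_addr)) + vma->vm_pgoff,
			       size, vma->vm_page_prot);
}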
diff --git a/drivers/base/dma-contiguous.c b/drivers/base/dma-contiguous.c
new file mode 100644
index 0000000..78efb03
--- /dev/null
+++ b/drivers/base/dma-contiguous.c
@@ -0,0 +1,401 @@
+/*
+ * Contiguous Memory Allocator for DMA mapping framework
+ * Copyright (c) 2010-2011 by Samsung Electronics.
+ * Written by:
+ *	Marek Szyprowski <m.szyprowski@samsung.com>
+ *	Michal Nazarewicz <mina86@mina86.com>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation; either version 2 of the
+ * License or (at your option) any later version of the license.
+ */
+
+#define pr_fmt(fmt) "cma: " fmt
+
+#ifdef CONFIG_CMA_DEBUG
+#ifndef DEBUG
+#  define DEBUG
+#endif
+#endif
+
+#include <asm/page.h>
+#include <asm/dma-contiguous.h>
+
+#include <linux/memblock.h>
+#include <linux/err.h>
+#include <linux/mm.h>
+#include <linux/mutex.h>
+#include <linux/page-isolation.h>
+#include <linux/slab.h>
+#include <linux/swap.h>
+#include <linux/mm_types.h>
+#include <linux/dma-contiguous.h>
+
+#ifndef SZ_1M
+#define SZ_1M (1 << 20)
+#endif
+
+struct cma {
+	unsigned long	base_pfn;
+	unsigned long	count;
+	unsigned long	*bitmap;
+};
+
+struct cma *dma_contiguous_default_area;
+
+#ifdef CONFIG_CMA_SIZE_MBYTES
+#define CMA_SIZE_MBYTES CONFIG_CMA_SIZE_MBYTES
+#else
+#define CMA_SIZE_MBYTES 0
+#endif
+
+/*
+ * The default global CMA area size can be defined in the kernel's .config.
+ * This is useful mainly for distro maintainers to create a kernel
+ * that works correctly for most supported systems.
+ * The size can be set in bytes or as a percentage of the total memory
+ * in the system.
+ *
+ * Users who want to set the size of the global CMA area for their system
+ * should use the cma= kernel parameter.
+ */
+static const unsigned long size_bytes = CMA_SIZE_MBYTES * SZ_1M;
+static long size_cmdline = -1;
+
+static int __init early_cma(char *p)
+{
+	pr_debug("%s(%s)\n", __func__, p);
+	size_cmdline = memparse(p, &p);
+	return 0;
+}
+early_param("cma", early_cma);
+
+#ifdef CONFIG_CMA_SIZE_PERCENTAGE
+
+static unsigned long __init __maybe_unused cma_early_percent_memory(void)
+{
+	struct memblock_region *reg;
+	unsigned long total_pages = 0;
+
+	/*
+	 * We cannot use memblock_phys_mem_size() here, because
+	 * memblock_analyze() has not been called yet.
+	 */
+	for_each_memblock(memory, reg)
+		total_pages += memblock_region_memory_end_pfn(reg) -
+			       memblock_region_memory_base_pfn(reg);
+
+	return (total_pages * CONFIG_CMA_SIZE_PERCENTAGE / 100) << PAGE_SHIFT;
+}
+
+#else
+
+static inline __maybe_unused unsigned long cma_early_percent_memory(void)
+{
+	return 0;
+}
+
+#endif
+
+/**
+ * dma_contiguous_reserve() - reserve area for contiguous memory handling
+ * @limit: End address of the reserved memory (optional, 0 for any).
+ *
+ * This function reserves memory from the early allocator. It should be
+ * called by arch-specific code once the early allocator (memblock or bootmem)
+ * has been activated and all other subsystems have already allocated/reserved
+ * memory.
+ */
+void __init dma_contiguous_reserve(phys_addr_t limit)
+{
+	unsigned long selected_size = 0;
+
+	pr_debug("%s(limit %08lx)\n", __func__, (unsigned long)limit);
+
+	if (size_cmdline != -1) {
+		selected_size = size_cmdline;
+	} else {
+#ifdef CONFIG_CMA_SIZE_SEL_MBYTES
+		selected_size = size_bytes;
+#elif defined(CONFIG_CMA_SIZE_SEL_PERCENTAGE)
+		selected_size = cma_early_percent_memory();
+#elif defined(CONFIG_CMA_SIZE_SEL_MIN)
+		selected_size = min(size_bytes, cma_early_percent_memory());
+#elif defined(CONFIG_CMA_SIZE_SEL_MAX)
+		selected_size = max(size_bytes, cma_early_percent_memory());
+#endif
+	}
+
+	if (selected_size) {
+		pr_debug("%s: reserving %ld MiB for global area\n", __func__,
+			 selected_size / SZ_1M);
+
+		dma_declare_contiguous(NULL, selected_size, 0, limit);
+	}
+};
+
+static DEFINE_MUTEX(cma_mutex);
+
+static __init int cma_activate_area(unsigned long base_pfn, unsigned long count)
+{
+	unsigned long pfn = base_pfn;
+	unsigned i = count >> pageblock_order;
+	struct zone *zone;
+
+	WARN_ON_ONCE(!pfn_valid(pfn));
+	zone = page_zone(pfn_to_page(pfn));
+
+	do {
+		unsigned j;
+		base_pfn = pfn;
+		for (j = pageblock_nr_pages; j; --j, pfn++) {
+			WARN_ON_ONCE(!pfn_valid(pfn));
+			if (page_zone(pfn_to_page(pfn)) != zone)
+				return -EINVAL;
+		}
+		init_cma_reserved_pageblock(pfn_to_page(base_pfn));
+	} while (--i);
+	return 0;
+}
+
+static __init struct cma *cma_create_area(unsigned long base_pfn,
+				     unsigned long count)
+{
+	int bitmap_size = BITS_TO_LONGS(count) * sizeof(long);
+	struct cma *cma;
+	int ret = -ENOMEM;
+
+	pr_debug("%s(base %08lx, count %lx)\n", __func__, base_pfn, count);
+
+	cma = kmalloc(sizeof *cma, GFP_KERNEL);
+	if (!cma)
+		return ERR_PTR(-ENOMEM);
+
+	cma->base_pfn = base_pfn;
+	cma->count = count;
+	cma->bitmap = kzalloc(bitmap_size, GFP_KERNEL);
+
+	if (!cma->bitmap)
+		goto no_mem;
+
+	ret = cma_activate_area(base_pfn, count);
+	if (ret)
+		goto error;
+
+	pr_debug("%s: returned %p\n", __func__, (void *)cma);
+	return cma;
+
+error:
+	kfree(cma->bitmap);
+no_mem:
+	kfree(cma);
+	return ERR_PTR(ret);
+}
+
+static struct cma_reserved {
+	phys_addr_t start;
+	unsigned long size;
+	struct device *dev;
+} cma_reserved[MAX_CMA_AREAS] __initdata;
+static unsigned cma_reserved_count __initdata;
+
+static int __init cma_init_reserved_areas(void)
+{
+	struct cma_reserved *r = cma_reserved;
+	unsigned i = cma_reserved_count;
+
+	pr_debug("%s()\n", __func__);
+
+	for (; i; --i, ++r) {
+		struct cma *cma;
+		cma = cma_create_area(PFN_DOWN(r->start),
+				      r->size >> PAGE_SHIFT);
+		if (!IS_ERR(cma))
+			dev_set_cma_area(r->dev, cma);
+	}
+	return 0;
+}
+core_initcall(cma_init_reserved_areas);
+
+/**
+ * dma_declare_contiguous() - reserve area for contiguous memory handling
+ *			      for particular device
+ * @dev:   Pointer to device structure.
+ * @size:  Size of the reserved memory.
+ * @base:  Start address of the reserved memory (optional, 0 for any).
+ * @limit: End address of the reserved memory (optional, 0 for any).
+ *
+ * This function reserves memory for the specified device. It should be
+ * called by board-specific code while the early allocator (memblock or
+ * bootmem) is still active.
+ */
+int __init dma_declare_contiguous(struct device *dev, unsigned long size,
+				  phys_addr_t base, phys_addr_t limit)
+{
+	struct cma_reserved *r = &cma_reserved[cma_reserved_count];
+	unsigned long alignment;
+
+	pr_debug("%s(size %lx, base %08lx, limit %08lx)\n", __func__,
+		 (unsigned long)size, (unsigned long)base,
+		 (unsigned long)limit);
+
+	/* Sanity checks */
+	if (cma_reserved_count == ARRAY_SIZE(cma_reserved)) {
+		pr_err("Not enough slots for CMA reserved regions!\n");
+		return -ENOSPC;
+	}
+
+	if (!size)
+		return -EINVAL;
+
+	/* Sanitise input arguments */
+	alignment = PAGE_SIZE << max(MAX_ORDER, pageblock_order);
+	base = ALIGN(base, alignment);
+	size = ALIGN(size, alignment);
+	limit &= ~(alignment - 1);
+
+	/* Reserve memory */
+	if (base) {
+		if (memblock_is_region_reserved(base, size) ||
+		    memblock_reserve(base, size) < 0) {
+			base = -EBUSY;
+			goto err;
+		}
+	} else {
+		/*
+		 * Use __memblock_alloc_base() since
+		 * memblock_alloc_base() panic()s.
+		 */
+		phys_addr_t addr = __memblock_alloc_base(size, alignment, limit);
+		if (!addr) {
+			base = -ENOMEM;
+			goto err;
+		} else if (addr + size > ~(unsigned long)0) {
+			memblock_free(addr, size);
+			base = -EINVAL;
+			goto err;
+		} else {
+			base = addr;
+		}
+	}
+
+	/*
+	 * Each reserved area must be initialised later, when more kernel
+	 * subsystems (like slab allocator) are available.
+	 */
+	r->start = base;
+	r->size = size;
+	r->dev = dev;
+	cma_reserved_count++;
+	pr_info("CMA: reserved %ld MiB at %08lx\n", size / SZ_1M,
+		(unsigned long)base);
+
+	/* Architecture specific contiguous memory fixup. */
+	dma_contiguous_early_fixup(base, size);
+	return 0;
+err:
+	pr_err("CMA: failed to reserve %ld MiB\n", size / SZ_1M);
+	return base;
+}
+
+/**
+ * dma_alloc_from_contiguous() - allocate pages from contiguous area
+ * @dev:   Pointer to device for which the allocation is performed.
+ * @count: Requested number of pages.
+ * @align: Requested alignment of pages (in PAGE_SIZE order).
+ *
+ * This function allocates a memory buffer for the specified device. It uses
+ * the device-specific contiguous memory area if available, or the default
+ * global one otherwise. Requires the architecture-specific dev_get_cma_area()
+ * helper function.
+ */
+struct page *dma_alloc_from_contiguous(struct device *dev, int count,
+				       unsigned int align)
+{
+	unsigned long mask, pfn, pageno, start = 0;
+	struct cma *cma = dev_get_cma_area(dev);
+	int ret;
+
+	if (!cma || !cma->count)
+		return NULL;
+
+	if (align > CONFIG_CMA_ALIGNMENT)
+		align = CONFIG_CMA_ALIGNMENT;
+
+	pr_debug("%s(cma %p, count %d, align %d)\n", __func__, (void *)cma,
+		 count, align);
+
+	if (!count)
+		return NULL;
+
+	mask = (1 << align) - 1;
+
+	mutex_lock(&cma_mutex);
+
+	for (;;) {
+		pageno = bitmap_find_next_zero_area(cma->bitmap, cma->count,
+						    start, count, mask);
+		if (pageno >= cma->count) {
+			ret = -ENOMEM;
+			goto error;
+		}
+
+		pfn = cma->base_pfn + pageno;
+		ret = alloc_contig_range(pfn, pfn + count, MIGRATE_CMA);
+		if (ret == 0) {
+			bitmap_set(cma->bitmap, pageno, count);
+			break;
+		} else if (ret != -EBUSY) {
+			goto error;
+		}
+		pr_debug("%s(): memory range at %p is busy, retrying\n",
+			 __func__, pfn_to_page(pfn));
+		/* try again with a bit different memory target */
+		start = pageno + mask + 1;
+	}
+
+	mutex_unlock(&cma_mutex);
+
+	pr_debug("%s(): returned %p\n", __func__, pfn_to_page(pfn));
+	return pfn_to_page(pfn);
+error:
+	mutex_unlock(&cma_mutex);
+	return NULL;
+}
+
+/**
+ * dma_release_from_contiguous() - release allocated pages
+ * @dev:   Pointer to device for which the pages were allocated.
+ * @pages: Allocated pages.
+ * @count: Number of allocated pages.
+ *
+ * This function releases memory allocated by dma_alloc_from_contiguous().
+ * It returns false when the provided pages do not belong to the contiguous
+ * area and true otherwise.
+ */
+bool dma_release_from_contiguous(struct device *dev, struct page *pages,
+				 int count)
+{
+	struct cma *cma = dev_get_cma_area(dev);
+	unsigned long pfn;
+
+	if (!cma || !pages)
+		return false;
+
+	pr_debug("%s(page %p)\n", __func__, (void *)pages);
+
+	pfn = page_to_pfn(pages);
+
+	if (pfn < cma->base_pfn || pfn >= cma->base_pfn + cma->count)
+		return false;
+
+	VM_BUG_ON(pfn + count > cma->base_pfn + cma->count);
+
+	mutex_lock(&cma_mutex);
+	bitmap_clear(cma->bitmap, pfn - cma->base_pfn, count);
+	free_contig_range(pfn, count);
+	mutex_unlock(&cma_mutex);
+
+	return true;
+}
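
At run time the allocator above is driven through dma_alloc_from_contiguous() and dma_release_from_contiguous(), typically from a dma-mapping backend. A hedged sketch of that usage; the wrapper names and the non-CMA fallback are assumptions for illustration.

#include <linux/dma-contiguous.h>
#include <linux/gfp.h>
#include <linux/mm.h>

static struct page *example_alloc(struct device *dev, size_t size)
{
	unsigned int count = PAGE_ALIGN(size) >> PAGE_SHIFT;
	unsigned int order = get_order(size);

	/* May return NULL if the CMA area is exhausted or too fragmented. */
	return dma_alloc_from_contiguous(dev, count, order);
}

static void example_free(struct device *dev, struct page *pages, size_t size)
{
	unsigned int count = PAGE_ALIGN(size) >> PAGE_SHIFT;

	if (!dma_release_from_contiguous(dev, pages, count))
		__free_pages(pages, get_order(size));	/* not a CMA buffer */
}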
diff --git a/drivers/base/node.c b/drivers/base/node.c
index 90aa2a1..af1a177 100644
--- a/drivers/base/node.c
+++ b/drivers/base/node.c
@@ -592,11 +592,9 @@
 {
 	int n;
 
-	n = nodelist_scnprintf(buf, PAGE_SIZE, node_states[state]);
-	if (n > 0 && PAGE_SIZE > n + 1) {
-		*(buf + n++) = '\n';
-		*(buf + n++) = '\0';
-	}
+	n = nodelist_scnprintf(buf, PAGE_SIZE-2, node_states[state]);
+	buf[n++] = '\n';
+	buf[n] = '\0';
 	return n;
 }
 
diff --git a/drivers/base/power/main.c b/drivers/base/power/main.c
index e0fb5b0..9cb845e 100644
--- a/drivers/base/power/main.c
+++ b/drivers/base/power/main.c
@@ -1031,7 +1031,7 @@
 	dpm_wait_for_children(dev, async);
 
 	if (async_error)
-		return 0;
+		goto Complete;
 
 	pm_runtime_get_noresume(dev);
 	if (pm_runtime_barrier(dev) && device_may_wakeup(dev))
@@ -1040,7 +1040,7 @@
 	if (pm_wakeup_pending()) {
 		pm_runtime_put_sync(dev);
 		async_error = -EBUSY;
-		return 0;
+		goto Complete;
 	}
 
 	device_lock(dev);
@@ -1097,6 +1097,8 @@
 	}
 
 	device_unlock(dev);
+
+ Complete:
 	complete_all(&dev->power.completion);
 
 	if (error) {
diff --git a/drivers/base/regmap/regmap-i2c.c b/drivers/base/regmap/regmap-i2c.c
index 5f6b247..fa6bf52 100644
--- a/drivers/base/regmap/regmap-i2c.c
+++ b/drivers/base/regmap/regmap-i2c.c
@@ -42,7 +42,7 @@
 	/* If the I2C controller can't do a gather tell the core, it
 	 * will substitute in a linear write for us.
 	 */
-	if (!i2c_check_functionality(i2c->adapter, I2C_FUNC_PROTOCOL_MANGLING))
+	if (!i2c_check_functionality(i2c->adapter, I2C_FUNC_NOSTART))
 		return -ENOTSUPP;
 
 	xfer[0].addr = i2c->addr;
diff --git a/drivers/base/regmap/regmap.c b/drivers/base/regmap/regmap.c
index 0bcda48..c89aa01 100644
--- a/drivers/base/regmap/regmap.c
+++ b/drivers/base/regmap/regmap.c
@@ -246,11 +246,11 @@
 		map->lock = regmap_lock_mutex;
 		map->unlock = regmap_unlock_mutex;
 	}
-	map->format.buf_size = (config->reg_bits + config->val_bits) / 8;
 	map->format.reg_bytes = DIV_ROUND_UP(config->reg_bits, 8);
 	map->format.pad_bytes = config->pad_bits / 8;
 	map->format.val_bytes = DIV_ROUND_UP(config->val_bits, 8);
-	map->format.buf_size += map->format.pad_bytes;
+	map->format.buf_size = DIV_ROUND_UP(config->reg_bits +
+			config->val_bits + config->pad_bits, 8);
 	map->reg_shift = config->pad_bits % 8;
 	if (config->reg_stride)
 		map->reg_stride = config->reg_stride;
@@ -368,7 +368,7 @@
 
 	ret = regcache_init(map, config);
 	if (ret < 0)
-		goto err_free_workbuf;
+		goto err_debugfs;
 
 	/* Add a devres resource for dev_get_regmap() */
 	m = devres_alloc(dev_get_regmap_release, sizeof(*m), GFP_KERNEL);
@@ -383,7 +383,8 @@
 
 err_cache:
 	regcache_exit(map);
-err_free_workbuf:
+err_debugfs:
+	regmap_debugfs_exit(map);
 	kfree(map->work_buf);
 err_map:
 	kfree(map);
@@ -471,6 +472,7 @@
 
 	return ret;
 }
+EXPORT_SYMBOL_GPL(regmap_reinit_cache);
 
 /**
  * regmap_exit(): Free a previously allocated register map
diff --git a/drivers/base/soc.c b/drivers/base/soc.c
index ba29b2e..72b5e72 100644
--- a/drivers/base/soc.c
+++ b/drivers/base/soc.c
@@ -42,7 +42,7 @@
 	return &soc_dev->dev;
 }
 
-static mode_t soc_attribute_mode(struct kobject *kobj,
+static umode_t soc_attribute_mode(struct kobject *kobj,
                                  struct attribute *attr,
                                  int index)
 {
diff --git a/drivers/bcma/driver_chipcommon_pmu.c b/drivers/bcma/driver_chipcommon_pmu.c
index a058842..61ce405 100644
--- a/drivers/bcma/driver_chipcommon_pmu.c
+++ b/drivers/bcma/driver_chipcommon_pmu.c
@@ -139,7 +139,9 @@
 		bcma_chipco_chipctl_maskset(cc, 0, ~0, 0x7);
 		break;
 	case 0x4331:
-		/* BCM4331 workaround is SPROM-related, we put it in sprom.c */
+	case 43431:
+		/* Ext PA lines must be enabled for tx on BCM4331 */
+		bcma_chipco_bcm4331_ext_pa_lines_ctl(cc, true);
 		break;
 	case 43224:
 		if (bus->chipinfo.rev == 0) {
diff --git a/drivers/bcma/driver_pci.c b/drivers/bcma/driver_pci.c
index 9a96f14..c32ebd5 100644
--- a/drivers/bcma/driver_pci.c
+++ b/drivers/bcma/driver_pci.c
@@ -232,17 +232,19 @@
 int bcma_core_pci_irq_ctl(struct bcma_drv_pci *pc, struct bcma_device *core,
 			  bool enable)
 {
-	struct pci_dev *pdev = pc->core->bus->host_pci;
+	struct pci_dev *pdev;
 	u32 coremask, tmp;
 	int err = 0;
 
-	if (core->bus->hosttype != BCMA_HOSTTYPE_PCI) {
+	if (!pc || core->bus->hosttype != BCMA_HOSTTYPE_PCI) {
 		/* This bcma device is not on a PCI host-bus. So the IRQs are
 		 * not routed through the PCI core.
 		 * So we must not enable routing through the PCI core. */
 		goto out;
 	}
 
+	pdev = pc->core->bus->host_pci;
+
 	err = pci_read_config_dword(pdev, BCMA_PCI_IRQMASK, &tmp);
 	if (err)
 		goto out;
diff --git a/drivers/bcma/sprom.c b/drivers/bcma/sprom.c
index c7f9335..f16f42d3 100644
--- a/drivers/bcma/sprom.c
+++ b/drivers/bcma/sprom.c
@@ -579,13 +579,13 @@
 	if (!sprom)
 		return -ENOMEM;
 
-	if (bus->chipinfo.id == 0x4331)
+	if (bus->chipinfo.id == 0x4331 || bus->chipinfo.id == 43431)
 		bcma_chipco_bcm4331_ext_pa_lines_ctl(&bus->drv_cc, false);
 
 	pr_debug("SPROM offset 0x%x\n", offset);
 	bcma_sprom_read(bus, offset, sprom);
 
-	if (bus->chipinfo.id == 0x4331)
+	if (bus->chipinfo.id == 0x4331 || bus->chipinfo.id == 43431)
 		bcma_chipco_bcm4331_ext_pa_lines_ctl(&bus->drv_cc, true);
 
 	err = bcma_sprom_valid(sprom);
diff --git a/drivers/block/drbd/drbd_actlog.c b/drivers/block/drbd/drbd_actlog.c
index cf0e63d..e54e31b 100644
--- a/drivers/block/drbd/drbd_actlog.c
+++ b/drivers/block/drbd/drbd_actlog.c
@@ -65,39 +65,80 @@
 
 int w_al_write_transaction(struct drbd_conf *, struct drbd_work *, int);
 
+void *drbd_md_get_buffer(struct drbd_conf *mdev)
+{
+	int r;
+
+	wait_event(mdev->misc_wait,
+		   (r = atomic_cmpxchg(&mdev->md_io_in_use, 0, 1)) == 0 ||
+		   mdev->state.disk <= D_FAILED);
+
+	return r ? NULL : page_address(mdev->md_io_page);
+}
+
+void drbd_md_put_buffer(struct drbd_conf *mdev)
+{
+	if (atomic_dec_and_test(&mdev->md_io_in_use))
+		wake_up(&mdev->misc_wait);
+}
+
+static bool md_io_allowed(struct drbd_conf *mdev)
+{
+	enum drbd_disk_state ds = mdev->state.disk;
+	return ds >= D_NEGOTIATING || ds == D_ATTACHING;
+}
+
+void wait_until_done_or_disk_failure(struct drbd_conf *mdev, struct drbd_backing_dev *bdev,
+				     unsigned int *done)
+{
+	long dt = bdev->dc.disk_timeout * HZ / 10;
+	if (dt == 0)
+		dt = MAX_SCHEDULE_TIMEOUT;
+
+	dt = wait_event_timeout(mdev->misc_wait, *done || !md_io_allowed(mdev), dt);
+	if (dt == 0)
+		dev_err(DEV, "meta-data IO operation timed out\n");
+}
+
 static int _drbd_md_sync_page_io(struct drbd_conf *mdev,
 				 struct drbd_backing_dev *bdev,
 				 struct page *page, sector_t sector,
 				 int rw, int size)
 {
 	struct bio *bio;
-	struct drbd_md_io md_io;
 	int ok;
 
-	md_io.mdev = mdev;
-	init_completion(&md_io.event);
-	md_io.error = 0;
+	mdev->md_io.done = 0;
+	mdev->md_io.error = -ENODEV;
 
 	if ((rw & WRITE) && !test_bit(MD_NO_FUA, &mdev->flags))
 		rw |= REQ_FUA | REQ_FLUSH;
 	rw |= REQ_SYNC;
 
-	bio = bio_alloc(GFP_NOIO, 1);
+	bio = bio_alloc_drbd(GFP_NOIO);
 	bio->bi_bdev = bdev->md_bdev;
 	bio->bi_sector = sector;
 	ok = (bio_add_page(bio, page, size, 0) == size);
 	if (!ok)
 		goto out;
-	bio->bi_private = &md_io;
+	bio->bi_private = &mdev->md_io;
 	bio->bi_end_io = drbd_md_io_complete;
 	bio->bi_rw = rw;
 
+	if (!get_ldev_if_state(mdev, D_ATTACHING)) {  /* Corresponding put_ldev in drbd_md_io_complete() */
+		dev_err(DEV, "ASSERT FAILED: get_ldev_if_state() == 1 in _drbd_md_sync_page_io()\n");
+		ok = 0;
+		goto out;
+	}
+
+	bio_get(bio); /* one bio_put() is in the completion handler */
+	atomic_inc(&mdev->md_io_in_use); /* drbd_md_put_buffer() is in the completion handler */
 	if (drbd_insert_fault(mdev, (rw & WRITE) ? DRBD_FAULT_MD_WR : DRBD_FAULT_MD_RD))
 		bio_endio(bio, -EIO);
 	else
 		submit_bio(rw, bio);
-	wait_for_completion(&md_io.event);
-	ok = bio_flagged(bio, BIO_UPTODATE) && md_io.error == 0;
+	wait_until_done_or_disk_failure(mdev, bdev, &mdev->md_io.done);
+	ok = bio_flagged(bio, BIO_UPTODATE) && mdev->md_io.error == 0;
 
  out:
 	bio_put(bio);
@@ -111,7 +152,7 @@
 	int offset = 0;
 	struct page *iop = mdev->md_io_page;
 
-	D_ASSERT(mutex_is_locked(&mdev->md_io_mutex));
+	D_ASSERT(atomic_read(&mdev->md_io_in_use) == 1);
 
 	BUG_ON(!bdev->md_bdev);
 
@@ -328,8 +369,13 @@
 		return 1;
 	}
 
-	mutex_lock(&mdev->md_io_mutex); /* protects md_io_buffer, al_tr_cycle, ... */
-	buffer = (struct al_transaction *)page_address(mdev->md_io_page);
+	buffer = drbd_md_get_buffer(mdev); /* protects md_io_buffer, al_tr_cycle, ... */
+	if (!buffer) {
+		dev_err(DEV, "disk failed while waiting for md_io buffer\n");
+		complete(&((struct update_al_work *)w)->event);
+		put_ldev(mdev);
+		return 1;
+	}
 
 	buffer->magic = __constant_cpu_to_be32(DRBD_MAGIC);
 	buffer->tr_number = cpu_to_be32(mdev->al_tr_number);
@@ -374,7 +420,7 @@
 	D_ASSERT(mdev->al_tr_pos < MD_AL_MAX_SIZE);
 	mdev->al_tr_number++;
 
-	mutex_unlock(&mdev->md_io_mutex);
+	drbd_md_put_buffer(mdev);
 
 	complete(&((struct update_al_work *)w)->event);
 	put_ldev(mdev);
@@ -443,8 +489,9 @@
 	/* lock out all other meta data io for now,
 	 * and make sure the page is mapped.
 	 */
-	mutex_lock(&mdev->md_io_mutex);
-	buffer = page_address(mdev->md_io_page);
+	buffer = drbd_md_get_buffer(mdev);
+	if (!buffer)
+		return 0;
 
 	/* Find the valid transaction in the log */
 	for (i = 0; i <= mx; i++) {
@@ -452,7 +499,7 @@
 		if (rv == 0)
 			continue;
 		if (rv == -1) {
-			mutex_unlock(&mdev->md_io_mutex);
+			drbd_md_put_buffer(mdev);
 			return 0;
 		}
 		cnr = be32_to_cpu(buffer->tr_number);
@@ -478,7 +525,7 @@
 
 	if (!found_valid) {
 		dev_warn(DEV, "No usable activity log found.\n");
-		mutex_unlock(&mdev->md_io_mutex);
+		drbd_md_put_buffer(mdev);
 		return 1;
 	}
 
@@ -493,7 +540,7 @@
 		rv = drbd_al_read_tr(mdev, bdev, buffer, i);
 		ERR_IF(rv == 0) goto cancel;
 		if (rv == -1) {
-			mutex_unlock(&mdev->md_io_mutex);
+			drbd_md_put_buffer(mdev);
 			return 0;
 		}
 
@@ -534,7 +581,7 @@
 		mdev->al_tr_pos = 0;
 
 	/* ok, we are done with it */
-	mutex_unlock(&mdev->md_io_mutex);
+	drbd_md_put_buffer(mdev);
 
 	dev_info(DEV, "Found %d transactions (%d active extents) in activity log.\n",
 	     transactions, active_extents);
@@ -671,16 +718,20 @@
 			else
 				ext->rs_failed += count;
 			if (ext->rs_left < ext->rs_failed) {
-				dev_err(DEV, "BAD! sector=%llus enr=%u rs_left=%d "
-				    "rs_failed=%d count=%d\n",
+				dev_warn(DEV, "BAD! sector=%llus enr=%u rs_left=%d "
+				    "rs_failed=%d count=%d cstate=%s\n",
 				     (unsigned long long)sector,
 				     ext->lce.lc_number, ext->rs_left,
-				     ext->rs_failed, count);
-				dump_stack();
+				     ext->rs_failed, count,
+				     drbd_conn_str(mdev->state.conn));
 
-				lc_put(mdev->resync, &ext->lce);
-				drbd_force_state(mdev, NS(conn, C_DISCONNECTING));
-				return;
+				/* We don't expect to be able to clear more bits
+				 * than have been set when we originally counted
+				 * the set bits to cache that value in ext->rs_left.
+				 * Whatever the reason (disconnect during resync,
+				 * delayed local completion of an application write),
+				 * try to fix it up by recounting here. */
+				ext->rs_left = drbd_bm_e_weight(mdev, enr);
 			}
 		} else {
 			/* Normally this element should be in the cache,
@@ -1192,6 +1243,7 @@
 		put_ldev(mdev);
 	}
 	spin_unlock_irq(&mdev->al_lock);
+	wake_up(&mdev->al_wait);
 
 	return 0;
 }
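
The mutex that used to guard the meta-data page is replaced above by an atomic use count plus a wait queue, so callers now follow a get/put pattern that can fail if the disk dies while they wait. A minimal sketch of that discipline inside the driver; the surrounding update function is illustrative, not an existing DRBD routine.

static int example_update_md(struct drbd_conf *mdev)
{
	void *buffer = drbd_md_get_buffer(mdev);	/* exclusive use of md_io_page */

	if (!buffer)
		return -ENODEV;		/* disk failed while we were waiting */

	/* ... fill the page and submit it with drbd_md_sync_page_io() ... */

	drbd_md_put_buffer(mdev);	/* drops md_io_in_use and wakes waiters */
	return 0;
}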
diff --git a/drivers/block/drbd/drbd_bitmap.c b/drivers/block/drbd/drbd_bitmap.c
index 3030201..b5c5ff5 100644
--- a/drivers/block/drbd/drbd_bitmap.c
+++ b/drivers/block/drbd/drbd_bitmap.c
@@ -205,7 +205,7 @@
 static void bm_store_page_idx(struct page *page, unsigned long idx)
 {
 	BUG_ON(0 != (idx & ~BM_PAGE_IDX_MASK));
-	page_private(page) |= idx;
+	set_page_private(page, idx);
 }
 
 static unsigned long bm_page_to_idx(struct page *page)
@@ -886,12 +886,21 @@
 struct bm_aio_ctx {
 	struct drbd_conf *mdev;
 	atomic_t in_flight;
-	struct completion done;
+	unsigned int done;
 	unsigned flags;
 #define BM_AIO_COPY_PAGES	1
 	int error;
+	struct kref kref;
 };
 
+static void bm_aio_ctx_destroy(struct kref *kref)
+{
+	struct bm_aio_ctx *ctx = container_of(kref, struct bm_aio_ctx, kref);
+
+	put_ldev(ctx->mdev);
+	kfree(ctx);
+}
+
 /* bv_page may be a copy, or may be the original */
 static void bm_async_io_complete(struct bio *bio, int error)
 {
@@ -930,20 +939,21 @@
 
 	bm_page_unlock_io(mdev, idx);
 
-	/* FIXME give back to page pool */
 	if (ctx->flags & BM_AIO_COPY_PAGES)
-		put_page(bio->bi_io_vec[0].bv_page);
+		mempool_free(bio->bi_io_vec[0].bv_page, drbd_md_io_page_pool);
 
 	bio_put(bio);
 
-	if (atomic_dec_and_test(&ctx->in_flight))
-		complete(&ctx->done);
+	if (atomic_dec_and_test(&ctx->in_flight)) {
+		ctx->done = 1;
+		wake_up(&mdev->misc_wait);
+		kref_put(&ctx->kref, &bm_aio_ctx_destroy);
+	}
 }
 
 static void bm_page_io_async(struct bm_aio_ctx *ctx, int page_nr, int rw) __must_hold(local)
 {
-	/* we are process context. we always get a bio */
-	struct bio *bio = bio_alloc(GFP_KERNEL, 1);
+	struct bio *bio = bio_alloc_drbd(GFP_NOIO);
 	struct drbd_conf *mdev = ctx->mdev;
 	struct drbd_bitmap *b = mdev->bitmap;
 	struct page *page;
@@ -966,10 +976,8 @@
 	bm_set_page_unchanged(b->bm_pages[page_nr]);
 
 	if (ctx->flags & BM_AIO_COPY_PAGES) {
-		/* FIXME alloc_page is good enough for now, but actually needs
-		 * to use pre-allocated page pool */
 		void *src, *dest;
-		page = alloc_page(__GFP_HIGHMEM|__GFP_WAIT);
+		page = mempool_alloc(drbd_md_io_page_pool, __GFP_HIGHMEM|__GFP_WAIT);
 		dest = kmap_atomic(page);
 		src = kmap_atomic(b->bm_pages[page_nr]);
 		memcpy(dest, src, PAGE_SIZE);
@@ -981,6 +989,8 @@
 
 	bio->bi_bdev = mdev->ldev->md_bdev;
 	bio->bi_sector = on_disk_sector;
+	/* bio_add_page of a single page to an empty bio will always succeed,
+	 * according to the API.  Do we want to assert that? */
 	bio_add_page(bio, page, len, 0);
 	bio->bi_private = ctx;
 	bio->bi_end_io = bm_async_io_complete;
@@ -999,14 +1009,9 @@
 /*
  * bm_rw: read/write the whole bitmap from/to its on disk location.
  */
-static int bm_rw(struct drbd_conf *mdev, int rw, unsigned lazy_writeout_upper_idx) __must_hold(local)
+static int bm_rw(struct drbd_conf *mdev, int rw, unsigned flags, unsigned lazy_writeout_upper_idx) __must_hold(local)
 {
-	struct bm_aio_ctx ctx = {
-		.mdev = mdev,
-		.in_flight = ATOMIC_INIT(1),
-		.done = COMPLETION_INITIALIZER_ONSTACK(ctx.done),
-		.flags = lazy_writeout_upper_idx ? BM_AIO_COPY_PAGES : 0,
-	};
+	struct bm_aio_ctx *ctx;
 	struct drbd_bitmap *b = mdev->bitmap;
 	int num_pages, i, count = 0;
 	unsigned long now;
@@ -1021,7 +1026,27 @@
 	 * For lazy writeout, we don't care for ongoing changes to the bitmap,
 	 * as we submit copies of pages anyways.
 	 */
-	if (!ctx.flags)
+
+	ctx = kmalloc(sizeof(struct bm_aio_ctx), GFP_NOIO);
+	if (!ctx)
+		return -ENOMEM;
+
+	*ctx = (struct bm_aio_ctx) {
+		.mdev = mdev,
+		.in_flight = ATOMIC_INIT(1),
+		.done = 0,
+		.flags = flags,
+		.error = 0,
+		.kref = { ATOMIC_INIT(2) },
+	};
+
+	if (!get_ldev_if_state(mdev, D_ATTACHING)) {  /* put is in bm_aio_ctx_destroy() */
+		dev_err(DEV, "ASSERT FAILED: get_ldev_if_state() == 1 in bm_rw()\n");
+		kfree(ctx);
+		return -ENODEV;
+	}
+
+	if (!ctx->flags)
 		WARN_ON(!(BM_LOCKED_MASK & b->bm_flags));
 
 	num_pages = b->bm_number_of_pages;
@@ -1046,29 +1071,38 @@
 				continue;
 			}
 		}
-		atomic_inc(&ctx.in_flight);
-		bm_page_io_async(&ctx, i, rw);
+		atomic_inc(&ctx->in_flight);
+		bm_page_io_async(ctx, i, rw);
 		++count;
 		cond_resched();
 	}
 
 	/*
-	 * We initialize ctx.in_flight to one to make sure bm_async_io_complete
-	 * will not complete() early, and decrement / test it here.  If there
+	 * We initialize ctx->in_flight to one to make sure bm_async_io_complete
+	 * will not set ctx->done early, and decrement / test it here.  If there
 	 * are still some bios in flight, we need to wait for them here.
+	 * If all IO is done already (or nothing had been submitted), there is
+	 * no need to wait.  Still, we need to put the kref associated with the
+	 * "in_flight reached zero, all done" event.
 	 */
-	if (!atomic_dec_and_test(&ctx.in_flight))
-		wait_for_completion(&ctx.done);
+	if (!atomic_dec_and_test(&ctx->in_flight))
+		wait_until_done_or_disk_failure(mdev, mdev->ldev, &ctx->done);
+	else
+		kref_put(&ctx->kref, &bm_aio_ctx_destroy);
+
 	dev_info(DEV, "bitmap %s of %u pages took %lu jiffies\n",
 			rw == WRITE ? "WRITE" : "READ",
 			count, jiffies - now);
 
-	if (ctx.error) {
+	if (ctx->error) {
 		dev_alert(DEV, "we had at least one MD IO ERROR during bitmap IO\n");
 		drbd_chk_io_error(mdev, 1, true);
-		err = -EIO; /* ctx.error ? */
+		err = -EIO; /* ctx->error ? */
 	}
 
+	if (atomic_read(&ctx->in_flight))
+		err = -EIO; /* Disk failed during IO... */
+
 	now = jiffies;
 	if (rw == WRITE) {
 		drbd_md_flush(mdev);
@@ -1082,6 +1116,7 @@
 	dev_info(DEV, "%s (%lu bits) marked out-of-sync by on disk bit-map.\n",
 	     ppsize(ppb, now << (BM_BLOCK_SHIFT-10)), now);
 
+	kref_put(&ctx->kref, &bm_aio_ctx_destroy);
 	return err;
 }
 
@@ -1091,7 +1126,7 @@
  */
 int drbd_bm_read(struct drbd_conf *mdev) __must_hold(local)
 {
-	return bm_rw(mdev, READ, 0);
+	return bm_rw(mdev, READ, 0, 0);
 }
 
 /**
@@ -1102,7 +1137,7 @@
  */
 int drbd_bm_write(struct drbd_conf *mdev) __must_hold(local)
 {
-	return bm_rw(mdev, WRITE, 0);
+	return bm_rw(mdev, WRITE, 0, 0);
 }
 
 /**
@@ -1112,7 +1147,23 @@
  */
 int drbd_bm_write_lazy(struct drbd_conf *mdev, unsigned upper_idx) __must_hold(local)
 {
-	return bm_rw(mdev, WRITE, upper_idx);
+	return bm_rw(mdev, WRITE, BM_AIO_COPY_PAGES, upper_idx);
+}
+
+/**
+ * drbd_bm_write_copy_pages() - Write the whole bitmap to its on disk location.
+ * @mdev:	DRBD device.
+ *
+ * Will only write pages that have changed since last IO.
+ * In contrast to drbd_bm_write(), this will copy the bitmap pages
+ * to temporary writeout pages. It is intended to trigger a full write-out
+ * while still allowing the bitmap to change, for example if a resync or online
+ * verify is aborted due to a failed peer disk, while local IO continues, or
+ * pending resync acks are still being processed.
+ */
+int drbd_bm_write_copy_pages(struct drbd_conf *mdev) __must_hold(local)
+{
+	return bm_rw(mdev, WRITE, BM_AIO_COPY_PAGES, 0);
 }
 
 
@@ -1130,28 +1181,45 @@
  */
 int drbd_bm_write_page(struct drbd_conf *mdev, unsigned int idx) __must_hold(local)
 {
-	struct bm_aio_ctx ctx = {
-		.mdev = mdev,
-		.in_flight = ATOMIC_INIT(1),
-		.done = COMPLETION_INITIALIZER_ONSTACK(ctx.done),
-		.flags = BM_AIO_COPY_PAGES,
-	};
+	struct bm_aio_ctx *ctx;
+	int err;
 
 	if (bm_test_page_unchanged(mdev->bitmap->bm_pages[idx])) {
 		dynamic_dev_dbg(DEV, "skipped bm page write for idx %u\n", idx);
 		return 0;
 	}
 
-	bm_page_io_async(&ctx, idx, WRITE_SYNC);
-	wait_for_completion(&ctx.done);
+	ctx = kmalloc(sizeof(struct bm_aio_ctx), GFP_NOIO);
+	if (!ctx)
+		return -ENOMEM;
 
-	if (ctx.error)
+	*ctx = (struct bm_aio_ctx) {
+		.mdev = mdev,
+		.in_flight = ATOMIC_INIT(1),
+		.done = 0,
+		.flags = BM_AIO_COPY_PAGES,
+		.error = 0,
+		.kref = { ATOMIC_INIT(2) },
+	};
+
+	if (!get_ldev_if_state(mdev, D_ATTACHING)) {  /* put is in bm_aio_ctx_destroy() */
+		dev_err(DEV, "ASSERT FAILED: get_ldev_if_state() == 1 in drbd_bm_write_page()\n");
+		kfree(ctx);
+		return -ENODEV;
+	}
+
+	bm_page_io_async(ctx, idx, WRITE_SYNC);
+	wait_until_done_or_disk_failure(mdev, mdev->ldev, &ctx->done);
+
+	if (ctx->error)
 		drbd_chk_io_error(mdev, 1, true);
 		/* that should force detach, so the in memory bitmap will be
 		 * gone in a moment as well. */
 
 	mdev->bm_writ_cnt++;
-	return ctx.error;
+	err = atomic_read(&ctx->in_flight) ? -EIO : ctx->error;
+	kref_put(&ctx->kref, &bm_aio_ctx_destroy);
+	return err;
 }
 
 /* NOTE
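
The drbd_bitmap.c hunks above replace an on-stack completion in bm_rw() and drbd_bm_write_page() with a heap-allocated bm_aio_ctx whose lifetime is shared, via a kref, between the submitter and the last completing bio. The following is an illustrative sketch of that pattern in generic kernel C, not part of the patch; all names and signatures here are placeholders.

/*
 * Sketch only: the shared-kref lifetime pattern.  The context starts with
 * two references (one for the submitter, one for the "all in-flight IO
 * finished" event), so it stays valid even if the submitter stops waiting
 * (e.g. on disk failure) while bios are still in flight.
 */
#include <linux/atomic.h>
#include <linux/kref.h>
#include <linux/slab.h>

struct io_ctx {
	atomic_t in_flight;
	struct kref kref;
	int error;
};

static void io_ctx_destroy(struct kref *kref)
{
	kfree(container_of(kref, struct io_ctx, kref));
}

/* completion side: called once per finished unit of IO */
static void io_ctx_one_done(struct io_ctx *ctx, int error)
{
	if (error && !ctx->error)
		ctx->error = error;
	if (atomic_dec_and_test(&ctx->in_flight))
		kref_put(&ctx->kref, io_ctx_destroy);	/* drop the "done" reference */
}

/* submitter side: allocate, submit n units, drop the guard reference */
static struct io_ctx *io_ctx_submit(int n)
{
	struct io_ctx *ctx = kmalloc(sizeof(*ctx), GFP_NOIO);
	int i;

	if (!ctx)
		return NULL;
	atomic_set(&ctx->in_flight, 1);	/* +1 guard, so completions cannot finish early */
	kref_init(&ctx->kref);		/* submitter's reference */
	kref_get(&ctx->kref);		/* reference owned by the "done" event */
	ctx->error = 0;

	for (i = 0; i < n; i++) {
		atomic_inc(&ctx->in_flight);
		/* ... submit one bio whose completion calls io_ctx_one_done() ... */
	}

	/* drop the guard; if everything already completed, the "done" ref is ours to put */
	if (atomic_dec_and_test(&ctx->in_flight))
		kref_put(&ctx->kref, io_ctx_destroy);

	return ctx;	/* caller waits, reads ctx->error, then kref_put()s its own reference */
}
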
diff --git a/drivers/block/drbd/drbd_int.h b/drivers/block/drbd/drbd_int.h
index 8d68056..02f013a 100644
--- a/drivers/block/drbd/drbd_int.h
+++ b/drivers/block/drbd/drbd_int.h
@@ -712,7 +712,6 @@
 	struct list_head tl_requests; /* ring list in the transfer log */
 	struct bio *master_bio;       /* master bio pointer */
 	unsigned long rq_state; /* see comments above _req_mod() */
-	int seq_num;
 	unsigned long start_time;
 };
 
@@ -851,6 +850,7 @@
 	NEW_CUR_UUID,		/* Create new current UUID when thawing IO */
 	AL_SUSPENDED,		/* Activity logging is currently suspended. */
 	AHEAD_TO_SYNC_SOURCE,   /* Ahead -> SyncSource queued */
+	STATE_SENT,		/* Do not change state/UUIDs while this is set */
 };
 
 struct drbd_bitmap; /* opaque for drbd_conf */
@@ -862,31 +862,30 @@
 	BM_P_VMALLOCED = 0x10000, /* internal use only, will be masked out */
 
 	/* currently locked for bulk operation */
-	BM_LOCKED_MASK = 0x7,
+	BM_LOCKED_MASK = 0xf,
 
 	/* in detail, that is: */
 	BM_DONT_CLEAR = 0x1,
 	BM_DONT_SET   = 0x2,
 	BM_DONT_TEST  = 0x4,
 
+	/* so we can mark it locked for bulk operation,
+	 * and still allow all non-bulk operations */
+	BM_IS_LOCKED  = 0x8,
+
 	/* (test bit, count bit) allowed (common case) */
-	BM_LOCKED_TEST_ALLOWED = 0x3,
+	BM_LOCKED_TEST_ALLOWED = BM_DONT_CLEAR | BM_DONT_SET | BM_IS_LOCKED,
 
 	/* testing bits, as well as setting new bits allowed, but clearing bits
 	 * would be unexpected.  Used during bitmap receive.  Setting new bits
 	 * requires sending of "out-of-sync" information, though. */
-	BM_LOCKED_SET_ALLOWED = 0x1,
+	BM_LOCKED_SET_ALLOWED = BM_DONT_CLEAR | BM_IS_LOCKED,
 
-	/* clear is not expected while bitmap is locked for bulk operation */
+	/* for drbd_bm_write_copy_pages, everything is allowed,
+	 * only concurrent bulk operations are locked out. */
+	BM_LOCKED_CHANGE_ALLOWED = BM_IS_LOCKED,
 };
 
-
-/* TODO sort members for performance
- * MAYBE group them further */
-
-/* THINK maybe we actually want to use the default "event/%s" worker threads
- * or similar in linux 2.6, which uses per cpu data and threads.
- */
 struct drbd_work_queue {
 	struct list_head q;
 	struct semaphore s; /* producers up it, worker down()s it */
@@ -938,8 +937,7 @@
 };
 
 struct drbd_md_io {
-	struct drbd_conf *mdev;
-	struct completion event;
+	unsigned int done;
 	int error;
 };
 
@@ -1022,6 +1020,7 @@
 	struct drbd_tl_epoch *newest_tle;
 	struct drbd_tl_epoch *oldest_tle;
 	struct list_head out_of_sequence_requests;
+	struct list_head barrier_acked_requests;
 	struct hlist_head *tl_hash;
 	unsigned int tl_hash_s;
 
@@ -1056,6 +1055,8 @@
 	struct crypto_hash *csums_tfm;
 	struct crypto_hash *verify_tfm;
 
+	unsigned long last_reattach_jif;
+	unsigned long last_reconnect_jif;
 	struct drbd_thread receiver;
 	struct drbd_thread worker;
 	struct drbd_thread asender;
@@ -1094,7 +1095,8 @@
 	wait_queue_head_t ee_wait;
 	struct page *md_io_page;	/* one page buffer for md_io */
 	struct page *md_io_tmpp;	/* for logical_block_size != 512 */
-	struct mutex md_io_mutex;	/* protects the md_io_buffer */
+	struct drbd_md_io md_io;
+	atomic_t md_io_in_use;		/* protects the md_io, md_io_page and md_io_tmpp */
 	spinlock_t al_lock;
 	wait_queue_head_t al_wait;
 	struct lru_cache *act_log;	/* activity log */
@@ -1228,8 +1230,8 @@
 extern int drbd_send_uuids_skip_initial_sync(struct drbd_conf *mdev);
 extern int drbd_gen_and_send_sync_uuid(struct drbd_conf *mdev);
 extern int drbd_send_sizes(struct drbd_conf *mdev, int trigger_reply, enum dds_flags flags);
-extern int _drbd_send_state(struct drbd_conf *mdev);
-extern int drbd_send_state(struct drbd_conf *mdev);
+extern int drbd_send_state(struct drbd_conf *mdev, union drbd_state s);
+extern int drbd_send_current_state(struct drbd_conf *mdev);
 extern int _drbd_send_cmd(struct drbd_conf *mdev, struct socket *sock,
 			enum drbd_packets cmd, struct p_header80 *h,
 			size_t size, unsigned msg_flags);
@@ -1461,6 +1463,7 @@
 extern int  drbd_bm_write_page(struct drbd_conf *mdev, unsigned int idx) __must_hold(local);
 extern int  drbd_bm_read(struct drbd_conf *mdev) __must_hold(local);
 extern int  drbd_bm_write(struct drbd_conf *mdev) __must_hold(local);
+extern int  drbd_bm_write_copy_pages(struct drbd_conf *mdev) __must_hold(local);
 extern unsigned long drbd_bm_ALe_set_all(struct drbd_conf *mdev,
 		unsigned long al_enr);
 extern size_t	     drbd_bm_words(struct drbd_conf *mdev);
@@ -1493,11 +1496,38 @@
 extern mempool_t *drbd_request_mempool;
 extern mempool_t *drbd_ee_mempool;
 
-extern struct page *drbd_pp_pool; /* drbd's page pool */
+/* drbd's page pool, used to buffer data received from the peer,
+ * or data requested by the peer.
+ *
+ * This does not have an emergency reserve.
+ *
+ * An allocation first takes pages from the pool; only if the pool is
+ * depleted will it try to allocate from the system.
+ *
+ * The assumption is that pages taken from this pool will be processed,
+ * and given back, "quickly", and then can be recycled, so we can avoid
+ * frequent calls to alloc_page(), and still will be able to make progress even
+ * under memory pressure.
+ */
+extern struct page *drbd_pp_pool;
 extern spinlock_t   drbd_pp_lock;
 extern int	    drbd_pp_vacant;
 extern wait_queue_head_t drbd_pp_wait;
 
+/* We also need a standard (emergency-reserve backed) page pool
+ * for meta data IO (activity log, bitmap).
+ * We can keep it global, as long as it is used as "N pages at a time".
+ * 128 should be plenty; currently we could probably get away with as few as 1.
+ */
+#define DRBD_MIN_POOL_PAGES	128
+extern mempool_t *drbd_md_io_page_pool;
+
+/* We also need to make sure we get a bio
+ * when we need it for housekeeping purposes */
+extern struct bio_set *drbd_md_io_bio_set;
+/* to allocate from that set */
+extern struct bio *bio_alloc_drbd(gfp_t gfp_mask);
+
 extern rwlock_t global_state_lock;
 
 extern struct drbd_conf *drbd_new_device(unsigned int minor);
@@ -1536,8 +1566,12 @@
 extern void suspend_other_sg(struct drbd_conf *mdev);
 extern int drbd_resync_finished(struct drbd_conf *mdev);
 /* maybe rather drbd_main.c ? */
+extern void *drbd_md_get_buffer(struct drbd_conf *mdev);
+extern void drbd_md_put_buffer(struct drbd_conf *mdev);
 extern int drbd_md_sync_page_io(struct drbd_conf *mdev,
-		struct drbd_backing_dev *bdev, sector_t sector, int rw);
+				struct drbd_backing_dev *bdev, sector_t sector, int rw);
+extern void wait_until_done_or_disk_failure(struct drbd_conf *mdev, struct drbd_backing_dev *bdev,
+					    unsigned int *done);
 extern void drbd_ov_oos_found(struct drbd_conf*, sector_t, int);
 extern void drbd_rs_controller_reset(struct drbd_conf *mdev);
 
@@ -1754,19 +1788,6 @@
 #define page_chain_for_each_safe(page, n) \
 	for (; page && ({ n = page_chain_next(page); 1; }); page = n)
 
-static inline int drbd_bio_has_active_page(struct bio *bio)
-{
-	struct bio_vec *bvec;
-	int i;
-
-	__bio_for_each_segment(bvec, bio, i, 0) {
-		if (page_count(bvec->bv_page) > 1)
-			return 1;
-	}
-
-	return 0;
-}
-
 static inline int drbd_ee_has_active_page(struct drbd_epoch_entry *e)
 {
 	struct page *page = e->pages;
@@ -1777,7 +1798,6 @@
 	return 0;
 }
 
-
 static inline void drbd_state_lock(struct drbd_conf *mdev)
 {
 	wait_event(mdev->misc_wait,
@@ -2230,7 +2250,7 @@
 		 * Note: currently we don't support such large bitmaps on 32bit
 		 * arch anyways, but no harm done to be prepared for it here.
 		 */
-		unsigned int shift = mdev->rs_total >= (1ULL << 32) ? 16 : 10;
+		unsigned int shift = mdev->rs_total > UINT_MAX ? 16 : 10;
 		unsigned long left = *bits_left >> shift;
 		unsigned long total = 1UL + (mdev->rs_total >> shift);
 		unsigned long tmp = 1000UL - left * 1000UL/total;
@@ -2306,12 +2326,12 @@
 	case D_OUTDATED:
 	case D_CONSISTENT:
 	case D_UP_TO_DATE:
+	case D_FAILED:
 		/* disk state is stable as well. */
 		break;
 
 	/* no new io accepted during transitional states */
 	case D_ATTACHING:
-	case D_FAILED:
 	case D_NEGOTIATING:
 	case D_UNKNOWN:
 	case D_MASK:
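
The header above declares drbd_md_io_page_pool, an emergency-reserve backed page pool for metadata IO (activity log, bitmap). A minimal sketch of how such a pool is created and used with the standard mempool API follows; it is illustrative only, the names are placeholders and not DRBD code.

#include <linux/errno.h>
#include <linux/gfp.h>
#include <linux/mempool.h>

#define MD_POOL_MIN_PAGES 128	/* mirrors DRBD_MIN_POOL_PAGES above */

static mempool_t *md_page_pool;

static int md_pool_init(void)
{
	/* pre-allocates MD_POOL_MIN_PAGES order-0 pages as the reserve */
	md_page_pool = mempool_create_page_pool(MD_POOL_MIN_PAGES, 0);
	return md_page_pool ? 0 : -ENOMEM;
}

static struct page *md_page_get(void)
{
	/* falls back to the pre-allocated reserve under memory pressure */
	return mempool_alloc(md_page_pool, GFP_NOIO);
}

static void md_page_put(struct page *page)
{
	mempool_free(page, md_page_pool);
}

static void md_pool_exit(void)
{
	mempool_destroy(md_page_pool);
}
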
diff --git a/drivers/block/drbd/drbd_main.c b/drivers/block/drbd/drbd_main.c
index 211fc44..920ede2 100644
--- a/drivers/block/drbd/drbd_main.c
+++ b/drivers/block/drbd/drbd_main.c
@@ -139,6 +139,8 @@
 struct kmem_cache *drbd_al_ext_cache;	/* activity log extents */
 mempool_t *drbd_request_mempool;
 mempool_t *drbd_ee_mempool;
+mempool_t *drbd_md_io_page_pool;
+struct bio_set *drbd_md_io_bio_set;
 
 /* I do not use a standard mempool, because:
    1) I want to hand out the pre-allocated objects first.
@@ -159,7 +161,24 @@
 	.release = drbd_release,
 };
 
-#define ARRY_SIZE(A) (sizeof(A)/sizeof(A[0]))
+static void bio_destructor_drbd(struct bio *bio)
+{
+	bio_free(bio, drbd_md_io_bio_set);
+}
+
+struct bio *bio_alloc_drbd(gfp_t gfp_mask)
+{
+	struct bio *bio;
+
+	if (!drbd_md_io_bio_set)
+		return bio_alloc(gfp_mask, 1);
+
+	bio = bio_alloc_bioset(gfp_mask, 1, drbd_md_io_bio_set);
+	if (!bio)
+		return NULL;
+	bio->bi_destructor = bio_destructor_drbd;
+	return bio;
+}
 
 #ifdef __CHECKER__
 /* When checking with sparse, and this is an inline function, sparse will
@@ -208,6 +227,7 @@
 	mdev->oldest_tle = b;
 	mdev->newest_tle = b;
 	INIT_LIST_HEAD(&mdev->out_of_sequence_requests);
+	INIT_LIST_HEAD(&mdev->barrier_acked_requests);
 
 	mdev->tl_hash = NULL;
 	mdev->tl_hash_s = 0;
@@ -246,9 +266,7 @@
 	new->n_writes = 0;
 
 	newest_before = mdev->newest_tle;
-	/* never send a barrier number == 0, because that is special-cased
-	 * when using TCQ for our write ordering code */
-	new->br_number = (newest_before->br_number+1) ?: 1;
+	new->br_number = newest_before->br_number+1;
 	if (mdev->newest_tle != new) {
 		mdev->newest_tle->next = new;
 		mdev->newest_tle = new;
@@ -311,7 +329,7 @@
 	   These have been list_move'd to the out_of_sequence_requests list in
 	   _req_mod(, barrier_acked) above.
 	   */
-	list_del_init(&b->requests);
+	list_splice_init(&b->requests, &mdev->barrier_acked_requests);
 
 	nob = b->next;
 	if (test_and_clear_bit(CREATE_BARRIER, &mdev->flags)) {
@@ -411,6 +429,23 @@
 		b = tmp;
 		list_splice(&carry_reads, &b->requests);
 	}
+
+	/* Actions operating on the disk state also want to work on
+	   requests that got barrier acked. */
+	switch (what) {
+	case fail_frozen_disk_io:
+	case restart_frozen_disk_io:
+		list_for_each_safe(le, tle, &mdev->barrier_acked_requests) {
+			req = list_entry(le, struct drbd_request, tl_requests);
+			_req_mod(req, what);
+		}
+
+	case connection_lost_while_pending:
+	case resend:
+		break;
+	default:
+		dev_err(DEV, "what = %d in _tl_restart()\n", what);
+	}
 }
 
 
@@ -458,6 +493,38 @@
 }
 
 /**
+ * tl_abort_disk_io() - Abort disk I/O for all requests for a certain mdev in the TL
+ * @mdev:	DRBD device.
+ */
+void tl_abort_disk_io(struct drbd_conf *mdev)
+{
+	struct drbd_tl_epoch *b;
+	struct list_head *le, *tle;
+	struct drbd_request *req;
+
+	spin_lock_irq(&mdev->req_lock);
+	b = mdev->oldest_tle;
+	while (b) {
+		list_for_each_safe(le, tle, &b->requests) {
+			req = list_entry(le, struct drbd_request, tl_requests);
+			if (!(req->rq_state & RQ_LOCAL_PENDING))
+				continue;
+			_req_mod(req, abort_disk_io);
+		}
+		b = b->next;
+	}
+
+	list_for_each_safe(le, tle, &mdev->barrier_acked_requests) {
+		req = list_entry(le, struct drbd_request, tl_requests);
+		if (!(req->rq_state & RQ_LOCAL_PENDING))
+			continue;
+		_req_mod(req, abort_disk_io);
+	}
+
+	spin_unlock_irq(&mdev->req_lock);
+}
+
+/**
  * cl_wide_st_chg() - true if the state change is a cluster wide one
  * @mdev:	DRBD device.
  * @os:		old (current) state.
@@ -470,7 +537,7 @@
 		 ((os.role != R_PRIMARY && ns.role == R_PRIMARY) ||
 		  (os.conn != C_STARTING_SYNC_T && ns.conn == C_STARTING_SYNC_T) ||
 		  (os.conn != C_STARTING_SYNC_S && ns.conn == C_STARTING_SYNC_S) ||
-		  (os.disk != D_DISKLESS && ns.disk == D_DISKLESS))) ||
+		  (os.disk != D_FAILED && ns.disk == D_FAILED))) ||
 		(os.conn >= C_CONNECTED && ns.conn == C_DISCONNECTING) ||
 		(os.conn == C_CONNECTED && ns.conn == C_VERIFY_S);
 }
@@ -509,8 +576,16 @@
 static enum drbd_state_rv is_valid_state_transition(struct drbd_conf *,
 						    union drbd_state,
 						    union drbd_state);
+enum sanitize_state_warnings {
+	NO_WARNING,
+	ABORTED_ONLINE_VERIFY,
+	ABORTED_RESYNC,
+	CONNECTION_LOST_NEGOTIATING,
+	IMPLICITLY_UPGRADED_DISK,
+	IMPLICITLY_UPGRADED_PDSK,
+};
 static union drbd_state sanitize_state(struct drbd_conf *mdev, union drbd_state os,
-				       union drbd_state ns, const char **warn_sync_abort);
+				       union drbd_state ns, enum sanitize_state_warnings *warn);
 int drbd_send_state_req(struct drbd_conf *,
 			union drbd_state, union drbd_state);
 
@@ -785,6 +860,13 @@
 	if (ns.conn == os.conn && ns.conn == C_WF_REPORT_PARAMS)
 		rv = SS_IN_TRANSIENT_STATE;
 
+	/* While establishing a connection, only allow cstate to change.
+	   Delay/refuse role changes, detach/attach, etc. */
+	if (test_bit(STATE_SENT, &mdev->flags) &&
+	    !(os.conn == C_WF_REPORT_PARAMS ||
+	      (ns.conn == C_WF_REPORT_PARAMS && os.conn == C_WF_CONNECTION)))
+		rv = SS_IN_TRANSIENT_STATE;
+
 	if ((ns.conn == C_VERIFY_S || ns.conn == C_VERIFY_T) && os.conn < C_CONNECTED)
 		rv = SS_NEED_CONNECTION;
 
@@ -803,6 +885,21 @@
 	return rv;
 }
 
+static void print_sanitize_warnings(struct drbd_conf *mdev, enum sanitize_state_warnings warn)
+{
+	static const char *msg_table[] = {
+		[NO_WARNING] = "",
+		[ABORTED_ONLINE_VERIFY] = "Online-verify aborted.",
+		[ABORTED_RESYNC] = "Resync aborted.",
+		[CONNECTION_LOST_NEGOTIATING] = "Connection lost while negotiating, no data!",
+		[IMPLICITLY_UPGRADED_DISK] = "Implicitly upgraded disk",
+		[IMPLICITLY_UPGRADED_PDSK] = "Implicitly upgraded pdsk",
+	};
+
+	if (warn != NO_WARNING)
+		dev_warn(DEV, "%s\n", msg_table[warn]);
+}
+
 /**
  * sanitize_state() - Resolves implicitly necessary additional changes to a state transition
  * @mdev:	DRBD device.
@@ -814,11 +911,14 @@
  * to D_UNKNOWN. This rule and many more along those lines are in this function.
  */
 static union drbd_state sanitize_state(struct drbd_conf *mdev, union drbd_state os,
-				       union drbd_state ns, const char **warn_sync_abort)
+				       union drbd_state ns, enum sanitize_state_warnings *warn)
 {
 	enum drbd_fencing_p fp;
 	enum drbd_disk_state disk_min, disk_max, pdsk_min, pdsk_max;
 
+	if (warn)
+		*warn = NO_WARNING;
+
 	fp = FP_DONT_CARE;
 	if (get_ldev(mdev)) {
 		fp = mdev->ldev->dc.fencing;
@@ -833,18 +933,13 @@
 	/* After a network error (+C_TEAR_DOWN) only C_UNCONNECTED or C_DISCONNECTING can follow.
 	 * If you try to go into some Sync* state, that shall fail (elsewhere). */
 	if (os.conn >= C_TIMEOUT && os.conn <= C_TEAR_DOWN &&
-	    ns.conn != C_UNCONNECTED && ns.conn != C_DISCONNECTING && ns.conn <= C_TEAR_DOWN)
+	    ns.conn != C_UNCONNECTED && ns.conn != C_DISCONNECTING && ns.conn <= C_CONNECTED)
 		ns.conn = os.conn;
 
 	/* we cannot fail (again) if we already detached */
 	if (ns.disk == D_FAILED && os.disk == D_DISKLESS)
 		ns.disk = D_DISKLESS;
 
-	/* if we are only D_ATTACHING yet,
-	 * we can (and should) go directly to D_DISKLESS. */
-	if (ns.disk == D_FAILED && os.disk == D_ATTACHING)
-		ns.disk = D_DISKLESS;
-
 	/* After C_DISCONNECTING only C_STANDALONE may follow */
 	if (os.conn == C_DISCONNECTING && ns.conn != C_STANDALONE)
 		ns.conn = os.conn;
@@ -863,10 +958,9 @@
 	/* Abort resync if a disk fails/detaches */
 	if (os.conn > C_CONNECTED && ns.conn > C_CONNECTED &&
 	    (ns.disk <= D_FAILED || ns.pdsk <= D_FAILED)) {
-		if (warn_sync_abort)
-			*warn_sync_abort =
-				os.conn == C_VERIFY_S || os.conn == C_VERIFY_T ?
-				"Online-verify" : "Resync";
+		if (warn)
+			*warn =	os.conn == C_VERIFY_S || os.conn == C_VERIFY_T ?
+				ABORTED_ONLINE_VERIFY : ABORTED_RESYNC;
 		ns.conn = C_CONNECTED;
 	}
 
@@ -877,7 +971,8 @@
 			ns.disk = mdev->new_state_tmp.disk;
 			ns.pdsk = mdev->new_state_tmp.pdsk;
 		} else {
-			dev_alert(DEV, "Connection lost while negotiating, no data!\n");
+			if (warn)
+				*warn = CONNECTION_LOST_NEGOTIATING;
 			ns.disk = D_DISKLESS;
 			ns.pdsk = D_UNKNOWN;
 		}
@@ -959,16 +1054,16 @@
 		ns.disk = disk_max;
 
 	if (ns.disk < disk_min) {
-		dev_warn(DEV, "Implicitly set disk from %s to %s\n",
-			 drbd_disk_str(ns.disk), drbd_disk_str(disk_min));
+		if (warn)
+			*warn = IMPLICITLY_UPGRADED_DISK;
 		ns.disk = disk_min;
 	}
 	if (ns.pdsk > pdsk_max)
 		ns.pdsk = pdsk_max;
 
 	if (ns.pdsk < pdsk_min) {
-		dev_warn(DEV, "Implicitly set pdsk from %s to %s\n",
-			 drbd_disk_str(ns.pdsk), drbd_disk_str(pdsk_min));
+		if (warn)
+			*warn = IMPLICITLY_UPGRADED_PDSK;
 		ns.pdsk = pdsk_min;
 	}
 
@@ -1045,12 +1140,12 @@
 {
 	union drbd_state os;
 	enum drbd_state_rv rv = SS_SUCCESS;
-	const char *warn_sync_abort = NULL;
+	enum sanitize_state_warnings ssw;
 	struct after_state_chg_work *ascw;
 
 	os = mdev->state;
 
-	ns = sanitize_state(mdev, os, ns, &warn_sync_abort);
+	ns = sanitize_state(mdev, os, ns, &ssw);
 
 	if (ns.i == os.i)
 		return SS_NOTHING_TO_DO;
@@ -1076,8 +1171,7 @@
 		return rv;
 	}
 
-	if (warn_sync_abort)
-		dev_warn(DEV, "%s aborted.\n", warn_sync_abort);
+	print_sanitize_warnings(mdev, ssw);
 
 	{
 	char *pbp, pb[300];
@@ -1243,7 +1337,7 @@
 		drbd_thread_stop_nowait(&mdev->receiver);
 
 	/* Upon network failure, we need to restart the receiver. */
-	if (os.conn > C_TEAR_DOWN &&
+	if (os.conn > C_WF_CONNECTION &&
 	    ns.conn <= C_TEAR_DOWN && ns.conn >= C_TIMEOUT)
 		drbd_thread_restart_nowait(&mdev->receiver);
 
@@ -1251,6 +1345,15 @@
 	if (os.conn < C_CONNECTED && ns.conn >= C_CONNECTED)
 		drbd_resume_al(mdev);
 
+	/* remember last connect and attach times so request_timer_fn() won't
+	 * kill newly established sessions while we are still trying to thaw
+	 * previously frozen IO */
+	if (os.conn != C_WF_REPORT_PARAMS && ns.conn == C_WF_REPORT_PARAMS)
+		mdev->last_reconnect_jif = jiffies;
+	if ((os.disk == D_ATTACHING || os.disk == D_NEGOTIATING) &&
+	    ns.disk > D_NEGOTIATING)
+		mdev->last_reattach_jif = jiffies;
+
 	ascw = kmalloc(sizeof(*ascw), GFP_ATOMIC);
 	if (ascw) {
 		ascw->os = os;
@@ -1354,12 +1457,16 @@
 	/* Here we have the actions that are performed after a
 	   state change. This function might sleep */
 
+	if (os.disk <= D_NEGOTIATING && ns.disk > D_NEGOTIATING)
+		mod_timer(&mdev->request_timer, jiffies + HZ);
+
 	nsm.i = -1;
 	if (ns.susp_nod) {
 		if (os.conn < C_CONNECTED && ns.conn >= C_CONNECTED)
 			what = resend;
 
-		if (os.disk == D_ATTACHING && ns.disk > D_ATTACHING)
+		if ((os.disk == D_ATTACHING || os.disk == D_NEGOTIATING) &&
+		    ns.disk > D_NEGOTIATING)
 			what = restart_frozen_disk_io;
 
 		if (what != nothing)
@@ -1408,7 +1515,7 @@
 	/* Do not change the order of the if above and the two below... */
 	if (os.pdsk == D_DISKLESS && ns.pdsk > D_DISKLESS) {      /* attach on the peer */
 		drbd_send_uuids(mdev);
-		drbd_send_state(mdev);
+		drbd_send_state(mdev, ns);
 	}
 	/* No point in queuing send_bitmap if we don't have a connection
 	 * anymore, so check also the _current_ state, not only the new state
@@ -1441,11 +1548,11 @@
 	}
 
 	if (ns.pdsk < D_INCONSISTENT && get_ldev(mdev)) {
-		if (ns.peer == R_PRIMARY && mdev->ldev->md.uuid[UI_BITMAP] == 0) {
+		if (os.peer == R_SECONDARY && ns.peer == R_PRIMARY &&
+		    mdev->ldev->md.uuid[UI_BITMAP] == 0 && ns.disk >= D_UP_TO_DATE) {
 			drbd_uuid_new_current(mdev);
 			drbd_send_uuids(mdev);
 		}
-
 		/* D_DISKLESS Peer becomes secondary */
 		if (os.peer == R_PRIMARY && ns.peer == R_SECONDARY)
 			/* We may still be Primary ourselves.
@@ -1473,14 +1580,14 @@
 	    os.disk == D_ATTACHING && ns.disk == D_NEGOTIATING) {
 		drbd_send_sizes(mdev, 0, 0);  /* to start sync... */
 		drbd_send_uuids(mdev);
-		drbd_send_state(mdev);
+		drbd_send_state(mdev, ns);
 	}
 
 	/* We want to pause/continue resync, tell peer. */
 	if (ns.conn >= C_CONNECTED &&
 	     ((os.aftr_isp != ns.aftr_isp) ||
 	      (os.user_isp != ns.user_isp)))
-		drbd_send_state(mdev);
+		drbd_send_state(mdev, ns);
 
 	/* In case one of the isp bits got set, suspend other devices. */
 	if ((!os.aftr_isp && !os.peer_isp && !os.user_isp) &&
@@ -1490,10 +1597,10 @@
 	/* Make sure the peer gets informed about eventual state
 	   changes (ISP bits) while we were in WFReportParams. */
 	if (os.conn == C_WF_REPORT_PARAMS && ns.conn >= C_CONNECTED)
-		drbd_send_state(mdev);
+		drbd_send_state(mdev, ns);
 
 	if (os.conn != C_AHEAD && ns.conn == C_AHEAD)
-		drbd_send_state(mdev);
+		drbd_send_state(mdev, ns);
 
 	/* We are in the progress to start a full sync... */
 	if ((os.conn != C_STARTING_SYNC_T && ns.conn == C_STARTING_SYNC_T) ||
@@ -1513,33 +1620,38 @@
 	/* first half of local IO error, failure to attach,
 	 * or administrative detach */
 	if (os.disk != D_FAILED && ns.disk == D_FAILED) {
-		enum drbd_io_error_p eh;
-		int was_io_error;
+		enum drbd_io_error_p eh = EP_PASS_ON;
+		int was_io_error = 0;
 		/* corresponding get_ldev was in __drbd_set_state, to serialize
-		 * our cleanup here with the transition to D_DISKLESS,
-		 * so it is safe to dreference ldev here. */
-		eh = mdev->ldev->dc.on_io_error;
-		was_io_error = test_and_clear_bit(WAS_IO_ERROR, &mdev->flags);
+		 * our cleanup here with the transition to D_DISKLESS.
+		 * But it is still not safe to dereference ldev here, since
+		 * we might come from a failed Attach before ldev was set. */
+		if (mdev->ldev) {
+			eh = mdev->ldev->dc.on_io_error;
+			was_io_error = test_and_clear_bit(WAS_IO_ERROR, &mdev->flags);
 
-		/* current state still has to be D_FAILED,
-		 * there is only one way out: to D_DISKLESS,
-		 * and that may only happen after our put_ldev below. */
-		if (mdev->state.disk != D_FAILED)
-			dev_err(DEV,
-				"ASSERT FAILED: disk is %s during detach\n",
-				drbd_disk_str(mdev->state.disk));
+			/* Immediately allow completion of all application IO that waits
+			   for completion from the local disk. */
+			tl_abort_disk_io(mdev);
 
-		if (drbd_send_state(mdev))
-			dev_warn(DEV, "Notified peer that I am detaching my disk\n");
-		else
-			dev_err(DEV, "Sending state for detaching disk failed\n");
+			/* current state still has to be D_FAILED,
+			 * there is only one way out: to D_DISKLESS,
+			 * and that may only happen after our put_ldev below. */
+			if (mdev->state.disk != D_FAILED)
+				dev_err(DEV,
+					"ASSERT FAILED: disk is %s during detach\n",
+					drbd_disk_str(mdev->state.disk));
 
-		drbd_rs_cancel_all(mdev);
+			if (ns.conn >= C_CONNECTED)
+				drbd_send_state(mdev, ns);
 
-		/* In case we want to get something to stable storage still,
-		 * this may be the last chance.
-		 * Following put_ldev may transition to D_DISKLESS. */
-		drbd_md_sync(mdev);
+			drbd_rs_cancel_all(mdev);
+
+			/* In case we want to get something to stable storage still,
+			 * this may be the last chance.
+			 * Following put_ldev may transition to D_DISKLESS. */
+			drbd_md_sync(mdev);
+		}
 		put_ldev(mdev);
 
 		if (was_io_error && eh == EP_CALL_HELPER)
@@ -1561,16 +1673,17 @@
                 mdev->rs_failed = 0;
                 atomic_set(&mdev->rs_pending_cnt, 0);
 
-		if (drbd_send_state(mdev))
-			dev_warn(DEV, "Notified peer that I'm now diskless.\n");
+		if (ns.conn >= C_CONNECTED)
+			drbd_send_state(mdev, ns);
+
 		/* corresponding get_ldev in __drbd_set_state
 		 * this may finally trigger drbd_ldev_destroy. */
 		put_ldev(mdev);
 	}
 
 	/* Notify peer that I had a local IO error, and did not detach. */
-	if (os.disk == D_UP_TO_DATE && ns.disk == D_INCONSISTENT)
-		drbd_send_state(mdev);
+	if (os.disk == D_UP_TO_DATE && ns.disk == D_INCONSISTENT && ns.conn >= C_CONNECTED)
+		drbd_send_state(mdev, ns);
 
 	/* Disks got bigger while they were detached */
 	if (ns.disk > D_NEGOTIATING && ns.pdsk > D_NEGOTIATING &&
@@ -1588,7 +1701,13 @@
 	/* sync target done with resync.  Explicitly notify peer, even though
 	 * it should (at least for non-empty resyncs) already know itself. */
 	if (os.disk < D_UP_TO_DATE && os.conn >= C_SYNC_SOURCE && ns.conn == C_CONNECTED)
-		drbd_send_state(mdev);
+		drbd_send_state(mdev, ns);
+
+	/* Wake up role changes that were delayed while the connection was being established */
+	if (os.conn == C_WF_REPORT_PARAMS && ns.conn != C_WF_REPORT_PARAMS) {
+		clear_bit(STATE_SENT, &mdev->flags);
+		wake_up(&mdev->state_wait);
+	}
 
 	/* This triggers bitmap writeout of potentially still unwritten pages
 	 * if the resync finished cleanly, or aborted because of peer disk
@@ -1598,8 +1717,8 @@
 	 * No harm done if some bits change during this phase.
 	 */
 	if (os.conn > C_CONNECTED && ns.conn <= C_CONNECTED && get_ldev(mdev)) {
-		drbd_queue_bitmap_io(mdev, &drbd_bm_write, NULL,
-			"write from resync_finished", BM_LOCKED_SET_ALLOWED);
+		drbd_queue_bitmap_io(mdev, &drbd_bm_write_copy_pages, NULL,
+			"write from resync_finished", BM_LOCKED_CHANGE_ALLOWED);
 		put_ldev(mdev);
 	}
 
@@ -2057,7 +2176,11 @@
 
 	D_ASSERT(mdev->state.disk == D_UP_TO_DATE);
 
-	uuid = mdev->ldev->md.uuid[UI_BITMAP] + UUID_NEW_BM_OFFSET;
+	uuid = mdev->ldev->md.uuid[UI_BITMAP];
+	if (uuid && uuid != UUID_JUST_CREATED)
+		uuid = uuid + UUID_NEW_BM_OFFSET;
+	else
+		get_random_bytes(&uuid, sizeof(u64));
 	drbd_uuid_set(mdev, UI_BITMAP, uuid);
 	drbd_print_uuids(mdev, "updated sync UUID");
 	drbd_md_sync(mdev);
@@ -2089,6 +2212,10 @@
 		max_bio_size = DRBD_MAX_BIO_SIZE; /* ... multiple BIOs per peer_request */
 	}
 
+	/* Never allow old drbd (up to 8.3.7) to see more than 32KiB */
+	if (mdev->agreed_pro_version <= 94)
+		max_bio_size = min_t(int, max_bio_size, DRBD_MAX_SIZE_H80_PACKET);
+
 	p.d_size = cpu_to_be64(d_size);
 	p.u_size = cpu_to_be64(u_size);
 	p.c_size = cpu_to_be64(trigger_reply ? 0 : drbd_get_capacity(mdev->this_bdev));
@@ -2102,10 +2229,10 @@
 }
 
 /**
- * drbd_send_state() - Sends the drbd state to the peer
+ * drbd_send_current_state() - Sends the drbd state to the peer
  * @mdev:	DRBD device.
  */
-int drbd_send_state(struct drbd_conf *mdev)
+int drbd_send_current_state(struct drbd_conf *mdev)
 {
 	struct socket *sock;
 	struct p_state p;
@@ -2131,6 +2258,37 @@
 	return ok;
 }
 
+/**
+ * drbd_send_state() - After a state change, sends the new state to the peer
+ * @mdev:	DRBD device.
+ * @state:	the state to send, not necessarily the current state.
+ *
+ * Each state change queues an "after_state_ch" work, which will eventually
+ * send the resulting new state to the peer. If more state changes happen
+ * between queuing and processing of the after_state_ch work, we still
+ * want to send each intermediary state in the order it occurred.
+ */
+int drbd_send_state(struct drbd_conf *mdev, union drbd_state state)
+{
+	struct socket *sock;
+	struct p_state p;
+	int ok = 0;
+
+	mutex_lock(&mdev->data.mutex);
+
+	p.state = cpu_to_be32(state.i);
+	sock = mdev->data.socket;
+
+	if (likely(sock != NULL)) {
+		ok = _drbd_send_cmd(mdev, sock, P_STATE,
+				    (struct p_header80 *)&p, sizeof(p), 0);
+	}
+
+	mutex_unlock(&mdev->data.mutex);
+
+	return ok;
+}
+
 int drbd_send_state_req(struct drbd_conf *mdev,
 	union drbd_state mask, union drbd_state val)
 {
@@ -2615,7 +2773,7 @@
 	struct bio_vec *bvec;
 	int i;
 	/* hint all but last page with MSG_MORE */
-	__bio_for_each_segment(bvec, bio, i, 0) {
+	bio_for_each_segment(bvec, bio, i) {
 		if (!_drbd_no_send_page(mdev, bvec->bv_page,
 				     bvec->bv_offset, bvec->bv_len,
 				     i == bio->bi_vcnt -1 ? 0 : MSG_MORE))
@@ -2629,7 +2787,7 @@
 	struct bio_vec *bvec;
 	int i;
 	/* hint all but last page with MSG_MORE */
-	__bio_for_each_segment(bvec, bio, i, 0) {
+	bio_for_each_segment(bvec, bio, i) {
 		if (!_drbd_send_page(mdev, bvec->bv_page,
 				     bvec->bv_offset, bvec->bv_len,
 				     i == bio->bi_vcnt -1 ? 0 : MSG_MORE))
@@ -2695,8 +2853,7 @@
 
 	p.sector   = cpu_to_be64(req->sector);
 	p.block_id = (unsigned long)req;
-	p.seq_num  = cpu_to_be32(req->seq_num =
-				 atomic_add_return(1, &mdev->packet_seq));
+	p.seq_num  = cpu_to_be32(atomic_add_return(1, &mdev->packet_seq));
 
 	dp_flags = bio_flags_to_wire(mdev, req->master_bio->bi_rw);
 
@@ -2987,8 +3144,8 @@
 	atomic_set(&mdev->rs_sect_in, 0);
 	atomic_set(&mdev->rs_sect_ev, 0);
 	atomic_set(&mdev->ap_in_flight, 0);
+	atomic_set(&mdev->md_io_in_use, 0);
 
-	mutex_init(&mdev->md_io_mutex);
 	mutex_init(&mdev->data.mutex);
 	mutex_init(&mdev->meta.mutex);
 	sema_init(&mdev->data.work.s, 0);
@@ -3126,6 +3283,10 @@
 
 	/* D_ASSERT(atomic_read(&drbd_pp_vacant)==0); */
 
+	if (drbd_md_io_bio_set)
+		bioset_free(drbd_md_io_bio_set);
+	if (drbd_md_io_page_pool)
+		mempool_destroy(drbd_md_io_page_pool);
 	if (drbd_ee_mempool)
 		mempool_destroy(drbd_ee_mempool);
 	if (drbd_request_mempool)
@@ -3139,6 +3300,8 @@
 	if (drbd_al_ext_cache)
 		kmem_cache_destroy(drbd_al_ext_cache);
 
+	drbd_md_io_bio_set   = NULL;
+	drbd_md_io_page_pool = NULL;
 	drbd_ee_mempool      = NULL;
 	drbd_request_mempool = NULL;
 	drbd_ee_cache        = NULL;
@@ -3162,6 +3325,8 @@
 	drbd_bm_ext_cache    = NULL;
 	drbd_al_ext_cache    = NULL;
 	drbd_pp_pool         = NULL;
+	drbd_md_io_page_pool = NULL;
+	drbd_md_io_bio_set   = NULL;
 
 	/* caches */
 	drbd_request_cache = kmem_cache_create(
@@ -3185,6 +3350,16 @@
 		goto Enomem;
 
 	/* mempools */
+#ifdef COMPAT_HAVE_BIOSET_CREATE
+	drbd_md_io_bio_set = bioset_create(DRBD_MIN_POOL_PAGES, 0);
+	if (drbd_md_io_bio_set == NULL)
+		goto Enomem;
+#endif
+
+	drbd_md_io_page_pool = mempool_create_page_pool(DRBD_MIN_POOL_PAGES, 0);
+	if (drbd_md_io_page_pool == NULL)
+		goto Enomem;
+
 	drbd_request_mempool = mempool_create(number,
 		mempool_alloc_slab, mempool_free_slab, drbd_request_cache);
 	if (drbd_request_mempool == NULL)
@@ -3262,6 +3437,8 @@
 	if (!mdev)
 		return;
 
+	del_timer_sync(&mdev->request_timer);
+
 	/* paranoia asserts */
 	if (mdev->open_cnt != 0)
 		dev_err(DEV, "open_cnt = %d in %s:%u", mdev->open_cnt,
@@ -3666,8 +3843,10 @@
 	if (!get_ldev_if_state(mdev, D_FAILED))
 		return;
 
-	mutex_lock(&mdev->md_io_mutex);
-	buffer = (struct meta_data_on_disk *)page_address(mdev->md_io_page);
+	buffer = drbd_md_get_buffer(mdev);
+	if (!buffer)
+		goto out;
+
 	memset(buffer, 0, 512);
 
 	buffer->la_size = cpu_to_be64(drbd_get_capacity(mdev->this_bdev));
@@ -3698,7 +3877,8 @@
 	 * since we updated it on metadata. */
 	mdev->ldev->md.la_size_sect = drbd_get_capacity(mdev->this_bdev);
 
-	mutex_unlock(&mdev->md_io_mutex);
+	drbd_md_put_buffer(mdev);
+out:
 	put_ldev(mdev);
 }
 
@@ -3718,8 +3898,9 @@
 	if (!get_ldev_if_state(mdev, D_ATTACHING))
 		return ERR_IO_MD_DISK;
 
-	mutex_lock(&mdev->md_io_mutex);
-	buffer = (struct meta_data_on_disk *)page_address(mdev->md_io_page);
+	buffer = drbd_md_get_buffer(mdev);
+	if (!buffer)
+		goto out;
 
 	if (!drbd_md_sync_page_io(mdev, bdev, bdev->md.md_offset, READ)) {
 		/* NOTE: can't do normal error processing here as this is
@@ -3780,7 +3961,8 @@
 		mdev->sync_conf.al_extents = 127;
 
  err:
-	mutex_unlock(&mdev->md_io_mutex);
+	drbd_md_put_buffer(mdev);
+ out:
 	put_ldev(mdev);
 
 	return rv;
@@ -4183,12 +4365,11 @@
 	static char buildtag[38] = "\0uilt-in";
 
 	if (buildtag[0] == 0) {
-#ifdef CONFIG_MODULES
-		if (THIS_MODULE != NULL)
-			sprintf(buildtag, "srcversion: %-24s", THIS_MODULE->srcversion);
-		else
+#ifdef MODULE
+		sprintf(buildtag, "srcversion: %-24s", THIS_MODULE->srcversion);
+#else
+		buildtag[0] = 'b';
 #endif
-			buildtag[0] = 'b';
 	}
 
 	return buildtag;
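
The drbd_main.c changes above drop md_io_mutex in favour of an atomic md_io_in_use count and the new drbd_md_get_buffer()/drbd_md_put_buffer() helpers. Their implementation is not part of this section, so the following is only a hedged sketch, under assumed semantics, of how a single shared buffer can be guarded by an atomic use count; it is not the actual DRBD implementation.

#include <linux/atomic.h>
#include <linux/wait.h>

static atomic_t buf_in_use = ATOMIC_INIT(0);
static DECLARE_WAIT_QUEUE_HEAD(buf_wait);
static char md_buffer[512];

static void *md_buffer_get(void)
{
	/* only one user at a time; the 0 -> 1 transition wins */
	wait_event(buf_wait, atomic_add_unless(&buf_in_use, 1, 1));
	return md_buffer;
}

static void md_buffer_put(void)
{
	if (atomic_dec_and_test(&buf_in_use))
		wake_up(&buf_wait);
}
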
diff --git a/drivers/block/drbd/drbd_nl.c b/drivers/block/drbd/drbd_nl.c
index 946166e..6d4de6a 100644
--- a/drivers/block/drbd/drbd_nl.c
+++ b/drivers/block/drbd/drbd_nl.c
@@ -289,7 +289,7 @@
 	*/
 	spin_lock_irq(&mdev->req_lock);
 	ns = mdev->state;
-	if (ns.conn < C_WF_REPORT_PARAMS) {
+	if (ns.conn < C_WF_REPORT_PARAMS && !test_bit(STATE_SENT, &mdev->flags)) {
 		ns.pdsk = nps;
 		_drbd_set_state(mdev, ns, CS_VERBOSE, NULL);
 	}
@@ -432,7 +432,7 @@
 		/* if this was forced, we should consider sync */
 		if (forced)
 			drbd_send_uuids(mdev);
-		drbd_send_state(mdev);
+		drbd_send_current_state(mdev);
 	}
 
 	drbd_md_sync(mdev);
@@ -845,9 +845,10 @@
 	   Because new from 8.3.8 onwards the peer can use multiple
 	   BIOs for a single peer_request */
 	if (mdev->state.conn >= C_CONNECTED) {
-		if (mdev->agreed_pro_version < 94)
-			peer = mdev->peer_max_bio_size;
-		else if (mdev->agreed_pro_version == 94)
+		if (mdev->agreed_pro_version < 94) {
+			peer = min_t(int, mdev->peer_max_bio_size, DRBD_MAX_SIZE_H80_PACKET);
+			/* Correct old drbd (up to 8.3.7) if it believes it can do more than 32KiB */
+		} else if (mdev->agreed_pro_version == 94)
 			peer = DRBD_MAX_SIZE_H80_PACKET;
 		else /* drbd 8.3.8 onwards */
 			peer = DRBD_MAX_BIO_SIZE;
@@ -1032,7 +1033,7 @@
 		dev_err(DEV, "max capacity %llu smaller than disk size %llu\n",
 			(unsigned long long) drbd_get_max_capacity(nbc),
 			(unsigned long long) nbc->dc.disk_size);
-		retcode = ERR_DISK_TO_SMALL;
+		retcode = ERR_DISK_TOO_SMALL;
 		goto fail;
 	}
 
@@ -1046,7 +1047,7 @@
 	}
 
 	if (drbd_get_capacity(nbc->md_bdev) < min_md_device_sectors) {
-		retcode = ERR_MD_DISK_TO_SMALL;
+		retcode = ERR_MD_DISK_TOO_SMALL;
 		dev_warn(DEV, "refusing attach: md-device too small, "
 		     "at least %llu sectors needed for this meta-disk type\n",
 		     (unsigned long long) min_md_device_sectors);
@@ -1057,7 +1058,7 @@
 	 * (we may currently be R_PRIMARY with no local disk...) */
 	if (drbd_get_max_capacity(nbc) <
 	    drbd_get_capacity(mdev->this_bdev)) {
-		retcode = ERR_DISK_TO_SMALL;
+		retcode = ERR_DISK_TOO_SMALL;
 		goto fail;
 	}
 
@@ -1138,7 +1139,7 @@
 	if (drbd_md_test_flag(nbc, MDF_CONSISTENT) &&
 	    drbd_new_dev_size(mdev, nbc, 0) < nbc->md.la_size_sect) {
 		dev_warn(DEV, "refusing to truncate a consistent device\n");
-		retcode = ERR_DISK_TO_SMALL;
+		retcode = ERR_DISK_TOO_SMALL;
 		goto force_diskless_dec;
 	}
 
@@ -1336,17 +1337,34 @@
 {
 	enum drbd_ret_code retcode;
 	int ret;
+	struct detach dt = {};
+
+	if (!detach_from_tags(mdev, nlp->tag_list, &dt)) {
+		reply->ret_code = ERR_MANDATORY_TAG;
+		goto out;
+	}
+
+	if (dt.detach_force) {
+		drbd_force_state(mdev, NS(disk, D_FAILED));
+		reply->ret_code = SS_SUCCESS;
+		goto out;
+	}
+
 	drbd_suspend_io(mdev); /* so no-one is stuck in drbd_al_begin_io */
+	drbd_md_get_buffer(mdev); /* make sure there is no in-flight meta-data IO */
 	retcode = drbd_request_state(mdev, NS(disk, D_FAILED));
+	drbd_md_put_buffer(mdev);
 	/* D_FAILED will transition to DISKLESS. */
 	ret = wait_event_interruptible(mdev->misc_wait,
 			mdev->state.disk != D_FAILED);
 	drbd_resume_io(mdev);
+
 	if ((int)retcode == (int)SS_IS_DISKLESS)
 		retcode = SS_NOTHING_TO_DO;
 	if (ret)
 		retcode = ERR_INTR;
 	reply->ret_code = retcode;
+out:
 	return 0;
 }
 
@@ -1711,7 +1729,7 @@
 
 	if (rs.no_resync && mdev->agreed_pro_version < 93) {
 		retcode = ERR_NEED_APV_93;
-		goto fail;
+		goto fail_ldev;
 	}
 
 	if (mdev->ldev->known_size != drbd_get_capacity(mdev->ldev->backing_bdev))
@@ -1738,6 +1756,10 @@
  fail:
 	reply->ret_code = retcode;
 	return 0;
+
+ fail_ldev:
+	put_ldev(mdev);
+	goto fail;
 }
 
 static int drbd_nl_syncer_conf(struct drbd_conf *mdev, struct drbd_nl_cfg_req *nlp,
@@ -1941,6 +1963,7 @@
 
 	/* If there is still bitmap IO pending, probably because of a previous
 	 * resync just being finished, wait for it before requesting a new resync. */
+	drbd_suspend_io(mdev);
 	wait_event(mdev->misc_wait, !test_bit(BITMAP_IO, &mdev->flags));
 
 	retcode = _drbd_request_state(mdev, NS(conn, C_STARTING_SYNC_T), CS_ORDERED);
@@ -1959,6 +1982,7 @@
 
 		retcode = drbd_request_state(mdev, NS(conn, C_STARTING_SYNC_T));
 	}
+	drbd_resume_io(mdev);
 
 	reply->ret_code = retcode;
 	return 0;
@@ -1980,6 +2004,7 @@
 
 	/* If there is still bitmap IO pending, probably because of a previous
 	 * resync just being finished, wait for it before requesting a new resync. */
+	drbd_suspend_io(mdev);
 	wait_event(mdev->misc_wait, !test_bit(BITMAP_IO, &mdev->flags));
 
 	retcode = _drbd_request_state(mdev, NS(conn, C_STARTING_SYNC_S), CS_ORDERED);
@@ -1998,6 +2023,7 @@
 		} else
 			retcode = drbd_request_state(mdev, NS(conn, C_STARTING_SYNC_S));
 	}
+	drbd_resume_io(mdev);
 
 	reply->ret_code = retcode;
 	return 0;
@@ -2170,11 +2196,13 @@
 
 	/* If there is still bitmap IO pending, e.g. previous resync or verify
 	 * just being finished, wait for it before requesting a new resync. */
+	drbd_suspend_io(mdev);
 	wait_event(mdev->misc_wait, !test_bit(BITMAP_IO, &mdev->flags));
 
 	/* w_make_ov_request expects position to be aligned */
 	mdev->ov_start_sector = args.start_sector & ~BM_SECT_PER_BIT;
 	reply->ret_code = drbd_request_state(mdev,NS(conn,C_VERIFY_S));
+	drbd_resume_io(mdev);
 	return 0;
 }
 
diff --git a/drivers/block/drbd/drbd_proc.c b/drivers/block/drbd/drbd_proc.c
index 2959cdf..869bada 100644
--- a/drivers/block/drbd/drbd_proc.c
+++ b/drivers/block/drbd/drbd_proc.c
@@ -52,7 +52,7 @@
 	if (unlikely(v >= 1000000)) {
 		/* cool: > GiByte/s */
 		seq_printf(seq, "%ld,", v / 1000000);
-		v /= 1000000;
+		v %= 1000000;
 		seq_printf(seq, "%03ld,%03ld", v/1000, v % 1000);
 	} else if (likely(v >= 1000))
 		seq_printf(seq, "%ld,%03ld", v/1000, v % 1000);
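
The drbd_proc.c hunk above changes "v /= 1000000" to "v %= 1000000" in the GiByte/s branch: dividing discarded the six digits below one GiByte/s. A small stand-alone demonstration of the fixed arithmetic, using printf in place of seq_printf, is sketched below; it is not part of the patch.

#include <stdio.h>

static void print_rate(long v)	/* v in KiB/s, thousands-separated output */
{
	if (v >= 1000000) {
		printf("%ld,", v / 1000000);
		v %= 1000000;		/* keep the remaining six digits */
		printf("%03ld,%03ld", v / 1000, v % 1000);
	} else if (v >= 1000) {
		printf("%ld,%03ld", v / 1000, v % 1000);
	} else {
		printf("%ld", v);
	}
	printf("\n");
}

int main(void)
{
	print_rate(1234567);	/* prints 1,234,567 (the old code printed 1,000,001) */
	print_rate(987);	/* prints 987 */
	return 0;
}
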
diff --git a/drivers/block/drbd/drbd_receiver.c b/drivers/block/drbd/drbd_receiver.c
index 436f519..ea4836e 100644
--- a/drivers/block/drbd/drbd_receiver.c
+++ b/drivers/block/drbd/drbd_receiver.c
@@ -466,6 +466,7 @@
 		goto out;
 	}
 	(*newsock)->ops  = sock->ops;
+	__module_get((*newsock)->ops->owner);
 
 out:
 	return err;
@@ -750,6 +751,7 @@
 {
 	struct socket *s, *sock, *msock;
 	int try, h, ok;
+	enum drbd_state_rv rv;
 
 	D_ASSERT(!mdev->data.socket);
 
@@ -888,25 +890,32 @@
 		}
 	}
 
-	if (drbd_request_state(mdev, NS(conn, C_WF_REPORT_PARAMS)) < SS_SUCCESS)
-		return 0;
-
 	sock->sk->sk_sndtimeo = mdev->net_conf->timeout*HZ/10;
 	sock->sk->sk_rcvtimeo = MAX_SCHEDULE_TIMEOUT;
 
 	atomic_set(&mdev->packet_seq, 0);
 	mdev->peer_seq = 0;
 
-	drbd_thread_start(&mdev->asender);
-
 	if (drbd_send_protocol(mdev) == -1)
 		return -1;
+	set_bit(STATE_SENT, &mdev->flags);
 	drbd_send_sync_param(mdev, &mdev->sync_conf);
 	drbd_send_sizes(mdev, 0, 0);
 	drbd_send_uuids(mdev);
-	drbd_send_state(mdev);
+	drbd_send_current_state(mdev);
 	clear_bit(USE_DEGR_WFC_T, &mdev->flags);
 	clear_bit(RESIZE_PENDING, &mdev->flags);
+
+	spin_lock_irq(&mdev->req_lock);
+	rv = _drbd_set_state(_NS(mdev, conn, C_WF_REPORT_PARAMS), CS_VERBOSE, NULL);
+	if (mdev->state.conn != C_WF_REPORT_PARAMS)
+		clear_bit(STATE_SENT, &mdev->flags);
+	spin_unlock_irq(&mdev->req_lock);
+
+	if (rv < SS_SUCCESS)
+		return 0;
+
+	drbd_thread_start(&mdev->asender);
 	mod_timer(&mdev->request_timer, jiffies + HZ); /* just start it here. */
 
 	return 1;
@@ -957,7 +966,7 @@
 		rv = blkdev_issue_flush(mdev->ldev->backing_bdev, GFP_KERNEL,
 					NULL);
 		if (rv) {
-			dev_err(DEV, "local disk flush failed with status %d\n", rv);
+			dev_info(DEV, "local disk flush failed with status %d\n", rv);
 			/* would rather check on EOPNOTSUPP, but that is not reliable.
 			 * don't try again for ANY return value != 0
 			 * if (rv == -EOPNOTSUPP) */
@@ -1001,13 +1010,14 @@
 
 		if (epoch_size != 0 &&
 		    atomic_read(&epoch->active) == 0 &&
-		    test_bit(DE_HAVE_BARRIER_NUMBER, &epoch->flags)) {
+		    (test_bit(DE_HAVE_BARRIER_NUMBER, &epoch->flags) || ev & EV_CLEANUP)) {
 			if (!(ev & EV_CLEANUP)) {
 				spin_unlock(&mdev->epoch_lock);
 				drbd_send_b_ack(mdev, epoch->barrier_nr, epoch_size);
 				spin_lock(&mdev->epoch_lock);
 			}
-			dec_unacked(mdev);
+			if (test_bit(DE_HAVE_BARRIER_NUMBER, &epoch->flags))
+				dec_unacked(mdev);
 
 			if (mdev->current_epoch != epoch) {
 				next_epoch = list_entry(epoch->list.next, struct drbd_epoch, list);
@@ -1096,7 +1106,11 @@
 	/* In most cases, we will only need one bio.  But in case the lower
 	 * level restrictions happen to be different at this offset on this
 	 * side than those of the sending peer, we may need to submit the
-	 * request in more than one bio. */
+	 * request in more than one bio.
+	 *
+	 * Plain bio_alloc is good enough here; this is not a bio generated
+	 * internally by DRBD, but one allocated on behalf of the peer.
+	 */
 next_bio:
 	bio = bio_alloc(GFP_NOIO, nr_pages);
 	if (!bio) {
@@ -1583,6 +1597,24 @@
 	return ok;
 }
 
+static bool overlapping_resync_write(struct drbd_conf *mdev, struct drbd_epoch_entry *data_e)
+{
+
+	struct drbd_epoch_entry *rs_e;
+	bool rv = 0;
+
+	spin_lock_irq(&mdev->req_lock);
+	list_for_each_entry(rs_e, &mdev->sync_ee, w.list) {
+		if (overlaps(data_e->sector, data_e->size, rs_e->sector, rs_e->size)) {
+			rv = 1;
+			break;
+		}
+	}
+	spin_unlock_irq(&mdev->req_lock);
+
+	return rv;
+}
+
 /* Called from receive_Data.
  * Synchronize packets on sock with packets on msock.
  *
@@ -1826,6 +1858,9 @@
 	list_add(&e->w.list, &mdev->active_ee);
 	spin_unlock_irq(&mdev->req_lock);
 
+	if (mdev->state.conn == C_SYNC_TARGET)
+		wait_event(mdev->ee_wait, !overlapping_resync_write(mdev, e));
+
 	switch (mdev->net_conf->wire_protocol) {
 	case DRBD_PROT_C:
 		inc_unacked(mdev);
@@ -2420,7 +2455,7 @@
 			mdev->p_uuid[UI_BITMAP] = mdev->p_uuid[UI_HISTORY_START];
 			mdev->p_uuid[UI_HISTORY_START] = mdev->p_uuid[UI_HISTORY_START + 1];
 
-			dev_info(DEV, "Did not got last syncUUID packet, corrected:\n");
+			dev_info(DEV, "Lost last syncUUID packet, corrected:\n");
 			drbd_uuid_dump(mdev, "peer", mdev->p_uuid, mdev->p_uuid[UI_SIZE], mdev->p_uuid[UI_FLAGS]);
 
 			return -1;
@@ -2806,10 +2841,10 @@
 
 	if (apv >= 88) {
 		if (apv == 88) {
-			if (data_size > SHARED_SECRET_MAX) {
-				dev_err(DEV, "verify-alg too long, "
-				    "peer wants %u, accepting only %u byte\n",
-						data_size, SHARED_SECRET_MAX);
+			if (data_size > SHARED_SECRET_MAX || data_size == 0) {
+				dev_err(DEV, "verify-alg of wrong size, "
+					"peer wants %u, accepting only up to %u byte\n",
+					data_size, SHARED_SECRET_MAX);
 				return false;
 			}
 
@@ -3168,9 +3203,20 @@
 	os = ns = mdev->state;
 	spin_unlock_irq(&mdev->req_lock);
 
-	/* peer says his disk is uptodate, while we think it is inconsistent,
-	 * and this happens while we think we have a sync going on. */
-	if (os.pdsk == D_INCONSISTENT && real_peer_disk == D_UP_TO_DATE &&
+	/* If some other part of the code (asender thread, timeout)
+	 * already decided to close the connection again,
+	 * we must not "re-establish" it here. */
+	if (os.conn <= C_TEAR_DOWN)
+		return false;
+
+	/* If this is the "end of sync" confirmation, usually the peer disk
+	 * transitions from D_INCONSISTENT to D_UP_TO_DATE. For empty (0 bits
+	 * set) resync started in PausedSyncT, or if the timing of pause-/
+	 * unpause-sync events has been "just right", the peer disk may
+	 * transition from D_CONSISTENT to D_UP_TO_DATE as well.
+	 */
+	if ((os.pdsk == D_INCONSISTENT || os.pdsk == D_CONSISTENT) &&
+	    real_peer_disk == D_UP_TO_DATE &&
 	    os.conn > C_CONNECTED && os.disk == D_UP_TO_DATE) {
 		/* If we are (becoming) SyncSource, but peer is still in sync
 		 * preparation, ignore its uptodate-ness to avoid flapping, it
@@ -3288,7 +3334,7 @@
 			/* Nowadays only used when forcing a node into primary role and
 			   setting its disk to UpToDate with that */
 			drbd_send_uuids(mdev);
-			drbd_send_state(mdev);
+			drbd_send_current_state(mdev);
 		}
 	}
 
@@ -3776,6 +3822,13 @@
 	if (mdev->state.conn == C_STANDALONE)
 		return;
 
+	/* We are about to start the cleanup after connection loss.
+	 * Make sure drbd_make_request knows about that.
+	 * Usually we should be in some network failure state already,
+	 * but just in case we are not, we fix it up here.
+	 */
+	drbd_force_state(mdev, NS(conn, C_NETWORK_FAILURE));
+
 	/* asender does not clean up anything. it must not interfere, either */
 	drbd_thread_stop(&mdev->asender);
 	drbd_free_sock(mdev);
@@ -3803,8 +3856,6 @@
 	atomic_set(&mdev->rs_pending_cnt, 0);
 	wake_up(&mdev->misc_wait);
 
-	del_timer(&mdev->request_timer);
-
 	/* make sure syncer is stopped and w_resume_next_sg queued */
 	del_timer_sync(&mdev->resync_timer);
 	resync_timer_fn((unsigned long)mdev);
@@ -4433,7 +4484,7 @@
 
 	if (mdev->state.conn == C_AHEAD &&
 	    atomic_read(&mdev->ap_in_flight) == 0 &&
-	    !test_and_set_bit(AHEAD_TO_SYNC_SOURCE, &mdev->current_epoch->flags)) {
+	    !test_and_set_bit(AHEAD_TO_SYNC_SOURCE, &mdev->flags)) {
 		mdev->start_resync_timer.expires = jiffies + HZ;
 		add_timer(&mdev->start_resync_timer);
 	}
diff --git a/drivers/block/drbd/drbd_req.c b/drivers/block/drbd/drbd_req.c
index 4a0f314..9c5c849 100644
--- a/drivers/block/drbd/drbd_req.c
+++ b/drivers/block/drbd/drbd_req.c
@@ -37,6 +37,7 @@
 	const int rw = bio_data_dir(bio);
 	int cpu;
 	cpu = part_stat_lock();
+	part_round_stats(cpu, &mdev->vdisk->part0);
 	part_stat_inc(cpu, &mdev->vdisk->part0, ios[rw]);
 	part_stat_add(cpu, &mdev->vdisk->part0, sectors[rw], bio_sectors(bio));
 	part_inc_in_flight(&mdev->vdisk->part0, rw);
@@ -214,8 +215,7 @@
 {
 	const unsigned long s = req->rq_state;
 	struct drbd_conf *mdev = req->mdev;
-	/* only WRITES may end up here without a master bio (on barrier ack) */
-	int rw = req->master_bio ? bio_data_dir(req->master_bio) : WRITE;
+	int rw = req->rq_state & RQ_WRITE ? WRITE : READ;
 
 	/* we must not complete the master bio, while it is
 	 *	still being processed by _drbd_send_zc_bio (drbd_send_dblock)
@@ -230,7 +230,7 @@
 		return;
 	if (s & RQ_NET_PENDING)
 		return;
-	if (s & RQ_LOCAL_PENDING)
+	if (s & RQ_LOCAL_PENDING && !(s & RQ_LOCAL_ABORTED))
 		return;
 
 	if (req->master_bio) {
@@ -277,6 +277,9 @@
 		req->master_bio = NULL;
 	}
 
+	if (s & RQ_LOCAL_PENDING)
+		return;
+
 	if ((s & RQ_NET_MASK) == 0 || (s & RQ_NET_DONE)) {
 		/* this is disconnected (local only) operation,
 		 * or protocol C P_WRITE_ACK,
@@ -429,7 +432,7 @@
 		break;
 
 	case completed_ok:
-		if (bio_data_dir(req->master_bio) == WRITE)
+		if (req->rq_state & RQ_WRITE)
 			mdev->writ_cnt += req->size>>9;
 		else
 			mdev->read_cnt += req->size>>9;
@@ -438,7 +441,14 @@
 		req->rq_state &= ~RQ_LOCAL_PENDING;
 
 		_req_may_be_done_not_susp(req, m);
-		put_ldev(mdev);
+		break;
+
+	case abort_disk_io:
+		req->rq_state |= RQ_LOCAL_ABORTED;
+		if (req->rq_state & RQ_WRITE)
+			_req_may_be_done_not_susp(req, m);
+		else
+			goto goto_queue_for_net_read;
 		break;
 
 	case write_completed_with_error:
@@ -447,7 +457,6 @@
 
 		__drbd_chk_io_error(mdev, false);
 		_req_may_be_done_not_susp(req, m);
-		put_ldev(mdev);
 		break;
 
 	case read_ahead_completed_with_error:
@@ -455,7 +464,6 @@
 		req->rq_state |= RQ_LOCAL_COMPLETED;
 		req->rq_state &= ~RQ_LOCAL_PENDING;
 		_req_may_be_done_not_susp(req, m);
-		put_ldev(mdev);
 		break;
 
 	case read_completed_with_error:
@@ -467,7 +475,8 @@
 		D_ASSERT(!(req->rq_state & RQ_NET_MASK));
 
 		__drbd_chk_io_error(mdev, false);
-		put_ldev(mdev);
+
+	goto_queue_for_net_read:
 
 		/* no point in retrying if there is no good remote data,
 		 * or we have no connection. */
@@ -556,10 +565,8 @@
 		drbd_queue_work(&mdev->data.work, &req->w);
 		break;
 
-	case oos_handed_to_network:
-		/* actually the same */
+	case read_retry_remote_canceled:
 	case send_canceled:
-		/* treat it the same */
 	case send_failed:
 		/* real cleanup will be done from tl_clear.  just update flags
 		 * so it is no longer marked as on the worker queue */
@@ -589,17 +596,17 @@
 		}
 		req->rq_state &= ~RQ_NET_QUEUED;
 		req->rq_state |= RQ_NET_SENT;
-		/* because _drbd_send_zc_bio could sleep, and may want to
-		 * dereference the bio even after the "write_acked_by_peer" and
-		 * "completed_ok" events came in, once we return from
-		 * _drbd_send_zc_bio (drbd_send_dblock), we have to check
-		 * whether it is done already, and end it.  */
 		_req_may_be_done_not_susp(req, m);
 		break;
 
-	case read_retry_remote_canceled:
+	case oos_handed_to_network:
+		/* Was not set PENDING, no longer QUEUED, so is now DONE
+		 * as far as this connection is concerned. */
 		req->rq_state &= ~RQ_NET_QUEUED;
-		/* fall through, in case we raced with drbd_disconnect */
+		req->rq_state |= RQ_NET_DONE;
+		_req_may_be_done_not_susp(req, m);
+		break;
+
 	case connection_lost_while_pending:
 		/* transfer log cleanup after connection loss */
 		/* assert something? */
@@ -616,8 +623,6 @@
 			_req_may_be_done(req, m); /* Allowed while state.susp */
 		break;
 
-	case write_acked_by_peer_and_sis:
-		req->rq_state |= RQ_NET_SIS;
 	case conflict_discarded_by_peer:
 		/* for discarded conflicting writes of multiple primaries,
 		 * there is no need to keep anything in the tl, potential
@@ -628,18 +633,15 @@
 			      (unsigned long long)req->sector, req->size);
 		req->rq_state |= RQ_NET_DONE;
 		/* fall through */
+	case write_acked_by_peer_and_sis:
 	case write_acked_by_peer:
+		if (what == write_acked_by_peer_and_sis)
+			req->rq_state |= RQ_NET_SIS;
 		/* protocol C; successfully written on peer.
-		 * Nothing to do here.
+		 * Nothing more to do here.
 		 * We want to keep the tl in place for all protocols, to cater
-		 * for volatile write-back caches on lower level devices.
-		 *
-		 * A barrier request is expected to have forced all prior
-		 * requests onto stable storage, so completion of a barrier
-		 * request could set NET_DONE right here, and not wait for the
-		 * P_BARRIER_ACK, but that is an unnecessary optimization. */
+		 * for volatile write-back caches on lower level devices. */
 
-		/* this makes it effectively the same as for: */
 	case recv_acked_by_peer:
 		/* protocol B; pretends to be successfully written on peer.
 		 * see also notes above in handed_over_to_network about
@@ -773,6 +775,7 @@
 	int local, remote, send_oos = 0;
 	int err = -EIO;
 	int ret = 0;
+	union drbd_state s;
 
 	/* allocate outside of all locks; */
 	req = drbd_req_new(mdev, bio);
@@ -834,8 +837,9 @@
 		drbd_al_begin_io(mdev, sector);
 	}
 
-	remote = remote && drbd_should_do_remote(mdev->state);
-	send_oos = rw == WRITE && drbd_should_send_oos(mdev->state);
+	s = mdev->state;
+	remote = remote && drbd_should_do_remote(s);
+	send_oos = rw == WRITE && drbd_should_send_oos(s);
 	D_ASSERT(!(remote && send_oos));
 
 	if (!(local || remote) && !is_susp(mdev->state)) {
@@ -867,7 +871,7 @@
 
 	if (is_susp(mdev->state)) {
 		/* If we got suspended, use the retry mechanism of
-		   generic_make_request() to restart processing of this
+		   drbd_make_request() to restart processing of this
 		   bio. In the next call to drbd_make_request
 		   we sleep in inc_ap_bio() */
 		ret = 1;
@@ -1091,7 +1095,6 @@
 	 */
 	D_ASSERT(bio->bi_size > 0);
 	D_ASSERT((bio->bi_size & 0x1ff) == 0);
-	D_ASSERT(bio->bi_idx == 0);
 
 	/* to make some things easier, force alignment of requests within the
 	 * granularity of our hash tables */
@@ -1099,8 +1102,9 @@
 	e_enr = (bio->bi_sector+(bio->bi_size>>9)-1) >> HT_SHIFT;
 
 	if (likely(s_enr == e_enr)) {
-		inc_ap_bio(mdev, 1);
-		drbd_make_request_common(mdev, bio, start_time);
+		do {
+			inc_ap_bio(mdev, 1);
+		} while (drbd_make_request_common(mdev, bio, start_time));
 		return;
 	}
 
@@ -1196,36 +1200,66 @@
 	struct drbd_conf *mdev = (struct drbd_conf *) data;
 	struct drbd_request *req; /* oldest request */
 	struct list_head *le;
-	unsigned long et = 0; /* effective timeout = ko_count * timeout */
+	unsigned long ent = 0, dt = 0, et, nt; /* effective timeout = ko_count * timeout */
+	unsigned long now;
 
 	if (get_net_conf(mdev)) {
-		et = mdev->net_conf->timeout*HZ/10 * mdev->net_conf->ko_count;
+		if (mdev->state.conn >= C_WF_REPORT_PARAMS)
+			ent = mdev->net_conf->timeout*HZ/10
+				* mdev->net_conf->ko_count;
 		put_net_conf(mdev);
 	}
-	if (!et || mdev->state.conn < C_WF_REPORT_PARAMS)
+	if (get_ldev(mdev)) { /* implicit state.disk >= D_INCONSISTENT */
+		dt = mdev->ldev->dc.disk_timeout * HZ / 10;
+		put_ldev(mdev);
+	}
+	et = min_not_zero(dt, ent);
+
+	if (!et)
 		return; /* Recurring timer stopped */
 
+	now = jiffies;
+
 	spin_lock_irq(&mdev->req_lock);
 	le = &mdev->oldest_tle->requests;
 	if (list_empty(le)) {
 		spin_unlock_irq(&mdev->req_lock);
-		mod_timer(&mdev->request_timer, jiffies + et);
+		mod_timer(&mdev->request_timer, now + et);
 		return;
 	}
 
 	le = le->prev;
 	req = list_entry(le, struct drbd_request, tl_requests);
-	if (time_is_before_eq_jiffies(req->start_time + et)) {
-		if (req->rq_state & RQ_NET_PENDING) {
-			dev_warn(DEV, "Remote failed to finish a request within ko-count * timeout\n");
-			_drbd_set_state(_NS(mdev, conn, C_TIMEOUT), CS_VERBOSE, NULL);
-		} else {
-			dev_warn(DEV, "Local backing block device frozen?\n");
-			mod_timer(&mdev->request_timer, jiffies + et);
-		}
-	} else {
-		mod_timer(&mdev->request_timer, req->start_time + et);
-	}
 
+	/* The request is considered timed out, if
+	 * - we have some effective timeout from the configuration,
+	 *   with above state restrictions applied,
+	 * - the oldest request is waiting for a response from the network
+	 *   resp. the local disk,
+	 * - the oldest request is in fact older than the effective timeout,
+	 * - the connection was established (resp. disk was attached)
+	 *   for longer than the timeout already.
+	 * Note that for 32bit jiffies and very stable connections/disks,
+	 * we may have a wrap around, which is caught by
+	 *   !time_in_range(now, last_..._jif, last_..._jif + timeout).
+	 *
+	 * Side effect: once per 32bit wrap-around interval, which means every
+	 * ~198 days with 250 HZ, we have a window where the timeout would need
+	 * to expire twice (worst case) to become effective. Good enough.
+	 */
+	if (ent && req->rq_state & RQ_NET_PENDING &&
+		 time_after(now, req->start_time + ent) &&
+		!time_in_range(now, mdev->last_reconnect_jif, mdev->last_reconnect_jif + ent)) {
+		dev_warn(DEV, "Remote failed to finish a request within ko-count * timeout\n");
+		_drbd_set_state(_NS(mdev, conn, C_TIMEOUT), CS_VERBOSE | CS_HARD, NULL);
+	}
+	if (dt && req->rq_state & RQ_LOCAL_PENDING &&
+		 time_after(now, req->start_time + dt) &&
+		!time_in_range(now, mdev->last_reattach_jif, mdev->last_reattach_jif + dt)) {
+		dev_warn(DEV, "Local backing device failed to meet the disk-timeout\n");
+		__drbd_chk_io_error(mdev, 1);
+	}
+	nt = (time_after(now, req->start_time + et) ? now : req->start_time) + et;
 	spin_unlock_irq(&mdev->req_lock);
+	mod_timer(&mdev->request_timer, nt);
 }
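
The rewritten request_timer_fn() above derives two independent deadlines, the network ko-count timeout and the disk timeout, keeps the smaller non-zero one, and re-arms the timer relative to the oldest request unless that request already expired. The following is a small user-space sketch of that selection and of the nt computation; min_not_zero and time_after are re-implemented here just for the demo, and the jiffies values are made-up examples, not DRBD's.

#include <stdio.h>

static unsigned long min_not_zero_ul(unsigned long a, unsigned long b)
{
	if (!a)
		return b;
	if (!b)
		return a;
	return a < b ? a : b;
}

static int time_after_ul(unsigned long a, unsigned long b)
{
	return (long)(b - a) < 0;	/* same idea as the kernel's time_after() */
}

int main(void)
{
	const unsigned long HZ = 250;
	unsigned long ent = 60 * HZ / 10 * 7;	/* net timeout 6s, ko-count 7 */
	unsigned long dt  = 0;			/* no disk-timeout configured */
	unsigned long et  = min_not_zero_ul(dt, ent);

	unsigned long now = 1000000;
	unsigned long oldest_start = 995000;	/* oldest request's start_time */
	unsigned long nt;

	if (!et) {
		puts("no timeout configured, timer stopped");
		return 0;
	}

	/* re-arm relative to the oldest request unless it already expired,
	 * mirroring the nt computation in request_timer_fn() */
	nt = (time_after_ul(now, oldest_start + et) ? now : oldest_start) + et;
	printf("effective timeout: %lu jiffies, next timer at %lu\n", et, nt);
	return 0;
}
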
diff --git a/drivers/block/drbd/drbd_req.h b/drivers/block/drbd/drbd_req.h
index 68a234a..3d21119 100644
--- a/drivers/block/drbd/drbd_req.h
+++ b/drivers/block/drbd/drbd_req.h
@@ -105,6 +105,7 @@
 	read_completed_with_error,
 	read_ahead_completed_with_error,
 	write_completed_with_error,
+	abort_disk_io,
 	completed_ok,
 	resend,
 	fail_frozen_disk_io,
@@ -118,18 +119,21 @@
  * same time, so we should hold the request lock anyways.
  */
 enum drbd_req_state_bits {
-	/* 210
-	 * 000: no local possible
-	 * 001: to be submitted
+	/* 3210
+	 * 0000: no local possible
+	 * 0001: to be submitted
 	 *    UNUSED, we could map: 011: submitted, completion still pending
-	 * 110: completed ok
-	 * 010: completed with error
+	 * 0110: completed ok
+	 * 0010: completed with error
+	 * 1001: Aborted (before completion)
+	 * 1x10: Aborted and completed -> free
 	 */
 	__RQ_LOCAL_PENDING,
 	__RQ_LOCAL_COMPLETED,
 	__RQ_LOCAL_OK,
+	__RQ_LOCAL_ABORTED,
 
-	/* 76543
+	/* 87654
 	 * 00000: no network possible
 	 * 00001: to be send
 	 * 00011: to be send, on worker queue
@@ -199,8 +203,9 @@
 #define RQ_LOCAL_PENDING   (1UL << __RQ_LOCAL_PENDING)
 #define RQ_LOCAL_COMPLETED (1UL << __RQ_LOCAL_COMPLETED)
 #define RQ_LOCAL_OK        (1UL << __RQ_LOCAL_OK)
+#define RQ_LOCAL_ABORTED   (1UL << __RQ_LOCAL_ABORTED)
 
-#define RQ_LOCAL_MASK      ((RQ_LOCAL_OK << 1)-1) /* 0x07 */
+#define RQ_LOCAL_MASK      ((RQ_LOCAL_ABORTED << 1)-1)
 
 #define RQ_NET_PENDING     (1UL << __RQ_NET_PENDING)
 #define RQ_NET_QUEUED      (1UL << __RQ_NET_QUEUED)
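
With __RQ_LOCAL_ABORTED added, RQ_LOCAL_MASK is rebuilt as "highest local flag shifted left once, minus one", which covers every lower bit without hard-coding 0x0f. A stand-alone model of that construction, with the enum values re-declared here purely for the demonstration:

#include <stdio.h>

enum {
	__RQ_LOCAL_PENDING,
	__RQ_LOCAL_COMPLETED,
	__RQ_LOCAL_OK,
	__RQ_LOCAL_ABORTED,
};
#define RQ_LOCAL_PENDING   (1UL << __RQ_LOCAL_PENDING)
#define RQ_LOCAL_ABORTED   (1UL << __RQ_LOCAL_ABORTED)
/* highest flag shifted once, minus one: mask of all local bits */
#define RQ_LOCAL_MASK      ((RQ_LOCAL_ABORTED << 1) - 1)

int main(void)
{
	printf("RQ_LOCAL_ABORTED = 0x%lx\n", RQ_LOCAL_ABORTED);	/* 0x8 */
	printf("RQ_LOCAL_MASK    = 0x%lx\n", RQ_LOCAL_MASK);	/* 0xf */
	return 0;
}
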
diff --git a/drivers/block/drbd/drbd_worker.c b/drivers/block/drbd/drbd_worker.c
index 4d3e6f6..620c70f 100644
--- a/drivers/block/drbd/drbd_worker.c
+++ b/drivers/block/drbd/drbd_worker.c
@@ -70,11 +70,29 @@
 void drbd_md_io_complete(struct bio *bio, int error)
 {
 	struct drbd_md_io *md_io;
+	struct drbd_conf *mdev;
 
 	md_io = (struct drbd_md_io *)bio->bi_private;
+	mdev = container_of(md_io, struct drbd_conf, md_io);
+
 	md_io->error = error;
 
-	complete(&md_io->event);
+	/* We grabbed an extra reference in _drbd_md_sync_page_io() to be able
+	 * to timeout on the lower level device, and eventually detach from it.
+	 * If this io completion runs after that timeout expired, this
+	 * drbd_md_put_buffer() may allow us to finally try and re-attach.
+	 * During normal operation, this only puts that extra reference
+	 * down to 1 again.
+	 * Make sure we first drop the reference, and only then signal
+	 * completion, or we may (in drbd_al_read_log()) cycle so fast into the
+	 * next drbd_md_sync_page_io(), that we trigger the
+	 * ASSERT(atomic_read(&mdev->md_io_in_use) == 1) there.
+	 */
+	drbd_md_put_buffer(mdev);
+	md_io->done = 1;
+	wake_up(&mdev->misc_wait);
+	bio_put(bio);
+	put_ldev(mdev);
 }
 
 /* reads on behalf of the partner,
@@ -226,6 +244,7 @@
 	spin_lock_irqsave(&mdev->req_lock, flags);
 	__req_mod(req, what, &m);
 	spin_unlock_irqrestore(&mdev->req_lock, flags);
+	put_ldev(mdev);
 
 	if (m.bio)
 		complete_master_bio(mdev, &m);
@@ -290,7 +309,7 @@
 	sg_init_table(&sg, 1);
 	crypto_hash_init(&desc);
 
-	__bio_for_each_segment(bvec, bio, i, 0) {
+	bio_for_each_segment(bvec, bio, i) {
 		sg_set_page(&sg, bvec->bv_page, bvec->bv_len, bvec->bv_offset);
 		crypto_hash_update(&desc, &sg, sg.length);
 	}
@@ -728,7 +747,7 @@
 	}
 
 	drbd_start_resync(mdev, C_SYNC_SOURCE);
-	clear_bit(AHEAD_TO_SYNC_SOURCE, &mdev->current_epoch->flags);
+	clear_bit(AHEAD_TO_SYNC_SOURCE, &mdev->flags);
 	return 1;
 }
 
@@ -1519,14 +1538,14 @@
 	}
 
 	drbd_state_lock(mdev);
-
+	write_lock_irq(&global_state_lock);
 	if (!get_ldev_if_state(mdev, D_NEGOTIATING)) {
+		write_unlock_irq(&global_state_lock);
 		drbd_state_unlock(mdev);
 		return;
 	}
 
-	write_lock_irq(&global_state_lock);
-	ns = mdev->state;
+	ns.i = mdev->state.i;
 
 	ns.aftr_isp = !_drbd_may_sync_now(mdev);
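
The drbd_md_io_complete() change above hinges on ordering: the extra buffer reference is dropped before "done" is set and the waiter is woken, so a fast re-issue path never observes the reference still held. Below is a minimal single-threaded model of that ordering; the names mirror the driver but the counters are plain C11 atomics, not the kernel objects.

#include <stdatomic.h>
#include <stdio.h>

/* model of drbd_md_io_complete(): put the extra reference first,
 * only then mark the I/O done, so the waiter's check that exactly
 * one reference remains cannot race with the completion path. */
static atomic_int md_io_in_use = 2;	/* 1 owner + 1 I/O in flight */
static atomic_int done;

static void md_io_complete(void)
{
	atomic_fetch_sub(&md_io_in_use, 1);	/* drbd_md_put_buffer() */
	atomic_store(&done, 1);			/* md_io->done = 1 */
	/* wake_up(&mdev->misc_wait) would follow here */
}

int main(void)
{
	md_io_complete();
	if (atomic_load(&done) && atomic_load(&md_io_in_use) == 1)
		puts("safe to start the next metadata I/O");
	return 0;
}
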
 
diff --git a/drivers/block/floppy.c b/drivers/block/floppy.c
index b0b00d7..cce7df3 100644
--- a/drivers/block/floppy.c
+++ b/drivers/block/floppy.c
@@ -551,7 +551,7 @@
 static void floppy_start(void);
 static void process_fd_request(void);
 static void recalibrate_floppy(void);
-static void floppy_shutdown(unsigned long);
+static void floppy_shutdown(struct work_struct *);
 
 static int floppy_request_regions(int);
 static void floppy_release_regions(int);
@@ -588,6 +588,8 @@
 static struct floppy_fdc_state fdc_state[N_FDC];
 static int fdc;			/* current fdc */
 
+static struct workqueue_struct *floppy_wq;
+
 static struct floppy_struct *_floppy = floppy_type;
 static unsigned char current_drive;
 static long current_count_sectors;
@@ -629,16 +631,15 @@
 static inline void debugt(const char *func, const char *msg) { }
 #endif /* DEBUGT */
 
-typedef void (*timeout_fn)(unsigned long);
-static DEFINE_TIMER(fd_timeout, floppy_shutdown, 0, 0);
 
+static DECLARE_DELAYED_WORK(fd_timeout, floppy_shutdown);
 static const char *timeout_message;
 
 static void is_alive(const char *func, const char *message)
 {
 	/* this routine checks whether the floppy driver is "alive" */
 	if (test_bit(0, &fdc_busy) && command_status < 2 &&
-	    !timer_pending(&fd_timeout)) {
+	    !delayed_work_pending(&fd_timeout)) {
 		DPRINT("%s: timeout handler died.  %s\n", func, message);
 	}
 }
@@ -666,15 +667,18 @@
 
 static void __reschedule_timeout(int drive, const char *message)
 {
+	unsigned long delay;
+
 	if (drive == current_reqD)
 		drive = current_drive;
-	del_timer(&fd_timeout);
+
 	if (drive < 0 || drive >= N_DRIVE) {
-		fd_timeout.expires = jiffies + 20UL * HZ;
+		delay = 20UL * HZ;
 		drive = 0;
 	} else
-		fd_timeout.expires = jiffies + UDP->timeout;
-	add_timer(&fd_timeout);
+		delay = UDP->timeout;
+
+	queue_delayed_work(floppy_wq, &fd_timeout, delay);
 	if (UDP->flags & FD_DEBUG)
 		DPRINT("reschedule timeout %s\n", message);
 	timeout_message = message;
@@ -872,7 +876,7 @@
 
 	command_status = FD_COMMAND_NONE;
 
-	__reschedule_timeout(drive, "lock fdc");
+	reschedule_timeout(drive, "lock fdc");
 	set_fdc(drive);
 	return 0;
 }
@@ -880,23 +884,15 @@
 /* unlocks the driver */
 static void unlock_fdc(void)
 {
-	unsigned long flags;
-
-	raw_cmd = NULL;
 	if (!test_bit(0, &fdc_busy))
 		DPRINT("FDC access conflict!\n");
 
-	if (do_floppy)
-		DPRINT("device interrupt still active at FDC release: %pf!\n",
-		       do_floppy);
+	raw_cmd = NULL;
 	command_status = FD_COMMAND_NONE;
-	spin_lock_irqsave(&floppy_lock, flags);
-	del_timer(&fd_timeout);
+	__cancel_delayed_work(&fd_timeout);
+	do_floppy = NULL;
 	cont = NULL;
 	clear_bit(0, &fdc_busy);
-	if (current_req || set_next_request())
-		do_fd_request(current_req->q);
-	spin_unlock_irqrestore(&floppy_lock, flags);
 	wake_up(&fdc_wait);
 }
 
@@ -968,26 +964,24 @@
 
 static void schedule_bh(void (*handler)(void))
 {
+	WARN_ON(work_pending(&floppy_work));
+
 	PREPARE_WORK(&floppy_work, (work_func_t)handler);
-	schedule_work(&floppy_work);
+	queue_work(floppy_wq, &floppy_work);
 }
 
-static DEFINE_TIMER(fd_timer, NULL, 0, 0);
+static DECLARE_DELAYED_WORK(fd_timer, NULL);
 
 static void cancel_activity(void)
 {
-	unsigned long flags;
-
-	spin_lock_irqsave(&floppy_lock, flags);
 	do_floppy = NULL;
-	PREPARE_WORK(&floppy_work, (work_func_t)empty);
-	del_timer(&fd_timer);
-	spin_unlock_irqrestore(&floppy_lock, flags);
+	cancel_delayed_work_sync(&fd_timer);
+	cancel_work_sync(&floppy_work);
 }
 
 /* this function makes sure that the disk stays in the drive during the
  * transfer */
-static void fd_watchdog(void)
+static void fd_watchdog(struct work_struct *arg)
 {
 	debug_dcl(DP->flags, "calling disk change from watchdog\n");
 
@@ -997,21 +991,20 @@
 		cont->done(0);
 		reset_fdc();
 	} else {
-		del_timer(&fd_timer);
-		fd_timer.function = (timeout_fn)fd_watchdog;
-		fd_timer.expires = jiffies + HZ / 10;
-		add_timer(&fd_timer);
+		cancel_delayed_work(&fd_timer);
+		PREPARE_DELAYED_WORK(&fd_timer, fd_watchdog);
+		queue_delayed_work(floppy_wq, &fd_timer, HZ / 10);
 	}
 }
 
 static void main_command_interrupt(void)
 {
-	del_timer(&fd_timer);
+	cancel_delayed_work(&fd_timer);
 	cont->interrupt();
 }
 
 /* waits for a delay (spinup or select) to pass */
-static int fd_wait_for_completion(unsigned long delay, timeout_fn function)
+static int fd_wait_for_completion(unsigned long expires, work_func_t function)
 {
 	if (FDCS->reset) {
 		reset_fdc();	/* do the reset during sleep to win time
@@ -1020,11 +1013,10 @@
 		return 1;
 	}
 
-	if (time_before(jiffies, delay)) {
-		del_timer(&fd_timer);
-		fd_timer.function = function;
-		fd_timer.expires = delay;
-		add_timer(&fd_timer);
+	if (time_before(jiffies, expires)) {
+		cancel_delayed_work(&fd_timer);
+		PREPARE_DELAYED_WORK(&fd_timer, function);
+		queue_delayed_work(floppy_wq, &fd_timer, expires - jiffies);
 		return 1;
 	}
 	return 0;
@@ -1342,7 +1334,7 @@
 	 */
 	FDCS->dtr = raw_cmd->rate & 3;
 	return fd_wait_for_completion(jiffies + 2UL * HZ / 100,
-				      (timeout_fn)floppy_ready);
+				      (work_func_t)floppy_ready);
 }				/* fdc_dtr */
 
 static void tell_sector(void)
@@ -1447,7 +1439,7 @@
 	int flags;
 	int dflags;
 	unsigned long ready_date;
-	timeout_fn function;
+	work_func_t function;
 
 	flags = raw_cmd->flags;
 	if (flags & (FD_RAW_READ | FD_RAW_WRITE))
@@ -1461,9 +1453,9 @@
 		 */
 		if (time_after(ready_date, jiffies + DP->select_delay)) {
 			ready_date -= DP->select_delay;
-			function = (timeout_fn)floppy_start;
+			function = (work_func_t)floppy_start;
 		} else
-			function = (timeout_fn)setup_rw_floppy;
+			function = (work_func_t)setup_rw_floppy;
 
 		/* wait until the floppy is spinning fast enough */
 		if (fd_wait_for_completion(ready_date, function))
@@ -1493,7 +1485,7 @@
 		inr = result();
 		cont->interrupt();
 	} else if (flags & FD_RAW_NEED_DISK)
-		fd_watchdog();
+		fd_watchdog(NULL);
 }
 
 static int blind_seek;
@@ -1802,20 +1794,22 @@
 		pr_info("do_floppy=%pf\n", do_floppy);
 	if (work_pending(&floppy_work))
 		pr_info("floppy_work.func=%pf\n", floppy_work.func);
-	if (timer_pending(&fd_timer))
-		pr_info("fd_timer.function=%pf\n", fd_timer.function);
-	if (timer_pending(&fd_timeout)) {
-		pr_info("timer_function=%pf\n", fd_timeout.function);
-		pr_info("expires=%lu\n", fd_timeout.expires - jiffies);
-		pr_info("now=%lu\n", jiffies);
-	}
+	if (delayed_work_pending(&fd_timer))
+		pr_info("delayed work.function=%p expires=%ld\n",
+		       fd_timer.work.func,
+		       fd_timer.timer.expires - jiffies);
+	if (delayed_work_pending(&fd_timeout))
+		pr_info("timer_function=%p expires=%ld\n",
+		       fd_timeout.work.func,
+		       fd_timeout.timer.expires - jiffies);
+
 	pr_info("cont=%p\n", cont);
 	pr_info("current_req=%p\n", current_req);
 	pr_info("command_status=%d\n", command_status);
 	pr_info("\n");
 }
 
-static void floppy_shutdown(unsigned long data)
+static void floppy_shutdown(struct work_struct *arg)
 {
 	unsigned long flags;
 
@@ -1868,7 +1862,7 @@
 
 	/* wait_for_completion also schedules reset if needed. */
 	return fd_wait_for_completion(DRS->select_date + DP->select_delay,
-				      (timeout_fn)function);
+				      (work_func_t)function);
 }
 
 static void floppy_ready(void)
@@ -2821,7 +2815,6 @@
 		spin_lock_irq(&floppy_lock);
 		pending = set_next_request();
 		spin_unlock_irq(&floppy_lock);
-
 		if (!pending) {
 			do_floppy = NULL;
 			unlock_fdc();
@@ -2898,13 +2891,15 @@
 		 current_req->cmd_flags))
 		return;
 
-	if (test_bit(0, &fdc_busy)) {
+	if (test_and_set_bit(0, &fdc_busy)) {
 		/* fdc busy, this new request will be treated when the
 		   current one is done */
 		is_alive(__func__, "old request running");
 		return;
 	}
-	lock_fdc(MAXTIMEOUT, false);
+	command_status = FD_COMMAND_NONE;
+	__reschedule_timeout(MAXTIMEOUT, "fd_request");
+	set_fdc(0);
 	process_fd_request();
 	is_alive(__func__, "");
 }
@@ -3612,9 +3607,7 @@
 
 	mutex_lock(&floppy_mutex);
 	mutex_lock(&open_lock);
-	if (UDRS->fd_ref < 0)
-		UDRS->fd_ref = 0;
-	else if (!UDRS->fd_ref--) {
+	if (!UDRS->fd_ref--) {
 		DPRINT("floppy_release with fd_ref == 0");
 		UDRS->fd_ref = 0;
 	}
@@ -3650,13 +3643,7 @@
 		set_bit(FD_VERIFY_BIT, &UDRS->flags);
 	}
 
-	if (UDRS->fd_ref == -1 || (UDRS->fd_ref && (mode & FMODE_EXCL)))
-		goto out2;
-
-	if (mode & FMODE_EXCL)
-		UDRS->fd_ref = -1;
-	else
-		UDRS->fd_ref++;
+	UDRS->fd_ref++;
 
 	opened_bdev[drive] = bdev;
 
@@ -3719,10 +3706,8 @@
 	mutex_unlock(&floppy_mutex);
 	return 0;
 out:
-	if (UDRS->fd_ref < 0)
-		UDRS->fd_ref = 0;
-	else
-		UDRS->fd_ref--;
+	UDRS->fd_ref--;
+
 	if (!UDRS->fd_ref)
 		opened_bdev[drive] = NULL;
 out2:
@@ -4159,10 +4144,16 @@
 			goto out_put_disk;
 		}
 
+		floppy_wq = alloc_ordered_workqueue("floppy", 0);
+		if (!floppy_wq) {
+			err = -ENOMEM;
+			goto out_put_disk;
+		}
+
 		disks[dr]->queue = blk_init_queue(do_fd_request, &floppy_lock);
 		if (!disks[dr]->queue) {
 			err = -ENOMEM;
-			goto out_put_disk;
+			goto out_destroy_workq;
 		}
 
 		blk_queue_max_hw_sectors(disks[dr]->queue, 64);
@@ -4213,7 +4204,7 @@
 	use_virtual_dma = can_use_virtual_dma & 1;
 	fdc_state[0].address = FDC1;
 	if (fdc_state[0].address == -1) {
-		del_timer_sync(&fd_timeout);
+		cancel_delayed_work(&fd_timeout);
 		err = -ENODEV;
 		goto out_unreg_region;
 	}
@@ -4224,7 +4215,7 @@
 	fdc = 0;		/* reset fdc in case of unexpected interrupt */
 	err = floppy_grab_irq_and_dma();
 	if (err) {
-		del_timer_sync(&fd_timeout);
+		cancel_delayed_work(&fd_timeout);
 		err = -EBUSY;
 		goto out_unreg_region;
 	}
@@ -4281,13 +4272,13 @@
 		user_reset_fdc(-1, FD_RESET_ALWAYS, false);
 	}
 	fdc = 0;
-	del_timer_sync(&fd_timeout);
+	cancel_delayed_work(&fd_timeout);
 	current_drive = 0;
 	initialized = true;
 	if (have_no_fdc) {
 		DPRINT("no floppy controllers found\n");
 		err = have_no_fdc;
-		goto out_flush_work;
+		goto out_release_dma;
 	}
 
 	for (drive = 0; drive < N_DRIVE; drive++) {
@@ -4302,7 +4293,7 @@
 
 		err = platform_device_register(&floppy_device[drive]);
 		if (err)
-			goto out_flush_work;
+			goto out_release_dma;
 
 		err = device_create_file(&floppy_device[drive].dev,
 					 &dev_attr_cmos);
@@ -4320,13 +4311,14 @@
 
 out_unreg_platform_dev:
 	platform_device_unregister(&floppy_device[drive]);
-out_flush_work:
-	flush_work_sync(&floppy_work);
+out_release_dma:
 	if (atomic_read(&usage_count))
 		floppy_release_irq_and_dma();
 out_unreg_region:
 	blk_unregister_region(MKDEV(FLOPPY_MAJOR, 0), 256);
 	platform_driver_unregister(&floppy_driver);
+out_destroy_workq:
+	destroy_workqueue(floppy_wq);
 out_unreg_blkdev:
 	unregister_blkdev(FLOPPY_MAJOR, "fd");
 out_put_disk:
@@ -4397,7 +4389,7 @@
 	 * We might have scheduled a free_irq(), wait it to
 	 * drain first:
 	 */
-	flush_work_sync(&floppy_work);
+	flush_workqueue(floppy_wq);
 
 	if (fd_request_irq()) {
 		DPRINT("Unable to grab IRQ%d for the floppy driver\n",
@@ -4488,9 +4480,9 @@
 			pr_info("motor off timer %d still active\n", drive);
 #endif
 
-	if (timer_pending(&fd_timeout))
+	if (delayed_work_pending(&fd_timeout))
 		pr_info("floppy timer still active:%s\n", timeout_message);
-	if (timer_pending(&fd_timer))
+	if (delayed_work_pending(&fd_timer))
 		pr_info("auxiliary floppy timer still active\n");
 	if (work_pending(&floppy_work))
 		pr_info("work still pending\n");
@@ -4560,8 +4552,9 @@
 		put_disk(disks[drive]);
 	}
 
-	del_timer_sync(&fd_timeout);
-	del_timer_sync(&fd_timer);
+	cancel_delayed_work_sync(&fd_timeout);
+	cancel_delayed_work_sync(&fd_timer);
+	destroy_workqueue(floppy_wq);
 
 	if (atomic_read(&usage_count))
 		floppy_release_irq_and_dma();
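
The floppy conversion above replaces bare kernel timers with delayed work queued on one ordered workqueue, so the timeout handler and the regular work items are serialized on a single thread. The following is a minimal, hypothetical out-of-tree module sketching that pattern with the same APIs the patch uses; "demo" names are invented for illustration.

/* sketch: timer -> delayed work on a dedicated ordered workqueue */
#include <linux/module.h>
#include <linux/workqueue.h>
#include <linux/jiffies.h>

static struct workqueue_struct *demo_wq;

static void demo_timeout(struct work_struct *work)
{
	pr_info("demo: timeout fired\n");
}
static DECLARE_DELAYED_WORK(demo_timeout_work, demo_timeout);

static int __init demo_init(void)
{
	demo_wq = alloc_ordered_workqueue("demo", 0);
	if (!demo_wq)
		return -ENOMEM;
	/* equivalent of mod_timer(..., jiffies + 20 * HZ) */
	queue_delayed_work(demo_wq, &demo_timeout_work, 20 * HZ);
	return 0;
}

static void __exit demo_exit(void)
{
	cancel_delayed_work_sync(&demo_timeout_work);
	destroy_workqueue(demo_wq);
}

module_init(demo_init);
module_exit(demo_exit);
MODULE_LICENSE("GPL");
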
diff --git a/drivers/block/mtip32xx/mtip32xx.c b/drivers/block/mtip32xx/mtip32xx.c
index 304000c..264bc77 100644
--- a/drivers/block/mtip32xx/mtip32xx.c
+++ b/drivers/block/mtip32xx/mtip32xx.c
@@ -294,18 +294,16 @@
  */
 static inline void mtip_issue_ncq_command(struct mtip_port *port, int tag)
 {
-	unsigned long flags = 0;
-
 	atomic_set(&port->commands[tag].active, 1);
 
-	spin_lock_irqsave(&port->cmd_issue_lock, flags);
+	spin_lock(&port->cmd_issue_lock);
 
 	writel((1 << MTIP_TAG_BIT(tag)),
 			port->s_active[MTIP_TAG_INDEX(tag)]);
 	writel((1 << MTIP_TAG_BIT(tag)),
 			port->cmd_issue[MTIP_TAG_INDEX(tag)]);
 
-	spin_unlock_irqrestore(&port->cmd_issue_lock, flags);
+	spin_unlock(&port->cmd_issue_lock);
 
 	/* Set the command's timeout value.*/
 	port->commands[tag].comp_time = jiffies + msecs_to_jiffies(
@@ -436,8 +434,7 @@
 		writel(0xFFFFFFFF, port->completed[i]);
 
 	/* Clear any pending interrupts for this port */
-	writel(readl(port->dd->mmio + PORT_IRQ_STAT),
-					port->dd->mmio + PORT_IRQ_STAT);
+	writel(readl(port->mmio + PORT_IRQ_STAT), port->mmio + PORT_IRQ_STAT);
 
 	/* Clear any pending interrupts on the HBA. */
 	writel(readl(port->dd->mmio + HOST_IRQ_STAT),
@@ -782,13 +779,24 @@
 
 	/* Stop the timer to prevent command timeouts. */
 	del_timer(&port->cmd_timer);
+	set_bit(MTIP_PF_EH_ACTIVE_BIT, &port->flags);
+
+	if (test_bit(MTIP_PF_IC_ACTIVE_BIT, &port->flags) &&
+			test_bit(MTIP_TAG_INTERNAL, port->allocated)) {
+		cmd = &port->commands[MTIP_TAG_INTERNAL];
+		dbg_printk(MTIP_DRV_NAME " TFE for the internal command\n");
+
+		atomic_inc(&cmd->active); /* active > 1 indicates error */
+		if (cmd->comp_data && cmd->comp_func) {
+			cmd->comp_func(port, MTIP_TAG_INTERNAL,
+					cmd->comp_data, PORT_IRQ_TF_ERR);
+		}
+		goto handle_tfe_exit;
+	}
 
 	/* clear the tag accumulator */
 	memset(tagaccum, 0, SLOTBITS_IN_LONGS * sizeof(long));
 
-	/* Set eh_active */
-	set_bit(MTIP_PF_EH_ACTIVE_BIT, &port->flags);
-
 	/* Loop through all the groups */
 	for (group = 0; group < dd->slot_groups; group++) {
 		completed = readl(port->completed[group]);
@@ -940,6 +948,7 @@
 	}
 	print_tags(dd, "reissued (TFE)", tagaccum, cmd_cnt);
 
+handle_tfe_exit:
 	/* clear eh_active */
 	clear_bit(MTIP_PF_EH_ACTIVE_BIT, &port->flags);
 	wake_up_interruptible(&port->svc_wait);
@@ -961,6 +970,8 @@
 	/* walk all bits in all slot groups */
 	for (group = 0; group < dd->slot_groups; group++) {
 		completed = readl(port->completed[group]);
+		if (!completed)
+			continue;
 
 		/* clear completed status register in the hardware.*/
 		writel(completed, port->completed[group]);
@@ -1329,22 +1340,6 @@
 			}
 			rv = -EAGAIN;
 		}
-
-		if (readl(port->cmd_issue[MTIP_TAG_INTERNAL])
-			& (1 << MTIP_TAG_INTERNAL)) {
-			dev_warn(&port->dd->pdev->dev,
-				"Retiring internal command but CI is 1.\n");
-			if (test_bit(MTIP_DDF_REMOVE_PENDING_BIT,
-						&port->dd->dd_flag)) {
-				hba_reset_nosleep(port->dd);
-				rv = -ENXIO;
-			} else {
-				mtip_restart_port(port);
-				rv = -EAGAIN;
-			}
-			goto exec_ic_exit;
-		}
-
 	} else {
 		/* Spin for <timeout> checking if command still outstanding */
 		timeout = jiffies + msecs_to_jiffies(timeout);
@@ -1361,23 +1356,27 @@
 				rv = -ENXIO;
 				goto exec_ic_exit;
 			}
-		}
-
-		if (readl(port->cmd_issue[MTIP_TAG_INTERNAL])
-			& (1 << MTIP_TAG_INTERNAL)) {
-			dev_err(&port->dd->pdev->dev,
-				"Internal command did not complete [atomic]\n");
-			rv = -EAGAIN;
-			if (test_bit(MTIP_DDF_REMOVE_PENDING_BIT,
-						&port->dd->dd_flag)) {
-				hba_reset_nosleep(port->dd);
-				rv = -ENXIO;
-			} else {
-				mtip_restart_port(port);
-				rv = -EAGAIN;
+			if (readl(port->mmio + PORT_IRQ_STAT) & PORT_IRQ_ERR) {
+				atomic_inc(&int_cmd->active); /* error */
+				break;
 			}
 		}
 	}
+
+	if (atomic_read(&int_cmd->active) > 1) {
+		dev_err(&port->dd->pdev->dev,
+			"Internal command [%02X] failed\n", fis->command);
+		rv = -EIO;
+	}
+	if (readl(port->cmd_issue[MTIP_TAG_INTERNAL])
+			& (1 << MTIP_TAG_INTERNAL)) {
+		rv = -ENXIO;
+		if (!test_bit(MTIP_DDF_REMOVE_PENDING_BIT,
+					&port->dd->dd_flag)) {
+			mtip_restart_port(port);
+			rv = -EAGAIN;
+		}
+	}
 exec_ic_exit:
 	/* Clear the allocated and active bits for the internal command. */
 	atomic_set(&int_cmd->active, 0);
@@ -1893,13 +1892,33 @@
 				void __user *user_buffer)
 {
 	struct host_to_dev_fis	fis;
-	struct host_to_dev_fis *reply = (port->rxfis + RX_FIS_D2H_REG);
+	struct host_to_dev_fis *reply;
+	u8 *buf = NULL;
+	dma_addr_t dma_addr = 0;
+	int rv = 0, xfer_sz = command[3];
+
+	if (xfer_sz) {
+		if (user_buffer)
+			return -EFAULT;
+
+		buf = dmam_alloc_coherent(&port->dd->pdev->dev,
+				ATA_SECT_SIZE * xfer_sz,
+				&dma_addr,
+				GFP_KERNEL);
+		if (!buf) {
+			dev_err(&port->dd->pdev->dev,
+				"Memory allocation failed (%d bytes)\n",
+				ATA_SECT_SIZE * xfer_sz);
+			return -ENOMEM;
+		}
+		memset(buf, 0, ATA_SECT_SIZE * xfer_sz);
+	}
 
 	/* Build the FIS. */
 	memset(&fis, 0, sizeof(struct host_to_dev_fis));
-	fis.type		= 0x27;
-	fis.opts		= 1 << 7;
-	fis.command		= command[0];
+	fis.type	= 0x27;
+	fis.opts	= 1 << 7;
+	fis.command	= command[0];
 	fis.features	= command[2];
 	fis.sect_count	= command[3];
 	if (fis.command == ATA_CMD_SMART) {
@@ -1908,6 +1927,11 @@
 		fis.cyl_hi	= 0xC2;
 	}
 
+	if (xfer_sz)
+		reply = (port->rxfis + RX_FIS_PIO_SETUP);
+	else
+		reply = (port->rxfis + RX_FIS_D2H_REG);
+
 	dbg_printk(MTIP_DRV_NAME
 		" %s: User Command: cmd %x, sect %x, "
 		"feat %x, sectcnt %x\n",
@@ -1917,43 +1941,46 @@
 		command[2],
 		command[3]);
 
-	memset(port->sector_buffer, 0x00, ATA_SECT_SIZE);
-
 	/* Execute the command. */
 	if (mtip_exec_internal_command(port,
 				&fis,
 				 5,
-				 port->sector_buffer_dma,
-				 (command[3] != 0) ? ATA_SECT_SIZE : 0,
+				 (xfer_sz ? dma_addr : 0),
+				 (xfer_sz ? ATA_SECT_SIZE * xfer_sz : 0),
 				 0,
 				 GFP_KERNEL,
 				 MTIP_IOCTL_COMMAND_TIMEOUT_MS)
 				 < 0) {
-		return -1;
+		rv = -EFAULT;
+		goto exit_drive_command;
 	}
 
 	/* Collect the completion status. */
 	command[0] = reply->command; /* Status*/
 	command[1] = reply->features; /* Error*/
-	command[2] = command[3];
+	command[2] = reply->sect_count;
 
 	dbg_printk(MTIP_DRV_NAME
 		" %s: Completion Status: stat %x, "
-		"err %x, cmd %x\n",
+		"err %x, nsect %x\n",
 		__func__,
 		command[0],
 		command[1],
 		command[2]);
 
-	if (user_buffer && command[3]) {
+	if (xfer_sz) {
 		if (copy_to_user(user_buffer,
-				 port->sector_buffer,
+				 buf,
 				 ATA_SECT_SIZE * command[3])) {
-			return -EFAULT;
+			rv = -EFAULT;
+			goto exit_drive_command;
 		}
 	}
-
-	return 0;
+exit_drive_command:
+	if (buf)
+		dmam_free_coherent(&port->dd->pdev->dev,
+				ATA_SECT_SIZE * xfer_sz, buf, dma_addr);
+	return rv;
 }
 
 /*
@@ -2003,6 +2030,32 @@
 	return rv;
 }
 
+static void mtip_set_timeout(struct host_to_dev_fis *fis, unsigned int *timeout)
+{
+	switch (fis->command) {
+	case ATA_CMD_DOWNLOAD_MICRO:
+		*timeout = 120000; /* 2 minutes */
+		break;
+	case ATA_CMD_SEC_ERASE_UNIT:
+	case 0xFC:
+		*timeout = 240000; /* 4 minutes */
+		break;
+	case ATA_CMD_STANDBYNOW1:
+		*timeout = 10000;  /* 10 seconds */
+		break;
+	case 0xF7:
+	case 0xFA:
+		*timeout = 60000;  /* 60 seconds */
+		break;
+	case ATA_CMD_SMART:
+		*timeout = 15000;  /* 15 seconds */
+		break;
+	default:
+		*timeout = MTIP_IOCTL_COMMAND_TIMEOUT_MS;
+		break;
+	}
+}
+
 /*
  * Executes a taskfile
  * See ide_taskfile_ioctl() for derivation
@@ -2023,7 +2076,7 @@
 	unsigned int taskin = 0;
 	unsigned int taskout = 0;
 	u8 nsect = 0;
-	unsigned int timeout = MTIP_IOCTL_COMMAND_TIMEOUT_MS;
+	unsigned int timeout;
 	unsigned int force_single_sector;
 	unsigned int transfer_size;
 	unsigned long task_file_data;
@@ -2153,32 +2206,7 @@
 		fis.lba_hi,
 		fis.device);
 
-	switch (fis.command) {
-	case ATA_CMD_DOWNLOAD_MICRO:
-		/* Change timeout for Download Microcode to 2 minutes */
-		timeout = 120000;
-		break;
-	case ATA_CMD_SEC_ERASE_UNIT:
-		/* Change timeout for Security Erase Unit to 4 minutes.*/
-		timeout = 240000;
-		break;
-	case ATA_CMD_STANDBYNOW1:
-		/* Change timeout for standby immediate to 10 seconds.*/
-		timeout = 10000;
-		break;
-	case 0xF7:
-	case 0xFA:
-		/* Change timeout for vendor unique command to 10 secs */
-		timeout = 10000;
-		break;
-	case ATA_CMD_SMART:
-		/* Change timeout for vendor unique command to 15 secs */
-		timeout = 15000;
-		break;
-	default:
-		timeout = MTIP_IOCTL_COMMAND_TIMEOUT_MS;
-		break;
-	}
+	mtip_set_timeout(&fis, &timeout);
 
 	/* Determine the correct transfer size.*/
 	if (force_single_sector)
@@ -2295,13 +2323,12 @@
 {
 	switch (cmd) {
 	case HDIO_GET_IDENTITY:
-		if (mtip_get_identify(dd->port, (void __user *) arg) < 0) {
-			dev_warn(&dd->pdev->dev,
-				"Unable to read identity\n");
-			return -EIO;
-		}
-
+	{
+		if (copy_to_user((void __user *)arg, dd->port->identify,
+						sizeof(u16) * ATA_ID_WORDS))
+			return -EFAULT;
 		break;
+	}
 	case HDIO_DRIVE_CMD:
 	{
 		u8 drive_command[4];
@@ -2537,40 +2564,58 @@
 	int size = 0;
 	int n;
 
-	size += sprintf(&buf[size], "S ACTive:\n");
+	size += sprintf(&buf[size], "Hardware\n--------\n");
+	size += sprintf(&buf[size], "S ACTive      : [ 0x");
 
-	for (n = 0; n < dd->slot_groups; n++)
-		size += sprintf(&buf[size], "0x%08x\n",
+	for (n = dd->slot_groups-1; n >= 0; n--)
+		size += sprintf(&buf[size], "%08X ",
 					 readl(dd->port->s_active[n]));
 
-	size += sprintf(&buf[size], "Command Issue:\n");
+	size += sprintf(&buf[size], "]\n");
+	size += sprintf(&buf[size], "Command Issue : [ 0x");
 
-	for (n = 0; n < dd->slot_groups; n++)
-		size += sprintf(&buf[size], "0x%08x\n",
+	for (n = dd->slot_groups-1; n >= 0; n--)
+		size += sprintf(&buf[size], "%08X ",
 					readl(dd->port->cmd_issue[n]));
 
-	size += sprintf(&buf[size], "Allocated:\n");
+	size += sprintf(&buf[size], "]\n");
+	size += sprintf(&buf[size], "Completed     : [ 0x");
 
-	for (n = 0; n < dd->slot_groups; n++) {
+	for (n = dd->slot_groups-1; n >= 0; n--)
+		size += sprintf(&buf[size], "%08X ",
+				readl(dd->port->completed[n]));
+
+	size += sprintf(&buf[size], "]\n");
+	size += sprintf(&buf[size], "PORT IRQ STAT : [ 0x%08X ]\n",
+				readl(dd->port->mmio + PORT_IRQ_STAT));
+	size += sprintf(&buf[size], "HOST IRQ STAT : [ 0x%08X ]\n",
+				readl(dd->mmio + HOST_IRQ_STAT));
+	size += sprintf(&buf[size], "\n");
+
+	size += sprintf(&buf[size], "Local\n-----\n");
+	size += sprintf(&buf[size], "Allocated    : [ 0x");
+
+	for (n = dd->slot_groups-1; n >= 0; n--) {
 		if (sizeof(long) > sizeof(u32))
 			group_allocated =
 				dd->port->allocated[n/2] >> (32*(n&1));
 		else
 			group_allocated = dd->port->allocated[n];
-		size += sprintf(&buf[size], "0x%08x\n",
-				 group_allocated);
+		size += sprintf(&buf[size], "%08X ", group_allocated);
 	}
+	size += sprintf(&buf[size], "]\n");
 
-	size += sprintf(&buf[size], "Completed:\n");
+	size += sprintf(&buf[size], "Commands in Q: [ 0x");
 
-	for (n = 0; n < dd->slot_groups; n++)
-		size += sprintf(&buf[size], "0x%08x\n",
-				readl(dd->port->completed[n]));
-
-	size += sprintf(&buf[size], "PORT IRQ STAT : 0x%08x\n",
-				readl(dd->port->mmio + PORT_IRQ_STAT));
-	size += sprintf(&buf[size], "HOST IRQ STAT : 0x%08x\n",
-				readl(dd->mmio + HOST_IRQ_STAT));
+	for (n = dd->slot_groups-1; n >= 0; n--) {
+		if (sizeof(long) > sizeof(u32))
+			group_allocated =
+				dd->port->cmds_to_issue[n/2] >> (32*(n&1));
+		else
+			group_allocated = dd->port->cmds_to_issue[n];
+		size += sprintf(&buf[size], "%08X ", group_allocated);
+	}
+	size += sprintf(&buf[size], "]\n");
 
 	return size;
 }
@@ -2592,8 +2637,24 @@
 	return size;
 }
 
+static ssize_t mtip_hw_show_flags(struct device *dev,
+				struct device_attribute *attr,
+				char *buf)
+{
+	struct driver_data *dd = dev_to_disk(dev)->private_data;
+	int size = 0;
+
+	size += sprintf(&buf[size], "Flag in port struct : [ %08lX ]\n",
+							dd->port->flags);
+	size += sprintf(&buf[size], "Flag in dd struct   : [ %08lX ]\n",
+							dd->dd_flag);
+
+	return size;
+}
+
 static DEVICE_ATTR(registers, S_IRUGO, mtip_hw_show_registers, NULL);
 static DEVICE_ATTR(status, S_IRUGO, mtip_hw_show_status, NULL);
+static DEVICE_ATTR(flags, S_IRUGO, mtip_hw_show_flags, NULL);
 
 /*
  * Create the sysfs related attributes.
@@ -2616,6 +2677,9 @@
 	if (sysfs_create_file(kobj, &dev_attr_status.attr))
 		dev_warn(&dd->pdev->dev,
 			"Error creating 'status' sysfs entry\n");
+	if (sysfs_create_file(kobj, &dev_attr_flags.attr))
+		dev_warn(&dd->pdev->dev,
+			"Error creating 'flags' sysfs entry\n");
 	return 0;
 }
 
@@ -2636,6 +2700,7 @@
 
 	sysfs_remove_file(kobj, &dev_attr_registers.attr);
 	sysfs_remove_file(kobj, &dev_attr_status.attr);
+	sysfs_remove_file(kobj, &dev_attr_flags.attr);
 
 	return 0;
 }
@@ -3634,7 +3699,10 @@
 	set_bit(QUEUE_FLAG_NONROT, &dd->queue->queue_flags);
 	blk_queue_max_segments(dd->queue, MTIP_MAX_SG);
 	blk_queue_physical_block_size(dd->queue, 4096);
+	blk_queue_max_hw_sectors(dd->queue, 0xffff);
+	blk_queue_max_segment_size(dd->queue, 0x400000);
 	blk_queue_io_min(dd->queue, 4096);
+
 	/*
 	 * write back cache is not supported in the device. FUA depends on
 	 * write back cache support, hence setting flush support to zero.
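
The reworked 'registers' sysfs attribute above prints each per-slot-group register as one bracketed hex field, most-significant group first. A user-space model of that sprintf accumulation, with made-up register values:

#include <stdio.h>

int main(void)
{
	unsigned int s_active[2] = { 0x00000001, 0x80000000 };
	int slot_groups = 2, n, size = 0;
	char buf[256];

	size += sprintf(&buf[size], "S ACTive      : [ 0x");
	for (n = slot_groups - 1; n >= 0; n--)
		size += sprintf(&buf[size], "%08X ", s_active[n]);
	size += sprintf(&buf[size], "]\n");

	fputs(buf, stdout);	/* S ACTive      : [ 0x80000000 00000001 ] */
	return 0;
}
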
diff --git a/drivers/block/mtip32xx/mtip32xx.h b/drivers/block/mtip32xx/mtip32xx.h
index 4ef5833..b2c88da 100644
--- a/drivers/block/mtip32xx/mtip32xx.h
+++ b/drivers/block/mtip32xx/mtip32xx.h
@@ -113,33 +113,35 @@
 
 #define __force_bit2int (unsigned int __force)
 
-/* below are bit numbers in 'flags' defined in mtip_port */
-#define MTIP_PF_IC_ACTIVE_BIT		0 /* pio/ioctl */
-#define MTIP_PF_EH_ACTIVE_BIT		1 /* error handling */
-#define MTIP_PF_SE_ACTIVE_BIT		2 /* secure erase */
-#define MTIP_PF_DM_ACTIVE_BIT		3 /* download microcde */
-#define MTIP_PF_PAUSE_IO	((1 << MTIP_PF_IC_ACTIVE_BIT) | \
+enum {
+	/* below are bit numbers in 'flags' defined in mtip_port */
+	MTIP_PF_IC_ACTIVE_BIT       = 0, /* pio/ioctl */
+	MTIP_PF_EH_ACTIVE_BIT       = 1, /* error handling */
+	MTIP_PF_SE_ACTIVE_BIT       = 2, /* secure erase */
+	MTIP_PF_DM_ACTIVE_BIT       = 3, /* download microcode */
+	MTIP_PF_PAUSE_IO      =	((1 << MTIP_PF_IC_ACTIVE_BIT) | \
 				(1 << MTIP_PF_EH_ACTIVE_BIT) | \
 				(1 << MTIP_PF_SE_ACTIVE_BIT) | \
-				(1 << MTIP_PF_DM_ACTIVE_BIT))
+				(1 << MTIP_PF_DM_ACTIVE_BIT)),
 
-#define MTIP_PF_SVC_THD_ACTIVE_BIT	4
-#define MTIP_PF_ISSUE_CMDS_BIT		5
-#define MTIP_PF_REBUILD_BIT		6
-#define MTIP_PF_SVC_THD_STOP_BIT	8
+	MTIP_PF_SVC_THD_ACTIVE_BIT  = 4,
+	MTIP_PF_ISSUE_CMDS_BIT      = 5,
+	MTIP_PF_REBUILD_BIT         = 6,
+	MTIP_PF_SVC_THD_STOP_BIT    = 8,
 
-/* below are bit numbers in 'dd_flag' defined in driver_data */
-#define MTIP_DDF_REMOVE_PENDING_BIT	1
-#define MTIP_DDF_OVER_TEMP_BIT		2
-#define MTIP_DDF_WRITE_PROTECT_BIT	3
-#define MTIP_DDF_STOP_IO	((1 << MTIP_DDF_REMOVE_PENDING_BIT) | \
+	/* below are bit numbers in 'dd_flag' defined in driver_data */
+	MTIP_DDF_REMOVE_PENDING_BIT = 1,
+	MTIP_DDF_OVER_TEMP_BIT      = 2,
+	MTIP_DDF_WRITE_PROTECT_BIT  = 3,
+	MTIP_DDF_STOP_IO      = ((1 << MTIP_DDF_REMOVE_PENDING_BIT) | \
 				(1 << MTIP_DDF_OVER_TEMP_BIT) | \
-				(1 << MTIP_DDF_WRITE_PROTECT_BIT))
+				(1 << MTIP_DDF_WRITE_PROTECT_BIT)),
 
-#define MTIP_DDF_CLEANUP_BIT		5
-#define MTIP_DDF_RESUME_BIT		6
-#define MTIP_DDF_INIT_DONE_BIT		7
-#define MTIP_DDF_REBUILD_FAILED_BIT	8
+	MTIP_DDF_CLEANUP_BIT        = 5,
+	MTIP_DDF_RESUME_BIT         = 6,
+	MTIP_DDF_INIT_DONE_BIT      = 7,
+	MTIP_DDF_REBUILD_FAILED_BIT = 8,
+};
 
 __packed struct smart_attr{
 	u8 attr_id;
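
In the enum layout above, the individual MTIP_* entries are bit numbers (for set_bit/test_bit), while composites such as MTIP_PF_PAUSE_IO are already-shifted masks, so the two kinds are tested differently. A small stand-alone model of that split, with the values copied from the header for illustration only:

#include <stdio.h>

enum {
	MTIP_PF_IC_ACTIVE_BIT = 0,
	MTIP_PF_EH_ACTIVE_BIT = 1,
	MTIP_PF_SE_ACTIVE_BIT = 2,
	MTIP_PF_DM_ACTIVE_BIT = 3,
	MTIP_PF_PAUSE_IO = ((1 << MTIP_PF_IC_ACTIVE_BIT) |
			    (1 << MTIP_PF_EH_ACTIVE_BIT) |
			    (1 << MTIP_PF_SE_ACTIVE_BIT) |
			    (1 << MTIP_PF_DM_ACTIVE_BIT)),
};

int main(void)
{
	unsigned long flags = 1UL << MTIP_PF_EH_ACTIVE_BIT;	/* set_bit() */

	if (flags & MTIP_PF_PAUSE_IO)
		puts("I/O paused (error handler active)");
	return 0;
}
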
diff --git a/drivers/block/rbd.c b/drivers/block/rbd.c
index 013c7a5..65665c9 100644
--- a/drivers/block/rbd.c
+++ b/drivers/block/rbd.c
@@ -141,7 +141,7 @@
 struct rbd_snap {
 	struct	device		dev;
 	const char		*name;
-	size_t			size;
+	u64			size;
 	struct list_head	node;
 	u64			id;
 };
@@ -175,8 +175,7 @@
 	/* protects updating the header */
 	struct rw_semaphore     header_rwsem;
 	char                    snap_name[RBD_MAX_SNAP_NAME_LEN];
-	u32 cur_snap;	/* index+1 of current snapshot within snap context
-			   0 - for the head */
+	u64                     snap_id;	/* current snapshot id */
 	int read_only;
 
 	struct list_head	node;
@@ -241,7 +240,7 @@
 	put_device(&rbd_dev->dev);
 }
 
-static int __rbd_update_snaps(struct rbd_device *rbd_dev);
+static int __rbd_refresh_header(struct rbd_device *rbd_dev);
 
 static int rbd_open(struct block_device *bdev, fmode_t mode)
 {
@@ -450,7 +449,9 @@
 	struct rbd_client *rbdc = container_of(kref, struct rbd_client, kref);
 
 	dout("rbd_release_client %p\n", rbdc);
+	spin_lock(&rbd_client_list_lock);
 	list_del(&rbdc->node);
+	spin_unlock(&rbd_client_list_lock);
 
 	ceph_destroy_client(rbdc->client);
 	kfree(rbdc->rbd_opts);
@@ -463,9 +464,7 @@
  */
 static void rbd_put_client(struct rbd_device *rbd_dev)
 {
-	spin_lock(&rbd_client_list_lock);
 	kref_put(&rbd_dev->rbd_client->kref, rbd_client_release);
-	spin_unlock(&rbd_client_list_lock);
 	rbd_dev->rbd_client = NULL;
 }
 
@@ -487,16 +486,18 @@
  */
 static int rbd_header_from_disk(struct rbd_image_header *header,
 				 struct rbd_image_header_ondisk *ondisk,
-				 int allocated_snaps,
+				 u32 allocated_snaps,
 				 gfp_t gfp_flags)
 {
-	int i;
-	u32 snap_count;
+	u32 i, snap_count;
 
 	if (memcmp(ondisk, RBD_HEADER_TEXT, sizeof(RBD_HEADER_TEXT)))
 		return -ENXIO;
 
 	snap_count = le32_to_cpu(ondisk->snap_count);
+	if (snap_count > (UINT_MAX - sizeof(struct ceph_snap_context))
+			 / sizeof (*ondisk))
+		return -EINVAL;
 	header->snapc = kmalloc(sizeof(struct ceph_snap_context) +
 				snap_count * sizeof (*ondisk),
 				gfp_flags);
@@ -506,11 +507,11 @@
 	header->snap_names_len = le64_to_cpu(ondisk->snap_names_len);
 	if (snap_count) {
 		header->snap_names = kmalloc(header->snap_names_len,
-					     GFP_KERNEL);
+					     gfp_flags);
 		if (!header->snap_names)
 			goto err_snapc;
 		header->snap_sizes = kmalloc(snap_count * sizeof(u64),
-					     GFP_KERNEL);
+					     gfp_flags);
 		if (!header->snap_sizes)
 			goto err_names;
 	} else {
@@ -552,21 +553,6 @@
 	return -ENOMEM;
 }
 
-static int snap_index(struct rbd_image_header *header, int snap_num)
-{
-	return header->total_snaps - snap_num;
-}
-
-static u64 cur_snap_id(struct rbd_device *rbd_dev)
-{
-	struct rbd_image_header *header = &rbd_dev->header;
-
-	if (!rbd_dev->cur_snap)
-		return 0;
-
-	return header->snapc->snaps[snap_index(header, rbd_dev->cur_snap)];
-}
-
 static int snap_by_name(struct rbd_image_header *header, const char *snap_name,
 			u64 *seq, u64 *size)
 {
@@ -605,7 +591,7 @@
 			snapc->seq = header->snap_seq;
 		else
 			snapc->seq = 0;
-		dev->cur_snap = 0;
+		dev->snap_id = CEPH_NOSNAP;
 		dev->read_only = 0;
 		if (size)
 			*size = header->image_size;
@@ -613,8 +599,7 @@
 		ret = snap_by_name(header, dev->snap_name, &snapc->seq, size);
 		if (ret < 0)
 			goto done;
-
-		dev->cur_snap = header->total_snaps - ret;
+		dev->snap_id = snapc->seq;
 		dev->read_only = 1;
 	}
 
@@ -935,7 +920,6 @@
 	layout->fl_stripe_unit = cpu_to_le32(1 << RBD_MAX_OBJ_ORDER);
 	layout->fl_stripe_count = cpu_to_le32(1);
 	layout->fl_object_size = cpu_to_le32(1 << RBD_MAX_OBJ_ORDER);
-	layout->fl_pg_preferred = cpu_to_le32(-1);
 	layout->fl_pg_pool = cpu_to_le32(dev->poolid);
 	ceph_calc_raw_layout(osdc, layout, snapid, ofs, &len, &bno,
 				req, ops);
@@ -1168,7 +1152,7 @@
 			 int coll_index)
 {
 	return rbd_do_op(rq, rbd_dev, NULL,
-			 (snapid ? snapid : CEPH_NOSNAP),
+			 snapid,
 			 CEPH_OSD_OP_READ,
 			 CEPH_OSD_FLAG_READ,
 			 2,
@@ -1187,7 +1171,7 @@
 			  u64 *ver)
 {
 	return rbd_req_sync_op(dev, NULL,
-			       (snapid ? snapid : CEPH_NOSNAP),
+			       snapid,
 			       CEPH_OSD_OP_READ,
 			       CEPH_OSD_FLAG_READ,
 			       NULL,
@@ -1238,7 +1222,7 @@
 	dout("rbd_watch_cb %s notify_id=%lld opcode=%d\n", dev->obj_md_name,
 		notify_id, (int)opcode);
 	mutex_lock_nested(&ctl_mutex, SINGLE_DEPTH_NESTING);
-	rc = __rbd_update_snaps(dev);
+	rc = __rbd_refresh_header(dev);
 	mutex_unlock(&ctl_mutex);
 	if (rc)
 		pr_warning(RBD_DRV_NAME "%d got notification but failed to "
@@ -1521,7 +1505,7 @@
 					      coll, cur_seg);
 			else
 				rbd_req_read(rq, rbd_dev,
-					     cur_snap_id(rbd_dev),
+					     rbd_dev->snap_id,
 					     ofs,
 					     op_size, bio,
 					     coll, cur_seg);
@@ -1592,7 +1576,7 @@
 {
 	ssize_t rc;
 	struct rbd_image_header_ondisk *dh;
-	int snap_count = 0;
+	u32 snap_count = 0;
 	u64 ver;
 	size_t len;
 
@@ -1656,7 +1640,7 @@
 	struct ceph_mon_client *monc;
 
 	/* we should create a snapshot only if we're pointing at the head */
-	if (dev->cur_snap)
+	if (dev->snap_id != CEPH_NOSNAP)
 		return -EINVAL;
 
 	monc = &dev->rbd_client->client->monc;
@@ -1683,7 +1667,9 @@
 	if (ret < 0)
 		return ret;
 
-	dev->header.snapc->seq =  new_snapid;
+	down_write(&dev->header_rwsem);
+	dev->header.snapc->seq = new_snapid;
+	up_write(&dev->header_rwsem);
 
 	return 0;
 bad:
@@ -1703,7 +1689,7 @@
 /*
  * only read the first part of the ondisk header, without the snaps info
  */
-static int __rbd_update_snaps(struct rbd_device *rbd_dev)
+static int __rbd_refresh_header(struct rbd_device *rbd_dev)
 {
 	int ret;
 	struct rbd_image_header h;
@@ -1890,7 +1876,7 @@
 
 	mutex_lock_nested(&ctl_mutex, SINGLE_DEPTH_NESTING);
 
-	rc = __rbd_update_snaps(rbd_dev);
+	rc = __rbd_refresh_header(rbd_dev);
 	if (rc < 0)
 		ret = rc;
 
@@ -1949,7 +1935,7 @@
 {
 	struct rbd_snap *snap = container_of(dev, struct rbd_snap, dev);
 
-	return sprintf(buf, "%zd\n", snap->size);
+	return sprintf(buf, "%llu\n", (unsigned long long)snap->size);
 }
 
 static ssize_t rbd_snap_id_show(struct device *dev,
@@ -1958,7 +1944,7 @@
 {
 	struct rbd_snap *snap = container_of(dev, struct rbd_snap, dev);
 
-	return sprintf(buf, "%llu\n", (unsigned long long) snap->id);
+	return sprintf(buf, "%llu\n", (unsigned long long)snap->id);
 }
 
 static DEVICE_ATTR(snap_size, S_IRUGO, rbd_snap_size_show, NULL);
@@ -2173,7 +2159,7 @@
 					 rbd_dev->header.obj_version);
 		if (ret == -ERANGE) {
 			mutex_lock_nested(&ctl_mutex, SINGLE_DEPTH_NESTING);
-			rc = __rbd_update_snaps(rbd_dev);
+			rc = __rbd_refresh_header(rbd_dev);
 			mutex_unlock(&ctl_mutex);
 			if (rc < 0)
 				return rc;
@@ -2558,7 +2544,7 @@
 	if (ret < 0)
 		goto err_unlock;
 
-	ret = __rbd_update_snaps(rbd_dev);
+	ret = __rbd_refresh_header(rbd_dev);
 	if (ret < 0)
 		goto err_unlock;
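
The new snap_count bound in rbd_header_from_disk() rejects counts whose allocation-size computation would wrap before kmalloc ever sees it. A user-space model of that overflow-safe check; the struct sizes here are placeholders, not the rbd on-disk layouts.

#include <stdio.h>
#include <stdint.h>
#include <limits.h>

struct snap_context { uint64_t seq; uint32_t num_snaps; };
struct snap_ondisk  { uint64_t id; uint64_t image_size; };

static int snap_alloc_size(uint32_t snap_count, size_t *out)
{
	if (snap_count > (UINT_MAX - sizeof(struct snap_context))
			 / sizeof(struct snap_ondisk))
		return -1;			/* -EINVAL in the driver */
	*out = sizeof(struct snap_context)
	     + (size_t)snap_count * sizeof(struct snap_ondisk);
	return 0;
}

int main(void)
{
	size_t sz;
	printf("1000 snaps       -> %s\n",
	       snap_alloc_size(1000, &sz) ? "reject" : "ok");
	printf("0xffffffff snaps -> %s\n",
	       snap_alloc_size(0xffffffffu, &sz) ? "reject" : "ok");
	return 0;
}
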
 
diff --git a/drivers/block/xen-blkfront.c b/drivers/block/xen-blkfront.c
index 4e86393..60eed4b 100644
--- a/drivers/block/xen-blkfront.c
+++ b/drivers/block/xen-blkfront.c
@@ -526,6 +526,14 @@
 	return 0;
 }
 
+static char *encode_disk_name(char *ptr, unsigned int n)
+{
+	if (n >= 26)
+		ptr = encode_disk_name(ptr, n / 26 - 1);
+	*ptr = 'a' + n % 26;
+	return ptr + 1;
+}
+
 static int xlvbd_alloc_gendisk(blkif_sector_t capacity,
 			       struct blkfront_info *info,
 			       u16 vdisk_info, u16 sector_size)
@@ -536,6 +544,7 @@
 	unsigned int offset;
 	int minor;
 	int nr_parts;
+	char *ptr;
 
 	BUG_ON(info->gd != NULL);
 	BUG_ON(info->rq != NULL);
@@ -560,7 +569,11 @@
 					"emulated IDE disks,\n\t choose an xvd device name"
 					"from xvde on\n", info->vdevice);
 	}
-	err = -ENODEV;
+	if (minor >> MINORBITS) {
+		pr_warn("blkfront: %#x's minor (%#x) out of range; ignoring\n",
+			info->vdevice, minor);
+		return -ENODEV;
+	}
 
 	if ((minor % nr_parts) == 0)
 		nr_minors = nr_parts;
@@ -574,23 +587,14 @@
 	if (gd == NULL)
 		goto release;
 
-	if (nr_minors > 1) {
-		if (offset < 26)
-			sprintf(gd->disk_name, "%s%c", DEV_NAME, 'a' + offset);
-		else
-			sprintf(gd->disk_name, "%s%c%c", DEV_NAME,
-				'a' + ((offset / 26)-1), 'a' + (offset % 26));
-	} else {
-		if (offset < 26)
-			sprintf(gd->disk_name, "%s%c%d", DEV_NAME,
-				'a' + offset,
-				minor & (nr_parts - 1));
-		else
-			sprintf(gd->disk_name, "%s%c%c%d", DEV_NAME,
-				'a' + ((offset / 26) - 1),
-				'a' + (offset % 26),
-				minor & (nr_parts - 1));
-	}
+	strcpy(gd->disk_name, DEV_NAME);
+	ptr = encode_disk_name(gd->disk_name + sizeof(DEV_NAME) - 1, offset);
+	BUG_ON(ptr >= gd->disk_name + DISK_NAME_LEN);
+	if (nr_minors > 1)
+		*ptr = 0;
+	else
+		snprintf(ptr, gd->disk_name + DISK_NAME_LEN - ptr,
+			 "%d", minor & (nr_parts - 1));
 
 	gd->major = XENVBD_MAJOR;
 	gd->first_minor = minor;
@@ -1496,7 +1500,9 @@
 
 static void __exit xlblk_exit(void)
 {
-	return xenbus_unregister_driver(&blkfront_driver);
+	xenbus_unregister_driver(&blkfront_driver);
+	unregister_blkdev(XENVBD_MAJOR, DEV_NAME);
+	kfree(minors);
 }
 module_exit(xlblk_exit);
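
The recursive encode_disk_name() helper added above produces bijective base-26 suffixes (xvda ... xvdz, xvdaa, ...), replacing the hand-rolled one- and two-letter sprintf cases. A user-space demo of the same function; DEV_NAME is assumed to be "xvd" as in blkfront, and the offsets are arbitrary examples.

#include <stdio.h>
#include <string.h>

static char *encode_disk_name(char *ptr, unsigned int n)
{
	if (n >= 26)
		ptr = encode_disk_name(ptr, n / 26 - 1);
	*ptr = 'a' + n % 26;
	return ptr + 1;
}

int main(void)
{
	unsigned int offsets[] = { 0, 25, 26, 701, 702 };
	unsigned int i;

	for (i = 0; i < sizeof(offsets) / sizeof(offsets[0]); i++) {
		char name[32] = "xvd";
		char *end = encode_disk_name(name + strlen(name), offsets[i]);
		*end = '\0';
		printf("offset %4u -> %s\n", offsets[i], name);
	}
	return 0;	/* 0 -> xvda, 25 -> xvdz, 26 -> xvdaa, 702 -> xvdaaa */
}
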
 
diff --git a/drivers/bluetooth/ath3k.c b/drivers/bluetooth/ath3k.c
index ad591bd..10308cd 100644
--- a/drivers/bluetooth/ath3k.c
+++ b/drivers/bluetooth/ath3k.c
@@ -63,6 +63,7 @@
 
 	/* Atheros AR3011 with sflash firmware*/
 	{ USB_DEVICE(0x0CF3, 0x3002) },
+	{ USB_DEVICE(0x0CF3, 0xE019) },
 	{ USB_DEVICE(0x13d3, 0x3304) },
 	{ USB_DEVICE(0x0930, 0x0215) },
 	{ USB_DEVICE(0x0489, 0xE03D) },
@@ -77,6 +78,7 @@
 	{ USB_DEVICE(0x04CA, 0x3005) },
 	{ USB_DEVICE(0x13d3, 0x3362) },
 	{ USB_DEVICE(0x0CF3, 0xE004) },
+	{ USB_DEVICE(0x0930, 0x0219) },
 
 	/* Atheros AR5BBU12 with sflash firmware */
 	{ USB_DEVICE(0x0489, 0xE02C) },
@@ -101,6 +103,7 @@
 	{ USB_DEVICE(0x04ca, 0x3005), .driver_info = BTUSB_ATH3012 },
 	{ USB_DEVICE(0x13d3, 0x3362), .driver_info = BTUSB_ATH3012 },
 	{ USB_DEVICE(0x0cf3, 0xe004), .driver_info = BTUSB_ATH3012 },
+	{ USB_DEVICE(0x0930, 0x0219), .driver_info = BTUSB_ATH3012 },
 
 	/* Atheros AR5BBU22 with sflash firmware */
 	{ USB_DEVICE(0x0489, 0xE03C), .driver_info = BTUSB_ATH3012 },
diff --git a/drivers/bluetooth/btmrvl_drv.h b/drivers/bluetooth/btmrvl_drv.h
index 94f2d65..27068d1 100644
--- a/drivers/bluetooth/btmrvl_drv.h
+++ b/drivers/bluetooth/btmrvl_drv.h
@@ -136,7 +136,7 @@
 
 void btmrvl_interrupt(struct btmrvl_private *priv);
 
-void btmrvl_check_evtpkt(struct btmrvl_private *priv, struct sk_buff *skb);
+bool btmrvl_check_evtpkt(struct btmrvl_private *priv, struct sk_buff *skb);
 int btmrvl_process_event(struct btmrvl_private *priv, struct sk_buff *skb);
 
 int btmrvl_send_module_cfg_cmd(struct btmrvl_private *priv, int subcmd);
diff --git a/drivers/bluetooth/btmrvl_main.c b/drivers/bluetooth/btmrvl_main.c
index 681ca9d..dc304de 100644
--- a/drivers/bluetooth/btmrvl_main.c
+++ b/drivers/bluetooth/btmrvl_main.c
@@ -44,23 +44,33 @@
 }
 EXPORT_SYMBOL_GPL(btmrvl_interrupt);
 
-void btmrvl_check_evtpkt(struct btmrvl_private *priv, struct sk_buff *skb)
+bool btmrvl_check_evtpkt(struct btmrvl_private *priv, struct sk_buff *skb)
 {
 	struct hci_event_hdr *hdr = (void *) skb->data;
 	struct hci_ev_cmd_complete *ec;
-	u16 opcode, ocf;
+	u16 opcode, ocf, ogf;
 
 	if (hdr->evt == HCI_EV_CMD_COMPLETE) {
 		ec = (void *) (skb->data + HCI_EVENT_HDR_SIZE);
 		opcode = __le16_to_cpu(ec->opcode);
 		ocf = hci_opcode_ocf(opcode);
+		ogf = hci_opcode_ogf(opcode);
+
 		if (ocf == BT_CMD_MODULE_CFG_REQ &&
 					priv->btmrvl_dev.sendcmdflag) {
 			priv->btmrvl_dev.sendcmdflag = false;
 			priv->adapter->cmd_complete = true;
 			wake_up_interruptible(&priv->adapter->cmd_wait_q);
 		}
+
+		if (ogf == OGF) {
+			BT_DBG("vendor event skipped: ogf 0x%4.4x", ogf);
+			kfree_skb(skb);
+			return false;
+		}
 	}
+
+	return true;
 }
 EXPORT_SYMBOL_GPL(btmrvl_check_evtpkt);
 
diff --git a/drivers/bluetooth/btmrvl_sdio.c b/drivers/bluetooth/btmrvl_sdio.c
index a853244..0cd61d9 100644
--- a/drivers/bluetooth/btmrvl_sdio.c
+++ b/drivers/bluetooth/btmrvl_sdio.c
@@ -562,10 +562,12 @@
 		skb_put(skb, buf_len);
 		skb_pull(skb, SDIO_HEADER_LEN);
 
-		if (type == HCI_EVENT_PKT)
-			btmrvl_check_evtpkt(priv, skb);
+		if (type == HCI_EVENT_PKT) {
+			if (btmrvl_check_evtpkt(priv, skb))
+				hci_recv_frame(skb);
+		} else
+			hci_recv_frame(skb);
 
-		hci_recv_frame(skb);
 		hdev->stat.byte_rx += buf_len;
 		break;
 
diff --git a/drivers/bluetooth/btusb.c b/drivers/bluetooth/btusb.c
index c9463af..83ebb24 100644
--- a/drivers/bluetooth/btusb.c
+++ b/drivers/bluetooth/btusb.c
@@ -125,6 +125,7 @@
 
 	/* Atheros 3011 with sflash firmware */
 	{ USB_DEVICE(0x0cf3, 0x3002), .driver_info = BTUSB_IGNORE },
+	{ USB_DEVICE(0x0cf3, 0xe019), .driver_info = BTUSB_IGNORE },
 	{ USB_DEVICE(0x13d3, 0x3304), .driver_info = BTUSB_IGNORE },
 	{ USB_DEVICE(0x0930, 0x0215), .driver_info = BTUSB_IGNORE },
 	{ USB_DEVICE(0x0489, 0xe03d), .driver_info = BTUSB_IGNORE },
@@ -139,6 +140,7 @@
 	{ USB_DEVICE(0x04ca, 0x3005), .driver_info = BTUSB_ATH3012 },
 	{ USB_DEVICE(0x13d3, 0x3362), .driver_info = BTUSB_ATH3012 },
 	{ USB_DEVICE(0x0cf3, 0xe004), .driver_info = BTUSB_ATH3012 },
+	{ USB_DEVICE(0x0930, 0x0219), .driver_info = BTUSB_ATH3012 },
 
 	/* Atheros AR5BBU12 with sflash firmware */
 	{ USB_DEVICE(0x0489, 0xe02c), .driver_info = BTUSB_IGNORE },
diff --git a/drivers/char/agp/intel-agp.c b/drivers/char/agp/intel-agp.c
index 764f70c..0a41852 100644
--- a/drivers/char/agp/intel-agp.c
+++ b/drivers/char/agp/intel-agp.c
@@ -898,6 +898,7 @@
 	ID(PCI_DEVICE_ID_INTEL_B43_HB),
 	ID(PCI_DEVICE_ID_INTEL_B43_1_HB),
 	ID(PCI_DEVICE_ID_INTEL_IRONLAKE_D_HB),
+	ID(PCI_DEVICE_ID_INTEL_IRONLAKE_D2_HB),
 	ID(PCI_DEVICE_ID_INTEL_IRONLAKE_M_HB),
 	ID(PCI_DEVICE_ID_INTEL_IRONLAKE_MA_HB),
 	ID(PCI_DEVICE_ID_INTEL_IRONLAKE_MC2_HB),
diff --git a/drivers/char/agp/intel-agp.h b/drivers/char/agp/intel-agp.h
index c009175..8e2d914 100644
--- a/drivers/char/agp/intel-agp.h
+++ b/drivers/char/agp/intel-agp.h
@@ -212,6 +212,7 @@
 #define PCI_DEVICE_ID_INTEL_G41_HB          0x2E30
 #define PCI_DEVICE_ID_INTEL_G41_IG          0x2E32
 #define PCI_DEVICE_ID_INTEL_IRONLAKE_D_HB	    0x0040
+#define PCI_DEVICE_ID_INTEL_IRONLAKE_D2_HB	    0x0069
 #define PCI_DEVICE_ID_INTEL_IRONLAKE_D_IG	    0x0042
 #define PCI_DEVICE_ID_INTEL_IRONLAKE_M_HB	    0x0044
 #define PCI_DEVICE_ID_INTEL_IRONLAKE_MA_HB	    0x0062
diff --git a/drivers/char/hw_random/atmel-rng.c b/drivers/char/hw_random/atmel-rng.c
index f518b99..731c904 100644
--- a/drivers/char/hw_random/atmel-rng.c
+++ b/drivers/char/hw_random/atmel-rng.c
@@ -34,8 +34,15 @@
 	u32 *data = buf;
 
 	/* data ready? */
-	if (readl(trng->base + TRNG_ODATA) & 1) {
+	if (readl(trng->base + TRNG_ISR) & 1) {
 		*data = readl(trng->base + TRNG_ODATA);
+		/*
+		  ensure data ready is only set again AFTER the next data
+		  word is ready in case it got set between checking ISR
+		  and reading ODATA, so we don't risk re-reading the
+		  same word
+		*/
+		readl(trng->base + TRNG_ISR);
 		return 4;
 	} else
 		return 0;
diff --git a/drivers/clk/Kconfig b/drivers/clk/Kconfig
index 165e1fe..4864407 100644
--- a/drivers/clk/Kconfig
+++ b/drivers/clk/Kconfig
@@ -12,6 +12,7 @@
 config COMMON_CLK
 	bool
 	select HAVE_CLK_PREPARE
+	select CLKDEV_LOOKUP
 	---help---
 	  The common clock framework is a single definition of struct
 	  clk, useful across many platforms, as well as an
@@ -22,17 +23,6 @@
 menu "Common Clock Framework"
 	depends on COMMON_CLK
 
-config COMMON_CLK_DISABLE_UNUSED
-	bool "Disabled unused clocks at boot"
-	depends on COMMON_CLK
-	---help---
-	  Traverses the entire clock tree and disables any clocks that are
-	  enabled in hardware but have not been enabled by any device drivers.
-	  This saves power and keeps the software model of the clock in line
-	  with reality.
-
-	  If in doubt, say "N".
-
 config COMMON_CLK_DEBUG
 	bool "DebugFS representation of clock tree"
 	depends on COMMON_CLK
diff --git a/drivers/clk/Makefile b/drivers/clk/Makefile
index 1f736bc..b9a5158 100644
--- a/drivers/clk/Makefile
+++ b/drivers/clk/Makefile
@@ -1,4 +1,7 @@
 
 obj-$(CONFIG_CLKDEV_LOOKUP)	+= clkdev.o
 obj-$(CONFIG_COMMON_CLK)	+= clk.o clk-fixed-rate.o clk-gate.o \
-				   clk-mux.o clk-divider.o
+				   clk-mux.o clk-divider.o clk-fixed-factor.o
+# SoCs specific
+obj-$(CONFIG_ARCH_MXS)		+= mxs/
+obj-$(CONFIG_PLAT_SPEAR)	+= spear/
diff --git a/drivers/clk/clk-divider.c b/drivers/clk/clk-divider.c
index d5ac6a7..8ea11b44 100644
--- a/drivers/clk/clk-divider.c
+++ b/drivers/clk/clk-divider.c
@@ -45,7 +45,6 @@
 
 	return parent_rate / div;
 }
-EXPORT_SYMBOL_GPL(clk_divider_recalc_rate);
 
 /*
  * The reverse of DIV_ROUND_UP: The maximum number which
@@ -68,8 +67,8 @@
 	if (divider->flags & CLK_DIVIDER_ONE_BASED)
 		maxdiv--;
 
-	if (!best_parent_rate) {
-		parent_rate = __clk_get_rate(__clk_get_parent(hw->clk));
+	if (!(__clk_get_flags(hw->clk) & CLK_SET_RATE_PARENT)) {
+		parent_rate = *best_parent_rate;
 		bestdiv = DIV_ROUND_UP(parent_rate, rate);
 		bestdiv = bestdiv == 0 ? 1 : bestdiv;
 		bestdiv = bestdiv > maxdiv ? maxdiv : bestdiv;
@@ -109,24 +108,18 @@
 	int div;
 	div = clk_divider_bestdiv(hw, rate, prate);
 
-	if (prate)
-		return *prate / div;
-	else {
-		unsigned long r;
-		r = __clk_get_rate(__clk_get_parent(hw->clk));
-		return r / div;
-	}
+	return *prate / div;
 }
-EXPORT_SYMBOL_GPL(clk_divider_round_rate);
 
-static int clk_divider_set_rate(struct clk_hw *hw, unsigned long rate)
+static int clk_divider_set_rate(struct clk_hw *hw, unsigned long rate,
+				unsigned long parent_rate)
 {
 	struct clk_divider *divider = to_clk_divider(hw);
 	unsigned int div;
 	unsigned long flags = 0;
 	u32 val;
 
-	div = __clk_get_rate(__clk_get_parent(hw->clk)) / rate;
+	div = parent_rate / rate;
 
 	if (!(divider->flags & CLK_DIVIDER_ONE_BASED))
 		div--;
@@ -147,15 +140,26 @@
 
 	return 0;
 }
-EXPORT_SYMBOL_GPL(clk_divider_set_rate);
 
-struct clk_ops clk_divider_ops = {
+const struct clk_ops clk_divider_ops = {
 	.recalc_rate = clk_divider_recalc_rate,
 	.round_rate = clk_divider_round_rate,
 	.set_rate = clk_divider_set_rate,
 };
 EXPORT_SYMBOL_GPL(clk_divider_ops);
 
+/**
+ * clk_register_divider - register a divider clock with the clock framework
+ * @dev: device registering this clock
+ * @name: name of this clock
+ * @parent_name: name of clock's parent
+ * @flags: framework-specific flags
+ * @reg: register address to adjust divider
+ * @shift: number of bits to shift the bitfield
+ * @width: width of the bitfield
+ * @clk_divider_flags: divider-specific flags for this clock
+ * @lock: shared register lock for this clock
+ */
 struct clk *clk_register_divider(struct device *dev, const char *name,
 		const char *parent_name, unsigned long flags,
 		void __iomem *reg, u8 shift, u8 width,
@@ -163,38 +167,34 @@
 {
 	struct clk_divider *div;
 	struct clk *clk;
+	struct clk_init_data init;
 
+	/* allocate the divider */
 	div = kzalloc(sizeof(struct clk_divider), GFP_KERNEL);
-
 	if (!div) {
 		pr_err("%s: could not allocate divider clk\n", __func__);
-		return NULL;
+		return ERR_PTR(-ENOMEM);
 	}
 
+	init.name = name;
+	init.ops = &clk_divider_ops;
+	init.flags = flags;
+	init.parent_names = (parent_name ? &parent_name: NULL);
+	init.num_parents = (parent_name ? 1 : 0);
+
 	/* struct clk_divider assignments */
 	div->reg = reg;
 	div->shift = shift;
 	div->width = width;
 	div->flags = clk_divider_flags;
 	div->lock = lock;
+	div->hw.init = &init;
 
-	if (parent_name) {
-		div->parent[0] = kstrdup(parent_name, GFP_KERNEL);
-		if (!div->parent[0])
-			goto out;
-	}
+	/* register the clock */
+	clk = clk_register(dev, &div->hw);
 
-	clk = clk_register(dev, name,
-			&clk_divider_ops, &div->hw,
-			div->parent,
-			(parent_name ? 1 : 0),
-			flags);
-	if (clk)
-		return clk;
+	if (IS_ERR(clk))
+		kfree(div);
 
-out:
-	kfree(div->parent[0]);
-	kfree(div);
-
-	return NULL;
+	return clk;
 }
diff --git a/drivers/clk/clk-fixed-factor.c b/drivers/clk/clk-fixed-factor.c
new file mode 100644
index 0000000..c8c003e
--- /dev/null
+++ b/drivers/clk/clk-fixed-factor.c
@@ -0,0 +1,95 @@
+/*
+ * Copyright (C) 2011 Sascha Hauer, Pengutronix <s.hauer@pengutronix.de>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * Standard functionality for the common clock API.
+ */
+#include <linux/module.h>
+#include <linux/clk-provider.h>
+#include <linux/slab.h>
+#include <linux/err.h>
+
+/*
+ * DOC: basic fixed multiplier and divider clock that cannot gate
+ *
+ * Traits of this clock:
+ * prepare - clk_prepare only ensures that parents are prepared
+ * enable - clk_enable only ensures that parents are enabled
+ * rate - rate is fixed.  clk->rate = parent->rate / div * mult
+ * parent - fixed parent.  No clk_set_parent support
+ */
+
+#define to_clk_fixed_factor(_hw) container_of(_hw, struct clk_fixed_factor, hw)
+
+static unsigned long clk_factor_recalc_rate(struct clk_hw *hw,
+		unsigned long parent_rate)
+{
+	struct clk_fixed_factor *fix = to_clk_fixed_factor(hw);
+
+	return parent_rate * fix->mult / fix->div;
+}
+
+static long clk_factor_round_rate(struct clk_hw *hw, unsigned long rate,
+				unsigned long *prate)
+{
+	struct clk_fixed_factor *fix = to_clk_fixed_factor(hw);
+
+	if (__clk_get_flags(hw->clk) & CLK_SET_RATE_PARENT) {
+		unsigned long best_parent;
+
+		best_parent = (rate / fix->mult) * fix->div;
+		*prate = __clk_round_rate(__clk_get_parent(hw->clk),
+				best_parent);
+	}
+
+	return (*prate / fix->div) * fix->mult;
+}
+
+static int clk_factor_set_rate(struct clk_hw *hw, unsigned long rate,
+				unsigned long parent_rate)
+{
+	return 0;
+}
+
+struct clk_ops clk_fixed_factor_ops = {
+	.round_rate = clk_factor_round_rate,
+	.set_rate = clk_factor_set_rate,
+	.recalc_rate = clk_factor_recalc_rate,
+};
+EXPORT_SYMBOL_GPL(clk_fixed_factor_ops);
+
+struct clk *clk_register_fixed_factor(struct device *dev, const char *name,
+		const char *parent_name, unsigned long flags,
+		unsigned int mult, unsigned int div)
+{
+	struct clk_fixed_factor *fix;
+	struct clk_init_data init;
+	struct clk *clk;
+
+	fix = kmalloc(sizeof(*fix), GFP_KERNEL);
+	if (!fix) {
+		pr_err("%s: could not allocate fixed factor clk\n", __func__);
+		return ERR_PTR(-ENOMEM);
+	}
+
+	/* struct clk_fixed_factor assignments */
+	fix->mult = mult;
+	fix->div = div;
+	fix->hw.init = &init;
+
+	init.name = name;
+	init.ops = &clk_fixed_factor_ops;
+	init.flags = flags;
+	init.parent_names = &parent_name;
+	init.num_parents = 1;
+
+	clk = clk_register(dev, &fix->hw);
+
+	if (IS_ERR(clk))
+		kfree(fix);
+
+	return clk;
+}
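
The fixed-factor clock above is pure arithmetic: recalc gives parent * mult / div, and with CLK_SET_RATE_PARENT the round_rate path asks the parent for (rate / mult) * div instead. A user-space sketch of just that math, with example frequencies; it models the two helpers, not the clk framework itself.

#include <stdio.h>

struct fixed_factor { unsigned int mult, div; };

static unsigned long recalc_rate(const struct fixed_factor *f,
				 unsigned long parent_rate)
{
	return parent_rate * f->mult / f->div;
}

static unsigned long parent_rate_for(const struct fixed_factor *f,
				     unsigned long rate)
{
	return (rate / f->mult) * f->div;
}

int main(void)
{
	struct fixed_factor f = { .mult = 1, .div = 2 };	/* a /2 post-divider */

	printf("parent 48 MHz -> %lu Hz\n", recalc_rate(&f, 48000000));
	printf("want 12 MHz   -> ask parent for %lu Hz\n",
	       parent_rate_for(&f, 12000000));
	return 0;
}
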
diff --git a/drivers/clk/clk-fixed-rate.c b/drivers/clk/clk-fixed-rate.c
index 90c79fb..cbd2462 100644
--- a/drivers/clk/clk-fixed-rate.c
+++ b/drivers/clk/clk-fixed-rate.c
@@ -32,51 +32,50 @@
 {
 	return to_clk_fixed_rate(hw)->fixed_rate;
 }
-EXPORT_SYMBOL_GPL(clk_fixed_rate_recalc_rate);
 
-struct clk_ops clk_fixed_rate_ops = {
+const struct clk_ops clk_fixed_rate_ops = {
 	.recalc_rate = clk_fixed_rate_recalc_rate,
 };
 EXPORT_SYMBOL_GPL(clk_fixed_rate_ops);
 
+/**
+ * clk_register_fixed_rate - register fixed-rate clock with the clock framework
+ * @dev: device that is registering this clock
+ * @name: name of this clock
+ * @parent_name: name of clock's parent
+ * @flags: framework-specific flags
+ * @fixed_rate: non-adjustable clock rate
+ */
 struct clk *clk_register_fixed_rate(struct device *dev, const char *name,
 		const char *parent_name, unsigned long flags,
 		unsigned long fixed_rate)
 {
 	struct clk_fixed_rate *fixed;
-	char **parent_names = NULL;
-	u8 len;
+	struct clk *clk;
+	struct clk_init_data init;
 
+	/* allocate fixed-rate clock */
 	fixed = kzalloc(sizeof(struct clk_fixed_rate), GFP_KERNEL);
-
 	if (!fixed) {
 		pr_err("%s: could not allocate fixed clk\n", __func__);
 		return ERR_PTR(-ENOMEM);
 	}
 
+	init.name = name;
+	init.ops = &clk_fixed_rate_ops;
+	init.flags = flags;
+	init.parent_names = (parent_name ? &parent_name : NULL);
+	init.num_parents = (parent_name ? 1 : 0);
+
 	/* struct clk_fixed_rate assignments */
 	fixed->fixed_rate = fixed_rate;
+	fixed->hw.init = &init;
 
-	if (parent_name) {
-		parent_names = kmalloc(sizeof(char *), GFP_KERNEL);
+	/* register the clock */
+	clk = clk_register(dev, &fixed->hw);
 
-		if (! parent_names)
-			goto out;
+	if (IS_ERR(clk))
+		kfree(fixed);
 
-		len = sizeof(char) * strlen(parent_name);
-
-		parent_names[0] = kmalloc(len, GFP_KERNEL);
-
-		if (!parent_names[0])
-			goto out;
-
-		strncpy(parent_names[0], parent_name, len);
-	}
-
-out:
-	return clk_register(dev, name,
-			&clk_fixed_rate_ops, &fixed->hw,
-			parent_names,
-			(parent_name ? 1 : 0),
-			flags);
+	return clk;
 }
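
A usage sketch for the reworked registration path (the clock name and rate are
illustrative, matching the 24 MHz oscillator used by the mxs code later in this
series): a root clock is registered with a NULL parent and CLK_IS_ROOT.

	/* sketch: register a 24 MHz crystal as a root clock (hypothetical platform code) */
	#include <linux/clk-provider.h>
	#include <linux/init.h>

	static struct clk *xtal;

	static void __init example_fixed_rate_setup(void)
	{
		xtal = clk_register_fixed_rate(NULL, "ref_xtal", NULL,
					       CLK_IS_ROOT, 24000000);
	}
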
diff --git a/drivers/clk/clk-gate.c b/drivers/clk/clk-gate.c
index b5902e2..578465e 100644
--- a/drivers/clk/clk-gate.c
+++ b/drivers/clk/clk-gate.c
@@ -28,32 +28,38 @@
 
 #define to_clk_gate(_hw) container_of(_hw, struct clk_gate, hw)
 
-static void clk_gate_set_bit(struct clk_gate *gate)
+/*
+ * It works on the following logic:
+ *
+ * For enabling clock, enable = 1
+ *	set2dis = 1	-> clear bit	-> set = 0
+ *	set2dis = 0	-> set bit	-> set = 1
+ *
+ * For disabling clock, enable = 0
+ *	set2dis = 1	-> set bit	-> set = 1
+ *	set2dis = 0	-> clear bit	-> set = 0
+ *
+ * So, the result is always: enable xor set2dis.
+ */
+static void clk_gate_endisable(struct clk_hw *hw, int enable)
 {
-	u32 reg;
+	struct clk_gate *gate = to_clk_gate(hw);
+	int set = gate->flags & CLK_GATE_SET_TO_DISABLE ? 1 : 0;
 	unsigned long flags = 0;
+	u32 reg;
+
+	set ^= enable;
 
 	if (gate->lock)
 		spin_lock_irqsave(gate->lock, flags);
 
 	reg = readl(gate->reg);
-	reg |= BIT(gate->bit_idx);
-	writel(reg, gate->reg);
 
-	if (gate->lock)
-		spin_unlock_irqrestore(gate->lock, flags);
-}
+	if (set)
+		reg |= BIT(gate->bit_idx);
+	else
+		reg &= ~BIT(gate->bit_idx);
 
-static void clk_gate_clear_bit(struct clk_gate *gate)
-{
-	u32 reg;
-	unsigned long flags = 0;
-
-	if (gate->lock)
-		spin_lock_irqsave(gate->lock, flags);
-
-	reg = readl(gate->reg);
-	reg &= ~BIT(gate->bit_idx);
 	writel(reg, gate->reg);
 
 	if (gate->lock)
@@ -62,27 +68,15 @@
 
 static int clk_gate_enable(struct clk_hw *hw)
 {
-	struct clk_gate *gate = to_clk_gate(hw);
-
-	if (gate->flags & CLK_GATE_SET_TO_DISABLE)
-		clk_gate_clear_bit(gate);
-	else
-		clk_gate_set_bit(gate);
+	clk_gate_endisable(hw, 1);
 
 	return 0;
 }
-EXPORT_SYMBOL_GPL(clk_gate_enable);
 
 static void clk_gate_disable(struct clk_hw *hw)
 {
-	struct clk_gate *gate = to_clk_gate(hw);
-
-	if (gate->flags & CLK_GATE_SET_TO_DISABLE)
-		clk_gate_set_bit(gate);
-	else
-		clk_gate_clear_bit(gate);
+	clk_gate_endisable(hw, 0);
 }
-EXPORT_SYMBOL_GPL(clk_gate_disable);
 
 static int clk_gate_is_enabled(struct clk_hw *hw)
 {
@@ -99,15 +93,25 @@
 
 	return reg ? 1 : 0;
 }
-EXPORT_SYMBOL_GPL(clk_gate_is_enabled);
 
-struct clk_ops clk_gate_ops = {
+const struct clk_ops clk_gate_ops = {
 	.enable = clk_gate_enable,
 	.disable = clk_gate_disable,
 	.is_enabled = clk_gate_is_enabled,
 };
 EXPORT_SYMBOL_GPL(clk_gate_ops);
 
+/**
+ * clk_register_gate - register a gate clock with the clock framework
+ * @dev: device that is registering this clock
+ * @name: name of this clock
+ * @parent_name: name of this clock's parent
+ * @flags: framework-specific flags for this clock
+ * @reg: register address to control gating of this clock
+ * @bit_idx: which bit in the register controls gating of this clock
+ * @clk_gate_flags: gate-specific flags for this clock
+ * @lock: shared register lock for this clock
+ */
 struct clk *clk_register_gate(struct device *dev, const char *name,
 		const char *parent_name, unsigned long flags,
 		void __iomem *reg, u8 bit_idx,
@@ -115,36 +119,32 @@
 {
 	struct clk_gate *gate;
 	struct clk *clk;
+	struct clk_init_data init;
 
+	/* allocate the gate */
 	gate = kzalloc(sizeof(struct clk_gate), GFP_KERNEL);
-
 	if (!gate) {
 		pr_err("%s: could not allocate gated clk\n", __func__);
-		return NULL;
+		return ERR_PTR(-ENOMEM);
 	}
 
+	init.name = name;
+	init.ops = &clk_gate_ops;
+	init.flags = flags;
+	init.parent_names = (parent_name ? &parent_name : NULL);
+	init.num_parents = (parent_name ? 1 : 0);
+
 	/* struct clk_gate assignments */
 	gate->reg = reg;
 	gate->bit_idx = bit_idx;
 	gate->flags = clk_gate_flags;
 	gate->lock = lock;
+	gate->hw.init = &init;
 
-	if (parent_name) {
-		gate->parent[0] = kstrdup(parent_name, GFP_KERNEL);
-		if (!gate->parent[0])
-			goto out;
-	}
+	clk = clk_register(dev, &gate->hw);
 
-	clk = clk_register(dev, name,
-			&clk_gate_ops, &gate->hw,
-			gate->parent,
-			(parent_name ? 1 : 0),
-			flags);
-	if (clk)
-		return clk;
-out:
-	kfree(gate->parent[0]);
-	kfree(gate);
+	if (IS_ERR(clk))
+		kfree(gate);
 
-	return NULL;
+	return clk;
 }
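
To make the enable-xor-set2dis rule above concrete, here is a sketch (register
address, bit index and clock names are hypothetical) of an active-low gate, i.e.
one where writing 1 disables the clock; clk_gate_endisable() then clears the bit
on enable and sets it on disable.

	/* sketch: active-low gate on bit 3 of a hypothetical control register */
	#include <linux/clk-provider.h>
	#include <linux/spinlock.h>

	static DEFINE_SPINLOCK(example_gate_lock);

	static struct clk *example_register_uart_gate(void __iomem *ctrl)
	{
		return clk_register_gate(NULL, "uart_gate", "uart_div", 0,
					 ctrl, 3, CLK_GATE_SET_TO_DISABLE,
					 &example_gate_lock);
	}
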
diff --git a/drivers/clk/clk-mux.c b/drivers/clk/clk-mux.c
index c71ad1f..fd36a8e 100644
--- a/drivers/clk/clk-mux.c
+++ b/drivers/clk/clk-mux.c
@@ -55,7 +55,6 @@
 
 	return val;
 }
-EXPORT_SYMBOL_GPL(clk_mux_get_parent);
 
 static int clk_mux_set_parent(struct clk_hw *hw, u8 index)
 {
@@ -82,35 +81,47 @@
 
 	return 0;
 }
-EXPORT_SYMBOL_GPL(clk_mux_set_parent);
 
-struct clk_ops clk_mux_ops = {
+const struct clk_ops clk_mux_ops = {
 	.get_parent = clk_mux_get_parent,
 	.set_parent = clk_mux_set_parent,
 };
 EXPORT_SYMBOL_GPL(clk_mux_ops);
 
 struct clk *clk_register_mux(struct device *dev, const char *name,
-		char **parent_names, u8 num_parents, unsigned long flags,
+		const char **parent_names, u8 num_parents, unsigned long flags,
 		void __iomem *reg, u8 shift, u8 width,
 		u8 clk_mux_flags, spinlock_t *lock)
 {
 	struct clk_mux *mux;
+	struct clk *clk;
+	struct clk_init_data init;
 
-	mux = kmalloc(sizeof(struct clk_mux), GFP_KERNEL);
-
+	/* allocate the mux */
+	mux = kzalloc(sizeof(struct clk_mux), GFP_KERNEL);
 	if (!mux) {
 		pr_err("%s: could not allocate mux clk\n", __func__);
 		return ERR_PTR(-ENOMEM);
 	}
 
+	init.name = name;
+	init.ops = &clk_mux_ops;
+	init.flags = flags;
+	init.parent_names = parent_names;
+	init.num_parents = num_parents;
+
 	/* struct clk_mux assignments */
 	mux->reg = reg;
 	mux->shift = shift;
 	mux->width = width;
 	mux->flags = clk_mux_flags;
 	mux->lock = lock;
+	mux->hw.init = &init;
 
-	return clk_register(dev, name, &clk_mux_ops, &mux->hw,
-			parent_names, num_parents, flags);
+	clk = clk_register(dev, &mux->hw);
+
+	if (IS_ERR(clk))
+		kfree(mux);
+
+	return clk;
 }
diff --git a/drivers/clk/clk.c b/drivers/clk/clk.c
index 9cf6f59e..dcbe056 100644
--- a/drivers/clk/clk.c
+++ b/drivers/clk/clk.c
@@ -194,9 +194,8 @@
 late_initcall(clk_debug_init);
 #else
 static inline int clk_debug_register(struct clk *clk) { return 0; }
-#endif /* CONFIG_COMMON_CLK_DEBUG */
+#endif
 
-#ifdef CONFIG_COMMON_CLK_DISABLE_UNUSED
 /* caller must hold prepare_lock */
 static void clk_disable_unused_subtree(struct clk *clk)
 {
@@ -246,9 +245,6 @@
 	return 0;
 }
 late_initcall(clk_disable_unused);
-#else
-static inline int clk_disable_unused(struct clk *clk) { return 0; }
-#endif /* CONFIG_COMMON_CLK_DISABLE_UNUSED */
 
 /***    helper functions   ***/
 
@@ -287,7 +283,7 @@
 	unsigned long ret;
 
 	if (!clk) {
-		ret = -EINVAL;
+		ret = 0;
 		goto out;
 	}
 
@@ -297,7 +293,7 @@
 		goto out;
 
 	if (!clk->parent)
-		ret = -ENODEV;
+		ret = 0;
 
 out:
 	return ret;
@@ -562,7 +558,7 @@
  * @clk: the clk whose rate is being returned
  *
  * Simply returns the cached rate of the clk.  Does not query the hardware.  If
- * clk is NULL then returns -EINVAL.
+ * clk is NULL then returns 0.
  */
 unsigned long clk_get_rate(struct clk *clk)
 {
@@ -584,18 +580,22 @@
  */
 unsigned long __clk_round_rate(struct clk *clk, unsigned long rate)
 {
-	unsigned long unused;
+	unsigned long parent_rate = 0;
 
 	if (!clk)
 		return -EINVAL;
 
-	if (!clk->ops->round_rate)
-		return clk->rate;
+	if (!clk->ops->round_rate) {
+		if (clk->flags & CLK_SET_RATE_PARENT)
+			return __clk_round_rate(clk->parent, rate);
+		else
+			return clk->rate;
+	}
 
-	if (clk->flags & CLK_SET_RATE_PARENT)
-		return clk->ops->round_rate(clk->hw, rate, &unused);
-	else
-		return clk->ops->round_rate(clk->hw, rate, NULL);
+	if (clk->parent)
+		parent_rate = clk->parent->rate;
+
+	return clk->ops->round_rate(clk->hw, rate, &parent_rate);
 }
 
 /**
@@ -765,25 +765,41 @@
 static struct clk *clk_calc_new_rates(struct clk *clk, unsigned long rate)
 {
 	struct clk *top = clk;
-	unsigned long best_parent_rate = clk->parent->rate;
+	unsigned long best_parent_rate = 0;
 	unsigned long new_rate;
 
-	if (!clk->ops->round_rate && !(clk->flags & CLK_SET_RATE_PARENT)) {
-		clk->new_rate = clk->rate;
+	/* sanity */
+	if (IS_ERR_OR_NULL(clk))
+		return NULL;
+
+	/* save parent rate, if it exists */
+	if (clk->parent)
+		best_parent_rate = clk->parent->rate;
+
+	/* never propagate up to the parent */
+	if (!(clk->flags & CLK_SET_RATE_PARENT)) {
+		if (!clk->ops->round_rate) {
+			clk->new_rate = clk->rate;
+			return NULL;
+		}
+		new_rate = clk->ops->round_rate(clk->hw, rate, &best_parent_rate);
+		goto out;
+	}
+
+	/* need clk->parent from here on out */
+	if (!clk->parent) {
+		pr_debug("%s: %s has NULL parent\n", __func__, clk->name);
 		return NULL;
 	}
 
-	if (!clk->ops->round_rate && (clk->flags & CLK_SET_RATE_PARENT)) {
+	if (!clk->ops->round_rate) {
 		top = clk_calc_new_rates(clk->parent, rate);
-		new_rate = clk->new_rate = clk->parent->new_rate;
+		new_rate = clk->parent->new_rate;
 
 		goto out;
 	}
 
-	if (clk->flags & CLK_SET_RATE_PARENT)
-		new_rate = clk->ops->round_rate(clk->hw, rate, &best_parent_rate);
-	else
-		new_rate = clk->ops->round_rate(clk->hw, rate, NULL);
+	new_rate = clk->ops->round_rate(clk->hw, rate, &best_parent_rate);
 
 	if (best_parent_rate != clk->parent->rate) {
 		top = clk_calc_new_rates(clk->parent, best_parent_rate);
@@ -834,18 +850,21 @@
 {
 	struct clk *child;
 	unsigned long old_rate;
+	unsigned long best_parent_rate = 0;
 	struct hlist_node *tmp;
 
 	old_rate = clk->rate;
 
+	if (clk->parent)
+		best_parent_rate = clk->parent->rate;
+
 	if (clk->ops->set_rate)
-		clk->ops->set_rate(clk->hw, clk->new_rate);
+		clk->ops->set_rate(clk->hw, clk->new_rate, best_parent_rate);
 
 	if (clk->ops->recalc_rate)
-		clk->rate = clk->ops->recalc_rate(clk->hw,
-				clk->parent->rate);
+		clk->rate = clk->ops->recalc_rate(clk->hw, best_parent_rate);
 	else
-		clk->rate = clk->parent->rate;
+		clk->rate = best_parent_rate;
 
 	if (clk->notifier_count && old_rate != clk->rate)
 		__clk_notify(clk, POST_RATE_CHANGE, old_rate, clk->rate);
@@ -859,38 +878,19 @@
  * @clk: the clk whose rate is being changed
  * @rate: the new rate for clk
  *
- * In the simplest case clk_set_rate will only change the rate of clk.
+ * In the simplest case clk_set_rate will only adjust the rate of clk.
  *
- * If clk has the CLK_SET_RATE_GATE flag set and it is enabled this call
- * will fail; only when the clk is disabled will it be able to change
- * its rate.
+ * Setting the CLK_SET_RATE_PARENT flag allows the rate change operation to
+ * propagate up to clk's parent; whether or not this happens depends on the
+ * outcome of clk's .round_rate implementation.  If *parent_rate is unchanged
+ * after calling .round_rate then upstream parent propagation is ignored.  If
+ * *parent_rate comes back with a new rate for clk's parent then we propagate
+ * up to clk's parent and set its rate.  Upward propagation will continue
+ * until either a clk does not support the CLK_SET_RATE_PARENT flag or
+ * .round_rate stops requesting changes to clk's parent_rate.
  *
- * Setting the CLK_SET_RATE_PARENT flag allows clk_set_rate to
- * recursively propagate up to clk's parent; whether or not this happens
- * depends on the outcome of clk's .round_rate implementation.  If
- * *parent_rate is 0 after calling .round_rate then upstream parent
- * propagation is ignored.  If *parent_rate comes back with a new rate
- * for clk's parent then we propagate up to clk's parent and set it's
- * rate.  Upward propagation will continue until either a clk does not
- * support the CLK_SET_RATE_PARENT flag or .round_rate stops requesting
- * changes to clk's parent_rate.  If there is a failure during upstream
- * propagation then clk_set_rate will unwind and restore each clk's rate
- * that had been successfully changed.  Afterwards a rate change abort
- * notification will be propagated downstream, starting from the clk
- * that failed.
- *
- * At the end of all of the rate setting, clk_set_rate internally calls
- * __clk_recalc_rates and propagates the rate changes downstream,
- * starting from the highest clk whose rate was changed.  This has the
- * added benefit of propagating post-rate change notifiers.
- *
- * Note that while post-rate change and rate change abort notifications
- * are guaranteed to be sent to a clk only once per call to
- * clk_set_rate, pre-change notifications will be sent for every clk
- * whose rate is changed.  Stacking pre-change notifications is noisy
- * for the drivers subscribed to them, but this allows drivers to react
- * to intermediate clk rate changes up until the point where the final
- * rate is achieved at the end of upstream propagation.
+ * Rate changes are accomplished via tree traversal that also recalculates the
+ * rates for the clocks and fires off POST_RATE_CHANGE notifiers.
  *
 * Returns 0 on success, a negative errno otherwise.
  */
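
A consumer-side sketch of the propagation described above (the device and the
requested rate are hypothetical): if the clocks between the leaf and its parent
carry CLK_SET_RATE_PARENT, this single call may retune the parent as well, with
POST_RATE_CHANGE notifiers fired as part of the traversal.

	/* sketch: request 96 MHz on a leaf clock from driver probe() code */
	#include <linux/clk.h>
	#include <linux/device.h>
	#include <linux/err.h>

	static int example_set_leaf_rate(struct device *dev)
	{
		struct clk *clk;
		int ret;

		clk = clk_get(dev, NULL);
		if (IS_ERR(clk))
			return PTR_ERR(clk);

		ret = clk_set_rate(clk, 96000000);
		if (ret)
			dev_warn(dev, "clk_set_rate failed: %d\n", ret);

		clk_put(clk);
		return ret;
	}
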
@@ -906,6 +906,11 @@
 	if (rate == clk->rate)
 		goto out;
 
+	if ((clk->flags & CLK_SET_RATE_GATE) && clk->prepare_count) {
+		ret = -EBUSY;
+		goto out;
+	}
+
 	/* calculate new rates and get the topmost changed clock */
 	top = clk_calc_new_rates(clk, rate);
 	if (!top) {
@@ -997,7 +1002,7 @@
 
 	if (!clk->parents)
 		clk->parents =
-			kmalloc((sizeof(struct clk*) * clk->num_parents),
+			kzalloc((sizeof(struct clk*) * clk->num_parents),
 					GFP_KERNEL);
 
 	if (!clk->parents)
@@ -1063,9 +1068,13 @@
 	old_parent = clk->parent;
 
 	/* find index of new parent clock using cached parent ptrs */
-	for (i = 0; i < clk->num_parents; i++)
-		if (clk->parents[i] == parent)
-			break;
+	if (clk->parents)
+		for (i = 0; i < clk->num_parents; i++)
+			if (clk->parents[i] == parent)
+				break;
+	else
+		clk->parents = kzalloc((sizeof(struct clk*) * clk->num_parents),
+								GFP_KERNEL);
 
 	/*
 	 * find index of new parent clock using string name comparison
@@ -1074,7 +1083,8 @@
 	if (i == clk->num_parents)
 		for (i = 0; i < clk->num_parents; i++)
 			if (!strcmp(clk->parent_names[i], parent->name)) {
-				clk->parents[i] = __clk_lookup(parent->name);
+				if (clk->parents)
+					clk->parents[i] = __clk_lookup(parent->name);
 				break;
 			}
 
@@ -1175,40 +1185,41 @@
  *
  * Initializes the lists in struct clk, queries the hardware for the
  * parent and rate and sets them both.
- *
- * Any struct clk passed into __clk_init must have the following members
- * populated:
- * 	.name
- * 	.ops
- * 	.hw
- * 	.parent_names
- * 	.num_parents
- * 	.flags
- *
- * Essentially, everything that would normally be passed into clk_register is
- * assumed to be initialized already in __clk_init.  The other members may be
- * populated, but are optional.
- *
- * __clk_init is only exposed via clk-private.h and is intended for use with
- * very large numbers of clocks that need to be statically initialized.  It is
- * a layering violation to include clk-private.h from any code which implements
- * a clock's .ops; as such any statically initialized clock data MUST be in a
- * separate C file from the logic that implements it's operations.
  */
-void __clk_init(struct device *dev, struct clk *clk)
+int __clk_init(struct device *dev, struct clk *clk)
 {
-	int i;
+	int i, ret = 0;
 	struct clk *orphan;
 	struct hlist_node *tmp, *tmp2;
 
 	if (!clk)
-		return;
+		return -EINVAL;
 
 	mutex_lock(&prepare_lock);
 
 	/* check to see if a clock with this name is already registered */
-	if (__clk_lookup(clk->name))
+	if (__clk_lookup(clk->name)) {
+		pr_debug("%s: clk %s already initialized\n",
+				__func__, clk->name);
+		ret = -EEXIST;
 		goto out;
+	}
+
+	/* check that clk_ops are sane.  See Documentation/clk.txt */
+	if (clk->ops->set_rate &&
+			!(clk->ops->round_rate && clk->ops->recalc_rate)) {
+		pr_warning("%s: %s must implement .round_rate & .recalc_rate\n",
+				__func__, clk->name);
+		ret = -EINVAL;
+		goto out;
+	}
+
+	if (clk->ops->set_parent && !clk->ops->get_parent) {
+		pr_warning("%s: %s must implement .get_parent & .set_parent\n",
+				__func__, clk->name);
+		ret = -EINVAL;
+		goto out;
+	}
 
 	/* throw a WARN if any entries in parent_names are NULL */
 	for (i = 0; i < clk->num_parents; i++)
@@ -1302,48 +1313,130 @@
 out:
 	mutex_unlock(&prepare_lock);
 
-	return;
+	return ret;
 }
 
 /**
+ * __clk_register - register a clock and return a cookie.
+ *
+ * Same as clk_register, except that the .clk field inside hw shall point to a
+ * preallocated (generally statically allocated) struct clk. None of the fields
+ * of the struct clk need to be initialized.
+ *
+ * The data pointed to by the .init and .clk fields shall NOT be marked as init
+ * data.
+ *
+ * __clk_register is only exposed via clk-private.h and is intended for use with
+ * very large numbers of clocks that need to be statically initialized.  It is
+ * a layering violation to include clk-private.h from any code which implements
+ * a clock's .ops; as such any statically initialized clock data MUST be in a
+ * separate C file from the logic that implements its operations.  Returns the
+ * new struct clk on success, or an ERR_PTR() encoded error code on failure.
+ */
+struct clk *__clk_register(struct device *dev, struct clk_hw *hw)
+{
+	int ret;
+	struct clk *clk;
+
+	clk = hw->clk;
+	clk->name = hw->init->name;
+	clk->ops = hw->init->ops;
+	clk->hw = hw;
+	clk->flags = hw->init->flags;
+	clk->parent_names = hw->init->parent_names;
+	clk->num_parents = hw->init->num_parents;
+
+	ret = __clk_init(dev, clk);
+	if (ret)
+		return ERR_PTR(ret);
+
+	return clk;
+}
+EXPORT_SYMBOL_GPL(__clk_register);
+
+/**
  * clk_register - allocate a new clock, register it and return an opaque cookie
  * @dev: device that is registering this clock
- * @name: clock name
- * @ops: operations this clock supports
  * @hw: link to hardware-specific clock data
- * @parent_names: array of string names for all possible parents
- * @num_parents: number of possible parents
- * @flags: framework-level hints and quirks
  *
  * clk_register is the primary interface for populating the clock tree with new
  * clock nodes.  It returns a pointer to the newly allocated struct clk which
 * cannot be dereferenced by driver code but may be used in conjunction with the
- * rest of the clock API.
+ * rest of the clock API.  In the event of an error clk_register will return an
+ * error code; drivers must test for an error code after calling clk_register.
  */
-struct clk *clk_register(struct device *dev, const char *name,
-		const struct clk_ops *ops, struct clk_hw *hw,
-		char **parent_names, u8 num_parents, unsigned long flags)
+struct clk *clk_register(struct device *dev, struct clk_hw *hw)
 {
+	int i, ret;
 	struct clk *clk;
 
 	clk = kzalloc(sizeof(*clk), GFP_KERNEL);
-	if (!clk)
-		return NULL;
+	if (!clk) {
+		pr_err("%s: could not allocate clk\n", __func__);
+		ret = -ENOMEM;
+		goto fail_out;
+	}
 
-	clk->name = name;
-	clk->ops = ops;
+	clk->name = kstrdup(hw->init->name, GFP_KERNEL);
+	if (!clk->name) {
+		pr_err("%s: could not allocate clk->name\n", __func__);
+		ret = -ENOMEM;
+		goto fail_name;
+	}
+	clk->ops = hw->init->ops;
 	clk->hw = hw;
-	clk->flags = flags;
-	clk->parent_names = parent_names;
-	clk->num_parents = num_parents;
+	clk->flags = hw->init->flags;
+	clk->num_parents = hw->init->num_parents;
 	hw->clk = clk;
 
-	__clk_init(dev, clk);
+	/* allocate local copy in case parent_names is __initdata */
+	clk->parent_names = kzalloc((sizeof(char*) * clk->num_parents),
+			GFP_KERNEL);
 
-	return clk;
+	if (!clk->parent_names) {
+		pr_err("%s: could not allocate clk->parent_names\n", __func__);
+		ret = -ENOMEM;
+		goto fail_parent_names;
+	}
+
+
+	/* copy each string name in case parent_names is __initdata */
+	for (i = 0; i < clk->num_parents; i++) {
+		clk->parent_names[i] = kstrdup(hw->init->parent_names[i],
+						GFP_KERNEL);
+		if (!clk->parent_names[i]) {
+			pr_err("%s: could not copy parent_names\n", __func__);
+			ret = -ENOMEM;
+			goto fail_parent_names_copy;
+		}
+	}
+
+	ret = __clk_init(dev, clk);
+	if (!ret)
+		return clk;
+
+fail_parent_names_copy:
+	while (--i >= 0)
+		kfree(clk->parent_names[i]);
+	kfree(clk->parent_names);
+fail_parent_names:
+	kfree(clk->name);
+fail_name:
+	kfree(clk);
+fail_out:
+	return ERR_PTR(ret);
 }
 EXPORT_SYMBOL_GPL(clk_register);
 
+/**
+ * clk_unregister - unregister a currently registered clock
+ * @clk: clock to unregister
+ *
+ * Currently unimplemented.
+ */
+void clk_unregister(struct clk *clk) {}
+EXPORT_SYMBOL_GPL(clk_unregister);
+
 /***        clk rate change notifiers        ***/
 
 /**
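
Since clk_register() now returns ERR_PTR() values instead of NULL, and the
updated kernel-doc states that drivers must test for an error code, a
provider-side sketch of the required error handling looks like this (the
wrapper name is hypothetical):

	/* sketch: every clk_register() caller must check for an ERR_PTR() result */
	#include <linux/clk-provider.h>
	#include <linux/err.h>

	static int example_register_hw(struct device *dev, struct clk_hw *hw)
	{
		struct clk *clk;

		clk = clk_register(dev, hw);
		if (IS_ERR(clk))
			return PTR_ERR(clk);

		return 0;
	}
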
diff --git a/drivers/clk/mxs/Makefile b/drivers/clk/mxs/Makefile
new file mode 100644
index 0000000..7bedeec
--- /dev/null
+++ b/drivers/clk/mxs/Makefile
@@ -0,0 +1,8 @@
+#
+# Makefile for mxs specific clk
+#
+
+obj-y += clk.o clk-pll.o clk-ref.o clk-div.o clk-frac.o
+
+obj-$(CONFIG_SOC_IMX23) += clk-imx23.o
+obj-$(CONFIG_SOC_IMX28) += clk-imx28.o
diff --git a/drivers/clk/mxs/clk-div.c b/drivers/clk/mxs/clk-div.c
new file mode 100644
index 0000000..90e1da9
--- /dev/null
+++ b/drivers/clk/mxs/clk-div.c
@@ -0,0 +1,110 @@
+/*
+ * Copyright 2012 Freescale Semiconductor, Inc.
+ *
+ * The code contained herein is licensed under the GNU General Public
+ * License. You may obtain a copy of the GNU General Public License
+ * Version 2 or later at the following locations:
+ *
+ * http://www.opensource.org/licenses/gpl-license.html
+ * http://www.gnu.org/copyleft/gpl.html
+ */
+
+#include <linux/clk.h>
+#include <linux/clk-provider.h>
+#include <linux/err.h>
+#include <linux/slab.h>
+#include "clk.h"
+
+/**
+ * struct clk_div - mxs integer divider clock
+ * @divider: the parent class
+ * @ops: pointer to clk_ops of parent class
+ * @reg: register address
+ * @busy: busy bit shift
+ *
+ * The mxs divider clock is a subclass of basic clk_divider with an
+ * additional busy bit.
+ */
+struct clk_div {
+	struct clk_divider divider;
+	const struct clk_ops *ops;
+	void __iomem *reg;
+	u8 busy;
+};
+
+static inline struct clk_div *to_clk_div(struct clk_hw *hw)
+{
+	struct clk_divider *divider = container_of(hw, struct clk_divider, hw);
+
+	return container_of(divider, struct clk_div, divider);
+}
+
+static unsigned long clk_div_recalc_rate(struct clk_hw *hw,
+					 unsigned long parent_rate)
+{
+	struct clk_div *div = to_clk_div(hw);
+
+	return div->ops->recalc_rate(&div->divider.hw, parent_rate);
+}
+
+static long clk_div_round_rate(struct clk_hw *hw, unsigned long rate,
+			       unsigned long *prate)
+{
+	struct clk_div *div = to_clk_div(hw);
+
+	return div->ops->round_rate(&div->divider.hw, rate, prate);
+}
+
+static int clk_div_set_rate(struct clk_hw *hw, unsigned long rate,
+			    unsigned long parent_rate)
+{
+	struct clk_div *div = to_clk_div(hw);
+	int ret;
+
+	ret = div->ops->set_rate(&div->divider.hw, rate, parent_rate);
+	if (!ret)
+		ret = mxs_clk_wait(div->reg, div->busy);
+
+	return ret;
+}
+
+static struct clk_ops clk_div_ops = {
+	.recalc_rate = clk_div_recalc_rate,
+	.round_rate = clk_div_round_rate,
+	.set_rate = clk_div_set_rate,
+};
+
+struct clk *mxs_clk_div(const char *name, const char *parent_name,
+			void __iomem *reg, u8 shift, u8 width, u8 busy)
+{
+	struct clk_div *div;
+	struct clk *clk;
+	struct clk_init_data init;
+
+	div = kzalloc(sizeof(*div), GFP_KERNEL);
+	if (!div)
+		return ERR_PTR(-ENOMEM);
+
+	init.name = name;
+	init.ops = &clk_div_ops;
+	init.flags = CLK_SET_RATE_PARENT;
+	init.parent_names = (parent_name ? &parent_name : NULL);
+	init.num_parents = (parent_name ? 1 : 0);
+
+	div->reg = reg;
+	div->busy = busy;
+
+	div->divider.reg = reg;
+	div->divider.shift = shift;
+	div->divider.width = width;
+	div->divider.flags = CLK_DIVIDER_ONE_BASED;
+	div->divider.lock = &mxs_lock;
+	div->divider.hw.init = &init;
+	div->ops = &clk_divider_ops;
+
+	clk = clk_register(NULL, &div->divider.hw);
+	if (IS_ERR(clk))
+		kfree(div);
+
+	return clk;
+}
diff --git a/drivers/clk/mxs/clk-frac.c b/drivers/clk/mxs/clk-frac.c
new file mode 100644
index 0000000..e6aa6b5
--- /dev/null
+++ b/drivers/clk/mxs/clk-frac.c
@@ -0,0 +1,139 @@
+/*
+ * Copyright 2012 Freescale Semiconductor, Inc.
+ *
+ * The code contained herein is licensed under the GNU General Public
+ * License. You may obtain a copy of the GNU General Public License
+ * Version 2 or later at the following locations:
+ *
+ * http://www.opensource.org/licenses/gpl-license.html
+ * http://www.gnu.org/copyleft/gpl.html
+ */
+
+#include <linux/clk.h>
+#include <linux/clk-provider.h>
+#include <linux/err.h>
+#include <linux/io.h>
+#include <linux/slab.h>
+#include "clk.h"
+
+/**
+ * struct clk_frac - mxs fractional divider clock
+ * @hw: clk_hw for the fractional divider clock
+ * @reg: register address
+ * @shift: the divider bit shift
+ * @width: the divider bit width
+ * @busy: busy bit shift
+ *
+ * The clock is an adjustable fractional divider with a busy bit that must be
+ * polled after the divider is adjusted.
+ */
+struct clk_frac {
+	struct clk_hw hw;
+	void __iomem *reg;
+	u8 shift;
+	u8 width;
+	u8 busy;
+};
+
+#define to_clk_frac(_hw) container_of(_hw, struct clk_frac, hw)
+
+static unsigned long clk_frac_recalc_rate(struct clk_hw *hw,
+					  unsigned long parent_rate)
+{
+	struct clk_frac *frac = to_clk_frac(hw);
+	u32 div;
+
+	div = readl_relaxed(frac->reg) >> frac->shift;
+	div &= (1 << frac->width) - 1;
+
+	return (parent_rate >> frac->width) * div;
+}
+
+static long clk_frac_round_rate(struct clk_hw *hw, unsigned long rate,
+				unsigned long *prate)
+{
+	struct clk_frac *frac = to_clk_frac(hw);
+	unsigned long parent_rate = *prate;
+	u32 div;
+	u64 tmp;
+
+	if (rate > parent_rate)
+		return -EINVAL;
+
+	tmp = rate;
+	tmp <<= frac->width;
+	do_div(tmp, parent_rate);
+	div = tmp;
+
+	if (!div)
+		return -EINVAL;
+
+	return (parent_rate >> frac->width) * div;
+}
+
+static int clk_frac_set_rate(struct clk_hw *hw, unsigned long rate,
+			     unsigned long parent_rate)
+{
+	struct clk_frac *frac = to_clk_frac(hw);
+	unsigned long flags;
+	u32 div, val;
+	u64 tmp;
+
+	if (rate > parent_rate)
+		return -EINVAL;
+
+	tmp = rate;
+	tmp <<= frac->width;
+	do_div(tmp, parent_rate);
+	div = tmp;
+
+	if (!div)
+		return -EINVAL;
+
+	spin_lock_irqsave(&mxs_lock, flags);
+
+	val = readl_relaxed(frac->reg);
+	val &= ~(((1 << frac->width) - 1) << frac->shift);
+	val |= div << frac->shift;
+	writel_relaxed(val, frac->reg);
+
+	spin_unlock_irqrestore(&mxs_lock, flags);
+
+	return mxs_clk_wait(frac->reg, frac->busy);
+}
+
+static struct clk_ops clk_frac_ops = {
+	.recalc_rate = clk_frac_recalc_rate,
+	.round_rate = clk_frac_round_rate,
+	.set_rate = clk_frac_set_rate,
+};
+
+struct clk *mxs_clk_frac(const char *name, const char *parent_name,
+			 void __iomem *reg, u8 shift, u8 width, u8 busy)
+{
+	struct clk_frac *frac;
+	struct clk *clk;
+	struct clk_init_data init;
+
+	frac = kzalloc(sizeof(*frac), GFP_KERNEL);
+	if (!frac)
+		return ERR_PTR(-ENOMEM);
+
+	init.name = name;
+	init.ops = &clk_frac_ops;
+	init.flags = CLK_SET_RATE_PARENT;
+	init.parent_names = (parent_name ? &parent_name: NULL);
+	init.num_parents = (parent_name ? 1 : 0);
+
+	frac->reg = reg;
+	frac->shift = shift;
+	frac->width = width;
+	frac->busy = busy;
+	frac->hw.init = &init;
+
+	clk = clk_register(NULL, &frac->hw);
+	if (IS_ERR(clk))
+		kfree(frac);
+
+	return clk;
+}
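
As a rough sketch of the arithmetic only (not part of the patch): the round/set
path above picks div = rate * 2^width / parent_rate and reports back
(parent_rate >> width) * div. For example, with a 480 MHz parent, a 16-bit
field and a 22.5 MHz request this gives div = 3072 and approximately 22.5 MHz.

	/* sketch: mirrors clk_frac_round_rate() for a width-bit divider field */
	#include <linux/kernel.h>
	#include <asm/div64.h>

	static unsigned long example_frac_rate(unsigned long parent_rate,
					       unsigned long rate, u8 width)
	{
		u64 tmp = rate;
		u32 div;

		tmp <<= width;
		do_div(tmp, parent_rate);
		div = tmp;

		/*
		 * e.g. parent_rate = 480000000, rate = 22500000, width = 16:
		 * div = 3072, returned rate = (480000000 >> 16) * 3072 = 22499328
		 */
		return (parent_rate >> width) * div;
	}
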
diff --git a/drivers/clk/mxs/clk-imx23.c b/drivers/clk/mxs/clk-imx23.c
new file mode 100644
index 0000000..db2391c
--- /dev/null
+++ b/drivers/clk/mxs/clk-imx23.c
@@ -0,0 +1,205 @@
+/*
+ * Copyright 2012 Freescale Semiconductor, Inc.
+ *
+ * The code contained herein is licensed under the GNU General Public
+ * License. You may obtain a copy of the GNU General Public License
+ * Version 2 or later at the following locations:
+ *
+ * http://www.opensource.org/licenses/gpl-license.html
+ * http://www.gnu.org/copyleft/gpl.html
+ */
+
+#include <linux/clk.h>
+#include <linux/clkdev.h>
+#include <linux/err.h>
+#include <linux/init.h>
+#include <linux/io.h>
+#include <mach/common.h>
+#include <mach/mx23.h>
+#include "clk.h"
+
+#define DIGCTRL			MX23_IO_ADDRESS(MX23_DIGCTL_BASE_ADDR)
+#define CLKCTRL			MX23_IO_ADDRESS(MX23_CLKCTRL_BASE_ADDR)
+#define PLLCTRL0		(CLKCTRL + 0x0000)
+#define CPU			(CLKCTRL + 0x0020)
+#define HBUS			(CLKCTRL + 0x0030)
+#define XBUS			(CLKCTRL + 0x0040)
+#define XTAL			(CLKCTRL + 0x0050)
+#define PIX			(CLKCTRL + 0x0060)
+#define SSP			(CLKCTRL + 0x0070)
+#define GPMI			(CLKCTRL + 0x0080)
+#define SPDIF			(CLKCTRL + 0x0090)
+#define EMI			(CLKCTRL + 0x00a0)
+#define SAIF			(CLKCTRL + 0x00c0)
+#define TV			(CLKCTRL + 0x00d0)
+#define ETM			(CLKCTRL + 0x00e0)
+#define FRAC			(CLKCTRL + 0x00f0)
+#define CLKSEQ			(CLKCTRL + 0x0110)
+
+#define BP_CPU_INTERRUPT_WAIT	12
+#define BP_CLKSEQ_BYPASS_SAIF	0
+#define BP_CLKSEQ_BYPASS_SSP	5
+#define BP_SAIF_DIV_FRAC_EN	16
+#define BP_FRAC_IOFRAC		24
+
+static void __init clk_misc_init(void)
+{
+	u32 val;
+
+	/* Gate off cpu clock in WFI for power saving */
+	__mxs_setl(1 << BP_CPU_INTERRUPT_WAIT, CPU);
+
+	/* Clear BYPASS for SAIF */
+	__mxs_clrl(1 << BP_CLKSEQ_BYPASS_SAIF, CLKSEQ);
+
+	/* SAIF has to use frac div for functional operation */
+	val = readl_relaxed(SAIF);
+	val |= 1 << BP_SAIF_DIV_FRAC_EN;
+	writel_relaxed(val, SAIF);
+
+	/*
+	 * Source ssp clock from ref_io rather than ref_xtal,
+	 * as ref_xtal only provides 24 MHz at most.
+	 */
+	__mxs_clrl(1 << BP_CLKSEQ_BYPASS_SSP, CLKSEQ);
+
+	/*
+	 * 480 MHz seems too high to be the ssp clock source directly,
+	 * so set frac to get a 288 MHz ref_io.
+	 */
+	__mxs_clrl(0x3f << BP_FRAC_IOFRAC, FRAC);
+	__mxs_setl(30 << BP_FRAC_IOFRAC, FRAC);
+}
+
+static struct clk_lookup uart_lookups[] = {
+	{ .dev_id = "duart", },
+	{ .dev_id = "mxs-auart.0", },
+	{ .dev_id = "mxs-auart.1", },
+	{ .dev_id = "8006c000.serial", },
+	{ .dev_id = "8006e000.serial", },
+	{ .dev_id = "80070000.serial", },
+};
+
+static struct clk_lookup hbus_lookups[] = {
+	{ .dev_id = "imx23-dma-apbh", },
+	{ .dev_id = "80004000.dma-apbh", },
+};
+
+static struct clk_lookup xbus_lookups[] = {
+	{ .dev_id = "duart", .con_id = "apb_pclk"},
+	{ .dev_id = "80070000.serial", .con_id = "apb_pclk"},
+	{ .dev_id = "imx23-dma-apbx", },
+	{ .dev_id = "80024000.dma-apbx", },
+};
+
+static struct clk_lookup ssp_lookups[] = {
+	{ .dev_id = "imx23-mmc.0", },
+	{ .dev_id = "imx23-mmc.1", },
+	{ .dev_id = "80010000.ssp", },
+	{ .dev_id = "80034000.ssp", },
+};
+
+static struct clk_lookup lcdif_lookups[] = {
+	{ .dev_id = "imx23-fb", },
+	{ .dev_id = "80030000.lcdif", },
+};
+
+static struct clk_lookup gpmi_lookups[] = {
+	{ .dev_id = "imx23-gpmi-nand", },
+	{ .dev_id = "8000c000.gpmi", },
+};
+
+static const char *sel_pll[]  __initconst = { "pll", "ref_xtal", };
+static const char *sel_cpu[]  __initconst = { "ref_cpu", "ref_xtal", };
+static const char *sel_pix[]  __initconst = { "ref_pix", "ref_xtal", };
+static const char *sel_io[]   __initconst = { "ref_io", "ref_xtal", };
+static const char *cpu_sels[] __initconst = { "cpu_pll", "cpu_xtal", };
+static const char *emi_sels[] __initconst = { "emi_pll", "emi_xtal", };
+
+enum imx23_clk {
+	ref_xtal, pll, ref_cpu, ref_emi, ref_pix, ref_io, saif_sel,
+	lcdif_sel, gpmi_sel, ssp_sel, emi_sel, cpu, etm_sel, cpu_pll,
+	cpu_xtal, hbus, xbus, lcdif_div, ssp_div, gpmi_div, emi_pll,
+	emi_xtal, etm_div, saif_div, clk32k_div, rtc, adc, spdif_div,
+	clk32k, dri, pwm, filt, uart, ssp, gpmi, spdif, emi, saif,
+	lcdif, etm, usb, usb_pwr,
+	clk_max
+};
+
+static struct clk *clks[clk_max];
+
+static enum imx23_clk clks_init_on[] __initdata = {
+	cpu, hbus, xbus, emi, uart,
+};
+
+int __init mx23_clocks_init(void)
+{
+	int i;
+
+	clk_misc_init();
+
+	clks[ref_xtal] = mxs_clk_fixed("ref_xtal", 24000000);
+	clks[pll] = mxs_clk_pll("pll", "ref_xtal", PLLCTRL0, 16, 480000000);
+	clks[ref_cpu] = mxs_clk_ref("ref_cpu", "pll", FRAC, 0);
+	clks[ref_emi] = mxs_clk_ref("ref_emi", "pll", FRAC, 1);
+	clks[ref_pix] = mxs_clk_ref("ref_pix", "pll", FRAC, 2);
+	clks[ref_io] = mxs_clk_ref("ref_io", "pll", FRAC, 3);
+	clks[saif_sel] = mxs_clk_mux("saif_sel", CLKSEQ, 0, 1, sel_pll, ARRAY_SIZE(sel_pll));
+	clks[lcdif_sel] = mxs_clk_mux("lcdif_sel", CLKSEQ, 1, 1, sel_pix, ARRAY_SIZE(sel_pix));
+	clks[gpmi_sel] = mxs_clk_mux("gpmi_sel", CLKSEQ, 4, 1, sel_io, ARRAY_SIZE(sel_io));
+	clks[ssp_sel] = mxs_clk_mux("ssp_sel", CLKSEQ, 5, 1, sel_io, ARRAY_SIZE(sel_io));
+	clks[emi_sel] = mxs_clk_mux("emi_sel", CLKSEQ, 6, 1, emi_sels, ARRAY_SIZE(emi_sels));
+	clks[cpu] = mxs_clk_mux("cpu", CLKSEQ, 7, 1, cpu_sels, ARRAY_SIZE(cpu_sels));
+	clks[etm_sel] = mxs_clk_mux("etm_sel", CLKSEQ, 8, 1, sel_cpu, ARRAY_SIZE(sel_cpu));
+	clks[cpu_pll] = mxs_clk_div("cpu_pll", "ref_cpu", CPU, 0, 6, 28);
+	clks[cpu_xtal] = mxs_clk_div("cpu_xtal", "ref_xtal", CPU, 16, 10, 29);
+	clks[hbus] = mxs_clk_div("hbus", "cpu", HBUS, 0, 5, 29);
+	clks[xbus] = mxs_clk_div("xbus", "ref_xtal", XBUS, 0, 10, 31);
+	clks[lcdif_div] = mxs_clk_div("lcdif_div", "lcdif_sel", PIX, 0, 12, 29);
+	clks[ssp_div] = mxs_clk_div("ssp_div", "ssp_sel", SSP, 0, 9, 29);
+	clks[gpmi_div] = mxs_clk_div("gpmi_div", "gpmi_sel", GPMI, 0, 10, 29);
+	clks[emi_pll] = mxs_clk_div("emi_pll", "ref_emi", EMI, 0, 6, 28);
+	clks[emi_xtal] = mxs_clk_div("emi_xtal", "ref_xtal", EMI, 8, 4, 29);
+	clks[etm_div] = mxs_clk_div("etm_div", "etm_sel", ETM, 0, 6, 29);
+	clks[saif_div] = mxs_clk_frac("saif_div", "saif_sel", SAIF, 0, 16, 29);
+	clks[clk32k_div] = mxs_clk_fixed_factor("clk32k_div", "ref_xtal", 1, 750);
+	clks[rtc] = mxs_clk_fixed_factor("rtc", "ref_xtal", 1, 768);
+	clks[adc] = mxs_clk_fixed_factor("adc", "clk32k", 1, 16);
+	clks[spdif_div] = mxs_clk_fixed_factor("spdif_div", "pll", 1, 4);
+	clks[clk32k] = mxs_clk_gate("clk32k", "clk32k_div", XTAL, 26);
+	clks[dri] = mxs_clk_gate("dri", "ref_xtal", XTAL, 28);
+	clks[pwm] = mxs_clk_gate("pwm", "ref_xtal", XTAL, 29);
+	clks[filt] = mxs_clk_gate("filt", "ref_xtal", XTAL, 30);
+	clks[uart] = mxs_clk_gate("uart", "ref_xtal", XTAL, 31);
+	clks[ssp] = mxs_clk_gate("ssp", "ssp_div", SSP, 31);
+	clks[gpmi] = mxs_clk_gate("gpmi", "gpmi_div", GPMI, 31);
+	clks[spdif] = mxs_clk_gate("spdif", "spdif_div", SPDIF, 31);
+	clks[emi] = mxs_clk_gate("emi", "emi_sel", EMI, 31);
+	clks[saif] = mxs_clk_gate("saif", "saif_div", SAIF, 31);
+	clks[lcdif] = mxs_clk_gate("lcdif", "lcdif_div", PIX, 31);
+	clks[etm] = mxs_clk_gate("etm", "etm_div", ETM, 31);
+	clks[usb] = mxs_clk_gate("usb", "usb_pwr", DIGCTRL, 2);
+	clks[usb_pwr] = clk_register_gate(NULL, "usb_pwr", "pll", 0, PLLCTRL0, 18, 0, &mxs_lock);
+
+	for (i = 0; i < ARRAY_SIZE(clks); i++)
+		if (IS_ERR(clks[i])) {
+			pr_err("i.MX23 clk %d: register failed with %ld\n",
+				i, PTR_ERR(clks[i]));
+			return PTR_ERR(clks[i]);
+		}
+
+	clk_register_clkdev(clks[clk32k], NULL, "timrot");
+	clk_register_clkdevs(clks[hbus], hbus_lookups, ARRAY_SIZE(hbus_lookups));
+	clk_register_clkdevs(clks[xbus], xbus_lookups, ARRAY_SIZE(xbus_lookups));
+	clk_register_clkdevs(clks[uart], uart_lookups, ARRAY_SIZE(uart_lookups));
+	clk_register_clkdevs(clks[ssp], ssp_lookups, ARRAY_SIZE(ssp_lookups));
+	clk_register_clkdevs(clks[gpmi], gpmi_lookups, ARRAY_SIZE(gpmi_lookups));
+	clk_register_clkdevs(clks[lcdif], lcdif_lookups, ARRAY_SIZE(lcdif_lookups));
+
+	for (i = 0; i < ARRAY_SIZE(clks_init_on); i++)
+		clk_prepare_enable(clks[clks_init_on[i]]);
+
+	mxs_timer_init(MX23_INT_TIMER0);
+
+	return 0;
+}
diff --git a/drivers/clk/mxs/clk-imx28.c b/drivers/clk/mxs/clk-imx28.c
new file mode 100644
index 0000000..7fad6c8
--- /dev/null
+++ b/drivers/clk/mxs/clk-imx28.c
@@ -0,0 +1,338 @@
+/*
+ * Copyright 2012 Freescale Semiconductor, Inc.
+ *
+ * The code contained herein is licensed under the GNU General Public
+ * License. You may obtain a copy of the GNU General Public License
+ * Version 2 or later at the following locations:
+ *
+ * http://www.opensource.org/licenses/gpl-license.html
+ * http://www.gnu.org/copyleft/gpl.html
+ */
+
+#include <linux/clk.h>
+#include <linux/clkdev.h>
+#include <linux/err.h>
+#include <linux/init.h>
+#include <linux/io.h>
+#include <mach/common.h>
+#include <mach/mx28.h>
+#include "clk.h"
+
+#define CLKCTRL			MX28_IO_ADDRESS(MX28_CLKCTRL_BASE_ADDR)
+#define PLL0CTRL0		(CLKCTRL + 0x0000)
+#define PLL1CTRL0		(CLKCTRL + 0x0020)
+#define PLL2CTRL0		(CLKCTRL + 0x0040)
+#define CPU			(CLKCTRL + 0x0050)
+#define HBUS			(CLKCTRL + 0x0060)
+#define XBUS			(CLKCTRL + 0x0070)
+#define XTAL			(CLKCTRL + 0x0080)
+#define SSP0			(CLKCTRL + 0x0090)
+#define SSP1			(CLKCTRL + 0x00a0)
+#define SSP2			(CLKCTRL + 0x00b0)
+#define SSP3			(CLKCTRL + 0x00c0)
+#define GPMI			(CLKCTRL + 0x00d0)
+#define SPDIF			(CLKCTRL + 0x00e0)
+#define EMI			(CLKCTRL + 0x00f0)
+#define SAIF0			(CLKCTRL + 0x0100)
+#define SAIF1			(CLKCTRL + 0x0110)
+#define LCDIF			(CLKCTRL + 0x0120)
+#define ETM			(CLKCTRL + 0x0130)
+#define ENET			(CLKCTRL + 0x0140)
+#define FLEXCAN			(CLKCTRL + 0x0160)
+#define FRAC0			(CLKCTRL + 0x01b0)
+#define FRAC1			(CLKCTRL + 0x01c0)
+#define CLKSEQ			(CLKCTRL + 0x01d0)
+
+#define BP_CPU_INTERRUPT_WAIT	12
+#define BP_SAIF_DIV_FRAC_EN	16
+#define BP_ENET_DIV_TIME	21
+#define BP_ENET_SLEEP		31
+#define BP_CLKSEQ_BYPASS_SAIF0	0
+#define BP_CLKSEQ_BYPASS_SSP0	3
+#define BP_FRAC0_IO1FRAC	16
+#define BP_FRAC0_IO0FRAC	24
+
+#define DIGCTRL			MX28_IO_ADDRESS(MX28_DIGCTL_BASE_ADDR)
+#define BP_SAIF_CLKMUX		10
+
+/*
+ * HW_SAIF_CLKMUX_SEL:
+ *  DIRECT(0x0): SAIF0 clock pins selected for SAIF0 input clocks, and SAIF1
+ *		clock pins selected for SAIF1 input clocks.
+ *  CROSSINPUT(0x1): SAIF1 clock inputs selected for SAIF0 input clocks, and
+ *		SAIF0 clock inputs selected for SAIF1 input clocks.
+ *  EXTMSTR0(0x2): SAIF0 clock pin selected for both SAIF0 and SAIF1 input
+ *		clocks.
+ *  EXTMSTR1(0x3): SAIF1 clock pin selected for both SAIF0 and SAIF1 input
+ *		clocks.
+ */
+int mxs_saif_clkmux_select(unsigned int clkmux)
+{
+	if (clkmux > 0x3)
+		return -EINVAL;
+
+	__mxs_clrl(0x3 << BP_SAIF_CLKMUX, DIGCTRL);
+	__mxs_setl(clkmux << BP_SAIF_CLKMUX, DIGCTRL);
+
+	return 0;
+}
+
+static void __init clk_misc_init(void)
+{
+	u32 val;
+
+	/* Gate off cpu clock in WFI for power saving */
+	__mxs_setl(1 << BP_CPU_INTERRUPT_WAIT, CPU);
+
+	/* 0 is a bad default value for a divider */
+	__mxs_setl(1 << BP_ENET_DIV_TIME, ENET);
+
+	/* Clear BYPASS for SAIF */
+	__mxs_clrl(0x3 << BP_CLKSEQ_BYPASS_SAIF0, CLKSEQ);
+
+	/* SAIF has to use frac div for functional operation */
+	val = readl_relaxed(SAIF0);
+	val |= 1 << BP_SAIF_DIV_FRAC_EN;
+	writel_relaxed(val, SAIF0);
+
+	val = readl_relaxed(SAIF1);
+	val |= 1 << BP_SAIF_DIV_FRAC_EN;
+	writel_relaxed(val, SAIF1);
+
+	/* Extra fec clock setting */
+	val = readl_relaxed(ENET);
+	val &= ~(1 << BP_ENET_SLEEP);
+	writel_relaxed(val, ENET);
+
+	/*
+	 * Source ssp clock from ref_io rather than ref_xtal,
+	 * as ref_xtal only provides 24 MHz at most.
+	 */
+	__mxs_clrl(0xf << BP_CLKSEQ_BYPASS_SSP0, CLKSEQ);
+
+	/*
+	 * 480 MHz seems too high to be the ssp clock source directly,
+	 * so set frac0 to get a 288 MHz ref_io0.
+	 */
+	val = readl_relaxed(FRAC0);
+	val &= ~(0x3f << BP_FRAC0_IO0FRAC);
+	val |= 30 << BP_FRAC0_IO0FRAC;
+	writel_relaxed(val, FRAC0);
+}
+
+static struct clk_lookup uart_lookups[] = {
+	{ .dev_id = "duart", },
+	{ .dev_id = "mxs-auart.0", },
+	{ .dev_id = "mxs-auart.1", },
+	{ .dev_id = "mxs-auart.2", },
+	{ .dev_id = "mxs-auart.3", },
+	{ .dev_id = "mxs-auart.4", },
+	{ .dev_id = "8006a000.serial", },
+	{ .dev_id = "8006c000.serial", },
+	{ .dev_id = "8006e000.serial", },
+	{ .dev_id = "80070000.serial", },
+	{ .dev_id = "80072000.serial", },
+	{ .dev_id = "80074000.serial", },
+};
+
+static struct clk_lookup hbus_lookups[] = {
+	{ .dev_id = "imx28-dma-apbh", },
+	{ .dev_id = "80004000.dma-apbh", },
+};
+
+static struct clk_lookup xbus_lookups[] = {
+	{ .dev_id = "duart", .con_id = "apb_pclk"},
+	{ .dev_id = "80074000.serial", .con_id = "apb_pclk"},
+	{ .dev_id = "imx28-dma-apbx", },
+	{ .dev_id = "80024000.dma-apbx", },
+};
+
+static struct clk_lookup ssp0_lookups[] = {
+	{ .dev_id = "imx28-mmc.0", },
+	{ .dev_id = "80010000.ssp", },
+};
+
+static struct clk_lookup ssp1_lookups[] = {
+	{ .dev_id = "imx28-mmc.1", },
+	{ .dev_id = "80012000.ssp", },
+};
+
+static struct clk_lookup ssp2_lookups[] = {
+	{ .dev_id = "imx28-mmc.2", },
+	{ .dev_id = "80014000.ssp", },
+};
+
+static struct clk_lookup ssp3_lookups[] = {
+	{ .dev_id = "imx28-mmc.3", },
+	{ .dev_id = "80016000.ssp", },
+};
+
+static struct clk_lookup lcdif_lookups[] = {
+	{ .dev_id = "imx28-fb", },
+	{ .dev_id = "80030000.lcdif", },
+};
+
+static struct clk_lookup gpmi_lookups[] = {
+	{ .dev_id = "imx28-gpmi-nand", },
+	{ .dev_id = "8000c000.gpmi", },
+};
+
+static struct clk_lookup fec_lookups[] = {
+	{ .dev_id = "imx28-fec.0", },
+	{ .dev_id = "imx28-fec.1", },
+	{ .dev_id = "800f0000.ethernet", },
+	{ .dev_id = "800f4000.ethernet", },
+};
+
+static struct clk_lookup can0_lookups[] = {
+	{ .dev_id = "flexcan.0", },
+	{ .dev_id = "80032000.can", },
+};
+
+static struct clk_lookup can1_lookups[] = {
+	{ .dev_id = "flexcan.1", },
+	{ .dev_id = "80034000.can", },
+};
+
+static struct clk_lookup saif0_lookups[] = {
+	{ .dev_id = "mxs-saif.0", },
+	{ .dev_id = "80042000.saif", },
+};
+
+static struct clk_lookup saif1_lookups[] = {
+	{ .dev_id = "mxs-saif.1", },
+	{ .dev_id = "80046000.saif", },
+};
+
+static const char *sel_cpu[]  __initconst = { "ref_cpu", "ref_xtal", };
+static const char *sel_io0[]  __initconst = { "ref_io0", "ref_xtal", };
+static const char *sel_io1[]  __initconst = { "ref_io1", "ref_xtal", };
+static const char *sel_pix[]  __initconst = { "ref_pix", "ref_xtal", };
+static const char *sel_gpmi[] __initconst = { "ref_gpmi", "ref_xtal", };
+static const char *sel_pll0[] __initconst = { "pll0", "ref_xtal", };
+static const char *cpu_sels[] __initconst = { "cpu_pll", "cpu_xtal", };
+static const char *emi_sels[] __initconst = { "emi_pll", "emi_xtal", };
+static const char *ptp_sels[] __initconst = { "ref_xtal", "pll0", };
+
+enum imx28_clk {
+	ref_xtal, pll0, pll1, pll2, ref_cpu, ref_emi, ref_io0, ref_io1,
+	ref_pix, ref_hsadc, ref_gpmi, saif0_sel, saif1_sel, gpmi_sel,
+	ssp0_sel, ssp1_sel, ssp2_sel, ssp3_sel, emi_sel, etm_sel,
+	lcdif_sel, cpu, ptp_sel, cpu_pll, cpu_xtal, hbus, xbus,
+	ssp0_div, ssp1_div, ssp2_div, ssp3_div, gpmi_div, emi_pll,
+	emi_xtal, lcdif_div, etm_div, ptp, saif0_div, saif1_div,
+	clk32k_div, rtc, lradc, spdif_div, clk32k, pwm, uart, ssp0,
+	ssp1, ssp2, ssp3, gpmi, spdif, emi, saif0, saif1, lcdif, etm,
+	fec, can0, can1, usb0, usb1, usb0_pwr, usb1_pwr, enet_out,
+	clk_max
+};
+
+static struct clk *clks[clk_max];
+
+static enum imx28_clk clks_init_on[] __initdata = {
+	cpu, hbus, xbus, emi, uart,
+};
+
+int __init mx28_clocks_init(void)
+{
+	int i;
+
+	clk_misc_init();
+
+	clks[ref_xtal] = mxs_clk_fixed("ref_xtal", 24000000);
+	clks[pll0] = mxs_clk_pll("pll0", "ref_xtal", PLL0CTRL0, 17, 480000000);
+	clks[pll1] = mxs_clk_pll("pll1", "ref_xtal", PLL1CTRL0, 17, 480000000);
+	clks[pll2] = mxs_clk_pll("pll2", "ref_xtal", PLL2CTRL0, 23, 50000000);
+	clks[ref_cpu] = mxs_clk_ref("ref_cpu", "pll0", FRAC0, 0);
+	clks[ref_emi] = mxs_clk_ref("ref_emi", "pll0", FRAC0, 1);
+	clks[ref_io1] = mxs_clk_ref("ref_io1", "pll0", FRAC0, 2);
+	clks[ref_io0] = mxs_clk_ref("ref_io0", "pll0", FRAC0, 3);
+	clks[ref_pix] = mxs_clk_ref("ref_pix", "pll0", FRAC1, 0);
+	clks[ref_hsadc] = mxs_clk_ref("ref_hsadc", "pll0", FRAC1, 1);
+	clks[ref_gpmi] = mxs_clk_ref("ref_gpmi", "pll0", FRAC1, 2);
+	clks[saif0_sel] = mxs_clk_mux("saif0_sel", CLKSEQ, 0, 1, sel_pll0, ARRAY_SIZE(sel_pll0));
+	clks[saif1_sel] = mxs_clk_mux("saif1_sel", CLKSEQ, 1, 1, sel_pll0, ARRAY_SIZE(sel_pll0));
+	clks[gpmi_sel] = mxs_clk_mux("gpmi_sel", CLKSEQ, 2, 1, sel_gpmi, ARRAY_SIZE(sel_gpmi));
+	clks[ssp0_sel] = mxs_clk_mux("ssp0_sel", CLKSEQ, 3, 1, sel_io0, ARRAY_SIZE(sel_io0));
+	clks[ssp1_sel] = mxs_clk_mux("ssp1_sel", CLKSEQ, 4, 1, sel_io0, ARRAY_SIZE(sel_io0));
+	clks[ssp2_sel] = mxs_clk_mux("ssp2_sel", CLKSEQ, 5, 1, sel_io1, ARRAY_SIZE(sel_io1));
+	clks[ssp3_sel] = mxs_clk_mux("ssp3_sel", CLKSEQ, 6, 1, sel_io1, ARRAY_SIZE(sel_io1));
+	clks[emi_sel] = mxs_clk_mux("emi_sel", CLKSEQ, 7, 1, emi_sels, ARRAY_SIZE(emi_sels));
+	clks[etm_sel] = mxs_clk_mux("etm_sel", CLKSEQ, 8, 1, sel_cpu, ARRAY_SIZE(sel_cpu));
+	clks[lcdif_sel] = mxs_clk_mux("lcdif_sel", CLKSEQ, 14, 1, sel_pix, ARRAY_SIZE(sel_pix));
+	clks[cpu] = mxs_clk_mux("cpu", CLKSEQ, 18, 1, cpu_sels, ARRAY_SIZE(cpu_sels));
+	clks[ptp_sel] = mxs_clk_mux("ptp_sel", ENET, 19, 1, ptp_sels, ARRAY_SIZE(ptp_sels));
+	clks[cpu_pll] = mxs_clk_div("cpu_pll", "ref_cpu", CPU, 0, 6, 28);
+	clks[cpu_xtal] = mxs_clk_div("cpu_xtal", "ref_xtal", CPU, 16, 10, 29);
+	clks[hbus] = mxs_clk_div("hbus", "cpu", HBUS, 0, 5, 31);
+	clks[xbus] = mxs_clk_div("xbus", "ref_xtal", XBUS, 0, 10, 31);
+	clks[ssp0_div] = mxs_clk_div("ssp0_div", "ssp0_sel", SSP0, 0, 9, 29);
+	clks[ssp1_div] = mxs_clk_div("ssp1_div", "ssp1_sel", SSP1, 0, 9, 29);
+	clks[ssp2_div] = mxs_clk_div("ssp2_div", "ssp2_sel", SSP2, 0, 9, 29);
+	clks[ssp3_div] = mxs_clk_div("ssp3_div", "ssp3_sel", SSP3, 0, 9, 29);
+	clks[gpmi_div] = mxs_clk_div("gpmi_div", "gpmi_sel", GPMI, 0, 10, 29);
+	clks[emi_pll] = mxs_clk_div("emi_pll", "ref_emi", EMI, 0, 6, 28);
+	clks[emi_xtal] = mxs_clk_div("emi_xtal", "ref_xtal", EMI, 8, 4, 29);
+	clks[lcdif_div] = mxs_clk_div("lcdif_div", "lcdif_sel", LCDIF, 0, 13, 29);
+	clks[etm_div] = mxs_clk_div("etm_div", "etm_sel", ETM, 0, 7, 29);
+	clks[ptp] = mxs_clk_div("ptp", "ptp_sel", ENET, 21, 6, 27);
+	clks[saif0_div] = mxs_clk_frac("saif0_div", "saif0_sel", SAIF0, 0, 16, 29);
+	clks[saif1_div] = mxs_clk_frac("saif1_div", "saif1_sel", SAIF1, 0, 16, 29);
+	clks[clk32k_div] = mxs_clk_fixed_factor("clk32k_div", "ref_xtal", 1, 750);
+	clks[rtc] = mxs_clk_fixed_factor("rtc", "ref_xtal", 1, 768);
+	clks[lradc] = mxs_clk_fixed_factor("lradc", "clk32k", 1, 16);
+	clks[spdif_div] = mxs_clk_fixed_factor("spdif_div", "pll0", 1, 4);
+	clks[clk32k] = mxs_clk_gate("clk32k", "clk32k_div", XTAL, 26);
+	clks[pwm] = mxs_clk_gate("pwm", "ref_xtal", XTAL, 29);
+	clks[uart] = mxs_clk_gate("uart", "ref_xtal", XTAL, 31);
+	clks[ssp0] = mxs_clk_gate("ssp0", "ssp0_div", SSP0, 31);
+	clks[ssp1] = mxs_clk_gate("ssp1", "ssp1_div", SSP1, 31);
+	clks[ssp2] = mxs_clk_gate("ssp2", "ssp2_div", SSP2, 31);
+	clks[ssp3] = mxs_clk_gate("ssp3", "ssp3_div", SSP3, 31);
+	clks[gpmi] = mxs_clk_gate("gpmi", "gpmi_div", GPMI, 31);
+	clks[spdif] = mxs_clk_gate("spdif", "spdif_div", SPDIF, 31);
+	clks[emi] = mxs_clk_gate("emi", "emi_sel", EMI, 31);
+	clks[saif0] = mxs_clk_gate("saif0", "saif0_div", SAIF0, 31);
+	clks[saif1] = mxs_clk_gate("saif1", "saif1_div", SAIF1, 31);
+	clks[lcdif] = mxs_clk_gate("lcdif", "lcdif_div", LCDIF, 31);
+	clks[etm] = mxs_clk_gate("etm", "etm_div", ETM, 31);
+	clks[fec] = mxs_clk_gate("fec", "hbus", ENET, 30);
+	clks[can0] = mxs_clk_gate("can0", "ref_xtal", FLEXCAN, 30);
+	clks[can1] = mxs_clk_gate("can1", "ref_xtal", FLEXCAN, 28);
+	clks[usb0] = mxs_clk_gate("usb0", "usb0_pwr", DIGCTRL, 2);
+	clks[usb1] = mxs_clk_gate("usb1", "usb1_pwr", DIGCTRL, 16);
+	clks[usb0_pwr] = clk_register_gate(NULL, "usb0_pwr", "pll0", 0, PLL0CTRL0, 18, 0, &mxs_lock);
+	clks[usb1_pwr] = clk_register_gate(NULL, "usb1_pwr", "pll1", 0, PLL1CTRL0, 18, 0, &mxs_lock);
+	clks[enet_out] = clk_register_gate(NULL, "enet_out", "pll2", 0, ENET, 18, 0, &mxs_lock);
+
+	for (i = 0; i < ARRAY_SIZE(clks); i++)
+		if (IS_ERR(clks[i])) {
+			pr_err("i.MX28 clk %d: register failed with %ld\n",
+				i, PTR_ERR(clks[i]));
+			return PTR_ERR(clks[i]);
+		}
+
+	clk_register_clkdev(clks[clk32k], NULL, "timrot");
+	clk_register_clkdev(clks[enet_out], NULL, "enet_out");
+	clk_register_clkdevs(clks[hbus], hbus_lookups, ARRAY_SIZE(hbus_lookups));
+	clk_register_clkdevs(clks[xbus], xbus_lookups, ARRAY_SIZE(xbus_lookups));
+	clk_register_clkdevs(clks[uart], uart_lookups, ARRAY_SIZE(uart_lookups));
+	clk_register_clkdevs(clks[ssp0], ssp0_lookups, ARRAY_SIZE(ssp0_lookups));
+	clk_register_clkdevs(clks[ssp1], ssp1_lookups, ARRAY_SIZE(ssp1_lookups));
+	clk_register_clkdevs(clks[ssp2], ssp2_lookups, ARRAY_SIZE(ssp2_lookups));
+	clk_register_clkdevs(clks[ssp3], ssp3_lookups, ARRAY_SIZE(ssp3_lookups));
+	clk_register_clkdevs(clks[gpmi], gpmi_lookups, ARRAY_SIZE(gpmi_lookups));
+	clk_register_clkdevs(clks[saif0], saif0_lookups, ARRAY_SIZE(saif0_lookups));
+	clk_register_clkdevs(clks[saif1], saif1_lookups, ARRAY_SIZE(saif1_lookups));
+	clk_register_clkdevs(clks[lcdif], lcdif_lookups, ARRAY_SIZE(lcdif_lookups));
+	clk_register_clkdevs(clks[fec], fec_lookups, ARRAY_SIZE(fec_lookups));
+	clk_register_clkdevs(clks[can0], can0_lookups, ARRAY_SIZE(can0_lookups));
+	clk_register_clkdevs(clks[can1], can1_lookups, ARRAY_SIZE(can1_lookups));
+
+	for (i = 0; i < ARRAY_SIZE(clks_init_on); i++)
+		clk_prepare_enable(clks[clks_init_on[i]]);
+
+	mxs_timer_init(MX28_INT_TIMER0);
+
+	return 0;
+}
diff --git a/drivers/clk/mxs/clk-pll.c b/drivers/clk/mxs/clk-pll.c
new file mode 100644
index 0000000..fadae41
--- /dev/null
+++ b/drivers/clk/mxs/clk-pll.c
@@ -0,0 +1,116 @@
+/*
+ * Copyright 2012 Freescale Semiconductor, Inc.
+ *
+ * The code contained herein is licensed under the GNU General Public
+ * License. You may obtain a copy of the GNU General Public License
+ * Version 2 or later at the following locations:
+ *
+ * http://www.opensource.org/licenses/gpl-license.html
+ * http://www.gnu.org/copyleft/gpl.html
+ */
+
+#include <linux/clk.h>
+#include <linux/clk-provider.h>
+#include <linux/delay.h>
+#include <linux/err.h>
+#include <linux/io.h>
+#include <linux/slab.h>
+#include "clk.h"
+
+/**
+ * struct clk_pll - mxs pll clock
+ * @hw: clk_hw for the pll
+ * @base: base address of the pll
+ * @power: the shift of power bit
+ * @rate: the clock rate of the pll
+ *
+ * The mxs pll is a fixed rate clock with power and gate control,
+ * and the shift of the gate bit is always 31.
+ */
+struct clk_pll {
+	struct clk_hw hw;
+	void __iomem *base;
+	u8 power;
+	unsigned long rate;
+};
+
+#define to_clk_pll(_hw) container_of(_hw, struct clk_pll, hw)
+
+static int clk_pll_prepare(struct clk_hw *hw)
+{
+	struct clk_pll *pll = to_clk_pll(hw);
+
+	writel_relaxed(1 << pll->power, pll->base + SET);
+
+	udelay(10);
+
+	return 0;
+}
+
+static void clk_pll_unprepare(struct clk_hw *hw)
+{
+	struct clk_pll *pll = to_clk_pll(hw);
+
+	writel_relaxed(1 << pll->power, pll->base + CLR);
+}
+
+static int clk_pll_enable(struct clk_hw *hw)
+{
+	struct clk_pll *pll = to_clk_pll(hw);
+
+	writel_relaxed(1 << 31, pll->base + CLR);
+
+	return 0;
+}
+
+static void clk_pll_disable(struct clk_hw *hw)
+{
+	struct clk_pll *pll = to_clk_pll(hw);
+
+	writel_relaxed(1 << 31, pll->base + SET);
+}
+
+static unsigned long clk_pll_recalc_rate(struct clk_hw *hw,
+					 unsigned long parent_rate)
+{
+	struct clk_pll *pll = to_clk_pll(hw);
+
+	return pll->rate;
+}
+
+static const struct clk_ops clk_pll_ops = {
+	.prepare = clk_pll_prepare,
+	.unprepare = clk_pll_unprepare,
+	.enable = clk_pll_enable,
+	.disable = clk_pll_disable,
+	.recalc_rate = clk_pll_recalc_rate,
+};
+
+struct clk *mxs_clk_pll(const char *name, const char *parent_name,
+			void __iomem *base, u8 power, unsigned long rate)
+{
+	struct clk_pll *pll;
+	struct clk *clk;
+	struct clk_init_data init;
+
+	pll = kzalloc(sizeof(*pll), GFP_KERNEL);
+	if (!pll)
+		return ERR_PTR(-ENOMEM);
+
+	init.name = name;
+	init.ops = &clk_pll_ops;
+	init.flags = 0;
+	init.parent_names = (parent_name ? &parent_name : NULL);
+	init.num_parents = (parent_name ? 1 : 0);
+
+	pll->base = base;
+	pll->rate = rate;
+	pll->power = power;
+	pll->hw.init = &init;
+
+	clk = clk_register(NULL, &pll->hw);
+	if (IS_ERR(clk))
+		kfree(pll);
+
+	return clk;
+}
diff --git a/drivers/clk/mxs/clk-ref.c b/drivers/clk/mxs/clk-ref.c
new file mode 100644
index 0000000..4adeed6
--- /dev/null
+++ b/drivers/clk/mxs/clk-ref.c
@@ -0,0 +1,154 @@
+/*
+ * Copyright 2012 Freescale Semiconductor, Inc.
+ *
+ * The code contained herein is licensed under the GNU General Public
+ * License. You may obtain a copy of the GNU General Public License
+ * Version 2 or later at the following locations:
+ *
+ * http://www.opensource.org/licenses/gpl-license.html
+ * http://www.gnu.org/copyleft/gpl.html
+ */
+
+#include <linux/clk.h>
+#include <linux/clk-provider.h>
+#include <linux/err.h>
+#include <linux/io.h>
+#include <linux/slab.h>
+#include "clk.h"
+
+/**
+ * struct clk_ref - mxs reference clock
+ * @hw: clk_hw for the reference clock
+ * @reg: register address
+ * @idx: the index of the reference clock within the same register
+ *
+ * The mxs reference clock is sourced from the pll.  Every 4 reference clocks
+ * share one register space, and @idx is used to identify them.  Each reference
+ * clock has a gate control and a fractional divider.  The rate is calculated
+ * as pll rate * (18 / FRAC), where FRAC = 18 ~ 35.
+ */
+struct clk_ref {
+	struct clk_hw hw;
+	void __iomem *reg;
+	u8 idx;
+};
+
+#define to_clk_ref(_hw) container_of(_hw, struct clk_ref, hw)
+
+static int clk_ref_enable(struct clk_hw *hw)
+{
+	struct clk_ref *ref = to_clk_ref(hw);
+
+	writel_relaxed(1 << ((ref->idx + 1) * 8 - 1), ref->reg + CLR);
+
+	return 0;
+}
+
+static void clk_ref_disable(struct clk_hw *hw)
+{
+	struct clk_ref *ref = to_clk_ref(hw);
+
+	writel_relaxed(1 << ((ref->idx + 1) * 8 - 1), ref->reg + SET);
+}
+
+static unsigned long clk_ref_recalc_rate(struct clk_hw *hw,
+					 unsigned long parent_rate)
+{
+	struct clk_ref *ref = to_clk_ref(hw);
+	u64 tmp = parent_rate;
+	u8 frac = (readl_relaxed(ref->reg) >> (ref->idx * 8)) & 0x3f;
+
+	tmp *= 18;
+	do_div(tmp, frac);
+
+	return tmp;
+}
+
+static long clk_ref_round_rate(struct clk_hw *hw, unsigned long rate,
+			       unsigned long *prate)
+{
+	unsigned long parent_rate = *prate;
+	u64 tmp = parent_rate;
+	u8 frac;
+
+	tmp = tmp * 18 + rate / 2;
+	do_div(tmp, rate);
+	frac = tmp;
+
+	if (frac < 18)
+		frac = 18;
+	else if (frac > 35)
+		frac = 35;
+
+	tmp = parent_rate;
+	tmp *= 18;
+	do_div(tmp, frac);
+
+	return tmp;
+}
+
+static int clk_ref_set_rate(struct clk_hw *hw, unsigned long rate,
+			    unsigned long parent_rate)
+{
+	struct clk_ref *ref = to_clk_ref(hw);
+	unsigned long flags;
+	u64 tmp = parent_rate;
+	u32 val;
+	u8 frac, shift = ref->idx * 8;
+
+	tmp = tmp * 18 + rate / 2;
+	do_div(tmp, rate);
+	frac = tmp;
+
+	if (frac < 18)
+		frac = 18;
+	else if (frac > 35)
+		frac = 35;
+
+	spin_lock_irqsave(&mxs_lock, flags);
+
+	val = readl_relaxed(ref->reg);
+	val &= ~(0x3f << shift);
+	val |= frac << shift;
+	writel_relaxed(val, ref->reg);
+
+	spin_unlock_irqrestore(&mxs_lock, flags);
+
+	return 0;
+}
+
+static const struct clk_ops clk_ref_ops = {
+	.enable		= clk_ref_enable,
+	.disable	= clk_ref_disable,
+	.recalc_rate	= clk_ref_recalc_rate,
+	.round_rate	= clk_ref_round_rate,
+	.set_rate	= clk_ref_set_rate,
+};
+
+struct clk *mxs_clk_ref(const char *name, const char *parent_name,
+			void __iomem *reg, u8 idx)
+{
+	struct clk_ref *ref;
+	struct clk *clk;
+	struct clk_init_data init;
+
+	ref = kzalloc(sizeof(*ref), GFP_KERNEL);
+	if (!ref)
+		return ERR_PTR(-ENOMEM);
+
+	init.name = name;
+	init.ops = &clk_ref_ops;
+	init.flags = 0;
+	init.parent_names = (parent_name ? &parent_name : NULL);
+	init.num_parents = (parent_name ? 1 : 0);
+
+	ref->reg = reg;
+	ref->idx = idx;
+	ref->hw.init = &init;
+
+	clk = clk_register(NULL, &ref->hw);
+	if (IS_ERR(clk))
+		kfree(ref);
+
+	return clk;
+}
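
A back-of-the-envelope sketch of the rate rule above (not part of the patch):
clamping FRAC to 18..35 and applying rate = parent * 18 / FRAC reproduces, for
instance, the 288 MHz ref_io that clk_misc_init() programs on i.MX23/28 by
writing 30 into the IOFRAC field of a 480 MHz pll.

	/* sketch: ref clock rate, rate = parent * 18 / frac with 18 <= frac <= 35 */
	#include <linux/kernel.h>
	#include <asm/div64.h>

	static unsigned long example_ref_rate(unsigned long parent_rate, u8 frac)
	{
		u64 tmp = parent_rate;

		frac = clamp_t(u8, frac, 18, 35);

		tmp *= 18;
		do_div(tmp, frac);

		/* e.g. 480000000 * 18 / 30 = 288000000 */
		return tmp;
	}
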
diff --git a/drivers/clk/mxs/clk.c b/drivers/clk/mxs/clk.c
new file mode 100644
index 0000000..b24d560
--- /dev/null
+++ b/drivers/clk/mxs/clk.c
@@ -0,0 +1,28 @@
+/*
+ * Copyright 2012 Freescale Semiconductor, Inc.
+ *
+ * The code contained herein is licensed under the GNU General Public
+ * License. You may obtain a copy of the GNU General Public License
+ * Version 2 or later at the following locations:
+ *
+ * http://www.opensource.org/licenses/gpl-license.html
+ * http://www.gnu.org/copyleft/gpl.html
+ */
+
+#include <linux/err.h>
+#include <linux/io.h>
+#include <linux/jiffies.h>
+#include <linux/spinlock.h>
+
+DEFINE_SPINLOCK(mxs_lock);
+
+int mxs_clk_wait(void __iomem *reg, u8 shift)
+{
+	unsigned long timeout = jiffies + msecs_to_jiffies(10);
+
+	while (readl_relaxed(reg) & (1 << shift))
+		if (time_after(jiffies, timeout))
+			return -ETIMEDOUT;
+
+	return 0;
+}
diff --git a/drivers/clk/mxs/clk.h b/drivers/clk/mxs/clk.h
new file mode 100644
index 0000000..81421e2
--- /dev/null
+++ b/drivers/clk/mxs/clk.h
@@ -0,0 +1,66 @@
+/*
+ * Copyright 2012 Freescale Semiconductor, Inc.
+ *
+ * The code contained herein is licensed under the GNU General Public
+ * License. You may obtain a copy of the GNU General Public License
+ * Version 2 or later at the following locations:
+ *
+ * http://www.opensource.org/licenses/gpl-license.html
+ * http://www.gnu.org/copyleft/gpl.html
+ */
+
+#ifndef __MXS_CLK_H
+#define __MXS_CLK_H
+
+#include <linux/clk.h>
+#include <linux/clk-provider.h>
+#include <linux/spinlock.h>
+
+#define SET	0x4
+#define CLR	0x8
+
+extern spinlock_t mxs_lock;
+
+int mxs_clk_wait(void __iomem *reg, u8 shift);
+
+struct clk *mxs_clk_pll(const char *name, const char *parent_name,
+			void __iomem *base, u8 power, unsigned long rate);
+
+struct clk *mxs_clk_ref(const char *name, const char *parent_name,
+			void __iomem *reg, u8 idx);
+
+struct clk *mxs_clk_div(const char *name, const char *parent_name,
+			void __iomem *reg, u8 shift, u8 width, u8 busy);
+
+struct clk *mxs_clk_frac(const char *name, const char *parent_name,
+			 void __iomem *reg, u8 shift, u8 width, u8 busy);
+
+static inline struct clk *mxs_clk_fixed(const char *name, int rate)
+{
+	return clk_register_fixed_rate(NULL, name, NULL, CLK_IS_ROOT, rate);
+}
+
+static inline struct clk *mxs_clk_gate(const char *name,
+			const char *parent_name, void __iomem *reg, u8 shift)
+{
+	return clk_register_gate(NULL, name, parent_name, CLK_SET_RATE_PARENT,
+				 reg, shift, CLK_GATE_SET_TO_DISABLE,
+				 &mxs_lock);
+}
+
+static inline struct clk *mxs_clk_mux(const char *name, void __iomem *reg,
+		u8 shift, u8 width, const char **parent_names, int num_parents)
+{
+	return clk_register_mux(NULL, name, parent_names, num_parents,
+				CLK_SET_RATE_PARENT, reg, shift, width,
+				0, &mxs_lock);
+}
+
+static inline struct clk *mxs_clk_fixed_factor(const char *name,
+		const char *parent_name, unsigned int mult, unsigned int div)
+{
+	return clk_register_fixed_factor(NULL, name, parent_name,
+					 CLK_SET_RATE_PARENT, mult, div);
+}
+
+#endif /* __MXS_CLK_H */
diff --git a/drivers/clk/spear/Makefile b/drivers/clk/spear/Makefile
new file mode 100644
index 0000000..cdb425d
--- /dev/null
+++ b/drivers/clk/spear/Makefile
@@ -0,0 +1,10 @@
+#
+# SPEAr Clock specific Makefile
+#
+
+obj-y	+= clk.o clk-aux-synth.o clk-frac-synth.o clk-gpt-synth.o clk-vco-pll.o
+
+obj-$(CONFIG_ARCH_SPEAR3XX)	+= spear3xx_clock.o
+obj-$(CONFIG_ARCH_SPEAR6XX)	+= spear6xx_clock.o
+obj-$(CONFIG_MACH_SPEAR1310)	+= spear1310_clock.o
+obj-$(CONFIG_MACH_SPEAR1340)	+= spear1340_clock.o
diff --git a/drivers/clk/spear/clk-aux-synth.c b/drivers/clk/spear/clk-aux-synth.c
new file mode 100644
index 0000000..6756e7c
--- /dev/null
+++ b/drivers/clk/spear/clk-aux-synth.c
@@ -0,0 +1,198 @@
+/*
+ * Copyright (C) 2012 ST Microelectronics
+ * Viresh Kumar <viresh.linux@gmail.com>
+ *
+ * This file is licensed under the terms of the GNU General Public
+ * License version 2. This program is licensed "as is" without any
+ * warranty of any kind, whether express or implied.
+ *
+ * Auxiliary Synthesizer clock implementation
+ */
+
+#define pr_fmt(fmt) "clk-aux-synth: " fmt
+
+#include <linux/clk-provider.h>
+#include <linux/slab.h>
+#include <linux/io.h>
+#include <linux/err.h>
+#include "clk.h"
+
+/*
+ * DOC: Auxiliary Synthesizer clock
+ *
+ * The aux synth rate is determined by the eq, x and y values.
+ *
+ * Fout from the synthesizer is given by one of two equations:
+ * Fout1 = (Fin * X/Y)/2		EQ1
+ * Fout2 = Fin * X/Y			EQ2
+ */
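+
+/*
+ * Worked example (illustrative, the 500 MHz input is an assumed value): with
+ * X = 1, Y = 2 and EQ2 selected (a non-zero .eq in the rate table),
+ * Fout = 500 MHz * 1/2 = 250 MHz; with EQ1 the same X/Y pair gives 125 MHz.
+ */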
+
+#define to_clk_aux(_hw) container_of(_hw, struct clk_aux, hw)
+
+static struct aux_clk_masks default_aux_masks = {
+	.eq_sel_mask = AUX_EQ_SEL_MASK,
+	.eq_sel_shift = AUX_EQ_SEL_SHIFT,
+	.eq1_mask = AUX_EQ1_SEL,
+	.eq2_mask = AUX_EQ2_SEL,
+	.xscale_sel_mask = AUX_XSCALE_MASK,
+	.xscale_sel_shift = AUX_XSCALE_SHIFT,
+	.yscale_sel_mask = AUX_YSCALE_MASK,
+	.yscale_sel_shift = AUX_YSCALE_SHIFT,
+	.enable_bit = AUX_SYNT_ENB,
+};
+
+static unsigned long aux_calc_rate(struct clk_hw *hw, unsigned long prate,
+		int index)
+{
+	struct clk_aux *aux = to_clk_aux(hw);
+	struct aux_rate_tbl *rtbl = aux->rtbl;
+	u8 eq = rtbl[index].eq ? 1 : 2;
+
+	return (((prate / 10000) * rtbl[index].xscale) /
+			(rtbl[index].yscale * eq)) * 10000;
+}
+
+static long clk_aux_round_rate(struct clk_hw *hw, unsigned long drate,
+		unsigned long *prate)
+{
+	struct clk_aux *aux = to_clk_aux(hw);
+	int unused;
+
+	return clk_round_rate_index(hw, drate, *prate, aux_calc_rate,
+			aux->rtbl_cnt, &unused);
+}
+
+static unsigned long clk_aux_recalc_rate(struct clk_hw *hw,
+		unsigned long parent_rate)
+{
+	struct clk_aux *aux = to_clk_aux(hw);
+	unsigned int num = 1, den = 1, val, eqn;
+	unsigned long flags = 0;
+
+	if (aux->lock)
+		spin_lock_irqsave(aux->lock, flags);
+
+	val = readl_relaxed(aux->reg);
+
+	if (aux->lock)
+		spin_unlock_irqrestore(aux->lock, flags);
+
+	eqn = (val >> aux->masks->eq_sel_shift) & aux->masks->eq_sel_mask;
+	if (eqn == aux->masks->eq1_mask)
+		den = 2;
+
+	/* calculate numerator */
+	num = (val >> aux->masks->xscale_sel_shift) &
+		aux->masks->xscale_sel_mask;
+
+	/* calculate denominator */
+	den *= (val >> aux->masks->yscale_sel_shift) &
+		aux->masks->yscale_sel_mask;
+
+	if (!den)
+		return 0;
+
+	return (((parent_rate / 10000) * num) / den) * 10000;
+}
+
+/* Configures new clock rate of aux */
+static int clk_aux_set_rate(struct clk_hw *hw, unsigned long drate,
+				unsigned long prate)
+{
+	struct clk_aux *aux = to_clk_aux(hw);
+	struct aux_rate_tbl *rtbl = aux->rtbl;
+	unsigned long val, flags = 0;
+	int i;
+
+	clk_round_rate_index(hw, drate, prate, aux_calc_rate, aux->rtbl_cnt,
+			&i);
+
+	if (aux->lock)
+		spin_lock_irqsave(aux->lock, flags);
+
+	val = readl_relaxed(aux->reg) &
+		~(aux->masks->eq_sel_mask << aux->masks->eq_sel_shift);
+	val |= (rtbl[i].eq & aux->masks->eq_sel_mask) <<
+		aux->masks->eq_sel_shift;
+	val &= ~(aux->masks->xscale_sel_mask << aux->masks->xscale_sel_shift);
+	val |= (rtbl[i].xscale & aux->masks->xscale_sel_mask) <<
+		aux->masks->xscale_sel_shift;
+	val &= ~(aux->masks->yscale_sel_mask << aux->masks->yscale_sel_shift);
+	val |= (rtbl[i].yscale & aux->masks->yscale_sel_mask) <<
+		aux->masks->yscale_sel_shift;
+	writel_relaxed(val, aux->reg);
+
+	if (aux->lock)
+		spin_unlock_irqrestore(aux->lock, flags);
+
+	return 0;
+}
+
+static struct clk_ops clk_aux_ops = {
+	.recalc_rate = clk_aux_recalc_rate,
+	.round_rate = clk_aux_round_rate,
+	.set_rate = clk_aux_set_rate,
+};
+
+struct clk *clk_register_aux(const char *aux_name, const char *gate_name,
+		const char *parent_name, unsigned long flags, void __iomem *reg,
+		struct aux_clk_masks *masks, struct aux_rate_tbl *rtbl,
+		u8 rtbl_cnt, spinlock_t *lock, struct clk **gate_clk)
+{
+	struct clk_aux *aux;
+	struct clk_init_data init;
+	struct clk *clk;
+
+	if (!aux_name || !parent_name || !reg || !rtbl || !rtbl_cnt) {
+		pr_err("Invalid arguments passed");
+		return ERR_PTR(-EINVAL);
+	}
+
+	aux = kzalloc(sizeof(*aux), GFP_KERNEL);
+	if (!aux) {
+		pr_err("could not allocate aux clk\n");
+		return ERR_PTR(-ENOMEM);
+	}
+
+	/* struct clk_aux assignments */
+	if (!masks)
+		aux->masks = &default_aux_masks;
+	else
+		aux->masks = masks;
+
+	aux->reg = reg;
+	aux->rtbl = rtbl;
+	aux->rtbl_cnt = rtbl_cnt;
+	aux->lock = lock;
+	aux->hw.init = &init;
+
+	init.name = aux_name;
+	init.ops = &clk_aux_ops;
+	init.flags = flags;
+	init.parent_names = &parent_name;
+	init.num_parents = 1;
+
+	clk = clk_register(NULL, &aux->hw);
+	if (IS_ERR_OR_NULL(clk))
+		goto free_aux;
+
+	if (gate_name) {
+		struct clk *tgate_clk;
+
+		tgate_clk = clk_register_gate(NULL, gate_name, aux_name, 0, reg,
+				aux->masks->enable_bit, 0, lock);
+		if (IS_ERR_OR_NULL(tgate_clk))
+			goto free_aux;
+
+		if (gate_clk)
+			*gate_clk = tgate_clk;
+	}
+
+	return clk;
+
+free_aux:
+	kfree(aux);
+	pr_err("clk register failed\n");
+
+	return NULL;
+}
diff --git a/drivers/clk/spear/clk-frac-synth.c b/drivers/clk/spear/clk-frac-synth.c
new file mode 100644
index 0000000..958aa3a
--- /dev/null
+++ b/drivers/clk/spear/clk-frac-synth.c
@@ -0,0 +1,165 @@
+/*
+ * Copyright (C) 2012 ST Microelectronics
+ * Viresh Kumar <viresh.linux@gmail.com>
+ *
+ * This file is licensed under the terms of the GNU General Public
+ * License version 2. This program is licensed "as is" without any
+ * warranty of any kind, whether express or implied.
+ *
+ * Fractional Synthesizer clock implementation
+ */
+
+#define pr_fmt(fmt) "clk-frac-synth: " fmt
+
+#include <linux/clk-provider.h>
+#include <linux/slab.h>
+#include <linux/io.h>
+#include <linux/err.h>
+#include "clk.h"
+
+#define DIV_FACTOR_MASK		0x1FFFF
+
+/*
+ * DOC: Fractional Synthesizer clock
+ *
+ * Fout from the synthesizer is given by the equation below:
+ *
+ * Fout = Fin / (2 * div), where div is the division factor
+ * div is a 17-bit fixed-point value:
+ *	bits 0-13: fractional part
+ *	bits 14-16: integer part
+ *	i.e. div is (bits 16-14).(bits 13-0) in binary
+ *
+ *	Fout = Fin/(2 * div)
+ *	Fout = ((Fin / 10000)/(2 * div)) * 10000
+ *	Fout = (2^14 * (Fin / 10000)/(2^14 * (2 * div))) * 10000
+ *	Fout = (((Fin / 10000) << 14)/(2 * (div << 14))) * 10000
+ *
+ * div << 14 is simply the 17-bit value written to the register.
+ * Max error due to scaling down by 10000 is 10 kHz.
+ */
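+
+/*
+ * Worked example (illustrative, the 250 MHz input is an assumed value): a
+ * register value of 0x14000 is 5.0 in this 3.14 fixed-point format, so
+ * Fout = Fin / (2 * 5) = Fin / 10, i.e. 25 MHz for a 250 MHz input.
+ */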
+
+#define to_clk_frac(_hw) container_of(_hw, struct clk_frac, hw)
+
+static unsigned long frac_calc_rate(struct clk_hw *hw, unsigned long prate,
+		int index)
+{
+	struct clk_frac *frac = to_clk_frac(hw);
+	struct frac_rate_tbl *rtbl = frac->rtbl;
+
+	prate /= 10000;
+	prate <<= 14;
+	prate /= (2 * rtbl[index].div);
+	prate *= 10000;
+
+	return prate;
+}
+
+static long clk_frac_round_rate(struct clk_hw *hw, unsigned long drate,
+		unsigned long *prate)
+{
+	struct clk_frac *frac = to_clk_frac(hw);
+	int unused;
+
+	return clk_round_rate_index(hw, drate, *prate, frac_calc_rate,
+			frac->rtbl_cnt, &unused);
+}
+
+static unsigned long clk_frac_recalc_rate(struct clk_hw *hw,
+		unsigned long parent_rate)
+{
+	struct clk_frac *frac = to_clk_frac(hw);
+	unsigned long flags = 0;
+	unsigned int div = 1, val;
+
+	if (frac->lock)
+		spin_lock_irqsave(frac->lock, flags);
+
+	val = readl_relaxed(frac->reg);
+
+	if (frac->lock)
+		spin_unlock_irqrestore(frac->lock, flags);
+
+	div = val & DIV_FACTOR_MASK;
+
+	if (!div)
+		return 0;
+
+	parent_rate = parent_rate / 10000;
+
+	parent_rate = (parent_rate << 14) / (2 * div);
+	return parent_rate * 10000;
+}
+
+/* Configures new clock rate of frac */
+static int clk_frac_set_rate(struct clk_hw *hw, unsigned long drate,
+				unsigned long prate)
+{
+	struct clk_frac *frac = to_clk_frac(hw);
+	struct frac_rate_tbl *rtbl = frac->rtbl;
+	unsigned long flags = 0, val;
+	int i;
+
+	clk_round_rate_index(hw, drate, prate, frac_calc_rate, frac->rtbl_cnt,
+			&i);
+
+	if (frac->lock)
+		spin_lock_irqsave(frac->lock, flags);
+
+	val = readl_relaxed(frac->reg) & ~DIV_FACTOR_MASK;
+	val |= rtbl[i].div & DIV_FACTOR_MASK;
+	writel_relaxed(val, frac->reg);
+
+	if (frac->lock)
+		spin_unlock_irqrestore(frac->lock, flags);
+
+	return 0;
+}
+
+struct clk_ops clk_frac_ops = {
+	.recalc_rate = clk_frac_recalc_rate,
+	.round_rate = clk_frac_round_rate,
+	.set_rate = clk_frac_set_rate,
+};
+
+struct clk *clk_register_frac(const char *name, const char *parent_name,
+		unsigned long flags, void __iomem *reg,
+		struct frac_rate_tbl *rtbl, u8 rtbl_cnt, spinlock_t *lock)
+{
+	struct clk_init_data init;
+	struct clk_frac *frac;
+	struct clk *clk;
+
+	if (!name || !parent_name || !reg || !rtbl || !rtbl_cnt) {
+		pr_err("Invalid arguments passed");
+		return ERR_PTR(-EINVAL);
+	}
+
+	frac = kzalloc(sizeof(*frac), GFP_KERNEL);
+	if (!frac) {
+		pr_err("could not allocate frac clk\n");
+		return ERR_PTR(-ENOMEM);
+	}
+
+	/* struct clk_frac assignments */
+	frac->reg = reg;
+	frac->rtbl = rtbl;
+	frac->rtbl_cnt = rtbl_cnt;
+	frac->lock = lock;
+	frac->hw.init = &init;
+
+	init.name = name;
+	init.ops = &clk_frac_ops;
+	init.flags = flags;
+	init.parent_names = &parent_name;
+	init.num_parents = 1;
+
+	clk = clk_register(NULL, &frac->hw);
+	if (!IS_ERR_OR_NULL(clk))
+		return clk;
+
+	pr_err("clk register failed\n");
+	kfree(frac);
+
+	return NULL;
+}
diff --git a/drivers/clk/spear/clk-gpt-synth.c b/drivers/clk/spear/clk-gpt-synth.c
new file mode 100644
index 0000000..1afc18c
--- /dev/null
+++ b/drivers/clk/spear/clk-gpt-synth.c
@@ -0,0 +1,154 @@
+/*
+ * Copyright (C) 2012 ST Microelectronics
+ * Viresh Kumar <viresh.linux@gmail.com>
+ *
+ * This file is licensed under the terms of the GNU General Public
+ * License version 2. This program is licensed "as is" without any
+ * warranty of any kind, whether express or implied.
+ *
+ * General Purpose Timer Synthesizer clock implementation
+ */
+
+#define pr_fmt(fmt) "clk-gpt-synth: " fmt
+
+#include <linux/clk-provider.h>
+#include <linux/slab.h>
+#include <linux/io.h>
+#include <linux/err.h>
+#include "clk.h"
+
+#define GPT_MSCALE_MASK		0xFFF
+#define GPT_NSCALE_SHIFT	12
+#define GPT_NSCALE_MASK		0xF
+
+/*
+ * DOC: General Purpose Timer Synthesizer clock
+ *
+ * The gpt synth clk rate is calculated from the mscale and nscale values:
+ *
+ * Fout = Fin / ((2 ^ (N + 1)) * (M + 1))
+ */
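+
+/*
+ * Worked example (illustrative values): with M = 5 and N = 1,
+ * Fout = Fin / ((2 ^ 2) * 6) = Fin / 24.
+ */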
+
+#define to_clk_gpt(_hw) container_of(_hw, struct clk_gpt, hw)
+
+static unsigned long gpt_calc_rate(struct clk_hw *hw, unsigned long prate,
+		int index)
+{
+	struct clk_gpt *gpt = to_clk_gpt(hw);
+	struct gpt_rate_tbl *rtbl = gpt->rtbl;
+
+	prate /= ((1 << (rtbl[index].nscale + 1)) * (rtbl[index].mscale + 1));
+
+	return prate;
+}
+
+static long clk_gpt_round_rate(struct clk_hw *hw, unsigned long drate,
+		unsigned long *prate)
+{
+	struct clk_gpt *gpt = to_clk_gpt(hw);
+	int unused;
+
+	return clk_round_rate_index(hw, drate, *prate, gpt_calc_rate,
+			gpt->rtbl_cnt, &unused);
+}
+
+static unsigned long clk_gpt_recalc_rate(struct clk_hw *hw,
+		unsigned long parent_rate)
+{
+	struct clk_gpt *gpt = to_clk_gpt(hw);
+	unsigned long flags = 0;
+	unsigned int div = 1, val;
+
+	if (gpt->lock)
+		spin_lock_irqsave(gpt->lock, flags);
+
+	val = readl_relaxed(gpt->reg);
+
+	if (gpt->lock)
+		spin_unlock_irqrestore(gpt->lock, flags);
+
+	div += val & GPT_MSCALE_MASK;
+	div *= 1 << (((val >> GPT_NSCALE_SHIFT) & GPT_NSCALE_MASK) + 1);
+
+	if (!div)
+		return 0;
+
+	return parent_rate / div;
+}
+
+/* Configures new clock rate of gpt */
+static int clk_gpt_set_rate(struct clk_hw *hw, unsigned long drate,
+				unsigned long prate)
+{
+	struct clk_gpt *gpt = to_clk_gpt(hw);
+	struct gpt_rate_tbl *rtbl = gpt->rtbl;
+	unsigned long flags = 0, val;
+	int i;
+
+	clk_round_rate_index(hw, drate, prate, gpt_calc_rate, gpt->rtbl_cnt,
+			&i);
+
+	if (gpt->lock)
+		spin_lock_irqsave(gpt->lock, flags);
+
+	val = readl(gpt->reg) & ~GPT_MSCALE_MASK;
+	val &= ~(GPT_NSCALE_MASK << GPT_NSCALE_SHIFT);
+
+	val |= rtbl[i].mscale & GPT_MSCALE_MASK;
+	val |= (rtbl[i].nscale & GPT_NSCALE_MASK) << GPT_NSCALE_SHIFT;
+
+	writel_relaxed(val, gpt->reg);
+
+	if (gpt->lock)
+		spin_unlock_irqrestore(gpt->lock, flags);
+
+	return 0;
+}
+
+static struct clk_ops clk_gpt_ops = {
+	.recalc_rate = clk_gpt_recalc_rate,
+	.round_rate = clk_gpt_round_rate,
+	.set_rate = clk_gpt_set_rate,
+};
+
+struct clk *clk_register_gpt(const char *name, const char *parent_name,
+		unsigned long flags, void __iomem *reg,
+		struct gpt_rate_tbl *rtbl, u8 rtbl_cnt, spinlock_t *lock)
+{
+	struct clk_init_data init;
+	struct clk_gpt *gpt;
+	struct clk *clk;
+
+	if (!name || !parent_name || !reg || !rtbl || !rtbl_cnt) {
+		pr_err("Invalid arguments passed");
+		return ERR_PTR(-EINVAL);
+	}
+
+	gpt = kzalloc(sizeof(*gpt), GFP_KERNEL);
+	if (!gpt) {
+		pr_err("could not allocate gpt clk\n");
+		return ERR_PTR(-ENOMEM);
+	}
+
+	/* struct clk_gpt assignments */
+	gpt->reg = reg;
+	gpt->rtbl = rtbl;
+	gpt->rtbl_cnt = rtbl_cnt;
+	gpt->lock = lock;
+	gpt->hw.init = &init;
+
+	init.name = name;
+	init.ops = &clk_gpt_ops;
+	init.flags = flags;
+	init.parent_names = &parent_name;
+	init.num_parents = 1;
+
+	clk = clk_register(NULL, &gpt->hw);
+	if (!IS_ERR_OR_NULL(clk))
+		return clk;
+
+	pr_err("clk register failed\n");
+	kfree(gpt);
+
+	return NULL;
+}
diff --git a/drivers/clk/spear/clk-vco-pll.c b/drivers/clk/spear/clk-vco-pll.c
new file mode 100644
index 0000000..5f1b6ba
--- /dev/null
+++ b/drivers/clk/spear/clk-vco-pll.c
@@ -0,0 +1,363 @@
+/*
+ * Copyright (C) 2012 ST Microelectronics
+ * Viresh Kumar <viresh.linux@gmail.com>
+ *
+ * This file is licensed under the terms of the GNU General Public
+ * License version 2. This program is licensed "as is" without any
+ * warranty of any kind, whether express or implied.
+ *
+ * VCO-PLL clock implementation
+ */
+
+#define pr_fmt(fmt) "clk-vco-pll: " fmt
+
+#include <linux/clk-provider.h>
+#include <linux/slab.h>
+#include <linux/io.h>
+#include <linux/err.h>
+#include "clk.h"
+
+/*
+ * DOC: VCO-PLL clock
+ *
+ * VCO and PLL rates are derived from the following equations:
+ *
+ * In normal mode
+ * vco = (2 * M[15:8] * Fin)/N
+ *
+ * In Dithered mode
+ * vco = (2 * M[15:0] * Fin)/(256 * N)
+ *
+ * pll_rate = vco/2^p
+ *
+ * vco and pll are closely bound to each other: vco programs mode, m and n,
+ * pll programs p, and both share common enable/disable logic.
+ *
+ * clk_register_vco_pll() registers instances of both vco & pll.
+ * CLK_SET_RATE_PARENT flag is forced for pll, as it will always pass its
+ * set_rate to vco. A single rate table exists for both the clocks, which
+ * configures m, n and p.
+ */
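+
+/*
+ * Worked example, taken from one entry of the SPEAr1310 pll rate table in
+ * this patch (normal mode, m = 0x7D, n = 0x06, p = 0x3) with an assumed
+ * 24 MHz input: vco = (2 * 125 * 24 MHz) / 6 = 1000 MHz and
+ * pll = 1000 MHz / 2^3 = 125 MHz.
+ */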
+
+/* PLL_CTR register masks */
+#define PLL_MODE_NORMAL		0
+#define PLL_MODE_FRACTION	1
+#define PLL_MODE_DITH_DSM	2
+#define PLL_MODE_DITH_SSM	3
+#define PLL_MODE_MASK		3
+#define PLL_MODE_SHIFT		3
+#define PLL_ENABLE		2
+
+#define PLL_LOCK_SHIFT		0
+#define PLL_LOCK_MASK		1
+
+/* PLL FRQ register masks */
+#define PLL_NORM_FDBK_M_MASK	0xFF
+#define PLL_NORM_FDBK_M_SHIFT	24
+#define PLL_DITH_FDBK_M_MASK	0xFFFF
+#define PLL_DITH_FDBK_M_SHIFT	16
+#define PLL_DIV_P_MASK		0x7
+#define PLL_DIV_P_SHIFT		8
+#define PLL_DIV_N_MASK		0xFF
+#define PLL_DIV_N_SHIFT		0
+
+#define to_clk_vco(_hw) container_of(_hw, struct clk_vco, hw)
+#define to_clk_pll(_hw) container_of(_hw, struct clk_pll, hw)
+
+/* Calculates pll clk rate for specific value of mode, m, n and p */
+static unsigned long pll_calc_rate(struct pll_rate_tbl *rtbl,
+		unsigned long prate, int index, unsigned long *pll_rate)
+{
+	unsigned long rate = prate;
+	unsigned int mode;
+
+	mode = rtbl[index].mode ? 256 : 1;
+	rate = (((2 * rate / 10000) * rtbl[index].m) / (mode * rtbl[index].n));
+
+	if (pll_rate)
+		*pll_rate = (rate / (1 << rtbl[index].p)) * 10000;
+
+	return rate * 10000;
+}
+
+static long clk_pll_round_rate_index(struct clk_hw *hw, unsigned long drate,
+				unsigned long *prate, int *index)
+{
+	struct clk_pll *pll = to_clk_pll(hw);
+	unsigned long prev_rate, vco_prev_rate, rate = 0;
+	unsigned long vco_parent_rate =
+		__clk_get_rate(__clk_get_parent(__clk_get_parent(hw->clk)));
+
+	if (!prate) {
+		pr_err("%s: prate is must for pll clk\n", __func__);
+		return -EINVAL;
+	}
+
+	for (*index = 0; *index < pll->vco->rtbl_cnt; (*index)++) {
+		prev_rate = rate;
+		vco_prev_rate = *prate;
+		*prate = pll_calc_rate(pll->vco->rtbl, vco_parent_rate, *index,
+				&rate);
+		if (drate < rate) {
+			/* previous clock was best */
+			if (*index) {
+				rate = prev_rate;
+				*prate = vco_prev_rate;
+				(*index)--;
+			}
+			break;
+		}
+	}
+
+	/* drate is above the largest table rate, use the last entry */
+	if (*index == pll->vco->rtbl_cnt)
+		(*index)--;
+
+	return rate;
+}
+
+static long clk_pll_round_rate(struct clk_hw *hw, unsigned long drate,
+				unsigned long *prate)
+{
+	int unused;
+
+	return clk_pll_round_rate_index(hw, drate, prate, &unused);
+}
+
+static unsigned long clk_pll_recalc_rate(struct clk_hw *hw, unsigned long
+		parent_rate)
+{
+	struct clk_pll *pll = to_clk_pll(hw);
+	unsigned long flags = 0;
+	unsigned int p;
+
+	if (pll->vco->lock)
+		spin_lock_irqsave(pll->vco->lock, flags);
+
+	p = readl_relaxed(pll->vco->cfg_reg);
+
+	if (pll->vco->lock)
+		spin_unlock_irqrestore(pll->vco->lock, flags);
+
+	p = (p >> PLL_DIV_P_SHIFT) & PLL_DIV_P_MASK;
+
+	return parent_rate / (1 << p);
+}
+
+static int clk_pll_set_rate(struct clk_hw *hw, unsigned long drate,
+				unsigned long prate)
+{
+	struct clk_pll *pll = to_clk_pll(hw);
+	struct pll_rate_tbl *rtbl = pll->vco->rtbl;
+	unsigned long flags = 0, val;
+	int i;
+
+	clk_pll_round_rate_index(hw, drate, &prate, &i);
+
+	if (pll->vco->lock)
+		spin_lock_irqsave(pll->vco->lock, flags);
+
+	val = readl_relaxed(pll->vco->cfg_reg);
+	val &= ~(PLL_DIV_P_MASK << PLL_DIV_P_SHIFT);
+	val |= (rtbl[i].p & PLL_DIV_P_MASK) << PLL_DIV_P_SHIFT;
+	writel_relaxed(val, pll->vco->cfg_reg);
+
+	if (pll->vco->lock)
+		spin_unlock_irqrestore(pll->vco->lock, flags);
+
+	return 0;
+}
+
+static struct clk_ops clk_pll_ops = {
+	.recalc_rate = clk_pll_recalc_rate,
+	.round_rate = clk_pll_round_rate,
+	.set_rate = clk_pll_set_rate,
+};
+
+static inline unsigned long vco_calc_rate(struct clk_hw *hw,
+		unsigned long prate, int index)
+{
+	struct clk_vco *vco = to_clk_vco(hw);
+
+	return pll_calc_rate(vco->rtbl, prate, index, NULL);
+}
+
+static long clk_vco_round_rate(struct clk_hw *hw, unsigned long drate,
+		unsigned long *prate)
+{
+	struct clk_vco *vco = to_clk_vco(hw);
+	int unused;
+
+	return clk_round_rate_index(hw, drate, *prate, vco_calc_rate,
+			vco->rtbl_cnt, &unused);
+}
+
+static unsigned long clk_vco_recalc_rate(struct clk_hw *hw,
+		unsigned long parent_rate)
+{
+	struct clk_vco *vco = to_clk_vco(hw);
+	unsigned long flags = 0;
+	unsigned int num = 2, den = 0, val, mode = 0;
+
+	if (vco->lock)
+		spin_lock_irqsave(vco->lock, flags);
+
+	mode = (readl_relaxed(vco->mode_reg) >> PLL_MODE_SHIFT) & PLL_MODE_MASK;
+
+	val = readl_relaxed(vco->cfg_reg);
+
+	if (vco->lock)
+		spin_unlock_irqrestore(vco->lock, flags);
+
+	den = (val >> PLL_DIV_N_SHIFT) & PLL_DIV_N_MASK;
+
+	/* calculate numerator & denominator */
+	if (!mode) {
+		/* Normal mode */
+		num *= (val >> PLL_NORM_FDBK_M_SHIFT) & PLL_NORM_FDBK_M_MASK;
+	} else {
+		/* Dithered mode */
+		num *= (val >> PLL_DITH_FDBK_M_SHIFT) & PLL_DITH_FDBK_M_MASK;
+		den *= 256;
+	}
+
+	if (!den) {
+		WARN(1, "%s: denominator can't be zero\n", __func__);
+		return 0;
+	}
+
+	return (((parent_rate / 10000) * num) / den) * 10000;
+}
+
+/* Configures new clock rate of vco */
+static int clk_vco_set_rate(struct clk_hw *hw, unsigned long drate,
+				unsigned long prate)
+{
+	struct clk_vco *vco = to_clk_vco(hw);
+	struct pll_rate_tbl *rtbl = vco->rtbl;
+	unsigned long flags = 0, val;
+	int i;
+
+	clk_round_rate_index(hw, drate, prate, vco_calc_rate, vco->rtbl_cnt,
+			&i);
+
+	if (vco->lock)
+		spin_lock_irqsave(vco->lock, flags);
+
+	val = readl_relaxed(vco->mode_reg);
+	val &= ~(PLL_MODE_MASK << PLL_MODE_SHIFT);
+	val |= (rtbl[i].mode & PLL_MODE_MASK) << PLL_MODE_SHIFT;
+	writel_relaxed(val, vco->mode_reg);
+
+	val = readl_relaxed(vco->cfg_reg);
+	val &= ~(PLL_DIV_N_MASK << PLL_DIV_N_SHIFT);
+	val |= (rtbl[i].n & PLL_DIV_N_MASK) << PLL_DIV_N_SHIFT;
+
+	val &= ~(PLL_DITH_FDBK_M_MASK << PLL_DITH_FDBK_M_SHIFT);
+	if (rtbl[i].mode)
+		val |= (rtbl[i].m & PLL_DITH_FDBK_M_MASK) <<
+			PLL_DITH_FDBK_M_SHIFT;
+	else
+		val |= (rtbl[i].m & PLL_NORM_FDBK_M_MASK) <<
+			PLL_NORM_FDBK_M_SHIFT;
+
+	writel_relaxed(val, vco->cfg_reg);
+
+	if (vco->lock)
+		spin_unlock_irqrestore(vco->lock, flags);
+
+	return 0;
+}
+
+static struct clk_ops clk_vco_ops = {
+	.recalc_rate = clk_vco_recalc_rate,
+	.round_rate = clk_vco_round_rate,
+	.set_rate = clk_vco_set_rate,
+};
+
+struct clk *clk_register_vco_pll(const char *vco_name, const char *pll_name,
+		const char *vco_gate_name, const char *parent_name,
+		unsigned long flags, void __iomem *mode_reg,
+		void __iomem *cfg_reg, struct pll_rate_tbl *rtbl, u8 rtbl_cnt,
+		spinlock_t *lock, struct clk **pll_clk,
+		struct clk **vco_gate_clk)
+{
+	struct clk_vco *vco;
+	struct clk_pll *pll;
+	struct clk *vco_clk, *tpll_clk, *tvco_gate_clk;
+	struct clk_init_data vco_init, pll_init;
+	const char **vco_parent_name;
+
+	if (!vco_name || !pll_name || !parent_name || !mode_reg || !cfg_reg ||
+			!rtbl || !rtbl_cnt) {
+		pr_err("Invalid arguments passed");
+		return ERR_PTR(-EINVAL);
+	}
+
+	vco = kzalloc(sizeof(*vco), GFP_KERNEL);
+	if (!vco) {
+		pr_err("could not allocate vco clk\n");
+		return ERR_PTR(-ENOMEM);
+	}
+
+	pll = kzalloc(sizeof(*pll), GFP_KERNEL);
+	if (!pll) {
+		pr_err("could not allocate pll clk\n");
+		goto free_vco;
+	}
+
+	/* struct clk_vco assignments */
+	vco->mode_reg = mode_reg;
+	vco->cfg_reg = cfg_reg;
+	vco->rtbl = rtbl;
+	vco->rtbl_cnt = rtbl_cnt;
+	vco->lock = lock;
+	vco->hw.init = &vco_init;
+
+	pll->vco = vco;
+	pll->hw.init = &pll_init;
+
+	if (vco_gate_name) {
+		tvco_gate_clk = clk_register_gate(NULL, vco_gate_name,
+				parent_name, 0, mode_reg, PLL_ENABLE, 0, lock);
+		if (IS_ERR_OR_NULL(tvco_gate_clk))
+			goto free_pll;
+
+		if (vco_gate_clk)
+			*vco_gate_clk = tvco_gate_clk;
+		vco_parent_name = &vco_gate_name;
+	} else {
+		vco_parent_name = &parent_name;
+	}
+
+	vco_init.name = vco_name;
+	vco_init.ops = &clk_vco_ops;
+	vco_init.flags = flags;
+	vco_init.parent_names = vco_parent_name;
+	vco_init.num_parents = 1;
+
+	pll_init.name = pll_name;
+	pll_init.ops = &clk_pll_ops;
+	pll_init.flags = CLK_SET_RATE_PARENT;
+	pll_init.parent_names = &vco_name;
+	pll_init.num_parents = 1;
+
+	vco_clk = clk_register(NULL, &vco->hw);
+	if (IS_ERR_OR_NULL(vco_clk))
+		goto free_pll;
+
+	tpll_clk = clk_register(NULL, &pll->hw);
+	if (IS_ERR_OR_NULL(tpll_clk))
+		goto free_pll;
+
+	if (pll_clk)
+		*pll_clk = tpll_clk;
+
+	return vco_clk;
+
+free_pll:
+	kfree(pll);
+free_vco:
+	kfree(vco);
+
+	pr_err("Failed to register vco pll clock\n");
+
+	return ERR_PTR(-ENOMEM);
+}
diff --git a/drivers/clk/spear/clk.c b/drivers/clk/spear/clk.c
new file mode 100644
index 0000000..7cd6378
--- /dev/null
+++ b/drivers/clk/spear/clk.c
@@ -0,0 +1,36 @@
+/*
+ * Copyright (C) 2012 ST Microelectronics
+ * Viresh Kumar <viresh.linux@gmail.com>
+ *
+ * This file is licensed under the terms of the GNU General Public
+ * License version 2. This program is licensed "as is" without any
+ * warranty of any kind, whether express or implied.
+ *
+ * SPEAr clk - Common routines
+ */
+
+#include <linux/clk-provider.h>
+#include <linux/types.h>
+#include "clk.h"
+
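+/*
+ * Pick the highest table rate that does not exceed @drate (the first table
+ * entry is returned when @drate is below all of them) and report its
+ * position through @index.  Illustrative example with an assumed table of
+ * {25, 50, 100, 250} MHz: drate = 80 MHz returns 50 MHz with *index = 1.
+ */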
+long clk_round_rate_index(struct clk_hw *hw, unsigned long drate,
+		unsigned long parent_rate, clk_calc_rate calc_rate, u8 rtbl_cnt,
+		int *index)
+{
+	unsigned long prev_rate, rate = 0;
+
+	for (*index = 0; *index < rtbl_cnt; (*index)++) {
+		prev_rate = rate;
+		rate = calc_rate(hw, parent_rate, *index);
+		if (drate < rate) {
+			/* previous clock was best */
+			if (*index) {
+				rate = prev_rate;
+				(*index)--;
+			}
+			break;
+		}
+	}
+
+	/* drate is above the largest table rate, use the last entry */
+	if (*index == rtbl_cnt)
+		(*index)--;
+
+	return rate;
+}
diff --git a/drivers/clk/spear/clk.h b/drivers/clk/spear/clk.h
new file mode 100644
index 0000000..9317376
--- /dev/null
+++ b/drivers/clk/spear/clk.h
@@ -0,0 +1,134 @@
+/*
+ * Clock framework definitions for SPEAr platform
+ *
+ * Copyright (C) 2012 ST Microelectronics
+ * Viresh Kumar <viresh.linux@gmail.com>
+ *
+ * This file is licensed under the terms of the GNU General Public
+ * License version 2. This program is licensed "as is" without any
+ * warranty of any kind, whether express or implied.
+ */
+
+#ifndef __SPEAR_CLK_H
+#define __SPEAR_CLK_H
+
+#include <linux/clk-provider.h>
+#include <linux/spinlock_types.h>
+#include <linux/types.h>
+
+/* Auxiliary Synth clk */
+/* Default masks */
+#define AUX_EQ_SEL_SHIFT	30
+#define AUX_EQ_SEL_MASK		1
+#define AUX_EQ1_SEL		0
+#define AUX_EQ2_SEL		1
+#define AUX_XSCALE_SHIFT	16
+#define AUX_XSCALE_MASK		0xFFF
+#define AUX_YSCALE_SHIFT	0
+#define AUX_YSCALE_MASK		0xFFF
+#define AUX_SYNT_ENB		31
+
+struct aux_clk_masks {
+	u32 eq_sel_mask;
+	u32 eq_sel_shift;
+	u32 eq1_mask;
+	u32 eq2_mask;
+	u32 xscale_sel_mask;
+	u32 xscale_sel_shift;
+	u32 yscale_sel_mask;
+	u32 yscale_sel_shift;
+	u32 enable_bit;
+};
+
+struct aux_rate_tbl {
+	u16 xscale;
+	u16 yscale;
+	u8 eq;
+};
+
+struct clk_aux {
+	struct			clk_hw hw;
+	void __iomem		*reg;
+	struct aux_clk_masks	*masks;
+	struct aux_rate_tbl	*rtbl;
+	u8			rtbl_cnt;
+	spinlock_t		*lock;
+};
+
+/* Fractional Synth clk */
+struct frac_rate_tbl {
+	u32 div;
+};
+
+struct clk_frac {
+	struct			clk_hw hw;
+	void __iomem		*reg;
+	struct frac_rate_tbl	*rtbl;
+	u8			rtbl_cnt;
+	spinlock_t		*lock;
+};
+
+/* GPT clk */
+struct gpt_rate_tbl {
+	u16 mscale;
+	u16 nscale;
+};
+
+struct clk_gpt {
+	struct			clk_hw hw;
+	void __iomem		*reg;
+	struct gpt_rate_tbl	*rtbl;
+	u8			rtbl_cnt;
+	spinlock_t		*lock;
+};
+
+/* VCO-PLL clk */
+struct pll_rate_tbl {
+	u8 mode;
+	u16 m;
+	u8 n;
+	u8 p;
+};
+
+struct clk_vco {
+	struct			clk_hw hw;
+	void __iomem		*mode_reg;
+	void __iomem		*cfg_reg;
+	struct pll_rate_tbl	*rtbl;
+	u8			rtbl_cnt;
+	spinlock_t		*lock;
+};
+
+struct clk_pll {
+	struct			clk_hw hw;
+	struct clk_vco		*vco;
+	const char		*parent[1];
+	spinlock_t		*lock;
+};
+
+typedef unsigned long (*clk_calc_rate)(struct clk_hw *hw, unsigned long prate,
+		int index);
+
+/* clk register routines */
+struct clk *clk_register_aux(const char *aux_name, const char *gate_name,
+		const char *parent_name, unsigned long flags, void __iomem *reg,
+		struct aux_clk_masks *masks, struct aux_rate_tbl *rtbl,
+		u8 rtbl_cnt, spinlock_t *lock, struct clk **gate_clk);
+struct clk *clk_register_frac(const char *name, const char *parent_name,
+		unsigned long flags, void __iomem *reg,
+		struct frac_rate_tbl *rtbl, u8 rtbl_cnt, spinlock_t *lock);
+struct clk *clk_register_gpt(const char *name, const char *parent_name,
+		unsigned long flags, void __iomem *reg,
+		struct gpt_rate_tbl *rtbl, u8 rtbl_cnt, spinlock_t *lock);
+struct clk *clk_register_vco_pll(const char *vco_name, const char *pll_name,
+		const char *vco_gate_name, const char *parent_name,
+		unsigned long flags, void __iomem *mode_reg,
+		void __iomem *cfg_reg, struct pll_rate_tbl *rtbl, u8 rtbl_cnt,
+		spinlock_t *lock, struct clk **pll_clk,
+		struct clk **vco_gate_clk);
+
+long clk_round_rate_index(struct clk_hw *hw, unsigned long drate,
+		unsigned long parent_rate, clk_calc_rate calc_rate, u8 rtbl_cnt,
+		int *index);
+
+#endif /* __SPEAR_CLK_H */
diff --git a/drivers/clk/spear/spear1310_clock.c b/drivers/clk/spear/spear1310_clock.c
new file mode 100644
index 0000000..8f05652
--- /dev/null
+++ b/drivers/clk/spear/spear1310_clock.c
@@ -0,0 +1,1106 @@
+/*
+ * drivers/clk/spear/spear1310_clock.c
+ *
+ * SPEAr1310 machine clock framework source file
+ *
+ * Copyright (C) 2012 ST Microelectronics
+ * Viresh Kumar <viresh.linux@gmail.com>
+ *
+ * This file is licensed under the terms of the GNU General Public
+ * License version 2. This program is licensed "as is" without any
+ * warranty of any kind, whether express or implied.
+ */
+
+#include <linux/clk.h>
+#include <linux/clkdev.h>
+#include <linux/err.h>
+#include <linux/io.h>
+#include <linux/of_platform.h>
+#include <linux/spinlock_types.h>
+#include <mach/spear.h>
+#include "clk.h"
+
+/* PLL related registers and bit values */
+#define SPEAR1310_PLL_CFG			(VA_MISC_BASE + 0x210)
+	/* PLL_CFG bit values */
+	#define SPEAR1310_CLCD_SYNT_CLK_MASK		1
+	#define SPEAR1310_CLCD_SYNT_CLK_SHIFT		31
+	#define SPEAR1310_RAS_SYNT2_3_CLK_MASK		2
+	#define SPEAR1310_RAS_SYNT2_3_CLK_SHIFT		29
+	#define SPEAR1310_RAS_SYNT_CLK_MASK		2
+	#define SPEAR1310_RAS_SYNT0_1_CLK_SHIFT		27
+	#define SPEAR1310_PLL_CLK_MASK			2
+	#define SPEAR1310_PLL3_CLK_SHIFT		24
+	#define SPEAR1310_PLL2_CLK_SHIFT		22
+	#define SPEAR1310_PLL1_CLK_SHIFT		20
+
+#define SPEAR1310_PLL1_CTR			(VA_MISC_BASE + 0x214)
+#define SPEAR1310_PLL1_FRQ			(VA_MISC_BASE + 0x218)
+#define SPEAR1310_PLL2_CTR			(VA_MISC_BASE + 0x220)
+#define SPEAR1310_PLL2_FRQ			(VA_MISC_BASE + 0x224)
+#define SPEAR1310_PLL3_CTR			(VA_MISC_BASE + 0x22C)
+#define SPEAR1310_PLL3_FRQ			(VA_MISC_BASE + 0x230)
+#define SPEAR1310_PLL4_CTR			(VA_MISC_BASE + 0x238)
+#define SPEAR1310_PLL4_FRQ			(VA_MISC_BASE + 0x23C)
+#define SPEAR1310_PERIP_CLK_CFG			(VA_MISC_BASE + 0x244)
+	/* PERIP_CLK_CFG bit values */
+	#define SPEAR1310_GPT_OSC24_VAL			0
+	#define SPEAR1310_GPT_APB_VAL			1
+	#define SPEAR1310_GPT_CLK_MASK			1
+	#define SPEAR1310_GPT3_CLK_SHIFT		11
+	#define SPEAR1310_GPT2_CLK_SHIFT		10
+	#define SPEAR1310_GPT1_CLK_SHIFT		9
+	#define SPEAR1310_GPT0_CLK_SHIFT		8
+	#define SPEAR1310_UART_CLK_PLL5_VAL		0
+	#define SPEAR1310_UART_CLK_OSC24_VAL		1
+	#define SPEAR1310_UART_CLK_SYNT_VAL		2
+	#define SPEAR1310_UART_CLK_MASK			2
+	#define SPEAR1310_UART_CLK_SHIFT		4
+
+	#define SPEAR1310_AUX_CLK_PLL5_VAL		0
+	#define SPEAR1310_AUX_CLK_SYNT_VAL		1
+	#define SPEAR1310_CLCD_CLK_MASK			2
+	#define SPEAR1310_CLCD_CLK_SHIFT		2
+	#define SPEAR1310_C3_CLK_MASK			1
+	#define SPEAR1310_C3_CLK_SHIFT			1
+
+#define SPEAR1310_GMAC_CLK_CFG			(VA_MISC_BASE + 0x248)
+	#define SPEAR1310_GMAC_PHY_IF_SEL_MASK		3
+	#define SPEAR1310_GMAC_PHY_IF_SEL_SHIFT		4
+	#define SPEAR1310_GMAC_PHY_CLK_MASK		1
+	#define SPEAR1310_GMAC_PHY_CLK_SHIFT		3
+	#define SPEAR1310_GMAC_PHY_INPUT_CLK_MASK	2
+	#define SPEAR1310_GMAC_PHY_INPUT_CLK_SHIFT	1
+
+#define SPEAR1310_I2S_CLK_CFG			(VA_MISC_BASE + 0x24C)
+	/* I2S_CLK_CFG register mask */
+	#define SPEAR1310_I2S_SCLK_X_MASK		0x1F
+	#define SPEAR1310_I2S_SCLK_X_SHIFT		27
+	#define SPEAR1310_I2S_SCLK_Y_MASK		0x1F
+	#define SPEAR1310_I2S_SCLK_Y_SHIFT		22
+	#define SPEAR1310_I2S_SCLK_EQ_SEL_SHIFT		21
+	#define SPEAR1310_I2S_SCLK_SYNTH_ENB		20
+	#define SPEAR1310_I2S_PRS1_CLK_X_MASK		0xFF
+	#define SPEAR1310_I2S_PRS1_CLK_X_SHIFT		12
+	#define SPEAR1310_I2S_PRS1_CLK_Y_MASK		0xFF
+	#define SPEAR1310_I2S_PRS1_CLK_Y_SHIFT		4
+	#define SPEAR1310_I2S_PRS1_EQ_SEL_SHIFT		3
+	#define SPEAR1310_I2S_REF_SEL_MASK		1
+	#define SPEAR1310_I2S_REF_SHIFT			2
+	#define SPEAR1310_I2S_SRC_CLK_MASK		2
+	#define SPEAR1310_I2S_SRC_CLK_SHIFT		0
+
+#define SPEAR1310_C3_CLK_SYNT			(VA_MISC_BASE + 0x250)
+#define SPEAR1310_UART_CLK_SYNT			(VA_MISC_BASE + 0x254)
+#define SPEAR1310_GMAC_CLK_SYNT			(VA_MISC_BASE + 0x258)
+#define SPEAR1310_SDHCI_CLK_SYNT		(VA_MISC_BASE + 0x25C)
+#define SPEAR1310_CFXD_CLK_SYNT			(VA_MISC_BASE + 0x260)
+#define SPEAR1310_ADC_CLK_SYNT			(VA_MISC_BASE + 0x264)
+#define SPEAR1310_AMBA_CLK_SYNT			(VA_MISC_BASE + 0x268)
+#define SPEAR1310_CLCD_CLK_SYNT			(VA_MISC_BASE + 0x270)
+#define SPEAR1310_RAS_CLK_SYNT0			(VA_MISC_BASE + 0x280)
+#define SPEAR1310_RAS_CLK_SYNT1			(VA_MISC_BASE + 0x288)
+#define SPEAR1310_RAS_CLK_SYNT2			(VA_MISC_BASE + 0x290)
+#define SPEAR1310_RAS_CLK_SYNT3			(VA_MISC_BASE + 0x298)
+	/* Check Fractional synthesizer reg masks */
+
+#define SPEAR1310_PERIP1_CLK_ENB		(VA_MISC_BASE + 0x300)
+	/* PERIP1_CLK_ENB register masks */
+	#define SPEAR1310_RTC_CLK_ENB			31
+	#define SPEAR1310_ADC_CLK_ENB			30
+	#define SPEAR1310_C3_CLK_ENB			29
+	#define SPEAR1310_JPEG_CLK_ENB			28
+	#define SPEAR1310_CLCD_CLK_ENB			27
+	#define SPEAR1310_DMA_CLK_ENB			25
+	#define SPEAR1310_GPIO1_CLK_ENB			24
+	#define SPEAR1310_GPIO0_CLK_ENB			23
+	#define SPEAR1310_GPT1_CLK_ENB			22
+	#define SPEAR1310_GPT0_CLK_ENB			21
+	#define SPEAR1310_I2S0_CLK_ENB			20
+	#define SPEAR1310_I2S1_CLK_ENB			19
+	#define SPEAR1310_I2C0_CLK_ENB			18
+	#define SPEAR1310_SSP_CLK_ENB			17
+	#define SPEAR1310_UART_CLK_ENB			15
+	#define SPEAR1310_PCIE_SATA_2_CLK_ENB		14
+	#define SPEAR1310_PCIE_SATA_1_CLK_ENB		13
+	#define SPEAR1310_PCIE_SATA_0_CLK_ENB		12
+	#define SPEAR1310_UOC_CLK_ENB			11
+	#define SPEAR1310_UHC1_CLK_ENB			10
+	#define SPEAR1310_UHC0_CLK_ENB			9
+	#define SPEAR1310_GMAC_CLK_ENB			8
+	#define SPEAR1310_CFXD_CLK_ENB			7
+	#define SPEAR1310_SDHCI_CLK_ENB			6
+	#define SPEAR1310_SMI_CLK_ENB			5
+	#define SPEAR1310_FSMC_CLK_ENB			4
+	#define SPEAR1310_SYSRAM0_CLK_ENB		3
+	#define SPEAR1310_SYSRAM1_CLK_ENB		2
+	#define SPEAR1310_SYSROM_CLK_ENB		1
+	#define SPEAR1310_BUS_CLK_ENB			0
+
+#define SPEAR1310_PERIP2_CLK_ENB		(VA_MISC_BASE + 0x304)
+	/* PERIP2_CLK_ENB register masks */
+	#define SPEAR1310_THSENS_CLK_ENB		8
+	#define SPEAR1310_I2S_REF_PAD_CLK_ENB		7
+	#define SPEAR1310_ACP_CLK_ENB			6
+	#define SPEAR1310_GPT3_CLK_ENB			5
+	#define SPEAR1310_GPT2_CLK_ENB			4
+	#define SPEAR1310_KBD_CLK_ENB			3
+	#define SPEAR1310_CPU_DBG_CLK_ENB		2
+	#define SPEAR1310_DDR_CORE_CLK_ENB		1
+	#define SPEAR1310_DDR_CTRL_CLK_ENB		0
+
+#define SPEAR1310_RAS_CLK_ENB			(VA_MISC_BASE + 0x310)
+	/* RAS_CLK_ENB register masks */
+	#define SPEAR1310_SYNT3_CLK_ENB			17
+	#define SPEAR1310_SYNT2_CLK_ENB			16
+	#define SPEAR1310_SYNT1_CLK_ENB			15
+	#define SPEAR1310_SYNT0_CLK_ENB			14
+	#define SPEAR1310_PCLK3_CLK_ENB			13
+	#define SPEAR1310_PCLK2_CLK_ENB			12
+	#define SPEAR1310_PCLK1_CLK_ENB			11
+	#define SPEAR1310_PCLK0_CLK_ENB			10
+	#define SPEAR1310_PLL3_CLK_ENB			9
+	#define SPEAR1310_PLL2_CLK_ENB			8
+	#define SPEAR1310_C125M_PAD_CLK_ENB		7
+	#define SPEAR1310_C30M_CLK_ENB			6
+	#define SPEAR1310_C48M_CLK_ENB			5
+	#define SPEAR1310_OSC_25M_CLK_ENB		4
+	#define SPEAR1310_OSC_32K_CLK_ENB		3
+	#define SPEAR1310_OSC_24M_CLK_ENB		2
+	#define SPEAR1310_PCLK_CLK_ENB			1
+	#define SPEAR1310_ACLK_CLK_ENB			0
+
+/* RAS Area Control Register */
+#define SPEAR1310_RAS_CTRL_REG0			(VA_SPEAR1310_RAS_BASE + 0x000)
+	#define SPEAR1310_SSP1_CLK_MASK			3
+	#define SPEAR1310_SSP1_CLK_SHIFT		26
+	#define SPEAR1310_TDM_CLK_MASK			1
+	#define SPEAR1310_TDM2_CLK_SHIFT		24
+	#define SPEAR1310_TDM1_CLK_SHIFT		23
+	#define SPEAR1310_I2C_CLK_MASK			1
+	#define SPEAR1310_I2C7_CLK_SHIFT		22
+	#define SPEAR1310_I2C6_CLK_SHIFT		21
+	#define SPEAR1310_I2C5_CLK_SHIFT		20
+	#define SPEAR1310_I2C4_CLK_SHIFT		19
+	#define SPEAR1310_I2C3_CLK_SHIFT		18
+	#define SPEAR1310_I2C2_CLK_SHIFT		17
+	#define SPEAR1310_I2C1_CLK_SHIFT		16
+	#define SPEAR1310_GPT64_CLK_MASK		1
+	#define SPEAR1310_GPT64_CLK_SHIFT		15
+	#define SPEAR1310_RAS_UART_CLK_MASK		1
+	#define SPEAR1310_UART5_CLK_SHIFT		14
+	#define SPEAR1310_UART4_CLK_SHIFT		13
+	#define SPEAR1310_UART3_CLK_SHIFT		12
+	#define SPEAR1310_UART2_CLK_SHIFT		11
+	#define SPEAR1310_UART1_CLK_SHIFT		10
+	#define SPEAR1310_PCI_CLK_MASK			1
+	#define SPEAR1310_PCI_CLK_SHIFT			0
+
+#define SPEAR1310_RAS_CTRL_REG1			(VA_SPEAR1310_RAS_BASE + 0x004)
+	#define SPEAR1310_PHY_CLK_MASK			0x3
+	#define SPEAR1310_RMII_PHY_CLK_SHIFT		0
+	#define SPEAR1310_SMII_RGMII_PHY_CLK_SHIFT	2
+
+#define SPEAR1310_RAS_SW_CLK_CTRL		(VA_SPEAR1310_RAS_BASE + 0x0148)
+	#define SPEAR1310_CAN1_CLK_ENB			25
+	#define SPEAR1310_CAN0_CLK_ENB			24
+	#define SPEAR1310_GPT64_CLK_ENB			23
+	#define SPEAR1310_SSP1_CLK_ENB			22
+	#define SPEAR1310_I2C7_CLK_ENB			21
+	#define SPEAR1310_I2C6_CLK_ENB			20
+	#define SPEAR1310_I2C5_CLK_ENB			19
+	#define SPEAR1310_I2C4_CLK_ENB			18
+	#define SPEAR1310_I2C3_CLK_ENB			17
+	#define SPEAR1310_I2C2_CLK_ENB			16
+	#define SPEAR1310_I2C1_CLK_ENB			15
+	#define SPEAR1310_UART5_CLK_ENB			14
+	#define SPEAR1310_UART4_CLK_ENB			13
+	#define SPEAR1310_UART3_CLK_ENB			12
+	#define SPEAR1310_UART2_CLK_ENB			11
+	#define SPEAR1310_UART1_CLK_ENB			10
+	#define SPEAR1310_RS485_1_CLK_ENB		9
+	#define SPEAR1310_RS485_0_CLK_ENB		8
+	#define SPEAR1310_TDM2_CLK_ENB			7
+	#define SPEAR1310_TDM1_CLK_ENB			6
+	#define SPEAR1310_PCI_CLK_ENB			5
+	#define SPEAR1310_GMII_CLK_ENB			4
+	#define SPEAR1310_MII2_CLK_ENB			3
+	#define SPEAR1310_MII1_CLK_ENB			2
+	#define SPEAR1310_MII0_CLK_ENB			1
+	#define SPEAR1310_ESRAM_CLK_ENB			0
+
+static DEFINE_SPINLOCK(_lock);
+
+/* pll rate configuration table, in ascending order of rates */
+static struct pll_rate_tbl pll_rtbl[] = {
+	/* PCLK 24MHz */
+	{.mode = 0, .m = 0x83, .n = 0x04, .p = 0x5}, /* vco 1572, pll 49.125 MHz */
+	{.mode = 0, .m = 0x7D, .n = 0x06, .p = 0x3}, /* vco 1000, pll 125 MHz */
+	{.mode = 0, .m = 0x64, .n = 0x06, .p = 0x1}, /* vco 800, pll 400 MHz */
+	{.mode = 0, .m = 0x7D, .n = 0x06, .p = 0x1}, /* vco 1000, pll 500 MHz */
+	{.mode = 0, .m = 0xA6, .n = 0x06, .p = 0x1}, /* vco 1328, pll 664 MHz */
+	{.mode = 0, .m = 0xC8, .n = 0x06, .p = 0x1}, /* vco 1600, pll 800 MHz */
+	{.mode = 0, .m = 0x7D, .n = 0x06, .p = 0x0}, /* vco 1000, pll 1000 MHz */
+};
+
+/* vco-pll4 rate configuration table, in ascending order of rates */
+static struct pll_rate_tbl pll4_rtbl[] = {
+	{.mode = 0, .m = 0x7D, .n = 0x06, .p = 0x2}, /* vco 1000, pll 250 MHz */
+	{.mode = 0, .m = 0xA6, .n = 0x06, .p = 0x2}, /* vco 1328, pll 332 MHz */
+	{.mode = 0, .m = 0xC8, .n = 0x06, .p = 0x2}, /* vco 1600, pll 400 MHz */
+	{.mode = 0, .m = 0x7D, .n = 0x06, .p = 0x0}, /* vco 1000, pll 1000 MHz */
+};
+
+/* aux rate configuration table, in ascending order of rates */
+static struct aux_rate_tbl aux_rtbl[] = {
+	/* For VCO1div2 = 500 MHz */
+	{.xscale = 10, .yscale = 204, .eq = 0}, /* 12.29 MHz */
+	{.xscale = 4, .yscale = 21, .eq = 0}, /* 48 MHz */
+	{.xscale = 2, .yscale = 6, .eq = 0}, /* 83 MHz */
+	{.xscale = 2, .yscale = 4, .eq = 0}, /* 125 MHz */
+	{.xscale = 1, .yscale = 3, .eq = 1}, /* 166 MHz */
+	{.xscale = 1, .yscale = 2, .eq = 1}, /* 250 MHz */
+};
+
+/* gmac rate configuration table, in ascending order of rates */
+static struct aux_rate_tbl gmac_rtbl[] = {
+	/* For gmac phy input clk */
+	{.xscale = 2, .yscale = 6, .eq = 0}, /* divided by 6 */
+	{.xscale = 2, .yscale = 4, .eq = 0}, /* divided by 4 */
+	{.xscale = 1, .yscale = 3, .eq = 1}, /* divided by 3 */
+	{.xscale = 1, .yscale = 2, .eq = 1}, /* divided by 2 */
+};
+
+/* clcd rate configuration table, in ascending order of rates */
+static struct frac_rate_tbl clcd_rtbl[] = {
+	{.div = 0x14000}, /* 25 MHz, for vco1div4 = 250 MHz */
+	{.div = 0x1284B}, /* 27 MHz, for vco1div4 = 250 MHz */
+	{.div = 0x0D8D3}, /* 58 MHz, for vco1div4 = 393 MHz */
+	{.div = 0x0B72C}, /* 58 MHz, for vco1div4 = 332 MHz */
+	{.div = 0x089EE}, /* 58 MHz, for vco1div4 = 250 MHz */
+	{.div = 0x06f1C}, /* 72 MHz, for vco1div4 = 250 MHz */
+	{.div = 0x06E58}, /* 58 MHz, for vco1div4 = 200 MHz */
+	{.div = 0x06c1B}, /* 74 MHz, for vco1div4 = 250 MHz */
+	{.div = 0x04A12}, /* 108 MHz, for vco1div4 = 250 MHz */
+	{.div = 0x0378E}, /* 144 MHz, for vco1div4 = 250 MHz */
+};
+
+/* i2s prescaler1 masks */
+static struct aux_clk_masks i2s_prs1_masks = {
+	.eq_sel_mask = AUX_EQ_SEL_MASK,
+	.eq_sel_shift = SPEAR1310_I2S_PRS1_EQ_SEL_SHIFT,
+	.eq1_mask = AUX_EQ1_SEL,
+	.eq2_mask = AUX_EQ2_SEL,
+	.xscale_sel_mask = SPEAR1310_I2S_PRS1_CLK_X_MASK,
+	.xscale_sel_shift = SPEAR1310_I2S_PRS1_CLK_X_SHIFT,
+	.yscale_sel_mask = SPEAR1310_I2S_PRS1_CLK_Y_MASK,
+	.yscale_sel_shift = SPEAR1310_I2S_PRS1_CLK_Y_SHIFT,
+};
+
+/* i2s sclk (bit clock) synthesizer masks */
+static struct aux_clk_masks i2s_sclk_masks = {
+	.eq_sel_mask = AUX_EQ_SEL_MASK,
+	.eq_sel_shift = SPEAR1310_I2S_SCLK_EQ_SEL_SHIFT,
+	.eq1_mask = AUX_EQ1_SEL,
+	.eq2_mask = AUX_EQ2_SEL,
+	.xscale_sel_mask = SPEAR1310_I2S_SCLK_X_MASK,
+	.xscale_sel_shift = SPEAR1310_I2S_SCLK_X_SHIFT,
+	.yscale_sel_mask = SPEAR1310_I2S_SCLK_Y_MASK,
+	.yscale_sel_shift = SPEAR1310_I2S_SCLK_Y_SHIFT,
+	.enable_bit = SPEAR1310_I2S_SCLK_SYNTH_ENB,
+};
+
+/* i2s prs1 aux rate configuration table, in ascending order of rates */
+static struct aux_rate_tbl i2s_prs1_rtbl[] = {
+	/* For parent clk = 49.152 MHz */
+	{.xscale = 1, .yscale = 2, .eq = 0}, /* 12.288 MHz */
+};
+
+/* i2s sclk aux rate configuration table, in ascending order of rates */
+static struct aux_rate_tbl i2s_sclk_rtbl[] = {
+	/* For i2s_ref_clk = 12.288 MHz */
+	{.xscale = 1, .yscale = 4, .eq = 0}, /* 1.53 MHz */
+	{.xscale = 1, .yscale = 2, .eq = 0}, /* 3.07 MHz */
+};
+
+/* adc rate configuration table, in ascending order of rates */
+/* possible adc range is 2.5 MHz to 20 MHz. */
+static struct aux_rate_tbl adc_rtbl[] = {
+	/* For ahb = 166.67 MHz */
+	{.xscale = 1, .yscale = 31, .eq = 0}, /* 2.68 MHz */
+	{.xscale = 2, .yscale = 21, .eq = 0}, /* 7.94 MHz */
+	{.xscale = 4, .yscale = 21, .eq = 0}, /* 15.87 MHz */
+	{.xscale = 10, .yscale = 42, .eq = 0}, /* 19.84 MHz */
+};
+
+/* General synth rate configuration table, in ascending order of rates */
+static struct frac_rate_tbl gen_rtbl[] = {
+	/* For vco1div4 = 250 MHz */
+	{.div = 0x14000}, /* 25 MHz */
+	{.div = 0x0A000}, /* 50 MHz */
+	{.div = 0x05000}, /* 100 MHz */
+	{.div = 0x02000}, /* 250 MHz */
+};
+
+/* clock parents */
+static const char *vco_parents[] = { "osc_24m_clk", "osc_25m_clk", };
+static const char *gpt_parents[] = { "osc_24m_clk", "apb_clk", };
+static const char *uart0_parents[] = { "pll5_clk", "uart_synth_gate_clk", };
+static const char *c3_parents[] = { "pll5_clk", "c3_synth_gate_clk", };
+static const char *gmac_phy_input_parents[] = { "gmii_125m_pad_clk", "pll2_clk",
+	"osc_25m_clk", };
+static const char *gmac_phy_parents[] = { "gmac_phy_input_mux_clk",
+	"gmac_phy_synth_gate_clk", };
+static const char *clcd_synth_parents[] = { "vco1div4_clk", "pll2_clk", };
+static const char *clcd_pixel_parents[] = { "pll5_clk", "clcd_synth_clk", };
+static const char *i2s_src_parents[] = { "vco1div2_clk", "none", "pll3_clk",
+	"i2s_src_pad_clk", };
+static const char *i2s_ref_parents[] = { "i2s_src_mux_clk", "i2s_prs1_clk", };
+static const char *gen_synth0_1_parents[] = { "vco1div4_clk", "vco3div2_clk",
+	"pll3_clk", };
+static const char *gen_synth2_3_parents[] = { "vco1div4_clk", "vco3div2_clk",
+	"pll2_clk", };
+static const char *rmii_phy_parents[] = { "ras_tx50_clk", "none",
+	"ras_pll2_clk", "ras_synth0_clk", };
+static const char *smii_rgmii_phy_parents[] = { "none", "ras_tx125_clk",
+	"ras_pll2_clk", "ras_synth0_clk", };
+static const char *uart_parents[] = { "ras_apb_clk", "gen_synth3_clk", };
+static const char *i2c_parents[] = { "ras_apb_clk", "gen_synth1_clk", };
+static const char *ssp1_parents[] = { "ras_apb_clk", "gen_synth1_clk",
+	"ras_plclk0_clk", };
+static const char *pci_parents[] = { "ras_pll3_clk", "gen_synth2_clk", };
+static const char *tdm_parents[] = { "ras_pll3_clk", "gen_synth1_clk", };
+
+void __init spear1310_clk_init(void)
+{
+	struct clk *clk, *clk1;
+
+	clk = clk_register_fixed_rate(NULL, "apb_pclk", NULL, CLK_IS_ROOT, 0);
+	clk_register_clkdev(clk, "apb_pclk", NULL);
+
+	clk = clk_register_fixed_rate(NULL, "osc_32k_clk", NULL, CLK_IS_ROOT,
+			32000);
+	clk_register_clkdev(clk, "osc_32k_clk", NULL);
+
+	clk = clk_register_fixed_rate(NULL, "osc_24m_clk", NULL, CLK_IS_ROOT,
+			24000000);
+	clk_register_clkdev(clk, "osc_24m_clk", NULL);
+
+	clk = clk_register_fixed_rate(NULL, "osc_25m_clk", NULL, CLK_IS_ROOT,
+			25000000);
+	clk_register_clkdev(clk, "osc_25m_clk", NULL);
+
+	clk = clk_register_fixed_rate(NULL, "gmii_125m_pad_clk", NULL,
+			CLK_IS_ROOT, 125000000);
+	clk_register_clkdev(clk, "gmii_125m_pad_clk", NULL);
+
+	clk = clk_register_fixed_rate(NULL, "i2s_src_pad_clk", NULL,
+			CLK_IS_ROOT, 12288000);
+	clk_register_clkdev(clk, "i2s_src_pad_clk", NULL);
+
+	/* clock derived from 32 KHz osc clk */
+	clk = clk_register_gate(NULL, "rtc-spear", "osc_32k_clk", 0,
+			SPEAR1310_PERIP1_CLK_ENB, SPEAR1310_RTC_CLK_ENB, 0,
+			&_lock);
+	clk_register_clkdev(clk, NULL, "fc900000.rtc");
+
+	/* clock derived from 24 or 25 MHz osc clk */
+	/* vco-pll */
+	clk = clk_register_mux(NULL, "vco1_mux_clk", vco_parents,
+			ARRAY_SIZE(vco_parents), 0, SPEAR1310_PLL_CFG,
+			SPEAR1310_PLL1_CLK_SHIFT, SPEAR1310_PLL_CLK_MASK, 0,
+			&_lock);
+	clk_register_clkdev(clk, "vco1_mux_clk", NULL);
+	clk = clk_register_vco_pll("vco1_clk", "pll1_clk", NULL, "vco1_mux_clk",
+			0, SPEAR1310_PLL1_CTR, SPEAR1310_PLL1_FRQ, pll_rtbl,
+			ARRAY_SIZE(pll_rtbl), &_lock, &clk1, NULL);
+	clk_register_clkdev(clk, "vco1_clk", NULL);
+	clk_register_clkdev(clk1, "pll1_clk", NULL);
+
+	clk = clk_register_mux(NULL, "vco2_mux_clk", vco_parents,
+			ARRAY_SIZE(vco_parents), 0, SPEAR1310_PLL_CFG,
+			SPEAR1310_PLL2_CLK_SHIFT, SPEAR1310_PLL_CLK_MASK, 0,
+			&_lock);
+	clk_register_clkdev(clk, "vco2_mux_clk", NULL);
+	clk = clk_register_vco_pll("vco2_clk", "pll2_clk", NULL, "vco2_mux_clk",
+			0, SPEAR1310_PLL2_CTR, SPEAR1310_PLL2_FRQ, pll_rtbl,
+			ARRAY_SIZE(pll_rtbl), &_lock, &clk1, NULL);
+	clk_register_clkdev(clk, "vco2_clk", NULL);
+	clk_register_clkdev(clk1, "pll2_clk", NULL);
+
+	clk = clk_register_mux(NULL, "vco3_mux_clk", vco_parents,
+			ARRAY_SIZE(vco_parents), 0, SPEAR1310_PLL_CFG,
+			SPEAR1310_PLL3_CLK_SHIFT, SPEAR1310_PLL_CLK_MASK, 0,
+			&_lock);
+	clk_register_clkdev(clk, "vco3_mux_clk", NULL);
+	clk = clk_register_vco_pll("vco3_clk", "pll3_clk", NULL, "vco3_mux_clk",
+			0, SPEAR1310_PLL3_CTR, SPEAR1310_PLL3_FRQ, pll_rtbl,
+			ARRAY_SIZE(pll_rtbl), &_lock, &clk1, NULL);
+	clk_register_clkdev(clk, "vco3_clk", NULL);
+	clk_register_clkdev(clk1, "pll3_clk", NULL);
+
+	clk = clk_register_vco_pll("vco4_clk", "pll4_clk", NULL, "osc_24m_clk",
+			0, SPEAR1310_PLL4_CTR, SPEAR1310_PLL4_FRQ, pll4_rtbl,
+			ARRAY_SIZE(pll4_rtbl), &_lock, &clk1, NULL);
+	clk_register_clkdev(clk, "vco4_clk", NULL);
+	clk_register_clkdev(clk1, "pll4_clk", NULL);
+
+	clk = clk_register_fixed_rate(NULL, "pll5_clk", "osc_24m_clk", 0,
+			48000000);
+	clk_register_clkdev(clk, "pll5_clk", NULL);
+
+	clk = clk_register_fixed_rate(NULL, "pll6_clk", "osc_25m_clk", 0,
+			25000000);
+	clk_register_clkdev(clk, "pll6_clk", NULL);
+
+	/* vco div n clocks */
+	clk = clk_register_fixed_factor(NULL, "vco1div2_clk", "vco1_clk", 0, 1,
+			2);
+	clk_register_clkdev(clk, "vco1div2_clk", NULL);
+
+	clk = clk_register_fixed_factor(NULL, "vco1div4_clk", "vco1_clk", 0, 1,
+			4);
+	clk_register_clkdev(clk, "vco1div4_clk", NULL);
+
+	clk = clk_register_fixed_factor(NULL, "vco2div2_clk", "vco2_clk", 0, 1,
+			2);
+	clk_register_clkdev(clk, "vco2div2_clk", NULL);
+
+	clk = clk_register_fixed_factor(NULL, "vco3div2_clk", "vco3_clk", 0, 1,
+			2);
+	clk_register_clkdev(clk, "vco3div2_clk", NULL);
+
+	/* peripherals */
+	clk_register_fixed_factor(NULL, "thermal_clk", "osc_24m_clk", 0, 1,
+			128);
+	clk = clk_register_gate(NULL, "thermal_gate_clk", "thermal_clk", 0,
+			SPEAR1310_PERIP2_CLK_ENB, SPEAR1310_THSENS_CLK_ENB, 0,
+			&_lock);
+	clk_register_clkdev(clk, NULL, "spear_thermal");
+
+	/* clock derived from pll4 clk */
+	clk = clk_register_fixed_factor(NULL, "ddr_clk", "pll4_clk", 0, 1,
+			1);
+	clk_register_clkdev(clk, "ddr_clk", NULL);
+
+	/* clock derived from pll1 clk */
+	clk = clk_register_fixed_factor(NULL, "cpu_clk", "pll1_clk", 0, 1, 2);
+	clk_register_clkdev(clk, "cpu_clk", NULL);
+
+	clk = clk_register_fixed_factor(NULL, "wdt_clk", "cpu_clk", 0, 1,
+			2);
+	clk_register_clkdev(clk, NULL, "ec800620.wdt");
+
+	clk = clk_register_fixed_factor(NULL, "ahb_clk", "pll1_clk", 0, 1,
+			6);
+	clk_register_clkdev(clk, "ahb_clk", NULL);
+
+	clk = clk_register_fixed_factor(NULL, "apb_clk", "pll1_clk", 0, 1,
+			12);
+	clk_register_clkdev(clk, "apb_clk", NULL);
+
+	/* gpt clocks */
+	clk = clk_register_mux(NULL, "gpt0_mux_clk", gpt_parents,
+			ARRAY_SIZE(gpt_parents), 0, SPEAR1310_PERIP_CLK_CFG,
+			SPEAR1310_GPT0_CLK_SHIFT, SPEAR1310_GPT_CLK_MASK, 0,
+			&_lock);
+	clk_register_clkdev(clk, "gpt0_mux_clk", NULL);
+	clk = clk_register_gate(NULL, "gpt0_clk", "gpt0_mux_clk", 0,
+			SPEAR1310_PERIP1_CLK_ENB, SPEAR1310_GPT0_CLK_ENB, 0,
+			&_lock);
+	clk_register_clkdev(clk, NULL, "gpt0");
+
+	clk = clk_register_mux(NULL, "gpt1_mux_clk", gpt_parents,
+			ARRAY_SIZE(gpt_parents), 0, SPEAR1310_PERIP_CLK_CFG,
+			SPEAR1310_GPT1_CLK_SHIFT, SPEAR1310_GPT_CLK_MASK, 0,
+			&_lock);
+	clk_register_clkdev(clk, "gpt1_mux_clk", NULL);
+	clk = clk_register_gate(NULL, "gpt1_clk", "gpt1_mux_clk", 0,
+			SPEAR1310_PERIP1_CLK_ENB, SPEAR1310_GPT1_CLK_ENB, 0,
+			&_lock);
+	clk_register_clkdev(clk, NULL, "gpt1");
+
+	clk = clk_register_mux(NULL, "gpt2_mux_clk", gpt_parents,
+			ARRAY_SIZE(gpt_parents), 0, SPEAR1310_PERIP_CLK_CFG,
+			SPEAR1310_GPT2_CLK_SHIFT, SPEAR1310_GPT_CLK_MASK, 0,
+			&_lock);
+	clk_register_clkdev(clk, "gpt2_mux_clk", NULL);
+	clk = clk_register_gate(NULL, "gpt2_clk", "gpt2_mux_clk", 0,
+			SPEAR1310_PERIP2_CLK_ENB, SPEAR1310_GPT2_CLK_ENB, 0,
+			&_lock);
+	clk_register_clkdev(clk, NULL, "gpt2");
+
+	clk = clk_register_mux(NULL, "gpt3_mux_clk", gpt_parents,
+			ARRAY_SIZE(gpt_parents), 0, SPEAR1310_PERIP_CLK_CFG,
+			SPEAR1310_GPT3_CLK_SHIFT, SPEAR1310_GPT_CLK_MASK, 0,
+			&_lock);
+	clk_register_clkdev(clk, "gpt3_mux_clk", NULL);
+	clk = clk_register_gate(NULL, "gpt3_clk", "gpt3_mux_clk", 0,
+			SPEAR1310_PERIP2_CLK_ENB, SPEAR1310_GPT3_CLK_ENB, 0,
+			&_lock);
+	clk_register_clkdev(clk, NULL, "gpt3");
+
+	/* others */
+	clk = clk_register_aux("uart_synth_clk", "uart_synth_gate_clk",
+			"vco1div2_clk", 0, SPEAR1310_UART_CLK_SYNT, NULL,
+			aux_rtbl, ARRAY_SIZE(aux_rtbl), &_lock, &clk1);
+	clk_register_clkdev(clk, "uart_synth_clk", NULL);
+	clk_register_clkdev(clk1, "uart_synth_gate_clk", NULL);
+
+	clk = clk_register_mux(NULL, "uart0_mux_clk", uart0_parents,
+			ARRAY_SIZE(uart0_parents), 0, SPEAR1310_PERIP_CLK_CFG,
+			SPEAR1310_UART_CLK_SHIFT, SPEAR1310_UART_CLK_MASK, 0,
+			&_lock);
+	clk_register_clkdev(clk, "uart0_mux_clk", NULL);
+
+	clk = clk_register_gate(NULL, "uart0_clk", "uart0_mux_clk", 0,
+			SPEAR1310_PERIP1_CLK_ENB, SPEAR1310_UART_CLK_ENB, 0,
+			&_lock);
+	clk_register_clkdev(clk, NULL, "e0000000.serial");
+
+	clk = clk_register_aux("sdhci_synth_clk", "sdhci_synth_gate_clk",
+			"vco1div2_clk", 0, SPEAR1310_SDHCI_CLK_SYNT, NULL,
+			aux_rtbl, ARRAY_SIZE(aux_rtbl), &_lock, &clk1);
+	clk_register_clkdev(clk, "sdhci_synth_clk", NULL);
+	clk_register_clkdev(clk1, "sdhci_synth_gate_clk", NULL);
+
+	clk = clk_register_gate(NULL, "sdhci_clk", "sdhci_synth_gate_clk", 0,
+			SPEAR1310_PERIP1_CLK_ENB, SPEAR1310_SDHCI_CLK_ENB, 0,
+			&_lock);
+	clk_register_clkdev(clk, NULL, "b3000000.sdhci");
+
+	clk = clk_register_aux("cfxd_synth_clk", "cfxd_synth_gate_clk",
+			"vco1div2_clk", 0, SPEAR1310_CFXD_CLK_SYNT, NULL,
+			aux_rtbl, ARRAY_SIZE(aux_rtbl), &_lock, &clk1);
+	clk_register_clkdev(clk, "cfxd_synth_clk", NULL);
+	clk_register_clkdev(clk1, "cfxd_synth_gate_clk", NULL);
+
+	clk = clk_register_gate(NULL, "cfxd_clk", "cfxd_synth_gate_clk", 0,
+			SPEAR1310_PERIP1_CLK_ENB, SPEAR1310_CFXD_CLK_ENB, 0,
+			&_lock);
+	clk_register_clkdev(clk, NULL, "b2800000.cf");
+	clk_register_clkdev(clk, NULL, "arasan_xd");
+
+	clk = clk_register_aux("c3_synth_clk", "c3_synth_gate_clk",
+			"vco1div2_clk", 0, SPEAR1310_C3_CLK_SYNT, NULL,
+			aux_rtbl, ARRAY_SIZE(aux_rtbl), &_lock, &clk1);
+	clk_register_clkdev(clk, "c3_synth_clk", NULL);
+	clk_register_clkdev(clk1, "c3_synth_gate_clk", NULL);
+
+	clk = clk_register_mux(NULL, "c3_mux_clk", c3_parents,
+			ARRAY_SIZE(c3_parents), 0, SPEAR1310_PERIP_CLK_CFG,
+			SPEAR1310_C3_CLK_SHIFT, SPEAR1310_C3_CLK_MASK, 0,
+			&_lock);
+	clk_register_clkdev(clk, "c3_mux_clk", NULL);
+
+	clk = clk_register_gate(NULL, "c3_clk", "c3_mux_clk", 0,
+			SPEAR1310_PERIP1_CLK_ENB, SPEAR1310_C3_CLK_ENB, 0,
+			&_lock);
+	clk_register_clkdev(clk, NULL, "c3");
+
+	/* gmac */
+	clk = clk_register_mux(NULL, "gmac_phy_input_mux_clk",
+			gmac_phy_input_parents,
+			ARRAY_SIZE(gmac_phy_input_parents), 0,
+			SPEAR1310_GMAC_CLK_CFG,
+			SPEAR1310_GMAC_PHY_INPUT_CLK_SHIFT,
+			SPEAR1310_GMAC_PHY_INPUT_CLK_MASK, 0, &_lock);
+	clk_register_clkdev(clk, "gmac_phy_input_mux_clk", NULL);
+
+	clk = clk_register_aux("gmac_phy_synth_clk", "gmac_phy_synth_gate_clk",
+			"gmac_phy_input_mux_clk", 0, SPEAR1310_GMAC_CLK_SYNT,
+			NULL, gmac_rtbl, ARRAY_SIZE(gmac_rtbl), &_lock, &clk1);
+	clk_register_clkdev(clk, "gmac_phy_synth_clk", NULL);
+	clk_register_clkdev(clk1, "gmac_phy_synth_gate_clk", NULL);
+
+	clk = clk_register_mux(NULL, "gmac_phy_mux_clk", gmac_phy_parents,
+			ARRAY_SIZE(gmac_phy_parents), 0,
+			SPEAR1310_PERIP_CLK_CFG, SPEAR1310_GMAC_PHY_CLK_SHIFT,
+			SPEAR1310_GMAC_PHY_CLK_MASK, 0, &_lock);
+	clk_register_clkdev(clk, NULL, "stmmacphy.0");
+
+	/* clcd */
+	clk = clk_register_mux(NULL, "clcd_synth_mux_clk", clcd_synth_parents,
+			ARRAY_SIZE(clcd_synth_parents), 0,
+			SPEAR1310_CLCD_CLK_SYNT, SPEAR1310_CLCD_SYNT_CLK_SHIFT,
+			SPEAR1310_CLCD_SYNT_CLK_MASK, 0, &_lock);
+	clk_register_clkdev(clk, "clcd_synth_mux_clk", NULL);
+
+	clk = clk_register_frac("clcd_synth_clk", "clcd_synth_mux_clk", 0,
+			SPEAR1310_CLCD_CLK_SYNT, clcd_rtbl,
+			ARRAY_SIZE(clcd_rtbl), &_lock);
+	clk_register_clkdev(clk, "clcd_synth_clk", NULL);
+
+	clk = clk_register_mux(NULL, "clcd_pixel_mux_clk", clcd_pixel_parents,
+			ARRAY_SIZE(clcd_pixel_parents), 0,
+			SPEAR1310_PERIP_CLK_CFG, SPEAR1310_CLCD_CLK_SHIFT,
+			SPEAR1310_CLCD_CLK_MASK, 0, &_lock);
+	clk_register_clkdev(clk, "clcd_pixel_clk", NULL);
+
+	clk = clk_register_gate(NULL, "clcd_clk", "clcd_pixel_mux_clk", 0,
+			SPEAR1310_PERIP1_CLK_ENB, SPEAR1310_CLCD_CLK_ENB, 0,
+			&_lock);
+	clk_register_clkdev(clk, "clcd_clk", NULL);
+
+	/* i2s */
+	clk = clk_register_mux(NULL, "i2s_src_mux_clk", i2s_src_parents,
+			ARRAY_SIZE(i2s_src_parents), 0, SPEAR1310_I2S_CLK_CFG,
+			SPEAR1310_I2S_SRC_CLK_SHIFT, SPEAR1310_I2S_SRC_CLK_MASK,
+			0, &_lock);
+	clk_register_clkdev(clk, "i2s_src_clk", NULL);
+
+	clk = clk_register_aux("i2s_prs1_clk", NULL, "i2s_src_mux_clk", 0,
+			SPEAR1310_I2S_CLK_CFG, &i2s_prs1_masks, i2s_prs1_rtbl,
+			ARRAY_SIZE(i2s_prs1_rtbl), &_lock, NULL);
+	clk_register_clkdev(clk, "i2s_prs1_clk", NULL);
+
+	clk = clk_register_mux(NULL, "i2s_ref_mux_clk", i2s_ref_parents,
+			ARRAY_SIZE(i2s_ref_parents), 0, SPEAR1310_I2S_CLK_CFG,
+			SPEAR1310_I2S_REF_SHIFT, SPEAR1310_I2S_REF_SEL_MASK, 0,
+			&_lock);
+	clk_register_clkdev(clk, "i2s_ref_clk", NULL);
+
+	clk = clk_register_gate(NULL, "i2s_ref_pad_clk", "i2s_ref_mux_clk", 0,
+			SPEAR1310_PERIP2_CLK_ENB, SPEAR1310_I2S_REF_PAD_CLK_ENB,
+			0, &_lock);
+	clk_register_clkdev(clk, "i2s_ref_pad_clk", NULL);
+
+	clk = clk_register_aux("i2s_sclk_clk", "i2s_sclk_gate_clk",
+			"i2s_ref_pad_clk", 0, SPEAR1310_I2S_CLK_CFG,
+			&i2s_sclk_masks, i2s_sclk_rtbl,
+			ARRAY_SIZE(i2s_sclk_rtbl), &_lock, &clk1);
+	clk_register_clkdev(clk, "i2s_sclk_clk", NULL);
+	clk_register_clkdev(clk1, "i2s_sclk_gate_clk", NULL);
+
+	/* clock derived from ahb clk */
+	clk = clk_register_gate(NULL, "i2c0_clk", "ahb_clk", 0,
+			SPEAR1310_PERIP1_CLK_ENB, SPEAR1310_I2C0_CLK_ENB, 0,
+			&_lock);
+	clk_register_clkdev(clk, NULL, "e0280000.i2c");
+
+	clk = clk_register_gate(NULL, "dma_clk", "ahb_clk", 0,
+			SPEAR1310_PERIP1_CLK_ENB, SPEAR1310_DMA_CLK_ENB, 0,
+			&_lock);
+	clk_register_clkdev(clk, NULL, "ea800000.dma");
+	clk_register_clkdev(clk, NULL, "eb000000.dma");
+
+	clk = clk_register_gate(NULL, "jpeg_clk", "ahb_clk", 0,
+			SPEAR1310_PERIP1_CLK_ENB, SPEAR1310_JPEG_CLK_ENB, 0,
+			&_lock);
+	clk_register_clkdev(clk, NULL, "b2000000.jpeg");
+
+	clk = clk_register_gate(NULL, "gmac_clk", "ahb_clk", 0,
+			SPEAR1310_PERIP1_CLK_ENB, SPEAR1310_GMAC_CLK_ENB, 0,
+			&_lock);
+	clk_register_clkdev(clk, NULL, "e2000000.eth");
+
+	clk = clk_register_gate(NULL, "fsmc_clk", "ahb_clk", 0,
+			SPEAR1310_PERIP1_CLK_ENB, SPEAR1310_FSMC_CLK_ENB, 0,
+			&_lock);
+	clk_register_clkdev(clk, NULL, "b0000000.flash");
+
+	clk = clk_register_gate(NULL, "smi_clk", "ahb_clk", 0,
+			SPEAR1310_PERIP1_CLK_ENB, SPEAR1310_SMI_CLK_ENB, 0,
+			&_lock);
+	clk_register_clkdev(clk, NULL, "ea000000.flash");
+
+	clk = clk_register_gate(NULL, "usbh0_clk", "ahb_clk", 0,
+			SPEAR1310_PERIP1_CLK_ENB, SPEAR1310_UHC0_CLK_ENB, 0,
+			&_lock);
+	clk_register_clkdev(clk, "usbh.0_clk", NULL);
+
+	clk = clk_register_gate(NULL, "usbh1_clk", "ahb_clk", 0,
+			SPEAR1310_PERIP1_CLK_ENB, SPEAR1310_UHC1_CLK_ENB, 0,
+			&_lock);
+	clk_register_clkdev(clk, "usbh.1_clk", NULL);
+
+	clk = clk_register_gate(NULL, "uoc_clk", "ahb_clk", 0,
+			SPEAR1310_PERIP1_CLK_ENB, SPEAR1310_UOC_CLK_ENB, 0,
+			&_lock);
+	clk_register_clkdev(clk, NULL, "uoc");
+
+	clk = clk_register_gate(NULL, "pcie_sata_0_clk", "ahb_clk", 0,
+			SPEAR1310_PERIP1_CLK_ENB, SPEAR1310_PCIE_SATA_0_CLK_ENB,
+			0, &_lock);
+	clk_register_clkdev(clk, NULL, "dw_pcie.0");
+	clk_register_clkdev(clk, NULL, "ahci.0");
+
+	clk = clk_register_gate(NULL, "pcie_sata_1_clk", "ahb_clk", 0,
+			SPEAR1310_PERIP1_CLK_ENB, SPEAR1310_PCIE_SATA_1_CLK_ENB,
+			0, &_lock);
+	clk_register_clkdev(clk, NULL, "dw_pcie.1");
+	clk_register_clkdev(clk, NULL, "ahci.1");
+
+	clk = clk_register_gate(NULL, "pcie_sata_2_clk", "ahb_clk", 0,
+			SPEAR1310_PERIP1_CLK_ENB, SPEAR1310_PCIE_SATA_2_CLK_ENB,
+			0, &_lock);
+	clk_register_clkdev(clk, NULL, "dw_pcie.2");
+	clk_register_clkdev(clk, NULL, "ahci.2");
+
+	clk = clk_register_gate(NULL, "sysram0_clk", "ahb_clk", 0,
+			SPEAR1310_PERIP1_CLK_ENB, SPEAR1310_SYSRAM0_CLK_ENB, 0,
+			&_lock);
+	clk_register_clkdev(clk, "sysram0_clk", NULL);
+
+	clk = clk_register_gate(NULL, "sysram1_clk", "ahb_clk", 0,
+			SPEAR1310_PERIP1_CLK_ENB, SPEAR1310_SYSRAM1_CLK_ENB, 0,
+			&_lock);
+	clk_register_clkdev(clk, "sysram1_clk", NULL);
+
+	clk = clk_register_aux("adc_synth_clk", "adc_synth_gate_clk", "ahb_clk",
+			0, SPEAR1310_ADC_CLK_SYNT, NULL, adc_rtbl,
+			ARRAY_SIZE(adc_rtbl), &_lock, &clk1);
+	clk_register_clkdev(clk, "adc_synth_clk", NULL);
+	clk_register_clkdev(clk1, "adc_synth_gate_clk", NULL);
+
+	clk = clk_register_gate(NULL, "adc_clk", "adc_synth_gate_clk", 0,
+			SPEAR1310_PERIP1_CLK_ENB, SPEAR1310_ADC_CLK_ENB, 0,
+			&_lock);
+	clk_register_clkdev(clk, NULL, "adc_clk");
+
+	/* clock derived from apb clk */
+	clk = clk_register_gate(NULL, "ssp0_clk", "apb_clk", 0,
+			SPEAR1310_PERIP1_CLK_ENB, SPEAR1310_SSP_CLK_ENB, 0,
+			&_lock);
+	clk_register_clkdev(clk, NULL, "e0100000.spi");
+
+	clk = clk_register_gate(NULL, "gpio0_clk", "apb_clk", 0,
+			SPEAR1310_PERIP1_CLK_ENB, SPEAR1310_GPIO0_CLK_ENB, 0,
+			&_lock);
+	clk_register_clkdev(clk, NULL, "e0600000.gpio");
+
+	clk = clk_register_gate(NULL, "gpio1_clk", "apb_clk", 0,
+			SPEAR1310_PERIP1_CLK_ENB, SPEAR1310_GPIO1_CLK_ENB, 0,
+			&_lock);
+	clk_register_clkdev(clk, NULL, "e0680000.gpio");
+
+	clk = clk_register_gate(NULL, "i2s0_clk", "apb_clk", 0,
+			SPEAR1310_PERIP1_CLK_ENB, SPEAR1310_I2S0_CLK_ENB, 0,
+			&_lock);
+	clk_register_clkdev(clk, NULL, "e0180000.i2s");
+
+	clk = clk_register_gate(NULL, "i2s1_clk", "apb_clk", 0,
+			SPEAR1310_PERIP1_CLK_ENB, SPEAR1310_I2S1_CLK_ENB, 0,
+			&_lock);
+	clk_register_clkdev(clk, NULL, "e0200000.i2s");
+
+	clk = clk_register_gate(NULL, "kbd_clk", "apb_clk", 0,
+			SPEAR1310_PERIP2_CLK_ENB, SPEAR1310_KBD_CLK_ENB, 0,
+			&_lock);
+	clk_register_clkdev(clk, NULL, "e0300000.kbd");
+
+	/* RAS clks */
+	clk = clk_register_mux(NULL, "gen_synth0_1_mux_clk",
+			gen_synth0_1_parents, ARRAY_SIZE(gen_synth0_1_parents),
+			0, SPEAR1310_PLL_CFG, SPEAR1310_RAS_SYNT0_1_CLK_SHIFT,
+			SPEAR1310_RAS_SYNT_CLK_MASK, 0, &_lock);
+	clk_register_clkdev(clk, "gen_synth0_1_clk", NULL);
+
+	clk = clk_register_mux(NULL, "gen_synth2_3_mux_clk",
+			gen_synth2_3_parents, ARRAY_SIZE(gen_synth2_3_parents),
+			0, SPEAR1310_PLL_CFG, SPEAR1310_RAS_SYNT2_3_CLK_SHIFT,
+			SPEAR1310_RAS_SYNT_CLK_MASK, 0, &_lock);
+	clk_register_clkdev(clk, "gen_synth2_3_clk", NULL);
+
+	clk = clk_register_frac("gen_synth0_clk", "gen_synth0_1_clk", 0,
+			SPEAR1310_RAS_CLK_SYNT0, gen_rtbl, ARRAY_SIZE(gen_rtbl),
+			&_lock);
+	clk_register_clkdev(clk, "gen_synth0_clk", NULL);
+
+	clk = clk_register_frac("gen_synth1_clk", "gen_synth0_1_clk", 0,
+			SPEAR1310_RAS_CLK_SYNT1, gen_rtbl, ARRAY_SIZE(gen_rtbl),
+			&_lock);
+	clk_register_clkdev(clk, "gen_synth1_clk", NULL);
+
+	clk = clk_register_frac("gen_synth2_clk", "gen_synth2_3_clk", 0,
+			SPEAR1310_RAS_CLK_SYNT2, gen_rtbl, ARRAY_SIZE(gen_rtbl),
+			&_lock);
+	clk_register_clkdev(clk, "gen_synth2_clk", NULL);
+
+	clk = clk_register_frac("gen_synth3_clk", "gen_synth2_3_clk", 0,
+			SPEAR1310_RAS_CLK_SYNT3, gen_rtbl, ARRAY_SIZE(gen_rtbl),
+			&_lock);
+	clk_register_clkdev(clk, "gen_synth3_clk", NULL);
+
+	clk = clk_register_gate(NULL, "ras_osc_24m_clk", "osc_24m_clk", 0,
+			SPEAR1310_RAS_CLK_ENB, SPEAR1310_OSC_24M_CLK_ENB, 0,
+			&_lock);
+	clk_register_clkdev(clk, "ras_osc_24m_clk", NULL);
+
+	clk = clk_register_gate(NULL, "ras_osc_25m_clk", "osc_25m_clk", 0,
+			SPEAR1310_RAS_CLK_ENB, SPEAR1310_OSC_25M_CLK_ENB, 0,
+			&_lock);
+	clk_register_clkdev(clk, "ras_osc_25m_clk", NULL);
+
+	clk = clk_register_gate(NULL, "ras_osc_32k_clk", "osc_32k_clk", 0,
+			SPEAR1310_RAS_CLK_ENB, SPEAR1310_OSC_32K_CLK_ENB, 0,
+			&_lock);
+	clk_register_clkdev(clk, "ras_osc_32k_clk", NULL);
+
+	clk = clk_register_gate(NULL, "ras_pll2_clk", "pll2_clk", 0,
+			SPEAR1310_RAS_CLK_ENB, SPEAR1310_PLL2_CLK_ENB, 0,
+			&_lock);
+	clk_register_clkdev(clk, "ras_pll2_clk", NULL);
+
+	clk = clk_register_gate(NULL, "ras_pll3_clk", "pll3_clk", 0,
+			SPEAR1310_RAS_CLK_ENB, SPEAR1310_PLL3_CLK_ENB, 0,
+			&_lock);
+	clk_register_clkdev(clk, "ras_pll3_clk", NULL);
+
+	clk = clk_register_gate(NULL, "ras_tx125_clk", "gmii_125m_pad_clk", 0,
+			SPEAR1310_RAS_CLK_ENB, SPEAR1310_C125M_PAD_CLK_ENB, 0,
+			&_lock);
+	clk_register_clkdev(clk, "ras_tx125_clk", NULL);
+
+	clk = clk_register_fixed_rate(NULL, "ras_30m_fixed_clk", "pll5_clk", 0,
+			30000000);
+	clk = clk_register_gate(NULL, "ras_30m_clk", "ras_30m_fixed_clk", 0,
+			SPEAR1310_RAS_CLK_ENB, SPEAR1310_C30M_CLK_ENB, 0,
+			&_lock);
+	clk_register_clkdev(clk, "ras_30m_clk", NULL);
+
+	clk = clk_register_fixed_rate(NULL, "ras_48m_fixed_clk", "pll5_clk", 0,
+			48000000);
+	clk = clk_register_gate(NULL, "ras_48m_clk", "ras_48m_fixed_clk", 0,
+			SPEAR1310_RAS_CLK_ENB, SPEAR1310_C48M_CLK_ENB, 0,
+			&_lock);
+	clk_register_clkdev(clk, "ras_48m_clk", NULL);
+
+	clk = clk_register_gate(NULL, "ras_ahb_clk", "ahb_clk", 0,
+			SPEAR1310_RAS_CLK_ENB, SPEAR1310_ACLK_CLK_ENB, 0,
+			&_lock);
+	clk_register_clkdev(clk, "ras_ahb_clk", NULL);
+
+	clk = clk_register_gate(NULL, "ras_apb_clk", "apb_clk", 0,
+			SPEAR1310_RAS_CLK_ENB, SPEAR1310_PCLK_CLK_ENB, 0,
+			&_lock);
+	clk_register_clkdev(clk, "ras_apb_clk", NULL);
+
+	clk = clk_register_fixed_rate(NULL, "ras_plclk0_clk", NULL, CLK_IS_ROOT,
+			50000000);
+
+	clk = clk_register_fixed_rate(NULL, "ras_tx50_clk", NULL, CLK_IS_ROOT,
+			50000000);
+
+	clk = clk_register_gate(NULL, "can0_clk", "apb_clk", 0,
+			SPEAR1310_RAS_SW_CLK_CTRL, SPEAR1310_CAN0_CLK_ENB, 0,
+			&_lock);
+	clk_register_clkdev(clk, NULL, "c_can_platform.0");
+
+	clk = clk_register_gate(NULL, "can1_clk", "apb_clk", 0,
+			SPEAR1310_RAS_SW_CLK_CTRL, SPEAR1310_CAN1_CLK_ENB, 0,
+			&_lock);
+	clk_register_clkdev(clk, NULL, "c_can_platform.1");
+
+	clk = clk_register_gate(NULL, "ras_smii0_clk", "ras_ahb_clk", 0,
+			SPEAR1310_RAS_SW_CLK_CTRL, SPEAR1310_MII0_CLK_ENB, 0,
+			&_lock);
+	clk_register_clkdev(clk, NULL, "5c400000.eth");
+
+	clk = clk_register_gate(NULL, "ras_smii1_clk", "ras_ahb_clk", 0,
+			SPEAR1310_RAS_SW_CLK_CTRL, SPEAR1310_MII1_CLK_ENB, 0,
+			&_lock);
+	clk_register_clkdev(clk, NULL, "5c500000.eth");
+
+	clk = clk_register_gate(NULL, "ras_smii2_clk", "ras_ahb_clk", 0,
+			SPEAR1310_RAS_SW_CLK_CTRL, SPEAR1310_MII2_CLK_ENB, 0,
+			&_lock);
+	clk_register_clkdev(clk, NULL, "5c600000.eth");
+
+	clk = clk_register_gate(NULL, "ras_rgmii_clk", "ras_ahb_clk", 0,
+			SPEAR1310_RAS_SW_CLK_CTRL, SPEAR1310_GMII_CLK_ENB, 0,
+			&_lock);
+	clk_register_clkdev(clk, NULL, "5c700000.eth");
+
+	clk = clk_register_mux(NULL, "smii_rgmii_phy_mux_clk",
+			smii_rgmii_phy_parents,
+			ARRAY_SIZE(smii_rgmii_phy_parents), 0,
+			SPEAR1310_RAS_CTRL_REG1,
+			SPEAR1310_SMII_RGMII_PHY_CLK_SHIFT,
+			SPEAR1310_PHY_CLK_MASK, 0, &_lock);
+	clk_register_clkdev(clk, NULL, "stmmacphy.1");
+	clk_register_clkdev(clk, NULL, "stmmacphy.2");
+	clk_register_clkdev(clk, NULL, "stmmacphy.4");
+
+	clk = clk_register_mux(NULL, "rmii_phy_mux_clk", rmii_phy_parents,
+			ARRAY_SIZE(rmii_phy_parents), 0,
+			SPEAR1310_RAS_CTRL_REG1, SPEAR1310_RMII_PHY_CLK_SHIFT,
+			SPEAR1310_PHY_CLK_MASK, 0, &_lock);
+	clk_register_clkdev(clk, NULL, "stmmacphy.3");
+
+	clk = clk_register_mux(NULL, "uart1_mux_clk", uart_parents,
+			ARRAY_SIZE(uart_parents), 0, SPEAR1310_RAS_CTRL_REG0,
+			SPEAR1310_UART1_CLK_SHIFT, SPEAR1310_RAS_UART_CLK_MASK,
+			0, &_lock);
+	clk_register_clkdev(clk, "uart1_mux_clk", NULL);
+
+	clk = clk_register_gate(NULL, "uart1_clk", "uart1_mux_clk", 0,
+			SPEAR1310_RAS_SW_CLK_CTRL, SPEAR1310_UART1_CLK_ENB, 0,
+			&_lock);
+	clk_register_clkdev(clk, NULL, "5c800000.serial");
+
+	clk = clk_register_mux(NULL, "uart2_mux_clk", uart_parents,
+			ARRAY_SIZE(uart_parents), 0, SPEAR1310_RAS_CTRL_REG0,
+			SPEAR1310_UART2_CLK_SHIFT, SPEAR1310_RAS_UART_CLK_MASK,
+			0, &_lock);
+	clk_register_clkdev(clk, "uart2_mux_clk", NULL);
+
+	clk = clk_register_gate(NULL, "uart2_clk", "uart2_mux_clk", 0,
+			SPEAR1310_RAS_SW_CLK_CTRL, SPEAR1310_UART2_CLK_ENB, 0,
+			&_lock);
+	clk_register_clkdev(clk, NULL, "5c900000.serial");
+
+	clk = clk_register_mux(NULL, "uart3_mux_clk", uart_parents,
+			ARRAY_SIZE(uart_parents), 0, SPEAR1310_RAS_CTRL_REG0,
+			SPEAR1310_UART3_CLK_SHIFT, SPEAR1310_RAS_UART_CLK_MASK,
+			0, &_lock);
+	clk_register_clkdev(clk, "uart3_mux_clk", NULL);
+
+	clk = clk_register_gate(NULL, "uart3_clk", "uart3_mux_clk", 0,
+			SPEAR1310_RAS_SW_CLK_CTRL, SPEAR1310_UART3_CLK_ENB, 0,
+			&_lock);
+	clk_register_clkdev(clk, NULL, "5ca00000.serial");
+
+	clk = clk_register_mux(NULL, "uart4_mux_clk", uart_parents,
+			ARRAY_SIZE(uart_parents), 0, SPEAR1310_RAS_CTRL_REG0,
+			SPEAR1310_UART4_CLK_SHIFT, SPEAR1310_RAS_UART_CLK_MASK,
+			0, &_lock);
+	clk_register_clkdev(clk, "uart4_mux_clk", NULL);
+
+	clk = clk_register_gate(NULL, "uart4_clk", "uart4_mux_clk", 0,
+			SPEAR1310_RAS_SW_CLK_CTRL, SPEAR1310_UART4_CLK_ENB, 0,
+			&_lock);
+	clk_register_clkdev(clk, NULL, "5cb00000.serial");
+
+	clk = clk_register_mux(NULL, "uart5_mux_clk", uart_parents,
+			ARRAY_SIZE(uart_parents), 0, SPEAR1310_RAS_CTRL_REG0,
+			SPEAR1310_UART5_CLK_SHIFT, SPEAR1310_RAS_UART_CLK_MASK,
+			0, &_lock);
+	clk_register_clkdev(clk, "uart5_mux_clk", NULL);
+
+	clk = clk_register_gate(NULL, "uart5_clk", "uart5_mux_clk", 0,
+			SPEAR1310_RAS_SW_CLK_CTRL, SPEAR1310_UART5_CLK_ENB, 0,
+			&_lock);
+	clk_register_clkdev(clk, NULL, "5cc00000.serial");
+
+	clk = clk_register_mux(NULL, "i2c1_mux_clk", i2c_parents,
+			ARRAY_SIZE(i2c_parents), 0, SPEAR1310_RAS_CTRL_REG0,
+			SPEAR1310_I2C1_CLK_SHIFT, SPEAR1310_I2C_CLK_MASK, 0,
+			&_lock);
+	clk_register_clkdev(clk, "i2c1_mux_clk", NULL);
+
+	clk = clk_register_gate(NULL, "i2c1_clk", "i2c1_mux_clk", 0,
+			SPEAR1310_RAS_SW_CLK_CTRL, SPEAR1310_I2C1_CLK_ENB, 0,
+			&_lock);
+	clk_register_clkdev(clk, NULL, "5cd00000.i2c");
+
+	clk = clk_register_mux(NULL, "i2c2_mux_clk", i2c_parents,
+			ARRAY_SIZE(i2c_parents), 0, SPEAR1310_RAS_CTRL_REG0,
+			SPEAR1310_I2C2_CLK_SHIFT, SPEAR1310_I2C_CLK_MASK, 0,
+			&_lock);
+	clk_register_clkdev(clk, "i2c2_mux_clk", NULL);
+
+	clk = clk_register_gate(NULL, "i2c2_clk", "i2c2_mux_clk", 0,
+			SPEAR1310_RAS_SW_CLK_CTRL, SPEAR1310_I2C2_CLK_ENB, 0,
+			&_lock);
+	clk_register_clkdev(clk, NULL, "5ce00000.i2c");
+
+	clk = clk_register_mux(NULL, "i2c3_mux_clk", i2c_parents,
+			ARRAY_SIZE(i2c_parents), 0, SPEAR1310_RAS_CTRL_REG0,
+			SPEAR1310_I2C3_CLK_SHIFT, SPEAR1310_I2C_CLK_MASK, 0,
+			&_lock);
+	clk_register_clkdev(clk, "i2c3_mux_clk", NULL);
+
+	clk = clk_register_gate(NULL, "i2c3_clk", "i2c3_mux_clk", 0,
+			SPEAR1310_RAS_SW_CLK_CTRL, SPEAR1310_I2C3_CLK_ENB, 0,
+			&_lock);
+	clk_register_clkdev(clk, NULL, "5cf00000.i2c");
+
+	clk = clk_register_mux(NULL, "i2c4_mux_clk", i2c_parents,
+			ARRAY_SIZE(i2c_parents), 0, SPEAR1310_RAS_CTRL_REG0,
+			SPEAR1310_I2C4_CLK_SHIFT, SPEAR1310_I2C_CLK_MASK, 0,
+			&_lock);
+	clk_register_clkdev(clk, "i2c4_mux_clk", NULL);
+
+	clk = clk_register_gate(NULL, "i2c4_clk", "i2c4_mux_clk", 0,
+			SPEAR1310_RAS_SW_CLK_CTRL, SPEAR1310_I2C4_CLK_ENB, 0,
+			&_lock);
+	clk_register_clkdev(clk, NULL, "5d000000.i2c");
+
+	clk = clk_register_mux(NULL, "i2c5_mux_clk", i2c_parents,
+			ARRAY_SIZE(i2c_parents), 0, SPEAR1310_RAS_CTRL_REG0,
+			SPEAR1310_I2C5_CLK_SHIFT, SPEAR1310_I2C_CLK_MASK, 0,
+			&_lock);
+	clk_register_clkdev(clk, "i2c5_mux_clk", NULL);
+
+	clk = clk_register_gate(NULL, "i2c5_clk", "i2c5_mux_clk", 0,
+			SPEAR1310_RAS_SW_CLK_CTRL, SPEAR1310_I2C5_CLK_ENB, 0,
+			&_lock);
+	clk_register_clkdev(clk, NULL, "5d100000.i2c");
+
+	clk = clk_register_mux(NULL, "i2c6_mux_clk", i2c_parents,
+			ARRAY_SIZE(i2c_parents), 0, SPEAR1310_RAS_CTRL_REG0,
+			SPEAR1310_I2C6_CLK_SHIFT, SPEAR1310_I2C_CLK_MASK, 0,
+			&_lock);
+	clk_register_clkdev(clk, "i2c6_mux_clk", NULL);
+
+	clk = clk_register_gate(NULL, "i2c6_clk", "i2c6_mux_clk", 0,
+			SPEAR1310_RAS_SW_CLK_CTRL, SPEAR1310_I2C6_CLK_ENB, 0,
+			&_lock);
+	clk_register_clkdev(clk, NULL, "5d200000.i2c");
+
+	clk = clk_register_mux(NULL, "i2c7_mux_clk", i2c_parents,
+			ARRAY_SIZE(i2c_parents), 0, SPEAR1310_RAS_CTRL_REG0,
+			SPEAR1310_I2C7_CLK_SHIFT, SPEAR1310_I2C_CLK_MASK, 0,
+			&_lock);
+	clk_register_clkdev(clk, "i2c7_mux_clk", NULL);
+
+	clk = clk_register_gate(NULL, "i2c7_clk", "i2c7_mux_clk", 0,
+			SPEAR1310_RAS_SW_CLK_CTRL, SPEAR1310_I2C7_CLK_ENB, 0,
+			&_lock);
+	clk_register_clkdev(clk, NULL, "5d300000.i2c");
+
+	clk = clk_register_mux(NULL, "ssp1_mux_clk", ssp1_parents,
+			ARRAY_SIZE(ssp1_parents), 0, SPEAR1310_RAS_CTRL_REG0,
+			SPEAR1310_SSP1_CLK_SHIFT, SPEAR1310_SSP1_CLK_MASK, 0,
+			&_lock);
+	clk_register_clkdev(clk, "ssp1_mux_clk", NULL);
+
+	clk = clk_register_gate(NULL, "ssp1_clk", "ssp1_mux_clk", 0,
+			SPEAR1310_RAS_SW_CLK_CTRL, SPEAR1310_SSP1_CLK_ENB, 0,
+			&_lock);
+	clk_register_clkdev(clk, NULL, "5d400000.spi");
+
+	clk = clk_register_mux(NULL, "pci_mux_clk", pci_parents,
+			ARRAY_SIZE(pci_parents), 0, SPEAR1310_RAS_CTRL_REG0,
+			SPEAR1310_PCI_CLK_SHIFT, SPEAR1310_PCI_CLK_MASK, 0,
+			&_lock);
+	clk_register_clkdev(clk, "pci_mux_clk", NULL);
+
+	clk = clk_register_gate(NULL, "pci_clk", "pci_mux_clk", 0,
+			SPEAR1310_RAS_SW_CLK_CTRL, SPEAR1310_PCI_CLK_ENB, 0,
+			&_lock);
+	clk_register_clkdev(clk, NULL, "pci");
+
+	clk = clk_register_mux(NULL, "tdm1_mux_clk", tdm_parents,
+			ARRAY_SIZE(tdm_parents), 0, SPEAR1310_RAS_CTRL_REG0,
+			SPEAR1310_TDM1_CLK_SHIFT, SPEAR1310_TDM_CLK_MASK, 0,
+			&_lock);
+	clk_register_clkdev(clk, "tdm1_mux_clk", NULL);
+
+	clk = clk_register_gate(NULL, "tdm1_clk", "tdm1_mux_clk", 0,
+			SPEAR1310_RAS_SW_CLK_CTRL, SPEAR1310_TDM1_CLK_ENB, 0,
+			&_lock);
+	clk_register_clkdev(clk, NULL, "tdm_hdlc.0");
+
+	clk = clk_register_mux(NULL, "tdm2_mux_clk", tdm_parents,
+			ARRAY_SIZE(tdm_parents), 0, SPEAR1310_RAS_CTRL_REG0,
+			SPEAR1310_TDM2_CLK_SHIFT, SPEAR1310_TDM_CLK_MASK, 0,
+			&_lock);
+	clk_register_clkdev(clk, "tdm2_mux_clk", NULL);
+
+	clk = clk_register_gate(NULL, "tdm2_clk", "tdm2_mux_clk", 0,
+			SPEAR1310_RAS_SW_CLK_CTRL, SPEAR1310_TDM2_CLK_ENB, 0,
+			&_lock);
+	clk_register_clkdev(clk, NULL, "tdm_hdlc.1");
+}
diff --git a/drivers/clk/spear/spear1340_clock.c b/drivers/clk/spear/spear1340_clock.c
new file mode 100644
index 0000000..e3ea721
--- /dev/null
+++ b/drivers/clk/spear/spear1340_clock.c
@@ -0,0 +1,964 @@
+/*
+ * arch/arm/mach-spear13xx/spear1340_clock.c
+ *
+ * SPEAr1340 machine clock framework source file
+ *
+ * Copyright (C) 2012 ST Microelectronics
+ * Viresh Kumar <viresh.linux@gmail.com>
+ *
+ * This file is licensed under the terms of the GNU General Public
+ * License version 2. This program is licensed "as is" without any
+ * warranty of any kind, whether express or implied.
+ */
+
+#include <linux/clk.h>
+#include <linux/clkdev.h>
+#include <linux/err.h>
+#include <linux/io.h>
+#include <linux/of_platform.h>
+#include <linux/spinlock_types.h>
+#include <mach/spear.h>
+#include "clk.h"
+
+/* Clock Configuration Registers */
+#define SPEAR1340_SYS_CLK_CTRL			(VA_MISC_BASE + 0x200)
+	#define SPEAR1340_HCLK_SRC_SEL_SHIFT	27
+	#define SPEAR1340_HCLK_SRC_SEL_MASK	1
+	#define SPEAR1340_SCLK_SRC_SEL_SHIFT	23
+	#define SPEAR1340_SCLK_SRC_SEL_MASK	3
+
+/* PLL related registers and bit values */
+#define SPEAR1340_PLL_CFG			(VA_MISC_BASE + 0x210)
+	/* PLL_CFG bit values */
+	#define SPEAR1340_CLCD_SYNT_CLK_MASK		1
+	#define SPEAR1340_CLCD_SYNT_CLK_SHIFT		31
+	#define SPEAR1340_GEN_SYNT2_3_CLK_SHIFT		29
+	#define SPEAR1340_GEN_SYNT_CLK_MASK		2
+	#define SPEAR1340_GEN_SYNT0_1_CLK_SHIFT		27
+	#define SPEAR1340_PLL_CLK_MASK			2
+	#define SPEAR1340_PLL3_CLK_SHIFT		24
+	#define SPEAR1340_PLL2_CLK_SHIFT		22
+	#define SPEAR1340_PLL1_CLK_SHIFT		20
+
+#define SPEAR1340_PLL1_CTR			(VA_MISC_BASE + 0x214)
+#define SPEAR1340_PLL1_FRQ			(VA_MISC_BASE + 0x218)
+#define SPEAR1340_PLL2_CTR			(VA_MISC_BASE + 0x220)
+#define SPEAR1340_PLL2_FRQ			(VA_MISC_BASE + 0x224)
+#define SPEAR1340_PLL3_CTR			(VA_MISC_BASE + 0x22C)
+#define SPEAR1340_PLL3_FRQ			(VA_MISC_BASE + 0x230)
+#define SPEAR1340_PLL4_CTR			(VA_MISC_BASE + 0x238)
+#define SPEAR1340_PLL4_FRQ			(VA_MISC_BASE + 0x23C)
+#define SPEAR1340_PERIP_CLK_CFG			(VA_MISC_BASE + 0x244)
+	/* PERIP_CLK_CFG bit values */
+	#define SPEAR1340_SPDIF_CLK_MASK		1
+	#define SPEAR1340_SPDIF_OUT_CLK_SHIFT		15
+	#define SPEAR1340_SPDIF_IN_CLK_SHIFT		14
+	#define SPEAR1340_GPT3_CLK_SHIFT		13
+	#define SPEAR1340_GPT2_CLK_SHIFT		12
+	#define SPEAR1340_GPT_CLK_MASK			1
+	#define SPEAR1340_GPT1_CLK_SHIFT		9
+	#define SPEAR1340_GPT0_CLK_SHIFT		8
+	#define SPEAR1340_UART_CLK_MASK			2
+	#define SPEAR1340_UART1_CLK_SHIFT		6
+	#define SPEAR1340_UART0_CLK_SHIFT		4
+	#define SPEAR1340_CLCD_CLK_MASK			2
+	#define SPEAR1340_CLCD_CLK_SHIFT		2
+	#define SPEAR1340_C3_CLK_MASK			1
+	#define SPEAR1340_C3_CLK_SHIFT			1
+
+#define SPEAR1340_GMAC_CLK_CFG			(VA_MISC_BASE + 0x248)
+	#define SPEAR1340_GMAC_PHY_CLK_MASK		1
+	#define SPEAR1340_GMAC_PHY_CLK_SHIFT		2
+	#define SPEAR1340_GMAC_PHY_INPUT_CLK_MASK	2
+	#define SPEAR1340_GMAC_PHY_INPUT_CLK_SHIFT	0
+
+#define SPEAR1340_I2S_CLK_CFG			(VA_MISC_BASE + 0x24C)
+	/* I2S_CLK_CFG register mask */
+	#define SPEAR1340_I2S_SCLK_X_MASK		0x1F
+	#define SPEAR1340_I2S_SCLK_X_SHIFT		27
+	#define SPEAR1340_I2S_SCLK_Y_MASK		0x1F
+	#define SPEAR1340_I2S_SCLK_Y_SHIFT		22
+	#define SPEAR1340_I2S_SCLK_EQ_SEL_SHIFT		21
+	#define SPEAR1340_I2S_SCLK_SYNTH_ENB		20
+	#define SPEAR1340_I2S_PRS1_CLK_X_MASK		0xFF
+	#define SPEAR1340_I2S_PRS1_CLK_X_SHIFT		12
+	#define SPEAR1340_I2S_PRS1_CLK_Y_MASK		0xFF
+	#define SPEAR1340_I2S_PRS1_CLK_Y_SHIFT		4
+	#define SPEAR1340_I2S_PRS1_EQ_SEL_SHIFT		3
+	#define SPEAR1340_I2S_REF_SEL_MASK		1
+	#define SPEAR1340_I2S_REF_SHIFT			2
+	#define SPEAR1340_I2S_SRC_CLK_MASK		2
+	#define SPEAR1340_I2S_SRC_CLK_SHIFT		0
+
+#define SPEAR1340_C3_CLK_SYNT			(VA_MISC_BASE + 0x250)
+#define SPEAR1340_UART0_CLK_SYNT		(VA_MISC_BASE + 0x254)
+#define SPEAR1340_UART1_CLK_SYNT		(VA_MISC_BASE + 0x258)
+#define SPEAR1340_GMAC_CLK_SYNT			(VA_MISC_BASE + 0x25C)
+#define SPEAR1340_SDHCI_CLK_SYNT		(VA_MISC_BASE + 0x260)
+#define SPEAR1340_CFXD_CLK_SYNT			(VA_MISC_BASE + 0x264)
+#define SPEAR1340_ADC_CLK_SYNT			(VA_MISC_BASE + 0x270)
+#define SPEAR1340_AMBA_CLK_SYNT			(VA_MISC_BASE + 0x274)
+#define SPEAR1340_CLCD_CLK_SYNT			(VA_MISC_BASE + 0x27C)
+#define SPEAR1340_SYS_CLK_SYNT			(VA_MISC_BASE + 0x284)
+#define SPEAR1340_GEN_CLK_SYNT0			(VA_MISC_BASE + 0x28C)
+#define SPEAR1340_GEN_CLK_SYNT1			(VA_MISC_BASE + 0x294)
+#define SPEAR1340_GEN_CLK_SYNT2			(VA_MISC_BASE + 0x29C)
+#define SPEAR1340_GEN_CLK_SYNT3			(VA_MISC_BASE + 0x304)
+#define SPEAR1340_PERIP1_CLK_ENB		(VA_MISC_BASE + 0x30C)
+	#define SPEAR1340_RTC_CLK_ENB			31
+	#define SPEAR1340_ADC_CLK_ENB			30
+	#define SPEAR1340_C3_CLK_ENB			29
+	#define SPEAR1340_CLCD_CLK_ENB			27
+	#define SPEAR1340_DMA_CLK_ENB			25
+	#define SPEAR1340_GPIO1_CLK_ENB			24
+	#define SPEAR1340_GPIO0_CLK_ENB			23
+	#define SPEAR1340_GPT1_CLK_ENB			22
+	#define SPEAR1340_GPT0_CLK_ENB			21
+	#define SPEAR1340_I2S_PLAY_CLK_ENB		20
+	#define SPEAR1340_I2S_REC_CLK_ENB		19
+	#define SPEAR1340_I2C0_CLK_ENB			18
+	#define SPEAR1340_SSP_CLK_ENB			17
+	#define SPEAR1340_UART0_CLK_ENB			15
+	#define SPEAR1340_PCIE_SATA_CLK_ENB		12
+	#define SPEAR1340_UOC_CLK_ENB			11
+	#define SPEAR1340_UHC1_CLK_ENB			10
+	#define SPEAR1340_UHC0_CLK_ENB			9
+	#define SPEAR1340_GMAC_CLK_ENB			8
+	#define SPEAR1340_CFXD_CLK_ENB			7
+	#define SPEAR1340_SDHCI_CLK_ENB			6
+	#define SPEAR1340_SMI_CLK_ENB			5
+	#define SPEAR1340_FSMC_CLK_ENB			4
+	#define SPEAR1340_SYSRAM0_CLK_ENB		3
+	#define SPEAR1340_SYSRAM1_CLK_ENB		2
+	#define SPEAR1340_SYSROM_CLK_ENB		1
+	#define SPEAR1340_BUS_CLK_ENB			0
+
+#define SPEAR1340_PERIP2_CLK_ENB		(VA_MISC_BASE + 0x310)
+	#define SPEAR1340_THSENS_CLK_ENB		8
+	#define SPEAR1340_I2S_REF_PAD_CLK_ENB		7
+	#define SPEAR1340_ACP_CLK_ENB			6
+	#define SPEAR1340_GPT3_CLK_ENB			5
+	#define SPEAR1340_GPT2_CLK_ENB			4
+	#define SPEAR1340_KBD_CLK_ENB			3
+	#define SPEAR1340_CPU_DBG_CLK_ENB		2
+	#define SPEAR1340_DDR_CORE_CLK_ENB		1
+	#define SPEAR1340_DDR_CTRL_CLK_ENB		0
+
+#define SPEAR1340_PERIP3_CLK_ENB		(VA_MISC_BASE + 0x314)
+	#define SPEAR1340_PLGPIO_CLK_ENB		18
+	#define SPEAR1340_VIDEO_DEC_CLK_ENB		16
+	#define SPEAR1340_VIDEO_ENC_CLK_ENB		15
+	#define SPEAR1340_SPDIF_OUT_CLK_ENB		13
+	#define SPEAR1340_SPDIF_IN_CLK_ENB		12
+	#define SPEAR1340_VIDEO_IN_CLK_ENB		11
+	#define SPEAR1340_CAM0_CLK_ENB			10
+	#define SPEAR1340_CAM1_CLK_ENB			9
+	#define SPEAR1340_CAM2_CLK_ENB			8
+	#define SPEAR1340_CAM3_CLK_ENB			7
+	#define SPEAR1340_MALI_CLK_ENB			6
+	#define SPEAR1340_CEC0_CLK_ENB			5
+	#define SPEAR1340_CEC1_CLK_ENB			4
+	#define SPEAR1340_PWM_CLK_ENB			3
+	#define SPEAR1340_I2C1_CLK_ENB			2
+	#define SPEAR1340_UART1_CLK_ENB			1
+
+static DEFINE_SPINLOCK(_lock);
+
+/* pll rate configuration table, in ascending order of rates */
+static struct pll_rate_tbl pll_rtbl[] = {
+	/* PCLK 24 MHz */
+	{.mode = 0, .m = 0x83, .n = 0x04, .p = 0x5}, /* vco 1572, pll 49.125 MHz */
+	{.mode = 0, .m = 0x7D, .n = 0x06, .p = 0x3}, /* vco 1000, pll 125 MHz */
+	{.mode = 0, .m = 0x64, .n = 0x06, .p = 0x1}, /* vco 800, pll 400 MHz */
+	{.mode = 0, .m = 0x7D, .n = 0x06, .p = 0x1}, /* vco 1000, pll 500 MHz */
+	{.mode = 0, .m = 0xA6, .n = 0x06, .p = 0x1}, /* vco 1328, pll 664 MHz */
+	{.mode = 0, .m = 0xC8, .n = 0x06, .p = 0x1}, /* vco 1600, pll 800 MHz */
+	{.mode = 0, .m = 0x7D, .n = 0x06, .p = 0x0}, /* vco 1000, pll 1000 MHz */
+	{.mode = 0, .m = 0x96, .n = 0x06, .p = 0x0}, /* vco 1200, pll 1200 MHz */
+};
+
+/* vco-pll4 rate configuration table, in ascending order of rates */
+static struct pll_rate_tbl pll4_rtbl[] = {
+	{.mode = 0, .m = 0x7D, .n = 0x06, .p = 0x2}, /* vco 1000, pll 250 MHz */
+	{.mode = 0, .m = 0xA6, .n = 0x06, .p = 0x2}, /* vco 1328, pll 332 MHz */
+	{.mode = 0, .m = 0xC8, .n = 0x06, .p = 0x2}, /* vco 1600, pll 400 MHz */
+	{.mode = 0, .m = 0x7D, .n = 0x06, .p = 0x0}, /* vco 1000, pll 1000 MHz */
+};
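+
+/*
+ * The {m, n, p} values above appear to follow the usual SPEAr VCO/PLL
+ * relation (a rough cross-check against the rates quoted in the comments,
+ * assuming the 24 MHz input mentioned above; the actual programming model
+ * is handled by clk_register_vco_pll()):
+ *
+ *	vco = 2 * m * fin / n		e.g. 2 * 0x7D * 24 / 0x06 = 1000 MHz
+ *	pll = vco / 2^p			e.g. 1000 / 2^3 = 125 MHz
+ */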
+
+/*
+ * All entries below generate 166 MHz for
+ * different values of vco1div2
+ */
+static struct frac_rate_tbl amba_synth_rtbl[] = {
+	{.div = 0x06062}, /* for vco1div2 = 500 MHz */
+	{.div = 0x04D1B}, /* for vco1div2 = 400 MHz */
+	{.div = 0x04000}, /* for vco1div2 = 332 MHz */
+	{.div = 0x03031}, /* for vco1div2 = 250 MHz */
+	{.div = 0x0268D}, /* for vco1div2 = 200 MHz */
+};
+
+/*
+ * Synthesizer clock derived from vco1div2. This clock is one of the
+ * possible clocks that can feed the CPU directly.
+ * This synthesizer can be programmed to run the CPU at different clock
+ * frequencies.
+ * The following table provides configuration values to let the CPU run at
+ * 200, 250, 332, 400 or 500 MHz for the different possible input
+ * (vco1div2) clock rates.
+ *
+ * --------------------------------------------------------------------
+ * vco1div2 (MHz)	fout (MHz)	cpuclk = fout/2		div
+ * --------------------------------------------------------------------
+ * 400			200		100			0x04000
+ * 400			250		125			0x03333
+ * 400			332		166			0x0268D
+ * 400			400		200			0x02000
+ * --------------------------------------------------------------------
+ * 500			200		100			0x05000
+ * 500			250		125			0x04000
+ * 500			332		166			0x03031
+ * 500			400		200			0x02800
+ * 500			500		250			0x02000
+ * --------------------------------------------------------------------
+ * 664			200		100			0x06a38
+ * 664			250		125			0x054FD
+ * 664			332		166			0x04000
+ * 664			400		200			0x0351E
+ * 664			500		250			0x02A7E
+ * --------------------------------------------------------------------
+ * 800			200		100			0x08000
+ * 800			250		125			0x06666
+ * 800			332		166			0x04D18
+ * 800			400		200			0x04000
+ * 800			500		250			0x03333
+ * --------------------------------------------------------------------
+ * sys rate configuration table is in descending order of divisor.
+ */
+static struct frac_rate_tbl sys_synth_rtbl[] = {
+	{.div = 0x08000},
+	{.div = 0x06a38},
+	{.div = 0x06666},
+	{.div = 0x054FD},
+	{.div = 0x05000},
+	{.div = 0x04D18},
+	{.div = 0x04000},
+	{.div = 0x0351E},
+	{.div = 0x03333},
+	{.div = 0x03031},
+	{.div = 0x02A7E},
+	{.div = 0x02800},
+	{.div = 0x0268D},
+	{.div = 0x02000},
+};
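+
+/*
+ * The divisors above are consistent with a simple fixed-point fractional
+ * divider (a sanity check against the table in the comment above, not a
+ * description of the register layout):
+ *
+ *	fout = fin / (2 * div / 0x4000)
+ *
+ * e.g. div = 0x06a38 with fin = 664 MHz gives 664 / (2 * 27192 / 16384)
+ * ~= 200 MHz, matching the corresponding table row.
+ */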
+
+/* aux rate configuration table, in ascending order of rates */
+static struct aux_rate_tbl aux_rtbl[] = {
+	/* For VCO1div2 = 500 MHz */
+	{.xscale = 10, .yscale = 204, .eq = 0}, /* 12.29 MHz */
+	{.xscale = 4, .yscale = 21, .eq = 0}, /* 48 MHz */
+	{.xscale = 2, .yscale = 6, .eq = 0}, /* 83 MHz */
+	{.xscale = 2, .yscale = 4, .eq = 0}, /* 125 MHz */
+	{.xscale = 1, .yscale = 3, .eq = 1}, /* 166 MHz */
+	{.xscale = 1, .yscale = 2, .eq = 1}, /* 250 MHz */
+};
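+
+/*
+ * The xscale/yscale/eq triplets above are consistent with the two usual
+ * auxiliary synthesizer equations (inferred here from the rates quoted for
+ * the 500 MHz vco1div2 parent, not taken from the register documentation):
+ *
+ *	eq = 0:	fout = fin * xscale / (2 * yscale)	(500 * 4 / 42 ~= 48 MHz)
+ *	eq = 1:	fout = fin * xscale / yscale		(500 * 1 / 3 ~= 166 MHz)
+ */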
+
+/* gmac rate configuration table, in ascending order of rates */
+static struct aux_rate_tbl gmac_rtbl[] = {
+	/* For gmac phy input clk */
+	{.xscale = 2, .yscale = 6, .eq = 0}, /* divided by 6 */
+	{.xscale = 2, .yscale = 4, .eq = 0}, /* divided by 4 */
+	{.xscale = 1, .yscale = 3, .eq = 1}, /* divided by 3 */
+	{.xscale = 1, .yscale = 2, .eq = 1}, /* divided by 2 */
+};
+
+/* clcd rate configuration table, in ascending order of rates */
+static struct frac_rate_tbl clcd_rtbl[] = {
+	{.div = 0x14000}, /* 25 MHz, for vco1div4 = 250 MHz */
+	{.div = 0x1284B}, /* 27 MHz, for vco1div4 = 250 MHz */
+	{.div = 0x0D8D3}, /* 58 MHz, for vco1div4 = 393 MHz */
+	{.div = 0x0B72C}, /* 58 MHz, for vco1div4 = 332 MHz */
+	{.div = 0x089EE}, /* 58 MHz, for vco1div4 = 250 MHz */
+	{.div = 0x07BA0}, /* 65 MHz, for vco1div4 = 250 MHz */
+	{.div = 0x06f1C}, /* 72 MHz, for vco1div4 = 250 MHz */
+	{.div = 0x06E58}, /* 58 MHz, for vco1div4 = 200 MHz */
+	{.div = 0x06c1B}, /* 74 MHz, for vco1div4 = 250 MHz */
+	{.div = 0x04A12}, /* 108 MHz, for vco1div4 = 250 MHz */
+	{.div = 0x0378E}, /* 144 MHz, for vco1div4 = 250 MHz */
+	{.div = 0x0360D}, /* 148 MHz, for vco1div4 = 250 MHz */
+	{.div = 0x035E0}, /* 148.5 MHz, for vco1div4 = 250 MHz */
+};
+
+/* i2s prescaler1 masks */
+static struct aux_clk_masks i2s_prs1_masks = {
+	.eq_sel_mask = AUX_EQ_SEL_MASK,
+	.eq_sel_shift = SPEAR1340_I2S_PRS1_EQ_SEL_SHIFT,
+	.eq1_mask = AUX_EQ1_SEL,
+	.eq2_mask = AUX_EQ2_SEL,
+	.xscale_sel_mask = SPEAR1340_I2S_PRS1_CLK_X_MASK,
+	.xscale_sel_shift = SPEAR1340_I2S_PRS1_CLK_X_SHIFT,
+	.yscale_sel_mask = SPEAR1340_I2S_PRS1_CLK_Y_MASK,
+	.yscale_sel_shift = SPEAR1340_I2S_PRS1_CLK_Y_SHIFT,
+};
+
+/* i2s sclk (bit clock) synthesizer masks */
+static struct aux_clk_masks i2s_sclk_masks = {
+	.eq_sel_mask = AUX_EQ_SEL_MASK,
+	.eq_sel_shift = SPEAR1340_I2S_SCLK_EQ_SEL_SHIFT,
+	.eq1_mask = AUX_EQ1_SEL,
+	.eq2_mask = AUX_EQ2_SEL,
+	.xscale_sel_mask = SPEAR1340_I2S_SCLK_X_MASK,
+	.xscale_sel_shift = SPEAR1340_I2S_SCLK_X_SHIFT,
+	.yscale_sel_mask = SPEAR1340_I2S_SCLK_Y_MASK,
+	.yscale_sel_shift = SPEAR1340_I2S_SCLK_Y_SHIFT,
+	.enable_bit = SPEAR1340_I2S_SCLK_SYNTH_ENB,
+};
+
+/* i2s prs1 aux rate configuration table, in ascending order of rates */
+static struct aux_rate_tbl i2s_prs1_rtbl[] = {
+	/* For parent clk = 49.152 MHz */
+	{.xscale = 1, .yscale = 12, .eq = 0}, /* 2.048 MHz, smp freq = 8 kHz */
+	{.xscale = 11, .yscale = 96, .eq = 0}, /* 2.816 MHz, smp freq = 11 kHz */
+	{.xscale = 1, .yscale = 6, .eq = 0}, /* 4.096 MHz, smp freq = 16 kHz */
+	{.xscale = 11, .yscale = 48, .eq = 0}, /* 5.632 MHz, smp freq = 22 kHz */
+
+	/*
+	 * with parent clk = 49.152 MHz, the generated freq is 8.192 MHz, smp freq = 32 kHz
+	 * with parent clk = 12.288 MHz, the generated freq is 2.048 MHz, smp freq = 8 kHz
+	 */
+	{.xscale = 1, .yscale = 3, .eq = 0},
+
+	/* For parent clk = 49.152 MHz */
+	{.xscale = 17, .yscale = 37, .eq = 0}, /* 11.289 MHz, smp freq = 44 kHz */
+	{.xscale = 1, .yscale = 2, .eq = 0}, /* 12.288 MHz, smp freq = 48 kHz */
+};
+
+/* i2s sclk aux rate configuration table, in ascending order of rates */
+static struct aux_rate_tbl i2s_sclk_rtbl[] = {
+	/* For sclk = ref_clk * x/2/y */
+	{.xscale = 1, .yscale = 4, .eq = 0},
+	{.xscale = 1, .yscale = 2, .eq = 0},
+};
+
+/* adc rate configuration table, in ascending order of rates */
+/* possible adc range is 2.5 MHz to 20 MHz. */
+static struct aux_rate_tbl adc_rtbl[] = {
+	/* For ahb = 166.67 MHz */
+	{.xscale = 1, .yscale = 31, .eq = 0}, /* 2.68 MHz */
+	{.xscale = 2, .yscale = 21, .eq = 0}, /* 7.94 MHz */
+	{.xscale = 4, .yscale = 21, .eq = 0}, /* 15.87 MHz */
+	{.xscale = 10, .yscale = 42, .eq = 0}, /* 19.84 MHz */
+};
+
+/* General synth rate configuration table, in ascending order of rates */
+static struct frac_rate_tbl gen_rtbl[] = {
+	/* For vco1div4 = 250 MHz */
+	{.div = 0x1624E}, /* 22.5792 MHz */
+	{.div = 0x14585}, /* 24.576 MHz */
+	{.div = 0x14000}, /* 25 MHz */
+	{.div = 0x0B127}, /* 45.1584 MHz */
+	{.div = 0x0A000}, /* 50 MHz */
+	{.div = 0x061A8}, /* 81.92 MHz */
+	{.div = 0x05000}, /* 100 MHz */
+	{.div = 0x02800}, /* 200 MHz */
+	{.div = 0x02620}, /* 210 MHz */
+	{.div = 0x02460}, /* 220 MHz */
+	{.div = 0x022C0}, /* 230 MHz */
+	{.div = 0x02160}, /* 240 MHz */
+	{.div = 0x02000}, /* 250 MHz */
+};
+
+/* clock parents */
+static const char *vco_parents[] = { "osc_24m_clk", "osc_25m_clk", };
+static const char *sys_parents[] = { "none", "pll1_clk", "none", "none",
+	"sys_synth_clk", "none", "pll2_clk", "pll3_clk", };
+static const char *ahb_parents[] = { "cpu_div3_clk", "amba_synth_clk", };
+static const char *gpt_parents[] = { "osc_24m_clk", "apb_clk", };
+static const char *uart0_parents[] = { "pll5_clk", "osc_24m_clk",
+	"uart0_synth_gate_clk", };
+static const char *uart1_parents[] = { "pll5_clk", "osc_24m_clk",
+	"uart1_synth_gate_clk", };
+static const char *c3_parents[] = { "pll5_clk", "c3_synth_gate_clk", };
+static const char *gmac_phy_input_parents[] = { "gmii_125m_pad_clk", "pll2_clk",
+	"osc_25m_clk", };
+static const char *gmac_phy_parents[] = { "gmac_phy_input_mux_clk",
+	"gmac_phy_synth_gate_clk", };
+static const char *clcd_synth_parents[] = { "vco1div4_clk", "pll2_clk", };
+static const char *clcd_pixel_parents[] = { "pll5_clk", "clcd_synth_clk", };
+static const char *i2s_src_parents[] = { "vco1div2_clk", "pll2_clk", "pll3_clk",
+	"i2s_src_pad_clk", };
+static const char *i2s_ref_parents[] = { "i2s_src_mux_clk", "i2s_prs1_clk", };
+static const char *spdif_out_parents[] = { "i2s_src_pad_clk", "gen_synth2_clk",
+};
+static const char *spdif_in_parents[] = { "pll2_clk", "gen_synth3_clk", };
+
+static const char *gen_synth0_1_parents[] = { "vco1div4_clk", "vco3div2_clk",
+	"pll3_clk", };
+static const char *gen_synth2_3_parents[] = { "vco1div4_clk", "vco3div2_clk",
+	"pll2_clk", };
+
+void __init spear1340_clk_init(void)
+{
+	struct clk *clk, *clk1;
+
+	clk = clk_register_fixed_rate(NULL, "apb_pclk", NULL, CLK_IS_ROOT, 0);
+	clk_register_clkdev(clk, "apb_pclk", NULL);
+
+	clk = clk_register_fixed_rate(NULL, "osc_32k_clk", NULL, CLK_IS_ROOT,
+			32000);
+	clk_register_clkdev(clk, "osc_32k_clk", NULL);
+
+	clk = clk_register_fixed_rate(NULL, "osc_24m_clk", NULL, CLK_IS_ROOT,
+			24000000);
+	clk_register_clkdev(clk, "osc_24m_clk", NULL);
+
+	clk = clk_register_fixed_rate(NULL, "osc_25m_clk", NULL, CLK_IS_ROOT,
+			25000000);
+	clk_register_clkdev(clk, "osc_25m_clk", NULL);
+
+	clk = clk_register_fixed_rate(NULL, "gmii_125m_pad_clk", NULL,
+			CLK_IS_ROOT, 125000000);
+	clk_register_clkdev(clk, "gmii_125m_pad_clk", NULL);
+
+	clk = clk_register_fixed_rate(NULL, "i2s_src_pad_clk", NULL,
+			CLK_IS_ROOT, 12288000);
+	clk_register_clkdev(clk, "i2s_src_pad_clk", NULL);
+
+	/* clock derived from 32 KHz osc clk */
+	clk = clk_register_gate(NULL, "rtc-spear", "osc_32k_clk", 0,
+			SPEAR1340_PERIP1_CLK_ENB, SPEAR1340_RTC_CLK_ENB, 0,
+			&_lock);
+	clk_register_clkdev(clk, NULL, "fc900000.rtc");
+
+	/* clock derived from 24 or 25 MHz osc clk */
+	/* vco-pll */
+	clk = clk_register_mux(NULL, "vco1_mux_clk", vco_parents,
+			ARRAY_SIZE(vco_parents), 0, SPEAR1340_PLL_CFG,
+			SPEAR1340_PLL1_CLK_SHIFT, SPEAR1340_PLL_CLK_MASK, 0,
+			&_lock);
+	clk_register_clkdev(clk, "vco1_mux_clk", NULL);
+	clk = clk_register_vco_pll("vco1_clk", "pll1_clk", NULL, "vco1_mux_clk",
+			0, SPEAR1340_PLL1_CTR, SPEAR1340_PLL1_FRQ, pll_rtbl,
+			ARRAY_SIZE(pll_rtbl), &_lock, &clk1, NULL);
+	clk_register_clkdev(clk, "vco1_clk", NULL);
+	clk_register_clkdev(clk1, "pll1_clk", NULL);
+
+	clk = clk_register_mux(NULL, "vco2_mux_clk", vco_parents,
+			ARRAY_SIZE(vco_parents), 0, SPEAR1340_PLL_CFG,
+			SPEAR1340_PLL2_CLK_SHIFT, SPEAR1340_PLL_CLK_MASK, 0,
+			&_lock);
+	clk_register_clkdev(clk, "vco2_mux_clk", NULL);
+	clk = clk_register_vco_pll("vco2_clk", "pll2_clk", NULL, "vco2_mux_clk",
+			0, SPEAR1340_PLL2_CTR, SPEAR1340_PLL2_FRQ, pll_rtbl,
+			ARRAY_SIZE(pll_rtbl), &_lock, &clk1, NULL);
+	clk_register_clkdev(clk, "vco2_clk", NULL);
+	clk_register_clkdev(clk1, "pll2_clk", NULL);
+
+	clk = clk_register_mux(NULL, "vco3_mux_clk", vco_parents,
+			ARRAY_SIZE(vco_parents), 0, SPEAR1340_PLL_CFG,
+			SPEAR1340_PLL3_CLK_SHIFT, SPEAR1340_PLL_CLK_MASK, 0,
+			&_lock);
+	clk_register_clkdev(clk, "vco3_mux_clk", NULL);
+	clk = clk_register_vco_pll("vco3_clk", "pll3_clk", NULL, "vco3_mux_clk",
+			0, SPEAR1340_PLL3_CTR, SPEAR1340_PLL3_FRQ, pll_rtbl,
+			ARRAY_SIZE(pll_rtbl), &_lock, &clk1, NULL);
+	clk_register_clkdev(clk, "vco3_clk", NULL);
+	clk_register_clkdev(clk1, "pll3_clk", NULL);
+
+	clk = clk_register_vco_pll("vco4_clk", "pll4_clk", NULL, "osc_24m_clk",
+			0, SPEAR1340_PLL4_CTR, SPEAR1340_PLL4_FRQ, pll4_rtbl,
+			ARRAY_SIZE(pll4_rtbl), &_lock, &clk1, NULL);
+	clk_register_clkdev(clk, "vco4_clk", NULL);
+	clk_register_clkdev(clk1, "pll4_clk", NULL);
+
+	clk = clk_register_fixed_rate(NULL, "pll5_clk", "osc_24m_clk", 0,
+			48000000);
+	clk_register_clkdev(clk, "pll5_clk", NULL);
+
+	clk = clk_register_fixed_rate(NULL, "pll6_clk", "osc_25m_clk", 0,
+			25000000);
+	clk_register_clkdev(clk, "pll6_clk", NULL);
+
+	/* vco div n clocks */
+	clk = clk_register_fixed_factor(NULL, "vco1div2_clk", "vco1_clk", 0, 1,
+			2);
+	clk_register_clkdev(clk, "vco1div2_clk", NULL);
+
+	clk = clk_register_fixed_factor(NULL, "vco1div4_clk", "vco1_clk", 0, 1,
+			4);
+	clk_register_clkdev(clk, "vco1div4_clk", NULL);
+
+	clk = clk_register_fixed_factor(NULL, "vco2div2_clk", "vco2_clk", 0, 1,
+			2);
+	clk_register_clkdev(clk, "vco2div2_clk", NULL);
+
+	clk = clk_register_fixed_factor(NULL, "vco3div2_clk", "vco3_clk", 0, 1,
+			2);
+	clk_register_clkdev(clk, "vco3div2_clk", NULL);
+
+	/* peripherals */
+	clk_register_fixed_factor(NULL, "thermal_clk", "osc_24m_clk", 0, 1,
+			128);
+	clk = clk_register_gate(NULL, "thermal_gate_clk", "thermal_clk", 0,
+			SPEAR1340_PERIP2_CLK_ENB, SPEAR1340_THSENS_CLK_ENB, 0,
+			&_lock);
+	clk_register_clkdev(clk, NULL, "spear_thermal");
+
+	/* clock derived from pll4 clk */
+	clk = clk_register_fixed_factor(NULL, "ddr_clk", "pll4_clk", 0, 1,
+			1);
+	clk_register_clkdev(clk, "ddr_clk", NULL);
+
+	/* clock derived from pll1 clk */
+	clk = clk_register_frac("sys_synth_clk", "vco1div2_clk", 0,
+			SPEAR1340_SYS_CLK_SYNT, sys_synth_rtbl,
+			ARRAY_SIZE(sys_synth_rtbl), &_lock);
+	clk_register_clkdev(clk, "sys_synth_clk", NULL);
+
+	clk = clk_register_frac("amba_synth_clk", "vco1div2_clk", 0,
+			SPEAR1340_AMBA_CLK_SYNT, amba_synth_rtbl,
+			ARRAY_SIZE(amba_synth_rtbl), &_lock);
+	clk_register_clkdev(clk, "amba_synth_clk", NULL);
+
+	clk = clk_register_mux(NULL, "sys_mux_clk", sys_parents,
+			ARRAY_SIZE(sys_parents), 0, SPEAR1340_SYS_CLK_CTRL,
+			SPEAR1340_SCLK_SRC_SEL_SHIFT,
+			SPEAR1340_SCLK_SRC_SEL_MASK, 0, &_lock);
+	clk_register_clkdev(clk, "sys_clk", NULL);
+
+	clk = clk_register_fixed_factor(NULL, "cpu_clk", "sys_mux_clk", 0, 1,
+			2);
+	clk_register_clkdev(clk, "cpu_clk", NULL);
+
+	clk = clk_register_fixed_factor(NULL, "cpu_div3_clk", "cpu_clk", 0, 1,
+			3);
+	clk_register_clkdev(clk, "cpu_div3_clk", NULL);
+
+	clk = clk_register_fixed_factor(NULL, "wdt_clk", "cpu_clk", 0, 1,
+			2);
+	clk_register_clkdev(clk, NULL, "ec800620.wdt");
+
+	clk = clk_register_mux(NULL, "ahb_clk", ahb_parents,
+			ARRAY_SIZE(ahb_parents), 0, SPEAR1340_SYS_CLK_CTRL,
+			SPEAR1340_HCLK_SRC_SEL_SHIFT,
+			SPEAR1340_HCLK_SRC_SEL_MASK, 0, &_lock);
+	clk_register_clkdev(clk, "ahb_clk", NULL);
+
+	clk = clk_register_fixed_factor(NULL, "apb_clk", "ahb_clk", 0, 1,
+			2);
+	clk_register_clkdev(clk, "apb_clk", NULL);
+
+	/* gpt clocks */
+	clk = clk_register_mux(NULL, "gpt0_mux_clk", gpt_parents,
+			ARRAY_SIZE(gpt_parents), 0, SPEAR1340_PERIP_CLK_CFG,
+			SPEAR1340_GPT0_CLK_SHIFT, SPEAR1340_GPT_CLK_MASK, 0,
+			&_lock);
+	clk_register_clkdev(clk, "gpt0_mux_clk", NULL);
+	clk = clk_register_gate(NULL, "gpt0_clk", "gpt0_mux_clk", 0,
+			SPEAR1340_PERIP1_CLK_ENB, SPEAR1340_GPT0_CLK_ENB, 0,
+			&_lock);
+	clk_register_clkdev(clk, NULL, "gpt0");
+
+	clk = clk_register_mux(NULL, "gpt1_mux_clk", gpt_parents,
+			ARRAY_SIZE(gpt_parents), 0, SPEAR1340_PERIP_CLK_CFG,
+			SPEAR1340_GPT1_CLK_SHIFT, SPEAR1340_GPT_CLK_MASK, 0,
+			&_lock);
+	clk_register_clkdev(clk, "gpt1_mux_clk", NULL);
+	clk = clk_register_gate(NULL, "gpt1_clk", "gpt1_mux_clk", 0,
+			SPEAR1340_PERIP1_CLK_ENB, SPEAR1340_GPT1_CLK_ENB, 0,
+			&_lock);
+	clk_register_clkdev(clk, NULL, "gpt1");
+
+	clk = clk_register_mux(NULL, "gpt2_mux_clk", gpt_parents,
+			ARRAY_SIZE(gpt_parents), 0, SPEAR1340_PERIP_CLK_CFG,
+			SPEAR1340_GPT2_CLK_SHIFT, SPEAR1340_GPT_CLK_MASK, 0,
+			&_lock);
+	clk_register_clkdev(clk, "gpt2_mux_clk", NULL);
+	clk = clk_register_gate(NULL, "gpt2_clk", "gpt2_mux_clk", 0,
+			SPEAR1340_PERIP2_CLK_ENB, SPEAR1340_GPT2_CLK_ENB, 0,
+			&_lock);
+	clk_register_clkdev(clk, NULL, "gpt2");
+
+	clk = clk_register_mux(NULL, "gpt3_mux_clk", gpt_parents,
+			ARRAY_SIZE(gpt_parents), 0, SPEAR1340_PERIP_CLK_CFG,
+			SPEAR1340_GPT3_CLK_SHIFT, SPEAR1340_GPT_CLK_MASK, 0,
+			&_lock);
+	clk_register_clkdev(clk, "gpt3_mux_clk", NULL);
+	clk = clk_register_gate(NULL, "gpt3_clk", "gpt3_mux_clk", 0,
+			SPEAR1340_PERIP2_CLK_ENB, SPEAR1340_GPT3_CLK_ENB, 0,
+			&_lock);
+	clk_register_clkdev(clk, NULL, "gpt3");
+
+	/* others */
+	clk = clk_register_aux("uart0_synth_clk", "uart0_synth_gate_clk",
+			"vco1div2_clk", 0, SPEAR1340_UART0_CLK_SYNT, NULL,
+			aux_rtbl, ARRAY_SIZE(aux_rtbl), &_lock, &clk1);
+	clk_register_clkdev(clk, "uart0_synth_clk", NULL);
+	clk_register_clkdev(clk1, "uart0_synth_gate_clk", NULL);
+
+	clk = clk_register_mux(NULL, "uart0_mux_clk", uart0_parents,
+			ARRAY_SIZE(uart0_parents), 0, SPEAR1340_PERIP_CLK_CFG,
+			SPEAR1340_UART0_CLK_SHIFT, SPEAR1340_UART_CLK_MASK, 0,
+			&_lock);
+	clk_register_clkdev(clk, "uart0_mux_clk", NULL);
+
+	clk = clk_register_gate(NULL, "uart0_clk", "uart0_mux_clk", 0,
+			SPEAR1340_PERIP1_CLK_ENB, SPEAR1340_UART0_CLK_ENB, 0,
+			&_lock);
+	clk_register_clkdev(clk, NULL, "e0000000.serial");
+
+	clk = clk_register_aux("uart1_synth_clk", "uart1_synth_gate_clk",
+			"vco1div2_clk", 0, SPEAR1340_UART1_CLK_SYNT, NULL,
+			aux_rtbl, ARRAY_SIZE(aux_rtbl), &_lock, &clk1);
+	clk_register_clkdev(clk, "uart1_synth_clk", NULL);
+	clk_register_clkdev(clk1, "uart1_synth_gate_clk", NULL);
+
+	clk = clk_register_mux(NULL, "uart1_mux_clk", uart1_parents,
+			ARRAY_SIZE(uart1_parents), 0, SPEAR1340_PERIP_CLK_CFG,
+			SPEAR1340_UART1_CLK_SHIFT, SPEAR1340_UART_CLK_MASK, 0,
+			&_lock);
+	clk_register_clkdev(clk, "uart1_mux_clk", NULL);
+
+	clk = clk_register_gate(NULL, "uart1_clk", "uart1_mux_clk", 0,
+			SPEAR1340_PERIP1_CLK_ENB, SPEAR1340_UART1_CLK_ENB, 0,
+			&_lock);
+	clk_register_clkdev(clk, NULL, "b4100000.serial");
+
+	clk = clk_register_aux("sdhci_synth_clk", "sdhci_synth_gate_clk",
+			"vco1div2_clk", 0, SPEAR1340_SDHCI_CLK_SYNT, NULL,
+			aux_rtbl, ARRAY_SIZE(aux_rtbl), &_lock, &clk1);
+	clk_register_clkdev(clk, "sdhci_synth_clk", NULL);
+	clk_register_clkdev(clk1, "sdhci_synth_gate_clk", NULL);
+
+	clk = clk_register_gate(NULL, "sdhci_clk", "sdhci_synth_gate_clk", 0,
+			SPEAR1340_PERIP1_CLK_ENB, SPEAR1340_SDHCI_CLK_ENB, 0,
+			&_lock);
+	clk_register_clkdev(clk, NULL, "b3000000.sdhci");
+
+	clk = clk_register_aux("cfxd_synth_clk", "cfxd_synth_gate_clk",
+			"vco1div2_clk", 0, SPEAR1340_CFXD_CLK_SYNT, NULL,
+			aux_rtbl, ARRAY_SIZE(aux_rtbl), &_lock, &clk1);
+	clk_register_clkdev(clk, "cfxd_synth_clk", NULL);
+	clk_register_clkdev(clk1, "cfxd_synth_gate_clk", NULL);
+
+	clk = clk_register_gate(NULL, "cfxd_clk", "cfxd_synth_gate_clk", 0,
+			SPEAR1340_PERIP1_CLK_ENB, SPEAR1340_CFXD_CLK_ENB, 0,
+			&_lock);
+	clk_register_clkdev(clk, NULL, "b2800000.cf");
+	clk_register_clkdev(clk, NULL, "arasan_xd");
+
+	clk = clk_register_aux("c3_synth_clk", "c3_synth_gate_clk",
+			"vco1div2_clk", 0, SPEAR1340_C3_CLK_SYNT, NULL,
+			aux_rtbl, ARRAY_SIZE(aux_rtbl), &_lock, &clk1);
+	clk_register_clkdev(clk, "c3_synth_clk", NULL);
+	clk_register_clkdev(clk1, "c3_synth_gate_clk", NULL);
+
+	clk = clk_register_mux(NULL, "c3_mux_clk", c3_parents,
+			ARRAY_SIZE(c3_parents), 0, SPEAR1340_PERIP_CLK_CFG,
+			SPEAR1340_C3_CLK_SHIFT, SPEAR1340_C3_CLK_MASK, 0,
+			&_lock);
+	clk_register_clkdev(clk, "c3_mux_clk", NULL);
+
+	clk = clk_register_gate(NULL, "c3_clk", "c3_mux_clk", 0,
+			SPEAR1340_PERIP1_CLK_ENB, SPEAR1340_C3_CLK_ENB, 0,
+			&_lock);
+	clk_register_clkdev(clk, NULL, "c3");
+
+	/* gmac */
+	clk = clk_register_mux(NULL, "gmac_phy_input_mux_clk",
+			gmac_phy_input_parents,
+			ARRAY_SIZE(gmac_phy_input_parents), 0,
+			SPEAR1340_GMAC_CLK_CFG,
+			SPEAR1340_GMAC_PHY_INPUT_CLK_SHIFT,
+			SPEAR1340_GMAC_PHY_INPUT_CLK_MASK, 0, &_lock);
+	clk_register_clkdev(clk, "gmac_phy_input_mux_clk", NULL);
+
+	clk = clk_register_aux("gmac_phy_synth_clk", "gmac_phy_synth_gate_clk",
+			"gmac_phy_input_mux_clk", 0, SPEAR1340_GMAC_CLK_SYNT,
+			NULL, gmac_rtbl, ARRAY_SIZE(gmac_rtbl), &_lock, &clk1);
+	clk_register_clkdev(clk, "gmac_phy_synth_clk", NULL);
+	clk_register_clkdev(clk1, "gmac_phy_synth_gate_clk", NULL);
+
+	clk = clk_register_mux(NULL, "gmac_phy_mux_clk", gmac_phy_parents,
+			ARRAY_SIZE(gmac_phy_parents), 0,
+			SPEAR1340_PERIP_CLK_CFG, SPEAR1340_GMAC_PHY_CLK_SHIFT,
+			SPEAR1340_GMAC_PHY_CLK_MASK, 0, &_lock);
+	clk_register_clkdev(clk, NULL, "stmmacphy.0");
+
+	/* clcd */
+	clk = clk_register_mux(NULL, "clcd_synth_mux_clk", clcd_synth_parents,
+			ARRAY_SIZE(clcd_synth_parents), 0,
+			SPEAR1340_CLCD_CLK_SYNT, SPEAR1340_CLCD_SYNT_CLK_SHIFT,
+			SPEAR1340_CLCD_SYNT_CLK_MASK, 0, &_lock);
+	clk_register_clkdev(clk, "clcd_synth_mux_clk", NULL);
+
+	clk = clk_register_frac("clcd_synth_clk", "clcd_synth_mux_clk", 0,
+			SPEAR1340_CLCD_CLK_SYNT, clcd_rtbl,
+			ARRAY_SIZE(clcd_rtbl), &_lock);
+	clk_register_clkdev(clk, "clcd_synth_clk", NULL);
+
+	clk = clk_register_mux(NULL, "clcd_pixel_mux_clk", clcd_pixel_parents,
+			ARRAY_SIZE(clcd_pixel_parents), 0,
+			SPEAR1340_PERIP_CLK_CFG, SPEAR1340_CLCD_CLK_SHIFT,
+			SPEAR1340_CLCD_CLK_MASK, 0, &_lock);
+	clk_register_clkdev(clk, "clcd_pixel_clk", NULL);
+
+	clk = clk_register_gate(NULL, "clcd_clk", "clcd_pixel_mux_clk", 0,
+			SPEAR1340_PERIP1_CLK_ENB, SPEAR1340_CLCD_CLK_ENB, 0,
+			&_lock);
+	clk_register_clkdev(clk, "clcd_clk", NULL);
+
+	/* i2s */
+	clk = clk_register_mux(NULL, "i2s_src_mux_clk", i2s_src_parents,
+			ARRAY_SIZE(i2s_src_parents), 0, SPEAR1340_I2S_CLK_CFG,
+			SPEAR1340_I2S_SRC_CLK_SHIFT, SPEAR1340_I2S_SRC_CLK_MASK,
+			0, &_lock);
+	clk_register_clkdev(clk, "i2s_src_clk", NULL);
+
+	clk = clk_register_aux("i2s_prs1_clk", NULL, "i2s_src_mux_clk", 0,
+			SPEAR1340_I2S_CLK_CFG, &i2s_prs1_masks, i2s_prs1_rtbl,
+			ARRAY_SIZE(i2s_prs1_rtbl), &_lock, NULL);
+	clk_register_clkdev(clk, "i2s_prs1_clk", NULL);
+
+	clk = clk_register_mux(NULL, "i2s_ref_mux_clk", i2s_ref_parents,
+			ARRAY_SIZE(i2s_ref_parents), 0, SPEAR1340_I2S_CLK_CFG,
+			SPEAR1340_I2S_REF_SHIFT, SPEAR1340_I2S_REF_SEL_MASK, 0,
+			&_lock);
+	clk_register_clkdev(clk, "i2s_ref_clk", NULL);
+
+	clk = clk_register_gate(NULL, "i2s_ref_pad_clk", "i2s_ref_mux_clk", 0,
+			SPEAR1340_PERIP2_CLK_ENB, SPEAR1340_I2S_REF_PAD_CLK_ENB,
+			0, &_lock);
+	clk_register_clkdev(clk, "i2s_ref_pad_clk", NULL);
+
+	clk = clk_register_aux("i2s_sclk_clk", "i2s_sclk_gate_clk",
+			"i2s_ref_mux_clk", 0, SPEAR1340_I2S_CLK_CFG,
+			&i2s_sclk_masks, i2s_sclk_rtbl,
+			ARRAY_SIZE(i2s_sclk_rtbl), &_lock, &clk1);
+	clk_register_clkdev(clk, "i2s_sclk_clk", NULL);
+	clk_register_clkdev(clk1, "i2s_sclk_gate_clk", NULL);
+
+	/* clock derived from ahb clk */
+	clk = clk_register_gate(NULL, "i2c0_clk", "ahb_clk", 0,
+			SPEAR1340_PERIP1_CLK_ENB, SPEAR1340_I2C0_CLK_ENB, 0,
+			&_lock);
+	clk_register_clkdev(clk, NULL, "e0280000.i2c");
+
+	clk = clk_register_gate(NULL, "i2c1_clk", "ahb_clk", 0,
+			SPEAR1340_PERIP1_CLK_ENB, SPEAR1340_I2C1_CLK_ENB, 0,
+			&_lock);
+	clk_register_clkdev(clk, NULL, "b4000000.i2c");
+
+	clk = clk_register_gate(NULL, "dma_clk", "ahb_clk", 0,
+			SPEAR1340_PERIP1_CLK_ENB, SPEAR1340_DMA_CLK_ENB, 0,
+			&_lock);
+	clk_register_clkdev(clk, NULL, "ea800000.dma");
+	clk_register_clkdev(clk, NULL, "eb000000.dma");
+
+	clk = clk_register_gate(NULL, "gmac_clk", "ahb_clk", 0,
+			SPEAR1340_PERIP1_CLK_ENB, SPEAR1340_GMAC_CLK_ENB, 0,
+			&_lock);
+	clk_register_clkdev(clk, NULL, "e2000000.eth");
+
+	clk = clk_register_gate(NULL, "fsmc_clk", "ahb_clk", 0,
+			SPEAR1340_PERIP1_CLK_ENB, SPEAR1340_FSMC_CLK_ENB, 0,
+			&_lock);
+	clk_register_clkdev(clk, NULL, "b0000000.flash");
+
+	clk = clk_register_gate(NULL, "smi_clk", "ahb_clk", 0,
+			SPEAR1340_PERIP1_CLK_ENB, SPEAR1340_SMI_CLK_ENB, 0,
+			&_lock);
+	clk_register_clkdev(clk, NULL, "ea000000.flash");
+
+	clk = clk_register_gate(NULL, "usbh0_clk", "ahb_clk", 0,
+			SPEAR1340_PERIP1_CLK_ENB, SPEAR1340_UHC0_CLK_ENB, 0,
+			&_lock);
+	clk_register_clkdev(clk, "usbh.0_clk", NULL);
+
+	clk = clk_register_gate(NULL, "usbh1_clk", "ahb_clk", 0,
+			SPEAR1340_PERIP1_CLK_ENB, SPEAR1340_UHC1_CLK_ENB, 0,
+			&_lock);
+	clk_register_clkdev(clk, "usbh.1_clk", NULL);
+
+	clk = clk_register_gate(NULL, "uoc_clk", "ahb_clk", 0,
+			SPEAR1340_PERIP1_CLK_ENB, SPEAR1340_UOC_CLK_ENB, 0,
+			&_lock);
+	clk_register_clkdev(clk, NULL, "uoc");
+
+	clk = clk_register_gate(NULL, "pcie_sata_clk", "ahb_clk", 0,
+			SPEAR1340_PERIP1_CLK_ENB, SPEAR1340_PCIE_SATA_CLK_ENB,
+			0, &_lock);
+	clk_register_clkdev(clk, NULL, "dw_pcie");
+	clk_register_clkdev(clk, NULL, "ahci");
+
+	clk = clk_register_gate(NULL, "sysram0_clk", "ahb_clk", 0,
+			SPEAR1340_PERIP1_CLK_ENB, SPEAR1340_SYSRAM0_CLK_ENB, 0,
+			&_lock);
+	clk_register_clkdev(clk, "sysram0_clk", NULL);
+
+	clk = clk_register_gate(NULL, "sysram1_clk", "ahb_clk", 0,
+			SPEAR1340_PERIP1_CLK_ENB, SPEAR1340_SYSRAM1_CLK_ENB, 0,
+			&_lock);
+	clk_register_clkdev(clk, "sysram1_clk", NULL);
+
+	clk = clk_register_aux("adc_synth_clk", "adc_synth_gate_clk", "ahb_clk",
+			0, SPEAR1340_ADC_CLK_SYNT, NULL, adc_rtbl,
+			ARRAY_SIZE(adc_rtbl), &_lock, &clk1);
+	clk_register_clkdev(clk, "adc_synth_clk", NULL);
+	clk_register_clkdev(clk1, "adc_synth_gate_clk", NULL);
+
+	clk = clk_register_gate(NULL, "adc_clk", "adc_synth_gate_clk", 0,
+			SPEAR1340_PERIP1_CLK_ENB, SPEAR1340_ADC_CLK_ENB, 0,
+			&_lock);
+	clk_register_clkdev(clk, NULL, "adc_clk");
+
+	/* clock derived from apb clk */
+	clk = clk_register_gate(NULL, "ssp_clk", "apb_clk", 0,
+			SPEAR1340_PERIP1_CLK_ENB, SPEAR1340_SSP_CLK_ENB, 0,
+			&_lock);
+	clk_register_clkdev(clk, NULL, "e0100000.spi");
+
+	clk = clk_register_gate(NULL, "gpio0_clk", "apb_clk", 0,
+			SPEAR1340_PERIP1_CLK_ENB, SPEAR1340_GPIO0_CLK_ENB, 0,
+			&_lock);
+	clk_register_clkdev(clk, NULL, "e0600000.gpio");
+
+	clk = clk_register_gate(NULL, "gpio1_clk", "apb_clk", 0,
+			SPEAR1340_PERIP1_CLK_ENB, SPEAR1340_GPIO1_CLK_ENB, 0,
+			&_lock);
+	clk_register_clkdev(clk, NULL, "e0680000.gpio");
+
+	clk = clk_register_gate(NULL, "i2s_play_clk", "apb_clk", 0,
+			SPEAR1340_PERIP1_CLK_ENB, SPEAR1340_I2S_PLAY_CLK_ENB, 0,
+			&_lock);
+	clk_register_clkdev(clk, NULL, "b2400000.i2s");
+
+	clk = clk_register_gate(NULL, "i2s_rec_clk", "apb_clk", 0,
+			SPEAR1340_PERIP1_CLK_ENB, SPEAR1340_I2S_REC_CLK_ENB, 0,
+			&_lock);
+	clk_register_clkdev(clk, NULL, "b2000000.i2s");
+
+	clk = clk_register_gate(NULL, "kbd_clk", "apb_clk", 0,
+			SPEAR1340_PERIP2_CLK_ENB, SPEAR1340_KBD_CLK_ENB, 0,
+			&_lock);
+	clk_register_clkdev(clk, NULL, "e0300000.kbd");
+
+	/* RAS clks */
+	clk = clk_register_mux(NULL, "gen_synth0_1_mux_clk",
+			gen_synth0_1_parents, ARRAY_SIZE(gen_synth0_1_parents),
+			0, SPEAR1340_PLL_CFG, SPEAR1340_GEN_SYNT0_1_CLK_SHIFT,
+			SPEAR1340_GEN_SYNT_CLK_MASK, 0, &_lock);
+	clk_register_clkdev(clk, "gen_synth0_1_clk", NULL);
+
+	clk = clk_register_mux(NULL, "gen_synth2_3_mux_clk",
+			gen_synth2_3_parents, ARRAY_SIZE(gen_synth2_3_parents),
+			0, SPEAR1340_PLL_CFG, SPEAR1340_GEN_SYNT2_3_CLK_SHIFT,
+			SPEAR1340_GEN_SYNT_CLK_MASK, 0, &_lock);
+	clk_register_clkdev(clk, "gen_synth2_3_clk", NULL);
+
+	clk = clk_register_frac("gen_synth0_clk", "gen_synth0_1_clk", 0,
+			SPEAR1340_GEN_CLK_SYNT0, gen_rtbl, ARRAY_SIZE(gen_rtbl),
+			&_lock);
+	clk_register_clkdev(clk, "gen_synth0_clk", NULL);
+
+	clk = clk_register_frac("gen_synth1_clk", "gen_synth0_1_clk", 0,
+			SPEAR1340_GEN_CLK_SYNT1, gen_rtbl, ARRAY_SIZE(gen_rtbl),
+			&_lock);
+	clk_register_clkdev(clk, "gen_synth1_clk", NULL);
+
+	clk = clk_register_frac("gen_synth2_clk", "gen_synth2_3_clk", 0,
+			SPEAR1340_GEN_CLK_SYNT2, gen_rtbl, ARRAY_SIZE(gen_rtbl),
+			&_lock);
+	clk_register_clkdev(clk, "gen_synth2_clk", NULL);
+
+	clk = clk_register_frac("gen_synth3_clk", "gen_synth2_3_clk", 0,
+			SPEAR1340_GEN_CLK_SYNT3, gen_rtbl, ARRAY_SIZE(gen_rtbl),
+			&_lock);
+	clk_register_clkdev(clk, "gen_synth3_clk", NULL);
+
+	clk = clk_register_gate(NULL, "mali_clk", "gen_synth3_clk", 0,
+			SPEAR1340_PERIP3_CLK_ENB, SPEAR1340_MALI_CLK_ENB, 0,
+			&_lock);
+	clk_register_clkdev(clk, NULL, "mali");
+
+	clk = clk_register_gate(NULL, "cec0_clk", "ahb_clk", 0,
+			SPEAR1340_PERIP3_CLK_ENB, SPEAR1340_CEC0_CLK_ENB, 0,
+			&_lock);
+	clk_register_clkdev(clk, NULL, "spear_cec.0");
+
+	clk = clk_register_gate(NULL, "cec1_clk", "ahb_clk", 0,
+			SPEAR1340_PERIP3_CLK_ENB, SPEAR1340_CEC1_CLK_ENB, 0,
+			&_lock);
+	clk_register_clkdev(clk, NULL, "spear_cec.1");
+
+	clk = clk_register_mux(NULL, "spdif_out_mux_clk", spdif_out_parents,
+			ARRAY_SIZE(spdif_out_parents), 0,
+			SPEAR1340_PERIP_CLK_CFG, SPEAR1340_SPDIF_OUT_CLK_SHIFT,
+			SPEAR1340_SPDIF_CLK_MASK, 0, &_lock);
+	clk_register_clkdev(clk, "spdif_out_mux_clk", NULL);
+
+	clk = clk_register_gate(NULL, "spdif_out_clk", "spdif_out_mux_clk", 0,
+			SPEAR1340_PERIP3_CLK_ENB, SPEAR1340_SPDIF_OUT_CLK_ENB,
+			0, &_lock);
+	clk_register_clkdev(clk, NULL, "spdif-out");
+
+	clk = clk_register_mux(NULL, "spdif_in_mux_clk", spdif_in_parents,
+			ARRAY_SIZE(spdif_in_parents), 0,
+			SPEAR1340_PERIP_CLK_CFG, SPEAR1340_SPDIF_IN_CLK_SHIFT,
+			SPEAR1340_SPDIF_CLK_MASK, 0, &_lock);
+	clk_register_clkdev(clk, "spdif_in_mux_clk", NULL);
+
+	clk = clk_register_gate(NULL, "spdif_in_clk", "spdif_in_mux_clk", 0,
+			SPEAR1340_PERIP3_CLK_ENB, SPEAR1340_SPDIF_IN_CLK_ENB, 0,
+			&_lock);
+	clk_register_clkdev(clk, NULL, "spdif-in");
+
+	clk = clk_register_gate(NULL, "acp_clk", "acp_mux_clk", 0,
+			SPEAR1340_PERIP2_CLK_ENB, SPEAR1340_ACP_CLK_ENB, 0,
+			&_lock);
+	clk_register_clkdev(clk, NULL, "acp_clk");
+
+	clk = clk_register_gate(NULL, "plgpio_clk", "plgpio_mux_clk", 0,
+			SPEAR1340_PERIP3_CLK_ENB, SPEAR1340_PLGPIO_CLK_ENB, 0,
+			&_lock);
+	clk_register_clkdev(clk, NULL, "plgpio");
+
+	clk = clk_register_gate(NULL, "video_dec_clk", "video_dec_mux_clk", 0,
+			SPEAR1340_PERIP3_CLK_ENB, SPEAR1340_VIDEO_DEC_CLK_ENB,
+			0, &_lock);
+	clk_register_clkdev(clk, NULL, "video_dec");
+
+	clk = clk_register_gate(NULL, "video_enc_clk", "video_enc_mux_clk", 0,
+			SPEAR1340_PERIP3_CLK_ENB, SPEAR1340_VIDEO_ENC_CLK_ENB,
+			0, &_lock);
+	clk_register_clkdev(clk, NULL, "video_enc");
+
+	clk = clk_register_gate(NULL, "video_in_clk", "video_in_mux_clk", 0,
+			SPEAR1340_PERIP3_CLK_ENB, SPEAR1340_VIDEO_IN_CLK_ENB, 0,
+			&_lock);
+	clk_register_clkdev(clk, NULL, "spear_vip");
+
+	clk = clk_register_gate(NULL, "cam0_clk", "cam0_mux_clk", 0,
+			SPEAR1340_PERIP3_CLK_ENB, SPEAR1340_CAM0_CLK_ENB, 0,
+			&_lock);
+	clk_register_clkdev(clk, NULL, "spear_camif.0");
+
+	clk = clk_register_gate(NULL, "cam1_clk", "cam1_mux_clk", 0,
+			SPEAR1340_PERIP3_CLK_ENB, SPEAR1340_CAM1_CLK_ENB, 0,
+			&_lock);
+	clk_register_clkdev(clk, NULL, "spear_camif.1");
+
+	clk = clk_register_gate(NULL, "cam2_clk", "cam2_mux_clk", 0,
+			SPEAR1340_PERIP3_CLK_ENB, SPEAR1340_CAM2_CLK_ENB, 0,
+			&_lock);
+	clk_register_clkdev(clk, NULL, "spear_camif.2");
+
+	clk = clk_register_gate(NULL, "cam3_clk", "cam3_mux_clk", 0,
+			SPEAR1340_PERIP3_CLK_ENB, SPEAR1340_CAM3_CLK_ENB, 0,
+			&_lock);
+	clk_register_clkdev(clk, NULL, "spear_camif.3");
+
+	clk = clk_register_gate(NULL, "pwm_clk", "pwm_mux_clk", 0,
+			SPEAR1340_PERIP3_CLK_ENB, SPEAR1340_PWM_CLK_ENB, 0,
+			&_lock);
+	clk_register_clkdev(clk, NULL, "pwm");
+}
diff --git a/drivers/clk/spear/spear3xx_clock.c b/drivers/clk/spear/spear3xx_clock.c
new file mode 100644
index 0000000..01dd6da
--- /dev/null
+++ b/drivers/clk/spear/spear3xx_clock.c
@@ -0,0 +1,612 @@
+/*
+ * SPEAr3xx machines clock framework source file
+ *
+ * Copyright (C) 2012 ST Microelectronics
+ * Viresh Kumar <viresh.linux@gmail.com>
+ *
+ * This file is licensed under the terms of the GNU General Public
+ * License version 2. This program is licensed "as is" without any
+ * warranty of any kind, whether express or implied.
+ */
+
+#include <linux/clk.h>
+#include <linux/clkdev.h>
+#include <linux/err.h>
+#include <linux/io.h>
+#include <linux/of_platform.h>
+#include <linux/spinlock_types.h>
+#include <mach/misc_regs.h>
+#include "clk.h"
+
+static DEFINE_SPINLOCK(_lock);
+
+#define PLL1_CTR			(MISC_BASE + 0x008)
+#define PLL1_FRQ			(MISC_BASE + 0x00C)
+#define PLL2_CTR			(MISC_BASE + 0x014)
+#define PLL2_FRQ			(MISC_BASE + 0x018)
+#define PLL_CLK_CFG			(MISC_BASE + 0x020)
+	/* PLL_CLK_CFG register masks */
+	#define MCTR_CLK_SHIFT		28
+	#define MCTR_CLK_MASK		3
+
+#define CORE_CLK_CFG			(MISC_BASE + 0x024)
+	/* CORE CLK CFG register masks */
+	#define GEN_SYNTH2_3_CLK_SHIFT	18
+	#define GEN_SYNTH2_3_CLK_MASK	1
+
+	#define HCLK_RATIO_SHIFT	10
+	#define HCLK_RATIO_MASK		2
+	#define PCLK_RATIO_SHIFT	8
+	#define PCLK_RATIO_MASK		2
+
+#define PERIP_CLK_CFG			(MISC_BASE + 0x028)
+	/* PERIP_CLK_CFG register masks */
+	#define UART_CLK_SHIFT		4
+	#define UART_CLK_MASK		1
+	#define FIRDA_CLK_SHIFT		5
+	#define FIRDA_CLK_MASK		2
+	#define GPT0_CLK_SHIFT		8
+	#define GPT1_CLK_SHIFT		11
+	#define GPT2_CLK_SHIFT		12
+	#define GPT_CLK_MASK		1
+
+#define PERIP1_CLK_ENB			(MISC_BASE + 0x02C)
+	/* PERIP1_CLK_ENB register masks */
+	#define UART_CLK_ENB		3
+	#define SSP_CLK_ENB		5
+	#define I2C_CLK_ENB		7
+	#define JPEG_CLK_ENB		8
+	#define FIRDA_CLK_ENB		10
+	#define GPT1_CLK_ENB		11
+	#define GPT2_CLK_ENB		12
+	#define ADC_CLK_ENB		15
+	#define RTC_CLK_ENB		17
+	#define GPIO_CLK_ENB		18
+	#define DMA_CLK_ENB		19
+	#define SMI_CLK_ENB		21
+	#define GMAC_CLK_ENB		23
+	#define USBD_CLK_ENB		24
+	#define USBH_CLK_ENB		25
+	#define C3_CLK_ENB		31
+
+#define RAS_CLK_ENB			(MISC_BASE + 0x034)
+	#define RAS_AHB_CLK_ENB		0
+	#define RAS_PLL1_CLK_ENB	1
+	#define RAS_APB_CLK_ENB		2
+	#define RAS_32K_CLK_ENB		3
+	#define RAS_24M_CLK_ENB		4
+	#define RAS_48M_CLK_ENB		5
+	#define RAS_PLL2_CLK_ENB	7
+	#define RAS_SYNT0_CLK_ENB	8
+	#define RAS_SYNT1_CLK_ENB	9
+	#define RAS_SYNT2_CLK_ENB	10
+	#define RAS_SYNT3_CLK_ENB	11
+
+#define PRSC0_CLK_CFG			(MISC_BASE + 0x044)
+#define PRSC1_CLK_CFG			(MISC_BASE + 0x048)
+#define PRSC2_CLK_CFG			(MISC_BASE + 0x04C)
+#define AMEM_CLK_CFG			(MISC_BASE + 0x050)
+	#define AMEM_CLK_ENB		0
+
+#define CLCD_CLK_SYNT			(MISC_BASE + 0x05C)
+#define FIRDA_CLK_SYNT			(MISC_BASE + 0x060)
+#define UART_CLK_SYNT			(MISC_BASE + 0x064)
+#define GMAC_CLK_SYNT			(MISC_BASE + 0x068)
+#define GEN0_CLK_SYNT			(MISC_BASE + 0x06C)
+#define GEN1_CLK_SYNT			(MISC_BASE + 0x070)
+#define GEN2_CLK_SYNT			(MISC_BASE + 0x074)
+#define GEN3_CLK_SYNT			(MISC_BASE + 0x078)
+
+/* pll rate configuration table, in ascending order of rates */
+static struct pll_rate_tbl pll_rtbl[] = {
+	{.mode = 0, .m = 0x53, .n = 0x0C, .p = 0x1}, /* vco 332 & pll 166 MHz */
+	{.mode = 0, .m = 0x85, .n = 0x0C, .p = 0x1}, /* vco 532 & pll 266 MHz */
+	{.mode = 0, .m = 0xA6, .n = 0x0C, .p = 0x1}, /* vco 664 & pll 332 MHz */
+};
+
+/* aux rate configuration table, in ascending order of rates */
+static struct aux_rate_tbl aux_rtbl[] = {
+	/* For PLL1 = 332 MHz */
+	{.xscale = 2, .yscale = 27, .eq = 0}, /* 12.296 MHz */
+	{.xscale = 2, .yscale = 8, .eq = 0}, /* 41.5 MHz */
+	{.xscale = 2, .yscale = 4, .eq = 0}, /* 83 MHz */
+	{.xscale = 1, .yscale = 2, .eq = 1}, /* 166 MHz */
+};
+
+/* gpt rate configuration table, in ascending order of rates */
+static struct gpt_rate_tbl gpt_rtbl[] = {
+	/* For pll1 = 332 MHz */
+	{.mscale = 4, .nscale = 0}, /* 41.5 MHz */
+	{.mscale = 2, .nscale = 0}, /* 55.3 MHz */
+	{.mscale = 1, .nscale = 0}, /* 83 MHz */
+};
+
+/* clock parents */
+static const char *uart0_parents[] = { "pll3_48m_clk", "uart_synth_gate_clk", };
+static const char *firda_parents[] = { "pll3_48m_clk", "firda_synth_gate_clk",
+};
+static const char *gpt0_parents[] = { "pll3_48m_clk", "gpt0_synth_clk", };
+static const char *gpt1_parents[] = { "pll3_48m_clk", "gpt1_synth_clk", };
+static const char *gpt2_parents[] = { "pll3_48m_clk", "gpt2_synth_clk", };
+static const char *gen2_3_parents[] = { "pll1_clk", "pll2_clk", };
+static const char *ddr_parents[] = { "ahb_clk", "ahbmult2_clk", "none",
+	"pll2_clk", };
+
+#ifdef CONFIG_MACH_SPEAR300
+static void __init spear300_clk_init(void)
+{
+	struct clk *clk;
+
+	clk = clk_register_fixed_factor(NULL, "clcd_clk", "ras_pll3_48m_clk", 0,
+			1, 1);
+	clk_register_clkdev(clk, NULL, "60000000.clcd");
+
+	clk = clk_register_fixed_factor(NULL, "fsmc_clk", "ras_ahb_clk", 0, 1,
+			1);
+	clk_register_clkdev(clk, NULL, "94000000.flash");
+
+	clk = clk_register_fixed_factor(NULL, "sdhci_clk", "ras_ahb_clk", 0, 1,
+			1);
+	clk_register_clkdev(clk, NULL, "70000000.sdhci");
+
+	clk = clk_register_fixed_factor(NULL, "gpio1_clk", "ras_apb_clk", 0, 1,
+			1);
+	clk_register_clkdev(clk, NULL, "a9000000.gpio");
+
+	clk = clk_register_fixed_factor(NULL, "kbd_clk", "ras_apb_clk", 0, 1,
+			1);
+	clk_register_clkdev(clk, NULL, "a0000000.kbd");
+}
+#endif
+
+/* clock registrations and lookups for spear 310 machines */
+#ifdef CONFIG_MACH_SPEAR310
+static void __init spear310_clk_init(void)
+{
+	struct clk *clk;
+
+	clk = clk_register_fixed_factor(NULL, "emi_clk", "ras_ahb_clk", 0, 1,
+			1);
+	clk_register_clkdev(clk, "emi", NULL);
+
+	clk = clk_register_fixed_factor(NULL, "fsmc_clk", "ras_ahb_clk", 0, 1,
+			1);
+	clk_register_clkdev(clk, NULL, "44000000.flash");
+
+	clk = clk_register_fixed_factor(NULL, "tdm_clk", "ras_ahb_clk", 0, 1,
+			1);
+	clk_register_clkdev(clk, NULL, "tdm");
+
+	clk = clk_register_fixed_factor(NULL, "uart1_clk", "ras_apb_clk", 0, 1,
+			1);
+	clk_register_clkdev(clk, NULL, "b2000000.serial");
+
+	clk = clk_register_fixed_factor(NULL, "uart2_clk", "ras_apb_clk", 0, 1,
+			1);
+	clk_register_clkdev(clk, NULL, "b2080000.serial");
+
+	clk = clk_register_fixed_factor(NULL, "uart3_clk", "ras_apb_clk", 0, 1,
+			1);
+	clk_register_clkdev(clk, NULL, "b2100000.serial");
+
+	clk = clk_register_fixed_factor(NULL, "uart4_clk", "ras_apb_clk", 0, 1,
+			1);
+	clk_register_clkdev(clk, NULL, "b2180000.serial");
+
+	clk = clk_register_fixed_factor(NULL, "uart5_clk", "ras_apb_clk", 0, 1,
+			1);
+	clk_register_clkdev(clk, NULL, "b2200000.serial");
+}
+#endif
+
+/* clock registrations and lookups for spear 320 machines */
+#ifdef CONFIG_MACH_SPEAR320
+	#define SMII_PCLK_SHIFT				18
+	#define SMII_PCLK_MASK				2
+	#define SMII_PCLK_VAL_PAD			0x0
+	#define SMII_PCLK_VAL_PLL2			0x1
+	#define SMII_PCLK_VAL_SYNTH0			0x2
+	#define SDHCI_PCLK_SHIFT			15
+	#define SDHCI_PCLK_MASK				1
+	#define SDHCI_PCLK_VAL_48M			0x0
+	#define SDHCI_PCLK_VAL_SYNTH3			0x1
+	#define I2S_REF_PCLK_SHIFT			8
+	#define I2S_REF_PCLK_MASK			1
+	#define I2S_REF_PCLK_SYNTH_VAL			0x1
+	#define I2S_REF_PCLK_PLL2_VAL			0x0
+	#define UART1_PCLK_SHIFT			6
+	#define UART1_PCLK_MASK				1
+	#define SPEAR320_UARTX_PCLK_VAL_SYNTH1		0x0
+	#define SPEAR320_UARTX_PCLK_VAL_APB		0x1
+
+static const char *i2s_ref_parents[] = { "ras_pll2_clk",
+	"ras_gen2_synth_gate_clk", };
+static const char *sdhci_parents[] = { "ras_pll3_48m_clk",
+	"ras_gen3_synth_gate_clk",
+};
+static const char *smii0_parents[] = { "smii_125m_pad", "ras_pll2_clk",
+	"ras_gen0_synth_gate_clk", };
+static const char *uartx_parents[] = { "ras_gen1_synth_gate_clk", "ras_apb_clk",
+};
+
+static void __init spear320_clk_init(void)
+{
+	struct clk *clk;
+
+	clk = clk_register_fixed_rate(NULL, "smii_125m_pad_clk", NULL,
+			CLK_IS_ROOT, 125000000);
+	clk_register_clkdev(clk, "smii_125m_pad", NULL);
+
+	clk = clk_register_fixed_factor(NULL, "clcd_clk", "ras_pll3_48m_clk", 0,
+			1, 1);
+	clk_register_clkdev(clk, NULL, "90000000.clcd");
+
+	clk = clk_register_fixed_factor(NULL, "emi_clk", "ras_ahb_clk", 0, 1,
+			1);
+	clk_register_clkdev(clk, "emi", NULL);
+
+	clk = clk_register_fixed_factor(NULL, "fsmc_clk", "ras_ahb_clk", 0, 1,
+			1);
+	clk_register_clkdev(clk, NULL, "4c000000.flash");
+
+	clk = clk_register_fixed_factor(NULL, "i2c1_clk", "ras_ahb_clk", 0, 1,
+			1);
+	clk_register_clkdev(clk, NULL, "a7000000.i2c");
+
+	clk = clk_register_fixed_factor(NULL, "pwm_clk", "ras_ahb_clk", 0, 1,
+			1);
+	clk_register_clkdev(clk, "pwm", NULL);
+
+	clk = clk_register_fixed_factor(NULL, "ssp1_clk", "ras_ahb_clk", 0, 1,
+			1);
+	clk_register_clkdev(clk, NULL, "a5000000.spi");
+
+	clk = clk_register_fixed_factor(NULL, "ssp2_clk", "ras_ahb_clk", 0, 1,
+			1);
+	clk_register_clkdev(clk, NULL, "a6000000.spi");
+
+	clk = clk_register_fixed_factor(NULL, "can0_clk", "ras_apb_clk", 0, 1,
+			1);
+	clk_register_clkdev(clk, NULL, "c_can_platform.0");
+
+	clk = clk_register_fixed_factor(NULL, "can1_clk", "ras_apb_clk", 0, 1,
+			1);
+	clk_register_clkdev(clk, NULL, "c_can_platform.1");
+
+	clk = clk_register_fixed_factor(NULL, "i2s_clk", "ras_apb_clk", 0, 1,
+			1);
+	clk_register_clkdev(clk, NULL, "i2s");
+
+	clk = clk_register_mux(NULL, "i2s_ref_clk", i2s_ref_parents,
+			ARRAY_SIZE(i2s_ref_parents), 0, SPEAR320_CONTROL_REG,
+			I2S_REF_PCLK_SHIFT, I2S_REF_PCLK_MASK, 0, &_lock);
+	clk_register_clkdev(clk, "i2s_ref_clk", NULL);
+
+	clk = clk_register_fixed_factor(NULL, "i2s_sclk", "i2s_ref_clk", 0, 1,
+			4);
+	clk_register_clkdev(clk, "i2s_sclk", NULL);
+
+	clk = clk_register_mux(NULL, "rs485_clk", uartx_parents,
+			ARRAY_SIZE(uartx_parents), 0, SPEAR320_EXT_CTRL_REG,
+			SPEAR320_RS485_PCLK_SHIFT, SPEAR320_UARTX_PCLK_MASK, 0,
+			&_lock);
+	clk_register_clkdev(clk, NULL, "a9300000.serial");
+
+	clk = clk_register_mux(NULL, "sdhci_clk", sdhci_parents,
+			ARRAY_SIZE(sdhci_parents), 0, SPEAR320_CONTROL_REG,
+			SDHCI_PCLK_SHIFT, SDHCI_PCLK_MASK, 0, &_lock);
+	clk_register_clkdev(clk, NULL, "70000000.sdhci");
+
+	clk = clk_register_mux(NULL, "smii_pclk", smii0_parents,
+			ARRAY_SIZE(smii0_parents), 0, SPEAR320_CONTROL_REG,
+			SMII_PCLK_SHIFT, SMII_PCLK_MASK, 0, &_lock);
+	clk_register_clkdev(clk, NULL, "smii_pclk");
+
+	clk = clk_register_fixed_factor(NULL, "smii_clk", "smii_pclk", 0, 1, 1);
+	clk_register_clkdev(clk, NULL, "smii");
+
+	clk = clk_register_mux(NULL, "uart1_clk", uartx_parents,
+			ARRAY_SIZE(uartx_parents), 0, SPEAR320_CONTROL_REG,
+			UART1_PCLK_SHIFT, UART1_PCLK_MASK, 0, &_lock);
+	clk_register_clkdev(clk, NULL, "a3000000.serial");
+
+	clk = clk_register_mux(NULL, "uart2_clk", uartx_parents,
+			ARRAY_SIZE(uartx_parents), 0, SPEAR320_EXT_CTRL_REG,
+			SPEAR320_UART2_PCLK_SHIFT, SPEAR320_UARTX_PCLK_MASK, 0,
+			&_lock);
+	clk_register_clkdev(clk, NULL, "a4000000.serial");
+
+	clk = clk_register_mux(NULL, "uart3_clk", uartx_parents,
+			ARRAY_SIZE(uartx_parents), 0, SPEAR320_EXT_CTRL_REG,
+			SPEAR320_UART3_PCLK_SHIFT, SPEAR320_UARTX_PCLK_MASK, 0,
+			&_lock);
+	clk_register_clkdev(clk, NULL, "a9100000.serial");
+
+	clk = clk_register_mux(NULL, "uart4_clk", uartx_parents,
+			ARRAY_SIZE(uartx_parents), 0, SPEAR320_EXT_CTRL_REG,
+			SPEAR320_UART4_PCLK_SHIFT, SPEAR320_UARTX_PCLK_MASK, 0,
+			&_lock);
+	clk_register_clkdev(clk, NULL, "a9200000.serial");
+
+	clk = clk_register_mux(NULL, "uart5_clk", uartx_parents,
+			ARRAY_SIZE(uartx_parents), 0, SPEAR320_EXT_CTRL_REG,
+			SPEAR320_UART5_PCLK_SHIFT, SPEAR320_UARTX_PCLK_MASK, 0,
+			&_lock);
+	clk_register_clkdev(clk, NULL, "60000000.serial");
+
+	clk = clk_register_mux(NULL, "uart6_clk", uartx_parents,
+			ARRAY_SIZE(uartx_parents), 0, SPEAR320_EXT_CTRL_REG,
+			SPEAR320_UART6_PCLK_SHIFT, SPEAR320_UARTX_PCLK_MASK, 0,
+			&_lock);
+	clk_register_clkdev(clk, NULL, "60100000.serial");
+}
+#endif
+
+void __init spear3xx_clk_init(void)
+{
+	struct clk *clk, *clk1;
+
+	clk = clk_register_fixed_rate(NULL, "apb_pclk", NULL, CLK_IS_ROOT, 0);
+	clk_register_clkdev(clk, "apb_pclk", NULL);
+
+	clk = clk_register_fixed_rate(NULL, "osc_32k_clk", NULL, CLK_IS_ROOT,
+			32000);
+	clk_register_clkdev(clk, "osc_32k_clk", NULL);
+
+	clk = clk_register_fixed_rate(NULL, "osc_24m_clk", NULL, CLK_IS_ROOT,
+			24000000);
+	clk_register_clkdev(clk, "osc_24m_clk", NULL);
+
+	/* clock derived from 32 KHz osc clk */
+	clk = clk_register_gate(NULL, "rtc-spear", "osc_32k_clk", 0,
+			PERIP1_CLK_ENB, RTC_CLK_ENB, 0, &_lock);
+	clk_register_clkdev(clk, NULL, "fc900000.rtc");
+
+	/* clock derived from 24 MHz osc clk */
+	clk = clk_register_fixed_rate(NULL, "pll3_48m_clk", "osc_24m_clk", 0,
+			48000000);
+	clk_register_clkdev(clk, "pll3_48m_clk", NULL);
+
+	clk = clk_register_fixed_factor(NULL, "wdt_clk", "osc_24m_clk", 0, 1,
+			1);
+	clk_register_clkdev(clk, NULL, "fc880000.wdt");
+
+	clk = clk_register_vco_pll("vco1_clk", "pll1_clk", NULL,
+			"osc_24m_clk", 0, PLL1_CTR, PLL1_FRQ, pll_rtbl,
+			ARRAY_SIZE(pll_rtbl), &_lock, &clk1, NULL);
+	clk_register_clkdev(clk, "vco1_clk", NULL);
+	clk_register_clkdev(clk1, "pll1_clk", NULL);
+
+	clk = clk_register_vco_pll("vco2_clk", "pll2_clk", NULL,
+			"osc_24m_clk", 0, PLL2_CTR, PLL2_FRQ, pll_rtbl,
+			ARRAY_SIZE(pll_rtbl), &_lock, &clk1, NULL);
+	clk_register_clkdev(clk, "vco2_clk", NULL);
+	clk_register_clkdev(clk1, "pll2_clk", NULL);
+
+	/* clock derived from pll1 clk */
+	clk = clk_register_fixed_factor(NULL, "cpu_clk", "pll1_clk", 0, 1, 1);
+	clk_register_clkdev(clk, "cpu_clk", NULL);
+
+	clk = clk_register_divider(NULL, "ahb_clk", "pll1_clk",
+			CLK_SET_RATE_PARENT, CORE_CLK_CFG, HCLK_RATIO_SHIFT,
+			HCLK_RATIO_MASK, 0, &_lock);
+	clk_register_clkdev(clk, "ahb_clk", NULL);
+
+	clk = clk_register_aux("uart_synth_clk", "uart_synth_gate_clk",
+			"pll1_clk", 0, UART_CLK_SYNT, NULL, aux_rtbl,
+			ARRAY_SIZE(aux_rtbl), &_lock, &clk1);
+	clk_register_clkdev(clk, "uart_synth_clk", NULL);
+	clk_register_clkdev(clk1, "uart_synth_gate_clk", NULL);
+
+	clk = clk_register_mux(NULL, "uart0_mux_clk", uart0_parents,
+			ARRAY_SIZE(uart0_parents), 0, PERIP_CLK_CFG,
+			UART_CLK_SHIFT, UART_CLK_MASK, 0, &_lock);
+	clk_register_clkdev(clk, "uart0_mux_clk", NULL);
+
+	clk = clk_register_gate(NULL, "uart0", "uart0_mux_clk", 0,
+			PERIP1_CLK_ENB, UART_CLK_ENB, 0, &_lock);
+	clk_register_clkdev(clk, NULL, "d0000000.serial");
+
+	clk = clk_register_aux("firda_synth_clk", "firda_synth_gate_clk",
+			"pll1_clk", 0, FIRDA_CLK_SYNT, NULL, aux_rtbl,
+			ARRAY_SIZE(aux_rtbl), &_lock, &clk1);
+	clk_register_clkdev(clk, "firda_synth_clk", NULL);
+	clk_register_clkdev(clk1, "firda_synth_gate_clk", NULL);
+
+	clk = clk_register_mux(NULL, "firda_mux_clk", firda_parents,
+			ARRAY_SIZE(firda_parents), 0, PERIP_CLK_CFG,
+			FIRDA_CLK_SHIFT, FIRDA_CLK_MASK, 0, &_lock);
+	clk_register_clkdev(clk, "firda_mux_clk", NULL);
+
+	clk = clk_register_gate(NULL, "firda_clk", "firda_mux_clk", 0,
+			PERIP1_CLK_ENB, FIRDA_CLK_ENB, 0, &_lock);
+	clk_register_clkdev(clk, NULL, "firda");
+
+	/* gpt clocks */
+	clk_register_gpt("gpt0_synth_clk", "pll1_clk", 0, PRSC0_CLK_CFG,
+			gpt_rtbl, ARRAY_SIZE(gpt_rtbl), &_lock);
+	clk = clk_register_mux(NULL, "gpt0_clk", gpt0_parents,
+			ARRAY_SIZE(gpt0_parents), 0, PERIP_CLK_CFG,
+			GPT0_CLK_SHIFT, GPT_CLK_MASK, 0, &_lock);
+	clk_register_clkdev(clk, NULL, "gpt0");
+
+	clk_register_gpt("gpt1_synth_clk", "pll1_clk", 0, PRSC1_CLK_CFG,
+			gpt_rtbl, ARRAY_SIZE(gpt_rtbl), &_lock);
+	clk = clk_register_mux(NULL, "gpt1_mux_clk", gpt1_parents,
+			ARRAY_SIZE(gpt1_parents), 0, PERIP_CLK_CFG,
+			GPT1_CLK_SHIFT, GPT_CLK_MASK, 0, &_lock);
+	clk_register_clkdev(clk, "gpt1_mux_clk", NULL);
+	clk = clk_register_gate(NULL, "gpt1_clk", "gpt1_mux_clk", 0,
+			PERIP1_CLK_ENB, GPT1_CLK_ENB, 0, &_lock);
+	clk_register_clkdev(clk, NULL, "gpt1");
+
+	clk_register_gpt("gpt2_synth_clk", "pll1_clk", 0, PRSC2_CLK_CFG,
+			gpt_rtbl, ARRAY_SIZE(gpt_rtbl), &_lock);
+	clk = clk_register_mux(NULL, "gpt2_mux_clk", gpt2_parents,
+			ARRAY_SIZE(gpt2_parents), 0, PERIP_CLK_CFG,
+			GPT2_CLK_SHIFT, GPT_CLK_MASK, 0, &_lock);
+	clk_register_clkdev(clk, "gpt2_mux_clk", NULL);
+	clk = clk_register_gate(NULL, "gpt2_clk", "gpt2_mux_clk", 0,
+			PERIP1_CLK_ENB, GPT2_CLK_ENB, 0, &_lock);
+	clk_register_clkdev(clk, NULL, "gpt2");
+
+	/* general synths clocks */
+	clk = clk_register_aux("gen0_synth_clk", "gen0_synth_gate_clk",
+			"pll1_clk", 0, GEN0_CLK_SYNT, NULL, aux_rtbl,
+			ARRAY_SIZE(aux_rtbl), &_lock, &clk1);
+	clk_register_clkdev(clk, "gen0_synth_clk", NULL);
+	clk_register_clkdev(clk1, "gen0_synth_gate_clk", NULL);
+
+	clk = clk_register_aux("gen1_synth_clk", "gen1_synth_gate_clk",
+			"pll1_clk", 0, GEN1_CLK_SYNT, NULL, aux_rtbl,
+			ARRAY_SIZE(aux_rtbl), &_lock, &clk1);
+	clk_register_clkdev(clk, "gen1_synth_clk", NULL);
+	clk_register_clkdev(clk1, "gen1_synth_gate_clk", NULL);
+
+	clk = clk_register_mux(NULL, "gen2_3_parent_clk", gen2_3_parents,
+			ARRAY_SIZE(gen2_3_parents), 0, CORE_CLK_CFG,
+			GEN_SYNTH2_3_CLK_SHIFT, GEN_SYNTH2_3_CLK_MASK, 0,
+			&_lock);
+	clk_register_clkdev(clk, "gen2_3_parent_clk", NULL);
+
+	clk = clk_register_aux("gen2_synth_clk", "gen2_synth_gate_clk",
+			"gen2_3_parent_clk", 0, GEN2_CLK_SYNT, NULL, aux_rtbl,
+			ARRAY_SIZE(aux_rtbl), &_lock, &clk1);
+	clk_register_clkdev(clk, "gen2_synth_clk", NULL);
+	clk_register_clkdev(clk1, "gen2_synth_gate_clk", NULL);
+
+	clk = clk_register_aux("gen3_synth_clk", "gen3_synth_gate_clk",
+			"gen2_3_parent_clk", 0, GEN3_CLK_SYNT, NULL, aux_rtbl,
+			ARRAY_SIZE(aux_rtbl), &_lock, &clk1);
+	clk_register_clkdev(clk, "gen3_synth_clk", NULL);
+	clk_register_clkdev(clk1, "gen3_synth_gate_clk", NULL);
+
+	/* clock derived from pll3 clk */
+	clk = clk_register_gate(NULL, "usbh_clk", "pll3_48m_clk", 0,
+			PERIP1_CLK_ENB, USBH_CLK_ENB, 0, &_lock);
+	clk_register_clkdev(clk, "usbh_clk", NULL);
+
+	clk = clk_register_fixed_factor(NULL, "usbh.0_clk", "usbh_clk", 0, 1,
+			1);
+	clk_register_clkdev(clk, "usbh.0_clk", NULL);
+
+	clk = clk_register_fixed_factor(NULL, "usbh.1_clk", "usbh_clk", 0, 1,
+			1);
+	clk_register_clkdev(clk, "usbh.1_clk", NULL);
+
+	clk = clk_register_gate(NULL, "usbd_clk", "pll3_48m_clk", 0,
+			PERIP1_CLK_ENB, USBD_CLK_ENB, 0, &_lock);
+	clk_register_clkdev(clk, NULL, "designware_udc");
+
+	/* clock derived from ahb clk */
+	clk = clk_register_fixed_factor(NULL, "ahbmult2_clk", "ahb_clk", 0, 2,
+			1);
+	clk_register_clkdev(clk, "ahbmult2_clk", NULL);
+
+	clk = clk_register_mux(NULL, "ddr_clk", ddr_parents,
+			ARRAY_SIZE(ddr_parents), 0, PLL_CLK_CFG, MCTR_CLK_SHIFT,
+			MCTR_CLK_MASK, 0, &_lock);
+	clk_register_clkdev(clk, "ddr_clk", NULL);
+
+	clk = clk_register_divider(NULL, "apb_clk", "ahb_clk",
+			CLK_SET_RATE_PARENT, CORE_CLK_CFG, PCLK_RATIO_SHIFT,
+			PCLK_RATIO_MASK, 0, &_lock);
+	clk_register_clkdev(clk, "apb_clk", NULL);
+
+	clk = clk_register_gate(NULL, "amem_clk", "ahb_clk", 0, AMEM_CLK_CFG,
+			AMEM_CLK_ENB, 0, &_lock);
+	clk_register_clkdev(clk, "amem_clk", NULL);
+
+	clk = clk_register_gate(NULL, "c3_clk", "ahb_clk", 0, PERIP1_CLK_ENB,
+			C3_CLK_ENB, 0, &_lock);
+	clk_register_clkdev(clk, NULL, "c3_clk");
+
+	clk = clk_register_gate(NULL, "dma_clk", "ahb_clk", 0, PERIP1_CLK_ENB,
+			DMA_CLK_ENB, 0, &_lock);
+	clk_register_clkdev(clk, NULL, "fc400000.dma");
+
+	clk = clk_register_gate(NULL, "gmac_clk", "ahb_clk", 0, PERIP1_CLK_ENB,
+			GMAC_CLK_ENB, 0, &_lock);
+	clk_register_clkdev(clk, NULL, "e0800000.eth");
+
+	clk = clk_register_gate(NULL, "i2c0_clk", "ahb_clk", 0, PERIP1_CLK_ENB,
+			I2C_CLK_ENB, 0, &_lock);
+	clk_register_clkdev(clk, NULL, "d0180000.i2c");
+
+	clk = clk_register_gate(NULL, "jpeg_clk", "ahb_clk", 0, PERIP1_CLK_ENB,
+			JPEG_CLK_ENB, 0, &_lock);
+	clk_register_clkdev(clk, NULL, "jpeg");
+
+	clk = clk_register_gate(NULL, "smi_clk", "ahb_clk", 0, PERIP1_CLK_ENB,
+			SMI_CLK_ENB, 0, &_lock);
+	clk_register_clkdev(clk, NULL, "fc000000.flash");
+
+	/* clock derived from apb clk */
+	clk = clk_register_gate(NULL, "adc_clk", "apb_clk", 0, PERIP1_CLK_ENB,
+			ADC_CLK_ENB, 0, &_lock);
+	clk_register_clkdev(clk, NULL, "adc");
+
+	clk = clk_register_gate(NULL, "gpio0_clk", "apb_clk", 0, PERIP1_CLK_ENB,
+			GPIO_CLK_ENB, 0, &_lock);
+	clk_register_clkdev(clk, NULL, "fc980000.gpio");
+
+	clk = clk_register_gate(NULL, "ssp0_clk", "apb_clk", 0, PERIP1_CLK_ENB,
+			SSP_CLK_ENB, 0, &_lock);
+	clk_register_clkdev(clk, NULL, "d0100000.spi");
+
+	/* RAS clk enable */
+	clk = clk_register_gate(NULL, "ras_ahb_clk", "ahb_clk", 0, RAS_CLK_ENB,
+			RAS_AHB_CLK_ENB, 0, &_lock);
+	clk_register_clkdev(clk, "ras_ahb_clk", NULL);
+
+	clk = clk_register_gate(NULL, "ras_apb_clk", "apb_clk", 0, RAS_CLK_ENB,
+			RAS_APB_CLK_ENB, 0, &_lock);
+	clk_register_clkdev(clk, "ras_apb_clk", NULL);
+
+	clk = clk_register_gate(NULL, "ras_32k_clk", "osc_32k_clk", 0,
+			RAS_CLK_ENB, RAS_32K_CLK_ENB, 0, &_lock);
+	clk_register_clkdev(clk, "ras_32k_clk", NULL);
+
+	clk = clk_register_gate(NULL, "ras_24m_clk", "osc_24m_clk", 0,
+			RAS_CLK_ENB, RAS_24M_CLK_ENB, 0, &_lock);
+	clk_register_clkdev(clk, "ras_24m_clk", NULL);
+
+	clk = clk_register_gate(NULL, "ras_pll1_clk", "pll1_clk", 0,
+			RAS_CLK_ENB, RAS_PLL1_CLK_ENB, 0, &_lock);
+	clk_register_clkdev(clk, "ras_pll1_clk", NULL);
+
+	clk = clk_register_gate(NULL, "ras_pll2_clk", "pll2_clk", 0,
+			RAS_CLK_ENB, RAS_PLL2_CLK_ENB, 0, &_lock);
+	clk_register_clkdev(clk, "ras_pll2_clk", NULL);
+
+	clk = clk_register_gate(NULL, "ras_pll3_48m_clk", "pll3_48m_clk", 0,
+			RAS_CLK_ENB, RAS_48M_CLK_ENB, 0, &_lock);
+	clk_register_clkdev(clk, "ras_pll3_48m_clk", NULL);
+
+	clk = clk_register_gate(NULL, "ras_gen0_synth_gate_clk",
+			"gen0_synth_gate_clk", 0, RAS_CLK_ENB,
+			RAS_SYNT0_CLK_ENB, 0, &_lock);
+	clk_register_clkdev(clk, "ras_gen0_synth_gate_clk", NULL);
+
+	clk = clk_register_gate(NULL, "ras_gen1_synth_gate_clk",
+			"gen1_synth_gate_clk", 0, RAS_CLK_ENB,
+			RAS_SYNT1_CLK_ENB, 0, &_lock);
+	clk_register_clkdev(clk, "ras_gen1_synth_gate_clk", NULL);
+
+	clk = clk_register_gate(NULL, "ras_gen2_synth_gate_clk",
+			"gen2_synth_gate_clk", 0, RAS_CLK_ENB,
+			RAS_SYNT2_CLK_ENB, 0, &_lock);
+	clk_register_clkdev(clk, "ras_gen2_synth_gate_clk", NULL);
+
+	clk = clk_register_gate(NULL, "ras_gen3_synth_gate_clk",
+			"gen3_synth_gate_clk", 0, RAS_CLK_ENB,
+			RAS_SYNT3_CLK_ENB, 0, &_lock);
+	clk_register_clkdev(clk, "ras_gen3_synth_gate_clk", NULL);
+
+	if (of_machine_is_compatible("st,spear300"))
+		spear300_clk_init();
+	else if (of_machine_is_compatible("st,spear310"))
+		spear310_clk_init();
+	else if (of_machine_is_compatible("st,spear320"))
+		spear320_clk_init();
+}
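spear3xx_clk_init() only builds the clock tree and its clkdev lookups; the SPEAr3xx machine code is expected to call it early in boot, before the timer and peripheral drivers that resolve these names. A hedged sketch of such a call site (the function name is hypothetical and error handling is elided):

#include <linux/clk.h>
#include <linux/clkdev.h>
#include <linux/err.h>
#include <linux/init.h>

void spear3xx_clk_init(void);	/* defined in spear3xx_clock.c above */

/* Register the clock tree, then resolve the "gpt0" lookup it installed. */
static void __init example_spear3xx_timer_init(void)
{
	struct clk *gpt_clk;

	spear3xx_clk_init();

	gpt_clk = clk_get_sys("gpt0", NULL);
	if (!IS_ERR(gpt_clk))
		clk_prepare_enable(gpt_clk);
}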
diff --git a/drivers/clk/spear/spear6xx_clock.c b/drivers/clk/spear/spear6xx_clock.c
new file mode 100644
index 0000000..61026ae
--- /dev/null
+++ b/drivers/clk/spear/spear6xx_clock.c
@@ -0,0 +1,342 @@
+/*
+ * SPEAr6xx machines clock framework source file
+ *
+ * Copyright (C) 2012 ST Microelectronics
+ * Viresh Kumar <viresh.linux@gmail.com>
+ *
+ * This file is licensed under the terms of the GNU General Public
+ * License version 2. This program is licensed "as is" without any
+ * warranty of any kind, whether express or implied.
+ */
+
+#include <linux/clk.h>
+#include <linux/clkdev.h>
+#include <linux/io.h>
+#include <linux/spinlock_types.h>
+#include <mach/misc_regs.h>
+#include "clk.h"
+
+static DEFINE_SPINLOCK(_lock);
+
+#define PLL1_CTR			(MISC_BASE + 0x008)
+#define PLL1_FRQ			(MISC_BASE + 0x00C)
+#define PLL2_CTR			(MISC_BASE + 0x014)
+#define PLL2_FRQ			(MISC_BASE + 0x018)
+#define PLL_CLK_CFG			(MISC_BASE + 0x020)
+	/* PLL_CLK_CFG register masks */
+	#define MCTR_CLK_SHIFT		28
+	#define MCTR_CLK_MASK		3
+
+#define CORE_CLK_CFG			(MISC_BASE + 0x024)
+	/* CORE CLK CFG register masks */
+	#define HCLK_RATIO_SHIFT	10
+	#define HCLK_RATIO_MASK		2
+	#define PCLK_RATIO_SHIFT	8
+	#define PCLK_RATIO_MASK		2
+
+#define PERIP_CLK_CFG			(MISC_BASE + 0x028)
+	/* PERIP_CLK_CFG register masks */
+	#define CLCD_CLK_SHIFT		2
+	#define CLCD_CLK_MASK		2
+	#define UART_CLK_SHIFT		4
+	#define UART_CLK_MASK		1
+	#define FIRDA_CLK_SHIFT		5
+	#define FIRDA_CLK_MASK		2
+	#define GPT0_CLK_SHIFT		8
+	#define GPT1_CLK_SHIFT		10
+	#define GPT2_CLK_SHIFT		11
+	#define GPT3_CLK_SHIFT		12
+	#define GPT_CLK_MASK		1
+
+#define PERIP1_CLK_ENB			(MISC_BASE + 0x02C)
+	/* PERIP1_CLK_ENB register masks */
+	#define UART0_CLK_ENB		3
+	#define UART1_CLK_ENB		4
+	#define SSP0_CLK_ENB		5
+	#define SSP1_CLK_ENB		6
+	#define I2C_CLK_ENB		7
+	#define JPEG_CLK_ENB		8
+	#define FSMC_CLK_ENB		9
+	#define FIRDA_CLK_ENB		10
+	#define GPT2_CLK_ENB		11
+	#define GPT3_CLK_ENB		12
+	#define GPIO2_CLK_ENB		13
+	#define SSP2_CLK_ENB		14
+	#define ADC_CLK_ENB		15
+	#define GPT1_CLK_ENB		11
+	#define RTC_CLK_ENB		17
+	#define GPIO1_CLK_ENB		18
+	#define DMA_CLK_ENB		19
+	#define SMI_CLK_ENB		21
+	#define CLCD_CLK_ENB		22
+	#define GMAC_CLK_ENB		23
+	#define USBD_CLK_ENB		24
+	#define USBH0_CLK_ENB		25
+	#define USBH1_CLK_ENB		26
+
+#define PRSC0_CLK_CFG			(MISC_BASE + 0x044)
+#define PRSC1_CLK_CFG			(MISC_BASE + 0x048)
+#define PRSC2_CLK_CFG			(MISC_BASE + 0x04C)
+
+#define CLCD_CLK_SYNT			(MISC_BASE + 0x05C)
+#define FIRDA_CLK_SYNT			(MISC_BASE + 0x060)
+#define UART_CLK_SYNT			(MISC_BASE + 0x064)
+
+/* vco rate configuration table, in ascending order of rates */
+static struct pll_rate_tbl pll_rtbl[] = {
+	{.mode = 0, .m = 0x53, .n = 0x0F, .p = 0x1}, /* vco 332 & pll 166 MHz */
+	{.mode = 0, .m = 0x85, .n = 0x0F, .p = 0x1}, /* vco 532 & pll 266 MHz */
+	{.mode = 0, .m = 0xA6, .n = 0x0F, .p = 0x1}, /* vco 664 & pll 332 MHz */
+};
+
+/* aux rate configuration table, in ascending order of rates */
+static struct aux_rate_tbl aux_rtbl[] = {
+	/* For PLL1 = 332 MHz */
+	{.xscale = 2, .yscale = 8, .eq = 0}, /* 41.5 MHz */
+	{.xscale = 2, .yscale = 4, .eq = 0}, /* 83 MHz */
+	{.xscale = 1, .yscale = 2, .eq = 1}, /* 166 MHz */
+};
+
+static const char *clcd_parents[] = { "pll3_48m_clk", "clcd_synth_gate_clk", };
+static const char *firda_parents[] = { "pll3_48m_clk", "firda_synth_gate_clk",
+};
+static const char *uart_parents[] = { "pll3_48m_clk", "uart_synth_gate_clk", };
+static const char *gpt0_1_parents[] = { "pll3_48m_clk", "gpt0_1_synth_clk", };
+static const char *gpt2_parents[] = { "pll3_48m_clk", "gpt2_synth_clk", };
+static const char *gpt3_parents[] = { "pll3_48m_clk", "gpt3_synth_clk", };
+static const char *ddr_parents[] = { "ahb_clk", "ahbmult2_clk", "none",
+	"pll2_clk", };
+
+/* gpt rate configuration table, in ascending order of rates */
+static struct gpt_rate_tbl gpt_rtbl[] = {
+	/* For pll1 = 332 MHz */
+	{.mscale = 4, .nscale = 0}, /* 41.5 MHz */
+	{.mscale = 2, .nscale = 0}, /* 55.3 MHz */
+	{.mscale = 1, .nscale = 0}, /* 83 MHz */
+};
+
+void __init spear6xx_clk_init(void)
+{
+	struct clk *clk, *clk1;
+
+	clk = clk_register_fixed_rate(NULL, "apb_pclk", NULL, CLK_IS_ROOT, 0);
+	clk_register_clkdev(clk, "apb_pclk", NULL);
+
+	clk = clk_register_fixed_rate(NULL, "osc_32k_clk", NULL, CLK_IS_ROOT,
+			32000);
+	clk_register_clkdev(clk, "osc_32k_clk", NULL);
+
+	clk = clk_register_fixed_rate(NULL, "osc_30m_clk", NULL, CLK_IS_ROOT,
+			30000000);
+	clk_register_clkdev(clk, "osc_30m_clk", NULL);
+
+	/* clock derived from 32 KHz osc clk */
+	clk = clk_register_gate(NULL, "rtc_spear", "osc_32k_clk", 0,
+			PERIP1_CLK_ENB, RTC_CLK_ENB, 0, &_lock);
+	clk_register_clkdev(clk, NULL, "rtc-spear");
+
+	/* clock derived from 30 MHz osc clk */
+	clk = clk_register_fixed_rate(NULL, "pll3_48m_clk", "osc_24m_clk", 0,
+			48000000);
+	clk_register_clkdev(clk, "pll3_48m_clk", NULL);
+
+	clk = clk_register_vco_pll("vco1_clk", "pll1_clk", NULL, "osc_30m_clk",
+			0, PLL1_CTR, PLL1_FRQ, pll_rtbl, ARRAY_SIZE(pll_rtbl),
+			&_lock, &clk1, NULL);
+	clk_register_clkdev(clk, "vco1_clk", NULL);
+	clk_register_clkdev(clk1, "pll1_clk", NULL);
+
+	clk = clk_register_vco_pll("vco2_clk", "pll2_clk", NULL,
+			"osc_30m_clk", 0, PLL2_CTR, PLL2_FRQ, pll_rtbl,
+			ARRAY_SIZE(pll_rtbl), &_lock, &clk1, NULL);
+	clk_register_clkdev(clk, "vco2_clk", NULL);
+	clk_register_clkdev(clk1, "pll2_clk", NULL);
+
+	clk = clk_register_fixed_factor(NULL, "wdt_clk", "osc_30m_clk", 0, 1,
+			1);
+	clk_register_clkdev(clk, NULL, "wdt");
+
+	/* clock derived from pll1 clk */
+	clk = clk_register_fixed_factor(NULL, "cpu_clk", "pll1_clk", 0, 1, 1);
+	clk_register_clkdev(clk, "cpu_clk", NULL);
+
+	clk = clk_register_divider(NULL, "ahb_clk", "pll1_clk",
+			CLK_SET_RATE_PARENT, CORE_CLK_CFG, HCLK_RATIO_SHIFT,
+			HCLK_RATIO_MASK, 0, &_lock);
+	clk_register_clkdev(clk, "ahb_clk", NULL);
+
+	clk = clk_register_aux("uart_synth_clk", "uart_synth_gate_clk",
+			"pll1_clk", 0, UART_CLK_SYNT, NULL, aux_rtbl,
+			ARRAY_SIZE(aux_rtbl), &_lock, &clk1);
+	clk_register_clkdev(clk, "uart_synth_clk", NULL);
+	clk_register_clkdev(clk1, "uart_synth_gate_clk", NULL);
+
+	clk = clk_register_mux(NULL, "uart_mux_clk", uart_parents,
+			ARRAY_SIZE(uart_parents), 0, PERIP_CLK_CFG,
+			UART_CLK_SHIFT, UART_CLK_MASK, 0, &_lock);
+	clk_register_clkdev(clk, "uart_mux_clk", NULL);
+
+	clk = clk_register_gate(NULL, "uart0", "uart_mux_clk", 0,
+			PERIP1_CLK_ENB, UART0_CLK_ENB, 0, &_lock);
+	clk_register_clkdev(clk, NULL, "d0000000.serial");
+
+	clk = clk_register_gate(NULL, "uart1", "uart_mux_clk", 0,
+			PERIP1_CLK_ENB, UART1_CLK_ENB, 0, &_lock);
+	clk_register_clkdev(clk, NULL, "d0080000.serial");
+
+	clk = clk_register_aux("firda_synth_clk", "firda_synth_gate_clk",
+			"pll1_clk", 0, FIRDA_CLK_SYNT, NULL, aux_rtbl,
+			ARRAY_SIZE(aux_rtbl), &_lock, &clk1);
+	clk_register_clkdev(clk, "firda_synth_clk", NULL);
+	clk_register_clkdev(clk1, "firda_synth_gate_clk", NULL);
+
+	clk = clk_register_mux(NULL, "firda_mux_clk", firda_parents,
+			ARRAY_SIZE(firda_parents), 0, PERIP_CLK_CFG,
+			FIRDA_CLK_SHIFT, FIRDA_CLK_MASK, 0, &_lock);
+	clk_register_clkdev(clk, "firda_mux_clk", NULL);
+
+	clk = clk_register_gate(NULL, "firda_clk", "firda_mux_clk", 0,
+			PERIP1_CLK_ENB, FIRDA_CLK_ENB, 0, &_lock);
+	clk_register_clkdev(clk, NULL, "firda");
+
+	clk = clk_register_aux("clcd_synth_clk", "clcd_synth_gate_clk",
+			"pll1_clk", 0, CLCD_CLK_SYNT, NULL, aux_rtbl,
+			ARRAY_SIZE(aux_rtbl), &_lock, &clk1);
+	clk_register_clkdev(clk, "clcd_synth_clk", NULL);
+	clk_register_clkdev(clk1, "clcd_synth_gate_clk", NULL);
+
+	clk = clk_register_mux(NULL, "clcd_mux_clk", clcd_parents,
+			ARRAY_SIZE(clcd_parents), 0, PERIP_CLK_CFG,
+			CLCD_CLK_SHIFT, CLCD_CLK_MASK, 0, &_lock);
+	clk_register_clkdev(clk, "clcd_mux_clk", NULL);
+
+	clk = clk_register_gate(NULL, "clcd_clk", "clcd_mux_clk", 0,
+			PERIP1_CLK_ENB, CLCD_CLK_ENB, 0, &_lock);
+	clk_register_clkdev(clk, NULL, "clcd");
+
+	/* gpt clocks */
+	clk = clk_register_gpt("gpt0_1_synth_clk", "pll1_clk", 0, PRSC0_CLK_CFG,
+			gpt_rtbl, ARRAY_SIZE(gpt_rtbl), &_lock);
+	clk_register_clkdev(clk, "gpt0_1_synth_clk", NULL);
+
+	clk = clk_register_mux(NULL, "gpt0_mux_clk", gpt0_1_parents,
+			ARRAY_SIZE(gpt0_1_parents), 0, PERIP_CLK_CFG,
+			GPT0_CLK_SHIFT, GPT_CLK_MASK, 0, &_lock);
+	clk_register_clkdev(clk, NULL, "gpt0");
+
+	clk = clk_register_mux(NULL, "gpt1_mux_clk", gpt0_1_parents,
+			ARRAY_SIZE(gpt0_1_parents), 0, PERIP_CLK_CFG,
+			GPT1_CLK_SHIFT, GPT_CLK_MASK, 0, &_lock);
+	clk_register_clkdev(clk, "gpt1_mux_clk", NULL);
+
+	clk = clk_register_gate(NULL, "gpt1_clk", "gpt1_mux_clk", 0,
+			PERIP1_CLK_ENB, GPT1_CLK_ENB, 0, &_lock);
+	clk_register_clkdev(clk, NULL, "gpt1");
+
+	clk = clk_register_gpt("gpt2_synth_clk", "pll1_clk", 0, PRSC1_CLK_CFG,
+			gpt_rtbl, ARRAY_SIZE(gpt_rtbl), &_lock);
+	clk_register_clkdev(clk, "gpt2_synth_clk", NULL);
+
+	clk = clk_register_mux(NULL, "gpt2_mux_clk", gpt2_parents,
+			ARRAY_SIZE(gpt2_parents), 0, PERIP_CLK_CFG,
+			GPT2_CLK_SHIFT, GPT_CLK_MASK, 0, &_lock);
+	clk_register_clkdev(clk, "gpt2_mux_clk", NULL);
+
+	clk = clk_register_gate(NULL, "gpt2_clk", "gpt2_mux_clk", 0,
+			PERIP1_CLK_ENB, GPT2_CLK_ENB, 0, &_lock);
+	clk_register_clkdev(clk, NULL, "gpt2");
+
+	clk = clk_register_gpt("gpt3_synth_clk", "pll1_clk", 0, PRSC2_CLK_CFG,
+			gpt_rtbl, ARRAY_SIZE(gpt_rtbl), &_lock);
+	clk_register_clkdev(clk, "gpt3_synth_clk", NULL);
+
+	clk = clk_register_mux(NULL, "gpt3_mux_clk", gpt3_parents,
+			ARRAY_SIZE(gpt3_parents), 0, PERIP_CLK_CFG,
+			GPT3_CLK_SHIFT, GPT_CLK_MASK, 0, &_lock);
+	clk_register_clkdev(clk, "gpt3_mux_clk", NULL);
+
+	clk = clk_register_gate(NULL, "gpt3_clk", "gpt3_mux_clk", 0,
+			PERIP1_CLK_ENB, GPT3_CLK_ENB, 0, &_lock);
+	clk_register_clkdev(clk, NULL, "gpt3");
+
+	/* clock derived from pll3 clk */
+	clk = clk_register_gate(NULL, "usbh0_clk", "pll3_48m_clk", 0,
+			PERIP1_CLK_ENB, USBH0_CLK_ENB, 0, &_lock);
+	clk_register_clkdev(clk, NULL, "usbh.0_clk");
+
+	clk = clk_register_gate(NULL, "usbh1_clk", "pll3_48m_clk", 0,
+			PERIP1_CLK_ENB, USBH1_CLK_ENB, 0, &_lock);
+	clk_register_clkdev(clk, NULL, "usbh.1_clk");
+
+	clk = clk_register_gate(NULL, "usbd_clk", "pll3_48m_clk", 0,
+			PERIP1_CLK_ENB, USBD_CLK_ENB, 0, &_lock);
+	clk_register_clkdev(clk, NULL, "designware_udc");
+
+	/* clock derived from ahb clk */
+	clk = clk_register_fixed_factor(NULL, "ahbmult2_clk", "ahb_clk", 0, 2,
+			1);
+	clk_register_clkdev(clk, "ahbmult2_clk", NULL);
+
+	clk = clk_register_mux(NULL, "ddr_clk", ddr_parents,
+			ARRAY_SIZE(ddr_parents),
+			0, PLL_CLK_CFG, MCTR_CLK_SHIFT, MCTR_CLK_MASK, 0,
+			&_lock);
+	clk_register_clkdev(clk, "ddr_clk", NULL);
+
+	clk = clk_register_divider(NULL, "apb_clk", "ahb_clk",
+			CLK_SET_RATE_PARENT, CORE_CLK_CFG, PCLK_RATIO_SHIFT,
+			PCLK_RATIO_MASK, 0, &_lock);
+	clk_register_clkdev(clk, "apb_clk", NULL);
+
+	clk = clk_register_gate(NULL, "dma_clk", "ahb_clk", 0, PERIP1_CLK_ENB,
+			DMA_CLK_ENB, 0, &_lock);
+	clk_register_clkdev(clk, NULL, "fc400000.dma");
+
+	clk = clk_register_gate(NULL, "fsmc_clk", "ahb_clk", 0, PERIP1_CLK_ENB,
+			FSMC_CLK_ENB, 0, &_lock);
+	clk_register_clkdev(clk, NULL, "d1800000.flash");
+
+	clk = clk_register_gate(NULL, "gmac_clk", "ahb_clk", 0, PERIP1_CLK_ENB,
+			GMAC_CLK_ENB, 0, &_lock);
+	clk_register_clkdev(clk, NULL, "e0800000.ethernet");
+
+	clk = clk_register_gate(NULL, "i2c_clk", "ahb_clk", 0, PERIP1_CLK_ENB,
+			I2C_CLK_ENB, 0, &_lock);
+	clk_register_clkdev(clk, NULL, "d0200000.i2c");
+
+	clk = clk_register_gate(NULL, "jpeg_clk", "ahb_clk", 0, PERIP1_CLK_ENB,
+			JPEG_CLK_ENB, 0, &_lock);
+	clk_register_clkdev(clk, NULL, "jpeg");
+
+	clk = clk_register_gate(NULL, "smi_clk", "ahb_clk", 0, PERIP1_CLK_ENB,
+			SMI_CLK_ENB, 0, &_lock);
+	clk_register_clkdev(clk, NULL, "fc000000.flash");
+
+	/* clock derived from apb clk */
+	clk = clk_register_gate(NULL, "adc_clk", "apb_clk", 0, PERIP1_CLK_ENB,
+			ADC_CLK_ENB, 0, &_lock);
+	clk_register_clkdev(clk, NULL, "adc");
+
+	clk = clk_register_fixed_factor(NULL, "gpio0_clk", "apb_clk", 0, 1, 1);
+	clk_register_clkdev(clk, NULL, "f0100000.gpio");
+
+	clk = clk_register_gate(NULL, "gpio1_clk", "apb_clk", 0, PERIP1_CLK_ENB,
+			GPIO1_CLK_ENB, 0, &_lock);
+	clk_register_clkdev(clk, NULL, "fc980000.gpio");
+
+	clk = clk_register_gate(NULL, "gpio2_clk", "apb_clk", 0, PERIP1_CLK_ENB,
+			GPIO2_CLK_ENB, 0, &_lock);
+	clk_register_clkdev(clk, NULL, "d8100000.gpio");
+
+	clk = clk_register_gate(NULL, "ssp0_clk", "apb_clk", 0, PERIP1_CLK_ENB,
+			SSP0_CLK_ENB, 0, &_lock);
+	clk_register_clkdev(clk, NULL, "ssp-pl022.0");
+
+	clk = clk_register_gate(NULL, "ssp1_clk", "apb_clk", 0, PERIP1_CLK_ENB,
+			SSP1_CLK_ENB, 0, &_lock);
+	clk_register_clkdev(clk, NULL, "ssp-pl022.1");
+
+	clk = clk_register_gate(NULL, "ssp2_clk", "apb_clk", 0, PERIP1_CLK_ENB,
+			SSP2_CLK_ENB, 0, &_lock);
+	clk_register_clkdev(clk, NULL, "ssp-pl022.2");
+}
diff --git a/drivers/clocksource/Makefile b/drivers/clocksource/Makefile
index 8d81a1d..dd3e661 100644
--- a/drivers/clocksource/Makefile
+++ b/drivers/clocksource/Makefile
@@ -6,6 +6,7 @@
 obj-$(CONFIG_SH_TIMER_CMT)	+= sh_cmt.o
 obj-$(CONFIG_SH_TIMER_MTU2)	+= sh_mtu2.o
 obj-$(CONFIG_SH_TIMER_TMU)	+= sh_tmu.o
+obj-$(CONFIG_EM_TIMER_STI)	+= em_sti.o
 obj-$(CONFIG_CLKBLD_I8253)	+= i8253.o
 obj-$(CONFIG_CLKSRC_MMIO)	+= mmio.o
 obj-$(CONFIG_DW_APB_TIMER)	+= dw_apb_timer.o
diff --git a/drivers/clocksource/em_sti.c b/drivers/clocksource/em_sti.c
new file mode 100644
index 0000000..372051d
--- /dev/null
+++ b/drivers/clocksource/em_sti.c
@@ -0,0 +1,406 @@
+/*
+ * Emma Mobile Timer Support - STI
+ *
+ *  Copyright (C) 2012 Magnus Damm
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
+ */
+
+#include <linux/init.h>
+#include <linux/platform_device.h>
+#include <linux/spinlock.h>
+#include <linux/interrupt.h>
+#include <linux/ioport.h>
+#include <linux/io.h>
+#include <linux/clk.h>
+#include <linux/irq.h>
+#include <linux/err.h>
+#include <linux/delay.h>
+#include <linux/clocksource.h>
+#include <linux/clockchips.h>
+#include <linux/slab.h>
+#include <linux/module.h>
+
+enum { USER_CLOCKSOURCE, USER_CLOCKEVENT, USER_NR };
+
+struct em_sti_priv {
+	void __iomem *base;
+	struct clk *clk;
+	struct platform_device *pdev;
+	unsigned int active[USER_NR];
+	unsigned long rate;
+	raw_spinlock_t lock;
+	struct clock_event_device ced;
+	struct clocksource cs;
+};
+
+#define STI_CONTROL 0x00
+#define STI_COMPA_H 0x10
+#define STI_COMPA_L 0x14
+#define STI_COMPB_H 0x18
+#define STI_COMPB_L 0x1c
+#define STI_COUNT_H 0x20
+#define STI_COUNT_L 0x24
+#define STI_COUNT_RAW_H 0x28
+#define STI_COUNT_RAW_L 0x2c
+#define STI_SET_H 0x30
+#define STI_SET_L 0x34
+#define STI_INTSTATUS 0x40
+#define STI_INTRAWSTATUS 0x44
+#define STI_INTENSET 0x48
+#define STI_INTENCLR 0x4c
+#define STI_INTFFCLR 0x50
+
+static inline unsigned long em_sti_read(struct em_sti_priv *p, int offs)
+{
+	return ioread32(p->base + offs);
+}
+
+static inline void em_sti_write(struct em_sti_priv *p, int offs,
+				unsigned long value)
+{
+	iowrite32(value, p->base + offs);
+}
+
+static int em_sti_enable(struct em_sti_priv *p)
+{
+	int ret;
+
+	/* enable clock */
+	ret = clk_enable(p->clk);
+	if (ret) {
+		dev_err(&p->pdev->dev, "cannot enable clock\n");
+		return ret;
+	}
+
+	/* configure channel, periodic mode and maximum timeout */
+	p->rate = clk_get_rate(p->clk);
+
+	/* reset the counter */
+	em_sti_write(p, STI_SET_H, 0x40000000);
+	em_sti_write(p, STI_SET_L, 0x00000000);
+
+	/* mask and clear pending interrupts */
+	em_sti_write(p, STI_INTENCLR, 3);
+	em_sti_write(p, STI_INTFFCLR, 3);
+
+	/* enable updates of counter registers */
+	em_sti_write(p, STI_CONTROL, 1);
+
+	return 0;
+}
+
+static void em_sti_disable(struct em_sti_priv *p)
+{
+	/* mask interrupts */
+	em_sti_write(p, STI_INTENCLR, 3);
+
+	/* stop clock */
+	clk_disable(p->clk);
+}
+
+static cycle_t em_sti_count(struct em_sti_priv *p)
+{
+	cycle_t ticks;
+	unsigned long flags;
+
+	/*
+	 * The STI hardware buffers the 48-bit count, but to break it out
+	 * into two 32-bit accesses the registers must be read in a certain
+	 * order: always read STI_COUNT_H before STI_COUNT_L.
+	 */
+	raw_spin_lock_irqsave(&p->lock, flags);
+	ticks = (cycle_t)(em_sti_read(p, STI_COUNT_H) & 0xffff) << 32;
+	ticks |= em_sti_read(p, STI_COUNT_L);
+	raw_spin_unlock_irqrestore(&p->lock, flags);
+
+	return ticks;
+}
+
+static cycle_t em_sti_set_next(struct em_sti_priv *p, cycle_t next)
+{
+	unsigned long flags;
+
+	raw_spin_lock_irqsave(&p->lock, flags);
+
+	/* mask compare A interrupt */
+	em_sti_write(p, STI_INTENCLR, 1);
+
+	/* update compare A value */
+	em_sti_write(p, STI_COMPA_H, next >> 32);
+	em_sti_write(p, STI_COMPA_L, next & 0xffffffff);
+
+	/* clear compare A interrupt source */
+	em_sti_write(p, STI_INTFFCLR, 1);
+
+	/* unmask compare A interrupt */
+	em_sti_write(p, STI_INTENSET, 1);
+
+	raw_spin_unlock_irqrestore(&p->lock, flags);
+
+	return next;
+}
+
+static irqreturn_t em_sti_interrupt(int irq, void *dev_id)
+{
+	struct em_sti_priv *p = dev_id;
+
+	p->ced.event_handler(&p->ced);
+	return IRQ_HANDLED;
+}
+
+static int em_sti_start(struct em_sti_priv *p, unsigned int user)
+{
+	unsigned long flags;
+	int used_before;
+	int ret = 0;
+
+	raw_spin_lock_irqsave(&p->lock, flags);
+	used_before = p->active[USER_CLOCKSOURCE] | p->active[USER_CLOCKEVENT];
+	if (!used_before)
+		ret = em_sti_enable(p);
+
+	if (!ret)
+		p->active[user] = 1;
+	raw_spin_unlock_irqrestore(&p->lock, flags);
+
+	return ret;
+}
+
+static void em_sti_stop(struct em_sti_priv *p, unsigned int user)
+{
+	unsigned long flags;
+	int used_before, used_after;
+
+	raw_spin_lock_irqsave(&p->lock, flags);
+	used_before = p->active[USER_CLOCKSOURCE] | p->active[USER_CLOCKEVENT];
+	p->active[user] = 0;
+	used_after = p->active[USER_CLOCKSOURCE] | p->active[USER_CLOCKEVENT];
+
+	if (used_before && !used_after)
+		em_sti_disable(p);
+	raw_spin_unlock_irqrestore(&p->lock, flags);
+}
+
+static struct em_sti_priv *cs_to_em_sti(struct clocksource *cs)
+{
+	return container_of(cs, struct em_sti_priv, cs);
+}
+
+static cycle_t em_sti_clocksource_read(struct clocksource *cs)
+{
+	return em_sti_count(cs_to_em_sti(cs));
+}
+
+static int em_sti_clocksource_enable(struct clocksource *cs)
+{
+	int ret;
+	struct em_sti_priv *p = cs_to_em_sti(cs);
+
+	ret = em_sti_start(p, USER_CLOCKSOURCE);
+	if (!ret)
+		__clocksource_updatefreq_hz(cs, p->rate);
+	return ret;
+}
+
+static void em_sti_clocksource_disable(struct clocksource *cs)
+{
+	em_sti_stop(cs_to_em_sti(cs), USER_CLOCKSOURCE);
+}
+
+static void em_sti_clocksource_resume(struct clocksource *cs)
+{
+	em_sti_clocksource_enable(cs);
+}
+
+static int em_sti_register_clocksource(struct em_sti_priv *p)
+{
+	struct clocksource *cs = &p->cs;
+
+	memset(cs, 0, sizeof(*cs));
+	cs->name = dev_name(&p->pdev->dev);
+	cs->rating = 200;
+	cs->read = em_sti_clocksource_read;
+	cs->enable = em_sti_clocksource_enable;
+	cs->disable = em_sti_clocksource_disable;
+	cs->suspend = em_sti_clocksource_disable;
+	cs->resume = em_sti_clocksource_resume;
+	cs->mask = CLOCKSOURCE_MASK(48);
+	cs->flags = CLOCK_SOURCE_IS_CONTINUOUS;
+
+	dev_info(&p->pdev->dev, "used as clock source\n");
+
+	/* Register with dummy 1 Hz value, gets updated in ->enable() */
+	clocksource_register_hz(cs, 1);
+	return 0;
+}
+
+static struct em_sti_priv *ced_to_em_sti(struct clock_event_device *ced)
+{
+	return container_of(ced, struct em_sti_priv, ced);
+}
+
+static void em_sti_clock_event_mode(enum clock_event_mode mode,
+				    struct clock_event_device *ced)
+{
+	struct em_sti_priv *p = ced_to_em_sti(ced);
+
+	/* deal with old setting first */
+	switch (ced->mode) {
+	case CLOCK_EVT_MODE_ONESHOT:
+		em_sti_stop(p, USER_CLOCKEVENT);
+		break;
+	default:
+		break;
+	}
+
+	switch (mode) {
+	case CLOCK_EVT_MODE_ONESHOT:
+		dev_info(&p->pdev->dev, "used for oneshot clock events\n");
+		em_sti_start(p, USER_CLOCKEVENT);
+		clockevents_config(&p->ced, p->rate);
+		break;
+	case CLOCK_EVT_MODE_SHUTDOWN:
+	case CLOCK_EVT_MODE_UNUSED:
+		em_sti_stop(p, USER_CLOCKEVENT);
+		break;
+	default:
+		break;
+	}
+}
+
+static int em_sti_clock_event_next(unsigned long delta,
+				   struct clock_event_device *ced)
+{
+	struct em_sti_priv *p = ced_to_em_sti(ced);
+	cycle_t next;
+	int safe;
+
+	next = em_sti_set_next(p, em_sti_count(p) + delta);
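+	/*
+	 * If the counter has already caught up with the new compare value,
+	 * the event may have been missed; a non-zero return lets the
+	 * clockevents core retry with a larger delta.
+	 */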
+	safe = em_sti_count(p) < (next - 1);
+
+	return !safe;
+}
+
+static void em_sti_register_clockevent(struct em_sti_priv *p)
+{
+	struct clock_event_device *ced = &p->ced;
+
+	memset(ced, 0, sizeof(*ced));
+	ced->name = dev_name(&p->pdev->dev);
+	ced->features = CLOCK_EVT_FEAT_ONESHOT;
+	ced->rating = 200;
+	ced->cpumask = cpumask_of(0);
+	ced->set_next_event = em_sti_clock_event_next;
+	ced->set_mode = em_sti_clock_event_mode;
+
+	dev_info(&p->pdev->dev, "used for clock events\n");
+
+	/* Register with dummy 1 Hz value, gets updated in ->set_mode() */
+	clockevents_config_and_register(ced, 1, 2, 0xffffffff);
+}
+
+static int __devinit em_sti_probe(struct platform_device *pdev)
+{
+	struct em_sti_priv *p;
+	struct resource *res;
+	int irq, ret;
+
+	p = kzalloc(sizeof(*p), GFP_KERNEL);
+	if (p == NULL) {
+		dev_err(&pdev->dev, "failed to allocate driver data\n");
+		ret = -ENOMEM;
+		goto err0;
+	}
+
+	p->pdev = pdev;
+	platform_set_drvdata(pdev, p);
+
+	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+	if (!res) {
+		dev_err(&pdev->dev, "failed to get I/O memory\n");
+		ret = -EINVAL;
+		goto err0;
+	}
+
+	irq = platform_get_irq(pdev, 0);
+	if (irq < 0) {
+		dev_err(&pdev->dev, "failed to get irq\n");
+		ret = -EINVAL;
+		goto err0;
+	}
+
+	/* map memory, let base point to the STI instance */
+	p->base = ioremap_nocache(res->start, resource_size(res));
+	if (p->base == NULL) {
+		dev_err(&pdev->dev, "failed to remap I/O memory\n");
+		ret = -ENXIO;
+		goto err0;
+	}
+
+	/* get hold of clock */
+	p->clk = clk_get(&pdev->dev, "sclk");
+	if (IS_ERR(p->clk)) {
+		dev_err(&pdev->dev, "cannot get clock\n");
+		ret = PTR_ERR(p->clk);
+		goto err1;
+	}
+
+	if (request_irq(irq, em_sti_interrupt,
+			IRQF_TIMER | IRQF_IRQPOLL | IRQF_NOBALANCING,
+			dev_name(&pdev->dev), p)) {
+		dev_err(&pdev->dev, "failed to request low IRQ\n");
+		ret = -ENOENT;
+		goto err2;
+	}
+
+	raw_spin_lock_init(&p->lock);
+	em_sti_register_clockevent(p);
+	em_sti_register_clocksource(p);
+	return 0;
+
+err2:
+	clk_put(p->clk);
+err1:
+	iounmap(p->base);
+err0:
+	kfree(p);
+	return ret;
+}
+
+static int __devexit em_sti_remove(struct platform_device *pdev)
+{
+	return -EBUSY; /* cannot unregister clockevent and clocksource */
+}
+
+static const struct of_device_id em_sti_dt_ids[] __devinitconst = {
+	{ .compatible = "renesas,em-sti", },
+	{},
+};
+MODULE_DEVICE_TABLE(of, em_sti_dt_ids);
+
+static struct platform_driver em_sti_device_driver = {
+	.probe		= em_sti_probe,
+	.remove		= __devexit_p(em_sti_remove),
+	.driver		= {
+		.name	= "em_sti",
+		.of_match_table = em_sti_dt_ids,
+	}
+};
+
+module_platform_driver(em_sti_device_driver);
+
+MODULE_AUTHOR("Magnus Damm");
+MODULE_DESCRIPTION("Renesas Emma Mobile STI Timer Driver");
+MODULE_LICENSE("GPL v2");
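The probe above obtains its functional clock with clk_get(&pdev->dev, "sclk"), so the platform clock code has to publish a matching lookup. A hedged provider-side sketch; the device name "em_sti.0", the clock name "sti_sclk" and the parent "sti_parent_clk" are placeholders, not taken from this patch:

#include <linux/clk-provider.h>
#include <linux/clkdev.h>
#include <linux/init.h>

/* Publish a clock as ("sclk", "em_sti.0") so the probe's clk_get() finds it. */
static void __init example_em_sti_clk_setup(void)
{
	struct clk *clk;

	clk = clk_register_fixed_factor(NULL, "sti_sclk", "sti_parent_clk",
			0, 1, 1);
	clk_register_clkdev(clk, "sclk", "em_sti.0");
}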
diff --git a/drivers/clocksource/sh_cmt.c b/drivers/clocksource/sh_cmt.c
index 32fe9ef..98b06ba 100644
--- a/drivers/clocksource/sh_cmt.c
+++ b/drivers/clocksource/sh_cmt.c
@@ -48,13 +48,13 @@
 	unsigned long next_match_value;
 	unsigned long max_match_value;
 	unsigned long rate;
-	spinlock_t lock;
+	raw_spinlock_t lock;
 	struct clock_event_device ced;
 	struct clocksource cs;
 	unsigned long total_cycles;
 };
 
-static DEFINE_SPINLOCK(sh_cmt_lock);
+static DEFINE_RAW_SPINLOCK(sh_cmt_lock);
 
 #define CMSTR -1 /* shared register */
 #define CMCSR 0 /* channel register */
@@ -139,7 +139,7 @@
 	unsigned long flags, value;
 
 	/* start stop register shared by multiple timer channels */
-	spin_lock_irqsave(&sh_cmt_lock, flags);
+	raw_spin_lock_irqsave(&sh_cmt_lock, flags);
 	value = sh_cmt_read(p, CMSTR);
 
 	if (start)
@@ -148,7 +148,7 @@
 		value &= ~(1 << cfg->timer_bit);
 
 	sh_cmt_write(p, CMSTR, value);
-	spin_unlock_irqrestore(&sh_cmt_lock, flags);
+	raw_spin_unlock_irqrestore(&sh_cmt_lock, flags);
 }
 
 static int sh_cmt_enable(struct sh_cmt_priv *p, unsigned long *rate)
@@ -328,9 +328,9 @@
 {
 	unsigned long flags;
 
-	spin_lock_irqsave(&p->lock, flags);
+	raw_spin_lock_irqsave(&p->lock, flags);
 	__sh_cmt_set_next(p, delta);
-	spin_unlock_irqrestore(&p->lock, flags);
+	raw_spin_unlock_irqrestore(&p->lock, flags);
 }
 
 static irqreturn_t sh_cmt_interrupt(int irq, void *dev_id)
@@ -385,7 +385,7 @@
 	int ret = 0;
 	unsigned long flags;
 
-	spin_lock_irqsave(&p->lock, flags);
+	raw_spin_lock_irqsave(&p->lock, flags);
 
 	if (!(p->flags & (FLAG_CLOCKEVENT | FLAG_CLOCKSOURCE)))
 		ret = sh_cmt_enable(p, &p->rate);
@@ -398,7 +398,7 @@
 	if ((flag == FLAG_CLOCKSOURCE) && (!(p->flags & FLAG_CLOCKEVENT)))
 		__sh_cmt_set_next(p, p->max_match_value);
  out:
-	spin_unlock_irqrestore(&p->lock, flags);
+	raw_spin_unlock_irqrestore(&p->lock, flags);
 
 	return ret;
 }
@@ -408,7 +408,7 @@
 	unsigned long flags;
 	unsigned long f;
 
-	spin_lock_irqsave(&p->lock, flags);
+	raw_spin_lock_irqsave(&p->lock, flags);
 
 	f = p->flags & (FLAG_CLOCKEVENT | FLAG_CLOCKSOURCE);
 	p->flags &= ~flag;
@@ -420,7 +420,7 @@
 	if ((flag == FLAG_CLOCKEVENT) && (p->flags & FLAG_CLOCKSOURCE))
 		__sh_cmt_set_next(p, p->max_match_value);
 
-	spin_unlock_irqrestore(&p->lock, flags);
+	raw_spin_unlock_irqrestore(&p->lock, flags);
 }
 
 static struct sh_cmt_priv *cs_to_sh_cmt(struct clocksource *cs)
@@ -435,13 +435,13 @@
 	unsigned long value;
 	int has_wrapped;
 
-	spin_lock_irqsave(&p->lock, flags);
+	raw_spin_lock_irqsave(&p->lock, flags);
 	value = p->total_cycles;
 	raw = sh_cmt_get_counter(p, &has_wrapped);
 
 	if (unlikely(has_wrapped))
 		raw += p->match_value + 1;
-	spin_unlock_irqrestore(&p->lock, flags);
+	raw_spin_unlock_irqrestore(&p->lock, flags);
 
 	return value + raw;
 }
@@ -591,7 +591,7 @@
 		p->max_match_value = (1 << p->width) - 1;
 
 	p->match_value = p->max_match_value;
-	spin_lock_init(&p->lock);
+	raw_spin_lock_init(&p->lock);
 
 	if (clockevent_rating)
 		sh_cmt_register_clockevent(p, name, clockevent_rating);
diff --git a/drivers/clocksource/sh_mtu2.c b/drivers/clocksource/sh_mtu2.c
index a2172f6..d9b76ca 100644
--- a/drivers/clocksource/sh_mtu2.c
+++ b/drivers/clocksource/sh_mtu2.c
@@ -43,7 +43,7 @@
 	struct clock_event_device ced;
 };
 
-static DEFINE_SPINLOCK(sh_mtu2_lock);
+static DEFINE_RAW_SPINLOCK(sh_mtu2_lock);
 
 #define TSTR -1 /* shared register */
 #define TCR  0 /* channel register */
@@ -107,7 +107,7 @@
 	unsigned long flags, value;
 
 	/* start stop register shared by multiple timer channels */
-	spin_lock_irqsave(&sh_mtu2_lock, flags);
+	raw_spin_lock_irqsave(&sh_mtu2_lock, flags);
 	value = sh_mtu2_read(p, TSTR);
 
 	if (start)
@@ -116,7 +116,7 @@
 		value &= ~(1 << cfg->timer_bit);
 
 	sh_mtu2_write(p, TSTR, value);
-	spin_unlock_irqrestore(&sh_mtu2_lock, flags);
+	raw_spin_unlock_irqrestore(&sh_mtu2_lock, flags);
 }
 
 static int sh_mtu2_enable(struct sh_mtu2_priv *p)
diff --git a/drivers/clocksource/sh_tmu.c b/drivers/clocksource/sh_tmu.c
index 97f54b6..c1b51d4 100644
--- a/drivers/clocksource/sh_tmu.c
+++ b/drivers/clocksource/sh_tmu.c
@@ -45,7 +45,7 @@
 	struct clocksource cs;
 };
 
-static DEFINE_SPINLOCK(sh_tmu_lock);
+static DEFINE_RAW_SPINLOCK(sh_tmu_lock);
 
 #define TSTR -1 /* shared register */
 #define TCOR  0 /* channel register */
@@ -95,7 +95,7 @@
 	unsigned long flags, value;
 
 	/* start stop register shared by multiple timer channels */
-	spin_lock_irqsave(&sh_tmu_lock, flags);
+	raw_spin_lock_irqsave(&sh_tmu_lock, flags);
 	value = sh_tmu_read(p, TSTR);
 
 	if (start)
@@ -104,7 +104,7 @@
 		value &= ~(1 << cfg->timer_bit);
 
 	sh_tmu_write(p, TSTR, value);
-	spin_unlock_irqrestore(&sh_tmu_lock, flags);
+	raw_spin_unlock_irqrestore(&sh_tmu_lock, flags);
 }
 
 static int sh_tmu_enable(struct sh_tmu_priv *p)
@@ -245,12 +245,7 @@
 
 	sh_tmu_enable(p);
 
-	/* TODO: calculate good shift from rate and counter bit width */
-
-	ced->shift = 32;
-	ced->mult = div_sc(p->rate, NSEC_PER_SEC, ced->shift);
-	ced->max_delta_ns = clockevent_delta2ns(0xffffffff, ced);
-	ced->min_delta_ns = 5000;
+	clockevents_config(ced, p->rate);
 
 	if (periodic) {
 		p->periodic = (p->rate + HZ/2) / HZ;
@@ -323,7 +318,8 @@
 	ced->set_mode = sh_tmu_clock_event_mode;
 
 	dev_info(&p->pdev->dev, "used for clock events\n");
-	clockevents_register_device(ced);
+
+	clockevents_config_and_register(ced, 1, 0x300, 0xffffffff);
 
 	ret = setup_irq(p->irqaction.irq, &p->irqaction);
 	if (ret) {
diff --git a/drivers/crypto/mv_cesa.c b/drivers/crypto/mv_cesa.c
index e6ecc5f..1cc6b3f 100644
--- a/drivers/crypto/mv_cesa.c
+++ b/drivers/crypto/mv_cesa.c
@@ -16,6 +16,7 @@
 #include <linux/scatterlist.h>
 #include <linux/slab.h>
 #include <linux/module.h>
+#include <linux/clk.h>
 #include <crypto/internal/hash.h>
 #include <crypto/sha.h>
 
@@ -79,6 +80,7 @@
 	void __iomem *reg;
 	void __iomem *sram;
 	int irq;
+	struct clk *clk;
 	struct task_struct *queue_th;
 
 	/* the lock protects queue and eng_st */
@@ -1053,6 +1055,12 @@
 	if (ret)
 		goto err_thread;
 
+	/* Not all platforms can gate the clock, so it is not
+	   an error if the clock does not exist. */
+	cp->clk = clk_get(&pdev->dev, NULL);
+	if (!IS_ERR(cp->clk))
+		clk_prepare_enable(cp->clk);
+
 	writel(SEC_INT_ACCEL0_DONE, cpg->reg + SEC_ACCEL_INT_MASK);
 	writel(SEC_CFG_STOP_DIG_ERR, cpg->reg + SEC_ACCEL_CFG);
 	writel(SRAM_CONFIG, cpg->reg + SEC_ACCEL_DESC_P0);
@@ -1118,6 +1126,12 @@
 	memset(cp->sram, 0, cp->sram_size);
 	iounmap(cp->sram);
 	iounmap(cp->reg);
+
+	if (!IS_ERR(cp->clk)) {
+		clk_disable_unprepare(cp->clk);
+		clk_put(cp->clk);
+	}
+
 	kfree(cp);
 	cpg = NULL;
 	return 0;
diff --git a/drivers/dma/Kconfig b/drivers/dma/Kconfig
index ef378b5..aadeb5b 100644
--- a/drivers/dma/Kconfig
+++ b/drivers/dma/Kconfig
@@ -238,6 +238,7 @@
 config MXS_DMA
 	bool "MXS DMA support"
 	depends on SOC_IMX23 || SOC_IMX28
+	select STMP_DEVICE
 	select DMA_ENGINE
 	help
 	  Support the MXS DMA engine. This engine including APBH-DMA
diff --git a/drivers/dma/amba-pl08x.c b/drivers/dma/amba-pl08x.c
index 3d704ab..49ecbbb 100644
--- a/drivers/dma/amba-pl08x.c
+++ b/drivers/dma/amba-pl08x.c
@@ -95,10 +95,14 @@
  * struct vendor_data - vendor-specific config parameters for PL08x derivatives
  * @channels: the number of channels available in this variant
  * @dualmaster: whether this version supports dual AHB masters or not.
+ * @nomadik: whether the channels have Nomadik security extension bits
+ *	that need to be checked for permission before use, and whether some
+ *	registers are missing
  */
 struct vendor_data {
 	u8 channels;
 	bool dualmaster;
+	bool nomadik;
 };
 
 /*
@@ -385,7 +389,7 @@
 
 		spin_lock_irqsave(&ch->lock, flags);
 
-		if (!ch->serving) {
+		if (!ch->locked && !ch->serving) {
 			ch->serving = virt_chan;
 			ch->signal = -1;
 			spin_unlock_irqrestore(&ch->lock, flags);
@@ -1324,7 +1328,7 @@
 	int ret, tmp;
 
 	dev_dbg(&pl08x->adev->dev, "%s prepare transaction of %d bytes from %s\n",
-			__func__, sgl->length, plchan->name);
+			__func__, sg_dma_len(sgl), plchan->name);
 
 	txd = pl08x_get_txd(plchan, flags);
 	if (!txd) {
@@ -1378,11 +1382,11 @@
 
 		dsg->len = sg_dma_len(sg);
 		if (direction == DMA_MEM_TO_DEV) {
-			dsg->src_addr = sg_phys(sg);
+			dsg->src_addr = sg_dma_address(sg);
 			dsg->dst_addr = slave_addr;
 		} else {
 			dsg->src_addr = slave_addr;
-			dsg->dst_addr = sg_phys(sg);
+			dsg->dst_addr = sg_dma_address(sg);
 		}
 	}
 
@@ -1484,6 +1488,9 @@
  */
 static void pl08x_ensure_on(struct pl08x_driver_data *pl08x)
 {
+	/* The Nomadik variant does not have the config register */
+	if (pl08x->vd->nomadik)
+		return;
 	writel(PL080_CONFIG_ENABLE, pl08x->base + PL080_CONFIG);
 }
 
@@ -1616,7 +1623,7 @@
 			__func__, err);
 		writel(err, pl08x->base + PL080_ERR_CLEAR);
 	}
-	tc = readl(pl08x->base + PL080_INT_STATUS);
+	tc = readl(pl08x->base + PL080_TC_STATUS);
 	if (tc)
 		writel(tc, pl08x->base + PL080_TC_CLEAR);
 
@@ -1773,8 +1780,10 @@
 		spin_lock_irqsave(&ch->lock, flags);
 		virt_chan = ch->serving;
 
-		seq_printf(s, "%d\t\t%s\n",
-			   ch->id, virt_chan ? virt_chan->name : "(none)");
+		seq_printf(s, "%d\t\t%s%s\n",
+			   ch->id,
+			   virt_chan ? virt_chan->name : "(none)",
+			   ch->locked ? " LOCKED" : "");
 
 		spin_unlock_irqrestore(&ch->lock, flags);
 	}
@@ -1918,7 +1927,7 @@
 	}
 
 	/* Initialize physical channels */
-	pl08x->phy_chans = kmalloc((vd->channels * sizeof(*pl08x->phy_chans)),
+	pl08x->phy_chans = kzalloc((vd->channels * sizeof(*pl08x->phy_chans)),
 			GFP_KERNEL);
 	if (!pl08x->phy_chans) {
 		dev_err(&adev->dev, "%s failed to allocate "
@@ -1933,8 +1942,23 @@
 		ch->id = i;
 		ch->base = pl08x->base + PL080_Cx_BASE(i);
 		spin_lock_init(&ch->lock);
-		ch->serving = NULL;
 		ch->signal = -1;
+
+		/*
+		 * Nomadik variants can have channels that are locked
+		 * down for the secure world only. Lock up these channels
+		 * by perpetually serving a dummy virtual channel.
+		 */
+		if (vd->nomadik) {
+			u32 val;
+
+			val = readl(ch->base + PL080_CH_CONFIG);
+			if (val & (PL080N_CONFIG_ITPROT | PL080N_CONFIG_SECPROT)) {
+				dev_info(&adev->dev, "physical channel %d reserved for secure access only\n", i);
+				ch->locked = true;
+			}
+		}
+
 		dev_dbg(&adev->dev, "physical channel %d is %s\n",
 			i, pl08x_phy_channel_busy(ch) ? "BUSY" : "FREE");
 	}
@@ -2017,6 +2041,12 @@
 	.dualmaster = true,
 };
 
+static struct vendor_data vendor_nomadik = {
+	.channels = 8,
+	.dualmaster = true,
+	.nomadik = true,
+};
+
 static struct vendor_data vendor_pl081 = {
 	.channels = 2,
 	.dualmaster = false,
@@ -2037,9 +2067,9 @@
 	},
 	/* Nomadik 8815 PL080 variant */
 	{
-		.id	= 0x00280880,
+		.id	= 0x00280080,
 		.mask	= 0x00ffffff,
-		.data	= &vendor_pl080,
+		.data	= &vendor_nomadik,
 	},
 	{ 0, 0 },
 };
diff --git a/drivers/dma/at_hdmac.c b/drivers/dma/at_hdmac.c
index bf0d7e4..7292aa8 100644
--- a/drivers/dma/at_hdmac.c
+++ b/drivers/dma/at_hdmac.c
@@ -39,7 +39,6 @@
  */
 
 #define	ATC_DEFAULT_CFG		(ATC_FIFOCFG_HALFFIFO)
-#define	ATC_DEFAULT_CTRLA	(0)
 #define	ATC_DEFAULT_CTRLB	(ATC_SIF(AT_DMA_MEM_IF) \
 				|ATC_DIF(AT_DMA_MEM_IF))
 
@@ -574,7 +573,6 @@
 		return NULL;
 	}
 
-	ctrla =   ATC_DEFAULT_CTRLA;
 	ctrlb =   ATC_DEFAULT_CTRLB | ATC_IEN
 		| ATC_SRC_ADDR_MODE_INCR
 		| ATC_DST_ADDR_MODE_INCR
@@ -585,13 +583,13 @@
 	 * of the most common optimization.
 	 */
 	if (!((src | dest  | len) & 3)) {
-		ctrla |= ATC_SRC_WIDTH_WORD | ATC_DST_WIDTH_WORD;
+		ctrla = ATC_SRC_WIDTH_WORD | ATC_DST_WIDTH_WORD;
 		src_width = dst_width = 2;
 	} else if (!((src | dest | len) & 1)) {
-		ctrla |= ATC_SRC_WIDTH_HALFWORD | ATC_DST_WIDTH_HALFWORD;
+		ctrla = ATC_SRC_WIDTH_HALFWORD | ATC_DST_WIDTH_HALFWORD;
 		src_width = dst_width = 1;
 	} else {
-		ctrla |= ATC_SRC_WIDTH_BYTE | ATC_DST_WIDTH_BYTE;
+		ctrla = ATC_SRC_WIDTH_BYTE | ATC_DST_WIDTH_BYTE;
 		src_width = dst_width = 0;
 	}
 
@@ -668,7 +666,8 @@
 		return NULL;
 	}
 
-	ctrla = ATC_DEFAULT_CTRLA | atslave->ctrla;
+	ctrla =   ATC_SCSIZE(sconfig->src_maxburst)
+		| ATC_DCSIZE(sconfig->dst_maxburst);
 	ctrlb = ATC_IEN;
 
 	switch (direction) {
@@ -796,12 +795,12 @@
 		enum dma_transfer_direction direction)
 {
 	struct at_dma_chan	*atchan = to_at_dma_chan(chan);
-	struct at_dma_slave	*atslave = chan->private;
 	struct dma_slave_config	*sconfig = &atchan->dma_sconfig;
 	u32			ctrla;
 
 	/* prepare common CRTLA value */
-	ctrla =   ATC_DEFAULT_CTRLA | atslave->ctrla
+	ctrla =   ATC_SCSIZE(sconfig->src_maxburst)
+		| ATC_DCSIZE(sconfig->dst_maxburst)
 		| ATC_DST_WIDTH(reg_width)
 		| ATC_SRC_WIDTH(reg_width)
 		| period_len >> reg_width;
diff --git a/drivers/dma/at_hdmac_regs.h b/drivers/dma/at_hdmac_regs.h
index 897a8bc..8a6c8e8 100644
--- a/drivers/dma/at_hdmac_regs.h
+++ b/drivers/dma/at_hdmac_regs.h
@@ -87,7 +87,26 @@
 /* Bitfields in CTRLA */
 #define	ATC_BTSIZE_MAX		0xFFFFUL	/* Maximum Buffer Transfer Size */
 #define	ATC_BTSIZE(x)		(ATC_BTSIZE_MAX & (x)) /* Buffer Transfer Size */
-/* Chunck Tranfer size definitions are in at_hdmac.h */
+#define	ATC_SCSIZE_MASK		(0x7 << 16)	/* Source Chunk Transfer Size */
+#define		ATC_SCSIZE(x)		(ATC_SCSIZE_MASK & ((x) << 16))
+#define		ATC_SCSIZE_1		(0x0 << 16)
+#define		ATC_SCSIZE_4		(0x1 << 16)
+#define		ATC_SCSIZE_8		(0x2 << 16)
+#define		ATC_SCSIZE_16		(0x3 << 16)
+#define		ATC_SCSIZE_32		(0x4 << 16)
+#define		ATC_SCSIZE_64		(0x5 << 16)
+#define		ATC_SCSIZE_128		(0x6 << 16)
+#define		ATC_SCSIZE_256		(0x7 << 16)
+#define	ATC_DCSIZE_MASK		(0x7 << 20)	/* Destination Chunk Transfer Size */
+#define		ATC_DCSIZE(x)		(ATC_DCSIZE_MASK & ((x) << 20))
+#define		ATC_DCSIZE_1		(0x0 << 20)
+#define		ATC_DCSIZE_4		(0x1 << 20)
+#define		ATC_DCSIZE_8		(0x2 << 20)
+#define		ATC_DCSIZE_16		(0x3 << 20)
+#define		ATC_DCSIZE_32		(0x4 << 20)
+#define		ATC_DCSIZE_64		(0x5 << 20)
+#define		ATC_DCSIZE_128		(0x6 << 20)
+#define		ATC_DCSIZE_256		(0x7 << 20)
 #define	ATC_SRC_WIDTH_MASK	(0x3 << 24)	/* Source Single Transfer Size */
 #define		ATC_SRC_WIDTH(x)	((x) << 24)
 #define		ATC_SRC_WIDTH_BYTE	(0x0 << 24)
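With the chunk-size bits of CTRLA now taken from the generic dma_slave_config instead of the old platform-data ctrla field, a client selects its burst length through the standard dmaengine call, and the driver encodes .src_maxburst/.dst_maxburst into the ATC_SCSIZE()/ATC_DCSIZE() fields. A rough sketch (chan and per_fifo_addr are placeholders):

	struct dma_slave_config cfg = {
		.direction	= DMA_MEM_TO_DEV,
		.dst_addr	= per_fifo_addr,
		.dst_addr_width	= DMA_SLAVE_BUSWIDTH_4_BYTES,
		.src_maxburst	= 16,	/* encoded into the ATC_SCSIZE() field */
		.dst_maxburst	= 16,	/* encoded into the ATC_DCSIZE() field */
	};

	if (dmaengine_slave_config(chan, &cfg))
		return -EINVAL;		/* controller rejected the settings */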
diff --git a/drivers/dma/coh901318.c b/drivers/dma/coh901318.c
index 750925f..e67b4e0 100644
--- a/drivers/dma/coh901318.c
+++ b/drivers/dma/coh901318.c
@@ -1033,7 +1033,7 @@
 
 	if (!sgl)
 		goto out;
-	if (sgl->length == 0)
+	if (sg_dma_len(sgl) == 0)
 		goto out;
 
 	spin_lock_irqsave(&cohc->lock, flg);
diff --git a/drivers/dma/coh901318_lli.c b/drivers/dma/coh901318_lli.c
index 6c0e2d4..780e042 100644
--- a/drivers/dma/coh901318_lli.c
+++ b/drivers/dma/coh901318_lli.c
@@ -270,10 +270,10 @@
 
 		if (dir == DMA_MEM_TO_DEV)
 			/* increment source address */
-			src = sg_phys(sg);
+			src = sg_dma_address(sg);
 		else
 			/* increment destination address */
-			dst =  sg_phys(sg);
+			dst = sg_dma_address(sg);
 
 		bytes_to_transfer = sg_dma_len(sg);
 
diff --git a/drivers/dma/dw_dmac.c b/drivers/dma/dw_dmac.c
index 7439079..7212961 100644
--- a/drivers/dma/dw_dmac.c
+++ b/drivers/dma/dw_dmac.c
@@ -17,6 +17,7 @@
 #include <linux/init.h>
 #include <linux/interrupt.h>
 #include <linux/io.h>
+#include <linux/of.h>
 #include <linux/mm.h>
 #include <linux/module.h>
 #include <linux/platform_device.h>
@@ -742,7 +743,7 @@
 			struct dw_desc	*desc;
 			u32		len, dlen, mem;
 
-			mem = sg_phys(sg);
+			mem = sg_dma_address(sg);
 			len = sg_dma_len(sg);
 
 			if (!((mem | len) & 7))
@@ -809,7 +810,7 @@
 			struct dw_desc	*desc;
 			u32		len, dlen, mem;
 
-			mem = sg_phys(sg);
+			mem = sg_dma_address(sg);
 			len = sg_dma_len(sg);
 
 			if (!((mem | len) & 7))
@@ -1429,7 +1430,7 @@
 		err = PTR_ERR(dw->clk);
 		goto err_clk;
 	}
-	clk_enable(dw->clk);
+	clk_prepare_enable(dw->clk);
 
 	/* force dma off, just in case */
 	dw_dma_off(dw);
@@ -1510,7 +1511,7 @@
 	return 0;
 
 err_irq:
-	clk_disable(dw->clk);
+	clk_disable_unprepare(dw->clk);
 	clk_put(dw->clk);
 err_clk:
 	iounmap(dw->regs);
@@ -1540,7 +1541,7 @@
 		channel_clear_bit(dw, CH_EN, dwc->mask);
 	}
 
-	clk_disable(dw->clk);
+	clk_disable_unprepare(dw->clk);
 	clk_put(dw->clk);
 
 	iounmap(dw->regs);
@@ -1559,7 +1560,7 @@
 	struct dw_dma	*dw = platform_get_drvdata(pdev);
 
 	dw_dma_off(platform_get_drvdata(pdev));
-	clk_disable(dw->clk);
+	clk_disable_unprepare(dw->clk);
 }
 
 static int dw_suspend_noirq(struct device *dev)
@@ -1568,7 +1569,7 @@
 	struct dw_dma	*dw = platform_get_drvdata(pdev);
 
 	dw_dma_off(platform_get_drvdata(pdev));
-	clk_disable(dw->clk);
+	clk_disable_unprepare(dw->clk);
 
 	return 0;
 }
@@ -1578,7 +1579,7 @@
 	struct platform_device *pdev = to_platform_device(dev);
 	struct dw_dma	*dw = platform_get_drvdata(pdev);
 
-	clk_enable(dw->clk);
+	clk_prepare_enable(dw->clk);
 	dma_writel(dw, CFG, DW_CFG_DMA_EN);
 	return 0;
 }
@@ -1592,12 +1593,21 @@
 	.poweroff_noirq = dw_suspend_noirq,
 };
 
+#ifdef CONFIG_OF
+static const struct of_device_id dw_dma_id_table[] = {
+	{ .compatible = "snps,dma-spear1340" },
+	{}
+};
+MODULE_DEVICE_TABLE(of, dw_dma_id_table);
+#endif
+
 static struct platform_driver dw_driver = {
 	.remove		= __exit_p(dw_remove),
 	.shutdown	= dw_shutdown,
 	.driver = {
 		.name	= "dw_dmac",
 		.pm	= &dw_dev_pm_ops,
+		.of_match_table = of_match_ptr(dw_dma_id_table),
 	},
 };
 
@@ -1616,4 +1626,4 @@
 MODULE_LICENSE("GPL v2");
 MODULE_DESCRIPTION("Synopsys DesignWare DMA Controller driver");
 MODULE_AUTHOR("Haavard Skinnemoen (Atmel)");
-MODULE_AUTHOR("Viresh Kumar <viresh.kumar@st.com>");
+MODULE_AUTHOR("Viresh Kumar <viresh.linux@gmail.com>");
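The clk_enable()/clk_disable() conversions here (and in ipu_idmac and mv_xor below) follow the common clock framework rule that a clock must be prepared (which may sleep) before it can be enabled (which must be atomic-safe). clk_prepare_enable()/clk_disable_unprepare() just bundle the two steps; roughly:

	/* probe/resume: sleeping context, so do both steps at once */
	ret = clk_prepare_enable(dw->clk);
	if (ret)
		return ret;

	/* ...is approximately equivalent to... */
	ret = clk_prepare(dw->clk);		/* may sleep */
	if (!ret) {
		ret = clk_enable(dw->clk);	/* atomic-safe */
		if (ret)
			clk_unprepare(dw->clk);
	}

	/* teardown mirrors it */
	clk_disable_unprepare(dw->clk);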
diff --git a/drivers/dma/ep93xx_dma.c b/drivers/dma/ep93xx_dma.c
index f6e9b57..c64917e 100644
--- a/drivers/dma/ep93xx_dma.c
+++ b/drivers/dma/ep93xx_dma.c
@@ -71,6 +71,7 @@
 #define M2M_CONTROL_TM_SHIFT		13
 #define M2M_CONTROL_TM_TX		(1 << M2M_CONTROL_TM_SHIFT)
 #define M2M_CONTROL_TM_RX		(2 << M2M_CONTROL_TM_SHIFT)
+#define M2M_CONTROL_NFBINT		BIT(21)
 #define M2M_CONTROL_RSS_SHIFT		22
 #define M2M_CONTROL_RSS_SSPRX		(1 << M2M_CONTROL_RSS_SHIFT)
 #define M2M_CONTROL_RSS_SSPTX		(2 << M2M_CONTROL_RSS_SHIFT)
@@ -79,7 +80,22 @@
 #define M2M_CONTROL_PWSC_SHIFT		25
 
 #define M2M_INTERRUPT			0x0004
-#define M2M_INTERRUPT_DONEINT		BIT(1)
+#define M2M_INTERRUPT_MASK		6
+
+#define M2M_STATUS			0x000c
+#define M2M_STATUS_CTL_SHIFT		1
+#define M2M_STATUS_CTL_IDLE		(0 << M2M_STATUS_CTL_SHIFT)
+#define M2M_STATUS_CTL_STALL		(1 << M2M_STATUS_CTL_SHIFT)
+#define M2M_STATUS_CTL_MEMRD		(2 << M2M_STATUS_CTL_SHIFT)
+#define M2M_STATUS_CTL_MEMWR		(3 << M2M_STATUS_CTL_SHIFT)
+#define M2M_STATUS_CTL_BWCWAIT		(4 << M2M_STATUS_CTL_SHIFT)
+#define M2M_STATUS_CTL_MASK		(7 << M2M_STATUS_CTL_SHIFT)
+#define M2M_STATUS_BUF_SHIFT		4
+#define M2M_STATUS_BUF_NO		(0 << M2M_STATUS_BUF_SHIFT)
+#define M2M_STATUS_BUF_ON		(1 << M2M_STATUS_BUF_SHIFT)
+#define M2M_STATUS_BUF_NEXT		(2 << M2M_STATUS_BUF_SHIFT)
+#define M2M_STATUS_BUF_MASK		(3 << M2M_STATUS_BUF_SHIFT)
+#define M2M_STATUS_DONE			BIT(6)
 
 #define M2M_BCR0			0x0010
 #define M2M_BCR1			0x0014
@@ -426,15 +442,6 @@
 
 /*
  * M2M DMA implementation
- *
- * For the M2M transfers we don't use NFB at all. This is because it simply
- * doesn't work well with memcpy transfers. When you submit both buffers it is
- * extremely unlikely that you get an NFB interrupt, but it instead reports
- * DONE interrupt and both buffers are already transferred which means that we
- * weren't able to update the next buffer.
- *
- * So for now we "simulate" NFB by just submitting buffer after buffer
- * without double buffering.
  */
 
 static int m2m_hw_setup(struct ep93xx_dma_chan *edmac)
@@ -543,6 +550,11 @@
 	m2m_fill_desc(edmac);
 	control |= M2M_CONTROL_DONEINT;
 
+	if (ep93xx_dma_advance_active(edmac)) {
+		m2m_fill_desc(edmac);
+		control |= M2M_CONTROL_NFBINT;
+	}
+
 	/*
 	 * Now we can finally enable the channel. For M2M channel this must be
 	 * done _after_ the BCRx registers are programmed.
@@ -560,32 +572,89 @@
 	}
 }
 
+/*
+ * According to the EP93xx User's Guide, we should receive a DONE interrupt when
+ * all M2M DMA controller transactions complete normally. This is not always the
+ * case - sometimes the EP93xx M2M DMA asserts the DONE interrupt while the DMA
+ * channel is still running (channel Buffer FSM in DMA_BUF_ON state, and channel
+ * Control FSM in DMA_MEM_RD state, observed at least in IDE-DMA operation).
+ * In effect, disabling the channel when only the DONE bit is set could stop a
+ * currently running DMA transfer. To avoid this, we use the Buffer FSM and
+ * Control FSM to check the current state of the DMA channel.
+ */
 static int m2m_hw_interrupt(struct ep93xx_dma_chan *edmac)
 {
+	u32 status = readl(edmac->regs + M2M_STATUS);
+	u32 ctl_fsm = status & M2M_STATUS_CTL_MASK;
+	u32 buf_fsm = status & M2M_STATUS_BUF_MASK;
+	bool done = status & M2M_STATUS_DONE;
+	bool last_done;
 	u32 control;
+	struct ep93xx_dma_desc *desc;
 
-	if (!(readl(edmac->regs + M2M_INTERRUPT) & M2M_INTERRUPT_DONEINT))
+	/* Accept only DONE and NFB interrupts */
+	if (!(readl(edmac->regs + M2M_INTERRUPT) & M2M_INTERRUPT_MASK))
 		return INTERRUPT_UNKNOWN;
 
-	/* Clear the DONE bit */
-	writel(0, edmac->regs + M2M_INTERRUPT);
-
-	/* Disable interrupts and the channel */
-	control = readl(edmac->regs + M2M_CONTROL);
-	control &= ~(M2M_CONTROL_DONEINT | M2M_CONTROL_ENABLE);
-	writel(control, edmac->regs + M2M_CONTROL);
-
-	/*
-	 * Since we only get DONE interrupt we have to find out ourselves
-	 * whether there still is something to process. So we try to advance
-	 * the chain an see whether it succeeds.
-	 */
-	if (ep93xx_dma_advance_active(edmac)) {
-		edmac->edma->hw_submit(edmac);
-		return INTERRUPT_NEXT_BUFFER;
+	if (done) {
+		/* Clear the DONE bit */
+		writel(0, edmac->regs + M2M_INTERRUPT);
 	}
 
-	return INTERRUPT_DONE;
+	/*
+	 * Check whether we are done with descriptors or not. This, together
+	 * with DMA channel state, determines action to take in interrupt.
+	 */
+	desc = ep93xx_dma_get_active(edmac);
+	last_done = !desc || desc->txd.cookie;
+
+	/*
+	 * Use M2M DMA Buffer FSM and Control FSM to check current state of
+	 * DMA channel. Using DONE and NFB bits from channel status register
+	 * or bits from channel interrupt register is not reliable.
+	 */
+	if (!last_done &&
+	    (buf_fsm == M2M_STATUS_BUF_NO ||
+	     buf_fsm == M2M_STATUS_BUF_ON)) {
+		/*
+		 * Two buffers are ready for update when Buffer FSM is in
+		 * DMA_NO_BUF state. Only one buffer can be prepared without
+		 * disabling the channel or polling the DONE bit.
+		 * To simplify things, always prepare only one buffer.
+		 */
+		if (ep93xx_dma_advance_active(edmac)) {
+			m2m_fill_desc(edmac);
+			if (done && !edmac->chan.private) {
+				/* Software trigger for memcpy channel */
+				control = readl(edmac->regs + M2M_CONTROL);
+				control |= M2M_CONTROL_START;
+				writel(control, edmac->regs + M2M_CONTROL);
+			}
+			return INTERRUPT_NEXT_BUFFER;
+		} else {
+			last_done = true;
+		}
+	}
+
+	/*
+	 * Disable the channel only when Buffer FSM is in DMA_NO_BUF state
+	 * and Control FSM is in DMA_STALL state.
+	 */
+	if (last_done &&
+	    buf_fsm == M2M_STATUS_BUF_NO &&
+	    ctl_fsm == M2M_STATUS_CTL_STALL) {
+		/* Disable interrupts and the channel */
+		control = readl(edmac->regs + M2M_CONTROL);
+		control &= ~(M2M_CONTROL_DONEINT | M2M_CONTROL_NFBINT
+			    | M2M_CONTROL_ENABLE);
+		writel(control, edmac->regs + M2M_CONTROL);
+		return INTERRUPT_DONE;
+	}
+
+	/*
+	 * Nothing to do this time.
+	 */
+	return INTERRUPT_NEXT_BUFFER;
 }
 
 /*
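The interrupt handler above no longer trusts the DONE/NFB bits on their own; it decodes the channel's Buffer and Control state machines from M2M_STATUS before deciding whether to queue another buffer or shut the channel down. Stripped of the descriptor refill and the memcpy software trigger, the decision reduces to something like the following sketch (m2m_classify_irq() is a hypothetical helper, not part of this patch):

	static int m2m_classify_irq(struct ep93xx_dma_chan *edmac, bool more_descs)
	{
		u32 status = readl(edmac->regs + M2M_STATUS);
		u32 ctl_fsm = status & M2M_STATUS_CTL_MASK;
		u32 buf_fsm = status & M2M_STATUS_BUF_MASK;

		/* a buffer slot is free and there is more work: refill it */
		if (more_descs && (buf_fsm == M2M_STATUS_BUF_NO ||
				   buf_fsm == M2M_STATUS_BUF_ON))
			return INTERRUPT_NEXT_BUFFER;

		/* both buffers drained and the engine stalled: safe to stop */
		if (!more_descs && buf_fsm == M2M_STATUS_BUF_NO &&
		    ctl_fsm == M2M_STATUS_CTL_STALL)
			return INTERRUPT_DONE;

		/* still busy: leave the channel running for now */
		return INTERRUPT_NEXT_BUFFER;
	}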
diff --git a/drivers/dma/imx-dma.c b/drivers/dma/imx-dma.c
index bb787d8..fcfeb3c 100644
--- a/drivers/dma/imx-dma.c
+++ b/drivers/dma/imx-dma.c
@@ -227,7 +227,7 @@
 	struct scatterlist *sg = d->sg;
 	unsigned long now;
 
-	now = min(d->len, sg->length);
+	now = min(d->len, sg_dma_len(sg));
 	if (d->len != IMX_DMA_LENGTH_LOOP)
 		d->len -= now;
 
@@ -763,16 +763,16 @@
 	desc = list_first_entry(&imxdmac->ld_free, struct imxdma_desc, node);
 
 	for_each_sg(sgl, sg, sg_len, i) {
-		dma_length += sg->length;
+		dma_length += sg_dma_len(sg);
 	}
 
 	switch (imxdmac->word_size) {
 	case DMA_SLAVE_BUSWIDTH_4_BYTES:
-		if (sgl->length & 3 || sgl->dma_address & 3)
+		if (sg_dma_len(sgl) & 3 || sgl->dma_address & 3)
 			return NULL;
 		break;
 	case DMA_SLAVE_BUSWIDTH_2_BYTES:
-		if (sgl->length & 1 || sgl->dma_address & 1)
+		if (sg_dma_len(sgl) & 1 || sgl->dma_address & 1)
 			return NULL;
 		break;
 	case DMA_SLAVE_BUSWIDTH_1_BYTE:
@@ -831,13 +831,13 @@
 		imxdmac->sg_list[i].page_link = 0;
 		imxdmac->sg_list[i].offset = 0;
 		imxdmac->sg_list[i].dma_address = dma_addr;
-		imxdmac->sg_list[i].length = period_len;
+		sg_dma_len(&imxdmac->sg_list[i]) = period_len;
 		dma_addr += period_len;
 	}
 
 	/* close the loop */
 	imxdmac->sg_list[periods].offset = 0;
-	imxdmac->sg_list[periods].length = 0;
+	sg_dma_len(&imxdmac->sg_list[periods]) = 0;
 	imxdmac->sg_list[periods].page_link =
 		((unsigned long)imxdmac->sg_list | 0x01) & ~0x02;
 
diff --git a/drivers/dma/imx-sdma.c b/drivers/dma/imx-sdma.c
index d3e38e2..1dc2a4a 100644
--- a/drivers/dma/imx-sdma.c
+++ b/drivers/dma/imx-sdma.c
@@ -24,7 +24,7 @@
 #include <linux/mm.h>
 #include <linux/interrupt.h>
 #include <linux/clk.h>
-#include <linux/wait.h>
+#include <linux/delay.h>
 #include <linux/sched.h>
 #include <linux/semaphore.h>
 #include <linux/spinlock.h>
@@ -271,6 +271,7 @@
 	enum dma_status			status;
 	unsigned int			chn_count;
 	unsigned int			chn_real_count;
+	struct tasklet_struct		tasklet;
 };
 
 #define IMX_DMA_SG_LOOP		BIT(0)
@@ -322,8 +323,9 @@
 	struct sdma_context_data	*context;
 	dma_addr_t			context_phys;
 	struct dma_device		dma_device;
-	struct clk			*clk;
-	struct mutex			channel_0_lock;
+	struct clk			*clk_ipg;
+	struct clk			*clk_ahb;
+	spinlock_t			channel_0_lock;
 	struct sdma_script_start_addrs	*script_addrs;
 };
 
@@ -401,19 +403,27 @@
 }
 
 /*
- * sdma_run_channel - run a channel and wait till it's done
+ * sdma_run_channel0 - run a channel and wait till it's done
  */
-static int sdma_run_channel(struct sdma_channel *sdmac)
+static int sdma_run_channel0(struct sdma_engine *sdma)
 {
-	struct sdma_engine *sdma = sdmac->sdma;
-	int channel = sdmac->channel;
 	int ret;
+	unsigned long timeout = 500;
 
-	init_completion(&sdmac->done);
+	sdma_enable_channel(sdma, 0);
 
-	sdma_enable_channel(sdma, channel);
+	while (!(ret = readl_relaxed(sdma->regs + SDMA_H_INTR) & 1)) {
+		if (timeout-- <= 0)
+			break;
+		udelay(1);
+	}
 
-	ret = wait_for_completion_timeout(&sdmac->done, HZ);
+	if (ret) {
+		/* Clear the interrupt status */
+		writel_relaxed(ret, sdma->regs + SDMA_H_INTR);
+	} else {
+		dev_err(sdma->dev, "Timeout waiting for CH0 ready\n");
+	}
 
 	return ret ? 0 : -ETIMEDOUT;
 }
@@ -425,17 +435,17 @@
 	void *buf_virt;
 	dma_addr_t buf_phys;
 	int ret;
-
-	mutex_lock(&sdma->channel_0_lock);
+	unsigned long flags;
 
 	buf_virt = dma_alloc_coherent(NULL,
 			size,
 			&buf_phys, GFP_KERNEL);
 	if (!buf_virt) {
-		ret = -ENOMEM;
-		goto err_out;
+		return -ENOMEM;
 	}
 
+	spin_lock_irqsave(&sdma->channel_0_lock, flags);
+
 	bd0->mode.command = C0_SETPM;
 	bd0->mode.status = BD_DONE | BD_INTR | BD_WRAP | BD_EXTD;
 	bd0->mode.count = size / 2;
@@ -444,13 +454,12 @@
 
 	memcpy(buf_virt, buf, size);
 
-	ret = sdma_run_channel(&sdma->channel[0]);
+	ret = sdma_run_channel0(sdma);
+
+	spin_unlock_irqrestore(&sdma->channel_0_lock, flags);
 
 	dma_free_coherent(NULL, size, buf_virt, buf_phys);
 
-err_out:
-	mutex_unlock(&sdma->channel_0_lock);
-
 	return ret;
 }
 
@@ -534,13 +543,11 @@
 		sdmac->desc.callback(sdmac->desc.callback_param);
 }
 
-static void mxc_sdma_handle_channel(struct sdma_channel *sdmac)
+static void sdma_tasklet(unsigned long data)
 {
-	complete(&sdmac->done);
+	struct sdma_channel *sdmac = (struct sdma_channel *) data;
 
-	/* not interested in channel 0 interrupts */
-	if (sdmac->channel == 0)
-		return;
+	complete(&sdmac->done);
 
 	if (sdmac->flags & IMX_DMA_SG_LOOP)
 		sdma_handle_channel_loop(sdmac);
@@ -554,13 +561,15 @@
 	unsigned long stat;
 
 	stat = readl_relaxed(sdma->regs + SDMA_H_INTR);
+	/* not interested in channel 0 interrupts */
+	stat &= ~1;
 	writel_relaxed(stat, sdma->regs + SDMA_H_INTR);
 
 	while (stat) {
 		int channel = fls(stat) - 1;
 		struct sdma_channel *sdmac = &sdma->channel[channel];
 
-		mxc_sdma_handle_channel(sdmac);
+		tasklet_schedule(&sdmac->tasklet);
 
 		__clear_bit(channel, &stat);
 	}
@@ -659,6 +668,7 @@
 	struct sdma_context_data *context = sdma->context;
 	struct sdma_buffer_descriptor *bd0 = sdma->channel[0].bd;
 	int ret;
+	unsigned long flags;
 
 	if (sdmac->direction == DMA_DEV_TO_MEM) {
 		load_address = sdmac->pc_from_device;
@@ -676,7 +686,7 @@
 	dev_dbg(sdma->dev, "event_mask0 = 0x%08x\n", (u32)sdmac->event_mask[0]);
 	dev_dbg(sdma->dev, "event_mask1 = 0x%08x\n", (u32)sdmac->event_mask[1]);
 
-	mutex_lock(&sdma->channel_0_lock);
+	spin_lock_irqsave(&sdma->channel_0_lock, flags);
 
 	memset(context, 0, sizeof(*context));
 	context->channel_state.pc = load_address;
@@ -695,10 +705,9 @@
 	bd0->mode.count = sizeof(*context) / 4;
 	bd0->buffer_addr = sdma->context_phys;
 	bd0->ext_buffer_addr = 2048 + (sizeof(*context) / 4) * channel;
+	ret = sdma_run_channel0(sdma);
 
-	ret = sdma_run_channel(&sdma->channel[0]);
-
-	mutex_unlock(&sdma->channel_0_lock);
+	spin_unlock_irqrestore(&sdma->channel_0_lock, flags);
 
 	return ret;
 }
@@ -806,8 +815,6 @@
 
 	init_completion(&sdmac->done);
 
-	sdmac->buf_tail = 0;
-
 	return 0;
 out:
 
@@ -859,7 +866,8 @@
 	sdmac->peripheral_type = data->peripheral_type;
 	sdmac->event_id0 = data->dma_request;
 
-	clk_enable(sdmac->sdma->clk);
+	clk_enable(sdmac->sdma->clk_ipg);
+	clk_enable(sdmac->sdma->clk_ahb);
 
 	ret = sdma_request_channel(sdmac);
 	if (ret)
@@ -896,7 +904,8 @@
 
 	dma_free_coherent(NULL, PAGE_SIZE, sdmac->bd, sdmac->bd_phys);
 
-	clk_disable(sdma->clk);
+	clk_disable(sdma->clk_ipg);
+	clk_disable(sdma->clk_ahb);
 }
 
 static struct dma_async_tx_descriptor *sdma_prep_slave_sg(
@@ -916,6 +925,8 @@
 
 	sdmac->flags = 0;
 
+	sdmac->buf_tail = 0;
+
 	dev_dbg(sdma->dev, "setting up %d entries for channel %d.\n",
 			sg_len, channel);
 
@@ -938,7 +949,7 @@
 
 		bd->buffer_addr = sg->dma_address;
 
-		count = sg->length;
+		count = sg_dma_len(sg);
 
 		if (count > 0xffff) {
 			dev_err(sdma->dev, "SDMA channel %d: maximum bytes for sg entry exceeded: %d > %d\n",
@@ -1016,6 +1027,8 @@
 
 	sdmac->status = DMA_IN_PROGRESS;
 
+	sdmac->buf_tail = 0;
+
 	sdmac->flags |= IMX_DMA_SG_LOOP;
 	sdmac->direction = direction;
 	ret = sdma_load_context(sdmac);
@@ -1169,12 +1182,14 @@
 	addr = (void *)header + header->script_addrs_start;
 	ram_code = (void *)header + header->ram_code_start;
 
-	clk_enable(sdma->clk);
+	clk_enable(sdma->clk_ipg);
+	clk_enable(sdma->clk_ahb);
 	/* download the RAM image for SDMA */
 	sdma_load_script(sdma, ram_code,
 			header->ram_code_size,
 			addr->ram_code_start_addr);
-	clk_disable(sdma->clk);
+	clk_disable(sdma->clk_ipg);
+	clk_disable(sdma->clk_ahb);
 
 	sdma_add_scripts(sdma, addr);
 
@@ -1216,7 +1231,8 @@
 		return -ENODEV;
 	}
 
-	clk_enable(sdma->clk);
+	clk_enable(sdma->clk_ipg);
+	clk_enable(sdma->clk_ahb);
 
 	/* Be sure SDMA has not started yet */
 	writel_relaxed(0, sdma->regs + SDMA_H_C0PTR);
@@ -1269,12 +1285,14 @@
 	/* Initializes channel's priorities */
 	sdma_set_channel_priority(&sdma->channel[0], 7);
 
-	clk_disable(sdma->clk);
+	clk_disable(sdma->clk_ipg);
+	clk_disable(sdma->clk_ahb);
 
 	return 0;
 
 err_dma_alloc:
-	clk_disable(sdma->clk);
+	clk_disable(sdma->clk_ipg);
+	clk_disable(sdma->clk_ahb);
 	dev_err(sdma->dev, "initialisation failed with %d\n", ret);
 	return ret;
 }
@@ -1297,7 +1315,7 @@
 	if (!sdma)
 		return -ENOMEM;
 
-	mutex_init(&sdma->channel_0_lock);
+	spin_lock_init(&sdma->channel_0_lock);
 
 	sdma->dev = &pdev->dev;
 
@@ -1313,12 +1331,21 @@
 		goto err_request_region;
 	}
 
-	sdma->clk = clk_get(&pdev->dev, NULL);
-	if (IS_ERR(sdma->clk)) {
-		ret = PTR_ERR(sdma->clk);
+	sdma->clk_ipg = devm_clk_get(&pdev->dev, "ipg");
+	if (IS_ERR(sdma->clk_ipg)) {
+		ret = PTR_ERR(sdma->clk_ipg);
 		goto err_clk;
 	}
 
+	sdma->clk_ahb = devm_clk_get(&pdev->dev, "ahb");
+	if (IS_ERR(sdma->clk_ahb)) {
+		ret = PTR_ERR(sdma->clk_ahb);
+		goto err_clk;
+	}
+
+	clk_prepare(sdma->clk_ipg);
+	clk_prepare(sdma->clk_ahb);
+
 	sdma->regs = ioremap(iores->start, resource_size(iores));
 	if (!sdma->regs) {
 		ret = -ENOMEM;
@@ -1359,6 +1386,8 @@
 		dma_cookie_init(&sdmac->chan);
 		sdmac->channel = i;
 
+		tasklet_init(&sdmac->tasklet, sdma_tasklet,
+			     (unsigned long) sdmac);
 		/*
 		 * Add the channel to the DMAC list. Do not add channel 0 though
 		 * because we need it internally in the SDMA driver. This also means
@@ -1426,7 +1455,6 @@
 err_request_irq:
 	iounmap(sdma->regs);
 err_ioremap:
-	clk_put(sdma->clk);
 err_clk:
 	release_mem_region(iores->start, resource_size(iores));
 err_request_region:
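The imx-sdma changes above replace the single clock with named "ipg" and "ahb" clocks and do the (possibly sleeping) prepare step once at probe time, so the enable/disable pairs scattered through the driver can stay in atomic-safe paths. Condensed, with error handling trimmed, the lifecycle is roughly:

	/* probe: look up both clocks by name and prepare them once */
	sdma->clk_ipg = devm_clk_get(&pdev->dev, "ipg");
	sdma->clk_ahb = devm_clk_get(&pdev->dev, "ahb");
	if (IS_ERR(sdma->clk_ipg) || IS_ERR(sdma->clk_ahb))
		return -ENODEV;	/* the real code propagates PTR_ERR() */

	clk_prepare(sdma->clk_ipg);
	clk_prepare(sdma->clk_ahb);

	/* runtime (channel alloc, firmware load, init): enable/disable only */
	clk_enable(sdma->clk_ipg);
	clk_enable(sdma->clk_ahb);
	/* ... touch SDMA registers ... */
	clk_disable(sdma->clk_ipg);
	clk_disable(sdma->clk_ahb);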
diff --git a/drivers/dma/intel_mid_dma.c b/drivers/dma/intel_mid_dma.c
index c900ca7..222e907 100644
--- a/drivers/dma/intel_mid_dma.c
+++ b/drivers/dma/intel_mid_dma.c
@@ -394,11 +394,11 @@
 			}
 		}
 		/*Populate CTL_HI values*/
-		ctl_hi.ctlx.block_ts = get_block_ts(sg->length,
+		ctl_hi.ctlx.block_ts = get_block_ts(sg_dma_len(sg),
 							desc->width,
 							midc->dma->block_size);
 		/*Populate SAR and DAR values*/
-		sg_phy_addr = sg_phys(sg);
+		sg_phy_addr = sg_dma_address(sg);
 		if (desc->dirn ==  DMA_MEM_TO_DEV) {
 			lli_bloc_desc->sar  = sg_phy_addr;
 			lli_bloc_desc->dar  = mids->dma_slave.dst_addr;
@@ -747,7 +747,7 @@
 			txd = intel_mid_dma_prep_memcpy(chan,
 						mids->dma_slave.dst_addr,
 						mids->dma_slave.src_addr,
-						sgl->length,
+						sg_dma_len(sgl),
 						flags);
 			return txd;
 		} else {
@@ -759,7 +759,7 @@
 	pr_debug("MDMA: SG Length = %d, direction = %d, Flags = %#lx\n",
 			sg_len, direction, flags);
 
-	txd = intel_mid_dma_prep_memcpy(chan, 0, 0, sgl->length, flags);
+	txd = intel_mid_dma_prep_memcpy(chan, 0, 0, sg_dma_len(sgl), flags);
 	if (NULL == txd) {
 		pr_err("MDMA: Prep memcpy failed\n");
 		return NULL;
diff --git a/drivers/dma/ipu/ipu_idmac.c b/drivers/dma/ipu/ipu_idmac.c
index 62e3f8e..5ec7204 100644
--- a/drivers/dma/ipu/ipu_idmac.c
+++ b/drivers/dma/ipu/ipu_idmac.c
@@ -1715,7 +1715,7 @@
 	}
 
 	/* Make sure IPU HSP clock is running */
-	clk_enable(ipu_data.ipu_clk);
+	clk_prepare_enable(ipu_data.ipu_clk);
 
 	/* Disable all interrupts */
 	idmac_write_ipureg(&ipu_data, 0, IPU_INT_CTRL_1);
@@ -1747,7 +1747,7 @@
 err_idmac_init:
 err_attach_irq:
 	ipu_irq_detach_irq(&ipu_data, pdev);
-	clk_disable(ipu_data.ipu_clk);
+	clk_disable_unprepare(ipu_data.ipu_clk);
 	clk_put(ipu_data.ipu_clk);
 err_clk_get:
 	iounmap(ipu_data.reg_ic);
@@ -1765,7 +1765,7 @@
 
 	ipu_idmac_exit(ipu);
 	ipu_irq_detach_irq(ipu, pdev);
-	clk_disable(ipu->ipu_clk);
+	clk_disable_unprepare(ipu->ipu_clk);
 	clk_put(ipu->ipu_clk);
 	iounmap(ipu->reg_ic);
 	iounmap(ipu->reg_ipu);
diff --git a/drivers/dma/mv_xor.c b/drivers/dma/mv_xor.c
index fa5d55f..0b12e68 100644
--- a/drivers/dma/mv_xor.c
+++ b/drivers/dma/mv_xor.c
@@ -25,6 +25,7 @@
 #include <linux/interrupt.h>
 #include <linux/platform_device.h>
 #include <linux/memory.h>
+#include <linux/clk.h>
 #include <plat/mv_xor.h>
 
 #include "dmaengine.h"
@@ -1307,11 +1308,25 @@
 	if (dram)
 		mv_xor_conf_mbus_windows(msp, dram);
 
+	/* Not all platforms can gate the clock, so it is not
+	 * an error if the clock does not exist.
+	 */
+	msp->clk = clk_get(&pdev->dev, NULL);
+	if (!IS_ERR(msp->clk))
+		clk_prepare_enable(msp->clk);
+
 	return 0;
 }
 
 static int mv_xor_shared_remove(struct platform_device *pdev)
 {
+	struct mv_xor_shared_private *msp = platform_get_drvdata(pdev);
+
+	if (!IS_ERR(msp->clk)) {
+		clk_disable_unprepare(msp->clk);
+		clk_put(msp->clk);
+	}
+
 	return 0;
 }
 
diff --git a/drivers/dma/mv_xor.h b/drivers/dma/mv_xor.h
index 654876b..a5b422f 100644
--- a/drivers/dma/mv_xor.h
+++ b/drivers/dma/mv_xor.h
@@ -55,6 +55,7 @@
 struct mv_xor_shared_private {
 	void __iomem	*xor_base;
 	void __iomem	*xor_high_base;
+	struct clk	*clk;
 };
 
 
diff --git a/drivers/dma/mxs-dma.c b/drivers/dma/mxs-dma.c
index 655d4ce6..c96ab15 100644
--- a/drivers/dma/mxs-dma.c
+++ b/drivers/dma/mxs-dma.c
@@ -22,11 +22,14 @@
 #include <linux/platform_device.h>
 #include <linux/dmaengine.h>
 #include <linux/delay.h>
+#include <linux/module.h>
 #include <linux/fsl/mxs-dma.h>
+#include <linux/stmp_device.h>
+#include <linux/of.h>
+#include <linux/of_device.h>
 
 #include <asm/irq.h>
 #include <mach/mxs.h>
-#include <mach/common.h>
 
 #include "dmaengine.h"
 
@@ -36,12 +39,8 @@
  * dma can program the controller registers of peripheral devices.
  */
 
-#define MXS_DMA_APBH		0
-#define MXS_DMA_APBX		1
-#define dma_is_apbh()		(mxs_dma->dev_id == MXS_DMA_APBH)
-
-#define APBH_VERSION_LATEST	3
-#define apbh_is_old()		(mxs_dma->version < APBH_VERSION_LATEST)
+#define dma_is_apbh(mxs_dma)	((mxs_dma)->type == MXS_DMA_APBH)
+#define apbh_is_old(mxs_dma)	((mxs_dma)->dev_id == IMX23_DMA)
 
 #define HW_APBHX_CTRL0				0x000
 #define BM_APBH_CTRL0_APB_BURST8_EN		(1 << 29)
@@ -51,13 +50,14 @@
 #define HW_APBHX_CTRL2				0x020
 #define HW_APBHX_CHANNEL_CTRL			0x030
 #define BP_APBHX_CHANNEL_CTRL_RESET_CHANNEL	16
-#define HW_APBH_VERSION				(cpu_is_mx23() ? 0x3f0 : 0x800)
-#define HW_APBX_VERSION				0x800
-#define BP_APBHX_VERSION_MAJOR			24
-#define HW_APBHX_CHn_NXTCMDAR(n) \
-	(((dma_is_apbh() && apbh_is_old()) ? 0x050 : 0x110) + (n) * 0x70)
-#define HW_APBHX_CHn_SEMA(n) \
-	(((dma_is_apbh() && apbh_is_old()) ? 0x080 : 0x140) + (n) * 0x70)
+/*
+ * The offset of the NXTCMDAR register differs between DMA types and versions,
+ * while the per-channel stride is always 0x70.
+ */
+#define HW_APBHX_CHn_NXTCMDAR(d, n) \
+	(((dma_is_apbh(d) && apbh_is_old(d)) ? 0x050 : 0x110) + (n) * 0x70)
+#define HW_APBHX_CHn_SEMA(d, n) \
+	(((dma_is_apbh(d) && apbh_is_old(d)) ? 0x080 : 0x140) + (n) * 0x70)
 
 /*
  * ccw bits definitions
@@ -121,9 +121,19 @@
 #define MXS_DMA_CHANNELS		16
 #define MXS_DMA_CHANNELS_MASK		0xffff
 
+enum mxs_dma_devtype {
+	MXS_DMA_APBH,
+	MXS_DMA_APBX,
+};
+
+enum mxs_dma_id {
+	IMX23_DMA,
+	IMX28_DMA,
+};
+
 struct mxs_dma_engine {
-	int				dev_id;
-	unsigned int			version;
+	enum mxs_dma_id			dev_id;
+	enum mxs_dma_devtype		type;
 	void __iomem			*base;
 	struct clk			*clk;
 	struct dma_device		dma_device;
@@ -131,17 +141,86 @@
 	struct mxs_dma_chan		mxs_chans[MXS_DMA_CHANNELS];
 };
 
+struct mxs_dma_type {
+	enum mxs_dma_id id;
+	enum mxs_dma_devtype type;
+};
+
+static struct mxs_dma_type mxs_dma_types[] = {
+	{
+		.id = IMX23_DMA,
+		.type = MXS_DMA_APBH,
+	}, {
+		.id = IMX23_DMA,
+		.type = MXS_DMA_APBX,
+	}, {
+		.id = IMX28_DMA,
+		.type = MXS_DMA_APBH,
+	}, {
+		.id = IMX28_DMA,
+		.type = MXS_DMA_APBX,
+	}
+};
+
+static struct platform_device_id mxs_dma_ids[] = {
+	{
+		.name = "imx23-dma-apbh",
+		.driver_data = (kernel_ulong_t) &mxs_dma_types[0],
+	}, {
+		.name = "imx23-dma-apbx",
+		.driver_data = (kernel_ulong_t) &mxs_dma_types[1],
+	}, {
+		.name = "imx28-dma-apbh",
+		.driver_data = (kernel_ulong_t) &mxs_dma_types[2],
+	}, {
+		.name = "imx28-dma-apbx",
+		.driver_data = (kernel_ulong_t) &mxs_dma_types[3],
+	}, {
+		/* end of list */
+	}
+};
+
+static const struct of_device_id mxs_dma_dt_ids[] = {
+	{ .compatible = "fsl,imx23-dma-apbh", .data = &mxs_dma_ids[0], },
+	{ .compatible = "fsl,imx23-dma-apbx", .data = &mxs_dma_ids[1], },
+	{ .compatible = "fsl,imx28-dma-apbh", .data = &mxs_dma_ids[2], },
+	{ .compatible = "fsl,imx28-dma-apbx", .data = &mxs_dma_ids[3], },
+	{ /* sentinel */ }
+};
+MODULE_DEVICE_TABLE(of, mxs_dma_dt_ids);
+
+static struct mxs_dma_chan *to_mxs_dma_chan(struct dma_chan *chan)
+{
+	return container_of(chan, struct mxs_dma_chan, chan);
+}
+
+int mxs_dma_is_apbh(struct dma_chan *chan)
+{
+	struct mxs_dma_chan *mxs_chan = to_mxs_dma_chan(chan);
+	struct mxs_dma_engine *mxs_dma = mxs_chan->mxs_dma;
+
+	return dma_is_apbh(mxs_dma);
+}
+
+int mxs_dma_is_apbx(struct dma_chan *chan)
+{
+	struct mxs_dma_chan *mxs_chan = to_mxs_dma_chan(chan);
+	struct mxs_dma_engine *mxs_dma = mxs_chan->mxs_dma;
+
+	return !dma_is_apbh(mxs_dma);
+}
+
 static void mxs_dma_reset_chan(struct mxs_dma_chan *mxs_chan)
 {
 	struct mxs_dma_engine *mxs_dma = mxs_chan->mxs_dma;
 	int chan_id = mxs_chan->chan.chan_id;
 
-	if (dma_is_apbh() && apbh_is_old())
+	if (dma_is_apbh(mxs_dma) && apbh_is_old(mxs_dma))
 		writel(1 << (chan_id + BP_APBH_CTRL0_RESET_CHANNEL),
-			mxs_dma->base + HW_APBHX_CTRL0 + MXS_SET_ADDR);
+			mxs_dma->base + HW_APBHX_CTRL0 + STMP_OFFSET_REG_SET);
 	else
 		writel(1 << (chan_id + BP_APBHX_CHANNEL_CTRL_RESET_CHANNEL),
-			mxs_dma->base + HW_APBHX_CHANNEL_CTRL + MXS_SET_ADDR);
+			mxs_dma->base + HW_APBHX_CHANNEL_CTRL + STMP_OFFSET_REG_SET);
 }
 
 static void mxs_dma_enable_chan(struct mxs_dma_chan *mxs_chan)
@@ -151,10 +230,10 @@
 
 	/* set cmd_addr up */
 	writel(mxs_chan->ccw_phys,
-		mxs_dma->base + HW_APBHX_CHn_NXTCMDAR(chan_id));
+		mxs_dma->base + HW_APBHX_CHn_NXTCMDAR(mxs_dma, chan_id));
 
 	/* write 1 to SEMA to kick off the channel */
-	writel(1, mxs_dma->base + HW_APBHX_CHn_SEMA(chan_id));
+	writel(1, mxs_dma->base + HW_APBHX_CHn_SEMA(mxs_dma, chan_id));
 }
 
 static void mxs_dma_disable_chan(struct mxs_dma_chan *mxs_chan)
@@ -168,12 +247,12 @@
 	int chan_id = mxs_chan->chan.chan_id;
 
 	/* freeze the channel */
-	if (dma_is_apbh() && apbh_is_old())
+	if (dma_is_apbh(mxs_dma) && apbh_is_old(mxs_dma))
 		writel(1 << chan_id,
-			mxs_dma->base + HW_APBHX_CTRL0 + MXS_SET_ADDR);
+			mxs_dma->base + HW_APBHX_CTRL0 + STMP_OFFSET_REG_SET);
 	else
 		writel(1 << chan_id,
-			mxs_dma->base + HW_APBHX_CHANNEL_CTRL + MXS_SET_ADDR);
+			mxs_dma->base + HW_APBHX_CHANNEL_CTRL + STMP_OFFSET_REG_SET);
 
 	mxs_chan->status = DMA_PAUSED;
 }
@@ -184,21 +263,16 @@
 	int chan_id = mxs_chan->chan.chan_id;
 
 	/* unfreeze the channel */
-	if (dma_is_apbh() && apbh_is_old())
+	if (dma_is_apbh(mxs_dma) && apbh_is_old(mxs_dma))
 		writel(1 << chan_id,
-			mxs_dma->base + HW_APBHX_CTRL0 + MXS_CLR_ADDR);
+			mxs_dma->base + HW_APBHX_CTRL0 + STMP_OFFSET_REG_CLR);
 	else
 		writel(1 << chan_id,
-			mxs_dma->base + HW_APBHX_CHANNEL_CTRL + MXS_CLR_ADDR);
+			mxs_dma->base + HW_APBHX_CHANNEL_CTRL + STMP_OFFSET_REG_CLR);
 
 	mxs_chan->status = DMA_IN_PROGRESS;
 }
 
-static struct mxs_dma_chan *to_mxs_dma_chan(struct dma_chan *chan)
-{
-	return container_of(chan, struct mxs_dma_chan, chan);
-}
-
 static dma_cookie_t mxs_dma_tx_submit(struct dma_async_tx_descriptor *tx)
 {
 	return dma_cookie_assign(tx);
@@ -220,11 +294,11 @@
 	/* completion status */
 	stat1 = readl(mxs_dma->base + HW_APBHX_CTRL1);
 	stat1 &= MXS_DMA_CHANNELS_MASK;
-	writel(stat1, mxs_dma->base + HW_APBHX_CTRL1 + MXS_CLR_ADDR);
+	writel(stat1, mxs_dma->base + HW_APBHX_CTRL1 + STMP_OFFSET_REG_CLR);
 
 	/* error status */
 	stat2 = readl(mxs_dma->base + HW_APBHX_CTRL2);
-	writel(stat2, mxs_dma->base + HW_APBHX_CTRL2 + MXS_CLR_ADDR);
+	writel(stat2, mxs_dma->base + HW_APBHX_CTRL2 + STMP_OFFSET_REG_CLR);
 
 	/*
 	 * When both completion and error of termination bits set at the
@@ -415,9 +489,9 @@
 		ccw->bits |= BF_CCW(MXS_DMA_CMD_NO_XFER, COMMAND);
 	} else {
 		for_each_sg(sgl, sg, sg_len, i) {
-			if (sg->length > MAX_XFER_BYTES) {
+			if (sg_dma_len(sg) > MAX_XFER_BYTES) {
 				dev_err(mxs_dma->dma_device.dev, "maximum bytes for sg entry exceeded: %d > %d\n",
-						sg->length, MAX_XFER_BYTES);
+						sg_dma_len(sg), MAX_XFER_BYTES);
 				goto err_out;
 			}
 
@@ -425,7 +499,7 @@
 
 			ccw->next = mxs_chan->ccw_phys + sizeof(*ccw) * idx;
 			ccw->bufaddr = sg->dma_address;
-			ccw->xfer_bytes = sg->length;
+			ccw->xfer_bytes = sg_dma_len(sg);
 
 			ccw->bits = 0;
 			ccw->bits |= CCW_CHAIN;
@@ -567,27 +641,21 @@
 	if (ret)
 		return ret;
 
-	ret = mxs_reset_block(mxs_dma->base);
+	ret = stmp_reset_block(mxs_dma->base);
 	if (ret)
 		goto err_out;
 
-	/* only major version matters */
-	mxs_dma->version = readl(mxs_dma->base +
-				((mxs_dma->dev_id == MXS_DMA_APBX) ?
-				HW_APBX_VERSION : HW_APBH_VERSION)) >>
-				BP_APBHX_VERSION_MAJOR;
-
 	/* enable apbh burst */
-	if (dma_is_apbh()) {
+	if (dma_is_apbh(mxs_dma)) {
 		writel(BM_APBH_CTRL0_APB_BURST_EN,
-			mxs_dma->base + HW_APBHX_CTRL0 + MXS_SET_ADDR);
+			mxs_dma->base + HW_APBHX_CTRL0 + STMP_OFFSET_REG_SET);
 		writel(BM_APBH_CTRL0_APB_BURST8_EN,
-			mxs_dma->base + HW_APBHX_CTRL0 + MXS_SET_ADDR);
+			mxs_dma->base + HW_APBHX_CTRL0 + STMP_OFFSET_REG_SET);
 	}
 
 	/* enable irq for all the channels */
 	writel(MXS_DMA_CHANNELS_MASK << MXS_DMA_CHANNELS,
-		mxs_dma->base + HW_APBHX_CTRL1 + MXS_SET_ADDR);
+		mxs_dma->base + HW_APBHX_CTRL1 + STMP_OFFSET_REG_SET);
 
 err_out:
 	clk_disable_unprepare(mxs_dma->clk);
@@ -596,8 +664,9 @@
 
 static int __init mxs_dma_probe(struct platform_device *pdev)
 {
-	const struct platform_device_id *id_entry =
-				platform_get_device_id(pdev);
+	const struct platform_device_id *id_entry;
+	const struct of_device_id *of_id;
+	const struct mxs_dma_type *dma_type;
 	struct mxs_dma_engine *mxs_dma;
 	struct resource *iores;
 	int ret, i;
@@ -606,7 +675,15 @@
 	if (!mxs_dma)
 		return -ENOMEM;
 
-	mxs_dma->dev_id = id_entry->driver_data;
+	of_id = of_match_device(mxs_dma_dt_ids, &pdev->dev);
+	if (of_id)
+		id_entry = of_id->data;
+	else
+		id_entry = platform_get_device_id(pdev);
+
+	dma_type = (struct mxs_dma_type *)id_entry->driver_data;
+	mxs_dma->type = dma_type->type;
+	mxs_dma->dev_id = dma_type->id;
 
 	iores = platform_get_resource(pdev, IORESOURCE_MEM, 0);
 
@@ -689,23 +766,12 @@
 	return ret;
 }
 
-static struct platform_device_id mxs_dma_type[] = {
-	{
-		.name = "mxs-dma-apbh",
-		.driver_data = MXS_DMA_APBH,
-	}, {
-		.name = "mxs-dma-apbx",
-		.driver_data = MXS_DMA_APBX,
-	}, {
-		/* end of list */
-	}
-};
-
 static struct platform_driver mxs_dma_driver = {
 	.driver		= {
 		.name	= "mxs-dma",
+		.of_match_table = mxs_dma_dt_ids,
 	},
-	.id_table	= mxs_dma_type,
+	.id_table	= mxs_dma_ids,
 };
 
 static int __init mxs_dma_module_init(void)
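The MXS_SET_ADDR/MXS_CLR_ADDR constants above are replaced by the generic STMP helpers from <linux/stmp_device.h>. STMP-style (i.MX23/28) peripherals mirror every register at fixed set/clear/toggle offsets, so writing a mask to base + STMP_OFFSET_REG_SET sets those bits and writing it to base + STMP_OFFSET_REG_CLR clears them, with no read-modify-write needed. Freezing and unfreezing a channel, for example, reduces to:

	u32 mask = 1 << chan_id;

	/* freeze the channel */
	writel(mask, mxs_dma->base + HW_APBHX_CHANNEL_CTRL + STMP_OFFSET_REG_SET);

	/* unfreeze it again */
	writel(mask, mxs_dma->base + HW_APBHX_CHANNEL_CTRL + STMP_OFFSET_REG_CLR);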
diff --git a/drivers/dma/pch_dma.c b/drivers/dma/pch_dma.c
index 65c0495..987ab5c 100644
--- a/drivers/dma/pch_dma.c
+++ b/drivers/dma/pch_dma.c
@@ -621,7 +621,7 @@
 			goto err_desc_get;
 
 		desc->regs.dev_addr = reg;
-		desc->regs.mem_addr = sg_phys(sg);
+		desc->regs.mem_addr = sg_dma_address(sg);
 		desc->regs.size = sg_dma_len(sg);
 		desc->regs.next = DMA_DESC_FOLLOW_WITHOUT_IRQ;
 
diff --git a/drivers/dma/pl330.c b/drivers/dma/pl330.c
index fa3fb21..e4feba6 100644
--- a/drivers/dma/pl330.c
+++ b/drivers/dma/pl330.c
@@ -21,7 +21,6 @@
 #include <linux/interrupt.h>
 #include <linux/dma-mapping.h>
 #include <linux/dmaengine.h>
-#include <linux/interrupt.h>
 #include <linux/amba/bus.h>
 #include <linux/amba/pl330.h>
 #include <linux/pm_runtime.h>
@@ -393,6 +392,8 @@
 	struct pl330_reqcfg *cfg;
 	/* Pointer to first xfer in the request. */
 	struct pl330_xfer *x;
+	/* Hook to attach to DMAC's list of reqs with due callback */
+	struct list_head rqd;
 };
 
 /*
@@ -462,8 +463,6 @@
 	/* Number of bytes taken to setup MC for the req */
 	u32 mc_len;
 	struct pl330_req *r;
-	/* Hook to attach to DMAC's list of reqs with due callback */
-	struct list_head rqd;
 };
 
 /* ToBeDone for tasklet */
@@ -1684,7 +1683,7 @@
 /* Returns 1 if state was updated, 0 otherwise */
 static int pl330_update(const struct pl330_info *pi)
 {
-	struct _pl330_req *rqdone;
+	struct pl330_req *rqdone, *tmp;
 	struct pl330_dmac *pl330;
 	unsigned long flags;
 	void __iomem *regs;
@@ -1751,7 +1750,10 @@
 			if (active == -1) /* Aborted */
 				continue;
 
-			rqdone = &thrd->req[active];
+			/* Detach the req */
+			rqdone = thrd->req[active].r;
+			thrd->req[active].r = NULL;
+
 			mark_free(thrd, active);
 
 			/* Get going again ASAP */
@@ -1763,20 +1765,11 @@
 	}
 
 	/* Now that we are in no hurry, do the callbacks */
-	while (!list_empty(&pl330->req_done)) {
-		struct pl330_req *r;
-
-		rqdone = container_of(pl330->req_done.next,
-					struct _pl330_req, rqd);
-
-		list_del_init(&rqdone->rqd);
-
-		/* Detach the req */
-		r = rqdone->r;
-		rqdone->r = NULL;
+	list_for_each_entry_safe(rqdone, tmp, &pl330->req_done, rqd) {
+		list_del(&rqdone->rqd);
 
 		spin_unlock_irqrestore(&pl330->lock, flags);
-		_callback(r, PL330_ERR_NONE);
+		_callback(rqdone, PL330_ERR_NONE);
 		spin_lock_irqsave(&pl330->lock, flags);
 	}
 
@@ -2322,7 +2315,7 @@
 	/* Pick up ripe tomatoes */
 	list_for_each_entry_safe(desc, _dt, &pch->work_list, node)
 		if (desc->status == DONE) {
-			if (pch->cyclic)
+			if (!pch->cyclic)
 				dma_cookie_complete(&desc->txd);
 			list_move_tail(&desc->node, &list);
 		}
@@ -2540,7 +2533,7 @@
 }
 
 /* Returns the number of descriptors added to the DMAC pool */
-int add_desc(struct dma_pl330_dmac *pdmac, gfp_t flg, int count)
+static int add_desc(struct dma_pl330_dmac *pdmac, gfp_t flg, int count)
 {
 	struct dma_pl330_desc *desc;
 	unsigned long flags;
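The req_done rework above also illustrates why the callback loop drops the DMAC lock: a completion callback may resubmit work and try to take the same spinlock, so it must run unlocked. The shape of the loop, as used above, is roughly:

	spin_lock_irqsave(&pl330->lock, flags);
	list_for_each_entry_safe(rqdone, tmp, &pl330->req_done, rqd) {
		list_del(&rqdone->rqd);

		spin_unlock_irqrestore(&pl330->lock, flags);
		_callback(rqdone, PL330_ERR_NONE);	/* may re-enter the driver */
		spin_lock_irqsave(&pl330->lock, flags);
	}
	spin_unlock_irqrestore(&pl330->lock, flags);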
diff --git a/drivers/dma/ste_dma40.c b/drivers/dma/ste_dma40.c
index 2ed1ac3..000d309 100644
--- a/drivers/dma/ste_dma40.c
+++ b/drivers/dma/ste_dma40.c
@@ -2362,7 +2362,7 @@
 	}
 
 	sg[periods].offset = 0;
-	sg[periods].length = 0;
+	sg_dma_len(&sg[periods]) = 0;
 	sg[periods].page_link =
 		((unsigned long)sg | 0x01) & ~0x02;
 
diff --git a/drivers/edac/amd64_edac.c b/drivers/edac/amd64_edac.c
index 7ef73c9..7be9b72 100644
--- a/drivers/edac/amd64_edac.c
+++ b/drivers/edac/amd64_edac.c
@@ -715,25 +715,6 @@
 				     input_addr_to_dram_addr(mci, input_addr));
 }
 
-/*
- * Find the minimum and maximum InputAddr values that map to the given @csrow.
- * Pass back these values in *input_addr_min and *input_addr_max.
- */
-static void find_csrow_limits(struct mem_ctl_info *mci, int csrow,
-			      u64 *input_addr_min, u64 *input_addr_max)
-{
-	struct amd64_pvt *pvt;
-	u64 base, mask;
-
-	pvt = mci->pvt_info;
-	BUG_ON((csrow < 0) || (csrow >= pvt->csels[0].b_cnt));
-
-	get_cs_base_and_mask(pvt, csrow, 0, &base, &mask);
-
-	*input_addr_min = base & ~mask;
-	*input_addr_max = base | mask;
-}
-
 /* Map the Error address to a PAGE and PAGE OFFSET. */
 static inline void error_address_to_page_and_offset(u64 error_address,
 						    u32 *page, u32 *offset)
@@ -1058,6 +1039,37 @@
 	int channel, csrow;
 	u32 page, offset;
 
+	error_address_to_page_and_offset(sys_addr, &page, &offset);
+
+	/*
+	 * Find out which node the error address belongs to. This may be
+	 * different from the node that detected the error.
+	 */
+	src_mci = find_mc_by_sys_addr(mci, sys_addr);
+	if (!src_mci) {
+		amd64_mc_err(mci, "failed to map error addr 0x%lx to a node\n",
+			     (unsigned long)sys_addr);
+		edac_mc_handle_error(HW_EVENT_ERR_CORRECTED, mci,
+				     page, offset, syndrome,
+				     -1, -1, -1,
+				     EDAC_MOD_STR,
+				     "failed to map error addr to a node",
+				     NULL);
+		return;
+	}
+
+	/* Now map the sys_addr to a CSROW */
+	csrow = sys_addr_to_csrow(src_mci, sys_addr);
+	if (csrow < 0) {
+		edac_mc_handle_error(HW_EVENT_ERR_CORRECTED, mci,
+				     page, offset, syndrome,
+				     -1, -1, -1,
+				     EDAC_MOD_STR,
+				     "failed to map error addr to a csrow",
+				     NULL);
+		return;
+	}
+
 	/* CHIPKILL enabled */
 	if (pvt->nbcfg & NBCFG_CHIPKILL) {
 		channel = get_channel_from_ecc_syndrome(mci, syndrome);
@@ -1067,9 +1079,15 @@
 			 * 2 DIMMs is in error. So we need to ID 'both' of them
 			 * as suspect.
 			 */
-			amd64_mc_warn(mci, "unknown syndrome 0x%04x - possible "
-					   "error reporting race\n", syndrome);
-			edac_mc_handle_ce_no_info(mci, EDAC_MOD_STR);
+			amd64_mc_warn(src_mci, "unknown syndrome 0x%04x - "
+				      "possible error reporting race\n",
+				      syndrome);
+			edac_mc_handle_error(HW_EVENT_ERR_CORRECTED, mci,
+					     page, offset, syndrome,
+					     csrow, -1, -1,
+					     EDAC_MOD_STR,
+					     "unknown syndrome - possible error reporting race",
+					     NULL);
 			return;
 		}
 	} else {
@@ -1084,28 +1102,10 @@
 		channel = ((sys_addr & BIT(3)) != 0);
 	}
 
-	/*
-	 * Find out which node the error address belongs to. This may be
-	 * different from the node that detected the error.
-	 */
-	src_mci = find_mc_by_sys_addr(mci, sys_addr);
-	if (!src_mci) {
-		amd64_mc_err(mci, "failed to map error addr 0x%lx to a node\n",
-			     (unsigned long)sys_addr);
-		edac_mc_handle_ce_no_info(mci, EDAC_MOD_STR);
-		return;
-	}
-
-	/* Now map the sys_addr to a CSROW */
-	csrow = sys_addr_to_csrow(src_mci, sys_addr);
-	if (csrow < 0) {
-		edac_mc_handle_ce_no_info(src_mci, EDAC_MOD_STR);
-	} else {
-		error_address_to_page_and_offset(sys_addr, &page, &offset);
-
-		edac_mc_handle_ce(src_mci, page, offset, syndrome, csrow,
-				  channel, EDAC_MOD_STR);
-	}
+	edac_mc_handle_error(HW_EVENT_ERR_CORRECTED, src_mci,
+			     page, offset, syndrome,
+			     csrow, channel, -1,
+			     EDAC_MOD_STR, "", NULL);
 }
 
 static int ddr2_cs_size(unsigned i, bool dct_width)
@@ -1611,15 +1611,20 @@
 	u32 page, offset;
 	int nid, csrow, chan = 0;
 
+	error_address_to_page_and_offset(sys_addr, &page, &offset);
+
 	csrow = f1x_translate_sysaddr_to_cs(pvt, sys_addr, &nid, &chan);
 
 	if (csrow < 0) {
-		edac_mc_handle_ce_no_info(mci, EDAC_MOD_STR);
+		edac_mc_handle_error(HW_EVENT_ERR_CORRECTED, mci,
+				     page, offset, syndrome,
+				     -1, -1, -1,
+				     EDAC_MOD_STR,
+				     "failed to map error addr to a csrow",
+				     NULL);
 		return;
 	}
 
-	error_address_to_page_and_offset(sys_addr, &page, &offset);
-
 	/*
 	 * We need the syndromes for channel detection only when we're
 	 * ganged. Otherwise @chan should already contain the channel at
@@ -1628,16 +1633,10 @@
 	if (dct_ganging_enabled(pvt))
 		chan = get_channel_from_ecc_syndrome(mci, syndrome);
 
-	if (chan >= 0)
-		edac_mc_handle_ce(mci, page, offset, syndrome, csrow, chan,
-				  EDAC_MOD_STR);
-	else
-		/*
-		 * Channel unknown, report all channels on this CSROW as failed.
-		 */
-		for (chan = 0; chan < mci->csrows[csrow].nr_channels; chan++)
-			edac_mc_handle_ce(mci, page, offset, syndrome,
-					  csrow, chan, EDAC_MOD_STR);
+	edac_mc_handle_error(HW_EVENT_ERR_CORRECTED, mci,
+			     page, offset, syndrome,
+			     csrow, chan, -1,
+			     EDAC_MOD_STR, "", NULL);
 }
 
 /*
@@ -1918,7 +1917,12 @@
 	/* Ensure that the Error Address is VALID */
 	if (!(m->status & MCI_STATUS_ADDRV)) {
 		amd64_mc_err(mci, "HW has no ERROR_ADDRESS available\n");
-		edac_mc_handle_ce_no_info(mci, EDAC_MOD_STR);
+		edac_mc_handle_error(HW_EVENT_ERR_CORRECTED, mci,
+				     0, 0, 0,
+				     -1, -1, -1,
+				     EDAC_MOD_STR,
+				     "HW has no ERROR_ADDRESS available",
+				     NULL);
 		return;
 	}
 
@@ -1942,11 +1946,17 @@
 
 	if (!(m->status & MCI_STATUS_ADDRV)) {
 		amd64_mc_err(mci, "HW has no ERROR_ADDRESS available\n");
-		edac_mc_handle_ue_no_info(log_mci, EDAC_MOD_STR);
+		edac_mc_handle_error(HW_EVENT_ERR_UNCORRECTED, mci,
+				     0, 0, 0,
+				     -1, -1, -1,
+				     EDAC_MOD_STR,
+				     "HW has no ERROR_ADDRESS available",
+				     NULL);
 		return;
 	}
 
 	sys_addr = get_error_address(m);
+	error_address_to_page_and_offset(sys_addr, &page, &offset);
 
 	/*
 	 * Find out which node the error address belongs to. This may be
@@ -1956,7 +1966,11 @@
 	if (!src_mci) {
 		amd64_mc_err(mci, "ERROR ADDRESS (0x%lx) NOT mapped to a MC\n",
 				  (unsigned long)sys_addr);
-		edac_mc_handle_ue_no_info(log_mci, EDAC_MOD_STR);
+		edac_mc_handle_error(HW_EVENT_ERR_UNCORRECTED, mci,
+				     page, offset, 0,
+				     -1, -1, -1,
+				     EDAC_MOD_STR,
+				     "ERROR ADDRESS NOT mapped to a MC", NULL);
 		return;
 	}
 
@@ -1966,10 +1980,17 @@
 	if (csrow < 0) {
 		amd64_mc_err(mci, "ERROR_ADDRESS (0x%lx) NOT mapped to CS\n",
 				  (unsigned long)sys_addr);
-		edac_mc_handle_ue_no_info(log_mci, EDAC_MOD_STR);
+		edac_mc_handle_error(HW_EVENT_ERR_UNCORRECTED, mci,
+				     page, offset, 0,
+				     -1, -1, -1,
+				     EDAC_MOD_STR,
+				     "ERROR ADDRESS NOT mapped to CS",
+				     NULL);
 	} else {
-		error_address_to_page_and_offset(sys_addr, &page, &offset);
-		edac_mc_handle_ue(log_mci, page, offset, csrow, EDAC_MOD_STR);
+		edac_mc_handle_error(HW_EVENT_ERR_UNCORRECTED, mci,
+				     page, offset, 0,
+				     csrow, -1, -1,
+				     EDAC_MOD_STR, "", NULL);
 	}
 }
 
@@ -2171,7 +2192,7 @@
 	nr_pages = pvt->ops->dbam_to_cs(pvt, dct, cs_mode) << (20 - PAGE_SHIFT);
 
 	debugf0("  (csrow=%d) DBAM map index= %d\n", csrow_nr, cs_mode);
-	debugf0("    nr_pages= %u  channel-count = %d\n",
+	debugf0("    nr_pages/channel= %u  channel-count = %d\n",
 		nr_pages, pvt->channel_count);
 
 	return nr_pages;
@@ -2185,9 +2206,12 @@
 {
 	struct csrow_info *csrow;
 	struct amd64_pvt *pvt = mci->pvt_info;
-	u64 input_addr_min, input_addr_max, sys_addr, base, mask;
+	u64 base, mask;
 	u32 val;
-	int i, empty = 1;
+	int i, j, empty = 1;
+	enum mem_type mtype;
+	enum edac_type edac_mode;
+	int nr_pages = 0;
 
 	amd64_read_pci_cfg(pvt->F3, NBCFG, &val);
 
@@ -2211,41 +2235,32 @@
 
 		empty = 0;
 		if (csrow_enabled(i, 0, pvt))
-			csrow->nr_pages = amd64_csrow_nr_pages(pvt, 0, i);
+			nr_pages = amd64_csrow_nr_pages(pvt, 0, i);
 		if (csrow_enabled(i, 1, pvt))
-			csrow->nr_pages += amd64_csrow_nr_pages(pvt, 1, i);
-		find_csrow_limits(mci, i, &input_addr_min, &input_addr_max);
-		sys_addr = input_addr_to_sys_addr(mci, input_addr_min);
-		csrow->first_page = (u32) (sys_addr >> PAGE_SHIFT);
-		sys_addr = input_addr_to_sys_addr(mci, input_addr_max);
-		csrow->last_page = (u32) (sys_addr >> PAGE_SHIFT);
+			nr_pages += amd64_csrow_nr_pages(pvt, 1, i);
 
 		get_cs_base_and_mask(pvt, i, 0, &base, &mask);
-		csrow->page_mask = ~mask;
 		/* 8 bytes of resolution */
 
-		csrow->mtype = amd64_determine_memory_type(pvt, i);
+		mtype = amd64_determine_memory_type(pvt, i);
 
 		debugf1("  for MC node %d csrow %d:\n", pvt->mc_node_id, i);
-		debugf1("    input_addr_min: 0x%lx input_addr_max: 0x%lx\n",
-			(unsigned long)input_addr_min,
-			(unsigned long)input_addr_max);
-		debugf1("    sys_addr: 0x%lx  page_mask: 0x%lx\n",
-			(unsigned long)sys_addr, csrow->page_mask);
-		debugf1("    nr_pages: %u  first_page: 0x%lx "
-			"last_page: 0x%lx\n",
-			(unsigned)csrow->nr_pages,
-			csrow->first_page, csrow->last_page);
+		debugf1("    nr_pages: %u\n", nr_pages * pvt->channel_count);
 
 		/*
 		 * determine whether CHIPKILL or JUST ECC or NO ECC is operating
 		 */
 		if (pvt->nbcfg & NBCFG_ECC_ENABLE)
-			csrow->edac_mode =
-			    (pvt->nbcfg & NBCFG_CHIPKILL) ?
-			    EDAC_S4ECD4ED : EDAC_SECDED;
+			edac_mode = (pvt->nbcfg & NBCFG_CHIPKILL) ?
+				    EDAC_S4ECD4ED : EDAC_SECDED;
 		else
-			csrow->edac_mode = EDAC_NONE;
+			edac_mode = EDAC_NONE;
+
+		for (j = 0; j < pvt->channel_count; j++) {
+			csrow->channels[j].dimm->mtype = mtype;
+			csrow->channels[j].dimm->edac_mode = edac_mode;
+			csrow->channels[j].dimm->nr_pages = nr_pages;
+		}
 	}
 
 	return empty;
@@ -2540,6 +2555,7 @@
 	struct amd64_pvt *pvt = NULL;
 	struct amd64_family_type *fam_type = NULL;
 	struct mem_ctl_info *mci = NULL;
+	struct edac_mc_layer layers[2];
 	int err = 0, ret;
 	u8 nid = get_node_id(F2);
 
@@ -2574,7 +2590,13 @@
 		goto err_siblings;
 
 	ret = -ENOMEM;
-	mci = edac_mc_alloc(0, pvt->csels[0].b_cnt, pvt->channel_count, nid);
+	layers[0].type = EDAC_MC_LAYER_CHIP_SELECT;
+	layers[0].size = pvt->csels[0].b_cnt;
+	layers[0].is_virt_csrow = true;
+	layers[1].type = EDAC_MC_LAYER_CHANNEL;
+	layers[1].size = pvt->channel_count;
+	layers[1].is_virt_csrow = false;
+	mci = edac_mc_alloc(nid, ARRAY_SIZE(layers), layers, 0);
 	if (!mci)
 		goto err_siblings;
 
diff --git a/drivers/edac/amd76x_edac.c b/drivers/edac/amd76x_edac.c
index f8fd3c8..9774d44 100644
--- a/drivers/edac/amd76x_edac.c
+++ b/drivers/edac/amd76x_edac.c
@@ -29,7 +29,6 @@
 	edac_mc_chipset_printk(mci, level, "amd76x", fmt, ##arg)
 
 #define AMD76X_NR_CSROWS 8
-#define AMD76X_NR_CHANS  1
 #define AMD76X_NR_DIMMS  4
 
 /* AMD 76x register addresses - device 0 function 0 - PCI bridge */
@@ -146,8 +145,10 @@
 
 		if (handle_errors) {
 			row = (info->ecc_mode_status >> 4) & 0xf;
-			edac_mc_handle_ue(mci, mci->csrows[row].first_page, 0,
-					row, mci->ctl_name);
+			edac_mc_handle_error(HW_EVENT_ERR_UNCORRECTED, mci,
+					     mci->csrows[row].first_page, 0, 0,
+					     row, 0, -1,
+					     mci->ctl_name, "", NULL);
 		}
 	}
 
@@ -159,8 +160,10 @@
 
 		if (handle_errors) {
 			row = info->ecc_mode_status & 0xf;
-			edac_mc_handle_ce(mci, mci->csrows[row].first_page, 0,
-					0, row, 0, mci->ctl_name);
+			edac_mc_handle_error(HW_EVENT_ERR_CORRECTED, mci,
+					     mci->csrows[row].first_page, 0, 0,
+					     row, 0, -1,
+					     mci->ctl_name, "", NULL);
 		}
 	}
 
@@ -186,11 +189,13 @@
 			enum edac_type edac_mode)
 {
 	struct csrow_info *csrow;
+	struct dimm_info *dimm;
 	u32 mba, mba_base, mba_mask, dms;
 	int index;
 
 	for (index = 0; index < mci->nr_csrows; index++) {
 		csrow = &mci->csrows[index];
+		dimm = csrow->channels[0].dimm;
 
 		/* find the DRAM Chip Select Base address and mask */
 		pci_read_config_dword(pdev,
@@ -203,13 +208,13 @@
 		mba_mask = ((mba & 0xff80) << 16) | 0x7fffffUL;
 		pci_read_config_dword(pdev, AMD76X_DRAM_MODE_STATUS, &dms);
 		csrow->first_page = mba_base >> PAGE_SHIFT;
-		csrow->nr_pages = (mba_mask + 1) >> PAGE_SHIFT;
-		csrow->last_page = csrow->first_page + csrow->nr_pages - 1;
+		dimm->nr_pages = (mba_mask + 1) >> PAGE_SHIFT;
+		csrow->last_page = csrow->first_page + dimm->nr_pages - 1;
 		csrow->page_mask = mba_mask >> PAGE_SHIFT;
-		csrow->grain = csrow->nr_pages << PAGE_SHIFT;
-		csrow->mtype = MEM_RDDR;
-		csrow->dtype = ((dms >> index) & 0x1) ? DEV_X4 : DEV_UNKNOWN;
-		csrow->edac_mode = edac_mode;
+		dimm->grain = dimm->nr_pages << PAGE_SHIFT;
+		dimm->mtype = MEM_RDDR;
+		dimm->dtype = ((dms >> index) & 0x1) ? DEV_X4 : DEV_UNKNOWN;
+		dimm->edac_mode = edac_mode;
 	}
 }
 
@@ -230,7 +235,8 @@
 		EDAC_SECDED,
 		EDAC_SECDED
 	};
-	struct mem_ctl_info *mci = NULL;
+	struct mem_ctl_info *mci;
+	struct edac_mc_layer layers[2];
 	u32 ems;
 	u32 ems_mode;
 	struct amd76x_error_info discard;
@@ -238,11 +244,17 @@
 	debugf0("%s()\n", __func__);
 	pci_read_config_dword(pdev, AMD76X_ECC_MODE_STATUS, &ems);
 	ems_mode = (ems >> 10) & 0x3;
-	mci = edac_mc_alloc(0, AMD76X_NR_CSROWS, AMD76X_NR_CHANS, 0);
 
-	if (mci == NULL) {
+	layers[0].type = EDAC_MC_LAYER_CHIP_SELECT;
+	layers[0].size = AMD76X_NR_CSROWS;
+	layers[0].is_virt_csrow = true;
+	layers[1].type = EDAC_MC_LAYER_CHANNEL;
+	layers[1].size = 1;
+	layers[1].is_virt_csrow = false;
+	mci = edac_mc_alloc(0, ARRAY_SIZE(layers), layers, 0);
+
+	if (mci == NULL)
 		return -ENOMEM;
-	}
 
 	debugf0("%s(): mci = %p\n", __func__, mci);
 	mci->dev = &pdev->dev;
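The EDAC conversions in amd64_edac and amd76x_edac above, and in cell_edac, cpc925_edac and e752x_edac below, all follow the same pattern: edac_mc_alloc() now takes an array of struct edac_mc_layer describing the csrow/channel hierarchy, and the old edac_mc_handle_ce/ue (and *_no_info) helpers collapse into edac_mc_handle_error(), where -1 marks an unknown position in a layer. Condensed from the hunks above (nr_csrows, nr_channels, mc_idx and the error coordinates are placeholders):

	struct edac_mc_layer layers[2];
	struct mem_ctl_info *mci;

	layers[0].type = EDAC_MC_LAYER_CHIP_SELECT;
	layers[0].size = nr_csrows;
	layers[0].is_virt_csrow = true;
	layers[1].type = EDAC_MC_LAYER_CHANNEL;
	layers[1].size = nr_channels;
	layers[1].is_virt_csrow = false;
	mci = edac_mc_alloc(mc_idx, ARRAY_SIZE(layers), layers, 0);

	/* report a corrected error: csrow known, channel unknown */
	edac_mc_handle_error(HW_EVENT_ERR_CORRECTED, mci,
			     page, offset, syndrome,
			     csrow, -1, -1,
			     "my_driver", "", NULL);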
diff --git a/drivers/edac/cell_edac.c b/drivers/edac/cell_edac.c
index 9a6a274..69ee6aa 100644
--- a/drivers/edac/cell_edac.c
+++ b/drivers/edac/cell_edac.c
@@ -48,8 +48,9 @@
 	syndrome = (ar & 0x000000001fe00000ul) >> 21;
 
 	/* TODO: Decoding of the error address */
-	edac_mc_handle_ce(mci, csrow->first_page + pfn, offset,
-			  syndrome, 0, chan, "");
+	edac_mc_handle_error(HW_EVENT_ERR_CORRECTED, mci,
+			     csrow->first_page + pfn, offset, syndrome,
+			     0, chan, -1, "", "", NULL);
 }
 
 static void cell_edac_count_ue(struct mem_ctl_info *mci, int chan, u64 ar)
@@ -69,7 +70,9 @@
 	offset = address & ~PAGE_MASK;
 
 	/* TODO: Decoding of the error address */
-	edac_mc_handle_ue(mci, csrow->first_page + pfn, offset, 0, "");
+	edac_mc_handle_error(HW_EVENT_ERR_UNCORRECTED, mci,
+			     csrow->first_page + pfn, offset, 0,
+			     0, chan, -1, "", "", NULL);
 }
 
 static void cell_edac_check(struct mem_ctl_info *mci)
@@ -124,8 +127,11 @@
 static void __devinit cell_edac_init_csrows(struct mem_ctl_info *mci)
 {
 	struct csrow_info		*csrow = &mci->csrows[0];
+	struct dimm_info		*dimm;
 	struct cell_edac_priv		*priv = mci->pvt_info;
 	struct device_node		*np;
+	int				j;
+	u32				nr_pages;
 
 	for (np = NULL;
 	     (np = of_find_node_by_name(np, "memory")) != NULL;) {
@@ -140,15 +146,20 @@
 		if (of_node_to_nid(np) != priv->node)
 			continue;
 		csrow->first_page = r.start >> PAGE_SHIFT;
-		csrow->nr_pages = resource_size(&r) >> PAGE_SHIFT;
-		csrow->last_page = csrow->first_page + csrow->nr_pages - 1;
-		csrow->mtype = MEM_XDR;
-		csrow->edac_mode = EDAC_SECDED;
+		nr_pages = resource_size(&r) >> PAGE_SHIFT;
+		csrow->last_page = csrow->first_page + nr_pages - 1;
+
+		for (j = 0; j < csrow->nr_channels; j++) {
+			dimm = csrow->channels[j].dimm;
+			dimm->mtype = MEM_XDR;
+			dimm->edac_mode = EDAC_SECDED;
+			dimm->nr_pages = nr_pages / csrow->nr_channels;
+		}
 		dev_dbg(mci->dev,
 			"Initialized on node %d, chanmask=0x%x,"
 			" first_page=0x%lx, nr_pages=0x%x\n",
 			priv->node, priv->chanmask,
-			csrow->first_page, csrow->nr_pages);
+			csrow->first_page, nr_pages);
 		break;
 	}
 }
@@ -157,9 +168,10 @@
 {
 	struct cbe_mic_tm_regs __iomem	*regs;
 	struct mem_ctl_info		*mci;
+	struct edac_mc_layer		layers[2];
 	struct cell_edac_priv		*priv;
 	u64				reg;
-	int				rc, chanmask;
+	int				rc, chanmask, num_chans;
 
 	regs = cbe_get_cpu_mic_tm_regs(cbe_node_to_cpu(pdev->id));
 	if (regs == NULL)
@@ -184,8 +196,16 @@
 		in_be64(&regs->mic_fir));
 
 	/* Allocate & init EDAC MC data structure */
-	mci = edac_mc_alloc(sizeof(struct cell_edac_priv), 1,
-			    chanmask == 3 ? 2 : 1, pdev->id);
+	num_chans = chanmask == 3 ? 2 : 1;
+
+	layers[0].type = EDAC_MC_LAYER_CHIP_SELECT;
+	layers[0].size = 1;
+	layers[0].is_virt_csrow = true;
+	layers[1].type = EDAC_MC_LAYER_CHANNEL;
+	layers[1].size = num_chans;
+	layers[1].is_virt_csrow = false;
+	mci = edac_mc_alloc(pdev->id, ARRAY_SIZE(layers), layers,
+			    sizeof(struct cell_edac_priv));
 	if (mci == NULL)
 		return -ENOMEM;
 	priv = mci->pvt_info;
diff --git a/drivers/edac/cpc925_edac.c b/drivers/edac/cpc925_edac.c
index a774c0d..e22030a 100644
--- a/drivers/edac/cpc925_edac.c
+++ b/drivers/edac/cpc925_edac.c
@@ -329,9 +329,10 @@
 {
 	struct cpc925_mc_pdata *pdata = mci->pvt_info;
 	struct csrow_info *csrow;
-	int index;
+	struct dimm_info *dimm;
+	int index, j;
 	u32 mbmr, mbbar, bba;
-	unsigned long row_size, last_nr_pages = 0;
+	unsigned long row_size, nr_pages, last_nr_pages = 0;
 
 	get_total_mem(pdata);
 
@@ -350,36 +351,41 @@
 
 		row_size = bba * (1UL << 28);	/* 256M */
 		csrow->first_page = last_nr_pages;
-		csrow->nr_pages = row_size >> PAGE_SHIFT;
-		csrow->last_page = csrow->first_page + csrow->nr_pages - 1;
+		nr_pages = row_size >> PAGE_SHIFT;
+		csrow->last_page = csrow->first_page + nr_pages - 1;
 		last_nr_pages = csrow->last_page + 1;
 
-		csrow->mtype = MEM_RDDR;
-		csrow->edac_mode = EDAC_SECDED;
+		for (j = 0; j < csrow->nr_channels; j++) {
+			dimm = csrow->channels[j].dimm;
 
-		switch (csrow->nr_channels) {
-		case 1: /* Single channel */
-			csrow->grain = 32; /* four-beat burst of 32 bytes */
-			break;
-		case 2: /* Dual channel */
-		default:
-			csrow->grain = 64; /* four-beat burst of 64 bytes */
-			break;
-		}
+			dimm->nr_pages = nr_pages / csrow->nr_channels;
+			dimm->mtype = MEM_RDDR;
+			dimm->edac_mode = EDAC_SECDED;
 
-		switch ((mbmr & MBMR_MODE_MASK) >> MBMR_MODE_SHIFT) {
-		case 6: /* 0110, no way to differentiate X8 VS X16 */
-		case 5:	/* 0101 */
-		case 8: /* 1000 */
-			csrow->dtype = DEV_X16;
-			break;
-		case 7: /* 0111 */
-		case 9: /* 1001 */
-			csrow->dtype = DEV_X8;
-			break;
-		default:
-			csrow->dtype = DEV_UNKNOWN;
-			break;
+			switch (csrow->nr_channels) {
+			case 1: /* Single channel */
+				dimm->grain = 32; /* four-beat burst of 32 bytes */
+				break;
+			case 2: /* Dual channel */
+			default:
+				dimm->grain = 64; /* four-beat burst of 64 bytes */
+				break;
+			}
+
+			switch ((mbmr & MBMR_MODE_MASK) >> MBMR_MODE_SHIFT) {
+			case 6: /* 0110, no way to differentiate X8 VS X16 */
+			case 5:	/* 0101 */
+			case 8: /* 1000 */
+				dimm->dtype = DEV_X16;
+				break;
+			case 7: /* 0111 */
+			case 9: /* 1001 */
+				dimm->dtype = DEV_X8;
+				break;
+			default:
+				dimm->dtype = DEV_UNKNOWN;
+				break;
+			}
 		}
 	}
 }
@@ -549,13 +555,18 @@
 	if (apiexcp & CECC_EXCP_DETECTED) {
 		cpc925_mc_printk(mci, KERN_INFO, "DRAM CECC Fault\n");
 		channel = cpc925_mc_find_channel(mci, syndrome);
-		edac_mc_handle_ce(mci, pfn, offset, syndrome,
-				  csrow, channel, mci->ctl_name);
+		edac_mc_handle_error(HW_EVENT_ERR_CORRECTED, mci,
+				     pfn, offset, syndrome,
+				     csrow, channel, -1,
+				     mci->ctl_name, "", NULL);
 	}
 
 	if (apiexcp & UECC_EXCP_DETECTED) {
 		cpc925_mc_printk(mci, KERN_INFO, "DRAM UECC Fault\n");
-		edac_mc_handle_ue(mci, pfn, offset, csrow, mci->ctl_name);
+		edac_mc_handle_error(HW_EVENT_ERR_CORRECTED, mci,
+				     pfn, offset, 0,
+				     csrow, -1, -1,
+				     mci->ctl_name, "", NULL);
 	}
 
 	cpc925_mc_printk(mci, KERN_INFO, "Dump registers:\n");
@@ -927,6 +938,7 @@
 {
 	static int edac_mc_idx;
 	struct mem_ctl_info *mci;
+	struct edac_mc_layer layers[2];
 	void __iomem *vbase;
 	struct cpc925_mc_pdata *pdata;
 	struct resource *r;
@@ -962,9 +974,16 @@
 		goto err2;
 	}
 
-	nr_channels = cpc925_mc_get_channels(vbase);
-	mci = edac_mc_alloc(sizeof(struct cpc925_mc_pdata),
-			CPC925_NR_CSROWS, nr_channels + 1, edac_mc_idx);
+	nr_channels = cpc925_mc_get_channels(vbase) + 1;
+
+	layers[0].type = EDAC_MC_LAYER_CHIP_SELECT;
+	layers[0].size = CPC925_NR_CSROWS;
+	layers[0].is_virt_csrow = true;
+	layers[1].type = EDAC_MC_LAYER_CHANNEL;
+	layers[1].size = nr_channels;
+	layers[1].is_virt_csrow = false;
+	mci = edac_mc_alloc(edac_mc_idx, ARRAY_SIZE(layers), layers,
+			    sizeof(struct cpc925_mc_pdata));
 	if (!mci) {
 		cpc925_printk(KERN_ERR, "No memory for mem_ctl_info\n");
 		res = -ENOMEM;
diff --git a/drivers/edac/e752x_edac.c b/drivers/edac/e752x_edac.c
index 4122326..3186512 100644
--- a/drivers/edac/e752x_edac.c
+++ b/drivers/edac/e752x_edac.c
@@ -4,7 +4,11 @@
  * This file may be distributed under the terms of the
  * GNU General Public License.
  *
- * See "enum e752x_chips" below for supported chipsets
+ * Implement support for the e7520, e7525, e7320 and i3100 memory controllers.
+ *
+ * Datasheets:
+ *	http://www.intel.in/content/www/in/en/chipsets/e7525-memory-controller-hub-datasheet.html
+ *	ftp://download.intel.com/design/intarch/datashts/31345803.pdf
  *
  * Written by Tom Zimmerman
  *
@@ -13,8 +17,6 @@
  * 	Wang Zhenyu at intel.com
  * 	Dave Jiang at mvista.com
  *
- * $Id: edac_e752x.c,v 1.5.2.11 2005/10/05 00:43:44 dsp_llnl Exp $
- *
  */
 
 #include <linux/module.h>
@@ -187,6 +189,25 @@
 	I3100 = 3
 };
 
+/*
+ * These chips support single-rank and dual-rank memories only.
+ *
+ * On e752x chips, the odd rows are present only on dual-rank memories.
+ * Dividing the rank by two will provide the dimm#
+ *
+ * i3100 MC has a different mapping: it supports only 4 ranks.
+ *
+ * The mapping is (from 1 to n):
+ *	slot	   single-ranked	double-ranked
+ *	dimm #1 -> rank #4		NA
+ *	dimm #2 -> rank #3		NA
+ *	dimm #3 -> rank #2		Ranks 2 and 3
+ *	dimm #4 -> rank #1		Ranks 1 and 4
+ *
+ * FIXME: The current mapping for i3100 assumes that it supports up to 8
+ *	  ranks/channel, but the datasheet says that the MC supports only 4 ranks.
+ */
+
 struct e752x_pvt {
 	struct pci_dev *bridge_ck;
 	struct pci_dev *dev_d0f0;
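For illustration only (not part of the patch): the rank-to-DIMM mapping described in the
comment added above amounts to two trivial helpers. The names below are hypothetical and
only restate the single-ranked case in code; on i3100 the dual-ranked case pairs ranks as
noted in the table.

	/* Hypothetical sketch: on e752x, two consecutive ranks belong to one DIMM. */
	static inline int e752x_rank_to_dimm(int rank)
	{
		return rank >> 1;	/* odd ranks exist only on dual-rank DIMMs */
	}

	/* Hypothetical sketch: i3100 single-ranked slots are numbered in reverse. */
	static inline int i3100_rank_to_dimm(int rank)
	{
		return 3 - rank;	/* 0-based: rank 3 -> dimm 0, ..., rank 0 -> dimm 3 */
	}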
@@ -350,8 +371,10 @@
 	channel = !(error_one & 1);
 
 	/* e752x mc reads 34:6 of the DRAM linear address */
-	edac_mc_handle_ce(mci, page, offset_in_page(sec1_add << 4),
-			sec1_syndrome, row, channel, "e752x CE");
+	edac_mc_handle_error(HW_EVENT_ERR_CORRECTED, mci,
+			     page, offset_in_page(sec1_add << 4), sec1_syndrome,
+			     row, channel, -1,
+			     "e752x CE", "", NULL);
 }
 
 static inline void process_ce(struct mem_ctl_info *mci, u16 error_one,
@@ -385,9 +408,12 @@
 			edac_mc_find_csrow_by_page(mci, block_page);
 
 		/* e752x mc reads 34:6 of the DRAM linear address */
-		edac_mc_handle_ue(mci, block_page,
-				offset_in_page(error_2b << 4),
-				row, "e752x UE from Read");
+		edac_mc_handle_error(HW_EVENT_ERR_UNCORRECTED, mci,
+					block_page,
+					offset_in_page(error_2b << 4), 0,
+					 row, -1, -1,
+					"e752x UE from Read", "", NULL);
+
 	}
 	if (error_one & 0x0404) {
 		error_2b = scrb_add;
@@ -401,9 +427,11 @@
 			edac_mc_find_csrow_by_page(mci, block_page);
 
 		/* e752x mc reads 34:6 of the DRAM linear address */
-		edac_mc_handle_ue(mci, block_page,
-				offset_in_page(error_2b << 4),
-				row, "e752x UE from Scruber");
+		edac_mc_handle_error(HW_EVENT_ERR_UNCORRECTED, mci,
+					block_page,
+					offset_in_page(error_2b << 4), 0,
+					row, -1, -1,
+					"e752x UE from Scruber", "", NULL);
 	}
 }
 
@@ -426,7 +454,9 @@
 		return;
 
 	debugf3("%s()\n", __func__);
-	edac_mc_handle_ue_no_info(mci, "e752x UE log memory write");
+	edac_mc_handle_error(HW_EVENT_ERR_UNCORRECTED, mci, 0, 0, 0,
+			     -1, -1, -1,
+			     "e752x UE log memory write", "", NULL);
 }
 
 static void do_process_ded_retry(struct mem_ctl_info *mci, u16 error,
@@ -1044,7 +1074,7 @@
 	int drc_drbg;		/* DRB granularity 0=64mb, 1=128mb */
 	int drc_ddim;		/* DRAM Data Integrity Mode 0=none, 2=edac */
 	u8 value;
-	u32 dra, drc, cumul_size;
+	u32 dra, drc, cumul_size, i, nr_pages;
 
 	dra = 0;
 	for (index = 0; index < 4; index++) {
@@ -1053,7 +1083,7 @@
 		dra |= dra_reg << (index * 8);
 	}
 	pci_read_config_dword(pdev, E752X_DRC, &drc);
-	drc_chan = dual_channel_active(ddrcsr);
+	drc_chan = dual_channel_active(ddrcsr) ? 1 : 0;
 	drc_drbg = drc_chan + 1;	/* 128 in dual mode, 64 in single */
 	drc_ddim = (drc >> 20) & 0x3;
 
@@ -1078,26 +1108,33 @@
 
 		csrow->first_page = last_cumul_size;
 		csrow->last_page = cumul_size - 1;
-		csrow->nr_pages = cumul_size - last_cumul_size;
+		nr_pages = cumul_size - last_cumul_size;
 		last_cumul_size = cumul_size;
-		csrow->grain = 1 << 12;	/* 4KiB - resolution of CELOG */
-		csrow->mtype = MEM_RDDR;	/* only one type supported */
-		csrow->dtype = mem_dev ? DEV_X4 : DEV_X8;
 
-		/*
-		 * if single channel or x8 devices then SECDED
-		 * if dual channel and x4 then S4ECD4ED
-		 */
-		if (drc_ddim) {
-			if (drc_chan && mem_dev) {
-				csrow->edac_mode = EDAC_S4ECD4ED;
-				mci->edac_cap |= EDAC_FLAG_S4ECD4ED;
-			} else {
-				csrow->edac_mode = EDAC_SECDED;
-				mci->edac_cap |= EDAC_FLAG_SECDED;
-			}
-		} else
-			csrow->edac_mode = EDAC_NONE;
+		for (i = 0; i < csrow->nr_channels; i++) {
+			struct dimm_info *dimm = csrow->channels[i].dimm;
+
+			debugf3("Initializing rank at (%i,%i)\n", index, i);
+			dimm->nr_pages = nr_pages / csrow->nr_channels;
+			dimm->grain = 1 << 12;	/* 4KiB - resolution of CELOG */
+			dimm->mtype = MEM_RDDR;	/* only one type supported */
+			dimm->dtype = mem_dev ? DEV_X4 : DEV_X8;
+
+			/*
+			 * if single channel or x8 devices then SECDED
+			 * if dual channel and x4 then S4ECD4ED
+			 */
+			if (drc_ddim) {
+				if (drc_chan && mem_dev) {
+					dimm->edac_mode = EDAC_S4ECD4ED;
+					mci->edac_cap |= EDAC_FLAG_S4ECD4ED;
+				} else {
+					dimm->edac_mode = EDAC_SECDED;
+					mci->edac_cap |= EDAC_FLAG_SECDED;
+				}
+			} else
+				dimm->edac_mode = EDAC_NONE;
+		}
 	}
 }
 
@@ -1226,6 +1263,7 @@
 	u16 pci_data;
 	u8 stat8;
 	struct mem_ctl_info *mci;
+	struct edac_mc_layer layers[2];
 	struct e752x_pvt *pvt;
 	u16 ddrcsr;
 	int drc_chan;		/* Number of channels 0=1chan,1=2chan */
@@ -1252,11 +1290,15 @@
 	/* Dual channel = 1, Single channel = 0 */
 	drc_chan = dual_channel_active(ddrcsr);
 
-	mci = edac_mc_alloc(sizeof(*pvt), E752X_NR_CSROWS, drc_chan + 1, 0);
-
-	if (mci == NULL) {
+	layers[0].type = EDAC_MC_LAYER_CHIP_SELECT;
+	layers[0].size = E752X_NR_CSROWS;
+	layers[0].is_virt_csrow = true;
+	layers[1].type = EDAC_MC_LAYER_CHANNEL;
+	layers[1].size = drc_chan + 1;
+	layers[1].is_virt_csrow = false;
+	mci = edac_mc_alloc(0, ARRAY_SIZE(layers), layers, sizeof(*pvt));
+	if (mci == NULL)
 		return -ENOMEM;
-	}
 
 	debugf3("%s(): init mci\n", __func__);
 	mci->mtype_cap = MEM_FLAG_RDDR;
diff --git a/drivers/edac/e7xxx_edac.c b/drivers/edac/e7xxx_edac.c
index 68dea87..9a9c1a5 100644
--- a/drivers/edac/e7xxx_edac.c
+++ b/drivers/edac/e7xxx_edac.c
@@ -10,6 +10,9 @@
  * Based on work by Dan Hollis <goemon at anime dot net> and others.
  *	http://www.anime.net/~goemon/linux-ecc/
  *
+ * Datasheet:
+ *	http://www.intel.com/content/www/us/en/chipsets/e7501-chipset-memory-controller-hub-datasheet.html
+ *
  * Contributors:
  *	Eric Biederman (Linux Networx)
  *	Tom Zimmerman (Linux Networx)
@@ -71,7 +74,7 @@
 #endif				/* PCI_DEVICE_ID_INTEL_7505_1_ERR */
 
 #define E7XXX_NR_CSROWS		8	/* number of csrows */
-#define E7XXX_NR_DIMMS		8	/* FIXME - is this correct? */
+#define E7XXX_NR_DIMMS		8	/* 2 channels, 4 dimms/channel */
 
 /* E7XXX register addresses - device 0 function 0 */
 #define E7XXX_DRB		0x60	/* DRAM row boundary register (8b) */
@@ -216,13 +219,15 @@
 	row = edac_mc_find_csrow_by_page(mci, page);
 	/* convert syndrome to channel */
 	channel = e7xxx_find_channel(syndrome);
-	edac_mc_handle_ce(mci, page, 0, syndrome, row, channel, "e7xxx CE");
+	edac_mc_handle_error(HW_EVENT_ERR_CORRECTED, mci, page, 0, syndrome,
+			     row, channel, -1, "e7xxx CE", "", NULL);
 }
 
 static void process_ce_no_info(struct mem_ctl_info *mci)
 {
 	debugf3("%s()\n", __func__);
-	edac_mc_handle_ce_no_info(mci, "e7xxx CE log register overflow");
+	edac_mc_handle_error(HW_EVENT_ERR_CORRECTED, mci, 0, 0, 0, -1, -1, -1,
+			     "e7xxx CE log register overflow", "", NULL);
 }
 
 static void process_ue(struct mem_ctl_info *mci, struct e7xxx_error_info *info)
@@ -236,13 +241,17 @@
 	/* FIXME - should use PAGE_SHIFT */
 	block_page = error_2b >> 6;	/* convert to 4k address */
 	row = edac_mc_find_csrow_by_page(mci, block_page);
-	edac_mc_handle_ue(mci, block_page, 0, row, "e7xxx UE");
+
+	edac_mc_handle_error(HW_EVENT_ERR_UNCORRECTED, mci, block_page, 0, 0,
+			     row, -1, -1, "e7xxx UE", "", NULL);
 }
 
 static void process_ue_no_info(struct mem_ctl_info *mci)
 {
 	debugf3("%s()\n", __func__);
-	edac_mc_handle_ue_no_info(mci, "e7xxx UE log register overflow");
+
+	edac_mc_handle_error(HW_EVENT_ERR_UNCORRECTED, mci, 0, 0, 0, -1, -1, -1,
+			     "e7xxx UE log register overflow", "", NULL);
 }
 
 static void e7xxx_get_error_info(struct mem_ctl_info *mci,
@@ -347,11 +356,12 @@
 			int dev_idx, u32 drc)
 {
 	unsigned long last_cumul_size;
-	int index;
+	int index, j;
 	u8 value;
-	u32 dra, cumul_size;
+	u32 dra, cumul_size, nr_pages;
 	int drc_chan, drc_drbg, drc_ddim, mem_dev;
 	struct csrow_info *csrow;
+	struct dimm_info *dimm;
 
 	pci_read_config_dword(pdev, E7XXX_DRA, &dra);
 	drc_chan = dual_channel_active(drc, dev_idx);
@@ -379,26 +389,32 @@
 
 		csrow->first_page = last_cumul_size;
 		csrow->last_page = cumul_size - 1;
-		csrow->nr_pages = cumul_size - last_cumul_size;
+		nr_pages = cumul_size - last_cumul_size;
 		last_cumul_size = cumul_size;
-		csrow->grain = 1 << 12;	/* 4KiB - resolution of CELOG */
-		csrow->mtype = MEM_RDDR;	/* only one type supported */
-		csrow->dtype = mem_dev ? DEV_X4 : DEV_X8;
 
-		/*
-		 * if single channel or x8 devices then SECDED
-		 * if dual channel and x4 then S4ECD4ED
-		 */
-		if (drc_ddim) {
-			if (drc_chan && mem_dev) {
-				csrow->edac_mode = EDAC_S4ECD4ED;
-				mci->edac_cap |= EDAC_FLAG_S4ECD4ED;
-			} else {
-				csrow->edac_mode = EDAC_SECDED;
-				mci->edac_cap |= EDAC_FLAG_SECDED;
-			}
-		} else
-			csrow->edac_mode = EDAC_NONE;
+		for (j = 0; j < drc_chan + 1; j++) {
+			dimm = csrow->channels[j].dimm;
+
+			dimm->nr_pages = nr_pages / (drc_chan + 1);
+			dimm->grain = 1 << 12;	/* 4KiB - resolution of CELOG */
+			dimm->mtype = MEM_RDDR;	/* only one type supported */
+			dimm->dtype = mem_dev ? DEV_X4 : DEV_X8;
+
+			/*
+			 * if single channel or x8 devices then SECDED
+			 * if dual channel and x4 then S4ECD4ED
+			 */
+			if (drc_ddim) {
+				if (drc_chan && mem_dev) {
+					dimm->edac_mode = EDAC_S4ECD4ED;
+					mci->edac_cap |= EDAC_FLAG_S4ECD4ED;
+				} else {
+					dimm->edac_mode = EDAC_SECDED;
+					mci->edac_cap |= EDAC_FLAG_SECDED;
+				}
+			} else
+				dimm->edac_mode = EDAC_NONE;
+		}
 	}
 }
 
@@ -406,6 +422,7 @@
 {
 	u16 pci_data;
 	struct mem_ctl_info *mci = NULL;
+	struct edac_mc_layer layers[2];
 	struct e7xxx_pvt *pvt = NULL;
 	u32 drc;
 	int drc_chan;
@@ -416,8 +433,21 @@
 	pci_read_config_dword(pdev, E7XXX_DRC, &drc);
 
 	drc_chan = dual_channel_active(drc, dev_idx);
-	mci = edac_mc_alloc(sizeof(*pvt), E7XXX_NR_CSROWS, drc_chan + 1, 0);
-
+	/*
+	 * According to the datasheet, this device has a maximum of
+	 * 4 DIMMs per channel, either single-rank or dual-rank. So, the
+	 * total amount of DIMMs is 8 (E7XXX_NR_DIMMS).
+	 * That means that DIMMs are mapped as CSROWs, and the channel
+	 * will map the rank. So, an error on either channel should be
+	 * attributed to the same DIMM.
+	 */
+	layers[0].type = EDAC_MC_LAYER_CHIP_SELECT;
+	layers[0].size = E7XXX_NR_CSROWS;
+	layers[0].is_virt_csrow = true;
+	layers[1].type = EDAC_MC_LAYER_CHANNEL;
+	layers[1].size = drc_chan + 1;
+	layers[1].is_virt_csrow = false;
+	mci = edac_mc_alloc(0, ARRAY_SIZE(layers), layers, sizeof(*pvt));
 	if (mci == NULL)
 		return -ENOMEM;
 
diff --git a/drivers/edac/edac_core.h b/drivers/edac/edac_core.h
index 5b73941..117490d 100644
--- a/drivers/edac/edac_core.h
+++ b/drivers/edac/edac_core.h
@@ -447,8 +447,10 @@
 
 #endif				/* CONFIG_PCI */
 
-extern struct mem_ctl_info *edac_mc_alloc(unsigned sz_pvt, unsigned nr_csrows,
-					  unsigned nr_chans, int edac_index);
+struct mem_ctl_info *edac_mc_alloc(unsigned mc_num,
+				   unsigned n_layers,
+				   struct edac_mc_layer *layers,
+				   unsigned sz_pvt);
 extern int edac_mc_add_mc(struct mem_ctl_info *mci);
 extern void edac_mc_free(struct mem_ctl_info *mci);
 extern struct mem_ctl_info *edac_mc_find(int idx);
@@ -456,35 +458,17 @@
 extern struct mem_ctl_info *edac_mc_del_mc(struct device *dev);
 extern int edac_mc_find_csrow_by_page(struct mem_ctl_info *mci,
 				      unsigned long page);
-
-/*
- * The no info errors are used when error overflows are reported.
- * There are a limited number of error logging registers that can
- * be exausted.  When all registers are exhausted and an additional
- * error occurs then an error overflow register records that an
- * error occurred and the type of error, but doesn't have any
- * further information.  The ce/ue versions make for cleaner
- * reporting logic and function interface - reduces conditional
- * statement clutter and extra function arguments.
- */
-extern void edac_mc_handle_ce(struct mem_ctl_info *mci,
-			      unsigned long page_frame_number,
-			      unsigned long offset_in_page,
-			      unsigned long syndrome, int row, int channel,
-			      const char *msg);
-extern void edac_mc_handle_ce_no_info(struct mem_ctl_info *mci,
-				      const char *msg);
-extern void edac_mc_handle_ue(struct mem_ctl_info *mci,
-			      unsigned long page_frame_number,
-			      unsigned long offset_in_page, int row,
-			      const char *msg);
-extern void edac_mc_handle_ue_no_info(struct mem_ctl_info *mci,
-				      const char *msg);
-extern void edac_mc_handle_fbd_ue(struct mem_ctl_info *mci, unsigned int csrow,
-				  unsigned int channel0, unsigned int channel1,
-				  char *msg);
-extern void edac_mc_handle_fbd_ce(struct mem_ctl_info *mci, unsigned int csrow,
-				  unsigned int channel, char *msg);
+void edac_mc_handle_error(const enum hw_event_mc_err_type type,
+			  struct mem_ctl_info *mci,
+			  const unsigned long page_frame_number,
+			  const unsigned long offset_in_page,
+			  const unsigned long syndrome,
+			  const int layer0,
+			  const int layer1,
+			  const int layer2,
+			  const char *msg,
+			  const char *other_detail,
+			  const void *mcelog);
 
 /*
  * edac_device APIs
@@ -496,6 +480,7 @@
 extern void edac_device_handle_ce(struct edac_device_ctl_info *edac_dev,
 				int inst_nr, int block_nr, const char *msg);
 extern int edac_device_alloc_index(void);
+extern const char *edac_layer_name[];
 
 /*
  * edac_pci APIs
diff --git a/drivers/edac/edac_device.c b/drivers/edac/edac_device.c
index 45b8f4b..ee3f1f8 100644
--- a/drivers/edac/edac_device.c
+++ b/drivers/edac/edac_device.c
@@ -79,7 +79,7 @@
 	unsigned total_size;
 	unsigned count;
 	unsigned instance, block, attr;
-	void *pvt;
+	void *pvt, *p;
 	int err;
 
 	debugf4("%s() instances=%d blocks=%d\n",
@@ -92,35 +92,30 @@
 	 * to be at least as stringent as what the compiler would
 	 * provide if we could simply hardcode everything into a single struct.
 	 */
-	dev_ctl = (struct edac_device_ctl_info *)NULL;
+	p = NULL;
+	dev_ctl = edac_align_ptr(&p, sizeof(*dev_ctl), 1);
 
 	/* Calc the 'end' offset past end of ONE ctl_info structure
 	 * which will become the start of the 'instance' array
 	 */
-	dev_inst = edac_align_ptr(&dev_ctl[1], sizeof(*dev_inst));
+	dev_inst = edac_align_ptr(&p, sizeof(*dev_inst), nr_instances);
 
 	/* Calc the 'end' offset past the instance array within the ctl_info
 	 * which will become the start of the block array
 	 */
-	dev_blk = edac_align_ptr(&dev_inst[nr_instances], sizeof(*dev_blk));
+	count = nr_instances * nr_blocks;
+	dev_blk = edac_align_ptr(&p, sizeof(*dev_blk), count);
 
 	/* Calc the 'end' offset past the dev_blk array
 	 * which will become the start of the attrib array, if any.
 	 */
-	count = nr_instances * nr_blocks;
-	dev_attrib = edac_align_ptr(&dev_blk[count], sizeof(*dev_attrib));
-
-	/* Check for case of when an attribute array is specified */
-	if (nr_attrib > 0) {
-		/* calc how many nr_attrib we need */
+	/* calc how many nr_attrib we need */
+	if (nr_attrib > 0)
 		count *= nr_attrib;
+	dev_attrib = edac_align_ptr(&p, sizeof(*dev_attrib), count);
 
-		/* Calc the 'end' offset past the attributes array */
-		pvt = edac_align_ptr(&dev_attrib[count], sz_private);
-	} else {
-		/* no attribute array specified */
-		pvt = edac_align_ptr(dev_attrib, sz_private);
-	}
+	/* Calc the 'end' offset past the attributes array */
+	pvt = edac_align_ptr(&p, sz_private, 1);
 
 	/* 'pvt' now points to where the private data area is.
 	 * At this point 'pvt' (like dev_inst,dev_blk and dev_attrib)
diff --git a/drivers/edac/edac_mc.c b/drivers/edac/edac_mc.c
index feef773..de5ba86e 100644
--- a/drivers/edac/edac_mc.c
+++ b/drivers/edac/edac_mc.c
@@ -43,9 +43,26 @@
 {
 	debugf4("\tchannel = %p\n", chan);
 	debugf4("\tchannel->chan_idx = %d\n", chan->chan_idx);
-	debugf4("\tchannel->ce_count = %d\n", chan->ce_count);
-	debugf4("\tchannel->label = '%s'\n", chan->label);
 	debugf4("\tchannel->csrow = %p\n\n", chan->csrow);
+	debugf4("\tchannel->dimm = %p\n", chan->dimm);
+}
+
+static void edac_mc_dump_dimm(struct dimm_info *dimm)
+{
+	int i;
+
+	debugf4("\tdimm = %p\n", dimm);
+	debugf4("\tdimm->label = '%s'\n", dimm->label);
+	debugf4("\tdimm->nr_pages = 0x%x\n", dimm->nr_pages);
+	debugf4("\tdimm location ");
+	for (i = 0; i < dimm->mci->n_layers; i++) {
+		printk(KERN_CONT "%d", dimm->location[i]);
+		if (i < dimm->mci->n_layers - 1)
+			printk(KERN_CONT ".");
+	}
+	printk(KERN_CONT "\n");
+	debugf4("\tdimm->grain = %d\n", dimm->grain);
+	debugf4("\tdimm->nr_pages = 0x%x\n", dimm->nr_pages);
 }
 
 static void edac_mc_dump_csrow(struct csrow_info *csrow)
@@ -55,7 +72,6 @@
 	debugf4("\tcsrow->first_page = 0x%lx\n", csrow->first_page);
 	debugf4("\tcsrow->last_page = 0x%lx\n", csrow->last_page);
 	debugf4("\tcsrow->page_mask = 0x%lx\n", csrow->page_mask);
-	debugf4("\tcsrow->nr_pages = 0x%x\n", csrow->nr_pages);
 	debugf4("\tcsrow->nr_channels = %d\n", csrow->nr_channels);
 	debugf4("\tcsrow->channels = %p\n", csrow->channels);
 	debugf4("\tcsrow->mci = %p\n\n", csrow->mci);
@@ -70,6 +86,8 @@
 	debugf4("\tmci->edac_check = %p\n", mci->edac_check);
 	debugf3("\tmci->nr_csrows = %d, csrows = %p\n",
 		mci->nr_csrows, mci->csrows);
+	debugf3("\tmci->nr_dimms = %d, dimms = %p\n",
+		mci->tot_dimms, mci->dimms);
 	debugf3("\tdev = %p\n", mci->dev);
 	debugf3("\tmod_name:ctl_name = %s:%s\n", mci->mod_name, mci->ctl_name);
 	debugf3("\tpvt_info = %p\n\n", mci->pvt_info);
@@ -101,18 +119,37 @@
 };
 EXPORT_SYMBOL_GPL(edac_mem_types);
 
-/* 'ptr' points to a possibly unaligned item X such that sizeof(X) is 'size'.
- * Adjust 'ptr' so that its alignment is at least as stringent as what the
- * compiler would provide for X and return the aligned result.
+/**
+ * edac_align_ptr - Prepares the pointer offsets for a single-shot allocation
+ * @p:		pointer to a pointer with the memory offset to be used. At
+ *		return, this will be incremented to point to the next offset
+ * @size:	Size of the data structure to be reserved
+ * @n_elems:	Number of elements that should be reserved
  *
  * If 'size' is a constant, the compiler will optimize this whole function
- * down to either a no-op or the addition of a constant to the value of 'ptr'.
+ * down to either a no-op or the addition of a constant to the value of '*p'.
+ *
+ * The 'p' pointer is needed in order to keep advancing to the proper
+ * offsets in memory when allocating a struct together with its
+ * embedded structs, as edac_device_alloc_ctl_info() does, for
+ * example.
+ *
+ * At return, the pointer 'p' will be incremented to be used on a next call
+ * to this function.
  */
-void *edac_align_ptr(void *ptr, unsigned size)
+void *edac_align_ptr(void **p, unsigned size, int n_elems)
 {
 	unsigned align, r;
+	void *ptr = *p;
 
-	/* Here we assume that the alignment of a "long long" is the most
+	*p += size * n_elems;
+
+	/*
+	 * 'p' can possibly be an unaligned item X such that sizeof(X) is
+	 * 'size'.  Adjust 'p' so that its alignment is at least as
+	 * stringent as what the compiler would provide for X and return
+	 * the aligned result.
+	 * Here we assume that the alignment of a "long long" is the most
 	 * stringent alignment that the compiler will ever provide by default.
 	 * As far as I know, this is a reasonable assumption.
 	 */
@@ -127,19 +164,23 @@
 	else
 		return (char *)ptr;
 
-	r = size % align;
+	r = (unsigned long)ptr % align;
 
 	if (r == 0)
 		return (char *)ptr;
 
+	*p += align - r;
+
 	return (void *)(((unsigned long)ptr) + align - r);
 }
 
 /**
- * edac_mc_alloc: Allocate a struct mem_ctl_info structure
- * @size_pvt:	size of private storage needed
- * @nr_csrows:	Number of CWROWS needed for this MC
- * @nr_chans:	Number of channels for the MC
+ * edac_mc_alloc: Allocate and partially fill a struct mem_ctl_info structure
+ * @mc_num:		Memory controller number
+ * @n_layers:		Number of MC hierarchy layers
+ * @layers:		Describes each layer as seen by the Memory Controller
+ * @sz_pvt:		Size of private storage needed
+ *
  *
  * Everything is kmalloc'ed as one big chunk - more efficient.
  * Only can be used if all structures have the same lifetime - otherwise
@@ -147,32 +188,77 @@
  *
  * Use edac_mc_free() to free mc structures allocated by this function.
  *
+ * NOTE: drivers handle multi-rank memories in different ways: in some
+ * drivers, one multi-rank memory stick is mapped as one entry, while, in
+ * others, a single multi-rank memory stick would be mapped into several
+ * entries. Currently, this function will allocate multiple struct dimm_info
+ * in such scenarios, as grouping the multiple ranks would require driver changes.
+ *
  * Returns:
- *	NULL allocation failed
- *	struct mem_ctl_info pointer
+ *	On failure: NULL
+ *	On success: struct mem_ctl_info pointer
  */
-struct mem_ctl_info *edac_mc_alloc(unsigned sz_pvt, unsigned nr_csrows,
-				unsigned nr_chans, int edac_index)
+struct mem_ctl_info *edac_mc_alloc(unsigned mc_num,
+				   unsigned n_layers,
+				   struct edac_mc_layer *layers,
+				   unsigned sz_pvt)
 {
 	struct mem_ctl_info *mci;
-	struct csrow_info *csi, *csrow;
+	struct edac_mc_layer *layer;
+	struct csrow_info *csi, *csr;
 	struct rank_info *chi, *chp, *chan;
-	void *pvt;
-	unsigned size;
-	int row, chn;
-	int err;
+	struct dimm_info *dimm;
+	u32 *ce_per_layer[EDAC_MAX_LAYERS], *ue_per_layer[EDAC_MAX_LAYERS];
+	unsigned pos[EDAC_MAX_LAYERS];
+	unsigned size, tot_dimms = 1, count = 1;
+	unsigned tot_csrows = 1, tot_channels = 1, tot_errcount = 0;
+	void *pvt, *p, *ptr = NULL;
+	int i, j, err, row, chn, n, len;
+	bool per_rank = false;
+
+	BUG_ON(n_layers > EDAC_MAX_LAYERS || n_layers == 0);
+	/*
+	 * Calculate the total amount of dimms and csrows/cschannels while
+	 * in the old API emulation mode
+	 */
+	for (i = 0; i < n_layers; i++) {
+		tot_dimms *= layers[i].size;
+		if (layers[i].is_virt_csrow)
+			tot_csrows *= layers[i].size;
+		else
+			tot_channels *= layers[i].size;
+
+		if (layers[i].type == EDAC_MC_LAYER_CHIP_SELECT)
+			per_rank = true;
+	}
 
 	/* Figure out the offsets of the various items from the start of an mc
 	 * structure.  We want the alignment of each item to be at least as
 	 * stringent as what the compiler would provide if we could simply
 	 * hardcode everything into a single struct.
 	 */
-	mci = (struct mem_ctl_info *)0;
-	csi = edac_align_ptr(&mci[1], sizeof(*csi));
-	chi = edac_align_ptr(&csi[nr_csrows], sizeof(*chi));
-	pvt = edac_align_ptr(&chi[nr_chans * nr_csrows], sz_pvt);
+	mci = edac_align_ptr(&ptr, sizeof(*mci), 1);
+	layer = edac_align_ptr(&ptr, sizeof(*layer), n_layers);
+	csi = edac_align_ptr(&ptr, sizeof(*csi), tot_csrows);
+	chi = edac_align_ptr(&ptr, sizeof(*chi), tot_csrows * tot_channels);
+	dimm = edac_align_ptr(&ptr, sizeof(*dimm), tot_dimms);
+	for (i = 0; i < n_layers; i++) {
+		count *= layers[i].size;
+		debugf4("%s: errcount layer %d size %d\n", __func__, i, count);
+		ce_per_layer[i] = edac_align_ptr(&ptr, sizeof(u32), count);
+		ue_per_layer[i] = edac_align_ptr(&ptr, sizeof(u32), count);
+		tot_errcount += 2 * count;
+	}
+
+	debugf4("%s: allocating %d error counters\n", __func__, tot_errcount);
+	pvt = edac_align_ptr(&ptr, sz_pvt, 1);
 	size = ((unsigned long)pvt) + sz_pvt;
 
+	debugf1("%s(): allocating %u bytes for mci data (%d %s, %d csrows/channels)\n",
+		__func__, size,
+		tot_dimms,
+		per_rank ? "ranks" : "dimms",
+		tot_csrows * tot_channels);
 	mci = kzalloc(size, GFP_KERNEL);
 	if (mci == NULL)
 		return NULL;
@@ -180,28 +266,103 @@
 	/* Adjust pointers so they point within the memory we just allocated
 	 * rather than an imaginary chunk of memory located at address 0.
 	 */
+	layer = (struct edac_mc_layer *)(((char *)mci) + ((unsigned long)layer));
 	csi = (struct csrow_info *)(((char *)mci) + ((unsigned long)csi));
 	chi = (struct rank_info *)(((char *)mci) + ((unsigned long)chi));
+	dimm = (struct dimm_info *)(((char *)mci) + ((unsigned long)dimm));
+	for (i = 0; i < n_layers; i++) {
+		mci->ce_per_layer[i] = (u32 *)((char *)mci + ((unsigned long)ce_per_layer[i]));
+		mci->ue_per_layer[i] = (u32 *)((char *)mci + ((unsigned long)ue_per_layer[i]));
+	}
 	pvt = sz_pvt ? (((char *)mci) + ((unsigned long)pvt)) : NULL;
 
 	/* setup index and various internal pointers */
-	mci->mc_idx = edac_index;
+	mci->mc_idx = mc_num;
 	mci->csrows = csi;
+	mci->dimms  = dimm;
+	mci->tot_dimms = tot_dimms;
 	mci->pvt_info = pvt;
-	mci->nr_csrows = nr_csrows;
+	mci->n_layers = n_layers;
+	mci->layers = layer;
+	memcpy(mci->layers, layers, sizeof(*layer) * n_layers);
+	mci->nr_csrows = tot_csrows;
+	mci->num_cschannel = tot_channels;
+	mci->mem_is_per_rank = per_rank;
 
-	for (row = 0; row < nr_csrows; row++) {
-		csrow = &csi[row];
-		csrow->csrow_idx = row;
-		csrow->mci = mci;
-		csrow->nr_channels = nr_chans;
-		chp = &chi[row * nr_chans];
-		csrow->channels = chp;
+	/*
+	 * Fill the csrow struct
+	 */
+	for (row = 0; row < tot_csrows; row++) {
+		csr = &csi[row];
+		csr->csrow_idx = row;
+		csr->mci = mci;
+		csr->nr_channels = tot_channels;
+		chp = &chi[row * tot_channels];
+		csr->channels = chp;
 
-		for (chn = 0; chn < nr_chans; chn++) {
+		for (chn = 0; chn < tot_channels; chn++) {
 			chan = &chp[chn];
 			chan->chan_idx = chn;
-			chan->csrow = csrow;
+			chan->csrow = csr;
+		}
+	}
+
+	/*
+	 * Fill the dimm struct
+	 */
+	memset(&pos, 0, sizeof(pos));
+	row = 0;
+	chn = 0;
+	debugf4("%s: initializing %d %s\n", __func__, tot_dimms,
+		per_rank ? "ranks" : "dimms");
+	for (i = 0; i < tot_dimms; i++) {
+		chan = &csi[row].channels[chn];
+		dimm = EDAC_DIMM_PTR(layer, mci->dimms, n_layers,
+			       pos[0], pos[1], pos[2]);
+		dimm->mci = mci;
+
+		debugf2("%s: %d: %s%zd (%d:%d:%d): row %d, chan %d\n", __func__,
+			i, per_rank ? "rank" : "dimm", (dimm - mci->dimms),
+			pos[0], pos[1], pos[2], row, chn);
+
+		/*
+		 * Copy DIMM location and initialize it.
+		 */
+		len = sizeof(dimm->label);
+		p = dimm->label;
+		n = snprintf(p, len, "mc#%u", mc_num);
+		p += n;
+		len -= n;
+		for (j = 0; j < n_layers; j++) {
+			n = snprintf(p, len, "%s#%u",
+				     edac_layer_name[layers[j].type],
+				     pos[j]);
+			p += n;
+			len -= n;
+			dimm->location[j] = pos[j];
+
+			if (len <= 0)
+				break;
+		}
+
+		/* Link it to the csrows old API data */
+		chan->dimm = dimm;
+		dimm->csrow = row;
+		dimm->cschannel = chn;
+
+		/* Increment csrow location */
+		row++;
+		if (row == tot_csrows) {
+			row = 0;
+			chn++;
+		}
+
+		/* Increment dimm location */
+		for (j = n_layers - 1; j >= 0; j--) {
+			pos[j]++;
+			if (pos[j] < layers[j].size)
+				break;
+			pos[j] = 0;
 		}
 	}
 
@@ -490,7 +651,6 @@
  * edac_mc_add_mc: Insert the 'mci' structure into the mci global list and
  *                 create sysfs entries associated with mci structure
  * @mci: pointer to the mci structure to be added to the list
- * @mc_idx: A unique numeric identifier to be assigned to the 'mci' structure.
  *
  * Return:
  *	0	Success
@@ -517,6 +677,8 @@
 				edac_mc_dump_channel(&mci->csrows[i].
 						channels[j]);
 		}
+		for (i = 0; i < mci->tot_dimms; i++)
+			edac_mc_dump_dimm(&mci->dimms[i]);
 	}
 #endif
 	mutex_lock(&mem_ctls_mutex);
@@ -636,15 +798,19 @@
 int edac_mc_find_csrow_by_page(struct mem_ctl_info *mci, unsigned long page)
 {
 	struct csrow_info *csrows = mci->csrows;
-	int row, i;
+	int row, i, j, n;
 
 	debugf1("MC%d: %s(): 0x%lx\n", mci->mc_idx, __func__, page);
 	row = -1;
 
 	for (i = 0; i < mci->nr_csrows; i++) {
 		struct csrow_info *csrow = &csrows[i];
-
-		if (csrow->nr_pages == 0)
+		n = 0;
+		for (j = 0; j < csrow->nr_channels; j++) {
+			struct dimm_info *dimm = csrow->channels[j].dimm;
+			n += dimm->nr_pages;
+		}
+		if (n == 0)
 			continue;
 
 		debugf3("MC%d: %s(): first(0x%lx) page(0x%lx) last(0x%lx) "
@@ -670,249 +836,307 @@
 }
 EXPORT_SYMBOL_GPL(edac_mc_find_csrow_by_page);
 
-/* FIXME - setable log (warning/emerg) levels */
-/* FIXME - integrate with evlog: http://evlog.sourceforge.net/ */
-void edac_mc_handle_ce(struct mem_ctl_info *mci,
-		unsigned long page_frame_number,
-		unsigned long offset_in_page, unsigned long syndrome,
-		int row, int channel, const char *msg)
+const char *edac_layer_name[] = {
+	[EDAC_MC_LAYER_BRANCH] = "branch",
+	[EDAC_MC_LAYER_CHANNEL] = "channel",
+	[EDAC_MC_LAYER_SLOT] = "slot",
+	[EDAC_MC_LAYER_CHIP_SELECT] = "csrow",
+};
+EXPORT_SYMBOL_GPL(edac_layer_name);
+
+static void edac_inc_ce_error(struct mem_ctl_info *mci,
+				    bool enable_per_layer_report,
+				    const int pos[EDAC_MAX_LAYERS])
+{
+	int i, index = 0;
+
+	mci->ce_mc++;
+
+	if (!enable_per_layer_report) {
+		mci->ce_noinfo_count++;
+		return;
+	}
+
+	for (i = 0; i < mci->n_layers; i++) {
+		if (pos[i] < 0)
+			break;
+		index += pos[i];
+		mci->ce_per_layer[i][index]++;
+
+		if (i < mci->n_layers - 1)
+			index *= mci->layers[i + 1].size;
+	}
+}
+
+static void edac_inc_ue_error(struct mem_ctl_info *mci,
+				    bool enable_per_layer_report,
+				    const int pos[EDAC_MAX_LAYERS])
+{
+	int i, index = 0;
+
+	mci->ue_mc++;
+
+	if (!enable_per_layer_report) {
+		mci->ce_noinfo_count++;
+		return;
+	}
+
+	for (i = 0; i < mci->n_layers; i++) {
+		if (pos[i] < 0)
+			break;
+		index += pos[i];
+		mci->ue_per_layer[i][index]++;
+
+		if (i < mci->n_layers - 1)
+			index *= mci->layers[i + 1].size;
+	}
+}
+
+static void edac_ce_error(struct mem_ctl_info *mci,
+			  const int pos[EDAC_MAX_LAYERS],
+			  const char *msg,
+			  const char *location,
+			  const char *label,
+			  const char *detail,
+			  const char *other_detail,
+			  const bool enable_per_layer_report,
+			  const unsigned long page_frame_number,
+			  const unsigned long offset_in_page,
+			  u32 grain)
 {
 	unsigned long remapped_page;
 
-	debugf3("MC%d: %s()\n", mci->mc_idx, __func__);
-
-	/* FIXME - maybe make panic on INTERNAL ERROR an option */
-	if (row >= mci->nr_csrows || row < 0) {
-		/* something is wrong */
-		edac_mc_printk(mci, KERN_ERR,
-			"INTERNAL ERROR: row out of range "
-			"(%d >= %d)\n", row, mci->nr_csrows);
-		edac_mc_handle_ce_no_info(mci, "INTERNAL ERROR");
-		return;
+	if (edac_mc_get_log_ce()) {
+		if (other_detail && *other_detail)
+			edac_mc_printk(mci, KERN_WARNING,
+				       "CE %s on %s (%s%s - %s)\n",
+				       msg, label, location,
+				       detail, other_detail);
+		else
+			edac_mc_printk(mci, KERN_WARNING,
+				       "CE %s on %s (%s%s)\n",
+				       msg, label, location,
+				       detail);
 	}
-
-	if (channel >= mci->csrows[row].nr_channels || channel < 0) {
-		/* something is wrong */
-		edac_mc_printk(mci, KERN_ERR,
-			"INTERNAL ERROR: channel out of range "
-			"(%d >= %d)\n", channel,
-			mci->csrows[row].nr_channels);
-		edac_mc_handle_ce_no_info(mci, "INTERNAL ERROR");
-		return;
-	}
-
-	if (edac_mc_get_log_ce())
-		/* FIXME - put in DIMM location */
-		edac_mc_printk(mci, KERN_WARNING,
-			"CE page 0x%lx, offset 0x%lx, grain %d, syndrome "
-			"0x%lx, row %d, channel %d, label \"%s\": %s\n",
-			page_frame_number, offset_in_page,
-			mci->csrows[row].grain, syndrome, row, channel,
-			mci->csrows[row].channels[channel].label, msg);
-
-	mci->ce_count++;
-	mci->csrows[row].ce_count++;
-	mci->csrows[row].channels[channel].ce_count++;
+	edac_inc_ce_error(mci, enable_per_layer_report, pos);
 
 	if (mci->scrub_mode & SCRUB_SW_SRC) {
 		/*
-		 * Some MC's can remap memory so that it is still available
-		 * at a different address when PCI devices map into memory.
-		 * MC's that can't do this lose the memory where PCI devices
-		 * are mapped.  This mapping is MC dependent and so we call
-		 * back into the MC driver for it to map the MC page to
-		 * a physical (CPU) page which can then be mapped to a virtual
-		 * page - which can then be scrubbed.
-		 */
+		 * Some memory controllers (called MCs below) can remap
+		 * memory so that it is still available at a different
+		 * address when PCI devices map into memory.
+		 * MCs that can't do this lose the memory where PCI
+		 * devices are mapped. This mapping is MC-dependent
+		 * and so we call back into the MC driver for it to
+		 * map the MC page to a physical (CPU) page which can
+		 * then be mapped to a virtual page - which can then
+		 * be scrubbed.
+		 */
 		remapped_page = mci->ctl_page_to_phys ?
 			mci->ctl_page_to_phys(mci, page_frame_number) :
 			page_frame_number;
 
-		edac_mc_scrub_block(remapped_page, offset_in_page,
-				mci->csrows[row].grain);
+		edac_mc_scrub_block(remapped_page,
+					offset_in_page, grain);
 	}
 }
-EXPORT_SYMBOL_GPL(edac_mc_handle_ce);
 
-void edac_mc_handle_ce_no_info(struct mem_ctl_info *mci, const char *msg)
+static void edac_ue_error(struct mem_ctl_info *mci,
+			  const int pos[EDAC_MAX_LAYERS],
+			  const char *msg,
+			  const char *location,
+			  const char *label,
+			  const char *detail,
+			  const char *other_detail,
+			  const bool enable_per_layer_report)
 {
-	if (edac_mc_get_log_ce())
-		edac_mc_printk(mci, KERN_WARNING,
-			"CE - no information available: %s\n", msg);
+	if (edac_mc_get_log_ue()) {
+		if (other_detail && *other_detail)
+			edac_mc_printk(mci, KERN_WARNING,
+				       "UE %s on %s (%s%s - %s)\n",
+			               msg, label, location, detail,
+				       other_detail);
+		else
+			edac_mc_printk(mci, KERN_WARNING,
+				       "UE %s on %s (%s%s)\n",
+			               msg, label, location, detail);
+	}
 
-	mci->ce_noinfo_count++;
-	mci->ce_count++;
+	if (edac_mc_get_panic_on_ue()) {
+		if (other_detail && *other_detail)
+			panic("UE %s on %s (%s%s - %s)\n",
+			      msg, label, location, detail, other_detail);
+		else
+			panic("UE %s on %s (%s%s)\n",
+			      msg, label, location, detail);
+	}
+
+	edac_inc_ue_error(mci, enable_per_layer_report, pos);
 }
-EXPORT_SYMBOL_GPL(edac_mc_handle_ce_no_info);
 
-void edac_mc_handle_ue(struct mem_ctl_info *mci,
-		unsigned long page_frame_number,
-		unsigned long offset_in_page, int row, const char *msg)
+#define OTHER_LABEL " or "
+void edac_mc_handle_error(const enum hw_event_mc_err_type type,
+			  struct mem_ctl_info *mci,
+			  const unsigned long page_frame_number,
+			  const unsigned long offset_in_page,
+			  const unsigned long syndrome,
+			  const int layer0,
+			  const int layer1,
+			  const int layer2,
+			  const char *msg,
+			  const char *other_detail,
+			  const void *mcelog)
 {
-	int len = EDAC_MC_LABEL_LEN * 4;
-	char labels[len + 1];
-	char *pos = labels;
-	int chan;
-	int chars;
+	/* FIXME: too much for stack: move it to some pre-allocated area */
+	char detail[80], location[80];
+	char label[(EDAC_MC_LABEL_LEN + 1 + sizeof(OTHER_LABEL)) * mci->tot_dimms];
+	char *p;
+	int row = -1, chan = -1;
+	int pos[EDAC_MAX_LAYERS] = { layer0, layer1, layer2 };
+	int i;
+	u32 grain;
+	bool enable_per_layer_report = false;
 
 	debugf3("MC%d: %s()\n", mci->mc_idx, __func__);
 
-	/* FIXME - maybe make panic on INTERNAL ERROR an option */
-	if (row >= mci->nr_csrows || row < 0) {
-		/* something is wrong */
-		edac_mc_printk(mci, KERN_ERR,
-			"INTERNAL ERROR: row out of range "
-			"(%d >= %d)\n", row, mci->nr_csrows);
-		edac_mc_handle_ue_no_info(mci, "INTERNAL ERROR");
-		return;
+	/*
+	 * Check if the event report is consistent and if the memory
+	 * location is known. If it is known, enable_per_layer_report will be
+	 * true, the DIMM(s) label info will be filled and the per-layer
+	 * error counters will be incremented.
+	 */
+	for (i = 0; i < mci->n_layers; i++) {
+		if (pos[i] >= (int)mci->layers[i].size) {
+			if (type == HW_EVENT_ERR_CORRECTED)
+				p = "CE";
+			else
+				p = "UE";
+
+			edac_mc_printk(mci, KERN_ERR,
+				       "INTERNAL ERROR: %s value is out of range (%d >= %d)\n",
+				       edac_layer_name[mci->layers[i].type],
+				       pos[i], mci->layers[i].size);
+			/*
+			 * Instead of just returning it, let's use what's
+			 * known about the error. The increment routines and
+			 * the DIMM filter logic will do the right thing by
+			 * pointing at the likely damaged DIMMs.
+			 */
+			pos[i] = -1;
+		}
+		if (pos[i] >= 0)
+			enable_per_layer_report = true;
 	}
 
-	chars = snprintf(pos, len + 1, "%s",
-			 mci->csrows[row].channels[0].label);
-	len -= chars;
-	pos += chars;
+	/*
+	 * Get the dimm label/grain that applies to the match criteria.
+	 * As the error algorithm may not be able to point to just one memory
+	 * stick, the logic here will get all possible labels that could
+	 * potentially be affected by the error.
+	 * On FB-DIMM memory controllers, for uncorrected errors, it is common
+	 * to have only the MC channel and the MC dimm (also called "branch")
+	 * but the channel is not known, as the memory is arranged in pairs,
+	 * where each memory belongs to a separate channel within the same
+	 * branch.
+	 */
+	grain = 0;
+	p = label;
+	*p = '\0';
+	for (i = 0; i < mci->tot_dimms; i++) {
+		struct dimm_info *dimm = &mci->dimms[i];
 
-	for (chan = 1; (chan < mci->csrows[row].nr_channels) && (len > 0);
-		chan++) {
-		chars = snprintf(pos, len + 1, ":%s",
-				 mci->csrows[row].channels[chan].label);
-		len -= chars;
-		pos += chars;
+		if (layer0 >= 0 && layer0 != dimm->location[0])
+			continue;
+		if (layer1 >= 0 && layer1 != dimm->location[1])
+			continue;
+		if (layer2 >= 0 && layer2 != dimm->location[2])
+			continue;
+
+		/* get the max grain, over the error match range */
+		if (dimm->grain > grain)
+			grain = dimm->grain;
+
+		/*
+		 * If the error is memory-controller wide, there's no need to
+		 * seek for the affected DIMMs because the whole
+		 * channel/memory controller/...  may be affected.
+		 * Also, don't show errors for empty DIMM slots.
+		 */
+		if (enable_per_layer_report && dimm->nr_pages) {
+			if (p != label) {
+				strcpy(p, OTHER_LABEL);
+				p += strlen(OTHER_LABEL);
+			}
+			strcpy(p, dimm->label);
+			p += strlen(p);
+			*p = '\0';
+
+			/*
+			 * get csrow/channel of the DIMM, in order to allow
+			 * incrementing the compat API counters
+			 */
+			debugf4("%s: %s csrows map: (%d,%d)\n",
+				__func__,
+				mci->mem_is_per_rank ? "rank" : "dimm",
+				dimm->csrow, dimm->cschannel);
+
+			if (row == -1)
+				row = dimm->csrow;
+			else if (row >= 0 && row != dimm->csrow)
+				row = -2;
+
+			if (chan == -1)
+				chan = dimm->cschannel;
+			else if (chan >= 0 && chan != dimm->cschannel)
+				chan = -2;
+		}
 	}
 
-	if (edac_mc_get_log_ue())
-		edac_mc_printk(mci, KERN_EMERG,
-			"UE page 0x%lx, offset 0x%lx, grain %d, row %d, "
-			"labels \"%s\": %s\n", page_frame_number,
-			offset_in_page, mci->csrows[row].grain, row,
-			labels, msg);
+	if (!enable_per_layer_report) {
+		strcpy(label, "any memory");
+	} else {
+		debugf4("%s: csrow/channel to increment: (%d,%d)\n",
+			__func__, row, chan);
+		if (p == label)
+			strcpy(label, "unknown memory");
+		if (type == HW_EVENT_ERR_CORRECTED) {
+			if (row >= 0) {
+				mci->csrows[row].ce_count++;
+				if (chan >= 0)
+					mci->csrows[row].channels[chan].ce_count++;
+			}
+		} else
+			if (row >= 0)
+				mci->csrows[row].ue_count++;
+	}
 
-	if (edac_mc_get_panic_on_ue())
-		panic("EDAC MC%d: UE page 0x%lx, offset 0x%lx, grain %d, "
-			"row %d, labels \"%s\": %s\n", mci->mc_idx,
+	/* Fill the RAM location data */
+	p = location;
+	for (i = 0; i < mci->n_layers; i++) {
+		if (pos[i] < 0)
+			continue;
+
+		p += sprintf(p, "%s:%d ",
+			     edac_layer_name[mci->layers[i].type],
+			     pos[i]);
+	}
+
+	/* Memory type dependent details about the error */
+	if (type == HW_EVENT_ERR_CORRECTED) {
+		snprintf(detail, sizeof(detail),
+			"page:0x%lx offset:0x%lx grain:%d syndrome:0x%lx",
 			page_frame_number, offset_in_page,
-			mci->csrows[row].grain, row, labels, msg);
+			grain, syndrome);
+		edac_ce_error(mci, pos, msg, location, label, detail,
+			      other_detail, enable_per_layer_report,
+			      page_frame_number, offset_in_page, grain);
+	} else {
+		snprintf(detail, sizeof(detail),
+			"page:0x%lx offset:0x%lx grain:%d",
+			page_frame_number, offset_in_page, grain);
 
-	mci->ue_count++;
-	mci->csrows[row].ue_count++;
+		edac_ue_error(mci, pos, msg, location, label, detail,
+			      other_detail, enable_per_layer_report);
+	}
 }
-EXPORT_SYMBOL_GPL(edac_mc_handle_ue);
-
-void edac_mc_handle_ue_no_info(struct mem_ctl_info *mci, const char *msg)
-{
-	if (edac_mc_get_panic_on_ue())
-		panic("EDAC MC%d: Uncorrected Error", mci->mc_idx);
-
-	if (edac_mc_get_log_ue())
-		edac_mc_printk(mci, KERN_WARNING,
-			"UE - no information available: %s\n", msg);
-	mci->ue_noinfo_count++;
-	mci->ue_count++;
-}
-EXPORT_SYMBOL_GPL(edac_mc_handle_ue_no_info);
-
-/*************************************************************
- * On Fully Buffered DIMM modules, this help function is
- * called to process UE events
- */
-void edac_mc_handle_fbd_ue(struct mem_ctl_info *mci,
-			unsigned int csrow,
-			unsigned int channela,
-			unsigned int channelb, char *msg)
-{
-	int len = EDAC_MC_LABEL_LEN * 4;
-	char labels[len + 1];
-	char *pos = labels;
-	int chars;
-
-	if (csrow >= mci->nr_csrows) {
-		/* something is wrong */
-		edac_mc_printk(mci, KERN_ERR,
-			"INTERNAL ERROR: row out of range (%d >= %d)\n",
-			csrow, mci->nr_csrows);
-		edac_mc_handle_ue_no_info(mci, "INTERNAL ERROR");
-		return;
-	}
-
-	if (channela >= mci->csrows[csrow].nr_channels) {
-		/* something is wrong */
-		edac_mc_printk(mci, KERN_ERR,
-			"INTERNAL ERROR: channel-a out of range "
-			"(%d >= %d)\n",
-			channela, mci->csrows[csrow].nr_channels);
-		edac_mc_handle_ue_no_info(mci, "INTERNAL ERROR");
-		return;
-	}
-
-	if (channelb >= mci->csrows[csrow].nr_channels) {
-		/* something is wrong */
-		edac_mc_printk(mci, KERN_ERR,
-			"INTERNAL ERROR: channel-b out of range "
-			"(%d >= %d)\n",
-			channelb, mci->csrows[csrow].nr_channels);
-		edac_mc_handle_ue_no_info(mci, "INTERNAL ERROR");
-		return;
-	}
-
-	mci->ue_count++;
-	mci->csrows[csrow].ue_count++;
-
-	/* Generate the DIMM labels from the specified channels */
-	chars = snprintf(pos, len + 1, "%s",
-			 mci->csrows[csrow].channels[channela].label);
-	len -= chars;
-	pos += chars;
-	chars = snprintf(pos, len + 1, "-%s",
-			 mci->csrows[csrow].channels[channelb].label);
-
-	if (edac_mc_get_log_ue())
-		edac_mc_printk(mci, KERN_EMERG,
-			"UE row %d, channel-a= %d channel-b= %d "
-			"labels \"%s\": %s\n", csrow, channela, channelb,
-			labels, msg);
-
-	if (edac_mc_get_panic_on_ue())
-		panic("UE row %d, channel-a= %d channel-b= %d "
-			"labels \"%s\": %s\n", csrow, channela,
-			channelb, labels, msg);
-}
-EXPORT_SYMBOL(edac_mc_handle_fbd_ue);
-
-/*************************************************************
- * On Fully Buffered DIMM modules, this help function is
- * called to process CE events
- */
-void edac_mc_handle_fbd_ce(struct mem_ctl_info *mci,
-			unsigned int csrow, unsigned int channel, char *msg)
-{
-
-	/* Ensure boundary values */
-	if (csrow >= mci->nr_csrows) {
-		/* something is wrong */
-		edac_mc_printk(mci, KERN_ERR,
-			"INTERNAL ERROR: row out of range (%d >= %d)\n",
-			csrow, mci->nr_csrows);
-		edac_mc_handle_ce_no_info(mci, "INTERNAL ERROR");
-		return;
-	}
-	if (channel >= mci->csrows[csrow].nr_channels) {
-		/* something is wrong */
-		edac_mc_printk(mci, KERN_ERR,
-			"INTERNAL ERROR: channel out of range (%d >= %d)\n",
-			channel, mci->csrows[csrow].nr_channels);
-		edac_mc_handle_ce_no_info(mci, "INTERNAL ERROR");
-		return;
-	}
-
-	if (edac_mc_get_log_ce())
-		/* FIXME - put in DIMM location */
-		edac_mc_printk(mci, KERN_WARNING,
-			"CE row %d, channel %d, label \"%s\": %s\n",
-			csrow, channel,
-			mci->csrows[csrow].channels[channel].label, msg);
-
-	mci->ce_count++;
-	mci->csrows[csrow].ce_count++;
-	mci->csrows[csrow].channels[channel].ce_count++;
-}
-EXPORT_SYMBOL(edac_mc_handle_fbd_ce);
+EXPORT_SYMBOL_GPL(edac_mc_handle_error);
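For illustration only (not part of the patch): with the old per-type helpers removed, a
driver that keeps the classic csrow+channel layout (layer 0 = chip select, layer 1 =
channel) reports errors through the single entry point declared in edac_core.h. A
minimal, hypothetical sketch of a corrected-error report:

	/* Hypothetical example; page/offset/syndrome/csrow/channel come from the driver. */
	static void example_report_ce(struct mem_ctl_info *mci,
				      unsigned long page, unsigned long offset,
				      unsigned long syndrome, int csrow, int channel)
	{
		edac_mc_handle_error(HW_EVENT_ERR_CORRECTED, mci,
				     page, offset, syndrome,
				     csrow, channel, -1,	/* no third layer */
				     "example CE", "", NULL);
	}

Passing -1 for a layer the driver cannot resolve tells the core not to attribute the
error to that coordinate; if all three positions are -1, the error is only counted at the
memory-controller level.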
diff --git a/drivers/edac/edac_mc_sysfs.c b/drivers/edac/edac_mc_sysfs.c
index e9a28f5..f6a29b0 100644
--- a/drivers/edac/edac_mc_sysfs.c
+++ b/drivers/edac/edac_mc_sysfs.c
@@ -144,25 +144,31 @@
 static ssize_t csrow_size_show(struct csrow_info *csrow, char *data,
 				int private)
 {
-	return sprintf(data, "%u\n", PAGES_TO_MiB(csrow->nr_pages));
+	int i;
+	u32 nr_pages = 0;
+
+	for (i = 0; i < csrow->nr_channels; i++)
+		nr_pages += csrow->channels[i].dimm->nr_pages;
+
+	return sprintf(data, "%u\n", PAGES_TO_MiB(nr_pages));
 }
 
 static ssize_t csrow_mem_type_show(struct csrow_info *csrow, char *data,
 				int private)
 {
-	return sprintf(data, "%s\n", mem_types[csrow->mtype]);
+	return sprintf(data, "%s\n", mem_types[csrow->channels[0].dimm->mtype]);
 }
 
 static ssize_t csrow_dev_type_show(struct csrow_info *csrow, char *data,
 				int private)
 {
-	return sprintf(data, "%s\n", dev_types[csrow->dtype]);
+	return sprintf(data, "%s\n", dev_types[csrow->channels[0].dimm->dtype]);
 }
 
 static ssize_t csrow_edac_mode_show(struct csrow_info *csrow, char *data,
 				int private)
 {
-	return sprintf(data, "%s\n", edac_caps[csrow->edac_mode]);
+	return sprintf(data, "%s\n", edac_caps[csrow->channels[0].dimm->edac_mode]);
 }
 
 /* show/store functions for DIMM Label attributes */
@@ -170,11 +176,11 @@
 				char *data, int channel)
 {
 	/* if field has not been initialized, there is nothing to send */
-	if (!csrow->channels[channel].label[0])
+	if (!csrow->channels[channel].dimm->label[0])
 		return 0;
 
 	return snprintf(data, EDAC_MC_LABEL_LEN, "%s\n",
-			csrow->channels[channel].label);
+			csrow->channels[channel].dimm->label);
 }
 
 static ssize_t channel_dimm_label_store(struct csrow_info *csrow,
@@ -184,8 +190,8 @@
 	ssize_t max_size = 0;
 
 	max_size = min((ssize_t) count, (ssize_t) EDAC_MC_LABEL_LEN - 1);
-	strncpy(csrow->channels[channel].label, data, max_size);
-	csrow->channels[channel].label[max_size] = '\0';
+	strncpy(csrow->channels[channel].dimm->label, data, max_size);
+	csrow->channels[channel].dimm->label[max_size] = '\0';
 
 	return max_size;
 }
@@ -419,8 +425,8 @@
 
 	mci->ue_noinfo_count = 0;
 	mci->ce_noinfo_count = 0;
-	mci->ue_count = 0;
-	mci->ce_count = 0;
+	mci->ue_mc = 0;
+	mci->ce_mc = 0;
 
 	for (row = 0; row < mci->nr_csrows; row++) {
 		struct csrow_info *ri = &mci->csrows[row];
@@ -489,12 +495,12 @@
 /* default attribute files for the MCI object */
 static ssize_t mci_ue_count_show(struct mem_ctl_info *mci, char *data)
 {
-	return sprintf(data, "%d\n", mci->ue_count);
+	return sprintf(data, "%d\n", mci->ue_mc);
 }
 
 static ssize_t mci_ce_count_show(struct mem_ctl_info *mci, char *data)
 {
-	return sprintf(data, "%d\n", mci->ce_count);
+	return sprintf(data, "%d\n", mci->ce_mc);
 }
 
 static ssize_t mci_ce_noinfo_show(struct mem_ctl_info *mci, char *data)
@@ -519,16 +525,16 @@
 
 static ssize_t mci_size_mb_show(struct mem_ctl_info *mci, char *data)
 {
-	int total_pages, csrow_idx;
+	int total_pages = 0, csrow_idx, j;
 
-	for (total_pages = csrow_idx = 0; csrow_idx < mci->nr_csrows;
-		csrow_idx++) {
+	for (csrow_idx = 0; csrow_idx < mci->nr_csrows; csrow_idx++) {
 		struct csrow_info *csrow = &mci->csrows[csrow_idx];
 
-		if (!csrow->nr_pages)
-			continue;
+		for (j = 0; j < csrow->nr_channels; j++) {
+			struct dimm_info *dimm = csrow->channels[j].dimm;
 
-		total_pages += csrow->nr_pages;
+			total_pages += dimm->nr_pages;
+		}
 	}
 
 	return sprintf(data, "%u\n", PAGES_TO_MiB(total_pages));
@@ -900,7 +906,7 @@
  */
 int edac_create_sysfs_mci_device(struct mem_ctl_info *mci)
 {
-	int i;
+	int i, j;
 	int err;
 	struct csrow_info *csrow;
 	struct kobject *kobj_mci = &mci->edac_mci_kobj;
@@ -934,10 +940,13 @@
 	/* Make directories for each CSROW object under the mc<id> kobject
 	 */
 	for (i = 0; i < mci->nr_csrows; i++) {
-		csrow = &mci->csrows[i];
+		int nr_pages = 0;
 
-		/* Only expose populated CSROWs */
-		if (csrow->nr_pages > 0) {
+		csrow = &mci->csrows[i];
+		for (j = 0; j < csrow->nr_channels; j++)
+			nr_pages += csrow->channels[j].dimm->nr_pages;
+
+		if (nr_pages > 0) {
 			err = edac_create_csrow_object(mci, csrow, i);
 			if (err) {
 				debugf1("%s() failure: create csrow %d obj\n",
@@ -949,12 +958,15 @@
 
 	return 0;
 
-	/* CSROW error: backout what has already been registered,  */
 fail1:
 	for (i--; i >= 0; i--) {
-		if (csrow->nr_pages > 0) {
+		int nr_pages = 0;
+
+		csrow = &mci->csrows[i];
+		for (j = 0; j < csrow->nr_channels; j++)
+			nr_pages += csrow->channels[j].dimm->nr_pages;
+		if (nr_pages > 0)
 			kobject_put(&mci->csrows[i].kobj);
-		}
 	}
 
 	/* remove the mci instance's attributes, if any */
@@ -973,14 +985,20 @@
  */
 void edac_remove_sysfs_mci_device(struct mem_ctl_info *mci)
 {
-	int i;
+	struct csrow_info *csrow;
+	int i, j;
 
 	debugf0("%s()\n", __func__);
 
 	/* remove all csrow kobjects */
 	debugf4("%s()  unregister this mci kobj\n", __func__);
 	for (i = 0; i < mci->nr_csrows; i++) {
-		if (mci->csrows[i].nr_pages > 0) {
+		int nr_pages = 0;
+
+		csrow = &mci->csrows[i];
+		for (j = 0; j < csrow->nr_channels; j++)
+			nr_pages += csrow->channels[j].dimm->nr_pages;
+		if (nr_pages > 0) {
 			debugf0("%s()  unreg csrow-%d\n", __func__, i);
 			kobject_put(&mci->csrows[i].kobj);
 		}
diff --git a/drivers/edac/edac_module.h b/drivers/edac/edac_module.h
index 00f81b4..0ea7d14 100644
--- a/drivers/edac/edac_module.h
+++ b/drivers/edac/edac_module.h
@@ -50,7 +50,7 @@
 					   *edac_dev, unsigned long value);
 extern void edac_mc_reset_delay_period(int value);
 
-extern void *edac_align_ptr(void *ptr, unsigned size);
+extern void *edac_align_ptr(void **p, unsigned size, int n_elems);
 
 /*
  * EDAC PCI functions
diff --git a/drivers/edac/edac_pci.c b/drivers/edac/edac_pci.c
index 63af1c5..f1ac866 100644
--- a/drivers/edac/edac_pci.c
+++ b/drivers/edac/edac_pci.c
@@ -42,13 +42,13 @@
 						const char *edac_pci_name)
 {
 	struct edac_pci_ctl_info *pci;
-	void *pvt;
+	void *p = NULL, *pvt;
 	unsigned int size;
 
 	debugf1("%s()\n", __func__);
 
-	pci = (struct edac_pci_ctl_info *)0;
-	pvt = edac_align_ptr(&pci[1], sz_pvt);
+	pci = edac_align_ptr(&p, sizeof(*pci), 1);
+	pvt = edac_align_ptr(&p, sz_pvt, 1);
 	size = ((unsigned long)pvt) + sz_pvt;
 
 	/* Alloc the needed control struct memory */
diff --git a/drivers/edac/i3000_edac.c b/drivers/edac/i3000_edac.c
index 277689a..8ad1744 100644
--- a/drivers/edac/i3000_edac.c
+++ b/drivers/edac/i3000_edac.c
@@ -245,7 +245,9 @@
 		return 1;
 
 	if ((info->errsts ^ info->errsts2) & I3000_ERRSTS_BITS) {
-		edac_mc_handle_ce_no_info(mci, "UE overwrote CE");
+		edac_mc_handle_error(HW_EVENT_ERR_UNCORRECTED, mci, 0, 0, 0,
+				     -1, -1, -1,
+				     "UE overwrote CE", "", NULL);
 		info->errsts = info->errsts2;
 	}
 
@@ -256,10 +258,15 @@
 	row = edac_mc_find_csrow_by_page(mci, pfn);
 
 	if (info->errsts & I3000_ERRSTS_UE)
-		edac_mc_handle_ue(mci, pfn, offset, row, "i3000 UE");
+		edac_mc_handle_error(HW_EVENT_ERR_UNCORRECTED, mci,
+				     pfn, offset, 0,
+				     row, -1, -1,
+				     "i3000 UE", "", NULL);
 	else
-		edac_mc_handle_ce(mci, pfn, offset, info->derrsyn, row,
-				multi_chan ? channel : 0, "i3000 CE");
+		edac_mc_handle_error(HW_EVENT_ERR_CORRECTED, mci,
+				     pfn, offset, info->derrsyn,
+				     row, multi_chan ? channel : 0, -1,
+				     "i3000 CE", "", NULL);
 
 	return 1;
 }
@@ -304,9 +311,10 @@
 static int i3000_probe1(struct pci_dev *pdev, int dev_idx)
 {
 	int rc;
-	int i;
+	int i, j;
 	struct mem_ctl_info *mci = NULL;
-	unsigned long last_cumul_size;
+	struct edac_mc_layer layers[2];
+	unsigned long last_cumul_size, nr_pages;
 	int interleaved, nr_channels;
 	unsigned char dra[I3000_RANKS / 2], drb[I3000_RANKS];
 	unsigned char *c0dra = dra, *c1dra = &dra[I3000_RANKS_PER_CHANNEL / 2];
@@ -347,7 +355,14 @@
 	 */
 	interleaved = i3000_is_interleaved(c0dra, c1dra, c0drb, c1drb);
 	nr_channels = interleaved ? 2 : 1;
-	mci = edac_mc_alloc(0, I3000_RANKS / nr_channels, nr_channels, 0);
+
+	layers[0].type = EDAC_MC_LAYER_CHIP_SELECT;
+	layers[0].size = I3000_RANKS / nr_channels;
+	layers[0].is_virt_csrow = true;
+	layers[1].type = EDAC_MC_LAYER_CHANNEL;
+	layers[1].size = nr_channels;
+	layers[1].is_virt_csrow = false;
+	mci = edac_mc_alloc(0, ARRAY_SIZE(layers), layers, 0);
 	if (!mci)
 		return -ENOMEM;
 
@@ -386,19 +401,23 @@
 			cumul_size <<= 1;
 		debugf3("MC: %s(): (%d) cumul_size 0x%x\n",
 			__func__, i, cumul_size);
-		if (cumul_size == last_cumul_size) {
-			csrow->mtype = MEM_EMPTY;
+		if (cumul_size == last_cumul_size)
 			continue;
-		}
 
 		csrow->first_page = last_cumul_size;
 		csrow->last_page = cumul_size - 1;
-		csrow->nr_pages = cumul_size - last_cumul_size;
+		nr_pages = cumul_size - last_cumul_size;
 		last_cumul_size = cumul_size;
-		csrow->grain = I3000_DEAP_GRAIN;
-		csrow->mtype = MEM_DDR2;
-		csrow->dtype = DEV_UNKNOWN;
-		csrow->edac_mode = EDAC_UNKNOWN;
+
+		for (j = 0; j < nr_channels; j++) {
+			struct dimm_info *dimm = csrow->channels[j].dimm;
+
+			dimm->nr_pages = nr_pages / nr_channels;
+			dimm->grain = I3000_DEAP_GRAIN;
+			dimm->mtype = MEM_DDR2;
+			dimm->dtype = DEV_UNKNOWN;
+			dimm->edac_mode = EDAC_UNKNOWN;
+		}
 	}
 
 	/*
diff --git a/drivers/edac/i3200_edac.c b/drivers/edac/i3200_edac.c
index 046808c..bbe43ef 100644
--- a/drivers/edac/i3200_edac.c
+++ b/drivers/edac/i3200_edac.c
@@ -23,6 +23,7 @@
 
 #define PCI_DEVICE_ID_INTEL_3200_HB    0x29f0
 
+#define I3200_DIMMS		4
 #define I3200_RANKS		8
 #define I3200_RANKS_PER_CHANNEL	4
 #define I3200_CHANNELS		2
@@ -217,21 +218,25 @@
 		return;
 
 	if ((info->errsts ^ info->errsts2) & I3200_ERRSTS_BITS) {
-		edac_mc_handle_ce_no_info(mci, "UE overwrote CE");
+		edac_mc_handle_error(HW_EVENT_ERR_UNCORRECTED, mci, 0, 0, 0,
+				     -1, -1, -1, "UE overwrote CE", "", NULL);
 		info->errsts = info->errsts2;
 	}
 
 	for (channel = 0; channel < nr_channels; channel++) {
 		log = info->eccerrlog[channel];
 		if (log & I3200_ECCERRLOG_UE) {
-			edac_mc_handle_ue(mci, 0, 0,
-				eccerrlog_row(channel, log),
-				"i3200 UE");
+			edac_mc_handle_error(HW_EVENT_ERR_UNCORRECTED, mci,
+					     0, 0, 0,
+					     eccerrlog_row(channel, log),
+					     -1, -1,
+					     "i3200 UE", "", NULL);
 		} else if (log & I3200_ECCERRLOG_CE) {
-			edac_mc_handle_ce(mci, 0, 0,
-				eccerrlog_syndrome(log),
-				eccerrlog_row(channel, log), 0,
-				"i3200 CE");
+			edac_mc_handle_error(HW_EVENT_ERR_CORRECTED, mci,
+					     0, 0, eccerrlog_syndrome(log),
+					     eccerrlog_row(channel, log),
+					     -1, -1,
+					     "i3200 CE", "", NULL);
 		}
 	}
 }
@@ -319,9 +324,9 @@
 static int i3200_probe1(struct pci_dev *pdev, int dev_idx)
 {
 	int rc;
-	int i;
+	int i, j;
 	struct mem_ctl_info *mci = NULL;
-	unsigned long last_page;
+	struct edac_mc_layer layers[2];
 	u16 drbs[I3200_CHANNELS][I3200_RANKS_PER_CHANNEL];
 	bool stacked;
 	void __iomem *window;
@@ -336,8 +341,14 @@
 	i3200_get_drbs(window, drbs);
 	nr_channels = how_many_channels(pdev);
 
-	mci = edac_mc_alloc(sizeof(struct i3200_priv), I3200_RANKS,
-		nr_channels, 0);
+	layers[0].type = EDAC_MC_LAYER_CHIP_SELECT;
+	layers[0].size = I3200_DIMMS;
+	layers[0].is_virt_csrow = true;
+	layers[1].type = EDAC_MC_LAYER_CHANNEL;
+	layers[1].size = nr_channels;
+	layers[1].is_virt_csrow = false;
+	mci = edac_mc_alloc(0, ARRAY_SIZE(layers), layers,
+			    sizeof(struct i3200_priv));
 	if (!mci)
 		return -ENOMEM;
 
@@ -366,7 +377,6 @@
 	 * cumulative; the last one will contain the total memory
 	 * contained in all ranks.
 	 */
-	last_page = -1UL;
 	for (i = 0; i < mci->nr_csrows; i++) {
 		unsigned long nr_pages;
 		struct csrow_info *csrow = &mci->csrows[i];
@@ -375,20 +385,18 @@
 			i / I3200_RANKS_PER_CHANNEL,
 			i % I3200_RANKS_PER_CHANNEL);
 
-		if (nr_pages == 0) {
-			csrow->mtype = MEM_EMPTY;
+		if (nr_pages == 0)
 			continue;
+
+		for (j = 0; j < nr_channels; j++) {
+			struct dimm_info *dimm = csrow->channels[j].dimm;
+
+			dimm->nr_pages = nr_pages / nr_channels;
+			dimm->grain = nr_pages << PAGE_SHIFT;
+			dimm->mtype = MEM_DDR2;
+			dimm->dtype = DEV_UNKNOWN;
+			dimm->edac_mode = EDAC_UNKNOWN;
 		}
-
-		csrow->first_page = last_page + 1;
-		last_page += nr_pages;
-		csrow->last_page = last_page;
-		csrow->nr_pages = nr_pages;
-
-		csrow->grain = nr_pages << PAGE_SHIFT;
-		csrow->mtype = MEM_DDR2;
-		csrow->dtype = DEV_UNKNOWN;
-		csrow->edac_mode = EDAC_UNKNOWN;
 	}
 
 	i3200_clear_error_info(mci);
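The error paths above replace the old edac_mc_handle_ce()/edac_mc_handle_ue() helpers with a single call. The argument meanings below are inferred from the calls in this series (mci, syndrome and csrow are assumed to be in scope; -1 marks a layer whose position is unknown):

	edac_mc_handle_error(HW_EVENT_ERR_CORRECTED,	/* or _UNCORRECTED/_FATAL */
			     mci,
			     0,			/* error page frame number */
			     0,			/* offset within that page */
			     syndrome,		/* ECC syndrome, if known */
			     csrow,		/* position in layer 0 */
			     -1,		/* position in layer 1: unknown */
			     -1,		/* position in layer 2: unused */
			     "i3200 CE",	/* short message */
			     "",		/* extra detail string */
			     NULL);		/* optional driver-specific data */
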
diff --git a/drivers/edac/i5000_edac.c b/drivers/edac/i5000_edac.c
index a2680d8..11ea835 100644
--- a/drivers/edac/i5000_edac.c
+++ b/drivers/edac/i5000_edac.c
@@ -270,7 +270,8 @@
 #define MTR3		0x8C
 
 #define NUM_MTRS		4
-#define CHANNELS_PER_BRANCH	(2)
+#define CHANNELS_PER_BRANCH	2
+#define MAX_BRANCHES		2
 
 /* Defines to extract the vaious fields from the
  *	MTRx - Memory Technology Registers
@@ -473,7 +474,6 @@
 	char msg[EDAC_MC_LABEL_LEN + 1 + 160];
 	char *specific = NULL;
 	u32 allErrors;
-	int branch;
 	int channel;
 	int bank;
 	int rank;
@@ -485,8 +485,7 @@
 	if (!allErrors)
 		return;		/* if no error, return now */
 
-	branch = EXTRACT_FBDCHAN_INDX(info->ferr_fat_fbd);
-	channel = branch;
+	channel = EXTRACT_FBDCHAN_INDX(info->ferr_fat_fbd);
 
 	/* Use the NON-Recoverable macros to extract data */
 	bank = NREC_BANK(info->nrecmema);
@@ -495,9 +494,9 @@
 	ras = NREC_RAS(info->nrecmemb);
 	cas = NREC_CAS(info->nrecmemb);
 
-	debugf0("\t\tCSROW= %d  Channels= %d,%d  (Branch= %d "
-		"DRAM Bank= %d rdwr= %s ras= %d cas= %d)\n",
-		rank, channel, channel + 1, branch >> 1, bank,
+	debugf0("\t\tCSROW= %d  Channel= %d "
+		"(DRAM Bank= %d rdwr= %s ras= %d cas= %d)\n",
+		rank, channel, bank,
 		rdwr ? "Write" : "Read", ras, cas);
 
 	/* Only 1 bit will be on */
@@ -533,13 +532,14 @@
 
 	/* Form out message */
 	snprintf(msg, sizeof(msg),
-		 "(Branch=%d DRAM-Bank=%d RDWR=%s RAS=%d CAS=%d "
-		 "FATAL Err=0x%x (%s))",
-		 branch >> 1, bank, rdwr ? "Write" : "Read", ras, cas,
-		 allErrors, specific);
+		 "Bank=%d RAS=%d CAS=%d FATAL Err=0x%x (%s)",
+		 bank, ras, cas, allErrors, specific);
 
 	/* Call the helper to output message */
-	edac_mc_handle_fbd_ue(mci, rank, channel, channel + 1, msg);
+	edac_mc_handle_error(HW_EVENT_ERR_FATAL, mci, 0, 0, 0,
+			     channel >> 1, channel & 1, rank,
+			     rdwr ? "Write error" : "Read error",
+			     msg, NULL);
 }
 
 /*
@@ -633,13 +633,14 @@
 
 		/* Form out message */
 		snprintf(msg, sizeof(msg),
-			 "(Branch=%d DRAM-Bank=%d RDWR=%s RAS=%d "
-			 "CAS=%d, UE Err=0x%x (%s))",
-			 branch >> 1, bank, rdwr ? "Write" : "Read", ras, cas,
-			 ue_errors, specific);
+			 "Rank=%d Bank=%d RAS=%d CAS=%d, UE Err=0x%x (%s)",
+			 rank, bank, ras, cas, ue_errors, specific);
 
 		/* Call the helper to output message */
-		edac_mc_handle_fbd_ue(mci, rank, channel, channel + 1, msg);
+		edac_mc_handle_error(HW_EVENT_ERR_UNCORRECTED, mci, 0, 0, 0,
+				channel >> 1, -1, rank,
+				rdwr ? "Write error" : "Read error",
+				msg, NULL);
 	}
 
 	/* Check correctable errors */
@@ -685,13 +686,16 @@
 
 		/* Form out message */
 		snprintf(msg, sizeof(msg),
-			 "(Branch=%d DRAM-Bank=%d RDWR=%s RAS=%d "
-			 "CAS=%d, CE Err=0x%x (%s))", branch >> 1, bank,
-			 rdwr ? "Write" : "Read", ras, cas, ce_errors,
-			 specific);
+			 "Rank=%d Bank=%d RDWR=%s RAS=%d "
+			 "CAS=%d, CE Err=0x%x (%s)", rank, bank,
+			 rdwr ? "Write" : "Read", ras, cas, ce_errors,
+			 specific);
 
 		/* Call the helper to output message */
-		edac_mc_handle_fbd_ce(mci, rank, channel, msg);
+		edac_mc_handle_error(HW_EVENT_ERR_CORRECTED, mci, 0, 0, 0,
+				channel >> 1, channel % 2, rank,
+				rdwr ? "Write error" : "Read error",
+				msg, NULL);
 	}
 
 	if (!misc_messages)
@@ -731,11 +735,12 @@
 
 		/* Form out message */
 		snprintf(msg, sizeof(msg),
-			 "(Branch=%d Err=%#x (%s))", branch >> 1,
-			 misc_errors, specific);
+			 "Err=%#x (%s)", misc_errors, specific);
 
 		/* Call the helper to output message */
-		edac_mc_handle_fbd_ce(mci, 0, 0, msg);
+		edac_mc_handle_error(HW_EVENT_ERR_CORRECTED, mci, 0, 0, 0,
+				branch >> 1, -1, -1,
+				"Misc error", msg, NULL);
 	}
 }
 
@@ -956,14 +961,14 @@
  *
  *	return the proper MTR register as determine by the csrow and channel desired
  */
-static int determine_mtr(struct i5000_pvt *pvt, int csrow, int channel)
+static int determine_mtr(struct i5000_pvt *pvt, int slot, int channel)
 {
 	int mtr;
 
 	if (channel < CHANNELS_PER_BRANCH)
-		mtr = pvt->b0_mtr[csrow >> 1];
+		mtr = pvt->b0_mtr[slot];
 	else
-		mtr = pvt->b1_mtr[csrow >> 1];
+		mtr = pvt->b1_mtr[slot];
 
 	return mtr;
 }
@@ -988,37 +993,34 @@
 	debugf2("\t\tNUMCOL: %s\n", numcol_toString[MTR_DIMM_COLS(mtr)]);
 }
 
-static void handle_channel(struct i5000_pvt *pvt, int csrow, int channel,
+static void handle_channel(struct i5000_pvt *pvt, int slot, int channel,
 			struct i5000_dimm_info *dinfo)
 {
 	int mtr;
 	int amb_present_reg;
 	int addrBits;
 
-	mtr = determine_mtr(pvt, csrow, channel);
+	mtr = determine_mtr(pvt, slot, channel);
 	if (MTR_DIMMS_PRESENT(mtr)) {
 		amb_present_reg = determine_amb_present_reg(pvt, channel);
 
-		/* Determine if there is  a  DIMM present in this DIMM slot */
-		if (amb_present_reg & (1 << (csrow >> 1))) {
+		/* Determine if there is a DIMM present in this DIMM slot */
+		if (amb_present_reg) {
 			dinfo->dual_rank = MTR_DIMM_RANK(mtr);
 
-			if (!((dinfo->dual_rank == 0) &&
-				((csrow & 0x1) == 0x1))) {
-				/* Start with the number of bits for a Bank
-				 * on the DRAM */
-				addrBits = MTR_DRAM_BANKS_ADDR_BITS(mtr);
-				/* Add thenumber of ROW bits */
-				addrBits += MTR_DIMM_ROWS_ADDR_BITS(mtr);
-				/* add the number of COLUMN bits */
-				addrBits += MTR_DIMM_COLS_ADDR_BITS(mtr);
+			/* Start with the number of bits for a Bank
+			 * on the DRAM */
+			addrBits = MTR_DRAM_BANKS_ADDR_BITS(mtr);
+			/* Add the number of ROW bits */
+			addrBits += MTR_DIMM_ROWS_ADDR_BITS(mtr);
+			/* add the number of COLUMN bits */
+			addrBits += MTR_DIMM_COLS_ADDR_BITS(mtr);
 
-				addrBits += 6;	/* add 64 bits per DIMM */
-				addrBits -= 20;	/* divide by 2^^20 */
-				addrBits -= 3;	/* 8 bits per bytes */
+			addrBits += 6;	/* add 64 bits per DIMM */
+			addrBits -= 20;	/* divide by 2^^20 */
+			addrBits -= 3;	/* 8 bits per bytes */
 
-				dinfo->megabytes = 1 << addrBits;
-			}
+			dinfo->megabytes = 1 << addrBits;
 		}
 	}
 }
@@ -1032,10 +1034,9 @@
 static void calculate_dimm_size(struct i5000_pvt *pvt)
 {
 	struct i5000_dimm_info *dinfo;
-	int csrow, max_csrows;
+	int slot, channel, branch;
 	char *p, *mem_buffer;
 	int space, n;
-	int channel;
 
 	/* ================= Generate some debug output ================= */
 	space = PAGE_SIZE;
@@ -1046,22 +1047,17 @@
 		return;
 	}
 
-	n = snprintf(p, space, "\n");
-	p += n;
-	space -= n;
-
-	/* Scan all the actual CSROWS (which is # of DIMMS * 2)
+	/* Scan all the actual slots
 	 * and calculate the information for each DIMM
-	 * Start with the highest csrow first, to display it first
-	 * and work toward the 0th csrow
+	 * Start with the highest slot first, to display it first
+	 * and work toward the 0th slot
 	 */
-	max_csrows = pvt->maxdimmperch * 2;
-	for (csrow = max_csrows - 1; csrow >= 0; csrow--) {
+	for (slot = pvt->maxdimmperch - 1; slot >= 0; slot--) {
 
-		/* on an odd csrow, first output a 'boundary' marker,
+		/* on an odd slot, first output a 'boundary' marker,
 		 * then reset the message buffer  */
-		if (csrow & 0x1) {
-			n = snprintf(p, space, "---------------------------"
+		if (slot & 0x1) {
+			n = snprintf(p, space, "--------------------------"
 				"--------------------------------");
 			p += n;
 			space -= n;
@@ -1069,30 +1065,39 @@
 			p = mem_buffer;
 			space = PAGE_SIZE;
 		}
-		n = snprintf(p, space, "csrow %2d    ", csrow);
+		n = snprintf(p, space, "slot %2d    ", slot);
 		p += n;
 		space -= n;
 
 		for (channel = 0; channel < pvt->maxch; channel++) {
-			dinfo = &pvt->dimm_info[csrow][channel];
-			handle_channel(pvt, csrow, channel, dinfo);
-			n = snprintf(p, space, "%4d MB   | ", dinfo->megabytes);
+			dinfo = &pvt->dimm_info[slot][channel];
+			handle_channel(pvt, slot, channel, dinfo);
+			if (dinfo->megabytes)
+				n = snprintf(p, space, "%4d MB %dR| ",
+					     dinfo->megabytes, dinfo->dual_rank + 1);
+			else
+				n = snprintf(p, space, "%4d MB   | ", 0);
 			p += n;
 			space -= n;
 		}
-		n = snprintf(p, space, "\n");
 		p += n;
 		space -= n;
+		debugf2("%s\n", mem_buffer);
+		p = mem_buffer;
+		space = PAGE_SIZE;
 	}
 
 	/* Output the last bottom 'boundary' marker */
-	n = snprintf(p, space, "---------------------------"
-		"--------------------------------\n");
+	n = snprintf(p, space, "--------------------------"
+		"--------------------------------");
 	p += n;
 	space -= n;
+	debugf2("%s\n", mem_buffer);
+	p = mem_buffer;
+	space = PAGE_SIZE;
 
 	/* now output the 'channel' labels */
-	n = snprintf(p, space, "            ");
+	n = snprintf(p, space, "           ");
 	p += n;
 	space -= n;
 	for (channel = 0; channel < pvt->maxch; channel++) {
@@ -1100,9 +1105,17 @@
 		p += n;
 		space -= n;
 	}
-	n = snprintf(p, space, "\n");
+	debugf2("%s\n", mem_buffer);
+	p = mem_buffer;
+	space = PAGE_SIZE;
+
+	n = snprintf(p, space, "           ");
 	p += n;
-	space -= n;
+	for (branch = 0; branch < MAX_BRANCHES; branch++) {
+		n = snprintf(p, space, "       branch %d       | ", branch);
+		p += n;
+		space -= n;
+	}
 
 	/* output the last message and free buffer */
 	debugf2("%s\n", mem_buffer);
@@ -1235,13 +1248,13 @@
 static int i5000_init_csrows(struct mem_ctl_info *mci)
 {
 	struct i5000_pvt *pvt;
-	struct csrow_info *p_csrow;
+	struct dimm_info *dimm;
 	int empty, channel_count;
 	int max_csrows;
-	int mtr, mtr1;
+	int mtr;
 	int csrow_megs;
 	int channel;
-	int csrow;
+	int slot;
 
 	pvt = mci->pvt_info;
 
@@ -1250,44 +1263,41 @@
 
 	empty = 1;		/* Assume NO memory */
 
-	for (csrow = 0; csrow < max_csrows; csrow++) {
-		p_csrow = &mci->csrows[csrow];
-
-		p_csrow->csrow_idx = csrow;
-
-		/* use branch 0 for the basis */
-		mtr = pvt->b0_mtr[csrow >> 1];
-		mtr1 = pvt->b1_mtr[csrow >> 1];
-
-		/* if no DIMMS on this row, continue */
-		if (!MTR_DIMMS_PRESENT(mtr) && !MTR_DIMMS_PRESENT(mtr1))
-			continue;
-
-		/* FAKE OUT VALUES, FIXME */
-		p_csrow->first_page = 0 + csrow * 20;
-		p_csrow->last_page = 9 + csrow * 20;
-		p_csrow->page_mask = 0xFFF;
-
-		p_csrow->grain = 8;
-
-		csrow_megs = 0;
+	/*
+	 * FIXME: The memory layout used to map slot/channel into the
+	 * real memory architecture is weird: branch+slot are "csrows"
+	 * and channel is channel. That required an extra array (dimm_info)
+	 * to map the dimms. A good cleanup would be to remove this array,
+	 * and do a loop here with branch, channel, slot
+	 */
+	for (slot = 0; slot < max_csrows; slot++) {
 		for (channel = 0; channel < pvt->maxch; channel++) {
-			csrow_megs += pvt->dimm_info[csrow][channel].megabytes;
+
+			mtr = determine_mtr(pvt, slot, channel);
+
+			if (!MTR_DIMMS_PRESENT(mtr))
+				continue;
+
+			dimm = EDAC_DIMM_PTR(mci->layers, mci->dimms, mci->n_layers,
+				       channel / MAX_BRANCHES,
+				       channel % MAX_BRANCHES, slot);
+
+			csrow_megs = pvt->dimm_info[slot][channel].megabytes;
+			dimm->grain = 8;
+
+			/* Assume DDR2 for now */
+			dimm->mtype = MEM_FB_DDR2;
+
+			/* ask what device type on this row */
+			if (MTR_DRAM_WIDTH(mtr))
+				dimm->dtype = DEV_X8;
+			else
+				dimm->dtype = DEV_X4;
+
+			dimm->edac_mode = EDAC_S8ECD8ED;
+			dimm->nr_pages = csrow_megs << 8;
 		}
 
-		p_csrow->nr_pages = csrow_megs << 8;
-
-		/* Assume DDR2 for now */
-		p_csrow->mtype = MEM_FB_DDR2;
-
-		/* ask what device type on this row */
-		if (MTR_DRAM_WIDTH(mtr))
-			p_csrow->dtype = DEV_X8;
-		else
-			p_csrow->dtype = DEV_X4;
-
-		p_csrow->edac_mode = EDAC_S8ECD8ED;
-
 		empty = 0;
 	}
 
@@ -1317,7 +1327,7 @@
 }
 
 /*
- * i5000_get_dimm_and_channel_counts(pdev, &num_csrows, &num_channels)
+ * i5000_get_dimm_and_channel_counts(pdev, &nr_csrows, &num_channels)
  *
  *	ask the device how many channels are present and how many CSROWS
  *	 as well
@@ -1332,7 +1342,7 @@
 	 * supported on this memory controller
 	 */
 	pci_read_config_byte(pdev, MAXDIMMPERCH, &value);
-	*num_dimms_per_channel = (int)value *2;
+	*num_dimms_per_channel = (int)value;
 
 	pci_read_config_byte(pdev, MAXCH, &value);
 	*num_channels = (int)value;
@@ -1348,10 +1358,10 @@
 static int i5000_probe1(struct pci_dev *pdev, int dev_idx)
 {
 	struct mem_ctl_info *mci;
+	struct edac_mc_layer layers[3];
 	struct i5000_pvt *pvt;
 	int num_channels;
 	int num_dimms_per_channel;
-	int num_csrows;
 
 	debugf0("MC: %s: %s(), pdev bus %u dev=0x%x fn=0x%x\n",
 		__FILE__, __func__,
@@ -1377,14 +1387,22 @@
 	 */
 	i5000_get_dimm_and_channel_counts(pdev, &num_dimms_per_channel,
 					&num_channels);
-	num_csrows = num_dimms_per_channel * 2;
 
-	debugf0("MC: %s(): Number of - Channels= %d  DIMMS= %d  CSROWS= %d\n",
-		__func__, num_channels, num_dimms_per_channel, num_csrows);
+	debugf0("MC: %s(): Number of Branches=2 Channels= %d  DIMMS= %d\n",
+		__func__, num_channels, num_dimms_per_channel);
 
 	/* allocate a new MC control structure */
-	mci = edac_mc_alloc(sizeof(*pvt), num_csrows, num_channels, 0);
 
+	layers[0].type = EDAC_MC_LAYER_BRANCH;
+	layers[0].size = MAX_BRANCHES;
+	layers[0].is_virt_csrow = false;
+	layers[1].type = EDAC_MC_LAYER_CHANNEL;
+	layers[1].size = num_channels / MAX_BRANCHES;
+	layers[1].is_virt_csrow = false;
+	layers[2].type = EDAC_MC_LAYER_SLOT;
+	layers[2].size = num_dimms_per_channel;
+	layers[2].is_virt_csrow = true;
+	mci = edac_mc_alloc(0, ARRAY_SIZE(layers), layers, sizeof(*pvt));
 	if (mci == NULL)
 		return -ENOMEM;
 
diff --git a/drivers/edac/i5100_edac.c b/drivers/edac/i5100_edac.c
index d500749..e9e7c2a 100644
--- a/drivers/edac/i5100_edac.c
+++ b/drivers/edac/i5100_edac.c
@@ -14,6 +14,11 @@
  * rows for each respective channel are laid out one after another,
  * the first half belonging to channel 0, the second half belonging
  * to channel 1.
+ *
+ * This driver is for DDR2 DIMMs, and it uses chip select to select among the
+ * several ranks. However, instead of showing memories as ranks, it outputs
+ * them as DIMMs. An internal table creates the association between ranks
+ * and DIMMs.
  */
 #include <linux/module.h>
 #include <linux/init.h>
@@ -410,14 +415,6 @@
 	return csrow / priv->ranksperchan;
 }
 
-static unsigned i5100_rank_to_csrow(const struct mem_ctl_info *mci,
-				    int chan, int rank)
-{
-	const struct i5100_priv *priv = mci->pvt_info;
-
-	return chan * priv->ranksperchan + rank;
-}
-
 static void i5100_handle_ce(struct mem_ctl_info *mci,
 			    int chan,
 			    unsigned bank,
@@ -427,17 +424,17 @@
 			    unsigned ras,
 			    const char *msg)
 {
-	const int csrow = i5100_rank_to_csrow(mci, chan, rank);
+	char detail[80];
 
-	printk(KERN_ERR
-		"CE chan %d, bank %u, rank %u, syndrome 0x%lx, "
-		"cas %u, ras %u, csrow %u, label \"%s\": %s\n",
-		chan, bank, rank, syndrome, cas, ras,
-		csrow, mci->csrows[csrow].channels[0].label, msg);
+	/* Form out message */
+	snprintf(detail, sizeof(detail),
+		 "bank %u, cas %u, ras %u\n",
+		 bank, cas, ras);
 
-	mci->ce_count++;
-	mci->csrows[csrow].ce_count++;
-	mci->csrows[csrow].channels[0].ce_count++;
+	edac_mc_handle_error(HW_EVENT_ERR_CORRECTED, mci,
+			     0, 0, syndrome,
+			     chan, rank, -1,
+			     msg, detail, NULL);
 }
 
 static void i5100_handle_ue(struct mem_ctl_info *mci,
@@ -449,16 +446,17 @@
 			    unsigned ras,
 			    const char *msg)
 {
-	const int csrow = i5100_rank_to_csrow(mci, chan, rank);
+	char detail[80];
 
-	printk(KERN_ERR
-		"UE chan %d, bank %u, rank %u, syndrome 0x%lx, "
-		"cas %u, ras %u, csrow %u, label \"%s\": %s\n",
-		chan, bank, rank, syndrome, cas, ras,
-		csrow, mci->csrows[csrow].channels[0].label, msg);
+	/* Form out message */
+	snprintf(detail, sizeof(detail),
+		 "bank %u, cas %u, ras %u\n",
+		 bank, cas, ras);
 
-	mci->ue_count++;
-	mci->csrows[csrow].ue_count++;
+	edac_mc_handle_error(HW_EVENT_ERR_UNCORRECTED, mci,
+			     0, 0, syndrome,
+			     chan, rank, -1,
+			     msg, detail, NULL);
 }
 
 static void i5100_read_log(struct mem_ctl_info *mci, int chan,
@@ -835,10 +833,10 @@
 static void __devinit i5100_init_csrows(struct mem_ctl_info *mci)
 {
 	int i;
-	unsigned long total_pages = 0UL;
 	struct i5100_priv *priv = mci->pvt_info;
 
-	for (i = 0; i < mci->nr_csrows; i++) {
+	for (i = 0; i < mci->tot_dimms; i++) {
+		struct dimm_info *dimm;
 		const unsigned long npages = i5100_npages(mci, i);
 		const unsigned chan = i5100_csrow_to_chan(mci, i);
 		const unsigned rank = i5100_csrow_to_rank(mci, i);
@@ -846,33 +844,23 @@
 		if (!npages)
 			continue;
 
-		/*
-		 * FIXME: these two are totally bogus -- I don't see how to
-		 * map them correctly to this structure...
-		 */
-		mci->csrows[i].first_page = total_pages;
-		mci->csrows[i].last_page = total_pages + npages - 1;
-		mci->csrows[i].page_mask = 0UL;
+		dimm = EDAC_DIMM_PTR(mci->layers, mci->dimms, mci->n_layers,
+			       chan, rank, 0);
 
-		mci->csrows[i].nr_pages = npages;
-		mci->csrows[i].grain = 32;
-		mci->csrows[i].csrow_idx = i;
-		mci->csrows[i].dtype =
-			(priv->mtr[chan][rank].width == 4) ? DEV_X4 : DEV_X8;
-		mci->csrows[i].ue_count = 0;
-		mci->csrows[i].ce_count = 0;
-		mci->csrows[i].mtype = MEM_RDDR2;
-		mci->csrows[i].edac_mode = EDAC_SECDED;
-		mci->csrows[i].mci = mci;
-		mci->csrows[i].nr_channels = 1;
-		mci->csrows[i].channels[0].chan_idx = 0;
-		mci->csrows[i].channels[0].ce_count = 0;
-		mci->csrows[i].channels[0].csrow = mci->csrows + i;
-		snprintf(mci->csrows[i].channels[0].label,
-			 sizeof(mci->csrows[i].channels[0].label),
-			 "DIMM%u", i5100_rank_to_slot(mci, chan, rank));
+		dimm->nr_pages = npages;
+		if (npages) {
+			dimm->grain = 32;
+			dimm->dtype = (priv->mtr[chan][rank].width == 4) ?
+					DEV_X4 : DEV_X8;
+			dimm->mtype = MEM_RDDR2;
+			dimm->edac_mode = EDAC_SECDED;
+			snprintf(dimm->label, sizeof(dimm->label),
+				"DIMM%u",
+				i5100_rank_to_slot(mci, chan, rank));
+		}
 
-		total_pages += npages;
+		debugf2("dimm channel %d, rank %d, size %ld\n",
+			chan, rank, (long)PAGES_TO_MiB(npages));
 	}
 }
 
@@ -881,6 +869,7 @@
 {
 	int rc;
 	struct mem_ctl_info *mci;
+	struct edac_mc_layer layers[2];
 	struct i5100_priv *priv;
 	struct pci_dev *ch0mm, *ch1mm;
 	int ret = 0;
@@ -941,7 +930,14 @@
 		goto bail_ch1;
 	}
 
-	mci = edac_mc_alloc(sizeof(*priv), ranksperch * 2, 1, 0);
+	layers[0].type = EDAC_MC_LAYER_CHANNEL;
+	layers[0].size = 2;
+	layers[0].is_virt_csrow = false;
+	layers[1].type = EDAC_MC_LAYER_SLOT;
+	layers[1].size = ranksperch;
+	layers[1].is_virt_csrow = true;
+	mci = edac_mc_alloc(0, ARRAY_SIZE(layers), layers,
+			    sizeof(*priv));
 	if (!mci) {
 		ret = -ENOMEM;
 		goto bail_disable_ch1;
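A rough sketch of the lookup the i5100 hunks rely on: a (channel, rank) pair selects its struct dimm_info through EDAC_DIMM_PTR(), matching the channel/slot layers declared at allocation time. Everything except the macro call itself is illustrative:

	/* layers[0] = channel (size 2), layers[1] = slot (size ranksperch) */
	static void fill_one_rank(struct mem_ctl_info *mci,
				  unsigned chan, unsigned rank,
				  unsigned long npages, int width)
	{
		struct dimm_info *dimm;

		dimm = EDAC_DIMM_PTR(mci->layers, mci->dimms, mci->n_layers,
				     chan, rank, 0);
		dimm->nr_pages = npages;
		dimm->grain = 32;
		dimm->dtype = (width == 4) ? DEV_X4 : DEV_X8;
		dimm->mtype = MEM_RDDR2;
		dimm->edac_mode = EDAC_SECDED;
	}
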
diff --git a/drivers/edac/i5400_edac.c b/drivers/edac/i5400_edac.c
index 1869a10..6640c29 100644
--- a/drivers/edac/i5400_edac.c
+++ b/drivers/edac/i5400_edac.c
@@ -18,6 +18,10 @@
  * Intel 5400 Chipset Memory Controller Hub (MCH) - Datasheet
  * 	http://developer.intel.com/design/chipsets/datashts/313070.htm
  *
+ * This Memory Controller manages DDR2 FB-DIMMs. It has 2 branches, each with
+ * 2 channels operating in lockstep no-mirror mode. Each channel can have up to
+ * 4 DIMMs, each of up to 8 GB.
+ *
  */
 
 #include <linux/module.h>
@@ -44,12 +48,10 @@
 	edac_mc_chipset_printk(mci, level, "i5400", fmt, ##arg)
 
 /* Limits for i5400 */
-#define NUM_MTRS_PER_BRANCH	4
+#define MAX_BRANCHES		2
 #define CHANNELS_PER_BRANCH	2
-#define MAX_DIMMS_PER_CHANNEL	NUM_MTRS_PER_BRANCH
-#define	MAX_CHANNELS		4
-/* max possible csrows per channel */
-#define MAX_CSROWS		(MAX_DIMMS_PER_CHANNEL)
+#define DIMMS_PER_CHANNEL	4
+#define	MAX_CHANNELS		(MAX_BRANCHES * CHANNELS_PER_BRANCH)
 
 /* Device 16,
  * Function 0: System Address
@@ -347,16 +349,16 @@
 
 	u16 mir0, mir1;
 
-	u16 b0_mtr[NUM_MTRS_PER_BRANCH];	/* Memory Technlogy Reg */
+	u16 b0_mtr[DIMMS_PER_CHANNEL];	/* Memory Technology Reg */
 	u16 b0_ambpresent0;			/* Branch 0, Channel 0 */
 	u16 b0_ambpresent1;			/* Brnach 0, Channel 1 */
 
-	u16 b1_mtr[NUM_MTRS_PER_BRANCH];	/* Memory Technlogy Reg */
+	u16 b1_mtr[DIMMS_PER_CHANNEL];	/* Memory Technology Reg */
 	u16 b1_ambpresent0;			/* Branch 1, Channel 8 */
 	u16 b1_ambpresent1;			/* Branch 1, Channel 1 */
 
 	/* DIMM information matrix, allocating architecture maximums */
-	struct i5400_dimm_info dimm_info[MAX_CSROWS][MAX_CHANNELS];
+	struct i5400_dimm_info dimm_info[DIMMS_PER_CHANNEL][MAX_CHANNELS];
 
 	/* Actual values for this controller */
 	int maxch;				/* Max channels */
@@ -532,13 +534,15 @@
 	int ras, cas;
 	int errnum;
 	char *type = NULL;
+	enum hw_event_mc_err_type tp_event = HW_EVENT_ERR_UNCORRECTED;
 
 	if (!allErrors)
 		return;		/* if no error, return now */
 
-	if (allErrors &  ERROR_FAT_MASK)
+	if (allErrors &  ERROR_FAT_MASK) {
 		type = "FATAL";
-	else if (allErrors & FERR_NF_UNCORRECTABLE)
+		tp_event = HW_EVENT_ERR_FATAL;
+	} else if (allErrors & FERR_NF_UNCORRECTABLE)
 		type = "NON-FATAL uncorrected";
 	else
 		type = "NON-FATAL recoverable";
@@ -556,7 +560,7 @@
 	ras = nrec_ras(info);
 	cas = nrec_cas(info);
 
-	debugf0("\t\tCSROW= %d  Channels= %d,%d  (Branch= %d "
+	debugf0("\t\tDIMM= %d  Channels= %d,%d  (Branch= %d "
 		"DRAM Bank= %d Buffer ID = %d rdwr= %s ras= %d cas= %d)\n",
 		rank, channel, channel + 1, branch >> 1, bank,
 		buf_id, rdwr_str(rdwr), ras, cas);
@@ -566,13 +570,13 @@
 
 	/* Form out message */
 	snprintf(msg, sizeof(msg),
-		 "%s (Branch=%d DRAM-Bank=%d Buffer ID = %d RDWR=%s "
-		 "RAS=%d CAS=%d %s Err=0x%lx (%s))",
-		 type, branch >> 1, bank, buf_id, rdwr_str(rdwr), ras, cas,
-		 type, allErrors, error_name[errnum]);
+		 "Bank=%d Buffer ID = %d RAS=%d CAS=%d Err=0x%lx (%s)",
+		 bank, buf_id, ras, cas, allErrors, error_name[errnum]);
 
-	/* Call the helper to output message */
-	edac_mc_handle_fbd_ue(mci, rank, channel, channel + 1, msg);
+	edac_mc_handle_error(tp_event, mci, 0, 0, 0,
+			     branch >> 1, -1, rank,
+			     rdwr ? "Write error" : "Read error",
+			     msg, NULL);
 }
 
 /*
@@ -630,7 +634,7 @@
 		/* Only 1 bit will be on */
 		errnum = find_first_bit(&allErrors, ARRAY_SIZE(error_name));
 
-		debugf0("\t\tCSROW= %d Channel= %d  (Branch %d "
+		debugf0("\t\tDIMM= %d Channel= %d  (Branch %d "
 			"DRAM Bank= %d rdwr= %s ras= %d cas= %d)\n",
 			rank, channel, branch >> 1, bank,
 			rdwr_str(rdwr), ras, cas);
@@ -642,8 +646,10 @@
 			 branch >> 1, bank, rdwr_str(rdwr), ras, cas,
 			 allErrors, error_name[errnum]);
 
-		/* Call the helper to output message */
-		edac_mc_handle_fbd_ce(mci, rank, channel, msg);
+		edac_mc_handle_error(HW_EVENT_ERR_CORRECTED, mci, 0, 0, 0,
+				     branch >> 1, channel % 2, rank,
+				     rdwr ? "Write error" : "Read error",
+				     msg, NULL);
 
 		return;
 	}
@@ -831,8 +837,8 @@
 /*
  *	determine_amb_present
  *
- *		the information is contained in NUM_MTRS_PER_BRANCH different
- *		registers determining which of the NUM_MTRS_PER_BRANCH requires
+ *		the information is contained in DIMMS_PER_CHANNEL different
+ *		registers determining which of the DIMMS_PER_CHANNEL requires
  *              knowing which channel is in question
  *
  *	2 branches, each with 2 channels
@@ -861,11 +867,11 @@
 }
 
 /*
- * determine_mtr(pvt, csrow, channel)
+ * determine_mtr(pvt, dimm, channel)
  *
- * return the proper MTR register as determine by the csrow and desired channel
+ * return the proper MTR register as determine by the dimm and desired channel
  */
-static int determine_mtr(struct i5400_pvt *pvt, int csrow, int channel)
+static int determine_mtr(struct i5400_pvt *pvt, int dimm, int channel)
 {
 	int mtr;
 	int n;
@@ -873,11 +879,11 @@
 	/* There is one MTR for each slot pair of FB-DIMMs,
 	   Each slot pair may be at branch 0 or branch 1.
 	 */
-	n = csrow;
+	n = dimm;
 
-	if (n >= NUM_MTRS_PER_BRANCH) {
-		debugf0("ERROR: trying to access an invalid csrow: %d\n",
-			csrow);
+	if (n >= DIMMS_PER_CHANNEL) {
+		debugf0("ERROR: trying to access an invalid dimm: %d\n",
+			dimm);
 		return 0;
 	}
 
@@ -913,19 +919,19 @@
 	debugf2("\t\tNUMCOL: %s\n", numcol_toString[MTR_DIMM_COLS(mtr)]);
 }
 
-static void handle_channel(struct i5400_pvt *pvt, int csrow, int channel,
+static void handle_channel(struct i5400_pvt *pvt, int dimm, int channel,
 			struct i5400_dimm_info *dinfo)
 {
 	int mtr;
 	int amb_present_reg;
 	int addrBits;
 
-	mtr = determine_mtr(pvt, csrow, channel);
+	mtr = determine_mtr(pvt, dimm, channel);
 	if (MTR_DIMMS_PRESENT(mtr)) {
 		amb_present_reg = determine_amb_present_reg(pvt, channel);
 
 		/* Determine if there is a DIMM present in this DIMM slot */
-		if (amb_present_reg & (1 << csrow)) {
+		if (amb_present_reg & (1 << dimm)) {
 			/* Start with the number of bits for a Bank
 			 * on the DRAM */
 			addrBits = MTR_DRAM_BANKS_ADDR_BITS(mtr);
@@ -954,10 +960,10 @@
 static void calculate_dimm_size(struct i5400_pvt *pvt)
 {
 	struct i5400_dimm_info *dinfo;
-	int csrow, max_csrows;
+	int dimm, max_dimms;
 	char *p, *mem_buffer;
 	int space, n;
-	int channel;
+	int channel, branch;
 
 	/* ================= Generate some debug output ================= */
 	space = PAGE_SIZE;
@@ -968,32 +974,32 @@
 		return;
 	}
 
-	/* Scan all the actual CSROWS
+	/* Scan all the actual DIMMS
 	 * and calculate the information for each DIMM
-	 * Start with the highest csrow first, to display it first
-	 * and work toward the 0th csrow
+	 * Start with the highest dimm first, to display it first
+	 * and work toward the 0th dimm
 	 */
-	max_csrows = pvt->maxdimmperch;
-	for (csrow = max_csrows - 1; csrow >= 0; csrow--) {
+	max_dimms = pvt->maxdimmperch;
+	for (dimm = max_dimms - 1; dimm >= 0; dimm--) {
 
-		/* on an odd csrow, first output a 'boundary' marker,
+		/* on an odd dimm, first output a 'boundary' marker,
 		 * then reset the message buffer  */
-		if (csrow & 0x1) {
+		if (dimm & 0x1) {
 			n = snprintf(p, space, "---------------------------"
-					"--------------------------------");
+					"-------------------------------");
 			p += n;
 			space -= n;
 			debugf2("%s\n", mem_buffer);
 			p = mem_buffer;
 			space = PAGE_SIZE;
 		}
-		n = snprintf(p, space, "csrow %2d    ", csrow);
+		n = snprintf(p, space, "dimm %2d    ", dimm);
 		p += n;
 		space -= n;
 
 		for (channel = 0; channel < pvt->maxch; channel++) {
-			dinfo = &pvt->dimm_info[csrow][channel];
-			handle_channel(pvt, csrow, channel, dinfo);
+			dinfo = &pvt->dimm_info[dimm][channel];
+			handle_channel(pvt, dimm, channel, dinfo);
 			n = snprintf(p, space, "%4d MB   | ", dinfo->megabytes);
 			p += n;
 			space -= n;
@@ -1005,7 +1011,7 @@
 
 	/* Output the last bottom 'boundary' marker */
 	n = snprintf(p, space, "---------------------------"
-			"--------------------------------");
+			"-------------------------------");
 	p += n;
 	space -= n;
 	debugf2("%s\n", mem_buffer);
@@ -1013,7 +1019,7 @@
 	space = PAGE_SIZE;
 
 	/* now output the 'channel' labels */
-	n = snprintf(p, space, "            ");
+	n = snprintf(p, space, "           ");
 	p += n;
 	space -= n;
 	for (channel = 0; channel < pvt->maxch; channel++) {
@@ -1022,6 +1028,19 @@
 		space -= n;
 	}
 
+	space -= n;
+	debugf2("%s\n", mem_buffer);
+	p = mem_buffer;
+	space = PAGE_SIZE;
+
+	n = snprintf(p, space, "           ");
+	p += n;
+	for (branch = 0; branch < MAX_BRANCHES; branch++) {
+		n = snprintf(p, space, "       branch %d       | ", branch);
+		p += n;
+		space -= n;
+	}
+
 	/* output the last message and free buffer */
 	debugf2("%s\n", mem_buffer);
 	kfree(mem_buffer);
@@ -1080,7 +1099,7 @@
 	debugf2("MIR1: limit= 0x%x  WAY1= %u  WAY0= %x\n", limit, way1, way0);
 
 	/* Get the set of MTR[0-3] regs by each branch */
-	for (slot_row = 0; slot_row < NUM_MTRS_PER_BRANCH; slot_row++) {
+	for (slot_row = 0; slot_row < DIMMS_PER_CHANNEL; slot_row++) {
 		int where = MTR0 + (slot_row * sizeof(u16));
 
 		/* Branch 0 set of MTR registers */
@@ -1105,7 +1124,7 @@
 	/* Read and dump branch 0's MTRs */
 	debugf2("\nMemory Technology Registers:\n");
 	debugf2("   Branch 0:\n");
-	for (slot_row = 0; slot_row < NUM_MTRS_PER_BRANCH; slot_row++)
+	for (slot_row = 0; slot_row < DIMMS_PER_CHANNEL; slot_row++)
 		decode_mtr(slot_row, pvt->b0_mtr[slot_row]);
 
 	pci_read_config_word(pvt->branch_0, AMBPRESENT_0,
@@ -1122,7 +1141,7 @@
 	} else {
 		/* Read and dump  branch 1's MTRs */
 		debugf2("   Branch 1:\n");
-		for (slot_row = 0; slot_row < NUM_MTRS_PER_BRANCH; slot_row++)
+		for (slot_row = 0; slot_row < DIMMS_PER_CHANNEL; slot_row++)
 			decode_mtr(slot_row, pvt->b1_mtr[slot_row]);
 
 		pci_read_config_word(pvt->branch_1, AMBPRESENT_0,
@@ -1141,7 +1160,7 @@
 }
 
 /*
- *	i5400_init_csrows	Initialize the 'csrows' table within
+ *	i5400_init_dimms	Initialize the 'dimms' table within
  *				the mci control	structure with the
  *				addressing of memory.
  *
@@ -1149,64 +1168,68 @@
  *		0	success
  *		1	no actual memory found on this MC
  */
-static int i5400_init_csrows(struct mem_ctl_info *mci)
+static int i5400_init_dimms(struct mem_ctl_info *mci)
 {
 	struct i5400_pvt *pvt;
-	struct csrow_info *p_csrow;
-	int empty, channel_count;
-	int max_csrows;
+	struct dimm_info *dimm;
+	int ndimms, channel_count;
+	int max_dimms;
 	int mtr;
-	int csrow_megs;
-	int channel;
-	int csrow;
+	int size_mb;
+	int  channel, slot;
 
 	pvt = mci->pvt_info;
 
 	channel_count = pvt->maxch;
-	max_csrows = pvt->maxdimmperch;
+	max_dimms = pvt->maxdimmperch;
 
-	empty = 1;		/* Assume NO memory */
+	ndimms = 0;
 
-	for (csrow = 0; csrow < max_csrows; csrow++) {
-		p_csrow = &mci->csrows[csrow];
+	/*
+	 * FIXME: remove  pvt->dimm_info[slot][channel] and use the 3
+	 * FIXME: remove pvt->dimm_info[slot][channel] and use the 3
+	 */
+	for (channel = 0; channel < mci->layers[0].size * mci->layers[1].size;
+	     channel++) {
+		for (slot = 0; slot < mci->layers[2].size; slot++) {
+			mtr = determine_mtr(pvt, slot, channel);
 
-		p_csrow->csrow_idx = csrow;
+			/* if no DIMMS on this slot, continue */
+			if (!MTR_DIMMS_PRESENT(mtr))
+				continue;
 
-		/* use branch 0 for the basis */
-		mtr = determine_mtr(pvt, csrow, 0);
+			dimm = EDAC_DIMM_PTR(mci->layers, mci->dimms, mci->n_layers,
+				       channel / 2, channel % 2, slot);
 
-		/* if no DIMMS on this row, continue */
-		if (!MTR_DIMMS_PRESENT(mtr))
-			continue;
+			size_mb =  pvt->dimm_info[slot][channel].megabytes;
 
-		/* FAKE OUT VALUES, FIXME */
-		p_csrow->first_page = 0 + csrow * 20;
-		p_csrow->last_page = 9 + csrow * 20;
-		p_csrow->page_mask = 0xFFF;
+			debugf2("%s: dimm%zd (branch %d channel %d slot %d): %d.%03d GB\n",
+				__func__, dimm - mci->dimms,
+				channel / 2, channel % 2, slot,
+				size_mb / 1000, size_mb % 1000);
 
-		p_csrow->grain = 8;
-
-		csrow_megs = 0;
-		for (channel = 0; channel < pvt->maxch; channel++)
-			csrow_megs += pvt->dimm_info[csrow][channel].megabytes;
-
-		p_csrow->nr_pages = csrow_megs << 8;
-
-		/* Assume DDR2 for now */
-		p_csrow->mtype = MEM_FB_DDR2;
-
-		/* ask what device type on this row */
-		if (MTR_DRAM_WIDTH(mtr))
-			p_csrow->dtype = DEV_X8;
-		else
-			p_csrow->dtype = DEV_X4;
-
-		p_csrow->edac_mode = EDAC_S8ECD8ED;
-
-		empty = 0;
+			dimm->nr_pages = size_mb << 8;
+			dimm->grain = 8;
+			dimm->dtype = MTR_DRAM_WIDTH(mtr) ? DEV_X8 : DEV_X4;
+			dimm->mtype = MEM_FB_DDR2;
+			/*
+			 * The eccc mechanism is SDDC (aka SECC), with
+			 * The ECC mechanism is SDDC (aka SECC), which
+			 * is similar to Chipkill.
+			dimm->edac_mode = MTR_DRAM_WIDTH(mtr) ?
+					  EDAC_S8ECD8ED : EDAC_S4ECD4ED;
+			ndimms++;
+		}
 	}
 
-	return empty;
+	/*
+	 * When just one DIMM is present, it should be at location (0,0,0).
+	 * In such single-DIMM mode, the SDDC algorithm degrades to SECDED+.
+	 */
+	if (ndimms == 1)
+		mci->dimms[0].edac_mode = EDAC_SECDED;
+
+	return (ndimms == 0);
 }
 
 /*
@@ -1242,9 +1265,7 @@
 {
 	struct mem_ctl_info *mci;
 	struct i5400_pvt *pvt;
-	int num_channels;
-	int num_dimms_per_channel;
-	int num_csrows;
+	struct edac_mc_layer layers[3];
 
 	if (dev_idx >= ARRAY_SIZE(i5400_devs))
 		return -EINVAL;
@@ -1258,23 +1279,21 @@
 	if (PCI_FUNC(pdev->devfn) != 0)
 		return -ENODEV;
 
-	/* As we don't have a motherboard identification routine to determine
-	 * actual number of slots/dimms per channel, we thus utilize the
-	 * resource as specified by the chipset. Thus, we might have
-	 * have more DIMMs per channel than actually on the mobo, but this
-	 * allows the driver to support up to the chipset max, without
-	 * some fancy mobo determination.
+	/*
+	 * allocate a new MC control structure
+	 *
+	 * This driver uses the DIMM slot as "csrow" and the rest as "channel".
 	 */
-	num_dimms_per_channel = MAX_DIMMS_PER_CHANNEL;
-	num_channels = MAX_CHANNELS;
-	num_csrows = num_dimms_per_channel;
-
-	debugf0("MC: %s(): Number of - Channels= %d  DIMMS= %d  CSROWS= %d\n",
-		__func__, num_channels, num_dimms_per_channel, num_csrows);
-
-	/* allocate a new MC control structure */
-	mci = edac_mc_alloc(sizeof(*pvt), num_csrows, num_channels, 0);
-
+	layers[0].type = EDAC_MC_LAYER_BRANCH;
+	layers[0].size = MAX_BRANCHES;
+	layers[0].is_virt_csrow = false;
+	layers[1].type = EDAC_MC_LAYER_CHANNEL;
+	layers[1].size = CHANNELS_PER_BRANCH;
+	layers[1].is_virt_csrow = false;
+	layers[2].type = EDAC_MC_LAYER_SLOT;
+	layers[2].size = DIMMS_PER_CHANNEL;
+	layers[2].is_virt_csrow = true;
+	mci = edac_mc_alloc(0, ARRAY_SIZE(layers), layers, sizeof(*pvt));
 	if (mci == NULL)
 		return -ENOMEM;
 
@@ -1284,8 +1303,8 @@
 
 	pvt = mci->pvt_info;
 	pvt->system_address = pdev;	/* Record this device in our private */
-	pvt->maxch = num_channels;
-	pvt->maxdimmperch = num_dimms_per_channel;
+	pvt->maxch = MAX_CHANNELS;
+	pvt->maxdimmperch = DIMMS_PER_CHANNEL;
 
 	/* 'get' the pci devices we want to reserve for our use */
 	if (i5400_get_devices(mci, dev_idx))
@@ -1307,13 +1326,13 @@
 	/* Set the function pointer to an actual operation function */
 	mci->edac_check = i5400_check_error;
 
-	/* initialize the MC control structure 'csrows' table
+	/* initialize the MC control structure 'dimms' table
 	 * with the mapping and control information */
-	if (i5400_init_csrows(mci)) {
+	if (i5400_init_dimms(mci)) {
 		debugf0("MC: Setting mci->edac_cap to EDAC_FLAG_NONE\n"
-			"    because i5400_init_csrows() returned nonzero "
+			"    because i5400_init_dimms() returned nonzero "
 			"value\n");
-		mci->edac_cap = EDAC_FLAG_NONE;	/* no csrows found */
+		mci->edac_cap = EDAC_FLAG_NONE;	/* no dimms found */
 	} else {
 		debugf1("MC: Enable error reporting now\n");
 		i5400_enable_error_reporting(mci);
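The i5400 hunks fold the old flat channel numbering into the three layers declared in i5400_probe1(); a small sketch of that mapping, using the same arithmetic as the patch (the helper name is hypothetical):

	/* layers: 0 = branch, 1 = channel within the branch, 2 = slot */
	static struct dimm_info *i5400_dimm_ptr(struct mem_ctl_info *mci,
						int channel, int slot)
	{
		int branch = channel / CHANNELS_PER_BRANCH;	/* 0 or 1 */
		int chan   = channel % CHANNELS_PER_BRANCH;	/* 0 or 1 */

		return EDAC_DIMM_PTR(mci->layers, mci->dimms, mci->n_layers,
				     branch, chan, slot);
	}
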
diff --git a/drivers/edac/i7300_edac.c b/drivers/edac/i7300_edac.c
index 3bafa3b..97c22fd 100644
--- a/drivers/edac/i7300_edac.c
+++ b/drivers/edac/i7300_edac.c
@@ -464,17 +464,14 @@
 				FERR_FAT_FBD, error_reg);
 
 		snprintf(pvt->tmp_prt_buffer, PAGE_SIZE,
-			"FATAL (Branch=%d DRAM-Bank=%d %s "
-			"RAS=%d CAS=%d Err=0x%lx (%s))",
-			branch, bank,
-			is_wr ? "RDWR" : "RD",
-			ras, cas,
-			errors, specific);
+			 "Bank=%d RAS=%d CAS=%d Err=0x%lx (%s)",
+			 bank, ras, cas, errors, specific);
 
-		/* Call the helper to output message */
-		edac_mc_handle_fbd_ue(mci, rank, branch << 1,
-				      (branch << 1) + 1,
-				      pvt->tmp_prt_buffer);
+		edac_mc_handle_error(HW_EVENT_ERR_FATAL, mci, 0, 0, 0,
+				     branch, -1, rank,
+				     is_wr ? "Write error" : "Read error",
+				     pvt->tmp_prt_buffer, NULL);
+
 	}
 
 	/* read in the 1st NON-FATAL error register */
@@ -513,23 +510,14 @@
 
 		/* Form out message */
 		snprintf(pvt->tmp_prt_buffer, PAGE_SIZE,
-			"Corrected error (Branch=%d, Channel %d), "
-			" DRAM-Bank=%d %s "
-			"RAS=%d CAS=%d, CE Err=0x%lx, Syndrome=0x%08x(%s))",
-			branch, channel,
-			bank,
-			is_wr ? "RDWR" : "RD",
-			ras, cas,
-			errors, syndrome, specific);
+			 "DRAM-Bank=%d RAS=%d CAS=%d, Err=0x%lx (%s)",
+			 bank, ras, cas, errors, specific);
 
-		/*
-		 * Call the helper to output message
-		 * NOTE: Errors are reported per-branch, and not per-channel
-		 *	 Currently, we don't know how to identify the right
-		 *	 channel.
-		 */
-		edac_mc_handle_fbd_ce(mci, rank, channel,
-				      pvt->tmp_prt_buffer);
+		edac_mc_handle_error(HW_EVENT_ERR_CORRECTED, mci, 0, 0,
+				     syndrome,
+				     branch >> 1, channel % 2, rank,
+				     is_wr ? "Write error" : "Read error",
+				     pvt->tmp_prt_buffer, NULL);
 	}
 	return;
 }
@@ -617,8 +605,7 @@
 static int decode_mtr(struct i7300_pvt *pvt,
 		      int slot, int ch, int branch,
 		      struct i7300_dimm_info *dinfo,
-		      struct csrow_info *p_csrow,
-		      u32 *nr_pages)
+		      struct dimm_info *dimm)
 {
 	int mtr, ans, addrBits, channel;
 
@@ -650,7 +637,6 @@
 	addrBits -= 3;	/* 8 bits per bytes */
 
 	dinfo->megabytes = 1 << addrBits;
-	*nr_pages = dinfo->megabytes << 8;
 
 	debugf2("\t\tWIDTH: x%d\n", MTR_DRAM_WIDTH(mtr));
 
@@ -663,11 +649,6 @@
 	debugf2("\t\tNUMCOL: %s\n", numcol_toString[MTR_DIMM_COLS(mtr)]);
 	debugf2("\t\tSIZE: %d MB\n", dinfo->megabytes);
 
-	p_csrow->grain = 8;
-	p_csrow->mtype = MEM_FB_DDR2;
-	p_csrow->csrow_idx = slot;
-	p_csrow->page_mask = 0;
-
 	/*
 	 * The type of error detection actually depends of the
 	 * mode of operation. When it is just one single memory chip, at
@@ -677,15 +658,18 @@
 	 * See datasheet Sections 7.3.6 to 7.3.8
 	 */
 
+	dimm->nr_pages = MiB_TO_PAGES(dinfo->megabytes);
+	dimm->grain = 8;
+	dimm->mtype = MEM_FB_DDR2;
 	if (IS_SINGLE_MODE(pvt->mc_settings_a)) {
-		p_csrow->edac_mode = EDAC_SECDED;
+		dimm->edac_mode = EDAC_SECDED;
 		debugf2("\t\tECC code is 8-byte-over-32-byte SECDED+ code\n");
 	} else {
 		debugf2("\t\tECC code is on Lockstep mode\n");
 		if (MTR_DRAM_WIDTH(mtr) == 8)
-			p_csrow->edac_mode = EDAC_S8ECD8ED;
+			dimm->edac_mode = EDAC_S8ECD8ED;
 		else
-			p_csrow->edac_mode = EDAC_S4ECD4ED;
+			dimm->edac_mode = EDAC_S4ECD4ED;
 	}
 
 	/* ask what device type on this row */
@@ -694,9 +678,9 @@
 			IS_SCRBALGO_ENHANCED(pvt->mc_settings) ?
 					    "enhanced" : "normal");
 
-		p_csrow->dtype = DEV_X8;
+		dimm->dtype = DEV_X8;
 	} else
-		p_csrow->dtype = DEV_X4;
+		dimm->dtype = DEV_X4;
 
 	return mtr;
 }
@@ -774,11 +758,10 @@
 {
 	struct i7300_pvt *pvt;
 	struct i7300_dimm_info *dinfo;
-	struct csrow_info *p_csrow;
 	int rc = -ENODEV;
 	int mtr;
 	int ch, branch, slot, channel;
-	u32 last_page = 0, nr_pages;
+	struct dimm_info *dimm;
 
 	pvt = mci->pvt_info;
 
@@ -809,25 +792,23 @@
 			pci_read_config_word(pvt->pci_dev_2x_0_fbd_branch[branch],
 					where,
 					&pvt->mtr[slot][branch]);
-			for (ch = 0; ch < MAX_BRANCHES; ch++) {
+			for (ch = 0; ch < MAX_CH_PER_BRANCH; ch++) {
 				int channel = to_channel(ch, branch);
 
+				dimm = EDAC_DIMM_PTR(mci->layers, mci->dimms,
+					       mci->n_layers, branch, ch, slot);
+
 				dinfo = &pvt->dimm_info[slot][channel];
-				p_csrow = &mci->csrows[slot];
 
 				mtr = decode_mtr(pvt, slot, ch, branch,
-						 dinfo, p_csrow, &nr_pages);
+						 dinfo, dimm);
+
 				/* if no DIMMS on this row, continue */
 				if (!MTR_DIMMS_PRESENT(mtr))
 					continue;
 
-				/* Update per_csrow memory count */
-				p_csrow->nr_pages += nr_pages;
-				p_csrow->first_page = last_page;
-				last_page += nr_pages;
-				p_csrow->last_page = last_page;
-
 				rc = 0;
+
 			}
 		}
 	}
@@ -1042,10 +1023,8 @@
 				    const struct pci_device_id *id)
 {
 	struct mem_ctl_info *mci;
+	struct edac_mc_layer layers[3];
 	struct i7300_pvt *pvt;
-	int num_channels;
-	int num_dimms_per_channel;
-	int num_csrows;
 	int rc;
 
 	/* wake up device */
@@ -1062,23 +1041,17 @@
 	if (PCI_FUNC(pdev->devfn) != 0)
 		return -ENODEV;
 
-	/* As we don't have a motherboard identification routine to determine
-	 * actual number of slots/dimms per channel, we thus utilize the
-	 * resource as specified by the chipset. Thus, we might have
-	 * have more DIMMs per channel than actually on the mobo, but this
-	 * allows the driver to support up to the chipset max, without
-	 * some fancy mobo determination.
-	 */
-	num_dimms_per_channel = MAX_SLOTS;
-	num_channels = MAX_CHANNELS;
-	num_csrows = MAX_SLOTS * MAX_CHANNELS;
-
-	debugf0("MC: %s(): Number of - Channels= %d  DIMMS= %d  CSROWS= %d\n",
-		__func__, num_channels, num_dimms_per_channel, num_csrows);
-
 	/* allocate a new MC control structure */
-	mci = edac_mc_alloc(sizeof(*pvt), num_csrows, num_channels, 0);
-
+	layers[0].type = EDAC_MC_LAYER_BRANCH;
+	layers[0].size = MAX_BRANCHES;
+	layers[0].is_virt_csrow = false;
+	layers[1].type = EDAC_MC_LAYER_CHANNEL;
+	layers[1].size = MAX_CH_PER_BRANCH;
+	layers[1].is_virt_csrow = true;
+	layers[2].type = EDAC_MC_LAYER_SLOT;
+	layers[2].size = MAX_SLOTS;
+	layers[2].is_virt_csrow = true;
+	mci = edac_mc_alloc(0, ARRAY_SIZE(layers), layers, sizeof(*pvt));
 	if (mci == NULL)
 		return -ENOMEM;
 
diff --git a/drivers/edac/i7core_edac.c b/drivers/edac/i7core_edac.c
index 7f1dfcc..a499c7e 100644
--- a/drivers/edac/i7core_edac.c
+++ b/drivers/edac/i7core_edac.c
@@ -221,7 +221,9 @@
 };
 
 struct i7core_channel {
-	u32		ranks;
+	bool		is_3dimms_present;
+	bool		is_single_4rank;
+	bool		has_4rank;
 	u32		dimms;
 };
 
@@ -257,7 +259,6 @@
 	struct i7core_channel	channel[NUM_CHANS];
 
 	int		ce_count_available;
-	int 		csrow_map[NUM_CHANS][MAX_DIMMS];
 
 			/* ECC corrected errors counts per udimm */
 	unsigned long	udimm_ce_count[MAX_DIMMS];
@@ -492,116 +493,15 @@
 /****************************************************************************
 			Memory check routines
  ****************************************************************************/
-static struct pci_dev *get_pdev_slot_func(u8 socket, unsigned slot,
-					  unsigned func)
-{
-	struct i7core_dev *i7core_dev = get_i7core_dev(socket);
-	int i;
 
-	if (!i7core_dev)
-		return NULL;
-
-	for (i = 0; i < i7core_dev->n_devs; i++) {
-		if (!i7core_dev->pdev[i])
-			continue;
-
-		if (PCI_SLOT(i7core_dev->pdev[i]->devfn) == slot &&
-		    PCI_FUNC(i7core_dev->pdev[i]->devfn) == func) {
-			return i7core_dev->pdev[i];
-		}
-	}
-
-	return NULL;
-}
-
-/**
- * i7core_get_active_channels() - gets the number of channels and csrows
- * @socket:	Quick Path Interconnect socket
- * @channels:	Number of channels that will be returned
- * @csrows:	Number of csrows found
- *
- * Since EDAC core needs to know in advance the number of available channels
- * and csrows, in order to allocate memory for csrows/channels, it is needed
- * to run two similar steps. At the first step, implemented on this function,
- * it checks the number of csrows/channels present at one socket.
- * this is used in order to properly allocate the size of mci components.
- *
- * It should be noticed that none of the current available datasheets explain
- * or even mention how csrows are seen by the memory controller. So, we need
- * to add a fake description for csrows.
- * So, this driver is attributing one DIMM memory for one csrow.
- */
-static int i7core_get_active_channels(const u8 socket, unsigned *channels,
-				      unsigned *csrows)
-{
-	struct pci_dev *pdev = NULL;
-	int i, j;
-	u32 status, control;
-
-	*channels = 0;
-	*csrows = 0;
-
-	pdev = get_pdev_slot_func(socket, 3, 0);
-	if (!pdev) {
-		i7core_printk(KERN_ERR, "Couldn't find socket %d fn 3.0!!!\n",
-			      socket);
-		return -ENODEV;
-	}
-
-	/* Device 3 function 0 reads */
-	pci_read_config_dword(pdev, MC_STATUS, &status);
-	pci_read_config_dword(pdev, MC_CONTROL, &control);
-
-	for (i = 0; i < NUM_CHANS; i++) {
-		u32 dimm_dod[3];
-		/* Check if the channel is active */
-		if (!(control & (1 << (8 + i))))
-			continue;
-
-		/* Check if the channel is disabled */
-		if (status & (1 << i))
-			continue;
-
-		pdev = get_pdev_slot_func(socket, i + 4, 1);
-		if (!pdev) {
-			i7core_printk(KERN_ERR, "Couldn't find socket %d "
-						"fn %d.%d!!!\n",
-						socket, i + 4, 1);
-			return -ENODEV;
-		}
-		/* Devices 4-6 function 1 */
-		pci_read_config_dword(pdev,
-				MC_DOD_CH_DIMM0, &dimm_dod[0]);
-		pci_read_config_dword(pdev,
-				MC_DOD_CH_DIMM1, &dimm_dod[1]);
-		pci_read_config_dword(pdev,
-				MC_DOD_CH_DIMM2, &dimm_dod[2]);
-
-		(*channels)++;
-
-		for (j = 0; j < 3; j++) {
-			if (!DIMM_PRESENT(dimm_dod[j]))
-				continue;
-			(*csrows)++;
-		}
-	}
-
-	debugf0("Number of active channels on socket %d: %d\n",
-		socket, *channels);
-
-	return 0;
-}
-
-static int get_dimm_config(const struct mem_ctl_info *mci)
+static int get_dimm_config(struct mem_ctl_info *mci)
 {
 	struct i7core_pvt *pvt = mci->pvt_info;
-	struct csrow_info *csr;
 	struct pci_dev *pdev;
 	int i, j;
-	int csrow = 0;
-	unsigned long last_page = 0;
 	enum edac_type mode;
 	enum mem_type mtype;
+	struct dimm_info *dimm;
 
 	/* Get data from the MC register, function 0 */
 	pdev = pvt->pci_mcr[0];
@@ -657,21 +557,20 @@
 		pci_read_config_dword(pvt->pci_ch[i][0],
 				MC_CHANNEL_DIMM_INIT_PARAMS, &data);
 
-		pvt->channel[i].ranks = (data & QUAD_RANK_PRESENT) ?
-						4 : 2;
+
+		if (data & THREE_DIMMS_PRESENT)
+			pvt->channel[i].is_3dimms_present = true;
+
+		if (data & SINGLE_QUAD_RANK_PRESENT)
+			pvt->channel[i].is_single_4rank = true;
+
+		if (data & QUAD_RANK_PRESENT)
+			pvt->channel[i].has_4rank = true;
 
 		if (data & REGISTERED_DIMM)
 			mtype = MEM_RDDR3;
 		else
 			mtype = MEM_DDR3;
-#if 0
-		if (data & THREE_DIMMS_PRESENT)
-			pvt->channel[i].dimms = 3;
-		else if (data & SINGLE_QUAD_RANK_PRESENT)
-			pvt->channel[i].dimms = 1;
-		else
-			pvt->channel[i].dimms = 2;
-#endif
 
 		/* Devices 4-6 function 1 */
 		pci_read_config_dword(pvt->pci_ch[i][1],
@@ -682,11 +581,13 @@
 				MC_DOD_CH_DIMM2, &dimm_dod[2]);
 
 		debugf0("Ch%d phy rd%d, wr%d (0x%08x): "
-			"%d ranks, %cDIMMs\n",
+			"%s%s%s%cDIMMs\n",
 			i,
 			RDLCH(pvt->info.ch_map, i), WRLCH(pvt->info.ch_map, i),
 			data,
-			pvt->channel[i].ranks,
+			pvt->channel[i].is_3dimms_present ? "3DIMMS " : "",
+			pvt->channel[i].is_single_4rank ? "SINGLE_4R " : "",
+			pvt->channel[i].has_4rank ? "HAS_4R " : "",
 			(data & REGISTERED_DIMM) ? 'R' : 'U');
 
 		for (j = 0; j < 3; j++) {
@@ -696,6 +597,8 @@
 			if (!DIMM_PRESENT(dimm_dod[j]))
 				continue;
 
+			dimm = EDAC_DIMM_PTR(mci->layers, mci->dimms, mci->n_layers,
+				       i, j, 0);
 			banks = numbank(MC_DOD_NUMBANK(dimm_dod[j]));
 			ranks = numrank(MC_DOD_NUMRANK(dimm_dod[j]));
 			rows = numrow(MC_DOD_NUMROW(dimm_dod[j]));
@@ -704,8 +607,6 @@
 			/* DDR3 has 8 I/O banks */
 			size = (rows * cols * banks * ranks) >> (20 - 3);
 
-			pvt->channel[i].dimms++;
-
 			debugf0("\tdimm %d %d Mb offset: %x, "
 				"bank: %d, rank: %d, row: %#x, col: %#x\n",
 				j, size,
@@ -714,44 +615,28 @@
 
 			npages = MiB_TO_PAGES(size);
 
-			csr = &mci->csrows[csrow];
-			csr->first_page = last_page + 1;
-			last_page += npages;
-			csr->last_page = last_page;
-			csr->nr_pages = npages;
-
-			csr->page_mask = 0;
-			csr->grain = 8;
-			csr->csrow_idx = csrow;
-			csr->nr_channels = 1;
-
-			csr->channels[0].chan_idx = i;
-			csr->channels[0].ce_count = 0;
-
-			pvt->csrow_map[i][j] = csrow;
+			dimm->nr_pages = npages;
 
 			switch (banks) {
 			case 4:
-				csr->dtype = DEV_X4;
+				dimm->dtype = DEV_X4;
 				break;
 			case 8:
-				csr->dtype = DEV_X8;
+				dimm->dtype = DEV_X8;
 				break;
 			case 16:
-				csr->dtype = DEV_X16;
+				dimm->dtype = DEV_X16;
 				break;
 			default:
-				csr->dtype = DEV_UNKNOWN;
+				dimm->dtype = DEV_UNKNOWN;
 			}
 
-			csr->edac_mode = mode;
-			csr->mtype = mtype;
-			snprintf(csr->channels[0].label,
-					sizeof(csr->channels[0].label),
-					"CPU#%uChannel#%u_DIMM#%u",
-					pvt->i7core_dev->socket, i, j);
-
-			csrow++;
+			snprintf(dimm->label, sizeof(dimm->label),
+				 "CPU#%uChannel#%u_DIMM#%u",
+				 pvt->i7core_dev->socket, i, j);
+			dimm->grain = 8;
+			dimm->edac_mode = mode;
+			dimm->mtype = mtype;
 		}
 
 		pci_read_config_dword(pdev, MC_SAG_CH_0, &value[0]);
@@ -1567,22 +1452,16 @@
 /****************************************************************************
 			Error check routines
  ****************************************************************************/
-static void i7core_rdimm_update_csrow(struct mem_ctl_info *mci,
+static void i7core_rdimm_update_errcount(struct mem_ctl_info *mci,
 				      const int chan,
 				      const int dimm,
 				      const int add)
 {
-	char *msg;
-	struct i7core_pvt *pvt = mci->pvt_info;
-	int row = pvt->csrow_map[chan][dimm], i;
+	int i;
 
 	for (i = 0; i < add; i++) {
-		msg = kasprintf(GFP_KERNEL, "Corrected error "
-				"(Socket=%d channel=%d dimm=%d)",
-				pvt->i7core_dev->socket, chan, dimm);
-
-		edac_mc_handle_fbd_ce(mci, row, 0, msg);
-		kfree (msg);
+		edac_mc_handle_error(HW_EVENT_ERR_CORRECTED, mci, 0, 0, 0,
+				     chan, dimm, -1, "error", "", NULL);
 	}
 }
 
@@ -1623,11 +1502,11 @@
 
 	/*updated the edac core */
 	if (add0 != 0)
-		i7core_rdimm_update_csrow(mci, chan, 0, add0);
+		i7core_rdimm_update_errcount(mci, chan, 0, add0);
 	if (add1 != 0)
-		i7core_rdimm_update_csrow(mci, chan, 1, add1);
+		i7core_rdimm_update_errcount(mci, chan, 1, add1);
 	if (add2 != 0)
-		i7core_rdimm_update_csrow(mci, chan, 2, add2);
+		i7core_rdimm_update_errcount(mci, chan, 2, add2);
 
 }
 
@@ -1747,20 +1626,30 @@
 				    const struct mce *m)
 {
 	struct i7core_pvt *pvt = mci->pvt_info;
-	char *type, *optype, *err, *msg;
+	char *type, *optype, *err, msg[80];
+	enum hw_event_mc_err_type tp_event;
 	unsigned long error = m->status & 0x1ff0000l;
+	bool uncorrected_error = m->mcgstatus & 1ll << 61;
+	bool ripv = m->mcgstatus & 1;
 	u32 optypenum = (m->status >> 4) & 0x07;
 	u32 core_err_cnt = (m->status >> 38) & 0x7fff;
 	u32 dimm = (m->misc >> 16) & 0x3;
 	u32 channel = (m->misc >> 18) & 0x3;
 	u32 syndrome = m->misc >> 32;
 	u32 errnum = find_first_bit(&error, 32);
-	int csrow;
 
-	if (m->mcgstatus & 1)
-		type = "FATAL";
-	else
-		type = "NON_FATAL";
+	if (uncorrected_error) {
+		if (ripv) {
+			type = "FATAL";
+			tp_event = HW_EVENT_ERR_FATAL;
+		} else {
+			type = "NON_FATAL";
+			tp_event = HW_EVENT_ERR_UNCORRECTED;
+		}
+	} else {
+		type = "CORRECTED";
+		tp_event = HW_EVENT_ERR_CORRECTED;
+	}
 
 	switch (optypenum) {
 	case 0:
@@ -1815,27 +1704,20 @@
 		err = "unknown";
 	}
 
-	/* FIXME: should convert addr into bank and rank information */
-	msg = kasprintf(GFP_ATOMIC,
-		"%s (addr = 0x%08llx, cpu=%d, Dimm=%d, Channel=%d, "
-		"syndrome=0x%08x, count=%d, Err=%08llx:%08llx (%s: %s))\n",
-		type, (long long) m->addr, m->cpu, dimm, channel,
-		syndrome, core_err_cnt, (long long)m->status,
-		(long long)m->misc, optype, err);
+	snprintf(msg, sizeof(msg), "count=%d %s", core_err_cnt, optype);
 
-	debugf0("%s", msg);
-
-	csrow = pvt->csrow_map[channel][dimm];
-
-	/* Call the helper to output message */
-	if (m->mcgstatus & 1)
-		edac_mc_handle_fbd_ue(mci, csrow, 0,
-				0 /* FIXME: should be channel here */, msg);
-	else if (!pvt->is_registered)
-		edac_mc_handle_fbd_ce(mci, csrow,
-				0 /* FIXME: should be channel here */, msg);
-
-	kfree(msg);
+	/*
+	 * Call the helper to output message
+	 * FIXME: what to do if core_err_cnt > 1? Currently, it generates
+	 * only one event
+	 */
+	if (uncorrected_error || !pvt->is_registered)
+		edac_mc_handle_error(tp_event, mci,
+				     m->addr >> PAGE_SHIFT,
+				     m->addr & ~PAGE_MASK,
+				     syndrome,
+				     channel, dimm, -1,
+				     err, msg, m);
 }
 
 /*
@@ -1932,12 +1814,6 @@
 	if (mce->bank != 8)
 		return NOTIFY_DONE;
 
-#ifdef CONFIG_SMP
-	/* Only handle if it is the right mc controller */
-	if (mce->socketid != pvt->i7core_dev->socket)
-		return NOTIFY_DONE;
-#endif
-
 	smp_rmb();
 	if ((pvt->mce_out + 1) % MCE_LOG_LEN == pvt->mce_in) {
 		smp_wmb();
@@ -2234,8 +2110,6 @@
 	if (pvt->enable_scrub)
 		disable_sdram_scrub_setting(mci);
 
-	mce_unregister_decode_chain(&i7_mce_dec);
-
 	/* Disable EDAC polling */
 	i7core_pci_ctl_release(pvt);
 
@@ -2252,15 +2126,19 @@
 {
 	struct mem_ctl_info *mci;
 	struct i7core_pvt *pvt;
-	int rc, channels, csrows;
-
-	/* Check the number of active and not disabled channels */
-	rc = i7core_get_active_channels(i7core_dev->socket, &channels, &csrows);
-	if (unlikely(rc < 0))
-		return rc;
+	int rc;
+	struct edac_mc_layer layers[2];
 
 	/* allocate a new MC control structure */
-	mci = edac_mc_alloc(sizeof(*pvt), csrows, channels, i7core_dev->socket);
+
+	layers[0].type = EDAC_MC_LAYER_CHANNEL;
+	layers[0].size = NUM_CHANS;
+	layers[0].is_virt_csrow = false;
+	layers[1].type = EDAC_MC_LAYER_SLOT;
+	layers[1].size = MAX_DIMMS;
+	layers[1].is_virt_csrow = true;
+	mci = edac_mc_alloc(i7core_dev->socket, ARRAY_SIZE(layers), layers,
+			    sizeof(*pvt));
 	if (unlikely(!mci))
 		return -ENOMEM;
 
@@ -2336,8 +2214,6 @@
 	/* DCLK for scrub rate setting */
 	pvt->dclk_freq = get_dclk_freq();
 
-	mce_register_decode_chain(&i7_mce_dec);
-
 	return 0;
 
 fail0:
@@ -2481,8 +2357,10 @@
 
 	pci_rc = pci_register_driver(&i7core_driver);
 
-	if (pci_rc >= 0)
+	if (pci_rc >= 0) {
+		mce_register_decode_chain(&i7_mce_dec);
 		return 0;
+	}
 
 	i7core_printk(KERN_ERR, "Failed to register device with error %d.\n",
 		      pci_rc);
@@ -2498,6 +2376,7 @@
 {
 	debugf2("MC: " __FILE__ ": %s()\n", __func__);
 	pci_unregister_driver(&i7core_driver);
+	mce_unregister_decode_chain(&i7_mce_dec);
 }
 
 module_init(i7core_init);
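A compact sketch of how the i7core hunks above turn a decoded machine-check record into the unified report; the bit extraction mirrors the patch, and the surrounding variables (mci, tp_event, err, msg) are assumed to be set up as in i7core_mce_output_error():

	u32 dimm     = (m->misc >> 16) & 0x3;
	u32 channel  = (m->misc >> 18) & 0x3;
	u32 syndrome = m->misc >> 32;

	edac_mc_handle_error(tp_event, mci,
			     m->addr >> PAGE_SHIFT,	/* error page */
			     m->addr & ~PAGE_MASK,	/* offset in page */
			     syndrome,
			     channel, dimm, -1,		/* layer positions */
			     err, msg, m);
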
diff --git a/drivers/edac/i82443bxgx_edac.c b/drivers/edac/i82443bxgx_edac.c
index 3bf2b2f..52072c2 100644
--- a/drivers/edac/i82443bxgx_edac.c
+++ b/drivers/edac/i82443bxgx_edac.c
@@ -12,7 +12,7 @@
  * 440GX fix by Jason Uhlenkott <juhlenko@akamai.com>.
  *
  * Written with reference to 82443BX Host Bridge Datasheet:
- * http://download.intel.com/design/chipsets/datashts/29063301.pdf 
+ * http://download.intel.com/design/chipsets/datashts/29063301.pdf
  * references to this document given in [].
  *
  * This module doesn't support the 440LX, but it may be possible to
@@ -156,19 +156,19 @@
 	if (info->eap & I82443BXGX_EAP_OFFSET_SBE) {
 		error_found = 1;
 		if (handle_errors)
-			edac_mc_handle_ce(mci, page, pageoffset,
-				/* 440BX/GX don't make syndrome information
-				 * available */
-				0, edac_mc_find_csrow_by_page(mci, page), 0,
-				mci->ctl_name);
+			edac_mc_handle_error(HW_EVENT_ERR_CORRECTED, mci,
+					     page, pageoffset, 0,
+					     edac_mc_find_csrow_by_page(mci, page),
+					     0, -1, mci->ctl_name, "", NULL);
 	}
 
 	if (info->eap & I82443BXGX_EAP_OFFSET_MBE) {
 		error_found = 1;
 		if (handle_errors)
-			edac_mc_handle_ue(mci, page, pageoffset,
-					edac_mc_find_csrow_by_page(mci, page),
-					mci->ctl_name);
+			edac_mc_handle_error(HW_EVENT_ERR_UNCORRECTED, mci,
+					     page, pageoffset, 0,
+					     edac_mc_find_csrow_by_page(mci, page),
+					     0, -1, mci->ctl_name, "", NULL);
 	}
 
 	return error_found;
@@ -189,6 +189,7 @@
 				enum mem_type mtype)
 {
 	struct csrow_info *csrow;
+	struct dimm_info *dimm;
 	int index;
 	u8 drbar, dramc;
 	u32 row_base, row_high_limit, row_high_limit_last;
@@ -197,6 +198,8 @@
 	row_high_limit_last = 0;
 	for (index = 0; index < mci->nr_csrows; index++) {
 		csrow = &mci->csrows[index];
+		dimm = csrow->channels[0].dimm;
+
 		pci_read_config_byte(pdev, I82443BXGX_DRB + index, &drbar);
 		debugf1("MC%d: %s: %s() Row=%d DRB = %#0x\n",
 			mci->mc_idx, __FILE__, __func__, index, drbar);
@@ -217,14 +220,14 @@
 		row_base = row_high_limit_last;
 		csrow->first_page = row_base >> PAGE_SHIFT;
 		csrow->last_page = (row_high_limit >> PAGE_SHIFT) - 1;
-		csrow->nr_pages = csrow->last_page - csrow->first_page + 1;
+		dimm->nr_pages = csrow->last_page - csrow->first_page + 1;
 		/* EAP reports in 4kilobyte granularity [61] */
-		csrow->grain = 1 << 12;
-		csrow->mtype = mtype;
+		dimm->grain = 1 << 12;
+		dimm->mtype = mtype;
 		/* I don't think 440BX can tell you device type? FIXME? */
-		csrow->dtype = DEV_UNKNOWN;
+		dimm->dtype = DEV_UNKNOWN;
 		/* Mode is global to all rows on 440BX */
-		csrow->edac_mode = edac_mode;
+		dimm->edac_mode = edac_mode;
 		row_high_limit_last = row_high_limit;
 	}
 }
@@ -232,6 +235,7 @@
 static int i82443bxgx_edacmc_probe1(struct pci_dev *pdev, int dev_idx)
 {
 	struct mem_ctl_info *mci;
+	struct edac_mc_layer layers[2];
 	u8 dramc;
 	u32 nbxcfg, ecc_mode;
 	enum mem_type mtype;
@@ -245,8 +249,13 @@
 	if (pci_read_config_dword(pdev, I82443BXGX_NBXCFG, &nbxcfg))
 		return -EIO;
 
-	mci = edac_mc_alloc(0, I82443BXGX_NR_CSROWS, I82443BXGX_NR_CHANS, 0);
-
+	layers[0].type = EDAC_MC_LAYER_CHIP_SELECT;
+	layers[0].size = I82443BXGX_NR_CSROWS;
+	layers[0].is_virt_csrow = true;
+	layers[1].type = EDAC_MC_LAYER_CHANNEL;
+	layers[1].size = I82443BXGX_NR_CHANS;
+	layers[1].is_virt_csrow = false;
+	mci = edac_mc_alloc(0, ARRAY_SIZE(layers), layers, 0);
 	if (mci == NULL)
 		return -ENOMEM;
 
diff --git a/drivers/edac/i82860_edac.c b/drivers/edac/i82860_edac.c
index c779092..0804505 100644
--- a/drivers/edac/i82860_edac.c
+++ b/drivers/edac/i82860_edac.c
@@ -99,6 +99,7 @@
 				struct i82860_error_info *info,
 				int handle_errors)
 {
+	struct dimm_info *dimm;
 	int row;
 
 	if (!(info->errsts2 & 0x0003))
@@ -108,18 +109,25 @@
 		return 1;
 
 	if ((info->errsts ^ info->errsts2) & 0x0003) {
-		edac_mc_handle_ce_no_info(mci, "UE overwrote CE");
+		edac_mc_handle_error(HW_EVENT_ERR_UNCORRECTED, mci, 0, 0, 0,
+				     -1, -1, -1, "UE overwrote CE", "", NULL);
 		info->errsts = info->errsts2;
 	}
 
 	info->eap >>= PAGE_SHIFT;
 	row = edac_mc_find_csrow_by_page(mci, info->eap);
+	dimm = mci->csrows[row].channels[0].dimm;
 
 	if (info->errsts & 0x0002)
-		edac_mc_handle_ue(mci, info->eap, 0, row, "i82860 UE");
+		edac_mc_handle_error(HW_EVENT_ERR_UNCORRECTED, mci,
+				     info->eap, 0, 0,
+				     dimm->location[0], dimm->location[1], -1,
+				     "i82860 UE", "", NULL);
 	else
-		edac_mc_handle_ce(mci, info->eap, 0, info->derrsyn, row, 0,
-				"i82860 UE");
+		edac_mc_handle_error(HW_EVENT_ERR_CORRECTED, mci,
+				     info->eap, 0, info->derrsyn,
+				     dimm->location[0], dimm->location[1], -1,
+				     "i82860 CE", "", NULL);
 
 	return 1;
 }
@@ -140,6 +148,7 @@
 	u16 value;
 	u32 cumul_size;
 	struct csrow_info *csrow;
+	struct dimm_info *dimm;
 	int index;
 
 	pci_read_config_word(pdev, I82860_MCHCFG, &mchcfg_ddim);
@@ -153,6 +162,8 @@
 	 */
 	for (index = 0; index < mci->nr_csrows; index++) {
 		csrow = &mci->csrows[index];
+		dimm = csrow->channels[0].dimm;
+
 		pci_read_config_word(pdev, I82860_GBA + index * 2, &value);
 		cumul_size = (value & I82860_GBA_MASK) <<
 			(I82860_GBA_SHIFT - PAGE_SHIFT);
@@ -164,30 +175,38 @@
 
 		csrow->first_page = last_cumul_size;
 		csrow->last_page = cumul_size - 1;
-		csrow->nr_pages = cumul_size - last_cumul_size;
+		dimm->nr_pages = cumul_size - last_cumul_size;
 		last_cumul_size = cumul_size;
-		csrow->grain = 1 << 12;	/* I82860_EAP has 4KiB reolution */
-		csrow->mtype = MEM_RMBS;
-		csrow->dtype = DEV_UNKNOWN;
-		csrow->edac_mode = mchcfg_ddim ? EDAC_SECDED : EDAC_NONE;
+		dimm->grain = 1 << 12;	/* I82860_EAP has 4KiB resolution */
+		dimm->mtype = MEM_RMBS;
+		dimm->dtype = DEV_UNKNOWN;
+		dimm->edac_mode = mchcfg_ddim ? EDAC_SECDED : EDAC_NONE;
 	}
 }
 
 static int i82860_probe1(struct pci_dev *pdev, int dev_idx)
 {
 	struct mem_ctl_info *mci;
+	struct edac_mc_layer layers[2];
 	struct i82860_error_info discard;
 
-	/* RDRAM has channels but these don't map onto the abstractions that
-	   edac uses.
-	   The device groups from the GRA registers seem to map reasonably
-	   well onto the notion of a chip select row.
-	   There are 16 GRA registers and since the name is associated with
-	   the channel and the GRA registers map to physical devices so we are
-	   going to make 1 channel for group.
+	/*
+	 * RDRAM has channels but these don't map onto the csrow abstraction.
+	 * According to the datasheet, there are 2 Rambus channels, supporting
+	 * up to 16 direct RDRAM devices.
+	 * The device groups from the GRA registers seem to map reasonably
+	 * well onto the notion of a chip select row.
+	 * There are 16 GRA registers; since the name is associated with
+	 * the channel and the GRA registers map to physical devices, we
+	 * are going to make one channel per group.
 	 */
-	mci = edac_mc_alloc(0, 16, 1, 0);
-
+	layers[0].type = EDAC_MC_LAYER_CHANNEL;
+	layers[0].size = 2;
+	layers[0].is_virt_csrow = true;
+	layers[1].type = EDAC_MC_LAYER_SLOT;
+	layers[1].size = 8;
+	layers[1].is_virt_csrow = true;
+	mci = edac_mc_alloc(0, ARRAY_SIZE(layers), layers, 0);
 	if (!mci)
 		return -ENOMEM;
 
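As the comment above notes, two Rambus channels with eight device groups each give the same 16 positions that the GRA registers describe; a quick check of the layer arithmetic, under the assumption that the number of DIMM slots allocated is the product of the layer sizes (illustrative fragment only, not part of the driver):

	struct edac_mc_layer rdram_layers[2] = {
		{ .type = EDAC_MC_LAYER_CHANNEL, .size = 2, .is_virt_csrow = true },
		{ .type = EDAC_MC_LAYER_SLOT,    .size = 8, .is_virt_csrow = true },
	};
	/* 2 channels * 8 slots = 16 positions, one per GRA register */
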
diff --git a/drivers/edac/i82875p_edac.c b/drivers/edac/i82875p_edac.c
index 10f15d8..b613e31 100644
--- a/drivers/edac/i82875p_edac.c
+++ b/drivers/edac/i82875p_edac.c
@@ -38,7 +38,8 @@
 #endif				/* PCI_DEVICE_ID_INTEL_82875_6 */
 
 /* four csrows in dual channel, eight in single channel */
-#define I82875P_NR_CSROWS(nr_chans) (8/(nr_chans))
+#define I82875P_NR_DIMMS		8
+#define I82875P_NR_CSROWS(nr_chans)	(I82875P_NR_DIMMS / (nr_chans))
 
 /* Intel 82875p register addresses - device 0 function 0 - DRAM Controller */
 #define I82875P_EAP		0x58	/* Error Address Pointer (32b)
@@ -235,7 +236,9 @@
 		return 1;
 
 	if ((info->errsts ^ info->errsts2) & 0x0081) {
-		edac_mc_handle_ce_no_info(mci, "UE overwrote CE");
+		edac_mc_handle_error(HW_EVENT_ERR_UNCORRECTED, mci, 0, 0, 0,
+				     -1, -1, -1,
+				     "UE overwrote CE", "", NULL);
 		info->errsts = info->errsts2;
 	}
 
@@ -243,11 +246,15 @@
 	row = edac_mc_find_csrow_by_page(mci, info->eap);
 
 	if (info->errsts & 0x0080)
-		edac_mc_handle_ue(mci, info->eap, 0, row, "i82875p UE");
+		edac_mc_handle_error(HW_EVENT_ERR_UNCORRECTED, mci,
+				     info->eap, 0, 0,
+				     row, -1, -1,
+				     "i82875p UE", "", NULL);
 	else
-		edac_mc_handle_ce(mci, info->eap, 0, info->derrsyn, row,
-				multi_chan ? (info->des & 0x1) : 0,
-				"i82875p CE");
+		edac_mc_handle_error(HW_EVENT_ERR_CORRECTED, mci,
+				     info->eap, 0, info->derrsyn,
+				     row, multi_chan ? (info->des & 0x1) : 0,
+				     -1, "i82875p CE", "", NULL);
 
 	return 1;
 }
@@ -342,11 +349,13 @@
 				void __iomem * ovrfl_window, u32 drc)
 {
 	struct csrow_info *csrow;
+	struct dimm_info *dimm;
+	unsigned nr_chans = dual_channel_active(drc) + 1;
 	unsigned long last_cumul_size;
 	u8 value;
 	u32 drc_ddim;		/* DRAM Data Integrity Mode 0=none,2=edac */
-	u32 cumul_size;
-	int index;
+	u32 cumul_size, nr_pages;
+	int index, j;
 
 	drc_ddim = (drc >> 18) & 0x1;
 	last_cumul_size = 0;
@@ -369,12 +378,18 @@
 
 		csrow->first_page = last_cumul_size;
 		csrow->last_page = cumul_size - 1;
-		csrow->nr_pages = cumul_size - last_cumul_size;
+		nr_pages = cumul_size - last_cumul_size;
 		last_cumul_size = cumul_size;
-		csrow->grain = 1 << 12;	/* I82875P_EAP has 4KiB reolution */
-		csrow->mtype = MEM_DDR;
-		csrow->dtype = DEV_UNKNOWN;
-		csrow->edac_mode = drc_ddim ? EDAC_SECDED : EDAC_NONE;
+
+		for (j = 0; j < nr_chans; j++) {
+			dimm = csrow->channels[j].dimm;
+
+			dimm->nr_pages = nr_pages / nr_chans;
+			dimm->grain = 1 << 12;	/* I82875P_EAP has 4KiB resolution */
+			dimm->mtype = MEM_DDR;
+			dimm->dtype = DEV_UNKNOWN;
+			dimm->edac_mode = drc_ddim ? EDAC_SECDED : EDAC_NONE;
+		}
 	}
 }
 
@@ -382,6 +397,7 @@
 {
 	int rc = -ENODEV;
 	struct mem_ctl_info *mci;
+	struct edac_mc_layer layers[2];
 	struct i82875p_pvt *pvt;
 	struct pci_dev *ovrfl_pdev;
 	void __iomem *ovrfl_window;
@@ -397,9 +413,14 @@
 		return -ENODEV;
 	drc = readl(ovrfl_window + I82875P_DRC);
 	nr_chans = dual_channel_active(drc) + 1;
-	mci = edac_mc_alloc(sizeof(*pvt), I82875P_NR_CSROWS(nr_chans),
-			nr_chans, 0);
 
+	layers[0].type = EDAC_MC_LAYER_CHIP_SELECT;
+	layers[0].size = I82875P_NR_CSROWS(nr_chans);
+	layers[0].is_virt_csrow = true;
+	layers[1].type = EDAC_MC_LAYER_CHANNEL;
+	layers[1].size = nr_chans;
+	layers[1].is_virt_csrow = false;
+	mci = edac_mc_alloc(0, ARRAY_SIZE(layers), layers, sizeof(*pvt));
 	if (!mci) {
 		rc = -ENOMEM;
 		goto fail0;
diff --git a/drivers/edac/i82975x_edac.c b/drivers/edac/i82975x_edac.c
index 0cd8368..433332c 100644
--- a/drivers/edac/i82975x_edac.c
+++ b/drivers/edac/i82975x_edac.c
@@ -29,7 +29,8 @@
 #define PCI_DEVICE_ID_INTEL_82975_0	0x277c
 #endif				/* PCI_DEVICE_ID_INTEL_82975_0 */
 
-#define I82975X_NR_CSROWS(nr_chans)		(8/(nr_chans))
+#define I82975X_NR_DIMMS		8
+#define I82975X_NR_CSROWS(nr_chans)	(I82975X_NR_DIMMS / (nr_chans))
 
 /* Intel 82975X register addresses - device 0 function 0 - DRAM Controller */
 #define I82975X_EAP		0x58	/* Dram Error Address Pointer (32b)
@@ -287,7 +288,8 @@
 		return 1;
 
 	if ((info->errsts ^ info->errsts2) & 0x0003) {
-		edac_mc_handle_ce_no_info(mci, "UE overwrote CE");
+		edac_mc_handle_error(HW_EVENT_ERR_UNCORRECTED, mci, 0, 0, 0,
+				     -1, -1, -1, "UE overwrote CE", "", NULL);
 		info->errsts = info->errsts2;
 	}
 
@@ -309,13 +311,18 @@
 	chan = (mci->csrows[row].nr_channels == 1) ? 0 : info->eap & 1;
 	offst = info->eap
 			& ((1 << PAGE_SHIFT) -
-				(1 << mci->csrows[row].grain));
+			   (1 << mci->csrows[row].channels[chan].dimm->grain));
 
 	if (info->errsts & 0x0002)
-		edac_mc_handle_ue(mci, page, offst , row, "i82975x UE");
+		edac_mc_handle_error(HW_EVENT_ERR_UNCORRECTED, mci,
+				     page, offst, 0,
+				     row, -1, -1,
+				     "i82975x UE", "", NULL);
 	else
-		edac_mc_handle_ce(mci, page, offst, info->derrsyn, row,
-				chan, "i82975x CE");
+		edac_mc_handle_error(HW_EVENT_ERR_CORRECTED, mci,
+				     page, offst, info->derrsyn,
+				     row, chan ? chan : 0, -1,
+				     "i82975x CE", "", NULL);
 
 	return 1;
 }
@@ -370,8 +377,10 @@
 	struct csrow_info *csrow;
 	unsigned long last_cumul_size;
 	u8 value;
-	u32 cumul_size;
+	u32 cumul_size, nr_pages;
 	int index, chan;
+	struct dimm_info *dimm;
+	enum dev_type dtype;
 
 	last_cumul_size = 0;
 
@@ -400,28 +409,33 @@
 		debugf3("%s(): (%d) cumul_size 0x%x\n", __func__, index,
 			cumul_size);
 
+		nr_pages = cumul_size - last_cumul_size;
+		if (!nr_pages)
+			continue;
+
 		/*
 		 * Initialise dram labels
 		 * index values:
 		 *   [0-7] for single-channel; i.e. csrow->nr_channels = 1
 		 *   [0-3] for dual-channel; i.e. csrow->nr_channels = 2
 		 */
-		for (chan = 0; chan < csrow->nr_channels; chan++)
-			strncpy(csrow->channels[chan].label,
+		dtype = i82975x_dram_type(mch_window, index);
+		for (chan = 0; chan < csrow->nr_channels; chan++) {
+			dimm = mci->csrows[index].channels[chan].dimm;
+
+			dimm->nr_pages = nr_pages / csrow->nr_channels;
+			strncpy(csrow->channels[chan].dimm->label,
 					labels[(index >> 1) + (chan * 2)],
 					EDAC_MC_LABEL_LEN);
-
-		if (cumul_size == last_cumul_size)
-			continue;	/* not populated */
+			dimm->grain = 1 << 7;	/* 128Byte cache-line resolution */
+			dimm->dtype = i82975x_dram_type(mch_window, index);
+			dimm->mtype = MEM_DDR2; /* I82975x supports only DDR2 */
+			dimm->edac_mode = EDAC_SECDED; /* only supported */
+		}
 
 		csrow->first_page = last_cumul_size;
 		csrow->last_page = cumul_size - 1;
-		csrow->nr_pages = cumul_size - last_cumul_size;
 		last_cumul_size = cumul_size;
-		csrow->grain = 1 << 7;	/* 128Byte cache-line resolution */
-		csrow->mtype = MEM_DDR2; /* I82975x supports only DDR2 */
-		csrow->dtype = i82975x_dram_type(mch_window, index);
-		csrow->edac_mode = EDAC_SECDED; /* only supported */
 	}
 }
 
@@ -463,6 +477,7 @@
 {
 	int rc = -ENODEV;
 	struct mem_ctl_info *mci;
+	struct edac_mc_layer layers[2];
 	struct i82975x_pvt *pvt;
 	void __iomem *mch_window;
 	u32 mchbar;
@@ -531,8 +546,13 @@
 	chans = dual_channel_active(mch_window) + 1;
 
 	/* assuming only one controller, index thus is 0 */
-	mci = edac_mc_alloc(sizeof(*pvt), I82975X_NR_CSROWS(chans),
-					chans, 0);
+	layers[0].type = EDAC_MC_LAYER_CHIP_SELECT;
+	layers[0].size = I82975X_NR_DIMMS;
+	layers[0].is_virt_csrow = true;
+	layers[1].type = EDAC_MC_LAYER_CHANNEL;
+	layers[1].size = I82975X_NR_CSROWS(chans);
+	layers[1].is_virt_csrow = false;
+	mci = edac_mc_alloc(0, ARRAY_SIZE(layers), layers, sizeof(*pvt));
 	if (!mci) {
 		rc = -ENOMEM;
 		goto fail1;
diff --git a/drivers/edac/mce_amd.h b/drivers/edac/mce_amd.h
index c6074c5..8c87a5e 100644
--- a/drivers/edac/mce_amd.h
+++ b/drivers/edac/mce_amd.h
@@ -5,8 +5,6 @@
 
 #include <asm/mce.h>
 
-#define BIT_64(n)			(U64_C(1) << (n))
-
 #define EC(x)				((x) & 0xffff)
 #define XEC(x, mask)			(((x) >> 16) & mask)
 
diff --git a/drivers/edac/mpc85xx_edac.c b/drivers/edac/mpc85xx_edac.c
index 73464a6..0e37462 100644
--- a/drivers/edac/mpc85xx_edac.c
+++ b/drivers/edac/mpc85xx_edac.c
@@ -854,12 +854,16 @@
 		mpc85xx_mc_printk(mci, KERN_ERR, "PFN out of range!\n");
 
 	if (err_detect & DDR_EDE_SBE)
-		edac_mc_handle_ce(mci, pfn, err_addr & ~PAGE_MASK,
-				  syndrome, row_index, 0, mci->ctl_name);
+		edac_mc_handle_error(HW_EVENT_ERR_CORRECTED, mci,
+				     pfn, err_addr & ~PAGE_MASK, syndrome,
+				     row_index, 0, -1,
+				     mci->ctl_name, "", NULL);
 
 	if (err_detect & DDR_EDE_MBE)
-		edac_mc_handle_ue(mci, pfn, err_addr & ~PAGE_MASK,
-				  row_index, mci->ctl_name);
+		edac_mc_handle_error(HW_EVENT_ERR_UNCORRECTED, mci,
+				     pfn, err_addr & ~PAGE_MASK, syndrome,
+				     row_index, 0, -1,
+				     mci->ctl_name, "", NULL);
 
 	out_be32(pdata->mc_vbase + MPC85XX_MC_ERR_DETECT, err_detect);
 }
@@ -883,6 +887,7 @@
 {
 	struct mpc85xx_mc_pdata *pdata = mci->pvt_info;
 	struct csrow_info *csrow;
+	struct dimm_info *dimm;
 	u32 sdram_ctl;
 	u32 sdtype;
 	enum mem_type mtype;
@@ -929,6 +934,8 @@
 		u32 end;
 
 		csrow = &mci->csrows[index];
+		dimm = csrow->channels[0].dimm;
+
 		cs_bnds = in_be32(pdata->mc_vbase + MPC85XX_MC_CS_BNDS_0 +
 				  (index * MPC85XX_MC_CS_BNDS_OFS));
 
@@ -944,19 +951,21 @@
 
 		csrow->first_page = start;
 		csrow->last_page = end;
-		csrow->nr_pages = end + 1 - start;
-		csrow->grain = 8;
-		csrow->mtype = mtype;
-		csrow->dtype = DEV_UNKNOWN;
+
+		dimm->nr_pages = end + 1 - start;
+		dimm->grain = 8;
+		dimm->mtype = mtype;
+		dimm->dtype = DEV_UNKNOWN;
 		if (sdram_ctl & DSC_X32_EN)
-			csrow->dtype = DEV_X32;
-		csrow->edac_mode = EDAC_SECDED;
+			dimm->dtype = DEV_X32;
+		dimm->edac_mode = EDAC_SECDED;
 	}
 }
 
 static int __devinit mpc85xx_mc_err_probe(struct platform_device *op)
 {
 	struct mem_ctl_info *mci;
+	struct edac_mc_layer layers[2];
 	struct mpc85xx_mc_pdata *pdata;
 	struct resource r;
 	u32 sdram_ctl;
@@ -965,7 +974,14 @@
 	if (!devres_open_group(&op->dev, mpc85xx_mc_err_probe, GFP_KERNEL))
 		return -ENOMEM;
 
-	mci = edac_mc_alloc(sizeof(*pdata), 4, 1, edac_mc_idx);
+	layers[0].type = EDAC_MC_LAYER_CHIP_SELECT;
+	layers[0].size = 4;
+	layers[0].is_virt_csrow = true;
+	layers[1].type = EDAC_MC_LAYER_CHANNEL;
+	layers[1].size = 1;
+	layers[1].is_virt_csrow = false;
+	mci = edac_mc_alloc(edac_mc_idx, ARRAY_SIZE(layers), layers,
+			    sizeof(*pdata));
 	if (!mci) {
 		devres_release_group(&op->dev, mpc85xx_mc_err_probe);
 		return -ENOMEM;
diff --git a/drivers/edac/mv64x60_edac.c b/drivers/edac/mv64x60_edac.c
index 7e5ff36..b0bb5a3 100644
--- a/drivers/edac/mv64x60_edac.c
+++ b/drivers/edac/mv64x60_edac.c
@@ -611,12 +611,17 @@
 
 	/* first bit clear in ECC Err Reg, 1 bit error, correctable by HW */
 	if (!(reg & 0x1))
-		edac_mc_handle_ce(mci, err_addr >> PAGE_SHIFT,
-				  err_addr & PAGE_MASK, syndrome, 0, 0,
-				  mci->ctl_name);
+		edac_mc_handle_error(HW_EVENT_ERR_CORRECTED, mci,
+				     err_addr >> PAGE_SHIFT,
+				     err_addr & PAGE_MASK, syndrome,
+				     0, 0, -1,
+				     mci->ctl_name, "", NULL);
 	else	/* 2 bit error, UE */
-		edac_mc_handle_ue(mci, err_addr >> PAGE_SHIFT,
-				  err_addr & PAGE_MASK, 0, mci->ctl_name);
+		edac_mc_handle_error(HW_EVENT_ERR_UNCORRECTED, mci,
+				     err_addr >> PAGE_SHIFT,
+				     err_addr & PAGE_MASK, 0,
+				     0, 0, -1,
+				     mci->ctl_name, "", NULL);
 
 	/* clear the error */
 	out_le32(pdata->mc_vbase + MV64X60_SDRAM_ERR_ADDR, 0);
@@ -656,6 +661,8 @@
 				struct mv64x60_mc_pdata *pdata)
 {
 	struct csrow_info *csrow;
+	struct dimm_info *dimm;
+
 	u32 devtype;
 	u32 ctl;
 
@@ -664,35 +671,36 @@
 	ctl = in_le32(pdata->mc_vbase + MV64X60_SDRAM_CONFIG);
 
 	csrow = &mci->csrows[0];
-	csrow->first_page = 0;
-	csrow->nr_pages = pdata->total_mem >> PAGE_SHIFT;
-	csrow->last_page = csrow->first_page + csrow->nr_pages - 1;
-	csrow->grain = 8;
+	dimm = csrow->channels[0].dimm;
 
-	csrow->mtype = (ctl & MV64X60_SDRAM_REGISTERED) ? MEM_RDDR : MEM_DDR;
+	dimm->nr_pages = pdata->total_mem >> PAGE_SHIFT;
+	dimm->grain = 8;
+
+	dimm->mtype = (ctl & MV64X60_SDRAM_REGISTERED) ? MEM_RDDR : MEM_DDR;
 
 	devtype = (ctl >> 20) & 0x3;
 	switch (devtype) {
 	case 0x0:
-		csrow->dtype = DEV_X32;
+		dimm->dtype = DEV_X32;
 		break;
 	case 0x2:		/* could be X8 too, but no way to tell */
-		csrow->dtype = DEV_X16;
+		dimm->dtype = DEV_X16;
 		break;
 	case 0x3:
-		csrow->dtype = DEV_X4;
+		dimm->dtype = DEV_X4;
 		break;
 	default:
-		csrow->dtype = DEV_UNKNOWN;
+		dimm->dtype = DEV_UNKNOWN;
 		break;
 	}
 
-	csrow->edac_mode = EDAC_SECDED;
+	dimm->edac_mode = EDAC_SECDED;
 }
 
 static int __devinit mv64x60_mc_err_probe(struct platform_device *pdev)
 {
 	struct mem_ctl_info *mci;
+	struct edac_mc_layer layers[2];
 	struct mv64x60_mc_pdata *pdata;
 	struct resource *r;
 	u32 ctl;
@@ -701,7 +709,14 @@
 	if (!devres_open_group(&pdev->dev, mv64x60_mc_err_probe, GFP_KERNEL))
 		return -ENOMEM;
 
-	mci = edac_mc_alloc(sizeof(struct mv64x60_mc_pdata), 1, 1, edac_mc_idx);
+	layers[0].type = EDAC_MC_LAYER_CHIP_SELECT;
+	layers[0].size = 1;
+	layers[0].is_virt_csrow = true;
+	layers[1].type = EDAC_MC_LAYER_CHANNEL;
+	layers[1].size = 1;
+	layers[1].is_virt_csrow = false;
+	mci = edac_mc_alloc(edac_mc_idx, ARRAY_SIZE(layers), layers,
+			    sizeof(struct mv64x60_mc_pdata));
 	if (!mci) {
 		printk(KERN_ERR "%s: No memory for CPU err\n", __func__);
 		devres_release_group(&pdev->dev, mv64x60_mc_err_probe);
diff --git a/drivers/edac/pasemi_edac.c b/drivers/edac/pasemi_edac.c
index 7f71ee4..b095a90 100644
--- a/drivers/edac/pasemi_edac.c
+++ b/drivers/edac/pasemi_edac.c
@@ -110,15 +110,16 @@
 	/* uncorrectable/multi-bit errors */
 	if (errsta & (MCDEBUG_ERRSTA_MBE_STATUS |
 		      MCDEBUG_ERRSTA_RFL_STATUS)) {
-		edac_mc_handle_ue(mci, mci->csrows[cs].first_page, 0,
-				  cs, mci->ctl_name);
+		edac_mc_handle_error(HW_EVENT_ERR_UNCORRECTED, mci,
+				     mci->csrows[cs].first_page, 0, 0,
+				     cs, 0, -1, mci->ctl_name, "", NULL);
 	}
 
 	/* correctable/single-bit errors */
-	if (errsta & MCDEBUG_ERRSTA_SBE_STATUS) {
-		edac_mc_handle_ce(mci, mci->csrows[cs].first_page, 0,
-				  0, cs, 0, mci->ctl_name);
-	}
+	if (errsta & MCDEBUG_ERRSTA_SBE_STATUS)
+		edac_mc_handle_error(HW_EVENT_ERR_CORRECTED, mci,
+				     mci->csrows[cs].first_page, 0, 0,
+				     cs, 0, -1, mci->ctl_name, "", NULL);
 }
 
 static void pasemi_edac_check(struct mem_ctl_info *mci)
@@ -135,11 +136,13 @@
 				   enum edac_type edac_mode)
 {
 	struct csrow_info *csrow;
+	struct dimm_info *dimm;
 	u32 rankcfg;
 	int index;
 
 	for (index = 0; index < mci->nr_csrows; index++) {
 		csrow = &mci->csrows[index];
+		dimm = csrow->channels[0].dimm;
 
 		pci_read_config_dword(pdev,
 				      MCDRAM_RANKCFG + (index * 12),
@@ -151,20 +154,20 @@
 		switch ((rankcfg & MCDRAM_RANKCFG_TYPE_SIZE_M) >>
 			MCDRAM_RANKCFG_TYPE_SIZE_S) {
 		case 0:
-			csrow->nr_pages = 128 << (20 - PAGE_SHIFT);
+			dimm->nr_pages = 128 << (20 - PAGE_SHIFT);
 			break;
 		case 1:
-			csrow->nr_pages = 256 << (20 - PAGE_SHIFT);
+			dimm->nr_pages = 256 << (20 - PAGE_SHIFT);
 			break;
 		case 2:
 		case 3:
-			csrow->nr_pages = 512 << (20 - PAGE_SHIFT);
+			dimm->nr_pages = 512 << (20 - PAGE_SHIFT);
 			break;
 		case 4:
-			csrow->nr_pages = 1024 << (20 - PAGE_SHIFT);
+			dimm->nr_pages = 1024 << (20 - PAGE_SHIFT);
 			break;
 		case 5:
-			csrow->nr_pages = 2048 << (20 - PAGE_SHIFT);
+			dimm->nr_pages = 2048 << (20 - PAGE_SHIFT);
 			break;
 		default:
 			edac_mc_printk(mci, KERN_ERR,
@@ -174,13 +177,13 @@
 		}
 
 		csrow->first_page = last_page_in_mmc;
-		csrow->last_page = csrow->first_page + csrow->nr_pages - 1;
-		last_page_in_mmc += csrow->nr_pages;
+		csrow->last_page = csrow->first_page + dimm->nr_pages - 1;
+		last_page_in_mmc += dimm->nr_pages;
 		csrow->page_mask = 0;
-		csrow->grain = PASEMI_EDAC_ERROR_GRAIN;
-		csrow->mtype = MEM_DDR;
-		csrow->dtype = DEV_UNKNOWN;
-		csrow->edac_mode = edac_mode;
+		dimm->grain = PASEMI_EDAC_ERROR_GRAIN;
+		dimm->mtype = MEM_DDR;
+		dimm->dtype = DEV_UNKNOWN;
+		dimm->edac_mode = edac_mode;
 	}
 	return 0;
 }
@@ -189,6 +192,7 @@
 		const struct pci_device_id *ent)
 {
 	struct mem_ctl_info *mci = NULL;
+	struct edac_mc_layer layers[2];
 	u32 errctl1, errcor, scrub, mcen;
 
 	pci_read_config_dword(pdev, MCCFG_MCEN, &mcen);
@@ -205,9 +209,14 @@
 		MCDEBUG_ERRCTL1_RFL_LOG_EN;
 	pci_write_config_dword(pdev, MCDEBUG_ERRCTL1, errctl1);
 
-	mci = edac_mc_alloc(0, PASEMI_EDAC_NR_CSROWS, PASEMI_EDAC_NR_CHANS,
-				system_mmc_id++);
-
+	layers[0].type = EDAC_MC_LAYER_CHIP_SELECT;
+	layers[0].size = PASEMI_EDAC_NR_CSROWS;
+	layers[0].is_virt_csrow = true;
+	layers[1].type = EDAC_MC_LAYER_CHANNEL;
+	layers[1].size = PASEMI_EDAC_NR_CHANS;
+	layers[1].is_virt_csrow = false;
+	mci = edac_mc_alloc(system_mmc_id++, ARRAY_SIZE(layers), layers,
+			    0);
 	if (mci == NULL)
 		return -ENOMEM;
 
diff --git a/drivers/edac/ppc4xx_edac.c b/drivers/edac/ppc4xx_edac.c
index d427c69..f3f9fed 100644
--- a/drivers/edac/ppc4xx_edac.c
+++ b/drivers/edac/ppc4xx_edac.c
@@ -727,7 +727,10 @@
 
 	for (row = 0; row < mci->nr_csrows; row++)
 		if (ppc4xx_edac_check_bank_error(status, row))
-			edac_mc_handle_ce_no_info(mci, message);
+			edac_mc_handle_error(HW_EVENT_ERR_CORRECTED, mci,
+					     0, 0, 0,
+					     row, 0, -1,
+					     message, "", NULL);
 }
 
 /**
@@ -755,7 +758,10 @@
 
 	for (row = 0; row < mci->nr_csrows; row++)
 		if (ppc4xx_edac_check_bank_error(status, row))
-			edac_mc_handle_ue(mci, page, offset, row, message);
+			edac_mc_handle_error(HW_EVENT_ERR_UNCORRECTED, mci,
+					     page, offset, 0,
+					     row, 0, -1,
+					     message, "", NULL);
 }
 
 /**
@@ -895,9 +901,8 @@
 	enum mem_type mtype;
 	enum dev_type dtype;
 	enum edac_type edac_mode;
-	int row;
-	u32 mbxcf, size;
-	static u32 ppc4xx_last_page;
+	int row, j;
+	u32 mbxcf, size, nr_pages;
 
 	/* Establish the memory type and width */
 
@@ -948,7 +953,7 @@
 		case SDRAM_MBCF_SZ_2GB:
 		case SDRAM_MBCF_SZ_4GB:
 		case SDRAM_MBCF_SZ_8GB:
-			csi->nr_pages = SDRAM_MBCF_SZ_TO_PAGES(size);
+			nr_pages = SDRAM_MBCF_SZ_TO_PAGES(size);
 			break;
 		default:
 			ppc4xx_edac_mc_printk(KERN_ERR, mci,
@@ -959,10 +964,6 @@
 			goto done;
 		}
 
-		csi->first_page = ppc4xx_last_page;
-		csi->last_page	= csi->first_page + csi->nr_pages - 1;
-		csi->page_mask	= 0;
-
 		/*
 		 * It's unclear exactly what grain should be set to
 		 * here. The SDRAM_ECCES register allows resolution of
@@ -975,15 +976,17 @@
 		 * possible values would be the PLB width (16), the
 		 * page size (PAGE_SIZE) or the memory width (2 or 4).
 		 */
+		for (j = 0; j < csi->nr_channels; j++) {
+			struct dimm_info *dimm = csi->channels[j].dimm;
 
-		csi->grain	= 1;
+			dimm->nr_pages  = nr_pages / csi->nr_channels;
+			dimm->grain	= 1;
 
-		csi->mtype	= mtype;
-		csi->dtype	= dtype;
+			dimm->mtype	= mtype;
+			dimm->dtype	= dtype;
 
-		csi->edac_mode	= edac_mode;
-
-		ppc4xx_last_page += csi->nr_pages;
+			dimm->edac_mode	= edac_mode;
+		}
 	}
 
  done:
@@ -1236,6 +1239,7 @@
 	dcr_host_t dcr_host;
 	const struct device_node *np = op->dev.of_node;
 	struct mem_ctl_info *mci = NULL;
+	struct edac_mc_layer layers[2];
 	static int ppc4xx_edac_instance;
 
 	/*
@@ -1281,12 +1285,14 @@
 	 * controller instance and perform the appropriate
 	 * initialization.
 	 */
-
-	mci = edac_mc_alloc(sizeof(struct ppc4xx_edac_pdata),
-			    ppc4xx_edac_nr_csrows,
-			    ppc4xx_edac_nr_chans,
-			    ppc4xx_edac_instance);
-
+	layers[0].type = EDAC_MC_LAYER_CHIP_SELECT;
+	layers[0].size = ppc4xx_edac_nr_csrows;
+	layers[0].is_virt_csrow = true;
+	layers[1].type = EDAC_MC_LAYER_CHANNEL;
+	layers[1].size = ppc4xx_edac_nr_chans;
+	layers[1].is_virt_csrow = false;
+	mci = edac_mc_alloc(ppc4xx_edac_instance, ARRAY_SIZE(layers), layers,
+			    sizeof(struct ppc4xx_edac_pdata));
 	if (mci == NULL) {
 		ppc4xx_edac_printk(KERN_ERR, "%s: "
 				   "Failed to allocate EDAC MC instance!\n",
diff --git a/drivers/edac/r82600_edac.c b/drivers/edac/r82600_edac.c
index 6d908ad..e1cacd1 100644
--- a/drivers/edac/r82600_edac.c
+++ b/drivers/edac/r82600_edac.c
@@ -179,10 +179,11 @@
 		error_found = 1;
 
 		if (handle_errors)
-			edac_mc_handle_ce(mci, page, 0,	/* not avail */
-					syndrome,
-					edac_mc_find_csrow_by_page(mci, page),
-					0, mci->ctl_name);
+			edac_mc_handle_error(HW_EVENT_ERR_CORRECTED, mci,
+					     page, 0, syndrome,
+					     edac_mc_find_csrow_by_page(mci, page),
+					     0, -1,
+					     mci->ctl_name, "", NULL);
 	}
 
 	if (info->eapr & BIT(1)) {	/* UE? */
@@ -190,9 +191,11 @@
 
 		if (handle_errors)
 			/* 82600 doesn't give enough info */
-			edac_mc_handle_ue(mci, page, 0,
-					edac_mc_find_csrow_by_page(mci, page),
-					mci->ctl_name);
+			edac_mc_handle_error(HW_EVENT_ERR_UNCORRECTED, mci,
+					     page, 0, 0,
+					     edac_mc_find_csrow_by_page(mci, page),
+					     0, -1,
+					     mci->ctl_name, "", NULL);
 	}
 
 	return error_found;
@@ -216,6 +219,7 @@
 			u8 dramcr)
 {
 	struct csrow_info *csrow;
+	struct dimm_info *dimm;
 	int index;
 	u8 drbar;		/* SDRAM Row Boundary Address Register */
 	u32 row_high_limit, row_high_limit_last;
@@ -227,6 +231,7 @@
 
 	for (index = 0; index < mci->nr_csrows; index++) {
 		csrow = &mci->csrows[index];
+		dimm = csrow->channels[0].dimm;
 
 		/* find the DRAM Chip Select Base address and mask */
 		pci_read_config_byte(pdev, R82600_DRBA + index, &drbar);
@@ -247,16 +252,17 @@
 
 		csrow->first_page = row_base >> PAGE_SHIFT;
 		csrow->last_page = (row_high_limit >> PAGE_SHIFT) - 1;
-		csrow->nr_pages = csrow->last_page - csrow->first_page + 1;
+
+		dimm->nr_pages = csrow->last_page - csrow->first_page + 1;
 		/* Error address is top 19 bits - so granularity is      *
 		 * 14 bits                                               */
-		csrow->grain = 1 << 14;
-		csrow->mtype = reg_sdram ? MEM_RDDR : MEM_DDR;
+		dimm->grain = 1 << 14;
+		dimm->mtype = reg_sdram ? MEM_RDDR : MEM_DDR;
 		/* FIXME - check that this is unknowable with this chipset */
-		csrow->dtype = DEV_UNKNOWN;
+		dimm->dtype = DEV_UNKNOWN;
 
 		/* Mode is global on 82600 */
-		csrow->edac_mode = ecc_on ? EDAC_SECDED : EDAC_NONE;
+		dimm->edac_mode = ecc_on ? EDAC_SECDED : EDAC_NONE;
 		row_high_limit_last = row_high_limit;
 	}
 }
@@ -264,6 +270,7 @@
 static int r82600_probe1(struct pci_dev *pdev, int dev_idx)
 {
 	struct mem_ctl_info *mci;
+	struct edac_mc_layer layers[2];
 	u8 dramcr;
 	u32 eapr;
 	u32 scrub_disabled;
@@ -278,8 +285,13 @@
 	debugf2("%s(): sdram refresh rate = %#0x\n", __func__,
 		sdram_refresh_rate);
 	debugf2("%s(): DRAMC register = %#0x\n", __func__, dramcr);
-	mci = edac_mc_alloc(0, R82600_NR_CSROWS, R82600_NR_CHANS, 0);
-
+	layers[0].type = EDAC_MC_LAYER_CHIP_SELECT;
+	layers[0].size = R82600_NR_CSROWS;
+	layers[0].is_virt_csrow = true;
+	layers[1].type = EDAC_MC_LAYER_CHANNEL;
+	layers[1].size = R82600_NR_CHANS;
+	layers[1].is_virt_csrow = false;
+	mci = edac_mc_alloc(0, ARRAY_SIZE(layers), layers, 0);
 	if (mci == NULL)
 		return -ENOMEM;
 
diff --git a/drivers/edac/sb_edac.c b/drivers/edac/sb_edac.c
index 123204f..36ad17e 100644
--- a/drivers/edac/sb_edac.c
+++ b/drivers/edac/sb_edac.c
@@ -314,8 +314,6 @@
 	struct sbridge_info	info;
 	struct sbridge_channel	channel[NUM_CHANNELS];
 
-	int 			csrow_map[NUM_CHANNELS][MAX_DIMMS];
-
 	/* Memory type detection */
 	bool			is_mirrored, is_lockstep, is_close_pg;
 
@@ -487,29 +485,14 @@
 }
 
 /**
- * sbridge_get_active_channels() - gets the number of channels and csrows
+ * check_if_ecc_is_active() - Checks if ECC is active
  * bus:		Device bus
- * @channels:	Number of channels that will be returned
- * @csrows:	Number of csrows found
- *
- * Since EDAC core needs to know in advance the number of available channels
- * and csrows, in order to allocate memory for csrows/channels, it is needed
- * to run two similar steps. At the first step, implemented on this function,
- * it checks the number of csrows/channels present at one socket, identified
- * by the associated PCI bus.
- * this is used in order to properly allocate the size of mci components.
- * Note: one csrow is one dimm.
  */
-static int sbridge_get_active_channels(const u8 bus, unsigned *channels,
-				      unsigned *csrows)
+static int check_if_ecc_is_active(const u8 bus)
 {
 	struct pci_dev *pdev = NULL;
-	int i, j;
 	u32 mcmtr;
 
-	*channels = 0;
-	*csrows = 0;
-
 	pdev = get_pdev_slot_func(bus, 15, 0);
 	if (!pdev) {
 		sbridge_printk(KERN_ERR, "Couldn't find PCI device "
@@ -523,41 +506,14 @@
 		sbridge_printk(KERN_ERR, "ECC is disabled. Aborting\n");
 		return -ENODEV;
 	}
-
-	for (i = 0; i < NUM_CHANNELS; i++) {
-		u32 mtr;
-
-		/* Device 15 functions 2 - 5  */
-		pdev = get_pdev_slot_func(bus, 15, 2 + i);
-		if (!pdev) {
-			sbridge_printk(KERN_ERR, "Couldn't find PCI device "
-						 "%2x.%02d.%d!!!\n",
-						 bus, 15, 2 + i);
-			return -ENODEV;
-		}
-		(*channels)++;
-
-		for (j = 0; j < ARRAY_SIZE(mtr_regs); j++) {
-			pci_read_config_dword(pdev, mtr_regs[j], &mtr);
-			debugf1("Bus#%02x channel #%d  MTR%d = %x\n", bus, i, j, mtr);
-			if (IS_DIMM_PRESENT(mtr))
-				(*csrows)++;
-		}
-	}
-
-	debugf0("Number of active channels: %d, number of active dimms: %d\n",
-		*channels, *csrows);
-
 	return 0;
 }
 
-static int get_dimm_config(const struct mem_ctl_info *mci)
+static int get_dimm_config(struct mem_ctl_info *mci)
 {
 	struct sbridge_pvt *pvt = mci->pvt_info;
-	struct csrow_info *csr;
+	struct dimm_info *dimm;
 	int i, j, banks, ranks, rows, cols, size, npages;
-	int csrow = 0;
-	unsigned long last_page = 0;
 	u32 reg;
 	enum edac_type mode;
 	enum mem_type mtype;
@@ -599,7 +555,7 @@
 		pvt->is_close_pg = false;
 	}
 
-	pci_read_config_dword(pvt->pci_ta, RANK_CFG_A, &reg);
+	pci_read_config_dword(pvt->pci_ddrio, RANK_CFG_A, &reg);
 	if (IS_RDIMM_ENABLED(reg)) {
 		/* FIXME: Can also be LRDIMM */
 		debugf0("Memory is registered\n");
@@ -616,6 +572,8 @@
 		u32 mtr;
 
 		for (j = 0; j < ARRAY_SIZE(mtr_regs); j++) {
+			dimm = EDAC_DIMM_PTR(mci->layers, mci->dimms, mci->n_layers,
+				       i, j, 0);
 			pci_read_config_dword(pvt->pci_tad[i],
 					      mtr_regs[j], &mtr);
 			debugf4("Channel #%d  MTR%d = %x\n", i, j, mtr);
@@ -634,29 +592,15 @@
 					pvt->sbridge_dev->mc, i, j,
 					size, npages,
 					banks, ranks, rows, cols);
-				csr = &mci->csrows[csrow];
 
-				csr->first_page = last_page;
-				csr->last_page = last_page + npages - 1;
-				csr->page_mask = 0UL;	/* Unused */
-				csr->nr_pages = npages;
-				csr->grain = 32;
-				csr->csrow_idx = csrow;
-				csr->dtype = (banks == 8) ? DEV_X8 : DEV_X4;
-				csr->ce_count = 0;
-				csr->ue_count = 0;
-				csr->mtype = mtype;
-				csr->edac_mode = mode;
-				csr->nr_channels = 1;
-				csr->channels[0].chan_idx = i;
-				csr->channels[0].ce_count = 0;
-				pvt->csrow_map[i][j] = csrow;
-				snprintf(csr->channels[0].label,
-					 sizeof(csr->channels[0].label),
+				dimm->nr_pages = npages;
+				dimm->grain = 32;
+				dimm->dtype = (banks == 8) ? DEV_X8 : DEV_X4;
+				dimm->mtype = mtype;
+				dimm->edac_mode = mode;
+				snprintf(dimm->label, sizeof(dimm->label),
 					 "CPU_SrcID#%u_Channel#%u_DIMM#%u",
 					 pvt->sbridge_dev->source_id, i, j);
-				last_page += npages;
-				csrow++;
 			}
 		}
 	}
@@ -844,11 +788,10 @@
 				 u8 *socket,
 				 long *channel_mask,
 				 u8 *rank,
-				 char *area_type)
+				 char **area_type, char *msg)
 {
 	struct mem_ctl_info	*new_mci;
 	struct sbridge_pvt *pvt = mci->pvt_info;
-	char			msg[256];
 	int 			n_rir, n_sads, n_tads, sad_way, sck_xch;
 	int			sad_interl, idx, base_ch;
 	int			interleave_mode;
@@ -870,12 +813,10 @@
 	 */
 	if ((addr > (u64) pvt->tolm) && (addr < (1LL << 32))) {
 		sprintf(msg, "Error at TOLM area, on addr 0x%08Lx", addr);
-		edac_mc_handle_ce_no_info(mci, msg);
 		return -EINVAL;
 	}
 	if (addr >= (u64)pvt->tohm) {
 		sprintf(msg, "Error at MMIOH area, on addr 0x%016Lx", addr);
-		edac_mc_handle_ce_no_info(mci, msg);
 		return -EINVAL;
 	}
 
@@ -892,7 +833,6 @@
 		limit = SAD_LIMIT(reg);
 		if (limit <= prv) {
 			sprintf(msg, "Can't discover the memory socket");
-			edac_mc_handle_ce_no_info(mci, msg);
 			return -EINVAL;
 		}
 		if  (addr <= limit)
@@ -901,10 +841,9 @@
 	}
 	if (n_sads == MAX_SAD) {
 		sprintf(msg, "Can't discover the memory socket");
-		edac_mc_handle_ce_no_info(mci, msg);
 		return -EINVAL;
 	}
-	area_type = get_dram_attr(reg);
+	*area_type = get_dram_attr(reg);
 	interleave_mode = INTERLEAVE_MODE(reg);
 
 	pci_read_config_dword(pvt->pci_sad0, interleave_list[n_sads],
@@ -942,7 +881,6 @@
 		break;
 	default:
 		sprintf(msg, "Can't discover socket interleave");
-		edac_mc_handle_ce_no_info(mci, msg);
 		return -EINVAL;
 	}
 	*socket = sad_interleave[idx];
@@ -957,7 +895,6 @@
 	if (!new_mci) {
 		sprintf(msg, "Struct for socket #%u wasn't initialized",
 			*socket);
-		edac_mc_handle_ce_no_info(mci, msg);
 		return -EINVAL;
 	}
 	mci = new_mci;
@@ -973,7 +910,6 @@
 		limit = TAD_LIMIT(reg);
 		if (limit <= prv) {
 			sprintf(msg, "Can't discover the memory channel");
-			edac_mc_handle_ce_no_info(mci, msg);
 			return -EINVAL;
 		}
 		if  (addr <= limit)
@@ -1013,7 +949,6 @@
 		break;
 	default:
 		sprintf(msg, "Can't discover the TAD target");
-		edac_mc_handle_ce_no_info(mci, msg);
 		return -EINVAL;
 	}
 	*channel_mask = 1 << base_ch;
@@ -1027,7 +962,6 @@
 			break;
 		default:
 			sprintf(msg, "Invalid mirror set. Can't decode addr");
-			edac_mc_handle_ce_no_info(mci, msg);
 			return -EINVAL;
 		}
 	} else
@@ -1055,7 +989,6 @@
 	if (offset > addr) {
 		sprintf(msg, "Can't calculate ch addr: TAD offset 0x%08Lx is too high for addr 0x%08Lx!",
 			offset, addr);
-		edac_mc_handle_ce_no_info(mci, msg);
 		return -EINVAL;
 	}
 	addr -= offset;
@@ -1095,7 +1028,6 @@
 	if (n_rir == MAX_RIR_RANGES) {
 		sprintf(msg, "Can't discover the memory rank for ch addr 0x%08Lx",
 			ch_addr);
-		edac_mc_handle_ce_no_info(mci, msg);
 		return -EINVAL;
 	}
 	rir_way = RIR_WAY(reg);
@@ -1409,7 +1341,8 @@
 {
 	struct mem_ctl_info *new_mci;
 	struct sbridge_pvt *pvt = mci->pvt_info;
-	char *type, *optype, *msg, *recoverable_msg;
+	enum hw_event_mc_err_type tp_event;
+	char *type, *optype, msg[256];
 	bool ripv = GET_BITFIELD(m->mcgstatus, 0, 0);
 	bool overflow = GET_BITFIELD(m->status, 62, 62);
 	bool uncorrected_error = GET_BITFIELD(m->status, 61, 61);
@@ -1421,13 +1354,21 @@
 	u32 optypenum = GET_BITFIELD(m->status, 4, 6);
 	long channel_mask, first_channel;
 	u8  rank, socket;
-	int csrow, rc, dimm;
-	char *area_type = "Unknown";
+	int rc, dimm;
+	char *area_type = NULL;
 
-	if (ripv)
-		type = "NON_FATAL";
-	else
-		type = "FATAL";
+	if (uncorrected_error) {
+		if (ripv) {
+			type = "FATAL";
+			tp_event = HW_EVENT_ERR_FATAL;
+		} else {
+			type = "NON_FATAL";
+			tp_event = HW_EVENT_ERR_UNCORRECTED;
+		}
+	} else {
+		type = "CORRECTED";
+		tp_event = HW_EVENT_ERR_CORRECTED;
+	}
 
 	/*
 	 * According with Table 15-9 of the Intel Architecture spec vol 3A,
@@ -1445,19 +1386,19 @@
 	} else {
 		switch (optypenum) {
 		case 0:
-			optype = "generic undef request";
+			optype = "generic undef request error";
 			break;
 		case 1:
-			optype = "memory read";
+			optype = "memory read error";
 			break;
 		case 2:
-			optype = "memory write";
+			optype = "memory write error";
 			break;
 		case 3:
-			optype = "addr/cmd";
+			optype = "addr/cmd error";
 			break;
 		case 4:
-			optype = "memory scrubbing";
+			optype = "memory scrubbing error";
 			break;
 		default:
 			optype = "reserved";
@@ -1466,13 +1407,13 @@
 	}
 
 	rc = get_memory_error_data(mci, m->addr, &socket,
-				   &channel_mask, &rank, area_type);
+				   &channel_mask, &rank, &area_type, msg);
 	if (rc < 0)
-		return;
+		goto err_parsing;
 	new_mci = get_mci_for_node_id(socket);
 	if (!new_mci) {
-		edac_mc_handle_ce_no_info(mci, "Error: socket got corrupted!");
-		return;
+		strcpy(msg, "Error: socket got corrupted!");
+		goto err_parsing;
 	}
 	mci = new_mci;
 	pvt = mci->pvt_info;
@@ -1486,45 +1427,39 @@
 	else
 		dimm = 2;
 
-	csrow = pvt->csrow_map[first_channel][dimm];
-
-	if (uncorrected_error && recoverable)
-		recoverable_msg = " recoverable";
-	else
-		recoverable_msg = "";
 
 	/*
-	 * FIXME: What should we do with "channel" information on mcelog?
-	 * Probably, we can just discard it, as the channel information
-	 * comes from the get_memory_error_data() address decoding
+	 * FIXME: On some memory configurations (mirror, lockstep), the
+	 * Memory Controller can't point the error to a single DIMM. The
+	 * EDAC core should be handling the channel mask, in order to point
+	 * to the group of DIMMs where the error may be happening.
 	 */
-	msg = kasprintf(GFP_ATOMIC,
-			"%d %s error(s): %s on %s area %s%s: cpu=%d Err=%04x:%04x (ch=%d), "
-			"addr = 0x%08llx => socket=%d, Channel=%ld(mask=%ld), rank=%d\n",
-			core_err_cnt,
-			area_type,
-			optype,
-			type,
-			recoverable_msg,
-			overflow ? "OVERFLOW" : "",
-			m->cpu,
-			mscod, errcode,
-			channel,		/* 1111b means not specified */
-			(long long) m->addr,
-			socket,
-			first_channel,		/* This is the real channel on SB */
-			channel_mask,
-			rank);
+	snprintf(msg, sizeof(msg),
+		 "count:%d%s%s area:%s err_code:%04x:%04x socket:%d channel_mask:%ld rank:%d",
+		 core_err_cnt,
+		 overflow ? " OVERFLOW" : "",
+		 (uncorrected_error && recoverable) ? " recoverable" : "",
+		 area_type,
+		 mscod, errcode,
+		 socket,
+		 channel_mask,
+		 rank);
 
 	debugf0("%s", msg);
 
-	/* Call the helper to output message */
-	if (uncorrected_error)
-		edac_mc_handle_fbd_ue(mci, csrow, 0, 0, msg);
-	else
-		edac_mc_handle_fbd_ce(mci, csrow, 0, msg);
+	/* FIXME: need support for channel mask */
 
-	kfree(msg);
+	/* Call the helper to output message */
+	edac_mc_handle_error(tp_event, mci,
+			     m->addr >> PAGE_SHIFT, m->addr & ~PAGE_MASK, 0,
+			     channel, dimm, -1,
+			     optype, msg, m);
+	return;
+err_parsing:
+	edac_mc_handle_error(tp_event, mci, 0, 0, 0,
+			     -1, -1, -1,
+			     msg, "", m);
+
 }
 
 /*
@@ -1669,8 +1604,6 @@
 	debugf0("MC: " __FILE__ ": %s(): mci = %p, dev = %p\n",
 		__func__, mci, &sbridge_dev->pdev[0]->dev);
 
-	mce_unregister_decode_chain(&sbridge_mce_dec);
-
 	/* Remove MC sysfs nodes */
 	edac_mc_del_mc(mci->dev);
 
@@ -1683,16 +1616,25 @@
 static int sbridge_register_mci(struct sbridge_dev *sbridge_dev)
 {
 	struct mem_ctl_info *mci;
+	struct edac_mc_layer layers[2];
 	struct sbridge_pvt *pvt;
-	int rc, channels, csrows;
+	int rc;
 
 	/* Check the number of active and not disabled channels */
-	rc = sbridge_get_active_channels(sbridge_dev->bus, &channels, &csrows);
+	rc = check_if_ecc_is_active(sbridge_dev->bus);
 	if (unlikely(rc < 0))
 		return rc;
 
 	/* allocate a new MC control structure */
-	mci = edac_mc_alloc(sizeof(*pvt), csrows, channels, sbridge_dev->mc);
+	layers[0].type = EDAC_MC_LAYER_CHANNEL;
+	layers[0].size = NUM_CHANNELS;
+	layers[0].is_virt_csrow = false;
+	layers[1].type = EDAC_MC_LAYER_SLOT;
+	layers[1].size = MAX_DIMMS;
+	layers[1].is_virt_csrow = true;
+	mci = edac_mc_alloc(sbridge_dev->mc, ARRAY_SIZE(layers), layers,
+			    sizeof(*pvt));
+
 	if (unlikely(!mci))
 		return -ENOMEM;
 
@@ -1738,7 +1680,6 @@
 		goto fail0;
 	}
 
-	mce_register_decode_chain(&sbridge_mce_dec);
 	return 0;
 
 fail0:
@@ -1867,8 +1808,10 @@
 
 	pci_rc = pci_register_driver(&sbridge_driver);
 
-	if (pci_rc >= 0)
+	if (pci_rc >= 0) {
+		mce_register_decode_chain(&sbridge_mce_dec);
 		return 0;
+	}
 
 	sbridge_printk(KERN_ERR, "Failed to register device with error %d.\n",
 		      pci_rc);
@@ -1884,6 +1827,7 @@
 {
 	debugf2("MC: " __FILE__ ": %s()\n", __func__);
 	pci_unregister_driver(&sbridge_driver);
+	mce_unregister_decode_chain(&sbridge_mce_dec);
 }
 
 module_init(sbridge_init);
diff --git a/drivers/edac/tile_edac.c b/drivers/edac/tile_edac.c
index e99d009..7bb4614 100644
--- a/drivers/edac/tile_edac.c
+++ b/drivers/edac/tile_edac.c
@@ -71,7 +71,10 @@
 	if (mem_error.sbe_count != priv->ce_count) {
 		dev_dbg(mci->dev, "ECC CE err on node %d\n", priv->node);
 		priv->ce_count = mem_error.sbe_count;
-		edac_mc_handle_ce(mci, 0, 0, 0, 0, 0, mci->ctl_name);
+		edac_mc_handle_error(HW_EVENT_ERR_CORRECTED, mci,
+				     0, 0, 0,
+				     0, 0, -1,
+				     mci->ctl_name, "", NULL);
 	}
 }
 
@@ -84,6 +87,7 @@
 	struct csrow_info	*csrow = &mci->csrows[0];
 	struct tile_edac_priv	*priv = mci->pvt_info;
 	struct mshim_mem_info	mem_info;
+	struct dimm_info *dimm = csrow->channels[0].dimm;
 
 	if (hv_dev_pread(priv->hv_devhdl, 0, (HV_VirtAddr)&mem_info,
 		sizeof(struct mshim_mem_info), MSHIM_MEM_INFO_OFF) !=
@@ -93,27 +97,25 @@
 	}
 
 	if (mem_info.mem_ecc)
-		csrow->edac_mode = EDAC_SECDED;
+		dimm->edac_mode = EDAC_SECDED;
 	else
-		csrow->edac_mode = EDAC_NONE;
+		dimm->edac_mode = EDAC_NONE;
 	switch (mem_info.mem_type) {
 	case DDR2:
-		csrow->mtype = MEM_DDR2;
+		dimm->mtype = MEM_DDR2;
 		break;
 
 	case DDR3:
-		csrow->mtype = MEM_DDR3;
+		dimm->mtype = MEM_DDR3;
 		break;
 
 	default:
 		return -1;
 	}
 
-	csrow->first_page = 0;
-	csrow->nr_pages = mem_info.mem_size >> PAGE_SHIFT;
-	csrow->last_page = csrow->first_page + csrow->nr_pages - 1;
-	csrow->grain = TILE_EDAC_ERROR_GRAIN;
-	csrow->dtype = DEV_UNKNOWN;
+	dimm->nr_pages = mem_info.mem_size >> PAGE_SHIFT;
+	dimm->grain = TILE_EDAC_ERROR_GRAIN;
+	dimm->dtype = DEV_UNKNOWN;
 
 	return 0;
 }
@@ -123,6 +125,7 @@
 	char			hv_file[32];
 	int			hv_devhdl;
 	struct mem_ctl_info	*mci;
+	struct edac_mc_layer	layers[2];
 	struct tile_edac_priv	*priv;
 	int			rc;
 
@@ -132,8 +135,14 @@
 		return -EINVAL;
 
 	/* A TILE MC has a single channel and one chip-select row. */
-	mci = edac_mc_alloc(sizeof(struct tile_edac_priv),
-		TILE_EDAC_NR_CSROWS, TILE_EDAC_NR_CHANS, pdev->id);
+	layers[0].type = EDAC_MC_LAYER_CHIP_SELECT;
+	layers[0].size = TILE_EDAC_NR_CSROWS;
+	layers[0].is_virt_csrow = true;
+	layers[1].type = EDAC_MC_LAYER_CHANNEL;
+	layers[1].size = TILE_EDAC_NR_CHANS;
+	layers[1].is_virt_csrow = false;
+	mci = edac_mc_alloc(pdev->id, ARRAY_SIZE(layers), layers,
+			    sizeof(struct tile_edac_priv));
 	if (mci == NULL)
 		return -ENOMEM;
 	priv = mci->pvt_info;
diff --git a/drivers/edac/x38_edac.c b/drivers/edac/x38_edac.c
index a438297..1ac7962 100644
--- a/drivers/edac/x38_edac.c
+++ b/drivers/edac/x38_edac.c
@@ -215,19 +215,26 @@
 		return;
 
 	if ((info->errsts ^ info->errsts2) & X38_ERRSTS_BITS) {
-		edac_mc_handle_ce_no_info(mci, "UE overwrote CE");
+		edac_mc_handle_error(HW_EVENT_ERR_UNCORRECTED, mci, 0, 0, 0,
+				     -1, -1, -1,
+				     "UE overwrote CE", "", NULL);
 		info->errsts = info->errsts2;
 	}
 
 	for (channel = 0; channel < x38_channel_num; channel++) {
 		log = info->eccerrlog[channel];
 		if (log & X38_ECCERRLOG_UE) {
-			edac_mc_handle_ue(mci, 0, 0,
-				eccerrlog_row(channel, log), "x38 UE");
+			edac_mc_handle_error(HW_EVENT_ERR_UNCORRECTED, mci,
+					     0, 0, 0,
+					     eccerrlog_row(channel, log),
+					     -1, -1,
+					     "x38 UE", "", NULL);
 		} else if (log & X38_ECCERRLOG_CE) {
-			edac_mc_handle_ce(mci, 0, 0,
-				eccerrlog_syndrome(log),
-				eccerrlog_row(channel, log), 0, "x38 CE");
+			edac_mc_handle_error(HW_EVENT_ERR_CORRECTED, mci,
+					     0, 0, eccerrlog_syndrome(log),
+					     eccerrlog_row(channel, log),
+					     -1, -1,
+					     "x38 CE", "", NULL);
 		}
 	}
 }
@@ -317,9 +324,9 @@
 static int x38_probe1(struct pci_dev *pdev, int dev_idx)
 {
 	int rc;
-	int i;
+	int i, j;
 	struct mem_ctl_info *mci = NULL;
-	unsigned long last_page;
+	struct edac_mc_layer layers[2];
 	u16 drbs[X38_CHANNELS][X38_RANKS_PER_CHANNEL];
 	bool stacked;
 	void __iomem *window;
@@ -335,7 +342,13 @@
 	how_many_channel(pdev);
 
 	/* FIXME: unconventional pvt_info usage */
-	mci = edac_mc_alloc(0, X38_RANKS, x38_channel_num, 0);
+	layers[0].type = EDAC_MC_LAYER_CHIP_SELECT;
+	layers[0].size = X38_RANKS;
+	layers[0].is_virt_csrow = true;
+	layers[1].type = EDAC_MC_LAYER_CHANNEL;
+	layers[1].size = x38_channel_num;
+	layers[1].is_virt_csrow = false;
+	mci = edac_mc_alloc(0, ARRAY_SIZE(layers), layers, 0);
 	if (!mci)
 		return -ENOMEM;
 
@@ -363,7 +376,6 @@
 	 * cumulative; the last one will contain the total memory
 	 * contained in all ranks.
 	 */
-	last_page = -1UL;
 	for (i = 0; i < mci->nr_csrows; i++) {
 		unsigned long nr_pages;
 		struct csrow_info *csrow = &mci->csrows[i];
@@ -372,20 +384,18 @@
 			i / X38_RANKS_PER_CHANNEL,
 			i % X38_RANKS_PER_CHANNEL);
 
-		if (nr_pages == 0) {
-			csrow->mtype = MEM_EMPTY;
+		if (nr_pages == 0)
 			continue;
+
+		for (j = 0; j < x38_channel_num; j++) {
+			struct dimm_info *dimm = csrow->channels[j].dimm;
+
+			dimm->nr_pages = nr_pages / x38_channel_num;
+			dimm->grain = nr_pages << PAGE_SHIFT;
+			dimm->mtype = MEM_DDR2;
+			dimm->dtype = DEV_UNKNOWN;
+			dimm->edac_mode = EDAC_UNKNOWN;
 		}
-
-		csrow->first_page = last_page + 1;
-		last_page += nr_pages;
-		csrow->last_page = last_page;
-		csrow->nr_pages = nr_pages;
-
-		csrow->grain = nr_pages << PAGE_SHIFT;
-		csrow->mtype = MEM_DDR2;
-		csrow->dtype = DEV_UNKNOWN;
-		csrow->edac_mode = EDAC_UNKNOWN;
 	}
 
 	x38_clear_error_info(mci);
diff --git a/drivers/extcon/extcon-max8997.c b/drivers/extcon/extcon-max8997.c
index 23416e4..a4ed30b 100644
--- a/drivers/extcon/extcon-max8997.c
+++ b/drivers/extcon/extcon-max8997.c
@@ -116,8 +116,8 @@
 	[5] = "Charge-downstream",
 	[6] = "MHL",
 	[7] = "Dock-desk",
-	[7] = "Dock-card",
-	[8] = "JIG",
+	[8] = "Dock-card",
+	[9] = "JIG",
 
 	NULL,
 };
@@ -514,6 +514,7 @@
 
 	extcon_dev_unregister(info->edev);
 
+	kfree(info->edev);
 	kfree(info);
 
 	return 0;
diff --git a/drivers/extcon/extcon_class.c b/drivers/extcon/extcon_class.c
index f598a70..159aeb0 100644
--- a/drivers/extcon/extcon_class.c
+++ b/drivers/extcon/extcon_class.c
@@ -762,7 +762,7 @@
 #if defined(CONFIG_ANDROID)
 	if (switch_class)
 		ret = class_compat_create_link(switch_class, edev->dev,
-					       dev);
+					       NULL);
 #endif /* CONFIG_ANDROID */
 
 	spin_lock_init(&edev->lock);
diff --git a/drivers/extcon/extcon_gpio.c b/drivers/extcon/extcon_gpio.c
index fe7a07b..8a0dcc1 100644
--- a/drivers/extcon/extcon_gpio.c
+++ b/drivers/extcon/extcon_gpio.c
@@ -125,6 +125,7 @@
 	if (ret < 0)
 		goto err_request_irq;
 
+	platform_set_drvdata(pdev, extcon_data);
 	/* Perform initial detection */
 	gpio_extcon_work(&extcon_data->work.work);
 
@@ -146,6 +147,7 @@
 	struct gpio_extcon_data *extcon_data = platform_get_drvdata(pdev);
 
 	cancel_delayed_work_sync(&extcon_data->work);
+	free_irq(extcon_data->irq, extcon_data);
 	gpio_free(extcon_data->gpio);
 	extcon_dev_unregister(&extcon_data->edev);
 	devm_kfree(&pdev->dev, extcon_data);
diff --git a/drivers/gpio/Kconfig b/drivers/gpio/Kconfig
index aa3642c..c4067d0 100644
--- a/drivers/gpio/Kconfig
+++ b/drivers/gpio/Kconfig
@@ -114,6 +114,14 @@
 	depends on ARCH_EP93XX
 	select GPIO_GENERIC
 
+config GPIO_MM_LANTIQ
+	bool "Lantiq Memory mapped GPIOs"
+	depends on LANTIQ && SOC_XWAY
+	help
+	  This enables support for memory mapped GPIOs on the External Bus Unit
+	  (EBU) found on Lantiq SoCs. The GPIOs are output-only, as they are
+	  created by attaching a 16-bit latch to the bus.
+
 config GPIO_MPC5200
 	def_bool y
 	depends on PPC_MPC52xx
@@ -167,6 +175,14 @@
 	help
 	  Say yes here to support the PXA GPIO device
 
+config GPIO_STA2X11
+	bool "STA2x11/ConneXt GPIO support"
+	depends on MFD_STA2X11
+	select GENERIC_IRQ_CHIP
+	help
+	  Say yes here to support the STA2x11/ConneXt GPIO device.
+	  The GPIO module has 128 GPIO pins with alternate functions.
+
 config GPIO_XILINX
 	bool "Xilinx GPIO support"
 	depends on PPC_OF || MICROBLAZE
@@ -180,13 +196,13 @@
 	  Say yes here to support the NEC VR4100 series General-purpose I/O Uint
 
 config GPIO_SCH
-	tristate "Intel SCH/TunnelCreek GPIO"
+	tristate "Intel SCH/TunnelCreek/Centerton GPIO"
 	depends on PCI && X86
 	select MFD_CORE
 	select LPC_SCH
 	help
-	  Say yes here to support GPIO interface on Intel Poulsbo SCH
-	  or Intel Tunnel Creek processor.
+	  Say yes here to support the GPIO interface on Intel Poulsbo SCH,
+	  Intel Tunnel Creek processor or Intel Centerton processor.
 	  The Intel SCH contains a total of 14 GPIO pins. Ten GPIOs are
 	  powered by the core power rail and are turned off during sleep
 	  modes (S3 and higher). The remaining four GPIOs are powered by
@@ -195,6 +211,22 @@
 	  system from the Suspend-to-RAM state.
 	  The Intel Tunnel Creek processor has 5 GPIOs powered by the
 	  core power rail and 9 from suspend power supply.
+	  The Intel Centerton processor has a total of 30 GPIO pins.
+	  Twenty-one are powered by the core power rail and 9 from the
+	  suspend power supply.
+
+config GPIO_ICH
+	tristate "Intel ICH GPIO"
+	depends on PCI && X86
+	select MFD_CORE
+	select LPC_ICH
+	help
+	  Say yes here to support the GPIO functionality of a number of Intel
+	  ICH-based chipsets.  Currently supported devices: ICH6, ICH7, ICH8,
+	  ICH9, ICH10, Series 5/3400 (eg Ibex Peak), Series 6/C200 (eg
+	  Cougar Point), NM10 (Tiger Point), and 3100 (Whitmore Lake).
+
+	  If unsure, say N.
 
 config GPIO_VX855
 	tristate "VIA VX855/VX875 GPIO"
@@ -334,6 +366,16 @@
 	  This enables support for the GPIOs found on the STMPE I/O
 	  Expanders.
 
+config GPIO_STP_XWAY
+	bool "XWAY STP GPIOs"
+	depends on SOC_XWAY
+	help
+	  This enables support for the Serial To Parallel (STP) unit found on
+	  XWAY SoCs. The STP allows the SoC to drive a cascade of shift
+	  registers that can be up to 24 bits long. This peripheral is aimed
+	  at driving LEDs. Some of the GPIOs/LEDs can be updated automatically
+	  by the SoC with DSL and PHY status.
+
 config GPIO_TC3589X
 	bool "TC3589X GPIOs"
 	depends on MFD_TC3589X
diff --git a/drivers/gpio/Makefile b/drivers/gpio/Makefile
index 07a79e2..0f55662 100644
--- a/drivers/gpio/Makefile
+++ b/drivers/gpio/Makefile
@@ -19,6 +19,7 @@
 obj-$(CONFIG_GPIO_EM)		+= gpio-em.o
 obj-$(CONFIG_GPIO_EP93XX)	+= gpio-ep93xx.o
 obj-$(CONFIG_GPIO_GE_FPGA)	+= gpio-ge.o
+obj-$(CONFIG_GPIO_ICH)		+= gpio-ich.o
 obj-$(CONFIG_GPIO_IT8761E)	+= gpio-it8761e.o
 obj-$(CONFIG_GPIO_JANZ_TTL)	+= gpio-janz-ttl.o
 obj-$(CONFIG_ARCH_KS8695)	+= gpio-ks8695.o
@@ -32,6 +33,7 @@
 obj-$(CONFIG_GPIO_MC9S08DZ60)	+= gpio-mc9s08dz60.o
 obj-$(CONFIG_GPIO_MCP23S08)	+= gpio-mcp23s08.o
 obj-$(CONFIG_GPIO_ML_IOH)	+= gpio-ml-ioh.o
+obj-$(CONFIG_GPIO_MM_LANTIQ)	+= gpio-mm-lantiq.o
 obj-$(CONFIG_GPIO_MPC5200)	+= gpio-mpc5200.o
 obj-$(CONFIG_GPIO_MPC8XXX)	+= gpio-mpc8xxx.o
 obj-$(CONFIG_GPIO_MSIC)		+= gpio-msic.o
@@ -51,7 +53,9 @@
 obj-$(CONFIG_ARCH_SA1100)	+= gpio-sa1100.o
 obj-$(CONFIG_GPIO_SCH)		+= gpio-sch.o
 obj-$(CONFIG_GPIO_SODAVILLE)	+= gpio-sodaville.o
+obj-$(CONFIG_GPIO_STA2X11)	+= gpio-sta2x11.o
 obj-$(CONFIG_GPIO_STMPE)	+= gpio-stmpe.o
+obj-$(CONFIG_GPIO_STP_XWAY)	+= gpio-stp-xway.o
 obj-$(CONFIG_GPIO_SX150X)	+= gpio-sx150x.o
 obj-$(CONFIG_GPIO_TC3589X)	+= gpio-tc3589x.o
 obj-$(CONFIG_ARCH_TEGRA)	+= gpio-tegra.o
diff --git a/drivers/gpio/gpio-ich.c b/drivers/gpio/gpio-ich.c
new file mode 100644
index 0000000..b7c0651
--- /dev/null
+++ b/drivers/gpio/gpio-ich.c
@@ -0,0 +1,419 @@
+/*
+ * Intel ICH6-10, Series 5 and 6 GPIO driver
+ *
+ * Copyright (C) 2010 Extreme Engineering Solutions.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <linux/module.h>
+#include <linux/pci.h>
+#include <linux/gpio.h>
+#include <linux/platform_device.h>
+#include <linux/mfd/lpc_ich.h>
+
+#define DRV_NAME "gpio_ich"
+
+/*
+ * GPIO register offsets in GPIO I/O space.
+ * Each chunk of 32 GPIOs is manipulated via its own USE_SELx, IO_SELx, and
+ * LVLx registers.  Logic in the read/write functions takes a register and
+ * an absolute bit number and determines the proper register offset and bit
+ * number in that register.  For example, to read the value of GPIO bit 50
+ * the code would access offset ichx_regs[2(=GPIO_LVL)][1(=50/32)],
+ * bit 18 (50%32).
+ */
+enum GPIO_REG {
+	GPIO_USE_SEL = 0,
+	GPIO_IO_SEL,
+	GPIO_LVL,
+};
+
+static const u8 ichx_regs[3][3] = {
+	{0x00, 0x30, 0x40},	/* USE_SEL[1-3] offsets */
+	{0x04, 0x34, 0x44},	/* IO_SEL[1-3] offsets */
+	{0x0c, 0x38, 0x48},	/* LVL[1-3] offsets */
+};
+
+#define ICHX_WRITE(val, reg, base_res)	outl(val, (reg) + (base_res)->start)
+#define ICHX_READ(reg, base_res)	inl((reg) + (base_res)->start)
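
The 32-GPIO chunking described in the comment above can be written out in a couple of lines; the helper below is hypothetical (the driver does the same decomposition, with locking, in ichx_read_bit()/ichx_write_bit() further down) and exists only to make the bit-50 example concrete:

/* Illustrative only: GPIO 50 with GPIO_LVL hits ichx_regs[2][1] = 0x38, bit 18 */
static int ichx_peek_bit(const struct resource *base, int reg, unsigned nr)
{
	int reg_nr = nr / 32;		/* which 32-GPIO bank: 50 / 32 = 1  */
	int bit = nr & 0x1f;		/* bit inside the bank: 50 % 32 = 18 */

	return (ICHX_READ(ichx_regs[reg][reg_nr], base) >> bit) & 1;
}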
+
+struct ichx_desc {
+	/* Max GPIO pins the chipset can have */
+	uint ngpio;
+
+	/* Whether the chipset has GPIO in GPE0_STS in the PM IO region */
+	bool uses_gpe0;
+
+	/* USE_SEL is bogus on some chipsets, eg 3100 */
+	u32 use_sel_ignore[3];
+
+	/* Some chipsets have quirks, let these use their own request/get */
+	int (*request)(struct gpio_chip *chip, unsigned offset);
+	int (*get)(struct gpio_chip *chip, unsigned offset);
+};
+
+static struct {
+	spinlock_t lock;
+	struct platform_device *dev;
+	struct gpio_chip chip;
+	struct resource *gpio_base;	/* GPIO IO base */
+	struct resource *pm_base;	/* Power Management IO base */
+	struct ichx_desc *desc;	/* Pointer to chipset-specific description */
+	u32 orig_gpio_ctrl;	/* Orig CTRL value, used to restore on exit */
+} ichx_priv;
+
+static int modparam_gpiobase = -1;	/* dynamic */
+module_param_named(gpiobase, modparam_gpiobase, int, 0444);
+MODULE_PARM_DESC(gpiobase, "The GPIO number base. -1 means dynamic, "
+			   "which is the default.");
+
+static int ichx_write_bit(int reg, unsigned nr, int val, int verify)
+{
+	unsigned long flags;
+	u32 data, tmp;
+	int reg_nr = nr / 32;
+	int bit = nr & 0x1f;
+	int ret = 0;
+
+	spin_lock_irqsave(&ichx_priv.lock, flags);
+
+	data = ICHX_READ(ichx_regs[reg][reg_nr], ichx_priv.gpio_base);
+	if (val)
+		data |= 1 << bit;
+	else
+		data &= ~(1 << bit);
+	ICHX_WRITE(data, ichx_regs[reg][reg_nr], ichx_priv.gpio_base);
+	tmp = ICHX_READ(ichx_regs[reg][reg_nr], ichx_priv.gpio_base);
+	if (verify && data != tmp)
+		ret = -EPERM;
+
+	spin_unlock_irqrestore(&ichx_priv.lock, flags);
+
+	return ret;
+}
+
+static int ichx_read_bit(int reg, unsigned nr)
+{
+	unsigned long flags;
+	u32 data;
+	int reg_nr = nr / 32;
+	int bit = nr & 0x1f;
+
+	spin_lock_irqsave(&ichx_priv.lock, flags);
+
+	data = ICHX_READ(ichx_regs[reg][reg_nr], ichx_priv.gpio_base);
+
+	spin_unlock_irqrestore(&ichx_priv.lock, flags);
+
+	return data & (1 << bit) ? 1 : 0;
+}
+
+static int ichx_gpio_direction_input(struct gpio_chip *gpio, unsigned nr)
+{
+	/*
+	 * Try setting pin as an input and verify it worked since many pins
+	 * are output-only.
+	 */
+	if (ichx_write_bit(GPIO_IO_SEL, nr, 1, 1))
+		return -EINVAL;
+
+	return 0;
+}
+
+static int ichx_gpio_direction_output(struct gpio_chip *gpio, unsigned nr,
+					int val)
+{
+	/* Set GPIO output value. */
+	ichx_write_bit(GPIO_LVL, nr, val, 0);
+
+	/*
+	 * Try setting pin as an output and verify it worked since many pins
+	 * are input-only.
+	 */
+	if (ichx_write_bit(GPIO_IO_SEL, nr, 0, 1))
+		return -EINVAL;
+
+	return 0;
+}
+
+static int ichx_gpio_get(struct gpio_chip *chip, unsigned nr)
+{
+	return ichx_read_bit(GPIO_LVL, nr);
+}
+
+static int ich6_gpio_get(struct gpio_chip *chip, unsigned nr)
+{
+	unsigned long flags;
+	u32 data;
+
+	/*
+	 * GPI 0 - 15 need to be read from the power management registers on
+	 * a ICH6/3100 bridge.
+	 */
+	if (nr < 16) {
+		if (!ichx_priv.pm_base)
+			return -ENXIO;
+
+		spin_lock_irqsave(&ichx_priv.lock, flags);
+
+		/* GPI 0 - 15 are latched, write 1 to clear */
+		ICHX_WRITE(1 << (16 + nr), 0, ichx_priv.pm_base);
+		data = ICHX_READ(0, ichx_priv.pm_base);
+
+		spin_unlock_irqrestore(&ichx_priv.lock, flags);
+
+		return (data >> 16) & (1 << nr) ? 1 : 0;
+	} else {
+		return ichx_gpio_get(chip, nr);
+	}
+}
+
+static int ichx_gpio_request(struct gpio_chip *chip, unsigned nr)
+{
+	/*
+	 * Note we assume the BIOS properly set a bridge's USE value.  Some
+	 * chips (eg Intel 3100) have bogus USE values though, so first see if
+	 * the chipset's USE value can be trusted for this specific bit.
+	 * If it can't be trusted, assume that the pin can be used as a GPIO.
+	 */
+	if (ichx_priv.desc->use_sel_ignore[nr / 32] & (1 << (nr & 0x1f)))
+		return 1;
+
+	return ichx_read_bit(GPIO_USE_SEL, nr) ? 0 : -ENODEV;
+}
+
+static int ich6_gpio_request(struct gpio_chip *chip, unsigned nr)
+{
+	/*
+	 * Fixups for bits 16 and 17 are necessary on the Intel ICH6/3100
+	 * bridge as they are controlled by USE register bits 0 and 1.  See
+	 * "Table 704 GPIO_USE_SEL1 register" in the i3100 datasheet for
+	 * additional info.
+	 */
+	if (nr == 16 || nr == 17)
+		nr -= 16;
+
+	return ichx_gpio_request(chip, nr);
+}
+
+static void ichx_gpio_set(struct gpio_chip *chip, unsigned nr, int val)
+{
+	ichx_write_bit(GPIO_LVL, nr, val, 0);
+}
+
+static void __devinit ichx_gpiolib_setup(struct gpio_chip *chip)
+{
+	chip->owner = THIS_MODULE;
+	chip->label = DRV_NAME;
+	chip->dev = &ichx_priv.dev->dev;
+
+	/* Allow chip-specific overrides of request()/get() */
+	chip->request = ichx_priv.desc->request ?
+		ichx_priv.desc->request : ichx_gpio_request;
+	chip->get = ichx_priv.desc->get ?
+		ichx_priv.desc->get : ichx_gpio_get;
+
+	chip->set = ichx_gpio_set;
+	chip->direction_input = ichx_gpio_direction_input;
+	chip->direction_output = ichx_gpio_direction_output;
+	chip->base = modparam_gpiobase;
+	chip->ngpio = ichx_priv.desc->ngpio;
+	chip->can_sleep = 0;
+	chip->dbg_show = NULL;
+}
+
+/* ICH6-based, 631xesb-based */
+static struct ichx_desc ich6_desc = {
+	/* Bridges using the ICH6 controller need fixups for GPIO 0 - 17 */
+	.request = ich6_gpio_request,
+	.get = ich6_gpio_get,
+
+	/* GPIO 0-15 are read in the GPE0_STS PM register */
+	.uses_gpe0 = true,
+
+	.ngpio = 50,
+};
+
+/* Intel 3100 */
+static struct ichx_desc i3100_desc = {
+	/*
+	 * Bits 16,17, 20 of USE_SEL and bit 16 of USE_SEL2 always read 0 on
+	 * the Intel 3100.  See "Table 712. GPIO Summary Table" of 3100
+	 * Datasheet for more info.
+	 */
+	.use_sel_ignore = {0x00130000, 0x00010000, 0x0},
+
+	/* The 3100 needs fixups for GPIO 0 - 17 */
+	.request = ich6_gpio_request,
+	.get = ich6_gpio_get,
+
+	/* GPIO 0-15 are read in the GPE0_STS PM register */
+	.uses_gpe0 = true,
+
+	.ngpio = 50,
+};
+
+/* ICH7 and ICH8-based */
+static struct ichx_desc ich7_desc = {
+	.ngpio = 50,
+};
+
+/* ICH9-based */
+static struct ichx_desc ich9_desc = {
+	.ngpio = 61,
+};
+
+/* ICH10-based - Consumer/corporate versions have different numbers of GPIOs */
+static struct ichx_desc ich10_cons_desc = {
+	.ngpio = 61,
+};
+static struct ichx_desc ich10_corp_desc = {
+	.ngpio = 72,
+};
+
+/* Intel 5 series, 6 series, 3400 series, and C200 series */
+static struct ichx_desc intel5_desc = {
+	.ngpio = 76,
+};
+
+static int __devinit ichx_gpio_probe(struct platform_device *pdev)
+{
+	struct resource *res_base, *res_pm;
+	int err;
+	struct lpc_ich_info *ich_info = pdev->dev.platform_data;
+
+	if (!ich_info)
+		return -ENODEV;
+
+	ichx_priv.dev = pdev;
+
+	switch (ich_info->gpio_version) {
+	case ICH_I3100_GPIO:
+		ichx_priv.desc = &i3100_desc;
+		break;
+	case ICH_V5_GPIO:
+		ichx_priv.desc = &intel5_desc;
+		break;
+	case ICH_V6_GPIO:
+		ichx_priv.desc = &ich6_desc;
+		break;
+	case ICH_V7_GPIO:
+		ichx_priv.desc = &ich7_desc;
+		break;
+	case ICH_V9_GPIO:
+		ichx_priv.desc = &ich9_desc;
+		break;
+	case ICH_V10CORP_GPIO:
+		ichx_priv.desc = &ich10_corp_desc;
+		break;
+	case ICH_V10CONS_GPIO:
+		ichx_priv.desc = &ich10_cons_desc;
+		break;
+	default:
+		return -ENODEV;
+	}
+
+	res_base = platform_get_resource(pdev, IORESOURCE_IO, ICH_RES_GPIO);
+	if (!res_base || !res_base->start || !res_base->end)
+		return -ENODEV;
+
+	if (!request_region(res_base->start, resource_size(res_base),
+				pdev->name))
+		return -EBUSY;
+
+	ichx_priv.gpio_base = res_base;
+
+	/*
+	 * If necessary, determine the I/O address of ACPI/power management
+	 * registers which are needed to read the GPE0 register for GPI pins
+	 * 0 - 15 on some chipsets.
+	 */
+	if (!ichx_priv.desc->uses_gpe0)
+		goto init;
+
+	res_pm = platform_get_resource(pdev, IORESOURCE_IO, ICH_RES_GPE0);
+	if (!res_pm) {
+		pr_warn("ACPI BAR is unavailable, GPI 0 - 15 unavailable\n");
+		goto init;
+	}
+
+	if (!request_region(res_pm->start, resource_size(res_pm),
+			pdev->name)) {
+		pr_warn("ACPI BAR is busy, GPI 0 - 15 unavailable\n");
+		goto init;
+	}
+
+	ichx_priv.pm_base = res_pm;
+
+init:
+	ichx_gpiolib_setup(&ichx_priv.chip);
+	err = gpiochip_add(&ichx_priv.chip);
+	if (err) {
+		pr_err("Failed to register GPIOs\n");
+		goto add_err;
+	}
+
+	pr_info("GPIO from %d to %d on %s\n", ichx_priv.chip.base,
+	       ichx_priv.chip.base + ichx_priv.chip.ngpio - 1, DRV_NAME);
+
+	return 0;
+
+add_err:
+	release_region(ichx_priv.gpio_base->start,
+			resource_size(ichx_priv.gpio_base));
+	if (ichx_priv.pm_base)
+		release_region(ichx_priv.pm_base->start,
+				resource_size(ichx_priv.pm_base));
+	return err;
+}
+
+static int __devexit ichx_gpio_remove(struct platform_device *pdev)
+{
+	int err;
+
+	err = gpiochip_remove(&ichx_priv.chip);
+	if (err) {
+		dev_err(&pdev->dev, "%s failed, %d\n",
+				"gpiochip_remove()", err);
+		return err;
+	}
+
+	release_region(ichx_priv.gpio_base->start,
+				resource_size(ichx_priv.gpio_base));
+	if (ichx_priv.pm_base)
+		release_region(ichx_priv.pm_base->start,
+				resource_size(ichx_priv.pm_base));
+
+	return 0;
+}
+
+static struct platform_driver ichx_gpio_driver = {
+	.driver		= {
+		.owner	= THIS_MODULE,
+		.name	= DRV_NAME,
+	},
+	.probe		= ichx_gpio_probe,
+	.remove		= __devexit_p(ichx_gpio_remove),
+};
+
+module_platform_driver(ichx_gpio_driver);
+
+MODULE_AUTHOR("Peter Tyser <ptyser@xes-inc.com>");
+MODULE_DESCRIPTION("GPIO interface for Intel ICH series");
+MODULE_LICENSE("GPL");
+MODULE_ALIAS("platform:"DRV_NAME);
diff --git a/drivers/gpio/gpio-mm-lantiq.c b/drivers/gpio/gpio-mm-lantiq.c
new file mode 100644
index 0000000..2983dfb
--- /dev/null
+++ b/drivers/gpio/gpio-mm-lantiq.c
@@ -0,0 +1,158 @@
+/*
+ *  This program is free software; you can redistribute it and/or modify it
+ *  under the terms of the GNU General Public License version 2 as published
+ *  by the Free Software Foundation.
+ *
+ *  Copyright (C) 2012 John Crispin <blogic@openwrt.org>
+ */
+
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/types.h>
+#include <linux/platform_device.h>
+#include <linux/mutex.h>
+#include <linux/gpio.h>
+#include <linux/of.h>
+#include <linux/of_gpio.h>
+#include <linux/io.h>
+#include <linux/slab.h>
+
+#include <lantiq_soc.h>
+
+/*
+ * By attaching hardware latches to the EBU it is possible to create output
+ * only GPIOs. This driver configures a special memory address which, when
+ * written to, outputs 16 bits to the latches.
+ */
+
+#define LTQ_EBU_BUSCON	0x1e7ff		/* 16 bit access, slowest timing */
+#define LTQ_EBU_WP	0x80000000	/* write protect bit */
+
+struct ltq_mm {
+	struct of_mm_gpio_chip mmchip;
+	u16 shadow;	/* shadow the latches state */
+};
+
+/**
+ * ltq_mm_apply() - write the shadow value to the ebu address.
+ * @chip:     Pointer to our private data structure.
+ *
+ * Write the shadow value to the EBU to set the gpios. We need to set the
+ * global EBU lock to make sure that PCI/MTD don't break.
+ */
+static void ltq_mm_apply(struct ltq_mm *chip)
+{
+	unsigned long flags;
+
+	spin_lock_irqsave(&ebu_lock, flags);
+	ltq_ebu_w32(LTQ_EBU_BUSCON, LTQ_EBU_BUSCON1);
+	__raw_writew(chip->shadow, chip->mmchip.regs);
+	ltq_ebu_w32(LTQ_EBU_BUSCON | LTQ_EBU_WP, LTQ_EBU_BUSCON1);
+	spin_unlock_irqrestore(&ebu_lock, flags);
+}
+
+/**
+ * ltq_mm_set() - gpio_chip->set - set gpios.
+ * @gc:     Pointer to gpio_chip device structure.
+ * @offset: GPIO signal number.
+ * @value:  Value to be written to the specified signal.
+ *
+ * Set the shadow value and call ltq_mm_apply.
+ */
+static void ltq_mm_set(struct gpio_chip *gc, unsigned offset, int value)
+{
+	struct of_mm_gpio_chip *mm_gc = to_of_mm_gpio_chip(gc);
+	struct ltq_mm *chip =
+		container_of(mm_gc, struct ltq_mm, mmchip);
+
+	if (value)
+		chip->shadow |= (1 << offset);
+	else
+		chip->shadow &= ~(1 << offset);
+	ltq_mm_apply(chip);
+}
+
+/**
+ * ltq_mm_dir_out() - gpio_chip->dir_out - set gpio direction.
+ * @gc:     Pointer to gpio_chip device structure.
+ * @offset: GPIO signal number.
+ * @value:  Value to be written to the specified signal.
+ *
+ * Same as ltq_mm_set, always returns 0.
+ */
+static int ltq_mm_dir_out(struct gpio_chip *gc, unsigned offset, int value)
+{
+	ltq_mm_set(gc, offset, value);
+
+	return 0;
+}
+
+/**
+ * ltq_mm_save_regs() - Set initial values of GPIO pins
+ * @mm_gc: pointer to memory mapped GPIO chip structure
+ */
+static void ltq_mm_save_regs(struct of_mm_gpio_chip *mm_gc)
+{
+	struct ltq_mm *chip =
+		container_of(mm_gc, struct ltq_mm, mmchip);
+
+	/* tell the ebu controller which memory address we will be using */
+	ltq_ebu_w32(CPHYSADDR(chip->mmchip.regs) | 0x1, LTQ_EBU_ADDRSEL1);
+
+	ltq_mm_apply(chip);
+}
+
+static int ltq_mm_probe(struct platform_device *pdev)
+{
+	struct resource *res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+	struct ltq_mm *chip;
+	const __be32 *shadow;
+	int ret = 0;
+
+	if (!res) {
+		dev_err(&pdev->dev, "failed to get memory resource\n");
+		return -ENOENT;
+	}
+
+	chip = kzalloc(sizeof(*chip), GFP_KERNEL);
+	if (!chip)
+		return -ENOMEM;
+
+	chip->mmchip.gc.ngpio = 16;
+	chip->mmchip.gc.label = "gpio-mm-ltq";
+	chip->mmchip.gc.direction_output = ltq_mm_dir_out;
+	chip->mmchip.gc.set = ltq_mm_set;
+	chip->mmchip.save_regs = ltq_mm_save_regs;
+
+	/* store the shadow value if one was passed by the devicetree */
+	shadow = of_get_property(pdev->dev.of_node, "lantiq,shadow", NULL);
+	if (shadow)
+		chip->shadow = be32_to_cpu(*shadow);
+
+	ret = of_mm_gpiochip_add(pdev->dev.of_node, &chip->mmchip);
+	if (ret)
+		kfree(chip);
+	return ret;
+}
+
+static const struct of_device_id ltq_mm_match[] = {
+	{ .compatible = "lantiq,gpio-mm" },
+	{},
+};
+MODULE_DEVICE_TABLE(of, ltq_mm_match);
+
+static struct platform_driver ltq_mm_driver = {
+	.probe = ltq_mm_probe,
+	.driver = {
+		.name = "gpio-mm-ltq",
+		.owner = THIS_MODULE,
+		.of_match_table = ltq_mm_match,
+	},
+};
+
+static int __init ltq_mm_init(void)
+{
+	return platform_driver_register(&ltq_mm_driver);
+}
+
+subsys_initcall(ltq_mm_init);
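The set path above never reads the latches back; it keeps a 16-bit shadow copy and rewrites the whole word on every change. A stand-alone sketch of that pattern in plain C, with the EBU write replaced by a hypothetical apply() stub, purely for illustration:

/* Stand-alone illustration of the shadow-register update pattern. */
#include <stdint.h>
#include <stdio.h>

static uint16_t shadow;		/* mirrors the state of the output latches */

/* Stand-in for ltq_mm_apply(): just print the word that would be written. */
static void apply(void)
{
	printf("latch word: 0x%04x\n", shadow);
}

static void set_gpio(unsigned offset, int value)
{
	if (value)
		shadow |= 1u << offset;		/* set the bit in the shadow copy */
	else
		shadow &= ~(1u << offset);	/* clear it */
	apply();				/* push the whole 16-bit word out */
}

int main(void)
{
	set_gpio(3, 1);		/* -> 0x0008 */
	set_gpio(7, 1);		/* -> 0x0088 */
	set_gpio(3, 0);		/* -> 0x0080 */
	return 0;
}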
diff --git a/drivers/gpio/gpio-mxs.c b/drivers/gpio/gpio-mxs.c
index b413650..39e4956 100644
--- a/drivers/gpio/gpio-mxs.c
+++ b/drivers/gpio/gpio-mxs.c
@@ -25,23 +25,25 @@
 #include <linux/io.h>
 #include <linux/irq.h>
 #include <linux/gpio.h>
+#include <linux/of.h>
+#include <linux/of_address.h>
+#include <linux/of_device.h>
 #include <linux/platform_device.h>
 #include <linux/slab.h>
 #include <linux/basic_mmio_gpio.h>
 #include <linux/module.h>
-#include <mach/mxs.h>
 
 #define MXS_SET		0x4
 #define MXS_CLR		0x8
 
-#define PINCTRL_DOUT(n)		((cpu_is_mx23() ? 0x0500 : 0x0700) + (n) * 0x10)
-#define PINCTRL_DIN(n)		((cpu_is_mx23() ? 0x0600 : 0x0900) + (n) * 0x10)
-#define PINCTRL_DOE(n)		((cpu_is_mx23() ? 0x0700 : 0x0b00) + (n) * 0x10)
-#define PINCTRL_PIN2IRQ(n)	((cpu_is_mx23() ? 0x0800 : 0x1000) + (n) * 0x10)
-#define PINCTRL_IRQEN(n)	((cpu_is_mx23() ? 0x0900 : 0x1100) + (n) * 0x10)
-#define PINCTRL_IRQLEV(n)	((cpu_is_mx23() ? 0x0a00 : 0x1200) + (n) * 0x10)
-#define PINCTRL_IRQPOL(n)	((cpu_is_mx23() ? 0x0b00 : 0x1300) + (n) * 0x10)
-#define PINCTRL_IRQSTAT(n)	((cpu_is_mx23() ? 0x0c00 : 0x1400) + (n) * 0x10)
+#define PINCTRL_DOUT(p)		((is_imx23_gpio(p) ? 0x0500 : 0x0700) + (p->id) * 0x10)
+#define PINCTRL_DIN(p)		((is_imx23_gpio(p) ? 0x0600 : 0x0900) + (p->id) * 0x10)
+#define PINCTRL_DOE(p)		((is_imx23_gpio(p) ? 0x0700 : 0x0b00) + (p->id) * 0x10)
+#define PINCTRL_PIN2IRQ(p)	((is_imx23_gpio(p) ? 0x0800 : 0x1000) + (p->id) * 0x10)
+#define PINCTRL_IRQEN(p)	((is_imx23_gpio(p) ? 0x0900 : 0x1100) + (p->id) * 0x10)
+#define PINCTRL_IRQLEV(p)	((is_imx23_gpio(p) ? 0x0a00 : 0x1200) + (p->id) * 0x10)
+#define PINCTRL_IRQPOL(p)	((is_imx23_gpio(p) ? 0x0b00 : 0x1300) + (p->id) * 0x10)
+#define PINCTRL_IRQSTAT(p)	((is_imx23_gpio(p) ? 0x0c00 : 0x1400) + (p->id) * 0x10)
 
 #define GPIO_INT_FALL_EDGE	0x0
 #define GPIO_INT_LOW_LEV	0x1
@@ -52,14 +54,30 @@
 
 #define irq_to_gpio(irq)	((irq) - MXS_GPIO_IRQ_START)
 
+enum mxs_gpio_id {
+	IMX23_GPIO,
+	IMX28_GPIO,
+};
+
 struct mxs_gpio_port {
 	void __iomem *base;
 	int id;
 	int irq;
 	int virtual_irq_start;
 	struct bgpio_chip bgc;
+	enum mxs_gpio_id devid;
 };
 
+static inline int is_imx23_gpio(struct mxs_gpio_port *port)
+{
+	return port->devid == IMX23_GPIO;
+}
+
+static inline int is_imx28_gpio(struct mxs_gpio_port *port)
+{
+	return port->devid == IMX28_GPIO;
+}
+
 /* Note: This driver assumes 32 GPIOs are handled in one register */
 
 static int mxs_gpio_set_irq_type(struct irq_data *d, unsigned int type)
@@ -89,21 +107,21 @@
 	}
 
 	/* set level or edge */
-	pin_addr = port->base + PINCTRL_IRQLEV(port->id);
+	pin_addr = port->base + PINCTRL_IRQLEV(port);
 	if (edge & GPIO_INT_LEV_MASK)
 		writel(pin_mask, pin_addr + MXS_SET);
 	else
 		writel(pin_mask, pin_addr + MXS_CLR);
 
 	/* set polarity */
-	pin_addr = port->base + PINCTRL_IRQPOL(port->id);
+	pin_addr = port->base + PINCTRL_IRQPOL(port);
 	if (edge & GPIO_INT_POL_MASK)
 		writel(pin_mask, pin_addr + MXS_SET);
 	else
 		writel(pin_mask, pin_addr + MXS_CLR);
 
 	writel(1 << (gpio & 0x1f),
-	       port->base + PINCTRL_IRQSTAT(port->id) + MXS_CLR);
+	       port->base + PINCTRL_IRQSTAT(port) + MXS_CLR);
 
 	return 0;
 }
@@ -117,8 +135,8 @@
 
 	desc->irq_data.chip->irq_ack(&desc->irq_data);
 
-	irq_stat = readl(port->base + PINCTRL_IRQSTAT(port->id)) &
-			readl(port->base + PINCTRL_IRQEN(port->id));
+	irq_stat = readl(port->base + PINCTRL_IRQSTAT(port)) &
+			readl(port->base + PINCTRL_IRQEN(port));
 
 	while (irq_stat != 0) {
 		int irqoffset = fls(irq_stat) - 1;
@@ -164,8 +182,8 @@
 	ct->chip.irq_unmask = irq_gc_mask_set_bit;
 	ct->chip.irq_set_type = mxs_gpio_set_irq_type;
 	ct->chip.irq_set_wake = mxs_gpio_set_wake_irq;
-	ct->regs.ack = PINCTRL_IRQSTAT(port->id) + MXS_CLR;
-	ct->regs.mask = PINCTRL_IRQEN(port->id);
+	ct->regs.ack = PINCTRL_IRQSTAT(port) + MXS_CLR;
+	ct->regs.mask = PINCTRL_IRQEN(port);
 
 	irq_setup_generic_chip(gc, IRQ_MSK(32), 0, IRQ_NOREQUEST, 0);
 }
@@ -179,60 +197,83 @@
 	return port->virtual_irq_start + offset;
 }
 
+static struct platform_device_id mxs_gpio_ids[] = {
+	{
+		.name = "imx23-gpio",
+		.driver_data = IMX23_GPIO,
+	}, {
+		.name = "imx28-gpio",
+		.driver_data = IMX28_GPIO,
+	}, {
+		/* sentinel */
+	}
+};
+MODULE_DEVICE_TABLE(platform, mxs_gpio_ids);
+
+static const struct of_device_id mxs_gpio_dt_ids[] = {
+	{ .compatible = "fsl,imx23-gpio", .data = (void *) IMX23_GPIO, },
+	{ .compatible = "fsl,imx28-gpio", .data = (void *) IMX28_GPIO, },
+	{ /* sentinel */ }
+};
+MODULE_DEVICE_TABLE(of, mxs_gpio_dt_ids);
+
 static int __devinit mxs_gpio_probe(struct platform_device *pdev)
 {
+	const struct of_device_id *of_id =
+			of_match_device(mxs_gpio_dt_ids, &pdev->dev);
+	struct device_node *np = pdev->dev.of_node;
+	struct device_node *parent;
 	static void __iomem *base;
 	struct mxs_gpio_port *port;
 	struct resource *iores = NULL;
 	int err;
 
-	port = kzalloc(sizeof(struct mxs_gpio_port), GFP_KERNEL);
+	port = devm_kzalloc(&pdev->dev, sizeof(*port), GFP_KERNEL);
 	if (!port)
 		return -ENOMEM;
 
-	port->id = pdev->id;
+	if (np) {
+		port->id = of_alias_get_id(np, "gpio");
+		if (port->id < 0)
+			return port->id;
+		port->devid = (enum mxs_gpio_id) of_id->data;
+	} else {
+		port->id = pdev->id;
+		port->devid = pdev->id_entry->driver_data;
+	}
 	port->virtual_irq_start = MXS_GPIO_IRQ_START + port->id * 32;
 
+	port->irq = platform_get_irq(pdev, 0);
+	if (port->irq < 0)
+		return port->irq;
+
 	/*
 	 * map memory region only once, as all the gpio ports
 	 * share the same one
 	 */
 	if (!base) {
-		iores = platform_get_resource(pdev, IORESOURCE_MEM, 0);
-		if (!iores) {
-			err = -ENODEV;
-			goto out_kfree;
+		if (np) {
+			parent = of_get_parent(np);
+			base = of_iomap(parent, 0);
+			of_node_put(parent);
+		} else {
+			iores = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+			base = devm_request_and_ioremap(&pdev->dev, iores);
 		}
-
-		if (!request_mem_region(iores->start, resource_size(iores),
-					pdev->name)) {
-			err = -EBUSY;
-			goto out_kfree;
-		}
-
-		base = ioremap(iores->start, resource_size(iores));
-		if (!base) {
-			err = -ENOMEM;
-			goto out_release_mem;
-		}
+		if (!base)
+			return -EADDRNOTAVAIL;
 	}
 	port->base = base;
 
-	port->irq = platform_get_irq(pdev, 0);
-	if (port->irq < 0) {
-		err = -EINVAL;
-		goto out_iounmap;
-	}
-
 	/*
 	 * select the pin interrupt functionality but initially
 	 * disable the interrupts
 	 */
-	writel(~0U, port->base + PINCTRL_PIN2IRQ(port->id));
-	writel(0, port->base + PINCTRL_IRQEN(port->id));
+	writel(~0U, port->base + PINCTRL_PIN2IRQ(port));
+	writel(0, port->base + PINCTRL_IRQEN(port));
 
 	/* clear address has to be used to clear IRQSTAT bits */
-	writel(~0U, port->base + PINCTRL_IRQSTAT(port->id) + MXS_CLR);
+	writel(~0U, port->base + PINCTRL_IRQSTAT(port) + MXS_CLR);
 
 	/* gpio-mxs can be a generic irq chip */
 	mxs_gpio_init_gc(port);
@@ -242,41 +283,32 @@
 	irq_set_handler_data(port->irq, port);
 
 	err = bgpio_init(&port->bgc, &pdev->dev, 4,
-			 port->base + PINCTRL_DIN(port->id),
-			 port->base + PINCTRL_DOUT(port->id), NULL,
-			 port->base + PINCTRL_DOE(port->id), NULL, 0);
+			 port->base + PINCTRL_DIN(port),
+			 port->base + PINCTRL_DOUT(port), NULL,
+			 port->base + PINCTRL_DOE(port), NULL, 0);
 	if (err)
-		goto out_iounmap;
+		return err;
 
 	port->bgc.gc.to_irq = mxs_gpio_to_irq;
 	port->bgc.gc.base = port->id * 32;
 
 	err = gpiochip_add(&port->bgc.gc);
-	if (err)
-		goto out_bgpio_remove;
+	if (err) {
+		bgpio_remove(&port->bgc);
+		return err;
+	}
 
 	return 0;
-
-out_bgpio_remove:
-	bgpio_remove(&port->bgc);
-out_iounmap:
-	if (iores)
-		iounmap(port->base);
-out_release_mem:
-	if (iores)
-		release_mem_region(iores->start, resource_size(iores));
-out_kfree:
-	kfree(port);
-	dev_info(&pdev->dev, "%s failed with errno %d\n", __func__, err);
-	return err;
 }
 
 static struct platform_driver mxs_gpio_driver = {
 	.driver		= {
 		.name	= "gpio-mxs",
 		.owner	= THIS_MODULE,
+		.of_match_table = mxs_gpio_dt_ids,
 	},
 	.probe		= mxs_gpio_probe,
+	.id_table	= mxs_gpio_ids,
 };
 
 static int __init mxs_gpio_init(void)
diff --git a/drivers/gpio/gpio-samsung.c b/drivers/gpio/gpio-samsung.c
index 421f6af..b6453d0 100644
--- a/drivers/gpio/gpio-samsung.c
+++ b/drivers/gpio/gpio-samsung.c
@@ -2454,6 +2454,12 @@
 		},
 	}, {
 		.chip	= {
+			.base	= EXYNOS5_GPC4(0),
+			.ngpio	= EXYNOS5_GPIO_C4_NR,
+			.label	= "GPC4",
+		},
+	}, {
+		.chip	= {
 			.base	= EXYNOS5_GPD0(0),
 			.ngpio	= EXYNOS5_GPIO_D0_NR,
 			.label	= "GPD0",
@@ -2826,8 +2832,11 @@
 		goto err_ioremap1;
 	}
 
+	/* need to set base address for gpc4 */
+	exynos5_gpios_1[11].base = gpio_base1 + 0x2E0;
+
 	/* need to set base address for gpx */
-	chip = &exynos5_gpios_1[20];
+	chip = &exynos5_gpios_1[21];
 	gpx_base = gpio_base1 + 0xC00;
 	for (i = 0; i < 4; i++, chip++, gpx_base += 0x20)
 		chip->base = gpx_base;
diff --git a/drivers/gpio/gpio-sch.c b/drivers/gpio/gpio-sch.c
index 8cadf4d..424dce8 100644
--- a/drivers/gpio/gpio-sch.c
+++ b/drivers/gpio/gpio-sch.c
@@ -232,6 +232,14 @@
 			sch_gpio_resume.ngpio = 9;
 			break;
 
+		case PCI_DEVICE_ID_INTEL_CENTERTON_ILB:
+			sch_gpio_core.base = 0;
+			sch_gpio_core.ngpio = 21;
+
+			sch_gpio_resume.base = 21;
+			sch_gpio_resume.ngpio = 9;
+			break;
+
 		default:
 			return -ENODEV;
 	}
diff --git a/drivers/gpio/gpio-sta2x11.c b/drivers/gpio/gpio-sta2x11.c
new file mode 100644
index 0000000..38416be
--- /dev/null
+++ b/drivers/gpio/gpio-sta2x11.c
@@ -0,0 +1,435 @@
+/*
+ * STMicroelectronics ConneXt (STA2X11) GPIO driver
+ *
+ * Copyright 2012 ST Microelectronics (Alessandro Rubini)
+ * Based on gpio-ml-ioh.c, Copyright 2010 OKI Semiconductors Ltd.
+ * Also based on previous sta2x11 work, Copyright 2011 Wind River Systems, Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
+ * See the GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ *
+ */
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/slab.h>
+#include <linux/gpio.h>
+#include <linux/interrupt.h>
+#include <linux/irq.h>
+#include <linux/pci.h>
+#include <linux/platform_device.h>
+#include <linux/mfd/sta2x11-mfd.h>
+
+struct gsta_regs {
+	u32 dat;		/* 0x00 */
+	u32 dats;
+	u32 datc;
+	u32 pdis;
+	u32 dir;		/* 0x10 */
+	u32 dirs;
+	u32 dirc;
+	u32 unused_1c;
+	u32 afsela;		/* 0x20 */
+	u32 unused_24[7];
+	u32 rimsc;		/* 0x40 */
+	u32 fimsc;
+	u32 is;
+	u32 ic;
+};
+
+struct gsta_gpio {
+	spinlock_t			lock;
+	struct device			*dev;
+	void __iomem			*reg_base;
+	struct gsta_regs __iomem	*regs[GSTA_NR_BLOCKS];
+	struct gpio_chip		gpio;
+	int				irq_base;
+	/* FIXME: save the whole config here (AF, ...) */
+	unsigned			irq_type[GSTA_NR_GPIO];
+};
+
+static inline struct gsta_regs __iomem *__regs(struct gsta_gpio *chip, int nr)
+{
+	return chip->regs[nr / GSTA_GPIO_PER_BLOCK];
+}
+
+static inline u32 __bit(int nr)
+{
+	return 1U << (nr % GSTA_GPIO_PER_BLOCK);
+}
+
+/*
+ * gpio methods
+ */
+
+static void gsta_gpio_set(struct gpio_chip *gpio, unsigned nr, int val)
+{
+	struct gsta_gpio *chip = container_of(gpio, struct gsta_gpio, gpio);
+	struct gsta_regs __iomem *regs = __regs(chip, nr);
+	u32 bit = __bit(nr);
+
+	if (val)
+		writel(bit, &regs->dats);
+	else
+		writel(bit, &regs->datc);
+}
+
+static int gsta_gpio_get(struct gpio_chip *gpio, unsigned nr)
+{
+	struct gsta_gpio *chip = container_of(gpio, struct gsta_gpio, gpio);
+	struct gsta_regs __iomem *regs = __regs(chip, nr);
+	u32 bit = __bit(nr);
+
+	return readl(&regs->dat) & bit;
+}
+
+static int gsta_gpio_direction_output(struct gpio_chip *gpio, unsigned nr,
+				      int val)
+{
+	struct gsta_gpio *chip = container_of(gpio, struct gsta_gpio, gpio);
+	struct gsta_regs __iomem *regs = __regs(chip, nr);
+	u32 bit = __bit(nr);
+
+	writel(bit, &regs->dirs);
+	/* Data register after direction, otherwise pullup/down is selected */
+	if (val)
+		writel(bit, &regs->dats);
+	else
+		writel(bit, &regs->datc);
+	return 0;
+}
+
+static int gsta_gpio_direction_input(struct gpio_chip *gpio, unsigned nr)
+{
+	struct gsta_gpio *chip = container_of(gpio, struct gsta_gpio, gpio);
+	struct gsta_regs __iomem *regs = __regs(chip, nr);
+	u32 bit = __bit(nr);
+
+	writel(bit, &regs->dirc);
+	return 0;
+}
+
+static int gsta_gpio_to_irq(struct gpio_chip *gpio, unsigned offset)
+{
+	struct gsta_gpio *chip = container_of(gpio, struct gsta_gpio, gpio);
+	return chip->irq_base + offset;
+}
+
+static void gsta_gpio_setup(struct gsta_gpio *chip) /* called from probe */
+{
+	struct gpio_chip *gpio = &chip->gpio;
+
+	/*
+	 * ARCH_NR_GPIOS is currently 256 and dynamic allocation starts
+	 * from the end. However, for compatibility, we need the first
+	 * ConneXt device to start from gpio 0: it's the main chipset
+	 * on most boards so documents and drivers assume gpio0..gpio127
+	 */
+	static int gpio_base;
+
+	gpio->label = dev_name(chip->dev);
+	gpio->owner = THIS_MODULE;
+	gpio->direction_input = gsta_gpio_direction_input;
+	gpio->get = gsta_gpio_get;
+	gpio->direction_output = gsta_gpio_direction_output;
+	gpio->set = gsta_gpio_set;
+	gpio->dbg_show = NULL;
+	gpio->base = gpio_base;
+	gpio->ngpio = GSTA_NR_GPIO;
+	gpio->can_sleep = 0;
+	gpio->to_irq = gsta_gpio_to_irq;
+
+	/*
+	 * After the first device, turn to dynamic gpio numbers.
+	 * For example, with ARCH_NR_GPIOS = 256 we can fit two cards
+	 */
+	if (!gpio_base)
+		gpio_base = -1;
+}
+
+/*
+ * Special method: alternate functions and pullup/pulldown. This is only
+ * invoked on startup to configure gpios according to platform data.
+ * FIXME: this functionality shall be managed (and exported to other drivers)
+ * via the pin control subsystem.
+ */
+static void gsta_set_config(struct gsta_gpio *chip, int nr, unsigned cfg)
+{
+	struct gsta_regs __iomem *regs = __regs(chip, nr);
+	unsigned long flags;
+	u32 bit = __bit(nr);
+	u32 val;
+	int err = 0;
+
+	pr_info("%s: %p %i %i\n", __func__, chip, nr, cfg);
+
+	if (cfg == PINMUX_TYPE_NONE)
+		return;
+
+	/* Alternate function or not? */
+	spin_lock_irqsave(&chip->lock, flags);
+	val = readl(&regs->afsela);
+	if (cfg == PINMUX_TYPE_FUNCTION)
+		val |= bit;
+	else
+		val &= ~bit;
+	writel(val | bit, &regs->afsela);
+	if (cfg == PINMUX_TYPE_FUNCTION) {
+		spin_unlock_irqrestore(&chip->lock, flags);
+		return;
+	}
+
+	/* not alternate function: set details */
+	switch (cfg) {
+	case PINMUX_TYPE_OUTPUT_LOW:
+		writel(bit, &regs->dirs);
+		writel(bit, &regs->datc);
+		break;
+	case PINMUX_TYPE_OUTPUT_HIGH:
+		writel(bit, &regs->dirs);
+		writel(bit, &regs->dats);
+		break;
+	case PINMUX_TYPE_INPUT:
+		writel(bit, &regs->dirc);
+		val = readl(&regs->pdis) | bit;
+		writel(val, &regs->pdis);
+		break;
+	case PINMUX_TYPE_INPUT_PULLUP:
+		writel(bit, &regs->dirc);
+		val = readl(&regs->pdis) & ~bit;
+		writel(val, &regs->pdis);
+		writel(bit, &regs->dats);
+		break;
+	case PINMUX_TYPE_INPUT_PULLDOWN:
+		writel(bit, &regs->dirc);
+		val = readl(&regs->pdis) & ~bit;
+		writel(val, &regs->pdis);
+		writel(bit, &regs->datc);
+		break;
+	default:
+		err = 1;
+	}
+	spin_unlock_irqrestore(&chip->lock, flags);
+	if (err)
+		pr_err("%s: chip %p, pin %i, cfg %i is invalid\n",
+		       __func__, chip, nr, cfg);
+}
+
+/*
+ * Irq methods
+ */
+
+static void gsta_irq_disable(struct irq_data *data)
+{
+	struct irq_chip_generic *gc = irq_data_get_irq_chip_data(data);
+	struct gsta_gpio *chip = gc->private;
+	int nr = data->irq - chip->irq_base;
+	struct gsta_regs __iomem *regs = __regs(chip, nr);
+	u32 bit = __bit(nr);
+	u32 val;
+	unsigned long flags;
+
+	spin_lock_irqsave(&chip->lock, flags);
+	if (chip->irq_type[nr] & IRQ_TYPE_EDGE_RISING) {
+		val = readl(&regs->rimsc) & ~bit;
+		writel(val, &regs->rimsc);
+	}
+	if (chip->irq_type[nr] & IRQ_TYPE_EDGE_FALLING) {
+		val = readl(&regs->fimsc) & ~bit;
+		writel(val, &regs->fimsc);
+	}
+	spin_unlock_irqrestore(&chip->lock, flags);
+	return;
+}
+
+static void gsta_irq_enable(struct irq_data *data)
+{
+	struct irq_chip_generic *gc = irq_data_get_irq_chip_data(data);
+	struct gsta_gpio *chip = gc->private;
+	int nr = data->irq - chip->irq_base;
+	struct gsta_regs __iomem *regs = __regs(chip, nr);
+	u32 bit = __bit(nr);
+	u32 val;
+	int type;
+	unsigned long flags;
+
+	type = chip->irq_type[nr];
+
+	spin_lock_irqsave(&chip->lock, flags);
+	val = readl(&regs->rimsc);
+	if (type & IRQ_TYPE_EDGE_RISING)
+		writel(val | bit, &regs->rimsc);
+	else
+		writel(val & ~bit, &regs->rimsc);
+	val = readl(&regs->rimsc);
+	if (type & IRQ_TYPE_EDGE_FALLING)
+		writel(val | bit, &regs->fimsc);
+	else
+		writel(val & ~bit, &regs->fimsc);
+	spin_unlock_irqrestore(&chip->lock, flags);
+	return;
+}
+
+static int gsta_irq_type(struct irq_data *d, unsigned int type)
+{
+	struct irq_chip_generic *gc = irq_data_get_irq_chip_data(d);
+	struct gsta_gpio *chip = gc->private;
+	int nr = d->irq - chip->irq_base;
+
+	/* We only support edge interrupts */
+	if (!(type & (IRQ_TYPE_EDGE_RISING | IRQ_TYPE_EDGE_FALLING))) {
+		pr_debug("%s: unsupported type 0x%x\n", __func__, type);
+		return -EINVAL;
+	}
+
+	chip->irq_type[nr] = type; /* used for enable/disable */
+
+	gsta_irq_enable(d);
+	return 0;
+}
+
+static irqreturn_t gsta_gpio_handler(int irq, void *dev_id)
+{
+	struct gsta_gpio *chip = dev_id;
+	struct gsta_regs __iomem *regs;
+	u32 is;
+	int i, nr, base;
+	irqreturn_t ret = IRQ_NONE;
+
+	for (i = 0; i < GSTA_NR_BLOCKS; i++) {
+		regs = chip->regs[i];
+		base = chip->irq_base + i * GSTA_GPIO_PER_BLOCK;
+		while ((is = readl(&regs->is))) {
+			nr = __ffs(is);
+			irq = base + nr;
+			generic_handle_irq(irq);
+			writel(1 << nr, &regs->ic);
+			ret = IRQ_HANDLED;
+		}
+	}
+	return ret;
+}
+
+static __devinit void gsta_alloc_irq_chip(struct gsta_gpio *chip)
+{
+	struct irq_chip_generic *gc;
+	struct irq_chip_type *ct;
+
+	gc = irq_alloc_generic_chip(KBUILD_MODNAME, 1, chip->irq_base,
+				     chip->reg_base, handle_simple_irq);
+	gc->private = chip;
+	ct = gc->chip_types;
+
+	ct->chip.irq_set_type = gsta_irq_type;
+	ct->chip.irq_disable = gsta_irq_disable;
+	ct->chip.irq_enable = gsta_irq_enable;
+
+	/* FIXME: this makes at most 32 interrupts. Request 0 for now */
+	irq_setup_generic_chip(gc, 0 /* IRQ_MSK(GSTA_GPIO_PER_BLOCK) */, 0,
+			       IRQ_NOREQUEST | IRQ_NOPROBE, 0);
+
+	/* Set up all 128 interrupts: code from setup_generic_chip */
+	{
+		struct irq_chip_type *ct = gc->chip_types;
+		int i, j;
+		for (j = 0; j < GSTA_NR_GPIO; j++) {
+			i = chip->irq_base + j;
+			irq_set_chip_and_handler(i, &ct->chip, ct->handler);
+			irq_set_chip_data(i, gc);
+			irq_modify_status(i, IRQ_NOREQUEST | IRQ_NOPROBE, 0);
+		}
+		gc->irq_cnt = i - gc->irq_base;
+	}
+}
+
+/* The platform device used here is instantiated by the MFD device */
+static int __devinit gsta_probe(struct platform_device *dev)
+{
+	int i, err;
+	struct pci_dev *pdev;
+	struct sta2x11_gpio_pdata *gpio_pdata;
+	struct gsta_gpio *chip;
+	struct resource *res;
+
+	pdev = *(struct pci_dev **)(dev->dev.platform_data);
+	gpio_pdata = dev_get_platdata(&pdev->dev);
+
+	if (gpio_pdata == NULL)
+		dev_err(&dev->dev, "no gpio config\n");
+	pr_debug("gpio config: %p\n", gpio_pdata);
+
+	res = platform_get_resource(dev, IORESOURCE_MEM, 0);
+
+	chip = devm_kzalloc(&dev->dev, sizeof(*chip), GFP_KERNEL);
+	chip->dev = &dev->dev;
+	chip->reg_base = devm_request_and_ioremap(&dev->dev, res);
+
+	for (i = 0; i < GSTA_NR_BLOCKS; i++) {
+		chip->regs[i] = chip->reg_base + i * 4096;
+		/* disable all irqs */
+		writel(0, &chip->regs[i]->rimsc);
+		writel(0, &chip->regs[i]->fimsc);
+		writel(~0, &chip->regs[i]->ic);
+	}
+	spin_lock_init(&chip->lock);
+	gsta_gpio_setup(chip);
+	for (i = 0; i < GSTA_NR_GPIO; i++)
+		gsta_set_config(chip, i, gpio_pdata->pinconfig[i]);
+
+	/* 384 was used in previous code: be compatible for other drivers */
+	err = irq_alloc_descs(-1, 384, GSTA_NR_GPIO, NUMA_NO_NODE);
+	if (err < 0) {
+		dev_warn(&dev->dev, "sta2x11 gpio: Can't get irq base (%i)\n",
+			 -err);
+		return err;
+	}
+	chip->irq_base = err;
+	gsta_alloc_irq_chip(chip);
+
+	err = request_irq(pdev->irq, gsta_gpio_handler,
+			     IRQF_SHARED, KBUILD_MODNAME, chip);
+	if (err < 0) {
+		dev_err(&dev->dev, "sta2x11 gpio: Can't request irq (%i)\n",
+			-err);
+		goto err_free_descs;
+	}
+
+	err = gpiochip_add(&chip->gpio);
+	if (err < 0) {
+		dev_err(&dev->dev, "sta2x11 gpio: Can't register (%i)\n",
+			-err);
+		goto err_free_irq;
+	}
+
+	platform_set_drvdata(dev, chip);
+	return 0;
+
+err_free_irq:
+	free_irq(pdev->irq, chip);
+err_free_descs:
+	irq_free_descs(chip->irq_base, GSTA_NR_GPIO);
+	return err;
+}
+
+static struct platform_driver sta2x11_gpio_platform_driver = {
+	.driver = {
+		.name	= "sta2x11-gpio",
+		.owner	= THIS_MODULE,
+	},
+	.probe = gsta_probe,
+};
+
+module_platform_driver(sta2x11_gpio_platform_driver);
+
+MODULE_LICENSE("GPL v2");
+MODULE_DESCRIPTION("sta2x11_gpio GPIO driver");
diff --git a/drivers/gpio/gpio-stp-xway.c b/drivers/gpio/gpio-stp-xway.c
new file mode 100644
index 0000000..e35096b
--- /dev/null
+++ b/drivers/gpio/gpio-stp-xway.c
@@ -0,0 +1,301 @@
+/*
+ *  This program is free software; you can redistribute it and/or modify it
+ *  under the terms of the GNU General Public License version 2 as published
+ *  by the Free Software Foundation.
+ *
+ *  Copyright (C) 2012 John Crispin <blogic@openwrt.org>
+ *
+ */
+
+#include <linux/slab.h>
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/types.h>
+#include <linux/of_platform.h>
+#include <linux/mutex.h>
+#include <linux/gpio.h>
+#include <linux/io.h>
+#include <linux/of_gpio.h>
+#include <linux/clk.h>
+#include <linux/err.h>
+
+#include <lantiq_soc.h>
+
+/*
+ * The Serial To Parallel (STP) unit is found on MIPS-based Lantiq SoCs. It is
+ * a peripheral controller used to drive external shift register cascades. At
+ * most 3 groups of 8 bits can be driven. The hardware can let the DSL modem
+ * drive the 2 LSBs of the cascade automatically.
+ */
+
+/* control register 0 */
+#define XWAY_STP_CON0		0x00
+/* control register 1 */
+#define XWAY_STP_CON1		0x04
+/* data register 0 */
+#define XWAY_STP_CPU0		0x08
+/* data register 1 */
+#define XWAY_STP_CPU1		0x0C
+/* access register */
+#define XWAY_STP_AR		0x10
+
+/* software or hardware update select bit */
+#define XWAY_STP_CON_SWU	BIT(31)
+
+/* automatic update rates */
+#define XWAY_STP_2HZ		0
+#define XWAY_STP_4HZ		BIT(23)
+#define XWAY_STP_8HZ		BIT(24)
+#define XWAY_STP_10HZ		(BIT(24) | BIT(23))
+#define XWAY_STP_SPEED_MASK	(0xf << 23)
+
+/* clock source for automatic update */
+#define XWAY_STP_UPD_FPI	BIT(31)
+#define XWAY_STP_UPD_MASK	(BIT(31) | BIT(30))
+
+/* let the adsl core drive the 2 LSBs */
+#define XWAY_STP_ADSL_SHIFT	24
+#define XWAY_STP_ADSL_MASK	0x3
+
+/* 2 groups of 3 bits can be driven by the phys */
+#define XWAY_STP_PHY_MASK	0x3
+#define XWAY_STP_PHY1_SHIFT	27
+#define XWAY_STP_PHY2_SHIFT	15
+
+/* STP has 3 groups of 8 bits */
+#define XWAY_STP_GROUP0		BIT(0)
+#define XWAY_STP_GROUP1		BIT(1)
+#define XWAY_STP_GROUP2		BIT(2)
+#define XWAY_STP_GROUP_MASK	(0x7)
+
+/* Edge configuration bits */
+#define XWAY_STP_FALLING	BIT(26)
+#define XWAY_STP_EDGE_MASK	BIT(26)
+
+#define xway_stp_r32(m, reg)		__raw_readl(m + reg)
+#define xway_stp_w32(m, val, reg)	__raw_writel(val, m + reg)
+#define xway_stp_w32_mask(m, clear, set, reg) \
+		ltq_w32((ltq_r32(m + reg) & ~(clear)) | (set), \
+		m + reg)
+
+struct xway_stp {
+	struct gpio_chip gc;
+	void __iomem *virt;
+	u32 edge;	/* rising or falling edge triggered shift register */
+	u16 shadow;	/* shadow the shift registers state */
+	u8 groups;	/* we can drive 1-3 groups of 8bit each */
+	u8 dsl;		/* the 2 LSBs can be driven by the dsl core */
+	u8 phy1;	/* 3 bits can be driven by phy1 */
+	u8 phy2;	/* 3 bits can be driven by phy2 */
+	u8 reserved;	/* mask out the hw driven bits in gpio_request */
+};
+
+/**
+ * xway_stp_set() - gpio_chip->set - set gpios.
+ * @gc:     Pointer to gpio_chip device structure.
+ * @gpio:   GPIO signal number.
+ * @val:    Value to be written to specified signal.
+ *
+ * Set the shadow value and update the STP hardware.
+ */
+static void xway_stp_set(struct gpio_chip *gc, unsigned gpio, int val)
+{
+	struct xway_stp *chip =
+		container_of(gc, struct xway_stp, gc);
+
+	if (val)
+		chip->shadow |= BIT(gpio);
+	else
+		chip->shadow &= ~BIT(gpio);
+	xway_stp_w32(chip->virt, chip->shadow, XWAY_STP_CPU0);
+	xway_stp_w32_mask(chip->virt, 0, XWAY_STP_CON_SWU, XWAY_STP_CON0);
+}
+
+/**
+ * xway_stp_dir_out() - gpio_chip->dir_out - set gpio direction.
+ * @gc:     Pointer to gpio_chip device structure.
+ * @gpio:   GPIO signal number.
+ * @val:    Value to be written to specified signal.
+ *
+ * Same as xway_stp_set, always returns 0.
+ */
+static int xway_stp_dir_out(struct gpio_chip *gc, unsigned gpio, int val)
+{
+	xway_stp_set(gc, gpio, val);
+
+	return 0;
+}
+
+/**
+ * xway_stp_request() - gpio_chip->request
+ * @gc:     Pointer to gpio_chip device structure.
+ * @gpio:   GPIO signal number.
+ *
+ * We mask out the HW driven pins
+ */
+static int xway_stp_request(struct gpio_chip *gc, unsigned gpio)
+{
+	struct xway_stp *chip =
+		container_of(gc, struct xway_stp, gc);
+
+	if ((gpio < 8) && (chip->reserved & BIT(gpio))) {
+		dev_err(gc->dev, "GPIO %d is driven by hardware\n", gpio);
+		return -ENODEV;
+	}
+
+	return 0;
+}
+
+/**
+ * xway_stp_hw_init() - Configure the STP unit and enable the clock gate
+ * @chip: Pointer to the xway_stp chip structure.
+ */
+static int xway_stp_hw_init(struct xway_stp *chip)
+{
+	/* sane defaults */
+	xway_stp_w32(chip->virt, 0, XWAY_STP_AR);
+	xway_stp_w32(chip->virt, 0, XWAY_STP_CPU0);
+	xway_stp_w32(chip->virt, 0, XWAY_STP_CPU1);
+	xway_stp_w32(chip->virt, XWAY_STP_CON_SWU, XWAY_STP_CON0);
+	xway_stp_w32(chip->virt, 0, XWAY_STP_CON1);
+
+	/* apply edge trigger settings for the shift register */
+	xway_stp_w32_mask(chip->virt, XWAY_STP_EDGE_MASK,
+				chip->edge, XWAY_STP_CON0);
+
+	/* apply led group settings */
+	xway_stp_w32_mask(chip->virt, XWAY_STP_GROUP_MASK,
+				chip->groups, XWAY_STP_CON1);
+
+	/* tell the hardware which pins are controlled by the dsl modem */
+	xway_stp_w32_mask(chip->virt,
+			XWAY_STP_ADSL_MASK << XWAY_STP_ADSL_SHIFT,
+			chip->dsl << XWAY_STP_ADSL_SHIFT,
+			XWAY_STP_CON0);
+
+	/* tell the hardware which pins are controlled by the phys */
+	xway_stp_w32_mask(chip->virt,
+			XWAY_STP_PHY_MASK << XWAY_STP_PHY1_SHIFT,
+			chip->phy1 << XWAY_STP_PHY1_SHIFT,
+			XWAY_STP_CON0);
+	xway_stp_w32_mask(chip->virt,
+			XWAY_STP_PHY_MASK << XWAY_STP_PHY2_SHIFT,
+			chip->phy2 << XWAY_STP_PHY2_SHIFT,
+			XWAY_STP_CON1);
+
+	/* mask out the hw driven bits in gpio_request */
+	chip->reserved = (chip->phy2 << 5) | (chip->phy1 << 2) | chip->dsl;
+
+	/*
+	 * if we have pins that are driven by hw, we need to tell the stp what
+	 * clock to use as a timer.
+	 */
+	if (chip->reserved)
+		xway_stp_w32_mask(chip->virt, XWAY_STP_UPD_MASK,
+			XWAY_STP_UPD_FPI, XWAY_STP_CON1);
+
+	return 0;
+}
+
+static int __devinit xway_stp_probe(struct platform_device *pdev)
+{
+	struct resource *res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+	const __be32 *shadow, *groups, *dsl, *phy;
+	struct xway_stp *chip;
+	struct clk *clk;
+	int ret = 0;
+
+	if (!res) {
+		dev_err(&pdev->dev, "failed to request STP resource\n");
+		return -ENOENT;
+	}
+
+	chip = devm_kzalloc(&pdev->dev, sizeof(*chip), GFP_KERNEL);
+	if (!chip)
+		return -ENOMEM;
+
+	chip->virt = devm_request_and_ioremap(&pdev->dev, res);
+	if (!chip->virt) {
+		dev_err(&pdev->dev, "failed to remap STP memory\n");
+		return -ENOMEM;
+	}
+	chip->gc.dev = &pdev->dev;
+	chip->gc.label = "stp-xway";
+	chip->gc.direction_output = xway_stp_dir_out;
+	chip->gc.set = xway_stp_set;
+	chip->gc.request = xway_stp_request;
+	chip->gc.base = -1;
+	chip->gc.owner = THIS_MODULE;
+
+	/* store the shadow value if one was passed by the devicetree */
+	shadow = of_get_property(pdev->dev.of_node, "lantiq,shadow", NULL);
+	if (shadow)
+		chip->shadow = be32_to_cpu(*shadow);
+
+	/* find out which gpio groups should be enabled */
+	groups = of_get_property(pdev->dev.of_node, "lantiq,groups", NULL);
+	if (groups)
+		chip->groups = be32_to_cpu(*groups) & XWAY_STP_GROUP_MASK;
+	else
+		chip->groups = XWAY_STP_GROUP0;
+	chip->gc.ngpio = fls(chip->groups) * 8;
+
+	/* find out which gpios are controlled by the dsl core */
+	dsl = of_get_property(pdev->dev.of_node, "lantiq,dsl", NULL);
+	if (dsl)
+		chip->dsl = be32_to_cpu(*dsl) & XWAY_STP_ADSL_MASK;
+
+	/* find out which gpios are controlled by the phys */
+	if (of_machine_is_compatible("lantiq,ar9") ||
+			of_machine_is_compatible("lantiq,gr9") ||
+			of_machine_is_compatible("lantiq,vr9")) {
+		phy = of_get_property(pdev->dev.of_node, "lantiq,phy1", NULL);
+		if (phy)
+			chip->phy1 = be32_to_cpu(*phy) & XWAY_STP_PHY_MASK;
+		phy = of_get_property(pdev->dev.of_node, "lantiq,phy2", NULL);
+		if (phy)
+			chip->phy2 = be32_to_cpu(*phy) & XWAY_STP_PHY_MASK;
+	}
+
+	/* check which edge trigger we should use, default to a falling edge */
+	if (!of_find_property(pdev->dev.of_node, "lantiq,rising", NULL))
+		chip->edge = XWAY_STP_FALLING;
+
+	clk = clk_get(&pdev->dev, NULL);
+	if (IS_ERR(clk)) {
+		dev_err(&pdev->dev, "Failed to get clock\n");
+		return PTR_ERR(clk);
+	}
+	clk_enable(clk);
+
+	ret = xway_stp_hw_init(chip);
+	if (!ret)
+		ret = gpiochip_add(&chip->gc);
+
+	if (!ret)
+		dev_info(&pdev->dev, "Init done\n");
+
+	return ret;
+}
+
+static const struct of_device_id xway_stp_match[] = {
+	{ .compatible = "lantiq,gpio-stp-xway" },
+	{},
+};
+MODULE_DEVICE_TABLE(of, xway_stp_match);
+
+static struct platform_driver xway_stp_driver = {
+	.probe = xway_stp_probe,
+	.driver = {
+		.name = "gpio-stp-xway",
+		.owner = THIS_MODULE,
+		.of_match_table = xway_stp_match,
+	},
+};
+
+int __init xway_stp_init(void)
+{
+	return platform_driver_register(&xway_stp_driver);
+}
+
+subsys_initcall(xway_stp_init);
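The probe above derives the number of exposed pins from the "lantiq,groups" mask with fls(groups) * 8, so the highest enabled group decides the width. A small stand-alone sketch of that calculation, using a local find-last-set helper instead of the kernel's fls(), just to show the resulting values:

/* Stand-alone illustration of how the group mask becomes a gpio count. */
#include <stdio.h>

static int fls_local(unsigned v)	/* 1-based find-last-set, like fls() */
{
	int r = 0;

	while (v) {
		v >>= 1;
		r++;
	}
	return r;
}

int main(void)
{
	unsigned masks[] = { 0x1, 0x3, 0x7, 0x5 };
	unsigned i;

	for (i = 0; i < sizeof(masks) / sizeof(masks[0]); i++)
		printf("groups 0x%x -> ngpio %d\n", masks[i], fls_local(masks[i]) * 8);

	/* 0x5 still yields 24: group 1 is skipped but group 2 sets the width. */
	return 0;
}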
diff --git a/drivers/gpio/gpio-tps65910.c b/drivers/gpio/gpio-tps65910.c
index 7eef648..c1ad288 100644
--- a/drivers/gpio/gpio-tps65910.c
+++ b/drivers/gpio/gpio-tps65910.c
@@ -18,14 +18,27 @@
 #include <linux/errno.h>
 #include <linux/gpio.h>
 #include <linux/i2c.h>
+#include <linux/platform_device.h>
 #include <linux/mfd/tps65910.h>
+#include <linux/of_device.h>
+
+struct tps65910_gpio {
+	struct gpio_chip gpio_chip;
+	struct tps65910 *tps65910;
+};
+
+static inline struct tps65910_gpio *to_tps65910_gpio(struct gpio_chip *chip)
+{
+	return container_of(chip, struct tps65910_gpio, gpio_chip);
+}
 
 static int tps65910_gpio_get(struct gpio_chip *gc, unsigned offset)
 {
-	struct tps65910 *tps65910 = container_of(gc, struct tps65910, gpio);
-	uint8_t val;
+	struct tps65910_gpio *tps65910_gpio = to_tps65910_gpio(gc);
+	struct tps65910 *tps65910 = tps65910_gpio->tps65910;
+	unsigned int val;
 
-	tps65910->read(tps65910, TPS65910_GPIO0 + offset, 1, &val);
+	tps65910_reg_read(tps65910, TPS65910_GPIO0 + offset, &val);
 
 	if (val & GPIO_STS_MASK)
 		return 1;
@@ -36,83 +49,170 @@
 static void tps65910_gpio_set(struct gpio_chip *gc, unsigned offset,
 			      int value)
 {
-	struct tps65910 *tps65910 = container_of(gc, struct tps65910, gpio);
+	struct tps65910_gpio *tps65910_gpio = to_tps65910_gpio(gc);
+	struct tps65910 *tps65910 = tps65910_gpio->tps65910;
 
 	if (value)
-		tps65910_set_bits(tps65910, TPS65910_GPIO0 + offset,
+		tps65910_reg_set_bits(tps65910, TPS65910_GPIO0 + offset,
 						GPIO_SET_MASK);
 	else
-		tps65910_clear_bits(tps65910, TPS65910_GPIO0 + offset,
+		tps65910_reg_clear_bits(tps65910, TPS65910_GPIO0 + offset,
 						GPIO_SET_MASK);
 }
 
 static int tps65910_gpio_output(struct gpio_chip *gc, unsigned offset,
 				int value)
 {
-	struct tps65910 *tps65910 = container_of(gc, struct tps65910, gpio);
+	struct tps65910_gpio *tps65910_gpio = to_tps65910_gpio(gc);
+	struct tps65910 *tps65910 = tps65910_gpio->tps65910;
 
 	/* Set the initial value */
 	tps65910_gpio_set(gc, offset, value);
 
-	return tps65910_set_bits(tps65910, TPS65910_GPIO0 + offset,
+	return tps65910_reg_set_bits(tps65910, TPS65910_GPIO0 + offset,
 						GPIO_CFG_MASK);
 }
 
 static int tps65910_gpio_input(struct gpio_chip *gc, unsigned offset)
 {
-	struct tps65910 *tps65910 = container_of(gc, struct tps65910, gpio);
+	struct tps65910_gpio *tps65910_gpio = to_tps65910_gpio(gc);
+	struct tps65910 *tps65910 = tps65910_gpio->tps65910;
 
-	return tps65910_clear_bits(tps65910, TPS65910_GPIO0 + offset,
+	return tps65910_reg_clear_bits(tps65910, TPS65910_GPIO0 + offset,
 						GPIO_CFG_MASK);
 }
 
-void tps65910_gpio_init(struct tps65910 *tps65910, int gpio_base)
+#ifdef CONFIG_OF
+static struct tps65910_board *tps65910_parse_dt_for_gpio(struct device *dev,
+		struct tps65910 *tps65910, int chip_ngpio)
 {
+	struct tps65910_board *tps65910_board = tps65910->of_plat_data;
+	unsigned int prop_array[TPS6591X_MAX_NUM_GPIO];
+	int ngpio = min(chip_ngpio, TPS6591X_MAX_NUM_GPIO);
 	int ret;
-	struct tps65910_board *board_data;
+	int idx;
 
-	if (!gpio_base)
-		return;
+	tps65910_board->gpio_base = -1;
+	ret = of_property_read_u32_array(tps65910->dev->of_node,
+			"ti,en-gpio-sleep", prop_array, ngpio);
+	if (ret < 0) {
+		dev_dbg(dev, "ti,en-gpio-sleep not specified\n");
+		return tps65910_board;
+	}
 
-	tps65910->gpio.owner		= THIS_MODULE;
-	tps65910->gpio.label		= tps65910->i2c_client->name;
-	tps65910->gpio.dev		= tps65910->dev;
-	tps65910->gpio.base		= gpio_base;
+	for (idx = 0; idx < ngpio; idx++)
+		tps65910_board->en_gpio_sleep[idx] = (prop_array[idx] != 0);
+
+	return tps65910_board;
+}
+#else
+static struct tps65910_board *tps65910_parse_dt_for_gpio(struct device *dev,
+		struct tps65910 *tps65910, int chip_ngpio)
+{
+	return NULL;
+}
+#endif
+
+static int __devinit tps65910_gpio_probe(struct platform_device *pdev)
+{
+	struct tps65910 *tps65910 = dev_get_drvdata(pdev->dev.parent);
+	struct tps65910_board *pdata = dev_get_platdata(tps65910->dev);
+	struct tps65910_gpio *tps65910_gpio;
+	int ret;
+	int i;
+
+	tps65910_gpio = devm_kzalloc(&pdev->dev,
+				sizeof(*tps65910_gpio), GFP_KERNEL);
+	if (!tps65910_gpio) {
+		dev_err(&pdev->dev, "Could not allocate tps65910_gpio\n");
+		return -ENOMEM;
+	}
+
+	tps65910_gpio->tps65910 = tps65910;
+
+	tps65910_gpio->gpio_chip.owner = THIS_MODULE;
+	tps65910_gpio->gpio_chip.label = tps65910->i2c_client->name;
 
 	switch(tps65910_chip_id(tps65910)) {
 	case TPS65910:
-		tps65910->gpio.ngpio	= TPS65910_NUM_GPIO;
+		tps65910_gpio->gpio_chip.ngpio = TPS65910_NUM_GPIO;
 		break;
 	case TPS65911:
-		tps65910->gpio.ngpio	= TPS65911_NUM_GPIO;
+		tps65910_gpio->gpio_chip.ngpio = TPS65911_NUM_GPIO;
 		break;
 	default:
-		return;
+		return -EINVAL;
 	}
-	tps65910->gpio.can_sleep	= 1;
+	tps65910_gpio->gpio_chip.can_sleep = 1;
+	tps65910_gpio->gpio_chip.direction_input = tps65910_gpio_input;
+	tps65910_gpio->gpio_chip.direction_output = tps65910_gpio_output;
+	tps65910_gpio->gpio_chip.set	= tps65910_gpio_set;
+	tps65910_gpio->gpio_chip.get	= tps65910_gpio_get;
+	tps65910_gpio->gpio_chip.dev = &pdev->dev;
+	if (pdata && pdata->gpio_base)
+		tps65910_gpio->gpio_chip.base = pdata->gpio_base;
+	else
+		tps65910_gpio->gpio_chip.base = -1;
 
-	tps65910->gpio.direction_input	= tps65910_gpio_input;
-	tps65910->gpio.direction_output	= tps65910_gpio_output;
-	tps65910->gpio.set		= tps65910_gpio_set;
-	tps65910->gpio.get		= tps65910_gpio_get;
+	if (!pdata && tps65910->dev->of_node)
+		pdata = tps65910_parse_dt_for_gpio(&pdev->dev, tps65910,
+			tps65910_gpio->gpio_chip.ngpio);
 
-	/* Configure sleep control for gpios */
-	board_data = dev_get_platdata(tps65910->dev);
-	if (board_data) {
-		int i;
-		for (i = 0; i < tps65910->gpio.ngpio; ++i) {
-			if (board_data->en_gpio_sleep[i]) {
-				ret = tps65910_set_bits(tps65910,
-					TPS65910_GPIO0 + i, GPIO_SLEEP_MASK);
-				if (ret < 0)
-					dev_warn(tps65910->dev,
-						"GPIO Sleep setting failed\n");
-			}
-		}
+	if (!pdata)
+		goto skip_init;
+
+	/* Configure sleep control for gpios if provided */
+	for (i = 0; i < tps65910_gpio->gpio_chip.ngpio; ++i) {
+		if (!pdata->en_gpio_sleep[i])
+			continue;
+
+		ret = tps65910_reg_set_bits(tps65910,
+			TPS65910_GPIO0 + i, GPIO_SLEEP_MASK);
+		if (ret < 0)
+			dev_warn(tps65910->dev,
+				"GPIO Sleep setting failed with err %d\n", ret);
 	}
 
-	ret = gpiochip_add(&tps65910->gpio);
+skip_init:
+	ret = gpiochip_add(&tps65910_gpio->gpio_chip);
+	if (ret < 0) {
+		dev_err(&pdev->dev, "Could not register gpiochip, %d\n", ret);
+		return ret;
+	}
 
-	if (ret)
-		dev_warn(tps65910->dev, "GPIO registration failed: %d\n", ret);
+	platform_set_drvdata(pdev, tps65910_gpio);
+
+	return ret;
 }
+
+static int __devexit tps65910_gpio_remove(struct platform_device *pdev)
+{
+	struct tps65910_gpio *tps65910_gpio = platform_get_drvdata(pdev);
+
+	return gpiochip_remove(&tps65910_gpio->gpio_chip);
+}
+
+static struct platform_driver tps65910_gpio_driver = {
+	.driver.name    = "tps65910-gpio",
+	.driver.owner   = THIS_MODULE,
+	.probe		= tps65910_gpio_probe,
+	.remove		= __devexit_p(tps65910_gpio_remove),
+};
+
+static int __init tps65910_gpio_init(void)
+{
+	return platform_driver_register(&tps65910_gpio_driver);
+}
+subsys_initcall(tps65910_gpio_init);
+
+static void __exit tps65910_gpio_exit(void)
+{
+	platform_driver_unregister(&tps65910_gpio_driver);
+}
+module_exit(tps65910_gpio_exit);
+
+MODULE_AUTHOR("Graeme Gregory <gg@slimlogic.co.uk>");
+MODULE_AUTHOR("Jorge Eduardo Candelaria jedu@slimlogic.co.uk>");
+MODULE_DESCRIPTION("GPIO interface for TPS65910/TPS6511 PMICs");
+MODULE_LICENSE("GPL v2");
+MODULE_ALIAS("platform:tps65910-gpio");
diff --git a/drivers/gpio/gpio-wm831x.c b/drivers/gpio/gpio-wm831x.c
index deb949e..e56a216 100644
--- a/drivers/gpio/gpio-wm831x.c
+++ b/drivers/gpio/gpio-wm831x.c
@@ -102,10 +102,8 @@
 	struct wm831x_gpio *wm831x_gpio = to_wm831x_gpio(chip);
 	struct wm831x *wm831x = wm831x_gpio->wm831x;
 
-	if (!wm831x->irq_base)
-		return -EINVAL;
-
-	return wm831x->irq_base + WM831X_IRQ_GPIO_1 + offset;
+	return irq_create_mapping(wm831x->irq_domain,
+				  WM831X_IRQ_GPIO_1 + offset);
 }
 
 static int wm831x_gpio_set_debounce(struct gpio_chip *chip, unsigned offset,
diff --git a/drivers/gpu/drm/cirrus/cirrus_drv.c b/drivers/gpu/drm/cirrus/cirrus_drv.c
index d703823..7053140 100644
--- a/drivers/gpu/drm/cirrus/cirrus_drv.c
+++ b/drivers/gpu/drm/cirrus/cirrus_drv.c
@@ -35,9 +35,28 @@
 	{0,}
 };
 
+
+static void cirrus_kick_out_firmware_fb(struct pci_dev *pdev)
+{
+	struct apertures_struct *ap;
+	bool primary = false;
+
+	ap = alloc_apertures(1);
+	ap->ranges[0].base = pci_resource_start(pdev, 0);
+	ap->ranges[0].size = pci_resource_len(pdev, 0);
+
+#ifdef CONFIG_X86
+	primary = pdev->resource[PCI_ROM_RESOURCE].flags & IORESOURCE_ROM_SHADOW;
+#endif
+	remove_conflicting_framebuffers(ap, "cirrusdrmfb", primary);
+	kfree(ap);
+}
+
 static int __devinit
 cirrus_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
 {
+	cirrus_kick_out_firmware_fb(pdev);
+
 	return drm_get_pci_dev(pdev, ent, &driver);
 }
 
diff --git a/drivers/gpu/drm/cirrus/cirrus_drv.h b/drivers/gpu/drm/cirrus/cirrus_drv.h
index 21bdfa8..64ea597cb 100644
--- a/drivers/gpu/drm/cirrus/cirrus_drv.h
+++ b/drivers/gpu/drm/cirrus/cirrus_drv.h
@@ -145,7 +145,7 @@
 		struct ttm_bo_device bdev;
 		atomic_t validate_sequence;
 	} ttm;
-
+	bool mm_inited;
 };
 
 
diff --git a/drivers/gpu/drm/cirrus/cirrus_ttm.c b/drivers/gpu/drm/cirrus/cirrus_ttm.c
index 2ebcd11..50e170f 100644
--- a/drivers/gpu/drm/cirrus/cirrus_ttm.c
+++ b/drivers/gpu/drm/cirrus/cirrus_ttm.c
@@ -275,12 +275,17 @@
 				    pci_resource_len(dev->pdev, 0),
 				    DRM_MTRR_WC);
 
+	cirrus->mm_inited = true;
 	return 0;
 }
 
 void cirrus_mm_fini(struct cirrus_device *cirrus)
 {
 	struct drm_device *dev = cirrus->dev;
+
+	if (!cirrus->mm_inited)
+		return;
+
 	ttm_bo_device_release(&cirrus->ttm.bdev);
 
 	cirrus_ttm_global_release(cirrus);
diff --git a/drivers/gpu/drm/drm_crtc.c b/drivers/gpu/drm/drm_crtc.c
index 92cea9d..08a7aa7 100644
--- a/drivers/gpu/drm/drm_crtc.c
+++ b/drivers/gpu/drm/drm_crtc.c
@@ -2116,7 +2116,7 @@
 	return ret;
 }
 
-static int format_check(struct drm_mode_fb_cmd2 *r)
+static int format_check(const struct drm_mode_fb_cmd2 *r)
 {
 	uint32_t format = r->pixel_format & ~DRM_FORMAT_BIG_ENDIAN;
 
@@ -2185,7 +2185,7 @@
 	}
 }
 
-static int framebuffer_check(struct drm_mode_fb_cmd2 *r)
+static int framebuffer_check(const struct drm_mode_fb_cmd2 *r)
 {
 	int ret, hsub, vsub, num_planes, i;
 
@@ -3126,7 +3126,7 @@
 EXPORT_SYMBOL(drm_mode_connector_update_edid_property);
 
 static bool drm_property_change_is_valid(struct drm_property *property,
-					 __u64 value)
+					 uint64_t value)
 {
 	if (property->flags & DRM_MODE_PROP_IMMUTABLE)
 		return false;
@@ -3136,7 +3136,7 @@
 		return true;
 	} else if (property->flags & DRM_MODE_PROP_BITMASK) {
 		int i;
-		__u64 valid_mask = 0;
+		uint64_t valid_mask = 0;
 		for (i = 0; i < property->num_values; i++)
 			valid_mask |= (1ULL << property->values[i]);
 		return !(value & ~valid_mask);
diff --git a/drivers/gpu/drm/drm_edid.c b/drivers/gpu/drm/drm_edid.c
index 608bddf..5873e48 100644
--- a/drivers/gpu/drm/drm_edid.c
+++ b/drivers/gpu/drm/drm_edid.c
@@ -30,7 +30,7 @@
 #include <linux/kernel.h>
 #include <linux/slab.h>
 #include <linux/i2c.h>
-#include <linux/export.h>
+#include <linux/module.h>
 #include "drmP.h"
 #include "drm_edid.h"
 #include "drm_edid_modes.h"
@@ -66,6 +66,8 @@
 #define EDID_QUIRK_FIRST_DETAILED_PREFERRED	(1 << 5)
 /* use +hsync +vsync for detailed mode */
 #define EDID_QUIRK_DETAILED_SYNC_PP		(1 << 6)
+/* Force reduced-blanking timings for detailed modes */
+#define EDID_QUIRK_FORCE_REDUCED_BLANKING	(1 << 7)
 
 struct detailed_mode_closure {
 	struct drm_connector *connector;
@@ -120,6 +122,9 @@
 	/* Samsung SyncMaster 22[5-6]BW */
 	{ "SAM", 596, EDID_QUIRK_PREFER_LARGE_60 },
 	{ "SAM", 638, EDID_QUIRK_PREFER_LARGE_60 },
+
+	/* ViewSonic VA2026w */
+	{ "VSC", 5020, EDID_QUIRK_FORCE_REDUCED_BLANKING },
 };
 
 /*** DDC fetch and block validation ***/
@@ -144,6 +149,10 @@
 }
 EXPORT_SYMBOL(drm_edid_header_is_valid);
 
+static int edid_fixup __read_mostly = 6;
+module_param_named(edid_fixup, edid_fixup, int, 0400);
+MODULE_PARM_DESC(edid_fixup,
+		 "Minimum number of valid EDID header bytes (0-8, default 6)");
 
 /*
  * Sanity check the EDID block (base or extension).  Return 0 if the block
@@ -155,10 +164,13 @@
 	u8 csum = 0;
 	struct edid *edid = (struct edid *)raw_edid;
 
+	if (edid_fixup > 8 || edid_fixup < 0)
+		edid_fixup = 6;
+
 	if (block == 0) {
 		int score = drm_edid_header_is_valid(raw_edid);
 		if (score == 8) ;
-		else if (score >= 6) {
+		else if (score >= edid_fixup) {
 			DRM_DEBUG("Fixing EDID header, your hardware may be failing\n");
 			memcpy(raw_edid, edid_header, sizeof(edid_header));
 		} else {
@@ -598,7 +610,7 @@
 drm_monitor_supports_rb(struct edid *edid)
 {
 	if (edid->revision >= 4) {
-		bool ret;
+		bool ret = false;
 		drm_for_each_detailed_block((u8 *)edid, is_rb, &ret);
 		return ret;
 	}
@@ -885,12 +897,19 @@
 				"Wrong Hsync/Vsync pulse width\n");
 		return NULL;
 	}
+
+	if (quirks & EDID_QUIRK_FORCE_REDUCED_BLANKING) {
+		mode = drm_cvt_mode(dev, hactive, vactive, 60, true, false, false);
+		if (!mode)
+			return NULL;
+
+		goto set_size;
+	}
+
 	mode = drm_mode_create(dev);
 	if (!mode)
 		return NULL;
 
-	mode->type = DRM_MODE_TYPE_DRIVER;
-
 	if (quirks & EDID_QUIRK_135_CLOCK_TOO_HIGH)
 		timing->pixel_clock = cpu_to_le16(1088);
 
@@ -914,8 +933,6 @@
 
 	drm_mode_do_interlace_quirk(mode, pt);
 
-	drm_mode_set_name(mode);
-
 	if (quirks & EDID_QUIRK_DETAILED_SYNC_PP) {
 		pt->misc |= DRM_EDID_PT_HSYNC_POSITIVE | DRM_EDID_PT_VSYNC_POSITIVE;
 	}
@@ -925,6 +942,7 @@
 	mode->flags |= (pt->misc & DRM_EDID_PT_VSYNC_POSITIVE) ?
 		DRM_MODE_FLAG_PVSYNC : DRM_MODE_FLAG_NVSYNC;
 
+set_size:
 	mode->width_mm = pt->width_mm_lo | (pt->width_height_mm_hi & 0xf0) << 4;
 	mode->height_mm = pt->height_mm_lo | (pt->width_height_mm_hi & 0xf) << 8;
 
@@ -938,6 +956,9 @@
 		mode->height_mm = edid->height_cm * 10;
 	}
 
+	mode->type = DRM_MODE_TYPE_DRIVER;
+	drm_mode_set_name(mode);
+
 	return mode;
 }
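The new edid_fixup parameter above sets the minimum number of matching EDID header bytes needed before the header is repaired rather than the block rejected. A stand-alone sketch of the scoring idea, using the canonical 8-byte EDID header from the spec; drm_edid_header_is_valid() is the real implementation and this is only an illustration with a made-up corrupted block:

/* Stand-alone sketch of EDID header scoring against the canonical header. */
#include <stdio.h>

static const unsigned char edid_header[8] = {
	0x00, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x00
};

static int header_score(const unsigned char *raw)
{
	int i, score = 0;

	for (i = 0; i < 8; i++)
		if (raw[i] == edid_header[i])
			score++;
	return score;
}

int main(void)
{
	/* One corrupted byte: score 7, which the default edid_fixup=6 would repair. */
	unsigned char block[8] = { 0x00, 0xff, 0xff, 0x7f, 0xff, 0xff, 0xff, 0x00 };
	int score = header_score(block), fixup = 6;

	if (score == 8)
		printf("header valid\n");
	else if (score >= fixup)
		printf("score %d >= %d: repair header\n", score, fixup);
	else
		printf("score %d: reject block\n", score);
	return 0;
}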
 
diff --git a/drivers/gpu/drm/exynos/exynos_drm_drv.c b/drivers/gpu/drm/exynos/exynos_drm_drv.c
index 4209531..d6de2e07f 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_drv.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_drv.c
@@ -244,8 +244,8 @@
 };
 
 static struct drm_driver exynos_drm_driver = {
-	.driver_features	= DRIVER_HAVE_IRQ | DRIVER_BUS_PLATFORM |
-				  DRIVER_MODESET | DRIVER_GEM | DRIVER_PRIME,
+	.driver_features	= DRIVER_HAVE_IRQ | DRIVER_MODESET |
+					DRIVER_GEM | DRIVER_PRIME,
 	.load			= exynos_drm_load,
 	.unload			= exynos_drm_unload,
 	.open			= exynos_drm_open,
diff --git a/drivers/gpu/drm/exynos/exynos_drm_encoder.c b/drivers/gpu/drm/exynos/exynos_drm_encoder.c
index 6e9ac7b..23d5ad3 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_encoder.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_encoder.c
@@ -172,19 +172,12 @@
 		manager_ops->commit(manager->dev);
 }
 
-static struct drm_crtc *
-exynos_drm_encoder_get_crtc(struct drm_encoder *encoder)
-{
-	return encoder->crtc;
-}
-
 static struct drm_encoder_helper_funcs exynos_encoder_helper_funcs = {
 	.dpms		= exynos_drm_encoder_dpms,
 	.mode_fixup	= exynos_drm_encoder_mode_fixup,
 	.mode_set	= exynos_drm_encoder_mode_set,
 	.prepare	= exynos_drm_encoder_prepare,
 	.commit		= exynos_drm_encoder_commit,
-	.get_crtc	= exynos_drm_encoder_get_crtc,
 };
 
 static void exynos_drm_encoder_destroy(struct drm_encoder *encoder)
diff --git a/drivers/gpu/drm/exynos/exynos_drm_fb.c b/drivers/gpu/drm/exynos/exynos_drm_fb.c
index f82a299..4ccfe43 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_fb.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_fb.c
@@ -51,11 +51,22 @@
 static void exynos_drm_fb_destroy(struct drm_framebuffer *fb)
 {
 	struct exynos_drm_fb *exynos_fb = to_exynos_fb(fb);
+	unsigned int i;
 
 	DRM_DEBUG_KMS("%s\n", __FILE__);
 
 	drm_framebuffer_cleanup(fb);
 
+	for (i = 0; i < ARRAY_SIZE(exynos_fb->exynos_gem_obj); i++) {
+		struct drm_gem_object *obj;
+
+		if (exynos_fb->exynos_gem_obj[i] == NULL)
+			continue;
+
+		obj = &exynos_fb->exynos_gem_obj[i]->base;
+		drm_gem_object_unreference_unlocked(obj);
+	}
+
 	kfree(exynos_fb);
 	exynos_fb = NULL;
 }
@@ -134,11 +145,11 @@
 		return ERR_PTR(-ENOENT);
 	}
 
-	drm_gem_object_unreference_unlocked(obj);
-
 	fb = exynos_drm_framebuffer_init(dev, mode_cmd, obj);
-	if (IS_ERR(fb))
+	if (IS_ERR(fb)) {
+		drm_gem_object_unreference_unlocked(obj);
 		return fb;
+	}
 
 	exynos_fb = to_exynos_fb(fb);
 	nr = exynos_drm_format_num_buffers(fb->pixel_format);
@@ -152,8 +163,6 @@
 			return ERR_PTR(-ENOENT);
 		}
 
-		drm_gem_object_unreference_unlocked(obj);
-
 		exynos_fb->exynos_gem_obj[i] = to_exynos_gem_obj(obj);
 	}
 
diff --git a/drivers/gpu/drm/exynos/exynos_drm_fb.h b/drivers/gpu/drm/exynos/exynos_drm_fb.h
index 3ecb30d..5082375 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_fb.h
+++ b/drivers/gpu/drm/exynos/exynos_drm_fb.h
@@ -31,10 +31,10 @@
 static inline int exynos_drm_format_num_buffers(uint32_t format)
 {
 	switch (format) {
-	case DRM_FORMAT_NV12M:
+	case DRM_FORMAT_NV12:
 	case DRM_FORMAT_NV12MT:
 		return 2;
-	case DRM_FORMAT_YUV420M:
+	case DRM_FORMAT_YUV420:
 		return 3;
 	default:
 		return 1;
diff --git a/drivers/gpu/drm/exynos/exynos_drm_gem.c b/drivers/gpu/drm/exynos/exynos_drm_gem.c
index fc91293..5c8b683 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_gem.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_gem.c
@@ -689,7 +689,6 @@
 				   struct drm_device *dev, uint32_t handle,
 				   uint64_t *offset)
 {
-	struct exynos_drm_gem_obj *exynos_gem_obj;
 	struct drm_gem_object *obj;
 	int ret = 0;
 
@@ -710,15 +709,13 @@
 		goto unlock;
 	}
 
-	exynos_gem_obj = to_exynos_gem_obj(obj);
-
-	if (!exynos_gem_obj->base.map_list.map) {
-		ret = drm_gem_create_mmap_offset(&exynos_gem_obj->base);
+	if (!obj->map_list.map) {
+		ret = drm_gem_create_mmap_offset(obj);
 		if (ret)
 			goto out;
 	}
 
-	*offset = (u64)exynos_gem_obj->base.map_list.hash.key << PAGE_SHIFT;
+	*offset = (u64)obj->map_list.hash.key << PAGE_SHIFT;
 	DRM_DEBUG_KMS("offset = 0x%lx\n", (unsigned long)*offset);
 
 out:
diff --git a/drivers/gpu/drm/exynos/exynos_mixer.c b/drivers/gpu/drm/exynos/exynos_mixer.c
index 68ef010..e2147a2 100644
--- a/drivers/gpu/drm/exynos/exynos_mixer.c
+++ b/drivers/gpu/drm/exynos/exynos_mixer.c
@@ -365,7 +365,7 @@
 	switch (win_data->pixel_format) {
 	case DRM_FORMAT_NV12MT:
 		tiled_mode = true;
-	case DRM_FORMAT_NV12M:
+	case DRM_FORMAT_NV12:
 		crcb_mode = false;
 		buf_num = 2;
 		break;
@@ -601,18 +601,20 @@
 	mixer_reg_write(res, MXR_BG_COLOR2, 0x008080);
 
 	/* setting graphical layers */
-
 	val  = MXR_GRP_CFG_COLOR_KEY_DISABLE; /* no blank key */
 	val |= MXR_GRP_CFG_WIN_BLEND_EN;
+	val |= MXR_GRP_CFG_BLEND_PRE_MUL;
+	val |= MXR_GRP_CFG_PIXEL_BLEND_EN;
 	val |= MXR_GRP_CFG_ALPHA_VAL(0xff); /* non-transparent alpha */
 
 	/* the same configuration for both layers */
 	mixer_reg_write(res, MXR_GRAPHIC_CFG(0), val);
-
-	val |= MXR_GRP_CFG_BLEND_PRE_MUL;
-	val |= MXR_GRP_CFG_PIXEL_BLEND_EN;
 	mixer_reg_write(res, MXR_GRAPHIC_CFG(1), val);
 
+	/* setting video layers */
+	val = MXR_GRP_CFG_ALPHA_VAL(0);
+	mixer_reg_write(res, MXR_VIDEO_CFG, val);
+
 	/* configuration of Video Processor Registers */
 	vp_win_reset(ctx);
 	vp_default_filter(res);
diff --git a/drivers/gpu/drm/i810/i810_dma.c b/drivers/gpu/drm/i810/i810_dma.c
index f920fb5..fa94391 100644
--- a/drivers/gpu/drm/i810/i810_dma.c
+++ b/drivers/gpu/drm/i810/i810_dma.c
@@ -130,11 +130,10 @@
 		return -EINVAL;
 
 	/* This is all entirely broken */
-	down_write(&current->mm->mmap_sem);
 	old_fops = file_priv->filp->f_op;
 	file_priv->filp->f_op = &i810_buffer_fops;
 	dev_priv->mmap_buffer = buf;
-	buf_priv->virtual = (void *)do_mmap(file_priv->filp, 0, buf->total,
+	buf_priv->virtual = (void *)vm_mmap(file_priv->filp, 0, buf->total,
 					    PROT_READ | PROT_WRITE,
 					    MAP_SHARED, buf->bus_address);
 	dev_priv->mmap_buffer = NULL;
@@ -145,7 +144,6 @@
 		retcode = PTR_ERR(buf_priv->virtual);
 		buf_priv->virtual = NULL;
 	}
-	up_write(&current->mm->mmap_sem);
 
 	return retcode;
 }
diff --git a/drivers/gpu/drm/i915/i915_debugfs.c b/drivers/gpu/drm/i915/i915_debugfs.c
index eb2b3c2..5363e9c 100644
--- a/drivers/gpu/drm/i915/i915_debugfs.c
+++ b/drivers/gpu/drm/i915/i915_debugfs.c
@@ -2032,6 +2032,8 @@
 				 1, minor);
 	drm_debugfs_remove_files((struct drm_info_list *) &i915_ring_stop_fops,
 				 1, minor);
+	drm_debugfs_remove_files((struct drm_info_list *) &i915_error_state_fops,
+				 1, minor);
 }
 
 #endif /* CONFIG_DEBUG_FS */
diff --git a/drivers/gpu/drm/i915/i915_drv.c b/drivers/gpu/drm/i915/i915_drv.c
index 238a521..9fe9ebe 100644
--- a/drivers/gpu/drm/i915/i915_drv.c
+++ b/drivers/gpu/drm/i915/i915_drv.c
@@ -233,6 +233,7 @@
 	.has_blt_ring = 1,
 	.has_llc = 1,
 	.has_pch_split = 1,
+	.has_force_wake = 1,
 };
 
 static const struct intel_device_info intel_sandybridge_m_info = {
@@ -243,6 +244,7 @@
 	.has_blt_ring = 1,
 	.has_llc = 1,
 	.has_pch_split = 1,
+	.has_force_wake = 1,
 };
 
 static const struct intel_device_info intel_ivybridge_d_info = {
@@ -252,6 +254,7 @@
 	.has_blt_ring = 1,
 	.has_llc = 1,
 	.has_pch_split = 1,
+	.has_force_wake = 1,
 };
 
 static const struct intel_device_info intel_ivybridge_m_info = {
@@ -262,6 +265,7 @@
 	.has_blt_ring = 1,
 	.has_llc = 1,
 	.has_pch_split = 1,
+	.has_force_wake = 1,
 };
 
 static const struct intel_device_info intel_valleyview_m_info = {
@@ -289,6 +293,7 @@
 	.has_blt_ring = 1,
 	.has_llc = 1,
 	.has_pch_split = 1,
+	.has_force_wake = 1,
 };
 
 static const struct intel_device_info intel_haswell_m_info = {
@@ -298,6 +303,7 @@
 	.has_blt_ring = 1,
 	.has_llc = 1,
 	.has_pch_split = 1,
+	.has_force_wake = 1,
 };
 
 static const struct pci_device_id pciidlist[] = {		/* aka */
@@ -1139,10 +1145,9 @@
 
 /* We give fast paths for the really cool registers */
 #define NEEDS_FORCE_WAKE(dev_priv, reg) \
-       (((dev_priv)->info->gen >= 6) && \
-        ((reg) < 0x40000) &&            \
-        ((reg) != FORCEWAKE)) && \
-       (!IS_VALLEYVIEW((dev_priv)->dev))
+	((HAS_FORCE_WAKE((dev_priv)->dev)) && \
+	 ((reg) < 0x40000) &&            \
+	 ((reg) != FORCEWAKE))
 
 #define __i915_read(x, y) \
 u##x i915_read##x(struct drm_i915_private *dev_priv, u32 reg) { \
diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
index 377c21f..b0b676a 100644
--- a/drivers/gpu/drm/i915/i915_drv.h
+++ b/drivers/gpu/drm/i915/i915_drv.h
@@ -285,6 +285,7 @@
 	u8 is_ivybridge:1;
 	u8 is_valleyview:1;
 	u8 has_pch_split:1;
+	u8 has_force_wake:1;
 	u8 is_haswell:1;
 	u8 has_fbc:1;
 	u8 has_pipe_cxsr:1;
@@ -942,6 +943,9 @@
 
 	/* prime dma-buf support */
 	struct sg_table *sg_table;
+	void *dma_buf_vmapping;
+	int vmapping_count;
+
 	/**
 	 * Used for performing relocations during execbuffer insertion.
 	 */
@@ -1098,6 +1102,8 @@
 #define HAS_PCH_CPT(dev) (INTEL_PCH_TYPE(dev) == PCH_CPT)
 #define HAS_PCH_IBX(dev) (INTEL_PCH_TYPE(dev) == PCH_IBX)
 
+#define HAS_FORCE_WAKE(dev) (INTEL_INFO(dev)->has_force_wake)
+
 #include "i915_trace.h"
 
 /**
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index c1e5c66..288d7b8 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -2063,10 +2063,8 @@
 	if (obj->gtt_space == NULL)
 		return 0;
 
-	if (obj->pin_count != 0) {
-		DRM_ERROR("Attempting to unbind pinned buffer\n");
-		return -EINVAL;
-	}
+	if (obj->pin_count)
+		return -EBUSY;
 
 	ret = i915_gem_object_finish_gpu(obj);
 	if (ret)
@@ -3293,6 +3291,7 @@
 	struct drm_i915_private *dev_priv = dev->dev_private;
 	struct drm_i915_gem_object *obj;
 	struct address_space *mapping;
+	u32 mask;
 
 	obj = kzalloc(sizeof(*obj), GFP_KERNEL);
 	if (obj == NULL)
@@ -3303,8 +3302,15 @@
 		return NULL;
 	}
 
+	mask = GFP_HIGHUSER | __GFP_RECLAIMABLE;
+	if (IS_CRESTLINE(dev) || IS_BROADWATER(dev)) {
+		/* 965gm cannot relocate objects above 4GiB. */
+		mask &= ~__GFP_HIGHMEM;
+		mask |= __GFP_DMA32;
+	}
+
 	mapping = obj->base.filp->f_path.dentry->d_inode->i_mapping;
-	mapping_set_gfp_mask(mapping, GFP_HIGHUSER | __GFP_RECLAIMABLE);
+	mapping_set_gfp_mask(mapping, mask);
 
 	i915_gem_info_add_obj(dev_priv, size);
 
diff --git a/drivers/gpu/drm/i915/i915_gem_dmabuf.c b/drivers/gpu/drm/i915/i915_gem_dmabuf.c
index 8e26917..aa308e1 100644
--- a/drivers/gpu/drm/i915/i915_gem_dmabuf.c
+++ b/drivers/gpu/drm/i915/i915_gem_dmabuf.c
@@ -74,6 +74,59 @@
 	}
 }
 
+static void *i915_gem_dmabuf_vmap(struct dma_buf *dma_buf)
+{
+	struct drm_i915_gem_object *obj = dma_buf->priv;
+	struct drm_device *dev = obj->base.dev;
+	int ret;
+
+	ret = i915_mutex_lock_interruptible(dev);
+	if (ret)
+		return ERR_PTR(ret);
+
+	if (obj->dma_buf_vmapping) {
+		obj->vmapping_count++;
+		goto out_unlock;
+	}
+
+	if (!obj->pages) {
+		ret = i915_gem_object_get_pages_gtt(obj, __GFP_NORETRY | __GFP_NOWARN);
+		if (ret) {
+			mutex_unlock(&dev->struct_mutex);
+			return ERR_PTR(ret);
+		}
+	}
+
+	obj->dma_buf_vmapping = vmap(obj->pages, obj->base.size / PAGE_SIZE, 0, PAGE_KERNEL);
+	if (!obj->dma_buf_vmapping) {
+		DRM_ERROR("failed to vmap object\n");
+		goto out_unlock;
+	}
+
+	obj->vmapping_count = 1;
+out_unlock:
+	mutex_unlock(&dev->struct_mutex);
+	return obj->dma_buf_vmapping;
+}
+
+static void i915_gem_dmabuf_vunmap(struct dma_buf *dma_buf, void *vaddr)
+{
+	struct drm_i915_gem_object *obj = dma_buf->priv;
+	struct drm_device *dev = obj->base.dev;
+	int ret;
+
+	ret = i915_mutex_lock_interruptible(dev);
+	if (ret)
+		return;
+
+	--obj->vmapping_count;
+	if (obj->vmapping_count == 0) {
+		vunmap(obj->dma_buf_vmapping);
+		obj->dma_buf_vmapping = NULL;
+	}
+	mutex_unlock(&dev->struct_mutex);
+}
+
 static void *i915_gem_dmabuf_kmap_atomic(struct dma_buf *dma_buf, unsigned long page_num)
 {
 	return NULL;
@@ -93,6 +146,11 @@
 
 }
 
+static int i915_gem_dmabuf_mmap(struct dma_buf *dma_buf, struct vm_area_struct *vma)
+{
+	return -EINVAL;
+}
+
 static const struct dma_buf_ops i915_dmabuf_ops =  {
 	.map_dma_buf = i915_gem_map_dma_buf,
 	.unmap_dma_buf = i915_gem_unmap_dma_buf,
@@ -101,6 +159,9 @@
 	.kmap_atomic = i915_gem_dmabuf_kmap_atomic,
 	.kunmap = i915_gem_dmabuf_kunmap,
 	.kunmap_atomic = i915_gem_dmabuf_kunmap_atomic,
+	.mmap = i915_gem_dmabuf_mmap,
+	.vmap = i915_gem_dmabuf_vmap,
+	.vunmap = i915_gem_dmabuf_vunmap,
 };
 
 struct dma_buf *i915_gem_prime_export(struct drm_device *dev,
diff --git a/drivers/gpu/drm/i915/i915_irq.c b/drivers/gpu/drm/i915/i915_irq.c
index cc4a633..ed3224c 100644
--- a/drivers/gpu/drm/i915/i915_irq.c
+++ b/drivers/gpu/drm/i915/i915_irq.c
@@ -350,8 +350,8 @@
 {
 	drm_i915_private_t *dev_priv = container_of(work, drm_i915_private_t,
 						    rps_work);
-	u8 new_delay = dev_priv->cur_delay;
 	u32 pm_iir, pm_imr;
+	u8 new_delay;
 
 	spin_lock_irq(&dev_priv->rps_lock);
 	pm_iir = dev_priv->pm_iir;
@@ -360,41 +360,18 @@
 	I915_WRITE(GEN6_PMIMR, 0);
 	spin_unlock_irq(&dev_priv->rps_lock);
 
-	if (!pm_iir)
+	if ((pm_iir & GEN6_PM_DEFERRED_EVENTS) == 0)
 		return;
 
 	mutex_lock(&dev_priv->dev->struct_mutex);
-	if (pm_iir & GEN6_PM_RP_UP_THRESHOLD) {
-		if (dev_priv->cur_delay != dev_priv->max_delay)
-			new_delay = dev_priv->cur_delay + 1;
-		if (new_delay > dev_priv->max_delay)
-			new_delay = dev_priv->max_delay;
-	} else if (pm_iir & (GEN6_PM_RP_DOWN_THRESHOLD | GEN6_PM_RP_DOWN_TIMEOUT)) {
-		gen6_gt_force_wake_get(dev_priv);
-		if (dev_priv->cur_delay != dev_priv->min_delay)
-			new_delay = dev_priv->cur_delay - 1;
-		if (new_delay < dev_priv->min_delay) {
-			new_delay = dev_priv->min_delay;
-			I915_WRITE(GEN6_RP_INTERRUPT_LIMITS,
-				   I915_READ(GEN6_RP_INTERRUPT_LIMITS) |
-				   ((new_delay << 16) & 0x3f0000));
-		} else {
-			/* Make sure we continue to get down interrupts
-			 * until we hit the minimum frequency */
-			I915_WRITE(GEN6_RP_INTERRUPT_LIMITS,
-				   I915_READ(GEN6_RP_INTERRUPT_LIMITS) & ~0x3f0000);
-		}
-		gen6_gt_force_wake_put(dev_priv);
-	}
+
+	if (pm_iir & GEN6_PM_RP_UP_THRESHOLD)
+		new_delay = dev_priv->cur_delay + 1;
+	else
+		new_delay = dev_priv->cur_delay - 1;
 
 	gen6_set_rps(dev_priv->dev, new_delay);
-	dev_priv->cur_delay = new_delay;
 
-	/*
-	 * rps_lock not held here because clearing is non-destructive. There is
-	 * an *extremely* unlikely race with gen6_rps_enable() that is prevented
-	 * by holding struct_mutex for the duration of the write.
-	 */
 	mutex_unlock(&dev_priv->dev->struct_mutex);
 }
 
@@ -435,7 +412,6 @@
 	 */
 
 	spin_lock_irqsave(&dev_priv->rps_lock, flags);
-	WARN(dev_priv->pm_iir & pm_iir, "Missed a PM interrupt\n");
 	dev_priv->pm_iir |= pm_iir;
 	I915_WRITE(GEN6_PMIMR, dev_priv->pm_iir);
 	POSTING_READ(GEN6_PMIMR);
@@ -533,7 +509,7 @@
 	return ret;
 }
 
-static void pch_irq_handler(struct drm_device *dev, u32 pch_iir)
+static void ibx_irq_handler(struct drm_device *dev, u32 pch_iir)
 {
 	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
 	int pipe;
@@ -573,6 +549,35 @@
 		DRM_DEBUG_DRIVER("PCH transcoder A underrun interrupt\n");
 }
 
+static void cpt_irq_handler(struct drm_device *dev, u32 pch_iir)
+{
+	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
+	int pipe;
+
+	if (pch_iir & SDE_AUDIO_POWER_MASK_CPT)
+		DRM_DEBUG_DRIVER("PCH audio power change on port %d\n",
+				 (pch_iir & SDE_AUDIO_POWER_MASK_CPT) >>
+				 SDE_AUDIO_POWER_SHIFT_CPT);
+
+	if (pch_iir & SDE_AUX_MASK_CPT)
+		DRM_DEBUG_DRIVER("AUX channel interrupt\n");
+
+	if (pch_iir & SDE_GMBUS_CPT)
+		DRM_DEBUG_DRIVER("PCH GMBUS interrupt\n");
+
+	if (pch_iir & SDE_AUDIO_CP_REQ_CPT)
+		DRM_DEBUG_DRIVER("Audio CP request interrupt\n");
+
+	if (pch_iir & SDE_AUDIO_CP_CHG_CPT)
+		DRM_DEBUG_DRIVER("Audio CP change interrupt\n");
+
+	if (pch_iir & SDE_FDI_MASK_CPT)
+		for_each_pipe(pipe)
+			DRM_DEBUG_DRIVER("  pipe %c FDI IIR: 0x%08x\n",
+					 pipe_name(pipe),
+					 I915_READ(FDI_RX_IIR(pipe)));
+}
+
 static irqreturn_t ivybridge_irq_handler(DRM_IRQ_ARGS)
 {
 	struct drm_device *dev = (struct drm_device *) arg;
@@ -614,7 +619,7 @@
 
 			if (pch_iir & SDE_HOTPLUG_MASK_CPT)
 				queue_work(dev_priv->wq, &dev_priv->hotplug_work);
-			pch_irq_handler(dev, pch_iir);
+			cpt_irq_handler(dev, pch_iir);
 
 			/* clear PCH hotplug event before clear CPU irq */
 			I915_WRITE(SDEIIR, pch_iir);
@@ -707,7 +712,10 @@
 	if (de_iir & DE_PCH_EVENT) {
 		if (pch_iir & hotplug_mask)
 			queue_work(dev_priv->wq, &dev_priv->hotplug_work);
-		pch_irq_handler(dev, pch_iir);
+		if (HAS_PCH_CPT(dev))
+			cpt_irq_handler(dev, pch_iir);
+		else
+			ibx_irq_handler(dev, pch_iir);
 	}
 
 	if (de_iir & DE_PCU_EVENT) {
diff --git a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h
index 2d49b95..48d5e8e 100644
--- a/drivers/gpu/drm/i915/i915_reg.h
+++ b/drivers/gpu/drm/i915/i915_reg.h
@@ -210,6 +210,14 @@
 #define MI_DISPLAY_FLIP		MI_INSTR(0x14, 2)
 #define MI_DISPLAY_FLIP_I915	MI_INSTR(0x14, 1)
 #define   MI_DISPLAY_FLIP_PLANE(n) ((n) << 20)
+/* IVB has funny definitions for which plane to flip. */
+#define   MI_DISPLAY_FLIP_IVB_PLANE_A  (0 << 19)
+#define   MI_DISPLAY_FLIP_IVB_PLANE_B  (1 << 19)
+#define   MI_DISPLAY_FLIP_IVB_SPRITE_A (2 << 19)
+#define   MI_DISPLAY_FLIP_IVB_SPRITE_B (3 << 19)
+#define   MI_DISPLAY_FLIP_IVB_PLANE_C  (4 << 19)
+#define   MI_DISPLAY_FLIP_IVB_SPRITE_C (5 << 19)
+
 #define MI_SET_CONTEXT		MI_INSTR(0x18, 0)
 #define   MI_MM_SPACE_GTT		(1<<8)
 #define   MI_MM_SPACE_PHYSICAL		(0<<8)
@@ -3313,7 +3321,7 @@
 
 /* PCH */
 
-/* south display engine interrupt */
+/* south display engine interrupt: IBX */
 #define SDE_AUDIO_POWER_D	(1 << 27)
 #define SDE_AUDIO_POWER_C	(1 << 26)
 #define SDE_AUDIO_POWER_B	(1 << 25)
@@ -3349,15 +3357,44 @@
 #define SDE_TRANSA_CRC_ERR	(1 << 1)
 #define SDE_TRANSA_FIFO_UNDER	(1 << 0)
 #define SDE_TRANS_MASK		(0x3f)
-/* CPT */
-#define SDE_CRT_HOTPLUG_CPT	(1 << 19)
+
+/* south display engine interrupt: CPT/PPT */
+#define SDE_AUDIO_POWER_D_CPT	(1 << 31)
+#define SDE_AUDIO_POWER_C_CPT	(1 << 30)
+#define SDE_AUDIO_POWER_B_CPT	(1 << 29)
+#define SDE_AUDIO_POWER_SHIFT_CPT   29
+#define SDE_AUDIO_POWER_MASK_CPT    (7 << 29)
+#define SDE_AUXD_CPT		(1 << 27)
+#define SDE_AUXC_CPT		(1 << 26)
+#define SDE_AUXB_CPT		(1 << 25)
+#define SDE_AUX_MASK_CPT	(7 << 25)
 #define SDE_PORTD_HOTPLUG_CPT	(1 << 23)
 #define SDE_PORTC_HOTPLUG_CPT	(1 << 22)
 #define SDE_PORTB_HOTPLUG_CPT	(1 << 21)
+#define SDE_CRT_HOTPLUG_CPT	(1 << 19)
 #define SDE_HOTPLUG_MASK_CPT	(SDE_CRT_HOTPLUG_CPT |		\
 				 SDE_PORTD_HOTPLUG_CPT |	\
 				 SDE_PORTC_HOTPLUG_CPT |	\
 				 SDE_PORTB_HOTPLUG_CPT)
+#define SDE_GMBUS_CPT		(1 << 17)
+#define SDE_AUDIO_CP_REQ_C_CPT	(1 << 10)
+#define SDE_AUDIO_CP_CHG_C_CPT	(1 << 9)
+#define SDE_FDI_RXC_CPT		(1 << 8)
+#define SDE_AUDIO_CP_REQ_B_CPT	(1 << 6)
+#define SDE_AUDIO_CP_CHG_B_CPT	(1 << 5)
+#define SDE_FDI_RXB_CPT		(1 << 4)
+#define SDE_AUDIO_CP_REQ_A_CPT	(1 << 2)
+#define SDE_AUDIO_CP_CHG_A_CPT	(1 << 1)
+#define SDE_FDI_RXA_CPT		(1 << 0)
+#define SDE_AUDIO_CP_REQ_CPT	(SDE_AUDIO_CP_REQ_C_CPT | \
+				 SDE_AUDIO_CP_REQ_B_CPT | \
+				 SDE_AUDIO_CP_REQ_A_CPT)
+#define SDE_AUDIO_CP_CHG_CPT	(SDE_AUDIO_CP_CHG_C_CPT | \
+				 SDE_AUDIO_CP_CHG_B_CPT | \
+				 SDE_AUDIO_CP_CHG_A_CPT)
+#define SDE_FDI_MASK_CPT	(SDE_FDI_RXC_CPT | \
+				 SDE_FDI_RXB_CPT | \
+				 SDE_FDI_RXA_CPT)
 
 #define SDEISR  0xc4000
 #define SDEIMR  0xc4004
diff --git a/drivers/gpu/drm/i915/i915_suspend.c b/drivers/gpu/drm/i915/i915_suspend.c
index 0ede02a..a748e5c 100644
--- a/drivers/gpu/drm/i915/i915_suspend.c
+++ b/drivers/gpu/drm/i915/i915_suspend.c
@@ -740,8 +740,11 @@
 	if (HAS_PCH_SPLIT(dev)) {
 		I915_WRITE(BLC_PWM_PCH_CTL1, dev_priv->saveBLC_PWM_CTL);
 		I915_WRITE(BLC_PWM_PCH_CTL2, dev_priv->saveBLC_PWM_CTL2);
-		I915_WRITE(BLC_PWM_CPU_CTL, dev_priv->saveBLC_CPU_PWM_CTL);
+		/* NOTE: BLC_PWM_CPU_CTL must be written after BLC_PWM_CPU_CTL2;
+		 * otherwise we get a blank eDP screen after S3 on some machines
+		 */
 		I915_WRITE(BLC_PWM_CPU_CTL2, dev_priv->saveBLC_CPU_PWM_CTL2);
+		I915_WRITE(BLC_PWM_CPU_CTL, dev_priv->saveBLC_CPU_PWM_CTL);
 		I915_WRITE(PCH_PP_ON_DELAYS, dev_priv->savePP_ON_DELAYS);
 		I915_WRITE(PCH_PP_OFF_DELAYS, dev_priv->savePP_OFF_DELAYS);
 		I915_WRITE(PCH_PP_DIVISOR, dev_priv->savePP_DIVISOR);
diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c
index ee61ad1..a8538ac 100644
--- a/drivers/gpu/drm/i915/intel_display.c
+++ b/drivers/gpu/drm/i915/intel_display.c
@@ -910,9 +910,10 @@
 
 /* For ILK+ */
 static void assert_pch_pll(struct drm_i915_private *dev_priv,
-			   struct intel_crtc *intel_crtc, bool state)
+			   struct intel_pch_pll *pll,
+			   struct intel_crtc *crtc,
+			   bool state)
 {
-	int reg;
 	u32 val;
 	bool cur_state;
 
@@ -921,30 +922,37 @@
 		return;
 	}
 
-	if (!intel_crtc->pch_pll) {
-		WARN(1, "asserting PCH PLL enabled with no PLL\n");
+	if (WARN(!pll,
+		  "asserting PCH PLL %s with no PLL\n", state_string(state)))
 		return;
-	}
 
-	if (HAS_PCH_CPT(dev_priv->dev)) {
+	val = I915_READ(pll->pll_reg);
+	cur_state = !!(val & DPLL_VCO_ENABLE);
+	WARN(cur_state != state,
+	     "PCH PLL state for reg %x assertion failure (expected %s, current %s), val=%08x\n",
+	     pll->pll_reg, state_string(state), state_string(cur_state), val);
+
+	/* Make sure the selected PLL is correctly attached to the transcoder */
+	if (crtc && HAS_PCH_CPT(dev_priv->dev)) {
 		u32 pch_dpll;
 
 		pch_dpll = I915_READ(PCH_DPLL_SEL);
-
-		/* Make sure the selected PLL is enabled to the transcoder */
-		WARN(!((pch_dpll >> (4 * intel_crtc->pipe)) & 8),
-		     "transcoder %d PLL not enabled\n", intel_crtc->pipe);
+		cur_state = pll->pll_reg == _PCH_DPLL_B;
+		if (!WARN(((pch_dpll >> (4 * crtc->pipe)) & 1) != cur_state,
+			  "PLL[%d] not attached to this transcoder %d: %08x\n",
+			  cur_state, crtc->pipe, pch_dpll)) {
+			cur_state = !!(val >> (4*crtc->pipe + 3));
+			WARN(cur_state != state,
+			     "PLL[%d] not %s on this transcoder %d: %08x\n",
+			     pll->pll_reg == _PCH_DPLL_B,
+			     state_string(state),
+			     crtc->pipe,
+			     val);
+		}
 	}
-
-	reg = intel_crtc->pch_pll->pll_reg;
-	val = I915_READ(reg);
-	cur_state = !!(val & DPLL_VCO_ENABLE);
-	WARN(cur_state != state,
-	     "PCH PLL state assertion failure (expected %s, current %s)\n",
-	     state_string(state), state_string(cur_state));
 }
-#define assert_pch_pll_enabled(d, p) assert_pch_pll(d, p, true)
-#define assert_pch_pll_disabled(d, p) assert_pch_pll(d, p, false)
+#define assert_pch_pll_enabled(d, p, c) assert_pch_pll(d, p, c, true)
+#define assert_pch_pll_disabled(d, p, c) assert_pch_pll(d, p, c, false)
 
 static void assert_fdi_tx(struct drm_i915_private *dev_priv,
 			  enum pipe pipe, bool state)
@@ -1424,7 +1432,7 @@
 	assert_pch_refclk_enabled(dev_priv);
 
 	if (pll->active++ && pll->on) {
-		assert_pch_pll_enabled(dev_priv, intel_crtc);
+		assert_pch_pll_enabled(dev_priv, pll, NULL);
 		return;
 	}
 
@@ -1460,12 +1468,12 @@
 		      intel_crtc->base.base.id);
 
 	if (WARN_ON(pll->active == 0)) {
-		assert_pch_pll_disabled(dev_priv, intel_crtc);
+		assert_pch_pll_disabled(dev_priv, pll, NULL);
 		return;
 	}
 
 	if (--pll->active) {
-		assert_pch_pll_enabled(dev_priv, intel_crtc);
+		assert_pch_pll_enabled(dev_priv, pll, NULL);
 		return;
 	}
 
@@ -1495,7 +1503,9 @@
 	BUG_ON(dev_priv->info->gen < 5);
 
 	/* Make sure PCH DPLL is enabled */
-	assert_pch_pll_enabled(dev_priv, to_intel_crtc(crtc));
+	assert_pch_pll_enabled(dev_priv,
+			       to_intel_crtc(crtc)->pch_pll,
+			       to_intel_crtc(crtc));
 
 	/* FDI must be feeding us bits for PCH ports */
 	assert_fdi_tx_enabled(dev_priv, pipe);
@@ -6148,17 +6158,34 @@
 	struct drm_i915_private *dev_priv = dev->dev_private;
 	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
 	struct intel_ring_buffer *ring = &dev_priv->ring[BCS];
+	uint32_t plane_bit = 0;
 	int ret;
 
 	ret = intel_pin_and_fence_fb_obj(dev, obj, ring);
 	if (ret)
 		goto err;
 
+	switch (intel_crtc->plane) {
+	case PLANE_A:
+		plane_bit = MI_DISPLAY_FLIP_IVB_PLANE_A;
+		break;
+	case PLANE_B:
+		plane_bit = MI_DISPLAY_FLIP_IVB_PLANE_B;
+		break;
+	case PLANE_C:
+		plane_bit = MI_DISPLAY_FLIP_IVB_PLANE_C;
+		break;
+	default:
+		WARN_ONCE(1, "unknown plane in flip command\n");
+		ret = -ENODEV;
+		goto err;
+	}
+
 	ret = intel_ring_begin(ring, 4);
 	if (ret)
 		goto err_unpin;
 
-	intel_ring_emit(ring, MI_DISPLAY_FLIP_I915 | (intel_crtc->plane << 19));
+	intel_ring_emit(ring, MI_DISPLAY_FLIP_I915 | plane_bit);
 	intel_ring_emit(ring, (fb->pitches[0] | obj->tiling_mode));
 	intel_ring_emit(ring, (obj->gtt_offset));
 	intel_ring_emit(ring, (MI_NOOP));
@@ -6531,7 +6558,7 @@
 		if (I915_READ(HDMIC) & PORT_DETECTED)
 			intel_hdmi_init(dev, HDMIC);
 
-		if (I915_READ(HDMID) & PORT_DETECTED)
+		if (!dpd_is_edp && I915_READ(HDMID) & PORT_DETECTED)
 			intel_hdmi_init(dev, HDMID);
 
 		if (I915_READ(PCH_DP_C) & DP_DETECTED)
@@ -6894,19 +6921,6 @@
 	POSTING_READ(vga_reg);
 }
 
-static void ivb_pch_pwm_override(struct drm_device *dev)
-{
-	struct drm_i915_private *dev_priv = dev->dev_private;
-
-	/*
-	 * IVB has CPU eDP backlight regs too, set things up to let the
-	 * PCH regs control the backlight
-	 */
-	I915_WRITE(BLC_PWM_CPU_CTL2, PWM_ENABLE);
-	I915_WRITE(BLC_PWM_CPU_CTL, 0);
-	I915_WRITE(BLC_PWM_PCH_CTL1, PWM_ENABLE | (1<<30));
-}
-
 void intel_modeset_init_hw(struct drm_device *dev)
 {
 	struct drm_i915_private *dev_priv = dev->dev_private;
@@ -6923,9 +6937,6 @@
 		gen6_enable_rps(dev_priv);
 		gen6_update_ring_freq(dev_priv);
 	}
-
-	if (IS_IVYBRIDGE(dev))
-		ivb_pch_pwm_override(dev);
 }
 
 void intel_modeset_init(struct drm_device *dev)
diff --git a/drivers/gpu/drm/i915/intel_dp.c b/drivers/gpu/drm/i915/intel_dp.c
index 71c7096..c044932 100644
--- a/drivers/gpu/drm/i915/intel_dp.c
+++ b/drivers/gpu/drm/i915/intel_dp.c
@@ -32,6 +32,7 @@
 #include "drm.h"
 #include "drm_crtc.h"
 #include "drm_crtc_helper.h"
+#include "drm_edid.h"
 #include "intel_drv.h"
 #include "i915_drm.h"
 #include "i915_drv.h"
@@ -67,6 +68,8 @@
 	struct drm_display_mode *panel_fixed_mode;  /* for eDP */
 	struct delayed_work panel_vdd_work;
 	bool want_panel_vdd;
+	struct edid *edid; /* cached EDID for eDP */
+	int edid_mode_count;
 };
 
 /**
@@ -266,6 +269,9 @@
 	if (mode->clock < 10000)
 		return MODE_CLOCK_LOW;
 
+	if (mode->flags & DRM_MODE_FLAG_DBLCLK)
+		return MODE_H_ILLEGAL;
+
 	return MODE_OK;
 }
 
@@ -368,7 +374,7 @@
 	int recv_bytes;
 	uint32_t status;
 	uint32_t aux_clock_divider;
-	int try, precharge = 5;
+	int try, precharge;
 
 	intel_dp_check_edp(intel_dp);
 	/* The clock divider is based off the hrawclk,
@@ -388,6 +394,11 @@
 	else
 		aux_clock_divider = intel_hrawclk(dev) / 2;
 
+	if (IS_GEN6(dev))
+		precharge = 3;
+	else
+		precharge = 5;
+
 	/* Try to wait for any previous AUX channel activity */
 	for (try = 0; try < 3; try++) {
 		status = I915_READ(ch_ctl);
@@ -702,6 +713,9 @@
 		mode->clock = intel_dp->panel_fixed_mode->clock;
 	}
 
+	if (mode->flags & DRM_MODE_FLAG_DBLCLK)
+		return false;
+
 	DRM_DEBUG_KMS("DP link computation with max lane count %i "
 		      "max bw %02x pixel clock %iKHz\n",
 		      max_lane_count, bws[max_clock], mode->clock);
@@ -1154,11 +1168,10 @@
 
 	DRM_DEBUG_KMS("Turn eDP power off\n");
 
-	WARN(intel_dp->want_panel_vdd, "Cannot turn power off while VDD is on\n");
-	ironlake_panel_vdd_off_sync(intel_dp); /* finish any pending work */
+	WARN(!intel_dp->want_panel_vdd, "Need VDD to turn off panel\n");
 
 	pp = ironlake_get_pp_control(dev_priv);
-	pp &= ~(POWER_TARGET_ON | EDP_FORCE_VDD | PANEL_POWER_RESET | EDP_BLC_ENABLE);
+	pp &= ~(POWER_TARGET_ON | PANEL_POWER_RESET | EDP_BLC_ENABLE);
 	I915_WRITE(PCH_PP_CONTROL, pp);
 	POSTING_READ(PCH_PP_CONTROL);
 
@@ -1266,18 +1279,16 @@
 {
 	struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
 
+
+	/* Make sure the panel is off before trying to change the mode. But also
+	 * ensure that we have vdd while we switch off the panel. */
+	ironlake_edp_panel_vdd_on(intel_dp);
 	ironlake_edp_backlight_off(intel_dp);
 	ironlake_edp_panel_off(intel_dp);
 
-	/* Wake up the sink first */
-	ironlake_edp_panel_vdd_on(intel_dp);
 	intel_dp_sink_dpms(intel_dp, DRM_MODE_DPMS_ON);
 	intel_dp_link_down(intel_dp);
 	ironlake_edp_panel_vdd_off(intel_dp, false);
-
-	/* Make sure the panel is off before trying to
-	 * change the mode
-	 */
 }
 
 static void intel_dp_commit(struct drm_encoder *encoder)
@@ -1309,10 +1320,11 @@
 	uint32_t dp_reg = I915_READ(intel_dp->output_reg);
 
 	if (mode != DRM_MODE_DPMS_ON) {
+		/* Switching the panel off requires vdd. */
+		ironlake_edp_panel_vdd_on(intel_dp);
 		ironlake_edp_backlight_off(intel_dp);
 		ironlake_edp_panel_off(intel_dp);
 
-		ironlake_edp_panel_vdd_on(intel_dp);
 		intel_dp_sink_dpms(intel_dp, mode);
 		intel_dp_link_down(intel_dp);
 		ironlake_edp_panel_vdd_off(intel_dp, false);
@@ -1969,6 +1981,8 @@
 	if (!(intel_dp->dpcd[DP_DOWN_STREAM_PORT_COUNT] & DP_OUI_SUPPORT))
 		return;
 
+	ironlake_edp_panel_vdd_on(intel_dp);
+
 	if (intel_dp_aux_native_read_retry(intel_dp, DP_SINK_OUI, buf, 3))
 		DRM_DEBUG_KMS("Sink OUI: %02hx%02hx%02hx\n",
 			      buf[0], buf[1], buf[2]);
@@ -1976,6 +1990,8 @@
 	if (intel_dp_aux_native_read_retry(intel_dp, DP_BRANCH_OUI, buf, 3))
 		DRM_DEBUG_KMS("Branch OUI: %02hx%02hx%02hx\n",
 			      buf[0], buf[1], buf[2]);
+
+	ironlake_edp_panel_vdd_off(intel_dp, false);
 }
 
 static bool
@@ -2112,10 +2128,22 @@
 {
 	struct intel_dp *intel_dp = intel_attached_dp(connector);
 	struct edid	*edid;
+	int size;
 
-	ironlake_edp_panel_vdd_on(intel_dp);
+	if (is_edp(intel_dp)) {
+		if (!intel_dp->edid)
+			return NULL;
+
+		size = (intel_dp->edid->extensions + 1) * EDID_LENGTH;
+		edid = kmalloc(size, GFP_KERNEL);
+		if (!edid)
+			return NULL;
+
+		memcpy(edid, intel_dp->edid, size);
+		return edid;
+	}
+
 	edid = drm_get_edid(connector, adapter);
-	ironlake_edp_panel_vdd_off(intel_dp, false);
 	return edid;
 }
 
@@ -2125,9 +2153,17 @@
 	struct intel_dp *intel_dp = intel_attached_dp(connector);
 	int	ret;
 
-	ironlake_edp_panel_vdd_on(intel_dp);
+	if (is_edp(intel_dp)) {
+		drm_mode_connector_update_edid_property(connector,
+							intel_dp->edid);
+		ret = drm_add_edid_modes(connector, intel_dp->edid);
+		drm_edid_to_eld(connector,
+				intel_dp->edid);
+		connector->display_info.raw_edid = NULL;
+		return intel_dp->edid_mode_count;
+	}
+
 	ret = intel_ddc_get_modes(connector, adapter);
-	ironlake_edp_panel_vdd_off(intel_dp, false);
 	return ret;
 }
 
@@ -2317,6 +2353,7 @@
 	i2c_del_adapter(&intel_dp->adapter);
 	drm_encoder_cleanup(encoder);
 	if (is_edp(intel_dp)) {
+		kfree(intel_dp->edid);
 		cancel_delayed_work_sync(&intel_dp->panel_vdd_work);
 		ironlake_panel_vdd_off_sync(intel_dp);
 	}
@@ -2500,11 +2537,14 @@
 			break;
 	}
 
+	intel_dp_i2c_init(intel_dp, intel_connector, name);
+
 	/* Cache some DPCD data in the eDP case */
 	if (is_edp(intel_dp)) {
 		bool ret;
 		struct edp_power_seq	cur, vbt;
 		u32 pp_on, pp_off, pp_div;
+		struct edid *edid;
 
 		pp_on = I915_READ(PCH_PP_ON_DELAYS);
 		pp_off = I915_READ(PCH_PP_OFF_DELAYS);
@@ -2572,9 +2612,19 @@
 			intel_dp_destroy(&intel_connector->base);
 			return;
 		}
-	}
 
-	intel_dp_i2c_init(intel_dp, intel_connector, name);
+		ironlake_edp_panel_vdd_on(intel_dp);
+		edid = drm_get_edid(connector, &intel_dp->adapter);
+		if (edid) {
+			drm_mode_connector_update_edid_property(connector,
+								edid);
+			intel_dp->edid_mode_count =
+				drm_add_edid_modes(connector, edid);
+			drm_edid_to_eld(connector, edid);
+			intel_dp->edid = edid;
+		}
+		ironlake_edp_panel_vdd_off(intel_dp, false);
+	}
 
 	intel_encoder->hot_plug = intel_dp_hot_plug;
 
diff --git a/drivers/gpu/drm/i915/intel_i2c.c b/drivers/gpu/drm/i915/intel_i2c.c
index 4a9707d..1991a44 100644
--- a/drivers/gpu/drm/i915/intel_i2c.c
+++ b/drivers/gpu/drm/i915/intel_i2c.c
@@ -396,11 +396,22 @@
 	 * Wait for bus to IDLE before clearing NAK.
 	 * If we clear the NAK while bus is still active, then it will stay
 	 * active and the next transaction may fail.
+	 *
+	 * If no ACK is received during the address phase of a transaction, the
+	 * adapter must report -ENXIO. It is not clear what to return if no ACK
+	 * is received at other times. But we have to be careful to not return
+	 * spurious -ENXIO because that will prevent i2c and drm edid functions
+	 * from retrying. So return -ENXIO only when gmbus properly quiesces -
+	 * timing out seems to happen when there _is_ a ddc chip present, but
+	 * it's slow responding and only answers on the 2nd retry.
 	 */
+	ret = -ENXIO;
 	if (wait_for((I915_READ(GMBUS2 + reg_offset) & GMBUS_ACTIVE) == 0,
-		     10))
+		     10)) {
 		DRM_DEBUG_KMS("GMBUS [%s] timed out after NAK\n",
 			      adapter->name);
+		ret = -ETIMEDOUT;
+	}
 
 	/* Toggle the Software Clear Interrupt bit. This has the effect
 	 * of resetting the GMBUS controller and so clearing the
@@ -414,14 +425,6 @@
 			 adapter->name, msgs[i].addr,
 			 (msgs[i].flags & I2C_M_RD) ? 'r' : 'w', msgs[i].len);
 
-	/*
-	 * If no ACK is received during the address phase of a transaction,
-	 * the adapter must report -ENXIO.
-	 * It is not clear what to return if no ACK is received at other times.
-	 * So, we always return -ENXIO in all NAK cases, to ensure we send
-	 * it at least during the one case that is specified.
-	 */
-	ret = -ENXIO;
 	goto out;
 
 timeout:
diff --git a/drivers/gpu/drm/i915/intel_lvds.c b/drivers/gpu/drm/i915/intel_lvds.c
index 9dee823..08eb04c 100644
--- a/drivers/gpu/drm/i915/intel_lvds.c
+++ b/drivers/gpu/drm/i915/intel_lvds.c
@@ -747,6 +747,14 @@
 	},
 	{
 		.callback = intel_no_lvds_dmi_callback,
+		.ident = "Hewlett-Packard HP t5740e Thin Client",
+		.matches = {
+			DMI_MATCH(DMI_BOARD_VENDOR, "Hewlett-Packard"),
+			DMI_MATCH(DMI_PRODUCT_NAME, "HP t5740e Thin Client"),
+		},
+	},
+	{
+		.callback = intel_no_lvds_dmi_callback,
 		.ident = "Hewlett-Packard t5745",
 		.matches = {
 			DMI_MATCH(DMI_BOARD_VENDOR, "Hewlett-Packard"),
diff --git a/drivers/gpu/drm/i915/intel_pm.c b/drivers/gpu/drm/i915/intel_pm.c
index 8e79ff6..d0ce2a5 100644
--- a/drivers/gpu/drm/i915/intel_pm.c
+++ b/drivers/gpu/drm/i915/intel_pm.c
@@ -2270,10 +2270,33 @@
 void gen6_set_rps(struct drm_device *dev, u8 val)
 {
 	struct drm_i915_private *dev_priv = dev->dev_private;
-	u32 swreq;
+	u32 limits;
 
-	swreq = (val & 0x3ff) << 25;
-	I915_WRITE(GEN6_RPNSWREQ, swreq);
+	limits = 0;
+	if (val >= dev_priv->max_delay)
+		val = dev_priv->max_delay;
+	else
+		limits |= dev_priv->max_delay << 24;
+
+	if (val <= dev_priv->min_delay)
+		val = dev_priv->min_delay;
+	else
+		limits |= dev_priv->min_delay << 16;
+
+	if (val == dev_priv->cur_delay)
+		return;
+
+	I915_WRITE(GEN6_RPNSWREQ,
+		   GEN6_FREQUENCY(val) |
+		   GEN6_OFFSET(0) |
+		   GEN6_AGGRESSIVE_TURBO);
+
+	/* Make sure we continue to get interrupts
+	 * until we hit the minimum or maximum frequencies.
+	 */
+	I915_WRITE(GEN6_RP_INTERRUPT_LIMITS, limits);
+
+	dev_priv->cur_delay = val;
 }
 
 void gen6_disable_rps(struct drm_device *dev)
@@ -2327,11 +2350,10 @@
 void gen6_enable_rps(struct drm_i915_private *dev_priv)
 {
 	struct intel_ring_buffer *ring;
-	u32 rp_state_cap = I915_READ(GEN6_RP_STATE_CAP);
-	u32 gt_perf_status = I915_READ(GEN6_GT_PERF_STATUS);
+	u32 rp_state_cap;
+	u32 gt_perf_status;
 	u32 pcu_mbox, rc6_mask = 0;
 	u32 gtfifodbg;
-	int cur_freq, min_freq, max_freq;
 	int rc6_mode;
 	int i;
 
@@ -2352,6 +2374,14 @@
 
 	gen6_gt_force_wake_get(dev_priv);
 
+	rp_state_cap = I915_READ(GEN6_RP_STATE_CAP);
+	gt_perf_status = I915_READ(GEN6_GT_PERF_STATUS);
+
+	/* In units of 100MHz */
+	dev_priv->max_delay = rp_state_cap & 0xff;
+	dev_priv->min_delay = (rp_state_cap & 0xff0000) >> 16;
+	dev_priv->cur_delay = 0;
+
 	/* disable the counters and set deterministic thresholds */
 	I915_WRITE(GEN6_RC_CONTROL, 0);
 
@@ -2399,8 +2429,8 @@
 
 	I915_WRITE(GEN6_RP_DOWN_TIMEOUT, 1000000);
 	I915_WRITE(GEN6_RP_INTERRUPT_LIMITS,
-		   18 << 24 |
-		   6 << 16);
+		   dev_priv->max_delay << 24 |
+		   dev_priv->min_delay << 16);
 	I915_WRITE(GEN6_RP_UP_THRESHOLD, 10000);
 	I915_WRITE(GEN6_RP_DOWN_THRESHOLD, 1000000);
 	I915_WRITE(GEN6_RP_UP_EI, 100000);
@@ -2408,7 +2438,7 @@
 	I915_WRITE(GEN6_RP_IDLE_HYSTERSIS, 10);
 	I915_WRITE(GEN6_RP_CONTROL,
 		   GEN6_RP_MEDIA_TURBO |
-		   GEN6_RP_MEDIA_HW_MODE |
+		   GEN6_RP_MEDIA_HW_NORMAL_MODE |
 		   GEN6_RP_MEDIA_IS_GFX |
 		   GEN6_RP_ENABLE |
 		   GEN6_RP_UP_BUSY_AVG |
@@ -2426,10 +2456,6 @@
 		     500))
 		DRM_ERROR("timeout waiting for pcode mailbox to finish\n");
 
-	min_freq = (rp_state_cap & 0xff0000) >> 16;
-	max_freq = rp_state_cap & 0xff;
-	cur_freq = (gt_perf_status & 0xff00) >> 8;
-
 	/* Check for overclock support */
 	if (wait_for((I915_READ(GEN6_PCODE_MAILBOX) & GEN6_PCODE_READY) == 0,
 		     500))
@@ -2440,14 +2466,11 @@
 		     500))
 		DRM_ERROR("timeout waiting for pcode mailbox to finish\n");
 	if (pcu_mbox & (1<<31)) { /* OC supported */
-		max_freq = pcu_mbox & 0xff;
+		dev_priv->max_delay = pcu_mbox & 0xff;
 		DRM_DEBUG_DRIVER("overclocking supported, adjusting frequency max to %dMHz\n", pcu_mbox * 50);
 	}
 
-	/* In units of 100MHz */
-	dev_priv->max_delay = max_freq;
-	dev_priv->min_delay = min_freq;
-	dev_priv->cur_delay = cur_freq;
+	gen6_set_rps(dev_priv->dev, (gt_perf_status & 0xff00) >> 8);
 
 	/* requires MSI enabled */
 	I915_WRITE(GEN6_PMIER,
@@ -3580,8 +3603,9 @@
 		limits |= (dev_priv->min_delay & 0x3f) << 16;
 
 	if (old != limits) {
-		DRM_ERROR("Power management discrepancy: GEN6_RP_INTERRUPT_LIMITS expected %08x, was %08x\n",
-			  limits, old);
+		/* Note that the known failure case is to read back 0. */
+		DRM_DEBUG_DRIVER("Power management discrepancy: GEN6_RP_INTERRUPT_LIMITS "
+				 "expected %08x, was %08x\n", limits, old);
 		I915_WRITE(GEN6_RP_INTERRUPT_LIMITS, limits);
 	}
 
diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.c b/drivers/gpu/drm/i915/intel_ringbuffer.c
index b59b6d5..e5b84ff 100644
--- a/drivers/gpu/drm/i915/intel_ringbuffer.c
+++ b/drivers/gpu/drm/i915/intel_ringbuffer.c
@@ -266,10 +266,15 @@
 
 static int init_ring_common(struct intel_ring_buffer *ring)
 {
-	drm_i915_private_t *dev_priv = ring->dev->dev_private;
+	struct drm_device *dev = ring->dev;
+	drm_i915_private_t *dev_priv = dev->dev_private;
 	struct drm_i915_gem_object *obj = ring->obj;
+	int ret = 0;
 	u32 head;
 
+	if (HAS_FORCE_WAKE(dev))
+		gen6_gt_force_wake_get(dev_priv);
+
 	/* Stop the ring if it's running. */
 	I915_WRITE_CTL(ring, 0);
 	I915_WRITE_HEAD(ring, 0);
@@ -317,7 +322,8 @@
 				I915_READ_HEAD(ring),
 				I915_READ_TAIL(ring),
 				I915_READ_START(ring));
-		return -EIO;
+		ret = -EIO;
+		goto out;
 	}
 
 	if (!drm_core_check_feature(ring->dev, DRIVER_MODESET))
@@ -326,9 +332,14 @@
 		ring->head = I915_READ_HEAD(ring);
 		ring->tail = I915_READ_TAIL(ring) & TAIL_ADDR;
 		ring->space = ring_space(ring);
+		ring->last_retired_head = -1;
 	}
 
-	return 0;
+out:
+	if (HAS_FORCE_WAKE(dev))
+		gen6_gt_force_wake_put(dev_priv);
+
+	return ret;
 }
 
 static int
@@ -987,6 +998,10 @@
 	if (ret)
 		goto err_unref;
 
+	ret = i915_gem_object_set_to_gtt_domain(obj, true);
+	if (ret)
+		goto err_unpin;
+
 	ring->virtual_start = ioremap_wc(dev->agp->base + obj->gtt_offset,
 					 ring->size);
 	if (ring->virtual_start == NULL) {
diff --git a/drivers/gpu/drm/i915/intel_sdvo.c b/drivers/gpu/drm/i915/intel_sdvo.c
index a949b73..b6a9d45 100644
--- a/drivers/gpu/drm/i915/intel_sdvo.c
+++ b/drivers/gpu/drm/i915/intel_sdvo.c
@@ -783,10 +783,12 @@
 		((v_sync_len & 0x30) >> 4);
 
 	dtd->part2.dtd_flags = 0x18;
+	if (mode->flags & DRM_MODE_FLAG_INTERLACE)
+		dtd->part2.dtd_flags |= DTD_FLAG_INTERLACE;
 	if (mode->flags & DRM_MODE_FLAG_PHSYNC)
-		dtd->part2.dtd_flags |= 0x2;
+		dtd->part2.dtd_flags |= DTD_FLAG_HSYNC_POSITIVE;
 	if (mode->flags & DRM_MODE_FLAG_PVSYNC)
-		dtd->part2.dtd_flags |= 0x4;
+		dtd->part2.dtd_flags |= DTD_FLAG_VSYNC_POSITIVE;
 
 	dtd->part2.sdvo_flags = 0;
 	dtd->part2.v_sync_off_high = v_sync_offset & 0xc0;
@@ -820,9 +822,11 @@
 	mode->clock = dtd->part1.clock * 10;
 
 	mode->flags &= ~(DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC);
-	if (dtd->part2.dtd_flags & 0x2)
+	if (dtd->part2.dtd_flags & DTD_FLAG_INTERLACE)
+		mode->flags |= DRM_MODE_FLAG_INTERLACE;
+	if (dtd->part2.dtd_flags & DTD_FLAG_HSYNC_POSITIVE)
 		mode->flags |= DRM_MODE_FLAG_PHSYNC;
-	if (dtd->part2.dtd_flags & 0x4)
+	if (dtd->part2.dtd_flags & DTD_FLAG_VSYNC_POSITIVE)
 		mode->flags |= DRM_MODE_FLAG_PVSYNC;
 }
 
diff --git a/drivers/gpu/drm/i915/intel_sdvo_regs.h b/drivers/gpu/drm/i915/intel_sdvo_regs.h
index 6b7b22f..9d03014 100644
--- a/drivers/gpu/drm/i915/intel_sdvo_regs.h
+++ b/drivers/gpu/drm/i915/intel_sdvo_regs.h
@@ -61,6 +61,11 @@
 	u16 output_flags;
 } __attribute__((packed));
 
+/* Note: SDVO detailed timing flags match EDID misc flags. */
+#define DTD_FLAG_HSYNC_POSITIVE (1 << 1)
+#define DTD_FLAG_VSYNC_POSITIVE (1 << 2)
+#define DTD_FLAG_INTERLACE	(1 << 7)
+
 /** This matches the EDID DTD structure, more or less */
 struct intel_sdvo_dtd {
 	struct {
diff --git a/drivers/gpu/drm/i915/intel_tv.c b/drivers/gpu/drm/i915/intel_tv.c
index 3346612..a233a51 100644
--- a/drivers/gpu/drm/i915/intel_tv.c
+++ b/drivers/gpu/drm/i915/intel_tv.c
@@ -674,6 +674,54 @@
 		.filter_table = filter_table,
 	},
 	{
+		.name       = "480p",
+		.clock		= 107520,
+		.refresh	= 59940,
+		.oversample     = TV_OVERSAMPLE_4X,
+		.component_only = 1,
+
+		.hsync_end      = 64,               .hblank_end         = 122,
+		.hblank_start   = 842,              .htotal             = 857,
+
+		.progressive    = true,		    .trilevel_sync = false,
+
+		.vsync_start_f1 = 12,               .vsync_start_f2     = 12,
+		.vsync_len      = 12,
+
+		.veq_ena        = false,
+
+		.vi_end_f1      = 44,               .vi_end_f2          = 44,
+		.nbr_end        = 479,
+
+		.burst_ena      = false,
+
+		.filter_table = filter_table,
+	},
+	{
+		.name       = "576p",
+		.clock		= 107520,
+		.refresh	= 50000,
+		.oversample     = TV_OVERSAMPLE_4X,
+		.component_only = 1,
+
+		.hsync_end      = 64,               .hblank_end         = 139,
+		.hblank_start   = 859,              .htotal             = 863,
+
+		.progressive    = true,		    .trilevel_sync = false,
+
+		.vsync_start_f1 = 10,               .vsync_start_f2     = 10,
+		.vsync_len      = 10,
+
+		.veq_ena        = false,
+
+		.vi_end_f1      = 48,               .vi_end_f2          = 48,
+		.nbr_end        = 575,
+
+		.burst_ena      = false,
+
+		.filter_table = filter_table,
+	},
+	{
 		.name       = "720p@60Hz",
 		.clock		= 148800,
 		.refresh	= 60000,
@@ -1194,6 +1242,11 @@
 
 	I915_WRITE(TV_DAC, save_tv_dac & ~TVDAC_STATE_CHG_EN);
 	I915_WRITE(TV_CTL, save_tv_ctl);
+	POSTING_READ(TV_CTL);
+
+	/* For unknown reasons the hw barfs if we don't do this vblank wait. */
+	intel_wait_for_vblank(intel_tv->base.base.dev,
+			      to_intel_crtc(intel_tv->base.base.crtc)->pipe);
 
 	/* Restore interrupt config */
 	if (connector->polled & DRM_CONNECTOR_POLL_HPD) {
diff --git a/drivers/gpu/drm/mgag200/mgag200_drv.c b/drivers/gpu/drm/mgag200/mgag200_drv.c
index 3c8e04f..93e832d 100644
--- a/drivers/gpu/drm/mgag200/mgag200_drv.c
+++ b/drivers/gpu/drm/mgag200/mgag200_drv.c
@@ -41,9 +41,28 @@
 
 MODULE_DEVICE_TABLE(pci, pciidlist);
 
+static void mgag200_kick_out_firmware_fb(struct pci_dev *pdev)
+{
+	struct apertures_struct *ap;
+	bool primary = false;
+
+	ap = alloc_apertures(1);
+	ap->ranges[0].base = pci_resource_start(pdev, 0);
+	ap->ranges[0].size = pci_resource_len(pdev, 0);
+
+#ifdef CONFIG_X86
+	primary = pdev->resource[PCI_ROM_RESOURCE].flags & IORESOURCE_ROM_SHADOW;
+#endif
+	remove_conflicting_framebuffers(ap, "mgag200drmfb", primary);
+	kfree(ap);
+}
+
+
 static int __devinit
 mga_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
 {
+	mgag200_kick_out_firmware_fb(pdev);
+
 	return drm_get_pci_dev(pdev, ent, &driver);
 }
 
diff --git a/drivers/gpu/drm/nouveau/nouveau_drv.h b/drivers/gpu/drm/nouveau/nouveau_drv.h
index 634d222..8613cb2 100644
--- a/drivers/gpu/drm/nouveau/nouveau_drv.h
+++ b/drivers/gpu/drm/nouveau/nouveau_drv.h
@@ -123,6 +123,9 @@
 
 	struct drm_gem_object *gem;
 	int pin_refcnt;
+
+	struct ttm_bo_kmap_obj dma_buf_vmap;
+	int vmapping_count;
 };
 
 #define nouveau_bo_tile_layout(nvbo)				\
diff --git a/drivers/gpu/drm/nouveau/nouveau_fbcon.c b/drivers/gpu/drm/nouveau/nouveau_fbcon.c
index 153b9a1..1074bc5 100644
--- a/drivers/gpu/drm/nouveau/nouveau_fbcon.c
+++ b/drivers/gpu/drm/nouveau/nouveau_fbcon.c
@@ -467,7 +467,7 @@
 	nfbdev->helper.funcs = &nouveau_fbcon_helper_funcs;
 
 	ret = drm_fb_helper_init(dev, &nfbdev->helper,
-				 nv_two_heads(dev) ? 2 : 1, 4);
+				 dev->mode_config.num_crtc, 4);
 	if (ret) {
 		kfree(nfbdev);
 		return ret;
diff --git a/drivers/gpu/drm/nouveau/nouveau_prime.c b/drivers/gpu/drm/nouveau/nouveau_prime.c
index c58aab7..a25cf2c 100644
--- a/drivers/gpu/drm/nouveau/nouveau_prime.c
+++ b/drivers/gpu/drm/nouveau/nouveau_prime.c
@@ -1,3 +1,26 @@
+/*
+ * Copyright 2011 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Dave Airlie
+ */
 
 #include "drmP.h"
 #include "drm.h"
@@ -61,6 +84,48 @@
 
 }
 
+static int nouveau_gem_prime_mmap(struct dma_buf *dma_buf, struct vm_area_struct *vma)
+{
+	return -EINVAL;
+}
+
+static void *nouveau_gem_prime_vmap(struct dma_buf *dma_buf)
+{
+	struct nouveau_bo *nvbo = dma_buf->priv;
+	struct drm_device *dev = nvbo->gem->dev;
+	int ret;
+
+	mutex_lock(&dev->struct_mutex);
+	if (nvbo->vmapping_count) {
+		nvbo->vmapping_count++;
+		goto out_unlock;
+	}
+
+	ret = ttm_bo_kmap(&nvbo->bo, 0, nvbo->bo.num_pages,
+			  &nvbo->dma_buf_vmap);
+	if (ret) {
+		mutex_unlock(&dev->struct_mutex);
+		return ERR_PTR(ret);
+	}
+	nvbo->vmapping_count = 1;
+out_unlock:
+	mutex_unlock(&dev->struct_mutex);
+	return nvbo->dma_buf_vmap.virtual;
+}
+
+static void nouveau_gem_prime_vunmap(struct dma_buf *dma_buf, void *vaddr)
+{
+	struct nouveau_bo *nvbo = dma_buf->priv;
+	struct drm_device *dev = nvbo->gem->dev;
+
+	mutex_lock(&dev->struct_mutex);
+	nvbo->vmapping_count--;
+	if (nvbo->vmapping_count == 0) {
+		ttm_bo_kunmap(&nvbo->dma_buf_vmap);
+	}
+	mutex_unlock(&dev->struct_mutex);
+}
+
 static const struct dma_buf_ops nouveau_dmabuf_ops =  {
 	.map_dma_buf = nouveau_gem_map_dma_buf,
 	.unmap_dma_buf = nouveau_gem_unmap_dma_buf,
@@ -69,6 +134,9 @@
 	.kmap_atomic = nouveau_gem_kmap_atomic,
 	.kunmap = nouveau_gem_kunmap,
 	.kunmap_atomic = nouveau_gem_kunmap_atomic,
+	.mmap = nouveau_gem_prime_mmap,
+	.vmap = nouveau_gem_prime_vmap,
+	.vunmap = nouveau_gem_prime_vunmap,
 };
 
 static int
diff --git a/drivers/gpu/drm/radeon/atombios_crtc.c b/drivers/gpu/drm/radeon/atombios_crtc.c
index 01d77d1..3904d79 100644
--- a/drivers/gpu/drm/radeon/atombios_crtc.c
+++ b/drivers/gpu/drm/radeon/atombios_crtc.c
@@ -1149,7 +1149,9 @@
 	}
 
 	if (tiling_flags & RADEON_TILING_MACRO) {
-		if (rdev->family >= CHIP_CAYMAN)
+		if (rdev->family >= CHIP_TAHITI)
+			tmp = rdev->config.si.tile_config;
+		else if (rdev->family >= CHIP_CAYMAN)
 			tmp = rdev->config.cayman.tile_config;
 		else
 			tmp = rdev->config.evergreen.tile_config;
@@ -1177,6 +1179,12 @@
 	} else if (tiling_flags & RADEON_TILING_MICRO)
 		fb_format |= EVERGREEN_GRPH_ARRAY_MODE(EVERGREEN_GRPH_ARRAY_1D_TILED_THIN1);
 
+	if ((rdev->family == CHIP_TAHITI) ||
+	    (rdev->family == CHIP_PITCAIRN))
+		fb_format |= SI_GRPH_PIPE_CONFIG(SI_ADDR_SURF_P8_32x32_8x16);
+	else if (rdev->family == CHIP_VERDE)
+		fb_format |= SI_GRPH_PIPE_CONFIG(SI_ADDR_SURF_P4_8x16);
+
 	switch (radeon_crtc->crtc_id) {
 	case 0:
 		WREG32(AVIVO_D1VGA_CONTROL, 0);
diff --git a/drivers/gpu/drm/radeon/atombios_encoders.c b/drivers/gpu/drm/radeon/atombios_encoders.c
index e7b1ec5..486ccdf 100644
--- a/drivers/gpu/drm/radeon/atombios_encoders.c
+++ b/drivers/gpu/drm/radeon/atombios_encoders.c
@@ -1926,7 +1926,9 @@
 
 	if (atombios_get_encoder_mode(encoder) == ATOM_ENCODER_MODE_HDMI) {
 		r600_hdmi_enable(encoder);
-		if (ASIC_IS_DCE4(rdev))
+		if (ASIC_IS_DCE6(rdev))
+			; /* TODO (use pointers instead of if-s?) */
+		else if (ASIC_IS_DCE4(rdev))
 			evergreen_hdmi_setmode(encoder, adjusted_mode);
 		else
 			r600_hdmi_setmode(encoder, adjusted_mode);
diff --git a/drivers/gpu/drm/radeon/evergreen.c b/drivers/gpu/drm/radeon/evergreen.c
index 58991af..7fb3d2e 100644
--- a/drivers/gpu/drm/radeon/evergreen.c
+++ b/drivers/gpu/drm/radeon/evergreen.c
@@ -1029,6 +1029,11 @@
 		WREG32(MC_VM_MD_L1_TLB0_CNTL, tmp);
 		WREG32(MC_VM_MD_L1_TLB1_CNTL, tmp);
 		WREG32(MC_VM_MD_L1_TLB2_CNTL, tmp);
+		if ((rdev->family == CHIP_JUNIPER) ||
+		    (rdev->family == CHIP_CYPRESS) ||
+		    (rdev->family == CHIP_HEMLOCK) ||
+		    (rdev->family == CHIP_BARTS))
+			WREG32(MC_VM_MD_L1_TLB3_CNTL, tmp);
 	}
 	WREG32(MC_VM_MB_L1_TLB0_CNTL, tmp);
 	WREG32(MC_VM_MB_L1_TLB1_CNTL, tmp);
@@ -1553,163 +1558,10 @@
 /*
  * Core functions
  */
-static u32 evergreen_get_tile_pipe_to_backend_map(struct radeon_device *rdev,
-						  u32 num_tile_pipes,
-						  u32 num_backends,
-						  u32 backend_disable_mask)
-{
-	u32 backend_map = 0;
-	u32 enabled_backends_mask = 0;
-	u32 enabled_backends_count = 0;
-	u32 cur_pipe;
-	u32 swizzle_pipe[EVERGREEN_MAX_PIPES];
-	u32 cur_backend = 0;
-	u32 i;
-	bool force_no_swizzle;
-
-	if (num_tile_pipes > EVERGREEN_MAX_PIPES)
-		num_tile_pipes = EVERGREEN_MAX_PIPES;
-	if (num_tile_pipes < 1)
-		num_tile_pipes = 1;
-	if (num_backends > EVERGREEN_MAX_BACKENDS)
-		num_backends = EVERGREEN_MAX_BACKENDS;
-	if (num_backends < 1)
-		num_backends = 1;
-
-	for (i = 0; i < EVERGREEN_MAX_BACKENDS; ++i) {
-		if (((backend_disable_mask >> i) & 1) == 0) {
-			enabled_backends_mask |= (1 << i);
-			++enabled_backends_count;
-		}
-		if (enabled_backends_count == num_backends)
-			break;
-	}
-
-	if (enabled_backends_count == 0) {
-		enabled_backends_mask = 1;
-		enabled_backends_count = 1;
-	}
-
-	if (enabled_backends_count != num_backends)
-		num_backends = enabled_backends_count;
-
-	memset((uint8_t *)&swizzle_pipe[0], 0, sizeof(u32) * EVERGREEN_MAX_PIPES);
-	switch (rdev->family) {
-	case CHIP_CEDAR:
-	case CHIP_REDWOOD:
-	case CHIP_PALM:
-	case CHIP_SUMO:
-	case CHIP_SUMO2:
-	case CHIP_TURKS:
-	case CHIP_CAICOS:
-		force_no_swizzle = false;
-		break;
-	case CHIP_CYPRESS:
-	case CHIP_HEMLOCK:
-	case CHIP_JUNIPER:
-	case CHIP_BARTS:
-	default:
-		force_no_swizzle = true;
-		break;
-	}
-	if (force_no_swizzle) {
-		bool last_backend_enabled = false;
-
-		force_no_swizzle = false;
-		for (i = 0; i < EVERGREEN_MAX_BACKENDS; ++i) {
-			if (((enabled_backends_mask >> i) & 1) == 1) {
-				if (last_backend_enabled)
-					force_no_swizzle = true;
-				last_backend_enabled = true;
-			} else
-				last_backend_enabled = false;
-		}
-	}
-
-	switch (num_tile_pipes) {
-	case 1:
-	case 3:
-	case 5:
-	case 7:
-		DRM_ERROR("odd number of pipes!\n");
-		break;
-	case 2:
-		swizzle_pipe[0] = 0;
-		swizzle_pipe[1] = 1;
-		break;
-	case 4:
-		if (force_no_swizzle) {
-			swizzle_pipe[0] = 0;
-			swizzle_pipe[1] = 1;
-			swizzle_pipe[2] = 2;
-			swizzle_pipe[3] = 3;
-		} else {
-			swizzle_pipe[0] = 0;
-			swizzle_pipe[1] = 2;
-			swizzle_pipe[2] = 1;
-			swizzle_pipe[3] = 3;
-		}
-		break;
-	case 6:
-		if (force_no_swizzle) {
-			swizzle_pipe[0] = 0;
-			swizzle_pipe[1] = 1;
-			swizzle_pipe[2] = 2;
-			swizzle_pipe[3] = 3;
-			swizzle_pipe[4] = 4;
-			swizzle_pipe[5] = 5;
-		} else {
-			swizzle_pipe[0] = 0;
-			swizzle_pipe[1] = 2;
-			swizzle_pipe[2] = 4;
-			swizzle_pipe[3] = 1;
-			swizzle_pipe[4] = 3;
-			swizzle_pipe[5] = 5;
-		}
-		break;
-	case 8:
-		if (force_no_swizzle) {
-			swizzle_pipe[0] = 0;
-			swizzle_pipe[1] = 1;
-			swizzle_pipe[2] = 2;
-			swizzle_pipe[3] = 3;
-			swizzle_pipe[4] = 4;
-			swizzle_pipe[5] = 5;
-			swizzle_pipe[6] = 6;
-			swizzle_pipe[7] = 7;
-		} else {
-			swizzle_pipe[0] = 0;
-			swizzle_pipe[1] = 2;
-			swizzle_pipe[2] = 4;
-			swizzle_pipe[3] = 6;
-			swizzle_pipe[4] = 1;
-			swizzle_pipe[5] = 3;
-			swizzle_pipe[6] = 5;
-			swizzle_pipe[7] = 7;
-		}
-		break;
-	}
-
-	for (cur_pipe = 0; cur_pipe < num_tile_pipes; ++cur_pipe) {
-		while (((1 << cur_backend) & enabled_backends_mask) == 0)
-			cur_backend = (cur_backend + 1) % EVERGREEN_MAX_BACKENDS;
-
-		backend_map |= (((cur_backend & 0xf) << (swizzle_pipe[cur_pipe] * 4)));
-
-		cur_backend = (cur_backend + 1) % EVERGREEN_MAX_BACKENDS;
-	}
-
-	return backend_map;
-}
-
 static void evergreen_gpu_init(struct radeon_device *rdev)
 {
-	u32 cc_rb_backend_disable = 0;
-	u32 cc_gc_shader_pipe_config;
-	u32 gb_addr_config = 0;
+	u32 gb_addr_config;
 	u32 mc_shared_chmap, mc_arb_ramcfg;
-	u32 gb_backend_map;
-	u32 grbm_gfx_index;
 	u32 sx_debug_1;
 	u32 smx_dc_ctl0;
 	u32 sq_config;
@@ -1724,6 +1576,7 @@
 	u32 sq_stack_resource_mgmt_3;
 	u32 vgt_cache_invalidation;
 	u32 hdp_host_path_cntl, tmp;
+	u32 disabled_rb_mask;
 	int i, j, num_shader_engines, ps_thread_count;
 
 	switch (rdev->family) {
@@ -1748,6 +1601,7 @@
 		rdev->config.evergreen.sc_prim_fifo_size = 0x100;
 		rdev->config.evergreen.sc_hiz_tile_fifo_size = 0x30;
 		rdev->config.evergreen.sc_earlyz_tile_fifo_size = 0x130;
+		gb_addr_config = CYPRESS_GB_ADDR_CONFIG_GOLDEN;
 		break;
 	case CHIP_JUNIPER:
 		rdev->config.evergreen.num_ses = 1;
@@ -1769,6 +1623,7 @@
 		rdev->config.evergreen.sc_prim_fifo_size = 0x100;
 		rdev->config.evergreen.sc_hiz_tile_fifo_size = 0x30;
 		rdev->config.evergreen.sc_earlyz_tile_fifo_size = 0x130;
+		gb_addr_config = JUNIPER_GB_ADDR_CONFIG_GOLDEN;
 		break;
 	case CHIP_REDWOOD:
 		rdev->config.evergreen.num_ses = 1;
@@ -1790,6 +1645,7 @@
 		rdev->config.evergreen.sc_prim_fifo_size = 0x100;
 		rdev->config.evergreen.sc_hiz_tile_fifo_size = 0x30;
 		rdev->config.evergreen.sc_earlyz_tile_fifo_size = 0x130;
+		gb_addr_config = REDWOOD_GB_ADDR_CONFIG_GOLDEN;
 		break;
 	case CHIP_CEDAR:
 	default:
@@ -1812,6 +1668,7 @@
 		rdev->config.evergreen.sc_prim_fifo_size = 0x40;
 		rdev->config.evergreen.sc_hiz_tile_fifo_size = 0x30;
 		rdev->config.evergreen.sc_earlyz_tile_fifo_size = 0x130;
+		gb_addr_config = CEDAR_GB_ADDR_CONFIG_GOLDEN;
 		break;
 	case CHIP_PALM:
 		rdev->config.evergreen.num_ses = 1;
@@ -1833,6 +1690,7 @@
 		rdev->config.evergreen.sc_prim_fifo_size = 0x40;
 		rdev->config.evergreen.sc_hiz_tile_fifo_size = 0x30;
 		rdev->config.evergreen.sc_earlyz_tile_fifo_size = 0x130;
+		gb_addr_config = CEDAR_GB_ADDR_CONFIG_GOLDEN;
 		break;
 	case CHIP_SUMO:
 		rdev->config.evergreen.num_ses = 1;
@@ -1860,6 +1718,7 @@
 		rdev->config.evergreen.sc_prim_fifo_size = 0x40;
 		rdev->config.evergreen.sc_hiz_tile_fifo_size = 0x30;
 		rdev->config.evergreen.sc_earlyz_tile_fifo_size = 0x130;
+		gb_addr_config = REDWOOD_GB_ADDR_CONFIG_GOLDEN;
 		break;
 	case CHIP_SUMO2:
 		rdev->config.evergreen.num_ses = 1;
@@ -1881,6 +1740,7 @@
 		rdev->config.evergreen.sc_prim_fifo_size = 0x40;
 		rdev->config.evergreen.sc_hiz_tile_fifo_size = 0x30;
 		rdev->config.evergreen.sc_earlyz_tile_fifo_size = 0x130;
+		gb_addr_config = REDWOOD_GB_ADDR_CONFIG_GOLDEN;
 		break;
 	case CHIP_BARTS:
 		rdev->config.evergreen.num_ses = 2;
@@ -1902,6 +1762,7 @@
 		rdev->config.evergreen.sc_prim_fifo_size = 0x100;
 		rdev->config.evergreen.sc_hiz_tile_fifo_size = 0x30;
 		rdev->config.evergreen.sc_earlyz_tile_fifo_size = 0x130;
+		gb_addr_config = BARTS_GB_ADDR_CONFIG_GOLDEN;
 		break;
 	case CHIP_TURKS:
 		rdev->config.evergreen.num_ses = 1;
@@ -1923,6 +1784,7 @@
 		rdev->config.evergreen.sc_prim_fifo_size = 0x100;
 		rdev->config.evergreen.sc_hiz_tile_fifo_size = 0x30;
 		rdev->config.evergreen.sc_earlyz_tile_fifo_size = 0x130;
+		gb_addr_config = TURKS_GB_ADDR_CONFIG_GOLDEN;
 		break;
 	case CHIP_CAICOS:
 		rdev->config.evergreen.num_ses = 1;
@@ -1944,6 +1806,7 @@
 		rdev->config.evergreen.sc_prim_fifo_size = 0x40;
 		rdev->config.evergreen.sc_hiz_tile_fifo_size = 0x30;
 		rdev->config.evergreen.sc_earlyz_tile_fifo_size = 0x130;
+		gb_addr_config = CAICOS_GB_ADDR_CONFIG_GOLDEN;
 		break;
 	}
 
@@ -1960,20 +1823,6 @@
 
 	evergreen_fix_pci_max_read_req_size(rdev);
 
-	cc_gc_shader_pipe_config = RREG32(CC_GC_SHADER_PIPE_CONFIG) & ~2;
-
-	cc_gc_shader_pipe_config |=
-		INACTIVE_QD_PIPES((EVERGREEN_MAX_PIPES_MASK << rdev->config.evergreen.max_pipes)
-				  & EVERGREEN_MAX_PIPES_MASK);
-	cc_gc_shader_pipe_config |=
-		INACTIVE_SIMDS((EVERGREEN_MAX_SIMDS_MASK << rdev->config.evergreen.max_simds)
-			       & EVERGREEN_MAX_SIMDS_MASK);
-
-	cc_rb_backend_disable =
-		BACKEND_DISABLE((EVERGREEN_MAX_BACKENDS_MASK << rdev->config.evergreen.max_backends)
-				& EVERGREEN_MAX_BACKENDS_MASK);
-
-
 	mc_shared_chmap = RREG32(MC_SHARED_CHMAP);
 	if ((rdev->family == CHIP_PALM) ||
 	    (rdev->family == CHIP_SUMO) ||
@@ -1982,134 +1831,6 @@
 	else
 		mc_arb_ramcfg = RREG32(MC_ARB_RAMCFG);
 
-	switch (rdev->config.evergreen.max_tile_pipes) {
-	case 1:
-	default:
-		gb_addr_config |= NUM_PIPES(0);
-		break;
-	case 2:
-		gb_addr_config |= NUM_PIPES(1);
-		break;
-	case 4:
-		gb_addr_config |= NUM_PIPES(2);
-		break;
-	case 8:
-		gb_addr_config |= NUM_PIPES(3);
-		break;
-	}
-
-	gb_addr_config |= PIPE_INTERLEAVE_SIZE((mc_arb_ramcfg & BURSTLENGTH_MASK) >> BURSTLENGTH_SHIFT);
-	gb_addr_config |= BANK_INTERLEAVE_SIZE(0);
-	gb_addr_config |= NUM_SHADER_ENGINES(rdev->config.evergreen.num_ses - 1);
-	gb_addr_config |= SHADER_ENGINE_TILE_SIZE(1);
-	gb_addr_config |= NUM_GPUS(0); /* Hemlock? */
-	gb_addr_config |= MULTI_GPU_TILE_SIZE(2);
-
-	if (((mc_arb_ramcfg & NOOFCOLS_MASK) >> NOOFCOLS_SHIFT) > 2)
-		gb_addr_config |= ROW_SIZE(2);
-	else
-		gb_addr_config |= ROW_SIZE((mc_arb_ramcfg & NOOFCOLS_MASK) >> NOOFCOLS_SHIFT);
-
-	if (rdev->ddev->pdev->device == 0x689e) {
-		u32 efuse_straps_4;
-		u32 efuse_straps_3;
-		u8 efuse_box_bit_131_124;
-
-		WREG32(RCU_IND_INDEX, 0x204);
-		efuse_straps_4 = RREG32(RCU_IND_DATA);
-		WREG32(RCU_IND_INDEX, 0x203);
-		efuse_straps_3 = RREG32(RCU_IND_DATA);
-		efuse_box_bit_131_124 = (u8)(((efuse_straps_4 & 0xf) << 4) | ((efuse_straps_3 & 0xf0000000) >> 28));
-
-		switch(efuse_box_bit_131_124) {
-		case 0x00:
-			gb_backend_map = 0x76543210;
-			break;
-		case 0x55:
-			gb_backend_map = 0x77553311;
-			break;
-		case 0x56:
-			gb_backend_map = 0x77553300;
-			break;
-		case 0x59:
-			gb_backend_map = 0x77552211;
-			break;
-		case 0x66:
-			gb_backend_map = 0x77443300;
-			break;
-		case 0x99:
-			gb_backend_map = 0x66552211;
-			break;
-		case 0x5a:
-			gb_backend_map = 0x77552200;
-			break;
-		case 0xaa:
-			gb_backend_map = 0x66442200;
-			break;
-		case 0x95:
-			gb_backend_map = 0x66553311;
-			break;
-		default:
-			DRM_ERROR("bad backend map, using default\n");
-			gb_backend_map =
-				evergreen_get_tile_pipe_to_backend_map(rdev,
-								       rdev->config.evergreen.max_tile_pipes,
-								       rdev->config.evergreen.max_backends,
-								       ((EVERGREEN_MAX_BACKENDS_MASK <<
-								   rdev->config.evergreen.max_backends) &
-									EVERGREEN_MAX_BACKENDS_MASK));
-			break;
-		}
-	} else if (rdev->ddev->pdev->device == 0x68b9) {
-		u32 efuse_straps_3;
-		u8 efuse_box_bit_127_124;
-
-		WREG32(RCU_IND_INDEX, 0x203);
-		efuse_straps_3 = RREG32(RCU_IND_DATA);
-		efuse_box_bit_127_124 = (u8)((efuse_straps_3 & 0xF0000000) >> 28);
-
-		switch(efuse_box_bit_127_124) {
-		case 0x0:
-			gb_backend_map = 0x00003210;
-			break;
-		case 0x5:
-		case 0x6:
-		case 0x9:
-		case 0xa:
-			gb_backend_map = 0x00003311;
-			break;
-		default:
-			DRM_ERROR("bad backend map, using default\n");
-			gb_backend_map =
-				evergreen_get_tile_pipe_to_backend_map(rdev,
-								       rdev->config.evergreen.max_tile_pipes,
-								       rdev->config.evergreen.max_backends,
-								       ((EVERGREEN_MAX_BACKENDS_MASK <<
-								   rdev->config.evergreen.max_backends) &
-									EVERGREEN_MAX_BACKENDS_MASK));
-			break;
-		}
-	} else {
-		switch (rdev->family) {
-		case CHIP_CYPRESS:
-		case CHIP_HEMLOCK:
-		case CHIP_BARTS:
-			gb_backend_map = 0x66442200;
-			break;
-		case CHIP_JUNIPER:
-			gb_backend_map = 0x00002200;
-			break;
-		default:
-			gb_backend_map =
-				evergreen_get_tile_pipe_to_backend_map(rdev,
-								       rdev->config.evergreen.max_tile_pipes,
-								       rdev->config.evergreen.max_backends,
-								       ((EVERGREEN_MAX_BACKENDS_MASK <<
-									 rdev->config.evergreen.max_backends) &
-									EVERGREEN_MAX_BACKENDS_MASK));
-		}
-	}
-
 	/* setup tiling info dword.  gb_addr_config is not adequate since it does
 	 * not have bank info, so create a custom tiling dword.
 	 * bits 3:0   num_pipes
@@ -2136,45 +1857,54 @@
 	/* num banks is 8 on all fusion asics. 0 = 4, 1 = 8, 2 = 16 */
 	if (rdev->flags & RADEON_IS_IGP)
 		rdev->config.evergreen.tile_config |= 1 << 4;
-	else
-		rdev->config.evergreen.tile_config |=
-			((mc_arb_ramcfg & NOOFBANK_MASK) >> NOOFBANK_SHIFT) << 4;
-	rdev->config.evergreen.tile_config |=
-		((mc_arb_ramcfg & BURSTLENGTH_MASK) >> BURSTLENGTH_SHIFT) << 8;
+	else {
+		if ((mc_arb_ramcfg & NOOFBANK_MASK) >> NOOFBANK_SHIFT)
+			rdev->config.evergreen.tile_config |= 1 << 4;
+		else
+			rdev->config.evergreen.tile_config |= 0 << 4;
+	}
+	rdev->config.evergreen.tile_config |= 0 << 8;
 	rdev->config.evergreen.tile_config |=
 		((gb_addr_config & 0x30000000) >> 28) << 12;
 
-	rdev->config.evergreen.backend_map = gb_backend_map;
-	WREG32(GB_BACKEND_MAP, gb_backend_map);
+	num_shader_engines = ((gb_addr_config & NUM_SHADER_ENGINES(3)) >> 12) + 1;
+
+	if ((rdev->family >= CHIP_CEDAR) && (rdev->family <= CHIP_HEMLOCK)) {
+		u32 efuse_straps_4;
+		u32 efuse_straps_3;
+
+		WREG32(RCU_IND_INDEX, 0x204);
+		efuse_straps_4 = RREG32(RCU_IND_DATA);
+		WREG32(RCU_IND_INDEX, 0x203);
+		efuse_straps_3 = RREG32(RCU_IND_DATA);
+		tmp = (((efuse_straps_4 & 0xf) << 4) |
+		      ((efuse_straps_3 & 0xf0000000) >> 28));
+	} else {
+		tmp = 0;
+		for (i = (rdev->config.evergreen.num_ses - 1); i >= 0; i--) {
+			u32 rb_disable_bitmap;
+
+			WREG32(GRBM_GFX_INDEX, INSTANCE_BROADCAST_WRITES | SE_INDEX(i));
+			WREG32(RLC_GFX_INDEX, INSTANCE_BROADCAST_WRITES | SE_INDEX(i));
+			rb_disable_bitmap = (RREG32(CC_RB_BACKEND_DISABLE) & 0x00ff0000) >> 16;
+			tmp <<= 4;
+			tmp |= rb_disable_bitmap;
+		}
+	}
+	/* enabled RBs are just the ones not disabled :) */
+	disabled_rb_mask = tmp;
+
+	WREG32(GRBM_GFX_INDEX, INSTANCE_BROADCAST_WRITES | SE_BROADCAST_WRITES);
+	WREG32(RLC_GFX_INDEX, INSTANCE_BROADCAST_WRITES | SE_BROADCAST_WRITES);
+
 	WREG32(GB_ADDR_CONFIG, gb_addr_config);
 	WREG32(DMIF_ADDR_CONFIG, gb_addr_config);
 	WREG32(HDP_ADDR_CONFIG, gb_addr_config);
 
-	num_shader_engines = ((RREG32(GB_ADDR_CONFIG) & NUM_SHADER_ENGINES(3)) >> 12) + 1;
-	grbm_gfx_index = INSTANCE_BROADCAST_WRITES;
-
-	for (i = 0; i < rdev->config.evergreen.num_ses; i++) {
-		u32 rb = cc_rb_backend_disable | (0xf0 << 16);
-		u32 sp = cc_gc_shader_pipe_config;
-		u32 gfx = grbm_gfx_index | SE_INDEX(i);
-
-		if (i == num_shader_engines) {
-			rb |= BACKEND_DISABLE(EVERGREEN_MAX_BACKENDS_MASK);
-			sp |= INACTIVE_SIMDS(EVERGREEN_MAX_SIMDS_MASK);
-		}
-
-		WREG32(GRBM_GFX_INDEX, gfx);
-		WREG32(RLC_GFX_INDEX, gfx);
-
-		WREG32(CC_RB_BACKEND_DISABLE, rb);
-		WREG32(CC_SYS_RB_BACKEND_DISABLE, rb);
-		WREG32(GC_USER_RB_BACKEND_DISABLE, rb);
-		WREG32(CC_GC_SHADER_PIPE_CONFIG, sp);
-        }
-
-	grbm_gfx_index |= SE_BROADCAST_WRITES;
-	WREG32(GRBM_GFX_INDEX, grbm_gfx_index);
-	WREG32(RLC_GFX_INDEX, grbm_gfx_index);
+	tmp = gb_addr_config & NUM_PIPES_MASK;
+	tmp = r6xx_remap_render_backend(rdev, tmp, rdev->config.evergreen.max_backends,
+					EVERGREEN_MAX_BACKENDS, disabled_rb_mask);
+	WREG32(GB_BACKEND_MAP, tmp);
 
 	WREG32(CGTS_SYS_TCC_DISABLE, 0);
 	WREG32(CGTS_TCC_DISABLE, 0);
@@ -2202,6 +1932,9 @@
 	smx_dc_ctl0 |= NUMBER_OF_SETS(rdev->config.evergreen.sx_num_of_sets);
 	WREG32(SMX_DC_CTL0, smx_dc_ctl0);
 
+	if (rdev->family <= CHIP_SUMO2)
+		WREG32(SMX_SAR_CTL0, 0x00010000);
+
 	WREG32(SX_EXPORT_BUFFER_SIZES, (COLOR_BUFFER_SIZE((rdev->config.evergreen.sx_max_export_size / 4) - 1) |
 					POSITION_BUFFER_SIZE((rdev->config.evergreen.sx_max_export_pos_size / 4) - 1) |
 					SMX_BUFFER_SIZE((rdev->config.evergreen.sx_max_export_smx_size / 4) - 1)));
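
The reworked evergreen_gpu_init() above drops the hand-built backend map and instead gathers a disabled_rb_mask: it selects each shader engine through GRBM_GFX_INDEX/RLC_GFX_INDEX, reads CC_RB_BACKEND_DISABLE, and packs the per-SE disable bits into one word before handing it to r6xx_remap_render_backend(). The following is a minimal user-space sketch of just that packing loop; the register read is stubbed out and the sample values are made up, so this is illustrative rather than the driver code itself.

    #include <stdio.h>
    #include <stdint.h>

    /* Stand-in for RREG32(CC_RB_BACKEND_DISABLE) after GRBM_GFX_INDEX has been
     * pointed at shader engine 'se'; bits [23:16] carry that SE's RB disable
     * bits.  The values here are hypothetical. */
    static uint32_t fake_cc_rb_backend_disable(unsigned se)
    {
        static const uint32_t regs[] = { 0x00010000, 0x00020000 }; /* SE0, SE1 */
        return regs[se];
    }

    int main(void)
    {
        unsigned num_ses = 2;
        uint32_t tmp = 0;

        /* Walk the shader engines from highest to lowest, as the patch does,
         * shifting earlier nibbles left so SE0 ends up in the low nibble. */
        for (int i = (int)num_ses - 1; i >= 0; i--) {
            uint32_t rb_disable_bitmap =
                (fake_cc_rb_backend_disable((unsigned)i) & 0x00ff0000) >> 16;
            tmp <<= 4;
            tmp |= rb_disable_bitmap;
        }

        printf("disabled_rb_mask = 0x%08x\n", tmp); /* 0x00000021 with these samples */
        return 0;
    }
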
diff --git a/drivers/gpu/drm/radeon/evergreen_cs.c b/drivers/gpu/drm/radeon/evergreen_cs.c
index 4e7dd2b..c1655412 100644
--- a/drivers/gpu/drm/radeon/evergreen_cs.c
+++ b/drivers/gpu/drm/radeon/evergreen_cs.c
@@ -52,6 +52,7 @@
 	u32			cb_color_view[12];
 	u32			cb_color_pitch[12];
 	u32			cb_color_slice[12];
+	u32			cb_color_slice_idx[12];
 	u32			cb_color_attrib[12];
 	u32			cb_color_cmask_slice[8];/* unused */
 	u32			cb_color_fmask_slice[8];/* unused */
@@ -127,12 +128,14 @@
 		track->cb_color_info[i] = 0;
 		track->cb_color_view[i] = 0xFFFFFFFF;
 		track->cb_color_pitch[i] = 0;
-		track->cb_color_slice[i] = 0;
+		track->cb_color_slice[i] = 0xfffffff;
+		track->cb_color_slice_idx[i] = 0;
 	}
 	track->cb_target_mask = 0xFFFFFFFF;
 	track->cb_shader_mask = 0xFFFFFFFF;
 	track->cb_dirty = true;
 
+	track->db_depth_slice = 0xffffffff;
 	track->db_depth_view = 0xFFFFC000;
 	track->db_depth_size = 0xFFFFFFFF;
 	track->db_depth_control = 0xFFFFFFFF;
@@ -250,10 +253,9 @@
 {
 	struct evergreen_cs_track *track = p->track;
 	unsigned palign, halign, tileb, slice_pt;
+	unsigned mtile_pr, mtile_ps, mtileb;
 
 	tileb = 64 * surf->bpe * surf->nsamples;
-	palign = track->group_size / (8 * surf->bpe * surf->nsamples);
-	palign = MAX(8, palign);
 	slice_pt = 1;
 	if (tileb > surf->tsplit) {
 		slice_pt = tileb / surf->tsplit;
@@ -262,7 +264,10 @@
 	/* macro tile width & height */
 	palign = (8 * surf->bankw * track->npipes) * surf->mtilea;
 	halign = (8 * surf->bankh * surf->nbanks) / surf->mtilea;
-	surf->layer_size = surf->nbx * surf->nby * surf->bpe * slice_pt;
+	mtileb = (palign / 8) * (halign / 8) * tileb;
+	mtile_pr = surf->nbx / palign;
+	mtile_ps = (mtile_pr * surf->nby) / halign;
+	surf->layer_size = mtile_ps * mtileb * slice_pt;
 	surf->base_align = (palign / 8) * (halign / 8) * tileb;
 	surf->palign = palign;
 	surf->halign = halign;
@@ -434,6 +439,39 @@
 
 	offset += surf.layer_size * mslice;
 	if (offset > radeon_bo_size(track->cb_color_bo[id])) {
+		/* old ddx is broken: it allocates a bo with w*h*bpp but
+		 * programs the slice with ALIGN(h, 8); catch this and patch
+		 * the command stream.
+		 */
+		if (!surf.mode) {
+			volatile u32 *ib = p->ib.ptr;
+			unsigned long tmp, nby, bsize, size, min = 0;
+
+			/* find the height the ddx wants */
+			if (surf.nby > 8) {
+				min = surf.nby - 8;
+			}
+			bsize = radeon_bo_size(track->cb_color_bo[id]);
+			tmp = track->cb_color_bo_offset[id] << 8;
+			for (nby = surf.nby; nby > min; nby--) {
+				size = nby * surf.nbx * surf.bpe * surf.nsamples;
+				if ((tmp + size * mslice) <= bsize) {
+					break;
+				}
+			}
+			if (nby > min) {
+				surf.nby = nby;
+				slice = ((nby * surf.nbx) / 64) - 1;
+				if (!evergreen_surface_check(p, &surf, "cb")) {
+					/* check if this one works */
+					tmp += surf.layer_size * mslice;
+					if (tmp <= bsize) {
+						ib[track->cb_color_slice_idx[id]] = slice;
+						goto old_ddx_ok;
+					}
+				}
+			}
+		}
 		dev_warn(p->dev, "%s:%d cb[%d] bo too small (layer size %d, "
 			 "offset %d, max layer %d, bo size %ld, slice %d)\n",
 			 __func__, __LINE__, id, surf.layer_size,
@@ -446,6 +484,7 @@
 			surf.tsplit, surf.mtilea);
 		return -EINVAL;
 	}
+old_ddx_ok:
 
 	return 0;
 }
@@ -1532,6 +1571,7 @@
 	case CB_COLOR7_SLICE:
 		tmp = (reg - CB_COLOR0_SLICE) / 0x3c;
 		track->cb_color_slice[tmp] = radeon_get_ib_value(p, idx);
+		track->cb_color_slice_idx[tmp] = idx;
 		track->cb_dirty = true;
 		break;
 	case CB_COLOR8_SLICE:
@@ -1540,6 +1580,7 @@
 	case CB_COLOR11_SLICE:
 		tmp = ((reg - CB_COLOR8_SLICE) / 0x1c) + 8;
 		track->cb_color_slice[tmp] = radeon_get_ib_value(p, idx);
+		track->cb_color_slice_idx[tmp] = idx;
 		track->cb_dirty = true;
 		break;
 	case CB_COLOR0_ATTRIB:
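
The evergreen_cs.c hunks above change the 2D-tiled layer size from a flat nbx*nby*bpe product to a count of whole macro tiles per slice, which is the "fix evergreen 2D tiled surface calculation" bump to KMS interface 2.16.0 later in this patch. Below is a user-space sketch of that arithmetic with hypothetical surface parameters (chosen so the micro tile fits under the tile split and slice_pt stays 1); it restates the formula, not the full check function.

    #include <stdio.h>

    int main(void)
    {
        /* Hypothetical surface: 32bpp, no MSAA, 2 pipes, 8 banks, 256x256 blocks */
        unsigned bpe = 4, nsamples = 1;
        unsigned bankw = 1, bankh = 1, mtilea = 1;
        unsigned nbanks = 8, npipes = 2;
        unsigned nbx = 256, nby = 256;

        unsigned tileb = 64 * bpe * nsamples;      /* bytes per 8x8 micro tile   */
        unsigned slice_pt = 1;                     /* assumes tileb <= tile split */

        unsigned palign = (8 * bankw * npipes) * mtilea;       /* macro-tile width   */
        unsigned halign = (8 * bankh * nbanks) / mtilea;       /* macro-tile height  */
        unsigned mtileb = (palign / 8) * (halign / 8) * tileb; /* bytes per macro tile */
        unsigned mtile_pr = nbx / palign;                      /* macro tiles per row  */
        unsigned mtile_ps = (mtile_pr * nby) / halign;         /* macro tiles per slice */
        unsigned long layer_size = (unsigned long)mtile_ps * mtileb * slice_pt;

        printf("palign=%u halign=%u layer_size=%lu bytes\n", palign, halign, layer_size);
        return 0;
    }
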
diff --git a/drivers/gpu/drm/radeon/evergreen_hdmi.c b/drivers/gpu/drm/radeon/evergreen_hdmi.c
index a51f880..65c5416 100644
--- a/drivers/gpu/drm/radeon/evergreen_hdmi.c
+++ b/drivers/gpu/drm/radeon/evergreen_hdmi.c
@@ -156,9 +156,6 @@
 	struct radeon_encoder_atom_dig *dig = radeon_encoder->enc_priv;
 	uint32_t offset;
 
-	if (ASIC_IS_DCE5(rdev))
-		return;
-
 	/* Silent, r600_hdmi_enable will raise WARN for us */
 	if (!dig->afmt->enabled)
 		return;
diff --git a/drivers/gpu/drm/radeon/evergreend.h b/drivers/gpu/drm/radeon/evergreend.h
index 79130bf..b50b15c 100644
--- a/drivers/gpu/drm/radeon/evergreend.h
+++ b/drivers/gpu/drm/radeon/evergreend.h
@@ -37,6 +37,15 @@
 #define EVERGREEN_MAX_PIPES_MASK        0xFF
 #define EVERGREEN_MAX_LDS_NUM           0xFFFF
 
+#define CYPRESS_GB_ADDR_CONFIG_GOLDEN        0x02011003
+#define BARTS_GB_ADDR_CONFIG_GOLDEN          0x02011003
+#define CAYMAN_GB_ADDR_CONFIG_GOLDEN         0x02011003
+#define JUNIPER_GB_ADDR_CONFIG_GOLDEN        0x02010002
+#define REDWOOD_GB_ADDR_CONFIG_GOLDEN        0x02010002
+#define TURKS_GB_ADDR_CONFIG_GOLDEN          0x02010002
+#define CEDAR_GB_ADDR_CONFIG_GOLDEN          0x02010001
+#define CAICOS_GB_ADDR_CONFIG_GOLDEN         0x02010001
+
 /* Registers */
 
 #define RCU_IND_INDEX           			0x100
@@ -54,6 +63,7 @@
 #define		BACKEND_DISABLE(x)     			((x) << 16)
 #define GB_ADDR_CONFIG  				0x98F8
 #define		NUM_PIPES(x)				((x) << 0)
+#define		NUM_PIPES_MASK				0x0000000f
 #define		PIPE_INTERLEAVE_SIZE(x)			((x) << 4)
 #define		BANK_INTERLEAVE_SIZE(x)			((x) << 8)
 #define		NUM_SHADER_ENGINES(x)			((x) << 12)
@@ -452,6 +462,7 @@
 #define	MC_VM_MD_L1_TLB0_CNTL				0x2654
 #define	MC_VM_MD_L1_TLB1_CNTL				0x2658
 #define	MC_VM_MD_L1_TLB2_CNTL				0x265C
+#define	MC_VM_MD_L1_TLB3_CNTL				0x2698
 
 #define	FUS_MC_VM_MD_L1_TLB0_CNTL			0x265C
 #define	FUS_MC_VM_MD_L1_TLB1_CNTL			0x2660
@@ -492,6 +503,7 @@
 #define	SCRATCH_UMSK					0x8540
 #define	SCRATCH_ADDR					0x8544
 
+#define	SMX_SAR_CTL0					0xA008
 #define	SMX_DC_CTL0					0xA020
 #define		USE_HASH_FUNCTION				(1 << 0)
 #define		NUMBER_OF_SETS(x)				((x) << 1)
diff --git a/drivers/gpu/drm/radeon/ni.c b/drivers/gpu/drm/radeon/ni.c
index b01c2dd..b7bf18e 100644
--- a/drivers/gpu/drm/radeon/ni.c
+++ b/drivers/gpu/drm/radeon/ni.c
@@ -417,215 +417,17 @@
 /*
  * Core functions
  */
-static u32 cayman_get_tile_pipe_to_backend_map(struct radeon_device *rdev,
-					       u32 num_tile_pipes,
-					       u32 num_backends_per_asic,
-					       u32 *backend_disable_mask_per_asic,
-					       u32 num_shader_engines)
-{
-	u32 backend_map = 0;
-	u32 enabled_backends_mask = 0;
-	u32 enabled_backends_count = 0;
-	u32 num_backends_per_se;
-	u32 cur_pipe;
-	u32 swizzle_pipe[CAYMAN_MAX_PIPES];
-	u32 cur_backend = 0;
-	u32 i;
-	bool force_no_swizzle;
-
-	/* force legal values */
-	if (num_tile_pipes < 1)
-		num_tile_pipes = 1;
-	if (num_tile_pipes > rdev->config.cayman.max_tile_pipes)
-		num_tile_pipes = rdev->config.cayman.max_tile_pipes;
-	if (num_shader_engines < 1)
-		num_shader_engines = 1;
-	if (num_shader_engines > rdev->config.cayman.max_shader_engines)
-		num_shader_engines = rdev->config.cayman.max_shader_engines;
-	if (num_backends_per_asic < num_shader_engines)
-		num_backends_per_asic = num_shader_engines;
-	if (num_backends_per_asic > (rdev->config.cayman.max_backends_per_se * num_shader_engines))
-		num_backends_per_asic = rdev->config.cayman.max_backends_per_se * num_shader_engines;
-
-	/* make sure we have the same number of backends per se */
-	num_backends_per_asic = ALIGN(num_backends_per_asic, num_shader_engines);
-	/* set up the number of backends per se */
-	num_backends_per_se = num_backends_per_asic / num_shader_engines;
-	if (num_backends_per_se > rdev->config.cayman.max_backends_per_se) {
-		num_backends_per_se = rdev->config.cayman.max_backends_per_se;
-		num_backends_per_asic = num_backends_per_se * num_shader_engines;
-	}
-
-	/* create enable mask and count for enabled backends */
-	for (i = 0; i < CAYMAN_MAX_BACKENDS; ++i) {
-		if (((*backend_disable_mask_per_asic >> i) & 1) == 0) {
-			enabled_backends_mask |= (1 << i);
-			++enabled_backends_count;
-		}
-		if (enabled_backends_count == num_backends_per_asic)
-			break;
-	}
-
-	/* force the backends mask to match the current number of backends */
-	if (enabled_backends_count != num_backends_per_asic) {
-		u32 this_backend_enabled;
-		u32 shader_engine;
-		u32 backend_per_se;
-
-		enabled_backends_mask = 0;
-		enabled_backends_count = 0;
-		*backend_disable_mask_per_asic = CAYMAN_MAX_BACKENDS_MASK;
-		for (i = 0; i < CAYMAN_MAX_BACKENDS; ++i) {
-			/* calc the current se */
-			shader_engine = i / rdev->config.cayman.max_backends_per_se;
-			/* calc the backend per se */
-			backend_per_se = i % rdev->config.cayman.max_backends_per_se;
-			/* default to not enabled */
-			this_backend_enabled = 0;
-			if ((shader_engine < num_shader_engines) &&
-			    (backend_per_se < num_backends_per_se))
-				this_backend_enabled = 1;
-			if (this_backend_enabled) {
-				enabled_backends_mask |= (1 << i);
-				*backend_disable_mask_per_asic &= ~(1 << i);
-				++enabled_backends_count;
-			}
-		}
-	}
-
-
-	memset((uint8_t *)&swizzle_pipe[0], 0, sizeof(u32) * CAYMAN_MAX_PIPES);
-	switch (rdev->family) {
-	case CHIP_CAYMAN:
-	case CHIP_ARUBA:
-		force_no_swizzle = true;
-		break;
-	default:
-		force_no_swizzle = false;
-		break;
-	}
-	if (force_no_swizzle) {
-		bool last_backend_enabled = false;
-
-		force_no_swizzle = false;
-		for (i = 0; i < CAYMAN_MAX_BACKENDS; ++i) {
-			if (((enabled_backends_mask >> i) & 1) == 1) {
-				if (last_backend_enabled)
-					force_no_swizzle = true;
-				last_backend_enabled = true;
-			} else
-				last_backend_enabled = false;
-		}
-	}
-
-	switch (num_tile_pipes) {
-	case 1:
-	case 3:
-	case 5:
-	case 7:
-		DRM_ERROR("odd number of pipes!\n");
-		break;
-	case 2:
-		swizzle_pipe[0] = 0;
-		swizzle_pipe[1] = 1;
-		break;
-	case 4:
-		if (force_no_swizzle) {
-			swizzle_pipe[0] = 0;
-			swizzle_pipe[1] = 1;
-			swizzle_pipe[2] = 2;
-			swizzle_pipe[3] = 3;
-		} else {
-			swizzle_pipe[0] = 0;
-			swizzle_pipe[1] = 2;
-			swizzle_pipe[2] = 1;
-			swizzle_pipe[3] = 3;
-		}
-		break;
-	case 6:
-		if (force_no_swizzle) {
-			swizzle_pipe[0] = 0;
-			swizzle_pipe[1] = 1;
-			swizzle_pipe[2] = 2;
-			swizzle_pipe[3] = 3;
-			swizzle_pipe[4] = 4;
-			swizzle_pipe[5] = 5;
-		} else {
-			swizzle_pipe[0] = 0;
-			swizzle_pipe[1] = 2;
-			swizzle_pipe[2] = 4;
-			swizzle_pipe[3] = 1;
-			swizzle_pipe[4] = 3;
-			swizzle_pipe[5] = 5;
-		}
-		break;
-	case 8:
-		if (force_no_swizzle) {
-			swizzle_pipe[0] = 0;
-			swizzle_pipe[1] = 1;
-			swizzle_pipe[2] = 2;
-			swizzle_pipe[3] = 3;
-			swizzle_pipe[4] = 4;
-			swizzle_pipe[5] = 5;
-			swizzle_pipe[6] = 6;
-			swizzle_pipe[7] = 7;
-		} else {
-			swizzle_pipe[0] = 0;
-			swizzle_pipe[1] = 2;
-			swizzle_pipe[2] = 4;
-			swizzle_pipe[3] = 6;
-			swizzle_pipe[4] = 1;
-			swizzle_pipe[5] = 3;
-			swizzle_pipe[6] = 5;
-			swizzle_pipe[7] = 7;
-		}
-		break;
-	}
-
-	for (cur_pipe = 0; cur_pipe < num_tile_pipes; ++cur_pipe) {
-		while (((1 << cur_backend) & enabled_backends_mask) == 0)
-			cur_backend = (cur_backend + 1) % CAYMAN_MAX_BACKENDS;
-
-		backend_map |= (((cur_backend & 0xf) << (swizzle_pipe[cur_pipe] * 4)));
-
-		cur_backend = (cur_backend + 1) % CAYMAN_MAX_BACKENDS;
-	}
-
-	return backend_map;
-}
-
-static u32 cayman_get_disable_mask_per_asic(struct radeon_device *rdev,
-					    u32 disable_mask_per_se,
-					    u32 max_disable_mask_per_se,
-					    u32 num_shader_engines)
-{
-	u32 disable_field_width_per_se = r600_count_pipe_bits(disable_mask_per_se);
-	u32 disable_mask_per_asic = disable_mask_per_se & max_disable_mask_per_se;
-
-	if (num_shader_engines == 1)
-		return disable_mask_per_asic;
-	else if (num_shader_engines == 2)
-		return disable_mask_per_asic | (disable_mask_per_asic << disable_field_width_per_se);
-	else
-		return 0xffffffff;
-}
-
 static void cayman_gpu_init(struct radeon_device *rdev)
 {
-	u32 cc_rb_backend_disable = 0;
-	u32 cc_gc_shader_pipe_config;
 	u32 gb_addr_config = 0;
 	u32 mc_shared_chmap, mc_arb_ramcfg;
-	u32 gb_backend_map;
 	u32 cgts_tcc_disable;
 	u32 sx_debug_1;
 	u32 smx_dc_ctl0;
-	u32 gc_user_shader_pipe_config;
-	u32 gc_user_rb_backend_disable;
-	u32 cgts_user_tcc_disable;
 	u32 cgts_sm_ctrl_reg;
 	u32 hdp_host_path_cntl;
 	u32 tmp;
+	u32 disabled_rb_mask;
 	int i, j;
 
 	switch (rdev->family) {
@@ -650,6 +452,7 @@
 		rdev->config.cayman.sc_prim_fifo_size = 0x100;
 		rdev->config.cayman.sc_hiz_tile_fifo_size = 0x30;
 		rdev->config.cayman.sc_earlyz_tile_fifo_size = 0x130;
+		gb_addr_config = CAYMAN_GB_ADDR_CONFIG_GOLDEN;
 		break;
 	case CHIP_ARUBA:
 	default:
@@ -657,15 +460,28 @@
 		rdev->config.cayman.max_pipes_per_simd = 4;
 		rdev->config.cayman.max_tile_pipes = 2;
 		if ((rdev->pdev->device == 0x9900) ||
-		    (rdev->pdev->device == 0x9901)) {
+		    (rdev->pdev->device == 0x9901) ||
+		    (rdev->pdev->device == 0x9905) ||
+		    (rdev->pdev->device == 0x9906) ||
+		    (rdev->pdev->device == 0x9907) ||
+		    (rdev->pdev->device == 0x9908) ||
+		    (rdev->pdev->device == 0x9909) ||
+		    (rdev->pdev->device == 0x9910) ||
+		    (rdev->pdev->device == 0x9917)) {
 			rdev->config.cayman.max_simds_per_se = 6;
 			rdev->config.cayman.max_backends_per_se = 2;
 		} else if ((rdev->pdev->device == 0x9903) ||
-			   (rdev->pdev->device == 0x9904)) {
+			   (rdev->pdev->device == 0x9904) ||
+			   (rdev->pdev->device == 0x990A) ||
+			   (rdev->pdev->device == 0x9913) ||
+			   (rdev->pdev->device == 0x9918)) {
 			rdev->config.cayman.max_simds_per_se = 4;
 			rdev->config.cayman.max_backends_per_se = 2;
-		} else if ((rdev->pdev->device == 0x9990) ||
-			   (rdev->pdev->device == 0x9991)) {
+		} else if ((rdev->pdev->device == 0x9919) ||
+			   (rdev->pdev->device == 0x9990) ||
+			   (rdev->pdev->device == 0x9991) ||
+			   (rdev->pdev->device == 0x9994) ||
+			   (rdev->pdev->device == 0x99A0)) {
 			rdev->config.cayman.max_simds_per_se = 3;
 			rdev->config.cayman.max_backends_per_se = 1;
 		} else {
@@ -687,6 +503,7 @@
 		rdev->config.cayman.sc_prim_fifo_size = 0x40;
 		rdev->config.cayman.sc_hiz_tile_fifo_size = 0x30;
 		rdev->config.cayman.sc_earlyz_tile_fifo_size = 0x130;
+		gb_addr_config = ARUBA_GB_ADDR_CONFIG_GOLDEN;
 		break;
 	}
 
@@ -706,39 +523,6 @@
 	mc_shared_chmap = RREG32(MC_SHARED_CHMAP);
 	mc_arb_ramcfg = RREG32(MC_ARB_RAMCFG);
 
-	cc_rb_backend_disable = RREG32(CC_RB_BACKEND_DISABLE);
-	cc_gc_shader_pipe_config = RREG32(CC_GC_SHADER_PIPE_CONFIG);
-	cgts_tcc_disable = 0xffff0000;
-	for (i = 0; i < rdev->config.cayman.max_texture_channel_caches; i++)
-		cgts_tcc_disable &= ~(1 << (16 + i));
-	gc_user_rb_backend_disable = RREG32(GC_USER_RB_BACKEND_DISABLE);
-	gc_user_shader_pipe_config = RREG32(GC_USER_SHADER_PIPE_CONFIG);
-	cgts_user_tcc_disable = RREG32(CGTS_USER_TCC_DISABLE);
-
-	rdev->config.cayman.num_shader_engines = rdev->config.cayman.max_shader_engines;
-	tmp = ((~gc_user_shader_pipe_config) & INACTIVE_QD_PIPES_MASK) >> INACTIVE_QD_PIPES_SHIFT;
-	rdev->config.cayman.num_shader_pipes_per_simd = r600_count_pipe_bits(tmp);
-	rdev->config.cayman.num_tile_pipes = rdev->config.cayman.max_tile_pipes;
-	tmp = ((~gc_user_shader_pipe_config) & INACTIVE_SIMDS_MASK) >> INACTIVE_SIMDS_SHIFT;
-	rdev->config.cayman.num_simds_per_se = r600_count_pipe_bits(tmp);
-	tmp = ((~gc_user_rb_backend_disable) & BACKEND_DISABLE_MASK) >> BACKEND_DISABLE_SHIFT;
-	rdev->config.cayman.num_backends_per_se = r600_count_pipe_bits(tmp);
-	tmp = (gc_user_rb_backend_disable & BACKEND_DISABLE_MASK) >> BACKEND_DISABLE_SHIFT;
-	rdev->config.cayman.backend_disable_mask_per_asic =
-		cayman_get_disable_mask_per_asic(rdev, tmp, CAYMAN_MAX_BACKENDS_PER_SE_MASK,
-						 rdev->config.cayman.num_shader_engines);
-	rdev->config.cayman.backend_map =
-		cayman_get_tile_pipe_to_backend_map(rdev, rdev->config.cayman.num_tile_pipes,
-						    rdev->config.cayman.num_backends_per_se *
-						    rdev->config.cayman.num_shader_engines,
-						    &rdev->config.cayman.backend_disable_mask_per_asic,
-						    rdev->config.cayman.num_shader_engines);
-	tmp = ((~cgts_user_tcc_disable) & TCC_DISABLE_MASK) >> TCC_DISABLE_SHIFT;
-	rdev->config.cayman.num_texture_channel_caches = r600_count_pipe_bits(tmp);
-	tmp = (mc_arb_ramcfg & BURSTLENGTH_MASK) >> BURSTLENGTH_SHIFT;
-	rdev->config.cayman.mem_max_burst_length_bytes = (tmp + 1) * 256;
-	if (rdev->config.cayman.mem_max_burst_length_bytes > 512)
-		rdev->config.cayman.mem_max_burst_length_bytes = 512;
 	tmp = (mc_arb_ramcfg & NOOFCOLS_MASK) >> NOOFCOLS_SHIFT;
 	rdev->config.cayman.mem_row_size_in_kb = (4 * (1 << (8 + tmp))) / 1024;
 	if (rdev->config.cayman.mem_row_size_in_kb > 4)
@@ -748,73 +532,6 @@
 	rdev->config.cayman.num_gpus = 1;
 	rdev->config.cayman.multi_gpu_tile_size = 64;
 
-	//gb_addr_config = 0x02011003
-#if 0
-	gb_addr_config = RREG32(GB_ADDR_CONFIG);
-#else
-	gb_addr_config = 0;
-	switch (rdev->config.cayman.num_tile_pipes) {
-	case 1:
-	default:
-		gb_addr_config |= NUM_PIPES(0);
-		break;
-	case 2:
-		gb_addr_config |= NUM_PIPES(1);
-		break;
-	case 4:
-		gb_addr_config |= NUM_PIPES(2);
-		break;
-	case 8:
-		gb_addr_config |= NUM_PIPES(3);
-		break;
-	}
-
-	tmp = (rdev->config.cayman.mem_max_burst_length_bytes / 256) - 1;
-	gb_addr_config |= PIPE_INTERLEAVE_SIZE(tmp);
-	gb_addr_config |= NUM_SHADER_ENGINES(rdev->config.cayman.num_shader_engines - 1);
-	tmp = (rdev->config.cayman.shader_engine_tile_size / 16) - 1;
-	gb_addr_config |= SHADER_ENGINE_TILE_SIZE(tmp);
-	switch (rdev->config.cayman.num_gpus) {
-	case 1:
-	default:
-		gb_addr_config |= NUM_GPUS(0);
-		break;
-	case 2:
-		gb_addr_config |= NUM_GPUS(1);
-		break;
-	case 4:
-		gb_addr_config |= NUM_GPUS(2);
-		break;
-	}
-	switch (rdev->config.cayman.multi_gpu_tile_size) {
-	case 16:
-		gb_addr_config |= MULTI_GPU_TILE_SIZE(0);
-		break;
-	case 32:
-	default:
-		gb_addr_config |= MULTI_GPU_TILE_SIZE(1);
-		break;
-	case 64:
-		gb_addr_config |= MULTI_GPU_TILE_SIZE(2);
-		break;
-	case 128:
-		gb_addr_config |= MULTI_GPU_TILE_SIZE(3);
-		break;
-	}
-	switch (rdev->config.cayman.mem_row_size_in_kb) {
-	case 1:
-	default:
-		gb_addr_config |= ROW_SIZE(0);
-		break;
-	case 2:
-		gb_addr_config |= ROW_SIZE(1);
-		break;
-	case 4:
-		gb_addr_config |= ROW_SIZE(2);
-		break;
-	}
-#endif
-
 	tmp = (gb_addr_config & NUM_PIPES_MASK) >> NUM_PIPES_SHIFT;
 	rdev->config.cayman.num_tile_pipes = (1 << tmp);
 	tmp = (gb_addr_config & PIPE_INTERLEAVE_SIZE_MASK) >> PIPE_INTERLEAVE_SIZE_SHIFT;
@@ -828,17 +545,7 @@
 	tmp = (gb_addr_config & ROW_SIZE_MASK) >> ROW_SIZE_SHIFT;
 	rdev->config.cayman.mem_row_size_in_kb = 1 << tmp;
 
-	//gb_backend_map = 0x76541032;
-#if 0
-	gb_backend_map = RREG32(GB_BACKEND_MAP);
-#else
-	gb_backend_map =
-		cayman_get_tile_pipe_to_backend_map(rdev, rdev->config.cayman.num_tile_pipes,
-						    rdev->config.cayman.num_backends_per_se *
-						    rdev->config.cayman.num_shader_engines,
-						    &rdev->config.cayman.backend_disable_mask_per_asic,
-						    rdev->config.cayman.num_shader_engines);
-#endif
+
 	/* setup tiling info dword.  gb_addr_config is not adequate since it does
 	 * not have bank info, so create a custom tiling dword.
 	 * bits 3:0   num_pipes
@@ -865,34 +572,50 @@
 
 	/* num banks is 8 on all fusion asics. 0 = 4, 1 = 8, 2 = 16 */
 	if (rdev->flags & RADEON_IS_IGP)
-		rdev->config.evergreen.tile_config |= 1 << 4;
-	else
-		rdev->config.cayman.tile_config |=
-			((mc_arb_ramcfg & NOOFBANK_MASK) >> NOOFBANK_SHIFT) << 4;
+		rdev->config.cayman.tile_config |= 1 << 4;
+	else {
+		if ((mc_arb_ramcfg & NOOFBANK_MASK) >> NOOFBANK_SHIFT)
+			rdev->config.cayman.tile_config |= 1 << 4;
+		else
+			rdev->config.cayman.tile_config |= 0 << 4;
+	}
 	rdev->config.cayman.tile_config |=
 		((gb_addr_config & PIPE_INTERLEAVE_SIZE_MASK) >> PIPE_INTERLEAVE_SIZE_SHIFT) << 8;
 	rdev->config.cayman.tile_config |=
 		((gb_addr_config & ROW_SIZE_MASK) >> ROW_SIZE_SHIFT) << 12;
 
-	rdev->config.cayman.backend_map = gb_backend_map;
-	WREG32(GB_BACKEND_MAP, gb_backend_map);
+	tmp = 0;
+	for (i = (rdev->config.cayman.max_shader_engines - 1); i >= 0; i--) {
+		u32 rb_disable_bitmap;
+
+		WREG32(GRBM_GFX_INDEX, INSTANCE_BROADCAST_WRITES | SE_INDEX(i));
+		WREG32(RLC_GFX_INDEX, INSTANCE_BROADCAST_WRITES | SE_INDEX(i));
+		rb_disable_bitmap = (RREG32(CC_RB_BACKEND_DISABLE) & 0x00ff0000) >> 16;
+		tmp <<= 4;
+		tmp |= rb_disable_bitmap;
+	}
+	/* enabled RBs are just the ones not disabled :) */
+	disabled_rb_mask = tmp;
+
+	WREG32(GRBM_GFX_INDEX, INSTANCE_BROADCAST_WRITES | SE_BROADCAST_WRITES);
+	WREG32(RLC_GFX_INDEX, INSTANCE_BROADCAST_WRITES | SE_BROADCAST_WRITES);
+
 	WREG32(GB_ADDR_CONFIG, gb_addr_config);
 	WREG32(DMIF_ADDR_CONFIG, gb_addr_config);
 	WREG32(HDP_ADDR_CONFIG, gb_addr_config);
 
-	/* primary versions */
-	WREG32(CC_RB_BACKEND_DISABLE, cc_rb_backend_disable);
-	WREG32(CC_SYS_RB_BACKEND_DISABLE, cc_rb_backend_disable);
-	WREG32(CC_GC_SHADER_PIPE_CONFIG, cc_gc_shader_pipe_config);
+	tmp = gb_addr_config & NUM_PIPES_MASK;
+	tmp = r6xx_remap_render_backend(rdev, tmp,
+					rdev->config.cayman.max_backends_per_se *
+					rdev->config.cayman.max_shader_engines,
+					CAYMAN_MAX_BACKENDS, disabled_rb_mask);
+	WREG32(GB_BACKEND_MAP, tmp);
 
+	cgts_tcc_disable = 0xffff0000;
+	for (i = 0; i < rdev->config.cayman.max_texture_channel_caches; i++)
+		cgts_tcc_disable &= ~(1 << (16 + i));
 	WREG32(CGTS_TCC_DISABLE, cgts_tcc_disable);
 	WREG32(CGTS_SYS_TCC_DISABLE, cgts_tcc_disable);
-
-	/* user versions */
-	WREG32(GC_USER_RB_BACKEND_DISABLE, cc_rb_backend_disable);
-	WREG32(GC_USER_SYS_RB_BACKEND_DISABLE, cc_rb_backend_disable);
-	WREG32(GC_USER_SHADER_PIPE_CONFIG, cc_gc_shader_pipe_config);
-
 	WREG32(CGTS_USER_SYS_TCC_DISABLE, cgts_tcc_disable);
 	WREG32(CGTS_USER_TCC_DISABLE, cgts_tcc_disable);
 
@@ -1580,6 +1303,10 @@
 	if (r)
 		return r;
 
+	r = r600_audio_init(rdev);
+	if (r)
+		return r;
+
 	return 0;
 }
 
@@ -1606,6 +1333,7 @@
 
 int cayman_suspend(struct radeon_device *rdev)
 {
+	r600_audio_fini(rdev);
 	/* FIXME: we should wait for ring to be empty */
 	radeon_ib_pool_suspend(rdev);
 	radeon_vm_manager_suspend(rdev);
diff --git a/drivers/gpu/drm/radeon/nid.h b/drivers/gpu/drm/radeon/nid.h
index 2aa7046..a0b9806 100644
--- a/drivers/gpu/drm/radeon/nid.h
+++ b/drivers/gpu/drm/radeon/nid.h
@@ -41,6 +41,9 @@
 #define CAYMAN_MAX_TCC               16
 #define CAYMAN_MAX_TCC_MASK          0xFF
 
+#define CAYMAN_GB_ADDR_CONFIG_GOLDEN       0x02011003
+#define ARUBA_GB_ADDR_CONFIG_GOLDEN        0x12010001
+
 #define DMIF_ADDR_CONFIG  				0xBD4
 #define	SRBM_GFX_CNTL				        0x0E44
 #define		RINGID(x)					(((x) & 0x3) << 0)
@@ -148,6 +151,8 @@
 #define	CGTS_SYS_TCC_DISABLE				0x3F90
 #define	CGTS_USER_SYS_TCC_DISABLE			0x3F94
 
+#define RLC_GFX_INDEX           			0x3FC4
+
 #define	CONFIG_MEMSIZE					0x5428
 
 #define HDP_MEM_COHERENCY_FLUSH_CNTL			0x5480
@@ -212,6 +217,12 @@
 #define		SOFT_RESET_VGT					(1 << 14)
 #define		SOFT_RESET_IA					(1 << 15)
 
+#define GRBM_GFX_INDEX          			0x802C
+#define		INSTANCE_INDEX(x)			((x) << 0)
+#define		SE_INDEX(x)     			((x) << 16)
+#define		INSTANCE_BROADCAST_WRITES      		(1 << 30)
+#define		SE_BROADCAST_WRITES      		(1 << 31)
+
 #define	SCRATCH_REG0					0x8500
 #define	SCRATCH_REG1					0x8504
 #define	SCRATCH_REG2					0x8508
diff --git a/drivers/gpu/drm/radeon/r600.c b/drivers/gpu/drm/radeon/r600.c
index f388a1d..bff6272 100644
--- a/drivers/gpu/drm/radeon/r600.c
+++ b/drivers/gpu/drm/radeon/r600.c
@@ -1376,113 +1376,51 @@
 	return r600_gpu_soft_reset(rdev);
 }
 
-static u32 r600_get_tile_pipe_to_backend_map(u32 num_tile_pipes,
-					     u32 num_backends,
-					     u32 backend_disable_mask)
+u32 r6xx_remap_render_backend(struct radeon_device *rdev,
+			      u32 tiling_pipe_num,
+			      u32 max_rb_num,
+			      u32 total_max_rb_num,
+			      u32 disabled_rb_mask)
 {
-	u32 backend_map = 0;
-	u32 enabled_backends_mask;
-	u32 enabled_backends_count;
-	u32 cur_pipe;
-	u32 swizzle_pipe[R6XX_MAX_PIPES];
-	u32 cur_backend;
-	u32 i;
+	u32 rendering_pipe_num, rb_num_width, req_rb_num;
+	u32 pipe_rb_ratio, pipe_rb_remain;
+	u32 data = 0, mask = 1 << (max_rb_num - 1);
+	unsigned i, j;
 
-	if (num_tile_pipes > R6XX_MAX_PIPES)
-		num_tile_pipes = R6XX_MAX_PIPES;
-	if (num_tile_pipes < 1)
-		num_tile_pipes = 1;
-	if (num_backends > R6XX_MAX_BACKENDS)
-		num_backends = R6XX_MAX_BACKENDS;
-	if (num_backends < 1)
-		num_backends = 1;
+	/* mask out the RBs that don't exist on that asic */
+	disabled_rb_mask |= (0xff << max_rb_num) & 0xff;
 
-	enabled_backends_mask = 0;
-	enabled_backends_count = 0;
-	for (i = 0; i < R6XX_MAX_BACKENDS; ++i) {
-		if (((backend_disable_mask >> i) & 1) == 0) {
-			enabled_backends_mask |= (1 << i);
-			++enabled_backends_count;
+	rendering_pipe_num = 1 << tiling_pipe_num;
+	req_rb_num = total_max_rb_num - r600_count_pipe_bits(disabled_rb_mask);
+	BUG_ON(rendering_pipe_num < req_rb_num);
+
+	pipe_rb_ratio = rendering_pipe_num / req_rb_num;
+	pipe_rb_remain = rendering_pipe_num - pipe_rb_ratio * req_rb_num;
+
+	if (rdev->family <= CHIP_RV740) {
+		/* r6xx/r7xx */
+		rb_num_width = 2;
+	} else {
+		/* eg+ */
+		rb_num_width = 4;
+	}
+
+	for (i = 0; i < max_rb_num; i++) {
+		if (!(mask & disabled_rb_mask)) {
+			for (j = 0; j < pipe_rb_ratio; j++) {
+				data <<= rb_num_width;
+				data |= max_rb_num - i - 1;
+			}
+			if (pipe_rb_remain) {
+				data <<= rb_num_width;
+				data |= max_rb_num - i - 1;
+				pipe_rb_remain--;
+			}
 		}
-		if (enabled_backends_count == num_backends)
-			break;
+		mask >>= 1;
 	}
 
-	if (enabled_backends_count == 0) {
-		enabled_backends_mask = 1;
-		enabled_backends_count = 1;
-	}
-
-	if (enabled_backends_count != num_backends)
-		num_backends = enabled_backends_count;
-
-	memset((uint8_t *)&swizzle_pipe[0], 0, sizeof(u32) * R6XX_MAX_PIPES);
-	switch (num_tile_pipes) {
-	case 1:
-		swizzle_pipe[0] = 0;
-		break;
-	case 2:
-		swizzle_pipe[0] = 0;
-		swizzle_pipe[1] = 1;
-		break;
-	case 3:
-		swizzle_pipe[0] = 0;
-		swizzle_pipe[1] = 1;
-		swizzle_pipe[2] = 2;
-		break;
-	case 4:
-		swizzle_pipe[0] = 0;
-		swizzle_pipe[1] = 1;
-		swizzle_pipe[2] = 2;
-		swizzle_pipe[3] = 3;
-		break;
-	case 5:
-		swizzle_pipe[0] = 0;
-		swizzle_pipe[1] = 1;
-		swizzle_pipe[2] = 2;
-		swizzle_pipe[3] = 3;
-		swizzle_pipe[4] = 4;
-		break;
-	case 6:
-		swizzle_pipe[0] = 0;
-		swizzle_pipe[1] = 2;
-		swizzle_pipe[2] = 4;
-		swizzle_pipe[3] = 5;
-		swizzle_pipe[4] = 1;
-		swizzle_pipe[5] = 3;
-		break;
-	case 7:
-		swizzle_pipe[0] = 0;
-		swizzle_pipe[1] = 2;
-		swizzle_pipe[2] = 4;
-		swizzle_pipe[3] = 6;
-		swizzle_pipe[4] = 1;
-		swizzle_pipe[5] = 3;
-		swizzle_pipe[6] = 5;
-		break;
-	case 8:
-		swizzle_pipe[0] = 0;
-		swizzle_pipe[1] = 2;
-		swizzle_pipe[2] = 4;
-		swizzle_pipe[3] = 6;
-		swizzle_pipe[4] = 1;
-		swizzle_pipe[5] = 3;
-		swizzle_pipe[6] = 5;
-		swizzle_pipe[7] = 7;
-		break;
-	}
-
-	cur_backend = 0;
-	for (cur_pipe = 0; cur_pipe < num_tile_pipes; ++cur_pipe) {
-		while (((1 << cur_backend) & enabled_backends_mask) == 0)
-			cur_backend = (cur_backend + 1) % R6XX_MAX_BACKENDS;
-
-		backend_map |= (u32)(((cur_backend & 3) << (swizzle_pipe[cur_pipe] * 2)));
-
-		cur_backend = (cur_backend + 1) % R6XX_MAX_BACKENDS;
-	}
-
-	return backend_map;
+	return data;
 }
 
 int r600_count_pipe_bits(uint32_t val)
@@ -1500,7 +1438,6 @@
 {
 	u32 tiling_config;
 	u32 ramcfg;
-	u32 backend_map;
 	u32 cc_rb_backend_disable;
 	u32 cc_gc_shader_pipe_config;
 	u32 tmp;
@@ -1511,8 +1448,9 @@
 	u32 sq_thread_resource_mgmt = 0;
 	u32 sq_stack_resource_mgmt_1 = 0;
 	u32 sq_stack_resource_mgmt_2 = 0;
+	u32 disabled_rb_mask;
 
-	/* FIXME: implement */
+	rdev->config.r600.tiling_group_size = 256;
 	switch (rdev->family) {
 	case CHIP_R600:
 		rdev->config.r600.max_pipes = 4;
@@ -1616,10 +1554,7 @@
 	rdev->config.r600.tiling_nbanks = 4 << ((ramcfg & NOOFBANK_MASK) >> NOOFBANK_SHIFT);
 	tiling_config |= BANK_TILING((ramcfg & NOOFBANK_MASK) >> NOOFBANK_SHIFT);
 	tiling_config |= GROUP_SIZE((ramcfg & BURSTLENGTH_MASK) >> BURSTLENGTH_SHIFT);
-	if ((ramcfg & BURSTLENGTH_MASK) >> BURSTLENGTH_SHIFT)
-		rdev->config.r600.tiling_group_size = 512;
-	else
-		rdev->config.r600.tiling_group_size = 256;
+
 	tmp = (ramcfg & NOOFROWS_MASK) >> NOOFROWS_SHIFT;
 	if (tmp > 3) {
 		tiling_config |= ROW_TILING(3);
@@ -1631,32 +1566,36 @@
 	tiling_config |= BANK_SWAPS(1);
 
 	cc_rb_backend_disable = RREG32(CC_RB_BACKEND_DISABLE) & 0x00ff0000;
-	cc_rb_backend_disable |=
-		BACKEND_DISABLE((R6XX_MAX_BACKENDS_MASK << rdev->config.r600.max_backends) & R6XX_MAX_BACKENDS_MASK);
+	tmp = R6XX_MAX_BACKENDS -
+		r600_count_pipe_bits((cc_rb_backend_disable >> 16) & R6XX_MAX_BACKENDS_MASK);
+	if (tmp < rdev->config.r600.max_backends) {
+		rdev->config.r600.max_backends = tmp;
+	}
 
-	cc_gc_shader_pipe_config = RREG32(CC_GC_SHADER_PIPE_CONFIG) & 0xffffff00;
-	cc_gc_shader_pipe_config |=
-		INACTIVE_QD_PIPES((R6XX_MAX_PIPES_MASK << rdev->config.r600.max_pipes) & R6XX_MAX_PIPES_MASK);
-	cc_gc_shader_pipe_config |=
-		INACTIVE_SIMDS((R6XX_MAX_SIMDS_MASK << rdev->config.r600.max_simds) & R6XX_MAX_SIMDS_MASK);
+	cc_gc_shader_pipe_config = RREG32(CC_GC_SHADER_PIPE_CONFIG) & 0x00ffff00;
+	tmp = R6XX_MAX_PIPES -
+		r600_count_pipe_bits((cc_gc_shader_pipe_config >> 8) & R6XX_MAX_PIPES_MASK);
+	if (tmp < rdev->config.r600.max_pipes) {
+		rdev->config.r600.max_pipes = tmp;
+	}
+	tmp = R6XX_MAX_SIMDS -
+		r600_count_pipe_bits((cc_gc_shader_pipe_config >> 16) & R6XX_MAX_SIMDS_MASK);
+	if (tmp < rdev->config.r600.max_simds) {
+		rdev->config.r600.max_simds = tmp;
+	}
 
-	backend_map = r600_get_tile_pipe_to_backend_map(rdev->config.r600.max_tile_pipes,
-							(R6XX_MAX_BACKENDS -
-							 r600_count_pipe_bits((cc_rb_backend_disable &
-									       R6XX_MAX_BACKENDS_MASK) >> 16)),
-							(cc_rb_backend_disable >> 16));
+	disabled_rb_mask = (RREG32(CC_RB_BACKEND_DISABLE) >> 16) & R6XX_MAX_BACKENDS_MASK;
+	tmp = (tiling_config & PIPE_TILING__MASK) >> PIPE_TILING__SHIFT;
+	tmp = r6xx_remap_render_backend(rdev, tmp, rdev->config.r600.max_backends,
+					R6XX_MAX_BACKENDS, disabled_rb_mask);
+	tiling_config |= tmp << 16;
+	rdev->config.r600.backend_map = tmp;
+
 	rdev->config.r600.tile_config = tiling_config;
-	rdev->config.r600.backend_map = backend_map;
-	tiling_config |= BACKEND_MAP(backend_map);
 	WREG32(GB_TILING_CONFIG, tiling_config);
 	WREG32(DCP_TILING_CONFIG, tiling_config & 0xffff);
 	WREG32(HDP_TILING_CONFIG, tiling_config & 0xffff);
 
-	/* Setup pipes */
-	WREG32(CC_RB_BACKEND_DISABLE, cc_rb_backend_disable);
-	WREG32(CC_GC_SHADER_PIPE_CONFIG, cc_gc_shader_pipe_config);
-	WREG32(GC_USER_SHADER_PIPE_CONFIG, cc_gc_shader_pipe_config);
-
 	tmp = R6XX_MAX_PIPES - r600_count_pipe_bits((cc_gc_shader_pipe_config & INACTIVE_QD_PIPES_MASK) >> 8);
 	WREG32(VGT_OUT_DEALLOC_CNTL, (tmp * 4) & DEALLOC_DIST_MASK);
 	WREG32(VGT_VERTEX_REUSE_BLOCK_CNTL, ((tmp * 4) - 2) & VTX_REUSE_DEPTH_MASK);
@@ -1900,6 +1839,7 @@
 	WREG32(PA_CL_ENHANCE, (CLIP_VTX_REORDER_ENA |
 			       NUM_CLIP_SEQ(3)));
 	WREG32(PA_SC_ENHANCE, FORCE_EOV_MAX_CLK_CNT(4095));
+	WREG32(VC_ENHANCE, 0);
 }
 
 
@@ -2487,6 +2427,12 @@
 	if (r)
 		return r;
 
+	r = r600_audio_init(rdev);
+	if (r) {
+		DRM_ERROR("radeon: audio init failed\n");
+		return r;
+	}
+
 	return 0;
 }
 
@@ -2523,12 +2469,6 @@
 		return r;
 	}
 
-	r = r600_audio_init(rdev);
-	if (r) {
-		DRM_ERROR("radeon: audio resume failed\n");
-		return r;
-	}
-
 	return r;
 }
 
@@ -2638,9 +2578,6 @@
 		rdev->accel_working = false;
 	}
 
-	r = r600_audio_init(rdev);
-	if (r)
-		return r; /* TODO error handling */
 	return 0;
 }
 
diff --git a/drivers/gpu/drm/radeon/r600_audio.c b/drivers/gpu/drm/radeon/r600_audio.c
index 7c4fa77..79b5591 100644
--- a/drivers/gpu/drm/radeon/r600_audio.c
+++ b/drivers/gpu/drm/radeon/r600_audio.c
@@ -57,7 +57,7 @@
  */
 static int r600_audio_chipset_supported(struct radeon_device *rdev)
 {
-	return (rdev->family >= CHIP_R600 && !ASIC_IS_DCE5(rdev))
+	return (rdev->family >= CHIP_R600 && !ASIC_IS_DCE6(rdev))
 		|| rdev->family == CHIP_RS600
 		|| rdev->family == CHIP_RS690
 		|| rdev->family == CHIP_RS740;
@@ -192,6 +192,7 @@
 	struct radeon_device *rdev = dev->dev_private;
 	struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
 	struct radeon_encoder_atom_dig *dig = radeon_encoder->enc_priv;
+	struct radeon_crtc *radeon_crtc = to_radeon_crtc(encoder->crtc);
 	int base_rate = 48000;
 
 	switch (radeon_encoder->encoder_id) {
@@ -217,8 +218,8 @@
 		WREG32(EVERGREEN_AUDIO_PLL1_DIV, clock * 10);
 		WREG32(EVERGREEN_AUDIO_PLL1_UNK, 0x00000071);
 
-		/* Some magic trigger or src sel? */
-		WREG32_P(0x5ac, 0x01, ~0x77);
+		/* Select DTO source */
+		WREG32(0x5ac, radeon_crtc->crtc_id);
 	} else {
 		switch (dig->dig_encoder) {
 		case 0:
diff --git a/drivers/gpu/drm/radeon/r600_cs.c b/drivers/gpu/drm/radeon/r600_cs.c
index 0133f5f..ca87f7a 100644
--- a/drivers/gpu/drm/radeon/r600_cs.c
+++ b/drivers/gpu/drm/radeon/r600_cs.c
@@ -2079,6 +2079,48 @@
 			return -EINVAL;
 		}
 		break;
+	case PACKET3_STRMOUT_BASE_UPDATE:
+		if (p->family < CHIP_RV770) {
+			DRM_ERROR("STRMOUT_BASE_UPDATE only supported on 7xx\n");
+			return -EINVAL;
+		}
+		if (pkt->count != 1) {
+			DRM_ERROR("bad STRMOUT_BASE_UPDATE packet count\n");
+			return -EINVAL;
+		}
+		if (idx_value > 3) {
+			DRM_ERROR("bad STRMOUT_BASE_UPDATE index\n");
+			return -EINVAL;
+		}
+		{
+			u64 offset;
+
+			r = r600_cs_packet_next_reloc(p, &reloc);
+			if (r) {
+				DRM_ERROR("bad STRMOUT_BASE_UPDATE reloc\n");
+				return -EINVAL;
+			}
+
+			if (reloc->robj != track->vgt_strmout_bo[idx_value]) {
+				DRM_ERROR("bad STRMOUT_BASE_UPDATE, bo does not match\n");
+				return -EINVAL;
+			}
+
+			offset = radeon_get_ib_value(p, idx+1) << 8;
+			if (offset != track->vgt_strmout_bo_offset[idx_value]) {
+				DRM_ERROR("bad STRMOUT_BASE_UPDATE, bo offset does not match: 0x%llx, 0x%x\n",
+					  offset, track->vgt_strmout_bo_offset[idx_value]);
+				return -EINVAL;
+			}
+
+			if ((offset + 4) > radeon_bo_size(reloc->robj)) {
+				DRM_ERROR("bad STRMOUT_BASE_UPDATE bo too small: 0x%llx, 0x%lx\n",
+					  offset + 4, radeon_bo_size(reloc->robj));
+				return -EINVAL;
+			}
+			ib[idx+1] += (u32)((reloc->lobj.gpu_offset >> 8) & 0xffffffff);
+		}
+		break;
 	case PACKET3_SURFACE_BASE_UPDATE:
 		if (p->family >= CHIP_RV770 || p->family == CHIP_R600) {
 			DRM_ERROR("bad SURFACE_BASE_UPDATE\n");
diff --git a/drivers/gpu/drm/radeon/r600_hdmi.c b/drivers/gpu/drm/radeon/r600_hdmi.c
index 226379e..82a0a4c 100644
--- a/drivers/gpu/drm/radeon/r600_hdmi.c
+++ b/drivers/gpu/drm/radeon/r600_hdmi.c
@@ -322,9 +322,6 @@
 	struct radeon_encoder_atom_dig *dig = radeon_encoder->enc_priv;
 	uint32_t offset;
 
-	if (ASIC_IS_DCE5(rdev))
-		return;
-
 	/* Silent, r600_hdmi_enable will raise WARN for us */
 	if (!dig->afmt->enabled)
 		return;
@@ -348,7 +345,6 @@
 		WREG32(HDMI0_AUDIO_PACKET_CONTROL + offset,
 		       HDMI0_AUDIO_SAMPLE_SEND | /* send audio packets */
 		       HDMI0_AUDIO_DELAY_EN(1) | /* default audio delay */
-		       HDMI0_AUDIO_SEND_MAX_PACKETS | /* send NULL packets if no audio is available */
 		       HDMI0_AUDIO_PACKETS_PER_LINE(3) | /* should be sufficient for all audio modes and small enough for all hblanks */
 		       HDMI0_60958_CS_UPDATE); /* allow 60958 channel status fields to be updated */
 	}
@@ -484,7 +480,7 @@
 	uint32_t offset;
 	u32 hdmi;
 
-	if (ASIC_IS_DCE5(rdev))
+	if (ASIC_IS_DCE6(rdev))
 		return;
 
 	/* Silent, r600_hdmi_enable will raise WARN for us */
@@ -544,7 +540,7 @@
 	struct radeon_encoder_atom_dig *dig = radeon_encoder->enc_priv;
 	uint32_t offset;
 
-	if (ASIC_IS_DCE5(rdev))
+	if (ASIC_IS_DCE6(rdev))
 		return;
 
 	/* Called for ATOM_ENCODER_MODE_HDMI only */
diff --git a/drivers/gpu/drm/radeon/r600d.h b/drivers/gpu/drm/radeon/r600d.h
index 15bd3b2..025fd5b 100644
--- a/drivers/gpu/drm/radeon/r600d.h
+++ b/drivers/gpu/drm/radeon/r600d.h
@@ -219,6 +219,8 @@
 #define		BACKEND_MAP(x)					((x) << 16)
 
 #define GB_TILING_CONFIG				0x98F0
+#define     PIPE_TILING__SHIFT              1
+#define     PIPE_TILING__MASK               0x0000000e
 
 #define	GC_USER_SHADER_PIPE_CONFIG			0x8954
 #define		INACTIVE_QD_PIPES(x)				((x) << 8)
@@ -483,6 +485,7 @@
 #define		TC_L2_SIZE(x)					((x)<<5)
 #define		L2_DISABLE_LATE_HIT				(1<<9)
 
+#define	VC_ENHANCE					0x9714
 
 #define	VGT_CACHE_INVALIDATION				0x88C4
 #define		CACHE_INVALIDATION(x)				((x)<<0)
@@ -1161,6 +1164,7 @@
 #define	PACKET3_SET_CTL_CONST				0x6F
 #define		PACKET3_SET_CTL_CONST_OFFSET			0x0003cff0
 #define		PACKET3_SET_CTL_CONST_END			0x0003e200
+#define	PACKET3_STRMOUT_BASE_UPDATE			0x72 /* r7xx */
 #define	PACKET3_SURFACE_BASE_UPDATE			0x73
 
 
diff --git a/drivers/gpu/drm/radeon/radeon.h b/drivers/gpu/drm/radeon/radeon.h
index 1dc3a4a..fefcca5 100644
--- a/drivers/gpu/drm/radeon/radeon.h
+++ b/drivers/gpu/drm/radeon/radeon.h
@@ -346,6 +346,9 @@
 	/* Constant after initialization */
 	struct radeon_device		*rdev;
 	struct drm_gem_object		gem_base;
+
+	struct ttm_bo_kmap_obj dma_buf_vmap;
+	int vmapping_count;
 };
 #define gem_to_radeon_bo(gobj) container_of((gobj), struct radeon_bo, gem_base)
 
@@ -848,7 +851,6 @@
 	s32			priority;
 };
 
-extern int radeon_cs_update_pages(struct radeon_cs_parser *p, int pg_idx);
 extern int radeon_cs_finish_pages(struct radeon_cs_parser *p);
 extern u32 radeon_get_ib_value(struct radeon_cs_parser *p, int idx);
 
@@ -1372,9 +1374,9 @@
 
 struct si_asic {
 	unsigned max_shader_engines;
-	unsigned max_pipes_per_simd;
 	unsigned max_tile_pipes;
-	unsigned max_simds_per_se;
+	unsigned max_cu_per_sh;
+	unsigned max_sh_per_se;
 	unsigned max_backends_per_se;
 	unsigned max_texture_channel_caches;
 	unsigned max_gprs;
@@ -1385,7 +1387,6 @@
 	unsigned sc_hiz_tile_fifo_size;
 	unsigned sc_earlyz_tile_fifo_size;
 
-	unsigned num_shader_engines;
 	unsigned num_tile_pipes;
 	unsigned num_backends_per_se;
 	unsigned backend_disable_mask_per_asic;
@@ -1846,6 +1847,11 @@
 extern void r600_hdmi_enable(struct drm_encoder *encoder);
 extern void r600_hdmi_disable(struct drm_encoder *encoder);
 extern void r600_hdmi_setmode(struct drm_encoder *encoder, struct drm_display_mode *mode);
+extern u32 r6xx_remap_render_backend(struct radeon_device *rdev,
+				     u32 tiling_pipe_num,
+				     u32 max_rb_num,
+				     u32 total_max_rb_num,
+				     u32 disabled_rb_mask);
 
 /*
  * evergreen functions used by radeon_encoder.c
diff --git a/drivers/gpu/drm/radeon/radeon_atombios.c b/drivers/gpu/drm/radeon/radeon_atombios.c
index f6e69b8..b1e3820 100644
--- a/drivers/gpu/drm/radeon/radeon_atombios.c
+++ b/drivers/gpu/drm/radeon/radeon_atombios.c
@@ -444,7 +444,9 @@
 	 */
 	if ((dev->pdev->device == 0x9498) &&
 	    (dev->pdev->subsystem_vendor == 0x1682) &&
-	    (dev->pdev->subsystem_device == 0x2452)) {
+	    (dev->pdev->subsystem_device == 0x2452) &&
+	    (i2c_bus->valid == false) &&
+	    !(supported_device & (ATOM_DEVICE_TV_SUPPORT | ATOM_DEVICE_CV_SUPPORT))) {
 		struct radeon_device *rdev = dev->dev_private;
 		*i2c_bus = radeon_lookup_i2c_gpio(rdev, 0x93);
 	}
diff --git a/drivers/gpu/drm/radeon/radeon_cs.c b/drivers/gpu/drm/radeon/radeon_cs.c
index c7d64a7..142f894 100644
--- a/drivers/gpu/drm/radeon/radeon_cs.c
+++ b/drivers/gpu/drm/radeon/radeon_cs.c
@@ -147,6 +147,7 @@
 					   sync_to_ring, p->ring);
 }
 
+/* XXX: note that this is called from the legacy UMS CS ioctl as well */
 int radeon_cs_parser_init(struct radeon_cs_parser *p, void *data)
 {
 	struct drm_radeon_cs *cs = data;
@@ -245,23 +246,25 @@
 		}
 	}
 
-	if ((p->cs_flags & RADEON_CS_USE_VM) &&
-	    !p->rdev->vm_manager.enabled) {
-		DRM_ERROR("VM not active on asic!\n");
-		return -EINVAL;
+	/* these are KMS only */
+	if (p->rdev) {
+		if ((p->cs_flags & RADEON_CS_USE_VM) &&
+		    !p->rdev->vm_manager.enabled) {
+			DRM_ERROR("VM not active on asic!\n");
+			return -EINVAL;
+		}
+
+		/* we only support VM on SI+ */
+		if ((p->rdev->family >= CHIP_TAHITI) &&
+		    ((p->cs_flags & RADEON_CS_USE_VM) == 0)) {
+			DRM_ERROR("VM required on SI+!\n");
+			return -EINVAL;
+		}
+
+		if (radeon_cs_get_ring(p, ring, priority))
+			return -EINVAL;
 	}
 
-	/* we only support VM on SI+ */
-	if ((p->rdev->family >= CHIP_TAHITI) &&
-	    ((p->cs_flags & RADEON_CS_USE_VM) == 0)) {
-		DRM_ERROR("VM required on SI+!\n");
-		return -EINVAL;
-	}
-
-	if (radeon_cs_get_ring(p, ring, priority))
-		return -EINVAL;
-
-
 	/* deal with non-vm */
 	if ((p->chunk_ib_idx != -1) &&
 	    ((p->cs_flags & RADEON_CS_USE_VM) == 0) &&
@@ -580,7 +583,7 @@
 	return 0;
 }
 
-int radeon_cs_update_pages(struct radeon_cs_parser *p, int pg_idx)
+static int radeon_cs_update_pages(struct radeon_cs_parser *p, int pg_idx)
 {
 	int new_page;
 	struct radeon_cs_chunk *ibc = &p->chunks[p->chunk_ib_idx];
@@ -623,3 +626,28 @@
 
 	return new_page;
 }
+
+u32 radeon_get_ib_value(struct radeon_cs_parser *p, int idx)
+{
+	struct radeon_cs_chunk *ibc = &p->chunks[p->chunk_ib_idx];
+	u32 pg_idx, pg_offset;
+	u32 idx_value = 0;
+	int new_page;
+
+	pg_idx = (idx * 4) / PAGE_SIZE;
+	pg_offset = (idx * 4) % PAGE_SIZE;
+
+	if (ibc->kpage_idx[0] == pg_idx)
+		return ibc->kpage[0][pg_offset/4];
+	if (ibc->kpage_idx[1] == pg_idx)
+		return ibc->kpage[1][pg_offset/4];
+
+	new_page = radeon_cs_update_pages(p, pg_idx);
+	if (new_page < 0) {
+		p->parser_error = new_page;
+		return 0;
+	}
+
+	idx_value = ibc->kpage[new_page][pg_offset/4];
+	return idx_value;
+}
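
Moving radeon_get_ib_value() into radeon_cs.c above lets radeon_cs_update_pages() become static; the getter keeps only two kmapped pages of the IB chunk around and falls back to the page-update path on a miss. A minimal user-space sketch of the two-slot lookup follows; the page-fill path is stubbed out and the data is hypothetical.

    #include <stdint.h>
    #include <stdio.h>

    #define PAGE_SIZE 4096u

    struct ib_chunk {
        uint32_t *kpage[2];   /* two cached pages of the IB */
        int kpage_idx[2];     /* which page index each slot holds, -1 if empty */
    };

    static uint32_t get_ib_value(struct ib_chunk *ibc, int idx)
    {
        uint32_t pg_idx = (idx * 4) / PAGE_SIZE;
        uint32_t pg_offset = (idx * 4) % PAGE_SIZE;

        if (ibc->kpage_idx[0] == (int)pg_idx)
            return ibc->kpage[0][pg_offset / 4];
        if (ibc->kpage_idx[1] == (int)pg_idx)
            return ibc->kpage[1][pg_offset / 4];

        /* here the driver would call radeon_cs_update_pages() to map pg_idx */
        return 0;
    }

    int main(void)
    {
        uint32_t page0[PAGE_SIZE / 4] = { [10] = 0xdeadbeef };
        struct ib_chunk ibc = { .kpage = { page0, NULL }, .kpage_idx = { 0, -1 } };

        printf("ib[10] = 0x%08x\n", get_ib_value(&ibc, 10));
        return 0;
    }
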
diff --git a/drivers/gpu/drm/radeon/radeon_drv.c b/drivers/gpu/drm/radeon/radeon_drv.c
index f0bb2b5..2c4d53f 100644
--- a/drivers/gpu/drm/radeon/radeon_drv.c
+++ b/drivers/gpu/drm/radeon/radeon_drv.c
@@ -57,9 +57,11 @@
  *   2.13.0 - virtual memory support, streamout
  *   2.14.0 - add evergreen tiling informations
  *   2.15.0 - add max_pipes query
+ *   2.16.0 - fix evergreen 2D tiled surface calculation
+ *   2.17.0 - add STRMOUT_BASE_UPDATE for r7xx
  */
 #define KMS_DRIVER_MAJOR	2
-#define KMS_DRIVER_MINOR	15
+#define KMS_DRIVER_MINOR	17
 #define KMS_DRIVER_PATCHLEVEL	0
 int radeon_driver_load_kms(struct drm_device *dev, unsigned long flags);
 int radeon_driver_unload_kms(struct drm_device *dev);
diff --git a/drivers/gpu/drm/radeon/radeon_gart.c b/drivers/gpu/drm/radeon/radeon_gart.c
index 79db56e..59d4493 100644
--- a/drivers/gpu/drm/radeon/radeon_gart.c
+++ b/drivers/gpu/drm/radeon/radeon_gart.c
@@ -476,12 +476,18 @@
 
 	mutex_lock(&vm->mutex);
 	if (last_pfn > vm->last_pfn) {
-		/* grow va space 32M by 32M */
-		unsigned align = ((32 << 20) >> 12) - 1;
+		/* release mutex and lock in right order */
+		mutex_unlock(&vm->mutex);
 		radeon_mutex_lock(&rdev->cs_mutex);
-		radeon_vm_unbind_locked(rdev, vm);
+		mutex_lock(&vm->mutex);
+		/* and check again */
+		if (last_pfn > vm->last_pfn) {
+			/* grow va space 32M by 32M */
+			unsigned align = ((32 << 20) >> 12) - 1;
+			radeon_vm_unbind_locked(rdev, vm);
+			vm->last_pfn = (last_pfn + align) & ~align;
+		}
 		radeon_mutex_unlock(&rdev->cs_mutex);
-		vm->last_pfn = (last_pfn + align) & ~align;
 	}
 	head = &vm->va;
 	last_offset = 0;
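
The hunk above reorders the locking so cs_mutex is always taken before vm->mutex: when the grow path needs the second lock it drops vm->mutex, acquires both in the documented order, and re-checks the condition, since it may have changed while neither lock was held. A minimal pthread sketch of that pattern is below; the lock names and the grow action are illustrative only.

    #include <pthread.h>
    #include <stdio.h>

    static pthread_mutex_t cs_mutex = PTHREAD_MUTEX_INITIALIZER;
    static pthread_mutex_t vm_mutex = PTHREAD_MUTEX_INITIALIZER;
    static unsigned last_pfn;

    static void grow_va_space(unsigned new_last_pfn)
    {
        pthread_mutex_lock(&vm_mutex);
        if (new_last_pfn > last_pfn) {
            /* release and re-acquire in the right order */
            pthread_mutex_unlock(&vm_mutex);
            pthread_mutex_lock(&cs_mutex);
            pthread_mutex_lock(&vm_mutex);
            /* and check again */
            if (new_last_pfn > last_pfn)
                last_pfn = new_last_pfn;   /* stand-in for the unbind + grow */
            pthread_mutex_unlock(&cs_mutex);
        }
        pthread_mutex_unlock(&vm_mutex);
    }

    int main(void)
    {
        grow_va_space(1024);
        printf("last_pfn = %u\n", last_pfn);
        return 0;
    }
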
@@ -595,8 +601,8 @@
 	if (bo_va == NULL)
 		return 0;
 
-	mutex_lock(&vm->mutex);
 	radeon_mutex_lock(&rdev->cs_mutex);
+	mutex_lock(&vm->mutex);
 	radeon_vm_bo_update_pte(rdev, vm, bo, NULL);
 	radeon_mutex_unlock(&rdev->cs_mutex);
 	list_del(&bo_va->vm_list);
@@ -641,9 +647,8 @@
 	struct radeon_bo_va *bo_va, *tmp;
 	int r;
 
-	mutex_lock(&vm->mutex);
-
 	radeon_mutex_lock(&rdev->cs_mutex);
+	mutex_lock(&vm->mutex);
 	radeon_vm_unbind_locked(rdev, vm);
 	radeon_mutex_unlock(&rdev->cs_mutex);
 
diff --git a/drivers/gpu/drm/radeon/radeon_kms.c b/drivers/gpu/drm/radeon/radeon_kms.c
index f1016a5..5c58d7d 100644
--- a/drivers/gpu/drm/radeon/radeon_kms.c
+++ b/drivers/gpu/drm/radeon/radeon_kms.c
@@ -273,7 +273,7 @@
 		break;
 	case RADEON_INFO_MAX_PIPES:
 		if (rdev->family >= CHIP_TAHITI)
-			value = rdev->config.si.max_pipes_per_simd;
+			value = rdev->config.si.max_cu_per_sh;
 		else if (rdev->family >= CHIP_CAYMAN)
 			value = rdev->config.cayman.max_pipes_per_simd;
 		else if (rdev->family >= CHIP_CEDAR)
diff --git a/drivers/gpu/drm/radeon/radeon_pm.c b/drivers/gpu/drm/radeon/radeon_pm.c
index 0882554..5b37e28 100644
--- a/drivers/gpu/drm/radeon/radeon_pm.c
+++ b/drivers/gpu/drm/radeon/radeon_pm.c
@@ -801,9 +801,13 @@
 		int i;
 
 		for (i = 0; i < RADEON_NUM_RINGS; ++i) {
-			not_processed += radeon_fence_count_emitted(rdev, i);
-			if (not_processed >= 3)
-				break;
+			struct radeon_ring *ring = &rdev->ring[i];
+
+			if (ring->ready) {
+				not_processed += radeon_fence_count_emitted(rdev, i);
+				if (not_processed >= 3)
+					break;
+			}
 		}
 
 		if (not_processed >= 3) { /* should upclock */
diff --git a/drivers/gpu/drm/radeon/radeon_prime.c b/drivers/gpu/drm/radeon/radeon_prime.c
index b8f835d..6bef46a 100644
--- a/drivers/gpu/drm/radeon/radeon_prime.c
+++ b/drivers/gpu/drm/radeon/radeon_prime.c
@@ -85,6 +85,47 @@
 
 }
 
+static int radeon_gem_prime_mmap(struct dma_buf *dma_buf, struct vm_area_struct *vma)
+{
+	return -EINVAL;
+}
+
+static void *radeon_gem_prime_vmap(struct dma_buf *dma_buf)
+{
+	struct radeon_bo *bo = dma_buf->priv;
+	struct drm_device *dev = bo->rdev->ddev;
+	int ret;
+
+	mutex_lock(&dev->struct_mutex);
+	if (bo->vmapping_count) {
+		bo->vmapping_count++;
+		goto out_unlock;
+	}
+
+	ret = ttm_bo_kmap(&bo->tbo, 0, bo->tbo.num_pages,
+			  &bo->dma_buf_vmap);
+	if (ret) {
+		mutex_unlock(&dev->struct_mutex);
+		return ERR_PTR(ret);
+	}
+	bo->vmapping_count = 1;
+out_unlock:
+	mutex_unlock(&dev->struct_mutex);
+	return bo->dma_buf_vmap.virtual;
+}
+
+static void radeon_gem_prime_vunmap(struct dma_buf *dma_buf, void *vaddr)
+{
+	struct radeon_bo *bo = dma_buf->priv;
+	struct drm_device *dev = bo->rdev->ddev;
+
+	mutex_lock(&dev->struct_mutex);
+	bo->vmapping_count--;
+	if (bo->vmapping_count == 0) {
+		ttm_bo_kunmap(&bo->dma_buf_vmap);
+	}
+	mutex_unlock(&dev->struct_mutex);
+}
 const static struct dma_buf_ops radeon_dmabuf_ops =  {
 	.map_dma_buf = radeon_gem_map_dma_buf,
 	.unmap_dma_buf = radeon_gem_unmap_dma_buf,
@@ -93,6 +134,9 @@
 	.kmap_atomic = radeon_gem_kmap_atomic,
 	.kunmap = radeon_gem_kunmap,
 	.kunmap_atomic = radeon_gem_kunmap_atomic,
+	.mmap = radeon_gem_prime_mmap,
+	.vmap = radeon_gem_prime_vmap,
+	.vunmap = radeon_gem_prime_vunmap,
 };
 
 static int radeon_prime_create(struct drm_device *dev,
@@ -125,11 +169,17 @@
 	struct radeon_bo *bo = gem_to_radeon_bo(obj);
 	int ret = 0;
 
-	/* pin buffer into GTT */
-	ret = radeon_bo_pin(bo, RADEON_GEM_DOMAIN_GTT, NULL);
-	if (ret)
+	ret = radeon_bo_reserve(bo, false);
+	if (unlikely(ret != 0))
 		return ERR_PTR(ret);
 
+	/* pin buffer into GTT */
+	ret = radeon_bo_pin(bo, RADEON_GEM_DOMAIN_GTT, NULL);
+	if (ret) {
+		radeon_bo_unreserve(bo);
+		return ERR_PTR(ret);
+	}
+	radeon_bo_unreserve(bo);
 	return dma_buf_export(bo, &radeon_dmabuf_ops, obj->size, flags);
 }
 
diff --git a/drivers/gpu/drm/radeon/radeon_ring.c b/drivers/gpu/drm/radeon/radeon_ring.c
index 493a7be..983658c 100644
--- a/drivers/gpu/drm/radeon/radeon_ring.c
+++ b/drivers/gpu/drm/radeon/radeon_ring.c
@@ -39,31 +39,6 @@
  */
 int radeon_debugfs_sa_init(struct radeon_device *rdev);
 
-u32 radeon_get_ib_value(struct radeon_cs_parser *p, int idx)
-{
-	struct radeon_cs_chunk *ibc = &p->chunks[p->chunk_ib_idx];
-	u32 pg_idx, pg_offset;
-	u32 idx_value = 0;
-	int new_page;
-
-	pg_idx = (idx * 4) / PAGE_SIZE;
-	pg_offset = (idx * 4) % PAGE_SIZE;
-
-	if (ibc->kpage_idx[0] == pg_idx)
-		return ibc->kpage[0][pg_offset/4];
-	if (ibc->kpage_idx[1] == pg_idx)
-		return ibc->kpage[1][pg_offset/4];
-
-	new_page = radeon_cs_update_pages(p, pg_idx);
-	if (new_page < 0) {
-		p->parser_error = new_page;
-		return 0;
-	}
-
-	idx_value = ibc->kpage[new_page][pg_offset/4];
-	return idx_value;
-}
-
 int radeon_ib_get(struct radeon_device *rdev, int ring,
 		  struct radeon_ib *ib, unsigned size)
 {
diff --git a/drivers/gpu/drm/radeon/rs600.c b/drivers/gpu/drm/radeon/rs600.c
index 25f9eef..e95c5e6 100644
--- a/drivers/gpu/drm/radeon/rs600.c
+++ b/drivers/gpu/drm/radeon/rs600.c
@@ -908,12 +908,6 @@
 		return r;
 	}
 
-	r = r600_audio_init(rdev);
-	if (r) {
-		dev_err(rdev->dev, "failed initializing audio\n");
-		return r;
-	}
-
 	r = radeon_ib_pool_start(rdev);
 	if (r)
 		return r;
@@ -922,6 +916,12 @@
 	if (r)
 		return r;
 
+	r = r600_audio_init(rdev);
+	if (r) {
+		dev_err(rdev->dev, "failed initializing audio\n");
+		return r;
+	}
+
 	return 0;
 }
 
diff --git a/drivers/gpu/drm/radeon/rs690.c b/drivers/gpu/drm/radeon/rs690.c
index 3277dde..159b6a4 100644
--- a/drivers/gpu/drm/radeon/rs690.c
+++ b/drivers/gpu/drm/radeon/rs690.c
@@ -637,12 +637,6 @@
 		return r;
 	}
 
-	r = r600_audio_init(rdev);
-	if (r) {
-		dev_err(rdev->dev, "failed initializing audio\n");
-		return r;
-	}
-
 	r = radeon_ib_pool_start(rdev);
 	if (r)
 		return r;
@@ -651,6 +645,12 @@
 	if (r)
 		return r;
 
+	r = r600_audio_init(rdev);
+	if (r) {
+		dev_err(rdev->dev, "failed initializing audio\n");
+		return r;
+	}
+
 	return 0;
 }
 
diff --git a/drivers/gpu/drm/radeon/rv770.c b/drivers/gpu/drm/radeon/rv770.c
index c2f473b..b4f51c5 100644
--- a/drivers/gpu/drm/radeon/rv770.c
+++ b/drivers/gpu/drm/radeon/rv770.c
@@ -151,6 +151,8 @@
 	WREG32(MC_VM_MD_L1_TLB0_CNTL, tmp);
 	WREG32(MC_VM_MD_L1_TLB1_CNTL, tmp);
 	WREG32(MC_VM_MD_L1_TLB2_CNTL, tmp);
+	if (rdev->family == CHIP_RV740)
+		WREG32(MC_VM_MD_L1_TLB3_CNTL, tmp);
 	WREG32(MC_VM_MB_L1_TLB0_CNTL, tmp);
 	WREG32(MC_VM_MB_L1_TLB1_CNTL, tmp);
 	WREG32(MC_VM_MB_L1_TLB2_CNTL, tmp);
@@ -363,180 +365,6 @@
 /*
  * Core functions
  */
-static u32 r700_get_tile_pipe_to_backend_map(struct radeon_device *rdev,
-					     u32 num_tile_pipes,
-					     u32 num_backends,
-					     u32 backend_disable_mask)
-{
-	u32 backend_map = 0;
-	u32 enabled_backends_mask;
-	u32 enabled_backends_count;
-	u32 cur_pipe;
-	u32 swizzle_pipe[R7XX_MAX_PIPES];
-	u32 cur_backend;
-	u32 i;
-	bool force_no_swizzle;
-
-	if (num_tile_pipes > R7XX_MAX_PIPES)
-		num_tile_pipes = R7XX_MAX_PIPES;
-	if (num_tile_pipes < 1)
-		num_tile_pipes = 1;
-	if (num_backends > R7XX_MAX_BACKENDS)
-		num_backends = R7XX_MAX_BACKENDS;
-	if (num_backends < 1)
-		num_backends = 1;
-
-	enabled_backends_mask = 0;
-	enabled_backends_count = 0;
-	for (i = 0; i < R7XX_MAX_BACKENDS; ++i) {
-		if (((backend_disable_mask >> i) & 1) == 0) {
-			enabled_backends_mask |= (1 << i);
-			++enabled_backends_count;
-		}
-		if (enabled_backends_count == num_backends)
-			break;
-	}
-
-	if (enabled_backends_count == 0) {
-		enabled_backends_mask = 1;
-		enabled_backends_count = 1;
-	}
-
-	if (enabled_backends_count != num_backends)
-		num_backends = enabled_backends_count;
-
-	switch (rdev->family) {
-	case CHIP_RV770:
-	case CHIP_RV730:
-		force_no_swizzle = false;
-		break;
-	case CHIP_RV710:
-	case CHIP_RV740:
-	default:
-		force_no_swizzle = true;
-		break;
-	}
-
-	memset((uint8_t *)&swizzle_pipe[0], 0, sizeof(u32) * R7XX_MAX_PIPES);
-	switch (num_tile_pipes) {
-	case 1:
-		swizzle_pipe[0] = 0;
-		break;
-	case 2:
-		swizzle_pipe[0] = 0;
-		swizzle_pipe[1] = 1;
-		break;
-	case 3:
-		if (force_no_swizzle) {
-			swizzle_pipe[0] = 0;
-			swizzle_pipe[1] = 1;
-			swizzle_pipe[2] = 2;
-		} else {
-			swizzle_pipe[0] = 0;
-			swizzle_pipe[1] = 2;
-			swizzle_pipe[2] = 1;
-		}
-		break;
-	case 4:
-		if (force_no_swizzle) {
-			swizzle_pipe[0] = 0;
-			swizzle_pipe[1] = 1;
-			swizzle_pipe[2] = 2;
-			swizzle_pipe[3] = 3;
-		} else {
-			swizzle_pipe[0] = 0;
-			swizzle_pipe[1] = 2;
-			swizzle_pipe[2] = 3;
-			swizzle_pipe[3] = 1;
-		}
-		break;
-	case 5:
-		if (force_no_swizzle) {
-			swizzle_pipe[0] = 0;
-			swizzle_pipe[1] = 1;
-			swizzle_pipe[2] = 2;
-			swizzle_pipe[3] = 3;
-			swizzle_pipe[4] = 4;
-		} else {
-			swizzle_pipe[0] = 0;
-			swizzle_pipe[1] = 2;
-			swizzle_pipe[2] = 4;
-			swizzle_pipe[3] = 1;
-			swizzle_pipe[4] = 3;
-		}
-		break;
-	case 6:
-		if (force_no_swizzle) {
-			swizzle_pipe[0] = 0;
-			swizzle_pipe[1] = 1;
-			swizzle_pipe[2] = 2;
-			swizzle_pipe[3] = 3;
-			swizzle_pipe[4] = 4;
-			swizzle_pipe[5] = 5;
-		} else {
-			swizzle_pipe[0] = 0;
-			swizzle_pipe[1] = 2;
-			swizzle_pipe[2] = 4;
-			swizzle_pipe[3] = 5;
-			swizzle_pipe[4] = 3;
-			swizzle_pipe[5] = 1;
-		}
-		break;
-	case 7:
-		if (force_no_swizzle) {
-			swizzle_pipe[0] = 0;
-			swizzle_pipe[1] = 1;
-			swizzle_pipe[2] = 2;
-			swizzle_pipe[3] = 3;
-			swizzle_pipe[4] = 4;
-			swizzle_pipe[5] = 5;
-			swizzle_pipe[6] = 6;
-		} else {
-			swizzle_pipe[0] = 0;
-			swizzle_pipe[1] = 2;
-			swizzle_pipe[2] = 4;
-			swizzle_pipe[3] = 6;
-			swizzle_pipe[4] = 3;
-			swizzle_pipe[5] = 1;
-			swizzle_pipe[6] = 5;
-		}
-		break;
-	case 8:
-		if (force_no_swizzle) {
-			swizzle_pipe[0] = 0;
-			swizzle_pipe[1] = 1;
-			swizzle_pipe[2] = 2;
-			swizzle_pipe[3] = 3;
-			swizzle_pipe[4] = 4;
-			swizzle_pipe[5] = 5;
-			swizzle_pipe[6] = 6;
-			swizzle_pipe[7] = 7;
-		} else {
-			swizzle_pipe[0] = 0;
-			swizzle_pipe[1] = 2;
-			swizzle_pipe[2] = 4;
-			swizzle_pipe[3] = 6;
-			swizzle_pipe[4] = 3;
-			swizzle_pipe[5] = 1;
-			swizzle_pipe[6] = 7;
-			swizzle_pipe[7] = 5;
-		}
-		break;
-	}
-
-	cur_backend = 0;
-	for (cur_pipe = 0; cur_pipe < num_tile_pipes; ++cur_pipe) {
-		while (((1 << cur_backend) & enabled_backends_mask) == 0)
-			cur_backend = (cur_backend + 1) % R7XX_MAX_BACKENDS;
-
-		backend_map |= (u32)(((cur_backend & 3) << (swizzle_pipe[cur_pipe] * 2)));
-
-		cur_backend = (cur_backend + 1) % R7XX_MAX_BACKENDS;
-	}
-
-	return backend_map;
-}
-
 static void rv770_gpu_init(struct radeon_device *rdev)
 {
 	int i, j, num_qd_pipes;
@@ -552,14 +380,17 @@
 	u32 sq_thread_resource_mgmt;
 	u32 hdp_host_path_cntl;
 	u32 sq_dyn_gpr_size_simd_ab_0;
-	u32 backend_map;
 	u32 gb_tiling_config = 0;
 	u32 cc_rb_backend_disable = 0;
 	u32 cc_gc_shader_pipe_config = 0;
 	u32 mc_arb_ramcfg;
-	u32 db_debug4;
+	u32 db_debug4, tmp;
+	u32 inactive_pipes, shader_pipe_config;
+	u32 disabled_rb_mask;
+	unsigned active_number;
 
 	/* setup chip specs */
+	rdev->config.rv770.tiling_group_size = 256;
 	switch (rdev->family) {
 	case CHIP_RV770:
 		rdev->config.rv770.max_pipes = 4;
@@ -670,33 +501,70 @@
 	/* setup tiling, simd, pipe config */
 	mc_arb_ramcfg = RREG32(MC_ARB_RAMCFG);
 
+	shader_pipe_config = RREG32(CC_GC_SHADER_PIPE_CONFIG);
+	inactive_pipes = (shader_pipe_config & INACTIVE_QD_PIPES_MASK) >> INACTIVE_QD_PIPES_SHIFT;
+	for (i = 0, tmp = 1, active_number = 0; i < R7XX_MAX_PIPES; i++) {
+		if (!(inactive_pipes & tmp)) {
+			active_number++;
+		}
+		tmp <<= 1;
+	}
+	if (active_number == 1) {
+		WREG32(SPI_CONFIG_CNTL, DISABLE_INTERP_1);
+	} else {
+		WREG32(SPI_CONFIG_CNTL, 0);
+	}
+
+	cc_rb_backend_disable = RREG32(CC_RB_BACKEND_DISABLE) & 0x00ff0000;
+	tmp = R7XX_MAX_BACKENDS - r600_count_pipe_bits(cc_rb_backend_disable >> 16);
+	if (tmp < rdev->config.rv770.max_backends) {
+		rdev->config.rv770.max_backends = tmp;
+	}
+
+	cc_gc_shader_pipe_config = RREG32(CC_GC_SHADER_PIPE_CONFIG) & 0xffffff00;
+	tmp = R7XX_MAX_PIPES - r600_count_pipe_bits((cc_gc_shader_pipe_config >> 8) & R7XX_MAX_PIPES_MASK);
+	if (tmp < rdev->config.rv770.max_pipes) {
+		rdev->config.rv770.max_pipes = tmp;
+	}
+	tmp = R7XX_MAX_SIMDS - r600_count_pipe_bits((cc_gc_shader_pipe_config >> 16) & R7XX_MAX_SIMDS_MASK);
+	if (tmp < rdev->config.rv770.max_simds) {
+		rdev->config.rv770.max_simds = tmp;
+	}
+
 	switch (rdev->config.rv770.max_tile_pipes) {
 	case 1:
 	default:
-		gb_tiling_config |= PIPE_TILING(0);
+		gb_tiling_config = PIPE_TILING(0);
 		break;
 	case 2:
-		gb_tiling_config |= PIPE_TILING(1);
+		gb_tiling_config = PIPE_TILING(1);
 		break;
 	case 4:
-		gb_tiling_config |= PIPE_TILING(2);
+		gb_tiling_config = PIPE_TILING(2);
 		break;
 	case 8:
-		gb_tiling_config |= PIPE_TILING(3);
+		gb_tiling_config = PIPE_TILING(3);
 		break;
 	}
 	rdev->config.rv770.tiling_npipes = rdev->config.rv770.max_tile_pipes;
 
+	disabled_rb_mask = (RREG32(CC_RB_BACKEND_DISABLE) >> 16) & R7XX_MAX_BACKENDS_MASK;
+	tmp = (gb_tiling_config & PIPE_TILING__MASK) >> PIPE_TILING__SHIFT;
+	tmp = r6xx_remap_render_backend(rdev, tmp, rdev->config.rv770.max_backends,
+					R7XX_MAX_BACKENDS, disabled_rb_mask);
+	gb_tiling_config |= tmp << 16;
+	rdev->config.rv770.backend_map = tmp;
+
 	if (rdev->family == CHIP_RV770)
 		gb_tiling_config |= BANK_TILING(1);
-	else
-		gb_tiling_config |= BANK_TILING((mc_arb_ramcfg & NOOFBANK_MASK) >> NOOFBANK_SHIFT);
+	else {
+		if ((mc_arb_ramcfg & NOOFBANK_MASK) >> NOOFBANK_SHIFT)
+			gb_tiling_config |= BANK_TILING(1);
+		else
+			gb_tiling_config |= BANK_TILING(0);
+	}
 	rdev->config.rv770.tiling_nbanks = 4 << ((gb_tiling_config >> 4) & 0x3);
 	gb_tiling_config |= GROUP_SIZE((mc_arb_ramcfg & BURSTLENGTH_MASK) >> BURSTLENGTH_SHIFT);
-	if ((mc_arb_ramcfg & BURSTLENGTH_MASK) >> BURSTLENGTH_SHIFT)
-		rdev->config.rv770.tiling_group_size = 512;
-	else
-		rdev->config.rv770.tiling_group_size = 256;
 	if (((mc_arb_ramcfg & NOOFROWS_MASK) >> NOOFROWS_SHIFT) > 3) {
 		gb_tiling_config |= ROW_TILING(3);
 		gb_tiling_config |= SAMPLE_SPLIT(3);
@@ -708,47 +576,19 @@
 	}
 
 	gb_tiling_config |= BANK_SWAPS(1);
-
-	cc_rb_backend_disable = RREG32(CC_RB_BACKEND_DISABLE) & 0x00ff0000;
-	cc_rb_backend_disable |=
-		BACKEND_DISABLE((R7XX_MAX_BACKENDS_MASK << rdev->config.rv770.max_backends) & R7XX_MAX_BACKENDS_MASK);
-
-	cc_gc_shader_pipe_config = RREG32(CC_GC_SHADER_PIPE_CONFIG) & 0xffffff00;
-	cc_gc_shader_pipe_config |=
-		INACTIVE_QD_PIPES((R7XX_MAX_PIPES_MASK << rdev->config.rv770.max_pipes) & R7XX_MAX_PIPES_MASK);
-	cc_gc_shader_pipe_config |=
-		INACTIVE_SIMDS((R7XX_MAX_SIMDS_MASK << rdev->config.rv770.max_simds) & R7XX_MAX_SIMDS_MASK);
-
-	if (rdev->family == CHIP_RV740)
-		backend_map = 0x28;
-	else
-		backend_map = r700_get_tile_pipe_to_backend_map(rdev,
-								rdev->config.rv770.max_tile_pipes,
-								(R7XX_MAX_BACKENDS -
-								 r600_count_pipe_bits((cc_rb_backend_disable &
-										       R7XX_MAX_BACKENDS_MASK) >> 16)),
-								(cc_rb_backend_disable >> 16));
-
 	rdev->config.rv770.tile_config = gb_tiling_config;
-	rdev->config.rv770.backend_map = backend_map;
-	gb_tiling_config |= BACKEND_MAP(backend_map);
 
 	WREG32(GB_TILING_CONFIG, gb_tiling_config);
 	WREG32(DCP_TILING_CONFIG, (gb_tiling_config & 0xffff));
 	WREG32(HDP_TILING_CONFIG, (gb_tiling_config & 0xffff));
 
-	WREG32(CC_RB_BACKEND_DISABLE,      cc_rb_backend_disable);
-	WREG32(CC_GC_SHADER_PIPE_CONFIG,   cc_gc_shader_pipe_config);
-	WREG32(GC_USER_SHADER_PIPE_CONFIG, cc_gc_shader_pipe_config);
-	WREG32(CC_SYS_RB_BACKEND_DISABLE,  cc_rb_backend_disable);
-
 	WREG32(CGTS_SYS_TCC_DISABLE, 0);
 	WREG32(CGTS_TCC_DISABLE, 0);
 	WREG32(CGTS_USER_SYS_TCC_DISABLE, 0);
 	WREG32(CGTS_USER_TCC_DISABLE, 0);
 
-	num_qd_pipes =
-		R7XX_MAX_PIPES - r600_count_pipe_bits((cc_gc_shader_pipe_config & INACTIVE_QD_PIPES_MASK) >> 8);
+
+	num_qd_pipes = R7XX_MAX_PIPES - r600_count_pipe_bits((cc_gc_shader_pipe_config & INACTIVE_QD_PIPES_MASK) >> 8);
 	WREG32(VGT_OUT_DEALLOC_CNTL, (num_qd_pipes * 4) & DEALLOC_DIST_MASK);
 	WREG32(VGT_VERTEX_REUSE_BLOCK_CNTL, ((num_qd_pipes * 4) - 2) & VTX_REUSE_DEPTH_MASK);
 
@@ -776,6 +616,9 @@
 				       ACK_FLUSH_CTL(3) |
 				       SYNC_FLUSH_CTL));
 
+	if (rdev->family != CHIP_RV770)
+		WREG32(SMX_SAR_CTL0, 0x00003f3f);
+
 	db_debug3 = RREG32(DB_DEBUG3);
 	db_debug3 &= ~DB_CLK_OFF_DELAY(0x1f);
 	switch (rdev->family) {
@@ -809,8 +652,6 @@
 
 	WREG32(VGT_NUM_INSTANCES, 1);
 
-	WREG32(SPI_CONFIG_CNTL, GPR_WRITE_PRIORITY(0));
-
 	WREG32(SPI_CONFIG_CNTL_1, VTX_DONE_DELAY(4));
 
 	WREG32(CP_PERFMON_CNTL, 0);
@@ -954,7 +795,7 @@
 
 	WREG32(PA_CL_ENHANCE, (CLIP_VTX_REORDER_ENA |
 					  NUM_CLIP_SEQ(3)));
-
+	WREG32(VC_ENHANCE, 0);
 }
 
 void r700_vram_gtt_location(struct radeon_device *rdev, struct radeon_mc *mc)
@@ -1118,6 +959,12 @@
 	if (r)
 		return r;
 
+	r = r600_audio_init(rdev);
+	if (r) {
+		DRM_ERROR("radeon: audio init failed\n");
+		return r;
+	}
+
 	return 0;
 }
 
@@ -1140,12 +987,6 @@
 		return r;
 	}
 
-	r = r600_audio_init(rdev);
-	if (r) {
-		dev_err(rdev->dev, "radeon: audio init failed\n");
-		return r;
-	}
-
 	return r;
 
 }
@@ -1254,12 +1095,6 @@
 		rdev->accel_working = false;
 	}
 
-	r = r600_audio_init(rdev);
-	if (r) {
-		dev_err(rdev->dev, "radeon: audio init failed\n");
-		return r;
-	}
-
 	return 0;
 }
 
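The rv770_gpu_init() rework above drops the hand-rolled backend map and instead feeds the pipe count encoded by PIPE_TILING() plus the harvested-RB mask from CC_RB_BACKEND_DISABLE into the shared r6xx_remap_render_backend() helper. A minimal user-space sketch of just that field extraction, using the macros added to rv770d.h below (the register values are made-up placeholders, not read from hardware):

	#include <stdint.h>
	#include <stdio.h>

	/* field layout taken from rv770d.h in this patch */
	#define PIPE_TILING__SHIFT     1
	#define PIPE_TILING__MASK      0x0000000e
	#define R7XX_MAX_BACKENDS_MASK 0xff

	int main(void)
	{
		uint32_t gb_tiling_config = 2 << PIPE_TILING__SHIFT;  /* PIPE_TILING(2) == 4 pipes */
		uint32_t cc_rb_backend_disable = 0x00030000;          /* pretend RBs 0 and 1 are fused off */
		uint32_t pipe_code, disabled_rb_mask;

		pipe_code = (gb_tiling_config & PIPE_TILING__MASK) >> PIPE_TILING__SHIFT;
		disabled_rb_mask = (cc_rb_backend_disable >> 16) & R7XX_MAX_BACKENDS_MASK;

		/* these two values are what the driver now hands to r6xx_remap_render_backend() */
		printf("pipes=%u disabled_rb_mask=0x%02x\n", 1u << pipe_code, disabled_rb_mask);
		return 0;
	}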
diff --git a/drivers/gpu/drm/radeon/rv770d.h b/drivers/gpu/drm/radeon/rv770d.h
index 9c549f7..b0adfc5 100644
--- a/drivers/gpu/drm/radeon/rv770d.h
+++ b/drivers/gpu/drm/radeon/rv770d.h
@@ -106,10 +106,13 @@
 #define		BACKEND_MAP(x)					((x) << 16)
 
 #define GB_TILING_CONFIG				0x98F0
+#define     PIPE_TILING__SHIFT              1
+#define     PIPE_TILING__MASK               0x0000000e
 
 #define	GC_USER_SHADER_PIPE_CONFIG			0x8954
 #define		INACTIVE_QD_PIPES(x)				((x) << 8)
 #define		INACTIVE_QD_PIPES_MASK				0x0000FF00
+#define		INACTIVE_QD_PIPES_SHIFT			    8
 #define		INACTIVE_SIMDS(x)				((x) << 16)
 #define		INACTIVE_SIMDS_MASK				0x00FF0000
 
@@ -174,6 +177,7 @@
 #define	MC_VM_MD_L1_TLB0_CNTL				0x2654
 #define	MC_VM_MD_L1_TLB1_CNTL				0x2658
 #define	MC_VM_MD_L1_TLB2_CNTL				0x265C
+#define	MC_VM_MD_L1_TLB3_CNTL				0x2698
 #define	MC_VM_SYSTEM_APERTURE_DEFAULT_ADDR		0x203C
 #define	MC_VM_SYSTEM_APERTURE_HIGH_ADDR			0x2038
 #define	MC_VM_SYSTEM_APERTURE_LOW_ADDR			0x2034
@@ -207,6 +211,7 @@
 #define	SCRATCH_UMSK					0x8540
 #define	SCRATCH_ADDR					0x8544
 
+#define	SMX_SAR_CTL0					0xA008
 #define	SMX_DC_CTL0					0xA020
 #define		USE_HASH_FUNCTION				(1 << 0)
 #define		CACHE_DEPTH(x)					((x) << 1)
@@ -306,6 +311,8 @@
 #define	TCP_CNTL					0x9610
 #define	TCP_CHAN_STEER					0x9614
 
+#define	VC_ENHANCE					0x9714
+
 #define	VGT_CACHE_INVALIDATION				0x88C4
 #define		CACHE_INVALIDATION(x)				((x)<<0)
 #define			VC_ONLY						0
diff --git a/drivers/gpu/drm/radeon/si.c b/drivers/gpu/drm/radeon/si.c
index 549732e..c7b61f1 100644
--- a/drivers/gpu/drm/radeon/si.c
+++ b/drivers/gpu/drm/radeon/si.c
@@ -867,200 +867,6 @@
 /*
  * Core functions
  */
-static u32 si_get_tile_pipe_to_backend_map(struct radeon_device *rdev,
-					   u32 num_tile_pipes,
-					   u32 num_backends_per_asic,
-					   u32 *backend_disable_mask_per_asic,
-					   u32 num_shader_engines)
-{
-	u32 backend_map = 0;
-	u32 enabled_backends_mask = 0;
-	u32 enabled_backends_count = 0;
-	u32 num_backends_per_se;
-	u32 cur_pipe;
-	u32 swizzle_pipe[SI_MAX_PIPES];
-	u32 cur_backend = 0;
-	u32 i;
-	bool force_no_swizzle;
-
-	/* force legal values */
-	if (num_tile_pipes < 1)
-		num_tile_pipes = 1;
-	if (num_tile_pipes > rdev->config.si.max_tile_pipes)
-		num_tile_pipes = rdev->config.si.max_tile_pipes;
-	if (num_shader_engines < 1)
-		num_shader_engines = 1;
-	if (num_shader_engines > rdev->config.si.max_shader_engines)
-		num_shader_engines = rdev->config.si.max_shader_engines;
-	if (num_backends_per_asic < num_shader_engines)
-		num_backends_per_asic = num_shader_engines;
-	if (num_backends_per_asic > (rdev->config.si.max_backends_per_se * num_shader_engines))
-		num_backends_per_asic = rdev->config.si.max_backends_per_se * num_shader_engines;
-
-	/* make sure we have the same number of backends per se */
-	num_backends_per_asic = ALIGN(num_backends_per_asic, num_shader_engines);
-	/* set up the number of backends per se */
-	num_backends_per_se = num_backends_per_asic / num_shader_engines;
-	if (num_backends_per_se > rdev->config.si.max_backends_per_se) {
-		num_backends_per_se = rdev->config.si.max_backends_per_se;
-		num_backends_per_asic = num_backends_per_se * num_shader_engines;
-	}
-
-	/* create enable mask and count for enabled backends */
-	for (i = 0; i < SI_MAX_BACKENDS; ++i) {
-		if (((*backend_disable_mask_per_asic >> i) & 1) == 0) {
-			enabled_backends_mask |= (1 << i);
-			++enabled_backends_count;
-		}
-		if (enabled_backends_count == num_backends_per_asic)
-			break;
-	}
-
-	/* force the backends mask to match the current number of backends */
-	if (enabled_backends_count != num_backends_per_asic) {
-		u32 this_backend_enabled;
-		u32 shader_engine;
-		u32 backend_per_se;
-
-		enabled_backends_mask = 0;
-		enabled_backends_count = 0;
-		*backend_disable_mask_per_asic = SI_MAX_BACKENDS_MASK;
-		for (i = 0; i < SI_MAX_BACKENDS; ++i) {
-			/* calc the current se */
-			shader_engine = i / rdev->config.si.max_backends_per_se;
-			/* calc the backend per se */
-			backend_per_se = i % rdev->config.si.max_backends_per_se;
-			/* default to not enabled */
-			this_backend_enabled = 0;
-			if ((shader_engine < num_shader_engines) &&
-			    (backend_per_se < num_backends_per_se))
-				this_backend_enabled = 1;
-			if (this_backend_enabled) {
-				enabled_backends_mask |= (1 << i);
-				*backend_disable_mask_per_asic &= ~(1 << i);
-				++enabled_backends_count;
-			}
-		}
-	}
-
-
-	memset((uint8_t *)&swizzle_pipe[0], 0, sizeof(u32) * SI_MAX_PIPES);
-	switch (rdev->family) {
-	case CHIP_TAHITI:
-	case CHIP_PITCAIRN:
-	case CHIP_VERDE:
-		force_no_swizzle = true;
-		break;
-	default:
-		force_no_swizzle = false;
-		break;
-	}
-	if (force_no_swizzle) {
-		bool last_backend_enabled = false;
-
-		force_no_swizzle = false;
-		for (i = 0; i < SI_MAX_BACKENDS; ++i) {
-			if (((enabled_backends_mask >> i) & 1) == 1) {
-				if (last_backend_enabled)
-					force_no_swizzle = true;
-				last_backend_enabled = true;
-			} else
-				last_backend_enabled = false;
-		}
-	}
-
-	switch (num_tile_pipes) {
-	case 1:
-	case 3:
-	case 5:
-	case 7:
-		DRM_ERROR("odd number of pipes!\n");
-		break;
-	case 2:
-		swizzle_pipe[0] = 0;
-		swizzle_pipe[1] = 1;
-		break;
-	case 4:
-		if (force_no_swizzle) {
-			swizzle_pipe[0] = 0;
-			swizzle_pipe[1] = 1;
-			swizzle_pipe[2] = 2;
-			swizzle_pipe[3] = 3;
-		} else {
-			swizzle_pipe[0] = 0;
-			swizzle_pipe[1] = 2;
-			swizzle_pipe[2] = 1;
-			swizzle_pipe[3] = 3;
-		}
-		break;
-	case 6:
-		if (force_no_swizzle) {
-			swizzle_pipe[0] = 0;
-			swizzle_pipe[1] = 1;
-			swizzle_pipe[2] = 2;
-			swizzle_pipe[3] = 3;
-			swizzle_pipe[4] = 4;
-			swizzle_pipe[5] = 5;
-		} else {
-			swizzle_pipe[0] = 0;
-			swizzle_pipe[1] = 2;
-			swizzle_pipe[2] = 4;
-			swizzle_pipe[3] = 1;
-			swizzle_pipe[4] = 3;
-			swizzle_pipe[5] = 5;
-		}
-		break;
-	case 8:
-		if (force_no_swizzle) {
-			swizzle_pipe[0] = 0;
-			swizzle_pipe[1] = 1;
-			swizzle_pipe[2] = 2;
-			swizzle_pipe[3] = 3;
-			swizzle_pipe[4] = 4;
-			swizzle_pipe[5] = 5;
-			swizzle_pipe[6] = 6;
-			swizzle_pipe[7] = 7;
-		} else {
-			swizzle_pipe[0] = 0;
-			swizzle_pipe[1] = 2;
-			swizzle_pipe[2] = 4;
-			swizzle_pipe[3] = 6;
-			swizzle_pipe[4] = 1;
-			swizzle_pipe[5] = 3;
-			swizzle_pipe[6] = 5;
-			swizzle_pipe[7] = 7;
-		}
-		break;
-	}
-
-	for (cur_pipe = 0; cur_pipe < num_tile_pipes; ++cur_pipe) {
-		while (((1 << cur_backend) & enabled_backends_mask) == 0)
-			cur_backend = (cur_backend + 1) % SI_MAX_BACKENDS;
-
-		backend_map |= (((cur_backend & 0xf) << (swizzle_pipe[cur_pipe] * 4)));
-
-		cur_backend = (cur_backend + 1) % SI_MAX_BACKENDS;
-	}
-
-	return backend_map;
-}
-
-static u32 si_get_disable_mask_per_asic(struct radeon_device *rdev,
-					u32 disable_mask_per_se,
-					u32 max_disable_mask_per_se,
-					u32 num_shader_engines)
-{
-	u32 disable_field_width_per_se = r600_count_pipe_bits(disable_mask_per_se);
-	u32 disable_mask_per_asic = disable_mask_per_se & max_disable_mask_per_se;
-
-	if (num_shader_engines == 1)
-		return disable_mask_per_asic;
-	else if (num_shader_engines == 2)
-		return disable_mask_per_asic | (disable_mask_per_asic << disable_field_width_per_se);
-	else
-		return 0xffffffff;
-}
-
 static void si_tiling_mode_table_init(struct radeon_device *rdev)
 {
 	const u32 num_tile_mode_states = 32;
@@ -1562,18 +1368,151 @@
 		DRM_ERROR("unknown asic: 0x%x\n", rdev->family);
 }
 
+static void si_select_se_sh(struct radeon_device *rdev,
+			    u32 se_num, u32 sh_num)
+{
+	u32 data = INSTANCE_BROADCAST_WRITES;
+
+	if ((se_num == 0xffffffff) && (sh_num == 0xffffffff))
+		data = SH_BROADCAST_WRITES | SE_BROADCAST_WRITES;
+	else if (se_num == 0xffffffff)
+		data |= SE_BROADCAST_WRITES | SH_INDEX(sh_num);
+	else if (sh_num == 0xffffffff)
+		data |= SH_BROADCAST_WRITES | SE_INDEX(se_num);
+	else
+		data |= SH_INDEX(sh_num) | SE_INDEX(se_num);
+	WREG32(GRBM_GFX_INDEX, data);
+}
+
+static u32 si_create_bitmask(u32 bit_width)
+{
+	u32 i, mask = 0;
+
+	for (i = 0; i < bit_width; i++) {
+		mask <<= 1;
+		mask |= 1;
+	}
+	return mask;
+}
+
+static u32 si_get_cu_enabled(struct radeon_device *rdev, u32 cu_per_sh)
+{
+	u32 data, mask;
+
+	data = RREG32(CC_GC_SHADER_ARRAY_CONFIG);
+	if (data & 1)
+		data &= INACTIVE_CUS_MASK;
+	else
+		data = 0;
+	data |= RREG32(GC_USER_SHADER_ARRAY_CONFIG);
+
+	data >>= INACTIVE_CUS_SHIFT;
+
+	mask = si_create_bitmask(cu_per_sh);
+
+	return ~data & mask;
+}
+
+static void si_setup_spi(struct radeon_device *rdev,
+			 u32 se_num, u32 sh_per_se,
+			 u32 cu_per_sh)
+{
+	int i, j, k;
+	u32 data, mask, active_cu;
+
+	for (i = 0; i < se_num; i++) {
+		for (j = 0; j < sh_per_se; j++) {
+			si_select_se_sh(rdev, i, j);
+			data = RREG32(SPI_STATIC_THREAD_MGMT_3);
+			active_cu = si_get_cu_enabled(rdev, cu_per_sh);
+
+			mask = 1;
+			for (k = 0; k < 16; k++) {
+				mask = 1 << k;
+				if (active_cu & mask) {
+					data &= ~mask;
+					WREG32(SPI_STATIC_THREAD_MGMT_3, data);
+					break;
+				}
+			}
+		}
+	}
+	si_select_se_sh(rdev, 0xffffffff, 0xffffffff);
+}
+
+static u32 si_get_rb_disabled(struct radeon_device *rdev,
+			      u32 max_rb_num, u32 se_num,
+			      u32 sh_per_se)
+{
+	u32 data, mask;
+
+	data = RREG32(CC_RB_BACKEND_DISABLE);
+	if (data & 1)
+		data &= BACKEND_DISABLE_MASK;
+	else
+		data = 0;
+	data |= RREG32(GC_USER_RB_BACKEND_DISABLE);
+
+	data >>= BACKEND_DISABLE_SHIFT;
+
+	mask = si_create_bitmask(max_rb_num / se_num / sh_per_se);
+
+	return data & mask;
+}
+
+static void si_setup_rb(struct radeon_device *rdev,
+			u32 se_num, u32 sh_per_se,
+			u32 max_rb_num)
+{
+	int i, j;
+	u32 data, mask;
+	u32 disabled_rbs = 0;
+	u32 enabled_rbs = 0;
+
+	for (i = 0; i < se_num; i++) {
+		for (j = 0; j < sh_per_se; j++) {
+			si_select_se_sh(rdev, i, j);
+			data = si_get_rb_disabled(rdev, max_rb_num, se_num, sh_per_se);
+			disabled_rbs |= data << ((i * sh_per_se + j) * TAHITI_RB_BITMAP_WIDTH_PER_SH);
+		}
+	}
+	si_select_se_sh(rdev, 0xffffffff, 0xffffffff);
+
+	mask = 1;
+	for (i = 0; i < max_rb_num; i++) {
+		if (!(disabled_rbs & mask))
+			enabled_rbs |= mask;
+		mask <<= 1;
+	}
+
+	for (i = 0; i < se_num; i++) {
+		si_select_se_sh(rdev, i, 0xffffffff);
+		data = 0;
+		for (j = 0; j < sh_per_se; j++) {
+			switch (enabled_rbs & 3) {
+			case 1:
+				data |= (RASTER_CONFIG_RB_MAP_0 << (i * sh_per_se + j) * 2);
+				break;
+			case 2:
+				data |= (RASTER_CONFIG_RB_MAP_3 << (i * sh_per_se + j) * 2);
+				break;
+			case 3:
+			default:
+				data |= (RASTER_CONFIG_RB_MAP_2 << (i * sh_per_se + j) * 2);
+				break;
+			}
+			enabled_rbs >>= 2;
+		}
+		WREG32(PA_SC_RASTER_CONFIG, data);
+	}
+	si_select_se_sh(rdev, 0xffffffff, 0xffffffff);
+}
+
 static void si_gpu_init(struct radeon_device *rdev)
 {
-	u32 cc_rb_backend_disable = 0;
-	u32 cc_gc_shader_array_config;
 	u32 gb_addr_config = 0;
 	u32 mc_shared_chmap, mc_arb_ramcfg;
-	u32 gb_backend_map;
-	u32 cgts_tcc_disable;
 	u32 sx_debug_1;
-	u32 gc_user_shader_array_config;
-	u32 gc_user_rb_backend_disable;
-	u32 cgts_user_tcc_disable;
 	u32 hdp_host_path_cntl;
 	u32 tmp;
 	int i, j;
@@ -1581,9 +1520,9 @@
 	switch (rdev->family) {
 	case CHIP_TAHITI:
 		rdev->config.si.max_shader_engines = 2;
-		rdev->config.si.max_pipes_per_simd = 4;
 		rdev->config.si.max_tile_pipes = 12;
-		rdev->config.si.max_simds_per_se = 8;
+		rdev->config.si.max_cu_per_sh = 8;
+		rdev->config.si.max_sh_per_se = 2;
 		rdev->config.si.max_backends_per_se = 4;
 		rdev->config.si.max_texture_channel_caches = 12;
 		rdev->config.si.max_gprs = 256;
@@ -1594,12 +1533,13 @@
 		rdev->config.si.sc_prim_fifo_size_backend = 0x100;
 		rdev->config.si.sc_hiz_tile_fifo_size = 0x30;
 		rdev->config.si.sc_earlyz_tile_fifo_size = 0x130;
+		gb_addr_config = TAHITI_GB_ADDR_CONFIG_GOLDEN;
 		break;
 	case CHIP_PITCAIRN:
 		rdev->config.si.max_shader_engines = 2;
-		rdev->config.si.max_pipes_per_simd = 4;
 		rdev->config.si.max_tile_pipes = 8;
-		rdev->config.si.max_simds_per_se = 5;
+		rdev->config.si.max_cu_per_sh = 5;
+		rdev->config.si.max_sh_per_se = 2;
 		rdev->config.si.max_backends_per_se = 4;
 		rdev->config.si.max_texture_channel_caches = 8;
 		rdev->config.si.max_gprs = 256;
@@ -1610,13 +1550,14 @@
 		rdev->config.si.sc_prim_fifo_size_backend = 0x100;
 		rdev->config.si.sc_hiz_tile_fifo_size = 0x30;
 		rdev->config.si.sc_earlyz_tile_fifo_size = 0x130;
+		gb_addr_config = TAHITI_GB_ADDR_CONFIG_GOLDEN;
 		break;
 	case CHIP_VERDE:
 	default:
 		rdev->config.si.max_shader_engines = 1;
-		rdev->config.si.max_pipes_per_simd = 4;
 		rdev->config.si.max_tile_pipes = 4;
-		rdev->config.si.max_simds_per_se = 2;
+		rdev->config.si.max_cu_per_sh = 2;
+		rdev->config.si.max_sh_per_se = 2;
 		rdev->config.si.max_backends_per_se = 4;
 		rdev->config.si.max_texture_channel_caches = 4;
 		rdev->config.si.max_gprs = 256;
@@ -1627,6 +1568,7 @@
 		rdev->config.si.sc_prim_fifo_size_backend = 0x40;
 		rdev->config.si.sc_hiz_tile_fifo_size = 0x30;
 		rdev->config.si.sc_earlyz_tile_fifo_size = 0x130;
+		gb_addr_config = VERDE_GB_ADDR_CONFIG_GOLDEN;
 		break;
 	}
 
@@ -1648,31 +1590,7 @@
 	mc_shared_chmap = RREG32(MC_SHARED_CHMAP);
 	mc_arb_ramcfg = RREG32(MC_ARB_RAMCFG);
 
-	cc_rb_backend_disable = RREG32(CC_RB_BACKEND_DISABLE);
-	cc_gc_shader_array_config = RREG32(CC_GC_SHADER_ARRAY_CONFIG);
-	cgts_tcc_disable = 0xffff0000;
-	for (i = 0; i < rdev->config.si.max_texture_channel_caches; i++)
-		cgts_tcc_disable &= ~(1 << (16 + i));
-	gc_user_rb_backend_disable = RREG32(GC_USER_RB_BACKEND_DISABLE);
-	gc_user_shader_array_config = RREG32(GC_USER_SHADER_ARRAY_CONFIG);
-	cgts_user_tcc_disable = RREG32(CGTS_USER_TCC_DISABLE);
-
-	rdev->config.si.num_shader_engines = rdev->config.si.max_shader_engines;
 	rdev->config.si.num_tile_pipes = rdev->config.si.max_tile_pipes;
-	tmp = ((~gc_user_rb_backend_disable) & BACKEND_DISABLE_MASK) >> BACKEND_DISABLE_SHIFT;
-	rdev->config.si.num_backends_per_se = r600_count_pipe_bits(tmp);
-	tmp = (gc_user_rb_backend_disable & BACKEND_DISABLE_MASK) >> BACKEND_DISABLE_SHIFT;
-	rdev->config.si.backend_disable_mask_per_asic =
-		si_get_disable_mask_per_asic(rdev, tmp, SI_MAX_BACKENDS_PER_SE_MASK,
-					     rdev->config.si.num_shader_engines);
-	rdev->config.si.backend_map =
-		si_get_tile_pipe_to_backend_map(rdev, rdev->config.si.num_tile_pipes,
-						rdev->config.si.num_backends_per_se *
-						rdev->config.si.num_shader_engines,
-						&rdev->config.si.backend_disable_mask_per_asic,
-						rdev->config.si.num_shader_engines);
-	tmp = ((~cgts_user_tcc_disable) & TCC_DISABLE_MASK) >> TCC_DISABLE_SHIFT;
-	rdev->config.si.num_texture_channel_caches = r600_count_pipe_bits(tmp);
 	rdev->config.si.mem_max_burst_length_bytes = 256;
 	tmp = (mc_arb_ramcfg & NOOFCOLS_MASK) >> NOOFCOLS_SHIFT;
 	rdev->config.si.mem_row_size_in_kb = (4 * (1 << (8 + tmp))) / 1024;
@@ -1683,55 +1601,8 @@
 	rdev->config.si.num_gpus = 1;
 	rdev->config.si.multi_gpu_tile_size = 64;
 
-	gb_addr_config = 0;
-	switch (rdev->config.si.num_tile_pipes) {
-	case 1:
-		gb_addr_config |= NUM_PIPES(0);
-		break;
-	case 2:
-		gb_addr_config |= NUM_PIPES(1);
-		break;
-	case 4:
-		gb_addr_config |= NUM_PIPES(2);
-		break;
-	case 8:
-	default:
-		gb_addr_config |= NUM_PIPES(3);
-		break;
-	}
-
-	tmp = (rdev->config.si.mem_max_burst_length_bytes / 256) - 1;
-	gb_addr_config |= PIPE_INTERLEAVE_SIZE(tmp);
-	gb_addr_config |= NUM_SHADER_ENGINES(rdev->config.si.num_shader_engines - 1);
-	tmp = (rdev->config.si.shader_engine_tile_size / 16) - 1;
-	gb_addr_config |= SHADER_ENGINE_TILE_SIZE(tmp);
-	switch (rdev->config.si.num_gpus) {
-	case 1:
-	default:
-		gb_addr_config |= NUM_GPUS(0);
-		break;
-	case 2:
-		gb_addr_config |= NUM_GPUS(1);
-		break;
-	case 4:
-		gb_addr_config |= NUM_GPUS(2);
-		break;
-	}
-	switch (rdev->config.si.multi_gpu_tile_size) {
-	case 16:
-		gb_addr_config |= MULTI_GPU_TILE_SIZE(0);
-		break;
-	case 32:
-	default:
-		gb_addr_config |= MULTI_GPU_TILE_SIZE(1);
-		break;
-	case 64:
-		gb_addr_config |= MULTI_GPU_TILE_SIZE(2);
-		break;
-	case 128:
-		gb_addr_config |= MULTI_GPU_TILE_SIZE(3);
-		break;
-	}
+	/* fix up row size */
+	gb_addr_config &= ~ROW_SIZE_MASK;
 	switch (rdev->config.si.mem_row_size_in_kb) {
 	case 1:
 	default:
@@ -1745,26 +1616,6 @@
 		break;
 	}
 
-	tmp = (gb_addr_config & NUM_PIPES_MASK) >> NUM_PIPES_SHIFT;
-	rdev->config.si.num_tile_pipes = (1 << tmp);
-	tmp = (gb_addr_config & PIPE_INTERLEAVE_SIZE_MASK) >> PIPE_INTERLEAVE_SIZE_SHIFT;
-	rdev->config.si.mem_max_burst_length_bytes = (tmp + 1) * 256;
-	tmp = (gb_addr_config & NUM_SHADER_ENGINES_MASK) >> NUM_SHADER_ENGINES_SHIFT;
-	rdev->config.si.num_shader_engines = tmp + 1;
-	tmp = (gb_addr_config & NUM_GPUS_MASK) >> NUM_GPUS_SHIFT;
-	rdev->config.si.num_gpus = tmp + 1;
-	tmp = (gb_addr_config & MULTI_GPU_TILE_SIZE_MASK) >> MULTI_GPU_TILE_SIZE_SHIFT;
-	rdev->config.si.multi_gpu_tile_size = 1 << tmp;
-	tmp = (gb_addr_config & ROW_SIZE_MASK) >> ROW_SIZE_SHIFT;
-	rdev->config.si.mem_row_size_in_kb = 1 << tmp;
-
-	gb_backend_map =
-		si_get_tile_pipe_to_backend_map(rdev, rdev->config.si.num_tile_pipes,
-						rdev->config.si.num_backends_per_se *
-						rdev->config.si.num_shader_engines,
-						&rdev->config.si.backend_disable_mask_per_asic,
-						rdev->config.si.num_shader_engines);
-
 	/* setup tiling info dword.  gb_addr_config is not adequate since it does
 	 * not have bank info, so create a custom tiling dword.
 	 * bits 3:0   num_pipes
@@ -1789,34 +1640,30 @@
 		rdev->config.si.tile_config |= (3 << 0);
 		break;
 	}
-	rdev->config.si.tile_config |=
-		((mc_arb_ramcfg & NOOFBANK_MASK) >> NOOFBANK_SHIFT) << 4;
+	if ((mc_arb_ramcfg & NOOFBANK_MASK) >> NOOFBANK_SHIFT)
+		rdev->config.si.tile_config |= 1 << 4;
+	else
+		rdev->config.si.tile_config |= 0 << 4;
 	rdev->config.si.tile_config |=
 		((gb_addr_config & PIPE_INTERLEAVE_SIZE_MASK) >> PIPE_INTERLEAVE_SIZE_SHIFT) << 8;
 	rdev->config.si.tile_config |=
 		((gb_addr_config & ROW_SIZE_MASK) >> ROW_SIZE_SHIFT) << 12;
 
-	rdev->config.si.backend_map = gb_backend_map;
 	WREG32(GB_ADDR_CONFIG, gb_addr_config);
 	WREG32(DMIF_ADDR_CONFIG, gb_addr_config);
 	WREG32(HDP_ADDR_CONFIG, gb_addr_config);
 
-	/* primary versions */
-	WREG32(CC_RB_BACKEND_DISABLE, cc_rb_backend_disable);
-	WREG32(CC_SYS_RB_BACKEND_DISABLE, cc_rb_backend_disable);
-	WREG32(CC_GC_SHADER_ARRAY_CONFIG, cc_gc_shader_array_config);
-
-	WREG32(CGTS_TCC_DISABLE, cgts_tcc_disable);
-
-	/* user versions */
-	WREG32(GC_USER_RB_BACKEND_DISABLE, cc_rb_backend_disable);
-	WREG32(GC_USER_SYS_RB_BACKEND_DISABLE, cc_rb_backend_disable);
-	WREG32(GC_USER_SHADER_ARRAY_CONFIG, cc_gc_shader_array_config);
-
-	WREG32(CGTS_USER_TCC_DISABLE, cgts_tcc_disable);
-
 	si_tiling_mode_table_init(rdev);
 
+	si_setup_rb(rdev, rdev->config.si.max_shader_engines,
+		    rdev->config.si.max_sh_per_se,
+		    rdev->config.si.max_backends_per_se);
+
+	si_setup_spi(rdev, rdev->config.si.max_shader_engines,
+		     rdev->config.si.max_sh_per_se,
+		     rdev->config.si.max_cu_per_sh);
+
+
 	/* set HW defaults for 3D engine */
 	WREG32(CP_QUEUE_THRESHOLDS, (ROQ_IB1_START(0x16) |
 				     ROQ_IB2_START(0x2b)));
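si_setup_rb() above walks every shader engine and shader array, reads the per-SH harvest registers, and folds the results into a single disabled-RB bitmap before programming PA_SC_RASTER_CONFIG. The two small pieces of that logic, the ones-mask helper and the enabled-bit accumulation, can be exercised in isolation; in this sketch the SE/SH/RB counts and the disabled bit are illustrative stand-ins, not values read from hardware:

	#include <stdint.h>
	#include <stdio.h>

	/* same loop as si_create_bitmask(): a mask of bit_width low ones */
	static uint32_t create_bitmask(uint32_t bit_width)
	{
		uint32_t i, mask = 0;

		for (i = 0; i < bit_width; i++) {
			mask <<= 1;
			mask |= 1;
		}
		return mask;
	}

	int main(void)
	{
		uint32_t max_rb_num = 4, se_num = 1, sh_per_se = 2;	/* illustrative counts */
		uint32_t per_sh_mask = create_bitmask(max_rb_num / se_num / sh_per_se);
		uint32_t disabled_rbs = 1u << 2;	/* pretend SH 1, RB 0 is reported disabled */
		uint32_t enabled_rbs = 0, mask = 1, i;

		for (i = 0; i < max_rb_num; i++) {
			if (!(disabled_rbs & mask))
				enabled_rbs |= mask;
			mask <<= 1;
		}
		printf("per_sh_mask=0x%x enabled_rbs=0x%x\n", per_sh_mask, enabled_rbs);
		return 0;
	}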
diff --git a/drivers/gpu/drm/radeon/si_reg.h b/drivers/gpu/drm/radeon/si_reg.h
index eda938a..501f9d43 100644
--- a/drivers/gpu/drm/radeon/si_reg.h
+++ b/drivers/gpu/drm/radeon/si_reg.h
@@ -30,4 +30,76 @@
 #define SI_DC_GPIO_HPD_EN                        0x65b8
 #define SI_DC_GPIO_HPD_Y                         0x65bc
 
+#define SI_GRPH_CONTROL                          0x6804
+#       define SI_GRPH_DEPTH(x)                  (((x) & 0x3) << 0)
+#       define SI_GRPH_DEPTH_8BPP                0
+#       define SI_GRPH_DEPTH_16BPP               1
+#       define SI_GRPH_DEPTH_32BPP               2
+#       define SI_GRPH_NUM_BANKS(x)              (((x) & 0x3) << 2)
+#       define SI_ADDR_SURF_2_BANK               0
+#       define SI_ADDR_SURF_4_BANK               1
+#       define SI_ADDR_SURF_8_BANK               2
+#       define SI_ADDR_SURF_16_BANK              3
+#       define SI_GRPH_Z(x)                      (((x) & 0x3) << 4)
+#       define SI_GRPH_BANK_WIDTH(x)             (((x) & 0x3) << 6)
+#       define SI_ADDR_SURF_BANK_WIDTH_1         0
+#       define SI_ADDR_SURF_BANK_WIDTH_2         1
+#       define SI_ADDR_SURF_BANK_WIDTH_4         2
+#       define SI_ADDR_SURF_BANK_WIDTH_8         3
+#       define SI_GRPH_FORMAT(x)                 (((x) & 0x7) << 8)
+/* 8 BPP */
+#       define SI_GRPH_FORMAT_INDEXED            0
+/* 16 BPP */
+#       define SI_GRPH_FORMAT_ARGB1555           0
+#       define SI_GRPH_FORMAT_ARGB565            1
+#       define SI_GRPH_FORMAT_ARGB4444           2
+#       define SI_GRPH_FORMAT_AI88               3
+#       define SI_GRPH_FORMAT_MONO16             4
+#       define SI_GRPH_FORMAT_BGRA5551           5
+/* 32 BPP */
+#       define SI_GRPH_FORMAT_ARGB8888           0
+#       define SI_GRPH_FORMAT_ARGB2101010        1
+#       define SI_GRPH_FORMAT_32BPP_DIG          2
+#       define SI_GRPH_FORMAT_8B_ARGB2101010     3
+#       define SI_GRPH_FORMAT_BGRA1010102        4
+#       define SI_GRPH_FORMAT_8B_BGRA1010102     5
+#       define SI_GRPH_FORMAT_RGB111110          6
+#       define SI_GRPH_FORMAT_BGR101111          7
+#       define SI_GRPH_BANK_HEIGHT(x)            (((x) & 0x3) << 11)
+#       define SI_ADDR_SURF_BANK_HEIGHT_1        0
+#       define SI_ADDR_SURF_BANK_HEIGHT_2        1
+#       define SI_ADDR_SURF_BANK_HEIGHT_4        2
+#       define SI_ADDR_SURF_BANK_HEIGHT_8        3
+#       define SI_GRPH_TILE_SPLIT(x)             (((x) & 0x7) << 13)
+#       define SI_ADDR_SURF_TILE_SPLIT_64B       0
+#       define SI_ADDR_SURF_TILE_SPLIT_128B      1
+#       define SI_ADDR_SURF_TILE_SPLIT_256B      2
+#       define SI_ADDR_SURF_TILE_SPLIT_512B      3
+#       define SI_ADDR_SURF_TILE_SPLIT_1KB       4
+#       define SI_ADDR_SURF_TILE_SPLIT_2KB       5
+#       define SI_ADDR_SURF_TILE_SPLIT_4KB       6
+#       define SI_GRPH_MACRO_TILE_ASPECT(x)      (((x) & 0x3) << 18)
+#       define SI_ADDR_SURF_MACRO_TILE_ASPECT_1  0
+#       define SI_ADDR_SURF_MACRO_TILE_ASPECT_2  1
+#       define SI_ADDR_SURF_MACRO_TILE_ASPECT_4  2
+#       define SI_ADDR_SURF_MACRO_TILE_ASPECT_8  3
+#       define SI_GRPH_ARRAY_MODE(x)             (((x) & 0x7) << 20)
+#       define SI_GRPH_ARRAY_LINEAR_GENERAL      0
+#       define SI_GRPH_ARRAY_LINEAR_ALIGNED      1
+#       define SI_GRPH_ARRAY_1D_TILED_THIN1      2
+#       define SI_GRPH_ARRAY_2D_TILED_THIN1      4
+#       define SI_GRPH_PIPE_CONFIG(x)		 (((x) & 0x1f) << 24)
+#       define SI_ADDR_SURF_P2			 0
+#       define SI_ADDR_SURF_P4_8x16		 4
+#       define SI_ADDR_SURF_P4_16x16		 5
+#       define SI_ADDR_SURF_P4_16x32		 6
+#       define SI_ADDR_SURF_P4_32x32		 7
+#       define SI_ADDR_SURF_P8_16x16_8x16	 8
+#       define SI_ADDR_SURF_P8_16x32_8x16	 9
+#       define SI_ADDR_SURF_P8_32x32_8x16	 10
+#       define SI_ADDR_SURF_P8_16x32_16x16	 11
+#       define SI_ADDR_SURF_P8_32x32_16x16	 12
+#       define SI_ADDR_SURF_P8_32x32_16x32	 13
+#       define SI_ADDR_SURF_P8_32x64_32x32	 14
+
 #endif
diff --git a/drivers/gpu/drm/radeon/sid.h b/drivers/gpu/drm/radeon/sid.h
index 53ea2c4..db40679 100644
--- a/drivers/gpu/drm/radeon/sid.h
+++ b/drivers/gpu/drm/radeon/sid.h
@@ -24,6 +24,11 @@
 #ifndef SI_H
 #define SI_H
 
+#define TAHITI_RB_BITMAP_WIDTH_PER_SH  2
+
+#define TAHITI_GB_ADDR_CONFIG_GOLDEN        0x12011003
+#define VERDE_GB_ADDR_CONFIG_GOLDEN         0x12010002
+
 #define	CG_MULT_THERMAL_STATUS					0x714
 #define		ASIC_MAX_TEMP(x)				((x) << 0)
 #define		ASIC_MAX_TEMP_MASK				0x000001ff
@@ -408,6 +413,12 @@
 #define		SOFT_RESET_IA					(1 << 15)
 
 #define GRBM_GFX_INDEX          			0x802C
+#define		INSTANCE_INDEX(x)			((x) << 0)
+#define		SH_INDEX(x)     			((x) << 8)
+#define		SE_INDEX(x)     			((x) << 16)
+#define		SH_BROADCAST_WRITES      		(1 << 29)
+#define		INSTANCE_BROADCAST_WRITES      		(1 << 30)
+#define		SE_BROADCAST_WRITES      		(1 << 31)
 
 #define GRBM_INT_CNTL                                   0x8060
 #       define RDERR_INT_ENABLE                         (1 << 0)
@@ -480,6 +491,8 @@
 #define	VGT_TF_MEMORY_BASE				0x89B8
 
 #define CC_GC_SHADER_ARRAY_CONFIG			0x89bc
+#define		INACTIVE_CUS_MASK			0xFFFF0000
+#define		INACTIVE_CUS_SHIFT			16
 #define GC_USER_SHADER_ARRAY_CONFIG			0x89c0
 
 #define	PA_CL_ENHANCE					0x8A14
@@ -688,6 +701,12 @@
 #define RLC_MC_CNTL                                       0xC344
 #define RLC_UCODE_CNTL                                    0xC348
 
+#define PA_SC_RASTER_CONFIG                             0x28350
+#       define RASTER_CONFIG_RB_MAP_0                   0
+#       define RASTER_CONFIG_RB_MAP_1                   1
+#       define RASTER_CONFIG_RB_MAP_2                   2
+#       define RASTER_CONFIG_RB_MAP_3                   3
+
 #define VGT_EVENT_INITIATOR                             0x28a90
 #       define SAMPLE_STREAMOUTSTATS1                   (1 << 0)
 #       define SAMPLE_STREAMOUTSTATS2                   (2 << 0)
diff --git a/drivers/gpu/drm/sis/sis_drv.c b/drivers/gpu/drm/sis/sis_drv.c
index 30d98d1..dd14cd1 100644
--- a/drivers/gpu/drm/sis/sis_drv.c
+++ b/drivers/gpu/drm/sis/sis_drv.c
@@ -47,9 +47,9 @@
 	if (dev_priv == NULL)
 		return -ENOMEM;
 
+	idr_init(&dev_priv->object_idr);
 	dev->dev_private = (void *)dev_priv;
 	dev_priv->chipset = chipset;
-	idr_init(&dev->object_name_idr);
 
 	return 0;
 }
diff --git a/drivers/gpu/drm/ttm/ttm_bo.c b/drivers/gpu/drm/ttm/ttm_bo.c
index 36792bd..36f4b28 100644
--- a/drivers/gpu/drm/ttm/ttm_bo.c
+++ b/drivers/gpu/drm/ttm/ttm_bo.c
@@ -1204,6 +1204,7 @@
 			(*destroy)(bo);
 		else
 			kfree(bo);
+		ttm_mem_global_free(mem_glob, acc_size);
 		return -EINVAL;
 	}
 	bo->destroy = destroy;
@@ -1307,22 +1308,14 @@
 			struct ttm_buffer_object **p_bo)
 {
 	struct ttm_buffer_object *bo;
-	struct ttm_mem_global *mem_glob = bdev->glob->mem_glob;
 	size_t acc_size;
 	int ret;
 
-	acc_size = ttm_bo_acc_size(bdev, size, sizeof(struct ttm_buffer_object));
-	ret = ttm_mem_global_alloc(mem_glob, acc_size, false, false);
-	if (unlikely(ret != 0))
-		return ret;
-
 	bo = kzalloc(sizeof(*bo), GFP_KERNEL);
-
-	if (unlikely(bo == NULL)) {
-		ttm_mem_global_free(mem_glob, acc_size);
+	if (unlikely(bo == NULL))
 		return -ENOMEM;
-	}
 
+	acc_size = ttm_bo_acc_size(bdev, size, sizeof(struct ttm_buffer_object));
 	ret = ttm_bo_init(bdev, bo, size, type, placement, page_alignment,
 				buffer_start, interruptible,
 			  persistent_swap_storage, acc_size, NULL, NULL);
@@ -1834,6 +1827,7 @@
 			spin_unlock(&glob->lru_lock);
 			(void) ttm_bo_cleanup_refs(bo, false, false, false);
 			kref_put(&bo->list_kref, ttm_bo_release_list);
+			spin_lock(&glob->lru_lock);
 			continue;
 		}
 
diff --git a/drivers/gpu/drm/udl/udl_drv.c b/drivers/gpu/drm/udl/udl_drv.c
index 4d02c46..6e52069 100644
--- a/drivers/gpu/drm/udl/udl_drv.c
+++ b/drivers/gpu/drm/udl/udl_drv.c
@@ -13,8 +13,21 @@
 
 static struct drm_driver driver;
 
+/*
+ * There are many DisplayLink-based graphics products, all with unique PIDs.
+ * So we match on DisplayLink's VID + Vendor-Defined Interface Class (0xff).
+ * We also require a match on SubClass (0x00) and Protocol (0x00),
+ * which is compatible with all known USB 2.0 era graphics chips and firmware,
+ * but allows DisplayLink to increment those for any future incompatible chips
+ */
 static struct usb_device_id id_table[] = {
-	{.idVendor = 0x17e9, .match_flags = USB_DEVICE_ID_MATCH_VENDOR,},
+	{.idVendor = 0x17e9, .bInterfaceClass = 0xff,
+	 .bInterfaceSubClass = 0x00,
+	 .bInterfaceProtocol = 0x00,
+	 .match_flags = USB_DEVICE_ID_MATCH_VENDOR |
+			USB_DEVICE_ID_MATCH_INT_CLASS |
+			USB_DEVICE_ID_MATCH_INT_SUBCLASS |
+			USB_DEVICE_ID_MATCH_INT_PROTOCOL,},
 	{},
 };
 MODULE_DEVICE_TABLE(usb, id_table);
diff --git a/drivers/gpu/drm/udl/udl_fb.c b/drivers/gpu/drm/udl/udl_fb.c
index a029ee3..ce9a611 100644
--- a/drivers/gpu/drm/udl/udl_fb.c
+++ b/drivers/gpu/drm/udl/udl_fb.c
@@ -156,8 +156,17 @@
 	if (!fb->active_16)
 		return 0;
 
-	if (!fb->obj->vmapping)
-		udl_gem_vmap(fb->obj);
+	if (!fb->obj->vmapping) {
+		ret = udl_gem_vmap(fb->obj);
+		if (ret == -ENOMEM) {
+			DRM_ERROR("failed to vmap fb\n");
+			return 0;
+		}
+		if (!fb->obj->vmapping) {
+			DRM_ERROR("fb vmapping is NULL\n");
+			return 0;
+		}
+	}
 
 	start_cycles = get_cycles();
 
diff --git a/drivers/gpu/drm/udl/udl_gem.c b/drivers/gpu/drm/udl/udl_gem.c
index 40efd32..7bd65bd 100644
--- a/drivers/gpu/drm/udl/udl_gem.c
+++ b/drivers/gpu/drm/udl/udl_gem.c
@@ -180,6 +180,18 @@
 	int page_count = obj->base.size / PAGE_SIZE;
 	int ret;
 
+	if (obj->base.import_attach) {
+		ret = dma_buf_begin_cpu_access(obj->base.import_attach->dmabuf,
+					       0, obj->base.size, DMA_BIDIRECTIONAL);
+		if (ret)
+			return -EINVAL;
+
+		obj->vmapping = dma_buf_vmap(obj->base.import_attach->dmabuf);
+		if (!obj->vmapping)
+			return -ENOMEM;
+		return 0;
+	}
+
 	ret = udl_gem_get_pages(obj, GFP_KERNEL);
 	if (ret)
 		return ret;
@@ -192,6 +204,13 @@
 
 void udl_gem_vunmap(struct udl_gem_object *obj)
 {
+	if (obj->base.import_attach) {
+		dma_buf_vunmap(obj->base.import_attach->dmabuf, obj->vmapping);
+		dma_buf_end_cpu_access(obj->base.import_attach->dmabuf, 0,
+				       obj->base.size, DMA_BIDIRECTIONAL);
+		return;
+	}
+
 	if (obj->vmapping)
 		vunmap(obj->vmapping);
 
@@ -202,12 +221,12 @@
 {
 	struct udl_gem_object *obj = to_udl_bo(gem_obj);
 
-	if (gem_obj->import_attach)
-		drm_prime_gem_destroy(gem_obj, obj->sg);
-
 	if (obj->vmapping)
 		udl_gem_vunmap(obj);
 
+	if (gem_obj->import_attach)
+		drm_prime_gem_destroy(gem_obj, obj->sg);
+
 	if (obj->pages)
 		udl_gem_put_pages(obj);
 
@@ -234,7 +253,7 @@
 
 	ret = udl_gem_get_pages(gobj, GFP_KERNEL);
 	if (ret)
-		return ret;
+		goto out;
 	if (!gobj->base.map_list.map) {
 		ret = drm_gem_create_mmap_offset(obj);
 		if (ret)
@@ -257,8 +276,6 @@
 {
 	struct udl_gem_object *obj;
 	int npages;
-	int i;
-	struct scatterlist *iter;
 
 	npages = size / PAGE_SIZE;
 
diff --git a/drivers/gpu/drm/udl/udl_main.c b/drivers/gpu/drm/udl/udl_main.c
index a8d5f09..4c2d836 100644
--- a/drivers/gpu/drm/udl/udl_main.c
+++ b/drivers/gpu/drm/udl/udl_main.c
@@ -61,7 +61,7 @@
 			u8 length;
 			u16 key;
 
-			key = *((u16 *) desc);
+			key = le16_to_cpu(*((u16 *) desc));
 			desc += sizeof(u16);
 			length = *desc;
 			desc++;
diff --git a/drivers/gpu/drm/via/via_map.c b/drivers/gpu/drm/via/via_map.c
index 1f18225..c126182 100644
--- a/drivers/gpu/drm/via/via_map.c
+++ b/drivers/gpu/drm/via/via_map.c
@@ -100,12 +100,11 @@
 	if (dev_priv == NULL)
 		return -ENOMEM;
 
+	idr_init(&dev_priv->object_idr);
 	dev->dev_private = (void *)dev_priv;
 
 	dev_priv->chipset = chipset;
 
-	idr_init(&dev->object_name_idr);
-
 	pci_set_master(dev->pdev);
 
 	ret = drm_vblank_init(dev, 1);
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_gmr.c b/drivers/gpu/drm/vmwgfx/vmwgfx_gmr.c
index 51c9ba5..21ee782 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_gmr.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_gmr.c
@@ -66,7 +66,7 @@
 	cmd += sizeof(remap_cmd) / sizeof(uint32);
 
 	for (i = 0; i < num_pages; ++i) {
-		if (VMW_PPN_SIZE > 4)
+		if (VMW_PPN_SIZE <= 4)
 			*cmd = page_to_pfn(*pages++);
 		else
 			*((uint64_t *)cmd) = page_to_pfn(*pages++);
diff --git a/drivers/gpu/vga/vga_switcheroo.c b/drivers/gpu/vga/vga_switcheroo.c
index 38f9534..5b3c7d1 100644
--- a/drivers/gpu/vga/vga_switcheroo.c
+++ b/drivers/gpu/vga/vga_switcheroo.c
@@ -190,6 +190,19 @@
 	return NULL;
 }
 
+int vga_switcheroo_get_client_state(struct pci_dev *pdev)
+{
+	struct vga_switcheroo_client *client;
+
+	client = find_client_from_pci(&vgasr_priv.clients, pdev);
+	if (!client)
+		return VGA_SWITCHEROO_NOT_FOUND;
+	if (!vgasr_priv.active)
+		return VGA_SWITCHEROO_INIT;
+	return client->pwr_state;
+}
+EXPORT_SYMBOL(vga_switcheroo_get_client_state);
+
 void vga_switcheroo_unregister_client(struct pci_dev *pdev)
 {
 	struct vga_switcheroo_client *client;
@@ -291,8 +304,6 @@
 		vga_switchon(new_client);
 
 	vga_set_default_device(new_client->pdev);
-	set_audio_state(new_client->id, VGA_SWITCHEROO_ON);
-
 	return 0;
 }
 
@@ -308,6 +319,8 @@
 
 	active->active = false;
 
+	set_audio_state(active->id, VGA_SWITCHEROO_OFF);
+
 	if (new_client->fb_info) {
 		struct fb_event event;
 		event.info = new_client->fb_info;
@@ -321,11 +334,11 @@
 	if (new_client->ops->reprobe)
 		new_client->ops->reprobe(new_client->pdev);
 
-	set_audio_state(active->id, VGA_SWITCHEROO_OFF);
-
 	if (active->pwr_state == VGA_SWITCHEROO_ON)
 		vga_switchoff(active);
 
+	set_audio_state(new_client->id, VGA_SWITCHEROO_ON);
+
 	new_client->active = true;
 	return 0;
 }
@@ -371,8 +384,9 @@
 	/* pwr off the device not in use */
 	if (strncmp(usercmd, "OFF", 3) == 0) {
 		list_for_each_entry(client, &vgasr_priv.clients, list) {
-			if (client->active)
+			if (client->active || client_is_audio(client))
 				continue;
+			set_audio_state(client->id, VGA_SWITCHEROO_OFF);
 			if (client->pwr_state == VGA_SWITCHEROO_ON)
 				vga_switchoff(client);
 		}
@@ -381,10 +395,11 @@
 	/* pwr on the device not in use */
 	if (strncmp(usercmd, "ON", 2) == 0) {
 		list_for_each_entry(client, &vgasr_priv.clients, list) {
-			if (client->active)
+			if (client->active || client_is_audio(client))
 				continue;
 			if (client->pwr_state == VGA_SWITCHEROO_OFF)
 				vga_switchon(client);
+			set_audio_state(client->id, VGA_SWITCHEROO_ON);
 		}
 		goto out;
 	}
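The newly exported vga_switcheroo_get_client_state() lets a GPU driver ask, typically at probe time, whether switcheroo has already switched it off. A hedged sketch of a caller follows; the surrounding function is hypothetical, only the exported call and the VGA_SWITCHEROO_* values it returns come from this patch:

	#include <linux/pci.h>
	#include <linux/vga_switcheroo.h>

	static bool my_gpu_starts_powered_up(struct pci_dev *pdev)
	{
		switch (vga_switcheroo_get_client_state(pdev)) {
		case VGA_SWITCHEROO_OFF:
			return false;	/* the other GPU is active, we are powered down */
		case VGA_SWITCHEROO_NOT_FOUND:	/* not registered with switcheroo */
		case VGA_SWITCHEROO_INIT:	/* switcheroo not active yet */
		default:
			return true;
		}
	}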
diff --git a/drivers/hid/Kconfig b/drivers/hid/Kconfig
index 034c80a..bef04c1 100644
--- a/drivers/hid/Kconfig
+++ b/drivers/hid/Kconfig
@@ -1,20 +1,11 @@
 #
 # HID driver configuration
 #
-menuconfig HID_SUPPORT
-	bool "HID Devices"
-	depends on INPUT
-	default y
-	---help---
-	  Say Y here to get to see options for various computer-human interface
-	  device drivers. This option alone does not add any kernel code.
-
-	  If you say N, all options in this submenu will be skipped and disabled.
-
-if HID_SUPPORT
+menu "HID support"
+     depends on INPUT
 
 config HID
-	tristate "Generic HID support"
+	tristate "HID bus support"
 	depends on INPUT
 	default y
 	---help---
@@ -23,14 +14,17 @@
 	  most commonly used to refer to the USB-HID specification, but other
 	  devices (such as, but not strictly limited to, Bluetooth) are
 	  designed using HID specification (this involves certain keyboards,
-	  mice, tablets, etc). This option compiles into kernel the generic
-	  HID layer code (parser, usages, etc.), which can then be used by
-	  transport-specific HID implementation (like USB or Bluetooth).
+	  mice, tablets, etc). This option adds the HID bus to the kernel,
+	  together with generic HID layer code. HID devices are added to and
+	  removed from the HID bus by transport-layer drivers, such as
+	  usbhid (USB_HID) and hidp (BT_HIDP).
 
 	  For docs and specs, see http://www.usb.org/developers/hidpage/
 
 	  If unsure, say Y.
 
+if HID
+
 config HID_BATTERY_STRENGTH
 	bool "Battery level reporting for HID devices"
 	depends on HID && POWER_SUPPLY && HID = POWER_SUPPLY
@@ -59,23 +53,22 @@
 
 	If unsure, say Y.
 
-source "drivers/hid/usbhid/Kconfig"
-
-menu "Special HID drivers"
-	depends on HID
-
 config HID_GENERIC
 	tristate "Generic HID driver"
 	depends on HID
-	default y
+	default HID
 	---help---
-	Support for generic HID devices.
+	Support for generic devices on the HID bus. This includes most
+	keyboards and mice, joysticks, tablets and digitizers.
 
 	To compile this driver as a module, choose M here: the module
 	will be called hid-generic.
 
 	If unsure, say Y.
 
+menu "Special HID drivers"
+	depends on HID
+
 config HID_A4TECH
 	tristate "A4 tech mice" if EXPERT
 	depends on USB_HID
@@ -662,4 +655,8 @@
 
 endmenu
 
-endif # HID_SUPPORT
+endif # HID
+
+source "drivers/hid/usbhid/Kconfig"
+
+endmenu
diff --git a/drivers/hid/hid-core.c b/drivers/hid/hid-core.c
index 8e3a6b2..6ac0286 100644
--- a/drivers/hid/hid-core.c
+++ b/drivers/hid/hid-core.c
@@ -1880,6 +1880,7 @@
 	{ HID_USB_DEVICE(USB_VENDOR_ID_ASUSTEK, USB_DEVICE_ID_ASUSTEK_LCM)},
 	{ HID_USB_DEVICE(USB_VENDOR_ID_ASUSTEK, USB_DEVICE_ID_ASUSTEK_LCM2)},
 	{ HID_USB_DEVICE(USB_VENDOR_ID_AVERMEDIA, USB_DEVICE_ID_AVER_FM_MR800) },
+	{ HID_USB_DEVICE(USB_VENDOR_ID_AXENTIA, USB_DEVICE_ID_AXENTIA_FM_RADIO) },
 	{ HID_USB_DEVICE(USB_VENDOR_ID_BERKSHIRE, USB_DEVICE_ID_BERKSHIRE_PCWD) },
 	{ HID_USB_DEVICE(USB_VENDOR_ID_CIDC, 0x0103) },
 	{ HID_USB_DEVICE(USB_VENDOR_ID_CYGNAL, USB_DEVICE_ID_CYGNAL_RADIO_SI470X) },
diff --git a/drivers/hid/hid-ids.h b/drivers/hid/hid-ids.h
index 9373f53..d1cdd2d 100644
--- a/drivers/hid/hid-ids.h
+++ b/drivers/hid/hid-ids.h
@@ -160,6 +160,9 @@
 #define USB_VENDOR_ID_AVERMEDIA		0x07ca
 #define USB_DEVICE_ID_AVER_FM_MR800	0xb800
 
+#define USB_VENDOR_ID_AXENTIA		0x12cf
+#define USB_DEVICE_ID_AXENTIA_FM_RADIO	0x7111
+
 #define USB_VENDOR_ID_BAANTO		0x2453
 #define USB_DEVICE_ID_BAANTO_MT_190W2	0x0100
 
diff --git a/drivers/hid/hid-logitech-dj.c b/drivers/hid/hid-logitech-dj.c
index 5e8a7ed..0f9c146 100644
--- a/drivers/hid/hid-logitech-dj.c
+++ b/drivers/hid/hid-logitech-dj.c
@@ -436,27 +436,37 @@
 
 static int logi_dj_recv_query_paired_devices(struct dj_receiver_dev *djrcv_dev)
 {
-	struct dj_report dj_report;
+	struct dj_report *dj_report;
+	int retval;
 
-	memset(&dj_report, 0, sizeof(dj_report));
-	dj_report.report_id = REPORT_ID_DJ_SHORT;
-	dj_report.device_index = 0xFF;
-	dj_report.report_type = REPORT_TYPE_CMD_GET_PAIRED_DEVICES;
-	return logi_dj_recv_send_report(djrcv_dev, &dj_report);
+	dj_report = kzalloc(sizeof(*dj_report), GFP_KERNEL);
+	if (!dj_report)
+		return -ENOMEM;
+	dj_report->report_id = REPORT_ID_DJ_SHORT;
+	dj_report->device_index = 0xFF;
+	dj_report->report_type = REPORT_TYPE_CMD_GET_PAIRED_DEVICES;
+	retval = logi_dj_recv_send_report(djrcv_dev, dj_report);
+	kfree(dj_report);
+	return retval;
 }
 
 static int logi_dj_recv_switch_to_dj_mode(struct dj_receiver_dev *djrcv_dev,
 					  unsigned timeout)
 {
-	struct dj_report dj_report;
+	struct dj_report *dj_report;
+	int retval;
 
-	memset(&dj_report, 0, sizeof(dj_report));
-	dj_report.report_id = REPORT_ID_DJ_SHORT;
-	dj_report.device_index = 0xFF;
-	dj_report.report_type = REPORT_TYPE_CMD_SWITCH;
-	dj_report.report_params[CMD_SWITCH_PARAM_DEVBITFIELD] = 0x3F;
-	dj_report.report_params[CMD_SWITCH_PARAM_TIMEOUT_SECONDS] = (u8)timeout;
-	return logi_dj_recv_send_report(djrcv_dev, &dj_report);
+	dj_report = kzalloc(sizeof(*dj_report), GFP_KERNEL);
+	if (!dj_report)
+		return -ENOMEM;
+	dj_report->report_id = REPORT_ID_DJ_SHORT;
+	dj_report->device_index = 0xFF;
+	dj_report->report_type = REPORT_TYPE_CMD_SWITCH;
+	dj_report->report_params[CMD_SWITCH_PARAM_DEVBITFIELD] = 0x3F;
+	dj_report->report_params[CMD_SWITCH_PARAM_TIMEOUT_SECONDS] = (u8)timeout;
+	retval = logi_dj_recv_send_report(djrcv_dev, dj_report);
+	kfree(dj_report);
+	return retval;
 }
 
 
diff --git a/drivers/hid/hid-magicmouse.c b/drivers/hid/hid-magicmouse.c
index 7cf3ffe..40ac665 100644
--- a/drivers/hid/hid-magicmouse.c
+++ b/drivers/hid/hid-magicmouse.c
@@ -426,8 +426,10 @@
 		__set_bit(EV_ABS, input->evbit);
 
 		input_set_abs_params(input, ABS_MT_TRACKING_ID, 0, 15, 0, 0);
-		input_set_abs_params(input, ABS_MT_TOUCH_MAJOR, 0, 255, 4, 0);
-		input_set_abs_params(input, ABS_MT_TOUCH_MINOR, 0, 255, 4, 0);
+		input_set_abs_params(input, ABS_MT_TOUCH_MAJOR, 0, 255 << 2,
+				     4, 0);
+		input_set_abs_params(input, ABS_MT_TOUCH_MINOR, 0, 255 << 2,
+				     4, 0);
 		input_set_abs_params(input, ABS_MT_ORIENTATION, -31, 32, 1, 0);
 
 		/* Note: Touch Y position from the device is inverted relative
diff --git a/drivers/hid/usbhid/Kconfig b/drivers/hid/usbhid/Kconfig
index 0f20fd1..0108c59 100644
--- a/drivers/hid/usbhid/Kconfig
+++ b/drivers/hid/usbhid/Kconfig
@@ -1,13 +1,13 @@
-comment "USB Input Devices"
+menu "USB HID support"
 	depends on USB
 
 config USB_HID
-	tristate "USB Human Interface Device (full HID) support"
+	tristate "USB HID transport layer"
 	default y
 	depends on USB && INPUT
 	select HID
 	---help---
-	  Say Y here if you want full HID support to connect USB keyboards,
+	  Say Y here if you want to connect USB keyboards,
 	  mice, joysticks, graphic tablets, or any other HID based devices
 	  to your computer via USB, as well as Uninterruptible Power Supply
 	  (UPS) and monitor control devices.
@@ -81,4 +81,4 @@
 
 endmenu
 
-
+endmenu
diff --git a/drivers/hwmon/Kconfig b/drivers/hwmon/Kconfig
index 7cd9bf4..6f1d167 100644
--- a/drivers/hwmon/Kconfig
+++ b/drivers/hwmon/Kconfig
@@ -1036,8 +1036,9 @@
 
 config SENSORS_SCH5627
 	tristate "SMSC SCH5627"
-	depends on !PPC
+	depends on !PPC && WATCHDOG
 	select SENSORS_SCH56XX_COMMON
+	select WATCHDOG_CORE
 	help
 	  If you say yes here you get support for the hardware monitoring
 	  features of the SMSC SCH5627 Super-I/O chip including support for
@@ -1048,8 +1049,9 @@
 
 config SENSORS_SCH5636
 	tristate "SMSC SCH5636"
-	depends on !PPC
+	depends on !PPC && WATCHDOG
 	select SENSORS_SCH56XX_COMMON
+	select WATCHDOG_CORE
 	help
 	  SMSC SCH5636 Super I/O chips include an embedded microcontroller for
 	  hardware monitoring solutions, allowing motherboard manufacturers to
diff --git a/drivers/hwmon/applesmc.c b/drivers/hwmon/applesmc.c
index f082e48..2cde9ec 100644
--- a/drivers/hwmon/applesmc.c
+++ b/drivers/hwmon/applesmc.c
@@ -8,7 +8,7 @@
  *
  * Based on hdaps.c driver:
  * Copyright (C) 2005 Robert Love <rml@novell.com>
- * Copyright (C) 2005 Jesper Juhl <jesper.juhl@gmail.com>
+ * Copyright (C) 2005 Jesper Juhl <jj@chaosbits.net>
  *
  * Fan control based on smcFanControl:
  * Copyright (C) 2006 Hendrik Holtmann <holtmann@mac.com>
@@ -215,7 +215,7 @@
 	int i;
 
 	if (send_command(cmd) || send_argument(key)) {
-		pr_warn("%s: read arg fail\n", key);
+		pr_warn("%.4s: read arg fail\n", key);
 		return -EIO;
 	}
 
@@ -223,7 +223,7 @@
 
 	for (i = 0; i < len; i++) {
 		if (__wait_status(0x05)) {
-			pr_warn("%s: read data fail\n", key);
+			pr_warn("%.4s: read data fail\n", key);
 			return -EIO;
 		}
 		buffer[i] = inb(APPLESMC_DATA_PORT);
diff --git a/drivers/hwmon/coretemp.c b/drivers/hwmon/coretemp.c
index b9d5123..637c51c 100644
--- a/drivers/hwmon/coretemp.c
+++ b/drivers/hwmon/coretemp.c
@@ -191,6 +191,24 @@
 	return tdata->valid ? sprintf(buf, "%d\n", tdata->temp) : -EAGAIN;
 }
 
+struct tjmax {
+	char const *id;
+	int tjmax;
+};
+
+static struct tjmax __cpuinitconst tjmax_table[] = {
+	{ "CPU D410", 100000 },
+	{ "CPU D425", 100000 },
+	{ "CPU D510", 100000 },
+	{ "CPU D525", 100000 },
+	{ "CPU N450", 100000 },
+	{ "CPU N455", 100000 },
+	{ "CPU N470", 100000 },
+	{ "CPU N475", 100000 },
+	{ "CPU  230", 100000 },
+	{ "CPU  330", 125000 },
+};
+
 static int __cpuinit adjust_tjmax(struct cpuinfo_x86 *c, u32 id,
 				  struct device *dev)
 {
@@ -202,6 +220,13 @@
 	int err;
 	u32 eax, edx;
 	struct pci_dev *host_bridge;
+	int i;
+
+	/* explicit tjmax table entries override heuristics */
+	for (i = 0; i < ARRAY_SIZE(tjmax_table); i++) {
+		if (strstr(c->x86_model_id, tjmax_table[i].id))
+			return tjmax_table[i].tjmax;
+	}
 
 	/* Early chips have no MSR for TjMax */
 
@@ -210,7 +235,8 @@
 
 	/* Atom CPUs */
 
-	if (c->x86_model == 0x1c) {
+	if (c->x86_model == 0x1c || c->x86_model == 0x26
+	    || c->x86_model == 0x27) {
 		usemsr_ee = 0;
 
 		host_bridge = pci_get_bus_and_slot(0, PCI_DEVFN(0, 0));
@@ -223,6 +249,9 @@
 			tjmax = 90000;
 
 		pci_dev_put(host_bridge);
+	} else if (c->x86_model == 0x36) {
+		usemsr_ee = 0;
+		tjmax = 100000;
 	}
 
 	if (c->x86_model > 0xe && usemsr_ee) {
@@ -664,7 +693,7 @@
 	 * sensors. We check this bit only, all the early CPUs
 	 * without thermal sensors will be filtered out.
 	 */
-	if (!cpu_has(c, X86_FEATURE_DTS))
+	if (!cpu_has(c, X86_FEATURE_DTHERM))
 		return;
 
 	if (!pdev) {
@@ -765,14 +794,14 @@
 };
 
 static const struct x86_cpu_id coretemp_ids[] = {
-	{ X86_VENDOR_INTEL, X86_FAMILY_ANY, X86_MODEL_ANY, X86_FEATURE_DTS },
+	{ X86_VENDOR_INTEL, X86_FAMILY_ANY, X86_MODEL_ANY, X86_FEATURE_DTHERM },
 	{}
 };
 MODULE_DEVICE_TABLE(x86cpu, coretemp_ids);
 
 static int __init coretemp_init(void)
 {
-	int i, err = -ENODEV;
+	int i, err;
 
 	/*
 	 * CPUID.06H.EAX[0] indicates whether the CPU has thermal
diff --git a/drivers/hwmon/emc2103.c b/drivers/hwmon/emc2103.c
index 9691f66..e7d234b 100644
--- a/drivers/hwmon/emc2103.c
+++ b/drivers/hwmon/emc2103.c
@@ -451,11 +451,15 @@
 		data->fan_rpm_control = true;
 		break;
 	default:
-		mutex_unlock(&data->update_lock);
-		return -EINVAL;
+		count = -EINVAL;
+		goto err;
 	}
 
-	read_u8_from_i2c(client, REG_FAN_CONF1, &conf_reg);
+	result = read_u8_from_i2c(client, REG_FAN_CONF1, &conf_reg);
+	if (result) {
+		count = result;
+		goto err;
+	}
 
 	if (data->fan_rpm_control)
 		conf_reg |= 0x80;
@@ -463,7 +467,7 @@
 		conf_reg &= ~0x80;
 
 	i2c_smbus_write_byte_data(client, REG_FAN_CONF1, conf_reg);
-
+err:
 	mutex_unlock(&data->update_lock);
 	return count;
 }
diff --git a/drivers/hwmon/jc42.c b/drivers/hwmon/jc42.c
index a9bfd67..e72ba5d 100644
--- a/drivers/hwmon/jc42.c
+++ b/drivers/hwmon/jc42.c
@@ -590,6 +590,6 @@
 
 module_i2c_driver(jc42_driver);
 
-MODULE_AUTHOR("Guenter Roeck <guenter.roeck@ericsson.com>");
+MODULE_AUTHOR("Guenter Roeck <linux@roeck-us.net>");
 MODULE_DESCRIPTION("JC42 driver");
 MODULE_LICENSE("GPL");
diff --git a/drivers/hwmon/lineage-pem.c b/drivers/hwmon/lineage-pem.c
index d264937..bd75d24 100644
--- a/drivers/hwmon/lineage-pem.c
+++ b/drivers/hwmon/lineage-pem.c
@@ -567,6 +567,6 @@
 
 module_i2c_driver(pem_driver);
 
-MODULE_AUTHOR("Guenter Roeck <guenter.roeck@ericsson.com>");
+MODULE_AUTHOR("Guenter Roeck <linux@roeck-us.net>");
 MODULE_DESCRIPTION("Lineage CPL PEM hardware monitoring driver");
 MODULE_LICENSE("GPL");
diff --git a/drivers/hwmon/ltc4261.c b/drivers/hwmon/ltc4261.c
index 069b7d3..77476a5 100644
--- a/drivers/hwmon/ltc4261.c
+++ b/drivers/hwmon/ltc4261.c
@@ -292,6 +292,6 @@
 
 module_i2c_driver(ltc4261_driver);
 
-MODULE_AUTHOR("Guenter Roeck <guenter.roeck@ericsson.com>");
+MODULE_AUTHOR("Guenter Roeck <linux@roeck-us.net>");
 MODULE_DESCRIPTION("LTC4261 driver");
 MODULE_LICENSE("GPL");
diff --git a/drivers/hwmon/max16065.c b/drivers/hwmon/max16065.c
index 822261b..019427d 100644
--- a/drivers/hwmon/max16065.c
+++ b/drivers/hwmon/max16065.c
@@ -692,6 +692,6 @@
 
 module_i2c_driver(max16065_driver);
 
-MODULE_AUTHOR("Guenter Roeck <guenter.roeck@ericsson.com>");
+MODULE_AUTHOR("Guenter Roeck <linux@roeck-us.net>");
 MODULE_DESCRIPTION("MAX16065 driver");
 MODULE_LICENSE("GPL");
diff --git a/drivers/hwmon/sch5627.c b/drivers/hwmon/sch5627.c
index 8ec6dfb..8342275 100644
--- a/drivers/hwmon/sch5627.c
+++ b/drivers/hwmon/sch5627.c
@@ -579,7 +579,7 @@
 	}
 
 	/* Note failing to register the watchdog is not a fatal error */
-	data->watchdog = sch56xx_watchdog_register(data->addr,
+	data->watchdog = sch56xx_watchdog_register(&pdev->dev, data->addr,
 			(build_code << 24) | (build_id << 8) | hwmon_rev,
 			&data->update_lock, 1);
 
diff --git a/drivers/hwmon/sch5636.c b/drivers/hwmon/sch5636.c
index 906d4ed..96a7e68 100644
--- a/drivers/hwmon/sch5636.c
+++ b/drivers/hwmon/sch5636.c
@@ -510,7 +510,7 @@
 	}
 
 	/* Note failing to register the watchdog is not a fatal error */
-	data->watchdog = sch56xx_watchdog_register(data->addr,
+	data->watchdog = sch56xx_watchdog_register(&pdev->dev, data->addr,
 					(revision[0] << 8) | revision[1],
 					&data->update_lock, 0);
 
diff --git a/drivers/hwmon/sch56xx-common.c b/drivers/hwmon/sch56xx-common.c
index ce52fc5..4380f5d 100644
--- a/drivers/hwmon/sch56xx-common.c
+++ b/drivers/hwmon/sch56xx-common.c
@@ -66,15 +66,10 @@
 
 struct sch56xx_watchdog_data {
 	u16 addr;
-	u32 revision;
 	struct mutex *io_lock;
-	struct mutex watchdog_lock;
-	struct list_head list; /* member of the watchdog_data_list */
 	struct kref kref;
-	struct miscdevice watchdog_miscdev;
-	unsigned long watchdog_is_open;
-	char watchdog_name[10]; /* must be unique to avoid sysfs conflict */
-	char watchdog_expect_close;
+	struct watchdog_info wdinfo;
+	struct watchdog_device wddev;
 	u8 watchdog_preset;
 	u8 watchdog_control;
 	u8 watchdog_output_enable;
@@ -82,15 +77,6 @@
 
 static struct platform_device *sch56xx_pdev;
 
-/*
- * Somewhat ugly :( global data pointer list with all sch56xx devices, so that
- * we can find our device data as when using misc_register there is no other
- * method to get to ones device data from the open fop.
- */
-static LIST_HEAD(watchdog_data_list);
-/* Note this lock not only protect list access, but also data.kref access */
-static DEFINE_MUTEX(watchdog_data_mutex);
-
 /* Super I/O functions */
 static inline int superio_inb(int base, int reg)
 {
@@ -272,22 +258,22 @@
  * Watchdog routines
  */
 
-/*
- * Release our data struct when the platform device has been released *and*
- * all references to our watchdog device are released.
- */
-static void sch56xx_watchdog_release_resources(struct kref *r)
+/* Release our data struct when we're unregistered *and*
+   all references to our watchdog device are released */
+static void watchdog_release_resources(struct kref *r)
 {
 	struct sch56xx_watchdog_data *data =
 		container_of(r, struct sch56xx_watchdog_data, kref);
 	kfree(data);
 }
 
-static int watchdog_set_timeout(struct sch56xx_watchdog_data *data,
-				int timeout)
+static int watchdog_set_timeout(struct watchdog_device *wddev,
+				unsigned int timeout)
 {
-	int ret, resolution;
+	struct sch56xx_watchdog_data *data = watchdog_get_drvdata(wddev);
+	unsigned int resolution;
 	u8 control;
+	int ret;
 
 	/* 1 second or 60 second resolution? */
 	if (timeout <= 255)
@@ -298,12 +284,6 @@
 	if (timeout < resolution || timeout > (resolution * 255))
 		return -EINVAL;
 
-	mutex_lock(&data->watchdog_lock);
-	if (!data->addr) {
-		ret = -ENODEV;
-		goto leave;
-	}
-
 	if (resolution == 1)
 		control = data->watchdog_control | SCH56XX_WDOG_TIME_BASE_SEC;
 	else
@@ -316,7 +296,7 @@
 						control);
 		mutex_unlock(data->io_lock);
 		if (ret)
-			goto leave;
+			return ret;
 
 		data->watchdog_control = control;
 	}
@@ -326,38 +306,17 @@
 	 * the watchdog countdown.
 	 */
 	data->watchdog_preset = DIV_ROUND_UP(timeout, resolution);
+	wddev->timeout = data->watchdog_preset * resolution;
 
-	ret = data->watchdog_preset * resolution;
-leave:
-	mutex_unlock(&data->watchdog_lock);
-	return ret;
+	return 0;
 }
 
-static int watchdog_get_timeout(struct sch56xx_watchdog_data *data)
+static int watchdog_start(struct watchdog_device *wddev)
 {
-	int timeout;
-
-	mutex_lock(&data->watchdog_lock);
-	if (data->watchdog_control & SCH56XX_WDOG_TIME_BASE_SEC)
-		timeout = data->watchdog_preset;
-	else
-		timeout = data->watchdog_preset * 60;
-	mutex_unlock(&data->watchdog_lock);
-
-	return timeout;
-}
-
-static int watchdog_start(struct sch56xx_watchdog_data *data)
-{
+	struct sch56xx_watchdog_data *data = watchdog_get_drvdata(wddev);
 	int ret;
 	u8 val;
 
-	mutex_lock(&data->watchdog_lock);
-	if (!data->addr) {
-		ret = -ENODEV;
-		goto leave_unlock_watchdog;
-	}
-
 	/*
 	 * The sch56xx's watchdog cannot really be started / stopped
 	 * it is always running, but we can avoid the timer expiring
@@ -385,18 +344,14 @@
 	if (ret)
 		goto leave;
 
-	/* 2. Enable output (if not already enabled) */
-	if (!(data->watchdog_output_enable & SCH56XX_WDOG_OUTPUT_ENABLE)) {
-		val = data->watchdog_output_enable |
-		      SCH56XX_WDOG_OUTPUT_ENABLE;
-		ret = sch56xx_write_virtual_reg(data->addr,
-						SCH56XX_REG_WDOG_OUTPUT_ENABLE,
-						val);
-		if (ret)
-			goto leave;
+	/* 2. Enable output */
+	val = data->watchdog_output_enable | SCH56XX_WDOG_OUTPUT_ENABLE;
+	ret = sch56xx_write_virtual_reg(data->addr,
+					SCH56XX_REG_WDOG_OUTPUT_ENABLE, val);
+	if (ret)
+		goto leave;
 
-		data->watchdog_output_enable = val;
-	}
+	data->watchdog_output_enable = val;
 
 	/* 3. Clear the watchdog event bit if set */
 	val = inb(data->addr + 9);
@@ -405,234 +360,70 @@
 
 leave:
 	mutex_unlock(data->io_lock);
-leave_unlock_watchdog:
-	mutex_unlock(&data->watchdog_lock);
 	return ret;
 }
 
-static int watchdog_trigger(struct sch56xx_watchdog_data *data)
+static int watchdog_trigger(struct watchdog_device *wddev)
 {
+	struct sch56xx_watchdog_data *data = watchdog_get_drvdata(wddev);
 	int ret;
 
-	mutex_lock(&data->watchdog_lock);
-	if (!data->addr) {
-		ret = -ENODEV;
-		goto leave;
-	}
-
 	/* Reset the watchdog countdown counter */
 	mutex_lock(data->io_lock);
 	ret = sch56xx_write_virtual_reg(data->addr, SCH56XX_REG_WDOG_PRESET,
 					data->watchdog_preset);
 	mutex_unlock(data->io_lock);
-leave:
-	mutex_unlock(&data->watchdog_lock);
+
 	return ret;
 }
 
-static int watchdog_stop_unlocked(struct sch56xx_watchdog_data *data)
+static int watchdog_stop(struct watchdog_device *wddev)
 {
+	struct sch56xx_watchdog_data *data = watchdog_get_drvdata(wddev);
 	int ret = 0;
 	u8 val;
 
-	if (!data->addr)
-		return -ENODEV;
+	val = data->watchdog_output_enable & ~SCH56XX_WDOG_OUTPUT_ENABLE;
+	mutex_lock(data->io_lock);
+	ret = sch56xx_write_virtual_reg(data->addr,
+					SCH56XX_REG_WDOG_OUTPUT_ENABLE, val);
+	mutex_unlock(data->io_lock);
+	if (ret)
+		return ret;
 
-	if (data->watchdog_output_enable & SCH56XX_WDOG_OUTPUT_ENABLE) {
-		val = data->watchdog_output_enable &
-		      ~SCH56XX_WDOG_OUTPUT_ENABLE;
-		mutex_lock(data->io_lock);
-		ret = sch56xx_write_virtual_reg(data->addr,
-						SCH56XX_REG_WDOG_OUTPUT_ENABLE,
-						val);
-		mutex_unlock(data->io_lock);
-		if (ret)
-			return ret;
-
-		data->watchdog_output_enable = val;
-	}
-
-	return ret;
-}
-
-static int watchdog_stop(struct sch56xx_watchdog_data *data)
-{
-	int ret;
-
-	mutex_lock(&data->watchdog_lock);
-	ret = watchdog_stop_unlocked(data);
-	mutex_unlock(&data->watchdog_lock);
-
-	return ret;
-}
-
-static int watchdog_release(struct inode *inode, struct file *filp)
-{
-	struct sch56xx_watchdog_data *data = filp->private_data;
-
-	if (data->watchdog_expect_close) {
-		watchdog_stop(data);
-		data->watchdog_expect_close = 0;
-	} else {
-		watchdog_trigger(data);
-		pr_crit("unexpected close, not stopping watchdog!\n");
-	}
-
-	clear_bit(0, &data->watchdog_is_open);
-
-	mutex_lock(&watchdog_data_mutex);
-	kref_put(&data->kref, sch56xx_watchdog_release_resources);
-	mutex_unlock(&watchdog_data_mutex);
-
+	data->watchdog_output_enable = val;
 	return 0;
 }
 
-static int watchdog_open(struct inode *inode, struct file *filp)
+static void watchdog_ref(struct watchdog_device *wddev)
 {
-	struct sch56xx_watchdog_data *pos, *data = NULL;
-	int ret, watchdog_is_open;
+	struct sch56xx_watchdog_data *data = watchdog_get_drvdata(wddev);
 
-	/*
-	 * We get called from drivers/char/misc.c with misc_mtx hold, and we
-	 * call misc_register() from sch56xx_watchdog_probe() with
-	 * watchdog_data_mutex hold, as misc_register() takes the misc_mtx
-	 * lock, this is a possible deadlock, so we use mutex_trylock here.
-	 */
-	if (!mutex_trylock(&watchdog_data_mutex))
-		return -ERESTARTSYS;
-	list_for_each_entry(pos, &watchdog_data_list, list) {
-		if (pos->watchdog_miscdev.minor == iminor(inode)) {
-			data = pos;
-			break;
-		}
-	}
-	/* Note we can never not have found data, so we don't check for this */
-	watchdog_is_open = test_and_set_bit(0, &data->watchdog_is_open);
-	if (!watchdog_is_open)
-		kref_get(&data->kref);
-	mutex_unlock(&watchdog_data_mutex);
-
-	if (watchdog_is_open)
-		return -EBUSY;
-
-	filp->private_data = data;
-
-	/* Start the watchdog */
-	ret = watchdog_start(data);
-	if (ret) {
-		watchdog_release(inode, filp);
-		return ret;
-	}
-
-	return nonseekable_open(inode, filp);
+	kref_get(&data->kref);
 }
 
-static ssize_t watchdog_write(struct file *filp, const char __user *buf,
-	size_t count, loff_t *offset)
+static void watchdog_unref(struct watchdog_device *wddev)
 {
-	int ret;
-	struct sch56xx_watchdog_data *data = filp->private_data;
+	struct sch56xx_watchdog_data *data = watchdog_get_drvdata(wddev);
 
-	if (count) {
-		if (!nowayout) {
-			size_t i;
-
-			/* Clear it in case it was set with a previous write */
-			data->watchdog_expect_close = 0;
-
-			for (i = 0; i != count; i++) {
-				char c;
-				if (get_user(c, buf + i))
-					return -EFAULT;
-				if (c == 'V')
-					data->watchdog_expect_close = 1;
-			}
-		}
-		ret = watchdog_trigger(data);
-		if (ret)
-			return ret;
-	}
-	return count;
+	kref_put(&data->kref, watchdog_release_resources);
 }
 
-static long watchdog_ioctl(struct file *filp, unsigned int cmd,
-			   unsigned long arg)
-{
-	struct watchdog_info ident = {
-		.options = WDIOF_KEEPALIVEPING | WDIOF_SETTIMEOUT,
-		.identity = "sch56xx watchdog"
-	};
-	int i, ret = 0;
-	struct sch56xx_watchdog_data *data = filp->private_data;
-
-	switch (cmd) {
-	case WDIOC_GETSUPPORT:
-		ident.firmware_version = data->revision;
-		if (!nowayout)
-			ident.options |= WDIOF_MAGICCLOSE;
-		if (copy_to_user((void __user *)arg, &ident, sizeof(ident)))
-			ret = -EFAULT;
-		break;
-
-	case WDIOC_GETSTATUS:
-	case WDIOC_GETBOOTSTATUS:
-		ret = put_user(0, (int __user *)arg);
-		break;
-
-	case WDIOC_KEEPALIVE:
-		ret = watchdog_trigger(data);
-		break;
-
-	case WDIOC_GETTIMEOUT:
-		i = watchdog_get_timeout(data);
-		ret = put_user(i, (int __user *)arg);
-		break;
-
-	case WDIOC_SETTIMEOUT:
-		if (get_user(i, (int __user *)arg)) {
-			ret = -EFAULT;
-			break;
-		}
-		ret = watchdog_set_timeout(data, i);
-		if (ret >= 0)
-			ret = put_user(ret, (int __user *)arg);
-		break;
-
-	case WDIOC_SETOPTIONS:
-		if (get_user(i, (int __user *)arg)) {
-			ret = -EFAULT;
-			break;
-		}
-
-		if (i & WDIOS_DISABLECARD)
-			ret = watchdog_stop(data);
-		else if (i & WDIOS_ENABLECARD)
-			ret = watchdog_trigger(data);
-		else
-			ret = -EINVAL;
-		break;
-
-	default:
-		ret = -ENOTTY;
-	}
-	return ret;
-}
-
-static const struct file_operations watchdog_fops = {
-	.owner = THIS_MODULE,
-	.llseek = no_llseek,
-	.open = watchdog_open,
-	.release = watchdog_release,
-	.write = watchdog_write,
-	.unlocked_ioctl = watchdog_ioctl,
+static const struct watchdog_ops watchdog_ops = {
+	.owner		= THIS_MODULE,
+	.start		= watchdog_start,
+	.stop		= watchdog_stop,
+	.ping		= watchdog_trigger,
+	.set_timeout	= watchdog_set_timeout,
+	.ref		= watchdog_ref,
+	.unref		= watchdog_unref,
 };
 
-struct sch56xx_watchdog_data *sch56xx_watchdog_register(
+struct sch56xx_watchdog_data *sch56xx_watchdog_register(struct device *parent,
 	u16 addr, u32 revision, struct mutex *io_lock, int check_enabled)
 {
 	struct sch56xx_watchdog_data *data;
-	int i, err, control, output_enable;
-	const int watchdog_minors[] = { WATCHDOG_MINOR, 212, 213, 214, 215 };
+	int err, control, output_enable;
 
 	/* Cache the watchdog registers */
 	mutex_lock(io_lock);
@@ -656,82 +447,55 @@
 		return NULL;
 
 	data->addr = addr;
-	data->revision = revision;
 	data->io_lock = io_lock;
-	data->watchdog_control = control;
-	data->watchdog_output_enable = output_enable;
-	mutex_init(&data->watchdog_lock);
-	INIT_LIST_HEAD(&data->list);
 	kref_init(&data->kref);
 
-	err = watchdog_set_timeout(data, 60);
-	if (err < 0)
-		goto error;
+	strlcpy(data->wdinfo.identity, "sch56xx watchdog",
+		sizeof(data->wdinfo.identity));
+	data->wdinfo.firmware_version = revision;
+	data->wdinfo.options = WDIOF_KEEPALIVEPING | WDIOF_SETTIMEOUT;
+	if (!nowayout)
+		data->wdinfo.options |= WDIOF_MAGICCLOSE;
 
-	/*
-	 * We take the data_mutex lock early so that watchdog_open() cannot
-	 * run when misc_register() has completed, but we've not yet added
-	 * our data to the watchdog_data_list.
-	 */
-	mutex_lock(&watchdog_data_mutex);
-	for (i = 0; i < ARRAY_SIZE(watchdog_minors); i++) {
-		/* Register our watchdog part */
-		snprintf(data->watchdog_name, sizeof(data->watchdog_name),
-			"watchdog%c", (i == 0) ? '\0' : ('0' + i));
-		data->watchdog_miscdev.name = data->watchdog_name;
-		data->watchdog_miscdev.fops = &watchdog_fops;
-		data->watchdog_miscdev.minor = watchdog_minors[i];
-		err = misc_register(&data->watchdog_miscdev);
-		if (err == -EBUSY)
-			continue;
-		if (err)
-			break;
+	data->wddev.info = &data->wdinfo;
+	data->wddev.ops = &watchdog_ops;
+	data->wddev.parent = parent;
+	data->wddev.timeout = 60;
+	data->wddev.min_timeout = 1;
+	data->wddev.max_timeout = 255 * 60;
+	if (nowayout)
+		set_bit(WDOG_NO_WAY_OUT, &data->wddev.status);
+	if (output_enable & SCH56XX_WDOG_OUTPUT_ENABLE)
+		set_bit(WDOG_ACTIVE, &data->wddev.status);
 
-		list_add(&data->list, &watchdog_data_list);
-		pr_info("Registered /dev/%s chardev major 10, minor: %d\n",
-			data->watchdog_name, watchdog_minors[i]);
-		break;
-	}
-	mutex_unlock(&watchdog_data_mutex);
+	/* Since the watchdog uses a downcounter there is no register to read
+	 * the BIOS-set timeout from (if any was set at all), so choose a
+	 * preset which will give us a 1 minute timeout */
+	if (control & SCH56XX_WDOG_TIME_BASE_SEC)
+		data->watchdog_preset = 60; /* seconds */
+	else
+		data->watchdog_preset = 1; /* minute */
 
+	data->watchdog_control = control;
+	data->watchdog_output_enable = output_enable;
+
+	watchdog_set_drvdata(&data->wddev, data);
+	err = watchdog_register_device(&data->wddev);
 	if (err) {
 		pr_err("Registering watchdog chardev: %d\n", err);
-		goto error;
-	}
-	if (i == ARRAY_SIZE(watchdog_minors)) {
-		pr_warn("Couldn't register watchdog (no free minor)\n");
-		goto error;
+		kfree(data);
+		return NULL;
 	}
 
 	return data;
-
-error:
-	kfree(data);
-	return NULL;
 }
 EXPORT_SYMBOL(sch56xx_watchdog_register);
 
 void sch56xx_watchdog_unregister(struct sch56xx_watchdog_data *data)
 {
-	mutex_lock(&watchdog_data_mutex);
-	misc_deregister(&data->watchdog_miscdev);
-	list_del(&data->list);
-	mutex_unlock(&watchdog_data_mutex);
-
-	mutex_lock(&data->watchdog_lock);
-	if (data->watchdog_is_open) {
-		pr_warn("platform device unregistered with watchdog "
-			"open! Stopping watchdog.\n");
-		watchdog_stop_unlocked(data);
-	}
-	/* Tell the wdog start/stop/trigger functions our dev is gone */
-	data->addr = 0;
-	data->io_lock = NULL;
-	mutex_unlock(&data->watchdog_lock);
-
-	mutex_lock(&watchdog_data_mutex);
-	kref_put(&data->kref, sch56xx_watchdog_release_resources);
-	mutex_unlock(&watchdog_data_mutex);
+	watchdog_unregister_device(&data->wddev);
+	kref_put(&data->kref, watchdog_release_resources);
+	/* Don't touch data after this it may have been free-ed! */
 }
 EXPORT_SYMBOL(sch56xx_watchdog_unregister);
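For reference, a minimal stand-alone sketch (not part of the patch) of the preset/resolution arithmetic performed by watchdog_set_timeout() above: timeouts up to 255 seconds use a 1 second time base, longer ones a 60 second base, and the value written back into wddev->timeout is the requested timeout rounded up to the next multiple of the resolution.

/* Stand-alone illustration of the rounding done in watchdog_set_timeout() */
static unsigned int sch56xx_effective_timeout(unsigned int timeout)
{
	unsigned int resolution = (timeout <= 255) ? 1 : 60;
	unsigned int preset;

	if (timeout < resolution || timeout > resolution * 255)
		return 0;	/* watchdog_set_timeout() returns -EINVAL here */

	preset = DIV_ROUND_UP(timeout, resolution);
	return preset * resolution;	/* e.g. 90 -> 90, 310 -> 360 */
}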
 
diff --git a/drivers/hwmon/sch56xx-common.h b/drivers/hwmon/sch56xx-common.h
index 7475086..704ea2c 100644
--- a/drivers/hwmon/sch56xx-common.h
+++ b/drivers/hwmon/sch56xx-common.h
@@ -27,6 +27,6 @@
 int sch56xx_read_virtual_reg12(u16 addr, u16 msb_reg, u16 lsn_reg,
 			       int high_nibble);
 
-struct sch56xx_watchdog_data *sch56xx_watchdog_register(
+struct sch56xx_watchdog_data *sch56xx_watchdog_register(struct device *parent,
 	u16 addr, u32 revision, struct mutex *io_lock, int check_enabled);
 void sch56xx_watchdog_unregister(struct sch56xx_watchdog_data *data);
diff --git a/drivers/i2c/Kconfig b/drivers/i2c/Kconfig
index 5f13c62..5a3bb3d 100644
--- a/drivers/i2c/Kconfig
+++ b/drivers/i2c/Kconfig
@@ -49,7 +49,6 @@
 
 config I2C_MUX
 	tristate "I2C bus multiplexing support"
-	depends on EXPERIMENTAL
 	help
 	  Say Y here if you want the I2C core to support the ability to
 	  handle multiplexed I2C bus topologies, by presenting each
diff --git a/drivers/i2c/algos/i2c-algo-bit.c b/drivers/i2c/algos/i2c-algo-bit.c
index 7f0b832..fad22b0 100644
--- a/drivers/i2c/algos/i2c-algo-bit.c
+++ b/drivers/i2c/algos/i2c-algo-bit.c
@@ -608,7 +608,7 @@
 
 static u32 bit_func(struct i2c_adapter *adap)
 {
-	return I2C_FUNC_I2C | I2C_FUNC_SMBUS_EMUL |
+	return I2C_FUNC_I2C | I2C_FUNC_NOSTART | I2C_FUNC_SMBUS_EMUL |
 	       I2C_FUNC_SMBUS_READ_BLOCK_DATA |
 	       I2C_FUNC_SMBUS_BLOCK_PROC_CALL |
 	       I2C_FUNC_10BIT_ADDR | I2C_FUNC_PROTOCOL_MANGLING;
diff --git a/drivers/i2c/busses/Kconfig b/drivers/i2c/busses/Kconfig
index 94468a6..7244c8b 100644
--- a/drivers/i2c/busses/Kconfig
+++ b/drivers/i2c/busses/Kconfig
@@ -445,20 +445,6 @@
 	  This driver can also be built as a module.  If so, the module
 	  will be called i2c-iop3xx.
 
-config I2C_IXP2000
-	tristate "IXP2000 GPIO-Based I2C Interface (DEPRECATED)"
-	depends on ARCH_IXP2000
-	select I2C_ALGOBIT
-	help
-	  Say Y here if you have an Intel IXP2000 (2400, 2800, 2850) based
-	  system and are using GPIO lines for an I2C bus.
-
-	  This support is also available as a module. If so, the module
-	  will be called i2c-ixp2000.
-
-	  This driver is deprecated and will be dropped soon. Use i2c-gpio
-	  instead.
-
 config I2C_MPC
 	tristate "MPC107/824x/85xx/512x/52xx/83xx/86xx"
 	depends on PPC
@@ -483,6 +469,7 @@
 config I2C_MXS
 	tristate "Freescale i.MX28 I2C interface"
 	depends on SOC_IMX28
+	select STMP_DEVICE
 	help
 	  Say Y here if you want to use the I2C bus controller on
 	  the Freescale i.MX28 processors.
diff --git a/drivers/i2c/busses/Makefile b/drivers/i2c/busses/Makefile
index 569567b0d..ce3c2be 100644
--- a/drivers/i2c/busses/Makefile
+++ b/drivers/i2c/busses/Makefile
@@ -44,7 +44,6 @@
 obj-$(CONFIG_I2C_IMX)		+= i2c-imx.o
 obj-$(CONFIG_I2C_INTEL_MID)	+= i2c-intel-mid.o
 obj-$(CONFIG_I2C_IOP3XX)	+= i2c-iop3xx.o
-obj-$(CONFIG_I2C_IXP2000)	+= i2c-ixp2000.o
 obj-$(CONFIG_I2C_MPC)		+= i2c-mpc.o
 obj-$(CONFIG_I2C_MV64XXX)	+= i2c-mv64xxx.o
 obj-$(CONFIG_I2C_MXS)		+= i2c-mxs.o
diff --git a/drivers/i2c/busses/i2c-davinci.c b/drivers/i2c/busses/i2c-davinci.c
index a76d85f..79b4bcb 100644
--- a/drivers/i2c/busses/i2c-davinci.c
+++ b/drivers/i2c/busses/i2c-davinci.c
@@ -755,7 +755,7 @@
 	dev->clk = NULL;
 
 	davinci_i2c_write_reg(dev, DAVINCI_I2C_MDR_REG, 0);
-	free_irq(IRQ_I2C, dev);
+	free_irq(dev->irq, dev);
 	iounmap(dev->base);
 	kfree(dev);
 
diff --git a/drivers/i2c/busses/i2c-designware-core.c b/drivers/i2c/busses/i2c-designware-core.c
index df87992..1e48bec 100644
--- a/drivers/i2c/busses/i2c-designware-core.c
+++ b/drivers/i2c/busses/i2c-designware-core.c
@@ -164,9 +164,15 @@
 
 u32 dw_readl(struct dw_i2c_dev *dev, int offset)
 {
-	u32 value = readl(dev->base + offset);
+	u32 value;
 
-	if (dev->swab)
+	if (dev->accessor_flags & ACCESS_16BIT)
+		value = readw(dev->base + offset) |
+			(readw(dev->base + offset + 2) << 16);
+	else
+		value = readl(dev->base + offset);
+
+	if (dev->accessor_flags & ACCESS_SWAP)
 		return swab32(value);
 	else
 		return value;
@@ -174,10 +180,15 @@
 
 void dw_writel(struct dw_i2c_dev *dev, u32 b, int offset)
 {
-	if (dev->swab)
+	if (dev->accessor_flags & ACCESS_SWAP)
 		b = swab32(b);
 
-	writel(b, dev->base + offset);
+	if (dev->accessor_flags & ACCESS_16BIT) {
+		writew((u16)b, dev->base + offset);
+		writew((u16)(b >> 16), dev->base + offset + 2);
+	} else {
+		writel(b, dev->base + offset);
+	}
 }
 
 static u32
@@ -251,14 +262,14 @@
 
 	input_clock_khz = dev->get_clk_rate_khz(dev);
 
-	/* Configure register endianess access */
 	reg = dw_readl(dev, DW_IC_COMP_TYPE);
 	if (reg == ___constant_swab32(DW_IC_COMP_TYPE_VALUE)) {
-		dev->swab = 1;
-		reg = DW_IC_COMP_TYPE_VALUE;
-	}
-
-	if (reg != DW_IC_COMP_TYPE_VALUE) {
+		/* Configure register endianess access */
+		dev->accessor_flags |= ACCESS_SWAP;
+	} else if (reg == (DW_IC_COMP_TYPE_VALUE & 0x0000ffff)) {
+		/* Configure register access mode 16bit */
+		dev->accessor_flags |= ACCESS_16BIT;
+	} else if (reg != DW_IC_COMP_TYPE_VALUE) {
 		dev_err(dev->dev, "Unknown Synopsys component type: "
 			"0x%08x\n", reg);
 		return -ENODEV;
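As a side note, the detection above can be summarised in a small stand-alone helper (illustration only, reusing the constants from this driver): the component-type register reads back as DW_IC_COMP_TYPE_VALUE on a plain 32-bit bus, as its byte-swapped value when the register file is big-endian, and as only the low halfword when the interconnect is 16 bits wide.

/* Illustration of the DW_IC_COMP_TYPE decoding done in i2c_dw_init() */
static u32 dw_guess_accessor_flags(u32 comp_type)
{
	if (comp_type == ___constant_swab32(DW_IC_COMP_TYPE_VALUE))
		return ACCESS_SWAP;	/* byte-swapped register file */
	if (comp_type == (DW_IC_COMP_TYPE_VALUE & 0x0000ffff))
		return ACCESS_16BIT;	/* only the low halfword reads back */
	return 0;			/* normal 32-bit little-endian access */
}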
diff --git a/drivers/i2c/busses/i2c-designware-core.h b/drivers/i2c/busses/i2c-designware-core.h
index 02d1a2d..9c1840e 100644
--- a/drivers/i2c/busses/i2c-designware-core.h
+++ b/drivers/i2c/busses/i2c-designware-core.h
@@ -82,7 +82,7 @@
 	unsigned int		status;
 	u32			abort_source;
 	int			irq;
-	int			swab;
+	u32			accessor_flags;
 	struct i2c_adapter	adapter;
 	u32			functionality;
 	u32			master_cfg;
@@ -90,6 +90,9 @@
 	unsigned int		rx_fifo_depth;
 };
 
+#define ACCESS_SWAP		0x00000001
+#define ACCESS_16BIT		0x00000002
+
 extern u32 dw_readl(struct dw_i2c_dev *dev, int offset);
 extern void dw_writel(struct dw_i2c_dev *dev, u32 b, int offset);
 extern int i2c_dw_init(struct dw_i2c_dev *dev);
diff --git a/drivers/i2c/busses/i2c-designware-platdrv.c b/drivers/i2c/busses/i2c-designware-platdrv.c
index 4ba589a..0506fef 100644
--- a/drivers/i2c/busses/i2c-designware-platdrv.c
+++ b/drivers/i2c/busses/i2c-designware-platdrv.c
@@ -36,6 +36,7 @@
 #include <linux/interrupt.h>
 #include <linux/of_i2c.h>
 #include <linux/platform_device.h>
+#include <linux/pm.h>
 #include <linux/io.h>
 #include <linux/slab.h>
 #include "i2c-designware-core.h"
@@ -95,7 +96,7 @@
 		r = -ENODEV;
 		goto err_free_mem;
 	}
-	clk_enable(dev->clk);
+	clk_prepare_enable(dev->clk);
 
 	dev->functionality =
 		I2C_FUNC_I2C |
@@ -155,7 +156,7 @@
 err_iounmap:
 	iounmap(dev->base);
 err_unuse_clocks:
-	clk_disable(dev->clk);
+	clk_disable_unprepare(dev->clk);
 	clk_put(dev->clk);
 	dev->clk = NULL;
 err_free_mem:
@@ -177,7 +178,7 @@
 	i2c_del_adapter(&dev->adapter);
 	put_device(&pdev->dev);
 
-	clk_disable(dev->clk);
+	clk_disable_unprepare(dev->clk);
 	clk_put(dev->clk);
 	dev->clk = NULL;
 
@@ -198,6 +199,31 @@
 MODULE_DEVICE_TABLE(of, dw_i2c_of_match);
 #endif
 
+#ifdef CONFIG_PM
+static int dw_i2c_suspend(struct device *dev)
+{
+	struct platform_device *pdev = to_platform_device(dev);
+	struct dw_i2c_dev *i_dev = platform_get_drvdata(pdev);
+
+	clk_disable_unprepare(i_dev->clk);
+
+	return 0;
+}
+
+static int dw_i2c_resume(struct device *dev)
+{
+	struct platform_device *pdev = to_platform_device(dev);
+	struct dw_i2c_dev *i_dev = platform_get_drvdata(pdev);
+
+	clk_prepare_enable(i_dev->clk);
+	i2c_dw_init(i_dev);
+
+	return 0;
+}
+#endif
+
+static SIMPLE_DEV_PM_OPS(dw_i2c_dev_pm_ops, dw_i2c_suspend, dw_i2c_resume);
+
 /* work with hotplug and coldplug */
 MODULE_ALIAS("platform:i2c_designware");
 
@@ -207,6 +233,7 @@
 		.name	= "i2c_designware",
 		.owner	= THIS_MODULE,
 		.of_match_table = of_match_ptr(dw_i2c_of_match),
+		.pm	= &dw_i2c_dev_pm_ops,
 	},
 };
 
diff --git a/drivers/i2c/busses/i2c-eg20t.c b/drivers/i2c/busses/i2c-eg20t.c
index c811289b..2f74ae8 100644
--- a/drivers/i2c/busses/i2c-eg20t.c
+++ b/drivers/i2c/busses/i2c-eg20t.c
@@ -263,11 +263,6 @@
 	init_waitqueue_head(&pch_event);
 }
 
-static inline bool ktime_lt(const ktime_t cmp1, const ktime_t cmp2)
-{
-	return cmp1.tv64 < cmp2.tv64;
-}
-
 /**
  * pch_i2c_wait_for_bus_idle() - check the status of bus.
  * @adap:	Pointer to struct i2c_algo_pch_data.
@@ -317,33 +312,6 @@
 }
 
 /**
- * pch_i2c_wait_for_xfer_complete() - initiates a wait for the tx complete event
- * @adap:	Pointer to struct i2c_algo_pch_data.
- */
-static s32 pch_i2c_wait_for_xfer_complete(struct i2c_algo_pch_data *adap)
-{
-	long ret;
-	ret = wait_event_timeout(pch_event,
-			(adap->pch_event_flag != 0), msecs_to_jiffies(1000));
-
-	if (ret == 0) {
-		pch_err(adap, "timeout: %x\n", adap->pch_event_flag);
-		adap->pch_event_flag = 0;
-		return -ETIMEDOUT;
-	}
-
-	if (adap->pch_event_flag & I2C_ERROR_MASK) {
-		pch_err(adap, "error bits set: %x\n", adap->pch_event_flag);
-		adap->pch_event_flag = 0;
-		return -EIO;
-	}
-
-	adap->pch_event_flag = 0;
-
-	return 0;
-}
-
-/**
  * pch_i2c_getack() - to confirm ACK/NACK
  * @adap:	Pointer to struct i2c_algo_pch_data.
  */
@@ -373,6 +341,40 @@
 	pch_clrbit(adap->pch_base_address, PCH_I2CCTL, PCH_START);
 }
 
+static int pch_i2c_wait_for_check_xfer(struct i2c_algo_pch_data *adap)
+{
+	long ret;
+
+	ret = wait_event_timeout(pch_event,
+			(adap->pch_event_flag != 0), msecs_to_jiffies(1000));
+	if (!ret) {
+		pch_err(adap, "%s:wait-event timeout\n", __func__);
+		adap->pch_event_flag = 0;
+		pch_i2c_stop(adap);
+		pch_i2c_init(adap);
+		return -ETIMEDOUT;
+	}
+
+	if (adap->pch_event_flag & I2C_ERROR_MASK) {
+		pch_err(adap, "Lost Arbitration\n");
+		adap->pch_event_flag = 0;
+		pch_clrbit(adap->pch_base_address, PCH_I2CSR, I2CMAL_BIT);
+		pch_clrbit(adap->pch_base_address, PCH_I2CSR, I2CMIF_BIT);
+		pch_i2c_init(adap);
+		return -EAGAIN;
+	}
+
+	adap->pch_event_flag = 0;
+
+	if (pch_i2c_getack(adap)) {
+		pch_dbg(adap, "Receive NACK for slave address "
+			"setting\n");
+		return -EIO;
+	}
+
+	return 0;
+}
+
 /**
  * pch_i2c_repstart() - generate repeated start condition in normal mode
  * @adap:	Pointer to struct i2c_algo_pch_data.
@@ -427,27 +429,12 @@
 		if (first)
 			pch_i2c_start(adap);
 
-		rtn = pch_i2c_wait_for_xfer_complete(adap);
-		if (rtn == 0) {
-			if (pch_i2c_getack(adap)) {
-				pch_dbg(adap, "Receive NACK for slave address"
-					"setting\n");
-				return -EIO;
-			}
-			addr_8_lsb = (addr & I2C_ADDR_MSK);
-			iowrite32(addr_8_lsb, p + PCH_I2CDR);
-		} else if (rtn == -EIO) { /* Arbitration Lost */
-			pch_err(adap, "Lost Arbitration\n");
-			pch_clrbit(adap->pch_base_address, PCH_I2CSR,
-				   I2CMAL_BIT);
-			pch_clrbit(adap->pch_base_address, PCH_I2CSR,
-				   I2CMIF_BIT);
-			pch_i2c_init(adap);
-			return -EAGAIN;
-		} else { /* wait-event timeout */
-			pch_i2c_stop(adap);
-			return -ETIME;
-		}
+		rtn = pch_i2c_wait_for_check_xfer(adap);
+		if (rtn)
+			return rtn;
+
+		addr_8_lsb = (addr & I2C_ADDR_MSK);
+		iowrite32(addr_8_lsb, p + PCH_I2CDR);
 	} else {
 		/* set 7 bit slave address and R/W bit as 0 */
 		iowrite32(addr << 1, p + PCH_I2CDR);
@@ -455,44 +442,21 @@
 			pch_i2c_start(adap);
 	}
 
-	rtn = pch_i2c_wait_for_xfer_complete(adap);
-	if (rtn == 0) {
-		if (pch_i2c_getack(adap)) {
-			pch_dbg(adap, "Receive NACK for slave address"
-				"setting\n");
-			return -EIO;
-		}
-	} else if (rtn == -EIO) { /* Arbitration Lost */
-		pch_err(adap, "Lost Arbitration\n");
-		pch_clrbit(adap->pch_base_address, PCH_I2CSR, I2CMAL_BIT);
-		pch_clrbit(adap->pch_base_address, PCH_I2CSR, I2CMIF_BIT);
-		pch_i2c_init(adap);
-		return -EAGAIN;
-	} else { /* wait-event timeout */
-		pch_i2c_stop(adap);
-		return -ETIME;
-	}
+	rtn = pch_i2c_wait_for_check_xfer(adap);
+	if (rtn)
+		return rtn;
 
 	for (wrcount = 0; wrcount < length; ++wrcount) {
 		/* write buffer value to I2C data register */
 		iowrite32(buf[wrcount], p + PCH_I2CDR);
 		pch_dbg(adap, "writing %x to Data register\n", buf[wrcount]);
 
-		rtn = pch_i2c_wait_for_xfer_complete(adap);
-		if (rtn == 0) {
-			if (pch_i2c_getack(adap)) {
-				pch_dbg(adap, "Receive NACK for slave address"
-					"setting\n");
-				return -EIO;
-			}
-			pch_clrbit(adap->pch_base_address, PCH_I2CSR,
-				   I2CMCF_BIT);
-			pch_clrbit(adap->pch_base_address, PCH_I2CSR,
-				   I2CMIF_BIT);
-		} else { /* wait-event timeout */
-			pch_i2c_stop(adap);
-			return -ETIME;
-		}
+		rtn = pch_i2c_wait_for_check_xfer(adap);
+		if (rtn)
+			return rtn;
+
+		pch_clrbit(adap->pch_base_address, PCH_I2CSR, I2CMCF_BIT);
+		pch_clrbit(adap->pch_base_address, PCH_I2CSR, I2CMIF_BIT);
 	}
 
 	/* check if this is the last message */
@@ -580,50 +544,21 @@
 		if (first)
 			pch_i2c_start(adap);
 
-		rtn = pch_i2c_wait_for_xfer_complete(adap);
-		if (rtn == 0) {
-			if (pch_i2c_getack(adap)) {
-				pch_dbg(adap, "Receive NACK for slave address"
-					"setting\n");
-				return -EIO;
-			}
-			addr_8_lsb = (addr & I2C_ADDR_MSK);
-			iowrite32(addr_8_lsb, p + PCH_I2CDR);
-		} else if (rtn == -EIO) { /* Arbitration Lost */
-			pch_err(adap, "Lost Arbitration\n");
-			pch_clrbit(adap->pch_base_address, PCH_I2CSR,
-				   I2CMAL_BIT);
-			pch_clrbit(adap->pch_base_address, PCH_I2CSR,
-				   I2CMIF_BIT);
-			pch_i2c_init(adap);
-			return -EAGAIN;
-		} else { /* wait-event timeout */
-			pch_i2c_stop(adap);
-			return -ETIME;
-		}
+		rtn = pch_i2c_wait_for_check_xfer(adap);
+		if (rtn)
+			return rtn;
+
+		addr_8_lsb = (addr & I2C_ADDR_MSK);
+		iowrite32(addr_8_lsb, p + PCH_I2CDR);
+
 		pch_i2c_restart(adap);
-		rtn = pch_i2c_wait_for_xfer_complete(adap);
-		if (rtn == 0) {
-			if (pch_i2c_getack(adap)) {
-				pch_dbg(adap, "Receive NACK for slave address"
-					"setting\n");
-				return -EIO;
-			}
-			addr_2_msb |= I2C_RD;
-			iowrite32(addr_2_msb | TEN_BIT_ADDR_MASK,
-				  p + PCH_I2CDR);
-		} else if (rtn == -EIO) { /* Arbitration Lost */
-			pch_err(adap, "Lost Arbitration\n");
-			pch_clrbit(adap->pch_base_address, PCH_I2CSR,
-				   I2CMAL_BIT);
-			pch_clrbit(adap->pch_base_address, PCH_I2CSR,
-				   I2CMIF_BIT);
-			pch_i2c_init(adap);
-			return -EAGAIN;
-		} else { /* wait-event timeout */
-			pch_i2c_stop(adap);
-			return -ETIME;
-		}
+
+		rtn = pch_i2c_wait_for_check_xfer(adap);
+		if (rtn)
+			return rtn;
+
+		addr_2_msb |= I2C_RD;
+		iowrite32(addr_2_msb | TEN_BIT_ADDR_MASK, p + PCH_I2CDR);
 	} else {
 		/* 7 address bits + R/W bit */
 		addr = (((addr) << 1) | (I2C_RD));
@@ -634,23 +569,9 @@
 	if (first)
 		pch_i2c_start(adap);
 
-	rtn = pch_i2c_wait_for_xfer_complete(adap);
-	if (rtn == 0) {
-		if (pch_i2c_getack(adap)) {
-			pch_dbg(adap, "Receive NACK for slave address"
-				"setting\n");
-			return -EIO;
-		}
-	} else if (rtn == -EIO) { /* Arbitration Lost */
-		pch_err(adap, "Lost Arbitration\n");
-		pch_clrbit(adap->pch_base_address, PCH_I2CSR, I2CMAL_BIT);
-		pch_clrbit(adap->pch_base_address, PCH_I2CSR, I2CMIF_BIT);
-		pch_i2c_init(adap);
-		return -EAGAIN;
-	} else { /* wait-event timeout */
-		pch_i2c_stop(adap);
-		return -ETIME;
-	}
+	rtn = pch_i2c_wait_for_check_xfer(adap);
+	if (rtn)
+		return rtn;
 
 	if (length == 0) {
 		pch_i2c_stop(adap);
@@ -669,18 +590,9 @@
 			if (loop != 1)
 				read_index++;
 
-			rtn = pch_i2c_wait_for_xfer_complete(adap);
-			if (rtn == 0) {
-				if (pch_i2c_getack(adap)) {
-					pch_dbg(adap, "Receive NACK for slave"
-						"address setting\n");
-					return -EIO;
-				}
-			} else { /* wait-event timeout */
-				pch_i2c_stop(adap);
-				return -ETIME;
-			}
-
+			rtn = pch_i2c_wait_for_check_xfer(adap);
+			if (rtn)
+				return rtn;
 		}	/* end for */
 
 		pch_i2c_sendnack(adap);
@@ -690,17 +602,9 @@
 		if (length != 1)
 			read_index++;
 
-		rtn = pch_i2c_wait_for_xfer_complete(adap);
-		if (rtn == 0) {
-			if (pch_i2c_getack(adap)) {
-				pch_dbg(adap, "Receive NACK for slave"
-					"address setting\n");
-				return -EIO;
-			}
-		} else { /* wait-event timeout */
-			pch_i2c_stop(adap);
-			return -ETIME;
-		}
+		rtn = pch_i2c_wait_for_check_xfer(adap);
+		if (rtn)
+			return rtn;
 
 		if (last)
 			pch_i2c_stop(adap);
@@ -790,7 +694,7 @@
 
 	ret = mutex_lock_interruptible(&pch_mutex);
 	if (ret)
-		return -ERESTARTSYS;
+		return ret;
 
 	if (adap->p_adapter_info->pch_i2c_suspended) {
 		mutex_unlock(&pch_mutex);
@@ -909,7 +813,7 @@
 
 		pch_adap->owner = THIS_MODULE;
 		pch_adap->class = I2C_CLASS_HWMON;
-		strcpy(pch_adap->name, KBUILD_MODNAME);
+		strlcpy(pch_adap->name, KBUILD_MODNAME, sizeof(pch_adap->name));
 		pch_adap->algo = &pch_algorithm;
 		pch_adap->algo_data = &adap_info->pch_data[i];
 
@@ -963,7 +867,7 @@
 		pci_iounmap(pdev, adap_info->pch_data[0].pch_base_address);
 
 	for (i = 0; i < adap_info->ch_num; i++)
-		adap_info->pch_data[i].pch_base_address = 0;
+		adap_info->pch_data[i].pch_base_address = NULL;
 
 	pci_set_drvdata(pdev, NULL);
 
diff --git a/drivers/i2c/busses/i2c-gpio.c b/drivers/i2c/busses/i2c-gpio.c
index c0330a4..e62d2d9 100644
--- a/drivers/i2c/busses/i2c-gpio.c
+++ b/drivers/i2c/busses/i2c-gpio.c
@@ -190,12 +190,7 @@
 	adap->dev.parent = &pdev->dev;
 	adap->dev.of_node = pdev->dev.of_node;
 
-	/*
-	 * If "dev->id" is negative we consider it as zero.
-	 * The reason to do so is to avoid sysfs names that only make
-	 * sense when there are multiple adapters.
-	 */
-	adap->nr = (pdev->id != -1) ? pdev->id : 0;
+	adap->nr = pdev->id;
 	ret = i2c_bit_add_numbered_bus(adap);
 	if (ret)
 		goto err_add_bus;
diff --git a/drivers/i2c/busses/i2c-imx.c b/drivers/i2c/busses/i2c-imx.c
index 56bce9a..8d6b504 100644
--- a/drivers/i2c/busses/i2c-imx.c
+++ b/drivers/i2c/busses/i2c-imx.c
@@ -512,7 +512,7 @@
 	}
 
 	/* Setup i2c_imx driver structure */
-	strcpy(i2c_imx->adapter.name, pdev->name);
+	strlcpy(i2c_imx->adapter.name, pdev->name, sizeof(i2c_imx->adapter.name));
 	i2c_imx->adapter.owner		= THIS_MODULE;
 	i2c_imx->adapter.algo		= &i2c_imx_algo;
 	i2c_imx->adapter.dev.parent	= &pdev->dev;
diff --git a/drivers/i2c/busses/i2c-ixp2000.c b/drivers/i2c/busses/i2c-ixp2000.c
deleted file mode 100644
index 5d263f90..0000000
--- a/drivers/i2c/busses/i2c-ixp2000.c
+++ /dev/null
@@ -1,157 +0,0 @@
-/*
- * drivers/i2c/busses/i2c-ixp2000.c
- *
- * I2C adapter for IXP2000 systems using GPIOs for I2C bus
- *
- * Author: Deepak Saxena <dsaxena@plexity.net>
- * Based on IXDP2400 code by: Naeem M. Afzal <naeem.m.afzal@intel.com>
- * Made generic by: Jeff Daly <jeffrey.daly@intel.com>
- *
- * Copyright (c) 2003-2004 MontaVista Software Inc.
- *
- * This file is licensed under  the terms of the GNU General Public 
- * License version 2. This program is licensed "as is" without any 
- * warranty of any kind, whether express or implied.
- *
- * From Jeff Daly:
- *
- * I2C adapter driver for Intel IXDP2xxx platforms. This should work for any
- * IXP2000 platform if it uses the HW GPIO in the same manner.  Basically, 
- * SDA and SCL GPIOs have external pullups.  Setting the respective GPIO to 
- * an input will make the signal a '1' via the pullup.  Setting them to 
- * outputs will pull them down. 
- *
- * The GPIOs are open drain signals and are used as configuration strap inputs
- * during power-up so there's generally a buffer on the board that needs to be 
- * 'enabled' to drive the GPIOs.
- */
-
-#include <linux/kernel.h>
-#include <linux/init.h>
-#include <linux/platform_device.h>
-#include <linux/module.h>
-#include <linux/i2c.h>
-#include <linux/i2c-algo-bit.h>
-#include <linux/slab.h>
-
-#include <mach/hardware.h>	/* Pick up IXP2000-specific bits */
-#include <mach/gpio-ixp2000.h>
-
-static inline int ixp2000_scl_pin(void *data)
-{
-	return ((struct ixp2000_i2c_pins*)data)->scl_pin;
-}
-
-static inline int ixp2000_sda_pin(void *data)
-{
-	return ((struct ixp2000_i2c_pins*)data)->sda_pin;
-}
-
-
-static void ixp2000_bit_setscl(void *data, int val)
-{
-	int i = 5000;
-
-	if (val) {
-		gpio_line_config(ixp2000_scl_pin(data), GPIO_IN);
-		while(!gpio_line_get(ixp2000_scl_pin(data)) && i--);
-	} else {
-		gpio_line_config(ixp2000_scl_pin(data), GPIO_OUT);
-	}
-}
-
-static void ixp2000_bit_setsda(void *data, int val)
-{
-	if (val) {
-		gpio_line_config(ixp2000_sda_pin(data), GPIO_IN);
-	} else {
-		gpio_line_config(ixp2000_sda_pin(data), GPIO_OUT);
-	}
-}
-
-static int ixp2000_bit_getscl(void *data)
-{
-	return gpio_line_get(ixp2000_scl_pin(data));
-}
-
-static int ixp2000_bit_getsda(void *data)
-{
-	return gpio_line_get(ixp2000_sda_pin(data));
-}
-
-struct ixp2000_i2c_data {
-	struct ixp2000_i2c_pins *gpio_pins;
-	struct i2c_adapter adapter;
-	struct i2c_algo_bit_data algo_data;
-};
-
-static int ixp2000_i2c_remove(struct platform_device *plat_dev)
-{
-	struct ixp2000_i2c_data *drv_data = platform_get_drvdata(plat_dev);
-
-	platform_set_drvdata(plat_dev, NULL);
-
-	i2c_del_adapter(&drv_data->adapter);
-
-	kfree(drv_data);
-
-	return 0;
-}
-
-static int ixp2000_i2c_probe(struct platform_device *plat_dev)
-{
-	int err;
-	struct ixp2000_i2c_pins *gpio = plat_dev->dev.platform_data;
-	struct ixp2000_i2c_data *drv_data = 
-		kzalloc(sizeof(struct ixp2000_i2c_data), GFP_KERNEL);
-
-	if (!drv_data)
-		return -ENOMEM;
-	drv_data->gpio_pins = gpio;
-
-	drv_data->algo_data.data = gpio;
-	drv_data->algo_data.setsda = ixp2000_bit_setsda;
-	drv_data->algo_data.setscl = ixp2000_bit_setscl;
-	drv_data->algo_data.getsda = ixp2000_bit_getsda;
-	drv_data->algo_data.getscl = ixp2000_bit_getscl;
-	drv_data->algo_data.udelay = 6;
-	drv_data->algo_data.timeout = HZ;
-
-	strlcpy(drv_data->adapter.name, plat_dev->dev.driver->name,
-		sizeof(drv_data->adapter.name));
-	drv_data->adapter.algo_data = &drv_data->algo_data,
-
-	drv_data->adapter.dev.parent = &plat_dev->dev;
-
-	gpio_line_config(gpio->sda_pin, GPIO_IN);
-	gpio_line_config(gpio->scl_pin, GPIO_IN);
-	gpio_line_set(gpio->scl_pin, 0);
-	gpio_line_set(gpio->sda_pin, 0);
-
-	if ((err = i2c_bit_add_bus(&drv_data->adapter)) != 0) {
-		dev_err(&plat_dev->dev, "Could not install, error %d\n", err);
-		kfree(drv_data);
-		return err;
-	} 
-
-	platform_set_drvdata(plat_dev, drv_data);
-
-	return 0;
-}
-
-static struct platform_driver ixp2000_i2c_driver = {
-	.probe		= ixp2000_i2c_probe,
-	.remove		= ixp2000_i2c_remove,
-	.driver		= {
-		.name	= "IXP2000-I2C",
-		.owner	= THIS_MODULE,
-	},
-};
-
-module_platform_driver(ixp2000_i2c_driver);
-
-MODULE_AUTHOR ("Deepak Saxena <dsaxena@plexity.net>");
-MODULE_DESCRIPTION("IXP2000 GPIO-based I2C bus driver");
-MODULE_LICENSE("GPL");
-MODULE_ALIAS("platform:IXP2000-I2C");
-
diff --git a/drivers/i2c/busses/i2c-mpc.c b/drivers/i2c/busses/i2c-mpc.c
index 206caac..b76731e 100644
--- a/drivers/i2c/busses/i2c-mpc.c
+++ b/drivers/i2c/busses/i2c-mpc.c
@@ -64,6 +64,9 @@
 	struct i2c_adapter adap;
 	int irq;
 	u32 real_clk;
+#ifdef CONFIG_PM
+	u8 fdr, dfsrr;
+#endif
 };
 
 struct mpc_i2c_divider {
@@ -703,6 +706,30 @@
 	return 0;
 };
 
+#ifdef CONFIG_PM
+static int mpc_i2c_suspend(struct device *dev)
+{
+	struct mpc_i2c *i2c = dev_get_drvdata(dev);
+
+	i2c->fdr = readb(i2c->base + MPC_I2C_FDR);
+	i2c->dfsrr = readb(i2c->base + MPC_I2C_DFSRR);
+
+	return 0;
+}
+
+static int mpc_i2c_resume(struct device *dev)
+{
+	struct mpc_i2c *i2c = dev_get_drvdata(dev);
+
+	writeb(i2c->fdr, i2c->base + MPC_I2C_FDR);
+	writeb(i2c->dfsrr, i2c->base + MPC_I2C_DFSRR);
+
+	return 0;
+}
+
+SIMPLE_DEV_PM_OPS(mpc_i2c_pm_ops, mpc_i2c_suspend, mpc_i2c_resume);
+#endif
+
 static struct mpc_i2c_data mpc_i2c_data_512x __devinitdata = {
 	.setup = mpc_i2c_setup_512x,
 };
@@ -747,6 +774,9 @@
 		.owner = THIS_MODULE,
 		.name = DRV_NAME,
 		.of_match_table = mpc_i2c_of_match,
+#ifdef CONFIG_PM
+		.pm = &mpc_i2c_pm_ops,
+#endif
 	},
 };
 
diff --git a/drivers/i2c/busses/i2c-mxs.c b/drivers/i2c/busses/i2c-mxs.c
index 7fa73ee..04eb441 100644
--- a/drivers/i2c/busses/i2c-mxs.c
+++ b/drivers/i2c/busses/i2c-mxs.c
@@ -27,8 +27,10 @@
 #include <linux/jiffies.h>
 #include <linux/io.h>
 #include <linux/pinctrl/consumer.h>
-
-#include <mach/common.h>
+#include <linux/stmp_device.h>
+#include <linux/of.h>
+#include <linux/of_device.h>
+#include <linux/of_i2c.h>
 
 #define DRIVER_NAME "mxs-i2c"
 
@@ -112,13 +114,9 @@
 	struct i2c_adapter adapter;
 };
 
-/*
- * TODO: check if calls to here are really needed. If not, we could get rid of
- * mxs_reset_block and the mach-dependency. Needs an I2C analyzer, probably.
- */
 static void mxs_i2c_reset(struct mxs_i2c_dev *i2c)
 {
-	mxs_reset_block(i2c->regs);
+	stmp_reset_block(i2c->regs);
 	writel(MXS_I2C_IRQ_MASK << 8, i2c->regs + MXS_I2C_CTRL1_SET);
 	writel(MXS_I2C_QUEUECTRL_PIO_QUEUE_MODE,
 			i2c->regs + MXS_I2C_QUEUECTRL_SET);
@@ -371,6 +369,7 @@
 	adap->algo = &mxs_i2c_algo;
 	adap->dev.parent = dev;
 	adap->nr = pdev->id;
+	adap->dev.of_node = pdev->dev.of_node;
 	i2c_set_adapdata(adap, i2c);
 	err = i2c_add_numbered_adapter(adap);
 	if (err) {
@@ -380,6 +379,8 @@
 		return err;
 	}
 
+	of_i2c_register_devices(adap);
+
 	return 0;
 }
 
@@ -399,10 +400,17 @@
 	return 0;
 }
 
+static const struct of_device_id mxs_i2c_dt_ids[] = {
+	{ .compatible = "fsl,imx28-i2c", },
+	{ /* sentinel */ }
+};
+MODULE_DEVICE_TABLE(of, mxs_i2c_dt_ids);
+
 static struct platform_driver mxs_i2c_driver = {
 	.driver = {
 		   .name = DRIVER_NAME,
 		   .owner = THIS_MODULE,
+		   .of_match_table = mxs_i2c_dt_ids,
 		   },
 	.remove = __devexit_p(mxs_i2c_remove),
 };
diff --git a/drivers/i2c/busses/i2c-nuc900.c b/drivers/i2c/busses/i2c-nuc900.c
index 03b6157..a26dfb8 100644
--- a/drivers/i2c/busses/i2c-nuc900.c
+++ b/drivers/i2c/busses/i2c-nuc900.c
@@ -502,7 +502,8 @@
 /* declare our i2c functionality */
 static u32 nuc900_i2c_func(struct i2c_adapter *adap)
 {
-	return I2C_FUNC_I2C | I2C_FUNC_SMBUS_EMUL | I2C_FUNC_PROTOCOL_MANGLING;
+	return I2C_FUNC_I2C | I2C_FUNC_SMBUS_EMUL | I2C_FUNC_NOSTART |
+		I2C_FUNC_PROTOCOL_MANGLING;
 }
 
 /* i2c bus registration info */
diff --git a/drivers/i2c/busses/i2c-ocores.c b/drivers/i2c/busses/i2c-ocores.c
index 18068de..75194c5 100644
--- a/drivers/i2c/busses/i2c-ocores.c
+++ b/drivers/i2c/busses/i2c-ocores.c
@@ -55,6 +55,7 @@
 #include <linux/i2c-ocores.h>
 #include <linux/slab.h>
 #include <linux/io.h>
+#include <linux/of_i2c.h>
 
 struct ocores_i2c {
 	void __iomem *base;
@@ -343,6 +344,8 @@
 	if (pdata) {
 		for (i = 0; i < pdata->num_devices; i++)
 			i2c_new_device(&i2c->adap, pdata->devices + i);
+	} else {
+		of_i2c_register_devices(&i2c->adap);
 	}
 
 	return 0;
diff --git a/drivers/i2c/busses/i2c-pca-platform.c b/drivers/i2c/busses/i2c-pca-platform.c
index 2adbf1a..675878f 100644
--- a/drivers/i2c/busses/i2c-pca-platform.c
+++ b/drivers/i2c/busses/i2c-pca-platform.c
@@ -171,7 +171,7 @@
 	i2c->io_size = resource_size(res);
 	i2c->irq = irq;
 
-	i2c->adap.nr = pdev->id >= 0 ? pdev->id : 0;
+	i2c->adap.nr = pdev->id;
 	i2c->adap.owner = THIS_MODULE;
 	snprintf(i2c->adap.name, sizeof(i2c->adap.name),
 		 "PCA9564/PCA9665 at 0x%08lx",
diff --git a/drivers/i2c/busses/i2c-pxa.c b/drivers/i2c/busses/i2c-pxa.c
index f673326..a997c7d 100644
--- a/drivers/i2c/busses/i2c-pxa.c
+++ b/drivers/i2c/busses/i2c-pxa.c
@@ -1131,11 +1131,6 @@
 	spin_lock_init(&i2c->lock);
 	init_waitqueue_head(&i2c->wait);
 
-	/*
-	 * If "dev->id" is negative we consider it as zero.
-	 * The reason to do so is to avoid sysfs names that only make
-	 * sense when there are multiple adapters.
-	 */
 	i2c->adap.nr = dev->id;
 	snprintf(i2c->adap.name, sizeof(i2c->adap.name), "pxa_i2c-i2c.%u",
 		 i2c->adap.nr);
diff --git a/drivers/i2c/busses/i2c-s3c2410.c b/drivers/i2c/busses/i2c-s3c2410.c
index 737f721..0195915 100644
--- a/drivers/i2c/busses/i2c-s3c2410.c
+++ b/drivers/i2c/busses/i2c-s3c2410.c
@@ -44,8 +44,12 @@
 #include <plat/regs-iic.h>
 #include <plat/iic.h>
 
-/* i2c controller state */
+/* Treat S3C2410 as baseline hardware, anything else is supported via quirks */
+#define QUIRK_S3C2440		(1 << 0)
+#define QUIRK_HDMIPHY		(1 << 1)
+#define QUIRK_NO_GPIO		(1 << 2)
 
+/* i2c controller state */
 enum s3c24xx_i2c_state {
 	STATE_IDLE,
 	STATE_START,
@@ -54,14 +58,10 @@
 	STATE_STOP
 };
 
-enum s3c24xx_i2c_type {
-	TYPE_S3C2410,
-	TYPE_S3C2440,
-};
-
 struct s3c24xx_i2c {
 	spinlock_t		lock;
 	wait_queue_head_t	wait;
+	unsigned int            quirks;
 	unsigned int		suspended:1;
 
 	struct i2c_msg		*msg;
@@ -88,26 +88,45 @@
 #endif
 };
 
-/* default platform data removed, dev should always carry data. */
-
-/* s3c24xx_i2c_is2440()
- *
- * return true is this is an s3c2440
-*/
-
-static inline int s3c24xx_i2c_is2440(struct s3c24xx_i2c *i2c)
-{
-	struct platform_device *pdev = to_platform_device(i2c->dev);
-	enum s3c24xx_i2c_type type;
+static struct platform_device_id s3c24xx_driver_ids[] = {
+	{
+		.name		= "s3c2410-i2c",
+		.driver_data	= 0,
+	}, {
+		.name		= "s3c2440-i2c",
+		.driver_data	= QUIRK_S3C2440,
+	}, {
+		.name		= "s3c2440-hdmiphy-i2c",
+		.driver_data	= QUIRK_S3C2440 | QUIRK_HDMIPHY | QUIRK_NO_GPIO,
+	}, { },
+};
+MODULE_DEVICE_TABLE(platform, s3c24xx_driver_ids);
 
 #ifdef CONFIG_OF
-	if (i2c->dev->of_node)
-		return of_device_is_compatible(i2c->dev->of_node,
-				"samsung,s3c2440-i2c");
+static const struct of_device_id s3c24xx_i2c_match[] = {
+	{ .compatible = "samsung,s3c2410-i2c", .data = (void *)0 },
+	{ .compatible = "samsung,s3c2440-i2c", .data = (void *)QUIRK_S3C2440 },
+	{ .compatible = "samsung,s3c2440-hdmiphy-i2c",
+	  .data = (void *)(QUIRK_S3C2440 | QUIRK_HDMIPHY | QUIRK_NO_GPIO) },
+	{},
+};
+MODULE_DEVICE_TABLE(of, s3c24xx_i2c_match);
 #endif
 
-	type = platform_get_device_id(pdev)->driver_data;
-	return type == TYPE_S3C2440;
+/* s3c24xx_get_device_quirks
+ *
+ * Get controller type either from device tree or platform device variant.
+*/
+
+static inline unsigned int s3c24xx_get_device_quirks(struct platform_device *pdev)
+{
+	if (pdev->dev.of_node) {
+		const struct of_device_id *match;
+		match = of_match_node(s3c24xx_i2c_match, pdev->dev.of_node);
+		return (unsigned int)match->data;
+	}
+
+	return platform_get_device_id(pdev)->driver_data;
 }
 
 /* s3c24xx_i2c_master_complete
@@ -471,6 +490,13 @@
 	unsigned long iicstat;
 	int timeout = 400;
 
+	/* the timeout for HDMIPHY is reduced to 10 ms because
+	 * the hang-up is expected to happen, so waiting the full
+	 * 400 ms only stalls the system unnecessarily
+	 */
+	if (i2c->quirks & QUIRK_HDMIPHY)
+		timeout = 10;
+
 	while (timeout-- > 0) {
 		iicstat = readl(i2c->regs + S3C2410_IICSTAT);
 
@@ -480,6 +506,15 @@
 		msleep(1);
 	}
 
+	/* hang-up of bus dedicated for HDMIPHY occurred, resetting */
+	if (i2c->quirks & QUIRK_HDMIPHY) {
+		writel(0, i2c->regs + S3C2410_IICCON);
+		writel(0, i2c->regs + S3C2410_IICSTAT);
+		writel(0, i2c->regs + S3C2410_IICDS);
+
+		return 0;
+	}
+
 	return -ETIMEDOUT;
 }
 
@@ -591,7 +626,8 @@
 /* declare our i2c functionality */
 static u32 s3c24xx_i2c_func(struct i2c_adapter *adap)
 {
-	return I2C_FUNC_I2C | I2C_FUNC_SMBUS_EMUL | I2C_FUNC_PROTOCOL_MANGLING;
+	return I2C_FUNC_I2C | I2C_FUNC_SMBUS_EMUL | I2C_FUNC_NOSTART |
+		I2C_FUNC_PROTOCOL_MANGLING;
 }
 
 /* i2c bus registration info */
@@ -676,7 +712,7 @@
 
 	writel(iiccon, i2c->regs + S3C2410_IICCON);
 
-	if (s3c24xx_i2c_is2440(i2c)) {
+	if (i2c->quirks & QUIRK_S3C2440) {
 		unsigned long sda_delay;
 
 		if (pdata->sda_delay) {
@@ -761,6 +797,9 @@
 {
 	int idx, gpio, ret;
 
+	if (i2c->quirks & QUIRK_NO_GPIO)
+		return 0;
+
 	for (idx = 0; idx < 2; idx++) {
 		gpio = of_get_gpio(i2c->dev->of_node, idx);
 		if (!gpio_is_valid(gpio)) {
@@ -785,6 +824,10 @@
 static void s3c24xx_i2c_dt_gpio_free(struct s3c24xx_i2c *i2c)
 {
 	unsigned int idx;
+
+	if (i2c->quirks & QUIRK_NO_GPIO)
+		return;
+
 	for (idx = 0; idx < 2; idx++)
 		gpio_free(i2c->gpios[idx]);
 }
@@ -906,6 +949,7 @@
 		goto err_noclk;
 	}
 
+	i2c->quirks = s3c24xx_get_device_quirks(pdev);
 	if (pdata)
 		memcpy(i2c->pdata, pdata, sizeof(*pdata));
 	else
@@ -1110,28 +1154,6 @@
 
 /* device driver for platform bus bits */
 
-static struct platform_device_id s3c24xx_driver_ids[] = {
-	{
-		.name		= "s3c2410-i2c",
-		.driver_data	= TYPE_S3C2410,
-	}, {
-		.name		= "s3c2440-i2c",
-		.driver_data	= TYPE_S3C2440,
-	}, { },
-};
-MODULE_DEVICE_TABLE(platform, s3c24xx_driver_ids);
-
-#ifdef CONFIG_OF
-static const struct of_device_id s3c24xx_i2c_match[] = {
-	{ .compatible = "samsung,s3c2410-i2c" },
-	{ .compatible = "samsung,s3c2440-i2c" },
-	{},
-};
-MODULE_DEVICE_TABLE(of, s3c24xx_i2c_match);
-#else
-#define s3c24xx_i2c_match NULL
-#endif
-
 static struct platform_driver s3c24xx_i2c_driver = {
 	.probe		= s3c24xx_i2c_probe,
 	.remove		= s3c24xx_i2c_remove,
@@ -1140,7 +1162,7 @@
 		.owner	= THIS_MODULE,
 		.name	= "s3c-i2c",
 		.pm	= S3C24XX_DEV_PM_OPS,
-		.of_match_table = s3c24xx_i2c_match,
+		.of_match_table = of_match_ptr(s3c24xx_i2c_match),
 	},
 };
 
diff --git a/drivers/i2c/busses/i2c-sh_mobile.c b/drivers/i2c/busses/i2c-sh_mobile.c
index 675c969..8110ca4 100644
--- a/drivers/i2c/busses/i2c-sh_mobile.c
+++ b/drivers/i2c/busses/i2c-sh_mobile.c
@@ -27,6 +27,7 @@
 #include <linux/platform_device.h>
 #include <linux/interrupt.h>
 #include <linux/i2c.h>
+#include <linux/of_i2c.h>
 #include <linux/err.h>
 #include <linux/pm_runtime.h>
 #include <linux/clk.h>
@@ -653,6 +654,7 @@
 	adap->dev.parent = &dev->dev;
 	adap->retries = 5;
 	adap->nr = dev->id;
+	adap->dev.of_node = dev->dev.of_node;
 
 	strlcpy(adap->name, dev->name, sizeof(adap->name));
 
@@ -667,6 +669,8 @@
 
 	dev_info(&dev->dev, "I2C adapter %d with bus speed %lu Hz\n",
 		 adap->nr, pd->bus_speed);
+
+	of_i2c_register_devices(adap);
 	return 0;
 
  err_all:
@@ -710,11 +714,18 @@
 	.runtime_resume = sh_mobile_i2c_runtime_nop,
 };
 
+static const struct of_device_id sh_mobile_i2c_dt_ids[] __devinitconst = {
+	{ .compatible = "renesas,rmobile-iic", },
+	{},
+};
+MODULE_DEVICE_TABLE(of, sh_mobile_i2c_dt_ids);
+
 static struct platform_driver sh_mobile_i2c_driver = {
 	.driver		= {
 		.name		= "i2c-sh_mobile",
 		.owner		= THIS_MODULE,
 		.pm		= &sh_mobile_i2c_dev_pm_ops,
+		.of_match_table = sh_mobile_i2c_dt_ids,
 	},
 	.probe		= sh_mobile_i2c_probe,
 	.remove		= sh_mobile_i2c_remove,
diff --git a/drivers/i2c/busses/i2c-tegra.c b/drivers/i2c/busses/i2c-tegra.c
index 55e5ea6..8b2e555 100644
--- a/drivers/i2c/busses/i2c-tegra.c
+++ b/drivers/i2c/busses/i2c-tegra.c
@@ -401,8 +401,6 @@
 			disable_irq_nosync(i2c_dev->irq);
 			i2c_dev->irq_disabled = 1;
 		}
-
-		complete(&i2c_dev->msg_complete);
 		goto err;
 	}
 
@@ -411,7 +409,6 @@
 			i2c_dev->msg_err |= I2C_ERR_NO_ACK;
 		if (status & I2C_INT_ARBITRATION_LOST)
 			i2c_dev->msg_err |= I2C_ERR_ARBITRATION_LOST;
-		complete(&i2c_dev->msg_complete);
 		goto err;
 	}
 
@@ -429,14 +426,14 @@
 			tegra_i2c_mask_irq(i2c_dev, I2C_INT_TX_FIFO_DATA_REQ);
 	}
 
+	i2c_writel(i2c_dev, status, I2C_INT_STATUS);
+	if (i2c_dev->is_dvc)
+		dvc_writel(i2c_dev, DVC_STATUS_I2C_DONE_INTR, DVC_STATUS);
+
 	if (status & I2C_INT_PACKET_XFER_COMPLETE) {
 		BUG_ON(i2c_dev->msg_buf_remaining);
 		complete(&i2c_dev->msg_complete);
 	}
-
-	i2c_writel(i2c_dev, status, I2C_INT_STATUS);
-	if (i2c_dev->is_dvc)
-		dvc_writel(i2c_dev, DVC_STATUS_I2C_DONE_INTR, DVC_STATUS);
 	return IRQ_HANDLED;
 err:
 	/* An error occurred, mask all interrupts */
@@ -446,6 +443,8 @@
 	i2c_writel(i2c_dev, status, I2C_INT_STATUS);
 	if (i2c_dev->is_dvc)
 		dvc_writel(i2c_dev, DVC_STATUS_I2C_DONE_INTR, DVC_STATUS);
+
+	complete(&i2c_dev->msg_complete);
 	return IRQ_HANDLED;
 }
 
@@ -476,12 +475,15 @@
 	packet_header = msg->len - 1;
 	i2c_writel(i2c_dev, packet_header, I2C_TX_FIFO);
 
-	packet_header = msg->addr << I2C_HEADER_SLAVE_ADDR_SHIFT;
-	packet_header |= I2C_HEADER_IE_ENABLE;
+	packet_header = I2C_HEADER_IE_ENABLE;
 	if (!stop)
 		packet_header |= I2C_HEADER_REPEAT_START;
-	if (msg->flags & I2C_M_TEN)
+	if (msg->flags & I2C_M_TEN) {
+		packet_header |= msg->addr;
 		packet_header |= I2C_HEADER_10BIT_ADDR;
+	} else {
+		packet_header |= msg->addr << I2C_HEADER_SLAVE_ADDR_SHIFT;
+	}
 	if (msg->flags & I2C_M_IGNORE_NAK)
 		packet_header |= I2C_HEADER_CONT_ON_NAK;
 	if (msg->flags & I2C_M_RD)
@@ -557,7 +559,7 @@
 
 static u32 tegra_i2c_func(struct i2c_adapter *adap)
 {
-	return I2C_FUNC_I2C | I2C_FUNC_SMBUS_EMUL;
+	return I2C_FUNC_I2C | I2C_FUNC_SMBUS_EMUL | I2C_FUNC_10BIT_ADDR;
 }
 
 static const struct i2c_algorithm tegra_i2c_algo = {
diff --git a/drivers/i2c/busses/i2c-versatile.c b/drivers/i2c/busses/i2c-versatile.c
index f585aea..eec20db 100644
--- a/drivers/i2c/busses/i2c-versatile.c
+++ b/drivers/i2c/busses/i2c-versatile.c
@@ -104,13 +104,8 @@
 	i2c->algo = i2c_versatile_algo;
 	i2c->algo.data = i2c;
 
-	if (dev->id >= 0) {
-		/* static bus numbering */
-		i2c->adap.nr = dev->id;
-		ret = i2c_bit_add_numbered_bus(&i2c->adap);
-	} else
-		/* dynamic bus numbering */
-		ret = i2c_bit_add_bus(&i2c->adap);
+	i2c->adap.nr = dev->id;
+	ret = i2c_bit_add_numbered_bus(&i2c->adap);
 	if (ret >= 0) {
 		platform_set_drvdata(dev, i2c);
 		of_i2c_register_devices(&i2c->adap);
diff --git a/drivers/i2c/busses/i2c-xiic.c b/drivers/i2c/busses/i2c-xiic.c
index 2bded76..641d0e5 100644
--- a/drivers/i2c/busses/i2c-xiic.c
+++ b/drivers/i2c/busses/i2c-xiic.c
@@ -40,6 +40,7 @@
 #include <linux/i2c-xiic.h>
 #include <linux/io.h>
 #include <linux/slab.h>
+#include <linux/of_i2c.h>
 
 #define DRIVER_NAME "xiic-i2c"
 
@@ -705,8 +706,6 @@
 		goto resource_missing;
 
 	pdata = (struct xiic_i2c_platform_data *) pdev->dev.platform_data;
-	if (!pdata)
-		return -EINVAL;
 
 	i2c = kzalloc(sizeof(*i2c), GFP_KERNEL);
 	if (!i2c)
@@ -730,6 +729,7 @@
 	i2c->adap = xiic_adapter;
 	i2c_set_adapdata(&i2c->adap, i2c);
 	i2c->adap.dev.parent = &pdev->dev;
+	i2c->adap.dev.of_node = pdev->dev.of_node;
 
 	xiic_reinit(i2c);
 
@@ -748,9 +748,13 @@
 		goto add_adapter_failed;
 	}
 
-	/* add in known devices to the bus */
-	for (i = 0; i < pdata->num_devices; i++)
-		i2c_new_device(&i2c->adap, pdata->devices + i);
+	if (pdata) {
+		/* add in known devices to the bus */
+		for (i = 0; i < pdata->num_devices; i++)
+			i2c_new_device(&i2c->adap, pdata->devices + i);
+	}
+
+	of_i2c_register_devices(&i2c->adap);
 
 	return 0;
 
@@ -795,12 +799,21 @@
 	return 0;
 }
 
+#if defined(CONFIG_OF)
+static const struct of_device_id xiic_of_match[] __devinitconst = {
+	{ .compatible = "xlnx,xps-iic-2.00.a", },
+	{},
+};
+MODULE_DEVICE_TABLE(of, xiic_of_match);
+#endif
+
 static struct platform_driver xiic_i2c_driver = {
 	.probe   = xiic_i2c_probe,
 	.remove  = __devexit_p(xiic_i2c_remove),
 	.driver  = {
 		.owner = THIS_MODULE,
 		.name = DRIVER_NAME,
+		.of_match_table = of_match_ptr(xiic_of_match),
 	},
 };
 
diff --git a/drivers/i2c/i2c-core.c b/drivers/i2c/i2c-core.c
index feb7dc3..a6ad32b 100644
--- a/drivers/i2c/i2c-core.c
+++ b/drivers/i2c/i2c-core.c
@@ -772,6 +772,23 @@
 };
 EXPORT_SYMBOL_GPL(i2c_adapter_type);
 
+/**
+ * i2c_verify_adapter - return parameter as i2c_adapter or NULL
+ * @dev: device, probably from some driver model iterator
+ *
+ * When traversing the driver model tree, perhaps using driver model
+ * iterators like @device_for_each_child(), you can't assume very much
+ * about the nodes you find.  Use this function to avoid oopses caused
+ * by wrongly treating some non-I2C device as an i2c_adapter.
+ */
+struct i2c_adapter *i2c_verify_adapter(struct device *dev)
+{
+	return (dev->type == &i2c_adapter_type)
+			? to_i2c_adapter(dev)
+			: NULL;
+}
+EXPORT_SYMBOL(i2c_verify_adapter);
+
 #ifdef CONFIG_I2C_COMPAT
 static struct class_compat *i2c_adapter_compat_class;
 #endif
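A minimal usage sketch for the new helper (hypothetical caller, not part of this patch): it counts the i2c_adapter children of a device, relying on i2c_verify_adapter() returning NULL for anything that is not an adapter.

/* Hypothetical example: count the I2C adapters hanging off a device */
static int count_if_adapter(struct device *dev, void *data)
{
	struct i2c_adapter *adap = i2c_verify_adapter(dev);
	int *count = data;

	if (adap)		/* NULL means dev is not an i2c_adapter */
		(*count)++;
	return 0;
}

static int count_child_adapters(struct device *parent)
{
	int count = 0;

	device_for_each_child(parent, &count, count_if_adapter);
	return count;
}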
diff --git a/drivers/i2c/i2c-dev.c b/drivers/i2c/i2c-dev.c
index 4504832..5ec2261 100644
--- a/drivers/i2c/i2c-dev.c
+++ b/drivers/i2c/i2c-dev.c
@@ -265,19 +265,41 @@
 
 	res = 0;
 	for (i = 0; i < rdwr_arg.nmsgs; i++) {
-		/* Limit the size of the message to a sane amount;
-		 * and don't let length change either. */
-		if ((rdwr_pa[i].len > 8192) ||
-		    (rdwr_pa[i].flags & I2C_M_RECV_LEN)) {
+		/* Limit the size of the message to a sane amount */
+		if (rdwr_pa[i].len > 8192) {
 			res = -EINVAL;
 			break;
 		}
+
 		data_ptrs[i] = (u8 __user *)rdwr_pa[i].buf;
 		rdwr_pa[i].buf = memdup_user(data_ptrs[i], rdwr_pa[i].len);
 		if (IS_ERR(rdwr_pa[i].buf)) {
 			res = PTR_ERR(rdwr_pa[i].buf);
 			break;
 		}
+
+		/*
+		 * If the message length is received from the slave (similar
+		 * to SMBus block read), we must ensure that the buffer will
+		 * be large enough to cope with a message length of
+		 * I2C_SMBUS_BLOCK_MAX as this is the maximum underlying bus
+		 * drivers allow. The first byte in the buffer must be
+		 * pre-filled with the number of extra bytes, which must be
+		 * at least one to hold the message length, but can be
+		 * greater (for example to account for a checksum byte at
+		 * the end of the message.)
+		 */
+		if (rdwr_pa[i].flags & I2C_M_RECV_LEN) {
+			if (!(rdwr_pa[i].flags & I2C_M_RD) ||
+			    rdwr_pa[i].buf[0] < 1 ||
+			    rdwr_pa[i].len < rdwr_pa[i].buf[0] +
+					     I2C_SMBUS_BLOCK_MAX) {
+				res = -EINVAL;
+				break;
+			}
+
+			rdwr_pa[i].len = rdwr_pa[i].buf[0];
+		}
 	}
 	if (res < 0) {
 		int j;
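From user space, a transfer that satisfies the new checks could look like the sketch below (hypothetical device address and register, and assuming the underlying adapter driver supports I2C_M_RECV_LEN): buf[0] is pre-filled with the number of extra bytes, and the buffer leaves room for I2C_SMBUS_BLOCK_MAX data bytes on top of that.

/* Hypothetical user-space caller of I2C_RDWR with I2C_M_RECV_LEN */
#include <string.h>
#include <sys/ioctl.h>
#include <linux/i2c.h>
#include <linux/i2c-dev.h>

static int block_read(int fd, unsigned char *out)
{
	__u8 cmd = 0x01;			/* example register, made up */
	__u8 buf[1 + I2C_SMBUS_BLOCK_MAX];
	struct i2c_msg msgs[2] = {
		{ .addr = 0x50, .flags = 0, .len = 1, .buf = &cmd },
		{ .addr = 0x50, .flags = I2C_M_RD | I2C_M_RECV_LEN,
		  .len = sizeof(buf), .buf = buf },
	};
	struct i2c_rdwr_ioctl_data rdwr = { .msgs = msgs, .nmsgs = 2 };

	buf[0] = 1;			/* one extra byte: the length byte itself */
	if (ioctl(fd, I2C_RDWR, &rdwr) < 0)
		return -1;
	memcpy(out, buf + 1, buf[0]);	/* buf[0] now holds the count from the slave */
	return buf[0];
}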
diff --git a/drivers/i2c/i2c-mux.c b/drivers/i2c/i2c-mux.c
index d7a4833..1038c38 100644
--- a/drivers/i2c/i2c-mux.c
+++ b/drivers/i2c/i2c-mux.c
@@ -24,6 +24,8 @@
 #include <linux/slab.h>
 #include <linux/i2c.h>
 #include <linux/i2c-mux.h>
+#include <linux/of.h>
+#include <linux/of_i2c.h>
 
 /* multiplexer per channel data */
 struct i2c_mux_priv {
@@ -31,11 +33,11 @@
 	struct i2c_algorithm algo;
 
 	struct i2c_adapter *parent;
-	void *mux_dev;	/* the mux chip/device */
+	void *mux_priv;	/* the mux chip/device */
 	u32  chan_id;	/* the channel id */
 
-	int (*select)(struct i2c_adapter *, void *mux_dev, u32 chan_id);
-	int (*deselect)(struct i2c_adapter *, void *mux_dev, u32 chan_id);
+	int (*select)(struct i2c_adapter *, void *mux_priv, u32 chan_id);
+	int (*deselect)(struct i2c_adapter *, void *mux_priv, u32 chan_id);
 };
 
 static int i2c_mux_master_xfer(struct i2c_adapter *adap,
@@ -47,11 +49,11 @@
 
 	/* Switch to the right mux port and perform the transfer. */
 
-	ret = priv->select(parent, priv->mux_dev, priv->chan_id);
+	ret = priv->select(parent, priv->mux_priv, priv->chan_id);
 	if (ret >= 0)
 		ret = parent->algo->master_xfer(parent, msgs, num);
 	if (priv->deselect)
-		priv->deselect(parent, priv->mux_dev, priv->chan_id);
+		priv->deselect(parent, priv->mux_priv, priv->chan_id);
 
 	return ret;
 }
@@ -67,12 +69,12 @@
 
 	/* Select the right mux port and perform the transfer. */
 
-	ret = priv->select(parent, priv->mux_dev, priv->chan_id);
+	ret = priv->select(parent, priv->mux_priv, priv->chan_id);
 	if (ret >= 0)
 		ret = parent->algo->smbus_xfer(parent, addr, flags,
 					read_write, command, size, data);
 	if (priv->deselect)
-		priv->deselect(parent, priv->mux_dev, priv->chan_id);
+		priv->deselect(parent, priv->mux_priv, priv->chan_id);
 
 	return ret;
 }
@@ -87,7 +89,8 @@
 }
 
 struct i2c_adapter *i2c_add_mux_adapter(struct i2c_adapter *parent,
-				void *mux_dev, u32 force_nr, u32 chan_id,
+				struct device *mux_dev,
+				void *mux_priv, u32 force_nr, u32 chan_id,
 				int (*select) (struct i2c_adapter *,
 					       void *, u32),
 				int (*deselect) (struct i2c_adapter *,
@@ -102,7 +105,7 @@
 
 	/* Set up private adapter data */
 	priv->parent = parent;
-	priv->mux_dev = mux_dev;
+	priv->mux_priv = mux_priv;
 	priv->chan_id = chan_id;
 	priv->select = select;
 	priv->deselect = deselect;
@@ -124,6 +127,25 @@
 	priv->adap.algo_data = priv;
 	priv->adap.dev.parent = &parent->dev;
 
+	/*
+	 * Try to populate the mux adapter's of_node, expands to
+	 * nothing if !CONFIG_OF.
+	 */
+	if (mux_dev->of_node) {
+		struct device_node *child;
+		u32 reg;
+
+		for_each_child_of_node(mux_dev->of_node, child) {
+			ret = of_property_read_u32(child, "reg", &reg);
+			if (ret)
+				continue;
+			if (chan_id == reg) {
+				priv->adap.dev.of_node = child;
+				break;
+			}
+		}
+	}
+
 	if (force_nr) {
 		priv->adap.nr = force_nr;
 		ret = i2c_add_numbered_adapter(&priv->adap);
@@ -141,6 +163,8 @@
 	dev_info(&parent->dev, "Added multiplexed i2c bus %d\n",
 		 i2c_adapter_id(&priv->adap));
 
+	of_i2c_register_devices(&priv->adap);
+
 	return &priv->adap;
 }
 EXPORT_SYMBOL_GPL(i2c_add_mux_adapter);
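A hedged caller-side sketch of the new signature (my_mux, my_select, my_deselect, adap[] and NCHANS are made-up names): the mux chip driver now passes its own struct device as mux_dev so the core can look up each child bus of_node via the DT "reg" property, while the private pointer travels separately as mux_priv.

/* Hypothetical mux driver helper using the new two-pointer interface */
static int my_mux_add_channels(struct i2c_client *client, struct my_mux *mux)
{
	struct i2c_adapter *parent = client->adapter;
	int chan;

	for (chan = 0; chan < NCHANS; chan++) {
		/* mux_dev (for of_node lookup) and mux_priv are now separate */
		mux->adap[chan] = i2c_add_mux_adapter(parent, &client->dev,
						      mux, 0, chan,
						      my_select, my_deselect);
		if (!mux->adap[chan])
			return -ENODEV;
	}
	return 0;
}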
diff --git a/drivers/i2c/muxes/Kconfig b/drivers/i2c/muxes/Kconfig
index 90b7a01..a0edd98 100644
--- a/drivers/i2c/muxes/Kconfig
+++ b/drivers/i2c/muxes/Kconfig
@@ -15,7 +15,7 @@
 	  through GPIO pins.
 
 	  This driver can also be built as a module.  If so, the module
-	  will be called gpio-i2cmux.
+	  will be called i2c-mux-gpio.
 
 config I2C_MUX_PCA9541
 	tristate "NXP PCA9541 I2C Master Selector"
@@ -25,7 +25,7 @@
 	  I2C Master Selector.
 
 	  This driver can also be built as a module.  If so, the module
-	  will be called pca9541.
+	  will be called i2c-mux-pca9541.
 
 config I2C_MUX_PCA954x
 	tristate "Philips PCA954x I2C Mux/switches"
@@ -35,6 +35,18 @@
 	  I2C mux/switch devices.
 
 	  This driver can also be built as a module.  If so, the module
-	  will be called pca954x.
+	  will be called i2c-mux-pca954x.
+
+config I2C_MUX_PINCTRL
+	tristate "pinctrl-based I2C multiplexer"
+	depends on PINCTRL
+	help
+	  If you say yes to this option, support will be included for an I2C
+	  multiplexer that uses the pinctrl subsystem, i.e. pin multiplexing.
+	  This is useful for SoCs whose I2C module's signals can be routed to
+	  different sets of pins at run-time.
+
+	  This driver can also be built as a module. If so, the module will be
+	  called i2c-mux-pinctrl.
 
 endmenu
diff --git a/drivers/i2c/muxes/Makefile b/drivers/i2c/muxes/Makefile
index 4640436..76da869 100644
--- a/drivers/i2c/muxes/Makefile
+++ b/drivers/i2c/muxes/Makefile
@@ -1,8 +1,9 @@
 #
 # Makefile for multiplexer I2C chip drivers.
 
-obj-$(CONFIG_I2C_MUX_GPIO)	+= gpio-i2cmux.o
-obj-$(CONFIG_I2C_MUX_PCA9541)	+= pca9541.o
-obj-$(CONFIG_I2C_MUX_PCA954x)	+= pca954x.o
+obj-$(CONFIG_I2C_MUX_GPIO)	+= i2c-mux-gpio.o
+obj-$(CONFIG_I2C_MUX_PCA9541)	+= i2c-mux-pca9541.o
+obj-$(CONFIG_I2C_MUX_PCA954x)	+= i2c-mux-pca954x.o
+obj-$(CONFIG_I2C_MUX_PINCTRL)	+= i2c-mux-pinctrl.o
 
 ccflags-$(CONFIG_I2C_DEBUG_BUS) := -DDEBUG
diff --git a/drivers/i2c/muxes/gpio-i2cmux.c b/drivers/i2c/muxes/i2c-mux-gpio.c
similarity index 73%
rename from drivers/i2c/muxes/gpio-i2cmux.c
rename to drivers/i2c/muxes/i2c-mux-gpio.c
index e5fa695..68b1f8e 100644
--- a/drivers/i2c/muxes/gpio-i2cmux.c
+++ b/drivers/i2c/muxes/i2c-mux-gpio.c
@@ -10,7 +10,7 @@
 
 #include <linux/i2c.h>
 #include <linux/i2c-mux.h>
-#include <linux/gpio-i2cmux.h>
+#include <linux/i2c-mux-gpio.h>
 #include <linux/platform_device.h>
 #include <linux/init.h>
 #include <linux/module.h>
@@ -20,10 +20,10 @@
 struct gpiomux {
 	struct i2c_adapter *parent;
 	struct i2c_adapter **adap; /* child busses */
-	struct gpio_i2cmux_platform_data data;
+	struct i2c_mux_gpio_platform_data data;
 };
 
-static void gpiomux_set(const struct gpiomux *mux, unsigned val)
+static void i2c_mux_gpio_set(const struct gpiomux *mux, unsigned val)
 {
 	int i;
 
@@ -31,28 +31,28 @@
 		gpio_set_value(mux->data.gpios[i], val & (1 << i));
 }
 
-static int gpiomux_select(struct i2c_adapter *adap, void *data, u32 chan)
+static int i2c_mux_gpio_select(struct i2c_adapter *adap, void *data, u32 chan)
 {
 	struct gpiomux *mux = data;
 
-	gpiomux_set(mux, mux->data.values[chan]);
+	i2c_mux_gpio_set(mux, mux->data.values[chan]);
 
 	return 0;
 }
 
-static int gpiomux_deselect(struct i2c_adapter *adap, void *data, u32 chan)
+static int i2c_mux_gpio_deselect(struct i2c_adapter *adap, void *data, u32 chan)
 {
 	struct gpiomux *mux = data;
 
-	gpiomux_set(mux, mux->data.idle);
+	i2c_mux_gpio_set(mux, mux->data.idle);
 
 	return 0;
 }
 
-static int __devinit gpiomux_probe(struct platform_device *pdev)
+static int __devinit i2c_mux_gpio_probe(struct platform_device *pdev)
 {
 	struct gpiomux *mux;
-	struct gpio_i2cmux_platform_data *pdata;
+	struct i2c_mux_gpio_platform_data *pdata;
 	struct i2c_adapter *parent;
 	int (*deselect) (struct i2c_adapter *, void *, u32);
 	unsigned initial_state;
@@ -86,16 +86,16 @@
 		goto alloc_failed2;
 	}
 
-	if (pdata->idle != GPIO_I2CMUX_NO_IDLE) {
+	if (pdata->idle != I2C_MUX_GPIO_NO_IDLE) {
 		initial_state = pdata->idle;
-		deselect = gpiomux_deselect;
+		deselect = i2c_mux_gpio_deselect;
 	} else {
 		initial_state = pdata->values[0];
 		deselect = NULL;
 	}
 
 	for (i = 0; i < pdata->n_gpios; i++) {
-		ret = gpio_request(pdata->gpios[i], "gpio-i2cmux");
+		ret = gpio_request(pdata->gpios[i], "i2c-mux-gpio");
 		if (ret)
 			goto err_request_gpio;
 		gpio_direction_output(pdata->gpios[i],
@@ -105,8 +105,8 @@
 	for (i = 0; i < pdata->n_values; i++) {
 		u32 nr = pdata->base_nr ? (pdata->base_nr + i) : 0;
 
-		mux->adap[i] = i2c_add_mux_adapter(parent, mux, nr, i,
-						   gpiomux_select, deselect);
+		mux->adap[i] = i2c_add_mux_adapter(parent, &pdev->dev, mux, nr, i,
+						   i2c_mux_gpio_select, deselect);
 		if (!mux->adap[i]) {
 			ret = -ENODEV;
 			dev_err(&pdev->dev, "Failed to add adapter %d\n", i);
@@ -137,7 +137,7 @@
 	return ret;
 }
 
-static int __devexit gpiomux_remove(struct platform_device *pdev)
+static int __devexit i2c_mux_gpio_remove(struct platform_device *pdev)
 {
 	struct gpiomux *mux = platform_get_drvdata(pdev);
 	int i;
@@ -156,18 +156,18 @@
 	return 0;
 }
 
-static struct platform_driver gpiomux_driver = {
-	.probe	= gpiomux_probe,
-	.remove	= __devexit_p(gpiomux_remove),
+static struct platform_driver i2c_mux_gpio_driver = {
+	.probe	= i2c_mux_gpio_probe,
+	.remove	= __devexit_p(i2c_mux_gpio_remove),
 	.driver	= {
 		.owner	= THIS_MODULE,
-		.name	= "gpio-i2cmux",
+		.name	= "i2c-mux-gpio",
 	},
 };
 
-module_platform_driver(gpiomux_driver);
+module_platform_driver(i2c_mux_gpio_driver);
 
 MODULE_DESCRIPTION("GPIO-based I2C multiplexer driver");
 MODULE_AUTHOR("Peter Korsgaard <peter.korsgaard@barco.com>");
 MODULE_LICENSE("GPL");
-MODULE_ALIAS("platform:gpio-i2cmux");
+MODULE_ALIAS("platform:i2c-mux-gpio");
diff --git a/drivers/i2c/muxes/pca9541.c b/drivers/i2c/muxes/i2c-mux-pca9541.c
similarity index 98%
rename from drivers/i2c/muxes/pca9541.c
rename to drivers/i2c/muxes/i2c-mux-pca9541.c
index e0df9b6..8aacde1 100644
--- a/drivers/i2c/muxes/pca9541.c
+++ b/drivers/i2c/muxes/i2c-mux-pca9541.c
@@ -353,7 +353,8 @@
 	force = 0;
 	if (pdata)
 		force = pdata->modes[0].adap_id;
-	data->mux_adap = i2c_add_mux_adapter(adap, client, force, 0,
+	data->mux_adap = i2c_add_mux_adapter(adap, &client->dev, client,
+					     force, 0,
 					     pca9541_select_chan,
 					     pca9541_release_chan);
 
diff --git a/drivers/i2c/muxes/pca954x.c b/drivers/i2c/muxes/i2c-mux-pca954x.c
similarity index 99%
rename from drivers/i2c/muxes/pca954x.c
rename to drivers/i2c/muxes/i2c-mux-pca954x.c
index 0e37ef2..f2dfe0d 100644
--- a/drivers/i2c/muxes/pca954x.c
+++ b/drivers/i2c/muxes/i2c-mux-pca954x.c
@@ -226,7 +226,7 @@
 		}
 
 		data->virt_adaps[num] =
-			i2c_add_mux_adapter(adap, client,
+			i2c_add_mux_adapter(adap, &client->dev, client,
 				force, num, pca954x_select_chan,
 				(pdata && pdata->modes[num].deselect_on_exit)
 					? pca954x_deselect_mux : NULL);
diff --git a/drivers/i2c/muxes/i2c-mux-pinctrl.c b/drivers/i2c/muxes/i2c-mux-pinctrl.c
new file mode 100644
index 0000000..46a6697
--- /dev/null
+++ b/drivers/i2c/muxes/i2c-mux-pinctrl.c
@@ -0,0 +1,279 @@
+/*
+ * I2C multiplexer using pinctrl API
+ *
+ * Copyright (c) 2012, NVIDIA CORPORATION.  All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program.  If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include <linux/i2c.h>
+#include <linux/i2c-mux.h>
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/of_i2c.h>
+#include <linux/pinctrl/consumer.h>
+#include <linux/i2c-mux-pinctrl.h>
+#include <linux/platform_device.h>
+#include <linux/slab.h>
+
+struct i2c_mux_pinctrl {
+	struct device *dev;
+	struct i2c_mux_pinctrl_platform_data *pdata;
+	struct pinctrl *pinctrl;
+	struct pinctrl_state **states;
+	struct pinctrl_state *state_idle;
+	struct i2c_adapter *parent;
+	struct i2c_adapter **busses;
+};
+
+static int i2c_mux_pinctrl_select(struct i2c_adapter *adap, void *data,
+				  u32 chan)
+{
+	struct i2c_mux_pinctrl *mux = data;
+
+	return pinctrl_select_state(mux->pinctrl, mux->states[chan]);
+}
+
+static int i2c_mux_pinctrl_deselect(struct i2c_adapter *adap, void *data,
+				    u32 chan)
+{
+	struct i2c_mux_pinctrl *mux = data;
+
+	return pinctrl_select_state(mux->pinctrl, mux->state_idle);
+}
+
+#ifdef CONFIG_OF
+static int i2c_mux_pinctrl_parse_dt(struct i2c_mux_pinctrl *mux,
+				struct platform_device *pdev)
+{
+	struct device_node *np = pdev->dev.of_node;
+	int num_names, i, ret;
+	struct device_node *adapter_np;
+	struct i2c_adapter *adapter;
+
+	if (!np)
+		return 0;
+
+	mux->pdata = devm_kzalloc(&pdev->dev, sizeof(*mux->pdata), GFP_KERNEL);
+	if (!mux->pdata) {
+		dev_err(mux->dev,
+			"Cannot allocate i2c_mux_pinctrl_platform_data\n");
+		return -ENOMEM;
+	}
+
+	num_names = of_property_count_strings(np, "pinctrl-names");
+	if (num_names < 0) {
+		dev_err(mux->dev, "Cannot parse pinctrl-names: %d\n",
+			num_names);
+		return num_names;
+	}
+
+	mux->pdata->pinctrl_states = devm_kzalloc(&pdev->dev,
+		sizeof(*mux->pdata->pinctrl_states) * num_names,
+		GFP_KERNEL);
+	if (!mux->pdata->pinctrl_states) {
+		dev_err(mux->dev, "Cannot allocate pinctrl_states\n");
+		return -ENOMEM;
+	}
+
+	for (i = 0; i < num_names; i++) {
+		ret = of_property_read_string_index(np, "pinctrl-names", i,
+			&mux->pdata->pinctrl_states[mux->pdata->bus_count]);
+		if (ret < 0) {
+			dev_err(mux->dev, "Cannot parse pinctrl-names: %d\n",
+				ret);
+			return ret;
+		}
+		if (!strcmp(mux->pdata->pinctrl_states[mux->pdata->bus_count],
+			    "idle")) {
+			if (i != num_names - 1) {
+				dev_err(mux->dev, "idle state must be last\n");
+				return -EINVAL;
+			}
+			mux->pdata->pinctrl_state_idle = "idle";
+		} else {
+			mux->pdata->bus_count++;
+		}
+	}
+
+	adapter_np = of_parse_phandle(np, "i2c-parent", 0);
+	if (!adapter_np) {
+		dev_err(mux->dev, "Cannot parse i2c-parent\n");
+		return -ENODEV;
+	}
+	adapter = of_find_i2c_adapter_by_node(adapter_np);
+	if (!adapter) {
+		dev_err(mux->dev, "Cannot find parent bus\n");
+		return -ENODEV;
+	}
+	mux->pdata->parent_bus_num = i2c_adapter_id(adapter);
+	put_device(&adapter->dev);
+
+	return 0;
+}
+#else
+static inline int i2c_mux_pinctrl_parse_dt(struct i2c_mux_pinctrl *mux,
+					   struct platform_device *pdev)
+{
+	return 0;
+}
+#endif
+
+static int __devinit i2c_mux_pinctrl_probe(struct platform_device *pdev)
+{
+	struct i2c_mux_pinctrl *mux;
+	int (*deselect)(struct i2c_adapter *, void *, u32);
+	int i, ret;
+
+	mux = devm_kzalloc(&pdev->dev, sizeof(*mux), GFP_KERNEL);
+	if (!mux) {
+		dev_err(&pdev->dev, "Cannot allocate i2c_mux_pinctrl\n");
+		ret = -ENOMEM;
+		goto err;
+	}
+	platform_set_drvdata(pdev, mux);
+
+	mux->dev = &pdev->dev;
+
+	mux->pdata = pdev->dev.platform_data;
+	if (!mux->pdata) {
+		ret = i2c_mux_pinctrl_parse_dt(mux, pdev);
+		if (ret < 0)
+			goto err;
+	}
+	if (!mux->pdata) {
+		dev_err(&pdev->dev, "Missing platform data\n");
+		ret = -ENODEV;
+		goto err;
+	}
+
+	mux->states = devm_kzalloc(&pdev->dev,
+				   sizeof(*mux->states) * mux->pdata->bus_count,
+				   GFP_KERNEL);
+	if (!mux->states) {
+		dev_err(&pdev->dev, "Cannot allocate states\n");
+		ret = -ENOMEM;
+		goto err;
+	}
+
+	mux->busses = devm_kzalloc(&pdev->dev,
+				   sizeof(mux->busses) * mux->pdata->bus_count,
+				   GFP_KERNEL);
+	if (!mux->busses) {
+		dev_err(&pdev->dev, "Cannot allocate busses\n");
+		ret = -ENOMEM;
+		goto err;
+	}
+
+	mux->pinctrl = devm_pinctrl_get(&pdev->dev);
+	if (IS_ERR(mux->pinctrl)) {
+		ret = PTR_ERR(mux->pinctrl);
+		dev_err(&pdev->dev, "Cannot get pinctrl: %d\n", ret);
+		goto err;
+	}
+	for (i = 0; i < mux->pdata->bus_count; i++) {
+		mux->states[i] = pinctrl_lookup_state(mux->pinctrl,
+						mux->pdata->pinctrl_states[i]);
+		if (IS_ERR(mux->states[i])) {
+			ret = PTR_ERR(mux->states[i]);
+			dev_err(&pdev->dev,
+				"Cannot look up pinctrl state %s: %d\n",
+				mux->pdata->pinctrl_states[i], ret);
+			goto err;
+		}
+	}
+	if (mux->pdata->pinctrl_state_idle) {
+		mux->state_idle = pinctrl_lookup_state(mux->pinctrl,
+						mux->pdata->pinctrl_state_idle);
+		if (IS_ERR(mux->state_idle)) {
+			ret = PTR_ERR(mux->state_idle);
+			dev_err(&pdev->dev,
+				"Cannot look up pinctrl state %s: %d\n",
+				mux->pdata->pinctrl_state_idle, ret);
+			goto err;
+		}
+
+		deselect = i2c_mux_pinctrl_deselect;
+	} else {
+		deselect = NULL;
+	}
+
+	mux->parent = i2c_get_adapter(mux->pdata->parent_bus_num);
+	if (!mux->parent) {
+		dev_err(&pdev->dev, "Parent adapter (%d) not found\n",
+			mux->pdata->parent_bus_num);
+		ret = -ENODEV;
+		goto err;
+	}
+
+	for (i = 0; i < mux->pdata->bus_count; i++) {
+		u32 bus = mux->pdata->base_bus_num ?
+				(mux->pdata->base_bus_num + i) : 0;
+
+		mux->busses[i] = i2c_add_mux_adapter(mux->parent, &pdev->dev,
+						     mux, bus, i,
+						     i2c_mux_pinctrl_select,
+						     deselect);
+		if (!mux->busses[i]) {
+			ret = -ENODEV;
+			dev_err(&pdev->dev, "Failed to add adapter %d\n", i);
+			goto err_del_adapter;
+		}
+	}
+
+	return 0;
+
+err_del_adapter:
+	for (; i > 0; i--)
+		i2c_del_mux_adapter(mux->busses[i - 1]);
+	i2c_put_adapter(mux->parent);
+err:
+	return ret;
+}
+
+static int __devexit i2c_mux_pinctrl_remove(struct platform_device *pdev)
+{
+	struct i2c_mux_pinctrl *mux = platform_get_drvdata(pdev);
+	int i;
+
+	for (i = 0; i < mux->pdata->bus_count; i++)
+		i2c_del_mux_adapter(mux->busses[i]);
+
+	i2c_put_adapter(mux->parent);
+
+	return 0;
+}
+
+#ifdef CONFIG_OF
+static const struct of_device_id i2c_mux_pinctrl_of_match[] __devinitconst = {
+	{ .compatible = "i2c-mux-pinctrl", },
+	{},
+};
+MODULE_DEVICE_TABLE(of, i2c_mux_pinctrl_of_match);
+#endif
+
+static struct platform_driver i2c_mux_pinctrl_driver = {
+	.driver	= {
+		.name	= "i2c-mux-pinctrl",
+		.owner	= THIS_MODULE,
+		.of_match_table = of_match_ptr(i2c_mux_pinctrl_of_match),
+	},
+	.probe	= i2c_mux_pinctrl_probe,
+	.remove	= __devexit_p(i2c_mux_pinctrl_remove),
+};
+module_platform_driver(i2c_mux_pinctrl_driver);
+
+MODULE_DESCRIPTION("pinctrl-based I2C multiplexer driver");
+MODULE_AUTHOR("Stephen Warren <swarren@nvidia.com>");
+MODULE_LICENSE("GPL v2");
+MODULE_ALIAS("platform:i2c-mux-pinctrl");
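
When device tree is not in use, the new driver above can also be configured
through platform data. A hedged board-file sketch follows; the field names
mirror the pdata accesses in the probe and DT-parse paths, but the exact layout
of struct i2c_mux_pinctrl_platform_data lives in <linux/i2c-mux-pinctrl.h>,
which is not part of this diff, so treat the initializer as an assumption:

#include <linux/i2c-mux-pinctrl.h>
#include <linux/kernel.h>
#include <linux/platform_device.h>

/* Pinctrl state names; an optional "idle" state is named separately below. */
static const char *my_i2cmux_states[] = { "i2c-bus-a", "i2c-bus-b" };

static struct i2c_mux_pinctrl_platform_data my_i2cmux_pdata = {
	.parent_bus_num		= 0,	/* adapter being multiplexed */
	.base_bus_num		= 0,	/* 0 = dynamic child bus numbers */
	.bus_count		= ARRAY_SIZE(my_i2cmux_states),
	.pinctrl_states		= my_i2cmux_states,
	.pinctrl_state_idle	= "idle",	/* optional; enables deselect */
};

static struct platform_device my_i2cmux_device = {
	.name	= "i2c-mux-pinctrl",
	.id	= -1,
	.dev	= {
		.platform_data = &my_i2cmux_pdata,
	},
};

Registering my_i2cmux_device from board init code (platform_device_register())
would then instantiate the mux and one child adapter per pinctrl state.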
diff --git a/drivers/ide/icside.c b/drivers/ide/icside.c
index 8716066..bcb507b 100644
--- a/drivers/ide/icside.c
+++ b/drivers/ide/icside.c
@@ -236,7 +236,7 @@
  */
 static void icside_set_dma_mode(ide_hwif_t *hwif, ide_drive_t *drive)
 {
-	unsigned long cycle_time;
+	unsigned long cycle_time = 0;
 	int use_dma_info = 0;
 	const u8 xfer_mode = drive->dma_mode;
 
@@ -271,9 +271,9 @@
 
 	ide_set_drivedata(drive, (void *)cycle_time);
 
-	printk("%s: %s selected (peak %dMB/s)\n", drive->name,
-		ide_xfer_verbose(xfer_mode),
-		2000 / (unsigned long)ide_get_drivedata(drive));
+	printk(KERN_INFO "%s: %s selected (peak %luMB/s)\n",
+	       drive->name, ide_xfer_verbose(xfer_mode),
+	       2000 / (cycle_time ? cycle_time : (unsigned long) -1));
 }
 
 static const struct ide_port_ops icside_v6_port_ops = {
@@ -375,8 +375,6 @@
 	.dma_test_irq		= icside_dma_test_irq,
 	.dma_lost_irq		= ide_dma_lost_irq,
 };
-#else
-#define icside_v6_dma_ops NULL
 #endif
 
 static int icside_dma_off_init(ide_hwif_t *hwif, const struct ide_port_info *d)
@@ -456,7 +454,6 @@
 static const struct ide_port_info icside_v6_port_info __initdata = {
 	.init_dma		= icside_dma_off_init,
 	.port_ops		= &icside_v6_no_dma_port_ops,
-	.dma_ops		= &icside_v6_dma_ops,
 	.host_flags		= IDE_HFLAG_SERIALIZE | IDE_HFLAG_MMIO,
 	.mwdma_mask		= ATA_MWDMA2,
 	.swdma_mask		= ATA_SWDMA2,
@@ -518,11 +515,13 @@
 
 	ecard_set_drvdata(ec, state);
 
+#ifdef CONFIG_BLK_DEV_IDEDMA_ICS
 	if (ec->dma != NO_DMA && !request_dma(ec->dma, DRV_NAME)) {
 		d.init_dma = icside_dma_init;
 		d.port_ops = &icside_v6_port_ops;
-	} else
-		d.dma_ops = NULL;
+		d.dma_ops  = &icside_v6_dma_ops;
+	}
+#endif
 
 	ret = ide_host_register(host, &d, hws);
 	if (ret)
diff --git a/drivers/ide/ide-cs.c b/drivers/ide/ide-cs.c
index 28e344e..f1e922e 100644
--- a/drivers/ide/ide-cs.c
+++ b/drivers/ide/ide-cs.c
@@ -167,7 +167,8 @@
 {
 	int *is_kme = priv_data;
 
-	if (!(pdev->resource[0]->flags & IO_DATA_PATH_WIDTH_8)) {
+	if ((pdev->resource[0]->flags & IO_DATA_PATH_WIDTH)
+	    != IO_DATA_PATH_WIDTH_8) {
 		pdev->resource[0]->flags &= ~IO_DATA_PATH_WIDTH;
 		pdev->resource[0]->flags |= IO_DATA_PATH_WIDTH_AUTO;
 	}
diff --git a/drivers/iio/Kconfig b/drivers/iio/Kconfig
index 56eecef..2ec93da 100644
--- a/drivers/iio/Kconfig
+++ b/drivers/iio/Kconfig
@@ -8,8 +8,7 @@
 	help
 	  The industrial I/O subsystem provides a unified framework for
 	  drivers for many different types of embedded sensors using a
-	  number of different physical interfaces (i2c, spi, etc). See
-	  Documentation/iio for more information.
+	  number of different physical interfaces (i2c, spi, etc).
 
 if IIO
 
diff --git a/drivers/iio/industrialio-core.c b/drivers/iio/industrialio-core.c
index 1ddd886..4f947e4 100644
--- a/drivers/iio/industrialio-core.c
+++ b/drivers/iio/industrialio-core.c
@@ -661,7 +661,6 @@
 	 * New channel registration method - relies on the fact a group does
 	 * not need to be initialized if its name is NULL.
 	 */
-	INIT_LIST_HEAD(&indio_dev->channel_attr_list);
 	if (indio_dev->channels)
 		for (i = 0; i < indio_dev->num_channels; i++) {
 			ret = iio_device_add_channel_sysfs(indio_dev,
@@ -725,12 +724,16 @@
 static void iio_dev_release(struct device *device)
 {
 	struct iio_dev *indio_dev = dev_to_iio_dev(device);
-	cdev_del(&indio_dev->chrdev);
+	if (indio_dev->chrdev.dev)
+		cdev_del(&indio_dev->chrdev);
 	if (indio_dev->modes & INDIO_BUFFER_TRIGGERED)
 		iio_device_unregister_trigger_consumer(indio_dev);
 	iio_device_unregister_eventset(indio_dev);
 	iio_device_unregister_sysfs(indio_dev);
 	iio_device_unregister_debugfs(indio_dev);
+
+	ida_simple_remove(&iio_ida, indio_dev->id);
+	kfree(indio_dev);
 }
 
 static struct device_type iio_dev_type = {
@@ -761,6 +764,7 @@
 		dev_set_drvdata(&dev->dev, (void *)dev);
 		mutex_init(&dev->mlock);
 		mutex_init(&dev->info_exist_lock);
+		INIT_LIST_HEAD(&dev->channel_attr_list);
 
 		dev->id = ida_simple_get(&iio_ida, 0, 0, GFP_KERNEL);
 		if (dev->id < 0) {
@@ -778,10 +782,8 @@
 
 void iio_device_free(struct iio_dev *dev)
 {
-	if (dev) {
-		ida_simple_remove(&iio_ida, dev->id);
-		kfree(dev);
-	}
+	if (dev)
+		put_device(&dev->dev);
 }
 EXPORT_SYMBOL(iio_device_free);
 
@@ -902,7 +904,7 @@
 	mutex_lock(&indio_dev->info_exist_lock);
 	indio_dev->info = NULL;
 	mutex_unlock(&indio_dev->info_exist_lock);
-	device_unregister(&indio_dev->dev);
+	device_del(&indio_dev->dev);
 }
 EXPORT_SYMBOL(iio_device_unregister);
 subsys_initcall(iio_init);
diff --git a/drivers/infiniband/core/cma.c b/drivers/infiniband/core/cma.c
index 55d5642..2e826f9 100644
--- a/drivers/infiniband/core/cma.c
+++ b/drivers/infiniband/core/cma.c
@@ -1184,7 +1184,7 @@
 
 static int cma_check_req_qp_type(struct rdma_cm_id *id, struct ib_cm_event *ib_event)
 {
-	return (((ib_event->event == IB_CM_REQ_RECEIVED) ||
+	return (((ib_event->event == IB_CM_REQ_RECEIVED) &&
 		 (ib_event->param.req_rcvd.qp_type == id->qp_type)) ||
 		((ib_event->event == IB_CM_SIDR_REQ_RECEIVED) &&
 		 (id->qp_type == IB_QPT_UD)) ||
diff --git a/drivers/infiniband/hw/cxgb4/cm.c b/drivers/infiniband/hw/cxgb4/cm.c
index 55ab284e..b18870c 100644
--- a/drivers/infiniband/hw/cxgb4/cm.c
+++ b/drivers/infiniband/hw/cxgb4/cm.c
@@ -1593,6 +1593,10 @@
 		struct net_device *pdev;
 
 		pdev = ip_dev_find(&init_net, peer_ip);
+		if (!pdev) {
+			err = -ENODEV;
+			goto out;
+		}
 		ep->l2t = cxgb4_l2t_get(cdev->rdev.lldi.l2t,
 					n, pdev, 0);
 		if (!ep->l2t)
diff --git a/drivers/infiniband/hw/mlx4/main.c b/drivers/infiniband/hw/mlx4/main.c
index ee1c577..3530c41 100644
--- a/drivers/infiniband/hw/mlx4/main.c
+++ b/drivers/infiniband/hw/mlx4/main.c
@@ -140,7 +140,7 @@
 	props->max_mr_size	   = ~0ull;
 	props->page_size_cap	   = dev->dev->caps.page_size_cap;
 	props->max_qp		   = dev->dev->caps.num_qps - dev->dev->caps.reserved_qps;
-	props->max_qp_wr	   = dev->dev->caps.max_wqes;
+	props->max_qp_wr	   = dev->dev->caps.max_wqes - MLX4_IB_SQ_MAX_SPARE;
 	props->max_sge		   = min(dev->dev->caps.max_sq_sg,
 					 dev->dev->caps.max_rq_sg);
 	props->max_cq		   = dev->dev->caps.num_cqs - dev->dev->caps.reserved_cqs;
@@ -1084,12 +1084,9 @@
 	int total_eqs = 0;
 	int i, j, eq;
 
-	/* Init eq table */
-	ibdev->eq_table = NULL;
-	ibdev->eq_added = 0;
-
-	/* Legacy mode? */
-	if (dev->caps.comp_pool == 0)
+	/* Legacy mode or comp_pool is not large enough */
+	if (dev->caps.comp_pool == 0 ||
+	    dev->caps.num_ports > dev->caps.comp_pool)
 		return;
 
 	eq_per_port = rounddown_pow_of_two(dev->caps.comp_pool/
@@ -1135,7 +1132,10 @@
 static void mlx4_ib_free_eqs(struct mlx4_dev *dev, struct mlx4_ib_dev *ibdev)
 {
 	int i;
-	int total_eqs;
+
+	/* no additional eqs were added */
+	if (!ibdev->eq_table)
+		return;
 
 	/* Reset the advertised EQ number */
 	ibdev->ib_dev.num_comp_vectors = dev->caps.num_comp_vectors;
@@ -1148,12 +1148,7 @@
 		mlx4_release_eq(dev, ibdev->eq_table[i]);
 	}
 
-	total_eqs = dev->caps.num_comp_vectors + ibdev->eq_added;
-	memset(ibdev->eq_table, 0, total_eqs * sizeof(int));
 	kfree(ibdev->eq_table);
-
-	ibdev->eq_table = NULL;
-	ibdev->eq_added = 0;
 }
 
 static void *mlx4_ib_add(struct mlx4_dev *dev)
diff --git a/drivers/infiniband/hw/mlx4/mlx4_ib.h b/drivers/infiniband/hw/mlx4/mlx4_ib.h
index e62297c..ff36655 100644
--- a/drivers/infiniband/hw/mlx4/mlx4_ib.h
+++ b/drivers/infiniband/hw/mlx4/mlx4_ib.h
@@ -44,6 +44,14 @@
 #include <linux/mlx4/device.h>
 #include <linux/mlx4/doorbell.h>
 
+enum {
+	MLX4_IB_SQ_MIN_WQE_SHIFT = 6,
+	MLX4_IB_MAX_HEADROOM	 = 2048
+};
+
+#define MLX4_IB_SQ_HEADROOM(shift)	((MLX4_IB_MAX_HEADROOM >> (shift)) + 1)
+#define MLX4_IB_SQ_MAX_SPARE		(MLX4_IB_SQ_HEADROOM(MLX4_IB_SQ_MIN_WQE_SHIFT))
+
 struct mlx4_ib_ucontext {
 	struct ib_ucontext	ibucontext;
 	struct mlx4_uar		uar;
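
To make the new headroom constants above concrete: with
MLX4_IB_SQ_MIN_WQE_SHIFT = 6 and MLX4_IB_MAX_HEADROOM = 2048,
MLX4_IB_SQ_HEADROOM(6) = (2048 >> 6) + 1 = 33, so MLX4_IB_SQ_MAX_SPARE
evaluates to 33 WQEs; the device-attribute query and the QP sanity checks
elsewhere in this series subtract exactly that spare from caps.max_wqes.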
diff --git a/drivers/infiniband/hw/mlx4/qp.c b/drivers/infiniband/hw/mlx4/qp.c
index ceb3332..8d4ed24 100644
--- a/drivers/infiniband/hw/mlx4/qp.c
+++ b/drivers/infiniband/hw/mlx4/qp.c
@@ -310,8 +310,8 @@
 		       int is_user, int has_rq, struct mlx4_ib_qp *qp)
 {
 	/* Sanity check RQ size before proceeding */
-	if (cap->max_recv_wr  > dev->dev->caps.max_wqes  ||
-	    cap->max_recv_sge > dev->dev->caps.max_rq_sg)
+	if (cap->max_recv_wr > dev->dev->caps.max_wqes - MLX4_IB_SQ_MAX_SPARE ||
+	    cap->max_recv_sge > min(dev->dev->caps.max_sq_sg, dev->dev->caps.max_rq_sg))
 		return -EINVAL;
 
 	if (!has_rq) {
@@ -329,8 +329,17 @@
 		qp->rq.wqe_shift = ilog2(qp->rq.max_gs * sizeof (struct mlx4_wqe_data_seg));
 	}
 
-	cap->max_recv_wr  = qp->rq.max_post = qp->rq.wqe_cnt;
-	cap->max_recv_sge = qp->rq.max_gs;
+	/* leave userspace return values as they were, so as not to break ABI */
+	if (is_user) {
+		cap->max_recv_wr  = qp->rq.max_post = qp->rq.wqe_cnt;
+		cap->max_recv_sge = qp->rq.max_gs;
+	} else {
+		cap->max_recv_wr  = qp->rq.max_post =
+			min(dev->dev->caps.max_wqes - MLX4_IB_SQ_MAX_SPARE, qp->rq.wqe_cnt);
+		cap->max_recv_sge = min(qp->rq.max_gs,
+					min(dev->dev->caps.max_sq_sg,
+					    dev->dev->caps.max_rq_sg));
+	}
 
 	return 0;
 }
@@ -341,8 +350,8 @@
 	int s;
 
 	/* Sanity check SQ size before proceeding */
-	if (cap->max_send_wr	 > dev->dev->caps.max_wqes  ||
-	    cap->max_send_sge	 > dev->dev->caps.max_sq_sg ||
+	if (cap->max_send_wr  > (dev->dev->caps.max_wqes - MLX4_IB_SQ_MAX_SPARE) ||
+	    cap->max_send_sge > min(dev->dev->caps.max_sq_sg, dev->dev->caps.max_rq_sg) ||
 	    cap->max_inline_data + send_wqe_overhead(type, qp->flags) +
 	    sizeof (struct mlx4_wqe_inline_seg) > dev->dev->caps.max_sq_desc_sz)
 		return -EINVAL;
diff --git a/drivers/infiniband/hw/ocrdma/ocrdma.h b/drivers/infiniband/hw/ocrdma/ocrdma.h
index 85a69c9..48970af 100644
--- a/drivers/infiniband/hw/ocrdma/ocrdma.h
+++ b/drivers/infiniband/hw/ocrdma/ocrdma.h
@@ -61,6 +61,7 @@
 	u32 max_inline_data;
 	int max_send_sge;
 	int max_recv_sge;
+	int max_srq_sge;
 	int max_mr;
 	u64 max_mr_size;
 	u32 max_num_mr_pbl;
@@ -231,7 +232,6 @@
 	u32 entry_size;
 	u32 max_cnt;
 	u32 max_wqe_idx;
-	u32 free_delta;
 	u16 dbid;		/* qid, where to ring the doorbell. */
 	u32 len;
 	dma_addr_t pa;
diff --git a/drivers/infiniband/hw/ocrdma/ocrdma_abi.h b/drivers/infiniband/hw/ocrdma/ocrdma_abi.h
index a411a4e..517ab20 100644
--- a/drivers/infiniband/hw/ocrdma/ocrdma_abi.h
+++ b/drivers/infiniband/hw/ocrdma/ocrdma_abi.h
@@ -101,8 +101,6 @@
 	u32 rsvd1;
 	u32 num_wqe_allocated;
 	u32 num_rqe_allocated;
-	u32 free_wqe_delta;
-	u32 free_rqe_delta;
 	u32 db_sq_offset;
 	u32 db_rq_offset;
 	u32 db_shift;
@@ -126,8 +124,7 @@
 	u32 db_rq_offset;
 	u32 db_shift;
 
-	u32 free_rqe_delta;
-	u32 rsvd2;
+	u64 rsvd2;
 	u64 rsvd3;
 } __packed;
 
diff --git a/drivers/infiniband/hw/ocrdma/ocrdma_hw.c b/drivers/infiniband/hw/ocrdma/ocrdma_hw.c
index 9b204b1..71942af 100644
--- a/drivers/infiniband/hw/ocrdma/ocrdma_hw.c
+++ b/drivers/infiniband/hw/ocrdma/ocrdma_hw.c
@@ -732,7 +732,7 @@
 		break;
 	case OCRDMA_SRQ_LIMIT_EVENT:
 		ib_evt.element.srq = &qp->srq->ibsrq;
-		ib_evt.event = IB_EVENT_QP_LAST_WQE_REACHED;
+		ib_evt.event = IB_EVENT_SRQ_LIMIT_REACHED;
 		srq_event = 1;
 		qp_event = 0;
 		break;
@@ -990,8 +990,6 @@
 			      struct ocrdma_dev_attr *attr,
 			      struct ocrdma_mbx_query_config *rsp)
 {
-	int max_q_mem;
-
 	attr->max_pd =
 	    (rsp->max_pd_ca_ack_delay & OCRDMA_MBX_QUERY_CFG_MAX_PD_MASK) >>
 	    OCRDMA_MBX_QUERY_CFG_MAX_PD_SHIFT;
@@ -1004,6 +1002,9 @@
 	attr->max_recv_sge = (rsp->max_write_send_sge &
 			      OCRDMA_MBX_QUERY_CFG_MAX_SEND_SGE_MASK) >>
 	    OCRDMA_MBX_QUERY_CFG_MAX_SEND_SGE_SHIFT;
+	attr->max_srq_sge = (rsp->max_srq_rqe_sge &
+			      OCRDMA_MBX_QUERY_CFG_MAX_SRQ_SGE_MASK) >>
+	    OCRDMA_MBX_QUERY_CFG_MAX_SRQ_SGE_OFFSET;
 	attr->max_ord_per_qp = (rsp->max_ird_ord_per_qp &
 				OCRDMA_MBX_QUERY_CFG_MAX_ORD_PER_QP_MASK) >>
 	    OCRDMA_MBX_QUERY_CFG_MAX_ORD_PER_QP_SHIFT;
@@ -1037,18 +1038,15 @@
 	attr->max_inline_data =
 	    attr->wqe_size - (sizeof(struct ocrdma_hdr_wqe) +
 			      sizeof(struct ocrdma_sge));
-	max_q_mem = OCRDMA_Q_PAGE_BASE_SIZE << (OCRDMA_MAX_Q_PAGE_SIZE_CNT - 1);
-	/* hw can queue one less then the configured size,
-	 * so publish less by one to stack.
-	 */
 	if (dev->nic_info.dev_family == OCRDMA_GEN2_FAMILY) {
-		dev->attr.max_wqe = max_q_mem / dev->attr.wqe_size;
 		attr->ird = 1;
 		attr->ird_page_size = OCRDMA_MIN_Q_PAGE_SIZE;
 		attr->num_ird_pages = MAX_OCRDMA_IRD_PAGES;
-	} else
-		dev->attr.max_wqe = (max_q_mem / dev->attr.wqe_size) - 1;
-	dev->attr.max_rqe = (max_q_mem / dev->attr.rqe_size) - 1;
+	}
+	dev->attr.max_wqe = rsp->max_wqes_rqes_per_q >>
+		 OCRDMA_MBX_QUERY_CFG_MAX_WQES_PER_WQ_OFFSET;
+	dev->attr.max_rqe = rsp->max_wqes_rqes_per_q &
+		OCRDMA_MBX_QUERY_CFG_MAX_RQES_PER_RQ_MASK;
 }
 
 static int ocrdma_check_fw_config(struct ocrdma_dev *dev,
@@ -1990,19 +1988,12 @@
 	max_wqe_allocated = 1 << max_wqe_allocated;
 	max_rqe_allocated = 1 << ((u16)rsp->max_wqe_rqe);
 
-	if (qp->dev->nic_info.dev_family == OCRDMA_GEN2_FAMILY) {
-		qp->sq.free_delta = 0;
-		qp->rq.free_delta = 1;
-	} else
-		qp->sq.free_delta = 1;
-
 	qp->sq.max_cnt = max_wqe_allocated;
 	qp->sq.max_wqe_idx = max_wqe_allocated - 1;
 
 	if (!attrs->srq) {
 		qp->rq.max_cnt = max_rqe_allocated;
 		qp->rq.max_wqe_idx = max_rqe_allocated - 1;
-		qp->rq.free_delta = 1;
 	}
 }
 
diff --git a/drivers/infiniband/hw/ocrdma/ocrdma_main.c b/drivers/infiniband/hw/ocrdma/ocrdma_main.c
index a20d16e..b050e62 100644
--- a/drivers/infiniband/hw/ocrdma/ocrdma_main.c
+++ b/drivers/infiniband/hw/ocrdma/ocrdma_main.c
@@ -26,7 +26,6 @@
  *******************************************************************/
 
 #include <linux/module.h>
-#include <linux/version.h>
 #include <linux/idr.h>
 #include <rdma/ib_verbs.h>
 #include <rdma/ib_user_verbs.h>
@@ -98,13 +97,11 @@
 	sgid->raw[15] = mac_addr[5];
 }
 
-static void ocrdma_add_sgid(struct ocrdma_dev *dev, unsigned char *mac_addr,
+static bool ocrdma_add_sgid(struct ocrdma_dev *dev, unsigned char *mac_addr,
 			    bool is_vlan, u16 vlan_id)
 {
 	int i;
-	bool found = false;
 	union ib_gid new_sgid;
-	int free_idx = OCRDMA_MAX_SGID;
 	unsigned long flags;
 
 	memset(&ocrdma_zero_sgid, 0, sizeof(union ib_gid));
@@ -116,23 +113,19 @@
 		if (!memcmp(&dev->sgid_tbl[i], &ocrdma_zero_sgid,
 			    sizeof(union ib_gid))) {
 			/* found free entry */
-			if (!found) {
-				free_idx = i;
-				found = true;
-				break;
-			}
+			memcpy(&dev->sgid_tbl[i], &new_sgid,
+			       sizeof(union ib_gid));
+			spin_unlock_irqrestore(&dev->sgid_lock, flags);
+			return true;
 		} else if (!memcmp(&dev->sgid_tbl[i], &new_sgid,
 				   sizeof(union ib_gid))) {
 			/* entry already present, no addition is required. */
 			spin_unlock_irqrestore(&dev->sgid_lock, flags);
-			return;
+			return false;
 		}
 	}
-	/* if entry doesn't exist and if table has some space, add entry */
-	if (found)
-		memcpy(&dev->sgid_tbl[free_idx], &new_sgid,
-		       sizeof(union ib_gid));
 	spin_unlock_irqrestore(&dev->sgid_lock, flags);
+	return false;
 }
 
 static bool ocrdma_del_sgid(struct ocrdma_dev *dev, unsigned char *mac_addr,
@@ -168,7 +161,8 @@
 	ocrdma_get_guid(dev, &sgid->raw[8]);
 }
 
-static int ocrdma_build_sgid_tbl(struct ocrdma_dev *dev)
+#if defined(CONFIG_VLAN_8021Q) || defined(CONFIG_VLAN_8021Q_MODULE)
+static void ocrdma_add_vlan_sgids(struct ocrdma_dev *dev)
 {
 	struct net_device *netdev, *tmp;
 	u16 vlan_id;
@@ -176,8 +170,6 @@
 
 	netdev = dev->nic_info.netdev;
 
-	ocrdma_add_default_sgid(dev);
-
 	rcu_read_lock();
 	for_each_netdev_rcu(&init_net, tmp) {
 		if (netdev == tmp || vlan_dev_real_dev(tmp) == netdev) {
@@ -195,10 +187,23 @@
 		}
 	}
 	rcu_read_unlock();
+}
+#else
+static void ocrdma_add_vlan_sgids(struct ocrdma_dev *dev)
+{
+
+}
+#endif /* VLAN */
+
+static int ocrdma_build_sgid_tbl(struct ocrdma_dev *dev)
+{
+	ocrdma_add_default_sgid(dev);
+	ocrdma_add_vlan_sgids(dev);
 	return 0;
 }
 
-#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
+#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE) || \
+defined(CONFIG_VLAN_8021Q) || defined(CONFIG_VLAN_8021Q_MODULE)
 
 static int ocrdma_inet6addr_event(struct notifier_block *notifier,
 				  unsigned long event, void *ptr)
@@ -209,6 +214,7 @@
 	struct ib_event gid_event;
 	struct ocrdma_dev *dev;
 	bool found = false;
+	bool updated = false;
 	bool is_vlan = false;
 	u16 vid = 0;
 
@@ -234,23 +240,21 @@
 	mutex_lock(&dev->dev_lock);
 	switch (event) {
 	case NETDEV_UP:
-		ocrdma_add_sgid(dev, netdev->dev_addr, is_vlan, vid);
+		updated = ocrdma_add_sgid(dev, netdev->dev_addr, is_vlan, vid);
 		break;
 	case NETDEV_DOWN:
-		found = ocrdma_del_sgid(dev, netdev->dev_addr, is_vlan, vid);
-		if (found) {
-			/* found the matching entry, notify
-			 * the consumers about it
-			 */
-			gid_event.device = &dev->ibdev;
-			gid_event.element.port_num = 1;
-			gid_event.event = IB_EVENT_GID_CHANGE;
-			ib_dispatch_event(&gid_event);
-		}
+		updated = ocrdma_del_sgid(dev, netdev->dev_addr, is_vlan, vid);
 		break;
 	default:
 		break;
 	}
+	if (updated) {
+		/* GID table updated, notify the consumers about it */
+		gid_event.device = &dev->ibdev;
+		gid_event.element.port_num = 1;
+		gid_event.event = IB_EVENT_GID_CHANGE;
+		ib_dispatch_event(&gid_event);
+	}
 	mutex_unlock(&dev->dev_lock);
 	return NOTIFY_OK;
 }
@@ -259,7 +263,7 @@
 	.notifier_call = ocrdma_inet6addr_event
 };
 
-#endif /* IPV6 */
+#endif /* IPV6 and VLAN */
 
 static enum rdma_link_layer ocrdma_link_layer(struct ib_device *device,
 					      u8 port_num)
diff --git a/drivers/infiniband/hw/ocrdma/ocrdma_sli.h b/drivers/infiniband/hw/ocrdma/ocrdma_sli.h
index 7fd80cc..c75cbdf 100644
--- a/drivers/infiniband/hw/ocrdma/ocrdma_sli.h
+++ b/drivers/infiniband/hw/ocrdma/ocrdma_sli.h
@@ -418,6 +418,9 @@
 
 	OCRDMA_MBX_QUERY_CFG_MAX_SEND_SGE_SHIFT		= 0,
 	OCRDMA_MBX_QUERY_CFG_MAX_SEND_SGE_MASK		= 0xFFFF,
+	OCRDMA_MBX_QUERY_CFG_MAX_WRITE_SGE_SHIFT	= 16,
+	OCRDMA_MBX_QUERY_CFG_MAX_WRITE_SGE_MASK		= 0xFFFF <<
+				OCRDMA_MBX_QUERY_CFG_MAX_WRITE_SGE_SHIFT,
 
 	OCRDMA_MBX_QUERY_CFG_MAX_ORD_PER_QP_SHIFT	= 0,
 	OCRDMA_MBX_QUERY_CFG_MAX_ORD_PER_QP_MASK	= 0xFFFF,
@@ -458,7 +461,7 @@
 				OCRDMA_MBX_QUERY_CFG_MAX_WQES_PER_WQ_OFFSET,
 	OCRDMA_MBX_QUERY_CFG_MAX_RQES_PER_RQ_OFFSET	= 0,
 	OCRDMA_MBX_QUERY_CFG_MAX_RQES_PER_RQ_MASK	= 0xFFFF <<
-				OCRDMA_MBX_QUERY_CFG_MAX_WQES_PER_WQ_OFFSET,
+				OCRDMA_MBX_QUERY_CFG_MAX_RQES_PER_RQ_OFFSET,
 
 	OCRDMA_MBX_QUERY_CFG_MAX_CQ_OFFSET		= 16,
 	OCRDMA_MBX_QUERY_CFG_MAX_CQ_MASK		= 0xFFFF <<
diff --git a/drivers/infiniband/hw/ocrdma/ocrdma_verbs.c b/drivers/infiniband/hw/ocrdma/ocrdma_verbs.c
index e9f74d1..2e2e7ae 100644
--- a/drivers/infiniband/hw/ocrdma/ocrdma_verbs.c
+++ b/drivers/infiniband/hw/ocrdma/ocrdma_verbs.c
@@ -53,7 +53,7 @@
 
 	dev = get_ocrdma_dev(ibdev);
 	memset(sgid, 0, sizeof(*sgid));
-	if (index > OCRDMA_MAX_SGID)
+	if (index >= OCRDMA_MAX_SGID)
 		return -EINVAL;
 
 	memcpy(sgid, &dev->sgid_tbl[index], sizeof(*sgid));
@@ -83,8 +83,8 @@
 					IB_DEVICE_SHUTDOWN_PORT |
 					IB_DEVICE_SYS_IMAGE_GUID |
 					IB_DEVICE_LOCAL_DMA_LKEY;
-	attr->max_sge = dev->attr.max_send_sge;
-	attr->max_sge_rd = dev->attr.max_send_sge;
+	attr->max_sge = min(dev->attr.max_send_sge, dev->attr.max_srq_sge);
+	attr->max_sge_rd = 0;
 	attr->max_cq = dev->attr.max_cq;
 	attr->max_cqe = dev->attr.max_cqe;
 	attr->max_mr = dev->attr.max_mr;
@@ -97,7 +97,7 @@
 	    min(dev->attr.max_ord_per_qp, dev->attr.max_ird_per_qp);
 	attr->max_qp_init_rd_atom = dev->attr.max_ord_per_qp;
 	attr->max_srq = (dev->attr.max_qp - 1);
-	attr->max_srq_sge = attr->max_sge;
+	attr->max_srq_sge = dev->attr.max_srq_sge;
 	attr->max_srq_wr = dev->attr.max_rqe;
 	attr->local_ca_ack_delay = dev->attr.local_ca_ack_delay;
 	attr->max_fast_reg_page_list_len = 0;
@@ -940,8 +940,6 @@
 		uresp.db_rq_offset = OCRDMA_DB_RQ_OFFSET;
 		uresp.db_shift = 16;
 	}
-	uresp.free_wqe_delta = qp->sq.free_delta;
-	uresp.free_rqe_delta = qp->rq.free_delta;
 
 	if (qp->dpp_enabled) {
 		uresp.dpp_credit = dpp_credit_lmt;
@@ -1307,8 +1305,6 @@
 		free_cnt = (q->max_cnt - q->head) + q->tail;
 	else
 		free_cnt = q->tail - q->head;
-	if (q->free_delta)
-		free_cnt -= q->free_delta;
 	return free_cnt;
 }
 
@@ -1501,7 +1497,6 @@
 	    (srq->pd->id * srq->dev->nic_info.db_page_size);
 	uresp.db_page_size = srq->dev->nic_info.db_page_size;
 	uresp.num_rqe_allocated = srq->rq.max_cnt;
-	uresp.free_rqe_delta = 1;
 	if (srq->dev->nic_info.dev_family == OCRDMA_GEN2_FAMILY) {
 		uresp.db_rq_offset = OCRDMA_DB_GEN2_RQ1_OFFSET;
 		uresp.db_shift = 24;
@@ -2306,8 +2301,10 @@
 			*stop = true;
 			expand = false;
 		}
-	} else
+	} else {
+		*polled = true;
 		expand = ocrdma_update_err_rcqe(ibwc, cqe, qp, status);
+	}
 	return expand;
 }
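
A worked example for the simplified free-count hunk above: with max_cnt = 256,
head = 250 and tail = 10, the wrap-around branch yields (256 - 250) + 10 = 16
free entries. Previously a further free_delta (1 on some hardware generations)
was subtracted; that becomes unnecessary once the usable queue sizes are taken
directly from the device, as in the ocrdma_hw.c changes earlier in this patch.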
 
diff --git a/drivers/infiniband/hw/ocrdma/ocrdma_verbs.h b/drivers/infiniband/hw/ocrdma/ocrdma_verbs.h
index e648343..633f03d 100644
--- a/drivers/infiniband/hw/ocrdma/ocrdma_verbs.h
+++ b/drivers/infiniband/hw/ocrdma/ocrdma_verbs.h
@@ -28,7 +28,6 @@
 #ifndef __OCRDMA_VERBS_H__
 #define __OCRDMA_VERBS_H__
 
-#include <linux/version.h>
 int ocrdma_post_send(struct ib_qp *, struct ib_send_wr *,
 		     struct ib_send_wr **bad_wr);
 int ocrdma_post_recv(struct ib_qp *, struct ib_recv_wr *,
diff --git a/drivers/input/joystick/as5011.c b/drivers/input/joystick/as5011.c
index 3063464..57d19d4 100644
--- a/drivers/input/joystick/as5011.c
+++ b/drivers/input/joystick/as5011.c
@@ -231,6 +231,7 @@
 	}
 
 	if (!i2c_check_functionality(client->adapter,
+				     I2C_FUNC_NOSTART |
 				     I2C_FUNC_PROTOCOL_MANGLING)) {
 		dev_err(&client->dev,
 			"need i2c bus that supports protocol mangling\n");
diff --git a/drivers/input/keyboard/pxa27x_keypad.c b/drivers/input/keyboard/pxa27x_keypad.c
index 29fe1b2..7f7b724 100644
--- a/drivers/input/keyboard/pxa27x_keypad.c
+++ b/drivers/input/keyboard/pxa27x_keypad.c
@@ -311,7 +311,15 @@
 	if (pdata->enable_rotary0 || pdata->enable_rotary1)
 		pxa27x_keypad_scan_rotary(keypad);
 
-	new_state = KPDK_DK(kpdk) & keypad->direct_key_mask;
+	/*
+	 * KPDK_DK reports only the raw key pin level, so the polarity is
+	 * board-specific and a low level may be the active state.
+	 */
+	if (pdata->direct_key_low_active)
+		new_state = ~KPDK_DK(kpdk) & keypad->direct_key_mask;
+	else
+		new_state = KPDK_DK(kpdk) & keypad->direct_key_mask;
+
 	bits_changed = keypad->direct_key_state ^ new_state;
 
 	if (bits_changed == 0)
@@ -383,7 +391,14 @@
 	if (pdata->direct_key_num > direct_key_num)
 		direct_key_num = pdata->direct_key_num;
 
-	keypad->direct_key_mask = ((2 << direct_key_num) - 1) & ~mask;
+	/*
+	 * Direct keys may not start from KP_DKIN0; honour a platform-supplied
+	 * direct_key_mask, if present, to select the exact pins.
+	 */
+	if (pdata->direct_key_mask)
+		keypad->direct_key_mask = pdata->direct_key_mask;
+	else
+		keypad->direct_key_mask = ((1 << direct_key_num) - 1) & ~mask;
 
 	/* enable direct key */
 	if (direct_key_num)
@@ -399,7 +414,7 @@
 	struct pxa27x_keypad *keypad = input_get_drvdata(dev);
 
 	/* Enable unit clock */
-	clk_enable(keypad->clk);
+	clk_prepare_enable(keypad->clk);
 	pxa27x_keypad_config(keypad);
 
 	return 0;
@@ -410,7 +425,7 @@
 	struct pxa27x_keypad *keypad = input_get_drvdata(dev);
 
 	/* Disable clock unit */
-	clk_disable(keypad->clk);
+	clk_disable_unprepare(keypad->clk);
 }
 
 #ifdef CONFIG_PM
@@ -419,10 +434,14 @@
 	struct platform_device *pdev = to_platform_device(dev);
 	struct pxa27x_keypad *keypad = platform_get_drvdata(pdev);
 
-	clk_disable(keypad->clk);
-
+	/*
+	 * If the keypad is used as a wakeup source, the clock cannot be
+	 * disabled; otherwise key presses could not be detected.
+	 */
 	if (device_may_wakeup(&pdev->dev))
 		enable_irq_wake(keypad->irq);
+	else
+		clk_disable_unprepare(keypad->clk);
 
 	return 0;
 }
@@ -433,19 +452,24 @@
 	struct pxa27x_keypad *keypad = platform_get_drvdata(pdev);
 	struct input_dev *input_dev = keypad->input_dev;
 
-	if (device_may_wakeup(&pdev->dev))
+	/*
+	 * If the keypad is used as a wakeup source, the clock was left
+	 * running, so it does not need to be configured again.
+	 */
+	if (device_may_wakeup(&pdev->dev)) {
 		disable_irq_wake(keypad->irq);
+	} else {
+		mutex_lock(&input_dev->mutex);
 
-	mutex_lock(&input_dev->mutex);
+		if (input_dev->users) {
+			/* Enable unit clock */
+			clk_prepare_enable(keypad->clk);
+			pxa27x_keypad_config(keypad);
+		}
 
-	if (input_dev->users) {
-		/* Enable unit clock */
-		clk_enable(keypad->clk);
-		pxa27x_keypad_config(keypad);
+		mutex_unlock(&input_dev->mutex);
 	}
 
-	mutex_unlock(&input_dev->mutex);
-
 	return 0;
 }
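
To see what the mask change above buys: with direct_key_num = 2, the old
expression ((2 << 2) - 1) produced 0x7 and enabled three direct-key inputs,
while the corrected ((1 << 2) - 1) produces 0x3, matching the two keys actually
requested. Boards whose direct keys do not start at KP_DKIN0 can now pass an
explicit direct_key_mask instead of relying on the computed value.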
 
diff --git a/drivers/input/misc/wm831x-on.c b/drivers/input/misc/wm831x-on.c
index 47f18d6..6790a81 100644
--- a/drivers/input/misc/wm831x-on.c
+++ b/drivers/input/misc/wm831x-on.c
@@ -73,7 +73,7 @@
 {
 	struct wm831x *wm831x = dev_get_drvdata(pdev->dev.parent);
 	struct wm831x_on *wm831x_on;
-	int irq = platform_get_irq(pdev, 0);
+	int irq = wm831x_irq(wm831x, platform_get_irq(pdev, 0));
 	int ret;
 
 	wm831x_on = kzalloc(sizeof(struct wm831x_on), GFP_KERNEL);
diff --git a/drivers/input/touchscreen/wm831x-ts.c b/drivers/input/touchscreen/wm831x-ts.c
index 4bc851a..e834107 100644
--- a/drivers/input/touchscreen/wm831x-ts.c
+++ b/drivers/input/touchscreen/wm831x-ts.c
@@ -260,15 +260,16 @@
 	 * If we have a direct IRQ use it, otherwise use the interrupt
 	 * from the WM831x IRQ controller.
 	 */
+	wm831x_ts->data_irq = wm831x_irq(wm831x,
+					 platform_get_irq_byname(pdev,
+								 "TCHDATA"));
 	if (pdata && pdata->data_irq)
 		wm831x_ts->data_irq = pdata->data_irq;
-	else
-		wm831x_ts->data_irq = platform_get_irq_byname(pdev, "TCHDATA");
 
+	wm831x_ts->pd_irq = wm831x_irq(wm831x,
+				       platform_get_irq_byname(pdev, "TCHPD"));
 	if (pdata && pdata->pd_irq)
 		wm831x_ts->pd_irq = pdata->pd_irq;
-	else
-		wm831x_ts->pd_irq = platform_get_irq_byname(pdev, "TCHPD");
 
 	if (pdata)
 		wm831x_ts->pressure = pdata->pressure;
diff --git a/drivers/iommu/Kconfig b/drivers/iommu/Kconfig
index c698437..3408937 100644
--- a/drivers/iommu/Kconfig
+++ b/drivers/iommu/Kconfig
@@ -162,4 +162,25 @@
 	  space through the SMMU (System Memory Management Unit)
 	  hardware included on Tegra SoCs.
 
+config EXYNOS_IOMMU
+	bool "Exynos IOMMU Support"
+	depends on ARCH_EXYNOS && EXYNOS_DEV_SYSMMU
+	select IOMMU_API
+	help
+	  Support for the IOMMU (System MMU) of the Samsung Exynos application
+	  processor family. This enables H/W multimedia accelerators to see
+	  non-linear physical memory chunks as linear memory in their
+	  address spaces.
+
+	  If unsure, say N here.
+
+config EXYNOS_IOMMU_DEBUG
+	bool "Debugging log for Exynos IOMMU"
+	depends on EXYNOS_IOMMU
+	help
+	  Select this to see detailed log messages showing what happens
+	  in the IOMMU driver.
+
+	  Say N unless you need kernel log messages for IOMMU debugging.
+
 endif # IOMMU_SUPPORT
diff --git a/drivers/iommu/Makefile b/drivers/iommu/Makefile
index 3e5e82a..76e54ef 100644
--- a/drivers/iommu/Makefile
+++ b/drivers/iommu/Makefile
@@ -10,3 +10,4 @@
 obj-$(CONFIG_OMAP_IOMMU_DEBUG) += omap-iommu-debug.o
 obj-$(CONFIG_TEGRA_IOMMU_GART) += tegra-gart.o
 obj-$(CONFIG_TEGRA_IOMMU_SMMU) += tegra-smmu.o
+obj-$(CONFIG_EXYNOS_IOMMU) += exynos-iommu.o
diff --git a/drivers/iommu/amd_iommu.c b/drivers/iommu/amd_iommu.c
index a5bee8e..a2e418c 100644
--- a/drivers/iommu/amd_iommu.c
+++ b/drivers/iommu/amd_iommu.c
@@ -450,12 +450,27 @@
 
 static void iommu_print_event(struct amd_iommu *iommu, void *__evt)
 {
-	u32 *event = __evt;
-	int type  = (event[1] >> EVENT_TYPE_SHIFT)  & EVENT_TYPE_MASK;
-	int devid = (event[0] >> EVENT_DEVID_SHIFT) & EVENT_DEVID_MASK;
-	int domid = (event[1] >> EVENT_DOMID_SHIFT) & EVENT_DOMID_MASK;
-	int flags = (event[1] >> EVENT_FLAGS_SHIFT) & EVENT_FLAGS_MASK;
-	u64 address = (u64)(((u64)event[3]) << 32) | event[2];
+	int type, devid, domid, flags;
+	volatile u32 *event = __evt;
+	int count = 0;
+	u64 address;
+
+retry:
+	type    = (event[1] >> EVENT_TYPE_SHIFT)  & EVENT_TYPE_MASK;
+	devid   = (event[0] >> EVENT_DEVID_SHIFT) & EVENT_DEVID_MASK;
+	domid   = (event[1] >> EVENT_DOMID_SHIFT) & EVENT_DOMID_MASK;
+	flags   = (event[1] >> EVENT_FLAGS_SHIFT) & EVENT_FLAGS_MASK;
+	address = (u64)(((u64)event[3]) << 32) | event[2];
+
+	if (type == 0) {
+		/* Did we hit the erratum? */
+		if (++count == LOOP_TIMEOUT) {
+			pr_err("AMD-Vi: No event written to event log\n");
+			return;
+		}
+		udelay(1);
+		goto retry;
+	}
 
 	printk(KERN_ERR "AMD-Vi: Event logged [");
 
@@ -508,6 +523,8 @@
 	default:
 		printk(KERN_ERR "UNKNOWN type=0x%02x]\n", type);
 	}
+
+	memset(__evt, 0, 4 * sizeof(u32));
 }
 
 static void iommu_poll_events(struct amd_iommu *iommu)
@@ -530,26 +547,12 @@
 	spin_unlock_irqrestore(&iommu->lock, flags);
 }
 
-static void iommu_handle_ppr_entry(struct amd_iommu *iommu, u32 head)
+static void iommu_handle_ppr_entry(struct amd_iommu *iommu, u64 *raw)
 {
 	struct amd_iommu_fault fault;
-	volatile u64 *raw;
-	int i;
 
 	INC_STATS_COUNTER(pri_requests);
 
-	raw = (u64 *)(iommu->ppr_log + head);
-
-	/*
-	 * Hardware bug: Interrupt may arrive before the entry is written to
-	 * memory. If this happens we need to wait for the entry to arrive.
-	 */
-	for (i = 0; i < LOOP_TIMEOUT; ++i) {
-		if (PPR_REQ_TYPE(raw[0]) != 0)
-			break;
-		udelay(1);
-	}
-
 	if (PPR_REQ_TYPE(raw[0]) != PPR_REQ_FAULT) {
 		pr_err_ratelimited("AMD-Vi: Unknown PPR request received\n");
 		return;
@@ -561,12 +564,6 @@
 	fault.tag       = PPR_TAG(raw[0]);
 	fault.flags     = PPR_FLAGS(raw[0]);
 
-	/*
-	 * To detect the hardware bug we need to clear the entry
-	 * to back to zero.
-	 */
-	raw[0] = raw[1] = 0;
-
 	atomic_notifier_call_chain(&ppr_notifier, 0, &fault);
 }
 
@@ -578,25 +575,62 @@
 	if (iommu->ppr_log == NULL)
 		return;
 
+	/* enable ppr interrupts again */
+	writel(MMIO_STATUS_PPR_INT_MASK, iommu->mmio_base + MMIO_STATUS_OFFSET);
+
 	spin_lock_irqsave(&iommu->lock, flags);
 
 	head = readl(iommu->mmio_base + MMIO_PPR_HEAD_OFFSET);
 	tail = readl(iommu->mmio_base + MMIO_PPR_TAIL_OFFSET);
 
 	while (head != tail) {
+		volatile u64 *raw;
+		u64 entry[2];
+		int i;
 
-		/* Handle PPR entry */
-		iommu_handle_ppr_entry(iommu, head);
+		raw = (u64 *)(iommu->ppr_log + head);
 
-		/* Update and refresh ring-buffer state*/
+		/*
+		 * Hardware bug: Interrupt may arrive before the entry is
+		 * written to memory. If this happens we need to wait for the
+		 * entry to arrive.
+		 */
+		for (i = 0; i < LOOP_TIMEOUT; ++i) {
+			if (PPR_REQ_TYPE(raw[0]) != 0)
+				break;
+			udelay(1);
+		}
+
+		/* Avoid memcpy function-call overhead */
+		entry[0] = raw[0];
+		entry[1] = raw[1];
+
+		/*
+		 * To detect the hardware bug we need to clear the entry
+		 * back to zero.
+		 */
+		raw[0] = raw[1] = 0UL;
+
+		/* Update head pointer of hardware ring-buffer */
 		head = (head + PPR_ENTRY_SIZE) % PPR_LOG_SIZE;
 		writel(head, iommu->mmio_base + MMIO_PPR_HEAD_OFFSET);
+
+		/*
+		 * Release iommu->lock because ppr-handling might need to
+		 * re-acquire it
+		 */
+		spin_unlock_irqrestore(&iommu->lock, flags);
+
+		/* Handle PPR entry */
+		iommu_handle_ppr_entry(iommu, entry);
+
+		spin_lock_irqsave(&iommu->lock, flags);
+
+		/* Refresh ring-buffer information */
+		head = readl(iommu->mmio_base + MMIO_PPR_HEAD_OFFSET);
 		tail = readl(iommu->mmio_base + MMIO_PPR_TAIL_OFFSET);
 	}
 
-	/* enable ppr interrupts again */
-	writel(MMIO_STATUS_PPR_INT_MASK, iommu->mmio_base + MMIO_STATUS_OFFSET);
-
 	spin_unlock_irqrestore(&iommu->lock, flags);
 }
 
@@ -2035,20 +2069,20 @@
 }
 
 /* FIXME: Move this to PCI code */
-#define PCI_PRI_TLP_OFF		(1 << 2)
+#define PCI_PRI_TLP_OFF		(1 << 15)
 
 bool pci_pri_tlp_required(struct pci_dev *pdev)
 {
-	u16 control;
+	u16 status;
 	int pos;
 
 	pos = pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_PRI);
 	if (!pos)
 		return false;
 
-	pci_read_config_word(pdev, pos + PCI_PRI_CTRL, &control);
+	pci_read_config_word(pdev, pos + PCI_PRI_STATUS, &status);
 
-	return (control & PCI_PRI_TLP_OFF) ? true : false;
+	return (status & PCI_PRI_TLP_OFF) ? true : false;
 }
 
 /*
diff --git a/drivers/iommu/amd_iommu_init.c b/drivers/iommu/amd_iommu_init.c
index c567903..542024b 100644
--- a/drivers/iommu/amd_iommu_init.c
+++ b/drivers/iommu/amd_iommu_init.c
@@ -1029,6 +1029,9 @@
 	if (!iommu->dev)
 		return 1;
 
+	iommu->root_pdev = pci_get_bus_and_slot(iommu->dev->bus->number,
+						PCI_DEVFN(0, 0));
+
 	iommu->cap_ptr = h->cap_ptr;
 	iommu->pci_seg = h->pci_seg;
 	iommu->mmio_phys = h->mmio_phys;
@@ -1323,20 +1326,16 @@
 {
 	int i, j;
 	u32 ioc_feature_control;
-	struct pci_dev *pdev = NULL;
+	struct pci_dev *pdev = iommu->root_pdev;
 
 	/* RD890 BIOSes may not have completely reconfigured the iommu */
-	if (!is_rd890_iommu(iommu->dev))
+	if (!is_rd890_iommu(iommu->dev) || !pdev)
 		return;
 
 	/*
 	 * First, we need to ensure that the iommu is enabled. This is
 	 * controlled by a register in the northbridge
 	 */
-	pdev = pci_get_bus_and_slot(iommu->dev->bus->number, PCI_DEVFN(0, 0));
-
-	if (!pdev)
-		return;
 
 	/* Select Northbridge indirect register 0x75 and enable writing */
 	pci_write_config_dword(pdev, 0x60, 0x75 | (1 << 7));
@@ -1346,8 +1345,6 @@
 	if (!(ioc_feature_control & 0x1))
 		pci_write_config_dword(pdev, 0x64, ioc_feature_control | 1);
 
-	pci_dev_put(pdev);
-
 	/* Restore the iommu BAR */
 	pci_write_config_dword(iommu->dev, iommu->cap_ptr + 4,
 			       iommu->stored_addr_lo);
diff --git a/drivers/iommu/amd_iommu_types.h b/drivers/iommu/amd_iommu_types.h
index 2452f3b..2435555 100644
--- a/drivers/iommu/amd_iommu_types.h
+++ b/drivers/iommu/amd_iommu_types.h
@@ -481,6 +481,9 @@
 	/* Pointer to PCI device of this IOMMU */
 	struct pci_dev *dev;
 
+	/* Cache pdev to root device for resume quirks */
+	struct pci_dev *root_pdev;
+
 	/* physical address of MMIO space */
 	u64 mmio_phys;
 	/* virtual address of MMIO space */
diff --git a/drivers/iommu/exynos-iommu.c b/drivers/iommu/exynos-iommu.c
new file mode 100644
index 0000000..9a114b9
--- /dev/null
+++ b/drivers/iommu/exynos-iommu.c
@@ -0,0 +1,1076 @@
+/* linux/drivers/iommu/exynos_iommu.c
+ *
+ * Copyright (c) 2011 Samsung Electronics Co., Ltd.
+ *		http://www.samsung.com
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifdef CONFIG_EXYNOS_IOMMU_DEBUG
+#define DEBUG
+#endif
+
+#include <linux/io.h>
+#include <linux/interrupt.h>
+#include <linux/platform_device.h>
+#include <linux/slab.h>
+#include <linux/pm_runtime.h>
+#include <linux/clk.h>
+#include <linux/err.h>
+#include <linux/mm.h>
+#include <linux/iommu.h>
+#include <linux/errno.h>
+#include <linux/list.h>
+#include <linux/memblock.h>
+#include <linux/export.h>
+
+#include <asm/cacheflush.h>
+#include <asm/pgtable.h>
+
+#include <mach/sysmmu.h>
+
+/* We do not consider super section mapping (16MB) */
+#define SECT_ORDER 20
+#define LPAGE_ORDER 16
+#define SPAGE_ORDER 12
+
+#define SECT_SIZE (1 << SECT_ORDER)
+#define LPAGE_SIZE (1 << LPAGE_ORDER)
+#define SPAGE_SIZE (1 << SPAGE_ORDER)
+
+#define SECT_MASK (~(SECT_SIZE - 1))
+#define LPAGE_MASK (~(LPAGE_SIZE - 1))
+#define SPAGE_MASK (~(SPAGE_SIZE - 1))
+
+#define lv1ent_fault(sent) (((*(sent) & 3) == 0) || ((*(sent) & 3) == 3))
+#define lv1ent_page(sent) ((*(sent) & 3) == 1)
+#define lv1ent_section(sent) ((*(sent) & 3) == 2)
+
+#define lv2ent_fault(pent) ((*(pent) & 3) == 0)
+#define lv2ent_small(pent) ((*(pent) & 2) == 2)
+#define lv2ent_large(pent) ((*(pent) & 3) == 1)
+
+#define section_phys(sent) (*(sent) & SECT_MASK)
+#define section_offs(iova) ((iova) & 0xFFFFF)
+#define lpage_phys(pent) (*(pent) & LPAGE_MASK)
+#define lpage_offs(iova) ((iova) & 0xFFFF)
+#define spage_phys(pent) (*(pent) & SPAGE_MASK)
+#define spage_offs(iova) ((iova) & 0xFFF)
+
+#define lv1ent_offset(iova) ((iova) >> SECT_ORDER)
+#define lv2ent_offset(iova) (((iova) & 0xFF000) >> SPAGE_ORDER)
+
+#define NUM_LV1ENTRIES 4096
+#define NUM_LV2ENTRIES 256
+
+#define LV2TABLE_SIZE (NUM_LV2ENTRIES * sizeof(long))
+
+#define SPAGES_PER_LPAGE (LPAGE_SIZE / SPAGE_SIZE)
+
+#define lv2table_base(sent) (*(sent) & 0xFFFFFC00)
+
+#define mk_lv1ent_sect(pa) ((pa) | 2)
+#define mk_lv1ent_page(pa) ((pa) | 1)
+#define mk_lv2ent_lpage(pa) ((pa) | 1)
+#define mk_lv2ent_spage(pa) ((pa) | 2)
+
+#define CTRL_ENABLE	0x5
+#define CTRL_BLOCK	0x7
+#define CTRL_DISABLE	0x0
+
+#define REG_MMU_CTRL		0x000
+#define REG_MMU_CFG		0x004
+#define REG_MMU_STATUS		0x008
+#define REG_MMU_FLUSH		0x00C
+#define REG_MMU_FLUSH_ENTRY	0x010
+#define REG_PT_BASE_ADDR	0x014
+#define REG_INT_STATUS		0x018
+#define REG_INT_CLEAR		0x01C
+
+#define REG_PAGE_FAULT_ADDR	0x024
+#define REG_AW_FAULT_ADDR	0x028
+#define REG_AR_FAULT_ADDR	0x02C
+#define REG_DEFAULT_SLAVE_ADDR	0x030
+
+#define REG_MMU_VERSION		0x034
+
+#define REG_PB0_SADDR		0x04C
+#define REG_PB0_EADDR		0x050
+#define REG_PB1_SADDR		0x054
+#define REG_PB1_EADDR		0x058
+
+static unsigned long *section_entry(unsigned long *pgtable, unsigned long iova)
+{
+	return pgtable + lv1ent_offset(iova);
+}
+
+static unsigned long *page_entry(unsigned long *sent, unsigned long iova)
+{
+	return (unsigned long *)__va(lv2table_base(sent)) + lv2ent_offset(iova);
+}
+
+enum exynos_sysmmu_inttype {
+	SYSMMU_PAGEFAULT,
+	SYSMMU_AR_MULTIHIT,
+	SYSMMU_AW_MULTIHIT,
+	SYSMMU_BUSERROR,
+	SYSMMU_AR_SECURITY,
+	SYSMMU_AR_ACCESS,
+	SYSMMU_AW_SECURITY,
+	SYSMMU_AW_PROTECTION, /* 7 */
+	SYSMMU_FAULT_UNKNOWN,
+	SYSMMU_FAULTS_NUM
+};
+
+/*
+ * @itype: type of fault.
+ * @pgtable_base: the physical address of page table base. This is 0 if @itype
+ *                is SYSMMU_BUSERROR.
+ * @fault_addr: the device (virtual) address that the System MMU tried to
+ *             translate. This is 0 if @itype is SYSMMU_BUSERROR.
+ */
+typedef int (*sysmmu_fault_handler_t)(enum exynos_sysmmu_inttype itype,
+			unsigned long pgtable_base, unsigned long fault_addr);
+
+static unsigned short fault_reg_offset[SYSMMU_FAULTS_NUM] = {
+	REG_PAGE_FAULT_ADDR,
+	REG_AR_FAULT_ADDR,
+	REG_AW_FAULT_ADDR,
+	REG_DEFAULT_SLAVE_ADDR,
+	REG_AR_FAULT_ADDR,
+	REG_AR_FAULT_ADDR,
+	REG_AW_FAULT_ADDR,
+	REG_AW_FAULT_ADDR
+};
+
+static char *sysmmu_fault_name[SYSMMU_FAULTS_NUM] = {
+	"PAGE FAULT",
+	"AR MULTI-HIT FAULT",
+	"AW MULTI-HIT FAULT",
+	"BUS ERROR",
+	"AR SECURITY PROTECTION FAULT",
+	"AR ACCESS PROTECTION FAULT",
+	"AW SECURITY PROTECTION FAULT",
+	"AW ACCESS PROTECTION FAULT",
+	"UNKNOWN FAULT"
+};
+
+struct exynos_iommu_domain {
+	struct list_head clients; /* list of sysmmu_drvdata.node */
+	unsigned long *pgtable; /* lv1 page table, 16KB */
+	short *lv2entcnt; /* free lv2 entry counter for each section */
+	spinlock_t lock; /* lock for this structure */
+	spinlock_t pgtablelock; /* lock for modifying page table @ pgtable */
+};
+
+struct sysmmu_drvdata {
+	struct list_head node; /* entry of exynos_iommu_domain.clients */
+	struct device *sysmmu;	/* System MMU's device descriptor */
+	struct device *dev;	/* Owner of system MMU */
+	char *dbgname;
+	int nsfrs;
+	void __iomem **sfrbases;
+	struct clk *clk[2];
+	int activations;
+	rwlock_t lock;
+	struct iommu_domain *domain;
+	sysmmu_fault_handler_t fault_handler;
+	unsigned long pgtable;
+};
+
+static bool set_sysmmu_active(struct sysmmu_drvdata *data)
+{
+	/* return true if the System MMU was not active previously
+	   and it needs to be initialized */
+	return ++data->activations == 1;
+}
+
+static bool set_sysmmu_inactive(struct sysmmu_drvdata *data)
+{
+	/* return true if the System MMU needs to be disabled */
+	BUG_ON(data->activations < 1);
+	return --data->activations == 0;
+}
+
+static bool is_sysmmu_active(struct sysmmu_drvdata *data)
+{
+	return data->activations > 0;
+}
+
+static void sysmmu_unblock(void __iomem *sfrbase)
+{
+	__raw_writel(CTRL_ENABLE, sfrbase + REG_MMU_CTRL);
+}
+
+static bool sysmmu_block(void __iomem *sfrbase)
+{
+	int i = 120;
+
+	__raw_writel(CTRL_BLOCK, sfrbase + REG_MMU_CTRL);
+	while ((i > 0) && !(__raw_readl(sfrbase + REG_MMU_STATUS) & 1))
+		--i;
+
+	if (!(__raw_readl(sfrbase + REG_MMU_STATUS) & 1)) {
+		sysmmu_unblock(sfrbase);
+		return false;
+	}
+
+	return true;
+}
+
+static void __sysmmu_tlb_invalidate(void __iomem *sfrbase)
+{
+	__raw_writel(0x1, sfrbase + REG_MMU_FLUSH);
+}
+
+static void __sysmmu_tlb_invalidate_entry(void __iomem *sfrbase,
+						unsigned long iova)
+{
+	__raw_writel((iova & SPAGE_MASK) | 1, sfrbase + REG_MMU_FLUSH_ENTRY);
+}
+
+static void __sysmmu_set_ptbase(void __iomem *sfrbase,
+				       unsigned long pgd)
+{
+	__raw_writel(0x1, sfrbase + REG_MMU_CFG); /* 16KB LV1, LRU */
+	__raw_writel(pgd, sfrbase + REG_PT_BASE_ADDR);
+
+	__sysmmu_tlb_invalidate(sfrbase);
+}
+
+static void __sysmmu_set_prefbuf(void __iomem *sfrbase, unsigned long base,
+						unsigned long size, int idx)
+{
+	__raw_writel(base, sfrbase + REG_PB0_SADDR + idx * 8);
+	__raw_writel(size - 1 + base,  sfrbase + REG_PB0_EADDR + idx * 8);
+}
+
+void exynos_sysmmu_set_prefbuf(struct device *dev,
+				unsigned long base0, unsigned long size0,
+				unsigned long base1, unsigned long size1)
+{
+	struct sysmmu_drvdata *data = dev_get_drvdata(dev->archdata.iommu);
+	unsigned long flags;
+	int i;
+
+	BUG_ON((base0 + size0) <= base0);
+	BUG_ON((size1 > 0) && ((base1 + size1) <= base1));
+
+	read_lock_irqsave(&data->lock, flags);
+	if (!is_sysmmu_active(data))
+		goto finish;
+
+	for (i = 0; i < data->nsfrs; i++) {
+		if ((readl(data->sfrbases[i] + REG_MMU_VERSION) >> 28) == 3) {
+			if (!sysmmu_block(data->sfrbases[i]))
+				continue;
+
+			if (size1 == 0) {
+				if (size0 <= SZ_128K) {
+					base1 = base0;
+					size1 = size0;
+				} else {
+					size1 = size0 -
+						ALIGN(size0 / 2, SZ_64K);
+					size0 = size0 - size1;
+					base1 = base0 + size0;
+				}
+			}
+
+			__sysmmu_set_prefbuf(
+					data->sfrbases[i], base0, size0, 0);
+			__sysmmu_set_prefbuf(
+					data->sfrbases[i], base1, size1, 1);
+
+			sysmmu_unblock(data->sfrbases[i]);
+		}
+	}
+finish:
+	read_unlock_irqrestore(&data->lock, flags);
+}
+
+static void __set_fault_handler(struct sysmmu_drvdata *data,
+					sysmmu_fault_handler_t handler)
+{
+	unsigned long flags;
+
+	write_lock_irqsave(&data->lock, flags);
+	data->fault_handler = handler;
+	write_unlock_irqrestore(&data->lock, flags);
+}
+
+void exynos_sysmmu_set_fault_handler(struct device *dev,
+					sysmmu_fault_handler_t handler)
+{
+	struct sysmmu_drvdata *data = dev_get_drvdata(dev->archdata.iommu);
+
+	__set_fault_handler(data, handler);
+}
+
+static int default_fault_handler(enum exynos_sysmmu_inttype itype,
+		     unsigned long pgtable_base, unsigned long fault_addr)
+{
+	unsigned long *ent;
+
+	if ((itype >= SYSMMU_FAULTS_NUM) || (itype < SYSMMU_PAGEFAULT))
+		itype = SYSMMU_FAULT_UNKNOWN;
+
+	pr_err("%s occurred at 0x%lx (Page table base: 0x%lx)\n",
+			sysmmu_fault_name[itype], fault_addr, pgtable_base);
+
+	ent = section_entry(__va(pgtable_base), fault_addr);
+	pr_err("\tLv1 entry: 0x%lx\n", *ent);
+
+	if (lv1ent_page(ent)) {
+		ent = page_entry(ent, fault_addr);
+		pr_err("\t Lv2 entry: 0x%lx\n", *ent);
+	}
+
+	pr_err("Generating Kernel OOPS... because it is unrecoverable.\n");
+
+	BUG();
+
+	return 0;
+}
+
+static irqreturn_t exynos_sysmmu_irq(int irq, void *dev_id)
+{
+	/* The System MMU is in the blocked state when an interrupt occurs. */
+	struct sysmmu_drvdata *data = dev_id;
+	struct resource *irqres;
+	struct platform_device *pdev;
+	enum exynos_sysmmu_inttype itype;
+	unsigned long addr = -1;
+
+	int i, ret = -ENOSYS;
+
+	read_lock(&data->lock);
+
+	WARN_ON(!is_sysmmu_active(data));
+
+	pdev = to_platform_device(data->sysmmu);
+	for (i = 0; i < (pdev->num_resources / 2); i++) {
+		irqres = platform_get_resource(pdev, IORESOURCE_IRQ, i);
+		if (irqres && ((int)irqres->start == irq))
+			break;
+	}
+
+	if (i == pdev->num_resources) {
+		itype = SYSMMU_FAULT_UNKNOWN;
+	} else {
+		itype = (enum exynos_sysmmu_inttype)
+			__ffs(__raw_readl(data->sfrbases[i] + REG_INT_STATUS));
+		if (WARN_ON(!((itype >= 0) && (itype < SYSMMU_FAULT_UNKNOWN))))
+			itype = SYSMMU_FAULT_UNKNOWN;
+		else
+			addr = __raw_readl(
+				data->sfrbases[i] + fault_reg_offset[itype]);
+	}
+
+	if (data->domain)
+		ret = report_iommu_fault(data->domain, data->dev,
+				addr, itype);
+
+	if ((ret == -ENOSYS) && data->fault_handler) {
+		unsigned long base = data->pgtable;
+		if (itype != SYSMMU_FAULT_UNKNOWN)
+			base = __raw_readl(
+					data->sfrbases[i] + REG_PT_BASE_ADDR);
+		ret = data->fault_handler(itype, base, addr);
+	}
+
+	if (!ret && (itype != SYSMMU_FAULT_UNKNOWN))
+		__raw_writel(1 << itype, data->sfrbases[i] + REG_INT_CLEAR);
+	else
+		dev_dbg(data->sysmmu, "(%s) %s is not handled.\n",
+				data->dbgname, sysmmu_fault_name[itype]);
+
+	if (itype != SYSMMU_FAULT_UNKNOWN)
+		sysmmu_unblock(data->sfrbases[i]);
+
+	read_unlock(&data->lock);
+
+	return IRQ_HANDLED;
+}
+
+static bool __exynos_sysmmu_disable(struct sysmmu_drvdata *data)
+{
+	unsigned long flags;
+	bool disabled = false;
+	int i;
+
+	write_lock_irqsave(&data->lock, flags);
+
+	if (!set_sysmmu_inactive(data))
+		goto finish;
+
+	for (i = 0; i < data->nsfrs; i++)
+		__raw_writel(CTRL_DISABLE, data->sfrbases[i] + REG_MMU_CTRL);
+
+	if (data->clk[1])
+		clk_disable(data->clk[1]);
+	if (data->clk[0])
+		clk_disable(data->clk[0]);
+
+	disabled = true;
+	data->pgtable = 0;
+	data->domain = NULL;
+finish:
+	write_unlock_irqrestore(&data->lock, flags);
+
+	if (disabled)
+		dev_dbg(data->sysmmu, "(%s) Disabled\n", data->dbgname);
+	else
+		dev_dbg(data->sysmmu, "(%s) %d times left to be disabled\n",
+					data->dbgname, data->activations);
+
+	return disabled;
+}
+
+/*
+ * __exynos_sysmmu_enable: Enables the System MMU.
+ *
+ * Returns a negative error code if an error occurred and the System MMU was
+ * not enabled, 0 if the System MMU has just been enabled, and 1 if it was
+ * already enabled.
+ */
+static int __exynos_sysmmu_enable(struct sysmmu_drvdata *data,
+			unsigned long pgtable, struct iommu_domain *domain)
+{
+	int i, ret = 0;
+	unsigned long flags;
+
+	write_lock_irqsave(&data->lock, flags);
+
+	if (!set_sysmmu_active(data)) {
+		if (WARN_ON(pgtable != data->pgtable)) {
+			ret = -EBUSY;
+			set_sysmmu_inactive(data);
+		} else {
+			ret = 1;
+		}
+
+		dev_dbg(data->sysmmu, "(%s) Already enabled\n", data->dbgname);
+		goto finish;
+	}
+
+	if (data->clk[0])
+		clk_enable(data->clk[0]);
+	if (data->clk[1])
+		clk_enable(data->clk[1]);
+
+	data->pgtable = pgtable;
+
+	for (i = 0; i < data->nsfrs; i++) {
+		__sysmmu_set_ptbase(data->sfrbases[i], pgtable);
+
+		if ((readl(data->sfrbases[i] + REG_MMU_VERSION) >> 28) == 3) {
+			/* System MMU version is 3.x */
+			__raw_writel((1 << 12) | (2 << 28),
+					data->sfrbases[i] + REG_MMU_CFG);
+			__sysmmu_set_prefbuf(data->sfrbases[i], 0, -1, 0);
+			__sysmmu_set_prefbuf(data->sfrbases[i], 0, -1, 1);
+		}
+
+		__raw_writel(CTRL_ENABLE, data->sfrbases[i] + REG_MMU_CTRL);
+	}
+
+	data->domain = domain;
+
+	dev_dbg(data->sysmmu, "(%s) Enabled\n", data->dbgname);
+finish:
+	write_unlock_irqrestore(&data->lock, flags);
+
+	return ret;
+}
+
+int exynos_sysmmu_enable(struct device *dev, unsigned long pgtable)
+{
+	struct sysmmu_drvdata *data = dev_get_drvdata(dev->archdata.iommu);
+	int ret;
+
+	BUG_ON(!memblock_is_memory(pgtable));
+
+	ret = pm_runtime_get_sync(data->sysmmu);
+	if (ret < 0) {
+		dev_dbg(data->sysmmu, "(%s) Failed to enable\n", data->dbgname);
+		return ret;
+	}
+
+	ret = __exynos_sysmmu_enable(data, pgtable, NULL);
+	if (WARN_ON(ret < 0)) {
+		pm_runtime_put(data->sysmmu);
+		dev_err(data->sysmmu,
+			"(%s) Already enabled with page table %#lx\n",
+			data->dbgname, data->pgtable);
+	} else {
+		data->dev = dev;
+	}
+
+	return ret;
+}
+
+bool exynos_sysmmu_disable(struct device *dev)
+{
+	struct sysmmu_drvdata *data = dev_get_drvdata(dev->archdata.iommu);
+	bool disabled;
+
+	disabled = __exynos_sysmmu_disable(data);
+	pm_runtime_put(data->sysmmu);
+
+	return disabled;
+}
+
+static void sysmmu_tlb_invalidate_entry(struct device *dev, unsigned long iova)
+{
+	unsigned long flags;
+	struct sysmmu_drvdata *data = dev_get_drvdata(dev->archdata.iommu);
+
+	read_lock_irqsave(&data->lock, flags);
+
+	if (is_sysmmu_active(data)) {
+		int i;
+		for (i = 0; i < data->nsfrs; i++) {
+			if (sysmmu_block(data->sfrbases[i])) {
+				__sysmmu_tlb_invalidate_entry(
+						data->sfrbases[i], iova);
+				sysmmu_unblock(data->sfrbases[i]);
+			}
+		}
+	} else {
+		dev_dbg(data->sysmmu,
+			"(%s) Disabled. Skipping invalidating TLB.\n",
+			data->dbgname);
+	}
+
+	read_unlock_irqrestore(&data->lock, flags);
+}
+
+void exynos_sysmmu_tlb_invalidate(struct device *dev)
+{
+	unsigned long flags;
+	struct sysmmu_drvdata *data = dev_get_drvdata(dev->archdata.iommu);
+
+	read_lock_irqsave(&data->lock, flags);
+
+	if (is_sysmmu_active(data)) {
+		int i;
+		for (i = 0; i < data->nsfrs; i++) {
+			if (sysmmu_block(data->sfrbases[i])) {
+				__sysmmu_tlb_invalidate(data->sfrbases[i]);
+				sysmmu_unblock(data->sfrbases[i]);
+			}
+		}
+	} else {
+		dev_dbg(data->sysmmu,
+			"(%s) Disabled. Skipping invalidating TLB.\n",
+			data->dbgname);
+	}
+
+	read_unlock_irqrestore(&data->lock, flags);
+}
+
+static int exynos_sysmmu_probe(struct platform_device *pdev)
+{
+	int i, ret;
+	struct device *dev;
+	struct sysmmu_drvdata *data;
+
+	dev = &pdev->dev;
+
+	data = kzalloc(sizeof(*data), GFP_KERNEL);
+	if (!data) {
+		dev_dbg(dev, "Not enough memory\n");
+		ret = -ENOMEM;
+		goto err_alloc;
+	}
+
+	ret = dev_set_drvdata(dev, data);
+	if (ret) {
+		dev_dbg(dev, "Unabled to initialize driver data\n");
+		goto err_init;
+	}
+
+	data->nsfrs = pdev->num_resources / 2;
+	data->sfrbases = kmalloc(sizeof(*data->sfrbases) * data->nsfrs,
+								GFP_KERNEL);
+	if (data->sfrbases == NULL) {
+		dev_dbg(dev, "Not enough memory\n");
+		ret = -ENOMEM;
+		goto err_init;
+	}
+
+	for (i = 0; i < data->nsfrs; i++) {
+		struct resource *res;
+		res = platform_get_resource(pdev, IORESOURCE_MEM, i);
+		if (!res) {
+			dev_dbg(dev, "Unable to find IOMEM region\n");
+			ret = -ENOENT;
+			goto err_res;
+		}
+
+		data->sfrbases[i] = ioremap(res->start, resource_size(res));
+		if (!data->sfrbases[i]) {
+			dev_dbg(dev, "Unable to map IOMEM @ PA:%#x\n",
+							res->start);
+			ret = -ENOENT;
+			goto err_res;
+		}
+	}
+
+	for (i = 0; i < data->nsfrs; i++) {
+		ret = platform_get_irq(pdev, i);
+		if (ret <= 0) {
+			dev_dbg(dev, "Unable to find IRQ resource\n");
+			goto err_irq;
+		}
+
+		ret = request_irq(ret, exynos_sysmmu_irq, 0,
+					dev_name(dev), data);
+		if (ret) {
+			dev_dbg(dev, "Unabled to register interrupt handler\n");
+			goto err_irq;
+		}
+	}
+
+	if (dev_get_platdata(dev)) {
+		char *deli, *beg;
+		struct sysmmu_platform_data *platdata = dev_get_platdata(dev);
+
+		beg = platdata->clockname;
+
+		for (deli = beg; (*deli != '\0') && (*deli != ','); deli++)
+			/* NOTHING */;
+
+		if (*deli == '\0')
+			deli = NULL;
+		else
+			*deli = '\0';
+
+		data->clk[0] = clk_get(dev, beg);
+		if (IS_ERR(data->clk[0])) {
+			data->clk[0] = NULL;
+			dev_dbg(dev, "No clock descriptor registered\n");
+		}
+
+		if (data->clk[0] && deli) {
+			*deli = ',';
+			data->clk[1] = clk_get(dev, deli + 1);
+			if (IS_ERR(data->clk[1]))
+				data->clk[1] = NULL;
+		}
+
+		data->dbgname = platdata->dbgname;
+	}
+
+	data->sysmmu = dev;
+	rwlock_init(&data->lock);
+	INIT_LIST_HEAD(&data->node);
+
+	__set_fault_handler(data, &default_fault_handler);
+
+	if (dev->parent)
+		pm_runtime_enable(dev);
+
+	dev_dbg(dev, "(%s) Initialized\n", data->dbgname);
+	return 0;
+err_irq:
+	while (i-- > 0) {
+		int irq;
+
+		irq = platform_get_irq(pdev, i);
+		free_irq(irq, data);
+	}
+err_res:
+	while (data->nsfrs-- > 0)
+		iounmap(data->sfrbases[data->nsfrs]);
+	kfree(data->sfrbases);
+err_init:
+	kfree(data);
+err_alloc:
+	dev_err(dev, "Failed to initialize\n");
+	return ret;
+}
+
+static struct platform_driver exynos_sysmmu_driver = {
+	.probe		= exynos_sysmmu_probe,
+	.driver		= {
+		.owner		= THIS_MODULE,
+		.name		= "exynos-sysmmu",
+	}
+};
+
+static inline void pgtable_flush(void *vastart, void *vaend)
+{
+	dmac_flush_range(vastart, vaend);
+	outer_flush_range(virt_to_phys(vastart),
+				virt_to_phys(vaend));
+}
+
+static int exynos_iommu_domain_init(struct iommu_domain *domain)
+{
+	struct exynos_iommu_domain *priv;
+
+	priv = kzalloc(sizeof(*priv), GFP_KERNEL);
+	if (!priv)
+		return -ENOMEM;
+
+	priv->pgtable = (unsigned long *)__get_free_pages(
+						GFP_KERNEL | __GFP_ZERO, 2);
+	if (!priv->pgtable)
+		goto err_pgtable;
+
+	priv->lv2entcnt = (short *)__get_free_pages(
+						GFP_KERNEL | __GFP_ZERO, 1);
+	if (!priv->lv2entcnt)
+		goto err_counter;
+
+	pgtable_flush(priv->pgtable, priv->pgtable + NUM_LV1ENTRIES);
+
+	spin_lock_init(&priv->lock);
+	spin_lock_init(&priv->pgtablelock);
+	INIT_LIST_HEAD(&priv->clients);
+
+	domain->priv = priv;
+	return 0;
+
+err_counter:
+	free_pages((unsigned long)priv->pgtable, 2);
+err_pgtable:
+	kfree(priv);
+	return -ENOMEM;
+}
+
+static void exynos_iommu_domain_destroy(struct iommu_domain *domain)
+{
+	struct exynos_iommu_domain *priv = domain->priv;
+	struct sysmmu_drvdata *data;
+	unsigned long flags;
+	int i;
+
+	WARN_ON(!list_empty(&priv->clients));
+
+	spin_lock_irqsave(&priv->lock, flags);
+
+	list_for_each_entry(data, &priv->clients, node) {
+		while (!exynos_sysmmu_disable(data->dev))
+			; /* until System MMU is actually disabled */
+	}
+
+	spin_unlock_irqrestore(&priv->lock, flags);
+
+	for (i = 0; i < NUM_LV1ENTRIES; i++)
+		if (lv1ent_page(priv->pgtable + i))
+			kfree(__va(lv2table_base(priv->pgtable + i)));
+
+	free_pages((unsigned long)priv->pgtable, 2);
+	free_pages((unsigned long)priv->lv2entcnt, 1);
+	kfree(domain->priv);
+	domain->priv = NULL;
+}
+
+static int exynos_iommu_attach_device(struct iommu_domain *domain,
+				   struct device *dev)
+{
+	struct sysmmu_drvdata *data = dev_get_drvdata(dev->archdata.iommu);
+	struct exynos_iommu_domain *priv = domain->priv;
+	unsigned long flags;
+	int ret;
+
+	ret = pm_runtime_get_sync(data->sysmmu);
+	if (ret < 0)
+		return ret;
+
+	ret = 0;
+
+	spin_lock_irqsave(&priv->lock, flags);
+
+	ret = __exynos_sysmmu_enable(data, __pa(priv->pgtable), domain);
+
+	if (ret == 0) {
+		/* 'data->node' must not be appeared in priv->clients */
+		BUG_ON(!list_empty(&data->node));
+		data->dev = dev;
+		list_add_tail(&data->node, &priv->clients);
+	}
+
+	spin_unlock_irqrestore(&priv->lock, flags);
+
+	if (ret < 0) {
+		dev_err(dev, "%s: Failed to attach IOMMU with pgtable %#lx\n",
+				__func__, __pa(priv->pgtable));
+		pm_runtime_put(data->sysmmu);
+	} else if (ret > 0) {
+		dev_dbg(dev, "%s: IOMMU with pgtable 0x%lx already attached\n",
+					__func__, __pa(priv->pgtable));
+	} else {
+		dev_dbg(dev, "%s: Attached new IOMMU with pgtable 0x%lx\n",
+					__func__, __pa(priv->pgtable));
+	}
+
+	return ret;
+}
+
+static void exynos_iommu_detach_device(struct iommu_domain *domain,
+				    struct device *dev)
+{
+	struct sysmmu_drvdata *data = dev_get_drvdata(dev->archdata.iommu);
+	struct exynos_iommu_domain *priv = domain->priv;
+	struct list_head *pos;
+	unsigned long flags;
+	bool found = false;
+
+	spin_lock_irqsave(&priv->lock, flags);
+
+	list_for_each(pos, &priv->clients) {
+		if (list_entry(pos, struct sysmmu_drvdata, node) == data) {
+			found = true;
+			break;
+		}
+	}
+
+	if (!found)
+		goto finish;
+
+	if (__exynos_sysmmu_disable(data)) {
+		dev_dbg(dev, "%s: Detached IOMMU with pgtable %#lx\n",
+					__func__, __pa(priv->pgtable));
+		list_del(&data->node);
+		INIT_LIST_HEAD(&data->node);
+
+	} else {
+		dev_dbg(dev, "%s: Detaching IOMMU with pgtable %#lx delayed",
+					__func__, __pa(priv->pgtable));
+	}
+
+finish:
+	spin_unlock_irqrestore(&priv->lock, flags);
+
+	if (found)
+		pm_runtime_put(data->sysmmu);
+}
+
+static unsigned long *alloc_lv2entry(unsigned long *sent, unsigned long iova,
+					short *pgcounter)
+{
+	if (lv1ent_fault(sent)) {
+		unsigned long *pent;
+
+		pent = kzalloc(LV2TABLE_SIZE, GFP_ATOMIC);
+		BUG_ON((unsigned long)pent & (LV2TABLE_SIZE - 1));
+		if (!pent)
+			return NULL;
+
+		*sent = mk_lv1ent_page(__pa(pent));
+		*pgcounter = NUM_LV2ENTRIES;
+		pgtable_flush(pent, pent + NUM_LV2ENTRIES);
+		pgtable_flush(sent, sent + 1);
+	}
+
+	return page_entry(sent, iova);
+}
+
+static int lv1set_section(unsigned long *sent, phys_addr_t paddr, short *pgcnt)
+{
+	if (lv1ent_section(sent))
+		return -EADDRINUSE;
+
+	if (lv1ent_page(sent)) {
+		if (*pgcnt != NUM_LV2ENTRIES)
+			return -EADDRINUSE;
+
+		kfree(page_entry(sent, 0));
+
+		*pgcnt = 0;
+	}
+
+	*sent = mk_lv1ent_sect(paddr);
+
+	pgtable_flush(sent, sent + 1);
+
+	return 0;
+}
+
+static int lv2set_page(unsigned long *pent, phys_addr_t paddr, size_t size,
+								short *pgcnt)
+{
+	if (size == SPAGE_SIZE) {
+		if (!lv2ent_fault(pent))
+			return -EADDRINUSE;
+
+		*pent = mk_lv2ent_spage(paddr);
+		pgtable_flush(pent, pent + 1);
+		*pgcnt -= 1;
+	} else { /* size == LPAGE_SIZE */
+		int i;
+		for (i = 0; i < SPAGES_PER_LPAGE; i++, pent++) {
+			if (!lv2ent_fault(pent)) {
+				memset(pent, 0, sizeof(*pent) * i);
+				return -EADDRINUSE;
+			}
+
+			*pent = mk_lv2ent_lpage(paddr);
+		}
+		pgtable_flush(pent - SPAGES_PER_LPAGE, pent);
+		*pgcnt -= SPAGES_PER_LPAGE;
+	}
+
+	return 0;
+}
+
+static int exynos_iommu_map(struct iommu_domain *domain, unsigned long iova,
+			 phys_addr_t paddr, size_t size, int prot)
+{
+	struct exynos_iommu_domain *priv = domain->priv;
+	unsigned long *entry;
+	unsigned long flags;
+	int ret = -ENOMEM;
+
+	BUG_ON(priv->pgtable == NULL);
+
+	spin_lock_irqsave(&priv->pgtablelock, flags);
+
+	entry = section_entry(priv->pgtable, iova);
+
+	if (size == SECT_SIZE) {
+		ret = lv1set_section(entry, paddr,
+					&priv->lv2entcnt[lv1ent_offset(iova)]);
+	} else {
+		unsigned long *pent;
+
+		pent = alloc_lv2entry(entry, iova,
+					&priv->lv2entcnt[lv1ent_offset(iova)]);
+
+		if (!pent)
+			ret = -ENOMEM;
+		else
+			ret = lv2set_page(pent, paddr, size,
+					&priv->lv2entcnt[lv1ent_offset(iova)]);
+	}
+
+	if (ret) {
+		pr_debug("%s: Failed to map iova 0x%lx/0x%x bytes\n",
+							__func__, iova, size);
+	}
+
+	spin_unlock_irqrestore(&priv->pgtablelock, flags);
+
+	return ret;
+}
+
+static size_t exynos_iommu_unmap(struct iommu_domain *domain,
+					       unsigned long iova, size_t size)
+{
+	struct exynos_iommu_domain *priv = domain->priv;
+	struct sysmmu_drvdata *data;
+	unsigned long flags;
+	unsigned long *ent;
+
+	BUG_ON(priv->pgtable == NULL);
+
+	spin_lock_irqsave(&priv->pgtablelock, flags);
+
+	ent = section_entry(priv->pgtable, iova);
+
+	if (lv1ent_section(ent)) {
+		BUG_ON(size < SECT_SIZE);
+
+		*ent = 0;
+		pgtable_flush(ent, ent + 1);
+		size = SECT_SIZE;
+		goto done;
+	}
+
+	if (unlikely(lv1ent_fault(ent))) {
+		if (size > SECT_SIZE)
+			size = SECT_SIZE;
+		goto done;
+	}
+
+	/* lv1ent_page(sent) == true here */
+
+	ent = page_entry(ent, iova);
+
+	if (unlikely(lv2ent_fault(ent))) {
+		size = SPAGE_SIZE;
+		goto done;
+	}
+
+	if (lv2ent_small(ent)) {
+		*ent = 0;
+		size = SPAGE_SIZE;
+		priv->lv2entcnt[lv1ent_offset(iova)] += 1;
+		goto done;
+	}
+
+	/* lv1ent_large(ent) == true here */
+	BUG_ON(size < LPAGE_SIZE);
+
+	memset(ent, 0, sizeof(*ent) * SPAGES_PER_LPAGE);
+
+	size = LPAGE_SIZE;
+	priv->lv2entcnt[lv1ent_offset(iova)] += SPAGES_PER_LPAGE;
+done:
+	spin_unlock_irqrestore(&priv->pgtablelock, flags);
+
+	spin_lock_irqsave(&priv->lock, flags);
+	list_for_each_entry(data, &priv->clients, node)
+		sysmmu_tlb_invalidate_entry(data->dev, iova);
+	spin_unlock_irqrestore(&priv->lock, flags);
+
+
+	return size;
+}
+
+static phys_addr_t exynos_iommu_iova_to_phys(struct iommu_domain *domain,
+					  unsigned long iova)
+{
+	struct exynos_iommu_domain *priv = domain->priv;
+	unsigned long *entry;
+	unsigned long flags;
+	phys_addr_t phys = 0;
+
+	spin_lock_irqsave(&priv->pgtablelock, flags);
+
+	entry = section_entry(priv->pgtable, iova);
+
+	if (lv1ent_section(entry)) {
+		phys = section_phys(entry) + section_offs(iova);
+	} else if (lv1ent_page(entry)) {
+		entry = page_entry(entry, iova);
+
+		if (lv2ent_large(entry))
+			phys = lpage_phys(entry) + lpage_offs(iova);
+		else if (lv2ent_small(entry))
+			phys = spage_phys(entry) + spage_offs(iova);
+	}
+
+	spin_unlock_irqrestore(&priv->pgtablelock, flags);
+
+	return phys;
+}
+
+static struct iommu_ops exynos_iommu_ops = {
+	.domain_init = &exynos_iommu_domain_init,
+	.domain_destroy = &exynos_iommu_domain_destroy,
+	.attach_dev = &exynos_iommu_attach_device,
+	.detach_dev = &exynos_iommu_detach_device,
+	.map = &exynos_iommu_map,
+	.unmap = &exynos_iommu_unmap,
+	.iova_to_phys = &exynos_iommu_iova_to_phys,
+	.pgsize_bitmap = SECT_SIZE | LPAGE_SIZE | SPAGE_SIZE,
+};
+
+static int __init exynos_iommu_init(void)
+{
+	int ret;
+
+	ret = platform_driver_register(&exynos_sysmmu_driver);
+
+	if (ret == 0)
+		bus_set_iommu(&platform_bus_type, &exynos_iommu_ops);
+
+	return ret;
+}
+subsys_initcall(exynos_iommu_init);
diff --git a/drivers/iommu/intel-iommu.c b/drivers/iommu/intel-iommu.c
index bf2fbaa..b12af2f 100644
--- a/drivers/iommu/intel-iommu.c
+++ b/drivers/iommu/intel-iommu.c
@@ -1907,6 +1907,15 @@
 	iommu->flush.flush_iotlb(iommu, 0, 0, 0, DMA_TLB_GLOBAL_FLUSH);
 }
 
+static inline void unlink_domain_info(struct device_domain_info *info)
+{
+	assert_spin_locked(&device_domain_lock);
+	list_del(&info->link);
+	list_del(&info->global);
+	if (info->dev)
+		info->dev->dev.archdata.iommu = NULL;
+}
+
 static void domain_remove_dev_info(struct dmar_domain *domain)
 {
 	struct device_domain_info *info;
@@ -1917,10 +1926,7 @@
 	while (!list_empty(&domain->devices)) {
 		info = list_entry(domain->devices.next,
 			struct device_domain_info, link);
-		list_del(&info->link);
-		list_del(&info->global);
-		if (info->dev)
-			info->dev->dev.archdata.iommu = NULL;
+		unlink_domain_info(info);
 		spin_unlock_irqrestore(&device_domain_lock, flags);
 
 		iommu_disable_dev_iotlb(info);
@@ -2287,12 +2293,6 @@
 	if (!info)
 		return -ENOMEM;
 
-	ret = domain_context_mapping(domain, pdev, translation);
-	if (ret) {
-		free_devinfo_mem(info);
-		return ret;
-	}
-
 	info->segment = pci_domain_nr(pdev->bus);
 	info->bus = pdev->bus->number;
 	info->devfn = pdev->devfn;
@@ -2305,6 +2305,15 @@
 	pdev->dev.archdata.iommu = info;
 	spin_unlock_irqrestore(&device_domain_lock, flags);
 
+	ret = domain_context_mapping(domain, pdev, translation);
+	if (ret) {
+		spin_lock_irqsave(&device_domain_lock, flags);
+		unlink_domain_info(info);
+		spin_unlock_irqrestore(&device_domain_lock, flags);
+		free_devinfo_mem(info);
+		return ret;
+	}
+
 	return 0;
 }
 
@@ -3728,10 +3737,7 @@
 		if (info->segment == pci_domain_nr(pdev->bus) &&
 		    info->bus == pdev->bus->number &&
 		    info->devfn == pdev->devfn) {
-			list_del(&info->link);
-			list_del(&info->global);
-			if (info->dev)
-				info->dev->dev.archdata.iommu = NULL;
+			unlink_domain_info(info);
 			spin_unlock_irqrestore(&device_domain_lock, flags);
 
 			iommu_disable_dev_iotlb(info);
@@ -3786,11 +3792,7 @@
 	while (!list_empty(&domain->devices)) {
 		info = list_entry(domain->devices.next,
 			struct device_domain_info, link);
-		list_del(&info->link);
-		list_del(&info->global);
-		if (info->dev)
-			info->dev->dev.archdata.iommu = NULL;
-
+		unlink_domain_info(info);
 		spin_unlock_irqrestore(&device_domain_lock, flags1);
 
 		iommu_disable_dev_iotlb(info);
diff --git a/drivers/iommu/iommu.c b/drivers/iommu/iommu.c
index 2198b2d..8b9ded8 100644
--- a/drivers/iommu/iommu.c
+++ b/drivers/iommu/iommu.c
@@ -119,6 +119,7 @@
  * iommu_set_fault_handler() - set a fault handler for an iommu domain
  * @domain: iommu domain
  * @handler: fault handler
+ * @token: user data, will be passed back to the fault handler
  *
  * This function should be used by IOMMU users which want to be notified
  * whenever an IOMMU fault happens.
@@ -127,11 +128,13 @@
  * error code otherwise.
  */
 void iommu_set_fault_handler(struct iommu_domain *domain,
-					iommu_fault_handler_t handler)
+					iommu_fault_handler_t handler,
+					void *token)
 {
 	BUG_ON(!domain);
 
 	domain->handler = handler;
+	domain->handler_token = token;
 }
 EXPORT_SYMBOL_GPL(iommu_set_fault_handler);
 
diff --git a/drivers/iommu/omap-iommu.c b/drivers/iommu/omap-iommu.c
index 6899dcd..e70ee2b 100644
--- a/drivers/iommu/omap-iommu.c
+++ b/drivers/iommu/omap-iommu.c
@@ -41,11 +41,13 @@
  * @pgtable:	the page table
  * @iommu_dev:	an omap iommu device attached to this domain. only a single
  *		iommu device can be attached for now.
+ * @dev:	Device using this domain.
  * @lock:	domain lock, should be taken when attaching/detaching
  */
 struct omap_iommu_domain {
 	u32 *pgtable;
 	struct omap_iommu *iommu_dev;
+	struct device *dev;
 	spinlock_t lock;
 };
 
@@ -1081,6 +1083,7 @@
 	}
 
 	omap_domain->iommu_dev = arch_data->iommu_dev = oiommu;
+	omap_domain->dev = dev;
 	oiommu->domain = domain;
 
 out:
@@ -1088,19 +1091,16 @@
 	return ret;
 }
 
-static void omap_iommu_detach_dev(struct iommu_domain *domain,
-				 struct device *dev)
+static void _omap_iommu_detach_dev(struct omap_iommu_domain *omap_domain,
+			struct device *dev)
 {
-	struct omap_iommu_domain *omap_domain = domain->priv;
-	struct omap_iommu_arch_data *arch_data = dev->archdata.iommu;
 	struct omap_iommu *oiommu = dev_to_omap_iommu(dev);
-
-	spin_lock(&omap_domain->lock);
+	struct omap_iommu_arch_data *arch_data = dev->archdata.iommu;
 
 	/* only a single device is supported per domain for now */
 	if (omap_domain->iommu_dev != oiommu) {
 		dev_err(dev, "invalid iommu device\n");
-		goto out;
+		return;
 	}
 
 	iopgtable_clear_entry_all(oiommu);
@@ -1108,8 +1108,16 @@
 	omap_iommu_detach(oiommu);
 
 	omap_domain->iommu_dev = arch_data->iommu_dev = NULL;
+	omap_domain->dev = NULL;
+}
 
-out:
+static void omap_iommu_detach_dev(struct iommu_domain *domain,
+				 struct device *dev)
+{
+	struct omap_iommu_domain *omap_domain = domain->priv;
+
+	spin_lock(&omap_domain->lock);
+	_omap_iommu_detach_dev(omap_domain, dev);
 	spin_unlock(&omap_domain->lock);
 }
 
@@ -1148,13 +1156,19 @@
 	return -ENOMEM;
 }
 
-/* assume device was already detached */
 static void omap_iommu_domain_destroy(struct iommu_domain *domain)
 {
 	struct omap_iommu_domain *omap_domain = domain->priv;
 
 	domain->priv = NULL;
 
+	/*
+	 * An iommu device may still be attached
+	 * (currently, only a single device can be attached).
+	 */
+	if (omap_domain->iommu_dev)
+		_omap_iommu_detach_dev(omap_domain, omap_domain->dev);
+
 	kfree(omap_domain->pgtable);
 	kfree(omap_domain);
 }
diff --git a/drivers/iommu/tegra-gart.c b/drivers/iommu/tegra-gart.c
index 779306e..0c0a377 100644
--- a/drivers/iommu/tegra-gart.c
+++ b/drivers/iommu/tegra-gart.c
@@ -29,15 +29,17 @@
 #include <linux/device.h>
 #include <linux/io.h>
 #include <linux/iommu.h>
+#include <linux/of.h>
 
 #include <asm/cacheflush.h>
 
 /* bitmap of the page sizes currently supported */
 #define GART_IOMMU_PGSIZES	(SZ_4K)
 
-#define GART_CONFIG		0x24
-#define GART_ENTRY_ADDR		0x28
-#define GART_ENTRY_DATA		0x2c
+#define GART_REG_BASE		0x24
+#define GART_CONFIG		(0x24 - GART_REG_BASE)
+#define GART_ENTRY_ADDR		(0x28 - GART_REG_BASE)
+#define GART_ENTRY_DATA		(0x2c - GART_REG_BASE)
 #define GART_ENTRY_PHYS_ADDR_VALID	(1 << 31)
 
 #define GART_PAGE_SHIFT		12
@@ -158,7 +160,7 @@
 	struct gart_client *client, *c;
 	int err = 0;
 
-	gart = dev_get_drvdata(dev->parent);
+	gart = gart_handle;
 	if (!gart)
 		return -EINVAL;
 	domain->priv = gart;
@@ -422,6 +424,14 @@
 	.resume		= tegra_gart_resume,
 };
 
+#ifdef CONFIG_OF
+static struct of_device_id tegra_gart_of_match[] __devinitdata = {
+	{ .compatible = "nvidia,tegra20-gart", },
+	{ },
+};
+MODULE_DEVICE_TABLE(of, tegra_gart_of_match);
+#endif
+
 static struct platform_driver tegra_gart_driver = {
 	.probe		= tegra_gart_probe,
 	.remove		= tegra_gart_remove,
@@ -429,6 +439,7 @@
 		.owner	= THIS_MODULE,
 		.name	= "tegra-gart",
 		.pm	= &tegra_gart_pm_ops,
+		.of_match_table = of_match_ptr(tegra_gart_of_match),
 	},
 };
 
@@ -448,4 +459,5 @@
 
 MODULE_DESCRIPTION("IOMMU API for GART in Tegra20");
 MODULE_AUTHOR("Hiroshi DOYU <hdoyu@nvidia.com>");
+MODULE_ALIAS("platform:tegra-gart");
 MODULE_LICENSE("GPL v2");
diff --git a/drivers/iommu/tegra-smmu.c b/drivers/iommu/tegra-smmu.c
index eb93c82..ecd6790 100644
--- a/drivers/iommu/tegra-smmu.c
+++ b/drivers/iommu/tegra-smmu.c
@@ -733,7 +733,7 @@
 		pr_info("Reserve \"page zero\" for AVP vectors using a common dummy\n");
 	}
 
-	dev_dbg(smmu->dev, "%s is attached\n", dev_name(c->dev));
+	dev_dbg(smmu->dev, "%s is attached\n", dev_name(dev));
 	return 0;
 
 err_client:
diff --git a/drivers/leds/Kconfig b/drivers/leds/Kconfig
index ff4b8cf..12b2b55 100644
--- a/drivers/leds/Kconfig
+++ b/drivers/leds/Kconfig
@@ -50,6 +50,19 @@
 	  controlled manually or using PWM input or using ambient
 	  light automatically.
 
+config LEDS_LM3533
+	tristate "LED support for LM3533"
+	depends on LEDS_CLASS
+	depends on MFD_LM3533
+	help
+	  This option enables support for the LEDs on National Semiconductor /
+	  TI LM3533 Lighting Power chips.
+
+	  The LEDs can be controlled directly, through PWM input, or by the
+	  ambient-light-sensor interface. The chip supports
+	  hardware-accelerated blinking with maximum on and off periods of 9.8
+	  and 77 seconds respectively.
+
 config LEDS_LOCOMO
 	tristate "LED Support for Locomo device"
 	depends on LEDS_CLASS
@@ -259,6 +272,14 @@
 	  This option enables support for on-chip LED drivers found
 	  on Dialog Semiconductor DA9030/DA9034 PMICs.
 
+config LEDS_DA9052
+	tristate "Dialog DA9052/DA9053 LEDS"
+	depends on LEDS_CLASS
+	depends on PMIC_DA9052
+	help
+	  This option enables support for on-chip LED drivers found
+	  on Dialog Semiconductor DA9052-BC and DA9053-AA/Bx PMICs.
+
 config LEDS_DAC124S085
 	tristate "LED Support for DAC124S085 SPI DAC"
 	depends on LEDS_CLASS
@@ -358,7 +379,7 @@
 
 config LEDS_ASIC3
 	bool "LED support for the HTC ASIC3"
-	depends on LEDS_CLASS
+	depends on LEDS_CLASS=y
 	depends on MFD_ASIC3
 	default y
 	help
@@ -369,7 +390,7 @@
 
 config LEDS_RENESAS_TPU
 	bool "LED support for Renesas TPU"
-	depends on LEDS_CLASS && HAVE_CLK && GENERIC_GPIO
+	depends on LEDS_CLASS=y && HAVE_CLK && GENERIC_GPIO
 	help
 	  This option enables build of the LED TPU platform driver,
 	  suitable to drive any TPU channel on newer Renesas SoCs.
@@ -471,4 +492,12 @@
 comment "iptables trigger is under Netfilter config (LED target)"
 	depends on LEDS_TRIGGERS
 
+config LEDS_TRIGGER_TRANSIENT
+	tristate "LED Transient Trigger"
+	depends on LEDS_TRIGGERS
+	help
+	  This allows one-time activation of a transient state on
+	  GPIO/PWM based hardware.
+	  If unsure, say Y.
+
 endif # NEW_LEDS
diff --git a/drivers/leds/Makefile b/drivers/leds/Makefile
index 890481c..f8958cd 100644
--- a/drivers/leds/Makefile
+++ b/drivers/leds/Makefile
@@ -10,6 +10,7 @@
 obj-$(CONFIG_LEDS_BD2802)		+= leds-bd2802.o
 obj-$(CONFIG_LEDS_LOCOMO)		+= leds-locomo.o
 obj-$(CONFIG_LEDS_LM3530)		+= leds-lm3530.o
+obj-$(CONFIG_LEDS_LM3533)		+= leds-lm3533.o
 obj-$(CONFIG_LEDS_MIKROTIK_RB532)	+= leds-rb532.o
 obj-$(CONFIG_LEDS_S3C24XX)		+= leds-s3c24xx.o
 obj-$(CONFIG_LEDS_NET48XX)		+= leds-net48xx.o
@@ -31,6 +32,7 @@
 obj-$(CONFIG_LEDS_PCA955X)		+= leds-pca955x.o
 obj-$(CONFIG_LEDS_PCA9633)		+= leds-pca9633.o
 obj-$(CONFIG_LEDS_DA903X)		+= leds-da903x.o
+obj-$(CONFIG_LEDS_DA9052)		+= leds-da9052.o
 obj-$(CONFIG_LEDS_WM831X_STATUS)	+= leds-wm831x-status.o
 obj-$(CONFIG_LEDS_WM8350)		+= leds-wm8350.o
 obj-$(CONFIG_LEDS_PWM)			+= leds-pwm.o
@@ -56,3 +58,4 @@
 obj-$(CONFIG_LEDS_TRIGGER_BACKLIGHT)	+= ledtrig-backlight.o
 obj-$(CONFIG_LEDS_TRIGGER_GPIO)		+= ledtrig-gpio.o
 obj-$(CONFIG_LEDS_TRIGGER_DEFAULT_ON)	+= ledtrig-default-on.o
+obj-$(CONFIG_LEDS_TRIGGER_TRANSIENT)	+= ledtrig-transient.o
diff --git a/drivers/leds/led-class.c b/drivers/leds/led-class.c
index 5bff843..e663e6f 100644
--- a/drivers/leds/led-class.c
+++ b/drivers/leds/led-class.c
@@ -29,7 +29,7 @@
 		led_cdev->brightness = led_cdev->brightness_get(led_cdev);
 }
 
-static ssize_t led_brightness_show(struct device *dev, 
+static ssize_t led_brightness_show(struct device *dev,
 		struct device_attribute *attr, char *buf)
 {
 	struct led_classdev *led_cdev = dev_get_drvdata(dev);
@@ -44,23 +44,18 @@
 		struct device_attribute *attr, const char *buf, size_t size)
 {
 	struct led_classdev *led_cdev = dev_get_drvdata(dev);
+	unsigned long state;
 	ssize_t ret = -EINVAL;
-	char *after;
-	unsigned long state = simple_strtoul(buf, &after, 10);
-	size_t count = after - buf;
 
-	if (isspace(*after))
-		count++;
+	ret = kstrtoul(buf, 10, &state);
+	if (ret)
+		return ret;
 
-	if (count == size) {
-		ret = count;
+	if (state == LED_OFF)
+		led_trigger_remove(led_cdev);
+	led_set_brightness(led_cdev, state);
 
-		if (state == LED_OFF)
-			led_trigger_remove(led_cdev);
-		led_set_brightness(led_cdev, state);
-	}
-
-	return ret;
+	return size;
 }
 
 static ssize_t led_max_brightness_show(struct device *dev,
diff --git a/drivers/leds/led-core.c b/drivers/leds/led-core.c
index d686004..d65353d 100644
--- a/drivers/leds/led-core.c
+++ b/drivers/leds/led-core.c
@@ -44,13 +44,6 @@
 	if (!led_cdev->blink_brightness)
 		led_cdev->blink_brightness = led_cdev->max_brightness;
 
-	if (led_get_trigger_data(led_cdev) &&
-	    delay_on == led_cdev->blink_delay_on &&
-	    delay_off == led_cdev->blink_delay_off)
-		return;
-
-	led_stop_software_blink(led_cdev);
-
 	led_cdev->blink_delay_on = delay_on;
 	led_cdev->blink_delay_off = delay_off;
 
diff --git a/drivers/leds/leds-da9052.c b/drivers/leds/leds-da9052.c
new file mode 100644
index 0000000..58a5244
--- /dev/null
+++ b/drivers/leds/leds-da9052.c
@@ -0,0 +1,214 @@
+/*
+ * LED Driver for Dialog DA9052 PMICs.
+ *
+ * Copyright(c) 2012 Dialog Semiconductor Ltd.
+ *
+ * Author: David Dajun Chen <dchen@diasemi.com>
+ *
+ *  This program is free software; you can redistribute  it and/or modify it
+ *  under  the terms of  the GNU General  Public License as published by the
+ *  Free Software Foundation;  either version 2 of the  License, or (at your
+ *  option) any later version.
+ *
+ */
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/platform_device.h>
+#include <linux/leds.h>
+#include <linux/workqueue.h>
+#include <linux/slab.h>
+
+#include <linux/mfd/da9052/reg.h>
+#include <linux/mfd/da9052/da9052.h>
+#include <linux/mfd/da9052/pdata.h>
+
+#define DA9052_OPENDRAIN_OUTPUT	2
+#define DA9052_SET_HIGH_LVL_OUTPUT	(1 << 3)
+#define DA9052_MASK_UPPER_NIBBLE	0xF0
+#define DA9052_MASK_LOWER_NIBBLE	0x0F
+#define DA9052_NIBBLE_SHIFT		4
+#define DA9052_MAX_BRIGHTNESS		0x5f
+
+struct da9052_led {
+	struct led_classdev cdev;
+	struct work_struct work;
+	struct da9052 *da9052;
+	unsigned char led_index;
+	unsigned char id;
+	int brightness;
+};
+
+static unsigned char led_reg[] = {
+	DA9052_LED_CONT_4_REG,
+	DA9052_LED_CONT_5_REG,
+};
+
+static int da9052_set_led_brightness(struct da9052_led *led)
+{
+	u8 val;
+	int error;
+
+	val = (led->brightness & 0x7f) | DA9052_LED_CONT_DIM;
+
+	error = da9052_reg_write(led->da9052, led_reg[led->led_index], val);
+	if (error < 0)
+		dev_err(led->da9052->dev, "Failed to set led brightness, %d\n",
+			error);
+	return error;
+}
+
+static void da9052_led_work(struct work_struct *work)
+{
+	struct da9052_led *led = container_of(work, struct da9052_led, work);
+
+	da9052_set_led_brightness(led);
+}
+
+static void da9052_led_set(struct led_classdev *led_cdev,
+			   enum led_brightness value)
+{
+	struct da9052_led *led;
+
+	led = container_of(led_cdev, struct da9052_led, cdev);
+	led->brightness = value;
+	schedule_work(&led->work);
+}
+
+static int da9052_configure_leds(struct da9052 *da9052)
+{
+	int error;
+	unsigned char register_value = DA9052_OPENDRAIN_OUTPUT
+				       | DA9052_SET_HIGH_LVL_OUTPUT;
+
+	error = da9052_reg_update(da9052, DA9052_GPIO_14_15_REG,
+				  DA9052_MASK_LOWER_NIBBLE,
+				  register_value);
+
+	if (error < 0) {
+		dev_err(da9052->dev, "Failed to write GPIO 14-15 reg, %d\n",
+			error);
+		return error;
+	}
+
+	error = da9052_reg_update(da9052, DA9052_GPIO_14_15_REG,
+				  DA9052_MASK_UPPER_NIBBLE,
+				  register_value << DA9052_NIBBLE_SHIFT);
+	if (error < 0)
+		dev_err(da9052->dev, "Failed to write GPIO 14-15 reg, %d\n",
+			error);
+
+	return error;
+}
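+
+/*
+ * Note on da9052_configure_leds() above: DA9052_GPIO_14_15_REG packs two GPIO
+ * configurations, one per nibble (presumably GPIO14 in the lower nibble and
+ * GPIO15 in the upper), so the same open-drain, high-level output setting is
+ * written twice, the second time shifted up by DA9052_NIBBLE_SHIFT.
+ */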
+
+static int __devinit da9052_led_probe(struct platform_device *pdev)
+{
+	struct da9052_pdata *pdata;
+	struct da9052 *da9052;
+	struct led_platform_data *pled;
+	struct da9052_led *led = NULL;
+	int error = -ENODEV;
+	int i;
+
+	da9052 = dev_get_drvdata(pdev->dev.parent);
+	pdata = da9052->dev->platform_data;
+	if (pdata == NULL) {
+		dev_err(&pdev->dev, "No platform data\n");
+		goto err;
+	}
+
+	pled = pdata->pled;
+	if (pled == NULL) {
+		dev_err(&pdev->dev, "No platform data for LED\n");
+		goto err;
+	}
+
+	led = devm_kzalloc(&pdev->dev,
+			   sizeof(struct da9052_led) * pled->num_leds,
+			   GFP_KERNEL);
+	if (led == NULL) {
+		dev_err(&pdev->dev, "Failed to alloc memory\n");
+		error = -ENOMEM;
+		goto err;
+	}
+
+	for (i = 0; i < pled->num_leds; i++) {
+		led[i].cdev.name = pled->leds[i].name;
+		led[i].cdev.brightness_set = da9052_led_set;
+		led[i].cdev.brightness = LED_OFF;
+		led[i].cdev.max_brightness = DA9052_MAX_BRIGHTNESS;
+		led[i].brightness = LED_OFF;
+		led[i].led_index = pled->leds[i].flags;
+		led[i].da9052 = dev_get_drvdata(pdev->dev.parent);
+		INIT_WORK(&led[i].work, da9052_led_work);
+
+		error = led_classdev_register(pdev->dev.parent, &led[i].cdev);
+		if (error) {
+			dev_err(&pdev->dev, "Failed to register led %d\n",
+				led[i].led_index);
+			goto err_register;
+		}
+
+		error = da9052_set_led_brightness(&led[i]);
+		if (error) {
+			dev_err(&pdev->dev, "Unable to init led %d\n",
+				led[i].led_index);
+			continue;
+		}
+	}
+	error = da9052_configure_leds(led->da9052);
+	if (error) {
+		dev_err(&pdev->dev, "Failed to configure GPIO LED%d\n", error);
+		goto err_register;
+	}
+
+	platform_set_drvdata(pdev, led);
+
+	return 0;
+
+err_register:
+	for (i = i - 1; i >= 0; i--) {
+		led_classdev_unregister(&led[i].cdev);
+		cancel_work_sync(&led[i].work);
+	}
+err:
+	return error;
+}
+
+static int __devexit da9052_led_remove(struct platform_device *pdev)
+{
+	struct da9052_led *led = platform_get_drvdata(pdev);
+	struct da9052_pdata *pdata;
+	struct da9052 *da9052;
+	struct led_platform_data *pled;
+	int i;
+
+	da9052 = dev_get_drvdata(pdev->dev.parent);
+	pdata = da9052->dev->platform_data;
+	pled = pdata->pled;
+
+	for (i = 0; i < pled->num_leds; i++) {
+		led[i].brightness = 0;
+		da9052_set_led_brightness(&led[i]);
+		led_classdev_unregister(&led[i].cdev);
+		cancel_work_sync(&led[i].work);
+	}
+
+	return 0;
+}
+
+static struct platform_driver da9052_led_driver = {
+	.driver		= {
+		.name	= "da9052-leds",
+		.owner	= THIS_MODULE,
+	},
+	.probe		= da9052_led_probe,
+	.remove		= __devexit_p(da9052_led_remove),
+};
+
+module_platform_driver(da9052_led_driver);
+
+MODULE_AUTHOR("Dialog Semiconductor Ltd <dchen@diasemi.com>");
+MODULE_DESCRIPTION("LED driver for Dialog DA9052 PMIC");
+MODULE_LICENSE("GPL");
diff --git a/drivers/leds/leds-lm3530.c b/drivers/leds/leds-lm3530.c
index 968fd5f..84ba6de 100644
--- a/drivers/leds/leds-lm3530.c
+++ b/drivers/leds/leds-lm3530.c
@@ -113,6 +113,18 @@
 	bool enable;
 };
 
+/*
+ * struct lm3530_als_data
+ * @config  : value of ALS configuration register
+ * @imp_sel : value of ALS resistor select register
+ * @zone    : values of ALS ZB(Zone Boundary) registers
+ */
+struct lm3530_als_data {
+	u8 config;
+	u8 imp_sel;
+	u8 zones[LM3530_ALS_ZB_MAX];
+};
+
 static const u8 lm3530_reg[LM3530_REG_MAX] = {
 	LM3530_GEN_CONFIG,
 	LM3530_ALS_CONFIG,
@@ -141,29 +153,65 @@
 	return -1;
 }
 
+static void lm3530_als_configure(struct lm3530_platform_data *pdata,
+				struct lm3530_als_data *als)
+{
+	int i;
+	u32 als_vmin, als_vmax, als_vstep;
+
+	if (pdata->als_vmax == 0) {
+		pdata->als_vmin = 0;
+		pdata->als_vmax = LM3530_ALS_WINDOW_mV;
+	}
+
+	als_vmin = pdata->als_vmin;
+	als_vmax = pdata->als_vmax;
+
+	if ((als_vmax - als_vmin) > LM3530_ALS_WINDOW_mV)
+		pdata->als_vmax = als_vmax = als_vmin + LM3530_ALS_WINDOW_mV;
+
+	/* n zone boundary makes n+1 zones */
+	als_vstep = (als_vmax - als_vmin) / (LM3530_ALS_ZB_MAX + 1);
+
+	for (i = 0; i < LM3530_ALS_ZB_MAX; i++)
+		als->zones[i] = (((als_vmin + LM3530_ALS_OFFSET_mV) +
+			als_vstep + (i * als_vstep)) * LED_FULL) / 1000;
+
+	als->config =
+		(pdata->als_avrg_time << LM3530_ALS_AVG_TIME_SHIFT) |
+		(LM3530_ENABLE_ALS) |
+		(pdata->als_input_mode << LM3530_ALS_SEL_SHIFT);
+
+	als->imp_sel =
+		(pdata->als1_resistor_sel << LM3530_ALS1_IMP_SHIFT) |
+		(pdata->als2_resistor_sel << LM3530_ALS2_IMP_SHIFT);
+}
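+
+/*
+ * Example of the zone calculation in lm3530_als_configure() above: with the
+ * default window (als_vmin = 0, als_vmax = LM3530_ALS_WINDOW_mV) the step is
+ * one fifth of the window, and zones[i] lands at
+ * (LM3530_ALS_OFFSET_mV + (i + 1) * als_vstep) * LED_FULL / 1000, giving four
+ * evenly spaced boundaries that split the ALS input into five zones.
+ */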
+
 static int lm3530_init_registers(struct lm3530_data *drvdata)
 {
 	int ret = 0;
 	int i;
 	u8 gen_config;
-	u8 als_config = 0;
 	u8 brt_ramp;
-	u8 als_imp_sel = 0;
 	u8 brightness;
 	u8 reg_val[LM3530_REG_MAX];
-	u8 zones[LM3530_ALS_ZB_MAX];
-	u32 als_vmin, als_vmax, als_vstep;
 	struct lm3530_platform_data *pdata = drvdata->pdata;
 	struct i2c_client *client = drvdata->client;
 	struct lm3530_pwm_data *pwm = &pdata->pwm_data;
+	struct lm3530_als_data als;
+
+	memset(&als, 0, sizeof(struct lm3530_als_data));
 
 	gen_config = (pdata->brt_ramp_law << LM3530_RAMP_LAW_SHIFT) |
 			((pdata->max_current & 7) << LM3530_MAX_CURR_SHIFT);
 
 	switch (drvdata->mode) {
 	case LM3530_BL_MODE_MANUAL:
+		gen_config |= LM3530_ENABLE_I2C;
+		break;
 	case LM3530_BL_MODE_ALS:
 		gen_config |= LM3530_ENABLE_I2C;
+		lm3530_als_configure(pdata, &als);
 		break;
 	case LM3530_BL_MODE_PWM:
 		gen_config |= LM3530_ENABLE_PWM | LM3530_ENABLE_PWM_SIMPLE |
@@ -171,38 +219,6 @@
 		break;
 	}
 
-	if (drvdata->mode == LM3530_BL_MODE_ALS) {
-		if (pdata->als_vmax == 0) {
-			pdata->als_vmin = 0;
-			pdata->als_vmax = LM3530_ALS_WINDOW_mV;
-		}
-
-		als_vmin = pdata->als_vmin;
-		als_vmax = pdata->als_vmax;
-
-		if ((als_vmax - als_vmin) > LM3530_ALS_WINDOW_mV)
-			pdata->als_vmax = als_vmax =
-				als_vmin + LM3530_ALS_WINDOW_mV;
-
-		/* n zone boundary makes n+1 zones */
-		als_vstep = (als_vmax - als_vmin) / (LM3530_ALS_ZB_MAX + 1);
-
-		for (i = 0; i < LM3530_ALS_ZB_MAX; i++)
-			zones[i] = (((als_vmin + LM3530_ALS_OFFSET_mV) +
-					als_vstep + (i * als_vstep)) * LED_FULL)
-					/ 1000;
-
-		als_config =
-			(pdata->als_avrg_time << LM3530_ALS_AVG_TIME_SHIFT) |
-			(LM3530_ENABLE_ALS) |
-			(pdata->als_input_mode << LM3530_ALS_SEL_SHIFT);
-
-		als_imp_sel =
-			(pdata->als1_resistor_sel << LM3530_ALS1_IMP_SHIFT) |
-			(pdata->als2_resistor_sel << LM3530_ALS2_IMP_SHIFT);
-
-	}
-
 	brt_ramp = (pdata->brt_ramp_fall << LM3530_BRT_RAMP_FALL_SHIFT) |
 			(pdata->brt_ramp_rise << LM3530_BRT_RAMP_RISE_SHIFT);
 
@@ -215,14 +231,14 @@
 		brightness = drvdata->led_dev.max_brightness;
 
 	reg_val[0] = gen_config;	/* LM3530_GEN_CONFIG */
-	reg_val[1] = als_config;	/* LM3530_ALS_CONFIG */
+	reg_val[1] = als.config;	/* LM3530_ALS_CONFIG */
 	reg_val[2] = brt_ramp;		/* LM3530_BRT_RAMP_RATE */
-	reg_val[3] = als_imp_sel;	/* LM3530_ALS_IMP_SELECT */
+	reg_val[3] = als.imp_sel;	/* LM3530_ALS_IMP_SELECT */
 	reg_val[4] = brightness;	/* LM3530_BRT_CTRL_REG */
-	reg_val[5] = zones[0];		/* LM3530_ALS_ZB0_REG */
-	reg_val[6] = zones[1];		/* LM3530_ALS_ZB1_REG */
-	reg_val[7] = zones[2];		/* LM3530_ALS_ZB2_REG */
-	reg_val[8] = zones[3];		/* LM3530_ALS_ZB3_REG */
+	reg_val[5] = als.zones[0];	/* LM3530_ALS_ZB0_REG */
+	reg_val[6] = als.zones[1];	/* LM3530_ALS_ZB1_REG */
+	reg_val[7] = als.zones[2];	/* LM3530_ALS_ZB2_REG */
+	reg_val[8] = als.zones[3];	/* LM3530_ALS_ZB3_REG */
 	reg_val[9] = LM3530_DEF_ZT_0;	/* LM3530_ALS_Z0T_REG */
 	reg_val[10] = LM3530_DEF_ZT_1;	/* LM3530_ALS_Z1T_REG */
 	reg_val[11] = LM3530_DEF_ZT_2;	/* LM3530_ALS_Z2T_REG */
diff --git a/drivers/leds/leds-lm3533.c b/drivers/leds/leds-lm3533.c
new file mode 100644
index 0000000..f56b6e7
--- /dev/null
+++ b/drivers/leds/leds-lm3533.c
@@ -0,0 +1,785 @@
+/*
+ * leds-lm3533.c -- LM3533 LED driver
+ *
+ * Copyright (C) 2011-2012 Texas Instruments
+ *
+ * Author: Johan Hovold <jhovold@gmail.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under  the terms of the GNU General  Public License as published by the
+ * Free Software Foundation;  either version 2 of the License, or (at your
+ * option) any later version.
+ */
+
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/leds.h>
+#include <linux/mfd/core.h>
+#include <linux/mutex.h>
+#include <linux/platform_device.h>
+#include <linux/slab.h>
+#include <linux/workqueue.h>
+
+#include <linux/mfd/lm3533.h>
+
+
+#define LM3533_LVCTRLBANK_MIN		2
+#define LM3533_LVCTRLBANK_MAX		5
+#define LM3533_LVCTRLBANK_COUNT		4
+#define LM3533_RISEFALLTIME_MAX		7
+#define LM3533_ALS_CHANNEL_LV_MIN	1
+#define LM3533_ALS_CHANNEL_LV_MAX	2
+
+#define LM3533_REG_CTRLBANK_BCONF_BASE		0x1b
+#define LM3533_REG_PATTERN_ENABLE		0x28
+#define LM3533_REG_PATTERN_LOW_TIME_BASE	0x71
+#define LM3533_REG_PATTERN_HIGH_TIME_BASE	0x72
+#define LM3533_REG_PATTERN_RISETIME_BASE	0x74
+#define LM3533_REG_PATTERN_FALLTIME_BASE	0x75
+
+#define LM3533_REG_PATTERN_STEP			0x10
+
+#define LM3533_REG_CTRLBANK_BCONF_MAPPING_MASK		0x04
+#define LM3533_REG_CTRLBANK_BCONF_ALS_EN_MASK		0x02
+#define LM3533_REG_CTRLBANK_BCONF_ALS_CHANNEL_MASK	0x01
+
+#define LM3533_LED_FLAG_PATTERN_ENABLE		1
+
+
+struct lm3533_led {
+	struct lm3533 *lm3533;
+	struct lm3533_ctrlbank cb;
+	struct led_classdev cdev;
+	int id;
+
+	struct mutex mutex;
+	unsigned long flags;
+
+	struct work_struct work;
+	u8 new_brightness;
+};
+
+
+static inline struct lm3533_led *to_lm3533_led(struct led_classdev *cdev)
+{
+	return container_of(cdev, struct lm3533_led, cdev);
+}
+
+static inline int lm3533_led_get_ctrlbank_id(struct lm3533_led *led)
+{
+	return led->id + 2;
+}
+
+static inline u8 lm3533_led_get_lv_reg(struct lm3533_led *led, u8 base)
+{
+	return base + led->id;
+}
+
+static inline u8 lm3533_led_get_pattern(struct lm3533_led *led)
+{
+	return led->id;
+}
+
+static inline u8 lm3533_led_get_pattern_reg(struct lm3533_led *led,
+								u8 base)
+{
+	return base + lm3533_led_get_pattern(led) * LM3533_REG_PATTERN_STEP;
+}
+
+static int lm3533_led_pattern_enable(struct lm3533_led *led, int enable)
+{
+	u8 mask;
+	u8 val;
+	int pattern;
+	int state;
+	int ret = 0;
+
+	dev_dbg(led->cdev.dev, "%s - %d\n", __func__, enable);
+
+	mutex_lock(&led->mutex);
+
+	state = test_bit(LM3533_LED_FLAG_PATTERN_ENABLE, &led->flags);
+	if ((enable && state) || (!enable && !state))
+		goto out;
+
+	pattern = lm3533_led_get_pattern(led);
+	mask = 1 << (2 * pattern);
+
+	if (enable)
+		val = mask;
+	else
+		val = 0;
+
+	ret = lm3533_update(led->lm3533, LM3533_REG_PATTERN_ENABLE, val, mask);
+	if (ret) {
+		dev_err(led->cdev.dev, "failed to enable pattern %d (%d)\n",
+							pattern, enable);
+		goto out;
+	}
+
+	__change_bit(LM3533_LED_FLAG_PATTERN_ENABLE, &led->flags);
+out:
+	mutex_unlock(&led->mutex);
+
+	return ret;
+}
+
+static void lm3533_led_work(struct work_struct *work)
+{
+	struct lm3533_led *led = container_of(work, struct lm3533_led, work);
+
+	dev_dbg(led->cdev.dev, "%s - %u\n", __func__, led->new_brightness);
+
+	if (led->new_brightness == 0)
+		lm3533_led_pattern_enable(led, 0);	/* disable blink */
+
+	lm3533_ctrlbank_set_brightness(&led->cb, led->new_brightness);
+}
+
+static void lm3533_led_set(struct led_classdev *cdev,
+						enum led_brightness value)
+{
+	struct lm3533_led *led = to_lm3533_led(cdev);
+
+	dev_dbg(led->cdev.dev, "%s - %d\n", __func__, value);
+
+	led->new_brightness = value;
+	schedule_work(&led->work);
+}
+
+static enum led_brightness lm3533_led_get(struct led_classdev *cdev)
+{
+	struct lm3533_led *led = to_lm3533_led(cdev);
+	u8 val;
+	int ret;
+
+	ret = lm3533_ctrlbank_get_brightness(&led->cb, &val);
+	if (ret)
+		return ret;
+
+	dev_dbg(led->cdev.dev, "%s - %u\n", __func__, val);
+
+	return val;
+}
+
+/* Pattern generator defines (delays in us). */
+#define LM3533_LED_DELAY1_VMIN	0x00
+#define LM3533_LED_DELAY2_VMIN	0x3d
+#define LM3533_LED_DELAY3_VMIN	0x80
+
+#define LM3533_LED_DELAY1_VMAX	(LM3533_LED_DELAY2_VMIN - 1)
+#define LM3533_LED_DELAY2_VMAX	(LM3533_LED_DELAY3_VMIN - 1)
+#define LM3533_LED_DELAY3_VMAX	0xff
+
+#define LM3533_LED_DELAY1_TMIN	16384U
+#define LM3533_LED_DELAY2_TMIN	1130496U
+#define LM3533_LED_DELAY3_TMIN	10305536U
+
+#define LM3533_LED_DELAY1_TMAX	999424U
+#define LM3533_LED_DELAY2_TMAX	9781248U
+#define LM3533_LED_DELAY3_TMAX	76890112U
+
+/* t_step = (t_max - t_min) / (v_max - v_min) */
+#define LM3533_LED_DELAY1_TSTEP	16384
+#define LM3533_LED_DELAY2_TSTEP	131072
+#define LM3533_LED_DELAY3_TSTEP	524288
+
+/* Delay limits for hardware accelerated blinking (in ms). */
+#define LM3533_LED_DELAY_ON_MAX \
+	((LM3533_LED_DELAY2_TMAX + LM3533_LED_DELAY2_TSTEP / 2) / 1000)
+#define LM3533_LED_DELAY_OFF_MAX \
+	((LM3533_LED_DELAY3_TMAX + LM3533_LED_DELAY3_TSTEP / 2) / 1000)
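+/* i.e. roughly 9.8 s maximum delay_on and 77 s maximum delay_off */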
+
+/*
+ * Returns linear map of *t from [t_min,t_max] to [v_min,v_max] with a step
+ * size of t_step, where
+ *
+ *	t_step = (t_max - t_min) / (v_max - v_min)
+ *
+ * and updates *t to reflect the mapped value.
+ */
+static u8 time_to_val(unsigned *t, unsigned t_min, unsigned t_step,
+							u8 v_min, u8 v_max)
+{
+	unsigned val;
+
+	val = (*t + t_step / 2 - t_min) / t_step + v_min;
+
+	*t = t_step * (val - v_min) + t_min;
+
+	return (u8)val;
+}
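+
+/*
+ * Worked example for delay group 1 (t_min = 16384, t_step = 16384,
+ * v_min = 0x00): *t = 500000 us maps to val = (500000 + 8192 - 16384) / 16384
+ * = 30 (0x1e), and *t is rounded to 16384 * 30 + 16384 = 507904 us.
+ */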
+
+/*
+ * Returns time code corresponding to *delay (in ms) and updates *delay to
+ * reflect actual hardware delay.
+ *
+ * Hardware supports 256 discrete delay times, divided into three groups with
+ * the following ranges and step-sizes:
+ *
+ *	[   16,   999]	[0x00, 0x3e]	step  16 ms
+ *	[ 1130,  9781]	[0x3d, 0x7f]	step 131 ms
+ *	[10306, 76890]	[0x80, 0xff]	step 524 ms
+ *
+ * Note that delay group 3 is only available for delay_off.
+ */
+static u8 lm3533_led_get_hw_delay(unsigned *delay)
+{
+	unsigned t;
+	u8 val;
+
+	t = *delay * 1000;
+
+	if (t >= (LM3533_LED_DELAY2_TMAX + LM3533_LED_DELAY3_TMIN) / 2) {
+		t = clamp(t, LM3533_LED_DELAY3_TMIN, LM3533_LED_DELAY3_TMAX);
+		val = time_to_val(&t,	LM3533_LED_DELAY3_TMIN,
+					LM3533_LED_DELAY3_TSTEP,
+					LM3533_LED_DELAY3_VMIN,
+					LM3533_LED_DELAY3_VMAX);
+	} else if (t >= (LM3533_LED_DELAY1_TMAX + LM3533_LED_DELAY2_TMIN) / 2) {
+		t = clamp(t, LM3533_LED_DELAY2_TMIN, LM3533_LED_DELAY2_TMAX);
+		val = time_to_val(&t,	LM3533_LED_DELAY2_TMIN,
+					LM3533_LED_DELAY2_TSTEP,
+					LM3533_LED_DELAY2_VMIN,
+					LM3533_LED_DELAY2_VMAX);
+	} else {
+		t = clamp(t, LM3533_LED_DELAY1_TMIN, LM3533_LED_DELAY1_TMAX);
+		val = time_to_val(&t,	LM3533_LED_DELAY1_TMIN,
+					LM3533_LED_DELAY1_TSTEP,
+					LM3533_LED_DELAY1_VMIN,
+					LM3533_LED_DELAY1_VMAX);
+	}
+
+	*delay = (t + 500) / 1000;
+
+	return val;
+}
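+
+/*
+ * The group selection above switches at the midpoints of the gaps between
+ * ranges: delays above ~1065 ms use group 2 and delays above ~10043 ms use
+ * group 3 (group 3 being reachable only for delay_off).
+ */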
+
+/*
+ * Set delay register base to *delay (in ms) and update *delay to reflect
+ * actual hardware delay used.
+ */
+static u8 lm3533_led_delay_set(struct lm3533_led *led, u8 base,
+							unsigned long *delay)
+{
+	unsigned t;
+	u8 val;
+	u8 reg;
+	int ret;
+
+	t = (unsigned)*delay;
+
+	/* Delay group 3 is only available for low time (delay off). */
+	if (base != LM3533_REG_PATTERN_LOW_TIME_BASE)
+		t = min(t, LM3533_LED_DELAY2_TMAX / 1000);
+
+	val = lm3533_led_get_hw_delay(&t);
+
+	dev_dbg(led->cdev.dev, "%s - %lu: %u (0x%02x)\n", __func__,
+							*delay, t, val);
+	reg = lm3533_led_get_pattern_reg(led, base);
+	ret = lm3533_write(led->lm3533, reg, val);
+	if (ret)
+		dev_err(led->cdev.dev, "failed to set delay (%02x)\n", reg);
+
+	*delay = t;
+
+	return ret;
+}
+
+static int lm3533_led_delay_on_set(struct lm3533_led *led, unsigned long *t)
+{
+	return lm3533_led_delay_set(led, LM3533_REG_PATTERN_HIGH_TIME_BASE, t);
+}
+
+static int lm3533_led_delay_off_set(struct lm3533_led *led, unsigned long *t)
+{
+	return lm3533_led_delay_set(led, LM3533_REG_PATTERN_LOW_TIME_BASE, t);
+}
+
+static int lm3533_led_blink_set(struct led_classdev *cdev,
+				unsigned long *delay_on,
+				unsigned long *delay_off)
+{
+	struct lm3533_led *led = to_lm3533_led(cdev);
+	int ret;
+
+	dev_dbg(led->cdev.dev, "%s - on = %lu, off = %lu\n", __func__,
+							*delay_on, *delay_off);
+
+	if (*delay_on > LM3533_LED_DELAY_ON_MAX ||
+					*delay_off > LM3533_LED_DELAY_OFF_MAX)
+		return -EINVAL;
+
+	if (*delay_on == 0 && *delay_off == 0) {
+		*delay_on = 500;
+		*delay_off = 500;
+	}
+
+	ret = lm3533_led_delay_on_set(led, delay_on);
+	if (ret)
+		return ret;
+
+	ret = lm3533_led_delay_off_set(led, delay_off);
+	if (ret)
+		return ret;
+
+	return lm3533_led_pattern_enable(led, 1);
+}
+
+static ssize_t show_id(struct device *dev,
+				struct device_attribute *attr, char *buf)
+{
+	struct led_classdev *led_cdev = dev_get_drvdata(dev);
+	struct lm3533_led *led = to_lm3533_led(led_cdev);
+
+	return scnprintf(buf, PAGE_SIZE, "%d\n", led->id);
+}
+
+/*
+ * Pattern generator rise/fall times:
+ *
+ *   0 - 2048 us (default)
+ *   1 - 262 ms
+ *   2 - 524 ms
+ *   3 - 1.049 s
+ *   4 - 2.097 s
+ *   5 - 4.194 s
+ *   6 - 8.389 s
+ *   7 - 16.78 s
+ */
+static ssize_t show_risefalltime(struct device *dev,
+					struct device_attribute *attr,
+					char *buf, u8 base)
+{
+	struct led_classdev *led_cdev = dev_get_drvdata(dev);
+	struct lm3533_led *led = to_lm3533_led(led_cdev);
+	ssize_t ret;
+	u8 reg;
+	u8 val;
+
+	reg = lm3533_led_get_pattern_reg(led, base);
+	ret = lm3533_read(led->lm3533, reg, &val);
+	if (ret)
+		return ret;
+
+	return scnprintf(buf, PAGE_SIZE, "%x\n", val);
+}
+
+static ssize_t show_risetime(struct device *dev,
+				struct device_attribute *attr, char *buf)
+{
+	return show_risefalltime(dev, attr, buf,
+					LM3533_REG_PATTERN_RISETIME_BASE);
+}
+
+static ssize_t show_falltime(struct device *dev,
+				struct device_attribute *attr, char *buf)
+{
+	return show_risefalltime(dev, attr, buf,
+					LM3533_REG_PATTERN_FALLTIME_BASE);
+}
+
+static ssize_t store_risefalltime(struct device *dev,
+					struct device_attribute *attr,
+					const char *buf, size_t len, u8 base)
+{
+	struct led_classdev *led_cdev = dev_get_drvdata(dev);
+	struct lm3533_led *led = to_lm3533_led(led_cdev);
+	u8 val;
+	u8 reg;
+	int ret;
+
+	if (kstrtou8(buf, 0, &val) || val > LM3533_RISEFALLTIME_MAX)
+		return -EINVAL;
+
+	reg = lm3533_led_get_pattern_reg(led, base);
+	ret = lm3533_write(led->lm3533, reg, val);
+	if (ret)
+		return ret;
+
+	return len;
+}
+
+static ssize_t store_risetime(struct device *dev,
+					struct device_attribute *attr,
+					const char *buf, size_t len)
+{
+	return store_risefalltime(dev, attr, buf, len,
+					LM3533_REG_PATTERN_RISETIME_BASE);
+}
+
+static ssize_t store_falltime(struct device *dev,
+					struct device_attribute *attr,
+					const char *buf, size_t len)
+{
+	return store_risefalltime(dev, attr, buf, len,
+					LM3533_REG_PATTERN_FALLTIME_BASE);
+}
+
+static ssize_t show_als_channel(struct device *dev,
+				struct device_attribute *attr, char *buf)
+{
+	struct led_classdev *led_cdev = dev_get_drvdata(dev);
+	struct lm3533_led *led = to_lm3533_led(led_cdev);
+	unsigned channel;
+	u8 reg;
+	u8 val;
+	int ret;
+
+	reg = lm3533_led_get_lv_reg(led, LM3533_REG_CTRLBANK_BCONF_BASE);
+	ret = lm3533_read(led->lm3533, reg, &val);
+	if (ret)
+		return ret;
+
+	channel = (val & LM3533_REG_CTRLBANK_BCONF_ALS_CHANNEL_MASK) + 1;
+
+	return scnprintf(buf, PAGE_SIZE, "%u\n", channel);
+}
+
+static ssize_t store_als_channel(struct device *dev,
+					struct device_attribute *attr,
+					const char *buf, size_t len)
+{
+	struct led_classdev *led_cdev = dev_get_drvdata(dev);
+	struct lm3533_led *led = to_lm3533_led(led_cdev);
+	unsigned channel;
+	u8 reg;
+	u8 val;
+	u8 mask;
+	int ret;
+
+	if (kstrtouint(buf, 0, &channel))
+		return -EINVAL;
+
+	if (channel < LM3533_ALS_CHANNEL_LV_MIN ||
+					channel > LM3533_ALS_CHANNEL_LV_MAX)
+		return -EINVAL;
+
+	reg = lm3533_led_get_lv_reg(led, LM3533_REG_CTRLBANK_BCONF_BASE);
+	mask = LM3533_REG_CTRLBANK_BCONF_ALS_CHANNEL_MASK;
+	val = channel - 1;
+
+	ret = lm3533_update(led->lm3533, reg, val, mask);
+	if (ret)
+		return ret;
+
+	return len;
+}
+
+static ssize_t show_als_en(struct device *dev,
+				struct device_attribute *attr, char *buf)
+{
+	struct led_classdev *led_cdev = dev_get_drvdata(dev);
+	struct lm3533_led *led = to_lm3533_led(led_cdev);
+	bool enable;
+	u8 reg;
+	u8 val;
+	int ret;
+
+	reg = lm3533_led_get_lv_reg(led, LM3533_REG_CTRLBANK_BCONF_BASE);
+	ret = lm3533_read(led->lm3533, reg, &val);
+	if (ret)
+		return ret;
+
+	enable = val & LM3533_REG_CTRLBANK_BCONF_ALS_EN_MASK;
+
+	return scnprintf(buf, PAGE_SIZE, "%d\n", enable);
+}
+
+static ssize_t store_als_en(struct device *dev,
+					struct device_attribute *attr,
+					const char *buf, size_t len)
+{
+	struct led_classdev *led_cdev = dev_get_drvdata(dev);
+	struct lm3533_led *led = to_lm3533_led(led_cdev);
+	unsigned enable;
+	u8 reg;
+	u8 mask;
+	u8 val;
+	int ret;
+
+	if (kstrtouint(buf, 0, &enable))
+		return -EINVAL;
+
+	reg = lm3533_led_get_lv_reg(led, LM3533_REG_CTRLBANK_BCONF_BASE);
+	mask = LM3533_REG_CTRLBANK_BCONF_ALS_EN_MASK;
+
+	if (enable)
+		val = mask;
+	else
+		val = 0;
+
+	ret = lm3533_update(led->lm3533, reg, val, mask);
+	if (ret)
+		return ret;
+
+	return len;
+}
+
+static ssize_t show_linear(struct device *dev,
+				struct device_attribute *attr, char *buf)
+{
+	struct led_classdev *led_cdev = dev_get_drvdata(dev);
+	struct lm3533_led *led = to_lm3533_led(led_cdev);
+	u8 reg;
+	u8 val;
+	int linear;
+	int ret;
+
+	reg = lm3533_led_get_lv_reg(led, LM3533_REG_CTRLBANK_BCONF_BASE);
+	ret = lm3533_read(led->lm3533, reg, &val);
+	if (ret)
+		return ret;
+
+	if (val & LM3533_REG_CTRLBANK_BCONF_MAPPING_MASK)
+		linear = 1;
+	else
+		linear = 0;
+
+	return scnprintf(buf, PAGE_SIZE, "%x\n", linear);
+}
+
+static ssize_t store_linear(struct device *dev,
+					struct device_attribute *attr,
+					const char *buf, size_t len)
+{
+	struct led_classdev *led_cdev = dev_get_drvdata(dev);
+	struct lm3533_led *led = to_lm3533_led(led_cdev);
+	unsigned long linear;
+	u8 reg;
+	u8 mask;
+	u8 val;
+	int ret;
+
+	if (kstrtoul(buf, 0, &linear))
+		return -EINVAL;
+
+	reg = lm3533_led_get_lv_reg(led, LM3533_REG_CTRLBANK_BCONF_BASE);
+	mask = LM3533_REG_CTRLBANK_BCONF_MAPPING_MASK;
+
+	if (linear)
+		val = mask;
+	else
+		val = 0;
+
+	ret = lm3533_update(led->lm3533, reg, val, mask);
+	if (ret)
+		return ret;
+
+	return len;
+}
+
+static ssize_t show_pwm(struct device *dev,
+					struct device_attribute *attr,
+					char *buf)
+{
+	struct led_classdev *led_cdev = dev_get_drvdata(dev);
+	struct lm3533_led *led = to_lm3533_led(led_cdev);
+	u8 val;
+	int ret;
+
+	ret = lm3533_ctrlbank_get_pwm(&led->cb, &val);
+	if (ret)
+		return ret;
+
+	return scnprintf(buf, PAGE_SIZE, "%u\n", val);
+}
+
+static ssize_t store_pwm(struct device *dev,
+					struct device_attribute *attr,
+					const char *buf, size_t len)
+{
+	struct led_classdev *led_cdev = dev_get_drvdata(dev);
+	struct lm3533_led *led = to_lm3533_led(led_cdev);
+	u8 val;
+	int ret;
+
+	if (kstrtou8(buf, 0, &val))
+		return -EINVAL;
+
+	ret = lm3533_ctrlbank_set_pwm(&led->cb, val);
+	if (ret)
+		return ret;
+
+	return len;
+}
+
+static LM3533_ATTR_RW(als_channel);
+static LM3533_ATTR_RW(als_en);
+static LM3533_ATTR_RW(falltime);
+static LM3533_ATTR_RO(id);
+static LM3533_ATTR_RW(linear);
+static LM3533_ATTR_RW(pwm);
+static LM3533_ATTR_RW(risetime);
+
+static struct attribute *lm3533_led_attributes[] = {
+	&dev_attr_als_channel.attr,
+	&dev_attr_als_en.attr,
+	&dev_attr_falltime.attr,
+	&dev_attr_id.attr,
+	&dev_attr_linear.attr,
+	&dev_attr_pwm.attr,
+	&dev_attr_risetime.attr,
+	NULL,
+};
+
+static umode_t lm3533_led_attr_is_visible(struct kobject *kobj,
+					     struct attribute *attr, int n)
+{
+	struct device *dev = container_of(kobj, struct device, kobj);
+	struct led_classdev *led_cdev = dev_get_drvdata(dev);
+	struct lm3533_led *led = to_lm3533_led(led_cdev);
+	umode_t mode = attr->mode;
+
+	if (attr == &dev_attr_als_channel.attr ||
+					attr == &dev_attr_als_en.attr) {
+		if (!led->lm3533->have_als)
+			mode = 0;
+	}
+
+	return mode;
+}
+
+static struct attribute_group lm3533_led_attribute_group = {
+	.is_visible	= lm3533_led_attr_is_visible,
+	.attrs		= lm3533_led_attributes
+};
+
+static int __devinit lm3533_led_setup(struct lm3533_led *led,
+					struct lm3533_led_platform_data *pdata)
+{
+	int ret;
+
+	ret = lm3533_ctrlbank_set_max_current(&led->cb, pdata->max_current);
+	if (ret)
+		return ret;
+
+	return lm3533_ctrlbank_set_pwm(&led->cb, pdata->pwm);
+}
+
+static int __devinit lm3533_led_probe(struct platform_device *pdev)
+{
+	struct lm3533 *lm3533;
+	struct lm3533_led_platform_data *pdata;
+	struct lm3533_led *led;
+	int ret;
+
+	dev_dbg(&pdev->dev, "%s\n", __func__);
+
+	lm3533 = dev_get_drvdata(pdev->dev.parent);
+	if (!lm3533)
+		return -EINVAL;
+
+	pdata = pdev->dev.platform_data;
+	if (!pdata) {
+		dev_err(&pdev->dev, "no platform data\n");
+		return -EINVAL;
+	}
+
+	if (pdev->id < 0 || pdev->id >= LM3533_LVCTRLBANK_COUNT) {
+		dev_err(&pdev->dev, "illegal LED id %d\n", pdev->id);
+		return -EINVAL;
+	}
+
+	led = devm_kzalloc(&pdev->dev, sizeof(*led), GFP_KERNEL);
+	if (!led)
+		return -ENOMEM;
+
+	led->lm3533 = lm3533;
+	led->cdev.name = pdata->name;
+	led->cdev.default_trigger = pdata->default_trigger;
+	led->cdev.brightness_set = lm3533_led_set;
+	led->cdev.brightness_get = lm3533_led_get;
+	led->cdev.blink_set = lm3533_led_blink_set;
+	led->cdev.brightness = LED_OFF;
+	led->id = pdev->id;
+
+	mutex_init(&led->mutex);
+	INIT_WORK(&led->work, lm3533_led_work);
+
+	/* The class framework makes a callback to get brightness during
+	 * registration so use parent device (for error reporting) until
+	 * registered.
+	 */
+	led->cb.lm3533 = lm3533;
+	led->cb.id = lm3533_led_get_ctrlbank_id(led);
+	led->cb.dev = lm3533->dev;
+
+	platform_set_drvdata(pdev, led);
+
+	ret = led_classdev_register(pdev->dev.parent, &led->cdev);
+	if (ret) {
+		dev_err(&pdev->dev, "failed to register LED %d\n", pdev->id);
+		return ret;
+	}
+
+	led->cb.dev = led->cdev.dev;
+
+	ret = sysfs_create_group(&led->cdev.dev->kobj,
+						&lm3533_led_attribute_group);
+	if (ret < 0) {
+		dev_err(&pdev->dev, "failed to create sysfs attributes\n");
+		goto err_unregister;
+	}
+
+	ret = lm3533_led_setup(led, pdata);
+	if (ret)
+		goto err_sysfs_remove;
+
+	ret = lm3533_ctrlbank_enable(&led->cb);
+	if (ret)
+		goto err_sysfs_remove;
+
+	return 0;
+
+err_sysfs_remove:
+	sysfs_remove_group(&led->cdev.dev->kobj, &lm3533_led_attribute_group);
+err_unregister:
+	led_classdev_unregister(&led->cdev);
+	flush_work_sync(&led->work);
+
+	return ret;
+}
+
+static int __devexit lm3533_led_remove(struct platform_device *pdev)
+{
+	struct lm3533_led *led = platform_get_drvdata(pdev);
+
+	dev_dbg(&pdev->dev, "%s\n", __func__);
+
+	lm3533_ctrlbank_disable(&led->cb);
+	sysfs_remove_group(&led->cdev.dev->kobj, &lm3533_led_attribute_group);
+	led_classdev_unregister(&led->cdev);
+	flush_work_sync(&led->work);
+
+	return 0;
+}
+
+static void lm3533_led_shutdown(struct platform_device *pdev)
+{
+	struct lm3533_led *led = platform_get_drvdata(pdev);
+
+	dev_dbg(&pdev->dev, "%s\n", __func__);
+
+	lm3533_ctrlbank_disable(&led->cb);
+	lm3533_led_set(&led->cdev, LED_OFF);		/* disable blink */
+	flush_work_sync(&led->work);
+}
+
+static struct platform_driver lm3533_led_driver = {
+	.driver = {
+		.name = "lm3533-leds",
+		.owner = THIS_MODULE,
+	},
+	.probe		= lm3533_led_probe,
+	.remove		= __devexit_p(lm3533_led_remove),
+	.shutdown	= lm3533_led_shutdown,
+};
+module_platform_driver(lm3533_led_driver);
+
+MODULE_AUTHOR("Johan Hovold <jhovold@gmail.com>");
+MODULE_DESCRIPTION("LM3533 LED driver");
+MODULE_LICENSE("GPL");
+MODULE_ALIAS("platform:lm3533-leds");
diff --git a/drivers/leds/leds-lp5521.c b/drivers/leds/leds-lp5521.c
index 410a723..2381562 100644
--- a/drivers/leds/leds-lp5521.c
+++ b/drivers/leds/leds-lp5521.c
@@ -193,9 +193,14 @@
 
 	/* move current engine to direct mode and remember the state */
 	ret = lp5521_set_engine_mode(eng, LP5521_CMD_DIRECT);
+	if (ret)
+		return ret;
+
 	/* Mode change requires min 500 us delay. 1 - 2 ms with margin */
 	usleep_range(1000, 2000);
-	ret |= lp5521_read(client, LP5521_REG_OP_MODE, &mode);
+	ret = lp5521_read(client, LP5521_REG_OP_MODE, &mode);
+	if (ret)
+		return ret;
 
 	/* For loading, all the engines to load mode */
 	lp5521_write(client, LP5521_REG_OP_MODE, LP5521_CMD_DIRECT);
@@ -211,8 +216,7 @@
 				LP5521_PROG_MEM_SIZE,
 				pattern);
 
-	ret |= lp5521_write(client, LP5521_REG_OP_MODE, mode);
-	return ret;
+	return lp5521_write(client, LP5521_REG_OP_MODE, mode);
 }
 
 static int lp5521_set_led_current(struct lp5521_chip *chip, int led, u8 curr)
@@ -785,7 +789,7 @@
 	 * LP5521_REG_ENABLE register will not have any effect - strange!
 	 */
 	ret = lp5521_read(client, LP5521_REG_R_CURRENT, &buf);
-	if (buf != LP5521_REG_R_CURR_DEFAULT) {
+	if (ret || buf != LP5521_REG_R_CURR_DEFAULT) {
 		dev_err(&client->dev, "error in resetting chip\n");
 		goto fail2;
 	}
diff --git a/drivers/leds/leds-mc13783.c b/drivers/leds/leds-mc13783.c
index 8bc4915..4cc6a2e 100644
--- a/drivers/leds/leds-mc13783.c
+++ b/drivers/leds/leds-mc13783.c
@@ -280,7 +280,7 @@
 		return -EINVAL;
 	}
 
-	led = kzalloc(sizeof(*led) * pdata->num_leds, GFP_KERNEL);
+	led = kcalloc(pdata->num_leds, sizeof(*led), GFP_KERNEL);
 	if (led == NULL) {
 		dev_err(&pdev->dev, "failed to alloc memory\n");
 		return -ENOMEM;
diff --git a/drivers/leds/leds-pca955x.c b/drivers/leds/leds-pca955x.c
index dcc3bc3..5f462db 100644
--- a/drivers/leds/leds-pca955x.c
+++ b/drivers/leds/leds-pca955x.c
@@ -101,11 +101,16 @@
 };
 MODULE_DEVICE_TABLE(i2c, pca955x_id);
 
-struct pca955x_led {
+struct pca955x {
+	struct mutex lock;
+	struct pca955x_led *leds;
 	struct pca955x_chipdef	*chipdef;
 	struct i2c_client	*client;
+};
+
+struct pca955x_led {
+	struct pca955x	*pca955x;
 	struct work_struct	work;
-	spinlock_t		lock;
 	enum led_brightness	brightness;
 	struct led_classdev	led_cdev;
 	int			led_num;	/* 0 .. 15 potentially */
@@ -140,7 +145,7 @@
  */
 static void pca955x_write_psc(struct i2c_client *client, int n, u8 val)
 {
-	struct pca955x_led *pca955x = i2c_get_clientdata(client);
+	struct pca955x *pca955x = i2c_get_clientdata(client);
 
 	i2c_smbus_write_byte_data(client,
 		pca95xx_num_input_regs(pca955x->chipdef->bits) + 2*n,
@@ -156,7 +161,7 @@
  */
 static void pca955x_write_pwm(struct i2c_client *client, int n, u8 val)
 {
-	struct pca955x_led *pca955x = i2c_get_clientdata(client);
+	struct pca955x *pca955x = i2c_get_clientdata(client);
 
 	i2c_smbus_write_byte_data(client,
 		pca95xx_num_input_regs(pca955x->chipdef->bits) + 1 + 2*n,
@@ -169,7 +174,7 @@
  */
 static void pca955x_write_ls(struct i2c_client *client, int n, u8 val)
 {
-	struct pca955x_led *pca955x = i2c_get_clientdata(client);
+	struct pca955x *pca955x = i2c_get_clientdata(client);
 
 	i2c_smbus_write_byte_data(client,
 		pca95xx_num_input_regs(pca955x->chipdef->bits) + 4 + n,
@@ -182,7 +187,7 @@
  */
 static u8 pca955x_read_ls(struct i2c_client *client, int n)
 {
-	struct pca955x_led *pca955x = i2c_get_clientdata(client);
+	struct pca955x *pca955x = i2c_get_clientdata(client);
 
 	return (u8) i2c_smbus_read_byte_data(client,
 		pca95xx_num_input_regs(pca955x->chipdef->bits) + 4 + n);
@@ -190,18 +195,23 @@
 
 static void pca955x_led_work(struct work_struct *work)
 {
-	struct pca955x_led *pca955x;
+	struct pca955x_led *pca955x_led;
+	struct pca955x *pca955x;
 	u8 ls;
 	int chip_ls;	/* which LSx to use (0-3 potentially) */
 	int ls_led;	/* which set of bits within LSx to use (0-3) */
 
-	pca955x = container_of(work, struct pca955x_led, work);
-	chip_ls = pca955x->led_num / 4;
-	ls_led = pca955x->led_num % 4;
+	pca955x_led = container_of(work, struct pca955x_led, work);
+	pca955x = pca955x_led->pca955x;
+
+	chip_ls = pca955x_led->led_num / 4;
+	ls_led = pca955x_led->led_num % 4;
+
+	mutex_lock(&pca955x->lock);
 
 	ls = pca955x_read_ls(pca955x->client, chip_ls);
 
-	switch (pca955x->brightness) {
+	switch (pca955x_led->brightness) {
 	case LED_FULL:
 		ls = pca955x_ledsel(ls, ls_led, PCA955X_LS_LED_ON);
 		break;
@@ -219,12 +229,15 @@
 		 * OFF, HALF, or FULL.  But, this is probably better than
 		 * just turning off for all other values.
 		 */
-		pca955x_write_pwm(pca955x->client, 1, 255-pca955x->brightness);
+		pca955x_write_pwm(pca955x->client, 1,
+				255 - pca955x_led->brightness);
 		ls = pca955x_ledsel(ls, ls_led, PCA955X_LS_BLINK1);
 		break;
 	}
 
 	pca955x_write_ls(pca955x->client, chip_ls, ls);
+
+	mutex_unlock(&pca955x->lock);
 }
 
 static void pca955x_led_set(struct led_classdev *led_cdev, enum led_brightness value)
@@ -233,7 +246,6 @@
 
 	pca955x = container_of(led_cdev, struct pca955x_led, led_cdev);
 
-	spin_lock(&pca955x->lock);
 	pca955x->brightness = value;
 
 	/*
@@ -241,14 +253,13 @@
 	 * can sleep.
 	 */
 	schedule_work(&pca955x->work);
-
-	spin_unlock(&pca955x->lock);
 }
 
 static int __devinit pca955x_probe(struct i2c_client *client,
 					const struct i2c_device_id *id)
 {
-	struct pca955x_led *pca955x;
+	struct pca955x *pca955x;
+	struct pca955x_led *pca955x_led;
 	struct pca955x_chipdef *chip;
 	struct i2c_adapter *adapter;
 	struct led_platform_data *pdata;
@@ -282,39 +293,48 @@
 		}
 	}
 
-	pca955x = kzalloc(sizeof(*pca955x) * chip->bits, GFP_KERNEL);
+	pca955x = kzalloc(sizeof(*pca955x), GFP_KERNEL);
 	if (!pca955x)
 		return -ENOMEM;
 
+	pca955x->leds = kzalloc(sizeof(*pca955x_led) * chip->bits, GFP_KERNEL);
+	if (!pca955x->leds) {
+		err = -ENOMEM;
+		goto exit_nomem;
+	}
+
 	i2c_set_clientdata(client, pca955x);
 
+	mutex_init(&pca955x->lock);
+	pca955x->client = client;
+	pca955x->chipdef = chip;
+
 	for (i = 0; i < chip->bits; i++) {
-		pca955x[i].chipdef = chip;
-		pca955x[i].client = client;
-		pca955x[i].led_num = i;
+		pca955x_led = &pca955x->leds[i];
+		pca955x_led->led_num = i;
+		pca955x_led->pca955x = pca955x;
 
 		/* Platform data can specify LED names and default triggers */
 		if (pdata) {
 			if (pdata->leds[i].name)
-				snprintf(pca955x[i].name,
-					 sizeof(pca955x[i].name), "pca955x:%s",
-					 pdata->leds[i].name);
+				snprintf(pca955x_led->name,
+					sizeof(pca955x_led->name), "pca955x:%s",
+					pdata->leds[i].name);
 			if (pdata->leds[i].default_trigger)
-				pca955x[i].led_cdev.default_trigger =
+				pca955x_led->led_cdev.default_trigger =
 					pdata->leds[i].default_trigger;
 		} else {
-			snprintf(pca955x[i].name, sizeof(pca955x[i].name),
+			snprintf(pca955x_led->name, sizeof(pca955x_led->name),
 				 "pca955x:%d", i);
 		}
 
-		spin_lock_init(&pca955x[i].lock);
+		pca955x_led->led_cdev.name = pca955x_led->name;
+		pca955x_led->led_cdev.brightness_set = pca955x_led_set;
 
-		pca955x[i].led_cdev.name = pca955x[i].name;
-		pca955x[i].led_cdev.brightness_set = pca955x_led_set;
+		INIT_WORK(&pca955x_led->work, pca955x_led_work);
 
-		INIT_WORK(&pca955x[i].work, pca955x_led_work);
-
-		err = led_classdev_register(&client->dev, &pca955x[i].led_cdev);
+		err = led_classdev_register(&client->dev,
+					&pca955x_led->led_cdev);
 		if (err < 0)
 			goto exit;
 	}
@@ -337,10 +357,12 @@
 
 exit:
 	while (i--) {
-		led_classdev_unregister(&pca955x[i].led_cdev);
-		cancel_work_sync(&pca955x[i].work);
+		led_classdev_unregister(&pca955x->leds[i].led_cdev);
+		cancel_work_sync(&pca955x->leds[i].work);
 	}
 
+	kfree(pca955x->leds);
+exit_nomem:
 	kfree(pca955x);
 
 	return err;
@@ -348,14 +370,15 @@
 
 static int __devexit pca955x_remove(struct i2c_client *client)
 {
-	struct pca955x_led *pca955x = i2c_get_clientdata(client);
+	struct pca955x *pca955x = i2c_get_clientdata(client);
 	int i;
 
 	for (i = 0; i < pca955x->chipdef->bits; i++) {
-		led_classdev_unregister(&pca955x[i].led_cdev);
-		cancel_work_sync(&pca955x[i].work);
+		led_classdev_unregister(&pca955x->leds[i].led_cdev);
+		cancel_work_sync(&pca955x->leds[i].work);
 	}
 
+	kfree(pca955x->leds);
 	kfree(pca955x);
 
 	return 0;
diff --git a/drivers/leds/ledtrig-backlight.c b/drivers/leds/ledtrig-backlight.c
index 2b513a2..e272686 100644
--- a/drivers/leds/ledtrig-backlight.c
+++ b/drivers/leds/ledtrig-backlight.c
@@ -120,6 +120,7 @@
 	ret = fb_register_client(&n->notifier);
 	if (ret)
 		dev_err(led->dev, "unable to register backlight trigger\n");
+	led->activated = true;
 
 	return;
 
@@ -133,10 +134,11 @@
 	struct bl_trig_notifier *n =
 		(struct bl_trig_notifier *) led->trigger_data;
 
-	if (n) {
+	if (led->activated) {
 		device_remove_file(led->dev, &dev_attr_inverted);
 		fb_unregister_client(&n->notifier);
 		kfree(n);
+		led->activated = false;
 	}
 }
 
diff --git a/drivers/leds/ledtrig-gpio.c b/drivers/leds/ledtrig-gpio.c
index ecc4bf3..f057c10 100644
--- a/drivers/leds/ledtrig-gpio.c
+++ b/drivers/leds/ledtrig-gpio.c
@@ -200,6 +200,7 @@
 	gpio_data->led = led;
 	led->trigger_data = gpio_data;
 	INIT_WORK(&gpio_data->work, gpio_trig_work);
+	led->activated = true;
 
 	return;
 
@@ -217,7 +218,7 @@
 {
 	struct gpio_trig_data *gpio_data = led->trigger_data;
 
-	if (gpio_data) {
+	if (led->activated) {
 		device_remove_file(led->dev, &dev_attr_gpio);
 		device_remove_file(led->dev, &dev_attr_inverted);
 		device_remove_file(led->dev, &dev_attr_desired_brightness);
@@ -225,6 +226,7 @@
 		if (gpio_data->gpio != 0)
 			free_irq(gpio_to_irq(gpio_data->gpio), led);
 		kfree(gpio_data);
+		led->activated = false;
 	}
 }
 
diff --git a/drivers/leds/ledtrig-heartbeat.c b/drivers/leds/ledtrig-heartbeat.c
index 759c0bba..41dc76d 100644
--- a/drivers/leds/ledtrig-heartbeat.c
+++ b/drivers/leds/ledtrig-heartbeat.c
@@ -18,6 +18,7 @@
 #include <linux/timer.h>
 #include <linux/sched.h>
 #include <linux/leds.h>
+#include <linux/reboot.h>
 #include "leds.h"
 
 struct heartbeat_trig_data {
@@ -83,15 +84,17 @@
 		    led_heartbeat_function, (unsigned long) led_cdev);
 	heartbeat_data->phase = 0;
 	led_heartbeat_function(heartbeat_data->timer.data);
+	led_cdev->activated = true;
 }
 
 static void heartbeat_trig_deactivate(struct led_classdev *led_cdev)
 {
 	struct heartbeat_trig_data *heartbeat_data = led_cdev->trigger_data;
 
-	if (heartbeat_data) {
+	if (led_cdev->activated) {
 		del_timer_sync(&heartbeat_data->timer);
 		kfree(heartbeat_data);
+		led_cdev->activated = false;
 	}
 }
 
@@ -101,13 +104,38 @@
 	.deactivate = heartbeat_trig_deactivate,
 };
 
+static int heartbeat_reboot_notifier(struct notifier_block *nb,
+				     unsigned long code, void *unused)
+{
+	led_trigger_unregister(&heartbeat_led_trigger);
+	return NOTIFY_DONE;
+}
+
+static struct notifier_block heartbeat_reboot_nb = {
+	.notifier_call = heartbeat_reboot_notifier,
+};
+
+static struct notifier_block heartbeat_panic_nb = {
+	.notifier_call = heartbeat_reboot_notifier,
+};
+
 static int __init heartbeat_trig_init(void)
 {
-	return led_trigger_register(&heartbeat_led_trigger);
+	int rc = led_trigger_register(&heartbeat_led_trigger);
+
+	if (!rc) {
+		atomic_notifier_chain_register(&panic_notifier_list,
+					       &heartbeat_panic_nb);
+		register_reboot_notifier(&heartbeat_reboot_nb);
+	}
+	return rc;
 }
 
 static void __exit heartbeat_trig_exit(void)
 {
+	unregister_reboot_notifier(&heartbeat_reboot_nb);
+	atomic_notifier_chain_unregister(&panic_notifier_list,
+					 &heartbeat_panic_nb);
 	led_trigger_unregister(&heartbeat_led_trigger);
 }
 
diff --git a/drivers/leds/ledtrig-timer.c b/drivers/leds/ledtrig-timer.c
index 328c64c..9010f7a 100644
--- a/drivers/leds/ledtrig-timer.c
+++ b/drivers/leds/ledtrig-timer.c
@@ -31,21 +31,17 @@
 		struct device_attribute *attr, const char *buf, size_t size)
 {
 	struct led_classdev *led_cdev = dev_get_drvdata(dev);
-	int ret = -EINVAL;
-	char *after;
-	unsigned long state = simple_strtoul(buf, &after, 10);
-	size_t count = after - buf;
+	unsigned long state;
+	ssize_t ret;
 
-	if (isspace(*after))
-		count++;
+	ret = kstrtoul(buf, 10, &state);
+	if (ret)
+		return ret;
 
-	if (count == size) {
-		led_blink_set(led_cdev, &state, &led_cdev->blink_delay_off);
-		led_cdev->blink_delay_on = state;
-		ret = count;
-	}
+	led_blink_set(led_cdev, &state, &led_cdev->blink_delay_off);
+	led_cdev->blink_delay_on = state;
 
-	return ret;
+	return size;
 }
 
 static ssize_t led_delay_off_show(struct device *dev,
@@ -60,21 +56,17 @@
 		struct device_attribute *attr, const char *buf, size_t size)
 {
 	struct led_classdev *led_cdev = dev_get_drvdata(dev);
-	int ret = -EINVAL;
-	char *after;
-	unsigned long state = simple_strtoul(buf, &after, 10);
-	size_t count = after - buf;
+	unsigned long state;
+	ssize_t ret;
 
-	if (isspace(*after))
-		count++;
+	ret = kstrtoul(buf, 10, &state);
+	if (ret)
+		return ret;
 
-	if (count == size) {
-		led_blink_set(led_cdev, &led_cdev->blink_delay_on, &state);
-		led_cdev->blink_delay_off = state;
-		ret = count;
-	}
+	led_blink_set(led_cdev, &led_cdev->blink_delay_on, &state);
+	led_cdev->blink_delay_off = state;
 
-	return ret;
+	return size;
 }
 
 static DEVICE_ATTR(delay_on, 0644, led_delay_on_show, led_delay_on_store);
@@ -95,8 +87,7 @@
 
 	led_blink_set(led_cdev, &led_cdev->blink_delay_on,
 		      &led_cdev->blink_delay_off);
-
-	led_cdev->trigger_data = (void *)1;
+	led_cdev->activated = true;
 
 	return;
 
@@ -106,9 +97,10 @@
 
 static void timer_trig_deactivate(struct led_classdev *led_cdev)
 {
-	if (led_cdev->trigger_data) {
+	if (led_cdev->activated) {
 		device_remove_file(led_cdev->dev, &dev_attr_delay_on);
 		device_remove_file(led_cdev->dev, &dev_attr_delay_off);
+		led_cdev->activated = false;
 	}
 
 	/* Stop blinking */
diff --git a/drivers/leds/ledtrig-transient.c b/drivers/leds/ledtrig-transient.c
new file mode 100644
index 0000000..83179f4
--- /dev/null
+++ b/drivers/leds/ledtrig-transient.c
@@ -0,0 +1,237 @@
+/*
+ * LED Kernel Transient Trigger
+ *
+ * Copyright (C) 2012 Shuah Khan <shuahkhan@gmail.com>
+ *
+ * Based on Richard Purdie's ledtrig-timer.c and Atsushi Nemoto's
+ * ledtrig-heartbeat.c
+ * Design and use-case input from Jonas Bonn <jonas@southpole.se> and
+ * Neil Brown <neilb@suse.de>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ */
+/*
+ * Transient trigger allows one-shot timer activation. Please refer to
+ * Documentation/leds/ledtrig-transient.txt for details.
+ */
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/device.h>
+#include <linux/slab.h>
+#include <linux/timer.h>
+#include <linux/leds.h>
+#include "leds.h"
+
+struct transient_trig_data {
+	int activate;
+	int state;
+	int restore_state;
+	unsigned long duration;
+	struct timer_list timer;
+};
+
+static void transient_timer_function(unsigned long data)
+{
+	struct led_classdev *led_cdev = (struct led_classdev *) data;
+	struct transient_trig_data *transient_data = led_cdev->trigger_data;
+
+	transient_data->activate = 0;
+	led_set_brightness(led_cdev, transient_data->restore_state);
+}
+
+static ssize_t transient_activate_show(struct device *dev,
+		struct device_attribute *attr, char *buf)
+{
+	struct led_classdev *led_cdev = dev_get_drvdata(dev);
+	struct transient_trig_data *transient_data = led_cdev->trigger_data;
+
+	return sprintf(buf, "%d\n", transient_data->activate);
+}
+
+static ssize_t transient_activate_store(struct device *dev,
+		struct device_attribute *attr, const char *buf, size_t size)
+{
+	struct led_classdev *led_cdev = dev_get_drvdata(dev);
+	struct transient_trig_data *transient_data = led_cdev->trigger_data;
+	unsigned long state;
+	ssize_t ret;
+
+	ret = kstrtoul(buf, 10, &state);
+	if (ret)
+		return ret;
+
+	if (state != 1 && state != 0)
+		return -EINVAL;
+
+	/* cancel the running timer */
+	if (state == 0 && transient_data->activate == 1) {
+		del_timer(&transient_data->timer);
+		transient_data->activate = state;
+		led_set_brightness(led_cdev, transient_data->restore_state);
+		return size;
+	}
+
+	/* start timer if there is no active timer */
+	if (state == 1 && transient_data->activate == 0 &&
+	    transient_data->duration != 0) {
+		transient_data->activate = state;
+		led_set_brightness(led_cdev, transient_data->state);
+		transient_data->restore_state =
+		    (transient_data->state == LED_FULL) ? LED_OFF : LED_FULL;
+		mod_timer(&transient_data->timer,
+			  jiffies + transient_data->duration);
+	}
+
+	/*
+	 * state == 0 && transient_data->activate == 0:
+	 *	timer is not active - just return.
+	 *
+	 * state == 1 && transient_data->activate == 1:
+	 *	timer is already active - just return.
+	 */
+
+	return size;
+}
+
+static ssize_t transient_duration_show(struct device *dev,
+		struct device_attribute *attr, char *buf)
+{
+	struct led_classdev *led_cdev = dev_get_drvdata(dev);
+	struct transient_trig_data *transient_data = led_cdev->trigger_data;
+
+	return sprintf(buf, "%lu\n", transient_data->duration);
+}
+
+static ssize_t transient_duration_store(struct device *dev,
+		struct device_attribute *attr, const char *buf, size_t size)
+{
+	struct led_classdev *led_cdev = dev_get_drvdata(dev);
+	struct transient_trig_data *transient_data = led_cdev->trigger_data;
+	unsigned long state;
+	ssize_t ret;
+
+	ret = kstrtoul(buf, 10, &state);
+	if (ret)
+		return ret;
+
+	transient_data->duration = state;
+	return size;
+}
+
+static ssize_t transient_state_show(struct device *dev,
+		struct device_attribute *attr, char *buf)
+{
+	struct led_classdev *led_cdev = dev_get_drvdata(dev);
+	struct transient_trig_data *transient_data = led_cdev->trigger_data;
+	int state;
+
+	state = (transient_data->state == LED_FULL) ? 1 : 0;
+	return sprintf(buf, "%d\n", state);
+}
+
+static ssize_t transient_state_store(struct device *dev,
+		struct device_attribute *attr, const char *buf, size_t size)
+{
+	struct led_classdev *led_cdev = dev_get_drvdata(dev);
+	struct transient_trig_data *transient_data = led_cdev->trigger_data;
+	unsigned long state;
+	ssize_t ret;
+
+	ret = kstrtoul(buf, 10, &state);
+	if (ret)
+		return ret;
+
+	if (state != 1 && state != 0)
+		return -EINVAL;
+
+	transient_data->state = (state == 1) ? LED_FULL : LED_OFF;
+	return size;
+}
+
+static DEVICE_ATTR(activate, 0644, transient_activate_show,
+		   transient_activate_store);
+static DEVICE_ATTR(duration, 0644, transient_duration_show,
+		   transient_duration_store);
+static DEVICE_ATTR(state, 0644, transient_state_show, transient_state_store);
+
+static void transient_trig_activate(struct led_classdev *led_cdev)
+{
+	int rc;
+	struct transient_trig_data *tdata;
+
+	tdata = kzalloc(sizeof(struct transient_trig_data), GFP_KERNEL);
+	if (!tdata) {
+		dev_err(led_cdev->dev,
+			"unable to allocate transient trigger\n");
+		return;
+	}
+	led_cdev->trigger_data = tdata;
+
+	rc = device_create_file(led_cdev->dev, &dev_attr_activate);
+	if (rc)
+		goto err_out;
+
+	rc = device_create_file(led_cdev->dev, &dev_attr_duration);
+	if (rc)
+		goto err_out_duration;
+
+	rc = device_create_file(led_cdev->dev, &dev_attr_state);
+	if (rc)
+		goto err_out_state;
+
+	setup_timer(&tdata->timer, transient_timer_function,
+		    (unsigned long) led_cdev);
+	led_cdev->activated = true;
+
+	return;
+
+err_out_state:
+	device_remove_file(led_cdev->dev, &dev_attr_duration);
+err_out_duration:
+	device_remove_file(led_cdev->dev, &dev_attr_activate);
+err_out:
+	dev_err(led_cdev->dev, "unable to register transient trigger\n");
+	led_cdev->trigger_data = NULL;
+	kfree(tdata);
+}
+
+static void transient_trig_deactivate(struct led_classdev *led_cdev)
+{
+	struct transient_trig_data *transient_data = led_cdev->trigger_data;
+
+	if (led_cdev->activated) {
+		del_timer_sync(&transient_data->timer);
+		led_set_brightness(led_cdev, transient_data->restore_state);
+		device_remove_file(led_cdev->dev, &dev_attr_activate);
+		device_remove_file(led_cdev->dev, &dev_attr_duration);
+		device_remove_file(led_cdev->dev, &dev_attr_state);
+		led_cdev->trigger_data = NULL;
+		led_cdev->activated = false;
+		kfree(transient_data);
+	}
+}
+
+static struct led_trigger transient_trigger = {
+	.name     = "transient",
+	.activate = transient_trig_activate,
+	.deactivate = transient_trig_deactivate,
+};
+
+static int __init transient_trig_init(void)
+{
+	return led_trigger_register(&transient_trigger);
+}
+
+static void __exit transient_trig_exit(void)
+{
+	led_trigger_unregister(&transient_trigger);
+}
+
+module_init(transient_trig_init);
+module_exit(transient_trig_exit);
+
+MODULE_AUTHOR("Shuah Khan <shuahkhan@gmail.com>");
+MODULE_DESCRIPTION("Transient LED trigger");
+MODULE_LICENSE("GPL");
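As a usage sketch for the new trigger: once a LED is switched to the "transient" trigger, the activate, duration and state files created above appear in its sysfs directory. The LED name below is a placeholder, and note that this version hands duration straight to mod_timer(), so the value is interpreted in jiffies:

/* Hedged sketch: drive the transient trigger from userspace.
 * "myled" is a placeholder LED name; error handling kept minimal.
 */
#include <stdio.h>

static void write_attr(const char *attr, const char *val)
{
	char path[128];
	FILE *f;

	snprintf(path, sizeof(path), "/sys/class/leds/myled/%s", attr);
	f = fopen(path, "w");
	if (!f)
		return;
	fputs(val, f);
	fclose(f);
}

int main(void)
{
	write_attr("trigger", "transient");	/* bind the trigger */
	write_attr("state", "1");		/* transient state = LED_FULL */
	write_attr("duration", "100");		/* timer length (jiffies here) */
	write_attr("activate", "1");		/* start the one-shot timer */
	return 0;
}

Writing 0 to activate while the timer runs, or switching triggers, restores the LED to restore_state, matching transient_timer_function() and transient_trig_deactivate() above.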
diff --git a/drivers/md/dm-mpath.c b/drivers/md/dm-mpath.c
index 754f38f..638dae0 100644
--- a/drivers/md/dm-mpath.c
+++ b/drivers/md/dm-mpath.c
@@ -18,6 +18,7 @@
 #include <linux/slab.h>
 #include <linux/time.h>
 #include <linux/workqueue.h>
+#include <linux/delay.h>
 #include <scsi/scsi_dh.h>
 #include <linux/atomic.h>
 
@@ -61,11 +62,11 @@
 	struct list_head list;
 	struct dm_target *ti;
 
-	spinlock_t lock;
-
 	const char *hw_handler_name;
 	char *hw_handler_params;
 
+	spinlock_t lock;
+
 	unsigned nr_priority_groups;
 	struct list_head priority_groups;
 
@@ -81,16 +82,17 @@
 	struct priority_group *next_pg;	/* Switch to this PG if set */
 	unsigned repeat_count;		/* I/Os left before calling PS again */
 
-	unsigned queue_io;		/* Must we queue all I/O? */
-	unsigned queue_if_no_path;	/* Queue I/O if last path fails? */
-	unsigned saved_queue_if_no_path;/* Saved state during suspension */
+	unsigned queue_io:1;		/* Must we queue all I/O? */
+	unsigned queue_if_no_path:1;	/* Queue I/O if last path fails? */
+	unsigned saved_queue_if_no_path:1; /* Saved state during suspension */
+
 	unsigned pg_init_retries;	/* Number of times to retry pg_init */
 	unsigned pg_init_count;		/* Number of times pg_init called */
 	unsigned pg_init_delay_msecs;	/* Number of msecs before pg_init retry */
 
+	unsigned queue_size;
 	struct work_struct process_queued_ios;
 	struct list_head queued_ios;
-	unsigned queue_size;
 
 	struct work_struct trigger_event;
 
@@ -328,14 +330,18 @@
 	/*
 	 * Loop through priority groups until we find a valid path.
 	 * First time we skip PGs marked 'bypassed'.
-	 * Second time we only try the ones we skipped.
+	 * Second time we only try the ones we skipped, but set
+	 * pg_init_delay_retry so we do not hammer controllers.
 	 */
 	do {
 		list_for_each_entry(pg, &m->priority_groups, list) {
 			if (pg->bypassed == bypassed)
 				continue;
-			if (!__choose_path_in_pg(m, pg, nr_bytes))
+			if (!__choose_path_in_pg(m, pg, nr_bytes)) {
+				if (!bypassed)
+					m->pg_init_delay_retry = 1;
 				return;
+			}
 		}
 	} while (bypassed--);
 
@@ -481,9 +487,6 @@
 
 	spin_lock_irqsave(&m->lock, flags);
 
-	if (!m->queue_size)
-		goto out;
-
 	if (!m->current_pgpath)
 		__choose_pgpath(m, 0);
 
@@ -496,7 +499,6 @@
 	if (m->pg_init_required && !m->pg_init_in_progress && pgpath)
 		__pg_init_all_paths(m);
 
-out:
 	spin_unlock_irqrestore(&m->lock, flags);
 	if (!must_queue)
 		dispatch_queued_ios(m);
@@ -1517,11 +1519,16 @@
 static int multipath_ioctl(struct dm_target *ti, unsigned int cmd,
 			   unsigned long arg)
 {
-	struct multipath *m = (struct multipath *) ti->private;
-	struct block_device *bdev = NULL;
-	fmode_t mode = 0;
+	struct multipath *m = ti->private;
+	struct block_device *bdev;
+	fmode_t mode;
 	unsigned long flags;
-	int r = 0;
+	int r;
+
+again:
+	bdev = NULL;
+	mode = 0;
+	r = 0;
 
 	spin_lock_irqsave(&m->lock, flags);
 
@@ -1546,6 +1553,12 @@
 	if (!r && ti->len != i_size_read(bdev->bd_inode) >> SECTOR_SHIFT)
 		r = scsi_verify_blk_ioctl(NULL, cmd);
 
+	if (r == -EAGAIN && !fatal_signal_pending(current)) {
+		queue_work(kmultipathd, &m->process_queued_ios);
+		msleep(10);
+		goto again;
+	}
+
 	return r ? : __blkdev_driver_ioctl(bdev, mode, cmd, arg);
 }
 
@@ -1643,7 +1656,7 @@
  *---------------------------------------------------------------*/
 static struct target_type multipath_target = {
 	.name = "multipath",
-	.version = {1, 3, 0},
+	.version = {1, 4, 0},
 	.module = THIS_MODULE,
 	.ctr = multipath_ctr,
 	.dtr = multipath_dtr,
diff --git a/drivers/md/dm-thin-metadata.c b/drivers/md/dm-thin-metadata.c
index 737d388..3e2907f 100644
--- a/drivers/md/dm-thin-metadata.c
+++ b/drivers/md/dm-thin-metadata.c
@@ -1082,15 +1082,139 @@
 	return 0;
 }
 
-static int __get_held_metadata_root(struct dm_pool_metadata *pmd,
-				    dm_block_t *result)
+static int __reserve_metadata_snap(struct dm_pool_metadata *pmd)
+{
+	int r, inc;
+	struct thin_disk_superblock *disk_super;
+	struct dm_block *copy, *sblock;
+	dm_block_t held_root;
+
+	/*
+	 * Copy the superblock.
+	 */
+	dm_sm_inc_block(pmd->metadata_sm, THIN_SUPERBLOCK_LOCATION);
+	r = dm_tm_shadow_block(pmd->tm, THIN_SUPERBLOCK_LOCATION,
+			       &sb_validator, &copy, &inc);
+	if (r)
+		return r;
+
+	BUG_ON(!inc);
+
+	held_root = dm_block_location(copy);
+	disk_super = dm_block_data(copy);
+
+	if (le64_to_cpu(disk_super->held_root)) {
+		DMWARN("Pool metadata snapshot already exists: release this before taking another.");
+
+		dm_tm_dec(pmd->tm, held_root);
+		dm_tm_unlock(pmd->tm, copy);
+		pmd->need_commit = 1;
+
+		return -EBUSY;
+	}
+
+	/*
+	 * Wipe the spacemap since we're not publishing this.
+	 */
+	memset(&disk_super->data_space_map_root, 0,
+	       sizeof(disk_super->data_space_map_root));
+	memset(&disk_super->metadata_space_map_root, 0,
+	       sizeof(disk_super->metadata_space_map_root));
+
+	/*
+	 * Increment the data structures that need to be preserved.
+	 */
+	dm_tm_inc(pmd->tm, le64_to_cpu(disk_super->data_mapping_root));
+	dm_tm_inc(pmd->tm, le64_to_cpu(disk_super->device_details_root));
+	dm_tm_unlock(pmd->tm, copy);
+
+	/*
+	 * Write the held root into the superblock.
+	 */
+	r = dm_bm_write_lock(pmd->bm, THIN_SUPERBLOCK_LOCATION,
+			     &sb_validator, &sblock);
+	if (r) {
+		dm_tm_dec(pmd->tm, held_root);
+		pmd->need_commit = 1;
+		return r;
+	}
+
+	disk_super = dm_block_data(sblock);
+	disk_super->held_root = cpu_to_le64(held_root);
+	dm_bm_unlock(sblock);
+
+	pmd->need_commit = 1;
+
+	return 0;
+}
+
+int dm_pool_reserve_metadata_snap(struct dm_pool_metadata *pmd)
+{
+	int r;
+
+	down_write(&pmd->root_lock);
+	r = __reserve_metadata_snap(pmd);
+	up_write(&pmd->root_lock);
+
+	return r;
+}
+
+static int __release_metadata_snap(struct dm_pool_metadata *pmd)
+{
+	int r;
+	struct thin_disk_superblock *disk_super;
+	struct dm_block *sblock, *copy;
+	dm_block_t held_root;
+
+	r = dm_bm_write_lock(pmd->bm, THIN_SUPERBLOCK_LOCATION,
+			     &sb_validator, &sblock);
+	if (r)
+		return r;
+
+	disk_super = dm_block_data(sblock);
+	held_root = le64_to_cpu(disk_super->held_root);
+	disk_super->held_root = cpu_to_le64(0);
+	pmd->need_commit = 1;
+
+	dm_bm_unlock(sblock);
+
+	if (!held_root) {
+		DMWARN("No pool metadata snapshot found: nothing to release.");
+		return -EINVAL;
+	}
+
+	r = dm_tm_read_lock(pmd->tm, held_root, &sb_validator, &copy);
+	if (r)
+		return r;
+
+	disk_super = dm_block_data(copy);
+	dm_sm_dec_block(pmd->metadata_sm, le64_to_cpu(disk_super->data_mapping_root));
+	dm_sm_dec_block(pmd->metadata_sm, le64_to_cpu(disk_super->device_details_root));
+	dm_sm_dec_block(pmd->metadata_sm, held_root);
+
+	return dm_tm_unlock(pmd->tm, copy);
+}
+
+int dm_pool_release_metadata_snap(struct dm_pool_metadata *pmd)
+{
+	int r;
+
+	down_write(&pmd->root_lock);
+	r = __release_metadata_snap(pmd);
+	up_write(&pmd->root_lock);
+
+	return r;
+}
+
+static int __get_metadata_snap(struct dm_pool_metadata *pmd,
+			       dm_block_t *result)
 {
 	int r;
 	struct thin_disk_superblock *disk_super;
 	struct dm_block *sblock;
 
-	r = dm_bm_write_lock(pmd->bm, THIN_SUPERBLOCK_LOCATION,
-			     &sb_validator, &sblock);
+	r = dm_bm_read_lock(pmd->bm, THIN_SUPERBLOCK_LOCATION,
+			    &sb_validator, &sblock);
 	if (r)
 		return r;
 
@@ -1100,13 +1224,13 @@
 	return dm_bm_unlock(sblock);
 }
 
-int dm_pool_get_held_metadata_root(struct dm_pool_metadata *pmd,
-				   dm_block_t *result)
+int dm_pool_get_metadata_snap(struct dm_pool_metadata *pmd,
+			      dm_block_t *result)
 {
 	int r;
 
 	down_read(&pmd->root_lock);
-	r = __get_held_metadata_root(pmd, result);
+	r = __get_metadata_snap(pmd, result);
 	up_read(&pmd->root_lock);
 
 	return r;
diff --git a/drivers/md/dm-thin-metadata.h b/drivers/md/dm-thin-metadata.h
index ed4725e..b88918c 100644
--- a/drivers/md/dm-thin-metadata.h
+++ b/drivers/md/dm-thin-metadata.h
@@ -90,11 +90,18 @@
 
 /*
  * Hold/get root for userspace transaction.
+ *
+ * The metadata snapshot is a copy of the current superblock (minus the
+ * space maps).  Userland can access the data structures for READ
+ * operations only.  A small performance hit is incurred by providing this
+ * copy of the metadata to userland due to extra copy-on-write operations
+ * on the metadata nodes.  Release this as soon as you finish with it.
  */
-int dm_pool_hold_metadata_root(struct dm_pool_metadata *pmd);
+int dm_pool_reserve_metadata_snap(struct dm_pool_metadata *pmd);
+int dm_pool_release_metadata_snap(struct dm_pool_metadata *pmd);
 
-int dm_pool_get_held_metadata_root(struct dm_pool_metadata *pmd,
-				   dm_block_t *result);
+int dm_pool_get_metadata_snap(struct dm_pool_metadata *pmd,
+			      dm_block_t *result);
 
 /*
  * Actions on a single virtual device.
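A short in-kernel sketch of how a caller is expected to drive the renamed API, using only the three prototypes above (the wrapper function name is illustrative, not part of the patch):

/* Hedged sketch of the reserve/get/release cycle for the metadata snapshot.
 * 'pmd' is an already-open struct dm_pool_metadata; error paths abbreviated.
 */
#include "dm-thin-metadata.h"

static int snapshot_pool_metadata(struct dm_pool_metadata *pmd)
{
	dm_block_t snap_root;
	int r;

	r = dm_pool_reserve_metadata_snap(pmd);	/* take the read-only copy */
	if (r)
		return r;

	r = dm_pool_get_metadata_snap(pmd, &snap_root);
	if (!r) {
		/*
		 * snap_root is the metadata block userspace may read while
		 * the pool keeps running.
		 */
	}

	/* Release promptly: the copy costs extra copy-on-write on metadata. */
	return dm_pool_release_metadata_snap(pmd);
}

The thin-pool target below wires these calls up as the reserve_metadata_snap and release_metadata_snap messages, and reports the held root through pool status.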
diff --git a/drivers/md/dm-thin.c b/drivers/md/dm-thin.c
index eb3d138..37fdaf8 100644
--- a/drivers/md/dm-thin.c
+++ b/drivers/md/dm-thin.c
@@ -111,7 +111,7 @@
 	dm_block_t block;
 };
 
-struct cell {
+struct dm_bio_prison_cell {
 	struct hlist_node list;
 	struct bio_prison *prison;
 	struct cell_key key;
@@ -141,6 +141,8 @@
 	return n;
 }
 
+static struct kmem_cache *_cell_cache;
+
 /*
  * @nr_cells should be the number of cells you want in use _concurrently_.
  * Don't confuse it with the number of distinct keys.
@@ -157,8 +159,7 @@
 		return NULL;
 
 	spin_lock_init(&prison->lock);
-	prison->cell_pool = mempool_create_kmalloc_pool(nr_cells,
-							sizeof(struct cell));
+	prison->cell_pool = mempool_create_slab_pool(nr_cells, _cell_cache);
 	if (!prison->cell_pool) {
 		kfree(prison);
 		return NULL;
@@ -194,10 +195,10 @@
 		       (lhs->block == rhs->block);
 }
 
-static struct cell *__search_bucket(struct hlist_head *bucket,
-				    struct cell_key *key)
+static struct dm_bio_prison_cell *__search_bucket(struct hlist_head *bucket,
+						  struct cell_key *key)
 {
-	struct cell *cell;
+	struct dm_bio_prison_cell *cell;
 	struct hlist_node *tmp;
 
 	hlist_for_each_entry(cell, tmp, bucket, list)
@@ -214,12 +215,12 @@
  * Returns 1 if the cell was already held, 0 if @inmate is the new holder.
  */
 static int bio_detain(struct bio_prison *prison, struct cell_key *key,
-		      struct bio *inmate, struct cell **ref)
+		      struct bio *inmate, struct dm_bio_prison_cell **ref)
 {
 	int r = 1;
 	unsigned long flags;
 	uint32_t hash = hash_key(prison, key);
-	struct cell *cell, *cell2;
+	struct dm_bio_prison_cell *cell, *cell2;
 
 	BUG_ON(hash > prison->nr_buckets);
 
@@ -273,7 +274,7 @@
 /*
  * @inmates must have been initialised prior to this call
  */
-static void __cell_release(struct cell *cell, struct bio_list *inmates)
+static void __cell_release(struct dm_bio_prison_cell *cell, struct bio_list *inmates)
 {
 	struct bio_prison *prison = cell->prison;
 
@@ -287,7 +288,7 @@
 	mempool_free(cell, prison->cell_pool);
 }
 
-static void cell_release(struct cell *cell, struct bio_list *bios)
+static void cell_release(struct dm_bio_prison_cell *cell, struct bio_list *bios)
 {
 	unsigned long flags;
 	struct bio_prison *prison = cell->prison;
@@ -303,7 +304,7 @@
  * bio may be in the cell.  This function releases the cell, and also does
  * a sanity check.
  */
-static void __cell_release_singleton(struct cell *cell, struct bio *bio)
+static void __cell_release_singleton(struct dm_bio_prison_cell *cell, struct bio *bio)
 {
 	BUG_ON(cell->holder != bio);
 	BUG_ON(!bio_list_empty(&cell->bios));
@@ -311,7 +312,7 @@
 	__cell_release(cell, NULL);
 }
 
-static void cell_release_singleton(struct cell *cell, struct bio *bio)
+static void cell_release_singleton(struct dm_bio_prison_cell *cell, struct bio *bio)
 {
 	unsigned long flags;
 	struct bio_prison *prison = cell->prison;
@@ -324,7 +325,8 @@
 /*
  * Sometimes we don't want the holder, just the additional bios.
  */
-static void __cell_release_no_holder(struct cell *cell, struct bio_list *inmates)
+static void __cell_release_no_holder(struct dm_bio_prison_cell *cell,
+				     struct bio_list *inmates)
 {
 	struct bio_prison *prison = cell->prison;
 
@@ -334,7 +336,8 @@
 	mempool_free(cell, prison->cell_pool);
 }
 
-static void cell_release_no_holder(struct cell *cell, struct bio_list *inmates)
+static void cell_release_no_holder(struct dm_bio_prison_cell *cell,
+				   struct bio_list *inmates)
 {
 	unsigned long flags;
 	struct bio_prison *prison = cell->prison;
@@ -344,7 +347,7 @@
 	spin_unlock_irqrestore(&prison->lock, flags);
 }
 
-static void cell_error(struct cell *cell)
+static void cell_error(struct dm_bio_prison_cell *cell)
 {
 	struct bio_prison *prison = cell->prison;
 	struct bio_list bios;
@@ -491,7 +494,7 @@
  * also provides the interface for creating and destroying internal
  * devices.
  */
-struct new_mapping;
+struct dm_thin_new_mapping;
 
 struct pool_features {
 	unsigned zero_new_blocks:1;
@@ -537,7 +540,7 @@
 	struct deferred_set shared_read_ds;
 	struct deferred_set all_io_ds;
 
-	struct new_mapping *next_mapping;
+	struct dm_thin_new_mapping *next_mapping;
 	mempool_t *mapping_pool;
 	mempool_t *endio_hook_pool;
 };
@@ -630,11 +633,11 @@
 
 /*----------------------------------------------------------------*/
 
-struct endio_hook {
+struct dm_thin_endio_hook {
 	struct thin_c *tc;
 	struct deferred_entry *shared_read_entry;
 	struct deferred_entry *all_io_entry;
-	struct new_mapping *overwrite_mapping;
+	struct dm_thin_new_mapping *overwrite_mapping;
 };
 
 static void __requeue_bio_list(struct thin_c *tc, struct bio_list *master)
@@ -647,7 +650,8 @@
 	bio_list_init(master);
 
 	while ((bio = bio_list_pop(&bios))) {
-		struct endio_hook *h = dm_get_mapinfo(bio)->ptr;
+		struct dm_thin_endio_hook *h = dm_get_mapinfo(bio)->ptr;
+
 		if (h->tc == tc)
 			bio_endio(bio, DM_ENDIO_REQUEUE);
 		else
@@ -736,7 +740,7 @@
 /*
  * Bio endio functions.
  */
-struct new_mapping {
+struct dm_thin_new_mapping {
 	struct list_head list;
 
 	unsigned quiesced:1;
@@ -746,7 +750,7 @@
 	struct thin_c *tc;
 	dm_block_t virt_block;
 	dm_block_t data_block;
-	struct cell *cell, *cell2;
+	struct dm_bio_prison_cell *cell, *cell2;
 	int err;
 
 	/*
@@ -759,7 +763,7 @@
 	bio_end_io_t *saved_bi_end_io;
 };
 
-static void __maybe_add_mapping(struct new_mapping *m)
+static void __maybe_add_mapping(struct dm_thin_new_mapping *m)
 {
 	struct pool *pool = m->tc->pool;
 
@@ -772,7 +776,7 @@
 static void copy_complete(int read_err, unsigned long write_err, void *context)
 {
 	unsigned long flags;
-	struct new_mapping *m = context;
+	struct dm_thin_new_mapping *m = context;
 	struct pool *pool = m->tc->pool;
 
 	m->err = read_err || write_err ? -EIO : 0;
@@ -786,8 +790,8 @@
 static void overwrite_endio(struct bio *bio, int err)
 {
 	unsigned long flags;
-	struct endio_hook *h = dm_get_mapinfo(bio)->ptr;
-	struct new_mapping *m = h->overwrite_mapping;
+	struct dm_thin_endio_hook *h = dm_get_mapinfo(bio)->ptr;
+	struct dm_thin_new_mapping *m = h->overwrite_mapping;
 	struct pool *pool = m->tc->pool;
 
 	m->err = err;
@@ -811,7 +815,7 @@
 /*
  * This sends the bios in the cell back to the deferred_bios list.
  */
-static void cell_defer(struct thin_c *tc, struct cell *cell,
+static void cell_defer(struct thin_c *tc, struct dm_bio_prison_cell *cell,
 		       dm_block_t data_block)
 {
 	struct pool *pool = tc->pool;
@@ -828,7 +832,7 @@
  * Same as cell_defer above, except it omits one particular detainee,
  * a write bio that covers the block and has already been processed.
  */
-static void cell_defer_except(struct thin_c *tc, struct cell *cell)
+static void cell_defer_except(struct thin_c *tc, struct dm_bio_prison_cell *cell)
 {
 	struct bio_list bios;
 	struct pool *pool = tc->pool;
@@ -843,7 +847,7 @@
 	wake_worker(pool);
 }
 
-static void process_prepared_mapping(struct new_mapping *m)
+static void process_prepared_mapping(struct dm_thin_new_mapping *m)
 {
 	struct thin_c *tc = m->tc;
 	struct bio *bio;
@@ -886,7 +890,7 @@
 	mempool_free(m, tc->pool->mapping_pool);
 }
 
-static void process_prepared_discard(struct new_mapping *m)
+static void process_prepared_discard(struct dm_thin_new_mapping *m)
 {
 	int r;
 	struct thin_c *tc = m->tc;
@@ -909,11 +913,11 @@
 }
 
 static void process_prepared(struct pool *pool, struct list_head *head,
-			     void (*fn)(struct new_mapping *))
+			     void (*fn)(struct dm_thin_new_mapping *))
 {
 	unsigned long flags;
 	struct list_head maps;
-	struct new_mapping *m, *tmp;
+	struct dm_thin_new_mapping *m, *tmp;
 
 	INIT_LIST_HEAD(&maps);
 	spin_lock_irqsave(&pool->lock, flags);
@@ -957,9 +961,9 @@
 	return pool->next_mapping ? 0 : -ENOMEM;
 }
 
-static struct new_mapping *get_next_mapping(struct pool *pool)
+static struct dm_thin_new_mapping *get_next_mapping(struct pool *pool)
 {
-	struct new_mapping *r = pool->next_mapping;
+	struct dm_thin_new_mapping *r = pool->next_mapping;
 
 	BUG_ON(!pool->next_mapping);
 
@@ -971,11 +975,11 @@
 static void schedule_copy(struct thin_c *tc, dm_block_t virt_block,
 			  struct dm_dev *origin, dm_block_t data_origin,
 			  dm_block_t data_dest,
-			  struct cell *cell, struct bio *bio)
+			  struct dm_bio_prison_cell *cell, struct bio *bio)
 {
 	int r;
 	struct pool *pool = tc->pool;
-	struct new_mapping *m = get_next_mapping(pool);
+	struct dm_thin_new_mapping *m = get_next_mapping(pool);
 
 	INIT_LIST_HEAD(&m->list);
 	m->quiesced = 0;
@@ -997,7 +1001,8 @@
 	 * bio immediately. Otherwise we use kcopyd to clone the data first.
 	 */
 	if (io_overwrites_block(pool, bio)) {
-		struct endio_hook *h = dm_get_mapinfo(bio)->ptr;
+		struct dm_thin_endio_hook *h = dm_get_mapinfo(bio)->ptr;
+
 		h->overwrite_mapping = m;
 		m->bio = bio;
 		save_and_set_endio(bio, &m->saved_bi_end_io, overwrite_endio);
@@ -1025,7 +1030,7 @@
 
 static void schedule_internal_copy(struct thin_c *tc, dm_block_t virt_block,
 				   dm_block_t data_origin, dm_block_t data_dest,
-				   struct cell *cell, struct bio *bio)
+				   struct dm_bio_prison_cell *cell, struct bio *bio)
 {
 	schedule_copy(tc, virt_block, tc->pool_dev,
 		      data_origin, data_dest, cell, bio);
@@ -1033,18 +1038,18 @@
 
 static void schedule_external_copy(struct thin_c *tc, dm_block_t virt_block,
 				   dm_block_t data_dest,
-				   struct cell *cell, struct bio *bio)
+				   struct dm_bio_prison_cell *cell, struct bio *bio)
 {
 	schedule_copy(tc, virt_block, tc->origin_dev,
 		      virt_block, data_dest, cell, bio);
 }
 
 static void schedule_zero(struct thin_c *tc, dm_block_t virt_block,
-			  dm_block_t data_block, struct cell *cell,
+			  dm_block_t data_block, struct dm_bio_prison_cell *cell,
 			  struct bio *bio)
 {
 	struct pool *pool = tc->pool;
-	struct new_mapping *m = get_next_mapping(pool);
+	struct dm_thin_new_mapping *m = get_next_mapping(pool);
 
 	INIT_LIST_HEAD(&m->list);
 	m->quiesced = 1;
@@ -1065,12 +1070,12 @@
 		process_prepared_mapping(m);
 
 	else if (io_overwrites_block(pool, bio)) {
-		struct endio_hook *h = dm_get_mapinfo(bio)->ptr;
+		struct dm_thin_endio_hook *h = dm_get_mapinfo(bio)->ptr;
+
 		h->overwrite_mapping = m;
 		m->bio = bio;
 		save_and_set_endio(bio, &m->saved_bi_end_io, overwrite_endio);
 		remap_and_issue(tc, bio, data_block);
-
 	} else {
 		int r;
 		struct dm_io_region to;
@@ -1155,7 +1160,7 @@
  */
 static void retry_on_resume(struct bio *bio)
 {
-	struct endio_hook *h = dm_get_mapinfo(bio)->ptr;
+	struct dm_thin_endio_hook *h = dm_get_mapinfo(bio)->ptr;
 	struct thin_c *tc = h->tc;
 	struct pool *pool = tc->pool;
 	unsigned long flags;
@@ -1165,7 +1170,7 @@
 	spin_unlock_irqrestore(&pool->lock, flags);
 }
 
-static void no_space(struct cell *cell)
+static void no_space(struct dm_bio_prison_cell *cell)
 {
 	struct bio *bio;
 	struct bio_list bios;
@@ -1182,11 +1187,11 @@
 	int r;
 	unsigned long flags;
 	struct pool *pool = tc->pool;
-	struct cell *cell, *cell2;
+	struct dm_bio_prison_cell *cell, *cell2;
 	struct cell_key key, key2;
 	dm_block_t block = get_bio_block(tc, bio);
 	struct dm_thin_lookup_result lookup_result;
-	struct new_mapping *m;
+	struct dm_thin_new_mapping *m;
 
 	build_virtual_key(tc->td, block, &key);
 	if (bio_detain(tc->pool->prison, &key, bio, &cell))
@@ -1263,7 +1268,7 @@
 static void break_sharing(struct thin_c *tc, struct bio *bio, dm_block_t block,
 			  struct cell_key *key,
 			  struct dm_thin_lookup_result *lookup_result,
-			  struct cell *cell)
+			  struct dm_bio_prison_cell *cell)
 {
 	int r;
 	dm_block_t data_block;
@@ -1290,7 +1295,7 @@
 			       dm_block_t block,
 			       struct dm_thin_lookup_result *lookup_result)
 {
-	struct cell *cell;
+	struct dm_bio_prison_cell *cell;
 	struct pool *pool = tc->pool;
 	struct cell_key key;
 
@@ -1305,7 +1310,7 @@
 	if (bio_data_dir(bio) == WRITE)
 		break_sharing(tc, bio, block, &key, lookup_result, cell);
 	else {
-		struct endio_hook *h = dm_get_mapinfo(bio)->ptr;
+		struct dm_thin_endio_hook *h = dm_get_mapinfo(bio)->ptr;
 
 		h->shared_read_entry = ds_inc(&pool->shared_read_ds);
 
@@ -1315,7 +1320,7 @@
 }
 
 static void provision_block(struct thin_c *tc, struct bio *bio, dm_block_t block,
-			    struct cell *cell)
+			    struct dm_bio_prison_cell *cell)
 {
 	int r;
 	dm_block_t data_block;
@@ -1363,7 +1368,7 @@
 {
 	int r;
 	dm_block_t block = get_bio_block(tc, bio);
-	struct cell *cell;
+	struct dm_bio_prison_cell *cell;
 	struct cell_key key;
 	struct dm_thin_lookup_result lookup_result;
 
@@ -1432,7 +1437,7 @@
 	spin_unlock_irqrestore(&pool->lock, flags);
 
 	while ((bio = bio_list_pop(&bios))) {
-		struct endio_hook *h = dm_get_mapinfo(bio)->ptr;
+		struct dm_thin_endio_hook *h = dm_get_mapinfo(bio)->ptr;
 		struct thin_c *tc = h->tc;
 
 		/*
@@ -1522,10 +1527,10 @@
 	wake_worker(pool);
 }
 
-static struct endio_hook *thin_hook_bio(struct thin_c *tc, struct bio *bio)
+static struct dm_thin_endio_hook *thin_hook_bio(struct thin_c *tc, struct bio *bio)
 {
 	struct pool *pool = tc->pool;
-	struct endio_hook *h = mempool_alloc(pool->endio_hook_pool, GFP_NOIO);
+	struct dm_thin_endio_hook *h = mempool_alloc(pool->endio_hook_pool, GFP_NOIO);
 
 	h->tc = tc;
 	h->shared_read_entry = NULL;
@@ -1687,6 +1692,9 @@
 	kfree(pool);
 }
 
+static struct kmem_cache *_new_mapping_cache;
+static struct kmem_cache *_endio_hook_cache;
+
 static struct pool *pool_create(struct mapped_device *pool_md,
 				struct block_device *metadata_dev,
 				unsigned long block_size, char **error)
@@ -1755,16 +1763,16 @@
 	ds_init(&pool->all_io_ds);
 
 	pool->next_mapping = NULL;
-	pool->mapping_pool =
-		mempool_create_kmalloc_pool(MAPPING_POOL_SIZE, sizeof(struct new_mapping));
+	pool->mapping_pool = mempool_create_slab_pool(MAPPING_POOL_SIZE,
+						      _new_mapping_cache);
 	if (!pool->mapping_pool) {
 		*error = "Error creating pool's mapping mempool";
 		err_p = ERR_PTR(-ENOMEM);
 		goto bad_mapping_pool;
 	}
 
-	pool->endio_hook_pool =
-		mempool_create_kmalloc_pool(ENDIO_HOOK_POOL_SIZE, sizeof(struct endio_hook));
+	pool->endio_hook_pool = mempool_create_slab_pool(ENDIO_HOOK_POOL_SIZE,
+							 _endio_hook_cache);
 	if (!pool->endio_hook_pool) {
 		*error = "Error creating pool's endio_hook mempool";
 		err_p = ERR_PTR(-ENOMEM);
@@ -2276,6 +2284,36 @@
 	return 0;
 }
 
+static int process_reserve_metadata_snap_mesg(unsigned argc, char **argv, struct pool *pool)
+{
+	int r;
+
+	r = check_arg_count(argc, 1);
+	if (r)
+		return r;
+
+	r = dm_pool_reserve_metadata_snap(pool->pmd);
+	if (r)
+		DMWARN("reserve_metadata_snap message failed.");
+
+	return r;
+}
+
+static int process_release_metadata_snap_mesg(unsigned argc, char **argv, struct pool *pool)
+{
+	int r;
+
+	r = check_arg_count(argc, 1);
+	if (r)
+		return r;
+
+	r = dm_pool_release_metadata_snap(pool->pmd);
+	if (r)
+		DMWARN("release_metadata_snap message failed.");
+
+	return r;
+}
+
 /*
  * Messages supported:
  *   create_thin	<dev_id>
@@ -2283,6 +2321,8 @@
  *   delete		<dev_id>
  *   trim		<dev_id> <new_size_in_sectors>
  *   set_transaction_id <current_trans_id> <new_trans_id>
+ *   reserve_metadata_snap
+ *   release_metadata_snap
  */
 static int pool_message(struct dm_target *ti, unsigned argc, char **argv)
 {
@@ -2302,6 +2342,12 @@
 	else if (!strcasecmp(argv[0], "set_transaction_id"))
 		r = process_set_transaction_id_mesg(argc, argv, pool);
 
+	else if (!strcasecmp(argv[0], "reserve_metadata_snap"))
+		r = process_reserve_metadata_snap_mesg(argc, argv, pool);
+
+	else if (!strcasecmp(argv[0], "release_metadata_snap"))
+		r = process_release_metadata_snap_mesg(argc, argv, pool);
+
 	else
 		DMWARN("Unrecognised thin pool target message received: %s", argv[0]);
 
@@ -2361,7 +2407,7 @@
 		if (r)
 			return r;
 
-		r = dm_pool_get_held_metadata_root(pool->pmd, &held_root);
+		r = dm_pool_get_metadata_snap(pool->pmd, &held_root);
 		if (r)
 			return r;
 
@@ -2457,7 +2503,7 @@
 	.name = "thin-pool",
 	.features = DM_TARGET_SINGLETON | DM_TARGET_ALWAYS_WRITEABLE |
 		    DM_TARGET_IMMUTABLE,
-	.version = {1, 1, 0},
+	.version = {1, 2, 0},
 	.module = THIS_MODULE,
 	.ctr = pool_ctr,
 	.dtr = pool_dtr,
@@ -2613,9 +2659,9 @@
 		      union map_info *map_context)
 {
 	unsigned long flags;
-	struct endio_hook *h = map_context->ptr;
+	struct dm_thin_endio_hook *h = map_context->ptr;
 	struct list_head work;
-	struct new_mapping *m, *tmp;
+	struct dm_thin_new_mapping *m, *tmp;
 	struct pool *pool = h->tc->pool;
 
 	if (h->shared_read_entry) {
@@ -2755,7 +2801,32 @@
 
 	r = dm_register_target(&pool_target);
 	if (r)
-		dm_unregister_target(&thin_target);
+		goto bad_pool_target;
+
+	r = -ENOMEM;
+
+	_cell_cache = KMEM_CACHE(dm_bio_prison_cell, 0);
+	if (!_cell_cache)
+		goto bad_cell_cache;
+
+	_new_mapping_cache = KMEM_CACHE(dm_thin_new_mapping, 0);
+	if (!_new_mapping_cache)
+		goto bad_new_mapping_cache;
+
+	_endio_hook_cache = KMEM_CACHE(dm_thin_endio_hook, 0);
+	if (!_endio_hook_cache)
+		goto bad_endio_hook_cache;
+
+	return 0;
+
+bad_endio_hook_cache:
+	kmem_cache_destroy(_new_mapping_cache);
+bad_new_mapping_cache:
+	kmem_cache_destroy(_cell_cache);
+bad_cell_cache:
+	dm_unregister_target(&pool_target);
+bad_pool_target:
+	dm_unregister_target(&thin_target);
 
 	return r;
 }
@@ -2764,6 +2835,10 @@
 {
 	dm_unregister_target(&thin_target);
 	dm_unregister_target(&pool_target);
+
+	kmem_cache_destroy(_cell_cache);
+	kmem_cache_destroy(_new_mapping_cache);
+	kmem_cache_destroy(_endio_hook_cache);
 }
 
 module_init(dm_thin_init);
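The two new pool messages travel over the normal device-mapper message path, i.e. the equivalent of "dmsetup message <pool-dev> 0 reserve_metadata_snap". A hedged userspace sketch, assuming the lvm2 libdevmapper API (the dm_task_* calls are not part of this patch) and a placeholder pool device name:

/* Sketch only: issue the new pool target messages from userspace.
 * "pool" is a placeholder device-mapper name.
 */
#include <libdevmapper.h>

static int pool_message(const char *pool, const char *msg)
{
	struct dm_task *dmt;
	int r = 0;

	dmt = dm_task_create(DM_DEVICE_TARGET_MSG);
	if (!dmt)
		return 0;

	if (dm_task_set_name(dmt, pool) &&
	    dm_task_set_sector(dmt, 0) &&
	    dm_task_set_message(dmt, msg))
		r = dm_task_run(dmt);

	dm_task_destroy(dmt);
	return r;
}

int main(void)
{
	pool_message("pool", "reserve_metadata_snap");
	/* read the held metadata root from the pool status output here */
	pool_message("pool", "release_metadata_snap");
	return 0;
}

Between the two messages, pool_status() above reports the held root obtained from dm_pool_get_metadata_snap(), which tells userspace which metadata block to read before releasing the snapshot.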
diff --git a/drivers/md/persistent-data/dm-transaction-manager.c b/drivers/md/persistent-data/dm-transaction-manager.c
index 6f8d387..400fe14 100644
--- a/drivers/md/persistent-data/dm-transaction-manager.c
+++ b/drivers/md/persistent-data/dm-transaction-manager.c
@@ -249,6 +249,7 @@
 
 	return r;
 }
+EXPORT_SYMBOL_GPL(dm_tm_shadow_block);
 
 int dm_tm_read_lock(struct dm_transaction_manager *tm, dm_block_t b,
 		    struct dm_block_validator *v,
@@ -259,6 +260,7 @@
 
 	return dm_bm_read_lock(tm->bm, b, v, blk);
 }
+EXPORT_SYMBOL_GPL(dm_tm_read_lock);
 
 int dm_tm_unlock(struct dm_transaction_manager *tm, struct dm_block *b)
 {
diff --git a/drivers/md/raid1.c b/drivers/md/raid1.c
index 835de71..a9c7981 100644
--- a/drivers/md/raid1.c
+++ b/drivers/md/raid1.c
@@ -2550,6 +2550,7 @@
 	err = -EINVAL;
 	spin_lock_init(&conf->device_lock);
 	rdev_for_each(rdev, mddev) {
+		struct request_queue *q;
 		int disk_idx = rdev->raid_disk;
 		if (disk_idx >= mddev->raid_disks
 		    || disk_idx < 0)
@@ -2562,6 +2563,9 @@
 		if (disk->rdev)
 			goto abort;
 		disk->rdev = rdev;
+		q = bdev_get_queue(rdev->bdev);
+		if (q->merge_bvec_fn)
+			mddev->merge_check_needed = 1;
 
 		disk->head_position = 0;
 	}
diff --git a/drivers/md/raid10.c b/drivers/md/raid10.c
index 987db37..99ae606 100644
--- a/drivers/md/raid10.c
+++ b/drivers/md/raid10.c
@@ -3475,6 +3475,7 @@
 
 	rdev_for_each(rdev, mddev) {
 		long long diff;
+		struct request_queue *q;
 
 		disk_idx = rdev->raid_disk;
 		if (disk_idx < 0)
@@ -3493,6 +3494,9 @@
 				goto out_free_conf;
 			disk->rdev = rdev;
 		}
+		q = bdev_get_queue(rdev->bdev);
+		if (q->merge_bvec_fn)
+			mddev->merge_check_needed = 1;
 		diff = (rdev->new_data_offset - rdev->data_offset);
 		if (!mddev->reshape_backwards)
 			diff = -diff;
diff --git a/drivers/media/common/saa7146_fops.c b/drivers/media/common/saa7146_fops.c
index 7d42c11..0cdbd74 100644
--- a/drivers/media/common/saa7146_fops.c
+++ b/drivers/media/common/saa7146_fops.c
@@ -198,7 +198,6 @@
 	struct saa7146_dev *dev = video_drvdata(file);
 	struct saa7146_fh *fh = NULL;
 	int result = 0;
-	enum v4l2_buf_type type;
 
 	DEB_EE("file:%p, dev:%s\n", file, video_device_node_name(vdev));
 
@@ -207,10 +206,6 @@
 
 	DEB_D("using: %p\n", dev);
 
-	type = vdev->vfl_type == VFL_TYPE_GRABBER
-	     ? V4L2_BUF_TYPE_VIDEO_CAPTURE
-	     : V4L2_BUF_TYPE_VBI_CAPTURE;
-
 	/* check if an extension is registered */
 	if( NULL == dev->ext ) {
 		DEB_S("no extension registered for this device\n");
diff --git a/drivers/media/dvb/frontends/cx24110.c b/drivers/media/dvb/frontends/cx24110.c
index 98ecaf0..3180f5b 100644
--- a/drivers/media/dvb/frontends/cx24110.c
+++ b/drivers/media/dvb/frontends/cx24110.c
@@ -516,9 +516,9 @@
 	if(cx24110_readreg(state,0x10)&0x40) {
 		/* the RS error counter has finished one counting window */
 		cx24110_writereg(state,0x10,0x60); /* select the byer reg */
-		cx24110_readreg(state, 0x12) |
+		(void)(cx24110_readreg(state, 0x12) |
 			(cx24110_readreg(state, 0x13) << 8) |
-			(cx24110_readreg(state, 0x14) << 16);
+			(cx24110_readreg(state, 0x14) << 16));
 		cx24110_writereg(state,0x10,0x70); /* select the bler reg */
 		state->lastbler=cx24110_readreg(state,0x12)|
 			(cx24110_readreg(state,0x13)<<8)|
diff --git a/drivers/media/dvb/frontends/cxd2820r_c.c b/drivers/media/dvb/frontends/cxd2820r_c.c
index 9454049..ed3b0ba6 100644
--- a/drivers/media/dvb/frontends/cxd2820r_c.c
+++ b/drivers/media/dvb/frontends/cxd2820r_c.c
@@ -121,7 +121,7 @@
 	if (ret)
 		goto error;
 
-	switch ((buf[0] >> 0) & 0x03) {
+	switch ((buf[0] >> 0) & 0x07) {
 	case 0:
 		c->modulation = QAM_16;
 		break;
diff --git a/drivers/media/dvb/frontends/lg2160.c b/drivers/media/dvb/frontends/lg2160.c
index a3ab1a5..cc11260 100644
--- a/drivers/media/dvb/frontends/lg2160.c
+++ b/drivers/media/dvb/frontends/lg2160.c
@@ -126,7 +126,7 @@
 
 	lg_reg("writing %d registers...\n", len);
 
-	for (i = 0; i < len - 1; i++) {
+	for (i = 0; i < len; i++) {
 		ret = lg216x_write_reg(state, regs[i].reg, regs[i].val);
 		if (lg_fail(ret))
 			return ret;
diff --git a/drivers/media/dvb/siano/smsusb.c b/drivers/media/dvb/siano/smsusb.c
index 63c004a..664e460 100644
--- a/drivers/media/dvb/siano/smsusb.c
+++ b/drivers/media/dvb/siano/smsusb.c
@@ -544,6 +544,8 @@
 		.driver_info = SMS1XXX_BOARD_HAUPPAUGE_WINDHAM },
 	{ USB_DEVICE(0x2040, 0xc0a0),
 		.driver_info = SMS1XXX_BOARD_HAUPPAUGE_WINDHAM },
+	{ USB_DEVICE(0x2040, 0xf5a0),
+		.driver_info = SMS1XXX_BOARD_HAUPPAUGE_WINDHAM },
 	{ } /* Terminating entry */
 	};
 
diff --git a/drivers/media/radio/radio-maxiradio.c b/drivers/media/radio/radio-maxiradio.c
index 740a3d5..b415211 100644
--- a/drivers/media/radio/radio-maxiradio.c
+++ b/drivers/media/radio/radio-maxiradio.c
@@ -157,7 +157,7 @@
 		goto err_out_free_region;
 
 	dev->io = pci_resource_start(pdev, 0);
-	if (snd_tea575x_init(&dev->tea)) {
+	if (snd_tea575x_init(&dev->tea, THIS_MODULE)) {
 		printk(KERN_ERR "radio-maxiradio: Unable to detect TEA575x tuner\n");
 		goto err_out_free_region;
 	}
diff --git a/drivers/media/radio/radio-sf16fmr2.c b/drivers/media/radio/radio-sf16fmr2.c
index 52b8011..4efcbec 100644
--- a/drivers/media/radio/radio-sf16fmr2.c
+++ b/drivers/media/radio/radio-sf16fmr2.c
@@ -238,7 +238,7 @@
 	snprintf(fmr2->tea.bus_info, sizeof(fmr2->tea.bus_info), "%s:%s",
 			fmr2->is_fmd2 ? "PnP" : "ISA", dev_name(pdev));
 
-	if (snd_tea575x_init(&fmr2->tea)) {
+	if (snd_tea575x_init(&fmr2->tea, THIS_MODULE)) {
 		printk(KERN_ERR "radio-sf16fmr2: Unable to detect TEA575x tuner\n");
 		release_region(fmr2->io, 2);
 		return -ENODEV;
diff --git a/drivers/media/radio/si470x/radio-si470x-usb.c b/drivers/media/radio/si470x/radio-si470x-usb.c
index e9f6387..f412f7a 100644
--- a/drivers/media/radio/si470x/radio-si470x-usb.c
+++ b/drivers/media/radio/si470x/radio-si470x-usb.c
@@ -51,6 +51,8 @@
 	{ USB_DEVICE_AND_INTERFACE_INFO(0x1b80, 0xd700, USB_CLASS_HID, 0, 0) },
 	/* Sanei Electric, Inc. FM USB Radio (sold as DealExtreme.com PCear) */
 	{ USB_DEVICE_AND_INTERFACE_INFO(0x10c5, 0x819a, USB_CLASS_HID, 0, 0) },
+	/* Axentia ALERT FM USB Receiver */
+	{ USB_DEVICE_AND_INTERFACE_INFO(0x12cf, 0x7111, USB_CLASS_HID, 0, 0) },
 	/* Terminating entry */
 	{ }
 };
diff --git a/drivers/media/video/bt8xx/bttv-cards.c b/drivers/media/video/bt8xx/bttv-cards.c
index ff2933a..856ab96 100644
--- a/drivers/media/video/bt8xx/bttv-cards.c
+++ b/drivers/media/video/bt8xx/bttv-cards.c
@@ -371,7 +371,6 @@
 		.muxsel		= MUXSEL(2, 3, 1, 1),
 		.gpiomux 	= { 2, 0, 0, 0 },
 		.gpiomute 	= 10,
-		.needs_tvaudio	= 1,
 		.tuner_type	= UNSET,
 		.tuner_addr	= ADDR_UNSET,
 	},
@@ -384,7 +383,6 @@
 		.muxsel		= MUXSEL(2, 3, 1, 1),
 		.gpiomux 	= { 0, 1, 2, 3 },
 		.gpiomute 	= 4,
-		.needs_tvaudio	= 1,
 		.tuner_type	= UNSET,
 		.tuner_addr	= ADDR_UNSET,
 	},
@@ -398,7 +396,6 @@
 		.gpiomux 	= { 4, 0, 2, 3 },
 		.gpiomute 	= 1,
 		.no_msp34xx	= 1,
-		.needs_tvaudio	= 1,
 		.tuner_type     = TUNER_PHILIPS_NTSC,
 		.tuner_addr	= ADDR_UNSET,
 		.pll            = PLL_28,
@@ -414,7 +411,6 @@
 		.gpiomask	= 0,
 		.muxsel		= MUXSEL(2, 3, 1, 1),
 		.gpiomux 	= { 0 },
-		.needs_tvaudio	= 0,
 		.tuner_type	= TUNER_ABSENT,
 		.tuner_addr	= ADDR_UNSET,
 	},
@@ -427,7 +423,6 @@
 		.muxsel		= MUXSEL(2, 3, 1, 0),
 		.gpiomux 	= { 0, 1, 0, 1 },
 		.gpiomute 	= 3,
-		.needs_tvaudio	= 1,
 		.tuner_type	= UNSET,
 		.tuner_addr	= ADDR_UNSET,
 	},
@@ -440,7 +435,6 @@
 		.gpiomask	= 0x0f,
 		.gpiomux 	= { 0x0c, 0x04, 0x08, 0x04 },
 		/*                0x04 for some cards ?? */
-		.needs_tvaudio	= 1,
 		.tuner_type	= UNSET,
 		.tuner_addr	= ADDR_UNSET,
 		.audio_mode_gpio= avermedia_tvphone_audio,
@@ -454,7 +448,6 @@
 		.gpiomask	= 0,
 		.muxsel		= MUXSEL(2, 3, 1, 0, 0),
 		.gpiomux 	= { 0 },
-		.needs_tvaudio	= 1,
 		.tuner_type	= TUNER_ABSENT,
 		.tuner_addr	= ADDR_UNSET,
 	},
@@ -469,7 +462,6 @@
 		.muxsel		= MUXSEL(2, 3, 1, 1),
 		.gpiomux 	= { 0, 0xc00, 0x800, 0x400 },
 		.gpiomute 	= 0xc00,
-		.needs_tvaudio	= 1,
 		.pll		= PLL_28,
 		.tuner_type	= UNSET,
 		.tuner_addr	= ADDR_UNSET,
@@ -482,7 +474,6 @@
 		.gpiomask	= 3,
 		.muxsel		= MUXSEL(2, 3, 1, 1),
 		.gpiomux 	= { 1, 1, 2, 3 },
-		.needs_tvaudio	= 0,
 		.pll		= PLL_28,
 		.tuner_type	= TUNER_TEMIC_PAL,
 		.tuner_addr	= ADDR_UNSET,
@@ -496,7 +487,6 @@
 		.muxsel		= MUXSEL(2, 0, 1, 1),
 		.gpiomux 	= { 0, 1, 2, 3 },
 		.gpiomute 	= 4,
-		.needs_tvaudio	= 1,
 		.pll		= PLL_28,
 		.tuner_type	= UNSET,
 		.tuner_addr	= ADDR_UNSET,
@@ -510,7 +500,6 @@
 		.muxsel		= MUXSEL(2, 3, 1, 1),
 		.gpiomux 	= { 0x20001,0x10001, 0, 0 },
 		.gpiomute 	= 10,
-		.needs_tvaudio	= 1,
 		.tuner_type	= UNSET,
 		.tuner_addr	= ADDR_UNSET,
 	},
@@ -524,7 +513,6 @@
 		.gpiomask	= 15,
 		.muxsel		= MUXSEL(2, 3, 1, 1),
 		.gpiomux 	= { 13, 14, 11, 7 },
-		.needs_tvaudio	= 1,
 		.tuner_type	= UNSET,
 		.tuner_addr	= ADDR_UNSET,
 	},
@@ -536,7 +524,6 @@
 		.gpiomask	= 15,
 		.muxsel		= MUXSEL(2, 3, 1, 1),
 		.gpiomux 	= { 13, 14, 11, 7 },
-		.needs_tvaudio	= 1,
 		.msp34xx_alt    = 1,
 		.pll		= PLL_28,
 		.tuner_type	= TUNER_PHILIPS_PAL,
@@ -553,7 +540,6 @@
 		.muxsel		= MUXSEL(2, 3, 1, 1),
 		.gpiomux 	= { 0, 2, 1, 3 }, /* old: {0, 1, 2, 3, 4} */
 		.gpiomute 	= 4,
-		.needs_tvaudio	= 1,
 		.pll		= PLL_28,
 		.tuner_type	= UNSET,
 		.tuner_addr	= ADDR_UNSET,
@@ -567,7 +553,6 @@
 		.muxsel		= MUXSEL(2, 3, 1, 1),
 		.gpiomux 	= { 0, 0, 1, 0 },
 		.gpiomute 	= 10,
-		.needs_tvaudio	= 1,
 		.tuner_type	= UNSET,
 		.tuner_addr	= ADDR_UNSET,
 	},
@@ -583,7 +568,6 @@
 		/* 2003-10-20 by "Anton A. Arapov" <arapov@mail.ru> */
 		.gpiomux        = { 0x001e00, 0, 0x018000, 0x014000 },
 		.gpiomute 	= 0x002000,
-		.needs_tvaudio	= 1,
 		.pll		= PLL_28,
 		.tuner_type	= UNSET,
 		.tuner_addr     = ADDR_UNSET,
@@ -597,7 +581,6 @@
 		.muxsel		= MUXSEL(2, 3, 1, 1, 0),
 		.gpiomux 	= { 0x4fa007,0xcfa007,0xcfa007,0xcfa007 },
 		.gpiomute 	= 0xcfa007,
-		.needs_tvaudio	= 1,
 		.tuner_type	= UNSET,
 		.tuner_addr	= ADDR_UNSET,
 		.volume_gpio	= winview_volume,
@@ -611,7 +594,6 @@
 		.gpiomask	= 0,
 		.muxsel		= MUXSEL(2, 3, 1, 1),
 		.gpiomux 	= { 1, 0, 0, 0 },
-		.needs_tvaudio	= 1,
 		.tuner_type	= UNSET,
 		.tuner_addr	= ADDR_UNSET,
 	},
@@ -660,7 +642,6 @@
 		.muxsel		= MUXSEL(2, 3, 1, 1),
 		.gpiomux 	= { 0, 1, 0x800, 0x400 },
 		.gpiomute 	= 0xc00,
-		.needs_tvaudio	= 1,
 		.pll		= PLL_28,
 		.tuner_type	= UNSET,
 		.tuner_addr	= ADDR_UNSET,
@@ -691,7 +672,6 @@
 		.muxsel		= MUXSEL(2, 3, 1, 1),
 		.gpiomux 	= {0x400, 0x400, 0x400, 0x400 },
 		.gpiomute 	= 0xc00,
-		.needs_tvaudio	= 1,
 		.pll		= PLL_28,
 		.tuner_type	= UNSET,
 		.tuner_addr	= ADDR_UNSET,
@@ -706,7 +686,6 @@
 		.muxsel		= MUXSEL(2, 3, 1, 1),
 		.gpiomux        = { 0x20000, 0x30000, 0x10000, 0 },
 		.gpiomute 	= 0x40000,
-		.needs_tvaudio	= 0,
 		.tuner_type	= TUNER_PHILIPS_PAL,
 		.tuner_addr	= ADDR_UNSET,
 		.audio_mode_gpio= terratv_audio,
@@ -720,7 +699,6 @@
 		.muxsel		= MUXSEL(2, 0, 1, 1),
 		.gpiomux 	= { 0, 1, 2, 3 },
 		.gpiomute 	= 4,
-		.needs_tvaudio	= 1,
 		.tuner_type	= UNSET,
 		.tuner_addr	= ADDR_UNSET,
 	},
@@ -748,7 +726,6 @@
 		.muxsel		= MUXSEL(2, 3, 1, 1),
 		.gpiomux 	= { 0x20000, 0x30000, 0x10000, 0x00000 },
 		.gpiomute 	= 0x40000,
-		.needs_tvaudio	= 0,
 		.tuner_type	= TUNER_PHILIPS_PAL,
 		.tuner_addr	= ADDR_UNSET,
 		.audio_mode_gpio= terratv_audio,
@@ -793,7 +770,6 @@
 		.gpiomask	= 0,
 		.muxsel		= MUXSEL(2, 3, 1, 0, 0),
 		.gpiomux 	= { 0 },
-		.needs_tvaudio	= 1,
 		.tuner_type	= TUNER_ABSENT,
 		.tuner_addr	= ADDR_UNSET,
 		.muxsel_hook    = PXC200_muxsel,
@@ -834,7 +810,6 @@
 		.gpiomask	= 0,
 		.muxsel		= MUXSEL(2, 3, 1, 1),
 		.gpiomux 	= { 0 },
-		.needs_tvaudio	= 0,
 		.tuner_type	= TUNER_ABSENT,
 		.tuner_addr	= ADDR_UNSET,
 	},
@@ -847,7 +822,6 @@
 		.muxsel		= MUXSEL(2, 3, 1, 1),
 		.gpiomux 	= { 0x500, 0, 0x300, 0x900 },
 		.gpiomute 	= 0x900,
-		.needs_tvaudio	= 1,
 		.pll		= PLL_28,
 		.tuner_type	= TUNER_PHILIPS_PAL,
 		.tuner_addr	= ADDR_UNSET,
@@ -874,7 +848,6 @@
 		Note: There exists another variant "Winfast 2000" with tv stereo !?
 		Note: eeprom only contains FF and pci subsystem id 107d:6606
 		*/
-		.needs_tvaudio	= 0,
 		.pll		= PLL_28,
 		.has_radio	= 1,
 		.tuner_type	= TUNER_PHILIPS_PAL, /* default for now, gpio reads BFFF06 for Pal bg+dk */
@@ -934,7 +907,6 @@
 		.muxsel		= MUXSEL(2, 3, 1, 0),
 		.gpiomux 	= { 0x551400, 0x551200, 0, 0 },
 		.gpiomute 	= 0x551c00,
-		.needs_tvaudio	= 1,
 		.pll		= PLL_28,
 		.tuner_type	= TUNER_PHILIPS_PAL_I,
 		.tuner_addr	= ADDR_UNSET,
@@ -949,7 +921,6 @@
 		.muxsel		= MUXSEL(2, 3, 1, 1),
 		.gpiomux 	= { 2, 0xd0001, 0, 0 },
 		.gpiomute 	= 1,
-		.needs_tvaudio	= 0,
 		.pll		= PLL_28,
 		.tuner_type	= UNSET,
 		.tuner_addr	= ADDR_UNSET,
@@ -966,7 +937,6 @@
 		.gpiomux 	= { 4, 0, 2, 3 },
 		.gpiomute 	= 1,
 		.no_msp34xx	= 1,
-		.needs_tvaudio	= 1,
 		.tuner_type     = TUNER_PHILIPS_NTSC,
 		.tuner_addr	= ADDR_UNSET,
 		.pll            = PLL_28,
@@ -980,7 +950,6 @@
 		.gpiomask	= 15,
 		.muxsel		= MUXSEL(2, 3, 1, 1),
 		.gpiomux 	= { 13, 4, 11, 7 },
-		.needs_tvaudio	= 1,
 		.pll		= PLL_28,
 		.tuner_type	= UNSET,
 		.tuner_addr	= ADDR_UNSET,
@@ -995,7 +964,6 @@
 		.gpiomask	= 0,
 		.muxsel		= MUXSEL(2, 3, 1, 1),
 		.gpiomux 	= { 0, 0, 0, 0},
-		.needs_tvaudio	= 1,
 		.no_msp34xx	= 1,
 		.pll		= PLL_28,
 		.tuner_type	= TUNER_PHILIPS_PAL_I,
@@ -1066,7 +1034,6 @@
 		.muxsel		= MUXSEL(2, 3, 1, 1),
 		.gpiomux 	= { 0x20000, 0x30000, 0x10000, 0 },
 		.gpiomute 	= 0x40000,
-		.needs_tvaudio	= 1,
 		.no_msp34xx	= 1,
 		.pll		= PLL_35,
 		.tuner_type	= TUNER_PHILIPS_PAL_I,
@@ -1084,7 +1051,6 @@
 		.muxsel		= MUXSEL(2, 3, 1, 1),
 		.gpiomux 	= {2,0,0,0 },
 		.gpiomute 	= 1,
-		.needs_tvaudio	= 1,
 		.pll		= PLL_28,
 		.tuner_type	= UNSET,
 		.tuner_addr	= ADDR_UNSET,
@@ -1163,7 +1129,6 @@
 				MUX2 (mask 0x30000):
 					0,2,3= from MSP34xx
 					1= FM stereo Radio from Tuner */
-		.needs_tvaudio  = 0,
 		.pll            = PLL_28,
 		.tuner_type     = UNSET,
 		.tuner_addr	= ADDR_UNSET,
@@ -1179,7 +1144,6 @@
 		.muxsel		= MUXSEL(2, 3, 1, 1),
 		.gpiomux 	= { 0, 0, 0x10, 8 },
 		.gpiomute 	= 4,
-		.needs_tvaudio	= 1,
 		.pll		= PLL_28,
 		.tuner_type	= TUNER_PHILIPS_PAL,
 		.tuner_addr	= ADDR_UNSET,
@@ -1218,7 +1182,6 @@
 		.muxsel         = MUXSEL(2, 3, 1, 0),
 		.gpiomux        = { 2, 0, 0, 0 },
 		.gpiomute 	= 10,
-		.needs_tvaudio  = 0,
 		.pll		= PLL_28,
 		.tuner_type	= TUNER_TEMIC_PAL,
 		.tuner_addr	= ADDR_UNSET,
@@ -1250,7 +1213,6 @@
 		.gpiomask       = 0,
 		.muxsel         = MUXSEL(3, 1),
 		.gpiomux        = { 0 },
-		.needs_tvaudio  = 0,
 		.no_msp34xx     = 1,
 		.pll            = PLL_35,
 		.tuner_type     = TUNER_ABSENT,
@@ -1266,7 +1228,6 @@
 		.muxsel         = MUXSEL(2, 3, 1, 1),
 		.gpiomux        = { 0x400, 0x400, 0x400, 0x400 },
 		.gpiomute 	= 0x800,
-		.needs_tvaudio  = 1,
 		.pll            = PLL_28,
 		.tuner_type     = TUNER_TEMIC_4036FY5_NTSC,
 		.tuner_addr	= ADDR_UNSET,
@@ -1312,7 +1273,6 @@
 		.muxsel		= MUXSEL(2, 2),
 		.gpiomux        = { },
 		.no_msp34xx     = 1,
-		.needs_tvaudio  = 0,
 		.pll		= PLL_28,
 		.tuner_type     = TUNER_ABSENT,
 		.tuner_addr	= ADDR_UNSET,
@@ -1329,7 +1289,6 @@
 		.muxsel         = MUXSEL(2, 3, 1, 0),
 		.gpiomux        = { 1, 0, 4, 4 },
 		.gpiomute 	= 9,
-		.needs_tvaudio  = 0,
 		.pll		= PLL_28,
 		.tuner_type	= TUNER_PHILIPS_PAL,
 		.tuner_addr	= ADDR_UNSET,
@@ -1379,7 +1338,6 @@
 		.gpiomute 	= 0x1800,
 		.audio_mode_gpio= fv2000s_audio,
 		.no_msp34xx	= 1,
-		.needs_tvaudio  = 1,
 		.pll            = PLL_28,
 		.tuner_type     = TUNER_PHILIPS_PAL,
 		.tuner_addr	= ADDR_UNSET,
@@ -1393,7 +1351,6 @@
 		.muxsel		= MUXSEL(2, 3, 1, 1),
 		.gpiomux 	= { 0x500, 0x500, 0x300, 0x900 },
 		.gpiomute 	= 0x900,
-		.needs_tvaudio	= 1,
 		.pll		= PLL_28,
 		.tuner_type	= TUNER_PHILIPS_PAL,
 		.tuner_addr	= ADDR_UNSET,
@@ -1477,7 +1434,6 @@
 		.muxsel         = MUXSEL(2, 3, 1, 1),
 		.gpiomux        = { 0, 0, 11, 7 }, /* TV and Radio with same GPIO ! */
 		.gpiomute 	= 13,
-		.needs_tvaudio  = 1,
 		.pll            = PLL_28,
 		.tuner_type     = TUNER_LG_PAL_I_FM,
 		.tuner_addr	= ADDR_UNSET,
@@ -1514,7 +1470,6 @@
 		.muxsel		= MUXSEL(2, 3, 1, 1),
 		.gpiomux 	= { 0x01, 0x00, 0x03, 0x03 },
 		.gpiomute 	= 0x09,
-		.needs_tvaudio  = 1,
 		.no_msp34xx	= 1,
 		.pll		= PLL_28,
 		.tuner_type	= TUNER_PHILIPS_PAL,
@@ -1540,7 +1495,6 @@
 		.gpiomask       = 0,
 		.muxsel         = MUXSEL(2, 3, 1, 0, 0),
 		.gpiomux        = { 0 },
-		.needs_tvaudio  = 0,
 		.tuner_type     = TUNER_ABSENT,
 		.tuner_addr	= ADDR_UNSET,
 	},
@@ -1567,7 +1521,6 @@
 		.muxsel         = MUXSEL(2, 1, 1),
 		.gpiomux        = { 0, 1, 2, 2 },
 		.gpiomute 	= 4,
-		.needs_tvaudio  = 0,
 		.tuner_type     = TUNER_PHILIPS_PAL,
 		.tuner_addr	= ADDR_UNSET,
 		.pll		= PLL_28,
@@ -1597,7 +1550,6 @@
 		.gpiomask       = 0,
 		.muxsel         = MUXSEL(2, 3, 1, 0),
 		.gpiomux        = { 0 },
-		.needs_tvaudio  = 0,
 		.no_msp34xx     = 1,
 		.pll            = PLL_28,
 		.tuner_type     = TUNER_ABSENT,
@@ -1619,7 +1571,6 @@
 						* btwincap uses 0x80000/0x80003
 						*/
 		.gpiomute 	= 4,
-		.needs_tvaudio  = 0,
 		.no_msp34xx     = 1,
 		.pll            = PLL_28,
 		.tuner_type     = TUNER_PHILIPS_PAL,
@@ -1655,7 +1606,6 @@
 		/* .audio_inputs= 1, */
 		.svhs           = 2,
 		.muxsel         = MUXSEL(2, 0, 1, 1),
-		.needs_tvaudio  = 1,
 		.pll            = PLL_28,
 		.tuner_type     = UNSET,
 		.tuner_addr	= ADDR_UNSET,
@@ -1875,7 +1825,6 @@
 		.muxsel         = MUXSEL(2, 3, 1, 1),
 		.gpiomux        = { 0, 1, 2, 3},
 		.gpiomute 	= 4,
-		.needs_tvaudio  = 1,
 		.tuner_type     = TUNER_PHILIPS_PAL,
 		.tuner_addr	= ADDR_UNSET,
 		.pll            = PLL_28,
@@ -1902,7 +1851,6 @@
 		.gpiomask       = 0,
 		.muxsel         = MUXSEL(2, 3),
 		.gpiomux        = { 0 },
-		.needs_tvaudio  = 0,
 		.no_msp34xx     = 1,
 		.pll            = PLL_28,
 		.tuner_type     = TUNER_ABSENT,
@@ -1920,7 +1868,6 @@
 		/*                  Tuner, Radio, external, internal, off,  on */
 		.gpiomux        = { 0x08,  0x0f,  0x0a,     0x08 },
 		.gpiomute 	= 0x0f,
-		.needs_tvaudio  = 0,
 		.no_msp34xx     = 1,
 		.pll            = PLL_28,
 		.tuner_type     = TUNER_PHILIPS_NTSC,
@@ -1936,7 +1883,6 @@
 		.svhs           = 2,
 		.gpiomask       = 0x00,
 		.muxsel         = MUXSEL(2, 3, 1, 1),
-		.needs_tvaudio  = 1,
 		.no_msp34xx     = 1,
 		.pll            = PLL_28,
 		.tuner_type     = TUNER_PHILIPS_PAL,
@@ -2034,7 +1980,6 @@
 		.gpiomask       = 0,
 		.muxsel         = MUXSEL(2, 3, 1, 0),
 		.gpiomux        = { 0 },
-		.needs_tvaudio  = 0,
 		.no_msp34xx     = 1,
 		.pll            = PLL_28,
 		.tuner_type     = TUNER_ABSENT,
@@ -2049,7 +1994,6 @@
 		.gpiomask       = 0x00,
 		.muxsel         = MUXSEL(2, 3, 1, 0),
 		.gpiomux        = { 0, 0, 0, 0 }, /* card has no audio */
-		.needs_tvaudio  = 0,
 		.pll            = PLL_28,
 		.tuner_type     = TUNER_ABSENT,
 		.tuner_addr	= ADDR_UNSET,
@@ -2062,7 +2006,6 @@
 		.gpiomask       = 0x00,
 		.muxsel         = MUXSEL(2, 3, 1, 1),
 		.gpiomux        = { 0, 0, 0, 0 }, /* card has no audio */
-		.needs_tvaudio  = 0,
 		.pll            = PLL_28,
 		.tuner_type     = TUNER_ABSENT,
 		.tuner_addr	= ADDR_UNSET,
@@ -2079,7 +2022,6 @@
 		.muxsel         = MUXSEL(2, 2, 2, 2, 3, 3, 3, 3, 1, 0),
 		.muxsel_hook	= phytec_muxsel,
 		.gpiomux        = { 0, 0, 0, 0 }, /* card has no audio */
-		.needs_tvaudio  = 1,
 		.pll            = PLL_28,
 		.tuner_type     = TUNER_ABSENT,
 		.tuner_addr	= ADDR_UNSET,
@@ -2094,7 +2036,6 @@
 		.muxsel         = MUXSEL(2, 2, 2, 2, 3, 3, 3, 3, 1, 1),
 		.muxsel_hook	= phytec_muxsel,
 		.gpiomux        = { 0, 0, 0, 0 }, /* card has no audio */
-		.needs_tvaudio  = 1,
 		.pll            = PLL_28,
 		.tuner_type     = TUNER_ABSENT,
 		.tuner_addr	= ADDR_UNSET,
@@ -2118,7 +2059,6 @@
 		.tuner_type     = TUNER_ABSENT,
 		.tuner_addr	= ADDR_UNSET,
 		.svhs           = NO_SVHS,   /* card has no svhs */
-		.needs_tvaudio  = 0,
 		.no_msp34xx     = 1,
 		.no_tda7432     = 1,
 		.gpiomask       = 0x00,
@@ -2168,7 +2108,6 @@
 		.gpiomask       = 3,
 		.muxsel         = MUXSEL(2, 3, 1, 1),
 		.gpiomux        = { 1, 1, 1, 1 },
-		.needs_tvaudio  = 1,
 		.tuner_type     = TUNER_PHILIPS_PAL,
 		.tuner_addr	= ADDR_UNSET,
 		.pll            = PLL_35,
@@ -2210,7 +2149,6 @@
 		.muxsel         = MUXSEL(2, 3, 1, 0),
 		.no_msp34xx     = 1,
 		.no_tda7432     = 1,
-		.needs_tvaudio  = 0,
 		.tuner_type     = TUNER_ABSENT,
 		.tuner_addr	= ADDR_UNSET,
 	},
@@ -2222,7 +2160,6 @@
 		.tuner_type     = TUNER_PHILIPS_PAL,
 		.tuner_addr	= ADDR_UNSET,
 		.svhs           = 2,
-		.needs_tvaudio  = 0,
 		.gpiomask       = 0x68,
 		.muxsel         = MUXSEL(2, 3, 1),
 		.gpiomux        = { 0x68, 0x68, 0x61, 0x61 },
@@ -2241,7 +2178,6 @@
 		.muxsel         = MUXSEL(2, 3, 1, 1),
 		.gpiomux        = { 0, 1, 2, 2 },
 		.gpiomute 	= 3,
-		.needs_tvaudio  = 0,
 		.pll            = PLL_28,
 		.tuner_type     = TUNER_PHILIPS_PAL,
 		.tuner_addr	= ADDR_UNSET,
@@ -2265,7 +2201,6 @@
 		.muxsel         = MUXSEL(2, 2, 2, 2),
 		.gpiomux        = { 0, 0, 0, 0 }, /* card has no audio */
 		.pll            = PLL_28,
-		.needs_tvaudio  = 0,
 		.muxsel_hook    = picolo_tetra_muxsel,/*Required as it doesn't follow the classic input selection policy*/
 		.tuner_type     = TUNER_ABSENT,
 		.tuner_addr	= ADDR_UNSET,
@@ -2358,7 +2293,6 @@
 		.muxsel         = MUXSEL(2, 3, 1, 1),
 		.gpiomux        = { 2, 0, 0, 0 },
 		.gpiomute 	= 10,
-		.needs_tvaudio  = 0,
 		.pll            = PLL_28,
 		.tuner_type     = TUNER_PHILIPS_PAL,
 		.tuner_addr	= ADDR_UNSET,
@@ -2405,7 +2339,6 @@
 		.tuner_addr	= ADDR_UNSET,
 		.gpiomask       = 0x008007,
 		.gpiomux        = { 0, 0x000001,0,0 },
-		.needs_tvaudio  = 1,
 		.has_radio      = 1,
 	},
 	[BTTV_BOARD_TIBET_CS16] = {
@@ -2518,7 +2451,6 @@
 		.muxsel		= MUXSEL(2, 3, 1, 1),
 		.gpiomux        = { 0x001e00, 0, 0x018000, 0x014000 },
 		.gpiomute 	= 0x002000,
-		.needs_tvaudio	= 1,
 		.pll		= PLL_28,
 		.tuner_type	= TUNER_YMEC_TVF66T5_B_DFF,
 		.tuner_addr	= 0xc1 >>1,
@@ -2534,7 +2466,6 @@
 		.muxsel         = MUXSEL(2, 3, 1, 1),
 		.gpiomux        = { 0, 1, 2, 2 },
 		.gpiomute 	= 3,
-		.needs_tvaudio  = 0,
 		.pll            = PLL_28,
 		.tuner_type     = TUNER_TENA_9533_DI,
 		.tuner_addr	= ADDR_UNSET,
@@ -2615,7 +2546,6 @@
 		.muxsel		= MUXSEL(2, 3, 1, 1),
 		.gpiomux 	= { 2, 0, 0, 0 },
 		.gpiomute 	= 1,
-		.needs_tvaudio	= 1,
 		.pll		= PLL_28,
 		.tuner_type	= TUNER_PHILIPS_NTSC,
 		.tuner_addr	= ADDR_UNSET,
@@ -2714,7 +2644,6 @@
 		.muxsel         = MUXSEL(2, 3, 1, 1),
 		.gpiomux        = { 0x20001,0x10001, 0, 0 },
 		.gpiomute       = 10,
-		.needs_tvaudio  = 1,
 		.pll            = PLL_28,
 		.tuner_type     = TUNER_PHILIPS_PAL_I,
 		.tuner_addr     = ADDR_UNSET,
@@ -2746,7 +2675,6 @@
 		.muxsel         = MUXSEL(2, 3, 1, 1),
 		.gpiomux        = { 0, 1, 2, 2 }, /* CONTVFMi */
 		.gpiomute 	= 3, /* CONTVFMi */
-		.needs_tvaudio  = 0,
 		.tuner_type     = TUNER_PHILIPS_FM1216ME_MK3, /* TCL MK3 */
 		.tuner_addr     = ADDR_UNSET,
 		.pll            = PLL_28,
@@ -2785,7 +2713,6 @@
 		.gpiomask       = 0x00,
 		.muxsel         = MUXSEL(0, 2, 3, 1),
 		.gpiomux        = { 0, 0, 0, 0 }, /* card has no audio */
-		.needs_tvaudio  = 0,
 		.pll            = PLL_28,
 		.tuner_type     = TUNER_ABSENT,
 		.tuner_addr	= ADDR_UNSET,
@@ -2799,7 +2726,6 @@
 		.gpiomask       = 0x00,
 		.muxsel         = MUXSEL(2, 3, 1),
 		.gpiomux        = { 0, 0, 0, 0 }, /* card has no audio */
-		.needs_tvaudio  = 0,
 		.pll            = PLL_28,
 		.tuner_type     = TUNER_ABSENT,
 		.tuner_addr	= ADDR_UNSET,
@@ -2813,7 +2739,6 @@
 		.gpiomask       = 0x00,
 		.muxsel         = MUXSEL(3, 2, 1),
 		.gpiomux        = { 0, 0, 0, 0 }, /* card has no audio */
-		.needs_tvaudio  = 0,
 		.pll            = PLL_28,
 		.tuner_type     = TUNER_ABSENT,
 		.tuner_addr	= ADDR_UNSET,
@@ -2877,7 +2802,6 @@
 		.gpiomask       = 0,
 		.muxsel         = MUXSEL(2, 3),
 		.gpiomux        = { 0 },
-		.needs_tvaudio  = 0,
 		.no_msp34xx     = 1,
 		.pll            = PLL_28,
 		.tuner_type     = TUNER_ABSENT,
@@ -3649,7 +3573,7 @@
 		struct tuner_setup tun_setup;
 
 		/* Load tuner module before issuing tuner config call! */
-		if (bttv_tvcards[btv->c.type].has_radio)
+		if (btv->has_radio)
 			v4l2_i2c_new_subdev(&btv->c.v4l2_dev,
 				&btv->c.i2c_adap, "tuner",
 				0, v4l2_i2c_tuner_addrs(ADDRS_RADIO));
@@ -3664,7 +3588,7 @@
 		tun_setup.type = btv->tuner_type;
 		tun_setup.addr = addr;
 
-		if (bttv_tvcards[btv->c.type].has_radio)
+		if (btv->has_radio)
 			tun_setup.mode_mask |= T_RADIO;
 
 		bttv_call_all(btv, tuner, s_type_addr, &tun_setup);
@@ -3724,6 +3648,10 @@
 			bttv_tvcards[BTTV_BOARD_HAUPPAUGE_IMPACTVCB].name);
 		btv->c.type = BTTV_BOARD_HAUPPAUGE_IMPACTVCB;
 	}
+
+	/* The 61334 needs the msp3410 to do the radio demod to get sound */
+	if (tv.model == 61334)
+		btv->radio_uses_msp_demodulator = 1;
 }
 
 static int terratec_active_radio_upgrade(struct bttv *btv)
diff --git a/drivers/media/video/bt8xx/bttv-driver.c b/drivers/media/video/bt8xx/bttv-driver.c
index a9cfb0f..ff7a589 100644
--- a/drivers/media/video/bt8xx/bttv-driver.c
+++ b/drivers/media/video/bt8xx/bttv-driver.c
@@ -1218,6 +1218,11 @@
 		   For now this is sufficient. */
 		switch (input) {
 		case TVAUDIO_INPUT_RADIO:
+			/* Some boards need the msp to do the radio demod */
+			if (btv->radio_uses_msp_demodulator) {
+				in = MSP_INPUT_DEFAULT;
+				break;
+			}
 			in = MSP_INPUT(MSP_IN_SCART2, MSP_IN_TUNER1,
 				    MSP_DSP_IN_SCART, MSP_DSP_IN_SCART);
 			break;
diff --git a/drivers/media/video/bt8xx/bttv.h b/drivers/media/video/bt8xx/bttv.h
index c517161..acfe2f3 100644
--- a/drivers/media/video/bt8xx/bttv.h
+++ b/drivers/media/video/bt8xx/bttv.h
@@ -236,7 +236,6 @@
 	/* i2c audio flags */
 	unsigned int no_msp34xx:1;
 	unsigned int no_tda7432:1;
-	unsigned int needs_tvaudio:1;
 	unsigned int msp34xx_alt:1;
 	/* Note: currently no card definition needs to mark the presence
 	   of a RDS saa6588 chip. If this is ever needed, then add a new
diff --git a/drivers/media/video/bt8xx/bttvp.h b/drivers/media/video/bt8xx/bttvp.h
index db943a8d..70fd4f23 100644
--- a/drivers/media/video/bt8xx/bttvp.h
+++ b/drivers/media/video/bt8xx/bttvp.h
@@ -440,6 +440,7 @@
 	/* radio data/state */
 	int has_radio;
 	int radio_user;
+	int radio_uses_msp_demodulator;
 
 	/* miro/pinnacle + Aimslab VHX
 	   philips matchbox (tea5757 radio tuner) support */
diff --git a/drivers/media/video/bw-qcam.c b/drivers/media/video/bw-qcam.c
index 2520219..5b75a64 100644
--- a/drivers/media/video/bw-qcam.c
+++ b/drivers/media/video/bw-qcam.c
@@ -607,8 +607,9 @@
 				}
 				o = i * pixels_per_line + pixels_read + k;
 				if (o < len) {
+					u8 ch = invert - buffer[k];
 					got++;
-					put_user((invert - buffer[k]) << shift, buf + o);
+					put_user(ch << shift, buf + o);
 				}
 			}
 			pixels_read += bytes;
@@ -648,8 +649,8 @@
 	struct qcam *qcam = video_drvdata(file);
 
 	strlcpy(vcap->driver, qcam->v4l2_dev.name, sizeof(vcap->driver));
-	strlcpy(vcap->card, "B&W Quickcam", sizeof(vcap->card));
-	strlcpy(vcap->bus_info, "parport", sizeof(vcap->bus_info));
+	strlcpy(vcap->card, "Connectix B&W Quickcam", sizeof(vcap->card));
+	strlcpy(vcap->bus_info, qcam->pport->name, sizeof(vcap->bus_info));
 	vcap->device_caps = V4L2_CAP_VIDEO_CAPTURE | V4L2_CAP_READWRITE;
 	vcap->capabilities = vcap->device_caps | V4L2_CAP_DEVICE_CAPS;
 	return 0;
@@ -688,8 +689,8 @@
 	pix->height = qcam->height / qcam->transfer_scale;
 	pix->pixelformat = (qcam->bpp == 4) ? V4L2_PIX_FMT_Y4 : V4L2_PIX_FMT_Y6;
 	pix->field = V4L2_FIELD_NONE;
-	pix->bytesperline = qcam->width;
-	pix->sizeimage = qcam->width * qcam->height;
+	pix->bytesperline = pix->width;
+	pix->sizeimage = pix->width * pix->height;
 	/* Just a guess */
 	pix->colorspace = V4L2_COLORSPACE_SRGB;
 	return 0;
@@ -757,7 +758,7 @@
 		  "4-Bit Monochrome", V4L2_PIX_FMT_Y4,
 		  { 0, 0, 0, 0 }
 		},
-		{ 0, 0, 0,
+		{ 1, 0, 0,
 		  "6-Bit Monochrome", V4L2_PIX_FMT_Y6,
 		  { 0, 0, 0, 0 }
 		},
@@ -772,6 +773,25 @@
 	return 0;
 }
 
+static int qcam_enum_framesizes(struct file *file, void *fh,
+					 struct v4l2_frmsizeenum *fsize)
+{
+	static const struct v4l2_frmsize_discrete sizes[] = {
+		{  80,  60 },
+		{ 160, 120 },
+		{ 320, 240 },
+	};
+
+	if (fsize->index > 2)
+		return -EINVAL;
+	if (fsize->pixel_format != V4L2_PIX_FMT_Y4 &&
+	    fsize->pixel_format != V4L2_PIX_FMT_Y6)
+		return -EINVAL;
+	fsize->type = V4L2_FRMSIZE_TYPE_DISCRETE;
+	fsize->discrete = sizes[fsize->index];
+	return 0;
+}
+
 static ssize_t qcam_read(struct file *file, char __user *buf,
 		size_t count, loff_t *ppos)
 {
@@ -795,6 +815,11 @@
 	return len;
 }
 
+static unsigned int qcam_poll(struct file *filp, poll_table *wait)
+{
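+	/* frames are captured synchronously in read(), so the device is always readable; also report pending control events */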
+	return v4l2_ctrl_poll(filp, wait) | POLLIN | POLLRDNORM;
+}
+
 static int qcam_s_ctrl(struct v4l2_ctrl *ctrl)
 {
 	struct qcam *qcam =
@@ -828,7 +853,7 @@
 	.owner		= THIS_MODULE,
 	.open		= v4l2_fh_open,
 	.release	= v4l2_fh_release,
-	.poll		= v4l2_ctrl_poll,
+	.poll		= qcam_poll,
 	.unlocked_ioctl = video_ioctl2,
 	.read		= qcam_read,
 };
@@ -839,6 +864,7 @@
 	.vidioc_s_input      		    = qcam_s_input,
 	.vidioc_enum_input   		    = qcam_enum_input,
 	.vidioc_enum_fmt_vid_cap 	    = qcam_enum_fmt_vid_cap,
+	.vidioc_enum_framesizes		    = qcam_enum_framesizes,
 	.vidioc_g_fmt_vid_cap 		    = qcam_g_fmt_vid_cap,
 	.vidioc_s_fmt_vid_cap  		    = qcam_s_fmt_vid_cap,
 	.vidioc_try_fmt_vid_cap  	    = qcam_try_fmt_vid_cap,
@@ -864,9 +890,9 @@
 		return NULL;
 
 	v4l2_dev = &qcam->v4l2_dev;
-	strlcpy(v4l2_dev->name, "bw-qcam", sizeof(v4l2_dev->name));
+	snprintf(v4l2_dev->name, sizeof(v4l2_dev->name), "bw-qcam%d", num_cams);
 
-	if (v4l2_device_register(NULL, v4l2_dev) < 0) {
+	if (v4l2_device_register(port->dev, v4l2_dev) < 0) {
 		v4l2_err(v4l2_dev, "Could not register v4l2_device\n");
 		kfree(qcam);
 		return NULL;
@@ -886,7 +912,7 @@
 		return NULL;
 	}
 	qcam->pport = port;
-	qcam->pdev = parport_register_device(port, "bw-qcam", NULL, NULL,
+	qcam->pdev = parport_register_device(port, v4l2_dev->name, NULL, NULL,
 			NULL, 0, NULL);
 	if (qcam->pdev == NULL) {
 		v4l2_err(v4l2_dev, "couldn't register for %s.\n", port->name);
@@ -975,6 +1001,7 @@
 		return -ENODEV;
 	}
 	qc_calibrate(qcam);
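+	/* push the initial control values out to the hardware */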
+	v4l2_ctrl_handler_setup(&qcam->hdl);
 
 	parport_release(qcam->pdev);
 
diff --git a/drivers/media/video/cx18/cx18-driver.c b/drivers/media/video/cx18/cx18-driver.c
index b55d57c..7e5ffd6 100644
--- a/drivers/media/video/cx18/cx18-driver.c
+++ b/drivers/media/video/cx18/cx18-driver.c
@@ -838,10 +838,10 @@
 	}
 
 	CX18_DEBUG_INFO("cx%d (rev %d) at %02x:%02x.%x, "
-		   "irq: %d, latency: %d, memory: 0x%lx\n",
+		   "irq: %d, latency: %d, memory: 0x%llx\n",
 		   cx->pci_dev->device, cx->card_rev, pci_dev->bus->number,
 		   PCI_SLOT(pci_dev->devfn), PCI_FUNC(pci_dev->devfn),
-		   cx->pci_dev->irq, pci_latency, (unsigned long)cx->base_addr);
+		   cx->pci_dev->irq, pci_latency, (u64)cx->base_addr);
 
 	return 0;
 }
@@ -938,7 +938,7 @@
 	if (retval)
 		goto err;
 
-	CX18_DEBUG_INFO("base addr: 0x%08x\n", cx->base_addr);
+	CX18_DEBUG_INFO("base addr: 0x%llx\n", (u64)cx->base_addr);
 
 	/* PCI Device Setup */
 	retval = cx18_setup_pci(cx, pci_dev, pci_id);
@@ -946,8 +946,8 @@
 		goto free_workqueues;
 
 	/* map io memory */
-	CX18_DEBUG_INFO("attempting ioremap at 0x%08x len 0x%08x\n",
-		   cx->base_addr + CX18_MEM_OFFSET, CX18_MEM_SIZE);
+	CX18_DEBUG_INFO("attempting ioremap at 0x%llx len 0x%08x\n",
+		   (u64)cx->base_addr + CX18_MEM_OFFSET, CX18_MEM_SIZE);
 	cx->enc_mem = ioremap_nocache(cx->base_addr + CX18_MEM_OFFSET,
 				       CX18_MEM_SIZE);
 	if (!cx->enc_mem) {
diff --git a/drivers/media/video/cx18/cx18-driver.h b/drivers/media/video/cx18/cx18-driver.h
index 7a37e0e..2767c64 100644
--- a/drivers/media/video/cx18/cx18-driver.h
+++ b/drivers/media/video/cx18/cx18-driver.h
@@ -622,7 +622,7 @@
 				   unique ID. Starts at 1, so 0 can be used as
 				   uninitialized value in the stream->id. */
 
-	u32 base_addr;
+	resource_size_t base_addr;
 
 	u8 card_rev;
 	void __iomem *enc_mem, *reg_mem;
diff --git a/drivers/media/video/cx18/cx18-firmware.c b/drivers/media/video/cx18/cx18-firmware.c
index 1b3fb50..b85c292a 100644
--- a/drivers/media/video/cx18/cx18-firmware.c
+++ b/drivers/media/video/cx18/cx18-firmware.c
@@ -164,8 +164,13 @@
 
 	apu_version = (vers[0] << 24) | (vers[4] << 16) | vers[32];
 	while (offset + sizeof(seghdr) < fw->size) {
-		/* TODO: byteswapping */
-		memcpy(&seghdr, src + offset / 4, sizeof(seghdr));
+		const u32 *shptr = src + offset / 4;
+
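+		/* the segment header words are little-endian in the firmware image; convert each field explicitly */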
+		seghdr.sync1 = le32_to_cpu(shptr[0]);
+		seghdr.sync2 = le32_to_cpu(shptr[1]);
+		seghdr.addr = le32_to_cpu(shptr[2]);
+		seghdr.size = le32_to_cpu(shptr[3]);
+
 		offset += sizeof(seghdr);
 		if (seghdr.sync1 != APU_ROM_SYNC1 ||
 		    seghdr.sync2 != APU_ROM_SYNC2) {
diff --git a/drivers/media/video/cx18/cx18-mailbox.c b/drivers/media/video/cx18/cx18-mailbox.c
index ed81183..eabf00c 100644
--- a/drivers/media/video/cx18/cx18-mailbox.c
+++ b/drivers/media/video/cx18/cx18-mailbox.c
@@ -434,6 +434,7 @@
 {
 	u32 handle, mdl_ack_offset, mdl_ack_count;
 	struct cx18_mailbox *mb;
+	int i;
 
 	mb = &order->mb;
 	handle = mb->args[0];
@@ -447,8 +448,9 @@
 		return -1;
 	}
 
-	cx18_memcpy_fromio(cx, order->mdl_ack, cx->enc_mem + mdl_ack_offset,
-			   sizeof(struct cx18_mdl_ack) * mdl_ack_count);
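+	/* copy the MDL ack entries from encoder memory one 32-bit word at a time */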
+	for (i = 0; i < sizeof(struct cx18_mdl_ack) * mdl_ack_count; i += sizeof(u32))
+		((u32 *)order->mdl_ack)[i / sizeof(u32)] =
+			cx18_readl(cx, cx->enc_mem + mdl_ack_offset + i);
 
 	if ((order->flags & CX18_F_EWO_MB_STALE) == 0)
 		mb_ack_irq(cx, order);
@@ -538,6 +540,7 @@
 	struct cx18_mailbox *order_mb;
 	struct cx18_in_work_order *order;
 	int submit;
+	int i;
 
 	switch (rpu) {
 	case CPU:
@@ -562,10 +565,12 @@
 	order_mb = &order->mb;
 
 	/* mb->cmd and mb->args[0] through mb->args[2] */
-	cx18_memcpy_fromio(cx, &order_mb->cmd, &mb->cmd, 4 * sizeof(u32));
+	for (i = 0; i < 4; i++)
+		(&order_mb->cmd)[i] = cx18_readl(cx, &mb->cmd + i);
+
 	/* mb->request and mb->ack.  N.B. we want to read mb->ack last */
-	cx18_memcpy_fromio(cx, &order_mb->request, &mb->request,
-			   2 * sizeof(u32));
+	for (i = 0; i < 2; i++)
+		(&order_mb->request)[i] = cx18_readl(cx, &mb->request + i);
 
 	if (order_mb->request == order_mb->ack) {
 		CX18_DEBUG_WARN("Possibly falling behind: %s self-ack'ed our "
diff --git a/drivers/media/video/cx88/cx88-blackbird.c b/drivers/media/video/cx88/cx88-blackbird.c
index e46446a..ed7b2aa 100644
--- a/drivers/media/video/cx88/cx88-blackbird.c
+++ b/drivers/media/video/cx88/cx88-blackbird.c
@@ -471,7 +471,7 @@
 	dprintk(1,"Loading firmware ...\n");
 	dataptr = (u32*)firmware->data;
 	for (i = 0; i < (firmware->size >> 2); i++) {
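+		/* firmware words are stored little-endian; convert before checksumming and writing */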
-		value = *dataptr;
+		value = le32_to_cpu(*dataptr);
 		checksum += ~value;
 		memory_write(dev->core, i, value);
 		dataptr++;
diff --git a/drivers/media/video/em28xx/em28xx-cards.c b/drivers/media/video/em28xx/em28xx-cards.c
index 20a7e24..92da7c2 100644
--- a/drivers/media/video/em28xx/em28xx-cards.c
+++ b/drivers/media/video/em28xx/em28xx-cards.c
@@ -974,6 +974,7 @@
 	[EM2884_BOARD_CINERGY_HTC_STICK] = {
 		.name         = "Terratec Cinergy HTC Stick",
 		.has_dvb      = 1,
+		.ir_codes     = RC_MAP_NEC_TERRATEC_CINERGY_XS,
 #if 0
 		.tuner_type   = TUNER_PHILIPS_TDA8290,
 		.tuner_addr   = 0x41,
diff --git a/drivers/media/video/em28xx/em28xx-input.c b/drivers/media/video/em28xx/em28xx-input.c
index fce5f76..5e30c4f 100644
--- a/drivers/media/video/em28xx/em28xx-input.c
+++ b/drivers/media/video/em28xx/em28xx-input.c
@@ -527,6 +527,8 @@
 
 	if (dev->board.ir_codes == NULL) {
 		/* No remote control support */
+		em28xx_warn("Remote control support is not available for "
+				"this card.\n");
 		return 0;
 	}
 
diff --git a/drivers/media/video/gspca/gspca.c b/drivers/media/video/gspca/gspca.c
index 137166d..31721ea 100644
--- a/drivers/media/video/gspca/gspca.c
+++ b/drivers/media/video/gspca/gspca.c
@@ -1653,7 +1653,7 @@
 				enum v4l2_buf_type buf_type)
 {
 	struct gspca_dev *gspca_dev = video_drvdata(file);
-	int ret;
+	int i, ret;
 
 	if (buf_type != V4L2_BUF_TYPE_VIDEO_CAPTURE)
 		return -EINVAL;
@@ -1678,6 +1678,8 @@
 	wake_up_interruptible(&gspca_dev->wq);
 
 	/* empty the transfer queues */
+	for (i = 0; i < gspca_dev->nframes; i++)
+		gspca_dev->frame[i].v4l2_buf.flags &= ~BUF_ALL_FLAGS;
 	atomic_set(&gspca_dev->fr_q, 0);
 	atomic_set(&gspca_dev->fr_i, 0);
 	gspca_dev->fr_o = 0;
diff --git a/drivers/media/video/gspca/ov534.c b/drivers/media/video/gspca/ov534.c
index b5acb1e..80c81dd 100644
--- a/drivers/media/video/gspca/ov534.c
+++ b/drivers/media/video/gspca/ov534.c
@@ -96,7 +96,7 @@
 static void setcontrast(struct gspca_dev *gspca_dev);
 static void setgain(struct gspca_dev *gspca_dev);
 static void setexposure(struct gspca_dev *gspca_dev);
-static int sd_setagc(struct gspca_dev *gspca_dev, __s32 val);
+static void setagc(struct gspca_dev *gspca_dev);
 static void setawb(struct gspca_dev *gspca_dev);
 static void setaec(struct gspca_dev *gspca_dev);
 static void setsharpness(struct gspca_dev *gspca_dev);
@@ -189,7 +189,7 @@
 			.step    = 1,
 			.default_value = 1,
 		},
-		.set = sd_setagc
+		.set_control = setagc
 	},
 [AWB] = {
 		{
@@ -851,6 +851,7 @@
 	int i;
 
 	for (i = 0; i < 5; i++) {
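+		/* brief delay between status register polls */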
+		msleep(10);
 		data = ov534_reg_read(gspca_dev, OV534_REG_STATUS);
 
 		switch (data) {
@@ -1242,10 +1243,6 @@
 
 	cam->ctrls = sd->ctrls;
 
-	/* the auto white balance control works only when auto gain is set */
-	if (sd_ctrls[AGC].qctrl.default_value == 0)
-		gspca_dev->ctrl_inac |= (1 << AWB);
-
 	cam->cam_mode = ov772x_mode;
 	cam->nmodes = ARRAY_SIZE(ov772x_mode);
 
@@ -1486,29 +1483,6 @@
 	} while (remaining_len > 0);
 }
 
-static int sd_setagc(struct gspca_dev *gspca_dev, __s32 val)
-{
-	struct sd *sd = (struct sd *) gspca_dev;
-
-	sd->ctrls[AGC].val = val;
-
-	/* the auto white balance control works only
-	 * when auto gain is set */
-	if (val) {
-		gspca_dev->ctrl_inac &= ~(1 << AWB);
-	} else {
-		gspca_dev->ctrl_inac |= (1 << AWB);
-		if (sd->ctrls[AWB].val) {
-			sd->ctrls[AWB].val = 0;
-			if (gspca_dev->streaming)
-				setawb(gspca_dev);
-		}
-	}
-	if (gspca_dev->streaming)
-		setagc(gspca_dev);
-	return gspca_dev->usb_err;
-}
-
 static int sd_querymenu(struct gspca_dev *gspca_dev,
 		struct v4l2_querymenu *menu)
 {
diff --git a/drivers/media/video/gspca/ov534_9.c b/drivers/media/video/gspca/ov534_9.c
index b579730..1fd41f0 100644
--- a/drivers/media/video/gspca/ov534_9.c
+++ b/drivers/media/video/gspca/ov534_9.c
@@ -1008,6 +1008,7 @@
 	int i;
 
 	for (i = 0; i < 5; i++) {
+		msleep(10);
 		data = reg_r(gspca_dev, OV534_REG_STATUS);
 
 		switch (data) {
diff --git a/drivers/media/video/gspca/pac7311.c b/drivers/media/video/gspca/pac7311.c
index 2cb7d95..115da16 100644
--- a/drivers/media/video/gspca/pac7311.c
+++ b/drivers/media/video/gspca/pac7311.c
@@ -418,7 +418,7 @@
 	struct v4l2_ctrl_handler *hdl = &gspca_dev->ctrl_handler;
 
 	gspca_dev->vdev.ctrl_handler = hdl;
-	v4l2_ctrl_handler_init(hdl, 4);
+	v4l2_ctrl_handler_init(hdl, 5);
 
 	sd->contrast = v4l2_ctrl_new_std(hdl, &sd_ctrl_ops,
 					V4L2_CID_CONTRAST, 0, 15, 1, 7);
diff --git a/drivers/media/video/gspca/sn9c20x.c b/drivers/media/video/gspca/sn9c20x.c
index ad09820..6c31e46 100644
--- a/drivers/media/video/gspca/sn9c20x.c
+++ b/drivers/media/video/gspca/sn9c20x.c
@@ -1761,7 +1761,6 @@
 			V4L2_CID_SATURATION, 0, 255, 1, 127);
 	sd->hue = v4l2_ctrl_new_std(hdl, &sd_ctrl_ops,
 			V4L2_CID_HUE, -180, 180, 1, 0);
-	v4l2_ctrl_cluster(4, &sd->brightness);
 
 	sd->gamma = v4l2_ctrl_new_std(hdl, &sd_ctrl_ops,
 			V4L2_CID_GAMMA, 0, 255, 1, 0x10);
@@ -1770,7 +1769,6 @@
 			V4L2_CID_BLUE_BALANCE, 0, 127, 1, 0x28);
 	sd->red = v4l2_ctrl_new_std(hdl, &sd_ctrl_ops,
 			V4L2_CID_RED_BALANCE, 0, 127, 1, 0x28);
-	v4l2_ctrl_cluster(2, &sd->blue);
 
 	if (sd->sensor != SENSOR_OV9655 && sd->sensor != SENSOR_SOI968 &&
 	    sd->sensor != SENSOR_OV7670 && sd->sensor != SENSOR_MT9M001 &&
@@ -1779,7 +1777,6 @@
 			V4L2_CID_HFLIP, 0, 1, 1, 0);
 		sd->vflip = v4l2_ctrl_new_std(hdl, &sd_ctrl_ops,
 			V4L2_CID_VFLIP, 0, 1, 1, 0);
-		v4l2_ctrl_cluster(2, &sd->hflip);
 	}
 
 	if (sd->sensor != SENSOR_SOI968 && sd->sensor != SENSOR_MT9VPRB &&
@@ -1794,6 +1791,20 @@
 			V4L2_CID_GAIN, 0, 28, 1, 0);
 		sd->autogain = v4l2_ctrl_new_std(hdl, &sd_ctrl_ops,
 			V4L2_CID_AUTOGAIN, 0, 1, 1, 1);
+	}
+
+	sd->jpegqual = v4l2_ctrl_new_std(hdl, &sd_ctrl_ops,
+			V4L2_CID_JPEG_COMPRESSION_QUALITY, 50, 90, 1, 80);
+	if (hdl->error) {
+		pr_err("Could not initialize controls\n");
+		return hdl->error;
+	}
+
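+	/* cluster related controls only after all of them have been created successfully */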
+	v4l2_ctrl_cluster(4, &sd->brightness);
+	v4l2_ctrl_cluster(2, &sd->blue);
+	if (sd->hflip)
+		v4l2_ctrl_cluster(2, &sd->hflip);
+	if (sd->autogain) {
 		if (sd->sensor == SENSOR_SOI968)
 			/* this sensor doesn't have the exposure control and
 			   autogain is clustered with gain instead. This works
@@ -1803,13 +1814,6 @@
 			/* Otherwise autogain is clustered with exposure. */
 			v4l2_ctrl_auto_cluster(2, &sd->autogain, 0, false);
 	}
-
-	sd->jpegqual = v4l2_ctrl_new_std(hdl, &sd_ctrl_ops,
-			V4L2_CID_JPEG_COMPRESSION_QUALITY, 50, 90, 1, 80);
-	if (hdl->error) {
-		pr_err("Could not initialize controls\n");
-		return hdl->error;
-	}
 	return 0;
 }
 
diff --git a/drivers/media/video/gspca/sonixj.c b/drivers/media/video/gspca/sonixj.c
index 4d1696d..f38faa9 100644
--- a/drivers/media/video/gspca/sonixj.c
+++ b/drivers/media/video/gspca/sonixj.c
@@ -3120,7 +3120,7 @@
 			| (SENSOR_ ## sensor << 8) \
 			| (flags)
 static const struct usb_device_id device_table[] = {
-	{USB_DEVICE(0x0458, 0x7025), BS(SN9C120, MI0360)},
+	{USB_DEVICE(0x0458, 0x7025), BSF(SN9C120, MI0360B, F_PDN_INV)},
 	{USB_DEVICE(0x0458, 0x702e), BS(SN9C120, OV7660)},
 	{USB_DEVICE(0x045e, 0x00f5), BSF(SN9C105, OV7660, F_PDN_INV)},
 	{USB_DEVICE(0x045e, 0x00f7), BSF(SN9C105, OV7660, F_PDN_INV)},
diff --git a/drivers/media/video/ivtv/ivtv-driver.c b/drivers/media/video/ivtv/ivtv-driver.c
index 057929e..5462ce2 100644
--- a/drivers/media/video/ivtv/ivtv-driver.c
+++ b/drivers/media/video/ivtv/ivtv-driver.c
@@ -866,10 +866,10 @@
 	pci_write_config_dword(pdev, 0x40, 0xffff);
 
 	IVTV_DEBUG_INFO("%d (rev %d) at %02x:%02x.%x, "
-		   "irq: %d, latency: %d, memory: 0x%lx\n",
+		   "irq: %d, latency: %d, memory: 0x%llx\n",
 		   pdev->device, pdev->revision, pdev->bus->number,
 		   PCI_SLOT(pdev->devfn), PCI_FUNC(pdev->devfn),
-		   pdev->irq, pci_latency, (unsigned long)itv->base_addr);
+		   pdev->irq, pci_latency, (u64)itv->base_addr);
 
 	return 0;
 }
@@ -1007,7 +1007,7 @@
 	itv->cxhdl.priv = itv;
 	itv->cxhdl.func = ivtv_api_func;
 
-	IVTV_DEBUG_INFO("base addr: 0x%08x\n", itv->base_addr);
+	IVTV_DEBUG_INFO("base addr: 0x%llx\n", (u64)itv->base_addr);
 
 	/* PCI Device Setup */
 	retval = ivtv_setup_pci(itv, pdev, pci_id);
@@ -1017,8 +1017,8 @@
 		goto free_mem;
 
 	/* map io memory */
-	IVTV_DEBUG_INFO("attempting ioremap at 0x%08x len 0x%08x\n",
-		   itv->base_addr + IVTV_ENCODER_OFFSET, IVTV_ENCODER_SIZE);
+	IVTV_DEBUG_INFO("attempting ioremap at 0x%llx len 0x%08x\n",
+		   (u64)itv->base_addr + IVTV_ENCODER_OFFSET, IVTV_ENCODER_SIZE);
 	itv->enc_mem = ioremap_nocache(itv->base_addr + IVTV_ENCODER_OFFSET,
 				       IVTV_ENCODER_SIZE);
 	if (!itv->enc_mem) {
@@ -1034,8 +1034,8 @@
 	}
 
 	if (itv->has_cx23415) {
-		IVTV_DEBUG_INFO("attempting ioremap at 0x%08x len 0x%08x\n",
-				itv->base_addr + IVTV_DECODER_OFFSET, IVTV_DECODER_SIZE);
+		IVTV_DEBUG_INFO("attempting ioremap at 0x%llx len 0x%08x\n",
+				(u64)itv->base_addr + IVTV_DECODER_OFFSET, IVTV_DECODER_SIZE);
 		itv->dec_mem = ioremap_nocache(itv->base_addr + IVTV_DECODER_OFFSET,
 				IVTV_DECODER_SIZE);
 		if (!itv->dec_mem) {
@@ -1056,8 +1056,8 @@
 	}
 
 	/* map registers memory */
-	IVTV_DEBUG_INFO("attempting ioremap at 0x%08x len 0x%08x\n",
-		   itv->base_addr + IVTV_REG_OFFSET, IVTV_REG_SIZE);
+	IVTV_DEBUG_INFO("attempting ioremap at 0x%llx len 0x%08x\n",
+		   (u64)itv->base_addr + IVTV_REG_OFFSET, IVTV_REG_SIZE);
 	itv->reg_mem =
 	    ioremap_nocache(itv->base_addr + IVTV_REG_OFFSET, IVTV_REG_SIZE);
 	if (!itv->reg_mem) {
diff --git a/drivers/media/video/ivtv/ivtv-driver.h b/drivers/media/video/ivtv/ivtv-driver.h
index 2e22002..a7e00f8 100644
--- a/drivers/media/video/ivtv/ivtv-driver.h
+++ b/drivers/media/video/ivtv/ivtv-driver.h
@@ -622,7 +622,7 @@
 	struct v4l2_subdev *sd_video;	/* controlling video decoder subdev */
 	struct v4l2_subdev *sd_audio;	/* controlling audio subdev */
 	struct v4l2_subdev *sd_muxer;	/* controlling audio muxer subdev */
-	u32 base_addr;                  /* PCI resource base address */
+	resource_size_t base_addr;      /* PCI resource base address */
 	volatile void __iomem *enc_mem; /* pointer to mapped encoder memory */
 	volatile void __iomem *dec_mem; /* pointer to mapped decoder memory */
 	volatile void __iomem *reg_mem; /* pointer to mapped registers */
diff --git a/drivers/media/video/mem2mem_testdev.c b/drivers/media/video/mem2mem_testdev.c
index d2dec58..3945556 100644
--- a/drivers/media/video/mem2mem_testdev.c
+++ b/drivers/media/video/mem2mem_testdev.c
@@ -110,22 +110,6 @@
 	V4L2_M2M_DST = 1,
 };
 
-/* Source and destination queue data */
-static struct m2mtest_q_data q_data[2];
-
-static struct m2mtest_q_data *get_q_data(enum v4l2_buf_type type)
-{
-	switch (type) {
-	case V4L2_BUF_TYPE_VIDEO_OUTPUT:
-		return &q_data[V4L2_M2M_SRC];
-	case V4L2_BUF_TYPE_VIDEO_CAPTURE:
-		return &q_data[V4L2_M2M_DST];
-	default:
-		BUG();
-	}
-	return NULL;
-}
-
 #define V4L2_CID_TRANS_TIME_MSEC	V4L2_CID_PRIVATE_BASE
 #define V4L2_CID_TRANS_NUM_BUFS		(V4L2_CID_PRIVATE_BASE + 1)
 
@@ -198,8 +182,26 @@
 	int			aborting;
 
 	struct v4l2_m2m_ctx	*m2m_ctx;
+
+	/* Source and destination queue data */
+	struct m2mtest_q_data   q_data[2];
 };
 
+static struct m2mtest_q_data *get_q_data(struct m2mtest_ctx *ctx,
+					 enum v4l2_buf_type type)
+{
+	switch (type) {
+	case V4L2_BUF_TYPE_VIDEO_OUTPUT:
+		return &ctx->q_data[V4L2_M2M_SRC];
+	case V4L2_BUF_TYPE_VIDEO_CAPTURE:
+		return &ctx->q_data[V4L2_M2M_DST];
+	default:
+		BUG();
+	}
+	return NULL;
+}
+
+
 static struct v4l2_queryctrl *get_ctrl(int id)
 {
 	int i;
@@ -223,7 +225,7 @@
 	int tile_w, bytes_left;
 	int width, height, bytesperline;
 
-	q_data = get_q_data(V4L2_BUF_TYPE_VIDEO_OUTPUT);
+	q_data = get_q_data(ctx, V4L2_BUF_TYPE_VIDEO_OUTPUT);
 
 	width	= q_data->width;
 	height	= q_data->height;
@@ -436,7 +438,7 @@
 	if (!vq)
 		return -EINVAL;
 
-	q_data = get_q_data(f->type);
+	q_data = get_q_data(ctx, f->type);
 
 	f->fmt.pix.width	= q_data->width;
 	f->fmt.pix.height	= q_data->height;
@@ -535,7 +537,7 @@
 	if (!vq)
 		return -EINVAL;
 
-	q_data = get_q_data(f->type);
+	q_data = get_q_data(ctx, f->type);
 	if (!q_data)
 		return -EINVAL;
 
@@ -747,7 +749,7 @@
 	struct m2mtest_q_data *q_data;
 	unsigned int size, count = *nbuffers;
 
-	q_data = get_q_data(vq->type);
+	q_data = get_q_data(ctx, vq->type);
 
 	size = q_data->width * q_data->height * q_data->fmt->depth >> 3;
 
@@ -775,7 +777,7 @@
 
 	dprintk(ctx->dev, "type: %d\n", vb->vb2_queue->type);
 
-	q_data = get_q_data(vb->vb2_queue->type);
+	q_data = get_q_data(ctx, vb->vb2_queue->type);
 
 	if (vb2_plane_size(vb, 0) < q_data->sizeimage) {
 		dprintk(ctx->dev, "%s data will not fit into plane (%lu < %lu)\n",
@@ -860,6 +862,9 @@
 	ctx->transtime = MEM2MEM_DEF_TRANSTIME;
 	ctx->num_processed = 0;
 
+	ctx->q_data[V4L2_M2M_SRC].fmt = &formats[0];
+	ctx->q_data[V4L2_M2M_DST].fmt = &formats[0];
+
 	ctx->m2m_ctx = v4l2_m2m_ctx_init(dev->m2m_dev, ctx, &queue_init);
 
 	if (IS_ERR(ctx->m2m_ctx)) {
@@ -986,9 +991,6 @@
 		goto err_m2m;
 	}
 
-	q_data[V4L2_M2M_SRC].fmt = &formats[0];
-	q_data[V4L2_M2M_DST].fmt = &formats[0];
-
 	return 0;
 
 	v4l2_m2m_release(dev->m2m_dev);
diff --git a/drivers/media/video/mx2_camera.c b/drivers/media/video/mx2_camera.c
index ded26b7..41f9a25 100644
--- a/drivers/media/video/mx2_camera.c
+++ b/drivers/media/video/mx2_camera.c
@@ -345,19 +345,6 @@
 					PRP_INTR_CH2OVF,
 		}
 	},
-	{
-		.in_fmt		= V4L2_MBUS_FMT_UYVY8_2X8,
-		.out_fmt	= V4L2_PIX_FMT_YUV420,
-		.cfg		= {
-			.channel	= 2,
-			.in_fmt		= PRP_CNTL_DATA_IN_YUV422,
-			.out_fmt	= PRP_CNTL_CH2_OUT_YUV420,
-			.src_pixel	= 0x22000888, /* YUV422 (YUYV) */
-			.irq_flags	= PRP_INTR_RDERR | PRP_INTR_CH2WERR |
-					PRP_INTR_CH2FC | PRP_INTR_LBOVF |
-					PRP_INTR_CH2OVF,
-		}
-	},
 };
 
 static struct mx2_fmt_cfg *mx27_emma_prp_get_format(
@@ -984,7 +971,6 @@
 	struct soc_camera_host *ici = to_soc_camera_host(icd->parent);
 	struct mx2_camera_dev *pcdev = ici->priv;
 	struct v4l2_mbus_config cfg = {.type = V4L2_MBUS_PARALLEL,};
-	const struct soc_camera_format_xlate *xlate;
 	unsigned long common_flags;
 	int ret;
 	int bytesperline;
@@ -1029,31 +1015,14 @@
 		return ret;
 	}
 
-	xlate = soc_camera_xlate_by_fourcc(icd, pixfmt);
-	if (!xlate) {
-		dev_warn(icd->parent, "Format %x not found\n", pixfmt);
-		return -EINVAL;
-	}
-
-	if (xlate->code == V4L2_MBUS_FMT_YUYV8_2X8) {
-		csicr1 |= CSICR1_PACK_DIR;
-		csicr1 &= ~CSICR1_SWAP16_EN;
-		dev_dbg(icd->parent, "already yuyv format, don't convert\n");
-	} else if (xlate->code == V4L2_MBUS_FMT_UYVY8_2X8) {
-		csicr1 &= ~CSICR1_PACK_DIR;
-		csicr1 |= CSICR1_SWAP16_EN;
-		dev_dbg(icd->parent, "convert uyvy mbus format into yuyv\n");
-	} else {
-		dev_warn(icd->parent, "mbus format not supported\n");
-		return -EINVAL;
-	}
-
 	if (common_flags & V4L2_MBUS_PCLK_SAMPLE_RISING)
 		csicr1 |= CSICR1_REDGE;
 	if (common_flags & V4L2_MBUS_VSYNC_ACTIVE_HIGH)
 		csicr1 |= CSICR1_SOF_POL;
 	if (common_flags & V4L2_MBUS_HSYNC_ACTIVE_HIGH)
 		csicr1 |= CSICR1_HSYNC_POL;
+	if (pcdev->platform_flags & MX2_CAMERA_SWAP16)
+		csicr1 |= CSICR1_SWAP16_EN;
 	if (pcdev->platform_flags & MX2_CAMERA_EXT_VSYNC)
 		csicr1 |= CSICR1_EXT_VSYNC;
 	if (pcdev->platform_flags & MX2_CAMERA_CCIR)
@@ -1064,6 +1033,8 @@
 		csicr1 |= CSICR1_GCLK_MODE;
 	if (pcdev->platform_flags & MX2_CAMERA_INV_DATA)
 		csicr1 |= CSICR1_INV_DATA;
+	if (pcdev->platform_flags & MX2_CAMERA_PACK_DIR_MSB)
+		csicr1 |= CSICR1_PACK_DIR;
 
 	pcdev->csicr1 = csicr1;
 
@@ -1138,8 +1109,7 @@
 		return 0;
 	}
 
-	if (code == V4L2_MBUS_FMT_YUYV8_2X8 ||
-	    code == V4L2_MBUS_FMT_UYVY8_2X8) {
+	if (code == V4L2_MBUS_FMT_YUYV8_2X8) {
 		formats++;
 		if (xlate) {
 			/*
@@ -1155,18 +1125,6 @@
 		}
 	}
 
-	if (code == V4L2_MBUS_FMT_UYVY8_2X8) {
-		formats++;
-		if (xlate) {
-			xlate->host_fmt =
-				soc_mbus_get_fmtdesc(V4L2_MBUS_FMT_YUYV8_2X8);
-			xlate->code	= code;
-			dev_dbg(dev, "Providing host format %s for sensor code %d\n",
-				xlate->host_fmt->name, code);
-			xlate++;
-		}
-	}
-
 	/* Generic pass-trough */
 	formats++;
 	if (xlate) {
diff --git a/drivers/media/video/mx3_camera.c b/drivers/media/video/mx3_camera.c
index 02d54a05..f13643d 100644
--- a/drivers/media/video/mx3_camera.c
+++ b/drivers/media/video/mx3_camera.c
@@ -511,7 +511,7 @@
 	/* ipu_csi_init_interface() */
 	csi_reg_write(mx3_cam, conf, CSI_SENS_CONF);
 
-	clk_enable(mx3_cam->clk);
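+	/* the common clock framework requires clocks to be prepared before they are enabled */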
+	clk_prepare_enable(mx3_cam->clk);
 	rate = clk_round_rate(mx3_cam->clk, mx3_cam->mclk);
 	dev_dbg(icd->parent, "Set SENS_CONF to %x, rate %ld\n", conf, rate);
 	if (rate)
@@ -552,7 +552,7 @@
 		*ichan = NULL;
 	}
 
-	clk_disable(mx3_cam->clk);
+	clk_disable_unprepare(mx3_cam->clk);
 
 	mx3_cam->icd = NULL;
 
diff --git a/drivers/media/video/pms.c b/drivers/media/video/pms.c
index af2d908..c370c2d 100644
--- a/drivers/media/video/pms.c
+++ b/drivers/media/video/pms.c
@@ -29,6 +29,7 @@
 #include <linux/ioport.h>
 #include <linux/init.h>
 #include <linux/mutex.h>
+#include <linux/slab.h>
 #include <linux/uaccess.h>
 #include <linux/isa.h>
 #include <asm/io.h>
diff --git a/drivers/media/video/s5p-fimc/fimc-core.c b/drivers/media/video/s5p-fimc/fimc-core.c
index fedcd56..92fc5a2 100644
--- a/drivers/media/video/s5p-fimc/fimc-core.c
+++ b/drivers/media/video/s5p-fimc/fimc-core.c
@@ -615,7 +615,7 @@
 	ctx->effect.type = FIMC_REG_CIIMGEFF_FIN_BYPASS;
 
 	if (!handler->error) {
-		v4l2_ctrl_cluster(3, &ctrls->colorfx);
+		v4l2_ctrl_cluster(2, &ctrls->colorfx);
 		ctrls->ready = true;
 	}
 
diff --git a/drivers/media/video/s5p-mfc/regs-mfc.h b/drivers/media/video/s5p-mfc/regs-mfc.h
index 053a8a8..a19bece 100644
--- a/drivers/media/video/s5p-mfc/regs-mfc.h
+++ b/drivers/media/video/s5p-mfc/regs-mfc.h
@@ -164,10 +164,15 @@
 								decoded pic */
 #define S5P_FIMV_SI_DISPLAY_Y_ADR	0x2010 /* luma addr of displayed pic */
 #define S5P_FIMV_SI_DISPLAY_C_ADR	0x2014 /* chroma addrof displayed pic */
+
 #define S5P_FIMV_SI_CONSUMED_BYTES	0x2018 /* Consumed number of bytes to
 							decode a frame */
 #define S5P_FIMV_SI_DISPLAY_STATUS	0x201c /* status of decoded picture */
 
+#define S5P_FIMV_SI_DECODE_Y_ADR	0x2024 /* luma addr of decoded pic */
+#define S5P_FIMV_SI_DECODE_C_ADR	0x2028 /* chroma addrof decoded pic */
+#define S5P_FIMV_SI_DECODE_STATUS	0x202c /* status of decoded picture */
+
 #define S5P_FIMV_SI_CH0_SB_ST_ADR	0x2044 /* start addr of stream buf */
 #define S5P_FIMV_SI_CH0_SB_FRM_SIZE	0x2048 /* size of stream buf */
 #define S5P_FIMV_SI_CH0_DESC_ADR	0x204c /* addr of descriptor buf */
diff --git a/drivers/media/video/s5p-mfc/s5p_mfc_dec.c b/drivers/media/video/s5p-mfc/s5p_mfc_dec.c
index c25ec02..4dd32fc 100644
--- a/drivers/media/video/s5p-mfc/s5p_mfc_dec.c
+++ b/drivers/media/video/s5p-mfc/s5p_mfc_dec.c
@@ -627,13 +627,13 @@
 
 	switch (ctrl->id) {
 	case V4L2_CID_MPEG_MFC51_VIDEO_DECODER_H264_DISPLAY_DELAY:
-		ctx->loop_filter_mpeg4 = ctrl->val;
+		ctx->display_delay = ctrl->val;
 		break;
 	case V4L2_CID_MPEG_MFC51_VIDEO_DECODER_H264_DISPLAY_DELAY_ENABLE:
 		ctx->display_delay_enable = ctrl->val;
 		break;
 	case V4L2_CID_MPEG_VIDEO_DECODER_MPEG4_DEBLOCK_FILTER:
-		ctx->display_delay = ctrl->val;
+		ctx->loop_filter_mpeg4 = ctrl->val;
 		break;
 	case V4L2_CID_MPEG_VIDEO_DECODER_SLICE_INTERFACE:
 		ctx->slice_interface = ctrl->val;
diff --git a/drivers/media/video/s5p-mfc/s5p_mfc_enc.c b/drivers/media/video/s5p-mfc/s5p_mfc_enc.c
index acedb20..03d8334 100644
--- a/drivers/media/video/s5p-mfc/s5p_mfc_enc.c
+++ b/drivers/media/video/s5p-mfc/s5p_mfc_enc.c
@@ -243,12 +243,6 @@
 		.minimum = V4L2_MPEG_VIDEO_H264_LEVEL_1_0,
 		.maximum = V4L2_MPEG_VIDEO_H264_LEVEL_4_0,
 		.default_value = V4L2_MPEG_VIDEO_H264_LEVEL_1_0,
-		.menu_skip_mask = ~(
-				(1 << V4L2_MPEG_VIDEO_H264_LEVEL_4_1) |
-				(1 << V4L2_MPEG_VIDEO_H264_LEVEL_4_2) |
-				(1 << V4L2_MPEG_VIDEO_H264_LEVEL_5_0) |
-				(1 << V4L2_MPEG_VIDEO_H264_LEVEL_5_1)
-				),
 	},
 	{
 		.id = V4L2_CID_MPEG_VIDEO_MPEG4_LEVEL,
@@ -494,7 +488,7 @@
 		.type = V4L2_CTRL_TYPE_MENU,
 		.minimum = V4L2_MPEG_VIDEO_H264_VUI_SAR_IDC_UNSPECIFIED,
 		.maximum = V4L2_MPEG_VIDEO_H264_VUI_SAR_IDC_EXTENDED,
-		.default_value = 0,
+		.default_value = V4L2_MPEG_VIDEO_H264_VUI_SAR_IDC_UNSPECIFIED,
 		.menu_skip_mask = 0,
 	},
 	{
@@ -534,7 +528,7 @@
 		.type = V4L2_CTRL_TYPE_MENU,
 		.minimum = V4L2_MPEG_VIDEO_MPEG4_PROFILE_SIMPLE,
 		.maximum = V4L2_MPEG_VIDEO_MPEG4_PROFILE_ADVANCED_SIMPLE,
-		.default_value = 0,
+		.default_value = V4L2_MPEG_VIDEO_MPEG4_PROFILE_SIMPLE,
 		.menu_skip_mask = 0,
 	},
 	{
@@ -907,6 +901,8 @@
 			mfc_err("failed to try output format\n");
 			return -EINVAL;
 		}
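+		/* clamp and align the requested resolution to the encoder's supported range */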
+		v4l_bound_align_image(&pix_fmt_mp->width, 8, 1920, 1,
+			&pix_fmt_mp->height, 4, 1080, 1, 0);
 	} else {
 		mfc_err("invalid buf type\n");
 		return -EINVAL;
diff --git a/drivers/media/video/s5p-mfc/s5p_mfc_opr.h b/drivers/media/video/s5p-mfc/s5p_mfc_opr.h
index db83836..5932d1c 100644
--- a/drivers/media/video/s5p-mfc/s5p_mfc_opr.h
+++ b/drivers/media/video/s5p-mfc/s5p_mfc_opr.h
@@ -57,10 +57,12 @@
 					S5P_FIMV_SI_DISPLAY_Y_ADR) << \
 					MFC_OFFSET_SHIFT)
 #define s5p_mfc_get_dec_y_adr()		(readl(dev->regs_base + \
-					S5P_FIMV_SI_DISPLAY_Y_ADR) << \
+					S5P_FIMV_SI_DECODE_Y_ADR) << \
 					MFC_OFFSET_SHIFT)
 #define s5p_mfc_get_dspl_status()	readl(dev->regs_base + \
 						S5P_FIMV_SI_DISPLAY_STATUS)
+#define s5p_mfc_get_dec_status()	readl(dev->regs_base + \
+						S5P_FIMV_SI_DECODE_STATUS)
 #define s5p_mfc_get_frame_type()	(readl(dev->regs_base + \
 						S5P_FIMV_DECODE_FRAME_TYPE) \
 					& S5P_FIMV_DECODE_FRAME_MASK)
diff --git a/drivers/media/video/s5p-mfc/s5p_mfc_shm.h b/drivers/media/video/s5p-mfc/s5p_mfc_shm.h
index 764eac6..cf962a4 100644
--- a/drivers/media/video/s5p-mfc/s5p_mfc_shm.h
+++ b/drivers/media/video/s5p-mfc/s5p_mfc_shm.h
@@ -13,8 +13,7 @@
 #ifndef S5P_MFC_SHM_H_
 #define S5P_MFC_SHM_H_
 
-enum MFC_SHM_OFS
-{
+enum MFC_SHM_OFS {
 	EXTENEDED_DECODE_STATUS	= 0x00,	/* D */
 	SET_FRAME_TAG		= 0x04, /* D */
 	GET_FRAME_TAG_TOP	= 0x08, /* D */
diff --git a/drivers/media/video/smiapp/Kconfig b/drivers/media/video/smiapp/Kconfig
index f7b35ff..fb99ff1 100644
--- a/drivers/media/video/smiapp/Kconfig
+++ b/drivers/media/video/smiapp/Kconfig
@@ -1,6 +1,6 @@
 config VIDEO_SMIAPP
 	tristate "SMIA++/SMIA sensor support"
-	depends on I2C && VIDEO_V4L2 && VIDEO_V4L2_SUBDEV_API
+	depends on I2C && VIDEO_V4L2 && VIDEO_V4L2_SUBDEV_API && HAVE_CLK
 	select VIDEO_SMIAPP_PLL
 	---help---
 	  This is a generic driver for SMIA++/SMIA camera modules.
diff --git a/drivers/media/video/smiapp/smiapp-core.c b/drivers/media/video/smiapp/smiapp-core.c
index f518026..e8c93c8 100644
--- a/drivers/media/video/smiapp/smiapp-core.c
+++ b/drivers/media/video/smiapp/smiapp-core.c
@@ -32,6 +32,7 @@
 #include <linux/gpio.h>
 #include <linux/module.h>
 #include <linux/regulator/consumer.h>
+#include <linux/slab.h>
 #include <linux/v4l2-mediabus.h>
 #include <media/v4l2-device.h>
 
diff --git a/drivers/media/video/tuner-core.c b/drivers/media/video/tuner-core.c
index 3e050e1..1ad5ab6 100644
--- a/drivers/media/video/tuner-core.c
+++ b/drivers/media/video/tuner-core.c
@@ -1178,7 +1178,7 @@
 		return 0;
 	if (vt->type == t->mode && analog_ops->get_afc)
 		vt->afc = analog_ops->get_afc(&t->fe);
-	if (t->mode != V4L2_TUNER_RADIO) {
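+	/* report capabilities for the tuner type being queried, not the currently selected mode */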
+	if (vt->type != V4L2_TUNER_RADIO) {
 		vt->capability |= V4L2_TUNER_CAP_NORM;
 		vt->rangelow = tv_range[0] * 16;
 		vt->rangehigh = tv_range[1] * 16;
diff --git a/drivers/media/video/v4l2-dev.c b/drivers/media/video/v4l2-dev.c
index 5ccbd46..83dbb2d 100644
--- a/drivers/media/video/v4l2-dev.c
+++ b/drivers/media/video/v4l2-dev.c
@@ -656,7 +656,7 @@
 	SET_VALID_IOCTL(ops, VIDIOC_TRY_ENCODER_CMD, vidioc_try_encoder_cmd);
 	SET_VALID_IOCTL(ops, VIDIOC_DECODER_CMD, vidioc_decoder_cmd);
 	SET_VALID_IOCTL(ops, VIDIOC_TRY_DECODER_CMD, vidioc_try_decoder_cmd);
-	if (ops->vidioc_g_parm || vdev->current_norm)
+	if (ops->vidioc_g_parm || vdev->vfl_type == VFL_TYPE_GRABBER)
 		set_bit(_IOC_NR(VIDIOC_G_PARM), valid_ioctls);
 	SET_VALID_IOCTL(ops, VIDIOC_S_PARM, vidioc_s_parm);
 	SET_VALID_IOCTL(ops, VIDIOC_G_TUNER, vidioc_g_tuner);
@@ -679,6 +679,8 @@
 	SET_VALID_IOCTL(ops, VIDIOC_QUERY_DV_PRESET, vidioc_query_dv_preset);
 	SET_VALID_IOCTL(ops, VIDIOC_S_DV_TIMINGS, vidioc_s_dv_timings);
 	SET_VALID_IOCTL(ops, VIDIOC_G_DV_TIMINGS, vidioc_g_dv_timings);
+	SET_VALID_IOCTL(ops, VIDIOC_ENUM_DV_TIMINGS, vidioc_enum_dv_timings);
+	SET_VALID_IOCTL(ops, VIDIOC_QUERY_DV_TIMINGS, vidioc_query_dv_timings);
 	/* yes, really vidioc_subscribe_event */
 	SET_VALID_IOCTL(ops, VIDIOC_DQEVENT, vidioc_subscribe_event);
 	SET_VALID_IOCTL(ops, VIDIOC_SUBSCRIBE_EVENT, vidioc_subscribe_event);
diff --git a/drivers/media/video/v4l2-ioctl.c b/drivers/media/video/v4l2-ioctl.c
index 91be4e8..d7fa896 100644
--- a/drivers/media/video/v4l2-ioctl.c
+++ b/drivers/media/video/v4l2-ioctl.c
@@ -1680,6 +1680,7 @@
 				break;
 
 			ret = 0;
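+			/* provide a default readbuffers count when the driver has no g_parm of its own */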
+			p->parm.capture.readbuffers = 2;
 			if (ops->vidioc_g_std)
 				ret = ops->vidioc_g_std(file, fh, &std);
 			if (ret == 0)
diff --git a/drivers/media/video/vino.c b/drivers/media/video/vino.c
index 4d7391e..aae1720 100644
--- a/drivers/media/video/vino.c
+++ b/drivers/media/video/vino.c
@@ -2561,7 +2561,7 @@
 	} else if (vino_drvdata->decoder
 		   && (vino_drvdata->decoder_owner == VINO_NO_CHANNEL)) {
 		int input;
-		int data_norm;
+		int data_norm = 0;
 		v4l2_std_id norm;
 
 		input = VINO_INPUT_COMPOSITE;
@@ -2651,7 +2651,7 @@
 		}
 
 		if (vino_drvdata->decoder_owner == vcs->channel) {
-			int data_norm;
+			int data_norm = 0;
 			v4l2_std_id norm;
 
 			ret = decoder_call(video, s_routing,
diff --git a/drivers/media/video/vivi.c b/drivers/media/video/vivi.c
index 0960d7f..08c1024 100644
--- a/drivers/media/video/vivi.c
+++ b/drivers/media/video/vivi.c
@@ -1149,10 +1149,14 @@
 vivi_read(struct file *file, char __user *data, size_t count, loff_t *ppos)
 {
 	struct vivi_dev *dev = video_drvdata(file);
+	int err;
 
 	dprintk(dev, 1, "read called\n");
-	return vb2_read(&dev->vb_vidq, data, count, ppos,
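+	/* serialize the read against other queue operations on this device */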
+	mutex_lock(&dev->mutex);
+	err = vb2_read(&dev->vb_vidq, data, count, ppos,
 		       file->f_flags & O_NONBLOCK);
+	mutex_unlock(&dev->mutex);
+	return err;
 }
 
 static unsigned int
diff --git a/drivers/message/fusion/mptbase.c b/drivers/message/fusion/mptbase.c
index a5c591f..d99db56 100644
--- a/drivers/message/fusion/mptbase.c
+++ b/drivers/message/fusion/mptbase.c
@@ -1653,7 +1653,6 @@
 	unsigned long	 port;
 	u32		 msize;
 	u32		 psize;
-	u8		 revision;
 	int		 r = -ENODEV;
 	struct pci_dev *pdev;
 
@@ -1670,8 +1669,6 @@
 		return r;
 	}
 
-	pci_read_config_byte(pdev, PCI_CLASS_REVISION, &revision);
-
 	if (sizeof(dma_addr_t) > 4) {
 		const uint64_t required_mask = dma_get_required_mask
 		    (&pdev->dev);
@@ -1779,7 +1776,6 @@
 	MPT_ADAPTER	*ioc;
 	u8		 cb_idx;
 	int		 r = -ENODEV;
-	u8		 revision;
 	u8		 pcixcmd;
 	static int	 mpt_ids = 0;
 #ifdef CONFIG_PROC_FS
@@ -1887,8 +1883,8 @@
 	dinitprintk(ioc, printk(MYIOC_s_INFO_FMT "facts @ %p, pfacts[0] @ %p\n",
 	    ioc->name, &ioc->facts, &ioc->pfacts[0]));
 
-	pci_read_config_byte(pdev, PCI_CLASS_REVISION, &revision);
-	mpt_get_product_name(pdev->vendor, pdev->device, revision, ioc->prod_name);
+	mpt_get_product_name(pdev->vendor, pdev->device, pdev->revision,
+			     ioc->prod_name);
 
 	switch (pdev->device)
 	{
@@ -1903,7 +1899,7 @@
 		break;
 
 	case MPI_MANUFACTPAGE_DEVICEID_FC929X:
-		if (revision < XL_929) {
+		if (pdev->revision < XL_929) {
 			/* 929X Chip Fix. Set Split transactions level
 		 	* for PCIX. Set MOST bits to zero.
 		 	*/
@@ -1934,7 +1930,7 @@
 		/* 1030 Chip Fix. Disable Split transactions
 		 * for PCIX. Set MOST bits to zero if Rev < C0( = 8).
 		 */
-		if (revision < C0_1030) {
+		if (pdev->revision < C0_1030) {
 			pci_read_config_byte(pdev, 0x6a, &pcixcmd);
 			pcixcmd &= 0x8F;
 			pci_write_config_byte(pdev, 0x6a, pcixcmd);
@@ -6483,6 +6479,7 @@
 				printk(MYIOC_s_INFO_FMT "%s: host reset in"
 					" progress mpt_config timed out.!!\n",
 					__func__, ioc->name);
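+				/* drop the command mutex before returning on timeout */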
+				mutex_unlock(&ioc->mptbase_cmds.mutex);
 				return -EFAULT;
 			}
 			spin_unlock_irqrestore(&ioc->taskmgmt_lock, flags);
diff --git a/drivers/message/fusion/mptctl.c b/drivers/message/fusion/mptctl.c
index 6e6e16a..b383b69 100644
--- a/drivers/message/fusion/mptctl.c
+++ b/drivers/message/fusion/mptctl.c
@@ -1250,7 +1250,6 @@
 	int			iocnum;
 	unsigned int		port;
 	int			cim_rev;
-	u8			revision;
 	struct scsi_device 	*sdev;
 	VirtDevice		*vdevice;
 
@@ -1324,8 +1323,7 @@
 	pdev = (struct pci_dev *) ioc->pcidev;
 
 	karg->pciId = pdev->device;
-	pci_read_config_byte(pdev, PCI_CLASS_REVISION, &revision);
-	karg->hwRev = revision;
+	karg->hwRev = pdev->revision;
 	karg->subSystemDevice = pdev->subsystem_device;
 	karg->subSystemVendor = pdev->subsystem_vendor;
 
diff --git a/drivers/mfd/Kconfig b/drivers/mfd/Kconfig
index f4b4dad..e129c82 100644
--- a/drivers/mfd/Kconfig
+++ b/drivers/mfd/Kconfig
@@ -106,6 +106,19 @@
 	  To compile this driver as a module, choose M here: the
 	  module will be called ucb1400_core.
 
+config MFD_LM3533
+	tristate "LM3533 Lighting Power chip"
+	depends on I2C
+	select MFD_CORE
+	select REGMAP_I2C
+	help
+	  Say yes here to enable support for National Semiconductor / TI
+	  LM3533 Lighting Power chips.
+
+	  This driver provides common support for accessing the device;
+	  additional drivers must be enabled in order to use the LED,
+	  backlight or ambient-light-sensor functionality of the device.
+
 config TPS6105X
 	tristate "TPS61050/61052 Boost Converters"
 	depends on I2C
@@ -177,8 +190,8 @@
 	bool "TPS65910 Power Management chip"
 	depends on I2C=y && GPIOLIB
 	select MFD_CORE
-	select GPIO_TPS65910
 	select REGMAP_I2C
+	select IRQ_DOMAIN
 	help
 	  if you say yes here you get support for the TPS65910 series of
 	  Power Management chips.
@@ -409,6 +422,19 @@
 	  individual components like LCD backlight, LEDs, GPIOs and Kepad
 	  under the corresponding menus.
 
+config MFD_MAX77693
+	bool "Maxim Semiconductor MAX77693 PMIC Support"
+	depends on I2C=y && GENERIC_HARDIRQS
+	select MFD_CORE
+	select REGMAP_I2C
+	help
+	  Say yes here to add support for the Maxim Semiconductor MAX77693.
+	  This is a companion Power Management IC with Flash, Haptic, Charger,
+	  and MUIC (Micro USB Interface Controller) controls on chip.
+	  This driver provides common support for accessing the device;
+	  additional drivers must be enabled in order to use the functionality
+	  of the device.
+
 config MFD_MAX8925
 	bool "Maxim Semiconductor MAX8925 PMIC Support"
 	depends on I2C=y && GENERIC_HARDIRQS
@@ -454,9 +480,9 @@
 	 of the device
 
 config MFD_WM8400
-	tristate "Support Wolfson Microelectronics WM8400"
+	bool "Support Wolfson Microelectronics WM8400"
 	select MFD_CORE
-	depends on I2C
+	depends on I2C=y
 	select REGMAP_I2C
 	help
 	  Support for the Wolfson Microelecronics WM8400 PMIC and audio
@@ -473,6 +499,7 @@
 	select MFD_CORE
 	select MFD_WM831X
 	select REGMAP_I2C
+	select IRQ_DOMAIN
 	depends on I2C=y && GENERIC_HARDIRQS
 	help
 	  Support for the Wolfson Microelecronics WM831x and WM832x PMICs
@@ -485,6 +512,7 @@
 	select MFD_CORE
 	select MFD_WM831X
 	select REGMAP_SPI
+	select IRQ_DOMAIN
 	depends on SPI_MASTER && GENERIC_HARDIRQS
 	help
 	  Support for the Wolfson Microelecronics WM831x and WM832x PMICs
@@ -597,17 +625,32 @@
 	tristate
 
 config MFD_MC13XXX
-	tristate "Support Freescale MC13783 and MC13892"
-	depends on SPI_MASTER
+	tristate
+	depends on SPI_MASTER || I2C
 	select MFD_CORE
 	select MFD_MC13783
 	help
-	  Support for the Freescale (Atlas) PMIC and audio CODECs
-	  MC13783 and MC13892.
-	  This driver provides common support for accessing  the device,
+	  Enable support for the Freescale MC13783 and MC13892 PMICs.
+	  This driver provides common support for accessing the device,
 	  additional drivers must be enabled in order to use the
 	  functionality of the device.
 
+config MFD_MC13XXX_SPI
+	tristate "Freescale MC13783 and MC13892 SPI interface"
+	depends on SPI_MASTER
+	select REGMAP_SPI
+	select MFD_MC13XXX
+	help
+	  Select this if your MC13xxx is connected via an SPI bus.
+
+config MFD_MC13XXX_I2C
+	tristate "Freescale MC13892 I2C interface"
+	depends on I2C
+	select REGMAP_I2C
+	select MFD_MC13XXX
+	help
+	  Select this if your MC13xxx is connected via an I2C bus.
+
 config ABX500_CORE
 	bool "ST-Ericsson ABX500 Mixed Signal Circuit register functions"
 	default y if ARCH_U300 || ARCH_U8500
@@ -651,7 +694,7 @@
 
 config AB8500_CORE
 	bool "ST-Ericsson AB8500 Mixed Signal Power Management chip"
-	depends on GENERIC_HARDIRQS && ABX500_CORE
+	depends on GENERIC_HARDIRQS && ABX500_CORE && MFD_DB8500_PRCMU
 	select MFD_CORE
 	help
 	  Select this option to enable access to AB8500 power management
@@ -722,6 +765,16 @@
 	  LPC bridge function of the Intel SCH provides support for
 	  System Management Bus and General Purpose I/O.
 
+config LPC_ICH
+	tristate "Intel ICH LPC"
+	depends on PCI
+	select MFD_CORE
+	help
+	  The LPC bridge function of the Intel ICH provides support for
+	  many functional units. This driver provides needed support for
+	  other drivers to control these functions, currently GPIO and
+	  watchdog.
+
 config MFD_RDC321X
 	tristate "Support for RDC-R321x southbridge"
 	select MFD_CORE
@@ -854,6 +907,11 @@
 	  Additional drivers must be enabled in order to use the
 	  different functionality of the device.
 
+config MFD_STA2X11
+	bool "STA2X11 multi function device support"
+	depends on STA2X11
+	select MFD_CORE
+
 config MFD_ANATOP
 	bool "Support for Freescale i.MX on-chip ANATOP controller"
 	depends on SOC_IMX6Q
diff --git a/drivers/mfd/Makefile b/drivers/mfd/Makefile
index 43672b8..75f6ed6 100644
--- a/drivers/mfd/Makefile
+++ b/drivers/mfd/Makefile
@@ -15,6 +15,7 @@
 obj-$(CONFIG_MFD_DM355EVM_MSP)	+= dm355evm_msp.o
 obj-$(CONFIG_MFD_TI_SSP)	+= ti-ssp.o
 
+obj-$(CONFIG_MFD_STA2X11)	+= sta2x11-mfd.o
 obj-$(CONFIG_MFD_STMPE)		+= stmpe.o
 obj-$(CONFIG_STMPE_I2C)		+= stmpe-i2c.o
 obj-$(CONFIG_STMPE_SPI)		+= stmpe-spi.o
@@ -54,6 +55,8 @@
 obj-$(CONFIG_TWL6040_CORE)	+= twl6040-core.o twl6040-irq.o
 
 obj-$(CONFIG_MFD_MC13XXX)	+= mc13xxx-core.o
+obj-$(CONFIG_MFD_MC13XXX_SPI)	+= mc13xxx-spi.o
+obj-$(CONFIG_MFD_MC13XXX_I2C)	+= mc13xxx-i2c.o
 
 obj-$(CONFIG_MFD_CORE)		+= mfd-core.o
 
@@ -75,6 +78,7 @@
 obj-$(CONFIG_MFD_DA9052_SPI)	+= da9052-spi.o
 obj-$(CONFIG_MFD_DA9052_I2C)	+= da9052-i2c.o
 
+obj-$(CONFIG_MFD_MAX77693)	+= max77693.o max77693-irq.o
 max8925-objs			:= max8925-core.o max8925-i2c.o
 obj-$(CONFIG_MFD_MAX8925)	+= max8925.o
 obj-$(CONFIG_MFD_MAX8997)	+= max8997.o max8997-irq.o
@@ -87,15 +91,15 @@
 obj-$(CONFIG_ABX500_CORE)	+= abx500-core.o
 obj-$(CONFIG_AB3100_CORE)	+= ab3100-core.o
 obj-$(CONFIG_AB3100_OTP)	+= ab3100-otp.o
-obj-$(CONFIG_AB8500_CORE)	+= ab8500-core.o ab8500-sysctrl.o
 obj-$(CONFIG_AB8500_DEBUG)	+= ab8500-debugfs.o
 obj-$(CONFIG_AB8500_GPADC)	+= ab8500-gpadc.o
 obj-$(CONFIG_MFD_DB8500_PRCMU)	+= db8500-prcmu.o
-# ab8500-i2c need to come after db8500-prcmu (which provides the channel)
-obj-$(CONFIG_AB8500_I2C_CORE)	+= ab8500-i2c.o
+# ab8500-core needs to come after db8500-prcmu (which provides the channel)
+obj-$(CONFIG_AB8500_CORE)	+= ab8500-core.o ab8500-sysctrl.o
 obj-$(CONFIG_MFD_TIMBERDALE)    += timberdale.o
 obj-$(CONFIG_PMIC_ADP5520)	+= adp5520.o
 obj-$(CONFIG_LPC_SCH)		+= lpc_sch.o
+obj-$(CONFIG_LPC_ICH)		+= lpc_ich.o
 obj-$(CONFIG_MFD_RDC321X)	+= rdc321x-southbridge.o
 obj-$(CONFIG_MFD_JANZ_CMODIO)	+= janz-cmodio.o
 obj-$(CONFIG_MFD_JZ4740_ADC)	+= jz4740-adc.o
diff --git a/drivers/mfd/ab8500-core.c b/drivers/mfd/ab8500-core.c
index 1f08704..dac0e29 100644
--- a/drivers/mfd/ab8500-core.c
+++ b/drivers/mfd/ab8500-core.c
@@ -18,7 +18,10 @@
 #include <linux/mfd/core.h>
 #include <linux/mfd/abx500.h>
 #include <linux/mfd/abx500/ab8500.h>
+#include <linux/mfd/dbx500-prcmu.h>
 #include <linux/regulator/ab8500.h>
+#include <linux/of.h>
+#include <linux/of_device.h>
 
 /*
  * Interrupt register offsets
@@ -91,12 +94,24 @@
 #define AB8500_IT_MASK23_REG		0x56
 #define AB8500_IT_MASK24_REG		0x57
 
+/*
+ * latch hierarchy registers
+ */
+#define AB8500_IT_LATCHHIER1_REG	0x60
+#define AB8500_IT_LATCHHIER2_REG	0x61
+#define AB8500_IT_LATCHHIER3_REG	0x62
+
+#define AB8500_IT_LATCHHIER_NUM		3
+
 #define AB8500_REV_REG			0x80
 #define AB8500_IC_NAME_REG		0x82
 #define AB8500_SWITCH_OFF_STATUS	0x00
 
 #define AB8500_TURN_ON_STATUS		0x00
 
+static bool no_bm; /* No battery management */
+module_param(no_bm, bool, S_IRUGO);
+
 #define AB9540_MODEM_CTRL2_REG			0x23
 #define AB9540_MODEM_CTRL2_SWDBBRSTN_BIT	BIT(2)
 
@@ -125,6 +140,41 @@
 	[AB8500_VERSION_AB8540] = "AB8540",
 };
 
+static int ab8500_i2c_write(struct ab8500 *ab8500, u16 addr, u8 data)
+{
+	int ret;
+
+	ret = prcmu_abb_write((u8)(addr >> 8), (u8)(addr & 0xFF), &data, 1);
+	if (ret < 0)
+		dev_err(ab8500->dev, "prcmu i2c error %d\n", ret);
+	return ret;
+}
+
+static int ab8500_i2c_write_masked(struct ab8500 *ab8500, u16 addr, u8 mask,
+	u8 data)
+{
+	int ret;
+
+	ret = prcmu_abb_write_masked((u8)(addr >> 8), (u8)(addr & 0xFF), &data,
+		&mask, 1);
+	if (ret < 0)
+		dev_err(ab8500->dev, "prcmu i2c error %d\n", ret);
+	return ret;
+}
+
+static int ab8500_i2c_read(struct ab8500 *ab8500, u16 addr)
+{
+	int ret;
+	u8 data;
+
+	ret = prcmu_abb_read((u8)(addr >> 8), (u8)(addr & 0xFF), &data, 1);
+	if (ret < 0) {
+		dev_err(ab8500->dev, "prcmu i2c error %d\n", ret);
+		return ret;
+	}
+	return (int)data;
+}
+
 static int ab8500_get_chip_id(struct device *dev)
 {
 	struct ab8500 *ab8500;
@@ -161,9 +211,13 @@
 static int ab8500_set_register(struct device *dev, u8 bank,
 	u8 reg, u8 value)
 {
+	int ret;
 	struct ab8500 *ab8500 = dev_get_drvdata(dev->parent);
 
-	return set_register_interruptible(ab8500, bank, reg, value);
+	atomic_inc(&ab8500->transfer_ongoing);
+	ret = set_register_interruptible(ab8500, bank, reg, value);
+	atomic_dec(&ab8500->transfer_ongoing);
+	return ret;
 }
 
 static int get_register_interruptible(struct ab8500 *ab8500, u8 bank,
@@ -192,9 +246,13 @@
 static int ab8500_get_register(struct device *dev, u8 bank,
 	u8 reg, u8 *value)
 {
+	int ret;
 	struct ab8500 *ab8500 = dev_get_drvdata(dev->parent);
 
-	return get_register_interruptible(ab8500, bank, reg, value);
+	atomic_inc(&ab8500->transfer_ongoing);
+	ret = get_register_interruptible(ab8500, bank, reg, value);
+	atomic_dec(&ab8500->transfer_ongoing);
+	return ret;
 }
 
 static int mask_and_set_register_interruptible(struct ab8500 *ab8500, u8 bank,
@@ -241,11 +299,14 @@
 static int ab8500_mask_and_set_register(struct device *dev,
 	u8 bank, u8 reg, u8 bitmask, u8 bitvalues)
 {
+	int ret;
 	struct ab8500 *ab8500 = dev_get_drvdata(dev->parent);
 
-	return mask_and_set_register_interruptible(ab8500, bank, reg,
-		bitmask, bitvalues);
-
+	atomic_inc(&ab8500->transfer_ongoing);
+	ret = mask_and_set_register_interruptible(ab8500, bank, reg,
+						 bitmask, bitvalues);
+	atomic_dec(&ab8500->transfer_ongoing);
+	return ret;
 }
 
 static struct abx500_ops ab8500_ops = {
@@ -264,6 +325,7 @@
 	struct ab8500 *ab8500 = irq_data_get_irq_chip_data(data);
 
 	mutex_lock(&ab8500->irq_lock);
+	atomic_inc(&ab8500->transfer_ongoing);
 }
 
 static void ab8500_irq_sync_unlock(struct irq_data *data)
@@ -292,7 +354,7 @@
 		reg = AB8500_IT_MASK1_REG + ab8500->irq_reg_offset[i];
 		set_register_interruptible(ab8500, AB8500_INTERRUPT, reg, new);
 	}
-
+	atomic_dec(&ab8500->transfer_ongoing);
 	mutex_unlock(&ab8500->irq_lock);
 }
 
@@ -325,6 +387,90 @@
 	.irq_unmask		= ab8500_irq_unmask,
 };
 
+static int ab8500_handle_hierarchical_line(struct ab8500 *ab8500,
+					int latch_offset, u8 latch_val)
+{
+	int int_bit = __ffs(latch_val);
+	int line, i;
+
+	do {
+		int_bit = __ffs(latch_val);
+
+		for (i = 0; i < ab8500->mask_size; i++)
+			if (ab8500->irq_reg_offset[i] == latch_offset)
+				break;
+
+		if (i >= ab8500->mask_size) {
+			dev_err(ab8500->dev, "Register offset 0x%2x not declared\n",
+					latch_offset);
+			return -ENXIO;
+		}
+
+		line = (i << 3) + int_bit;
+		latch_val &= ~(1 << int_bit);
+
+		handle_nested_irq(ab8500->irq_base + line);
+	} while (latch_val);
+
+	return 0;
+}
+
+static int ab8500_handle_hierarchical_latch(struct ab8500 *ab8500,
+					int hier_offset, u8 hier_val)
+{
+	int latch_bit, status;
+	u8 latch_offset, latch_val;
+
+	do {
+		latch_bit = __ffs(hier_val);
+		latch_offset = (hier_offset << 3) + latch_bit;
+
+		/* Fix inconsistent ITFromLatch25 bit mapping... */
+		if (unlikely(latch_offset == 17))
+			latch_offset = 24;
+
+		status = get_register_interruptible(ab8500,
+				AB8500_INTERRUPT,
+				AB8500_IT_LATCH1_REG + latch_offset,
+				&latch_val);
+		if (status < 0 || latch_val == 0)
+			goto discard;
+
+		status = ab8500_handle_hierarchical_line(ab8500,
+				latch_offset, latch_val);
+		if (status < 0)
+			return status;
+discard:
+		hier_val &= ~(1 << latch_bit);
+	} while (hier_val);
+
+	return 0;
+}
+
+static irqreturn_t ab8500_hierarchical_irq(int irq, void *dev)
+{
+	struct ab8500 *ab8500 = dev;
+	u8 i;
+
+	dev_vdbg(ab8500->dev, "interrupt\n");
+
+	/*  Hierarchical interrupt version */
+	for (i = 0; i < AB8500_IT_LATCHHIER_NUM; i++) {
+		int status;
+		u8 hier_val;
+
+		status = get_register_interruptible(ab8500, AB8500_INTERRUPT,
+			AB8500_IT_LATCHHIER1_REG + i, &hier_val);
+		if (status < 0 || hier_val == 0)
+			continue;
+
+		status = ab8500_handle_hierarchical_latch(ab8500, i, hier_val);
+		if (status < 0)
+			break;
+	}
+	return IRQ_HANDLED;
+}
+
 static irqreturn_t ab8500_irq(int irq, void *dev)
 {
 	struct ab8500 *ab8500 = dev;
@@ -332,6 +478,8 @@
 
 	dev_vdbg(ab8500->dev, "interrupt\n");
 
+	atomic_inc(&ab8500->transfer_ongoing);
+
 	for (i = 0; i < ab8500->mask_size; i++) {
 		int regoffset = ab8500->irq_reg_offset[i];
 		int status;
@@ -355,9 +503,10 @@
 
 			handle_nested_irq(ab8500->irq_base + line);
 			value &= ~(1 << bit);
+
 		} while (value);
 	}
-
+	atomic_dec(&ab8500->transfer_ongoing);
 	return IRQ_HANDLED;
 }
 
@@ -411,6 +560,14 @@
 	}
 }
 
+int ab8500_suspend(struct ab8500 *ab8500)
+{
+	if (atomic_read(&ab8500->transfer_ongoing))
+		return -EINVAL;
+	else
+		return 0;
+}
+
 /* AB8500 GPIO Resources */
 static struct resource __devinitdata ab8500_gpio_resources[] = {
 	{
@@ -744,6 +901,39 @@
 	},
 };
 
+static struct resource __devinitdata ab8505_iddet_resources[] = {
+	{
+		.name  = "KeyDeglitch",
+		.start = AB8505_INT_KEYDEGLITCH,
+		.end   = AB8505_INT_KEYDEGLITCH,
+		.flags = IORESOURCE_IRQ,
+	},
+	{
+		.name  = "KP",
+		.start = AB8505_INT_KP,
+		.end   = AB8505_INT_KP,
+		.flags = IORESOURCE_IRQ,
+	},
+	{
+		.name  = "IKP",
+		.start = AB8505_INT_IKP,
+		.end   = AB8505_INT_IKP,
+		.flags = IORESOURCE_IRQ,
+	},
+	{
+		.name  = "IKR",
+		.start = AB8505_INT_IKR,
+		.end   = AB8505_INT_IKR,
+		.flags = IORESOURCE_IRQ,
+	},
+	{
+		.name  = "KeyStuck",
+		.start = AB8505_INT_KEYSTUCK,
+		.end   = AB8505_INT_KEYSTUCK,
+		.flags = IORESOURCE_IRQ,
+	},
+};
+
 static struct resource __devinitdata ab8500_temp_resources[] = {
 	{
 		.name  = "AB8500_TEMP_WARM",
@@ -778,35 +968,11 @@
 		.resources = ab8500_rtc_resources,
 	},
 	{
-		.name = "ab8500-charger",
-		.num_resources = ARRAY_SIZE(ab8500_charger_resources),
-		.resources = ab8500_charger_resources,
-	},
-	{
-		.name = "ab8500-btemp",
-		.num_resources = ARRAY_SIZE(ab8500_btemp_resources),
-		.resources = ab8500_btemp_resources,
-	},
-	{
-		.name = "ab8500-fg",
-		.num_resources = ARRAY_SIZE(ab8500_fg_resources),
-		.resources = ab8500_fg_resources,
-	},
-	{
-		.name = "ab8500-chargalg",
-		.num_resources = ARRAY_SIZE(ab8500_chargalg_resources),
-		.resources = ab8500_chargalg_resources,
-	},
-	{
 		.name = "ab8500-acc-det",
 		.num_resources = ARRAY_SIZE(ab8500_av_acc_detect_resources),
 		.resources = ab8500_av_acc_detect_resources,
 	},
 	{
-		.name = "ab8500-codec",
-	},
-
-	{
 		.name = "ab8500-poweron-key",
 		.num_resources = ARRAY_SIZE(ab8500_poweronkey_db_resources),
 		.resources = ab8500_poweronkey_db_resources,
@@ -834,6 +1000,29 @@
 	},
 };
 
+static struct mfd_cell __devinitdata ab8500_bm_devs[] = {
+	{
+		.name = "ab8500-charger",
+		.num_resources = ARRAY_SIZE(ab8500_charger_resources),
+		.resources = ab8500_charger_resources,
+	},
+	{
+		.name = "ab8500-btemp",
+		.num_resources = ARRAY_SIZE(ab8500_btemp_resources),
+		.resources = ab8500_btemp_resources,
+	},
+	{
+		.name = "ab8500-fg",
+		.num_resources = ARRAY_SIZE(ab8500_fg_resources),
+		.resources = ab8500_fg_resources,
+	},
+	{
+		.name = "ab8500-chargalg",
+		.num_resources = ARRAY_SIZE(ab8500_chargalg_resources),
+		.resources = ab8500_chargalg_resources,
+	},
+};
+
 static struct mfd_cell __devinitdata ab8500_devs[] = {
 	{
 		.name = "ab8500-gpio",
@@ -845,6 +1034,9 @@
 		.num_resources = ARRAY_SIZE(ab8500_usb_resources),
 		.resources = ab8500_usb_resources,
 	},
+	{
+		.name = "ab8500-codec",
+	},
 };
 
 static struct mfd_cell __devinitdata ab9540_devs[] = {
@@ -858,6 +1050,18 @@
 		.num_resources = ARRAY_SIZE(ab8500_usb_resources),
 		.resources = ab8500_usb_resources,
 	},
+	{
+		.name = "ab9540-codec",
+	},
+};
+
+/* Device list common to ab9540 and ab8505 */
+static struct mfd_cell __devinitdata ab9540_ab8505_devs[] = {
+	{
+		.name = "ab-iddet",
+		.num_resources = ARRAY_SIZE(ab8505_iddet_resources),
+		.resources = ab8505_iddet_resources,
+	},
 };
 
 static ssize_t show_chip_id(struct device *dev,
@@ -1003,18 +1207,66 @@
 	.attrs	= ab9540_sysfs_entries,
 };
 
-int __devinit ab8500_init(struct ab8500 *ab8500, enum ab8500_version version)
+static const struct of_device_id ab8500_match[] = {
+	{
+		.compatible = "stericsson,ab8500",
+		.data = (void *)AB8500_VERSION_AB8500,
+	},
+	{},
+};
+
+static int __devinit ab8500_probe(struct platform_device *pdev)
 {
-	struct ab8500_platform_data *plat = dev_get_platdata(ab8500->dev);
+	struct ab8500_platform_data *plat = dev_get_platdata(&pdev->dev);
+	const struct platform_device_id *platid = platform_get_device_id(pdev);
+	enum ab8500_version version = AB8500_VERSION_UNDEFINED;
+	struct device_node *np = pdev->dev.of_node;
+	struct ab8500 *ab8500;
+	struct resource *resource;
 	int ret;
 	int i;
 	u8 value;
 
+	ab8500 = kzalloc(sizeof *ab8500, GFP_KERNEL);
+	if (!ab8500)
+		return -ENOMEM;
+
 	if (plat)
 		ab8500->irq_base = plat->irq_base;
+	else if (np)
+		ret = of_property_read_u32(np, "stericsson,irq-base", &ab8500->irq_base);
+
+	if (!ab8500->irq_base) {
+		dev_info(&pdev->dev, "couldn't find irq-base\n");
+		ret = -EINVAL;
+		goto out_free_ab8500;
+	}
+
+	ab8500->dev = &pdev->dev;
+
+	resource = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
+	if (!resource) {
+		ret = -ENODEV;
+		goto out_free_ab8500;
+	}
+
+	ab8500->irq = resource->start;
+
+	ab8500->read = ab8500_i2c_read;
+	ab8500->write = ab8500_i2c_write;
+	ab8500->write_masked = ab8500_i2c_write_masked;
 
 	mutex_init(&ab8500->lock);
 	mutex_init(&ab8500->irq_lock);
+	atomic_set(&ab8500->transfer_ongoing, 0);
+
+	platform_set_drvdata(pdev, ab8500);
+
+	if (platid)
+		version = platid->driver_data;
+	else if (np)
+		version = (unsigned int)
+			of_match_device(ab8500_match, &pdev->dev)->data;
 
 	if (version != AB8500_VERSION_UNDEFINED)
 		ab8500->version = version;
@@ -1022,7 +1274,7 @@
 		ret = get_register_interruptible(ab8500, AB8500_MISC,
 			AB8500_IC_NAME_REG, &value);
 		if (ret < 0)
-			return ret;
+			goto out_free_ab8500;
 
 		ab8500->version = value;
 	}
@@ -1030,7 +1282,7 @@
 	ret = get_register_interruptible(ab8500, AB8500_MISC,
 		AB8500_REV_REG, &value);
 	if (ret < 0)
-		return ret;
+		goto out_free_ab8500;
 
 	ab8500->chip_id = value;
 
@@ -1105,30 +1357,57 @@
 		if (ret)
 			goto out_freeoldmask;
 
-		ret = request_threaded_irq(ab8500->irq, NULL, ab8500_irq,
-					   IRQF_ONESHOT | IRQF_NO_SUSPEND,
-					   "ab8500", ab8500);
+		/* Activate this feature only in ab9540 */
+		/* till tests are done on ab8500 1p2 or later */
+		if (is_ab9540(ab8500))
+			ret = request_threaded_irq(ab8500->irq, NULL,
+					ab8500_hierarchical_irq,
+					IRQF_ONESHOT | IRQF_NO_SUSPEND,
+					"ab8500", ab8500);
+		else
+			ret = request_threaded_irq(ab8500->irq, NULL,
+					ab8500_irq,
+					IRQF_ONESHOT | IRQF_NO_SUSPEND,
+					"ab8500", ab8500);
 		if (ret)
 			goto out_removeirq;
 	}
 
-	ret = mfd_add_devices(ab8500->dev, 0, abx500_common_devs,
-			      ARRAY_SIZE(abx500_common_devs), NULL,
-			      ab8500->irq_base);
+	if (!np) {
+		ret = mfd_add_devices(ab8500->dev, 0, abx500_common_devs,
+				ARRAY_SIZE(abx500_common_devs), NULL,
+				ab8500->irq_base);
 
-	if (ret)
-		goto out_freeirq;
+		if (ret)
+			goto out_freeirq;
 
-	if (is_ab9540(ab8500))
-		ret = mfd_add_devices(ab8500->dev, 0, ab9540_devs,
-			      ARRAY_SIZE(ab9540_devs), NULL,
-			      ab8500->irq_base);
-	else
-		ret = mfd_add_devices(ab8500->dev, 0, ab8500_devs,
-			      ARRAY_SIZE(ab9540_devs), NULL,
-			      ab8500->irq_base);
-	if (ret)
-		goto out_freeirq;
+		if (is_ab9540(ab8500))
+			ret = mfd_add_devices(ab8500->dev, 0, ab9540_devs,
+					ARRAY_SIZE(ab9540_devs), NULL,
+					ab8500->irq_base);
+		else
+			ret = mfd_add_devices(ab8500->dev, 0, ab8500_devs,
+					ARRAY_SIZE(ab8500_devs), NULL,
+					ab8500->irq_base);
+		if (ret)
+			goto out_freeirq;
+
+		if (is_ab9540(ab8500) || is_ab8505(ab8500))
+			ret = mfd_add_devices(ab8500->dev, 0, ab9540_ab8505_devs,
+					ARRAY_SIZE(ab9540_ab8505_devs), NULL,
+					ab8500->irq_base);
+		if (ret)
+			goto out_freeirq;
+	}
+
+	if (!no_bm) {
+		/* Add battery management devices */
+		ret = mfd_add_devices(ab8500->dev, 0, ab8500_bm_devs,
+				      ARRAY_SIZE(ab8500_bm_devs), NULL,
+				      ab8500->irq_base);
+		if (ret)
+			dev_err(ab8500->dev, "error adding bm devices\n");
+	}
 
 	if (is_ab9540(ab8500))
 		ret = sysfs_create_group(&ab8500->dev->kobj,
@@ -1151,12 +1430,16 @@
 	kfree(ab8500->oldmask);
 out_freemask:
 	kfree(ab8500->mask);
+out_free_ab8500:
+	kfree(ab8500);
 
 	return ret;
 }
 
-int __devexit ab8500_exit(struct ab8500 *ab8500)
+static int __devexit ab8500_remove(struct platform_device *pdev)
 {
+	struct ab8500 *ab8500 = platform_get_drvdata(pdev);
+
 	if (is_ab9540(ab8500))
 		sysfs_remove_group(&ab8500->dev->kobj, &ab9540_attr_group);
 	else
@@ -1168,10 +1451,42 @@
 	}
 	kfree(ab8500->oldmask);
 	kfree(ab8500->mask);
+	kfree(ab8500);
 
 	return 0;
 }
 
+static const struct platform_device_id ab8500_id[] = {
+	{ "ab8500-core", AB8500_VERSION_AB8500 },
+	{ "ab8505-i2c", AB8500_VERSION_AB8505 },
+	{ "ab9540-i2c", AB8500_VERSION_AB9540 },
+	{ "ab8540-i2c", AB8500_VERSION_AB8540 },
+	{ }
+};
+
+static struct platform_driver ab8500_core_driver = {
+	.driver = {
+		.name = "ab8500-core",
+		.owner = THIS_MODULE,
+		.of_match_table = ab8500_match,
+	},
+	.probe	= ab8500_probe,
+	.remove	= __devexit_p(ab8500_remove),
+	.id_table = ab8500_id,
+};
+
+static int __init ab8500_core_init(void)
+{
+	return platform_driver_register(&ab8500_core_driver);
+}
+
+static void __exit ab8500_core_exit(void)
+{
+	platform_driver_unregister(&ab8500_core_driver);
+}
+arch_initcall(ab8500_core_init);
+module_exit(ab8500_core_exit);
+
 MODULE_AUTHOR("Mattias Wallin, Srinidhi Kasagar, Rabin Vincent");
 MODULE_DESCRIPTION("AB8500 MFD core");
 MODULE_LICENSE("GPL v2");
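
For reference, a minimal standalone sketch of how the hierarchical handler
above walks a latch register: take the lowest set bit, dispatch the nested
IRQ line (line = mask_index * 8 + bit), clear the bit and repeat. The
function name, the printf() dispatch and the use of POSIX ffs() in place of
the kernel's __ffs() are illustrative assumptions, not part of the patch.

#include <stdio.h>
#include <strings.h>	/* POSIX ffs(), standing in for the kernel's __ffs() */

static void handle_latch(int mask_index, unsigned char latch_val)
{
	while (latch_val) {
		int bit = ffs(latch_val) - 1;	/* ffs() is 1-based, __ffs() is 0-based */

		/* ab8500_handle_hierarchical_line() calls handle_nested_irq() here */
		printf("nested irq line %d\n", (mask_index << 3) + bit);
		latch_val &= ~(1u << bit);
	}
}

int main(void)
{
	handle_latch(2, 0x22);	/* bits 1 and 5 -> lines 17 and 21 */
	return 0;
}
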
diff --git a/drivers/mfd/ab8500-debugfs.c b/drivers/mfd/ab8500-debugfs.c
index 9a0211a..50c4c89 100644
--- a/drivers/mfd/ab8500-debugfs.c
+++ b/drivers/mfd/ab8500-debugfs.c
@@ -608,10 +608,16 @@
 	return 0;
 }
 
+static const struct of_device_id ab8500_debug_match[] = {
+	{ .compatible = "stericsson,ab8500-debug", },
+	{}
+};
+
 static struct platform_driver ab8500_debug_driver = {
 	.driver = {
 		.name = "ab8500-debug",
 		.owner = THIS_MODULE,
+		.of_match_table = ab8500_debug_match,
 	},
 	.probe  = ab8500_debug_probe,
 	.remove = __devexit_p(ab8500_debug_remove)
diff --git a/drivers/mfd/ab8500-gpadc.c b/drivers/mfd/ab8500-gpadc.c
index c39fc71..b86fd8e 100644
--- a/drivers/mfd/ab8500-gpadc.c
+++ b/drivers/mfd/ab8500-gpadc.c
@@ -584,7 +584,7 @@
 
 	gpadc->irq = platform_get_irq_byname(pdev, "SW_CONV_END");
 	if (gpadc->irq < 0) {
-		dev_err(gpadc->dev, "failed to get platform irq-%d\n",
+		dev_err(&pdev->dev, "failed to get platform irq-%d\n",
 			gpadc->irq);
 		ret = gpadc->irq;
 		goto fail;
@@ -648,12 +648,18 @@
 	return 0;
 }
 
+static const struct of_device_id ab8500_gpadc_match[] = {
+	{ .compatible = "stericsson,ab8500-gpadc", },
+	{}
+};
+
 static struct platform_driver ab8500_gpadc_driver = {
 	.probe = ab8500_gpadc_probe,
 	.remove = __devexit_p(ab8500_gpadc_remove),
 	.driver = {
 		.name = "ab8500-gpadc",
 		.owner = THIS_MODULE,
+		.of_match_table = ab8500_gpadc_match,
 	},
 };
 
diff --git a/drivers/mfd/ab8500-i2c.c b/drivers/mfd/ab8500-i2c.c
deleted file mode 100644
index b83045f..0000000
--- a/drivers/mfd/ab8500-i2c.c
+++ /dev/null
@@ -1,128 +0,0 @@
-/*
- * Copyright (C) ST-Ericsson SA 2010
- * Author: Mattias Wallin <mattias.wallin@stericsson.com> for ST-Ericsson.
- * License Terms: GNU General Public License v2
- * This file was based on drivers/mfd/ab8500-spi.c
- */
-
-#include <linux/kernel.h>
-#include <linux/slab.h>
-#include <linux/init.h>
-#include <linux/module.h>
-#include <linux/platform_device.h>
-#include <linux/mfd/abx500/ab8500.h>
-#include <linux/mfd/dbx500-prcmu.h>
-
-static int ab8500_i2c_write(struct ab8500 *ab8500, u16 addr, u8 data)
-{
-	int ret;
-
-	ret = prcmu_abb_write((u8)(addr >> 8), (u8)(addr & 0xFF), &data, 1);
-	if (ret < 0)
-		dev_err(ab8500->dev, "prcmu i2c error %d\n", ret);
-	return ret;
-}
-
-static int ab8500_i2c_write_masked(struct ab8500 *ab8500, u16 addr, u8 mask,
-	u8 data)
-{
-	int ret;
-
-	ret = prcmu_abb_write_masked((u8)(addr >> 8), (u8)(addr & 0xFF), &data,
-		&mask, 1);
-	if (ret < 0)
-		dev_err(ab8500->dev, "prcmu i2c error %d\n", ret);
-	return ret;
-}
-
-static int ab8500_i2c_read(struct ab8500 *ab8500, u16 addr)
-{
-	int ret;
-	u8 data;
-
-	ret = prcmu_abb_read((u8)(addr >> 8), (u8)(addr & 0xFF), &data, 1);
-	if (ret < 0) {
-		dev_err(ab8500->dev, "prcmu i2c error %d\n", ret);
-		return ret;
-	}
-	return (int)data;
-}
-
-static int __devinit ab8500_i2c_probe(struct platform_device *plf)
-{
-	const struct platform_device_id *platid = platform_get_device_id(plf);
-	struct ab8500 *ab8500;
-	struct resource *resource;
-	int ret;
-
-	ab8500 = kzalloc(sizeof *ab8500, GFP_KERNEL);
-	if (!ab8500)
-		return -ENOMEM;
-
-	ab8500->dev = &plf->dev;
-
-	resource = platform_get_resource(plf, IORESOURCE_IRQ, 0);
-	if (!resource) {
-		kfree(ab8500);
-		return -ENODEV;
-	}
-
-	ab8500->irq = resource->start;
-
-	ab8500->read = ab8500_i2c_read;
-	ab8500->write = ab8500_i2c_write;
-	ab8500->write_masked = ab8500_i2c_write_masked;
-
-	platform_set_drvdata(plf, ab8500);
-
-	ret = ab8500_init(ab8500, platid->driver_data);
-	if (ret)
-		kfree(ab8500);
-
-
-	return ret;
-}
-
-static int __devexit ab8500_i2c_remove(struct platform_device *plf)
-{
-	struct ab8500 *ab8500 = platform_get_drvdata(plf);
-
-	ab8500_exit(ab8500);
-	kfree(ab8500);
-
-	return 0;
-}
-
-static const struct platform_device_id ab8500_id[] = {
-	{ "ab8500-i2c", AB8500_VERSION_AB8500 },
-	{ "ab8505-i2c", AB8500_VERSION_AB8505 },
-	{ "ab9540-i2c", AB8500_VERSION_AB9540 },
-	{ "ab8540-i2c", AB8500_VERSION_AB8540 },
-	{ }
-};
-
-static struct platform_driver ab8500_i2c_driver = {
-	.driver = {
-		.name = "ab8500-i2c",
-		.owner = THIS_MODULE,
-	},
-	.probe	= ab8500_i2c_probe,
-	.remove	= __devexit_p(ab8500_i2c_remove),
-	.id_table = ab8500_id,
-};
-
-static int __init ab8500_i2c_init(void)
-{
-	return platform_driver_register(&ab8500_i2c_driver);
-}
-
-static void __exit ab8500_i2c_exit(void)
-{
-	platform_driver_unregister(&ab8500_i2c_driver);
-}
-arch_initcall(ab8500_i2c_init);
-module_exit(ab8500_i2c_exit);
-
-MODULE_AUTHOR("Mattias WALLIN <mattias.wallin@stericsson.com");
-MODULE_DESCRIPTION("AB8500 Core access via PRCMU I2C");
-MODULE_LICENSE("GPL v2");
diff --git a/drivers/mfd/ab8500-sysctrl.c b/drivers/mfd/ab8500-sysctrl.c
index c28d4eb1..5a3e51c 100644
--- a/drivers/mfd/ab8500-sysctrl.c
+++ b/drivers/mfd/ab8500-sysctrl.c
@@ -61,10 +61,16 @@
 	return 0;
 }
 
+static const struct of_device_id ab8500_sysctrl_match[] = {
+	{ .compatible = "stericsson,ab8500-sysctrl", },
+	{}
+};
+
 static struct platform_driver ab8500_sysctrl_driver = {
 	.driver = {
 		.name = "ab8500-sysctrl",
 		.owner = THIS_MODULE,
+		.of_match_table = ab8500_sysctrl_match,
 	},
 	.probe = ab8500_sysctrl_probe,
 	.remove = __devexit_p(ab8500_sysctrl_remove),
diff --git a/drivers/mfd/anatop-mfd.c b/drivers/mfd/anatop-mfd.c
index 2af4248..6da0634 100644
--- a/drivers/mfd/anatop-mfd.c
+++ b/drivers/mfd/anatop-mfd.c
@@ -41,39 +41,26 @@
 #include <linux/of_address.h>
 #include <linux/mfd/anatop.h>
 
-u32 anatop_get_bits(struct anatop *adata, u32 addr, int bit_shift,
-		    int bit_width)
+u32 anatop_read_reg(struct anatop *adata, u32 addr)
 {
-	u32 val, mask;
-
-	if (bit_width == 32)
-		mask = ~0;
-	else
-		mask = (1 << bit_width) - 1;
-
-	val = readl(adata->ioreg + addr);
-	val = (val >> bit_shift) & mask;
-
-	return val;
+	return readl(adata->ioreg + addr);
 }
-EXPORT_SYMBOL_GPL(anatop_get_bits);
+EXPORT_SYMBOL_GPL(anatop_read_reg);
 
-void anatop_set_bits(struct anatop *adata, u32 addr, int bit_shift,
-		     int bit_width, u32 data)
+void anatop_write_reg(struct anatop *adata, u32 addr, u32 data, u32 mask)
 {
-	u32 val, mask;
+	u32 val;
 
-	if (bit_width == 32)
-		mask = ~0;
-	else
-		mask = (1 << bit_width) - 1;
+	data &= mask;
 
 	spin_lock(&adata->reglock);
-	val = readl(adata->ioreg + addr) & ~(mask << bit_shift);
-	writel((data << bit_shift) | val, adata->ioreg + addr);
+	val = readl(adata->ioreg + addr);
+	val &= ~mask;
+	val |= data;
+	writel(val, adata->ioreg + addr);
 	spin_unlock(&adata->reglock);
 }
-EXPORT_SYMBOL_GPL(anatop_set_bits);
+EXPORT_SYMBOL_GPL(anatop_write_reg);
 
 static const struct of_device_id of_anatop_match[] = {
 	{ .compatible = "fsl,imx6q-anatop", },
diff --git a/drivers/mfd/asic3.c b/drivers/mfd/asic3.c
index 1582c3d..383421b 100644
--- a/drivers/mfd/asic3.c
+++ b/drivers/mfd/asic3.c
@@ -353,12 +353,28 @@
 	return 0;
 }
 
+static int asic3_gpio_irq_set_wake(struct irq_data *data, unsigned int on)
+{
+	struct asic3 *asic = irq_data_get_irq_chip_data(data);
+	u32 bank, index;
+	u16 bit;
+
+	bank = asic3_irq_to_bank(asic, data->irq);
+	index = asic3_irq_to_index(asic, data->irq);
+	bit = 1<<index;
+
+	asic3_set_register(asic, bank + ASIC3_GPIO_SLEEP_MASK, bit, !on);
+
+	return 0;
+}
+
 static struct irq_chip asic3_gpio_irq_chip = {
 	.name		= "ASIC3-GPIO",
 	.irq_ack	= asic3_mask_gpio_irq,
 	.irq_mask	= asic3_mask_gpio_irq,
 	.irq_unmask	= asic3_unmask_gpio_irq,
 	.irq_set_type	= asic3_gpio_irq_type,
+	.irq_set_wake	= asic3_gpio_irq_set_wake,
 };
 
 static struct irq_chip asic3_irq_chip = {
@@ -529,7 +545,7 @@
 {
 	struct asic3 *asic = container_of(chip, struct asic3, gpio);
 
-	return (offset < ASIC3_NUM_GPIOS) ? asic->irq_base + offset : -ENXIO;
+	return asic->irq_base + offset;
 }
 
 static __init int asic3_gpio_probe(struct platform_device *pdev,
@@ -894,10 +910,13 @@
 	asic3_mmc_resources[0].start >>= asic->bus_shift;
 	asic3_mmc_resources[0].end   >>= asic->bus_shift;
 
-	ret = mfd_add_devices(&pdev->dev, pdev->id,
+	if (pdata->clock_rate) {
+		ds1wm_pdata.clock_rate = pdata->clock_rate;
+		ret = mfd_add_devices(&pdev->dev, pdev->id,
 			&asic3_cell_ds1wm, 1, mem, asic->irq_base);
-	if (ret < 0)
-		goto out;
+		if (ret < 0)
+			goto out;
+	}
 
 	if (mem_sdio && (irq >= 0)) {
 		ret = mfd_add_devices(&pdev->dev, pdev->id,
@@ -1000,6 +1019,9 @@
 
 	asic3_mfd_probe(pdev, pdata, mem);
 
+	asic3_set_register(asic, ASIC3_OFFSET(EXTCF, SELECT),
+		(ASIC3_EXTCF_CF0_BUF_EN|ASIC3_EXTCF_CF0_PWAIT_EN), 1);
+
 	dev_info(asic->dev, "ASIC3 Core driver\n");
 
 	return 0;
@@ -1021,6 +1043,9 @@
 	int ret;
 	struct asic3 *asic = platform_get_drvdata(pdev);
 
+	asic3_set_register(asic, ASIC3_OFFSET(EXTCF, SELECT),
+		(ASIC3_EXTCF_CF0_BUF_EN|ASIC3_EXTCF_CF0_PWAIT_EN), 0);
+
 	asic3_mfd_remove(pdev);
 
 	ret = asic3_gpio_remove(pdev);
diff --git a/drivers/mfd/cs5535-mfd.c b/drivers/mfd/cs5535-mfd.c
index 315fef5..3419e72 100644
--- a/drivers/mfd/cs5535-mfd.c
+++ b/drivers/mfd/cs5535-mfd.c
@@ -186,18 +186,7 @@
 	.remove = __devexit_p(cs5535_mfd_remove),
 };
 
-static int __init cs5535_mfd_init(void)
-{
-	return pci_register_driver(&cs5535_mfd_driver);
-}
-
-static void __exit cs5535_mfd_exit(void)
-{
-	pci_unregister_driver(&cs5535_mfd_driver);
-}
-
-module_init(cs5535_mfd_init);
-module_exit(cs5535_mfd_exit);
+module_pci_driver(cs5535_mfd_driver);
 
 MODULE_AUTHOR("Andres Salomon <dilinger@queued.net>");
 MODULE_DESCRIPTION("MFD driver for CS5535/CS5536 southbridge's ISA PCI device");
diff --git a/drivers/mfd/da9052-core.c b/drivers/mfd/da9052-core.c
index 7776aff..1f1313c 100644
--- a/drivers/mfd/da9052-core.c
+++ b/drivers/mfd/da9052-core.c
@@ -318,6 +318,135 @@
 	}
 }
 
+/*
+ * TBAT look-up table is computed from the R90 reg (8 bit register)
+ * reading as below. The battery temperature is in milliCentigrade
+ * TBAT = (1/(t1+1/298) - 273) * 1000 mC
+ * where t1 = (1/B)* ln(( ADCval * 2.5)/(R25*ITBAT*255))
+ * Default values are R25 = 10e3, B = 3380, ITBAT = 50e-6
+ * Example:
+ * R25=10E3, B=3380, ITBAT=50e-6, ADCVAL=62d calculates
+ * TBAT = 20015 milli degrees Centigrade
+ */
+static const int32_t tbat_lookup[255] = {
+	183258, 144221, 124334, 111336, 101826, 94397, 88343, 83257,
+	78889, 75071, 71688, 68656, 65914, 63414, 61120, 59001,
+	570366, 55204, 53490, 51881, 50364, 48931, 47574, 46285,
+	45059, 43889, 42772, 41703, 40678, 39694, 38748, 37838,
+	36961, 36115, 35297, 34507, 33743, 33002, 32284, 31588,
+	30911, 30254, 29615, 28994, 28389, 27799, 27225, 26664,
+	26117, 25584, 25062, 24553, 24054, 23567, 23091, 22624,
+	22167, 21719, 21281, 20851, 20429, 20015, 19610, 19211,
+	18820, 18436, 18058, 17688, 17323, 16965, 16612, 16266,
+	15925, 15589, 15259, 14933, 14613, 14298, 13987, 13681,
+	13379, 13082, 12788, 12499, 12214, 11933, 11655, 11382,
+	11112, 10845, 10582, 10322, 10066, 9812, 9562, 9315,
+	9071, 8830, 8591, 8356, 8123, 7893, 7665, 7440,
+	7218, 6998, 6780, 6565, 6352, 6141, 5933, 5726,
+	5522, 5320, 5120, 4922, 4726, 4532, 4340, 4149,
+	3961, 3774, 3589, 3406, 3225, 3045, 2867, 2690,
+	2516, 2342, 2170, 2000, 1831, 1664, 1498, 1334,
+	1171, 1009, 849, 690, 532, 376, 221, 67,
+	-84, -236, -386, -535, -683, -830, -975, -1119,
+	-1263, -1405, -1546, -1686, -1825, -1964, -2101, -2237,
+	-2372, -2506, -2639, -2771, -2902, -3033, -3162, -3291,
+	-3418, -3545, -3671, -3796, -3920, -4044, -4166, -4288,
+	-4409, -4529, -4649, -4767, -4885, -5002, -5119, -5235,
+	-5349, -5464, -5577, -5690, -5802, -5913, -6024, -6134,
+	-6244, -6352, -6461, -6568, -6675, -6781, -6887, -6992,
+	-7096, -7200, -7303, -7406, -7508, -7609, -7710, -7810,
+	-7910, -8009, -8108, -8206, -8304, -8401, -8497, -8593,
+	-8689, -8784, -8878, -8972, -9066, -9159, -9251, -9343,
+	-9435, -9526, -9617, -9707, -9796, -9886, -9975, -10063,
+	-10151, -10238, -10325, -10412, -10839, -10923, -11007, -11090,
+	-11173, -11256, -11338, -11420, -11501, -11583, -11663, -11744,
+	-11823, -11903, -11982
+};
+
+static const u8 chan_mux[DA9052_ADC_VBBAT + 1] = {
+	[DA9052_ADC_VDDOUT]	= DA9052_ADC_MAN_MUXSEL_VDDOUT,
+	[DA9052_ADC_ICH]	= DA9052_ADC_MAN_MUXSEL_ICH,
+	[DA9052_ADC_TBAT]	= DA9052_ADC_MAN_MUXSEL_TBAT,
+	[DA9052_ADC_VBAT]	= DA9052_ADC_MAN_MUXSEL_VBAT,
+	[DA9052_ADC_IN4]	= DA9052_ADC_MAN_MUXSEL_AD4,
+	[DA9052_ADC_IN5]	= DA9052_ADC_MAN_MUXSEL_AD5,
+	[DA9052_ADC_IN6]	= DA9052_ADC_MAN_MUXSEL_AD6,
+	[DA9052_ADC_VBBAT]	= DA9052_ADC_MAN_MUXSEL_VBBAT
+};
+
+int da9052_adc_manual_read(struct da9052 *da9052, unsigned char channel)
+{
+	int ret;
+	unsigned short calc_data;
+	unsigned short data;
+	unsigned char mux_sel;
+
+	if (channel > DA9052_ADC_VBBAT)
+		return -EINVAL;
+
+	mutex_lock(&da9052->auxadc_lock);
+
+	/* Channel gets activated on enabling the Conversion bit */
+	mux_sel = chan_mux[channel] | DA9052_ADC_MAN_MAN_CONV;
+
+	ret = da9052_reg_write(da9052, DA9052_ADC_MAN_REG, mux_sel);
+	if (ret < 0)
+		goto err;
+
+	/* Wait for an interrupt */
+	if (!wait_for_completion_timeout(&da9052->done,
+					 msecs_to_jiffies(500))) {
+		dev_err(da9052->dev,
+			"timeout waiting for ADC conversion interrupt\n");
+		ret = -ETIMEDOUT;
+		goto err;
+	}
+
+	ret = da9052_reg_read(da9052, DA9052_ADC_RES_H_REG);
+	if (ret < 0)
+		goto err;
+
+	calc_data = (unsigned short)ret;
+	data = calc_data << 2;
+
+	ret = da9052_reg_read(da9052, DA9052_ADC_RES_L_REG);
+	if (ret < 0)
+		goto err;
+
+	calc_data = (unsigned short)(ret & DA9052_ADC_RES_LSB);
+	data |= calc_data;
+
+	ret = data;
+
+err:
+	mutex_unlock(&da9052->auxadc_lock);
+	return ret;
+}
+EXPORT_SYMBOL_GPL(da9052_adc_manual_read);
+
+static irqreturn_t da9052_auxadc_irq(int irq, void *irq_data)
+{
+	struct da9052 *da9052 = irq_data;
+
+	complete(&da9052->done);
+
+	return IRQ_HANDLED;
+}
+
+int da9052_adc_read_temp(struct da9052 *da9052)
+{
+	int tbat;
+
+	tbat = da9052_reg_read(da9052, DA9052_TBAT_RES_REG);
+	if (tbat <= 0)
+		return tbat;
+
+	/* ARRAY_SIZE check is not needed since TBAT is an 8-bit register */
+	return tbat_lookup[tbat - 1];
+}
+EXPORT_SYMBOL_GPL(da9052_adc_read_temp);
+
 static struct resource da9052_rtc_resource = {
 	.name = "ALM",
 	.start = DA9052_IRQ_ALARM,
@@ -646,6 +775,9 @@
 	struct irq_desc *desc;
 	int ret;
 
+	mutex_init(&da9052->auxadc_lock);
+	init_completion(&da9052->done);
+
 	if (pdata && pdata->init != NULL)
 		pdata->init(da9052);
 
@@ -665,6 +797,12 @@
 
 	da9052->irq_base = regmap_irq_chip_get_base(da9052->irq_data);
 
+	ret = request_threaded_irq(DA9052_IRQ_ADC_EOM, NULL, da9052_auxadc_irq,
+				   IRQF_TRIGGER_LOW | IRQF_ONESHOT,
+				   "adc irq", da9052);
+	if (ret != 0)
+		dev_err(da9052->dev, "DA9052 ADC IRQ failed ret=%d\n", ret);
+
 	ret = mfd_add_devices(da9052->dev, -1, da9052_subdev_info,
 			      ARRAY_SIZE(da9052_subdev_info), NULL, 0);
 	if (ret)
@@ -673,6 +811,7 @@
 	return 0;
 
 err:
+	free_irq(DA9052_IRQ_ADC_EOM, da9052);
 	mfd_remove_devices(da9052->dev);
 regmap_err:
 	return ret;
@@ -680,6 +819,7 @@
 
 void da9052_device_exit(struct da9052 *da9052)
 {
+	free_irq(DA9052_IRQ_ADC_EOM, da9052);
 	regmap_del_irq_chip(da9052->chip_irq, da9052->irq_data);
 	mfd_remove_devices(da9052->dev);
 }
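
A minimal userspace sketch of the TBAT formula quoted in the comment above,
which is where the precomputed tbat_lookup[] values come from; the constants
are the stated defaults and the standalone build is an assumption for
illustration (the driver itself only indexes the table). Compile with -lm.

#include <stdio.h>
#include <math.h>

#define R25	10e3		/* ohms */
#define B	3380.0
#define ITBAT	50e-6		/* amps */

/* Battery temperature in milli-degrees Centigrade for an 8-bit ADC code. */
static long tbat_mdegc(unsigned int adcval)
{
	double t1 = (1.0 / B) * log((adcval * 2.5) / (R25 * ITBAT * 255));

	return (long)((1.0 / (t1 + 1.0 / 298) - 273) * 1000);
}

int main(void)
{
	/* ADC code 62 comes out close to the 20015 mC quoted in the comment;
	 * the small difference is down to rounding of the constants. */
	printf("TBAT(62) = %ld mC\n", tbat_mdegc(62));
	return 0;
}
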
diff --git a/drivers/mfd/da9052-i2c.c b/drivers/mfd/da9052-i2c.c
index 36b88e3..82c9d64 100644
--- a/drivers/mfd/da9052-i2c.c
+++ b/drivers/mfd/da9052-i2c.c
@@ -22,6 +22,11 @@
 #include <linux/mfd/da9052/da9052.h>
 #include <linux/mfd/da9052/reg.h>
 
+#ifdef CONFIG_OF
+#include <linux/of.h>
+#include <linux/of_device.h>
+#endif
+
 static int da9052_i2c_enable_multiwrite(struct da9052 *da9052)
 {
 	int reg_val, ret;
@@ -41,65 +46,6 @@
 	return 0;
 }
 
-static int __devinit da9052_i2c_probe(struct i2c_client *client,
-				       const struct i2c_device_id *id)
-{
-	struct da9052 *da9052;
-	int ret;
-
-	da9052 = kzalloc(sizeof(struct da9052), GFP_KERNEL);
-	if (!da9052)
-		return -ENOMEM;
-
-	if (!i2c_check_functionality(client->adapter,
-				     I2C_FUNC_SMBUS_BYTE_DATA)) {
-		dev_info(&client->dev, "Error in %s:i2c_check_functionality\n",
-			 __func__);
-		ret = -ENODEV;
-		goto err;
-	}
-
-	da9052->dev = &client->dev;
-	da9052->chip_irq = client->irq;
-
-	i2c_set_clientdata(client, da9052);
-
-	da9052->regmap = regmap_init_i2c(client, &da9052_regmap_config);
-	if (IS_ERR(da9052->regmap)) {
-		ret = PTR_ERR(da9052->regmap);
-		dev_err(&client->dev, "Failed to allocate register map: %d\n",
-			ret);
-		goto err;
-	}
-
-	ret = da9052_i2c_enable_multiwrite(da9052);
-	if (ret < 0)
-		goto err_regmap;
-
-	ret = da9052_device_init(da9052, id->driver_data);
-	if (ret != 0)
-		goto err_regmap;
-
-	return 0;
-
-err_regmap:
-	regmap_exit(da9052->regmap);
-err:
-	kfree(da9052);
-	return ret;
-}
-
-static int __devexit da9052_i2c_remove(struct i2c_client *client)
-{
-	struct da9052 *da9052 = i2c_get_clientdata(client);
-
-	da9052_device_exit(da9052);
-	regmap_exit(da9052->regmap);
-	kfree(da9052);
-
-	return 0;
-}
-
 static struct i2c_device_id da9052_i2c_id[] = {
 	{"da9052", DA9052},
 	{"da9053-aa", DA9053_AA},
@@ -108,6 +54,81 @@
 	{}
 };
 
+#ifdef CONFIG_OF
+static const struct of_device_id dialog_dt_ids[] = {
+	{ .compatible = "dlg,da9052", .data = &da9052_i2c_id[0] },
+	{ .compatible = "dlg,da9053-aa", .data = &da9052_i2c_id[1] },
+	{ .compatible = "dlg,da9053-ab", .data = &da9052_i2c_id[2] },
+	{ .compatible = "dlg,da9053-bb", .data = &da9052_i2c_id[3] },
+	{ /* sentinel */ }
+};
+#endif
+
+static int __devinit da9052_i2c_probe(struct i2c_client *client,
+				       const struct i2c_device_id *id)
+{
+	struct da9052 *da9052;
+	int ret;
+
+	da9052 = devm_kzalloc(&client->dev, sizeof(struct da9052), GFP_KERNEL);
+	if (!da9052)
+		return -ENOMEM;
+
+	if (!i2c_check_functionality(client->adapter,
+				     I2C_FUNC_SMBUS_BYTE_DATA)) {
+		dev_info(&client->dev, "Error in %s:i2c_check_functionality\n",
+			 __func__);
+		return -ENODEV;
+	}
+
+	da9052->dev = &client->dev;
+	da9052->chip_irq = client->irq;
+
+	i2c_set_clientdata(client, da9052);
+
+	da9052->regmap = devm_regmap_init_i2c(client, &da9052_regmap_config);
+	if (IS_ERR(da9052->regmap)) {
+		ret = PTR_ERR(da9052->regmap);
+		dev_err(&client->dev, "Failed to allocate register map: %d\n",
+			ret);
+		return ret;
+	}
+
+	ret = da9052_i2c_enable_multiwrite(da9052);
+	if (ret < 0)
+		return ret;
+
+#ifdef CONFIG_OF
+	if (!id) {
+		struct device_node *np = client->dev.of_node;
+		const struct of_device_id *deviceid;
+
+		deviceid = of_match_node(dialog_dt_ids, np);
+		id = (const struct i2c_device_id *)deviceid->data;
+	}
+#endif
+
+	if (!id) {
+		ret = -ENODEV;
+		dev_err(&client->dev, "id is null.\n");
+		return ret;
+	}
+
+	ret = da9052_device_init(da9052, id->driver_data);
+	if (ret != 0)
+		return ret;
+
+	return 0;
+}
+
+static int __devexit da9052_i2c_remove(struct i2c_client *client)
+{
+	struct da9052 *da9052 = i2c_get_clientdata(client);
+
+	da9052_device_exit(da9052);
+	return 0;
+}
+
 static struct i2c_driver da9052_i2c_driver = {
 	.probe = da9052_i2c_probe,
 	.remove = __devexit_p(da9052_i2c_remove),
@@ -115,6 +136,9 @@
 	.driver = {
 		.name = "da9052",
 		.owner = THIS_MODULE,
+#ifdef CONFIG_OF
+		.of_match_table = dialog_dt_ids,
+#endif
 	},
 };
 
diff --git a/drivers/mfd/da9052-spi.c b/drivers/mfd/da9052-spi.c
index 6faf149..dbeadc5 100644
--- a/drivers/mfd/da9052-spi.c
+++ b/drivers/mfd/da9052-spi.c
@@ -25,8 +25,9 @@
 {
 	int ret;
 	const struct spi_device_id *id = spi_get_device_id(spi);
-	struct da9052 *da9052 = kzalloc(sizeof(struct da9052), GFP_KERNEL);
+	struct da9052 *da9052;
 
+	da9052 = devm_kzalloc(&spi->dev, sizeof(struct da9052), GFP_KERNEL);
 	if (!da9052)
 		return -ENOMEM;
 
@@ -42,25 +43,19 @@
 	da9052_regmap_config.read_flag_mask = 1;
 	da9052_regmap_config.write_flag_mask = 0;
 
-	da9052->regmap = regmap_init_spi(spi, &da9052_regmap_config);
+	da9052->regmap = devm_regmap_init_spi(spi, &da9052_regmap_config);
 	if (IS_ERR(da9052->regmap)) {
 		ret = PTR_ERR(da9052->regmap);
 		dev_err(&spi->dev, "Failed to allocate register map: %d\n",
 			ret);
-		goto err;
+		return ret;
 	}
 
 	ret = da9052_device_init(da9052, id->driver_data);
 	if (ret != 0)
-		goto err_regmap;
+		return ret;
 
 	return 0;
-
-err_regmap:
-	regmap_exit(da9052->regmap);
-err:
-	kfree(da9052);
-	return ret;
 }
 
 static int __devexit da9052_spi_remove(struct spi_device *spi)
@@ -68,9 +63,6 @@
 	struct da9052 *da9052 = dev_get_drvdata(&spi->dev);
 
 	da9052_device_exit(da9052);
-	regmap_exit(da9052->regmap);
-	kfree(da9052);
-
 	return 0;
 }
 
@@ -88,7 +80,6 @@
 	.id_table = da9052_spi_id,
 	.driver = {
 		.name = "da9052",
-		.bus = &spi_bus_type,
 		.owner = THIS_MODULE,
 	},
 };
diff --git a/drivers/mfd/db8500-prcmu.c b/drivers/mfd/db8500-prcmu.c
index 5be3248..50e83dc5 100644
--- a/drivers/mfd/db8500-prcmu.c
+++ b/drivers/mfd/db8500-prcmu.c
@@ -2720,6 +2720,7 @@
 	REGULATOR_SUPPLY("v-i2c", "nmk-i2c.1"),
 	REGULATOR_SUPPLY("v-i2c", "nmk-i2c.2"),
 	REGULATOR_SUPPLY("v-i2c", "nmk-i2c.3"),
+	REGULATOR_SUPPLY("v-i2c", "nmk-i2c.4"),
 	/* "v-mmc" changed to "vcore" in the mainline kernel */
 	REGULATOR_SUPPLY("vcore", "sdi0"),
 	REGULATOR_SUPPLY("vcore", "sdi1"),
@@ -2734,6 +2735,7 @@
 	REGULATOR_SUPPLY("vcore", "uart2"),
 	REGULATOR_SUPPLY("v-ape", "nmk-ske-keypad.0"),
 	REGULATOR_SUPPLY("v-hsi", "ste_hsi.0"),
+	REGULATOR_SUPPLY("vddvario", "smsc911x.0"),
 };
 
 static struct regulator_consumer_supply db8500_vsmps2_consumers[] = {
@@ -2958,9 +2960,10 @@
  * prcmu_fw_init - arch init call for the Linux PRCMU fw init logic
  *
  */
-static int __init db8500_prcmu_probe(struct platform_device *pdev)
+static int __devinit db8500_prcmu_probe(struct platform_device *pdev)
 {
-	int err = 0;
+	struct device_node *np = pdev->dev.of_node;
+	int irq = 0, err = 0;
 
 	if (ux500_is_svp())
 		return -ENODEV;
@@ -2970,8 +2973,14 @@
 	/* Clean up the mailbox interrupts after pre-kernel code. */
 	writel(ALL_MBOX_BITS, PRCM_ARM_IT1_CLR);
 
-	err = request_threaded_irq(IRQ_DB8500_PRCMU1, prcmu_irq_handler,
-		prcmu_irq_thread_fn, IRQF_NO_SUSPEND, "prcmu", NULL);
+	if (np)
+		irq = platform_get_irq(pdev, 0);
+
+	if (!np || irq <= 0)
+		irq = IRQ_DB8500_PRCMU1;
+
+	err = request_threaded_irq(irq, prcmu_irq_handler,
+	        prcmu_irq_thread_fn, IRQF_NO_SUSPEND, "prcmu", NULL);
 	if (err < 0) {
 		pr_err("prcmu: Failed to allocate IRQ_DB8500_PRCMU1.\n");
 		err = -EBUSY;
@@ -2981,14 +2990,16 @@
 	if (cpu_is_u8500v20_or_later())
 		prcmu_config_esram0_deep_sleep(ESRAM0_DEEP_SLEEP_STATE_RET);
 
-	err = mfd_add_devices(&pdev->dev, 0, db8500_prcmu_devs,
-			      ARRAY_SIZE(db8500_prcmu_devs), NULL,
-			      0);
+	if (!np) {
+		err = mfd_add_devices(&pdev->dev, 0, db8500_prcmu_devs,
+				ARRAY_SIZE(db8500_prcmu_devs), NULL, 0);
+		if (err) {
+			pr_err("prcmu: Failed to add subdevices\n");
+			return err;
+		}
+	}
 
-	if (err)
-		pr_err("prcmu: Failed to add subdevices\n");
-	else
-		pr_info("DB8500 PRCMU initialized\n");
+	pr_info("DB8500 PRCMU initialized\n");
 
 no_irq_return:
 	return err;
@@ -2999,11 +3010,12 @@
 		.name = "db8500-prcmu",
 		.owner = THIS_MODULE,
 	},
+	.probe = db8500_prcmu_probe,
 };
 
 static int __init db8500_prcmu_init(void)
 {
-	return platform_driver_probe(&db8500_prcmu_driver, db8500_prcmu_probe);
+	return platform_driver_register(&db8500_prcmu_driver);
 }
 
 arch_initcall(db8500_prcmu_init);
diff --git a/drivers/mfd/intel_msic.c b/drivers/mfd/intel_msic.c
index b76657e..59df558 100644
--- a/drivers/mfd/intel_msic.c
+++ b/drivers/mfd/intel_msic.c
@@ -406,7 +406,7 @@
 		return -ENXIO;
 	}
 
-	msic = kzalloc(sizeof(*msic), GFP_KERNEL);
+	msic = devm_kzalloc(&pdev->dev, sizeof(*msic), GFP_KERNEL);
 	if (!msic)
 		return -ENOMEM;
 
@@ -421,21 +421,13 @@
 	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
 	if (!res) {
 		dev_err(&pdev->dev, "failed to get SRAM iomem resource\n");
-		ret = -ENODEV;
-		goto fail_free_msic;
+		return -ENODEV;
 	}
 
-	res = request_mem_region(res->start, resource_size(res), pdev->name);
-	if (!res) {
-		ret = -EBUSY;
-		goto fail_free_msic;
-	}
-
-	msic->irq_base = ioremap_nocache(res->start, resource_size(res));
+	msic->irq_base = devm_request_and_ioremap(&pdev->dev, res);
 	if (!msic->irq_base) {
 		dev_err(&pdev->dev, "failed to map SRAM memory\n");
-		ret = -ENOMEM;
-		goto fail_release_region;
+		return -ENOMEM;
 	}
 
 	platform_set_drvdata(pdev, msic);
@@ -443,7 +435,7 @@
 	ret = intel_msic_init_devices(msic);
 	if (ret) {
 		dev_err(&pdev->dev, "failed to initialize MSIC devices\n");
-		goto fail_unmap_mem;
+		return ret;
 	}
 
 	dev_info(&pdev->dev, "Intel MSIC version %c%d (vendor %#x)\n",
@@ -451,27 +443,14 @@
 		 msic->vendor);
 
 	return 0;
-
-fail_unmap_mem:
-	iounmap(msic->irq_base);
-fail_release_region:
-	release_mem_region(res->start, resource_size(res));
-fail_free_msic:
-	kfree(msic);
-
-	return ret;
 }
 
 static int __devexit intel_msic_remove(struct platform_device *pdev)
 {
 	struct intel_msic *msic = platform_get_drvdata(pdev);
-	struct resource *res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
 
 	intel_msic_remove_devices(msic);
 	platform_set_drvdata(pdev, NULL);
-	iounmap(msic->irq_base);
-	release_mem_region(res->start, resource_size(res));
-	kfree(msic);
 
 	return 0;
 }
diff --git a/drivers/mfd/janz-cmodio.c b/drivers/mfd/janz-cmodio.c
index a9223ed..2ea9998 100644
--- a/drivers/mfd/janz-cmodio.c
+++ b/drivers/mfd/janz-cmodio.c
@@ -283,23 +283,8 @@
 	.remove   = __devexit_p(cmodio_pci_remove),
 };
 
-/*
- * Module Init / Exit
- */
-
-static int __init cmodio_init(void)
-{
-	return pci_register_driver(&cmodio_pci_driver);
-}
-
-static void __exit cmodio_exit(void)
-{
-	pci_unregister_driver(&cmodio_pci_driver);
-}
+module_pci_driver(cmodio_pci_driver);
 
 MODULE_AUTHOR("Ira W. Snyder <iws@ovro.caltech.edu>");
 MODULE_DESCRIPTION("Janz CMOD-IO PCI MODULbus Carrier Board Driver");
 MODULE_LICENSE("GPL");
-
-module_init(cmodio_init);
-module_exit(cmodio_exit);
diff --git a/drivers/mfd/lm3533-core.c b/drivers/mfd/lm3533-core.c
new file mode 100644
index 0000000..0b2879b
--- /dev/null
+++ b/drivers/mfd/lm3533-core.c
@@ -0,0 +1,667 @@
+/*
+ * lm3533-core.c -- LM3533 Core
+ *
+ * Copyright (C) 2011-2012 Texas Instruments
+ *
+ * Author: Johan Hovold <jhovold@gmail.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under  the terms of the GNU General  Public License as published by the
+ * Free Software Foundation;  either version 2 of the License, or (at your
+ * option) any later version.
+ */
+
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/err.h>
+#include <linux/gpio.h>
+#include <linux/i2c.h>
+#include <linux/mfd/core.h>
+#include <linux/regmap.h>
+#include <linux/seq_file.h>
+#include <linux/slab.h>
+#include <linux/uaccess.h>
+
+#include <linux/mfd/lm3533.h>
+
+
+#define LM3533_BOOST_OVP_MASK		0x06
+#define LM3533_BOOST_OVP_SHIFT		1
+
+#define LM3533_BOOST_FREQ_MASK		0x01
+#define LM3533_BOOST_FREQ_SHIFT		0
+
+#define LM3533_BL_ID_MASK		1
+#define LM3533_LED_ID_MASK		3
+#define LM3533_BL_ID_MAX		1
+#define LM3533_LED_ID_MAX		3
+
+#define LM3533_HVLED_ID_MAX		2
+#define LM3533_LVLED_ID_MAX		5
+
+#define LM3533_REG_OUTPUT_CONF1		0x10
+#define LM3533_REG_OUTPUT_CONF2		0x11
+#define LM3533_REG_BOOST_PWM		0x2c
+
+#define LM3533_REG_MAX			0xb2
+
+
+static struct mfd_cell lm3533_als_devs[] = {
+	{
+		.name	= "lm3533-als",
+		.id	= -1,
+	},
+};
+
+static struct mfd_cell lm3533_bl_devs[] = {
+	{
+		.name	= "lm3533-backlight",
+		.id	= 0,
+	},
+	{
+		.name	= "lm3533-backlight",
+		.id	= 1,
+	},
+};
+
+static struct mfd_cell lm3533_led_devs[] = {
+	{
+		.name	= "lm3533-leds",
+		.id	= 0,
+	},
+	{
+		.name	= "lm3533-leds",
+		.id	= 1,
+	},
+	{
+		.name	= "lm3533-leds",
+		.id	= 2,
+	},
+	{
+		.name	= "lm3533-leds",
+		.id	= 3,
+	},
+};
+
+int lm3533_read(struct lm3533 *lm3533, u8 reg, u8 *val)
+{
+	int tmp;
+	int ret;
+
+	ret = regmap_read(lm3533->regmap, reg, &tmp);
+	if (ret < 0) {
+		dev_err(lm3533->dev, "failed to read register %02x: %d\n",
+								reg, ret);
+		return ret;
+	}
+
+	*val = tmp;
+
+	dev_dbg(lm3533->dev, "read [%02x]: %02x\n", reg, *val);
+
+	return ret;
+}
+EXPORT_SYMBOL_GPL(lm3533_read);
+
+int lm3533_write(struct lm3533 *lm3533, u8 reg, u8 val)
+{
+	int ret;
+
+	dev_dbg(lm3533->dev, "write [%02x]: %02x\n", reg, val);
+
+	ret = regmap_write(lm3533->regmap, reg, val);
+	if (ret < 0) {
+		dev_err(lm3533->dev, "failed to write register %02x: %d\n",
+								reg, ret);
+	}
+
+	return ret;
+}
+EXPORT_SYMBOL_GPL(lm3533_write);
+
+int lm3533_update(struct lm3533 *lm3533, u8 reg, u8 val, u8 mask)
+{
+	int ret;
+
+	dev_dbg(lm3533->dev, "update [%02x]: %02x/%02x\n", reg, val, mask);
+
+	ret = regmap_update_bits(lm3533->regmap, reg, mask, val);
+	if (ret < 0) {
+		dev_err(lm3533->dev, "failed to update register %02x: %d\n",
+								reg, ret);
+	}
+
+	return ret;
+}
+EXPORT_SYMBOL_GPL(lm3533_update);
+
+static int lm3533_set_boost_freq(struct lm3533 *lm3533,
+						enum lm3533_boost_freq freq)
+{
+	int ret;
+
+	ret = lm3533_update(lm3533, LM3533_REG_BOOST_PWM,
+					freq << LM3533_BOOST_FREQ_SHIFT,
+					LM3533_BOOST_FREQ_MASK);
+	if (ret)
+		dev_err(lm3533->dev, "failed to set boost frequency\n");
+
+	return ret;
+}
+
+
+static int lm3533_set_boost_ovp(struct lm3533 *lm3533,
+						enum lm3533_boost_ovp ovp)
+{
+	int ret;
+
+	ret = lm3533_update(lm3533, LM3533_REG_BOOST_PWM,
+					ovp << LM3533_BOOST_OVP_SHIFT,
+					LM3533_BOOST_OVP_MASK);
+	if (ret)
+		dev_err(lm3533->dev, "failed to set boost ovp\n");
+
+	return ret;
+}
+
+/*
+ * HVLED output config -- output hvled controlled by backlight bl
+ */
+static int lm3533_set_hvled_config(struct lm3533 *lm3533, u8 hvled, u8 bl)
+{
+	u8 val;
+	u8 mask;
+	int shift;
+	int ret;
+
+	if (hvled == 0 || hvled > LM3533_HVLED_ID_MAX)
+		return -EINVAL;
+
+	if (bl > LM3533_BL_ID_MAX)
+		return -EINVAL;
+
+	shift = hvled - 1;
+	mask = LM3533_BL_ID_MASK << shift;
+	val = bl << shift;
+
+	ret = lm3533_update(lm3533, LM3533_REG_OUTPUT_CONF1, val, mask);
+	if (ret)
+		dev_err(lm3533->dev, "failed to set hvled config\n");
+
+	return ret;
+}
+
+/*
+ * LVLED output config -- output lvled controlled by LED led
+ */
+static int lm3533_set_lvled_config(struct lm3533 *lm3533, u8 lvled, u8 led)
+{
+	u8 reg;
+	u8 val;
+	u8 mask;
+	int shift;
+	int ret;
+
+	if (lvled == 0 || lvled > LM3533_LVLED_ID_MAX)
+		return -EINVAL;
+
+	if (led > LM3533_LED_ID_MAX)
+		return -EINVAL;
+
+	if (lvled < 4) {
+		reg = LM3533_REG_OUTPUT_CONF1;
+		shift = 2 * lvled;
+	} else {
+		reg = LM3533_REG_OUTPUT_CONF2;
+		shift = 2 * (lvled - 4);
+	}
+
+	mask = LM3533_LED_ID_MASK << shift;
+	val = led << shift;
+
+	ret = lm3533_update(lm3533, reg, val, mask);
+	if (ret)
+		dev_err(lm3533->dev, "failed to set lvled config\n");
+
+	return ret;
+}
+
+static void lm3533_enable(struct lm3533 *lm3533)
+{
+	if (gpio_is_valid(lm3533->gpio_hwen))
+		gpio_set_value(lm3533->gpio_hwen, 1);
+}
+
+static void lm3533_disable(struct lm3533 *lm3533)
+{
+	if (gpio_is_valid(lm3533->gpio_hwen))
+		gpio_set_value(lm3533->gpio_hwen, 0);
+}
+
+enum lm3533_attribute_type {
+	LM3533_ATTR_TYPE_BACKLIGHT,
+	LM3533_ATTR_TYPE_LED,
+};
+
+struct lm3533_device_attribute {
+	struct device_attribute dev_attr;
+	enum lm3533_attribute_type type;
+	union {
+		struct {
+			u8 id;
+		} output;
+	} u;
+};
+
+#define to_lm3533_dev_attr(_attr) \
+	container_of(_attr, struct lm3533_device_attribute, dev_attr)
+
+static ssize_t show_output(struct device *dev,
+				struct device_attribute *attr, char *buf)
+{
+	struct lm3533 *lm3533 = dev_get_drvdata(dev);
+	struct lm3533_device_attribute *lattr = to_lm3533_dev_attr(attr);
+	int id = lattr->u.output.id;
+	u8 reg;
+	u8 val;
+	u8 mask;
+	int shift;
+	int ret;
+
+	if (lattr->type == LM3533_ATTR_TYPE_BACKLIGHT) {
+		reg = LM3533_REG_OUTPUT_CONF1;
+		shift = id - 1;
+		mask = LM3533_BL_ID_MASK << shift;
+	} else {
+		if (id < 4) {
+			reg = LM3533_REG_OUTPUT_CONF1;
+			shift = 2 * id;
+		} else {
+			reg = LM3533_REG_OUTPUT_CONF2;
+			shift = 2 * (id - 4);
+		}
+		mask = LM3533_LED_ID_MASK << shift;
+	}
+
+	ret = lm3533_read(lm3533, reg, &val);
+	if (ret)
+		return ret;
+
+	val = (val & mask) >> shift;
+
+	return scnprintf(buf, PAGE_SIZE, "%u\n", val);
+}
+
+static ssize_t store_output(struct device *dev,
+					struct device_attribute *attr,
+					const char *buf, size_t len)
+{
+	struct lm3533 *lm3533 = dev_get_drvdata(dev);
+	struct lm3533_device_attribute *lattr = to_lm3533_dev_attr(attr);
+	int id = lattr->u.output.id;
+	u8 val;
+	int ret;
+
+	if (kstrtou8(buf, 0, &val))
+		return -EINVAL;
+
+	if (lattr->type == LM3533_ATTR_TYPE_BACKLIGHT)
+		ret = lm3533_set_hvled_config(lm3533, id, val);
+	else
+		ret = lm3533_set_lvled_config(lm3533, id, val);
+
+	if (ret)
+		return ret;
+
+	return len;
+}
+
+#define LM3533_OUTPUT_ATTR(_name, _mode, _show, _store, _type, _id) \
+	struct lm3533_device_attribute lm3533_dev_attr_##_name = \
+		{ .dev_attr	= __ATTR(_name, _mode, _show, _store), \
+		  .type		= _type, \
+		  .u.output	= { .id = _id }, }
+
+#define LM3533_OUTPUT_ATTR_RW(_name, _type, _id) \
+	LM3533_OUTPUT_ATTR(output_##_name, S_IRUGO | S_IWUSR, \
+					show_output, store_output, _type, _id)
+
+#define LM3533_OUTPUT_HVLED_ATTR_RW(_nr) \
+	LM3533_OUTPUT_ATTR_RW(hvled##_nr, LM3533_ATTR_TYPE_BACKLIGHT, _nr)
+#define LM3533_OUTPUT_LVLED_ATTR_RW(_nr) \
+	LM3533_OUTPUT_ATTR_RW(lvled##_nr, LM3533_ATTR_TYPE_LED, _nr)
+/*
+ * Output config:
+ *
+ * output_hvled<nr>	0-1
+ * output_lvled<nr>	0-3
+ */
+static LM3533_OUTPUT_HVLED_ATTR_RW(1);
+static LM3533_OUTPUT_HVLED_ATTR_RW(2);
+static LM3533_OUTPUT_LVLED_ATTR_RW(1);
+static LM3533_OUTPUT_LVLED_ATTR_RW(2);
+static LM3533_OUTPUT_LVLED_ATTR_RW(3);
+static LM3533_OUTPUT_LVLED_ATTR_RW(4);
+static LM3533_OUTPUT_LVLED_ATTR_RW(5);
+
+static struct attribute *lm3533_attributes[] = {
+	&lm3533_dev_attr_output_hvled1.dev_attr.attr,
+	&lm3533_dev_attr_output_hvled2.dev_attr.attr,
+	&lm3533_dev_attr_output_lvled1.dev_attr.attr,
+	&lm3533_dev_attr_output_lvled2.dev_attr.attr,
+	&lm3533_dev_attr_output_lvled3.dev_attr.attr,
+	&lm3533_dev_attr_output_lvled4.dev_attr.attr,
+	&lm3533_dev_attr_output_lvled5.dev_attr.attr,
+	NULL,
+};
+
+#define to_dev_attr(_attr) \
+	container_of(_attr, struct device_attribute, attr)
+
+static umode_t lm3533_attr_is_visible(struct kobject *kobj,
+					     struct attribute *attr, int n)
+{
+	struct device *dev = container_of(kobj, struct device, kobj);
+	struct lm3533 *lm3533 = dev_get_drvdata(dev);
+	struct device_attribute *dattr = to_dev_attr(attr);
+	struct lm3533_device_attribute *lattr = to_lm3533_dev_attr(dattr);
+	enum lm3533_attribute_type type = lattr->type;
+	umode_t mode = attr->mode;
+
+	if (!lm3533->have_backlights && type == LM3533_ATTR_TYPE_BACKLIGHT)
+		mode = 0;
+	else if (!lm3533->have_leds && type == LM3533_ATTR_TYPE_LED)
+		mode = 0;
+
+	return mode;
+}
+
+static struct attribute_group lm3533_attribute_group = {
+	.is_visible	= lm3533_attr_is_visible,
+	.attrs		= lm3533_attributes
+};
+
+static int __devinit lm3533_device_als_init(struct lm3533 *lm3533)
+{
+	struct lm3533_platform_data *pdata = lm3533->dev->platform_data;
+	int ret;
+
+	if (!pdata->als)
+		return 0;
+
+	lm3533_als_devs[0].platform_data = pdata->als;
+	lm3533_als_devs[0].pdata_size = sizeof(*pdata->als);
+
+	ret = mfd_add_devices(lm3533->dev, 0, lm3533_als_devs, 1, NULL, 0);
+	if (ret) {
+		dev_err(lm3533->dev, "failed to add ALS device\n");
+		return ret;
+	}
+
+	lm3533->have_als = 1;
+
+	return 0;
+}
+
+static int __devinit lm3533_device_bl_init(struct lm3533 *lm3533)
+{
+	struct lm3533_platform_data *pdata = lm3533->dev->platform_data;
+	int i;
+	int ret;
+
+	if (!pdata->backlights || pdata->num_backlights == 0)
+		return 0;
+
+	if (pdata->num_backlights > ARRAY_SIZE(lm3533_bl_devs))
+		pdata->num_backlights = ARRAY_SIZE(lm3533_bl_devs);
+
+	for (i = 0; i < pdata->num_backlights; ++i) {
+		lm3533_bl_devs[i].platform_data = &pdata->backlights[i];
+		lm3533_bl_devs[i].pdata_size = sizeof(pdata->backlights[i]);
+	}
+
+	ret = mfd_add_devices(lm3533->dev, 0, lm3533_bl_devs,
+					pdata->num_backlights, NULL, 0);
+	if (ret) {
+		dev_err(lm3533->dev, "failed to add backlight devices\n");
+		return ret;
+	}
+
+	lm3533->have_backlights = 1;
+
+	return 0;
+}
+
+static int __devinit lm3533_device_led_init(struct lm3533 *lm3533)
+{
+	struct lm3533_platform_data *pdata = lm3533->dev->platform_data;
+	int i;
+	int ret;
+
+	if (!pdata->leds || pdata->num_leds == 0)
+		return 0;
+
+	if (pdata->num_leds > ARRAY_SIZE(lm3533_led_devs))
+		pdata->num_leds = ARRAY_SIZE(lm3533_led_devs);
+
+	for (i = 0; i < pdata->num_leds; ++i) {
+		lm3533_led_devs[i].platform_data = &pdata->leds[i];
+		lm3533_led_devs[i].pdata_size = sizeof(pdata->leds[i]);
+	}
+
+	ret = mfd_add_devices(lm3533->dev, 0, lm3533_led_devs,
+						pdata->num_leds, NULL, 0);
+	if (ret) {
+		dev_err(lm3533->dev, "failed to add LED devices\n");
+		return ret;
+	}
+
+	lm3533->have_leds = 1;
+
+	return 0;
+}
+
+static int __devinit lm3533_device_setup(struct lm3533 *lm3533,
+					struct lm3533_platform_data *pdata)
+{
+	int ret;
+
+	ret = lm3533_set_boost_freq(lm3533, pdata->boost_freq);
+	if (ret)
+		return ret;
+
+	ret = lm3533_set_boost_ovp(lm3533, pdata->boost_ovp);
+	if (ret)
+		return ret;
+
+	return 0;
+}
+
+static int __devinit lm3533_device_init(struct lm3533 *lm3533)
+{
+	struct lm3533_platform_data *pdata = lm3533->dev->platform_data;
+	int ret;
+
+	dev_dbg(lm3533->dev, "%s\n", __func__);
+
+	if (!pdata) {
+		dev_err(lm3533->dev, "no platform data\n");
+		return -EINVAL;
+	}
+
+	lm3533->gpio_hwen = pdata->gpio_hwen;
+
+	dev_set_drvdata(lm3533->dev, lm3533);
+
+	if (gpio_is_valid(lm3533->gpio_hwen)) {
+		ret = gpio_request_one(lm3533->gpio_hwen, GPIOF_OUT_INIT_LOW,
+								"lm3533-hwen");
+		if (ret < 0) {
+			dev_err(lm3533->dev,
+				"failed to request HWEN GPIO %d\n",
+				lm3533->gpio_hwen);
+			return ret;
+		}
+	}
+
+	lm3533_enable(lm3533);
+
+	ret = lm3533_device_setup(lm3533, pdata);
+	if (ret)
+		goto err_disable;
+
+	lm3533_device_als_init(lm3533);
+	lm3533_device_bl_init(lm3533);
+	lm3533_device_led_init(lm3533);
+
+	ret = sysfs_create_group(&lm3533->dev->kobj, &lm3533_attribute_group);
+	if (ret < 0) {
+		dev_err(lm3533->dev, "failed to create sysfs attributes\n");
+		goto err_unregister;
+	}
+
+	return 0;
+
+err_unregister:
+	mfd_remove_devices(lm3533->dev);
+err_disable:
+	lm3533_disable(lm3533);
+	if (gpio_is_valid(lm3533->gpio_hwen))
+		gpio_free(lm3533->gpio_hwen);
+
+	return ret;
+}
+
+static void __devexit lm3533_device_exit(struct lm3533 *lm3533)
+{
+	dev_dbg(lm3533->dev, "%s\n", __func__);
+
+	sysfs_remove_group(&lm3533->dev->kobj, &lm3533_attribute_group);
+
+	mfd_remove_devices(lm3533->dev);
+	lm3533_disable(lm3533);
+	if (gpio_is_valid(lm3533->gpio_hwen))
+		gpio_free(lm3533->gpio_hwen);
+}
+
+static bool lm3533_readable_register(struct device *dev, unsigned int reg)
+{
+	switch (reg) {
+	case 0x10 ... 0x2c:
+	case 0x30 ... 0x38:
+	case 0x40 ... 0x45:
+	case 0x50 ... 0x57:
+	case 0x60 ... 0x6e:
+	case 0x70 ... 0x75:
+	case 0x80 ... 0x85:
+	case 0x90 ... 0x95:
+	case 0xa0 ... 0xa5:
+	case 0xb0 ... 0xb2:
+		return true;
+	default:
+		return false;
+	}
+}
+
+static bool lm3533_volatile_register(struct device *dev, unsigned int reg)
+{
+	switch (reg) {
+	case 0x34 ... 0x36:	/* zone */
+	case 0x37 ... 0x38:	/* adc */
+	case 0xb0 ... 0xb1:	/* fault */
+		return true;
+	default:
+		return false;
+	}
+}
+
+static bool lm3533_precious_register(struct device *dev, unsigned int reg)
+{
+	switch (reg) {
+	case 0x34:		/* zone */
+		return true;
+	default:
+		return false;
+	}
+}
+
+static struct regmap_config regmap_config = {
+	.reg_bits	= 8,
+	.val_bits	= 8,
+	.max_register	= LM3533_REG_MAX,
+	.readable_reg	= lm3533_readable_register,
+	.volatile_reg	= lm3533_volatile_register,
+	.precious_reg	= lm3533_precious_register,
+};
+
+static int __devinit lm3533_i2c_probe(struct i2c_client *i2c,
+					const struct i2c_device_id *id)
+{
+	struct lm3533 *lm3533;
+	int ret;
+
+	dev_dbg(&i2c->dev, "%s\n", __func__);
+
+	lm3533 = devm_kzalloc(&i2c->dev, sizeof(*lm3533), GFP_KERNEL);
+	if (!lm3533)
+		return -ENOMEM;
+
+	i2c_set_clientdata(i2c, lm3533);
+
+	lm3533->regmap = devm_regmap_init_i2c(i2c, &regmap_config);
+	if (IS_ERR(lm3533->regmap))
+		return PTR_ERR(lm3533->regmap);
+
+	lm3533->dev = &i2c->dev;
+	lm3533->irq = i2c->irq;
+
+	ret = lm3533_device_init(lm3533);
+	if (ret)
+		return ret;
+
+	return 0;
+}
+
+static int __devexit lm3533_i2c_remove(struct i2c_client *i2c)
+{
+	struct lm3533 *lm3533 = i2c_get_clientdata(i2c);
+
+	dev_dbg(&i2c->dev, "%s\n", __func__);
+
+	lm3533_device_exit(lm3533);
+
+	return 0;
+}
+
+static const struct i2c_device_id lm3533_i2c_ids[] = {
+	{ "lm3533", 0 },
+	{ },
+};
+MODULE_DEVICE_TABLE(i2c, lm3533_i2c_ids);
+
+static struct i2c_driver lm3533_i2c_driver = {
+	.driver = {
+		   .name = "lm3533",
+		   .owner = THIS_MODULE,
+	},
+	.id_table	= lm3533_i2c_ids,
+	.probe		= lm3533_i2c_probe,
+	.remove		= __devexit_p(lm3533_i2c_remove),
+};
+
+static int __init lm3533_i2c_init(void)
+{
+	return i2c_add_driver(&lm3533_i2c_driver);
+}
+subsys_initcall(lm3533_i2c_init);
+
+static void __exit lm3533_i2c_exit(void)
+{
+	i2c_del_driver(&lm3533_i2c_driver);
+}
+module_exit(lm3533_i2c_exit);
+
+MODULE_AUTHOR("Johan Hovold <jhovold@gmail.com>");
+MODULE_DESCRIPTION("LM3533 Core");
+MODULE_LICENSE("GPL");
diff --git a/drivers/mfd/lm3533-ctrlbank.c b/drivers/mfd/lm3533-ctrlbank.c
new file mode 100644
index 0000000..a4cb7a5
--- /dev/null
+++ b/drivers/mfd/lm3533-ctrlbank.c
@@ -0,0 +1,148 @@
+/*
+ * lm3533-ctrlbank.c -- LM3533 Generic Control Bank interface
+ *
+ * Copyright (C) 2011-2012 Texas Instruments
+ *
+ * Author: Johan Hovold <jhovold@gmail.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under  the terms of the GNU General  Public License as published by the
+ * Free Software Foundation;  either version 2 of the License, or (at your
+ * option) any later version.
+ */
+
+#include <linux/device.h>
+#include <linux/module.h>
+
+#include <linux/mfd/lm3533.h>
+
+
+#define LM3533_MAX_CURRENT_MIN		5000
+#define LM3533_MAX_CURRENT_MAX		29800
+#define LM3533_MAX_CURRENT_STEP		800
+
+#define LM3533_BRIGHTNESS_MAX		255
+#define LM3533_PWM_MAX			0x3f
+
+#define LM3533_REG_PWM_BASE		0x14
+#define LM3533_REG_MAX_CURRENT_BASE	0x1f
+#define LM3533_REG_CTRLBANK_ENABLE	0x27
+#define LM3533_REG_BRIGHTNESS_BASE	0x40
+
+
+static inline u8 lm3533_ctrlbank_get_reg(struct lm3533_ctrlbank *cb, u8 base)
+{
+	return base + cb->id;
+}
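+/*
+ * Example (worked from the register bases above): a control bank with
+ * id 2 uses brightness register 0x40 + 2 = 0x42 and PWM register
+ * 0x14 + 2 = 0x16.
+ */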
+
+int lm3533_ctrlbank_enable(struct lm3533_ctrlbank *cb)
+{
+	u8 mask;
+	int ret;
+
+	dev_dbg(cb->dev, "%s - %d\n", __func__, cb->id);
+
+	mask = 1 << cb->id;
+	ret = lm3533_update(cb->lm3533, LM3533_REG_CTRLBANK_ENABLE,
+								mask, mask);
+	if (ret)
+		dev_err(cb->dev, "failed to enable ctrlbank %d\n", cb->id);
+
+	return ret;
+}
+EXPORT_SYMBOL_GPL(lm3533_ctrlbank_enable);
+
+int lm3533_ctrlbank_disable(struct lm3533_ctrlbank *cb)
+{
+	u8 mask;
+	int ret;
+
+	dev_dbg(cb->dev, "%s - %d\n", __func__, cb->id);
+
+	mask = 1 << cb->id;
+	ret = lm3533_update(cb->lm3533, LM3533_REG_CTRLBANK_ENABLE, 0, mask);
+	if (ret)
+		dev_err(cb->dev, "failed to disable ctrlbank %d\n", cb->id);
+
+	return ret;
+}
+EXPORT_SYMBOL_GPL(lm3533_ctrlbank_disable);
+
+/*
+ * Full-scale current.
+ *
+ * imax		5000 - 29800 uA (800 uA step)
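+ *
+ * e.g. imax = 19800 uA is encoded as (19800 - 5000) / 800 = 18 (0x12).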
+ */
+int lm3533_ctrlbank_set_max_current(struct lm3533_ctrlbank *cb, u16 imax)
+{
+	u8 reg;
+	u8 val;
+	int ret;
+
+	if (imax < LM3533_MAX_CURRENT_MIN || imax > LM3533_MAX_CURRENT_MAX)
+		return -EINVAL;
+
+	val = (imax - LM3533_MAX_CURRENT_MIN) / LM3533_MAX_CURRENT_STEP;
+
+	reg = lm3533_ctrlbank_get_reg(cb, LM3533_REG_MAX_CURRENT_BASE);
+	ret = lm3533_write(cb->lm3533, reg, val);
+	if (ret)
+		dev_err(cb->dev, "failed to set max current\n");
+
+	return ret;
+}
+EXPORT_SYMBOL_GPL(lm3533_ctrlbank_set_max_current);
+
+#define lm3533_ctrlbank_set(_name, _NAME)				\
+int lm3533_ctrlbank_set_##_name(struct lm3533_ctrlbank *cb, u8 val)	\
+{									\
+	u8 reg;								\
+	int ret;							\
+									\
+	if (val > LM3533_##_NAME##_MAX)					\
+		return -EINVAL;						\
+									\
+	reg = lm3533_ctrlbank_get_reg(cb, LM3533_REG_##_NAME##_BASE);	\
+	ret = lm3533_write(cb->lm3533, reg, val);			\
+	if (ret)							\
+		dev_err(cb->dev, "failed to set " #_name "\n");		\
+									\
+	return ret;							\
+}									\
+EXPORT_SYMBOL_GPL(lm3533_ctrlbank_set_##_name);
+
+#define lm3533_ctrlbank_get(_name, _NAME)				\
+int lm3533_ctrlbank_get_##_name(struct lm3533_ctrlbank *cb, u8 *val)	\
+{									\
+	u8 reg;								\
+	int ret;							\
+									\
+	reg = lm3533_ctrlbank_get_reg(cb, LM3533_REG_##_NAME##_BASE);	\
+	ret = lm3533_read(cb->lm3533, reg, val);			\
+	if (ret)							\
+		dev_err(cb->dev, "failed to get " #_name "\n");		\
+									\
+	return ret;							\
+}									\
+EXPORT_SYMBOL_GPL(lm3533_ctrlbank_get_##_name);
+
+lm3533_ctrlbank_set(brightness, BRIGHTNESS);
+lm3533_ctrlbank_get(brightness, BRIGHTNESS);
+
+/*
+ * PWM-input control mask:
+ *
+ *   bit 5 - PWM-input enabled in Zone 4
+ *   bit 4 - PWM-input enabled in Zone 3
+ *   bit 3 - PWM-input enabled in Zone 2
+ *   bit 2 - PWM-input enabled in Zone 1
+ *   bit 1 - PWM-input enabled in Zone 0
+ *   bit 0 - PWM-input enabled
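+ *
+ *   e.g. a mask of 0x0f enables the PWM input (bit 0) and activates it
+ *   in zones 0-2 (bits 1-3).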
+ */
+lm3533_ctrlbank_set(pwm, PWM);
+lm3533_ctrlbank_get(pwm, PWM);
+
+
+MODULE_AUTHOR("Johan Hovold <jhovold@gmail.com>");
+MODULE_DESCRIPTION("LM3533 Control Bank interface");
+MODULE_LICENSE("GPL");
diff --git a/drivers/mfd/lpc_ich.c b/drivers/mfd/lpc_ich.c
new file mode 100644
index 0000000..027cc8f
--- /dev/null
+++ b/drivers/mfd/lpc_ich.c
@@ -0,0 +1,888 @@
+/*
+ *  lpc_ich.c - LPC interface for Intel ICH
+ *
+ *  LPC bridge function of the Intel ICH contains many other
+ *  functional units, such as Interrupt controllers, Timers,
+ *  Power Management, System Management, GPIO, RTC, and LPC
+ *  Configuration Registers.
+ *
+ *  This driver is derived from lpc_sch.
+ *
+ *  Copyright (c) 2011 Extreme Engineering Solution, Inc.
+ *  Author: Aaron Sierra <asierra@xes-inc.com>
+ *
+ *  This program is free software; you can redistribute it and/or modify
+ *  it under the terms of the GNU General Public License 2 as published
+ *  by the Free Software Foundation.
+ *
+ *  This program is distributed in the hope that it will be useful,
+ *  but WITHOUT ANY WARRANTY; without even the implied warranty of
+ *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ *  GNU General Public License for more details.
+ *
+ *  You should have received a copy of the GNU General Public License
+ *  along with this program; see the file COPYING.  If not, write to
+ *  the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.
+ *
+ *  This driver supports the following I/O Controller hubs:
+ *	(See the Intel documentation on http://developer.intel.com.)
+ *	document number 290655-003, 290677-014: 82801AA (ICH), 82801AB (ICH0)
+ *	document number 290687-002, 298242-027: 82801BA (ICH2)
+ *	document number 290733-003, 290739-013: 82801CA (ICH3-S)
+ *	document number 290716-001, 290718-007: 82801CAM (ICH3-M)
+ *	document number 290744-001, 290745-025: 82801DB (ICH4)
+ *	document number 252337-001, 252663-008: 82801DBM (ICH4-M)
+ *	document number 273599-001, 273645-002: 82801E (C-ICH)
+ *	document number 252516-001, 252517-028: 82801EB (ICH5), 82801ER (ICH5R)
+ *	document number 300641-004, 300884-013: 6300ESB
+ *	document number 301473-002, 301474-026: 82801F (ICH6)
+ *	document number 313082-001, 313075-006: 631xESB, 632xESB
+ *	document number 307013-003, 307014-024: 82801G (ICH7)
+ *	document number 322896-001, 322897-001: NM10
+ *	document number 313056-003, 313057-017: 82801H (ICH8)
+ *	document number 316972-004, 316973-012: 82801I (ICH9)
+ *	document number 319973-002, 319974-002: 82801J (ICH10)
+ *	document number 322169-001, 322170-003: 5 Series, 3400 Series (PCH)
+ *	document number 320066-003, 320257-008: EP80579 (IICH)
+ *	document number 324645-001, 324646-001: Cougar Point (CPT)
+ *	document number TBD : Patsburg (PBG)
+ *	document number TBD : DH89xxCC
+ *	document number TBD : Panther Point
+ *	document number TBD : Lynx Point
+ */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/errno.h>
+#include <linux/acpi.h>
+#include <linux/pci.h>
+#include <linux/mfd/core.h>
+#include <linux/mfd/lpc_ich.h>
+
+#define ACPIBASE		0x40
+#define ACPIBASE_GPE_OFF	0x28
+#define ACPIBASE_GPE_END	0x2f
+#define ACPIBASE_SMI_OFF	0x30
+#define ACPIBASE_SMI_END	0x33
+#define ACPIBASE_TCO_OFF	0x60
+#define ACPIBASE_TCO_END	0x7f
+#define ACPICTRL		0x44
+
+#define ACPIBASE_GCS_OFF	0x3410
+#define ACPIBASE_GCS_END	0x3414
+
+#define GPIOBASE		0x48
+#define GPIOCTRL		0x4C
+
+#define RCBABASE		0xf0
+
+#define wdt_io_res(i) wdt_res(0, i)
+#define wdt_mem_res(i) wdt_res(ICH_RES_MEM_OFF, i)
+#define wdt_res(b, i) (&wdt_ich_res[(b) + (i)])
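+/*
+ * e.g. wdt_io_res(ICH_RES_IO_TCO) expands to &wdt_ich_res[ICH_RES_IO_TCO]
+ * and wdt_mem_res(ICH_RES_MEM_GCS) to
+ * &wdt_ich_res[ICH_RES_MEM_OFF + ICH_RES_MEM_GCS].
+ */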
+
+static int lpc_ich_acpi_save = -1;
+static int lpc_ich_gpio_save = -1;
+
+static struct resource wdt_ich_res[] = {
+	/* ACPI - TCO */
+	{
+		.flags = IORESOURCE_IO,
+	},
+	/* ACPI - SMI */
+	{
+		.flags = IORESOURCE_IO,
+	},
+	/* GCS */
+	{
+		.flags = IORESOURCE_MEM,
+	},
+};
+
+static struct resource gpio_ich_res[] = {
+	/* GPIO */
+	{
+		.flags = IORESOURCE_IO,
+	},
+	/* ACPI - GPE0 */
+	{
+		.flags = IORESOURCE_IO,
+	},
+};
+
+enum lpc_cells {
+	LPC_WDT = 0,
+	LPC_GPIO,
+};
+
+static struct mfd_cell lpc_ich_cells[] = {
+	[LPC_WDT] = {
+		.name = "iTCO_wdt",
+		.num_resources = ARRAY_SIZE(wdt_ich_res),
+		.resources = wdt_ich_res,
+		.ignore_resource_conflicts = true,
+	},
+	[LPC_GPIO] = {
+		.name = "gpio_ich",
+		.num_resources = ARRAY_SIZE(gpio_ich_res),
+		.resources = gpio_ich_res,
+		.ignore_resource_conflicts = true,
+	},
+};
+
+/* chipset related info */
+enum lpc_chipsets {
+	LPC_ICH = 0,	/* ICH */
+	LPC_ICH0,	/* ICH0 */
+	LPC_ICH2,	/* ICH2 */
+	LPC_ICH2M,	/* ICH2-M */
+	LPC_ICH3,	/* ICH3-S */
+	LPC_ICH3M,	/* ICH3-M */
+	LPC_ICH4,	/* ICH4 */
+	LPC_ICH4M,	/* ICH4-M */
+	LPC_CICH,	/* C-ICH */
+	LPC_ICH5,	/* ICH5 & ICH5R */
+	LPC_6300ESB,	/* 6300ESB */
+	LPC_ICH6,	/* ICH6 & ICH6R */
+	LPC_ICH6M,	/* ICH6-M */
+	LPC_ICH6W,	/* ICH6W & ICH6RW */
+	LPC_631XESB,	/* 631xESB/632xESB */
+	LPC_ICH7,	/* ICH7 & ICH7R */
+	LPC_ICH7DH,	/* ICH7DH */
+	LPC_ICH7M,	/* ICH7-M & ICH7-U */
+	LPC_ICH7MDH,	/* ICH7-M DH */
+	LPC_NM10,	/* NM10 */
+	LPC_ICH8,	/* ICH8 & ICH8R */
+	LPC_ICH8DH,	/* ICH8DH */
+	LPC_ICH8DO,	/* ICH8DO */
+	LPC_ICH8M,	/* ICH8M */
+	LPC_ICH8ME,	/* ICH8M-E */
+	LPC_ICH9,	/* ICH9 */
+	LPC_ICH9R,	/* ICH9R */
+	LPC_ICH9DH,	/* ICH9DH */
+	LPC_ICH9DO,	/* ICH9DO */
+	LPC_ICH9M,	/* ICH9M */
+	LPC_ICH9ME,	/* ICH9M-E */
+	LPC_ICH10,	/* ICH10 */
+	LPC_ICH10R,	/* ICH10R */
+	LPC_ICH10D,	/* ICH10D */
+	LPC_ICH10DO,	/* ICH10DO */
+	LPC_PCH,	/* PCH Desktop Full Featured */
+	LPC_PCHM,	/* PCH Mobile Full Featured */
+	LPC_P55,	/* P55 */
+	LPC_PM55,	/* PM55 */
+	LPC_H55,	/* H55 */
+	LPC_QM57,	/* QM57 */
+	LPC_H57,	/* H57 */
+	LPC_HM55,	/* HM55 */
+	LPC_Q57,	/* Q57 */
+	LPC_HM57,	/* HM57 */
+	LPC_PCHMSFF,	/* PCH Mobile SFF Full Featured */
+	LPC_QS57,	/* QS57 */
+	LPC_3400,	/* 3400 */
+	LPC_3420,	/* 3420 */
+	LPC_3450,	/* 3450 */
+	LPC_EP80579,	/* EP80579 */
+	LPC_CPT,	/* Cougar Point */
+	LPC_CPTD,	/* Cougar Point Desktop */
+	LPC_CPTM,	/* Cougar Point Mobile */
+	LPC_PBG,	/* Patsburg */
+	LPC_DH89XXCC,	/* DH89xxCC */
+	LPC_PPT,	/* Panther Point */
+	LPC_LPT,	/* Lynx Point */
+};
+
+static struct lpc_ich_info lpc_chipset_info[] __devinitdata = {
+	[LPC_ICH] = {
+		.name = "ICH",
+		.iTCO_version = 1,
+	},
+	[LPC_ICH0] = {
+		.name = "ICH0",
+		.iTCO_version = 1,
+	},
+	[LPC_ICH2] = {
+		.name = "ICH2",
+		.iTCO_version = 1,
+	},
+	[LPC_ICH2M] = {
+		.name = "ICH2-M",
+		.iTCO_version = 1,
+	},
+	[LPC_ICH3] = {
+		.name = "ICH3-S",
+		.iTCO_version = 1,
+	},
+	[LPC_ICH3M] = {
+		.name = "ICH3-M",
+		.iTCO_version = 1,
+	},
+	[LPC_ICH4] = {
+		.name = "ICH4",
+		.iTCO_version = 1,
+	},
+	[LPC_ICH4M] = {
+		.name = "ICH4-M",
+		.iTCO_version = 1,
+	},
+	[LPC_CICH] = {
+		.name = "C-ICH",
+		.iTCO_version = 1,
+	},
+	[LPC_ICH5] = {
+		.name = "ICH5 or ICH5R",
+		.iTCO_version = 1,
+	},
+	[LPC_6300ESB] = {
+		.name = "6300ESB",
+		.iTCO_version = 1,
+	},
+	[LPC_ICH6] = {
+		.name = "ICH6 or ICH6R",
+		.iTCO_version = 2,
+		.gpio_version = ICH_V6_GPIO,
+	},
+	[LPC_ICH6M] = {
+		.name = "ICH6-M",
+		.iTCO_version = 2,
+		.gpio_version = ICH_V6_GPIO,
+	},
+	[LPC_ICH6W] = {
+		.name = "ICH6W or ICH6RW",
+		.iTCO_version = 2,
+		.gpio_version = ICH_V6_GPIO,
+	},
+	[LPC_631XESB] = {
+		.name = "631xESB/632xESB",
+		.iTCO_version = 2,
+		.gpio_version = ICH_V6_GPIO,
+	},
+	[LPC_ICH7] = {
+		.name = "ICH7 or ICH7R",
+		.iTCO_version = 2,
+		.gpio_version = ICH_V7_GPIO,
+	},
+	[LPC_ICH7DH] = {
+		.name = "ICH7DH",
+		.iTCO_version = 2,
+		.gpio_version = ICH_V7_GPIO,
+	},
+	[LPC_ICH7M] = {
+		.name = "ICH7-M or ICH7-U",
+		.iTCO_version = 2,
+		.gpio_version = ICH_V7_GPIO,
+	},
+	[LPC_ICH7MDH] = {
+		.name = "ICH7-M DH",
+		.iTCO_version = 2,
+		.gpio_version = ICH_V7_GPIO,
+	},
+	[LPC_NM10] = {
+		.name = "NM10",
+		.iTCO_version = 2,
+	},
+	[LPC_ICH8] = {
+		.name = "ICH8 or ICH8R",
+		.iTCO_version = 2,
+		.gpio_version = ICH_V7_GPIO,
+	},
+	[LPC_ICH8DH] = {
+		.name = "ICH8DH",
+		.iTCO_version = 2,
+		.gpio_version = ICH_V7_GPIO,
+	},
+	[LPC_ICH8DO] = {
+		.name = "ICH8DO",
+		.iTCO_version = 2,
+		.gpio_version = ICH_V7_GPIO,
+	},
+	[LPC_ICH8M] = {
+		.name = "ICH8M",
+		.iTCO_version = 2,
+		.gpio_version = ICH_V7_GPIO,
+	},
+	[LPC_ICH8ME] = {
+		.name = "ICH8M-E",
+		.iTCO_version = 2,
+		.gpio_version = ICH_V7_GPIO,
+	},
+	[LPC_ICH9] = {
+		.name = "ICH9",
+		.iTCO_version = 2,
+		.gpio_version = ICH_V9_GPIO,
+	},
+	[LPC_ICH9R] = {
+		.name = "ICH9R",
+		.iTCO_version = 2,
+		.gpio_version = ICH_V9_GPIO,
+	},
+	[LPC_ICH9DH] = {
+		.name = "ICH9DH",
+		.iTCO_version = 2,
+		.gpio_version = ICH_V9_GPIO,
+	},
+	[LPC_ICH9DO] = {
+		.name = "ICH9DO",
+		.iTCO_version = 2,
+		.gpio_version = ICH_V9_GPIO,
+	},
+	[LPC_ICH9M] = {
+		.name = "ICH9M",
+		.iTCO_version = 2,
+		.gpio_version = ICH_V9_GPIO,
+	},
+	[LPC_ICH9ME] = {
+		.name = "ICH9M-E",
+		.iTCO_version = 2,
+		.gpio_version = ICH_V9_GPIO,
+	},
+	[LPC_ICH10] = {
+		.name = "ICH10",
+		.iTCO_version = 2,
+		.gpio_version = ICH_V10CONS_GPIO,
+	},
+	[LPC_ICH10R] = {
+		.name = "ICH10R",
+		.iTCO_version = 2,
+		.gpio_version = ICH_V10CONS_GPIO,
+	},
+	[LPC_ICH10D] = {
+		.name = "ICH10D",
+		.iTCO_version = 2,
+		.gpio_version = ICH_V10CORP_GPIO,
+	},
+	[LPC_ICH10DO] = {
+		.name = "ICH10DO",
+		.iTCO_version = 2,
+		.gpio_version = ICH_V10CORP_GPIO,
+	},
+	[LPC_PCH] = {
+		.name = "PCH Desktop Full Featured",
+		.iTCO_version = 2,
+		.gpio_version = ICH_V5_GPIO,
+	},
+	[LPC_PCHM] = {
+		.name = "PCH Mobile Full Featured",
+		.iTCO_version = 2,
+		.gpio_version = ICH_V5_GPIO,
+	},
+	[LPC_P55] = {
+		.name = "P55",
+		.iTCO_version = 2,
+		.gpio_version = ICH_V5_GPIO,
+	},
+	[LPC_PM55] = {
+		.name = "PM55",
+		.iTCO_version = 2,
+		.gpio_version = ICH_V5_GPIO,
+	},
+	[LPC_H55] = {
+		.name = "H55",
+		.iTCO_version = 2,
+		.gpio_version = ICH_V5_GPIO,
+	},
+	[LPC_QM57] = {
+		.name = "QM57",
+		.iTCO_version = 2,
+		.gpio_version = ICH_V5_GPIO,
+	},
+	[LPC_H57] = {
+		.name = "H57",
+		.iTCO_version = 2,
+		.gpio_version = ICH_V5_GPIO,
+	},
+	[LPC_HM55] = {
+		.name = "HM55",
+		.iTCO_version = 2,
+		.gpio_version = ICH_V5_GPIO,
+	},
+	[LPC_Q57] = {
+		.name = "Q57",
+		.iTCO_version = 2,
+		.gpio_version = ICH_V5_GPIO,
+	},
+	[LPC_HM57] = {
+		.name = "HM57",
+		.iTCO_version = 2,
+		.gpio_version = ICH_V5_GPIO,
+	},
+	[LPC_PCHMSFF] = {
+		.name = "PCH Mobile SFF Full Featured",
+		.iTCO_version = 2,
+		.gpio_version = ICH_V5_GPIO,
+	},
+	[LPC_QS57] = {
+		.name = "QS57",
+		.iTCO_version = 2,
+		.gpio_version = ICH_V5_GPIO,
+	},
+	[LPC_3400] = {
+		.name = "3400",
+		.iTCO_version = 2,
+		.gpio_version = ICH_V5_GPIO,
+	},
+	[LPC_3420] = {
+		.name = "3420",
+		.iTCO_version = 2,
+		.gpio_version = ICH_V5_GPIO,
+	},
+	[LPC_3450] = {
+		.name = "3450",
+		.iTCO_version = 2,
+		.gpio_version = ICH_V5_GPIO,
+	},
+	[LPC_EP80579] = {
+		.name = "EP80579",
+		.iTCO_version = 2,
+	},
+	[LPC_CPT] = {
+		.name = "Cougar Point",
+		.iTCO_version = 2,
+		.gpio_version = ICH_V5_GPIO,
+	},
+	[LPC_CPTD] = {
+		.name = "Cougar Point Desktop",
+		.iTCO_version = 2,
+		.gpio_version = ICH_V5_GPIO,
+	},
+	[LPC_CPTM] = {
+		.name = "Cougar Point Mobile",
+		.iTCO_version = 2,
+		.gpio_version = ICH_V5_GPIO,
+	},
+	[LPC_PBG] = {
+		.name = "Patsburg",
+		.iTCO_version = 2,
+	},
+	[LPC_DH89XXCC] = {
+		.name = "DH89xxCC",
+		.iTCO_version = 2,
+	},
+	[LPC_PPT] = {
+		.name = "Panther Point",
+		.iTCO_version = 2,
+	},
+	[LPC_LPT] = {
+		.name = "Lynx Point",
+		.iTCO_version = 2,
+	},
+};
+
+/*
+ * This table lists the supported LPC bridge PCI ids and is exported
+ * via MODULE_DEVICE_TABLE.  The I/O Controller Hub also contains other
+ * functional units, which are handled by separate drivers.
+ */
+static DEFINE_PCI_DEVICE_TABLE(lpc_ich_ids) = {
+	{ PCI_VDEVICE(INTEL, 0x2410), LPC_ICH},
+	{ PCI_VDEVICE(INTEL, 0x2420), LPC_ICH0},
+	{ PCI_VDEVICE(INTEL, 0x2440), LPC_ICH2},
+	{ PCI_VDEVICE(INTEL, 0x244c), LPC_ICH2M},
+	{ PCI_VDEVICE(INTEL, 0x2480), LPC_ICH3},
+	{ PCI_VDEVICE(INTEL, 0x248c), LPC_ICH3M},
+	{ PCI_VDEVICE(INTEL, 0x24c0), LPC_ICH4},
+	{ PCI_VDEVICE(INTEL, 0x24cc), LPC_ICH4M},
+	{ PCI_VDEVICE(INTEL, 0x2450), LPC_CICH},
+	{ PCI_VDEVICE(INTEL, 0x24d0), LPC_ICH5},
+	{ PCI_VDEVICE(INTEL, 0x25a1), LPC_6300ESB},
+	{ PCI_VDEVICE(INTEL, 0x2640), LPC_ICH6},
+	{ PCI_VDEVICE(INTEL, 0x2641), LPC_ICH6M},
+	{ PCI_VDEVICE(INTEL, 0x2642), LPC_ICH6W},
+	{ PCI_VDEVICE(INTEL, 0x2670), LPC_631XESB},
+	{ PCI_VDEVICE(INTEL, 0x2671), LPC_631XESB},
+	{ PCI_VDEVICE(INTEL, 0x2672), LPC_631XESB},
+	{ PCI_VDEVICE(INTEL, 0x2673), LPC_631XESB},
+	{ PCI_VDEVICE(INTEL, 0x2674), LPC_631XESB},
+	{ PCI_VDEVICE(INTEL, 0x2675), LPC_631XESB},
+	{ PCI_VDEVICE(INTEL, 0x2676), LPC_631XESB},
+	{ PCI_VDEVICE(INTEL, 0x2677), LPC_631XESB},
+	{ PCI_VDEVICE(INTEL, 0x2678), LPC_631XESB},
+	{ PCI_VDEVICE(INTEL, 0x2679), LPC_631XESB},
+	{ PCI_VDEVICE(INTEL, 0x267a), LPC_631XESB},
+	{ PCI_VDEVICE(INTEL, 0x267b), LPC_631XESB},
+	{ PCI_VDEVICE(INTEL, 0x267c), LPC_631XESB},
+	{ PCI_VDEVICE(INTEL, 0x267d), LPC_631XESB},
+	{ PCI_VDEVICE(INTEL, 0x267e), LPC_631XESB},
+	{ PCI_VDEVICE(INTEL, 0x267f), LPC_631XESB},
+	{ PCI_VDEVICE(INTEL, 0x27b8), LPC_ICH7},
+	{ PCI_VDEVICE(INTEL, 0x27b0), LPC_ICH7DH},
+	{ PCI_VDEVICE(INTEL, 0x27b9), LPC_ICH7M},
+	{ PCI_VDEVICE(INTEL, 0x27bd), LPC_ICH7MDH},
+	{ PCI_VDEVICE(INTEL, 0x27bc), LPC_NM10},
+	{ PCI_VDEVICE(INTEL, 0x2810), LPC_ICH8},
+	{ PCI_VDEVICE(INTEL, 0x2812), LPC_ICH8DH},
+	{ PCI_VDEVICE(INTEL, 0x2814), LPC_ICH8DO},
+	{ PCI_VDEVICE(INTEL, 0x2815), LPC_ICH8M},
+	{ PCI_VDEVICE(INTEL, 0x2811), LPC_ICH8ME},
+	{ PCI_VDEVICE(INTEL, 0x2918), LPC_ICH9},
+	{ PCI_VDEVICE(INTEL, 0x2916), LPC_ICH9R},
+	{ PCI_VDEVICE(INTEL, 0x2912), LPC_ICH9DH},
+	{ PCI_VDEVICE(INTEL, 0x2914), LPC_ICH9DO},
+	{ PCI_VDEVICE(INTEL, 0x2919), LPC_ICH9M},
+	{ PCI_VDEVICE(INTEL, 0x2917), LPC_ICH9ME},
+	{ PCI_VDEVICE(INTEL, 0x3a18), LPC_ICH10},
+	{ PCI_VDEVICE(INTEL, 0x3a16), LPC_ICH10R},
+	{ PCI_VDEVICE(INTEL, 0x3a1a), LPC_ICH10D},
+	{ PCI_VDEVICE(INTEL, 0x3a14), LPC_ICH10DO},
+	{ PCI_VDEVICE(INTEL, 0x3b00), LPC_PCH},
+	{ PCI_VDEVICE(INTEL, 0x3b01), LPC_PCHM},
+	{ PCI_VDEVICE(INTEL, 0x3b02), LPC_P55},
+	{ PCI_VDEVICE(INTEL, 0x3b03), LPC_PM55},
+	{ PCI_VDEVICE(INTEL, 0x3b06), LPC_H55},
+	{ PCI_VDEVICE(INTEL, 0x3b07), LPC_QM57},
+	{ PCI_VDEVICE(INTEL, 0x3b08), LPC_H57},
+	{ PCI_VDEVICE(INTEL, 0x3b09), LPC_HM55},
+	{ PCI_VDEVICE(INTEL, 0x3b0a), LPC_Q57},
+	{ PCI_VDEVICE(INTEL, 0x3b0b), LPC_HM57},
+	{ PCI_VDEVICE(INTEL, 0x3b0d), LPC_PCHMSFF},
+	{ PCI_VDEVICE(INTEL, 0x3b0f), LPC_QS57},
+	{ PCI_VDEVICE(INTEL, 0x3b12), LPC_3400},
+	{ PCI_VDEVICE(INTEL, 0x3b14), LPC_3420},
+	{ PCI_VDEVICE(INTEL, 0x3b16), LPC_3450},
+	{ PCI_VDEVICE(INTEL, 0x5031), LPC_EP80579},
+	{ PCI_VDEVICE(INTEL, 0x1c41), LPC_CPT},
+	{ PCI_VDEVICE(INTEL, 0x1c42), LPC_CPTD},
+	{ PCI_VDEVICE(INTEL, 0x1c43), LPC_CPTM},
+	{ PCI_VDEVICE(INTEL, 0x1c44), LPC_CPT},
+	{ PCI_VDEVICE(INTEL, 0x1c45), LPC_CPT},
+	{ PCI_VDEVICE(INTEL, 0x1c46), LPC_CPT},
+	{ PCI_VDEVICE(INTEL, 0x1c47), LPC_CPT},
+	{ PCI_VDEVICE(INTEL, 0x1c48), LPC_CPT},
+	{ PCI_VDEVICE(INTEL, 0x1c49), LPC_CPT},
+	{ PCI_VDEVICE(INTEL, 0x1c4a), LPC_CPT},
+	{ PCI_VDEVICE(INTEL, 0x1c4b), LPC_CPT},
+	{ PCI_VDEVICE(INTEL, 0x1c4c), LPC_CPT},
+	{ PCI_VDEVICE(INTEL, 0x1c4d), LPC_CPT},
+	{ PCI_VDEVICE(INTEL, 0x1c4e), LPC_CPT},
+	{ PCI_VDEVICE(INTEL, 0x1c4f), LPC_CPT},
+	{ PCI_VDEVICE(INTEL, 0x1c50), LPC_CPT},
+	{ PCI_VDEVICE(INTEL, 0x1c51), LPC_CPT},
+	{ PCI_VDEVICE(INTEL, 0x1c52), LPC_CPT},
+	{ PCI_VDEVICE(INTEL, 0x1c53), LPC_CPT},
+	{ PCI_VDEVICE(INTEL, 0x1c54), LPC_CPT},
+	{ PCI_VDEVICE(INTEL, 0x1c55), LPC_CPT},
+	{ PCI_VDEVICE(INTEL, 0x1c56), LPC_CPT},
+	{ PCI_VDEVICE(INTEL, 0x1c57), LPC_CPT},
+	{ PCI_VDEVICE(INTEL, 0x1c58), LPC_CPT},
+	{ PCI_VDEVICE(INTEL, 0x1c59), LPC_CPT},
+	{ PCI_VDEVICE(INTEL, 0x1c5a), LPC_CPT},
+	{ PCI_VDEVICE(INTEL, 0x1c5b), LPC_CPT},
+	{ PCI_VDEVICE(INTEL, 0x1c5c), LPC_CPT},
+	{ PCI_VDEVICE(INTEL, 0x1c5d), LPC_CPT},
+	{ PCI_VDEVICE(INTEL, 0x1c5e), LPC_CPT},
+	{ PCI_VDEVICE(INTEL, 0x1c5f), LPC_CPT},
+	{ PCI_VDEVICE(INTEL, 0x1d40), LPC_PBG},
+	{ PCI_VDEVICE(INTEL, 0x1d41), LPC_PBG},
+	{ PCI_VDEVICE(INTEL, 0x2310), LPC_DH89XXCC},
+	{ PCI_VDEVICE(INTEL, 0x1e40), LPC_PPT},
+	{ PCI_VDEVICE(INTEL, 0x1e41), LPC_PPT},
+	{ PCI_VDEVICE(INTEL, 0x1e42), LPC_PPT},
+	{ PCI_VDEVICE(INTEL, 0x1e43), LPC_PPT},
+	{ PCI_VDEVICE(INTEL, 0x1e44), LPC_PPT},
+	{ PCI_VDEVICE(INTEL, 0x1e45), LPC_PPT},
+	{ PCI_VDEVICE(INTEL, 0x1e46), LPC_PPT},
+	{ PCI_VDEVICE(INTEL, 0x1e47), LPC_PPT},
+	{ PCI_VDEVICE(INTEL, 0x1e48), LPC_PPT},
+	{ PCI_VDEVICE(INTEL, 0x1e49), LPC_PPT},
+	{ PCI_VDEVICE(INTEL, 0x1e4a), LPC_PPT},
+	{ PCI_VDEVICE(INTEL, 0x1e4b), LPC_PPT},
+	{ PCI_VDEVICE(INTEL, 0x1e4c), LPC_PPT},
+	{ PCI_VDEVICE(INTEL, 0x1e4d), LPC_PPT},
+	{ PCI_VDEVICE(INTEL, 0x1e4e), LPC_PPT},
+	{ PCI_VDEVICE(INTEL, 0x1e4f), LPC_PPT},
+	{ PCI_VDEVICE(INTEL, 0x1e50), LPC_PPT},
+	{ PCI_VDEVICE(INTEL, 0x1e51), LPC_PPT},
+	{ PCI_VDEVICE(INTEL, 0x1e52), LPC_PPT},
+	{ PCI_VDEVICE(INTEL, 0x1e53), LPC_PPT},
+	{ PCI_VDEVICE(INTEL, 0x1e54), LPC_PPT},
+	{ PCI_VDEVICE(INTEL, 0x1e55), LPC_PPT},
+	{ PCI_VDEVICE(INTEL, 0x1e56), LPC_PPT},
+	{ PCI_VDEVICE(INTEL, 0x1e57), LPC_PPT},
+	{ PCI_VDEVICE(INTEL, 0x1e58), LPC_PPT},
+	{ PCI_VDEVICE(INTEL, 0x1e59), LPC_PPT},
+	{ PCI_VDEVICE(INTEL, 0x1e5a), LPC_PPT},
+	{ PCI_VDEVICE(INTEL, 0x1e5b), LPC_PPT},
+	{ PCI_VDEVICE(INTEL, 0x1e5c), LPC_PPT},
+	{ PCI_VDEVICE(INTEL, 0x1e5d), LPC_PPT},
+	{ PCI_VDEVICE(INTEL, 0x1e5e), LPC_PPT},
+	{ PCI_VDEVICE(INTEL, 0x1e5f), LPC_PPT},
+	{ PCI_VDEVICE(INTEL, 0x8c40), LPC_LPT},
+	{ PCI_VDEVICE(INTEL, 0x8c41), LPC_LPT},
+	{ PCI_VDEVICE(INTEL, 0x8c42), LPC_LPT},
+	{ PCI_VDEVICE(INTEL, 0x8c43), LPC_LPT},
+	{ PCI_VDEVICE(INTEL, 0x8c44), LPC_LPT},
+	{ PCI_VDEVICE(INTEL, 0x8c45), LPC_LPT},
+	{ PCI_VDEVICE(INTEL, 0x8c46), LPC_LPT},
+	{ PCI_VDEVICE(INTEL, 0x8c47), LPC_LPT},
+	{ PCI_VDEVICE(INTEL, 0x8c48), LPC_LPT},
+	{ PCI_VDEVICE(INTEL, 0x8c49), LPC_LPT},
+	{ PCI_VDEVICE(INTEL, 0x8c4a), LPC_LPT},
+	{ PCI_VDEVICE(INTEL, 0x8c4b), LPC_LPT},
+	{ PCI_VDEVICE(INTEL, 0x8c4c), LPC_LPT},
+	{ PCI_VDEVICE(INTEL, 0x8c4d), LPC_LPT},
+	{ PCI_VDEVICE(INTEL, 0x8c4e), LPC_LPT},
+	{ PCI_VDEVICE(INTEL, 0x8c4f), LPC_LPT},
+	{ PCI_VDEVICE(INTEL, 0x8c50), LPC_LPT},
+	{ PCI_VDEVICE(INTEL, 0x8c51), LPC_LPT},
+	{ PCI_VDEVICE(INTEL, 0x8c52), LPC_LPT},
+	{ PCI_VDEVICE(INTEL, 0x8c53), LPC_LPT},
+	{ PCI_VDEVICE(INTEL, 0x8c54), LPC_LPT},
+	{ PCI_VDEVICE(INTEL, 0x8c55), LPC_LPT},
+	{ PCI_VDEVICE(INTEL, 0x8c56), LPC_LPT},
+	{ PCI_VDEVICE(INTEL, 0x8c57), LPC_LPT},
+	{ PCI_VDEVICE(INTEL, 0x8c58), LPC_LPT},
+	{ PCI_VDEVICE(INTEL, 0x8c59), LPC_LPT},
+	{ PCI_VDEVICE(INTEL, 0x8c5a), LPC_LPT},
+	{ PCI_VDEVICE(INTEL, 0x8c5b), LPC_LPT},
+	{ PCI_VDEVICE(INTEL, 0x8c5c), LPC_LPT},
+	{ PCI_VDEVICE(INTEL, 0x8c5d), LPC_LPT},
+	{ PCI_VDEVICE(INTEL, 0x8c5e), LPC_LPT},
+	{ PCI_VDEVICE(INTEL, 0x8c5f), LPC_LPT},
+	{ 0, },			/* End of list */
+};
+MODULE_DEVICE_TABLE(pci, lpc_ich_ids);
+
+static void lpc_ich_restore_config_space(struct pci_dev *dev)
+{
+	if (lpc_ich_acpi_save >= 0) {
+		pci_write_config_byte(dev, ACPICTRL, lpc_ich_acpi_save);
+		lpc_ich_acpi_save = -1;
+	}
+
+	if (lpc_ich_gpio_save >= 0) {
+		pci_write_config_byte(dev, GPIOCTRL, lpc_ich_gpio_save);
+		lpc_ich_gpio_save = -1;
+	}
+}
+
+static void __devinit lpc_ich_enable_acpi_space(struct pci_dev *dev)
+{
+	u8 reg_save;
+
+	pci_read_config_byte(dev, ACPICTRL, &reg_save);
+	pci_write_config_byte(dev, ACPICTRL, reg_save | 0x10);
+	lpc_ich_acpi_save = reg_save;
+}
+
+static void __devinit lpc_ich_enable_gpio_space(struct pci_dev *dev)
+{
+	u8 reg_save;
+
+	pci_read_config_byte(dev, GPIOCTRL, &reg_save);
+	pci_write_config_byte(dev, GPIOCTRL, reg_save | 0x10);
+	lpc_ich_gpio_save = reg_save;
+}
+
+static void __devinit lpc_ich_finalize_cell(struct mfd_cell *cell,
+					const struct pci_device_id *id)
+{
+	cell->platform_data = &lpc_chipset_info[id->driver_data];
+	cell->pdata_size = sizeof(struct lpc_ich_info);
+}
+
+static int __devinit lpc_ich_init_gpio(struct pci_dev *dev,
+				const struct pci_device_id *id)
+{
+	u32 base_addr_cfg;
+	u32 base_addr;
+	int ret;
+	bool acpi_conflict = false;
+	struct resource *res;
+
+	/* Setup power management base register */
+	pci_read_config_dword(dev, ACPIBASE, &base_addr_cfg);
+	base_addr = base_addr_cfg & 0x0000ff80;
+	if (!base_addr) {
+		dev_err(&dev->dev, "I/O space for ACPI uninitialized\n");
+		lpc_ich_cells[LPC_GPIO].num_resources--;
+		goto gpe0_done;
+	}
+
+	res = &gpio_ich_res[ICH_RES_GPE0];
+	res->start = base_addr + ACPIBASE_GPE_OFF;
+	res->end = base_addr + ACPIBASE_GPE_END;
+	ret = acpi_check_resource_conflict(res);
+	if (ret) {
+		/*
+		 * This isn't fatal for the GPIO, but we have to make sure that
+		 * the platform_device subsystem doesn't see this resource
+		 * or it will register an invalid region.
+		 */
+		lpc_ich_cells[LPC_GPIO].num_resources--;
+		acpi_conflict = true;
+	} else {
+		lpc_ich_enable_acpi_space(dev);
+	}
+
+gpe0_done:
+	/* Setup GPIO base register */
+	pci_read_config_dword(dev, GPIOBASE, &base_addr_cfg);
+	base_addr = base_addr_cfg & 0x0000ff80;
+	if (!base_addr) {
+		dev_err(&dev->dev, "I/O space for GPIO uninitialized\n");
+		ret = -ENODEV;
+		goto gpio_done;
+	}
+
+	/* Older devices provide fewer GPIOs and have a smaller resource size. */
+	res = &gpio_ich_res[ICH_RES_GPIO];
+	res->start = base_addr;
+	switch (lpc_chipset_info[id->driver_data].gpio_version) {
+	case ICH_V5_GPIO:
+	case ICH_V10CORP_GPIO:
+		res->end = res->start + 128 - 1;
+		break;
+	default:
+		res->end = res->start + 64 - 1;
+		break;
+	}
+
+	ret = acpi_check_resource_conflict(res);
+	if (ret) {
+		/* this isn't necessarily fatal for the GPIO */
+		acpi_conflict = true;
+		goto gpio_done;
+	}
+	lpc_ich_enable_gpio_space(dev);
+
+	lpc_ich_finalize_cell(&lpc_ich_cells[LPC_GPIO], id);
+	ret = mfd_add_devices(&dev->dev, -1, &lpc_ich_cells[LPC_GPIO],
+				1, NULL, 0);
+
+gpio_done:
+	if (acpi_conflict)
+		pr_warn("Resource conflict(s) found affecting %s\n",
+				lpc_ich_cells[LPC_GPIO].name);
+	return ret;
+}
+
+static int __devinit lpc_ich_init_wdt(struct pci_dev *dev,
+				const struct pci_device_id *id)
+{
+	u32 base_addr_cfg;
+	u32 base_addr;
+	int ret;
+	bool acpi_conflict = false;
+	struct resource *res;
+
+	/* Setup power management base register */
+	pci_read_config_dword(dev, ACPIBASE, &base_addr_cfg);
+	base_addr = base_addr_cfg & 0x0000ff80;
+	if (!base_addr) {
+		dev_err(&dev->dev, "I/O space for ACPI uninitialized\n");
+		ret = -ENODEV;
+		goto wdt_done;
+	}
+
+	res = wdt_io_res(ICH_RES_IO_TCO);
+	res->start = base_addr + ACPIBASE_TCO_OFF;
+	res->end = base_addr + ACPIBASE_TCO_END;
+	ret = acpi_check_resource_conflict(res);
+	if (ret) {
+		acpi_conflict = true;
+		goto wdt_done;
+	}
+
+	res = wdt_io_res(ICH_RES_IO_SMI);
+	res->start = base_addr + ACPIBASE_SMI_OFF;
+	res->end = base_addr + ACPIBASE_SMI_END;
+	ret = acpi_check_resource_conflict(res);
+	if (ret) {
+		acpi_conflict = true;
+		goto wdt_done;
+	}
+	lpc_ich_enable_acpi_space(dev);
+
+	/*
+	 * Get the Memory-Mapped GCS register. To get access to it
+	 * we have to read RCBA from PCI Config space 0xf0 and use
+	 * it as base. GCS = RCBA + ICH6_GCS(0x3410).
+	 */
+	if (lpc_chipset_info[id->driver_data].iTCO_version == 2) {
+		pci_read_config_dword(dev, RCBABASE, &base_addr_cfg);
+		base_addr = base_addr_cfg & 0xffffc000;
+		if (!(base_addr_cfg & 1)) {
+			pr_err("RCBA is disabled by hardware/BIOS, "
+					"device disabled\n");
+			ret = -ENODEV;
+			goto wdt_done;
+		}
+		res = wdt_mem_res(ICH_RES_MEM_GCS);
+		res->start = base_addr + ACPIBASE_GCS_OFF;
+		res->end = base_addr + ACPIBASE_GCS_END;
+		ret = acpi_check_resource_conflict(res);
+		if (ret) {
+			acpi_conflict = true;
+			goto wdt_done;
+		}
+	}
+
+	lpc_ich_finalize_cell(&lpc_ich_cells[LPC_WDT], id);
+	ret = mfd_add_devices(&dev->dev, -1, &lpc_ich_cells[LPC_WDT],
+				1, NULL, 0);
+
+wdt_done:
+	if (acpi_conflict)
+		pr_warn("Resource conflict(s) found affecting %s\n",
+				lpc_ich_cells[LPC_WDT].name);
+	return ret;
+}
+
+static int __devinit lpc_ich_probe(struct pci_dev *dev,
+				const struct pci_device_id *id)
+{
+	int ret;
+	bool cell_added = false;
+
+	ret = lpc_ich_init_wdt(dev, id);
+	if (!ret)
+		cell_added = true;
+
+	ret = lpc_ich_init_gpio(dev, id);
+	if (!ret)
+		cell_added = true;
+
+	/*
+	 * We only care if at least one or none of the cells registered
+	 * successfully.
+	 */
+	if (!cell_added) {
+		lpc_ich_restore_config_space(dev);
+		return -ENODEV;
+	}
+
+	return 0;
+}
+
+static void __devexit lpc_ich_remove(struct pci_dev *dev)
+{
+	mfd_remove_devices(&dev->dev);
+	lpc_ich_restore_config_space(dev);
+}
+
+static struct pci_driver lpc_ich_driver = {
+	.name		= "lpc_ich",
+	.id_table	= lpc_ich_ids,
+	.probe		= lpc_ich_probe,
+	.remove		= __devexit_p(lpc_ich_remove),
+};
+
+static int __init lpc_ich_init(void)
+{
+	return pci_register_driver(&lpc_ich_driver);
+}
+
+static void __exit lpc_ich_exit(void)
+{
+	pci_unregister_driver(&lpc_ich_driver);
+}
+
+module_init(lpc_ich_init);
+module_exit(lpc_ich_exit);
+
+MODULE_AUTHOR("Aaron Sierra <asierra@xes-inc.com>");
+MODULE_DESCRIPTION("LPC interface for Intel ICH");
+MODULE_LICENSE("GPL");
diff --git a/drivers/mfd/lpc_sch.c b/drivers/mfd/lpc_sch.c
index abc4213..9f20abc 100644
--- a/drivers/mfd/lpc_sch.c
+++ b/drivers/mfd/lpc_sch.c
@@ -36,6 +36,7 @@
 
 #define GPIOBASE	0x44
 #define GPIO_IO_SIZE	64
+#define GPIO_IO_SIZE_CENTERTON	128
 
 #define WDTBASE		0x84
 #define WDT_IO_SIZE	64
@@ -68,7 +69,7 @@
 
 static struct mfd_cell tunnelcreek_cells[] = {
 	{
-		.name = "tunnelcreek_wdt",
+		.name = "ie6xx_wdt",
 		.num_resources = 1,
 		.resources = &wdt_sch_resource,
 	},
@@ -77,6 +78,7 @@
 static DEFINE_PCI_DEVICE_TABLE(lpc_sch_ids) = {
 	{ PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_SCH_LPC) },
 	{ PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ITC_LPC) },
+	{ PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_CENTERTON_ILB) },
 	{ 0, }
 };
 MODULE_DEVICE_TABLE(pci, lpc_sch_ids);
@@ -115,7 +117,11 @@
 	}
 
 	gpio_sch_resource.start = base_addr;
-	gpio_sch_resource.end = base_addr + GPIO_IO_SIZE - 1;
+
+	if (id->device == PCI_DEVICE_ID_INTEL_CENTERTON_ILB)
+		gpio_sch_resource.end = base_addr + GPIO_IO_SIZE_CENTERTON - 1;
+	else
+		gpio_sch_resource.end = base_addr + GPIO_IO_SIZE - 1;
 
 	for (i=0; i < ARRAY_SIZE(lpc_sch_cells); i++)
 		lpc_sch_cells[i].id = id->device;
@@ -125,7 +131,8 @@
 	if (ret)
 		goto out_dev;
 
-	if (id->device == PCI_DEVICE_ID_INTEL_ITC_LPC) {
+	if (id->device == PCI_DEVICE_ID_INTEL_ITC_LPC
+	 || id->device == PCI_DEVICE_ID_INTEL_CENTERTON_ILB) {
 		pci_read_config_dword(dev, WDTBASE, &base_addr_cfg);
 		if (!(base_addr_cfg & (1 << 31))) {
 			dev_err(&dev->dev, "Decode of the WDT I/O range disabled\n");
@@ -167,18 +174,7 @@
 	.remove		= __devexit_p(lpc_sch_remove),
 };
 
-static int __init lpc_sch_init(void)
-{
-	return pci_register_driver(&lpc_sch_driver);
-}
-
-static void __exit lpc_sch_exit(void)
-{
-	pci_unregister_driver(&lpc_sch_driver);
-}
-
-module_init(lpc_sch_init);
-module_exit(lpc_sch_exit);
+module_pci_driver(lpc_sch_driver);
 
 MODULE_AUTHOR("Denis Turischev <denis@compulab.co.il>");
 MODULE_DESCRIPTION("LPC interface for Intel Poulsbo SCH");
diff --git a/drivers/mfd/max77693-irq.c b/drivers/mfd/max77693-irq.c
new file mode 100644
index 0000000..2b40356
--- /dev/null
+++ b/drivers/mfd/max77693-irq.c
@@ -0,0 +1,309 @@
+/*
+ * max77693-irq.c - Interrupt controller support for MAX77693
+ *
+ * Copyright (C) 2012 Samsung Electronics Co.Ltd
+ * SangYoung Son <hello.son@samsung.com>
+ *
+ * This program is not provided / owned by Maxim Integrated Products.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
+ *
+ * This driver is based on max8997-irq.c
+ */
+
+#include <linux/err.h>
+#include <linux/irq.h>
+#include <linux/interrupt.h>
+#include <linux/module.h>
+#include <linux/irqdomain.h>
+#include <linux/mfd/max77693.h>
+#include <linux/mfd/max77693-private.h>
+
+static const u8 max77693_mask_reg[] = {
+	[LED_INT] = MAX77693_LED_REG_FLASH_INT_MASK,
+	[TOPSYS_INT] = MAX77693_PMIC_REG_TOPSYS_INT_MASK,
+	[CHG_INT] = MAX77693_CHG_REG_CHG_INT_MASK,
+	[MUIC_INT1] = MAX77693_MUIC_REG_INTMASK1,
+	[MUIC_INT2] = MAX77693_MUIC_REG_INTMASK2,
+	[MUIC_INT3] = MAX77693_MUIC_REG_INTMASK3,
+};
+
+static struct regmap *max77693_get_regmap(struct max77693_dev *max77693,
+				enum max77693_irq_source src)
+{
+	switch (src) {
+	case LED_INT ... CHG_INT:
+		return max77693->regmap;
+	case MUIC_INT1 ... MUIC_INT3:
+		return max77693->regmap_muic;
+	default:
+		return ERR_PTR(-EINVAL);
+	}
+}
+
+struct max77693_irq_data {
+	int mask;
+	enum max77693_irq_source group;
+};
+
+#define DECLARE_IRQ(idx, _group, _mask)		\
+	[(idx)] = { .group = (_group), .mask = (_mask) }
+static const struct max77693_irq_data max77693_irqs[] = {
+	DECLARE_IRQ(MAX77693_LED_IRQ_FLED2_OPEN,	LED_INT, 1 << 0),
+	DECLARE_IRQ(MAX77693_LED_IRQ_FLED2_SHORT,	LED_INT, 1 << 1),
+	DECLARE_IRQ(MAX77693_LED_IRQ_FLED1_OPEN,	LED_INT, 1 << 2),
+	DECLARE_IRQ(MAX77693_LED_IRQ_FLED1_SHORT,	LED_INT, 1 << 3),
+	DECLARE_IRQ(MAX77693_LED_IRQ_MAX_FLASH,		LED_INT, 1 << 4),
+
+	DECLARE_IRQ(MAX77693_TOPSYS_IRQ_T120C_INT,	TOPSYS_INT, 1 << 0),
+	DECLARE_IRQ(MAX77693_TOPSYS_IRQ_T140C_INT,	TOPSYS_INT, 1 << 1),
+	DECLARE_IRQ(MAX77693_TOPSYS_IRQ_LOWSYS_INT,	TOPSYS_INT, 1 << 3),
+
+	DECLARE_IRQ(MAX77693_CHG_IRQ_BYP_I,		CHG_INT, 1 << 0),
+	DECLARE_IRQ(MAX77693_CHG_IRQ_THM_I,		CHG_INT, 1 << 2),
+	DECLARE_IRQ(MAX77693_CHG_IRQ_BAT_I,		CHG_INT, 1 << 3),
+	DECLARE_IRQ(MAX77693_CHG_IRQ_CHG_I,		CHG_INT, 1 << 4),
+	DECLARE_IRQ(MAX77693_CHG_IRQ_CHGIN_I,		CHG_INT, 1 << 6),
+
+	DECLARE_IRQ(MAX77693_MUIC_IRQ_INT1_ADC,		MUIC_INT1, 1 << 0),
+	DECLARE_IRQ(MAX77693_MUIC_IRQ_INT1_ADC_LOW,	MUIC_INT1, 1 << 1),
+	DECLARE_IRQ(MAX77693_MUIC_IRQ_INT1_ADC_ERR,	MUIC_INT1, 1 << 2),
+	DECLARE_IRQ(MAX77693_MUIC_IRQ_INT1_ADC1K,	MUIC_INT1, 1 << 3),
+
+	DECLARE_IRQ(MAX77693_MUIC_IRQ_INT2_CHGTYP,	MUIC_INT2, 1 << 0),
+	DECLARE_IRQ(MAX77693_MUIC_IRQ_INT2_CHGDETREUN,	MUIC_INT2, 1 << 1),
+	DECLARE_IRQ(MAX77693_MUIC_IRQ_INT2_DCDTMR,	MUIC_INT2, 1 << 2),
+	DECLARE_IRQ(MAX77693_MUIC_IRQ_INT2_DXOVP,	MUIC_INT2, 1 << 3),
+	DECLARE_IRQ(MAX77693_MUIC_IRQ_INT2_VBVOLT,	MUIC_INT2, 1 << 4),
+	DECLARE_IRQ(MAX77693_MUIC_IRQ_INT2_VIDRM,	MUIC_INT2, 1 << 5),
+
+	DECLARE_IRQ(MAX77693_MUIC_IRQ_INT3_EOC,		MUIC_INT3, 1 << 0),
+	DECLARE_IRQ(MAX77693_MUIC_IRQ_INT3_CGMBC,	MUIC_INT3, 1 << 1),
+	DECLARE_IRQ(MAX77693_MUIC_IRQ_INT3_OVP,		MUIC_INT3, 1 << 2),
+	DECLARE_IRQ(MAX77693_MUIC_IRQ_INT3_MBCCHG_ERR,	MUIC_INT3, 1 << 3),
+	DECLARE_IRQ(MAX77693_MUIC_IRQ_INT3_CHG_ENABLED,	MUIC_INT3, 1 << 4),
+	DECLARE_IRQ(MAX77693_MUIC_IRQ_INT3_BAT_DET,	MUIC_INT3, 1 << 5),
+};
+
+static void max77693_irq_lock(struct irq_data *data)
+{
+	struct max77693_dev *max77693 = irq_get_chip_data(data->irq);
+
+	mutex_lock(&max77693->irqlock);
+}
+
+static void max77693_irq_sync_unlock(struct irq_data *data)
+{
+	struct max77693_dev *max77693 = irq_get_chip_data(data->irq);
+	int i;
+
+	for (i = 0; i < MAX77693_IRQ_GROUP_NR; i++) {
+		u8 mask_reg = max77693_mask_reg[i];
+		struct regmap *map = max77693_get_regmap(max77693, i);
+
+		if (mask_reg == MAX77693_REG_INVALID ||
+				IS_ERR_OR_NULL(map))
+			continue;
+		max77693->irq_masks_cache[i] = max77693->irq_masks_cur[i];
+
+		max77693_write_reg(map, max77693_mask_reg[i],
+				max77693->irq_masks_cur[i]);
+	}
+
+	mutex_unlock(&max77693->irqlock);
+}
+
+static inline const struct max77693_irq_data *
+irq_to_max77693_irq(struct max77693_dev *max77693, int irq)
+{
+	return &max77693_irqs[irq];
+}
+
+static void max77693_irq_mask(struct irq_data *data)
+{
+	struct max77693_dev *max77693 = irq_get_chip_data(data->irq);
+	const struct max77693_irq_data *irq_data =
+				irq_to_max77693_irq(max77693, data->irq);
+
+	if (irq_data->group >= MUIC_INT1 && irq_data->group <= MUIC_INT3)
+		max77693->irq_masks_cur[irq_data->group] &= ~irq_data->mask;
+	else
+		max77693->irq_masks_cur[irq_data->group] |= irq_data->mask;
+}
+
+static void max77693_irq_unmask(struct irq_data *data)
+{
+	struct max77693_dev *max77693 = irq_get_chip_data(data->irq);
+	const struct max77693_irq_data *irq_data =
+	    irq_to_max77693_irq(max77693, data->irq);
+
+	if (irq_data->group >= MUIC_INT1 && irq_data->group <= MUIC_INT3)
+		max77693->irq_masks_cur[irq_data->group] |= irq_data->mask;
+	else
+		max77693->irq_masks_cur[irq_data->group] &= ~irq_data->mask;
+}
+
+static struct irq_chip max77693_irq_chip = {
+	.name			= "max77693",
+	.irq_bus_lock		= max77693_irq_lock,
+	.irq_bus_sync_unlock	= max77693_irq_sync_unlock,
+	.irq_mask		= max77693_irq_mask,
+	.irq_unmask		= max77693_irq_unmask,
+};
+
+#define MAX77693_IRQSRC_CHG		(1 << 0)
+#define MAX77693_IRQSRC_TOP		(1 << 1)
+#define MAX77693_IRQSRC_FLASH		(1 << 2)
+#define MAX77693_IRQSRC_MUIC		(1 << 3)
+static irqreturn_t max77693_irq_thread(int irq, void *data)
+{
+	struct max77693_dev *max77693 = data;
+	u8 irq_reg[MAX77693_IRQ_GROUP_NR] = {};
+	u8 irq_src;
+	int ret;
+	int i, cur_irq;
+
+	ret = max77693_read_reg(max77693->regmap, MAX77693_PMIC_REG_INTSRC,
+				&irq_src);
+	if (ret < 0) {
+		dev_err(max77693->dev, "Failed to read interrupt source: %d\n",
+				ret);
+		return IRQ_NONE;
+	}
+
+	if (irq_src & MAX77693_IRQSRC_CHG)
+		/* CHG_INT */
+		ret = max77693_read_reg(max77693->regmap, MAX77693_CHG_REG_CHG_INT,
+				&irq_reg[CHG_INT]);
+
+	if (irq_src & MAX77693_IRQSRC_TOP)
+		/* TOPSYS_INT */
+		ret = max77693_read_reg(max77693->regmap,
+			MAX77693_PMIC_REG_TOPSYS_INT, &irq_reg[TOPSYS_INT]);
+
+	if (irq_src & MAX77693_IRQSRC_FLASH)
+		/* LED_INT */
+		ret = max77693_read_reg(max77693->regmap,
+			MAX77693_LED_REG_FLASH_INT, &irq_reg[LED_INT]);
+
+	if (irq_src & MAX77693_IRQSRC_MUIC)
+		/* MUIC INT1 ~ INT3 */
+		max77693_bulk_read(max77693->regmap, MAX77693_MUIC_REG_INT1,
+			MAX77693_NUM_IRQ_MUIC_REGS, &irq_reg[MUIC_INT1]);
+
+	/* Apply masking */
+	for (i = 0; i < MAX77693_IRQ_GROUP_NR; i++) {
+		if (i >= MUIC_INT1 && i <= MUIC_INT3)
+			irq_reg[i] &= max77693->irq_masks_cur[i];
+		else
+			irq_reg[i] &= ~max77693->irq_masks_cur[i];
+	}
+
+	/* Report */
+	for (i = 0; i < MAX77693_IRQ_NR; i++) {
+		if (irq_reg[max77693_irqs[i].group] & max77693_irqs[i].mask) {
+			cur_irq = irq_find_mapping(max77693->irq_domain, i);
+			if (cur_irq)
+				handle_nested_irq(cur_irq);
+		}
+	}
+
+	return IRQ_HANDLED;
+}
+
+int max77693_irq_resume(struct max77693_dev *max77693)
+{
+	if (max77693->irq)
+		max77693_irq_thread(0, max77693);
+
+	return 0;
+}
+
+static int max77693_irq_domain_map(struct irq_domain *d, unsigned int irq,
+				irq_hw_number_t hw)
+{
+	struct max77693_dev *max77693 = d->host_data;
+
+	irq_set_chip_data(irq, max77693);
+	irq_set_chip_and_handler(irq, &max77693_irq_chip, handle_edge_irq);
+	irq_set_nested_thread(irq, 1);
+#ifdef CONFIG_ARM
+	set_irq_flags(irq, IRQF_VALID);
+#else
+	irq_set_noprobe(irq);
+#endif
+	return 0;
+}
+
+static struct irq_domain_ops max77693_irq_domain_ops = {
+	.map = max77693_irq_domain_map,
+};
+
+int max77693_irq_init(struct max77693_dev *max77693)
+{
+	struct irq_domain *domain;
+	int i;
+	int ret;
+
+	mutex_init(&max77693->irqlock);
+
+	/* Mask individual interrupt sources */
+	for (i = 0; i < MAX77693_IRQ_GROUP_NR; i++) {
+		struct regmap *map;
+		/* MUIC IRQ  0:MASK 1:NOT MASK */
+		/* Other IRQ 1:MASK 0:NOT MASK */
+		if (i >= MUIC_INT1 && i <= MUIC_INT3) {
+			max77693->irq_masks_cur[i] = 0x00;
+			max77693->irq_masks_cache[i] = 0x00;
+		} else {
+			max77693->irq_masks_cur[i] = 0xff;
+			max77693->irq_masks_cache[i] = 0xff;
+		}
+		map = max77693_get_regmap(max77693, i);
+
+		if (IS_ERR_OR_NULL(map))
+			continue;
+		if (max77693_mask_reg[i] == MAX77693_REG_INVALID)
+			continue;
+		if (i >= MUIC_INT1 && i <= MUIC_INT3)
+			max77693_write_reg(map, max77693_mask_reg[i], 0x00);
+		else
+			max77693_write_reg(map, max77693_mask_reg[i], 0xff);
+	}
+
+	domain = irq_domain_add_linear(NULL, MAX77693_IRQ_NR,
+					&max77693_irq_domain_ops, max77693);
+	if (!domain) {
+		dev_err(max77693->dev, "could not create irq domain\n");
+		return -ENODEV;
+	}
+	max77693->irq_domain = domain;
+
+	ret = request_threaded_irq(max77693->irq, NULL, max77693_irq_thread,
+				   IRQF_TRIGGER_FALLING | IRQF_ONESHOT,
+				   "max77693-irq", max77693);
+
+	if (ret)
+		dev_err(max77693->dev, "Failed to request IRQ %d: %d\n",
+			max77693->irq, ret);
+
+	return ret;
+}
+
+void max77693_irq_exit(struct max77693_dev *max77693)
+{
+	if (max77693->irq)
+		free_irq(max77693->irq, max77693);
+}
diff --git a/drivers/mfd/max77693.c b/drivers/mfd/max77693.c
new file mode 100644
index 0000000..e9e4278
--- /dev/null
+++ b/drivers/mfd/max77693.c
@@ -0,0 +1,249 @@
+/*
+ * max77693.c - mfd core driver for the MAX 77693
+ *
+ * Copyright (C) 2012 Samsung Electronics
+ * SangYoung Son <hello.son@samsung.com>
+ *
+ * This program is not provided / owned by Maxim Integrated Products.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
+ *
+ * This driver is based on max8997.c
+ */
+
+#include <linux/module.h>
+#include <linux/slab.h>
+#include <linux/i2c.h>
+#include <linux/err.h>
+#include <linux/interrupt.h>
+#include <linux/pm_runtime.h>
+#include <linux/mutex.h>
+#include <linux/mfd/core.h>
+#include <linux/mfd/max77693.h>
+#include <linux/mfd/max77693-private.h>
+#include <linux/regulator/machine.h>
+#include <linux/regmap.h>
+
+#define I2C_ADDR_PMIC	(0xCC >> 1)	/* Charger, Flash LED */
+#define I2C_ADDR_MUIC	(0x4A >> 1)
+#define I2C_ADDR_HAPTIC	(0x90 >> 1)
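+/*
+ * These appear to be the datasheet's 8-bit (write) addresses; the shift
+ * yields the 7-bit addresses used by the I2C core: 0x66, 0x25 and 0x48.
+ */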
+
+static struct mfd_cell max77693_devs[] = {
+	{ .name = "max77693-pmic", },
+	{ .name = "max77693-charger", },
+	{ .name = "max77693-flash", },
+	{ .name = "max77693-muic", },
+	{ .name = "max77693-haptic", },
+};
+
+int max77693_read_reg(struct regmap *map, u8 reg, u8 *dest)
+{
+	unsigned int val;
+	int ret;
+
+	ret = regmap_read(map, reg, &val);
+	*dest = val;
+
+	return ret;
+}
+EXPORT_SYMBOL_GPL(max77693_read_reg);
+
+int max77693_bulk_read(struct regmap *map, u8 reg, int count, u8 *buf)
+{
+	int ret;
+
+	ret = regmap_bulk_read(map, reg, buf, count);
+
+	return ret;
+}
+EXPORT_SYMBOL_GPL(max77693_bulk_read);
+
+int max77693_write_reg(struct regmap *map, u8 reg, u8 value)
+{
+	int ret;
+
+	ret = regmap_write(map, reg, value);
+
+	return ret;
+}
+EXPORT_SYMBOL_GPL(max77693_write_reg);
+
+int max77693_bulk_write(struct regmap *map, u8 reg, int count, u8 *buf)
+{
+	int ret;
+
+	ret = regmap_bulk_write(map, reg, buf, count);
+
+	return ret;
+}
+EXPORT_SYMBOL_GPL(max77693_bulk_write);
+
+int max77693_update_reg(struct regmap *map, u8 reg, u8 val, u8 mask)
+{
+	int ret;
+
+	ret = regmap_update_bits(map, reg, mask, val);
+
+	return ret;
+}
+EXPORT_SYMBOL_GPL(max77693_update_reg);
+
+static const struct regmap_config max77693_regmap_config = {
+	.reg_bits = 8,
+	.val_bits = 8,
+	.max_register = MAX77693_PMIC_REG_END,
+};
+
+static int max77693_i2c_probe(struct i2c_client *i2c,
+			      const struct i2c_device_id *id)
+{
+	struct max77693_dev *max77693;
+	struct max77693_platform_data *pdata = i2c->dev.platform_data;
+	u8 reg_data;
+	int ret = 0;
+
+	max77693 = devm_kzalloc(&i2c->dev,
+			sizeof(struct max77693_dev), GFP_KERNEL);
+	if (max77693 == NULL)
+		return -ENOMEM;
+
+	max77693->regmap = devm_regmap_init_i2c(i2c, &max77693_regmap_config);
+	if (IS_ERR(max77693->regmap)) {
+		ret = PTR_ERR(max77693->regmap);
+		dev_err(max77693->dev,"failed to allocate register map: %d\n",
+				ret);
+		goto err_regmap;
+	}
+
+	i2c_set_clientdata(i2c, max77693);
+	max77693->dev = &i2c->dev;
+	max77693->i2c = i2c;
+	max77693->irq = i2c->irq;
+	max77693->type = id->driver_data;
+
+	if (!pdata) {
+		ret = -EINVAL;
+		goto err_regmap;
+	}
+
+	max77693->wakeup = pdata->wakeup;
+
+	mutex_init(&max77693->iolock);
+
+	if (max77693_read_reg(max77693->regmap,
+				MAX77693_PMIC_REG_PMIC_ID2, &reg_data) < 0) {
+		dev_err(max77693->dev, "device not found on this channel\n");
+		ret = -ENODEV;
+		goto err_regmap;
+	} else
+		dev_info(max77693->dev, "device ID: 0x%x\n", reg_data);
+
+	max77693->muic = i2c_new_dummy(i2c->adapter, I2C_ADDR_MUIC);
+	i2c_set_clientdata(max77693->muic, max77693);
+
+	max77693->haptic = i2c_new_dummy(i2c->adapter, I2C_ADDR_HAPTIC);
+	i2c_set_clientdata(max77693->haptic, max77693);
+
+	ret = max77693_irq_init(max77693);
+	if (ret < 0)
+		goto err_mfd;
+
+	pm_runtime_set_active(max77693->dev);
+
+	ret = mfd_add_devices(max77693->dev, -1, max77693_devs,
+			ARRAY_SIZE(max77693_devs), NULL, 0);
+	if (ret < 0)
+		goto err_mfd;
+
+	device_init_wakeup(max77693->dev, pdata->wakeup);
+
+	return ret;
+
+err_mfd:
+	i2c_unregister_device(max77693->muic);
+	i2c_unregister_device(max77693->haptic);
+err_regmap:
+
+	return ret;
+}
+
+static int max77693_i2c_remove(struct i2c_client *i2c)
+{
+	struct max77693_dev *max77693 = i2c_get_clientdata(i2c);
+
+	mfd_remove_devices(max77693->dev);
+	i2c_unregister_device(max77693->muic);
+	i2c_unregister_device(max77693->haptic);
+
+	return 0;
+}
+
+static const struct i2c_device_id max77693_i2c_id[] = {
+	{ "max77693", TYPE_MAX77693 },
+	{ }
+};
+MODULE_DEVICE_TABLE(i2c, max77693_i2c_id);
+
+static int max77693_suspend(struct device *dev)
+{
+	struct i2c_client *i2c = container_of(dev, struct i2c_client, dev);
+	struct max77693_dev *max77693 = i2c_get_clientdata(i2c);
+
+	if (device_may_wakeup(dev))
+		irq_set_irq_wake(max77693->irq, 1);
+	return 0;
+}
+
+static int max77693_resume(struct device *dev)
+{
+	struct i2c_client *i2c = container_of(dev, struct i2c_client, dev);
+	struct max77693_dev *max77693 = i2c_get_clientdata(i2c);
+
+	if (device_may_wakeup(dev))
+		irq_set_irq_wake(max77693->irq, 0);
+	return max77693_irq_resume(max77693);
+}
+
+static const struct dev_pm_ops max77693_pm = {
+	.suspend = max77693_suspend,
+	.resume = max77693_resume,
+};
+
+static struct i2c_driver max77693_i2c_driver = {
+	.driver = {
+		   .name = "max77693",
+		   .owner = THIS_MODULE,
+		   .pm = &max77693_pm,
+	},
+	.probe = max77693_i2c_probe,
+	.remove = max77693_i2c_remove,
+	.id_table = max77693_i2c_id,
+};
+
+static int __init max77693_i2c_init(void)
+{
+	return i2c_add_driver(&max77693_i2c_driver);
+}
+/* init early so consumer devices can complete system boot */
+subsys_initcall(max77693_i2c_init);
+
+static void __exit max77693_i2c_exit(void)
+{
+	i2c_del_driver(&max77693_i2c_driver);
+}
+module_exit(max77693_i2c_exit);
+
+MODULE_DESCRIPTION("MAXIM 77693 multi-function core driver");
+MODULE_AUTHOR("SangYoung, Son <hello.son@samsung.com>");
+MODULE_LICENSE("GPL");
diff --git a/drivers/mfd/mc13xxx-core.c b/drivers/mfd/mc13xxx-core.c
index 738722c..f0ea3b8 100644
--- a/drivers/mfd/mc13xxx-core.c
+++ b/drivers/mfd/mc13xxx-core.c
@@ -15,24 +15,13 @@
 #include <linux/platform_device.h>
 #include <linux/mutex.h>
 #include <linux/interrupt.h>
-#include <linux/spi/spi.h>
 #include <linux/mfd/core.h>
 #include <linux/mfd/mc13xxx.h>
 #include <linux/of.h>
 #include <linux/of_device.h>
 #include <linux/of_gpio.h>
 
-struct mc13xxx {
-	struct spi_device *spidev;
-	struct mutex lock;
-	int irq;
-	int flags;
-
-	irq_handler_t irqhandler[MC13XXX_NUM_IRQ];
-	void *irqdata[MC13XXX_NUM_IRQ];
-
-	int adcflags;
-};
+#include "mc13xxx.h"
 
 #define MC13XXX_IRQSTAT0	0
 #define MC13XXX_IRQSTAT0_ADCDONEI	(1 << 0)
@@ -139,34 +128,29 @@
 
 #define MC13XXX_ADC2		45
 
-#define MC13XXX_NUMREGS 0x3f
-
 void mc13xxx_lock(struct mc13xxx *mc13xxx)
 {
 	if (!mutex_trylock(&mc13xxx->lock)) {
-		dev_dbg(&mc13xxx->spidev->dev, "wait for %s from %pf\n",
+		dev_dbg(mc13xxx->dev, "wait for %s from %pf\n",
 				__func__, __builtin_return_address(0));
 
 		mutex_lock(&mc13xxx->lock);
 	}
-	dev_dbg(&mc13xxx->spidev->dev, "%s from %pf\n",
+	dev_dbg(mc13xxx->dev, "%s from %pf\n",
 			__func__, __builtin_return_address(0));
 }
 EXPORT_SYMBOL(mc13xxx_lock);
 
 void mc13xxx_unlock(struct mc13xxx *mc13xxx)
 {
-	dev_dbg(&mc13xxx->spidev->dev, "%s from %pf\n",
+	dev_dbg(mc13xxx->dev, "%s from %pf\n",
 			__func__, __builtin_return_address(0));
 	mutex_unlock(&mc13xxx->lock);
 }
 EXPORT_SYMBOL(mc13xxx_unlock);
 
-#define MC13XXX_REGOFFSET_SHIFT 25
 int mc13xxx_reg_read(struct mc13xxx *mc13xxx, unsigned int offset, u32 *val)
 {
-	struct spi_transfer t;
-	struct spi_message m;
 	int ret;
 
 	BUG_ON(!mutex_is_locked(&mc13xxx->lock));
@@ -174,84 +158,35 @@
 	if (offset > MC13XXX_NUMREGS)
 		return -EINVAL;
 
-	*val = offset << MC13XXX_REGOFFSET_SHIFT;
+	ret = regmap_read(mc13xxx->regmap, offset, val);
+	dev_vdbg(mc13xxx->dev, "[0x%02x] -> 0x%06x\n", offset, *val);
 
-	memset(&t, 0, sizeof(t));
-
-	t.tx_buf = val;
-	t.rx_buf = val;
-	t.len = sizeof(u32);
-
-	spi_message_init(&m);
-	spi_message_add_tail(&t, &m);
-
-	ret = spi_sync(mc13xxx->spidev, &m);
-
-	/* error in message.status implies error return from spi_sync */
-	BUG_ON(!ret && m.status);
-
-	if (ret)
-		return ret;
-
-	*val &= 0xffffff;
-
-	dev_vdbg(&mc13xxx->spidev->dev, "[0x%02x] -> 0x%06x\n", offset, *val);
-
-	return 0;
+	return ret;
 }
 EXPORT_SYMBOL(mc13xxx_reg_read);
 
 int mc13xxx_reg_write(struct mc13xxx *mc13xxx, unsigned int offset, u32 val)
 {
-	u32 buf;
-	struct spi_transfer t;
-	struct spi_message m;
-	int ret;
-
 	BUG_ON(!mutex_is_locked(&mc13xxx->lock));
 
-	dev_vdbg(&mc13xxx->spidev->dev, "[0x%02x] <- 0x%06x\n", offset, val);
+	dev_vdbg(mc13xxx->dev, "[0x%02x] <- 0x%06x\n", offset, val);
 
 	if (offset > MC13XXX_NUMREGS || val > 0xffffff)
 		return -EINVAL;
 
-	buf = 1 << 31 | offset << MC13XXX_REGOFFSET_SHIFT | val;
-
-	memset(&t, 0, sizeof(t));
-
-	t.tx_buf = &buf;
-	t.rx_buf = &buf;
-	t.len = sizeof(u32);
-
-	spi_message_init(&m);
-	spi_message_add_tail(&t, &m);
-
-	ret = spi_sync(mc13xxx->spidev, &m);
-
-	BUG_ON(!ret && m.status);
-
-	if (ret)
-		return ret;
-
-	return 0;
+	return regmap_write(mc13xxx->regmap, offset, val);
 }
 EXPORT_SYMBOL(mc13xxx_reg_write);
 
 int mc13xxx_reg_rmw(struct mc13xxx *mc13xxx, unsigned int offset,
 		u32 mask, u32 val)
 {
-	int ret;
-	u32 valread;
-
+	BUG_ON(!mutex_is_locked(&mc13xxx->lock));
 	BUG_ON(val & ~mask);
+	dev_vdbg(mc13xxx->dev, "[0x%02x] <- 0x%06x (mask: 0x%06x)\n",
+			offset, val, mask);
 
-	ret = mc13xxx_reg_read(mc13xxx, offset, &valread);
-	if (ret)
-		return ret;
-
-	valread = (valread & ~mask) | val;
-
-	return mc13xxx_reg_write(mc13xxx, offset, valread);
+	return regmap_update_bits(mc13xxx->regmap, offset, mask, val);
 }
 EXPORT_SYMBOL(mc13xxx_reg_rmw);
 
@@ -439,7 +374,7 @@
 			if (handled == IRQ_HANDLED)
 				num_handled++;
 		} else {
-			dev_err(&mc13xxx->spidev->dev,
+			dev_err(mc13xxx->dev,
 					"BUG: irq %u but no handler\n",
 					baseirq + irq);
 
@@ -475,25 +410,23 @@
 	return IRQ_RETVAL(handled);
 }
 
-enum mc13xxx_id {
-	MC13XXX_ID_MC13783,
-	MC13XXX_ID_MC13892,
-	MC13XXX_ID_INVALID,
-};
-
 static const char *mc13xxx_chipname[] = {
 	[MC13XXX_ID_MC13783] = "mc13783",
 	[MC13XXX_ID_MC13892] = "mc13892",
 };
 
 #define maskval(reg, mask)	(((reg) & (mask)) >> __ffs(mask))
-static int mc13xxx_identify(struct mc13xxx *mc13xxx, enum mc13xxx_id *id)
+static int mc13xxx_identify(struct mc13xxx *mc13xxx)
 {
 	u32 icid;
 	u32 revision;
-	const char *name;
 	int ret;
 
+	/*
+	 * Get the generation ID from register 46, as apparently some older
+	 * IC revisions only have this info at this location. Newer ICs seem to
+	 * have both.
+	 */
 	ret = mc13xxx_reg_read(mc13xxx, 46, &icid);
 	if (ret)
 		return ret;
@@ -502,26 +435,23 @@
 
 	switch (icid) {
 	case 2:
-		*id = MC13XXX_ID_MC13783;
-		name = "mc13783";
+		mc13xxx->ictype = MC13XXX_ID_MC13783;
 		break;
 	case 7:
-		*id = MC13XXX_ID_MC13892;
-		name = "mc13892";
+		mc13xxx->ictype = MC13XXX_ID_MC13892;
 		break;
 	default:
-		*id = MC13XXX_ID_INVALID;
+		mc13xxx->ictype = MC13XXX_ID_INVALID;
 		break;
 	}
 
-	if (*id == MC13XXX_ID_MC13783 || *id == MC13XXX_ID_MC13892) {
+	if (mc13xxx->ictype == MC13XXX_ID_MC13783 ||
+			mc13xxx->ictype == MC13XXX_ID_MC13892) {
 		ret = mc13xxx_reg_read(mc13xxx, MC13XXX_REVISION, &revision);
-		if (ret)
-			return ret;
 
-		dev_info(&mc13xxx->spidev->dev, "%s: rev: %d.%d, "
+		dev_info(mc13xxx->dev, "%s: rev: %d.%d, "
 				"fin: %d, fab: %d, icid: %d/%d\n",
-				mc13xxx_chipname[*id],
+				mc13xxx_chipname[mc13xxx->ictype],
 				maskval(revision, MC13XXX_REVISION_REVFULL),
 				maskval(revision, MC13XXX_REVISION_REVMETAL),
 				maskval(revision, MC13XXX_REVISION_FIN),
@@ -530,26 +460,12 @@
 				maskval(revision, MC13XXX_REVISION_ICIDCODE));
 	}
 
-	if (*id != MC13XXX_ID_INVALID) {
-		const struct spi_device_id *devid =
-			spi_get_device_id(mc13xxx->spidev);
-		if (!devid || devid->driver_data != *id)
-			dev_warn(&mc13xxx->spidev->dev, "device id doesn't "
-					"match auto detection!\n");
-	}
-
-	return 0;
+	return (mc13xxx->ictype == MC13XXX_ID_INVALID) ? -ENODEV : 0;
 }
 
 static const char *mc13xxx_get_chipname(struct mc13xxx *mc13xxx)
 {
-	const struct spi_device_id *devid =
-		spi_get_device_id(mc13xxx->spidev);
-
-	if (!devid)
-		return NULL;
-
-	return mc13xxx_chipname[devid->driver_data];
+	return mc13xxx_chipname[mc13xxx->ictype];
 }
 
 int mc13xxx_get_flags(struct mc13xxx *mc13xxx)
@@ -592,7 +508,7 @@
 	};
 	init_completion(&adcdone_data.done);
 
-	dev_dbg(&mc13xxx->spidev->dev, "%s\n", __func__);
+	dev_dbg(mc13xxx->dev, "%s\n", __func__);
 
 	mc13xxx_lock(mc13xxx);
 
@@ -637,7 +553,8 @@
 	adc1 |= ato << MC13783_ADC1_ATO_SHIFT;
 	if (atox)
 		adc1 |= MC13783_ADC1_ATOX;
-	dev_dbg(&mc13xxx->spidev->dev, "%s: request irq\n", __func__);
+
+	dev_dbg(mc13xxx->dev, "%s: request irq\n", __func__);
 	mc13xxx_irq_request(mc13xxx, MC13XXX_IRQ_ADCDONE,
 			mc13xxx_handler_adcdone, __func__, &adcdone_data);
 	mc13xxx_irq_ack(mc13xxx, MC13XXX_IRQ_ADCDONE);
@@ -695,7 +612,7 @@
 	if (!cell.name)
 		return -ENOMEM;
 
-	return mfd_add_devices(&mc13xxx->spidev->dev, -1, &cell, 1, NULL, 0);
+	return mfd_add_devices(mc13xxx->dev, -1, &cell, 1, NULL, 0);
 }
 
 static int mc13xxx_add_subdevice(struct mc13xxx *mc13xxx, const char *format)
@@ -706,7 +623,7 @@
 #ifdef CONFIG_OF
 static int mc13xxx_probe_flags_dt(struct mc13xxx *mc13xxx)
 {
-	struct device_node *np = mc13xxx->spidev->dev.of_node;
+	struct device_node *np = mc13xxx->dev->of_node;
 
 	if (!np)
 		return -ENODEV;
@@ -732,55 +649,15 @@
 }
 #endif
 
-static const struct spi_device_id mc13xxx_device_id[] = {
-	{
-		.name = "mc13783",
-		.driver_data = MC13XXX_ID_MC13783,
-	}, {
-		.name = "mc13892",
-		.driver_data = MC13XXX_ID_MC13892,
-	}, {
-		/* sentinel */
-	}
-};
-MODULE_DEVICE_TABLE(spi, mc13xxx_device_id);
-
-static const struct of_device_id mc13xxx_dt_ids[] = {
-	{ .compatible = "fsl,mc13783", .data = (void *) MC13XXX_ID_MC13783, },
-	{ .compatible = "fsl,mc13892", .data = (void *) MC13XXX_ID_MC13892, },
-	{ /* sentinel */ }
-};
-MODULE_DEVICE_TABLE(of, mc13xxx_dt_ids);
-
-static int mc13xxx_probe(struct spi_device *spi)
+int mc13xxx_common_init(struct mc13xxx *mc13xxx,
+		struct mc13xxx_platform_data *pdata, int irq)
 {
-	const struct of_device_id *of_id;
-	struct spi_driver *sdrv = to_spi_driver(spi->dev.driver);
-	struct mc13xxx *mc13xxx;
-	struct mc13xxx_platform_data *pdata = dev_get_platdata(&spi->dev);
-	enum mc13xxx_id id;
 	int ret;
 
-	of_id = of_match_device(mc13xxx_dt_ids, &spi->dev);
-	if (of_id)
-		sdrv->id_table = &mc13xxx_device_id[(enum mc13xxx_id) of_id->data];
-
-	mc13xxx = kzalloc(sizeof(*mc13xxx), GFP_KERNEL);
-	if (!mc13xxx)
-		return -ENOMEM;
-
-	dev_set_drvdata(&spi->dev, mc13xxx);
-	spi->mode = SPI_MODE_0 | SPI_CS_HIGH;
-	spi->bits_per_word = 32;
-	spi_setup(spi);
-
-	mc13xxx->spidev = spi;
-
-	mutex_init(&mc13xxx->lock);
 	mc13xxx_lock(mc13xxx);
 
-	ret = mc13xxx_identify(mc13xxx, &id);
-	if (ret || id == MC13XXX_ID_INVALID)
+	ret = mc13xxx_identify(mc13xxx);
+	if (ret)
 		goto err_revision;
 
 	/* mask all irqs */
@@ -792,18 +669,19 @@
 	if (ret)
 		goto err_mask;
 
-	ret = request_threaded_irq(spi->irq, NULL, mc13xxx_irq_thread,
+	ret = request_threaded_irq(irq, NULL, mc13xxx_irq_thread,
 			IRQF_ONESHOT | IRQF_TRIGGER_HIGH, "mc13xxx", mc13xxx);
 
 	if (ret) {
 err_mask:
 err_revision:
 		mc13xxx_unlock(mc13xxx);
-		dev_set_drvdata(&spi->dev, NULL);
 		kfree(mc13xxx);
 		return ret;
 	}
 
+	mc13xxx->irq = irq;
+
 	mc13xxx_unlock(mc13xxx);
 
 	if (mc13xxx_probe_flags_dt(mc13xxx) < 0 && pdata)
@@ -838,42 +716,19 @@
 
 	return 0;
 }
+EXPORT_SYMBOL_GPL(mc13xxx_common_init);
 
-static int __devexit mc13xxx_remove(struct spi_device *spi)
+void mc13xxx_common_cleanup(struct mc13xxx *mc13xxx)
 {
-	struct mc13xxx *mc13xxx = dev_get_drvdata(&spi->dev);
+	free_irq(mc13xxx->irq, mc13xxx);
 
-	free_irq(mc13xxx->spidev->irq, mc13xxx);
+	mfd_remove_devices(mc13xxx->dev);
 
-	mfd_remove_devices(&spi->dev);
+	regmap_exit(mc13xxx->regmap);
 
 	kfree(mc13xxx);
-
-	return 0;
 }
-
-static struct spi_driver mc13xxx_driver = {
-	.id_table = mc13xxx_device_id,
-	.driver = {
-		.name = "mc13xxx",
-		.owner = THIS_MODULE,
-		.of_match_table = mc13xxx_dt_ids,
-	},
-	.probe = mc13xxx_probe,
-	.remove = __devexit_p(mc13xxx_remove),
-};
-
-static int __init mc13xxx_init(void)
-{
-	return spi_register_driver(&mc13xxx_driver);
-}
-subsys_initcall(mc13xxx_init);
-
-static void __exit mc13xxx_exit(void)
-{
-	spi_unregister_driver(&mc13xxx_driver);
-}
-module_exit(mc13xxx_exit);
+EXPORT_SYMBOL_GPL(mc13xxx_common_cleanup);
 
 MODULE_DESCRIPTION("Core driver for Freescale MC13XXX PMIC");
 MODULE_AUTHOR("Uwe Kleine-Koenig <u.kleine-koenig@pengutronix.de>");
diff --git a/drivers/mfd/mc13xxx-i2c.c b/drivers/mfd/mc13xxx-i2c.c
new file mode 100644
index 0000000..d22501d
--- /dev/null
+++ b/drivers/mfd/mc13xxx-i2c.c
@@ -0,0 +1,128 @@
+/*
+ * Copyright 2009-2010 Creative Product Design
+ * Marc Reilly marc@cpdesign.com.au
+ *
+ * This program is free software; you can redistribute it and/or modify it under
+ * the terms of the GNU General Public License version 2 as published by the
+ * Free Software Foundation.
+ */
+
+#include <linux/slab.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/mutex.h>
+#include <linux/mfd/core.h>
+#include <linux/mfd/mc13xxx.h>
+#include <linux/of.h>
+#include <linux/of_device.h>
+#include <linux/of_gpio.h>
+#include <linux/i2c.h>
+#include <linux/err.h>
+
+#include "mc13xxx.h"
+
+static const struct i2c_device_id mc13xxx_i2c_device_id[] = {
+	{
+		.name = "mc13892",
+		.driver_data = MC13XXX_ID_MC13892,
+	}, {
+		/* sentinel */
+	}
+};
+MODULE_DEVICE_TABLE(i2c, mc13xxx_i2c_device_id);
+
+static const struct of_device_id mc13xxx_dt_ids[] = {
+	{
+		.compatible = "fsl,mc13892",
+		.data = (void *) &mc13xxx_i2c_device_id[0],
+	}, {
+		/* sentinel */
+	}
+};
+MODULE_DEVICE_TABLE(of, mc13xxx_dt_ids);
+
+static struct regmap_config mc13xxx_regmap_i2c_config = {
+	.reg_bits = 8,
+	.val_bits = 24,
+
+	.max_register = MC13XXX_NUMREGS,
+
+	.cache_type = REGCACHE_NONE,
+};
+
+static int mc13xxx_i2c_probe(struct i2c_client *client,
+		const struct i2c_device_id *id)
+{
+	const struct of_device_id *of_id;
+	struct i2c_driver *idrv = to_i2c_driver(client->dev.driver);
+	struct mc13xxx *mc13xxx;
+	struct mc13xxx_platform_data *pdata = dev_get_platdata(&client->dev);
+	int ret;
+
+	of_id = of_match_device(mc13xxx_dt_ids, &client->dev);
+	if (of_id)
+		idrv->id_table = (const struct i2c_device_id *)of_id->data;
+
+	mc13xxx = kzalloc(sizeof(*mc13xxx), GFP_KERNEL);
+	if (!mc13xxx)
+		return -ENOMEM;
+
+	dev_set_drvdata(&client->dev, mc13xxx);
+
+	mc13xxx->dev = &client->dev;
+	mutex_init(&mc13xxx->lock);
+
+	mc13xxx->regmap = regmap_init_i2c(client, &mc13xxx_regmap_i2c_config);
+	if (IS_ERR(mc13xxx->regmap)) {
+		ret = PTR_ERR(mc13xxx->regmap);
+		dev_err(mc13xxx->dev, "Failed to initialize register map: %d\n",
+				ret);
+		dev_set_drvdata(&client->dev, NULL);
+		kfree(mc13xxx);
+		return ret;
+	}
+
+	ret = mc13xxx_common_init(mc13xxx, pdata, client->irq);
+
+	if (ret == 0 && (id->driver_data != mc13xxx->ictype))
+		dev_warn(mc13xxx->dev,
+				"device id doesn't match auto detection!\n");
+
+	return ret;
+}
+
+static int __devexit mc13xxx_i2c_remove(struct i2c_client *client)
+{
+	struct mc13xxx *mc13xxx = dev_get_drvdata(&client->dev);
+
+	mc13xxx_common_cleanup(mc13xxx);
+
+	return 0;
+}
+
+static struct i2c_driver mc13xxx_i2c_driver = {
+	.id_table = mc13xxx_i2c_device_id,
+	.driver = {
+		.owner = THIS_MODULE,
+		.name = "mc13xxx",
+		.of_match_table = mc13xxx_dt_ids,
+	},
+	.probe = mc13xxx_i2c_probe,
+	.remove = __devexit_p(mc13xxx_i2c_remove),
+};
+
+static int __init mc13xxx_i2c_init(void)
+{
+	return i2c_add_driver(&mc13xxx_i2c_driver);
+}
+subsys_initcall(mc13xxx_i2c_init);
+
+static void __exit mc13xxx_i2c_exit(void)
+{
+	i2c_del_driver(&mc13xxx_i2c_driver);
+}
+module_exit(mc13xxx_i2c_exit);
+
+MODULE_DESCRIPTION("i2c driver for Freescale MC13XXX PMIC");
+MODULE_AUTHOR("Marc Reilly <marc@cpdesign.com.au");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/mfd/mc13xxx-spi.c b/drivers/mfd/mc13xxx-spi.c
new file mode 100644
index 0000000..3fcdab3
--- /dev/null
+++ b/drivers/mfd/mc13xxx-spi.c
@@ -0,0 +1,140 @@
+/*
+ * Copyright 2009-2010 Pengutronix
+ * Uwe Kleine-Koenig <u.kleine-koenig@pengutronix.de>
+ *
+ * loosely based on an earlier driver that has
+ * Copyright 2009 Pengutronix, Sascha Hauer <s.hauer@pengutronix.de>
+ *
+ * This program is free software; you can redistribute it and/or modify it under
+ * the terms of the GNU General Public License version 2 as published by the
+ * Free Software Foundation.
+ */
+
+#include <linux/slab.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/mutex.h>
+#include <linux/interrupt.h>
+#include <linux/mfd/core.h>
+#include <linux/mfd/mc13xxx.h>
+#include <linux/of.h>
+#include <linux/of_device.h>
+#include <linux/of_gpio.h>
+#include <linux/err.h>
+#include <linux/spi/spi.h>
+
+#include "mc13xxx.h"
+
+static const struct spi_device_id mc13xxx_device_id[] = {
+	{
+		.name = "mc13783",
+		.driver_data = MC13XXX_ID_MC13783,
+	}, {
+		.name = "mc13892",
+		.driver_data = MC13XXX_ID_MC13892,
+	}, {
+		/* sentinel */
+	}
+};
+MODULE_DEVICE_TABLE(spi, mc13xxx_device_id);
+
+static const struct of_device_id mc13xxx_dt_ids[] = {
+	{ .compatible = "fsl,mc13783", .data = (void *) MC13XXX_ID_MC13783, },
+	{ .compatible = "fsl,mc13892", .data = (void *) MC13XXX_ID_MC13892, },
+	{ /* sentinel */ }
+};
+MODULE_DEVICE_TABLE(of, mc13xxx_dt_ids);
+
+static struct regmap_config mc13xxx_regmap_spi_config = {
+	.reg_bits = 7,
+	.pad_bits = 1,
+	.val_bits = 24,
+
+	.max_register = MC13XXX_NUMREGS,
+
+	.cache_type = REGCACHE_NONE,
+};
+
+static int mc13xxx_spi_probe(struct spi_device *spi)
+{
+	const struct of_device_id *of_id;
+	struct spi_driver *sdrv = to_spi_driver(spi->dev.driver);
+	struct mc13xxx *mc13xxx;
+	struct mc13xxx_platform_data *pdata = dev_get_platdata(&spi->dev);
+	int ret;
+
+	of_id = of_match_device(mc13xxx_dt_ids, &spi->dev);
+	if (of_id)
+		sdrv->id_table = &mc13xxx_device_id[(enum mc13xxx_id) of_id->data];
+
+	mc13xxx = kzalloc(sizeof(*mc13xxx), GFP_KERNEL);
+	if (!mc13xxx)
+		return -ENOMEM;
+
+	dev_set_drvdata(&spi->dev, mc13xxx);
+	spi->mode = SPI_MODE_0 | SPI_CS_HIGH;
+	spi->bits_per_word = 32;
+
+	mc13xxx->dev = &spi->dev;
+	mutex_init(&mc13xxx->lock);
+
+	mc13xxx->regmap = regmap_init_spi(spi, &mc13xxx_regmap_spi_config);
+	if (IS_ERR(mc13xxx->regmap)) {
+		ret = PTR_ERR(mc13xxx->regmap);
+		dev_err(mc13xxx->dev, "Failed to initialize register map: %d\n",
+				ret);
+		dev_set_drvdata(&spi->dev, NULL);
+		kfree(mc13xxx);
+		return ret;
+	}
+
+	ret = mc13xxx_common_init(mc13xxx, pdata, spi->irq);
+
+	if (ret) {
+		dev_set_drvdata(&spi->dev, NULL);
+	} else {
+		const struct spi_device_id *devid =
+			spi_get_device_id(spi);
+		if (!devid || devid->driver_data != mc13xxx->ictype)
+			dev_warn(mc13xxx->dev,
+				"device id doesn't match auto detection!\n");
+	}
+
+	return ret;
+}
+
+static int __devexit mc13xxx_spi_remove(struct spi_device *spi)
+{
+	struct mc13xxx *mc13xxx = dev_get_drvdata(&spi->dev);
+
+	mc13xxx_common_cleanup(mc13xxx);
+
+	return 0;
+}
+
+static struct spi_driver mc13xxx_spi_driver = {
+	.id_table = mc13xxx_device_id,
+	.driver = {
+		.name = "mc13xxx",
+		.owner = THIS_MODULE,
+		.of_match_table = mc13xxx_dt_ids,
+	},
+	.probe = mc13xxx_spi_probe,
+	.remove = __devexit_p(mc13xxx_spi_remove),
+};
+
+static int __init mc13xxx_init(void)
+{
+	return spi_register_driver(&mc13xxx_spi_driver);
+}
+subsys_initcall(mc13xxx_init);
+
+static void __exit mc13xxx_exit(void)
+{
+	spi_unregister_driver(&mc13xxx_spi_driver);
+}
+module_exit(mc13xxx_exit);
+
+MODULE_DESCRIPTION("Core driver for Freescale MC13XXX PMIC");
+MODULE_AUTHOR("Uwe Kleine-Koenig <u.kleine-koenig@pengutronix.de>");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/mfd/mc13xxx.h b/drivers/mfd/mc13xxx.h
new file mode 100644
index 0000000..bbba06f
--- /dev/null
+++ b/drivers/mfd/mc13xxx.h
@@ -0,0 +1,45 @@
+/*
+ * Copyright 2012 Creative Product Design
+ * Marc Reilly <marc@cpdesign.com.au>
+ *
+ * This program is free software; you can redistribute it and/or modify it under
+ * the terms of the GNU General Public License version 2 as published by the
+ * Free Software Foundation.
+ */
+#ifndef __DRIVERS_MFD_MC13XXX_H
+#define __DRIVERS_MFD_MC13XXX_H
+
+#include <linux/mutex.h>
+#include <linux/regmap.h>
+#include <linux/mfd/mc13xxx.h>
+
+enum mc13xxx_id {
+	MC13XXX_ID_MC13783,
+	MC13XXX_ID_MC13892,
+	MC13XXX_ID_INVALID,
+};
+
+#define MC13XXX_NUMREGS 0x3f
+
+struct mc13xxx {
+	struct regmap *regmap;
+
+	struct device *dev;
+	enum mc13xxx_id ictype;
+
+	struct mutex lock;
+	int irq;
+	int flags;
+
+	irq_handler_t irqhandler[MC13XXX_NUM_IRQ];
+	void *irqdata[MC13XXX_NUM_IRQ];
+
+	int adcflags;
+};
+
+int mc13xxx_common_init(struct mc13xxx *mc13xxx,
+		struct mc13xxx_platform_data *pdata, int irq);
+
+void mc13xxx_common_cleanup(struct mc13xxx *mc13xxx);
+
+#endif /* __DRIVERS_MFD_MC13XXX_H */
diff --git a/drivers/mfd/pcf50633-core.c b/drivers/mfd/pcf50633-core.c
index 189c2f0..29c122b 100644
--- a/drivers/mfd/pcf50633-core.c
+++ b/drivers/mfd/pcf50633-core.c
@@ -204,7 +204,7 @@
 		return -ENOENT;
 	}
 
-	pcf = kzalloc(sizeof(*pcf), GFP_KERNEL);
+	pcf = devm_kzalloc(&client->dev, sizeof(*pcf), GFP_KERNEL);
 	if (!pcf)
 		return -ENOMEM;
 
@@ -212,12 +212,11 @@
 
 	mutex_init(&pcf->lock);
 
-	pcf->regmap = regmap_init_i2c(client, &pcf50633_regmap_config);
+	pcf->regmap = devm_regmap_init_i2c(client, &pcf50633_regmap_config);
 	if (IS_ERR(pcf->regmap)) {
 		ret = PTR_ERR(pcf->regmap);
-		dev_err(pcf->dev, "Failed to allocate register map: %d\n",
-			ret);
-		goto err_free;
+		dev_err(pcf->dev, "Failed to allocate register map: %d\n", ret);
+		return ret;
 	}
 
 	i2c_set_clientdata(client, pcf);
@@ -228,7 +227,7 @@
 	if (version < 0 || variant < 0) {
 		dev_err(pcf->dev, "Unable to probe pcf50633\n");
 		ret = -ENODEV;
-		goto err_regmap;
+		return ret;
 	}
 
 	dev_info(pcf->dev, "Probed device version %d variant %d\n",
@@ -237,16 +236,11 @@
 	pcf50633_irq_init(pcf, client->irq);
 
 	/* Create sub devices */
-	pcf50633_client_dev_register(pcf, "pcf50633-input",
-						&pcf->input_pdev);
-	pcf50633_client_dev_register(pcf, "pcf50633-rtc",
-						&pcf->rtc_pdev);
-	pcf50633_client_dev_register(pcf, "pcf50633-mbc",
-						&pcf->mbc_pdev);
-	pcf50633_client_dev_register(pcf, "pcf50633-adc",
-						&pcf->adc_pdev);
-	pcf50633_client_dev_register(pcf, "pcf50633-backlight",
-						&pcf->bl_pdev);
+	pcf50633_client_dev_register(pcf, "pcf50633-input", &pcf->input_pdev);
+	pcf50633_client_dev_register(pcf, "pcf50633-rtc", &pcf->rtc_pdev);
+	pcf50633_client_dev_register(pcf, "pcf50633-mbc", &pcf->mbc_pdev);
+	pcf50633_client_dev_register(pcf, "pcf50633-adc", &pcf->adc_pdev);
+	pcf50633_client_dev_register(pcf, "pcf50633-backlight", &pcf->bl_pdev);
 
 
 	for (i = 0; i < PCF50633_NUM_REGULATORS; i++) {
@@ -274,13 +268,6 @@
 		pdata->probe_done(pcf);
 
 	return 0;
-
-err_regmap:
-	regmap_exit(pcf->regmap);
-err_free:
-	kfree(pcf);
-
-	return ret;
 }
 
 static int __devexit pcf50633_remove(struct i2c_client *client)
@@ -300,9 +287,6 @@
 	for (i = 0; i < PCF50633_NUM_REGULATORS; i++)
 		platform_device_unregister(pcf->regulator_pdev[i]);
 
-	regmap_exit(pcf->regmap);
-	kfree(pcf);
-
 	return 0;
 }
 
diff --git a/drivers/mfd/rc5t583.c b/drivers/mfd/rc5t583.c
index 44afae0..cdc1df7 100644
--- a/drivers/mfd/rc5t583.c
+++ b/drivers/mfd/rc5t583.c
@@ -75,6 +75,7 @@
 	(RC5T583_EXT_PWRREQ1_CONTROL | RC5T583_EXT_PWRREQ2_CONTROL)
 
 static struct mfd_cell rc5t583_subdevs[] = {
+	{.name = "rc5t583-gpio",},
 	{.name = "rc5t583-regulator",},
 	{.name = "rc5t583-rtc",      },
 	{.name = "rc5t583-key",      }
@@ -267,7 +268,7 @@
 	rc5t583->dev = &i2c->dev;
 	i2c_set_clientdata(i2c, rc5t583);
 
-	rc5t583->regmap = regmap_init_i2c(i2c, &rc5t583_regmap_config);
+	rc5t583->regmap = devm_regmap_init_i2c(i2c, &rc5t583_regmap_config);
 	if (IS_ERR(rc5t583->regmap)) {
 		ret = PTR_ERR(rc5t583->regmap);
 		dev_err(&i2c->dev, "regmap initialization failed: %d\n", ret);
@@ -276,7 +277,7 @@
 
 	ret = rc5t583_clear_ext_power_req(rc5t583, pdata);
 	if (ret < 0)
-		goto err_irq_init;
+		return ret;
 
 	if (i2c->irq) {
 		ret = rc5t583_irq_init(rc5t583, i2c->irq, pdata->irq_base);
@@ -299,8 +300,6 @@
 err_add_devs:
 	if (irq_init_success)
 		rc5t583_irq_exit(rc5t583);
-err_irq_init:
-	regmap_exit(rc5t583->regmap);
 	return ret;
 }
 
@@ -310,7 +309,6 @@
 
 	mfd_remove_devices(rc5t583->dev);
 	rc5t583_irq_exit(rc5t583);
-	regmap_exit(rc5t583->regmap);
 	return 0;
 }
 
diff --git a/drivers/mfd/rdc321x-southbridge.c b/drivers/mfd/rdc321x-southbridge.c
index 809bd4a..685d61e 100644
--- a/drivers/mfd/rdc321x-southbridge.c
+++ b/drivers/mfd/rdc321x-southbridge.c
@@ -108,18 +108,7 @@
 	.remove		= __devexit_p(rdc321x_sb_remove),
 };
 
-static int __init rdc321x_sb_init(void)
-{
-	return pci_register_driver(&rdc321x_sb_driver);
-}
-
-static void __exit rdc321x_sb_exit(void)
-{
-	pci_unregister_driver(&rdc321x_sb_driver);
-}
-
-module_init(rdc321x_sb_init);
-module_exit(rdc321x_sb_exit);
+module_pci_driver(rdc321x_sb_driver);
 
 MODULE_AUTHOR("Florian Fainelli <florian@openwrt.org>");
 MODULE_LICENSE("GPL");
diff --git a/drivers/mfd/s5m-core.c b/drivers/mfd/s5m-core.c
index 48949d9..dd17030 100644
--- a/drivers/mfd/s5m-core.c
+++ b/drivers/mfd/s5m-core.c
@@ -114,12 +114,12 @@
 		s5m87xx->wakeup = pdata->wakeup;
 	}
 
-	s5m87xx->regmap = regmap_init_i2c(i2c, &s5m_regmap_config);
+	s5m87xx->regmap = devm_regmap_init_i2c(i2c, &s5m_regmap_config);
 	if (IS_ERR(s5m87xx->regmap)) {
 		ret = PTR_ERR(s5m87xx->regmap);
 		dev_err(&i2c->dev, "Failed to allocate register map: %d\n",
 			ret);
-		goto err;
+		return ret;
 	}
 
 	s5m87xx->rtc = i2c_new_dummy(i2c->adapter, RTC_I2C_ADDR);
@@ -159,7 +159,6 @@
 	mfd_remove_devices(s5m87xx->dev);
 	s5m_irq_exit(s5m87xx);
 	i2c_unregister_device(s5m87xx->rtc);
-	regmap_exit(s5m87xx->regmap);
 	return ret;
 }
 
@@ -170,7 +169,6 @@
 	mfd_remove_devices(s5m87xx->dev);
 	s5m_irq_exit(s5m87xx);
 	i2c_unregister_device(s5m87xx->rtc);
-	regmap_exit(s5m87xx->regmap);
 	return 0;
 }
 
diff --git a/drivers/mfd/sta2x11-mfd.c b/drivers/mfd/sta2x11-mfd.c
new file mode 100644
index 0000000..d31fed0
--- /dev/null
+++ b/drivers/mfd/sta2x11-mfd.c
@@ -0,0 +1,467 @@
+/*
+ * Copyright (c) 2009-2011 Wind River Systems, Inc.
+ * Copyright (c) 2011 ST Microelectronics (Alessandro Rubini)
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
+ * See the GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ *
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/spinlock.h>
+#include <linux/errno.h>
+#include <linux/device.h>
+#include <linux/slab.h>
+#include <linux/list.h>
+#include <linux/io.h>
+#include <linux/ioport.h>
+#include <linux/pci.h>
+#include <linux/debugfs.h>
+#include <linux/seq_file.h>
+#include <linux/platform_device.h>
+#include <linux/mfd/core.h>
+#include <linux/mfd/sta2x11-mfd.h>
+
+#include <asm/sta2x11.h>
+
+/* This describes the STA2X11 MFD chip for us; we may have several */
+struct sta2x11_mfd {
+	struct sta2x11_instance *instance;
+	spinlock_t lock;
+	struct list_head list;
+	void __iomem *sctl_regs;
+	void __iomem *apbreg_regs;
+};
+
+static LIST_HEAD(sta2x11_mfd_list);
+
+/* Three functions to act on the list */
+static struct sta2x11_mfd *sta2x11_mfd_find(struct pci_dev *pdev)
+{
+	struct sta2x11_instance *instance;
+	struct sta2x11_mfd *mfd;
+
+	if (!pdev && !list_empty(&sta2x11_mfd_list)) {
+		pr_warning("%s: Unspecified device, "
+			    "using first instance\n", __func__);
+		return list_entry(sta2x11_mfd_list.next,
+				  struct sta2x11_mfd, list);
+	}
+
+	instance = sta2x11_get_instance(pdev);
+	if (!instance)
+		return NULL;
+	list_for_each_entry(mfd, &sta2x11_mfd_list, list) {
+		if (mfd->instance == instance)
+			return mfd;
+	}
+	return NULL;
+}
+
+static int __devinit sta2x11_mfd_add(struct pci_dev *pdev, gfp_t flags)
+{
+	struct sta2x11_mfd *mfd = sta2x11_mfd_find(pdev);
+	struct sta2x11_instance *instance;
+
+	if (mfd)
+		return -EBUSY;
+	instance = sta2x11_get_instance(pdev);
+	if (!instance)
+		return -EINVAL;
+	mfd = kzalloc(sizeof(*mfd), flags);
+	if (!mfd)
+		return -ENOMEM;
+	INIT_LIST_HEAD(&mfd->list);
+	spin_lock_init(&mfd->lock);
+	mfd->instance = instance;
+	list_add(&mfd->list, &sta2x11_mfd_list);
+	return 0;
+}
+
+static int __devexit mfd_remove(struct pci_dev *pdev)
+{
+	struct sta2x11_mfd *mfd = sta2x11_mfd_find(pdev);
+
+	if (!mfd)
+		return -ENODEV;
+	list_del(&mfd->list);
+	kfree(mfd);
+	return 0;
+}
+
+/* These two functions are exported and are not expected to fail */
+u32 sta2x11_sctl_mask(struct pci_dev *pdev, u32 reg, u32 mask, u32 val)
+{
+	struct sta2x11_mfd *mfd = sta2x11_mfd_find(pdev);
+	u32 r;
+	unsigned long flags;
+
+	if (!mfd) {
+		dev_warn(&pdev->dev, ": can't access sctl regs\n");
+		return 0;
+	}
+	if (!mfd->sctl_regs) {
+		dev_warn(&pdev->dev, ": system ctl not initialized\n");
+		return 0;
+	}
+	spin_lock_irqsave(&mfd->lock, flags);
+	r = readl(mfd->sctl_regs + reg);
+	r &= ~mask;
+	r |= val;
+	if (mask)
+		writel(r, mfd->sctl_regs + reg);
+	spin_unlock_irqrestore(&mfd->lock, flags);
+	return r;
+}
+EXPORT_SYMBOL(sta2x11_sctl_mask);
+
+u32 sta2x11_apbreg_mask(struct pci_dev *pdev, u32 reg, u32 mask, u32 val)
+{
+	struct sta2x11_mfd *mfd = sta2x11_mfd_find(pdev);
+	u32 r;
+	unsigned long flags;
+
+	if (!mfd) {
+		dev_warn(&pdev->dev, ": can't access apb regs\n");
+		return 0;
+	}
+	if (!mfd->apbreg_regs) {
+		dev_warn(&pdev->dev, ": apb bridge not initialized\n");
+		return 0;
+	}
+	spin_lock_irqsave(&mfd->lock, flags);
+	r = readl(mfd->apbreg_regs + reg);
+	r &= ~mask;
+	r |= val;
+	if (mask)
+		writel(r, mfd->apbreg_regs + reg);
+	spin_unlock_irqrestore(&mfd->lock, flags);
+	return r;
+}
+EXPORT_SYMBOL(sta2x11_apbreg_mask);
+
+/* Two debugfs files, for our registers (FIXME: one instance only) */
+#define REG(regname) {.name = #regname, .offset = SCTL_ ## regname}
+static struct debugfs_reg32 sta2x11_sctl_regs[] = {
+	REG(SCCTL), REG(ARMCFG), REG(SCPLLCTL), REG(SCPLLFCTRL),
+	REG(SCRESFRACT), REG(SCRESCTRL1), REG(SCRESXTRL2), REG(SCPEREN0),
+	REG(SCPEREN1), REG(SCPEREN2), REG(SCGRST), REG(SCPCIPMCR1),
+	REG(SCPCIPMCR2), REG(SCPCIPMSR1), REG(SCPCIPMSR2), REG(SCPCIPMSR3),
+	REG(SCINTREN), REG(SCRISR), REG(SCCLKSTAT0), REG(SCCLKSTAT1),
+	REG(SCCLKSTAT2), REG(SCRSTSTA),
+};
+#undef REG
+
+static struct debugfs_regset32 sctl_regset = {
+	.regs = sta2x11_sctl_regs,
+	.nregs = ARRAY_SIZE(sta2x11_sctl_regs),
+};
+
+#define REG(regname) {.name = #regname, .offset = regname}
+static struct debugfs_reg32 sta2x11_apbreg_regs[] = {
+	REG(APBREG_BSR), REG(APBREG_PAER), REG(APBREG_PWAC), REG(APBREG_PRAC),
+	REG(APBREG_PCG), REG(APBREG_PUR), REG(APBREG_EMU_PCG),
+};
+#undef REG
+
+static struct debugfs_regset32 apbreg_regset = {
+	.regs = sta2x11_apbreg_regs,
+	.nregs = ARRAY_SIZE(sta2x11_apbreg_regs),
+};
+
+static struct dentry *sta2x11_sctl_debugfs;
+static struct dentry *sta2x11_apbreg_debugfs;
+
+/* Probe for the two platform devices */
+static int sta2x11_sctl_probe(struct platform_device *dev)
+{
+	struct pci_dev **pdev;
+	struct sta2x11_mfd *mfd;
+	struct resource *res;
+
+	pdev = dev->dev.platform_data;
+	mfd = sta2x11_mfd_find(*pdev);
+	if (!mfd)
+		return -ENODEV;
+
+	res = platform_get_resource(dev, IORESOURCE_MEM, 0);
+	if (!res)
+		return -ENOMEM;
+
+	if (!request_mem_region(res->start, resource_size(res),
+				"sta2x11-sctl"))
+		return -EBUSY;
+
+	mfd->sctl_regs = ioremap(res->start, resource_size(res));
+	if (!mfd->sctl_regs) {
+		release_mem_region(res->start, resource_size(res));
+		return -ENOMEM;
+	}
+	sctl_regset.base = mfd->sctl_regs;
+	sta2x11_sctl_debugfs = debugfs_create_regset32("sta2x11-sctl",
+						  S_IFREG | S_IRUGO,
+						  NULL, &sctl_regset);
+	return 0;
+}
+
+static int sta2x11_apbreg_probe(struct platform_device *dev)
+{
+	struct pci_dev **pdev;
+	struct sta2x11_mfd *mfd;
+	struct resource *res;
+
+	pdev = dev->dev.platform_data;
+	dev_dbg(&dev->dev, "%s: pdata is %p\n", __func__, pdev);
+	dev_dbg(&dev->dev, "%s: *pdata is %p\n", __func__, *pdev);
+
+	mfd = sta2x11_mfd_find(*pdev);
+	if (!mfd)
+		return -ENODEV;
+
+	res = platform_get_resource(dev, IORESOURCE_MEM, 0);
+	if (!res)
+		return -ENOMEM;
+
+	if (!request_mem_region(res->start, resource_size(res),
+				"sta2x11-apbreg"))
+		return -EBUSY;
+
+	mfd->apbreg_regs = ioremap(res->start, resource_size(res));
+	if (!mfd->apbreg_regs) {
+		release_mem_region(res->start, resource_size(res));
+		return -ENOMEM;
+	}
+	dev_dbg(&dev->dev, "%s: regbase %p\n", __func__, mfd->apbreg_regs);
+
+	apbreg_regset.base = mfd->apbreg_regs;
+	sta2x11_apbreg_debugfs = debugfs_create_regset32("sta2x11-apbreg",
+						  S_IFREG | S_IRUGO,
+						  NULL, &apbreg_regset);
+	return 0;
+}
+
+/* The two platform drivers */
+static struct platform_driver sta2x11_sctl_platform_driver = {
+	.driver = {
+		.name	= "sta2x11-sctl",
+		.owner	= THIS_MODULE,
+	},
+	.probe		= sta2x11_sctl_probe,
+};
+
+static int __init sta2x11_sctl_init(void)
+{
+	pr_info("%s\n", __func__);
+	return platform_driver_register(&sta2x11_sctl_platform_driver);
+}
+
+static struct platform_driver sta2x11_platform_driver = {
+	.driver = {
+		.name	= "sta2x11-apbreg",
+		.owner	= THIS_MODULE,
+	},
+	.probe		= sta2x11_apbreg_probe,
+};
+
+static int __init sta2x11_apbreg_init(void)
+{
+	pr_info("%s\n", __func__);
+	return platform_driver_register(&sta2x11_platform_driver);
+}
+
+/*
+ * What follows is the PCI device that hosts the above two pdevs.
+ * Each logic block is 4kB and they are all consecutive: we use this info.
+ */
+
+/* Bar 0 */
+enum bar0_cells {
+	STA2X11_GPIO_0 = 0,
+	STA2X11_GPIO_1,
+	STA2X11_GPIO_2,
+	STA2X11_GPIO_3,
+	STA2X11_SCTL,
+	STA2X11_SCR,
+	STA2X11_TIME,
+};
+/* Bar 1 */
+enum bar1_cells {
+	STA2X11_APBREG = 0,
+};
+#define CELL_4K(_name, _cell) { \
+		.name = _name, \
+		.start = _cell * 4096, .end = _cell * 4096 + 4095, \
+		.flags = IORESOURCE_MEM, \
+		}
+
+static const __devinitconst struct resource gpio_resources[] = {
+	{
+		.name = "sta2x11_gpio", /* 4 consecutive cells, 1 driver */
+		.start = 0,
+		.end = (4 * 4096) - 1,
+		.flags = IORESOURCE_MEM,
+	}
+};
+static const __devinitconst struct resource sctl_resources[] = {
+	CELL_4K("sta2x11-sctl", STA2X11_SCTL),
+};
+static const __devinitconst struct resource scr_resources[] = {
+	CELL_4K("sta2x11-scr", STA2X11_SCR),
+};
+static const __devinitconst struct resource time_resources[] = {
+	CELL_4K("sta2x11-time", STA2X11_TIME),
+};
+
+static const __devinitconst struct resource apbreg_resources[] = {
+	CELL_4K("sta2x11-apbreg", STA2X11_APBREG),
+};
+
+#define DEV(_name, _r) \
+	{ .name = _name, .num_resources = ARRAY_SIZE(_r), .resources = _r, }
+
+static __devinitdata struct mfd_cell sta2x11_mfd_bar0[] = {
+	DEV("sta2x11-gpio", gpio_resources), /* offset 0: we add pdata later */
+	DEV("sta2x11-sctl", sctl_resources),
+	DEV("sta2x11-scr", scr_resources),
+	DEV("sta2x11-time", time_resources),
+};
+
+static __devinitdata struct mfd_cell sta2x11_mfd_bar1[] = {
+	DEV("sta2x11-apbreg", apbreg_resources),
+};
+
+static int sta2x11_mfd_suspend(struct pci_dev *pdev, pm_message_t state)
+{
+	pci_save_state(pdev);
+	pci_disable_device(pdev);
+	pci_set_power_state(pdev, pci_choose_state(pdev, state));
+
+	return 0;
+}
+
+static int sta2x11_mfd_resume(struct pci_dev *pdev)
+{
+	int err;
+
+	pci_set_power_state(pdev, 0);
+	err = pci_enable_device(pdev);
+	if (err)
+		return err;
+	pci_restore_state(pdev);
+
+	return 0;
+}
+
+static int __devinit sta2x11_mfd_probe(struct pci_dev *pdev,
+				       const struct pci_device_id *pci_id)
+{
+	int err, i;
+	struct sta2x11_gpio_pdata *gpio_data;
+
+	dev_info(&pdev->dev, "%s\n", __func__);
+
+	err = pci_enable_device(pdev);
+	if (err) {
+		dev_err(&pdev->dev, "Can't enable device.\n");
+		return err;
+	}
+
+	err = pci_enable_msi(pdev);
+	if (err)
+		dev_info(&pdev->dev, "Enable msi failed\n");
+
+	/* Read gpio config data as pci device's platform data */
+	gpio_data = dev_get_platdata(&pdev->dev);
+	if (!gpio_data)
+		dev_warn(&pdev->dev, "no gpio configuration\n");
+
+	dev_dbg(&pdev->dev, "%s, gpio_data = %p (%p)\n", __func__,
+		gpio_data, &gpio_data);
+	dev_dbg(&pdev->dev, "%s, pdev = %p (%p)\n", __func__,
+		pdev, &pdev);
+
+	/* platform data is the pci device for all of them */
+	for (i = 0; i < ARRAY_SIZE(sta2x11_mfd_bar0); i++) {
+		sta2x11_mfd_bar0[i].pdata_size = sizeof(pdev);
+		sta2x11_mfd_bar0[i].platform_data = &pdev;
+	}
+	sta2x11_mfd_bar1[0].pdata_size = sizeof(pdev);
+	sta2x11_mfd_bar1[0].platform_data = &pdev;
+
+	/* Record this pdev before mfd_add_devices: their probe looks for it */
+	sta2x11_mfd_add(pdev, GFP_ATOMIC);
+
+
+	err = mfd_add_devices(&pdev->dev, -1,
+			      sta2x11_mfd_bar0,
+			      ARRAY_SIZE(sta2x11_mfd_bar0),
+			      &pdev->resource[0],
+			      0);
+	if (err) {
+		dev_err(&pdev->dev, "mfd_add_devices[0] failed: %d\n", err);
+		goto err_disable;
+	}
+
+	err = mfd_add_devices(&pdev->dev, -1,
+			      sta2x11_mfd_bar1,
+			      ARRAY_SIZE(sta2x11_mfd_bar1),
+			      &pdev->resource[1],
+			      0);
+	if (err) {
+		dev_err(&pdev->dev, "mfd_add_devices[1] failed: %d\n", err);
+		goto err_disable;
+	}
+
+	return 0;
+
+err_disable:
+	mfd_remove_devices(&pdev->dev);
+	pci_disable_device(pdev);
+	pci_disable_msi(pdev);
+	return err;
+}
+
+static DEFINE_PCI_DEVICE_TABLE(sta2x11_mfd_tbl) = {
+	{PCI_DEVICE(PCI_VENDOR_ID_STMICRO, PCI_DEVICE_ID_STMICRO_GPIO)},
+	{0,},
+};
+
+static struct pci_driver sta2x11_mfd_driver = {
+	.name =		"sta2x11-mfd",
+	.id_table =	sta2x11_mfd_tbl,
+	.probe =	sta2x11_mfd_probe,
+	.suspend =	sta2x11_mfd_suspend,
+	.resume =	sta2x11_mfd_resume,
+};
+
+static int __init sta2x11_mfd_init(void)
+{
+	pr_info("%s\n", __func__);
+	return pci_register_driver(&sta2x11_mfd_driver);
+}
+
+/*
+ * All of this must be ready before "normal" devices like MMCI appear.
+ * But MFD (the pci device) can't be too early. The following choice
+ * prepares platform drivers very early and probes the PCI device later,
+ * but before other PCI devices.
+ */
+subsys_initcall(sta2x11_apbreg_init);
+subsys_initcall(sta2x11_sctl_init);
+rootfs_initcall(sta2x11_mfd_init);
+
+MODULE_LICENSE("GPL v2");
+MODULE_AUTHOR("Wind River");
+MODULE_DESCRIPTION("STA2x11 mfd for GPIO, SCTL and APBREG");
+MODULE_DEVICE_TABLE(pci, sta2x11_mfd_tbl);
diff --git a/drivers/mfd/stmpe-i2c.c b/drivers/mfd/stmpe-i2c.c
index 373f423..947a06a 100644
--- a/drivers/mfd/stmpe-i2c.c
+++ b/drivers/mfd/stmpe-i2c.c
@@ -6,7 +6,7 @@
  *
  * License Terms: GNU General Public License, version 2
  * Author: Rabin Vincent <rabin.vincent@stericsson.com> for ST-Ericsson
- * Author: Viresh Kumar <viresh.kumar@st.com> for ST Microelectronics
+ * Author: Viresh Kumar <viresh.linux@gmail.com> for ST Microelectronics
  */
 
 #include <linux/i2c.h>
diff --git a/drivers/mfd/stmpe-spi.c b/drivers/mfd/stmpe-spi.c
index b58c43c..9edfe86 100644
--- a/drivers/mfd/stmpe-spi.c
+++ b/drivers/mfd/stmpe-spi.c
@@ -4,7 +4,7 @@
  * Copyright (C) ST Microelectronics SA 2011
  *
  * License Terms: GNU General Public License, version 2
- * Author: Viresh Kumar <viresh.kumar@st.com> for ST Microelectronics
+ * Author: Viresh Kumar <viresh.linux@gmail.com> for ST Microelectronics
  */
 
 #include <linux/spi/spi.h>
@@ -122,7 +122,6 @@
 static struct spi_driver stmpe_spi_driver = {
 	.driver = {
 		.name	= "stmpe-spi",
-		.bus	= &spi_bus_type,
 		.owner	= THIS_MODULE,
 #ifdef CONFIG_PM
 		.pm	= &stmpe_dev_pm_ops,
@@ -147,4 +146,4 @@
 
 MODULE_LICENSE("GPL v2");
 MODULE_DESCRIPTION("STMPE MFD SPI Interface Driver");
-MODULE_AUTHOR("Viresh Kumar <viresh.kumar@st.com>");
+MODULE_AUTHOR("Viresh Kumar <viresh.linux@gmail.com>");
diff --git a/drivers/mfd/tps65090.c b/drivers/mfd/tps65090.c
index 47f802b..396b9d1 100644
--- a/drivers/mfd/tps65090.c
+++ b/drivers/mfd/tps65090.c
@@ -283,27 +283,24 @@
 		}
 	}
 
-	tps65090->rmap = regmap_init_i2c(tps65090->client,
-		&tps65090_regmap_config);
+	tps65090->rmap = devm_regmap_init_i2c(tps65090->client,
+					      &tps65090_regmap_config);
 	if (IS_ERR(tps65090->rmap)) {
-		dev_err(&client->dev, "regmap_init failed with err: %ld\n",
-			PTR_ERR(tps65090->rmap));
+		ret = PTR_ERR(tps65090->rmap);
+		dev_err(&client->dev, "regmap_init failed with err: %d\n", ret);
 		goto err_irq_exit;
-	};
+	}
 
 	ret = mfd_add_devices(tps65090->dev, -1, tps65090s,
 		ARRAY_SIZE(tps65090s), NULL, 0);
 	if (ret) {
 		dev_err(&client->dev, "add mfd devices failed with err: %d\n",
 			ret);
-		goto err_regmap_exit;
+		goto err_irq_exit;
 	}
 
 	return 0;
 
-err_regmap_exit:
-	regmap_exit(tps65090->rmap);
-
 err_irq_exit:
 	if (client->irq)
 		free_irq(client->irq, tps65090);
@@ -316,29 +313,34 @@
 	struct tps65090 *tps65090 = i2c_get_clientdata(client);
 
 	mfd_remove_devices(tps65090->dev);
-	regmap_exit(tps65090->rmap);
 	if (client->irq)
 		free_irq(client->irq, tps65090);
 
 	return 0;
 }
 
-#ifdef CONFIG_PM
-static int tps65090_i2c_suspend(struct i2c_client *client, pm_message_t state)
+#ifdef CONFIG_PM_SLEEP
+static int tps65090_suspend(struct device *dev)
 {
+	struct i2c_client *client = to_i2c_client(dev);
 	if (client->irq)
 		disable_irq(client->irq);
 	return 0;
 }
 
-static int tps65090_i2c_resume(struct i2c_client *client)
+static int tps65090_resume(struct device *dev)
 {
+	struct i2c_client *client = to_i2c_client(dev);
 	if (client->irq)
 		enable_irq(client->irq);
 	return 0;
 }
 #endif
 
+static const struct dev_pm_ops tps65090_pm_ops = {
+	SET_SYSTEM_SLEEP_PM_OPS(tps65090_suspend, tps65090_resume)
+};
+
 static const struct i2c_device_id tps65090_id_table[] = {
 	{ "tps65090", 0 },
 	{ },
@@ -349,13 +351,10 @@
 	.driver	= {
 		.name	= "tps65090",
 		.owner	= THIS_MODULE,
+		.pm	= &tps65090_pm_ops,
 	},
 	.probe		= tps65090_i2c_probe,
 	.remove		= __devexit_p(tps65090_i2c_remove),
-#ifdef CONFIG_PM
-	.suspend	= tps65090_i2c_suspend,
-	.resume		= tps65090_i2c_resume,
-#endif
 	.id_table	= tps65090_id_table,
 };
 
diff --git a/drivers/mfd/tps65217.c b/drivers/mfd/tps65217.c
index f7d854e..db194e4 100644
--- a/drivers/mfd/tps65217.c
+++ b/drivers/mfd/tps65217.c
@@ -96,7 +96,7 @@
  * @val: Value to write.
  * @level: Password protected level
  */
-int tps65217_update_bits(struct tps65217 *tps, unsigned int reg,
+static int tps65217_update_bits(struct tps65217 *tps, unsigned int reg,
 		unsigned int mask, unsigned int val, unsigned int level)
 {
 	int ret;
@@ -150,7 +150,7 @@
 		return -ENOMEM;
 
 	tps->pdata = pdata;
-	tps->regmap = regmap_init_i2c(client, &tps65217_regmap_config);
+	tps->regmap = devm_regmap_init_i2c(client, &tps65217_regmap_config);
 	if (IS_ERR(tps->regmap)) {
 		ret = PTR_ERR(tps->regmap);
 		dev_err(tps->dev, "Failed to allocate register map: %d\n",
@@ -163,9 +163,9 @@
 
 	ret = tps65217_reg_read(tps, TPS65217_REG_CHIPID, &version);
 	if (ret < 0) {
-		dev_err(tps->dev, "Failed to read revision"
-					" register: %d\n", ret);
-		goto err_regmap;
+		dev_err(tps->dev, "Failed to read revision register: %d\n",
+			ret);
+		return ret;
 	}
 
 	dev_info(tps->dev, "TPS65217 ID %#x version 1.%d\n",
@@ -190,11 +190,6 @@
 	}
 
 	return 0;
-
-err_regmap:
-	regmap_exit(tps->regmap);
-
-	return ret;
 }
 
 static int __devexit tps65217_remove(struct i2c_client *client)
@@ -205,8 +200,6 @@
 	for (i = 0; i < TPS65217_NUM_REGULATOR; i++)
 		platform_device_unregister(tps->regulator_pdev[i]);
 
-	regmap_exit(tps->regmap);
-
 	return 0;
 }
 
diff --git a/drivers/mfd/tps65910-irq.c b/drivers/mfd/tps65910-irq.c
index c9ed5c0..09aab3e4 100644
--- a/drivers/mfd/tps65910-irq.c
+++ b/drivers/mfd/tps65910-irq.c
@@ -20,15 +20,10 @@
 #include <linux/device.h>
 #include <linux/interrupt.h>
 #include <linux/irq.h>
+#include <linux/irqdomain.h>
 #include <linux/gpio.h>
 #include <linux/mfd/tps65910.h>
 
-static inline int irq_to_tps65910_irq(struct tps65910 *tps65910,
-							int irq)
-{
-	return (irq - tps65910->irq_base);
-}
-
 /*
  * This is a threaded IRQ handler so can access I2C/SPI.  Since all
  * interrupts are clear on read the IRQ line will be reasserted and
@@ -41,28 +36,28 @@
 static irqreturn_t tps65910_irq(int irq, void *irq_data)
 {
 	struct tps65910 *tps65910 = irq_data;
+	unsigned int reg;
 	u32 irq_sts;
 	u32 irq_mask;
-	u8 reg;
 	int i;
 
-	tps65910->read(tps65910, TPS65910_INT_STS, 1, &reg);
+	tps65910_reg_read(tps65910, TPS65910_INT_STS, &reg);
 	irq_sts = reg;
-	tps65910->read(tps65910, TPS65910_INT_STS2, 1, &reg);
+	tps65910_reg_read(tps65910, TPS65910_INT_STS2, &reg);
 	irq_sts |= reg << 8;
 	switch (tps65910_chip_id(tps65910)) {
 	case TPS65911:
-		tps65910->read(tps65910, TPS65910_INT_STS3, 1, &reg);
+		tps65910_reg_read(tps65910, TPS65910_INT_STS3, &reg);
 		irq_sts |= reg << 16;
 	}
 
-	tps65910->read(tps65910, TPS65910_INT_MSK, 1, &reg);
+	tps65910_reg_read(tps65910, TPS65910_INT_MSK, &reg);
 	irq_mask = reg;
-	tps65910->read(tps65910, TPS65910_INT_MSK2, 1, &reg);
+	tps65910_reg_read(tps65910, TPS65910_INT_MSK2, &reg);
 	irq_mask |= reg << 8;
 	switch (tps65910_chip_id(tps65910)) {
 	case TPS65911:
-		tps65910->read(tps65910, TPS65910_INT_MSK3, 1, &reg);
+		tps65910_reg_read(tps65910, TPS65910_INT_MSK3, &reg);
 		irq_mask |= reg << 16;
 	}
 
@@ -76,19 +71,19 @@
 		if (!(irq_sts & (1 << i)))
 			continue;
 
-		handle_nested_irq(tps65910->irq_base + i);
+		handle_nested_irq(irq_find_mapping(tps65910->domain, i));
 	}
 
 	/* Write the STS register back to clear IRQs we handled */
 	reg = irq_sts & 0xFF;
 	irq_sts >>= 8;
-	tps65910->write(tps65910, TPS65910_INT_STS, 1, &reg);
+	tps65910_reg_write(tps65910, TPS65910_INT_STS, reg);
 	reg = irq_sts & 0xFF;
-	tps65910->write(tps65910, TPS65910_INT_STS2, 1, &reg);
+	tps65910_reg_write(tps65910, TPS65910_INT_STS2, reg);
 	switch (tps65910_chip_id(tps65910)) {
 	case TPS65911:
 		reg = irq_sts >> 8;
-		tps65910->write(tps65910, TPS65910_INT_STS3, 1, &reg);
+		tps65910_reg_write(tps65910, TPS65910_INT_STS3, reg);
 	}
 
 	return IRQ_HANDLED;
@@ -105,27 +100,27 @@
 {
 	struct tps65910 *tps65910 = irq_data_get_irq_chip_data(data);
 	u32 reg_mask;
-	u8 reg;
+	unsigned int reg;
 
-	tps65910->read(tps65910, TPS65910_INT_MSK, 1, &reg);
+	tps65910_reg_read(tps65910, TPS65910_INT_MSK, &reg);
 	reg_mask = reg;
-	tps65910->read(tps65910, TPS65910_INT_MSK2, 1, &reg);
+	tps65910_reg_read(tps65910, TPS65910_INT_MSK2, &reg);
 	reg_mask |= reg << 8;
 	switch (tps65910_chip_id(tps65910)) {
 	case TPS65911:
-		tps65910->read(tps65910, TPS65910_INT_MSK3, 1, &reg);
+		tps65910_reg_read(tps65910, TPS65910_INT_MSK3, &reg);
 		reg_mask |= reg << 16;
 	}
 
 	if (tps65910->irq_mask != reg_mask) {
 		reg = tps65910->irq_mask & 0xFF;
-		tps65910->write(tps65910, TPS65910_INT_MSK, 1, &reg);
+		tps65910_reg_write(tps65910, TPS65910_INT_MSK, reg);
 		reg = tps65910->irq_mask >> 8 & 0xFF;
-		tps65910->write(tps65910, TPS65910_INT_MSK2, 1, &reg);
+		tps65910_reg_write(tps65910, TPS65910_INT_MSK2, reg);
 		switch (tps65910_chip_id(tps65910)) {
 		case TPS65911:
 			reg = tps65910->irq_mask >> 16;
-			tps65910->write(tps65910, TPS65910_INT_MSK3, 1, &reg);
+			tps65910_reg_write(tps65910, TPS65910_INT_MSK3, reg);
 		}
 	}
 	mutex_unlock(&tps65910->irq_lock);
@@ -135,14 +130,14 @@
 {
 	struct tps65910 *tps65910 = irq_data_get_irq_chip_data(data);
 
-	tps65910->irq_mask &= ~( 1 << irq_to_tps65910_irq(tps65910, data->irq));
+	tps65910->irq_mask &= ~(1 << data->hwirq);
 }
 
 static void tps65910_irq_disable(struct irq_data *data)
 {
 	struct tps65910 *tps65910 = irq_data_get_irq_chip_data(data);
 
-	tps65910->irq_mask |= ( 1 << irq_to_tps65910_irq(tps65910, data->irq));
+	tps65910->irq_mask |= (1 << data->hwirq);
 }
 
 #ifdef CONFIG_PM_SLEEP
@@ -164,10 +159,35 @@
 	.irq_set_wake = tps65910_irq_set_wake,
 };
 
+static int tps65910_irq_map(struct irq_domain *h, unsigned int virq,
+				irq_hw_number_t hw)
+{
+	struct tps65910 *tps65910 = h->host_data;
+
+	irq_set_chip_data(virq, tps65910);
+	irq_set_chip_and_handler(virq, &tps65910_irq_chip, handle_edge_irq);
+	irq_set_nested_thread(virq, 1);
+
+	/* ARM needs us to explicitly flag the IRQ as valid
+	 * and will set them noprobe when we do so. */
+#ifdef CONFIG_ARM
+	set_irq_flags(virq, IRQF_VALID);
+#else
+	irq_set_noprobe(virq);
+#endif
+
+	return 0;
+}
+
+static struct irq_domain_ops tps65910_domain_ops = {
+	.map	= tps65910_irq_map,
+	.xlate	= irq_domain_xlate_twocell,
+};
+
 int tps65910_irq_init(struct tps65910 *tps65910, int irq,
 		    struct tps65910_platform_data *pdata)
 {
-	int ret, cur_irq;
+	int ret;
 	int flags = IRQF_ONESHOT;
 
 	if (!irq) {
@@ -175,17 +195,11 @@
 		return -EINVAL;
 	}
 
-	if (!pdata || !pdata->irq_base) {
-		dev_warn(tps65910->dev, "No interrupt support, no IRQ base\n");
+	if (!pdata) {
+		dev_warn(tps65910->dev, "No interrupt support, no pdata\n");
 		return -EINVAL;
 	}
 
-	tps65910->irq_mask = 0xFFFFFF;
-
-	mutex_init(&tps65910->irq_lock);
-	tps65910->chip_irq = irq;
-	tps65910->irq_base = pdata->irq_base;
-
 	switch (tps65910_chip_id(tps65910)) {
 	case TPS65910:
 		tps65910->irq_num = TPS65910_NUM_IRQ;
@@ -195,22 +209,36 @@
 		break;
 	}
 
-	/* Register with genirq */
-	for (cur_irq = tps65910->irq_base;
-	     cur_irq < tps65910->irq_num + tps65910->irq_base;
-	     cur_irq++) {
-		irq_set_chip_data(cur_irq, tps65910);
-		irq_set_chip_and_handler(cur_irq, &tps65910_irq_chip,
-					 handle_edge_irq);
-		irq_set_nested_thread(cur_irq, 1);
+	if (pdata->irq_base > 0) {
+		pdata->irq_base = irq_alloc_descs(pdata->irq_base, 0,
+					tps65910->irq_num, -1);
+		if (pdata->irq_base < 0) {
+			dev_warn(tps65910->dev, "Failed to alloc IRQs: %d\n",
+					pdata->irq_base);
+			return pdata->irq_base;
+		}
+	}
 
-		/* ARM needs us to explicitly flag the IRQ as valid
-		 * and will set them noprobe when we do so. */
-#ifdef CONFIG_ARM
-		set_irq_flags(cur_irq, IRQF_VALID);
-#else
-		irq_set_noprobe(cur_irq);
-#endif
+	tps65910->irq_mask = 0xFFFFFF;
+
+	mutex_init(&tps65910->irq_lock);
+	tps65910->chip_irq = irq;
+	tps65910->irq_base = pdata->irq_base;
+
+	if (pdata->irq_base > 0)
+		tps65910->domain = irq_domain_add_legacy(tps65910->dev->of_node,
+					tps65910->irq_num,
+					pdata->irq_base,
+					0,
+					&tps65910_domain_ops, tps65910);
+	else
+		tps65910->domain = irq_domain_add_linear(tps65910->dev->of_node,
+					tps65910->irq_num,
+					&tps65910_domain_ops, tps65910);
+
+	if (!tps65910->domain) {
+		dev_err(tps65910->dev, "Failed to create IRQ domain\n");
+		return -ENOMEM;
 	}
 
 	ret = request_threaded_irq(irq, NULL, tps65910_irq, flags,
diff --git a/drivers/mfd/tps65910.c b/drivers/mfd/tps65910.c
index bf2b25e..be9e07b 100644
--- a/drivers/mfd/tps65910.c
+++ b/drivers/mfd/tps65910.c
@@ -19,13 +19,16 @@
 #include <linux/err.h>
 #include <linux/slab.h>
 #include <linux/i2c.h>
-#include <linux/gpio.h>
 #include <linux/mfd/core.h>
 #include <linux/regmap.h>
 #include <linux/mfd/tps65910.h>
+#include <linux/of_device.h>
 
 static struct mfd_cell tps65910s[] = {
 	{
+		.name = "tps65910-gpio",
+	},
+	{
 		.name = "tps65910-pmic",
 	},
 	{
@@ -37,30 +40,6 @@
 };
 
 
-static int tps65910_i2c_read(struct tps65910 *tps65910, u8 reg,
-				  int bytes, void *dest)
-{
-	return regmap_bulk_read(tps65910->regmap, reg, dest, bytes);
-}
-
-static int tps65910_i2c_write(struct tps65910 *tps65910, u8 reg,
-				  int bytes, void *src)
-{
-	return regmap_bulk_write(tps65910->regmap, reg, src, bytes);
-}
-
-int tps65910_set_bits(struct tps65910 *tps65910, u8 reg, u8 mask)
-{
-	return regmap_update_bits(tps65910->regmap, reg, mask, mask);
-}
-EXPORT_SYMBOL_GPL(tps65910_set_bits);
-
-int tps65910_clear_bits(struct tps65910 *tps65910, u8 reg, u8 mask)
-{
-	return regmap_update_bits(tps65910->regmap, reg, mask, 0);
-}
-EXPORT_SYMBOL_GPL(tps65910_clear_bits);
-
 static bool is_volatile_reg(struct device *dev, unsigned int reg)
 {
 	struct tps65910 *tps65910 = dev_get_drvdata(dev);
@@ -85,80 +64,197 @@
 	.reg_bits = 8,
 	.val_bits = 8,
 	.volatile_reg = is_volatile_reg,
-	.max_register = TPS65910_MAX_REGISTER,
-	.num_reg_defaults_raw = TPS65910_MAX_REGISTER,
+	.max_register = TPS65910_MAX_REGISTER - 1,
 	.cache_type = REGCACHE_RBTREE,
 };
 
-static int tps65910_i2c_probe(struct i2c_client *i2c,
-			    const struct i2c_device_id *id)
+static int __devinit tps65910_sleepinit(struct tps65910 *tps65910,
+		struct tps65910_board *pmic_pdata)
+{
+	struct device *dev = NULL;
+	int ret = 0;
+
+	dev = tps65910->dev;
+
+	if (!pmic_pdata->en_dev_slp)
+		return 0;
+
+	/* enabling SLEEP device state */
+	ret = tps65910_reg_set_bits(tps65910, TPS65910_DEVCTRL,
+				DEVCTRL_DEV_SLP_MASK);
+	if (ret < 0) {
+		dev_err(dev, "set dev_slp failed: %d\n", ret);
+		goto err_sleep_init;
+	}
+
+	/* Return if there is no sleep keepon data. */
+	if (!pmic_pdata->slp_keepon)
+		return 0;
+
+	if (pmic_pdata->slp_keepon->therm_keepon) {
+		ret = tps65910_reg_set_bits(tps65910,
+				TPS65910_SLEEP_KEEP_RES_ON,
+				SLEEP_KEEP_RES_ON_THERM_KEEPON_MASK);
+		if (ret < 0) {
+			dev_err(dev, "set therm_keepon failed: %d\n", ret);
+			goto disable_dev_slp;
+		}
+	}
+
+	if (pmic_pdata->slp_keepon->clkout32k_keepon) {
+		ret = tps65910_reg_set_bits(tps65910,
+				TPS65910_SLEEP_KEEP_RES_ON,
+				SLEEP_KEEP_RES_ON_CLKOUT32K_KEEPON_MASK);
+		if (ret < 0) {
+			dev_err(dev, "set clkout32k_keepon failed: %d\n", ret);
+			goto disable_dev_slp;
+		}
+	}
+
+	if (pmic_pdata->slp_keepon->i2chs_keepon) {
+		ret = tps65910_reg_set_bits(tps65910,
+				TPS65910_SLEEP_KEEP_RES_ON,
+				SLEEP_KEEP_RES_ON_I2CHS_KEEPON_MASK);
+		if (ret < 0) {
+			dev_err(dev, "set i2chs_keepon failed: %d\n", ret);
+			goto disable_dev_slp;
+		}
+	}
+
+	return 0;
+
+disable_dev_slp:
+	tps65910_reg_clear_bits(tps65910, TPS65910_DEVCTRL,
+				DEVCTRL_DEV_SLP_MASK);
+
+err_sleep_init:
+	return ret;
+}
+
+#ifdef CONFIG_OF
+static struct of_device_id tps65910_of_match[] = {
+	{ .compatible = "ti,tps65910", .data = (void *)TPS65910},
+	{ .compatible = "ti,tps65911", .data = (void *)TPS65911},
+	{ },
+};
+MODULE_DEVICE_TABLE(of, tps65910_of_match);
+
+static struct tps65910_board *tps65910_parse_dt(struct i2c_client *client,
+						int *chip_id)
+{
+	struct device_node *np = client->dev.of_node;
+	struct tps65910_board *board_info;
+	unsigned int prop;
+	const struct of_device_id *match;
+	int ret = 0;
+
+	match = of_match_device(tps65910_of_match, &client->dev);
+	if (!match) {
+		dev_err(&client->dev, "Failed to find matching dt id\n");
+		return NULL;
+	}
+
+	*chip_id  = (int)match->data;
+
+	board_info = devm_kzalloc(&client->dev, sizeof(*board_info),
+			GFP_KERNEL);
+	if (!board_info) {
+		dev_err(&client->dev, "Failed to allocate pdata\n");
+		return NULL;
+	}
+
+	ret = of_property_read_u32(np, "ti,vmbch-threshold", &prop);
+	if (!ret)
+		board_info->vmbch_threshold = prop;
+	else if (*chip_id == TPS65911)
+		dev_warn(&client->dev, "VMBCH-Threshold not specified");
+
+	ret = of_property_read_u32(np, "ti,vmbch2-threshold", &prop);
+	if (!ret)
+		board_info->vmbch2_threshold = prop;
+	else if (*chip_id == TPS65911)
+		dev_warn(&client->dev, "VMBCH2-Threshold not specified");
+
+	board_info->irq = client->irq;
+	board_info->irq_base = -1;
+
+	return board_info;
+}
+#else
+static inline
+struct tps65910_board *tps65910_parse_dt(struct i2c_client *client,
+					 int *chip_id)
+{
+	return NULL;
+}
+#endif
+
+static __devinit int tps65910_i2c_probe(struct i2c_client *i2c,
+					const struct i2c_device_id *id)
 {
 	struct tps65910 *tps65910;
 	struct tps65910_board *pmic_plat_data;
+	struct tps65910_board *of_pmic_plat_data = NULL;
 	struct tps65910_platform_data *init_data;
 	int ret = 0;
+	int chip_id = id->driver_data;
 
 	pmic_plat_data = dev_get_platdata(&i2c->dev);
+
+	if (!pmic_plat_data && i2c->dev.of_node) {
+		pmic_plat_data = tps65910_parse_dt(i2c, &chip_id);
+		of_pmic_plat_data = pmic_plat_data;
+	}
+
 	if (!pmic_plat_data)
 		return -EINVAL;
 
-	init_data = kzalloc(sizeof(struct tps65910_platform_data), GFP_KERNEL);
+	init_data = devm_kzalloc(&i2c->dev, sizeof(*init_data), GFP_KERNEL);
 	if (init_data == NULL)
 		return -ENOMEM;
 
-	tps65910 = kzalloc(sizeof(struct tps65910), GFP_KERNEL);
-	if (tps65910 == NULL) {
-		kfree(init_data);
+	tps65910 = devm_kzalloc(&i2c->dev, sizeof(*tps65910), GFP_KERNEL);
+	if (tps65910 == NULL)
 		return -ENOMEM;
-	}
 
+	tps65910->of_plat_data = of_pmic_plat_data;
 	i2c_set_clientdata(i2c, tps65910);
 	tps65910->dev = &i2c->dev;
 	tps65910->i2c_client = i2c;
-	tps65910->id = id->driver_data;
-	tps65910->read = tps65910_i2c_read;
-	tps65910->write = tps65910_i2c_write;
+	tps65910->id = chip_id;
 	mutex_init(&tps65910->io_mutex);
 
-	tps65910->regmap = regmap_init_i2c(i2c, &tps65910_regmap_config);
+	tps65910->regmap = devm_regmap_init_i2c(i2c, &tps65910_regmap_config);
 	if (IS_ERR(tps65910->regmap)) {
 		ret = PTR_ERR(tps65910->regmap);
 		dev_err(&i2c->dev, "regmap initialization failed: %d\n", ret);
-		goto regmap_err;
+		return ret;
 	}
 
 	ret = mfd_add_devices(tps65910->dev, -1,
 			      tps65910s, ARRAY_SIZE(tps65910s),
 			      NULL, 0);
-	if (ret < 0)
-		goto err;
+	if (ret < 0) {
+		dev_err(&i2c->dev, "mfd_add_devices failed: %d\n", ret);
+		return ret;
+	}
 
 	init_data->irq = pmic_plat_data->irq;
 	init_data->irq_base = pmic_plat_data->irq_base;
 
-	tps65910_gpio_init(tps65910, pmic_plat_data->gpio_base);
-
 	tps65910_irq_init(tps65910, init_data->irq, init_data);
 
-	kfree(init_data);
-	return ret;
+	tps65910_sleepinit(tps65910, pmic_plat_data);
 
-err:
-	regmap_exit(tps65910->regmap);
-regmap_err:
-	kfree(tps65910);
-	kfree(init_data);
 	return ret;
 }
 
-static int tps65910_i2c_remove(struct i2c_client *i2c)
+static __devexit int tps65910_i2c_remove(struct i2c_client *i2c)
 {
 	struct tps65910 *tps65910 = i2c_get_clientdata(i2c);
 
 	tps65910_irq_exit(tps65910);
 	mfd_remove_devices(tps65910->dev);
-	regmap_exit(tps65910->regmap);
-	kfree(tps65910);
 
 	return 0;
 }
@@ -175,9 +271,10 @@
 	.driver = {
 		   .name = "tps65910",
 		   .owner = THIS_MODULE,
+		   .of_match_table = of_match_ptr(tps65910_of_match),
 	},
 	.probe = tps65910_i2c_probe,
-	.remove = tps65910_i2c_remove,
+	.remove = __devexit_p(tps65910_i2c_remove),
 	.id_table = tps65910_i2c_id,
 };
 
diff --git a/drivers/mfd/twl4030-irq.c b/drivers/mfd/twl4030-irq.c
index 5d656e8..ad733d7 100644
--- a/drivers/mfd/twl4030-irq.c
+++ b/drivers/mfd/twl4030-irq.c
@@ -757,6 +757,7 @@
 		dev_err(dev, "could not claim irq%d: %d\n", irq_num, status);
 		goto fail_rqirq;
 	}
+	enable_irq_wake(irq_num);
 
 	return irq_base;
 fail_rqirq:
diff --git a/drivers/mfd/twl6040-core.c b/drivers/mfd/twl6040-core.c
index 2d6beda..4ded9e7 100644
--- a/drivers/mfd/twl6040-core.c
+++ b/drivers/mfd/twl6040-core.c
@@ -27,7 +27,12 @@
 #include <linux/types.h>
 #include <linux/slab.h>
 #include <linux/kernel.h>
+#include <linux/err.h>
 #include <linux/platform_device.h>
+#include <linux/of.h>
+#include <linux/of_irq.h>
+#include <linux/of_gpio.h>
+#include <linux/of_platform.h>
 #include <linux/gpio.h>
 #include <linux/delay.h>
 #include <linux/i2c.h>
@@ -35,8 +40,24 @@
 #include <linux/err.h>
 #include <linux/mfd/core.h>
 #include <linux/mfd/twl6040.h>
+#include <linux/regulator/consumer.h>
 
 #define VIBRACTRL_MEMBER(reg) ((reg == TWL6040_REG_VIBCTLL) ? 0 : 1)
+#define TWL6040_NUM_SUPPLIES	(2)
+
+static bool twl6040_has_vibra(struct twl6040_platform_data *pdata,
+			      struct device_node *node)
+{
+	if (pdata && pdata->vibra)
+		return true;
+
+#ifdef CONFIG_OF
+	if (of_find_node_by_name(node, "vibra"))
+		return true;
+#endif
+
+	return false;
+}
 
 int twl6040_reg_read(struct twl6040 *twl6040, unsigned int reg)
 {
@@ -502,17 +523,18 @@
 				     const struct i2c_device_id *id)
 {
 	struct twl6040_platform_data *pdata = client->dev.platform_data;
+	struct device_node *node = client->dev.of_node;
 	struct twl6040 *twl6040;
 	struct mfd_cell *cell = NULL;
-	int ret, children = 0;
+	int irq, ret, children = 0;
 
-	if (!pdata) {
+	if (!pdata && !node) {
 		dev_err(&client->dev, "Platform data is missing\n");
 		return -EINVAL;
 	}
 
 	/* In order to operate correctly we need valid interrupt config */
-	if (!client->irq || !pdata->irq_base) {
+	if (!client->irq) {
 		dev_err(&client->dev, "Invalid IRQ configuration\n");
 		return -EINVAL;
 	}
@@ -524,7 +546,7 @@
 		goto err;
 	}
 
-	twl6040->regmap = regmap_init_i2c(client, &twl6040_regmap_config);
+	twl6040->regmap = devm_regmap_init_i2c(client, &twl6040_regmap_config);
 	if (IS_ERR(twl6040->regmap)) {
 		ret = PTR_ERR(twl6040->regmap);
 		goto err;
@@ -532,9 +554,23 @@
 
 	i2c_set_clientdata(client, twl6040);
 
+	twl6040->supplies[0].supply = "vio";
+	twl6040->supplies[1].supply = "v2v1";
+	ret = regulator_bulk_get(&client->dev, TWL6040_NUM_SUPPLIES,
+				 twl6040->supplies);
+	if (ret != 0) {
+		dev_err(&client->dev, "Failed to get supplies: %d\n", ret);
+		goto regulator_get_err;
+	}
+
+	ret = regulator_bulk_enable(TWL6040_NUM_SUPPLIES, twl6040->supplies);
+	if (ret != 0) {
+		dev_err(&client->dev, "Failed to enable supplies: %d\n", ret);
+		goto power_err;
+	}
+
 	twl6040->dev = &client->dev;
 	twl6040->irq = client->irq;
-	twl6040->irq_base = pdata->irq_base;
 
 	mutex_init(&twl6040->mutex);
 	mutex_init(&twl6040->io_mutex);
@@ -543,22 +579,26 @@
 	twl6040->rev = twl6040_reg_read(twl6040, TWL6040_REG_ASICREV);
 
 	/* ERRATA: Automatic power-up is not possible in ES1.0 */
-	if (twl6040_get_revid(twl6040) > TWL6040_REV_ES1_0)
-		twl6040->audpwron = pdata->audpwron_gpio;
-	else
+	if (twl6040_get_revid(twl6040) > TWL6040_REV_ES1_0) {
+		if (pdata)
+			twl6040->audpwron = pdata->audpwron_gpio;
+		else
+			twl6040->audpwron = of_get_named_gpio(node,
+						"ti,audpwron-gpio", 0);
+	} else
 		twl6040->audpwron = -EINVAL;
 
 	if (gpio_is_valid(twl6040->audpwron)) {
 		ret = gpio_request_one(twl6040->audpwron, GPIOF_OUT_INIT_LOW,
 				       "audpwron");
 		if (ret)
-			goto gpio1_err;
+			goto gpio_err;
 	}
 
 	/* codec interrupt */
 	ret = twl6040_irq_init(twl6040);
 	if (ret)
-		goto gpio2_err;
+		goto irq_init_err;
 
 	ret = request_threaded_irq(twl6040->irq_base + TWL6040_IRQ_READY,
 				   NULL, twl6040_naudint_handler, 0,
@@ -572,22 +612,27 @@
 	/* dual-access registers controlled by I2C only */
 	twl6040_set_bits(twl6040, TWL6040_REG_ACCCTL, TWL6040_I2CSEL);
 
-	if (pdata->codec) {
-		int irq = twl6040->irq_base + TWL6040_IRQ_PLUG;
-
-		cell = &twl6040->cells[children];
-		cell->name = "twl6040-codec";
-		twl6040_codec_rsrc[0].start = irq;
-		twl6040_codec_rsrc[0].end = irq;
-		cell->resources = twl6040_codec_rsrc;
-		cell->num_resources = ARRAY_SIZE(twl6040_codec_rsrc);
+	/*
+	 * The main functionality of twl6040 is to provide audio on OMAP4+
+	 * systems. We can add the ASoC codec child whenever this driver has
+	 * been loaded. The ASoC codec can work without pdata, so pass the
+	 * platform_data only if it has been provided.
+	 */
+	irq = twl6040->irq_base + TWL6040_IRQ_PLUG;
+	cell = &twl6040->cells[children];
+	cell->name = "twl6040-codec";
+	twl6040_codec_rsrc[0].start = irq;
+	twl6040_codec_rsrc[0].end = irq;
+	cell->resources = twl6040_codec_rsrc;
+	cell->num_resources = ARRAY_SIZE(twl6040_codec_rsrc);
+	if (pdata && pdata->codec) {
 		cell->platform_data = pdata->codec;
 		cell->pdata_size = sizeof(*pdata->codec);
-		children++;
 	}
+	children++;
 
-	if (pdata->vibra) {
-		int irq = twl6040->irq_base + TWL6040_IRQ_VIB;
+	if (twl6040_has_vibra(pdata, node)) {
+		irq = twl6040->irq_base + TWL6040_IRQ_VIB;
 
 		cell = &twl6040->cells[children];
 		cell->name = "twl6040-vibra";
@@ -596,21 +641,17 @@
 		cell->resources = twl6040_vibra_rsrc;
 		cell->num_resources = ARRAY_SIZE(twl6040_vibra_rsrc);
 
-		cell->platform_data = pdata->vibra;
-		cell->pdata_size = sizeof(*pdata->vibra);
+		if (pdata && pdata->vibra) {
+			cell->platform_data = pdata->vibra;
+			cell->pdata_size = sizeof(*pdata->vibra);
+		}
 		children++;
 	}
 
-	if (children) {
-		ret = mfd_add_devices(&client->dev, -1, twl6040->cells,
-				      children, NULL, 0);
-		if (ret)
-			goto mfd_err;
-	} else {
-		dev_err(&client->dev, "No platform data found for children\n");
-		ret = -ENODEV;
+	ret = mfd_add_devices(&client->dev, -1, twl6040->cells, children,
+			      NULL, 0);
+	if (ret)
 		goto mfd_err;
-	}
 
 	return 0;
 
@@ -618,12 +659,15 @@
 	free_irq(twl6040->irq_base + TWL6040_IRQ_READY, twl6040);
 irq_err:
 	twl6040_irq_exit(twl6040);
-gpio2_err:
+irq_init_err:
 	if (gpio_is_valid(twl6040->audpwron))
 		gpio_free(twl6040->audpwron);
-gpio1_err:
+gpio_err:
+	regulator_bulk_disable(TWL6040_NUM_SUPPLIES, twl6040->supplies);
+power_err:
+	regulator_bulk_free(TWL6040_NUM_SUPPLIES, twl6040->supplies);
+regulator_get_err:
 	i2c_set_clientdata(client, NULL);
-	regmap_exit(twl6040->regmap);
 err:
 	return ret;
 }
@@ -643,7 +687,9 @@
 
 	mfd_remove_devices(&client->dev);
 	i2c_set_clientdata(client, NULL);
-	regmap_exit(twl6040->regmap);
+
+	regulator_bulk_disable(TWL6040_NUM_SUPPLIES, twl6040->supplies);
+	regulator_bulk_free(TWL6040_NUM_SUPPLIES, twl6040->supplies);
 
 	return 0;
 }
diff --git a/drivers/mfd/twl6040-irq.c b/drivers/mfd/twl6040-irq.c
index b3f8dda..4b42543 100644
--- a/drivers/mfd/twl6040-irq.c
+++ b/drivers/mfd/twl6040-irq.c
@@ -23,7 +23,10 @@
 
 #include <linux/kernel.h>
 #include <linux/module.h>
+#include <linux/err.h>
 #include <linux/irq.h>
+#include <linux/of.h>
+#include <linux/irqdomain.h>
 #include <linux/interrupt.h>
 #include <linux/mfd/core.h>
 #include <linux/mfd/twl6040.h>
@@ -138,7 +141,8 @@
 
 int twl6040_irq_init(struct twl6040 *twl6040)
 {
-	int cur_irq, ret;
+	struct device_node *node = twl6040->dev->of_node;
+	int i, nr_irqs, irq_base, ret;
 	u8 val;
 
 	mutex_init(&twl6040->irq_mutex);
@@ -148,21 +152,31 @@
 	twl6040->irq_masks_cache = TWL6040_ALLINT_MSK;
 	twl6040_reg_write(twl6040, TWL6040_REG_INTMR, TWL6040_ALLINT_MSK);
 
+	nr_irqs = ARRAY_SIZE(twl6040_irqs);
+
+	irq_base = irq_alloc_descs(-1, 0, nr_irqs, 0);
+	if (IS_ERR_VALUE(irq_base)) {
+		dev_err(twl6040->dev, "Failed to allocate IRQ descs\n");
+		return irq_base;
+	}
+	twl6040->irq_base = irq_base;
+
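+	/* Legacy domain: hwirq n maps 1:1 onto Linux IRQ (irq_base + n) */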
+	irq_domain_add_legacy(node, ARRAY_SIZE(twl6040_irqs), irq_base, 0,
+			      &irq_domain_simple_ops, NULL);
+
 	/* Register them with genirq */
-	for (cur_irq = twl6040->irq_base;
-	     cur_irq < twl6040->irq_base + ARRAY_SIZE(twl6040_irqs);
-	     cur_irq++) {
-		irq_set_chip_data(cur_irq, twl6040);
-		irq_set_chip_and_handler(cur_irq, &twl6040_irq_chip,
+	for (i = irq_base; i < irq_base + nr_irqs; i++) {
+		irq_set_chip_data(i, twl6040);
+		irq_set_chip_and_handler(i, &twl6040_irq_chip,
 					 handle_level_irq);
-		irq_set_nested_thread(cur_irq, 1);
+		irq_set_nested_thread(i, 1);
 
 		/* ARM needs us to explicitly flag the IRQ as valid
 		 * and will set them noprobe when we do so. */
 #ifdef CONFIG_ARM
-		set_irq_flags(cur_irq, IRQF_VALID);
+		set_irq_flags(i, IRQF_VALID);
 #else
-		irq_set_noprobe(cur_irq);
+		irq_set_noprobe(i);
 #endif
 	}
 
diff --git a/drivers/mfd/vx855.c b/drivers/mfd/vx855.c
index b73cc15..872aff2 100644
--- a/drivers/mfd/vx855.c
+++ b/drivers/mfd/vx855.c
@@ -131,17 +131,7 @@
 	.remove		= __devexit_p(vx855_remove),
 };
 
-static int vx855_init(void)
-{
-	return pci_register_driver(&vx855_pci_driver);
-}
-module_init(vx855_init);
-
-static void vx855_exit(void)
-{
-	pci_unregister_driver(&vx855_pci_driver);
-}
-module_exit(vx855_exit);
+module_pci_driver(vx855_pci_driver);
 
 MODULE_LICENSE("GPL");
 MODULE_AUTHOR("Harald Welte <HaraldWelte@viatech.com>");
diff --git a/drivers/mfd/wm831x-auxadc.c b/drivers/mfd/wm831x-auxadc.c
index 8721095..6ee3018 100644
--- a/drivers/mfd/wm831x-auxadc.c
+++ b/drivers/mfd/wm831x-auxadc.c
@@ -280,11 +280,11 @@
 	mutex_init(&wm831x->auxadc_lock);
 	INIT_LIST_HEAD(&wm831x->auxadc_pending);
 
-	if (wm831x->irq && wm831x->irq_base) {
+	if (wm831x->irq) {
 		wm831x->auxadc_read = wm831x_auxadc_read_irq;
 
-		ret = request_threaded_irq(wm831x->irq_base +
-					   WM831X_IRQ_AUXADC_DATA,
+		ret = request_threaded_irq(wm831x_irq(wm831x,
+						      WM831X_IRQ_AUXADC_DATA),
 					   NULL, wm831x_auxadc_irq, 0,
 					   "auxadc", wm831x);
 		if (ret < 0) {
diff --git a/drivers/mfd/wm831x-core.c b/drivers/mfd/wm831x-core.c
index 838056c..946698f 100644
--- a/drivers/mfd/wm831x-core.c
+++ b/drivers/mfd/wm831x-core.c
@@ -614,8 +614,15 @@
 }
 EXPORT_SYMBOL_GPL(wm831x_set_bits);
 
+static struct resource wm831x_io_parent = {
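+	/* Covers the full IO range; acts as parent for the per-function register resources below */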
+	.start = 0,
+	.end   = 0xffffffff,
+	.flags = IORESOURCE_IO,
+};
+
 static struct resource wm831x_dcdc1_resources[] = {
 	{
+		.parent = &wm831x_io_parent,
 		.start = WM831X_DC1_CONTROL_1,
 		.end   = WM831X_DC1_DVS_CONTROL,
 		.flags = IORESOURCE_IO,
@@ -637,6 +644,7 @@
 
 static struct resource wm831x_dcdc2_resources[] = {
 	{
+		.parent = &wm831x_io_parent,
 		.start = WM831X_DC2_CONTROL_1,
 		.end   = WM831X_DC2_DVS_CONTROL,
 		.flags = IORESOURCE_IO,
@@ -657,6 +665,7 @@
 
 static struct resource wm831x_dcdc3_resources[] = {
 	{
+		.parent = &wm831x_io_parent,
 		.start = WM831X_DC3_CONTROL_1,
 		.end   = WM831X_DC3_SLEEP_CONTROL,
 		.flags = IORESOURCE_IO,
@@ -671,6 +680,7 @@
 
 static struct resource wm831x_dcdc4_resources[] = {
 	{
+		.parent = &wm831x_io_parent,
 		.start = WM831X_DC4_CONTROL,
 		.end   = WM831X_DC4_SLEEP_CONTROL,
 		.flags = IORESOURCE_IO,
@@ -685,6 +695,7 @@
 
 static struct resource wm8320_dcdc4_buck_resources[] = {
 	{
+		.parent = &wm831x_io_parent,
 		.start = WM831X_DC4_CONTROL,
 		.end   = WM832X_DC4_SLEEP_CONTROL,
 		.flags = IORESOURCE_IO,
@@ -707,6 +718,7 @@
 
 static struct resource wm831x_isink1_resources[] = {
 	{
+		.parent = &wm831x_io_parent,
 		.start = WM831X_CURRENT_SINK_1,
 		.end   = WM831X_CURRENT_SINK_1,
 		.flags = IORESOURCE_IO,
@@ -720,6 +732,7 @@
 
 static struct resource wm831x_isink2_resources[] = {
 	{
+		.parent = &wm831x_io_parent,
 		.start = WM831X_CURRENT_SINK_2,
 		.end   = WM831X_CURRENT_SINK_2,
 		.flags = IORESOURCE_IO,
@@ -733,6 +746,7 @@
 
 static struct resource wm831x_ldo1_resources[] = {
 	{
+		.parent = &wm831x_io_parent,
 		.start = WM831X_LDO1_CONTROL,
 		.end   = WM831X_LDO1_SLEEP_CONTROL,
 		.flags = IORESOURCE_IO,
@@ -747,6 +761,7 @@
 
 static struct resource wm831x_ldo2_resources[] = {
 	{
+		.parent = &wm831x_io_parent,
 		.start = WM831X_LDO2_CONTROL,
 		.end   = WM831X_LDO2_SLEEP_CONTROL,
 		.flags = IORESOURCE_IO,
@@ -761,6 +776,7 @@
 
 static struct resource wm831x_ldo3_resources[] = {
 	{
+		.parent = &wm831x_io_parent,
 		.start = WM831X_LDO3_CONTROL,
 		.end   = WM831X_LDO3_SLEEP_CONTROL,
 		.flags = IORESOURCE_IO,
@@ -775,6 +791,7 @@
 
 static struct resource wm831x_ldo4_resources[] = {
 	{
+		.parent = &wm831x_io_parent,
 		.start = WM831X_LDO4_CONTROL,
 		.end   = WM831X_LDO4_SLEEP_CONTROL,
 		.flags = IORESOURCE_IO,
@@ -789,6 +806,7 @@
 
 static struct resource wm831x_ldo5_resources[] = {
 	{
+		.parent = &wm831x_io_parent,
 		.start = WM831X_LDO5_CONTROL,
 		.end   = WM831X_LDO5_SLEEP_CONTROL,
 		.flags = IORESOURCE_IO,
@@ -803,6 +821,7 @@
 
 static struct resource wm831x_ldo6_resources[] = {
 	{
+		.parent = &wm831x_io_parent,
 		.start = WM831X_LDO6_CONTROL,
 		.end   = WM831X_LDO6_SLEEP_CONTROL,
 		.flags = IORESOURCE_IO,
@@ -817,6 +836,7 @@
 
 static struct resource wm831x_ldo7_resources[] = {
 	{
+		.parent = &wm831x_io_parent,
 		.start = WM831X_LDO7_CONTROL,
 		.end   = WM831X_LDO7_SLEEP_CONTROL,
 		.flags = IORESOURCE_IO,
@@ -831,6 +851,7 @@
 
 static struct resource wm831x_ldo8_resources[] = {
 	{
+		.parent = &wm831x_io_parent,
 		.start = WM831X_LDO8_CONTROL,
 		.end   = WM831X_LDO8_SLEEP_CONTROL,
 		.flags = IORESOURCE_IO,
@@ -845,6 +866,7 @@
 
 static struct resource wm831x_ldo9_resources[] = {
 	{
+		.parent = &wm831x_io_parent,
 		.start = WM831X_LDO9_CONTROL,
 		.end   = WM831X_LDO9_SLEEP_CONTROL,
 		.flags = IORESOURCE_IO,
@@ -859,6 +881,7 @@
 
 static struct resource wm831x_ldo10_resources[] = {
 	{
+		.parent = &wm831x_io_parent,
 		.start = WM831X_LDO10_CONTROL,
 		.end   = WM831X_LDO10_SLEEP_CONTROL,
 		.flags = IORESOURCE_IO,
@@ -873,6 +896,7 @@
 
 static struct resource wm831x_ldo11_resources[] = {
 	{
+		.parent = &wm831x_io_parent,
 		.start = WM831X_LDO11_ON_CONTROL,
 		.end   = WM831X_LDO11_SLEEP_CONTROL,
 		.flags = IORESOURCE_IO,
@@ -974,6 +998,7 @@
 
 static struct resource wm831x_status1_resources[] = {
 	{
+		.parent = &wm831x_io_parent,
 		.start = WM831X_STATUS_LED_1,
 		.end   = WM831X_STATUS_LED_1,
 		.flags = IORESOURCE_IO,
@@ -982,6 +1007,7 @@
 
 static struct resource wm831x_status2_resources[] = {
 	{
+		.parent = &wm831x_io_parent,
 		.start = WM831X_STATUS_LED_2,
 		.end   = WM831X_STATUS_LED_2,
 		.flags = IORESOURCE_IO,
@@ -1787,27 +1813,27 @@
 	case WM8310:
 		ret = mfd_add_devices(wm831x->dev, wm831x_num,
 				      wm8310_devs, ARRAY_SIZE(wm8310_devs),
-				      NULL, wm831x->irq_base);
+				      NULL, 0);
 		break;
 
 	case WM8311:
 		ret = mfd_add_devices(wm831x->dev, wm831x_num,
 				      wm8311_devs, ARRAY_SIZE(wm8311_devs),
-				      NULL, wm831x->irq_base);
+				      NULL, 0);
 		if (!pdata || !pdata->disable_touch)
 			mfd_add_devices(wm831x->dev, wm831x_num,
 					touch_devs, ARRAY_SIZE(touch_devs),
-					NULL, wm831x->irq_base);
+					NULL, 0);
 		break;
 
 	case WM8312:
 		ret = mfd_add_devices(wm831x->dev, wm831x_num,
 				      wm8312_devs, ARRAY_SIZE(wm8312_devs),
-				      NULL, wm831x->irq_base);
+				      NULL, 0);
 		if (!pdata || !pdata->disable_touch)
 			mfd_add_devices(wm831x->dev, wm831x_num,
 					touch_devs, ARRAY_SIZE(touch_devs),
-					NULL, wm831x->irq_base);
+					NULL, 0);
 		break;
 
 	case WM8320:
@@ -1816,7 +1842,7 @@
 	case WM8326:
 		ret = mfd_add_devices(wm831x->dev, wm831x_num,
 				      wm8320_devs, ARRAY_SIZE(wm8320_devs),
-				      NULL, wm831x->irq_base);
+				      NULL, 0);
 		break;
 
 	default:
@@ -1841,7 +1867,7 @@
 	if (ret & WM831X_XTAL_ENA) {
 		ret = mfd_add_devices(wm831x->dev, wm831x_num,
 				      rtc_devs, ARRAY_SIZE(rtc_devs),
-				      NULL, wm831x->irq_base);
+				      NULL, 0);
 		if (ret != 0) {
 			dev_err(wm831x->dev, "Failed to add RTC: %d\n", ret);
 			goto err_irq;
@@ -1854,7 +1880,7 @@
 		/* Treat errors as non-critical */
 		ret = mfd_add_devices(wm831x->dev, wm831x_num, backlight_devs,
 				      ARRAY_SIZE(backlight_devs), NULL,
-				      wm831x->irq_base);
+				      0);
 		if (ret < 0)
 			dev_err(wm831x->dev, "Failed to add backlight: %d\n",
 				ret);
@@ -1883,8 +1909,7 @@
 {
 	wm831x_otp_exit(wm831x);
 	mfd_remove_devices(wm831x->dev);
-	if (wm831x->irq_base)
-		free_irq(wm831x->irq_base + WM831X_IRQ_AUXADC_DATA, wm831x);
+	free_irq(wm831x_irq(wm831x, WM831X_IRQ_AUXADC_DATA), wm831x);
 	wm831x_irq_exit(wm831x);
 }
 
diff --git a/drivers/mfd/wm831x-irq.c b/drivers/mfd/wm831x-irq.c
index bec4d05..804e56e 100644
--- a/drivers/mfd/wm831x-irq.c
+++ b/drivers/mfd/wm831x-irq.c
@@ -18,6 +18,7 @@
 #include <linux/irq.h>
 #include <linux/mfd/core.h>
 #include <linux/interrupt.h>
+#include <linux/irqdomain.h>
 
 #include <linux/mfd/wm831x/core.h>
 #include <linux/mfd/wm831x/pdata.h>
@@ -328,7 +329,7 @@
 static inline struct wm831x_irq_data *irq_to_wm831x_irq(struct wm831x *wm831x,
 							int irq)
 {
-	return &wm831x_irqs[irq - wm831x->irq_base];
+	return &wm831x_irqs[irq];
 }
 
 static void wm831x_irq_lock(struct irq_data *data)
@@ -374,7 +375,7 @@
 {
 	struct wm831x *wm831x = irq_data_get_irq_chip_data(data);
 	struct wm831x_irq_data *irq_data = irq_to_wm831x_irq(wm831x,
-							     data->irq);
+							     data->hwirq);
 
 	wm831x->irq_masks_cur[irq_data->reg - 1] &= ~irq_data->mask;
 }
@@ -383,7 +384,7 @@
 {
 	struct wm831x *wm831x = irq_data_get_irq_chip_data(data);
 	struct wm831x_irq_data *irq_data = irq_to_wm831x_irq(wm831x,
-							     data->irq);
+							     data->hwirq);
 
 	wm831x->irq_masks_cur[irq_data->reg - 1] |= irq_data->mask;
 }
@@ -393,7 +394,7 @@
 	struct wm831x *wm831x = irq_data_get_irq_chip_data(data);
 	int irq;
 
-	irq = data->irq - wm831x->irq_base;
+	irq = data->hwirq;
 
 	if (irq < WM831X_IRQ_GPIO_1 || irq > WM831X_IRQ_GPIO_11) {
 		/* Ignore internal-only IRQs */
@@ -412,22 +413,25 @@
 	 * do the update here as we can be called with the bus lock
 	 * held.
 	 */
+	wm831x->gpio_level_low[irq] = false;
+	wm831x->gpio_level_high[irq] = false;
 	switch (type) {
 	case IRQ_TYPE_EDGE_BOTH:
 		wm831x->gpio_update[irq] = 0x10000 | WM831X_GPN_INT_MODE;
-		wm831x->gpio_level[irq] = false;
 		break;
 	case IRQ_TYPE_EDGE_RISING:
 		wm831x->gpio_update[irq] = 0x10000 | WM831X_GPN_POL;
-		wm831x->gpio_level[irq] = false;
 		break;
 	case IRQ_TYPE_EDGE_FALLING:
 		wm831x->gpio_update[irq] = 0x10000;
-		wm831x->gpio_level[irq] = false;
 		break;
 	case IRQ_TYPE_LEVEL_HIGH:
 		wm831x->gpio_update[irq] = 0x10000 | WM831X_GPN_POL;
-		wm831x->gpio_level[irq] = true;
+		wm831x->gpio_level_high[irq] = true;
+		break;
+	case IRQ_TYPE_LEVEL_LOW:
+		wm831x->gpio_update[irq] = 0x10000;
+		wm831x->gpio_level_low[irq] = true;
 		break;
 	default:
 		return -EINVAL;
@@ -469,9 +473,11 @@
 	 * descriptors.
 	 */
 	if (primary & WM831X_TCHPD_INT)
-		handle_nested_irq(wm831x->irq_base + WM831X_IRQ_TCHPD);
+		handle_nested_irq(irq_find_mapping(wm831x->irq_domain,
+						   WM831X_IRQ_TCHPD));
 	if (primary & WM831X_TCHDATA_INT)
-		handle_nested_irq(wm831x->irq_base + WM831X_IRQ_TCHDATA);
+		handle_nested_irq(irq_find_mapping(wm831x->irq_domain,
+						   WM831X_IRQ_TCHDATA));
 	primary &= ~(WM831X_TCHDATA_EINT | WM831X_TCHPD_EINT);
 
 	for (i = 0; i < ARRAY_SIZE(wm831x_irqs); i++) {
@@ -507,16 +513,29 @@
 		}
 
 		if (*status & wm831x_irqs[i].mask)
-			handle_nested_irq(wm831x->irq_base + i);
+			handle_nested_irq(irq_find_mapping(wm831x->irq_domain,
+							   i));
 
 		/* Simulate an edge triggered IRQ by polling the input
 		 * status.  This is sucky but improves interoperability.
 		 */
 		if (primary == WM831X_GP_INT &&
-		    wm831x->gpio_level[i - WM831X_IRQ_GPIO_1]) {
+		    wm831x->gpio_level_high[i - WM831X_IRQ_GPIO_1]) {
 			ret = wm831x_reg_read(wm831x, WM831X_GPIO_LEVEL);
 			while (ret & 1 << (i - WM831X_IRQ_GPIO_1)) {
-				handle_nested_irq(wm831x->irq_base + i);
+				handle_nested_irq(irq_find_mapping(wm831x->irq_domain,
+								   i));
+				ret = wm831x_reg_read(wm831x,
+						      WM831X_GPIO_LEVEL);
+			}
+		}
+
+		if (primary == WM831X_GP_INT &&
+		    wm831x->gpio_level_low[i - WM831X_IRQ_GPIO_1]) {
+			ret = wm831x_reg_read(wm831x, WM831X_GPIO_LEVEL);
+			while (!(ret & 1 << (i - WM831X_IRQ_GPIO_1))) {
+				handle_nested_irq(irq_find_mapping(wm831x->irq_domain,
+								   i));
 				ret = wm831x_reg_read(wm831x,
 						      WM831X_GPIO_LEVEL);
 			}
@@ -527,10 +546,34 @@
 	return IRQ_HANDLED;
 }
 
+static int wm831x_irq_map(struct irq_domain *h, unsigned int virq,
+			  irq_hw_number_t hw)
+{
+	irq_set_chip_data(virq, h->host_data);
+	irq_set_chip_and_handler(virq, &wm831x_irq_chip, handle_edge_irq);
+	irq_set_nested_thread(virq, 1);
+
+	/* ARM needs us to explicitly flag the IRQ as valid
+	 * and will set them noprobe when we do so. */
+#ifdef CONFIG_ARM
+	set_irq_flags(virq, IRQF_VALID);
+#else
+	irq_set_noprobe(virq);
+#endif
+
+	return 0;
+}
+
+static struct irq_domain_ops wm831x_irq_domain_ops = {
+	.map	= wm831x_irq_map,
+	.xlate	= irq_domain_xlate_twocell,
+};
+
 int wm831x_irq_init(struct wm831x *wm831x, int irq)
 {
 	struct wm831x_pdata *pdata = wm831x->dev->platform_data;
-	int i, cur_irq, ret;
+	struct irq_domain *domain;
+	int i, ret, irq_base;
 
 	mutex_init(&wm831x->irq_lock);
 
@@ -543,18 +586,33 @@
 	}
 
 	/* Try to dynamically allocate IRQs if no base is specified */
-	if (!pdata || !pdata->irq_base)
-		wm831x->irq_base = -1;
-	else
-		wm831x->irq_base = pdata->irq_base;
-
-	wm831x->irq_base = irq_alloc_descs(wm831x->irq_base, 0,
+	if (pdata && pdata->irq_base) {
+		irq_base = irq_alloc_descs(pdata->irq_base, 0,
 					   WM831X_NUM_IRQS, 0);
-	if (wm831x->irq_base < 0) {
-		dev_warn(wm831x->dev, "Failed to allocate IRQs: %d\n",
-			 wm831x->irq_base);
-		wm831x->irq_base = 0;
-		return 0;
+		if (irq_base < 0) {
+			dev_warn(wm831x->dev, "Failed to allocate IRQs: %d\n",
+				 irq_base);
+			irq_base = 0;
+		}
+	} else {
+		irq_base = 0;
+	}
+
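+	/* A fixed irq_base gives a legacy domain; otherwise IRQs are mapped on demand via a linear domain */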
+	if (irq_base)
+		domain = irq_domain_add_legacy(wm831x->dev->of_node,
+					       ARRAY_SIZE(wm831x_irqs),
+					       irq_base, 0,
+					       &wm831x_irq_domain_ops,
+					       wm831x);
+	else
+		domain = irq_domain_add_linear(wm831x->dev->of_node,
+					       ARRAY_SIZE(wm831x_irqs),
+					       &wm831x_irq_domain_ops,
+					       wm831x);
+
+	if (!domain) {
+		dev_warn(wm831x->dev, "Failed to allocate IRQ domain\n");
+		return -EINVAL;
 	}
 
 	if (pdata && pdata->irq_cmos)
@@ -565,38 +623,22 @@
 	wm831x_set_bits(wm831x, WM831X_IRQ_CONFIG,
 			WM831X_IRQ_OD, i);
 
-	/* Try to flag /IRQ as a wake source; there are a number of
-	 * unconditional wake sources in the PMIC so this isn't
-	 * conditional but we don't actually care *too* much if it
-	 * fails.
-	 */
-	ret = enable_irq_wake(irq);
-	if (ret != 0) {
-		dev_warn(wm831x->dev, "Can't enable IRQ as wake source: %d\n",
-			 ret);
-	}
-
 	wm831x->irq = irq;
-
-	/* Register them with genirq */
-	for (cur_irq = wm831x->irq_base;
-	     cur_irq < ARRAY_SIZE(wm831x_irqs) + wm831x->irq_base;
-	     cur_irq++) {
-		irq_set_chip_data(cur_irq, wm831x);
-		irq_set_chip_and_handler(cur_irq, &wm831x_irq_chip,
-					 handle_edge_irq);
-		irq_set_nested_thread(cur_irq, 1);
-
-		/* ARM needs us to explicitly flag the IRQ as valid
-		 * and will set them noprobe when we do so. */
-#ifdef CONFIG_ARM
-		set_irq_flags(cur_irq, IRQF_VALID);
-#else
-		irq_set_noprobe(cur_irq);
-#endif
-	}
+	wm831x->irq_domain = domain;
 
 	if (irq) {
+		/* Try to flag /IRQ as a wake source; there are a number of
+		 * unconditional wake sources in the PMIC so this isn't
+		 * conditional but we don't actually care *too* much if it
+		 * fails.
+		 */
+		ret = enable_irq_wake(irq);
+		if (ret != 0) {
+			dev_warn(wm831x->dev,
+				 "Can't enable IRQ as wake source: %d\n",
+				 ret);
+		}
+
 		ret = request_threaded_irq(irq, NULL, wm831x_irq_thread,
 					   IRQF_TRIGGER_LOW | IRQF_ONESHOT,
 					   "wm831x", wm831x);
diff --git a/drivers/mfd/wm8350-core.c b/drivers/mfd/wm8350-core.c
index dd1caaa..8a9b11c 100644
--- a/drivers/mfd/wm8350-core.c
+++ b/drivers/mfd/wm8350-core.c
@@ -20,6 +20,7 @@
 #include <linux/device.h>
 #include <linux/delay.h>
 #include <linux/interrupt.h>
+#include <linux/regmap.h>
 #include <linux/workqueue.h>
 
 #include <linux/mfd/wm8350/core.h>
@@ -74,7 +75,7 @@
 	int bytes = num_regs * 2;
 
 	dev_dbg(wm8350->dev, "volatile read\n");
-	ret = wm8350->read_dev(wm8350, reg, bytes, (char *)dest);
+	ret = regmap_raw_read(wm8350->regmap, reg, dest, bytes);
 
 	for (i = reg; i < reg + num_regs; i++) {
 		/* Cache is CPU endian */
@@ -96,9 +97,6 @@
 	int ret = 0;
 	int bytes = num_regs * 2;
 
-	if (wm8350->read_dev == NULL)
-		return -ENODEV;
-
 	if ((reg + num_regs - 1) > WM8350_MAX_REGISTER) {
 		dev_err(wm8350->dev, "invalid reg %x\n",
 			reg + num_regs - 1);
@@ -149,9 +147,6 @@
 	int end = reg + num_regs;
 	int bytes = num_regs * 2;
 
-	if (wm8350->write_dev == NULL)
-		return -ENODEV;
-
 	if ((reg + num_regs - 1) > WM8350_MAX_REGISTER) {
 		dev_err(wm8350->dev, "invalid reg %x\n",
 			reg + num_regs - 1);
@@ -182,7 +177,7 @@
 	}
 
 	/* Actually write it out */
-	return wm8350->write_dev(wm8350, reg, bytes, (char *)src);
+	return regmap_raw_write(wm8350->regmap, reg, src, bytes);
 }
 
 /*
@@ -515,9 +510,8 @@
 	 * a PMIC so the device may not be in a virgin state and we
 	 * can't rely on the silicon values.
 	 */
-	ret = wm8350->read_dev(wm8350, 0,
-			       sizeof(u16) * (WM8350_MAX_REGISTER + 1),
-			       wm8350->reg_cache);
+	ret = regmap_raw_read(wm8350->regmap, 0, wm8350->reg_cache,
+			      sizeof(u16) * (WM8350_MAX_REGISTER + 1));
 	if (ret < 0) {
 		dev_err(wm8350->dev,
 			"failed to read initial cache values\n");
@@ -570,35 +564,30 @@
 		       struct wm8350_platform_data *pdata)
 {
 	int ret;
-	u16 id1, id2, mask_rev;
-	u16 cust_id, mode, chip_rev;
+	unsigned int id1, id2, mask_rev;
+	unsigned int cust_id, mode, chip_rev;
 
 	dev_set_drvdata(wm8350->dev, wm8350);
 
 	/* get WM8350 revision and config mode */
-	ret = wm8350->read_dev(wm8350, WM8350_RESET_ID, sizeof(id1), &id1);
+	ret = regmap_read(wm8350->regmap, WM8350_RESET_ID, &id1);
 	if (ret != 0) {
 		dev_err(wm8350->dev, "Failed to read ID: %d\n", ret);
 		goto err;
 	}
 
-	ret = wm8350->read_dev(wm8350, WM8350_ID, sizeof(id2), &id2);
+	ret = regmap_read(wm8350->regmap, WM8350_ID, &id2);
 	if (ret != 0) {
 		dev_err(wm8350->dev, "Failed to read ID: %d\n", ret);
 		goto err;
 	}
 
-	ret = wm8350->read_dev(wm8350, WM8350_REVISION, sizeof(mask_rev),
-			       &mask_rev);
+	ret = regmap_read(wm8350->regmap, WM8350_REVISION, &mask_rev);
 	if (ret != 0) {
 		dev_err(wm8350->dev, "Failed to read revision: %d\n", ret);
 		goto err;
 	}
 
-	id1 = be16_to_cpu(id1);
-	id2 = be16_to_cpu(id2);
-	mask_rev = be16_to_cpu(mask_rev);
-
 	if (id1 != 0x6143) {
 		dev_err(wm8350->dev,
 			"Device with ID %x is not a WM8350\n", id1);
diff --git a/drivers/mfd/wm8350-i2c.c b/drivers/mfd/wm8350-i2c.c
index d955faa..a68aceb 100644
--- a/drivers/mfd/wm8350-i2c.c
+++ b/drivers/mfd/wm8350-i2c.c
@@ -15,47 +15,18 @@
 
 #include <linux/module.h>
 #include <linux/moduleparam.h>
+#include <linux/err.h>
 #include <linux/init.h>
 #include <linux/i2c.h>
 #include <linux/platform_device.h>
 #include <linux/mfd/wm8350/core.h>
+#include <linux/regmap.h>
 #include <linux/slab.h>
 
-static int wm8350_i2c_read_device(struct wm8350 *wm8350, char reg,
-				  int bytes, void *dest)
-{
-	int ret;
-
-	ret = i2c_master_send(wm8350->i2c_client, &reg, 1);
-	if (ret < 0)
-		return ret;
-	ret = i2c_master_recv(wm8350->i2c_client, dest, bytes);
-	if (ret < 0)
-		return ret;
-	if (ret != bytes)
-		return -EIO;
-	return 0;
-}
-
-static int wm8350_i2c_write_device(struct wm8350 *wm8350, char reg,
-				   int bytes, void *src)
-{
-	/* we add 1 byte for device register */
-	u8 msg[(WM8350_MAX_REGISTER << 1) + 1];
-	int ret;
-
-	if (bytes > ((WM8350_MAX_REGISTER << 1) + 1))
-		return -EINVAL;
-
-	msg[0] = reg;
-	memcpy(&msg[1], src, bytes);
-	ret = i2c_master_send(wm8350->i2c_client, msg, bytes + 1);
-	if (ret < 0)
-		return ret;
-	if (ret != bytes + 1)
-		return -EIO;
-	return 0;
-}
+static const struct regmap_config wm8350_regmap = {
+	.reg_bits = 8,
+	.val_bits = 16,
+};
 
 static int wm8350_i2c_probe(struct i2c_client *i2c,
 			    const struct i2c_device_id *id)
@@ -67,20 +38,18 @@
 	if (wm8350 == NULL)
 		return -ENOMEM;
 
+	wm8350->regmap = devm_regmap_init_i2c(i2c, &wm8350_regmap);
+	if (IS_ERR(wm8350->regmap)) {
+		ret = PTR_ERR(wm8350->regmap);
+		dev_err(&i2c->dev, "Failed to allocate register map: %d\n",
+			ret);
+		return ret;
+	}
+
 	i2c_set_clientdata(i2c, wm8350);
 	wm8350->dev = &i2c->dev;
-	wm8350->i2c_client = i2c;
-	wm8350->read_dev = wm8350_i2c_read_device;
-	wm8350->write_dev = wm8350_i2c_write_device;
 
-	ret = wm8350_device_init(wm8350, i2c->irq, i2c->dev.platform_data);
-	if (ret < 0)
-		goto err;
-
-	return ret;
-
-err:
-	return ret;
+	return wm8350_device_init(wm8350, i2c->irq, i2c->dev.platform_data);
 }
 
 static int wm8350_i2c_remove(struct i2c_client *i2c)
diff --git a/drivers/mfd/wm8400-core.c b/drivers/mfd/wm8400-core.c
index 1189a17..4b7d378 100644
--- a/drivers/mfd/wm8400-core.c
+++ b/drivers/mfd/wm8400-core.c
@@ -23,136 +23,16 @@
 #include <linux/regmap.h>
 #include <linux/slab.h>
 
-static struct {
-	u16  readable;    /* Mask of readable bits */
-	u16  writable;    /* Mask of writable bits */
-	u16  vol;         /* Mask of volatile bits */
-	int  is_codec;    /* Register controlled by codec reset */
-	u16  default_val; /* Value on reset */
-} reg_data[] = {
-	{ 0xFFFF, 0xFFFF, 0x0000, 0, 0x6172 }, /* R0 */
-	{ 0x7000, 0x0000, 0x8000, 0, 0x0000 }, /* R1 */
-	{ 0xFF17, 0xFF17, 0x0000, 0, 0x0000 }, /* R2 */
-	{ 0xEBF3, 0xEBF3, 0x0000, 1, 0x6000 }, /* R3 */
-	{ 0x3CF3, 0x3CF3, 0x0000, 1, 0x0000 }, /* R4  */
-	{ 0xF1F8, 0xF1F8, 0x0000, 1, 0x4050 }, /* R5  */
-	{ 0xFC1F, 0xFC1F, 0x0000, 1, 0x4000 }, /* R6  */
-	{ 0xDFDE, 0xDFDE, 0x0000, 1, 0x01C8 }, /* R7  */
-	{ 0xFCFC, 0xFCFC, 0x0000, 1, 0x0000 }, /* R8  */
-	{ 0xEFFF, 0xEFFF, 0x0000, 1, 0x0040 }, /* R9  */
-	{ 0xEFFF, 0xEFFF, 0x0000, 1, 0x0040 }, /* R10 */
-	{ 0x27F7, 0x27F7, 0x0000, 1, 0x0004 }, /* R11 */
-	{ 0x01FF, 0x01FF, 0x0000, 1, 0x00C0 }, /* R12 */
-	{ 0x01FF, 0x01FF, 0x0000, 1, 0x00C0 }, /* R13 */
-	{ 0x1FEF, 0x1FEF, 0x0000, 1, 0x0000 }, /* R14 */
-	{ 0x0163, 0x0163, 0x0000, 1, 0x0100 }, /* R15 */
-	{ 0x01FF, 0x01FF, 0x0000, 1, 0x00C0 }, /* R16 */
-	{ 0x01FF, 0x01FF, 0x0000, 1, 0x00C0 }, /* R17 */
-	{ 0x1FFF, 0x0FFF, 0x0000, 1, 0x0000 }, /* R18 */
-	{ 0xFFFF, 0xFFFF, 0x0000, 1, 0x1000 }, /* R19 */
-	{ 0xFFFF, 0xFFFF, 0x0000, 1, 0x1010 }, /* R20 */
-	{ 0xFFFF, 0xFFFF, 0x0000, 1, 0x1010 }, /* R21 */
-	{ 0x0FDD, 0x0FDD, 0x0000, 1, 0x8000 }, /* R22 */
-	{ 0x1FFF, 0x1FFF, 0x0000, 1, 0x0800 }, /* R23 */
-	{ 0x0000, 0x01DF, 0x0000, 1, 0x008B }, /* R24 */
-	{ 0x0000, 0x01DF, 0x0000, 1, 0x008B }, /* R25 */
-	{ 0x0000, 0x01DF, 0x0000, 1, 0x008B }, /* R26 */
-	{ 0x0000, 0x01DF, 0x0000, 1, 0x008B }, /* R27 */
-	{ 0x0000, 0x01FF, 0x0000, 1, 0x0000 }, /* R28 */
-	{ 0x0000, 0x01FF, 0x0000, 1, 0x0000 }, /* R29 */
-	{ 0x0000, 0x0077, 0x0000, 1, 0x0066 }, /* R30 */
-	{ 0x0000, 0x0033, 0x0000, 1, 0x0022 }, /* R31 */
-	{ 0x0000, 0x01FF, 0x0000, 1, 0x0079 }, /* R32 */
-	{ 0x0000, 0x01FF, 0x0000, 1, 0x0079 }, /* R33 */
-	{ 0x0000, 0x0003, 0x0000, 1, 0x0003 }, /* R34 */
-	{ 0x0000, 0x01FF, 0x0000, 1, 0x0003 }, /* R35 */
-	{ 0x0000, 0x0000, 0x0000, 0, 0x0000 }, /* R36 */
-	{ 0x0000, 0x003F, 0x0000, 1, 0x0100 }, /* R37 */
-	{ 0x0000, 0x0000, 0x0000, 0, 0x0000 }, /* R38 */
-	{ 0x0000, 0x000F, 0x0000, 0, 0x0000 }, /* R39 */
-	{ 0x0000, 0x00FF, 0x0000, 1, 0x0000 }, /* R40 */
-	{ 0x0000, 0x01B7, 0x0000, 1, 0x0000 }, /* R41 */
-	{ 0x0000, 0x01B7, 0x0000, 1, 0x0000 }, /* R42 */
-	{ 0x0000, 0x01FF, 0x0000, 1, 0x0000 }, /* R43 */
-	{ 0x0000, 0x01FF, 0x0000, 1, 0x0000 }, /* R44 */
-	{ 0x0000, 0x00FD, 0x0000, 1, 0x0000 }, /* R45 */
-	{ 0x0000, 0x00FD, 0x0000, 1, 0x0000 }, /* R46 */
-	{ 0x0000, 0x01FF, 0x0000, 1, 0x0000 }, /* R47 */
-	{ 0x0000, 0x01FF, 0x0000, 1, 0x0000 }, /* R48 */
-	{ 0x0000, 0x01FF, 0x0000, 1, 0x0000 }, /* R49 */
-	{ 0x0000, 0x01FF, 0x0000, 1, 0x0000 }, /* R50 */
-	{ 0x0000, 0x01B3, 0x0000, 1, 0x0180 }, /* R51 */
-	{ 0x0000, 0x0077, 0x0000, 1, 0x0000 }, /* R52 */
-	{ 0x0000, 0x0077, 0x0000, 1, 0x0000 }, /* R53 */
-	{ 0x0000, 0x00FF, 0x0000, 1, 0x0000 }, /* R54 */
-	{ 0x0000, 0x0001, 0x0000, 1, 0x0000 }, /* R55 */
-	{ 0x0000, 0x003F, 0x0000, 1, 0x0000 }, /* R56 */
-	{ 0x0000, 0x004F, 0x0000, 1, 0x0000 }, /* R57 */
-	{ 0x0000, 0x00FD, 0x0000, 1, 0x0000 }, /* R58 */
-	{ 0x0000, 0x0000, 0x0000, 0, 0x0000 }, /* R59 */
-	{ 0x1FFF, 0x1FFF, 0x0000, 1, 0x0000 }, /* R60 */
-	{ 0xFFFF, 0xFFFF, 0x0000, 1, 0x0000 }, /* R61 */
-	{ 0x03FF, 0x03FF, 0x0000, 1, 0x0000 }, /* R62 */
-	{ 0x007F, 0x007F, 0x0000, 1, 0x0000 }, /* R63 */
-	{ 0x0000, 0x0000, 0x0000, 0, 0x0000 }, /* R64 */
-	{ 0xDFFF, 0xDFFF, 0x0000, 0, 0x0000 }, /* R65 */
-	{ 0xDFFF, 0xDFFF, 0x0000, 0, 0x0000 }, /* R66 */
-	{ 0xDFFF, 0xDFFF, 0x0000, 0, 0x0000 }, /* R67 */
-	{ 0xDFFF, 0xDFFF, 0x0000, 0, 0x0000 }, /* R68 */
-	{ 0x0000, 0x0000, 0x0000, 0, 0x0000 }, /* R69 */
-	{ 0xFFFF, 0xFFFF, 0x0000, 0, 0x4400 }, /* R70 */
-	{ 0x23FF, 0x23FF, 0x0000, 0, 0x0000 }, /* R71 */
-	{ 0xFFFF, 0xFFFF, 0x0000, 0, 0x4400 }, /* R72 */
-	{ 0x23FF, 0x23FF, 0x0000, 0, 0x0000 }, /* R73 */
-	{ 0x0000, 0x0000, 0x0000, 0, 0x0000 }, /* R74 */
-	{ 0x000E, 0x000E, 0x0000, 0, 0x0008 }, /* R75 */
-	{ 0xE00F, 0xE00F, 0x0000, 0, 0x0000 }, /* R76 */
-	{ 0x0000, 0x0000, 0x0000, 0, 0x0000 }, /* R77 */
-	{ 0x03C0, 0x03C0, 0x0000, 0, 0x02C0 }, /* R78 */
-	{ 0xFFFF, 0x0000, 0xffff, 0, 0x0000 }, /* R79 */
-	{ 0xFFFF, 0xFFFF, 0x0000, 0, 0x0000 }, /* R80 */
-	{ 0xFFFF, 0x0000, 0xffff, 0, 0x0000 }, /* R81 */
-	{ 0x2BFF, 0x0000, 0xffff, 0, 0x0000 }, /* R82 */
-	{ 0x0000, 0x0000, 0x0000, 0, 0x0000 }, /* R83 */
-	{ 0x80FF, 0x80FF, 0x0000, 0, 0x00ff }, /* R84 */
-};
-
-static int wm8400_read(struct wm8400 *wm8400, u8 reg, int num_regs, u16 *dest)
+static bool wm8400_volatile(struct device *dev, unsigned int reg)
 {
-	int i, ret = 0;
-
-	BUG_ON(reg + num_regs > ARRAY_SIZE(wm8400->reg_cache));
-
-	/* If there are any volatile reads then read back the entire block */
-	for (i = reg; i < reg + num_regs; i++)
-		if (reg_data[i].vol) {
-			ret = regmap_bulk_read(wm8400->regmap, reg, dest,
-					       num_regs);
-			return ret;
-		}
-
-	/* Otherwise use the cache */
-	memcpy(dest, &wm8400->reg_cache[reg], num_regs * sizeof(u16));
-
-	return 0;
-}
-
-static int wm8400_write(struct wm8400 *wm8400, u8 reg, int num_regs,
-			u16 *src)
-{
-	int ret, i;
-
-	BUG_ON(reg + num_regs > ARRAY_SIZE(wm8400->reg_cache));
-
-	for (i = 0; i < num_regs; i++) {
-		BUG_ON(!reg_data[reg + i].writable);
-		wm8400->reg_cache[reg + i] = src[i];
-		ret = regmap_write(wm8400->regmap, reg, src[i]);
-		if (ret != 0)
-			return ret;
+	switch (reg) {
+	case WM8400_INTERRUPT_STATUS_1:
+	case WM8400_INTERRUPT_LEVELS:
+	case WM8400_SHUTDOWN_REASON:
+		return true;
+	default:
+		return false;
 	}
-
-	return 0;
 }
 
 /**
@@ -165,13 +45,12 @@
  */
 u16 wm8400_reg_read(struct wm8400 *wm8400, u8 reg)
 {
-	u16 val;
+	unsigned int val;
+	int ret;
 
-	mutex_lock(&wm8400->io_lock);
-
-	wm8400_read(wm8400, reg, 1, &val);
-
-	mutex_unlock(&wm8400->io_lock);
+	ret = regmap_read(wm8400->regmap, reg, &val);
+	if (ret < 0)
+		return ret;
 
 	return val;
 }
@@ -179,63 +58,10 @@
 
 int wm8400_block_read(struct wm8400 *wm8400, u8 reg, int count, u16 *data)
 {
-	int ret;
-
-	mutex_lock(&wm8400->io_lock);
-
-	ret = wm8400_read(wm8400, reg, count, data);
-
-	mutex_unlock(&wm8400->io_lock);
-
-	return ret;
+	return regmap_bulk_read(wm8400->regmap, reg, data, count);
 }
 EXPORT_SYMBOL_GPL(wm8400_block_read);
 
-/**
- * wm8400_set_bits - Bitmask write
- *
- * @wm8400: Pointer to wm8400 control structure
- * @reg:    Register to access
- * @mask:   Mask of bits to change
- * @val:    Value to set for masked bits
- */
-int wm8400_set_bits(struct wm8400 *wm8400, u8 reg, u16 mask, u16 val)
-{
-	u16 tmp;
-	int ret;
-
-	mutex_lock(&wm8400->io_lock);
-
-	ret = wm8400_read(wm8400, reg, 1, &tmp);
-	tmp = (tmp & ~mask) | val;
-	if (ret == 0)
-		ret = wm8400_write(wm8400, reg, 1, &tmp);
-
-	mutex_unlock(&wm8400->io_lock);
-
-	return ret;
-}
-EXPORT_SYMBOL_GPL(wm8400_set_bits);
-
-/**
- * wm8400_reset_codec_reg_cache - Reset cached codec registers to
- * their default values.
- */
-void wm8400_reset_codec_reg_cache(struct wm8400 *wm8400)
-{
-	int i;
-
-	mutex_lock(&wm8400->io_lock);
-
-	/* Reset all codec registers to their initial value */
-	for (i = 0; i < ARRAY_SIZE(wm8400->reg_cache); i++)
-		if (reg_data[i].is_codec)
-			wm8400->reg_cache[i] = reg_data[i].default_val;
-
-	mutex_unlock(&wm8400->io_lock);
-}
-EXPORT_SYMBOL_GPL(wm8400_reset_codec_reg_cache);
-
 static int wm8400_register_codec(struct wm8400 *wm8400)
 {
 	struct mfd_cell cell = {
@@ -257,44 +83,24 @@
 static int wm8400_init(struct wm8400 *wm8400,
 		       struct wm8400_platform_data *pdata)
 {
-	u16 reg;
-	int ret, i;
-
-	mutex_init(&wm8400->io_lock);
+	unsigned int reg;
+	int ret;
 
 	dev_set_drvdata(wm8400->dev, wm8400);
 
 	/* Check that this is actually a WM8400 */
-	ret = regmap_read(wm8400->regmap, WM8400_RESET_ID, &i);
+	ret = regmap_read(wm8400->regmap, WM8400_RESET_ID, &reg);
 	if (ret != 0) {
 		dev_err(wm8400->dev, "Chip ID register read failed\n");
 		return -EIO;
 	}
-	if (i != reg_data[WM8400_RESET_ID].default_val) {
-		dev_err(wm8400->dev, "Device is not a WM8400, ID is %x\n", i);
+	if (reg != 0x6172) {
+		dev_err(wm8400->dev, "Device is not a WM8400, ID is %x\n",
+			reg);
 		return -ENODEV;
 	}
 
-	/* We don't know what state the hardware is in and since this
-	 * is a PMIC we can't reset it safely so initialise the register
-	 * cache from the hardware.
-	 */
-	ret = regmap_raw_read(wm8400->regmap, 0, wm8400->reg_cache,
-			      ARRAY_SIZE(wm8400->reg_cache));
-	if (ret != 0) {
-		dev_err(wm8400->dev, "Register cache read failed\n");
-		return -EIO;
-	}
-	for (i = 0; i < ARRAY_SIZE(wm8400->reg_cache); i++)
-		wm8400->reg_cache[i] = be16_to_cpu(wm8400->reg_cache[i]);
-
-	/* If the codec is in reset use hard coded values */
-	if (!(wm8400->reg_cache[WM8400_POWER_MANAGEMENT_1] & WM8400_CODEC_ENA))
-		for (i = 0; i < ARRAY_SIZE(wm8400->reg_cache); i++)
-			if (reg_data[i].is_codec)
-				wm8400->reg_cache[i] = reg_data[i].default_val;
-
-	ret = wm8400_read(wm8400, WM8400_ID, 1, &reg);
+	ret = regmap_read(wm8400->regmap, WM8400_ID, &reg);
 	if (ret != 0) {
 		dev_err(wm8400->dev, "ID register read failed: %d\n", ret);
 		return ret;
@@ -334,8 +140,22 @@
 	.reg_bits = 8,
 	.val_bits = 16,
 	.max_register = WM8400_REGISTER_COUNT - 1,
+
+	.volatile_reg = wm8400_volatile,
+
+	.cache_type = REGCACHE_RBTREE,
 };
 
+/**
+ * wm8400_reset_codec_reg_cache - Reset cached codec registers to
+ * their default values.
+ */
+void wm8400_reset_codec_reg_cache(struct wm8400 *wm8400)
+{
+	regmap_reinit_cache(wm8400->regmap, &wm8400_regmap_config);
+}
+EXPORT_SYMBOL_GPL(wm8400_reset_codec_reg_cache);
+
 #if defined(CONFIG_I2C) || defined(CONFIG_I2C_MODULE)
 static int wm8400_i2c_probe(struct i2c_client *i2c,
 			    const struct i2c_device_id *id)
diff --git a/drivers/mfd/wm8994-core.c b/drivers/mfd/wm8994-core.c
index 9d7ca1e..1e321d3 100644
--- a/drivers/mfd/wm8994-core.c
+++ b/drivers/mfd/wm8994-core.c
@@ -500,7 +500,8 @@
 			ret);
 		goto err_enable;
 	}
-	wm8994->revision = ret;
+	wm8994->revision = ret & WM8994_CHIP_REV_MASK;
+	wm8994->cust_id = (ret & WM8994_CUST_ID_MASK) >> WM8994_CUST_ID_SHIFT;
 
 	switch (wm8994->type) {
 	case WM8994:
@@ -553,8 +554,8 @@
 		break;
 	}
 
-	dev_info(wm8994->dev, "%s revision %c\n", devname,
-		 'A' + wm8994->revision);
+	dev_info(wm8994->dev, "%s revision %c CUST_ID %02x\n", devname,
+		 'A' + wm8994->revision, wm8994->cust_id);
 
 	switch (wm8994->type) {
 	case WM1811:
@@ -732,23 +733,7 @@
 	.id_table = wm8994_i2c_id,
 };
 
-static int __init wm8994_i2c_init(void)
-{
-	int ret;
-
-	ret = i2c_add_driver(&wm8994_i2c_driver);
-	if (ret != 0)
-		pr_err("Failed to register wm8994 I2C driver: %d\n", ret);
-
-	return ret;
-}
-module_init(wm8994_i2c_init);
-
-static void __exit wm8994_i2c_exit(void)
-{
-	i2c_del_driver(&wm8994_i2c_driver);
-}
-module_exit(wm8994_i2c_exit);
+module_i2c_driver(wm8994_i2c_driver);
 
 MODULE_DESCRIPTION("Core support for the WM8994 audio CODEC");
 MODULE_LICENSE("GPL");
diff --git a/drivers/mfd/wm8994-regmap.c b/drivers/mfd/wm8994-regmap.c
index bfd25af..52e9e29 100644
--- a/drivers/mfd/wm8994-regmap.c
+++ b/drivers/mfd/wm8994-regmap.c
@@ -1122,7 +1122,6 @@
 	case WM8994_RATE_STATUS:
 	case WM8958_MIC_DETECT_3:
 	case WM8994_DC_SERVO_4E:
-	case WM8994_CHIP_REVISION:
 	case WM8994_INTERRUPT_STATUS_1:
 	case WM8994_INTERRUPT_STATUS_2:
 		return true;
diff --git a/drivers/misc/ab8500-pwm.c b/drivers/misc/ab8500-pwm.c
index d7a9aa1..042a8fe 100644
--- a/drivers/misc/ab8500-pwm.c
+++ b/drivers/misc/ab8500-pwm.c
@@ -142,10 +142,16 @@
 	return 0;
 }
 
+static const struct of_device_id ab8500_pwm_match[] = {
+	{ .compatible = "stericsson,ab8500-pwm", },
+	{}
+};
+
 static struct platform_driver ab8500_pwm_driver = {
 	.driver = {
 		.name = "ab8500-pwm",
 		.owner = THIS_MODULE,
+		.of_match_table = ab8500_pwm_match,
 	},
 	.probe = ab8500_pwm_probe,
 	.remove = __devexit_p(ab8500_pwm_remove),
diff --git a/drivers/misc/mei/interrupt.c b/drivers/misc/mei/interrupt.c
index 93936f1..23f5463 100644
--- a/drivers/misc/mei/interrupt.c
+++ b/drivers/misc/mei/interrupt.c
@@ -835,7 +835,7 @@
 			struct mei_cl *cl,
 			struct mei_io_list *cmpl_list)
 {
-	if ((*slots * sizeof(u32)) >= (sizeof(struct mei_msg_hdr) +
+	if ((*slots * sizeof(u32)) < (sizeof(struct mei_msg_hdr) +
 			sizeof(struct hbm_flow_control))) {
 		/* return the cancel routine */
 		list_del(&cb_pos->cb_list);
diff --git a/drivers/misc/mei/main.c b/drivers/misc/mei/main.c
index c703332..7de1389 100644
--- a/drivers/misc/mei/main.c
+++ b/drivers/misc/mei/main.c
@@ -982,7 +982,7 @@
 		err = request_threaded_irq(pdev->irq,
 			NULL,
 			mei_interrupt_thread_handler,
-			0, mei_driver_name, dev);
+			IRQF_ONESHOT, mei_driver_name, dev);
 	else
 		err = request_threaded_irq(pdev->irq,
 			mei_interrupt_quick_handler,
@@ -992,7 +992,7 @@
 	if (err) {
 		dev_err(&pdev->dev, "request_threaded_irq failure. irq = %d\n",
 		       pdev->irq);
-		goto unmap_memory;
+		goto disable_msi;
 	}
 	INIT_DELAYED_WORK(&dev->timer_work, mei_timer);
 	if (mei_hw_init(dev)) {
@@ -1023,8 +1023,8 @@
 	mei_disable_interrupts(dev);
 	flush_scheduled_work();
 	free_irq(pdev->irq, dev);
+disable_msi:
 	pci_disable_msi(pdev);
-unmap_memory:
 	pci_iounmap(pdev, dev->mem_addr);
 free_device:
 	kfree(dev);
@@ -1101,6 +1101,8 @@
 
 	pci_release_regions(pdev);
 	pci_disable_device(pdev);
+
+	misc_deregister(&mei_misc_device);
 }
 #ifdef CONFIG_PM
 static int mei_pci_suspend(struct device *device)
@@ -1216,7 +1218,6 @@
  */
 static void __exit mei_exit_module(void)
 {
-	misc_deregister(&mei_misc_device);
 	pci_unregister_driver(&mei_driver);
 
 	pr_debug("unloaded successfully.\n");
diff --git a/drivers/misc/mei/wd.c b/drivers/misc/mei/wd.c
index 6be5605..e2ec050 100644
--- a/drivers/misc/mei/wd.c
+++ b/drivers/misc/mei/wd.c
@@ -341,7 +341,7 @@
 };
 static const struct watchdog_info wd_info = {
 		.identity = INTEL_AMT_WATCHDOG_ID,
-		.options = WDIOF_KEEPALIVEPING,
+		.options = WDIOF_KEEPALIVEPING | WDIOF_ALARMONLY,
 };
 
 static struct watchdog_device amt_wd_dev = {
diff --git a/drivers/mmc/card/block.c b/drivers/mmc/card/block.c
index dabec55..276d21c 100644
--- a/drivers/mmc/card/block.c
+++ b/drivers/mmc/card/block.c
@@ -384,7 +384,7 @@
 	md = mmc_blk_get(bdev->bd_disk);
 	if (!md) {
 		err = -EINVAL;
-		goto cmd_done;
+		goto cmd_err;
 	}
 
 	card = md->queue.card;
@@ -483,6 +483,7 @@
 
 cmd_done:
 	mmc_blk_put(md);
+cmd_err:
 	kfree(idata->buf);
 	kfree(idata);
 	return err;
@@ -553,7 +554,6 @@
 	struct mmc_request mrq = {NULL};
 	struct mmc_command cmd = {0};
 	struct mmc_data data = {0};
-	unsigned int timeout_us;
 
 	struct scatterlist sg;
 
@@ -573,23 +573,12 @@
 	cmd.arg = 0;
 	cmd.flags = MMC_RSP_SPI_R1 | MMC_RSP_R1 | MMC_CMD_ADTC;
 
-	data.timeout_ns = card->csd.tacc_ns * 100;
-	data.timeout_clks = card->csd.tacc_clks * 100;
-
-	timeout_us = data.timeout_ns / 1000;
-	timeout_us += data.timeout_clks * 1000 /
-		(card->host->ios.clock / 1000);
-
-	if (timeout_us > 100000) {
-		data.timeout_ns = 100000000;
-		data.timeout_clks = 0;
-	}
-
 	data.blksz = 4;
 	data.blocks = 1;
 	data.flags = MMC_DATA_READ;
 	data.sg = &sg;
 	data.sg_len = 1;
+	mmc_set_data_timeout(&data, card);
 
 	mrq.cmd = &cmd;
 	mrq.data = &data;
@@ -1283,7 +1272,7 @@
 	int ret = 1, disable_multi = 0, retry = 0, type;
 	enum mmc_blk_status status;
 	struct mmc_queue_req *mq_rq;
-	struct request *req;
+	struct request *req = rqc;
 	struct mmc_async_req *areq;
 
 	if (!rqc && !mq->mqrq_prev->req)
@@ -1291,6 +1280,16 @@
 
 	do {
 		if (rqc) {
+			/*
+			 * When 4KB native sectors are enabled, only reads and
+			 * writes that are a multiple of 8 blocks are allowed
+			 */
+			if ((brq->data.blocks & 0x07) &&
+			    (card->ext_csd.data_sector_size == 4096)) {
+				pr_err("%s: Transfer size is not 4KB sector size aligned\n",
+					req->rq_disk->disk_name);
+				goto cmd_abort;
+			}
 			mmc_blk_rw_rq_prep(mq->mqrq_cur, card, 0, mq);
 			areq = &mq->mqrq_cur->mmc_active;
 		} else
@@ -1538,7 +1537,12 @@
 	snprintf(md->disk->disk_name, sizeof(md->disk->disk_name),
 		 "mmcblk%d%s", md->name_idx, subname ? subname : "");
 
-	blk_queue_logical_block_size(md->queue.queue, 512);
+	if (mmc_card_mmc(card))
+		blk_queue_logical_block_size(md->queue.queue,
+					     card->ext_csd.data_sector_size);
+	else
+		blk_queue_logical_block_size(md->queue.queue, 512);
+
 	set_capacity(md->disk, size);
 
 	if (mmc_host_cmd23(card->host)) {
diff --git a/drivers/mmc/card/queue.c b/drivers/mmc/card/queue.c
index 996f8e3..e360a97 100644
--- a/drivers/mmc/card/queue.c
+++ b/drivers/mmc/card/queue.c
@@ -96,7 +96,7 @@
  * on any queue on this host, and attempt to issue it.  This may
  * not be the queue we were asked to process.
  */
-static void mmc_request(struct request_queue *q)
+static void mmc_request_fn(struct request_queue *q)
 {
 	struct mmc_queue *mq = q->queuedata;
 	struct request *req;
@@ -171,12 +171,10 @@
 		limit = *mmc_dev(host)->dma_mask;
 
 	mq->card = card;
-	mq->queue = blk_init_queue(mmc_request, lock);
+	mq->queue = blk_init_queue(mmc_request_fn, lock);
 	if (!mq->queue)
 		return -ENOMEM;
 
-	memset(&mq->mqrq_cur, 0, sizeof(mq->mqrq_cur));
-	memset(&mq->mqrq_prev, 0, sizeof(mq->mqrq_prev));
 	mq->mqrq_cur = mqrq_cur;
 	mq->mqrq_prev = mqrq_prev;
 	mq->queue->queuedata = mq;
diff --git a/drivers/mmc/core/bus.c b/drivers/mmc/core/bus.c
index c60cee9..9b68933 100644
--- a/drivers/mmc/core/bus.c
+++ b/drivers/mmc/core/bus.c
@@ -122,6 +122,7 @@
 	return 0;
 }
 
+#ifdef CONFIG_PM_SLEEP
 static int mmc_bus_suspend(struct device *dev)
 {
 	struct mmc_driver *drv = to_mmc_driver(dev->driver);
@@ -143,6 +144,7 @@
 		ret = drv->resume(card);
 	return ret;
 }
+#endif
 
 #ifdef CONFIG_PM_RUNTIME
 
diff --git a/drivers/mmc/core/cd-gpio.c b/drivers/mmc/core/cd-gpio.c
index 2c14be7..f13e38d 100644
--- a/drivers/mmc/core/cd-gpio.c
+++ b/drivers/mmc/core/cd-gpio.c
@@ -73,6 +73,9 @@
 {
 	struct mmc_cd_gpio *cd = host->hotplug.handler_priv;
 
+	if (!cd)
+		return;
+
 	free_irq(host->hotplug.irq, host);
 	gpio_free(cd->gpio);
 	kfree(cd);
diff --git a/drivers/mmc/core/core.c b/drivers/mmc/core/core.c
index ba821fe..0b6141d 100644
--- a/drivers/mmc/core/core.c
+++ b/drivers/mmc/core/core.c
@@ -42,6 +42,7 @@
 #include "sdio_ops.h"
 
 static struct workqueue_struct *workqueue;
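+/* Initialization frequencies, tried in descending order; mmc_start_host() also uses freqs[0] for f_init */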
+static const unsigned freqs[] = { 400000, 300000, 200000, 100000 };
 
 /*
  * Enabling software CRCs on the data blocks can be a significant (30%)
@@ -1157,6 +1158,9 @@
 {
 	int bit;
 
+	if (host->ios.power_mode == MMC_POWER_ON)
+		return;
+
 	mmc_host_clk_hold(host);
 
 	/* If ocr is set, we use it */
@@ -1199,6 +1203,10 @@
 void mmc_power_off(struct mmc_host *host)
 {
 	int err = 0;
+
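+	/* Already powered off; skip a redundant power-down sequence */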
+	if (host->ios.power_mode == MMC_POWER_OFF)
+		return;
+
 	mmc_host_clk_hold(host);
 
 	host->ios.clock = 0;
@@ -2005,7 +2013,6 @@
 
 void mmc_rescan(struct work_struct *work)
 {
-	static const unsigned freqs[] = { 400000, 300000, 200000, 100000 };
 	struct mmc_host *host =
 		container_of(work, struct mmc_host, detect.work);
 	int i;
@@ -2044,8 +2051,12 @@
 	 */
 	mmc_bus_put(host);
 
-	if (host->ops->get_cd && host->ops->get_cd(host) == 0)
+	if (host->ops->get_cd && host->ops->get_cd(host) == 0) {
+		mmc_claim_host(host);
+		mmc_power_off(host);
+		mmc_release_host(host);
 		goto out;
+	}
 
 	mmc_claim_host(host);
 	for (i = 0; i < ARRAY_SIZE(freqs); i++) {
@@ -2063,7 +2074,8 @@
 
 void mmc_start_host(struct mmc_host *host)
 {
-	mmc_power_off(host);
+	host->f_init = max(freqs[0], host->f_min);
+	mmc_power_up(host);
 	mmc_detect_change(host, 0);
 }
 
diff --git a/drivers/mmc/core/mmc.c b/drivers/mmc/core/mmc.c
index 54df5ad..258b203 100644
--- a/drivers/mmc/core/mmc.c
+++ b/drivers/mmc/core/mmc.c
@@ -235,6 +235,36 @@
 	return err;
 }
 
+static void mmc_select_card_type(struct mmc_card *card)
+{
+	struct mmc_host *host = card->host;
+	u8 card_type = card->ext_csd.raw_card_type & EXT_CSD_CARD_TYPE_MASK;
+	unsigned int caps = host->caps, caps2 = host->caps2;
+	unsigned int hs_max_dtr = 0;
+
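+	/* Pick the highest transfer rate allowed by both the card type bits and the host caps */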
+	if (card_type & EXT_CSD_CARD_TYPE_26)
+		hs_max_dtr = MMC_HIGH_26_MAX_DTR;
+
+	if (caps & MMC_CAP_MMC_HIGHSPEED &&
+			card_type & EXT_CSD_CARD_TYPE_52)
+		hs_max_dtr = MMC_HIGH_52_MAX_DTR;
+
+	if ((caps & MMC_CAP_1_8V_DDR &&
+			card_type & EXT_CSD_CARD_TYPE_DDR_1_8V) ||
+	    (caps & MMC_CAP_1_2V_DDR &&
+			card_type & EXT_CSD_CARD_TYPE_DDR_1_2V))
+		hs_max_dtr = MMC_HIGH_DDR_MAX_DTR;
+
+	if ((caps2 & MMC_CAP2_HS200_1_8V_SDR &&
+			card_type & EXT_CSD_CARD_TYPE_SDR_1_8V) ||
+	    (caps2 & MMC_CAP2_HS200_1_2V_SDR &&
+			card_type & EXT_CSD_CARD_TYPE_SDR_1_2V))
+		hs_max_dtr = MMC_HS200_MAX_DTR;
+
+	card->ext_csd.hs_max_dtr = hs_max_dtr;
+	card->ext_csd.card_type = card_type;
+}
+
 /*
  * Decode extended CSD.
  */
@@ -284,56 +314,9 @@
 		if (card->ext_csd.sectors > (2u * 1024 * 1024 * 1024) / 512)
 			mmc_card_set_blockaddr(card);
 	}
+
 	card->ext_csd.raw_card_type = ext_csd[EXT_CSD_CARD_TYPE];
-	switch (ext_csd[EXT_CSD_CARD_TYPE] & EXT_CSD_CARD_TYPE_MASK) {
-	case EXT_CSD_CARD_TYPE_SDR_ALL:
-	case EXT_CSD_CARD_TYPE_SDR_ALL_DDR_1_8V:
-	case EXT_CSD_CARD_TYPE_SDR_ALL_DDR_1_2V:
-	case EXT_CSD_CARD_TYPE_SDR_ALL_DDR_52:
-		card->ext_csd.hs_max_dtr = 200000000;
-		card->ext_csd.card_type = EXT_CSD_CARD_TYPE_SDR_200;
-		break;
-	case EXT_CSD_CARD_TYPE_SDR_1_2V_ALL:
-	case EXT_CSD_CARD_TYPE_SDR_1_2V_DDR_1_8V:
-	case EXT_CSD_CARD_TYPE_SDR_1_2V_DDR_1_2V:
-	case EXT_CSD_CARD_TYPE_SDR_1_2V_DDR_52:
-		card->ext_csd.hs_max_dtr = 200000000;
-		card->ext_csd.card_type = EXT_CSD_CARD_TYPE_SDR_1_2V;
-		break;
-	case EXT_CSD_CARD_TYPE_SDR_1_8V_ALL:
-	case EXT_CSD_CARD_TYPE_SDR_1_8V_DDR_1_8V:
-	case EXT_CSD_CARD_TYPE_SDR_1_8V_DDR_1_2V:
-	case EXT_CSD_CARD_TYPE_SDR_1_8V_DDR_52:
-		card->ext_csd.hs_max_dtr = 200000000;
-		card->ext_csd.card_type = EXT_CSD_CARD_TYPE_SDR_1_8V;
-		break;
-	case EXT_CSD_CARD_TYPE_DDR_52 | EXT_CSD_CARD_TYPE_52 |
-	     EXT_CSD_CARD_TYPE_26:
-		card->ext_csd.hs_max_dtr = 52000000;
-		card->ext_csd.card_type = EXT_CSD_CARD_TYPE_DDR_52;
-		break;
-	case EXT_CSD_CARD_TYPE_DDR_1_2V | EXT_CSD_CARD_TYPE_52 |
-	     EXT_CSD_CARD_TYPE_26:
-		card->ext_csd.hs_max_dtr = 52000000;
-		card->ext_csd.card_type = EXT_CSD_CARD_TYPE_DDR_1_2V;
-		break;
-	case EXT_CSD_CARD_TYPE_DDR_1_8V | EXT_CSD_CARD_TYPE_52 |
-	     EXT_CSD_CARD_TYPE_26:
-		card->ext_csd.hs_max_dtr = 52000000;
-		card->ext_csd.card_type = EXT_CSD_CARD_TYPE_DDR_1_8V;
-		break;
-	case EXT_CSD_CARD_TYPE_52 | EXT_CSD_CARD_TYPE_26:
-		card->ext_csd.hs_max_dtr = 52000000;
-		break;
-	case EXT_CSD_CARD_TYPE_26:
-		card->ext_csd.hs_max_dtr = 26000000;
-		break;
-	default:
-		/* MMC v4 spec says this cannot happen */
-		pr_warning("%s: card is mmc v4 but doesn't "
-			"support any high-speed modes.\n",
-			mmc_hostname(card->host));
-	}
+	mmc_select_card_type(card);
 
 	card->ext_csd.raw_s_a_timeout = ext_csd[EXT_CSD_S_A_TIMEOUT];
 	card->ext_csd.raw_erase_timeout_mult =
@@ -533,6 +516,8 @@
 		} else {
 			card->ext_csd.data_tag_unit_size = 0;
 		}
+	} else {
+		card->ext_csd.data_sector_size = 512;
 	}
 
 out:
@@ -556,14 +541,10 @@
 	err = mmc_get_ext_csd(card, &bw_ext_csd);
 
 	if (err || bw_ext_csd == NULL) {
-		if (bus_width != MMC_BUS_WIDTH_1)
-			err = -EINVAL;
+		err = -EINVAL;
 		goto out;
 	}
 
-	if (bus_width == MMC_BUS_WIDTH_1)
-		goto out;
-
 	/* only compare read only fields */
 	err = !((card->ext_csd.raw_partition_support ==
 			bw_ext_csd[EXT_CSD_PARTITION_SUPPORT]) &&
@@ -736,6 +717,10 @@
 				 card->ext_csd.generic_cmd6_time);
 	}
 
+	if (err)
+		pr_err("%s: power class selection for ext_csd_bus_width %d"
+		       " failed\n", mmc_hostname(card->host), bus_width);
+
 	return err;
 }
 
@@ -745,7 +730,7 @@
  */
 static int mmc_select_hs200(struct mmc_card *card)
 {
-	int idx, err = 0;
+	int idx, err = -EINVAL;
 	struct mmc_host *host;
 	static unsigned ext_csd_bits[] = {
 		EXT_CSD_BUS_WIDTH_4,
@@ -761,10 +746,12 @@
 	host = card->host;
 
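+	/* Prefer 1.2V SDR when both card and host support it, otherwise fall back to 1.8V SDR */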
 	if (card->ext_csd.card_type & EXT_CSD_CARD_TYPE_SDR_1_2V &&
-	    host->caps2 & MMC_CAP2_HS200_1_2V_SDR)
-		if (mmc_set_signal_voltage(host, MMC_SIGNAL_VOLTAGE_120, 0))
-			err = mmc_set_signal_voltage(host,
-						     MMC_SIGNAL_VOLTAGE_180, 0);
+			host->caps2 & MMC_CAP2_HS200_1_2V_SDR)
+		err = mmc_set_signal_voltage(host, MMC_SIGNAL_VOLTAGE_120, 0);
+
+	if (err && card->ext_csd.card_type & EXT_CSD_CARD_TYPE_SDR_1_8V &&
+			host->caps2 & MMC_CAP2_HS200_1_8V_SDR)
+		err = mmc_set_signal_voltage(host, MMC_SIGNAL_VOLTAGE_180, 0);
 
 	/* If fails try again during next card power cycle */
 	if (err)
@@ -1117,9 +1104,7 @@
 				EXT_CSD_BUS_WIDTH_8 : EXT_CSD_BUS_WIDTH_4;
 		err = mmc_select_powerclass(card, ext_csd_bits, ext_csd);
 		if (err)
-			pr_warning("%s: power class selection to bus width %d"
-				   " failed\n", mmc_hostname(card->host),
-				   1 << bus_width);
+			goto err;
 	}
 
 	/*
@@ -1151,10 +1136,7 @@
 			err = mmc_select_powerclass(card, ext_csd_bits[idx][0],
 						    ext_csd);
 			if (err)
-				pr_warning("%s: power class selection to "
-					   "bus width %d failed\n",
-					   mmc_hostname(card->host),
-					   1 << bus_width);
+				goto err;
 
 			err = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL,
 					 EXT_CSD_BUS_WIDTH,
@@ -1182,10 +1164,7 @@
 			err = mmc_select_powerclass(card, ext_csd_bits[idx][1],
 						    ext_csd);
 			if (err)
-				pr_warning("%s: power class selection to "
-					   "bus width %d ddr %d failed\n",
-					   mmc_hostname(card->host),
-					   1 << bus_width, ddr);
+				goto err;
 
 			err = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL,
 					 EXT_CSD_BUS_WIDTH,
@@ -1347,7 +1326,7 @@
 		if (!err)
 			mmc_card_set_sleep(host->card);
 	} else if (!mmc_host_is_spi(host))
-		mmc_deselect_cards(host);
+		err = mmc_deselect_cards(host);
 	host->card->state &= ~(MMC_STATE_HIGHSPEED | MMC_STATE_HIGHSPEED_200);
 	mmc_release_host(host);
 
diff --git a/drivers/mmc/core/sd.c b/drivers/mmc/core/sd.c
index c272c686..b2b43f6 100644
--- a/drivers/mmc/core/sd.c
+++ b/drivers/mmc/core/sd.c
@@ -1075,16 +1075,18 @@
  */
 static int mmc_sd_suspend(struct mmc_host *host)
 {
+	int err = 0;
+
 	BUG_ON(!host);
 	BUG_ON(!host->card);
 
 	mmc_claim_host(host);
 	if (!mmc_host_is_spi(host))
-		mmc_deselect_cards(host);
+		err = mmc_deselect_cards(host);
 	host->card->state &= ~MMC_STATE_HIGHSPEED;
 	mmc_release_host(host);
 
-	return 0;
+	return err;
 }
 
 /*
diff --git a/drivers/mmc/core/sdio.c b/drivers/mmc/core/sdio.c
index 2c7c83f..41c5fd8 100644
--- a/drivers/mmc/core/sdio.c
+++ b/drivers/mmc/core/sdio.c
@@ -218,6 +218,12 @@
 	if (ret)
 		return ret;
 
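+	/* Warn if the card reports the reserved bus-width encoding; 4-bit mode is forced below regardless */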
+	if ((ctrl & SDIO_BUS_WIDTH_MASK) == SDIO_BUS_WIDTH_RESERVED)
+		pr_warning("%s: SDIO_CCCR_IF is invalid: 0x%02x\n",
+			   mmc_hostname(card->host), ctrl);
+
+	/* set as 4-bit bus width */
+	ctrl &= ~SDIO_BUS_WIDTH_MASK;
 	ctrl |= SDIO_BUS_WIDTH_4BIT;
 
 	ret = mmc_io_rw_direct(card, 1, 0, SDIO_CCCR_IF, ctrl, NULL);
@@ -947,7 +953,7 @@
 	}
 
 	if (!err && host->sdio_irqs)
-		mmc_signal_sdio_irq(host);
+		wake_up_process(host->sdio_irq_thread);
 	mmc_release_host(host);
 
 	/*
diff --git a/drivers/mmc/core/sdio_irq.c b/drivers/mmc/core/sdio_irq.c
index f573e7f..3d8ceb4 100644
--- a/drivers/mmc/core/sdio_irq.c
+++ b/drivers/mmc/core/sdio_irq.c
@@ -28,18 +28,20 @@
 
 #include "sdio_ops.h"
 
-static int process_sdio_pending_irqs(struct mmc_card *card)
+static int process_sdio_pending_irqs(struct mmc_host *host)
 {
+	struct mmc_card *card = host->card;
 	int i, ret, count;
 	unsigned char pending;
 	struct sdio_func *func;
 
 	/*
 	 * Optimization, if there is only 1 function interrupt registered
-	 * call irq handler directly
+	 * and we know an IRQ was signaled, then call the irq handler directly.
+	 * Otherwise do the full probe.
 	 */
 	func = card->sdio_single_irq;
-	if (func) {
+	if (func && host->sdio_irq_pending) {
 		func->irq_handler(func);
 		return 1;
 	}
@@ -116,7 +118,8 @@
 		ret = __mmc_claim_host(host, &host->sdio_irq_thread_abort);
 		if (ret)
 			break;
-		ret = process_sdio_pending_irqs(host->card);
+		ret = process_sdio_pending_irqs(host);
+		host->sdio_irq_pending = false;
 		mmc_release_host(host);
 
 		/*
diff --git a/drivers/mmc/host/Kconfig b/drivers/mmc/host/Kconfig
index 2bc06e7..aa131b3 100644
--- a/drivers/mmc/host/Kconfig
+++ b/drivers/mmc/host/Kconfig
@@ -278,10 +278,13 @@
 	  Choose which driver to use for the Atmel MCI Silicon
 
 config MMC_AT91
-	tristate "AT91 SD/MMC Card Interface support"
+	tristate "AT91 SD/MMC Card Interface support (DEPRECATED)"
 	depends on ARCH_AT91
 	help
-	  This selects the AT91 MCI controller.
+	  This selects the AT91 MCI controller. This driver will
+	  be removed soon (for more information have a look at
+	  Documentation/feature-removal-schedule.txt). Please use
+	  MMC_ATMEL_MCI.
 
 	  If unsure, say N.
 
@@ -307,16 +310,6 @@
 
 	  If unsure, say N.
 
-config MMC_IMX
-	tristate "Motorola i.MX Multimedia Card Interface support"
-	depends on ARCH_MX1
-	help
-	  This selects the Motorola i.MX Multimedia card Interface.
-	  If you have a i.MX platform with a Multimedia Card slot,
-	  say Y or M here.
-
-	  If unsure, say N.
-
 config MMC_MSM
 	tristate "Qualcomm SDCC Controller Support"
 	depends on MMC && ARCH_MSM
diff --git a/drivers/mmc/host/Makefile b/drivers/mmc/host/Makefile
index 3e7e26d..8922b06 100644
--- a/drivers/mmc/host/Makefile
+++ b/drivers/mmc/host/Makefile
@@ -4,7 +4,6 @@
 
 obj-$(CONFIG_MMC_ARMMMCI)	+= mmci.o
 obj-$(CONFIG_MMC_PXA)		+= pxamci.o
-obj-$(CONFIG_MMC_IMX)		+= imxmmc.o
 obj-$(CONFIG_MMC_MXC)		+= mxcmmc.o
 obj-$(CONFIG_MMC_MXS)		+= mxs-mmc.o
 obj-$(CONFIG_MMC_SDHCI)		+= sdhci.o
diff --git a/drivers/mmc/host/atmel-mci-regs.h b/drivers/mmc/host/atmel-mci-regs.h
index 787aba1..ab56f7d 100644
--- a/drivers/mmc/host/atmel-mci-regs.h
+++ b/drivers/mmc/host/atmel-mci-regs.h
@@ -140,4 +140,18 @@
 #define atmci_writel(port,reg,value)			\
 	__raw_writel((value), (port)->regs + reg)
 
+/*
+ * Fix sconfig's burst size according to the Atmel MCI encoding. We need to
+ * convert them as: 1 -> 0, 4 -> 1, 8 -> 2, 16 -> 3.
+ *
+ * This can be done by finding the most significant bit set.
+ */
+static inline unsigned int atmci_convert_chksize(unsigned int maxburst)
+{
+	if (maxburst > 1)
+		return fls(maxburst) - 2;
+	else
+		return 0;
+}
+
 #endif /* __DRIVERS_MMC_ATMEL_MCI_H__ */
diff --git a/drivers/mmc/host/atmel-mci.c b/drivers/mmc/host/atmel-mci.c
index e94476b..f2c115e 100644
--- a/drivers/mmc/host/atmel-mci.c
+++ b/drivers/mmc/host/atmel-mci.c
@@ -45,19 +45,19 @@
 #define ATMCI_DMA_THRESHOLD	16
 
 enum {
-	EVENT_CMD_COMPLETE = 0,
+	EVENT_CMD_RDY = 0,
 	EVENT_XFER_COMPLETE,
-	EVENT_DATA_COMPLETE,
+	EVENT_NOTBUSY,
 	EVENT_DATA_ERROR,
 };
 
 enum atmel_mci_state {
 	STATE_IDLE = 0,
 	STATE_SENDING_CMD,
-	STATE_SENDING_DATA,
-	STATE_DATA_BUSY,
+	STATE_DATA_XFER,
+	STATE_WAITING_NOTBUSY,
 	STATE_SENDING_STOP,
-	STATE_DATA_ERROR,
+	STATE_END_REQUEST,
 };
 
 enum atmci_xfer_dir {
@@ -78,6 +78,9 @@
 	bool    has_highspeed;
 	bool    has_rwproof;
 	bool	has_odd_clk_div;
+	bool	has_bad_data_ordering;
+	bool	need_reset_after_xfer;
+	bool	need_blksz_mul_4;
 };
 
 struct atmel_mci_dma {
@@ -91,6 +94,11 @@
  * @regs: Pointer to MMIO registers.
  * @sg: Scatterlist entry currently being processed by PIO or PDC code.
  * @pio_offset: Offset into the current scatterlist entry.
+ * @buffer: Buffer used if we don't have the r/w proof capability. We
+ *      don't have the time to switch pdc buffers so we have to use only
+ *      one buffer for the full transaction.
+ * @buf_size: size of the buffer.
+ * @phys_buf_addr: buffer address needed for pdc.
  * @cur_slot: The slot which is currently using the controller.
  * @mrq: The request currently being processed on @cur_slot,
  *	or NULL if the controller is idle.
@@ -116,6 +124,7 @@
  * @queue: List of slots waiting for access to the controller.
  * @need_clock_update: Update the clock rate before the next request.
  * @need_reset: Reset controller before next request.
+ * @timer: Timer used as a software fallback when the data timeout error flag cannot rise.
  * @mode_reg: Value of the MR register.
  * @cfg_reg: Value of the CFG register.
  * @bus_hz: The rate of @mck in Hz. This forms the basis for MMC bus
@@ -166,6 +175,9 @@
 
 	struct scatterlist	*sg;
 	unsigned int		pio_offset;
+	unsigned int		*buffer;
+	unsigned int		buf_size;
+	dma_addr_t		buf_phys_addr;
 
 	struct atmel_mci_slot	*cur_slot;
 	struct mmc_request	*mrq;
@@ -189,6 +201,7 @@
 
 	bool			need_clock_update;
 	bool			need_reset;
+	struct timer_list	timer;
 	u32			mode_reg;
 	u32			cfg_reg;
 	unsigned long		bus_hz;
@@ -480,6 +493,32 @@
 	dev_err(&mmc->class_dev, "failed to initialize debugfs for slot\n");
 }
 
+static inline unsigned int atmci_get_version(struct atmel_mci *host)
+{
+	return atmci_readl(host, ATMCI_VERSION) & 0x00000fff;
+}
+
+static void atmci_timeout_timer(unsigned long data)
+{
+	struct atmel_mci *host;
+
+	host = (struct atmel_mci *)data;
+
+	dev_dbg(&host->pdev->dev, "software timeout\n");
+
+	if (host->mrq->cmd->data) {
+		host->mrq->cmd->data->error = -ETIMEDOUT;
+		host->data = NULL;
+	} else {
+		host->mrq->cmd->error = -ETIMEDOUT;
+		host->cmd = NULL;
+	}
+	host->need_reset = 1;
+	host->state = STATE_END_REQUEST;
+	smp_wmb();
+	tasklet_schedule(&host->tasklet);
+}
+
 static inline unsigned int atmci_ns_to_clocks(struct atmel_mci *host,
 					unsigned int ns)
 {
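The new timer gives the driver a software data timeout: it is armed for roughly two seconds when a request starts (see the mod_timer() call further down) and, if it ever fires, the callback above fails the request with -ETIMEDOUT and pushes the state machine to STATE_END_REQUEST. A rough userspace sketch of that arm/complete/expire pattern, with hypothetical names only:

#include <stdbool.h>
#include <stdio.h>
#include <time.h>

/* Hypothetical request state, for illustration only. */
struct fake_request {
	time_t deadline;   /* armed when the request starts     */
	bool   completed;  /* set by the normal completion path */
	int    error;
};

/* The driver arms the real timer with mod_timer() in atmci_start_request(). */
static void arm_timeout(struct fake_request *req, int seconds)
{
	req->deadline = time(NULL) + seconds;
}

/* Poll-style stand-in for the timer callback: if the hardware never
 * reported completion, fail the request ourselves. */
static void check_timeout(struct fake_request *req)
{
	if (!req->completed && time(NULL) >= req->deadline) {
		req->error = -1;  /* stands in for -ETIMEDOUT */
		printf("software timeout: forcing the request to end\n");
	}
}

int main(void)
{
	struct fake_request req = { 0 };

	arm_timeout(&req, 2);   /* the driver uses a 2000 ms window        */
	req.completed = true;   /* normal path: the driver del_timer()s it */
	check_timeout(&req);    /* no-op, the request already completed    */
	return 0;
}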
@@ -591,6 +630,7 @@
 
 static void atmci_send_stop_cmd(struct atmel_mci *host, struct mmc_data *data)
 {
+	dev_dbg(&host->pdev->dev, "send stop command\n");
 	atmci_send_command(host, data->stop, host->stop_cmdr);
 	atmci_writel(host, ATMCI_IER, ATMCI_CMDRDY);
 }
@@ -603,6 +643,7 @@
 	enum atmci_xfer_dir dir, enum atmci_pdc_buf buf_nb)
 {
 	u32 pointer_reg, counter_reg;
+	unsigned int buf_size;
 
 	if (dir == XFER_RECEIVE) {
 		pointer_reg = ATMEL_PDC_RPR;
@@ -617,8 +658,15 @@
 		counter_reg += ATMEL_PDC_SCND_BUF_OFF;
 	}
 
-	atmci_writel(host, pointer_reg, sg_dma_address(host->sg));
-	if (host->data_size <= sg_dma_len(host->sg)) {
+	if (!host->caps.has_rwproof) {
+		buf_size = host->buf_size;
+		atmci_writel(host, pointer_reg, host->buf_phys_addr);
+	} else {
+		buf_size = sg_dma_len(host->sg);
+		atmci_writel(host, pointer_reg, sg_dma_address(host->sg));
+	}
+
+	if (host->data_size <= buf_size) {
 		if (host->data_size & 0x3) {
 			/* If size is different from modulo 4, transfer bytes */
 			atmci_writel(host, counter_reg, host->data_size);
@@ -670,7 +718,20 @@
  */
 static void atmci_pdc_complete(struct atmel_mci *host)
 {
+	int transfer_size = host->data->blocks * host->data->blksz;
+	int i;
+
 	atmci_writel(host, ATMEL_PDC_PTCR, ATMEL_PDC_RXTDIS | ATMEL_PDC_TXTDIS);
+
+	if ((!host->caps.has_rwproof)
+	    && (host->data->flags & MMC_DATA_READ)) {
+		if (host->caps.has_bad_data_ordering)
+			for (i = 0; i < transfer_size; i++)
+				host->buffer[i] = swab32(host->buffer[i]);
+		sg_copy_from_buffer(host->data->sg, host->data->sg_len,
+		                    host->buffer, transfer_size);
+	}
+
 	atmci_pdc_cleanup(host);
 
 	/*
@@ -678,9 +739,10 @@
 	 * to send the stop command or waiting for NBUSY in this case.
 	 */
 	if (host->data) {
+		dev_dbg(&host->pdev->dev,
+		        "(%s) set pending xfer complete\n", __func__);
 		atmci_set_pending(host, EVENT_XFER_COMPLETE);
 		tasklet_schedule(&host->tasklet);
-		atmci_writel(host, ATMCI_IER, ATMCI_NOTBUSY);
 	}
 }
 
@@ -716,6 +778,8 @@
 	 * to send the stop command or waiting for NBUSY in this case.
 	 */
 	if (data) {
+		dev_dbg(&host->pdev->dev,
+		        "(%s) set pending xfer complete\n", __func__);
 		atmci_set_pending(host, EVENT_XFER_COMPLETE);
 		tasklet_schedule(&host->tasklet);
 
@@ -791,6 +855,7 @@
 	u32 iflags, tmp;
 	unsigned int sg_len;
 	enum dma_data_direction dir;
+	int i;
 
 	data->error = -EINPROGRESS;
 
@@ -806,7 +871,7 @@
 		iflags |= ATMCI_ENDRX | ATMCI_RXBUFF;
 	} else {
 		dir = DMA_TO_DEVICE;
-		iflags |= ATMCI_ENDTX | ATMCI_TXBUFE;
+		iflags |= ATMCI_ENDTX | ATMCI_TXBUFE | ATMCI_BLKE;
 	}
 
 	/* Set BLKLEN */
@@ -818,6 +883,16 @@
 	/* Configure PDC */
 	host->data_size = data->blocks * data->blksz;
 	sg_len = dma_map_sg(&host->pdev->dev, data->sg, data->sg_len, dir);
+
+	if ((!host->caps.has_rwproof)
+	    && (host->data->flags & MMC_DATA_WRITE)) {
+		sg_copy_to_buffer(host->data->sg, host->data->sg_len,
+		                  host->buffer, host->data_size);
+		if (host->caps.has_bad_data_ordering)
+			for (i = 0; i < host->data_size; i++)
+				host->buffer[i] = swab32(host->buffer[i]);
+	}
+
 	if (host->data_size)
 		atmci_pdc_set_both_buf(host,
 			((dir == DMA_FROM_DEVICE) ? XFER_RECEIVE : XFER_TRANSMIT));
@@ -835,6 +910,7 @@
 	enum dma_data_direction		direction;
 	enum dma_transfer_direction	slave_dirn;
 	unsigned int			sglen;
+	u32				maxburst;
 	u32 iflags;
 
 	data->error = -EINPROGRESS;
@@ -868,17 +944,18 @@
 	if (!chan)
 		return -ENODEV;
 
-	if (host->caps.has_dma)
-		atmci_writel(host, ATMCI_DMA, ATMCI_DMA_CHKSIZE(3) | ATMCI_DMAEN);
-
 	if (data->flags & MMC_DATA_READ) {
 		direction = DMA_FROM_DEVICE;
 		host->dma_conf.direction = slave_dirn = DMA_DEV_TO_MEM;
+		maxburst = atmci_convert_chksize(host->dma_conf.src_maxburst);
 	} else {
 		direction = DMA_TO_DEVICE;
 		host->dma_conf.direction = slave_dirn = DMA_MEM_TO_DEV;
+		maxburst = atmci_convert_chksize(host->dma_conf.dst_maxburst);
 	}
 
+	atmci_writel(host, ATMCI_DMA, ATMCI_DMA_CHKSIZE(maxburst) | ATMCI_DMAEN);
+
 	sglen = dma_map_sg(chan->device->dev, data->sg,
 			data->sg_len, direction);
 
@@ -931,6 +1008,8 @@
 
 static void atmci_stop_transfer(struct atmel_mci *host)
 {
+	dev_dbg(&host->pdev->dev,
+	        "(%s) set pending xfer complete\n", __func__);
 	atmci_set_pending(host, EVENT_XFER_COMPLETE);
 	atmci_writel(host, ATMCI_IER, ATMCI_NOTBUSY);
 }
@@ -940,8 +1019,7 @@
  */
 static void atmci_stop_transfer_pdc(struct atmel_mci *host)
 {
-	atmci_set_pending(host, EVENT_XFER_COMPLETE);
-	atmci_writel(host, ATMCI_IER, ATMCI_NOTBUSY);
+	atmci_writel(host, ATMEL_PDC_PTCR, ATMEL_PDC_RXTDIS | ATMEL_PDC_TXTDIS);
 }
 
 static void atmci_stop_transfer_dma(struct atmel_mci *host)
@@ -953,6 +1031,8 @@
 		atmci_dma_cleanup(host);
 	} else {
 		/* Data transfer was stopped by the interrupt handler */
+		dev_dbg(&host->pdev->dev,
+		        "(%s) set pending xfer complete\n", __func__);
 		atmci_set_pending(host, EVENT_XFER_COMPLETE);
 		atmci_writel(host, ATMCI_IER, ATMCI_NOTBUSY);
 	}
@@ -977,9 +1057,12 @@
 
 	host->pending_events = 0;
 	host->completed_events = 0;
+	host->cmd_status = 0;
 	host->data_status = 0;
 
-	if (host->need_reset) {
+	dev_dbg(&host->pdev->dev, "start request: cmd %u\n", mrq->cmd->opcode);
+
+	if (host->need_reset || host->caps.need_reset_after_xfer) {
 		iflags = atmci_readl(host, ATMCI_IMR);
 		iflags &= (ATMCI_SDIOIRQA | ATMCI_SDIOIRQB);
 		atmci_writel(host, ATMCI_CR, ATMCI_CR_SWRST);
@@ -994,7 +1077,7 @@
 
 	iflags = atmci_readl(host, ATMCI_IMR);
 	if (iflags & ~(ATMCI_SDIOIRQA | ATMCI_SDIOIRQB))
-		dev_warn(&slot->mmc->class_dev, "WARNING: IMR=0x%08x\n",
+		dev_dbg(&slot->mmc->class_dev, "WARNING: IMR=0x%08x\n",
 				iflags);
 
 	if (unlikely(test_and_clear_bit(ATMCI_CARD_NEED_INIT, &slot->flags))) {
@@ -1043,6 +1126,8 @@
 	 * prepared yet.)
 	 */
 	atmci_writel(host, ATMCI_IER, iflags);
+
+	mod_timer(&host->timer, jiffies + msecs_to_jiffies(2000));
 }
 
 static void atmci_queue_request(struct atmel_mci *host,
@@ -1057,6 +1142,7 @@
 		host->state = STATE_SENDING_CMD;
 		atmci_start_request(host, slot);
 	} else {
+		dev_dbg(&host->pdev->dev, "queue request\n");
 		list_add_tail(&slot->queue_node, &host->queue);
 	}
 	spin_unlock_bh(&host->lock);
@@ -1069,6 +1155,7 @@
 	struct mmc_data		*data;
 
 	WARN_ON(slot->mrq);
+	dev_dbg(&host->pdev->dev, "MRQ: cmd %u\n", mrq->cmd->opcode);
 
 	/*
 	 * We may "know" the card is gone even though there's still an
@@ -1308,6 +1395,8 @@
 		host->state = STATE_IDLE;
 	}
 
+	del_timer(&host->timer);
+
 	spin_unlock(&host->lock);
 	mmc_request_done(prev_mmc, mrq);
 	spin_lock(&host->lock);
@@ -1330,21 +1419,13 @@
 		cmd->error = -EILSEQ;
 	else if (status & (ATMCI_RINDE | ATMCI_RDIRE | ATMCI_RENDE))
 		cmd->error = -EIO;
-	else
-		cmd->error = 0;
-
-	if (cmd->error) {
-		dev_dbg(&host->pdev->dev,
-			"command error: status=0x%08x\n", status);
-
-		if (cmd->data) {
-			host->stop_transfer(host);
-			host->data = NULL;
-			atmci_writel(host, ATMCI_IDR, ATMCI_NOTBUSY
-					| ATMCI_TXRDY | ATMCI_RXRDY
-					| ATMCI_DATA_ERROR_FLAGS);
+	else if (host->mrq->data && (host->mrq->data->blksz & 3)) {
+		if (host->caps.need_blksz_mul_4) {
+			cmd->error = -EINVAL;
+			host->need_reset = 1;
 		}
-	}
+	} else
+		cmd->error = 0;
 }
 
 static void atmci_detect_change(unsigned long data)
@@ -1407,23 +1488,21 @@
 					break;
 				case STATE_SENDING_CMD:
 					mrq->cmd->error = -ENOMEDIUM;
-					if (!mrq->data)
-						break;
-					/* fall through */
-				case STATE_SENDING_DATA:
+					if (mrq->data)
+						host->stop_transfer(host);
+					break;
+				case STATE_DATA_XFER:
 					mrq->data->error = -ENOMEDIUM;
 					host->stop_transfer(host);
 					break;
-				case STATE_DATA_BUSY:
-				case STATE_DATA_ERROR:
-					if (mrq->data->error == -EINPROGRESS)
-						mrq->data->error = -ENOMEDIUM;
-					if (!mrq->stop)
-						break;
-					/* fall through */
+				case STATE_WAITING_NOTBUSY:
+					mrq->data->error = -ENOMEDIUM;
+					break;
 				case STATE_SENDING_STOP:
 					mrq->stop->error = -ENOMEDIUM;
 					break;
+				case STATE_END_REQUEST:
+					break;
 				}
 
 				atmci_request_end(host, mrq);
@@ -1451,7 +1530,6 @@
 	struct atmel_mci	*host = (struct atmel_mci *)priv;
 	struct mmc_request	*mrq = host->mrq;
 	struct mmc_data		*data = host->data;
-	struct mmc_command	*cmd = host->cmd;
 	enum atmel_mci_state	state = host->state;
 	enum atmel_mci_state	prev_state;
 	u32			status;
@@ -1467,107 +1545,186 @@
 
 	do {
 		prev_state = state;
+		dev_dbg(&host->pdev->dev, "FSM: state=%d\n", state);
 
 		switch (state) {
 		case STATE_IDLE:
 			break;
 
 		case STATE_SENDING_CMD:
+			/*
+			 * Command has been sent, we are waiting for command
+			 * ready. Then we have three possible next states:
+			 * END_REQUEST by default, WAITING_NOTBUSY if it's a
+			 * command that needs it, or DATA_XFER if there is data.
+			 */
+			dev_dbg(&host->pdev->dev, "FSM: cmd ready?\n");
 			if (!atmci_test_and_clear_pending(host,
-						EVENT_CMD_COMPLETE))
+						EVENT_CMD_RDY))
 				break;
 
+			dev_dbg(&host->pdev->dev, "set completed cmd ready\n");
 			host->cmd = NULL;
-			atmci_set_completed(host, EVENT_CMD_COMPLETE);
+			atmci_set_completed(host, EVENT_CMD_RDY);
 			atmci_command_complete(host, mrq->cmd);
-			if (!mrq->data || cmd->error) {
-				atmci_request_end(host, host->mrq);
-				goto unlock;
-			}
+			if (mrq->data) {
+				dev_dbg(&host->pdev->dev,
+				        "command with data transfer");
+				/*
+				 * If there is a command error don't start
+				 * data transfer.
+				 */
+				if (mrq->cmd->error) {
+					host->stop_transfer(host);
+					host->data = NULL;
+					atmci_writel(host, ATMCI_IDR,
+					             ATMCI_TXRDY | ATMCI_RXRDY
+					             | ATMCI_DATA_ERROR_FLAGS);
+					state = STATE_END_REQUEST;
+				} else
+					state = STATE_DATA_XFER;
+			} else if ((!mrq->data) && (mrq->cmd->flags & MMC_RSP_BUSY)) {
+				dev_dbg(&host->pdev->dev,
+				        "command response need waiting notbusy");
+				atmci_writel(host, ATMCI_IER, ATMCI_NOTBUSY);
+				state = STATE_WAITING_NOTBUSY;
+			} else
+				state = STATE_END_REQUEST;
 
-			prev_state = state = STATE_SENDING_DATA;
-			/* fall through */
+			break;
 
-		case STATE_SENDING_DATA:
+		case STATE_DATA_XFER:
 			if (atmci_test_and_clear_pending(host,
 						EVENT_DATA_ERROR)) {
-				host->stop_transfer(host);
-				if (data->stop)
-					atmci_send_stop_cmd(host, data);
-				state = STATE_DATA_ERROR;
+				dev_dbg(&host->pdev->dev, "set completed data error\n");
+				atmci_set_completed(host, EVENT_DATA_ERROR);
+				state = STATE_END_REQUEST;
 				break;
 			}
 
+			/*
+			 * A data transfer is in progress. The event expected
+			 * to move to the next state depends on the data transfer
+			 * type (PDC or DMA). Once the transfer is done we can
+			 * move to the next step, which is WAITING_NOTBUSY in the
+			 * write case and directly SENDING_STOP in the read case.
+			 */
+			dev_dbg(&host->pdev->dev, "FSM: xfer complete?\n");
 			if (!atmci_test_and_clear_pending(host,
 						EVENT_XFER_COMPLETE))
 				break;
 
+			dev_dbg(&host->pdev->dev,
+			        "(%s) set completed xfer complete\n",
+				__func__);
 			atmci_set_completed(host, EVENT_XFER_COMPLETE);
-			prev_state = state = STATE_DATA_BUSY;
-			/* fall through */
 
-		case STATE_DATA_BUSY:
-			if (!atmci_test_and_clear_pending(host,
-						EVENT_DATA_COMPLETE))
-				break;
-
-			host->data = NULL;
-			atmci_set_completed(host, EVENT_DATA_COMPLETE);
-			status = host->data_status;
-			if (unlikely(status & ATMCI_DATA_ERROR_FLAGS)) {
-				if (status & ATMCI_DTOE) {
-					dev_dbg(&host->pdev->dev,
-							"data timeout error\n");
-					data->error = -ETIMEDOUT;
-				} else if (status & ATMCI_DCRCE) {
-					dev_dbg(&host->pdev->dev,
-							"data CRC error\n");
-					data->error = -EILSEQ;
-				} else {
-					dev_dbg(&host->pdev->dev,
-						"data FIFO error (status=%08x)\n",
-						status);
-					data->error = -EIO;
-				}
+			if (host->data->flags & MMC_DATA_WRITE) {
+				atmci_writel(host, ATMCI_IER, ATMCI_NOTBUSY);
+				state = STATE_WAITING_NOTBUSY;
+			} else if (host->mrq->stop) {
+				atmci_writel(host, ATMCI_IER, ATMCI_CMDRDY);
+				atmci_send_stop_cmd(host, data);
+				state = STATE_SENDING_STOP;
 			} else {
+				host->data = NULL;
 				data->bytes_xfered = data->blocks * data->blksz;
 				data->error = 0;
-				atmci_writel(host, ATMCI_IDR, ATMCI_DATA_ERROR_FLAGS);
+				state = STATE_END_REQUEST;
 			}
+			break;
 
-			if (!data->stop) {
-				atmci_request_end(host, host->mrq);
-				goto unlock;
-			}
+		case STATE_WAITING_NOTBUSY:
+			/*
+			 * We can be in this state for two reasons: a command
+			 * that requires waiting for the not-busy signal (stop
+			 * command included) or a write operation. In the latter
+			 * case, we need to send a stop command.
+			 */
+			dev_dbg(&host->pdev->dev, "FSM: not busy?\n");
+			if (!atmci_test_and_clear_pending(host,
+						EVENT_NOTBUSY))
+				break;
 
-			prev_state = state = STATE_SENDING_STOP;
-			if (!data->error)
-				atmci_send_stop_cmd(host, data);
-			/* fall through */
+			dev_dbg(&host->pdev->dev, "set completed not busy\n");
+			atmci_set_completed(host, EVENT_NOTBUSY);
+
+			if (host->data) {
+				/*
+				 * For some commands such as CMD53, even if
+				 * there is data transfer, there is no stop
+				 * command to send.
+				 */
+				if (host->mrq->stop) {
+					atmci_writel(host, ATMCI_IER,
+					             ATMCI_CMDRDY);
+					atmci_send_stop_cmd(host, data);
+					state = STATE_SENDING_STOP;
+				} else {
+					host->data = NULL;
+					data->bytes_xfered = data->blocks
+					                     * data->blksz;
+					data->error = 0;
+					state = STATE_END_REQUEST;
+				}
+			} else
+				state = STATE_END_REQUEST;
+			break;
 
 		case STATE_SENDING_STOP:
+			/*
+			 * In this state, it is important to set host->data to
+			 * NULL (which is tested in the waiting notbusy state)
+			 * in order to go to the end request state instead of
+			 * sending stop again.
+			 */
+			dev_dbg(&host->pdev->dev, "FSM: cmd ready?\n");
 			if (!atmci_test_and_clear_pending(host,
-						EVENT_CMD_COMPLETE))
+						EVENT_CMD_RDY))
 				break;
 
+			dev_dbg(&host->pdev->dev, "FSM: cmd ready\n");
 			host->cmd = NULL;
+			host->data = NULL;
+			data->bytes_xfered = data->blocks * data->blksz;
+			data->error = 0;
 			atmci_command_complete(host, mrq->stop);
+			if (mrq->stop->error) {
+				host->stop_transfer(host);
+				atmci_writel(host, ATMCI_IDR,
+				             ATMCI_TXRDY | ATMCI_RXRDY
+				             | ATMCI_DATA_ERROR_FLAGS);
+				state = STATE_END_REQUEST;
+			} else {
+				atmci_writel(host, ATMCI_IER, ATMCI_NOTBUSY);
+				state = STATE_WAITING_NOTBUSY;
+			}
+			break;
+
+		case STATE_END_REQUEST:
+			atmci_writel(host, ATMCI_IDR, ATMCI_TXRDY | ATMCI_RXRDY
+			                   | ATMCI_DATA_ERROR_FLAGS);
+			status = host->data_status;
+			if (unlikely(status)) {
+				host->stop_transfer(host);
+				host->data = NULL;
+				if (status & ATMCI_DTOE) {
+					data->error = -ETIMEDOUT;
+				} else if (status & ATMCI_DCRCE) {
+					data->error = -EILSEQ;
+				} else {
+					data->error = -EIO;
+				}
+			}
+
 			atmci_request_end(host, host->mrq);
-			goto unlock;
-
-		case STATE_DATA_ERROR:
-			if (!atmci_test_and_clear_pending(host,
-						EVENT_XFER_COMPLETE))
-				break;
-
-			state = STATE_DATA_BUSY;
+			state = STATE_IDLE;
 			break;
 		}
 	} while (state != prev_state);
 
 	host->state = state;
 
-unlock:
 	spin_unlock(&host->lock);
 }
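The comments in the tasklet describe a mostly linear flow: SENDING_CMD waits for command-ready, data requests then go through DATA_XFER, writes (and busy-signalling commands) pass through WAITING_NOTBUSY, a stop command adds a SENDING_STOP leg, and everything funnels into END_REQUEST. A compact standalone sketch of that event-driven progression for the write-with-stop case, with made-up names and no hardware access:

#include <stdio.h>

enum state { SENDING_CMD, DATA_XFER, WAITING_NOTBUSY, SENDING_STOP, END_REQUEST };
enum event { CMD_RDY, XFER_COMPLETE, NOTBUSY };

/* One transition of the simplified flow; sent_stop stands in for the
 * driver's "host->data is already NULL" test after the stop command. */
static enum state step(enum state s, enum event ev, int sent_stop)
{
	switch (s) {
	case SENDING_CMD:
		return ev == CMD_RDY ? DATA_XFER : s;
	case DATA_XFER:
		return ev == XFER_COMPLETE ? WAITING_NOTBUSY : s;
	case WAITING_NOTBUSY:
		if (ev != NOTBUSY)
			return s;
		return sent_stop ? END_REQUEST : SENDING_STOP;
	case SENDING_STOP:
		return ev == CMD_RDY ? WAITING_NOTBUSY : s;
	default:
		return END_REQUEST;
	}
}

int main(void)
{
	enum state s = SENDING_CMD;

	s = step(s, CMD_RDY, 0);        /* command accepted -> DATA_XFER       */
	s = step(s, XFER_COMPLETE, 0);  /* write finished   -> WAITING_NOTBUSY */
	s = step(s, NOTBUSY, 0);        /* card not busy    -> SENDING_STOP    */
	s = step(s, CMD_RDY, 1);        /* stop accepted    -> WAITING_NOTBUSY */
	s = step(s, NOTBUSY, 1);        /* done             -> END_REQUEST     */
	printf("final state %d (END_REQUEST is %d)\n", s, END_REQUEST);
	return 0;
}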
 
@@ -1620,9 +1777,6 @@
 						| ATMCI_DATA_ERROR_FLAGS));
 			host->data_status = status;
 			data->bytes_xfered += nbytes;
-			smp_wmb();
-			atmci_set_pending(host, EVENT_DATA_ERROR);
-			tasklet_schedule(&host->tasklet);
 			return;
 		}
 	} while (status & ATMCI_RXRDY);
@@ -1691,9 +1845,6 @@
 						| ATMCI_DATA_ERROR_FLAGS));
 			host->data_status = status;
 			data->bytes_xfered += nbytes;
-			smp_wmb();
-			atmci_set_pending(host, EVENT_DATA_ERROR);
-			tasklet_schedule(&host->tasklet);
 			return;
 		}
 	} while (status & ATMCI_TXRDY);
@@ -1711,16 +1862,6 @@
 	atmci_set_pending(host, EVENT_XFER_COMPLETE);
 }
 
-static void atmci_cmd_interrupt(struct atmel_mci *host, u32 status)
-{
-	atmci_writel(host, ATMCI_IDR, ATMCI_CMDRDY);
-
-	host->cmd_status = status;
-	smp_wmb();
-	atmci_set_pending(host, EVENT_CMD_COMPLETE);
-	tasklet_schedule(&host->tasklet);
-}
-
 static void atmci_sdio_interrupt(struct atmel_mci *host, u32 status)
 {
 	int	i;
@@ -1748,17 +1889,21 @@
 			break;
 
 		if (pending & ATMCI_DATA_ERROR_FLAGS) {
+			dev_dbg(&host->pdev->dev, "IRQ: data error\n");
 			atmci_writel(host, ATMCI_IDR, ATMCI_DATA_ERROR_FLAGS
-					| ATMCI_RXRDY | ATMCI_TXRDY);
-			pending &= atmci_readl(host, ATMCI_IMR);
+					| ATMCI_RXRDY | ATMCI_TXRDY
+					| ATMCI_ENDRX | ATMCI_ENDTX
+					| ATMCI_RXBUFF | ATMCI_TXBUFE);
 
 			host->data_status = status;
+			dev_dbg(&host->pdev->dev, "set pending data error\n");
 			smp_wmb();
 			atmci_set_pending(host, EVENT_DATA_ERROR);
 			tasklet_schedule(&host->tasklet);
 		}
 
 		if (pending & ATMCI_TXBUFE) {
+			dev_dbg(&host->pdev->dev, "IRQ: tx buffer empty\n");
 			atmci_writel(host, ATMCI_IDR, ATMCI_TXBUFE);
 			atmci_writel(host, ATMCI_IDR, ATMCI_ENDTX);
 			/*
@@ -1774,6 +1919,7 @@
 				atmci_pdc_complete(host);
 			}
 		} else if (pending & ATMCI_ENDTX) {
+			dev_dbg(&host->pdev->dev, "IRQ: end of tx buffer\n");
 			atmci_writel(host, ATMCI_IDR, ATMCI_ENDTX);
 
 			if (host->data_size) {
@@ -1784,6 +1930,7 @@
 		}
 
 		if (pending & ATMCI_RXBUFF) {
+			dev_dbg(&host->pdev->dev, "IRQ: rx buffer full\n");
 			atmci_writel(host, ATMCI_IDR, ATMCI_RXBUFF);
 			atmci_writel(host, ATMCI_IDR, ATMCI_ENDRX);
 			/*
@@ -1799,6 +1946,7 @@
 				atmci_pdc_complete(host);
 			}
 		} else if (pending & ATMCI_ENDRX) {
+			dev_dbg(&host->pdev->dev, "IRQ: end of rx buffer\n");
 			atmci_writel(host, ATMCI_IDR, ATMCI_ENDRX);
 
 			if (host->data_size) {
@@ -1808,23 +1956,44 @@
 			}
 		}
 
-
-		if (pending & ATMCI_NOTBUSY) {
-			atmci_writel(host, ATMCI_IDR,
-					ATMCI_DATA_ERROR_FLAGS | ATMCI_NOTBUSY);
-			if (!host->data_status)
-				host->data_status = status;
+		/*
+		 * Early MCI IPs, mainly the ones having a PDC, have some
+		 * issues with the notbusy signal. You can't get it after
+		 * data transmission if you have not sent a stop command.
+		 * The appropriate workaround is to use the BLKE signal.
+		 */
+		if (pending & ATMCI_BLKE) {
+			dev_dbg(&host->pdev->dev, "IRQ: blke\n");
+			atmci_writel(host, ATMCI_IDR, ATMCI_BLKE);
 			smp_wmb();
-			atmci_set_pending(host, EVENT_DATA_COMPLETE);
+			dev_dbg(&host->pdev->dev, "set pending notbusy\n");
+			atmci_set_pending(host, EVENT_NOTBUSY);
 			tasklet_schedule(&host->tasklet);
 		}
+
+		if (pending & ATMCI_NOTBUSY) {
+			dev_dbg(&host->pdev->dev, "IRQ: not_busy\n");
+			atmci_writel(host, ATMCI_IDR, ATMCI_NOTBUSY);
+			smp_wmb();
+			dev_dbg(&host->pdev->dev, "set pending notbusy\n");
+			atmci_set_pending(host, EVENT_NOTBUSY);
+			tasklet_schedule(&host->tasklet);
+		}
+
 		if (pending & ATMCI_RXRDY)
 			atmci_read_data_pio(host);
 		if (pending & ATMCI_TXRDY)
 			atmci_write_data_pio(host);
 
-		if (pending & ATMCI_CMDRDY)
-			atmci_cmd_interrupt(host, status);
+		if (pending & ATMCI_CMDRDY) {
+			dev_dbg(&host->pdev->dev, "IRQ: cmd ready\n");
+			atmci_writel(host, ATMCI_IDR, ATMCI_CMDRDY);
+			host->cmd_status = status;
+			smp_wmb();
+			dev_dbg(&host->pdev->dev, "set pending cmd rdy\n");
+			atmci_set_pending(host, EVENT_CMD_RDY);
+			tasklet_schedule(&host->tasklet);
+		}
 
 		if (pending & (ATMCI_SDIOIRQA | ATMCI_SDIOIRQB))
 			atmci_sdio_interrupt(host, status);
@@ -1877,13 +2046,26 @@
 		mmc->caps |= MMC_CAP_SDIO_IRQ;
 	if (host->caps.has_highspeed)
 		mmc->caps |= MMC_CAP_SD_HIGHSPEED;
-	if (slot_data->bus_width >= 4)
+	/*
+	 * Without the read/write proof capability, it is strongly suggested
+	 * to use a 1-bit data bus to prevent FIFO underruns and overruns,
+	 * which would corrupt data.
+	 */
+	if ((slot_data->bus_width >= 4) && host->caps.has_rwproof)
 		mmc->caps |= MMC_CAP_4_BIT_DATA;
 
-	mmc->max_segs = 64;
-	mmc->max_req_size = 32768 * 512;
-	mmc->max_blk_size = 32768;
-	mmc->max_blk_count = 512;
+	if (atmci_get_version(host) < 0x200) {
+		mmc->max_segs = 256;
+		mmc->max_blk_size = 4095;
+		mmc->max_blk_count = 256;
+		mmc->max_req_size = mmc->max_blk_size * mmc->max_blk_count;
+		mmc->max_seg_size = mmc->max_blk_size * mmc->max_segs;
+	} else {
+		mmc->max_segs = 64;
+		mmc->max_req_size = 32768 * 512;
+		mmc->max_blk_size = 32768;
+		mmc->max_blk_count = 512;
+	}
 
 	/* Assume card is present initially */
 	set_bit(ATMCI_CARD_PRESENT, &slot->flags);
@@ -2007,11 +2189,6 @@
 	}
 }
 
-static inline unsigned int atmci_get_version(struct atmel_mci *host)
-{
-	return atmci_readl(host, ATMCI_VERSION) & 0x00000fff;
-}
-
 /*
  * HSMCI (High Speed MCI) module is not fully compatible with MCI module.
  * HSMCI provides DMA support and a new config register but no more supports
@@ -2032,6 +2209,9 @@
 	host->caps.has_highspeed = 0;
 	host->caps.has_rwproof = 0;
 	host->caps.has_odd_clk_div = 0;
+	host->caps.has_bad_data_ordering = 1;
+	host->caps.need_reset_after_xfer = 1;
+	host->caps.need_blksz_mul_4 = 1;
 
 	/* keep only major version number */
 	switch (version & 0xf00) {
@@ -2051,7 +2231,11 @@
 		host->caps.has_highspeed = 1;
 	case 0x200:
 		host->caps.has_rwproof = 1;
+		host->caps.need_blksz_mul_4 = 0;
 	case 0x100:
+		host->caps.has_bad_data_ordering = 0;
+		host->caps.need_reset_after_xfer = 0;
+	case 0x0:
 		break;
 	default:
 		host->caps.has_pdc = 0;
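The switch above starts from the most restrictive capability set and relies on case fall-through, so each newer major revision inherits everything granted to the older ones while clearing the quirk flags fixed at that revision. A reduced sketch of that accumulate-by-fall-through idiom, with a hypothetical subset of the flags:

#include <stdbool.h>
#include <stdio.h>

struct caps {
	bool has_rwproof;
	bool need_blksz_mul_4;
	bool has_bad_data_ordering;
};

/* Hypothetical, reduced version of the capability selection: defaults are
 * the most restrictive, and each newer major revision falls through so it
 * also keeps what the older revisions already cleared or gained. */
static struct caps get_caps(unsigned int version)
{
	struct caps c = {
		.has_rwproof = false,
		.need_blksz_mul_4 = true,
		.has_bad_data_ordering = true,
	};

	switch (version & 0xf00) {
	case 0x200:
		c.has_rwproof = true;
		c.need_blksz_mul_4 = false;
		/* fall through */
	case 0x100:
		c.has_bad_data_ordering = false;
		/* fall through */
	case 0x0:
		break;
	}
	return c;
}

int main(void)
{
	struct caps c = get_caps(0x210);   /* assumed example version */

	printf("rwproof=%d blksz_mul_4=%d bad_order=%d\n",
	       c.has_rwproof, c.need_blksz_mul_4, c.has_bad_data_ordering);
	return 0;
}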
@@ -2132,20 +2316,28 @@
 
 	platform_set_drvdata(pdev, host);
 
+	setup_timer(&host->timer, atmci_timeout_timer, (unsigned long)host);
+
 	/* We need at least one slot to succeed */
 	nr_slots = 0;
 	ret = -ENODEV;
 	if (pdata->slot[0].bus_width) {
 		ret = atmci_init_slot(host, &pdata->slot[0],
 				0, ATMCI_SDCSEL_SLOT_A, ATMCI_SDIOIRQA);
-		if (!ret)
+		if (!ret) {
 			nr_slots++;
+			host->buf_size = host->slot[0]->mmc->max_req_size;
+		}
 	}
 	if (pdata->slot[1].bus_width) {
 		ret = atmci_init_slot(host, &pdata->slot[1],
 				1, ATMCI_SDCSEL_SLOT_B, ATMCI_SDIOIRQB);
-		if (!ret)
+		if (!ret) {
 			nr_slots++;
+			if (host->slot[1]->mmc->max_req_size > host->buf_size)
+				host->buf_size =
+					host->slot[1]->mmc->max_req_size;
+		}
 	}
 
 	if (!nr_slots) {
@@ -2153,6 +2345,17 @@
 		goto err_init_slot;
 	}
 
+	if (!host->caps.has_rwproof) {
+		host->buffer = dma_alloc_coherent(&pdev->dev, host->buf_size,
+		                                  &host->buf_phys_addr,
+						  GFP_KERNEL);
+		if (!host->buffer) {
+			ret = -ENOMEM;
+			dev_err(&pdev->dev, "buffer allocation failed\n");
+			goto err_init_slot;
+		}
+	}
+
 	dev_info(&pdev->dev,
 			"Atmel MCI controller at 0x%08lx irq %d, %u slots\n",
 			host->mapbase, irq, nr_slots);
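On controllers without the read/write proof capability the driver stages the whole transfer in the single coherent bounce buffer allocated just above, copying to or from the scatterlist with sg_copy_to_buffer()/sg_copy_from_buffer() and byte-swapping each word with swab32() on IPs with the bad-data-ordering quirk. A small userspace illustration of the copy-and-swap step, with memcpy standing in for the sg helpers:

#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* Byte-swap one 32-bit word, like the kernel's swab32(). */
static uint32_t my_swab32(uint32_t x)
{
	return __builtin_bswap32(x);
}

int main(void)
{
	uint32_t sg_data[2] = { 0x11223344, 0xa1b2c3d4 };  /* pretend scatterlist */
	uint32_t bounce[2];

	/* Write path on a controller without rwproof: stage the data in the
	 * single bounce buffer, then fix the word order if the IP has the
	 * bad-data-ordering quirk. */
	memcpy(bounce, sg_data, sizeof(bounce));
	for (unsigned int i = 0; i < 2; i++)
		bounce[i] = my_swab32(bounce[i]);

	printf("0x%08x -> 0x%08x\n",
	       (unsigned)sg_data[0], (unsigned)bounce[0]);  /* 0x11223344 -> 0x44332211 */
	return 0;
}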
@@ -2179,6 +2382,10 @@
 
 	platform_set_drvdata(pdev, NULL);
 
+	if (host->buffer)
+		dma_free_coherent(&pdev->dev, host->buf_size,
+		                  host->buffer, host->buf_phys_addr);
+
 	for (i = 0; i < ATMCI_MAX_NR_SLOTS; i++) {
 		if (host->slot[i])
 			atmci_cleanup_slot(host->slot[i], i);
diff --git a/drivers/mmc/host/davinci_mmc.c b/drivers/mmc/host/davinci_mmc.c
index c1f3673..7cf6c62 100644
--- a/drivers/mmc/host/davinci_mmc.c
+++ b/drivers/mmc/host/davinci_mmc.c
@@ -1533,4 +1533,5 @@
 MODULE_AUTHOR("Texas Instruments India");
 MODULE_LICENSE("GPL");
 MODULE_DESCRIPTION("MMC/SD driver for Davinci MMC controller");
+MODULE_ALIAS("platform:davinci_mmc");
 
diff --git a/drivers/mmc/host/dw_mmc.c b/drivers/mmc/host/dw_mmc.c
index ab3fc46..1ca5e72 100644
--- a/drivers/mmc/host/dw_mmc.c
+++ b/drivers/mmc/host/dw_mmc.c
@@ -100,8 +100,6 @@
 	int			last_detect_state;
 };
 
-static struct workqueue_struct *dw_mci_card_workqueue;
-
 #if defined(CONFIG_DEBUG_FS)
 static int dw_mci_req_show(struct seq_file *s, void *v)
 {
@@ -420,6 +418,8 @@
 	p->des3 = host->sg_dma;
 	p->des0 = IDMAC_DES0_ER;
 
+	mci_writel(host, BMOD, SDMMC_IDMAC_SWRESET);
+
 	/* Mask out interrupts - get Tx & Rx complete only */
 	mci_writel(host, IDINTEN, SDMMC_IDMAC_INT_NI | SDMMC_IDMAC_INT_RI |
 		   SDMMC_IDMAC_INT_TI);
@@ -617,14 +617,15 @@
 	u32 div;
 
 	if (slot->clock != host->current_speed) {
-		if (host->bus_hz % slot->clock)
+		div = host->bus_hz / slot->clock;
+		if (host->bus_hz % slot->clock && host->bus_hz > slot->clock)
 			/*
 			 * move the + 1 after the divide to prevent
 			 * over-clocking the card.
 			 */
-			div = ((host->bus_hz / slot->clock) >> 1) + 1;
-		else
-			div = (host->bus_hz  / slot->clock) >> 1;
+			div += 1;
+
+		div = (host->bus_hz != slot->clock) ? DIV_ROUND_UP(div, 2) : 0;
 
 		dev_info(&slot->mmc->class_dev,
 			 "Bus speed (slot %d) = %dHz (slot req %dHz, actual %dHZ"
@@ -859,10 +860,10 @@
 	int_mask = mci_readl(host, INTMASK);
 	if (enb) {
 		mci_writel(host, INTMASK,
-			   (int_mask | (1 << SDMMC_INT_SDIO(slot->id))));
+			   (int_mask | SDMMC_INT_SDIO(slot->id)));
 	} else {
 		mci_writel(host, INTMASK,
-			   (int_mask & ~(1 << SDMMC_INT_SDIO(slot->id))));
+			   (int_mask & ~SDMMC_INT_SDIO(slot->id)));
 	}
 }
 
@@ -941,8 +942,8 @@
 			mdelay(20);
 
 		if (cmd->data) {
-			host->data = NULL;
 			dw_mci_stop_dma(host);
+			host->data = NULL;
 		}
 	}
 }
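The divider rework a couple of hunks up rounds up and keeps the bypass case: div = bus_hz / clock, bumped by one on a remainder (as long as bus_hz is actually higher), then halved with DIV_ROUND_UP, or forced to 0 when the requested rate equals bus_hz. A quick numeric check of that logic with assumed example rates:

#include <stdio.h>

#define DIV_ROUND_UP(n, d)	(((n) + (d) - 1) / (d))

/* Mirrors the updated divider logic in dw_mci_setup_bus(): the result is
 * what goes into CLKDIV (card clock = bus_hz / (2 * div), or bus_hz when
 * div is 0). */
static unsigned int clkdiv(unsigned int bus_hz, unsigned int clock)
{
	unsigned int div = bus_hz / clock;

	if (bus_hz % clock && bus_hz > clock)
		div += 1;               /* round up, never over-clock the card */

	return bus_hz != clock ? DIV_ROUND_UP(div, 2) : 0;
}

int main(void)
{
	/* Assumed example rates, not taken from a particular board. */
	printf("%u\n", clkdiv(100000000,   400000));  /* 250 -> 125 */
	printf("%u\n", clkdiv(100000000, 50000000));  /*   2 -> 1   */
	printf("%u\n", clkdiv( 50000000, 50000000));  /* bypass: 0  */
	return 0;
}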
@@ -1605,7 +1606,7 @@
 
 		if (pending & SDMMC_INT_CD) {
 			mci_writel(host, RINTSTS, SDMMC_INT_CD);
-			queue_work(dw_mci_card_workqueue, &host->card_work);
+			queue_work(host->card_workqueue, &host->card_work);
 		}
 
 		/* Handle SDIO Interrupts */
@@ -1625,7 +1626,6 @@
 	if (pending & (SDMMC_IDMAC_INT_TI | SDMMC_IDMAC_INT_RI)) {
 		mci_writel(host, IDSTS, SDMMC_IDMAC_INT_TI | SDMMC_IDMAC_INT_RI);
 		mci_writel(host, IDSTS, SDMMC_IDMAC_INT_NI);
-		set_bit(EVENT_DATA_COMPLETE, &host->pending_events);
 		host->dma_ops->complete(host);
 	}
 #endif
@@ -1727,7 +1727,8 @@
 
 #ifdef CONFIG_MMC_DW_IDMAC
 				ctrl = mci_readl(host, BMOD);
-				ctrl |= 0x01; /* Software reset of DMA */
+				/* Software reset of DMA */
+				ctrl |= SDMMC_IDMAC_SWRESET;
 				mci_writel(host, BMOD, ctrl);
 #endif
 
@@ -1844,7 +1845,7 @@
 	 * Card may have been plugged in prior to boot so we
 	 * need to run the detect tasklet
 	 */
-	queue_work(dw_mci_card_workqueue, &host->card_work);
+	queue_work(host->card_workqueue, &host->card_work);
 
 	return 0;
 }
@@ -1952,10 +1953,6 @@
 	spin_lock_init(&host->lock);
 	INIT_LIST_HEAD(&host->queue);
 
-
-	host->dma_ops = host->pdata->dma_ops;
-	dw_mci_init_dma(host);
-
 	/*
 	 * Get the host data width - this assumes that HCON has been set with
 	 * the correct values.
@@ -1983,10 +1980,11 @@
 	}
 
 	/* Reset all blocks */
-	if (!mci_wait_reset(&host->dev, host)) {
-		ret = -ENODEV;
-		goto err_dmaunmap;
-	}
+	if (!mci_wait_reset(&host->dev, host))
+		return -ENODEV;
+
+	host->dma_ops = host->pdata->dma_ops;
+	dw_mci_init_dma(host);
 
 	/* Clear the interrupts for the host controller */
 	mci_writel(host, RINTSTS, 0xFFFFFFFF);
@@ -2021,9 +2019,9 @@
 	mci_writel(host, CLKSRC, 0);
 
 	tasklet_init(&host->tasklet, dw_mci_tasklet_func, (unsigned long)host);
-	dw_mci_card_workqueue = alloc_workqueue("dw-mci-card",
+	host->card_workqueue = alloc_workqueue("dw-mci-card",
 			WQ_MEM_RECLAIM | WQ_NON_REENTRANT, 1);
-	if (!dw_mci_card_workqueue)
+	if (!host->card_workqueue)
 		goto err_dmaunmap;
 	INIT_WORK(&host->card_work, dw_mci_work_routine_card);
 	ret = request_irq(host->irq, dw_mci_interrupt, host->irq_flags, "dw-mci", host);
@@ -2085,7 +2083,7 @@
 	free_irq(host->irq, host);
 
 err_workqueue:
-	destroy_workqueue(dw_mci_card_workqueue);
+	destroy_workqueue(host->card_workqueue);
 
 err_dmaunmap:
 	if (host->use_dma && host->dma_ops->exit)
@@ -2119,7 +2117,7 @@
 	mci_writel(host, CLKSRC, 0);
 
 	free_irq(host->irq, host);
-	destroy_workqueue(dw_mci_card_workqueue);
+	destroy_workqueue(host->card_workqueue);
 	dma_free_coherent(&host->dev, PAGE_SIZE, host->sg_cpu, host->sg_dma);
 
 	if (host->use_dma && host->dma_ops->exit)
@@ -2172,14 +2170,14 @@
 	if (host->vmmc)
 		regulator_enable(host->vmmc);
 
-	if (host->dma_ops->init)
-		host->dma_ops->init(host);
-
 	if (!mci_wait_reset(&host->dev, host)) {
 		ret = -ENODEV;
 		return ret;
 	}
 
+	if (host->dma_ops->init)
+		host->dma_ops->init(host);
+
 	/* Restore the old value at FIFOTH register */
 	mci_writel(host, FIFOTH, host->fifoth_val);
 
diff --git a/drivers/mmc/host/imxmmc.c b/drivers/mmc/host/imxmmc.c
deleted file mode 100644
index ea0f3ce..0000000
--- a/drivers/mmc/host/imxmmc.c
+++ /dev/null
@@ -1,1169 +0,0 @@
-/*
- *  linux/drivers/mmc/host/imxmmc.c - Motorola i.MX MMCI driver
- *
- *  Copyright (C) 2004 Sascha Hauer, Pengutronix <sascha@saschahauer.de>
- *  Copyright (C) 2006 Pavel Pisa, PiKRON <ppisa@pikron.com>
- *
- *  derived from pxamci.c by Russell King
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- *
- */
-
-#include <linux/module.h>
-#include <linux/init.h>
-#include <linux/ioport.h>
-#include <linux/platform_device.h>
-#include <linux/interrupt.h>
-#include <linux/blkdev.h>
-#include <linux/dma-mapping.h>
-#include <linux/mmc/host.h>
-#include <linux/mmc/card.h>
-#include <linux/delay.h>
-#include <linux/clk.h>
-#include <linux/io.h>
-
-#include <asm/dma.h>
-#include <asm/irq.h>
-#include <asm/sizes.h>
-#include <mach/mmc.h>
-#include <mach/imx-dma.h>
-
-#include "imxmmc.h"
-
-#define DRIVER_NAME "imx-mmc"
-
-#define IMXMCI_INT_MASK_DEFAULT (INT_MASK_BUF_READY | INT_MASK_DATA_TRAN | \
-				 INT_MASK_WRITE_OP_DONE | INT_MASK_END_CMD_RES | \
-				 INT_MASK_AUTO_CARD_DETECT | INT_MASK_DAT0_EN | INT_MASK_SDIO)
-
-struct imxmci_host {
-	struct mmc_host		*mmc;
-	spinlock_t		lock;
-	struct resource		*res;
-	void __iomem		*base;
-	int			irq;
-	imx_dmach_t		dma;
-	volatile unsigned int	imask;
-	unsigned int		power_mode;
-	unsigned int		present;
-	struct imxmmc_platform_data *pdata;
-
-	struct mmc_request	*req;
-	struct mmc_command	*cmd;
-	struct mmc_data		*data;
-
-	struct timer_list	timer;
-	struct tasklet_struct	tasklet;
-	unsigned int		status_reg;
-	unsigned long		pending_events;
-	/* Next two fields are there for CPU driven transfers to overcome SDHC deficiencies */
-	u16			*data_ptr;
-	unsigned int		data_cnt;
-	atomic_t		stuck_timeout;
-
-	unsigned int		dma_nents;
-	unsigned int		dma_size;
-	unsigned int		dma_dir;
-	int			dma_allocated;
-
-	unsigned char		actual_bus_width;
-
-	int			prev_cmd_code;
-
-	struct clk		*clk;
-};
-
-#define IMXMCI_PEND_IRQ_b	0
-#define IMXMCI_PEND_DMA_END_b	1
-#define IMXMCI_PEND_DMA_ERR_b	2
-#define IMXMCI_PEND_WAIT_RESP_b	3
-#define IMXMCI_PEND_DMA_DATA_b	4
-#define IMXMCI_PEND_CPU_DATA_b	5
-#define IMXMCI_PEND_CARD_XCHG_b	6
-#define IMXMCI_PEND_SET_INIT_b	7
-#define IMXMCI_PEND_STARTED_b	8
-
-#define IMXMCI_PEND_IRQ_m	(1 << IMXMCI_PEND_IRQ_b)
-#define IMXMCI_PEND_DMA_END_m	(1 << IMXMCI_PEND_DMA_END_b)
-#define IMXMCI_PEND_DMA_ERR_m	(1 << IMXMCI_PEND_DMA_ERR_b)
-#define IMXMCI_PEND_WAIT_RESP_m	(1 << IMXMCI_PEND_WAIT_RESP_b)
-#define IMXMCI_PEND_DMA_DATA_m	(1 << IMXMCI_PEND_DMA_DATA_b)
-#define IMXMCI_PEND_CPU_DATA_m	(1 << IMXMCI_PEND_CPU_DATA_b)
-#define IMXMCI_PEND_CARD_XCHG_m	(1 << IMXMCI_PEND_CARD_XCHG_b)
-#define IMXMCI_PEND_SET_INIT_m	(1 << IMXMCI_PEND_SET_INIT_b)
-#define IMXMCI_PEND_STARTED_m	(1 << IMXMCI_PEND_STARTED_b)
-
-static void imxmci_stop_clock(struct imxmci_host *host)
-{
-	int i = 0;
-	u16 reg;
-
-	reg = readw(host->base + MMC_REG_STR_STP_CLK);
-	writew(reg & ~STR_STP_CLK_START_CLK, host->base + MMC_REG_STR_STP_CLK);
-	while (i < 0x1000) {
-		if (!(i & 0x7f)) {
-			reg = readw(host->base + MMC_REG_STR_STP_CLK);
-			writew(reg | STR_STP_CLK_STOP_CLK,
-					host->base + MMC_REG_STR_STP_CLK);
-		}
-
-		reg = readw(host->base + MMC_REG_STATUS);
-		if (!(reg & STATUS_CARD_BUS_CLK_RUN)) {
-			/* Check twice before cut */
-			reg = readw(host->base + MMC_REG_STATUS);
-			if (!(reg & STATUS_CARD_BUS_CLK_RUN))
-				return;
-		}
-
-		i++;
-	}
-	dev_dbg(mmc_dev(host->mmc), "imxmci_stop_clock blocked, no luck\n");
-}
-
-static int imxmci_start_clock(struct imxmci_host *host)
-{
-	unsigned int trials = 0;
-	unsigned int delay_limit = 128;
-	unsigned long flags;
-	u16 reg;
-
-	reg = readw(host->base + MMC_REG_STR_STP_CLK);
-	writew(reg & ~STR_STP_CLK_STOP_CLK, host->base + MMC_REG_STR_STP_CLK);
-
-	clear_bit(IMXMCI_PEND_STARTED_b, &host->pending_events);
-
-	/*
-	 * Command start of the clock, this usually succeeds in less
-	 * then 6 delay loops, but during card detection (low clockrate)
-	 * it takes up to 5000 delay loops and sometimes fails for the first time
-	 */
-	reg = readw(host->base + MMC_REG_STR_STP_CLK);
-	writew(reg | STR_STP_CLK_START_CLK, host->base + MMC_REG_STR_STP_CLK);
-
-	do {
-		unsigned int delay = delay_limit;
-
-		while (delay--) {
-			reg = readw(host->base + MMC_REG_STATUS);
-			if (reg & STATUS_CARD_BUS_CLK_RUN) {
-				/* Check twice before cut */
-				reg = readw(host->base + MMC_REG_STATUS);
-				if (reg & STATUS_CARD_BUS_CLK_RUN)
-					return 0;
-			}
-
-			if (test_bit(IMXMCI_PEND_STARTED_b, &host->pending_events))
-				return 0;
-		}
-
-		local_irq_save(flags);
-		/*
-		 * Ensure, that request is not doubled under all possible circumstances.
-		 * It is possible, that cock running state is missed, because some other
-		 * IRQ or schedule delays this function execution and the clocks has
-		 * been already stopped by other means (response processing, SDHC HW)
-		 */
-		if (!test_bit(IMXMCI_PEND_STARTED_b, &host->pending_events)) {
-			reg = readw(host->base + MMC_REG_STR_STP_CLK);
-			writew(reg | STR_STP_CLK_START_CLK,
-					host->base + MMC_REG_STR_STP_CLK);
-		}
-		local_irq_restore(flags);
-
-	} while (++trials < 256);
-
-	dev_err(mmc_dev(host->mmc), "imxmci_start_clock blocked, no luck\n");
-
-	return -1;
-}
-
-static void imxmci_softreset(struct imxmci_host *host)
-{
-	int i;
-
-	/* reset sequence */
-	writew(0x08, host->base + MMC_REG_STR_STP_CLK);
-	writew(0x0D, host->base + MMC_REG_STR_STP_CLK);
-
-	for (i = 0; i < 8; i++)
-		writew(0x05, host->base + MMC_REG_STR_STP_CLK);
-
-	writew(0xff, host->base + MMC_REG_RES_TO);
-	writew(512, host->base + MMC_REG_BLK_LEN);
-	writew(1, host->base + MMC_REG_NOB);
-}
-
-static int imxmci_busy_wait_for_status(struct imxmci_host *host,
-				       unsigned int *pstat, unsigned int stat_mask,
-				       int timeout, const char *where)
-{
-	int loops = 0;
-
-	while (!(*pstat & stat_mask)) {
-		loops += 2;
-		if (loops >= timeout) {
-			dev_dbg(mmc_dev(host->mmc), "busy wait timeout in %s, STATUS = 0x%x (0x%x)\n",
-				where, *pstat, stat_mask);
-			return -1;
-		}
-		udelay(2);
-		*pstat |= readw(host->base + MMC_REG_STATUS);
-	}
-	if (!loops)
-		return 0;
-
-	/* The busy-wait is expected there for clock <8MHz due to SDHC hardware flaws */
-	if (!(stat_mask & STATUS_END_CMD_RESP) || (host->mmc->ios.clock >= 8000000))
-		dev_info(mmc_dev(host->mmc), "busy wait for %d usec in %s, STATUS = 0x%x (0x%x)\n",
-			 loops, where, *pstat, stat_mask);
-	return loops;
-}
-
-static void imxmci_setup_data(struct imxmci_host *host, struct mmc_data *data)
-{
-	unsigned int nob = data->blocks;
-	unsigned int blksz = data->blksz;
-	unsigned int datasz = nob * blksz;
-	int i;
-
-	if (data->flags & MMC_DATA_STREAM)
-		nob = 0xffff;
-
-	host->data = data;
-	data->bytes_xfered = 0;
-
-	writew(nob, host->base + MMC_REG_NOB);
-	writew(blksz, host->base + MMC_REG_BLK_LEN);
-
-	/*
-	 * DMA cannot be used for small block sizes, we have to use CPU driven transfers otherwise.
-	 * We are in big troubles for non-512 byte transfers according to note in the paragraph
-	 * 20.6.7 of User Manual anyway, but we need to be able to transfer SCR at least.
-	 * The situation is even more complex in reality. The SDHC in not able to handle wll
-	 * partial FIFO fills and reads. The length has to be rounded up to burst size multiple.
-	 * This is required for SCR read at least.
-	 */
-	if (datasz < 512) {
-		host->dma_size = datasz;
-		if (data->flags & MMC_DATA_READ) {
-			host->dma_dir = DMA_FROM_DEVICE;
-
-			/* Hack to enable read SCR */
-			writew(1, host->base + MMC_REG_NOB);
-			writew(512, host->base + MMC_REG_BLK_LEN);
-		} else {
-			host->dma_dir = DMA_TO_DEVICE;
-		}
-
-		/* Convert back to virtual address */
-		host->data_ptr = (u16 *)sg_virt(data->sg);
-		host->data_cnt = 0;
-
-		clear_bit(IMXMCI_PEND_DMA_DATA_b, &host->pending_events);
-		set_bit(IMXMCI_PEND_CPU_DATA_b, &host->pending_events);
-
-		return;
-	}
-
-	if (data->flags & MMC_DATA_READ) {
-		host->dma_dir = DMA_FROM_DEVICE;
-		host->dma_nents = dma_map_sg(mmc_dev(host->mmc), data->sg,
-					     data->sg_len,  host->dma_dir);
-
-		imx_dma_setup_sg(host->dma, data->sg, data->sg_len, datasz,
-				 host->res->start + MMC_REG_BUFFER_ACCESS,
-				 DMA_MODE_READ);
-
-		/*imx_dma_setup_mem2dev_ccr(host->dma, DMA_MODE_READ, IMX_DMA_WIDTH_16, CCR_REN);*/
-		CCR(host->dma) = CCR_DMOD_LINEAR | CCR_DSIZ_32 | CCR_SMOD_FIFO | CCR_SSIZ_16 | CCR_REN;
-	} else {
-		host->dma_dir = DMA_TO_DEVICE;
-
-		host->dma_nents = dma_map_sg(mmc_dev(host->mmc), data->sg,
-					     data->sg_len,  host->dma_dir);
-
-		imx_dma_setup_sg(host->dma, data->sg, data->sg_len, datasz,
-				 host->res->start + MMC_REG_BUFFER_ACCESS,
-				 DMA_MODE_WRITE);
-
-		/*imx_dma_setup_mem2dev_ccr(host->dma, DMA_MODE_WRITE, IMX_DMA_WIDTH_16, CCR_REN);*/
-		CCR(host->dma) = CCR_SMOD_LINEAR | CCR_SSIZ_32 | CCR_DMOD_FIFO | CCR_DSIZ_16 | CCR_REN;
-	}
-
-#if 1	/* This code is there only for consistency checking and can be disabled in future */
-	host->dma_size = 0;
-	for (i = 0; i < host->dma_nents; i++)
-		host->dma_size += data->sg[i].length;
-
-	if (datasz > host->dma_size) {
-		dev_err(mmc_dev(host->mmc), "imxmci_setup_data datasz 0x%x > 0x%x dm_size\n",
-			datasz, host->dma_size);
-	}
-#endif
-
-	host->dma_size = datasz;
-
-	wmb();
-
-	set_bit(IMXMCI_PEND_DMA_DATA_b, &host->pending_events);
-	clear_bit(IMXMCI_PEND_CPU_DATA_b, &host->pending_events);
-
-	/* start DMA engine for read, write is delayed after initial response */
-	if (host->dma_dir == DMA_FROM_DEVICE)
-		imx_dma_enable(host->dma);
-}
-
-static void imxmci_start_cmd(struct imxmci_host *host, struct mmc_command *cmd, unsigned int cmdat)
-{
-	unsigned long flags;
-	u32 imask;
-
-	WARN_ON(host->cmd != NULL);
-	host->cmd = cmd;
-
-	/* Ensure, that clock are stopped else command programming and start fails */
-	imxmci_stop_clock(host);
-
-	if (cmd->flags & MMC_RSP_BUSY)
-		cmdat |= CMD_DAT_CONT_BUSY;
-
-	switch (mmc_resp_type(cmd)) {
-	case MMC_RSP_R1: /* short CRC, OPCODE */
-	case MMC_RSP_R1B:/* short CRC, OPCODE, BUSY */
-		cmdat |= CMD_DAT_CONT_RESPONSE_FORMAT_R1;
-		break;
-	case MMC_RSP_R2: /* long 136 bit + CRC */
-		cmdat |= CMD_DAT_CONT_RESPONSE_FORMAT_R2;
-		break;
-	case MMC_RSP_R3: /* short */
-		cmdat |= CMD_DAT_CONT_RESPONSE_FORMAT_R3;
-		break;
-	default:
-		break;
-	}
-
-	if (test_and_clear_bit(IMXMCI_PEND_SET_INIT_b, &host->pending_events))
-		cmdat |= CMD_DAT_CONT_INIT; /* This command needs init */
-
-	if (host->actual_bus_width == MMC_BUS_WIDTH_4)
-		cmdat |= CMD_DAT_CONT_BUS_WIDTH_4;
-
-	writew(cmd->opcode, host->base + MMC_REG_CMD);
-	writew(cmd->arg >> 16, host->base + MMC_REG_ARGH);
-	writew(cmd->arg & 0xffff, host->base + MMC_REG_ARGL);
-	writew(cmdat, host->base + MMC_REG_CMD_DAT_CONT);
-
-	atomic_set(&host->stuck_timeout, 0);
-	set_bit(IMXMCI_PEND_WAIT_RESP_b, &host->pending_events);
-
-
-	imask = IMXMCI_INT_MASK_DEFAULT;
-	imask &= ~INT_MASK_END_CMD_RES;
-	if (cmdat & CMD_DAT_CONT_DATA_ENABLE) {
-		/* imask &= ~INT_MASK_BUF_READY; */
-		imask &= ~INT_MASK_DATA_TRAN;
-		if (cmdat & CMD_DAT_CONT_WRITE)
-			imask &= ~INT_MASK_WRITE_OP_DONE;
-		if (test_bit(IMXMCI_PEND_CPU_DATA_b, &host->pending_events))
-			imask &= ~INT_MASK_BUF_READY;
-	}
-
-	spin_lock_irqsave(&host->lock, flags);
-	host->imask = imask;
-	writew(host->imask, host->base + MMC_REG_INT_MASK);
-	spin_unlock_irqrestore(&host->lock, flags);
-
-	dev_dbg(mmc_dev(host->mmc), "CMD%02d (0x%02x) mask set to 0x%04x\n",
-		cmd->opcode, cmd->opcode, imask);
-
-	imxmci_start_clock(host);
-}
-
-static void imxmci_finish_request(struct imxmci_host *host, struct mmc_request *req)
-{
-	unsigned long flags;
-
-	spin_lock_irqsave(&host->lock, flags);
-
-	host->pending_events &= ~(IMXMCI_PEND_WAIT_RESP_m | IMXMCI_PEND_DMA_END_m |
-				  IMXMCI_PEND_DMA_DATA_m | IMXMCI_PEND_CPU_DATA_m);
-
-	host->imask = IMXMCI_INT_MASK_DEFAULT;
-	writew(host->imask, host->base + MMC_REG_INT_MASK);
-
-	spin_unlock_irqrestore(&host->lock, flags);
-
-	if (req && req->cmd)
-		host->prev_cmd_code = req->cmd->opcode;
-
-	host->req = NULL;
-	host->cmd = NULL;
-	host->data = NULL;
-	mmc_request_done(host->mmc, req);
-}
-
-static int imxmci_finish_data(struct imxmci_host *host, unsigned int stat)
-{
-	struct mmc_data *data = host->data;
-	int data_error;
-
-	if (test_and_clear_bit(IMXMCI_PEND_DMA_DATA_b, &host->pending_events)) {
-		imx_dma_disable(host->dma);
-		dma_unmap_sg(mmc_dev(host->mmc), data->sg, host->dma_nents,
-			     host->dma_dir);
-	}
-
-	if (stat & STATUS_ERR_MASK) {
-		dev_dbg(mmc_dev(host->mmc), "request failed. status: 0x%08x\n", stat);
-		if (stat & (STATUS_CRC_READ_ERR | STATUS_CRC_WRITE_ERR))
-			data->error = -EILSEQ;
-		else if (stat & STATUS_TIME_OUT_READ)
-			data->error = -ETIMEDOUT;
-		else
-			data->error = -EIO;
-	} else {
-		data->bytes_xfered = host->dma_size;
-	}
-
-	data_error = data->error;
-
-	host->data = NULL;
-
-	return data_error;
-}
-
-static int imxmci_cmd_done(struct imxmci_host *host, unsigned int stat)
-{
-	struct mmc_command *cmd = host->cmd;
-	int i;
-	u32 a, b, c;
-	struct mmc_data *data = host->data;
-
-	if (!cmd)
-		return 0;
-
-	host->cmd = NULL;
-
-	if (stat & STATUS_TIME_OUT_RESP) {
-		dev_dbg(mmc_dev(host->mmc), "CMD TIMEOUT\n");
-		cmd->error = -ETIMEDOUT;
-	} else if (stat & STATUS_RESP_CRC_ERR && cmd->flags & MMC_RSP_CRC) {
-		dev_dbg(mmc_dev(host->mmc), "cmd crc error\n");
-		cmd->error = -EILSEQ;
-	}
-
-	if (cmd->flags & MMC_RSP_PRESENT) {
-		if (cmd->flags & MMC_RSP_136) {
-			for (i = 0; i < 4; i++) {
-				a = readw(host->base + MMC_REG_RES_FIFO);
-				b = readw(host->base + MMC_REG_RES_FIFO);
-				cmd->resp[i] = a << 16 | b;
-			}
-		} else {
-			a = readw(host->base + MMC_REG_RES_FIFO);
-			b = readw(host->base + MMC_REG_RES_FIFO);
-			c = readw(host->base + MMC_REG_RES_FIFO);
-			cmd->resp[0] = a << 24 | b << 8 | c >> 8;
-		}
-	}
-
-	dev_dbg(mmc_dev(host->mmc), "RESP 0x%08x, 0x%08x, 0x%08x, 0x%08x, error %d\n",
-		cmd->resp[0], cmd->resp[1], cmd->resp[2], cmd->resp[3], cmd->error);
-
-	if (data && !cmd->error && !(stat & STATUS_ERR_MASK)) {
-		if (host->req->data->flags & MMC_DATA_WRITE) {
-
-			/* Wait for FIFO to be empty before starting DMA write */
-
-			stat = readw(host->base + MMC_REG_STATUS);
-			if (imxmci_busy_wait_for_status(host, &stat,
-							STATUS_APPL_BUFF_FE,
-							40, "imxmci_cmd_done DMA WR") < 0) {
-				cmd->error = -EIO;
-				imxmci_finish_data(host, stat);
-				if (host->req)
-					imxmci_finish_request(host, host->req);
-				dev_warn(mmc_dev(host->mmc), "STATUS = 0x%04x\n",
-					 stat);
-				return 0;
-			}
-
-			if (test_bit(IMXMCI_PEND_DMA_DATA_b, &host->pending_events))
-				imx_dma_enable(host->dma);
-		}
-	} else {
-		struct mmc_request *req;
-		imxmci_stop_clock(host);
-		req = host->req;
-
-		if (data)
-			imxmci_finish_data(host, stat);
-
-		if (req)
-			imxmci_finish_request(host, req);
-		else
-			dev_warn(mmc_dev(host->mmc), "imxmci_cmd_done: no request to finish\n");
-	}
-
-	return 1;
-}
-
-static int imxmci_data_done(struct imxmci_host *host, unsigned int stat)
-{
-	struct mmc_data *data = host->data;
-	int data_error;
-
-	if (!data)
-		return 0;
-
-	data_error = imxmci_finish_data(host, stat);
-
-	if (host->req->stop) {
-		imxmci_stop_clock(host);
-		imxmci_start_cmd(host, host->req->stop, 0);
-	} else {
-		struct mmc_request *req;
-		req = host->req;
-		if (req)
-			imxmci_finish_request(host, req);
-		else
-			dev_warn(mmc_dev(host->mmc), "imxmci_data_done: no request to finish\n");
-	}
-
-	return 1;
-}
-
-static int imxmci_cpu_driven_data(struct imxmci_host *host, unsigned int *pstat)
-{
-	int i;
-	int burst_len;
-	int trans_done = 0;
-	unsigned int stat = *pstat;
-
-	if (host->actual_bus_width != MMC_BUS_WIDTH_4)
-		burst_len = 16;
-	else
-		burst_len = 64;
-
-	/* This is unfortunately required */
-	dev_dbg(mmc_dev(host->mmc), "imxmci_cpu_driven_data running STATUS = 0x%x\n",
-		stat);
-
-	udelay(20);	/* required for clocks < 8MHz*/
-
-	if (host->dma_dir == DMA_FROM_DEVICE) {
-		imxmci_busy_wait_for_status(host, &stat,
-					    STATUS_APPL_BUFF_FF | STATUS_DATA_TRANS_DONE |
-					    STATUS_TIME_OUT_READ,
-					    50, "imxmci_cpu_driven_data read");
-
-		while ((stat & (STATUS_APPL_BUFF_FF | STATUS_DATA_TRANS_DONE)) &&
-		       !(stat & STATUS_TIME_OUT_READ) &&
-		       (host->data_cnt < 512)) {
-
-			udelay(20);	/* required for clocks < 8MHz*/
-
-			for (i = burst_len; i >= 2 ; i -= 2) {
-				u16 data;
-				data = readw(host->base + MMC_REG_BUFFER_ACCESS);
-				udelay(10);	/* required for clocks < 8MHz*/
-				if (host->data_cnt+2 <= host->dma_size) {
-					*(host->data_ptr++) = data;
-				} else {
-					if (host->data_cnt < host->dma_size)
-						*(u8 *)(host->data_ptr) = data;
-				}
-				host->data_cnt += 2;
-			}
-
-			stat = readw(host->base + MMC_REG_STATUS);
-
-			dev_dbg(mmc_dev(host->mmc), "imxmci_cpu_driven_data read %d burst %d STATUS = 0x%x\n",
-				host->data_cnt, burst_len, stat);
-		}
-
-		if ((stat & STATUS_DATA_TRANS_DONE) && (host->data_cnt >= 512))
-			trans_done = 1;
-
-		if (host->dma_size & 0x1ff)
-			stat &= ~STATUS_CRC_READ_ERR;
-
-		if (stat & STATUS_TIME_OUT_READ) {
-			dev_dbg(mmc_dev(host->mmc), "imxmci_cpu_driven_data read timeout STATUS = 0x%x\n",
-				stat);
-			trans_done = -1;
-		}
-
-	} else {
-		imxmci_busy_wait_for_status(host, &stat,
-					    STATUS_APPL_BUFF_FE,
-					    20, "imxmci_cpu_driven_data write");
-
-		while ((stat & STATUS_APPL_BUFF_FE) &&
-		       (host->data_cnt < host->dma_size)) {
-			if (burst_len >= host->dma_size - host->data_cnt) {
-				burst_len = host->dma_size - host->data_cnt;
-				host->data_cnt = host->dma_size;
-				trans_done = 1;
-			} else {
-				host->data_cnt += burst_len;
-			}
-
-			for (i = burst_len; i > 0 ; i -= 2)
-				writew(*(host->data_ptr++), host->base + MMC_REG_BUFFER_ACCESS);
-
-			stat = readw(host->base + MMC_REG_STATUS);
-
-			dev_dbg(mmc_dev(host->mmc), "imxmci_cpu_driven_data write burst %d STATUS = 0x%x\n",
-				burst_len, stat);
-		}
-	}
-
-	*pstat = stat;
-
-	return trans_done;
-}
-
-static void imxmci_dma_irq(int dma, void *devid)
-{
-	struct imxmci_host *host = devid;
-	u32 stat = readw(host->base + MMC_REG_STATUS);
-
-	atomic_set(&host->stuck_timeout, 0);
-	host->status_reg = stat;
-	set_bit(IMXMCI_PEND_DMA_END_b, &host->pending_events);
-	tasklet_schedule(&host->tasklet);
-}
-
-static irqreturn_t imxmci_irq(int irq, void *devid)
-{
-	struct imxmci_host *host = devid;
-	u32 stat = readw(host->base + MMC_REG_STATUS);
-	int handled = 1;
-
-	writew(host->imask | INT_MASK_SDIO | INT_MASK_AUTO_CARD_DETECT,
-			host->base + MMC_REG_INT_MASK);
-
-	atomic_set(&host->stuck_timeout, 0);
-	host->status_reg = stat;
-	set_bit(IMXMCI_PEND_IRQ_b, &host->pending_events);
-	set_bit(IMXMCI_PEND_STARTED_b, &host->pending_events);
-	tasklet_schedule(&host->tasklet);
-
-	return IRQ_RETVAL(handled);
-}
-
-static void imxmci_tasklet_fnc(unsigned long data)
-{
-	struct imxmci_host *host = (struct imxmci_host *)data;
-	u32 stat;
-	unsigned int data_dir_mask = 0;	/* STATUS_WR_CRC_ERROR_CODE_MASK */
-	int timeout = 0;
-
-	if (atomic_read(&host->stuck_timeout) > 4) {
-		char *what;
-		timeout = 1;
-		stat = readw(host->base + MMC_REG_STATUS);
-		host->status_reg = stat;
-		if (test_bit(IMXMCI_PEND_WAIT_RESP_b, &host->pending_events))
-			if (test_bit(IMXMCI_PEND_DMA_DATA_b, &host->pending_events))
-				what = "RESP+DMA";
-			else
-				what = "RESP";
-		else
-			if (test_bit(IMXMCI_PEND_DMA_DATA_b, &host->pending_events))
-				if (test_bit(IMXMCI_PEND_DMA_END_b, &host->pending_events))
-					what = "DATA";
-				else
-					what = "DMA";
-			else
-				what = "???";
-
-		dev_err(mmc_dev(host->mmc),
-			"%s TIMEOUT, hardware stucked STATUS = 0x%04x IMASK = 0x%04x\n",
-			what, stat,
-			readw(host->base + MMC_REG_INT_MASK));
-		dev_err(mmc_dev(host->mmc),
-			"CMD_DAT_CONT = 0x%04x, MMC_BLK_LEN = 0x%04x, MMC_NOB = 0x%04x, DMA_CCR = 0x%08x\n",
-			readw(host->base + MMC_REG_CMD_DAT_CONT),
-			readw(host->base + MMC_REG_BLK_LEN),
-			readw(host->base + MMC_REG_NOB),
-			CCR(host->dma));
-		dev_err(mmc_dev(host->mmc), "CMD%d, prevCMD%d, bus %d-bit, dma_size = 0x%x\n",
-			host->cmd ? host->cmd->opcode : 0,
-			host->prev_cmd_code,
-			1 << host->actual_bus_width, host->dma_size);
-	}
-
-	if (!host->present || timeout)
-		host->status_reg = STATUS_TIME_OUT_RESP | STATUS_TIME_OUT_READ |
-			STATUS_CRC_READ_ERR | STATUS_CRC_WRITE_ERR;
-
-	if (test_bit(IMXMCI_PEND_IRQ_b, &host->pending_events) || timeout) {
-		clear_bit(IMXMCI_PEND_IRQ_b, &host->pending_events);
-
-		stat = readw(host->base + MMC_REG_STATUS);
-		/*
-		 * This is not required in theory, but there is chance to miss some flag
-		 * which clears automatically by mask write, FreeScale original code keeps
-		 * stat from IRQ time so do I
-		 */
-		stat |= host->status_reg;
-
-		if (test_bit(IMXMCI_PEND_CPU_DATA_b, &host->pending_events))
-			stat &= ~STATUS_CRC_READ_ERR;
-
-		if (test_bit(IMXMCI_PEND_WAIT_RESP_b, &host->pending_events)) {
-			imxmci_busy_wait_for_status(host, &stat,
-						    STATUS_END_CMD_RESP | STATUS_ERR_MASK,
-						    20, "imxmci_tasklet_fnc resp (ERRATUM #4)");
-		}
-
-		if (stat & (STATUS_END_CMD_RESP | STATUS_ERR_MASK)) {
-			if (test_and_clear_bit(IMXMCI_PEND_WAIT_RESP_b, &host->pending_events))
-				imxmci_cmd_done(host, stat);
-			if (host->data && (stat & STATUS_ERR_MASK))
-				imxmci_data_done(host, stat);
-		}
-
-		if (test_bit(IMXMCI_PEND_CPU_DATA_b, &host->pending_events)) {
-			stat |= readw(host->base + MMC_REG_STATUS);
-			if (imxmci_cpu_driven_data(host, &stat)) {
-				if (test_and_clear_bit(IMXMCI_PEND_WAIT_RESP_b, &host->pending_events))
-					imxmci_cmd_done(host, stat);
-				atomic_clear_mask(IMXMCI_PEND_IRQ_m|IMXMCI_PEND_CPU_DATA_m,
-						  &host->pending_events);
-				imxmci_data_done(host, stat);
-			}
-		}
-	}
-
-	if (test_bit(IMXMCI_PEND_DMA_END_b, &host->pending_events) &&
-	    !test_bit(IMXMCI_PEND_WAIT_RESP_b, &host->pending_events)) {
-
-		stat = readw(host->base + MMC_REG_STATUS);
-		/* Same as above */
-		stat |= host->status_reg;
-
-		if (host->dma_dir == DMA_TO_DEVICE)
-			data_dir_mask = STATUS_WRITE_OP_DONE;
-		else
-			data_dir_mask = STATUS_DATA_TRANS_DONE;
-
-		if (stat & data_dir_mask) {
-			clear_bit(IMXMCI_PEND_DMA_END_b, &host->pending_events);
-			imxmci_data_done(host, stat);
-		}
-	}
-
-	if (test_and_clear_bit(IMXMCI_PEND_CARD_XCHG_b, &host->pending_events)) {
-
-		if (host->cmd)
-			imxmci_cmd_done(host, STATUS_TIME_OUT_RESP);
-
-		if (host->data)
-			imxmci_data_done(host, STATUS_TIME_OUT_READ |
-					 STATUS_CRC_READ_ERR | STATUS_CRC_WRITE_ERR);
-
-		if (host->req)
-			imxmci_finish_request(host, host->req);
-
-		mmc_detect_change(host->mmc, msecs_to_jiffies(100));
-
-	}
-}
-
-static void imxmci_request(struct mmc_host *mmc, struct mmc_request *req)
-{
-	struct imxmci_host *host = mmc_priv(mmc);
-	unsigned int cmdat;
-
-	WARN_ON(host->req != NULL);
-
-	host->req = req;
-
-	cmdat = 0;
-
-	if (req->data) {
-		imxmci_setup_data(host, req->data);
-
-		cmdat |= CMD_DAT_CONT_DATA_ENABLE;
-
-		if (req->data->flags & MMC_DATA_WRITE)
-			cmdat |= CMD_DAT_CONT_WRITE;
-
-		if (req->data->flags & MMC_DATA_STREAM)
-			cmdat |= CMD_DAT_CONT_STREAM_BLOCK;
-	}
-
-	imxmci_start_cmd(host, req->cmd, cmdat);
-}
-
-#define CLK_RATE 19200000
-
-static void imxmci_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
-{
-	struct imxmci_host *host = mmc_priv(mmc);
-	int prescaler;
-
-	if (ios->bus_width == MMC_BUS_WIDTH_4) {
-		host->actual_bus_width = MMC_BUS_WIDTH_4;
-		imx_gpio_mode(PB11_PF_SD_DAT3);
-		BLR(host->dma) = 0;	/* burst 64 byte read/write */
-	} else {
-		host->actual_bus_width = MMC_BUS_WIDTH_1;
-		imx_gpio_mode(GPIO_PORTB | GPIO_IN | GPIO_PUEN | 11);
-		BLR(host->dma) = 16;	/* burst 16 byte read/write */
-	}
-
-	if (host->power_mode != ios->power_mode) {
-		switch (ios->power_mode) {
-		case MMC_POWER_OFF:
-			break;
-		case MMC_POWER_UP:
-			set_bit(IMXMCI_PEND_SET_INIT_b, &host->pending_events);
-			break;
-		case MMC_POWER_ON:
-			break;
-		}
-		host->power_mode = ios->power_mode;
-	}
-
-	if (ios->clock) {
-		unsigned int clk;
-		u16 reg;
-
-		/* The prescaler is 5 for PERCLK2 equal to 96MHz
-		 * then 96MHz / 5 = 19.2 MHz
-		 */
-		clk = clk_get_rate(host->clk);
-		prescaler = (clk + (CLK_RATE * 7) / 8) / CLK_RATE;
-		switch (prescaler) {
-		case 0:
-		case 1:	prescaler = 0;
-			break;
-		case 2:	prescaler = 1;
-			break;
-		case 3:	prescaler = 2;
-			break;
-		case 4:	prescaler = 4;
-			break;
-		default:
-		case 5:	prescaler = 5;
-			break;
-		}
-
-		dev_dbg(mmc_dev(host->mmc), "PERCLK2 %d MHz -> prescaler %d\n",
-			clk, prescaler);
-
-		for (clk = 0; clk < 8; clk++) {
-			int x;
-			x = CLK_RATE / (1 << clk);
-			if (x <= ios->clock)
-				break;
-		}
-
-		/* enable controller */
-		reg = readw(host->base + MMC_REG_STR_STP_CLK);
-		writew(reg | STR_STP_CLK_ENABLE,
-				host->base + MMC_REG_STR_STP_CLK);
-
-		imxmci_stop_clock(host);
-		writew((prescaler << 3) | clk, host->base + MMC_REG_CLK_RATE);
-		/*
-		 * Under my understanding, clock should not be started there, because it would
-		 * initiate SDHC sequencer and send last or random command into card
-		 */
-		/* imxmci_start_clock(host); */
-
-		dev_dbg(mmc_dev(host->mmc),
-			"MMC_CLK_RATE: 0x%08x\n",
-			readw(host->base + MMC_REG_CLK_RATE));
-	} else {
-		imxmci_stop_clock(host);
-	}
-}
-
-static int imxmci_get_ro(struct mmc_host *mmc)
-{
-	struct imxmci_host *host = mmc_priv(mmc);
-
-	if (host->pdata && host->pdata->get_ro)
-		return !!host->pdata->get_ro(mmc_dev(mmc));
-	/*
-	 * Board doesn't support read only detection; let the mmc core
-	 * decide what to do.
-	 */
-	return -ENOSYS;
-}
-
-
-static const struct mmc_host_ops imxmci_ops = {
-	.request	= imxmci_request,
-	.set_ios	= imxmci_set_ios,
-	.get_ro		= imxmci_get_ro,
-};
-
-static void imxmci_check_status(unsigned long data)
-{
-	struct imxmci_host *host = (struct imxmci_host *)data;
-
-	if (host->pdata && host->pdata->card_present &&
-	    host->pdata->card_present(mmc_dev(host->mmc)) != host->present) {
-		host->present ^= 1;
-		dev_info(mmc_dev(host->mmc), "card %s\n",
-		      host->present ? "inserted" : "removed");
-
-		set_bit(IMXMCI_PEND_CARD_XCHG_b, &host->pending_events);
-		tasklet_schedule(&host->tasklet);
-	}
-
-	if (test_bit(IMXMCI_PEND_WAIT_RESP_b, &host->pending_events) ||
-	    test_bit(IMXMCI_PEND_DMA_DATA_b, &host->pending_events)) {
-		atomic_inc(&host->stuck_timeout);
-		if (atomic_read(&host->stuck_timeout) > 4)
-			tasklet_schedule(&host->tasklet);
-	} else {
-		atomic_set(&host->stuck_timeout, 0);
-
-	}
-
-	mod_timer(&host->timer, jiffies + (HZ>>1));
-}
-
-static int __init imxmci_probe(struct platform_device *pdev)
-{
-	struct mmc_host *mmc;
-	struct imxmci_host *host = NULL;
-	struct resource *r;
-	int ret = 0, irq;
-	u16 rev_no;
-
-	pr_info("i.MX mmc driver\n");
-
-	r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
-	irq = platform_get_irq(pdev, 0);
-	if (!r || irq < 0)
-		return -ENXIO;
-
-	r = request_mem_region(r->start, resource_size(r), pdev->name);
-	if (!r)
-		return -EBUSY;
-
-	mmc = mmc_alloc_host(sizeof(struct imxmci_host), &pdev->dev);
-	if (!mmc) {
-		ret = -ENOMEM;
-		goto out;
-	}
-
-	mmc->ops = &imxmci_ops;
-	mmc->f_min = 150000;
-	mmc->f_max = CLK_RATE/2;
-	mmc->ocr_avail = MMC_VDD_32_33;
-	mmc->caps = MMC_CAP_4_BIT_DATA;
-
-	/* MMC core transfer sizes tunable parameters */
-	mmc->max_segs = 64;
-	mmc->max_seg_size = 64*512;	/* default PAGE_CACHE_SIZE */
-	mmc->max_req_size = 64*512;	/* default PAGE_CACHE_SIZE */
-	mmc->max_blk_size = 2048;
-	mmc->max_blk_count = 65535;
-
-	host = mmc_priv(mmc);
-	host->base = ioremap(r->start, resource_size(r));
-	if (!host->base) {
-		ret = -ENOMEM;
-		goto out;
-	}
-
-	host->mmc = mmc;
-	host->dma_allocated = 0;
-	host->pdata = pdev->dev.platform_data;
-	if (!host->pdata)
-		dev_warn(&pdev->dev, "No platform data provided!\n");
-
-	spin_lock_init(&host->lock);
-	host->res = r;
-	host->irq = irq;
-
-	host->clk = clk_get(&pdev->dev, "perclk2");
-	if (IS_ERR(host->clk)) {
-		ret = PTR_ERR(host->clk);
-		goto out;
-	}
-	clk_enable(host->clk);
-
-	imx_gpio_mode(PB8_PF_SD_DAT0);
-	imx_gpio_mode(PB9_PF_SD_DAT1);
-	imx_gpio_mode(PB10_PF_SD_DAT2);
-	/* Configured as GPIO with pull-up to ensure right MCC card mode */
-	/* Switched to PB11_PF_SD_DAT3 if 4 bit bus is configured */
-	imx_gpio_mode(GPIO_PORTB | GPIO_IN | GPIO_PUEN | 11);
-	/* imx_gpio_mode(PB11_PF_SD_DAT3); */
-	imx_gpio_mode(PB12_PF_SD_CLK);
-	imx_gpio_mode(PB13_PF_SD_CMD);
-
-	imxmci_softreset(host);
-
-	rev_no = readw(host->base + MMC_REG_REV_NO);
-	if (rev_no != 0x390) {
-		dev_err(mmc_dev(host->mmc), "wrong rev.no. 0x%08x. aborting.\n",
-			readw(host->base + MMC_REG_REV_NO));
-		goto out;
-	}
-
-	/* recommended in data sheet */
-	writew(0x2db4, host->base + MMC_REG_READ_TO);
-
-	host->imask = IMXMCI_INT_MASK_DEFAULT;
-	writew(host->imask, host->base + MMC_REG_INT_MASK);
-
-	host->dma = imx_dma_request_by_prio(DRIVER_NAME, DMA_PRIO_LOW);
-	if(host->dma < 0) {
-		dev_err(mmc_dev(host->mmc), "imx_dma_request_by_prio failed\n");
-		ret = -EBUSY;
-		goto out;
-	}
-	host->dma_allocated = 1;
-	imx_dma_setup_handlers(host->dma, imxmci_dma_irq, NULL, host);
-	RSSR(host->dma) = DMA_REQ_SDHC;
-
-	tasklet_init(&host->tasklet, imxmci_tasklet_fnc, (unsigned long)host);
-	host->status_reg=0;
-	host->pending_events=0;
-
-	ret = request_irq(host->irq, imxmci_irq, 0, DRIVER_NAME, host);
-	if (ret)
-		goto out;
-
-	if (host->pdata && host->pdata->card_present)
-		host->present = host->pdata->card_present(mmc_dev(mmc));
-	else	/* if there is no way to detect assume that card is present */
-		host->present = 1;
-
-	init_timer(&host->timer);
-	host->timer.data = (unsigned long)host;
-	host->timer.function = imxmci_check_status;
-	add_timer(&host->timer);
-	mod_timer(&host->timer, jiffies + (HZ >> 1));
-
-	platform_set_drvdata(pdev, mmc);
-
-	mmc_add_host(mmc);
-
-	return 0;
-
-out:
-	if (host) {
-		if (host->dma_allocated) {
-			imx_dma_free(host->dma);
-			host->dma_allocated = 0;
-		}
-		if (host->clk) {
-			clk_disable(host->clk);
-			clk_put(host->clk);
-		}
-		if (host->base)
-			iounmap(host->base);
-	}
-	if (mmc)
-		mmc_free_host(mmc);
-	release_mem_region(r->start, resource_size(r));
-	return ret;
-}
-
-static int __exit imxmci_remove(struct platform_device *pdev)
-{
-	struct mmc_host *mmc = platform_get_drvdata(pdev);
-
-	platform_set_drvdata(pdev, NULL);
-
-	if (mmc) {
-		struct imxmci_host *host = mmc_priv(mmc);
-
-		tasklet_disable(&host->tasklet);
-
-		del_timer_sync(&host->timer);
-		mmc_remove_host(mmc);
-
-		free_irq(host->irq, host);
-		iounmap(host->base);
-		if (host->dma_allocated) {
-			imx_dma_free(host->dma);
-			host->dma_allocated = 0;
-		}
-
-		tasklet_kill(&host->tasklet);
-
-		clk_disable(host->clk);
-		clk_put(host->clk);
-
-		release_mem_region(host->res->start, resource_size(host->res));
-
-		mmc_free_host(mmc);
-	}
-	return 0;
-}
-
-#ifdef CONFIG_PM
-static int imxmci_suspend(struct platform_device *dev, pm_message_t state)
-{
-	struct mmc_host *mmc = platform_get_drvdata(dev);
-	int ret = 0;
-
-	if (mmc)
-		ret = mmc_suspend_host(mmc);
-
-	return ret;
-}
-
-static int imxmci_resume(struct platform_device *dev)
-{
-	struct mmc_host *mmc = platform_get_drvdata(dev);
-	struct imxmci_host *host;
-	int ret = 0;
-
-	if (mmc) {
-		host = mmc_priv(mmc);
-		if (host)
-			set_bit(IMXMCI_PEND_SET_INIT_b, &host->pending_events);
-		ret = mmc_resume_host(mmc);
-	}
-
-	return ret;
-}
-#else
-#define imxmci_suspend  NULL
-#define imxmci_resume   NULL
-#endif /* CONFIG_PM */
-
-static struct platform_driver imxmci_driver = {
-	.remove		= __exit_p(imxmci_remove),
-	.suspend	= imxmci_suspend,
-	.resume		= imxmci_resume,
-	.driver		= {
-		.name		= DRIVER_NAME,
-		.owner		= THIS_MODULE,
-	}
-};
-
-static int __init imxmci_init(void)
-{
-	return platform_driver_probe(&imxmci_driver, imxmci_probe);
-}
-
-static void __exit imxmci_exit(void)
-{
-	platform_driver_unregister(&imxmci_driver);
-}
-
-module_init(imxmci_init);
-module_exit(imxmci_exit);
-
-MODULE_DESCRIPTION("i.MX Multimedia Card Interface Driver");
-MODULE_AUTHOR("Sascha Hauer, Pengutronix");
-MODULE_LICENSE("GPL");
-MODULE_ALIAS("platform:imx-mmc");
diff --git a/drivers/mmc/host/imxmmc.h b/drivers/mmc/host/imxmmc.h
deleted file mode 100644
index 09d5d4e..0000000
--- a/drivers/mmc/host/imxmmc.h
+++ /dev/null
@@ -1,64 +0,0 @@
-#define MMC_REG_STR_STP_CLK		0x00
-#define MMC_REG_STATUS			0x04
-#define MMC_REG_CLK_RATE		0x08
-#define MMC_REG_CMD_DAT_CONT		0x0C
-#define MMC_REG_RES_TO			0x10
-#define MMC_REG_READ_TO			0x14
-#define MMC_REG_BLK_LEN			0x18
-#define MMC_REG_NOB			0x1C
-#define MMC_REG_REV_NO			0x20
-#define MMC_REG_INT_MASK		0x24
-#define MMC_REG_CMD			0x28
-#define MMC_REG_ARGH			0x2C
-#define MMC_REG_ARGL			0x30
-#define MMC_REG_RES_FIFO		0x34
-#define MMC_REG_BUFFER_ACCESS		0x38
-
-#define STR_STP_CLK_IPG_CLK_GATE_DIS    (1<<15)
-#define STR_STP_CLK_IPG_PERCLK_GATE_DIS (1<<14)
-#define STR_STP_CLK_ENDIAN              (1<<5)
-#define STR_STP_CLK_RESET               (1<<3)
-#define STR_STP_CLK_ENABLE              (1<<2)
-#define STR_STP_CLK_START_CLK           (1<<1)
-#define STR_STP_CLK_STOP_CLK            (1<<0)
-#define STATUS_CARD_PRESENCE            (1<<15)
-#define STATUS_SDIO_INT_ACTIVE          (1<<14)
-#define STATUS_END_CMD_RESP             (1<<13)
-#define STATUS_WRITE_OP_DONE            (1<<12)
-#define STATUS_DATA_TRANS_DONE          (1<<11)
-#define STATUS_WR_CRC_ERROR_CODE_MASK   (3<<10)
-#define STATUS_CARD_BUS_CLK_RUN         (1<<8)
-#define STATUS_APPL_BUFF_FF             (1<<7)
-#define STATUS_APPL_BUFF_FE             (1<<6)
-#define STATUS_RESP_CRC_ERR             (1<<5)
-#define STATUS_CRC_READ_ERR             (1<<3)
-#define STATUS_CRC_WRITE_ERR            (1<<2)
-#define STATUS_TIME_OUT_RESP            (1<<1)
-#define STATUS_TIME_OUT_READ            (1<<0)
-#define STATUS_ERR_MASK                 0x2f
-#define CLK_RATE_PRESCALER(x)           ((x) & 0x7)
-#define CLK_RATE_CLK_RATE(x)            (((x) & 0x7) << 3)
-#define CMD_DAT_CONT_CMD_RESP_LONG_OFF  (1<<12)
-#define CMD_DAT_CONT_STOP_READWAIT      (1<<11)
-#define CMD_DAT_CONT_START_READWAIT     (1<<10)
-#define CMD_DAT_CONT_BUS_WIDTH_1        (0<<8)
-#define CMD_DAT_CONT_BUS_WIDTH_4        (2<<8)
-#define CMD_DAT_CONT_INIT               (1<<7)
-#define CMD_DAT_CONT_BUSY               (1<<6)
-#define CMD_DAT_CONT_STREAM_BLOCK       (1<<5)
-#define CMD_DAT_CONT_WRITE              (1<<4)
-#define CMD_DAT_CONT_DATA_ENABLE        (1<<3)
-#define CMD_DAT_CONT_RESPONSE_FORMAT_R1 (1)
-#define CMD_DAT_CONT_RESPONSE_FORMAT_R2 (2)
-#define CMD_DAT_CONT_RESPONSE_FORMAT_R3 (3)
-#define CMD_DAT_CONT_RESPONSE_FORMAT_R4 (4)
-#define CMD_DAT_CONT_RESPONSE_FORMAT_R5 (5)
-#define CMD_DAT_CONT_RESPONSE_FORMAT_R6 (6)
-#define INT_MASK_AUTO_CARD_DETECT       (1<<6)
-#define INT_MASK_DAT0_EN                (1<<5)
-#define INT_MASK_SDIO                   (1<<4)
-#define INT_MASK_BUF_READY              (1<<3)
-#define INT_MASK_END_CMD_RES            (1<<2)
-#define INT_MASK_WRITE_OP_DONE          (1<<1)
-#define INT_MASK_DATA_TRAN              (1<<0)
-#define INT_ALL                         (0x7f)
diff --git a/drivers/mmc/host/mmci.c b/drivers/mmc/host/mmci.c
index b6f3842..50ff19a 100644
--- a/drivers/mmc/host/mmci.c
+++ b/drivers/mmc/host/mmci.c
@@ -15,6 +15,7 @@
 #include <linux/device.h>
 #include <linux/interrupt.h>
 #include <linux/kernel.h>
+#include <linux/slab.h>
 #include <linux/delay.h>
 #include <linux/err.h>
 #include <linux/highmem.h>
@@ -25,6 +26,7 @@
 #include <linux/clk.h>
 #include <linux/scatterlist.h>
 #include <linux/gpio.h>
+#include <linux/of_gpio.h>
 #include <linux/regulator/consumer.h>
 #include <linux/dmaengine.h>
 #include <linux/dma-mapping.h>
@@ -1207,21 +1209,77 @@
 	.get_cd		= mmci_get_cd,
 };
 
+#ifdef CONFIG_OF
+static void mmci_dt_populate_generic_pdata(struct device_node *np,
+					struct mmci_platform_data *pdata)
+{
+	int bus_width = 0;
+
+	pdata->gpio_wp = of_get_named_gpio(np, "wp-gpios", 0);
+	pdata->gpio_cd = of_get_named_gpio(np, "cd-gpios", 0);
+
+	if (of_get_property(np, "cd-inverted", NULL))
+		pdata->cd_invert = true;
+	else
+		pdata->cd_invert = false;
+
+	of_property_read_u32(np, "max-frequency", &pdata->f_max);
+	if (!pdata->f_max)
+		pr_warn("%s has no 'max-frequency' property\n", np->full_name);
+
+	if (of_get_property(np, "mmc-cap-mmc-highspeed", NULL))
+		pdata->capabilities |= MMC_CAP_MMC_HIGHSPEED;
+	if (of_get_property(np, "mmc-cap-sd-highspeed", NULL))
+		pdata->capabilities |= MMC_CAP_SD_HIGHSPEED;
+
+	of_property_read_u32(np, "bus-width", &bus_width);
+	switch (bus_width) {
+	case 0:
+		/* No bus-width supplied. */
+		break;
+	case 4:
+		pdata->capabilities |= MMC_CAP_4_BIT_DATA;
+		break;
+	case 8:
+		pdata->capabilities |= MMC_CAP_8_BIT_DATA;
+		break;
+	default:
+		pr_warn("%s: Unsupported bus width\n", np->full_name);
+	}
+}
+#else
+static void mmci_dt_populate_generic_pdata(struct device_node *np,
+					struct mmci_platform_data *pdata)
+{
+	return;
+}
+#endif
+
 static int __devinit mmci_probe(struct amba_device *dev,
 	const struct amba_id *id)
 {
 	struct mmci_platform_data *plat = dev->dev.platform_data;
+	struct device_node *np = dev->dev.of_node;
 	struct variant_data *variant = id->data;
 	struct mmci_host *host;
 	struct mmc_host *mmc;
 	int ret;
 
-	/* must have platform data */
-	if (!plat) {
-		ret = -EINVAL;
-		goto out;
+	/* Must have platform data or Device Tree. */
+	if (!plat && !np) {
+		dev_err(&dev->dev, "No plat data or DT found\n");
+		return -EINVAL;
 	}
 
+	if (!plat) {
+		plat = devm_kzalloc(&dev->dev, sizeof(*plat), GFP_KERNEL);
+		if (!plat)
+			return -ENOMEM;
+	}
+
+	if (np)
+		mmci_dt_populate_generic_pdata(np, plat);
+
 	ret = amba_request_regions(dev, DRIVER_NAME);
 	if (ret)
 		goto out;
@@ -1367,6 +1425,10 @@
 	writel(0, host->base + MMCIMASK1);
 	writel(0xfff, host->base + MMCICLEAR);
 
+	if (plat->gpio_cd == -EPROBE_DEFER) {
+		ret = -EPROBE_DEFER;
+		goto err_gpio_cd;
+	}
 	if (gpio_is_valid(plat->gpio_cd)) {
 		ret = gpio_request(plat->gpio_cd, DRIVER_NAME " (cd)");
 		if (ret == 0)
@@ -1390,6 +1452,10 @@
 		if (ret >= 0)
 			host->gpio_cd_irq = gpio_to_irq(plat->gpio_cd);
 	}
+	if (plat->gpio_wp == -EPROBE_DEFER) {
+		ret = -EPROBE_DEFER;
+		goto err_gpio_wp;
+	}
 	if (gpio_is_valid(plat->gpio_wp)) {
 		ret = gpio_request(plat->gpio_wp, DRIVER_NAME " (wp)");
 		if (ret == 0)
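
The new DT path above leans on two conventions worth spelling out: of_get_named_gpio() hands back either a GPIO number or a negative errno, and -EPROBE_DEFER in particular must be propagated so the core can retry the probe once the GPIO controller is available. A minimal sketch of that pattern, assuming a "cd-gpios" property as in the hunks above (the helper name is hypothetical):

	#include <linux/err.h>
	#include <linux/gpio.h>
	#include <linux/of_gpio.h>

	/* Parse an optional card-detect GPIO; defer if its controller is not up yet. */
	static int example_parse_cd_gpio(struct device_node *np, int *cd_gpio)
	{
		int gpio = of_get_named_gpio(np, "cd-gpios", 0);

		if (gpio == -EPROBE_DEFER)
			return -EPROBE_DEFER;	/* retried by the driver core later */

		*cd_gpio = gpio;		/* may be invalid if the property is absent */
		return 0;
	}
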
diff --git a/drivers/mmc/host/mvsdio.c b/drivers/mmc/host/mvsdio.c
index eeb8cd1..3b9136c 100644
--- a/drivers/mmc/host/mvsdio.c
+++ b/drivers/mmc/host/mvsdio.c
@@ -19,6 +19,7 @@
 #include <linux/dma-mapping.h>
 #include <linux/scatterlist.h>
 #include <linux/irq.h>
+#include <linux/clk.h>
 #include <linux/gpio.h>
 #include <linux/mmc/host.h>
 
@@ -51,6 +52,7 @@
 	struct device *dev;
 	struct resource *res;
 	int irq;
+	struct clk *clk;
 	int gpio_card_detect;
 	int gpio_write_protect;
 };
@@ -770,6 +772,13 @@
 	} else
 		host->irq = irq;
 
+	/* Not all platforms can gate the clock, so it is not
+	   an error if the clock does not exist. */
+	host->clk = clk_get(&pdev->dev, NULL);
+	if (!IS_ERR(host->clk)) {
+		clk_prepare_enable(host->clk);
+	}
+
 	if (mvsd_data->gpio_card_detect) {
 		ret = gpio_request(mvsd_data->gpio_card_detect,
 				   DRIVER_NAME " cd");
@@ -854,6 +863,11 @@
 		mvsd_power_down(host);
 		iounmap(host->base);
 		release_resource(host->res);
+
+		if (!IS_ERR(host->clk)) {
+			clk_disable_unprepare(host->clk);
+			clk_put(host->clk);
+		}
 		mmc_free_host(mmc);
 	}
 	platform_set_drvdata(pdev, NULL);
diff --git a/drivers/mmc/host/mxcmmc.c b/drivers/mmc/host/mxcmmc.c
index b2058b4..28ed52d 100644
--- a/drivers/mmc/host/mxcmmc.c
+++ b/drivers/mmc/host/mxcmmc.c
@@ -136,7 +136,8 @@
 	u16			rev_no;
 	unsigned int		cmdat;
 
-	struct clk		*clk;
+	struct clk		*clk_ipg;
+	struct clk		*clk_per;
 
 	int			clock;
 
@@ -672,7 +673,7 @@
 {
 	unsigned int divider;
 	int prescaler = 0;
-	unsigned int clk_in = clk_get_rate(host->clk);
+	unsigned int clk_in = clk_get_rate(host->clk_per);
 
 	while (prescaler <= 0x800) {
 		for (divider = 1; divider <= 0xF; divider++) {
@@ -900,12 +901,20 @@
 	host->res = r;
 	host->irq = irq;
 
-	host->clk = clk_get(&pdev->dev, NULL);
-	if (IS_ERR(host->clk)) {
-		ret = PTR_ERR(host->clk);
+	host->clk_ipg = devm_clk_get(&pdev->dev, "ipg");
+	if (IS_ERR(host->clk_ipg)) {
+		ret = PTR_ERR(host->clk_ipg);
 		goto out_iounmap;
 	}
-	clk_enable(host->clk);
+
+	host->clk_per = devm_clk_get(&pdev->dev, "per");
+	if (IS_ERR(host->clk_per)) {
+		ret = PTR_ERR(host->clk_per);
+		goto out_iounmap;
+	}
+
+	clk_prepare_enable(host->clk_per);
+	clk_prepare_enable(host->clk_ipg);
 
 	mxcmci_softreset(host);
 
@@ -917,8 +926,8 @@
 		goto out_clk_put;
 	}
 
-	mmc->f_min = clk_get_rate(host->clk) >> 16;
-	mmc->f_max = clk_get_rate(host->clk) >> 1;
+	mmc->f_min = clk_get_rate(host->clk_per) >> 16;
+	mmc->f_max = clk_get_rate(host->clk_per) >> 1;
 
 	/* recommended in data sheet */
 	writew(0x2db4, host->base + MMC_REG_READ_TO);
@@ -967,8 +976,8 @@
 	if (host->dma)
 		dma_release_channel(host->dma);
 out_clk_put:
-	clk_disable(host->clk);
-	clk_put(host->clk);
+	clk_disable_unprepare(host->clk_per);
+	clk_disable_unprepare(host->clk_ipg);
 out_iounmap:
 	iounmap(host->base);
 out_free:
@@ -999,8 +1008,8 @@
 	if (host->dma)
 		dma_release_channel(host->dma);
 
-	clk_disable(host->clk);
-	clk_put(host->clk);
+	clk_disable_unprepare(host->clk_per);
+	clk_disable_unprepare(host->clk_ipg);
 
 	release_mem_region(host->res->start, resource_size(host->res));
 
@@ -1018,7 +1027,8 @@
 
 	if (mmc)
 		ret = mmc_suspend_host(mmc);
-	clk_disable(host->clk);
+	clk_disable_unprepare(host->clk_per);
+	clk_disable_unprepare(host->clk_ipg);
 
 	return ret;
 }
@@ -1029,7 +1039,8 @@
 	struct mxcmci_host *host = mmc_priv(mmc);
 	int ret = 0;
 
-	clk_enable(host->clk);
+	clk_prepare_enable(host->clk_per);
+	clk_prepare_enable(host->clk_ipg);
 	if (mmc)
 		ret = mmc_resume_host(mmc);
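
The mxcmmc conversion above is the standard i.MX split into an "ipg" (register/bus) clock and a "per" (functional) clock, moved to the common clock framework helpers; sdhci-esdhc-imx below gets the same treatment with an extra "ahb" clock. A condensed sketch of the pattern, with error handling trimmed and illustrative names:

	#include <linux/clk.h>
	#include <linux/err.h>
	#include <linux/platform_device.h>

	static int example_enable_imx_mmc_clocks(struct platform_device *pdev,
						 struct clk **ipg, struct clk **per)
	{
		*ipg = devm_clk_get(&pdev->dev, "ipg");	/* register access clock */
		if (IS_ERR(*ipg))
			return PTR_ERR(*ipg);

		*per = devm_clk_get(&pdev->dev, "per");	/* card clock source */
		if (IS_ERR(*per))
			return PTR_ERR(*per);

		/* clk_prepare_enable() == clk_prepare() + clk_enable() */
		clk_prepare_enable(*ipg);
		clk_prepare_enable(*per);
		return 0;
	}
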
 
diff --git a/drivers/mmc/host/mxs-mmc.c b/drivers/mmc/host/mxs-mmc.c
index bb03ddd..277161d 100644
--- a/drivers/mmc/host/mxs-mmc.c
+++ b/drivers/mmc/host/mxs-mmc.c
@@ -23,6 +23,9 @@
 #include <linux/kernel.h>
 #include <linux/init.h>
 #include <linux/ioport.h>
+#include <linux/of.h>
+#include <linux/of_device.h>
+#include <linux/of_gpio.h>
 #include <linux/platform_device.h>
 #include <linux/delay.h>
 #include <linux/interrupt.h>
@@ -40,18 +43,15 @@
 #include <linux/module.h>
 #include <linux/fsl/mxs-dma.h>
 #include <linux/pinctrl/consumer.h>
-
-#include <mach/mxs.h>
-#include <mach/common.h>
-#include <mach/mmc.h>
+#include <linux/stmp_device.h>
+#include <linux/mmc/mxs-mmc.h>
 
 #define DRIVER_NAME	"mxs-mmc"
 
 /* card detect polling timeout */
 #define MXS_MMC_DETECT_TIMEOUT			(HZ/2)
 
-#define SSP_VERSION_LATEST	4
-#define ssp_is_old()		(host->version < SSP_VERSION_LATEST)
+#define ssp_is_old(host)	((host)->devid == IMX23_MMC)
 
 /* SSP registers */
 #define HW_SSP_CTRL0				0x000
@@ -86,14 +86,14 @@
 #define  BM_SSP_BLOCK_SIZE_BLOCK_COUNT		(0xffffff << 4)
 #define  BP_SSP_BLOCK_SIZE_BLOCK_SIZE		(0)
 #define  BM_SSP_BLOCK_SIZE_BLOCK_SIZE		(0xf)
-#define HW_SSP_TIMING				(ssp_is_old() ? 0x050 : 0x070)
+#define HW_SSP_TIMING(h)			(ssp_is_old(h) ? 0x050 : 0x070)
 #define  BP_SSP_TIMING_TIMEOUT			(16)
 #define  BM_SSP_TIMING_TIMEOUT			(0xffff << 16)
 #define  BP_SSP_TIMING_CLOCK_DIVIDE		(8)
 #define  BM_SSP_TIMING_CLOCK_DIVIDE		(0xff << 8)
 #define  BP_SSP_TIMING_CLOCK_RATE		(0)
 #define  BM_SSP_TIMING_CLOCK_RATE		(0xff)
-#define HW_SSP_CTRL1				(ssp_is_old() ? 0x060 : 0x080)
+#define HW_SSP_CTRL1(h)				(ssp_is_old(h) ? 0x060 : 0x080)
 #define  BM_SSP_CTRL1_SDIO_IRQ			(1 << 31)
 #define  BM_SSP_CTRL1_SDIO_IRQ_EN		(1 << 30)
 #define  BM_SSP_CTRL1_RESP_ERR_IRQ		(1 << 29)
@@ -116,15 +116,13 @@
 #define  BM_SSP_CTRL1_WORD_LENGTH		(0xf << 4)
 #define  BP_SSP_CTRL1_SSP_MODE			(0)
 #define  BM_SSP_CTRL1_SSP_MODE			(0xf)
-#define HW_SSP_SDRESP0				(ssp_is_old() ? 0x080 : 0x0a0)
-#define HW_SSP_SDRESP1				(ssp_is_old() ? 0x090 : 0x0b0)
-#define HW_SSP_SDRESP2				(ssp_is_old() ? 0x0a0 : 0x0c0)
-#define HW_SSP_SDRESP3				(ssp_is_old() ? 0x0b0 : 0x0d0)
-#define HW_SSP_STATUS				(ssp_is_old() ? 0x0c0 : 0x100)
+#define HW_SSP_SDRESP0(h)			(ssp_is_old(h) ? 0x080 : 0x0a0)
+#define HW_SSP_SDRESP1(h)			(ssp_is_old(h) ? 0x090 : 0x0b0)
+#define HW_SSP_SDRESP2(h)			(ssp_is_old(h) ? 0x0a0 : 0x0c0)
+#define HW_SSP_SDRESP3(h)			(ssp_is_old(h) ? 0x0b0 : 0x0d0)
+#define HW_SSP_STATUS(h)			(ssp_is_old(h) ? 0x0c0 : 0x100)
 #define  BM_SSP_STATUS_CARD_DETECT		(1 << 28)
 #define  BM_SSP_STATUS_SDIO_IRQ			(1 << 17)
-#define HW_SSP_VERSION				(cpu_is_mx23() ? 0x110 : 0x130)
-#define  BP_SSP_VERSION_MAJOR			(24)
 
 #define BF_SSP(value, field)	(((value) << BP_SSP_##field) & BM_SSP_##field)
 
@@ -139,6 +137,11 @@
 
 #define SSP_PIO_NUM	3
 
+enum mxs_mmc_id {
+	IMX23_MMC,
+	IMX28_MMC,
+};
+
 struct mxs_mmc_host {
 	struct mmc_host			*mmc;
 	struct mmc_request		*mrq;
@@ -146,9 +149,7 @@
 	struct mmc_data			*data;
 
 	void __iomem			*base;
-	int				irq;
-	struct resource			*res;
-	struct resource			*dma_res;
+	int				dma_channel;
 	struct clk			*clk;
 	unsigned int			clk_rate;
 
@@ -158,32 +159,28 @@
 	enum dma_transfer_direction	slave_dirn;
 	u32				ssp_pio_words[SSP_PIO_NUM];
 
-	unsigned int			version;
+	enum mxs_mmc_id			devid;
 	unsigned char			bus_width;
 	spinlock_t			lock;
 	int				sdio_irq_en;
+	int				wp_gpio;
 };
 
 static int mxs_mmc_get_ro(struct mmc_host *mmc)
 {
 	struct mxs_mmc_host *host = mmc_priv(mmc);
-	struct mxs_mmc_platform_data *pdata =
-		mmc_dev(host->mmc)->platform_data;
 
-	if (!pdata)
-		return -EFAULT;
-
-	if (!gpio_is_valid(pdata->wp_gpio))
+	if (!gpio_is_valid(host->wp_gpio))
 		return -EINVAL;
 
-	return gpio_get_value(pdata->wp_gpio);
+	return gpio_get_value(host->wp_gpio);
 }
 
 static int mxs_mmc_get_cd(struct mmc_host *mmc)
 {
 	struct mxs_mmc_host *host = mmc_priv(mmc);
 
-	return !(readl(host->base + HW_SSP_STATUS) &
+	return !(readl(host->base + HW_SSP_STATUS(host)) &
 		 BM_SSP_STATUS_CARD_DETECT);
 }
 
@@ -191,7 +188,7 @@
 {
 	u32 ctrl0, ctrl1;
 
-	mxs_reset_block(host->base);
+	stmp_reset_block(host->base);
 
 	ctrl0 = BM_SSP_CTRL0_IGNORE_CRC;
 	ctrl1 = BF_SSP(0x3, CTRL1_SSP_MODE) |
@@ -207,7 +204,7 @@
 	writel(BF_SSP(0xffff, TIMING_TIMEOUT) |
 	       BF_SSP(2, TIMING_CLOCK_DIVIDE) |
 	       BF_SSP(0, TIMING_CLOCK_RATE),
-	       host->base + HW_SSP_TIMING);
+	       host->base + HW_SSP_TIMING(host));
 
 	if (host->sdio_irq_en) {
 		ctrl0 |= BM_SSP_CTRL0_SDIO_IRQ_CHECK;
@@ -215,7 +212,7 @@
 	}
 
 	writel(ctrl0, host->base + HW_SSP_CTRL0);
-	writel(ctrl1, host->base + HW_SSP_CTRL1);
+	writel(ctrl1, host->base + HW_SSP_CTRL1(host));
 }
 
 static void mxs_mmc_start_cmd(struct mxs_mmc_host *host,
@@ -229,12 +226,12 @@
 
 	if (mmc_resp_type(cmd) & MMC_RSP_PRESENT) {
 		if (mmc_resp_type(cmd) & MMC_RSP_136) {
-			cmd->resp[3] = readl(host->base + HW_SSP_SDRESP0);
-			cmd->resp[2] = readl(host->base + HW_SSP_SDRESP1);
-			cmd->resp[1] = readl(host->base + HW_SSP_SDRESP2);
-			cmd->resp[0] = readl(host->base + HW_SSP_SDRESP3);
+			cmd->resp[3] = readl(host->base + HW_SSP_SDRESP0(host));
+			cmd->resp[2] = readl(host->base + HW_SSP_SDRESP1(host));
+			cmd->resp[1] = readl(host->base + HW_SSP_SDRESP2(host));
+			cmd->resp[0] = readl(host->base + HW_SSP_SDRESP3(host));
 		} else {
-			cmd->resp[0] = readl(host->base + HW_SSP_SDRESP0);
+			cmd->resp[0] = readl(host->base + HW_SSP_SDRESP0(host));
 		}
 	}
 
@@ -277,9 +274,9 @@
 
 	spin_lock(&host->lock);
 
-	stat = readl(host->base + HW_SSP_CTRL1);
+	stat = readl(host->base + HW_SSP_CTRL1(host));
 	writel(stat & MXS_MMC_IRQ_BITS,
-	       host->base + HW_SSP_CTRL1 + MXS_CLR_ADDR);
+	       host->base + HW_SSP_CTRL1(host) + STMP_OFFSET_REG_CLR);
 
 	if ((stat & BM_SSP_CTRL1_SDIO_IRQ) && (stat & BM_SSP_CTRL1_SDIO_IRQ_EN))
 		mmc_signal_sdio_irq(host->mmc);
@@ -485,7 +482,7 @@
 		blocks = 1;
 
 	/* xfer count, block size and count need to be set differently */
-	if (ssp_is_old()) {
+	if (ssp_is_old(host)) {
 		ctrl0 |= BF_SSP(data_size, CTRL0_XFER_COUNT);
 		cmd0 |= BF_SSP(log2_blksz, CMD0_BLOCK_SIZE) |
 			BF_SSP(blocks - 1, CMD0_BLOCK_COUNT);
@@ -509,10 +506,10 @@
 
 	/* set the timeout count */
 	timeout = mxs_ns_to_ssp_ticks(host->clk_rate, data->timeout_ns);
-	val = readl(host->base + HW_SSP_TIMING);
+	val = readl(host->base + HW_SSP_TIMING(host));
 	val &= ~(BM_SSP_TIMING_TIMEOUT);
 	val |= BF_SSP(timeout, TIMING_TIMEOUT);
-	writel(val, host->base + HW_SSP_TIMING);
+	writel(val, host->base + HW_SSP_TIMING(host));
 
 	/* pio */
 	host->ssp_pio_words[0] = ctrl0;
@@ -598,11 +595,11 @@
 
 	ssp_sck = ssp_clk / clock_divide / (1 + clock_rate);
 
-	val = readl(host->base + HW_SSP_TIMING);
+	val = readl(host->base + HW_SSP_TIMING(host));
 	val &= ~(BM_SSP_TIMING_CLOCK_DIVIDE | BM_SSP_TIMING_CLOCK_RATE);
 	val |= BF_SSP(clock_divide, TIMING_CLOCK_DIVIDE);
 	val |= BF_SSP(clock_rate, TIMING_CLOCK_RATE);
-	writel(val, host->base + HW_SSP_TIMING);
+	writel(val, host->base + HW_SSP_TIMING(host));
 
 	host->clk_rate = ssp_sck;
 
@@ -637,18 +634,19 @@
 
 	if (enable) {
 		writel(BM_SSP_CTRL0_SDIO_IRQ_CHECK,
-		       host->base + HW_SSP_CTRL0 + MXS_SET_ADDR);
+		       host->base + HW_SSP_CTRL0 + STMP_OFFSET_REG_SET);
 		writel(BM_SSP_CTRL1_SDIO_IRQ_EN,
-		       host->base + HW_SSP_CTRL1 + MXS_SET_ADDR);
+		       host->base + HW_SSP_CTRL1(host) + STMP_OFFSET_REG_SET);
 
-		if (readl(host->base + HW_SSP_STATUS) & BM_SSP_STATUS_SDIO_IRQ)
+		if (readl(host->base + HW_SSP_STATUS(host)) &
+				BM_SSP_STATUS_SDIO_IRQ)
 			mmc_signal_sdio_irq(host->mmc);
 
 	} else {
 		writel(BM_SSP_CTRL0_SDIO_IRQ_CHECK,
-		       host->base + HW_SSP_CTRL0 + MXS_CLR_ADDR);
+		       host->base + HW_SSP_CTRL0 + STMP_OFFSET_REG_CLR);
 		writel(BM_SSP_CTRL1_SDIO_IRQ_EN,
-		       host->base + HW_SSP_CTRL1 + MXS_CLR_ADDR);
+		       host->base + HW_SSP_CTRL1(host) + STMP_OFFSET_REG_CLR);
 	}
 
 	spin_unlock_irqrestore(&host->lock, flags);
@@ -669,7 +667,7 @@
 	if (!mxs_dma_is_apbh(chan))
 		return false;
 
-	if (chan->chan_id != host->dma_res->start)
+	if (chan->chan_id != host->dma_channel)
 		return false;
 
 	chan->private = &host->dma_data;
@@ -677,11 +675,34 @@
 	return true;
 }
 
+static struct platform_device_id mxs_mmc_ids[] = {
+	{
+		.name = "imx23-mmc",
+		.driver_data = IMX23_MMC,
+	}, {
+		.name = "imx28-mmc",
+		.driver_data = IMX28_MMC,
+	}, {
+		/* sentinel */
+	}
+};
+MODULE_DEVICE_TABLE(platform, mxs_mmc_ids);
+
+static const struct of_device_id mxs_mmc_dt_ids[] = {
+	{ .compatible = "fsl,imx23-mmc", .data = (void *) IMX23_MMC, },
+	{ .compatible = "fsl,imx28-mmc", .data = (void *) IMX28_MMC, },
+	{ /* sentinel */ }
+};
+MODULE_DEVICE_TABLE(of, mxs_mmc_dt_ids);
+
 static int mxs_mmc_probe(struct platform_device *pdev)
 {
+	const struct of_device_id *of_id =
+			of_match_device(mxs_mmc_dt_ids, &pdev->dev);
+	struct device_node *np = pdev->dev.of_node;
 	struct mxs_mmc_host *host;
 	struct mmc_host *mmc;
-	struct resource *iores, *dmares, *r;
+	struct resource *iores, *dmares;
 	struct mxs_mmc_platform_data *pdata;
 	struct pinctrl *pinctrl;
 	int ret = 0, irq_err, irq_dma;
@@ -691,46 +712,51 @@
 	dmares = platform_get_resource(pdev, IORESOURCE_DMA, 0);
 	irq_err = platform_get_irq(pdev, 0);
 	irq_dma = platform_get_irq(pdev, 1);
-	if (!iores || !dmares || irq_err < 0 || irq_dma < 0)
+	if (!iores || irq_err < 0 || irq_dma < 0)
 		return -EINVAL;
 
-	r = request_mem_region(iores->start, resource_size(iores), pdev->name);
-	if (!r)
-		return -EBUSY;
-
 	mmc = mmc_alloc_host(sizeof(struct mxs_mmc_host), &pdev->dev);
-	if (!mmc) {
-		ret = -ENOMEM;
-		goto out_release_mem;
-	}
+	if (!mmc)
+		return -ENOMEM;
 
 	host = mmc_priv(mmc);
-	host->base = ioremap(r->start, resource_size(r));
+	host->base = devm_request_and_ioremap(&pdev->dev, iores);
 	if (!host->base) {
-		ret = -ENOMEM;
+		ret = -EADDRNOTAVAIL;
 		goto out_mmc_free;
 	}
 
-	/* only major verion does matter */
-	host->version = readl(host->base + HW_SSP_VERSION) >>
-			BP_SSP_VERSION_MAJOR;
+	if (np) {
+		host->devid = (enum mxs_mmc_id) of_id->data;
+		/*
+		 * TODO: This is a temporary solution and should be changed
+		 * to use generic DMA binding later when the helpers get in.
+		 */
+		ret = of_property_read_u32(np, "fsl,ssp-dma-channel",
+					   &host->dma_channel);
+		if (ret) {
+			dev_err(mmc_dev(host->mmc),
+				"failed to get dma channel\n");
+			goto out_mmc_free;
+		}
+	} else {
+		host->devid = pdev->id_entry->driver_data;
+		host->dma_channel = dmares->start;
+	}
 
 	host->mmc = mmc;
-	host->res = r;
-	host->dma_res = dmares;
-	host->irq = irq_err;
 	host->sdio_irq_en = 0;
 
 	pinctrl = devm_pinctrl_get_select_default(&pdev->dev);
 	if (IS_ERR(pinctrl)) {
 		ret = PTR_ERR(pinctrl);
-		goto out_iounmap;
+		goto out_mmc_free;
 	}
 
 	host->clk = clk_get(&pdev->dev, NULL);
 	if (IS_ERR(host->clk)) {
 		ret = PTR_ERR(host->clk);
-		goto out_iounmap;
+		goto out_mmc_free;
 	}
 	clk_prepare_enable(host->clk);
 
@@ -752,11 +778,20 @@
 		    MMC_CAP_SDIO_IRQ | MMC_CAP_NEEDS_POLL;
 
 	pdata =	mmc_dev(host->mmc)->platform_data;
-	if (pdata) {
+	if (!pdata) {
+		u32 bus_width = 0;
+		of_property_read_u32(np, "bus-width", &bus_width);
+		if (bus_width == 4)
+			mmc->caps |= MMC_CAP_4_BIT_DATA;
+		else if (bus_width == 8)
+			mmc->caps |= MMC_CAP_4_BIT_DATA | MMC_CAP_8_BIT_DATA;
+		host->wp_gpio = of_get_named_gpio(np, "wp-gpios", 0);
+	} else {
 		if (pdata->flags & SLOTF_8_BIT_CAPABLE)
 			mmc->caps |= MMC_CAP_4_BIT_DATA | MMC_CAP_8_BIT_DATA;
 		if (pdata->flags & SLOTF_4_BIT_CAPABLE)
 			mmc->caps |= MMC_CAP_4_BIT_DATA;
+		host->wp_gpio = pdata->wp_gpio;
 	}
 
 	mmc->f_min = 400000;
@@ -765,13 +800,14 @@
 
 	mmc->max_segs = 52;
 	mmc->max_blk_size = 1 << 0xf;
-	mmc->max_blk_count = (ssp_is_old()) ? 0xff : 0xffffff;
-	mmc->max_req_size = (ssp_is_old()) ? 0xffff : 0xffffffff;
+	mmc->max_blk_count = (ssp_is_old(host)) ? 0xff : 0xffffff;
+	mmc->max_req_size = (ssp_is_old(host)) ? 0xffff : 0xffffffff;
 	mmc->max_seg_size = dma_get_max_seg_size(host->dmach->device->dev);
 
 	platform_set_drvdata(pdev, mmc);
 
-	ret = request_irq(host->irq, mxs_mmc_irq_handler, 0, DRIVER_NAME, host);
+	ret = devm_request_irq(&pdev->dev, irq_err, mxs_mmc_irq_handler, 0,
+			       DRIVER_NAME, host);
 	if (ret)
 		goto out_free_dma;
 
@@ -779,26 +815,20 @@
 
 	ret = mmc_add_host(mmc);
 	if (ret)
-		goto out_free_irq;
+		goto out_free_dma;
 
 	dev_info(mmc_dev(host->mmc), "initialized\n");
 
 	return 0;
 
-out_free_irq:
-	free_irq(host->irq, host);
 out_free_dma:
 	if (host->dmach)
 		dma_release_channel(host->dmach);
 out_clk_put:
 	clk_disable_unprepare(host->clk);
 	clk_put(host->clk);
-out_iounmap:
-	iounmap(host->base);
 out_mmc_free:
 	mmc_free_host(mmc);
-out_release_mem:
-	release_mem_region(iores->start, resource_size(iores));
 	return ret;
 }
 
@@ -806,12 +836,9 @@
 {
 	struct mmc_host *mmc = platform_get_drvdata(pdev);
 	struct mxs_mmc_host *host = mmc_priv(mmc);
-	struct resource *res = host->res;
 
 	mmc_remove_host(mmc);
 
-	free_irq(host->irq, host);
-
 	platform_set_drvdata(pdev, NULL);
 
 	if (host->dmach)
@@ -820,12 +847,8 @@
 	clk_disable_unprepare(host->clk);
 	clk_put(host->clk);
 
-	iounmap(host->base);
-
 	mmc_free_host(mmc);
 
-	release_mem_region(res->start, resource_size(res));
-
 	return 0;
 }
 
@@ -865,12 +888,14 @@
 static struct platform_driver mxs_mmc_driver = {
 	.probe		= mxs_mmc_probe,
 	.remove		= mxs_mmc_remove,
+	.id_table	= mxs_mmc_ids,
 	.driver		= {
 		.name	= DRIVER_NAME,
 		.owner	= THIS_MODULE,
 #ifdef CONFIG_PM
 		.pm	= &mxs_mmc_pm_ops,
 #endif
+		.of_match_table = mxs_mmc_dt_ids,
 	},
 };
 
diff --git a/drivers/mmc/host/omap.c b/drivers/mmc/host/omap.c
index 887c0e5..3e8dcf8 100644
--- a/drivers/mmc/host/omap.c
+++ b/drivers/mmc/host/omap.c
@@ -169,11 +169,11 @@
 	struct timer_list       clk_timer;
 	spinlock_t		clk_lock;     /* for changing enabled state */
 	unsigned int            fclk_enabled:1;
+	struct workqueue_struct *mmc_omap_wq;
 
 	struct omap_mmc_platform_data *pdata;
 };
 
-static struct workqueue_struct *mmc_omap_wq;
 
 static void mmc_omap_fclk_offdelay(struct mmc_omap_slot *slot)
 {
@@ -291,7 +291,7 @@
 		host->next_slot = new_slot;
 		host->mmc = new_slot->mmc;
 		spin_unlock_irqrestore(&host->slot_lock, flags);
-		queue_work(mmc_omap_wq, &host->slot_release_work);
+		queue_work(host->mmc_omap_wq, &host->slot_release_work);
 		return;
 	}
 
@@ -459,7 +459,7 @@
 	}
 
 	host->stop_data = data;
-	queue_work(mmc_omap_wq, &host->send_stop_work);
+	queue_work(host->mmc_omap_wq, &host->send_stop_work);
 }
 
 static void
@@ -639,7 +639,7 @@
 		OMAP_MMC_WRITE(host, IE, 0);
 		disable_irq(host->irq);
 		host->abort = 1;
-		queue_work(mmc_omap_wq, &host->cmd_abort_work);
+		queue_work(host->mmc_omap_wq, &host->cmd_abort_work);
 	}
 	spin_unlock_irqrestore(&host->slot_lock, flags);
 }
@@ -828,7 +828,7 @@
 		host->abort = 1;
 		OMAP_MMC_WRITE(host, IE, 0);
 		disable_irq_nosync(host->irq);
-		queue_work(mmc_omap_wq, &host->cmd_abort_work);
+		queue_work(host->mmc_omap_wq, &host->cmd_abort_work);
 		return IRQ_HANDLED;
 	}
 
@@ -1300,7 +1300,7 @@
 	.set_ios	= mmc_omap_set_ios,
 };
 
-static int __init mmc_omap_new_slot(struct mmc_omap_host *host, int id)
+static int __devinit mmc_omap_new_slot(struct mmc_omap_host *host, int id)
 {
 	struct mmc_omap_slot *slot = NULL;
 	struct mmc_host *mmc;
@@ -1389,13 +1389,13 @@
 
 	tasklet_kill(&slot->cover_tasklet);
 	del_timer_sync(&slot->cover_timer);
-	flush_workqueue(mmc_omap_wq);
+	flush_workqueue(slot->host->mmc_omap_wq);
 
 	mmc_remove_host(mmc);
 	mmc_free_host(mmc);
 }
 
-static int __init mmc_omap_probe(struct platform_device *pdev)
+static int __devinit mmc_omap_probe(struct platform_device *pdev)
 {
 	struct omap_mmc_platform_data *pdata = pdev->dev.platform_data;
 	struct mmc_omap_host *host = NULL;
@@ -1485,20 +1485,26 @@
 	}
 
 	host->nr_slots = pdata->nr_slots;
+	host->reg_shift = (cpu_is_omap7xx() ? 1 : 2);
+
+	host->mmc_omap_wq = alloc_workqueue("mmc_omap", 0, 0);
+	if (!host->mmc_omap_wq)
+		goto err_plat_cleanup;
+
 	for (i = 0; i < pdata->nr_slots; i++) {
 		ret = mmc_omap_new_slot(host, i);
 		if (ret < 0) {
 			while (--i >= 0)
 				mmc_omap_remove_slot(host->slots[i]);
 
-			goto err_plat_cleanup;
+			goto err_destroy_wq;
 		}
 	}
 
-	host->reg_shift = (cpu_is_omap7xx() ? 1 : 2);
-
 	return 0;
 
+err_destroy_wq:
+	destroy_workqueue(host->mmc_omap_wq);
 err_plat_cleanup:
 	if (pdata->cleanup)
 		pdata->cleanup(&pdev->dev);
@@ -1518,7 +1524,7 @@
 	return ret;
 }
 
-static int mmc_omap_remove(struct platform_device *pdev)
+static int __devexit mmc_omap_remove(struct platform_device *pdev)
 {
 	struct mmc_omap_host *host = platform_get_drvdata(pdev);
 	int i;
@@ -1542,6 +1548,7 @@
 	iounmap(host->virt_base);
 	release_mem_region(pdev->resource[0].start,
 			   pdev->resource[0].end - pdev->resource[0].start + 1);
+	destroy_workqueue(host->mmc_omap_wq);
 
 	kfree(host);
 
@@ -1599,7 +1606,8 @@
 #endif
 
 static struct platform_driver mmc_omap_driver = {
-	.remove		= mmc_omap_remove,
+	.probe		= mmc_omap_probe,
+	.remove		= __devexit_p(mmc_omap_remove),
 	.suspend	= mmc_omap_suspend,
 	.resume		= mmc_omap_resume,
 	.driver		= {
@@ -1608,29 +1616,7 @@
 	},
 };
 
-static int __init mmc_omap_init(void)
-{
-	int ret;
-
-	mmc_omap_wq = alloc_workqueue("mmc_omap", 0, 0);
-	if (!mmc_omap_wq)
-		return -ENOMEM;
-
-	ret = platform_driver_probe(&mmc_omap_driver, mmc_omap_probe);
-	if (ret)
-		destroy_workqueue(mmc_omap_wq);
-	return ret;
-}
-
-static void __exit mmc_omap_exit(void)
-{
-	platform_driver_unregister(&mmc_omap_driver);
-	destroy_workqueue(mmc_omap_wq);
-}
-
-module_init(mmc_omap_init);
-module_exit(mmc_omap_exit);
-
+module_platform_driver(mmc_omap_driver);
 MODULE_DESCRIPTION("OMAP Multimedia Card driver");
 MODULE_LICENSE("GPL");
 MODULE_ALIAS("platform:" DRIVER_NAME);
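
With the workqueue allocation moved into mmc_omap_probe(), the hand-written init/exit pair above becomes pure boilerplate, which is exactly what module_platform_driver() generates. Roughly, the macro expands to the equivalent of:

	static int __init mmc_omap_driver_init(void)
	{
		return platform_driver_register(&mmc_omap_driver);
	}
	module_init(mmc_omap_driver_init);

	static void __exit mmc_omap_driver_exit(void)
	{
		platform_driver_unregister(&mmc_omap_driver);
	}
	module_exit(mmc_omap_driver_exit);
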
diff --git a/drivers/mmc/host/omap_hsmmc.c b/drivers/mmc/host/omap_hsmmc.c
index 56d4499..389a3ee 100644
--- a/drivers/mmc/host/omap_hsmmc.c
+++ b/drivers/mmc/host/omap_hsmmc.c
@@ -91,6 +91,7 @@
 #define MSBS			(1 << 5)
 #define BCE			(1 << 1)
 #define FOUR_BIT		(1 << 1)
+#define DDR			(1 << 19)
 #define DW8			(1 << 5)
 #define CC			0x1
 #define TC			0x02
@@ -167,7 +168,6 @@
 	int			use_dma, dma_ch;
 	int			dma_line_tx, dma_line_rx;
 	int			slot_id;
-	int			got_dbclk;
 	int			response_busy;
 	int			context_loss;
 	int			vdd;
@@ -520,6 +520,10 @@
 	u32 con;
 
 	con = OMAP_HSMMC_READ(host->base, CON);
+	if (ios->timing == MMC_TIMING_UHS_DDR50)
+		con |= DDR;	/* configure in DDR mode */
+	else
+		con &= ~DDR;
 	switch (ios->bus_width) {
 	case MMC_BUS_WIDTH_8:
 		OMAP_HSMMC_WRITE(host->base, CON, con | DW8);
@@ -796,11 +800,12 @@
 static void omap_hsmmc_request_done(struct omap_hsmmc_host *host, struct mmc_request *mrq)
 {
 	int dma_ch;
+	unsigned long flags;
 
-	spin_lock(&host->irq_lock);
+	spin_lock_irqsave(&host->irq_lock, flags);
 	host->req_in_progress = 0;
 	dma_ch = host->dma_ch;
-	spin_unlock(&host->irq_lock);
+	spin_unlock_irqrestore(&host->irq_lock, flags);
 
 	omap_hsmmc_disable_irq(host);
 	/* Do not complete the request if DMA is still in progress */
@@ -874,13 +879,14 @@
 static void omap_hsmmc_dma_cleanup(struct omap_hsmmc_host *host, int errno)
 {
 	int dma_ch;
+	unsigned long flags;
 
 	host->data->error = errno;
 
-	spin_lock(&host->irq_lock);
+	spin_lock_irqsave(&host->irq_lock, flags);
 	dma_ch = host->dma_ch;
 	host->dma_ch = -1;
-	spin_unlock(&host->irq_lock);
+	spin_unlock_irqrestore(&host->irq_lock, flags);
 
 	if (host->use_dma && dma_ch != -1) {
 		dma_unmap_sg(mmc_dev(host->mmc), host->data->sg,
@@ -1082,7 +1088,7 @@
 
 	/* Disable the clocks */
 	pm_runtime_put_sync(host->dev);
-	if (host->got_dbclk)
+	if (host->dbclk)
 		clk_disable(host->dbclk);
 
 	/* Turn the power off */
@@ -1093,7 +1099,7 @@
 		ret = mmc_slot(host).set_power(host->dev, host->slot_id, 1,
 					       vdd);
 	pm_runtime_get_sync(host->dev);
-	if (host->got_dbclk)
+	if (host->dbclk)
 		clk_enable(host->dbclk);
 
 	if (ret != 0)
@@ -1234,6 +1240,7 @@
 	struct omap_hsmmc_host *host = cb_data;
 	struct mmc_data *data;
 	int dma_ch, req_in_progress;
+	unsigned long flags;
 
 	if (!(ch_status & OMAP_DMA_BLOCK_IRQ)) {
 		dev_warn(mmc_dev(host->mmc), "unexpected dma status %x\n",
@@ -1241,9 +1248,9 @@
 		return;
 	}
 
-	spin_lock(&host->irq_lock);
+	spin_lock_irqsave(&host->irq_lock, flags);
 	if (host->dma_ch < 0) {
-		spin_unlock(&host->irq_lock);
+		spin_unlock_irqrestore(&host->irq_lock, flags);
 		return;
 	}
 
@@ -1253,7 +1260,7 @@
 		/* Fire up the next transfer. */
 		omap_hsmmc_config_dma_params(host, data,
 					   data->sg + host->dma_sg_idx);
-		spin_unlock(&host->irq_lock);
+		spin_unlock_irqrestore(&host->irq_lock, flags);
 		return;
 	}
 
@@ -1264,7 +1271,7 @@
 	req_in_progress = host->req_in_progress;
 	dma_ch = host->dma_ch;
 	host->dma_ch = -1;
-	spin_unlock(&host->irq_lock);
+	spin_unlock_irqrestore(&host->irq_lock, flags);
 
 	omap_free_dma(dma_ch);
 
@@ -1766,7 +1773,7 @@
 		pdata->slots[0].nonremovable = true;
 		pdata->slots[0].no_regulator_off_init = true;
 	}
-	of_property_read_u32(np, "ti,bus-width", &bus_width);
+	of_property_read_u32(np, "bus-width", &bus_width);
 	if (bus_width == 4)
 		pdata->slots[0].caps |= MMC_CAP_4_BIT_DATA;
 	else if (bus_width == 8)
@@ -1885,21 +1892,17 @@
 
 	omap_hsmmc_context_save(host);
 
-	if (cpu_is_omap2430()) {
-		host->dbclk = clk_get(&pdev->dev, "mmchsdb_fck");
-		/*
-		 * MMC can still work without debounce clock.
-		 */
-		if (IS_ERR(host->dbclk))
-			dev_warn(mmc_dev(host->mmc),
-				"Failed to get debounce clock\n");
-		else
-			host->got_dbclk = 1;
-
-		if (host->got_dbclk)
-			if (clk_enable(host->dbclk) != 0)
-				dev_dbg(mmc_dev(host->mmc), "Enabling debounce"
-							" clk failed\n");
+	host->dbclk = clk_get(&pdev->dev, "mmchsdb_fck");
+	/*
+	 * MMC can still work without the debounce clock.
+	 */
+	if (IS_ERR(host->dbclk)) {
+		dev_warn(mmc_dev(host->mmc), "Failed to get debounce clk\n");
+		host->dbclk = NULL;
+	} else if (clk_enable(host->dbclk) != 0) {
+		dev_warn(mmc_dev(host->mmc), "Failed to enable debounce clk\n");
+		clk_put(host->dbclk);
+		host->dbclk = NULL;
 	}
 
 	/* Since we do only SG emulation, we can have as many segs
@@ -1969,7 +1972,7 @@
 		ret = request_threaded_irq(mmc_slot(host).card_detect_irq,
 					   NULL,
 					   omap_hsmmc_detect,
-					   IRQF_TRIGGER_RISING | IRQF_TRIGGER_FALLING,
+					   IRQF_TRIGGER_RISING | IRQF_TRIGGER_FALLING | IRQF_ONESHOT,
 					   mmc_hostname(mmc), host);
 		if (ret) {
 			dev_dbg(mmc_dev(host->mmc),
@@ -2019,7 +2022,7 @@
 	pm_runtime_put_sync(host->dev);
 	pm_runtime_disable(host->dev);
 	clk_put(host->fclk);
-	if (host->got_dbclk) {
+	if (host->dbclk) {
 		clk_disable(host->dbclk);
 		clk_put(host->dbclk);
 	}
@@ -2030,7 +2033,9 @@
 err_alloc:
 	omap_hsmmc_gpio_free(pdata);
 err:
-	release_mem_region(res->start, resource_size(res));
+	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+	if (res)
+		release_mem_region(res->start, resource_size(res));
 	return ret;
 }
 
@@ -2052,7 +2057,7 @@
 	pm_runtime_put_sync(host->dev);
 	pm_runtime_disable(host->dev);
 	clk_put(host->fclk);
-	if (host->got_dbclk) {
+	if (host->dbclk) {
 		clk_disable(host->dbclk);
 		clk_put(host->dbclk);
 	}
@@ -2110,7 +2115,7 @@
 				OMAP_HSMMC_READ(host->base, HCTL) & ~SDBP);
 	}
 
-	if (host->got_dbclk)
+	if (host->dbclk)
 		clk_disable(host->dbclk);
 err:
 	pm_runtime_put_sync(host->dev);
@@ -2131,7 +2136,7 @@
 
 	pm_runtime_get_sync(host->dev);
 
-	if (host->got_dbclk)
+	if (host->dbclk)
 		clk_enable(host->dbclk);
 
 	if (!(host->mmc->pm_flags & MMC_PM_KEEP_POWER))
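
The spin_lock() to spin_lock_irqsave() conversions above follow the usual rule for a lock taken from both interrupt and non-interrupt context: the irqsave variant saves the current local interrupt state and disables interrupts, so the helper is safe to call from either context and cannot deadlock against its own IRQ handler. A minimal sketch:

	#include <linux/spinlock.h>

	static void example_reset_dma_ch(spinlock_t *lock, int *dma_ch)
	{
		unsigned long flags;

		spin_lock_irqsave(lock, flags);		/* IRQs off, old state kept */
		*dma_ch = -1;
		spin_unlock_irqrestore(lock, flags);	/* old IRQ state restored */
	}
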
diff --git a/drivers/mmc/host/sdhci-esdhc-imx.c b/drivers/mmc/host/sdhci-esdhc-imx.c
index d190d04..ebbe984 100644
--- a/drivers/mmc/host/sdhci-esdhc-imx.c
+++ b/drivers/mmc/host/sdhci-esdhc-imx.c
@@ -71,6 +71,9 @@
 	enum imx_esdhc_type devtype;
 	struct pinctrl *pinctrl;
 	struct esdhc_platform_data boarddata;
+	struct clk *clk_ipg;
+	struct clk *clk_ahb;
+	struct clk *clk_per;
 };
 
 static struct platform_device_id imx_esdhc_devtype[] = {
@@ -404,7 +407,7 @@
 	if (!np)
 		return -ENODEV;
 
-	if (of_get_property(np, "fsl,card-wired", NULL))
+	if (of_get_property(np, "non-removable", NULL))
 		boarddata->cd_type = ESDHC_CD_PERMANENT;
 
 	if (of_get_property(np, "fsl,cd-controller", NULL))
@@ -439,7 +442,6 @@
 	struct sdhci_pltfm_host *pltfm_host;
 	struct sdhci_host *host;
 	struct esdhc_platform_data *boarddata;
-	struct clk *clk;
 	int err;
 	struct pltfm_imx_data *imx_data;
 
@@ -460,14 +462,29 @@
 	imx_data->devtype = pdev->id_entry->driver_data;
 	pltfm_host->priv = imx_data;
 
-	clk = clk_get(mmc_dev(host->mmc), NULL);
-	if (IS_ERR(clk)) {
-		dev_err(mmc_dev(host->mmc), "clk err\n");
-		err = PTR_ERR(clk);
+	imx_data->clk_ipg = devm_clk_get(&pdev->dev, "ipg");
+	if (IS_ERR(imx_data->clk_ipg)) {
+		err = PTR_ERR(imx_data->clk_ipg);
 		goto err_clk_get;
 	}
-	clk_prepare_enable(clk);
-	pltfm_host->clk = clk;
+
+	imx_data->clk_ahb = devm_clk_get(&pdev->dev, "ahb");
+	if (IS_ERR(imx_data->clk_ahb)) {
+		err = PTR_ERR(imx_data->clk_ahb);
+		goto err_clk_get;
+	}
+
+	imx_data->clk_per = devm_clk_get(&pdev->dev, "per");
+	if (IS_ERR(imx_data->clk_per)) {
+		err = PTR_ERR(imx_data->clk_per);
+		goto err_clk_get;
+	}
+
+	pltfm_host->clk = imx_data->clk_per;
+
+	clk_prepare_enable(imx_data->clk_per);
+	clk_prepare_enable(imx_data->clk_ipg);
+	clk_prepare_enable(imx_data->clk_ahb);
 
 	imx_data->pinctrl = devm_pinctrl_get_select_default(&pdev->dev);
 	if (IS_ERR(imx_data->pinctrl)) {
@@ -567,8 +584,9 @@
 no_card_detect_pin:
 no_board_data:
 pin_err:
-	clk_disable_unprepare(pltfm_host->clk);
-	clk_put(pltfm_host->clk);
+	clk_disable_unprepare(imx_data->clk_per);
+	clk_disable_unprepare(imx_data->clk_ipg);
+	clk_disable_unprepare(imx_data->clk_ahb);
 err_clk_get:
 	kfree(imx_data);
 err_imx_data:
@@ -594,8 +612,10 @@
 		gpio_free(boarddata->cd_gpio);
 	}
 
-	clk_disable_unprepare(pltfm_host->clk);
-	clk_put(pltfm_host->clk);
+	clk_disable_unprepare(imx_data->clk_per);
+	clk_disable_unprepare(imx_data->clk_ipg);
+	clk_disable_unprepare(imx_data->clk_ahb);
+
 	kfree(imx_data);
 
 	sdhci_pltfm_free(pdev);
diff --git a/drivers/mmc/host/sdhci-pltfm.c b/drivers/mmc/host/sdhci-pltfm.c
index c5c2a48..d9a4ef4 100644
--- a/drivers/mmc/host/sdhci-pltfm.c
+++ b/drivers/mmc/host/sdhci-pltfm.c
@@ -42,7 +42,8 @@
 #ifdef CONFIG_OF
 static bool sdhci_of_wp_inverted(struct device_node *np)
 {
-	if (of_get_property(np, "sdhci,wp-inverted", NULL))
+	if (of_get_property(np, "sdhci,wp-inverted", NULL) ||
+	    of_get_property(np, "wp-inverted", NULL))
 		return true;
 
 	/* Old device trees don't have the wp-inverted property. */
@@ -59,13 +60,16 @@
 	struct sdhci_host *host = platform_get_drvdata(pdev);
 	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
 	const __be32 *clk;
+	u32 bus_width;
 	int size;
 
 	if (of_device_is_available(np)) {
 		if (of_get_property(np, "sdhci,auto-cmd12", NULL))
 			host->quirks |= SDHCI_QUIRK_MULTIBLOCK_READ_ACMD12;
 
-		if (of_get_property(np, "sdhci,1-bit-only", NULL))
+		if (of_get_property(np, "sdhci,1-bit-only", NULL) ||
+		    (of_property_read_u32(np, "bus-width", &bus_width) == 0 &&
+		    bus_width == 1))
 			host->quirks |= SDHCI_QUIRK_FORCE_1_BIT_DATA;
 
 		if (sdhci_of_wp_inverted(np))
diff --git a/drivers/mmc/host/sdhci-s3c.c b/drivers/mmc/host/sdhci-s3c.c
index 55a164f..a50c205 100644
--- a/drivers/mmc/host/sdhci-s3c.c
+++ b/drivers/mmc/host/sdhci-s3c.c
@@ -404,7 +404,7 @@
 		if (sc->ext_cd_irq &&
 		    request_threaded_irq(sc->ext_cd_irq, NULL,
 					 sdhci_s3c_gpio_card_detect_thread,
-					 IRQF_TRIGGER_RISING | IRQF_TRIGGER_FALLING,
+					 IRQF_TRIGGER_RISING | IRQF_TRIGGER_FALLING | IRQF_ONESHOT,
 					 dev_name(dev), sc) == 0) {
 			int status = gpio_get_value(sc->ext_cd_gpio);
 			if (pdata->ext_cd_gpio_invert)
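
Both the omap_hsmmc and sdhci-s3c hunks add IRQF_ONESHOT to request_threaded_irq() calls that pass a NULL primary handler. With no primary handler the line has to stay masked until the threaded handler completes, and the genirq core refuses such a request without IRQF_ONESHOT. Illustrative usage (handler and name are hypothetical):

	#include <linux/interrupt.h>

	static irqreturn_t example_cd_thread_fn(int irq, void *data)
	{
		/* card-detect handling runs here, in process context */
		return IRQ_HANDLED;
	}

	static int example_request_cd_irq(int irq, void *host)
	{
		/* NULL primary handler, so IRQF_ONESHOT keeps the line masked
		 * until example_cd_thread_fn() returns */
		return request_threaded_irq(irq, NULL, example_cd_thread_fn,
					    IRQF_TRIGGER_RISING |
					    IRQF_TRIGGER_FALLING | IRQF_ONESHOT,
					    "example-cd", host);
	}
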
diff --git a/drivers/mmc/host/sdhci-spear.c b/drivers/mmc/host/sdhci-spear.c
index 6dfa82e..423da81 100644
--- a/drivers/mmc/host/sdhci-spear.c
+++ b/drivers/mmc/host/sdhci-spear.c
@@ -4,7 +4,7 @@
  * Support of SDHCI platform devices for spear soc family
  *
  * Copyright (C) 2010 ST Microelectronics
- * Viresh Kumar<viresh.kumar@st.com>
+ * Viresh Kumar <viresh.linux@gmail.com>
  *
  * Inspired by sdhci-pltfm.c
  *
@@ -75,8 +75,6 @@
 	struct spear_sdhci *sdhci;
 	int ret;
 
-	BUG_ON(pdev == NULL);
-
 	iomem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
 	if (!iomem) {
 		ret = -ENOMEM;
@@ -84,18 +82,18 @@
 		goto err;
 	}
 
-	if (!request_mem_region(iomem->start, resource_size(iomem),
-				"spear-sdhci")) {
+	if (!devm_request_mem_region(&pdev->dev, iomem->start,
+				resource_size(iomem), "spear-sdhci")) {
 		ret = -EBUSY;
 		dev_dbg(&pdev->dev, "cannot request region\n");
 		goto err;
 	}
 
-	sdhci = kzalloc(sizeof(*sdhci), GFP_KERNEL);
+	sdhci = devm_kzalloc(&pdev->dev, sizeof(*sdhci), GFP_KERNEL);
 	if (!sdhci) {
 		ret = -ENOMEM;
 		dev_dbg(&pdev->dev, "cannot allocate memory for sdhci\n");
-		goto err_kzalloc;
+		goto err;
 	}
 
 	/* clk enable */
@@ -103,13 +101,13 @@
 	if (IS_ERR(sdhci->clk)) {
 		ret = PTR_ERR(sdhci->clk);
 		dev_dbg(&pdev->dev, "Error getting clock\n");
-		goto err_clk_get;
+		goto err;
 	}
 
 	ret = clk_enable(sdhci->clk);
 	if (ret) {
 		dev_dbg(&pdev->dev, "Error enabling clock\n");
-		goto err_clk_enb;
+		goto put_clk;
 	}
 
 	/* overwrite platform_data */
@@ -124,7 +122,7 @@
 	if (IS_ERR(host)) {
 		ret = PTR_ERR(host);
 		dev_dbg(&pdev->dev, "error allocating host\n");
-		goto err_alloc_host;
+		goto disable_clk;
 	}
 
 	host->hw_name = "sdhci";
@@ -132,17 +130,18 @@
 	host->irq = platform_get_irq(pdev, 0);
 	host->quirks = SDHCI_QUIRK_BROKEN_ADMA;
 
-	host->ioaddr = ioremap(iomem->start, resource_size(iomem));
+	host->ioaddr = devm_ioremap(&pdev->dev, iomem->start,
+			resource_size(iomem));
 	if (!host->ioaddr) {
 		ret = -ENOMEM;
 		dev_dbg(&pdev->dev, "failed to remap registers\n");
-		goto err_ioremap;
+		goto free_host;
 	}
 
 	ret = sdhci_add_host(host);
 	if (ret) {
 		dev_dbg(&pdev->dev, "error adding host\n");
-		goto err_add_host;
+		goto free_host;
 	}
 
 	platform_set_drvdata(pdev, host);
@@ -161,11 +160,12 @@
 	if (sdhci->data->card_power_gpio >= 0) {
 		int val = 0;
 
-		ret = gpio_request(sdhci->data->card_power_gpio, "sdhci");
+		ret = devm_gpio_request(&pdev->dev,
+				sdhci->data->card_power_gpio, "sdhci");
 		if (ret < 0) {
 			dev_dbg(&pdev->dev, "gpio request fail: %d\n",
 					sdhci->data->card_power_gpio);
-			goto err_pgpio_request;
+			goto set_drvdata;
 		}
 
 		if (sdhci->data->power_always_enb)
@@ -177,60 +177,48 @@
 		if (ret) {
 			dev_dbg(&pdev->dev, "gpio set direction fail: %d\n",
 					sdhci->data->card_power_gpio);
-			goto err_pgpio_direction;
+			goto set_drvdata;
 		}
 	}
 
 	if (sdhci->data->card_int_gpio >= 0) {
-		ret = gpio_request(sdhci->data->card_int_gpio, "sdhci");
+		ret = devm_gpio_request(&pdev->dev, sdhci->data->card_int_gpio,
+				"sdhci");
 		if (ret < 0) {
 			dev_dbg(&pdev->dev, "gpio request fail: %d\n",
 					sdhci->data->card_int_gpio);
-			goto err_igpio_request;
+			goto set_drvdata;
 		}
 
 		ret = gpio_direction_input(sdhci->data->card_int_gpio);
 		if (ret) {
 			dev_dbg(&pdev->dev, "gpio set direction fail: %d\n",
 					sdhci->data->card_int_gpio);
-			goto err_igpio_direction;
+			goto set_drvdata;
 		}
-		ret = request_irq(gpio_to_irq(sdhci->data->card_int_gpio),
+		ret = devm_request_irq(&pdev->dev,
+				gpio_to_irq(sdhci->data->card_int_gpio),
 				sdhci_gpio_irq, IRQF_TRIGGER_LOW,
 				mmc_hostname(host->mmc), pdev);
 		if (ret) {
 			dev_dbg(&pdev->dev, "gpio request irq fail: %d\n",
 					sdhci->data->card_int_gpio);
-			goto err_igpio_request_irq;
+			goto set_drvdata;
 		}
 
 	}
 
 	return 0;
 
-err_igpio_request_irq:
-err_igpio_direction:
-	if (sdhci->data->card_int_gpio >= 0)
-		gpio_free(sdhci->data->card_int_gpio);
-err_igpio_request:
-err_pgpio_direction:
-	if (sdhci->data->card_power_gpio >= 0)
-		gpio_free(sdhci->data->card_power_gpio);
-err_pgpio_request:
+set_drvdata:
 	platform_set_drvdata(pdev, NULL);
 	sdhci_remove_host(host, 1);
-err_add_host:
-	iounmap(host->ioaddr);
-err_ioremap:
+free_host:
 	sdhci_free_host(host);
-err_alloc_host:
+disable_clk:
 	clk_disable(sdhci->clk);
-err_clk_enb:
+put_clk:
 	clk_put(sdhci->clk);
-err_clk_get:
-	kfree(sdhci);
-err_kzalloc:
-	release_mem_region(iomem->start, resource_size(iomem));
 err:
 	dev_err(&pdev->dev, "spear-sdhci probe failed: %d\n", ret);
 	return ret;
@@ -239,35 +227,19 @@
 static int __devexit sdhci_remove(struct platform_device *pdev)
 {
 	struct sdhci_host *host = platform_get_drvdata(pdev);
-	struct resource *iomem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
 	struct spear_sdhci *sdhci = dev_get_platdata(&pdev->dev);
-	int dead;
+	int dead = 0;
 	u32 scratch;
 
-	if (sdhci->data) {
-		if (sdhci->data->card_int_gpio >= 0) {
-			free_irq(gpio_to_irq(sdhci->data->card_int_gpio), pdev);
-			gpio_free(sdhci->data->card_int_gpio);
-		}
-
-		if (sdhci->data->card_power_gpio >= 0)
-			gpio_free(sdhci->data->card_power_gpio);
-	}
-
 	platform_set_drvdata(pdev, NULL);
-	dead = 0;
 	scratch = readl(host->ioaddr + SDHCI_INT_STATUS);
 	if (scratch == (u32)-1)
 		dead = 1;
 
 	sdhci_remove_host(host, dead);
-	iounmap(host->ioaddr);
 	sdhci_free_host(host);
 	clk_disable(sdhci->clk);
 	clk_put(sdhci->clk);
-	kfree(sdhci);
-	if (iomem)
-		release_mem_region(iomem->start, resource_size(iomem));
 
 	return 0;
 }
@@ -317,5 +289,5 @@
 module_platform_driver(sdhci_driver);
 
 MODULE_DESCRIPTION("SPEAr Secure Digital Host Controller Interface driver");
-MODULE_AUTHOR("Viresh Kumar <viresh.kumar@st.com>");
+MODULE_AUTHOR("Viresh Kumar <viresh.linux@gmail.com>");
 MODULE_LICENSE("GPL v2");
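
The sdhci-spear rework above is a straight move to managed (devm_*) resources: memory regions, allocations, ioremaps, GPIOs and IRQs obtained through the devm_ helpers are released automatically when probe fails or the device is unbound, which is why the long err_* unwind ladder and most of the remove path can simply go away. A compressed sketch of the idea, with hypothetical names:

	#include <linux/device.h>
	#include <linux/io.h>
	#include <linux/platform_device.h>
	#include <linux/slab.h>

	struct example_priv {
		void __iomem *base;
	};

	static int example_probe(struct platform_device *pdev)
	{
		struct example_priv *priv;
		struct resource *iomem;

		priv = devm_kzalloc(&pdev->dev, sizeof(*priv), GFP_KERNEL);
		if (!priv)
			return -ENOMEM;

		iomem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
		if (!iomem)
			return -ENODEV;		/* priv is freed automatically */

		priv->base = devm_ioremap(&pdev->dev, iomem->start,
					  resource_size(iomem));
		if (!priv->base)
			return -ENOMEM;		/* earlier devm resources unwound */

		/* nothing above needs a matching release in an error path
		 * or in remove() */
		return 0;
	}
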
diff --git a/drivers/mmc/host/sdhci-tegra.c b/drivers/mmc/host/sdhci-tegra.c
index ff5a169..b38d8a7 100644
--- a/drivers/mmc/host/sdhci-tegra.c
+++ b/drivers/mmc/host/sdhci-tegra.c
@@ -32,8 +32,13 @@
 
 #include "sdhci-pltfm.h"
 
+/* Tegra SDHOST controller vendor register definitions */
+#define SDHCI_TEGRA_VENDOR_MISC_CTRL		0x120
+#define SDHCI_MISC_CTRL_ENABLE_SDHCI_SPEC_300	0x20
+
 #define NVQUIRK_FORCE_SDHCI_SPEC_200	BIT(0)
 #define NVQUIRK_ENABLE_BLOCK_GAP_DET	BIT(1)
+#define NVQUIRK_ENABLE_SDHCI_SPEC_300	BIT(2)
 
 struct sdhci_tegra_soc_data {
 	struct sdhci_pltfm_data *pdata;
@@ -120,6 +125,25 @@
 	return IRQ_HANDLED;
 };
 
+static void tegra_sdhci_reset_exit(struct sdhci_host *host, u8 mask)
+{
+	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
+	struct sdhci_tegra *tegra_host = pltfm_host->priv;
+	const struct sdhci_tegra_soc_data *soc_data = tegra_host->soc_data;
+
+	if (!(mask & SDHCI_RESET_ALL))
+		return;
+
+	/* Erratum: Enable SDHCI spec v3.00 support */
+	if (soc_data->nvquirks & NVQUIRK_ENABLE_SDHCI_SPEC_300) {
+		u32 misc_ctrl;
+
+		misc_ctrl = sdhci_readb(host, SDHCI_TEGRA_VENDOR_MISC_CTRL);
+		misc_ctrl |= SDHCI_MISC_CTRL_ENABLE_SDHCI_SPEC_300;
+		sdhci_writeb(host, misc_ctrl, SDHCI_TEGRA_VENDOR_MISC_CTRL);
+	}
+}
+
 static int tegra_sdhci_8bit(struct sdhci_host *host, int bus_width)
 {
 	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
@@ -148,6 +172,7 @@
 	.read_w     = tegra_sdhci_readw,
 	.write_l    = tegra_sdhci_writel,
 	.platform_8bit_width = tegra_sdhci_8bit,
+	.platform_reset_exit = tegra_sdhci_reset_exit,
 };
 
 #ifdef CONFIG_ARCH_TEGRA_2x_SOC
@@ -178,6 +203,7 @@
 
 static struct sdhci_tegra_soc_data soc_data_tegra30 = {
 	.pdata = &sdhci_tegra30_pdata,
+	.nvquirks = NVQUIRK_ENABLE_SDHCI_SPEC_300,
 };
 #endif
 
diff --git a/drivers/mmc/host/sdhci.c b/drivers/mmc/host/sdhci.c
index ccefdeb..f4b8b4d 100644
--- a/drivers/mmc/host/sdhci.c
+++ b/drivers/mmc/host/sdhci.c
@@ -680,8 +680,8 @@
 	}
 
 	if (count >= 0xF) {
-		pr_warning("%s: Too large timeout requested for CMD%d!\n",
-		       mmc_hostname(host->mmc), cmd->opcode);
+		DBG("%s: Too large timeout 0x%x requested for CMD%d!\n",
+		    mmc_hostname(host->mmc), count, cmd->opcode);
 		count = 0xE;
 	}
 
diff --git a/drivers/mtd/Kconfig b/drivers/mtd/Kconfig
index 5760c1a..27143e0 100644
--- a/drivers/mtd/Kconfig
+++ b/drivers/mtd/Kconfig
@@ -128,7 +128,7 @@
 
 config MTD_OF_PARTS
 	tristate "OpenFirmware partitioning information support"
-	default Y
+	default y
 	depends on OF
 	help
 	  This provides a partition parsing function which derives
diff --git a/drivers/mtd/bcm63xxpart.c b/drivers/mtd/bcm63xxpart.c
index 608321e..63d2a64 100644
--- a/drivers/mtd/bcm63xxpart.c
+++ b/drivers/mtd/bcm63xxpart.c
@@ -4,7 +4,7 @@
  * Copyright © 2006-2008  Florian Fainelli <florian@openwrt.org>
  *			  Mike Albon <malbon@openwrt.org>
  * Copyright © 2009-2010  Daniel Dickinson <openwrt@cshore.neomailbox.net>
- * Copyright © 2011 Jonas Gorski <jonas.gorski@gmail.com>
+ * Copyright © 2011-2012  Jonas Gorski <jonas.gorski@gmail.com>
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License as published by
@@ -82,6 +82,7 @@
 	int namelen = 0;
 	int i;
 	u32 computed_crc;
+	bool rootfs_first = false;
 
 	if (bcm63xx_detect_cfe(master))
 		return -EINVAL;
@@ -109,6 +110,7 @@
 		char *boardid = &(buf->board_id[0]);
 		char *tagversion = &(buf->tag_version[0]);
 
+		sscanf(buf->flash_image_start, "%u", &rootfsaddr);
 		sscanf(buf->kernel_address, "%u", &kerneladdr);
 		sscanf(buf->kernel_length, "%u", &kernellen);
 		sscanf(buf->total_length, "%u", &totallen);
@@ -117,10 +119,19 @@
 			tagversion, boardid);
 
 		kerneladdr = kerneladdr - BCM63XX_EXTENDED_SIZE;
-		rootfsaddr = kerneladdr + kernellen;
+		rootfsaddr = rootfsaddr - BCM63XX_EXTENDED_SIZE;
 		spareaddr = roundup(totallen, master->erasesize) + cfelen;
 		sparelen = master->size - spareaddr - nvramlen;
-		rootfslen = spareaddr - rootfsaddr;
+
+		if (rootfsaddr < kerneladdr) {
+			/* default Broadcom layout */
+			rootfslen = kerneladdr - rootfsaddr;
+			rootfs_first = true;
+		} else {
+			/* OpenWrt layout */
+			rootfsaddr = kerneladdr + kernellen;
+			rootfslen = spareaddr - rootfsaddr;
+		}
 	} else {
 		pr_warn("CFE boot tag CRC invalid (expected %08x, actual %08x)\n",
 			buf->header_crc, computed_crc);
@@ -156,18 +167,26 @@
 	curpart++;
 
 	if (kernellen > 0) {
-		parts[curpart].name = "kernel";
-		parts[curpart].offset = kerneladdr;
-		parts[curpart].size = kernellen;
+		int kernelpart = curpart;
+
+		if (rootfslen > 0 && rootfs_first)
+			kernelpart++;
+		parts[kernelpart].name = "kernel";
+		parts[kernelpart].offset = kerneladdr;
+		parts[kernelpart].size = kernellen;
 		curpart++;
 	}
 
 	if (rootfslen > 0) {
-		parts[curpart].name = "rootfs";
-		parts[curpart].offset = rootfsaddr;
-		parts[curpart].size = rootfslen;
-		if (sparelen > 0)
-			parts[curpart].size += sparelen;
+		int rootfspart = curpart;
+
+		if (kernellen > 0 && rootfs_first)
+			rootfspart--;
+		parts[rootfspart].name = "rootfs";
+		parts[rootfspart].offset = rootfsaddr;
+		parts[rootfspart].size = rootfslen;
+		if (sparelen > 0  && !rootfs_first)
+			parts[rootfspart].size += sparelen;
 		curpart++;
 	}
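
A worked example of the new layout detection, using hypothetical CFE tag values: suppose the tag yields rootfsaddr = 0x00100000 and kerneladdr = 0x00800000 after both are reduced by BCM63XX_EXTENDED_SIZE. Since rootfsaddr < kerneladdr this is the stock Broadcom layout with the rootfs in front of the kernel, so rootfslen = kerneladdr - rootfsaddr = 0x00700000, rootfs_first is set, and the kernel and rootfs partition slots are swapped when the table is filled in. If instead flash_image_start pointed at or past the kernel, the previous OpenWrt behaviour is kept: the rootfs is assumed to start right after the kernel and extend up to the spare area.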
 
diff --git a/drivers/mtd/chips/cfi_cmdset_0002.c b/drivers/mtd/chips/cfi_cmdset_0002.c
index d02592e..22d0493 100644
--- a/drivers/mtd/chips/cfi_cmdset_0002.c
+++ b/drivers/mtd/chips/cfi_cmdset_0002.c
@@ -317,7 +317,7 @@
 
 	if ((cfi->cfiq->EraseRegionInfo[0] & 0xffff) == 0x003f) {
 		cfi->cfiq->EraseRegionInfo[0] |= 0x0040;
-		pr_warning("%s: Bad S29GL064N CFI data, adjust from 64 to 128 sectors\n", mtd->name);
+		pr_warning("%s: Bad S29GL064N CFI data; adjust from 64 to 128 sectors\n", mtd->name);
 	}
 }
 
@@ -328,10 +328,23 @@
 
 	if ((cfi->cfiq->EraseRegionInfo[1] & 0xffff) == 0x007e) {
 		cfi->cfiq->EraseRegionInfo[1] &= ~0x0040;
-		pr_warning("%s: Bad S29GL032N CFI data, adjust from 127 to 63 sectors\n", mtd->name);
+		pr_warning("%s: Bad S29GL032N CFI data; adjust from 127 to 63 sectors\n", mtd->name);
 	}
 }
 
+static void fixup_s29ns512p_sectors(struct mtd_info *mtd)
+{
+	struct map_info *map = mtd->priv;
+	struct cfi_private *cfi = map->fldrv_priv;
+
+	/*
+	 * S29NS512P flash uses more than 8 bits to report the number of
+	 * sectors, which is not permitted by CFI.
+	 */
+	cfi->cfiq->EraseRegionInfo[0] = 0x020001ff;
+	pr_warning("%s: Bad S29NS512P CFI data; adjust to 512 sectors\n", mtd->name);
+}
+
 /* Used to fix CFI-Tables of chips without Extended Query Tables */
 static struct cfi_fixup cfi_nopri_fixup_table[] = {
 	{ CFI_MFR_SST, 0x234a, fixup_sst39vf }, /* SST39VF1602 */
@@ -362,6 +375,7 @@
 	{ CFI_MFR_AMD, 0x1301, fixup_s29gl064n_sectors },
 	{ CFI_MFR_AMD, 0x1a00, fixup_s29gl032n_sectors },
 	{ CFI_MFR_AMD, 0x1a01, fixup_s29gl032n_sectors },
+	{ CFI_MFR_AMD, 0x3f00, fixup_s29ns512p_sectors },
 	{ CFI_MFR_SST, 0x536a, fixup_sst38vf640x_sectorsize }, /* SST38VF6402 */
 	{ CFI_MFR_SST, 0x536b, fixup_sst38vf640x_sectorsize }, /* SST38VF6401 */
 	{ CFI_MFR_SST, 0x536c, fixup_sst38vf640x_sectorsize }, /* SST38VF6404 */
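For reference, CFI encodes each erase-region descriptor with the block count minus one in the low 16 bits and the block size in units of 256 bytes in the high 16 bits, so the 0x020001ff written by the new fixup describes 512 blocks of 128 KiB (a 512 Mbit part). A quick stand-alone decode, assuming that standard CFI encoding:

	#include <stdio.h>
	#include <stdint.h>

	int main(void)
	{
		uint32_t info = 0x020001ff;	/* value forced by the fixup above */
		unsigned int nblocks = (info & 0xffff) + 1;
		unsigned int blksize = (info >> 16) * 256;

		printf("%u erase blocks of %u bytes (%u MiB)\n",
		       nblocks, blksize, (nblocks * blksize) >> 20);
		return 0;
	}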
diff --git a/drivers/mtd/cmdlinepart.c b/drivers/mtd/cmdlinepart.c
index ddf9ec6..4558e0f 100644
--- a/drivers/mtd/cmdlinepart.c
+++ b/drivers/mtd/cmdlinepart.c
@@ -70,7 +70,7 @@
 /* mtdpart_setup() parses into here */
 static struct cmdline_mtd_partition *partitions;
 
-/* the command line passed to mtdpart_setupd() */
+/* the command line passed to mtdpart_setup() */
 static char *cmdline;
 static int cmdline_parsed = 0;
 
diff --git a/drivers/mtd/devices/block2mtd.c b/drivers/mtd/devices/block2mtd.c
index a4a80b7..681e2ee 100644
--- a/drivers/mtd/devices/block2mtd.c
+++ b/drivers/mtd/devices/block2mtd.c
@@ -52,8 +52,6 @@
 
 	while (pages) {
 		page = page_read(mapping, index);
-		if (!page)
-			return -ENOMEM;
 		if (IS_ERR(page))
 			return PTR_ERR(page);
 
@@ -112,8 +110,6 @@
 		len = len - cpylen;
 
 		page = page_read(dev->blkdev->bd_inode->i_mapping, index);
-		if (!page)
-			return -ENOMEM;
 		if (IS_ERR(page))
 			return PTR_ERR(page);
 
@@ -148,8 +144,6 @@
 		len = len - cpylen;
 
 		page = page_read(mapping, index);
-		if (!page)
-			return -ENOMEM;
 		if (IS_ERR(page))
 			return PTR_ERR(page);
 
@@ -271,7 +265,6 @@
 	dev->mtd.flags = MTD_CAP_RAM;
 	dev->mtd._erase = block2mtd_erase;
 	dev->mtd._write = block2mtd_write;
-	dev->mtd._writev = mtd_writev;
 	dev->mtd._sync = block2mtd_sync;
 	dev->mtd._read = block2mtd_read;
 	dev->mtd.priv = dev;
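page_read() here is a thin wrapper around read_mapping_page(), which reports failure through an ERR_PTR-encoded pointer rather than NULL, so the dropped NULL checks were dead code and IS_ERR()/PTR_ERR() is the whole contract. A user-space re-creation of that idiom, as a sketch (the kernel's real helpers live in <linux/err.h>):

	#include <stdio.h>
	#include <errno.h>

	#define MAX_ERRNO	4095

	static inline void *ERR_PTR(long error) { return (void *)error; }
	static inline long PTR_ERR(const void *ptr) { return (long)ptr; }
	static inline int IS_ERR(const void *ptr)
	{
		return (unsigned long)ptr >= (unsigned long)-MAX_ERRNO;
	}

	/* Stand-in for page_read(): a valid pointer or ERR_PTR(-E...), never NULL. */
	static void *fake_page_read(int fail)
	{
		static char page[4096];

		return fail ? ERR_PTR(-EIO) : (void *)page;
	}

	int main(void)
	{
		void *page = fake_page_read(1);

		if (IS_ERR(page))	/* no separate NULL check needed */
			printf("read failed: %ld\n", PTR_ERR(page));
		return 0;
	}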
diff --git a/drivers/mtd/devices/docg3.c b/drivers/mtd/devices/docg3.c
index 50aa90a..f70854d 100644
--- a/drivers/mtd/devices/docg3.c
+++ b/drivers/mtd/devices/docg3.c
@@ -227,7 +227,7 @@
 	u8 data8, *dst8;
 
 	doc_dbg("doc_read_data_area(buf=%p, len=%d)\n", buf, len);
-	cdr = len & 0x3;
+	cdr = len & 0x1;
 	len4 = len - cdr;
 
 	if (first)
@@ -732,12 +732,24 @@
  * @len: the number of bytes to be read (must be a multiple of 4)
  * @buf: the buffer to be filled in (or NULL is forget bytes)
  * @first: 1 if first time read, DOC_READADDRESS should be set
+ * @last_odd: 1 if last read ended up on an odd byte
+ *
+ * Reads bytes from a prepared page. There is a subtlety here: if the last read
+ * ended on an odd offset in the 1024-byte double page, i.e. between the two
+ * planes, the first byte must be read separately. If a 16-bit word read were
+ * used instead, it would return the plane 2 byte in both the low and the high
+ * byte, corrupting the read.
  *
  */
 static int doc_read_page_getbytes(struct docg3 *docg3, int len, u_char *buf,
-				  int first)
+				  int first, int last_odd)
 {
-	doc_read_data_area(docg3, buf, len, first);
+	if (last_odd && len > 0) {
+		doc_read_data_area(docg3, buf, 1, first);
+		doc_read_data_area(docg3, buf ? buf + 1 : buf, len - 1, 0);
+	} else {
+		doc_read_data_area(docg3, buf, len, first);
+	}
 	doc_delay(docg3, 2);
 	return len;
 }
@@ -850,6 +862,7 @@
 	u8 *buf = ops->datbuf;
 	size_t len, ooblen, nbdata, nboob;
 	u8 hwecc[DOC_ECC_BCH_SIZE], eccconf1;
+	int max_bitflips = 0;
 
 	if (buf)
 		len = ops->len;
@@ -876,7 +889,7 @@
 	ret = 0;
 	skip = from % DOC_LAYOUT_PAGE_SIZE;
 	mutex_lock(&docg3->cascade->lock);
-	while (!ret && (len > 0 || ooblen > 0)) {
+	while (ret >= 0 && (len > 0 || ooblen > 0)) {
 		calc_block_sector(from - skip, &block0, &block1, &page, &ofs,
 			docg3->reliable);
 		nbdata = min_t(size_t, len, DOC_LAYOUT_PAGE_SIZE - skip);
@@ -887,20 +900,20 @@
 		ret = doc_read_page_ecc_init(docg3, DOC_ECC_BCH_TOTAL_BYTES);
 		if (ret < 0)
 			goto err_in_read;
-		ret = doc_read_page_getbytes(docg3, skip, NULL, 1);
+		ret = doc_read_page_getbytes(docg3, skip, NULL, 1, 0);
 		if (ret < skip)
 			goto err_in_read;
-		ret = doc_read_page_getbytes(docg3, nbdata, buf, 0);
+		ret = doc_read_page_getbytes(docg3, nbdata, buf, 0, skip % 2);
 		if (ret < nbdata)
 			goto err_in_read;
 		doc_read_page_getbytes(docg3,
 				       DOC_LAYOUT_PAGE_SIZE - nbdata - skip,
-				       NULL, 0);
-		ret = doc_read_page_getbytes(docg3, nboob, oobbuf, 0);
+				       NULL, 0, (skip + nbdata) % 2);
+		ret = doc_read_page_getbytes(docg3, nboob, oobbuf, 0, 0);
 		if (ret < nboob)
 			goto err_in_read;
 		doc_read_page_getbytes(docg3, DOC_LAYOUT_OOB_SIZE - nboob,
-				       NULL, 0);
+				       NULL, 0, nboob % 2);
 
 		doc_get_bch_hw_ecc(docg3, hwecc);
 		eccconf1 = doc_register_readb(docg3, DOC_ECCCONF1);
@@ -936,7 +949,8 @@
 			}
 			if (ret > 0) {
 				mtd->ecc_stats.corrected += ret;
-				ret = -EUCLEAN;
+				max_bitflips = max(max_bitflips, ret);
+				ret = max_bitflips;
 			}
 		}
 
@@ -1004,7 +1018,7 @@
 						     DOC_LAYOUT_PAGE_SIZE);
 		if (!ret)
 			doc_read_page_getbytes(docg3, DOC_LAYOUT_PAGE_SIZE,
-					       buf, 1);
+					       buf, 1, 0);
 		buf += DOC_LAYOUT_PAGE_SIZE;
 	}
 	doc_read_page_finish(docg3);
@@ -1064,10 +1078,10 @@
 	ret = doc_reset_seq(docg3);
 	if (!ret)
 		ret = doc_read_page_prepare(docg3, block0, block1, page,
-					    ofs + DOC_LAYOUT_WEAR_OFFSET);
+					    ofs + DOC_LAYOUT_WEAR_OFFSET, 0);
 	if (!ret)
 		ret = doc_read_page_getbytes(docg3, DOC_LAYOUT_WEAR_SIZE,
-					     buf, 1);
+					     buf, 1, 0);
 	doc_read_page_finish(docg3);
 
 	if (ret || (buf[0] != DOC_ERASE_MARK) || (buf[2] != DOC_ERASE_MARK))
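The new last_odd parameter above exists because the docg3 double page interleaves two 512-byte planes; when the previous read stopped on an odd offset, a 16-bit access would straddle the plane boundary, so the first byte is fetched on its own and the rest follows as an even-aligned read. A small sketch of that split, with doc_read_data_area() replaced by a stub that just logs the request sizes:

	#include <stdio.h>

	static void read_area(char *buf, int len, int first)
	{
		printf("read %d byte(s)%s\n", len, first ? " (first)" : "");
	}

	static int getbytes(char *buf, int len, int first, int last_odd)
	{
		if (last_odd && len > 0) {
			read_area(buf, 1, first);
			read_area(buf ? buf + 1 : buf, len - 1, 0);
		} else {
			read_area(buf, len, first);
		}
		return len;
	}

	int main(void)
	{
		char buf[16];

		getbytes(buf, 7, 0, 1);	/* previous read ended on an odd byte */
		return 0;
	}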
diff --git a/drivers/mtd/devices/m25p80.c b/drivers/mtd/devices/m25p80.c
index 1924d24..5d0d68c 100644
--- a/drivers/mtd/devices/m25p80.c
+++ b/drivers/mtd/devices/m25p80.c
@@ -639,12 +639,16 @@
 	{ "en25q32b", INFO(0x1c3016, 0, 64 * 1024,  64, 0) },
 	{ "en25p64", INFO(0x1c2017, 0, 64 * 1024, 128, 0) },
 
+	/* Everspin */
+	{ "mr25h256", CAT25_INFO(  32 * 1024, 1, 256, 2) },
+
 	/* Intel/Numonyx -- xxxs33b */
 	{ "160s33b",  INFO(0x898911, 0, 64 * 1024,  32, 0) },
 	{ "320s33b",  INFO(0x898912, 0, 64 * 1024,  64, 0) },
 	{ "640s33b",  INFO(0x898913, 0, 64 * 1024, 128, 0) },
 
 	/* Macronix */
+	{ "mx25l2005a",  INFO(0xc22012, 0, 64 * 1024,   4, SECT_4K) },
 	{ "mx25l4005a",  INFO(0xc22013, 0, 64 * 1024,   8, SECT_4K) },
 	{ "mx25l8005",   INFO(0xc22014, 0, 64 * 1024,  16, 0) },
 	{ "mx25l1606e",  INFO(0xc22015, 0, 64 * 1024,  32, SECT_4K) },
@@ -728,6 +732,7 @@
 	{ "w25q32", INFO(0xef4016, 0, 64 * 1024,  64, SECT_4K) },
 	{ "w25x64", INFO(0xef3017, 0, 64 * 1024, 128, SECT_4K) },
 	{ "w25q64", INFO(0xef4017, 0, 64 * 1024, 128, SECT_4K) },
+	{ "w25q80", INFO(0xef5014, 0, 64 * 1024,  16, SECT_4K) },
 
 	/* Catalyst / On Semiconductor -- non-JEDEC */
 	{ "cat25c11", CAT25_INFO(  16, 8, 16, 1) },
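Each INFO() entry in this table pairs a JEDEC id with a sector size and sector count, and the reported capacity is simply their product; the new w25q80 line, for instance, describes sixteen 64 KiB sectors, i.e. 1 MiB (8 Mbit). A trivial check of that arithmetic:

	#include <stdio.h>

	int main(void)
	{
		unsigned int sector_size = 64 * 1024;	/* w25q80 entry above */
		unsigned int n_sectors = 16;
		unsigned int bytes = sector_size * n_sectors;

		printf("%u KiB (%u Mbit)\n", bytes / 1024,
		       bytes / (1024 * 1024) * 8);
		return 0;
	}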
diff --git a/drivers/mtd/devices/spear_smi.c b/drivers/mtd/devices/spear_smi.c
index 797d43c..6796036 100644
--- a/drivers/mtd/devices/spear_smi.c
+++ b/drivers/mtd/devices/spear_smi.c
@@ -990,9 +990,9 @@
 		goto err_clk;
 	}
 
-	ret = clk_enable(dev->clk);
+	ret = clk_prepare_enable(dev->clk);
 	if (ret)
-		goto err_clk_enable;
+		goto err_clk_prepare_enable;
 
 	ret = request_irq(irq, spear_smi_int_handler, 0, pdev->name, dev);
 	if (ret) {
@@ -1020,8 +1020,8 @@
 	free_irq(irq, dev);
 	platform_set_drvdata(pdev, NULL);
 err_irq:
-	clk_disable(dev->clk);
-err_clk_enable:
+	clk_disable_unprepare(dev->clk);
+err_clk_prepare_enable:
 	clk_put(dev->clk);
 err_clk:
 	iounmap(dev->io_base);
@@ -1074,7 +1074,7 @@
 	irq = platform_get_irq(pdev, 0);
 	free_irq(irq, dev);
 
-	clk_disable(dev->clk);
+	clk_disable_unprepare(dev->clk);
 	clk_put(dev->clk);
 	iounmap(dev->io_base);
 	kfree(dev);
@@ -1091,7 +1091,7 @@
 	struct spear_smi *dev = platform_get_drvdata(pdev);
 
 	if (dev && dev->clk)
-		clk_disable(dev->clk);
+		clk_disable_unprepare(dev->clk);
 
 	return 0;
 }
@@ -1102,7 +1102,7 @@
 	int ret = -EPERM;
 
 	if (dev && dev->clk)
-		ret = clk_enable(dev->clk);
+		ret = clk_prepare_enable(dev->clk);
 
 	if (!ret)
 		spear_smi_hw_init(dev);
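Under the common clock framework a clock has to be prepared (possibly sleeping) before it may be enabled (atomic), which is why every clk_enable()/clk_disable() pair in this driver is converted as a unit. The combined helpers behave roughly as sketched below, with the clk_* primitives stubbed out so the example runs in user space:

	#include <stdio.h>

	static int clk_prepare(void *clk)    { puts("prepare");   return 0; }
	static int clk_enable(void *clk)     { puts("enable");    return 0; }
	static void clk_disable(void *clk)   { puts("disable");   }
	static void clk_unprepare(void *clk) { puts("unprepare"); }

	static int clk_prepare_enable(void *clk)
	{
		int ret = clk_prepare(clk);

		if (ret)
			return ret;
		ret = clk_enable(clk);
		if (ret)
			clk_unprepare(clk);
		return ret;
	}

	static void clk_disable_unprepare(void *clk)
	{
		clk_disable(clk);
		clk_unprepare(clk);
	}

	int main(void)
	{
		void *clk = NULL;	/* placeholder handle */

		if (!clk_prepare_enable(clk))
			clk_disable_unprepare(clk);
		return 0;
	}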
diff --git a/drivers/mtd/lpddr/qinfo_probe.c b/drivers/mtd/lpddr/qinfo_probe.c
index dbfe17b..45abed6 100644
--- a/drivers/mtd/lpddr/qinfo_probe.c
+++ b/drivers/mtd/lpddr/qinfo_probe.c
@@ -57,7 +57,7 @@
 
 static long lpddr_get_qinforec_pos(struct map_info *map, char *id_str)
 {
-	int qinfo_lines = sizeof(qinfo_array)/sizeof(struct qinfo_query_info);
+	int qinfo_lines = ARRAY_SIZE(qinfo_array);
 	int i;
 	int bankwidth = map_bankwidth(map) * 8;
 	int major, minor;
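ARRAY_SIZE() computes the same value as the open-coded sizeof division it replaces, just more readably (and, in the kernel, with a build-time check that the argument really is an array). A one-liner to confirm the equivalence:

	#include <stdio.h>

	#define ARRAY_SIZE(a)	(sizeof(a) / sizeof((a)[0]))

	struct qinfo_query_info { int dummy; };

	int main(void)
	{
		struct qinfo_query_info qinfo_array[7];	/* size is hypothetical */

		printf("%zu == %zu\n", ARRAY_SIZE(qinfo_array),
		       sizeof(qinfo_array) / sizeof(struct qinfo_query_info));
		return 0;
	}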
diff --git a/drivers/mtd/maps/Kconfig b/drivers/mtd/maps/Kconfig
index 8af67cf..5ba2458 100644
--- a/drivers/mtd/maps/Kconfig
+++ b/drivers/mtd/maps/Kconfig
@@ -224,7 +224,7 @@
 
 config MTD_SCB2_FLASH
 	tristate "BIOS flash chip on Intel SCB2 boards"
-	depends on X86 && MTD_JEDECPROBE
+	depends on X86 && MTD_JEDECPROBE && PCI
 	help
 	  Support for treating the BIOS flash chip on Intel SCB2 boards
 	  as an MTD device - with this you can reprogram your BIOS.
diff --git a/drivers/mtd/maps/intel_vr_nor.c b/drivers/mtd/maps/intel_vr_nor.c
index 92e1f41..93f0317 100644
--- a/drivers/mtd/maps/intel_vr_nor.c
+++ b/drivers/mtd/maps/intel_vr_nor.c
@@ -260,18 +260,7 @@
 	.id_table = vr_nor_pci_ids,
 };
 
-static int __init vr_nor_mtd_init(void)
-{
-	return pci_register_driver(&vr_nor_pci_driver);
-}
-
-static void __exit vr_nor_mtd_exit(void)
-{
-	pci_unregister_driver(&vr_nor_pci_driver);
-}
-
-module_init(vr_nor_mtd_init);
-module_exit(vr_nor_mtd_exit);
+module_pci_driver(vr_nor_pci_driver);
 
 MODULE_AUTHOR("Andy Lowe");
 MODULE_DESCRIPTION("MTD map driver for NOR flash on Intel Vermilion Range");
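module_pci_driver() generates essentially the init/exit boilerplate deleted here (and in several drivers later in this series). A simplified sketch of what the macro expands to for this driver; the real definition lives in <linux/pci.h> and adds section annotations:

	/* Roughly equivalent to module_pci_driver(vr_nor_pci_driver): */
	static int __init vr_nor_pci_driver_init(void)
	{
		return pci_register_driver(&vr_nor_pci_driver);
	}
	module_init(vr_nor_pci_driver_init);

	static void __exit vr_nor_pci_driver_exit(void)
	{
		pci_unregister_driver(&vr_nor_pci_driver);
	}
	module_exit(vr_nor_pci_driver_exit);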
diff --git a/drivers/mtd/maps/lantiq-flash.c b/drivers/mtd/maps/lantiq-flash.c
index b5401e3..c03456f 100644
--- a/drivers/mtd/maps/lantiq-flash.c
+++ b/drivers/mtd/maps/lantiq-flash.c
@@ -19,9 +19,9 @@
 #include <linux/mtd/cfi.h>
 #include <linux/platform_device.h>
 #include <linux/mtd/physmap.h>
+#include <linux/of.h>
 
 #include <lantiq_soc.h>
-#include <lantiq_platform.h>
 
 /*
  * The NOR flash is connected to the same external bus unit (EBU) as PCI.
@@ -44,8 +44,9 @@
 	struct map_info *map;
 };
 
-static char ltq_map_name[] = "ltq_nor";
-static const char *ltq_probe_types[] __devinitconst = { "cmdlinepart", NULL };
+static const char ltq_map_name[] = "ltq_nor";
+static const char *ltq_probe_types[] __devinitconst = {
+					"cmdlinepart", "ofpart", NULL };
 
 static map_word
 ltq_read16(struct map_info *map, unsigned long adr)
@@ -108,44 +109,40 @@
 	spin_unlock_irqrestore(&ebu_lock, flags);
 }
 
-static int __init
+static int __devinit
 ltq_mtd_probe(struct platform_device *pdev)
 {
-	struct physmap_flash_data *ltq_mtd_data = dev_get_platdata(&pdev->dev);
+	struct mtd_part_parser_data ppdata;
 	struct ltq_mtd *ltq_mtd;
-	struct resource *res;
 	struct cfi_private *cfi;
 	int err;
 
+	if (of_machine_is_compatible("lantiq,falcon") &&
+			(ltq_boot_select() != BS_FLASH)) {
+		dev_err(&pdev->dev, "invalid bootstrap options\n");
+		return -ENODEV;
+	}
+
 	ltq_mtd = kzalloc(sizeof(struct ltq_mtd), GFP_KERNEL);
 	platform_set_drvdata(pdev, ltq_mtd);
 
 	ltq_mtd->res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
 	if (!ltq_mtd->res) {
-		dev_err(&pdev->dev, "failed to get memory resource");
+		dev_err(&pdev->dev, "failed to get memory resource\n");
 		err = -ENOENT;
 		goto err_out;
 	}
 
-	res = devm_request_mem_region(&pdev->dev, ltq_mtd->res->start,
-		resource_size(ltq_mtd->res), dev_name(&pdev->dev));
-	if (!ltq_mtd->res) {
-		dev_err(&pdev->dev, "failed to request mem resource");
+	ltq_mtd->map = kzalloc(sizeof(struct map_info), GFP_KERNEL);
+	ltq_mtd->map->phys = ltq_mtd->res->start;
+	ltq_mtd->map->size = resource_size(ltq_mtd->res);
+	ltq_mtd->map->virt = devm_request_and_ioremap(&pdev->dev, ltq_mtd->res);
+	if (!ltq_mtd->map->virt) {
+		dev_err(&pdev->dev, "failed to remap mem resource\n");
 		err = -EBUSY;
 		goto err_out;
 	}
 
-	ltq_mtd->map = kzalloc(sizeof(struct map_info), GFP_KERNEL);
-	ltq_mtd->map->phys = res->start;
-	ltq_mtd->map->size = resource_size(res);
-	ltq_mtd->map->virt = devm_ioremap_nocache(&pdev->dev,
-				ltq_mtd->map->phys, ltq_mtd->map->size);
-	if (!ltq_mtd->map->virt) {
-		dev_err(&pdev->dev, "failed to ioremap!\n");
-		err = -ENOMEM;
-		goto err_free;
-	}
-
 	ltq_mtd->map->name = ltq_map_name;
 	ltq_mtd->map->bankwidth = 2;
 	ltq_mtd->map->read = ltq_read16;
@@ -169,9 +166,9 @@
 	cfi->addr_unlock1 ^= 1;
 	cfi->addr_unlock2 ^= 1;
 
-	err = mtd_device_parse_register(ltq_mtd->mtd, ltq_probe_types, NULL,
-					ltq_mtd_data->parts,
-					ltq_mtd_data->nr_parts);
+	ppdata.of_node = pdev->dev.of_node;
+	err = mtd_device_parse_register(ltq_mtd->mtd, ltq_probe_types,
+					&ppdata, NULL, 0);
 	if (err) {
 		dev_err(&pdev->dev, "failed to add partitions\n");
 		goto err_destroy;
@@ -204,32 +201,23 @@
 	return 0;
 }
 
+static const struct of_device_id ltq_mtd_match[] = {
+	{ .compatible = "lantiq,nor" },
+	{},
+};
+MODULE_DEVICE_TABLE(of, ltq_mtd_match);
+
 static struct platform_driver ltq_mtd_driver = {
+	.probe = ltq_mtd_probe,
 	.remove = __devexit_p(ltq_mtd_remove),
 	.driver = {
-		.name = "ltq_nor",
+		.name = "ltq-nor",
 		.owner = THIS_MODULE,
+		.of_match_table = ltq_mtd_match,
 	},
 };
 
-static int __init
-init_ltq_mtd(void)
-{
-	int ret = platform_driver_probe(&ltq_mtd_driver, ltq_mtd_probe);
-
-	if (ret)
-		pr_err("ltq_nor: error registering platform driver");
-	return ret;
-}
-
-static void __exit
-exit_ltq_mtd(void)
-{
-	platform_driver_unregister(&ltq_mtd_driver);
-}
-
-module_init(init_ltq_mtd);
-module_exit(exit_ltq_mtd);
+module_platform_driver(ltq_mtd_driver);
 
 MODULE_LICENSE("GPL");
 MODULE_AUTHOR("John Crispin <blogic@openwrt.org>");
diff --git a/drivers/mtd/maps/pci.c b/drivers/mtd/maps/pci.c
index 1d005a3..f14ce0a 100644
--- a/drivers/mtd/maps/pci.c
+++ b/drivers/mtd/maps/pci.c
@@ -352,18 +352,7 @@
 	.id_table =	mtd_pci_ids,
 };
 
-static int __init mtd_pci_maps_init(void)
-{
-	return pci_register_driver(&mtd_pci_driver);
-}
-
-static void __exit mtd_pci_maps_exit(void)
-{
-	pci_unregister_driver(&mtd_pci_driver);
-}
-
-module_init(mtd_pci_maps_init);
-module_exit(mtd_pci_maps_exit);
+module_pci_driver(mtd_pci_driver);
 
 MODULE_LICENSE("GPL");
 MODULE_AUTHOR("Russell King <rmk@arm.linux.org.uk>");
diff --git a/drivers/mtd/maps/scb2_flash.c b/drivers/mtd/maps/scb2_flash.c
index 934a72c..9dcbc68 100644
--- a/drivers/mtd/maps/scb2_flash.c
+++ b/drivers/mtd/maps/scb2_flash.c
@@ -234,20 +234,7 @@
 	.remove =   __devexit_p(scb2_flash_remove),
 };
 
-static int __init
-scb2_flash_init(void)
-{
-	return pci_register_driver(&scb2_flash_driver);
-}
-
-static void __exit
-scb2_flash_exit(void)
-{
-	pci_unregister_driver(&scb2_flash_driver);
-}
-
-module_init(scb2_flash_init);
-module_exit(scb2_flash_exit);
+module_pci_driver(scb2_flash_driver);
 
 MODULE_LICENSE("GPL");
 MODULE_AUTHOR("Tim Hockin <thockin@sun.com>");
diff --git a/drivers/mtd/maps/wr_sbc82xx_flash.c b/drivers/mtd/maps/wr_sbc82xx_flash.c
index 71b0ba7..e7534c8 100644
--- a/drivers/mtd/maps/wr_sbc82xx_flash.c
+++ b/drivers/mtd/maps/wr_sbc82xx_flash.c
@@ -59,7 +59,7 @@
 	}
 };
 
-static const char *part_probes[] __initdata = {"cmdlinepart", "RedBoot", NULL};
+static const char *part_probes[] __initconst = {"cmdlinepart", "RedBoot", NULL};
 
 #define init_sbc82xx_one_flash(map, br, or)			\
 do {								\
diff --git a/drivers/mtd/mtdcore.c b/drivers/mtd/mtdcore.c
index c837507..5757307 100644
--- a/drivers/mtd/mtdcore.c
+++ b/drivers/mtd/mtdcore.c
@@ -250,6 +250,43 @@
 }
 static DEVICE_ATTR(name, S_IRUGO, mtd_name_show, NULL);
 
+static ssize_t mtd_ecc_strength_show(struct device *dev,
+				     struct device_attribute *attr, char *buf)
+{
+	struct mtd_info *mtd = dev_get_drvdata(dev);
+
+	return snprintf(buf, PAGE_SIZE, "%u\n", mtd->ecc_strength);
+}
+static DEVICE_ATTR(ecc_strength, S_IRUGO, mtd_ecc_strength_show, NULL);
+
+static ssize_t mtd_bitflip_threshold_show(struct device *dev,
+					  struct device_attribute *attr,
+					  char *buf)
+{
+	struct mtd_info *mtd = dev_get_drvdata(dev);
+
+	return snprintf(buf, PAGE_SIZE, "%u\n", mtd->bitflip_threshold);
+}
+
+static ssize_t mtd_bitflip_threshold_store(struct device *dev,
+					   struct device_attribute *attr,
+					   const char *buf, size_t count)
+{
+	struct mtd_info *mtd = dev_get_drvdata(dev);
+	unsigned int bitflip_threshold;
+	int retval;
+
+	retval = kstrtouint(buf, 0, &bitflip_threshold);
+	if (retval)
+		return retval;
+
+	mtd->bitflip_threshold = bitflip_threshold;
+	return count;
+}
+static DEVICE_ATTR(bitflip_threshold, S_IRUGO | S_IWUSR,
+		   mtd_bitflip_threshold_show,
+		   mtd_bitflip_threshold_store);
+
 static struct attribute *mtd_attrs[] = {
 	&dev_attr_type.attr,
 	&dev_attr_flags.attr,
@@ -260,6 +297,8 @@
 	&dev_attr_oobsize.attr,
 	&dev_attr_numeraseregions.attr,
 	&dev_attr_name.attr,
+	&dev_attr_ecc_strength.attr,
+	&dev_attr_bitflip_threshold.attr,
 	NULL,
 };
 
@@ -322,6 +361,10 @@
 	mtd->index = i;
 	mtd->usecount = 0;
 
+	/* default value if not set by driver */
+	if (mtd->bitflip_threshold == 0)
+		mtd->bitflip_threshold = mtd->ecc_strength;
+
 	if (is_power_of_2(mtd->erasesize))
 		mtd->erasesize_shift = ffs(mtd->erasesize) - 1;
 	else
@@ -757,12 +800,24 @@
 int mtd_read(struct mtd_info *mtd, loff_t from, size_t len, size_t *retlen,
 	     u_char *buf)
 {
+	int ret_code;
 	*retlen = 0;
 	if (from < 0 || from > mtd->size || len > mtd->size - from)
 		return -EINVAL;
 	if (!len)
 		return 0;
-	return mtd->_read(mtd, from, len, retlen, buf);
+
+	/*
+	 * In the absence of an error, drivers return a non-negative integer
+	 * representing the maximum number of bitflips that were corrected on
+	 * any one ecc region (if applicable; zero otherwise).
+	 */
+	ret_code = mtd->_read(mtd, from, len, retlen, buf);
+	if (unlikely(ret_code < 0))
+		return ret_code;
+	if (mtd->ecc_strength == 0)
+		return 0;	/* device lacks ecc */
+	return ret_code >= mtd->bitflip_threshold ? -EUCLEAN : 0;
 }
 EXPORT_SYMBOL_GPL(mtd_read);
 
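With this change a driver's _read() reports the largest number of bitflips it corrected in any single ECC step, and mtd_read() only converts that into -EUCLEAN once the count reaches bitflip_threshold (which registration defaults to ecc_strength). A stand-alone sketch of that policy, with invented numbers:

	#include <stdio.h>
	#include <errno.h>

	static int classify(int ret_code, unsigned int ecc_strength,
			    unsigned int bitflip_threshold)
	{
		if (ret_code < 0)
			return ret_code;	/* hard error from the driver */
		if (ecc_strength == 0)
			return 0;		/* device has no ECC */
		return ret_code >= (int)bitflip_threshold ? -EUCLEAN : 0;
	}

	int main(void)
	{
		printf("%d\n", classify(3, 4, 4));	/* below threshold -> 0 */
		printf("%d\n", classify(4, 4, 4));	/* at threshold -> -EUCLEAN */
		return 0;
	}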
diff --git a/drivers/mtd/mtdoops.c b/drivers/mtd/mtdoops.c
index ae36d7e..551e316 100644
--- a/drivers/mtd/mtdoops.c
+++ b/drivers/mtd/mtdoops.c
@@ -304,32 +304,17 @@
 }
 
 static void mtdoops_do_dump(struct kmsg_dumper *dumper,
-		enum kmsg_dump_reason reason, const char *s1, unsigned long l1,
-		const char *s2, unsigned long l2)
+			    enum kmsg_dump_reason reason)
 {
 	struct mtdoops_context *cxt = container_of(dumper,
 			struct mtdoops_context, dump);
-	unsigned long s1_start, s2_start;
-	unsigned long l1_cpy, l2_cpy;
-	char *dst;
-
-	if (reason != KMSG_DUMP_OOPS &&
-	    reason != KMSG_DUMP_PANIC)
-		return;
 
 	/* Only dump oopses if dump_oops is set */
 	if (reason == KMSG_DUMP_OOPS && !dump_oops)
 		return;
 
-	dst = cxt->oops_buf + MTDOOPS_HEADER_SIZE; /* Skip the header */
-	l2_cpy = min(l2, record_size - MTDOOPS_HEADER_SIZE);
-	l1_cpy = min(l1, record_size - MTDOOPS_HEADER_SIZE - l2_cpy);
-
-	s2_start = l2 - l2_cpy;
-	s1_start = l1 - l1_cpy;
-
-	memcpy(dst, s1 + s1_start, l1_cpy);
-	memcpy(dst + l1_cpy, s2 + s2_start, l2_cpy);
+	kmsg_dump_get_buffer(dumper, true, cxt->oops_buf + MTDOOPS_HEADER_SIZE,
+			     record_size - MTDOOPS_HEADER_SIZE, NULL);
 
 	/* Panics must be written immediately */
 	if (reason != KMSG_DUMP_OOPS)
@@ -375,6 +360,7 @@
 		return;
 	}
 
+	cxt->dump.max_reason = KMSG_DUMP_OOPS;
 	cxt->dump.dump = mtdoops_do_dump;
 	err = kmsg_dump_register(&cxt->dump);
 	if (err) {
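The old dump callback was handed two string/length pairs and had to stitch the tail of the log together by hand; kmsg_dump_get_buffer() now fills the caller's buffer with the most recent lines directly, and max_reason lets the core filter out reasons the handler does not care about. A hedged, uncompiled sketch of a minimal dumper using this interface (names here are illustrative, not from the patch):

	static char my_buf[4096];

	static void my_dump(struct kmsg_dumper *dumper, enum kmsg_dump_reason reason)
	{
		/* Grab as much of the newest log text as fits in my_buf. */
		kmsg_dump_get_buffer(dumper, true, my_buf, sizeof(my_buf), NULL);
		/* ... hand my_buf off to persistent storage ... */
	}

	static struct kmsg_dumper my_dumper = {
		.dump = my_dump,
		.max_reason = KMSG_DUMP_OOPS,	/* as set for mtdoops above */
	};
	/* registered with kmsg_dump_register(&my_dumper) at init time */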
diff --git a/drivers/mtd/mtdpart.c b/drivers/mtd/mtdpart.c
index 9651c06..d518e4d 100644
--- a/drivers/mtd/mtdpart.c
+++ b/drivers/mtd/mtdpart.c
@@ -67,12 +67,12 @@
 	stats = part->master->ecc_stats;
 	res = part->master->_read(part->master, from + part->offset, len,
 				  retlen, buf);
-	if (unlikely(res)) {
-		if (mtd_is_bitflip(res))
-			mtd->ecc_stats.corrected += part->master->ecc_stats.corrected - stats.corrected;
-		if (mtd_is_eccerr(res))
-			mtd->ecc_stats.failed += part->master->ecc_stats.failed - stats.failed;
-	}
+	if (unlikely(mtd_is_eccerr(res)))
+		mtd->ecc_stats.failed +=
+			part->master->ecc_stats.failed - stats.failed;
+	else
+		mtd->ecc_stats.corrected +=
+			part->master->ecc_stats.corrected - stats.corrected;
 	return res;
 }
 
@@ -517,6 +517,8 @@
 
 	slave->mtd.ecclayout = master->ecclayout;
 	slave->mtd.ecc_strength = master->ecc_strength;
+	slave->mtd.bitflip_threshold = master->bitflip_threshold;
+
 	if (master->_block_isbad) {
 		uint64_t offs = 0;
 
diff --git a/drivers/mtd/nand/Kconfig b/drivers/mtd/nand/Kconfig
index 7d17cec..31bb7e5 100644
--- a/drivers/mtd/nand/Kconfig
+++ b/drivers/mtd/nand/Kconfig
@@ -115,6 +115,46 @@
           Support for NAND flash on Texas Instruments OMAP2, OMAP3 and OMAP4
 	  platforms.
 
+config MTD_NAND_OMAP_BCH
+	depends on MTD_NAND && MTD_NAND_OMAP2 && ARCH_OMAP3
+	bool "Enable support for hardware BCH error correction"
+	default n
+	select BCH
+	select BCH_CONST_PARAMS
+	help
+	 Support for hardware BCH error correction.
+
+choice
+	prompt "BCH error correction capability"
+	depends on MTD_NAND_OMAP_BCH
+
+config MTD_NAND_OMAP_BCH8
+	bool "8 bits / 512 bytes (recommended)"
+	help
+	 Support correcting up to 8 bitflips per 512-byte block.
+	 This will use 13 bytes of spare area per 512 bytes of page data.
+	 This is the recommended mode, as 4-bit mode does not work
+	 on some OMAP3 revisions, due to a hardware bug.
+
+config MTD_NAND_OMAP_BCH4
+	bool "4 bits / 512 bytes"
+	help
+	 Support correcting up to 4 bitflips per 512-byte block.
+	 This will use 7 bytes of spare area per 512 bytes of page data.
+	 Note that this mode does not work on some OMAP3 revisions, due to a
+	 hardware bug. Please check your OMAP datasheet before selecting this
+	 mode.
+
+endchoice
+
+if MTD_NAND_OMAP_BCH
+config BCH_CONST_M
+	default 13
+config BCH_CONST_T
+	default 4 if MTD_NAND_OMAP_BCH4
+	default 8 if MTD_NAND_OMAP_BCH8
+endif
+
 config MTD_NAND_IDS
 	tristate
 
@@ -440,7 +480,7 @@
 
 config MTD_NAND_GPMI_NAND
         bool "GPMI NAND Flash Controller driver"
-        depends on MTD_NAND && (SOC_IMX23 || SOC_IMX28)
+        depends on MTD_NAND && (SOC_IMX23 || SOC_IMX28 || SOC_IMX6Q)
         help
 	 Enables NAND Flash support for IMX23 or IMX28.
 	 The GPMI controller is very powerful, with the help of BCH
diff --git a/drivers/mtd/nand/alauda.c b/drivers/mtd/nand/alauda.c
index 4f20e1d..60a0dfd 100644
--- a/drivers/mtd/nand/alauda.c
+++ b/drivers/mtd/nand/alauda.c
@@ -414,7 +414,7 @@
 	}
 	err = 0;
 	if (corrected)
-		err = -EUCLEAN;
+		err = 1;	/* return max_bitflips per ecc step */
 	if (uncorrected)
 		err = -EBADMSG;
 out:
@@ -446,7 +446,7 @@
 	}
 	err = 0;
 	if (corrected)
-		err = -EUCLEAN;
+		err = 1;	/* return max_bitflips per ecc step */
 	if (uncorrected)
 		err = -EBADMSG;
 	return err;
diff --git a/drivers/mtd/nand/atmel_nand.c b/drivers/mtd/nand/atmel_nand.c
index 2165576..97ac671 100644
--- a/drivers/mtd/nand/atmel_nand.c
+++ b/drivers/mtd/nand/atmel_nand.c
@@ -324,9 +324,10 @@
  * mtd:        mtd info structure
  * chip:       nand chip info structure
  * buf:        buffer to store read data
+ * oob_required:    caller expects OOB data read to chip->oob_poi
  */
-static int atmel_nand_read_page(struct mtd_info *mtd,
-		struct nand_chip *chip, uint8_t *buf, int page)
+static int atmel_nand_read_page(struct mtd_info *mtd, struct nand_chip *chip,
+				uint8_t *buf, int oob_required, int page)
 {
 	int eccsize = chip->ecc.size;
 	int eccbytes = chip->ecc.bytes;
@@ -335,6 +336,7 @@
 	uint8_t *oob = chip->oob_poi;
 	uint8_t *ecc_pos;
 	int stat;
+	unsigned int max_bitflips = 0;
 
 	/*
 	 * Errata: ALE is incorrectly wired up to the ECC controller
@@ -371,10 +373,12 @@
 	/* check if there's an error */
 	stat = chip->ecc.correct(mtd, p, oob, NULL);
 
-	if (stat < 0)
+	if (stat < 0) {
 		mtd->ecc_stats.failed++;
-	else
+	} else {
 		mtd->ecc_stats.corrected += stat;
+		max_bitflips = max_t(unsigned int, max_bitflips, stat);
+	}
 
 	/* get back to oob start (end of page) */
 	chip->cmdfunc(mtd, NAND_CMD_RNDOUT, mtd->writesize, -1);
@@ -382,7 +386,7 @@
 	/* read the oob */
 	chip->read_buf(mtd, oob, mtd->oobsize);
 
-	return 0;
+	return max_bitflips;
 }
 
 /*
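Several of the read_page() conversions in this series follow the same pattern shown here: each ECC step's correction count feeds ecc_stats.corrected, and the page-level return value is the worst single step, which mtd_read() later compares against bitflip_threshold. A tiny user-space sketch of that accumulation with made-up per-step results:

	#include <stdio.h>

	/* Pretend ECC correction result per 512-byte step (values invented). */
	static int correct_step(int step)
	{
		return step == 2 ? 3 : 1;
	}

	int main(void)
	{
		unsigned int max_bitflips = 0, corrected = 0;
		int step, stat;

		for (step = 0; step < 4; step++) {
			stat = correct_step(step);
			if (stat < 0)
				continue;	/* would count as an ECC failure */
			corrected += stat;
			if ((unsigned int)stat > max_bitflips)
				max_bitflips = stat;
		}
		printf("corrected %u bits, max_bitflips %u\n",
		       corrected, max_bitflips);
		return 0;
	}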
diff --git a/drivers/mtd/nand/au1550nd.c b/drivers/mtd/nand/au1550nd.c
index 73abbc3..9f609d2 100644
--- a/drivers/mtd/nand/au1550nd.c
+++ b/drivers/mtd/nand/au1550nd.c
@@ -508,8 +508,6 @@
 	this->chip_delay = 30;
 	this->ecc.mode = NAND_ECC_SOFT;
 
-	this->options = NAND_NO_AUTOINCR;
-
 	if (pd->devwidth)
 		this->options |= NAND_BUSWIDTH_16;
 
diff --git a/drivers/mtd/nand/bcm_umi_bch.c b/drivers/mtd/nand/bcm_umi_bch.c
index a930666..5914bb3 100644
--- a/drivers/mtd/nand/bcm_umi_bch.c
+++ b/drivers/mtd/nand/bcm_umi_bch.c
@@ -22,9 +22,9 @@
 
 /* ---- Private Function Prototypes -------------------------------------- */
 static int bcm_umi_bch_read_page_hwecc(struct mtd_info *mtd,
-	struct nand_chip *chip, uint8_t *buf, int page);
+	struct nand_chip *chip, uint8_t *buf, int oob_required, int page);
 static void bcm_umi_bch_write_page_hwecc(struct mtd_info *mtd,
-	struct nand_chip *chip, const uint8_t *buf);
+	struct nand_chip *chip, const uint8_t *buf, int oob_required);
 
 /* ---- Private Variables ------------------------------------------------ */
 
@@ -103,11 +103,12 @@
 *  @mtd:	mtd info structure
 *  @chip:	nand chip info structure
 *  @buf:	buffer to store read data
+*  @oob_required:	caller expects OOB data read to chip->oob_poi
 *
 ***************************************************************************/
 static int bcm_umi_bch_read_page_hwecc(struct mtd_info *mtd,
 				       struct nand_chip *chip, uint8_t * buf,
-						 int page)
+				       int oob_required, int page)
 {
 	int sectorIdx = 0;
 	int eccsize = chip->ecc.size;
@@ -116,6 +117,7 @@
 	uint8_t eccCalc[NAND_ECC_NUM_BYTES];
 	int sectorOobSize = mtd->oobsize / eccsteps;
 	int stat;
+	unsigned int max_bitflips = 0;
 
 	for (sectorIdx = 0; sectorIdx < eccsteps;
 			sectorIdx++, datap += eccsize) {
@@ -177,9 +179,10 @@
 			}
 #endif
 			mtd->ecc_stats.corrected += stat;
+			max_bitflips = max_t(unsigned int, max_bitflips, stat);
 		}
 	}
-	return 0;
+	return max_bitflips;
 }
 
 /****************************************************************************
@@ -188,10 +191,11 @@
 *  @mtd:	mtd info structure
 *  @chip:	nand chip info structure
 *  @buf:	data buffer
+*  @oob_required:	must write chip->oob_poi to OOB
 *
 ***************************************************************************/
 static void bcm_umi_bch_write_page_hwecc(struct mtd_info *mtd,
-	struct nand_chip *chip, const uint8_t *buf)
+	struct nand_chip *chip, const uint8_t *buf, int oob_required)
 {
 	int sectorIdx = 0;
 	int eccsize = chip->ecc.size;
diff --git a/drivers/mtd/nand/bcm_umi_nand.c b/drivers/mtd/nand/bcm_umi_nand.c
index 6908cdd..c855e7c 100644
--- a/drivers/mtd/nand/bcm_umi_nand.c
+++ b/drivers/mtd/nand/bcm_umi_nand.c
@@ -341,7 +341,7 @@
 	 * for MLC parts which may have permanently stuck bits.
 	 */
 	struct nand_chip *chip = mtd->priv;
-	int ret = chip->ecc.read_page(mtd, chip, readbackbuf, 0);
+	int ret = chip->ecc.read_page(mtd, chip, readbackbuf, 0, 0);
 	if (ret < 0)
 		return -EFAULT;
 	else {
@@ -476,12 +476,7 @@
 		this->badblock_pattern = &largepage_bbt;
 	}
 
-	/*
-	 * FIXME: ecc strength value of 6 bits per 512 bytes of data is a
-	 * conservative guess, given 13 ecc bytes and using bch alg.
-	 * (Assume Galois field order m=15 to allow a margin of error.)
-	 */
-	this->ecc.strength = 6;
+	this->ecc.strength = 8;
 
 #endif
 
diff --git a/drivers/mtd/nand/bf5xx_nand.c b/drivers/mtd/nand/bf5xx_nand.c
index d7b86b9..3f1c185 100644
--- a/drivers/mtd/nand/bf5xx_nand.c
+++ b/drivers/mtd/nand/bf5xx_nand.c
@@ -558,7 +558,7 @@
 }
 
 static int bf5xx_nand_read_page_raw(struct mtd_info *mtd, struct nand_chip *chip,
-		uint8_t *buf, int page)
+		uint8_t *buf, int oob_required, int page)
 {
 	bf5xx_nand_read_buf(mtd, buf, mtd->writesize);
 	bf5xx_nand_read_buf(mtd, chip->oob_poi, mtd->oobsize);
@@ -567,7 +567,7 @@
 }
 
 static void bf5xx_nand_write_page_raw(struct mtd_info *mtd, struct nand_chip *chip,
-		const uint8_t *buf)
+		const uint8_t *buf, int oob_required)
 {
 	bf5xx_nand_write_buf(mtd, buf, mtd->writesize);
 	bf5xx_nand_write_buf(mtd, chip->oob_poi, mtd->oobsize);
diff --git a/drivers/mtd/nand/cafe_nand.c b/drivers/mtd/nand/cafe_nand.c
index 2a96e1a..41371ba 100644
--- a/drivers/mtd/nand/cafe_nand.c
+++ b/drivers/mtd/nand/cafe_nand.c
@@ -364,25 +364,27 @@
 
 /* Don't use -- use nand_read_oob_std for now */
 static int cafe_nand_read_oob(struct mtd_info *mtd, struct nand_chip *chip,
-			      int page, int sndcmd)
+			      int page)
 {
 	chip->cmdfunc(mtd, NAND_CMD_READOOB, 0, page);
 	chip->read_buf(mtd, chip->oob_poi, mtd->oobsize);
-	return 1;
+	return 0;
 }
 /**
  * cafe_nand_read_page_syndrome - [REPLACEABLE] hardware ecc syndrome based page read
  * @mtd:	mtd info structure
  * @chip:	nand chip info structure
  * @buf:	buffer to store read data
+ * @oob_required:	caller expects OOB data read to chip->oob_poi
  *
  * The hw generator calculates the error syndrome automatically. Therefor
  * we need a special oob layout and handling.
  */
 static int cafe_nand_read_page(struct mtd_info *mtd, struct nand_chip *chip,
-			       uint8_t *buf, int page)
+			       uint8_t *buf, int oob_required, int page)
 {
 	struct cafe_priv *cafe = mtd->priv;
+	unsigned int max_bitflips = 0;
 
 	cafe_dev_dbg(&cafe->pdev->dev, "ECC result %08x SYN1,2 %08x\n",
 		     cafe_readl(cafe, NAND_ECC_RESULT),
@@ -449,10 +451,11 @@
 		} else {
 			dev_dbg(&cafe->pdev->dev, "Corrected %d symbol errors\n", n);
 			mtd->ecc_stats.corrected += n;
+			max_bitflips = max_t(unsigned int, max_bitflips, n);
 		}
 	}
 
-	return 0;
+	return max_bitflips;
 }
 
 static struct nand_ecclayout cafe_oobinfo_2048 = {
@@ -518,7 +521,8 @@
 
 
 static void cafe_nand_write_page_lowlevel(struct mtd_info *mtd,
-					  struct nand_chip *chip, const uint8_t *buf)
+					  struct nand_chip *chip,
+					  const uint8_t *buf, int oob_required)
 {
 	struct cafe_priv *cafe = mtd->priv;
 
@@ -530,16 +534,17 @@
 }
 
 static int cafe_nand_write_page(struct mtd_info *mtd, struct nand_chip *chip,
-				const uint8_t *buf, int page, int cached, int raw)
+				const uint8_t *buf, int oob_required, int page,
+				int cached, int raw)
 {
 	int status;
 
 	chip->cmdfunc(mtd, NAND_CMD_SEQIN, 0x00, page);
 
 	if (unlikely(raw))
-		chip->ecc.write_page_raw(mtd, chip, buf);
+		chip->ecc.write_page_raw(mtd, chip, buf, oob_required);
 	else
-		chip->ecc.write_page(mtd, chip, buf);
+		chip->ecc.write_page(mtd, chip, buf, oob_required);
 
 	/*
 	 * Cached progamming disabled for now, Not sure if its worth the
@@ -685,7 +690,7 @@
 
 	/* Enable the following for a flash based bad block table */
 	cafe->nand.bbt_options = NAND_BBT_USE_FLASH;
-	cafe->nand.options = NAND_NO_AUTOINCR | NAND_OWN_BUFFERS;
+	cafe->nand.options = NAND_OWN_BUFFERS;
 
 	if (skipbbt) {
 		cafe->nand.options |= NAND_SKIP_BBTSCAN;
@@ -888,17 +893,7 @@
 	.resume = cafe_nand_resume,
 };
 
-static int __init cafe_nand_init(void)
-{
-	return pci_register_driver(&cafe_nand_pci_driver);
-}
-
-static void __exit cafe_nand_exit(void)
-{
-	pci_unregister_driver(&cafe_nand_pci_driver);
-}
-module_init(cafe_nand_init);
-module_exit(cafe_nand_exit);
+module_pci_driver(cafe_nand_pci_driver);
 
 MODULE_LICENSE("GPL");
 MODULE_AUTHOR("David Woodhouse <dwmw2@infradead.org>");
diff --git a/drivers/mtd/nand/cs553x_nand.c b/drivers/mtd/nand/cs553x_nand.c
index 821c34c..adb6c3e 100644
--- a/drivers/mtd/nand/cs553x_nand.c
+++ b/drivers/mtd/nand/cs553x_nand.c
@@ -240,7 +240,6 @@
 
 	/* Enable the following for a flash based bad block table */
 	this->bbt_options = NAND_BBT_USE_FLASH;
-	this->options = NAND_NO_AUTOINCR;
 
 	/* Scan to find existence of the device */
 	if (nand_scan(new_mtd, 1)) {
diff --git a/drivers/mtd/nand/denali.c b/drivers/mtd/nand/denali.c
index a9e57d6..0650aaf 100644
--- a/drivers/mtd/nand/denali.c
+++ b/drivers/mtd/nand/denali.c
@@ -924,9 +924,10 @@
 #define ECC_LAST_ERR(x)		((x) & ERR_CORRECTION_INFO__LAST_ERR_INFO)
 
 static bool handle_ecc(struct denali_nand_info *denali, uint8_t *buf,
-					uint32_t irq_status)
+		       uint32_t irq_status, unsigned int *max_bitflips)
 {
 	bool check_erased_page = false;
+	unsigned int bitflips = 0;
 
 	if (irq_status & INTR_STATUS__ECC_ERR) {
 		/* read the ECC errors. we'll ignore them for now */
@@ -965,6 +966,7 @@
 					/* correct the ECC error */
 					buf[offset] ^= err_correction_value;
 					denali->mtd.ecc_stats.corrected++;
+					bitflips++;
 				}
 			} else {
 				/* if the error is not correctable, need to
@@ -984,6 +986,7 @@
 		clear_interrupts(denali);
 		denali_set_intr_modes(denali, true);
 	}
+	*max_bitflips = bitflips;
 	return check_erased_page;
 }
 
@@ -1084,7 +1087,7 @@
  * by write_page above.
  * */
 static void denali_write_page(struct mtd_info *mtd, struct nand_chip *chip,
-				const uint8_t *buf)
+				const uint8_t *buf, int oob_required)
 {
 	/* for regular page writes, we let HW handle all the ECC
 	 * data written to the device. */
@@ -1096,7 +1099,7 @@
  * write_page() function above.
  */
 static void denali_write_page_raw(struct mtd_info *mtd, struct nand_chip *chip,
-					const uint8_t *buf)
+					const uint8_t *buf, int oob_required)
 {
 	/* for raw page writes, we want to disable ECC and simply write
 	   whatever data is in the buffer. */
@@ -1110,17 +1113,17 @@
 }
 
 static int denali_read_oob(struct mtd_info *mtd, struct nand_chip *chip,
-			   int page, int sndcmd)
+			   int page)
 {
 	read_oob_data(mtd, chip->oob_poi, page);
 
-	return 0; /* notify NAND core to send command to
-			   NAND device. */
+	return 0;
 }
 
 static int denali_read_page(struct mtd_info *mtd, struct nand_chip *chip,
-			    uint8_t *buf, int page)
+			    uint8_t *buf, int oob_required, int page)
 {
+	unsigned int max_bitflips;
 	struct denali_nand_info *denali = mtd_to_denali(mtd);
 
 	dma_addr_t addr = denali->buf.dma_buf;
@@ -1153,7 +1156,7 @@
 
 	memcpy(buf, denali->buf.buf, mtd->writesize);
 
-	check_erased_page = handle_ecc(denali, buf, irq_status);
+	check_erased_page = handle_ecc(denali, buf, irq_status, &max_bitflips);
 	denali_enable_dma(denali, false);
 
 	if (check_erased_page) {
@@ -1167,11 +1170,11 @@
 				denali->mtd.ecc_stats.failed++;
 		}
 	}
-	return 0;
+	return max_bitflips;
 }
 
 static int denali_read_page_raw(struct mtd_info *mtd, struct nand_chip *chip,
-				uint8_t *buf, int page)
+				uint8_t *buf, int oob_required, int page)
 {
 	struct denali_nand_info *denali = mtd_to_denali(mtd);
 
@@ -1702,17 +1705,4 @@
 	.remove = denali_pci_remove,
 };
 
-static int __devinit denali_init(void)
-{
-	printk(KERN_INFO "Spectra MTD driver\n");
-	return pci_register_driver(&denali_pci_driver);
-}
-
-/* Free memory */
-static void __devexit denali_exit(void)
-{
-	pci_unregister_driver(&denali_pci_driver);
-}
-
-module_init(denali_init);
-module_exit(denali_exit);
+module_pci_driver(denali_pci_driver);
diff --git a/drivers/mtd/nand/docg4.c b/drivers/mtd/nand/docg4.c
index b082026..a225e49 100644
--- a/drivers/mtd/nand/docg4.c
+++ b/drivers/mtd/nand/docg4.c
@@ -720,6 +720,7 @@
 	struct docg4_priv *doc = nand->priv;
 	void __iomem *docptr = doc->virtadr;
 	uint16_t status, edc_err, *buf16;
+	int bits_corrected = 0;
 
 	dev_dbg(doc->dev, "%s: page %08x\n", __func__, page);
 
@@ -772,7 +773,7 @@
 
 		/* If bitflips are reported, attempt to correct with ecc */
 		if (edc_err & DOC_ECCCONF1_BCH_SYNDROM_ERR) {
-			int bits_corrected = correct_data(mtd, buf, page);
+			bits_corrected = correct_data(mtd, buf, page);
 			if (bits_corrected == -EBADMSG)
 				mtd->ecc_stats.failed++;
 			else
@@ -781,24 +782,24 @@
 	}
 
 	writew(0, docptr + DOC_DATAEND);
-	return 0;
+	return bits_corrected;
 }
 
 
 static int docg4_read_page_raw(struct mtd_info *mtd, struct nand_chip *nand,
-			       uint8_t *buf, int page)
+			       uint8_t *buf, int oob_required, int page)
 {
 	return read_page(mtd, nand, buf, page, false);
 }
 
 static int docg4_read_page(struct mtd_info *mtd, struct nand_chip *nand,
-			   uint8_t *buf, int page)
+			   uint8_t *buf, int oob_required, int page)
 {
 	return read_page(mtd, nand, buf, page, true);
 }
 
 static int docg4_read_oob(struct mtd_info *mtd, struct nand_chip *nand,
-			  int page, int sndcmd)
+			  int page)
 {
 	struct docg4_priv *doc = nand->priv;
 	void __iomem *docptr = doc->virtadr;
@@ -952,13 +953,13 @@
 }
 
 static void docg4_write_page_raw(struct mtd_info *mtd, struct nand_chip *nand,
-				 const uint8_t *buf)
+				 const uint8_t *buf, int oob_required)
 {
 	return write_page(mtd, nand, buf, false);
 }
 
 static void docg4_write_page(struct mtd_info *mtd, struct nand_chip *nand,
-			     const uint8_t *buf)
+			     const uint8_t *buf, int oob_required)
 {
 	return write_page(mtd, nand, buf, true);
 }
@@ -1002,7 +1003,7 @@
 		return -ENOMEM;
 
 	read_page_prologue(mtd, g4_addr);
-	status = docg4_read_page(mtd, nand, buf, DOCG4_FACTORY_BBT_PAGE);
+	status = docg4_read_page(mtd, nand, buf, 0, DOCG4_FACTORY_BBT_PAGE);
 	if (status)
 		goto exit;
 
@@ -1079,7 +1080,7 @@
 
 	/* write first page of block */
 	write_page_prologue(mtd, g4_addr);
-	docg4_write_page(mtd, nand, buf);
+	docg4_write_page(mtd, nand, buf, 1);
 	ret = pageprog(mtd);
 	if (!ret)
 		mtd->ecc_stats.badblocks++;
@@ -1192,8 +1193,7 @@
 	nand->ecc.prepad = 8;
 	nand->ecc.bytes	= 8;
 	nand->ecc.strength = DOCG4_T;
-	nand->options =
-		NAND_BUSWIDTH_16 | NAND_NO_SUBPAGE_WRITE | NAND_NO_AUTOINCR;
+	nand->options = NAND_BUSWIDTH_16 | NAND_NO_SUBPAGE_WRITE;
 	nand->IO_ADDR_R = nand->IO_ADDR_W = doc->virtadr + DOC_IOSPACE_DATA;
 	nand->controller = &nand->hwcontrol;
 	spin_lock_init(&nand->controller->lock);
diff --git a/drivers/mtd/nand/fsl_elbc_nand.c b/drivers/mtd/nand/fsl_elbc_nand.c
index 80b5264..7842938 100644
--- a/drivers/mtd/nand/fsl_elbc_nand.c
+++ b/drivers/mtd/nand/fsl_elbc_nand.c
@@ -75,6 +75,7 @@
 	unsigned int use_mdr;    /* Non zero if the MDR is to be set      */
 	unsigned int oob;        /* Non zero if operating on OOB data     */
 	unsigned int counter;	 /* counter for the initializations	  */
+	unsigned int max_bitflips;  /* Saved during READ0 cmd		  */
 };
 
 /* These map to the positions used by the FCM hardware ECC generator */
@@ -253,6 +254,8 @@
 	if (chip->ecc.mode != NAND_ECC_HW)
 		return 0;
 
+	elbc_fcm_ctrl->max_bitflips = 0;
+
 	if (elbc_fcm_ctrl->read_bytes == mtd->writesize + mtd->oobsize) {
 		uint32_t lteccr = in_be32(&lbc->lteccr);
 		/*
@@ -262,11 +265,16 @@
 		 * bits 28-31 are uncorrectable errors, marked elsewhere.
 		 * for small page nand only 1 bit is used.
 		 * if the ELBC doesn't have the lteccr register it reads 0
+		 * FIXME: 4 bits can be corrected on NANDs with 2k pages, so
+		 * count the number of sub-pages with bitflips and update
+		 * ecc_stats.corrected accordingly.
 		 */
 		if (lteccr & 0x000F000F)
 			out_be32(&lbc->lteccr, 0x000F000F); /* clear lteccr */
-		if (lteccr & 0x000F0000)
+		if (lteccr & 0x000F0000) {
 			mtd->ecc_stats.corrected++;
+			elbc_fcm_ctrl->max_bitflips = 1;
+		}
 	}
 
 	return 0;
@@ -738,26 +746,28 @@
 	return 0;
 }
 
-static int fsl_elbc_read_page(struct mtd_info *mtd,
-                              struct nand_chip *chip,
-			      uint8_t *buf,
-			      int page)
+static int fsl_elbc_read_page(struct mtd_info *mtd, struct nand_chip *chip,
+			      uint8_t *buf, int oob_required, int page)
 {
+	struct fsl_elbc_mtd *priv = chip->priv;
+	struct fsl_lbc_ctrl *ctrl = priv->ctrl;
+	struct fsl_elbc_fcm_ctrl *elbc_fcm_ctrl = ctrl->nand;
+
 	fsl_elbc_read_buf(mtd, buf, mtd->writesize);
-	fsl_elbc_read_buf(mtd, chip->oob_poi, mtd->oobsize);
+	if (oob_required)
+		fsl_elbc_read_buf(mtd, chip->oob_poi, mtd->oobsize);
 
 	if (fsl_elbc_wait(mtd, chip) & NAND_STATUS_FAIL)
 		mtd->ecc_stats.failed++;
 
-	return 0;
+	return elbc_fcm_ctrl->max_bitflips;
 }
 
 /* ECC will be calculated automatically, and errors will be detected in
  * waitfunc.
  */
-static void fsl_elbc_write_page(struct mtd_info *mtd,
-                                struct nand_chip *chip,
-                                const uint8_t *buf)
+static void fsl_elbc_write_page(struct mtd_info *mtd, struct nand_chip *chip,
+				const uint8_t *buf, int oob_required)
 {
 	fsl_elbc_write_buf(mtd, buf, mtd->writesize);
 	fsl_elbc_write_buf(mtd, chip->oob_poi, mtd->oobsize);
@@ -795,7 +805,7 @@
 	chip->bbt_md = &bbt_mirror_descr;
 
 	/* set up nand options */
-	chip->options = NAND_NO_READRDY | NAND_NO_AUTOINCR;
+	chip->options = NAND_NO_READRDY;
 	chip->bbt_options = NAND_BBT_USE_FLASH;
 
 	chip->controller = &elbc_fcm_ctrl->controller;
@@ -814,11 +824,6 @@
 		chip->ecc.size = 512;
 		chip->ecc.bytes = 3;
 		chip->ecc.strength = 1;
-		/*
-		 * FIXME: can hardware ecc correct 4 bitflips if page size is
-		 * 2k?  Then does hardware report number of corrections for this
-		 * case?  If so, ecc_stats reporting needs to be fixed as well.
-		 */
 	} else {
 		/* otherwise fall back to default software ECC */
 		chip->ecc.mode = NAND_ECC_SOFT;
diff --git a/drivers/mtd/nand/fsl_ifc_nand.c b/drivers/mtd/nand/fsl_ifc_nand.c
index c30ac7b..9602c1b 100644
--- a/drivers/mtd/nand/fsl_ifc_nand.c
+++ b/drivers/mtd/nand/fsl_ifc_nand.c
@@ -63,6 +63,7 @@
 	unsigned int oob;	/* Non zero if operating on OOB data	*/
 	unsigned int eccread;	/* Non zero for a full-page ECC read	*/
 	unsigned int counter;	/* counter for the initializations	*/
+	unsigned int max_bitflips;  /* Saved during READ0 cmd		*/
 };
 
 static struct fsl_ifc_nand_ctrl *ifc_nand_ctrl;
@@ -262,6 +263,8 @@
 	if (ctrl->nand_stat & IFC_NAND_EVTER_STAT_WPER)
 		dev_err(priv->dev, "NAND Flash Write Protect Error\n");
 
+	nctrl->max_bitflips = 0;
+
 	if (nctrl->eccread) {
 		int errors;
 		int bufnum = nctrl->page & priv->bufnum_mask;
@@ -290,6 +293,9 @@
 			}
 
 			mtd->ecc_stats.corrected += errors;
+			nctrl->max_bitflips = max_t(unsigned int,
+						    nctrl->max_bitflips,
+						    errors);
 		}
 
 		nctrl->eccread = 0;
@@ -375,21 +381,31 @@
 
 		return;
 
-	/* READID must read all 8 possible bytes */
 	case NAND_CMD_READID:
+	case NAND_CMD_PARAM: {
+		int timing = IFC_FIR_OP_RB;
+		if (command == NAND_CMD_PARAM)
+			timing = IFC_FIR_OP_RBCD;
+
 		out_be32(&ifc->ifc_nand.nand_fir0,
 				(IFC_FIR_OP_CMD0 << IFC_NAND_FIR0_OP0_SHIFT) |
 				(IFC_FIR_OP_UA  << IFC_NAND_FIR0_OP1_SHIFT) |
-				(IFC_FIR_OP_RB << IFC_NAND_FIR0_OP2_SHIFT));
+				(timing << IFC_NAND_FIR0_OP2_SHIFT));
 		out_be32(&ifc->ifc_nand.nand_fcr0,
-				NAND_CMD_READID << IFC_NAND_FCR0_CMD0_SHIFT);
-		/* 8 bytes for manuf, device and exts */
-		out_be32(&ifc->ifc_nand.nand_fbcr, 8);
-		ifc_nand_ctrl->read_bytes = 8;
+				command << IFC_NAND_FCR0_CMD0_SHIFT);
+		out_be32(&ifc->ifc_nand.row3, column);
+
+		/*
+		 * Although READID currently needs only 8 bytes, we always read
+		 * the maximum of 256 bytes (to cover PARAM as well).
+		 */
+		out_be32(&ifc->ifc_nand.nand_fbcr, 256);
+		ifc_nand_ctrl->read_bytes = 256;
 
 		set_addr(mtd, 0, 0, 0);
 		fsl_ifc_run_command(mtd);
 		return;
+	}
 
 	/* ERASE1 stores the block and page address */
 	case NAND_CMD_ERASE1:
@@ -682,15 +698,16 @@
 	return nand_fsr | NAND_STATUS_WP;
 }
 
-static int fsl_ifc_read_page(struct mtd_info *mtd,
-			      struct nand_chip *chip,
-			      uint8_t *buf, int page)
+static int fsl_ifc_read_page(struct mtd_info *mtd, struct nand_chip *chip,
+			     uint8_t *buf, int oob_required, int page)
 {
 	struct fsl_ifc_mtd *priv = chip->priv;
 	struct fsl_ifc_ctrl *ctrl = priv->ctrl;
+	struct fsl_ifc_nand_ctrl *nctrl = ifc_nand_ctrl;
 
 	fsl_ifc_read_buf(mtd, buf, mtd->writesize);
-	fsl_ifc_read_buf(mtd, chip->oob_poi, mtd->oobsize);
+	if (oob_required)
+		fsl_ifc_read_buf(mtd, chip->oob_poi, mtd->oobsize);
 
 	if (ctrl->nand_stat & IFC_NAND_EVTER_STAT_ECCER)
 		dev_err(priv->dev, "NAND Flash ECC Uncorrectable Error\n");
@@ -698,15 +715,14 @@
 	if (ctrl->nand_stat != IFC_NAND_EVTER_STAT_OPC)
 		mtd->ecc_stats.failed++;
 
-	return 0;
+	return nctrl->max_bitflips;
 }
 
 /* ECC will be calculated automatically, and errors will be detected in
  * waitfunc.
  */
-static void fsl_ifc_write_page(struct mtd_info *mtd,
-				struct nand_chip *chip,
-				const uint8_t *buf)
+static void fsl_ifc_write_page(struct mtd_info *mtd, struct nand_chip *chip,
+			       const uint8_t *buf, int oob_required)
 {
 	fsl_ifc_write_buf(mtd, buf, mtd->writesize);
 	fsl_ifc_write_buf(mtd, chip->oob_poi, mtd->oobsize);
@@ -789,7 +805,7 @@
 	out_be32(&ifc->ifc_nand.ncfgr, 0x0);
 
 	/* set up nand options */
-	chip->options = NAND_NO_READRDY | NAND_NO_AUTOINCR;
+	chip->options = NAND_NO_READRDY;
 	chip->bbt_options = NAND_BBT_USE_FLASH;
 
 
@@ -811,6 +827,7 @@
 	/* Hardware generates ECC per 512 Bytes */
 	chip->ecc.size = 512;
 	chip->ecc.bytes = 8;
+	chip->ecc.strength = 4;
 
 	switch (csor & CSOR_NAND_PGS_MASK) {
 	case CSOR_NAND_PGS_512:
diff --git a/drivers/mtd/nand/fsmc_nand.c b/drivers/mtd/nand/fsmc_nand.c
index 1b8330e..38d2624 100644
--- a/drivers/mtd/nand/fsmc_nand.c
+++ b/drivers/mtd/nand/fsmc_nand.c
@@ -692,6 +692,7 @@
  * @mtd:	mtd info structure
  * @chip:	nand chip info structure
  * @buf:	buffer to store read data
+ * @oob_required:	caller expects OOB data read to chip->oob_poi
  * @page:	page number to read
  *
  * This routine is needed for fsmc version 8 as reading from NAND chip has to be
@@ -701,7 +702,7 @@
  * max of 8 bits)
  */
 static int fsmc_read_page_hwecc(struct mtd_info *mtd, struct nand_chip *chip,
-				 uint8_t *buf, int page)
+				 uint8_t *buf, int oob_required, int page)
 {
 	struct fsmc_nand_data *host = container_of(mtd,
 					struct fsmc_nand_data, mtd);
@@ -720,6 +721,7 @@
 	 */
 	uint16_t ecc_oob[7];
 	uint8_t *oob = (uint8_t *)&ecc_oob[0];
+	unsigned int max_bitflips = 0;
 
 	for (i = 0, s = 0; s < eccsteps; s++, i += eccbytes, p += eccsize) {
 		chip->cmdfunc(mtd, NAND_CMD_READ0, s * eccsize, page);
@@ -748,13 +750,15 @@
 		chip->ecc.calculate(mtd, p, &ecc_calc[i]);
 
 		stat = chip->ecc.correct(mtd, p, &ecc_code[i], &ecc_calc[i]);
-		if (stat < 0)
+		if (stat < 0) {
 			mtd->ecc_stats.failed++;
-		else
+		} else {
 			mtd->ecc_stats.corrected += stat;
+			max_bitflips = max_t(unsigned int, max_bitflips, stat);
+		}
 	}
 
-	return 0;
+	return max_bitflips;
 }
 
 /*
@@ -994,9 +998,9 @@
 		return PTR_ERR(host->clk);
 	}
 
-	ret = clk_enable(host->clk);
+	ret = clk_prepare_enable(host->clk);
 	if (ret)
-		goto err_clk_enable;
+		goto err_clk_prepare_enable;
 
 	/*
 	 * This device ID is actually a common AMBA ID as used on the
@@ -1176,8 +1180,8 @@
 	if (host->mode == USE_DMA_ACCESS)
 		dma_release_channel(host->read_dma_chan);
 err_req_read_chnl:
-	clk_disable(host->clk);
-err_clk_enable:
+	clk_disable_unprepare(host->clk);
+err_clk_prepare_enable:
 	clk_put(host->clk);
 	return ret;
 }
@@ -1198,7 +1202,7 @@
 			dma_release_channel(host->write_dma_chan);
 			dma_release_channel(host->read_dma_chan);
 		}
-		clk_disable(host->clk);
+		clk_disable_unprepare(host->clk);
 		clk_put(host->clk);
 	}
 
@@ -1210,7 +1214,7 @@
 {
 	struct fsmc_nand_data *host = dev_get_drvdata(dev);
 	if (host)
-		clk_disable(host->clk);
+		clk_disable_unprepare(host->clk);
 	return 0;
 }
 
@@ -1218,7 +1222,7 @@
 {
 	struct fsmc_nand_data *host = dev_get_drvdata(dev);
 	if (host) {
-		clk_enable(host->clk);
+		clk_prepare_enable(host->clk);
 		fsmc_nand_setup(host->regs_va, host->bank,
 				host->nand.options & NAND_BUSWIDTH_16,
 				host->dev_timings);
diff --git a/drivers/mtd/nand/gpmi-nand/bch-regs.h b/drivers/mtd/nand/gpmi-nand/bch-regs.h
index 4effb8c..a092451 100644
--- a/drivers/mtd/nand/gpmi-nand/bch-regs.h
+++ b/drivers/mtd/nand/gpmi-nand/bch-regs.h
@@ -51,15 +51,26 @@
 
 #define BP_BCH_FLASH0LAYOUT0_ECC0		12
 #define BM_BCH_FLASH0LAYOUT0_ECC0	(0xf << BP_BCH_FLASH0LAYOUT0_ECC0)
-#define BF_BCH_FLASH0LAYOUT0_ECC0(v)		\
-	(((v) << BP_BCH_FLASH0LAYOUT0_ECC0) & BM_BCH_FLASH0LAYOUT0_ECC0)
+#define MX6Q_BP_BCH_FLASH0LAYOUT0_ECC0		11
+#define MX6Q_BM_BCH_FLASH0LAYOUT0_ECC0	(0x1f << MX6Q_BP_BCH_FLASH0LAYOUT0_ECC0)
+#define BF_BCH_FLASH0LAYOUT0_ECC0(v, x)				\
+	(GPMI_IS_MX6Q(x)					\
+		? (((v) << MX6Q_BP_BCH_FLASH0LAYOUT0_ECC0)	\
+			& MX6Q_BM_BCH_FLASH0LAYOUT0_ECC0)	\
+		: (((v) << BP_BCH_FLASH0LAYOUT0_ECC0)		\
+			& BM_BCH_FLASH0LAYOUT0_ECC0)		\
+	)
 
 #define BP_BCH_FLASH0LAYOUT0_DATA0_SIZE		0
 #define BM_BCH_FLASH0LAYOUT0_DATA0_SIZE		\
 			(0xfff << BP_BCH_FLASH0LAYOUT0_DATA0_SIZE)
-#define BF_BCH_FLASH0LAYOUT0_DATA0_SIZE(v)	\
-	(((v) << BP_BCH_FLASH0LAYOUT0_DATA0_SIZE)\
-					 & BM_BCH_FLASH0LAYOUT0_DATA0_SIZE)
+#define MX6Q_BM_BCH_FLASH0LAYOUT0_DATA0_SIZE	\
+			(0x3ff << BP_BCH_FLASH0LAYOUT0_DATA0_SIZE)
+#define BF_BCH_FLASH0LAYOUT0_DATA0_SIZE(v, x)				\
+	(GPMI_IS_MX6Q(x)						\
+		? (((v) >> 2) & MX6Q_BM_BCH_FLASH0LAYOUT0_DATA0_SIZE)	\
+		: ((v) & BM_BCH_FLASH0LAYOUT0_DATA0_SIZE)		\
+	)
 
 #define HW_BCH_FLASH0LAYOUT1			0x00000090
 
@@ -72,13 +83,24 @@
 
 #define BP_BCH_FLASH0LAYOUT1_ECCN		12
 #define BM_BCH_FLASH0LAYOUT1_ECCN	(0xf << BP_BCH_FLASH0LAYOUT1_ECCN)
-#define BF_BCH_FLASH0LAYOUT1_ECCN(v)		\
-	(((v) << BP_BCH_FLASH0LAYOUT1_ECCN) & BM_BCH_FLASH0LAYOUT1_ECCN)
+#define MX6Q_BP_BCH_FLASH0LAYOUT1_ECCN		11
+#define MX6Q_BM_BCH_FLASH0LAYOUT1_ECCN	(0x1f << MX6Q_BP_BCH_FLASH0LAYOUT1_ECCN)
+#define BF_BCH_FLASH0LAYOUT1_ECCN(v, x)				\
+	(GPMI_IS_MX6Q(x)					\
+		? (((v) << MX6Q_BP_BCH_FLASH0LAYOUT1_ECCN)	\
+			& MX6Q_BM_BCH_FLASH0LAYOUT1_ECCN)	\
+		: (((v) << BP_BCH_FLASH0LAYOUT1_ECCN)		\
+			& BM_BCH_FLASH0LAYOUT1_ECCN)		\
+	)
 
 #define BP_BCH_FLASH0LAYOUT1_DATAN_SIZE		0
 #define BM_BCH_FLASH0LAYOUT1_DATAN_SIZE		\
 			(0xfff << BP_BCH_FLASH0LAYOUT1_DATAN_SIZE)
-#define BF_BCH_FLASH0LAYOUT1_DATAN_SIZE(v)	\
-	(((v) << BP_BCH_FLASH0LAYOUT1_DATAN_SIZE) \
-					 & BM_BCH_FLASH0LAYOUT1_DATAN_SIZE)
+#define MX6Q_BM_BCH_FLASH0LAYOUT1_DATAN_SIZE	\
+			(0x3ff << BP_BCH_FLASH0LAYOUT1_DATAN_SIZE)
+#define BF_BCH_FLASH0LAYOUT1_DATAN_SIZE(v, x)				\
+	(GPMI_IS_MX6Q(x)						\
+		? (((v) >> 2) & MX6Q_BM_BCH_FLASH0LAYOUT1_DATAN_SIZE)	\
+		: ((v) & BM_BCH_FLASH0LAYOUT1_DATAN_SIZE)		\
+	)
 #endif
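The MX6Q variants of these macros move the ECC-strength field down to bit 11 and widen it to 5 bits, and store the DATA*_SIZE fields in units of 4 bytes (hence the ">> 2"). A quick illustration of how the same strength value lands in different positions on the two register layouts, using the shifts and masks from the hunk above:

	#include <stdio.h>

	int main(void)
	{
		unsigned int ecc_strength = 8;
		unsigned int legacy = (ecc_strength << 12) & (0xf << 12);
		unsigned int mx6q = (ecc_strength << 11) & (0x1f << 11);

		printf("legacy ECC0 field: 0x%08x\n", legacy);
		printf("mx6q   ECC0 field: 0x%08x\n", mx6q);
		return 0;
	}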
diff --git a/drivers/mtd/nand/gpmi-nand/gpmi-lib.c b/drivers/mtd/nand/gpmi-nand/gpmi-lib.c
index e8ea710..a1f4332 100644
--- a/drivers/mtd/nand/gpmi-nand/gpmi-lib.c
+++ b/drivers/mtd/nand/gpmi-nand/gpmi-lib.c
@@ -21,7 +21,6 @@
 #include <linux/mtd/gpmi-nand.h>
 #include <linux/delay.h>
 #include <linux/clk.h>
-#include <mach/mxs.h>
 
 #include "gpmi-nand.h"
 #include "gpmi-regs.h"
@@ -37,6 +36,8 @@
 	.max_dll_delay_in_ns         = 16,
 };
 
+#define MXS_SET_ADDR		0x4
+#define MXS_CLR_ADDR		0x8
 /*
  * Clear the bit and poll it cleared.  This is usually called with
  * a reset address and mask being either SFTRST(bit 31) or CLKGATE
@@ -47,7 +48,7 @@
 	int timeout = 0x400;
 
 	/* clear the bit */
-	__mxs_clrl(mask, addr);
+	writel(mask, addr + MXS_CLR_ADDR);
 
 	/*
 	 * SFTRST needs 3 GPMI clocks to settle, the reference manual
@@ -92,11 +93,11 @@
 		goto error;
 
 	/* clear CLKGATE */
-	__mxs_clrl(MODULE_CLKGATE, reset_addr);
+	writel(MODULE_CLKGATE, reset_addr + MXS_CLR_ADDR);
 
 	if (!just_enable) {
 		/* set SFTRST to reset the block */
-		__mxs_setl(MODULE_SFTRST, reset_addr);
+		writel(MODULE_SFTRST, reset_addr + MXS_SET_ADDR);
 		udelay(1);
 
 		/* poll CLKGATE becoming set */
@@ -223,13 +224,13 @@
 	/* Configure layout 0. */
 	writel(BF_BCH_FLASH0LAYOUT0_NBLOCKS(block_count)
 			| BF_BCH_FLASH0LAYOUT0_META_SIZE(metadata_size)
-			| BF_BCH_FLASH0LAYOUT0_ECC0(ecc_strength)
-			| BF_BCH_FLASH0LAYOUT0_DATA0_SIZE(block_size),
+			| BF_BCH_FLASH0LAYOUT0_ECC0(ecc_strength, this)
+			| BF_BCH_FLASH0LAYOUT0_DATA0_SIZE(block_size, this),
 			r->bch_regs + HW_BCH_FLASH0LAYOUT0);
 
 	writel(BF_BCH_FLASH0LAYOUT1_PAGE_SIZE(page_size)
-			| BF_BCH_FLASH0LAYOUT1_ECCN(ecc_strength)
-			| BF_BCH_FLASH0LAYOUT1_DATAN_SIZE(block_size),
+			| BF_BCH_FLASH0LAYOUT1_ECCN(ecc_strength, this)
+			| BF_BCH_FLASH0LAYOUT1_DATAN_SIZE(block_size, this),
 			r->bch_regs + HW_BCH_FLASH0LAYOUT1);
 
 	/* Set *all* chip selects to use layout 0. */
@@ -255,11 +256,12 @@
 	return max(k, min);
 }
 
+#define DEF_MIN_PROP_DELAY	5
+#define DEF_MAX_PROP_DELAY	9
 /* Apply timing to current hardware conditions. */
 static int gpmi_nfc_compute_hardware_timing(struct gpmi_nand_data *this,
 					struct gpmi_nfc_hardware_timing *hw)
 {
-	struct gpmi_nand_platform_data *pdata = this->pdata;
 	struct timing_threshod *nfc = &timing_default_threshold;
 	struct nand_chip *nand = &this->nand;
 	struct nand_timing target = this->timing;
@@ -276,8 +278,8 @@
 	int ideal_sample_delay_in_ns;
 	unsigned int sample_delay_factor;
 	int tEYE;
-	unsigned int min_prop_delay_in_ns = pdata->min_prop_delay_in_ns;
-	unsigned int max_prop_delay_in_ns = pdata->max_prop_delay_in_ns;
+	unsigned int min_prop_delay_in_ns = DEF_MIN_PROP_DELAY;
+	unsigned int max_prop_delay_in_ns = DEF_MAX_PROP_DELAY;
 
 	/*
 	 * If there are multiple chips, we need to relax the timings to allow
@@ -803,7 +805,8 @@
 	if (GPMI_IS_MX23(this)) {
 		mask = MX23_BM_GPMI_DEBUG_READY0 << chip;
 		reg = readl(r->gpmi_regs + HW_GPMI_DEBUG);
-	} else if (GPMI_IS_MX28(this)) {
+	} else if (GPMI_IS_MX28(this) || GPMI_IS_MX6Q(this)) {
+		/* MX28 shares the same R/B register as MX6Q. */
 		mask = MX28_BF_GPMI_STAT_READY_BUSY(1 << chip);
 		reg = readl(r->gpmi_regs + HW_GPMI_STAT);
 	} else
diff --git a/drivers/mtd/nand/gpmi-nand/gpmi-nand.c b/drivers/mtd/nand/gpmi-nand/gpmi-nand.c
index b68e043..a05b7b4 100644
--- a/drivers/mtd/nand/gpmi-nand/gpmi-nand.c
+++ b/drivers/mtd/nand/gpmi-nand/gpmi-nand.c
@@ -25,6 +25,8 @@
 #include <linux/mtd/gpmi-nand.h>
 #include <linux/mtd/partitions.h>
 #include <linux/pinctrl/consumer.h>
+#include <linux/of.h>
+#include <linux/of_device.h>
 #include "gpmi-nand.h"
 
 /* add our owner bbt descriptor */
@@ -387,7 +389,7 @@
 static bool gpmi_dma_filter(struct dma_chan *chan, void *param)
 {
 	struct gpmi_nand_data *this = param;
-	struct resource *r = this->private;
+	int dma_channel = (int)this->private;
 
 	if (!mxs_dma_is_apbh(chan))
 		return false;
@@ -399,7 +401,7 @@
 	 *	for mx28 :	MX28_DMA_GPMI0 ~ MX28_DMA_GPMI7
 	 *		(These eight channels share the same IRQ!)
 	 */
-	if (r->start <= chan->chan_id && chan->chan_id <= r->end) {
+	if (dma_channel == chan->chan_id) {
 		chan->private = &this->dma_data;
 		return true;
 	}
@@ -419,57 +421,45 @@
 static int __devinit acquire_dma_channels(struct gpmi_nand_data *this)
 {
 	struct platform_device *pdev = this->pdev;
-	struct gpmi_nand_platform_data *pdata = this->pdata;
-	struct resources *res = &this->resources;
-	struct resource *r, *r_dma;
-	unsigned int i;
+	struct resource *r_dma;
+	struct device_node *dn;
+	int dma_channel;
+	unsigned int ret;
+	struct dma_chan *dma_chan;
+	dma_cap_mask_t mask;
 
-	r = platform_get_resource_byname(pdev, IORESOURCE_DMA,
-					GPMI_NAND_DMA_CHANNELS_RES_NAME);
+	/* dma channel, we only use the first one. */
+	dn = pdev->dev.of_node;
+	ret = of_property_read_u32(dn, "fsl,gpmi-dma-channel", &dma_channel);
+	if (ret) {
+		pr_err("unable to get DMA channel from dt.\n");
+		goto acquire_err;
+	}
+	this->private = (void *)dma_channel;
+
+	/* gpmi dma interrupt */
 	r_dma = platform_get_resource_byname(pdev, IORESOURCE_IRQ,
 					GPMI_NAND_DMA_INTERRUPT_RES_NAME);
-	if (!r || !r_dma) {
+	if (!r_dma) {
 		pr_err("Can't get resource for DMA\n");
-		return -ENXIO;
+		goto acquire_err;
+	}
+	this->dma_data.chan_irq = r_dma->start;
+
+	/* request dma channel */
+	dma_cap_zero(mask);
+	dma_cap_set(DMA_SLAVE, mask);
+
+	dma_chan = dma_request_channel(mask, gpmi_dma_filter, this);
+	if (!dma_chan) {
+		pr_err("dma_request_channel failed.\n");
+		goto acquire_err;
 	}
 
-	/* used in gpmi_dma_filter() */
-	this->private = r;
-
-	for (i = r->start; i <= r->end; i++) {
-		struct dma_chan *dma_chan;
-		dma_cap_mask_t mask;
-
-		if (i - r->start >= pdata->max_chip_count)
-			break;
-
-		dma_cap_zero(mask);
-		dma_cap_set(DMA_SLAVE, mask);
-
-		/* get the DMA interrupt */
-		if (r_dma->start == r_dma->end) {
-			/* only register the first. */
-			if (i == r->start)
-				this->dma_data.chan_irq = r_dma->start;
-			else
-				this->dma_data.chan_irq = NO_IRQ;
-		} else
-			this->dma_data.chan_irq = r_dma->start + (i - r->start);
-
-		dma_chan = dma_request_channel(mask, gpmi_dma_filter, this);
-		if (!dma_chan)
-			goto acquire_err;
-
-		/* fill the first empty item */
-		this->dma_chans[i - r->start] = dma_chan;
-	}
-
-	res->dma_low_channel = r->start;
-	res->dma_high_channel = i;
+	this->dma_chans[0] = dma_chan;
 	return 0;
 
 acquire_err:
-	pr_err("Can't acquire DMA channel %u\n", i);
 	release_dma_channels(this);
 	return -EINVAL;
 }
@@ -851,7 +841,7 @@
 }
 
 static int gpmi_ecc_read_page(struct mtd_info *mtd, struct nand_chip *chip,
-				uint8_t *buf, int page)
+				uint8_t *buf, int oob_required, int page)
 {
 	struct gpmi_nand_data *this = chip->priv;
 	struct bch_geometry *nfc_geo = &this->bch_geometry;
@@ -917,28 +907,31 @@
 		mtd->ecc_stats.corrected += corrected;
 	}
 
-	/*
-	 * It's time to deliver the OOB bytes. See gpmi_ecc_read_oob() for
-	 * details about our policy for delivering the OOB.
-	 *
-	 * We fill the caller's buffer with set bits, and then copy the block
-	 * mark to th caller's buffer. Note that, if block mark swapping was
-	 * necessary, it has already been done, so we can rely on the first
-	 * byte of the auxiliary buffer to contain the block mark.
-	 */
-	memset(chip->oob_poi, ~0, mtd->oobsize);
-	chip->oob_poi[0] = ((uint8_t *) auxiliary_virt)[0];
+	if (oob_required) {
+		/*
+		 * It's time to deliver the OOB bytes. See gpmi_ecc_read_oob()
+		 * for details about our policy for delivering the OOB.
+		 *
+		 * We fill the caller's buffer with set bits, and then copy the
+		 * block mark to the caller's buffer. Note that, if block mark
+		 * swapping was necessary, it has already been done, so we can
+		 * rely on the first byte of the auxiliary buffer to contain
+		 * the block mark.
+		 */
+		memset(chip->oob_poi, ~0, mtd->oobsize);
+		chip->oob_poi[0] = ((uint8_t *) auxiliary_virt)[0];
 
-	read_page_swap_end(this, buf, mtd->writesize,
-			this->payload_virt, this->payload_phys,
-			nfc_geo->payload_size,
-			payload_virt, payload_phys);
+		read_page_swap_end(this, buf, mtd->writesize,
+				this->payload_virt, this->payload_phys,
+				nfc_geo->payload_size,
+				payload_virt, payload_phys);
+	}
 exit_nfc:
 	return ret;
 }
 
-static void gpmi_ecc_write_page(struct mtd_info *mtd,
-				struct nand_chip *chip, const uint8_t *buf)
+static void gpmi_ecc_write_page(struct mtd_info *mtd, struct nand_chip *chip,
+				const uint8_t *buf, int oob_required)
 {
 	struct gpmi_nand_data *this = chip->priv;
 	struct bch_geometry *nfc_geo = &this->bch_geometry;
@@ -1077,7 +1070,7 @@
  * this driver.
  */
 static int gpmi_ecc_read_oob(struct mtd_info *mtd, struct nand_chip *chip,
-				int page, int sndcmd)
+				int page)
 {
 	struct gpmi_nand_data *this = chip->priv;
 
@@ -1100,11 +1093,7 @@
 		chip->oob_poi[0] = chip->read_byte(mtd);
 	}
 
-	/*
-	 * Return true, indicating that the next call to this function must send
-	 * a command.
-	 */
-	return true;
+	return 0;
 }
 
 static int
@@ -1318,7 +1307,7 @@
 		/* Write the first page of the current stride. */
 		dev_dbg(dev, "Writing an NCB fingerprint in page 0x%x\n", page);
 		chip->cmdfunc(mtd, NAND_CMD_SEQIN, 0x00, page);
-		chip->ecc.write_page_raw(mtd, chip, buffer);
+		chip->ecc.write_page_raw(mtd, chip, buffer, 0);
 		chip->cmdfunc(mtd, NAND_CMD_PAGEPROG, -1, -1);
 
 		/* Wait for the write to finish. */
@@ -1444,6 +1433,10 @@
 	if (ret)
 		return ret;
 
+	/* Adjust the ECC strength according to the chip. */
+	this->nand.ecc.strength = this->bch_geometry.ecc_strength;
+	this->mtd.ecc_strength = this->bch_geometry.ecc_strength;
+
 	/* NAND boot init, depends on the gpmi_set_geometry(). */
 	return nand_boot_init(this);
 }
@@ -1471,9 +1464,9 @@
 
 static int __devinit gpmi_nfc_init(struct gpmi_nand_data *this)
 {
-	struct gpmi_nand_platform_data *pdata = this->pdata;
 	struct mtd_info  *mtd = &this->mtd;
 	struct nand_chip *chip = &this->nand;
+	struct mtd_part_parser_data ppdata = {};
 	int ret;
 
 	/* init current chip */
@@ -1502,6 +1495,7 @@
 	chip->options		|= NAND_NO_SUBPAGE_WRITE;
 	chip->ecc.mode		= NAND_ECC_HW;
 	chip->ecc.size		= 1;
+	chip->ecc.strength	= 8;
 	chip->ecc.layout	= &gpmi_hw_ecclayout;
 
 	/* Allocate a temporary DMA buffer for reading ID in the nand_scan() */
@@ -1511,14 +1505,14 @@
 	if (ret)
 		goto err_out;
 
-	ret = nand_scan(mtd, pdata->max_chip_count);
+	ret = nand_scan(mtd, 1);
 	if (ret) {
 		pr_err("Chip scan failed\n");
 		goto err_out;
 	}
 
-	ret = mtd_device_parse_register(mtd, NULL, NULL,
-			pdata->partitions, pdata->partition_count);
+	ppdata.of_node = this->pdev->dev.of_node;
+	ret = mtd_device_parse_register(mtd, NULL, &ppdata, NULL, 0);
 	if (ret)
 		goto err_out;
 	return 0;
@@ -1528,12 +1522,41 @@
 	return ret;
 }
 
+static const struct platform_device_id gpmi_ids[] = {
+	{ .name = "imx23-gpmi-nand", .driver_data = IS_MX23, },
+	{ .name = "imx28-gpmi-nand", .driver_data = IS_MX28, },
+	{ .name = "imx6q-gpmi-nand", .driver_data = IS_MX6Q, },
+	{},
+};
+
+static const struct of_device_id gpmi_nand_id_table[] = {
+	{
+		.compatible = "fsl,imx23-gpmi-nand",
+		.data = (void *)&gpmi_ids[IS_MX23]
+	}, {
+		.compatible = "fsl,imx28-gpmi-nand",
+		.data = (void *)&gpmi_ids[IS_MX28]
+	}, {
+		.compatible = "fsl,imx6q-gpmi-nand",
+		.data = (void *)&gpmi_ids[IS_MX6Q]
+	}, {}
+};
+MODULE_DEVICE_TABLE(of, gpmi_nand_id_table);
+
 static int __devinit gpmi_nand_probe(struct platform_device *pdev)
 {
-	struct gpmi_nand_platform_data *pdata = pdev->dev.platform_data;
 	struct gpmi_nand_data *this;
+	const struct of_device_id *of_id;
 	int ret;
 
+	of_id = of_match_device(gpmi_nand_id_table, &pdev->dev);
+	if (of_id) {
+		pdev->id_entry = of_id->data;
+	} else {
+		pr_err("Failed to find the right device id.\n");
+		return -ENOMEM;
+	}
+
 	this = kzalloc(sizeof(*this), GFP_KERNEL);
 	if (!this) {
 		pr_err("Failed to allocate per-device memory\n");
@@ -1543,13 +1566,6 @@
 	platform_set_drvdata(pdev, this);
 	this->pdev  = pdev;
 	this->dev   = &pdev->dev;
-	this->pdata = pdata;
-
-	if (pdata->platform_init) {
-		ret = pdata->platform_init();
-		if (ret)
-			goto platform_init_error;
-	}
 
 	ret = acquire_resources(this);
 	if (ret)
@@ -1567,7 +1583,6 @@
 
 exit_nfc_init:
 	release_resources(this);
-platform_init_error:
 exit_acquire_resources:
 	platform_set_drvdata(pdev, NULL);
 	kfree(this);
@@ -1585,19 +1600,10 @@
 	return 0;
 }
 
-static const struct platform_device_id gpmi_ids[] = {
-	{
-		.name = "imx23-gpmi-nand",
-		.driver_data = IS_MX23,
-	}, {
-		.name = "imx28-gpmi-nand",
-		.driver_data = IS_MX28,
-	}, {},
-};
-
 static struct platform_driver gpmi_nand_driver = {
 	.driver = {
 		.name = "gpmi-nand",
+		.of_match_table = gpmi_nand_id_table,
 	},
 	.probe   = gpmi_nand_probe,
 	.remove  = __exit_p(gpmi_nand_remove),
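gpmi_nand_probe() above resolves the compatible string to an of_device_id whose .data points straight at a gpmi_ids[] slot, then stores it in pdev->id_entry so the existing driver_data checks keep working (which is also why IS_MX23/IS_MX28/IS_MX6Q in gpmi-nand.h are renumbered to 0/1/2: they now double as indices into that table). A hedged sketch of the lookup, with placeholder names and compatibles:

#include <linux/module.h>
#include <linux/of_device.h>
#include <linux/platform_device.h>
#include <linux/errno.h>

static const struct platform_device_id my_ids[] = {
	{ .name = "chip-a", .driver_data = 0, },
	{ .name = "chip-b", .driver_data = 1, },
	{ },
};

static const struct of_device_id my_of_ids[] = {
	{ .compatible = "vendor,chip-a", .data = &my_ids[0] },
	{ .compatible = "vendor,chip-b", .data = &my_ids[1] },
	{ /* sentinel */ }
};
MODULE_DEVICE_TABLE(of, my_of_ids);

static int my_probe(struct platform_device *pdev)
{
	const struct of_device_id *of_id =
		of_match_device(my_of_ids, &pdev->dev);

	if (!of_id)
		return -ENODEV;

	/* later code keys off pdev->id_entry->driver_data as before */
	pdev->id_entry = of_id->data;
	return 0;
}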
diff --git a/drivers/mtd/nand/gpmi-nand/gpmi-nand.h b/drivers/mtd/nand/gpmi-nand/gpmi-nand.h
index ec6180d..ce5daa1 100644
--- a/drivers/mtd/nand/gpmi-nand/gpmi-nand.h
+++ b/drivers/mtd/nand/gpmi-nand/gpmi-nand.h
@@ -266,8 +266,10 @@
 #define STATUS_UNCORRECTABLE	0xfe
 
 /* Use the platform_id to distinguish different Archs. */
-#define IS_MX23			0x1
-#define IS_MX28			0x2
+#define IS_MX23			0x0
+#define IS_MX28			0x1
+#define IS_MX6Q			0x2
 #define GPMI_IS_MX23(x)		((x)->pdev->id_entry->driver_data == IS_MX23)
 #define GPMI_IS_MX28(x)		((x)->pdev->id_entry->driver_data == IS_MX28)
+#define GPMI_IS_MX6Q(x)		((x)->pdev->id_entry->driver_data == IS_MX6Q)
 #endif
diff --git a/drivers/mtd/nand/h1910.c b/drivers/mtd/nand/h1910.c
index 9bf5ce5..50166e9 100644
--- a/drivers/mtd/nand/h1910.c
+++ b/drivers/mtd/nand/h1910.c
@@ -124,7 +124,6 @@
 	/* 15 us command delay time */
 	this->chip_delay = 50;
 	this->ecc.mode = NAND_ECC_SOFT;
-	this->options = NAND_NO_AUTOINCR;
 
 	/* Scan to find existence of the device */
 	if (nand_scan(h1910_nand_mtd, 1)) {
diff --git a/drivers/mtd/nand/jz4740_nand.c b/drivers/mtd/nand/jz4740_nand.c
index e4147e8..a6fa884 100644
--- a/drivers/mtd/nand/jz4740_nand.c
+++ b/drivers/mtd/nand/jz4740_nand.c
@@ -332,11 +332,7 @@
 	chip->ecc.mode		= NAND_ECC_HW_OOB_FIRST;
 	chip->ecc.size		= 512;
 	chip->ecc.bytes		= 9;
-	chip->ecc.strength	= 2;
-	/*
-	 * FIXME: ecc_strength value of 2 bits per 512 bytes of data is a
-	 * conservative guess, given 9 ecc bytes and reed-solomon alg.
-	 */
+	chip->ecc.strength	= 4;
 
 	if (pdata)
 		chip->ecc.layout = pdata->ecc_layout;
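Raising the jz4740 claim from 2 to 4 correctable bits per 512-byte step matters because nand_base (later in this patch) starts refusing hardware ECC without an explicit, per-step ecc.strength. The value 4 is consistent with the Reed-Solomon scheme the removed comment alludes to, assuming 9-bit symbols (an assumption, not stated in the hunk): 9 ECC bytes are 72 bits, i.e. eight 9-bit parity symbols, and an RS code corrects half as many symbol errors as it has parity symbols. Throwaway arithmetic:

/* hypothetical sanity check, assuming RS over GF(2^9) */
static inline int rs_correctable_per_step(int ecc_bytes, int symbol_bits)
{
	int parity_symbols = ecc_bytes * 8 / symbol_bits;

	return parity_symbols / 2;	/* rs_correctable_per_step(9, 9) == 4 */
}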
diff --git a/drivers/mtd/nand/mpc5121_nfc.c b/drivers/mtd/nand/mpc5121_nfc.c
index c240cf1..c259c24 100644
--- a/drivers/mtd/nand/mpc5121_nfc.c
+++ b/drivers/mtd/nand/mpc5121_nfc.c
@@ -734,7 +734,6 @@
 	chip->write_buf = mpc5121_nfc_write_buf;
 	chip->verify_buf = mpc5121_nfc_verify_buf;
 	chip->select_chip = mpc5121_nfc_select_chip;
-	chip->options = NAND_NO_AUTOINCR;
 	chip->bbt_options = NAND_BBT_USE_FLASH;
 	chip->ecc.mode = NAND_ECC_SOFT;
 
diff --git a/drivers/mtd/nand/mxc_nand.c b/drivers/mtd/nand/mxc_nand.c
index cc0678a..c58e6a9 100644
--- a/drivers/mtd/nand/mxc_nand.c
+++ b/drivers/mtd/nand/mxc_nand.c
@@ -32,6 +32,8 @@
 #include <linux/io.h>
 #include <linux/irq.h>
 #include <linux/completion.h>
+#include <linux/of_device.h>
+#include <linux/of_mtd.h>
 
 #include <asm/mach/flash.h>
 #include <mach/mxc_nand.h>
@@ -140,13 +142,47 @@
 
 #define NFC_V3_DELAY_LINE		(host->regs_ip + 0x34)
 
+struct mxc_nand_host;
+
+struct mxc_nand_devtype_data {
+	void (*preset)(struct mtd_info *);
+	void (*send_cmd)(struct mxc_nand_host *, uint16_t, int);
+	void (*send_addr)(struct mxc_nand_host *, uint16_t, int);
+	void (*send_page)(struct mtd_info *, unsigned int);
+	void (*send_read_id)(struct mxc_nand_host *);
+	uint16_t (*get_dev_status)(struct mxc_nand_host *);
+	int (*check_int)(struct mxc_nand_host *);
+	void (*irq_control)(struct mxc_nand_host *, int);
+	u32 (*get_ecc_status)(struct mxc_nand_host *);
+	struct nand_ecclayout *ecclayout_512, *ecclayout_2k, *ecclayout_4k;
+	void (*select_chip)(struct mtd_info *mtd, int chip);
+	int (*correct_data)(struct mtd_info *mtd, u_char *dat,
+			u_char *read_ecc, u_char *calc_ecc);
+
+	/*
+	 * On i.MX21 the CONFIG2:INT bit cannot be read if interrupts are masked
+	 * (CONFIG1:INT_MSK is set). To handle this the driver uses
+	 * enable_irq/disable_irq_nosync instead of CONFIG1:INT_MSK
+	 */
+	int irqpending_quirk;
+	int needs_ip;
+
+	size_t regs_offset;
+	size_t spare0_offset;
+	size_t axi_offset;
+
+	int spare_len;
+	int eccbytes;
+	int eccsize;
+};
+
 struct mxc_nand_host {
 	struct mtd_info		mtd;
 	struct nand_chip	nand;
 	struct device		*dev;
 
-	void			*spare0;
-	void			*main_area0;
+	void __iomem		*spare0;
+	void __iomem		*main_area0;
 
 	void __iomem		*base;
 	void __iomem		*regs;
@@ -163,16 +199,9 @@
 
 	uint8_t			*data_buf;
 	unsigned int		buf_start;
-	int			spare_len;
 
-	void			(*preset)(struct mtd_info *);
-	void			(*send_cmd)(struct mxc_nand_host *, uint16_t, int);
-	void			(*send_addr)(struct mxc_nand_host *, uint16_t, int);
-	void			(*send_page)(struct mtd_info *, unsigned int);
-	void			(*send_read_id)(struct mxc_nand_host *);
-	uint16_t		(*get_dev_status)(struct mxc_nand_host *);
-	int			(*check_int)(struct mxc_nand_host *);
-	void			(*irq_control)(struct mxc_nand_host *, int);
+	const struct mxc_nand_devtype_data *devtype_data;
+	struct mxc_nand_platform_data pdata;
 };
 
 /* OOB placement block for use with hardware ecc generation */
@@ -242,21 +271,7 @@
 	}
 };
 
-static const char *part_probes[] = { "RedBoot", "cmdlinepart", NULL };
-
-static irqreturn_t mxc_nfc_irq(int irq, void *dev_id)
-{
-	struct mxc_nand_host *host = dev_id;
-
-	if (!host->check_int(host))
-		return IRQ_NONE;
-
-	host->irq_control(host, 0);
-
-	complete(&host->op_completion);
-
-	return IRQ_HANDLED;
-}
+static const char *part_probes[] = { "RedBoot", "cmdlinepart", "ofpart", NULL };
 
 static int check_int_v3(struct mxc_nand_host *host)
 {
@@ -280,26 +295,12 @@
 	if (!(tmp & NFC_V1_V2_CONFIG2_INT))
 		return 0;
 
-	if (!cpu_is_mx21())
+	if (!host->devtype_data->irqpending_quirk)
 		writew(tmp & ~NFC_V1_V2_CONFIG2_INT, NFC_V1_V2_CONFIG2);
 
 	return 1;
 }
 
-/*
- * It has been observed that the i.MX21 cannot read the CONFIG2:INT bit
- * if interrupts are masked (CONFIG1:INT_MSK is set). To handle this, the
- * driver can enable/disable the irq line rather than simply masking the
- * interrupts.
- */
-static void irq_control_mx21(struct mxc_nand_host *host, int activate)
-{
-	if (activate)
-		enable_irq(host->irq);
-	else
-		disable_irq_nosync(host->irq);
-}
-
 static void irq_control_v1_v2(struct mxc_nand_host *host, int activate)
 {
 	uint16_t tmp;
@@ -328,6 +329,47 @@
 	writel(tmp, NFC_V3_CONFIG2);
 }
 
+static void irq_control(struct mxc_nand_host *host, int activate)
+{
+	if (host->devtype_data->irqpending_quirk) {
+		if (activate)
+			enable_irq(host->irq);
+		else
+			disable_irq_nosync(host->irq);
+	} else {
+		host->devtype_data->irq_control(host, activate);
+	}
+}
+
+static u32 get_ecc_status_v1(struct mxc_nand_host *host)
+{
+	return readw(NFC_V1_V2_ECC_STATUS_RESULT);
+}
+
+static u32 get_ecc_status_v2(struct mxc_nand_host *host)
+{
+	return readl(NFC_V1_V2_ECC_STATUS_RESULT);
+}
+
+static u32 get_ecc_status_v3(struct mxc_nand_host *host)
+{
+	return readl(NFC_V3_ECC_STATUS_RESULT);
+}
+
+static irqreturn_t mxc_nfc_irq(int irq, void *dev_id)
+{
+	struct mxc_nand_host *host = dev_id;
+
+	if (!host->devtype_data->check_int(host))
+		return IRQ_NONE;
+
+	irq_control(host, 0);
+
+	complete(&host->op_completion);
+
+	return IRQ_HANDLED;
+}
+
 /* This function polls the NANDFC to wait for the basic operation to
  * complete by checking the INT bit of config2 register.
  */
@@ -336,14 +378,14 @@
 	int max_retries = 8000;
 
 	if (useirq) {
-		if (!host->check_int(host)) {
+		if (!host->devtype_data->check_int(host)) {
 			INIT_COMPLETION(host->op_completion);
-			host->irq_control(host, 1);
+			irq_control(host, 1);
 			wait_for_completion(&host->op_completion);
 		}
 	} else {
 		while (max_retries-- > 0) {
-			if (host->check_int(host))
+			if (host->devtype_data->check_int(host))
 				break;
 
 			udelay(1);
@@ -374,7 +416,7 @@
 	writew(cmd, NFC_V1_V2_FLASH_CMD);
 	writew(NFC_CMD, NFC_V1_V2_CONFIG2);
 
-	if (cpu_is_mx21() && (cmd == NAND_CMD_RESET)) {
+	if (host->devtype_data->irqpending_quirk && (cmd == NAND_CMD_RESET)) {
 		int max_retries = 100;
 		/* Reset completion is indicated by NFC_CONFIG2 */
 		/* being set to 0 */
@@ -433,13 +475,27 @@
 	wait_op_done(host, false);
 }
 
-static void send_page_v1_v2(struct mtd_info *mtd, unsigned int ops)
+static void send_page_v2(struct mtd_info *mtd, unsigned int ops)
+{
+	struct nand_chip *nand_chip = mtd->priv;
+	struct mxc_nand_host *host = nand_chip->priv;
+
+	/* NANDFC buffer 0 is used for page read/write */
+	writew(host->active_cs << 4, NFC_V1_V2_BUF_ADDR);
+
+	writew(ops, NFC_V1_V2_CONFIG2);
+
+	/* Wait for operation to complete */
+	wait_op_done(host, true);
+}
+
+static void send_page_v1(struct mtd_info *mtd, unsigned int ops)
 {
 	struct nand_chip *nand_chip = mtd->priv;
 	struct mxc_nand_host *host = nand_chip->priv;
 	int bufs, i;
 
-	if (nfc_is_v1() && mtd->writesize > 512)
+	if (mtd->writesize > 512)
 		bufs = 4;
 	else
 		bufs = 1;
@@ -463,7 +519,7 @@
 
 	wait_op_done(host, true);
 
-	memcpy(host->data_buf, host->main_area0, 16);
+	memcpy_fromio(host->data_buf, host->main_area0, 16);
 }
 
 /* Request the NANDFC to perform a read of the NAND device ID. */
@@ -479,7 +535,7 @@
 	/* Wait for operation to complete */
 	wait_op_done(host, true);
 
-	memcpy(host->data_buf, host->main_area0, 16);
+	memcpy_fromio(host->data_buf, host->main_area0, 16);
 
 	if (this->options & NAND_BUSWIDTH_16) {
 		/* compress the ID info */
@@ -555,7 +611,7 @@
 	 * additional correction.  2-Bit errors cannot be corrected by
 	 * HW ECC, so we need to return failure
 	 */
-	uint16_t ecc_status = readw(NFC_V1_V2_ECC_STATUS_RESULT);
+	uint16_t ecc_status = get_ecc_status_v1(host);
 
 	if (((ecc_status & 0x3) == 2) || ((ecc_status >> 2) == 2)) {
 		pr_debug("MXC_NAND: HWECC uncorrectable 2-bit ECC error\n");
@@ -580,10 +636,7 @@
 
 	no_subpages = mtd->writesize >> 9;
 
-	if (nfc_is_v21())
-		ecc_stat = readl(NFC_V1_V2_ECC_STATUS_RESULT);
-	else
-		ecc_stat = readl(NFC_V3_ECC_STATUS_RESULT);
+	ecc_stat = host->devtype_data->get_ecc_status(host);
 
 	do {
 		err = ecc_stat & ecc_bit_mask;
@@ -616,7 +669,7 @@
 
 	/* Check for status request */
 	if (host->status_request)
-		return host->get_dev_status(host) & 0xFF;
+		return host->devtype_data->get_dev_status(host) & 0xFF;
 
 	ret = *(uint8_t *)(host->data_buf + host->buf_start);
 	host->buf_start++;
@@ -682,7 +735,28 @@
 
 /* This function is used by upper layer for select and
  * deselect of the NAND chip */
-static void mxc_nand_select_chip(struct mtd_info *mtd, int chip)
+static void mxc_nand_select_chip_v1_v3(struct mtd_info *mtd, int chip)
+{
+	struct nand_chip *nand_chip = mtd->priv;
+	struct mxc_nand_host *host = nand_chip->priv;
+
+	if (chip == -1) {
+		/* Disable the NFC clock */
+		if (host->clk_act) {
+			clk_disable_unprepare(host->clk);
+			host->clk_act = 0;
+		}
+		return;
+	}
+
+	if (!host->clk_act) {
+		/* Enable the NFC clock */
+		clk_prepare_enable(host->clk);
+		host->clk_act = 1;
+	}
+}
+
+static void mxc_nand_select_chip_v2(struct mtd_info *mtd, int chip)
 {
 	struct nand_chip *nand_chip = mtd->priv;
 	struct mxc_nand_host *host = nand_chip->priv;
@@ -702,10 +776,8 @@
 		host->clk_act = 1;
 	}
 
-	if (nfc_is_v21()) {
-		host->active_cs = chip;
-		writew(host->active_cs << 4, NFC_V1_V2_BUF_ADDR);
-	}
+	host->active_cs = chip;
+	writew(host->active_cs << 4, NFC_V1_V2_BUF_ADDR);
 }
 
 /*
@@ -718,23 +790,23 @@
 	u16 i, j;
 	u16 n = mtd->writesize >> 9;
 	u8 *d = host->data_buf + mtd->writesize;
-	u8 *s = host->spare0;
-	u16 t = host->spare_len;
+	u8 __iomem *s = host->spare0;
+	u16 t = host->devtype_data->spare_len;
 
 	j = (mtd->oobsize / n >> 1) << 1;
 
 	if (bfrom) {
 		for (i = 0; i < n - 1; i++)
-			memcpy(d + i * j, s + i * t, j);
+			memcpy_fromio(d + i * j, s + i * t, j);
 
 		/* the last section */
-		memcpy(d + i * j, s + i * t, mtd->oobsize - i * j);
+		memcpy_fromio(d + i * j, s + i * t, mtd->oobsize - i * j);
 	} else {
 		for (i = 0; i < n - 1; i++)
-			memcpy(&s[i * t], &d[i * j], j);
+			memcpy_toio(&s[i * t], &d[i * j], j);
 
 		/* the last section */
-		memcpy(&s[i * t], &d[i * j], mtd->oobsize - i * j);
+		memcpy_toio(&s[i * t], &d[i * j], mtd->oobsize - i * j);
 	}
 }
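copy_spare() now goes through memcpy_fromio()/memcpy_toio() because spare0 and main_area0 are retyped as void __iomem * earlier in this file: the controller's buffer RAM is device memory, and a plain memcpy() on an __iomem pointer is not guaranteed safe on every architecture (and trips sparse). A small sketch of the accessor pairing, with hypothetical names:

#include <linux/io.h>
#include <linux/types.h>

/* copy a controller SRAM window to/from an ordinary kernel buffer */
static void my_copy_from_sram(u8 *dst, const void __iomem *sram, size_t len)
{
	memcpy_fromio(dst, sram, len);		/* device -> memory */
}

static void my_copy_to_sram(void __iomem *sram, const u8 *src, size_t len)
{
	memcpy_toio(sram, src, len);		/* memory -> device */
}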
 
@@ -751,34 +823,44 @@
 		 * perform a read/write buf operation, the saved column
 		  * address is used to index into the full page.
 		 */
-		host->send_addr(host, 0, page_addr == -1);
+		host->devtype_data->send_addr(host, 0, page_addr == -1);
 		if (mtd->writesize > 512)
 			/* another col addr cycle for 2k page */
-			host->send_addr(host, 0, false);
+			host->devtype_data->send_addr(host, 0, false);
 	}
 
 	/* Write out page address, if necessary */
 	if (page_addr != -1) {
 		/* paddr_0 - p_addr_7 */
-		host->send_addr(host, (page_addr & 0xff), false);
+		host->devtype_data->send_addr(host, (page_addr & 0xff), false);
 
 		if (mtd->writesize > 512) {
 			if (mtd->size >= 0x10000000) {
 				/* paddr_8 - paddr_15 */
-				host->send_addr(host, (page_addr >> 8) & 0xff, false);
-				host->send_addr(host, (page_addr >> 16) & 0xff, true);
+				host->devtype_data->send_addr(host,
+						(page_addr >> 8) & 0xff,
+						false);
+				host->devtype_data->send_addr(host,
+						(page_addr >> 16) & 0xff,
+						true);
 			} else
 				/* paddr_8 - paddr_15 */
-				host->send_addr(host, (page_addr >> 8) & 0xff, true);
+				host->devtype_data->send_addr(host,
+						(page_addr >> 8) & 0xff, true);
 		} else {
 			/* One more address cycle for higher density devices */
 			if (mtd->size >= 0x4000000) {
 				/* paddr_8 - paddr_15 */
-				host->send_addr(host, (page_addr >> 8) & 0xff, false);
-				host->send_addr(host, (page_addr >> 16) & 0xff, true);
+				host->devtype_data->send_addr(host,
+						(page_addr >> 8) & 0xff,
+						false);
+				host->devtype_data->send_addr(host,
+						(page_addr >> 16) & 0xff,
+						true);
 			} else
 				/* paddr_8 - paddr_15 */
-				host->send_addr(host, (page_addr >> 8) & 0xff, true);
+				host->devtype_data->send_addr(host,
+						(page_addr >> 8) & 0xff, true);
 		}
 	}
 }
@@ -800,7 +882,7 @@
 		return 8;
 }
 
-static void preset_v1_v2(struct mtd_info *mtd)
+static void preset_v1(struct mtd_info *mtd)
 {
 	struct nand_chip *nand_chip = mtd->priv;
 	struct mxc_nand_host *host = nand_chip->priv;
@@ -809,13 +891,40 @@
 	if (nand_chip->ecc.mode == NAND_ECC_HW)
 		config1 |= NFC_V1_V2_CONFIG1_ECC_EN;
 
-	if (nfc_is_v21())
-		config1 |= NFC_V2_CONFIG1_FP_INT;
-
-	if (!cpu_is_mx21())
+	if (!host->devtype_data->irqpending_quirk)
 		config1 |= NFC_V1_V2_CONFIG1_INT_MSK;
 
-	if (nfc_is_v21() && mtd->writesize) {
+	host->eccsize = 1;
+
+	writew(config1, NFC_V1_V2_CONFIG1);
+	/* preset operation */
+
+	/* Unlock the internal RAM Buffer */
+	writew(0x2, NFC_V1_V2_CONFIG);
+
+	/* Blocks to be unlocked */
+	writew(0x0, NFC_V1_UNLOCKSTART_BLKADDR);
+	writew(0xffff, NFC_V1_UNLOCKEND_BLKADDR);
+
+	/* Unlock Block Command for given address range */
+	writew(0x4, NFC_V1_V2_WRPROT);
+}
+
+static void preset_v2(struct mtd_info *mtd)
+{
+	struct nand_chip *nand_chip = mtd->priv;
+	struct mxc_nand_host *host = nand_chip->priv;
+	uint16_t config1 = 0;
+
+	if (nand_chip->ecc.mode == NAND_ECC_HW)
+		config1 |= NFC_V1_V2_CONFIG1_ECC_EN;
+
+	config1 |= NFC_V2_CONFIG1_FP_INT;
+
+	if (!host->devtype_data->irqpending_quirk)
+		config1 |= NFC_V1_V2_CONFIG1_INT_MSK;
+
+	if (mtd->writesize) {
 		uint16_t pages_per_block = mtd->erasesize / mtd->writesize;
 
 		host->eccsize = get_eccsize(mtd);
@@ -834,20 +943,14 @@
 	writew(0x2, NFC_V1_V2_CONFIG);
 
 	/* Blocks to be unlocked */
-	if (nfc_is_v21()) {
-		writew(0x0, NFC_V21_UNLOCKSTART_BLKADDR0);
-		writew(0x0, NFC_V21_UNLOCKSTART_BLKADDR1);
-		writew(0x0, NFC_V21_UNLOCKSTART_BLKADDR2);
-		writew(0x0, NFC_V21_UNLOCKSTART_BLKADDR3);
-		writew(0xffff, NFC_V21_UNLOCKEND_BLKADDR0);
-		writew(0xffff, NFC_V21_UNLOCKEND_BLKADDR1);
-		writew(0xffff, NFC_V21_UNLOCKEND_BLKADDR2);
-		writew(0xffff, NFC_V21_UNLOCKEND_BLKADDR3);
-	} else if (nfc_is_v1()) {
-		writew(0x0, NFC_V1_UNLOCKSTART_BLKADDR);
-		writew(0xffff, NFC_V1_UNLOCKEND_BLKADDR);
-	} else
-		BUG();
+	writew(0x0, NFC_V21_UNLOCKSTART_BLKADDR0);
+	writew(0x0, NFC_V21_UNLOCKSTART_BLKADDR1);
+	writew(0x0, NFC_V21_UNLOCKSTART_BLKADDR2);
+	writew(0x0, NFC_V21_UNLOCKSTART_BLKADDR3);
+	writew(0xffff, NFC_V21_UNLOCKEND_BLKADDR0);
+	writew(0xffff, NFC_V21_UNLOCKEND_BLKADDR1);
+	writew(0xffff, NFC_V21_UNLOCKEND_BLKADDR2);
+	writew(0xffff, NFC_V21_UNLOCKEND_BLKADDR3);
 
 	/* Unlock Block Command for given address range */
 	writew(0x4, NFC_V1_V2_WRPROT);
@@ -937,15 +1040,15 @@
 	/* Command pre-processing step */
 	switch (command) {
 	case NAND_CMD_RESET:
-		host->preset(mtd);
-		host->send_cmd(host, command, false);
+		host->devtype_data->preset(mtd);
+		host->devtype_data->send_cmd(host, command, false);
 		break;
 
 	case NAND_CMD_STATUS:
 		host->buf_start = 0;
 		host->status_request = true;
 
-		host->send_cmd(host, command, true);
+		host->devtype_data->send_cmd(host, command, true);
 		mxc_do_addr_cycle(mtd, column, page_addr);
 		break;
 
@@ -958,15 +1061,16 @@
 
 		command = NAND_CMD_READ0; /* only READ0 is valid */
 
-		host->send_cmd(host, command, false);
+		host->devtype_data->send_cmd(host, command, false);
 		mxc_do_addr_cycle(mtd, column, page_addr);
 
 		if (mtd->writesize > 512)
-			host->send_cmd(host, NAND_CMD_READSTART, true);
+			host->devtype_data->send_cmd(host,
+					NAND_CMD_READSTART, true);
 
-		host->send_page(mtd, NFC_OUTPUT);
+		host->devtype_data->send_page(mtd, NFC_OUTPUT);
 
-		memcpy(host->data_buf, host->main_area0, mtd->writesize);
+		memcpy_fromio(host->data_buf, host->main_area0, mtd->writesize);
 		copy_spare(mtd, true);
 		break;
 
@@ -977,28 +1081,28 @@
 
 		host->buf_start = column;
 
-		host->send_cmd(host, command, false);
+		host->devtype_data->send_cmd(host, command, false);
 		mxc_do_addr_cycle(mtd, column, page_addr);
 		break;
 
 	case NAND_CMD_PAGEPROG:
-		memcpy(host->main_area0, host->data_buf, mtd->writesize);
+		memcpy_toio(host->main_area0, host->data_buf, mtd->writesize);
 		copy_spare(mtd, false);
-		host->send_page(mtd, NFC_INPUT);
-		host->send_cmd(host, command, true);
+		host->devtype_data->send_page(mtd, NFC_INPUT);
+		host->devtype_data->send_cmd(host, command, true);
 		mxc_do_addr_cycle(mtd, column, page_addr);
 		break;
 
 	case NAND_CMD_READID:
-		host->send_cmd(host, command, true);
+		host->devtype_data->send_cmd(host, command, true);
 		mxc_do_addr_cycle(mtd, column, page_addr);
-		host->send_read_id(host);
+		host->devtype_data->send_read_id(host);
 		host->buf_start = column;
 		break;
 
 	case NAND_CMD_ERASE1:
 	case NAND_CMD_ERASE2:
-		host->send_cmd(host, command, false);
+		host->devtype_data->send_cmd(host, command, false);
 		mxc_do_addr_cycle(mtd, column, page_addr);
 
 		break;
@@ -1032,15 +1136,191 @@
 	.pattern = mirror_pattern,
 };
 
+/* v1 + irqpending_quirk: i.MX21 */
+static const struct mxc_nand_devtype_data imx21_nand_devtype_data = {
+	.preset = preset_v1,
+	.send_cmd = send_cmd_v1_v2,
+	.send_addr = send_addr_v1_v2,
+	.send_page = send_page_v1,
+	.send_read_id = send_read_id_v1_v2,
+	.get_dev_status = get_dev_status_v1_v2,
+	.check_int = check_int_v1_v2,
+	.irq_control = irq_control_v1_v2,
+	.get_ecc_status = get_ecc_status_v1,
+	.ecclayout_512 = &nandv1_hw_eccoob_smallpage,
+	.ecclayout_2k = &nandv1_hw_eccoob_largepage,
+	.ecclayout_4k = &nandv1_hw_eccoob_smallpage, /* XXX: needs fix */
+	.select_chip = mxc_nand_select_chip_v1_v3,
+	.correct_data = mxc_nand_correct_data_v1,
+	.irqpending_quirk = 1,
+	.needs_ip = 0,
+	.regs_offset = 0xe00,
+	.spare0_offset = 0x800,
+	.spare_len = 16,
+	.eccbytes = 3,
+	.eccsize = 1,
+};
+
+/* v1 + !irqpending_quirk: i.MX27, i.MX31 */
+static const struct mxc_nand_devtype_data imx27_nand_devtype_data = {
+	.preset = preset_v1,
+	.send_cmd = send_cmd_v1_v2,
+	.send_addr = send_addr_v1_v2,
+	.send_page = send_page_v1,
+	.send_read_id = send_read_id_v1_v2,
+	.get_dev_status = get_dev_status_v1_v2,
+	.check_int = check_int_v1_v2,
+	.irq_control = irq_control_v1_v2,
+	.get_ecc_status = get_ecc_status_v1,
+	.ecclayout_512 = &nandv1_hw_eccoob_smallpage,
+	.ecclayout_2k = &nandv1_hw_eccoob_largepage,
+	.ecclayout_4k = &nandv1_hw_eccoob_smallpage, /* XXX: needs fix */
+	.select_chip = mxc_nand_select_chip_v1_v3,
+	.correct_data = mxc_nand_correct_data_v1,
+	.irqpending_quirk = 0,
+	.needs_ip = 0,
+	.regs_offset = 0xe00,
+	.spare0_offset = 0x800,
+	.axi_offset = 0,
+	.spare_len = 16,
+	.eccbytes = 3,
+	.eccsize = 1,
+};
+
+/* v21: i.MX25, i.MX35 */
+static const struct mxc_nand_devtype_data imx25_nand_devtype_data = {
+	.preset = preset_v2,
+	.send_cmd = send_cmd_v1_v2,
+	.send_addr = send_addr_v1_v2,
+	.send_page = send_page_v2,
+	.send_read_id = send_read_id_v1_v2,
+	.get_dev_status = get_dev_status_v1_v2,
+	.check_int = check_int_v1_v2,
+	.irq_control = irq_control_v1_v2,
+	.get_ecc_status = get_ecc_status_v2,
+	.ecclayout_512 = &nandv2_hw_eccoob_smallpage,
+	.ecclayout_2k = &nandv2_hw_eccoob_largepage,
+	.ecclayout_4k = &nandv2_hw_eccoob_4k,
+	.select_chip = mxc_nand_select_chip_v2,
+	.correct_data = mxc_nand_correct_data_v2_v3,
+	.irqpending_quirk = 0,
+	.needs_ip = 0,
+	.regs_offset = 0x1e00,
+	.spare0_offset = 0x1000,
+	.axi_offset = 0,
+	.spare_len = 64,
+	.eccbytes = 9,
+	.eccsize = 0,
+};
+
+/* v3: i.MX51, i.MX53 */
+static const struct mxc_nand_devtype_data imx51_nand_devtype_data = {
+	.preset = preset_v3,
+	.send_cmd = send_cmd_v3,
+	.send_addr = send_addr_v3,
+	.send_page = send_page_v3,
+	.send_read_id = send_read_id_v3,
+	.get_dev_status = get_dev_status_v3,
+	.check_int = check_int_v3,
+	.irq_control = irq_control_v3,
+	.get_ecc_status = get_ecc_status_v3,
+	.ecclayout_512 = &nandv2_hw_eccoob_smallpage,
+	.ecclayout_2k = &nandv2_hw_eccoob_largepage,
+	.ecclayout_4k = &nandv2_hw_eccoob_smallpage, /* XXX: needs fix */
+	.select_chip = mxc_nand_select_chip_v1_v3,
+	.correct_data = mxc_nand_correct_data_v2_v3,
+	.irqpending_quirk = 0,
+	.needs_ip = 1,
+	.regs_offset = 0,
+	.spare0_offset = 0x1000,
+	.axi_offset = 0x1e00,
+	.spare_len = 64,
+	.eccbytes = 0,
+	.eccsize = 0,
+};
+
+#ifdef CONFIG_OF_MTD
+static const struct of_device_id mxcnd_dt_ids[] = {
+	{
+		.compatible = "fsl,imx21-nand",
+		.data = &imx21_nand_devtype_data,
+	}, {
+		.compatible = "fsl,imx27-nand",
+		.data = &imx27_nand_devtype_data,
+	}, {
+		.compatible = "fsl,imx25-nand",
+		.data = &imx25_nand_devtype_data,
+	}, {
+		.compatible = "fsl,imx51-nand",
+		.data = &imx51_nand_devtype_data,
+	},
+	{ /* sentinel */ }
+};
+
+static int __init mxcnd_probe_dt(struct mxc_nand_host *host)
+{
+	struct device_node *np = host->dev->of_node;
+	struct mxc_nand_platform_data *pdata = &host->pdata;
+	const struct of_device_id *of_id =
+		of_match_device(mxcnd_dt_ids, host->dev);
+	int buswidth;
+
+	if (!np)
+		return 1;
+
+	if (of_get_nand_ecc_mode(np) >= 0)
+		pdata->hw_ecc = 1;
+
+	pdata->flash_bbt = of_get_nand_on_flash_bbt(np);
+
+	buswidth = of_get_nand_bus_width(np);
+	if (buswidth < 0)
+		return buswidth;
+
+	pdata->width = buswidth / 8;
+
+	host->devtype_data = of_id->data;
+
+	return 0;
+}
+#else
+static int __init mxcnd_probe_dt(struct mxc_nand_host *host)
+{
+	return 1;
+}
+#endif
+
+static int __init mxcnd_probe_pdata(struct mxc_nand_host *host)
+{
+	struct mxc_nand_platform_data *pdata = host->dev->platform_data;
+
+	if (!pdata)
+		return -ENODEV;
+
+	host->pdata = *pdata;
+
+	if (nfc_is_v1()) {
+		if (cpu_is_mx21())
+			host->devtype_data = &imx21_nand_devtype_data;
+		else
+			host->devtype_data = &imx27_nand_devtype_data;
+	} else if (nfc_is_v21()) {
+		host->devtype_data = &imx25_nand_devtype_data;
+	} else if (nfc_is_v3_2()) {
+		host->devtype_data = &imx51_nand_devtype_data;
+	} else
+		BUG();
+
+	return 0;
+}
+
 static int __init mxcnd_probe(struct platform_device *pdev)
 {
 	struct nand_chip *this;
 	struct mtd_info *mtd;
-	struct mxc_nand_platform_data *pdata = pdev->dev.platform_data;
 	struct mxc_nand_host *host;
 	struct resource *res;
 	int err = 0;
-	struct nand_ecclayout *oob_smallpage, *oob_largepage;
 
 	/* Allocate memory for MTD device structure and private data */
 	host = kzalloc(sizeof(struct mxc_nand_host) + NAND_MAX_PAGESIZE +
@@ -1065,7 +1345,6 @@
 	this->priv = host;
 	this->dev_ready = mxc_nand_dev_ready;
 	this->cmdfunc = mxc_nand_command;
-	this->select_chip = mxc_nand_select_chip;
 	this->read_byte = mxc_nand_read_byte;
 	this->read_word = mxc_nand_read_word;
 	this->write_buf = mxc_nand_write_buf;
@@ -1078,7 +1357,7 @@
 		goto eclk;
 	}
 
-	clk_enable(host->clk);
+	clk_prepare_enable(host->clk);
 	host->clk_act = 1;
 
 	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
@@ -1095,36 +1374,26 @@
 
 	host->main_area0 = host->base;
 
-	if (nfc_is_v1() || nfc_is_v21()) {
-		host->preset = preset_v1_v2;
-		host->send_cmd = send_cmd_v1_v2;
-		host->send_addr = send_addr_v1_v2;
-		host->send_page = send_page_v1_v2;
-		host->send_read_id = send_read_id_v1_v2;
-		host->get_dev_status = get_dev_status_v1_v2;
-		host->check_int = check_int_v1_v2;
-		if (cpu_is_mx21())
-			host->irq_control = irq_control_mx21;
-		else
-			host->irq_control = irq_control_v1_v2;
-	}
+	err = mxcnd_probe_dt(host);
+	if (err > 0)
+		err = mxcnd_probe_pdata(host);
+	if (err < 0)
+		goto eirq;
 
-	if (nfc_is_v21()) {
-		host->regs = host->base + 0x1e00;
-		host->spare0 = host->base + 0x1000;
-		host->spare_len = 64;
-		oob_smallpage = &nandv2_hw_eccoob_smallpage;
-		oob_largepage = &nandv2_hw_eccoob_largepage;
-		this->ecc.bytes = 9;
-	} else if (nfc_is_v1()) {
-		host->regs = host->base + 0xe00;
-		host->spare0 = host->base + 0x800;
-		host->spare_len = 16;
-		oob_smallpage = &nandv1_hw_eccoob_smallpage;
-		oob_largepage = &nandv1_hw_eccoob_largepage;
-		this->ecc.bytes = 3;
-		host->eccsize = 1;
-	} else if (nfc_is_v3_2()) {
+	if (host->devtype_data->regs_offset)
+		host->regs = host->base + host->devtype_data->regs_offset;
+	host->spare0 = host->base + host->devtype_data->spare0_offset;
+	if (host->devtype_data->axi_offset)
+		host->regs_axi = host->base + host->devtype_data->axi_offset;
+
+	this->ecc.bytes = host->devtype_data->eccbytes;
+	host->eccsize = host->devtype_data->eccsize;
+
+	this->select_chip = host->devtype_data->select_chip;
+	this->ecc.size = 512;
+	this->ecc.layout = host->devtype_data->ecclayout_512;
+
+	if (host->devtype_data->needs_ip) {
 		res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
 		if (!res) {
 			err = -ENODEV;
@@ -1135,42 +1404,22 @@
 			err = -ENOMEM;
 			goto eirq;
 		}
-		host->regs_axi = host->base + 0x1e00;
-		host->spare0 = host->base + 0x1000;
-		host->spare_len = 64;
-		host->preset = preset_v3;
-		host->send_cmd = send_cmd_v3;
-		host->send_addr = send_addr_v3;
-		host->send_page = send_page_v3;
-		host->send_read_id = send_read_id_v3;
-		host->check_int = check_int_v3;
-		host->get_dev_status = get_dev_status_v3;
-		host->irq_control = irq_control_v3;
-		oob_smallpage = &nandv2_hw_eccoob_smallpage;
-		oob_largepage = &nandv2_hw_eccoob_largepage;
-	} else
-		BUG();
+	}
 
-	this->ecc.size = 512;
-	this->ecc.layout = oob_smallpage;
-
-	if (pdata->hw_ecc) {
+	if (host->pdata.hw_ecc) {
 		this->ecc.calculate = mxc_nand_calculate_ecc;
 		this->ecc.hwctl = mxc_nand_enable_hwecc;
-		if (nfc_is_v1())
-			this->ecc.correct = mxc_nand_correct_data_v1;
-		else
-			this->ecc.correct = mxc_nand_correct_data_v2_v3;
+		this->ecc.correct = host->devtype_data->correct_data;
 		this->ecc.mode = NAND_ECC_HW;
 	} else {
 		this->ecc.mode = NAND_ECC_SOFT;
 	}
 
-	/* NAND bus width determines access funtions used by upper layer */
-	if (pdata->width == 2)
+	/* NAND bus width determines access functions used by upper layer */
+	if (host->pdata.width == 2)
 		this->options |= NAND_BUSWIDTH_16;
 
-	if (pdata->flash_bbt) {
+	if (host->pdata.flash_bbt) {
 		this->bbt_td = &bbt_main_descr;
 		this->bbt_md = &bbt_mirror_descr;
 		/* update flash based bbt */
@@ -1182,28 +1431,25 @@
 	host->irq = platform_get_irq(pdev, 0);
 
 	/*
-	 * mask the interrupt. For i.MX21 explicitely call
-	 * irq_control_v1_v2 to use the mask bit. We can't call
-	 * disable_irq_nosync() for an interrupt we do not own yet.
+	 * Use host->devtype_data->irq_control() here instead of irq_control()
+	 * because we must not disable_irq_nosync without having requested the
+	 * irq.
 	 */
-	if (cpu_is_mx21())
-		irq_control_v1_v2(host, 0);
-	else
-		host->irq_control(host, 0);
+	host->devtype_data->irq_control(host, 0);
 
 	err = request_irq(host->irq, mxc_nfc_irq, IRQF_DISABLED, DRIVER_NAME, host);
 	if (err)
 		goto eirq;
 
-	host->irq_control(host, 0);
-
 	/*
-	 * Now that the interrupt is disabled make sure the interrupt
-	 * mask bit is cleared on i.MX21. Otherwise we can't read
-	 * the interrupt status bit on this machine.
+	 * Now that we "own" the interrupt make sure the interrupt mask bit is
+	 * cleared on i.MX21. Otherwise we can't read the interrupt status bit
+	 * on this machine.
 	 */
-	if (cpu_is_mx21())
-		irq_control_v1_v2(host, 1);
+	if (host->devtype_data->irqpending_quirk) {
+		disable_irq_nosync(host->irq);
+		host->devtype_data->irq_control(host, 1);
+	}
 
 	/* first scan to find the device and get the page size */
 	if (nand_scan_ident(mtd, nfc_is_v21() ? 4 : 1, NULL)) {
@@ -1212,18 +1458,12 @@
 	}
 
 	/* Call preset again, with correct writesize this time */
-	host->preset(mtd);
+	host->devtype_data->preset(mtd);
 
 	if (mtd->writesize == 2048)
-		this->ecc.layout = oob_largepage;
-	if (nfc_is_v21() && mtd->writesize == 4096)
-		this->ecc.layout = &nandv2_hw_eccoob_4k;
-
-	/* second phase scan */
-	if (nand_scan_tail(mtd)) {
-		err = -ENXIO;
-		goto escan;
-	}
+		this->ecc.layout = host->devtype_data->ecclayout_2k;
+	else if (mtd->writesize == 4096)
+		this->ecc.layout = host->devtype_data->ecclayout_4k;
 
 	if (this->ecc.mode == NAND_ECC_HW) {
 		if (nfc_is_v1())
@@ -1232,9 +1472,19 @@
 			this->ecc.strength = (host->eccsize == 4) ? 4 : 8;
 	}
 
+	/* second phase scan */
+	if (nand_scan_tail(mtd)) {
+		err = -ENXIO;
+		goto escan;
+	}
+
 	/* Register the partitions */
-	mtd_device_parse_register(mtd, part_probes, NULL, pdata->parts,
-				  pdata->nr_parts);
+	mtd_device_parse_register(mtd, part_probes,
+			&(struct mtd_part_parser_data){
+				.of_node = pdev->dev.of_node,
+			},
+			host->pdata.parts,
+			host->pdata.nr_parts);
 
 	platform_set_drvdata(pdev, host);
 
@@ -1275,6 +1525,8 @@
 static struct platform_driver mxcnd_driver = {
 	.driver = {
 		   .name = DRIVER_NAME,
+		   .owner = THIS_MODULE,
+		   .of_match_table = of_match_ptr(mxcnd_dt_ids),
 	},
 	.remove = __devexit_p(mxcnd_remove),
 };
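The bulk of the mxc_nand churn above is one refactor: the per-SoC function pointers and magic offsets move out of struct mxc_nand_host into a const mxc_nand_devtype_data, selected once in mxcnd_probe_dt() (via of_match_device) or mxcnd_probe_pdata() (via the old nfc_is_*()/cpu_is_*() checks), so the hot paths make a single indirect call instead of branching on the CPU type. A reduced, hypothetical sketch of that shape:

#include <linux/types.h>

struct my_devtype_data {
	void (*send_cmd)(void *hw, u16 cmd, int useirq);
	int (*check_int)(void *hw);
	int spare_len;
	int eccbytes;
};

struct my_host {
	const struct my_devtype_data *devtype_data;	/* set once at probe */
	void *hw;
};

static void my_reset(struct my_host *host)
{
	/* one indirect call; no cpu_is_*() in the fast path */
	host->devtype_data->send_cmd(host->hw, 0xff /* RESET */, 0);
}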
diff --git a/drivers/mtd/nand/nand_base.c b/drivers/mtd/nand/nand_base.c
index 47b19c0..d47586c 100644
--- a/drivers/mtd/nand/nand_base.c
+++ b/drivers/mtd/nand/nand_base.c
@@ -1066,15 +1066,17 @@
  * @mtd: mtd info structure
  * @chip: nand chip info structure
  * @buf: buffer to store read data
+ * @oob_required: caller requires OOB data read to chip->oob_poi
  * @page: page number to read
  *
  * Not for syndrome calculating ECC controllers, which use a special oob layout.
  */
 static int nand_read_page_raw(struct mtd_info *mtd, struct nand_chip *chip,
-			      uint8_t *buf, int page)
+			      uint8_t *buf, int oob_required, int page)
 {
 	chip->read_buf(mtd, buf, mtd->writesize);
-	chip->read_buf(mtd, chip->oob_poi, mtd->oobsize);
+	if (oob_required)
+		chip->read_buf(mtd, chip->oob_poi, mtd->oobsize);
 	return 0;
 }
 
@@ -1083,13 +1085,14 @@
  * @mtd: mtd info structure
  * @chip: nand chip info structure
  * @buf: buffer to store read data
+ * @oob_required: caller requires OOB data read to chip->oob_poi
  * @page: page number to read
  *
  * We need a special oob layout and handling even when OOB isn't used.
  */
 static int nand_read_page_raw_syndrome(struct mtd_info *mtd,
-					struct nand_chip *chip,
-					uint8_t *buf, int page)
+				       struct nand_chip *chip, uint8_t *buf,
+				       int oob_required, int page)
 {
 	int eccsize = chip->ecc.size;
 	int eccbytes = chip->ecc.bytes;
@@ -1126,10 +1129,11 @@
  * @mtd: mtd info structure
  * @chip: nand chip info structure
  * @buf: buffer to store read data
+ * @oob_required: caller requires OOB data read to chip->oob_poi
  * @page: page number to read
  */
 static int nand_read_page_swecc(struct mtd_info *mtd, struct nand_chip *chip,
-				uint8_t *buf, int page)
+				uint8_t *buf, int oob_required, int page)
 {
 	int i, eccsize = chip->ecc.size;
 	int eccbytes = chip->ecc.bytes;
@@ -1138,8 +1142,9 @@
 	uint8_t *ecc_calc = chip->buffers->ecccalc;
 	uint8_t *ecc_code = chip->buffers->ecccode;
 	uint32_t *eccpos = chip->ecc.layout->eccpos;
+	unsigned int max_bitflips = 0;
 
-	chip->ecc.read_page_raw(mtd, chip, buf, page);
+	chip->ecc.read_page_raw(mtd, chip, buf, 1, page);
 
 	for (i = 0; eccsteps; eccsteps--, i += eccbytes, p += eccsize)
 		chip->ecc.calculate(mtd, p, &ecc_calc[i]);
@@ -1154,12 +1159,14 @@
 		int stat;
 
 		stat = chip->ecc.correct(mtd, p, &ecc_code[i], &ecc_calc[i]);
-		if (stat < 0)
+		if (stat < 0) {
 			mtd->ecc_stats.failed++;
-		else
+		} else {
 			mtd->ecc_stats.corrected += stat;
+			max_bitflips = max_t(unsigned int, max_bitflips, stat);
+		}
 	}
-	return 0;
+	return max_bitflips;
 }
 
 /**
@@ -1180,6 +1187,7 @@
 	int datafrag_len, eccfrag_len, aligned_len, aligned_pos;
 	int busw = (chip->options & NAND_BUSWIDTH_16) ? 2 : 1;
 	int index = 0;
+	unsigned int max_bitflips = 0;
 
 	/* Column address within the page aligned to ECC size (256bytes) */
 	start_step = data_offs / chip->ecc.size;
@@ -1244,12 +1252,14 @@
 
 		stat = chip->ecc.correct(mtd, p,
 			&chip->buffers->ecccode[i], &chip->buffers->ecccalc[i]);
-		if (stat < 0)
+		if (stat < 0) {
 			mtd->ecc_stats.failed++;
-		else
+		} else {
 			mtd->ecc_stats.corrected += stat;
+			max_bitflips = max_t(unsigned int, max_bitflips, stat);
+		}
 	}
-	return 0;
+	return max_bitflips;
 }
 
 /**
@@ -1257,12 +1267,13 @@
  * @mtd: mtd info structure
  * @chip: nand chip info structure
  * @buf: buffer to store read data
+ * @oob_required: caller requires OOB data read to chip->oob_poi
  * @page: page number to read
  *
  * Not for syndrome calculating ECC controllers which need a special oob layout.
  */
 static int nand_read_page_hwecc(struct mtd_info *mtd, struct nand_chip *chip,
-				uint8_t *buf, int page)
+				uint8_t *buf, int oob_required, int page)
 {
 	int i, eccsize = chip->ecc.size;
 	int eccbytes = chip->ecc.bytes;
@@ -1271,6 +1282,7 @@
 	uint8_t *ecc_calc = chip->buffers->ecccalc;
 	uint8_t *ecc_code = chip->buffers->ecccode;
 	uint32_t *eccpos = chip->ecc.layout->eccpos;
+	unsigned int max_bitflips = 0;
 
 	for (i = 0; eccsteps; eccsteps--, i += eccbytes, p += eccsize) {
 		chip->ecc.hwctl(mtd, NAND_ECC_READ);
@@ -1289,12 +1301,14 @@
 		int stat;
 
 		stat = chip->ecc.correct(mtd, p, &ecc_code[i], &ecc_calc[i]);
-		if (stat < 0)
+		if (stat < 0) {
 			mtd->ecc_stats.failed++;
-		else
+		} else {
 			mtd->ecc_stats.corrected += stat;
+			max_bitflips = max_t(unsigned int, max_bitflips, stat);
+		}
 	}
-	return 0;
+	return max_bitflips;
 }
 
 /**
@@ -1302,6 +1316,7 @@
  * @mtd: mtd info structure
  * @chip: nand chip info structure
  * @buf: buffer to store read data
+ * @oob_required: caller requires OOB data read to chip->oob_poi
  * @page: page number to read
  *
  * Hardware ECC for large page chips, require OOB to be read first. For this
@@ -1311,7 +1326,7 @@
  * the data area, by overwriting the NAND manufacturer bad block markings.
  */
 static int nand_read_page_hwecc_oob_first(struct mtd_info *mtd,
-	struct nand_chip *chip, uint8_t *buf, int page)
+	struct nand_chip *chip, uint8_t *buf, int oob_required, int page)
 {
 	int i, eccsize = chip->ecc.size;
 	int eccbytes = chip->ecc.bytes;
@@ -1320,6 +1335,7 @@
 	uint8_t *ecc_code = chip->buffers->ecccode;
 	uint32_t *eccpos = chip->ecc.layout->eccpos;
 	uint8_t *ecc_calc = chip->buffers->ecccalc;
+	unsigned int max_bitflips = 0;
 
 	/* Read the OOB area first */
 	chip->cmdfunc(mtd, NAND_CMD_READOOB, 0, page);
@@ -1337,12 +1353,14 @@
 		chip->ecc.calculate(mtd, p, &ecc_calc[i]);
 
 		stat = chip->ecc.correct(mtd, p, &ecc_code[i], NULL);
-		if (stat < 0)
+		if (stat < 0) {
 			mtd->ecc_stats.failed++;
-		else
+		} else {
 			mtd->ecc_stats.corrected += stat;
+			max_bitflips = max_t(unsigned int, max_bitflips, stat);
+		}
 	}
-	return 0;
+	return max_bitflips;
 }
 
 /**
@@ -1350,19 +1368,21 @@
  * @mtd: mtd info structure
  * @chip: nand chip info structure
  * @buf: buffer to store read data
+ * @oob_required: caller requires OOB data read to chip->oob_poi
  * @page: page number to read
  *
  * The hw generator calculates the error syndrome automatically. Therefore we
  * need a special oob layout and handling.
  */
 static int nand_read_page_syndrome(struct mtd_info *mtd, struct nand_chip *chip,
-				   uint8_t *buf, int page)
+				   uint8_t *buf, int oob_required, int page)
 {
 	int i, eccsize = chip->ecc.size;
 	int eccbytes = chip->ecc.bytes;
 	int eccsteps = chip->ecc.steps;
 	uint8_t *p = buf;
 	uint8_t *oob = chip->oob_poi;
+	unsigned int max_bitflips = 0;
 
 	for (i = 0; eccsteps; eccsteps--, i += eccbytes, p += eccsize) {
 		int stat;
@@ -1379,10 +1399,12 @@
 		chip->read_buf(mtd, oob, eccbytes);
 		stat = chip->ecc.correct(mtd, p, oob, NULL);
 
-		if (stat < 0)
+		if (stat < 0) {
 			mtd->ecc_stats.failed++;
-		else
+		} else {
 			mtd->ecc_stats.corrected += stat;
+			max_bitflips = max_t(unsigned int, max_bitflips, stat);
+		}
 
 		oob += eccbytes;
 
@@ -1397,7 +1419,7 @@
 	if (i)
 		chip->read_buf(mtd, oob, i);
 
-	return 0;
+	return max_bitflips;
 }
 
 /**
@@ -1459,11 +1481,9 @@
 static int nand_do_read_ops(struct mtd_info *mtd, loff_t from,
 			    struct mtd_oob_ops *ops)
 {
-	int chipnr, page, realpage, col, bytes, aligned;
+	int chipnr, page, realpage, col, bytes, aligned, oob_required;
 	struct nand_chip *chip = mtd->priv;
 	struct mtd_ecc_stats stats;
-	int blkcheck = (1 << (chip->phys_erase_shift - chip->page_shift)) - 1;
-	int sndcmd = 1;
 	int ret = 0;
 	uint32_t readlen = ops->len;
 	uint32_t oobreadlen = ops->ooblen;
@@ -1471,6 +1491,7 @@
 		mtd->oobavail : mtd->oobsize;
 
 	uint8_t *bufpoi, *oob, *buf;
+	unsigned int max_bitflips = 0;
 
 	stats = mtd->ecc_stats;
 
@@ -1484,6 +1505,7 @@
 
 	buf = ops->datbuf;
 	oob = ops->oobbuf;
+	oob_required = oob ? 1 : 0;
 
 	while (1) {
 		bytes = min(mtd->writesize - col, readlen);
@@ -1493,21 +1515,22 @@
 		if (realpage != chip->pagebuf || oob) {
 			bufpoi = aligned ? buf : chip->buffers->databuf;
 
-			if (likely(sndcmd)) {
-				chip->cmdfunc(mtd, NAND_CMD_READ0, 0x00, page);
-				sndcmd = 0;
-			}
+			chip->cmdfunc(mtd, NAND_CMD_READ0, 0x00, page);
 
-			/* Now read the page into the buffer */
+			/*
+			 * Now read the page into the buffer.  Absent an error,
+			 * the read methods return max bitflips per ecc step.
+			 */
 			if (unlikely(ops->mode == MTD_OPS_RAW))
-				ret = chip->ecc.read_page_raw(mtd, chip,
-							      bufpoi, page);
+				ret = chip->ecc.read_page_raw(mtd, chip, bufpoi,
+							      oob_required,
+							      page);
 			else if (!aligned && NAND_SUBPAGE_READ(chip) && !oob)
 				ret = chip->ecc.read_subpage(mtd, chip,
 							col, bytes, bufpoi);
 			else
 				ret = chip->ecc.read_page(mtd, chip, bufpoi,
-							  page);
+							  oob_required, page);
 			if (ret < 0) {
 				if (!aligned)
 					/* Invalidate page cache */
@@ -1515,22 +1538,25 @@
 				break;
 			}
 
+			max_bitflips = max_t(unsigned int, max_bitflips, ret);
+
 			/* Transfer not aligned data */
 			if (!aligned) {
 				if (!NAND_SUBPAGE_READ(chip) && !oob &&
 				    !(mtd->ecc_stats.failed - stats.failed) &&
-				    (ops->mode != MTD_OPS_RAW))
+				    (ops->mode != MTD_OPS_RAW)) {
 					chip->pagebuf = realpage;
-				else
+					chip->pagebuf_bitflips = ret;
+				} else {
 					/* Invalidate page cache */
 					chip->pagebuf = -1;
+				}
 				memcpy(buf, chip->buffers->databuf + col, bytes);
 			}
 
 			buf += bytes;
 
 			if (unlikely(oob)) {
-
 				int toread = min(oobreadlen, max_oobsize);
 
 				if (toread) {
@@ -1541,13 +1567,7 @@
 			}
 
 			if (!(chip->options & NAND_NO_READRDY)) {
-				/*
-				 * Apply delay or wait for ready/busy pin. Do
-				 * this before the AUTOINCR check, so no
-				 * problems arise if a chip which does auto
-				 * increment is marked as NOAUTOINCR by the
-				 * board driver.
-				 */
+				/* Apply delay or wait for ready/busy pin */
 				if (!chip->dev_ready)
 					udelay(chip->chip_delay);
 				else
@@ -1556,6 +1576,8 @@
 		} else {
 			memcpy(buf, chip->buffers->databuf + col, bytes);
 			buf += bytes;
+			max_bitflips = max_t(unsigned int, max_bitflips,
+					     chip->pagebuf_bitflips);
 		}
 
 		readlen -= bytes;
@@ -1575,26 +1597,19 @@
 			chip->select_chip(mtd, -1);
 			chip->select_chip(mtd, chipnr);
 		}
-
-		/*
-		 * Check, if the chip supports auto page increment or if we
-		 * have hit a block boundary.
-		 */
-		if (!NAND_CANAUTOINCR(chip) || !(page & blkcheck))
-			sndcmd = 1;
 	}
 
 	ops->retlen = ops->len - (size_t) readlen;
 	if (oob)
 		ops->oobretlen = ops->ooblen - oobreadlen;
 
-	if (ret)
+	if (ret < 0)
 		return ret;
 
 	if (mtd->ecc_stats.failed - stats.failed)
 		return -EBADMSG;
 
-	return  mtd->ecc_stats.corrected - stats.corrected ? -EUCLEAN : 0;
+	return max_bitflips;
 }
 
 /**
@@ -1630,17 +1645,13 @@
  * @mtd: mtd info structure
  * @chip: nand chip info structure
  * @page: page number to read
- * @sndcmd: flag whether to issue read command or not
  */
 static int nand_read_oob_std(struct mtd_info *mtd, struct nand_chip *chip,
-			     int page, int sndcmd)
+			     int page)
 {
-	if (sndcmd) {
-		chip->cmdfunc(mtd, NAND_CMD_READOOB, 0, page);
-		sndcmd = 0;
-	}
+	chip->cmdfunc(mtd, NAND_CMD_READOOB, 0, page);
 	chip->read_buf(mtd, chip->oob_poi, mtd->oobsize);
-	return sndcmd;
+	return 0;
 }
 
 /**
@@ -1649,10 +1660,9 @@
  * @mtd: mtd info structure
  * @chip: nand chip info structure
  * @page: page number to read
- * @sndcmd: flag whether to issue read command or not
  */
 static int nand_read_oob_syndrome(struct mtd_info *mtd, struct nand_chip *chip,
-				  int page, int sndcmd)
+				  int page)
 {
 	uint8_t *buf = chip->oob_poi;
 	int length = mtd->oobsize;
@@ -1679,7 +1689,7 @@
 	if (length > 0)
 		chip->read_buf(mtd, bufpoi, length);
 
-	return 1;
+	return 0;
 }
 
 /**
@@ -1775,13 +1785,13 @@
 static int nand_do_read_oob(struct mtd_info *mtd, loff_t from,
 			    struct mtd_oob_ops *ops)
 {
-	int page, realpage, chipnr, sndcmd = 1;
+	int page, realpage, chipnr;
 	struct nand_chip *chip = mtd->priv;
 	struct mtd_ecc_stats stats;
-	int blkcheck = (1 << (chip->phys_erase_shift - chip->page_shift)) - 1;
 	int readlen = ops->ooblen;
 	int len;
 	uint8_t *buf = ops->oobbuf;
+	int ret = 0;
 
 	pr_debug("%s: from = 0x%08Lx, len = %i\n",
 			__func__, (unsigned long long)from, readlen);
@@ -1817,20 +1827,18 @@
 
 	while (1) {
 		if (ops->mode == MTD_OPS_RAW)
-			sndcmd = chip->ecc.read_oob_raw(mtd, chip, page, sndcmd);
+			ret = chip->ecc.read_oob_raw(mtd, chip, page);
 		else
-			sndcmd = chip->ecc.read_oob(mtd, chip, page, sndcmd);
+			ret = chip->ecc.read_oob(mtd, chip, page);
+
+		if (ret < 0)
+			break;
 
 		len = min(len, readlen);
 		buf = nand_transfer_oob(chip, buf, ops, len);
 
 		if (!(chip->options & NAND_NO_READRDY)) {
-			/*
-			 * Apply delay or wait for ready/busy pin. Do this
-			 * before the AUTOINCR check, so no problems arise if a
-			 * chip which does auto increment is marked as
-			 * NOAUTOINCR by the board driver.
-			 */
+			/* Apply delay or wait for ready/busy pin */
 			if (!chip->dev_ready)
 				udelay(chip->chip_delay);
 			else
@@ -1851,16 +1859,12 @@
 			chip->select_chip(mtd, -1);
 			chip->select_chip(mtd, chipnr);
 		}
-
-		/*
-		 * Check, if the chip supports auto page increment or if we
-		 * have hit a block boundary.
-		 */
-		if (!NAND_CANAUTOINCR(chip) || !(page & blkcheck))
-			sndcmd = 1;
 	}
 
-	ops->oobretlen = ops->ooblen;
+	ops->oobretlen = ops->ooblen - readlen;
+
+	if (ret < 0)
+		return ret;
 
 	if (mtd->ecc_stats.failed - stats.failed)
 		return -EBADMSG;
@@ -1919,14 +1923,16 @@
  * @mtd: mtd info structure
  * @chip: nand chip info structure
  * @buf: data buffer
+ * @oob_required: must write chip->oob_poi to OOB
  *
  * Not for syndrome calculating ECC controllers, which use a special oob layout.
  */
 static void nand_write_page_raw(struct mtd_info *mtd, struct nand_chip *chip,
-				const uint8_t *buf)
+				const uint8_t *buf, int oob_required)
 {
 	chip->write_buf(mtd, buf, mtd->writesize);
-	chip->write_buf(mtd, chip->oob_poi, mtd->oobsize);
+	if (oob_required)
+		chip->write_buf(mtd, chip->oob_poi, mtd->oobsize);
 }
 
 /**
@@ -1934,12 +1940,13 @@
  * @mtd: mtd info structure
  * @chip: nand chip info structure
  * @buf: data buffer
+ * @oob_required: must write chip->oob_poi to OOB
  *
  * We need a special oob layout and handling even when ECC isn't checked.
  */
 static void nand_write_page_raw_syndrome(struct mtd_info *mtd,
 					struct nand_chip *chip,
-					const uint8_t *buf)
+					const uint8_t *buf, int oob_required)
 {
 	int eccsize = chip->ecc.size;
 	int eccbytes = chip->ecc.bytes;
@@ -1973,9 +1980,10 @@
  * @mtd: mtd info structure
  * @chip: nand chip info structure
  * @buf: data buffer
+ * @oob_required: must write chip->oob_poi to OOB
  */
 static void nand_write_page_swecc(struct mtd_info *mtd, struct nand_chip *chip,
-				  const uint8_t *buf)
+				  const uint8_t *buf, int oob_required)
 {
 	int i, eccsize = chip->ecc.size;
 	int eccbytes = chip->ecc.bytes;
@@ -1991,7 +1999,7 @@
 	for (i = 0; i < chip->ecc.total; i++)
 		chip->oob_poi[eccpos[i]] = ecc_calc[i];
 
-	chip->ecc.write_page_raw(mtd, chip, buf);
+	chip->ecc.write_page_raw(mtd, chip, buf, 1);
 }
 
 /**
@@ -1999,9 +2007,10 @@
  * @mtd: mtd info structure
  * @chip: nand chip info structure
  * @buf: data buffer
+ * @oob_required: must write chip->oob_poi to OOB
  */
 static void nand_write_page_hwecc(struct mtd_info *mtd, struct nand_chip *chip,
-				  const uint8_t *buf)
+				  const uint8_t *buf, int oob_required)
 {
 	int i, eccsize = chip->ecc.size;
 	int eccbytes = chip->ecc.bytes;
@@ -2027,12 +2036,14 @@
  * @mtd: mtd info structure
  * @chip: nand chip info structure
  * @buf: data buffer
+ * @oob_required: must write chip->oob_poi to OOB
  *
  * The hw generator calculates the error syndrome automatically. Therefore we
  * need a special oob layout and handling.
  */
 static void nand_write_page_syndrome(struct mtd_info *mtd,
-				    struct nand_chip *chip, const uint8_t *buf)
+				    struct nand_chip *chip,
+				    const uint8_t *buf, int oob_required)
 {
 	int i, eccsize = chip->ecc.size;
 	int eccbytes = chip->ecc.bytes;
@@ -2071,21 +2082,23 @@
  * @mtd: MTD device structure
  * @chip: NAND chip descriptor
  * @buf: the data to write
+ * @oob_required: must write chip->oob_poi to OOB
  * @page: page number to write
  * @cached: cached programming
  * @raw: use _raw version of write_page
  */
 static int nand_write_page(struct mtd_info *mtd, struct nand_chip *chip,
-			   const uint8_t *buf, int page, int cached, int raw)
+			   const uint8_t *buf, int oob_required, int page,
+			   int cached, int raw)
 {
 	int status;
 
 	chip->cmdfunc(mtd, NAND_CMD_SEQIN, 0x00, page);
 
 	if (unlikely(raw))
-		chip->ecc.write_page_raw(mtd, chip, buf);
+		chip->ecc.write_page_raw(mtd, chip, buf, oob_required);
 	else
-		chip->ecc.write_page(mtd, chip, buf);
+		chip->ecc.write_page(mtd, chip, buf, oob_required);
 
 	/*
 	 * Cached progamming disabled for now. Not sure if it's worth the
@@ -2118,6 +2131,9 @@
 
 	if (chip->verify_buf(mtd, buf, mtd->writesize))
 		return -EIO;
+
+	/* Make sure the next page prog is preceded by a status read */
+	chip->cmdfunc(mtd, NAND_CMD_STATUS, -1, -1);
 #endif
 	return 0;
 }
@@ -2202,6 +2218,7 @@
 	uint8_t *oob = ops->oobbuf;
 	uint8_t *buf = ops->datbuf;
 	int ret, subpage;
+	int oob_required = oob ? 1 : 0;
 
 	ops->retlen = 0;
 	if (!writelen)
@@ -2264,8 +2281,8 @@
 			memset(chip->oob_poi, 0xff, mtd->oobsize);
 		}
 
-		ret = chip->write_page(mtd, chip, wbuf, page, cached,
-				       (ops->mode == MTD_OPS_RAW));
+		ret = chip->write_page(mtd, chip, wbuf, oob_required, page,
+				       cached, (ops->mode == MTD_OPS_RAW));
 		if (ret)
 			break;
 
@@ -2898,8 +2915,7 @@
 		*busw = NAND_BUSWIDTH_16;
 
 	chip->options &= ~NAND_CHIPOPTIONS_MSK;
-	chip->options |= (NAND_NO_READRDY |
-			NAND_NO_AUTOINCR) & NAND_CHIPOPTIONS_MSK;
+	chip->options |= NAND_NO_READRDY & NAND_CHIPOPTIONS_MSK;
 
 	pr_info("ONFI flash detected\n");
 	return 1;
@@ -3076,11 +3092,6 @@
 		chip->options &= ~NAND_SAMSUNG_LP_OPTIONS;
 ident_done:
 
-	/*
-	 * Set chip as a default. Board drivers can override it, if necessary.
-	 */
-	chip->options |= NAND_NO_AUTOINCR;
-
 	/* Try to identify manufacturer */
 	for (maf_idx = 0; nand_manuf_ids[maf_idx].id != 0x0; maf_idx++) {
 		if (nand_manuf_ids[maf_idx].id == *maf_id)
@@ -3154,10 +3165,11 @@
 	if (mtd->writesize > 512 && chip->cmdfunc == nand_command)
 		chip->cmdfunc = nand_command_lp;
 
-	pr_info("NAND device: Manufacturer ID:"
-		" 0x%02x, Chip ID: 0x%02x (%s %s)\n", *maf_id, *dev_id,
-		nand_manuf_ids[maf_idx].name,
-		chip->onfi_version ? chip->onfi_params.model : type->name);
+	pr_info("NAND device: Manufacturer ID: 0x%02x, Chip ID: 0x%02x (%s %s),"
+		" page size: %d, OOB size: %d\n",
+		*maf_id, *dev_id, nand_manuf_ids[maf_idx].name,
+		chip->onfi_version ? chip->onfi_params.model : type->name,
+		mtd->writesize, mtd->oobsize);
 
 	return type;
 }
@@ -3329,8 +3341,13 @@
 		if (!chip->ecc.write_oob)
 			chip->ecc.write_oob = nand_write_oob_syndrome;
 
-		if (mtd->writesize >= chip->ecc.size)
+		if (mtd->writesize >= chip->ecc.size) {
+			if (!chip->ecc.strength) {
+				pr_warn("Driver must set ecc.strength when using hardware ECC\n");
+				BUG();
+			}
 			break;
+		}
 		pr_warn("%d byte HW ECC not possible on "
 			   "%d byte page size, fallback to SW ECC\n",
 			   chip->ecc.size, mtd->writesize);
@@ -3385,7 +3402,7 @@
 			BUG();
 		}
 		chip->ecc.strength =
-			chip->ecc.bytes*8 / fls(8*chip->ecc.size);
+			chip->ecc.bytes * 8 / fls(8 * chip->ecc.size);
 		break;
 
 	case NAND_ECC_NONE:
@@ -3483,7 +3500,7 @@
 
 	/* propagate ecc info to mtd_info */
 	mtd->ecclayout = chip->ecc.layout;
-	mtd->ecc_strength = chip->ecc.strength * chip->ecc.steps;
+	mtd->ecc_strength = chip->ecc.strength;
 
 	/* Check, if we should skip the bad block table scan */
 	if (chip->options & NAND_SKIP_BBTSCAN)
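The nand_base changes above establish a new return convention: every ecc.read_page()/read_subpage() implementation reports the largest number of bitflips corrected in any single ECC step (0 for a clean read, negative only for hard errors), and nand_do_read_ops() hands the worst value back to the caller instead of collapsing everything to -EUCLEAN. A condensed sketch of the accounting, with a hypothetical per-step callback:

#include <linux/kernel.h>	/* max_t */

static int my_read_page(int eccsteps, int (*correct)(int step),
			unsigned int *ecc_failed)
{
	unsigned int max_bitflips = 0;
	int i, stat;

	for (i = 0; i < eccsteps; i++) {
		stat = correct(i);	/* <0 uncorrectable, >=0 bits fixed */
		if (stat < 0) {
			(*ecc_failed)++;	/* mtd->ecc_stats.failed in nand_base */
			continue;
		}
		max_bitflips = max_t(unsigned int, max_bitflips, stat);
	}
	return max_bitflips;
}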
diff --git a/drivers/mtd/nand/nand_bbt.c b/drivers/mtd/nand/nand_bbt.c
index 20a112f..30d1319 100644
--- a/drivers/mtd/nand/nand_bbt.c
+++ b/drivers/mtd/nand/nand_bbt.c
@@ -324,6 +324,7 @@
 
 		buf += mtd->oobsize + mtd->writesize;
 		len -= mtd->writesize;
+		offs += mtd->writesize;
 	}
 	return 0;
 }
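The one-line nand_bbt fix above adds the missing flash-offset advance: the loop already stepped the destination buffer, so without it every page after the first was read from the same offset. The shape of the loop, with a hypothetical page-read helper:

#include <linux/types.h>
#include <linux/kernel.h>	/* min_t */

static int my_read_region(loff_t offs, u8 *buf, size_t len, size_t pagesize,
			  int (*read_page)(loff_t offs, u8 *buf))
{
	while (len) {
		int ret = read_page(offs, buf);

		if (ret)
			return ret;
		buf += pagesize;
		offs += pagesize;	/* the advance added by this hunk */
		len -= min_t(size_t, len, pagesize);
	}
	return 0;
}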
diff --git a/drivers/mtd/nand/nand_ids.c b/drivers/mtd/nand/nand_ids.c
index af4fe8c..621b70b 100644
--- a/drivers/mtd/nand/nand_ids.c
+++ b/drivers/mtd/nand/nand_ids.c
@@ -70,7 +70,7 @@
 	 * These are the new chips with large page size. The pagesize and the
 	 * erasesize is determined from the extended id bytes
 	 */
-#define LP_OPTIONS (NAND_SAMSUNG_LP_OPTIONS | NAND_NO_READRDY | NAND_NO_AUTOINCR)
+#define LP_OPTIONS (NAND_SAMSUNG_LP_OPTIONS | NAND_NO_READRDY)
 #define LP_OPTIONS16 (LP_OPTIONS | NAND_BUSWIDTH_16)
 
 	/* 512 Megabit */
@@ -157,9 +157,7 @@
 	 * writes possible, but not implemented now
 	 */
 	{"AND 128MiB 3,3V 8-bit",	0x01, 2048, 128, 0x4000,
-	 NAND_IS_AND | NAND_NO_AUTOINCR |NAND_NO_READRDY | NAND_4PAGE_ARRAY |
-	 BBT_AUTO_REFRESH
-	},
+	 NAND_IS_AND | NAND_NO_READRDY | NAND_4PAGE_ARRAY | BBT_AUTO_REFRESH},
 
 	{NULL,}
 };
diff --git a/drivers/mtd/nand/nandsim.c b/drivers/mtd/nand/nandsim.c
index 261f478..6cc8fbf 100644
--- a/drivers/mtd/nand/nandsim.c
+++ b/drivers/mtd/nand/nandsim.c
@@ -268,7 +268,6 @@
 #define OPT_PAGE512      0x00000002 /* 512-byte  page chips */
 #define OPT_PAGE2048     0x00000008 /* 2048-byte page chips */
 #define OPT_SMARTMEDIA   0x00000010 /* SmartMedia technology chips */
-#define OPT_AUTOINCR     0x00000020 /* page number auto incrementation is possible */
 #define OPT_PAGE512_8BIT 0x00000040 /* 512-byte page chips with 8-bit bus width */
 #define OPT_PAGE4096     0x00000080 /* 4096-byte page chips */
 #define OPT_LARGEPAGE    (OPT_PAGE2048 | OPT_PAGE4096) /* 2048 & 4096-byte page chips */
@@ -594,7 +593,7 @@
 		ns->options |= OPT_PAGE256;
 	}
 	else if (ns->geom.pgsz == 512) {
-		ns->options |= (OPT_PAGE512 | OPT_AUTOINCR);
+		ns->options |= OPT_PAGE512;
 		if (ns->busw == 8)
 			ns->options |= OPT_PAGE512_8BIT;
 	} else if (ns->geom.pgsz == 2048) {
@@ -663,8 +662,6 @@
         for (i = 0; nand_flash_ids[i].name != NULL; i++) {
                 if (second_id_byte != nand_flash_ids[i].id)
                         continue;
-		if (!(nand_flash_ids[i].options & NAND_NO_AUTOINCR))
-			ns->options |= OPT_AUTOINCR;
 	}
 
 	if (ns->busw == 16)
@@ -1936,20 +1933,8 @@
 	if (ns->regs.count == ns->regs.num) {
 		NS_DBG("read_byte: all bytes were read\n");
 
-		/*
-		 * The OPT_AUTOINCR allows to read next consecutive pages without
-		 * new read operation cycle.
-		 */
-		if ((ns->options & OPT_AUTOINCR) && NS_STATE(ns->state) == STATE_DATAOUT) {
-			ns->regs.count = 0;
-			if (ns->regs.row + 1 < ns->geom.pgnum)
-				ns->regs.row += 1;
-			NS_DBG("read_byte: switch to the next page (%#x)\n", ns->regs.row);
-			do_state_action(ns, ACTION_CPY);
-		}
-		else if (NS_STATE(ns->nxstate) == STATE_READY)
+		if (NS_STATE(ns->nxstate) == STATE_READY)
 			switch_state(ns);
-
 	}
 
 	return outb;
@@ -2203,14 +2188,7 @@
 	ns->regs.count += len;
 
 	if (ns->regs.count == ns->regs.num) {
-		if ((ns->options & OPT_AUTOINCR) && NS_STATE(ns->state) == STATE_DATAOUT) {
-			ns->regs.count = 0;
-			if (ns->regs.row + 1 < ns->geom.pgnum)
-				ns->regs.row += 1;
-			NS_DBG("read_buf: switch to the next page (%#x)\n", ns->regs.row);
-			do_state_action(ns, ACTION_CPY);
-		}
-		else if (NS_STATE(ns->nxstate) == STATE_READY)
+		if (NS_STATE(ns->nxstate) == STATE_READY)
 			switch_state(ns);
 	}
 
diff --git a/drivers/mtd/nand/omap2.c b/drivers/mtd/nand/omap2.c
index c2b0bba..d7f681d 100644
--- a/drivers/mtd/nand/omap2.c
+++ b/drivers/mtd/nand/omap2.c
@@ -21,6 +21,10 @@
 #include <linux/io.h>
 #include <linux/slab.h>
 
+#ifdef CONFIG_MTD_NAND_OMAP_BCH
+#include <linux/bch.h>
+#endif
+
 #include <plat/dma.h>
 #include <plat/gpmc.h>
 #include <plat/nand.h>
@@ -127,6 +131,11 @@
 	} iomode;
 	u_char				*buf;
 	int					buf_len;
+
+#ifdef CONFIG_MTD_NAND_OMAP_BCH
+	struct bch_control             *bch;
+	struct nand_ecclayout           ecclayout;
+#endif
 };
 
 /**
@@ -402,7 +411,7 @@
 			PREFETCH_FIFOTHRESHOLD_MAX, 0x1, len, is_write);
 	if (ret)
 		/* PFPW engine is busy, use cpu copy method */
-		goto out_copy;
+		goto out_copy_unmap;
 
 	init_completion(&info->comp);
 
@@ -421,6 +430,8 @@
 	dma_unmap_single(&info->pdev->dev, dma_addr, len, dir);
 	return 0;
 
+out_copy_unmap:
+	dma_unmap_single(&info->pdev->dev, dma_addr, len, dir);
 out_copy:
 	if (info->nand.options & NAND_BUSWIDTH_16)
 		is_write == 0 ? omap_read_buf16(mtd, (u_char *) addr, len)
@@ -879,7 +890,7 @@
 	struct omap_nand_info *info = container_of(mtd, struct omap_nand_info,
 							mtd);
 	unsigned long timeo = jiffies;
-	int status = NAND_STATUS_FAIL, state = this->state;
+	int status, state = this->state;
 
 	if (state == FL_ERASING)
 		timeo += (HZ * 400) / 1000;
@@ -894,6 +905,8 @@
 			break;
 		cond_resched();
 	}
+
+	status = gpmc_nand_read(info->gpmc_cs, GPMC_NAND_DATA);
 	return status;
 }
 
@@ -925,6 +938,226 @@
 	return 1;
 }
 
+#ifdef CONFIG_MTD_NAND_OMAP_BCH
+
+/**
+ * omap3_enable_hwecc_bch - Program OMAP3 GPMC to perform BCH ECC correction
+ * @mtd: MTD device structure
+ * @mode: Read/Write mode
+ */
+static void omap3_enable_hwecc_bch(struct mtd_info *mtd, int mode)
+{
+	int nerrors;
+	unsigned int dev_width;
+	struct omap_nand_info *info = container_of(mtd, struct omap_nand_info,
+						   mtd);
+	struct nand_chip *chip = mtd->priv;
+
+	nerrors = (info->nand.ecc.bytes == 13) ? 8 : 4;
+	dev_width = (chip->options & NAND_BUSWIDTH_16) ? 1 : 0;
+	/*
+	 * Program GPMC to perform correction on one 512-byte sector at a time.
+	 * Using 4 sectors at a time (i.e. ecc.size = 2048) is also possible and
+	 * gives a slight (5%) performance gain (but requires additional code).
+	 */
+	(void)gpmc_enable_hwecc_bch(info->gpmc_cs, mode, dev_width, 1, nerrors);
+}
+
+/**
+ * omap3_calculate_ecc_bch4 - Generate 7 bytes of ECC bytes
+ * @mtd: MTD device structure
+ * @dat: The pointer to data on which ecc is computed
+ * @ecc_code: The ecc_code buffer
+ */
+static int omap3_calculate_ecc_bch4(struct mtd_info *mtd, const u_char *dat,
+				    u_char *ecc_code)
+{
+	struct omap_nand_info *info = container_of(mtd, struct omap_nand_info,
+						   mtd);
+	return gpmc_calculate_ecc_bch4(info->gpmc_cs, dat, ecc_code);
+}
+
+/**
+ * omap3_calculate_ecc_bch8 - Generate 13 bytes of ECC bytes
+ * @mtd: MTD device structure
+ * @dat: The pointer to data on which ecc is computed
+ * @ecc_code: The ecc_code buffer
+ */
+static int omap3_calculate_ecc_bch8(struct mtd_info *mtd, const u_char *dat,
+				    u_char *ecc_code)
+{
+	struct omap_nand_info *info = container_of(mtd, struct omap_nand_info,
+						   mtd);
+	return gpmc_calculate_ecc_bch8(info->gpmc_cs, dat, ecc_code);
+}
+
+/**
+ * omap3_correct_data_bch - Decode received data and correct errors
+ * @mtd: MTD device structure
+ * @data: page data
+ * @read_ecc: ecc read from nand flash
+ * @calc_ecc: ecc read from HW ECC registers
+ */
+static int omap3_correct_data_bch(struct mtd_info *mtd, u_char *data,
+				  u_char *read_ecc, u_char *calc_ecc)
+{
+	int i, count;
+	/* cannot correct more than 8 errors */
+	unsigned int errloc[8];
+	struct omap_nand_info *info = container_of(mtd, struct omap_nand_info,
+						   mtd);
+
+	count = decode_bch(info->bch, NULL, 512, read_ecc, calc_ecc, NULL,
+			   errloc);
+	if (count > 0) {
+		/* correct errors */
+		for (i = 0; i < count; i++) {
+			/* correct data only, not ecc bytes */
+			if (errloc[i] < 8*512)
+				data[errloc[i]/8] ^= 1 << (errloc[i] & 7);
+			pr_debug("corrected bitflip %u\n", errloc[i]);
+		}
+	} else if (count < 0) {
+		pr_err("ecc unrecoverable error\n");
+	}
+	return count;
+}
+
+/**
+ * omap3_free_bch - Release BCH ecc resources
+ * @mtd: MTD device structure
+ */
+static void omap3_free_bch(struct mtd_info *mtd)
+{
+	struct omap_nand_info *info = container_of(mtd, struct omap_nand_info,
+						   mtd);
+	if (info->bch) {
+		free_bch(info->bch);
+		info->bch = NULL;
+	}
+}
+
+/**
+ * omap3_init_bch - Initialize BCH ECC
+ * @mtd: MTD device structure
+ * @ecc_opt: OMAP ECC mode (OMAP_ECC_BCH4_CODE_HW or OMAP_ECC_BCH8_CODE_HW)
+ */
+static int omap3_init_bch(struct mtd_info *mtd, int ecc_opt)
+{
+	int ret, max_errors;
+	struct omap_nand_info *info = container_of(mtd, struct omap_nand_info,
+						   mtd);
+#ifdef CONFIG_MTD_NAND_OMAP_BCH8
+	const int hw_errors = 8;
+#else
+	const int hw_errors = 4;
+#endif
+	info->bch = NULL;
+
+	max_errors = (ecc_opt == OMAP_ECC_BCH8_CODE_HW) ? 8 : 4;
+	if (max_errors != hw_errors) {
+		pr_err("cannot configure %d-bit BCH ecc, only %d-bit supported",
+		       max_errors, hw_errors);
+		goto fail;
+	}
+
+	/* initialize GPMC BCH engine */
+	ret = gpmc_init_hwecc_bch(info->gpmc_cs, 1, max_errors);
+	if (ret)
+		goto fail;
+
+	/* software bch library is only used to detect and locate errors */
+	info->bch = init_bch(13, max_errors, 0x201b /* hw polynomial */);
+	if (!info->bch)
+		goto fail;
+
+	info->nand.ecc.size    = 512;
+	info->nand.ecc.hwctl   = omap3_enable_hwecc_bch;
+	info->nand.ecc.correct = omap3_correct_data_bch;
+	info->nand.ecc.mode    = NAND_ECC_HW;
+
+	/*
+	 * The number of corrected errors in an ecc block that will trigger
+	 * block scrubbing defaults to the ecc strength (4 or 8).
+	 * Set mtd->bitflip_threshold here to define a custom threshold.
+	 */
+
+	if (max_errors == 8) {
+		info->nand.ecc.strength  = 8;
+		info->nand.ecc.bytes     = 13;
+		info->nand.ecc.calculate = omap3_calculate_ecc_bch8;
+	} else {
+		info->nand.ecc.strength  = 4;
+		info->nand.ecc.bytes     = 7;
+		info->nand.ecc.calculate = omap3_calculate_ecc_bch4;
+	}
+
+	pr_info("enabling NAND BCH ecc with %d-bit correction\n", max_errors);
+	return 0;
+fail:
+	omap3_free_bch(mtd);
+	return -1;
+}
+
+/**
+ * omap3_init_bch_tail - Build an oob layout for BCH ECC correction.
+ * @mtd: MTD device structure
+ */
+static int omap3_init_bch_tail(struct mtd_info *mtd)
+{
+	int i, steps;
+	struct omap_nand_info *info = container_of(mtd, struct omap_nand_info,
+						   mtd);
+	struct nand_ecclayout *layout = &info->ecclayout;
+
+	/* build oob layout */
+	steps = mtd->writesize/info->nand.ecc.size;
+	layout->eccbytes = steps*info->nand.ecc.bytes;
+
+	/* do not bother creating special oob layouts for small page devices */
+	if (mtd->oobsize < 64) {
+		pr_err("BCH ecc is not supported on small page devices\n");
+		goto fail;
+	}
+
+	/* reserve 2 bytes for bad block marker */
+	if (layout->eccbytes+2 > mtd->oobsize) {
+		pr_err("no oob layout available for oobsize %d eccbytes %u\n",
+		       mtd->oobsize, layout->eccbytes);
+		goto fail;
+	}
+
+	/* put ecc bytes at oob tail */
+	for (i = 0; i < layout->eccbytes; i++)
+		layout->eccpos[i] = mtd->oobsize-layout->eccbytes+i;
+
+	layout->oobfree[0].offset = 2;
+	layout->oobfree[0].length = mtd->oobsize-2-layout->eccbytes;
+	info->nand.ecc.layout = layout;
+
+	if (!(info->nand.options & NAND_BUSWIDTH_16))
+		info->nand.badblock_pattern = &bb_descrip_flashbased;
+	return 0;
+fail:
+	omap3_free_bch(mtd);
+	return -1;
+}
+
+#else
+static int omap3_init_bch(struct mtd_info *mtd, int ecc_opt)
+{
+	pr_err("CONFIG_MTD_NAND_OMAP_BCH is not enabled\n");
+	return -1;
+}
+static int omap3_init_bch_tail(struct mtd_info *mtd)
+{
+	return -1;
+}
+static void omap3_free_bch(struct mtd_info *mtd)
+{
+}
+#endif /* CONFIG_MTD_NAND_OMAP_BCH */
+
 static int __devinit omap_nand_probe(struct platform_device *pdev)
 {
 	struct omap_nand_info		*info;
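For context on the correction path added above: the GPMC hardware generates the BCH remainder, and the software BCH library (set up with init_bch(13, t, 0x201b)) is used only to locate error bits; locations at or beyond 8 * 512 fall inside the ECC bytes and are deliberately left alone. A condensed sketch of that locate-and-fix step, with hypothetical buffer names, for the 8-bit case:

#include <linux/types.h>
#include <linux/bch.h>

/* Illustrative only; mirrors the decode path above for a BCH8 layout. */
static int bch8_locate_and_fix(struct bch_control *bch, uint8_t *data,
			       uint8_t *read_ecc, uint8_t *calc_ecc)
{
	unsigned int errloc[8];		/* at most 8 correctable errors */
	int i, count;

	count = decode_bch(bch, NULL, 512, read_ecc, calc_ecc, NULL, errloc);
	for (i = 0; i < count; i++) {
		if (errloc[i] < 8 * 512)	/* flip data bits only */
			data[errloc[i] / 8] ^= 1 << (errloc[i] & 7);
	}

	return count;	/* negative means uncorrectable */
}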
@@ -1063,6 +1296,13 @@
 		info->nand.ecc.hwctl            = omap_enable_hwecc;
 		info->nand.ecc.correct          = omap_correct_data;
 		info->nand.ecc.mode             = NAND_ECC_HW;
+	} else if ((pdata->ecc_opt == OMAP_ECC_BCH4_CODE_HW) ||
+		   (pdata->ecc_opt == OMAP_ECC_BCH8_CODE_HW)) {
+		err = omap3_init_bch(&info->mtd, pdata->ecc_opt);
+		if (err) {
+			err = -EINVAL;
+			goto out_release_mem_region;
+		}
 	}
 
 	/* DIP switches on some boards change between 8 and 16 bit
@@ -1094,6 +1334,14 @@
 					(offset + omap_oobinfo.eccbytes);
 
 		info->nand.ecc.layout = &omap_oobinfo;
+	} else if ((pdata->ecc_opt == OMAP_ECC_BCH4_CODE_HW) ||
+		   (pdata->ecc_opt == OMAP_ECC_BCH8_CODE_HW)) {
+		/* build OOB layout for BCH ECC correction */
+		err = omap3_init_bch_tail(&info->mtd);
+		if (err) {
+			err = -EINVAL;
+			goto out_release_mem_region;
+		}
 	}
 
 	/* second phase scan */
@@ -1122,6 +1370,7 @@
 	struct mtd_info *mtd = platform_get_drvdata(pdev);
 	struct omap_nand_info *info = container_of(mtd, struct omap_nand_info,
 							mtd);
+	omap3_free_bch(&info->mtd);
 
 	platform_set_drvdata(pdev, NULL);
 	if (info->dma_ch != -1)
diff --git a/drivers/mtd/nand/orion_nand.c b/drivers/mtd/nand/orion_nand.c
index 0f50ef3..513dc88 100644
--- a/drivers/mtd/nand/orion_nand.c
+++ b/drivers/mtd/nand/orion_nand.c
@@ -17,6 +17,8 @@
 #include <linux/mtd/mtd.h>
 #include <linux/mtd/nand.h>
 #include <linux/mtd/partitions.h>
+#include <linux/clk.h>
+#include <linux/err.h>
 #include <asm/io.h>
 #include <asm/sizes.h>
 #include <mach/hardware.h>
@@ -79,6 +81,7 @@
 	struct nand_chip *nc;
 	struct orion_nand_data *board;
 	struct resource *res;
+	struct clk *clk;
 	void __iomem *io_base;
 	int ret = 0;
 	u32 val = 0;
@@ -155,6 +158,14 @@
 
 	platform_set_drvdata(pdev, mtd);
 
+	/* Not all platforms can gate the clock, so it is not
+	   an error if the clock does not exist. */
+	clk = clk_get(&pdev->dev, NULL);
+	if (!IS_ERR(clk)) {
+		clk_prepare_enable(clk);
+		clk_put(clk);
+	}
+
 	if (nand_scan(mtd, 1)) {
 		ret = -ENXIO;
 		goto no_dev;
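The comment above captures the idiom: the clock is optional, so a failed clk_get() is simply ignored rather than treated as a probe error. Reduced to its core (function name hypothetical, not part of the patch):

#include <linux/device.h>
#include <linux/clk.h>
#include <linux/err.h>

static void enable_optional_clk(struct device *dev)
{
	struct clk *clk = clk_get(dev, NULL);

	if (IS_ERR(clk))
		return;			/* no gateable clock here: fine */

	clk_prepare_enable(clk);
	clk_put(clk);			/* stays enabled; remove() re-gets it */
}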
@@ -184,6 +195,7 @@
 {
 	struct mtd_info *mtd = platform_get_drvdata(pdev);
 	struct nand_chip *nc = mtd->priv;
+	struct clk *clk;
 
 	nand_release(mtd);
 
@@ -191,6 +203,12 @@
 
 	kfree(nc);
 
+	clk = clk_get(&pdev->dev, NULL);
+	if (!IS_ERR(clk)) {
+		clk_disable_unprepare(clk);
+		clk_put(clk);
+	}
+
 	return 0;
 }
 
diff --git a/drivers/mtd/nand/pasemi_nand.c b/drivers/mtd/nand/pasemi_nand.c
index 974dbf8..1440e51 100644
--- a/drivers/mtd/nand/pasemi_nand.c
+++ b/drivers/mtd/nand/pasemi_nand.c
@@ -155,7 +155,6 @@
 	chip->ecc.mode = NAND_ECC_SOFT;
 
 	/* Enable the following for a flash based bad block table */
-	chip->options = NAND_NO_AUTOINCR;
 	chip->bbt_options = NAND_BBT_USE_FLASH;
 
 	/* Scan to find existence of the device */
diff --git a/drivers/mtd/nand/plat_nand.c b/drivers/mtd/nand/plat_nand.c
index 6404e6e..1bcb520 100644
--- a/drivers/mtd/nand/plat_nand.c
+++ b/drivers/mtd/nand/plat_nand.c
@@ -23,14 +23,18 @@
 	void __iomem		*io_base;
 };
 
+static const char *part_probe_types[] = { "cmdlinepart", NULL };
+
 /*
  * Probe for the NAND device.
  */
 static int __devinit plat_nand_probe(struct platform_device *pdev)
 {
 	struct platform_nand_data *pdata = pdev->dev.platform_data;
+	struct mtd_part_parser_data ppdata;
 	struct plat_nand_data *data;
 	struct resource *res;
+	const char **part_types;
 	int err = 0;
 
 	if (pdata->chip.nr_chips < 1) {
@@ -75,6 +79,7 @@
 	data->chip.select_chip = pdata->ctrl.select_chip;
 	data->chip.write_buf = pdata->ctrl.write_buf;
 	data->chip.read_buf = pdata->ctrl.read_buf;
+	data->chip.read_byte = pdata->ctrl.read_byte;
 	data->chip.chip_delay = pdata->chip.chip_delay;
 	data->chip.options |= pdata->chip.options;
 	data->chip.bbt_options |= pdata->chip.bbt_options;
@@ -98,8 +103,10 @@
 		goto out;
 	}
 
-	err = mtd_device_parse_register(&data->mtd,
-					pdata->chip.part_probe_types, NULL,
+	part_types = pdata->chip.part_probe_types ? : part_probe_types;
+
+	ppdata.of_node = pdev->dev.of_node;
+	err = mtd_device_parse_register(&data->mtd, part_types, &ppdata,
 					pdata->chip.partitions,
 					pdata->chip.nr_partitions);
 
@@ -140,12 +147,19 @@
 	return 0;
 }
 
+static const struct of_device_id plat_nand_match[] = {
+	{ .compatible = "gen_nand" },
+	{},
+};
+MODULE_DEVICE_TABLE(of, plat_nand_match);
+
 static struct platform_driver plat_nand_driver = {
-	.probe		= plat_nand_probe,
-	.remove		= __devexit_p(plat_nand_remove),
-	.driver		= {
-		.name	= "gen_nand",
-		.owner	= THIS_MODULE,
+	.probe	= plat_nand_probe,
+	.remove	= __devexit_p(plat_nand_remove),
+	.driver	= {
+		.name		= "gen_nand",
+		.owner		= THIS_MODULE,
+		.of_match_table = plat_nand_match,
 	},
 };
 
diff --git a/drivers/mtd/nand/pxa3xx_nand.c b/drivers/mtd/nand/pxa3xx_nand.c
index def50ca..252aaef 100644
--- a/drivers/mtd/nand/pxa3xx_nand.c
+++ b/drivers/mtd/nand/pxa3xx_nand.c
@@ -682,14 +682,15 @@
 }
 
 static void pxa3xx_nand_write_page_hwecc(struct mtd_info *mtd,
-		struct nand_chip *chip, const uint8_t *buf)
+		struct nand_chip *chip, const uint8_t *buf, int oob_required)
 {
 	chip->write_buf(mtd, buf, mtd->writesize);
 	chip->write_buf(mtd, chip->oob_poi, mtd->oobsize);
 }
 
 static int pxa3xx_nand_read_page_hwecc(struct mtd_info *mtd,
-		struct nand_chip *chip, uint8_t *buf, int page)
+		struct nand_chip *chip, uint8_t *buf, int oob_required,
+		int page)
 {
 	struct pxa3xx_nand_host *host = mtd->priv;
 	struct pxa3xx_nand_info *info = host->info_data;
@@ -1004,7 +1005,6 @@
 	chip->ecc.size = host->page_size;
 	chip->ecc.strength = 1;
 
-	chip->options = NAND_NO_AUTOINCR;
 	chip->options |= NAND_NO_READRDY;
 	if (host->reg_ndcr & NDCR_DWIDTH_M)
 		chip->options |= NAND_BUSWIDTH_16;
diff --git a/drivers/mtd/nand/r852.c b/drivers/mtd/nand/r852.c
index c204018..8cb6277 100644
--- a/drivers/mtd/nand/r852.c
+++ b/drivers/mtd/nand/r852.c
@@ -539,14 +539,11 @@
  * nand_read_oob_syndrome assumes we can send column address - we can't
  */
 static int r852_read_oob(struct mtd_info *mtd, struct nand_chip *chip,
-			     int page, int sndcmd)
+			     int page)
 {
-	if (sndcmd) {
-		chip->cmdfunc(mtd, NAND_CMD_READOOB, 0, page);
-		sndcmd = 0;
-	}
+	chip->cmdfunc(mtd, NAND_CMD_READOOB, 0, page);
 	chip->read_buf(mtd, chip->oob_poi, mtd->oobsize);
-	return sndcmd;
+	return 0;
 }
 
 /*
@@ -1104,18 +1101,7 @@
 	.driver.pm	= &r852_pm_ops,
 };
 
-static __init int r852_module_init(void)
-{
-	return pci_register_driver(&r852_pci_driver);
-}
-
-static void __exit r852_module_exit(void)
-{
-	pci_unregister_driver(&r852_pci_driver);
-}
-
-module_init(r852_module_init);
-module_exit(r852_module_exit);
+module_pci_driver(r852_pci_driver);
 
 MODULE_LICENSE("GPL");
 MODULE_AUTHOR("Maxim Levitsky <maximlevitsky@gmail.com>");
diff --git a/drivers/mtd/nand/sh_flctl.c b/drivers/mtd/nand/sh_flctl.c
index e9b2b26..aa9b8a5 100644
--- a/drivers/mtd/nand/sh_flctl.c
+++ b/drivers/mtd/nand/sh_flctl.c
@@ -344,7 +344,7 @@
 }
 
 static int flctl_read_page_hwecc(struct mtd_info *mtd, struct nand_chip *chip,
-				uint8_t *buf, int page)
+				uint8_t *buf, int oob_required, int page)
 {
 	int i, eccsize = chip->ecc.size;
 	int eccbytes = chip->ecc.bytes;
@@ -359,14 +359,14 @@
 		if (flctl->hwecc_cant_correct[i])
 			mtd->ecc_stats.failed++;
 		else
-			mtd->ecc_stats.corrected += 0;
+			mtd->ecc_stats.corrected += 0; /* FIXME */
 	}
 
 	return 0;
 }
 
 static void flctl_write_page_hwecc(struct mtd_info *mtd, struct nand_chip *chip,
-				   const uint8_t *buf)
+				   const uint8_t *buf, int oob_required)
 {
 	int i, eccsize = chip->ecc.size;
 	int eccbytes = chip->ecc.bytes;
@@ -881,8 +881,6 @@
 	flctl->hwecc = pdata->has_hwecc;
 	flctl->holden = pdata->use_holden;
 
-	nand->options = NAND_NO_AUTOINCR;
-
 	/* Set address of hardware control function */
 	/* 20 us command delay time */
 	nand->chip_delay = 20;
diff --git a/drivers/mtd/nand/sm_common.c b/drivers/mtd/nand/sm_common.c
index 774c3c2..082bcdc 100644
--- a/drivers/mtd/nand/sm_common.c
+++ b/drivers/mtd/nand/sm_common.c
@@ -94,17 +94,16 @@
 	{NULL,}
 };
 
-#define XD_TYPEM       (NAND_NO_AUTOINCR | NAND_BROKEN_XD)
 static struct nand_flash_dev nand_xd_flash_ids[] = {
 
 	{"xD 16MiB 3,3V",    0x73, 512, 16, 0x4000, 0},
 	{"xD 32MiB 3,3V",    0x75, 512, 32, 0x4000, 0},
 	{"xD 64MiB 3,3V",    0x76, 512, 64, 0x4000, 0},
 	{"xD 128MiB 3,3V",   0x79, 512, 128, 0x4000, 0},
-	{"xD 256MiB 3,3V",   0x71, 512, 256, 0x4000, XD_TYPEM},
-	{"xD 512MiB 3,3V",   0xdc, 512, 512, 0x4000, XD_TYPEM},
-	{"xD 1GiB 3,3V",     0xd3, 512, 1024, 0x4000, XD_TYPEM},
-	{"xD 2GiB 3,3V",     0xd5, 512, 2048, 0x4000, XD_TYPEM},
+	{"xD 256MiB 3,3V",   0x71, 512, 256, 0x4000, NAND_BROKEN_XD},
+	{"xD 512MiB 3,3V",   0xdc, 512, 512, 0x4000, NAND_BROKEN_XD},
+	{"xD 1GiB 3,3V",     0xd3, 512, 1024, 0x4000, NAND_BROKEN_XD},
+	{"xD 2GiB 3,3V",     0xd5, 512, 2048, 0x4000, NAND_BROKEN_XD},
 	{NULL,}
 };
 
diff --git a/drivers/mtd/onenand/onenand_base.c b/drivers/mtd/onenand/onenand_base.c
index b3ce12e..7153e0d2 100644
--- a/drivers/mtd/onenand/onenand_base.c
+++ b/drivers/mtd/onenand/onenand_base.c
@@ -1201,7 +1201,8 @@
 	if (mtd->ecc_stats.failed - stats.failed)
 		return -EBADMSG;
 
-	return mtd->ecc_stats.corrected - stats.corrected ? -EUCLEAN : 0;
+	/* return max bitflips per ecc step; ONENANDs correct 1 bit only */
+	return mtd->ecc_stats.corrected != stats.corrected ? 1 : 0;
 }
 
 /**
@@ -1333,7 +1334,8 @@
 	if (mtd->ecc_stats.failed - stats.failed)
 		return -EBADMSG;
 
-	return mtd->ecc_stats.corrected - stats.corrected ? -EUCLEAN : 0;
+	/* return max bitflips per ecc step; ONENANDs correct 1 bit only */
+	return mtd->ecc_stats.corrected != stats.corrected ? 1 : 0;
 }
 
 /**
diff --git a/drivers/mtd/ubi/debug.c b/drivers/mtd/ubi/debug.c
index 9f957c2..7c13803 100644
--- a/drivers/mtd/ubi/debug.c
+++ b/drivers/mtd/ubi/debug.c
@@ -264,6 +264,9 @@
  */
 int ubi_debugfs_init(void)
 {
+	if (!IS_ENABLED(CONFIG_DEBUG_FS))
+		return 0;
+
 	dfs_rootdir = debugfs_create_dir("ubi", NULL);
 	if (IS_ERR_OR_NULL(dfs_rootdir)) {
 		int err = dfs_rootdir ? -ENODEV : PTR_ERR(dfs_rootdir);
@@ -281,7 +284,8 @@
  */
 void ubi_debugfs_exit(void)
 {
-	debugfs_remove(dfs_rootdir);
+	if (IS_ENABLED(CONFIG_DEBUG_FS))
+		debugfs_remove(dfs_rootdir);
 }
 
 /* Read an UBI debugfs file */
@@ -403,6 +407,9 @@
 	struct dentry *dent;
 	struct ubi_debug_info *d = ubi->dbg;
 
+	if (!IS_ENABLED(CONFIG_DEBUG_FS))
+		return 0;
+
 	n = snprintf(d->dfs_dir_name, UBI_DFS_DIR_LEN + 1, UBI_DFS_DIR_NAME,
 		     ubi->ubi_num);
 	if (n == UBI_DFS_DIR_LEN) {
@@ -470,5 +477,6 @@
  */
 void ubi_debugfs_exit_dev(struct ubi_device *ubi)
 {
-	debugfs_remove_recursive(ubi->dbg->dfs_dir);
+	if (IS_ENABLED(CONFIG_DEBUG_FS))
+		debugfs_remove_recursive(ubi->dbg->dfs_dir);
 }
diff --git a/drivers/mtd/ubi/wl.c b/drivers/mtd/ubi/wl.c
index 9df100a..b6be644 100644
--- a/drivers/mtd/ubi/wl.c
+++ b/drivers/mtd/ubi/wl.c
@@ -1262,11 +1262,11 @@
 	dbg_wl("flush pending work for LEB %d:%d (%d pending works)",
 	       vol_id, lnum, ubi->works_count);
 
-	down_write(&ubi->work_sem);
 	while (found) {
 		struct ubi_work *wrk;
 		found = 0;
 
+		down_read(&ubi->work_sem);
 		spin_lock(&ubi->wl_lock);
 		list_for_each_entry(wrk, &ubi->works, list) {
 			if ((vol_id == UBI_ALL || wrk->vol_id == vol_id) &&
@@ -1277,18 +1277,27 @@
 				spin_unlock(&ubi->wl_lock);
 
 				err = wrk->func(ubi, wrk, 0);
-				if (err)
-					goto out;
+				if (err) {
+					up_read(&ubi->work_sem);
+					return err;
+				}
+
 				spin_lock(&ubi->wl_lock);
 				found = 1;
 				break;
 			}
 		}
 		spin_unlock(&ubi->wl_lock);
+		up_read(&ubi->work_sem);
 	}
 
-out:
+	/*
+	 * Make sure all the work items that were executed in parallel have
+	 * finished.
+	 */
+	down_write(&ubi->work_sem);
 	up_write(&ubi->work_sem);
+
 	return err;
 }
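The down_write()/up_write() pair added at the end of this function is a barrier, not a critical section: since a writer excludes all readers, it only returns once every work item that took work_sem for read has dropped it. As a standalone sketch (helper name hypothetical):

#include <linux/rwsem.h>

/* Wait until every current reader of @sem has released it. */
static void rwsem_wait_for_readers(struct rw_semaphore *sem)
{
	down_write(sem);	/* blocks while any reader holds the lock */
	up_write(sem);		/* only the barrier effect is wanted */
}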
 
diff --git a/drivers/net/bonding/bond_main.c b/drivers/net/bonding/bond_main.c
index 2ee8cf9..b9c2ae6 100644
--- a/drivers/net/bonding/bond_main.c
+++ b/drivers/net/bonding/bond_main.c
@@ -76,6 +76,7 @@
 #include <net/route.h>
 #include <net/net_namespace.h>
 #include <net/netns/generic.h>
+#include <net/pkt_sched.h>
 #include "bonding.h"
 #include "bond_3ad.h"
 #include "bond_alb.h"
@@ -381,8 +382,6 @@
 	return next;
 }
 
-#define bond_queue_mapping(skb) (*(u16 *)((skb)->cb))
-
 /**
  * bond_dev_queue_xmit - Prepare skb for xmit.
  *
@@ -395,7 +394,9 @@
 {
 	skb->dev = slave_dev;
 
-	skb->queue_mapping = bond_queue_mapping(skb);
+	BUILD_BUG_ON(sizeof(skb->queue_mapping) !=
+		     sizeof(qdisc_skb_cb(skb)->bond_queue_mapping));
+	skb->queue_mapping = qdisc_skb_cb(skb)->bond_queue_mapping;
 
 	if (unlikely(netpoll_tx_running(slave_dev)))
 		bond_netpoll_send_skb(bond_get_slave_by_dev(bond, slave_dev), skb);
@@ -4171,7 +4172,7 @@
 	/*
 	 * Save the original txq to restore before passing to the driver
 	 */
-	bond_queue_mapping(skb) = skb->queue_mapping;
+	qdisc_skb_cb(skb)->bond_queue_mapping = skb->queue_mapping;
 
 	if (unlikely(txq >= dev->real_num_tx_queues)) {
 		do {
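The BUILD_BUG_ON above pairs with an include/net/pkt_sched.h hunk elsewhere in this series (not shown here) that reserves a u16 in struct qdisc_skb_cb, so bonding no longer stores the original queue in skb->cb, which the qdisc layer reuses for its own state. Assumed shape of that companion change, layout hypothetical and the field name taken from the usage above:

/* Assumption, not the verbatim hunk. */
struct qdisc_skb_cb {
	unsigned int	pkt_len;
	u16		bond_queue_mapping;	/* original txq saved by bonding */
	u16		_pad;
	unsigned char	data[20];		/* remaining scratch space */
};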
diff --git a/drivers/net/bonding/bond_procfs.c b/drivers/net/bonding/bond_procfs.c
index ad284ba..3cea38d 100644
--- a/drivers/net/bonding/bond_procfs.c
+++ b/drivers/net/bonding/bond_procfs.c
@@ -150,14 +150,25 @@
 	}
 }
 
+static const char *bond_slave_link_status(s8 link)
+{
+	static const char * const status[] = {
+		[BOND_LINK_UP] = "up",
+		[BOND_LINK_FAIL] = "going down",
+		[BOND_LINK_DOWN] = "down",
+		[BOND_LINK_BACK] = "going back",
+	};
+
+	return status[link];
+}
+
 static void bond_info_show_slave(struct seq_file *seq,
 				 const struct slave *slave)
 {
 	struct bonding *bond = seq->private;
 
 	seq_printf(seq, "\nSlave Interface: %s\n", slave->dev->name);
-	seq_printf(seq, "MII Status: %s\n",
-		   (slave->link == BOND_LINK_UP) ?  "up" : "down");
+	seq_printf(seq, "MII Status: %s\n", bond_slave_link_status(slave->link));
 	if (slave->speed == SPEED_UNKNOWN)
 		seq_printf(seq, "Speed: %s\n", "Unknown");
 	else
diff --git a/drivers/net/bonding/bond_sysfs.c b/drivers/net/bonding/bond_sysfs.c
index aef42f0..485bedb 100644
--- a/drivers/net/bonding/bond_sysfs.c
+++ b/drivers/net/bonding/bond_sysfs.c
@@ -1082,8 +1082,12 @@
 		}
 	}
 
-	pr_info("%s: Unable to set %.*s as primary slave.\n",
-		bond->dev->name, (int)strlen(buf) - 1, buf);
+	strncpy(bond->params.primary, ifname, IFNAMSIZ);
+	bond->params.primary[IFNAMSIZ - 1] = 0;
+
+	pr_info("%s: Recording %s as primary, "
+		"but it has not been enslaved to %s yet.\n",
+		bond->dev->name, ifname, bond->dev->name);
 out:
 	write_unlock_bh(&bond->curr_slave_lock);
 	read_unlock(&bond->lock);
diff --git a/drivers/net/caif/caif_hsi.c b/drivers/net/caif/caif_hsi.c
index 1520814..4a27adb 100644
--- a/drivers/net/caif/caif_hsi.c
+++ b/drivers/net/caif/caif_hsi.c
@@ -693,8 +693,6 @@
 			 */
 			memcpy(rx_buf, (u8 *)piggy_desc,
 					CFHSI_DESC_SHORT_SZ);
-			/* Mark no embedded frame here */
-			piggy_desc->offset = 0;
 			if (desc_pld_len == -EPROTO)
 				goto out_of_sync;
 		}
@@ -737,6 +735,8 @@
 			/* Extract any payload in piggyback descriptor. */
 			if (cfhsi_rx_desc(piggy_desc, cfhsi) < 0)
 				goto out_of_sync;
+			/* Mark no embedded frame after extracting it */
+			piggy_desc->offset = 0;
 		}
 	}
 
@@ -1178,6 +1178,7 @@
 		dev_err(&ndev->dev, "%s: Registration error: %d.\n",
 			__func__, res);
 		free_netdev(ndev);
+		return -ENODEV;
 	}
 	/* Add CAIF HSI device to list. */
 	spin_lock(&cfhsi_list_lock);
diff --git a/drivers/net/can/c_can/c_can.c b/drivers/net/can/c_can/c_can.c
index 536bda0..86cd532 100644
--- a/drivers/net/can/c_can/c_can.c
+++ b/drivers/net/can/c_can/c_can.c
@@ -590,8 +590,8 @@
 	priv->write_reg(priv, &priv->regs->control,
 			CONTROL_ENABLE_AR);
 
-	if (priv->can.ctrlmode & (CAN_CTRLMODE_LISTENONLY &
-					CAN_CTRLMODE_LOOPBACK)) {
+	if ((priv->can.ctrlmode & CAN_CTRLMODE_LISTENONLY) &&
+	    (priv->can.ctrlmode & CAN_CTRLMODE_LOOPBACK)) {
 		/* loopback + silent mode : useful for hot self-test */
 		priv->write_reg(priv, &priv->regs->control, CONTROL_EIE |
 				CONTROL_SIE | CONTROL_IE | CONTROL_TEST);
@@ -686,7 +686,7 @@
  *
  * We iterate from priv->tx_echo to priv->tx_next and check if the
  * packet has been transmitted, echo it back to the CAN framework.
- * If we discover a not yet transmitted package, stop looking for more.
+ * If we discover a not yet transmitted packet, stop looking for more.
  */
 static void c_can_do_tx(struct net_device *dev)
 {
@@ -698,7 +698,7 @@
 	for (/* nix */; (priv->tx_next - priv->tx_echo) > 0; priv->tx_echo++) {
 		msg_obj_no = get_tx_echo_msg_obj(priv);
 		val = c_can_read_reg32(priv, &priv->regs->txrqst1);
-		if (!(val & (1 << msg_obj_no))) {
+		if (!(val & (1 << (msg_obj_no - 1)))) {
 			can_get_echo_skb(dev,
 					msg_obj_no - C_CAN_MSG_OBJ_TX_FIRST);
 			stats->tx_bytes += priv->read_reg(priv,
@@ -706,6 +706,8 @@
 					& IF_MCONT_DLC_MASK;
 			stats->tx_packets++;
 			c_can_inval_msg_object(dev, 0, msg_obj_no);
+		} else {
+			break;
 		}
 	}
 
@@ -950,7 +952,7 @@
 	struct net_device *dev = napi->dev;
 	struct c_can_priv *priv = netdev_priv(dev);
 
-	irqstatus = priv->read_reg(priv, &priv->regs->interrupt);
+	irqstatus = priv->irqstatus;
 	if (!irqstatus)
 		goto end;
 
@@ -1028,12 +1030,11 @@
 
 static irqreturn_t c_can_isr(int irq, void *dev_id)
 {
-	u16 irqstatus;
 	struct net_device *dev = (struct net_device *)dev_id;
 	struct c_can_priv *priv = netdev_priv(dev);
 
-	irqstatus = priv->read_reg(priv, &priv->regs->interrupt);
-	if (!irqstatus)
+	priv->irqstatus = priv->read_reg(priv, &priv->regs->interrupt);
+	if (!priv->irqstatus)
 		return IRQ_NONE;
 
 	/* disable all interrupts and schedule the NAPI */
@@ -1063,10 +1064,11 @@
 		goto exit_irq_fail;
 	}
 
+	napi_enable(&priv->napi);
+
 	/* start the c_can controller */
 	c_can_start(dev);
 
-	napi_enable(&priv->napi);
 	netif_start_queue(dev);
 
 	return 0;
diff --git a/drivers/net/can/c_can/c_can.h b/drivers/net/can/c_can/c_can.h
index 9b7fbef..5f32d34 100644
--- a/drivers/net/can/c_can/c_can.h
+++ b/drivers/net/can/c_can/c_can.h
@@ -76,6 +76,7 @@
 	unsigned int tx_next;
 	unsigned int tx_echo;
 	void *priv;		/* for board-specific data */
+	u16 irqstatus;
 };
 
 struct net_device *alloc_c_can_dev(void);
diff --git a/drivers/net/can/cc770/cc770_platform.c b/drivers/net/can/cc770/cc770_platform.c
index 53115ee..688371c 100644
--- a/drivers/net/can/cc770/cc770_platform.c
+++ b/drivers/net/can/cc770/cc770_platform.c
@@ -154,7 +154,7 @@
 	struct cc770_platform_data *pdata = pdev->dev.platform_data;
 
 	priv->can.clock.freq = pdata->osc_freq;
-	if (priv->cpu_interface | CPUIF_DSC)
+	if (priv->cpu_interface & CPUIF_DSC)
 		priv->can.clock.freq /= 2;
 	priv->clkout = pdata->cor;
 	priv->bus_config = pdata->bcr;
diff --git a/drivers/net/can/flexcan.c b/drivers/net/can/flexcan.c
index 38c0690..81d4741 100644
--- a/drivers/net/can/flexcan.c
+++ b/drivers/net/can/flexcan.c
@@ -939,12 +939,12 @@
 		return PTR_ERR(pinctrl);
 
 	if (pdev->dev.of_node) {
-		const u32 *clock_freq_p;
+		const __be32 *clock_freq_p;
 
 		clock_freq_p = of_get_property(pdev->dev.of_node,
 						"clock-frequency", NULL);
 		if (clock_freq_p)
-			clock_freq = *clock_freq_p;
+			clock_freq = be32_to_cpup(clock_freq_p);
 	}
 
 	if (!clock_freq) {
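The one-line fix above is an endianness issue: device-tree property values are stored big-endian, so dereferencing the raw pointer only happens to work on big-endian CPUs. The safe pattern, shown standalone (helper name hypothetical; of_property_read_u32() performs the same conversion):

#include <linux/of.h>

static u32 read_clock_frequency(struct device_node *np)
{
	const __be32 *prop;
	u32 freq = 0;

	prop = of_get_property(np, "clock-frequency", NULL);
	if (prop)
		freq = be32_to_cpup(prop);	/* correct on any endianness */

	return freq;
}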
diff --git a/drivers/net/cris/eth_v10.c b/drivers/net/cris/eth_v10.c
index ec03b40..9c755db 100644
--- a/drivers/net/cris/eth_v10.c
+++ b/drivers/net/cris/eth_v10.c
@@ -1131,7 +1131,6 @@
 e100rxtx_interrupt(int irq, void *dev_id)
 {
 	struct net_device *dev = (struct net_device *)dev_id;
-	struct net_local *np = netdev_priv(dev);
 	unsigned long irqbits;
 
 	/*
diff --git a/drivers/net/dummy.c b/drivers/net/dummy.c
index 442d91a..bab0158 100644
--- a/drivers/net/dummy.c
+++ b/drivers/net/dummy.c
@@ -187,8 +187,10 @@
 	rtnl_lock();
 	err = __rtnl_link_register(&dummy_link_ops);
 
-	for (i = 0; i < numdummies && !err; i++)
+	for (i = 0; i < numdummies && !err; i++) {
 		err = dummy_init_one();
+		cond_resched();
+	}
 	if (err < 0)
 		__rtnl_link_unregister(&dummy_link_ops);
 	rtnl_unlock();
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x.h
index e30e2a2..7de8241 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x.h
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x.h
@@ -747,21 +747,6 @@
 
 #define ETH_RX_ERROR_FALGS		ETH_FAST_PATH_RX_CQE_PHY_DECODE_ERR_FLG
 
-#define BNX2X_IP_CSUM_ERR(cqe) \
-			(!((cqe)->fast_path_cqe.status_flags & \
-			   ETH_FAST_PATH_RX_CQE_IP_XSUM_NO_VALIDATION_FLG) && \
-			 ((cqe)->fast_path_cqe.type_error_flags & \
-			  ETH_FAST_PATH_RX_CQE_IP_BAD_XSUM_FLG))
-
-#define BNX2X_L4_CSUM_ERR(cqe) \
-			(!((cqe)->fast_path_cqe.status_flags & \
-			   ETH_FAST_PATH_RX_CQE_L4_XSUM_NO_VALIDATION_FLG) && \
-			 ((cqe)->fast_path_cqe.type_error_flags & \
-			  ETH_FAST_PATH_RX_CQE_L4_BAD_XSUM_FLG))
-
-#define BNX2X_RX_CSUM_OK(cqe) \
-			(!(BNX2X_L4_CSUM_ERR(cqe) || BNX2X_IP_CSUM_ERR(cqe)))
-
 #define BNX2X_PRS_FLAG_OVERETH_IPV4(flags) \
 				(((le16_to_cpu(flags) & \
 				   PARSING_FLAGS_OVER_ETHERNET_PROTOCOL) >> \
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c
index ad0743b..8098eea 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c
@@ -190,7 +190,7 @@
 
 		if ((netif_tx_queue_stopped(txq)) &&
 		    (bp->state == BNX2X_STATE_OPEN) &&
-		    (bnx2x_tx_avail(bp, txdata) >= MAX_SKB_FRAGS + 3))
+		    (bnx2x_tx_avail(bp, txdata) >= MAX_SKB_FRAGS + 4))
 			netif_tx_wake_queue(txq);
 
 		__netif_tx_unlock(txq);
@@ -617,6 +617,25 @@
 	return 0;
 }
 
+static void bnx2x_csum_validate(struct sk_buff *skb, union eth_rx_cqe *cqe,
+				struct bnx2x_fastpath *fp)
+{
+	/* Do nothing if no IP/L4 csum validation was done */
+
+	if (cqe->fast_path_cqe.status_flags &
+	    (ETH_FAST_PATH_RX_CQE_IP_XSUM_NO_VALIDATION_FLG |
+	     ETH_FAST_PATH_RX_CQE_L4_XSUM_NO_VALIDATION_FLG))
+		return;
+
+	/* If both IP/L4 validation were done, check if an error was found. */
+
+	if (cqe->fast_path_cqe.type_error_flags &
+	    (ETH_FAST_PATH_RX_CQE_IP_BAD_XSUM_FLG |
+	     ETH_FAST_PATH_RX_CQE_L4_BAD_XSUM_FLG))
+		fp->eth_q_stats.hw_csum_err++;
+	else
+		skb->ip_summed = CHECKSUM_UNNECESSARY;
+}
 
 int bnx2x_rx_int(struct bnx2x_fastpath *fp, int budget)
 {
@@ -806,13 +825,9 @@
 
 		skb_checksum_none_assert(skb);
 
-		if (bp->dev->features & NETIF_F_RXCSUM) {
+		if (bp->dev->features & NETIF_F_RXCSUM)
+			bnx2x_csum_validate(skb, cqe, fp);
 
-			if (likely(BNX2X_RX_CSUM_OK(cqe)))
-				skb->ip_summed = CHECKSUM_UNNECESSARY;
-			else
-				fp->eth_q_stats.hw_csum_err++;
-		}
 
 		skb_record_rx_queue(skb, fp->rx_queue);
 
@@ -2501,8 +2516,6 @@
 /* we split the first BD into headers and data BDs
  * to ease the pain of our fellow microcode engineers
  * we use one mapping for both BDs
- * So far this has only been observed to happen
- * in Other Operating Systems(TM)
  */
 static noinline u16 bnx2x_tx_split(struct bnx2x *bp,
 				   struct bnx2x_fp_txdata *txdata,
@@ -3156,7 +3169,7 @@
 
 	txdata->tx_bd_prod += nbd;
 
-	if (unlikely(bnx2x_tx_avail(bp, txdata) < MAX_SKB_FRAGS + 3)) {
+	if (unlikely(bnx2x_tx_avail(bp, txdata) < MAX_SKB_FRAGS + 4)) {
 		netif_tx_stop_queue(txq);
 
 		/* paired memory barrier is in bnx2x_tx_int(), we have to keep
@@ -3165,7 +3178,7 @@
 		smp_mb();
 
 		fp->eth_q_stats.driver_xoff++;
-		if (bnx2x_tx_avail(bp, txdata) >= MAX_SKB_FRAGS + 3)
+		if (bnx2x_tx_avail(bp, txdata) >= MAX_SKB_FRAGS + 4)
 			netif_tx_wake_queue(txq);
 	}
 	txdata->tx_pkt++;
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.c
index a3fb721..6e7d5c0 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.c
@@ -40,6 +40,7 @@
 #define I2C_BSC0			0
 #define I2C_BSC1			1
 #define I2C_WA_RETRY_CNT		3
+#define I2C_WA_PWR_ITER			(I2C_WA_RETRY_CNT - 1)
 #define MCPR_IMC_COMMAND_READ_OP	1
 #define MCPR_IMC_COMMAND_WRITE_OP	2
 
@@ -7659,6 +7660,28 @@
 	return -EINVAL;
 }
 
+static void bnx2x_warpcore_power_module(struct link_params *params,
+					struct bnx2x_phy *phy,
+					u8 power)
+{
+	u32 pin_cfg;
+	struct bnx2x *bp = params->bp;
+
+	pin_cfg = (REG_RD(bp, params->shmem_base +
+			  offsetof(struct shmem_region,
+			dev_info.port_hw_config[params->port].e3_sfp_ctrl)) &
+			PORT_HW_CFG_E3_PWR_DIS_MASK) >>
+			PORT_HW_CFG_E3_PWR_DIS_SHIFT;
+
+	if (pin_cfg == PIN_CFG_NA)
+		return;
+	DP(NETIF_MSG_LINK, "Setting SFP+ module power to %d using pin cfg %d\n",
+		       power, pin_cfg);
+	/* Low ==> corresponding SFP+ module is powered
+	 * high ==> the SFP+ module is powered down
+	 */
+	bnx2x_set_cfg_pin(bp, pin_cfg, power ^ 1);
+}
 static int bnx2x_warpcore_read_sfp_module_eeprom(struct bnx2x_phy *phy,
 						 struct link_params *params,
 						 u16 addr, u8 byte_cnt,
@@ -7678,6 +7701,12 @@
 	/* 4 byte aligned address */
 	addr32 = addr & (~0x3);
 	do {
+		if (cnt == I2C_WA_PWR_ITER) {
+			bnx2x_warpcore_power_module(params, phy, 0);
+			/* Note that 100us are not enough here */
+			usleep_range(1000,1000);
+			bnx2x_warpcore_power_module(params, phy, 1);
+		}
 		rc = bnx2x_bsc_read(params, phy, 0xa0, addr32, 0, byte_cnt,
 				    data_array);
 	} while ((rc != 0) && (++cnt < I2C_WA_RETRY_CNT));
@@ -8200,29 +8229,6 @@
 		bnx2x_set_e1e2_module_fault_led(params, gpio_mode);
 }
 
-static void bnx2x_warpcore_power_module(struct link_params *params,
-					struct bnx2x_phy *phy,
-					u8 power)
-{
-	u32 pin_cfg;
-	struct bnx2x *bp = params->bp;
-
-	pin_cfg = (REG_RD(bp, params->shmem_base +
-			  offsetof(struct shmem_region,
-			dev_info.port_hw_config[params->port].e3_sfp_ctrl)) &
-			PORT_HW_CFG_E3_PWR_DIS_MASK) >>
-			PORT_HW_CFG_E3_PWR_DIS_SHIFT;
-
-	if (pin_cfg == PIN_CFG_NA)
-		return;
-	DP(NETIF_MSG_LINK, "Setting SFP+ module power to %d using pin cfg %d\n",
-		       power, pin_cfg);
-	/* Low ==> corresponding SFP+ module is powered
-	 * high ==> the SFP+ module is powered down
-	 */
-	bnx2x_set_cfg_pin(bp, pin_cfg, power ^ 1);
-}
-
 static void bnx2x_warpcore_hw_reset(struct bnx2x_phy *phy,
 				    struct link_params *params)
 {
@@ -9748,7 +9754,7 @@
 
 	msleep(1);
 
-	if (!(CHIP_IS_E1(bp)))
+	if (!(CHIP_IS_E1x(bp)))
 		port = BP_PATH(bp);
 	else
 		port = params->port;
diff --git a/drivers/net/ethernet/broadcom/tg3.c b/drivers/net/ethernet/broadcom/tg3.c
index edeeb51..e47ff8b 100644
--- a/drivers/net/ethernet/broadcom/tg3.c
+++ b/drivers/net/ethernet/broadcom/tg3.c
@@ -14275,7 +14275,8 @@
 		}
 	}
 
-	if (tg3_flag(tp, 5755_PLUS))
+	if (tg3_flag(tp, 5755_PLUS) ||
+	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
 		tg3_flag_set(tp, SHORT_DMA_BUG);
 
 	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719)
diff --git a/drivers/net/ethernet/emulex/benet/be_cmds.c b/drivers/net/ethernet/emulex/benet/be_cmds.c
index 8d06ea3..921c208 100644
--- a/drivers/net/ethernet/emulex/benet/be_cmds.c
+++ b/drivers/net/ethernet/emulex/benet/be_cmds.c
@@ -122,15 +122,15 @@
 			goto done;
 
 		if (compl_status == MCC_STATUS_UNAUTHORIZED_REQUEST) {
-			dev_warn(&adapter->pdev->dev, "This domain(VM) is not "
-				"permitted to execute this cmd (opcode %d)\n",
-				opcode);
+			dev_warn(&adapter->pdev->dev,
+				 "opcode %d-%d is not permitted\n",
+				 opcode, subsystem);
 		} else {
 			extd_status = (compl->status >> CQE_STATUS_EXTD_SHIFT) &
 					CQE_STATUS_EXTD_MASK;
-			dev_err(&adapter->pdev->dev, "Cmd (opcode %d) failed:"
-				"status %d, extd-status %d\n",
-				opcode, compl_status, extd_status);
+			dev_err(&adapter->pdev->dev,
+				"opcode %d-%d failed:status %d-%d\n",
+				opcode, subsystem, compl_status, extd_status);
 		}
 	}
 done:
diff --git a/drivers/net/ethernet/emulex/benet/be_cmds.h b/drivers/net/ethernet/emulex/benet/be_cmds.h
index 9625bf4..b3f3fc3 100644
--- a/drivers/net/ethernet/emulex/benet/be_cmds.h
+++ b/drivers/net/ethernet/emulex/benet/be_cmds.h
@@ -1566,7 +1566,7 @@
 	u32 rsvd0[BE_TXP_SW_SZ];
 	struct be_erx_stats_v1 erx;
 	struct be_pmem_stats pmem;
-	u32 rsvd1[3];
+	u32 rsvd1[18];
 };
 
 struct be_cmd_req_get_stats_v1 {
diff --git a/drivers/net/ethernet/emulex/benet/be_main.c b/drivers/net/ethernet/emulex/benet/be_main.c
index 08efd30..501dfa9 100644
--- a/drivers/net/ethernet/emulex/benet/be_main.c
+++ b/drivers/net/ethernet/emulex/benet/be_main.c
@@ -736,6 +736,8 @@
 
 	copied = make_tx_wrbs(adapter, txq, skb, wrb_cnt, dummy_wrb);
 	if (copied) {
+		int gso_segs = skb_shinfo(skb)->gso_segs;
+
 		/* record the sent skb in the sent_skb table */
 		BUG_ON(txo->sent_skb_list[start]);
 		txo->sent_skb_list[start] = skb;
@@ -753,8 +755,7 @@
 
 		be_txq_notify(adapter, txq->id, wrb_cnt);
 
-		be_tx_stats_update(txo, wrb_cnt, copied,
-				skb_shinfo(skb)->gso_segs, stopped);
+		be_tx_stats_update(txo, wrb_cnt, copied, gso_segs, stopped);
 	} else {
 		txq->head = start;
 		dev_kfree_skb_any(skb);
@@ -3236,7 +3237,7 @@
 
 	netdev->flags |= IFF_MULTICAST;
 
-	netif_set_gso_max_size(netdev, 65535);
+	netif_set_gso_max_size(netdev, 65535 - ETH_HLEN);
 
 	netdev->netdev_ops = &be_netdev_ops;
 
diff --git a/drivers/net/ethernet/freescale/fec.c b/drivers/net/ethernet/freescale/fec.c
index 8f2cf8c..ff7f4c5 100644
--- a/drivers/net/ethernet/freescale/fec.c
+++ b/drivers/net/ethernet/freescale/fec.c
@@ -207,7 +207,8 @@
 
 	struct net_device *netdev;
 
-	struct clk *clk;
+	struct clk *clk_ipg;
+	struct clk *clk_ahb;
 
 	/* The saved address of a sent-in-place packet/buffer, for skfree(). */
 	unsigned char *tx_bounce[TX_RING_SIZE];
@@ -1065,7 +1066,7 @@
 	 * Reference Manual has an error on this, and gets fixed on i.MX6Q
 	 * document.
 	 */
-	fep->phy_speed = DIV_ROUND_UP(clk_get_rate(fep->clk), 5000000);
+	fep->phy_speed = DIV_ROUND_UP(clk_get_rate(fep->clk_ahb), 5000000);
 	if (id_entry->driver_data & FEC_QUIRK_ENET_MAC)
 		fep->phy_speed--;
 	fep->phy_speed <<= 1;
@@ -1618,12 +1619,20 @@
 		goto failed_pin;
 	}
 
-	fep->clk = clk_get(&pdev->dev, NULL);
-	if (IS_ERR(fep->clk)) {
-		ret = PTR_ERR(fep->clk);
+	fep->clk_ipg = devm_clk_get(&pdev->dev, "ipg");
+	if (IS_ERR(fep->clk_ipg)) {
+		ret = PTR_ERR(fep->clk_ipg);
 		goto failed_clk;
 	}
-	clk_prepare_enable(fep->clk);
+
+	fep->clk_ahb = devm_clk_get(&pdev->dev, "ahb");
+	if (IS_ERR(fep->clk_ahb)) {
+		ret = PTR_ERR(fep->clk_ahb);
+		goto failed_clk;
+	}
+
+	clk_prepare_enable(fep->clk_ahb);
+	clk_prepare_enable(fep->clk_ipg);
 
 	ret = fec_enet_init(ndev);
 	if (ret)
@@ -1646,8 +1655,8 @@
 	fec_enet_mii_remove(fep);
 failed_mii_init:
 failed_init:
-	clk_disable_unprepare(fep->clk);
-	clk_put(fep->clk);
+	clk_disable_unprepare(fep->clk_ahb);
+	clk_disable_unprepare(fep->clk_ipg);
 failed_pin:
 failed_clk:
 	for (i = 0; i < FEC_IRQ_NUM; i++) {
@@ -1680,8 +1689,8 @@
 		if (irq > 0)
 			free_irq(irq, ndev);
 	}
-	clk_disable_unprepare(fep->clk);
-	clk_put(fep->clk);
+	clk_disable_unprepare(fep->clk_ahb);
+	clk_disable_unprepare(fep->clk_ipg);
 	iounmap(fep->hwp);
 	free_netdev(ndev);
 
@@ -1705,7 +1714,8 @@
 		fec_stop(ndev);
 		netif_device_detach(ndev);
 	}
-	clk_disable_unprepare(fep->clk);
+	clk_disable_unprepare(fep->clk_ahb);
+	clk_disable_unprepare(fep->clk_ipg);
 
 	return 0;
 }
@@ -1716,7 +1726,8 @@
 	struct net_device *ndev = dev_get_drvdata(dev);
 	struct fec_enet_private *fep = netdev_priv(ndev);
 
-	clk_prepare_enable(fep->clk);
+	clk_prepare_enable(fep->clk_ahb);
+	clk_prepare_enable(fep->clk_ipg);
 	if (netif_running(ndev)) {
 		fec_restart(ndev, fep->full_duplex);
 		netif_device_attach(ndev);
diff --git a/drivers/net/ethernet/freescale/fec_mpc52xx.c b/drivers/net/ethernet/freescale/fec_mpc52xx.c
index 97f947b..2933d08 100644
--- a/drivers/net/ethernet/freescale/fec_mpc52xx.c
+++ b/drivers/net/ethernet/freescale/fec_mpc52xx.c
@@ -437,7 +437,7 @@
 		length = status & BCOM_FEC_RX_BD_LEN_MASK;
 		skb_put(rskb, length - 4);	/* length without CRC32 */
 		rskb->protocol = eth_type_trans(rskb, dev);
-		if (!skb_defer_rx_timestamp(skb))
+		if (!skb_defer_rx_timestamp(rskb))
 			netif_rx(rskb);
 
 		spin_lock(&priv->lock);
diff --git a/drivers/net/ethernet/intel/Kconfig b/drivers/net/ethernet/intel/Kconfig
index 79b07ec..0cafe4f 100644
--- a/drivers/net/ethernet/intel/Kconfig
+++ b/drivers/net/ethernet/intel/Kconfig
@@ -122,8 +122,10 @@
 
 config IGB_PTP
 	bool "PTP Hardware Clock (PHC)"
-	default y
-	depends on IGB && PTP_1588_CLOCK
+	default n
+	depends on IGB && EXPERIMENTAL
+	select PPS
+	select PTP_1588_CLOCK
 	---help---
 	  Say Y here if you want to use PTP Hardware Clock (PHC) in the
 	  driver.  Only the basic clock operations have been implemented.
@@ -223,7 +225,9 @@
 config IXGBE_PTP
 	bool "PTP Clock Support"
 	default n
-	depends on IXGBE && PTP_1588_CLOCK
+	depends on IXGBE && EXPERIMENTAL
+	select PPS
+	select PTP_1588_CLOCK
 	---help---
 	  Say Y here if you want support for 1588 Timestamping with a
 	  PHC device, using the PTP 1588 Clock support. This is
diff --git a/drivers/net/ethernet/intel/e1000/e1000_main.c b/drivers/net/ethernet/intel/e1000/e1000_main.c
index 95731c8..7483ca0 100644
--- a/drivers/net/ethernet/intel/e1000/e1000_main.c
+++ b/drivers/net/ethernet/intel/e1000/e1000_main.c
@@ -4080,7 +4080,7 @@
 				spin_lock_irqsave(&adapter->stats_lock,
 				                  irq_flags);
 				e1000_tbi_adjust_stats(hw, &adapter->stats,
-				                       length, skb->data);
+						       length, mapped);
 				spin_unlock_irqrestore(&adapter->stats_lock,
 				                       irq_flags);
 				length--;
diff --git a/drivers/net/ethernet/intel/e1000e/ethtool.c b/drivers/net/ethernet/intel/e1000e/ethtool.c
index d863075..905e214 100644
--- a/drivers/net/ethernet/intel/e1000e/ethtool.c
+++ b/drivers/net/ethernet/intel/e1000e/ethtool.c
@@ -258,7 +258,8 @@
 	 * When SoL/IDER sessions are active, autoneg/speed/duplex
 	 * cannot be changed
 	 */
-	if (hw->phy.ops.check_reset_block(hw)) {
+	if (hw->phy.ops.check_reset_block &&
+	    hw->phy.ops.check_reset_block(hw)) {
 		e_err("Cannot change link characteristics when SoL/IDER is active.\n");
 		return -EINVAL;
 	}
@@ -1615,7 +1616,8 @@
 	 * PHY loopback cannot be performed if SoL/IDER
 	 * sessions are active
 	 */
-	if (hw->phy.ops.check_reset_block(hw)) {
+	if (hw->phy.ops.check_reset_block &&
+	    hw->phy.ops.check_reset_block(hw)) {
 		e_err("Cannot do PHY loopback test when SoL/IDER is active.\n");
 		*data = 0;
 		goto out;
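check_reset_block is now treated as an optional PHY op, so every caller in this series gains the same NULL check; a tiny wrapper of this shape, living next to the driver's hw definitions, would express the rule in one place (name hypothetical, not part of the patch):

/* A missing check_reset_block() op means "PHY reset is never blocked". */
static inline bool e1000e_phy_reset_blocked(struct e1000_hw *hw)
{
	return hw->phy.ops.check_reset_block &&
	       hw->phy.ops.check_reset_block(hw) != 0;
}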
diff --git a/drivers/net/ethernet/intel/e1000e/ich8lan.c b/drivers/net/ethernet/intel/e1000e/ich8lan.c
index bbf70ba..238ab2f 100644
--- a/drivers/net/ethernet/intel/e1000e/ich8lan.c
+++ b/drivers/net/ethernet/intel/e1000e/ich8lan.c
@@ -165,14 +165,14 @@
 #define I217_EEE_100_SUPPORTED  (1 << 1)	/* 100BaseTx EEE supported */
 
 /* Intel Rapid Start Technology Support */
-#define I217_PROXY_CTRL                 PHY_REG(BM_WUC_PAGE, 70)
+#define I217_PROXY_CTRL                 BM_PHY_REG(BM_WUC_PAGE, 70)
 #define I217_PROXY_CTRL_AUTO_DISABLE    0x0080
 #define I217_SxCTRL                     PHY_REG(BM_PORT_CTRL_PAGE, 28)
-#define I217_SxCTRL_MASK                0x1000
+#define I217_SxCTRL_ENABLE_LPI_RESET    0x1000
 #define I217_CGFREG                     PHY_REG(772, 29)
-#define I217_CGFREG_MASK                0x0002
+#define I217_CGFREG_ENABLE_MTA_RESET    0x0002
 #define I217_MEMPWR                     PHY_REG(772, 26)
-#define I217_MEMPWR_MASK                0x0010
+#define I217_MEMPWR_DISABLE_SMB_RELEASE 0x0010
 
 /* Strapping Option Register - RO */
 #define E1000_STRAP                     0x0000C
@@ -4089,12 +4089,12 @@
 			 * power good.
 			 */
 			e1e_rphy_locked(hw, I217_SxCTRL, &phy_reg);
-			phy_reg |= I217_SxCTRL_MASK;
+			phy_reg |= I217_SxCTRL_ENABLE_LPI_RESET;
 			e1e_wphy_locked(hw, I217_SxCTRL, phy_reg);
 
 			/* Disable the SMB release on LCD reset. */
 			e1e_rphy_locked(hw, I217_MEMPWR, &phy_reg);
-			phy_reg &= ~I217_MEMPWR;
+			phy_reg &= ~I217_MEMPWR_DISABLE_SMB_RELEASE;
 			e1e_wphy_locked(hw, I217_MEMPWR, phy_reg);
 		}
 
@@ -4103,7 +4103,7 @@
 		 * Support
 		 */
 		e1e_rphy_locked(hw, I217_CGFREG, &phy_reg);
-		phy_reg |= I217_CGFREG_MASK;
+		phy_reg |= I217_CGFREG_ENABLE_MTA_RESET;
 		e1e_wphy_locked(hw, I217_CGFREG, phy_reg);
 
 release:
@@ -4176,7 +4176,7 @@
 			ret_val = e1e_rphy_locked(hw, I217_MEMPWR, &phy_reg);
 			if (ret_val)
 				goto release;
-			phy_reg |= I217_MEMPWR_MASK;
+			phy_reg |= I217_MEMPWR_DISABLE_SMB_RELEASE;
 			e1e_wphy_locked(hw, I217_MEMPWR, phy_reg);
 
 			/* Disable Proxy */
@@ -4186,7 +4186,7 @@
 		ret_val = e1e_rphy_locked(hw, I217_CGFREG, &phy_reg);
 		if (ret_val)
 			goto release;
-		phy_reg &= ~I217_CGFREG_MASK;
+		phy_reg &= ~I217_CGFREG_ENABLE_MTA_RESET;
 		e1e_wphy_locked(hw, I217_CGFREG, phy_reg);
 release:
 		if (ret_val)
diff --git a/drivers/net/ethernet/intel/e1000e/mac.c b/drivers/net/ethernet/intel/e1000e/mac.c
index 026e8b3..a134399 100644
--- a/drivers/net/ethernet/intel/e1000e/mac.c
+++ b/drivers/net/ethernet/intel/e1000e/mac.c
@@ -709,7 +709,7 @@
 	 * In the case of the phy reset being blocked, we already have a link.
 	 * We do not need to set it up again.
 	 */
-	if (hw->phy.ops.check_reset_block(hw))
+	if (hw->phy.ops.check_reset_block && hw->phy.ops.check_reset_block(hw))
 		return 0;
 
 	/*
diff --git a/drivers/net/ethernet/intel/e1000e/netdev.c b/drivers/net/ethernet/intel/e1000e/netdev.c
index a4b0435..31d37a2 100644
--- a/drivers/net/ethernet/intel/e1000e/netdev.c
+++ b/drivers/net/ethernet/intel/e1000e/netdev.c
@@ -6237,7 +6237,7 @@
 		adapter->hw.phy.ms_type = e1000_ms_hw_default;
 	}
 
-	if (hw->phy.ops.check_reset_block(hw))
+	if (hw->phy.ops.check_reset_block && hw->phy.ops.check_reset_block(hw))
 		e_info("PHY reset is blocked due to SOL/IDER session.\n");
 
 	/* Set initial default active device features */
@@ -6404,7 +6404,7 @@
 	if (!(adapter->flags & FLAG_HAS_AMT))
 		e1000e_release_hw_control(adapter);
 err_eeprom:
-	if (!hw->phy.ops.check_reset_block(hw))
+	if (hw->phy.ops.check_reset_block && !hw->phy.ops.check_reset_block(hw))
 		e1000_phy_hw_reset(&adapter->hw);
 err_hw_init:
 	kfree(adapter->tx_ring);
diff --git a/drivers/net/ethernet/intel/e1000e/phy.c b/drivers/net/ethernet/intel/e1000e/phy.c
index 0334d01..b860d4f 100644
--- a/drivers/net/ethernet/intel/e1000e/phy.c
+++ b/drivers/net/ethernet/intel/e1000e/phy.c
@@ -2155,9 +2155,11 @@
 	s32 ret_val;
 	u32 ctrl;
 
-	ret_val = phy->ops.check_reset_block(hw);
-	if (ret_val)
-		return 0;
+	if (phy->ops.check_reset_block) {
+		ret_val = phy->ops.check_reset_block(hw);
+		if (ret_val)
+			return 0;
+	}
 
 	ret_val = phy->ops.acquire(hw);
 	if (ret_val)
diff --git a/drivers/net/ethernet/intel/igb/e1000_82575.c b/drivers/net/ethernet/intel/igb/e1000_82575.c
index e650839..5e84eaa 100644
--- a/drivers/net/ethernet/intel/igb/e1000_82575.c
+++ b/drivers/net/ethernet/intel/igb/e1000_82575.c
@@ -206,8 +206,6 @@
 		mac->rar_entry_count = E1000_RAR_ENTRIES_82580;
 		break;
 	case e1000_i350:
-	case e1000_i210:
-	case e1000_i211:
 		mac->rar_entry_count = E1000_RAR_ENTRIES_I350;
 		break;
 	default:
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe.h b/drivers/net/ethernet/intel/ixgbe/ixgbe.h
index 3ef3c52..7af291e 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe.h
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe.h
@@ -196,7 +196,7 @@
 	__IXGBE_HANG_CHECK_ARMED,
 	__IXGBE_RX_RSC_ENABLED,
 	__IXGBE_RX_CSUM_UDP_ZERO_ERR,
-	__IXGBE_RX_FCOE_BUFSZ,
+	__IXGBE_RX_FCOE,
 };
 
 #define check_for_tx_hang(ring) \
@@ -290,7 +290,7 @@
 #if defined(IXGBE_FCOE) && (PAGE_SIZE < 8192)
 static inline unsigned int ixgbe_rx_pg_order(struct ixgbe_ring *ring)
 {
-	return test_bit(__IXGBE_RX_FCOE_BUFSZ, &ring->state) ? 1 : 0;
+	return test_bit(__IXGBE_RX_FCOE, &ring->state) ? 1 : 0;
 }
 #else
 #define ixgbe_rx_pg_order(_ring) 0
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_lib.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_lib.c
index af1a531..c377706 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_lib.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_lib.c
@@ -634,7 +634,7 @@
 			f = &adapter->ring_feature[RING_F_FCOE];
 			if ((rxr_idx >= f->mask) &&
 			    (rxr_idx < f->mask + f->indices))
-				set_bit(__IXGBE_RX_FCOE_BUFSZ, &ring->state);
+				set_bit(__IXGBE_RX_FCOE, &ring->state);
 		}
 
 #endif /* IXGBE_FCOE */
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
index bf20457..18ca3bc 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
@@ -1058,17 +1058,17 @@
 #ifdef IXGBE_FCOE
 /**
  * ixgbe_rx_is_fcoe - check the rx desc for incoming pkt type
- * @adapter: address of board private structure
+ * @ring: structure containing ring specific data
  * @rx_desc: advanced rx descriptor
  *
  * Returns : true if it is FCoE pkt
  */
-static inline bool ixgbe_rx_is_fcoe(struct ixgbe_adapter *adapter,
+static inline bool ixgbe_rx_is_fcoe(struct ixgbe_ring *ring,
 				    union ixgbe_adv_rx_desc *rx_desc)
 {
 	__le16 pkt_info = rx_desc->wb.lower.lo_dword.hs_rss.pkt_info;
 
-	return (adapter->flags & IXGBE_FLAG_FCOE_ENABLED) &&
+	return test_bit(__IXGBE_RX_FCOE, &ring->state) &&
 	       ((pkt_info & cpu_to_le16(IXGBE_RXDADV_PKTTYPE_ETQF_MASK)) ==
 		(cpu_to_le16(IXGBE_ETQF_FILTER_FCOE <<
 			     IXGBE_RXDADV_PKTTYPE_ETQF_SHIFT)));
@@ -1148,7 +1148,7 @@
 
 	/* alloc new page for storage */
 	if (likely(!page)) {
-		page = alloc_pages(GFP_ATOMIC | __GFP_COLD,
+		page = alloc_pages(GFP_ATOMIC | __GFP_COLD | __GFP_COMP,
 				   ixgbe_rx_pg_order(rx_ring));
 		if (unlikely(!page)) {
 			rx_ring->rx_stats.alloc_rx_page_failed++;
@@ -1390,6 +1390,8 @@
 				     union ixgbe_adv_rx_desc *rx_desc,
 				     struct sk_buff *skb)
 {
+	struct net_device *dev = rx_ring->netdev;
+
 	ixgbe_update_rsc_stats(rx_ring, skb);
 
 	ixgbe_rx_hash(rx_ring, rx_desc, skb);
@@ -1401,14 +1403,15 @@
 		ixgbe_ptp_rx_hwtstamp(rx_ring->q_vector, skb);
 #endif
 
-	if (ixgbe_test_staterr(rx_desc, IXGBE_RXD_STAT_VP)) {
+	if ((dev->features & NETIF_F_HW_VLAN_RX) &&
+	    ixgbe_test_staterr(rx_desc, IXGBE_RXD_STAT_VP)) {
 		u16 vid = le16_to_cpu(rx_desc->wb.upper.vlan);
 		__vlan_hwaccel_put_tag(skb, vid);
 	}
 
 	skb_record_rx_queue(skb, rx_ring->queue_index);
 
-	skb->protocol = eth_type_trans(skb, rx_ring->netdev);
+	skb->protocol = eth_type_trans(skb, dev);
 }
 
 static void ixgbe_rx_skb(struct ixgbe_q_vector *q_vector,
@@ -1546,6 +1549,12 @@
 		skb->truesize -= ixgbe_rx_bufsz(rx_ring);
 	}
 
+#ifdef IXGBE_FCOE
+	/* do not attempt to pad FCoE Frames as this will disrupt DDP */
+	if (ixgbe_rx_is_fcoe(rx_ring, rx_desc))
+		return false;
+
+#endif
 	/* if skb_pad returns an error the skb was freed */
 	if (unlikely(skb->len < 60)) {
 		int pad_len = 60 - skb->len;
@@ -1772,7 +1781,7 @@
 
 #ifdef IXGBE_FCOE
 		/* if ddp, not passing to ULD unless for FCP_RSP or error */
-		if (ixgbe_rx_is_fcoe(adapter, rx_desc)) {
+		if (ixgbe_rx_is_fcoe(rx_ring, rx_desc)) {
 			ddp_bytes = ixgbe_fcoe_ddp(adapter, rx_desc, skb);
 			if (!ddp_bytes) {
 				dev_kfree_skb_any(skb);
@@ -3607,10 +3616,6 @@
 	if (hw->mac.type == ixgbe_mac_82598EB)
 		netif_set_gso_max_size(adapter->netdev, 32768);
 
-
-	/* Enable VLAN tag insert/strip */
-	adapter->netdev->features |= NETIF_F_HW_VLAN_RX;
-
 	hw->mac.ops.set_vfta(&adapter->hw, 0, 0, true);
 
 #ifdef IXGBE_FCOE
@@ -6701,11 +6706,6 @@
 {
 	struct ixgbe_adapter *adapter = netdev_priv(netdev);
 
-#ifdef CONFIG_DCB
-	if (adapter->flags & IXGBE_FLAG_DCB_ENABLED)
-		features &= ~NETIF_F_HW_VLAN_RX;
-#endif
-
 	/* return error if RXHASH is being enabled when RSS is not supported */
 	if (!(adapter->flags & IXGBE_FLAG_RSS_ENABLED))
 		features &= ~NETIF_F_RXHASH;
@@ -6718,7 +6718,6 @@
 	if (!(adapter->flags2 & IXGBE_FLAG2_RSC_CAPABLE))
 		features &= ~NETIF_F_LRO;
 
-
 	return features;
 }
 
@@ -6766,6 +6765,11 @@
 		need_reset = true;
 	}
 
+	if (features & NETIF_F_HW_VLAN_RX)
+		ixgbe_vlan_strip_enable(adapter);
+	else
+		ixgbe_vlan_strip_disable(adapter);
+
 	if (changed & NETIF_F_RXALL)
 		need_reset = true;
 
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_ptp.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_ptp.c
index ddc6a4d..dcebd12 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_ptp.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_ptp.c
@@ -708,6 +708,7 @@
 {
 	struct ixgbe_hw *hw = &adapter->hw;
 	u32 incval = 0;
+	u32 timinca = 0;
 	u32 shift = 0;
 	u32 cycle_speed;
 	unsigned long flags;
@@ -730,8 +731,16 @@
 		break;
 	}
 
-	/* Bail if the cycle speed didn't change */
-	if (adapter->cycle_speed == cycle_speed)
+	/*
+	 * grab the current TIMINCA value from the register so that it can be
+	 * double checked. If the register value has been cleared, it must be
+	 * reset to the correct value for generating a cyclecounter. If
+	 * TIMINCA is zero, the SYSTIME registers do not increment at all.
+	 */
+	timinca = IXGBE_READ_REG(hw, IXGBE_TIMINCA);
+
+	/* Bail if the cycle speed didn't change and TIMINCA is non-zero */
+	if (adapter->cycle_speed == cycle_speed && timinca)
 		return;
 
 	/* disable the SDP clock out */
diff --git a/drivers/net/ethernet/marvell/mv643xx_eth.c b/drivers/net/ethernet/marvell/mv643xx_eth.c
index c8950da..f0f06b2 100644
--- a/drivers/net/ethernet/marvell/mv643xx_eth.c
+++ b/drivers/net/ethernet/marvell/mv643xx_eth.c
@@ -57,6 +57,7 @@
 #include <linux/types.h>
 #include <linux/inet_lro.h>
 #include <linux/slab.h>
+#include <linux/clk.h>
 
 static char mv643xx_eth_driver_name[] = "mv643xx_eth";
 static char mv643xx_eth_driver_version[] = "1.4";
@@ -289,10 +290,10 @@
 	/*
 	 * Hardware-specific parameters.
 	 */
-	unsigned int t_clk;
 	int extended_rx_coal_limit;
 	int tx_bw_control;
 	int tx_csum_limit;
+
 };
 
 #define TX_BW_CONTROL_ABSENT		0
@@ -431,6 +432,14 @@
 	int tx_desc_sram_size;
 	int txq_count;
 	struct tx_queue txq[8];
+
+	/*
+	 * Hardware-specific parameters.
+	 */
+#if defined(CONFIG_HAVE_CLK)
+	struct clk *clk;
+#endif
+	unsigned int t_clk;
 };
 
 
@@ -1010,7 +1019,7 @@
 	int mtu;
 	int bucket_size;
 
-	token_rate = ((rate / 1000) * 64) / (mp->shared->t_clk / 1000);
+	token_rate = ((rate / 1000) * 64) / (mp->t_clk / 1000);
 	if (token_rate > 1023)
 		token_rate = 1023;
 
@@ -1042,7 +1051,7 @@
 	int token_rate;
 	int bucket_size;
 
-	token_rate = ((rate / 1000) * 64) / (mp->shared->t_clk / 1000);
+	token_rate = ((rate / 1000) * 64) / (mp->t_clk / 1000);
 	if (token_rate > 1023)
 		token_rate = 1023;
 
@@ -1309,7 +1318,7 @@
 		temp = (val & 0x003fff00) >> 8;
 
 	temp *= 64000000;
-	do_div(temp, mp->shared->t_clk);
+	do_div(temp, mp->t_clk);
 
 	return (unsigned int)temp;
 }
@@ -1319,7 +1328,7 @@
 	u64 temp;
 	u32 val;
 
-	temp = (u64)usec * mp->shared->t_clk;
+	temp = (u64)usec * mp->t_clk;
 	temp += 31999999;
 	do_div(temp, 64000000);
 
@@ -1345,7 +1354,7 @@
 
 	temp = (rdlp(mp, TX_FIFO_URGENT_THRESHOLD) & 0x3fff0) >> 4;
 	temp *= 64000000;
-	do_div(temp, mp->shared->t_clk);
+	do_div(temp, mp->t_clk);
 
 	return (unsigned int)temp;
 }
@@ -1354,7 +1363,7 @@
 {
 	u64 temp;
 
-	temp = (u64)usec * mp->shared->t_clk;
+	temp = (u64)usec * mp->t_clk;
 	temp += 31999999;
 	do_div(temp, 64000000);
 
@@ -2663,10 +2672,6 @@
 	if (dram)
 		mv643xx_eth_conf_mbus_windows(msp, dram);
 
-	/*
-	 * Detect hardware parameters.
-	 */
-	msp->t_clk = (pd != NULL && pd->t_clk != 0) ? pd->t_clk : 133000000;
 	msp->tx_csum_limit = (pd != NULL && pd->tx_csum_limit) ?
 					pd->tx_csum_limit : 9 * 1024;
 	infer_hw_params(msp);
@@ -2891,6 +2896,18 @@
 
 	mp->dev = dev;
 
+	/*
+	 * Start with a default rate, and if there is a clock, allow
+	 * it to override the default.
+	 */
+	mp->t_clk = 133000000;
+#if defined(CONFIG_HAVE_CLK)
+	mp->clk = clk_get(&pdev->dev, (pdev->id ? "1" : "0"));
+	if (!IS_ERR(mp->clk)) {
+		clk_prepare_enable(mp->clk);
+		mp->t_clk = clk_get_rate(mp->clk);
+	}
+#endif
 	set_params(mp, pd);
 	netif_set_real_num_tx_queues(dev, mp->txq_count);
 	netif_set_real_num_rx_queues(dev, mp->rxq_count);
@@ -2979,6 +2996,14 @@
 	if (mp->phy != NULL)
 		phy_detach(mp->phy);
 	cancel_work_sync(&mp->tx_timeout_task);
+
+#if defined(CONFIG_HAVE_CLK)
+	if (!IS_ERR(mp->clk)) {
+		clk_disable_unprepare(mp->clk);
+		clk_put(mp->clk);
+	}
+#endif
+
 	free_netdev(mp->dev);
 
 	platform_set_drvdata(pdev, NULL);
diff --git a/drivers/net/ethernet/marvell/sky2.c b/drivers/net/ethernet/marvell/sky2.c
index cace36f..28a5445 100644
--- a/drivers/net/ethernet/marvell/sky2.c
+++ b/drivers/net/ethernet/marvell/sky2.c
@@ -4381,10 +4381,12 @@
 	struct sky2_port *sky2 = netdev_priv(dev);
 	netdev_features_t changed = dev->features ^ features;
 
-	if (changed & NETIF_F_RXCSUM) {
-		bool on = features & NETIF_F_RXCSUM;
-		sky2_write32(sky2->hw, Q_ADDR(rxqaddr[sky2->port], Q_CSR),
-			     on ? BMU_ENA_RX_CHKSUM : BMU_DIS_RX_CHKSUM);
+	if ((changed & NETIF_F_RXCSUM) &&
+	    !(sky2->hw->flags & SKY2_HW_NEW_LE)) {
+		sky2_write32(sky2->hw,
+			     Q_ADDR(rxqaddr[sky2->port], Q_CSR),
+			     (features & NETIF_F_RXCSUM)
+			     ? BMU_ENA_RX_CHKSUM : BMU_DIS_RX_CHKSUM);
 	}
 
 	if (changed & NETIF_F_RXHASH)
diff --git a/drivers/net/ethernet/mellanox/mlx4/cmd.c b/drivers/net/ethernet/mellanox/mlx4/cmd.c
index 1bcead1..842c8ce 100644
--- a/drivers/net/ethernet/mellanox/mlx4/cmd.c
+++ b/drivers/net/ethernet/mellanox/mlx4/cmd.c
@@ -617,7 +617,7 @@
 		.out_is_imm = false,
 		.encode_slave_id = false,
 		.verify = NULL,
-		.wrapper = NULL
+		.wrapper = mlx4_QUERY_FW_wrapper
 	},
 	{
 		.opcode = MLX4_CMD_QUERY_HCA,
@@ -635,7 +635,7 @@
 		.out_is_imm = false,
 		.encode_slave_id = false,
 		.verify = NULL,
-		.wrapper = NULL
+		.wrapper = mlx4_QUERY_DEV_CAP_wrapper
 	},
 	{
 		.opcode = MLX4_CMD_QUERY_FUNC_CAP,
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_main.c b/drivers/net/ethernet/mellanox/mlx4/en_main.c
index 988b242..69ba572 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_main.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_main.c
@@ -136,13 +136,12 @@
 	struct mlx4_en_dev *mdev = (struct mlx4_en_dev *) endev_ptr;
 	struct mlx4_en_priv *priv;
 
-	if (!mdev->pndev[port])
-		return;
-
-	priv = netdev_priv(mdev->pndev[port]);
 	switch (event) {
 	case MLX4_DEV_EVENT_PORT_UP:
 	case MLX4_DEV_EVENT_PORT_DOWN:
+		if (!mdev->pndev[port])
+			return;
+		priv = netdev_priv(mdev->pndev[port]);
 		/* To prevent races, we poll the link state in a separate
 		  task rather than changing it here */
 		priv->link_state = event;
@@ -154,7 +153,10 @@
 		break;
 
 	default:
-		mlx4_warn(mdev, "Unhandled event: %d\n", event);
+		if (port < 1 || port > dev->caps.num_ports ||
+		    !mdev->pndev[port])
+			return;
+		mlx4_warn(mdev, "Unhandled event %d for port %d\n", event, port);
 	}
 }
 
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_netdev.c b/drivers/net/ethernet/mellanox/mlx4/en_netdev.c
index 926d8aa..073b85b 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_netdev.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_netdev.c
@@ -929,15 +929,20 @@
 		if (priv->rx_cq[i].buf)
 			mlx4_en_destroy_cq(priv, &priv->rx_cq[i]);
 	}
+
+	if (priv->base_tx_qpn) {
+		mlx4_qp_release_range(priv->mdev->dev, priv->base_tx_qpn, priv->tx_ring_num);
+		priv->base_tx_qpn = 0;
+	}
 }
 
 int mlx4_en_alloc_resources(struct mlx4_en_priv *priv)
 {
 	struct mlx4_en_port_profile *prof = priv->prof;
 	int i;
-	int base_tx_qpn, err;
+	int err;
 
-	err = mlx4_qp_reserve_range(priv->mdev->dev, priv->tx_ring_num, 256, &base_tx_qpn);
+	err = mlx4_qp_reserve_range(priv->mdev->dev, priv->tx_ring_num, 256, &priv->base_tx_qpn);
 	if (err) {
 		en_err(priv, "failed reserving range for TX rings\n");
 		return err;
@@ -949,7 +954,7 @@
 				      prof->tx_ring_size, i, TX))
 			goto err;
 
-		if (mlx4_en_create_tx_ring(priv, &priv->tx_ring[i], base_tx_qpn + i,
+		if (mlx4_en_create_tx_ring(priv, &priv->tx_ring[i], priv->base_tx_qpn + i,
 					   prof->tx_ring_size, TXBB_SIZE))
 			goto err;
 	}
@@ -969,7 +974,6 @@
 
 err:
 	en_err(priv, "Failed to allocate NIC resources\n");
-	mlx4_qp_release_range(priv->mdev->dev, base_tx_qpn, priv->tx_ring_num);
 	return -ENOMEM;
 }
 
@@ -1204,9 +1208,11 @@
 	en_warn(priv, "Using %d RX rings\n", prof->rx_ring_num);
 
 	/* Configure port */
+	mlx4_en_calc_rx_buf(dev);
 	err = mlx4_SET_PORT_general(mdev->dev, priv->port,
-				    MLX4_EN_MIN_MTU,
-				    0, 0, 0, 0);
+				    priv->rx_skb_size + ETH_FCS_LEN,
+				    prof->tx_pause, prof->tx_ppp,
+				    prof->rx_pause, prof->rx_ppp);
 	if (err) {
 		en_err(priv, "Failed setting port general configurations "
 		       "for port %d, with error %d\n", priv->port, err);
diff --git a/drivers/net/ethernet/mellanox/mlx4/eq.c b/drivers/net/ethernet/mellanox/mlx4/eq.c
index 3b6f8ef..bce98d9 100644
--- a/drivers/net/ethernet/mellanox/mlx4/eq.c
+++ b/drivers/net/ethernet/mellanox/mlx4/eq.c
@@ -426,7 +426,7 @@
 
 			mlx4_dbg(dev, "FLR event for slave: %d\n", flr_slave);
 
-			if (flr_slave > dev->num_slaves) {
+			if (flr_slave >= dev->num_slaves) {
 				mlx4_warn(dev,
 					  "Got FLR for unknown function: %d\n",
 					  flr_slave);
diff --git a/drivers/net/ethernet/mellanox/mlx4/fw.c b/drivers/net/ethernet/mellanox/mlx4/fw.c
index 68f5cd6..9c83bb8 100644
--- a/drivers/net/ethernet/mellanox/mlx4/fw.c
+++ b/drivers/net/ethernet/mellanox/mlx4/fw.c
@@ -412,7 +412,7 @@
 	outbox = mailbox->buf;
 
 	err = mlx4_cmd_box(dev, 0, mailbox->dma, 0, 0, MLX4_CMD_QUERY_DEV_CAP,
-			   MLX4_CMD_TIME_CLASS_A, !mlx4_is_slave(dev));
+			   MLX4_CMD_TIME_CLASS_A, MLX4_CMD_NATIVE);
 	if (err)
 		goto out;
 
@@ -590,8 +590,7 @@
 
 		for (i = 1; i <= dev_cap->num_ports; ++i) {
 			err = mlx4_cmd_box(dev, 0, mailbox->dma, i, 0, MLX4_CMD_QUERY_PORT,
-					   MLX4_CMD_TIME_CLASS_B,
-					   !mlx4_is_slave(dev));
+					   MLX4_CMD_TIME_CLASS_B, MLX4_CMD_NATIVE);
 			if (err)
 				goto out;
 
@@ -669,6 +668,28 @@
 	return err;
 }
 
+int mlx4_QUERY_DEV_CAP_wrapper(struct mlx4_dev *dev, int slave,
+			       struct mlx4_vhcr *vhcr,
+			       struct mlx4_cmd_mailbox *inbox,
+			       struct mlx4_cmd_mailbox *outbox,
+			       struct mlx4_cmd_info *cmd)
+{
+	int	err = 0;
+	u8	field;
+
+	err = mlx4_cmd_box(dev, 0, outbox->dma, 0, 0, MLX4_CMD_QUERY_DEV_CAP,
+			   MLX4_CMD_TIME_CLASS_A, MLX4_CMD_NATIVE);
+	if (err)
+		return err;
+
+	/* For guests, report Blueflame disabled */
+	MLX4_GET(field, outbox->buf, QUERY_DEV_CAP_BF_OFFSET);
+	field &= 0x7f;
+	MLX4_PUT(outbox->buf, field, QUERY_DEV_CAP_BF_OFFSET);
+
+	return 0;
+}
+
 int mlx4_QUERY_PORT_wrapper(struct mlx4_dev *dev, int slave,
 			    struct mlx4_vhcr *vhcr,
 			    struct mlx4_cmd_mailbox *inbox,
@@ -860,6 +881,9 @@
 		((fw_ver & 0xffff0000ull) >> 16) |
 		((fw_ver & 0x0000ffffull) << 16);
 
+	if (mlx4_is_slave(dev))
+		goto out;
+
 	MLX4_GET(lg, outbox, QUERY_FW_PPF_ID);
 	dev->caps.function = lg;
 
@@ -927,6 +951,27 @@
 	return err;
 }
 
+int mlx4_QUERY_FW_wrapper(struct mlx4_dev *dev, int slave,
+			  struct mlx4_vhcr *vhcr,
+			  struct mlx4_cmd_mailbox *inbox,
+			  struct mlx4_cmd_mailbox *outbox,
+			  struct mlx4_cmd_info *cmd)
+{
+	u8 *outbuf;
+	int err;
+
+	outbuf = outbox->buf;
+	err = mlx4_cmd_box(dev, 0, outbox->dma, 0, 0, MLX4_CMD_QUERY_FW,
+			    MLX4_CMD_TIME_CLASS_A, MLX4_CMD_NATIVE);
+	if (err)
+		return err;
+
+	/* for slaves, zero out everything except FW version */
+	outbuf[0] = outbuf[1] = 0;
+	memset(&outbuf[8], 0, QUERY_FW_OUT_SIZE - 8);
+	return 0;
+}
+
 static void get_board_id(void *vsd, char *board_id)
 {
 	int i;
diff --git a/drivers/net/ethernet/mellanox/mlx4/main.c b/drivers/net/ethernet/mellanox/mlx4/main.c
index 2e024a6..a0313de 100644
--- a/drivers/net/ethernet/mellanox/mlx4/main.c
+++ b/drivers/net/ethernet/mellanox/mlx4/main.c
@@ -142,12 +142,6 @@
 	struct pci_dev *pdev;
 };
 
-static inline int mlx4_master_get_num_eqs(struct mlx4_dev *dev)
-{
-	return dev->caps.reserved_eqs +
-		MLX4_MFUNC_EQ_NUM * (dev->num_slaves + 1);
-}
-
 int mlx4_check_port_params(struct mlx4_dev *dev,
 			   enum mlx4_port_type *port_type)
 {
@@ -217,6 +211,7 @@
 	}
 
 	dev->caps.num_ports	     = dev_cap->num_ports;
+	dev->phys_caps.num_phys_eqs  = MLX4_MAX_EQ_NUM;
 	for (i = 1; i <= dev->caps.num_ports; ++i) {
 		dev->caps.vl_cap[i]	    = dev_cap->max_vl[i];
 		dev->caps.ib_mtu_cap[i]	    = dev_cap->ib_mtu[i];
@@ -435,12 +430,17 @@
 	mlx4_log_num_mgm_entry_size = hca_param.log_mc_entry_sz;
 
 	memset(&dev_cap, 0, sizeof(dev_cap));
+	dev->caps.max_qp_dest_rdma = 1 << hca_param.log_rd_per_qp;
 	err = mlx4_dev_cap(dev, &dev_cap);
 	if (err) {
 		mlx4_err(dev, "QUERY_DEV_CAP command failed, aborting.\n");
 		return err;
 	}
 
+	err = mlx4_QUERY_FW(dev);
+	if (err)
+		mlx4_err(dev, "QUERY_FW command failed: could not get FW version.\n");
+
 	page_size = ~dev->caps.page_size_cap + 1;
 	mlx4_warn(dev, "HCA minimum page size:%d\n", page_size);
 	if (page_size > PAGE_SIZE) {
@@ -485,15 +485,15 @@
 	dev->caps.num_mgms              = 0;
 	dev->caps.num_amgms             = 0;
 
-	for (i = 1; i <= dev->caps.num_ports; ++i)
-		dev->caps.port_mask[i] = dev->caps.port_type[i];
-
 	if (dev->caps.num_ports > MLX4_MAX_PORTS) {
 		mlx4_err(dev, "HCA has %d ports, but we only support %d, "
 			 "aborting.\n", dev->caps.num_ports, MLX4_MAX_PORTS);
 		return -ENODEV;
 	}
 
+	for (i = 1; i <= dev->caps.num_ports; ++i)
+		dev->caps.port_mask[i] = dev->caps.port_type[i];
+
 	if (dev->caps.uar_page_size * (dev->caps.num_uars -
 				       dev->caps.reserved_uars) >
 				       pci_resource_len(dev->pdev, 2)) {
@@ -504,18 +504,6 @@
 		return -ENODEV;
 	}
 
-#if 0
-	mlx4_warn(dev, "sqp_demux:%d\n", dev->caps.sqp_demux);
-	mlx4_warn(dev, "num_uars:%d reserved_uars:%d uar region:0x%x bar2:0x%llx\n",
-		  dev->caps.num_uars, dev->caps.reserved_uars,
-		  dev->caps.uar_page_size * dev->caps.num_uars,
-		  pci_resource_len(dev->pdev, 2));
-	mlx4_warn(dev, "num_eqs:%d reserved_eqs:%d\n", dev->caps.num_eqs,
-		  dev->caps.reserved_eqs);
-	mlx4_warn(dev, "num_pds:%d reserved_pds:%d slave_pd_shift:%d pd_base:%d\n",
-		  dev->caps.num_pds, dev->caps.reserved_pds,
-		  dev->caps.slave_pd_shift, dev->caps.pd_base);
-#endif
 	return 0;
 }
 
@@ -810,9 +798,8 @@
 	if (err)
 		goto err_srq;
 
-	num_eqs = (mlx4_is_master(dev)) ?
-		roundup_pow_of_two(mlx4_master_get_num_eqs(dev)) :
-		dev->caps.num_eqs;
+	num_eqs = (mlx4_is_master(dev)) ? dev->phys_caps.num_phys_eqs :
+		  dev->caps.num_eqs;
 	err = mlx4_init_icm_table(dev, &priv->eq_table.cmpt_table,
 				  cmpt_base +
 				  ((u64) (MLX4_CMPT_TYPE_EQ *
@@ -874,9 +861,8 @@
 	}
 
 
-	num_eqs = (mlx4_is_master(dev)) ?
-		roundup_pow_of_two(mlx4_master_get_num_eqs(dev)) :
-		dev->caps.num_eqs;
+	num_eqs = (mlx4_is_master(dev)) ? dev->phys_caps.num_phys_eqs :
+		   dev->caps.num_eqs;
 	err = mlx4_init_icm_table(dev, &priv->eq_table.table,
 				  init_hca->eqc_base, dev_cap->eqc_entry_sz,
 				  num_eqs, num_eqs, 0, 0);
@@ -1989,6 +1975,8 @@
 	if (err == -EBUSY && (dev->flags & MLX4_FLAG_MSI_X) &&
 	    !mlx4_is_mfunc(dev)) {
 		dev->flags &= ~MLX4_FLAG_MSI_X;
+		dev->caps.num_comp_vectors = 1;
+		dev->caps.comp_pool	   = 0;
 		pci_disable_msix(pdev);
 		err = mlx4_setup_hca(dev);
 	}
diff --git a/drivers/net/ethernet/mellanox/mlx4/mlx4.h b/drivers/net/ethernet/mellanox/mlx4/mlx4.h
index 86b6e5a..e5d2022 100644
--- a/drivers/net/ethernet/mellanox/mlx4/mlx4.h
+++ b/drivers/net/ethernet/mellanox/mlx4/mlx4.h
@@ -1039,6 +1039,11 @@
 void mlx4_free_resource_tracker(struct mlx4_dev *dev,
 				enum mlx4_res_tracker_free_type type);
 
+int mlx4_QUERY_FW_wrapper(struct mlx4_dev *dev, int slave,
+			  struct mlx4_vhcr *vhcr,
+			  struct mlx4_cmd_mailbox *inbox,
+			  struct mlx4_cmd_mailbox *outbox,
+			  struct mlx4_cmd_info *cmd);
 int mlx4_SET_PORT_wrapper(struct mlx4_dev *dev, int slave,
 			  struct mlx4_vhcr *vhcr,
 			  struct mlx4_cmd_mailbox *inbox,
@@ -1054,6 +1059,11 @@
 			    struct mlx4_cmd_mailbox *inbox,
 			    struct mlx4_cmd_mailbox *outbox,
 			    struct mlx4_cmd_info *cmd);
+int mlx4_QUERY_DEV_CAP_wrapper(struct mlx4_dev *dev, int slave,
+			       struct mlx4_vhcr *vhcr,
+			       struct mlx4_cmd_mailbox *inbox,
+			       struct mlx4_cmd_mailbox *outbox,
+			       struct mlx4_cmd_info *cmd);
 int mlx4_QUERY_PORT_wrapper(struct mlx4_dev *dev, int slave,
 			    struct mlx4_vhcr *vhcr,
 			    struct mlx4_cmd_mailbox *inbox,
diff --git a/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h b/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h
index 6ae3509..225c20d 100644
--- a/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h
+++ b/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h
@@ -495,6 +495,7 @@
 	int vids[128];
 	bool wol;
 	struct device *ddev;
+	int base_tx_qpn;
 
 #ifdef CONFIG_MLX4_EN_DCB
 	struct ieee_ets ets;
diff --git a/drivers/net/ethernet/mellanox/mlx4/port.c b/drivers/net/ethernet/mellanox/mlx4/port.c
index 1fe2c7a..a8fb529 100644
--- a/drivers/net/ethernet/mellanox/mlx4/port.c
+++ b/drivers/net/ethernet/mellanox/mlx4/port.c
@@ -697,10 +697,10 @@
 	if (slave != dev->caps.function)
 		memset(inbox->buf, 0, 256);
 	if (dev->flags & MLX4_FLAG_OLD_PORT_CMDS) {
-		*(u8 *) inbox->buf	   = !!reset_qkey_viols << 6;
+		*(u8 *) inbox->buf	   |= !!reset_qkey_viols << 6;
 		((__be32 *) inbox->buf)[2] = agg_cap_mask;
 	} else {
-		((u8 *) inbox->buf)[3]     = !!reset_qkey_viols;
+		((u8 *) inbox->buf)[3]     |= !!reset_qkey_viols;
 		((__be32 *) inbox->buf)[1] = agg_cap_mask;
 	}
 
diff --git a/drivers/net/ethernet/mellanox/mlx4/profile.c b/drivers/net/ethernet/mellanox/mlx4/profile.c
index 06e5ade..b83bc92 100644
--- a/drivers/net/ethernet/mellanox/mlx4/profile.c
+++ b/drivers/net/ethernet/mellanox/mlx4/profile.c
@@ -126,7 +126,9 @@
 	profile[MLX4_RES_AUXC].num    = request->num_qp;
 	profile[MLX4_RES_SRQ].num     = request->num_srq;
 	profile[MLX4_RES_CQ].num      = request->num_cq;
-	profile[MLX4_RES_EQ].num      = min_t(unsigned, dev_cap->max_eqs, MAX_MSIX);
+	profile[MLX4_RES_EQ].num      = mlx4_is_mfunc(dev) ?
+					dev->phys_caps.num_phys_eqs :
+					min_t(unsigned, dev_cap->max_eqs, MAX_MSIX);
 	profile[MLX4_RES_DMPT].num    = request->num_mpt;
 	profile[MLX4_RES_CMPT].num    = MLX4_NUM_CMPTS;
 	profile[MLX4_RES_MTT].num     = request->num_mtt * (1 << log_mtts_per_seg);
@@ -215,9 +217,10 @@
 			init_hca->log_num_cqs = profile[i].log_num;
 			break;
 		case MLX4_RES_EQ:
-			dev->caps.num_eqs     = profile[i].num;
+			dev->caps.num_eqs     = roundup_pow_of_two(min_t(unsigned, dev_cap->max_eqs,
+									 MAX_MSIX));
 			init_hca->eqc_base    = profile[i].start;
-			init_hca->log_num_eqs = profile[i].log_num;
+			init_hca->log_num_eqs = ilog2(dev->caps.num_eqs);
 			break;
 		case MLX4_RES_DMPT:
 			dev->caps.num_mpts	= profile[i].num;
diff --git a/drivers/net/ethernet/nxp/lpc_eth.c b/drivers/net/ethernet/nxp/lpc_eth.c
index 8d2666f..083d671 100644
--- a/drivers/net/ethernet/nxp/lpc_eth.c
+++ b/drivers/net/ethernet/nxp/lpc_eth.c
@@ -946,16 +946,16 @@
 			/* Update stats */
 			ndev->stats.tx_packets++;
 			ndev->stats.tx_bytes += skb->len;
-
-			/* Free buffer */
-			dev_kfree_skb_irq(skb);
 		}
+		dev_kfree_skb_irq(skb);
 
 		txcidx = readl(LPC_ENET_TXCONSUMEINDEX(pldat->net_base));
 	}
 
-	if (netif_queue_stopped(ndev))
-		netif_wake_queue(ndev);
+	if (pldat->num_used_tx_buffs <= ENET_TX_DESC/2) {
+		if (netif_queue_stopped(ndev))
+			netif_wake_queue(ndev);
+	}
 }
 
 static int __lpc_handle_recv(struct net_device *ndev, int budget)
@@ -1320,6 +1320,7 @@
 	.ndo_set_rx_mode	= lpc_eth_set_multicast_list,
 	.ndo_do_ioctl		= lpc_eth_ioctl,
 	.ndo_set_mac_address	= lpc_set_mac_address,
+	.ndo_change_mtu		= eth_change_mtu,
 };
 
 static int lpc_eth_drv_probe(struct platform_device *pdev)
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c
index 46e77a2..ad98f4d 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c
@@ -479,7 +479,7 @@
 
 	for (i = 0; i < QLCNIC_MAX_PCI_FUNC; i++) {
 		pfn = pci_info[i].id;
-		if (pfn > QLCNIC_MAX_PCI_FUNC) {
+		if (pfn >= QLCNIC_MAX_PCI_FUNC) {
 			ret = QL_STATUS_INVALID_PARAM;
 			goto err_eswitch;
 		}
diff --git a/drivers/net/ethernet/rdc/r6040.c b/drivers/net/ethernet/rdc/r6040.c
index 4de7364..d1827e8 100644
--- a/drivers/net/ethernet/rdc/r6040.c
+++ b/drivers/net/ethernet/rdc/r6040.c
@@ -1096,20 +1096,20 @@
 	if (err) {
 		dev_err(&pdev->dev, "32-bit PCI DMA addresses"
 				"not supported by the card\n");
-		goto err_out;
+		goto err_out_disable_dev;
 	}
 	err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32));
 	if (err) {
 		dev_err(&pdev->dev, "32-bit PCI DMA addresses"
 				"not supported by the card\n");
-		goto err_out;
+		goto err_out_disable_dev;
 	}
 
 	/* IO Size check */
 	if (pci_resource_len(pdev, bar) < io_size) {
 		dev_err(&pdev->dev, "Insufficient PCI resources, aborting\n");
 		err = -EIO;
-		goto err_out;
+		goto err_out_disable_dev;
 	}
 
 	pci_set_master(pdev);
@@ -1117,7 +1117,7 @@
 	dev = alloc_etherdev(sizeof(struct r6040_private));
 	if (!dev) {
 		err = -ENOMEM;
-		goto err_out;
+		goto err_out_disable_dev;
 	}
 	SET_NETDEV_DEV(dev, &pdev->dev);
 	lp = netdev_priv(dev);
@@ -1233,11 +1233,15 @@
 err_out_mdio:
 	mdiobus_free(lp->mii_bus);
 err_out_unmap:
+	netif_napi_del(&lp->napi);
+	pci_set_drvdata(pdev, NULL);
 	pci_iounmap(pdev, ioaddr);
 err_out_free_res:
 	pci_release_regions(pdev);
 err_out_free_dev:
 	free_netdev(dev);
+err_out_disable_dev:
+	pci_disable_device(pdev);
 err_out:
 	return err;
 }
@@ -1251,6 +1255,9 @@
 	mdiobus_unregister(lp->mii_bus);
 	kfree(lp->mii_bus->irq);
 	mdiobus_free(lp->mii_bus);
+	netif_napi_del(&lp->napi);
+	pci_set_drvdata(pdev, NULL);
+	pci_iounmap(pdev, lp->base);
 	pci_release_regions(pdev);
 	free_netdev(dev);
 	pci_disable_device(pdev);
diff --git a/drivers/net/ethernet/realtek/8139cp.c b/drivers/net/ethernet/realtek/8139cp.c
index 5eef290..995d0cf 100644
--- a/drivers/net/ethernet/realtek/8139cp.c
+++ b/drivers/net/ethernet/realtek/8139cp.c
@@ -979,6 +979,17 @@
 	cpw32_f (MAC0 + 0, le32_to_cpu (*(__le32 *) (dev->dev_addr + 0)));
 	cpw32_f (MAC0 + 4, le32_to_cpu (*(__le32 *) (dev->dev_addr + 4)));
 
+	cpw32_f(HiTxRingAddr, 0);
+	cpw32_f(HiTxRingAddr + 4, 0);
+
+	ring_dma = cp->ring_dma;
+	cpw32_f(RxRingAddr, ring_dma & 0xffffffff);
+	cpw32_f(RxRingAddr + 4, (ring_dma >> 16) >> 16);
+
+	ring_dma += sizeof(struct cp_desc) * CP_RX_RING_SIZE;
+	cpw32_f(TxRingAddr, ring_dma & 0xffffffff);
+	cpw32_f(TxRingAddr + 4, (ring_dma >> 16) >> 16);
+
 	cp_start_hw(cp);
 	cpw8(TxThresh, 0x06); /* XXX convert magic num to a constant */
 
@@ -992,17 +1003,6 @@
 
 	cpw8(Config5, cpr8(Config5) & PMEStatus);
 
-	cpw32_f(HiTxRingAddr, 0);
-	cpw32_f(HiTxRingAddr + 4, 0);
-
-	ring_dma = cp->ring_dma;
-	cpw32_f(RxRingAddr, ring_dma & 0xffffffff);
-	cpw32_f(RxRingAddr + 4, (ring_dma >> 16) >> 16);
-
-	ring_dma += sizeof(struct cp_desc) * CP_RX_RING_SIZE;
-	cpw32_f(TxRingAddr, ring_dma & 0xffffffff);
-	cpw32_f(TxRingAddr + 4, (ring_dma >> 16) >> 16);
-
 	cpw16(MultiIntr, 0);
 
 	cpw8_f(Cfg9346, Cfg9346_Lock);
@@ -1636,7 +1636,7 @@
 
 static void eeprom_cmd_end(void __iomem *ee_addr)
 {
-	writeb (~EE_CS, ee_addr);
+	writeb(0, ee_addr);
 	eeprom_delay ();
 }
 
diff --git a/drivers/net/ethernet/realtek/8139too.c b/drivers/net/ethernet/realtek/8139too.c
index 03df076..1d83565 100644
--- a/drivers/net/ethernet/realtek/8139too.c
+++ b/drivers/net/ethernet/realtek/8139too.c
@@ -1173,7 +1173,7 @@
 	}
 
 	/* Terminate the EEPROM access. */
-	RTL_W8 (Cfg9346, ~EE_CS);
+	RTL_W8(Cfg9346, 0);
 	eeprom_delay ();
 
 	return retval;
diff --git a/drivers/net/ethernet/realtek/r8169.c b/drivers/net/ethernet/realtek/r8169.c
index 00b4f56..d7a04e0 100644
--- a/drivers/net/ethernet/realtek/r8169.c
+++ b/drivers/net/ethernet/realtek/r8169.c
@@ -3894,6 +3894,7 @@
 	case RTL_GIGA_MAC_VER_22:
 	case RTL_GIGA_MAC_VER_23:
 	case RTL_GIGA_MAC_VER_24:
+	case RTL_GIGA_MAC_VER_34:
 		RTL_W32(RxConfig, RX128_INT_EN | RX_MULTI_EN | RX_DMA_BURST);
 		break;
 	default:
@@ -5889,11 +5890,7 @@
 	if (status & LinkChg)
 		__rtl8169_check_link_status(dev, tp, tp->mmio_addr, true);
 
-	napi_disable(&tp->napi);
-	rtl_irq_disable(tp);
-
-	napi_enable(&tp->napi);
-	napi_schedule(&tp->napi);
+	rtl_irq_enable_all(tp);
 }
 
 static void rtl_task(struct work_struct *work)
@@ -6345,6 +6342,8 @@
 
 	cancel_work_sync(&tp->wk.work);
 
+	netif_napi_del(&tp->napi);
+
 	unregister_netdev(dev);
 
 	rtl_release_firmware(tp);
@@ -6668,6 +6667,7 @@
 	return rc;
 
 err_out_msi_4:
+	netif_napi_del(&tp->napi);
 	rtl_disable_msi(pdev, tp);
 	iounmap(ioaddr);
 err_out_free_res_3:
diff --git a/drivers/net/ethernet/renesas/sh_eth.c b/drivers/net/ethernet/renesas/sh_eth.c
index be3c221..79bf09b 100644
--- a/drivers/net/ethernet/renesas/sh_eth.c
+++ b/drivers/net/ethernet/renesas/sh_eth.c
@@ -1011,7 +1011,7 @@
 }
 
 /* Packet receive function */
-static int sh_eth_rx(struct net_device *ndev)
+static int sh_eth_rx(struct net_device *ndev, u32 intr_status)
 {
 	struct sh_eth_private *mdp = netdev_priv(ndev);
 	struct sh_eth_rxdesc *rxdesc;
@@ -1101,8 +1101,14 @@
 
 	/* Restart Rx engine if stopped. */
 	/* If we don't need to check status, don't. -KDU */
-	if (!(sh_eth_read(ndev, EDRRR) & EDRRR_R))
+	if (!(sh_eth_read(ndev, EDRRR) & EDRRR_R)) {
+		/* fix the values for the next receiving if RDE is set */
+		if (intr_status & EESR_RDE)
+			mdp->cur_rx = mdp->dirty_rx =
+				(sh_eth_read(ndev, RDFAR) -
+				 sh_eth_read(ndev, RDLAR)) >> 4;
 		sh_eth_write(ndev, EDRRR_R, EDRRR);
+	}
 
 	return 0;
 }
@@ -1199,8 +1205,6 @@
 		/* Receive Descriptor Empty int */
 		ndev->stats.rx_over_errors++;
 
-		if (sh_eth_read(ndev, EDRRR) ^ EDRRR_R)
-			sh_eth_write(ndev, EDRRR_R, EDRRR);
 		if (netif_msg_rx_err(mdp))
 			dev_err(&ndev->dev, "Receive Descriptor Empty\n");
 	}
@@ -1271,7 +1275,7 @@
 			EESR_RTSF | /* short frame recv */
 			EESR_PRE  | /* PHY-LSI recv error */
 			EESR_CERF)){ /* recv frame CRC error */
-		sh_eth_rx(ndev);
+		sh_eth_rx(ndev, intr_status);
 	}
 
 	/* Tx Check */
diff --git a/drivers/net/ethernet/smsc/smsc911x.c b/drivers/net/ethernet/smsc/smsc911x.c
index dab9c6f..1466e5d 100644
--- a/drivers/net/ethernet/smsc/smsc911x.c
+++ b/drivers/net/ethernet/smsc/smsc911x.c
@@ -2390,11 +2390,11 @@
 
 	retval = smsc911x_request_resources(pdev);
 	if (retval)
-		goto out_return_resources;
+		goto out_request_resources_fail;
 
 	retval = smsc911x_enable_resources(pdev);
 	if (retval)
-		goto out_disable_resources;
+		goto out_enable_resources_fail;
 
 	if (pdata->ioaddr == NULL) {
 		SMSC_WARN(pdata, probe, "Error smsc911x base address invalid");
@@ -2501,8 +2501,9 @@
 	free_irq(dev->irq, dev);
 out_disable_resources:
 	(void)smsc911x_disable_resources(pdev);
-out_return_resources:
+out_enable_resources_fail:
 	smsc911x_free_resources(pdev);
+out_request_resources_fail:
 	platform_set_drvdata(pdev, NULL);
 	iounmap(pdata->ioaddr);
 	free_netdev(dev);
diff --git a/drivers/net/ethernet/stmicro/stmmac/Kconfig b/drivers/net/ethernet/stmicro/stmmac/Kconfig
index 0364283..9f44827 100644
--- a/drivers/net/ethernet/stmicro/stmmac/Kconfig
+++ b/drivers/net/ethernet/stmicro/stmmac/Kconfig
@@ -13,7 +13,7 @@
 if STMMAC_ETH
 
 config STMMAC_PLATFORM
-	tristate "STMMAC platform bus support"
+	bool "STMMAC Platform bus support"
 	depends on STMMAC_ETH
 	default y
 	---help---
@@ -26,7 +26,7 @@
 	  If unsure, say N.
 
 config STMMAC_PCI
-	tristate "STMMAC support on PCI bus (EXPERIMENTAL)"
+	bool "STMMAC PCI bus support (EXPERIMENTAL)"
 	depends on STMMAC_ETH && PCI && EXPERIMENTAL
 	---help---
 	  This is to select the Synopsys DWMAC available on PCI devices,
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac.h b/drivers/net/ethernet/stmicro/stmmac/stmmac.h
index 6b5d060..dc20c56 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac.h
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac.h
@@ -26,6 +26,7 @@
 #include <linux/clk.h>
 #include <linux/stmmac.h>
 #include <linux/phy.h>
+#include <linux/pci.h>
 #include "common.h"
 #ifdef CONFIG_STMMAC_TIMER
 #include "stmmac_timer.h"
@@ -95,7 +96,6 @@
 extern void stmmac_set_ethtool_ops(struct net_device *netdev);
 extern const struct stmmac_desc_ops enh_desc_ops;
 extern const struct stmmac_desc_ops ndesc_ops;
-
 int stmmac_freeze(struct net_device *ndev);
 int stmmac_restore(struct net_device *ndev);
 int stmmac_resume(struct net_device *ndev);
@@ -109,7 +109,7 @@
 static inline int stmmac_clk_enable(struct stmmac_priv *priv)
 {
 	if (!IS_ERR(priv->stmmac_clk))
-		return clk_enable(priv->stmmac_clk);
+		return clk_prepare_enable(priv->stmmac_clk);
 
 	return 0;
 }
@@ -119,7 +119,7 @@
 	if (IS_ERR(priv->stmmac_clk))
 		return;
 
-	clk_disable(priv->stmmac_clk);
+	clk_disable_unprepare(priv->stmmac_clk);
 }
 static inline int stmmac_clk_get(struct stmmac_priv *priv)
 {
@@ -143,3 +143,60 @@
 	return 0;
 }
 #endif /* CONFIG_HAVE_CLK */
+
+
+#ifdef CONFIG_STMMAC_PLATFORM
+extern struct platform_driver stmmac_pltfr_driver;
+static inline int stmmac_register_platform(void)
+{
+	int err;
+
+	err = platform_driver_register(&stmmac_pltfr_driver);
+	if (err)
+		pr_err("stmmac: failed to register the platform driver\n");
+
+	return err;
+}
+static inline void stmmac_unregister_platform(void)
+{
+	platform_driver_unregister(&stmmac_pltfr_driver);
+}
+#else
+static inline int stmmac_register_platform(void)
+{
+	pr_debug("stmmac: do not register the platform driver\n");
+
+	return -EINVAL;
+}
+static inline void stmmac_unregister_platform(void)
+{
+}
+#endif /* CONFIG_STMMAC_PLATFORM */
+
+#ifdef CONFIG_STMMAC_PCI
+extern struct pci_driver stmmac_pci_driver;
+static inline int stmmac_register_pci(void)
+{
+	int err;
+
+	err = pci_register_driver(&stmmac_pci_driver);
+	if (err)
+		pr_err("stmmac: failed to register the PCI driver\n");
+
+	return err;
+}
+static inline void stmmac_unregister_pci(void)
+{
+	pci_unregister_driver(&stmmac_pci_driver);
+}
+#else
+static inline int stmmac_register_pci(void)
+{
+	pr_debug("stmmac: do not register the PCI driver\n");
+
+	return -EINVAL;
+}
+static inline void stmmac_unregister_pci(void)
+{
+}
+#endif /* CONFIG_STMMAC_PCI */
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
index 7096633..51b3b68 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
@@ -833,8 +833,9 @@
 
 /**
  * stmmac_selec_desc_mode
- * @dev : device pointer
- * Description: select the Enhanced/Alternate or Normal descriptors */
+ * @priv : private structure
+ * Description: select the Enhanced/Alternate or Normal descriptors
+ */
 static void stmmac_selec_desc_mode(struct stmmac_priv *priv)
 {
 	if (priv->plat->enh_desc) {
@@ -1861,6 +1862,8 @@
 /**
  * stmmac_dvr_probe
  * @device: device pointer
+ * @plat_dat: platform data pointer
+ * @addr: iobase memory address
  * Description: this is the main probe function used to
  * call the alloc_etherdev, allocate the priv structure.
  */
@@ -2090,6 +2093,34 @@
 }
 #endif /* CONFIG_PM */
 
+/* Driver can be configured w/ and w/o both the PCI and platform drivers
+ * depending on the configuration selected.
+ */
+static int __init stmmac_init(void)
+{
+	int err_plt = 0;
+	int err_pci = 0;
+
+	err_plt = stmmac_register_platform();
+	err_pci = stmmac_register_pci();
+
+	if ((err_pci) && (err_plt)) {
+		pr_err("stmmac: driver registration failed\n");
+		return -EINVAL;
+	}
+
+	return 0;
+}
+
+static void __exit stmmac_exit(void)
+{
+	stmmac_unregister_platform();
+	stmmac_unregister_pci();
+}
+
+module_init(stmmac_init);
+module_exit(stmmac_exit);
+
 #ifndef MODULE
 static int __init stmmac_cmdline_opt(char *str)
 {
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_pci.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_pci.c
index 58fab53..cf826e6 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_pci.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_pci.c
@@ -179,7 +179,7 @@
 
 MODULE_DEVICE_TABLE(pci, stmmac_id_table);
 
-static struct pci_driver stmmac_driver = {
+struct pci_driver stmmac_pci_driver = {
 	.name = STMMAC_RESOURCE_NAME,
 	.id_table = stmmac_id_table,
 	.probe = stmmac_pci_probe,
@@ -190,33 +190,6 @@
 #endif
 };
 
-/**
- * stmmac_init_module - Entry point for the driver
- * Description: This function is the entry point for the driver.
- */
-static int __init stmmac_init_module(void)
-{
-	int ret;
-
-	ret = pci_register_driver(&stmmac_driver);
-	if (ret < 0)
-		pr_err("%s: ERROR: driver registration failed\n", __func__);
-
-	return ret;
-}
-
-/**
- * stmmac_cleanup_module - Cleanup routine for the driver
- * Description: This function is the cleanup routine for the driver.
- */
-static void __exit stmmac_cleanup_module(void)
-{
-	pci_unregister_driver(&stmmac_driver);
-}
-
-module_init(stmmac_init_module);
-module_exit(stmmac_cleanup_module);
-
 MODULE_DESCRIPTION("STMMAC 10/100/1000 Ethernet PCI driver");
 MODULE_AUTHOR("Rayagond Kokatanur <rayagond.kokatanur@vayavyalabs.com>");
 MODULE_AUTHOR("Giuseppe Cavallaro <peppe.cavallaro@st.com>");
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c
index 3dd8f08..680d2b8 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c
@@ -255,7 +255,7 @@
 };
 MODULE_DEVICE_TABLE(of, stmmac_dt_ids);
 
-static struct platform_driver stmmac_driver = {
+struct platform_driver stmmac_pltfr_driver = {
 	.probe = stmmac_pltfr_probe,
 	.remove = stmmac_pltfr_remove,
 	.driver = {
@@ -266,8 +266,6 @@
 		   },
 };
 
-module_platform_driver(stmmac_driver);
-
 MODULE_DESCRIPTION("STMMAC 10/100/1000 Ethernet PLATFORM driver");
 MODULE_AUTHOR("Giuseppe Cavallaro <peppe.cavallaro@st.com>");
 MODULE_LICENSE("GPL");
diff --git a/drivers/net/ethernet/sun/niu.c b/drivers/net/ethernet/sun/niu.c
index 703c8cc..8c726b7 100644
--- a/drivers/net/ethernet/sun/niu.c
+++ b/drivers/net/ethernet/sun/niu.c
@@ -3598,7 +3598,6 @@
 static void niu_tx_work(struct niu *np, struct tx_ring_info *rp)
 {
 	struct netdev_queue *txq;
-	unsigned int tx_bytes;
 	u16 pkt_cnt, tmp;
 	int cons, index;
 	u64 cs;
@@ -3621,18 +3620,12 @@
 	netif_printk(np, tx_done, KERN_DEBUG, np->dev,
 		     "%s() pkt_cnt[%u] cons[%d]\n", __func__, pkt_cnt, cons);
 
-	tx_bytes = 0;
-	tmp = pkt_cnt;
-	while (tmp--) {
-		tx_bytes += rp->tx_buffs[cons].skb->len;
+	while (pkt_cnt--)
 		cons = release_tx_packet(np, rp, cons);
-	}
 
 	rp->cons = cons;
 	smp_mb();
 
-	netdev_tx_completed_queue(txq, pkt_cnt, tx_bytes);
-
 out:
 	if (unlikely(netif_tx_queue_stopped(txq) &&
 		     (niu_tx_avail(rp) > NIU_TX_WAKEUP_THRESH(rp)))) {
@@ -4333,7 +4326,6 @@
 			struct tx_ring_info *rp = &np->tx_rings[i];
 
 			niu_free_tx_ring_info(np, rp);
-			netdev_tx_reset_queue(netdev_get_tx_queue(np->dev, i));
 		}
 		kfree(np->tx_rings);
 		np->tx_rings = NULL;
@@ -6739,8 +6731,6 @@
 		prod = NEXT_TX(rp, prod);
 	}
 
-	netdev_tx_sent_queue(txq, skb->len);
-
 	if (prod < rp->prod)
 		rp->wrap_bit ^= TX_RING_KICK_WRAP;
 	rp->prod = prod;
diff --git a/drivers/net/ethernet/ti/Kconfig b/drivers/net/ethernet/ti/Kconfig
index b42252c..1b173a6 100644
--- a/drivers/net/ethernet/ti/Kconfig
+++ b/drivers/net/ethernet/ti/Kconfig
@@ -51,7 +51,7 @@
 
 config TI_CPSW
 	tristate "TI CPSW Switch Support"
-	depends on ARM && (ARCH_DAVINCI || SOC_OMAPAM33XX)
+	depends on ARM && (ARCH_DAVINCI || SOC_AM33XX)
 	select TI_DAVINCI_CPDMA
 	select TI_DAVINCI_MDIO
 	---help---
diff --git a/drivers/net/ethernet/tile/Kconfig b/drivers/net/ethernet/tile/Kconfig
index 2d9218f..098b1c4 100644
--- a/drivers/net/ethernet/tile/Kconfig
+++ b/drivers/net/ethernet/tile/Kconfig
@@ -7,6 +7,8 @@
 	depends on TILE
 	default y
 	select CRC32
+	select TILE_GXIO_MPIPE if TILEGX
+	select HIGH_RES_TIMERS if TILEGX
 	---help---
 	  This is a standard Linux network device driver for the
 	  on-chip Tilera Gigabit Ethernet and XAUI interfaces.
diff --git a/drivers/net/ethernet/tile/Makefile b/drivers/net/ethernet/tile/Makefile
index f634f14..0ef9eef 100644
--- a/drivers/net/ethernet/tile/Makefile
+++ b/drivers/net/ethernet/tile/Makefile
@@ -4,7 +4,7 @@
 
 obj-$(CONFIG_TILE_NET) += tile_net.o
 ifdef CONFIG_TILEGX
-tile_net-objs := tilegx.o mpipe.o iorpc_mpipe.o dma_queue.o
+tile_net-y := tilegx.o
 else
-tile_net-objs := tilepro.o
+tile_net-y := tilepro.o
 endif
diff --git a/drivers/net/ethernet/tile/tilegx.c b/drivers/net/ethernet/tile/tilegx.c
new file mode 100644
index 0000000..83b4b38
--- /dev/null
+++ b/drivers/net/ethernet/tile/tilegx.c
@@ -0,0 +1,1898 @@
+/*
+ * Copyright 2012 Tilera Corporation. All Rights Reserved.
+ *
+ *   This program is free software; you can redistribute it and/or
+ *   modify it under the terms of the GNU General Public License
+ *   as published by the Free Software Foundation, version 2.
+ *
+ *   This program is distributed in the hope that it will be useful, but
+ *   WITHOUT ANY WARRANTY; without even the implied warranty of
+ *   MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
+ *   NON INFRINGEMENT.  See the GNU General Public License for
+ *   more details.
+ */
+
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/moduleparam.h>
+#include <linux/sched.h>
+#include <linux/kernel.h>      /* printk() */
+#include <linux/slab.h>        /* kmalloc() */
+#include <linux/errno.h>       /* error codes */
+#include <linux/types.h>       /* size_t */
+#include <linux/interrupt.h>
+#include <linux/in.h>
+#include <linux/irq.h>
+#include <linux/netdevice.h>   /* struct device, and other headers */
+#include <linux/etherdevice.h> /* eth_type_trans */
+#include <linux/skbuff.h>
+#include <linux/ioctl.h>
+#include <linux/cdev.h>
+#include <linux/hugetlb.h>
+#include <linux/in6.h>
+#include <linux/timer.h>
+#include <linux/hrtimer.h>
+#include <linux/ktime.h>
+#include <linux/io.h>
+#include <linux/ctype.h>
+#include <linux/ip.h>
+#include <linux/tcp.h>
+
+#include <asm/checksum.h>
+#include <asm/homecache.h>
+#include <gxio/mpipe.h>
+#include <arch/sim.h>
+
+/* Default transmit lockup timeout period, in jiffies. */
+#define TILE_NET_TIMEOUT (5 * HZ)
+
+/* The maximum number of distinct channels (idesc.channel is 5 bits). */
+#define TILE_NET_CHANNELS 32
+
+/* Maximum number of idescs to handle per "poll". */
+#define TILE_NET_BATCH 128
+
+/* Maximum number of packets to handle per "poll". */
+#define TILE_NET_WEIGHT 64
+
+/* Number of entries in each iqueue. */
+#define IQUEUE_ENTRIES 512
+
+/* Number of entries in each equeue. */
+#define EQUEUE_ENTRIES 2048
+
+/* Total header bytes per equeue slot.  Must be big enough for 2 bytes
+ * of NET_IP_ALIGN alignment, plus 14 bytes (?) of L2 header, plus up to
+ * 60 bytes of actual TCP header.  We round up to align to cache lines.
+ */
+#define HEADER_BYTES 128
+
+/* Maximum completions per cpu per device (must be a power of two).
+ * ISSUE: What is the right number here?  If this is too small, then
+ * egress might block waiting for free space in a completions array.
+ * ISSUE: At the least, allocate these only for initialized echannels.
+ */
+#define TILE_NET_MAX_COMPS 64
+
+#define MAX_FRAGS (MAX_SKB_FRAGS + 1)
+
+/* Size of completions data to allocate.
+ * ISSUE: Probably more than needed since we don't use all the channels.
+ */
+#define COMPS_SIZE (TILE_NET_CHANNELS * sizeof(struct tile_net_comps))
+
+/* Size of NotifRing data to allocate. */
+#define NOTIF_RING_SIZE (IQUEUE_ENTRIES * sizeof(gxio_mpipe_idesc_t))
+
+/* Timeout to wake the per-device TX timer after we stop the queue.
+ * We don't want the timeout too short (adds overhead, and might end
+ * up causing stop/wake/stop/wake cycles) or too long (affects performance).
+ * For the 10 Gb NIC, 30 usec means roughly 30+ 1500-byte packets.
+ */
+#define TX_TIMER_DELAY_USEC 30
+
+/* Timeout to wake the per-cpu egress timer to free completions. */
+#define EGRESS_TIMER_DELAY_USEC 1000
+
+MODULE_AUTHOR("Tilera Corporation");
+MODULE_LICENSE("GPL");
+
+/* A "packet fragment" (a chunk of memory). */
+struct frag {
+	void *buf;
+	size_t length;
+};
+
+/* A single completion. */
+struct tile_net_comp {
+	/* The "complete_count" when the completion will be complete. */
+	s64 when;
+	/* The buffer to be freed when the completion is complete. */
+	struct sk_buff *skb;
+};
+
+/* The completions for a given cpu and echannel. */
+struct tile_net_comps {
+	/* The completions. */
+	struct tile_net_comp comp_queue[TILE_NET_MAX_COMPS];
+	/* The number of completions used. */
+	unsigned long comp_next;
+	/* The number of completions freed. */
+	unsigned long comp_last;
+};
+
+/* The transmit wake timer for a given cpu and echannel. */
+struct tile_net_tx_wake {
+	struct hrtimer timer;
+	struct net_device *dev;
+};
+
+/* Info for a specific cpu. */
+struct tile_net_info {
+	/* The NAPI struct. */
+	struct napi_struct napi;
+	/* Packet queue. */
+	gxio_mpipe_iqueue_t iqueue;
+	/* Our cpu. */
+	int my_cpu;
+	/* True if iqueue is valid. */
+	bool has_iqueue;
+	/* NAPI flags. */
+	bool napi_added;
+	bool napi_enabled;
+	/* Number of small sk_buffs which must still be provided. */
+	unsigned int num_needed_small_buffers;
+	/* Number of large sk_buffs which must still be provided. */
+	unsigned int num_needed_large_buffers;
+	/* A timer for handling egress completions. */
+	struct hrtimer egress_timer;
+	/* True if "egress_timer" is scheduled. */
+	bool egress_timer_scheduled;
+	/* Comps for each egress channel. */
+	struct tile_net_comps *comps_for_echannel[TILE_NET_CHANNELS];
+	/* Transmit wake timer for each egress channel. */
+	struct tile_net_tx_wake tx_wake[TILE_NET_CHANNELS];
+};
+
+/* Info for egress on a particular egress channel. */
+struct tile_net_egress {
+	/* The "equeue". */
+	gxio_mpipe_equeue_t *equeue;
+	/* The headers for TSO. */
+	unsigned char *headers;
+};
+
+/* Info for a specific device. */
+struct tile_net_priv {
+	/* Our network device. */
+	struct net_device *dev;
+	/* The primary link. */
+	gxio_mpipe_link_t link;
+	/* The primary channel, if open, else -1. */
+	int channel;
+	/* The "loopify" egress link, if needed. */
+	gxio_mpipe_link_t loopify_link;
+	/* The "loopify" egress channel, if open, else -1. */
+	int loopify_channel;
+	/* The egress channel (channel or loopify_channel). */
+	int echannel;
+	/* Total stats. */
+	struct net_device_stats stats;
+};
+
+/* Egress info, indexed by "priv->echannel" (lazily created as needed). */
+static struct tile_net_egress egress_for_echannel[TILE_NET_CHANNELS];
+
+/* Devices currently associated with each channel.
+ * NOTE: The array entry can become NULL after ifconfig down, but
+ * we do not free the underlying net_device structures, so it is
+ * safe to use a pointer after reading it from this array.
+ */
+static struct net_device *tile_net_devs_for_channel[TILE_NET_CHANNELS];
+
+/* A mutex for "tile_net_devs_for_channel". */
+static DEFINE_MUTEX(tile_net_devs_for_channel_mutex);
+
+/* The per-cpu info. */
+static DEFINE_PER_CPU(struct tile_net_info, per_cpu_info);
+
+/* The "context" for all devices. */
+static gxio_mpipe_context_t context;
+
+/* Buffer sizes and mpipe enum codes for buffer stacks.
+ * See arch/tile/include/gxio/mpipe.h for the set of possible values.
+ */
+#define BUFFER_SIZE_SMALL_ENUM GXIO_MPIPE_BUFFER_SIZE_128
+#define BUFFER_SIZE_SMALL 128
+#define BUFFER_SIZE_LARGE_ENUM GXIO_MPIPE_BUFFER_SIZE_1664
+#define BUFFER_SIZE_LARGE 1664
+
+/* The small/large "buffer stacks". */
+static int small_buffer_stack = -1;
+static int large_buffer_stack = -1;
+
+/* Amount of memory allocated for each buffer stack. */
+static size_t buffer_stack_size;
+
+/* The actual memory allocated for the buffer stacks. */
+static void *small_buffer_stack_va;
+static void *large_buffer_stack_va;
+
+/* The buckets. */
+static int first_bucket = -1;
+static int num_buckets = 1;
+
+/* The ingress irq. */
+static int ingress_irq = -1;
+
+/* Text value of tile_net.cpus if passed as a module parameter. */
+static char *network_cpus_string;
+
+/* The actual cpus in "network_cpus". */
+static struct cpumask network_cpus_map;
+
+/* If "loopify=LINK" was specified, this is "LINK". */
+static char *loopify_link_name;
+
+/* If "tile_net.custom" was specified, this is non-NULL. */
+static char *custom_str;
+
+/* The "tile_net.cpus" argument specifies the cpus that are dedicated
+ * to handle ingress packets.
+ *
+ * The parameter should be in the form "tile_net.cpus=m-n[,x-y]", where
+ * m, n, x, y are integer numbers that represent the cpus that can be
+ * neither a dedicated cpu nor a dataplane cpu.
+ */
+static bool network_cpus_init(void)
+{
+	char buf[1024];
+	int rc;
+
+	if (network_cpus_string == NULL)
+		return false;
+
+	rc = cpulist_parse_crop(network_cpus_string, &network_cpus_map);
+	if (rc != 0) {
+		pr_warn("tile_net.cpus=%s: malformed cpu list\n",
+			network_cpus_string);
+		return false;
+	}
+
+	/* Remove dedicated cpus. */
+	cpumask_and(&network_cpus_map, &network_cpus_map, cpu_possible_mask);
+
+	if (cpumask_empty(&network_cpus_map)) {
+		pr_warn("Ignoring empty tile_net.cpus='%s'.\n",
+			network_cpus_string);
+		return false;
+	}
+
+	cpulist_scnprintf(buf, sizeof(buf), &network_cpus_map);
+	pr_info("Linux network CPUs: %s\n", buf);
+	return true;
+}
+
+module_param_named(cpus, network_cpus_string, charp, 0444);
+MODULE_PARM_DESC(cpus, "cpulist of cores that handle network interrupts");
+
+/* The "tile_net.loopify=LINK" argument causes the named device to
+ * actually use "loop0" for ingress, and "loop1" for egress.  This
+ * allows an app to sit between the actual link and linux, passing
+ * (some) packets along to linux, and forwarding (some) packets sent
+ * out by linux.
+ */
+module_param_named(loopify, loopify_link_name, charp, 0444);
+MODULE_PARM_DESC(loopify, "name the device to use loop0/1 for ingress/egress");
+
+/* The "tile_net.custom" argument causes us to ignore the "conventional"
+ * classifier metadata, in particular, the "l2_offset".
+ */
+module_param_named(custom, custom_str, charp, 0444);
+MODULE_PARM_DESC(custom, "indicates a (heavily) customized classifier");
+
+/* Atomically update a statistics field.
+ * Note that on TILE-Gx, this operation is fire-and-forget on the
+ * issuing core (single-cycle dispatch) and takes only a few cycles
+ * longer than a regular store when the request reaches the home cache.
+ * No expensive bus management overhead is required.
+ */
+static void tile_net_stats_add(unsigned long value, unsigned long *field)
+{
+	BUILD_BUG_ON(sizeof(atomic_long_t) != sizeof(unsigned long));
+	atomic_long_add(value, (atomic_long_t *)field);
+}
+
+/* Allocate and push a buffer. */
+static bool tile_net_provide_buffer(bool small)
+{
+	int stack = small ? small_buffer_stack : large_buffer_stack;
+	const unsigned long buffer_alignment = 128;
+	struct sk_buff *skb;
+	int len;
+
+	len = sizeof(struct sk_buff **) + buffer_alignment;
+	len += (small ? BUFFER_SIZE_SMALL : BUFFER_SIZE_LARGE);
+	skb = dev_alloc_skb(len);
+	if (skb == NULL)
+		return false;
+
+	/* Make room for a back-pointer to 'skb' and guarantee alignment. */
+	skb_reserve(skb, sizeof(struct sk_buff **));
+	skb_reserve(skb, -(long)skb->data & (buffer_alignment - 1));
+
+	/* Save a back-pointer to 'skb'. */
+	*(struct sk_buff **)(skb->data - sizeof(struct sk_buff **)) = skb;
+
+	/* Make sure "skb" and the back-pointer have been flushed. */
+	wmb();
+
+	gxio_mpipe_push_buffer(&context, stack,
+			       (void *)va_to_tile_io_addr(skb->data));
+
+	return true;
+}
+
+/* Convert a raw mpipe buffer to its matching skb pointer. */
+static struct sk_buff *mpipe_buf_to_skb(void *va)
+{
+	/* Acquire the associated "skb". */
+	struct sk_buff **skb_ptr = va - sizeof(*skb_ptr);
+	struct sk_buff *skb = *skb_ptr;
+
+	/* Paranoia. */
+	if (skb->data != va) {
+		/* Panic here since there's a reasonable chance
+		 * that corrupt buffers means generic memory
+		 * corruption, with unpredictable system effects.
+		 */
+		panic("Corrupt linux buffer! va=%p, skb=%p, skb->data=%p",
+		      va, skb, skb->data);
+	}
+
+	return skb;
+}
+
+static void tile_net_pop_all_buffers(int stack)
+{
+	for (;;) {
+		tile_io_addr_t addr =
+			(tile_io_addr_t)gxio_mpipe_pop_buffer(&context, stack);
+		if (addr == 0)
+			break;
+		dev_kfree_skb_irq(mpipe_buf_to_skb(tile_io_addr_to_va(addr)));
+	}
+}
+
+/* Provide linux buffers to mPIPE. */
+static void tile_net_provide_needed_buffers(void)
+{
+	struct tile_net_info *info = &__get_cpu_var(per_cpu_info);
+
+	while (info->num_needed_small_buffers != 0) {
+		if (!tile_net_provide_buffer(true))
+			goto oops;
+		info->num_needed_small_buffers--;
+	}
+
+	while (info->num_needed_large_buffers != 0) {
+		if (!tile_net_provide_buffer(false))
+			goto oops;
+		info->num_needed_large_buffers--;
+	}
+
+	return;
+
+oops:
+	/* Add a description to the page allocation failure dump. */
+	pr_notice("Tile %d still needs some buffers\n", info->my_cpu);
+}
+
+static inline bool filter_packet(struct net_device *dev, void *buf)
+{
+	/* Filter packets received before we're up. */
+	if (dev == NULL || !(dev->flags & IFF_UP))
+		return true;
+
+	/* Filter out packets that aren't for us. */
+	if (!(dev->flags & IFF_PROMISC) &&
+	    !is_multicast_ether_addr(buf) &&
+	    compare_ether_addr(dev->dev_addr, buf) != 0)
+		return true;
+
+	return false;
+}
+
+static void tile_net_receive_skb(struct net_device *dev, struct sk_buff *skb,
+				 gxio_mpipe_idesc_t *idesc, unsigned long len)
+{
+	struct tile_net_info *info = &__get_cpu_var(per_cpu_info);
+	struct tile_net_priv *priv = netdev_priv(dev);
+
+	/* Encode the actual packet length. */
+	skb_put(skb, len);
+
+	skb->protocol = eth_type_trans(skb, dev);
+
+	/* Acknowledge "good" hardware checksums. */
+	if (idesc->cs && idesc->csum_seed_val == 0xFFFF)
+		skb->ip_summed = CHECKSUM_UNNECESSARY;
+
+	netif_receive_skb(skb);
+
+	/* Update stats. */
+	tile_net_stats_add(1, &priv->stats.rx_packets);
+	tile_net_stats_add(len, &priv->stats.rx_bytes);
+
+	/* Need a new buffer. */
+	if (idesc->size == BUFFER_SIZE_SMALL_ENUM)
+		info->num_needed_small_buffers++;
+	else
+		info->num_needed_large_buffers++;
+}
+
+/* Handle a packet.  Return true if "processed", false if "filtered". */
+static bool tile_net_handle_packet(gxio_mpipe_idesc_t *idesc)
+{
+	struct tile_net_info *info = &__get_cpu_var(per_cpu_info);
+	struct net_device *dev = tile_net_devs_for_channel[idesc->channel];
+	uint8_t l2_offset;
+	void *va;
+	void *buf;
+	unsigned long len;
+	bool filter;
+
+	/* Drop packets for which no buffer was available.
+	 * NOTE: This happens under heavy load.
+	 */
+	if (idesc->be) {
+		struct tile_net_priv *priv = netdev_priv(dev);
+		tile_net_stats_add(1, &priv->stats.rx_dropped);
+		gxio_mpipe_iqueue_consume(&info->iqueue, idesc);
+		if (net_ratelimit())
+			pr_info("Dropping packet (insufficient buffers).\n");
+		return false;
+	}
+
+	/* Get the "l2_offset", if allowed. */
+	l2_offset = custom_str ? 0 : gxio_mpipe_idesc_get_l2_offset(idesc);
+
+	/* Get the raw buffer VA (includes "headroom"). */
+	va = tile_io_addr_to_va((unsigned long)(long)idesc->va);
+
+	/* Get the actual packet start/length. */
+	buf = va + l2_offset;
+	len = idesc->l2_size - l2_offset;
+
+	/* Point "va" at the raw buffer. */
+	va -= NET_IP_ALIGN;
+
+	filter = filter_packet(dev, buf);
+	if (filter) {
+		gxio_mpipe_iqueue_drop(&info->iqueue, idesc);
+	} else {
+		struct sk_buff *skb = mpipe_buf_to_skb(va);
+
+		/* Skip headroom, and any custom header. */
+		skb_reserve(skb, NET_IP_ALIGN + l2_offset);
+
+		tile_net_receive_skb(dev, skb, idesc, len);
+	}
+
+	gxio_mpipe_iqueue_consume(&info->iqueue, idesc);
+	return !filter;
+}
+
+/* Handle some packets for the current CPU.
+ *
+ * This function handles up to TILE_NET_BATCH idescs per call.
+ *
+ * ISSUE: Since we do not provide new buffers until this function is
+ * complete, we must initially provide enough buffers for each network
+ * cpu to fill its iqueue and also its batched idescs.
+ *
+ * ISSUE: The "rotting packet" race condition occurs if a packet
+ * arrives after the queue appears to be empty, and before the
+ * hypervisor interrupt is re-enabled.
+ */
+static int tile_net_poll(struct napi_struct *napi, int budget)
+{
+	struct tile_net_info *info = &__get_cpu_var(per_cpu_info);
+	unsigned int work = 0;
+	gxio_mpipe_idesc_t *idesc;
+	int i, n;
+
+	/* Process packets. */
+	while ((n = gxio_mpipe_iqueue_try_peek(&info->iqueue, &idesc)) > 0) {
+		for (i = 0; i < n; i++) {
+			if (i == TILE_NET_BATCH)
+				goto done;
+			if (tile_net_handle_packet(idesc + i)) {
+				if (++work >= budget)
+					goto done;
+			}
+		}
+	}
+
+	/* There are no packets left. */
+	napi_complete(&info->napi);
+
+	/* Re-enable hypervisor interrupts. */
+	gxio_mpipe_enable_notif_ring_interrupt(&context, info->iqueue.ring);
+
+	/* HACK: Avoid the "rotting packet" problem. */
+	if (gxio_mpipe_iqueue_try_peek(&info->iqueue, &idesc) > 0)
+		napi_schedule(&info->napi);
+
+	/* ISSUE: Handle completions? */
+
+done:
+	tile_net_provide_needed_buffers();
+
+	return work;
+}
+
+/* Handle an ingress interrupt on the current cpu. */
+static irqreturn_t tile_net_handle_ingress_irq(int irq, void *unused)
+{
+	struct tile_net_info *info = &__get_cpu_var(per_cpu_info);
+	napi_schedule(&info->napi);
+	return IRQ_HANDLED;
+}
+
+/* Free some completions.  This must be called with interrupts blocked. */
+static int tile_net_free_comps(gxio_mpipe_equeue_t *equeue,
+				struct tile_net_comps *comps,
+				int limit, bool force_update)
+{
+	int n = 0;
+	while (comps->comp_last < comps->comp_next) {
+		unsigned int cid = comps->comp_last % TILE_NET_MAX_COMPS;
+		struct tile_net_comp *comp = &comps->comp_queue[cid];
+		if (!gxio_mpipe_equeue_is_complete(equeue, comp->when,
+						   force_update || n == 0))
+			break;
+		dev_kfree_skb_irq(comp->skb);
+		comps->comp_last++;
+		if (++n == limit)
+			break;
+	}
+	return n;
+}
+
+/* Add a completion.  This must be called with interrupts blocked.
+ * tile_net_equeue_try_reserve() will have ensured a free completion entry.
+ */
+static void add_comp(gxio_mpipe_equeue_t *equeue,
+		     struct tile_net_comps *comps,
+		     uint64_t when, struct sk_buff *skb)
+{
+	int cid = comps->comp_next % TILE_NET_MAX_COMPS;
+	comps->comp_queue[cid].when = when;
+	comps->comp_queue[cid].skb = skb;
+	comps->comp_next++;
+}
+
+static void tile_net_schedule_tx_wake_timer(struct net_device *dev)
+{
+	struct tile_net_info *info = &__get_cpu_var(per_cpu_info);
+	struct tile_net_priv *priv = netdev_priv(dev);
+
+	hrtimer_start(&info->tx_wake[priv->echannel].timer,
+		      ktime_set(0, TX_TIMER_DELAY_USEC * 1000UL),
+		      HRTIMER_MODE_REL_PINNED);
+}
+
+static enum hrtimer_restart tile_net_handle_tx_wake_timer(struct hrtimer *t)
+{
+	struct tile_net_tx_wake *tx_wake =
+		container_of(t, struct tile_net_tx_wake, timer);
+	netif_wake_subqueue(tx_wake->dev, smp_processor_id());
+	return HRTIMER_NORESTART;
+}
+
+/* Make sure the egress timer is scheduled. */
+static void tile_net_schedule_egress_timer(void)
+{
+	struct tile_net_info *info = &__get_cpu_var(per_cpu_info);
+
+	if (!info->egress_timer_scheduled) {
+		hrtimer_start(&info->egress_timer,
+			      ktime_set(0, EGRESS_TIMER_DELAY_USEC * 1000UL),
+			      HRTIMER_MODE_REL_PINNED);
+		info->egress_timer_scheduled = true;
+	}
+}
+
+/* The "function" for "info->egress_timer".
+ *
+ * This timer will reschedule itself as long as there are any pending
+ * completions expected for this tile.
+ */
+static enum hrtimer_restart tile_net_handle_egress_timer(struct hrtimer *t)
+{
+	struct tile_net_info *info = &__get_cpu_var(per_cpu_info);
+	unsigned long irqflags;
+	bool pending = false;
+	int i;
+
+	local_irq_save(irqflags);
+
+	/* The timer is no longer scheduled. */
+	info->egress_timer_scheduled = false;
+
+	/* Free all possible comps for this tile. */
+	for (i = 0; i < TILE_NET_CHANNELS; i++) {
+		struct tile_net_egress *egress = &egress_for_echannel[i];
+		struct tile_net_comps *comps = info->comps_for_echannel[i];
+		if (comps->comp_last >= comps->comp_next)
+			continue;
+		tile_net_free_comps(egress->equeue, comps, -1, true);
+		pending = pending || (comps->comp_last < comps->comp_next);
+	}
+
+	/* Reschedule timer if needed. */
+	if (pending)
+		tile_net_schedule_egress_timer();
+
+	local_irq_restore(irqflags);
+
+	return HRTIMER_NORESTART;
+}
+
+/* Helper function for "tile_net_update()".
+ * "dev" (i.e. arg) is the device being brought up or down,
+ * or NULL if all devices are now down.
+ */
+static void tile_net_update_cpu(void *arg)
+{
+	struct tile_net_info *info = &__get_cpu_var(per_cpu_info);
+	struct net_device *dev = arg;
+
+	if (!info->has_iqueue)
+		return;
+
+	if (dev != NULL) {
+		if (!info->napi_added) {
+			netif_napi_add(dev, &info->napi,
+				       tile_net_poll, TILE_NET_WEIGHT);
+			info->napi_added = true;
+		}
+		if (!info->napi_enabled) {
+			napi_enable(&info->napi);
+			info->napi_enabled = true;
+		}
+		enable_percpu_irq(ingress_irq, 0);
+	} else {
+		disable_percpu_irq(ingress_irq);
+		if (info->napi_enabled) {
+			napi_disable(&info->napi);
+			info->napi_enabled = false;
+		}
+		/* FIXME: Drain the iqueue. */
+	}
+}
+
+/* Helper function for tile_net_open() and tile_net_stop().
+ * Always called under tile_net_devs_for_channel_mutex.
+ */
+static int tile_net_update(struct net_device *dev)
+{
+	static gxio_mpipe_rules_t rules;  /* too big to fit on the stack */
+	bool saw_channel = false;
+	int channel;
+	int rc;
+	int cpu;
+
+	gxio_mpipe_rules_init(&rules, &context);
+
+	for (channel = 0; channel < TILE_NET_CHANNELS; channel++) {
+		if (tile_net_devs_for_channel[channel] == NULL)
+			continue;
+		if (!saw_channel) {
+			saw_channel = true;
+			gxio_mpipe_rules_begin(&rules, first_bucket,
+					       num_buckets, NULL);
+			gxio_mpipe_rules_set_headroom(&rules, NET_IP_ALIGN);
+		}
+		gxio_mpipe_rules_add_channel(&rules, channel);
+	}
+
+	/* NOTE: This can fail if there is no classifier.
+	 * ISSUE: Can anything else cause it to fail?
+	 */
+	rc = gxio_mpipe_rules_commit(&rules);
+	if (rc != 0) {
+		netdev_warn(dev, "gxio_mpipe_rules_commit failed: %d\n", rc);
+		return -EIO;
+	}
+
+	/* Update all cpus, sequentially (to protect "netif_napi_add()"). */
+	for_each_online_cpu(cpu)
+		smp_call_function_single(cpu, tile_net_update_cpu,
+					 (saw_channel ? dev : NULL), 1);
+
+	/* HACK: Allow packets to flow in the simulator. */
+	if (saw_channel)
+		sim_enable_mpipe_links(0, -1);
+
+	return 0;
+}
+
+/* Allocate and initialize mpipe buffer stacks, and register them in
+ * the mPIPE TLBs, for both small and large packet sizes.
+ * This routine supports tile_net_init_mpipe(), below.
+ */
+static int init_buffer_stacks(struct net_device *dev, int num_buffers)
+{
+	pte_t hash_pte = pte_set_home((pte_t) { 0 }, PAGE_HOME_HASH);
+	int rc;
+
+	/* Compute stack bytes; we round up to 64KB and then use
+	 * alloc_pages() so we get the required 64KB alignment as well.
+	 */
+	buffer_stack_size =
+		ALIGN(gxio_mpipe_calc_buffer_stack_bytes(num_buffers),
+		      64 * 1024);
+
+	/* Allocate two buffer stack indices. */
+	rc = gxio_mpipe_alloc_buffer_stacks(&context, 2, 0, 0);
+	if (rc < 0) {
+		netdev_err(dev, "gxio_mpipe_alloc_buffer_stacks failed: %d\n",
+			   rc);
+		return rc;
+	}
+	small_buffer_stack = rc;
+	large_buffer_stack = rc + 1;
+
+	/* Allocate the small memory stack. */
+	small_buffer_stack_va =
+		alloc_pages_exact(buffer_stack_size, GFP_KERNEL);
+	if (small_buffer_stack_va == NULL) {
+		netdev_err(dev,
+			   "Could not alloc %zd bytes for buffer stacks\n",
+			   buffer_stack_size);
+		return -ENOMEM;
+	}
+	rc = gxio_mpipe_init_buffer_stack(&context, small_buffer_stack,
+					  BUFFER_SIZE_SMALL_ENUM,
+					  small_buffer_stack_va,
+					  buffer_stack_size, 0);
+	if (rc != 0) {
+		netdev_err(dev, "gxio_mpipe_init_buffer_stack: %d\n", rc);
+		return rc;
+	}
+	rc = gxio_mpipe_register_client_memory(&context, small_buffer_stack,
+					       hash_pte, 0);
+	if (rc != 0) {
+		netdev_err(dev,
+			   "gxio_mpipe_register_buffer_memory failed: %d\n",
+			   rc);
+		return rc;
+	}
+
+	/* Allocate the large buffer stack. */
+	large_buffer_stack_va =
+		alloc_pages_exact(buffer_stack_size, GFP_KERNEL);
+	if (large_buffer_stack_va == NULL) {
+		netdev_err(dev,
+			   "Could not alloc %zd bytes for buffer stacks\n",
+			   buffer_stack_size);
+		return -ENOMEM;
+	}
+	rc = gxio_mpipe_init_buffer_stack(&context, large_buffer_stack,
+					  BUFFER_SIZE_LARGE_ENUM,
+					  large_buffer_stack_va,
+					  buffer_stack_size, 0);
+	if (rc != 0) {
+		netdev_err(dev, "gxio_mpipe_init_buffer_stack failed: %d\n",
+			   rc);
+		return rc;
+	}
+	rc = gxio_mpipe_register_client_memory(&context, large_buffer_stack,
+					       hash_pte, 0);
+	if (rc != 0) {
+		netdev_err(dev,
+			   "gxio_mpipe_register_buffer_memory failed: %d\n",
+			   rc);
+		return rc;
+	}
+
+	return 0;
+}
+
+/* Allocate per-cpu resources (memory for completions and idescs).
+ * This routine supports tile_net_init_mpipe(), below.
+ */
+static int alloc_percpu_mpipe_resources(struct net_device *dev,
+					int cpu, int ring)
+{
+	struct tile_net_info *info = &per_cpu(per_cpu_info, cpu);
+	int order, i, rc;
+	struct page *page;
+	void *addr;
+
+	/* Allocate the "comps". */
+	order = get_order(COMPS_SIZE);
+	page = homecache_alloc_pages(GFP_KERNEL, order, cpu);
+	if (page == NULL) {
+		netdev_err(dev, "Failed to alloc %zd bytes comps memory\n",
+			   COMPS_SIZE);
+		return -ENOMEM;
+	}
+	addr = pfn_to_kaddr(page_to_pfn(page));
+	memset(addr, 0, COMPS_SIZE);
+	for (i = 0; i < TILE_NET_CHANNELS; i++)
+		info->comps_for_echannel[i] =
+			addr + i * sizeof(struct tile_net_comps);
+
+	/* If this is a network cpu, create an iqueue. */
+	if (cpu_isset(cpu, network_cpus_map)) {
+		order = get_order(NOTIF_RING_SIZE);
+		page = homecache_alloc_pages(GFP_KERNEL, order, cpu);
+		if (page == NULL) {
+			netdev_err(dev,
+				   "Failed to alloc %zd bytes iqueue memory\n",
+				   NOTIF_RING_SIZE);
+			return -ENOMEM;
+		}
+		addr = pfn_to_kaddr(page_to_pfn(page));
+		rc = gxio_mpipe_iqueue_init(&info->iqueue, &context, ring++,
+					    addr, NOTIF_RING_SIZE, 0);
+		if (rc < 0) {
+			netdev_err(dev,
+				   "gxio_mpipe_iqueue_init failed: %d\n", rc);
+			return rc;
+		}
+		info->has_iqueue = true;
+	}
+
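+	/* Hand the next free NotifRing index back to the caller's loop. */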
+	return ring;
+}
+
+/* Initialize NotifGroup and buckets.
+ * This routine supports tile_net_init_mpipe(), below.
+ */
+static int init_notif_group_and_buckets(struct net_device *dev,
+					int ring, int network_cpus_count)
+{
+	int group, rc;
+
+	/* Allocate one NotifGroup. */
+	rc = gxio_mpipe_alloc_notif_groups(&context, 1, 0, 0);
+	if (rc < 0) {
+		netdev_err(dev, "gxio_mpipe_alloc_notif_groups failed: %d\n",
+			   rc);
+		return rc;
+	}
+	group = rc;
+
+	/* Initialize global num_buckets value. */
+	if (network_cpus_count > 4)
+		num_buckets = 256;
+	else if (network_cpus_count > 1)
+		num_buckets = 16;
+
+	/* Allocate some buckets, and set global first_bucket value. */
+	rc = gxio_mpipe_alloc_buckets(&context, num_buckets, 0, 0);
+	if (rc < 0) {
+		netdev_err(dev, "gxio_mpipe_alloc_buckets failed: %d\n", rc);
+		return rc;
+	}
+	first_bucket = rc;
+
+	/* Init group and buckets. */
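+	/* (STICKY_FLOW_LOCALITY keeps a bucket on the NotifRing that last
+	 *  serviced it, so packets of a flow tend to stay on one cpu.)
+	 */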
+	rc = gxio_mpipe_init_notif_group_and_buckets(
+		&context, group, ring, network_cpus_count,
+		first_bucket, num_buckets,
+		GXIO_MPIPE_BUCKET_STICKY_FLOW_LOCALITY);
+	if (rc != 0) {
+		netdev_err(
+			dev,
+			"gxio_mpipe_init_notif_group_and_buckets failed: %d\n",
+			rc);
+		return rc;
+	}
+
+	return 0;
+}
+
+/* Create an irq and register it, then activate the irq and request
+ * interrupts on all cores.  Note that "ingress_irq" being initialized
+ * is how we know not to call tile_net_init_mpipe() again.
+ * This routine supports tile_net_init_mpipe(), below.
+ */
+static int tile_net_setup_interrupts(struct net_device *dev)
+{
+	int cpu, rc;
+
+	rc = create_irq();
+	if (rc < 0) {
+		netdev_err(dev, "create_irq failed: %d\n", rc);
+		return rc;
+	}
+	ingress_irq = rc;
+	tile_irq_activate(ingress_irq, TILE_IRQ_PERCPU);
+	rc = request_irq(ingress_irq, tile_net_handle_ingress_irq,
+			 0, NULL, NULL);
+	if (rc != 0) {
+		netdev_err(dev, "request_irq failed: %d\n", rc);
+		destroy_irq(ingress_irq);
+		ingress_irq = -1;
+		return rc;
+	}
+
+	for_each_online_cpu(cpu) {
+		struct tile_net_info *info = &per_cpu(per_cpu_info, cpu);
+		if (info->has_iqueue) {
+			gxio_mpipe_request_notif_ring_interrupt(
+				&context, cpu_x(cpu), cpu_y(cpu),
+				1, ingress_irq, info->iqueue.ring);
+		}
+	}
+
+	return 0;
+}
+
+/* Undo any state set up partially by a failed call to tile_net_init_mpipe. */
+static void tile_net_init_mpipe_fail(void)
+{
+	int cpu;
+
+	/* Do cleanups that require the mpipe context first. */
+	if (small_buffer_stack >= 0)
+		tile_net_pop_all_buffers(small_buffer_stack);
+	if (large_buffer_stack >= 0)
+		tile_net_pop_all_buffers(large_buffer_stack);
+
+	/* Destroy mpipe context so the hardware no longer owns any memory. */
+	gxio_mpipe_destroy(&context);
+
+	for_each_online_cpu(cpu) {
+		struct tile_net_info *info = &per_cpu(per_cpu_info, cpu);
+		free_pages((unsigned long)(info->comps_for_echannel[0]),
+			   get_order(COMPS_SIZE));
+		info->comps_for_echannel[0] = NULL;
+		free_pages((unsigned long)(info->iqueue.idescs),
+			   get_order(NOTIF_RING_SIZE));
+		info->iqueue.idescs = NULL;
+	}
+
+	if (small_buffer_stack_va)
+		free_pages_exact(small_buffer_stack_va, buffer_stack_size);
+	if (large_buffer_stack_va)
+		free_pages_exact(large_buffer_stack_va, buffer_stack_size);
+
+	small_buffer_stack_va = NULL;
+	large_buffer_stack_va = NULL;
+	large_buffer_stack = -1;
+	small_buffer_stack = -1;
+	first_bucket = -1;
+}
+
+/* The first time any tilegx network device is opened, we initialize
+ * the global mpipe state.  If this step fails, we fail to open the
+ * device, but if it succeeds, we never need to do it again, and since
+ * tile_net can't be unloaded, we never undo it.
+ *
+ * Note that some resources in this path (buffer stack indices,
+ * bindings from init_buffer_stack, etc.) are hypervisor resources
+ * that are freed implicitly by gxio_mpipe_destroy().
+ */
+static int tile_net_init_mpipe(struct net_device *dev)
+{
+	int i, num_buffers, rc;
+	int cpu;
+	int first_ring, ring;
+	int network_cpus_count = cpus_weight(network_cpus_map);
+
+	if (!hash_default) {
+		netdev_err(dev, "Networking requires hash_default!\n");
+		return -EIO;
+	}
+
+	rc = gxio_mpipe_init(&context, 0);
+	if (rc != 0) {
+		netdev_err(dev, "gxio_mpipe_init failed: %d\n", rc);
+		return -EIO;
+	}
+
+	/* Set up the buffer stacks. */
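+	/* (Enough for every iqueue entry plus one batch, per network cpu.) */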
+	num_buffers =
+		network_cpus_count * (IQUEUE_ENTRIES + TILE_NET_BATCH);
+	rc = init_buffer_stacks(dev, num_buffers);
+	if (rc != 0)
+		goto fail;
+
+	/* Provide initial buffers. */
+	rc = -ENOMEM;
+	for (i = 0; i < num_buffers; i++) {
+		if (!tile_net_provide_buffer(true)) {
+			netdev_err(dev, "Cannot allocate initial sk_bufs!\n");
+			goto fail;
+		}
+	}
+	for (i = 0; i < num_buffers; i++) {
+		if (!tile_net_provide_buffer(false)) {
+			netdev_err(dev, "Cannot allocate initial sk_bufs!\n");
+			goto fail;
+		}
+	}
+
+	/* Allocate one NotifRing for each network cpu. */
+	rc = gxio_mpipe_alloc_notif_rings(&context, network_cpus_count, 0, 0);
+	if (rc < 0) {
+		netdev_err(dev, "gxio_mpipe_alloc_notif_rings failed %d\n",
+			   rc);
+		goto fail;
+	}
+
+	/* Init NotifRings per-cpu. */
+	first_ring = rc;
+	ring = first_ring;
+	for_each_online_cpu(cpu) {
+		rc = alloc_percpu_mpipe_resources(dev, cpu, ring);
+		if (rc < 0)
+			goto fail;
+		ring = rc;
+	}
+
+	/* Initialize NotifGroup and buckets. */
+	rc = init_notif_group_and_buckets(dev, first_ring, network_cpus_count);
+	if (rc != 0)
+		goto fail;
+
+	/* Create and enable interrupts. */
+	rc = tile_net_setup_interrupts(dev);
+	if (rc != 0)
+		goto fail;
+
+	return 0;
+
+fail:
+	tile_net_init_mpipe_fail();
+	return rc;
+}
+
+/* Create persistent egress info for a given egress channel.
+ * Note that this may be shared between, say, "gbe0" and "xgbe0".
+ * ISSUE: Defer header allocation until TSO is actually needed?
+ */
+static int tile_net_init_egress(struct net_device *dev, int echannel)
+{
+	struct page *headers_page, *edescs_page, *equeue_page;
+	gxio_mpipe_edesc_t *edescs;
+	gxio_mpipe_equeue_t *equeue;
+	unsigned char *headers;
+	int headers_order, edescs_order, equeue_order;
+	size_t edescs_size;
+	int edma;
+	int rc = -ENOMEM;
+
+	/* Only initialize once. */
+	if (egress_for_echannel[echannel].equeue != NULL)
+		return 0;
+
+	/* Allocate memory for the "headers". */
+	headers_order = get_order(EQUEUE_ENTRIES * HEADER_BYTES);
+	headers_page = alloc_pages(GFP_KERNEL, headers_order);
+	if (headers_page == NULL) {
+		netdev_warn(dev,
+			    "Could not alloc %zd bytes for TSO headers.\n",
+			    PAGE_SIZE << headers_order);
+		goto fail;
+	}
+	headers = pfn_to_kaddr(page_to_pfn(headers_page));
+
+	/* Allocate memory for the "edescs". */
+	edescs_size = EQUEUE_ENTRIES * sizeof(*edescs);
+	edescs_order = get_order(edescs_size);
+	edescs_page = alloc_pages(GFP_KERNEL, edescs_order);
+	if (edescs_page == NULL) {
+		netdev_warn(dev,
+			    "Could not alloc %zd bytes for eDMA ring.\n",
+			    edescs_size);
+		goto fail_headers;
+	}
+	edescs = pfn_to_kaddr(page_to_pfn(edescs_page));
+
+	/* Allocate memory for the "equeue". */
+	equeue_order = get_order(sizeof(*equeue));
+	equeue_page = alloc_pages(GFP_KERNEL, equeue_order);
+	if (equeue_page == NULL) {
+		netdev_warn(dev,
+			    "Could not alloc %zd bytes for equeue info.\n",
+			    PAGE_SIZE << equeue_order);
+		goto fail_edescs;
+	}
+	equeue = pfn_to_kaddr(page_to_pfn(equeue_page));
+
+	/* Allocate an edma ring.  Note that in practice this can't
+	 * fail, which is good, because we will leak an edma ring if so.
+	 */
+	rc = gxio_mpipe_alloc_edma_rings(&context, 1, 0, 0);
+	if (rc < 0) {
+		netdev_warn(dev, "gxio_mpipe_alloc_edma_rings failed: %d\n",
+			    rc);
+		goto fail_equeue;
+	}
+	edma = rc;
+
+	/* Initialize the equeue. */
+	rc = gxio_mpipe_equeue_init(equeue, &context, edma, echannel,
+				    edescs, edescs_size, 0);
+	if (rc != 0) {
+		netdev_err(dev, "gxio_mpipe_equeue_init failed: %d\n", rc);
+		goto fail_equeue;
+	}
+
+	/* Done. */
+	egress_for_echannel[echannel].equeue = equeue;
+	egress_for_echannel[echannel].headers = headers;
+	return 0;
+
+fail_equeue:
+	__free_pages(equeue_page, equeue_order);
+
+fail_edescs:
+	__free_pages(edescs_page, edescs_order);
+
+fail_headers:
+	__free_pages(headers_page, headers_order);
+
+fail:
+	return rc;
+}
+
+/* Return channel number for a newly-opened link. */
+static int tile_net_link_open(struct net_device *dev, gxio_mpipe_link_t *link,
+			      const char *link_name)
+{
+	int rc = gxio_mpipe_link_open(link, &context, link_name, 0);
+	if (rc < 0) {
+		netdev_err(dev, "Failed to open '%s'\n", link_name);
+		return rc;
+	}
+	rc = gxio_mpipe_link_channel(link);
+	if (rc < 0 || rc >= TILE_NET_CHANNELS) {
+		netdev_err(dev, "gxio_mpipe_link_channel bad value: %d\n", rc);
+		gxio_mpipe_link_close(link);
+		return -EINVAL;
+	}
+	return rc;
+}
+
+/* Help the kernel activate the given network interface. */
+static int tile_net_open(struct net_device *dev)
+{
+	struct tile_net_priv *priv = netdev_priv(dev);
+	int cpu, rc;
+
+	mutex_lock(&tile_net_devs_for_channel_mutex);
+
+	/* Do one-time initialization the first time any device is opened. */
+	if (ingress_irq < 0) {
+		rc = tile_net_init_mpipe(dev);
+		if (rc != 0)
+			goto fail;
+	}
+
+	/* Determine if this is the "loopify" device. */
+	if (unlikely((loopify_link_name != NULL) &&
+		     !strcmp(dev->name, loopify_link_name))) {
+		rc = tile_net_link_open(dev, &priv->link, "loop0");
+		if (rc < 0)
+			goto fail;
+		priv->channel = rc;
+		rc = tile_net_link_open(dev, &priv->loopify_link, "loop1");
+		if (rc < 0)
+			goto fail;
+		priv->loopify_channel = rc;
+		priv->echannel = rc;
+	} else {
+		rc = tile_net_link_open(dev, &priv->link, dev->name);
+		if (rc < 0)
+			goto fail;
+		priv->channel = rc;
+		priv->echannel = rc;
+	}
+
+	/* Initialize egress info (if needed).  Once ever, per echannel. */
+	rc = tile_net_init_egress(dev, priv->echannel);
+	if (rc != 0)
+		goto fail;
+
+	tile_net_devs_for_channel[priv->channel] = dev;
+
+	rc = tile_net_update(dev);
+	if (rc != 0)
+		goto fail;
+
+	mutex_unlock(&tile_net_devs_for_channel_mutex);
+
+	/* Initialize the transmit wake timer for this device for each cpu. */
+	for_each_online_cpu(cpu) {
+		struct tile_net_info *info = &per_cpu(per_cpu_info, cpu);
+		struct tile_net_tx_wake *tx_wake =
+			&info->tx_wake[priv->echannel];
+
+		hrtimer_init(&tx_wake->timer, CLOCK_MONOTONIC,
+			     HRTIMER_MODE_REL);
+		tx_wake->timer.function = tile_net_handle_tx_wake_timer;
+		tx_wake->dev = dev;
+	}
+
+	for_each_online_cpu(cpu)
+		netif_start_subqueue(dev, cpu);
+	netif_carrier_on(dev);
+	return 0;
+
+fail:
+	if (priv->loopify_channel >= 0) {
+		if (gxio_mpipe_link_close(&priv->loopify_link) != 0)
+			netdev_warn(dev, "Failed to close loopify link!\n");
+		priv->loopify_channel = -1;
+	}
+	if (priv->channel >= 0) {
+		if (gxio_mpipe_link_close(&priv->link) != 0)
+			netdev_warn(dev, "Failed to close link!\n");
+		/* Clear the channel table entry while the index is valid. */
+		tile_net_devs_for_channel[priv->channel] = NULL;
+		priv->channel = -1;
+	}
+	priv->echannel = -1;
+	mutex_unlock(&tile_net_devs_for_channel_mutex);
+
+	/* Don't return raw gxio error codes to generic Linux. */
+	return (rc > -512) ? rc : -EIO;
+}
+
+/* Help the kernel deactivate the given network interface. */
+static int tile_net_stop(struct net_device *dev)
+{
+	struct tile_net_priv *priv = netdev_priv(dev);
+	int cpu;
+
+	for_each_online_cpu(cpu) {
+		struct tile_net_info *info = &per_cpu(per_cpu_info, cpu);
+		struct tile_net_tx_wake *tx_wake =
+			&info->tx_wake[priv->echannel];
+
+		hrtimer_cancel(&tx_wake->timer);
+		netif_stop_subqueue(dev, cpu);
+	}
+
+	mutex_lock(&tile_net_devs_for_channel_mutex);
+	tile_net_devs_for_channel[priv->channel] = NULL;
+	(void)tile_net_update(dev);
+	if (priv->loopify_channel >= 0) {
+		if (gxio_mpipe_link_close(&priv->loopify_link) != 0)
+			netdev_warn(dev, "Failed to close loopify link!\n");
+		priv->loopify_channel = -1;
+	}
+	if (priv->channel >= 0) {
+		if (gxio_mpipe_link_close(&priv->link) != 0)
+			netdev_warn(dev, "Failed to close link!\n");
+		priv->channel = -1;
+	}
+	priv->echannel = -1;
+	mutex_unlock(&tile_net_devs_for_channel_mutex);
+
+	return 0;
+}
+
+/* Determine the VA for a fragment. */
+static inline void *tile_net_frag_buf(skb_frag_t *f)
+{
+	unsigned long pfn = page_to_pfn(skb_frag_page(f));
+	return pfn_to_kaddr(pfn) + f->page_offset;
+}
+
+/* Acquire a completion entry and an egress slot, or if we can't,
+ * stop the queue and schedule the tx_wake timer.
+ */
+static s64 tile_net_equeue_try_reserve(struct net_device *dev,
+				       struct tile_net_comps *comps,
+				       gxio_mpipe_equeue_t *equeue,
+				       int num_edescs)
+{
+	/* Try to acquire a completion entry. */
+	if (comps->comp_next - comps->comp_last < TILE_NET_MAX_COMPS - 1 ||
+	    tile_net_free_comps(equeue, comps, 32, false) != 0) {
+
+		/* Try to acquire an egress slot. */
+		s64 slot = gxio_mpipe_equeue_try_reserve(equeue, num_edescs);
+		if (slot >= 0)
+			return slot;
+
+		/* Freeing some completions gives the equeue time to drain. */
+		tile_net_free_comps(equeue, comps, TILE_NET_MAX_COMPS, false);
+
+		slot = gxio_mpipe_equeue_try_reserve(equeue, num_edescs);
+		if (slot >= 0)
+			return slot;
+	}
+
+	/* Still nothing; give up and stop the queue for a short while. */
+	netif_stop_subqueue(dev, smp_processor_id());
+	tile_net_schedule_tx_wake_timer(dev);
+	return -1;
+}
+
+/* Determine how many edesc's are needed for TSO.
+ *
+ * Sometimes, if "sendfile()" requires copying, we will be called with
+ * "data" containing the header and payload, with "frags" being empty.
+ * Sometimes, for example when using NFS over TCP, a single segment can
+ * span 3 fragments.  This requires special care.
+ */
+static int tso_count_edescs(struct sk_buff *skb)
+{
+	struct skb_shared_info *sh = skb_shinfo(skb);
+	unsigned int data_len = skb->data_len;
+	unsigned int p_len = sh->gso_size;
+	long f_id = -1;    /* id of the current fragment */
+	long f_size = -1;  /* size of the current fragment */
+	long f_used = -1;  /* bytes used from the current fragment */
+	long n;            /* size of the current piece of payload */
+	int num_edescs = 0;
+	int segment;
+
+	for (segment = 0; segment < sh->gso_segs; segment++) {
+
+		unsigned int p_used = 0;
+
+		/* One edesc for header and for each piece of the payload. */
+		for (num_edescs++; p_used < p_len; num_edescs++) {
+
+			/* Advance as needed. */
+			while (f_used >= f_size) {
+				f_id++;
+				f_size = sh->frags[f_id].size;
+				f_used = 0;
+			}
+
+			/* Use bytes from the current fragment. */
+			n = p_len - p_used;
+			if (n > f_size - f_used)
+				n = f_size - f_used;
+			f_used += n;
+			p_used += n;
+		}
+
+		/* The last segment may be less than gso_size. */
+		data_len -= p_len;
+		if (data_len < p_len)
+			p_len = data_len;
+	}
+
+	return num_edescs;
+}
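+
+/* Illustrative example (not part of the logic above): with gso_size 1400,
+ * data_len 2800, and two frags of 2000 and 800 bytes, segment 1 needs a
+ * header edesc plus one payload edesc (1400 bytes from frag 0), and
+ * segment 2 needs a header edesc plus two payload edescs (600 bytes from
+ * frag 0, then 800 bytes from frag 1), for a total of 5 edescs.
+ */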
+
+/* Prepare modified copies of the skbuff headers.
+ * FIXME: add support for IPv6.
+ */
+static void tso_headers_prepare(struct sk_buff *skb, unsigned char *headers,
+				s64 slot)
+{
+	struct skb_shared_info *sh = skb_shinfo(skb);
+	struct iphdr *ih;
+	struct tcphdr *th;
+	unsigned int data_len = skb->data_len;
+	unsigned char *data = skb->data;
+	unsigned int ih_off, th_off, sh_len, p_len;
+	unsigned int isum_seed, tsum_seed, id, seq;
+	long f_id = -1;    /* id of the current fragment */
+	long f_size = -1;  /* size of the current fragment */
+	long f_used = -1;  /* bytes used from the current fragment */
+	long n;            /* size of the current piece of payload */
+	int segment;
+
+	/* Locate original headers and compute various lengths. */
+	ih = ip_hdr(skb);
+	th = tcp_hdr(skb);
+	ih_off = skb_network_offset(skb);
+	th_off = skb_transport_offset(skb);
+	sh_len = th_off + tcp_hdrlen(skb);
+	p_len = sh->gso_size;
+
+	/* Set up seed values for IP and TCP csum and initialize id and seq. */
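+	/* (The seeds fold out the per-segment fields so each copied header's
+	 *  checksum can be recomputed incrementally below.)
+	 */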
+	isum_seed = ((0xFFFF - ih->check) +
+		     (0xFFFF - ih->tot_len) +
+		     (0xFFFF - ih->id));
+	tsum_seed = th->check + (0xFFFF ^ htons(skb->len));
+	id = ntohs(ih->id);
+	seq = ntohl(th->seq);
+
+	/* Prepare all the headers. */
+	for (segment = 0; segment < sh->gso_segs; segment++) {
+		unsigned char *buf;
+		unsigned int p_used = 0;
+
+		/* Copy to the header memory for this segment. */
+		buf = headers + (slot % EQUEUE_ENTRIES) * HEADER_BYTES +
+			NET_IP_ALIGN;
+		memcpy(buf, data, sh_len);
+
+		/* Update copied ip header. */
+		ih = (struct iphdr *)(buf + ih_off);
+		ih->tot_len = htons(sh_len + p_len - ih_off);
+		ih->id = htons(id);
+		ih->check = csum_long(isum_seed + ih->tot_len +
+				      ih->id) ^ 0xffff;
+
+		/* Update copied tcp header. */
+		th = (struct tcphdr *)(buf + th_off);
+		th->seq = htonl(seq);
+		th->check = csum_long(tsum_seed + htons(sh_len + p_len));
+		if (segment != sh->gso_segs - 1) {
+			th->fin = 0;
+			th->psh = 0;
+		}
+
+		/* Skip past the header. */
+		slot++;
+
+		/* Skip past the payload. */
+		while (p_used < p_len) {
+
+			/* Advance as needed. */
+			while (f_used >= f_size) {
+				f_id++;
+				f_size = sh->frags[f_id].size;
+				f_used = 0;
+			}
+
+			/* Use bytes from the current fragment. */
+			n = p_len - p_used;
+			if (n > f_size - f_used)
+				n = f_size - f_used;
+			f_used += n;
+			p_used += n;
+
+			slot++;
+		}
+
+		id++;
+		seq += p_len;
+
+		/* The last segment may be less than gso_size. */
+		data_len -= p_len;
+		if (data_len < p_len)
+			p_len = data_len;
+	}
+
+	/* Flush the headers so they are ready for hardware DMA. */
+	wmb();
+}
+
+/* Pass all the data to mpipe for egress. */
+static void tso_egress(struct net_device *dev, gxio_mpipe_equeue_t *equeue,
+		       struct sk_buff *skb, unsigned char *headers, s64 slot)
+{
+	struct tile_net_priv *priv = netdev_priv(dev);
+	struct skb_shared_info *sh = skb_shinfo(skb);
+	unsigned int data_len = skb->data_len;
+	unsigned int p_len = sh->gso_size;
+	gxio_mpipe_edesc_t edesc_head = { { 0 } };
+	gxio_mpipe_edesc_t edesc_body = { { 0 } };
+	long f_id = -1;    /* id of the current fragment */
+	long f_size = -1;  /* size of the current fragment */
+	long f_used = -1;  /* bytes used from the current fragment */
+	long n;            /* size of the current piece of payload */
+	unsigned long tx_packets = 0, tx_bytes = 0;
+	unsigned int csum_start, sh_len;
+	int segment;
+
+	/* Prepare to egress the headers: set up header edesc. */
+	csum_start = skb_checksum_start_offset(skb);
+	sh_len = skb_transport_offset(skb) + tcp_hdrlen(skb);
+	edesc_head.csum = 1;
+	edesc_head.csum_start = csum_start;
+	edesc_head.csum_dest = csum_start + skb->csum_offset;
+	edesc_head.xfer_size = sh_len;
+
+	/* This is only used to specify the TLB. */
+	edesc_head.stack_idx = large_buffer_stack;
+	edesc_body.stack_idx = large_buffer_stack;
+
+	/* Egress all the edescs. */
+	for (segment = 0; segment < sh->gso_segs; segment++) {
+		void *va;
+		unsigned char *buf;
+		unsigned int p_used = 0;
+
+		/* Egress the header. */
+		buf = headers + (slot % EQUEUE_ENTRIES) * HEADER_BYTES +
+			NET_IP_ALIGN;
+		edesc_head.va = va_to_tile_io_addr(buf);
+		gxio_mpipe_equeue_put_at(equeue, edesc_head, slot);
+		slot++;
+
+		/* Egress the payload. */
+		while (p_used < p_len) {
+
+			/* Advance as needed. */
+			while (f_used >= f_size) {
+				f_id++;
+				f_size = sh->frags[f_id].size;
+				f_used = 0;
+			}
+
+			va = tile_net_frag_buf(&sh->frags[f_id]) + f_used;
+
+			/* Use bytes from the current fragment. */
+			n = p_len - p_used;
+			if (n > f_size - f_used)
+				n = f_size - f_used;
+			f_used += n;
+			p_used += n;
+
+			/* Egress a piece of the payload. */
+			edesc_body.va = va_to_tile_io_addr(va);
+			edesc_body.xfer_size = n;
+			edesc_body.bound = !(p_used < p_len);
+			gxio_mpipe_equeue_put_at(equeue, edesc_body, slot);
+			slot++;
+		}
+
+		tx_packets++;
+		tx_bytes += sh_len + p_len;
+
+		/* The last segment may be less than gso_size. */
+		data_len -= p_len;
+		if (data_len < p_len)
+			p_len = data_len;
+	}
+
+	/* Update stats. */
+	tile_net_stats_add(tx_packets, &priv->stats.tx_packets);
+	tile_net_stats_add(tx_bytes, &priv->stats.tx_bytes);
+}
+
+/* Do "TSO" handling for egress.
+ *
+ * Normally drivers set NETIF_F_TSO only to support hardware TSO;
+ * otherwise the stack uses scatter-gather to implement GSO in software.
+ * In our testing, enabling GSO support (via NETIF_F_SG) drops network
+ * performance to around 7.5 Gbps on the 10G interfaces, although it
+ * also drops cpu utilization to under 8%.  But
+ * implementing "TSO" in the driver brings performance back up to line
+ * rate, while dropping cpu usage even further, to less than 4%.  In
+ * practice, profiling of GSO shows that skb_segment() is what causes
+ * the performance overheads; we benefit in the driver from using
+ * preallocated memory to duplicate the TCP/IP headers.
+ */
+static int tile_net_tx_tso(struct sk_buff *skb, struct net_device *dev)
+{
+	struct tile_net_info *info = &__get_cpu_var(per_cpu_info);
+	struct tile_net_priv *priv = netdev_priv(dev);
+	int channel = priv->echannel;
+	struct tile_net_egress *egress = &egress_for_echannel[channel];
+	struct tile_net_comps *comps = info->comps_for_echannel[channel];
+	gxio_mpipe_equeue_t *equeue = egress->equeue;
+	unsigned long irqflags;
+	int num_edescs;
+	s64 slot;
+
+	/* Determine how many mpipe edesc's are needed. */
+	num_edescs = tso_count_edescs(skb);
+
+	local_irq_save(irqflags);
+
+	/* Try to acquire a completion entry and an egress slot. */
+	slot = tile_net_equeue_try_reserve(dev, comps, equeue, num_edescs);
+	if (slot < 0) {
+		local_irq_restore(irqflags);
+		return NETDEV_TX_BUSY;
+	}
+
+	/* Set up copies of header data properly. */
+	tso_headers_prepare(skb, egress->headers, slot);
+
+	/* Actually pass the data to the network hardware. */
+	tso_egress(dev, equeue, skb, egress->headers, slot);
+
+	/* Add a completion record. */
+	add_comp(equeue, comps, slot + num_edescs - 1, skb);
+
+	local_irq_restore(irqflags);
+
+	/* Make sure the egress timer is scheduled. */
+	tile_net_schedule_egress_timer();
+
+	return NETDEV_TX_OK;
+}
+
+/* Analyze the body and frags for a transmit request. */
+static unsigned int tile_net_tx_frags(struct frag *frags,
+				       struct sk_buff *skb,
+				       void *b_data, unsigned int b_len)
+{
+	unsigned int i, n = 0;
+
+	struct skb_shared_info *sh = skb_shinfo(skb);
+
+	if (b_len != 0) {
+		frags[n].buf = b_data;
+		frags[n++].length = b_len;
+	}
+
+	for (i = 0; i < sh->nr_frags; i++) {
+		skb_frag_t *f = &sh->frags[i];
+		frags[n].buf = tile_net_frag_buf(f);
+		frags[n++].length = skb_frag_size(f);
+	}
+
+	return n;
+}
+
+/* Help the kernel transmit a packet. */
+static int tile_net_tx(struct sk_buff *skb, struct net_device *dev)
+{
+	struct tile_net_info *info = &__get_cpu_var(per_cpu_info);
+	struct tile_net_priv *priv = netdev_priv(dev);
+	struct tile_net_egress *egress = &egress_for_echannel[priv->echannel];
+	gxio_mpipe_equeue_t *equeue = egress->equeue;
+	struct tile_net_comps *comps =
+		info->comps_for_echannel[priv->echannel];
+	unsigned int len = skb->len;
+	unsigned char *data = skb->data;
+	unsigned int num_edescs;
+	struct frag frags[MAX_FRAGS];
+	gxio_mpipe_edesc_t edescs[MAX_FRAGS];
+	unsigned long irqflags;
+	gxio_mpipe_edesc_t edesc = { { 0 } };
+	unsigned int i;
+	s64 slot;
+
+	if (skb_is_gso(skb))
+		return tile_net_tx_tso(skb, dev);
+
+	num_edescs = tile_net_tx_frags(frags, skb, data, skb_headlen(skb));
+
+	/* This is only used to specify the TLB. */
+	edesc.stack_idx = large_buffer_stack;
+
+	/* Prepare the edescs. */
+	for (i = 0; i < num_edescs; i++) {
+		edesc.xfer_size = frags[i].length;
+		edesc.va = va_to_tile_io_addr(frags[i].buf);
+		edescs[i] = edesc;
+	}
+
+	/* Mark the final edesc. */
+	edescs[num_edescs - 1].bound = 1;
+
+	/* Add checksum info to the initial edesc, if needed. */
+	if (skb->ip_summed == CHECKSUM_PARTIAL) {
+		unsigned int csum_start = skb_checksum_start_offset(skb);
+		edescs[0].csum = 1;
+		edescs[0].csum_start = csum_start;
+		edescs[0].csum_dest = csum_start + skb->csum_offset;
+	}
+
+	local_irq_save(irqflags);
+
+	/* Try to acquire a completion entry and an egress slot. */
+	slot = tile_net_equeue_try_reserve(dev, comps, equeue, num_edescs);
+	if (slot < 0) {
+		local_irq_restore(irqflags);
+		return NETDEV_TX_BUSY;
+	}
+
+	for (i = 0; i < num_edescs; i++)
+		gxio_mpipe_equeue_put_at(equeue, edescs[i], slot++);
+
+	/* Add a completion record. */
+	add_comp(equeue, comps, slot - 1, skb);
+
+	/* NOTE: Short packets are accounted as ETH_ZLEN (60) bytes. */
+	tile_net_stats_add(1, &priv->stats.tx_packets);
+	tile_net_stats_add(max_t(unsigned int, len, ETH_ZLEN),
+			   &priv->stats.tx_bytes);
+
+	local_irq_restore(irqflags);
+
+	/* Make sure the egress timer is scheduled. */
+	tile_net_schedule_egress_timer();
+
+	return NETDEV_TX_OK;
+}
+
+/* Return subqueue id on this core (one per core). */
+static u16 tile_net_select_queue(struct net_device *dev, struct sk_buff *skb)
+{
+	return smp_processor_id();
+}
+
+/* Deal with a transmit timeout. */
+static void tile_net_tx_timeout(struct net_device *dev)
+{
+	int cpu;
+
+	for_each_online_cpu(cpu)
+		netif_wake_subqueue(dev, cpu);
+}
+
+/* Ioctl commands. */
+static int tile_net_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
+{
+	return -EOPNOTSUPP;
+}
+
+/* Get system network statistics for device. */
+static struct net_device_stats *tile_net_get_stats(struct net_device *dev)
+{
+	struct tile_net_priv *priv = netdev_priv(dev);
+	return &priv->stats;
+}
+
+/* Change the MTU. */
+static int tile_net_change_mtu(struct net_device *dev, int new_mtu)
+{
+	if ((new_mtu < 68) || (new_mtu > 1500))
+		return -EINVAL;
+	dev->mtu = new_mtu;
+	return 0;
+}
+
+/* Change the Ethernet address of the NIC.
+ *
+ * The hypervisor driver does not support changing MAC address.  However,
+ * the hardware does not do anything with the MAC address, so the address
+ * which gets used on outgoing packets, and which is accepted on incoming
+ * packets, is completely up to us.
+ *
+ * Returns 0 on success, negative on failure.
+ */
+static int tile_net_set_mac_address(struct net_device *dev, void *p)
+{
+	struct sockaddr *addr = p;
+
+	if (!is_valid_ether_addr(addr->sa_data))
+		return -EINVAL;
+	memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
+	return 0;
+}
+
+#ifdef CONFIG_NET_POLL_CONTROLLER
+/* Polling 'interrupt' - used by things like netconsole to send skbs
+ * without having to re-enable interrupts. It's not called while
+ * the interrupt routine is executing.
+ */
+static void tile_net_netpoll(struct net_device *dev)
+{
+	disable_percpu_irq(ingress_irq);
+	tile_net_handle_ingress_irq(ingress_irq, NULL);
+	enable_percpu_irq(ingress_irq, 0);
+}
+#endif
+
+static const struct net_device_ops tile_net_ops = {
+	.ndo_open = tile_net_open,
+	.ndo_stop = tile_net_stop,
+	.ndo_start_xmit = tile_net_tx,
+	.ndo_select_queue = tile_net_select_queue,
+	.ndo_do_ioctl = tile_net_ioctl,
+	.ndo_get_stats = tile_net_get_stats,
+	.ndo_change_mtu = tile_net_change_mtu,
+	.ndo_tx_timeout = tile_net_tx_timeout,
+	.ndo_set_mac_address = tile_net_set_mac_address,
+#ifdef CONFIG_NET_POLL_CONTROLLER
+	.ndo_poll_controller = tile_net_netpoll,
+#endif
+};
+
+/* The setup function.
+ *
+ * This uses ether_setup() to assign various fields in dev, including
+ * setting IFF_BROADCAST and IFF_MULTICAST, then sets some extra fields.
+ */
+static void tile_net_setup(struct net_device *dev)
+{
+	ether_setup(dev);
+	dev->netdev_ops = &tile_net_ops;
+	dev->watchdog_timeo = TILE_NET_TIMEOUT;
+	dev->features |= NETIF_F_LLTX;
+	dev->features |= NETIF_F_HW_CSUM;
+	dev->features |= NETIF_F_SG;
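+	/* TSO is implemented in the driver itself; see tile_net_tx_tso(). */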
+	dev->features |= NETIF_F_TSO;
+	dev->mtu = 1500;
+}
+
+/* Allocate the device structure, register the device, and obtain the
+ * MAC address from the hypervisor.
+ */
+static void tile_net_dev_init(const char *name, const uint8_t *mac)
+{
+	int ret;
+	int i;
+	int nz_addr = 0;
+	struct net_device *dev;
+	struct tile_net_priv *priv;
+
+	/* HACK: Ignore "loop" links. */
+	if (strncmp(name, "loop", 4) == 0)
+		return;
+
+	/* Allocate the device structure.  Normally, "name" is a
+	 * template, instantiated by register_netdev(), but not for us.
+	 */
+	dev = alloc_netdev_mqs(sizeof(*priv), name, tile_net_setup,
+			       NR_CPUS, 1);
+	if (!dev) {
+		pr_err("alloc_netdev_mqs(%s) failed\n", name);
+		return;
+	}
+
+	/* Initialize "priv". */
+	priv = netdev_priv(dev);
+	memset(priv, 0, sizeof(*priv));
+	priv->dev = dev;
+	priv->channel = -1;
+	priv->loopify_channel = -1;
+	priv->echannel = -1;
+
+	/* Get the MAC address and set it in the device struct; this must
+	 * be done before the device is opened.  If the MAC is all zeroes,
+	 * we use a random address, since we're probably on the simulator.
+	 */
+	for (i = 0; i < 6; i++)
+		nz_addr |= mac[i];
+
+	if (nz_addr) {
+		memcpy(dev->dev_addr, mac, 6);
+		dev->addr_len = 6;
+	} else {
+		random_ether_addr(dev->dev_addr);
+	}
+
+	/* Register the network device. */
+	ret = register_netdev(dev);
+	if (ret) {
+		netdev_err(dev, "register_netdev failed %d\n", ret);
+		free_netdev(dev);
+		return;
+	}
+}
+
+/* Per-cpu module initialization. */
+static void tile_net_init_module_percpu(void *unused)
+{
+	struct tile_net_info *info = &__get_cpu_var(per_cpu_info);
+	int my_cpu = smp_processor_id();
+
+	info->has_iqueue = false;
+
+	info->my_cpu = my_cpu;
+
+	/* Initialize the egress timer. */
+	hrtimer_init(&info->egress_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
+	info->egress_timer.function = tile_net_handle_egress_timer;
+}
+
+/* Module initialization. */
+static int __init tile_net_init_module(void)
+{
+	int i;
+	char name[GXIO_MPIPE_LINK_NAME_LEN];
+	uint8_t mac[6];
+
+	pr_info("Tilera Network Driver\n");
+
+	mutex_init(&tile_net_devs_for_channel_mutex);
+
+	/* Initialize each CPU. */
+	on_each_cpu(tile_net_init_module_percpu, NULL, 1);
+
+	/* Find out what devices we have, and initialize them. */
+	for (i = 0; gxio_mpipe_link_enumerate_mac(i, name, mac) >= 0; i++)
+		tile_net_dev_init(name, mac);
+
+	if (!network_cpus_init())
+		network_cpus_map = *cpu_online_mask;
+
+	return 0;
+}
+
+module_init(tile_net_init_module);
diff --git a/drivers/net/hyperv/hyperv_net.h b/drivers/net/hyperv/hyperv_net.h
index 4ffcd57..2857ab07 100644
--- a/drivers/net/hyperv/hyperv_net.h
+++ b/drivers/net/hyperv/hyperv_net.h
@@ -478,6 +478,7 @@
 	u32 nvsp_version;
 
 	atomic_t num_outstanding_sends;
+	wait_queue_head_t wait_drain;
 	bool start_remove;
 	bool destroy;
 	/*
diff --git a/drivers/net/hyperv/netvsc.c b/drivers/net/hyperv/netvsc.c
index 8b91947..0c56983 100644
--- a/drivers/net/hyperv/netvsc.c
+++ b/drivers/net/hyperv/netvsc.c
@@ -42,6 +42,7 @@
 	if (!net_device)
 		return NULL;
 
+	init_waitqueue_head(&net_device->wait_drain);
 	net_device->start_remove = false;
 	net_device->destroy = false;
 	net_device->dev = device;
@@ -387,12 +388,8 @@
 	spin_unlock_irqrestore(&device->channel->inbound_lock, flags);
 
 	/* Wait for all send completions */
-	while (atomic_read(&net_device->num_outstanding_sends)) {
-		dev_info(&device->device,
-			"waiting for %d requests to complete...\n",
-			atomic_read(&net_device->num_outstanding_sends));
-		udelay(100);
-	}
+	wait_event(net_device->wait_drain,
+		   atomic_read(&net_device->num_outstanding_sends) == 0);
 
 	netvsc_disconnect_vsp(net_device);
 
@@ -486,6 +483,9 @@
 		num_outstanding_sends =
 			atomic_dec_return(&net_device->num_outstanding_sends);
 
+		if (net_device->destroy && num_outstanding_sends == 0)
+			wake_up(&net_device->wait_drain);
+
 		if (netif_queue_stopped(ndev) && !net_device->start_remove &&
 			(hv_ringbuf_avail_percent(&device->channel->outbound)
 			> RING_AVAIL_PERCENT_HIWATER ||
diff --git a/drivers/net/phy/icplus.c b/drivers/net/phy/icplus.c
index 5ac46f5..47f8e89 100644
--- a/drivers/net/phy/icplus.c
+++ b/drivers/net/phy/icplus.c
@@ -41,6 +41,8 @@
 #define IP1001_APS_ON			11	/* IP1001 APS Mode  bit */
 #define IP101A_G_APS_ON			2	/* IP101A/G APS Mode bit */
 #define IP101A_G_IRQ_CONF_STATUS	0x11	/* Conf Info IRQ & Status Reg */
+#define	IP101A_G_IRQ_PIN_USED		(1<<15) /* INTR pin used */
+#define	IP101A_G_IRQ_DEFAULT		IP101A_G_IRQ_PIN_USED
 
 static int ip175c_config_init(struct phy_device *phydev)
 {
@@ -136,6 +138,11 @@
 	if (c < 0)
 		return c;
 
+	/* INTR pin used: speed/link/duplex will cause an interrupt */
+	c = phy_write(phydev, IP101A_G_IRQ_CONF_STATUS, IP101A_G_IRQ_DEFAULT);
+	if (c < 0)
+		return c;
+
 	if (phydev->interface == PHY_INTERFACE_MODE_RGMII) {
 		/* Additional delay (2ns) used to adjust RX clock phase
 		 * at RGMII interface */
diff --git a/drivers/net/phy/mdio_bus.c b/drivers/net/phy/mdio_bus.c
index 683ef1c..5061608 100644
--- a/drivers/net/phy/mdio_bus.c
+++ b/drivers/net/phy/mdio_bus.c
@@ -96,7 +96,7 @@
 }
 /**
  * of_mdio_find_bus - Given an mii_bus node, find the mii_bus.
- * @mdio_np: Pointer to the mii_bus.
+ * @mdio_bus_np: Pointer to the mii_bus.
  *
  * Returns a pointer to the mii_bus, or NULL if none found.
  *
diff --git a/drivers/net/phy/micrel.c b/drivers/net/phy/micrel.c
index 590f902..9d6c80c 100644
--- a/drivers/net/phy/micrel.c
+++ b/drivers/net/phy/micrel.c
@@ -161,7 +161,7 @@
 static struct phy_driver ks8001_driver = {
 	.phy_id		= PHY_ID_KS8001,
 	.name		= "Micrel KS8001 or KS8721",
-	.phy_id_mask	= 0x00fffff0,
+	.phy_id_mask	= 0x00ffffff,
 	.features	= (PHY_BASIC_FEATURES | SUPPORTED_Pause),
 	.flags		= PHY_HAS_MAGICANEG | PHY_HAS_INTERRUPT,
 	.config_init	= kszphy_config_init,
@@ -174,7 +174,7 @@
 
 static struct phy_driver ksz9021_driver = {
 	.phy_id		= PHY_ID_KSZ9021,
-	.phy_id_mask	= 0x000fff10,
+	.phy_id_mask	= 0x000ffffe,
 	.name		= "Micrel KSZ9021 Gigabit PHY",
 	.features	= (PHY_GBIT_FEATURES | SUPPORTED_Pause
 				| SUPPORTED_Asym_Pause),
@@ -240,8 +240,8 @@
 MODULE_LICENSE("GPL");
 
 static struct mdio_device_id __maybe_unused micrel_tbl[] = {
-	{ PHY_ID_KSZ9021, 0x000fff10 },
-	{ PHY_ID_KS8001, 0x00fffff0 },
+	{ PHY_ID_KSZ9021, 0x000ffffe },
+	{ PHY_ID_KS8001, 0x00ffffff },
 	{ PHY_ID_KS8737, 0x00fffff0 },
 	{ PHY_ID_KS8041, 0x00fffff0 },
 	{ PHY_ID_KS8051, 0x00fffff0 },
diff --git a/drivers/net/usb/asix.c b/drivers/net/usb/asix.c
index 71e2b05..3ae80ec 100644
--- a/drivers/net/usb/asix.c
+++ b/drivers/net/usb/asix.c
@@ -35,6 +35,7 @@
 #include <linux/crc32.h>
 #include <linux/usb/usbnet.h>
 #include <linux/slab.h>
+#include <linux/if_vlan.h>
 
 #define DRIVER_VERSION "22-Dec-2011"
 #define DRIVER_NAME "asix"
@@ -321,7 +322,7 @@
 			return 0;
 		}
 
-		if ((size > dev->net->mtu + ETH_HLEN) ||
+		if ((size > dev->net->mtu + ETH_HLEN + VLAN_HLEN) ||
 		    (size + offset > skb->len)) {
 			netdev_err(dev->net, "asix_rx_fixup() Bad RX Length %d\n",
 				   size);
diff --git a/drivers/net/usb/ipheth.c b/drivers/net/usb/ipheth.c
index 964031e..a28a983d 100644
--- a/drivers/net/usb/ipheth.c
+++ b/drivers/net/usb/ipheth.c
@@ -59,6 +59,7 @@
 #define USB_PRODUCT_IPHONE_3G   0x1292
 #define USB_PRODUCT_IPHONE_3GS  0x1294
 #define USB_PRODUCT_IPHONE_4	0x1297
+#define USB_PRODUCT_IPAD 0x129a
 #define USB_PRODUCT_IPHONE_4_VZW 0x129c
 #define USB_PRODUCT_IPHONE_4S	0x12a0
 
@@ -101,6 +102,10 @@
 		IPHETH_USBINTF_CLASS, IPHETH_USBINTF_SUBCLASS,
 		IPHETH_USBINTF_PROTO) },
 	{ USB_DEVICE_AND_INTERFACE_INFO(
+		USB_VENDOR_APPLE, USB_PRODUCT_IPAD,
+		IPHETH_USBINTF_CLASS, IPHETH_USBINTF_SUBCLASS,
+		IPHETH_USBINTF_PROTO) },
+	{ USB_DEVICE_AND_INTERFACE_INFO(
 		USB_VENDOR_APPLE, USB_PRODUCT_IPHONE_4_VZW,
 		IPHETH_USBINTF_CLASS, IPHETH_USBINTF_SUBCLASS,
 		IPHETH_USBINTF_PROTO) },
diff --git a/drivers/net/usb/mcs7830.c b/drivers/net/usb/mcs7830.c
index add1064..03c2d8d 100644
--- a/drivers/net/usb/mcs7830.c
+++ b/drivers/net/usb/mcs7830.c
@@ -629,11 +629,31 @@
 	return skb->len > 0;
 }
 
+static void mcs7830_status(struct usbnet *dev, struct urb *urb)
+{
+	u8 *buf = urb->transfer_buffer;
+	bool link;
+
+	if (urb->actual_length < 16)
+		return;
+
+	link = !(buf[1] & 0x20);
+	if (netif_carrier_ok(dev->net) != link) {
+		if (link) {
+			netif_carrier_on(dev->net);
+			usbnet_defer_kevent(dev, EVENT_LINK_RESET);
+		} else
+			netif_carrier_off(dev->net);
+		netdev_dbg(dev->net, "Link Status is: %d\n", link);
+	}
+}
+
 static const struct driver_info moschip_info = {
 	.description	= "MOSCHIP 7830/7832/7730 usb-NET adapter",
 	.bind		= mcs7830_bind,
 	.rx_fixup	= mcs7830_rx_fixup,
-	.flags		= FLAG_ETHER,
+	.flags		= FLAG_ETHER | FLAG_LINK_INTR,
+	.status		= mcs7830_status,
 	.in		= 1,
 	.out		= 2,
 };
@@ -642,7 +662,8 @@
 	.description    = "Sitecom LN-30 usb-NET adapter",
 	.bind		= mcs7830_bind,
 	.rx_fixup	= mcs7830_rx_fixup,
-	.flags		= FLAG_ETHER,
+	.flags		= FLAG_ETHER | FLAG_LINK_INTR,
+	.status		= mcs7830_status,
 	.in		= 1,
 	.out		= 2,
 };
diff --git a/drivers/net/usb/qmi_wwan.c b/drivers/net/usb/qmi_wwan.c
index 380dbea..3767a12 100644
--- a/drivers/net/usb/qmi_wwan.c
+++ b/drivers/net/usb/qmi_wwan.c
@@ -257,29 +257,6 @@
 	return rv;
 }
 
-/* Gobi devices uses identical class/protocol codes for all interfaces regardless
- * of function. Some of these are CDC ACM like and have the exact same endpoints
- * we are looking for. This leaves two possible strategies for identifying the
- * correct interface:
- *   a) hardcoding interface number, or
- *   b) use the fact that the wwan interface is the only one lacking additional
- *      (CDC functional) descriptors
- *
- * Let's see if we can get away with the generic b) solution.
- */
-static int qmi_wwan_bind_gobi(struct usbnet *dev, struct usb_interface *intf)
-{
-	int rv = -EINVAL;
-
-	/* ignore any interface with additional descriptors */
-	if (intf->cur_altsetting->extralen)
-		goto err;
-
-	rv = qmi_wwan_bind_shared(dev, intf);
-err:
-	return rv;
-}
-
 static void qmi_wwan_unbind_shared(struct usbnet *dev, struct usb_interface *intf)
 {
 	struct usb_driver *subdriver = (void *)dev->data[0];
@@ -347,15 +324,15 @@
 	.manage_power	= qmi_wwan_manage_power,
 };
 
-static const struct driver_info	qmi_wwan_gobi = {
-	.description	= "Qualcomm Gobi wwan/QMI device",
+static const struct driver_info	qmi_wwan_force_int0 = {
+	.description	= "Qualcomm WWAN/QMI device",
 	.flags		= FLAG_WWAN,
-	.bind		= qmi_wwan_bind_gobi,
+	.bind		= qmi_wwan_bind_shared,
 	.unbind		= qmi_wwan_unbind_shared,
 	.manage_power	= qmi_wwan_manage_power,
+	.data		= BIT(0), /* interface whitelist bitmap */
 };
 
-/* ZTE suck at making USB descriptors */
 static const struct driver_info	qmi_wwan_force_int1 = {
 	.description	= "Qualcomm WWAN/QMI device",
 	.flags		= FLAG_WWAN,
@@ -365,6 +342,15 @@
 	.data		= BIT(1), /* interface whitelist bitmap */
 };
 
+static const struct driver_info	qmi_wwan_force_int3 = {
+	.description	= "Qualcomm WWAN/QMI device",
+	.flags		= FLAG_WWAN,
+	.bind		= qmi_wwan_bind_shared,
+	.unbind		= qmi_wwan_unbind_shared,
+	.manage_power	= qmi_wwan_manage_power,
+	.data		= BIT(3), /* interface whitelist bitmap */
+};
+
 static const struct driver_info	qmi_wwan_force_int4 = {
 	.description	= "Qualcomm WWAN/QMI device",
 	.flags		= FLAG_WWAN,
@@ -390,16 +376,23 @@
 static const struct driver_info	qmi_wwan_sierra = {
 	.description	= "Sierra Wireless wwan/QMI device",
 	.flags		= FLAG_WWAN,
-	.bind		= qmi_wwan_bind_gobi,
+	.bind		= qmi_wwan_bind_shared,
 	.unbind		= qmi_wwan_unbind_shared,
 	.manage_power	= qmi_wwan_manage_power,
 	.data		= BIT(8) | BIT(19), /* interface whitelist bitmap */
 };
 
 #define HUAWEI_VENDOR_ID	0x12D1
+
+/* Gobi 1000 QMI/wwan interface number is 3 according to qcserial */
+#define QMI_GOBI1K_DEVICE(vend, prod) \
+	USB_DEVICE(vend, prod), \
+	.driver_info = (unsigned long)&qmi_wwan_force_int3
+
+/* Gobi 2000 and Gobi 3000 QMI/wwan interface number is 0 according to qcserial */
 #define QMI_GOBI_DEVICE(vend, prod) \
 	USB_DEVICE(vend, prod), \
-	.driver_info = (unsigned long)&qmi_wwan_gobi
+	.driver_info = (unsigned long)&qmi_wwan_force_int0
 
 static const struct usb_device_id products[] = {
 	{	/* Huawei E392, E398 and possibly others sharing both device id and more... */
@@ -510,20 +503,24 @@
 		.bInterfaceProtocol = 0xff,
 		.driver_info        = (unsigned long)&qmi_wwan_sierra,
 	},
-	{QMI_GOBI_DEVICE(0x05c6, 0x9212)},	/* Acer Gobi Modem Device */
-	{QMI_GOBI_DEVICE(0x03f0, 0x1f1d)},	/* HP un2400 Gobi Modem Device */
-	{QMI_GOBI_DEVICE(0x03f0, 0x371d)},	/* HP un2430 Mobile Broadband Module */
-	{QMI_GOBI_DEVICE(0x04da, 0x250d)},	/* Panasonic Gobi Modem device */
-	{QMI_GOBI_DEVICE(0x413c, 0x8172)},	/* Dell Gobi Modem device */
-	{QMI_GOBI_DEVICE(0x1410, 0xa001)},	/* Novatel Gobi Modem device */
-	{QMI_GOBI_DEVICE(0x0b05, 0x1776)},	/* Asus Gobi Modem device */
-	{QMI_GOBI_DEVICE(0x19d2, 0xfff3)},	/* ONDA Gobi Modem device */
-	{QMI_GOBI_DEVICE(0x05c6, 0x9001)},	/* Generic Gobi Modem device */
-	{QMI_GOBI_DEVICE(0x05c6, 0x9002)},	/* Generic Gobi Modem device */
-	{QMI_GOBI_DEVICE(0x05c6, 0x9202)},	/* Generic Gobi Modem device */
-	{QMI_GOBI_DEVICE(0x05c6, 0x9203)},	/* Generic Gobi Modem device */
-	{QMI_GOBI_DEVICE(0x05c6, 0x9222)},	/* Generic Gobi Modem device */
-	{QMI_GOBI_DEVICE(0x05c6, 0x9009)},	/* Generic Gobi Modem device */
+
+	/* Gobi 1000 devices */
+	{QMI_GOBI1K_DEVICE(0x05c6, 0x9212)},	/* Acer Gobi Modem Device */
+	{QMI_GOBI1K_DEVICE(0x03f0, 0x1f1d)},	/* HP un2400 Gobi Modem Device */
+	{QMI_GOBI1K_DEVICE(0x03f0, 0x371d)},	/* HP un2430 Mobile Broadband Module */
+	{QMI_GOBI1K_DEVICE(0x04da, 0x250d)},	/* Panasonic Gobi Modem device */
+	{QMI_GOBI1K_DEVICE(0x413c, 0x8172)},	/* Dell Gobi Modem device */
+	{QMI_GOBI1K_DEVICE(0x1410, 0xa001)},	/* Novatel Gobi Modem device */
+	{QMI_GOBI1K_DEVICE(0x0b05, 0x1776)},	/* Asus Gobi Modem device */
+	{QMI_GOBI1K_DEVICE(0x19d2, 0xfff3)},	/* ONDA Gobi Modem device */
+	{QMI_GOBI1K_DEVICE(0x05c6, 0x9001)},	/* Generic Gobi Modem device */
+	{QMI_GOBI1K_DEVICE(0x05c6, 0x9002)},	/* Generic Gobi Modem device */
+	{QMI_GOBI1K_DEVICE(0x05c6, 0x9202)},	/* Generic Gobi Modem device */
+	{QMI_GOBI1K_DEVICE(0x05c6, 0x9203)},	/* Generic Gobi Modem device */
+	{QMI_GOBI1K_DEVICE(0x05c6, 0x9222)},	/* Generic Gobi Modem device */
+	{QMI_GOBI1K_DEVICE(0x05c6, 0x9009)},	/* Generic Gobi Modem device */
+
+	/* Gobi 2000 and 3000 devices */
 	{QMI_GOBI_DEVICE(0x413c, 0x8186)},	/* Dell Gobi 2000 Modem device (N0218, VU936) */
 	{QMI_GOBI_DEVICE(0x05c6, 0x920b)},	/* Generic Gobi 2000 Modem device */
 	{QMI_GOBI_DEVICE(0x05c6, 0x9225)},	/* Sony Gobi 2000 Modem device (N0279, VU730) */
@@ -547,6 +544,8 @@
 	{QMI_GOBI_DEVICE(0x16d8, 0x8002)},	/* CMDTech Gobi 2000 Modem device (VU922) */
 	{QMI_GOBI_DEVICE(0x05c6, 0x9205)},	/* Gobi 2000 Modem device */
 	{QMI_GOBI_DEVICE(0x1199, 0x9013)},	/* Sierra Wireless Gobi 3000 Modem device (MC8355) */
+	{QMI_GOBI_DEVICE(0x1199, 0x9015)},	/* Sierra Wireless Gobi 3000 Modem device */
+	{QMI_GOBI_DEVICE(0x1199, 0x9019)},	/* Sierra Wireless Gobi 3000 Modem device */
 	{ }					/* END */
 };
 MODULE_DEVICE_TABLE(usb, products);
diff --git a/drivers/net/usb/sierra_net.c b/drivers/net/usb/sierra_net.c
index 3faef56..d75d1f5 100644
--- a/drivers/net/usb/sierra_net.c
+++ b/drivers/net/usb/sierra_net.c
@@ -946,7 +946,7 @@
 }
 
 static const u8 sierra_net_ifnum_list[] = { 7, 10, 11 };
-static const struct sierra_net_info_data sierra_net_info_data_68A3 = {
+static const struct sierra_net_info_data sierra_net_info_data_direct_ip = {
 	.rx_urb_size = 8 * 1024,
 	.whitelist = {
 		.infolen = ARRAY_SIZE(sierra_net_ifnum_list),
@@ -954,7 +954,7 @@
 	}
 };
 
-static const struct driver_info sierra_net_info_68A3 = {
+static const struct driver_info sierra_net_info_direct_ip = {
 	.description = "Sierra Wireless USB-to-WWAN Modem",
 	.flags = FLAG_WWAN | FLAG_SEND_ZLP,
 	.bind = sierra_net_bind,
@@ -962,12 +962,18 @@
 	.status = sierra_net_status,
 	.rx_fixup = sierra_net_rx_fixup,
 	.tx_fixup = sierra_net_tx_fixup,
-	.data = (unsigned long)&sierra_net_info_data_68A3,
+	.data = (unsigned long)&sierra_net_info_data_direct_ip,
 };
 
 static const struct usb_device_id products[] = {
 	{USB_DEVICE(0x1199, 0x68A3), /* Sierra Wireless USB-to-WWAN modem */
-	.driver_info = (unsigned long) &sierra_net_info_68A3},
+	.driver_info = (unsigned long) &sierra_net_info_direct_ip},
+	{USB_DEVICE(0x0F3D, 0x68A3), /* AT&T Direct IP modem */
+	.driver_info = (unsigned long) &sierra_net_info_direct_ip},
+	{USB_DEVICE(0x1199, 0x68AA), /* Sierra Wireless Direct IP LTE modem */
+	.driver_info = (unsigned long) &sierra_net_info_direct_ip},
+	{USB_DEVICE(0x0F3D, 0x68AA), /* AT&T Direct IP LTE modem */
+	.driver_info = (unsigned long) &sierra_net_info_direct_ip},
 
 	{}, /* last item */
 };
diff --git a/drivers/net/usb/usbnet.c b/drivers/net/usb/usbnet.c
index 9f58330..aba769d 100644
--- a/drivers/net/usb/usbnet.c
+++ b/drivers/net/usb/usbnet.c
@@ -796,11 +796,13 @@
 	if (info->manage_power) {
 		retval = info->manage_power(dev, 1);
 		if (retval < 0)
-			goto done;
+			goto done_manage_power_error;
 		usb_autopm_put_interface(dev->intf);
 	}
 	return retval;
 
+done_manage_power_error:
+	clear_bit(EVENT_DEV_OPEN, &dev->flags);
 done:
 	usb_autopm_put_interface(dev->intf);
 done_nopm:
@@ -876,9 +878,9 @@
 {
 	struct usbnet *dev = netdev_priv(net);
 
-	strncpy (info->driver, dev->driver_name, sizeof info->driver);
-	strncpy (info->version, DRIVER_VERSION, sizeof info->version);
-	strncpy (info->fw_version, dev->driver_info->description,
+	strlcpy (info->driver, dev->driver_name, sizeof info->driver);
+	strlcpy (info->version, DRIVER_VERSION, sizeof info->version);
+	strlcpy (info->fw_version, dev->driver_info->description,
 		sizeof info->fw_version);
 	usb_make_path (dev->udev, info->bus_info, sizeof info->bus_info);
 }
@@ -1202,6 +1204,21 @@
 }
 EXPORT_SYMBOL_GPL(usbnet_start_xmit);
 
+static void rx_alloc_submit(struct usbnet *dev, gfp_t flags)
+{
+	struct urb	*urb;
+	int		i;
+
+	/* don't refill the queue all at once */
+	for (i = 0; i < 10 && dev->rxq.qlen < RX_QLEN(dev); i++) {
+		urb = usb_alloc_urb(0, flags);
+		if (urb != NULL) {
+			if (rx_submit(dev, urb, flags) == -ENOLINK)
+				return;
+		}
+	}
+}
+
 /*-------------------------------------------------------------------------*/
 
 // tasklet (work deferred from completions, in_irq) or timer
@@ -1241,26 +1258,14 @@
 		   !timer_pending (&dev->delay) &&
 		   !test_bit (EVENT_RX_HALT, &dev->flags)) {
 		int	temp = dev->rxq.qlen;
-		int	qlen = RX_QLEN (dev);
 
-		if (temp < qlen) {
-			struct urb	*urb;
-			int		i;
-
-			// don't refill the queue all at once
-			for (i = 0; i < 10 && dev->rxq.qlen < qlen; i++) {
-				urb = usb_alloc_urb (0, GFP_ATOMIC);
-				if (urb != NULL) {
-					if (rx_submit (dev, urb, GFP_ATOMIC) ==
-					    -ENOLINK)
-						return;
-				}
-			}
+		if (temp < RX_QLEN(dev)) {
+			rx_alloc_submit(dev, GFP_ATOMIC);
 			if (temp != dev->rxq.qlen)
 				netif_dbg(dev, link, dev->net,
 					  "rxqlen %d --> %d\n",
 					  temp, dev->rxq.qlen);
-			if (dev->rxq.qlen < qlen)
+			if (dev->rxq.qlen < RX_QLEN(dev))
 				tasklet_schedule (&dev->bh);
 		}
 		if (dev->txq.qlen < TX_QLEN (dev))
@@ -1513,6 +1518,7 @@
 		spin_lock_irq(&dev->txq.lock);
 		/* don't autosuspend while transmitting */
 		if (dev->txq.qlen && PMSG_IS_AUTO(message)) {
+			dev->suspend_count--;
 			spin_unlock_irq(&dev->txq.lock);
 			return -EBUSY;
 		} else {
@@ -1569,6 +1575,13 @@
 		spin_unlock_irq(&dev->txq.lock);
 
 		if (test_bit(EVENT_DEV_OPEN, &dev->flags)) {
+			/* handle remote wakeup ASAP */
+			if (!dev->wait &&
+				netif_device_present(dev->net) &&
+				!timer_pending(&dev->delay) &&
+				!test_bit(EVENT_RX_HALT, &dev->flags))
+					rx_alloc_submit(dev, GFP_KERNEL);
+
 			if (!(dev->txq.qlen >= TX_QLEN(dev)))
 				netif_tx_wake_all_queues(dev->net);
 			tasklet_schedule (&dev->bh);
diff --git a/drivers/net/virtio_net.c b/drivers/net/virtio_net.c
index 9ce6995..f18149a 100644
--- a/drivers/net/virtio_net.c
+++ b/drivers/net/virtio_net.c
@@ -42,7 +42,8 @@
 #define VIRTNET_DRIVER_VERSION "1.0.0"
 
 struct virtnet_stats {
-	struct u64_stats_sync syncp;
+	struct u64_stats_sync tx_syncp;
+	struct u64_stats_sync rx_syncp;
 	u64 tx_bytes;
 	u64 tx_packets;
 
@@ -300,10 +301,10 @@
 
 	hdr = skb_vnet_hdr(skb);
 
-	u64_stats_update_begin(&stats->syncp);
+	u64_stats_update_begin(&stats->rx_syncp);
 	stats->rx_bytes += skb->len;
 	stats->rx_packets++;
-	u64_stats_update_end(&stats->syncp);
+	u64_stats_update_end(&stats->rx_syncp);
 
 	if (hdr->hdr.flags & VIRTIO_NET_HDR_F_NEEDS_CSUM) {
 		pr_debug("Needs csum!\n");
@@ -565,10 +566,10 @@
 	while ((skb = virtqueue_get_buf(vi->svq, &len)) != NULL) {
 		pr_debug("Sent skb %p\n", skb);
 
-		u64_stats_update_begin(&stats->syncp);
+		u64_stats_update_begin(&stats->tx_syncp);
 		stats->tx_bytes += skb->len;
 		stats->tx_packets++;
-		u64_stats_update_end(&stats->syncp);
+		u64_stats_update_end(&stats->tx_syncp);
 
 		tot_sgs += skb_vnet_hdr(skb)->num_sg;
 		dev_kfree_skb_any(skb);
@@ -703,12 +704,16 @@
 		u64 tpackets, tbytes, rpackets, rbytes;
 
 		do {
-			start = u64_stats_fetch_begin(&stats->syncp);
+			start = u64_stats_fetch_begin(&stats->tx_syncp);
 			tpackets = stats->tx_packets;
 			tbytes   = stats->tx_bytes;
+		} while (u64_stats_fetch_retry(&stats->tx_syncp, start));
+
+		do {
+			start = u64_stats_fetch_begin(&stats->rx_syncp);
 			rpackets = stats->rx_packets;
 			rbytes   = stats->rx_bytes;
-		} while (u64_stats_fetch_retry(&stats->syncp, start));
+		} while (u64_stats_fetch_retry(&stats->rx_syncp, start));
 
 		tot->rx_packets += rpackets;
 		tot->tx_packets += tpackets;
@@ -1231,11 +1236,6 @@
 	vi->config_enable = false;
 	mutex_unlock(&vi->config_lock);
 
-	virtqueue_disable_cb(vi->rvq);
-	virtqueue_disable_cb(vi->svq);
-	if (virtio_has_feature(vi->vdev, VIRTIO_NET_F_CTRL_VQ))
-		virtqueue_disable_cb(vi->cvq);
-
 	netif_device_detach(vi->dev);
 	cancel_delayed_work_sync(&vi->refill);
 
diff --git a/drivers/net/wireless/airo.c b/drivers/net/wireless/airo.c
index 520a4b2..a747c63 100644
--- a/drivers/net/wireless/airo.c
+++ b/drivers/net/wireless/airo.c
@@ -7233,8 +7233,8 @@
 		}
 	} else {
 		dwrq->flags = 1; /* Should be define'd */
-		memcpy(extra + sizeof(struct sockaddr)*i,
-		       &qual,  sizeof(struct iw_quality)*i);
+		memcpy(extra + sizeof(struct sockaddr) * i, qual,
+		       sizeof(struct iw_quality) * i);
 	}
 	dwrq->length = i;
 
diff --git a/drivers/net/wireless/ath/ath5k/base.c b/drivers/net/wireless/ath/ath5k/base.c
index 0ba81a6..44ad6fe 100644
--- a/drivers/net/wireless/ath/ath5k/base.c
+++ b/drivers/net/wireless/ath/ath5k/base.c
@@ -1045,11 +1045,11 @@
 
 				ath5k_txbuf_free_skb(ah, bf);
 
-				spin_lock_bh(&ah->txbuflock);
+				spin_lock(&ah->txbuflock);
 				list_move_tail(&bf->list, &ah->txbuf);
 				ah->txbuf_len++;
 				txq->txq_len--;
-				spin_unlock_bh(&ah->txbuflock);
+				spin_unlock(&ah->txbuflock);
 			}
 			txq->link = NULL;
 			txq->txq_poll_mark = false;
@@ -2415,6 +2415,22 @@
 * Initialization routines *
 \*************************/
 
+static const struct ieee80211_iface_limit if_limits[] = {
+	{ .max = 2048,	.types = BIT(NL80211_IFTYPE_STATION) },
+	{ .max = 4,	.types =
+#ifdef CONFIG_MAC80211_MESH
+				 BIT(NL80211_IFTYPE_MESH_POINT) |
+#endif
+				 BIT(NL80211_IFTYPE_AP) },
+};
+
+static const struct ieee80211_iface_combination if_comb = {
+	.limits = if_limits,
+	.n_limits = ARRAY_SIZE(if_limits),
+	.max_interfaces = 2048,
+	.num_different_channels = 1,
+};
+
 int __devinit
 ath5k_init_ah(struct ath5k_hw *ah, const struct ath_bus_ops *bus_ops)
 {
@@ -2436,6 +2452,9 @@
 		BIT(NL80211_IFTYPE_ADHOC) |
 		BIT(NL80211_IFTYPE_MESH_POINT);
 
+	hw->wiphy->iface_combinations = &if_comb;
+	hw->wiphy->n_iface_combinations = 1;
+
 	/* SW support for IBSS_RSN is provided by mac80211 */
 	hw->wiphy->flags |= WIPHY_FLAG_IBSS_RSN;
 
diff --git a/drivers/net/wireless/ath/ath9k/ar9003_eeprom.c b/drivers/net/wireless/ath/ath9k/ar9003_eeprom.c
index ac53d90..dfb0441 100644
--- a/drivers/net/wireless/ath/ath9k/ar9003_eeprom.c
+++ b/drivers/net/wireless/ath/ath9k/ar9003_eeprom.c
@@ -3809,7 +3809,7 @@
 	return true;
 }
 
-static void ar9003_hw_internal_regulator_apply(struct ath_hw *ah)
+void ar9003_hw_internal_regulator_apply(struct ath_hw *ah)
 {
 	int internal_regulator =
 		ath9k_hw_ar9300_get_eeprom(ah, EEP_INTERNAL_REGULATOR);
diff --git a/drivers/net/wireless/ath/ath9k/ar9003_eeprom.h b/drivers/net/wireless/ath/ath9k/ar9003_eeprom.h
index 2505ac4..8396d15 100644
--- a/drivers/net/wireless/ath/ath9k/ar9003_eeprom.h
+++ b/drivers/net/wireless/ath/ath9k/ar9003_eeprom.h
@@ -334,4 +334,7 @@
 
 unsigned int ar9003_get_paprd_scale_factor(struct ath_hw *ah,
 					   struct ath9k_channel *chan);
+
+void ar9003_hw_internal_regulator_apply(struct ath_hw *ah);
+
 #endif
diff --git a/drivers/net/wireless/ath/ath9k/ar9330_1p1_initvals.h b/drivers/net/wireless/ath/ath9k/ar9330_1p1_initvals.h
index f11d9b2..1bd3a3d 100644
--- a/drivers/net/wireless/ath/ath9k/ar9330_1p1_initvals.h
+++ b/drivers/net/wireless/ath/ath9k/ar9330_1p1_initvals.h
@@ -1,5 +1,6 @@
 /*
- * Copyright (c) 2011 Atheros Communications Inc.
+ * Copyright (c) 2010-2011 Atheros Communications Inc.
+ * Copyright (c) 2011-2012 Qualcomm Atheros Inc.
  *
  * Permission to use, copy, modify, and/or distribute this software for any
  * purpose with or without fee is hereby granted, provided that the above
@@ -18,7 +19,7 @@
 #define INITVALS_9330_1P1_H
 
 static const u32 ar9331_1p1_baseband_postamble[][5] = {
-	/*  Addr      5G_HT20     5G_HT40     2G_HT40     2G_HT20  */
+	/* Addr      5G_HT20     5G_HT40     2G_HT40     2G_HT20   */
 	{0x00009810, 0xd00a8005, 0xd00a8005, 0xd00a8005, 0xd00a8005},
 	{0x00009820, 0x206a002e, 0x206a002e, 0x206a002e, 0x206a002e},
 	{0x00009824, 0x5ac640d0, 0x5ac640d0, 0x5ac640d0, 0x5ac640d0},
@@ -27,10 +28,10 @@
 	{0x00009830, 0x0000059c, 0x0000059c, 0x0000059c, 0x0000059c},
 	{0x00009c00, 0x00000044, 0x00000044, 0x00000044, 0x00000044},
 	{0x00009e00, 0x0372161e, 0x0372161e, 0x037216a4, 0x037216a4},
-	{0x00009e04, 0x00182020, 0x00182020, 0x00182020, 0x00182020},
+	{0x00009e04, 0x00202020, 0x00202020, 0x00202020, 0x00202020},
 	{0x00009e0c, 0x6c4000e2, 0x6d4000e2, 0x6d4000e2, 0x6c4000e2},
 	{0x00009e10, 0x7ec80d2e, 0x7ec80d2e, 0x7ec80d2e, 0x7ec80d2e},
-	{0x00009e14, 0x31395d5e, 0x3139605e, 0x3139605e, 0x31395d5e},
+	{0x00009e14, 0x31365d5e, 0x3136605e, 0x3136605e, 0x31365d5e},
 	{0x00009e18, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
 	{0x00009e1c, 0x0001cf9c, 0x0001cf9c, 0x00021f9c, 0x00021f9c},
 	{0x00009e20, 0x000003b5, 0x000003b5, 0x000003ce, 0x000003ce},
@@ -55,7 +56,7 @@
 	{0x0000a288, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
 	{0x0000a28c, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
 	{0x0000a2c4, 0x00158d18, 0x00158d18, 0x00158d18, 0x00158d18},
-	{0x0000a2d0, 0x00071981, 0x00071981, 0x00071981, 0x00071981},
+	{0x0000a2d0, 0x00071982, 0x00071982, 0x00071982, 0x00071982},
 	{0x0000a2d8, 0xf999a83a, 0xf999a83a, 0xf999a83a, 0xf999a83a},
 	{0x0000a358, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
 	{0x0000ae04, 0x00802020, 0x00802020, 0x00802020, 0x00802020},
@@ -63,7 +64,7 @@
 };
 
 static const u32 ar9331_modes_lowest_ob_db_tx_gain_1p1[][5] = {
-	/*   Addr     5G_HT20     5G_HT40     2G_HT40     2G_HT20  */
+	/* Addr      5G_HT20     5G_HT40     2G_HT40     2G_HT20   */
 	{0x0000a2d8, 0x7999a83a, 0x7999a83a, 0x7999a83a, 0x7999a83a},
 	{0x0000a2dc, 0xffff2a52, 0xffff2a52, 0xffff2a52, 0xffff2a52},
 	{0x0000a2e0, 0xffffcc84, 0xffffcc84, 0xffffcc84, 0xffffcc84},
@@ -155,7 +156,7 @@
 };
 
 static const u32 ar9331_modes_high_ob_db_tx_gain_1p1[][5] = {
-	/*   Addr     5G_HT20     5G_HT40     2G_HT40     2G_HT20  */
+	/* Addr      5G_HT20     5G_HT40     2G_HT40     2G_HT20   */
 	{0x0000a2d8, 0x7999a83a, 0x7999a83a, 0x7999a83a, 0x7999a83a},
 	{0x0000a2dc, 0xffaa9a52, 0xffaa9a52, 0xffaa9a52, 0xffaa9a52},
 	{0x0000a2e0, 0xffb31c84, 0xffb31c84, 0xffb31c84, 0xffb31c84},
@@ -245,7 +246,7 @@
 };
 
 static const u32 ar9331_modes_low_ob_db_tx_gain_1p1[][5] = {
-	/*   Addr     5G_HT20     5G_HT40     2G_HT40     2G_HT20  */
+	/* Addr      5G_HT20     5G_HT40     2G_HT40     2G_HT20   */
 	{0x0000a2d8, 0x7999a83a, 0x7999a83a, 0x7999a83a, 0x7999a83a},
 	{0x0000a2dc, 0xffff2a52, 0xffff2a52, 0xffff2a52, 0xffff2a52},
 	{0x0000a2e0, 0xffffcc84, 0xffffcc84, 0xffffcc84, 0xffffcc84},
@@ -377,14 +378,14 @@
 	{0x000160b4, 0x92480040},
 	{0x000160c0, 0x006db6db},
 	{0x000160c4, 0x0186db60},
-	{0x000160c8, 0x6db6db6c},
+	{0x000160c8, 0x6db4db6c},
 	{0x000160cc, 0x6de6c300},
 	{0x000160d0, 0x14500820},
 	{0x00016100, 0x04cb0001},
 	{0x00016104, 0xfff80015},
 	{0x00016108, 0x00080010},
 	{0x0001610c, 0x00170000},
-	{0x00016140, 0x10804000},
+	{0x00016140, 0x10800000},
 	{0x00016144, 0x01884080},
 	{0x00016148, 0x000080c0},
 	{0x00016280, 0x01000015},
@@ -417,7 +418,7 @@
 };
 
 static const u32 ar9331_1p1_soc_postamble[][5] = {
-	/*  Addr      5G_HT20     5G_HT40     2G_HT40     2G_HT20  */
+	/* Addr      5G_HT20     5G_HT40     2G_HT40     2G_HT20   */
 	{0x00007010, 0x00000022, 0x00000022, 0x00000022, 0x00000022},
 };
 
@@ -691,7 +692,7 @@
 };
 
 static const u32 ar9331_modes_high_power_tx_gain_1p1[][5] = {
-	/*  Addr      5G_HT20     5G_HT40     2G_HT40     2G_HT20  */
+	/* Addr      5G_HT20     5G_HT40     2G_HT40     2G_HT20   */
 	{0x0000a2d8, 0x7999a83a, 0x7999a83a, 0x7999a83a, 0x7999a83a},
 	{0x0000a2dc, 0xffff2a52, 0xffff2a52, 0xffff2a52, 0xffff2a52},
 	{0x0000a2e0, 0xffffcc84, 0xffffcc84, 0xffffcc84, 0xffffcc84},
@@ -783,7 +784,7 @@
 };
 
 static const u32 ar9331_1p1_mac_postamble[][5] = {
-	/*  Addr      5G_HT20     5G_HT40     2G_HT40     2G_HT20  */
+	/* Addr      5G_HT20     5G_HT40     2G_HT40     2G_HT20   */
 	{0x00001030, 0x00000230, 0x00000460, 0x000002c0, 0x00000160},
 	{0x00001070, 0x00000168, 0x000002d0, 0x00000318, 0x0000018c},
 	{0x000010b0, 0x00000e60, 0x00001cc0, 0x00007c70, 0x00003e38},
@@ -973,26 +974,27 @@
 
 static const u32 ar9331_common_rx_gain_1p1[][2] = {
 	/* Addr      allmodes  */
-	{0x0000a000, 0x00010000},
-	{0x0000a004, 0x00030002},
-	{0x0000a008, 0x00050004},
-	{0x0000a00c, 0x00810080},
-	{0x0000a010, 0x00830082},
-	{0x0000a014, 0x01810180},
-	{0x0000a018, 0x01830182},
-	{0x0000a01c, 0x01850184},
-	{0x0000a020, 0x01890188},
-	{0x0000a024, 0x018b018a},
-	{0x0000a028, 0x018d018c},
-	{0x0000a02c, 0x01910190},
-	{0x0000a030, 0x01930192},
-	{0x0000a034, 0x01950194},
-	{0x0000a038, 0x038a0196},
-	{0x0000a03c, 0x038c038b},
-	{0x0000a040, 0x0390038d},
-	{0x0000a044, 0x03920391},
-	{0x0000a048, 0x03940393},
-	{0x0000a04c, 0x03960395},
+	{0x00009e18, 0x05000000},
+	{0x0000a000, 0x00060005},
+	{0x0000a004, 0x00810080},
+	{0x0000a008, 0x00830082},
+	{0x0000a00c, 0x00850084},
+	{0x0000a010, 0x01820181},
+	{0x0000a014, 0x01840183},
+	{0x0000a018, 0x01880185},
+	{0x0000a01c, 0x018a0189},
+	{0x0000a020, 0x02850284},
+	{0x0000a024, 0x02890288},
+	{0x0000a028, 0x028b028a},
+	{0x0000a02c, 0x03850384},
+	{0x0000a030, 0x03890388},
+	{0x0000a034, 0x038b038a},
+	{0x0000a038, 0x038d038c},
+	{0x0000a03c, 0x03910390},
+	{0x0000a040, 0x03930392},
+	{0x0000a044, 0x03950394},
+	{0x0000a048, 0x00000396},
+	{0x0000a04c, 0x00000000},
 	{0x0000a050, 0x00000000},
 	{0x0000a054, 0x00000000},
 	{0x0000a058, 0x00000000},
@@ -1005,15 +1007,15 @@
 	{0x0000a074, 0x00000000},
 	{0x0000a078, 0x00000000},
 	{0x0000a07c, 0x00000000},
-	{0x0000a080, 0x22222229},
-	{0x0000a084, 0x1d1d1d1d},
-	{0x0000a088, 0x1d1d1d1d},
-	{0x0000a08c, 0x1d1d1d1d},
-	{0x0000a090, 0x171d1d1d},
-	{0x0000a094, 0x11111717},
-	{0x0000a098, 0x00030311},
-	{0x0000a09c, 0x00000000},
-	{0x0000a0a0, 0x00000000},
+	{0x0000a080, 0x28282828},
+	{0x0000a084, 0x28282828},
+	{0x0000a088, 0x28282828},
+	{0x0000a08c, 0x28282828},
+	{0x0000a090, 0x28282828},
+	{0x0000a094, 0x24242428},
+	{0x0000a098, 0x171e1e1e},
+	{0x0000a09c, 0x02020b0b},
+	{0x0000a0a0, 0x02020202},
 	{0x0000a0a4, 0x00000000},
 	{0x0000a0a8, 0x00000000},
 	{0x0000a0ac, 0x00000000},
@@ -1021,27 +1023,27 @@
 	{0x0000a0b4, 0x00000000},
 	{0x0000a0b8, 0x00000000},
 	{0x0000a0bc, 0x00000000},
-	{0x0000a0c0, 0x001f0000},
-	{0x0000a0c4, 0x01000101},
-	{0x0000a0c8, 0x011e011f},
-	{0x0000a0cc, 0x011c011d},
-	{0x0000a0d0, 0x02030204},
-	{0x0000a0d4, 0x02010202},
-	{0x0000a0d8, 0x021f0200},
-	{0x0000a0dc, 0x0302021e},
-	{0x0000a0e0, 0x03000301},
-	{0x0000a0e4, 0x031e031f},
-	{0x0000a0e8, 0x0402031d},
-	{0x0000a0ec, 0x04000401},
-	{0x0000a0f0, 0x041e041f},
-	{0x0000a0f4, 0x0502041d},
-	{0x0000a0f8, 0x05000501},
-	{0x0000a0fc, 0x051e051f},
-	{0x0000a100, 0x06010602},
-	{0x0000a104, 0x061f0600},
-	{0x0000a108, 0x061d061e},
-	{0x0000a10c, 0x07020703},
-	{0x0000a110, 0x07000701},
+	{0x0000a0c0, 0x22072208},
+	{0x0000a0c4, 0x22052206},
+	{0x0000a0c8, 0x22032204},
+	{0x0000a0cc, 0x22012202},
+	{0x0000a0d0, 0x221f2200},
+	{0x0000a0d4, 0x221d221e},
+	{0x0000a0d8, 0x33023303},
+	{0x0000a0dc, 0x33003301},
+	{0x0000a0e0, 0x331e331f},
+	{0x0000a0e4, 0x4402331d},
+	{0x0000a0e8, 0x44004401},
+	{0x0000a0ec, 0x441e441f},
+	{0x0000a0f0, 0x55025503},
+	{0x0000a0f4, 0x55005501},
+	{0x0000a0f8, 0x551e551f},
+	{0x0000a0fc, 0x6602551d},
+	{0x0000a100, 0x66006601},
+	{0x0000a104, 0x661e661f},
+	{0x0000a108, 0x7703661d},
+	{0x0000a10c, 0x77017702},
+	{0x0000a110, 0x00007700},
 	{0x0000a114, 0x00000000},
 	{0x0000a118, 0x00000000},
 	{0x0000a11c, 0x00000000},
@@ -1054,26 +1056,26 @@
 	{0x0000a138, 0x00000000},
 	{0x0000a13c, 0x00000000},
 	{0x0000a140, 0x001f0000},
-	{0x0000a144, 0x01000101},
-	{0x0000a148, 0x011e011f},
-	{0x0000a14c, 0x011c011d},
-	{0x0000a150, 0x02030204},
-	{0x0000a154, 0x02010202},
-	{0x0000a158, 0x021f0200},
-	{0x0000a15c, 0x0302021e},
-	{0x0000a160, 0x03000301},
-	{0x0000a164, 0x031e031f},
-	{0x0000a168, 0x0402031d},
-	{0x0000a16c, 0x04000401},
-	{0x0000a170, 0x041e041f},
-	{0x0000a174, 0x0502041d},
-	{0x0000a178, 0x05000501},
-	{0x0000a17c, 0x051e051f},
-	{0x0000a180, 0x06010602},
-	{0x0000a184, 0x061f0600},
-	{0x0000a188, 0x061d061e},
-	{0x0000a18c, 0x07020703},
-	{0x0000a190, 0x07000701},
+	{0x0000a144, 0x111f1100},
+	{0x0000a148, 0x111d111e},
+	{0x0000a14c, 0x111b111c},
+	{0x0000a150, 0x22032204},
+	{0x0000a154, 0x22012202},
+	{0x0000a158, 0x221f2200},
+	{0x0000a15c, 0x221d221e},
+	{0x0000a160, 0x33013302},
+	{0x0000a164, 0x331f3300},
+	{0x0000a168, 0x4402331e},
+	{0x0000a16c, 0x44004401},
+	{0x0000a170, 0x441e441f},
+	{0x0000a174, 0x55015502},
+	{0x0000a178, 0x551f5500},
+	{0x0000a17c, 0x6602551e},
+	{0x0000a180, 0x66006601},
+	{0x0000a184, 0x661e661f},
+	{0x0000a188, 0x7703661d},
+	{0x0000a18c, 0x77017702},
+	{0x0000a190, 0x00007700},
 	{0x0000a194, 0x00000000},
 	{0x0000a198, 0x00000000},
 	{0x0000a19c, 0x00000000},
@@ -1100,14 +1102,14 @@
 	{0x0000a1f0, 0x00000396},
 	{0x0000a1f4, 0x00000396},
 	{0x0000a1f8, 0x00000396},
-	{0x0000a1fc, 0x00000196},
+	{0x0000a1fc, 0x00000296},
 };
 
 static const u32 ar9331_common_tx_gain_offset1_1[][1] = {
-	{0},
-	{3},
-	{0},
-	{0},
+	{0x00000000},
+	{0x00000003},
+	{0x00000000},
+	{0x00000000},
 };
 
 static const u32 ar9331_1p1_chansel_xtal_25M[] = {
diff --git a/drivers/net/wireless/ath/ath9k/ath9k.h b/drivers/net/wireless/ath/ath9k/ath9k.h
index a277cf6..4866550 100644
--- a/drivers/net/wireless/ath/ath9k/ath9k.h
+++ b/drivers/net/wireless/ath/ath9k/ath9k.h
@@ -214,6 +214,7 @@
 	enum ath9k_key_type keytype;
 	u8 keyix;
 	u8 retries;
+	u8 rtscts_rate;
 };
 
 struct ath_buf_state {
diff --git a/drivers/net/wireless/ath/ath9k/htc_drv_main.c b/drivers/net/wireless/ath/ath9k/htc_drv_main.c
index 2b8f61c..abbd6ef 100644
--- a/drivers/net/wireless/ath/ath9k/htc_drv_main.c
+++ b/drivers/net/wireless/ath/ath9k/htc_drv_main.c
@@ -1496,6 +1496,7 @@
 			priv->num_sta_assoc_vif++ : priv->num_sta_assoc_vif--;
 
 		if (priv->ah->opmode == NL80211_IFTYPE_STATION) {
+			ath9k_htc_choose_set_bssid(priv);
 			if (bss_conf->assoc && (priv->num_sta_assoc_vif == 1))
 				ath9k_htc_start_ani(priv);
 			else if (priv->num_sta_assoc_vif == 0)
@@ -1503,13 +1504,11 @@
 		}
 	}
 
-	if (changed & BSS_CHANGED_BSSID) {
+	if (changed & BSS_CHANGED_IBSS) {
 		if (priv->ah->opmode == NL80211_IFTYPE_ADHOC) {
 			common->curaid = bss_conf->aid;
 			memcpy(common->curbssid, bss_conf->bssid, ETH_ALEN);
 			ath9k_htc_set_bssid(priv);
-		} else if (priv->ah->opmode == NL80211_IFTYPE_STATION) {
-			ath9k_htc_choose_set_bssid(priv);
 		}
 	}
 
diff --git a/drivers/net/wireless/ath/ath9k/hw.c b/drivers/net/wireless/ath/ath9k/hw.c
index abe05ec..1c68e56 100644
--- a/drivers/net/wireless/ath/ath9k/hw.c
+++ b/drivers/net/wireless/ath/ath9k/hw.c
@@ -784,13 +784,25 @@
 
 u32 ar9003_get_pll_sqsum_dvc(struct ath_hw *ah)
 {
+	struct ath_common *common = ath9k_hw_common(ah);
+	int i = 0;
+
 	REG_CLR_BIT(ah, PLL3, PLL3_DO_MEAS_MASK);
 	udelay(100);
 	REG_SET_BIT(ah, PLL3, PLL3_DO_MEAS_MASK);
 
-	while ((REG_READ(ah, PLL4) & PLL4_MEAS_DONE) == 0)
+	while ((REG_READ(ah, PLL4) & PLL4_MEAS_DONE) == 0) {
+
 		udelay(100);
 
+		if (WARN_ON_ONCE(i >= 100)) {
+			ath_err(common, "PLL4 measurement not done\n");
+			break;
+		}
+
+		i++;
+	}
+
 	return (REG_READ(ah, PLL3) & SQSUM_DVC_MASK) >> 3;
 }
 EXPORT_SYMBOL(ar9003_get_pll_sqsum_dvc);
@@ -1468,6 +1480,9 @@
 		return false;
 
 	ah->chip_fullsleep = false;
+
+	if (AR_SREV_9330(ah))
+		ar9003_hw_internal_regulator_apply(ah);
 	ath9k_hw_init_pll(ah, chan);
 	ath9k_hw_set_rfmode(ah, chan);
 
diff --git a/drivers/net/wireless/ath/ath9k/main.c b/drivers/net/wireless/ath/ath9k/main.c
index dfa78e8..dac1a27 100644
--- a/drivers/net/wireless/ath/ath9k/main.c
+++ b/drivers/net/wireless/ath/ath9k/main.c
@@ -239,7 +239,7 @@
 {
 	struct ath_hw *ah = sc->sc_ah;
 	struct ath_common *common = ath9k_hw_common(ah);
-	bool ret;
+	bool ret = true;
 
 	ieee80211_stop_queues(sc->hw);
 
@@ -250,11 +250,12 @@
 	ath9k_debug_samp_bb_mac(sc);
 	ath9k_hw_disable_interrupts(ah);
 
-	ret = ath_drain_all_txq(sc, retry_tx);
-
 	if (!ath_stoprecv(sc))
 		ret = false;
 
+	if (!ath_drain_all_txq(sc, retry_tx))
+		ret = false;
+
 	if (!flush) {
 		if (ah->caps.hw_caps & ATH9K_HW_CAP_EDMA)
 			ath_rx_tasklet(sc, 1, true);
@@ -970,6 +971,15 @@
 					    hw_pll_work.work);
 	u32 pll_sqsum;
 
+	/*
+	 * Ensure that the PLL WAR is executed only
+	 * after the STA is associated, or after
+	 * beaconing has started on interfaces that
+	 * use beacons.
+	 */
+	if (!(sc->sc_flags & SC_OP_BEACONS))
+		return;
+
 	if (AR_SREV_9485(sc->sc_ah)) {
 
 		ath9k_ps_wakeup(sc);
@@ -1442,15 +1452,6 @@
 		}
 	}
 
-	if ((ah->opmode == NL80211_IFTYPE_ADHOC) ||
-	    ((vif->type == NL80211_IFTYPE_ADHOC) &&
-	     sc->nvifs > 0)) {
-		ath_err(common, "Cannot create ADHOC interface when other"
-			" interfaces already exist.\n");
-		ret = -EINVAL;
-		goto out;
-	}
-
 	ath_dbg(common, CONFIG, "Attach a VIF of type: %d\n", vif->type);
 
 	sc->nvifs++;
@@ -1475,15 +1476,6 @@
 	mutex_lock(&sc->mutex);
 	ath9k_ps_wakeup(sc);
 
-	/* See if new interface type is valid. */
-	if ((new_type == NL80211_IFTYPE_ADHOC) &&
-	    (sc->nvifs > 1)) {
-		ath_err(common, "When using ADHOC, it must be the only"
-			" interface.\n");
-		ret = -EINVAL;
-		goto out;
-	}
-
 	if (ath9k_uses_beacons(new_type) &&
 	    !ath9k_uses_beacons(vif->type)) {
 		if (sc->nbcnvifs >= ATH_BCBUF) {
diff --git a/drivers/net/wireless/ath/ath9k/xmit.c b/drivers/net/wireless/ath/ath9k/xmit.c
index 23eaa1b..4d57139 100644
--- a/drivers/net/wireless/ath/ath9k/xmit.c
+++ b/drivers/net/wireless/ath/ath9k/xmit.c
@@ -64,7 +64,8 @@
 static struct ath_buf *ath_tx_setup_buffer(struct ath_softc *sc,
 					   struct ath_txq *txq,
 					   struct ath_atx_tid *tid,
-					   struct sk_buff *skb);
+					   struct sk_buff *skb,
+					   bool dequeue);
 
 enum {
 	MCS_HT20,
@@ -811,7 +812,7 @@
 		fi = get_frame_info(skb);
 		bf = fi->bf;
 		if (!fi->bf)
-			bf = ath_tx_setup_buffer(sc, txq, tid, skb);
+			bf = ath_tx_setup_buffer(sc, txq, tid, skb, true);
 
 		if (!bf)
 			continue;
@@ -937,6 +938,7 @@
 	struct ieee80211_tx_rate *rates;
 	const struct ieee80211_rate *rate;
 	struct ieee80211_hdr *hdr;
+	struct ath_frame_info *fi = get_frame_info(bf->bf_mpdu);
 	int i;
 	u8 rix = 0;
 
@@ -947,18 +949,7 @@
 
 	/* set dur_update_en for l-sig computation except for PS-Poll frames */
 	info->dur_update = !ieee80211_is_pspoll(hdr->frame_control);
-
-	/*
-	 * We check if Short Preamble is needed for the CTS rate by
-	 * checking the BSS's global flag.
-	 * But for the rate series, IEEE80211_TX_RC_USE_SHORT_PREAMBLE is used.
-	 */
-	rate = ieee80211_get_rts_cts_rate(sc->hw, tx_info);
-	info->rtscts_rate = rate->hw_value;
-
-	if (tx_info->control.vif &&
-	    tx_info->control.vif->bss_conf.use_short_preamble)
-		info->rtscts_rate |= rate->hw_value_short;
+	info->rtscts_rate = fi->rtscts_rate;
 
 	for (i = 0; i < 4; i++) {
 		bool is_40, is_sgi, is_sp;
@@ -1000,13 +991,13 @@
 		}
 
 		/* legacy rates */
+		rate = &sc->sbands[tx_info->band].bitrates[rates[i].idx];
 		if ((tx_info->band == IEEE80211_BAND_2GHZ) &&
 		    !(rate->flags & IEEE80211_RATE_ERP_G))
 			phy = WLAN_RC_PHY_CCK;
 		else
 			phy = WLAN_RC_PHY_OFDM;
 
-		rate = &sc->sbands[tx_info->band].bitrates[rates[i].idx];
 		info->rates[i].Rate = rate->hw_value;
 		if (rate->hw_value_short) {
 			if (rates[i].flags & IEEE80211_TX_RC_USE_SHORT_PREAMBLE)
@@ -1726,7 +1717,7 @@
 		return;
 	}
 
-	bf = ath_tx_setup_buffer(sc, txctl->txq, tid, skb);
+	bf = ath_tx_setup_buffer(sc, txctl->txq, tid, skb, false);
 	if (!bf)
 		return;
 
@@ -1753,7 +1744,7 @@
 
 	bf = fi->bf;
 	if (!bf)
-		bf = ath_tx_setup_buffer(sc, txq, tid, skb);
+		bf = ath_tx_setup_buffer(sc, txq, tid, skb, false);
 
 	if (!bf)
 		return;
@@ -1775,10 +1766,22 @@
 	struct ieee80211_sta *sta = tx_info->control.sta;
 	struct ieee80211_key_conf *hw_key = tx_info->control.hw_key;
 	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
+	const struct ieee80211_rate *rate;
 	struct ath_frame_info *fi = get_frame_info(skb);
 	struct ath_node *an = NULL;
 	enum ath9k_key_type keytype;
+	bool short_preamble = false;
 
+	/*
+	 * We check if Short Preamble is needed for the CTS rate by
+	 * checking the BSS's global flag.
+	 * But for the rate series, IEEE80211_TX_RC_USE_SHORT_PREAMBLE is used.
+	 */
+	if (tx_info->control.vif &&
+	    tx_info->control.vif->bss_conf.use_short_preamble)
+		short_preamble = true;
+
+	rate = ieee80211_get_rts_cts_rate(hw, tx_info);
 	keytype = ath9k_cmn_get_hw_crypto_keytype(skb);
 
 	if (sta)
@@ -1793,6 +1796,9 @@
 		fi->keyix = ATH9K_TXKEYIX_INVALID;
 	fi->keytype = keytype;
 	fi->framelen = framelen;
+	fi->rtscts_rate = rate->hw_value;
+	if (short_preamble)
+		fi->rtscts_rate |= rate->hw_value_short;
 }
 
 u8 ath_txchainmask_reduction(struct ath_softc *sc, u8 chainmask, u32 rate)
@@ -1814,7 +1820,8 @@
 static struct ath_buf *ath_tx_setup_buffer(struct ath_softc *sc,
 					   struct ath_txq *txq,
 					   struct ath_atx_tid *tid,
-					   struct sk_buff *skb)
+					   struct sk_buff *skb,
+					   bool dequeue)
 {
 	struct ath_common *common = ath9k_hw_common(sc->sc_ah);
 	struct ath_frame_info *fi = get_frame_info(skb);
@@ -1863,6 +1870,8 @@
 	return bf;
 
 error:
+	if (dequeue)
+		__skb_unlink(skb, &tid->buf_q);
 	dev_kfree_skb_any(skb);
 	return NULL;
 }
@@ -1893,7 +1902,7 @@
 		 */
 		ath_tx_send_ampdu(sc, tid, skb, txctl);
 	} else {
-		bf = ath_tx_setup_buffer(sc, txctl->txq, tid, skb);
+		bf = ath_tx_setup_buffer(sc, txctl->txq, tid, skb, false);
 		if (!bf)
 			return;
 
diff --git a/drivers/net/wireless/b43/b43.h b/drivers/net/wireless/b43/b43.h
index 67c13af..c06b6cb 100644
--- a/drivers/net/wireless/b43/b43.h
+++ b/drivers/net/wireless/b43/b43.h
@@ -877,6 +877,10 @@
 	 * from the mac80211 subsystem. */
 	u16 mac80211_initially_registered_queues;
 
+	/* Set when ieee80211_register_hw() has been called; checked before
+	 * calling ieee80211_unregister_hw(). */
+	bool hw_registred;
+
 	/* We can only have one operating interface (802.11 core)
 	 * at a time. General information about this interface follows.
 	 */
diff --git a/drivers/net/wireless/b43/main.c b/drivers/net/wireless/b43/main.c
index 5a39b22..1b988f2 100644
--- a/drivers/net/wireless/b43/main.c
+++ b/drivers/net/wireless/b43/main.c
@@ -2437,6 +2437,7 @@
 	err = ieee80211_register_hw(wl->hw);
 	if (err)
 		goto err_one_core_detach;
+	wl->hw_registred = true;
 	b43_leds_register(wl->current_dev);
 	goto out;
 
@@ -3766,7 +3767,7 @@
 	if (prev_status >= B43_STAT_STARTED) {
 		err = b43_wireless_core_start(up_dev);
 		if (err) {
-			b43err(wl, "Fatal: Coult not start device for "
+			b43err(wl, "Fatal: Could not start device for "
 			       "selected %s-GHz band\n",
 			       band_to_string(chan->band));
 			b43_wireless_core_exit(up_dev);
@@ -5299,6 +5300,7 @@
 
 	hw->queues = modparam_qos ? B43_QOS_QUEUE_NUM : 1;
 	wl->mac80211_initially_registered_queues = hw->queues;
+	wl->hw_registred = false;
 	hw->max_rates = 2;
 	SET_IEEE80211_DEV(hw, dev->dev);
 	if (is_valid_ether_addr(sprom->et1mac))
@@ -5370,12 +5372,15 @@
 	 * as the ieee80211 unreg will destroy the workqueue. */
 	cancel_work_sync(&wldev->restart_work);
 
-	/* Restore the queues count before unregistering, because firmware detect
-	 * might have modified it. Restoring is important, so the networking
-	 * stack can properly free resources. */
-	wl->hw->queues = wl->mac80211_initially_registered_queues;
-	b43_leds_stop(wldev);
-	ieee80211_unregister_hw(wl->hw);
+	B43_WARN_ON(!wl);
+	if (wl->current_dev == wldev && wl->hw_registred) {
+		/* Restore the queues count before unregistering, because firmware detect
+		 * might have modified it. Restoring is important, so the networking
+		 * stack can properly free resources. */
+		wl->hw->queues = wl->mac80211_initially_registered_queues;
+		b43_leds_stop(wldev);
+		ieee80211_unregister_hw(wl->hw);
+	}
 
 	b43_one_core_detach(wldev->dev);
 
@@ -5446,7 +5451,7 @@
 	cancel_work_sync(&wldev->restart_work);
 
 	B43_WARN_ON(!wl);
-	if (wl->current_dev == wldev) {
+	if (wl->current_dev == wldev && wl->hw_registred) {
 		/* Restore the queues count before unregistering, because firmware detect
 		 * might have modified it. Restoring is important, so the networking
 		 * stack can properly free resources. */
diff --git a/drivers/net/wireless/b43legacy/main.c b/drivers/net/wireless/b43legacy/main.c
index cd9c9bc..eae691e 100644
--- a/drivers/net/wireless/b43legacy/main.c
+++ b/drivers/net/wireless/b43legacy/main.c
@@ -2633,7 +2633,7 @@
 	if (prev_status >= B43legacy_STAT_STARTED) {
 		err = b43legacy_wireless_core_start(up_dev);
 		if (err) {
-			b43legacyerr(wl, "Fatal: Coult not start device for "
+			b43legacyerr(wl, "Fatal: Could not start device for "
 			       "newly selected %s-PHY mode\n",
 			       phymode_to_string(new_mode));
 			b43legacy_wireless_core_exit(up_dev);
diff --git a/drivers/net/wireless/brcm80211/brcmfmac/bcmsdh.c b/drivers/net/wireless/brcm80211/brcmfmac/bcmsdh.c
index e2480d1..8e7e692 100644
--- a/drivers/net/wireless/brcm80211/brcmfmac/bcmsdh.c
+++ b/drivers/net/wireless/brcm80211/brcmfmac/bcmsdh.c
@@ -89,9 +89,9 @@
 	data |= 1 << SDIO_FUNC_1 | 1 << SDIO_FUNC_2 | 1;
 	brcmf_sdio_regwb(sdiodev, SDIO_CCCR_IENx, data, &ret);
 
-	/* redirect, configure ane enable io for interrupt signal */
+	/* redirect, configure and enable io for interrupt signal */
 	data = SDIO_SEPINT_MASK | SDIO_SEPINT_OE;
-	if (sdiodev->irq_flags | IRQF_TRIGGER_HIGH)
+	if (sdiodev->irq_flags & IRQF_TRIGGER_HIGH)
 		data |= SDIO_SEPINT_ACT_HI;
 	brcmf_sdio_regwb(sdiodev, SDIO_CCCR_BRCM_SEPINT, data, &ret);
 
diff --git a/drivers/net/wireless/brcm80211/brcmfmac/usb.c b/drivers/net/wireless/brcm80211/brcmfmac/usb.c
index c5a34ff..a299d42 100644
--- a/drivers/net/wireless/brcm80211/brcmfmac/usb.c
+++ b/drivers/net/wireless/brcm80211/brcmfmac/usb.c
@@ -28,6 +28,7 @@
 #include <linux/uaccess.h>
 #include <linux/firmware.h>
 #include <linux/usb.h>
+#include <linux/vmalloc.h>
 #include <net/cfg80211.h>
 
 #include <defs.h>
@@ -1239,7 +1240,7 @@
 		return -EINVAL;
 	}
 
-	devinfo->image = kmalloc(fw->size, GFP_ATOMIC); /* plus nvram */
+	devinfo->image = vmalloc(fw->size); /* plus nvram */
 	if (!devinfo->image)
 		return -ENOMEM;
 
@@ -1603,7 +1604,7 @@
 void brcmf_usb_exit(void)
 {
 	usb_deregister(&brcmf_usbdrvr);
-	kfree(g_image.data);
+	vfree(g_image.data);
 	g_image.data = NULL;
 	g_image.len = 0;
 }
diff --git a/drivers/net/wireless/ipw2x00/ipw2100.c b/drivers/net/wireless/ipw2x00/ipw2100.c
index 9cfae0c..95aa8e1 100644
--- a/drivers/net/wireless/ipw2x00/ipw2100.c
+++ b/drivers/net/wireless/ipw2x00/ipw2100.c
@@ -1903,14 +1903,6 @@
 	netif_stop_queue(priv->net_dev);
 }
 
-/* Called by register_netdev() */
-static int ipw2100_net_init(struct net_device *dev)
-{
-	struct ipw2100_priv *priv = libipw_priv(dev);
-
-	return ipw2100_up(priv, 1);
-}
-
 static int ipw2100_wdev_init(struct net_device *dev)
 {
 	struct ipw2100_priv *priv = libipw_priv(dev);
@@ -6087,7 +6079,6 @@
 	.ndo_stop		= ipw2100_close,
 	.ndo_start_xmit		= libipw_xmit,
 	.ndo_change_mtu		= libipw_change_mtu,
-	.ndo_init		= ipw2100_net_init,
 	.ndo_tx_timeout		= ipw2100_tx_timeout,
 	.ndo_set_mac_address	= ipw2100_set_address,
 	.ndo_validate_addr	= eth_validate_addr,
@@ -6329,6 +6320,10 @@
 	printk(KERN_INFO DRV_NAME
 	       ": Detected Intel PRO/Wireless 2100 Network Connection\n");
 
+	err = ipw2100_up(priv, 1);
+	if (err)
+		goto fail;
+
 	err = ipw2100_wdev_init(dev);
 	if (err)
 		goto fail;
@@ -6338,12 +6333,7 @@
 	 * network device we would call ipw2100_up.  This introduced a race
 	 * condition with newer hotplug configurations (network was coming
 	 * up and making calls before the device was initialized).
-	 *
-	 * If we called ipw2100_up before we registered the device, then the
-	 * device name wasn't registered.  So, we instead use the net_dev->init
-	 * member to call a function that then just turns and calls ipw2100_up.
-	 * net_dev->init is called after name allocation but before the
-	 * notifier chain is called */
+	 */
 	err = register_netdev(dev);
 	if (err) {
 		printk(KERN_WARNING DRV_NAME
diff --git a/drivers/net/wireless/iwlwifi/Kconfig b/drivers/net/wireless/iwlwifi/Kconfig
index db6c6e5..2463c06 100644
--- a/drivers/net/wireless/iwlwifi/Kconfig
+++ b/drivers/net/wireless/iwlwifi/Kconfig
@@ -137,11 +137,3 @@
 	  even if the microcode doesn't advertise it.
 
 	  Say Y only if you want to experiment with MFP.
-
-config IWLWIFI_UCODE16
-	bool "support uCode 16.0"
-	depends on IWLWIFI
-	help
-	  This option enables support for uCode version 16.0.
-
-	  Say Y if you want to use 16.0 microcode.
diff --git a/drivers/net/wireless/iwlwifi/Makefile b/drivers/net/wireless/iwlwifi/Makefile
index 406f297..d615eac 100644
--- a/drivers/net/wireless/iwlwifi/Makefile
+++ b/drivers/net/wireless/iwlwifi/Makefile
@@ -18,7 +18,6 @@
 iwlwifi-objs		+= iwl-trans-pcie.o iwl-trans-pcie-rx.o iwl-trans-pcie-tx.o
 
 
-iwlwifi-$(CONFIG_IWLWIFI_UCODE16) += iwl-phy-db.o
 iwlwifi-$(CONFIG_IWLWIFI_DEBUGFS) += iwl-debugfs.o
 iwlwifi-$(CONFIG_IWLWIFI_DEVICE_TRACING) += iwl-devtrace.o
 iwlwifi-$(CONFIG_IWLWIFI_DEVICE_TESTMODE) += iwl-testmode.o
diff --git a/drivers/net/wireless/iwlwifi/iwl-2000.c b/drivers/net/wireless/iwlwifi/iwl-2000.c
index 7f79341..8133105 100644
--- a/drivers/net/wireless/iwlwifi/iwl-2000.c
+++ b/drivers/net/wireless/iwlwifi/iwl-2000.c
@@ -79,7 +79,7 @@
 	.chain_noise_scale = 1000,
 	.wd_timeout = IWL_DEF_WD_TIMEOUT,
 	.max_event_log_size = 512,
-	.shadow_reg_enable = true,
+	.shadow_reg_enable = false, /* TODO: re-enable once bugs in this feature are fixed */
 	.hd_v2 = true,
 };
 
@@ -97,7 +97,7 @@
 	.chain_noise_scale = 1000,
 	.wd_timeout = IWL_LONG_WD_TIMEOUT,
 	.max_event_log_size = 512,
-	.shadow_reg_enable = true,
+	.shadow_reg_enable = false, /* TODO: re-enable once bugs in this feature are fixed */
 	.hd_v2 = true,
 };
 
diff --git a/drivers/net/wireless/iwlwifi/iwl-6000.c b/drivers/net/wireless/iwlwifi/iwl-6000.c
index 381b02c..e5e8ada 100644
--- a/drivers/net/wireless/iwlwifi/iwl-6000.c
+++ b/drivers/net/wireless/iwlwifi/iwl-6000.c
@@ -35,17 +35,20 @@
 #define IWL6000_UCODE_API_MAX 6
 #define IWL6050_UCODE_API_MAX 5
 #define IWL6000G2_UCODE_API_MAX 6
+#define IWL6035_UCODE_API_MAX 6
 
 /* Oldest version we won't warn about */
 #define IWL6000_UCODE_API_OK 4
 #define IWL6000G2_UCODE_API_OK 5
 #define IWL6050_UCODE_API_OK 5
 #define IWL6000G2B_UCODE_API_OK 6
+#define IWL6035_UCODE_API_OK 6
 
 /* Lowest firmware API version supported */
 #define IWL6000_UCODE_API_MIN 4
 #define IWL6050_UCODE_API_MIN 4
-#define IWL6000G2_UCODE_API_MIN 4
+#define IWL6000G2_UCODE_API_MIN 5
+#define IWL6035_UCODE_API_MIN 6
 
 /* EEPROM versions */
 #define EEPROM_6000_TX_POWER_VERSION	(4)
@@ -86,7 +89,7 @@
 	.chain_noise_scale = 1000,
 	.wd_timeout = IWL_DEF_WD_TIMEOUT,
 	.max_event_log_size = 512,
-	.shadow_reg_enable = true,
+	.shadow_reg_enable = false, /* TODO: re-enable once bugs in this feature are fixed */
 };
 
 static const struct iwl_base_params iwl6050_base_params = {
@@ -102,7 +105,7 @@
 	.chain_noise_scale = 1500,
 	.wd_timeout = IWL_DEF_WD_TIMEOUT,
 	.max_event_log_size = 1024,
-	.shadow_reg_enable = true,
+	.shadow_reg_enable = false, /* TODO: re-enable once bugs in this feature are fixed */
 };
 
 static const struct iwl_base_params iwl6000_g2_base_params = {
@@ -118,7 +121,7 @@
 	.chain_noise_scale = 1000,
 	.wd_timeout = IWL_LONG_WD_TIMEOUT,
 	.max_event_log_size = 512,
-	.shadow_reg_enable = true,
+	.shadow_reg_enable = false, /* TODO: re-enable once bugs in this feature are fixed */
 };
 
 static const struct iwl_ht_params iwl6000_ht_params = {
@@ -227,9 +230,25 @@
 	IWL_DEVICE_6030,
 };
 
+#define IWL_DEVICE_6035						\
+	.fw_name_pre = IWL6030_FW_PRE,				\
+	.ucode_api_max = IWL6035_UCODE_API_MAX,			\
+	.ucode_api_ok = IWL6035_UCODE_API_OK,			\
+	.ucode_api_min = IWL6035_UCODE_API_MIN,			\
+	.device_family = IWL_DEVICE_FAMILY_6030,		\
+	.max_inst_size = IWL60_RTC_INST_SIZE,			\
+	.max_data_size = IWL60_RTC_DATA_SIZE,			\
+	.eeprom_ver = EEPROM_6030_EEPROM_VERSION,		\
+	.eeprom_calib_ver = EEPROM_6030_TX_POWER_VERSION,	\
+	.base_params = &iwl6000_g2_base_params,			\
+	.bt_params = &iwl6000_bt_params,			\
+	.need_temp_offset_calib = true,				\
+	.led_mode = IWL_LED_RF_STATE,				\
+	.adv_pm = true
+
 const struct iwl_cfg iwl6035_2agn_cfg = {
 	.name = "Intel(R) Centrino(R) Advanced-N 6235 AGN",
-	IWL_DEVICE_6030,
+	IWL_DEVICE_6035,
 	.ht_params = &iwl6000_ht_params,
 };
 
diff --git a/drivers/net/wireless/iwlwifi/iwl-agn-rs.c b/drivers/net/wireless/iwlwifi/iwl-agn-rs.c
index 51e1a69..8cebd7c 100644
--- a/drivers/net/wireless/iwlwifi/iwl-agn-rs.c
+++ b/drivers/net/wireless/iwlwifi/iwl-agn-rs.c
@@ -884,6 +884,7 @@
 	if ((priv->bt_traffic_load != priv->last_bt_traffic_load) ||
 	    (priv->bt_full_concurrent != full_concurrent)) {
 		priv->bt_full_concurrent = full_concurrent;
+		priv->last_bt_traffic_load = priv->bt_traffic_load;
 
 		/* Update uCode's rate table. */
 		tbl = &(lq_sta->lq_info[lq_sta->active_tbl]);
diff --git a/drivers/net/wireless/iwlwifi/iwl-agn-sta.c b/drivers/net/wireless/iwlwifi/iwl-agn-sta.c
index b31584e..eb6a8ea 100644
--- a/drivers/net/wireless/iwlwifi/iwl-agn-sta.c
+++ b/drivers/net/wireless/iwlwifi/iwl-agn-sta.c
@@ -772,7 +772,7 @@
 						~IWL_STA_DRIVER_ACTIVE;
 				priv->stations[i].used &=
 						~IWL_STA_UCODE_INPROGRESS;
-				spin_unlock_bh(&priv->sta_lock);
+				continue;
 			}
 			/*
 			 * Rate scaling has already been initialized, send
@@ -1267,7 +1267,7 @@
 		key_flags |= STA_KEY_MULTICAST_MSK;
 
 	sta_cmd.key.key_flags = key_flags;
-	sta_cmd.key.key_offset = WEP_INVALID_OFFSET;
+	sta_cmd.key.key_offset = keyconf->hw_key_idx;
 	sta_cmd.sta.modify_mask = STA_MODIFY_KEY_MASK;
 	sta_cmd.mode = STA_CONTROL_MODIFY_MSK;
 
diff --git a/drivers/net/wireless/iwlwifi/iwl-debugfs.c b/drivers/net/wireless/iwlwifi/iwl-debugfs.c
index e7c157e..7f97dec 100644
--- a/drivers/net/wireless/iwlwifi/iwl-debugfs.c
+++ b/drivers/net/wireless/iwlwifi/iwl-debugfs.c
@@ -2239,6 +2239,7 @@
 	return count;
 }
 
+#ifdef CONFIG_IWLWIFI_DEBUG
 static ssize_t iwl_dbgfs_log_event_read(struct file *file,
 					 char __user *user_buf,
 					 size_t count, loff_t *ppos)
@@ -2276,6 +2277,7 @@
 
 	return count;
 }
+#endif
 
 static ssize_t iwl_dbgfs_calib_disabled_read(struct file *file,
 					 char __user *user_buf,
@@ -2345,7 +2347,9 @@
 DEBUGFS_READ_WRITE_FILE_OPS(protection_mode);
 DEBUGFS_READ_FILE_OPS(reply_tx_error);
 DEBUGFS_WRITE_FILE_OPS(echo_test);
+#ifdef CONFIG_IWLWIFI_DEBUG
 DEBUGFS_READ_WRITE_FILE_OPS(log_event);
+#endif
 DEBUGFS_READ_WRITE_FILE_OPS(calib_disabled);
 
 /*
@@ -2405,7 +2409,9 @@
 	DEBUGFS_ADD_FILE(rxon_flags, dir_debug, S_IWUSR);
 	DEBUGFS_ADD_FILE(rxon_filter_flags, dir_debug, S_IWUSR);
 	DEBUGFS_ADD_FILE(echo_test, dir_debug, S_IWUSR);
+#ifdef CONFIG_IWLWIFI_DEBUG
 	DEBUGFS_ADD_FILE(log_event, dir_debug, S_IWUSR | S_IRUSR);
+#endif
 
 	if (iwl_advanced_bt_coexist(priv))
 		DEBUGFS_ADD_FILE(bt_traffic, dir_debug, S_IRUSR);
diff --git a/drivers/net/wireless/iwlwifi/iwl-drv.c b/drivers/net/wireless/iwlwifi/iwl-drv.c
index 3c72bad..fac67a5 100644
--- a/drivers/net/wireless/iwlwifi/iwl-drv.c
+++ b/drivers/net/wireless/iwlwifi/iwl-drv.c
@@ -657,17 +657,17 @@
 	return -EINVAL;
 }
 
-static int alloc_pci_desc(struct iwl_drv *drv,
-			  struct iwl_firmware_pieces *pieces,
-			  enum iwl_ucode_type type)
+static int iwl_alloc_ucode(struct iwl_drv *drv,
+			   struct iwl_firmware_pieces *pieces,
+			   enum iwl_ucode_type type)
 {
 	int i;
 	for (i = 0;
 	     i < IWL_UCODE_SECTION_MAX && get_sec_size(pieces, type, i);
 	     i++)
 		if (iwl_alloc_fw_desc(drv, &(drv->fw.img[type].sec[i]),
-						get_sec(pieces, type, i)))
-			return -1;
+				      get_sec(pieces, type, i)))
+			return -ENOMEM;
 	return 0;
 }
 
@@ -825,8 +825,8 @@
 	 * 1) unmodified from disk
 	 * 2) backup cache for save/restore during power-downs */
 	for (i = 0; i < IWL_UCODE_TYPE_MAX; i++)
-		if (alloc_pci_desc(drv, &pieces, i))
-			goto err_pci_alloc;
+		if (iwl_alloc_ucode(drv, &pieces, i))
+			goto out_free_fw;
 
 	/* Now that we can no longer fail, copy information */
 
@@ -861,13 +861,18 @@
 
 	/* We have our copies now, allow OS release its copies */
 	release_firmware(ucode_raw);
-	complete(&drv->request_firmware_complete);
 
 	drv->op_mode = iwl_dvm_ops.start(drv->trans, drv->cfg, &drv->fw);
 
 	if (!drv->op_mode)
 		goto out_unbind;
 
+	/*
+	 * Complete the firmware request last so that
+	 * a driver unbind (stop) doesn't run while we
+	 * are doing the start() above.
+	 */
+	complete(&drv->request_firmware_complete);
 	return;
 
  try_again:
@@ -877,7 +882,7 @@
 		goto out_unbind;
 	return;
 
- err_pci_alloc:
+ out_free_fw:
 	IWL_ERR(drv, "failed to allocate pci memory\n");
 	iwl_dealloc_ucode(drv);
 	release_firmware(ucode_raw);
diff --git a/drivers/net/wireless/iwlwifi/iwl-eeprom.c b/drivers/net/wireless/iwlwifi/iwl-eeprom.c
index 50c5891..b8e2b22 100644
--- a/drivers/net/wireless/iwlwifi/iwl-eeprom.c
+++ b/drivers/net/wireless/iwlwifi/iwl-eeprom.c
@@ -568,28 +568,28 @@
  * iwl_get_max_txpower_avg - get the highest tx power from all chains.
  *     find the highest tx power from all chains for the channel
  */
-static s8 iwl_get_max_txpower_avg(const struct iwl_cfg *cfg,
+static s8 iwl_get_max_txpower_avg(struct iwl_priv *priv,
 		struct iwl_eeprom_enhanced_txpwr *enhanced_txpower,
 		int element, s8 *max_txpower_in_half_dbm)
 {
 	s8 max_txpower_avg = 0; /* (dBm) */
 
 	/* Take the highest tx power from any valid chains */
-	if ((cfg->valid_tx_ant & ANT_A) &&
+	if ((priv->hw_params.valid_tx_ant & ANT_A) &&
 	    (enhanced_txpower[element].chain_a_max > max_txpower_avg))
 		max_txpower_avg = enhanced_txpower[element].chain_a_max;
-	if ((cfg->valid_tx_ant & ANT_B) &&
+	if ((priv->hw_params.valid_tx_ant & ANT_B) &&
 	    (enhanced_txpower[element].chain_b_max > max_txpower_avg))
 		max_txpower_avg = enhanced_txpower[element].chain_b_max;
-	if ((cfg->valid_tx_ant & ANT_C) &&
+	if ((priv->hw_params.valid_tx_ant & ANT_C) &&
 	    (enhanced_txpower[element].chain_c_max > max_txpower_avg))
 		max_txpower_avg = enhanced_txpower[element].chain_c_max;
-	if (((cfg->valid_tx_ant == ANT_AB) |
-	    (cfg->valid_tx_ant == ANT_BC) |
-	    (cfg->valid_tx_ant == ANT_AC)) &&
+	if (((priv->hw_params.valid_tx_ant == ANT_AB) |
+	    (priv->hw_params.valid_tx_ant == ANT_BC) |
+	    (priv->hw_params.valid_tx_ant == ANT_AC)) &&
 	    (enhanced_txpower[element].mimo2_max > max_txpower_avg))
 		max_txpower_avg =  enhanced_txpower[element].mimo2_max;
-	if ((cfg->valid_tx_ant == ANT_ABC) &&
+	if ((priv->hw_params.valid_tx_ant == ANT_ABC) &&
 	    (enhanced_txpower[element].mimo3_max > max_txpower_avg))
 		max_txpower_avg = enhanced_txpower[element].mimo3_max;
 
@@ -691,7 +691,7 @@
 				 ((txp->delta_20_in_40 & 0xf0) >> 4),
 				 (txp->delta_20_in_40 & 0x0f));
 
-		max_txp_avg = iwl_get_max_txpower_avg(priv->cfg, txp_array, idx,
+		max_txp_avg = iwl_get_max_txpower_avg(priv, txp_array, idx,
 						      &max_txp_avg_halfdbm);
 
 		/*
diff --git a/drivers/net/wireless/iwlwifi/iwl-mac80211.c b/drivers/net/wireless/iwlwifi/iwl-mac80211.c
index ab2f4d75..3ee23134 100644
--- a/drivers/net/wireless/iwlwifi/iwl-mac80211.c
+++ b/drivers/net/wireless/iwlwifi/iwl-mac80211.c
@@ -199,6 +199,7 @@
 			    WIPHY_FLAG_DISABLE_BEACON_HINTS |
 			    WIPHY_FLAG_IBSS_RSN;
 
+#ifdef CONFIG_PM_SLEEP
 	if (priv->fw->img[IWL_UCODE_WOWLAN].sec[0].len &&
 	    priv->trans->ops->wowlan_suspend &&
 	    device_can_wakeup(priv->trans->dev)) {
@@ -217,6 +218,7 @@
 		hw->wiphy->wowlan.pattern_max_len =
 					IWLAGN_WOWLAN_MAX_PATTERN_LEN;
 	}
+#endif
 
 	if (iwlwifi_mod_params.power_save)
 		hw->wiphy->flags |= WIPHY_FLAG_PS_ON_BY_DEFAULT;
@@ -249,6 +251,7 @@
 	ret = ieee80211_register_hw(priv->hw);
 	if (ret) {
 		IWL_ERR(priv, "Failed to register hw (error %d)\n", ret);
+		iwl_leds_exit(priv);
 		return ret;
 	}
 	priv->mac80211_registered = 1;
diff --git a/drivers/net/wireless/iwlwifi/iwl-phy-db.c b/drivers/net/wireless/iwlwifi/iwl-phy-db.c
deleted file mode 100644
index f166955..0000000
--- a/drivers/net/wireless/iwlwifi/iwl-phy-db.c
+++ /dev/null
@@ -1,288 +0,0 @@
-/******************************************************************************
- *
- * This file is provided under a dual BSD/GPLv2 license.  When using or
- * redistributing this file, you may do so under either license.
- *
- * GPL LICENSE SUMMARY
- *
- * Copyright(c) 2007 - 2012 Intel Corporation. All rights reserved.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
- * General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
- * USA
- *
- * The full GNU General Public License is included in this distribution
- * in the file called LICENSE.GPL.
- *
- * Contact Information:
- *  Intel Linux Wireless <ilw@linux.intel.com>
- * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
- *
- * BSD LICENSE
- *
- * Copyright(c) 2005 - 2012 Intel Corporation. All rights reserved.
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- *  * Redistributions of source code must retain the above copyright
- *    notice, this list of conditions and the following disclaimer.
- *  * Redistributions in binary form must reproduce the above copyright
- *    notice, this list of conditions and the following disclaimer in
- *    the documentation and/or other materials provided with the
- *    distribution.
- *  * Neither the name Intel Corporation nor the names of its
- *    contributors may be used to endorse or promote products derived
- *    from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- *
- *****************************************************************************/
-
-#include <linux/slab.h>
-#include <linux/string.h>
-
-#include "iwl-debug.h"
-#include "iwl-dev.h"
-
-#include "iwl-phy-db.h"
-
-#define CHANNEL_NUM_SIZE	4	/* num of channels in calib_ch size */
-
-struct iwl_phy_db *iwl_phy_db_init(struct device *dev)
-{
-	struct iwl_phy_db *phy_db = kzalloc(sizeof(struct iwl_phy_db),
-					    GFP_KERNEL);
-
-	if (!phy_db)
-		return phy_db;
-
-	phy_db->dev = dev;
-
-	/* TODO: add default values of the phy db. */
-	return phy_db;
-}
-
-/*
- * get phy db section: returns a pointer to a phy db section specified by
- * type and channel group id.
- */
-static struct iwl_phy_db_entry *
-iwl_phy_db_get_section(struct iwl_phy_db *phy_db,
-		       enum iwl_phy_db_section_type type,
-		       u16 chg_id)
-{
-	if (!phy_db || type < 0 || type >= IWL_PHY_DB_MAX)
-		return NULL;
-
-	switch (type) {
-	case IWL_PHY_DB_CFG:
-		return &phy_db->cfg;
-	case IWL_PHY_DB_CALIB_NCH:
-		return &phy_db->calib_nch;
-	case IWL_PHY_DB_CALIB_CH:
-		return &phy_db->calib_ch;
-	case IWL_PHY_DB_CALIB_CHG_PAPD:
-		if (chg_id < 0 || chg_id >= IWL_NUM_PAPD_CH_GROUPS)
-			return NULL;
-		return &phy_db->calib_ch_group_papd[chg_id];
-	case IWL_PHY_DB_CALIB_CHG_TXP:
-		if (chg_id < 0 || chg_id >= IWL_NUM_TXP_CH_GROUPS)
-			return NULL;
-		return &phy_db->calib_ch_group_txp[chg_id];
-	default:
-		return NULL;
-	}
-	return NULL;
-}
-
-static void iwl_phy_db_free_section(struct iwl_phy_db *phy_db,
-				    enum iwl_phy_db_section_type type,
-				    u16 chg_id)
-{
-	struct iwl_phy_db_entry *entry =
-				iwl_phy_db_get_section(phy_db, type, chg_id);
-	if (!entry)
-		return;
-
-	kfree(entry->data);
-	entry->data = NULL;
-	entry->size = 0;
-}
-
-void iwl_phy_db_free(struct iwl_phy_db *phy_db)
-{
-	int i;
-
-	if (!phy_db)
-		return;
-
-	iwl_phy_db_free_section(phy_db, IWL_PHY_DB_CFG, 0);
-	iwl_phy_db_free_section(phy_db, IWL_PHY_DB_CALIB_NCH, 0);
-	iwl_phy_db_free_section(phy_db, IWL_PHY_DB_CALIB_CH, 0);
-	for (i = 0; i < IWL_NUM_PAPD_CH_GROUPS; i++)
-		iwl_phy_db_free_section(phy_db, IWL_PHY_DB_CALIB_CHG_PAPD, i);
-	for (i = 0; i < IWL_NUM_TXP_CH_GROUPS; i++)
-		iwl_phy_db_free_section(phy_db, IWL_PHY_DB_CALIB_CHG_TXP, i);
-
-	kfree(phy_db);
-}
-
-int iwl_phy_db_set_section(struct iwl_phy_db *phy_db,
-			   enum iwl_phy_db_section_type type, u8 *data,
-			   u16 size, gfp_t alloc_ctx)
-{
-	struct iwl_phy_db_entry *entry;
-	u16 chg_id = 0;
-
-	if (!phy_db)
-		return -EINVAL;
-
-	if (type == IWL_PHY_DB_CALIB_CHG_PAPD ||
-	    type == IWL_PHY_DB_CALIB_CHG_TXP)
-		chg_id = le16_to_cpup((__le16 *)data);
-
-	entry = iwl_phy_db_get_section(phy_db, type, chg_id);
-	if (!entry)
-		return -EINVAL;
-
-	kfree(entry->data);
-	entry->data = kmemdup(data, size, alloc_ctx);
-	if (!entry->data) {
-		entry->size = 0;
-		return -ENOMEM;
-	}
-
-	entry->size = size;
-
-	if (type == IWL_PHY_DB_CALIB_CH) {
-		phy_db->channel_num = le32_to_cpup((__le32 *)data);
-		phy_db->channel_size =
-		      (size - CHANNEL_NUM_SIZE) / phy_db->channel_num;
-	}
-
-	return 0;
-}
-
-static int is_valid_channel(u16 ch_id)
-{
-	if (ch_id <= 14 ||
-	    (36 <= ch_id && ch_id <= 64 && ch_id % 4 == 0) ||
-	    (100 <= ch_id && ch_id <= 140 && ch_id % 4 == 0) ||
-	    (145 <= ch_id && ch_id <= 165 && ch_id % 4 == 1))
-		return 1;
-	return 0;
-}
-
-static u8 ch_id_to_ch_index(u16 ch_id)
-{
-	if (WARN_ON(!is_valid_channel(ch_id)))
-		return 0xff;
-
-	if (ch_id <= 14)
-		return ch_id - 1;
-	if (ch_id <= 64)
-		return (ch_id + 20) / 4;
-	if (ch_id <= 140)
-		return (ch_id - 12) / 4;
-	return (ch_id - 13) / 4;
-}
-
-
-static u16 channel_id_to_papd(u16 ch_id)
-{
-	if (WARN_ON(!is_valid_channel(ch_id)))
-		return 0xff;
-
-	if (1 <= ch_id && ch_id <= 14)
-		return 0;
-	if (36 <= ch_id && ch_id <= 64)
-		return 1;
-	if (100 <= ch_id && ch_id <= 140)
-		return 2;
-	return 3;
-}
-
-static u16 channel_id_to_txp(struct iwl_phy_db *phy_db, u16 ch_id)
-{
-	struct iwl_phy_db_chg_txp *txp_chg;
-	int i;
-	u8 ch_index = ch_id_to_ch_index(ch_id);
-	if (ch_index == 0xff)
-		return 0xff;
-
-	for (i = 0; i < IWL_NUM_TXP_CH_GROUPS; i++) {
-		txp_chg = (void *)phy_db->calib_ch_group_txp[i].data;
-		if (!txp_chg)
-			return 0xff;
-		/*
-		 * Looking for the first channel group that its max channel is
-		 * higher then wanted channel.
-		 */
-		if (le16_to_cpu(txp_chg->max_channel_idx) >= ch_index)
-			return i;
-	}
-	return 0xff;
-}
-
-int iwl_phy_db_get_section_data(struct iwl_phy_db *phy_db,
-				enum iwl_phy_db_section_type type, u8 **data,
-				u16 *size, u16 ch_id)
-{
-	struct iwl_phy_db_entry *entry;
-	u32 channel_num;
-	u32 channel_size;
-	u16 ch_group_id = 0;
-	u16 index;
-
-	if (!phy_db)
-		return -EINVAL;
-
-	/* find wanted channel group */
-	if (type == IWL_PHY_DB_CALIB_CHG_PAPD)
-		ch_group_id = channel_id_to_papd(ch_id);
-	else if (type == IWL_PHY_DB_CALIB_CHG_TXP)
-		ch_group_id = channel_id_to_txp(phy_db, ch_id);
-
-	entry = iwl_phy_db_get_section(phy_db, type, ch_group_id);
-	if (!entry)
-		return -EINVAL;
-
-	if (type == IWL_PHY_DB_CALIB_CH) {
-		index = ch_id_to_ch_index(ch_id);
-		channel_num = phy_db->channel_num;
-		channel_size = phy_db->channel_size;
-		if (index >= channel_num) {
-			IWL_ERR(phy_db, "Wrong channel number %d", ch_id);
-			return -EINVAL;
-		}
-		*data = entry->data + CHANNEL_NUM_SIZE + index * channel_size;
-		*size = channel_size;
-	} else {
-		*data = entry->data;
-		*size = entry->size;
-	}
-	return 0;
-}
diff --git a/drivers/net/wireless/iwlwifi/iwl-phy-db.h b/drivers/net/wireless/iwlwifi/iwl-phy-db.h
deleted file mode 100644
index c34c6a9..0000000
--- a/drivers/net/wireless/iwlwifi/iwl-phy-db.h
+++ /dev/null
@@ -1,129 +0,0 @@
-/******************************************************************************
- *
- * This file is provided under a dual BSD/GPLv2 license.  When using or
- * redistributing this file, you may do so under either license.
- *
- * GPL LICENSE SUMMARY
- *
- * Copyright(c) 2007 - 2012 Intel Corporation. All rights reserved.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
- * General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
- * USA
- *
- * The full GNU General Public License is included in this distribution
- * in the file called LICENSE.GPL.
- *
- * Contact Information:
- *  Intel Linux Wireless <ilw@linux.intel.com>
- * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
- *
- * BSD LICENSE
- *
- * Copyright(c) 2005 - 2012 Intel Corporation. All rights reserved.
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- *  * Redistributions of source code must retain the above copyright
- *    notice, this list of conditions and the following disclaimer.
- *  * Redistributions in binary form must reproduce the above copyright
- *    notice, this list of conditions and the following disclaimer in
- *    the documentation and/or other materials provided with the
- *    distribution.
- *  * Neither the name Intel Corporation nor the names of its
- *    contributors may be used to endorse or promote products derived
- *    from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- *
- *****************************************************************************/
-
-#ifndef __IWL_PHYDB_H__
-#define __IWL_PHYDB_H__
-
-#include <linux/types.h>
-
-#define IWL_NUM_PAPD_CH_GROUPS	4
-#define IWL_NUM_TXP_CH_GROUPS	8
-
-struct iwl_phy_db_entry {
-	u16	size;
-	u8	*data;
-};
-
-struct iwl_shared;
-
-/**
- * struct iwl_phy_db - stores phy configuration and calibration data.
- *
- * @cfg: phy configuration.
- * @calib_nch: non channel specific calibration data.
- * @calib_ch: channel specific calibration data.
- * @calib_ch_group_papd: calibration data related to papd channel group.
- * @calib_ch_group_txp: calibration data related to tx power chanel group.
- */
-struct iwl_phy_db {
-	struct iwl_phy_db_entry	cfg;
-	struct iwl_phy_db_entry	calib_nch;
-	struct iwl_phy_db_entry	calib_ch;
-	struct iwl_phy_db_entry	calib_ch_group_papd[IWL_NUM_PAPD_CH_GROUPS];
-	struct iwl_phy_db_entry	calib_ch_group_txp[IWL_NUM_TXP_CH_GROUPS];
-
-	u32 channel_num;
-	u32 channel_size;
-
-	/* for an access to the logger */
-	struct device *dev;
-};
-
-enum iwl_phy_db_section_type {
-	IWL_PHY_DB_CFG = 1,
-	IWL_PHY_DB_CALIB_NCH,
-	IWL_PHY_DB_CALIB_CH,
-	IWL_PHY_DB_CALIB_CHG_PAPD,
-	IWL_PHY_DB_CALIB_CHG_TXP,
-	IWL_PHY_DB_MAX
-};
-
-/* for parsing of tx power channel group data that comes from the firmware*/
-struct iwl_phy_db_chg_txp {
-	__le32 space;
-	__le16 max_channel_idx;
-} __packed;
-
-struct iwl_phy_db *iwl_phy_db_init(struct device *dev);
-
-void iwl_phy_db_free(struct iwl_phy_db *phy_db);
-
-int iwl_phy_db_set_section(struct iwl_phy_db *phy_db,
-			   enum iwl_phy_db_section_type type, u8 *data,
-			   u16 size, gfp_t alloc_ctx);
-
-int iwl_phy_db_get_section_data(struct iwl_phy_db *phy_db,
-				enum iwl_phy_db_section_type type, u8 **data,
-				u16 *size, u16 ch_id);
-
-#endif /* __IWL_PHYDB_H__ */
diff --git a/drivers/net/wireless/iwlwifi/iwl-prph.h b/drivers/net/wireless/iwlwifi/iwl-prph.h
index 3b106929..dfd5466 100644
--- a/drivers/net/wireless/iwlwifi/iwl-prph.h
+++ b/drivers/net/wireless/iwlwifi/iwl-prph.h
@@ -224,6 +224,7 @@
 #define SCD_TXFACT		(SCD_BASE + 0x10)
 #define SCD_ACTIVE		(SCD_BASE + 0x14)
 #define SCD_QUEUECHAIN_SEL	(SCD_BASE + 0xe8)
+#define SCD_CHAINEXT_EN		(SCD_BASE + 0x244)
 #define SCD_AGGR_SEL		(SCD_BASE + 0x248)
 #define SCD_INTERRUPT_MASK	(SCD_BASE + 0x108)
 
diff --git a/drivers/net/wireless/iwlwifi/iwl-trans-pcie-int.h b/drivers/net/wireless/iwlwifi/iwl-trans-pcie-int.h
index 6213c05..e959207 100644
--- a/drivers/net/wireless/iwlwifi/iwl-trans-pcie-int.h
+++ b/drivers/net/wireless/iwlwifi/iwl-trans-pcie-int.h
@@ -347,7 +347,7 @@
 void iwl_trans_pcie_tx_agg_setup(struct iwl_trans *trans, int queue, int fifo,
 				 int sta_id, int tid, int frame_limit, u16 ssn);
 void iwlagn_txq_free_tfd(struct iwl_trans *trans, struct iwl_tx_queue *txq,
-	int index, enum dma_data_direction dma_dir);
+			 enum dma_data_direction dma_dir);
 int iwl_tx_queue_reclaim(struct iwl_trans *trans, int txq_id, int index,
 			 struct sk_buff_head *skbs);
 int iwl_queue_space(const struct iwl_queue *q);
diff --git a/drivers/net/wireless/iwlwifi/iwl-trans-pcie-tx.c b/drivers/net/wireless/iwlwifi/iwl-trans-pcie-tx.c
index 21a8a67..a875023 100644
--- a/drivers/net/wireless/iwlwifi/iwl-trans-pcie-tx.c
+++ b/drivers/net/wireless/iwlwifi/iwl-trans-pcie-tx.c
@@ -204,33 +204,39 @@
 	for (i = 1; i < num_tbs; i++)
 		dma_unmap_single(trans->dev, iwl_tfd_tb_get_addr(tfd, i),
 				iwl_tfd_tb_get_len(tfd, i), dma_dir);
+
+	tfd->num_tbs = 0;
 }
 
 /**
  * iwlagn_txq_free_tfd - Free all chunks referenced by TFD [txq->q.read_ptr]
  * @trans - transport private data
  * @txq - tx queue
- * @index - the index of the TFD to be freed
- *@dma_dir - the direction of the DMA mapping
+ * @dma_dir - the direction of the DMA mapping
  *
  * Does NOT advance any TFD circular buffer read/write indexes
  * Does NOT free the TFD itself (which is within circular buffer)
  */
 void iwlagn_txq_free_tfd(struct iwl_trans *trans, struct iwl_tx_queue *txq,
-	int index, enum dma_data_direction dma_dir)
+			 enum dma_data_direction dma_dir)
 {
 	struct iwl_tfd *tfd_tmp = txq->tfds;
 
+	/* rd_ptr is bounded by n_bd and idx is bounded by n_window */
+	int rd_ptr = txq->q.read_ptr;
+	int idx = get_cmd_index(&txq->q, rd_ptr);
+
 	lockdep_assert_held(&txq->lock);
 
-	iwlagn_unmap_tfd(trans, &txq->entries[index].meta,
-			 &tfd_tmp[index], dma_dir);
+	/* We have only q->n_window txq->entries, but we use q->n_bd tfds */
+	iwlagn_unmap_tfd(trans, &txq->entries[idx].meta,
+			 &tfd_tmp[rd_ptr], dma_dir);
 
 	/* free SKB */
 	if (txq->entries) {
 		struct sk_buff *skb;
 
-		skb = txq->entries[index].skb;
+		skb = txq->entries[idx].skb;
 
 		/* Can be called from irqs-disabled context
 		 * If skb is not NULL, it means that the whole queue is being
@@ -238,7 +244,7 @@
 		 */
 		if (skb) {
 			iwl_op_mode_free_skb(trans->op_mode, skb);
-			txq->entries[index].skb = NULL;
+			txq->entries[idx].skb = NULL;
 		}
 	}
 }
@@ -973,7 +979,7 @@
 
 		iwlagn_txq_inval_byte_cnt_tbl(trans, txq);
 
-		iwlagn_txq_free_tfd(trans, txq, txq->q.read_ptr, DMA_TO_DEVICE);
+		iwlagn_txq_free_tfd(trans, txq, DMA_TO_DEVICE);
 		freed++;
 	}
 
diff --git a/drivers/net/wireless/iwlwifi/iwl-trans-pcie.c b/drivers/net/wireless/iwlwifi/iwl-trans-pcie.c
index 2e57161..79c6b91 100644
--- a/drivers/net/wireless/iwlwifi/iwl-trans-pcie.c
+++ b/drivers/net/wireless/iwlwifi/iwl-trans-pcie.c
@@ -435,9 +435,7 @@
 
 	spin_lock_bh(&txq->lock);
 	while (q->write_ptr != q->read_ptr) {
-		/* The read_ptr needs to bound by q->n_window */
-		iwlagn_txq_free_tfd(trans, txq, get_cmd_index(q, q->read_ptr),
-				    dma_dir);
+		iwlagn_txq_free_tfd(trans, txq, dma_dir);
 		q->read_ptr = iwl_queue_inc_wrap(q->read_ptr, q->n_bd);
 	}
 	spin_unlock_bh(&txq->lock);
@@ -1060,6 +1058,11 @@
 	iwl_write_prph(trans, SCD_DRAM_BASE_ADDR,
 		       trans_pcie->scd_bc_tbls.dma >> 10);
 
+	/* The chain extension of the SCD doesn't work well. This feature is
+	 * enabled by default by the HW, so we need to disable it manually.
+	 */
+	iwl_write_prph(trans, SCD_CHAINEXT_EN, 0);
+
 	/* Enable DMA channel */
 	for (chan = 0; chan < FH_TCSR_CHNL_NUM ; chan++)
 		iwl_write_direct32(trans, FH_TCSR_CHNL_TX_CONFIG_REG(chan),
diff --git a/drivers/net/wireless/mac80211_hwsim.c b/drivers/net/wireless/mac80211_hwsim.c
index fb787df..a0b7cfd 100644
--- a/drivers/net/wireless/mac80211_hwsim.c
+++ b/drivers/net/wireless/mac80211_hwsim.c
@@ -1555,6 +1555,7 @@
 			hdr = (struct ieee80211_hdr *) skb->data;
 			mac80211_hwsim_monitor_ack(data2->hw, hdr->addr2);
 		}
+		txi->flags |= IEEE80211_TX_STAT_ACK;
 	}
 	ieee80211_tx_status_irqsafe(data2->hw, skb);
 	return 0;
@@ -1721,6 +1722,24 @@
 		       "unregister family %i\n", ret);
 }
 
+static const struct ieee80211_iface_limit hwsim_if_limits[] = {
+	{ .max = 1, .types = BIT(NL80211_IFTYPE_ADHOC) },
+	{ .max = 2048,  .types = BIT(NL80211_IFTYPE_STATION) |
+				 BIT(NL80211_IFTYPE_P2P_CLIENT) |
+#ifdef CONFIG_MAC80211_MESH
+				 BIT(NL80211_IFTYPE_MESH_POINT) |
+#endif
+				 BIT(NL80211_IFTYPE_AP) |
+				 BIT(NL80211_IFTYPE_P2P_GO) },
+};
+
+static const struct ieee80211_iface_combination hwsim_if_comb = {
+	.limits = hwsim_if_limits,
+	.n_limits = ARRAY_SIZE(hwsim_if_limits),
+	.max_interfaces = 2048,
+	.num_different_channels = 1,
+};
+
 static int __init init_mac80211_hwsim(void)
 {
 	int i, err = 0;
@@ -1782,6 +1801,9 @@
 		hw->wiphy->n_addresses = 2;
 		hw->wiphy->addresses = data->addresses;
 
+		hw->wiphy->iface_combinations = &hwsim_if_comb;
+		hw->wiphy->n_iface_combinations = 1;
+
 		if (fake_hw_scan) {
 			hw->wiphy->max_scan_ssids = 255;
 			hw->wiphy->max_scan_ie_len = IEEE80211_MAX_DATA_LEN;
diff --git a/drivers/net/wireless/mwifiex/cfg80211.c b/drivers/net/wireless/mwifiex/cfg80211.c
index 8767144..ce61b6f 100644
--- a/drivers/net/wireless/mwifiex/cfg80211.c
+++ b/drivers/net/wireless/mwifiex/cfg80211.c
@@ -948,6 +948,19 @@
 		bss_cfg->ssid.ssid_len = params->ssid_len;
 	}
 
+	switch (params->hidden_ssid) {
+	case NL80211_HIDDEN_SSID_NOT_IN_USE:
+		bss_cfg->bcast_ssid_ctl = 1;
+		break;
+	case NL80211_HIDDEN_SSID_ZERO_LEN:
+		bss_cfg->bcast_ssid_ctl = 0;
+		break;
+	case NL80211_HIDDEN_SSID_ZERO_CONTENTS:
+		/* firmware doesn't support this type of hidden SSID */
+	default:
+		return -EINVAL;
+	}
+
 	if (mwifiex_set_secure_params(priv, bss_cfg, params)) {
 		kfree(bss_cfg);
 		wiphy_err(wiphy, "Failed to parse secuirty parameters!\n");
@@ -1471,7 +1484,7 @@
 	struct wireless_dev *wdev;
 
 	if (!adapter)
-		return NULL;
+		return ERR_PTR(-EFAULT);
 
 	switch (type) {
 	case NL80211_IFTYPE_UNSPECIFIED:
@@ -1481,12 +1494,12 @@
 		if (priv->bss_mode) {
 			wiphy_err(wiphy,
 				  "cannot create multiple sta/adhoc ifaces\n");
-			return NULL;
+			return ERR_PTR(-EINVAL);
 		}
 
 		wdev = kzalloc(sizeof(struct wireless_dev), GFP_KERNEL);
 		if (!wdev)
-			return NULL;
+			return ERR_PTR(-ENOMEM);
 
 		wdev->wiphy = wiphy;
 		priv->wdev = wdev;
@@ -1509,12 +1522,12 @@
 
 		if (priv->bss_mode) {
 			wiphy_err(wiphy, "Can't create multiple AP interfaces");
-			return NULL;
+			return ERR_PTR(-EINVAL);
 		}
 
 		wdev = kzalloc(sizeof(struct wireless_dev), GFP_KERNEL);
 		if (!wdev)
-			return NULL;
+			return ERR_PTR(-ENOMEM);
 
 		priv->wdev = wdev;
 		wdev->wiphy = wiphy;
@@ -1531,14 +1544,15 @@
 		break;
 	default:
 		wiphy_err(wiphy, "type not supported\n");
-		return NULL;
+		return ERR_PTR(-EINVAL);
 	}
 
 	dev = alloc_netdev_mq(sizeof(struct mwifiex_private *), name,
 			      ether_setup, 1);
 	if (!dev) {
 		wiphy_err(wiphy, "no memory available for netdevice\n");
-		goto error;
+		priv->bss_mode = NL80211_IFTYPE_UNSPECIFIED;
+		return ERR_PTR(-ENOMEM);
 	}
 
 	mwifiex_init_priv_params(priv, dev);
@@ -1569,7 +1583,9 @@
 	/* Register network device */
 	if (register_netdevice(dev)) {
 		wiphy_err(wiphy, "cannot register virtual network device\n");
-		goto error;
+		free_netdev(dev);
+		priv->bss_mode = NL80211_IFTYPE_UNSPECIFIED;
+		return ERR_PTR(-EFAULT);
 	}
 
 	sema_init(&priv->async_sem, 1);
@@ -1581,12 +1597,6 @@
 	mwifiex_dev_debugfs_init(priv);
 #endif
 	return dev;
-error:
-	if (dev && (dev->reg_state == NETREG_UNREGISTERED))
-		free_netdev(dev);
-	priv->bss_mode = NL80211_IFTYPE_UNSPECIFIED;
-
-	return NULL;
 }
 EXPORT_SYMBOL_GPL(mwifiex_add_virtual_intf);
 
diff --git a/drivers/net/wireless/mwifiex/fw.h b/drivers/net/wireless/mwifiex/fw.h
index 9f674bb..561452a 100644
--- a/drivers/net/wireless/mwifiex/fw.h
+++ b/drivers/net/wireless/mwifiex/fw.h
@@ -122,6 +122,7 @@
 #define TLV_TYPE_CHANNELBANDLIST    (PROPRIETARY_TLV_BASE_ID + 42)
 #define TLV_TYPE_UAP_BEACON_PERIOD  (PROPRIETARY_TLV_BASE_ID + 44)
 #define TLV_TYPE_UAP_DTIM_PERIOD    (PROPRIETARY_TLV_BASE_ID + 45)
+#define TLV_TYPE_UAP_BCAST_SSID     (PROPRIETARY_TLV_BASE_ID + 48)
 #define TLV_TYPE_UAP_RTS_THRESHOLD  (PROPRIETARY_TLV_BASE_ID + 51)
 #define TLV_TYPE_UAP_WPA_PASSPHRASE (PROPRIETARY_TLV_BASE_ID + 60)
 #define TLV_TYPE_UAP_ENCRY_PROTOCOL (PROPRIETARY_TLV_BASE_ID + 64)
@@ -1209,6 +1210,11 @@
 	u8 ssid[0];
 } __packed;
 
+struct host_cmd_tlv_bcast_ssid {
+	struct host_cmd_tlv tlv;
+	u8 bcast_ctl;
+} __packed;
+
 struct host_cmd_tlv_beacon_period {
 	struct host_cmd_tlv tlv;
 	__le16 period;
diff --git a/drivers/net/wireless/mwifiex/txrx.c b/drivers/net/wireless/mwifiex/txrx.c
index e2faec4..cecb272 100644
--- a/drivers/net/wireless/mwifiex/txrx.c
+++ b/drivers/net/wireless/mwifiex/txrx.c
@@ -161,15 +161,11 @@
 		goto done;
 
 	for (i = 0; i < adapter->priv_num; i++) {
-
 		tpriv = adapter->priv[i];
 
-		if ((GET_BSS_ROLE(tpriv) == MWIFIEX_BSS_ROLE_STA) &&
-		    (tpriv->media_connected)) {
-			if (netif_queue_stopped(tpriv->netdev))
-				mwifiex_wake_up_net_dev_queue(tpriv->netdev,
-							      adapter);
-		}
+		if (tpriv->media_connected &&
+		    netif_queue_stopped(tpriv->netdev))
+			mwifiex_wake_up_net_dev_queue(tpriv->netdev, adapter);
 	}
 done:
 	dev_kfree_skb_any(skb);
diff --git a/drivers/net/wireless/mwifiex/uap_cmd.c b/drivers/net/wireless/mwifiex/uap_cmd.c
index 76dfbc4..89f9a2a 100644
--- a/drivers/net/wireless/mwifiex/uap_cmd.c
+++ b/drivers/net/wireless/mwifiex/uap_cmd.c
@@ -27,6 +27,17 @@
 			      struct cfg80211_ap_settings *params) {
 	int i;
 
+	if (!params->privacy) {
+		bss_config->protocol = PROTOCOL_NO_SECURITY;
+		bss_config->key_mgmt = KEY_MGMT_NONE;
+		bss_config->wpa_cfg.length = 0;
+		priv->sec_info.wep_enabled = 0;
+		priv->sec_info.wpa_enabled = 0;
+		priv->sec_info.wpa2_enabled = 0;
+
+		return 0;
+	}
+
 	switch (params->auth_type) {
 	case NL80211_AUTHTYPE_OPEN_SYSTEM:
 		bss_config->auth_mode = WLAN_AUTH_OPEN;
@@ -132,6 +143,7 @@
 	struct host_cmd_tlv_dtim_period *dtim_period;
 	struct host_cmd_tlv_beacon_period *beacon_period;
 	struct host_cmd_tlv_ssid *ssid;
+	struct host_cmd_tlv_bcast_ssid *bcast_ssid;
 	struct host_cmd_tlv_channel_band *chan_band;
 	struct host_cmd_tlv_frag_threshold *frag_threshold;
 	struct host_cmd_tlv_rts_threshold *rts_threshold;
@@ -153,6 +165,14 @@
 		cmd_size += sizeof(struct host_cmd_tlv) +
 			    bss_cfg->ssid.ssid_len;
 		tlv += sizeof(struct host_cmd_tlv) + bss_cfg->ssid.ssid_len;
+
+		bcast_ssid = (struct host_cmd_tlv_bcast_ssid *)tlv;
+		bcast_ssid->tlv.type = cpu_to_le16(TLV_TYPE_UAP_BCAST_SSID);
+		bcast_ssid->tlv.len =
+				cpu_to_le16(sizeof(bcast_ssid->bcast_ctl));
+		bcast_ssid->bcast_ctl = bss_cfg->bcast_ssid_ctl;
+		cmd_size += sizeof(struct host_cmd_tlv_bcast_ssid);
+		tlv += sizeof(struct host_cmd_tlv_bcast_ssid);
 	}
 	if (bss_cfg->channel && bss_cfg->channel <= MAX_CHANNEL_BAND_BG) {
 		chan_band = (struct host_cmd_tlv_channel_band *)tlv;
@@ -416,6 +436,7 @@
 	if (!bss_cfg)
 		return -ENOMEM;
 
+	mwifiex_set_sys_config_invalid_data(bss_cfg);
 	bss_cfg->band_cfg = BAND_CONFIG_MANUAL;
 	bss_cfg->channel = channel;
 
diff --git a/drivers/net/wireless/rndis_wlan.c b/drivers/net/wireless/rndis_wlan.c
index 2e9e6af..dfcd02a 100644
--- a/drivers/net/wireless/rndis_wlan.c
+++ b/drivers/net/wireless/rndis_wlan.c
@@ -2110,7 +2110,7 @@
 	while (check_bssid_list_item(bssid, bssid_len, buf, len)) {
 		if (rndis_bss_info_update(usbdev, bssid) && match_bssid &&
 		    matched) {
-			if (!ether_addr_equal(bssid->mac, match_bssid))
+			if (ether_addr_equal(bssid->mac, match_bssid))
 				*matched = true;
 		}
 
diff --git a/drivers/net/wireless/rt2x00/rt2x00.h b/drivers/net/wireless/rt2x00/rt2x00.h
index ca36ccc..8f75402 100644
--- a/drivers/net/wireless/rt2x00/rt2x00.h
+++ b/drivers/net/wireless/rt2x00/rt2x00.h
@@ -396,8 +396,7 @@
 	 * for hardware which doesn't support hardware
 	 * sequence counting.
 	 */
-	spinlock_t seqlock;
-	u16 seqno;
+	atomic_t seqno;
 };
 
 static inline struct rt2x00_intf* vif_to_intf(struct ieee80211_vif *vif)
diff --git a/drivers/net/wireless/rt2x00/rt2x00mac.c b/drivers/net/wireless/rt2x00/rt2x00mac.c
index b49773e..dd24b26 100644
--- a/drivers/net/wireless/rt2x00/rt2x00mac.c
+++ b/drivers/net/wireless/rt2x00/rt2x00mac.c
@@ -277,7 +277,6 @@
 	else
 		rt2x00dev->intf_sta_count++;
 
-	spin_lock_init(&intf->seqlock);
 	mutex_init(&intf->beacon_skb_mutex);
 	intf->beacon = entry;
 
diff --git a/drivers/net/wireless/rt2x00/rt2x00queue.c b/drivers/net/wireless/rt2x00/rt2x00queue.c
index 4c662ec..2fd8301 100644
--- a/drivers/net/wireless/rt2x00/rt2x00queue.c
+++ b/drivers/net/wireless/rt2x00/rt2x00queue.c
@@ -207,6 +207,7 @@
 	struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb);
 	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
 	struct rt2x00_intf *intf = vif_to_intf(tx_info->control.vif);
+	u16 seqno;
 
 	if (!(tx_info->flags & IEEE80211_TX_CTL_ASSIGN_SEQ))
 		return;
@@ -238,15 +239,13 @@
 	 * sequence counting per-frame, since those will override the
 	 * sequence counter given by mac80211.
 	 */
-	spin_lock(&intf->seqlock);
-
 	if (test_bit(ENTRY_TXD_FIRST_FRAGMENT, &txdesc->flags))
-		intf->seqno += 0x10;
+		seqno = atomic_add_return(0x10, &intf->seqno);
+	else
+		seqno = atomic_read(&intf->seqno);
+
 	hdr->seq_ctrl &= cpu_to_le16(IEEE80211_SCTL_FRAG);
-	hdr->seq_ctrl |= cpu_to_le16(intf->seqno);
-
-	spin_unlock(&intf->seqlock);
-
+	hdr->seq_ctrl |= cpu_to_le16(seqno);
 }
 
 static void rt2x00queue_create_tx_descriptor_plcp(struct rt2x00_dev *rt2x00dev,
diff --git a/drivers/net/wireless/rtl818x/rtl8187/leds.c b/drivers/net/wireless/rtl818x/rtl8187/leds.c
index 2e0de2f..c2d5b49 100644
--- a/drivers/net/wireless/rtl818x/rtl8187/leds.c
+++ b/drivers/net/wireless/rtl818x/rtl8187/leds.c
@@ -117,7 +117,7 @@
 			radio_on = true;
 		} else if (radio_on) {
 			radio_on = false;
-			cancel_delayed_work_sync(&priv->led_on);
+			cancel_delayed_work(&priv->led_on);
 			ieee80211_queue_delayed_work(hw, &priv->led_off, 0);
 		}
 	} else if (radio_on) {
diff --git a/drivers/net/wireless/ti/wl1251/acx.c b/drivers/net/wireless/ti/wl1251/acx.c
index ad87a1a..db6430c 100644
--- a/drivers/net/wireless/ti/wl1251/acx.c
+++ b/drivers/net/wireless/ti/wl1251/acx.c
@@ -869,7 +869,7 @@
 	}
 
 	*mactime = tsf_info->current_tsf_lsb |
-		(tsf_info->current_tsf_msb << 31);
+		((u64)tsf_info->current_tsf_msb << 32);
 
 out:
 	kfree(tsf_info);
diff --git a/drivers/net/wireless/ti/wl1251/event.c b/drivers/net/wireless/ti/wl1251/event.c
index 9f15cca..5ec50a4 100644
--- a/drivers/net/wireless/ti/wl1251/event.c
+++ b/drivers/net/wireless/ti/wl1251/event.c
@@ -76,8 +76,7 @@
 		}
 	}
 
-	if (vector & SYNCHRONIZATION_TIMEOUT_EVENT_ID &&
-	    wl->station_mode != STATION_ACTIVE_MODE) {
+	if (vector & SYNCHRONIZATION_TIMEOUT_EVENT_ID) {
 		wl1251_debug(DEBUG_EVENT, "SYNCHRONIZATION_TIMEOUT_EVENT");
 
 		/* indicate to the stack, that beacons have been lost */
diff --git a/drivers/net/wireless/ti/wl1251/sdio.c b/drivers/net/wireless/ti/wl1251/sdio.c
index 1b851f6..e2750a1 100644
--- a/drivers/net/wireless/ti/wl1251/sdio.c
+++ b/drivers/net/wireless/ti/wl1251/sdio.c
@@ -260,6 +260,7 @@
 	}
 
 	if (wl->irq) {
+		irq_set_status_flags(wl->irq, IRQ_NOAUTOEN);
 		ret = request_irq(wl->irq, wl1251_line_irq, 0, "wl1251", wl);
 		if (ret < 0) {
 			wl1251_error("request_irq() failed: %d", ret);
@@ -267,7 +268,6 @@
 		}
 
 		irq_set_irq_type(wl->irq, IRQ_TYPE_EDGE_RISING);
-		disable_irq(wl->irq);
 
 		wl1251_sdio_ops.enable_irq = wl1251_enable_line_irq;
 		wl1251_sdio_ops.disable_irq = wl1251_disable_line_irq;
diff --git a/drivers/net/wireless/ti/wl1251/spi.c b/drivers/net/wireless/ti/wl1251/spi.c
index 6248c35..567660c 100644
--- a/drivers/net/wireless/ti/wl1251/spi.c
+++ b/drivers/net/wireless/ti/wl1251/spi.c
@@ -73,6 +73,8 @@
 	spi_sync(wl_to_spi(wl), &m);
 
 	wl1251_dump(DEBUG_SPI, "spi reset -> ", cmd, WSPI_INIT_CMD_LEN);
+
+	kfree(cmd);
 }
 
 static void wl1251_spi_wake(struct wl1251 *wl)
@@ -127,6 +129,8 @@
 	spi_sync(wl_to_spi(wl), &m);
 
 	wl1251_dump(DEBUG_SPI, "spi init -> ", cmd, WSPI_INIT_CMD_LEN);
+
+	kfree(cmd);
 }
 
 static void wl1251_spi_reset_wake(struct wl1251 *wl)
@@ -281,6 +285,7 @@
 
 	wl->use_eeprom = pdata->use_eeprom;
 
+	irq_set_status_flags(wl->irq, IRQ_NOAUTOEN);
 	ret = request_irq(wl->irq, wl1251_irq, 0, DRIVER_NAME, wl);
 	if (ret < 0) {
 		wl1251_error("request_irq() failed: %d", ret);
@@ -289,8 +294,6 @@
 
 	irq_set_irq_type(wl->irq, IRQ_TYPE_EDGE_RISING);
 
-	disable_irq(wl->irq);
-
 	ret = wl1251_init_ieee80211(wl);
 	if (ret)
 		goto out_irq;
diff --git a/drivers/net/wireless/ti/wlcore/acx.c b/drivers/net/wireless/ti/wlcore/acx.c
index 509aa88..f3d6fa5 100644
--- a/drivers/net/wireless/ti/wlcore/acx.c
+++ b/drivers/net/wireless/ti/wlcore/acx.c
@@ -1715,6 +1715,7 @@
 
 }
 
+#ifdef CONFIG_PM
 /* Set the global behaviour of RX filters - On/Off + default action */
 int wl1271_acx_default_rx_filter_enable(struct wl1271 *wl, bool enable,
 					enum rx_filter_action action)
@@ -1794,3 +1795,4 @@
 	kfree(acx);
 	return ret;
 }
+#endif /* CONFIG_PM */
diff --git a/drivers/net/wireless/ti/wlcore/acx.h b/drivers/net/wireless/ti/wlcore/acx.h
index 8106b2e..e6a7486 100644
--- a/drivers/net/wireless/ti/wlcore/acx.h
+++ b/drivers/net/wireless/ti/wlcore/acx.h
@@ -1330,9 +1330,11 @@
 int wl1271_acx_fm_coex(struct wl1271 *wl);
 int wl12xx_acx_set_rate_mgmt_params(struct wl1271 *wl);
 int wl12xx_acx_config_hangover(struct wl1271 *wl);
+
+#ifdef CONFIG_PM
 int wl1271_acx_default_rx_filter_enable(struct wl1271 *wl, bool enable,
 					enum rx_filter_action action);
 int wl1271_acx_set_rx_filter(struct wl1271 *wl, u8 index, bool enable,
 			     struct wl12xx_rx_filter *filter);
-
+#endif /* CONFIG_PM */
 #endif /* __WL1271_ACX_H__ */
diff --git a/drivers/net/wireless/ti/wlcore/rx.c b/drivers/net/wireless/ti/wlcore/rx.c
index 1f1d948..d6a3c6b 100644
--- a/drivers/net/wireless/ti/wlcore/rx.c
+++ b/drivers/net/wireless/ti/wlcore/rx.c
@@ -279,6 +279,7 @@
 	wl12xx_rearm_rx_streaming(wl, active_hlids);
 }
 
+#ifdef CONFIG_PM
 int wl1271_rx_filter_enable(struct wl1271 *wl,
 			    int index, bool enable,
 			    struct wl12xx_rx_filter *filter)
@@ -314,3 +315,4 @@
 		wl1271_rx_filter_enable(wl, i, 0, NULL);
 	}
 }
+#endif /* CONFIG_PM */
diff --git a/drivers/net/xen-netback/netback.c b/drivers/net/xen-netback/netback.c
index 2596401..f4a6fca 100644
--- a/drivers/net/xen-netback/netback.c
+++ b/drivers/net/xen-netback/netback.c
@@ -325,8 +325,7 @@
 	unsigned int count;
 	int i, copy_off;
 
-	count = DIV_ROUND_UP(
-			offset_in_page(skb->data)+skb_headlen(skb), PAGE_SIZE);
+	count = DIV_ROUND_UP(skb_headlen(skb), PAGE_SIZE);
 
 	copy_off = skb_headlen(skb) % PAGE_SIZE;
 
diff --git a/drivers/net/xen-netfront.c b/drivers/net/xen-netfront.c
index 2027afe..3089990 100644
--- a/drivers/net/xen-netfront.c
+++ b/drivers/net/xen-netfront.c
@@ -1935,14 +1935,14 @@
 
 	dev_dbg(&dev->dev, "%s\n", dev->nodename);
 
-	unregister_netdev(info->netdev);
-
 	xennet_disconnect_backend(info);
 
-	del_timer_sync(&info->rx_refill_timer);
-
 	xennet_sysfs_delif(info->netdev);
 
+	unregister_netdev(info->netdev);
+
+	del_timer_sync(&info->rx_refill_timer);
+
 	free_percpu(info->stats);
 
 	free_netdev(info->netdev);
diff --git a/drivers/nfc/pn544_hci.c b/drivers/nfc/pn544_hci.c
index 46f4a9f..281f18c 100644
--- a/drivers/nfc/pn544_hci.c
+++ b/drivers/nfc/pn544_hci.c
@@ -232,7 +232,7 @@
 
 static int check_crc(u8 *buf, int buflen)
 {
-	u8 len;
+	int len;
 	u16 crc;
 
 	len = buf[0] + 1;
diff --git a/drivers/of/of_i2c.c b/drivers/of/of_i2c.c
index f37fbeb..1e173f3 100644
--- a/drivers/of/of_i2c.c
+++ b/drivers/of/of_i2c.c
@@ -90,8 +90,22 @@
 	if (!dev)
 		return NULL;
 
-	return to_i2c_client(dev);
+	return i2c_verify_client(dev);
 }
 EXPORT_SYMBOL(of_find_i2c_device_by_node);
 
+/* must call put_device() when done with returned i2c_adapter device */
+struct i2c_adapter *of_find_i2c_adapter_by_node(struct device_node *node)
+{
+	struct device *dev;
+
+	dev = bus_find_device(&i2c_bus_type, NULL, node,
+					 of_dev_node_match);
+	if (!dev)
+		return NULL;
+
+	return i2c_verify_adapter(dev);
+}
+EXPORT_SYMBOL(of_find_i2c_adapter_by_node);
+
 MODULE_LICENSE("GPL");
diff --git a/drivers/of/of_pci_irq.c b/drivers/of/of_pci_irq.c
index 9312516..6770538 100644
--- a/drivers/of/of_pci_irq.c
+++ b/drivers/of/of_pci_irq.c
@@ -15,7 +15,7 @@
  * PCI tree until a device-node is found, at which point it will finish
  * resolving using the OF tree walking.
  */
-int of_irq_map_pci(struct pci_dev *pdev, struct of_irq *out_irq)
+int of_irq_map_pci(const struct pci_dev *pdev, struct of_irq *out_irq)
 {
 	struct device_node *dn, *ppnode;
 	struct pci_dev *ppdev;
diff --git a/drivers/oprofile/oprofile_perf.c b/drivers/oprofile/oprofile_perf.c
index da14432..efc4b7f 100644
--- a/drivers/oprofile/oprofile_perf.c
+++ b/drivers/oprofile/oprofile_perf.c
@@ -25,7 +25,7 @@
 static DEFINE_MUTEX(oprofile_perf_mutex);
 
 static struct op_counter_config *counter_config;
-static struct perf_event **perf_events[nr_cpumask_bits];
+static struct perf_event **perf_events[NR_CPUS];
 static int num_counters;
 
 /*
diff --git a/drivers/pci/pci.c b/drivers/pci/pci.c
index 8f16900..77cb54a 100644
--- a/drivers/pci/pci.c
+++ b/drivers/pci/pci.c
@@ -1744,6 +1744,11 @@
 	if (target_state == PCI_POWER_ERROR)
 		return -EIO;
 
+	/* Some devices mustn't be in D3 during system sleep */
+	if (target_state == PCI_D3hot &&
+			(dev->dev_flags & PCI_DEV_FLAGS_NO_D3_DURING_SLEEP))
+		return 0;
+
 	pci_enable_wake(dev, target_state, device_may_wakeup(&dev->dev));
 
 	error = pci_set_power_state(dev, target_state);
@@ -2370,7 +2375,7 @@
  * number is always 0 (see the Implementation Note in section 2.2.8.1 of
  * the PCI Express Base Specification, Revision 2.1)
  */
-u8 pci_swizzle_interrupt_pin(struct pci_dev *dev, u8 pin)
+u8 pci_swizzle_interrupt_pin(const struct pci_dev *dev, u8 pin)
 {
 	int slot;
 
diff --git a/drivers/pci/quirks.c b/drivers/pci/quirks.c
index 2a75216..194b243a 100644
--- a/drivers/pci/quirks.c
+++ b/drivers/pci/quirks.c
@@ -2929,6 +2929,32 @@
 DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x0102, disable_igfx_irq);
 DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x010a, disable_igfx_irq);
 
+/*
+ * The Intel 6 Series/C200 Series chipset's EHCI controllers on many
+ * ASUS motherboards will cause memory corruption or a system crash
+ * if they are in D3 while the system is put into S3 sleep.
+ */
+static void __devinit asus_ehci_no_d3(struct pci_dev *dev)
+{
+	const char *sys_info;
+	static const char good_Asus_board[] = "P8Z68-V";
+
+	if (dev->dev_flags & PCI_DEV_FLAGS_NO_D3_DURING_SLEEP)
+		return;
+	if (dev->subsystem_vendor != PCI_VENDOR_ID_ASUSTEK)
+		return;
+	sys_info = dmi_get_system_info(DMI_BOARD_NAME);
+	if (sys_info && memcmp(sys_info, good_Asus_board,
+			sizeof(good_Asus_board) - 1) == 0)
+		return;
+
+	dev_info(&dev->dev, "broken D3 during system sleep on ASUS\n");
+	dev->dev_flags |= PCI_DEV_FLAGS_NO_D3_DURING_SLEEP;
+	device_set_wakeup_capable(&dev->dev, false);
+}
+DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x1c26, asus_ehci_no_d3);
+DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x1c2d, asus_ehci_no_d3);
+
 static void pci_do_fixups(struct pci_dev *dev, struct pci_fixup *f,
 			  struct pci_fixup *end)
 {
diff --git a/drivers/pinctrl/core.c b/drivers/pinctrl/core.c
index c3b331b..0cc053a 100644
--- a/drivers/pinctrl/core.c
+++ b/drivers/pinctrl/core.c
@@ -61,7 +61,7 @@
 	list_for_each_entry(_maps_node_, &pinctrl_maps, node) \
 		for (_i_ = 0, _map_ = &_maps_node_->maps[_i_]; \
 			_i_ < _maps_node_->num_maps; \
-			i++, _map_ = &_maps_node_->maps[_i_])
+			_i_++, _map_ = &_maps_node_->maps[_i_])
 
 /**
  * pinctrl_provide_dummies() - indicate if pinctrl provides dummy state support
diff --git a/drivers/pinctrl/pinctrl-imx.c b/drivers/pinctrl/pinctrl-imx.c
index f6e7c67..dd6d93a 100644
--- a/drivers/pinctrl/pinctrl-imx.c
+++ b/drivers/pinctrl/pinctrl-imx.c
@@ -27,16 +27,16 @@
 #include "core.h"
 #include "pinctrl-imx.h"
 
-#define IMX_PMX_DUMP(info, p, m, c, n)		\
-{						\
-	int i, j;				\
-	printk("Format: Pin Mux Config\n");	\
-	for (i = 0; i < n; i++) {		\
-		j = p[i];			\
-		printk("%s %d 0x%lx\n",		\
-			info->pins[j].name,	\
-			m[i], c[i]);		\
-	}					\
+#define IMX_PMX_DUMP(info, p, m, c, n)			\
+{							\
+	int i, j;					\
+	printk(KERN_DEBUG "Format: Pin Mux Config\n");	\
+	for (i = 0; i < n; i++) {			\
+		j = p[i];				\
+		printk(KERN_DEBUG "%s %d 0x%lx\n",	\
+			info->pins[j].name,		\
+			m[i], c[i]);			\
+	}						\
 }
 
 /* The bits in CONFIG cell are defined in the binding doc */
@@ -173,8 +173,10 @@
 
 	/* create mux map */
 	parent = of_get_parent(np);
-	if (!parent)
+	if (!parent) {
+		kfree(new_map);
 		return -EINVAL;
+	}
 	new_map[0].type = PIN_MAP_TYPE_MUX_GROUP;
 	new_map[0].data.mux.function = parent->name;
 	new_map[0].data.mux.group = np->name;
@@ -193,7 +195,7 @@
 	}
 
 	dev_dbg(pctldev->dev, "maps: function %s group %s num %d\n",
-		new_map->data.mux.function, new_map->data.mux.group, map_num);
+		(*map)->data.mux.function, (*map)->data.mux.group, map_num);
 
 	return 0;
 }
@@ -201,10 +203,7 @@
 static void imx_dt_free_map(struct pinctrl_dev *pctldev,
 				struct pinctrl_map *map, unsigned num_maps)
 {
-	int i;
-
-	for (i = 0; i < num_maps; i++)
-		kfree(map);
+	kfree(map);
 }
 
 static struct pinctrl_ops imx_pctrl_ops = {
@@ -475,9 +474,8 @@
 		grp->configs[j] = config & ~IMX_PAD_SION;
 	}
 
-#ifdef DEBUG
 	IMX_PMX_DUMP(info, grp->pins, grp->mux_mode, grp->configs, grp->npins);
-#endif
+
 	return 0;
 }
 
diff --git a/drivers/pinctrl/pinctrl-mxs.c b/drivers/pinctrl/pinctrl-mxs.c
index 556e45a..4ba4636 100644
--- a/drivers/pinctrl/pinctrl-mxs.c
+++ b/drivers/pinctrl/pinctrl-mxs.c
@@ -107,8 +107,10 @@
 
 		/* Compose group name */
 		group = kzalloc(length, GFP_KERNEL);
-		if (!group)
-			return -ENOMEM;
+		if (!group) {
+			ret = -ENOMEM;
+			goto free;
+		}
 		snprintf(group, length, "%s.%d", np->name, reg);
 		new_map[i].data.mux.group = group;
 		i++;
@@ -118,7 +120,7 @@
 		pconfig = kmemdup(&config, sizeof(config), GFP_KERNEL);
 		if (!pconfig) {
 			ret = -ENOMEM;
-			goto free;
+			goto free_group;
 		}
 
 		new_map[i].type = PIN_MAP_TYPE_CONFIGS_GROUP;
@@ -133,6 +135,9 @@
 
 	return 0;
 
+free_group:
+	if (!purecfg)
+		kfree(group);
 free:
 	kfree(new_map);
 	return ret;
@@ -511,6 +516,7 @@
 	return 0;
 
 err:
+	platform_set_drvdata(pdev, NULL);
 	iounmap(d->base);
 	return ret;
 }
@@ -520,6 +526,7 @@
 {
 	struct mxs_pinctrl_data *d = platform_get_drvdata(pdev);
 
+	platform_set_drvdata(pdev, NULL);
 	pinctrl_unregister(d->pctl);
 	iounmap(d->base);
 
diff --git a/drivers/pinctrl/pinctrl-nomadik.c b/drivers/pinctrl/pinctrl-nomadik.c
index b8e01c3..3e7e47d 100644
--- a/drivers/pinctrl/pinctrl-nomadik.c
+++ b/drivers/pinctrl/pinctrl-nomadik.c
@@ -24,6 +24,7 @@
 #include <linux/irq.h>
 #include <linux/irqdomain.h>
 #include <linux/slab.h>
+#include <linux/of_device.h>
 #include <linux/pinctrl/pinctrl.h>
 #include <linux/pinctrl/pinmux.h>
 #include <linux/pinctrl/pinconf.h>
@@ -672,7 +673,7 @@
 	 * wakeup is anyhow controlled by the RIMSC and FIMSC registers.
 	 */
 	if (nmk_chip->sleepmode && on) {
-		__nmk_gpio_set_slpm(nmk_chip, gpio % nmk_chip->chip.base,
+		__nmk_gpio_set_slpm(nmk_chip, gpio % NMK_GPIO_PER_CHIP,
 				    NMK_GPIO_SLPM_WAKEUP_ENABLE);
 	}
 
@@ -1245,6 +1246,7 @@
 		ret = PTR_ERR(clk);
 		goto out_unmap;
 	}
+	clk_prepare(clk);
 
 	nmk_chip = kzalloc(sizeof(*nmk_chip), GFP_KERNEL);
 	if (!nmk_chip) {
@@ -1436,7 +1438,27 @@
 
 	dev_dbg(npct->dev, "enable group %s, %u pins\n", g->name, g->npins);
 
-	/* Handle this special glitch on altfunction C */
+	/*
+	 * If we're setting altfunc C by setting both AFSLA and AFSLB to 1,
+	 * we may pass through an undesired state. In this case we take
+	 * some extra care.
+	 *
+	 * Safe sequence used to switch IOs between GPIO and Alternate-C mode:
+	 *  - Save SLPM registers (since we have a shadow register in the
+	 *    nmk_chip we're using that as backup)
+	 *  - Set SLPM=0 for the IOs you want to switch and others to 1
+	 *  - Configure the GPIO registers for the IOs that are being switched
+	 *  - Set IOFORCE=1
+	 *  - Modify the AFLSA/B registers for the IOs that are being switched
+	 *  - Set IOFORCE=0
+	 *  - Restore SLPM registers
+	 *  - Any spurious wake-up event during the switch sequence is to be
+	 *    ignored and cleared
+	 *
+	 * We REALLY need to save ALL slpm registers, because the external
+	 * IOFORCE will switch *all* ports to their sleepmode setting so as
+	 * to avoid glitches. (Not just one port!)
+	 */
 	glitch = (g->altsetting == NMK_GPIO_ALT_C);
 
 	if (glitch) {
@@ -1688,18 +1710,34 @@
 	.owner = THIS_MODULE,
 };
 
+static const struct of_device_id nmk_pinctrl_match[] = {
+	{
+		.compatible = "stericsson,nmk_pinctrl",
+		.data = (void *)PINCTRL_NMK_DB8500,
+	},
+	{},
+};
+
 static int __devinit nmk_pinctrl_probe(struct platform_device *pdev)
 {
 	const struct platform_device_id *platid = platform_get_device_id(pdev);
+	struct device_node *np = pdev->dev.of_node;
 	struct nmk_pinctrl *npct;
+	unsigned int version = 0;
 	int i;
 
 	npct = devm_kzalloc(&pdev->dev, sizeof(*npct), GFP_KERNEL);
 	if (!npct)
 		return -ENOMEM;
 
+	if (platid)
+		version = platid->driver_data;
+	else if (np)
+		version = (unsigned int)
+			of_match_device(nmk_pinctrl_match, &pdev->dev)->data;
+
 	/* Poke in other ASIC variants here */
-	if (platid->driver_data == PINCTRL_NMK_DB8500)
+	if (version == PINCTRL_NMK_DB8500)
 		nmk_pinctrl_db8500_init(&npct->soc);
 
 	/*
@@ -1758,6 +1796,7 @@
 	.driver = {
 		.owner = THIS_MODULE,
 		.name = "pinctrl-nomadik",
+		.of_match_table = nmk_pinctrl_match,
 	},
 	.probe = nmk_pinctrl_probe,
 	.id_table = nmk_pinctrl_id,
diff --git a/drivers/pinctrl/pinctrl-sirf.c b/drivers/pinctrl/pinctrl-sirf.c
index ba15b1a..e9f8e7d 100644
--- a/drivers/pinctrl/pinctrl-sirf.c
+++ b/drivers/pinctrl/pinctrl-sirf.c
@@ -1184,7 +1184,7 @@
 	return ret;
 }
 
-static const struct of_device_id pinmux_ids[]  = {
+static const struct of_device_id pinmux_ids[] __devinitconst = {
 	{ .compatible = "sirf,prima2-gpio-pinmux" },
 	{}
 };
diff --git a/drivers/pinctrl/spear/Kconfig b/drivers/pinctrl/spear/Kconfig
index 6a2596b..91558791 100644
--- a/drivers/pinctrl/spear/Kconfig
+++ b/drivers/pinctrl/spear/Kconfig
@@ -31,4 +31,14 @@
 	depends on MACH_SPEAR320
 	select PINCTRL_SPEAR3XX
 
+config PINCTRL_SPEAR1310
+	bool "ST Microelectronics SPEAr1310 SoC pin controller driver"
+	depends on MACH_SPEAR1310
+	select PINCTRL_SPEAR
+
+config PINCTRL_SPEAR1340
+	bool "ST Microelectronics SPEAr1340 SoC pin controller driver"
+	depends on MACH_SPEAR1340
+	select PINCTRL_SPEAR
+
 endif
diff --git a/drivers/pinctrl/spear/Makefile b/drivers/pinctrl/spear/Makefile
index 15dcb85..b28a7ba 100644
--- a/drivers/pinctrl/spear/Makefile
+++ b/drivers/pinctrl/spear/Makefile
@@ -5,3 +5,5 @@
 obj-$(CONFIG_PINCTRL_SPEAR300)	+= pinctrl-spear300.o
 obj-$(CONFIG_PINCTRL_SPEAR310)	+= pinctrl-spear310.o
 obj-$(CONFIG_PINCTRL_SPEAR320)	+= pinctrl-spear320.o
+obj-$(CONFIG_PINCTRL_SPEAR1310)	+= pinctrl-spear1310.o
+obj-$(CONFIG_PINCTRL_SPEAR1340)	+= pinctrl-spear1340.o
diff --git a/drivers/pinctrl/spear/pinctrl-spear.c b/drivers/pinctrl/spear/pinctrl-spear.c
index 5ae50aa..b3f6b28 100644
--- a/drivers/pinctrl/spear/pinctrl-spear.c
+++ b/drivers/pinctrl/spear/pinctrl-spear.c
@@ -2,7 +2,7 @@
  * Driver for the ST Microelectronics SPEAr pinmux
  *
  * Copyright (C) 2012 ST Microelectronics
- * Viresh Kumar <viresh.kumar@st.com>
+ * Viresh Kumar <viresh.linux@gmail.com>
  *
  * Inspired from:
  * - U300 Pinctl drivers
diff --git a/drivers/pinctrl/spear/pinctrl-spear.h b/drivers/pinctrl/spear/pinctrl-spear.h
index 47a6b5b..d950eb7 100644
--- a/drivers/pinctrl/spear/pinctrl-spear.h
+++ b/drivers/pinctrl/spear/pinctrl-spear.h
@@ -2,7 +2,7 @@
  * Driver header file for the ST Microelectronics SPEAr pinmux
  *
  * Copyright (C) 2012 ST Microelectronics
- * Viresh Kumar <viresh.kumar@st.com>
+ * Viresh Kumar <viresh.linux@gmail.com>
  *
  * This file is licensed under the terms of the GNU General Public
  * License version 2. This program is licensed "as is" without any
@@ -139,4 +139,255 @@
 int __devinit spear_pinctrl_probe(struct platform_device *pdev,
 		struct spear_pinctrl_machdata *machdata);
 int __devexit spear_pinctrl_remove(struct platform_device *pdev);
+
+#define SPEAR_PIN_0_TO_101		\
+	PINCTRL_PIN(0, "PLGPIO0"),	\
+	PINCTRL_PIN(1, "PLGPIO1"),	\
+	PINCTRL_PIN(2, "PLGPIO2"),	\
+	PINCTRL_PIN(3, "PLGPIO3"),	\
+	PINCTRL_PIN(4, "PLGPIO4"),	\
+	PINCTRL_PIN(5, "PLGPIO5"),	\
+	PINCTRL_PIN(6, "PLGPIO6"),	\
+	PINCTRL_PIN(7, "PLGPIO7"),	\
+	PINCTRL_PIN(8, "PLGPIO8"),	\
+	PINCTRL_PIN(9, "PLGPIO9"),	\
+	PINCTRL_PIN(10, "PLGPIO10"),	\
+	PINCTRL_PIN(11, "PLGPIO11"),	\
+	PINCTRL_PIN(12, "PLGPIO12"),	\
+	PINCTRL_PIN(13, "PLGPIO13"),	\
+	PINCTRL_PIN(14, "PLGPIO14"),	\
+	PINCTRL_PIN(15, "PLGPIO15"),	\
+	PINCTRL_PIN(16, "PLGPIO16"),	\
+	PINCTRL_PIN(17, "PLGPIO17"),	\
+	PINCTRL_PIN(18, "PLGPIO18"),	\
+	PINCTRL_PIN(19, "PLGPIO19"),	\
+	PINCTRL_PIN(20, "PLGPIO20"),	\
+	PINCTRL_PIN(21, "PLGPIO21"),	\
+	PINCTRL_PIN(22, "PLGPIO22"),	\
+	PINCTRL_PIN(23, "PLGPIO23"),	\
+	PINCTRL_PIN(24, "PLGPIO24"),	\
+	PINCTRL_PIN(25, "PLGPIO25"),	\
+	PINCTRL_PIN(26, "PLGPIO26"),	\
+	PINCTRL_PIN(27, "PLGPIO27"),	\
+	PINCTRL_PIN(28, "PLGPIO28"),	\
+	PINCTRL_PIN(29, "PLGPIO29"),	\
+	PINCTRL_PIN(30, "PLGPIO30"),	\
+	PINCTRL_PIN(31, "PLGPIO31"),	\
+	PINCTRL_PIN(32, "PLGPIO32"),	\
+	PINCTRL_PIN(33, "PLGPIO33"),	\
+	PINCTRL_PIN(34, "PLGPIO34"),	\
+	PINCTRL_PIN(35, "PLGPIO35"),	\
+	PINCTRL_PIN(36, "PLGPIO36"),	\
+	PINCTRL_PIN(37, "PLGPIO37"),	\
+	PINCTRL_PIN(38, "PLGPIO38"),	\
+	PINCTRL_PIN(39, "PLGPIO39"),	\
+	PINCTRL_PIN(40, "PLGPIO40"),	\
+	PINCTRL_PIN(41, "PLGPIO41"),	\
+	PINCTRL_PIN(42, "PLGPIO42"),	\
+	PINCTRL_PIN(43, "PLGPIO43"),	\
+	PINCTRL_PIN(44, "PLGPIO44"),	\
+	PINCTRL_PIN(45, "PLGPIO45"),	\
+	PINCTRL_PIN(46, "PLGPIO46"),	\
+	PINCTRL_PIN(47, "PLGPIO47"),	\
+	PINCTRL_PIN(48, "PLGPIO48"),	\
+	PINCTRL_PIN(49, "PLGPIO49"),	\
+	PINCTRL_PIN(50, "PLGPIO50"),	\
+	PINCTRL_PIN(51, "PLGPIO51"),	\
+	PINCTRL_PIN(52, "PLGPIO52"),	\
+	PINCTRL_PIN(53, "PLGPIO53"),	\
+	PINCTRL_PIN(54, "PLGPIO54"),	\
+	PINCTRL_PIN(55, "PLGPIO55"),	\
+	PINCTRL_PIN(56, "PLGPIO56"),	\
+	PINCTRL_PIN(57, "PLGPIO57"),	\
+	PINCTRL_PIN(58, "PLGPIO58"),	\
+	PINCTRL_PIN(59, "PLGPIO59"),	\
+	PINCTRL_PIN(60, "PLGPIO60"),	\
+	PINCTRL_PIN(61, "PLGPIO61"),	\
+	PINCTRL_PIN(62, "PLGPIO62"),	\
+	PINCTRL_PIN(63, "PLGPIO63"),	\
+	PINCTRL_PIN(64, "PLGPIO64"),	\
+	PINCTRL_PIN(65, "PLGPIO65"),	\
+	PINCTRL_PIN(66, "PLGPIO66"),	\
+	PINCTRL_PIN(67, "PLGPIO67"),	\
+	PINCTRL_PIN(68, "PLGPIO68"),	\
+	PINCTRL_PIN(69, "PLGPIO69"),	\
+	PINCTRL_PIN(70, "PLGPIO70"),	\
+	PINCTRL_PIN(71, "PLGPIO71"),	\
+	PINCTRL_PIN(72, "PLGPIO72"),	\
+	PINCTRL_PIN(73, "PLGPIO73"),	\
+	PINCTRL_PIN(74, "PLGPIO74"),	\
+	PINCTRL_PIN(75, "PLGPIO75"),	\
+	PINCTRL_PIN(76, "PLGPIO76"),	\
+	PINCTRL_PIN(77, "PLGPIO77"),	\
+	PINCTRL_PIN(78, "PLGPIO78"),	\
+	PINCTRL_PIN(79, "PLGPIO79"),	\
+	PINCTRL_PIN(80, "PLGPIO80"),	\
+	PINCTRL_PIN(81, "PLGPIO81"),	\
+	PINCTRL_PIN(82, "PLGPIO82"),	\
+	PINCTRL_PIN(83, "PLGPIO83"),	\
+	PINCTRL_PIN(84, "PLGPIO84"),	\
+	PINCTRL_PIN(85, "PLGPIO85"),	\
+	PINCTRL_PIN(86, "PLGPIO86"),	\
+	PINCTRL_PIN(87, "PLGPIO87"),	\
+	PINCTRL_PIN(88, "PLGPIO88"),	\
+	PINCTRL_PIN(89, "PLGPIO89"),	\
+	PINCTRL_PIN(90, "PLGPIO90"),	\
+	PINCTRL_PIN(91, "PLGPIO91"),	\
+	PINCTRL_PIN(92, "PLGPIO92"),	\
+	PINCTRL_PIN(93, "PLGPIO93"),	\
+	PINCTRL_PIN(94, "PLGPIO94"),	\
+	PINCTRL_PIN(95, "PLGPIO95"),	\
+	PINCTRL_PIN(96, "PLGPIO96"),	\
+	PINCTRL_PIN(97, "PLGPIO97"),	\
+	PINCTRL_PIN(98, "PLGPIO98"),	\
+	PINCTRL_PIN(99, "PLGPIO99"),	\
+	PINCTRL_PIN(100, "PLGPIO100"),	\
+	PINCTRL_PIN(101, "PLGPIO101")
+
+#define SPEAR_PIN_102_TO_245		\
+	PINCTRL_PIN(102, "PLGPIO102"),	\
+	PINCTRL_PIN(103, "PLGPIO103"),	\
+	PINCTRL_PIN(104, "PLGPIO104"),	\
+	PINCTRL_PIN(105, "PLGPIO105"),	\
+	PINCTRL_PIN(106, "PLGPIO106"),	\
+	PINCTRL_PIN(107, "PLGPIO107"),	\
+	PINCTRL_PIN(108, "PLGPIO108"),	\
+	PINCTRL_PIN(109, "PLGPIO109"),	\
+	PINCTRL_PIN(110, "PLGPIO110"),	\
+	PINCTRL_PIN(111, "PLGPIO111"),	\
+	PINCTRL_PIN(112, "PLGPIO112"),	\
+	PINCTRL_PIN(113, "PLGPIO113"),	\
+	PINCTRL_PIN(114, "PLGPIO114"),	\
+	PINCTRL_PIN(115, "PLGPIO115"),	\
+	PINCTRL_PIN(116, "PLGPIO116"),	\
+	PINCTRL_PIN(117, "PLGPIO117"),	\
+	PINCTRL_PIN(118, "PLGPIO118"),	\
+	PINCTRL_PIN(119, "PLGPIO119"),	\
+	PINCTRL_PIN(120, "PLGPIO120"),	\
+	PINCTRL_PIN(121, "PLGPIO121"),	\
+	PINCTRL_PIN(122, "PLGPIO122"),	\
+	PINCTRL_PIN(123, "PLGPIO123"),	\
+	PINCTRL_PIN(124, "PLGPIO124"),	\
+	PINCTRL_PIN(125, "PLGPIO125"),	\
+	PINCTRL_PIN(126, "PLGPIO126"),	\
+	PINCTRL_PIN(127, "PLGPIO127"),	\
+	PINCTRL_PIN(128, "PLGPIO128"),	\
+	PINCTRL_PIN(129, "PLGPIO129"),	\
+	PINCTRL_PIN(130, "PLGPIO130"),	\
+	PINCTRL_PIN(131, "PLGPIO131"),	\
+	PINCTRL_PIN(132, "PLGPIO132"),	\
+	PINCTRL_PIN(133, "PLGPIO133"),	\
+	PINCTRL_PIN(134, "PLGPIO134"),	\
+	PINCTRL_PIN(135, "PLGPIO135"),	\
+	PINCTRL_PIN(136, "PLGPIO136"),	\
+	PINCTRL_PIN(137, "PLGPIO137"),	\
+	PINCTRL_PIN(138, "PLGPIO138"),	\
+	PINCTRL_PIN(139, "PLGPIO139"),	\
+	PINCTRL_PIN(140, "PLGPIO140"),	\
+	PINCTRL_PIN(141, "PLGPIO141"),	\
+	PINCTRL_PIN(142, "PLGPIO142"),	\
+	PINCTRL_PIN(143, "PLGPIO143"),	\
+	PINCTRL_PIN(144, "PLGPIO144"),	\
+	PINCTRL_PIN(145, "PLGPIO145"),	\
+	PINCTRL_PIN(146, "PLGPIO146"),	\
+	PINCTRL_PIN(147, "PLGPIO147"),	\
+	PINCTRL_PIN(148, "PLGPIO148"),	\
+	PINCTRL_PIN(149, "PLGPIO149"),	\
+	PINCTRL_PIN(150, "PLGPIO150"),	\
+	PINCTRL_PIN(151, "PLGPIO151"),	\
+	PINCTRL_PIN(152, "PLGPIO152"),	\
+	PINCTRL_PIN(153, "PLGPIO153"),	\
+	PINCTRL_PIN(154, "PLGPIO154"),	\
+	PINCTRL_PIN(155, "PLGPIO155"),	\
+	PINCTRL_PIN(156, "PLGPIO156"),	\
+	PINCTRL_PIN(157, "PLGPIO157"),	\
+	PINCTRL_PIN(158, "PLGPIO158"),	\
+	PINCTRL_PIN(159, "PLGPIO159"),	\
+	PINCTRL_PIN(160, "PLGPIO160"),	\
+	PINCTRL_PIN(161, "PLGPIO161"),	\
+	PINCTRL_PIN(162, "PLGPIO162"),	\
+	PINCTRL_PIN(163, "PLGPIO163"),	\
+	PINCTRL_PIN(164, "PLGPIO164"),	\
+	PINCTRL_PIN(165, "PLGPIO165"),	\
+	PINCTRL_PIN(166, "PLGPIO166"),	\
+	PINCTRL_PIN(167, "PLGPIO167"),	\
+	PINCTRL_PIN(168, "PLGPIO168"),	\
+	PINCTRL_PIN(169, "PLGPIO169"),	\
+	PINCTRL_PIN(170, "PLGPIO170"),	\
+	PINCTRL_PIN(171, "PLGPIO171"),	\
+	PINCTRL_PIN(172, "PLGPIO172"),	\
+	PINCTRL_PIN(173, "PLGPIO173"),	\
+	PINCTRL_PIN(174, "PLGPIO174"),	\
+	PINCTRL_PIN(175, "PLGPIO175"),	\
+	PINCTRL_PIN(176, "PLGPIO176"),	\
+	PINCTRL_PIN(177, "PLGPIO177"),	\
+	PINCTRL_PIN(178, "PLGPIO178"),	\
+	PINCTRL_PIN(179, "PLGPIO179"),	\
+	PINCTRL_PIN(180, "PLGPIO180"),	\
+	PINCTRL_PIN(181, "PLGPIO181"),	\
+	PINCTRL_PIN(182, "PLGPIO182"),	\
+	PINCTRL_PIN(183, "PLGPIO183"),	\
+	PINCTRL_PIN(184, "PLGPIO184"),	\
+	PINCTRL_PIN(185, "PLGPIO185"),	\
+	PINCTRL_PIN(186, "PLGPIO186"),	\
+	PINCTRL_PIN(187, "PLGPIO187"),	\
+	PINCTRL_PIN(188, "PLGPIO188"),	\
+	PINCTRL_PIN(189, "PLGPIO189"),	\
+	PINCTRL_PIN(190, "PLGPIO190"),	\
+	PINCTRL_PIN(191, "PLGPIO191"),	\
+	PINCTRL_PIN(192, "PLGPIO192"),	\
+	PINCTRL_PIN(193, "PLGPIO193"),	\
+	PINCTRL_PIN(194, "PLGPIO194"),	\
+	PINCTRL_PIN(195, "PLGPIO195"),	\
+	PINCTRL_PIN(196, "PLGPIO196"),	\
+	PINCTRL_PIN(197, "PLGPIO197"),	\
+	PINCTRL_PIN(198, "PLGPIO198"),	\
+	PINCTRL_PIN(199, "PLGPIO199"),	\
+	PINCTRL_PIN(200, "PLGPIO200"),	\
+	PINCTRL_PIN(201, "PLGPIO201"),	\
+	PINCTRL_PIN(202, "PLGPIO202"),	\
+	PINCTRL_PIN(203, "PLGPIO203"),	\
+	PINCTRL_PIN(204, "PLGPIO204"),	\
+	PINCTRL_PIN(205, "PLGPIO205"),	\
+	PINCTRL_PIN(206, "PLGPIO206"),	\
+	PINCTRL_PIN(207, "PLGPIO207"),	\
+	PINCTRL_PIN(208, "PLGPIO208"),	\
+	PINCTRL_PIN(209, "PLGPIO209"),	\
+	PINCTRL_PIN(210, "PLGPIO210"),	\
+	PINCTRL_PIN(211, "PLGPIO211"),	\
+	PINCTRL_PIN(212, "PLGPIO212"),	\
+	PINCTRL_PIN(213, "PLGPIO213"),	\
+	PINCTRL_PIN(214, "PLGPIO214"),	\
+	PINCTRL_PIN(215, "PLGPIO215"),	\
+	PINCTRL_PIN(216, "PLGPIO216"),	\
+	PINCTRL_PIN(217, "PLGPIO217"),	\
+	PINCTRL_PIN(218, "PLGPIO218"),	\
+	PINCTRL_PIN(219, "PLGPIO219"),	\
+	PINCTRL_PIN(220, "PLGPIO220"),	\
+	PINCTRL_PIN(221, "PLGPIO221"),	\
+	PINCTRL_PIN(222, "PLGPIO222"),	\
+	PINCTRL_PIN(223, "PLGPIO223"),	\
+	PINCTRL_PIN(224, "PLGPIO224"),	\
+	PINCTRL_PIN(225, "PLGPIO225"),	\
+	PINCTRL_PIN(226, "PLGPIO226"),	\
+	PINCTRL_PIN(227, "PLGPIO227"),	\
+	PINCTRL_PIN(228, "PLGPIO228"),	\
+	PINCTRL_PIN(229, "PLGPIO229"),	\
+	PINCTRL_PIN(230, "PLGPIO230"),	\
+	PINCTRL_PIN(231, "PLGPIO231"),	\
+	PINCTRL_PIN(232, "PLGPIO232"),	\
+	PINCTRL_PIN(233, "PLGPIO233"),	\
+	PINCTRL_PIN(234, "PLGPIO234"),	\
+	PINCTRL_PIN(235, "PLGPIO235"),	\
+	PINCTRL_PIN(236, "PLGPIO236"),	\
+	PINCTRL_PIN(237, "PLGPIO237"),	\
+	PINCTRL_PIN(238, "PLGPIO238"),	\
+	PINCTRL_PIN(239, "PLGPIO239"),	\
+	PINCTRL_PIN(240, "PLGPIO240"),	\
+	PINCTRL_PIN(241, "PLGPIO241"),	\
+	PINCTRL_PIN(242, "PLGPIO242"),	\
+	PINCTRL_PIN(243, "PLGPIO243"),	\
+	PINCTRL_PIN(244, "PLGPIO244"),	\
+	PINCTRL_PIN(245, "PLGPIO245")
+
 #endif /* __PINMUX_SPEAR_H__ */
diff --git a/drivers/pinctrl/spear/pinctrl-spear1310.c b/drivers/pinctrl/spear/pinctrl-spear1310.c
new file mode 100644
index 0000000..d6cca8c
--- /dev/null
+++ b/drivers/pinctrl/spear/pinctrl-spear1310.c
@@ -0,0 +1,2198 @@
+/*
+ * Driver for the ST Microelectronics SPEAr1310 pinmux
+ *
+ * Copyright (C) 2012 ST Microelectronics
+ * Viresh Kumar <viresh.linux@gmail.com>
+ *
+ * This file is licensed under the terms of the GNU General Public
+ * License version 2. This program is licensed "as is" without any
+ * warranty of any kind, whether express or implied.
+ */
+
+#include <linux/err.h>
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/of_device.h>
+#include <linux/platform_device.h>
+#include "pinctrl-spear.h"
+
+#define DRIVER_NAME "spear1310-pinmux"
+
+/* pins */
+static const struct pinctrl_pin_desc spear1310_pins[] = {
+	SPEAR_PIN_0_TO_101,
+	SPEAR_PIN_102_TO_245,
+};
+
+/* registers */
+#define PERIP_CFG					0x32C
+	#define MCIF_SEL_SHIFT				3
+	#define MCIF_SEL_SD				(0x1 << MCIF_SEL_SHIFT)
+	#define MCIF_SEL_CF				(0x2 << MCIF_SEL_SHIFT)
+	#define MCIF_SEL_XD				(0x3 << MCIF_SEL_SHIFT)
+	#define MCIF_SEL_MASK				(0x3 << MCIF_SEL_SHIFT)
+
+#define PCIE_SATA_CFG					0x3A4
+	#define PCIE_SATA2_SEL_PCIE			(0 << 31)
+	#define PCIE_SATA1_SEL_PCIE			(0 << 30)
+	#define PCIE_SATA0_SEL_PCIE			(0 << 29)
+	#define PCIE_SATA2_SEL_SATA			(1 << 31)
+	#define PCIE_SATA1_SEL_SATA			(1 << 30)
+	#define PCIE_SATA0_SEL_SATA			(1 << 29)
+	#define SATA2_CFG_TX_CLK_EN			(1 << 27)
+	#define SATA2_CFG_RX_CLK_EN			(1 << 26)
+	#define SATA2_CFG_POWERUP_RESET			(1 << 25)
+	#define SATA2_CFG_PM_CLK_EN			(1 << 24)
+	#define SATA1_CFG_TX_CLK_EN			(1 << 23)
+	#define SATA1_CFG_RX_CLK_EN			(1 << 22)
+	#define SATA1_CFG_POWERUP_RESET			(1 << 21)
+	#define SATA1_CFG_PM_CLK_EN			(1 << 20)
+	#define SATA0_CFG_TX_CLK_EN			(1 << 19)
+	#define SATA0_CFG_RX_CLK_EN			(1 << 18)
+	#define SATA0_CFG_POWERUP_RESET			(1 << 17)
+	#define SATA0_CFG_PM_CLK_EN			(1 << 16)
+	#define PCIE2_CFG_DEVICE_PRESENT		(1 << 11)
+	#define PCIE2_CFG_POWERUP_RESET			(1 << 10)
+	#define PCIE2_CFG_CORE_CLK_EN			(1 << 9)
+	#define PCIE2_CFG_AUX_CLK_EN			(1 << 8)
+	#define PCIE1_CFG_DEVICE_PRESENT		(1 << 7)
+	#define PCIE1_CFG_POWERUP_RESET			(1 << 6)
+	#define PCIE1_CFG_CORE_CLK_EN			(1 << 5)
+	#define PCIE1_CFG_AUX_CLK_EN			(1 << 4)
+	#define PCIE0_CFG_DEVICE_PRESENT		(1 << 3)
+	#define PCIE0_CFG_POWERUP_RESET			(1 << 2)
+	#define PCIE0_CFG_CORE_CLK_EN			(1 << 1)
+	#define PCIE0_CFG_AUX_CLK_EN			(1 << 0)
+
+#define PAD_FUNCTION_EN_0				0x650
+	#define PMX_UART0_MASK				(1 << 1)
+	#define PMX_I2C0_MASK				(1 << 2)
+	#define PMX_I2S0_MASK				(1 << 3)
+	#define PMX_SSP0_MASK				(1 << 4)
+	#define PMX_CLCD1_MASK				(1 << 5)
+	#define PMX_EGPIO00_MASK			(1 << 6)
+	#define PMX_EGPIO01_MASK			(1 << 7)
+	#define PMX_EGPIO02_MASK			(1 << 8)
+	#define PMX_EGPIO03_MASK			(1 << 9)
+	#define PMX_EGPIO04_MASK			(1 << 10)
+	#define PMX_EGPIO05_MASK			(1 << 11)
+	#define PMX_EGPIO06_MASK			(1 << 12)
+	#define PMX_EGPIO07_MASK			(1 << 13)
+	#define PMX_EGPIO08_MASK			(1 << 14)
+	#define PMX_EGPIO09_MASK			(1 << 15)
+	#define PMX_SMI_MASK				(1 << 16)
+	#define PMX_NAND8_MASK				(1 << 17)
+	#define PMX_GMIICLK_MASK			(1 << 18)
+	#define PMX_GMIICOL_CRS_XFERER_MIITXCLK_MASK	(1 << 19)
+	#define PMX_RXCLK_RDV_TXEN_D03_MASK		(1 << 20)
+	#define PMX_GMIID47_MASK			(1 << 21)
+	#define PMX_MDC_MDIO_MASK			(1 << 22)
+	#define PMX_MCI_DATA8_15_MASK			(1 << 23)
+	#define PMX_NFAD23_MASK				(1 << 24)
+	#define PMX_NFAD24_MASK				(1 << 25)
+	#define PMX_NFAD25_MASK				(1 << 26)
+	#define PMX_NFCE3_MASK				(1 << 27)
+	#define PMX_NFWPRT3_MASK			(1 << 28)
+	#define PMX_NFRSTPWDWN0_MASK			(1 << 29)
+	#define PMX_NFRSTPWDWN1_MASK			(1 << 30)
+	#define PMX_NFRSTPWDWN2_MASK			(1 << 31)
+
+#define PAD_FUNCTION_EN_1				0x654
+	#define PMX_NFRSTPWDWN3_MASK			(1 << 0)
+	#define PMX_SMINCS2_MASK			(1 << 1)
+	#define PMX_SMINCS3_MASK			(1 << 2)
+	#define PMX_CLCD2_MASK				(1 << 3)
+	#define PMX_KBD_ROWCOL68_MASK			(1 << 4)
+	#define PMX_EGPIO10_MASK			(1 << 5)
+	#define PMX_EGPIO11_MASK			(1 << 6)
+	#define PMX_EGPIO12_MASK			(1 << 7)
+	#define PMX_EGPIO13_MASK			(1 << 8)
+	#define PMX_EGPIO14_MASK			(1 << 9)
+	#define PMX_EGPIO15_MASK			(1 << 10)
+	#define PMX_UART0_MODEM_MASK			(1 << 11)
+	#define PMX_GPT0_TMR0_MASK			(1 << 12)
+	#define PMX_GPT0_TMR1_MASK			(1 << 13)
+	#define PMX_GPT1_TMR0_MASK			(1 << 14)
+	#define PMX_GPT1_TMR1_MASK			(1 << 15)
+	#define PMX_I2S1_MASK				(1 << 16)
+	#define PMX_KBD_ROWCOL25_MASK			(1 << 17)
+	#define PMX_NFIO8_15_MASK			(1 << 18)
+	#define PMX_KBD_COL1_MASK			(1 << 19)
+	#define PMX_NFCE1_MASK				(1 << 20)
+	#define PMX_KBD_COL0_MASK			(1 << 21)
+	#define PMX_NFCE2_MASK				(1 << 22)
+	#define PMX_KBD_ROW1_MASK			(1 << 23)
+	#define PMX_NFWPRT1_MASK			(1 << 24)
+	#define PMX_KBD_ROW0_MASK			(1 << 25)
+	#define PMX_NFWPRT2_MASK			(1 << 26)
+	#define PMX_MCIDATA0_MASK			(1 << 27)
+	#define PMX_MCIDATA1_MASK			(1 << 28)
+	#define PMX_MCIDATA2_MASK			(1 << 29)
+	#define PMX_MCIDATA3_MASK			(1 << 30)
+	#define PMX_MCIDATA4_MASK			(1 << 31)
+
+#define PAD_FUNCTION_EN_2				0x658
+	#define PMX_MCIDATA5_MASK			(1 << 0)
+	#define PMX_MCIDATA6_MASK			(1 << 1)
+	#define PMX_MCIDATA7_MASK			(1 << 2)
+	#define PMX_MCIDATA1SD_MASK			(1 << 3)
+	#define PMX_MCIDATA2SD_MASK			(1 << 4)
+	#define PMX_MCIDATA3SD_MASK			(1 << 5)
+	#define PMX_MCIADDR0ALE_MASK			(1 << 6)
+	#define PMX_MCIADDR1CLECLK_MASK			(1 << 7)
+	#define PMX_MCIADDR2_MASK			(1 << 8)
+	#define PMX_MCICECF_MASK			(1 << 9)
+	#define PMX_MCICEXD_MASK			(1 << 10)
+	#define PMX_MCICESDMMC_MASK			(1 << 11)
+	#define PMX_MCICDCF1_MASK			(1 << 12)
+	#define PMX_MCICDCF2_MASK			(1 << 13)
+	#define PMX_MCICDXD_MASK			(1 << 14)
+	#define PMX_MCICDSDMMC_MASK			(1 << 15)
+	#define PMX_MCIDATADIR_MASK			(1 << 16)
+	#define PMX_MCIDMARQWP_MASK			(1 << 17)
+	#define PMX_MCIIORDRE_MASK			(1 << 18)
+	#define PMX_MCIIOWRWE_MASK			(1 << 19)
+	#define PMX_MCIRESETCF_MASK			(1 << 20)
+	#define PMX_MCICS0CE_MASK			(1 << 21)
+	#define PMX_MCICFINTR_MASK			(1 << 22)
+	#define PMX_MCIIORDY_MASK			(1 << 23)
+	#define PMX_MCICS1_MASK				(1 << 24)
+	#define PMX_MCIDMAACK_MASK			(1 << 25)
+	#define PMX_MCISDCMD_MASK			(1 << 26)
+	#define PMX_MCILEDS_MASK			(1 << 27)
+	#define PMX_TOUCH_XY_MASK			(1 << 28)
+	#define PMX_SSP0_CS0_MASK			(1 << 29)
+	#define PMX_SSP0_CS1_2_MASK			(1 << 30)
+
+/* combined macros */
+#define PMX_GMII_MASK		(PMX_GMIICLK_MASK |			\
+				PMX_GMIICOL_CRS_XFERER_MIITXCLK_MASK |	\
+				PMX_RXCLK_RDV_TXEN_D03_MASK |		\
+				PMX_GMIID47_MASK | PMX_MDC_MDIO_MASK)
+
+#define PMX_EGPIO_0_GRP_MASK	(PMX_EGPIO00_MASK | PMX_EGPIO01_MASK |	\
+				PMX_EGPIO02_MASK |			\
+				PMX_EGPIO03_MASK | PMX_EGPIO04_MASK |	\
+				PMX_EGPIO05_MASK | PMX_EGPIO06_MASK |	\
+				PMX_EGPIO07_MASK | PMX_EGPIO08_MASK |	\
+				PMX_EGPIO09_MASK)
+#define PMX_EGPIO_1_GRP_MASK	(PMX_EGPIO10_MASK | PMX_EGPIO11_MASK |	\
+				PMX_EGPIO12_MASK | PMX_EGPIO13_MASK |	\
+				PMX_EGPIO14_MASK | PMX_EGPIO15_MASK)
+
+#define PMX_KEYBOARD_6X6_MASK	(PMX_KBD_ROW0_MASK | PMX_KBD_ROW1_MASK | \
+				PMX_KBD_ROWCOL25_MASK | PMX_KBD_COL0_MASK | \
+				PMX_KBD_COL1_MASK)
+
+#define PMX_NAND8BIT_0_MASK	(PMX_NAND8_MASK | PMX_NFAD23_MASK |	\
+				PMX_NFAD24_MASK | PMX_NFAD25_MASK |	\
+				PMX_NFWPRT3_MASK | PMX_NFRSTPWDWN0_MASK | \
+				PMX_NFRSTPWDWN1_MASK | PMX_NFRSTPWDWN2_MASK | \
+				PMX_NFCE3_MASK)
+#define PMX_NAND8BIT_1_MASK	PMX_NFRSTPWDWN3_MASK
+
+#define PMX_NAND16BIT_1_MASK	(PMX_KBD_ROWCOL25_MASK | PMX_NFIO8_15_MASK)
+#define PMX_NAND_4CHIPS_MASK	(PMX_NFCE1_MASK | PMX_NFCE2_MASK |	\
+				PMX_NFWPRT1_MASK | PMX_NFWPRT2_MASK |	\
+				PMX_KBD_ROW0_MASK | PMX_KBD_ROW1_MASK |	\
+				PMX_KBD_COL0_MASK | PMX_KBD_COL1_MASK)
+
+#define PMX_MCIFALL_1_MASK	0xF8000000
+#define PMX_MCIFALL_2_MASK	0x0FFFFFFF
+
+#define PMX_PCI_REG1_MASK	(PMX_SMINCS2_MASK | PMX_SMINCS3_MASK |	\
+				PMX_CLCD2_MASK | PMX_KBD_ROWCOL68_MASK | \
+				PMX_EGPIO_1_GRP_MASK | PMX_GPT0_TMR0_MASK | \
+				PMX_GPT0_TMR1_MASK | PMX_GPT1_TMR0_MASK | \
+				PMX_GPT1_TMR1_MASK | PMX_I2S1_MASK |	\
+				PMX_NFCE2_MASK)
+#define PMX_PCI_REG2_MASK	(PMX_TOUCH_XY_MASK | PMX_SSP0_CS0_MASK | \
+				PMX_SSP0_CS1_2_MASK)
+
+#define PMX_SMII_0_1_2_MASK	(PMX_CLCD2_MASK | PMX_KBD_ROWCOL68_MASK)
+#define PMX_RGMII_REG0_MASK	(PMX_MCI_DATA8_15_MASK |		\
+				PMX_GMIICOL_CRS_XFERER_MIITXCLK_MASK |	\
+				PMX_GMIID47_MASK)
+#define PMX_RGMII_REG1_MASK	(PMX_KBD_ROWCOL68_MASK | PMX_EGPIO_1_GRP_MASK |\
+				PMX_KBD_ROW1_MASK | PMX_NFWPRT1_MASK |	\
+				PMX_KBD_ROW0_MASK | PMX_NFWPRT2_MASK)
+#define PMX_RGMII_REG2_MASK	(PMX_TOUCH_XY_MASK | PMX_SSP0_CS0_MASK | \
+				PMX_SSP0_CS1_2_MASK)
+
+#define PCIE_CFG_VAL(x)		(PCIE_SATA##x##_SEL_PCIE |	\
+				PCIE##x##_CFG_AUX_CLK_EN |	\
+				PCIE##x##_CFG_CORE_CLK_EN |	\
+				PCIE##x##_CFG_POWERUP_RESET |	\
+				PCIE##x##_CFG_DEVICE_PRESENT)
+#define SATA_CFG_VAL(x)		(PCIE_SATA##x##_SEL_SATA |	\
+				SATA##x##_CFG_PM_CLK_EN |	\
+				SATA##x##_CFG_POWERUP_RESET |	\
+				SATA##x##_CFG_RX_CLK_EN |	\
+				SATA##x##_CFG_TX_CLK_EN)
+
+/* Pad multiplexing for i2c0 device */
+static const unsigned i2c0_pins[] = { 102, 103 };
+static struct spear_muxreg i2c0_muxreg[] = {
+	{
+		.reg = PAD_FUNCTION_EN_0,
+		.mask = PMX_I2C0_MASK,
+		.val = PMX_I2C0_MASK,
+	},
+};
+
+static struct spear_modemux i2c0_modemux[] = {
+	{
+		.muxregs = i2c0_muxreg,
+		.nmuxregs = ARRAY_SIZE(i2c0_muxreg),
+	},
+};
+
+static struct spear_pingroup i2c0_pingroup = {
+	.name = "i2c0_grp",
+	.pins = i2c0_pins,
+	.npins = ARRAY_SIZE(i2c0_pins),
+	.modemuxs = i2c0_modemux,
+	.nmodemuxs = ARRAY_SIZE(i2c0_modemux),
+};
+
+static const char *const i2c0_grps[] = { "i2c0_grp" };
+static struct spear_function i2c0_function = {
+	.name = "i2c0",
+	.groups = i2c0_grps,
+	.ngroups = ARRAY_SIZE(i2c0_grps),
+};
+
+/* Pad multiplexing for ssp0 device */
+static const unsigned ssp0_pins[] = { 109, 110, 111, 112 };
+static struct spear_muxreg ssp0_muxreg[] = {
+	{
+		.reg = PAD_FUNCTION_EN_0,
+		.mask = PMX_SSP0_MASK,
+		.val = PMX_SSP0_MASK,
+	},
+};
+
+static struct spear_modemux ssp0_modemux[] = {
+	{
+		.muxregs = ssp0_muxreg,
+		.nmuxregs = ARRAY_SIZE(ssp0_muxreg),
+	},
+};
+
+static struct spear_pingroup ssp0_pingroup = {
+	.name = "ssp0_grp",
+	.pins = ssp0_pins,
+	.npins = ARRAY_SIZE(ssp0_pins),
+	.modemuxs = ssp0_modemux,
+	.nmodemuxs = ARRAY_SIZE(ssp0_modemux),
+};
+
+/* Pad multiplexing for ssp0_cs0 device */
+static const unsigned ssp0_cs0_pins[] = { 96 };
+static struct spear_muxreg ssp0_cs0_muxreg[] = {
+	{
+		.reg = PAD_FUNCTION_EN_2,
+		.mask = PMX_SSP0_CS0_MASK,
+		.val = PMX_SSP0_CS0_MASK,
+	},
+};
+
+static struct spear_modemux ssp0_cs0_modemux[] = {
+	{
+		.muxregs = ssp0_cs0_muxreg,
+		.nmuxregs = ARRAY_SIZE(ssp0_cs0_muxreg),
+	},
+};
+
+static struct spear_pingroup ssp0_cs0_pingroup = {
+	.name = "ssp0_cs0_grp",
+	.pins = ssp0_cs0_pins,
+	.npins = ARRAY_SIZE(ssp0_cs0_pins),
+	.modemuxs = ssp0_cs0_modemux,
+	.nmodemuxs = ARRAY_SIZE(ssp0_cs0_modemux),
+};
+
+/* ssp0_cs1_2 device */
+static const unsigned ssp0_cs1_2_pins[] = { 94, 95 };
+static struct spear_muxreg ssp0_cs1_2_muxreg[] = {
+	{
+		.reg = PAD_FUNCTION_EN_2,
+		.mask = PMX_SSP0_CS1_2_MASK,
+		.val = PMX_SSP0_CS1_2_MASK,
+	},
+};
+
+static struct spear_modemux ssp0_cs1_2_modemux[] = {
+	{
+		.muxregs = ssp0_cs1_2_muxreg,
+		.nmuxregs = ARRAY_SIZE(ssp0_cs1_2_muxreg),
+	},
+};
+
+static struct spear_pingroup ssp0_cs1_2_pingroup = {
+	.name = "ssp0_cs1_2_grp",
+	.pins = ssp0_cs1_2_pins,
+	.npins = ARRAY_SIZE(ssp0_cs1_2_pins),
+	.modemuxs = ssp0_cs1_2_modemux,
+	.nmodemuxs = ARRAY_SIZE(ssp0_cs1_2_modemux),
+};
+
+static const char *const ssp0_grps[] = { "ssp0_grp", "ssp0_cs0_grp",
+	"ssp0_cs1_2_grp" };
+static struct spear_function ssp0_function = {
+	.name = "ssp0",
+	.groups = ssp0_grps,
+	.ngroups = ARRAY_SIZE(ssp0_grps),
+};
+
+/* Pad multiplexing for i2s0 device */
+static const unsigned i2s0_pins[] = { 104, 105, 106, 107, 108 };
+static struct spear_muxreg i2s0_muxreg[] = {
+	{
+		.reg = PAD_FUNCTION_EN_0,
+		.mask = PMX_I2S0_MASK,
+		.val = PMX_I2S0_MASK,
+	},
+};
+
+static struct spear_modemux i2s0_modemux[] = {
+	{
+		.muxregs = i2s0_muxreg,
+		.nmuxregs = ARRAY_SIZE(i2s0_muxreg),
+	},
+};
+
+static struct spear_pingroup i2s0_pingroup = {
+	.name = "i2s0_grp",
+	.pins = i2s0_pins,
+	.npins = ARRAY_SIZE(i2s0_pins),
+	.modemuxs = i2s0_modemux,
+	.nmodemuxs = ARRAY_SIZE(i2s0_modemux),
+};
+
+static const char *const i2s0_grps[] = { "i2s0_grp" };
+static struct spear_function i2s0_function = {
+	.name = "i2s0",
+	.groups = i2s0_grps,
+	.ngroups = ARRAY_SIZE(i2s0_grps),
+};
+
+/* Pad multiplexing for i2s1 device */
+static const unsigned i2s1_pins[] = { 0, 1, 2, 3 };
+static struct spear_muxreg i2s1_muxreg[] = {
+	{
+		.reg = PAD_FUNCTION_EN_1,
+		.mask = PMX_I2S1_MASK,
+		.val = PMX_I2S1_MASK,
+	},
+};
+
+static struct spear_modemux i2s1_modemux[] = {
+	{
+		.muxregs = i2s1_muxreg,
+		.nmuxregs = ARRAY_SIZE(i2s1_muxreg),
+	},
+};
+
+static struct spear_pingroup i2s1_pingroup = {
+	.name = "i2s1_grp",
+	.pins = i2s1_pins,
+	.npins = ARRAY_SIZE(i2s1_pins),
+	.modemuxs = i2s1_modemux,
+	.nmodemuxs = ARRAY_SIZE(i2s1_modemux),
+};
+
+static const char *const i2s1_grps[] = { "i2s1_grp" };
+static struct spear_function i2s1_function = {
+	.name = "i2s1",
+	.groups = i2s1_grps,
+	.ngroups = ARRAY_SIZE(i2s1_grps),
+};
+
+/* Pad multiplexing for clcd device */
+static const unsigned clcd_pins[] = { 113, 114, 115, 116, 117, 118, 119, 120,
+	121, 122, 123, 124, 125, 126, 127, 128, 129, 130, 131, 132, 133, 134,
+	135, 136, 137, 138, 139, 140, 141, 142 };
+static struct spear_muxreg clcd_muxreg[] = {
+	{
+		.reg = PAD_FUNCTION_EN_0,
+		.mask = PMX_CLCD1_MASK,
+		.val = PMX_CLCD1_MASK,
+	},
+};
+
+static struct spear_modemux clcd_modemux[] = {
+	{
+		.muxregs = clcd_muxreg,
+		.nmuxregs = ARRAY_SIZE(clcd_muxreg),
+	},
+};
+
+static struct spear_pingroup clcd_pingroup = {
+	.name = "clcd_grp",
+	.pins = clcd_pins,
+	.npins = ARRAY_SIZE(clcd_pins),
+	.modemuxs = clcd_modemux,
+	.nmodemuxs = ARRAY_SIZE(clcd_modemux),
+};
+
+static const unsigned clcd_high_res_pins[] = { 30, 31, 32, 33, 34, 35, 36, 37,
+	38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53 };
+static struct spear_muxreg clcd_high_res_muxreg[] = {
+	{
+		.reg = PAD_FUNCTION_EN_1,
+		.mask = PMX_CLCD2_MASK,
+		.val = PMX_CLCD2_MASK,
+	},
+};
+
+static struct spear_modemux clcd_high_res_modemux[] = {
+	{
+		.muxregs = clcd_high_res_muxreg,
+		.nmuxregs = ARRAY_SIZE(clcd_high_res_muxreg),
+	},
+};
+
+static struct spear_pingroup clcd_high_res_pingroup = {
+	.name = "clcd_high_res_grp",
+	.pins = clcd_high_res_pins,
+	.npins = ARRAY_SIZE(clcd_high_res_pins),
+	.modemuxs = clcd_high_res_modemux,
+	.nmodemuxs = ARRAY_SIZE(clcd_high_res_modemux),
+};
+
+static const char *const clcd_grps[] = { "clcd_grp", "clcd_high_res_grp" };
+static struct spear_function clcd_function = {
+	.name = "clcd",
+	.groups = clcd_grps,
+	.ngroups = ARRAY_SIZE(clcd_grps),
+};
+
+static const unsigned arm_gpio_pins[] = { 18, 19, 20, 21, 22, 23, 143, 144, 145,
+	146, 147, 148, 149, 150, 151, 152 };
+static struct spear_muxreg arm_gpio_muxreg[] = {
+	{
+		.reg = PAD_FUNCTION_EN_0,
+		.mask = PMX_EGPIO_0_GRP_MASK,
+		.val = PMX_EGPIO_0_GRP_MASK,
+	}, {
+		.reg = PAD_FUNCTION_EN_1,
+		.mask = PMX_EGPIO_1_GRP_MASK,
+		.val = PMX_EGPIO_1_GRP_MASK,
+	},
+};
+
+static struct spear_modemux arm_gpio_modemux[] = {
+	{
+		.muxregs = arm_gpio_muxreg,
+		.nmuxregs = ARRAY_SIZE(arm_gpio_muxreg),
+	},
+};
+
+static struct spear_pingroup arm_gpio_pingroup = {
+	.name = "arm_gpio_grp",
+	.pins = arm_gpio_pins,
+	.npins = ARRAY_SIZE(arm_gpio_pins),
+	.modemuxs = arm_gpio_modemux,
+	.nmodemuxs = ARRAY_SIZE(arm_gpio_modemux),
+};
+
+static const char *const arm_gpio_grps[] = { "arm_gpio_grp" };
+static struct spear_function arm_gpio_function = {
+	.name = "arm_gpio",
+	.groups = arm_gpio_grps,
+	.ngroups = ARRAY_SIZE(arm_gpio_grps),
+};
+
+/* Pad multiplexing for smi 2 chips device */
+static const unsigned smi_2_chips_pins[] = { 153, 154, 155, 156, 157 };
+static struct spear_muxreg smi_2_chips_muxreg[] = {
+	{
+		.reg = PAD_FUNCTION_EN_0,
+		.mask = PMX_SMI_MASK,
+		.val = PMX_SMI_MASK,
+	},
+};
+
+static struct spear_modemux smi_2_chips_modemux[] = {
+	{
+		.muxregs = smi_2_chips_muxreg,
+		.nmuxregs = ARRAY_SIZE(smi_2_chips_muxreg),
+	},
+};
+
+static struct spear_pingroup smi_2_chips_pingroup = {
+	.name = "smi_2_chips_grp",
+	.pins = smi_2_chips_pins,
+	.npins = ARRAY_SIZE(smi_2_chips_pins),
+	.modemuxs = smi_2_chips_modemux,
+	.nmodemuxs = ARRAY_SIZE(smi_2_chips_modemux),
+};
+
+static const unsigned smi_4_chips_pins[] = { 54, 55 };
+static struct spear_muxreg smi_4_chips_muxreg[] = {
+	{
+		.reg = PAD_FUNCTION_EN_0,
+		.mask = PMX_SMI_MASK,
+		.val = PMX_SMI_MASK,
+	}, {
+		.reg = PAD_FUNCTION_EN_1,
+		.mask = PMX_SMINCS2_MASK | PMX_SMINCS3_MASK,
+		.val = PMX_SMINCS2_MASK | PMX_SMINCS3_MASK,
+	},
+};
+
+static struct spear_modemux smi_4_chips_modemux[] = {
+	{
+		.muxregs = smi_4_chips_muxreg,
+		.nmuxregs = ARRAY_SIZE(smi_4_chips_muxreg),
+	},
+};
+
+static struct spear_pingroup smi_4_chips_pingroup = {
+	.name = "smi_4_chips_grp",
+	.pins = smi_4_chips_pins,
+	.npins = ARRAY_SIZE(smi_4_chips_pins),
+	.modemuxs = smi_4_chips_modemux,
+	.nmodemuxs = ARRAY_SIZE(smi_4_chips_modemux),
+};
+
+static const char *const smi_grps[] = { "smi_2_chips_grp", "smi_4_chips_grp" };
+static struct spear_function smi_function = {
+	.name = "smi",
+	.groups = smi_grps,
+	.ngroups = ARRAY_SIZE(smi_grps),
+};
+
+/* Pad multiplexing for gmii device */
+static const unsigned gmii_pins[] = { 173, 174, 175, 176, 177, 178, 179, 180,
+	181, 182, 183, 184, 185, 186, 187, 188, 189, 190, 191, 192, 193, 194,
+	195, 196, 197, 198, 199, 200 };
+static struct spear_muxreg gmii_muxreg[] = {
+	{
+		.reg = PAD_FUNCTION_EN_0,
+		.mask = PMX_GMII_MASK,
+		.val = PMX_GMII_MASK,
+	},
+};
+
+static struct spear_modemux gmii_modemux[] = {
+	{
+		.muxregs = gmii_muxreg,
+		.nmuxregs = ARRAY_SIZE(gmii_muxreg),
+	},
+};
+
+static struct spear_pingroup gmii_pingroup = {
+	.name = "gmii_grp",
+	.pins = gmii_pins,
+	.npins = ARRAY_SIZE(gmii_pins),
+	.modemuxs = gmii_modemux,
+	.nmodemuxs = ARRAY_SIZE(gmii_modemux),
+};
+
+static const char *const gmii_grps[] = { "gmii_grp" };
+static struct spear_function gmii_function = {
+	.name = "gmii",
+	.groups = gmii_grps,
+	.ngroups = ARRAY_SIZE(gmii_grps),
+};
+
+/* Pad multiplexing for rgmii device */
+static const unsigned rgmii_pins[] = { 18, 19, 20, 21, 22, 23, 24, 25, 26, 27,
+	28, 29, 86, 87, 88, 89, 90, 91, 92, 93, 94, 95, 96, 97, 98, 99, 175,
+	180, 181, 182, 183, 185, 188, 193, 194, 195, 196, 197, 198, 211, 212 };
+static struct spear_muxreg rgmii_muxreg[] = {
+	{
+		.reg = PAD_FUNCTION_EN_0,
+		.mask = PMX_RGMII_REG0_MASK,
+		.val = 0,
+	}, {
+		.reg = PAD_FUNCTION_EN_1,
+		.mask = PMX_RGMII_REG1_MASK,
+		.val = 0,
+	}, {
+		.reg = PAD_FUNCTION_EN_2,
+		.mask = PMX_RGMII_REG2_MASK,
+		.val = 0,
+	},
+};
+
+static struct spear_modemux rgmii_modemux[] = {
+	{
+		.muxregs = rgmii_muxreg,
+		.nmuxregs = ARRAY_SIZE(rgmii_muxreg),
+	},
+};
+
+static struct spear_pingroup rgmii_pingroup = {
+	.name = "rgmii_grp",
+	.pins = rgmii_pins,
+	.npins = ARRAY_SIZE(rgmii_pins),
+	.modemuxs = rgmii_modemux,
+	.nmodemuxs = ARRAY_SIZE(rgmii_modemux),
+};
+
+static const char *const rgmii_grps[] = { "rgmii_grp" };
+static struct spear_function rgmii_function = {
+	.name = "rgmii",
+	.groups = rgmii_grps,
+	.ngroups = ARRAY_SIZE(rgmii_grps),
+};
+
+/* Pad multiplexing for smii_0_1_2 device */
+static const unsigned smii_0_1_2_pins[] = { 24, 25, 26, 27, 28, 29, 30, 31, 32,
+	33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50,
+	51, 52, 53, 54, 55 };
+static struct spear_muxreg smii_0_1_2_muxreg[] = {
+	{
+		.reg = PAD_FUNCTION_EN_1,
+		.mask = PMX_SMII_0_1_2_MASK,
+		.val = 0,
+	},
+};
+
+static struct spear_modemux smii_0_1_2_modemux[] = {
+	{
+		.muxregs = smii_0_1_2_muxreg,
+		.nmuxregs = ARRAY_SIZE(smii_0_1_2_muxreg),
+	},
+};
+
+static struct spear_pingroup smii_0_1_2_pingroup = {
+	.name = "smii_0_1_2_grp",
+	.pins = smii_0_1_2_pins,
+	.npins = ARRAY_SIZE(smii_0_1_2_pins),
+	.modemuxs = smii_0_1_2_modemux,
+	.nmodemuxs = ARRAY_SIZE(smii_0_1_2_modemux),
+};
+
+static const char *const smii_0_1_2_grps[] = { "smii_0_1_2_grp" };
+static struct spear_function smii_0_1_2_function = {
+	.name = "smii_0_1_2",
+	.groups = smii_0_1_2_grps,
+	.ngroups = ARRAY_SIZE(smii_0_1_2_grps),
+};
+
+/* Pad multiplexing for ras_mii_txclk device */
+static const unsigned ras_mii_txclk_pins[] = { 98, 99 };
+static struct spear_muxreg ras_mii_txclk_muxreg[] = {
+	{
+		.reg = PAD_FUNCTION_EN_1,
+		.mask = PMX_NFCE2_MASK,
+		.val = 0,
+	},
+};
+
+static struct spear_modemux ras_mii_txclk_modemux[] = {
+	{
+		.muxregs = ras_mii_txclk_muxreg,
+		.nmuxregs = ARRAY_SIZE(ras_mii_txclk_muxreg),
+	},
+};
+
+static struct spear_pingroup ras_mii_txclk_pingroup = {
+	.name = "ras_mii_txclk_grp",
+	.pins = ras_mii_txclk_pins,
+	.npins = ARRAY_SIZE(ras_mii_txclk_pins),
+	.modemuxs = ras_mii_txclk_modemux,
+	.nmodemuxs = ARRAY_SIZE(ras_mii_txclk_modemux),
+};
+
+static const char *const ras_mii_txclk_grps[] = { "ras_mii_txclk_grp" };
+static struct spear_function ras_mii_txclk_function = {
+	.name = "ras_mii_txclk",
+	.groups = ras_mii_txclk_grps,
+	.ngroups = ARRAY_SIZE(ras_mii_txclk_grps),
+};
+
+/* Pad multiplexing for nand 8bit device (cs0 only) */
+static const unsigned nand_8bit_pins[] = { 56, 57, 58, 59, 60, 61, 62, 63, 64,
+	65, 66, 67, 68, 69, 70, 71, 72, 73, 74, 75, 76, 77, 78, 79, 80, 81, 82,
+	83, 84, 85, 158, 159, 160, 161, 162, 163, 164, 165, 166, 167, 168, 169,
+	170, 171, 172, 201, 202, 203, 204, 205, 206, 207, 208, 209, 210, 211,
+	212 };
+static struct spear_muxreg nand_8bit_muxreg[] = {
+	{
+		.reg = PAD_FUNCTION_EN_0,
+		.mask = PMX_NAND8BIT_0_MASK,
+		.val = PMX_NAND8BIT_0_MASK,
+	}, {
+		.reg = PAD_FUNCTION_EN_1,
+		.mask = PMX_NAND8BIT_1_MASK,
+		.val = PMX_NAND8BIT_1_MASK,
+	},
+};
+
+static struct spear_modemux nand_8bit_modemux[] = {
+	{
+		.muxregs = nand_8bit_muxreg,
+		.nmuxregs = ARRAY_SIZE(nand_8bit_muxreg),
+	},
+};
+
+static struct spear_pingroup nand_8bit_pingroup = {
+	.name = "nand_8bit_grp",
+	.pins = nand_8bit_pins,
+	.npins = ARRAY_SIZE(nand_8bit_pins),
+	.modemuxs = nand_8bit_modemux,
+	.nmodemuxs = ARRAY_SIZE(nand_8bit_modemux),
+};
+
+/* Pad multiplexing for nand 16bit device */
+static const unsigned nand_16bit_pins[] = { 201, 202, 203, 204, 207, 208, 209,
+	210 };
+static struct spear_muxreg nand_16bit_muxreg[] = {
+	{
+		.reg = PAD_FUNCTION_EN_1,
+		.mask = PMX_NAND16BIT_1_MASK,
+		.val = PMX_NAND16BIT_1_MASK,
+	},
+};
+
+static struct spear_modemux nand_16bit_modemux[] = {
+	{
+		.muxregs = nand_16bit_muxreg,
+		.nmuxregs = ARRAY_SIZE(nand_16bit_muxreg),
+	},
+};
+
+static struct spear_pingroup nand_16bit_pingroup = {
+	.name = "nand_16bit_grp",
+	.pins = nand_16bit_pins,
+	.npins = ARRAY_SIZE(nand_16bit_pins),
+	.modemuxs = nand_16bit_modemux,
+	.nmodemuxs = ARRAY_SIZE(nand_16bit_modemux),
+};
+
+/* Pad multiplexing for nand 4 chips */
+static const unsigned nand_4_chips_pins[] = { 205, 206, 211, 212 };
+static struct spear_muxreg nand_4_chips_muxreg[] = {
+	{
+		.reg = PAD_FUNCTION_EN_1,
+		.mask = PMX_NAND_4CHIPS_MASK,
+		.val = PMX_NAND_4CHIPS_MASK,
+	},
+};
+
+static struct spear_modemux nand_4_chips_modemux[] = {
+	{
+		.muxregs = nand_4_chips_muxreg,
+		.nmuxregs = ARRAY_SIZE(nand_4_chips_muxreg),
+	},
+};
+
+static struct spear_pingroup nand_4_chips_pingroup = {
+	.name = "nand_4_chips_grp",
+	.pins = nand_4_chips_pins,
+	.npins = ARRAY_SIZE(nand_4_chips_pins),
+	.modemuxs = nand_4_chips_modemux,
+	.nmodemuxs = ARRAY_SIZE(nand_4_chips_modemux),
+};
+
+static const char *const nand_grps[] = { "nand_8bit_grp", "nand_16bit_grp",
+	"nand_4_chips_grp" };
+static struct spear_function nand_function = {
+	.name = "nand",
+	.groups = nand_grps,
+	.ngroups = ARRAY_SIZE(nand_grps),
+};
+
+/* Pad multiplexing for keyboard_6x6 device */
+static const unsigned keyboard_6x6_pins[] = { 201, 202, 203, 204, 205, 206, 207,
+	208, 209, 210, 211, 212 };
+static struct spear_muxreg keyboard_6x6_muxreg[] = {
+	{
+		.reg = PAD_FUNCTION_EN_1,
+		.mask = PMX_KEYBOARD_6X6_MASK | PMX_NFIO8_15_MASK |
+			PMX_NFCE1_MASK | PMX_NFCE2_MASK | PMX_NFWPRT1_MASK |
+			PMX_NFWPRT2_MASK,
+		.val = PMX_KEYBOARD_6X6_MASK,
+	},
+};
+
+static struct spear_modemux keyboard_6x6_modemux[] = {
+	{
+		.muxregs = keyboard_6x6_muxreg,
+		.nmuxregs = ARRAY_SIZE(keyboard_6x6_muxreg),
+	},
+};
+
+static struct spear_pingroup keyboard_6x6_pingroup = {
+	.name = "keyboard_6x6_grp",
+	.pins = keyboard_6x6_pins,
+	.npins = ARRAY_SIZE(keyboard_6x6_pins),
+	.modemuxs = keyboard_6x6_modemux,
+	.nmodemuxs = ARRAY_SIZE(keyboard_6x6_modemux),
+};
+
+/* Pad multiplexing for keyboard_rowcol6_8 device */
+static const unsigned keyboard_rowcol6_8_pins[] = { 24, 25, 26, 27, 28, 29 };
+static struct spear_muxreg keyboard_rowcol6_8_muxreg[] = {
+	{
+		.reg = PAD_FUNCTION_EN_1,
+		.mask = PMX_KBD_ROWCOL68_MASK,
+		.val = PMX_KBD_ROWCOL68_MASK,
+	},
+};
+
+static struct spear_modemux keyboard_rowcol6_8_modemux[] = {
+	{
+		.muxregs = keyboard_rowcol6_8_muxreg,
+		.nmuxregs = ARRAY_SIZE(keyboard_rowcol6_8_muxreg),
+	},
+};
+
+static struct spear_pingroup keyboard_rowcol6_8_pingroup = {
+	.name = "keyboard_rowcol6_8_grp",
+	.pins = keyboard_rowcol6_8_pins,
+	.npins = ARRAY_SIZE(keyboard_rowcol6_8_pins),
+	.modemuxs = keyboard_rowcol6_8_modemux,
+	.nmodemuxs = ARRAY_SIZE(keyboard_rowcol6_8_modemux),
+};
+
+static const char *const keyboard_grps[] = { "keyboard_6x6_grp",
+	"keyboard_rowcol6_8_grp" };
+static struct spear_function keyboard_function = {
+	.name = "keyboard",
+	.groups = keyboard_grps,
+	.ngroups = ARRAY_SIZE(keyboard_grps),
+};
+
+/* Pad multiplexing for uart0 device */
+static const unsigned uart0_pins[] = { 100, 101 };
+static struct spear_muxreg uart0_muxreg[] = {
+	{
+		.reg = PAD_FUNCTION_EN_0,
+		.mask = PMX_UART0_MASK,
+		.val = PMX_UART0_MASK,
+	},
+};
+
+static struct spear_modemux uart0_modemux[] = {
+	{
+		.muxregs = uart0_muxreg,
+		.nmuxregs = ARRAY_SIZE(uart0_muxreg),
+	},
+};
+
+static struct spear_pingroup uart0_pingroup = {
+	.name = "uart0_grp",
+	.pins = uart0_pins,
+	.npins = ARRAY_SIZE(uart0_pins),
+	.modemuxs = uart0_modemux,
+	.nmodemuxs = ARRAY_SIZE(uart0_modemux),
+};
+
+/* Pad multiplexing for uart0_modem device */
+static const unsigned uart0_modem_pins[] = { 12, 13, 14, 15, 16, 17 };
+static struct spear_muxreg uart0_modem_muxreg[] = {
+	{
+		.reg = PAD_FUNCTION_EN_1,
+		.mask = PMX_UART0_MODEM_MASK,
+		.val = PMX_UART0_MODEM_MASK,
+	},
+};
+
+static struct spear_modemux uart0_modem_modemux[] = {
+	{
+		.muxregs = uart0_modem_muxreg,
+		.nmuxregs = ARRAY_SIZE(uart0_modem_muxreg),
+	},
+};
+
+static struct spear_pingroup uart0_modem_pingroup = {
+	.name = "uart0_modem_grp",
+	.pins = uart0_modem_pins,
+	.npins = ARRAY_SIZE(uart0_modem_pins),
+	.modemuxs = uart0_modem_modemux,
+	.nmodemuxs = ARRAY_SIZE(uart0_modem_modemux),
+};
+
+static const char *const uart0_grps[] = { "uart0_grp", "uart0_modem_grp" };
+static struct spear_function uart0_function = {
+	.name = "uart0",
+	.groups = uart0_grps,
+	.ngroups = ARRAY_SIZE(uart0_grps),
+};
+
+/* Pad multiplexing for gpt0_tmr0 device */
+static const unsigned gpt0_tmr0_pins[] = { 10, 11 };
+static struct spear_muxreg gpt0_tmr0_muxreg[] = {
+	{
+		.reg = PAD_FUNCTION_EN_1,
+		.mask = PMX_GPT0_TMR0_MASK,
+		.val = PMX_GPT0_TMR0_MASK,
+	},
+};
+
+static struct spear_modemux gpt0_tmr0_modemux[] = {
+	{
+		.muxregs = gpt0_tmr0_muxreg,
+		.nmuxregs = ARRAY_SIZE(gpt0_tmr0_muxreg),
+	},
+};
+
+static struct spear_pingroup gpt0_tmr0_pingroup = {
+	.name = "gpt0_tmr0_grp",
+	.pins = gpt0_tmr0_pins,
+	.npins = ARRAY_SIZE(gpt0_tmr0_pins),
+	.modemuxs = gpt0_tmr0_modemux,
+	.nmodemuxs = ARRAY_SIZE(gpt0_tmr0_modemux),
+};
+
+/* Pad multiplexing for gpt0_tmr1 device */
+static const unsigned gpt0_tmr1_pins[] = { 8, 9 };
+static struct spear_muxreg gpt0_tmr1_muxreg[] = {
+	{
+		.reg = PAD_FUNCTION_EN_1,
+		.mask = PMX_GPT0_TMR1_MASK,
+		.val = PMX_GPT0_TMR1_MASK,
+	},
+};
+
+static struct spear_modemux gpt0_tmr1_modemux[] = {
+	{
+		.muxregs = gpt0_tmr1_muxreg,
+		.nmuxregs = ARRAY_SIZE(gpt0_tmr1_muxreg),
+	},
+};
+
+static struct spear_pingroup gpt0_tmr1_pingroup = {
+	.name = "gpt0_tmr1_grp",
+	.pins = gpt0_tmr1_pins,
+	.npins = ARRAY_SIZE(gpt0_tmr1_pins),
+	.modemuxs = gpt0_tmr1_modemux,
+	.nmodemuxs = ARRAY_SIZE(gpt0_tmr1_modemux),
+};
+
+static const char *const gpt0_grps[] = { "gpt0_tmr0_grp", "gpt0_tmr1_grp" };
+static struct spear_function gpt0_function = {
+	.name = "gpt0",
+	.groups = gpt0_grps,
+	.ngroups = ARRAY_SIZE(gpt0_grps),
+};
+
+/* Pad multiplexing for gpt1_tmr0 device */
+static const unsigned gpt1_tmr0_pins[] = { 6, 7 };
+static struct spear_muxreg gpt1_tmr0_muxreg[] = {
+	{
+		.reg = PAD_FUNCTION_EN_1,
+		.mask = PMX_GPT1_TMR0_MASK,
+		.val = PMX_GPT1_TMR0_MASK,
+	},
+};
+
+static struct spear_modemux gpt1_tmr0_modemux[] = {
+	{
+		.muxregs = gpt1_tmr0_muxreg,
+		.nmuxregs = ARRAY_SIZE(gpt1_tmr0_muxreg),
+	},
+};
+
+static struct spear_pingroup gpt1_tmr0_pingroup = {
+	.name = "gpt1_tmr0_grp",
+	.pins = gpt1_tmr0_pins,
+	.npins = ARRAY_SIZE(gpt1_tmr0_pins),
+	.modemuxs = gpt1_tmr0_modemux,
+	.nmodemuxs = ARRAY_SIZE(gpt1_tmr0_modemux),
+};
+
+/* Pad multiplexing for gpt1_tmr1 device */
+static const unsigned gpt1_tmr1_pins[] = { 4, 5 };
+static struct spear_muxreg gpt1_tmr1_muxreg[] = {
+	{
+		.reg = PAD_FUNCTION_EN_1,
+		.mask = PMX_GPT1_TMR1_MASK,
+		.val = PMX_GPT1_TMR1_MASK,
+	},
+};
+
+static struct spear_modemux gpt1_tmr1_modemux[] = {
+	{
+		.muxregs = gpt1_tmr1_muxreg,
+		.nmuxregs = ARRAY_SIZE(gpt1_tmr1_muxreg),
+	},
+};
+
+static struct spear_pingroup gpt1_tmr1_pingroup = {
+	.name = "gpt1_tmr1_grp",
+	.pins = gpt1_tmr1_pins,
+	.npins = ARRAY_SIZE(gpt1_tmr1_pins),
+	.modemuxs = gpt1_tmr1_modemux,
+	.nmodemuxs = ARRAY_SIZE(gpt1_tmr1_modemux),
+};
+
+static const char *const gpt1_grps[] = { "gpt1_tmr1_grp", "gpt1_tmr0_grp" };
+static struct spear_function gpt1_function = {
+	.name = "gpt1",
+	.groups = gpt1_grps,
+	.ngroups = ARRAY_SIZE(gpt1_grps),
+};
+
+/* Pad multiplexing for mcif device */
+static const unsigned mcif_pins[] = { 86, 87, 88, 89, 90, 91, 92, 93, 213, 214,
+	215, 216, 217, 218, 219, 220, 221, 222, 223, 224, 225, 226, 227, 228,
+	229, 230, 231, 232, 233, 234, 235, 236, 237, 238, 239, 240, 241, 242,
+	243, 244, 245 };
+#define MCIF_MUXREG						\
+	{							\
+		.reg = PAD_FUNCTION_EN_0,			\
+		.mask = PMX_MCI_DATA8_15_MASK,			\
+		.val = PMX_MCI_DATA8_15_MASK,			\
+	}, {							\
+		.reg = PAD_FUNCTION_EN_1,			\
+		.mask = PMX_MCIFALL_1_MASK | PMX_NFWPRT1_MASK |	\
+			PMX_NFWPRT2_MASK,			\
+		.val = PMX_MCIFALL_1_MASK,			\
+	}, {							\
+		.reg = PAD_FUNCTION_EN_2,			\
+		.mask = PMX_MCIFALL_2_MASK,			\
+		.val = PMX_MCIFALL_2_MASK,			\
+	}
+
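+/*
+ * The sdhci, cf and xd groups below all claim the same mcif_pins; which
+ * controller actually drives the pads is selected at run time through the
+ * MCIF_SEL field of PERIP_CFG. A hedged sketch, assuming the common
+ * pinctrl-spear core applies each spear_muxreg as a masked read-modify-write
+ * ('base' is only an illustrative name for the pinmux register base):
+ *
+ *	u32 tmp = readl(base + PERIP_CFG);
+ *	tmp &= ~MCIF_SEL_MASK;
+ *	tmp |= MCIF_SEL_SD;	(or MCIF_SEL_CF / MCIF_SEL_XD)
+ *	writel(tmp, base + PERIP_CFG);
+ */
+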
+/* sdhci device */
+static struct spear_muxreg sdhci_muxreg[] = {
+	MCIF_MUXREG,
+	{
+		.reg = PERIP_CFG,
+		.mask = MCIF_SEL_MASK,
+		.val = MCIF_SEL_SD,
+	},
+};
+
+static struct spear_modemux sdhci_modemux[] = {
+	{
+		.muxregs = sdhci_muxreg,
+		.nmuxregs = ARRAY_SIZE(sdhci_muxreg),
+	},
+};
+
+static struct spear_pingroup sdhci_pingroup = {
+	.name = "sdhci_grp",
+	.pins = mcif_pins,
+	.npins = ARRAY_SIZE(mcif_pins),
+	.modemuxs = sdhci_modemux,
+	.nmodemuxs = ARRAY_SIZE(sdhci_modemux),
+};
+
+static const char *const sdhci_grps[] = { "sdhci_grp" };
+static struct spear_function sdhci_function = {
+	.name = "sdhci",
+	.groups = sdhci_grps,
+	.ngroups = ARRAY_SIZE(sdhci_grps),
+};
+
+/* cf device */
+static struct spear_muxreg cf_muxreg[] = {
+	MCIF_MUXREG,
+	{
+		.reg = PERIP_CFG,
+		.mask = MCIF_SEL_MASK,
+		.val = MCIF_SEL_CF,
+	},
+};
+
+static struct spear_modemux cf_modemux[] = {
+	{
+		.muxregs = cf_muxreg,
+		.nmuxregs = ARRAY_SIZE(cf_muxreg),
+	},
+};
+
+static struct spear_pingroup cf_pingroup = {
+	.name = "cf_grp",
+	.pins = mcif_pins,
+	.npins = ARRAY_SIZE(mcif_pins),
+	.modemuxs = cf_modemux,
+	.nmodemuxs = ARRAY_SIZE(cf_modemux),
+};
+
+static const char *const cf_grps[] = { "cf_grp" };
+static struct spear_function cf_function = {
+	.name = "cf",
+	.groups = cf_grps,
+	.ngroups = ARRAY_SIZE(cf_grps),
+};
+
+/* xd device */
+static struct spear_muxreg xd_muxreg[] = {
+	MCIF_MUXREG,
+	{
+		.reg = PERIP_CFG,
+		.mask = MCIF_SEL_MASK,
+		.val = MCIF_SEL_XD,
+	},
+};
+
+static struct spear_modemux xd_modemux[] = {
+	{
+		.muxregs = xd_muxreg,
+		.nmuxregs = ARRAY_SIZE(xd_muxreg),
+	},
+};
+
+static struct spear_pingroup xd_pingroup = {
+	.name = "xd_grp",
+	.pins = mcif_pins,
+	.npins = ARRAY_SIZE(mcif_pins),
+	.modemuxs = xd_modemux,
+	.nmodemuxs = ARRAY_SIZE(xd_modemux),
+};
+
+static const char *const xd_grps[] = { "xd_grp" };
+static struct spear_function xd_function = {
+	.name = "xd",
+	.groups = xd_grps,
+	.ngroups = ARRAY_SIZE(xd_grps),
+};
+
+/* Pad multiplexing for touch_xy device */
+static const unsigned touch_xy_pins[] = { 97 };
+static struct spear_muxreg touch_xy_muxreg[] = {
+	{
+		.reg = PAD_FUNCTION_EN_2,
+		.mask = PMX_TOUCH_XY_MASK,
+		.val = PMX_TOUCH_XY_MASK,
+	},
+};
+
+static struct spear_modemux touch_xy_modemux[] = {
+	{
+		.muxregs = touch_xy_muxreg,
+		.nmuxregs = ARRAY_SIZE(touch_xy_muxreg),
+	},
+};
+
+static struct spear_pingroup touch_xy_pingroup = {
+	.name = "touch_xy_grp",
+	.pins = touch_xy_pins,
+	.npins = ARRAY_SIZE(touch_xy_pins),
+	.modemuxs = touch_xy_modemux,
+	.nmodemuxs = ARRAY_SIZE(touch_xy_modemux),
+};
+
+static const char *const touch_xy_grps[] = { "touch_xy_grp" };
+static struct spear_function touch_xy_function = {
+	.name = "touchscreen",
+	.groups = touch_xy_grps,
+	.ngroups = ARRAY_SIZE(touch_xy_grps),
+};
+
+/* Pad multiplexing for uart1 device */
+/* Muxed with I2C */
+static const unsigned uart1_dis_i2c_pins[] = { 102, 103 };
+static struct spear_muxreg uart1_dis_i2c_muxreg[] = {
+	{
+		.reg = PAD_FUNCTION_EN_0,
+		.mask = PMX_I2C0_MASK,
+		.val = 0,
+	},
+};
+
+static struct spear_modemux uart1_dis_i2c_modemux[] = {
+	{
+		.muxregs = uart1_dis_i2c_muxreg,
+		.nmuxregs = ARRAY_SIZE(uart1_dis_i2c_muxreg),
+	},
+};
+
+static struct spear_pingroup uart_1_dis_i2c_pingroup = {
+	.name = "uart1_disable_i2c_grp",
+	.pins = uart1_dis_i2c_pins,
+	.npins = ARRAY_SIZE(uart1_dis_i2c_pins),
+	.modemuxs = uart1_dis_i2c_modemux,
+	.nmodemuxs = ARRAY_SIZE(uart1_dis_i2c_modemux),
+};
+
+/* Muxed with SD/MMC */
+static const unsigned uart1_dis_sd_pins[] = { 214, 215 };
+static struct spear_muxreg uart1_dis_sd_muxreg[] = {
+	{
+		.reg = PAD_FUNCTION_EN_1,
+		.mask = PMX_MCIDATA1_MASK |
+			PMX_MCIDATA2_MASK,
+		.val = 0,
+	},
+};
+
+static struct spear_modemux uart1_dis_sd_modemux[] = {
+	{
+		.muxregs = uart1_dis_sd_muxreg,
+		.nmuxregs = ARRAY_SIZE(uart1_dis_sd_muxreg),
+	},
+};
+
+static struct spear_pingroup uart_1_dis_sd_pingroup = {
+	.name = "uart1_disable_sd_grp",
+	.pins = uart1_dis_sd_pins,
+	.npins = ARRAY_SIZE(uart1_dis_sd_pins),
+	.modemuxs = uart1_dis_sd_modemux,
+	.nmodemuxs = ARRAY_SIZE(uart1_dis_sd_modemux),
+};
+
+static const char *const uart1_grps[] = { "uart1_disable_i2c_grp",
+	"uart1_disable_sd_grp" };
+static struct spear_function uart1_function = {
+	.name = "uart1",
+	.groups = uart1_grps,
+	.ngroups = ARRAY_SIZE(uart1_grps),
+};
+
+/* Pad multiplexing for uart2_3 device */
+static const unsigned uart2_3_pins[] = { 104, 105, 106, 107 };
+static struct spear_muxreg uart2_3_muxreg[] = {
+	{
+		.reg = PAD_FUNCTION_EN_0,
+		.mask = PMX_I2S0_MASK,
+		.val = 0,
+	},
+};
+
+static struct spear_modemux uart2_3_modemux[] = {
+	{
+		.muxregs = uart2_3_muxreg,
+		.nmuxregs = ARRAY_SIZE(uart2_3_muxreg),
+	},
+};
+
+static struct spear_pingroup uart_2_3_pingroup = {
+	.name = "uart2_3_grp",
+	.pins = uart2_3_pins,
+	.npins = ARRAY_SIZE(uart2_3_pins),
+	.modemuxs = uart2_3_modemux,
+	.nmodemuxs = ARRAY_SIZE(uart2_3_modemux),
+};
+
+static const char *const uart2_3_grps[] = { "uart2_3_grp" };
+static struct spear_function uart2_3_function = {
+	.name = "uart2_3",
+	.groups = uart2_3_grps,
+	.ngroups = ARRAY_SIZE(uart2_3_grps),
+};
+
+/* Pad multiplexing for uart4 device */
+static const unsigned uart4_pins[] = { 108, 113 };
+static struct spear_muxreg uart4_muxreg[] = {
+	{
+		.reg = PAD_FUNCTION_EN_0,
+		.mask = PMX_I2S0_MASK | PMX_CLCD1_MASK,
+		.val = 0,
+	},
+};
+
+static struct spear_modemux uart4_modemux[] = {
+	{
+		.muxregs = uart4_muxreg,
+		.nmuxregs = ARRAY_SIZE(uart4_muxreg),
+	},
+};
+
+static struct spear_pingroup uart_4_pingroup = {
+	.name = "uart4_grp",
+	.pins = uart4_pins,
+	.npins = ARRAY_SIZE(uart4_pins),
+	.modemuxs = uart4_modemux,
+	.nmodemuxs = ARRAY_SIZE(uart4_modemux),
+};
+
+static const char *const uart4_grps[] = { "uart4_grp" };
+static struct spear_function uart4_function = {
+	.name = "uart4",
+	.groups = uart4_grps,
+	.ngroups = ARRAY_SIZE(uart4_grps),
+};
+
+/* Pad multiplexing for uart5 device */
+static const unsigned uart5_pins[] = { 114, 115 };
+static struct spear_muxreg uart5_muxreg[] = {
+	{
+		.reg = PAD_FUNCTION_EN_0,
+		.mask = PMX_CLCD1_MASK,
+		.val = 0,
+	},
+};
+
+static struct spear_modemux uart5_modemux[] = {
+	{
+		.muxregs = uart5_muxreg,
+		.nmuxregs = ARRAY_SIZE(uart5_muxreg),
+	},
+};
+
+static struct spear_pingroup uart_5_pingroup = {
+	.name = "uart5_grp",
+	.pins = uart5_pins,
+	.npins = ARRAY_SIZE(uart5_pins),
+	.modemuxs = uart5_modemux,
+	.nmodemuxs = ARRAY_SIZE(uart5_modemux),
+};
+
+static const char *const uart5_grps[] = { "uart5_grp" };
+static struct spear_function uart5_function = {
+	.name = "uart5",
+	.groups = uart5_grps,
+	.ngroups = ARRAY_SIZE(uart5_grps),
+};
+
+/* Pad multiplexing for rs485_0_1_tdm_0_1 device */
+static const unsigned rs485_0_1_tdm_0_1_pins[] = { 116, 117, 118, 119, 120, 121,
+	122, 123, 124, 125, 126, 127, 128, 129, 130, 131, 132, 133, 134, 135,
+	136, 137 };
+static struct spear_muxreg rs485_0_1_tdm_0_1_muxreg[] = {
+	{
+		.reg = PAD_FUNCTION_EN_0,
+		.mask = PMX_CLCD1_MASK,
+		.val = 0,
+	},
+};
+
+static struct spear_modemux rs485_0_1_tdm_0_1_modemux[] = {
+	{
+		.muxregs = rs485_0_1_tdm_0_1_muxreg,
+		.nmuxregs = ARRAY_SIZE(rs485_0_1_tdm_0_1_muxreg),
+	},
+};
+
+static struct spear_pingroup rs485_0_1_tdm_0_1_pingroup = {
+	.name = "rs485_0_1_tdm_0_1_grp",
+	.pins = rs485_0_1_tdm_0_1_pins,
+	.npins = ARRAY_SIZE(rs485_0_1_tdm_0_1_pins),
+	.modemuxs = rs485_0_1_tdm_0_1_modemux,
+	.nmodemuxs = ARRAY_SIZE(rs485_0_1_tdm_0_1_modemux),
+};
+
+static const char *const rs485_0_1_tdm_0_1_grps[] = { "rs485_0_1_tdm_0_1_grp" };
+static struct spear_function rs485_0_1_tdm_0_1_function = {
+	.name = "rs485_0_1_tdm_0_1",
+	.groups = rs485_0_1_tdm_0_1_grps,
+	.ngroups = ARRAY_SIZE(rs485_0_1_tdm_0_1_grps),
+};
+
+/* Pad multiplexing for i2c_1_2 device */
+static const unsigned i2c_1_2_pins[] = { 138, 139, 140, 141 };
+static struct spear_muxreg i2c_1_2_muxreg[] = {
+	{
+		.reg = PAD_FUNCTION_EN_0,
+		.mask = PMX_CLCD1_MASK,
+		.val = 0,
+	},
+};
+
+static struct spear_modemux i2c_1_2_modemux[] = {
+	{
+		.muxregs = i2c_1_2_muxreg,
+		.nmuxregs = ARRAY_SIZE(i2c_1_2_muxreg),
+	},
+};
+
+static struct spear_pingroup i2c_1_2_pingroup = {
+	.name = "i2c_1_2_grp",
+	.pins = i2c_1_2_pins,
+	.npins = ARRAY_SIZE(i2c_1_2_pins),
+	.modemuxs = i2c_1_2_modemux,
+	.nmodemuxs = ARRAY_SIZE(i2c_1_2_modemux),
+};
+
+static const char *const i2c_1_2_grps[] = { "i2c_1_2_grp" };
+static struct spear_function i2c_1_2_function = {
+	.name = "i2c_1_2",
+	.groups = i2c_1_2_grps,
+	.ngroups = ARRAY_SIZE(i2c_1_2_grps),
+};
+
+/* Pad multiplexing for i2c3_dis_smi_clcd device */
+/* Muxed with SMI & CLCD */
+static const unsigned i2c3_dis_smi_clcd_pins[] = { 142, 153 };
+static struct spear_muxreg i2c3_dis_smi_clcd_muxreg[] = {
+	{
+		.reg = PAD_FUNCTION_EN_0,
+		.mask = PMX_CLCD1_MASK | PMX_SMI_MASK,
+		.val = 0,
+	},
+};
+
+static struct spear_modemux i2c3_dis_smi_clcd_modemux[] = {
+	{
+		.muxregs = i2c3_dis_smi_clcd_muxreg,
+		.nmuxregs = ARRAY_SIZE(i2c3_dis_smi_clcd_muxreg),
+	},
+};
+
+static struct spear_pingroup i2c3_dis_smi_clcd_pingroup = {
+	.name = "i2c3_dis_smi_clcd_grp",
+	.pins = i2c3_dis_smi_clcd_pins,
+	.npins = ARRAY_SIZE(i2c3_dis_smi_clcd_pins),
+	.modemuxs = i2c3_dis_smi_clcd_modemux,
+	.nmodemuxs = ARRAY_SIZE(i2c3_dis_smi_clcd_modemux),
+};
+
+/* Pad multiplexing for i2c3_dis_sd_i2s0 device */
+/* Muxed with SD/MMC & I2S1 */
+static const unsigned i2c3_dis_sd_i2s0_pins[] = { 0, 216 };
+static struct spear_muxreg i2c3_dis_sd_i2s0_muxreg[] = {
+	{
+		.reg = PAD_FUNCTION_EN_1,
+		.mask = PMX_I2S1_MASK | PMX_MCIDATA3_MASK,
+		.val = 0,
+	},
+};
+
+static struct spear_modemux i2c3_dis_sd_i2s0_modemux[] = {
+	{
+		.muxregs = i2c3_dis_sd_i2s0_muxreg,
+		.nmuxregs = ARRAY_SIZE(i2c3_dis_sd_i2s0_muxreg),
+	},
+};
+
+static struct spear_pingroup i2c3_dis_sd_i2s0_pingroup = {
+	.name = "i2c3_dis_sd_i2s0_grp",
+	.pins = i2c3_dis_sd_i2s0_pins,
+	.npins = ARRAY_SIZE(i2c3_dis_sd_i2s0_pins),
+	.modemuxs = i2c3_dis_sd_i2s0_modemux,
+	.nmodemuxs = ARRAY_SIZE(i2c3_dis_sd_i2s0_modemux),
+};
+
+static const char *const i2c3_grps[] = { "i2c3_dis_smi_clcd_grp",
+	"i2c3_dis_sd_i2s0_grp" };
+static struct spear_function i2c3_function = {
+	.name = "i2c3_i2s1",
+	.groups = i2c3_grps,
+	.ngroups = ARRAY_SIZE(i2c3_grps),
+};
+
+/* Pad multiplexing for i2c_4_5_dis_smi device */
+/* Muxed with SMI */
+static const unsigned i2c_4_5_dis_smi_pins[] = { 154, 155, 156, 157 };
+static struct spear_muxreg i2c_4_5_dis_smi_muxreg[] = {
+	{
+		.reg = PAD_FUNCTION_EN_0,
+		.mask = PMX_SMI_MASK,
+		.val = 0,
+	},
+};
+
+static struct spear_modemux i2c_4_5_dis_smi_modemux[] = {
+	{
+		.muxregs = i2c_4_5_dis_smi_muxreg,
+		.nmuxregs = ARRAY_SIZE(i2c_4_5_dis_smi_muxreg),
+	},
+};
+
+static struct spear_pingroup i2c_4_5_dis_smi_pingroup = {
+	.name = "i2c_4_5_dis_smi_grp",
+	.pins = i2c_4_5_dis_smi_pins,
+	.npins = ARRAY_SIZE(i2c_4_5_dis_smi_pins),
+	.modemuxs = i2c_4_5_dis_smi_modemux,
+	.nmodemuxs = ARRAY_SIZE(i2c_4_5_dis_smi_modemux),
+};
+
+/* Pad multiplexing for i2c4_dis_sd device */
+/* Muxed with SD/MMC */
+static const unsigned i2c4_dis_sd_pins[] = { 217, 218 };
+static struct spear_muxreg i2c4_dis_sd_muxreg[] = {
+	{
+		.reg = PAD_FUNCTION_EN_1,
+		.mask = PMX_MCIDATA4_MASK,
+		.val = 0,
+	}, {
+		.reg = PAD_FUNCTION_EN_2,
+		.mask = PMX_MCIDATA5_MASK,
+		.val = 0,
+	},
+};
+
+static struct spear_modemux i2c4_dis_sd_modemux[] = {
+	{
+		.muxregs = i2c4_dis_sd_muxreg,
+		.nmuxregs = ARRAY_SIZE(i2c4_dis_sd_muxreg),
+	},
+};
+
+static struct spear_pingroup i2c4_dis_sd_pingroup = {
+	.name = "i2c4_dis_sd_grp",
+	.pins = i2c4_dis_sd_pins,
+	.npins = ARRAY_SIZE(i2c4_dis_sd_pins),
+	.modemuxs = i2c4_dis_sd_modemux,
+	.nmodemuxs = ARRAY_SIZE(i2c4_dis_sd_modemux),
+};
+
+/* Pad multiplexing for i2c5_dis_sd device */
+/* Muxed with SD/MMC */
+static const unsigned i2c5_dis_sd_pins[] = { 219, 220 };
+static struct spear_muxreg i2c5_dis_sd_muxreg[] = {
+	{
+		.reg = PAD_FUNCTION_EN_2,
+		.mask = PMX_MCIDATA6_MASK |
+			PMX_MCIDATA7_MASK,
+		.val = 0,
+	},
+};
+
+static struct spear_modemux i2c5_dis_sd_modemux[] = {
+	{
+		.muxregs = i2c5_dis_sd_muxreg,
+		.nmuxregs = ARRAY_SIZE(i2c5_dis_sd_muxreg),
+	},
+};
+
+static struct spear_pingroup i2c5_dis_sd_pingroup = {
+	.name = "i2c5_dis_sd_grp",
+	.pins = i2c5_dis_sd_pins,
+	.npins = ARRAY_SIZE(i2c5_dis_sd_pins),
+	.modemuxs = i2c5_dis_sd_modemux,
+	.nmodemuxs = ARRAY_SIZE(i2c5_dis_sd_modemux),
+};
+
+static const char *const i2c_4_5_grps[] = { "i2c5_dis_sd_grp",
+	"i2c4_dis_sd_grp", "i2c_4_5_dis_smi_grp" };
+static struct spear_function i2c_4_5_function = {
+	.name = "i2c_4_5",
+	.groups = i2c_4_5_grps,
+	.ngroups = ARRAY_SIZE(i2c_4_5_grps),
+};
+
+/* Pad multiplexing for i2c_6_7_dis_kbd device */
+/* Muxed with KBD */
+static const unsigned i2c_6_7_dis_kbd_pins[] = { 207, 208, 209, 210 };
+static struct spear_muxreg i2c_6_7_dis_kbd_muxreg[] = {
+	{
+		.reg = PAD_FUNCTION_EN_1,
+		.mask = PMX_KBD_ROWCOL25_MASK,
+		.val = 0,
+	},
+};
+
+static struct spear_modemux i2c_6_7_dis_kbd_modemux[] = {
+	{
+		.muxregs = i2c_6_7_dis_kbd_muxreg,
+		.nmuxregs = ARRAY_SIZE(i2c_6_7_dis_kbd_muxreg),
+	},
+};
+
+static struct spear_pingroup i2c_6_7_dis_kbd_pingroup = {
+	.name = "i2c_6_7_dis_kbd_grp",
+	.pins = i2c_6_7_dis_kbd_pins,
+	.npins = ARRAY_SIZE(i2c_6_7_dis_kbd_pins),
+	.modemuxs = i2c_6_7_dis_kbd_modemux,
+	.nmodemuxs = ARRAY_SIZE(i2c_6_7_dis_kbd_modemux),
+};
+
+/* Pad multiplexing for i2c6_dis_sd device */
+/* Muxed with SD/MMC */
+static const unsigned i2c6_dis_sd_pins[] = { 236, 237 };
+static struct spear_muxreg i2c6_dis_sd_muxreg[] = {
+	{
+		.reg = PAD_FUNCTION_EN_2,
+		.mask = PMX_MCIIORDRE_MASK |
+			PMX_MCIIOWRWE_MASK,
+		.val = 0,
+	},
+};
+
+static struct spear_modemux i2c6_dis_sd_modemux[] = {
+	{
+		.muxregs = i2c6_dis_sd_muxreg,
+		.nmuxregs = ARRAY_SIZE(i2c6_dis_sd_muxreg),
+	},
+};
+
+static struct spear_pingroup i2c6_dis_sd_pingroup = {
+	.name = "i2c6_dis_sd_grp",
+	.pins = i2c6_dis_sd_pins,
+	.npins = ARRAY_SIZE(i2c6_dis_sd_pins),
+	.modemuxs = i2c6_dis_sd_modemux,
+	.nmodemuxs = ARRAY_SIZE(i2c6_dis_sd_modemux),
+};
+
+/* Pad multiplexing for i2c7_dis_sd device */
+static const unsigned i2c7_dis_sd_pins[] = { 238, 239 };
+static struct spear_muxreg i2c7_dis_sd_muxreg[] = {
+	{
+		.reg = PAD_FUNCTION_EN_2,
+		.mask = PMX_MCIRESETCF_MASK |
+			PMX_MCICS0CE_MASK,
+		.val = 0,
+	},
+};
+
+static struct spear_modemux i2c7_dis_sd_modemux[] = {
+	{
+		.muxregs = i2c7_dis_sd_muxreg,
+		.nmuxregs = ARRAY_SIZE(i2c7_dis_sd_muxreg),
+	},
+};
+
+static struct spear_pingroup i2c7_dis_sd_pingroup = {
+	.name = "i2c7_dis_sd_grp",
+	.pins = i2c7_dis_sd_pins,
+	.npins = ARRAY_SIZE(i2c7_dis_sd_pins),
+	.modemuxs = i2c7_dis_sd_modemux,
+	.nmodemuxs = ARRAY_SIZE(i2c7_dis_sd_modemux),
+};
+
+static const char *const i2c_6_7_grps[] = { "i2c6_dis_sd_grp",
+	"i2c7_dis_sd_grp", "i2c_6_7_dis_kbd_grp" };
+static struct spear_function i2c_6_7_function = {
+	.name = "i2c_6_7",
+	.groups = i2c_6_7_grps,
+	.ngroups = ARRAY_SIZE(i2c_6_7_grps),
+};
+
+/* Pad multiplexing for can0_dis_nor device */
+/* Muxed with NOR */
+static const unsigned can0_dis_nor_pins[] = { 56, 57 };
+static struct spear_muxreg can0_dis_nor_muxreg[] = {
+	{
+		.reg = PAD_FUNCTION_EN_0,
+		.mask = PMX_NFRSTPWDWN2_MASK,
+		.val = 0,
+	}, {
+		.reg = PAD_FUNCTION_EN_1,
+		.mask = PMX_NFRSTPWDWN3_MASK,
+		.val = 0,
+	},
+};
+
+static struct spear_modemux can0_dis_nor_modemux[] = {
+	{
+		.muxregs = can0_dis_nor_muxreg,
+		.nmuxregs = ARRAY_SIZE(can0_dis_nor_muxreg),
+	},
+};
+
+static struct spear_pingroup can0_dis_nor_pingroup = {
+	.name = "can0_dis_nor_grp",
+	.pins = can0_dis_nor_pins,
+	.npins = ARRAY_SIZE(can0_dis_nor_pins),
+	.modemuxs = can0_dis_nor_modemux,
+	.nmodemuxs = ARRAY_SIZE(can0_dis_nor_modemux),
+};
+
+/* Pad multiplexing for can0_dis_sd device */
+/* Muxed with SD/MMC */
+static const unsigned can0_dis_sd_pins[] = { 240, 241 };
+static struct spear_muxreg can0_dis_sd_muxreg[] = {
+	{
+		.reg = PAD_FUNCTION_EN_2,
+		.mask = PMX_MCICFINTR_MASK | PMX_MCIIORDY_MASK,
+		.val = 0,
+	},
+};
+
+static struct spear_modemux can0_dis_sd_modemux[] = {
+	{
+		.muxregs = can0_dis_sd_muxreg,
+		.nmuxregs = ARRAY_SIZE(can0_dis_sd_muxreg),
+	},
+};
+
+static struct spear_pingroup can0_dis_sd_pingroup = {
+	.name = "can0_dis_sd_grp",
+	.pins = can0_dis_sd_pins,
+	.npins = ARRAY_SIZE(can0_dis_sd_pins),
+	.modemuxs = can0_dis_sd_modemux,
+	.nmodemuxs = ARRAY_SIZE(can0_dis_sd_modemux),
+};
+
+static const char *const can0_grps[] = { "can0_dis_nor_grp", "can0_dis_sd_grp"
+};
+static struct spear_function can0_function = {
+	.name = "can0",
+	.groups = can0_grps,
+	.ngroups = ARRAY_SIZE(can0_grps),
+};
+
+/* Pad multiplexing for can1_dis_sd device */
+/* Muxed with SD/MMC */
+static const unsigned can1_dis_sd_pins[] = { 242, 243 };
+static struct spear_muxreg can1_dis_sd_muxreg[] = {
+	{
+		.reg = PAD_FUNCTION_EN_2,
+		.mask = PMX_MCICS1_MASK | PMX_MCIDMAACK_MASK,
+		.val = 0,
+	},
+};
+
+static struct spear_modemux can1_dis_sd_modemux[] = {
+	{
+		.muxregs = can1_dis_sd_muxreg,
+		.nmuxregs = ARRAY_SIZE(can1_dis_sd_muxreg),
+	},
+};
+
+static struct spear_pingroup can1_dis_sd_pingroup = {
+	.name = "can1_dis_sd_grp",
+	.pins = can1_dis_sd_pins,
+	.npins = ARRAY_SIZE(can1_dis_sd_pins),
+	.modemuxs = can1_dis_sd_modemux,
+	.nmodemuxs = ARRAY_SIZE(can1_dis_sd_modemux),
+};
+
+/* Pad multiplexing for can1_dis_kbd device */
+/* Muxed with KBD */
+static const unsigned can1_dis_kbd_pins[] = { 201, 202 };
+static struct spear_muxreg can1_dis_kbd_muxreg[] = {
+	{
+		.reg = PAD_FUNCTION_EN_1,
+		.mask = PMX_KBD_ROWCOL25_MASK,
+		.val = 0,
+	},
+};
+
+static struct spear_modemux can1_dis_kbd_modemux[] = {
+	{
+		.muxregs = can1_dis_kbd_muxreg,
+		.nmuxregs = ARRAY_SIZE(can1_dis_kbd_muxreg),
+	},
+};
+
+static struct spear_pingroup can1_dis_kbd_pingroup = {
+	.name = "can1_dis_kbd_grp",
+	.pins = can1_dis_kbd_pins,
+	.npins = ARRAY_SIZE(can1_dis_kbd_pins),
+	.modemuxs = can1_dis_kbd_modemux,
+	.nmodemuxs = ARRAY_SIZE(can1_dis_kbd_modemux),
+};
+
+static const char *const can1_grps[] = { "can1_dis_sd_grp", "can1_dis_kbd_grp"
+};
+static struct spear_function can1_function = {
+	.name = "can1",
+	.groups = can1_grps,
+	.ngroups = ARRAY_SIZE(can1_grps),
+};
+
+/* Pad multiplexing for pci device */
+static const unsigned pci_sata_pins[] = { 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 18,
+	19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36,
+	37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54,
+	55, 86, 87, 88, 89, 90, 91, 92, 93, 94, 95, 96, 97, 98, 99 };
+#define PCI_SATA_MUXREG				\
+	{					\
+		.reg = PAD_FUNCTION_EN_0,	\
+		.mask = PMX_MCI_DATA8_15_MASK,	\
+		.val = 0,			\
+	}, {					\
+		.reg = PAD_FUNCTION_EN_1,	\
+		.mask = PMX_PCI_REG1_MASK,	\
+		.val = 0,			\
+	}, {					\
+		.reg = PAD_FUNCTION_EN_2,	\
+		.mask = PMX_PCI_REG2_MASK,	\
+		.val = 0,			\
+	}
+
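+/*
+ * The pcie0/1/2 and sata0/1/2 groups below share pci_sata_pins: the
+ * PCI_SATA_MUXREG entries only release the pads from their other functions,
+ * while the extra PCIE_SATA_CFG write picks PCIe or SATA per instance. A
+ * minimal sketch, assuming the same masked read-modify-write as above (here
+ * mask and val are identical, so the bits are simply set):
+ *
+ *	tmp = readl(base + PCIE_SATA_CFG);
+ *	tmp |= PCIE_CFG_VAL(0);	(SATA_CFG_VAL(n) for the SATA groups)
+ *	writel(tmp, base + PCIE_SATA_CFG);
+ */
+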
+/* pad multiplexing for pcie0 device */
+static struct spear_muxreg pcie0_muxreg[] = {
+	PCI_SATA_MUXREG,
+	{
+		.reg = PCIE_SATA_CFG,
+		.mask = PCIE_CFG_VAL(0),
+		.val = PCIE_CFG_VAL(0),
+	},
+};
+
+static struct spear_modemux pcie0_modemux[] = {
+	{
+		.muxregs = pcie0_muxreg,
+		.nmuxregs = ARRAY_SIZE(pcie0_muxreg),
+	},
+};
+
+static struct spear_pingroup pcie0_pingroup = {
+	.name = "pcie0_grp",
+	.pins = pci_sata_pins,
+	.npins = ARRAY_SIZE(pci_sata_pins),
+	.modemuxs = pcie0_modemux,
+	.nmodemuxs = ARRAY_SIZE(pcie0_modemux),
+};
+
+/* pad multiplexing for pcie1 device */
+static struct spear_muxreg pcie1_muxreg[] = {
+	PCI_SATA_MUXREG,
+	{
+		.reg = PCIE_SATA_CFG,
+		.mask = PCIE_CFG_VAL(1),
+		.val = PCIE_CFG_VAL(1),
+	},
+};
+
+static struct spear_modemux pcie1_modemux[] = {
+	{
+		.muxregs = pcie1_muxreg,
+		.nmuxregs = ARRAY_SIZE(pcie1_muxreg),
+	},
+};
+
+static struct spear_pingroup pcie1_pingroup = {
+	.name = "pcie1_grp",
+	.pins = pci_sata_pins,
+	.npins = ARRAY_SIZE(pci_sata_pins),
+	.modemuxs = pcie1_modemux,
+	.nmodemuxs = ARRAY_SIZE(pcie1_modemux),
+};
+
+/* pad multiplexing for pcie2 device */
+static struct spear_muxreg pcie2_muxreg[] = {
+	PCI_SATA_MUXREG,
+	{
+		.reg = PCIE_SATA_CFG,
+		.mask = PCIE_CFG_VAL(2),
+		.val = PCIE_CFG_VAL(2),
+	},
+};
+
+static struct spear_modemux pcie2_modemux[] = {
+	{
+		.muxregs = pcie2_muxreg,
+		.nmuxregs = ARRAY_SIZE(pcie2_muxreg),
+	},
+};
+
+static struct spear_pingroup pcie2_pingroup = {
+	.name = "pcie2_grp",
+	.pins = pci_sata_pins,
+	.npins = ARRAY_SIZE(pci_sata_pins),
+	.modemuxs = pcie2_modemux,
+	.nmodemuxs = ARRAY_SIZE(pcie2_modemux),
+};
+
+static const char *const pci_grps[] = { "pcie0_grp", "pcie1_grp", "pcie2_grp" };
+static struct spear_function pci_function = {
+	.name = "pci",
+	.groups = pci_grps,
+	.ngroups = ARRAY_SIZE(pci_grps),
+};
+
+/* pad multiplexing for sata0 device */
+static struct spear_muxreg sata0_muxreg[] = {
+	PCI_SATA_MUXREG,
+	{
+		.reg = PCIE_SATA_CFG,
+		.mask = SATA_CFG_VAL(0),
+		.val = SATA_CFG_VAL(0),
+	},
+};
+
+static struct spear_modemux sata0_modemux[] = {
+	{
+		.muxregs = sata0_muxreg,
+		.nmuxregs = ARRAY_SIZE(sata0_muxreg),
+	},
+};
+
+static struct spear_pingroup sata0_pingroup = {
+	.name = "sata0_grp",
+	.pins = pci_sata_pins,
+	.npins = ARRAY_SIZE(pci_sata_pins),
+	.modemuxs = sata0_modemux,
+	.nmodemuxs = ARRAY_SIZE(sata0_modemux),
+};
+
+/* pad multiplexing for sata1 device */
+static struct spear_muxreg sata1_muxreg[] = {
+	PCI_SATA_MUXREG,
+	{
+		.reg = PCIE_SATA_CFG,
+		.mask = SATA_CFG_VAL(1),
+		.val = SATA_CFG_VAL(1),
+	},
+};
+
+static struct spear_modemux sata1_modemux[] = {
+	{
+		.muxregs = sata1_muxreg,
+		.nmuxregs = ARRAY_SIZE(sata1_muxreg),
+	},
+};
+
+static struct spear_pingroup sata1_pingroup = {
+	.name = "sata1_grp",
+	.pins = pci_sata_pins,
+	.npins = ARRAY_SIZE(pci_sata_pins),
+	.modemuxs = sata1_modemux,
+	.nmodemuxs = ARRAY_SIZE(sata1_modemux),
+};
+
+/* pad multiplexing for sata2 device */
+static struct spear_muxreg sata2_muxreg[] = {
+	PCI_SATA_MUXREG,
+	{
+		.reg = PCIE_SATA_CFG,
+		.mask = SATA_CFG_VAL(2),
+		.val = SATA_CFG_VAL(2),
+	},
+};
+
+static struct spear_modemux sata2_modemux[] = {
+	{
+		.muxregs = sata2_muxreg,
+		.nmuxregs = ARRAY_SIZE(sata2_muxreg),
+	},
+};
+
+static struct spear_pingroup sata2_pingroup = {
+	.name = "sata2_grp",
+	.pins = pci_sata_pins,
+	.npins = ARRAY_SIZE(pci_sata_pins),
+	.modemuxs = sata2_modemux,
+	.nmodemuxs = ARRAY_SIZE(sata2_modemux),
+};
+
+static const char *const sata_grps[] = { "sata0_grp", "sata1_grp", "sata2_grp"
+};
+static struct spear_function sata_function = {
+	.name = "sata",
+	.groups = sata_grps,
+	.ngroups = ARRAY_SIZE(sata_grps),
+};
+
+/* Pad multiplexing for ssp1_dis_kbd device */
+static const unsigned ssp1_dis_kbd_pins[] = { 203, 204, 205, 206 };
+static struct spear_muxreg ssp1_dis_kbd_muxreg[] = {
+	{
+		.reg = PAD_FUNCTION_EN_1,
+		.mask = PMX_KBD_ROWCOL25_MASK | PMX_KBD_COL1_MASK |
+			PMX_KBD_COL0_MASK | PMX_NFIO8_15_MASK | PMX_NFCE1_MASK |
+			PMX_NFCE2_MASK,
+		.val = 0,
+	},
+};
+
+static struct spear_modemux ssp1_dis_kbd_modemux[] = {
+	{
+		.muxregs = ssp1_dis_kbd_muxreg,
+		.nmuxregs = ARRAY_SIZE(ssp1_dis_kbd_muxreg),
+	},
+};
+
+static struct spear_pingroup ssp1_dis_kbd_pingroup = {
+	.name = "ssp1_dis_kbd_grp",
+	.pins = ssp1_dis_kbd_pins,
+	.npins = ARRAY_SIZE(ssp1_dis_kbd_pins),
+	.modemuxs = ssp1_dis_kbd_modemux,
+	.nmodemuxs = ARRAY_SIZE(ssp1_dis_kbd_modemux),
+};
+
+/* Pad multiplexing for ssp1_dis_sd device */
+static const unsigned ssp1_dis_sd_pins[] = { 224, 226, 227, 228 };
+static struct spear_muxreg ssp1_dis_sd_muxreg[] = {
+	{
+		.reg = PAD_FUNCTION_EN_2,
+		.mask = PMX_MCIADDR0ALE_MASK | PMX_MCIADDR2_MASK |
+			PMX_MCICECF_MASK | PMX_MCICEXD_MASK,
+		.val = 0,
+	},
+};
+
+static struct spear_modemux ssp1_dis_sd_modemux[] = {
+	{
+		.muxregs = ssp1_dis_sd_muxreg,
+		.nmuxregs = ARRAY_SIZE(ssp1_dis_sd_muxreg),
+	},
+};
+
+static struct spear_pingroup ssp1_dis_sd_pingroup = {
+	.name = "ssp1_dis_sd_grp",
+	.pins = ssp1_dis_sd_pins,
+	.npins = ARRAY_SIZE(ssp1_dis_sd_pins),
+	.modemuxs = ssp1_dis_sd_modemux,
+	.nmodemuxs = ARRAY_SIZE(ssp1_dis_sd_modemux),
+};
+
+static const char *const ssp1_grps[] = { "ssp1_dis_kbd_grp",
+	"ssp1_dis_sd_grp" };
+static struct spear_function ssp1_function = {
+	.name = "ssp1",
+	.groups = ssp1_grps,
+	.ngroups = ARRAY_SIZE(ssp1_grps),
+};
+
+/* Pad multiplexing for gpt64 device */
+static const unsigned gpt64_pins[] = { 230, 231, 232, 245 };
+static struct spear_muxreg gpt64_muxreg[] = {
+	{
+		.reg = PAD_FUNCTION_EN_2,
+		.mask = PMX_MCICDCF1_MASK | PMX_MCICDCF2_MASK | PMX_MCICDXD_MASK
+			| PMX_MCILEDS_MASK,
+		.val = 0,
+	},
+};
+
+static struct spear_modemux gpt64_modemux[] = {
+	{
+		.muxregs = gpt64_muxreg,
+		.nmuxregs = ARRAY_SIZE(gpt64_muxreg),
+	},
+};
+
+static struct spear_pingroup gpt64_pingroup = {
+	.name = "gpt64_grp",
+	.pins = gpt64_pins,
+	.npins = ARRAY_SIZE(gpt64_pins),
+	.modemuxs = gpt64_modemux,
+	.nmodemuxs = ARRAY_SIZE(gpt64_modemux),
+};
+
+static const char *const gpt64_grps[] = { "gpt64_grp" };
+static struct spear_function gpt64_function = {
+	.name = "gpt64",
+	.groups = gpt64_grps,
+	.ngroups = ARRAY_SIZE(gpt64_grps),
+};
+
+/* pingroups */
+static struct spear_pingroup *spear1310_pingroups[] = {
+	&i2c0_pingroup,
+	&ssp0_pingroup,
+	&i2s0_pingroup,
+	&i2s1_pingroup,
+	&clcd_pingroup,
+	&clcd_high_res_pingroup,
+	&arm_gpio_pingroup,
+	&smi_2_chips_pingroup,
+	&smi_4_chips_pingroup,
+	&gmii_pingroup,
+	&rgmii_pingroup,
+	&smii_0_1_2_pingroup,
+	&ras_mii_txclk_pingroup,
+	&nand_8bit_pingroup,
+	&nand_16bit_pingroup,
+	&nand_4_chips_pingroup,
+	&keyboard_6x6_pingroup,
+	&keyboard_rowcol6_8_pingroup,
+	&uart0_pingroup,
+	&uart0_modem_pingroup,
+	&gpt0_tmr0_pingroup,
+	&gpt0_tmr1_pingroup,
+	&gpt1_tmr0_pingroup,
+	&gpt1_tmr1_pingroup,
+	&sdhci_pingroup,
+	&cf_pingroup,
+	&xd_pingroup,
+	&touch_xy_pingroup,
+	&ssp0_cs0_pingroup,
+	&ssp0_cs1_2_pingroup,
+	&uart_1_dis_i2c_pingroup,
+	&uart_1_dis_sd_pingroup,
+	&uart_2_3_pingroup,
+	&uart_4_pingroup,
+	&uart_5_pingroup,
+	&rs485_0_1_tdm_0_1_pingroup,
+	&i2c_1_2_pingroup,
+	&i2c3_dis_smi_clcd_pingroup,
+	&i2c3_dis_sd_i2s0_pingroup,
+	&i2c_4_5_dis_smi_pingroup,
+	&i2c4_dis_sd_pingroup,
+	&i2c5_dis_sd_pingroup,
+	&i2c_6_7_dis_kbd_pingroup,
+	&i2c6_dis_sd_pingroup,
+	&i2c7_dis_sd_pingroup,
+	&can0_dis_nor_pingroup,
+	&can0_dis_sd_pingroup,
+	&can1_dis_sd_pingroup,
+	&can1_dis_kbd_pingroup,
+	&pcie0_pingroup,
+	&pcie1_pingroup,
+	&pcie2_pingroup,
+	&sata0_pingroup,
+	&sata1_pingroup,
+	&sata2_pingroup,
+	&ssp1_dis_kbd_pingroup,
+	&ssp1_dis_sd_pingroup,
+	&gpt64_pingroup,
+};
+
+/* functions */
+static struct spear_function *spear1310_functions[] = {
+	&i2c0_function,
+	&ssp0_function,
+	&i2s0_function,
+	&i2s1_function,
+	&clcd_function,
+	&arm_gpio_function,
+	&smi_function,
+	&gmii_function,
+	&rgmii_function,
+	&smii_0_1_2_function,
+	&ras_mii_txclk_function,
+	&nand_function,
+	&keyboard_function,
+	&uart0_function,
+	&gpt0_function,
+	&gpt1_function,
+	&sdhci_function,
+	&cf_function,
+	&xd_function,
+	&touch_xy_function,
+	&uart1_function,
+	&uart2_3_function,
+	&uart4_function,
+	&uart5_function,
+	&rs485_0_1_tdm_0_1_function,
+	&i2c_1_2_function,
+	&i2c3_function,
+	&i2c_4_5_function,
+	&i2c_6_7_function,
+	&can0_function,
+	&can1_function,
+	&pci_function,
+	&sata_function,
+	&ssp1_function,
+	&gpt64_function,
+};
+
+static struct spear_pinctrl_machdata spear1310_machdata = {
+	.pins = spear1310_pins,
+	.npins = ARRAY_SIZE(spear1310_pins),
+	.groups = spear1310_pingroups,
+	.ngroups = ARRAY_SIZE(spear1310_pingroups),
+	.functions = spear1310_functions,
+	.nfunctions = ARRAY_SIZE(spear1310_functions),
+	.modes_supported = false,
+};
+
+static struct of_device_id spear1310_pinctrl_of_match[] __devinitdata = {
+	{
+		.compatible = "st,spear1310-pinmux",
+	},
+	{},
+};
+
+static int __devinit spear1310_pinctrl_probe(struct platform_device *pdev)
+{
+	return spear_pinctrl_probe(pdev, &spear1310_machdata);
+}
+
+static int __devexit spear1310_pinctrl_remove(struct platform_device *pdev)
+{
+	return spear_pinctrl_remove(pdev);
+}
+
+static struct platform_driver spear1310_pinctrl_driver = {
+	.driver = {
+		.name = DRIVER_NAME,
+		.owner = THIS_MODULE,
+		.of_match_table = spear1310_pinctrl_of_match,
+	},
+	.probe = spear1310_pinctrl_probe,
+	.remove = __devexit_p(spear1310_pinctrl_remove),
+};
+
+static int __init spear1310_pinctrl_init(void)
+{
+	return platform_driver_register(&spear1310_pinctrl_driver);
+}
+arch_initcall(spear1310_pinctrl_init);
+
+static void __exit spear1310_pinctrl_exit(void)
+{
+	platform_driver_unregister(&spear1310_pinctrl_driver);
+}
+module_exit(spear1310_pinctrl_exit);
+
+MODULE_AUTHOR("Viresh Kumar <viresh.linux@gmail.com>");
+MODULE_DESCRIPTION("ST Microelectronics SPEAr1310 pinctrl driver");
+MODULE_LICENSE("GPL v2");
+MODULE_DEVICE_TABLE(of, spear1310_pinctrl_of_match);
diff --git a/drivers/pinctrl/spear/pinctrl-spear1340.c b/drivers/pinctrl/spear/pinctrl-spear1340.c
new file mode 100644
index 0000000..a0eb057
--- /dev/null
+++ b/drivers/pinctrl/spear/pinctrl-spear1340.c
@@ -0,0 +1,1989 @@
+/*
+ * Driver for the ST Microelectronics SPEAr1340 pinmux
+ *
+ * Copyright (C) 2012 ST Microelectronics
+ * Viresh Kumar <viresh.linux@gmail.com>
+ *
+ * This file is licensed under the terms of the GNU General Public
+ * License version 2. This program is licensed "as is" without any
+ * warranty of any kind, whether express or implied.
+ */
+
+#include <linux/err.h>
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/of_device.h>
+#include <linux/platform_device.h>
+#include "pinctrl-spear.h"
+
+#define DRIVER_NAME "spear1340-pinmux"
+
+/* pins */
+static const struct pinctrl_pin_desc spear1340_pins[] = {
+	SPEAR_PIN_0_TO_101,
+	SPEAR_PIN_102_TO_245,
+	PINCTRL_PIN(246, "PLGPIO246"),
+	PINCTRL_PIN(247, "PLGPIO247"),
+	PINCTRL_PIN(248, "PLGPIO248"),
+	PINCTRL_PIN(249, "PLGPIO249"),
+	PINCTRL_PIN(250, "PLGPIO250"),
+	PINCTRL_PIN(251, "PLGPIO251"),
+};
+
+/* In SPEAr1340 there are two levels of pad muxing */
+/* - pads as gpio OR peripherals */
+#define PAD_FUNCTION_EN_1			0x668
+#define PAD_FUNCTION_EN_2			0x66C
+#define PAD_FUNCTION_EN_3			0x670
+#define PAD_FUNCTION_EN_4			0x674
+#define PAD_FUNCTION_EN_5			0x690
+#define PAD_FUNCTION_EN_6			0x694
+#define PAD_FUNCTION_EN_7			0x698
+#define PAD_FUNCTION_EN_8			0x69C
+
+/* - If peripherals, then primary OR alternate peripheral */
+#define PAD_SHARED_IP_EN_1			0x6A0
+#define PAD_SHARED_IP_EN_2			0x6A4
+
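+/*
+ * Rough intent of the two levels (a sketch, assuming the common pinctrl-spear
+ * core applies each spear_muxreg as a masked read-modify-write of the given
+ * register): in the PAD_FUNCTION_EN_x registers, writing the mask value hands
+ * the pads to a peripheral and writing 0 leaves them as gpio; in the
+ * PAD_SHARED_IP_EN_x registers, the per-IP bit then picks between the two
+ * peripherals sharing the pads, as documented for each mask below (e.g.
+ * "Write 0 to enable PWM1").
+ */
+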
+/*
+ * Macros for the first level of pmx - pads as gpio OR peripherals. There are
+ * 8 registers with 32 bits each for handling gpio pads; register 8 has only
+ * 26 relevant bits.
+ */
+/* macros for configuring pads as GPIOs */
+#define PADS_AS_GPIO_REG0_MASK			0xFFFFFFFE
+#define PADS_AS_GPIO_REGS_MASK			0xFFFFFFFF
+#define PADS_AS_GPIO_REG7_MASK			0x07FFFFFF
+
+/* macros for configuring pads as peripherals */
+#define FSMC_16_BIT_AND_KBD_ROW_COL_REG0_MASK	0x00000FFE
+#define UART0_ENH_AND_GPT_REG0_MASK		0x0003F000
+#define PWM1_AND_KBD_COL5_REG0_MASK		0x00040000
+#define I2C1_REG0_MASK				0x01080000
+#define SPDIF_IN_REG0_MASK			0x00100000
+#define PWM2_AND_GPT0_TMR0_CPT_REG0_MASK	0x00400000
+#define PWM3_AND_GPT0_TMR1_CLK_REG0_MASK	0x00800000
+#define PWM0_AND_SSP0_CS1_REG0_MASK		0x02000000
+#define VIP_AND_CAM3_REG0_MASK			0xFC200000
+#define VIP_AND_CAM3_REG1_MASK			0x0000000F
+#define VIP_REG1_MASK				0x00001EF0
+#define VIP_AND_CAM2_REG1_MASK			0x007FE100
+#define VIP_AND_CAM1_REG1_MASK			0xFF800000
+#define VIP_AND_CAM1_REG2_MASK			0x00000003
+#define VIP_AND_CAM0_REG2_MASK			0x00001FFC
+#define SMI_REG2_MASK				0x0021E000
+#define SSP0_REG2_MASK				0x001E0000
+#define TS_AND_SSP0_CS2_REG2_MASK		0x00400000
+#define UART0_REG2_MASK				0x01800000
+#define UART1_REG2_MASK				0x06000000
+#define I2S_IN_REG2_MASK			0xF8000000
+#define DEVS_GRP_AND_MIPHY_DBG_REG3_MASK	0x000001FE
+#define I2S_OUT_REG3_MASK			0x000001EF
+#define I2S_IN_REG3_MASK			0x00000010
+#define GMAC_REG3_MASK				0xFFFFFE00
+#define GMAC_REG4_MASK				0x0000001F
+#define DEVS_GRP_AND_MIPHY_DBG_REG4_MASK	0x7FFFFF20
+#define SSP0_CS3_REG4_MASK			0x00000020
+#define I2C0_REG4_MASK				0x000000C0
+#define CEC0_REG4_MASK				0x00000100
+#define CEC1_REG4_MASK				0x00000200
+#define SPDIF_OUT_REG4_MASK			0x00000400
+#define CLCD_REG4_MASK				0x7FFFF800
+#define CLCD_AND_ARM_TRACE_REG4_MASK		0x80000000
+#define CLCD_AND_ARM_TRACE_REG5_MASK		0xFFFFFFFF
+#define CLCD_AND_ARM_TRACE_REG6_MASK		0x00000001
+#define FSMC_PNOR_AND_MCIF_REG6_MASK		0x073FFFFE
+#define MCIF_REG6_MASK				0xF8C00000
+#define MCIF_REG7_MASK				0x000043FF
+#define FSMC_8BIT_REG7_MASK			0x07FFBC00
+
+/* other registers */
+#define PERIP_CFG				0x42C
+	/* PERIP_CFG register masks */
+	#define SSP_CS_CTL_HW			0
+	#define SSP_CS_CTL_SW			1
+	#define SSP_CS_CTL_MASK			1
+	#define SSP_CS_CTL_SHIFT		21
+	#define SSP_CS_VAL_MASK			1
+	#define SSP_CS_VAL_SHIFT		20
+	#define SSP_CS_SEL_CS0			0
+	#define SSP_CS_SEL_CS1			1
+	#define SSP_CS_SEL_CS2			2
+	#define SSP_CS_SEL_MASK			3
+	#define SSP_CS_SEL_SHIFT		18
+
+	#define I2S_CHNL_2_0			(0)
+	#define I2S_CHNL_3_1			(1)
+	#define I2S_CHNL_5_1			(2)
+	#define I2S_CHNL_7_1			(3)
+	#define I2S_CHNL_PLAY_SHIFT		(4)
+	#define I2S_CHNL_PLAY_MASK		(3 << 4)
+	#define I2S_CHNL_REC_SHIFT		(6)
+	#define I2S_CHNL_REC_MASK		(3 << 6)
+
+	#define SPDIF_OUT_ENB_MASK		(1 << 2)
+	#define SPDIF_OUT_ENB_SHIFT		2
+
+	#define MCIF_SEL_SD			1
+	#define MCIF_SEL_CF			2
+	#define MCIF_SEL_XD			3
+	#define MCIF_SEL_MASK			3
+	#define MCIF_SEL_SHIFT			0
+
+#define GMAC_CLK_CFG				0x248
+	#define GMAC_PHY_IF_GMII_VAL		(0 << 3)
+	#define GMAC_PHY_IF_RGMII_VAL		(1 << 3)
+	#define GMAC_PHY_IF_SGMII_VAL		(2 << 3)
+	#define GMAC_PHY_IF_RMII_VAL		(4 << 3)
+	#define GMAC_PHY_IF_SEL_MASK		(7 << 3)
+	#define GMAC_PHY_INPUT_ENB_VAL		0
+	#define GMAC_PHY_SYNT_ENB_VAL		1
+	#define GMAC_PHY_CLK_MASK		1
+	#define GMAC_PHY_CLK_SHIFT		2
+	#define GMAC_PHY_125M_PAD_VAL		0
+	#define GMAC_PHY_PLL2_VAL		1
+	#define GMAC_PHY_OSC3_VAL		2
+	#define GMAC_PHY_INPUT_CLK_MASK		3
+	#define GMAC_PHY_INPUT_CLK_SHIFT	0
+
+#define PCIE_SATA_CFG				0x424
+	/* PCIE CFG masks */
+	#define PCIE_CFG_DEVICE_PRESENT		(1 << 11)
+	#define PCIE_CFG_POWERUP_RESET		(1 << 10)
+	#define PCIE_CFG_CORE_CLK_EN		(1 << 9)
+	#define PCIE_CFG_AUX_CLK_EN		(1 << 8)
+	#define SATA_CFG_TX_CLK_EN		(1 << 4)
+	#define SATA_CFG_RX_CLK_EN		(1 << 3)
+	#define SATA_CFG_POWERUP_RESET		(1 << 2)
+	#define SATA_CFG_PM_CLK_EN		(1 << 1)
+	#define PCIE_SATA_SEL_PCIE		(0)
+	#define PCIE_SATA_SEL_SATA		(1)
+	#define SATA_PCIE_CFG_MASK		0xF1F
+	#define PCIE_CFG_VAL	(PCIE_SATA_SEL_PCIE | PCIE_CFG_AUX_CLK_EN | \
+				PCIE_CFG_CORE_CLK_EN | PCIE_CFG_POWERUP_RESET |\
+				PCIE_CFG_DEVICE_PRESENT)
+	#define SATA_CFG_VAL	(PCIE_SATA_SEL_SATA | SATA_CFG_PM_CLK_EN | \
+				SATA_CFG_POWERUP_RESET | SATA_CFG_RX_CLK_EN | \
+				SATA_CFG_TX_CLK_EN)
+
+/* Macros for the second level of pmx - primary OR alternate peripheral */
+/* Write 0 to enable FSMC_16_BIT */
+#define KBD_ROW_COL_MASK			(1 << 0)
+
+/* Write 0 to enable UART0_ENH */
+#define GPT_MASK				(1 << 1) /* Only clk & cpt */
+
+/* Write 0 to enable PWM1 */
+#define KBD_COL5_MASK				(1 << 2)
+
+/* Write 0 to enable PWM2 */
+#define GPT0_TMR0_CPT_MASK			(1 << 3) /* Only clk & cpt */
+
+/* Write 0 to enable PWM3 */
+#define GPT0_TMR1_CLK_MASK			(1 << 4) /* Only clk & cpt */
+
+/* Write 0 to enable PWM0 */
+#define SSP0_CS1_MASK				(1 << 5)
+
+/* Write 0 to enable VIP */
+#define CAM3_MASK				(1 << 6)
+
+/* Write 0 to enable VIP */
+#define CAM2_MASK				(1 << 7)
+
+/* Write 0 to enable VIP */
+#define CAM1_MASK				(1 << 8)
+
+/* Write 0 to enable VIP */
+#define CAM0_MASK				(1 << 9)
+
+/* Write 0 to enable TS */
+#define SSP0_CS2_MASK				(1 << 10)
+
+/* Write 0 to enable FSMC PNOR */
+#define MCIF_MASK				(1 << 11)
+
+/* Write 0 to enable CLCD */
+#define ARM_TRACE_MASK				(1 << 12)
+
+/* Write 0 to enable I2S, SSP0_CS2, CEC0, 1, SPDIF out, CLCD */
+#define MIPHY_DBG_MASK				(1 << 13)
+
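+/*
+ * Worked example of the two levels (see the keyboard_col5 and pwm1 groups
+ * later in this file): pin 17 is shared between keyboard COL5 and PWM1. Both
+ * groups set PWM1_AND_KBD_COL5_REG0_MASK in PAD_FUNCTION_EN_1 to hand the pad
+ * to a peripheral; the keyboard_col5 group then sets KBD_COL5_MASK in
+ * PAD_SHARED_IP_EN_1 to pick the keyboard, while the pwm1 group clears it to
+ * pick PWM1 instead.
+ */
+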
+/*
+ * Pad multiplexing for configuring all pads as GPIOs. This is done to override
+ * the values passed from the bootloader and start from scratch.
+ */
+static const unsigned pads_as_gpio_pins[] = { 251 };
+static struct spear_muxreg pads_as_gpio_muxreg[] = {
+	{
+		.reg = PAD_FUNCTION_EN_1,
+		.mask = PADS_AS_GPIO_REG0_MASK,
+		.val = 0x0,
+	}, {
+		.reg = PAD_FUNCTION_EN_2,
+		.mask = PADS_AS_GPIO_REGS_MASK,
+		.val = 0x0,
+	}, {
+		.reg = PAD_FUNCTION_EN_3,
+		.mask = PADS_AS_GPIO_REGS_MASK,
+		.val = 0x0,
+	}, {
+		.reg = PAD_FUNCTION_EN_4,
+		.mask = PADS_AS_GPIO_REGS_MASK,
+		.val = 0x0,
+	}, {
+		.reg = PAD_FUNCTION_EN_5,
+		.mask = PADS_AS_GPIO_REGS_MASK,
+		.val = 0x0,
+	}, {
+		.reg = PAD_FUNCTION_EN_6,
+		.mask = PADS_AS_GPIO_REGS_MASK,
+		.val = 0x0,
+	}, {
+		.reg = PAD_FUNCTION_EN_7,
+		.mask = PADS_AS_GPIO_REGS_MASK,
+		.val = 0x0,
+	}, {
+		.reg = PAD_FUNCTION_EN_8,
+		.mask = PADS_AS_GPIO_REG7_MASK,
+		.val = 0x0,
+	},
+};
+
+static struct spear_modemux pads_as_gpio_modemux[] = {
+	{
+		.muxregs = pads_as_gpio_muxreg,
+		.nmuxregs = ARRAY_SIZE(pads_as_gpio_muxreg),
+	},
+};
+
+static struct spear_pingroup pads_as_gpio_pingroup = {
+	.name = "pads_as_gpio_grp",
+	.pins = pads_as_gpio_pins,
+	.npins = ARRAY_SIZE(pads_as_gpio_pins),
+	.modemuxs = pads_as_gpio_modemux,
+	.nmodemuxs = ARRAY_SIZE(pads_as_gpio_modemux),
+};
+
+static const char *const pads_as_gpio_grps[] = { "pads_as_gpio_grp" };
+static struct spear_function pads_as_gpio_function = {
+	.name = "pads_as_gpio",
+	.groups = pads_as_gpio_grps,
+	.ngroups = ARRAY_SIZE(pads_as_gpio_grps),
+};
+
+/* Pad multiplexing for fsmc_8bit device */
+static const unsigned fsmc_8bit_pins[] = { 233, 234, 235, 236, 238, 239, 240,
+	241, 242, 243, 244, 245, 246, 247, 248, 249 };
+static struct spear_muxreg fsmc_8bit_muxreg[] = {
+	{
+		.reg = PAD_FUNCTION_EN_8,
+		.mask = FSMC_8BIT_REG7_MASK,
+		.val = FSMC_8BIT_REG7_MASK,
+	}
+};
+
+static struct spear_modemux fsmc_8bit_modemux[] = {
+	{
+		.muxregs = fsmc_8bit_muxreg,
+		.nmuxregs = ARRAY_SIZE(fsmc_8bit_muxreg),
+	},
+};
+
+static struct spear_pingroup fsmc_8bit_pingroup = {
+	.name = "fsmc_8bit_grp",
+	.pins = fsmc_8bit_pins,
+	.npins = ARRAY_SIZE(fsmc_8bit_pins),
+	.modemuxs = fsmc_8bit_modemux,
+	.nmodemuxs = ARRAY_SIZE(fsmc_8bit_modemux),
+};
+
+/* Pad multiplexing for fsmc_16bit device */
+static const unsigned fsmc_16bit_pins[] = { 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10 };
+static struct spear_muxreg fsmc_16bit_muxreg[] = {
+	{
+		.reg = PAD_SHARED_IP_EN_1,
+		.mask = KBD_ROW_COL_MASK,
+		.val = 0,
+	}, {
+		.reg = PAD_FUNCTION_EN_1,
+		.mask = FSMC_16_BIT_AND_KBD_ROW_COL_REG0_MASK,
+		.val = FSMC_16_BIT_AND_KBD_ROW_COL_REG0_MASK,
+	},
+};
+
+static struct spear_modemux fsmc_16bit_modemux[] = {
+	{
+		.muxregs = fsmc_16bit_muxreg,
+		.nmuxregs = ARRAY_SIZE(fsmc_16bit_muxreg),
+	},
+};
+
+static struct spear_pingroup fsmc_16bit_pingroup = {
+	.name = "fsmc_16bit_grp",
+	.pins = fsmc_16bit_pins,
+	.npins = ARRAY_SIZE(fsmc_16bit_pins),
+	.modemuxs = fsmc_16bit_modemux,
+	.nmodemuxs = ARRAY_SIZE(fsmc_16bit_modemux),
+};
+
+/* pad multiplexing for fsmc_pnor device */
+static const unsigned fsmc_pnor_pins[] = { 192, 193, 194, 195, 196, 197, 198,
+	199, 200, 201, 202, 203, 204, 205, 206, 207, 208, 209, 210, 211, 212,
+	215, 216, 217 };
+static struct spear_muxreg fsmc_pnor_muxreg[] = {
+	{
+		.reg = PAD_SHARED_IP_EN_1,
+		.mask = MCIF_MASK,
+		.val = 0,
+	}, {
+		.reg = PAD_FUNCTION_EN_7,
+		.mask = FSMC_PNOR_AND_MCIF_REG6_MASK,
+		.val = FSMC_PNOR_AND_MCIF_REG6_MASK,
+	},
+};
+
+static struct spear_modemux fsmc_pnor_modemux[] = {
+	{
+		.muxregs = fsmc_pnor_muxreg,
+		.nmuxregs = ARRAY_SIZE(fsmc_pnor_muxreg),
+	},
+};
+
+static struct spear_pingroup fsmc_pnor_pingroup = {
+	.name = "fsmc_pnor_grp",
+	.pins = fsmc_pnor_pins,
+	.npins = ARRAY_SIZE(fsmc_pnor_pins),
+	.modemuxs = fsmc_pnor_modemux,
+	.nmodemuxs = ARRAY_SIZE(fsmc_pnor_modemux),
+};
+
+static const char *const fsmc_grps[] = { "fsmc_8bit_grp", "fsmc_16bit_grp",
+	"fsmc_pnor_grp" };
+static struct spear_function fsmc_function = {
+	.name = "fsmc",
+	.groups = fsmc_grps,
+	.ngroups = ARRAY_SIZE(fsmc_grps),
+};
+
+/* pad multiplexing for keyboard rows-cols device */
+static const unsigned keyboard_row_col_pins[] = { 0, 1, 2, 3, 4, 5, 6, 7, 8, 9,
+	10 };
+static struct spear_muxreg keyboard_row_col_muxreg[] = {
+	{
+		.reg = PAD_SHARED_IP_EN_1,
+		.mask = KBD_ROW_COL_MASK,
+		.val = KBD_ROW_COL_MASK,
+	}, {
+		.reg = PAD_FUNCTION_EN_1,
+		.mask = FSMC_16_BIT_AND_KBD_ROW_COL_REG0_MASK,
+		.val = FSMC_16_BIT_AND_KBD_ROW_COL_REG0_MASK,
+	},
+};
+
+static struct spear_modemux keyboard_row_col_modemux[] = {
+	{
+		.muxregs = keyboard_row_col_muxreg,
+		.nmuxregs = ARRAY_SIZE(keyboard_row_col_muxreg),
+	},
+};
+
+static struct spear_pingroup keyboard_row_col_pingroup = {
+	.name = "keyboard_row_col_grp",
+	.pins = keyboard_row_col_pins,
+	.npins = ARRAY_SIZE(keyboard_row_col_pins),
+	.modemuxs = keyboard_row_col_modemux,
+	.nmodemuxs = ARRAY_SIZE(keyboard_row_col_modemux),
+};
+
+/* pad multiplexing for keyboard col5 device */
+static const unsigned keyboard_col5_pins[] = { 17 };
+static struct spear_muxreg keyboard_col5_muxreg[] = {
+	{
+		.reg = PAD_SHARED_IP_EN_1,
+		.mask = KBD_COL5_MASK,
+		.val = KBD_COL5_MASK,
+	}, {
+		.reg = PAD_FUNCTION_EN_1,
+		.mask = PWM1_AND_KBD_COL5_REG0_MASK,
+		.val = PWM1_AND_KBD_COL5_REG0_MASK,
+	},
+};
+
+static struct spear_modemux keyboard_col5_modemux[] = {
+	{
+		.muxregs = keyboard_col5_muxreg,
+		.nmuxregs = ARRAY_SIZE(keyboard_col5_muxreg),
+	},
+};
+
+static struct spear_pingroup keyboard_col5_pingroup = {
+	.name = "keyboard_col5_grp",
+	.pins = keyboard_col5_pins,
+	.npins = ARRAY_SIZE(keyboard_col5_pins),
+	.modemuxs = keyboard_col5_modemux,
+	.nmodemuxs = ARRAY_SIZE(keyboard_col5_modemux),
+};
+
+static const char *const keyboard_grps[] = { "keyboard_row_col_grp",
+	"keyboard_col5_grp" };
+static struct spear_function keyboard_function = {
+	.name = "keyboard",
+	.groups = keyboard_grps,
+	.ngroups = ARRAY_SIZE(keyboard_grps),
+};
+
+/* pad multiplexing for spdif_in device */
+static const unsigned spdif_in_pins[] = { 19 };
+static struct spear_muxreg spdif_in_muxreg[] = {
+	{
+		.reg = PAD_FUNCTION_EN_1,
+		.mask = SPDIF_IN_REG0_MASK,
+		.val = SPDIF_IN_REG0_MASK,
+	},
+};
+
+static struct spear_modemux spdif_in_modemux[] = {
+	{
+		.muxregs = spdif_in_muxreg,
+		.nmuxregs = ARRAY_SIZE(spdif_in_muxreg),
+	},
+};
+
+static struct spear_pingroup spdif_in_pingroup = {
+	.name = "spdif_in_grp",
+	.pins = spdif_in_pins,
+	.npins = ARRAY_SIZE(spdif_in_pins),
+	.modemuxs = spdif_in_modemux,
+	.nmodemuxs = ARRAY_SIZE(spdif_in_modemux),
+};
+
+static const char *const spdif_in_grps[] = { "spdif_in_grp" };
+static struct spear_function spdif_in_function = {
+	.name = "spdif_in",
+	.groups = spdif_in_grps,
+	.ngroups = ARRAY_SIZE(spdif_in_grps),
+};
+
+/* pad multiplexing for spdif_out device */
+static const unsigned spdif_out_pins[] = { 137 };
+static struct spear_muxreg spdif_out_muxreg[] = {
+	{
+		.reg = PAD_FUNCTION_EN_5,
+		.mask = SPDIF_OUT_REG4_MASK,
+		.val = SPDIF_OUT_REG4_MASK,
+	}, {
+		.reg = PERIP_CFG,
+		.mask = SPDIF_OUT_ENB_MASK,
+		.val = SPDIF_OUT_ENB_MASK,
+	}
+};
+
+static struct spear_modemux spdif_out_modemux[] = {
+	{
+		.muxregs = spdif_out_muxreg,
+		.nmuxregs = ARRAY_SIZE(spdif_out_muxreg),
+	},
+};
+
+static struct spear_pingroup spdif_out_pingroup = {
+	.name = "spdif_out_grp",
+	.pins = spdif_out_pins,
+	.npins = ARRAY_SIZE(spdif_out_pins),
+	.modemuxs = spdif_out_modemux,
+	.nmodemuxs = ARRAY_SIZE(spdif_out_modemux),
+};
+
+static const char *const spdif_out_grps[] = { "spdif_out_grp" };
+static struct spear_function spdif_out_function = {
+	.name = "spdif_out",
+	.groups = spdif_out_grps,
+	.ngroups = ARRAY_SIZE(spdif_out_grps),
+};
+
+/* pad multiplexing for gpt_0_1 device */
+static const unsigned gpt_0_1_pins[] = { 11, 12, 13, 14, 15, 16, 21, 22 };
+static struct spear_muxreg gpt_0_1_muxreg[] = {
+	{
+		.reg = PAD_SHARED_IP_EN_1,
+		.mask = GPT_MASK | GPT0_TMR0_CPT_MASK | GPT0_TMR1_CLK_MASK,
+		.val = GPT_MASK | GPT0_TMR0_CPT_MASK | GPT0_TMR1_CLK_MASK,
+	}, {
+		.reg = PAD_FUNCTION_EN_1,
+		.mask = UART0_ENH_AND_GPT_REG0_MASK |
+			PWM2_AND_GPT0_TMR0_CPT_REG0_MASK |
+			PWM3_AND_GPT0_TMR1_CLK_REG0_MASK,
+		.val = UART0_ENH_AND_GPT_REG0_MASK |
+			PWM2_AND_GPT0_TMR0_CPT_REG0_MASK |
+			PWM3_AND_GPT0_TMR1_CLK_REG0_MASK,
+	},
+};
+
+static struct spear_modemux gpt_0_1_modemux[] = {
+	{
+		.muxregs = gpt_0_1_muxreg,
+		.nmuxregs = ARRAY_SIZE(gpt_0_1_muxreg),
+	},
+};
+
+static struct spear_pingroup gpt_0_1_pingroup = {
+	.name = "gpt_0_1_grp",
+	.pins = gpt_0_1_pins,
+	.npins = ARRAY_SIZE(gpt_0_1_pins),
+	.modemuxs = gpt_0_1_modemux,
+	.nmodemuxs = ARRAY_SIZE(gpt_0_1_modemux),
+};
+
+static const char *const gpt_0_1_grps[] = { "gpt_0_1_grp" };
+static struct spear_function gpt_0_1_function = {
+	.name = "gpt_0_1",
+	.groups = gpt_0_1_grps,
+	.ngroups = ARRAY_SIZE(gpt_0_1_grps),
+};
+
+/* pad multiplexing for pwm0 device */
+static const unsigned pwm0_pins[] = { 24 };
+static struct spear_muxreg pwm0_muxreg[] = {
+	{
+		.reg = PAD_SHARED_IP_EN_1,
+		.mask = SSP0_CS1_MASK,
+		.val = 0,
+	}, {
+		.reg = PAD_FUNCTION_EN_1,
+		.mask = PWM0_AND_SSP0_CS1_REG0_MASK,
+		.val = PWM0_AND_SSP0_CS1_REG0_MASK,
+	},
+};
+
+static struct spear_modemux pwm0_modemux[] = {
+	{
+		.muxregs = pwm0_muxreg,
+		.nmuxregs = ARRAY_SIZE(pwm0_muxreg),
+	},
+};
+
+static struct spear_pingroup pwm0_pingroup = {
+	.name = "pwm0_grp",
+	.pins = pwm0_pins,
+	.npins = ARRAY_SIZE(pwm0_pins),
+	.modemuxs = pwm0_modemux,
+	.nmodemuxs = ARRAY_SIZE(pwm0_modemux),
+};
+
+/* pad multiplexing for pwm1 device */
+static const unsigned pwm1_pins[] = { 17 };
+static struct spear_muxreg pwm1_muxreg[] = {
+	{
+		.reg = PAD_SHARED_IP_EN_1,
+		.mask = KBD_COL5_MASK,
+		.val = 0,
+	}, {
+		.reg = PAD_FUNCTION_EN_1,
+		.mask = PWM1_AND_KBD_COL5_REG0_MASK,
+		.val = PWM1_AND_KBD_COL5_REG0_MASK,
+	},
+};
+
+static struct spear_modemux pwm1_modemux[] = {
+	{
+		.muxregs = pwm1_muxreg,
+		.nmuxregs = ARRAY_SIZE(pwm1_muxreg),
+	},
+};
+
+static struct spear_pingroup pwm1_pingroup = {
+	.name = "pwm1_grp",
+	.pins = pwm1_pins,
+	.npins = ARRAY_SIZE(pwm1_pins),
+	.modemuxs = pwm1_modemux,
+	.nmodemuxs = ARRAY_SIZE(pwm1_modemux),
+};
+
+/* pad multiplexing for pwm2 device */
+static const unsigned pwm2_pins[] = { 21 };
+static struct spear_muxreg pwm2_muxreg[] = {
+	{
+		.reg = PAD_SHARED_IP_EN_1,
+		.mask = GPT0_TMR0_CPT_MASK,
+		.val = 0,
+	}, {
+		.reg = PAD_FUNCTION_EN_1,
+		.mask = PWM2_AND_GPT0_TMR0_CPT_REG0_MASK,
+		.val = PWM2_AND_GPT0_TMR0_CPT_REG0_MASK,
+	},
+};
+
+static struct spear_modemux pwm2_modemux[] = {
+	{
+		.muxregs = pwm2_muxreg,
+		.nmuxregs = ARRAY_SIZE(pwm2_muxreg),
+	},
+};
+
+static struct spear_pingroup pwm2_pingroup = {
+	.name = "pwm2_grp",
+	.pins = pwm2_pins,
+	.npins = ARRAY_SIZE(pwm2_pins),
+	.modemuxs = pwm2_modemux,
+	.nmodemuxs = ARRAY_SIZE(pwm2_modemux),
+};
+
+/* pad multiplexing for pwm3 device */
+static const unsigned pwm3_pins[] = { 22 };
+static struct spear_muxreg pwm3_muxreg[] = {
+	{
+		.reg = PAD_SHARED_IP_EN_1,
+		.mask = GPT0_TMR1_CLK_MASK,
+		.val = 0,
+	}, {
+		.reg = PAD_FUNCTION_EN_1,
+		.mask = PWM3_AND_GPT0_TMR1_CLK_REG0_MASK,
+		.val = PWM3_AND_GPT0_TMR1_CLK_REG0_MASK,
+	},
+};
+
+static struct spear_modemux pwm3_modemux[] = {
+	{
+		.muxregs = pwm3_muxreg,
+		.nmuxregs = ARRAY_SIZE(pwm3_muxreg),
+	},
+};
+
+static struct spear_pingroup pwm3_pingroup = {
+	.name = "pwm3_grp",
+	.pins = pwm3_pins,
+	.npins = ARRAY_SIZE(pwm3_pins),
+	.modemuxs = pwm3_modemux,
+	.nmodemuxs = ARRAY_SIZE(pwm3_modemux),
+};
+
+static const char *const pwm_grps[] = { "pwm0_grp", "pwm1_grp", "pwm2_grp",
+	"pwm3_grp" };
+static struct spear_function pwm_function = {
+	.name = "pwm",
+	.groups = pwm_grps,
+	.ngroups = ARRAY_SIZE(pwm_grps),
+};
+
+/* pad multiplexing for vip_mux device */
+static const unsigned vip_mux_pins[] = { 35, 36, 37, 38, 40, 41, 42, 43 };
+static struct spear_muxreg vip_mux_muxreg[] = {
+	{
+		.reg = PAD_FUNCTION_EN_2,
+		.mask = VIP_REG1_MASK,
+		.val = VIP_REG1_MASK,
+	},
+};
+
+static struct spear_modemux vip_mux_modemux[] = {
+	{
+		.muxregs = vip_mux_muxreg,
+		.nmuxregs = ARRAY_SIZE(vip_mux_muxreg),
+	},
+};
+
+static struct spear_pingroup vip_mux_pingroup = {
+	.name = "vip_mux_grp",
+	.pins = vip_mux_pins,
+	.npins = ARRAY_SIZE(vip_mux_pins),
+	.modemuxs = vip_mux_modemux,
+	.nmodemuxs = ARRAY_SIZE(vip_mux_modemux),
+};
+
+/* pad multiplexing for vip_mux_cam0 (disables cam0) device */
+static const unsigned vip_mux_cam0_pins[] = { 65, 66, 67, 68, 69, 70, 71, 72,
+	73, 74, 75 };
+static struct spear_muxreg vip_mux_cam0_muxreg[] = {
+	{
+		.reg = PAD_SHARED_IP_EN_1,
+		.mask = CAM0_MASK,
+		.val = 0,
+	}, {
+		.reg = PAD_FUNCTION_EN_3,
+		.mask = VIP_AND_CAM0_REG2_MASK,
+		.val = VIP_AND_CAM0_REG2_MASK,
+	},
+};
+
+static struct spear_modemux vip_mux_cam0_modemux[] = {
+	{
+		.muxregs = vip_mux_cam0_muxreg,
+		.nmuxregs = ARRAY_SIZE(vip_mux_cam0_muxreg),
+	},
+};
+
+static struct spear_pingroup vip_mux_cam0_pingroup = {
+	.name = "vip_mux_cam0_grp",
+	.pins = vip_mux_cam0_pins,
+	.npins = ARRAY_SIZE(vip_mux_cam0_pins),
+	.modemuxs = vip_mux_cam0_modemux,
+	.nmodemuxs = ARRAY_SIZE(vip_mux_cam0_modemux),
+};
+
+/* pad multiplexing for vip_mux_cam1 (disables cam1) device */
+static const unsigned vip_mux_cam1_pins[] = { 54, 55, 56, 57, 58, 59, 60, 61,
+	62, 63, 64 };
+static struct spear_muxreg vip_mux_cam1_muxreg[] = {
+	{
+		.reg = PAD_SHARED_IP_EN_1,
+		.mask = CAM1_MASK,
+		.val = 0,
+	}, {
+		.reg = PAD_FUNCTION_EN_2,
+		.mask = VIP_AND_CAM1_REG1_MASK,
+		.val = VIP_AND_CAM1_REG1_MASK,
+	}, {
+		.reg = PAD_FUNCTION_EN_3,
+		.mask = VIP_AND_CAM1_REG2_MASK,
+		.val = VIP_AND_CAM1_REG2_MASK,
+	},
+};
+
+static struct spear_modemux vip_mux_cam1_modemux[] = {
+	{
+		.muxregs = vip_mux_cam1_muxreg,
+		.nmuxregs = ARRAY_SIZE(vip_mux_cam1_muxreg),
+	},
+};
+
+static struct spear_pingroup vip_mux_cam1_pingroup = {
+	.name = "vip_mux_cam1_grp",
+	.pins = vip_mux_cam1_pins,
+	.npins = ARRAY_SIZE(vip_mux_cam1_pins),
+	.modemuxs = vip_mux_cam1_modemux,
+	.nmodemuxs = ARRAY_SIZE(vip_mux_cam1_modemux),
+};
+
+/* pad multiplexing for vip_mux_cam2 (disables cam2) device */
+static const unsigned vip_mux_cam2_pins[] = { 39, 44, 45, 46, 47, 48, 49, 50,
+	51, 52, 53 };
+static struct spear_muxreg vip_mux_cam2_muxreg[] = {
+	{
+		.reg = PAD_SHARED_IP_EN_1,
+		.mask = CAM2_MASK,
+		.val = 0,
+	}, {
+		.reg = PAD_FUNCTION_EN_2,
+		.mask = VIP_AND_CAM2_REG1_MASK,
+		.val = VIP_AND_CAM2_REG1_MASK,
+	},
+};
+
+static struct spear_modemux vip_mux_cam2_modemux[] = {
+	{
+		.muxregs = vip_mux_cam2_muxreg,
+		.nmuxregs = ARRAY_SIZE(vip_mux_cam2_muxreg),
+	},
+};
+
+static struct spear_pingroup vip_mux_cam2_pingroup = {
+	.name = "vip_mux_cam2_grp",
+	.pins = vip_mux_cam2_pins,
+	.npins = ARRAY_SIZE(vip_mux_cam2_pins),
+	.modemuxs = vip_mux_cam2_modemux,
+	.nmodemuxs = ARRAY_SIZE(vip_mux_cam2_modemux),
+};
+
+/* pad multiplexing for vip_mux_cam3 (disables cam3) device */
+static const unsigned vip_mux_cam3_pins[] = { 20, 25, 26, 27, 28, 29, 30, 31,
+	32, 33, 34 };
+static struct spear_muxreg vip_mux_cam3_muxreg[] = {
+	{
+		.reg = PAD_SHARED_IP_EN_1,
+		.mask = CAM3_MASK,
+		.val = 0,
+	}, {
+		.reg = PAD_FUNCTION_EN_1,
+		.mask = VIP_AND_CAM3_REG0_MASK,
+		.val = VIP_AND_CAM3_REG0_MASK,
+	}, {
+		.reg = PAD_FUNCTION_EN_2,
+		.mask = VIP_AND_CAM3_REG1_MASK,
+		.val = VIP_AND_CAM3_REG1_MASK,
+	},
+};
+
+static struct spear_modemux vip_mux_cam3_modemux[] = {
+	{
+		.muxregs = vip_mux_cam3_muxreg,
+		.nmuxregs = ARRAY_SIZE(vip_mux_cam3_muxreg),
+	},
+};
+
+static struct spear_pingroup vip_mux_cam3_pingroup = {
+	.name = "vip_mux_cam3_grp",
+	.pins = vip_mux_cam3_pins,
+	.npins = ARRAY_SIZE(vip_mux_cam3_pins),
+	.modemuxs = vip_mux_cam3_modemux,
+	.nmodemuxs = ARRAY_SIZE(vip_mux_cam3_modemux),
+};
+
+static const char *const vip_grps[] = { "vip_mux_grp", "vip_mux_cam0_grp",
+	"vip_mux_cam1_grp", "vip_mux_cam2_grp", "vip_mux_cam3_grp" };
+static struct spear_function vip_function = {
+	.name = "vip",
+	.groups = vip_grps,
+	.ngroups = ARRAY_SIZE(vip_grps),
+};
+
+/* pad multiplexing for cam0 device */
+static const unsigned cam0_pins[] = { 65, 66, 67, 68, 69, 70, 71, 72, 73, 74, 75
+};
+static struct spear_muxreg cam0_muxreg[] = {
+	{
+		.reg = PAD_SHARED_IP_EN_1,
+		.mask = CAM0_MASK,
+		.val = CAM0_MASK,
+	}, {
+		.reg = PAD_FUNCTION_EN_3,
+		.mask = VIP_AND_CAM0_REG2_MASK,
+		.val = VIP_AND_CAM0_REG2_MASK,
+	},
+};
+
+static struct spear_modemux cam0_modemux[] = {
+	{
+		.muxregs = cam0_muxreg,
+		.nmuxregs = ARRAY_SIZE(cam0_muxreg),
+	},
+};
+
+static struct spear_pingroup cam0_pingroup = {
+	.name = "cam0_grp",
+	.pins = cam0_pins,
+	.npins = ARRAY_SIZE(cam0_pins),
+	.modemuxs = cam0_modemux,
+	.nmodemuxs = ARRAY_SIZE(cam0_modemux),
+};
+
+static const char *const cam0_grps[] = { "cam0_grp" };
+static struct spear_function cam0_function = {
+	.name = "cam0",
+	.groups = cam0_grps,
+	.ngroups = ARRAY_SIZE(cam0_grps),
+};
+
+/* pad multiplexing for cam1 device */
+static const unsigned cam1_pins[] = { 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 64
+};
+static struct spear_muxreg cam1_muxreg[] = {
+	{
+		.reg = PAD_SHARED_IP_EN_1,
+		.mask = CAM1_MASK,
+		.val = CAM1_MASK,
+	}, {
+		.reg = PAD_FUNCTION_EN_2,
+		.mask = VIP_AND_CAM1_REG1_MASK,
+		.val = VIP_AND_CAM1_REG1_MASK,
+	}, {
+		.reg = PAD_FUNCTION_EN_3,
+		.mask = VIP_AND_CAM1_REG2_MASK,
+		.val = VIP_AND_CAM1_REG2_MASK,
+	},
+};
+
+static struct spear_modemux cam1_modemux[] = {
+	{
+		.muxregs = cam1_muxreg,
+		.nmuxregs = ARRAY_SIZE(cam1_muxreg),
+	},
+};
+
+static struct spear_pingroup cam1_pingroup = {
+	.name = "cam1_grp",
+	.pins = cam1_pins,
+	.npins = ARRAY_SIZE(cam1_pins),
+	.modemuxs = cam1_modemux,
+	.nmodemuxs = ARRAY_SIZE(cam1_modemux),
+};
+
+static const char *const cam1_grps[] = { "cam1_grp" };
+static struct spear_function cam1_function = {
+	.name = "cam1",
+	.groups = cam1_grps,
+	.ngroups = ARRAY_SIZE(cam1_grps),
+};
+
+/* pad multiplexing for cam2 device */
+static const unsigned cam2_pins[] = { 39, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53
+};
+static struct spear_muxreg cam2_muxreg[] = {
+	{
+		.reg = PAD_SHARED_IP_EN_1,
+		.mask = CAM2_MASK,
+		.val = CAM2_MASK,
+	}, {
+		.reg = PAD_FUNCTION_EN_2,
+		.mask = VIP_AND_CAM2_REG1_MASK,
+		.val = VIP_AND_CAM2_REG1_MASK,
+	},
+};
+
+static struct spear_modemux cam2_modemux[] = {
+	{
+		.muxregs = cam2_muxreg,
+		.nmuxregs = ARRAY_SIZE(cam2_muxreg),
+	},
+};
+
+static struct spear_pingroup cam2_pingroup = {
+	.name = "cam2_grp",
+	.pins = cam2_pins,
+	.npins = ARRAY_SIZE(cam2_pins),
+	.modemuxs = cam2_modemux,
+	.nmodemuxs = ARRAY_SIZE(cam2_modemux),
+};
+
+static const char *const cam2_grps[] = { "cam2_grp" };
+static struct spear_function cam2_function = {
+	.name = "cam2",
+	.groups = cam2_grps,
+	.ngroups = ARRAY_SIZE(cam2_grps),
+};
+
+/* pad multiplexing for cam3 device */
+static const unsigned cam3_pins[] = { 20, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34
+};
+static struct spear_muxreg cam3_muxreg[] = {
+	{
+		.reg = PAD_SHARED_IP_EN_1,
+		.mask = CAM3_MASK,
+		.val = CAM3_MASK,
+	}, {
+		.reg = PAD_FUNCTION_EN_1,
+		.mask = VIP_AND_CAM3_REG0_MASK,
+		.val = VIP_AND_CAM3_REG0_MASK,
+	}, {
+		.reg = PAD_FUNCTION_EN_2,
+		.mask = VIP_AND_CAM3_REG1_MASK,
+		.val = VIP_AND_CAM3_REG1_MASK,
+	},
+};
+
+static struct spear_modemux cam3_modemux[] = {
+	{
+		.muxregs = cam3_muxreg,
+		.nmuxregs = ARRAY_SIZE(cam3_muxreg),
+	},
+};
+
+static struct spear_pingroup cam3_pingroup = {
+	.name = "cam3_grp",
+	.pins = cam3_pins,
+	.npins = ARRAY_SIZE(cam3_pins),
+	.modemuxs = cam3_modemux,
+	.nmodemuxs = ARRAY_SIZE(cam3_modemux),
+};
+
+static const char *const cam3_grps[] = { "cam3_grp" };
+static struct spear_function cam3_function = {
+	.name = "cam3",
+	.groups = cam3_grps,
+	.ngroups = ARRAY_SIZE(cam3_grps),
+};
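/*
 * Reading the tables above together: the CAMx bits in PAD_SHARED_IP_EN_1
 * arbitrate the shared pads between the VIP block and the camera interfaces.
 * A "vip_mux_camN_grp" clears CAMN_MASK (pads routed to VIP) while the
 * matching "camN_grp" sets it (pads routed to the camera), with the same
 * PAD_FUNCTION_EN_* bits enabled either way, so the two groups are mutually
 * exclusive on any given pad range.
 */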
+
+/* pad multiplexing for smi device */
+static const unsigned smi_pins[] = { 76, 77, 78, 79, 84 };
+static struct spear_muxreg smi_muxreg[] = {
+	{
+		.reg = PAD_FUNCTION_EN_3,
+		.mask = SMI_REG2_MASK,
+		.val = SMI_REG2_MASK,
+	},
+};
+
+static struct spear_modemux smi_modemux[] = {
+	{
+		.muxregs = smi_muxreg,
+		.nmuxregs = ARRAY_SIZE(smi_muxreg),
+	},
+};
+
+static struct spear_pingroup smi_pingroup = {
+	.name = "smi_grp",
+	.pins = smi_pins,
+	.npins = ARRAY_SIZE(smi_pins),
+	.modemuxs = smi_modemux,
+	.nmodemuxs = ARRAY_SIZE(smi_modemux),
+};
+
+static const char *const smi_grps[] = { "smi_grp" };
+static struct spear_function smi_function = {
+	.name = "smi",
+	.groups = smi_grps,
+	.ngroups = ARRAY_SIZE(smi_grps),
+};
+
+/* pad multiplexing for ssp0 device */
+static const unsigned ssp0_pins[] = { 80, 81, 82, 83 };
+static struct spear_muxreg ssp0_muxreg[] = {
+	{
+		.reg = PAD_FUNCTION_EN_3,
+		.mask = SSP0_REG2_MASK,
+		.val = SSP0_REG2_MASK,
+	},
+};
+
+static struct spear_modemux ssp0_modemux[] = {
+	{
+		.muxregs = ssp0_muxreg,
+		.nmuxregs = ARRAY_SIZE(ssp0_muxreg),
+	},
+};
+
+static struct spear_pingroup ssp0_pingroup = {
+	.name = "ssp0_grp",
+	.pins = ssp0_pins,
+	.npins = ARRAY_SIZE(ssp0_pins),
+	.modemuxs = ssp0_modemux,
+	.nmodemuxs = ARRAY_SIZE(ssp0_modemux),
+};
+
+/* pad multiplexing for ssp0_cs1 device */
+static const unsigned ssp0_cs1_pins[] = { 24 };
+static struct spear_muxreg ssp0_cs1_muxreg[] = {
+	{
+		.reg = PAD_SHARED_IP_EN_1,
+		.mask = SSP0_CS1_MASK,
+		.val = SSP0_CS1_MASK,
+	}, {
+		.reg = PAD_FUNCTION_EN_1,
+		.mask = PWM0_AND_SSP0_CS1_REG0_MASK,
+		.val = PWM0_AND_SSP0_CS1_REG0_MASK,
+	},
+};
+
+static struct spear_modemux ssp0_cs1_modemux[] = {
+	{
+		.muxregs = ssp0_cs1_muxreg,
+		.nmuxregs = ARRAY_SIZE(ssp0_cs1_muxreg),
+	},
+};
+
+static struct spear_pingroup ssp0_cs1_pingroup = {
+	.name = "ssp0_cs1_grp",
+	.pins = ssp0_cs1_pins,
+	.npins = ARRAY_SIZE(ssp0_cs1_pins),
+	.modemuxs = ssp0_cs1_modemux,
+	.nmodemuxs = ARRAY_SIZE(ssp0_cs1_modemux),
+};
+
+/* pad multiplexing for ssp0_cs2 device */
+static const unsigned ssp0_cs2_pins[] = { 85 };
+static struct spear_muxreg ssp0_cs2_muxreg[] = {
+	{
+		.reg = PAD_SHARED_IP_EN_1,
+		.mask = SSP0_CS2_MASK,
+		.val = SSP0_CS2_MASK,
+	}, {
+		.reg = PAD_FUNCTION_EN_3,
+		.mask = TS_AND_SSP0_CS2_REG2_MASK,
+		.val = TS_AND_SSP0_CS2_REG2_MASK,
+	},
+};
+
+static struct spear_modemux ssp0_cs2_modemux[] = {
+	{
+		.muxregs = ssp0_cs2_muxreg,
+		.nmuxregs = ARRAY_SIZE(ssp0_cs2_muxreg),
+	},
+};
+
+static struct spear_pingroup ssp0_cs2_pingroup = {
+	.name = "ssp0_cs2_grp",
+	.pins = ssp0_cs2_pins,
+	.npins = ARRAY_SIZE(ssp0_cs2_pins),
+	.modemuxs = ssp0_cs2_modemux,
+	.nmodemuxs = ARRAY_SIZE(ssp0_cs2_modemux),
+};
+
+/* pad multiplexing for ssp0_cs3 device */
+static const unsigned ssp0_cs3_pins[] = { 132 };
+static struct spear_muxreg ssp0_cs3_muxreg[] = {
+	{
+		.reg = PAD_FUNCTION_EN_5,
+		.mask = SSP0_CS3_REG4_MASK,
+		.val = SSP0_CS3_REG4_MASK,
+	},
+};
+
+static struct spear_modemux ssp0_cs3_modemux[] = {
+	{
+		.muxregs = ssp0_cs3_muxreg,
+		.nmuxregs = ARRAY_SIZE(ssp0_cs3_muxreg),
+	},
+};
+
+static struct spear_pingroup ssp0_cs3_pingroup = {
+	.name = "ssp0_cs3_grp",
+	.pins = ssp0_cs3_pins,
+	.npins = ARRAY_SIZE(ssp0_cs3_pins),
+	.modemuxs = ssp0_cs3_modemux,
+	.nmodemuxs = ARRAY_SIZE(ssp0_cs3_modemux),
+};
+
+static const char *const ssp0_grps[] = { "ssp0_grp", "ssp0_cs1_grp",
+	"ssp0_cs2_grp", "ssp0_cs3_grp" };
+static struct spear_function ssp0_function = {
+	.name = "ssp0",
+	.groups = ssp0_grps,
+	.ngroups = ARRAY_SIZE(ssp0_grps),
+};
+
+/* pad multiplexing for uart0 device */
+static const unsigned uart0_pins[] = { 86, 87 };
+static struct spear_muxreg uart0_muxreg[] = {
+	{
+		.reg = PAD_FUNCTION_EN_3,
+		.mask = UART0_REG2_MASK,
+		.val = UART0_REG2_MASK,
+	},
+};
+
+static struct spear_modemux uart0_modemux[] = {
+	{
+		.muxregs = uart0_muxreg,
+		.nmuxregs = ARRAY_SIZE(uart0_muxreg),
+	},
+};
+
+static struct spear_pingroup uart0_pingroup = {
+	.name = "uart0_grp",
+	.pins = uart0_pins,
+	.npins = ARRAY_SIZE(uart0_pins),
+	.modemuxs = uart0_modemux,
+	.nmodemuxs = ARRAY_SIZE(uart0_modemux),
+};
+
+/* pad multiplexing for uart0_enh device */
+static const unsigned uart0_enh_pins[] = { 11, 12, 13, 14, 15, 16 };
+static struct spear_muxreg uart0_enh_muxreg[] = {
+	{
+		.reg = PAD_SHARED_IP_EN_1,
+		.mask = GPT_MASK,
+		.val = 0,
+	}, {
+		.reg = PAD_FUNCTION_EN_1,
+		.mask = UART0_ENH_AND_GPT_REG0_MASK,
+		.val = UART0_ENH_AND_GPT_REG0_MASK,
+	},
+};
+
+static struct spear_modemux uart0_enh_modemux[] = {
+	{
+		.muxregs = uart0_enh_muxreg,
+		.nmuxregs = ARRAY_SIZE(uart0_enh_muxreg),
+	},
+};
+
+static struct spear_pingroup uart0_enh_pingroup = {
+	.name = "uart0_enh_grp",
+	.pins = uart0_enh_pins,
+	.npins = ARRAY_SIZE(uart0_enh_pins),
+	.modemuxs = uart0_enh_modemux,
+	.nmodemuxs = ARRAY_SIZE(uart0_enh_modemux),
+};
+
+static const char *const uart0_grps[] = { "uart0_grp", "uart0_enh_grp" };
+static struct spear_function uart0_function = {
+	.name = "uart0",
+	.groups = uart0_grps,
+	.ngroups = ARRAY_SIZE(uart0_grps),
+};
+
+/* pad multiplexing for uart1 device */
+static const unsigned uart1_pins[] = { 88, 89 };
+static struct spear_muxreg uart1_muxreg[] = {
+	{
+		.reg = PAD_FUNCTION_EN_3,
+		.mask = UART1_REG2_MASK,
+		.val = UART1_REG2_MASK,
+	},
+};
+
+static struct spear_modemux uart1_modemux[] = {
+	{
+		.muxregs = uart1_muxreg,
+		.nmuxregs = ARRAY_SIZE(uart1_muxreg),
+	},
+};
+
+static struct spear_pingroup uart1_pingroup = {
+	.name = "uart1_grp",
+	.pins = uart1_pins,
+	.npins = ARRAY_SIZE(uart1_pins),
+	.modemuxs = uart1_modemux,
+	.nmodemuxs = ARRAY_SIZE(uart1_modemux),
+};
+
+static const char *const uart1_grps[] = { "uart1_grp" };
+static struct spear_function uart1_function = {
+	.name = "uart1",
+	.groups = uart1_grps,
+	.ngroups = ARRAY_SIZE(uart1_grps),
+};
+
+/* pad multiplexing for i2s_in device */
+static const unsigned i2s_in_pins[] = { 90, 91, 92, 93, 94, 99 };
+static struct spear_muxreg i2s_in_muxreg[] = {
+	{
+		.reg = PAD_FUNCTION_EN_3,
+		.mask = I2S_IN_REG2_MASK,
+		.val = I2S_IN_REG2_MASK,
+	}, {
+		.reg = PAD_FUNCTION_EN_4,
+		.mask = I2S_IN_REG3_MASK,
+		.val = I2S_IN_REG3_MASK,
+	},
+};
+
+static struct spear_modemux i2s_in_modemux[] = {
+	{
+		.muxregs = i2s_in_muxreg,
+		.nmuxregs = ARRAY_SIZE(i2s_in_muxreg),
+	},
+};
+
+static struct spear_pingroup i2s_in_pingroup = {
+	.name = "i2s_in_grp",
+	.pins = i2s_in_pins,
+	.npins = ARRAY_SIZE(i2s_in_pins),
+	.modemuxs = i2s_in_modemux,
+	.nmodemuxs = ARRAY_SIZE(i2s_in_modemux),
+};
+
+/* pad multiplexing for i2s_out device */
+static const unsigned i2s_out_pins[] = { 95, 96, 97, 98, 100, 101, 102, 103 };
+static struct spear_muxreg i2s_out_muxreg[] = {
+	{
+		.reg = PAD_FUNCTION_EN_4,
+		.mask = I2S_OUT_REG3_MASK,
+		.val = I2S_OUT_REG3_MASK,
+	},
+};
+
+static struct spear_modemux i2s_out_modemux[] = {
+	{
+		.muxregs = i2s_out_muxreg,
+		.nmuxregs = ARRAY_SIZE(i2s_out_muxreg),
+	},
+};
+
+static struct spear_pingroup i2s_out_pingroup = {
+	.name = "i2s_out_grp",
+	.pins = i2s_out_pins,
+	.npins = ARRAY_SIZE(i2s_out_pins),
+	.modemuxs = i2s_out_modemux,
+	.nmodemuxs = ARRAY_SIZE(i2s_out_modemux),
+};
+
+static const char *const i2s_grps[] = { "i2s_in_grp", "i2s_out_grp" };
+static struct spear_function i2s_function = {
+	.name = "i2s",
+	.groups = i2s_grps,
+	.ngroups = ARRAY_SIZE(i2s_grps),
+};
+
+/* pad multiplexing for gmac device */
+static const unsigned gmac_pins[] = { 104, 105, 106, 107, 108, 109, 110, 111,
+	112, 113, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125,
+	126, 127, 128, 129, 130, 131 };
+#define GMAC_MUXREG				\
+	{					\
+		.reg = PAD_FUNCTION_EN_4,	\
+		.mask = GMAC_REG3_MASK,		\
+		.val = GMAC_REG3_MASK,		\
+	}, {					\
+		.reg = PAD_FUNCTION_EN_5,	\
+		.mask = GMAC_REG4_MASK,		\
+		.val = GMAC_REG4_MASK,		\
+	}
+
+/* pad multiplexing for gmii device */
+static struct spear_muxreg gmii_muxreg[] = {
+	GMAC_MUXREG,
+	{
+		.reg = GMAC_CLK_CFG,
+		.mask = GMAC_PHY_IF_SEL_MASK,
+		.val = GMAC_PHY_IF_GMII_VAL,
+	},
+};
+
+static struct spear_modemux gmii_modemux[] = {
+	{
+		.muxregs = gmii_muxreg,
+		.nmuxregs = ARRAY_SIZE(gmii_muxreg),
+	},
+};
+
+static struct spear_pingroup gmii_pingroup = {
+	.name = "gmii_grp",
+	.pins = gmac_pins,
+	.npins = ARRAY_SIZE(gmac_pins),
+	.modemuxs = gmii_modemux,
+	.nmodemuxs = ARRAY_SIZE(gmii_modemux),
+};
+
+/* pad multiplexing for rgmii device */
+static struct spear_muxreg rgmii_muxreg[] = {
+	GMAC_MUXREG,
+	{
+		.reg = GMAC_CLK_CFG,
+		.mask = GMAC_PHY_IF_SEL_MASK,
+		.val = GMAC_PHY_IF_RGMII_VAL,
+	},
+};
+
+static struct spear_modemux rgmii_modemux[] = {
+	{
+		.muxregs = rgmii_muxreg,
+		.nmuxregs = ARRAY_SIZE(rgmii_muxreg),
+	},
+};
+
+static struct spear_pingroup rgmii_pingroup = {
+	.name = "rgmii_grp",
+	.pins = gmac_pins,
+	.npins = ARRAY_SIZE(gmac_pins),
+	.modemuxs = rgmii_modemux,
+	.nmodemuxs = ARRAY_SIZE(rgmii_modemux),
+};
+
+/* pad multiplexing for rmii device */
+static struct spear_muxreg rmii_muxreg[] = {
+	GMAC_MUXREG,
+	{
+		.reg = GMAC_CLK_CFG,
+		.mask = GMAC_PHY_IF_SEL_MASK,
+		.val = GMAC_PHY_IF_RMII_VAL,
+	},
+};
+
+static struct spear_modemux rmii_modemux[] = {
+	{
+		.muxregs = rmii_muxreg,
+		.nmuxregs = ARRAY_SIZE(rmii_muxreg),
+	},
+};
+
+static struct spear_pingroup rmii_pingroup = {
+	.name = "rmii_grp",
+	.pins = gmac_pins,
+	.npins = ARRAY_SIZE(gmac_pins),
+	.modemuxs = rmii_modemux,
+	.nmodemuxs = ARRAY_SIZE(rmii_modemux),
+};
+
+/* pad multiplexing for sgmii device */
+static struct spear_muxreg sgmii_muxreg[] = {
+	GMAC_MUXREG,
+	{
+		.reg = GMAC_CLK_CFG,
+		.mask = GMAC_PHY_IF_SEL_MASK,
+		.val = GMAC_PHY_IF_SGMII_VAL,
+	},
+};
+
+static struct spear_modemux sgmii_modemux[] = {
+	{
+		.muxregs = sgmii_muxreg,
+		.nmuxregs = ARRAY_SIZE(sgmii_muxreg),
+	},
+};
+
+static struct spear_pingroup sgmii_pingroup = {
+	.name = "sgmii_grp",
+	.pins = gmac_pins,
+	.npins = ARRAY_SIZE(gmac_pins),
+	.modemuxs = sgmii_modemux,
+	.nmodemuxs = ARRAY_SIZE(sgmii_modemux),
+};
+
+static const char *const gmac_grps[] = { "gmii_grp", "rgmii_grp", "rmii_grp",
+	"sgmii_grp" };
+static struct spear_function gmac_function = {
+	.name = "gmac",
+	.groups = gmac_grps,
+	.ngroups = ARRAY_SIZE(gmac_grps),
+};
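/*
 * A hypothetical board-file mapping showing how one of the four gmac groups
 * would be selected through the generic pinctrl machine API (the device and
 * controller names below are made up for illustration; a real SPEAr1340
 * board is expected to describe this in its device tree instead):
 */
#include <linux/pinctrl/machine.h>

static const struct pinctrl_map spear1340_evb_pinmux_map[] = {
	/* claim the shared gmac pads as RGMII; GMAC_CLK_CFG is set up too */
	PIN_MAP_MUX_GROUP("e2000000.eth", "default",
			  "e0700000.pinmux", "rgmii_grp", "gmac"),
};

static int __init spear1340_evb_pinmux_init(void)
{
	return pinctrl_register_mappings(spear1340_evb_pinmux_map,
					 ARRAY_SIZE(spear1340_evb_pinmux_map));
}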
+
+/* pad multiplexing for i2c0 device */
+static const unsigned i2c0_pins[] = { 133, 134 };
+static struct spear_muxreg i2c0_muxreg[] = {
+	{
+		.reg = PAD_FUNCTION_EN_5,
+		.mask = I2C0_REG4_MASK,
+		.val = I2C0_REG4_MASK,
+	},
+};
+
+static struct spear_modemux i2c0_modemux[] = {
+	{
+		.muxregs = i2c0_muxreg,
+		.nmuxregs = ARRAY_SIZE(i2c0_muxreg),
+	},
+};
+
+static struct spear_pingroup i2c0_pingroup = {
+	.name = "i2c0_grp",
+	.pins = i2c0_pins,
+	.npins = ARRAY_SIZE(i2c0_pins),
+	.modemuxs = i2c0_modemux,
+	.nmodemuxs = ARRAY_SIZE(i2c0_modemux),
+};
+
+static const char *const i2c0_grps[] = { "i2c0_grp" };
+static struct spear_function i2c0_function = {
+	.name = "i2c0",
+	.groups = i2c0_grps,
+	.ngroups = ARRAY_SIZE(i2c0_grps),
+};
+
+/* pad multiplexing for i2c1 device */
+static const unsigned i2c1_pins[] = { 18, 23 };
+static struct spear_muxreg i2c1_muxreg[] = {
+	{
+		.reg = PAD_FUNCTION_EN_1,
+		.mask = I2C1_REG0_MASK,
+		.val = I2C1_REG0_MASK,
+	},
+};
+
+static struct spear_modemux i2c1_modemux[] = {
+	{
+		.muxregs = i2c1_muxreg,
+		.nmuxregs = ARRAY_SIZE(i2c1_muxreg),
+	},
+};
+
+static struct spear_pingroup i2c1_pingroup = {
+	.name = "i2c1_grp",
+	.pins = i2c1_pins,
+	.npins = ARRAY_SIZE(i2c1_pins),
+	.modemuxs = i2c1_modemux,
+	.nmodemuxs = ARRAY_SIZE(i2c1_modemux),
+};
+
+static const char *const i2c1_grps[] = { "i2c1_grp" };
+static struct spear_function i2c1_function = {
+	.name = "i2c1",
+	.groups = i2c1_grps,
+	.ngroups = ARRAY_SIZE(i2c1_grps),
+};
+
+/* pad multiplexing for cec0 device */
+static const unsigned cec0_pins[] = { 135 };
+static struct spear_muxreg cec0_muxreg[] = {
+	{
+		.reg = PAD_FUNCTION_EN_5,
+		.mask = CEC0_REG4_MASK,
+		.val = CEC0_REG4_MASK,
+	},
+};
+
+static struct spear_modemux cec0_modemux[] = {
+	{
+		.muxregs = cec0_muxreg,
+		.nmuxregs = ARRAY_SIZE(cec0_muxreg),
+	},
+};
+
+static struct spear_pingroup cec0_pingroup = {
+	.name = "cec0_grp",
+	.pins = cec0_pins,
+	.npins = ARRAY_SIZE(cec0_pins),
+	.modemuxs = cec0_modemux,
+	.nmodemuxs = ARRAY_SIZE(cec0_modemux),
+};
+
+static const char *const cec0_grps[] = { "cec0_grp" };
+static struct spear_function cec0_function = {
+	.name = "cec0",
+	.groups = cec0_grps,
+	.ngroups = ARRAY_SIZE(cec0_grps),
+};
+
+/* pad multiplexing for cec1 device */
+static const unsigned cec1_pins[] = { 136 };
+static struct spear_muxreg cec1_muxreg[] = {
+	{
+		.reg = PAD_FUNCTION_EN_5,
+		.mask = CEC1_REG4_MASK,
+		.val = CEC1_REG4_MASK,
+	},
+};
+
+static struct spear_modemux cec1_modemux[] = {
+	{
+		.muxregs = cec1_muxreg,
+		.nmuxregs = ARRAY_SIZE(cec1_muxreg),
+	},
+};
+
+static struct spear_pingroup cec1_pingroup = {
+	.name = "cec1_grp",
+	.pins = cec1_pins,
+	.npins = ARRAY_SIZE(cec1_pins),
+	.modemuxs = cec1_modemux,
+	.nmodemuxs = ARRAY_SIZE(cec1_modemux),
+};
+
+static const char *const cec1_grps[] = { "cec1_grp" };
+static struct spear_function cec1_function = {
+	.name = "cec1",
+	.groups = cec1_grps,
+	.ngroups = ARRAY_SIZE(cec1_grps),
+};
+
+/* pad multiplexing for mcif devices */
+static const unsigned mcif_pins[] = { 193, 194, 195, 196, 197, 198, 199, 200,
+	201, 202, 203, 204, 205, 206, 207, 208, 209, 210, 211, 212, 213, 214,
+	215, 216, 217, 218, 219, 220, 221, 222, 223, 224, 225, 226, 227, 228,
+	229, 230, 231, 232, 237 };
+#define MCIF_MUXREG							\
+	{								\
+		.reg = PAD_SHARED_IP_EN_1,				\
+		.mask = MCIF_MASK,					\
+		.val = MCIF_MASK,					\
+	}, {								\
+		.reg = PAD_FUNCTION_EN_7,				\
+		.mask = FSMC_PNOR_AND_MCIF_REG6_MASK | MCIF_REG6_MASK,	\
+		.val = FSMC_PNOR_AND_MCIF_REG6_MASK | MCIF_REG6_MASK,	\
+	}, {								\
+		.reg = PAD_FUNCTION_EN_8,				\
+		.mask = MCIF_REG7_MASK,					\
+		.val = MCIF_REG7_MASK,					\
+	}
+
+/* pad multiplexing for sdhci device */
+static struct spear_muxreg sdhci_muxreg[] = {
+	MCIF_MUXREG,
+	{
+		.reg = PERIP_CFG,
+		.mask = MCIF_SEL_MASK,
+		.val = MCIF_SEL_SD,
+	},
+};
+
+static struct spear_modemux sdhci_modemux[] = {
+	{
+		.muxregs = sdhci_muxreg,
+		.nmuxregs = ARRAY_SIZE(sdhci_muxreg),
+	},
+};
+
+static struct spear_pingroup sdhci_pingroup = {
+	.name = "sdhci_grp",
+	.pins = mcif_pins,
+	.npins = ARRAY_SIZE(mcif_pins),
+	.modemuxs = sdhci_modemux,
+	.nmodemuxs = ARRAY_SIZE(sdhci_modemux),
+};
+
+static const char *const sdhci_grps[] = { "sdhci_grp" };
+static struct spear_function sdhci_function = {
+	.name = "sdhci",
+	.groups = sdhci_grps,
+	.ngroups = ARRAY_SIZE(sdhci_grps),
+};
+
+/* pad multiplexing for cf device */
+static struct spear_muxreg cf_muxreg[] = {
+	MCIF_MUXREG,
+	{
+		.reg = PERIP_CFG,
+		.mask = MCIF_SEL_MASK,
+		.val = MCIF_SEL_CF,
+	},
+};
+
+static struct spear_modemux cf_modemux[] = {
+	{
+		.muxregs = cf_muxreg,
+		.nmuxregs = ARRAY_SIZE(cf_muxreg),
+	},
+};
+
+static struct spear_pingroup cf_pingroup = {
+	.name = "cf_grp",
+	.pins = mcif_pins,
+	.npins = ARRAY_SIZE(mcif_pins),
+	.modemuxs = cf_modemux,
+	.nmodemuxs = ARRAY_SIZE(cf_modemux),
+};
+
+static const char *const cf_grps[] = { "cf_grp" };
+static struct spear_function cf_function = {
+	.name = "cf",
+	.groups = cf_grps,
+	.ngroups = ARRAY_SIZE(cf_grps),
+};
+
+/* pad multiplexing for xd device */
+static struct spear_muxreg xd_muxreg[] = {
+	MCIF_MUXREG,
+	{
+		.reg = PERIP_CFG,
+		.mask = MCIF_SEL_MASK,
+		.val = MCIF_SEL_XD,
+	},
+};
+
+static struct spear_modemux xd_modemux[] = {
+	{
+		.muxregs = xd_muxreg,
+		.nmuxregs = ARRAY_SIZE(xd_muxreg),
+	},
+};
+
+static struct spear_pingroup xd_pingroup = {
+	.name = "xd_grp",
+	.pins = mcif_pins,
+	.npins = ARRAY_SIZE(mcif_pins),
+	.modemuxs = xd_modemux,
+	.nmodemuxs = ARRAY_SIZE(xd_modemux),
+};
+
+static const char *const xd_grps[] = { "xd_grp" };
+static struct spear_function xd_function = {
+	.name = "xd",
+	.groups = xd_grps,
+	.ngroups = ARRAY_SIZE(xd_grps),
+};
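/*
 * sdhci_grp, cf_grp and xd_grp all claim the same mcif_pins and share
 * MCIF_MUXREG; they differ only in the MCIF_SEL field written to PERIP_CFG,
 * so at most one of the three MCIF clients can own the pads at a time.
 */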
+
+/* pad multiplexing for clcd device */
+static const unsigned clcd_pins[] = { 138, 139, 140, 141, 142, 143, 144, 145,
+	146, 147, 148, 149, 150, 151, 152, 153, 154, 155, 156, 157, 158, 159,
+	160, 161, 162, 163, 164, 165, 166, 167, 168, 169, 170, 171, 172, 173,
+	174, 175, 176, 177, 178, 179, 180, 181, 182, 183, 184, 185, 186, 187,
+	188, 189, 190, 191 };
+static struct spear_muxreg clcd_muxreg[] = {
+	{
+		.reg = PAD_SHARED_IP_EN_1,
+		.mask = ARM_TRACE_MASK | MIPHY_DBG_MASK,
+		.val = 0,
+	}, {
+		.reg = PAD_FUNCTION_EN_5,
+		.mask = CLCD_REG4_MASK | CLCD_AND_ARM_TRACE_REG4_MASK,
+		.val = CLCD_REG4_MASK | CLCD_AND_ARM_TRACE_REG4_MASK,
+	}, {
+		.reg = PAD_FUNCTION_EN_6,
+		.mask = CLCD_AND_ARM_TRACE_REG5_MASK,
+		.val = CLCD_AND_ARM_TRACE_REG5_MASK,
+	}, {
+		.reg = PAD_FUNCTION_EN_7,
+		.mask = CLCD_AND_ARM_TRACE_REG6_MASK,
+		.val = CLCD_AND_ARM_TRACE_REG6_MASK,
+	},
+};
+
+static struct spear_modemux clcd_modemux[] = {
+	{
+		.muxregs = clcd_muxreg,
+		.nmuxregs = ARRAY_SIZE(clcd_muxreg),
+	},
+};
+
+static struct spear_pingroup clcd_pingroup = {
+	.name = "clcd_grp",
+	.pins = clcd_pins,
+	.npins = ARRAY_SIZE(clcd_pins),
+	.modemuxs = clcd_modemux,
+	.nmodemuxs = ARRAY_SIZE(clcd_modemux),
+};
+
+static const char *const clcd_grps[] = { "clcd_grp" };
+static struct spear_function clcd_function = {
+	.name = "clcd",
+	.groups = clcd_grps,
+	.ngroups = ARRAY_SIZE(clcd_grps),
+};
+
+/* pad multiplexing for arm_trace device */
+static const unsigned arm_trace_pins[] = { 158, 159, 160, 161, 162, 163, 164,
+	165, 166, 167, 168, 169, 170, 171, 172, 173, 174, 175, 176, 177, 178,
+	179, 180, 181, 182, 183, 184, 185, 186, 187, 188, 189, 190, 191, 192,
+	193, 194, 195, 196, 197, 198, 199, 200 };
+static struct spear_muxreg arm_trace_muxreg[] = {
+	{
+		.reg = PAD_SHARED_IP_EN_1,
+		.mask = ARM_TRACE_MASK,
+		.val = ARM_TRACE_MASK,
+	}, {
+		.reg = PAD_FUNCTION_EN_5,
+		.mask = CLCD_AND_ARM_TRACE_REG4_MASK,
+		.val = CLCD_AND_ARM_TRACE_REG4_MASK,
+	}, {
+		.reg = PAD_FUNCTION_EN_6,
+		.mask = CLCD_AND_ARM_TRACE_REG5_MASK,
+		.val = CLCD_AND_ARM_TRACE_REG5_MASK,
+	}, {
+		.reg = PAD_FUNCTION_EN_7,
+		.mask = CLCD_AND_ARM_TRACE_REG6_MASK,
+		.val = CLCD_AND_ARM_TRACE_REG6_MASK,
+	},
+};
+
+static struct spear_modemux arm_trace_modemux[] = {
+	{
+		.muxregs = arm_trace_muxreg,
+		.nmuxregs = ARRAY_SIZE(arm_trace_muxreg),
+	},
+};
+
+static struct spear_pingroup arm_trace_pingroup = {
+	.name = "arm_trace_grp",
+	.pins = arm_trace_pins,
+	.npins = ARRAY_SIZE(arm_trace_pins),
+	.modemuxs = arm_trace_modemux,
+	.nmodemuxs = ARRAY_SIZE(arm_trace_modemux),
+};
+
+static const char *const arm_trace_grps[] = { "arm_trace_grp" };
+static struct spear_function arm_trace_function = {
+	.name = "arm_trace",
+	.groups = arm_trace_grps,
+	.ngroups = ARRAY_SIZE(arm_trace_grps),
+};
+
+/* pad multiplexing for miphy_dbg device */
+static const unsigned miphy_dbg_pins[] = { 96, 97, 98, 99, 100, 101, 102, 103,
+	132, 135, 136, 137, 138, 139, 140, 141, 142, 143, 144, 145, 146, 147,
+	148, 149, 150, 151, 152, 153, 154, 155, 156, 157 };
+static struct spear_muxreg miphy_dbg_muxreg[] = {
+	{
+		.reg = PAD_SHARED_IP_EN_1,
+		.mask = MIPHY_DBG_MASK,
+		.val = MIPHY_DBG_MASK,
+	}, {
+		.reg = PAD_FUNCTION_EN_5,
+		.mask = DEVS_GRP_AND_MIPHY_DBG_REG4_MASK,
+		.val = DEVS_GRP_AND_MIPHY_DBG_REG4_MASK,
+	},
+};
+
+static struct spear_modemux miphy_dbg_modemux[] = {
+	{
+		.muxregs = miphy_dbg_muxreg,
+		.nmuxregs = ARRAY_SIZE(miphy_dbg_muxreg),
+	},
+};
+
+static struct spear_pingroup miphy_dbg_pingroup = {
+	.name = "miphy_dbg_grp",
+	.pins = miphy_dbg_pins,
+	.npins = ARRAY_SIZE(miphy_dbg_pins),
+	.modemuxs = miphy_dbg_modemux,
+	.nmodemuxs = ARRAY_SIZE(miphy_dbg_modemux),
+};
+
+static const char *const miphy_dbg_grps[] = { "miphy_dbg_grp" };
+static struct spear_function miphy_dbg_function = {
+	.name = "miphy_dbg",
+	.groups = miphy_dbg_grps,
+	.ngroups = ARRAY_SIZE(miphy_dbg_grps),
+};
+
+/* pad multiplexing for pcie device */
+static const unsigned pcie_pins[] = { 250 };
+static struct spear_muxreg pcie_muxreg[] = {
+	{
+		.reg = PCIE_SATA_CFG,
+		.mask = SATA_PCIE_CFG_MASK,
+		.val = PCIE_CFG_VAL,
+	},
+};
+
+static struct spear_modemux pcie_modemux[] = {
+	{
+		.muxregs = pcie_muxreg,
+		.nmuxregs = ARRAY_SIZE(pcie_muxreg),
+	},
+};
+
+static struct spear_pingroup pcie_pingroup = {
+	.name = "pcie_grp",
+	.pins = pcie_pins,
+	.npins = ARRAY_SIZE(pcie_pins),
+	.modemuxs = pcie_modemux,
+	.nmodemuxs = ARRAY_SIZE(pcie_modemux),
+};
+
+static const char *const pcie_grps[] = { "pcie_grp" };
+static struct spear_function pcie_function = {
+	.name = "pcie",
+	.groups = pcie_grps,
+	.ngroups = ARRAY_SIZE(pcie_grps),
+};
+
+/* pad multiplexing for sata device */
+static const unsigned sata_pins[] = { 250 };
+static struct spear_muxreg sata_muxreg[] = {
+	{
+		.reg = PCIE_SATA_CFG,
+		.mask = SATA_PCIE_CFG_MASK,
+		.val = SATA_CFG_VAL,
+	},
+};
+
+static struct spear_modemux sata_modemux[] = {
+	{
+		.muxregs = sata_muxreg,
+		.nmuxregs = ARRAY_SIZE(sata_muxreg),
+	},
+};
+
+static struct spear_pingroup sata_pingroup = {
+	.name = "sata_grp",
+	.pins = sata_pins,
+	.npins = ARRAY_SIZE(sata_pins),
+	.modemuxs = sata_modemux,
+	.nmodemuxs = ARRAY_SIZE(sata_modemux),
+};
+
+static const char *const sata_grps[] = { "sata_grp" };
+static struct spear_function sata_function = {
+	.name = "sata",
+	.groups = sata_grps,
+	.ngroups = ARRAY_SIZE(sata_grps),
+};
+
+/* pingroups */
+static struct spear_pingroup *spear1340_pingroups[] = {
+	&pads_as_gpio_pingroup,
+	&fsmc_8bit_pingroup,
+	&fsmc_16bit_pingroup,
+	&fsmc_pnor_pingroup,
+	&keyboard_row_col_pingroup,
+	&keyboard_col5_pingroup,
+	&spdif_in_pingroup,
+	&spdif_out_pingroup,
+	&gpt_0_1_pingroup,
+	&pwm0_pingroup,
+	&pwm1_pingroup,
+	&pwm2_pingroup,
+	&pwm3_pingroup,
+	&vip_mux_pingroup,
+	&vip_mux_cam0_pingroup,
+	&vip_mux_cam1_pingroup,
+	&vip_mux_cam2_pingroup,
+	&vip_mux_cam3_pingroup,
+	&cam0_pingroup,
+	&cam1_pingroup,
+	&cam2_pingroup,
+	&cam3_pingroup,
+	&smi_pingroup,
+	&ssp0_pingroup,
+	&ssp0_cs1_pingroup,
+	&ssp0_cs2_pingroup,
+	&ssp0_cs3_pingroup,
+	&uart0_pingroup,
+	&uart0_enh_pingroup,
+	&uart1_pingroup,
+	&i2s_in_pingroup,
+	&i2s_out_pingroup,
+	&gmii_pingroup,
+	&rgmii_pingroup,
+	&rmii_pingroup,
+	&sgmii_pingroup,
+	&i2c0_pingroup,
+	&i2c1_pingroup,
+	&cec0_pingroup,
+	&cec1_pingroup,
+	&sdhci_pingroup,
+	&cf_pingroup,
+	&xd_pingroup,
+	&clcd_pingroup,
+	&arm_trace_pingroup,
+	&miphy_dbg_pingroup,
+	&pcie_pingroup,
+	&sata_pingroup,
+};
+
+/* functions */
+static struct spear_function *spear1340_functions[] = {
+	&pads_as_gpio_function,
+	&fsmc_function,
+	&keyboard_function,
+	&spdif_in_function,
+	&spdif_out_function,
+	&gpt_0_1_function,
+	&pwm_function,
+	&vip_function,
+	&cam0_function,
+	&cam1_function,
+	&cam2_function,
+	&cam3_function,
+	&smi_function,
+	&ssp0_function,
+	&uart0_function,
+	&uart1_function,
+	&i2s_function,
+	&gmac_function,
+	&i2c0_function,
+	&i2c1_function,
+	&cec0_function,
+	&cec1_function,
+	&sdhci_function,
+	&cf_function,
+	&xd_function,
+	&clcd_function,
+	&arm_trace_function,
+	&miphy_dbg_function,
+	&pcie_function,
+	&sata_function,
+};
+
+static struct spear_pinctrl_machdata spear1340_machdata = {
+	.pins = spear1340_pins,
+	.npins = ARRAY_SIZE(spear1340_pins),
+	.groups = spear1340_pingroups,
+	.ngroups = ARRAY_SIZE(spear1340_pingroups),
+	.functions = spear1340_functions,
+	.nfunctions = ARRAY_SIZE(spear1340_functions),
+	.modes_supported = false,
+};
+
+static struct of_device_id spear1340_pinctrl_of_match[] __devinitdata = {
+	{
+		.compatible = "st,spear1340-pinmux",
+	},
+	{},
+};
+
+static int __devinit spear1340_pinctrl_probe(struct platform_device *pdev)
+{
+	return spear_pinctrl_probe(pdev, &spear1340_machdata);
+}
+
+static int __devexit spear1340_pinctrl_remove(struct platform_device *pdev)
+{
+	return spear_pinctrl_remove(pdev);
+}
+
+static struct platform_driver spear1340_pinctrl_driver = {
+	.driver = {
+		.name = DRIVER_NAME,
+		.owner = THIS_MODULE,
+		.of_match_table = spear1340_pinctrl_of_match,
+	},
+	.probe = spear1340_pinctrl_probe,
+	.remove = __devexit_p(spear1340_pinctrl_remove),
+};
+
+static int __init spear1340_pinctrl_init(void)
+{
+	return platform_driver_register(&spear1340_pinctrl_driver);
+}
+arch_initcall(spear1340_pinctrl_init);
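/*
 * Registering at arch_initcall() time (rather than module_init()) makes the
 * pad multiplexing available before the peripheral drivers that depend on it
 * start probing when the driver is built in.
 */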
+
+static void __exit spear1340_pinctrl_exit(void)
+{
+	platform_driver_unregister(&spear1340_pinctrl_driver);
+}
+module_exit(spear1340_pinctrl_exit);
+
+MODULE_AUTHOR("Viresh Kumar <viresh.linux@gmail.com>");
+MODULE_DESCRIPTION("ST Microelectronics SPEAr1340 pinctrl driver");
+MODULE_LICENSE("GPL v2");
+MODULE_DEVICE_TABLE(of, spear1340_pinctrl_of_match);
diff --git a/drivers/pinctrl/spear/pinctrl-spear300.c b/drivers/pinctrl/spear/pinctrl-spear300.c
index 9c82a35..4dfc284 100644
--- a/drivers/pinctrl/spear/pinctrl-spear300.c
+++ b/drivers/pinctrl/spear/pinctrl-spear300.c
@@ -2,7 +2,7 @@
  * Driver for the ST Microelectronics SPEAr300 pinmux
  *
  * Copyright (C) 2012 ST Microelectronics
- * Viresh Kumar <viresh.kumar@st.com>
+ * Viresh Kumar <viresh.linux@gmail.com>
  *
  * This file is licensed under the terms of the GNU General Public
  * License version 2. This program is licensed "as is" without any
@@ -702,7 +702,7 @@
 }
 module_exit(spear300_pinctrl_exit);
 
-MODULE_AUTHOR("Viresh Kumar <viresh.kumar@st.com>");
+MODULE_AUTHOR("Viresh Kumar <viresh.linux@gmail.com>");
 MODULE_DESCRIPTION("ST Microelectronics SPEAr300 pinctrl driver");
 MODULE_LICENSE("GPL v2");
 MODULE_DEVICE_TABLE(of, spear300_pinctrl_of_match);
diff --git a/drivers/pinctrl/spear/pinctrl-spear310.c b/drivers/pinctrl/spear/pinctrl-spear310.c
index 1a97076..9688369 100644
--- a/drivers/pinctrl/spear/pinctrl-spear310.c
+++ b/drivers/pinctrl/spear/pinctrl-spear310.c
@@ -2,7 +2,7 @@
  * Driver for the ST Microelectronics SPEAr310 pinmux
  *
  * Copyright (C) 2012 ST Microelectronics
- * Viresh Kumar <viresh.kumar@st.com>
+ * Viresh Kumar <viresh.linux@gmail.com>
  *
  * This file is licensed under the terms of the GNU General Public
  * License version 2. This program is licensed "as is" without any
@@ -425,7 +425,7 @@
 }
 module_exit(spear310_pinctrl_exit);
 
-MODULE_AUTHOR("Viresh Kumar <viresh.kumar@st.com>");
+MODULE_AUTHOR("Viresh Kumar <viresh.linux@gmail.com>");
 MODULE_DESCRIPTION("ST Microelectronics SPEAr310 pinctrl driver");
 MODULE_LICENSE("GPL v2");
 MODULE_DEVICE_TABLE(of, SPEAr310_pinctrl_of_match);
diff --git a/drivers/pinctrl/spear/pinctrl-spear320.c b/drivers/pinctrl/spear/pinctrl-spear320.c
index de726e6..020b1e0 100644
--- a/drivers/pinctrl/spear/pinctrl-spear320.c
+++ b/drivers/pinctrl/spear/pinctrl-spear320.c
@@ -2,7 +2,7 @@
  * Driver for the ST Microelectronics SPEAr320 pinmux
  *
  * Copyright (C) 2012 ST Microelectronics
- * Viresh Kumar <viresh.kumar@st.com>
+ * Viresh Kumar <viresh.linux@gmail.com>
  *
  * This file is licensed under the terms of the GNU General Public
  * License version 2. This program is licensed "as is" without any
@@ -3462,7 +3462,7 @@
 }
 module_exit(spear320_pinctrl_exit);
 
-MODULE_AUTHOR("Viresh Kumar <viresh.kumar@st.com>");
+MODULE_AUTHOR("Viresh Kumar <viresh.linux@gmail.com>");
 MODULE_DESCRIPTION("ST Microelectronics SPEAr320 pinctrl driver");
 MODULE_LICENSE("GPL v2");
 MODULE_DEVICE_TABLE(of, spear320_pinctrl_of_match);
diff --git a/drivers/pinctrl/spear/pinctrl-spear3xx.c b/drivers/pinctrl/spear/pinctrl-spear3xx.c
index 832049a..0242378 100644
--- a/drivers/pinctrl/spear/pinctrl-spear3xx.c
+++ b/drivers/pinctrl/spear/pinctrl-spear3xx.c
@@ -2,7 +2,7 @@
  * Driver for the ST Microelectronics SPEAr3xx pinmux
  *
  * Copyright (C) 2012 ST Microelectronics
- * Viresh Kumar <viresh.kumar@st.com>
+ * Viresh Kumar <viresh.linux@gmail.com>
  *
  * This file is licensed under the terms of the GNU General Public
  * License version 2. This program is licensed "as is" without any
@@ -15,108 +15,7 @@
 
 /* pins */
 static const struct pinctrl_pin_desc spear3xx_pins[] = {
-	PINCTRL_PIN(0, "PLGPIO0"),
-	PINCTRL_PIN(1, "PLGPIO1"),
-	PINCTRL_PIN(2, "PLGPIO2"),
-	PINCTRL_PIN(3, "PLGPIO3"),
-	PINCTRL_PIN(4, "PLGPIO4"),
-	PINCTRL_PIN(5, "PLGPIO5"),
-	PINCTRL_PIN(6, "PLGPIO6"),
-	PINCTRL_PIN(7, "PLGPIO7"),
-	PINCTRL_PIN(8, "PLGPIO8"),
-	PINCTRL_PIN(9, "PLGPIO9"),
-	PINCTRL_PIN(10, "PLGPIO10"),
-	PINCTRL_PIN(11, "PLGPIO11"),
-	PINCTRL_PIN(12, "PLGPIO12"),
-	PINCTRL_PIN(13, "PLGPIO13"),
-	PINCTRL_PIN(14, "PLGPIO14"),
-	PINCTRL_PIN(15, "PLGPIO15"),
-	PINCTRL_PIN(16, "PLGPIO16"),
-	PINCTRL_PIN(17, "PLGPIO17"),
-	PINCTRL_PIN(18, "PLGPIO18"),
-	PINCTRL_PIN(19, "PLGPIO19"),
-	PINCTRL_PIN(20, "PLGPIO20"),
-	PINCTRL_PIN(21, "PLGPIO21"),
-	PINCTRL_PIN(22, "PLGPIO22"),
-	PINCTRL_PIN(23, "PLGPIO23"),
-	PINCTRL_PIN(24, "PLGPIO24"),
-	PINCTRL_PIN(25, "PLGPIO25"),
-	PINCTRL_PIN(26, "PLGPIO26"),
-	PINCTRL_PIN(27, "PLGPIO27"),
-	PINCTRL_PIN(28, "PLGPIO28"),
-	PINCTRL_PIN(29, "PLGPIO29"),
-	PINCTRL_PIN(30, "PLGPIO30"),
-	PINCTRL_PIN(31, "PLGPIO31"),
-	PINCTRL_PIN(32, "PLGPIO32"),
-	PINCTRL_PIN(33, "PLGPIO33"),
-	PINCTRL_PIN(34, "PLGPIO34"),
-	PINCTRL_PIN(35, "PLGPIO35"),
-	PINCTRL_PIN(36, "PLGPIO36"),
-	PINCTRL_PIN(37, "PLGPIO37"),
-	PINCTRL_PIN(38, "PLGPIO38"),
-	PINCTRL_PIN(39, "PLGPIO39"),
-	PINCTRL_PIN(40, "PLGPIO40"),
-	PINCTRL_PIN(41, "PLGPIO41"),
-	PINCTRL_PIN(42, "PLGPIO42"),
-	PINCTRL_PIN(43, "PLGPIO43"),
-	PINCTRL_PIN(44, "PLGPIO44"),
-	PINCTRL_PIN(45, "PLGPIO45"),
-	PINCTRL_PIN(46, "PLGPIO46"),
-	PINCTRL_PIN(47, "PLGPIO47"),
-	PINCTRL_PIN(48, "PLGPIO48"),
-	PINCTRL_PIN(49, "PLGPIO49"),
-	PINCTRL_PIN(50, "PLGPIO50"),
-	PINCTRL_PIN(51, "PLGPIO51"),
-	PINCTRL_PIN(52, "PLGPIO52"),
-	PINCTRL_PIN(53, "PLGPIO53"),
-	PINCTRL_PIN(54, "PLGPIO54"),
-	PINCTRL_PIN(55, "PLGPIO55"),
-	PINCTRL_PIN(56, "PLGPIO56"),
-	PINCTRL_PIN(57, "PLGPIO57"),
-	PINCTRL_PIN(58, "PLGPIO58"),
-	PINCTRL_PIN(59, "PLGPIO59"),
-	PINCTRL_PIN(60, "PLGPIO60"),
-	PINCTRL_PIN(61, "PLGPIO61"),
-	PINCTRL_PIN(62, "PLGPIO62"),
-	PINCTRL_PIN(63, "PLGPIO63"),
-	PINCTRL_PIN(64, "PLGPIO64"),
-	PINCTRL_PIN(65, "PLGPIO65"),
-	PINCTRL_PIN(66, "PLGPIO66"),
-	PINCTRL_PIN(67, "PLGPIO67"),
-	PINCTRL_PIN(68, "PLGPIO68"),
-	PINCTRL_PIN(69, "PLGPIO69"),
-	PINCTRL_PIN(70, "PLGPIO70"),
-	PINCTRL_PIN(71, "PLGPIO71"),
-	PINCTRL_PIN(72, "PLGPIO72"),
-	PINCTRL_PIN(73, "PLGPIO73"),
-	PINCTRL_PIN(74, "PLGPIO74"),
-	PINCTRL_PIN(75, "PLGPIO75"),
-	PINCTRL_PIN(76, "PLGPIO76"),
-	PINCTRL_PIN(77, "PLGPIO77"),
-	PINCTRL_PIN(78, "PLGPIO78"),
-	PINCTRL_PIN(79, "PLGPIO79"),
-	PINCTRL_PIN(80, "PLGPIO80"),
-	PINCTRL_PIN(81, "PLGPIO81"),
-	PINCTRL_PIN(82, "PLGPIO82"),
-	PINCTRL_PIN(83, "PLGPIO83"),
-	PINCTRL_PIN(84, "PLGPIO84"),
-	PINCTRL_PIN(85, "PLGPIO85"),
-	PINCTRL_PIN(86, "PLGPIO86"),
-	PINCTRL_PIN(87, "PLGPIO87"),
-	PINCTRL_PIN(88, "PLGPIO88"),
-	PINCTRL_PIN(89, "PLGPIO89"),
-	PINCTRL_PIN(90, "PLGPIO90"),
-	PINCTRL_PIN(91, "PLGPIO91"),
-	PINCTRL_PIN(92, "PLGPIO92"),
-	PINCTRL_PIN(93, "PLGPIO93"),
-	PINCTRL_PIN(94, "PLGPIO94"),
-	PINCTRL_PIN(95, "PLGPIO95"),
-	PINCTRL_PIN(96, "PLGPIO96"),
-	PINCTRL_PIN(97, "PLGPIO97"),
-	PINCTRL_PIN(98, "PLGPIO98"),
-	PINCTRL_PIN(99, "PLGPIO99"),
-	PINCTRL_PIN(100, "PLGPIO100"),
-	PINCTRL_PIN(101, "PLGPIO101"),
+	SPEAR_PIN_0_TO_101,
 };
 
 /* firda_pins */
diff --git a/drivers/pinctrl/spear/pinctrl-spear3xx.h b/drivers/pinctrl/spear/pinctrl-spear3xx.h
index 5d5fdd8..31f4434 100644
--- a/drivers/pinctrl/spear/pinctrl-spear3xx.h
+++ b/drivers/pinctrl/spear/pinctrl-spear3xx.h
@@ -2,7 +2,7 @@
  * Header file for the ST Microelectronics SPEAr3xx pinmux
  *
  * Copyright (C) 2012 ST Microelectronics
- * Viresh Kumar <viresh.kumar@st.com>
+ * Viresh Kumar <viresh.linux@gmail.com>
  *
  * This file is licensed under the terms of the GNU General Public
  * License version 2. This program is licensed "as is" without any
diff --git a/drivers/platform/x86/acer-wmi.c b/drivers/platform/x86/acer-wmi.c
index c1a3fd8..ce875dc 100644
--- a/drivers/platform/x86/acer-wmi.c
+++ b/drivers/platform/x86/acer-wmi.c
@@ -523,6 +523,30 @@
 			DMI_MATCH(DMI_PRODUCT_NAME, "TravelMate 4750"),
 		},
 	},
+	{
+		.callback = video_set_backlight_video_vendor,
+		.ident = "Acer Extensa 5235",
+		.matches = {
+			DMI_MATCH(DMI_BOARD_VENDOR, "Acer"),
+			DMI_MATCH(DMI_PRODUCT_NAME, "Extensa 5235"),
+		},
+	},
+	{
+		.callback = video_set_backlight_video_vendor,
+		.ident = "Acer TravelMate 5760",
+		.matches = {
+			DMI_MATCH(DMI_BOARD_VENDOR, "Acer"),
+			DMI_MATCH(DMI_PRODUCT_NAME, "TravelMate 5760"),
+		},
+	},
+	{
+		.callback = video_set_backlight_video_vendor,
+		.ident = "Acer Aspire 5750",
+		.matches = {
+			DMI_MATCH(DMI_BOARD_VENDOR, "Acer"),
+			DMI_MATCH(DMI_PRODUCT_NAME, "Aspire 5750"),
+		},
+	},
 	{}
 };
 
diff --git a/drivers/platform/x86/acerhdf.c b/drivers/platform/x86/acerhdf.c
index 639db4d..2fd9d36 100644
--- a/drivers/platform/x86/acerhdf.c
+++ b/drivers/platform/x86/acerhdf.c
@@ -5,7 +5,7 @@
  *
  * (C) 2009 - Peter Feuerer     peter (a) piie.net
  *                              http://piie.net
- *     2009 Borislav Petkov <petkovbb@gmail.com>
+ *     2009 Borislav Petkov	bp (a) alien8.de
  *
  * Inspired by and many thanks to:
  *  o acerfand   - Rachel Greenham
diff --git a/drivers/platform/x86/apple-gmux.c b/drivers/platform/x86/apple-gmux.c
index 8a582bd..694a15a 100644
--- a/drivers/platform/x86/apple-gmux.c
+++ b/drivers/platform/x86/apple-gmux.c
@@ -87,6 +87,9 @@
 	struct apple_gmux_data *gmux_data = bl_get_data(bd);
 	u32 brightness = bd->props.brightness;
 
+	if (bd->props.state & BL_CORE_SUSPENDED)
+		return 0;
+
 	/*
 	 * Older gmux versions require writing out lower bytes first then
 	 * setting the upper byte to 0 to flush the values. Newer versions
@@ -102,6 +105,7 @@
 }
 
 static const struct backlight_ops gmux_bl_ops = {
+	.options = BL_CORE_SUSPENDRESUME,
 	.get_brightness = gmux_get_brightness,
 	.update_status = gmux_update_status,
 };
diff --git a/drivers/platform/x86/dell-laptop.c b/drivers/platform/x86/dell-laptop.c
index e6c08ee..5f78aac 100644
--- a/drivers/platform/x86/dell-laptop.c
+++ b/drivers/platform/x86/dell-laptop.c
@@ -21,7 +21,6 @@
 #include <linux/err.h>
 #include <linux/dmi.h>
 #include <linux/io.h>
-#include <linux/rfkill.h>
 #include <linux/power_supply.h>
 #include <linux/acpi.h>
 #include <linux/mm.h>
@@ -90,11 +89,8 @@
 
 static struct platform_device *platform_device;
 static struct backlight_device *dell_backlight_device;
-static struct rfkill *wifi_rfkill;
-static struct rfkill *bluetooth_rfkill;
-static struct rfkill *wwan_rfkill;
 
-static const struct dmi_system_id __initdata dell_device_table[] = {
+static const struct dmi_system_id dell_device_table[] __initconst = {
 	{
 		.ident = "Dell laptop",
 		.matches = {
@@ -119,53 +115,6 @@
 };
 MODULE_DEVICE_TABLE(dmi, dell_device_table);
 
-static struct dmi_system_id __devinitdata dell_blacklist[] = {
-	/* Supported by compal-laptop */
-	{
-		.ident = "Dell Mini 9",
-		.matches = {
-			DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
-			DMI_MATCH(DMI_PRODUCT_NAME, "Inspiron 910"),
-		},
-	},
-	{
-		.ident = "Dell Mini 10",
-		.matches = {
-			DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
-			DMI_MATCH(DMI_PRODUCT_NAME, "Inspiron 1010"),
-		},
-	},
-	{
-		.ident = "Dell Mini 10v",
-		.matches = {
-			DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
-			DMI_MATCH(DMI_PRODUCT_NAME, "Inspiron 1011"),
-		},
-	},
-	{
-		.ident = "Dell Mini 1012",
-		.matches = {
-			DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
-			DMI_MATCH(DMI_PRODUCT_NAME, "Inspiron 1012"),
-		},
-	},
-	{
-		.ident = "Dell Inspiron 11z",
-		.matches = {
-			DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
-			DMI_MATCH(DMI_PRODUCT_NAME, "Inspiron 1110"),
-		},
-	},
-	{
-		.ident = "Dell Mini 12",
-		.matches = {
-			DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
-			DMI_MATCH(DMI_PRODUCT_NAME, "Inspiron 1210"),
-		},
-	},
-	{}
-};
-
 static struct dmi_system_id __devinitdata dell_quirks[] = {
 	{
 		.callback = dmi_matched,
@@ -187,6 +136,15 @@
 	},
 	{
 		.callback = dmi_matched,
+		.ident = "Dell Vostro 3350",
+		.matches = {
+			DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
+			DMI_MATCH(DMI_PRODUCT_NAME, "Vostro 3350"),
+		},
+		.driver_data = &quirk_dell_vostro_v130,
+	},
+	{
+		.callback = dmi_matched,
 		.ident = "Dell Vostro 3555",
 		.matches = {
 			DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
@@ -212,6 +170,42 @@
 		},
 		.driver_data = &quirk_dell_vostro_v130,
 	},
+	{
+		.callback = dmi_matched,
+		.ident = "Dell Vostro 3360",
+		.matches = {
+			DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
+			DMI_MATCH(DMI_PRODUCT_NAME, "Vostro 3360"),
+		},
+		.driver_data = &quirk_dell_vostro_v130,
+	},
+	{
+		.callback = dmi_matched,
+		.ident = "Dell Vostro 3460",
+		.matches = {
+			DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
+			DMI_MATCH(DMI_PRODUCT_NAME, "Vostro 3460"),
+		},
+		.driver_data = &quirk_dell_vostro_v130,
+	},
+	{
+		.callback = dmi_matched,
+		.ident = "Dell Vostro 3560",
+		.matches = {
+			DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
+			DMI_MATCH(DMI_PRODUCT_NAME, "Vostro 3560"),
+		},
+		.driver_data = &quirk_dell_vostro_v130,
+	},
+	{
+		.callback = dmi_matched,
+		.ident = "Dell Vostro 3450",
+		.matches = {
+			DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
+			DMI_MATCH(DMI_PRODUCT_NAME, "Dell System Vostro 3450"),
+		},
+		.driver_data = &quirk_dell_vostro_v130,
+	},
 	{ }
 };
 
@@ -305,94 +299,6 @@
 	return buffer;
 }
 
-/* Derived from information in DellWirelessCtl.cpp:
-   Class 17, select 11 is radio control. It returns an array of 32-bit values.
-
-   Input byte 0 = 0: Wireless information
-
-   result[0]: return code
-   result[1]:
-     Bit 0:      Hardware switch supported
-     Bit 1:      Wifi locator supported
-     Bit 2:      Wifi is supported
-     Bit 3:      Bluetooth is supported
-     Bit 4:      WWAN is supported
-     Bit 5:      Wireless keyboard supported
-     Bits 6-7:   Reserved
-     Bit 8:      Wifi is installed
-     Bit 9:      Bluetooth is installed
-     Bit 10:     WWAN is installed
-     Bits 11-15: Reserved
-     Bit 16:     Hardware switch is on
-     Bit 17:     Wifi is blocked
-     Bit 18:     Bluetooth is blocked
-     Bit 19:     WWAN is blocked
-     Bits 20-31: Reserved
-   result[2]: NVRAM size in bytes
-   result[3]: NVRAM format version number
-
-   Input byte 0 = 2: Wireless switch configuration
-   result[0]: return code
-   result[1]:
-     Bit 0:      Wifi controlled by switch
-     Bit 1:      Bluetooth controlled by switch
-     Bit 2:      WWAN controlled by switch
-     Bits 3-6:   Reserved
-     Bit 7:      Wireless switch config locked
-     Bit 8:      Wifi locator enabled
-     Bits 9-14:  Reserved
-     Bit 15:     Wifi locator setting locked
-     Bits 16-31: Reserved
-*/
-
-static int dell_rfkill_set(void *data, bool blocked)
-{
-	int disable = blocked ? 1 : 0;
-	unsigned long radio = (unsigned long)data;
-	int hwswitch_bit = (unsigned long)data - 1;
-	int ret = 0;
-
-	get_buffer();
-	dell_send_request(buffer, 17, 11);
-
-	/* If the hardware switch controls this radio, and the hardware
-	   switch is disabled, don't allow changing the software state */
-	if ((hwswitch_state & BIT(hwswitch_bit)) &&
-	    !(buffer->output[1] & BIT(16))) {
-		ret = -EINVAL;
-		goto out;
-	}
-
-	buffer->input[0] = (1 | (radio<<8) | (disable << 16));
-	dell_send_request(buffer, 17, 11);
-
-out:
-	release_buffer();
-	return ret;
-}
-
-static void dell_rfkill_query(struct rfkill *rfkill, void *data)
-{
-	int status;
-	int bit = (unsigned long)data + 16;
-	int hwswitch_bit = (unsigned long)data - 1;
-
-	get_buffer();
-	dell_send_request(buffer, 17, 11);
-	status = buffer->output[1];
-	release_buffer();
-
-	rfkill_set_sw_state(rfkill, !!(status & BIT(bit)));
-
-	if (hwswitch_state & (BIT(hwswitch_bit)))
-		rfkill_set_hw_state(rfkill, !(status & BIT(16)));
-}
-
-static const struct rfkill_ops dell_rfkill_ops = {
-	.set_block = dell_rfkill_set,
-	.query = dell_rfkill_query,
-};
-
 static struct dentry *dell_laptop_dir;
 
 static int dell_debugfs_show(struct seq_file *s, void *data)
@@ -462,108 +368,6 @@
 	.release = single_release,
 };
 
-static void dell_update_rfkill(struct work_struct *ignored)
-{
-	if (wifi_rfkill)
-		dell_rfkill_query(wifi_rfkill, (void *)1);
-	if (bluetooth_rfkill)
-		dell_rfkill_query(bluetooth_rfkill, (void *)2);
-	if (wwan_rfkill)
-		dell_rfkill_query(wwan_rfkill, (void *)3);
-}
-static DECLARE_DELAYED_WORK(dell_rfkill_work, dell_update_rfkill);
-
-
-static int __init dell_setup_rfkill(void)
-{
-	int status;
-	int ret;
-
-	if (dmi_check_system(dell_blacklist)) {
-		pr_info("Blacklisted hardware detected - not enabling rfkill\n");
-		return 0;
-	}
-
-	get_buffer();
-	dell_send_request(buffer, 17, 11);
-	status = buffer->output[1];
-	buffer->input[0] = 0x2;
-	dell_send_request(buffer, 17, 11);
-	hwswitch_state = buffer->output[1];
-	release_buffer();
-
-	if ((status & (1<<2|1<<8)) == (1<<2|1<<8)) {
-		wifi_rfkill = rfkill_alloc("dell-wifi", &platform_device->dev,
-					   RFKILL_TYPE_WLAN,
-					   &dell_rfkill_ops, (void *) 1);
-		if (!wifi_rfkill) {
-			ret = -ENOMEM;
-			goto err_wifi;
-		}
-		ret = rfkill_register(wifi_rfkill);
-		if (ret)
-			goto err_wifi;
-	}
-
-	if ((status & (1<<3|1<<9)) == (1<<3|1<<9)) {
-		bluetooth_rfkill = rfkill_alloc("dell-bluetooth",
-						&platform_device->dev,
-						RFKILL_TYPE_BLUETOOTH,
-						&dell_rfkill_ops, (void *) 2);
-		if (!bluetooth_rfkill) {
-			ret = -ENOMEM;
-			goto err_bluetooth;
-		}
-		ret = rfkill_register(bluetooth_rfkill);
-		if (ret)
-			goto err_bluetooth;
-	}
-
-	if ((status & (1<<4|1<<10)) == (1<<4|1<<10)) {
-		wwan_rfkill = rfkill_alloc("dell-wwan",
-					   &platform_device->dev,
-					   RFKILL_TYPE_WWAN,
-					   &dell_rfkill_ops, (void *) 3);
-		if (!wwan_rfkill) {
-			ret = -ENOMEM;
-			goto err_wwan;
-		}
-		ret = rfkill_register(wwan_rfkill);
-		if (ret)
-			goto err_wwan;
-	}
-
-	return 0;
-err_wwan:
-	rfkill_destroy(wwan_rfkill);
-	if (bluetooth_rfkill)
-		rfkill_unregister(bluetooth_rfkill);
-err_bluetooth:
-	rfkill_destroy(bluetooth_rfkill);
-	if (wifi_rfkill)
-		rfkill_unregister(wifi_rfkill);
-err_wifi:
-	rfkill_destroy(wifi_rfkill);
-
-	return ret;
-}
-
-static void dell_cleanup_rfkill(void)
-{
-	if (wifi_rfkill) {
-		rfkill_unregister(wifi_rfkill);
-		rfkill_destroy(wifi_rfkill);
-	}
-	if (bluetooth_rfkill) {
-		rfkill_unregister(bluetooth_rfkill);
-		rfkill_destroy(bluetooth_rfkill);
-	}
-	if (wwan_rfkill) {
-		rfkill_unregister(wwan_rfkill);
-		rfkill_destroy(wwan_rfkill);
-	}
-}
-
 static int dell_send_intensity(struct backlight_device *bd)
 {
 	int ret = 0;
@@ -655,30 +459,6 @@
 	led_classdev_unregister(&touchpad_led);
 }
 
-static bool dell_laptop_i8042_filter(unsigned char data, unsigned char str,
-			      struct serio *port)
-{
-	static bool extended;
-
-	if (str & 0x20)
-		return false;
-
-	if (unlikely(data == 0xe0)) {
-		extended = true;
-		return false;
-	} else if (unlikely(extended)) {
-		switch (data) {
-		case 0x8:
-			schedule_delayed_work(&dell_rfkill_work,
-					      round_jiffies_relative(HZ));
-			break;
-		}
-		extended = false;
-	}
-
-	return false;
-}
-
 static int __init dell_init(void)
 {
 	int max_intensity = 0;
@@ -720,26 +500,10 @@
 		goto fail_buffer;
 	buffer = page_address(bufferpage);
 
-	ret = dell_setup_rfkill();
-
-	if (ret) {
-		pr_warn("Unable to setup rfkill\n");
-		goto fail_rfkill;
-	}
-
-	ret = i8042_install_filter(dell_laptop_i8042_filter);
-	if (ret) {
-		pr_warn("Unable to install key filter\n");
-		goto fail_filter;
-	}
-
 	if (quirks && quirks->touchpad_led)
 		touchpad_led_init(&platform_device->dev);
 
 	dell_laptop_dir = debugfs_create_dir("dell_laptop", NULL);
-	if (dell_laptop_dir != NULL)
-		debugfs_create_file("rfkill", 0444, dell_laptop_dir, NULL,
-				    &dell_debugfs_fops);
 
 #ifdef CONFIG_ACPI
 	/* In the event of an ACPI backlight being available, don't
@@ -782,11 +546,6 @@
 	return 0;
 
 fail_backlight:
-	i8042_remove_filter(dell_laptop_i8042_filter);
-	cancel_delayed_work_sync(&dell_rfkill_work);
-fail_filter:
-	dell_cleanup_rfkill();
-fail_rfkill:
 	free_page((unsigned long)bufferpage);
 fail_buffer:
 	platform_device_del(platform_device);
@@ -804,10 +563,7 @@
 	debugfs_remove_recursive(dell_laptop_dir);
 	if (quirks && quirks->touchpad_led)
 		touchpad_led_exit();
-	i8042_remove_filter(dell_laptop_i8042_filter);
-	cancel_delayed_work_sync(&dell_rfkill_work);
 	backlight_device_unregister(dell_backlight_device);
-	dell_cleanup_rfkill();
 	if (platform_device) {
 		platform_device_unregister(platform_device);
 		platform_driver_unregister(&platform_driver);
diff --git a/drivers/platform/x86/fujitsu-tablet.c b/drivers/platform/x86/fujitsu-tablet.c
index 580d80a..da267ea 100644
--- a/drivers/platform/x86/fujitsu-tablet.c
+++ b/drivers/platform/x86/fujitsu-tablet.c
@@ -16,6 +16,8 @@
  * 59 Temple Place Suite 330, Boston, MA 02111-1307, USA.
  */
 
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
 #include <linux/kernel.h>
 #include <linux/module.h>
 #include <linux/init.h>
@@ -34,7 +36,8 @@
 #define ACPI_FUJITSU_CLASS "fujitsu"
 
 #define INVERT_TABLET_MODE_BIT      0x01
-#define FORCE_TABLET_MODE_IF_UNDOCK 0x02
+#define INVERT_DOCK_STATE_BIT       0x02
+#define FORCE_TABLET_MODE_IF_UNDOCK 0x04
 
 #define KEYMAP_LEN 16
 
@@ -161,6 +164,8 @@
 	state = fujitsu_read_register(0xdd);
 
 	dock = state & 0x02;
+	if (fujitsu.config.quirks & INVERT_DOCK_STATE_BIT)
+		dock = !dock;
 
 	if ((fujitsu.config.quirks & FORCE_TABLET_MODE_IF_UNDOCK) && (!dock)) {
 		tablet_mode = 1;
@@ -221,9 +226,6 @@
 	input_set_capability(idev, EV_SW, SW_DOCK);
 	input_set_capability(idev, EV_SW, SW_TABLET_MODE);
 
-	input_set_capability(idev, EV_SW, SW_DOCK);
-	input_set_capability(idev, EV_SW, SW_TABLET_MODE);
-
 	error = input_register_device(idev);
 	if (error) {
 		input_free_device(idev);
@@ -275,25 +277,31 @@
 	return IRQ_HANDLED;
 }
 
-static int __devinit fujitsu_dmi_default(const struct dmi_system_id *dmi)
+static void __devinit fujitsu_dmi_common(const struct dmi_system_id *dmi)
 {
-	printk(KERN_INFO MODULENAME ": %s\n", dmi->ident);
+	pr_info("%s\n", dmi->ident);
 	memcpy(fujitsu.config.keymap, dmi->driver_data,
 			sizeof(fujitsu.config.keymap));
+}
+
+static int __devinit fujitsu_dmi_lifebook(const struct dmi_system_id *dmi)
+{
+	fujitsu_dmi_common(dmi);
+	fujitsu.config.quirks |= INVERT_TABLET_MODE_BIT;
 	return 1;
 }
 
 static int __devinit fujitsu_dmi_stylistic(const struct dmi_system_id *dmi)
 {
-	fujitsu_dmi_default(dmi);
+	fujitsu_dmi_common(dmi);
 	fujitsu.config.quirks |= FORCE_TABLET_MODE_IF_UNDOCK;
-	fujitsu.config.quirks |= INVERT_TABLET_MODE_BIT;
+	fujitsu.config.quirks |= INVERT_DOCK_STATE_BIT;
 	return 1;
 }
 
 static struct dmi_system_id dmi_ids[] __initconst = {
 	{
-		.callback = fujitsu_dmi_default,
+		.callback = fujitsu_dmi_lifebook,
 		.ident = "Fujitsu Siemens P/T Series",
 		.matches = {
 			DMI_MATCH(DMI_SYS_VENDOR, "FUJITSU"),
@@ -302,7 +310,7 @@
 		.driver_data = keymap_Lifebook_Tseries
 	},
 	{
-		.callback = fujitsu_dmi_default,
+		.callback = fujitsu_dmi_lifebook,
 		.ident = "Fujitsu Lifebook T Series",
 		.matches = {
 			DMI_MATCH(DMI_SYS_VENDOR, "FUJITSU"),
@@ -320,7 +328,7 @@
 		.driver_data = keymap_Stylistic_Tseries
 	},
 	{
-		.callback = fujitsu_dmi_default,
+		.callback = fujitsu_dmi_lifebook,
 		.ident = "Fujitsu LifeBook U810",
 		.matches = {
 			DMI_MATCH(DMI_SYS_VENDOR, "FUJITSU"),
@@ -347,7 +355,7 @@
 		.driver_data = keymap_Stylistic_ST5xxx
 	},
 	{
-		.callback = fujitsu_dmi_default,
+		.callback = fujitsu_dmi_lifebook,
 		.ident = "Unknown (using defaults)",
 		.matches = {
 			DMI_MATCH(DMI_SYS_VENDOR, ""),
@@ -473,6 +481,6 @@
 MODULE_AUTHOR("Robert Gerlach <khnz@gmx.de>");
 MODULE_DESCRIPTION("Fujitsu tablet pc extras driver");
 MODULE_LICENSE("GPL");
-MODULE_VERSION("2.4");
+MODULE_VERSION("2.5");
 
 MODULE_DEVICE_TABLE(acpi, fujitsu_ids);
diff --git a/drivers/platform/x86/hdaps.c b/drivers/platform/x86/hdaps.c
index 7387f97..24a3ae0 100644
--- a/drivers/platform/x86/hdaps.c
+++ b/drivers/platform/x86/hdaps.c
@@ -2,7 +2,7 @@
  * hdaps.c - driver for IBM's Hard Drive Active Protection System
  *
  * Copyright (C) 2005 Robert Love <rml@novell.com>
- * Copyright (C) 2005 Jesper Juhl <jesper.juhl@gmail.com>
+ * Copyright (C) 2005 Jesper Juhl <jj@chaosbits.net>
  *
  * The HardDisk Active Protection System (hdaps) is present in IBM ThinkPads
  * starting with the R40, T41, and X40.  It provides a basic two-axis
diff --git a/drivers/platform/x86/hp-wmi.c b/drivers/platform/x86/hp-wmi.c
index e2faa3c..387183a 100644
--- a/drivers/platform/x86/hp-wmi.c
+++ b/drivers/platform/x86/hp-wmi.c
@@ -634,6 +634,8 @@
 					   RFKILL_TYPE_WLAN,
 					   &hp_wmi_rfkill_ops,
 					   (void *) HPWMI_WIFI);
+		if (!wifi_rfkill)
+			return -ENOMEM;
 		rfkill_init_sw_state(wifi_rfkill,
 				     hp_wmi_get_sw_state(HPWMI_WIFI));
 		rfkill_set_hw_state(wifi_rfkill,
@@ -648,6 +650,10 @@
 						RFKILL_TYPE_BLUETOOTH,
 						&hp_wmi_rfkill_ops,
 						(void *) HPWMI_BLUETOOTH);
+		if (!bluetooth_rfkill) {
+			err = -ENOMEM;
+			goto register_wifi_error;
+		}
 		rfkill_init_sw_state(bluetooth_rfkill,
 				     hp_wmi_get_sw_state(HPWMI_BLUETOOTH));
 		rfkill_set_hw_state(bluetooth_rfkill,
@@ -662,6 +668,10 @@
 					   RFKILL_TYPE_WWAN,
 					   &hp_wmi_rfkill_ops,
 					   (void *) HPWMI_WWAN);
+		if (!wwan_rfkill) {
+			err = -ENOMEM;
+			goto register_bluetooth_error;
+		}
 		rfkill_init_sw_state(wwan_rfkill,
 				     hp_wmi_get_sw_state(HPWMI_WWAN));
 		rfkill_set_hw_state(wwan_rfkill,
diff --git a/drivers/platform/x86/ideapad-laptop.c b/drivers/platform/x86/ideapad-laptop.c
index ac902f7..4f20f8d 100644
--- a/drivers/platform/x86/ideapad-laptop.c
+++ b/drivers/platform/x86/ideapad-laptop.c
@@ -194,7 +194,6 @@
 /*
  * debugfs
  */
-#define DEBUGFS_EVENT_LEN (4096)
 static int debugfs_status_show(struct seq_file *s, void *data)
 {
 	unsigned long value;
@@ -315,7 +314,7 @@
 	node = debugfs_create_file("status", S_IRUGO, priv->debug, NULL,
 				   &debugfs_status_fops);
 	if (!node) {
-		pr_err("failed to create event in debugfs");
+		pr_err("failed to create status in debugfs");
 		goto errout;
 	}
 
@@ -785,6 +784,10 @@
 			case 9:
 				ideapad_sync_rfk_state(priv);
 				break;
+			case 13:
+			case 6:
+				ideapad_input_report(priv, vpc_bit);
+				break;
 			case 4:
 				ideapad_backlight_notify_brightness(priv);
 				break;
@@ -795,7 +798,7 @@
 				ideapad_backlight_notify_power(priv);
 				break;
 			default:
-				ideapad_input_report(priv, vpc_bit);
+				pr_info("Unknown event: %lu\n", vpc_bit);
 			}
 		}
 	}
diff --git a/drivers/platform/x86/sony-laptop.c b/drivers/platform/x86/sony-laptop.c
index 8a51795..210d4ae 100644
--- a/drivers/platform/x86/sony-laptop.c
+++ b/drivers/platform/x86/sony-laptop.c
@@ -141,6 +141,27 @@
 		 "(default: 0)");
 
 static void sony_nc_kbd_backlight_resume(void);
+static int sony_nc_kbd_backlight_setup(struct platform_device *pd,
+		unsigned int handle);
+static void sony_nc_kbd_backlight_cleanup(struct platform_device *pd);
+
+static int sony_nc_battery_care_setup(struct platform_device *pd,
+		unsigned int handle);
+static void sony_nc_battery_care_cleanup(struct platform_device *pd);
+
+static int sony_nc_thermal_setup(struct platform_device *pd);
+static void sony_nc_thermal_cleanup(struct platform_device *pd);
+static void sony_nc_thermal_resume(void);
+
+static int sony_nc_lid_resume_setup(struct platform_device *pd);
+static void sony_nc_lid_resume_cleanup(struct platform_device *pd);
+
+static int sony_nc_highspeed_charging_setup(struct platform_device *pd);
+static void sony_nc_highspeed_charging_cleanup(struct platform_device *pd);
+
+static int sony_nc_touchpad_setup(struct platform_device *pd,
+				  unsigned int handle);
+static void sony_nc_touchpad_cleanup(struct platform_device *pd);
 
 enum sony_nc_rfkill {
 	SONY_WIFI,
@@ -153,6 +174,9 @@
 static int sony_rfkill_handle;
 static struct rfkill *sony_rfkill_devices[N_SONY_RFKILL];
 static int sony_rfkill_address[N_SONY_RFKILL] = {0x300, 0x500, 0x700, 0x900};
+static int sony_nc_rfkill_setup(struct acpi_device *device,
+		unsigned int handle);
+static void sony_nc_rfkill_cleanup(void);
 static void sony_nc_rfkill_update(void);
 
 /*********** Input Devices ***********/
@@ -691,59 +715,97 @@
 
 /*
  * acpi_evaluate_object wrappers
+ * all useful calls into SNC methods take one or zero parameters and return
+ * integers or arrays.
  */
-static int acpi_callgetfunc(acpi_handle handle, char *name, int *result)
+static union acpi_object *__call_snc_method(acpi_handle handle, char *method,
+		u64 *value)
 {
-	struct acpi_buffer output;
-	union acpi_object out_obj;
+	union acpi_object *result = NULL;
+	struct acpi_buffer output = { ACPI_ALLOCATE_BUFFER, NULL };
 	acpi_status status;
 
-	output.length = sizeof(out_obj);
-	output.pointer = &out_obj;
-
-	status = acpi_evaluate_object(handle, name, NULL, &output);
-	if ((status == AE_OK) && (out_obj.type == ACPI_TYPE_INTEGER)) {
-		*result = out_obj.integer.value;
-		return 0;
+	if (value) {
+		struct acpi_object_list params;
+		union acpi_object in;
+		in.type = ACPI_TYPE_INTEGER;
+		in.integer.value = *value;
+		params.count = 1;
+		params.pointer = &in;
+		status = acpi_evaluate_object(handle, method, &params, &output);
+		dprintk("__call_snc_method: [%s:0x%.8x%.8x]\n", method,
+				(unsigned int)(*value >> 32),
+				(unsigned int)*value & 0xffffffff);
+	} else {
+		status = acpi_evaluate_object(handle, method, NULL, &output);
+		dprintk("__call_snc_method: [%s]\n", method);
 	}
 
-	pr_warn("acpi_callreadfunc failed\n");
+	if (ACPI_FAILURE(status)) {
+		pr_err("Failed to evaluate [%s]\n", method);
+		return NULL;
+	}
 
-	return -1;
+	result = (union acpi_object *) output.pointer;
+	if (!result)
+		dprintk("No return object [%s]\n", method);
+
+	return result;
 }
 
-static int acpi_callsetfunc(acpi_handle handle, char *name, int value,
-			    int *result)
+static int sony_nc_int_call(acpi_handle handle, char *name, int *value,
+		int *result)
 {
-	struct acpi_object_list params;
-	union acpi_object in_obj;
-	struct acpi_buffer output;
-	union acpi_object out_obj;
-	acpi_status status;
+	union acpi_object *object = NULL;
+	if (value) {
+		u64 v = *value;
+		object = __call_snc_method(handle, name, &v);
+	} else
+		object = __call_snc_method(handle, name, NULL);
 
-	params.count = 1;
-	params.pointer = &in_obj;
-	in_obj.type = ACPI_TYPE_INTEGER;
-	in_obj.integer.value = value;
+	if (!object)
+		return -EINVAL;
 
-	output.length = sizeof(out_obj);
-	output.pointer = &out_obj;
-
-	status = acpi_evaluate_object(handle, name, &params, &output);
-	if (status == AE_OK) {
-		if (result != NULL) {
-			if (out_obj.type != ACPI_TYPE_INTEGER) {
-				pr_warn("acpi_evaluate_object bad return type\n");
-				return -1;
-			}
-			*result = out_obj.integer.value;
-		}
-		return 0;
+	if (object->type != ACPI_TYPE_INTEGER) {
+		pr_warn("Invalid acpi_object: expected 0x%x got 0x%x\n",
+				ACPI_TYPE_INTEGER, object->type);
+		kfree(object);
+		return -EINVAL;
 	}
 
-	pr_warn("acpi_evaluate_object failed\n");
+	if (result)
+		*result = object->integer.value;
 
-	return -1;
+	kfree(object);
+	return 0;
+}
+
+#define MIN(a, b)	((a) > (b) ? (b) : (a))
+static int sony_nc_buffer_call(acpi_handle handle, char *name, u64 *value,
+		void *buffer, size_t buflen)
+{
+	size_t len = 0;
+	union acpi_object *object = __call_snc_method(handle, name, value);
+
+	if (!object)
+		return -EINVAL;
+
+	if (object->type == ACPI_TYPE_BUFFER)
+		len = MIN(buflen, object->buffer.length);
+
+	else if (object->type == ACPI_TYPE_INTEGER)
+		len = MIN(buflen, sizeof(object->integer.value));
+
+	else {
+		pr_warn("Invalid acpi_object: expected 0x%x got 0x%x\n",
+				ACPI_TYPE_BUFFER, object->type);
+		kfree(object);
+		return -EINVAL;
+	}
+
+	memcpy(buffer, object->buffer.pointer, len);
+	kfree(object);
+	return 0;
 }
 
 struct sony_nc_handles {
@@ -770,16 +832,17 @@
 
 static int sony_nc_handles_setup(struct platform_device *pd)
 {
-	int i;
-	int result;
+	int i, r, result, arg;
 
 	handles = kzalloc(sizeof(*handles), GFP_KERNEL);
 	if (!handles)
 		return -ENOMEM;
 
 	for (i = 0; i < ARRAY_SIZE(handles->cap); i++) {
-		if (!acpi_callsetfunc(sony_nc_acpi_handle,
-					"SN00", i + 0x20, &result)) {
+		arg = i + 0x20;
+		r = sony_nc_int_call(sony_nc_acpi_handle, "SN00", &arg,
+					&result);
+		if (!r) {
 			dprintk("caching handle 0x%.4x (offset: 0x%.2x)\n",
 					result, i);
 			handles->cap[i] = result;
@@ -819,8 +882,8 @@
 	int i;
 
 	/* not initialized yet, return early */
-	if (!handles)
-		return -1;
+	if (!handles || !handle)
+		return -EINVAL;
 
 	for (i = 0; i < 0x10; i++) {
 		if (handles->cap[i] == handle) {
@@ -830,21 +893,20 @@
 		}
 	}
 	dprintk("handle 0x%.4x not found\n", handle);
-	return -1;
+	return -EINVAL;
 }
 
 static int sony_call_snc_handle(int handle, int argument, int *result)
 {
-	int ret = 0;
+	int arg, ret = 0;
 	int offset = sony_find_snc_handle(handle);
 
 	if (offset < 0)
-		return -1;
+		return offset;
 
-	ret = acpi_callsetfunc(sony_nc_acpi_handle, "SN07", offset | argument,
-			result);
-	dprintk("called SN07 with 0x%.4x (result: 0x%.4x)\n", offset | argument,
-			*result);
+	arg = offset | argument;
+	ret = sony_nc_int_call(sony_nc_acpi_handle, "SN07", &arg, result);
+	dprintk("called SN07 with 0x%.4x (result: 0x%.4x)\n", arg, *result);
 	return ret;
 }
 
@@ -889,14 +951,16 @@
 static ssize_t sony_nc_sysfs_show(struct device *dev, struct device_attribute *attr,
 			      char *buffer)
 {
-	int value;
+	int value, ret = 0;
 	struct sony_nc_value *item =
 	    container_of(attr, struct sony_nc_value, devattr);
 
 	if (!*item->acpiget)
 		return -EIO;
 
-	if (acpi_callgetfunc(sony_nc_acpi_handle, *item->acpiget, &value) < 0)
+	ret = sony_nc_int_call(sony_nc_acpi_handle, *item->acpiget, NULL,
+				&value);
+	if (ret < 0)
 		return -EIO;
 
 	if (item->validate)
@@ -909,7 +973,8 @@
 			       struct device_attribute *attr,
 			       const char *buffer, size_t count)
 {
-	int value;
+	unsigned long value = 0;
+	int ret = 0;
 	struct sony_nc_value *item =
 	    container_of(attr, struct sony_nc_value, devattr);
 
@@ -919,7 +984,8 @@
 	if (count > 31)
 		return -EINVAL;
 
-	value = simple_strtoul(buffer, NULL, 10);
+	if (kstrtoul(buffer, 10, &value))
+		return -EINVAL;
 
 	if (item->validate)
 		value = item->validate(SNC_VALIDATE_IN, value);
@@ -927,8 +993,11 @@
 	if (value < 0)
 		return value;
 
-	if (acpi_callsetfunc(sony_nc_acpi_handle, *item->acpiset, value, NULL) < 0)
+	ret = sony_nc_int_call(sony_nc_acpi_handle, *item->acpiset,
+			(int *)&value, NULL);
+	if (ret < 0)
 		return -EIO;
+
 	item->value = value;
 	item->valid = 1;
 	return count;
@@ -948,15 +1017,15 @@
 
 static int sony_backlight_update_status(struct backlight_device *bd)
 {
-	return acpi_callsetfunc(sony_nc_acpi_handle, "SBRT",
-				bd->props.brightness + 1, NULL);
+	int arg = bd->props.brightness + 1;
+	return sony_nc_int_call(sony_nc_acpi_handle, "SBRT", &arg, NULL);
 }
 
 static int sony_backlight_get_brightness(struct backlight_device *bd)
 {
 	int value;
 
-	if (acpi_callgetfunc(sony_nc_acpi_handle, "GBRT", &value))
+	if (sony_nc_int_call(sony_nc_acpi_handle, "GBRT", NULL, &value))
 		return 0;
 	/* brightness levels are 1-based, while backlight ones are 0-based */
 	return value - 1;
@@ -1024,10 +1093,14 @@
 	{ 0x06, SONYPI_EVENT_FNKEY_RELEASED },
 	{ 0x87, SONYPI_EVENT_FNKEY_F7 },
 	{ 0x07, SONYPI_EVENT_FNKEY_RELEASED },
+	{ 0x88, SONYPI_EVENT_FNKEY_F8 },
+	{ 0x08, SONYPI_EVENT_FNKEY_RELEASED },
 	{ 0x89, SONYPI_EVENT_FNKEY_F9 },
 	{ 0x09, SONYPI_EVENT_FNKEY_RELEASED },
 	{ 0x8A, SONYPI_EVENT_FNKEY_F10 },
 	{ 0x0A, SONYPI_EVENT_FNKEY_RELEASED },
+	{ 0x8B, SONYPI_EVENT_FNKEY_F11 },
+	{ 0x0B, SONYPI_EVENT_FNKEY_RELEASED },
 	{ 0x8C, SONYPI_EVENT_FNKEY_F12 },
 	{ 0x0C, SONYPI_EVENT_FNKEY_RELEASED },
 	{ 0x9d, SONYPI_EVENT_ZOOM_PRESSED },
@@ -1063,63 +1136,116 @@
 	{ 0, 0 },
 };
 
+static int sony_nc_hotkeys_decode(u32 event, unsigned int handle)
+{
+	int ret = -EINVAL;
+	unsigned int result = 0;
+	struct sony_nc_event *key_event;
+
+	if (sony_call_snc_handle(handle, 0x200, &result)) {
+		dprintk("Unable to decode event 0x%.2x 0x%.2x\n", handle,
+				event);
+		return -EINVAL;
+	}
+
+	result &= 0xFF;
+
+	if (handle == 0x0100)
+		key_event = sony_100_events;
+	else
+		key_event = sony_127_events;
+
+	for (; key_event->data; key_event++) {
+		if (key_event->data == result) {
+			ret = key_event->event;
+			break;
+		}
+	}
+
+	if (!key_event->data)
+		pr_info("Unknown hotkey 0x%.2x/0x%.2x (handle 0x%.2x)\n",
+				event, result, handle);
+
+	return ret;
+}
+
 /*
  * ACPI callbacks
  */
 static void sony_nc_notify(struct acpi_device *device, u32 event)
 {
-	u32 ev = event;
+	u32 real_ev = event;
+	u8 ev_type = 0;
+	dprintk("sony_nc_notify, event: 0x%.2x\n", event);
 
-	if (ev >= 0x90) {
-		/* New-style event */
-		int result;
-		int key_handle = 0;
-		ev -= 0x90;
+	if (event >= 0x90) {
+		unsigned int result = 0;
+		unsigned int arg = 0;
+		unsigned int handle = 0;
+		unsigned int offset = event - 0x90;
 
-		if (sony_find_snc_handle(0x100) == ev)
-			key_handle = 0x100;
-		if (sony_find_snc_handle(0x127) == ev)
-			key_handle = 0x127;
-
-		if (key_handle) {
-			struct sony_nc_event *key_event;
-
-			if (sony_call_snc_handle(key_handle, 0x200, &result)) {
-				dprintk("sony_nc_notify, unable to decode"
-					" event 0x%.2x 0x%.2x\n", key_handle,
-					ev);
-				/* restore the original event */
-				ev = event;
-			} else {
-				ev = result & 0xFF;
-
-				if (key_handle == 0x100)
-					key_event = sony_100_events;
-				else
-					key_event = sony_127_events;
-
-				for (; key_event->data; key_event++) {
-					if (key_event->data == ev) {
-						ev = key_event->event;
-						break;
-					}
-				}
-
-				if (!key_event->data)
-					pr_info("Unknown event: 0x%x 0x%x\n",
-						key_handle, ev);
-				else
-					sony_laptop_report_input_event(ev);
-			}
-		} else if (sony_find_snc_handle(sony_rfkill_handle) == ev) {
-			sony_nc_rfkill_update();
+		if (offset >= ARRAY_SIZE(handles->cap)) {
+			pr_err("Event 0x%x outside of capabilities list\n",
+					event);
 			return;
 		}
-	} else
-		sony_laptop_report_input_event(ev);
+		handle = handles->cap[offset];
 
-	dprintk("sony_nc_notify, event: 0x%.2x\n", ev);
-	acpi_bus_generate_proc_event(sony_nc_acpi_device, 1, ev);
+		/* list of handles known for generating events */
+		switch (handle) {
+		/* hotkey event */
+		case 0x0100:
+		case 0x0127:
+			ev_type = 1;
+			real_ev = sony_nc_hotkeys_decode(event, handle);
+
+			if (real_ev > 0)
+				sony_laptop_report_input_event(real_ev);
+			else
+				/* restore the original event for reporting */
+				real_ev = event;
+
+			break;
+
+		/* wlan switch */
+		case 0x0124:
+		case 0x0135:
+			/* events on this handle are reported when the
+			 * switch changes position or for battery
+			 * events. We'll notify both of them but only
+			 * update the rfkill device status when the
+			 * switch is moved.
+			 */
+			ev_type = 2;
+			sony_call_snc_handle(handle, 0x0100, &result);
+			real_ev = result & 0x03;
+
+			/* hw switch event */
+			if (real_ev == 1)
+				sony_nc_rfkill_update();
+
+			break;
+
+		default:
+			dprintk("Unknown event 0x%x for handle 0x%x\n",
+					event, handle);
+			break;
+		}
+
+		/* clear the event (and the event reason when present) */
+		arg = 1 << offset;
+		sony_nc_int_call(sony_nc_acpi_handle, "SN05", &arg, &result);
+
+	} else {
+		/* old style event */
+		ev_type = 1;
+		sony_laptop_report_input_event(real_ev);
+	}
+
+	acpi_bus_generate_proc_event(sony_nc_acpi_device, ev_type, real_ev);
+
+	acpi_bus_generate_netlink_event(sony_nc_acpi_device->pnp.device_class,
+			dev_name(&sony_nc_acpi_device->dev), ev_type, real_ev);
 }
 
 static acpi_status sony_walk_callback(acpi_handle handle, u32 level,
@@ -1140,20 +1266,190 @@
 /*
  * ACPI device
  */
-static int sony_nc_function_setup(struct acpi_device *device)
+static void sony_nc_function_setup(struct acpi_device *device,
+		struct platform_device *pf_device)
 {
-	int result;
+	unsigned int i, result, bitmask, arg;
+
+	if (!handles)
+		return;
+
+	/* setup found handles here */
+	for (i = 0; i < ARRAY_SIZE(handles->cap); i++) {
+		unsigned int handle = handles->cap[i];
+
+		if (!handle)
+			continue;
+
+		dprintk("setting up handle 0x%.4x\n", handle);
+
+		switch (handle) {
+		case 0x0100:
+		case 0x0101:
+		case 0x0127:
+			/* setup hotkeys */
+			sony_call_snc_handle(handle, 0, &result);
+			break;
+		case 0x0102:
+			/* setup hotkeys */
+			sony_call_snc_handle(handle, 0x100, &result);
+			break;
+		case 0x0105:
+		case 0x0148:
+			/* touchpad enable/disable */
+			result = sony_nc_touchpad_setup(pf_device, handle);
+			if (result)
+				pr_err("couldn't set up touchpad control function (%d)\n",
+						result);
+			break;
+		case 0x0115:
+		case 0x0136:
+		case 0x013f:
+			result = sony_nc_battery_care_setup(pf_device, handle);
+			if (result)
+				pr_err("couldn't set up battery care function (%d)\n",
+						result);
+			break;
+		case 0x0119:
+			result = sony_nc_lid_resume_setup(pf_device);
+			if (result)
+				pr_err("couldn't set up lid resume function (%d)\n",
+						result);
+			break;
+		case 0x0122:
+			result = sony_nc_thermal_setup(pf_device);
+			if (result)
+				pr_err("couldn't set up thermal profile function (%d)\n",
+						result);
+			break;
+		case 0x0131:
+			result = sony_nc_highspeed_charging_setup(pf_device);
+			if (result)
+				pr_err("couldn't set up high speed charging function (%d)\n",
+				       result);
+			break;
+		case 0x0124:
+		case 0x0135:
+			result = sony_nc_rfkill_setup(device, handle);
+			if (result)
+				pr_err("couldn't set up rfkill support (%d)\n",
+						result);
+			break;
+		case 0x0137:
+		case 0x0143:
+			result = sony_nc_kbd_backlight_setup(pf_device, handle);
+			if (result)
+				pr_err("couldn't set up keyboard backlight function (%d)\n",
+						result);
+			break;
+		default:
+			continue;
+		}
+	}
 
 	/* Enable all events */
-	acpi_callsetfunc(sony_nc_acpi_handle, "SN02", 0xffff, &result);
+	arg = 0x10;
+	if (!sony_nc_int_call(sony_nc_acpi_handle, "SN00", &arg, &bitmask))
+		sony_nc_int_call(sony_nc_acpi_handle, "SN02", &bitmask,
+				&result);
+}
 
-	/* Setup hotkeys */
-	sony_call_snc_handle(0x0100, 0, &result);
-	sony_call_snc_handle(0x0101, 0, &result);
-	sony_call_snc_handle(0x0102, 0x100, &result);
-	sony_call_snc_handle(0x0127, 0, &result);
+static void sony_nc_function_cleanup(struct platform_device *pd)
+{
+	unsigned int i, result, bitmask, handle;
 
-	return 0;
+	/* get enabled events and disable them */
+	sony_nc_int_call(sony_nc_acpi_handle, "SN01", NULL, &bitmask);
+	sony_nc_int_call(sony_nc_acpi_handle, "SN03", &bitmask, &result);
+
+	/* cleanup handles here */
+	for (i = 0; i < ARRAY_SIZE(handles->cap); i++) {
+
+		handle = handles->cap[i];
+
+		if (!handle)
+			continue;
+
+		switch (handle) {
+		case 0x0105:
+		case 0x0148:
+			sony_nc_touchpad_cleanup(pd);
+			break;
+		case 0x0115:
+		case 0x0136:
+		case 0x013f:
+			sony_nc_battery_care_cleanup(pd);
+			break;
+		case 0x0119:
+			sony_nc_lid_resume_cleanup(pd);
+			break;
+		case 0x0122:
+			sony_nc_thermal_cleanup(pd);
+			break;
+		case 0x0131:
+			sony_nc_highspeed_charging_cleanup(pd);
+			break;
+		case 0x0124:
+		case 0x0135:
+			sony_nc_rfkill_cleanup();
+			break;
+		case 0x0137:
+		case 0x0143:
+			sony_nc_kbd_backlight_cleanup(pd);
+			break;
+		default:
+			continue;
+		}
+	}
+
+	/* finally cleanup the handles list */
+	sony_nc_handles_cleanup(pd);
+}
+
+static void sony_nc_function_resume(void)
+{
+	unsigned int i, result, bitmask, arg;
+
+	dprintk("Resuming SNC device\n");
+
+	for (i = 0; i < ARRAY_SIZE(handles->cap); i++) {
+		unsigned int handle = handles->cap[i];
+
+		if (!handle)
+			continue;
+
+		switch (handle) {
+		case 0x0100:
+		case 0x0101:
+		case 0x0127:
+			/* re-enable hotkeys */
+			sony_call_snc_handle(handle, 0, &result);
+			break;
+		case 0x0102:
+			/* re-enable hotkeys */
+			sony_call_snc_handle(handle, 0x100, &result);
+			break;
+		case 0x0122:
+			sony_nc_thermal_resume();
+			break;
+		case 0x0124:
+		case 0x0135:
+			sony_nc_rfkill_update();
+			break;
+		case 0x0137:
+		case 0x0143:
+			sony_nc_kbd_backlight_resume();
+			break;
+		default:
+			continue;
+		}
+	}
+
+	/* Enable all events */
+	arg = 0x10;
+	if (!sony_nc_int_call(sony_nc_acpi_handle, "SN00", &arg, &bitmask))
+		sony_nc_int_call(sony_nc_acpi_handle, "SN02", &bitmask,
+				&result);
 }
 
 static int sony_nc_resume(struct acpi_device *device)
@@ -1166,8 +1462,8 @@
 
 		if (!item->valid)
 			continue;
-		ret = acpi_callsetfunc(sony_nc_acpi_handle, *item->acpiset,
-				       item->value, NULL);
+		ret = sony_nc_int_call(sony_nc_acpi_handle, *item->acpiset,
+				       &item->value, NULL);
 		if (ret < 0) {
 			pr_err("%s: %d\n", __func__, ret);
 			break;
@@ -1176,21 +1472,14 @@
 
 	if (ACPI_SUCCESS(acpi_get_handle(sony_nc_acpi_handle, "ECON",
 					 &handle))) {
-		if (acpi_callsetfunc(sony_nc_acpi_handle, "ECON", 1, NULL))
+		int arg = 1;
+		if (sony_nc_int_call(sony_nc_acpi_handle, "ECON", &arg, NULL))
 			dprintk("ECON Method failed\n");
 	}
 
 	if (ACPI_SUCCESS(acpi_get_handle(sony_nc_acpi_handle, "SN00",
-					 &handle))) {
-		dprintk("Doing SNC setup\n");
-		sony_nc_function_setup(device);
-	}
-
-	/* re-read rfkill state */
-	sony_nc_rfkill_update();
-
-	/* restore kbd backlight states */
-	sony_nc_kbd_backlight_resume();
+					 &handle)))
+		sony_nc_function_resume();
 
 	return 0;
 }
@@ -1213,7 +1502,7 @@
 	int argument = sony_rfkill_address[(long) data] + 0x100;
 
 	if (!blocked)
-		argument |= 0xff0000;
+		argument |= 0x030000;
 
 	return sony_call_snc_handle(sony_rfkill_handle, argument, &result);
 }
@@ -1230,7 +1519,7 @@
 	enum rfkill_type type;
 	const char *name;
 	int result;
-	bool hwblock;
+	bool hwblock, swblock;
 
 	switch (nc_type) {
 	case SONY_WIFI:
@@ -1258,8 +1547,21 @@
 	if (!rfk)
 		return -ENOMEM;
 
-	sony_call_snc_handle(sony_rfkill_handle, 0x200, &result);
+	if (sony_call_snc_handle(sony_rfkill_handle, 0x200, &result) < 0) {
+		rfkill_destroy(rfk);
+		return -1;
+	}
 	hwblock = !(result & 0x1);
+
+	if (sony_call_snc_handle(sony_rfkill_handle,
+				sony_rfkill_address[nc_type],
+				&result) < 0) {
+		rfkill_destroy(rfk);
+		return -1;
+	}
+	swblock = !(result & 0x2);
+
+	rfkill_init_sw_state(rfk, swblock);
 	rfkill_set_hw_state(rfk, hwblock);
 
 	err = rfkill_register(rfk);
@@ -1295,101 +1597,79 @@
 
 		sony_call_snc_handle(sony_rfkill_handle, argument, &result);
 		rfkill_set_states(sony_rfkill_devices[i],
-				  !(result & 0xf), false);
+				  !(result & 0x2), false);
 	}
 }
 
-static void sony_nc_rfkill_setup(struct acpi_device *device)
+static int sony_nc_rfkill_setup(struct acpi_device *device,
+		unsigned int handle)
 {
-	int offset;
-	u8 dev_code, i;
-	acpi_status status;
-	struct acpi_object_list params;
-	union acpi_object in_obj;
-	union acpi_object *device_enum;
-	struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL };
+	u64 offset;
+	int i;
+	unsigned char buffer[32] = { 0 };
 
-	offset = sony_find_snc_handle(0x124);
-	if (offset == -1) {
-		offset = sony_find_snc_handle(0x135);
-		if (offset == -1)
-			return;
-		else
-			sony_rfkill_handle = 0x135;
-	} else
-		sony_rfkill_handle = 0x124;
-	dprintk("Found rkfill handle: 0x%.4x\n", sony_rfkill_handle);
+	offset = sony_find_snc_handle(handle);
+	sony_rfkill_handle = handle;
 
-	/* need to read the whole buffer returned by the acpi call to SN06
-	 * here otherwise we may miss some features
+	i = sony_nc_buffer_call(sony_nc_acpi_handle, "SN06", &offset, buffer,
+			32);
+	if (i < 0)
+		return i;
+
+	/* The buffer is filled with magic numbers describing the devices
+	 * available, 0xff terminates the enumeration.
+	 * Known codes:
+	 *	0x00 WLAN
+	 *	0x10 BLUETOOTH
+	 *	0x20 WWAN GPRS-EDGE
+	 *	0x21 WWAN HSDPA
+	 *	0x22 WWAN EV-DO
+	 *	0x23 WWAN GPS
+	 *	0x25 Gobi WWAN no GPS
+	 *	0x26 Gobi WWAN + GPS
+	 *	0x28 Gobi WWAN no GPS
+	 *	0x29 Gobi WWAN + GPS
+	 *	0x30 WIMAX
+	 *	0x50 Gobi WWAN no GPS
+	 *	0x51 Gobi WWAN + GPS
+	 *	0x70 no SIM card slot
+	 *	0x71 SIM card slot
 	 */
-	params.count = 1;
-	params.pointer = &in_obj;
-	in_obj.type = ACPI_TYPE_INTEGER;
-	in_obj.integer.value = offset;
-	status = acpi_evaluate_object(sony_nc_acpi_handle, "SN06", &params,
-			&buffer);
-	if (ACPI_FAILURE(status)) {
-		dprintk("Radio device enumeration failed\n");
-		return;
-	}
+	for (i = 0; i < ARRAY_SIZE(buffer); i++) {
 
-	device_enum = (union acpi_object *) buffer.pointer;
-	if (!device_enum) {
-		pr_err("No SN06 return object\n");
-		goto out_no_enum;
-	}
-	if (device_enum->type != ACPI_TYPE_BUFFER) {
-		pr_err("Invalid SN06 return object 0x%.2x\n",
-		       device_enum->type);
-		goto out_no_enum;
-	}
-
-	/* the buffer is filled with magic numbers describing the devices
-	 * available, 0xff terminates the enumeration
-	 */
-	for (i = 0; i < device_enum->buffer.length; i++) {
-
-		dev_code = *(device_enum->buffer.pointer + i);
-		if (dev_code == 0xff)
+		if (buffer[i] == 0xff)
 			break;
 
-		dprintk("Radio devices, looking at 0x%.2x\n", dev_code);
+		dprintk("Radio devices, found 0x%.2x\n", buffer[i]);
 
-		if (dev_code == 0 && !sony_rfkill_devices[SONY_WIFI])
+		if (buffer[i] == 0 && !sony_rfkill_devices[SONY_WIFI])
 			sony_nc_setup_rfkill(device, SONY_WIFI);
 
-		if (dev_code == 0x10 && !sony_rfkill_devices[SONY_BLUETOOTH])
+		if (buffer[i] == 0x10 && !sony_rfkill_devices[SONY_BLUETOOTH])
 			sony_nc_setup_rfkill(device, SONY_BLUETOOTH);
 
-		if ((0xf0 & dev_code) == 0x20 &&
+		if (((0xf0 & buffer[i]) == 0x20 ||
+					(0xf0 & buffer[i]) == 0x50) &&
 				!sony_rfkill_devices[SONY_WWAN])
 			sony_nc_setup_rfkill(device, SONY_WWAN);
 
-		if (dev_code == 0x30 && !sony_rfkill_devices[SONY_WIMAX])
+		if (buffer[i] == 0x30 && !sony_rfkill_devices[SONY_WIMAX])
 			sony_nc_setup_rfkill(device, SONY_WIMAX);
 	}
-
-out_no_enum:
-	kfree(buffer.pointer);
-	return;
+	return 0;
 }
 
 /* Keyboard backlight feature */
-#define KBDBL_HANDLER	0x137
-#define KBDBL_PRESENT	0xB00
-#define	SET_MODE	0xC00
-#define SET_STATE	0xD00
-#define SET_TIMEOUT	0xE00
-
 struct kbd_backlight {
-	int mode;
-	int timeout;
+	unsigned int handle;
+	unsigned int base;
+	unsigned int mode;
+	unsigned int timeout;
 	struct device_attribute mode_attr;
 	struct device_attribute timeout_attr;
 };
 
-static struct kbd_backlight *kbdbl_handle;
+static struct kbd_backlight *kbdbl_ctl;
 
 static ssize_t __sony_nc_kbd_backlight_mode_set(u8 value)
 {
@@ -1398,15 +1678,15 @@
 	if (value > 1)
 		return -EINVAL;
 
-	if (sony_call_snc_handle(KBDBL_HANDLER,
-				(value << 0x10) | SET_MODE, &result))
+	if (sony_call_snc_handle(kbdbl_ctl->handle,
+				(value << 0x10) | (kbdbl_ctl->base), &result))
 		return -EIO;
 
 	/* Try to turn the light on/off immediately */
-	sony_call_snc_handle(KBDBL_HANDLER, (value << 0x10) | SET_STATE,
-			&result);
+	sony_call_snc_handle(kbdbl_ctl->handle,
+			(value << 0x10) | (kbdbl_ctl->base + 0x100), &result);
 
-	kbdbl_handle->mode = value;
+	kbdbl_ctl->mode = value;
 
 	return 0;
 }
@@ -1421,7 +1701,7 @@
 	if (count > 31)
 		return -EINVAL;
 
-	if (strict_strtoul(buffer, 10, &value))
+	if (kstrtoul(buffer, 10, &value))
 		return -EINVAL;
 
 	ret = __sony_nc_kbd_backlight_mode_set(value);
@@ -1435,7 +1715,7 @@
 		struct device_attribute *attr, char *buffer)
 {
 	ssize_t count = 0;
-	count = snprintf(buffer, PAGE_SIZE, "%d\n", kbdbl_handle->mode);
+	count = snprintf(buffer, PAGE_SIZE, "%d\n", kbdbl_ctl->mode);
 	return count;
 }
 
@@ -1446,11 +1726,11 @@
 	if (value > 3)
 		return -EINVAL;
 
-	if (sony_call_snc_handle(KBDBL_HANDLER,
-				(value << 0x10) | SET_TIMEOUT, &result))
+	if (sony_call_snc_handle(kbdbl_ctl->handle, (value << 0x10) |
+				(kbdbl_ctl->base + 0x200), &result))
 		return -EIO;
 
-	kbdbl_handle->timeout = value;
+	kbdbl_ctl->timeout = value;
 
 	return 0;
 }
@@ -1465,7 +1745,7 @@
 	if (count > 31)
 		return -EINVAL;
 
-	if (strict_strtoul(buffer, 10, &value))
+	if (kstrtoul(buffer, 10, &value))
 		return -EINVAL;
 
 	ret = __sony_nc_kbd_backlight_timeout_set(value);
@@ -1479,39 +1759,58 @@
 		struct device_attribute *attr, char *buffer)
 {
 	ssize_t count = 0;
-	count = snprintf(buffer, PAGE_SIZE, "%d\n", kbdbl_handle->timeout);
+	count = snprintf(buffer, PAGE_SIZE, "%d\n", kbdbl_ctl->timeout);
 	return count;
 }
 
-static int sony_nc_kbd_backlight_setup(struct platform_device *pd)
+static int sony_nc_kbd_backlight_setup(struct platform_device *pd,
+		unsigned int handle)
 {
 	int result;
+	int ret = 0;
 
-	if (sony_call_snc_handle(KBDBL_HANDLER, KBDBL_PRESENT, &result))
-		return 0;
-	if (!(result & 0x02))
-		return 0;
+	/* verify the kbd backlight presence; these handles are not used
+	 * exclusively for the keyboard backlight
+	 */
+	ret = sony_call_snc_handle(handle, handle == 0x0137 ? 0x0B00 : 0x0100,
+			&result);
+	if (ret)
+		return ret;
 
-	kbdbl_handle = kzalloc(sizeof(*kbdbl_handle), GFP_KERNEL);
-	if (!kbdbl_handle)
+	if ((handle == 0x0137 && !(result & 0x02)) ||
+			!(result & 0x01)) {
+		dprintk("no keyboard backlight found\n");
+		return 0;
+	}
+
+	kbdbl_ctl = kzalloc(sizeof(*kbdbl_ctl), GFP_KERNEL);
+	if (!kbdbl_ctl)
 		return -ENOMEM;
 
-	sysfs_attr_init(&kbdbl_handle->mode_attr.attr);
-	kbdbl_handle->mode_attr.attr.name = "kbd_backlight";
-	kbdbl_handle->mode_attr.attr.mode = S_IRUGO | S_IWUSR;
-	kbdbl_handle->mode_attr.show = sony_nc_kbd_backlight_mode_show;
-	kbdbl_handle->mode_attr.store = sony_nc_kbd_backlight_mode_store;
+	kbdbl_ctl->handle = handle;
+	if (handle == 0x0137)
+		kbdbl_ctl->base = 0x0C00;
+	else
+		kbdbl_ctl->base = 0x4000;
 
-	sysfs_attr_init(&kbdbl_handle->timeout_attr.attr);
-	kbdbl_handle->timeout_attr.attr.name = "kbd_backlight_timeout";
-	kbdbl_handle->timeout_attr.attr.mode = S_IRUGO | S_IWUSR;
-	kbdbl_handle->timeout_attr.show = sony_nc_kbd_backlight_timeout_show;
-	kbdbl_handle->timeout_attr.store = sony_nc_kbd_backlight_timeout_store;
+	sysfs_attr_init(&kbdbl_ctl->mode_attr.attr);
+	kbdbl_ctl->mode_attr.attr.name = "kbd_backlight";
+	kbdbl_ctl->mode_attr.attr.mode = S_IRUGO | S_IWUSR;
+	kbdbl_ctl->mode_attr.show = sony_nc_kbd_backlight_mode_show;
+	kbdbl_ctl->mode_attr.store = sony_nc_kbd_backlight_mode_store;
 
-	if (device_create_file(&pd->dev, &kbdbl_handle->mode_attr))
+	sysfs_attr_init(&kbdbl_ctl->timeout_attr.attr);
+	kbdbl_ctl->timeout_attr.attr.name = "kbd_backlight_timeout";
+	kbdbl_ctl->timeout_attr.attr.mode = S_IRUGO | S_IWUSR;
+	kbdbl_ctl->timeout_attr.show = sony_nc_kbd_backlight_timeout_show;
+	kbdbl_ctl->timeout_attr.store = sony_nc_kbd_backlight_timeout_store;
+
+	ret = device_create_file(&pd->dev, &kbdbl_ctl->mode_attr);
+	if (ret)
 		goto outkzalloc;
 
-	if (device_create_file(&pd->dev, &kbdbl_handle->timeout_attr))
+	ret = device_create_file(&pd->dev, &kbdbl_ctl->timeout_attr);
+	if (ret)
 		goto outmode;
 
 	__sony_nc_kbd_backlight_mode_set(kbd_backlight);
@@ -1520,57 +1819,661 @@
 	return 0;
 
 outmode:
-	device_remove_file(&pd->dev, &kbdbl_handle->mode_attr);
+	device_remove_file(&pd->dev, &kbdbl_ctl->mode_attr);
 outkzalloc:
-	kfree(kbdbl_handle);
-	kbdbl_handle = NULL;
-	return -1;
+	kfree(kbdbl_ctl);
+	kbdbl_ctl = NULL;
+	return ret;
 }
 
-static int sony_nc_kbd_backlight_cleanup(struct platform_device *pd)
+static void sony_nc_kbd_backlight_cleanup(struct platform_device *pd)
 {
-	if (kbdbl_handle) {
+	if (kbdbl_ctl) {
 		int result;
 
-		device_remove_file(&pd->dev, &kbdbl_handle->mode_attr);
-		device_remove_file(&pd->dev, &kbdbl_handle->timeout_attr);
+		device_remove_file(&pd->dev, &kbdbl_ctl->mode_attr);
+		device_remove_file(&pd->dev, &kbdbl_ctl->timeout_attr);
 
 		/* restore the default hw behaviour */
-		sony_call_snc_handle(KBDBL_HANDLER, 0x1000 | SET_MODE, &result);
-		sony_call_snc_handle(KBDBL_HANDLER, SET_TIMEOUT, &result);
+		sony_call_snc_handle(kbdbl_ctl->handle,
+				kbdbl_ctl->base | 0x10000, &result);
+		sony_call_snc_handle(kbdbl_ctl->handle,
+				kbdbl_ctl->base + 0x200, &result);
 
-		kfree(kbdbl_handle);
+		kfree(kbdbl_ctl);
+		kbdbl_ctl = NULL;
 	}
-	return 0;
 }
 
 static void sony_nc_kbd_backlight_resume(void)
 {
 	int ignore = 0;
 
-	if (!kbdbl_handle)
+	if (!kbdbl_ctl)
 		return;
 
-	if (kbdbl_handle->mode == 0)
-		sony_call_snc_handle(KBDBL_HANDLER, SET_MODE, &ignore);
-
-	if (kbdbl_handle->timeout != 0)
-		sony_call_snc_handle(KBDBL_HANDLER,
-				(kbdbl_handle->timeout << 0x10) | SET_TIMEOUT,
+	if (kbdbl_ctl->mode == 0)
+		sony_call_snc_handle(kbdbl_ctl->handle, kbdbl_ctl->base,
 				&ignore);
+
+	if (kbdbl_ctl->timeout != 0)
+		sony_call_snc_handle(kbdbl_ctl->handle,
+				(kbdbl_ctl->base + 0x200) |
+				(kbdbl_ctl->timeout << 0x10), &ignore);
+}
+
+struct battery_care_control {
+	struct device_attribute attrs[2];
+	unsigned int handle;
+};
+static struct battery_care_control *bcare_ctl;
+
+static ssize_t sony_nc_battery_care_limit_store(struct device *dev,
+		struct device_attribute *attr,
+		const char *buffer, size_t count)
+{
+	unsigned int result, cmd;
+	unsigned long value;
+
+	if (count > 31)
+		return -EINVAL;
+
+	if (kstrtoul(buffer, 10, &value))
+		return -EINVAL;
+
+	/*  limit values (2 bits):
+	 *  00 - none
+	 *  01 - 80%
+	 *  10 - 50%
+	 *  11 - 100%
+	 *
+	 *  bit 0: 0 disable BCL, 1 enable BCL
+	 *  bit 1: 1 tell to store the battery limit (see bits 6,7) too
+	 *  bits 2,3: reserved
+	 *  bits 4,5: store the limit into the EC
+	 *  bits 6,7: store the limit into the battery
+	 */
+
+	/*
+	 * handle 0x0115 should allow storing on battery too;
+	 * handle 0x0136 same as 0x0115 + health status;
+	 * handle 0x013f, same as 0x0136 but no storing on the battery
+	 *
+	 * Store only inside the EC for now, regardless the handle number
+	 */
+	if (value == 0)
+		/* disable limits */
+		cmd = 0x0;
+
+	else if (value <= 50)
+		cmd = 0x21;
+
+	else if (value <= 80)
+		cmd = 0x11;
+
+	else if (value <= 100)
+		cmd = 0x31;
+
+	else
+		return -EINVAL;
+
+	if (sony_call_snc_handle(bcare_ctl->handle, (cmd << 0x10) | 0x0100,
+				&result))
+		return -EIO;
+
+	return count;
+}
+
+static ssize_t sony_nc_battery_care_limit_show(struct device *dev,
+		struct device_attribute *attr, char *buffer)
+{
+	unsigned int result, status;
+
+	if (sony_call_snc_handle(bcare_ctl->handle, 0x0000, &result))
+		return -EIO;
+
+	status = (result & 0x01) ? ((result & 0x30) >> 0x04) : 0;
+	switch (status) {
+	case 1:
+		status = 80;
+		break;
+	case 2:
+		status = 50;
+		break;
+	case 3:
+		status = 100;
+		break;
+	default:
+		status = 0;
+		break;
+	}
+
+	return snprintf(buffer, PAGE_SIZE, "%d\n", status);
+}
+
+static ssize_t sony_nc_battery_care_health_show(struct device *dev,
+		struct device_attribute *attr, char *buffer)
+{
+	ssize_t count = 0;
+	unsigned int health;
+
+	if (sony_call_snc_handle(bcare_ctl->handle, 0x0200, &health))
+		return -EIO;
+
+	count = snprintf(buffer, PAGE_SIZE, "%d\n", health & 0xff);
+
+	return count;
+}
+
+static int sony_nc_battery_care_setup(struct platform_device *pd,
+		unsigned int handle)
+{
+	int ret = 0;
+
+	bcare_ctl = kzalloc(sizeof(struct battery_care_control), GFP_KERNEL);
+	if (!bcare_ctl)
+		return -ENOMEM;
+
+	bcare_ctl->handle = handle;
+
+	sysfs_attr_init(&bcare_ctl->attrs[0].attr);
+	bcare_ctl->attrs[0].attr.name = "battery_care_limiter";
+	bcare_ctl->attrs[0].attr.mode = S_IRUGO | S_IWUSR;
+	bcare_ctl->attrs[0].show = sony_nc_battery_care_limit_show;
+	bcare_ctl->attrs[0].store = sony_nc_battery_care_limit_store;
+
+	ret = device_create_file(&pd->dev, &bcare_ctl->attrs[0]);
+	if (ret)
+		goto outkzalloc;
+
+	/* 0x0115 is for models with no health reporting capability */
+	if (handle == 0x0115)
+		return 0;
+
+	sysfs_attr_init(&bcare_ctl->attrs[1].attr);
+	bcare_ctl->attrs[1].attr.name = "battery_care_health";
+	bcare_ctl->attrs[1].attr.mode = S_IRUGO;
+	bcare_ctl->attrs[1].show = sony_nc_battery_care_health_show;
+
+	ret = device_create_file(&pd->dev, &bcare_ctl->attrs[1]);
+	if (ret)
+		goto outlimiter;
+
+	return 0;
+
+outlimiter:
+	device_remove_file(&pd->dev, &bcare_ctl->attrs[0]);
+
+outkzalloc:
+	kfree(bcare_ctl);
+	bcare_ctl = NULL;
+
+	return ret;
+}
+
+static void sony_nc_battery_care_cleanup(struct platform_device *pd)
+{
+	if (bcare_ctl) {
+		device_remove_file(&pd->dev, &bcare_ctl->attrs[0]);
+		if (bcare_ctl->handle != 0x0115)
+			device_remove_file(&pd->dev, &bcare_ctl->attrs[1]);
+
+		kfree(bcare_ctl);
+		bcare_ctl = NULL;
+	}
+}
+
+struct snc_thermal_ctrl {
+	unsigned int mode;
+	unsigned int profiles;
+	struct device_attribute mode_attr;
+	struct device_attribute profiles_attr;
+};
+static struct snc_thermal_ctrl *th_handle;
+
+#define THM_PROFILE_MAX 3
+static const char * const snc_thermal_profiles[] = {
+	"balanced",
+	"silent",
+	"performance"
+};
+
+static int sony_nc_thermal_mode_set(unsigned short mode)
+{
+	unsigned int result;
+
+	/* the thermal profile seems to be a two bit bitmask:
+	 * lsb -> silent
+	 * msb -> performance
+	 * no bit set is the normal operation and is always valid
+	 * Some vaio models only have "balanced" and "performance"
+	 */
+	if ((mode && !(th_handle->profiles & mode)) || mode >= THM_PROFILE_MAX)
+		return -EINVAL;
+
+	if (sony_call_snc_handle(0x0122, mode << 0x10 | 0x0200, &result))
+		return -EIO;
+
+	th_handle->mode = mode;
+
+	return 0;
+}
+
+static int sony_nc_thermal_mode_get(void)
+{
+	unsigned int result;
+
+	if (sony_call_snc_handle(0x0122, 0x0100, &result))
+		return -EIO;
+
+	return result & 0xff;
+}
+
+static ssize_t sony_nc_thermal_profiles_show(struct device *dev,
+		struct device_attribute *attr, char *buffer)
+{
+	short cnt;
+	size_t idx = 0;
+
+	for (cnt = 0; cnt < THM_PROFILE_MAX; cnt++) {
+		if (!cnt || (th_handle->profiles & cnt))
+			idx += snprintf(buffer + idx, PAGE_SIZE - idx, "%s ",
+					snc_thermal_profiles[cnt]);
+	}
+	idx += snprintf(buffer + idx, PAGE_SIZE - idx, "\n");
+
+	return idx;
+}
+
+static ssize_t sony_nc_thermal_mode_store(struct device *dev,
+		struct device_attribute *attr,
+		const char *buffer, size_t count)
+{
+	unsigned short cmd;
+	size_t len = count;
+
+	if (count == 0)
+		return -EINVAL;
+
+	/* skip the newline if present */
+	if (buffer[len - 1] == '\n')
+		len--;
+
+	for (cmd = 0; cmd < THM_PROFILE_MAX; cmd++)
+		if (strncmp(buffer, snc_thermal_profiles[cmd], len) == 0)
+			break;
+
+	if (sony_nc_thermal_mode_set(cmd))
+		return -EIO;
+
+	return count;
+}
+
+static ssize_t sony_nc_thermal_mode_show(struct device *dev,
+		struct device_attribute *attr, char *buffer)
+{
+	ssize_t count = 0;
+	int mode = sony_nc_thermal_mode_get();
+
+	if (mode < 0)
+		return mode;
+
+	count = snprintf(buffer, PAGE_SIZE, "%s\n", snc_thermal_profiles[mode]);
+
+	return count;
+}
+
+static int sony_nc_thermal_setup(struct platform_device *pd)
+{
+	int ret = 0;
+	th_handle = kzalloc(sizeof(struct snc_thermal_ctrl), GFP_KERNEL);
+	if (!th_handle)
+		return -ENOMEM;
+
+	ret = sony_call_snc_handle(0x0122, 0x0000, &th_handle->profiles);
+	if (ret) {
+		pr_warn("couldn't read the thermal profiles\n");
+		goto outkzalloc;
+	}
+
+	ret = sony_nc_thermal_mode_get();
+	if (ret < 0) {
+		pr_warn("couldn't read the current thermal profile\n");
+		goto outkzalloc;
+	}
+	th_handle->mode = ret;
+
+	sysfs_attr_init(&th_handle->profiles_attr.attr);
+	th_handle->profiles_attr.attr.name = "thermal_profiles";
+	th_handle->profiles_attr.attr.mode = S_IRUGO;
+	th_handle->profiles_attr.show = sony_nc_thermal_profiles_show;
+
+	sysfs_attr_init(&th_handle->mode_attr.attr);
+	th_handle->mode_attr.attr.name = "thermal_control";
+	th_handle->mode_attr.attr.mode = S_IRUGO | S_IWUSR;
+	th_handle->mode_attr.show = sony_nc_thermal_mode_show;
+	th_handle->mode_attr.store = sony_nc_thermal_mode_store;
+
+	ret = device_create_file(&pd->dev, &th_handle->profiles_attr);
+	if (ret)
+		goto outkzalloc;
+
+	ret = device_create_file(&pd->dev, &th_handle->mode_attr);
+	if (ret)
+		goto outprofiles;
+
+	return 0;
+
+outprofiles:
+	device_remove_file(&pd->dev, &th_handle->profiles_attr);
+outkzalloc:
+	kfree(th_handle);
+	th_handle = NULL;
+	return ret;
+}
+
+static void sony_nc_thermal_cleanup(struct platform_device *pd)
+{
+	if (th_handle) {
+		device_remove_file(&pd->dev, &th_handle->profiles_attr);
+		device_remove_file(&pd->dev, &th_handle->mode_attr);
+		kfree(th_handle);
+		th_handle = NULL;
+	}
+}
+
+static void sony_nc_thermal_resume(void)
+{
+	unsigned int status = sony_nc_thermal_mode_get();
+
+	if (status != th_handle->mode)
+		sony_nc_thermal_mode_set(th_handle->mode);
+}
+
+/* resume on LID open */
+struct snc_lid_resume_control {
+	struct device_attribute attrs[3];
+	unsigned int status;
+};
+static struct snc_lid_resume_control *lid_ctl;
+
+static ssize_t sony_nc_lid_resume_store(struct device *dev,
+					struct device_attribute *attr,
+					const char *buffer, size_t count)
+{
+	unsigned int result, pos;
+	unsigned long value;
+	if (count > 31)
+		return -EINVAL;
+
+	if (kstrtoul(buffer, 10, &value) || value > 1)
+		return -EINVAL;
+
+	/* the value we have to write to SNC is a bitmask:
+	 * +--------------+
+	 * | S3 | S4 | S5 |
+	 * +--------------+
+	 *   2    1    0
+	 */
+	if (strcmp(attr->attr.name, "lid_resume_S3") == 0)
+		pos = 2;
+	else if (strcmp(attr->attr.name, "lid_resume_S4") == 0)
+		pos = 1;
+	else if (strcmp(attr->attr.name, "lid_resume_S5") == 0)
+		pos = 0;
+	else
+		return -EINVAL;
+
+	if (value)
+		value = lid_ctl->status | (1 << pos);
+	else
+		value = lid_ctl->status & ~(1 << pos);
+
+	if (sony_call_snc_handle(0x0119, value << 0x10 | 0x0100, &result))
+		return -EIO;
+
+	lid_ctl->status = value;
+
+	return count;
+}
+
+static ssize_t sony_nc_lid_resume_show(struct device *dev,
+				       struct device_attribute *attr, char *buffer)
+{
+	unsigned int pos;
+
+	if (strcmp(attr->attr.name, "lid_resume_S3") == 0)
+		pos = 2;
+	else if (strcmp(attr->attr.name, "lid_resume_S4") == 0)
+		pos = 1;
+	else if (strcmp(attr->attr.name, "lid_resume_S5") == 0)
+		pos = 0;
+	else
+		return -EINVAL;
+
+	return snprintf(buffer, PAGE_SIZE, "%d\n",
+			(lid_ctl->status >> pos) & 0x01);
+}
+
+static int sony_nc_lid_resume_setup(struct platform_device *pd)
+{
+	unsigned int result;
+	int i;
+
+	if (sony_call_snc_handle(0x0119, 0x0000, &result))
+		return -EIO;
+
+	lid_ctl = kzalloc(sizeof(struct snc_lid_resume_control), GFP_KERNEL);
+	if (!lid_ctl)
+		return -ENOMEM;
+
+	lid_ctl->status = result & 0x7;
+
+	sysfs_attr_init(&lid_ctl->attrs[0].attr);
+	lid_ctl->attrs[0].attr.name = "lid_resume_S3";
+	lid_ctl->attrs[0].attr.mode = S_IRUGO | S_IWUSR;
+	lid_ctl->attrs[0].show = sony_nc_lid_resume_show;
+	lid_ctl->attrs[0].store = sony_nc_lid_resume_store;
+
+	sysfs_attr_init(&lid_ctl->attrs[1].attr);
+	lid_ctl->attrs[1].attr.name = "lid_resume_S4";
+	lid_ctl->attrs[1].attr.mode = S_IRUGO | S_IWUSR;
+	lid_ctl->attrs[1].show = sony_nc_lid_resume_show;
+	lid_ctl->attrs[1].store = sony_nc_lid_resume_store;
+
+	sysfs_attr_init(&lid_ctl->attrs[2].attr);
+	lid_ctl->attrs[2].attr.name = "lid_resume_S5";
+	lid_ctl->attrs[2].attr.mode = S_IRUGO | S_IWUSR;
+	lid_ctl->attrs[2].show = sony_nc_lid_resume_show;
+	lid_ctl->attrs[2].store = sony_nc_lid_resume_store;
+
+	for (i = 0; i < 3; i++) {
+		result = device_create_file(&pd->dev, &lid_ctl->attrs[i]);
+		if (result)
+			goto liderror;
+	}
+
+	return 0;
+
+liderror:
+	for (; i > 0; i--)
+		device_remove_file(&pd->dev, &lid_ctl->attrs[i]);
+
+	kfree(lid_ctl);
+	lid_ctl = NULL;
+
+	return result;
+}
+
+static void sony_nc_lid_resume_cleanup(struct platform_device *pd)
+{
+	int i;
+
+	if (lid_ctl) {
+		for (i = 0; i < 3; i++)
+			device_remove_file(&pd->dev, &lid_ctl->attrs[i]);
+
+		kfree(lid_ctl);
+		lid_ctl = NULL;
+	}
+}
+
+/* High speed charging function */
+static struct device_attribute *hsc_handle;
+
+static ssize_t sony_nc_highspeed_charging_store(struct device *dev,
+		struct device_attribute *attr,
+		const char *buffer, size_t count)
+{
+	unsigned int result;
+	unsigned long value;
+
+	if (count > 31)
+		return -EINVAL;
+
+	if (kstrtoul(buffer, 10, &value) || value > 1)
+		return -EINVAL;
+
+	if (sony_call_snc_handle(0x0131, value << 0x10 | 0x0200, &result))
+		return -EIO;
+
+	return count;
+}
+
+static ssize_t sony_nc_highspeed_charging_show(struct device *dev,
+		struct device_attribute *attr, char *buffer)
+{
+	unsigned int result;
+
+	if (sony_call_snc_handle(0x0131, 0x0100, &result))
+		return -EIO;
+
+	return snprintf(buffer, PAGE_SIZE, "%d\n", result & 0x01);
+}
+
+static int sony_nc_highspeed_charging_setup(struct platform_device *pd)
+{
+	unsigned int result;
+
+	if (sony_call_snc_handle(0x0131, 0x0000, &result) || !(result & 0x01)) {
+		/* some models advertise the handle but have no implementation
+		 * for it
+		 */
+		pr_info("No High Speed Charging capability found\n");
+		return 0;
+	}
+
+	hsc_handle = kzalloc(sizeof(struct device_attribute), GFP_KERNEL);
+	if (!hsc_handle)
+		return -ENOMEM;
+
+	sysfs_attr_init(&hsc_handle->attr);
+	hsc_handle->attr.name = "battery_highspeed_charging";
+	hsc_handle->attr.mode = S_IRUGO | S_IWUSR;
+	hsc_handle->show = sony_nc_highspeed_charging_show;
+	hsc_handle->store = sony_nc_highspeed_charging_store;
+
+	result = device_create_file(&pd->dev, hsc_handle);
+	if (result) {
+		kfree(hsc_handle);
+		hsc_handle = NULL;
+		return result;
+	}
+
+	return 0;
+}
+
+static void sony_nc_highspeed_charging_cleanup(struct platform_device *pd)
+{
+	if (hsc_handle) {
+		device_remove_file(&pd->dev, hsc_handle);
+		kfree(hsc_handle);
+		hsc_handle = NULL;
+	}
+}
+
+/* Touchpad enable/disable */
+struct touchpad_control {
+	struct device_attribute attr;
+	int handle;
+};
+static struct touchpad_control *tp_ctl;
+
+static ssize_t sony_nc_touchpad_store(struct device *dev,
+		struct device_attribute *attr, const char *buffer, size_t count)
+{
+	unsigned int result;
+	unsigned long value;
+
+	if (count > 31)
+		return -EINVAL;
+
+	if (kstrtoul(buffer, 10, &value) || value > 1)
+		return -EINVAL;
+
+	/* sysfs: 0 disabled, 1 enabled
+	 * EC: 0 enabled, 1 disabled
+	 */
+	if (sony_call_snc_handle(tp_ctl->handle,
+				(!value << 0x10) | 0x100, &result))
+		return -EIO;
+
+	return count;
+}
+
+static ssize_t sony_nc_touchpad_show(struct device *dev,
+		struct device_attribute *attr, char *buffer)
+{
+	unsigned int result;
+
+	if (sony_call_snc_handle(tp_ctl->handle, 0x000, &result))
+		return -EINVAL;
+
+	return snprintf(buffer, PAGE_SIZE, "%d\n", !(result & 0x01));
+}
+
+static int sony_nc_touchpad_setup(struct platform_device *pd,
+		unsigned int handle)
+{
+	int ret = 0;
+
+	tp_ctl = kzalloc(sizeof(struct touchpad_control), GFP_KERNEL);
+	if (!tp_ctl)
+		return -ENOMEM;
+
+	tp_ctl->handle = handle;
+
+	sysfs_attr_init(&tp_ctl->attr.attr);
+	tp_ctl->attr.attr.name = "touchpad";
+	tp_ctl->attr.attr.mode = S_IRUGO | S_IWUSR;
+	tp_ctl->attr.show = sony_nc_touchpad_show;
+	tp_ctl->attr.store = sony_nc_touchpad_store;
+
+	ret = device_create_file(&pd->dev, &tp_ctl->attr);
+	if (ret) {
+		kfree(tp_ctl);
+		tp_ctl = NULL;
+	}
+
+	return ret;
+}
+
+static void sony_nc_touchpad_cleanup(struct platform_device *pd)
+{
+	if (tp_ctl) {
+		device_remove_file(&pd->dev, &tp_ctl->attr);
+		kfree(tp_ctl);
+		tp_ctl = NULL;
+	}
 }
 
 static void sony_nc_backlight_ng_read_limits(int handle,
 		struct sony_backlight_props *props)
 {
-	int offset;
-	acpi_status status;
-	u8 brlvl, i;
+	u64 offset;
+	int i;
 	u8 min = 0xff, max = 0x00;
-	struct acpi_object_list params;
-	union acpi_object in_obj;
-	union acpi_object *lvl_enum;
-	struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL };
+	unsigned char buffer[32] = { 0 };
 
 	props->handle = handle;
 	props->offset = 0;
@@ -1583,50 +2486,31 @@
 	/* try to read the boundaries from ACPI tables, if we fail the above
 	 * defaults should be reasonable
 	 */
-	params.count = 1;
-	params.pointer = &in_obj;
-	in_obj.type = ACPI_TYPE_INTEGER;
-	in_obj.integer.value = offset;
-	status = acpi_evaluate_object(sony_nc_acpi_handle, "SN06", &params,
-			&buffer);
-	if (ACPI_FAILURE(status))
+	i = sony_nc_buffer_call(sony_nc_acpi_handle, "SN06", &offset, buffer,
+			32);
+	if (i < 0)
 		return;
 
-	lvl_enum = (union acpi_object *) buffer.pointer;
-	if (!lvl_enum) {
-		pr_err("No SN06 return object.");
-		return;
-	}
-	if (lvl_enum->type != ACPI_TYPE_BUFFER) {
-		pr_err("Invalid SN06 return object 0x%.2x\n",
-		       lvl_enum->type);
-		goto out_invalid;
-	}
-
 	/* the buffer lists brightness levels available, brightness levels are
-	 * from 0 to 8 in the array, other values are used by ALS control.
+	 * from position 0 to 8 in the array, other values are used by ALS
+	 * control.
 	 */
-	for (i = 0; i < 9 && i < lvl_enum->buffer.length; i++) {
+	for (i = 0; i < 9 && i < ARRAY_SIZE(buffer); i++) {
 
-		brlvl = *(lvl_enum->buffer.pointer + i);
-		dprintk("Brightness level: %d\n", brlvl);
+		dprintk("Brightness level: %d\n", buffer[i]);
 
-		if (!brlvl)
+		if (!buffer[i])
 			break;
 
-		if (brlvl > max)
-			max = brlvl;
-		if (brlvl < min)
-			min = brlvl;
+		if (buffer[i] > max)
+			max = buffer[i];
+		if (buffer[i] < min)
+			min = buffer[i];
 	}
 	props->offset = min;
 	props->maxlvl = max;
 	dprintk("Brightness levels: min=%d max=%d\n", props->offset,
 			props->maxlvl);
-
-out_invalid:
-	kfree(buffer.pointer);
-	return;
 }
 
 static void sony_nc_backlight_setup(void)
@@ -1715,28 +2599,25 @@
 
 	if (ACPI_SUCCESS(acpi_get_handle(sony_nc_acpi_handle, "ECON",
 					 &handle))) {
-		if (acpi_callsetfunc(sony_nc_acpi_handle, "ECON", 1, NULL))
+		int arg = 1;
+		if (sony_nc_int_call(sony_nc_acpi_handle, "ECON", &arg, NULL))
 			dprintk("ECON Method failed\n");
 	}
 
 	if (ACPI_SUCCESS(acpi_get_handle(sony_nc_acpi_handle, "SN00",
 					 &handle))) {
 		dprintk("Doing SNC setup\n");
+		/* retrieve the available handles */
 		result = sony_nc_handles_setup(sony_pf_device);
-		if (result)
-			goto outpresent;
-		result = sony_nc_kbd_backlight_setup(sony_pf_device);
-		if (result)
-			goto outsnc;
-		sony_nc_function_setup(device);
-		sony_nc_rfkill_setup(device);
+		if (!result)
+			sony_nc_function_setup(device, sony_pf_device);
 	}
 
 	/* setup input devices and helper fifo */
 	result = sony_laptop_setup_input(device);
 	if (result) {
 		pr_err("Unable to create input devices\n");
-		goto outkbdbacklight;
+		goto outsnc;
 	}
 
 	if (acpi_video_backlight_support()) {
@@ -1794,10 +2675,8 @@
 
 	sony_laptop_remove_input();
 
-      outkbdbacklight:
-	sony_nc_kbd_backlight_cleanup(sony_pf_device);
-
       outsnc:
+	sony_nc_function_cleanup(sony_pf_device);
 	sony_nc_handles_cleanup(sony_pf_device);
 
       outpresent:
@@ -1820,11 +2699,10 @@
 		device_remove_file(&sony_pf_device->dev, &item->devattr);
 	}
 
-	sony_nc_kbd_backlight_cleanup(sony_pf_device);
+	sony_nc_function_cleanup(sony_pf_device);
 	sony_nc_handles_cleanup(sony_pf_device);
 	sony_pf_remove();
 	sony_laptop_remove_input();
-	sony_nc_rfkill_cleanup();
 	dprintk(SONY_NC_DRIVER_NAME " removed.\n");
 
 	return 0;
@@ -2437,7 +3315,9 @@
 	if (count > 31)
 		return -EINVAL;
 
-	value = simple_strtoul(buffer, NULL, 10);
+	if (kstrtoul(buffer, 10, &value))
+		return -EINVAL;
+
 	mutex_lock(&spic_dev.lock);
 	__sony_pic_set_wwanpower(value);
 	mutex_unlock(&spic_dev.lock);
@@ -2474,7 +3354,9 @@
 	if (count > 31)
 		return -EINVAL;
 
-	value = simple_strtoul(buffer, NULL, 10);
+	if (kstrtoul(buffer, 10, &value))
+		return -EINVAL;
+
 	mutex_lock(&spic_dev.lock);
 	__sony_pic_set_bluetoothpower(value);
 	mutex_unlock(&spic_dev.lock);
@@ -2513,7 +3395,9 @@
 	if (count > 31)
 		return -EINVAL;
 
-	value = simple_strtoul(buffer, NULL, 10);
+	if (kstrtoul(buffer, 10, &value))
+		return -EINVAL;
+
 	if (sony_pic_set_fanspeed(value))
 		return -EIO;
 
@@ -2671,7 +3555,8 @@
 			ret = -EIO;
 			break;
 		}
-		if (acpi_callgetfunc(sony_nc_acpi_handle, "GBRT", &value)) {
+		if (sony_nc_int_call(sony_nc_acpi_handle, "GBRT", NULL,
+					&value)) {
 			ret = -EIO;
 			break;
 		}
@@ -2688,8 +3573,9 @@
 			ret = -EFAULT;
 			break;
 		}
-		if (acpi_callsetfunc(sony_nc_acpi_handle, "SBRT",
-				(val8 >> 5) + 1, NULL)) {
+		value = (val8 >> 5) + 1;
+		if (sony_nc_int_call(sony_nc_acpi_handle, "SBRT", &value,
+					NULL)) {
 			ret = -EIO;
 			break;
 		}
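
The sony-laptop rework above exposes the new battery care, thermal profile,
lid resume, high speed charging and touchpad controls as sysfs attributes on
the platform device. A minimal userspace sketch, assuming the platform device
is named "sony-laptop" (the path is an assumption, not taken from the patch):

	#include <stdio.h>

	/* Sketch only: set the battery care limit to 80% and read it back. */
	int main(void)
	{
		const char *path =
			"/sys/devices/platform/sony-laptop/battery_care_limiter";
		FILE *f;
		int limit;

		f = fopen(path, "w");
		if (!f)
			return 1;
		fprintf(f, "80\n");	/* the driver maps values to 0/50/80/100 */
		fclose(f);

		f = fopen(path, "r");
		if (!f)
			return 1;
		if (fscanf(f, "%d", &limit) == 1)
			printf("battery care limit: %d%%\n", limit);
		fclose(f);
		return 0;
	}
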
diff --git a/drivers/platform/x86/thinkpad_acpi.c b/drivers/platform/x86/thinkpad_acpi.c
index d68c000..8b5610d 100644
--- a/drivers/platform/x86/thinkpad_acpi.c
+++ b/drivers/platform/x86/thinkpad_acpi.c
@@ -3402,7 +3402,7 @@
 	/* Do not issue duplicate brightness change events to
 	 * userspace. tpacpi_detect_brightness_capabilities() must have
 	 * been called before this point  */
-	if (tp_features.bright_acpimode && acpi_video_backlight_support()) {
+	if (acpi_video_backlight_support()) {
 		pr_info("This ThinkPad has standard ACPI backlight "
 			"brightness control, supported by the ACPI "
 			"video driver\n");
diff --git a/drivers/platform/x86/toshiba_acpi.c b/drivers/platform/x86/toshiba_acpi.c
index ee79ce6..dab10f6 100644
--- a/drivers/platform/x86/toshiba_acpi.c
+++ b/drivers/platform/x86/toshiba_acpi.c
@@ -95,6 +95,7 @@
 
 /* registers */
 #define HCI_FAN				0x0004
+#define HCI_TR_BACKLIGHT		0x0005
 #define HCI_SYSTEM_EVENT		0x0016
 #define HCI_VIDEO_OUT			0x001c
 #define HCI_HOTKEY_EVENT		0x001e
@@ -134,6 +135,7 @@
 	unsigned int system_event_supported:1;
 	unsigned int ntfy_supported:1;
 	unsigned int info_supported:1;
+	unsigned int tr_backlight_supported:1;
 
 	struct mutex mutex;
 };
@@ -478,34 +480,70 @@
 	.poll = bt_rfkill_poll,
 };
 
+static int get_tr_backlight_status(struct toshiba_acpi_dev *dev, bool *enabled)
+{
+	u32 hci_result;
+	u32 status;
+
+	hci_read1(dev, HCI_TR_BACKLIGHT, &status, &hci_result);
+	*enabled = !status;
+	return hci_result == HCI_SUCCESS ? 0 : -EIO;
+}
+
+static int set_tr_backlight_status(struct toshiba_acpi_dev *dev, bool enable)
+{
+	u32 hci_result;
+	u32 value = !enable;
+
+	hci_write1(dev, HCI_TR_BACKLIGHT, value, &hci_result);
+	return hci_result == HCI_SUCCESS ? 0 : -EIO;
+}
+
 static struct proc_dir_entry *toshiba_proc_dir /*= 0*/ ;
 
-static int get_lcd(struct backlight_device *bd)
+static int __get_lcd_brightness(struct toshiba_acpi_dev *dev)
 {
-	struct toshiba_acpi_dev *dev = bl_get_data(bd);
 	u32 hci_result;
 	u32 value;
+	int brightness = 0;
+
+	if (dev->tr_backlight_supported) {
+		bool enabled;
+		int ret = get_tr_backlight_status(dev, &enabled);
+		if (ret)
+			return ret;
+		if (enabled)
+			return 0;
+		brightness++;
+	}
 
 	hci_read1(dev, HCI_LCD_BRIGHTNESS, &value, &hci_result);
 	if (hci_result == HCI_SUCCESS)
-		return (value >> HCI_LCD_BRIGHTNESS_SHIFT);
+		return brightness + (value >> HCI_LCD_BRIGHTNESS_SHIFT);
 
 	return -EIO;
 }
 
+static int get_lcd_brightness(struct backlight_device *bd)
+{
+	struct toshiba_acpi_dev *dev = bl_get_data(bd);
+	return __get_lcd_brightness(dev);
+}
+
 static int lcd_proc_show(struct seq_file *m, void *v)
 {
 	struct toshiba_acpi_dev *dev = m->private;
 	int value;
+	int levels;
 
 	if (!dev->backlight_dev)
 		return -ENODEV;
 
-	value = get_lcd(dev->backlight_dev);
+	levels = dev->backlight_dev->props.max_brightness + 1;
+	value = get_lcd_brightness(dev->backlight_dev);
 	if (value >= 0) {
 		seq_printf(m, "brightness:              %d\n", value);
-		seq_printf(m, "brightness_levels:       %d\n",
-			     HCI_LCD_BRIGHTNESS_LEVELS);
+		seq_printf(m, "brightness_levels:       %d\n", levels);
 		return 0;
 	}
 
@@ -518,10 +556,19 @@
 	return single_open(file, lcd_proc_show, PDE(inode)->data);
 }
 
-static int set_lcd(struct toshiba_acpi_dev *dev, int value)
+static int set_lcd_brightness(struct toshiba_acpi_dev *dev, int value)
 {
 	u32 hci_result;
 
+	if (dev->tr_backlight_supported) {
+		bool enable = !value;
+		int ret = set_tr_backlight_status(dev, enable);
+		if (ret)
+			return ret;
+		if (value)
+			value--;
+	}
+
 	value = value << HCI_LCD_BRIGHTNESS_SHIFT;
 	hci_write1(dev, HCI_LCD_BRIGHTNESS, value, &hci_result);
 	return hci_result == HCI_SUCCESS ? 0 : -EIO;
@@ -530,7 +577,7 @@
 static int set_lcd_status(struct backlight_device *bd)
 {
 	struct toshiba_acpi_dev *dev = bl_get_data(bd);
-	return set_lcd(dev, bd->props.brightness);
+	return set_lcd_brightness(dev, bd->props.brightness);
 }
 
 static ssize_t lcd_proc_write(struct file *file, const char __user *buf,
@@ -541,6 +588,7 @@
 	size_t len;
 	int value;
 	int ret;
+	int levels = dev->backlight_dev->props.max_brightness + 1;
 
 	len = min(count, sizeof(cmd) - 1);
 	if (copy_from_user(cmd, buf, len))
@@ -548,8 +596,8 @@
 	cmd[len] = '\0';
 
 	if (sscanf(cmd, " brightness : %i", &value) == 1 &&
-	    value >= 0 && value < HCI_LCD_BRIGHTNESS_LEVELS) {
-		ret = set_lcd(dev, value);
+	    value >= 0 && value < levels) {
+		ret = set_lcd_brightness(dev, value);
 		if (ret == 0)
 			ret = count;
 	} else {
@@ -860,8 +908,9 @@
 }
 
 static const struct backlight_ops toshiba_backlight_data = {
-        .get_brightness = get_lcd,
-        .update_status  = set_lcd_status,
+	.options = BL_CORE_SUSPENDRESUME,
+	.get_brightness = get_lcd_brightness,
+	.update_status  = set_lcd_status,
 };
 
 static bool toshiba_acpi_i8042_filter(unsigned char data, unsigned char str,
@@ -1020,6 +1069,56 @@
 	return error;
 }
 
+static int __devinit toshiba_acpi_setup_backlight(struct toshiba_acpi_dev *dev)
+{
+	struct backlight_properties props;
+	int brightness;
+	int ret;
+	bool enabled;
+
+	/*
+	 * Some machines don't support the backlight methods at all, and
+	 * others support it read-only. Either of these is pretty useless,
+	 * so only register the backlight device if the backlight method
+	 * supports both reads and writes.
+	 */
+	brightness = __get_lcd_brightness(dev);
+	if (brightness < 0)
+		return 0;
+	ret = set_lcd_brightness(dev, brightness);
+	if (ret) {
+		pr_debug("Backlight method is read-only, disabling backlight support\n");
+		return 0;
+	}
+
+	/* Determine whether or not BIOS supports transflective backlight */
+	ret = get_tr_backlight_status(dev, &enabled);
+	dev->tr_backlight_supported = !ret;
+
+	memset(&props, 0, sizeof(props));
+	props.type = BACKLIGHT_PLATFORM;
+	props.max_brightness = HCI_LCD_BRIGHTNESS_LEVELS - 1;
+
+	/* add one extra level; brightness 0 selects transflective mode */
+	if (dev->tr_backlight_supported)
+		props.max_brightness++;
+
+	dev->backlight_dev = backlight_device_register("toshiba",
+						       &dev->acpi_dev->dev,
+						       dev,
+						       &toshiba_backlight_data,
+						       &props);
+	if (IS_ERR(dev->backlight_dev)) {
+		ret = PTR_ERR(dev->backlight_dev);
+		pr_err("Could not register toshiba backlight device\n");
+		dev->backlight_dev = NULL;
+		return ret;
+	}
+
+	dev->backlight_dev->props.brightness = brightness;
+	return 0;
+}
+
 static int toshiba_acpi_remove(struct acpi_device *acpi_dev, int type)
 {
 	struct toshiba_acpi_dev *dev = acpi_driver_data(acpi_dev);
@@ -1078,7 +1177,6 @@
 	u32 dummy;
 	bool bt_present;
 	int ret = 0;
-	struct backlight_properties props;
 
 	if (toshiba_acpi)
 		return -EBUSY;
@@ -1104,21 +1202,9 @@
 
 	mutex_init(&dev->mutex);
 
-	props.type = BACKLIGHT_PLATFORM;
-	props.max_brightness = HCI_LCD_BRIGHTNESS_LEVELS - 1;
-	dev->backlight_dev = backlight_device_register("toshiba",
-						       &acpi_dev->dev,
-						       dev,
-						       &toshiba_backlight_data,
-						       &props);
-	if (IS_ERR(dev->backlight_dev)) {
-		ret = PTR_ERR(dev->backlight_dev);
-
-		pr_err("Could not register toshiba backlight device\n");
-		dev->backlight_dev = NULL;
+	ret = toshiba_acpi_setup_backlight(dev);
+	if (ret)
 		goto error;
-	}
-	dev->backlight_dev->props.brightness = get_lcd(dev->backlight_dev);
 
 	/* Register rfkill switch for Bluetooth */
 	if (hci_get_bt_present(dev, &bt_present) == HCI_SUCCESS && bt_present) {
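
For models with a transflective panel, the toshiba_acpi changes above add one
extra backlight level: user-visible brightness 0 turns transflective mode on,
and any higher value is shifted down by one before it is written to the HCI
brightness register. A minimal sketch of that mapping, outside the patch and
with hypothetical names:

	/* Sketch only: split a user brightness value into the transflective
	 * flag and the raw HCI level, mirroring set_lcd_brightness().
	 */
	struct tosh_level {
		int tr_enabled;	/* 1 = transflective mode on */
		int hci_level;	/* value passed to HCI_LCD_BRIGHTNESS */
	};

	static struct tosh_level map_brightness(int value, int tr_supported)
	{
		struct tosh_level out = { 0, value };

		if (tr_supported) {
			out.tr_enabled = (value == 0);
			if (value > 0)
				out.hci_level = value - 1; /* skip level 0 */
		}
		return out;
	}
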
diff --git a/drivers/platform/x86/xo1-rfkill.c b/drivers/platform/x86/xo1-rfkill.c
index 41781ed..b57ad86 100644
--- a/drivers/platform/x86/xo1-rfkill.c
+++ b/drivers/platform/x86/xo1-rfkill.c
@@ -15,15 +15,26 @@
 
 #include <asm/olpc.h>
 
+static bool card_blocked;
+
 static int rfkill_set_block(void *data, bool blocked)
 {
 	unsigned char cmd;
+	int r;
+
+	if (blocked == card_blocked)
+		return 0;
+
 	if (blocked)
 		cmd = EC_WLAN_ENTER_RESET;
 	else
 		cmd = EC_WLAN_LEAVE_RESET;
 
-	return olpc_ec_cmd(cmd, NULL, 0, NULL, 0);
+	r = olpc_ec_cmd(cmd, NULL, 0, NULL, 0);
+	if (r == 0)
+		card_blocked = blocked;
+
+	return r;
 }
 
 static const struct rfkill_ops rfkill_ops = {
diff --git a/drivers/power/Kconfig b/drivers/power/Kconfig
index 99dc29f..e3a3b49 100644
--- a/drivers/power/Kconfig
+++ b/drivers/power/Kconfig
@@ -1,5 +1,5 @@
 menuconfig POWER_SUPPLY
-	tristate "Power supply class support"
+	bool "Power supply class support"
 	help
 	  Say Y here to enable power supply class support. This allows
 	  power supply (batteries, AC, USB) monitoring by userspace
@@ -77,7 +77,7 @@
 	  Say Y here to enable support for batteries with ds2780 chip.
 
 config BATTERY_DS2781
-	tristate "2781 battery driver"
+	tristate "DS2781 battery driver"
 	depends on HAS_IOMEM
 	select W1
 	select W1_SLAVE_DS2781
@@ -181,14 +181,15 @@
 	  to operate with a single lithium cell
 
 config BATTERY_MAX17042
-	tristate "Maxim MAX17042/8997/8966 Fuel Gauge"
+	tristate "Maxim MAX17042/17047/17050/8997/8966 Fuel Gauge"
 	depends on I2C
 	help
 	  MAX17042 is fuel-gauge systems for lithium-ion (Li+) batteries
 	  in handheld and portable equipment. The MAX17042 is configured
 	  to operate with a single lithium cell. MAX8997 and MAX8966 are
 	  multi-function devices that include fuel gauages that are compatible
-	  with MAX17042.
+	  with MAX17042. This driver also supports max17047/50 chips, which are
+	  improved versions of max17042.
 
 config BATTERY_Z2
 	tristate "Z2 battery driver"
@@ -291,6 +292,7 @@
 config CHARGER_SMB347
 	tristate "Summit Microelectronics SMB347 Battery Charger"
 	depends on I2C
+	select REGMAP_I2C
 	help
 	  Say Y to include support for Summit Microelectronics SMB347
 	  Battery Charger.
diff --git a/drivers/power/ab8500_btemp.c b/drivers/power/ab8500_btemp.c
index d8bb993..bba3cca 100644
--- a/drivers/power/ab8500_btemp.c
+++ b/drivers/power/ab8500_btemp.c
@@ -964,10 +964,15 @@
 {
 	int irq, i, ret = 0;
 	u8 val;
-	struct abx500_bm_plat_data *plat_data;
+	struct abx500_bm_plat_data *plat_data = pdev->dev.platform_data;
+	struct ab8500_btemp *di;
 
-	struct ab8500_btemp *di =
-		kzalloc(sizeof(struct ab8500_btemp), GFP_KERNEL);
+	if (!plat_data) {
+		dev_err(&pdev->dev, "No platform data\n");
+		return -EINVAL;
+	}
+
+	di = kzalloc(sizeof(*di), GFP_KERNEL);
 	if (!di)
 		return -ENOMEM;
 
@@ -977,7 +982,6 @@
 	di->gpadc = ab8500_gpadc_get("ab8500-gpadc.0");
 
 	/* get btemp specific platform data */
-	plat_data = pdev->dev.platform_data;
 	di->pdata = plat_data->btemp;
 	if (!di->pdata) {
 		dev_err(di->dev, "no btemp platform data supplied\n");
diff --git a/drivers/power/ab8500_charger.c b/drivers/power/ab8500_charger.c
index e2b4acc..d2303d0 100644
--- a/drivers/power/ab8500_charger.c
+++ b/drivers/power/ab8500_charger.c
@@ -2534,10 +2534,15 @@
 static int __devinit ab8500_charger_probe(struct platform_device *pdev)
 {
 	int irq, i, charger_status, ret = 0;
-	struct abx500_bm_plat_data *plat_data;
+	struct abx500_bm_plat_data *plat_data = pdev->dev.platform_data;
+	struct ab8500_charger *di;
 
-	struct ab8500_charger *di =
-		kzalloc(sizeof(struct ab8500_charger), GFP_KERNEL);
+	if (!plat_data) {
+		dev_err(&pdev->dev, "No platform data\n");
+		return -EINVAL;
+	}
+
+	di = kzalloc(sizeof(*di), GFP_KERNEL);
 	if (!di)
 		return -ENOMEM;
 
@@ -2550,9 +2555,7 @@
 	spin_lock_init(&di->usb_state.usb_lock);
 
 	/* get charger specific platform data */
-	plat_data = pdev->dev.platform_data;
 	di->pdata = plat_data->charger;
-
 	if (!di->pdata) {
 		dev_err(di->dev, "no charger platform data supplied\n");
 		ret = -EINVAL;
diff --git a/drivers/power/ab8500_fg.c b/drivers/power/ab8500_fg.c
index c22f2f0..bf02225 100644
--- a/drivers/power/ab8500_fg.c
+++ b/drivers/power/ab8500_fg.c
@@ -2446,10 +2446,15 @@
 {
 	int i, irq;
 	int ret = 0;
-	struct abx500_bm_plat_data *plat_data;
+	struct abx500_bm_plat_data *plat_data = pdev->dev.platform_data;
+	struct ab8500_fg *di;
 
-	struct ab8500_fg *di =
-		kzalloc(sizeof(struct ab8500_fg), GFP_KERNEL);
+	if (!plat_data) {
+		dev_err(&pdev->dev, "No platform data\n");
+		return -EINVAL;
+	}
+
+	di = kzalloc(sizeof(*di), GFP_KERNEL);
 	if (!di)
 		return -ENOMEM;
 
@@ -2461,7 +2466,6 @@
 	di->gpadc = ab8500_gpadc_get("ab8500-gpadc.0");
 
 	/* get fg specific platform data */
-	plat_data = pdev->dev.platform_data;
 	di->pdata = plat_data->fg;
 	if (!di->pdata) {
 		dev_err(di->dev, "no fg platform data supplied\n");
diff --git a/drivers/power/charger-manager.c b/drivers/power/charger-manager.c
index 9eca9f1..86935ec 100644
--- a/drivers/power/charger-manager.c
+++ b/drivers/power/charger-manager.c
@@ -23,6 +23,16 @@
 #include <linux/power/charger-manager.h>
 #include <linux/regulator/consumer.h>
 
+static const char * const default_event_names[] = {
+	[CM_EVENT_UNKNOWN] = "Unknown",
+	[CM_EVENT_BATT_FULL] = "Battery Full",
+	[CM_EVENT_BATT_IN] = "Battery Inserted",
+	[CM_EVENT_BATT_OUT] = "Battery Pulled Out",
+	[CM_EVENT_EXT_PWR_IN_OUT] = "External Power Attach/Detach",
+	[CM_EVENT_CHG_START_STOP] = "Charging Start/Stop",
+	[CM_EVENT_OTHERS] = "Other battery events"
+};
+
 /*
  * Regard CM_JIFFIES_SMALL jiffies is small enough to ignore for
  * delayed works so that we can run delayed works with CM_JIFFIES_SMALL
@@ -57,6 +67,12 @@
 static bool cm_rtc_set;
 static unsigned long cm_suspend_duration_ms;
 
+/* About normal (not suspended) monitoring */
+static unsigned long polling_jiffy = ULONG_MAX; /* ULONG_MAX: no polling */
+static unsigned long next_polling; /* Next appointed polling time */
+static struct workqueue_struct *cm_wq; /* init at driver add */
+static struct delayed_work cm_monitor_work; /* init at driver add */
+
 /* Global charger-manager description */
 static struct charger_global_desc *g_desc; /* init with setup_charger_manager */
 
@@ -71,6 +87,11 @@
 	int i, ret;
 
 	switch (cm->desc->battery_present) {
+	case CM_BATTERY_PRESENT:
+		present = true;
+		break;
+	case CM_NO_BATTERY:
+		break;
 	case CM_FUEL_GAUGE:
 		ret = cm->fuel_gauge->get_property(cm->fuel_gauge,
 				POWER_SUPPLY_PROP_PRESENT, &val);
@@ -279,6 +300,26 @@
 }
 
 /**
+ * try_charger_restart - Restart charging.
+ * @cm: the Charger Manager representing the battery.
+ *
+ * Restart charging by turning off and on the charger.
+ */
+static int try_charger_restart(struct charger_manager *cm)
+{
+	int err;
+
+	if (cm->emergency_stop)
+		return -EAGAIN;
+
+	err = try_charger_enable(cm, false);
+	if (err)
+		return err;
+
+	return try_charger_enable(cm, true);
+}
+
+/**
  * uevent_notify - Let users know something has changed.
  * @cm: the Charger Manager representing the battery.
  * @event: the event string.
@@ -334,6 +375,46 @@
 }
 
 /**
+ * fullbatt_vchk - Check voltage drop some time after the "FULL" event.
+ * @work: the work_struct appointing the function
+ *
+ * If a user has designated "fullbatt_vchkdrop_ms/uV" values with
+ * charger_desc, Charger Manager checks voltage drop after the battery
+ * "FULL" event. It checks whether the voltage has dropped more than
+ * fullbatt_vchkdrop_uV by calling this function after fullbatt_vchkdrop_ms.
+ */
+static void fullbatt_vchk(struct work_struct *work)
+{
+	struct delayed_work *dwork = to_delayed_work(work);
+	struct charger_manager *cm = container_of(dwork,
+			struct charger_manager, fullbatt_vchk_work);
+	struct charger_desc *desc = cm->desc;
+	int batt_uV, err, diff;
+
+	/* remove the appointment for fullbatt_vchk */
+	cm->fullbatt_vchk_jiffies_at = 0;
+
+	if (!desc->fullbatt_vchkdrop_uV || !desc->fullbatt_vchkdrop_ms)
+		return;
+
+	err = get_batt_uV(cm, &batt_uV);
+	if (err) {
+		dev_err(cm->dev, "%s: get_batt_uV error(%d).\n", __func__, err);
+		return;
+	}
+
+	diff = cm->fullbatt_vchk_uV;
+	diff -= batt_uV;
+
+	dev_dbg(cm->dev, "VBATT dropped %duV after full-batt.\n", diff);
+
+	if (diff > desc->fullbatt_vchkdrop_uV) {
+		try_charger_restart(cm);
+		uevent_notify(cm, "Recharge");
+	}
+}
+
+/**
  * _cm_monitor - Monitor the temperature and return true for exceptions.
  * @cm: the Charger Manager representing the battery.
  *
@@ -392,6 +473,131 @@
 	return stop;
 }
 
+/**
+ * _setup_polling - Setup the next instance of polling.
+ * @work: work_struct of the function _setup_polling.
+ */
+static void _setup_polling(struct work_struct *work)
+{
+	unsigned long min = ULONG_MAX;
+	struct charger_manager *cm;
+	bool keep_polling = false;
+	unsigned long _next_polling;
+
+	mutex_lock(&cm_list_mtx);
+
+	list_for_each_entry(cm, &cm_list, entry) {
+		if (is_polling_required(cm) && cm->desc->polling_interval_ms) {
+			keep_polling = true;
+
+			if (min > cm->desc->polling_interval_ms)
+				min = cm->desc->polling_interval_ms;
+		}
+	}
+
+	polling_jiffy = msecs_to_jiffies(min);
+	if (polling_jiffy <= CM_JIFFIES_SMALL)
+		polling_jiffy = CM_JIFFIES_SMALL + 1;
+
+	if (!keep_polling)
+		polling_jiffy = ULONG_MAX;
+	if (polling_jiffy == ULONG_MAX)
+		goto out;
+
+	WARN(cm_wq == NULL, "charger-manager: workqueue not initialized"
+			    ". try it later. %s\n", __func__);
+
+	_next_polling = jiffies + polling_jiffy;
+
+	if (!delayed_work_pending(&cm_monitor_work) ||
+	    (delayed_work_pending(&cm_monitor_work) &&
+	     time_after(next_polling, _next_polling))) {
+		cancel_delayed_work_sync(&cm_monitor_work);
+		next_polling = jiffies + polling_jiffy;
+		queue_delayed_work(cm_wq, &cm_monitor_work, polling_jiffy);
+	}
+
+out:
+	mutex_unlock(&cm_list_mtx);
+}
+static DECLARE_WORK(setup_polling, _setup_polling);
+
+/**
+ * cm_monitor_poller - The Monitor / Poller.
+ * @work: work_struct of the function cm_monitor_poller
+ *
+ * During non-suspended state, cm_monitor_poller is used to poll and monitor
+ * the batteries.
+ */
+static void cm_monitor_poller(struct work_struct *work)
+{
+	cm_monitor();
+	schedule_work(&setup_polling);
+}
+
+/**
+ * fullbatt_handler - Event handler for CM_EVENT_BATT_FULL
+ * @cm: the Charger Manager representing the battery.
+ */
+static void fullbatt_handler(struct charger_manager *cm)
+{
+	struct charger_desc *desc = cm->desc;
+
+	if (!desc->fullbatt_vchkdrop_uV || !desc->fullbatt_vchkdrop_ms)
+		goto out;
+
+	if (cm_suspended)
+		device_set_wakeup_capable(cm->dev, true);
+
+	if (delayed_work_pending(&cm->fullbatt_vchk_work))
+		cancel_delayed_work(&cm->fullbatt_vchk_work);
+	queue_delayed_work(cm_wq, &cm->fullbatt_vchk_work,
+			   msecs_to_jiffies(desc->fullbatt_vchkdrop_ms));
+	cm->fullbatt_vchk_jiffies_at = jiffies + msecs_to_jiffies(
+				       desc->fullbatt_vchkdrop_ms);
+
+	if (cm->fullbatt_vchk_jiffies_at == 0)
+		cm->fullbatt_vchk_jiffies_at = 1;
+
+out:
+	dev_info(cm->dev, "EVENT_HANDLE: Battery Fully Charged.\n");
+	uevent_notify(cm, default_event_names[CM_EVENT_BATT_FULL]);
+}
+
+/**
+ * battout_handler - Event handler for CM_EVENT_BATT_OUT
+ * @cm: the Charger Manager representing the battery.
+ */
+static void battout_handler(struct charger_manager *cm)
+{
+	if (cm_suspended)
+		device_set_wakeup_capable(cm->dev, true);
+
+	if (!is_batt_present(cm)) {
+		dev_emerg(cm->dev, "Battery Pulled Out!\n");
+		uevent_notify(cm, default_event_names[CM_EVENT_BATT_OUT]);
+	} else {
+		uevent_notify(cm, "Battery Reinserted?");
+	}
+}
+
+/**
+ * misc_event_handler - Handler for other events
+ * @cm: the Charger Manager representing the battery.
+ * @type: type of charger event
+ */
+static void misc_event_handler(struct charger_manager *cm,
+			enum cm_event_types type)
+{
+	if (cm_suspended)
+		device_set_wakeup_capable(cm->dev, true);
+
+	if (!delayed_work_pending(&cm_monitor_work) &&
+	    is_polling_required(cm) && cm->desc->polling_interval_ms)
+		schedule_work(&setup_polling);
+	uevent_notify(cm, default_event_names[type]);
+}
+
 static int charger_get_property(struct power_supply *psy,
 		enum power_supply_property psp,
 		union power_supply_propval *val)
@@ -613,6 +819,21 @@
 	mutex_lock(&cm_list_mtx);
 
 	list_for_each_entry(cm, &cm_list, entry) {
+		unsigned int fbchk_ms = 0;
+
+		/* fullbatt_vchk is required. setup timer for that */
+		if (cm->fullbatt_vchk_jiffies_at) {
+			fbchk_ms = jiffies_to_msecs(cm->fullbatt_vchk_jiffies_at
+						    - jiffies);
+			if (time_is_before_eq_jiffies(
+				cm->fullbatt_vchk_jiffies_at) ||
+				msecs_to_jiffies(fbchk_ms) < CM_JIFFIES_SMALL) {
+				fullbatt_vchk(&cm->fullbatt_vchk_work.work);
+				fbchk_ms = 0;
+			}
+		}
+		CM_MIN_VALID(wakeup_ms, fbchk_ms);
+
 		/* Skip if polling is not required for this CM */
 		if (!is_polling_required(cm) && !cm->emergency_stop)
 			continue;
@@ -672,6 +893,23 @@
 	return false;
 }
 
+static void _cm_fbchk_in_suspend(struct charger_manager *cm)
+{
+	unsigned long jiffy_now = jiffies;
+
+	if (!cm->fullbatt_vchk_jiffies_at)
+		return;
+
+	if (g_desc && g_desc->assume_timer_stops_in_suspend)
+		jiffy_now += msecs_to_jiffies(cm_suspend_duration_ms);
+
+	/* Execute now if it's going to be executed not too long after */
+	jiffy_now += CM_JIFFIES_SMALL;
+
+	if (time_after_eq(jiffy_now, cm->fullbatt_vchk_jiffies_at))
+		fullbatt_vchk(&cm->fullbatt_vchk_work.work);
+}
+
 /**
  * cm_suspend_again - Determine whether suspend again or not
  *
@@ -693,6 +931,8 @@
 	ret = true;
 	mutex_lock(&cm_list_mtx);
 	list_for_each_entry(cm, &cm_list, entry) {
+		_cm_fbchk_in_suspend(cm);
+
 		if (cm->status_save_ext_pwr_inserted != is_ext_pwr_online(cm) ||
 		    cm->status_save_batt != is_batt_present(cm)) {
 			ret = false;
@@ -796,6 +1036,21 @@
 	memcpy(cm->desc, desc, sizeof(struct charger_desc));
 	cm->last_temp_mC = INT_MIN; /* denotes "unmeasured, yet" */
 
+	/*
+	 * The following two do not need to be errors.
+	 * Users may intentionally ignore those two features.
+	 */
+	if (desc->fullbatt_uV == 0) {
+		dev_info(&pdev->dev, "Ignoring full-battery voltage threshold"
+					" as it is not supplied.");
+	}
+	if (!desc->fullbatt_vchkdrop_ms || !desc->fullbatt_vchkdrop_uV) {
+		dev_info(&pdev->dev, "Disabling full-battery voltage drop "
+				"checking mechanism as it is not supplied.");
+		desc->fullbatt_vchkdrop_ms = 0;
+		desc->fullbatt_vchkdrop_uV = 0;
+	}
+
 	if (!desc->charger_regulators || desc->num_charger_regulators < 1) {
 		ret = -EINVAL;
 		dev_err(&pdev->dev, "charger_regulators undefined.\n");
@@ -903,6 +1158,8 @@
 		cm->charger_psy.num_properties++;
 	}
 
+	INIT_DELAYED_WORK(&cm->fullbatt_vchk_work, fullbatt_vchk);
+
 	ret = power_supply_register(NULL, &cm->charger_psy);
 	if (ret) {
 		dev_err(&pdev->dev, "Cannot register charger-manager with"
@@ -928,6 +1185,15 @@
 	list_add(&cm->entry, &cm_list);
 	mutex_unlock(&cm_list_mtx);
 
+	/*
+	 * Charger-manager is capable of waking up the system from sleep
+	 * when an event happens through cm_notify_event()
+	 */
+	device_init_wakeup(&pdev->dev, true);
+	device_set_wakeup_capable(&pdev->dev, false);
+
+	schedule_work(&setup_polling);
+
 	return 0;
 
 err_chg_enable:
@@ -958,9 +1224,17 @@
 	list_del(&cm->entry);
 	mutex_unlock(&cm_list_mtx);
 
+	if (work_pending(&setup_polling))
+		cancel_work_sync(&setup_polling);
+	if (delayed_work_pending(&cm_monitor_work))
+		cancel_delayed_work_sync(&cm_monitor_work);
+
 	regulator_bulk_free(desc->num_charger_regulators,
 			    desc->charger_regulators);
 	power_supply_unregister(&cm->charger_psy);
+
+	try_charger_enable(cm, false);
+
 	kfree(cm->charger_psy.properties);
 	kfree(cm->charger_stat);
 	kfree(cm->desc);
@@ -975,6 +1249,18 @@
 };
 MODULE_DEVICE_TABLE(platform, charger_manager_id);
 
+static int cm_suspend_noirq(struct device *dev)
+{
+	int ret = 0;
+
+	if (device_may_wakeup(dev)) {
+		device_set_wakeup_capable(dev, false);
+		ret = -EAGAIN;
+	}
+
+	return ret;
+}
+
 static int cm_suspend_prepare(struct device *dev)
 {
 	struct charger_manager *cm = dev_get_drvdata(dev);
@@ -1000,6 +1286,8 @@
 		cm_suspended = true;
 	}
 
+	if (delayed_work_pending(&cm->fullbatt_vchk_work))
+		cancel_delayed_work(&cm->fullbatt_vchk_work);
 	cm->status_save_ext_pwr_inserted = is_ext_pwr_online(cm);
 	cm->status_save_batt = is_batt_present(cm);
 
@@ -1027,11 +1315,40 @@
 		cm_rtc_set = false;
 	}
 
+	/* Re-enqueue delayed work (fullbatt_vchk_work) */
+	if (cm->fullbatt_vchk_jiffies_at) {
+		unsigned long delay = 0;
+		unsigned long now = jiffies + CM_JIFFIES_SMALL;
+
+		if (time_after_eq(now, cm->fullbatt_vchk_jiffies_at)) {
+			delay = (unsigned long)((long)now
+				- (long)(cm->fullbatt_vchk_jiffies_at));
+			delay = jiffies_to_msecs(delay);
+		} else {
+			delay = 0;
+		}
+
+		/*
+		 * Account for cm_suspend_duration_ms if
+		 * assume_timer_stops_in_suspend is active
+		 */
+		if (g_desc && g_desc->assume_timer_stops_in_suspend) {
+			if (delay > cm_suspend_duration_ms)
+				delay -= cm_suspend_duration_ms;
+			else
+				delay = 0;
+		}
+
+		queue_delayed_work(cm_wq, &cm->fullbatt_vchk_work,
+				   msecs_to_jiffies(delay));
+	}
+	device_set_wakeup_capable(cm->dev, false);
 	uevent_notify(cm, NULL);
 }
 
 static const struct dev_pm_ops charger_manager_pm = {
 	.prepare	= cm_suspend_prepare,
+	.suspend_noirq	= cm_suspend_noirq,
 	.complete	= cm_suspend_complete,
 };
 
@@ -1048,16 +1365,91 @@
 
 static int __init charger_manager_init(void)
 {
+	cm_wq = create_freezable_workqueue("charger_manager");
+	INIT_DELAYED_WORK(&cm_monitor_work, cm_monitor_poller);
+
 	return platform_driver_register(&charger_manager_driver);
 }
 late_initcall(charger_manager_init);
 
 static void __exit charger_manager_cleanup(void)
 {
+	destroy_workqueue(cm_wq);
+	cm_wq = NULL;
+
 	platform_driver_unregister(&charger_manager_driver);
 }
 module_exit(charger_manager_cleanup);
 
+/**
+ * find_power_supply - find the associated power_supply of charger
+ * @cm: the Charger Manager representing the battery
+ * @psy: pointer to instance of charger's power_supply
+ */
+static bool find_power_supply(struct charger_manager *cm,
+			struct power_supply *psy)
+{
+	int i;
+	bool found = false;
+
+	for (i = 0; cm->charger_stat[i]; i++) {
+		if (psy == cm->charger_stat[i]) {
+			found = true;
+			break;
+		}
+	}
+
+	return found;
+}
+
+/**
+ * cm_notify_event - charger driver notify Charger Manager of charger event
+ * @psy: pointer to instance of charger's power_supply
+ * @type: type of charger event
+ * @msg: optional message passed to the uevent_notify function
+ */
+void cm_notify_event(struct power_supply *psy, enum cm_event_types type,
+		     char *msg)
+{
+	struct charger_manager *cm;
+	bool found_power_supply = false;
+
+	if (psy == NULL)
+		return;
+
+	mutex_lock(&cm_list_mtx);
+	list_for_each_entry(cm, &cm_list, entry) {
+		found_power_supply = find_power_supply(cm, psy);
+		if (found_power_supply)
+			break;
+	}
+	mutex_unlock(&cm_list_mtx);
+
+	if (!found_power_supply)
+		return;
+
+	switch (type) {
+	case CM_EVENT_BATT_FULL:
+		fullbatt_handler(cm);
+		break;
+	case CM_EVENT_BATT_OUT:
+		battout_handler(cm);
+		break;
+	case CM_EVENT_BATT_IN:
+	case CM_EVENT_EXT_PWR_IN_OUT ... CM_EVENT_CHG_START_STOP:
+		misc_event_handler(cm, type);
+		break;
+	case CM_EVENT_UNKNOWN:
+	case CM_EVENT_OTHERS:
+		uevent_notify(cm, msg ? msg : default_event_names[type]);
+		break;
+	default:
+		dev_err(cm->dev, "%s type not specified.\n", __func__);
+		break;
+	}
+}
+EXPORT_SYMBOL_GPL(cm_notify_event);
+
 MODULE_AUTHOR("MyungJoo Ham <myungjoo.ham@samsung.com>");
 MODULE_DESCRIPTION("Charger Manager");
 MODULE_LICENSE("GPL");
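
For context, a minimal sketch (not part of this patch; the my_gauge_* names and all
numeric values are placeholders) of how a fuel-gauge driver could report the new
CM_EVENT_BATT_FULL event and how a board's charger_desc could opt into the
full-battery voltage-drop recheck introduced above:

#include <linux/interrupt.h>
#include <linux/power_supply.h>
#include <linux/power/charger-manager.h>

/* Hypothetical board data: enable the post-"FULL" voltage-drop check. */
static struct charger_desc my_board_charger_desc = {
	.polling_interval_ms	= 30000,	/* placeholder */
	.fullbatt_uV		= 4200000,	/* placeholder */
	.fullbatt_vchkdrop_ms	= 30000,	/* placeholder */
	.fullbatt_vchkdrop_uV	= 150000,	/* placeholder: recharge on a >150mV drop */
	/* charger_regulators, power supply names, etc. omitted */
};

/* Hypothetical fuel-gauge interrupt handler reporting a full battery. */
static irqreturn_t my_gauge_irq(int irq, void *data)
{
	struct power_supply *psy = data;	/* the gauge's registered power_supply */

	cm_notify_event(psy, CM_EVENT_BATT_FULL, NULL);
	return IRQ_HANDLED;
}

cm_notify_event() finds the charger-manager instance that owns the given
power_supply and dispatches to fullbatt_handler(), which queues fullbatt_vchk_work
after fullbatt_vchkdrop_ms and restarts charging if the voltage has dropped by more
than fullbatt_vchkdrop_uV.
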
diff --git a/drivers/power/ds2781_battery.c b/drivers/power/ds2781_battery.c
index ca0d653..975684a 100644
--- a/drivers/power/ds2781_battery.c
+++ b/drivers/power/ds2781_battery.c
@@ -643,9 +643,7 @@
 	struct power_supply *psy = to_power_supply(dev);
 	struct ds2781_device_info *dev_info = to_ds2781_device_info(psy);
 
-	count = min_t(loff_t, count,
-		DS2781_EEPROM_BLOCK1_END -
-		DS2781_EEPROM_BLOCK1_START + 1 - off);
+	count = min_t(loff_t, count, DS2781_PARAM_EEPROM_SIZE - off);
 
 	return ds2781_read_block(dev_info, buf,
 				DS2781_EEPROM_BLOCK1_START + off, count);
@@ -661,9 +659,7 @@
 	struct ds2781_device_info *dev_info = to_ds2781_device_info(psy);
 	int ret;
 
-	count = min_t(loff_t, count,
-		DS2781_EEPROM_BLOCK1_END -
-		DS2781_EEPROM_BLOCK1_START + 1 - off);
+	count = min_t(loff_t, count, DS2781_PARAM_EEPROM_SIZE - off);
 
 	ret = ds2781_write(dev_info, buf,
 				DS2781_EEPROM_BLOCK1_START + off, count);
@@ -682,7 +678,7 @@
 		.name = "param_eeprom",
 		.mode = S_IRUGO | S_IWUSR,
 	},
-	.size = DS2781_EEPROM_BLOCK1_END - DS2781_EEPROM_BLOCK1_START + 1,
+	.size = DS2781_PARAM_EEPROM_SIZE,
 	.read = ds2781_read_param_eeprom_bin,
 	.write = ds2781_write_param_eeprom_bin,
 };
@@ -696,9 +692,7 @@
 	struct power_supply *psy = to_power_supply(dev);
 	struct ds2781_device_info *dev_info = to_ds2781_device_info(psy);
 
-	count = min_t(loff_t, count,
-		DS2781_EEPROM_BLOCK0_END -
-		DS2781_EEPROM_BLOCK0_START + 1 - off);
+	count = min_t(loff_t, count, DS2781_USER_EEPROM_SIZE - off);
 
 	return ds2781_read_block(dev_info, buf,
 				DS2781_EEPROM_BLOCK0_START + off, count);
@@ -715,9 +709,7 @@
 	struct ds2781_device_info *dev_info = to_ds2781_device_info(psy);
 	int ret;
 
-	count = min_t(loff_t, count,
-		DS2781_EEPROM_BLOCK0_END -
-		DS2781_EEPROM_BLOCK0_START + 1 - off);
+	count = min_t(loff_t, count, DS2781_USER_EEPROM_SIZE - off);
 
 	ret = ds2781_write(dev_info, buf,
 				DS2781_EEPROM_BLOCK0_START + off, count);
@@ -736,7 +728,7 @@
 		.name = "user_eeprom",
 		.mode = S_IRUGO | S_IWUSR,
 	},
-	.size = DS2781_EEPROM_BLOCK0_END - DS2781_EEPROM_BLOCK0_START + 1,
+	.size = DS2781_USER_EEPROM_SIZE,
 	.read = ds2781_read_user_eeprom_bin,
 	.write = ds2781_write_user_eeprom_bin,
 };
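
The DS2781_PARAM_EEPROM_SIZE and DS2781_USER_EEPROM_SIZE macros used above are not
defined in this excerpt; presumably (an assumption, sketched here for reference) they
live in the ds2781 header and are equivalent to the open-coded expressions they
replace:

/* Assumed header definitions; equivalent to the removed expressions. */
#define DS2781_PARAM_EEPROM_SIZE \
	(DS2781_EEPROM_BLOCK1_END - DS2781_EEPROM_BLOCK1_START + 1)
#define DS2781_USER_EEPROM_SIZE \
	(DS2781_EEPROM_BLOCK0_END - DS2781_EEPROM_BLOCK0_START + 1)
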
diff --git a/drivers/power/isp1704_charger.c b/drivers/power/isp1704_charger.c
index 39eb50f..e5ccd29 100644
--- a/drivers/power/isp1704_charger.c
+++ b/drivers/power/isp1704_charger.c
@@ -474,13 +474,13 @@
 fail2:
 	power_supply_unregister(&isp->psy);
 fail1:
+	isp1704_charger_set_power(isp, 0);
 	usb_put_transceiver(isp->phy);
 fail0:
 	kfree(isp);
 
 	dev_err(&pdev->dev, "failed to register isp1704 with error %d\n", ret);
 
-	isp1704_charger_set_power(isp, 0);
 	return ret;
 }
 
diff --git a/drivers/power/max17042_battery.c b/drivers/power/max17042_battery.c
index 04620c2..140788b 100644
--- a/drivers/power/max17042_battery.c
+++ b/drivers/power/max17042_battery.c
@@ -28,6 +28,7 @@
 #include <linux/i2c.h>
 #include <linux/delay.h>
 #include <linux/interrupt.h>
+#include <linux/pm.h>
 #include <linux/mod_devicetable.h>
 #include <linux/power_supply.h>
 #include <linux/power/max17042_battery.h>
@@ -61,9 +62,13 @@
 #define dP_ACC_100	0x1900
 #define dP_ACC_200	0x3200
 
+#define MAX17042_IC_VERSION	0x0092
+#define MAX17047_IC_VERSION	0x00AC	/* same for max17050 */
+
 struct max17042_chip {
 	struct i2c_client *client;
 	struct power_supply battery;
+	enum max170xx_chip_type chip_type;
 	struct max17042_platform_data *pdata;
 	struct work_struct work;
 	int    init_complete;
@@ -105,6 +110,7 @@
 	POWER_SUPPLY_PROP_VOLTAGE_MIN_DESIGN,
 	POWER_SUPPLY_PROP_VOLTAGE_NOW,
 	POWER_SUPPLY_PROP_VOLTAGE_AVG,
+	POWER_SUPPLY_PROP_VOLTAGE_OCV,
 	POWER_SUPPLY_PROP_CAPACITY,
 	POWER_SUPPLY_PROP_CHARGE_FULL,
 	POWER_SUPPLY_PROP_TEMP,
@@ -150,7 +156,10 @@
 		val->intval *= 20000; /* Units of LSB = 20mV */
 		break;
 	case POWER_SUPPLY_PROP_VOLTAGE_MIN_DESIGN:
-		ret = max17042_read_reg(chip->client, MAX17042_V_empty);
+		if (chip->chip_type == MAX17042)
+			ret = max17042_read_reg(chip->client, MAX17042_V_empty);
+		else
+			ret = max17042_read_reg(chip->client, MAX17047_V_empty);
 		if (ret < 0)
 			return ret;
 
@@ -171,6 +180,13 @@
 
 		val->intval = ret * 625 / 8;
 		break;
+	case POWER_SUPPLY_PROP_VOLTAGE_OCV:
+		ret = max17042_read_reg(chip->client, MAX17042_OCVInternal);
+		if (ret < 0)
+			return ret;
+
+		val->intval = ret * 625 / 8;
+		break;
 	case POWER_SUPPLY_PROP_CAPACITY:
 		ret = max17042_read_reg(chip->client, MAX17042_RepSOC);
 		if (ret < 0)
@@ -325,11 +341,10 @@
 static int max17042_init_model(struct max17042_chip *chip)
 {
 	int ret;
-	int table_size =
-		sizeof(chip->pdata->config_data->cell_char_tbl)/sizeof(u16);
+	int table_size = ARRAY_SIZE(chip->pdata->config_data->cell_char_tbl);
 	u16 *temp_data;
 
-	temp_data = kzalloc(table_size, GFP_KERNEL);
+	temp_data = kcalloc(table_size, sizeof(*temp_data), GFP_KERNEL);
 	if (!temp_data)
 		return -ENOMEM;
 
@@ -354,12 +369,11 @@
 static int max17042_verify_model_lock(struct max17042_chip *chip)
 {
 	int i;
-	int table_size =
-		sizeof(chip->pdata->config_data->cell_char_tbl);
+	int table_size = ARRAY_SIZE(chip->pdata->config_data->cell_char_tbl);
 	u16 *temp_data;
 	int ret = 0;
 
-	temp_data = kzalloc(table_size, GFP_KERNEL);
+	temp_data = kcalloc(table_size, sizeof(*temp_data), GFP_KERNEL);
 	if (!temp_data)
 		return -ENOMEM;
 
@@ -382,6 +396,9 @@
 	max17042_write_reg(chip->client, MAX17042_FilterCFG,
 			config->filter_cfg);
 	max17042_write_reg(chip->client, MAX17042_RelaxCFG, config->relax_cfg);
+	if (chip->chip_type == MAX17047)
+		max17042_write_reg(chip->client, MAX17047_FullSOCThr,
+						config->full_soc_thresh);
 }
 
 static void  max17042_write_custom_regs(struct max17042_chip *chip)
@@ -392,12 +409,23 @@
 				config->rcomp0);
 	max17042_write_verify_reg(chip->client, MAX17042_TempCo,
 				config->tcompc0);
-	max17042_write_reg(chip->client, MAX17042_EmptyTempCo,
-			config->empty_tempco);
-	max17042_write_verify_reg(chip->client, MAX17042_K_empty0,
-				config->kempty0);
 	max17042_write_verify_reg(chip->client, MAX17042_ICHGTerm,
 				config->ichgt_term);
+	if (chip->chip_type == MAX17042) {
+		max17042_write_reg(chip->client, MAX17042_EmptyTempCo,
+					config->empty_tempco);
+		max17042_write_verify_reg(chip->client, MAX17042_K_empty0,
+					config->kempty0);
+	} else {
+		max17042_write_verify_reg(chip->client, MAX17047_QRTbl00,
+						config->qrtbl00);
+		max17042_write_verify_reg(chip->client, MAX17047_QRTbl10,
+						config->qrtbl10);
+		max17042_write_verify_reg(chip->client, MAX17047_QRTbl20,
+						config->qrtbl20);
+		max17042_write_verify_reg(chip->client, MAX17047_QRTbl30,
+						config->qrtbl30);
+	}
 }
 
 static void max17042_update_capacity_regs(struct max17042_chip *chip)
@@ -453,6 +481,8 @@
 			config->design_cap);
 	max17042_write_verify_reg(chip->client, MAX17042_FullCAPNom,
 			config->fullcapnom);
+	/* Update SOC register with new SOC */
+	max17042_write_reg(chip->client, MAX17042_RepSOC, vfSoc);
 }
 
 /*
@@ -489,20 +519,28 @@
 
 	max17042_override_por(client, MAX17042_FullCAP, config->fullcap);
 	max17042_override_por(client, MAX17042_FullCAPNom, config->fullcapnom);
-	max17042_override_por(client, MAX17042_SOC_empty, config->socempty);
+	if (chip->chip_type == MAX17042)
+		max17042_override_por(client, MAX17042_SOC_empty,
+						config->socempty);
 	max17042_override_por(client, MAX17042_LAvg_empty, config->lavg_empty);
 	max17042_override_por(client, MAX17042_dQacc, config->dqacc);
 	max17042_override_por(client, MAX17042_dPacc, config->dpacc);
 
-	max17042_override_por(client, MAX17042_V_empty, config->vempty);
+	if (chip->chip_type == MAX17042)
+		max17042_override_por(client, MAX17042_V_empty, config->vempty);
+	else
+		max17042_override_por(client, MAX17047_V_empty, config->vempty);
 	max17042_override_por(client, MAX17042_TempNom, config->temp_nom);
 	max17042_override_por(client, MAX17042_TempLim, config->temp_lim);
 	max17042_override_por(client, MAX17042_FCTC, config->fctc);
 	max17042_override_por(client, MAX17042_RCOMP0, config->rcomp0);
 	max17042_override_por(client, MAX17042_TempCo, config->tcompc0);
-	max17042_override_por(client, MAX17042_EmptyTempCo,
-			config->empty_tempco);
-	max17042_override_por(client, MAX17042_K_empty0, config->kempty0);
+	if (chip->chip_type) {
+		max17042_override_por(client, MAX17042_EmptyTempCo,
+					config->empty_tempco);
+		max17042_override_por(client, MAX17042_K_empty0,
+					config->kempty0);
+	}
 }
 
 static int max17042_init_chip(struct max17042_chip *chip)
@@ -659,7 +697,19 @@
 
 	i2c_set_clientdata(client, chip);
 
-	chip->battery.name		= "max17042_battery";
+	ret = max17042_read_reg(chip->client, MAX17042_DevName);
+	if (ret == MAX17042_IC_VERSION) {
+		dev_dbg(&client->dev, "chip type max17042 detected\n");
+		chip->chip_type = MAX17042;
+	} else if (ret == MAX17047_IC_VERSION) {
+		dev_dbg(&client->dev, "chip type max17047/50 detected\n");
+		chip->chip_type = MAX17047;
+	} else {
+		dev_err(&client->dev, "device version mismatch: %x\n", ret);
+		return -EIO;
+	}
+
+	chip->battery.name		= "max170xx_battery";
 	chip->battery.type		= POWER_SUPPLY_TYPE_BATTERY;
 	chip->battery.get_property	= max17042_get_property;
 	chip->battery.properties	= max17042_battery_props;
@@ -683,6 +733,12 @@
 		max17042_write_reg(client, MAX17042_LearnCFG, 0x0007);
 	}
 
+	ret = power_supply_register(&client->dev, &chip->battery);
+	if (ret) {
+		dev_err(&client->dev, "failed: power supply register\n");
+		return ret;
+	}
+
 	if (client->irq) {
 		ret = request_threaded_irq(client->irq, NULL,
 						max17042_thread_handler,
@@ -693,13 +749,14 @@
 			reg |= CONFIG_ALRT_BIT_ENBL;
 			max17042_write_reg(client, MAX17042_CONFIG, reg);
 			max17042_set_soc_threshold(chip, 1);
-		} else
+		} else {
+			client->irq = 0;
 			dev_err(&client->dev, "%s(): cannot get IRQ\n",
 				__func__);
+		}
 	}
 
 	reg = max17042_read_reg(chip->client, MAX17042_STATUS);
-
 	if (reg & STATUS_POR_BIT) {
 		INIT_WORK(&chip->work, max17042_init_worker);
 		schedule_work(&chip->work);
@@ -707,23 +764,65 @@
 		chip->init_complete = 1;
 	}
 
-	ret = power_supply_register(&client->dev, &chip->battery);
-	if (ret)
-		dev_err(&client->dev, "failed: power supply register\n");
-	return ret;
+	return 0;
 }
 
 static int __devexit max17042_remove(struct i2c_client *client)
 {
 	struct max17042_chip *chip = i2c_get_clientdata(client);
 
+	if (client->irq)
+		free_irq(client->irq, chip);
 	power_supply_unregister(&chip->battery);
 	return 0;
 }
 
+#ifdef CONFIG_PM
+static int max17042_suspend(struct device *dev)
+{
+	struct max17042_chip *chip = dev_get_drvdata(dev);
+
+	/*
+	 * disable the irq and enable irq_wake
+	 * capability to the interrupt line.
+	 */
+	if (chip->client->irq) {
+		disable_irq(chip->client->irq);
+		enable_irq_wake(chip->client->irq);
+	}
+
+	return 0;
+}
+
+static int max17042_resume(struct device *dev)
+{
+	struct max17042_chip *chip = dev_get_drvdata(dev);
+
+	if (chip->client->irq) {
+		disable_irq_wake(chip->client->irq);
+		enable_irq(chip->client->irq);
+		/* re-program the SOC thresholds to 1% change */
+		max17042_set_soc_threshold(chip, 1);
+	}
+
+	return 0;
+}
+
+static const struct dev_pm_ops max17042_pm_ops = {
+	.suspend	= max17042_suspend,
+	.resume		= max17042_resume,
+};
+
+#define MAX17042_PM_OPS (&max17042_pm_ops)
+#else
+#define MAX17042_PM_OPS NULL
+#endif
+
 #ifdef CONFIG_OF
 static const struct of_device_id max17042_dt_match[] = {
 	{ .compatible = "maxim,max17042" },
+	{ .compatible = "maxim,max17047" },
+	{ .compatible = "maxim,max17050" },
 	{ },
 };
 MODULE_DEVICE_TABLE(of, max17042_dt_match);
@@ -731,6 +830,8 @@
 
 static const struct i2c_device_id max17042_id[] = {
 	{ "max17042", 0 },
+	{ "max17047", 1 },
+	{ "max17050", 2 },
 	{ }
 };
 MODULE_DEVICE_TABLE(i2c, max17042_id);
@@ -739,6 +840,7 @@
 	.driver	= {
 		.name	= "max17042",
 		.of_match_table = of_match_ptr(max17042_dt_match),
+		.pm	= MAX17042_PM_OPS,
 	},
 	.probe		= max17042_probe,
 	.remove		= __devexit_p(max17042_remove),
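
A minimal sketch (not part of this patch) of board code that would hand this driver a
MAX17047; the struct max17042_config_data type is assumed from
<linux/power/max17042_battery.h>, and the I2C bus, address and register values are
placeholders rather than real calibration data:

#include <linux/i2c.h>
#include <linux/power/max17042_battery.h>

/* Placeholder characterization data; only the MAX17047-specific fields are shown. */
static struct max17042_config_data board_fg_config = {
	.full_soc_thresh	= 0x5F00,	/* placeholder */
	.qrtbl00		= 0x1E00,	/* placeholder */
	.qrtbl10		= 0x1200,	/* placeholder */
	.qrtbl20		= 0x0B00,	/* placeholder */
	.qrtbl30		= 0x0900,	/* placeholder */
	/* rcomp0, tcompc0, ichgt_term, cell_char_tbl[], ... omitted */
};

static struct max17042_platform_data board_fg_pdata = {
	.config_data	= &board_fg_config,
};

static struct i2c_board_info board_fg_info __initdata = {
	I2C_BOARD_INFO("max17047", 0x36),	/* address is a placeholder */
	.platform_data	= &board_fg_pdata,
};

/* In board init code: i2c_register_board_info(1, &board_fg_info, 1); */
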
diff --git a/drivers/power/power_supply_sysfs.c b/drivers/power/power_supply_sysfs.c
index 4368e7d..4150747 100644
--- a/drivers/power/power_supply_sysfs.c
+++ b/drivers/power/power_supply_sysfs.c
@@ -146,6 +146,7 @@
 	POWER_SUPPLY_ATTR(voltage_min_design),
 	POWER_SUPPLY_ATTR(voltage_now),
 	POWER_SUPPLY_ATTR(voltage_avg),
+	POWER_SUPPLY_ATTR(voltage_ocv),
 	POWER_SUPPLY_ATTR(current_max),
 	POWER_SUPPLY_ATTR(current_now),
 	POWER_SUPPLY_ATTR(current_avg),
diff --git a/drivers/power/sbs-battery.c b/drivers/power/sbs-battery.c
index 06b659d..a5b6849 100644
--- a/drivers/power/sbs-battery.c
+++ b/drivers/power/sbs-battery.c
@@ -89,7 +89,7 @@
 	[REG_CURRENT] =
 		SBS_DATA(POWER_SUPPLY_PROP_CURRENT_NOW, 0x0A, -32768, 32767),
 	[REG_CAPACITY] =
-		SBS_DATA(POWER_SUPPLY_PROP_CAPACITY, 0x0E, 0, 100),
+		SBS_DATA(POWER_SUPPLY_PROP_CAPACITY, 0x0D, 0, 100),
 	[REG_REMAINING_CAPACITY] =
 		SBS_DATA(POWER_SUPPLY_PROP_ENERGY_NOW, 0x0F, 0, 65535),
 	[REG_REMAINING_CAPACITY_CHARGE] =
diff --git a/drivers/power/smb347-charger.c b/drivers/power/smb347-charger.c
index ce1694d..f8eedd8 100644
--- a/drivers/power/smb347-charger.c
+++ b/drivers/power/smb347-charger.c
@@ -11,7 +11,7 @@
  * published by the Free Software Foundation.
  */
 
-#include <linux/debugfs.h>
+#include <linux/err.h>
 #include <linux/gpio.h>
 #include <linux/kernel.h>
 #include <linux/module.h>
@@ -21,7 +21,7 @@
 #include <linux/mutex.h>
 #include <linux/power_supply.h>
 #include <linux/power/smb347-charger.h>
-#include <linux/seq_file.h>
+#include <linux/regmap.h>
 
 /*
  * Configuration registers. These are mirrored to volatile RAM and can be
@@ -39,6 +39,7 @@
 #define CFG_CURRENT_LIMIT_DC_SHIFT		4
 #define CFG_CURRENT_LIMIT_USB_MASK		0x0f
 #define CFG_FLOAT_VOLTAGE			0x03
+#define CFG_FLOAT_VOLTAGE_FLOAT_MASK		0x3f
 #define CFG_FLOAT_VOLTAGE_THRESHOLD_MASK	0xc0
 #define CFG_FLOAT_VOLTAGE_THRESHOLD_SHIFT	6
 #define CFG_STAT				0x05
@@ -113,29 +114,31 @@
 #define STAT_C_CHARGER_ERROR			BIT(6)
 #define STAT_E					0x3f
 
+#define SMB347_MAX_REGISTER			0x3f
+
 /**
  * struct smb347_charger - smb347 charger instance
  * @lock: protects concurrent access to online variables
- * @client: pointer to i2c client
+ * @dev: pointer to device
+ * @regmap: pointer to driver regmap
  * @mains: power_supply instance for AC/DC power
  * @usb: power_supply instance for USB power
  * @battery: power_supply instance for battery
  * @mains_online: is AC/DC input connected
  * @usb_online: is USB input connected
  * @charging_enabled: is charging enabled
- * @dentry: for debugfs
  * @pdata: pointer to platform data
  */
 struct smb347_charger {
 	struct mutex		lock;
-	struct i2c_client	*client;
+	struct device		*dev;
+	struct regmap		*regmap;
 	struct power_supply	mains;
 	struct power_supply	usb;
 	struct power_supply	battery;
 	bool			mains_online;
 	bool			usb_online;
 	bool			charging_enabled;
-	struct dentry		*dentry;
 	const struct smb347_charger_platform_data *pdata;
 };
 
@@ -193,14 +196,6 @@
 	1200000,
 };
 
-/* Convert register value to current using lookup table */
-static int hw_to_current(const unsigned int *tbl, size_t size, unsigned int val)
-{
-	if (val >= size)
-		return -EINVAL;
-	return tbl[val];
-}
-
 /* Convert current to register value using lookup table */
 static int current_to_hw(const unsigned int *tbl, size_t size, unsigned int val)
 {
@@ -212,43 +207,22 @@
 	return i > 0 ? i - 1 : -EINVAL;
 }
 
-static int smb347_read(struct smb347_charger *smb, u8 reg)
-{
-	int ret;
-
-	ret = i2c_smbus_read_byte_data(smb->client, reg);
-	if (ret < 0)
-		dev_warn(&smb->client->dev, "failed to read reg 0x%x: %d\n",
-			 reg, ret);
-	return ret;
-}
-
-static int smb347_write(struct smb347_charger *smb, u8 reg, u8 val)
-{
-	int ret;
-
-	ret = i2c_smbus_write_byte_data(smb->client, reg, val);
-	if (ret < 0)
-		dev_warn(&smb->client->dev, "failed to write reg 0x%x: %d\n",
-			 reg, ret);
-	return ret;
-}
-
 /**
- * smb347_update_status - updates the charging status
+ * smb347_update_ps_status - refreshes the power source status
  * @smb: pointer to smb347 charger instance
  *
- * Function checks status of the charging and updates internal state
- * accordingly. Returns %0 if there is no change in status, %1 if the
- * status has changed and negative errno in case of failure.
+ * Function checks whether any power source is connected to the charger and
+ * updates internal state accordingly. If there is a change from the previous
+ * state, the function returns %1, otherwise %0, and a negative errno on error.
  */
-static int smb347_update_status(struct smb347_charger *smb)
+static int smb347_update_ps_status(struct smb347_charger *smb)
 {
 	bool usb = false;
 	bool dc = false;
+	unsigned int val;
 	int ret;
 
-	ret = smb347_read(smb, IRQSTAT_E);
+	ret = regmap_read(smb->regmap, IRQSTAT_E, &val);
 	if (ret < 0)
 		return ret;
 
@@ -257,9 +231,9 @@
 	 * platform data _and_ whether corresponding undervoltage is set.
 	 */
 	if (smb->pdata->use_mains)
-		dc = !(ret & IRQSTAT_E_DCIN_UV_STAT);
+		dc = !(val & IRQSTAT_E_DCIN_UV_STAT);
 	if (smb->pdata->use_usb)
-		usb = !(ret & IRQSTAT_E_USBIN_UV_STAT);
+		usb = !(val & IRQSTAT_E_USBIN_UV_STAT);
 
 	mutex_lock(&smb->lock);
 	ret = smb->mains_online != dc || smb->usb_online != usb;
@@ -271,15 +245,15 @@
 }
 
 /*
- * smb347_is_online - returns whether input power source is connected
+ * smb347_is_ps_online - returns whether input power source is connected
  * @smb: pointer to smb347 charger instance
  *
  * Returns %true if input power source is connected. Note that this is
  * dependent on what platform has configured for usable power sources. For
- * example if USB is disabled, this will return %false even if the USB
- * cable is connected.
+ * example if USB is disabled, this will return %false even if the USB cable
+ * is connected.
  */
-static bool smb347_is_online(struct smb347_charger *smb)
+static bool smb347_is_ps_online(struct smb347_charger *smb)
 {
 	bool ret;
 
@@ -299,16 +273,17 @@
  */
 static int smb347_charging_status(struct smb347_charger *smb)
 {
+	unsigned int val;
 	int ret;
 
-	if (!smb347_is_online(smb))
+	if (!smb347_is_ps_online(smb))
 		return 0;
 
-	ret = smb347_read(smb, STAT_C);
+	ret = regmap_read(smb->regmap, STAT_C, &val);
 	if (ret < 0)
 		return 0;
 
-	return (ret & STAT_C_CHG_MASK) >> STAT_C_CHG_SHIFT;
+	return (val & STAT_C_CHG_MASK) >> STAT_C_CHG_SHIFT;
 }
 
 static int smb347_charging_set(struct smb347_charger *smb, bool enable)
@@ -316,27 +291,17 @@
 	int ret = 0;
 
 	if (smb->pdata->enable_control != SMB347_CHG_ENABLE_SW) {
-		dev_dbg(&smb->client->dev,
-			"charging enable/disable in SW disabled\n");
+		dev_dbg(smb->dev, "charging enable/disable in SW disabled\n");
 		return 0;
 	}
 
 	mutex_lock(&smb->lock);
 	if (smb->charging_enabled != enable) {
-		ret = smb347_read(smb, CMD_A);
-		if (ret < 0)
-			goto out;
-
-		smb->charging_enabled = enable;
-
-		if (enable)
-			ret |= CMD_A_CHG_ENABLED;
-		else
-			ret &= ~CMD_A_CHG_ENABLED;
-
-		ret = smb347_write(smb, CMD_A, ret);
+		ret = regmap_update_bits(smb->regmap, CMD_A, CMD_A_CHG_ENABLED,
+					 enable ? CMD_A_CHG_ENABLED : 0);
+		if (!ret)
+			smb->charging_enabled = enable;
 	}
-out:
 	mutex_unlock(&smb->lock);
 	return ret;
 }
@@ -351,7 +316,7 @@
 	return smb347_charging_set(smb, false);
 }
 
-static int smb347_update_online(struct smb347_charger *smb)
+static int smb347_start_stop_charging(struct smb347_charger *smb)
 {
 	int ret;
 
@@ -360,16 +325,14 @@
 	 * disable or enable the charging. We do it manually because it
 	 * depends on how the platform has configured the valid inputs.
 	 */
-	if (smb347_is_online(smb)) {
+	if (smb347_is_ps_online(smb)) {
 		ret = smb347_charging_enable(smb);
 		if (ret < 0)
-			dev_err(&smb->client->dev,
-				"failed to enable charging\n");
+			dev_err(smb->dev, "failed to enable charging\n");
 	} else {
 		ret = smb347_charging_disable(smb);
 		if (ret < 0)
-			dev_err(&smb->client->dev,
-				"failed to disable charging\n");
+			dev_err(smb->dev, "failed to disable charging\n");
 	}
 
 	return ret;
@@ -377,112 +340,120 @@
 
 static int smb347_set_charge_current(struct smb347_charger *smb)
 {
-	int ret, val;
-
-	ret = smb347_read(smb, CFG_CHARGE_CURRENT);
-	if (ret < 0)
-		return ret;
+	int ret;
 
 	if (smb->pdata->max_charge_current) {
-		val = current_to_hw(fcc_tbl, ARRAY_SIZE(fcc_tbl),
+		ret = current_to_hw(fcc_tbl, ARRAY_SIZE(fcc_tbl),
 				    smb->pdata->max_charge_current);
-		if (val < 0)
-			return val;
+		if (ret < 0)
+			return ret;
 
-		ret &= ~CFG_CHARGE_CURRENT_FCC_MASK;
-		ret |= val << CFG_CHARGE_CURRENT_FCC_SHIFT;
+		ret = regmap_update_bits(smb->regmap, CFG_CHARGE_CURRENT,
+					 CFG_CHARGE_CURRENT_FCC_MASK,
+					 ret << CFG_CHARGE_CURRENT_FCC_SHIFT);
+		if (ret < 0)
+			return ret;
 	}
 
 	if (smb->pdata->pre_charge_current) {
-		val = current_to_hw(pcc_tbl, ARRAY_SIZE(pcc_tbl),
+		ret = current_to_hw(pcc_tbl, ARRAY_SIZE(pcc_tbl),
 				    smb->pdata->pre_charge_current);
-		if (val < 0)
-			return val;
+		if (ret < 0)
+			return ret;
 
-		ret &= ~CFG_CHARGE_CURRENT_PCC_MASK;
-		ret |= val << CFG_CHARGE_CURRENT_PCC_SHIFT;
+		ret = regmap_update_bits(smb->regmap, CFG_CHARGE_CURRENT,
+					 CFG_CHARGE_CURRENT_PCC_MASK,
+					 ret << CFG_CHARGE_CURRENT_PCC_SHIFT);
+		if (ret < 0)
+			return ret;
 	}
 
 	if (smb->pdata->termination_current) {
-		val = current_to_hw(tc_tbl, ARRAY_SIZE(tc_tbl),
+		ret = current_to_hw(tc_tbl, ARRAY_SIZE(tc_tbl),
 				    smb->pdata->termination_current);
-		if (val < 0)
-			return val;
+		if (ret < 0)
+			return ret;
 
-		ret &= ~CFG_CHARGE_CURRENT_TC_MASK;
-		ret |= val;
+		ret = regmap_update_bits(smb->regmap, CFG_CHARGE_CURRENT,
+					 CFG_CHARGE_CURRENT_TC_MASK, ret);
+		if (ret < 0)
+			return ret;
 	}
 
-	return smb347_write(smb, CFG_CHARGE_CURRENT, ret);
+	return 0;
 }
 
 static int smb347_set_current_limits(struct smb347_charger *smb)
 {
-	int ret, val;
-
-	ret = smb347_read(smb, CFG_CURRENT_LIMIT);
-	if (ret < 0)
-		return ret;
+	int ret;
 
 	if (smb->pdata->mains_current_limit) {
-		val = current_to_hw(icl_tbl, ARRAY_SIZE(icl_tbl),
+		ret = current_to_hw(icl_tbl, ARRAY_SIZE(icl_tbl),
 				    smb->pdata->mains_current_limit);
-		if (val < 0)
-			return val;
+		if (ret < 0)
+			return ret;
 
-		ret &= ~CFG_CURRENT_LIMIT_DC_MASK;
-		ret |= val << CFG_CURRENT_LIMIT_DC_SHIFT;
+		ret = regmap_update_bits(smb->regmap, CFG_CURRENT_LIMIT,
+					 CFG_CURRENT_LIMIT_DC_MASK,
+					 ret << CFG_CURRENT_LIMIT_DC_SHIFT);
+		if (ret < 0)
+			return ret;
 	}
 
 	if (smb->pdata->usb_hc_current_limit) {
-		val = current_to_hw(icl_tbl, ARRAY_SIZE(icl_tbl),
+		ret = current_to_hw(icl_tbl, ARRAY_SIZE(icl_tbl),
 				    smb->pdata->usb_hc_current_limit);
-		if (val < 0)
-			return val;
+		if (ret < 0)
+			return ret;
 
-		ret &= ~CFG_CURRENT_LIMIT_USB_MASK;
-		ret |= val;
+		ret = regmap_update_bits(smb->regmap, CFG_CURRENT_LIMIT,
+					 CFG_CURRENT_LIMIT_USB_MASK, ret);
+		if (ret < 0)
+			return ret;
 	}
 
-	return smb347_write(smb, CFG_CURRENT_LIMIT, ret);
+	return 0;
 }
 
 static int smb347_set_voltage_limits(struct smb347_charger *smb)
 {
-	int ret, val;
-
-	ret = smb347_read(smb, CFG_FLOAT_VOLTAGE);
-	if (ret < 0)
-		return ret;
+	int ret;
 
 	if (smb->pdata->pre_to_fast_voltage) {
-		val = smb->pdata->pre_to_fast_voltage;
+		ret = smb->pdata->pre_to_fast_voltage;
 
 		/* uV */
-		val = clamp_val(val, 2400000, 3000000) - 2400000;
-		val /= 200000;
+		ret = clamp_val(ret, 2400000, 3000000) - 2400000;
+		ret /= 200000;
 
-		ret &= ~CFG_FLOAT_VOLTAGE_THRESHOLD_MASK;
-		ret |= val << CFG_FLOAT_VOLTAGE_THRESHOLD_SHIFT;
+		ret = regmap_update_bits(smb->regmap, CFG_FLOAT_VOLTAGE,
+				CFG_FLOAT_VOLTAGE_THRESHOLD_MASK,
+				ret << CFG_FLOAT_VOLTAGE_THRESHOLD_SHIFT);
+		if (ret < 0)
+			return ret;
 	}
 
 	if (smb->pdata->max_charge_voltage) {
-		val = smb->pdata->max_charge_voltage;
+		ret = smb->pdata->max_charge_voltage;
 
 		/* uV */
-		val = clamp_val(val, 3500000, 4500000) - 3500000;
-		val /= 20000;
+		ret = clamp_val(ret, 3500000, 4500000) - 3500000;
+		ret /= 20000;
 
-		ret |= val;
+		ret = regmap_update_bits(smb->regmap, CFG_FLOAT_VOLTAGE,
+					 CFG_FLOAT_VOLTAGE_FLOAT_MASK, ret);
+		if (ret < 0)
+			return ret;
 	}
 
-	return smb347_write(smb, CFG_FLOAT_VOLTAGE, ret);
+	return 0;
 }
 
 static int smb347_set_temp_limits(struct smb347_charger *smb)
 {
 	bool enable_therm_monitor = false;
-	int ret, val;
+	int ret = 0;
+	int val;
 
 	if (smb->pdata->chip_temp_threshold) {
 		val = smb->pdata->chip_temp_threshold;
@@ -491,22 +462,13 @@
 		val = clamp_val(val, 100, 130) - 100;
 		val /= 10;
 
-		ret = smb347_read(smb, CFG_OTG);
-		if (ret < 0)
-			return ret;
-
-		ret &= ~CFG_OTG_TEMP_THRESHOLD_MASK;
-		ret |= val << CFG_OTG_TEMP_THRESHOLD_SHIFT;
-
-		ret = smb347_write(smb, CFG_OTG, ret);
+		ret = regmap_update_bits(smb->regmap, CFG_OTG,
+					 CFG_OTG_TEMP_THRESHOLD_MASK,
+					 val << CFG_OTG_TEMP_THRESHOLD_SHIFT);
 		if (ret < 0)
 			return ret;
 	}
 
-	ret = smb347_read(smb, CFG_TEMP_LIMIT);
-	if (ret < 0)
-		return ret;
-
 	if (smb->pdata->soft_cold_temp_limit != SMB347_TEMP_USE_DEFAULT) {
 		val = smb->pdata->soft_cold_temp_limit;
 
@@ -515,8 +477,11 @@
 		/* this goes from higher to lower so invert the value */
 		val = ~val & 0x3;
 
-		ret &= ~CFG_TEMP_LIMIT_SOFT_COLD_MASK;
-		ret |= val << CFG_TEMP_LIMIT_SOFT_COLD_SHIFT;
+		ret = regmap_update_bits(smb->regmap, CFG_TEMP_LIMIT,
+					 CFG_TEMP_LIMIT_SOFT_COLD_MASK,
+					 val << CFG_TEMP_LIMIT_SOFT_COLD_SHIFT);
+		if (ret < 0)
+			return ret;
 
 		enable_therm_monitor = true;
 	}
@@ -527,8 +492,11 @@
 		val = clamp_val(val, 40, 55) - 40;
 		val /= 5;
 
-		ret &= ~CFG_TEMP_LIMIT_SOFT_HOT_MASK;
-		ret |= val << CFG_TEMP_LIMIT_SOFT_HOT_SHIFT;
+		ret = regmap_update_bits(smb->regmap, CFG_TEMP_LIMIT,
+					 CFG_TEMP_LIMIT_SOFT_HOT_MASK,
+					 val << CFG_TEMP_LIMIT_SOFT_HOT_SHIFT);
+		if (ret < 0)
+			return ret;
 
 		enable_therm_monitor = true;
 	}
@@ -541,8 +509,11 @@
 		/* this goes from higher to lower so invert the value */
 		val = ~val & 0x3;
 
-		ret &= ~CFG_TEMP_LIMIT_HARD_COLD_MASK;
-		ret |= val << CFG_TEMP_LIMIT_HARD_COLD_SHIFT;
+		ret = regmap_update_bits(smb->regmap, CFG_TEMP_LIMIT,
+					 CFG_TEMP_LIMIT_HARD_COLD_MASK,
+					 val << CFG_TEMP_LIMIT_HARD_COLD_SHIFT);
+		if (ret < 0)
+			return ret;
 
 		enable_therm_monitor = true;
 	}
@@ -553,16 +524,15 @@
 		val = clamp_val(val, 50, 65) - 50;
 		val /= 5;
 
-		ret &= ~CFG_TEMP_LIMIT_HARD_HOT_MASK;
-		ret |= val << CFG_TEMP_LIMIT_HARD_HOT_SHIFT;
+		ret = regmap_update_bits(smb->regmap, CFG_TEMP_LIMIT,
+					 CFG_TEMP_LIMIT_HARD_HOT_MASK,
+					 val << CFG_TEMP_LIMIT_HARD_HOT_SHIFT);
+		if (ret < 0)
+			return ret;
 
 		enable_therm_monitor = true;
 	}
 
-	ret = smb347_write(smb, CFG_TEMP_LIMIT, ret);
-	if (ret < 0)
-		return ret;
-
 	/*
 	 * If any of the temperature limits are set, we also enable the
 	 * thermistor monitoring.
@@ -574,25 +544,15 @@
 	 * depending on the configuration.
 	 */
 	if (enable_therm_monitor) {
-		ret = smb347_read(smb, CFG_THERM);
-		if (ret < 0)
-			return ret;
-
-		ret &= ~CFG_THERM_MONITOR_DISABLED;
-
-		ret = smb347_write(smb, CFG_THERM, ret);
+		ret = regmap_update_bits(smb->regmap, CFG_THERM,
+					 CFG_THERM_MONITOR_DISABLED, 0);
 		if (ret < 0)
 			return ret;
 	}
 
 	if (smb->pdata->suspend_on_hard_temp_limit) {
-		ret = smb347_read(smb, CFG_SYSOK);
-		if (ret < 0)
-			return ret;
-
-		ret &= ~CFG_SYSOK_SUSPEND_HARD_LIMIT_DISABLED;
-
-		ret = smb347_write(smb, CFG_SYSOK, ret);
+		ret = regmap_update_bits(smb->regmap, CFG_SYSOK,
+				 CFG_SYSOK_SUSPEND_HARD_LIMIT_DISABLED, 0);
 		if (ret < 0)
 			return ret;
 	}
@@ -601,17 +561,15 @@
 	    SMB347_SOFT_TEMP_COMPENSATE_DEFAULT) {
 		val = smb->pdata->soft_temp_limit_compensation & 0x3;
 
-		ret = smb347_read(smb, CFG_THERM);
+		ret = regmap_update_bits(smb->regmap, CFG_THERM,
+				 CFG_THERM_SOFT_HOT_COMPENSATION_MASK,
+				 val << CFG_THERM_SOFT_HOT_COMPENSATION_SHIFT);
 		if (ret < 0)
 			return ret;
 
-		ret &= ~CFG_THERM_SOFT_HOT_COMPENSATION_MASK;
-		ret |= val << CFG_THERM_SOFT_HOT_COMPENSATION_SHIFT;
-
-		ret &= ~CFG_THERM_SOFT_COLD_COMPENSATION_MASK;
-		ret |= val << CFG_THERM_SOFT_COLD_COMPENSATION_SHIFT;
-
-		ret = smb347_write(smb, CFG_THERM, ret);
+		ret = regmap_update_bits(smb->regmap, CFG_THERM,
+				 CFG_THERM_SOFT_COLD_COMPENSATION_MASK,
+				 val << CFG_THERM_SOFT_COLD_COMPENSATION_SHIFT);
 		if (ret < 0)
 			return ret;
 	}
@@ -622,14 +580,9 @@
 		if (val < 0)
 			return val;
 
-		ret = smb347_read(smb, CFG_OTG);
-		if (ret < 0)
-			return ret;
-
-		ret &= ~CFG_OTG_CC_COMPENSATION_MASK;
-		ret |= (val & 0x3) << CFG_OTG_CC_COMPENSATION_SHIFT;
-
-		ret = smb347_write(smb, CFG_OTG, ret);
+		ret = regmap_update_bits(smb->regmap, CFG_OTG,
+				CFG_OTG_CC_COMPENSATION_MASK,
+				(val & 0x3) << CFG_OTG_CC_COMPENSATION_SHIFT);
 		if (ret < 0)
 			return ret;
 	}
@@ -648,22 +601,13 @@
  */
 static int smb347_set_writable(struct smb347_charger *smb, bool writable)
 {
-	int ret;
-
-	ret = smb347_read(smb, CMD_A);
-	if (ret < 0)
-		return ret;
-
-	if (writable)
-		ret |= CMD_A_ALLOW_WRITE;
-	else
-		ret &= ~CMD_A_ALLOW_WRITE;
-
-	return smb347_write(smb, CMD_A, ret);
+	return regmap_update_bits(smb->regmap, CMD_A, CMD_A_ALLOW_WRITE,
+				  writable ? CMD_A_ALLOW_WRITE : 0);
 }
 
 static int smb347_hw_init(struct smb347_charger *smb)
 {
+	unsigned int val;
 	int ret;
 
 	ret = smb347_set_writable(smb, true);
@@ -692,34 +636,19 @@
 
 	/* If USB charging is disabled we put the USB in suspend mode */
 	if (!smb->pdata->use_usb) {
-		ret = smb347_read(smb, CMD_A);
-		if (ret < 0)
-			goto fail;
-
-		ret |= CMD_A_SUSPEND_ENABLED;
-
-		ret = smb347_write(smb, CMD_A, ret);
+		ret = regmap_update_bits(smb->regmap, CMD_A,
+					 CMD_A_SUSPEND_ENABLED,
+					 CMD_A_SUSPEND_ENABLED);
 		if (ret < 0)
 			goto fail;
 	}
 
-	ret = smb347_read(smb, CFG_OTHER);
-	if (ret < 0)
-		goto fail;
-
 	/*
 	 * If configured by platform data, we enable hardware Auto-OTG
 	 * support for driving VBUS. Otherwise we disable it.
 	 */
-	ret &= ~CFG_OTHER_RID_MASK;
-	if (smb->pdata->use_usb_otg)
-		ret |= CFG_OTHER_RID_ENABLED_AUTO_OTG;
-
-	ret = smb347_write(smb, CFG_OTHER, ret);
-	if (ret < 0)
-		goto fail;
-
-	ret = smb347_read(smb, CFG_PIN);
+	ret = regmap_update_bits(smb->regmap, CFG_OTHER, CFG_OTHER_RID_MASK,
+		smb->pdata->use_usb_otg ? CFG_OTHER_RID_ENABLED_AUTO_OTG : 0);
 	if (ret < 0)
 		goto fail;
 
@@ -728,32 +657,33 @@
 	 * command register unless pin control is specified in the platform
 	 * data.
 	 */
-	ret &= ~CFG_PIN_EN_CTRL_MASK;
-
 	switch (smb->pdata->enable_control) {
-	case SMB347_CHG_ENABLE_SW:
-		/* Do nothing, 0 means i2c control */
-		break;
 	case SMB347_CHG_ENABLE_PIN_ACTIVE_LOW:
-		ret |= CFG_PIN_EN_CTRL_ACTIVE_LOW;
+		val = CFG_PIN_EN_CTRL_ACTIVE_LOW;
 		break;
 	case SMB347_CHG_ENABLE_PIN_ACTIVE_HIGH:
-		ret |= CFG_PIN_EN_CTRL_ACTIVE_HIGH;
+		val = CFG_PIN_EN_CTRL_ACTIVE_HIGH;
+		break;
+	default:
+		val = 0;
 		break;
 	}
 
+	ret = regmap_update_bits(smb->regmap, CFG_PIN, CFG_PIN_EN_CTRL_MASK,
+				 val);
+	if (ret < 0)
+		goto fail;
+
 	/* Disable Automatic Power Source Detection (APSD) interrupt. */
-	ret &= ~CFG_PIN_EN_APSD_IRQ;
-
-	ret = smb347_write(smb, CFG_PIN, ret);
+	ret = regmap_update_bits(smb->regmap, CFG_PIN, CFG_PIN_EN_APSD_IRQ, 0);
 	if (ret < 0)
 		goto fail;
 
-	ret = smb347_update_status(smb);
+	ret = smb347_update_ps_status(smb);
 	if (ret < 0)
 		goto fail;
 
-	ret = smb347_update_online(smb);
+	ret = smb347_start_stop_charging(smb);
 
 fail:
 	smb347_set_writable(smb, false);
@@ -763,24 +693,25 @@
 static irqreturn_t smb347_interrupt(int irq, void *data)
 {
 	struct smb347_charger *smb = data;
-	int stat_c, irqstat_e, irqstat_c;
-	irqreturn_t ret = IRQ_NONE;
+	unsigned int stat_c, irqstat_e, irqstat_c;
+	bool handled = false;
+	int ret;
 
-	stat_c = smb347_read(smb, STAT_C);
-	if (stat_c < 0) {
-		dev_warn(&smb->client->dev, "reading STAT_C failed\n");
+	ret = regmap_read(smb->regmap, STAT_C, &stat_c);
+	if (ret < 0) {
+		dev_warn(smb->dev, "reading STAT_C failed\n");
 		return IRQ_NONE;
 	}
 
-	irqstat_c = smb347_read(smb, IRQSTAT_C);
-	if (irqstat_c < 0) {
-		dev_warn(&smb->client->dev, "reading IRQSTAT_C failed\n");
+	ret = regmap_read(smb->regmap, IRQSTAT_C, &irqstat_c);
+	if (ret < 0) {
+		dev_warn(smb->dev, "reading IRQSTAT_C failed\n");
 		return IRQ_NONE;
 	}
 
-	irqstat_e = smb347_read(smb, IRQSTAT_E);
-	if (irqstat_e < 0) {
-		dev_warn(&smb->client->dev, "reading IRQSTAT_E failed\n");
+	ret = regmap_read(smb->regmap, IRQSTAT_E, &irqstat_e);
+	if (ret < 0) {
+		dev_warn(smb->dev, "reading IRQSTAT_E failed\n");
 		return IRQ_NONE;
 	}
 
@@ -789,13 +720,11 @@
 	 * disable charging.
 	 */
 	if (stat_c & STAT_C_CHARGER_ERROR) {
-		dev_err(&smb->client->dev,
-			"error in charger, disabling charging\n");
+		dev_err(smb->dev, "error in charger, disabling charging\n");
 
 		smb347_charging_disable(smb);
 		power_supply_changed(&smb->battery);
-
-		ret = IRQ_HANDLED;
+		handled = true;
 	}
 
 	/*
@@ -806,7 +735,7 @@
 	if (irqstat_c & (IRQSTAT_C_TERMINATION_IRQ | IRQSTAT_C_TAPER_IRQ)) {
 		if (irqstat_c & IRQSTAT_C_TERMINATION_STAT)
 			power_supply_changed(&smb->battery);
-		ret = IRQ_HANDLED;
+		handled = true;
 	}
 
 	/*
@@ -814,15 +743,17 @@
 	 * was connected or disconnected.
 	 */
 	if (irqstat_e & (IRQSTAT_E_USBIN_UV_IRQ | IRQSTAT_E_DCIN_UV_IRQ)) {
-		if (smb347_update_status(smb) > 0) {
-			smb347_update_online(smb);
-			power_supply_changed(&smb->mains);
-			power_supply_changed(&smb->usb);
+		if (smb347_update_ps_status(smb) > 0) {
+			smb347_start_stop_charging(smb);
+			if (smb->pdata->use_mains)
+				power_supply_changed(&smb->mains);
+			if (smb->pdata->use_usb)
+				power_supply_changed(&smb->usb);
 		}
-		ret = IRQ_HANDLED;
+		handled = true;
 	}
 
-	return ret;
+	return handled ? IRQ_HANDLED : IRQ_NONE;
 }
 
 static int smb347_irq_set(struct smb347_charger *smb, bool enable)
@@ -839,41 +770,18 @@
 	 *	- termination current reached
 	 *	- charger error
 	 */
-	if (enable) {
-		ret = smb347_write(smb, CFG_FAULT_IRQ, CFG_FAULT_IRQ_DCIN_UV);
-		if (ret < 0)
-			goto fail;
+	ret = regmap_update_bits(smb->regmap, CFG_FAULT_IRQ, 0xff,
+				 enable ? CFG_FAULT_IRQ_DCIN_UV : 0);
+	if (ret < 0)
+		goto fail;
 
-		ret = smb347_write(smb, CFG_STATUS_IRQ,
-				   CFG_STATUS_IRQ_TERMINATION_OR_TAPER);
-		if (ret < 0)
-			goto fail;
+	ret = regmap_update_bits(smb->regmap, CFG_STATUS_IRQ, 0xff,
+			enable ? CFG_STATUS_IRQ_TERMINATION_OR_TAPER : 0);
+	if (ret < 0)
+		goto fail;
 
-		ret = smb347_read(smb, CFG_PIN);
-		if (ret < 0)
-			goto fail;
-
-		ret |= CFG_PIN_EN_CHARGER_ERROR;
-
-		ret = smb347_write(smb, CFG_PIN, ret);
-	} else {
-		ret = smb347_write(smb, CFG_FAULT_IRQ, 0);
-		if (ret < 0)
-			goto fail;
-
-		ret = smb347_write(smb, CFG_STATUS_IRQ, 0);
-		if (ret < 0)
-			goto fail;
-
-		ret = smb347_read(smb, CFG_PIN);
-		if (ret < 0)
-			goto fail;
-
-		ret &= ~CFG_PIN_EN_CHARGER_ERROR;
-
-		ret = smb347_write(smb, CFG_PIN, ret);
-	}
-
+	ret = regmap_update_bits(smb->regmap, CFG_PIN, CFG_PIN_EN_CHARGER_ERROR,
+				 enable ? CFG_PIN_EN_CHARGER_ERROR : 0);
 fail:
 	smb347_set_writable(smb, false);
 	return ret;
@@ -889,18 +797,18 @@
 	return smb347_irq_set(smb, false);
 }
 
-static int smb347_irq_init(struct smb347_charger *smb)
+static int smb347_irq_init(struct smb347_charger *smb,
+			   struct i2c_client *client)
 {
 	const struct smb347_charger_platform_data *pdata = smb->pdata;
 	int ret, irq = gpio_to_irq(pdata->irq_gpio);
 
-	ret = gpio_request_one(pdata->irq_gpio, GPIOF_IN, smb->client->name);
+	ret = gpio_request_one(pdata->irq_gpio, GPIOF_IN, client->name);
 	if (ret < 0)
 		goto fail;
 
 	ret = request_threaded_irq(irq, NULL, smb347_interrupt,
-				   IRQF_TRIGGER_FALLING, smb->client->name,
-				   smb);
+				   IRQF_TRIGGER_FALLING, client->name, smb);
 	if (ret < 0)
 		goto fail_gpio;
 
@@ -912,23 +820,14 @@
 	 * Configure the STAT output to be suitable for interrupts: disable
 	 * all other output (except interrupts) and make it active low.
 	 */
-	ret = smb347_read(smb, CFG_STAT);
-	if (ret < 0)
-		goto fail_readonly;
-
-	ret &= ~CFG_STAT_ACTIVE_HIGH;
-	ret |= CFG_STAT_DISABLED;
-
-	ret = smb347_write(smb, CFG_STAT, ret);
-	if (ret < 0)
-		goto fail_readonly;
-
-	ret = smb347_irq_enable(smb);
+	ret = regmap_update_bits(smb->regmap, CFG_STAT,
+				 CFG_STAT_ACTIVE_HIGH | CFG_STAT_DISABLED,
+				 CFG_STAT_DISABLED);
 	if (ret < 0)
 		goto fail_readonly;
 
 	smb347_set_writable(smb, false);
-	smb->client->irq = irq;
+	client->irq = irq;
 	return 0;
 
 fail_readonly:
@@ -938,7 +837,7 @@
 fail_gpio:
 	gpio_free(pdata->irq_gpio);
 fail:
-	smb->client->irq = 0;
+	client->irq = 0;
 	return ret;
 }
 
@@ -987,13 +886,13 @@
 	const struct smb347_charger_platform_data *pdata = smb->pdata;
 	int ret;
 
-	ret = smb347_update_status(smb);
+	ret = smb347_update_ps_status(smb);
 	if (ret < 0)
 		return ret;
 
 	switch (prop) {
 	case POWER_SUPPLY_PROP_STATUS:
-		if (!smb347_is_online(smb)) {
+		if (!smb347_is_ps_online(smb)) {
 			val->intval = POWER_SUPPLY_STATUS_DISCHARGING;
 			break;
 		}
@@ -1004,7 +903,7 @@
 		break;
 
 	case POWER_SUPPLY_PROP_CHARGE_TYPE:
-		if (!smb347_is_online(smb))
+		if (!smb347_is_ps_online(smb))
 			return -ENODATA;
 
 		/*
@@ -1036,44 +935,6 @@
 		val->intval = pdata->battery_info.voltage_max_design;
 		break;
 
-	case POWER_SUPPLY_PROP_VOLTAGE_NOW:
-		if (!smb347_is_online(smb))
-			return -ENODATA;
-		ret = smb347_read(smb, STAT_A);
-		if (ret < 0)
-			return ret;
-
-		ret &= STAT_A_FLOAT_VOLTAGE_MASK;
-		if (ret > 0x3d)
-			ret = 0x3d;
-
-		val->intval = 3500000 + ret * 20000;
-		break;
-
-	case POWER_SUPPLY_PROP_CURRENT_NOW:
-		if (!smb347_is_online(smb))
-			return -ENODATA;
-
-		ret = smb347_read(smb, STAT_B);
-		if (ret < 0)
-			return ret;
-
-		/*
-		 * The current value is composition of FCC and PCC values
-		 * and we can detect which table to use from bit 5.
-		 */
-		if (ret & 0x20) {
-			val->intval = hw_to_current(fcc_tbl,
-						    ARRAY_SIZE(fcc_tbl),
-						    ret & 7);
-		} else {
-			ret >>= 3;
-			val->intval = hw_to_current(pcc_tbl,
-						    ARRAY_SIZE(pcc_tbl),
-						    ret & 7);
-		}
-		break;
-
 	case POWER_SUPPLY_PROP_CHARGE_FULL_DESIGN:
 		val->intval = pdata->battery_info.charge_full_design;
 		break;
@@ -1095,64 +956,58 @@
 	POWER_SUPPLY_PROP_TECHNOLOGY,
 	POWER_SUPPLY_PROP_VOLTAGE_MIN_DESIGN,
 	POWER_SUPPLY_PROP_VOLTAGE_MAX_DESIGN,
-	POWER_SUPPLY_PROP_VOLTAGE_NOW,
-	POWER_SUPPLY_PROP_CURRENT_NOW,
 	POWER_SUPPLY_PROP_CHARGE_FULL_DESIGN,
 	POWER_SUPPLY_PROP_MODEL_NAME,
 };
 
-static int smb347_debugfs_show(struct seq_file *s, void *data)
+static bool smb347_volatile_reg(struct device *dev, unsigned int reg)
 {
-	struct smb347_charger *smb = s->private;
-	int ret;
-	u8 reg;
-
-	seq_printf(s, "Control registers:\n");
-	seq_printf(s, "==================\n");
-	for (reg = CFG_CHARGE_CURRENT; reg <= CFG_ADDRESS; reg++) {
-		ret = smb347_read(smb, reg);
-		seq_printf(s, "0x%02x:\t0x%02x\n", reg, ret);
-	}
-	seq_printf(s, "\n");
-
-	seq_printf(s, "Command registers:\n");
-	seq_printf(s, "==================\n");
-	ret = smb347_read(smb, CMD_A);
-	seq_printf(s, "0x%02x:\t0x%02x\n", CMD_A, ret);
-	ret = smb347_read(smb, CMD_B);
-	seq_printf(s, "0x%02x:\t0x%02x\n", CMD_B, ret);
-	ret = smb347_read(smb, CMD_C);
-	seq_printf(s, "0x%02x:\t0x%02x\n", CMD_C, ret);
-	seq_printf(s, "\n");
-
-	seq_printf(s, "Interrupt status registers:\n");
-	seq_printf(s, "===========================\n");
-	for (reg = IRQSTAT_A; reg <= IRQSTAT_F; reg++) {
-		ret = smb347_read(smb, reg);
-		seq_printf(s, "0x%02x:\t0x%02x\n", reg, ret);
-	}
-	seq_printf(s, "\n");
-
-	seq_printf(s, "Status registers:\n");
-	seq_printf(s, "=================\n");
-	for (reg = STAT_A; reg <= STAT_E; reg++) {
-		ret = smb347_read(smb, reg);
-		seq_printf(s, "0x%02x:\t0x%02x\n", reg, ret);
+	switch (reg) {
+	case IRQSTAT_A:
+	case IRQSTAT_C:
+	case IRQSTAT_E:
+	case IRQSTAT_F:
+	case STAT_A:
+	case STAT_B:
+	case STAT_C:
+	case STAT_E:
+		return true;
 	}
 
-	return 0;
+	return false;
 }
 
-static int smb347_debugfs_open(struct inode *inode, struct file *file)
+static bool smb347_readable_reg(struct device *dev, unsigned int reg)
 {
-	return single_open(file, smb347_debugfs_show, inode->i_private);
+	switch (reg) {
+	case CFG_CHARGE_CURRENT:
+	case CFG_CURRENT_LIMIT:
+	case CFG_FLOAT_VOLTAGE:
+	case CFG_STAT:
+	case CFG_PIN:
+	case CFG_THERM:
+	case CFG_SYSOK:
+	case CFG_OTHER:
+	case CFG_OTG:
+	case CFG_TEMP_LIMIT:
+	case CFG_FAULT_IRQ:
+	case CFG_STATUS_IRQ:
+	case CFG_ADDRESS:
+	case CMD_A:
+	case CMD_B:
+	case CMD_C:
+		return true;
+	}
+
+	return smb347_volatile_reg(dev, reg);
 }
 
-static const struct file_operations smb347_debugfs_fops = {
-	.open		= smb347_debugfs_open,
-	.read		= seq_read,
-	.llseek		= seq_lseek,
-	.release	= single_release,
+static const struct regmap_config smb347_regmap = {
+	.reg_bits	= 8,
+	.val_bits	= 8,
+	.max_register	= SMB347_MAX_REGISTER,
+	.volatile_reg	= smb347_volatile_reg,
+	.readable_reg	= smb347_readable_reg,
 };
 
 static int smb347_probe(struct i2c_client *client,
@@ -1178,28 +1033,45 @@
 	i2c_set_clientdata(client, smb);
 
 	mutex_init(&smb->lock);
-	smb->client = client;
+	smb->dev = &client->dev;
 	smb->pdata = pdata;
 
+	smb->regmap = devm_regmap_init_i2c(client, &smb347_regmap);
+	if (IS_ERR(smb->regmap))
+		return PTR_ERR(smb->regmap);
+
 	ret = smb347_hw_init(smb);
 	if (ret < 0)
 		return ret;
 
-	smb->mains.name = "smb347-mains";
-	smb->mains.type = POWER_SUPPLY_TYPE_MAINS;
-	smb->mains.get_property = smb347_mains_get_property;
-	smb->mains.properties = smb347_mains_properties;
-	smb->mains.num_properties = ARRAY_SIZE(smb347_mains_properties);
-	smb->mains.supplied_to = battery;
-	smb->mains.num_supplicants = ARRAY_SIZE(battery);
+	if (smb->pdata->use_mains) {
+		smb->mains.name = "smb347-mains";
+		smb->mains.type = POWER_SUPPLY_TYPE_MAINS;
+		smb->mains.get_property = smb347_mains_get_property;
+		smb->mains.properties = smb347_mains_properties;
+		smb->mains.num_properties = ARRAY_SIZE(smb347_mains_properties);
+		smb->mains.supplied_to = battery;
+		smb->mains.num_supplicants = ARRAY_SIZE(battery);
+		ret = power_supply_register(dev, &smb->mains);
+		if (ret < 0)
+			return ret;
+	}
 
-	smb->usb.name = "smb347-usb";
-	smb->usb.type = POWER_SUPPLY_TYPE_USB;
-	smb->usb.get_property = smb347_usb_get_property;
-	smb->usb.properties = smb347_usb_properties;
-	smb->usb.num_properties = ARRAY_SIZE(smb347_usb_properties);
-	smb->usb.supplied_to = battery;
-	smb->usb.num_supplicants = ARRAY_SIZE(battery);
+	if (smb->pdata->use_usb) {
+		smb->usb.name = "smb347-usb";
+		smb->usb.type = POWER_SUPPLY_TYPE_USB;
+		smb->usb.get_property = smb347_usb_get_property;
+		smb->usb.properties = smb347_usb_properties;
+		smb->usb.num_properties = ARRAY_SIZE(smb347_usb_properties);
+		smb->usb.supplied_to = battery;
+		smb->usb.num_supplicants = ARRAY_SIZE(battery);
+		ret = power_supply_register(dev, &smb->usb);
+		if (ret < 0) {
+			if (smb->pdata->use_mains)
+				power_supply_unregister(&smb->mains);
+			return ret;
+		}
+	}
 
 	smb->battery.name = "smb347-battery";
 	smb->battery.type = POWER_SUPPLY_TYPE_BATTERY;
@@ -1207,20 +1079,13 @@
 	smb->battery.properties = smb347_battery_properties;
 	smb->battery.num_properties = ARRAY_SIZE(smb347_battery_properties);
 
-	ret = power_supply_register(dev, &smb->mains);
-	if (ret < 0)
-		return ret;
-
-	ret = power_supply_register(dev, &smb->usb);
-	if (ret < 0) {
-		power_supply_unregister(&smb->mains);
-		return ret;
-	}
 
 	ret = power_supply_register(dev, &smb->battery);
 	if (ret < 0) {
-		power_supply_unregister(&smb->usb);
-		power_supply_unregister(&smb->mains);
+		if (smb->pdata->use_usb)
+			power_supply_unregister(&smb->usb);
+		if (smb->pdata->use_mains)
+			power_supply_unregister(&smb->mains);
 		return ret;
 	}
 
@@ -1229,15 +1094,15 @@
 	 * interrupt support here.
 	 */
 	if (pdata->irq_gpio >= 0) {
-		ret = smb347_irq_init(smb);
+		ret = smb347_irq_init(smb, client);
 		if (ret < 0) {
 			dev_warn(dev, "failed to initialize IRQ: %d\n", ret);
 			dev_warn(dev, "disabling IRQ support\n");
+		} else {
+			smb347_irq_enable(smb);
 		}
 	}
 
-	smb->dentry = debugfs_create_file("smb347-regs", S_IRUSR, NULL, smb,
-					  &smb347_debugfs_fops);
 	return 0;
 }
 
@@ -1245,9 +1110,6 @@
 {
 	struct smb347_charger *smb = i2c_get_clientdata(client);
 
-	if (!IS_ERR_OR_NULL(smb->dentry))
-		debugfs_remove(smb->dentry);
-
 	if (client->irq) {
 		smb347_irq_disable(smb);
 		free_irq(client->irq, smb);
@@ -1255,8 +1117,10 @@
 	}
 
 	power_supply_unregister(&smb->battery);
-	power_supply_unregister(&smb->usb);
-	power_supply_unregister(&smb->mains);
+	if (smb->pdata->use_usb)
+		power_supply_unregister(&smb->usb);
+	if (smb->pdata->use_mains)
+		power_supply_unregister(&smb->mains);
 	return 0;
 }
 
@@ -1275,17 +1139,7 @@
 	.id_table     = smb347_id,
 };
 
-static int __init smb347_init(void)
-{
-	return i2c_add_driver(&smb347_driver);
-}
-module_init(smb347_init);
-
-static void __exit smb347_exit(void)
-{
-	i2c_del_driver(&smb347_driver);
-}
-module_exit(smb347_exit);
+module_i2c_driver(smb347_driver);
 
 MODULE_AUTHOR("Bruce E. Robertson <bruce.e.robertson@intel.com>");
 MODULE_AUTHOR("Mika Westerberg <mika.westerberg@linux.intel.com>");
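The smb347 hunks above convert the driver from open-coded read/modify/write I2C helpers to regmap, with the volatile/readable callbacks steering the register cache. A minimal sketch of the resulting pattern, assuming the driver's regmap handle and the CFG_PIN bit definitions shown above:

#include <linux/regmap.h>

/*
 * Hedged sketch, not part of the patch: regmap_update_bits() performs the
 * read-modify-write in a single call and skips the I2C write when the value
 * is unchanged.  'map', CFG_PIN and CFG_PIN_EN_CHARGER_ERROR stand in for
 * the driver's own regmap handle and register definitions.
 */
static int smb347_set_charger_error_sketch(struct regmap *map, bool enable)
{
	return regmap_update_bits(map, CFG_PIN, CFG_PIN_EN_CHARGER_ERROR,
				  enable ? CFG_PIN_EN_CHARGER_ERROR : 0);
}

This is the same pattern the reworked smb347_irq_set() now uses for CFG_FAULT_IRQ, CFG_STATUS_IRQ and CFG_PIN.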
diff --git a/drivers/power/wm831x_power.c b/drivers/power/wm831x_power.c
index 987332b..fc1ad95 100644
--- a/drivers/power/wm831x_power.c
+++ b/drivers/power/wm831x_power.c
@@ -565,7 +565,7 @@
 			    goto err_usb;
 	}
 
-	irq = platform_get_irq_byname(pdev, "SYSLO");
+	irq = wm831x_irq(wm831x, platform_get_irq_byname(pdev, "SYSLO"));
 	ret = request_threaded_irq(irq, NULL, wm831x_syslo_irq,
 				   IRQF_TRIGGER_RISING, "System power low",
 				   power);
@@ -575,7 +575,7 @@
 		goto err_battery;
 	}
 
-	irq = platform_get_irq_byname(pdev, "PWR SRC");
+	irq = wm831x_irq(wm831x, platform_get_irq_byname(pdev, "PWR SRC"));
 	ret = request_threaded_irq(irq, NULL, wm831x_pwr_src_irq,
 				   IRQF_TRIGGER_RISING, "Power source",
 				   power);
@@ -586,7 +586,9 @@
 	}
 
 	for (i = 0; i < ARRAY_SIZE(wm831x_bat_irqs); i++) {
-		irq = platform_get_irq_byname(pdev, wm831x_bat_irqs[i]);
+		irq = wm831x_irq(wm831x,
+				 platform_get_irq_byname(pdev,
+							 wm831x_bat_irqs[i]));
 		ret = request_threaded_irq(irq, NULL, wm831x_bat_irq,
 					   IRQF_TRIGGER_RISING,
 					   wm831x_bat_irqs[i],
@@ -606,10 +608,10 @@
 		irq = platform_get_irq_byname(pdev, wm831x_bat_irqs[i]);
 		free_irq(irq, power);
 	}
-	irq = platform_get_irq_byname(pdev, "PWR SRC");
+	irq = wm831x_irq(wm831x, platform_get_irq_byname(pdev, "PWR SRC"));
 	free_irq(irq, power);
 err_syslo:
-	irq = platform_get_irq_byname(pdev, "SYSLO");
+	irq = wm831x_irq(wm831x, platform_get_irq_byname(pdev, "SYSLO"));
 	free_irq(irq, power);
 err_battery:
 	if (power->have_battery)
@@ -626,17 +628,20 @@
 static __devexit int wm831x_power_remove(struct platform_device *pdev)
 {
 	struct wm831x_power *wm831x_power = platform_get_drvdata(pdev);
+	struct wm831x *wm831x = wm831x_power->wm831x;
 	int irq, i;
 
 	for (i = 0; i < ARRAY_SIZE(wm831x_bat_irqs); i++) {
-		irq = platform_get_irq_byname(pdev, wm831x_bat_irqs[i]);
+		irq = wm831x_irq(wm831x,
+				 platform_get_irq_byname(pdev,
+							 wm831x_bat_irqs[i]));
 		free_irq(irq, wm831x_power);
 	}
 
-	irq = platform_get_irq_byname(pdev, "PWR SRC");
+	irq = wm831x_irq(wm831x, platform_get_irq_byname(pdev, "PWR SRC"));
 	free_irq(irq, wm831x_power);
 
-	irq = platform_get_irq_byname(pdev, "SYSLO");
+	irq = wm831x_irq(wm831x, platform_get_irq_byname(pdev, "SYSLO"));
 	free_irq(irq, wm831x_power);
 
 	if (wm831x_power->have_battery)
diff --git a/drivers/rapidio/Kconfig b/drivers/rapidio/Kconfig
index bc87192..6194d35 100644
--- a/drivers/rapidio/Kconfig
+++ b/drivers/rapidio/Kconfig
@@ -22,6 +22,20 @@
 	  ports for Input/Output direction to allow other traffic
 	  than Maintenance transfers.
 
+config RAPIDIO_DMA_ENGINE
+	bool "DMA Engine support for RapidIO"
+	depends on RAPIDIO
+	select DMADEVICES
+	select DMA_ENGINE
+	help
+	  Say Y here if you want to use the DMA Engine framework for RapidIO data
+	  transfers to/from target RIO devices. RapidIO uses NREAD and
+	  NWRITE (NWRITE_R, SWRITE) requests to transfer data between local
+	  memory and memory on a remote target device. You need a DMA controller
+	  capable of performing data transfers to/from RapidIO.
+
+	  If you are unsure about this, say Y here.
+
 config RAPIDIO_DEBUG
 	bool "RapidIO subsystem debug messages"
 	depends on RAPIDIO
diff --git a/drivers/rapidio/devices/Makefile b/drivers/rapidio/devices/Makefile
index 3b7b4e2..7b62860 100644
--- a/drivers/rapidio/devices/Makefile
+++ b/drivers/rapidio/devices/Makefile
@@ -3,3 +3,6 @@
 #
 
 obj-$(CONFIG_RAPIDIO_TSI721)	+= tsi721.o
+ifeq ($(CONFIG_RAPIDIO_DMA_ENGINE),y)
+obj-$(CONFIG_RAPIDIO_TSI721)	+= tsi721_dma.o
+endif
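For reference, a hedged example of a kernel config fragment that enables the new code path (option names taken from the Kconfig and Makefile hunks above; DMADEVICES and DMA_ENGINE are pulled in automatically via 'select'):

# illustrative .config fragment, not part of the patch
CONFIG_RAPIDIO=y
CONFIG_RAPIDIO_TSI721=y
CONFIG_RAPIDIO_DMA_ENGINE=y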
diff --git a/drivers/rapidio/devices/tsi721.c b/drivers/rapidio/devices/tsi721.c
index 30d2072..722246c 100644
--- a/drivers/rapidio/devices/tsi721.c
+++ b/drivers/rapidio/devices/tsi721.c
@@ -108,6 +108,7 @@
 			u16 destid, u8 hopcount, u32 offset, int len,
 			u32 *data, int do_wr)
 {
+	void __iomem *regs = priv->regs + TSI721_DMAC_BASE(priv->mdma.ch_id);
 	struct tsi721_dma_desc *bd_ptr;
 	u32 rd_count, swr_ptr, ch_stat;
 	int i, err = 0;
@@ -116,10 +117,9 @@
 	if (offset > (RIO_MAINT_SPACE_SZ - len) || (len != sizeof(u32)))
 		return -EINVAL;
 
-	bd_ptr = priv->bdma[TSI721_DMACH_MAINT].bd_base;
+	bd_ptr = priv->mdma.bd_base;
 
-	rd_count = ioread32(
-			priv->regs + TSI721_DMAC_DRDCNT(TSI721_DMACH_MAINT));
+	rd_count = ioread32(regs + TSI721_DMAC_DRDCNT);
 
 	/* Initialize DMA descriptor */
 	bd_ptr[0].type_id = cpu_to_le32((DTYPE2 << 29) | (op << 19) | destid);
@@ -134,19 +134,18 @@
 	mb();
 
 	/* Start DMA operation */
-	iowrite32(rd_count + 2,
-		priv->regs + TSI721_DMAC_DWRCNT(TSI721_DMACH_MAINT));
-	ioread32(priv->regs + TSI721_DMAC_DWRCNT(TSI721_DMACH_MAINT));
+	iowrite32(rd_count + 2,	regs + TSI721_DMAC_DWRCNT);
+	ioread32(regs + TSI721_DMAC_DWRCNT);
 	i = 0;
 
 	/* Wait until DMA transfer is finished */
-	while ((ch_stat = ioread32(priv->regs +
-		TSI721_DMAC_STS(TSI721_DMACH_MAINT))) & TSI721_DMAC_STS_RUN) {
+	while ((ch_stat = ioread32(regs + TSI721_DMAC_STS))
+							& TSI721_DMAC_STS_RUN) {
 		udelay(1);
 		if (++i >= 5000000) {
 			dev_dbg(&priv->pdev->dev,
 				"%s : DMA[%d] read timeout ch_status=%x\n",
-				__func__, TSI721_DMACH_MAINT, ch_stat);
+				__func__, priv->mdma.ch_id, ch_stat);
 			if (!do_wr)
 				*data = 0xffffffff;
 			err = -EIO;
@@ -162,13 +161,10 @@
 			__func__, ch_stat);
 		dev_dbg(&priv->pdev->dev, "OP=%d : destid=%x hc=%x off=%x\n",
 			do_wr ? MAINT_WR : MAINT_RD, destid, hopcount, offset);
-		iowrite32(TSI721_DMAC_INT_ALL,
-			priv->regs + TSI721_DMAC_INT(TSI721_DMACH_MAINT));
-		iowrite32(TSI721_DMAC_CTL_INIT,
-			priv->regs + TSI721_DMAC_CTL(TSI721_DMACH_MAINT));
+		iowrite32(TSI721_DMAC_INT_ALL, regs + TSI721_DMAC_INT);
+		iowrite32(TSI721_DMAC_CTL_INIT, regs + TSI721_DMAC_CTL);
 		udelay(10);
-		iowrite32(0, priv->regs +
-				TSI721_DMAC_DWRCNT(TSI721_DMACH_MAINT));
+		iowrite32(0, regs + TSI721_DMAC_DWRCNT);
 		udelay(1);
 		if (!do_wr)
 			*data = 0xffffffff;
@@ -184,8 +180,8 @@
 	 * NOTE: Skipping check and clear FIFO entries because we are waiting
 	 * for transfer to be completed.
 	 */
-	swr_ptr = ioread32(priv->regs + TSI721_DMAC_DSWP(TSI721_DMACH_MAINT));
-	iowrite32(swr_ptr, priv->regs + TSI721_DMAC_DSRP(TSI721_DMACH_MAINT));
+	swr_ptr = ioread32(regs + TSI721_DMAC_DSWP);
+	iowrite32(swr_ptr, regs + TSI721_DMAC_DSRP);
 err_out:
 
 	return err;
@@ -541,6 +537,22 @@
 			tsi721_pw_handler(mport);
 	}
 
+#ifdef CONFIG_RAPIDIO_DMA_ENGINE
+	if (dev_int & TSI721_DEV_INT_BDMA_CH) {
+		int ch;
+
+		if (dev_ch_int & TSI721_INT_BDMA_CHAN_M) {
+			dev_dbg(&priv->pdev->dev,
+				"IRQ from DMA channel 0x%08x\n", dev_ch_int);
+
+			for (ch = 0; ch < TSI721_DMA_MAXCH; ch++) {
+				if (!(dev_ch_int & TSI721_INT_BDMA_CHAN(ch)))
+					continue;
+				tsi721_bdma_handler(&priv->bdma[ch]);
+			}
+		}
+	}
+#endif
 	return IRQ_HANDLED;
 }
 
@@ -553,18 +565,26 @@
 		priv->regs + TSI721_SR_CHINT(IDB_QUEUE));
 	iowrite32(TSI721_SR_CHINT_IDBQRCV,
 		priv->regs + TSI721_SR_CHINTE(IDB_QUEUE));
-	iowrite32(TSI721_INT_SR2PC_CHAN(IDB_QUEUE),
-		priv->regs + TSI721_DEV_CHAN_INTE);
 
 	/* Enable SRIO MAC interrupts */
 	iowrite32(TSI721_RIO_EM_DEV_INT_EN_INT,
 		priv->regs + TSI721_RIO_EM_DEV_INT_EN);
 
+	/* Enable interrupts from channels in use */
+#ifdef CONFIG_RAPIDIO_DMA_ENGINE
+	intr = TSI721_INT_SR2PC_CHAN(IDB_QUEUE) |
+		(TSI721_INT_BDMA_CHAN_M &
+		 ~TSI721_INT_BDMA_CHAN(TSI721_DMACH_MAINT));
+#else
+	intr = TSI721_INT_SR2PC_CHAN(IDB_QUEUE);
+#endif
+	iowrite32(intr,	priv->regs + TSI721_DEV_CHAN_INTE);
+
 	if (priv->flags & TSI721_USING_MSIX)
 		intr = TSI721_DEV_INT_SRIO;
 	else
 		intr = TSI721_DEV_INT_SR2PC_CH | TSI721_DEV_INT_SRIO |
-			TSI721_DEV_INT_SMSG_CH;
+			TSI721_DEV_INT_SMSG_CH | TSI721_DEV_INT_BDMA_CH;
 
 	iowrite32(intr, priv->regs + TSI721_DEV_INTE);
 	ioread32(priv->regs + TSI721_DEV_INTE);
@@ -715,12 +735,29 @@
 					TSI721_MSIX_OMSG_INT(i);
 	}
 
+#ifdef CONFIG_RAPIDIO_DMA_ENGINE
+	/*
+	 * Initialize MSI-X entries for Block DMA Engine:
+	 * this driver supports XXX DMA channels
+	 * (one is reserved for SRIO maintenance transactions)
+	 */
+	for (i = 0; i < TSI721_DMA_CHNUM; i++) {
+		entries[TSI721_VECT_DMA0_DONE + i].entry =
+					TSI721_MSIX_DMACH_DONE(i);
+		entries[TSI721_VECT_DMA0_INT + i].entry =
+					TSI721_MSIX_DMACH_INT(i);
+	}
+#endif /* CONFIG_RAPIDIO_DMA_ENGINE */
+
 	err = pci_enable_msix(priv->pdev, entries, ARRAY_SIZE(entries));
 	if (err) {
 		if (err > 0)
 			dev_info(&priv->pdev->dev,
 				 "Only %d MSI-X vectors available, "
 				 "not using MSI-X\n", err);
+		else
+			dev_err(&priv->pdev->dev,
+				"Failed to enable MSI-X (err=%d)\n", err);
 		return err;
 	}
 
@@ -760,6 +797,22 @@
 			 i, pci_name(priv->pdev));
 	}
 
+#ifdef CONFIG_RAPIDIO_DMA_ENGINE
+	for (i = 0; i < TSI721_DMA_CHNUM; i++) {
+		priv->msix[TSI721_VECT_DMA0_DONE + i].vector =
+				entries[TSI721_VECT_DMA0_DONE + i].vector;
+		snprintf(priv->msix[TSI721_VECT_DMA0_DONE + i].irq_name,
+			 IRQ_DEVICE_NAME_MAX, DRV_NAME "-dmad%d@pci:%s",
+			 i, pci_name(priv->pdev));
+
+		priv->msix[TSI721_VECT_DMA0_INT + i].vector =
+				entries[TSI721_VECT_DMA0_INT + i].vector;
+		snprintf(priv->msix[TSI721_VECT_DMA0_INT + i].irq_name,
+			 IRQ_DEVICE_NAME_MAX, DRV_NAME "-dmai%d@pci:%s",
+			 i, pci_name(priv->pdev));
+	}
+#endif /* CONFIG_RAPIDIO_DMA_ENGINE */
+
 	return 0;
 }
 #endif /* CONFIG_PCI_MSI */
@@ -888,20 +941,34 @@
 	priv->idb_base = NULL;
 }
 
-static int tsi721_bdma_ch_init(struct tsi721_device *priv, int chnum)
+/**
+ * tsi721_bdma_maint_init - Initialize maintenance request BDMA channel.
+ * @priv: pointer to tsi721 private data
+ *
+ * Initialize BDMA channel allocated for RapidIO maintenance read/write
+ * request generation
+ * Returns %0 on success or %-ENOMEM on failure.
+ */
+static int tsi721_bdma_maint_init(struct tsi721_device *priv)
 {
 	struct tsi721_dma_desc *bd_ptr;
 	u64		*sts_ptr;
 	dma_addr_t	bd_phys, sts_phys;
 	int		sts_size;
-	int		bd_num = priv->bdma[chnum].bd_num;
+	int		bd_num = 2;
+	void __iomem	*regs;
 
-	dev_dbg(&priv->pdev->dev, "Init Block DMA Engine, CH%d\n", chnum);
+	dev_dbg(&priv->pdev->dev,
+		"Init Block DMA Engine for Maintenance requests, CH%d\n",
+		TSI721_DMACH_MAINT);
 
 	/*
 	 * Initialize DMA channel for maintenance requests
 	 */
 
+	priv->mdma.ch_id = TSI721_DMACH_MAINT;
+	regs = priv->regs + TSI721_DMAC_BASE(TSI721_DMACH_MAINT);
+
 	/* Allocate space for DMA descriptors */
 	bd_ptr = dma_zalloc_coherent(&priv->pdev->dev,
 					bd_num * sizeof(struct tsi721_dma_desc),
@@ -909,8 +976,9 @@
 	if (!bd_ptr)
 		return -ENOMEM;
 
-	priv->bdma[chnum].bd_phys = bd_phys;
-	priv->bdma[chnum].bd_base = bd_ptr;
+	priv->mdma.bd_num = bd_num;
+	priv->mdma.bd_phys = bd_phys;
+	priv->mdma.bd_base = bd_ptr;
 
 	dev_dbg(&priv->pdev->dev, "DMA descriptors @ %p (phys = %llx)\n",
 		bd_ptr, (unsigned long long)bd_phys);
@@ -927,13 +995,13 @@
 		dma_free_coherent(&priv->pdev->dev,
 				  bd_num * sizeof(struct tsi721_dma_desc),
 				  bd_ptr, bd_phys);
-		priv->bdma[chnum].bd_base = NULL;
+		priv->mdma.bd_base = NULL;
 		return -ENOMEM;
 	}
 
-	priv->bdma[chnum].sts_phys = sts_phys;
-	priv->bdma[chnum].sts_base = sts_ptr;
-	priv->bdma[chnum].sts_size = sts_size;
+	priv->mdma.sts_phys = sts_phys;
+	priv->mdma.sts_base = sts_ptr;
+	priv->mdma.sts_size = sts_size;
 
 	dev_dbg(&priv->pdev->dev,
 		"desc status FIFO @ %p (phys = %llx) size=0x%x\n",
@@ -946,83 +1014,61 @@
 	bd_ptr[bd_num - 1].next_hi = cpu_to_le32((u64)bd_phys >> 32);
 
 	/* Setup DMA descriptor pointers */
-	iowrite32(((u64)bd_phys >> 32),
-		priv->regs + TSI721_DMAC_DPTRH(chnum));
+	iowrite32(((u64)bd_phys >> 32),	regs + TSI721_DMAC_DPTRH);
 	iowrite32(((u64)bd_phys & TSI721_DMAC_DPTRL_MASK),
-		priv->regs + TSI721_DMAC_DPTRL(chnum));
+		regs + TSI721_DMAC_DPTRL);
 
 	/* Setup descriptor status FIFO */
-	iowrite32(((u64)sts_phys >> 32),
-		priv->regs + TSI721_DMAC_DSBH(chnum));
+	iowrite32(((u64)sts_phys >> 32), regs + TSI721_DMAC_DSBH);
 	iowrite32(((u64)sts_phys & TSI721_DMAC_DSBL_MASK),
-		priv->regs + TSI721_DMAC_DSBL(chnum));
+		regs + TSI721_DMAC_DSBL);
 	iowrite32(TSI721_DMAC_DSSZ_SIZE(sts_size),
-		priv->regs + TSI721_DMAC_DSSZ(chnum));
+		regs + TSI721_DMAC_DSSZ);
 
 	/* Clear interrupt bits */
-	iowrite32(TSI721_DMAC_INT_ALL,
-		priv->regs + TSI721_DMAC_INT(chnum));
+	iowrite32(TSI721_DMAC_INT_ALL, regs + TSI721_DMAC_INT);
 
-	ioread32(priv->regs + TSI721_DMAC_INT(chnum));
+	ioread32(regs + TSI721_DMAC_INT);
 
 	/* Toggle DMA channel initialization */
-	iowrite32(TSI721_DMAC_CTL_INIT,	priv->regs + TSI721_DMAC_CTL(chnum));
-	ioread32(priv->regs + TSI721_DMAC_CTL(chnum));
+	iowrite32(TSI721_DMAC_CTL_INIT,	regs + TSI721_DMAC_CTL);
+	ioread32(regs + TSI721_DMAC_CTL);
 	udelay(10);
 
 	return 0;
 }
 
-static int tsi721_bdma_ch_free(struct tsi721_device *priv, int chnum)
+static int tsi721_bdma_maint_free(struct tsi721_device *priv)
 {
 	u32 ch_stat;
+	struct tsi721_bdma_maint *mdma = &priv->mdma;
+	void __iomem *regs = priv->regs + TSI721_DMAC_BASE(mdma->ch_id);
 
-	if (priv->bdma[chnum].bd_base == NULL)
+	if (mdma->bd_base == NULL)
 		return 0;
 
 	/* Check if DMA channel still running */
-	ch_stat = ioread32(priv->regs +	TSI721_DMAC_STS(chnum));
+	ch_stat = ioread32(regs + TSI721_DMAC_STS);
 	if (ch_stat & TSI721_DMAC_STS_RUN)
 		return -EFAULT;
 
 	/* Put DMA channel into init state */
-	iowrite32(TSI721_DMAC_CTL_INIT,
-		priv->regs + TSI721_DMAC_CTL(chnum));
+	iowrite32(TSI721_DMAC_CTL_INIT,	regs + TSI721_DMAC_CTL);
 
 	/* Free space allocated for DMA descriptors */
 	dma_free_coherent(&priv->pdev->dev,
-		priv->bdma[chnum].bd_num * sizeof(struct tsi721_dma_desc),
-		priv->bdma[chnum].bd_base, priv->bdma[chnum].bd_phys);
-	priv->bdma[chnum].bd_base = NULL;
+		mdma->bd_num * sizeof(struct tsi721_dma_desc),
+		mdma->bd_base, mdma->bd_phys);
+	mdma->bd_base = NULL;
 
 	/* Free space allocated for status FIFO */
 	dma_free_coherent(&priv->pdev->dev,
-		priv->bdma[chnum].sts_size * sizeof(struct tsi721_dma_sts),
-		priv->bdma[chnum].sts_base, priv->bdma[chnum].sts_phys);
-	priv->bdma[chnum].sts_base = NULL;
+		mdma->sts_size * sizeof(struct tsi721_dma_sts),
+		mdma->sts_base, mdma->sts_phys);
+	mdma->sts_base = NULL;
 	return 0;
 }
 
-static int tsi721_bdma_init(struct tsi721_device *priv)
-{
-	/* Initialize BDMA channel allocated for RapidIO maintenance read/write
-	 * request generation
-	 */
-	priv->bdma[TSI721_DMACH_MAINT].bd_num = 2;
-	if (tsi721_bdma_ch_init(priv, TSI721_DMACH_MAINT)) {
-		dev_err(&priv->pdev->dev, "Unable to initialize maintenance DMA"
-			" channel %d, aborting\n", TSI721_DMACH_MAINT);
-		return -ENOMEM;
-	}
-
-	return 0;
-}
-
-static void tsi721_bdma_free(struct tsi721_device *priv)
-{
-	tsi721_bdma_ch_free(priv, TSI721_DMACH_MAINT);
-}
-
 /* Enable Inbound Messaging Interrupts */
 static void
 tsi721_imsg_interrupt_enable(struct tsi721_device *priv, int ch,
@@ -2035,7 +2081,8 @@
 
 	/* Disable all BDMA Channel interrupts */
 	for (ch = 0; ch < TSI721_DMA_MAXCH; ch++)
-		iowrite32(0, priv->regs + TSI721_DMAC_INTE(ch));
+		iowrite32(0,
+			priv->regs + TSI721_DMAC_BASE(ch) + TSI721_DMAC_INTE);
 
 	/* Disable all general BDMA interrupts */
 	iowrite32(0, priv->regs + TSI721_BDMA_INTE);
@@ -2104,6 +2151,7 @@
 	mport->phy_type = RIO_PHY_SERIAL;
 	mport->priv = (void *)priv;
 	mport->phys_efptr = 0x100;
+	priv->mport = mport;
 
 	INIT_LIST_HEAD(&mport->dbells);
 
@@ -2129,17 +2177,21 @@
 	if (!err) {
 		tsi721_interrupts_init(priv);
 		ops->pwenable = tsi721_pw_enable;
-	} else
+	} else {
 		dev_err(&pdev->dev, "Unable to get assigned PCI IRQ "
 			"vector %02X err=0x%x\n", pdev->irq, err);
+		goto err_exit;
+	}
 
+#ifdef CONFIG_RAPIDIO_DMA_ENGINE
+	tsi721_register_dma(priv);
+#endif
 	/* Enable SRIO link */
 	iowrite32(ioread32(priv->regs + TSI721_DEVCTL) |
 		  TSI721_DEVCTL_SRBOOT_CMPL,
 		  priv->regs + TSI721_DEVCTL);
 
 	rio_register_mport(mport);
-	priv->mport = mport;
 
 	if (mport->host_deviceid >= 0)
 		iowrite32(RIO_PORT_GEN_HOST | RIO_PORT_GEN_MASTER |
@@ -2149,6 +2201,11 @@
 		iowrite32(0, priv->regs + (0x100 + RIO_PORT_GEN_CTL_CSR));
 
 	return 0;
+
+err_exit:
+	kfree(mport);
+	kfree(ops);
+	return err;
 }
 
 static int __devinit tsi721_probe(struct pci_dev *pdev,
@@ -2294,7 +2351,7 @@
 	tsi721_init_pc2sr_mapping(priv);
 	tsi721_init_sr2pc_mapping(priv);
 
-	if (tsi721_bdma_init(priv)) {
+	if (tsi721_bdma_maint_init(priv)) {
 		dev_err(&pdev->dev, "BDMA initialization failed, aborting\n");
 		err = -ENOMEM;
 		goto err_unmap_bars;
@@ -2319,7 +2376,7 @@
 err_free_consistent:
 	tsi721_doorbell_free(priv);
 err_free_bdma:
-	tsi721_bdma_free(priv);
+	tsi721_bdma_maint_free(priv);
 err_unmap_bars:
 	if (priv->regs)
 		iounmap(priv->regs);
diff --git a/drivers/rapidio/devices/tsi721.h b/drivers/rapidio/devices/tsi721.h
index 1c226b3..59de9d7 100644
--- a/drivers/rapidio/devices/tsi721.h
+++ b/drivers/rapidio/devices/tsi721.h
@@ -167,6 +167,8 @@
 #define TSI721_DEV_INTE		0x29840
 #define TSI721_DEV_INT		0x29844
 #define TSI721_DEV_INTSET	0x29848
+#define TSI721_DEV_INT_BDMA_CH	0x00002000
+#define TSI721_DEV_INT_BDMA_NCH	0x00001000
 #define TSI721_DEV_INT_SMSG_CH	0x00000800
 #define TSI721_DEV_INT_SMSG_NCH	0x00000400
 #define TSI721_DEV_INT_SR2PC_CH	0x00000200
@@ -181,6 +183,8 @@
 #define TSI721_INT_IMSG_CHAN(x)	(1 << (16 + (x)))
 #define TSI721_INT_OMSG_CHAN_M	0x0000ff00
 #define TSI721_INT_OMSG_CHAN(x)	(1 << (8 + (x)))
+#define TSI721_INT_BDMA_CHAN_M	0x000000ff
+#define TSI721_INT_BDMA_CHAN(x)	(1 << (x))
 
 /*
  * PC2SR block registers
@@ -235,14 +239,16 @@
  *   x = 0..7
  */
 
-#define TSI721_DMAC_DWRCNT(x)	(0x51000 + (x) * 0x1000)
-#define TSI721_DMAC_DRDCNT(x)	(0x51004 + (x) * 0x1000)
+#define TSI721_DMAC_BASE(x)	(0x51000 + (x) * 0x1000)
 
-#define TSI721_DMAC_CTL(x)	(0x51008 + (x) * 0x1000)
+#define TSI721_DMAC_DWRCNT	0x000
+#define TSI721_DMAC_DRDCNT	0x004
+
+#define TSI721_DMAC_CTL		0x008
 #define TSI721_DMAC_CTL_SUSP	0x00000002
 #define TSI721_DMAC_CTL_INIT	0x00000001
 
-#define TSI721_DMAC_INT(x)	(0x5100c + (x) * 0x1000)
+#define TSI721_DMAC_INT		0x00c
 #define TSI721_DMAC_INT_STFULL	0x00000010
 #define TSI721_DMAC_INT_DONE	0x00000008
 #define TSI721_DMAC_INT_SUSP	0x00000004
@@ -250,34 +256,33 @@
 #define TSI721_DMAC_INT_IOFDONE	0x00000001
 #define TSI721_DMAC_INT_ALL	0x0000001f
 
-#define TSI721_DMAC_INTSET(x)	(0x51010 + (x) * 0x1000)
+#define TSI721_DMAC_INTSET	0x010
 
-#define TSI721_DMAC_STS(x)	(0x51014 + (x) * 0x1000)
+#define TSI721_DMAC_STS		0x014
 #define TSI721_DMAC_STS_ABORT	0x00400000
 #define TSI721_DMAC_STS_RUN	0x00200000
 #define TSI721_DMAC_STS_CS	0x001f0000
 
-#define TSI721_DMAC_INTE(x)	(0x51018 + (x) * 0x1000)
+#define TSI721_DMAC_INTE	0x018
 
-#define TSI721_DMAC_DPTRL(x)	(0x51024 + (x) * 0x1000)
+#define TSI721_DMAC_DPTRL	0x024
 #define TSI721_DMAC_DPTRL_MASK	0xffffffe0
 
-#define TSI721_DMAC_DPTRH(x)	(0x51028 + (x) * 0x1000)
+#define TSI721_DMAC_DPTRH	0x028
 
-#define TSI721_DMAC_DSBL(x)	(0x5102c + (x) * 0x1000)
+#define TSI721_DMAC_DSBL	0x02c
 #define TSI721_DMAC_DSBL_MASK	0xffffffc0
 
-#define TSI721_DMAC_DSBH(x)	(0x51030 + (x) * 0x1000)
+#define TSI721_DMAC_DSBH	0x030
 
-#define TSI721_DMAC_DSSZ(x)	(0x51034 + (x) * 0x1000)
+#define TSI721_DMAC_DSSZ	0x034
 #define TSI721_DMAC_DSSZ_SIZE_M	0x0000000f
 #define TSI721_DMAC_DSSZ_SIZE(size)	(__fls(size) - 4)
 
-
-#define TSI721_DMAC_DSRP(x)	(0x51038 + (x) * 0x1000)
+#define TSI721_DMAC_DSRP	0x038
 #define TSI721_DMAC_DSRP_MASK	0x0007ffff
 
-#define TSI721_DMAC_DSWP(x)	(0x5103c + (x) * 0x1000)
+#define TSI721_DMAC_DSWP	0x03c
 #define TSI721_DMAC_DSWP_MASK	0x0007ffff
 
 #define TSI721_BDMA_INTE	0x5f000
@@ -612,6 +617,8 @@
 #define TSI721_DMACH_MAINT	0	/* DMA channel for maint requests */
 #define TSI721_DMACH_MAINT_NBD	32	/* Number of BDs for maint requests */
 
+#define TSI721_DMACH_DMA	1	/* DMA channel for data transfers */
+
 #define MSG_DMA_ENTRY_INX_TO_SIZE(x)	((0x10 << (x)) & 0xFFFF0)
 
 enum tsi721_smsg_int_flag {
@@ -626,7 +633,48 @@
 
 /* Structures */
 
+#ifdef CONFIG_RAPIDIO_DMA_ENGINE
+
+struct tsi721_tx_desc {
+	struct dma_async_tx_descriptor	txd;
+	struct tsi721_dma_desc		*hw_desc;
+	u16				destid;
+	/* low 64-bits of 66-bit RIO address */
+	u64				rio_addr;
+	/* upper 2-bits of 66-bit RIO address */
+	u8				rio_addr_u;
+	bool				interrupt;
+	struct list_head		desc_node;
+	struct list_head		tx_list;
+};
+
 struct tsi721_bdma_chan {
+	int		id;
+	void __iomem	*regs;
+	int		bd_num;		/* number of buffer descriptors */
+	void		*bd_base;	/* start of DMA descriptors */
+	dma_addr_t	bd_phys;
+	void		*sts_base;	/* start of DMA BD status FIFO */
+	dma_addr_t	sts_phys;
+	int		sts_size;
+	u32		sts_rdptr;
+	u32		wr_count;
+	u32		wr_count_next;
+
+	struct dma_chan		dchan;
+	struct tsi721_tx_desc	*tx_desc;
+	spinlock_t		lock;
+	struct list_head	active_list;
+	struct list_head	queue;
+	struct list_head	free_list;
+	dma_cookie_t		completed_cookie;
+	struct tasklet_struct	tasklet;
+};
+
+#endif /* CONFIG_RAPIDIO_DMA_ENGINE */
+
+struct tsi721_bdma_maint {
+	int		ch_id;		/* BDMA channel number */
 	int		bd_num;		/* number of buffer descriptors */
 	void		*bd_base;	/* start of DMA descriptors */
 	dma_addr_t	bd_phys;
@@ -721,6 +769,24 @@
 	TSI721_VECT_IMB1_INT,
 	TSI721_VECT_IMB2_INT,
 	TSI721_VECT_IMB3_INT,
+#ifdef CONFIG_RAPIDIO_DMA_ENGINE
+	TSI721_VECT_DMA0_DONE,
+	TSI721_VECT_DMA1_DONE,
+	TSI721_VECT_DMA2_DONE,
+	TSI721_VECT_DMA3_DONE,
+	TSI721_VECT_DMA4_DONE,
+	TSI721_VECT_DMA5_DONE,
+	TSI721_VECT_DMA6_DONE,
+	TSI721_VECT_DMA7_DONE,
+	TSI721_VECT_DMA0_INT,
+	TSI721_VECT_DMA1_INT,
+	TSI721_VECT_DMA2_INT,
+	TSI721_VECT_DMA3_INT,
+	TSI721_VECT_DMA4_INT,
+	TSI721_VECT_DMA5_INT,
+	TSI721_VECT_DMA6_INT,
+	TSI721_VECT_DMA7_INT,
+#endif /* CONFIG_RAPIDIO_DMA_ENGINE */
 	TSI721_VECT_MAX
 };
 
@@ -754,7 +820,11 @@
 	u32		pw_discard_count;
 
 	/* BDMA Engine */
+	struct tsi721_bdma_maint mdma; /* Maintenance rd/wr request channel */
+
+#ifdef CONFIG_RAPIDIO_DMA_ENGINE
 	struct tsi721_bdma_chan bdma[TSI721_DMA_CHNUM];
+#endif
 
 	/* Inbound Messaging */
 	int		imsg_init[TSI721_IMSG_CHNUM];
@@ -765,4 +835,9 @@
 	struct tsi721_omsg_ring	omsg_ring[TSI721_OMSG_CHNUM];
 };
 
+#ifdef CONFIG_RAPIDIO_DMA_ENGINE
+extern void tsi721_bdma_handler(struct tsi721_bdma_chan *bdma_chan);
+extern int __devinit tsi721_register_dma(struct tsi721_device *priv);
+#endif
+
 #endif
diff --git a/drivers/rapidio/devices/tsi721_dma.c b/drivers/rapidio/devices/tsi721_dma.c
new file mode 100644
index 0000000..92e06a5
--- /dev/null
+++ b/drivers/rapidio/devices/tsi721_dma.c
@@ -0,0 +1,823 @@
+/*
+ * DMA Engine support for Tsi721 PCI Express-to-SRIO bridge
+ *
+ * Copyright 2011 Integrated Device Technology, Inc.
+ * Alexandre Bounine <alexandre.bounine@idt.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the Free
+ * Software Foundation; either version 2 of the License, or (at your option)
+ * any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc., 59
+ * Temple Place - Suite 330, Boston, MA  02111-1307, USA.
+ */
+
+#include <linux/io.h>
+#include <linux/errno.h>
+#include <linux/init.h>
+#include <linux/ioport.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/pci.h>
+#include <linux/rio.h>
+#include <linux/rio_drv.h>
+#include <linux/dma-mapping.h>
+#include <linux/interrupt.h>
+#include <linux/kfifo.h>
+#include <linux/delay.h>
+
+#include "tsi721.h"
+
+static inline struct tsi721_bdma_chan *to_tsi721_chan(struct dma_chan *chan)
+{
+	return container_of(chan, struct tsi721_bdma_chan, dchan);
+}
+
+static inline struct tsi721_device *to_tsi721(struct dma_device *ddev)
+{
+	return container_of(ddev, struct rio_mport, dma)->priv;
+}
+
+static inline
+struct tsi721_tx_desc *to_tsi721_desc(struct dma_async_tx_descriptor *txd)
+{
+	return container_of(txd, struct tsi721_tx_desc, txd);
+}
+
+static inline
+struct tsi721_tx_desc *tsi721_dma_first_active(
+				struct tsi721_bdma_chan *bdma_chan)
+{
+	return list_first_entry(&bdma_chan->active_list,
+				struct tsi721_tx_desc, desc_node);
+}
+
+static int tsi721_bdma_ch_init(struct tsi721_bdma_chan *bdma_chan)
+{
+	struct tsi721_dma_desc *bd_ptr;
+	struct device *dev = bdma_chan->dchan.device->dev;
+	u64		*sts_ptr;
+	dma_addr_t	bd_phys;
+	dma_addr_t	sts_phys;
+	int		sts_size;
+	int		bd_num = bdma_chan->bd_num;
+
+	dev_dbg(dev, "Init Block DMA Engine, CH%d\n", bdma_chan->id);
+
+	/* Allocate space for DMA descriptors */
+	bd_ptr = dma_zalloc_coherent(dev,
+				bd_num * sizeof(struct tsi721_dma_desc),
+				&bd_phys, GFP_KERNEL);
+	if (!bd_ptr)
+		return -ENOMEM;
+
+	bdma_chan->bd_phys = bd_phys;
+	bdma_chan->bd_base = bd_ptr;
+
+	dev_dbg(dev, "DMA descriptors @ %p (phys = %llx)\n",
+		bd_ptr, (unsigned long long)bd_phys);
+
+	/* Allocate space for descriptor status FIFO */
+	sts_size = (bd_num >= TSI721_DMA_MINSTSSZ) ?
+					bd_num : TSI721_DMA_MINSTSSZ;
+	sts_size = roundup_pow_of_two(sts_size);
+	sts_ptr = dma_zalloc_coherent(dev,
+				     sts_size * sizeof(struct tsi721_dma_sts),
+				     &sts_phys, GFP_KERNEL);
+	if (!sts_ptr) {
+		/* Free space allocated for DMA descriptors */
+		dma_free_coherent(dev,
+				  bd_num * sizeof(struct tsi721_dma_desc),
+				  bd_ptr, bd_phys);
+		bdma_chan->bd_base = NULL;
+		return -ENOMEM;
+	}
+
+	bdma_chan->sts_phys = sts_phys;
+	bdma_chan->sts_base = sts_ptr;
+	bdma_chan->sts_size = sts_size;
+
+	dev_dbg(dev,
+		"desc status FIFO @ %p (phys = %llx) size=0x%x\n",
+		sts_ptr, (unsigned long long)sts_phys, sts_size);
+
+	/* Initialize DMA descriptors ring */
+	bd_ptr[bd_num - 1].type_id = cpu_to_le32(DTYPE3 << 29);
+	bd_ptr[bd_num - 1].next_lo = cpu_to_le32((u64)bd_phys &
+						 TSI721_DMAC_DPTRL_MASK);
+	bd_ptr[bd_num - 1].next_hi = cpu_to_le32((u64)bd_phys >> 32);
+
+	/* Setup DMA descriptor pointers */
+	iowrite32(((u64)bd_phys >> 32),
+		bdma_chan->regs + TSI721_DMAC_DPTRH);
+	iowrite32(((u64)bd_phys & TSI721_DMAC_DPTRL_MASK),
+		bdma_chan->regs + TSI721_DMAC_DPTRL);
+
+	/* Setup descriptor status FIFO */
+	iowrite32(((u64)sts_phys >> 32),
+		bdma_chan->regs + TSI721_DMAC_DSBH);
+	iowrite32(((u64)sts_phys & TSI721_DMAC_DSBL_MASK),
+		bdma_chan->regs + TSI721_DMAC_DSBL);
+	iowrite32(TSI721_DMAC_DSSZ_SIZE(sts_size),
+		bdma_chan->regs + TSI721_DMAC_DSSZ);
+
+	/* Clear interrupt bits */
+	iowrite32(TSI721_DMAC_INT_ALL,
+		bdma_chan->regs + TSI721_DMAC_INT);
+
+	ioread32(bdma_chan->regs + TSI721_DMAC_INT);
+
+	/* Toggle DMA channel initialization */
+	iowrite32(TSI721_DMAC_CTL_INIT,	bdma_chan->regs + TSI721_DMAC_CTL);
+	ioread32(bdma_chan->regs + TSI721_DMAC_CTL);
+	bdma_chan->wr_count = bdma_chan->wr_count_next = 0;
+	bdma_chan->sts_rdptr = 0;
+	udelay(10);
+
+	return 0;
+}
+
+static int tsi721_bdma_ch_free(struct tsi721_bdma_chan *bdma_chan)
+{
+	u32 ch_stat;
+
+	if (bdma_chan->bd_base == NULL)
+		return 0;
+
+	/* Check if DMA channel still running */
+	ch_stat = ioread32(bdma_chan->regs + TSI721_DMAC_STS);
+	if (ch_stat & TSI721_DMAC_STS_RUN)
+		return -EFAULT;
+
+	/* Put DMA channel into init state */
+	iowrite32(TSI721_DMAC_CTL_INIT,	bdma_chan->regs + TSI721_DMAC_CTL);
+
+	/* Free space allocated for DMA descriptors */
+	dma_free_coherent(bdma_chan->dchan.device->dev,
+		bdma_chan->bd_num * sizeof(struct tsi721_dma_desc),
+		bdma_chan->bd_base, bdma_chan->bd_phys);
+	bdma_chan->bd_base = NULL;
+
+	/* Free space allocated for status FIFO */
+	dma_free_coherent(bdma_chan->dchan.device->dev,
+		bdma_chan->sts_size * sizeof(struct tsi721_dma_sts),
+		bdma_chan->sts_base, bdma_chan->sts_phys);
+	bdma_chan->sts_base = NULL;
+	return 0;
+}
+
+static void
+tsi721_bdma_interrupt_enable(struct tsi721_bdma_chan *bdma_chan, int enable)
+{
+	if (enable) {
+		/* Clear pending BDMA channel interrupts */
+		iowrite32(TSI721_DMAC_INT_ALL,
+			bdma_chan->regs + TSI721_DMAC_INT);
+		ioread32(bdma_chan->regs + TSI721_DMAC_INT);
+		/* Enable BDMA channel interrupts */
+		iowrite32(TSI721_DMAC_INT_ALL,
+			bdma_chan->regs + TSI721_DMAC_INTE);
+	} else {
+		/* Disable BDMA channel interrupts */
+		iowrite32(0, bdma_chan->regs + TSI721_DMAC_INTE);
+		/* Clear pending BDMA channel interrupts */
+		iowrite32(TSI721_DMAC_INT_ALL,
+			bdma_chan->regs + TSI721_DMAC_INT);
+	}
+
+}
+
+static bool tsi721_dma_is_idle(struct tsi721_bdma_chan *bdma_chan)
+{
+	u32 sts;
+
+	sts = ioread32(bdma_chan->regs + TSI721_DMAC_STS);
+	return ((sts & TSI721_DMAC_STS_RUN) == 0);
+}
+
+void tsi721_bdma_handler(struct tsi721_bdma_chan *bdma_chan)
+{
+	/* Disable BDMA channel interrupts */
+	iowrite32(0, bdma_chan->regs + TSI721_DMAC_INTE);
+
+	tasklet_schedule(&bdma_chan->tasklet);
+}
+
+#ifdef CONFIG_PCI_MSI
+/**
+ * tsi721_bdma_msix - MSI-X interrupt handler for BDMA channels
+ * @irq: Linux interrupt number
+ * @ptr: Pointer to interrupt-specific data (BDMA channel structure)
+ *
+ * Handles BDMA channel interrupts signaled using MSI-X.
+ */
+static irqreturn_t tsi721_bdma_msix(int irq, void *ptr)
+{
+	struct tsi721_bdma_chan *bdma_chan = ptr;
+
+	tsi721_bdma_handler(bdma_chan);
+	return IRQ_HANDLED;
+}
+#endif /* CONFIG_PCI_MSI */
+
+/* Must be called with the spinlock held */
+static void tsi721_start_dma(struct tsi721_bdma_chan *bdma_chan)
+{
+	if (!tsi721_dma_is_idle(bdma_chan)) {
+		dev_err(bdma_chan->dchan.device->dev,
+			"BUG: Attempt to start non-idle channel\n");
+		return;
+	}
+
+	if (bdma_chan->wr_count == bdma_chan->wr_count_next) {
+		dev_err(bdma_chan->dchan.device->dev,
+			"BUG: Attempt to start DMA with no BDs ready\n");
+		return;
+	}
+
+	dev_dbg(bdma_chan->dchan.device->dev,
+		"tx_chan: %p, chan: %d, regs: %p\n",
+		bdma_chan, bdma_chan->dchan.chan_id, bdma_chan->regs);
+
+	iowrite32(bdma_chan->wr_count_next,
+		bdma_chan->regs + TSI721_DMAC_DWRCNT);
+	ioread32(bdma_chan->regs + TSI721_DMAC_DWRCNT);
+
+	bdma_chan->wr_count = bdma_chan->wr_count_next;
+}
+
+static void tsi721_desc_put(struct tsi721_bdma_chan *bdma_chan,
+			    struct tsi721_tx_desc *desc)
+{
+	dev_dbg(bdma_chan->dchan.device->dev,
+		"Put desc: %p into free list\n", desc);
+
+	if (desc) {
+		spin_lock_bh(&bdma_chan->lock);
+		list_splice_init(&desc->tx_list, &bdma_chan->free_list);
+		list_add(&desc->desc_node, &bdma_chan->free_list);
+		bdma_chan->wr_count_next = bdma_chan->wr_count;
+		spin_unlock_bh(&bdma_chan->lock);
+	}
+}
+
+static
+struct tsi721_tx_desc *tsi721_desc_get(struct tsi721_bdma_chan *bdma_chan)
+{
+	struct tsi721_tx_desc *tx_desc, *_tx_desc;
+	struct tsi721_tx_desc *ret = NULL;
+	int i;
+
+	spin_lock_bh(&bdma_chan->lock);
+	list_for_each_entry_safe(tx_desc, _tx_desc,
+				 &bdma_chan->free_list, desc_node) {
+		if (async_tx_test_ack(&tx_desc->txd)) {
+			list_del(&tx_desc->desc_node);
+			ret = tx_desc;
+			break;
+		}
+		dev_dbg(bdma_chan->dchan.device->dev,
+			"desc %p not ACKed\n", tx_desc);
+	}
+
+	i = bdma_chan->wr_count_next % bdma_chan->bd_num;
+	if (i == bdma_chan->bd_num - 1) {
+		i = 0;
+		bdma_chan->wr_count_next++; /* skip link descriptor */
+	}
+
+	bdma_chan->wr_count_next++;
+	tx_desc->txd.phys = bdma_chan->bd_phys +
+				i * sizeof(struct tsi721_dma_desc);
+	tx_desc->hw_desc = &((struct tsi721_dma_desc *)bdma_chan->bd_base)[i];
+
+	spin_unlock_bh(&bdma_chan->lock);
+
+	return ret;
+}
+
+static int
+tsi721_fill_desc(struct tsi721_bdma_chan *bdma_chan,
+	struct tsi721_tx_desc *desc, struct scatterlist *sg,
+	enum dma_rtype rtype, u32 sys_size)
+{
+	struct tsi721_dma_desc *bd_ptr = desc->hw_desc;
+	u64 rio_addr;
+
+	if (sg_dma_len(sg) > TSI721_DMAD_BCOUNT1 + 1) {
+		dev_err(bdma_chan->dchan.device->dev,
+			"SG element is too large\n");
+		return -EINVAL;
+	}
+
+	dev_dbg(bdma_chan->dchan.device->dev,
+		"desc: 0x%llx, addr: 0x%llx len: 0x%x\n",
+		(u64)desc->txd.phys, (unsigned long long)sg_dma_address(sg),
+		sg_dma_len(sg));
+
+	dev_dbg(bdma_chan->dchan.device->dev,
+		"bd_ptr = %p did=%d raddr=0x%llx\n",
+		bd_ptr, desc->destid, desc->rio_addr);
+
+	/* Initialize DMA descriptor */
+	bd_ptr->type_id = cpu_to_le32((DTYPE1 << 29) |
+					(rtype << 19) | desc->destid);
+	if (desc->interrupt)
+		bd_ptr->type_id |= cpu_to_le32(TSI721_DMAD_IOF);
+	bd_ptr->bcount = cpu_to_le32(((desc->rio_addr & 0x3) << 30) |
+					(sys_size << 26) | sg_dma_len(sg));
+	rio_addr = (desc->rio_addr >> 2) |
+				((u64)(desc->rio_addr_u & 0x3) << 62);
+	bd_ptr->raddr_lo = cpu_to_le32(rio_addr & 0xffffffff);
+	bd_ptr->raddr_hi = cpu_to_le32(rio_addr >> 32);
+	bd_ptr->t1.bufptr_lo = cpu_to_le32(
+					(u64)sg_dma_address(sg) & 0xffffffff);
+	bd_ptr->t1.bufptr_hi = cpu_to_le32((u64)sg_dma_address(sg) >> 32);
+	bd_ptr->t1.s_dist = 0;
+	bd_ptr->t1.s_size = 0;
+
+	return 0;
+}
+
+static void tsi721_dma_chain_complete(struct tsi721_bdma_chan *bdma_chan,
+				      struct tsi721_tx_desc *desc)
+{
+	struct dma_async_tx_descriptor *txd = &desc->txd;
+	dma_async_tx_callback callback = txd->callback;
+	void *param = txd->callback_param;
+
+	list_splice_init(&desc->tx_list, &bdma_chan->free_list);
+	list_move(&desc->desc_node, &bdma_chan->free_list);
+	bdma_chan->completed_cookie = txd->cookie;
+
+	if (callback)
+		callback(param);
+}
+
+static void tsi721_dma_complete_all(struct tsi721_bdma_chan *bdma_chan)
+{
+	struct tsi721_tx_desc *desc, *_d;
+	LIST_HEAD(list);
+
+	BUG_ON(!tsi721_dma_is_idle(bdma_chan));
+
+	if (!list_empty(&bdma_chan->queue))
+		tsi721_start_dma(bdma_chan);
+
+	list_splice_init(&bdma_chan->active_list, &list);
+	list_splice_init(&bdma_chan->queue, &bdma_chan->active_list);
+
+	list_for_each_entry_safe(desc, _d, &list, desc_node)
+		tsi721_dma_chain_complete(bdma_chan, desc);
+}
+
+static void tsi721_clr_stat(struct tsi721_bdma_chan *bdma_chan)
+{
+	u32 srd_ptr;
+	u64 *sts_ptr;
+	int i, j;
+
+	/* Check and clear descriptor status FIFO entries */
+	srd_ptr = bdma_chan->sts_rdptr;
+	sts_ptr = bdma_chan->sts_base;
+	j = srd_ptr * 8;
+	while (sts_ptr[j]) {
+		for (i = 0; i < 8 && sts_ptr[j]; i++, j++)
+			sts_ptr[j] = 0;
+
+		++srd_ptr;
+		srd_ptr %= bdma_chan->sts_size;
+		j = srd_ptr * 8;
+	}
+
+	iowrite32(srd_ptr, bdma_chan->regs + TSI721_DMAC_DSRP);
+	bdma_chan->sts_rdptr = srd_ptr;
+}
+
+static void tsi721_advance_work(struct tsi721_bdma_chan *bdma_chan)
+{
+	if (list_empty(&bdma_chan->active_list) ||
+		list_is_singular(&bdma_chan->active_list)) {
+		dev_dbg(bdma_chan->dchan.device->dev,
+			"%s: Active_list empty\n", __func__);
+		tsi721_dma_complete_all(bdma_chan);
+	} else {
+		dev_dbg(bdma_chan->dchan.device->dev,
+			"%s: Active_list NOT empty\n", __func__);
+		tsi721_dma_chain_complete(bdma_chan,
+					tsi721_dma_first_active(bdma_chan));
+		tsi721_start_dma(bdma_chan);
+	}
+}
+
+static void tsi721_dma_tasklet(unsigned long data)
+{
+	struct tsi721_bdma_chan *bdma_chan = (struct tsi721_bdma_chan *)data;
+	u32 dmac_int, dmac_sts;
+
+	dmac_int = ioread32(bdma_chan->regs + TSI721_DMAC_INT);
+	dev_dbg(bdma_chan->dchan.device->dev, "%s: DMAC%d_INT = 0x%x\n",
+		__func__, bdma_chan->id, dmac_int);
+	/* Clear channel interrupts */
+	iowrite32(dmac_int, bdma_chan->regs + TSI721_DMAC_INT);
+
+	if (dmac_int & TSI721_DMAC_INT_ERR) {
+		dmac_sts = ioread32(bdma_chan->regs + TSI721_DMAC_STS);
+		dev_err(bdma_chan->dchan.device->dev,
+			"%s: DMA ERROR - DMAC%d_STS = 0x%x\n",
+			__func__, bdma_chan->id, dmac_sts);
+	}
+
+	if (dmac_int & TSI721_DMAC_INT_STFULL) {
+		dev_err(bdma_chan->dchan.device->dev,
+			"%s: DMAC%d descriptor status FIFO is full\n",
+			__func__, bdma_chan->id);
+	}
+
+	if (dmac_int & (TSI721_DMAC_INT_DONE | TSI721_DMAC_INT_IOFDONE)) {
+		tsi721_clr_stat(bdma_chan);
+		spin_lock(&bdma_chan->lock);
+		tsi721_advance_work(bdma_chan);
+		spin_unlock(&bdma_chan->lock);
+	}
+
+	/* Re-Enable BDMA channel interrupts */
+	iowrite32(TSI721_DMAC_INT_ALL, bdma_chan->regs + TSI721_DMAC_INTE);
+}
+
+static dma_cookie_t tsi721_tx_submit(struct dma_async_tx_descriptor *txd)
+{
+	struct tsi721_tx_desc *desc = to_tsi721_desc(txd);
+	struct tsi721_bdma_chan *bdma_chan = to_tsi721_chan(txd->chan);
+	dma_cookie_t cookie;
+
+	spin_lock_bh(&bdma_chan->lock);
+
+	cookie = txd->chan->cookie;
+	if (++cookie < 0)
+		cookie = 1;
+	txd->chan->cookie = cookie;
+	txd->cookie = cookie;
+
+	if (list_empty(&bdma_chan->active_list)) {
+		list_add_tail(&desc->desc_node, &bdma_chan->active_list);
+		tsi721_start_dma(bdma_chan);
+	} else {
+		list_add_tail(&desc->desc_node, &bdma_chan->queue);
+	}
+
+	spin_unlock_bh(&bdma_chan->lock);
+	return cookie;
+}
+
+static int tsi721_alloc_chan_resources(struct dma_chan *dchan)
+{
+	struct tsi721_bdma_chan *bdma_chan = to_tsi721_chan(dchan);
+#ifdef CONFIG_PCI_MSI
+	struct tsi721_device *priv = to_tsi721(dchan->device);
+#endif
+	struct tsi721_tx_desc *desc = NULL;
+	LIST_HEAD(tmp_list);
+	int i;
+	int rc;
+
+	if (bdma_chan->bd_base)
+		return bdma_chan->bd_num - 1;
+
+	/* Initialize BDMA channel */
+	if (tsi721_bdma_ch_init(bdma_chan)) {
+		dev_err(dchan->device->dev, "Unable to initialize data DMA"
+			" channel %d, aborting\n", bdma_chan->id);
+		return -ENOMEM;
+	}
+
+	/* Allocate matching number of logical descriptors */
+	desc = kcalloc((bdma_chan->bd_num - 1), sizeof(struct tsi721_tx_desc),
+			GFP_KERNEL);
+	if (!desc) {
+		dev_err(dchan->device->dev,
+			"Failed to allocate logical descriptors\n");
+		rc = -ENOMEM;
+		goto err_out;
+	}
+
+	bdma_chan->tx_desc = desc;
+
+	for (i = 0; i < bdma_chan->bd_num - 1; i++) {
+		dma_async_tx_descriptor_init(&desc[i].txd, dchan);
+		desc[i].txd.tx_submit = tsi721_tx_submit;
+		desc[i].txd.flags = DMA_CTRL_ACK;
+		INIT_LIST_HEAD(&desc[i].tx_list);
+		list_add_tail(&desc[i].desc_node, &tmp_list);
+	}
+
+	spin_lock_bh(&bdma_chan->lock);
+	list_splice(&tmp_list, &bdma_chan->free_list);
+	bdma_chan->completed_cookie = dchan->cookie = 1;
+	spin_unlock_bh(&bdma_chan->lock);
+
+#ifdef CONFIG_PCI_MSI
+	if (priv->flags & TSI721_USING_MSIX) {
+		/* Request interrupt service if we are in MSI-X mode */
+		rc = request_irq(
+			priv->msix[TSI721_VECT_DMA0_DONE +
+				   bdma_chan->id].vector,
+			tsi721_bdma_msix, 0,
+			priv->msix[TSI721_VECT_DMA0_DONE +
+				   bdma_chan->id].irq_name,
+			(void *)bdma_chan);
+
+		if (rc) {
+			dev_dbg(dchan->device->dev,
+				"Unable to allocate MSI-X interrupt for "
+				"BDMA%d-DONE\n", bdma_chan->id);
+			goto err_out;
+		}
+
+		rc = request_irq(priv->msix[TSI721_VECT_DMA0_INT +
+					    bdma_chan->id].vector,
+				tsi721_bdma_msix, 0,
+				priv->msix[TSI721_VECT_DMA0_INT +
+					   bdma_chan->id].irq_name,
+				(void *)bdma_chan);
+
+		if (rc)	{
+			dev_dbg(dchan->device->dev,
+				"Unable to allocate MSI-X interrupt for "
+				"BDMA%d-INT\n", bdma_chan->id);
+			free_irq(
+				priv->msix[TSI721_VECT_DMA0_DONE +
+					   bdma_chan->id].vector,
+				(void *)bdma_chan);
+			rc = -EIO;
+			goto err_out;
+		}
+	}
+#endif /* CONFIG_PCI_MSI */
+
+	tasklet_enable(&bdma_chan->tasklet);
+	tsi721_bdma_interrupt_enable(bdma_chan, 1);
+
+	return bdma_chan->bd_num - 1;
+
+err_out:
+	kfree(desc);
+	tsi721_bdma_ch_free(bdma_chan);
+	return rc;
+}
+
+static void tsi721_free_chan_resources(struct dma_chan *dchan)
+{
+	struct tsi721_bdma_chan *bdma_chan = to_tsi721_chan(dchan);
+#ifdef CONFIG_PCI_MSI
+	struct tsi721_device *priv = to_tsi721(dchan->device);
+#endif
+	LIST_HEAD(list);
+
+	dev_dbg(dchan->device->dev, "%s: Entry\n", __func__);
+
+	if (bdma_chan->bd_base == NULL)
+		return;
+
+	BUG_ON(!list_empty(&bdma_chan->active_list));
+	BUG_ON(!list_empty(&bdma_chan->queue));
+
+	tasklet_disable(&bdma_chan->tasklet);
+
+	spin_lock_bh(&bdma_chan->lock);
+	list_splice_init(&bdma_chan->free_list, &list);
+	spin_unlock_bh(&bdma_chan->lock);
+
+	tsi721_bdma_interrupt_enable(bdma_chan, 0);
+
+#ifdef CONFIG_PCI_MSI
+	if (priv->flags & TSI721_USING_MSIX) {
+		free_irq(priv->msix[TSI721_VECT_DMA0_DONE +
+				    bdma_chan->id].vector, (void *)bdma_chan);
+		free_irq(priv->msix[TSI721_VECT_DMA0_INT +
+				    bdma_chan->id].vector, (void *)bdma_chan);
+	}
+#endif /* CONFIG_PCI_MSI */
+
+	tsi721_bdma_ch_free(bdma_chan);
+	kfree(bdma_chan->tx_desc);
+}
+
+static
+enum dma_status tsi721_tx_status(struct dma_chan *dchan, dma_cookie_t cookie,
+				 struct dma_tx_state *txstate)
+{
+	struct tsi721_bdma_chan *bdma_chan = to_tsi721_chan(dchan);
+	dma_cookie_t		last_used;
+	dma_cookie_t		last_completed;
+	int			ret;
+
+	spin_lock_bh(&bdma_chan->lock);
+	last_completed = bdma_chan->completed_cookie;
+	last_used = dchan->cookie;
+	spin_unlock_bh(&bdma_chan->lock);
+
+	ret = dma_async_is_complete(cookie, last_completed, last_used);
+
+	dma_set_tx_state(txstate, last_completed, last_used, 0);
+
+	dev_dbg(dchan->device->dev,
+		"%s: exit, ret: %d, last_completed: %d, last_used: %d\n",
+		__func__, ret, last_completed, last_used);
+
+	return ret;
+}
+
+static void tsi721_issue_pending(struct dma_chan *dchan)
+{
+	struct tsi721_bdma_chan *bdma_chan = to_tsi721_chan(dchan);
+
+	dev_dbg(dchan->device->dev, "%s: Entry\n", __func__);
+
+	if (tsi721_dma_is_idle(bdma_chan)) {
+		spin_lock_bh(&bdma_chan->lock);
+		tsi721_advance_work(bdma_chan);
+		spin_unlock_bh(&bdma_chan->lock);
+	} else
+		dev_dbg(dchan->device->dev,
+			"%s: DMA channel still busy\n", __func__);
+}
+
+static
+struct dma_async_tx_descriptor *tsi721_prep_rio_sg(struct dma_chan *dchan,
+			struct scatterlist *sgl, unsigned int sg_len,
+			enum dma_transfer_direction dir, unsigned long flags,
+			void *tinfo)
+{
+	struct tsi721_bdma_chan *bdma_chan = to_tsi721_chan(dchan);
+	struct tsi721_tx_desc *desc = NULL;
+	struct tsi721_tx_desc *first = NULL;
+	struct scatterlist *sg;
+	struct rio_dma_ext *rext = tinfo;
+	u64 rio_addr = rext->rio_addr; /* limited to 64-bit rio_addr for now */
+	unsigned int i;
+	u32 sys_size = dma_to_mport(dchan->device)->sys_size;
+	enum dma_rtype rtype;
+
+	if (!sgl || !sg_len) {
+		dev_err(dchan->device->dev, "%s: No SG list\n", __func__);
+		return NULL;
+	}
+
+	if (dir == DMA_DEV_TO_MEM)
+		rtype = NREAD;
+	else if (dir == DMA_MEM_TO_DEV) {
+		switch (rext->wr_type) {
+		case RDW_ALL_NWRITE:
+			rtype = ALL_NWRITE;
+			break;
+		case RDW_ALL_NWRITE_R:
+			rtype = ALL_NWRITE_R;
+			break;
+		case RDW_LAST_NWRITE_R:
+		default:
+			rtype = LAST_NWRITE_R;
+			break;
+		}
+	} else {
+		dev_err(dchan->device->dev,
+			"%s: Unsupported DMA direction option\n", __func__);
+		return NULL;
+	}
+
+	for_each_sg(sgl, sg, sg_len, i) {
+		int err;
+
+		dev_dbg(dchan->device->dev, "%s: sg #%d\n", __func__, i);
+		desc = tsi721_desc_get(bdma_chan);
+		if (!desc) {
+			dev_err(dchan->device->dev,
+				"Not enough descriptors available\n");
+			goto err_desc_get;
+		}
+
+		if (sg_is_last(sg))
+			desc->interrupt = (flags & DMA_PREP_INTERRUPT) != 0;
+		else
+			desc->interrupt = false;
+
+		desc->destid = rext->destid;
+		desc->rio_addr = rio_addr;
+		desc->rio_addr_u = 0;
+
+		err = tsi721_fill_desc(bdma_chan, desc, sg, rtype, sys_size);
+		if (err) {
+			dev_err(dchan->device->dev,
+				"Failed to build desc: %d\n", err);
+			goto err_desc_get;
+		}
+
+		rio_addr += sg_dma_len(sg);
+
+		if (!first)
+			first = desc;
+		else
+			list_add_tail(&desc->desc_node, &first->tx_list);
+	}
+
+	first->txd.cookie = -EBUSY;
+	desc->txd.flags = flags;
+
+	return &first->txd;
+
+err_desc_get:
+	tsi721_desc_put(bdma_chan, first);
+	return NULL;
+}
+
+static int tsi721_device_control(struct dma_chan *dchan, enum dma_ctrl_cmd cmd,
+			     unsigned long arg)
+{
+	struct tsi721_bdma_chan *bdma_chan = to_tsi721_chan(dchan);
+	struct tsi721_tx_desc *desc, *_d;
+	LIST_HEAD(list);
+
+	dev_dbg(dchan->device->dev, "%s: Entry\n", __func__);
+
+	if (cmd != DMA_TERMINATE_ALL)
+		return -ENXIO;
+
+	spin_lock_bh(&bdma_chan->lock);
+
+	/* make sure to stop the transfer */
+	iowrite32(TSI721_DMAC_CTL_SUSP, bdma_chan->regs + TSI721_DMAC_CTL);
+
+	list_splice_init(&bdma_chan->active_list, &list);
+	list_splice_init(&bdma_chan->queue, &list);
+
+	list_for_each_entry_safe(desc, _d, &list, desc_node)
+		tsi721_dma_chain_complete(bdma_chan, desc);
+
+	spin_unlock_bh(&bdma_chan->lock);
+
+	return 0;
+}
+
+int __devinit tsi721_register_dma(struct tsi721_device *priv)
+{
+	int i;
+	int nr_channels = TSI721_DMA_MAXCH;
+	int err;
+	struct rio_mport *mport = priv->mport;
+
+	mport->dma.dev = &priv->pdev->dev;
+	mport->dma.chancnt = nr_channels;
+
+	INIT_LIST_HEAD(&mport->dma.channels);
+
+	for (i = 0; i < nr_channels; i++) {
+		struct tsi721_bdma_chan *bdma_chan = &priv->bdma[i];
+
+		if (i == TSI721_DMACH_MAINT)
+			continue;
+
+		bdma_chan->bd_num = 64;
+		bdma_chan->regs = priv->regs + TSI721_DMAC_BASE(i);
+
+		bdma_chan->dchan.device = &mport->dma;
+		bdma_chan->dchan.cookie = 1;
+		bdma_chan->dchan.chan_id = i;
+		bdma_chan->id = i;
+
+		spin_lock_init(&bdma_chan->lock);
+
+		INIT_LIST_HEAD(&bdma_chan->active_list);
+		INIT_LIST_HEAD(&bdma_chan->queue);
+		INIT_LIST_HEAD(&bdma_chan->free_list);
+
+		tasklet_init(&bdma_chan->tasklet, tsi721_dma_tasklet,
+			     (unsigned long)bdma_chan);
+		tasklet_disable(&bdma_chan->tasklet);
+		list_add_tail(&bdma_chan->dchan.device_node,
+			      &mport->dma.channels);
+	}
+
+	dma_cap_zero(mport->dma.cap_mask);
+	dma_cap_set(DMA_PRIVATE, mport->dma.cap_mask);
+	dma_cap_set(DMA_SLAVE, mport->dma.cap_mask);
+
+	mport->dma.device_alloc_chan_resources = tsi721_alloc_chan_resources;
+	mport->dma.device_free_chan_resources = tsi721_free_chan_resources;
+	mport->dma.device_tx_status = tsi721_tx_status;
+	mport->dma.device_issue_pending = tsi721_issue_pending;
+	mport->dma.device_prep_slave_sg = tsi721_prep_rio_sg;
+	mport->dma.device_control = tsi721_device_control;
+
+	err = dma_async_device_register(&mport->dma);
+	if (err)
+		dev_err(&priv->pdev->dev, "Failed to register DMA device\n");
+
+	return err;
+}
diff --git a/drivers/rapidio/rio.c b/drivers/rapidio/rio.c
index 86c9a09..c40665a 100644
--- a/drivers/rapidio/rio.c
+++ b/drivers/rapidio/rio.c
@@ -1121,6 +1121,87 @@
 	return 0;
 }
 
+#ifdef CONFIG_RAPIDIO_DMA_ENGINE
+
+static bool rio_chan_filter(struct dma_chan *chan, void *arg)
+{
+	struct rio_dev *rdev = arg;
+
+	/* Check that DMA device belongs to the right MPORT */
+	return (rdev->net->hport ==
+		container_of(chan->device, struct rio_mport, dma));
+}
+
+/**
+ * rio_request_dma - request RapidIO capable DMA channel that supports
+ *   specified target RapidIO device.
+ * @rdev: RIO device control structure
+ *
+ * Returns pointer to allocated DMA channel or NULL if failed.
+ */
+struct dma_chan *rio_request_dma(struct rio_dev *rdev)
+{
+	dma_cap_mask_t mask;
+	struct dma_chan *dchan;
+
+	dma_cap_zero(mask);
+	dma_cap_set(DMA_SLAVE, mask);
+	dchan = dma_request_channel(mask, rio_chan_filter, rdev);
+
+	return dchan;
+}
+EXPORT_SYMBOL_GPL(rio_request_dma);
+
+/**
+ * rio_release_dma - release specified DMA channel
+ * @dchan: DMA channel to release
+ */
+void rio_release_dma(struct dma_chan *dchan)
+{
+	dma_release_channel(dchan);
+}
+EXPORT_SYMBOL_GPL(rio_release_dma);
+
+/**
+ * rio_dma_prep_slave_sg - RapidIO specific wrapper
+ *   for device_prep_slave_sg callback defined by DMAENGINE.
+ * @rdev: RIO device control structure
+ * @dchan: DMA channel to configure
+ * @data: RIO specific data descriptor
+ * @direction: DMA data transfer direction (TO or FROM the device)
+ * @flags: dmaengine defined flags
+ *
+ * Initializes RapidIO capable DMA channel for the specified data transfer.
+ * Uses DMA channel private extension to pass information related to remote
+ * target RIO device.
+ * Returns pointer to DMA transaction descriptor or NULL if failed.
+ */
+struct dma_async_tx_descriptor *rio_dma_prep_slave_sg(struct rio_dev *rdev,
+	struct dma_chan *dchan, struct rio_dma_data *data,
+	enum dma_transfer_direction direction, unsigned long flags)
+{
+	struct dma_async_tx_descriptor *txd = NULL;
+	struct rio_dma_ext rio_ext;
+
+	if (dchan->device->device_prep_slave_sg == NULL) {
+		pr_err("%s: prep_rio_sg == NULL\n", __func__);
+		return NULL;
+	}
+
+	rio_ext.destid = rdev->destid;
+	rio_ext.rio_addr_u = data->rio_addr_u;
+	rio_ext.rio_addr = data->rio_addr;
+	rio_ext.wr_type = data->wr_type;
+
+	txd = dmaengine_prep_rio_sg(dchan, data->sg, data->sg_len,
+					direction, flags, &rio_ext);
+
+	return txd;
+}
+EXPORT_SYMBOL_GPL(rio_dma_prep_slave_sg);
+
+#endif /* CONFIG_RAPIDIO_DMA_ENGINE */
+
 static void rio_fixup_device(struct rio_dev *dev)
 {
 }
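The rio.c additions above are thin wrappers over the DMA engine API. Below is a hedged usage sketch, not part of this patch, of how a RapidIO device driver might drive them; the function name and parameters are hypothetical, the scatterlist is assumed to be DMA-mapped already, and completion handling is elided:

#include <linux/dmaengine.h>
#include <linux/scatterlist.h>
#include <linux/rio.h>
#include <linux/rio_drv.h>

/*
 * Hedged sketch: RDW_ALL_NWRITE is assumed from the tsi721_prep_rio_sg()
 * switch above; a real driver would also wait for completion via the
 * descriptor callback or dma_async_is_tx_complete().
 */
static int example_rio_nwrite(struct rio_dev *my_rdev, struct scatterlist *sgl,
			      unsigned int sg_len, u64 rio_addr)
{
	struct rio_dma_data data;
	struct dma_async_tx_descriptor *txd;
	struct dma_chan *dchan;
	dma_cookie_t cookie;

	dchan = rio_request_dma(my_rdev);	/* channel on the matching mport */
	if (!dchan)
		return -ENODEV;

	data.sg = sgl;				/* already DMA-mapped */
	data.sg_len = sg_len;
	data.rio_addr = rio_addr;		/* low 64 bits of the RIO address */
	data.rio_addr_u = 0;
	data.wr_type = RDW_ALL_NWRITE;

	txd = rio_dma_prep_slave_sg(my_rdev, dchan, &data, DMA_MEM_TO_DEV,
				    DMA_PREP_INTERRUPT);
	if (!txd) {
		rio_release_dma(dchan);
		return -EIO;
	}

	cookie = dmaengine_submit(txd);		/* queue the transaction */
	dma_async_issue_pending(dchan);		/* start the hardware */

	/* ... completion handling elided ... */

	rio_release_dma(dchan);
	return dma_submit_error(cookie) ? -EIO : 0;
}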
diff --git a/drivers/regulator/ab8500.c b/drivers/regulator/ab8500.c
index e1b8c54..a739f5c 100644
--- a/drivers/regulator/ab8500.c
+++ b/drivers/regulator/ab8500.c
@@ -794,17 +794,17 @@
 }
 
 static struct of_regulator_match ab8500_regulator_matches[] = {
-	{ .name	= "LDO-AUX1",    .driver_data = (void *) AB8500_LDO_AUX1, },
-	{ .name	= "LDO-AUX2",    .driver_data = (void *) AB8500_LDO_AUX2, },
-	{ .name	= "LDO-AUX3",    .driver_data = (void *) AB8500_LDO_AUX3, },
-	{ .name	= "LDO-INTCORE", .driver_data = (void *) AB8500_LDO_INTCORE, },
-	{ .name	= "LDO-TVOUT",   .driver_data = (void *) AB8500_LDO_TVOUT, },
-	{ .name = "LDO-USB",     .driver_data = (void *) AB8500_LDO_USB, },
-	{ .name = "LDO-AUDIO",   .driver_data = (void *) AB8500_LDO_AUDIO, },
-	{ .name	= "LDO-ANAMIC1", .driver_data = (void *) AB8500_LDO_ANAMIC1, },
-	{ .name	= "LDO-ANAMIC2", .driver_data = (void *) AB8500_LDO_ANAMIC2, },
-	{ .name	= "LDO-DMIC",    .driver_data = (void *) AB8500_LDO_DMIC, },
-	{ .name	= "LDO-ANA",     .driver_data = (void *) AB8500_LDO_ANA, },
+	{ .name	= "ab8500_ldo_aux1",    .driver_data = (void *) AB8500_LDO_AUX1, },
+	{ .name	= "ab8500_ldo_aux2",    .driver_data = (void *) AB8500_LDO_AUX2, },
+	{ .name	= "ab8500_ldo_aux3",    .driver_data = (void *) AB8500_LDO_AUX3, },
+	{ .name	= "ab8500_ldo_intcore", .driver_data = (void *) AB8500_LDO_INTCORE, },
+	{ .name	= "ab8500_ldo_tvout",   .driver_data = (void *) AB8500_LDO_TVOUT, },
+	{ .name = "ab8500_ldo_usb",     .driver_data = (void *) AB8500_LDO_USB, },
+	{ .name = "ab8500_ldo_audio",   .driver_data = (void *) AB8500_LDO_AUDIO, },
+	{ .name	= "ab8500_ldo_anamic1", .driver_data = (void *) AB8500_LDO_ANAMIC1, },
+	{ .name	= "ab8500_ldo_anamic2", .driver_data = (void *) AB8500_LDO_ANAMIC2, },
+	{ .name	= "ab8500_ldo_dmic",    .driver_data = (void *) AB8500_LDO_DMIC, },
+	{ .name	= "ab8500_ldo_ana",     .driver_data = (void *) AB8500_LDO_ANA, },
 };
 
 static __devinit int
diff --git a/drivers/regulator/anatop-regulator.c b/drivers/regulator/anatop-regulator.c
index 49b2112..e82e7ea 100644
--- a/drivers/regulator/anatop-regulator.c
+++ b/drivers/regulator/anatop-regulator.c
@@ -47,7 +47,7 @@
 				  int max_uV, unsigned *selector)
 {
 	struct anatop_regulator *anatop_reg = rdev_get_drvdata(reg);
-	u32 val, sel;
+	u32 val, sel, mask;
 	int uv;
 
 	uv = min_uV;
@@ -71,11 +71,10 @@
 	val = anatop_reg->min_bit_val + sel;
 	*selector = sel;
 	dev_dbg(&reg->dev, "%s: calculated val %d\n", __func__, val);
-	anatop_set_bits(anatop_reg->mfd,
-			anatop_reg->control_reg,
-			anatop_reg->vol_bit_shift,
-			anatop_reg->vol_bit_width,
-			val);
+	mask = ((1 << anatop_reg->vol_bit_width) - 1) <<
+		anatop_reg->vol_bit_shift;
+	val <<= anatop_reg->vol_bit_shift;
+	anatop_write_reg(anatop_reg->mfd, anatop_reg->control_reg, val, mask);
 
 	return 0;
 }
@@ -88,10 +87,9 @@
 	if (!anatop_reg->control_reg)
 		return -ENOTSUPP;
 
-	val = anatop_get_bits(anatop_reg->mfd,
-			      anatop_reg->control_reg,
-			      anatop_reg->vol_bit_shift,
-			      anatop_reg->vol_bit_width);
+	val = anatop_read_reg(anatop_reg->mfd, anatop_reg->control_reg);
+	val = (val & ((1 << anatop_reg->vol_bit_width) - 1)) >>
+		anatop_reg->vol_bit_shift;
 
 	return val - anatop_reg->min_bit_val;
 }
@@ -226,7 +224,7 @@
 		.of_match_table = of_anatop_regulator_match_tbl,
 	},
 	.probe	= anatop_regulator_probe,
-	.remove	= anatop_regulator_remove,
+	.remove	= __devexit_p(anatop_regulator_remove),
 };
 
 static int __init anatop_regulator_init(void)
diff --git a/drivers/regulator/core.c b/drivers/regulator/core.c
index 7584a74..09a737c 100644
--- a/drivers/regulator/core.c
+++ b/drivers/regulator/core.c
@@ -2050,6 +2050,9 @@
 		return -EINVAL;
 	}
 
+	if (min_uV < rdev->desc->min_uV)
+		min_uV = rdev->desc->min_uV;
+
 	ret = DIV_ROUND_UP(min_uV - rdev->desc->min_uV, rdev->desc->uV_step);
 	if (ret < 0)
 		return ret;
diff --git a/drivers/regulator/db8500-prcmu.c b/drivers/regulator/db8500-prcmu.c
index 968f97f..9dbb491 100644
--- a/drivers/regulator/db8500-prcmu.c
+++ b/drivers/regulator/db8500-prcmu.c
@@ -452,26 +452,26 @@
 }
 
 static struct of_regulator_match db8500_regulator_matches[] = {
-	{ .name	= "db8500-vape",          .driver_data = (void *) DB8500_REGULATOR_VAPE, },
-	{ .name	= "db8500-varm",          .driver_data = (void *) DB8500_REGULATOR_VARM, },
-	{ .name	= "db8500-vmodem",        .driver_data = (void *) DB8500_REGULATOR_VMODEM, },
-	{ .name	= "db8500-vpll",          .driver_data = (void *) DB8500_REGULATOR_VPLL, },
-	{ .name	= "db8500-vsmps1",        .driver_data = (void *) DB8500_REGULATOR_VSMPS1, },
-	{ .name	= "db8500-vsmps2",        .driver_data = (void *) DB8500_REGULATOR_VSMPS2, },
-	{ .name	= "db8500-vsmps3",        .driver_data = (void *) DB8500_REGULATOR_VSMPS3, },
-	{ .name	= "db8500-vrf1",          .driver_data = (void *) DB8500_REGULATOR_VRF1, },
-	{ .name	= "db8500-sva-mmdsp",     .driver_data = (void *) DB8500_REGULATOR_SWITCH_SVAMMDSP, },
-	{ .name	= "db8500-sva-mmdsp-ret", .driver_data = (void *) DB8500_REGULATOR_SWITCH_SVAMMDSPRET, },
-	{ .name	= "db8500-sva-pipe",      .driver_data = (void *) DB8500_REGULATOR_SWITCH_SVAPIPE, },
-	{ .name	= "db8500-sia-mmdsp",     .driver_data = (void *) DB8500_REGULATOR_SWITCH_SIAMMDSP, },
-	{ .name	= "db8500-sia-mmdsp-ret", .driver_data = (void *) DB8500_REGULATOR_SWITCH_SIAMMDSPRET, },
-	{ .name	= "db8500-sia-pipe",      .driver_data = (void *) DB8500_REGULATOR_SWITCH_SIAPIPE, },
-	{ .name	= "db8500-sga",           .driver_data = (void *) DB8500_REGULATOR_SWITCH_SGA, },
-	{ .name	= "db8500-b2r2-mcde",     .driver_data = (void *) DB8500_REGULATOR_SWITCH_B2R2_MCDE, },
-	{ .name	= "db8500-esram12",       .driver_data = (void *) DB8500_REGULATOR_SWITCH_ESRAM12, },
-	{ .name	= "db8500-esram12-ret",   .driver_data = (void *) DB8500_REGULATOR_SWITCH_ESRAM12RET, },
-	{ .name	= "db8500-esram34",       .driver_data = (void *) DB8500_REGULATOR_SWITCH_ESRAM34, },
-	{ .name	= "db8500-esram34-ret",   .driver_data = (void *) DB8500_REGULATOR_SWITCH_ESRAM34RET, },
+	{ .name	= "db8500_vape",          .driver_data = (void *) DB8500_REGULATOR_VAPE, },
+	{ .name	= "db8500_varm",          .driver_data = (void *) DB8500_REGULATOR_VARM, },
+	{ .name	= "db8500_vmodem",        .driver_data = (void *) DB8500_REGULATOR_VMODEM, },
+	{ .name	= "db8500_vpll",          .driver_data = (void *) DB8500_REGULATOR_VPLL, },
+	{ .name	= "db8500_vsmps1",        .driver_data = (void *) DB8500_REGULATOR_VSMPS1, },
+	{ .name	= "db8500_vsmps2",        .driver_data = (void *) DB8500_REGULATOR_VSMPS2, },
+	{ .name	= "db8500_vsmps3",        .driver_data = (void *) DB8500_REGULATOR_VSMPS3, },
+	{ .name	= "db8500_vrf1",          .driver_data = (void *) DB8500_REGULATOR_VRF1, },
+	{ .name	= "db8500_sva_mmdsp",     .driver_data = (void *) DB8500_REGULATOR_SWITCH_SVAMMDSP, },
+	{ .name	= "db8500_sva_mmdsp_ret", .driver_data = (void *) DB8500_REGULATOR_SWITCH_SVAMMDSPRET, },
+	{ .name	= "db8500_sva_pipe",      .driver_data = (void *) DB8500_REGULATOR_SWITCH_SVAPIPE, },
+	{ .name	= "db8500_sia_mmdsp",     .driver_data = (void *) DB8500_REGULATOR_SWITCH_SIAMMDSP, },
+	{ .name	= "db8500_sia_mmdsp_ret", .driver_data = (void *) DB8500_REGULATOR_SWITCH_SIAMMDSPRET, },
+	{ .name	= "db8500_sia_pipe",      .driver_data = (void *) DB8500_REGULATOR_SWITCH_SIAPIPE, },
+	{ .name	= "db8500_sga",           .driver_data = (void *) DB8500_REGULATOR_SWITCH_SGA, },
+	{ .name	= "db8500_b2r2_mcde",     .driver_data = (void *) DB8500_REGULATOR_SWITCH_B2R2_MCDE, },
+	{ .name	= "db8500_esram12",       .driver_data = (void *) DB8500_REGULATOR_SWITCH_ESRAM12, },
+	{ .name	= "db8500_esram12_ret",   .driver_data = (void *) DB8500_REGULATOR_SWITCH_ESRAM12RET, },
+	{ .name	= "db8500_esram34",       .driver_data = (void *) DB8500_REGULATOR_SWITCH_ESRAM34, },
+	{ .name	= "db8500_esram34_ret",   .driver_data = (void *) DB8500_REGULATOR_SWITCH_ESRAM34RET, },
 };
 
 static __devinit int
diff --git a/drivers/regulator/gpio-regulator.c b/drivers/regulator/gpio-regulator.c
index 9997d7a..242851a 100644
--- a/drivers/regulator/gpio-regulator.c
+++ b/drivers/regulator/gpio-regulator.c
@@ -101,16 +101,20 @@
 }
 
 static int gpio_regulator_set_value(struct regulator_dev *dev,
-					int min, int max)
+					int min, int max, unsigned *selector)
 {
 	struct gpio_regulator_data *data = rdev_get_drvdata(dev);
-	int ptr, target, state, best_val = INT_MAX;
+	int ptr, target = 0, state, best_val = INT_MAX;
 
 	for (ptr = 0; ptr < data->nr_states; ptr++)
 		if (data->states[ptr].value < best_val &&
 		    data->states[ptr].value >= min &&
-		    data->states[ptr].value <= max)
+		    data->states[ptr].value <= max) {
 			target = data->states[ptr].gpios;
+			best_val = data->states[ptr].value;
+			if (selector)
+				*selector = ptr;
+		}
 
 	if (best_val == INT_MAX)
 		return -EINVAL;
@@ -128,7 +132,7 @@
 					int min_uV, int max_uV,
 					unsigned *selector)
 {
-	return gpio_regulator_set_value(dev, min_uV, max_uV);
+	return gpio_regulator_set_value(dev, min_uV, max_uV, selector);
 }
 
 static int gpio_regulator_list_voltage(struct regulator_dev *dev,
@@ -145,7 +149,7 @@
 static int gpio_regulator_set_current_limit(struct regulator_dev *dev,
 					int min_uA, int max_uA)
 {
-	return gpio_regulator_set_value(dev, min_uA, max_uA);
+	return gpio_regulator_set_value(dev, min_uA, max_uA, NULL);
 }
 
 static struct regulator_ops gpio_regulator_voltage_ops = {
@@ -286,7 +290,7 @@
 
 	cfg.dev = &pdev->dev;
 	cfg.init_data = config->init_data;
-	cfg.driver_data = &drvdata;
+	cfg.driver_data = drvdata;
 
 	drvdata->dev = regulator_register(&drvdata->desc, &cfg);
 	if (IS_ERR(drvdata->dev)) {
diff --git a/drivers/regulator/max8649.c b/drivers/regulator/max8649.c
index 1f4bb80..9d540cd 100644
--- a/drivers/regulator/max8649.c
+++ b/drivers/regulator/max8649.c
@@ -259,6 +259,7 @@
 	config.dev = &client->dev;
 	config.init_data = pdata->regulator;
 	config.driver_data = info;
+	config.regmap = info->regmap;
 
 	info->regulator = regulator_register(&dcdc_desc, &config);
 	if (IS_ERR(info->regulator)) {
diff --git a/drivers/regulator/palmas-regulator.c b/drivers/regulator/palmas-regulator.c
index c4435f6..795f75a 100644
--- a/drivers/regulator/palmas-regulator.c
+++ b/drivers/regulator/palmas-regulator.c
@@ -673,7 +673,9 @@
 			pmic->desc[id].ops = &palmas_ops_smps10;
 			pmic->desc[id].vsel_reg = PALMAS_SMPS10_CTRL;
 			pmic->desc[id].vsel_mask = SMPS10_VSEL;
-			pmic->desc[id].enable_reg = PALMAS_SMPS10_STATUS;
+			pmic->desc[id].enable_reg =
+					PALMAS_BASE_TO_REG(PALMAS_SMPS_BASE,
+							PALMAS_SMPS10_STATUS);
 			pmic->desc[id].enable_mask = SMPS10_BOOST_EN;
 		}
 
@@ -739,7 +741,8 @@
 
 		pmic->desc[id].type = REGULATOR_VOLTAGE;
 		pmic->desc[id].owner = THIS_MODULE;
-		pmic->desc[id].enable_reg = palmas_regs_info[id].ctrl_addr;
+		pmic->desc[id].enable_reg = PALMAS_BASE_TO_REG(PALMAS_LDO_BASE,
+						palmas_regs_info[id].ctrl_addr);
 		pmic->desc[id].enable_mask = PALMAS_LDO1_CTRL_MODE_ACTIVE;
 
 		if (pdata && pdata->reg_data)
@@ -775,9 +778,6 @@
 err_unregister_regulator:
 	while (--id >= 0)
 		regulator_unregister(pmic->rdev[id]);
-	kfree(pmic->rdev);
-	kfree(pmic->desc);
-	kfree(pmic);
 	return ret;
 }
 
@@ -788,10 +788,6 @@
 
 	for (id = 0; id < PALMAS_NUM_REGS; id++)
 		regulator_unregister(pmic->rdev[id]);
-
-	kfree(pmic->rdev);
-	kfree(pmic->desc);
-	kfree(pmic);
 	return 0;
 }
 
diff --git a/drivers/regulator/s5m8767.c b/drivers/regulator/s5m8767.c
index 290d6fc..9caadb4 100644
--- a/drivers/regulator/s5m8767.c
+++ b/drivers/regulator/s5m8767.c
@@ -451,7 +451,7 @@
 
 	desc = reg_voltage_map[reg_id];
 
-	if (old_sel < new_sel)
+	if ((old_sel < new_sel) && s5m8767->ramp_delay)
 		return DIV_ROUND_UP(desc->step * (new_sel - old_sel),
 					s5m8767->ramp_delay * 1000);
 	return 0;
diff --git a/drivers/regulator/tps65023-regulator.c b/drivers/regulator/tps65023-regulator.c
index f841bd0..8f1be85 100644
--- a/drivers/regulator/tps65023-regulator.c
+++ b/drivers/regulator/tps65023-regulator.c
@@ -71,7 +71,7 @@
 
 /* LDO_CTRL bitfields */
 #define TPS65023_LDO_CTRL_LDOx_SHIFT(ldo_id)	((ldo_id)*4)
-#define TPS65023_LDO_CTRL_LDOx_MASK(ldo_id)	(0x0F << ((ldo_id)*4))
+#define TPS65023_LDO_CTRL_LDOx_MASK(ldo_id)	(0x07 << ((ldo_id)*4))
 
 /* Number of step-down converters available */
 #define TPS65023_NUM_DCDC		3
diff --git a/drivers/regulator/tps6524x-regulator.c b/drivers/regulator/tps6524x-regulator.c
index b88b3df..1b299aa 100644
--- a/drivers/regulator/tps6524x-regulator.c
+++ b/drivers/regulator/tps6524x-regulator.c
@@ -482,7 +482,7 @@
 	info	= &supply_info[rdev_get_id(rdev)];
 
 	if (info->flags & FIXED_VOLTAGE)
-		return info->fixed_voltage;
+		return 0;
 
 	ret = read_field(hw, &info->voltage);
 	if (ret < 0)
diff --git a/drivers/regulator/tps65910-regulator.c b/drivers/regulator/tps65910-regulator.c
index 4e01a42..6bf864b 100644
--- a/drivers/regulator/tps65910-regulator.c
+++ b/drivers/regulator/tps65910-regulator.c
@@ -331,21 +331,16 @@
 
 static inline int tps65910_read(struct tps65910_reg *pmic, u8 reg)
 {
-	u8 val;
+	unsigned int val;
 	int err;
 
-	err = pmic->mfd->read(pmic->mfd, reg, 1, &val);
+	err = tps65910_reg_read(pmic->mfd, reg, &val);
 	if (err)
 		return err;
 
 	return val;
 }
 
-static inline int tps65910_write(struct tps65910_reg *pmic, u8 reg, u8 val)
-{
-	return pmic->mfd->write(pmic->mfd, reg, 1, &val);
-}
-
 static int tps65910_modify_bits(struct tps65910_reg *pmic, u8 reg,
 					u8 set_mask, u8 clear_mask)
 {
@@ -362,7 +357,7 @@
 
 	data &= ~clear_mask;
 	data |= set_mask;
-	err = tps65910_write(pmic, reg, data);
+	err = tps65910_reg_write(pmic->mfd, reg, data);
 	if (err)
 		dev_err(pmic->mfd->dev, "Write for reg 0x%x failed\n", reg);
 
@@ -371,7 +366,7 @@
 	return err;
 }
 
-static int tps65910_reg_read(struct tps65910_reg *pmic, u8 reg)
+static int tps65910_reg_read_locked(struct tps65910_reg *pmic, u8 reg)
 {
 	int data;
 
@@ -385,13 +380,13 @@
 	return data;
 }
 
-static int tps65910_reg_write(struct tps65910_reg *pmic, u8 reg, u8 val)
+static int tps65910_reg_write_locked(struct tps65910_reg *pmic, u8 reg, u8 val)
 {
 	int err;
 
 	mutex_lock(&pmic->mutex);
 
-	err = tps65910_write(pmic, reg, val);
+	err = tps65910_reg_write(pmic->mfd, reg, val);
 	if (err < 0)
 		dev_err(pmic->mfd->dev, "Write for reg 0x%x failed\n", reg);
 
@@ -490,9 +485,9 @@
 							LDO_ST_MODE_BIT);
 	case REGULATOR_MODE_IDLE:
 		value = LDO_ST_ON_BIT | LDO_ST_MODE_BIT;
-		return tps65910_set_bits(mfd, reg, value);
+		return tps65910_reg_set_bits(mfd, reg, value);
 	case REGULATOR_MODE_STANDBY:
-		return tps65910_clear_bits(mfd, reg, LDO_ST_ON_BIT);
+		return tps65910_reg_clear_bits(mfd, reg, LDO_ST_ON_BIT);
 	}
 
 	return -EINVAL;
@@ -507,7 +502,7 @@
 	if (reg < 0)
 		return reg;
 
-	value = tps65910_reg_read(pmic, reg);
+	value = tps65910_reg_read_locked(pmic, reg);
 	if (value < 0)
 		return value;
 
@@ -527,28 +522,28 @@
 
 	switch (id) {
 	case TPS65910_REG_VDD1:
-		opvsel = tps65910_reg_read(pmic, TPS65910_VDD1_OP);
-		mult = tps65910_reg_read(pmic, TPS65910_VDD1);
+		opvsel = tps65910_reg_read_locked(pmic, TPS65910_VDD1_OP);
+		mult = tps65910_reg_read_locked(pmic, TPS65910_VDD1);
 		mult = (mult & VDD1_VGAIN_SEL_MASK) >> VDD1_VGAIN_SEL_SHIFT;
-		srvsel = tps65910_reg_read(pmic, TPS65910_VDD1_SR);
+		srvsel = tps65910_reg_read_locked(pmic, TPS65910_VDD1_SR);
 		sr = opvsel & VDD1_OP_CMD_MASK;
 		opvsel &= VDD1_OP_SEL_MASK;
 		srvsel &= VDD1_SR_SEL_MASK;
 		vselmax = 75;
 		break;
 	case TPS65910_REG_VDD2:
-		opvsel = tps65910_reg_read(pmic, TPS65910_VDD2_OP);
-		mult = tps65910_reg_read(pmic, TPS65910_VDD2);
+		opvsel = tps65910_reg_read_locked(pmic, TPS65910_VDD2_OP);
+		mult = tps65910_reg_read_locked(pmic, TPS65910_VDD2);
 		mult = (mult & VDD2_VGAIN_SEL_MASK) >> VDD2_VGAIN_SEL_SHIFT;
-		srvsel = tps65910_reg_read(pmic, TPS65910_VDD2_SR);
+		srvsel = tps65910_reg_read_locked(pmic, TPS65910_VDD2_SR);
 		sr = opvsel & VDD2_OP_CMD_MASK;
 		opvsel &= VDD2_OP_SEL_MASK;
 		srvsel &= VDD2_SR_SEL_MASK;
 		vselmax = 75;
 		break;
 	case TPS65911_REG_VDDCTRL:
-		opvsel = tps65910_reg_read(pmic, TPS65911_VDDCTRL_OP);
-		srvsel = tps65910_reg_read(pmic, TPS65911_VDDCTRL_SR);
+		opvsel = tps65910_reg_read_locked(pmic, TPS65911_VDDCTRL_OP);
+		srvsel = tps65910_reg_read_locked(pmic, TPS65911_VDDCTRL_SR);
 		sr = opvsel & VDDCTRL_OP_CMD_MASK;
 		opvsel &= VDDCTRL_OP_SEL_MASK;
 		srvsel &= VDDCTRL_SR_SEL_MASK;
@@ -588,7 +583,7 @@
 	if (reg < 0)
 		return reg;
 
-	value = tps65910_reg_read(pmic, reg);
+	value = tps65910_reg_read_locked(pmic, reg);
 	if (value < 0)
 		return value;
 
@@ -625,7 +620,7 @@
 
 	reg = pmic->get_ctrl_reg(id);
 
-	value = tps65910_reg_read(pmic, reg);
+	value = tps65910_reg_read_locked(pmic, reg);
 
 	switch (id) {
 	case TPS65911_REG_LDO1:
@@ -670,7 +665,7 @@
 		tps65910_modify_bits(pmic, TPS65910_VDD1,
 				(dcdc_mult << VDD1_VGAIN_SEL_SHIFT),
 						VDD1_VGAIN_SEL_MASK);
-		tps65910_reg_write(pmic, TPS65910_VDD1_OP, vsel);
+		tps65910_reg_write_locked(pmic, TPS65910_VDD1_OP, vsel);
 		break;
 	case TPS65910_REG_VDD2:
 		dcdc_mult = (selector / VDD1_2_NUM_VOLT_FINE) + 1;
@@ -681,11 +676,11 @@
 		tps65910_modify_bits(pmic, TPS65910_VDD2,
 				(dcdc_mult << VDD2_VGAIN_SEL_SHIFT),
 						VDD1_VGAIN_SEL_MASK);
-		tps65910_reg_write(pmic, TPS65910_VDD2_OP, vsel);
+		tps65910_reg_write_locked(pmic, TPS65910_VDD2_OP, vsel);
 		break;
 	case TPS65911_REG_VDDCTRL:
 		vsel = selector + 3;
-		tps65910_reg_write(pmic, TPS65911_VDDCTRL_OP, vsel);
+		tps65910_reg_write_locked(pmic, TPS65911_VDDCTRL_OP, vsel);
 	}
 
 	return 0;
@@ -936,10 +931,10 @@
 
 	/* External EN1 control */
 	if (ext_sleep_config & TPS65910_SLEEP_CONTROL_EXT_INPUT_EN1)
-		ret = tps65910_set_bits(mfd,
+		ret = tps65910_reg_set_bits(mfd,
 				TPS65910_EN1_LDO_ASS + regoffs, bit_pos);
 	else
-		ret = tps65910_clear_bits(mfd,
+		ret = tps65910_reg_clear_bits(mfd,
 				TPS65910_EN1_LDO_ASS + regoffs, bit_pos);
 	if (ret < 0) {
 		dev_err(mfd->dev,
@@ -949,10 +944,10 @@
 
 	/* External EN2 control */
 	if (ext_sleep_config & TPS65910_SLEEP_CONTROL_EXT_INPUT_EN2)
-		ret = tps65910_set_bits(mfd,
+		ret = tps65910_reg_set_bits(mfd,
 				TPS65910_EN2_LDO_ASS + regoffs, bit_pos);
 	else
-		ret = tps65910_clear_bits(mfd,
+		ret = tps65910_reg_clear_bits(mfd,
 				TPS65910_EN2_LDO_ASS + regoffs, bit_pos);
 	if (ret < 0) {
 		dev_err(mfd->dev,
@@ -964,10 +959,10 @@
 	if ((tps65910_chip_id(mfd) == TPS65910) &&
 			(id >= TPS65910_REG_VDIG1)) {
 		if (ext_sleep_config & TPS65910_SLEEP_CONTROL_EXT_INPUT_EN3)
-			ret = tps65910_set_bits(mfd,
+			ret = tps65910_reg_set_bits(mfd,
 				TPS65910_EN3_LDO_ASS + regoffs, bit_pos);
 		else
-			ret = tps65910_clear_bits(mfd,
+			ret = tps65910_reg_clear_bits(mfd,
 				TPS65910_EN3_LDO_ASS + regoffs, bit_pos);
 		if (ret < 0) {
 			dev_err(mfd->dev,
@@ -979,10 +974,10 @@
 	/* Return if no external control is selected */
 	if (!(ext_sleep_config & EXT_SLEEP_CONTROL)) {
 		/* Clear all sleep controls */
-		ret = tps65910_clear_bits(mfd,
+		ret = tps65910_reg_clear_bits(mfd,
 			TPS65910_SLEEP_KEEP_LDO_ON + regoffs, bit_pos);
 		if (!ret)
-			ret = tps65910_clear_bits(mfd,
+			ret = tps65910_reg_clear_bits(mfd,
 				TPS65910_SLEEP_SET_LDO_OFF + regoffs, bit_pos);
 		if (ret < 0)
 			dev_err(mfd->dev,
@@ -1001,32 +996,33 @@
 				(tps65910_chip_id(mfd) == TPS65911))) {
 		int op_reg_add = pmic->get_ctrl_reg(id) + 1;
 		int sr_reg_add = pmic->get_ctrl_reg(id) + 2;
-		int opvsel = tps65910_reg_read(pmic, op_reg_add);
-		int srvsel = tps65910_reg_read(pmic, sr_reg_add);
+		int opvsel = tps65910_reg_read_locked(pmic, op_reg_add);
+		int srvsel = tps65910_reg_read_locked(pmic, sr_reg_add);
 		if (opvsel & VDD1_OP_CMD_MASK) {
 			u8 reg_val = srvsel & VDD1_OP_SEL_MASK;
-			ret = tps65910_reg_write(pmic, op_reg_add, reg_val);
+			ret = tps65910_reg_write_locked(pmic, op_reg_add,
+							reg_val);
 			if (ret < 0) {
 				dev_err(mfd->dev,
 					"Error in configuring op register\n");
 				return ret;
 			}
 		}
-		ret = tps65910_reg_write(pmic, sr_reg_add, 0);
+		ret = tps65910_reg_write_locked(pmic, sr_reg_add, 0);
 		if (ret < 0) {
 			dev_err(mfd->dev, "Error in settting sr register\n");
 			return ret;
 		}
 	}
 
-	ret = tps65910_clear_bits(mfd,
+	ret = tps65910_reg_clear_bits(mfd,
 			TPS65910_SLEEP_KEEP_LDO_ON + regoffs, bit_pos);
 	if (!ret) {
 		if (ext_sleep_config & TPS65911_SLEEP_CONTROL_EXT_INPUT_SLEEP)
-			ret = tps65910_set_bits(mfd,
+			ret = tps65910_reg_set_bits(mfd,
 				TPS65910_SLEEP_SET_LDO_OFF + regoffs, bit_pos);
 		else
-			ret = tps65910_clear_bits(mfd,
+			ret = tps65910_reg_clear_bits(mfd,
 				TPS65910_SLEEP_SET_LDO_OFF + regoffs, bit_pos);
 	}
 	if (ret < 0)
@@ -1177,7 +1173,7 @@
 	platform_set_drvdata(pdev, pmic);
 
 	/* Give control of all register to control port */
-	tps65910_set_bits(pmic->mfd, TPS65910_DEVCTRL,
+	tps65910_reg_set_bits(pmic->mfd, TPS65910_DEVCTRL,
 				DEVCTRL_SR_CTL_I2C_SEL_MASK);
 
 	switch(tps65910_chip_id(tps65910)) {
diff --git a/drivers/regulator/wm831x-dcdc.c b/drivers/regulator/wm831x-dcdc.c
index a885911..099da11 100644
--- a/drivers/regulator/wm831x-dcdc.c
+++ b/drivers/regulator/wm831x-dcdc.c
@@ -535,7 +535,7 @@
 		goto err;
 	}
 
-	irq = platform_get_irq_byname(pdev, "UV");
+	irq = wm831x_irq(wm831x, platform_get_irq_byname(pdev, "UV"));
 	ret = request_threaded_irq(irq, NULL, wm831x_dcdc_uv_irq,
 				   IRQF_TRIGGER_RISING, dcdc->name, dcdc);
 	if (ret != 0) {
@@ -544,7 +544,7 @@
 		goto err_regulator;
 	}
 
-	irq = platform_get_irq_byname(pdev, "HC");
+	irq = wm831x_irq(wm831x, platform_get_irq_byname(pdev, "HC"));
 	ret = request_threaded_irq(irq, NULL, wm831x_dcdc_oc_irq,
 				   IRQF_TRIGGER_RISING, dcdc->name, dcdc);
 	if (ret != 0) {
@@ -558,7 +558,8 @@
 	return 0;
 
 err_uv:
-	free_irq(platform_get_irq_byname(pdev, "UV"), dcdc);
+	free_irq(wm831x_irq(wm831x, platform_get_irq_byname(pdev, "UV")),
+		 dcdc);
 err_regulator:
 	regulator_unregister(dcdc->regulator);
 err:
@@ -570,11 +571,14 @@
 static __devexit int wm831x_buckv_remove(struct platform_device *pdev)
 {
 	struct wm831x_dcdc *dcdc = platform_get_drvdata(pdev);
+	struct wm831x *wm831x = dcdc->wm831x;
 
 	platform_set_drvdata(pdev, NULL);
 
-	free_irq(platform_get_irq_byname(pdev, "HC"), dcdc);
-	free_irq(platform_get_irq_byname(pdev, "UV"), dcdc);
+	free_irq(wm831x_irq(wm831x, platform_get_irq_byname(pdev, "HC")),
+			    dcdc);
+	free_irq(wm831x_irq(wm831x, platform_get_irq_byname(pdev, "UV")),
+			    dcdc);
 	regulator_unregister(dcdc->regulator);
 	if (dcdc->dvs_gpio)
 		gpio_free(dcdc->dvs_gpio);
@@ -726,7 +730,7 @@
 		goto err;
 	}
 
-	irq = platform_get_irq_byname(pdev, "UV");
+	irq = wm831x_irq(wm831x, platform_get_irq_byname(pdev, "UV"));
 	ret = request_threaded_irq(irq, NULL, wm831x_dcdc_uv_irq,
 				   IRQF_TRIGGER_RISING,	dcdc->name, dcdc);
 	if (ret != 0) {
@@ -751,7 +755,8 @@
 
 	platform_set_drvdata(pdev, NULL);
 
-	free_irq(platform_get_irq_byname(pdev, "UV"), dcdc);
+	free_irq(wm831x_irq(dcdc->wm831x, platform_get_irq_byname(pdev, "UV")),
+			    dcdc);
 	regulator_unregister(dcdc->regulator);
 
 	return 0;
@@ -859,7 +864,7 @@
 		goto err;
 	}
 
-	irq = platform_get_irq_byname(pdev, "UV");
+	irq = wm831x_irq(wm831x, platform_get_irq_byname(pdev, "UV"));
 	ret = request_threaded_irq(irq, NULL, wm831x_dcdc_uv_irq,
 				   IRQF_TRIGGER_RISING, dcdc->name,
 				   dcdc);
@@ -885,7 +890,8 @@
 
 	platform_set_drvdata(pdev, NULL);
 
-	free_irq(platform_get_irq_byname(pdev, "UV"), dcdc);
+	free_irq(wm831x_irq(dcdc->wm831x, platform_get_irq_byname(pdev, "UV")),
+		 dcdc);
 	regulator_unregister(dcdc->regulator);
 
 	return 0;
diff --git a/drivers/regulator/wm831x-isink.c b/drivers/regulator/wm831x-isink.c
index b50ab77..0d207c2 100644
--- a/drivers/regulator/wm831x-isink.c
+++ b/drivers/regulator/wm831x-isink.c
@@ -202,7 +202,7 @@
 		goto err;
 	}
 
-	irq = platform_get_irq(pdev, 0);
+	irq = wm831x_irq(wm831x, platform_get_irq(pdev, 0));
 	ret = request_threaded_irq(irq, NULL, wm831x_isink_irq,
 				   IRQF_TRIGGER_RISING, isink->name, isink);
 	if (ret != 0) {
@@ -227,7 +227,7 @@
 
 	platform_set_drvdata(pdev, NULL);
 
-	free_irq(platform_get_irq(pdev, 0), isink);
+	free_irq(wm831x_irq(isink->wm831x, platform_get_irq(pdev, 0)), isink);
 
 	regulator_unregister(isink->regulator);
 
diff --git a/drivers/regulator/wm831x-ldo.c b/drivers/regulator/wm831x-ldo.c
index aa1f8b3..a9a28d8 100644
--- a/drivers/regulator/wm831x-ldo.c
+++ b/drivers/regulator/wm831x-ldo.c
@@ -321,7 +321,7 @@
 		goto err;
 	}
 
-	irq = platform_get_irq_byname(pdev, "UV");
+	irq = wm831x_irq(wm831x, platform_get_irq_byname(pdev, "UV"));
 	ret = request_threaded_irq(irq, NULL, wm831x_ldo_uv_irq,
 				   IRQF_TRIGGER_RISING, ldo->name,
 				   ldo);
@@ -347,7 +347,8 @@
 
 	platform_set_drvdata(pdev, NULL);
 
-	free_irq(platform_get_irq_byname(pdev, "UV"), ldo);
+	free_irq(wm831x_irq(ldo->wm831x,
+			    platform_get_irq_byname(pdev, "UV")), ldo);
 	regulator_unregister(ldo->regulator);
 
 	return 0;
@@ -582,7 +583,7 @@
 		goto err;
 	}
 
-	irq = platform_get_irq_byname(pdev, "UV");
+	irq = wm831x_irq(wm831x, platform_get_irq_byname(pdev, "UV"));
 	ret = request_threaded_irq(irq, NULL, wm831x_ldo_uv_irq,
 				   IRQF_TRIGGER_RISING, ldo->name, ldo);
 	if (ret != 0) {
@@ -605,7 +606,8 @@
 {
 	struct wm831x_ldo *ldo = platform_get_drvdata(pdev);
 
-	free_irq(platform_get_irq_byname(pdev, "UV"), ldo);
+	free_irq(wm831x_irq(ldo->wm831x, platform_get_irq_byname(pdev, "UV")),
+		 ldo);
 	regulator_unregister(ldo->regulator);
 
 	return 0;
diff --git a/drivers/remoteproc/omap_remoteproc.c b/drivers/remoteproc/omap_remoteproc.c
index 69425c4..de138e3 100644
--- a/drivers/remoteproc/omap_remoteproc.c
+++ b/drivers/remoteproc/omap_remoteproc.c
@@ -182,7 +182,7 @@
 
 	ret = dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(32));
 	if (ret) {
-		dev_err(pdev->dev.parent, "dma_set_coherent_mask: %d\n", ret);
+		dev_err(&pdev->dev, "dma_set_coherent_mask: %d\n", ret);
 		return ret;
 	}
 
diff --git a/drivers/remoteproc/remoteproc_core.c b/drivers/remoteproc/remoteproc_core.c
index d6f8ada..66324ee 100644
--- a/drivers/remoteproc/remoteproc_core.c
+++ b/drivers/remoteproc/remoteproc_core.c
@@ -78,7 +78,7 @@
  * the recovery of the remote processor.
  */
 static int rproc_iommu_fault(struct iommu_domain *domain, struct device *dev,
-		unsigned long iova, int flags)
+		unsigned long iova, int flags, void *token)
 {
 	dev_err(dev, "iommu fault: da 0x%lx flags 0x%x\n", iova, flags);
 
@@ -117,7 +117,7 @@
 		return -ENOMEM;
 	}
 
-	iommu_set_fault_handler(domain, rproc_iommu_fault);
+	iommu_set_fault_handler(domain, rproc_iommu_fault, rproc);
 
 	ret = iommu_attach_device(domain, dev);
 	if (ret) {
@@ -247,7 +247,7 @@
 		}
 
 		if (offset + filesz > len) {
-			dev_err(dev, "truncated fw: need 0x%x avail 0x%x\n",
+			dev_err(dev, "truncated fw: need 0x%x avail 0x%zx\n",
 					offset + filesz, len);
 			ret = -EINVAL;
 			break;
@@ -934,7 +934,7 @@
 		unmapped = iommu_unmap(rproc->domain, entry->da, entry->len);
 		if (unmapped != entry->len) {
 			/* nothing much to do besides complaining */
-			dev_err(dev, "failed to unmap %u/%u\n", entry->len,
+			dev_err(dev, "failed to unmap %u/%zu\n", entry->len,
 								unmapped);
 		}
 
@@ -1020,7 +1020,7 @@
 
 	ehdr = (struct elf32_hdr *)fw->data;
 
-	dev_info(dev, "Booting fw image %s, size %d\n", name, fw->size);
+	dev_info(dev, "Booting fw image %s, size %zd\n", name, fw->size);
 
 	/*
 	 * if enabling an IOMMU isn't relevant for this rproc, this is
@@ -1041,8 +1041,10 @@
 
 	/* look for the resource table */
 	table = rproc_find_rsc_table(rproc, fw->data, fw->size, &tablesz);
-	if (!table)
+	if (!table) {
+		ret = -EINVAL;
 		goto clean_up;
+	}
 
 	/* handle fw resources which are required to boot rproc */
 	ret = rproc_handle_boot_rsc(rproc, table, tablesz);
diff --git a/drivers/rtc/Kconfig b/drivers/rtc/Kconfig
index 4161bfe..08cbdb9 100644
--- a/drivers/rtc/Kconfig
+++ b/drivers/rtc/Kconfig
@@ -620,27 +620,6 @@
 	  This driver can also be built as a module. If so, the module
 	  will be called rtc-msm6242.
 
-config RTC_DRV_IMXDI
-	tristate "Freescale IMX DryIce Real Time Clock"
-	depends on ARCH_MX25
-	depends on RTC_CLASS
-	help
-	   Support for Freescale IMX DryIce RTC
-
-	   This driver can also be built as a module, if so, the module
-	   will be called "rtc-imxdi".
-
-config RTC_MXC
-	tristate "Freescale MXC Real Time Clock"
-	depends on ARCH_MXC
-	depends on RTC_CLASS
-	help
-	   If you say yes here you get support for the Freescale MXC
-	   RTC module.
-
-	   This driver can also be built as a module, if so, the module
-	   will be called "rtc-mxc".
-
 config RTC_DRV_BQ4802
 	tristate "TI BQ4802"
 	help
@@ -738,6 +717,16 @@
 	  This driver can also be built as a module. If so, the module
 	  will be called rtc-davinci.
 
+config RTC_DRV_IMXDI
+	tristate "Freescale IMX DryIce Real Time Clock"
+	depends on SOC_IMX25
+	depends on RTC_CLASS
+	help
+	   Support for Freescale IMX DryIce RTC
+
+	   This driver can also be built as a module, if so, the module
+	   will be called "rtc-imxdi".
+
 config RTC_DRV_OMAP
 	tristate "TI OMAP1"
 	depends on ARCH_OMAP15XX || ARCH_OMAP16XX || ARCH_OMAP730 || ARCH_DAVINCI_DA8XX
@@ -1087,4 +1076,15 @@
 	  This driver can also be built as a module. If so, the module
 	  will be called rtc-ls1x.
 
+config RTC_DRV_MXC
+	tristate "Freescale MXC Real Time Clock"
+	depends on ARCH_MXC
+	depends on RTC_CLASS
+	help
+	   If you say yes here you get support for the Freescale MXC
+	   RTC module.
+
+	   This driver can also be built as a module, if so, the module
+	   will be called "rtc-mxc".
+
 endif # RTC_CLASS
diff --git a/drivers/rtc/Makefile b/drivers/rtc/Makefile
index 727ae77..2973921 100644
--- a/drivers/rtc/Makefile
+++ b/drivers/rtc/Makefile
@@ -61,7 +61,7 @@
 obj-$(CONFIG_RTC_DRV_M48T35)	+= rtc-m48t35.o
 obj-$(CONFIG_RTC_DRV_M48T59)	+= rtc-m48t59.o
 obj-$(CONFIG_RTC_DRV_M48T86)	+= rtc-m48t86.o
-obj-$(CONFIG_RTC_MXC)		+= rtc-mxc.o
+obj-$(CONFIG_RTC_DRV_MXC)	+= rtc-mxc.o
 obj-$(CONFIG_RTC_DRV_MAX6900)	+= rtc-max6900.o
 obj-$(CONFIG_RTC_DRV_MAX8925)	+= rtc-max8925.o
 obj-$(CONFIG_RTC_DRV_MAX8998)	+= rtc-max8998.o
diff --git a/drivers/rtc/rtc-cmos.c b/drivers/rtc/rtc-cmos.c
index 7d5f56e..4267789 100644
--- a/drivers/rtc/rtc-cmos.c
+++ b/drivers/rtc/rtc-cmos.c
@@ -910,14 +910,17 @@
 
 static u32 rtc_handler(void *context)
 {
+	struct device *dev = context;
+
+	pm_wakeup_event(dev, 0);
 	acpi_clear_event(ACPI_EVENT_RTC);
 	acpi_disable_event(ACPI_EVENT_RTC, 0);
 	return ACPI_INTERRUPT_HANDLED;
 }
 
-static inline void rtc_wake_setup(void)
+static inline void rtc_wake_setup(struct device *dev)
 {
-	acpi_install_fixed_event_handler(ACPI_EVENT_RTC, rtc_handler, NULL);
+	acpi_install_fixed_event_handler(ACPI_EVENT_RTC, rtc_handler, dev);
 	/*
 	 * After the RTC handler is installed, the Fixed_RTC event should
 	 * be disabled. Only when the RTC alarm is set will it be enabled.
@@ -950,7 +953,7 @@
 	if (acpi_disabled)
 		return;
 
-	rtc_wake_setup();
+	rtc_wake_setup(dev);
 	acpi_rtc_info.wake_on = rtc_wake_on;
 	acpi_rtc_info.wake_off = rtc_wake_off;
 
diff --git a/drivers/rtc/rtc-ds1307.c b/drivers/rtc/rtc-ds1307.c
index c293d0c..836710c 100644
--- a/drivers/rtc/rtc-ds1307.c
+++ b/drivers/rtc/rtc-ds1307.c
@@ -17,8 +17,7 @@
 #include <linux/string.h>
 #include <linux/rtc.h>
 #include <linux/bcd.h>
-
-
+#include <linux/rtc/ds1307.h>
 
 /*
  * We can't determine type by probing, but if we expect pre-Linux code
@@ -92,7 +91,8 @@
 #	define DS1337_BIT_A2I		0x02
 #	define DS1337_BIT_A1I		0x01
 #define DS1339_REG_ALARM1_SECS	0x07
-#define DS1339_REG_TRICKLE	0x10
+
+#define DS13XX_TRICKLE_CHARGER_MAGIC	0xa0
 
 #define RX8025_REG_CTRL1	0x0e
 #	define RX8025_BIT_2412		0x20
@@ -124,6 +124,7 @@
 	unsigned		alarm:1;
 	u16			nvram_offset;
 	u16			nvram_size;
+	u16			trickle_charger_reg;
 };
 
 static const struct chip_desc chips[last_ds_type] = {
@@ -140,6 +141,13 @@
 	},
 	[ds_1339] = {
 		.alarm		= 1,
+		.trickle_charger_reg = 0x10,
+	},
+	[ds_1340] = {
+		.trickle_charger_reg = 0x08,
+	},
+	[ds_1388] = {
+		.trickle_charger_reg = 0x0a,
 	},
 	[ds_3231] = {
 		.alarm		= 1,
@@ -619,6 +627,7 @@
 	struct i2c_adapter	*adapter = to_i2c_adapter(client->dev.parent);
 	int			want_irq = false;
 	unsigned char		*buf;
+	struct ds1307_platform_data *pdata = client->dev.platform_data;
 	static const int	bbsqi_bitpos[] = {
 		[ds_1337] = 0,
 		[ds_1339] = DS1339_BIT_BBSQI,
@@ -637,7 +646,10 @@
 
 	ds1307->client	= client;
 	ds1307->type	= id->driver_data;
-	ds1307->offset	= 0;
+
+	if (pdata && pdata->trickle_charger_setup && chip->trickle_charger_reg)
+		i2c_smbus_write_byte_data(client, chip->trickle_charger_reg,
+			DS13XX_TRICKLE_CHARGER_MAGIC | pdata->trickle_charger_setup);
 
 	buf = ds1307->regs;
 	if (i2c_check_functionality(adapter, I2C_FUNC_SMBUS_I2C_BLOCK)) {
diff --git a/drivers/rtc/rtc-ep93xx.c b/drivers/rtc/rtc-ep93xx.c
index 14a42a1..9602278 100644
--- a/drivers/rtc/rtc-ep93xx.c
+++ b/drivers/rtc/rtc-ep93xx.c
@@ -127,7 +127,7 @@
 	.attrs	= ep93xx_rtc_attrs,
 };
 
-static int __init ep93xx_rtc_probe(struct platform_device *pdev)
+static int __devinit ep93xx_rtc_probe(struct platform_device *pdev)
 {
 	struct ep93xx_rtc *ep93xx_rtc;
 	struct resource *res;
@@ -174,7 +174,7 @@
 	return err;
 }
 
-static int __exit ep93xx_rtc_remove(struct platform_device *pdev)
+static int __devexit ep93xx_rtc_remove(struct platform_device *pdev)
 {
 	struct ep93xx_rtc *ep93xx_rtc = platform_get_drvdata(pdev);
 
@@ -186,31 +186,19 @@
 	return 0;
 }
 
-/* work with hotplug and coldplug */
-MODULE_ALIAS("platform:ep93xx-rtc");
-
 static struct platform_driver ep93xx_rtc_driver = {
 	.driver		= {
 		.name	= "ep93xx-rtc",
 		.owner	= THIS_MODULE,
 	},
-	.remove		= __exit_p(ep93xx_rtc_remove),
+	.probe		= ep93xx_rtc_probe,
+	.remove		= __devexit_p(ep93xx_rtc_remove),
 };
 
-static int __init ep93xx_rtc_init(void)
-{
-        return platform_driver_probe(&ep93xx_rtc_driver, ep93xx_rtc_probe);
-}
-
-static void __exit ep93xx_rtc_exit(void)
-{
-	platform_driver_unregister(&ep93xx_rtc_driver);
-}
+module_platform_driver(ep93xx_rtc_driver);
 
 MODULE_AUTHOR("Alessandro Zummo <a.zummo@towertech.it>");
 MODULE_DESCRIPTION("EP93XX RTC driver");
 MODULE_LICENSE("GPL");
 MODULE_VERSION(DRV_VERSION);
-
-module_init(ep93xx_rtc_init);
-module_exit(ep93xx_rtc_exit);
+MODULE_ALIAS("platform:ep93xx-rtc");
diff --git a/drivers/rtc/rtc-imxdi.c b/drivers/rtc/rtc-imxdi.c
index d93a960..891cd6c 100644
--- a/drivers/rtc/rtc-imxdi.c
+++ b/drivers/rtc/rtc-imxdi.c
@@ -405,7 +405,7 @@
 	imxdi->clk = clk_get(&pdev->dev, NULL);
 	if (IS_ERR(imxdi->clk))
 		return PTR_ERR(imxdi->clk);
-	clk_enable(imxdi->clk);
+	clk_prepare_enable(imxdi->clk);
 
 	/*
 	 * Initialize dryice hardware
@@ -470,7 +470,7 @@
 	return 0;
 
 err:
-	clk_disable(imxdi->clk);
+	clk_disable_unprepare(imxdi->clk);
 	clk_put(imxdi->clk);
 
 	return rc;
@@ -487,7 +487,7 @@
 
 	rtc_device_unregister(imxdi->rtc);
 
-	clk_disable(imxdi->clk);
+	clk_disable_unprepare(imxdi->clk);
 	clk_put(imxdi->clk);
 
 	return 0;
diff --git a/drivers/rtc/rtc-lpc32xx.c b/drivers/rtc/rtc-lpc32xx.c
index 63c7218..d521855 100644
--- a/drivers/rtc/rtc-lpc32xx.c
+++ b/drivers/rtc/rtc-lpc32xx.c
@@ -19,6 +19,7 @@
 #include <linux/rtc.h>
 #include <linux/slab.h>
 #include <linux/io.h>
+#include <linux/of.h>
 
 /*
  * Clock and Power control register offsets
@@ -386,13 +387,22 @@
 #define LPC32XX_RTC_PM_OPS NULL
 #endif
 
+#ifdef CONFIG_OF
+static const struct of_device_id lpc32xx_rtc_match[] = {
+	{ .compatible = "nxp,lpc3220-rtc" },
+	{ }
+};
+MODULE_DEVICE_TABLE(of, lpc32xx_rtc_match);
+#endif
+
 static struct platform_driver lpc32xx_rtc_driver = {
 	.probe		= lpc32xx_rtc_probe,
 	.remove		= __devexit_p(lpc32xx_rtc_remove),
 	.driver = {
 		.name	= RTC_NAME,
 		.owner	= THIS_MODULE,
-		.pm	= LPC32XX_RTC_PM_OPS
+		.pm	= LPC32XX_RTC_PM_OPS,
+		.of_match_table = of_match_ptr(lpc32xx_rtc_match),
 	},
 };
 
diff --git a/drivers/rtc/rtc-m41t93.c b/drivers/rtc/rtc-m41t93.c
index 10f1c29..efab3d4 100644
--- a/drivers/rtc/rtc-m41t93.c
+++ b/drivers/rtc/rtc-m41t93.c
@@ -48,6 +48,7 @@
 static int m41t93_set_time(struct device *dev, struct rtc_time *tm)
 {
 	struct spi_device *spi = to_spi_device(dev);
+	int tmp;
 	u8 buf[9] = {0x80};        /* write cmd + 8 data bytes */
 	u8 * const data = &buf[1]; /* ptr to first data byte */
 
@@ -62,6 +63,30 @@
 		return -EINVAL;
 	}
 
+	tmp = spi_w8r8(spi, M41T93_REG_FLAGS);
+	if (tmp < 0)
+		return tmp;
+
+	if (tmp & M41T93_FLAG_OF) {
+		dev_warn(&spi->dev, "OF bit is set, resetting.\n");
+		m41t93_set_reg(spi, M41T93_REG_FLAGS, tmp & ~M41T93_FLAG_OF);
+
+		tmp = spi_w8r8(spi, M41T93_REG_FLAGS);
+		if (tmp < 0) {
+			return tmp;
+		} else if (tmp & M41T93_FLAG_OF) {
+			/* OF cannot be immediately reset: oscillator has to be
+			 * restarted. */
+			u8 reset_osc = buf[M41T93_REG_ST_SEC] | M41T93_FLAG_ST;
+
+			dev_warn(&spi->dev,
+				 "OF bit is still set, kickstarting clock.\n");
+			m41t93_set_reg(spi, M41T93_REG_ST_SEC, reset_osc);
+			reset_osc &= ~M41T93_FLAG_ST;
+			m41t93_set_reg(spi, M41T93_REG_ST_SEC, reset_osc);
+		}
+	}
+
 	data[M41T93_REG_SSEC]		= 0;
 	data[M41T93_REG_ST_SEC]		= bin2bcd(tm->tm_sec);
 	data[M41T93_REG_MIN]		= bin2bcd(tm->tm_min);
@@ -89,10 +114,7 @@
 	   1. halt bit (HT) is set: the clock is running but update of readout
 	      registers has been disabled due to power failure. This is normal
 	      case after poweron. Time is valid after resetting HT bit.
-	   2. oscillator fail bit (OF) is set. Oscillator has be stopped and
-	      time is invalid:
-	      a) OF can be immeditely reset.
-	      b) OF cannot be immediately reset: oscillator has to be restarted.
+	   2. oscillator fail bit (OF) is set: time is invalid.
 	*/
 	tmp = spi_w8r8(spi, M41T93_REG_ALM_HOUR_HT);
 	if (tmp < 0)
@@ -110,21 +132,7 @@
 
 	if (tmp & M41T93_FLAG_OF) {
 		ret = -EINVAL;
-		dev_warn(&spi->dev, "OF bit is set, resetting.\n");
-		m41t93_set_reg(spi, M41T93_REG_FLAGS, tmp & ~M41T93_FLAG_OF);
-
-		tmp = spi_w8r8(spi, M41T93_REG_FLAGS);
-		if (tmp < 0)
-			return tmp;
-		else if (tmp & M41T93_FLAG_OF) {
-			u8 reset_osc = buf[M41T93_REG_ST_SEC] | M41T93_FLAG_ST;
-
-			dev_warn(&spi->dev,
-				 "OF bit is still set, kickstarting clock.\n");
-			m41t93_set_reg(spi, M41T93_REG_ST_SEC, reset_osc);
-			reset_osc &= ~M41T93_FLAG_ST;
-			m41t93_set_reg(spi, M41T93_REG_ST_SEC, reset_osc);
-		}
+		dev_warn(&spi->dev, "OF bit is set, write time to restart.\n");
 	}
 
 	if (tmp & M41T93_FLAG_BL)
diff --git a/drivers/rtc/rtc-pcf8563.c b/drivers/rtc/rtc-pcf8563.c
index bc0677d..97a3284 100644
--- a/drivers/rtc/rtc-pcf8563.c
+++ b/drivers/rtc/rtc-pcf8563.c
@@ -64,6 +64,7 @@
 	 * 1970...2069.
 	 */
 	int c_polarity;	/* 0: MO_C=1 means 19xx, otherwise MO_C=1 means 20xx */
+	int voltage_low; /* indicates whether a low voltage was detected */
 };
 
 /*
@@ -86,9 +87,11 @@
 		return -EIO;
 	}
 
-	if (buf[PCF8563_REG_SC] & PCF8563_SC_LV)
+	if (buf[PCF8563_REG_SC] & PCF8563_SC_LV) {
+		pcf8563->voltage_low = 1;
 		dev_info(&client->dev,
 			"low voltage detected, date/time is not reliable.\n");
+	}
 
 	dev_dbg(&client->dev,
 		"%s: raw data is st1=%02x, st2=%02x, sec=%02x, min=%02x, hr=%02x, "
@@ -173,6 +176,44 @@
 	return 0;
 }
 
+#ifdef CONFIG_RTC_INTF_DEV
+static int pcf8563_rtc_ioctl(struct device *dev, unsigned int cmd, unsigned long arg)
+{
+	struct pcf8563 *pcf8563 = i2c_get_clientdata(to_i2c_client(dev));
+	struct rtc_time tm;
+
+	switch (cmd) {
+	case RTC_VL_READ:
+		if (pcf8563->voltage_low)
+			dev_info(dev, "low voltage detected, date/time is not reliable.\n");
+
+		if (copy_to_user((void __user *)arg, &pcf8563->voltage_low,
+					sizeof(int)))
+			return -EFAULT;
+		return 0;
+	case RTC_VL_CLR:
+		/*
+		 * Clear the VL bit in the seconds register in case
+		 * the time has not been set already (which would
+		 * have cleared it). This does not really matter
+		 * because of the cached voltage_low value but do it
+		 * anyway for consistency.
+		 */
+		if (pcf8563_get_datetime(to_i2c_client(dev), &tm))
+			pcf8563_set_datetime(to_i2c_client(dev), &tm);
+
+		/* Clear the cached value. */
+		pcf8563->voltage_low = 0;
+
+		return 0;
+	default:
+		return -ENOIOCTLCMD;
+	}
+}
+#else
+#define pcf8563_rtc_ioctl NULL
+#endif
+
 static int pcf8563_rtc_read_time(struct device *dev, struct rtc_time *tm)
 {
 	return pcf8563_get_datetime(to_i2c_client(dev), tm);
@@ -184,6 +225,7 @@
 }
 
 static const struct rtc_class_ops pcf8563_rtc_ops = {
+	.ioctl		= pcf8563_rtc_ioctl,
 	.read_time	= pcf8563_rtc_read_time,
 	.set_time	= pcf8563_rtc_set_time,
 };
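Illustrative only (not part of the patch): how userspace exercises the ioctl path added above. RTC_VL_READ and RTC_VL_CLR are the standard RTC ioctls from <linux/rtc.h> that pcf8563_rtc_ioctl() now handles.

	int fd = open("/dev/rtc0", O_RDONLY);
	int vl = 0;

	if (ioctl(fd, RTC_VL_READ, &vl) == 0 && vl) {
		/* low-voltage flag was set: time may be unreliable */
		ioctl(fd, RTC_VL_CLR, 0);
	}
	close(fd);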
diff --git a/drivers/rtc/rtc-pl031.c b/drivers/rtc/rtc-pl031.c
index f027c06..cc05339 100644
--- a/drivers/rtc/rtc-pl031.c
+++ b/drivers/rtc/rtc-pl031.c
@@ -220,17 +220,9 @@
 	unsigned long events = 0;
 
 	rtcmis = readl(ldata->base + RTC_MIS);
-	if (rtcmis) {
-		writel(rtcmis, ldata->base + RTC_ICR);
-
-		if (rtcmis & RTC_BIT_AI)
-			events |= (RTC_AF | RTC_IRQF);
-
-		/* Timer interrupt is only available in ST variants */
-		if ((rtcmis & RTC_BIT_PI) &&
-			(ldata->hw_designer == AMBA_VENDOR_ST))
-			events |= (RTC_PF | RTC_IRQF);
-
+	if (rtcmis & RTC_BIT_AI) {
+		writel(RTC_BIT_AI, ldata->base + RTC_ICR);
+		events |= (RTC_AF | RTC_IRQF);
 		rtc_update_irq(ldata->rtc, 1, events);
 
 		return IRQ_HANDLED;
diff --git a/drivers/rtc/rtc-s3c.c b/drivers/rtc/rtc-s3c.c
index 3f3a297..7e6af0b 100644
--- a/drivers/rtc/rtc-s3c.c
+++ b/drivers/rtc/rtc-s3c.c
@@ -670,6 +670,7 @@
 #define s3c_rtc_resume  NULL
 #endif
 
+#ifdef CONFIG_OF
 static struct s3c_rtc_drv_data s3c_rtc_drv_data_array[] = {
 	[TYPE_S3C2410] = { TYPE_S3C2410 },
 	[TYPE_S3C2416] = { TYPE_S3C2416 },
@@ -677,7 +678,6 @@
 	[TYPE_S3C64XX] = { TYPE_S3C64XX },
 };
 
-#ifdef CONFIG_OF
 static const struct of_device_id s3c_rtc_dt_match[] = {
 	{
 		.compatible = "samsung,s3c2410-rtc",
diff --git a/drivers/rtc/rtc-spear.c b/drivers/rtc/rtc-spear.c
index e38da0d..1f76320 100644
--- a/drivers/rtc/rtc-spear.c
+++ b/drivers/rtc/rtc-spear.c
@@ -16,6 +16,7 @@
 #include <linux/io.h>
 #include <linux/irq.h>
 #include <linux/module.h>
+#include <linux/of.h>
 #include <linux/platform_device.h>
 #include <linux/rtc.h>
 #include <linux/slab.h>
@@ -519,6 +520,14 @@
 	clk_disable(config->clk);
 }
 
+#ifdef CONFIG_OF
+static const struct of_device_id spear_rtc_id_table[] = {
+	{ .compatible = "st,spear600-rtc" },
+	{}
+};
+MODULE_DEVICE_TABLE(of, spear_rtc_id_table);
+#endif
+
 static struct platform_driver spear_rtc_driver = {
 	.probe = spear_rtc_probe,
 	.remove = __devexit_p(spear_rtc_remove),
@@ -527,6 +536,7 @@
 	.shutdown = spear_rtc_shutdown,
 	.driver = {
 		.name = "rtc-spear",
+		.of_match_table = of_match_ptr(spear_rtc_id_table),
 	},
 };
 
diff --git a/drivers/rtc/rtc-tegra.c b/drivers/rtc/rtc-tegra.c
index 75259fe..c006025 100644
--- a/drivers/rtc/rtc-tegra.c
+++ b/drivers/rtc/rtc-tegra.c
@@ -309,7 +309,8 @@
 	struct resource *res;
 	int ret;
 
-	info = kzalloc(sizeof(struct tegra_rtc_info), GFP_KERNEL);
+	info = devm_kzalloc(&pdev->dev, sizeof(struct tegra_rtc_info),
+		GFP_KERNEL);
 	if (!info)
 		return -ENOMEM;
 
@@ -317,29 +318,18 @@
 	if (!res) {
 		dev_err(&pdev->dev,
 			"Unable to allocate resources for device.\n");
-		ret = -EBUSY;
-		goto err_free_info;
+		return -EBUSY;
 	}
 
-	if (!request_mem_region(res->start, resource_size(res), pdev->name)) {
-		dev_err(&pdev->dev,
-			"Unable to request mem region for device.\n");
-		ret = -EBUSY;
-		goto err_free_info;
+	info->rtc_base = devm_request_and_ioremap(&pdev->dev, res);
+	if (!info->rtc_base) {
+		dev_err(&pdev->dev, "Unable to request mem region and grab IOs for device.\n");
+		return -EBUSY;
 	}
 
 	info->tegra_rtc_irq = platform_get_irq(pdev, 0);
-	if (info->tegra_rtc_irq <= 0) {
-		ret = -EBUSY;
-		goto err_release_mem_region;
-	}
-
-	info->rtc_base = ioremap_nocache(res->start, resource_size(res));
-	if (!info->rtc_base) {
-		dev_err(&pdev->dev, "Unable to grab IOs for device.\n");
-		ret = -EBUSY;
-		goto err_release_mem_region;
-	}
+	if (info->tegra_rtc_irq <= 0)
+		return -EBUSY;
 
 	/* set context info. */
 	info->pdev = pdev;
@@ -362,11 +352,12 @@
 		dev_err(&pdev->dev,
 			"Unable to register device (err=%d).\n",
 			ret);
-		goto err_iounmap;
+		return ret;
 	}
 
-	ret = request_irq(info->tegra_rtc_irq, tegra_rtc_irq_handler,
-		IRQF_TRIGGER_HIGH, "rtc alarm", &pdev->dev);
+	ret = devm_request_irq(&pdev->dev, info->tegra_rtc_irq,
+			tegra_rtc_irq_handler, IRQF_TRIGGER_HIGH,
+			"rtc alarm", &pdev->dev);
 	if (ret) {
 		dev_err(&pdev->dev,
 			"Unable to request interrupt for device (err=%d).\n",
@@ -380,12 +371,6 @@
 
 err_dev_unreg:
 	rtc_device_unregister(info->rtc_dev);
-err_iounmap:
-	iounmap(info->rtc_base);
-err_release_mem_region:
-	release_mem_region(res->start, resource_size(res));
-err_free_info:
-	kfree(info);
 
 	return ret;
 }
@@ -393,17 +378,8 @@
 static int __devexit tegra_rtc_remove(struct platform_device *pdev)
 {
 	struct tegra_rtc_info *info = platform_get_drvdata(pdev);
-	struct resource *res;
 
-	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
-	if (!res)
-		return -EBUSY;
-
-	free_irq(info->tegra_rtc_irq, &pdev->dev);
 	rtc_device_unregister(info->rtc_dev);
-	iounmap(info->rtc_base);
-	release_mem_region(res->start, resource_size(res));
-	kfree(info);
 
 	platform_set_drvdata(pdev, NULL);
 
diff --git a/drivers/rtc/rtc-wm831x.c b/drivers/rtc/rtc-wm831x.c
index 3b6e6a6..59c6245 100644
--- a/drivers/rtc/rtc-wm831x.c
+++ b/drivers/rtc/rtc-wm831x.c
@@ -396,7 +396,7 @@
 {
 	struct wm831x *wm831x = dev_get_drvdata(pdev->dev.parent);
 	struct wm831x_rtc *wm831x_rtc;
-	int alm_irq = platform_get_irq_byname(pdev, "ALM");
+	int alm_irq = wm831x_irq(wm831x, platform_get_irq_byname(pdev, "ALM"));
 	int ret = 0;
 
 	wm831x_rtc = devm_kzalloc(&pdev->dev, sizeof(*wm831x_rtc), GFP_KERNEL);
diff --git a/drivers/s390/block/dasd_int.h b/drivers/s390/block/dasd_int.h
index 33a6743..c05da00 100644
--- a/drivers/s390/block/dasd_int.h
+++ b/drivers/s390/block/dasd_int.h
@@ -10,8 +10,6 @@
 #ifndef DASD_INT_H
 #define DASD_INT_H
 
-#ifdef __KERNEL__
-
 /* we keep old device allocation scheme; IOW, minors are still in 0..255 */
 #define DASD_PER_MAJOR (1U << (MINORBITS - DASD_PARTN_BITS))
 #define DASD_PARTN_MASK ((1 << DASD_PARTN_BITS) - 1)
@@ -791,6 +789,4 @@
 #define dasd_eer_enabled(d)	(0)
 #endif	/* CONFIG_DASD_ERR */
 
-#endif				/* __KERNEL__ */
-
 #endif				/* DASD_H */
diff --git a/drivers/s390/char/sclp_sdias.c b/drivers/s390/char/sclp_sdias.c
index 69e6c50..50f7115 100644
--- a/drivers/s390/char/sclp_sdias.c
+++ b/drivers/s390/char/sclp_sdias.c
@@ -211,7 +211,7 @@
 	sccb.evbuf.event_qual = EQ_STORE_DATA;
 	sccb.evbuf.data_id = DI_FCP_DUMP;
 	sccb.evbuf.event_id = 4712;
-#ifdef __s390x__
+#ifdef CONFIG_64BIT
 	sccb.evbuf.asa_size = ASA_SIZE_64;
 #else
 	sccb.evbuf.asa_size = ASA_SIZE_32;
diff --git a/drivers/scsi/be2iscsi/be_mgmt.c b/drivers/scsi/be2iscsi/be_mgmt.c
index 01bb04c..2a09679 100644
--- a/drivers/scsi/be2iscsi/be_mgmt.c
+++ b/drivers/scsi/be2iscsi/be_mgmt.c
@@ -571,13 +571,12 @@
 static int mgmt_alloc_cmd_data(struct beiscsi_hba *phba, struct be_dma_mem *cmd,
 			       int iscsi_cmd, int size)
 {
-	cmd->va = pci_alloc_consistent(phba->ctrl.pdev, sizeof(size),
-				       &cmd->dma);
+	cmd->va = pci_alloc_consistent(phba->ctrl.pdev, size, &cmd->dma);
 	if (!cmd->va) {
 		SE_DEBUG(DBG_LVL_1, "Failed to allocate memory for if info\n");
 		return -ENOMEM;
 	}
-	memset(cmd->va, 0, sizeof(size));
+	memset(cmd->va, 0, size);
 	cmd->size = size;
 	be_cmd_hdr_prepare(cmd->va, CMD_SUBSYSTEM_ISCSI, iscsi_cmd, size);
 	return 0;
diff --git a/drivers/scsi/bfa/bfad_attr.c b/drivers/scsi/bfa/bfad_attr.c
index 8b6c6bf..b839274 100644
--- a/drivers/scsi/bfa/bfad_attr.c
+++ b/drivers/scsi/bfa/bfad_attr.c
@@ -426,6 +426,23 @@
 		vshost = vport->drv_port.im_port->shost;
 		fc_host_node_name(vshost) = wwn_to_u64((u8 *)&port_cfg.nwwn);
 		fc_host_port_name(vshost) = wwn_to_u64((u8 *)&port_cfg.pwwn);
+		fc_host_supported_classes(vshost) = FC_COS_CLASS3;
+
+		memset(fc_host_supported_fc4s(vshost), 0,
+			sizeof(fc_host_supported_fc4s(vshost)));
+
+		/* For FCP type 0x08 */
+		if (supported_fc4s & BFA_LPORT_ROLE_FCP_IM)
+			fc_host_supported_fc4s(vshost)[2] = 1;
+
+		/* For fibre channel services type 0x20 */
+		fc_host_supported_fc4s(vshost)[7] = 1;
+
+		fc_host_supported_speeds(vshost) =
+				bfad_im_supported_speeds(&bfad->bfa);
+		fc_host_maxframe_size(vshost) =
+				bfa_fcport_get_maxfrsize(&bfad->bfa);
+
 		fc_vport->dd_data = vport;
 		vport->drv_port.im_port->fc_vport = fc_vport;
 	} else if (rc == BFA_STATUS_INVALID_WWN)
diff --git a/drivers/scsi/bfa/bfad_im.c b/drivers/scsi/bfa/bfad_im.c
index 3153923..1ac09af 100644
--- a/drivers/scsi/bfa/bfad_im.c
+++ b/drivers/scsi/bfa/bfad_im.c
@@ -987,7 +987,7 @@
 	return 0;
 }
 
-static u32
+u32
 bfad_im_supported_speeds(struct bfa_s *bfa)
 {
 	struct bfa_ioc_attr_s *ioc_attr;
diff --git a/drivers/scsi/bfa/bfad_im.h b/drivers/scsi/bfa/bfad_im.h
index 0814367..f6c1023 100644
--- a/drivers/scsi/bfa/bfad_im.h
+++ b/drivers/scsi/bfa/bfad_im.h
@@ -37,6 +37,7 @@
 		struct bfad_im_port_s *im_port, struct device *dev);
 void bfad_im_scsi_host_free(struct bfad_s *bfad,
 				struct bfad_im_port_s *im_port);
+u32 bfad_im_supported_speeds(struct bfa_s *bfa);
 
 #define MAX_FCP_TARGET 1024
 #define MAX_FCP_LUN 16384
diff --git a/drivers/scsi/bnx2fc/bnx2fc.h b/drivers/scsi/bnx2fc/bnx2fc.h
index a4953ef..0578fa0d 100644
--- a/drivers/scsi/bnx2fc/bnx2fc.h
+++ b/drivers/scsi/bnx2fc/bnx2fc.h
@@ -62,7 +62,7 @@
 #include "bnx2fc_constants.h"
 
 #define BNX2FC_NAME		"bnx2fc"
-#define BNX2FC_VERSION		"1.0.10"
+#define BNX2FC_VERSION		"1.0.11"
 
 #define PFX			"bnx2fc: "
 
@@ -228,13 +228,16 @@
 	struct packet_type fip_packet_type;
 	struct workqueue_struct *timer_work_queue;
 	struct kref kref;
-	struct fcoe_ctlr ctlr;
 	u8 vlan_enabled;
 	int vlan_id;
 	bool enabled;
 };
 
-#define bnx2fc_from_ctlr(fip) container_of(fip, struct bnx2fc_interface, ctlr)
+#define bnx2fc_from_ctlr(x)			\
+	((struct bnx2fc_interface *)((x) + 1))
+
+#define bnx2fc_to_ctlr(x)					\
+	((struct fcoe_ctlr *)(((struct fcoe_ctlr *)(x)) - 1))
 
 struct bnx2fc_lport {
 	struct list_head list;
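A short sketch (review context only, not part of the patch) of the allocation layout the two conversion macros above rely on: the bnx2fc_interface is carved out of the same fcoe_ctlr_device allocation, immediately after the struct fcoe_ctlr, so the conversions are plain pointer arithmetic rather than container_of() on an embedded member.

	/* as done later in bnx2fc_interface_create() */
	ctlr_dev = fcoe_ctlr_device_add(&netdev->dev, &bnx2fc_fcoe_sysfs_templ,
					sizeof(*interface) + sizeof(struct fcoe_ctlr));
	ctlr = fcoe_ctlr_device_priv(ctlr_dev);	/* [ fcoe_ctlr | bnx2fc_interface ] */
	interface = fcoe_ctlr_priv(ctlr);	/* i.e. bnx2fc_from_ctlr(ctlr) */

	/* the macros simply invert that layout */
	BUG_ON(bnx2fc_from_ctlr(ctlr) != interface);
	BUG_ON(bnx2fc_to_ctlr(interface) != ctlr);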
diff --git a/drivers/scsi/bnx2fc/bnx2fc_els.c b/drivers/scsi/bnx2fc/bnx2fc_els.c
index ce0ce3e..bdbbb13 100644
--- a/drivers/scsi/bnx2fc/bnx2fc_els.c
+++ b/drivers/scsi/bnx2fc/bnx2fc_els.c
@@ -854,7 +854,6 @@
 	struct fc_exch *exch = fc_seq_exch(seq);
 	struct fc_lport *lport = exch->lp;
 	u8 *mac;
-	struct fc_frame_header *fh;
 	u8 op;
 
 	if (IS_ERR(fp))
@@ -862,13 +861,6 @@
 
 	mac = fr_cb(fp)->granted_mac;
 	if (is_zero_ether_addr(mac)) {
-		fh = fc_frame_header_get(fp);
-		if (fh->fh_type != FC_TYPE_ELS) {
-			printk(KERN_ERR PFX "bnx2fc_flogi_resp:"
-				"fh_type != FC_TYPE_ELS\n");
-			fc_frame_free(fp);
-			return;
-		}
 		op = fc_frame_payload_op(fp);
 		if (lport->vport) {
 			if (op == ELS_LS_RJT) {
@@ -878,12 +870,10 @@
 				return;
 			}
 		}
-		if (fcoe_ctlr_recv_flogi(fip, lport, fp)) {
-			fc_frame_free(fp);
-			return;
-		}
+		fcoe_ctlr_recv_flogi(fip, lport, fp);
 	}
-	fip->update_mac(lport, mac);
+	if (!is_zero_ether_addr(mac))
+		fip->update_mac(lport, mac);
 done:
 	fc_lport_flogi_resp(seq, fp, lport);
 }
@@ -910,7 +900,7 @@
 {
 	struct fcoe_port *port = lport_priv(lport);
 	struct bnx2fc_interface *interface = port->priv;
-	struct fcoe_ctlr *fip = &interface->ctlr;
+	struct fcoe_ctlr *fip = bnx2fc_to_ctlr(interface);
 	struct fc_frame_header *fh = fc_frame_header_get(fp);
 
 	switch (op) {
diff --git a/drivers/scsi/bnx2fc/bnx2fc_fcoe.c b/drivers/scsi/bnx2fc/bnx2fc_fcoe.c
index c1c6a92..f52f668f 100644
--- a/drivers/scsi/bnx2fc/bnx2fc_fcoe.c
+++ b/drivers/scsi/bnx2fc/bnx2fc_fcoe.c
@@ -22,7 +22,7 @@
 
 #define DRV_MODULE_NAME		"bnx2fc"
 #define DRV_MODULE_VERSION	BNX2FC_VERSION
-#define DRV_MODULE_RELDATE	"Jan 22, 2011"
+#define DRV_MODULE_RELDATE	"Apr 24, 2012"
 
 
 static char version[] __devinitdata =
@@ -54,6 +54,7 @@
 static struct libfc_function_template bnx2fc_libfc_fcn_templ;
 static struct scsi_host_template bnx2fc_shost_template;
 static struct fc_function_template bnx2fc_transport_function;
+static struct fcoe_sysfs_function_template bnx2fc_fcoe_sysfs_templ;
 static struct fc_function_template bnx2fc_vport_xport_function;
 static int bnx2fc_create(struct net_device *netdev, enum fip_state fip_mode);
 static void __bnx2fc_destroy(struct bnx2fc_interface *interface);
@@ -88,6 +89,7 @@
 static void bnx2fc_stop(struct bnx2fc_interface *interface);
 static int __init bnx2fc_mod_init(void);
 static void __exit bnx2fc_mod_exit(void);
+static void bnx2fc_ctlr_get_lesb(struct fcoe_ctlr_device *ctlr_dev);
 
 unsigned int bnx2fc_debug_level;
 module_param_named(debug_logging, bnx2fc_debug_level, int, S_IRUGO|S_IWUSR);
@@ -118,6 +120,41 @@
 	__fcoe_get_lesb(lport, fc_lesb, netdev);
 }
 
+static void bnx2fc_ctlr_get_lesb(struct fcoe_ctlr_device *ctlr_dev)
+{
+	struct fcoe_ctlr *fip = fcoe_ctlr_device_priv(ctlr_dev);
+	struct net_device *netdev = bnx2fc_netdev(fip->lp);
+	struct fcoe_fc_els_lesb *fcoe_lesb;
+	struct fc_els_lesb fc_lesb;
+
+	__fcoe_get_lesb(fip->lp, &fc_lesb, netdev);
+	fcoe_lesb = (struct fcoe_fc_els_lesb *)(&fc_lesb);
+
+	ctlr_dev->lesb.lesb_link_fail =
+		ntohl(fcoe_lesb->lesb_link_fail);
+	ctlr_dev->lesb.lesb_vlink_fail =
+		ntohl(fcoe_lesb->lesb_vlink_fail);
+	ctlr_dev->lesb.lesb_miss_fka =
+		ntohl(fcoe_lesb->lesb_miss_fka);
+	ctlr_dev->lesb.lesb_symb_err =
+		ntohl(fcoe_lesb->lesb_symb_err);
+	ctlr_dev->lesb.lesb_err_block =
+		ntohl(fcoe_lesb->lesb_err_block);
+	ctlr_dev->lesb.lesb_fcs_error =
+		ntohl(fcoe_lesb->lesb_fcs_error);
+}
+EXPORT_SYMBOL(bnx2fc_ctlr_get_lesb);
+
+static void bnx2fc_fcf_get_vlan_id(struct fcoe_fcf_device *fcf_dev)
+{
+	struct fcoe_ctlr_device *ctlr_dev =
+		fcoe_fcf_dev_to_ctlr_dev(fcf_dev);
+	struct fcoe_ctlr *ctlr = fcoe_ctlr_device_priv(ctlr_dev);
+	struct bnx2fc_interface *fcoe = fcoe_ctlr_priv(ctlr);
+
+	fcf_dev->vlan_id = fcoe->vlan_id;
+}
+
 static void bnx2fc_clean_rx_queue(struct fc_lport *lp)
 {
 	struct fcoe_percpu_s *bg;
@@ -244,6 +281,7 @@
 	struct sk_buff		*skb;
 	struct fc_frame_header	*fh;
 	struct bnx2fc_interface	*interface;
+	struct fcoe_ctlr        *ctlr;
 	struct bnx2fc_hba *hba;
 	struct fcoe_port	*port;
 	struct fcoe_hdr		*hp;
@@ -256,6 +294,7 @@
 
 	port = (struct fcoe_port *)lport_priv(lport);
 	interface = port->priv;
+	ctlr = bnx2fc_to_ctlr(interface);
 	hba = interface->hba;
 
 	fh = fc_frame_header_get(fp);
@@ -268,12 +307,12 @@
 	}
 
 	if (unlikely(fh->fh_r_ctl == FC_RCTL_ELS_REQ)) {
-		if (!interface->ctlr.sel_fcf) {
+		if (!ctlr->sel_fcf) {
 			BNX2FC_HBA_DBG(lport, "FCF not selected yet!\n");
 			kfree_skb(skb);
 			return -EINVAL;
 		}
-		if (fcoe_ctlr_els_send(&interface->ctlr, lport, skb))
+		if (fcoe_ctlr_els_send(ctlr, lport, skb))
 			return 0;
 	}
 
@@ -346,14 +385,14 @@
 	/* fill up mac and fcoe headers */
 	eh = eth_hdr(skb);
 	eh->h_proto = htons(ETH_P_FCOE);
-	if (interface->ctlr.map_dest)
+	if (ctlr->map_dest)
 		fc_fcoe_set_mac(eh->h_dest, fh->fh_d_id);
 	else
 		/* insert GW address */
-		memcpy(eh->h_dest, interface->ctlr.dest_addr, ETH_ALEN);
+		memcpy(eh->h_dest, ctlr->dest_addr, ETH_ALEN);
 
-	if (unlikely(interface->ctlr.flogi_oxid != FC_XID_UNKNOWN))
-		memcpy(eh->h_source, interface->ctlr.ctl_src_addr, ETH_ALEN);
+	if (unlikely(ctlr->flogi_oxid != FC_XID_UNKNOWN))
+		memcpy(eh->h_source, ctlr->ctl_src_addr, ETH_ALEN);
 	else
 		memcpy(eh->h_source, port->data_src_addr, ETH_ALEN);
 
@@ -403,6 +442,7 @@
 {
 	struct fc_lport *lport;
 	struct bnx2fc_interface *interface;
+	struct fcoe_ctlr *ctlr;
 	struct fc_frame_header *fh;
 	struct fcoe_rcv_info *fr;
 	struct fcoe_percpu_s *bg;
@@ -410,7 +450,8 @@
 
 	interface = container_of(ptype, struct bnx2fc_interface,
 				 fcoe_packet_type);
-	lport = interface->ctlr.lp;
+	ctlr = bnx2fc_to_ctlr(interface);
+	lport = ctlr->lp;
 
 	if (unlikely(lport == NULL)) {
 		printk(KERN_ERR PFX "bnx2fc_rcv: lport is NULL\n");
@@ -758,11 +799,13 @@
 {
 	struct bnx2fc_hba *hba;
 	struct bnx2fc_interface *interface;
+	struct fcoe_ctlr *ctlr;
 	struct fcoe_port *port;
 	u64 wwnn, wwpn;
 
 	port = lport_priv(lport);
 	interface = port->priv;
+	ctlr = bnx2fc_to_ctlr(interface);
 	hba = interface->hba;
 
 	/* require support for get_pauseparam ethtool op. */
@@ -781,13 +824,13 @@
 
 	if (!lport->vport) {
 		if (fcoe_get_wwn(netdev, &wwnn, NETDEV_FCOE_WWNN))
-			wwnn = fcoe_wwn_from_mac(interface->ctlr.ctl_src_addr,
+			wwnn = fcoe_wwn_from_mac(ctlr->ctl_src_addr,
 						 1, 0);
 		BNX2FC_HBA_DBG(lport, "WWNN = 0x%llx\n", wwnn);
 		fc_set_wwnn(lport, wwnn);
 
 		if (fcoe_get_wwn(netdev, &wwpn, NETDEV_FCOE_WWPN))
-			wwpn = fcoe_wwn_from_mac(interface->ctlr.ctl_src_addr,
+			wwpn = fcoe_wwn_from_mac(ctlr->ctl_src_addr,
 						 2, 0);
 
 		BNX2FC_HBA_DBG(lport, "WWPN = 0x%llx\n", wwpn);
@@ -824,6 +867,7 @@
 	struct fc_lport *lport;
 	struct fc_lport *vport;
 	struct bnx2fc_interface *interface, *tmp;
+	struct fcoe_ctlr *ctlr;
 	int wait_for_upload = 0;
 	u32 link_possible = 1;
 
@@ -874,7 +918,8 @@
 		if (interface->hba != hba)
 			continue;
 
-		lport = interface->ctlr.lp;
+		ctlr = bnx2fc_to_ctlr(interface);
+		lport = ctlr->lp;
 		BNX2FC_HBA_DBG(lport, "netevent handler - event=%s %ld\n",
 				interface->netdev->name, event);
 
@@ -889,8 +934,8 @@
 			 * on a stale vlan
 			 */
 			if (interface->enabled)
-				fcoe_ctlr_link_up(&interface->ctlr);
-		} else if (fcoe_ctlr_link_down(&interface->ctlr)) {
+				fcoe_ctlr_link_up(ctlr);
+		} else if (fcoe_ctlr_link_down(ctlr)) {
 			mutex_lock(&lport->lp_mutex);
 			list_for_each_entry(vport, &lport->vports, list)
 				fc_host_port_type(vport->host) =
@@ -995,9 +1040,11 @@
 			   struct net_device *orig_dev)
 {
 	struct bnx2fc_interface *interface;
+	struct fcoe_ctlr *ctlr;
 	interface = container_of(ptype, struct bnx2fc_interface,
 				 fip_packet_type);
-	fcoe_ctlr_recv(&interface->ctlr, skb);
+	ctlr = bnx2fc_to_ctlr(interface);
+	fcoe_ctlr_recv(ctlr, skb);
 	return 0;
 }
 
@@ -1155,6 +1202,7 @@
 {
 	struct net_device *netdev = interface->netdev;
 	struct net_device *physdev = interface->hba->phys_dev;
+	struct fcoe_ctlr *ctlr = bnx2fc_to_ctlr(interface);
 	struct netdev_hw_addr *ha;
 	int sel_san_mac = 0;
 
@@ -1169,7 +1217,7 @@
 
 		if ((ha->type == NETDEV_HW_ADDR_T_SAN) &&
 		    (is_valid_ether_addr(ha->addr))) {
-			memcpy(interface->ctlr.ctl_src_addr, ha->addr,
+			memcpy(ctlr->ctl_src_addr, ha->addr,
 			       ETH_ALEN);
 			sel_san_mac = 1;
 			BNX2FC_MISC_DBG("Found SAN MAC\n");
@@ -1224,19 +1272,23 @@
 
 static void bnx2fc_interface_release(struct kref *kref)
 {
+	struct fcoe_ctlr_device *ctlr_dev;
 	struct bnx2fc_interface *interface;
+	struct fcoe_ctlr *ctlr;
 	struct net_device *netdev;
 
 	interface = container_of(kref, struct bnx2fc_interface, kref);
 	BNX2FC_MISC_DBG("Interface is being released\n");
 
+	ctlr = bnx2fc_to_ctlr(interface);
+	ctlr_dev = fcoe_ctlr_to_ctlr_dev(ctlr);
 	netdev = interface->netdev;
 
 	/* tear-down FIP controller */
 	if (test_and_clear_bit(BNX2FC_CTLR_INIT_DONE, &interface->if_flags))
-		fcoe_ctlr_destroy(&interface->ctlr);
+		fcoe_ctlr_destroy(ctlr);
 
-	kfree(interface);
+	fcoe_ctlr_device_delete(ctlr_dev);
 
 	dev_put(netdev);
 	module_put(THIS_MODULE);
@@ -1329,33 +1381,40 @@
 				      struct net_device *netdev,
 				      enum fip_state fip_mode)
 {
+	struct fcoe_ctlr_device *ctlr_dev;
 	struct bnx2fc_interface *interface;
+	struct fcoe_ctlr *ctlr;
+	int size;
 	int rc = 0;
 
-	interface = kzalloc(sizeof(*interface), GFP_KERNEL);
-	if (!interface) {
+	size = (sizeof(*interface) + sizeof(struct fcoe_ctlr));
+	ctlr_dev = fcoe_ctlr_device_add(&netdev->dev, &bnx2fc_fcoe_sysfs_templ,
+					 size);
+	if (!ctlr_dev) {
 		printk(KERN_ERR PFX "Unable to allocate interface structure\n");
 		return NULL;
 	}
+	ctlr = fcoe_ctlr_device_priv(ctlr_dev);
+	interface = fcoe_ctlr_priv(ctlr);
 	dev_hold(netdev);
 	kref_init(&interface->kref);
 	interface->hba = hba;
 	interface->netdev = netdev;
 
 	/* Initialize FIP */
-	fcoe_ctlr_init(&interface->ctlr, fip_mode);
-	interface->ctlr.send = bnx2fc_fip_send;
-	interface->ctlr.update_mac = bnx2fc_update_src_mac;
-	interface->ctlr.get_src_addr = bnx2fc_get_src_mac;
+	fcoe_ctlr_init(ctlr, fip_mode);
+	ctlr->send = bnx2fc_fip_send;
+	ctlr->update_mac = bnx2fc_update_src_mac;
+	ctlr->get_src_addr = bnx2fc_get_src_mac;
 	set_bit(BNX2FC_CTLR_INIT_DONE, &interface->if_flags);
 
 	rc = bnx2fc_interface_setup(interface);
 	if (!rc)
 		return interface;
 
-	fcoe_ctlr_destroy(&interface->ctlr);
+	fcoe_ctlr_destroy(ctlr);
 	dev_put(netdev);
-	kfree(interface);
+	fcoe_ctlr_device_delete(ctlr_dev);
 	return NULL;
 }
 
@@ -1373,6 +1432,7 @@
 static struct fc_lport *bnx2fc_if_create(struct bnx2fc_interface *interface,
 				  struct device *parent, int npiv)
 {
+	struct fcoe_ctlr        *ctlr = bnx2fc_to_ctlr(interface);
 	struct fc_lport		*lport, *n_port;
 	struct fcoe_port	*port;
 	struct Scsi_Host	*shost;
@@ -1383,7 +1443,7 @@
 
 	blport = kzalloc(sizeof(struct bnx2fc_lport), GFP_KERNEL);
 	if (!blport) {
-		BNX2FC_HBA_DBG(interface->ctlr.lp, "Unable to alloc blport\n");
+		BNX2FC_HBA_DBG(ctlr->lp, "Unable to alloc blport\n");
 		return NULL;
 	}
 
@@ -1479,7 +1539,8 @@
 
 static void bnx2fc_interface_cleanup(struct bnx2fc_interface *interface)
 {
-	struct fc_lport *lport = interface->ctlr.lp;
+	struct fcoe_ctlr *ctlr = bnx2fc_to_ctlr(interface);
+	struct fc_lport *lport = ctlr->lp;
 	struct fcoe_port *port = lport_priv(lport);
 	struct bnx2fc_hba *hba = interface->hba;
 
@@ -1519,7 +1580,8 @@
 
 static void __bnx2fc_destroy(struct bnx2fc_interface *interface)
 {
-	struct fc_lport *lport = interface->ctlr.lp;
+	struct fcoe_ctlr *ctlr = bnx2fc_to_ctlr(interface);
+	struct fc_lport *lport = ctlr->lp;
 	struct fcoe_port *port = lport_priv(lport);
 
 	bnx2fc_interface_cleanup(interface);
@@ -1543,13 +1605,15 @@
 {
 	struct bnx2fc_interface *interface = NULL;
 	struct workqueue_struct *timer_work_queue;
+	struct fcoe_ctlr *ctlr;
 	int rc = 0;
 
 	rtnl_lock();
 	mutex_lock(&bnx2fc_dev_lock);
 
 	interface = bnx2fc_interface_lookup(netdev);
-	if (!interface || !interface->ctlr.lp) {
+	ctlr = interface ? bnx2fc_to_ctlr(interface) : NULL;
+	if (!interface || !ctlr->lp) {
 		rc = -ENODEV;
 		printk(KERN_ERR PFX "bnx2fc_destroy: interface or lport not found\n");
 		goto netdev_err;
@@ -1646,6 +1710,7 @@
 {
 	struct bnx2fc_hba *hba = handle;
 	struct bnx2fc_interface *interface;
+	struct fcoe_ctlr *ctlr;
 	struct fc_lport *lport;
 
 	mutex_lock(&bnx2fc_dev_lock);
@@ -1657,7 +1722,8 @@
 
 	list_for_each_entry(interface, &if_list, list) {
 		if (interface->hba == hba) {
-			lport = interface->ctlr.lp;
+			ctlr = bnx2fc_to_ctlr(interface);
+			lport = ctlr->lp;
 			/* Kick off Fabric discovery*/
 			printk(KERN_ERR PFX "ulp_init: start discovery\n");
 			lport->tt.frame_send = bnx2fc_xmit;
@@ -1677,13 +1743,14 @@
 
 static void bnx2fc_stop(struct bnx2fc_interface *interface)
 {
+	struct fcoe_ctlr *ctlr = bnx2fc_to_ctlr(interface);
 	struct fc_lport *lport;
 	struct fc_lport *vport;
 
 	if (!test_bit(BNX2FC_FLAG_FW_INIT_DONE, &interface->hba->flags))
 		return;
 
-	lport = interface->ctlr.lp;
+	lport = ctlr->lp;
 	bnx2fc_port_shutdown(lport);
 
 	mutex_lock(&lport->lp_mutex);
@@ -1692,7 +1759,7 @@
 					FC_PORTTYPE_UNKNOWN;
 	mutex_unlock(&lport->lp_mutex);
 	fc_host_port_type(lport->host) = FC_PORTTYPE_UNKNOWN;
-	fcoe_ctlr_link_down(&interface->ctlr);
+	fcoe_ctlr_link_down(ctlr);
 	fcoe_clean_pending_queue(lport);
 }
 
@@ -1804,6 +1871,7 @@
 
 static void bnx2fc_start_disc(struct bnx2fc_interface *interface)
 {
+	struct fcoe_ctlr *ctlr = bnx2fc_to_ctlr(interface);
 	struct fc_lport *lport;
 	int wait_cnt = 0;
 
@@ -1814,18 +1882,18 @@
 		return;
 	}
 
-	lport = interface->ctlr.lp;
+	lport = ctlr->lp;
 	BNX2FC_HBA_DBG(lport, "calling fc_fabric_login\n");
 
 	if (!bnx2fc_link_ok(lport) && interface->enabled) {
 		BNX2FC_HBA_DBG(lport, "ctlr_link_up\n");
-		fcoe_ctlr_link_up(&interface->ctlr);
+		fcoe_ctlr_link_up(ctlr);
 		fc_host_port_type(lport->host) = FC_PORTTYPE_NPORT;
 		set_bit(ADAPTER_STATE_READY, &interface->hba->adapter_state);
 	}
 
 	/* wait for the FCF to be selected before issuing FLOGI */
-	while (!interface->ctlr.sel_fcf) {
+	while (!ctlr->sel_fcf) {
 		msleep(250);
 		/* give up after 3 secs */
 		if (++wait_cnt > 12)
@@ -1889,19 +1957,21 @@
 static int bnx2fc_disable(struct net_device *netdev)
 {
 	struct bnx2fc_interface *interface;
+	struct fcoe_ctlr *ctlr;
 	int rc = 0;
 
 	rtnl_lock();
 	mutex_lock(&bnx2fc_dev_lock);
 
 	interface = bnx2fc_interface_lookup(netdev);
-	if (!interface || !interface->ctlr.lp) {
+	ctlr = interface ? bnx2fc_to_ctlr(interface) : NULL;
+	if (!interface || !ctlr->lp) {
 		rc = -ENODEV;
 		printk(KERN_ERR PFX "bnx2fc_disable: interface or lport not found\n");
 	} else {
 		interface->enabled = false;
-		fcoe_ctlr_link_down(&interface->ctlr);
-		fcoe_clean_pending_queue(interface->ctlr.lp);
+		fcoe_ctlr_link_down(ctlr);
+		fcoe_clean_pending_queue(ctlr->lp);
 	}
 
 	mutex_unlock(&bnx2fc_dev_lock);
@@ -1913,17 +1983,19 @@
 static int bnx2fc_enable(struct net_device *netdev)
 {
 	struct bnx2fc_interface *interface;
+	struct fcoe_ctlr *ctlr;
 	int rc = 0;
 
 	rtnl_lock();
 	mutex_lock(&bnx2fc_dev_lock);
 
 	interface = bnx2fc_interface_lookup(netdev);
-	if (!interface || !interface->ctlr.lp) {
+	ctlr = interface ? bnx2fc_to_ctlr(interface) : NULL;
+	if (!interface || !ctlr->lp) {
 		rc = -ENODEV;
 		printk(KERN_ERR PFX "bnx2fc_enable: interface or lport not found\n");
-	} else if (!bnx2fc_link_ok(interface->ctlr.lp)) {
-		fcoe_ctlr_link_up(&interface->ctlr);
+	} else if (!bnx2fc_link_ok(ctlr->lp)) {
+		fcoe_ctlr_link_up(ctlr);
 		interface->enabled = true;
 	}
 
@@ -1944,6 +2016,7 @@
  */
 static int bnx2fc_create(struct net_device *netdev, enum fip_state fip_mode)
 {
+	struct fcoe_ctlr *ctlr;
 	struct bnx2fc_interface *interface;
 	struct bnx2fc_hba *hba;
 	struct net_device *phys_dev;
@@ -2010,6 +2083,7 @@
 		goto ifput_err;
 	}
 
+	ctlr = bnx2fc_to_ctlr(interface);
 	interface->vlan_id = vlan_id;
 	interface->vlan_enabled = 1;
 
@@ -2035,10 +2109,10 @@
 	lport->boot_time = jiffies;
 
 	/* Make this master N_port */
-	interface->ctlr.lp = lport;
+	ctlr->lp = lport;
 
 	if (!bnx2fc_link_ok(lport)) {
-		fcoe_ctlr_link_up(&interface->ctlr);
+		fcoe_ctlr_link_up(ctlr);
 		fc_host_port_type(lport->host) = FC_PORTTYPE_NPORT;
 		set_bit(ADAPTER_STATE_READY, &interface->hba->adapter_state);
 	}
@@ -2439,6 +2513,19 @@
 module_init(bnx2fc_mod_init);
 module_exit(bnx2fc_mod_exit);
 
+static struct fcoe_sysfs_function_template bnx2fc_fcoe_sysfs_templ = {
+	.get_fcoe_ctlr_mode = fcoe_ctlr_get_fip_mode,
+	.get_fcoe_ctlr_link_fail = bnx2fc_ctlr_get_lesb,
+	.get_fcoe_ctlr_vlink_fail = bnx2fc_ctlr_get_lesb,
+	.get_fcoe_ctlr_miss_fka = bnx2fc_ctlr_get_lesb,
+	.get_fcoe_ctlr_symb_err = bnx2fc_ctlr_get_lesb,
+	.get_fcoe_ctlr_err_block = bnx2fc_ctlr_get_lesb,
+	.get_fcoe_ctlr_fcs_error = bnx2fc_ctlr_get_lesb,
+
+	.get_fcoe_fcf_selected = fcoe_fcf_get_selected,
+	.get_fcoe_fcf_vlan_id = bnx2fc_fcf_get_vlan_id,
+};
+
 static struct fc_function_template bnx2fc_transport_function = {
 	.show_host_node_name = 1,
 	.show_host_port_name = 1,
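
With the fcoe_ctlr no longer embedded in struct bnx2fc_interface, the interface memory is owned by the fcoe_ctlr_device allocated in bnx2fc_interface_create(), so teardown must go through fcoe_ctlr_device_delete() rather than kfree(). A minimal sketch of that ownership rule, using only the helpers already used above (the function name is illustrative, not part of the patch):

static void example_bnx2fc_teardown(struct bnx2fc_interface *interface)
{
	/* recover the controller and its sysfs device from the interface */
	struct fcoe_ctlr *ctlr = bnx2fc_to_ctlr(interface);
	struct fcoe_ctlr_device *ctlr_dev = fcoe_ctlr_to_ctlr_dev(ctlr);

	/* stop FIP before releasing the backing allocation */
	fcoe_ctlr_destroy(ctlr);

	/*
	 * The interface was carved out of the ctlr_dev's private area,
	 * so this single call releases interface, ctlr and ctlr_dev
	 * together (via fcoe_ctlr_device_release()).
	 */
	fcoe_ctlr_device_delete(ctlr_dev);
}
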
diff --git a/drivers/scsi/bnx2fc/bnx2fc_hwi.c b/drivers/scsi/bnx2fc/bnx2fc_hwi.c
index afd5709..2ca6bfe 100644
--- a/drivers/scsi/bnx2fc/bnx2fc_hwi.c
+++ b/drivers/scsi/bnx2fc/bnx2fc_hwi.c
@@ -167,6 +167,7 @@
 {
 	struct fc_lport *lport = port->lport;
 	struct bnx2fc_interface *interface = port->priv;
+	struct fcoe_ctlr *ctlr = bnx2fc_to_ctlr(interface);
 	struct bnx2fc_hba *hba = interface->hba;
 	struct kwqe *kwqe_arr[4];
 	struct fcoe_kwqe_conn_offload1 ofld_req1;
@@ -314,13 +315,13 @@
 	ofld_req4.src_mac_addr_mid[1] =  port->data_src_addr[2];
 	ofld_req4.src_mac_addr_hi[0] =  port->data_src_addr[1];
 	ofld_req4.src_mac_addr_hi[1] =  port->data_src_addr[0];
-	ofld_req4.dst_mac_addr_lo[0] =  interface->ctlr.dest_addr[5];
+	ofld_req4.dst_mac_addr_lo[0] =  ctlr->dest_addr[5];
 							/* fcf mac */
-	ofld_req4.dst_mac_addr_lo[1] =  interface->ctlr.dest_addr[4];
-	ofld_req4.dst_mac_addr_mid[0] =  interface->ctlr.dest_addr[3];
-	ofld_req4.dst_mac_addr_mid[1] =  interface->ctlr.dest_addr[2];
-	ofld_req4.dst_mac_addr_hi[0] =  interface->ctlr.dest_addr[1];
-	ofld_req4.dst_mac_addr_hi[1] =  interface->ctlr.dest_addr[0];
+	ofld_req4.dst_mac_addr_lo[1] = ctlr->dest_addr[4];
+	ofld_req4.dst_mac_addr_mid[0] = ctlr->dest_addr[3];
+	ofld_req4.dst_mac_addr_mid[1] = ctlr->dest_addr[2];
+	ofld_req4.dst_mac_addr_hi[0] = ctlr->dest_addr[1];
+	ofld_req4.dst_mac_addr_hi[1] = ctlr->dest_addr[0];
 
 	ofld_req4.lcq_addr_lo = (u32) tgt->lcq_dma;
 	ofld_req4.lcq_addr_hi = (u32)((u64) tgt->lcq_dma >> 32);
@@ -351,6 +352,7 @@
 {
 	struct kwqe *kwqe_arr[2];
 	struct bnx2fc_interface *interface = port->priv;
+	struct fcoe_ctlr *ctlr = bnx2fc_to_ctlr(interface);
 	struct bnx2fc_hba *hba = interface->hba;
 	struct fcoe_kwqe_conn_enable_disable enbl_req;
 	struct fc_lport *lport = port->lport;
@@ -374,12 +376,12 @@
 	enbl_req.src_mac_addr_hi[1] =  port->data_src_addr[0];
 	memcpy(tgt->src_addr, port->data_src_addr, ETH_ALEN);
 
-	enbl_req.dst_mac_addr_lo[0] =  interface->ctlr.dest_addr[5];
-	enbl_req.dst_mac_addr_lo[1] =  interface->ctlr.dest_addr[4];
-	enbl_req.dst_mac_addr_mid[0] =  interface->ctlr.dest_addr[3];
-	enbl_req.dst_mac_addr_mid[1] =  interface->ctlr.dest_addr[2];
-	enbl_req.dst_mac_addr_hi[0] =  interface->ctlr.dest_addr[1];
-	enbl_req.dst_mac_addr_hi[1] =  interface->ctlr.dest_addr[0];
+	enbl_req.dst_mac_addr_lo[0] =  ctlr->dest_addr[5];
+	enbl_req.dst_mac_addr_lo[1] =  ctlr->dest_addr[4];
+	enbl_req.dst_mac_addr_mid[0] = ctlr->dest_addr[3];
+	enbl_req.dst_mac_addr_mid[1] = ctlr->dest_addr[2];
+	enbl_req.dst_mac_addr_hi[0] = ctlr->dest_addr[1];
+	enbl_req.dst_mac_addr_hi[1] = ctlr->dest_addr[0];
 
 	port_id = fc_host_port_id(lport->host);
 	if (port_id != tgt->sid) {
@@ -419,6 +421,7 @@
 				    struct bnx2fc_rport *tgt)
 {
 	struct bnx2fc_interface *interface = port->priv;
+	struct fcoe_ctlr *ctlr = bnx2fc_to_ctlr(interface);
 	struct bnx2fc_hba *hba = interface->hba;
 	struct fcoe_kwqe_conn_enable_disable disable_req;
 	struct kwqe *kwqe_arr[2];
@@ -440,12 +443,12 @@
 	disable_req.src_mac_addr_hi[0] =  tgt->src_addr[1];
 	disable_req.src_mac_addr_hi[1] =  tgt->src_addr[0];
 
-	disable_req.dst_mac_addr_lo[0] =  interface->ctlr.dest_addr[5];
-	disable_req.dst_mac_addr_lo[1] =  interface->ctlr.dest_addr[4];
-	disable_req.dst_mac_addr_mid[0] =  interface->ctlr.dest_addr[3];
-	disable_req.dst_mac_addr_mid[1] =  interface->ctlr.dest_addr[2];
-	disable_req.dst_mac_addr_hi[0] =  interface->ctlr.dest_addr[1];
-	disable_req.dst_mac_addr_hi[1] =  interface->ctlr.dest_addr[0];
+	disable_req.dst_mac_addr_lo[0] =  ctlr->dest_addr[5];
+	disable_req.dst_mac_addr_lo[1] =  ctlr->dest_addr[4];
+	disable_req.dst_mac_addr_mid[0] = ctlr->dest_addr[3];
+	disable_req.dst_mac_addr_mid[1] = ctlr->dest_addr[2];
+	disable_req.dst_mac_addr_hi[0] = ctlr->dest_addr[1];
+	disable_req.dst_mac_addr_hi[1] = ctlr->dest_addr[0];
 
 	port_id = tgt->sid;
 	disable_req.s_id[0] = (port_id & 0x000000FF);
diff --git a/drivers/scsi/bnx2fc/bnx2fc_io.c b/drivers/scsi/bnx2fc/bnx2fc_io.c
index e897ce9..4f7453b 100644
--- a/drivers/scsi/bnx2fc/bnx2fc_io.c
+++ b/drivers/scsi/bnx2fc/bnx2fc_io.c
@@ -810,8 +810,22 @@
 	spin_lock_bh(&tgt->tgt_lock);
 
 	io_req->wait_for_comp = 0;
-	if (!(test_bit(BNX2FC_FLAG_TM_COMPL, &io_req->req_flags)))
+	if (!(test_bit(BNX2FC_FLAG_TM_COMPL, &io_req->req_flags))) {
 		set_bit(BNX2FC_FLAG_TM_TIMEOUT, &io_req->req_flags);
+		if (io_req->on_tmf_queue) {
+			list_del_init(&io_req->link);
+			io_req->on_tmf_queue = 0;
+		}
+		io_req->wait_for_comp = 1;
+		bnx2fc_initiate_cleanup(io_req);
+		spin_unlock_bh(&tgt->tgt_lock);
+		rc = wait_for_completion_timeout(&io_req->tm_done,
+						 BNX2FC_FW_TIMEOUT);
+		spin_lock_bh(&tgt->tgt_lock);
+		io_req->wait_for_comp = 0;
+		if (!rc)
+			kref_put(&io_req->refcount, bnx2fc_cmd_release);
+	}
 
 	spin_unlock_bh(&tgt->tgt_lock);
 
@@ -1089,6 +1103,48 @@
 	return bnx2fc_initiate_tmf(sc_cmd, FCP_TMF_LUN_RESET);
 }
 
+int bnx2fc_expl_logo(struct fc_lport *lport, struct bnx2fc_cmd *io_req)
+{
+	struct bnx2fc_rport *tgt = io_req->tgt;
+	struct fc_rport_priv *rdata = tgt->rdata;
+	int logo_issued;
+	int rc = SUCCESS;
+	int wait_cnt = 0;
+
+	BNX2FC_IO_DBG(io_req, "Expl logo - tgt flags = 0x%lx\n",
+		      tgt->flags);
+	logo_issued = test_and_set_bit(BNX2FC_FLAG_EXPL_LOGO,
+				       &tgt->flags);
+	io_req->wait_for_comp = 1;
+	bnx2fc_initiate_cleanup(io_req);
+
+	spin_unlock_bh(&tgt->tgt_lock);
+
+	wait_for_completion(&io_req->tm_done);
+
+	io_req->wait_for_comp = 0;
+	/*
+	 * release the reference taken in eh_abort to allow the
+	 * target to re-login after flushing IOs
+	 */
+	kref_put(&io_req->refcount, bnx2fc_cmd_release);
+
+	if (!logo_issued) {
+		clear_bit(BNX2FC_FLAG_SESSION_READY, &tgt->flags);
+		mutex_lock(&lport->disc.disc_mutex);
+		lport->tt.rport_logoff(rdata);
+		mutex_unlock(&lport->disc.disc_mutex);
+		do {
+			msleep(BNX2FC_RELOGIN_WAIT_TIME);
+			if (wait_cnt++ > BNX2FC_RELOGIN_WAIT_CNT) {
+				rc = FAILED;
+				break;
+			}
+		} while (!test_bit(BNX2FC_FLAG_SESSION_READY, &tgt->flags));
+	}
+	spin_lock_bh(&tgt->tgt_lock);
+	return rc;
+}
 /**
  * bnx2fc_eh_abort - eh_abort_handler api to abort an outstanding
  *			SCSI command
@@ -1103,10 +1159,7 @@
 	struct fc_rport_libfc_priv *rp = rport->dd_data;
 	struct bnx2fc_cmd *io_req;
 	struct fc_lport *lport;
-	struct fc_rport_priv *rdata;
 	struct bnx2fc_rport *tgt;
-	int logo_issued;
-	int wait_cnt = 0;
 	int rc = FAILED;
 
 
@@ -1183,58 +1236,31 @@
 	list_add_tail(&io_req->link, &tgt->io_retire_queue);
 
 	init_completion(&io_req->tm_done);
-	io_req->wait_for_comp = 1;
 
-	if (!test_and_set_bit(BNX2FC_FLAG_ISSUE_ABTS, &io_req->req_flags)) {
-		/* Cancel the current timer running on this io_req */
-		if (cancel_delayed_work(&io_req->timeout_work))
-			kref_put(&io_req->refcount,
-				 bnx2fc_cmd_release); /* drop timer hold */
-		set_bit(BNX2FC_FLAG_EH_ABORT, &io_req->req_flags);
-		rc = bnx2fc_initiate_abts(io_req);
-	} else {
+	if (test_and_set_bit(BNX2FC_FLAG_ISSUE_ABTS, &io_req->req_flags)) {
 		printk(KERN_ERR PFX "eh_abort: io_req (xid = 0x%x) "
 				"already in abts processing\n", io_req->xid);
 		if (cancel_delayed_work(&io_req->timeout_work))
 			kref_put(&io_req->refcount,
 				 bnx2fc_cmd_release); /* drop timer hold */
+		rc = bnx2fc_expl_logo(lport, io_req);
+		goto out;
+	}
+
+	/* Cancel the current timer running on this io_req */
+	if (cancel_delayed_work(&io_req->timeout_work))
+		kref_put(&io_req->refcount,
+			 bnx2fc_cmd_release); /* drop timer hold */
+	set_bit(BNX2FC_FLAG_EH_ABORT, &io_req->req_flags);
+	io_req->wait_for_comp = 1;
+	rc = bnx2fc_initiate_abts(io_req);
+	if (rc == FAILED) {
 		bnx2fc_initiate_cleanup(io_req);
-
 		spin_unlock_bh(&tgt->tgt_lock);
-
 		wait_for_completion(&io_req->tm_done);
-
 		spin_lock_bh(&tgt->tgt_lock);
 		io_req->wait_for_comp = 0;
-		rdata = io_req->tgt->rdata;
-		logo_issued = test_and_set_bit(BNX2FC_FLAG_EXPL_LOGO,
-					       &tgt->flags);
-		kref_put(&io_req->refcount, bnx2fc_cmd_release);
-		spin_unlock_bh(&tgt->tgt_lock);
-
-		if (!logo_issued) {
-			BNX2FC_IO_DBG(io_req, "Expl logo - tgt flags = 0x%lx\n",
-				      tgt->flags);
-			mutex_lock(&lport->disc.disc_mutex);
-			lport->tt.rport_logoff(rdata);
-			mutex_unlock(&lport->disc.disc_mutex);
-			do {
-				msleep(BNX2FC_RELOGIN_WAIT_TIME);
-				/*
-				 * If session not recovered, let SCSI-ml
-				 * escalate error recovery.
-				 */
-				if (wait_cnt++ > BNX2FC_RELOGIN_WAIT_CNT)
-					return FAILED;
-			} while (!test_bit(BNX2FC_FLAG_SESSION_READY,
-					   &tgt->flags));
-		}
-		return SUCCESS;
-	}
-	if (rc == FAILED) {
-		kref_put(&io_req->refcount, bnx2fc_cmd_release);
-		spin_unlock_bh(&tgt->tgt_lock);
-		return rc;
+		goto done;
 	}
 	spin_unlock_bh(&tgt->tgt_lock);
 
@@ -1247,7 +1273,8 @@
 		/* Let the scsi-ml try to recover this command */
 		printk(KERN_ERR PFX "abort failed, xid = 0x%x\n",
 		       io_req->xid);
-		rc = FAILED;
+		rc = bnx2fc_expl_logo(lport, io_req);
+		goto out;
 	} else {
 		/*
 		 * We come here even when there was a race condition
@@ -1259,9 +1286,10 @@
 		bnx2fc_scsi_done(io_req, DID_ABORT);
 		kref_put(&io_req->refcount, bnx2fc_cmd_release);
 	}
-
+done:
 	/* release the reference taken in eh_abort */
 	kref_put(&io_req->refcount, bnx2fc_cmd_release);
+out:
 	spin_unlock_bh(&tgt->tgt_lock);
 	return rc;
 }
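
bnx2fc_expl_logo() is entered and left with tgt->tgt_lock held but drops the lock internally around its blocking waits, so the eh_abort paths above can sleep while escalating to an explicit logout. A hedged caller-side sketch of that locking contract (the wrapper name is illustrative, not part of the patch):

static int example_escalate_to_logo(struct fc_lport *lport,
				    struct bnx2fc_cmd *io_req)
{
	struct bnx2fc_rport *tgt = io_req->tgt;
	int rc;

	spin_lock_bh(&tgt->tgt_lock);
	/*
	 * May sleep: tgt_lock is released around wait_for_completion()
	 * and the re-login polling loop, then re-acquired before return.
	 */
	rc = bnx2fc_expl_logo(lport, io_req);
	spin_unlock_bh(&tgt->tgt_lock);

	return rc;	/* SUCCESS, or FAILED if the session did not recover */
}
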
diff --git a/drivers/scsi/bnx2fc/bnx2fc_tgt.c b/drivers/scsi/bnx2fc/bnx2fc_tgt.c
index c1800b5..082a25c 100644
--- a/drivers/scsi/bnx2fc/bnx2fc_tgt.c
+++ b/drivers/scsi/bnx2fc/bnx2fc_tgt.c
@@ -185,6 +185,16 @@
 		BUG_ON(rc);
 	}
 
+	list_for_each_safe(list, tmp, &tgt->active_tm_queue) {
+		i++;
+		io_req = (struct bnx2fc_cmd *)list;
+		list_del_init(&io_req->link);
+		io_req->on_tmf_queue = 0;
+		BNX2FC_IO_DBG(io_req, "tm_queue cleanup\n");
+		if (io_req->wait_for_comp)
+			complete(&io_req->tm_done);
+	}
+
 	list_for_each_safe(list, tmp, &tgt->els_queue) {
 		i++;
 		io_req = (struct bnx2fc_cmd *)list;
@@ -213,8 +223,17 @@
 
 		BNX2FC_IO_DBG(io_req, "retire_queue flush\n");
 
-		if (cancel_delayed_work(&io_req->timeout_work))
+		if (cancel_delayed_work(&io_req->timeout_work)) {
+			if (test_and_clear_bit(BNX2FC_FLAG_EH_ABORT,
+						&io_req->req_flags)) {
+				/* Handle eh_abort timeout */
+				BNX2FC_IO_DBG(io_req, "eh_abort for IO "
+					      "in retire_q\n");
+				if (io_req->wait_for_comp)
+					complete(&io_req->tm_done);
+			}
 			kref_put(&io_req->refcount, bnx2fc_cmd_release);
+		}
 
 		clear_bit(BNX2FC_FLAG_ISSUE_RRQ, &io_req->req_flags);
 	}
diff --git a/drivers/scsi/fcoe/Makefile b/drivers/scsi/fcoe/Makefile
index f6d37d0..aed0f5d 100644
--- a/drivers/scsi/fcoe/Makefile
+++ b/drivers/scsi/fcoe/Makefile
@@ -1,4 +1,4 @@
 obj-$(CONFIG_FCOE) += fcoe.o
 obj-$(CONFIG_LIBFCOE) += libfcoe.o
 
-libfcoe-objs := fcoe_ctlr.o fcoe_transport.o
+libfcoe-objs := fcoe_ctlr.o fcoe_transport.o fcoe_sysfs.o
diff --git a/drivers/scsi/fcoe/fcoe.c b/drivers/scsi/fcoe/fcoe.c
index 76e3d0b..fe30b1b 100644
--- a/drivers/scsi/fcoe/fcoe.c
+++ b/drivers/scsi/fcoe/fcoe.c
@@ -41,6 +41,7 @@
 
 #include <scsi/fc/fc_encaps.h>
 #include <scsi/fc/fc_fip.h>
+#include <scsi/fc/fc_fcoe.h>
 
 #include <scsi/libfc.h>
 #include <scsi/fc_frame.h>
@@ -150,6 +151,21 @@
 static int fcoe_vport_disable(struct fc_vport *, bool disable);
 static void fcoe_set_vport_symbolic_name(struct fc_vport *);
 static void fcoe_set_port_id(struct fc_lport *, u32, struct fc_frame *);
+static void fcoe_ctlr_get_lesb(struct fcoe_ctlr_device *);
+static void fcoe_fcf_get_vlan_id(struct fcoe_fcf_device *);
+
+static struct fcoe_sysfs_function_template fcoe_sysfs_templ = {
+	.get_fcoe_ctlr_mode = fcoe_ctlr_get_fip_mode,
+	.get_fcoe_ctlr_link_fail = fcoe_ctlr_get_lesb,
+	.get_fcoe_ctlr_vlink_fail = fcoe_ctlr_get_lesb,
+	.get_fcoe_ctlr_miss_fka = fcoe_ctlr_get_lesb,
+	.get_fcoe_ctlr_symb_err = fcoe_ctlr_get_lesb,
+	.get_fcoe_ctlr_err_block = fcoe_ctlr_get_lesb,
+	.get_fcoe_ctlr_fcs_error = fcoe_ctlr_get_lesb,
+
+	.get_fcoe_fcf_selected = fcoe_fcf_get_selected,
+	.get_fcoe_fcf_vlan_id = fcoe_fcf_get_vlan_id,
+};
 
 static struct libfc_function_template fcoe_libfc_fcn_templ = {
 	.frame_send = fcoe_xmit,
@@ -282,7 +298,7 @@
 static int fcoe_interface_setup(struct fcoe_interface *fcoe,
 				struct net_device *netdev)
 {
-	struct fcoe_ctlr *fip = &fcoe->ctlr;
+	struct fcoe_ctlr *fip = fcoe_to_ctlr(fcoe);
 	struct netdev_hw_addr *ha;
 	struct net_device *real_dev;
 	u8 flogi_maddr[ETH_ALEN];
@@ -366,7 +382,10 @@
 static struct fcoe_interface *fcoe_interface_create(struct net_device *netdev,
 						    enum fip_state fip_mode)
 {
+	struct fcoe_ctlr_device *ctlr_dev;
+	struct fcoe_ctlr *ctlr;
 	struct fcoe_interface *fcoe;
+	int size;
 	int err;
 
 	if (!try_module_get(THIS_MODULE)) {
@@ -376,27 +395,32 @@
 		goto out;
 	}
 
-	fcoe = kzalloc(sizeof(*fcoe), GFP_KERNEL);
-	if (!fcoe) {
-		FCOE_NETDEV_DBG(netdev, "Could not allocate fcoe structure\n");
+	size = sizeof(struct fcoe_ctlr) + sizeof(struct fcoe_interface);
+	ctlr_dev = fcoe_ctlr_device_add(&netdev->dev, &fcoe_sysfs_templ,
+					size);
+	if (!ctlr_dev) {
+		FCOE_DBG("Failed to add fcoe_ctlr_device\n");
 		fcoe = ERR_PTR(-ENOMEM);
 		goto out_putmod;
 	}
 
+	ctlr = fcoe_ctlr_device_priv(ctlr_dev);
+	fcoe = fcoe_ctlr_priv(ctlr);
+
 	dev_hold(netdev);
 
 	/*
 	 * Initialize FIP.
 	 */
-	fcoe_ctlr_init(&fcoe->ctlr, fip_mode);
-	fcoe->ctlr.send = fcoe_fip_send;
-	fcoe->ctlr.update_mac = fcoe_update_src_mac;
-	fcoe->ctlr.get_src_addr = fcoe_get_src_mac;
+	fcoe_ctlr_init(ctlr, fip_mode);
+	ctlr->send = fcoe_fip_send;
+	ctlr->update_mac = fcoe_update_src_mac;
+	ctlr->get_src_addr = fcoe_get_src_mac;
 
 	err = fcoe_interface_setup(fcoe, netdev);
 	if (err) {
-		fcoe_ctlr_destroy(&fcoe->ctlr);
-		kfree(fcoe);
+		fcoe_ctlr_destroy(ctlr);
+		fcoe_ctlr_device_delete(ctlr_dev);
 		dev_put(netdev);
 		fcoe = ERR_PTR(err);
 		goto out_putmod;
@@ -419,7 +443,7 @@
 static void fcoe_interface_remove(struct fcoe_interface *fcoe)
 {
 	struct net_device *netdev = fcoe->netdev;
-	struct fcoe_ctlr *fip = &fcoe->ctlr;
+	struct fcoe_ctlr *fip = fcoe_to_ctlr(fcoe);
 	u8 flogi_maddr[ETH_ALEN];
 	const struct net_device_ops *ops;
 
@@ -462,7 +486,8 @@
 static void fcoe_interface_cleanup(struct fcoe_interface *fcoe)
 {
 	struct net_device *netdev = fcoe->netdev;
-	struct fcoe_ctlr *fip = &fcoe->ctlr;
+	struct fcoe_ctlr *fip = fcoe_to_ctlr(fcoe);
+	struct fcoe_ctlr_device *ctlr_dev = fcoe_ctlr_to_ctlr_dev(fip);
 
 	rtnl_lock();
 	if (!fcoe->removed)
@@ -472,8 +497,8 @@
 	/* Release the self-reference taken during fcoe_interface_create() */
 	/* tear-down the FCoE controller */
 	fcoe_ctlr_destroy(fip);
-	scsi_host_put(fcoe->ctlr.lp->host);
-	kfree(fcoe);
+	scsi_host_put(fip->lp->host);
+	fcoe_ctlr_device_delete(ctlr_dev);
 	dev_put(netdev);
 	module_put(THIS_MODULE);
 }
@@ -493,9 +518,11 @@
 			 struct net_device *orig_dev)
 {
 	struct fcoe_interface *fcoe;
+	struct fcoe_ctlr *ctlr;
 
 	fcoe = container_of(ptype, struct fcoe_interface, fip_packet_type);
-	fcoe_ctlr_recv(&fcoe->ctlr, skb);
+	ctlr = fcoe_to_ctlr(fcoe);
+	fcoe_ctlr_recv(ctlr, skb);
 	return 0;
 }
 
@@ -645,11 +672,13 @@
 	u32 mfs;
 	u64 wwnn, wwpn;
 	struct fcoe_interface *fcoe;
+	struct fcoe_ctlr *ctlr;
 	struct fcoe_port *port;
 
 	/* Setup lport private data to point to fcoe softc */
 	port = lport_priv(lport);
 	fcoe = port->priv;
+	ctlr = fcoe_to_ctlr(fcoe);
 
 	/*
 	 * Determine max frame size based on underlying device and optional
@@ -676,10 +705,10 @@
 
 	if (!lport->vport) {
 		if (fcoe_get_wwn(netdev, &wwnn, NETDEV_FCOE_WWNN))
-			wwnn = fcoe_wwn_from_mac(fcoe->ctlr.ctl_src_addr, 1, 0);
+			wwnn = fcoe_wwn_from_mac(ctlr->ctl_src_addr, 1, 0);
 		fc_set_wwnn(lport, wwnn);
 		if (fcoe_get_wwn(netdev, &wwpn, NETDEV_FCOE_WWPN))
-			wwpn = fcoe_wwn_from_mac(fcoe->ctlr.ctl_src_addr,
+			wwpn = fcoe_wwn_from_mac(ctlr->ctl_src_addr,
 						 2, 0);
 		fc_set_wwpn(lport, wwpn);
 	}
@@ -1056,6 +1085,7 @@
 static struct fc_lport *fcoe_if_create(struct fcoe_interface *fcoe,
 				       struct device *parent, int npiv)
 {
+	struct fcoe_ctlr *ctlr = fcoe_to_ctlr(fcoe);
 	struct net_device *netdev = fcoe->netdev;
 	struct fc_lport *lport, *n_port;
 	struct fcoe_port *port;
@@ -1119,7 +1149,7 @@
 	}
 
 	/* Initialize the library */
-	rc = fcoe_libfc_config(lport, &fcoe->ctlr, &fcoe_libfc_fcn_templ, 1);
+	rc = fcoe_libfc_config(lport, ctlr, &fcoe_libfc_fcn_templ, 1);
 	if (rc) {
 		FCOE_NETDEV_DBG(netdev, "Could not configure libfc for the "
 				"interface\n");
@@ -1386,6 +1416,7 @@
 {
 	struct fc_lport *lport;
 	struct fcoe_rcv_info *fr;
+	struct fcoe_ctlr *ctlr;
 	struct fcoe_interface *fcoe;
 	struct fc_frame_header *fh;
 	struct fcoe_percpu_s *fps;
@@ -1393,7 +1424,8 @@
 	unsigned int cpu;
 
 	fcoe = container_of(ptype, struct fcoe_interface, fcoe_packet_type);
-	lport = fcoe->ctlr.lp;
+	ctlr = fcoe_to_ctlr(fcoe);
+	lport = ctlr->lp;
 	if (unlikely(!lport)) {
 		FCOE_NETDEV_DBG(netdev, "Cannot find hba structure");
 		goto err2;
@@ -1409,8 +1441,8 @@
 
 	eh = eth_hdr(skb);
 
-	if (is_fip_mode(&fcoe->ctlr) &&
-	    compare_ether_addr(eh->h_source, fcoe->ctlr.dest_addr)) {
+	if (is_fip_mode(ctlr) &&
+	    compare_ether_addr(eh->h_source, ctlr->dest_addr)) {
 		FCOE_NETDEV_DBG(netdev, "wrong source mac address:%pM\n",
 				eh->h_source);
 		goto err;
@@ -1544,6 +1576,7 @@
 	unsigned int elen;		/* eth header, may include vlan */
 	struct fcoe_port *port = lport_priv(lport);
 	struct fcoe_interface *fcoe = port->priv;
+	struct fcoe_ctlr *ctlr = fcoe_to_ctlr(fcoe);
 	u8 sof, eof;
 	struct fcoe_hdr *hp;
 
@@ -1559,7 +1592,7 @@
 	}
 
 	if (unlikely(fh->fh_type == FC_TYPE_ELS) &&
-	    fcoe_ctlr_els_send(&fcoe->ctlr, lport, skb))
+	    fcoe_ctlr_els_send(ctlr, lport, skb))
 		return 0;
 
 	sof = fr_sof(fp);
@@ -1623,12 +1656,12 @@
 	/* fill up mac and fcoe headers */
 	eh = eth_hdr(skb);
 	eh->h_proto = htons(ETH_P_FCOE);
-	memcpy(eh->h_dest, fcoe->ctlr.dest_addr, ETH_ALEN);
-	if (fcoe->ctlr.map_dest)
+	memcpy(eh->h_dest, ctlr->dest_addr, ETH_ALEN);
+	if (ctlr->map_dest)
 		memcpy(eh->h_dest + 3, fh->fh_d_id, 3);
 
-	if (unlikely(fcoe->ctlr.flogi_oxid != FC_XID_UNKNOWN))
-		memcpy(eh->h_source, fcoe->ctlr.ctl_src_addr, ETH_ALEN);
+	if (unlikely(ctlr->flogi_oxid != FC_XID_UNKNOWN))
+		memcpy(eh->h_source, ctlr->ctl_src_addr, ETH_ALEN);
 	else
 		memcpy(eh->h_source, port->data_src_addr, ETH_ALEN);
 
@@ -1677,6 +1710,7 @@
 static inline int fcoe_filter_frames(struct fc_lport *lport,
 				     struct fc_frame *fp)
 {
+	struct fcoe_ctlr *ctlr;
 	struct fcoe_interface *fcoe;
 	struct fc_frame_header *fh;
 	struct sk_buff *skb = (struct sk_buff *)fp;
@@ -1698,7 +1732,8 @@
 		return 0;
 
 	fcoe = ((struct fcoe_port *)lport_priv(lport))->priv;
-	if (is_fip_mode(&fcoe->ctlr) && fc_frame_payload_op(fp) == ELS_LOGO &&
+	ctlr = fcoe_to_ctlr(fcoe);
+	if (is_fip_mode(ctlr) && fc_frame_payload_op(fp) == ELS_LOGO &&
 	    ntoh24(fh->fh_s_id) == FC_FID_FLOGI) {
 		FCOE_DBG("fcoe: dropping FCoE lport LOGO in fip mode\n");
 		return -EINVAL;
@@ -1877,6 +1912,7 @@
 				     ulong event, void *ptr)
 {
 	struct dcb_app_type *entry = ptr;
+	struct fcoe_ctlr *ctlr;
 	struct fcoe_interface *fcoe;
 	struct net_device *netdev;
 	struct fcoe_port *port;
@@ -1894,6 +1930,8 @@
 	if (!fcoe)
 		return NOTIFY_OK;
 
+	ctlr = fcoe_to_ctlr(fcoe);
+
 	if (entry->dcbx & DCB_CAP_DCBX_VER_CEE)
 		prio = ffs(entry->app.priority) - 1;
 	else
@@ -1904,10 +1942,10 @@
 
 	if (entry->app.protocol == ETH_P_FIP ||
 	    entry->app.protocol == ETH_P_FCOE)
-		fcoe->ctlr.priority = prio;
+		ctlr->priority = prio;
 
 	if (entry->app.protocol == ETH_P_FCOE) {
-		port = lport_priv(fcoe->ctlr.lp);
+		port = lport_priv(ctlr->lp);
 		port->priority = prio;
 	}
 
@@ -1929,6 +1967,7 @@
 {
 	struct fc_lport *lport = NULL;
 	struct net_device *netdev = ptr;
+	struct fcoe_ctlr *ctlr;
 	struct fcoe_interface *fcoe;
 	struct fcoe_port *port;
 	struct fcoe_dev_stats *stats;
@@ -1938,7 +1977,8 @@
 
 	list_for_each_entry(fcoe, &fcoe_hostlist, list) {
 		if (fcoe->netdev == netdev) {
-			lport = fcoe->ctlr.lp;
+			ctlr = fcoe_to_ctlr(fcoe);
+			lport = ctlr->lp;
 			break;
 		}
 	}
@@ -1967,7 +2007,7 @@
 		break;
 	case NETDEV_UNREGISTER:
 		list_del(&fcoe->list);
-		port = lport_priv(fcoe->ctlr.lp);
+		port = lport_priv(ctlr->lp);
 		queue_work(fcoe_wq, &port->destroy_work);
 		goto out;
 		break;
@@ -1982,8 +2022,8 @@
 	fcoe_link_speed_update(lport);
 
 	if (link_possible && !fcoe_link_ok(lport))
-		fcoe_ctlr_link_up(&fcoe->ctlr);
-	else if (fcoe_ctlr_link_down(&fcoe->ctlr)) {
+		fcoe_ctlr_link_up(ctlr);
+	else if (fcoe_ctlr_link_down(ctlr)) {
 		stats = per_cpu_ptr(lport->dev_stats, get_cpu());
 		stats->LinkFailureCount++;
 		put_cpu();
@@ -2003,6 +2043,7 @@
  */
 static int fcoe_disable(struct net_device *netdev)
 {
+	struct fcoe_ctlr *ctlr;
 	struct fcoe_interface *fcoe;
 	int rc = 0;
 
@@ -2013,8 +2054,9 @@
 	rtnl_unlock();
 
 	if (fcoe) {
-		fcoe_ctlr_link_down(&fcoe->ctlr);
-		fcoe_clean_pending_queue(fcoe->ctlr.lp);
+		ctlr = fcoe_to_ctlr(fcoe);
+		fcoe_ctlr_link_down(ctlr);
+		fcoe_clean_pending_queue(ctlr->lp);
 	} else
 		rc = -ENODEV;
 
@@ -2032,6 +2074,7 @@
  */
 static int fcoe_enable(struct net_device *netdev)
 {
+	struct fcoe_ctlr *ctlr;
 	struct fcoe_interface *fcoe;
 	int rc = 0;
 
@@ -2040,11 +2083,17 @@
 	fcoe = fcoe_hostlist_lookup_port(netdev);
 	rtnl_unlock();
 
-	if (!fcoe)
+	if (!fcoe) {
 		rc = -ENODEV;
-	else if (!fcoe_link_ok(fcoe->ctlr.lp))
-		fcoe_ctlr_link_up(&fcoe->ctlr);
+		goto out;
+	}
 
+	ctlr = fcoe_to_ctlr(fcoe);
+
+	if (!fcoe_link_ok(ctlr->lp))
+		fcoe_ctlr_link_up(ctlr);
+
+out:
 	mutex_unlock(&fcoe_config_mutex);
 	return rc;
 }
@@ -2059,6 +2108,7 @@
  */
 static int fcoe_destroy(struct net_device *netdev)
 {
+	struct fcoe_ctlr *ctlr;
 	struct fcoe_interface *fcoe;
 	struct fc_lport *lport;
 	struct fcoe_port *port;
@@ -2071,7 +2121,8 @@
 		rc = -ENODEV;
 		goto out_nodev;
 	}
-	lport = fcoe->ctlr.lp;
+	ctlr = fcoe_to_ctlr(fcoe);
+	lport = ctlr->lp;
 	port = lport_priv(lport);
 	list_del(&fcoe->list);
 	queue_work(fcoe_wq, &port->destroy_work);
@@ -2126,7 +2177,8 @@
 	int dcbx;
 	u8 fup, up;
 	struct net_device *netdev = fcoe->realdev;
-	struct fcoe_port *port = lport_priv(fcoe->ctlr.lp);
+	struct fcoe_ctlr *ctlr = fcoe_to_ctlr(fcoe);
+	struct fcoe_port *port = lport_priv(ctlr->lp);
 	struct dcb_app app = {
 				.priority = 0,
 				.protocol = ETH_P_FCOE
@@ -2149,7 +2201,7 @@
 		}
 
 		port->priority = ffs(up) ? ffs(up) - 1 : 0;
-		fcoe->ctlr.priority = ffs(fup) ? ffs(fup) - 1 : port->priority;
+		ctlr->priority = ffs(fup) ? ffs(fup) - 1 : port->priority;
 	}
 #endif
 }
@@ -2166,6 +2218,8 @@
 static int fcoe_create(struct net_device *netdev, enum fip_state fip_mode)
 {
 	int rc = 0;
+	struct fcoe_ctlr_device *ctlr_dev;
+	struct fcoe_ctlr *ctlr;
 	struct fcoe_interface *fcoe;
 	struct fc_lport *lport;
 
@@ -2184,7 +2238,9 @@
 		goto out_nodev;
 	}
 
-	lport = fcoe_if_create(fcoe, &netdev->dev, 0);
+	ctlr = fcoe_to_ctlr(fcoe);
+	ctlr_dev = fcoe_ctlr_to_ctlr_dev(ctlr);
+	lport = fcoe_if_create(fcoe, &ctlr_dev->dev, 0);
 	if (IS_ERR(lport)) {
 		printk(KERN_ERR "fcoe: Failed to create interface (%s)\n",
 		       netdev->name);
@@ -2195,7 +2251,7 @@
 	}
 
 	/* Make this the "master" N_Port */
-	fcoe->ctlr.lp = lport;
+	ctlr->lp = lport;
 
 	/* setup DCB priority attributes. */
 	fcoe_dcb_create(fcoe);
@@ -2208,7 +2264,7 @@
 	fc_fabric_login(lport);
 	if (!fcoe_link_ok(lport)) {
 		rtnl_unlock();
-		fcoe_ctlr_link_up(&fcoe->ctlr);
+		fcoe_ctlr_link_up(ctlr);
 		mutex_unlock(&fcoe_config_mutex);
 		return rc;
 	}
@@ -2320,11 +2376,12 @@
 	struct fc_lport *lport = shost_priv(shost);
 	struct fcoe_port *port = lport_priv(lport);
 	struct fcoe_interface *fcoe = port->priv;
+	struct fcoe_ctlr *ctlr = fcoe_to_ctlr(fcoe);
 
-	fcoe_ctlr_link_down(&fcoe->ctlr);
-	fcoe_clean_pending_queue(fcoe->ctlr.lp);
-	if (!fcoe_link_ok(fcoe->ctlr.lp))
-		fcoe_ctlr_link_up(&fcoe->ctlr);
+	fcoe_ctlr_link_down(ctlr);
+	fcoe_clean_pending_queue(ctlr->lp);
+	if (!fcoe_link_ok(ctlr->lp))
+		fcoe_ctlr_link_up(ctlr);
 	return 0;
 }
 
@@ -2359,10 +2416,12 @@
  */
 static struct fc_lport *fcoe_hostlist_lookup(const struct net_device *netdev)
 {
+	struct fcoe_ctlr *ctlr;
 	struct fcoe_interface *fcoe;
 
 	fcoe = fcoe_hostlist_lookup_port(netdev);
-	return (fcoe) ? fcoe->ctlr.lp : NULL;
+	ctlr = fcoe_to_ctlr(fcoe);
+	return (fcoe) ? ctlr->lp : NULL;
 }
 
 /**
@@ -2466,6 +2525,7 @@
 static void __exit fcoe_exit(void)
 {
 	struct fcoe_interface *fcoe, *tmp;
+	struct fcoe_ctlr *ctlr;
 	struct fcoe_port *port;
 	unsigned int cpu;
 
@@ -2477,7 +2537,8 @@
 	rtnl_lock();
 	list_for_each_entry_safe(fcoe, tmp, &fcoe_hostlist, list) {
 		list_del(&fcoe->list);
-		port = lport_priv(fcoe->ctlr.lp);
+		ctlr = fcoe_to_ctlr(fcoe);
+		port = lport_priv(ctlr->lp);
 		queue_work(fcoe_wq, &port->destroy_work);
 	}
 	rtnl_unlock();
@@ -2573,7 +2634,7 @@
 {
 	struct fcoe_port *port = lport_priv(lport);
 	struct fcoe_interface *fcoe = port->priv;
-	struct fcoe_ctlr *fip = &fcoe->ctlr;
+	struct fcoe_ctlr *fip = fcoe_to_ctlr(fcoe);
 	struct fc_frame_header *fh = fc_frame_header_get(fp);
 
 	switch (op) {
@@ -2730,6 +2791,40 @@
 	__fcoe_get_lesb(lport, fc_lesb, netdev);
 }
 
+static void fcoe_ctlr_get_lesb(struct fcoe_ctlr_device *ctlr_dev)
+{
+	struct fcoe_ctlr *fip = fcoe_ctlr_device_priv(ctlr_dev);
+	struct net_device *netdev = fcoe_netdev(fip->lp);
+	struct fcoe_fc_els_lesb *fcoe_lesb;
+	struct fc_els_lesb fc_lesb;
+
+	__fcoe_get_lesb(fip->lp, &fc_lesb, netdev);
+	fcoe_lesb = (struct fcoe_fc_els_lesb *)(&fc_lesb);
+
+	ctlr_dev->lesb.lesb_link_fail =
+		ntohl(fcoe_lesb->lesb_link_fail);
+	ctlr_dev->lesb.lesb_vlink_fail =
+		ntohl(fcoe_lesb->lesb_vlink_fail);
+	ctlr_dev->lesb.lesb_miss_fka =
+		ntohl(fcoe_lesb->lesb_miss_fka);
+	ctlr_dev->lesb.lesb_symb_err =
+		ntohl(fcoe_lesb->lesb_symb_err);
+	ctlr_dev->lesb.lesb_err_block =
+		ntohl(fcoe_lesb->lesb_err_block);
+	ctlr_dev->lesb.lesb_fcs_error =
+		ntohl(fcoe_lesb->lesb_fcs_error);
+}
+
+static void fcoe_fcf_get_vlan_id(struct fcoe_fcf_device *fcf_dev)
+{
+	struct fcoe_ctlr_device *ctlr_dev =
+		fcoe_fcf_dev_to_ctlr_dev(fcf_dev);
+	struct fcoe_ctlr *ctlr = fcoe_ctlr_device_priv(ctlr_dev);
+	struct fcoe_interface *fcoe = fcoe_ctlr_priv(ctlr);
+
+	fcf_dev->vlan_id = vlan_dev_vlan_id(fcoe->netdev);
+}
+
 /**
  * fcoe_set_port_id() - Callback from libfc when Port_ID is set.
  * @lport: the local port
@@ -2747,7 +2842,8 @@
 {
 	struct fcoe_port *port = lport_priv(lport);
 	struct fcoe_interface *fcoe = port->priv;
+	struct fcoe_ctlr *ctlr = fcoe_to_ctlr(fcoe);
 
 	if (fp && fc_frame_payload_op(fp) == ELS_FLOGI)
-		fcoe_ctlr_recv_flogi(&fcoe->ctlr, lport, fp);
+		fcoe_ctlr_recv_flogi(ctlr, lport, fp);
 }
diff --git a/drivers/scsi/fcoe/fcoe.h b/drivers/scsi/fcoe/fcoe.h
index 96ac938..a624add4 100644
--- a/drivers/scsi/fcoe/fcoe.h
+++ b/drivers/scsi/fcoe/fcoe.h
@@ -68,7 +68,6 @@
  * @netdev:	      The associated net device
  * @fcoe_packet_type: FCoE packet type
  * @fip_packet_type:  FIP packet type
- * @ctlr:	      The FCoE controller (for FIP)
  * @oem:	      The offload exchange manager for all local port
  *		      instances associated with this port
  * @removed:	      Indicates fcoe interface removed from net device
@@ -80,12 +79,15 @@
 	struct net_device  *realdev;
 	struct packet_type fcoe_packet_type;
 	struct packet_type fip_packet_type;
-	struct fcoe_ctlr   ctlr;
 	struct fc_exch_mgr *oem;
 	u8	removed;
 };
 
-#define fcoe_from_ctlr(fip) container_of(fip, struct fcoe_interface, ctlr)
+#define fcoe_to_ctlr(x)						\
+	((struct fcoe_ctlr *)(((struct fcoe_ctlr *)(x)) - 1))
+
+#define fcoe_from_ctlr(x)			\
+	((struct fcoe_interface *)((x) + 1))
 
 /**
  * fcoe_netdev() - Return the net device associated with a local port
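
The fcoe_to_ctlr()/fcoe_from_ctlr() macros above rely on fcoe_ctlr_device_add() having been asked for a private area large enough to hold the fcoe_ctlr immediately followed by the fcoe_interface, which is what fcoe_interface_create() does earlier in this patch. A minimal sketch of that layout assumption (the function name is illustrative, not part of the patch):

static struct fcoe_interface *example_layout(struct net_device *netdev)
{
	struct fcoe_ctlr_device *ctlr_dev;
	struct fcoe_ctlr *ctlr;
	struct fcoe_interface *fcoe;

	/* one allocation: [fcoe_ctlr_device][fcoe_ctlr][fcoe_interface] */
	ctlr_dev = fcoe_ctlr_device_add(&netdev->dev, &fcoe_sysfs_templ,
					sizeof(struct fcoe_ctlr) +
					sizeof(struct fcoe_interface));
	if (!ctlr_dev)
		return NULL;

	ctlr = fcoe_ctlr_device_priv(ctlr_dev);	/* start of the priv area */
	fcoe = fcoe_ctlr_priv(ctlr);		/* the area after the ctlr */

	/* the macros above are the inverse of this pointer arithmetic */
	BUG_ON(fcoe_to_ctlr(fcoe) != ctlr);

	return fcoe;
}
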
diff --git a/drivers/scsi/fcoe/fcoe_ctlr.c b/drivers/scsi/fcoe/fcoe_ctlr.c
index 5a4c725..d68d572 100644
--- a/drivers/scsi/fcoe/fcoe_ctlr.c
+++ b/drivers/scsi/fcoe/fcoe_ctlr.c
@@ -160,6 +160,76 @@
 }
 EXPORT_SYMBOL(fcoe_ctlr_init);
 
+static int fcoe_sysfs_fcf_add(struct fcoe_fcf *new)
+{
+	struct fcoe_ctlr *fip = new->fip;
+	struct fcoe_ctlr_device *ctlr_dev = fcoe_ctlr_to_ctlr_dev(fip);
+	struct fcoe_fcf_device temp, *fcf_dev;
+	int rc = 0;
+
+	LIBFCOE_FIP_DBG(fip, "New FCF fab %16.16llx mac %pM\n",
+			new->fabric_name, new->fcf_mac);
+
+	mutex_lock(&ctlr_dev->lock);
+
+	temp.fabric_name = new->fabric_name;
+	temp.switch_name = new->switch_name;
+	temp.fc_map = new->fc_map;
+	temp.vfid = new->vfid;
+	memcpy(temp.mac, new->fcf_mac, ETH_ALEN);
+	temp.priority = new->pri;
+	temp.fka_period = new->fka_period;
+	temp.selected = 0; /* default to unselected */
+
+	fcf_dev = fcoe_fcf_device_add(ctlr_dev, &temp);
+	if (unlikely(!fcf_dev)) {
+		rc = -ENOMEM;
+		goto out;
+	}
+
+	/*
+	 * The fcoe_sysfs layer can return a CONNECTED fcf that
+	 * has a priv (fcf was never deleted) or a CONNECTED fcf
+	 * that doesn't have a priv (fcf was deleted). However,
+	 * libfcoe will always delete FCFs before trying to add
+	 * them. This is ensured because both recv_adv and
+ * age_fcfs are protected by the fcoe_ctlr's mutex.
+	 * This means that we should never get a FCF with a
+	 * non-NULL priv pointer.
+	 */
+	BUG_ON(fcf_dev->priv);
+
+	fcf_dev->priv = new;
+	new->fcf_dev = fcf_dev;
+
+	list_add(&new->list, &fip->fcfs);
+	fip->fcf_count++;
+
+out:
+	mutex_unlock(&ctlr_dev->lock);
+	return rc;
+}
+
+static void fcoe_sysfs_fcf_del(struct fcoe_fcf *new)
+{
+	struct fcoe_ctlr *fip = new->fip;
+	struct fcoe_ctlr_device *ctlr_dev = fcoe_ctlr_to_ctlr_dev(fip);
+	struct fcoe_fcf_device *fcf_dev;
+
+	list_del(&new->list);
+	fip->fcf_count--;
+
+	mutex_lock(&ctlr_dev->lock);
+
+	fcf_dev = fcoe_fcf_to_fcf_dev(new);
+	WARN_ON(!fcf_dev);
+	new->fcf_dev = NULL;
+	fcoe_fcf_device_delete(fcf_dev);
+	kfree(new);
+
+	mutex_unlock(&ctlr_dev->lock);
+}
+
 /**
  * fcoe_ctlr_reset_fcfs() - Reset and free all FCFs for a controller
  * @fip: The FCoE controller whose FCFs are to be reset
@@ -173,10 +243,10 @@
 
 	fip->sel_fcf = NULL;
 	list_for_each_entry_safe(fcf, next, &fip->fcfs, list) {
-		list_del(&fcf->list);
-		kfree(fcf);
+		fcoe_sysfs_fcf_del(fcf);
 	}
-	fip->fcf_count = 0;
+	WARN_ON(fip->fcf_count);
+
 	fip->sel_time = 0;
 }
 
@@ -717,8 +787,11 @@
 	unsigned long next_timer = jiffies + msecs_to_jiffies(FIP_VN_KA_PERIOD);
 	unsigned long deadline;
 	unsigned long sel_time = 0;
+	struct list_head del_list;
 	struct fcoe_dev_stats *stats;
 
+	INIT_LIST_HEAD(&del_list);
+
 	stats = per_cpu_ptr(fip->lp->dev_stats, get_cpu());
 
 	list_for_each_entry_safe(fcf, next, &fip->fcfs, list) {
@@ -739,10 +812,13 @@
 		if (time_after_eq(jiffies, deadline)) {
 			if (fip->sel_fcf == fcf)
 				fip->sel_fcf = NULL;
+			/*
+			 * Move to delete list so we can call
+			 * fcoe_sysfs_fcf_del (which can sleep)
+			 * after the put_cpu().
+			 */
 			list_del(&fcf->list);
-			WARN_ON(!fip->fcf_count);
-			fip->fcf_count--;
-			kfree(fcf);
+			list_add(&fcf->list, &del_list);
 			stats->VLinkFailureCount++;
 		} else {
 			if (time_after(next_timer, deadline))
@@ -753,6 +829,12 @@
 		}
 	}
 	put_cpu();
+
+	list_for_each_entry_safe(fcf, next, &del_list, list) {
+		/* Removes fcf from current list */
+		fcoe_sysfs_fcf_del(fcf);
+	}
+
 	if (sel_time && !fip->sel_fcf && !fip->sel_time) {
 		sel_time += msecs_to_jiffies(FCOE_CTLR_START_DELAY);
 		fip->sel_time = sel_time;
@@ -903,23 +985,23 @@
 {
 	struct fcoe_fcf *fcf;
 	struct fcoe_fcf new;
-	struct fcoe_fcf *found;
 	unsigned long sol_tov = msecs_to_jiffies(FCOE_CTRL_SOL_TOV);
 	int first = 0;
 	int mtu_valid;
+	int found = 0;
+	int rc = 0;
 
 	if (fcoe_ctlr_parse_adv(fip, skb, &new))
 		return;
 
 	mutex_lock(&fip->ctlr_mutex);
 	first = list_empty(&fip->fcfs);
-	found = NULL;
 	list_for_each_entry(fcf, &fip->fcfs, list) {
 		if (fcf->switch_name == new.switch_name &&
 		    fcf->fabric_name == new.fabric_name &&
 		    fcf->fc_map == new.fc_map &&
 		    compare_ether_addr(fcf->fcf_mac, new.fcf_mac) == 0) {
-			found = fcf;
+			found = 1;
 			break;
 		}
 	}
@@ -931,9 +1013,16 @@
 		if (!fcf)
 			goto out;
 
-		fip->fcf_count++;
 		memcpy(fcf, &new, sizeof(new));
-		list_add(&fcf->list, &fip->fcfs);
+		fcf->fip = fip;
+		rc = fcoe_sysfs_fcf_add(fcf);
+		if (rc) {
+			printk(KERN_ERR "Failed to allocate sysfs instance "
+			       "for FCF, fab %16.16llx mac %pM\n",
+			       new.fabric_name, new.fcf_mac);
+			kfree(fcf);
+			goto out;
+		}
 	} else {
 		/*
 		 * Update the FCF's keep-alive descriptor flags.
@@ -954,6 +1043,7 @@
 		fcf->fka_period = new.fka_period;
 		memcpy(fcf->fcf_mac, new.fcf_mac, ETH_ALEN);
 	}
+
 	mtu_valid = fcoe_ctlr_mtu_valid(fcf);
 	fcf->time = jiffies;
 	if (!found)
@@ -996,6 +1086,7 @@
 		    time_before(fip->sel_time, fip->timer.expires))
 			mod_timer(&fip->timer, fip->sel_time);
 	}
+
 out:
 	mutex_unlock(&fip->ctlr_mutex);
 }
@@ -2718,9 +2809,9 @@
 
 /**
  * fcoe_libfc_config() - Sets up libfc related properties for local port
- * @lp: The local port to configure libfc for
- * @fip: The FCoE controller in use by the local port
- * @tt: The libfc function template
+ * @lport:    The local port to configure libfc for
+ * @fip:      The FCoE controller in use by the local port
+ * @tt:       The libfc function template
  * @init_fcp: If non-zero, the FCP portion of libfc should be initialized
  *
  * Returns : 0 for success
@@ -2753,3 +2844,43 @@
 	return 0;
 }
 EXPORT_SYMBOL_GPL(fcoe_libfc_config);
+
+void fcoe_fcf_get_selected(struct fcoe_fcf_device *fcf_dev)
+{
+	struct fcoe_ctlr_device *ctlr_dev = fcoe_fcf_dev_to_ctlr_dev(fcf_dev);
+	struct fcoe_ctlr *fip = fcoe_ctlr_device_priv(ctlr_dev);
+	struct fcoe_fcf *fcf;
+
+	mutex_lock(&fip->ctlr_mutex);
+	mutex_lock(&ctlr_dev->lock);
+
+	fcf = fcoe_fcf_device_priv(fcf_dev);
+	if (fcf)
+		fcf_dev->selected = (fcf == fip->sel_fcf) ? 1 : 0;
+	else
+		fcf_dev->selected = 0;
+
+	mutex_unlock(&ctlr_dev->lock);
+	mutex_unlock(&fip->ctlr_mutex);
+}
+EXPORT_SYMBOL(fcoe_fcf_get_selected);
+
+void fcoe_ctlr_get_fip_mode(struct fcoe_ctlr_device *ctlr_dev)
+{
+	struct fcoe_ctlr *ctlr = fcoe_ctlr_device_priv(ctlr_dev);
+
+	mutex_lock(&ctlr->ctlr_mutex);
+	switch (ctlr->mode) {
+	case FIP_MODE_FABRIC:
+		ctlr_dev->mode = FIP_CONN_TYPE_FABRIC;
+		break;
+	case FIP_MODE_VN2VN:
+		ctlr_dev->mode = FIP_CONN_TYPE_VN2VN;
+		break;
+	default:
+		ctlr_dev->mode = FIP_CONN_TYPE_UNKNOWN;
+		break;
+	}
+	mutex_unlock(&ctlr->ctlr_mutex);
+}
+EXPORT_SYMBOL(fcoe_ctlr_get_fip_mode);
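
fcoe_fcf_get_selected() and fcoe_ctlr_get_fip_mode() are exported so that an LLD can plug them straight into its fcoe_sysfs_function_template and only supply the driver-specific getters, as fcoe.c and bnx2fc do above. A minimal hedged sketch of such a template (names prefixed example_ are illustrative, not part of the patch):

static void example_get_vlan_id(struct fcoe_fcf_device *fcf_dev)
{
	/* hypothetical LLD without VLAN awareness: report VLAN 0 */
	fcf_dev->vlan_id = 0;
}

static struct fcoe_sysfs_function_template example_fcoe_sysfs_templ = {
	/* generic helpers exported by libfcoe above */
	.get_fcoe_ctlr_mode	= fcoe_ctlr_get_fip_mode,
	.get_fcoe_fcf_selected	= fcoe_fcf_get_selected,
	/* driver-specific getter */
	.get_fcoe_fcf_vlan_id	= example_get_vlan_id,
};
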
diff --git a/drivers/scsi/fcoe/fcoe_sysfs.c b/drivers/scsi/fcoe/fcoe_sysfs.c
new file mode 100644
index 0000000..2bc1631
--- /dev/null
+++ b/drivers/scsi/fcoe/fcoe_sysfs.c
@@ -0,0 +1,832 @@
+/*
+ * Copyright(c) 2011 - 2012 Intel Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Maintained at www.Open-FCoE.org
+ */
+
+#include <linux/module.h>
+#include <linux/types.h>
+#include <linux/kernel.h>
+#include <linux/etherdevice.h>
+
+#include <scsi/fcoe_sysfs.h>
+
+static atomic_t ctlr_num;
+static atomic_t fcf_num;
+
+/*
+ * fcoe_fcf_dev_loss_tmo: the default number of seconds that fcoe sysfs
+ * should insulate the loss of a fcf.
+ */
+static unsigned int fcoe_fcf_dev_loss_tmo = 1800;  /* seconds */
+
+module_param_named(fcf_dev_loss_tmo, fcoe_fcf_dev_loss_tmo,
+		   uint, S_IRUGO|S_IWUSR);
+MODULE_PARM_DESC(fcf_dev_loss_tmo,
+		 "Maximum number of seconds that libfcoe should"
+		 " insulate the loss of a fcf. Once this value is"
+		 " exceeded, the fcf is removed.");
+
+/*
+ * These are used by the fcoe_*_show_function routines, they
+ * are intentionally placed in the .c file as they're not intended
+ * for use throughout the code.
+ */
+#define fcoe_ctlr_id(x)				\
+	((x)->id)
+#define fcoe_ctlr_work_q_name(x)		\
+	((x)->work_q_name)
+#define fcoe_ctlr_work_q(x)			\
+	((x)->work_q)
+#define fcoe_ctlr_devloss_work_q_name(x)	\
+	((x)->devloss_work_q_name)
+#define fcoe_ctlr_devloss_work_q(x)		\
+	((x)->devloss_work_q)
+#define fcoe_ctlr_mode(x)			\
+	((x)->mode)
+#define fcoe_ctlr_fcf_dev_loss_tmo(x)		\
+	((x)->fcf_dev_loss_tmo)
+#define fcoe_ctlr_link_fail(x)			\
+	((x)->lesb.lesb_link_fail)
+#define fcoe_ctlr_vlink_fail(x)			\
+	((x)->lesb.lesb_vlink_fail)
+#define fcoe_ctlr_miss_fka(x)			\
+	((x)->lesb.lesb_miss_fka)
+#define fcoe_ctlr_symb_err(x)			\
+	((x)->lesb.lesb_symb_err)
+#define fcoe_ctlr_err_block(x)			\
+	((x)->lesb.lesb_err_block)
+#define fcoe_ctlr_fcs_error(x)			\
+	((x)->lesb.lesb_fcs_error)
+#define fcoe_fcf_state(x)			\
+	((x)->state)
+#define fcoe_fcf_fabric_name(x)			\
+	((x)->fabric_name)
+#define fcoe_fcf_switch_name(x)			\
+	((x)->switch_name)
+#define fcoe_fcf_fc_map(x)			\
+	((x)->fc_map)
+#define fcoe_fcf_vfid(x)			\
+	((x)->vfid)
+#define fcoe_fcf_mac(x)				\
+	((x)->mac)
+#define fcoe_fcf_priority(x)			\
+	((x)->priority)
+#define fcoe_fcf_fka_period(x)			\
+	((x)->fka_period)
+#define fcoe_fcf_dev_loss_tmo(x)		\
+	((x)->dev_loss_tmo)
+#define fcoe_fcf_selected(x)			\
+	((x)->selected)
+#define fcoe_fcf_vlan_id(x)			\
+	((x)->vlan_id)
+
+/*
+ * dev_loss_tmo attribute
+ */
+static int fcoe_str_to_dev_loss(const char *buf, unsigned long *val)
+{
+	int ret;
+
+	ret = kstrtoul(buf, 0, val);
+	if (ret)
+		return -EINVAL;
+	/*
+	 * Check for overflow; dev_loss_tmo is u32
+	 */
+	if (*val > UINT_MAX)
+		return -EINVAL;
+
+	return 0;
+}
+
+static int fcoe_fcf_set_dev_loss_tmo(struct fcoe_fcf_device *fcf,
+				     unsigned long val)
+{
+	if ((fcf->state == FCOE_FCF_STATE_UNKNOWN) ||
+	    (fcf->state == FCOE_FCF_STATE_DISCONNECTED) ||
+	    (fcf->state == FCOE_FCF_STATE_DELETED))
+		return -EBUSY;
+	/*
+	 * Check for overflow; dev_loss_tmo is u32
+	 */
+	if (val > UINT_MAX)
+		return -EINVAL;
+
+	fcoe_fcf_dev_loss_tmo(fcf) = val;
+	return 0;
+}
+
+#define FCOE_DEVICE_ATTR(_prefix, _name, _mode, _show, _store)	\
+struct device_attribute device_attr_fcoe_##_prefix##_##_name =	\
+	__ATTR(_name, _mode, _show, _store)
+
+#define fcoe_ctlr_show_function(field, format_string, sz, cast)	\
+static ssize_t show_fcoe_ctlr_device_##field(struct device *dev, \
+					    struct device_attribute *attr, \
+					    char *buf)			\
+{									\
+	struct fcoe_ctlr_device *ctlr = dev_to_ctlr(dev);		\
+	if (ctlr->f->get_fcoe_ctlr_##field)				\
+		ctlr->f->get_fcoe_ctlr_##field(ctlr);			\
+	return snprintf(buf, sz, format_string,				\
+			cast fcoe_ctlr_##field(ctlr));			\
+}
+
+#define fcoe_fcf_show_function(field, format_string, sz, cast)	\
+static ssize_t show_fcoe_fcf_device_##field(struct device *dev,	\
+					   struct device_attribute *attr, \
+					   char *buf)			\
+{									\
+	struct fcoe_fcf_device *fcf = dev_to_fcf(dev);			\
+	struct fcoe_ctlr_device *ctlr = fcoe_fcf_dev_to_ctlr_dev(fcf);	\
+	if (ctlr->f->get_fcoe_fcf_##field)				\
+		ctlr->f->get_fcoe_fcf_##field(fcf);			\
+	return snprintf(buf, sz, format_string,				\
+			cast fcoe_fcf_##field(fcf));			\
+}
+
+#define fcoe_ctlr_private_show_function(field, format_string, sz, cast)	\
+static ssize_t show_fcoe_ctlr_device_##field(struct device *dev, \
+					    struct device_attribute *attr, \
+					    char *buf)			\
+{									\
+	struct fcoe_ctlr_device *ctlr = dev_to_ctlr(dev);		\
+	return snprintf(buf, sz, format_string, cast fcoe_ctlr_##field(ctlr)); \
+}
+
+#define fcoe_fcf_private_show_function(field, format_string, sz, cast)	\
+static ssize_t show_fcoe_fcf_device_##field(struct device *dev,	\
+					   struct device_attribute *attr, \
+					   char *buf)			\
+{								\
+	struct fcoe_fcf_device *fcf = dev_to_fcf(dev);			\
+	return snprintf(buf, sz, format_string, cast fcoe_fcf_##field(fcf)); \
+}
+
+#define fcoe_ctlr_private_rd_attr(field, format_string, sz)		\
+	fcoe_ctlr_private_show_function(field, format_string, sz, )	\
+	static FCOE_DEVICE_ATTR(ctlr, field, S_IRUGO,			\
+				show_fcoe_ctlr_device_##field, NULL)
+
+#define fcoe_ctlr_rd_attr(field, format_string, sz)			\
+	fcoe_ctlr_show_function(field, format_string, sz, )		\
+	static FCOE_DEVICE_ATTR(ctlr, field, S_IRUGO,			\
+				show_fcoe_ctlr_device_##field, NULL)
+
+#define fcoe_fcf_rd_attr(field, format_string, sz)			\
+	fcoe_fcf_show_function(field, format_string, sz, )		\
+	static FCOE_DEVICE_ATTR(fcf, field, S_IRUGO,			\
+				show_fcoe_fcf_device_##field, NULL)
+
+#define fcoe_fcf_private_rd_attr(field, format_string, sz)		\
+	fcoe_fcf_private_show_function(field, format_string, sz, )	\
+	static FCOE_DEVICE_ATTR(fcf, field, S_IRUGO,			\
+				show_fcoe_fcf_device_##field, NULL)
+
+#define fcoe_ctlr_private_rd_attr_cast(field, format_string, sz, cast)	\
+	fcoe_ctlr_private_show_function(field, format_string, sz, (cast)) \
+	static FCOE_DEVICE_ATTR(ctlr, field, S_IRUGO,			\
+				show_fcoe_ctlr_device_##field, NULL)
+
+#define fcoe_fcf_private_rd_attr_cast(field, format_string, sz, cast)	\
+	fcoe_fcf_private_show_function(field, format_string, sz, (cast)) \
+	static FCOE_DEVICE_ATTR(fcf, field, S_IRUGO,			\
+				show_fcoe_fcf_device_##field, NULL)
+
+#define fcoe_enum_name_search(title, table_type, table)			\
+static const char *get_fcoe_##title##_name(enum table_type table_key)	\
+{									\
+	int i;								\
+	char *name = NULL;						\
+									\
+	for (i = 0; i < ARRAY_SIZE(table); i++) {			\
+		if (table[i].value == table_key) {			\
+			name = table[i].name;				\
+			break;						\
+		}							\
+	}								\
+	return name;							\
+}
+
+static struct {
+	enum fcf_state value;
+	char           *name;
+} fcf_state_names[] = {
+	{ FCOE_FCF_STATE_UNKNOWN,      "Unknown" },
+	{ FCOE_FCF_STATE_DISCONNECTED, "Disconnected" },
+	{ FCOE_FCF_STATE_CONNECTED,    "Connected" },
+};
+fcoe_enum_name_search(fcf_state, fcf_state, fcf_state_names)
+#define FCOE_FCF_STATE_MAX_NAMELEN 50
+
+static ssize_t show_fcf_state(struct device *dev,
+			      struct device_attribute *attr,
+			      char *buf)
+{
+	struct fcoe_fcf_device *fcf = dev_to_fcf(dev);
+	const char *name;
+	name = get_fcoe_fcf_state_name(fcf->state);
+	if (!name)
+		return -EINVAL;
+	return snprintf(buf, FCOE_FCF_STATE_MAX_NAMELEN, "%s\n", name);
+}
+static FCOE_DEVICE_ATTR(fcf, state, S_IRUGO, show_fcf_state, NULL);
+
+static struct {
+	enum fip_conn_type value;
+	char               *name;
+} fip_conn_type_names[] = {
+	{ FIP_CONN_TYPE_UNKNOWN, "Unknown" },
+	{ FIP_CONN_TYPE_FABRIC, "Fabric" },
+	{ FIP_CONN_TYPE_VN2VN, "VN2VN" },
+};
+fcoe_enum_name_search(ctlr_mode, fip_conn_type, fip_conn_type_names)
+#define FCOE_CTLR_MODE_MAX_NAMELEN 50
+
+static ssize_t show_ctlr_mode(struct device *dev,
+			      struct device_attribute *attr,
+			      char *buf)
+{
+	struct fcoe_ctlr_device *ctlr = dev_to_ctlr(dev);
+	const char *name;
+
+	if (ctlr->f->get_fcoe_ctlr_mode)
+		ctlr->f->get_fcoe_ctlr_mode(ctlr);
+
+	name = get_fcoe_ctlr_mode_name(ctlr->mode);
+	if (!name)
+		return -EINVAL;
+	return snprintf(buf, FCOE_CTLR_MODE_MAX_NAMELEN,
+			"%s\n", name);
+}
+static FCOE_DEVICE_ATTR(ctlr, mode, S_IRUGO,
+			show_ctlr_mode, NULL);
+
+static ssize_t
+store_private_fcoe_ctlr_fcf_dev_loss_tmo(struct device *dev,
+					 struct device_attribute *attr,
+					 const char *buf, size_t count)
+{
+	struct fcoe_ctlr_device *ctlr = dev_to_ctlr(dev);
+	struct fcoe_fcf_device *fcf;
+	unsigned long val;
+	int rc;
+
+	rc = fcoe_str_to_dev_loss(buf, &val);
+	if (rc)
+		return rc;
+
+	fcoe_ctlr_fcf_dev_loss_tmo(ctlr) = val;
+	mutex_lock(&ctlr->lock);
+	list_for_each_entry(fcf, &ctlr->fcfs, peers)
+		fcoe_fcf_set_dev_loss_tmo(fcf, val);
+	mutex_unlock(&ctlr->lock);
+	return count;
+}
+fcoe_ctlr_private_show_function(fcf_dev_loss_tmo, "%d\n", 20, );
+static FCOE_DEVICE_ATTR(ctlr, fcf_dev_loss_tmo, S_IRUGO | S_IWUSR,
+			show_fcoe_ctlr_device_fcf_dev_loss_tmo,
+			store_private_fcoe_ctlr_fcf_dev_loss_tmo);
+
+/* Link Error Status Block (LESB) */
+fcoe_ctlr_rd_attr(link_fail, "%u\n", 20);
+fcoe_ctlr_rd_attr(vlink_fail, "%u\n", 20);
+fcoe_ctlr_rd_attr(miss_fka, "%u\n", 20);
+fcoe_ctlr_rd_attr(symb_err, "%u\n", 20);
+fcoe_ctlr_rd_attr(err_block, "%u\n", 20);
+fcoe_ctlr_rd_attr(fcs_error, "%u\n", 20);
+
+fcoe_fcf_private_rd_attr_cast(fabric_name, "0x%llx\n", 20, unsigned long long);
+fcoe_fcf_private_rd_attr_cast(switch_name, "0x%llx\n", 20, unsigned long long);
+fcoe_fcf_private_rd_attr(priority, "%u\n", 20);
+fcoe_fcf_private_rd_attr(fc_map, "0x%x\n", 20);
+fcoe_fcf_private_rd_attr(vfid, "%u\n", 20);
+fcoe_fcf_private_rd_attr(mac, "%pM\n", 20);
+fcoe_fcf_private_rd_attr(fka_period, "%u\n", 20);
+fcoe_fcf_rd_attr(selected, "%u\n", 20);
+fcoe_fcf_rd_attr(vlan_id, "%u\n", 20);
+
+fcoe_fcf_private_show_function(dev_loss_tmo, "%d\n", 20, )
+static ssize_t
+store_fcoe_fcf_dev_loss_tmo(struct device *dev, struct device_attribute *attr,
+			    const char *buf, size_t count)
+{
+	struct fcoe_fcf_device *fcf = dev_to_fcf(dev);
+	unsigned long val;
+	int rc;
+
+	rc = fcoe_str_to_dev_loss(buf, &val);
+	if (rc)
+		return rc;
+
+	rc = fcoe_fcf_set_dev_loss_tmo(fcf, val);
+	if (rc)
+		return rc;
+	return count;
+}
+static FCOE_DEVICE_ATTR(fcf, dev_loss_tmo, S_IRUGO | S_IWUSR,
+			show_fcoe_fcf_device_dev_loss_tmo,
+			store_fcoe_fcf_dev_loss_tmo);
+
+static struct attribute *fcoe_ctlr_lesb_attrs[] = {
+	&device_attr_fcoe_ctlr_link_fail.attr,
+	&device_attr_fcoe_ctlr_vlink_fail.attr,
+	&device_attr_fcoe_ctlr_miss_fka.attr,
+	&device_attr_fcoe_ctlr_symb_err.attr,
+	&device_attr_fcoe_ctlr_err_block.attr,
+	&device_attr_fcoe_ctlr_fcs_error.attr,
+	NULL,
+};
+
+static struct attribute_group fcoe_ctlr_lesb_attr_group = {
+	.name = "lesb",
+	.attrs = fcoe_ctlr_lesb_attrs,
+};
+
+static struct attribute *fcoe_ctlr_attrs[] = {
+	&device_attr_fcoe_ctlr_fcf_dev_loss_tmo.attr,
+	&device_attr_fcoe_ctlr_mode.attr,
+	NULL,
+};
+
+static struct attribute_group fcoe_ctlr_attr_group = {
+	.attrs = fcoe_ctlr_attrs,
+};
+
+static const struct attribute_group *fcoe_ctlr_attr_groups[] = {
+	&fcoe_ctlr_attr_group,
+	&fcoe_ctlr_lesb_attr_group,
+	NULL,
+};
+
+static struct attribute *fcoe_fcf_attrs[] = {
+	&device_attr_fcoe_fcf_fabric_name.attr,
+	&device_attr_fcoe_fcf_switch_name.attr,
+	&device_attr_fcoe_fcf_dev_loss_tmo.attr,
+	&device_attr_fcoe_fcf_fc_map.attr,
+	&device_attr_fcoe_fcf_vfid.attr,
+	&device_attr_fcoe_fcf_mac.attr,
+	&device_attr_fcoe_fcf_priority.attr,
+	&device_attr_fcoe_fcf_fka_period.attr,
+	&device_attr_fcoe_fcf_state.attr,
+	&device_attr_fcoe_fcf_selected.attr,
+	&device_attr_fcoe_fcf_vlan_id.attr,
+	NULL
+};
+
+static struct attribute_group fcoe_fcf_attr_group = {
+	.attrs = fcoe_fcf_attrs,
+};
+
+static const struct attribute_group *fcoe_fcf_attr_groups[] = {
+	&fcoe_fcf_attr_group,
+	NULL,
+};
+
+struct bus_type fcoe_bus_type;
+
+static int fcoe_bus_match(struct device *dev,
+			  struct device_driver *drv)
+{
+	if (dev->bus == &fcoe_bus_type)
+		return 1;
+	return 0;
+}
+
+/**
+ * fcoe_ctlr_device_release() - Release the FIP ctlr memory
+ * @dev: Pointer to the FIP ctlr's embedded device
+ *
+ * Called when the last FIP ctlr reference is released.
+ */
+static void fcoe_ctlr_device_release(struct device *dev)
+{
+	struct fcoe_ctlr_device *ctlr = dev_to_ctlr(dev);
+	kfree(ctlr);
+}
+
+/**
+ * fcoe_fcf_device_release() - Release the FIP fcf memory
+ * @dev: Pointer to the fcf's embedded device
+ *
+ * Called when the last FIP fcf reference is released.
+ */
+static void fcoe_fcf_device_release(struct device *dev)
+{
+	struct fcoe_fcf_device *fcf = dev_to_fcf(dev);
+	kfree(fcf);
+}
+
+struct device_type fcoe_ctlr_device_type = {
+	.name = "fcoe_ctlr",
+	.groups = fcoe_ctlr_attr_groups,
+	.release = fcoe_ctlr_device_release,
+};
+
+struct device_type fcoe_fcf_device_type = {
+	.name = "fcoe_fcf",
+	.groups = fcoe_fcf_attr_groups,
+	.release = fcoe_fcf_device_release,
+};
+
+struct bus_type fcoe_bus_type = {
+	.name = "fcoe",
+	.match = &fcoe_bus_match,
+};
+
+/**
+ * fcoe_ctlr_device_flush_work() - Flush a FIP ctlr's workqueue
+ * @ctlr: Pointer to the FIP ctlr whose workqueue is to be flushed
+ */
+void fcoe_ctlr_device_flush_work(struct fcoe_ctlr_device *ctlr)
+{
+	if (!fcoe_ctlr_work_q(ctlr)) {
+		printk(KERN_ERR
+		       "ERROR: FIP Ctlr '%d' attempted to flush work, "
+		       "when no workqueue created.\n", ctlr->id);
+		dump_stack();
+		return;
+	}
+
+	flush_workqueue(fcoe_ctlr_work_q(ctlr));
+}
+
+/**
+ * fcoe_ctlr_device_queue_work() - Schedule work for a FIP ctlr's workqueue
+ * @ctlr: Pointer to the FIP ctlr that owns the workqueue
+ * @work:   Work to queue for execution
+ *
+ * Return value:
+ *	1 on success / 0 already queued / < 0 for error
+ */
+int fcoe_ctlr_device_queue_work(struct fcoe_ctlr_device *ctlr,
+			       struct work_struct *work)
+{
+	if (unlikely(!fcoe_ctlr_work_q(ctlr))) {
+		printk(KERN_ERR
+		       "ERROR: FIP Ctlr '%d' attempted to queue work, "
+		       "when no workqueue created.\n", ctlr->id);
+		dump_stack();
+
+		return -EINVAL;
+	}
+
+	return queue_work(fcoe_ctlr_work_q(ctlr), work);
+}
+
+/**
+ * fcoe_ctlr_device_flush_devloss() - Flush a FIP ctlr's devloss workqueue
+ * @ctlr: Pointer to FIP ctlr whose workqueue is to be flushed
+ */
+void fcoe_ctlr_device_flush_devloss(struct fcoe_ctlr_device *ctlr)
+{
+	if (!fcoe_ctlr_devloss_work_q(ctlr)) {
+		printk(KERN_ERR
+		       "ERROR: FIP Ctlr '%d' attempted to flush work, "
+		       "when no workqueue created.\n", ctlr->id);
+		dump_stack();
+		return;
+	}
+
+	flush_workqueue(fcoe_ctlr_devloss_work_q(ctlr));
+}
+
+/**
+ * fcoe_ctlr_device_queue_devloss_work() - Schedule work for a FIP ctlr's devloss workqueue
+ * @ctlr: Pointer to the FIP ctlr that owns the devloss workqueue
+ * @work:   Work to queue for execution
+ * @delay:  jiffies to delay the work queuing
+ *
+ * Return value:
+ *	1 on success / 0 already queued / < 0 for error
+ */
+int fcoe_ctlr_device_queue_devloss_work(struct fcoe_ctlr_device *ctlr,
+				       struct delayed_work *work,
+				       unsigned long delay)
+{
+	if (unlikely(!fcoe_ctlr_devloss_work_q(ctlr))) {
+		printk(KERN_ERR
+		       "ERROR: FIP Ctlr '%d' attempted to queue work, "
+		       "when no workqueue created.\n", ctlr->id);
+		dump_stack();
+
+		return -EINVAL;
+	}
+
+	return queue_delayed_work(fcoe_ctlr_devloss_work_q(ctlr), work, delay);
+}
+
+static int fcoe_fcf_device_match(struct fcoe_fcf_device *new,
+				 struct fcoe_fcf_device *old)
+{
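+	/* FCFs are considered identical when WWNs, FC-MAP and MAC all match. */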
+	if (new->switch_name == old->switch_name &&
+	    new->fabric_name == old->fabric_name &&
+	    new->fc_map == old->fc_map &&
+	    compare_ether_addr(new->mac, old->mac) == 0)
+		return 1;
+	return 0;
+}
+
+/**
+ * fcoe_ctlr_device_add() - Add a FIP ctlr to sysfs
+ * @parent:    The parent device to which the fcoe_ctlr instance
+ *             should be attached
+ * @f:         The LLD's FCoE sysfs function template pointer
+ * @priv_size: Size to be allocated with the fcoe_ctlr_device for the LLD
+ *
+ * This routine allocates a FIP ctlr object with some additional memory
+ * for the LLD. The FIP ctlr is initialized, added to sysfs and then
+ * attributes are added to it.
+ */
+struct fcoe_ctlr_device *fcoe_ctlr_device_add(struct device *parent,
+				    struct fcoe_sysfs_function_template *f,
+				    int priv_size)
+{
+	struct fcoe_ctlr_device *ctlr;
+	int error = 0;
+
+	ctlr = kzalloc(sizeof(struct fcoe_ctlr_device) + priv_size,
+		       GFP_KERNEL);
+	if (!ctlr)
+		goto out;
+
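+	/* Assign a unique, zero-based id to the new controller. */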
+	ctlr->id = atomic_inc_return(&ctlr_num) - 1;
+	ctlr->f = f;
+	INIT_LIST_HEAD(&ctlr->fcfs);
+	mutex_init(&ctlr->lock);
+	ctlr->dev.parent = parent;
+	ctlr->dev.bus = &fcoe_bus_type;
+	ctlr->dev.type = &fcoe_ctlr_device_type;
+
+	ctlr->fcf_dev_loss_tmo = fcoe_fcf_dev_loss_tmo;
+
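+	/*
+	 * Each controller gets its own single-threaded work and devloss
+	 * workqueues, named after its id.
+	 */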
+	snprintf(ctlr->work_q_name, sizeof(ctlr->work_q_name),
+		 "ctlr_wq_%d", ctlr->id);
+	ctlr->work_q = create_singlethread_workqueue(
+		ctlr->work_q_name);
+	if (!ctlr->work_q)
+		goto out_del;
+
+	snprintf(ctlr->devloss_work_q_name,
+		 sizeof(ctlr->devloss_work_q_name),
+		 "ctlr_dl_wq_%d", ctlr->id);
+	ctlr->devloss_work_q = create_singlethread_workqueue(
+		ctlr->devloss_work_q_name);
+	if (!ctlr->devloss_work_q)
+		goto out_del_q;
+
+	dev_set_name(&ctlr->dev, "ctlr_%d", ctlr->id);
+	error = device_register(&ctlr->dev);
+	if (error)
+		goto out_del_q2;
+
+	return ctlr;
+
+out_del_q2:
+	destroy_workqueue(ctlr->devloss_work_q);
+	ctlr->devloss_work_q = NULL;
+out_del_q:
+	destroy_workqueue(ctlr->work_q);
+	ctlr->work_q = NULL;
+out_del:
+	kfree(ctlr);
+out:
+	return NULL;
+}
+EXPORT_SYMBOL_GPL(fcoe_ctlr_device_add);
+
+/**
+ * fcoe_ctlr_device_delete() - Delete a FIP ctlr and its subtree from sysfs
+ * @ctlr: A pointer to the ctlr to be deleted
+ *
+ * Deletes a FIP ctlr and any fcfs attached
+ * to it. Deleting fcfs will cause their children
+ * to be deleted as well.
+ *
+ * The ctlr is detached from sysfs and its resources
+ * are freed (workqueues), but the memory is not freed
+ * until its last reference is released.
+ *
+ * This routine expects no locks to be held by
+ * the caller.
+ *
+ * TODO: Currently there are no callbacks to clean up LLD data
+ * for a fcoe_fcf_device. LLDs must keep this in mind, as they need
+ * to clean up their LLD data for every fcoe_fcf_device before
+ * calling fcoe_ctlr_device_delete.
+ */
+void fcoe_ctlr_device_delete(struct fcoe_ctlr_device *ctlr)
+{
+	struct fcoe_fcf_device *fcf, *next;
+	/* Remove any attached fcfs */
+	mutex_lock(&ctlr->lock);
+	list_for_each_entry_safe(fcf, next,
+				 &ctlr->fcfs, peers) {
+		list_del(&fcf->peers);
+		fcf->state = FCOE_FCF_STATE_DELETED;
+		fcoe_ctlr_device_queue_work(ctlr, &fcf->delete_work);
+	}
+	mutex_unlock(&ctlr->lock);
+
+	fcoe_ctlr_device_flush_work(ctlr);
+
+	destroy_workqueue(ctlr->devloss_work_q);
+	ctlr->devloss_work_q = NULL;
+	destroy_workqueue(ctlr->work_q);
+	ctlr->work_q = NULL;
+
+	device_unregister(&ctlr->dev);
+}
+EXPORT_SYMBOL_GPL(fcoe_ctlr_device_delete);
+
+/**
+ * fcoe_fcf_device_final_delete() - Final delete routine
+ * @work: The FIP fcf's embedded work struct
+ *
+ * It is expected that the fcf has been removed from
+ * the FIP ctlr's list before calling this routine.
+ */
+static void fcoe_fcf_device_final_delete(struct work_struct *work)
+{
+	struct fcoe_fcf_device *fcf =
+		container_of(work, struct fcoe_fcf_device, delete_work);
+	struct fcoe_ctlr_device *ctlr = fcoe_fcf_dev_to_ctlr_dev(fcf);
+
+	/*
+	 * Cancel any outstanding timers. These should really exist
+	 * only when rmmod'ing the LLDD and we're asking for
+	 * immediate termination of the rports.
+	 */
+	if (!cancel_delayed_work(&fcf->dev_loss_work))
+		fcoe_ctlr_device_flush_devloss(ctlr);
+
+	device_unregister(&fcf->dev);
+}
+
+/**
+ * fip_timeout_deleted_fcf() - Delete a fcf when the devloss timer fires
+ * @work: The FIP fcf's embedded work struct
+ *
+ * Removes the fcf from the FIP ctlr's list of fcfs and
+ * queues the final deletion.
+ */
+static void fip_timeout_deleted_fcf(struct work_struct *work)
+{
+	struct fcoe_fcf_device *fcf =
+		container_of(work, struct fcoe_fcf_device, dev_loss_work.work);
+	struct fcoe_ctlr_device *ctlr = fcoe_fcf_dev_to_ctlr_dev(fcf);
+
+	mutex_lock(&ctlr->lock);
+
+	/*
+	 * If the fcf is deleted or reconnected before the timer
+	 * fires, the devloss queue will be flushed, but the state will
+	 * either be CONNECTED or DELETED. If that is the case, we
+	 * cancel deleting the fcf.
+	 */
+	if (fcf->state != FCOE_FCF_STATE_DISCONNECTED)
+		goto out;
+
+	dev_printk(KERN_ERR, &fcf->dev,
+		   "FIP fcf connection time out: removing fcf\n");
+
+	list_del(&fcf->peers);
+	fcf->state = FCOE_FCF_STATE_DELETED;
+	fcoe_ctlr_device_queue_work(ctlr, &fcf->delete_work);
+
+out:
+	mutex_unlock(&ctlr->lock);
+}
+
+/**
+ * fcoe_fcf_device_delete() - Delete a FIP fcf
+ * @fcf: Pointer to the fcf which is to be deleted
+ *
+ * Queues the FIP fcf on the devloss workqueue
+ *
+ * Expects the ctlr_attrs mutex to be held for fcf
+ * state change.
+ */
+void fcoe_fcf_device_delete(struct fcoe_fcf_device *fcf)
+{
+	struct fcoe_ctlr_device *ctlr = fcoe_fcf_dev_to_ctlr_dev(fcf);
+	int timeout = fcf->dev_loss_tmo;
+
+	if (fcf->state != FCOE_FCF_STATE_CONNECTED)
+		return;
+
+	fcf->state = FCOE_FCF_STATE_DISCONNECTED;
+
+	/*
+	 * FCF will only be re-connected by the LLD calling
+	 * fcoe_fcf_device_add, and it should be setting up
+	 * priv then.
+	 */
+	fcf->priv = NULL;
+
+	fcoe_ctlr_device_queue_devloss_work(ctlr, &fcf->dev_loss_work,
+					   timeout * HZ);
+}
+EXPORT_SYMBOL_GPL(fcoe_fcf_device_delete);
+
+/**
+ * fcoe_fcf_device_add() - Add a FCoE sysfs fcoe_fcf_device to the system
+ * @ctlr:    The fcoe_ctlr_device that will be the fcoe_fcf_device parent
+ * @new_fcf: A temporary FCF used for lookups on the current list of fcfs
+ *
+ * Expects to be called with the ctlr->lock held
+ */
+struct fcoe_fcf_device *fcoe_fcf_device_add(struct fcoe_ctlr_device *ctlr,
+					    struct fcoe_fcf_device *new_fcf)
+{
+	struct fcoe_fcf_device *fcf;
+	int error = 0;
+
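+	/*
+	 * If this FCF is already known, reuse the existing entry and
+	 * cancel any pending devloss work.
+	 */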
+	list_for_each_entry(fcf, &ctlr->fcfs, peers) {
+		if (fcoe_fcf_device_match(new_fcf, fcf)) {
+			if (fcf->state == FCOE_FCF_STATE_CONNECTED)
+				return fcf;
+
+			fcf->state = FCOE_FCF_STATE_CONNECTED;
+
+			if (!cancel_delayed_work(&fcf->dev_loss_work))
+				fcoe_ctlr_device_flush_devloss(ctlr);
+
+			return fcf;
+		}
+	}
+
+	fcf = kzalloc(sizeof(struct fcoe_fcf_device), GFP_ATOMIC);
+	if (unlikely(!fcf))
+		goto out;
+
+	INIT_WORK(&fcf->delete_work, fcoe_fcf_device_final_delete);
+	INIT_DELAYED_WORK(&fcf->dev_loss_work, fip_timeout_deleted_fcf);
+
+	fcf->dev.parent = &ctlr->dev;
+	fcf->dev.bus = &fcoe_bus_type;
+	fcf->dev.type = &fcoe_fcf_device_type;
+	fcf->id = atomic_inc_return(&fcf_num) - 1;
+	fcf->state = FCOE_FCF_STATE_UNKNOWN;
+
+	fcf->dev_loss_tmo = ctlr->fcf_dev_loss_tmo;
+
+	dev_set_name(&fcf->dev, "fcf_%d", fcf->id);
+
+	fcf->fabric_name = new_fcf->fabric_name;
+	fcf->switch_name = new_fcf->switch_name;
+	fcf->fc_map = new_fcf->fc_map;
+	fcf->vfid = new_fcf->vfid;
+	memcpy(fcf->mac, new_fcf->mac, ETH_ALEN);
+	fcf->priority = new_fcf->priority;
+	fcf->fka_period = new_fcf->fka_period;
+	fcf->selected = new_fcf->selected;
+
+	error = device_register(&fcf->dev);
+	if (error)
+		goto out_del;
+
+	fcf->state = FCOE_FCF_STATE_CONNECTED;
+	list_add_tail(&fcf->peers, &ctlr->fcfs);
+
+	return fcf;
+
+out_del:
+	kfree(fcf);
+out:
+	return NULL;
+}
+EXPORT_SYMBOL_GPL(fcoe_fcf_device_add);
+
+int __init fcoe_sysfs_setup(void)
+{
+	int error;
+
+	atomic_set(&ctlr_num, 0);
+	atomic_set(&fcf_num, 0);
+
+	error = bus_register(&fcoe_bus_type);
+	if (error)
+		return error;
+
+	return 0;
+}
+
+void __exit fcoe_sysfs_teardown(void)
+{
+	bus_unregister(&fcoe_bus_type);
+}
diff --git a/drivers/scsi/fcoe/fcoe_transport.c b/drivers/scsi/fcoe/fcoe_transport.c
index 710e149..b46f43dc 100644
--- a/drivers/scsi/fcoe/fcoe_transport.c
+++ b/drivers/scsi/fcoe/fcoe_transport.c
@@ -815,9 +815,17 @@
  */
 static int __init libfcoe_init(void)
 {
-	fcoe_transport_init();
+	int rc = 0;
 
-	return 0;
+	rc = fcoe_transport_init();
+	if (rc)
+		return rc;
+
+	rc = fcoe_sysfs_setup();
+	if (rc)
+		fcoe_transport_exit();
+
+	return rc;
 }
 module_init(libfcoe_init);
 
@@ -826,6 +834,7 @@
  */
 static void __exit libfcoe_exit(void)
 {
+	fcoe_sysfs_teardown();
 	fcoe_transport_exit();
 }
 module_exit(libfcoe_exit);
diff --git a/drivers/scsi/mpt2sas/mpt2sas_base.c b/drivers/scsi/mpt2sas/mpt2sas_base.c
index 6102ef2..9d46fcb 100644
--- a/drivers/scsi/mpt2sas/mpt2sas_base.c
+++ b/drivers/scsi/mpt2sas/mpt2sas_base.c
@@ -1792,7 +1792,7 @@
 static inline u8
 _base_get_msix_index(struct MPT2SAS_ADAPTER *ioc)
 {
-	return ioc->cpu_msix_table[smp_processor_id()];
+	return ioc->cpu_msix_table[raw_smp_processor_id()];
 }
 
 /**
diff --git a/drivers/scsi/qla2xxx/Kconfig b/drivers/scsi/qla2xxx/Kconfig
index 6208d56..317a7fd 100644
--- a/drivers/scsi/qla2xxx/Kconfig
+++ b/drivers/scsi/qla2xxx/Kconfig
@@ -25,3 +25,12 @@
 	Firmware images can be retrieved from:
 
 		ftp://ftp.qlogic.com/outgoing/linux/firmware/
+
+config TCM_QLA2XXX
+	tristate "TCM_QLA2XXX fabric module for Qlogic 2xxx series target mode HBAs"
+	depends on SCSI_QLA_FC && TARGET_CORE
+	select LIBFC
+	select BTREE
+	default n
+	---help---
+	Say Y here to enable the TCM_QLA2XXX fabric module for Qlogic 2xxx series target mode HBAs
diff --git a/drivers/scsi/qla2xxx/Makefile b/drivers/scsi/qla2xxx/Makefile
index 5df782f..dce7d78 100644
--- a/drivers/scsi/qla2xxx/Makefile
+++ b/drivers/scsi/qla2xxx/Makefile
@@ -1,5 +1,6 @@
 qla2xxx-y := qla_os.o qla_init.o qla_mbx.o qla_iocb.o qla_isr.o qla_gs.o \
 		qla_dbg.o qla_sup.o qla_attr.o qla_mid.o qla_dfs.o qla_bsg.o \
-        qla_nx.o
+        qla_nx.o qla_target.o
 
 obj-$(CONFIG_SCSI_QLA_FC) += qla2xxx.o
+obj-$(CONFIG_TCM_QLA2XXX) += tcm_qla2xxx.o
diff --git a/drivers/scsi/qla2xxx/qla_attr.c b/drivers/scsi/qla2xxx/qla_attr.c
index 5926f5a..5ab9530 100644
--- a/drivers/scsi/qla2xxx/qla_attr.c
+++ b/drivers/scsi/qla2xxx/qla_attr.c
@@ -5,6 +5,7 @@
  * See LICENSE.qla2xxx for copyright and licensing details.
  */
 #include "qla_def.h"
+#include "qla_target.h"
 
 #include <linux/kthread.h>
 #include <linux/vmalloc.h>
@@ -576,6 +577,7 @@
 		scsi_block_requests(vha->host);
 		set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
 		if (IS_QLA82XX(ha)) {
+			ha->flags.isp82xx_no_md_cap = 1;
 			qla82xx_idc_lock(ha);
 			qla82xx_set_reset_owner(vha);
 			qla82xx_idc_unlock(ha);
@@ -585,7 +587,7 @@
 		scsi_unblock_requests(vha->host);
 		break;
 	case 0x2025d:
-		if (!IS_QLA81XX(ha))
+		if (!IS_QLA81XX(ha) || !IS_QLA8031(ha))
 			return -EPERM;
 
 		ql_log(ql_log_info, vha, 0x706f,
@@ -1105,9 +1107,8 @@
 			      struct device_attribute *attr, char *buf)
 {
 	scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
-	struct qla_hw_data *ha = vha->hw;
 	return snprintf(buf, PAGE_SIZE, "%d\n",
-	    ha->qla_stats.total_isp_aborts);
+	    vha->qla_stats.total_isp_aborts);
 }
 
 static ssize_t
@@ -1154,7 +1155,7 @@
 	scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
 	struct qla_hw_data *ha = vha->hw;
 
-	if (!IS_QLA81XX(ha) && !IS_QLA83XX(ha))
+	if (!IS_QLA81XX(ha) && !IS_QLA8031(ha))
 		return snprintf(buf, PAGE_SIZE, "\n");
 
 	return snprintf(buf, PAGE_SIZE, "%d.%02d.%02d\n",
@@ -1537,7 +1538,7 @@
 	dma_addr_t stats_dma;
 	struct fc_host_statistics *pfc_host_stat;
 
-	pfc_host_stat = &ha->fc_host_stat;
+	pfc_host_stat = &vha->fc_host_stat;
 	memset(pfc_host_stat, -1, sizeof(struct fc_host_statistics));
 
 	if (test_bit(UNLOADING, &vha->dpc_flags))
@@ -1580,8 +1581,8 @@
 		pfc_host_stat->dumped_frames = stats->dumped_frames;
 		pfc_host_stat->nos_count = stats->nos_rcvd;
 	}
-	pfc_host_stat->fcp_input_megabytes = ha->qla_stats.input_bytes >> 20;
-	pfc_host_stat->fcp_output_megabytes = ha->qla_stats.output_bytes >> 20;
+	pfc_host_stat->fcp_input_megabytes = vha->qla_stats.input_bytes >> 20;
+	pfc_host_stat->fcp_output_megabytes = vha->qla_stats.output_bytes >> 20;
 
 done_free:
         dma_pool_free(ha->s_dma_pool, stats, stats_dma);
@@ -1737,6 +1738,7 @@
 	fc_host_supported_speeds(vha->host) =
 		fc_host_supported_speeds(base_vha->host);
 
+	qlt_vport_create(vha, ha);
 	qla24xx_vport_disable(fc_vport, disable);
 
 	if (ha->flags.cpu_affinity_enabled) {
@@ -1951,12 +1953,16 @@
 	fc_host_dev_loss_tmo(vha->host) = ha->port_down_retry_count;
 	fc_host_node_name(vha->host) = wwn_to_u64(vha->node_name);
 	fc_host_port_name(vha->host) = wwn_to_u64(vha->port_name);
-	fc_host_supported_classes(vha->host) = FC_COS_CLASS3;
+	fc_host_supported_classes(vha->host) = ha->tgt.enable_class_2 ?
+			(FC_COS_CLASS2|FC_COS_CLASS3) : FC_COS_CLASS3;
 	fc_host_max_npiv_vports(vha->host) = ha->max_npiv_vports;
 	fc_host_npiv_vports_inuse(vha->host) = ha->cur_vport_count;
 
 	if (IS_CNA_CAPABLE(ha))
 		speed = FC_PORTSPEED_10GBIT;
+	else if (IS_QLA2031(ha))
+		speed = FC_PORTSPEED_16GBIT | FC_PORTSPEED_8GBIT |
+		    FC_PORTSPEED_4GBIT;
 	else if (IS_QLA25XX(ha))
 		speed = FC_PORTSPEED_8GBIT | FC_PORTSPEED_4GBIT |
 		    FC_PORTSPEED_2GBIT | FC_PORTSPEED_1GBIT;
diff --git a/drivers/scsi/qla2xxx/qla_bsg.c b/drivers/scsi/qla2xxx/qla_bsg.c
index bc3cc6d..c688838 100644
--- a/drivers/scsi/qla2xxx/qla_bsg.c
+++ b/drivers/scsi/qla2xxx/qla_bsg.c
@@ -297,7 +297,6 @@
 
 		/* Initialize all required  fields of fcport */
 		fcport->vha = vha;
-		fcport->vp_idx = vha->vp_idx;
 		fcport->d_id.b.al_pa =
 			bsg_job->request->rqst_data.h_els.port_id[0];
 		fcport->d_id.b.area =
@@ -483,7 +482,6 @@
 
 	/* Initialize all required  fields of fcport */
 	fcport->vha = vha;
-	fcport->vp_idx = vha->vp_idx;
 	fcport->d_id.b.al_pa = bsg_job->request->rqst_data.h_ct.port_id[0];
 	fcport->d_id.b.area = bsg_job->request->rqst_data.h_ct.port_id[1];
 	fcport->d_id.b.domain = bsg_job->request->rqst_data.h_ct.port_id[2];
@@ -544,7 +542,7 @@
 	int rval = 0;
 	struct qla_hw_data *ha = vha->hw;
 
-	if (!IS_QLA81XX(ha) && !IS_QLA83XX(ha))
+	if (!IS_QLA81XX(ha) && !IS_QLA8031(ha))
 		goto done_set_internal;
 
 	new_config[0] = config[0] | (ENABLE_INTERNAL_LOOPBACK << 1);
@@ -586,7 +584,7 @@
 	uint16_t new_config[4];
 	struct qla_hw_data *ha = vha->hw;
 
-	if (!IS_QLA81XX(ha) && !IS_QLA83XX(ha))
+	if (!IS_QLA81XX(ha) && !IS_QLA8031(ha))
 		goto done_reset_internal;
 
 	memset(new_config, 0 , sizeof(new_config));
@@ -710,8 +708,7 @@
 	elreq.options = bsg_job->request->rqst_data.h_vendor.vendor_cmd[1];
 
 	if ((ha->current_topology == ISP_CFG_F ||
-	    (atomic_read(&vha->loop_state) == LOOP_DOWN) ||
-	    ((IS_QLA81XX(ha) || IS_QLA83XX(ha)) &&
+	    ((IS_QLA81XX(ha) || IS_QLA8031(ha)) &&
 	    le32_to_cpu(*(uint32_t *)req_data) == ELS_OPCODE_BYTE
 	    && req_data_len == MAX_ELS_FRAME_PAYLOAD)) &&
 		elreq.options == EXTERNAL_LOOPBACK) {
@@ -1402,6 +1399,9 @@
 	if (rval)
 		return rval;
 
+	/* Set the isp82xx_no_md_cap not to capture minidump */
+	ha->flags.isp82xx_no_md_cap = 1;
+
 	sg_copy_to_buffer(bsg_job->request_payload.sg_list,
 	    bsg_job->request_payload.sg_cnt, ha->optrom_buffer,
 	    ha->optrom_region_size);
diff --git a/drivers/scsi/qla2xxx/qla_dbg.c b/drivers/scsi/qla2xxx/qla_dbg.c
index 62324a1..fdee561 100644
--- a/drivers/scsi/qla2xxx/qla_dbg.c
+++ b/drivers/scsi/qla2xxx/qla_dbg.c
@@ -11,27 +11,31 @@
  * ----------------------------------------------------------------------
  * |             Level            |   Last Value Used  |     Holes	|
  * ----------------------------------------------------------------------
- * | Module Init and Probe        |       0x0120       | 0x4b,0xba,0xfa |
- * | Mailbox commands             |       0x113e       | 0x112c-0x112e  |
+ * | Module Init and Probe        |       0x0122       | 0x4b,0xba,0xfa |
+ * | Mailbox commands             |       0x1140       | 0x111a-0x111b  |
+ * |                              |                    | 0x112c-0x112e  |
  * |                              |                    | 0x113a         |
  * | Device Discovery             |       0x2086       | 0x2020-0x2022  |
  * | Queue Command and IO tracing |       0x3030       | 0x3006,0x3008  |
  * |                              |                    | 0x302d-0x302e  |
- * | DPC Thread                   |       0x401c       |		|
- * | Async Events                 |       0x505d       | 0x502b-0x502f  |
+ * | DPC Thread                   |       0x401c       | 0x4002,0x4013  |
+ * | Async Events                 |       0x505f       | 0x502b-0x502f  |
  * |                              |                    | 0x5047,0x5052  |
- * | Timer Routines               |       0x6011       | 0x600e-0x600f  |
+ * | Timer Routines               |       0x6011       |                |
  * | User Space Interactions      |       0x709f       | 0x7018,0x702e, |
  * |                              |                    | 0x7039,0x7045, |
  * |                              |                    | 0x7073-0x7075, |
  * |                              |                    | 0x708c         |
  * | Task Management              |       0x803c       | 0x8025-0x8026  |
  * |                              |                    | 0x800b,0x8039  |
- * | AER/EEH                      |       0x900f       |		|
+ * | AER/EEH                      |       0x9011       |		|
  * | Virtual Port                 |       0xa007       |		|
- * | ISP82XX Specific             |       0xb054       | 0xb053         |
+ * | ISP82XX Specific             |       0xb054       | 0xb024         |
  * | MultiQ                       |       0xc00c       |		|
  * | Misc                         |       0xd010       |		|
+ * | Target Mode		  |	  0xe06f       |		|
+ * | Target Mode Management	  |	  0xf071       |		|
+ * | Target Mode Task Management  |	  0x1000b      |		|
  * ----------------------------------------------------------------------
  */
 
@@ -379,6 +383,54 @@
 }
 
 static inline void *
+qla2xxx_copy_atioqueues(struct qla_hw_data *ha, void *ptr,
+	uint32_t **last_chain)
+{
+	struct qla2xxx_mqueue_chain *q;
+	struct qla2xxx_mqueue_header *qh;
+	uint32_t num_queues;
+	int que;
+	struct {
+		int length;
+		void *ring;
+	} aq, *aqp;
+
+	if (!ha->tgt.atio_q_length)
+		return ptr;
+
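+	/*
+	 * Only a single ATIO queue exists; dump it using the same
+	 * chain format as the request/response queues.
+	 */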
+	num_queues = 1;
+	aqp = &aq;
+	aqp->length = ha->tgt.atio_q_length;
+	aqp->ring = ha->tgt.atio_ring;
+
+	for (que = 0; que < num_queues; que++) {
+		/* aqp = ha->atio_q_map[que]; */
+		q = ptr;
+		*last_chain = &q->type;
+		q->type = __constant_htonl(DUMP_CHAIN_QUEUE);
+		q->chain_size = htonl(
+		    sizeof(struct qla2xxx_mqueue_chain) +
+		    sizeof(struct qla2xxx_mqueue_header) +
+		    (aqp->length * sizeof(request_t)));
+		ptr += sizeof(struct qla2xxx_mqueue_chain);
+
+		/* Add header. */
+		qh = ptr;
+		qh->queue = __constant_htonl(TYPE_ATIO_QUEUE);
+		qh->number = htonl(que);
+		qh->size = htonl(aqp->length * sizeof(request_t));
+		ptr += sizeof(struct qla2xxx_mqueue_header);
+
+		/* Add data. */
+		memcpy(ptr, aqp->ring, aqp->length * sizeof(request_t));
+
+		ptr += aqp->length * sizeof(request_t);
+	}
+
+	return ptr;
+}
+
+static inline void *
 qla25xx_copy_mqueues(struct qla_hw_data *ha, void *ptr, uint32_t **last_chain)
 {
 	struct qla2xxx_mqueue_chain *q;
@@ -873,6 +925,8 @@
 	struct qla24xx_fw_dump *fw;
 	uint32_t	ext_mem_cnt;
 	void		*nxt;
+	void		*nxt_chain;
+	uint32_t	*last_chain = NULL;
 	struct scsi_qla_host *base_vha = pci_get_drvdata(ha->pdev);
 
 	if (IS_QLA82XX(ha))
@@ -1091,6 +1145,16 @@
 
 	qla24xx_copy_eft(ha, nxt);
 
+	nxt_chain = (void *)ha->fw_dump + ha->chain_offset;
+	nxt_chain = qla2xxx_copy_atioqueues(ha, nxt_chain, &last_chain);
+	if (last_chain) {
+		ha->fw_dump->version |= __constant_htonl(DUMP_CHAIN_VARIANT);
+		*last_chain |= __constant_htonl(DUMP_CHAIN_LAST);
+	}
+
+	/* Adjust valid length. */
+	ha->fw_dump_len = (nxt_chain - (void *)ha->fw_dump);
+
 qla24xx_fw_dump_failed_0:
 	qla2xxx_dump_post_process(base_vha, rval);
 
@@ -1399,6 +1463,7 @@
 	/* Chain entries -- started with MQ. */
 	nxt_chain = qla25xx_copy_fce(ha, nxt_chain, &last_chain);
 	nxt_chain = qla25xx_copy_mqueues(ha, nxt_chain, &last_chain);
+	nxt_chain = qla2xxx_copy_atioqueues(ha, nxt_chain, &last_chain);
 	if (last_chain) {
 		ha->fw_dump->version |= __constant_htonl(DUMP_CHAIN_VARIANT);
 		*last_chain |= __constant_htonl(DUMP_CHAIN_LAST);
@@ -1717,6 +1782,7 @@
 	/* Chain entries -- started with MQ. */
 	nxt_chain = qla25xx_copy_fce(ha, nxt_chain, &last_chain);
 	nxt_chain = qla25xx_copy_mqueues(ha, nxt_chain, &last_chain);
+	nxt_chain = qla2xxx_copy_atioqueues(ha, nxt_chain, &last_chain);
 	if (last_chain) {
 		ha->fw_dump->version |= __constant_htonl(DUMP_CHAIN_VARIANT);
 		*last_chain |= __constant_htonl(DUMP_CHAIN_LAST);
@@ -2218,6 +2284,7 @@
 	/* Chain entries -- started with MQ. */
 	nxt_chain = qla25xx_copy_fce(ha, nxt_chain, &last_chain);
 	nxt_chain = qla25xx_copy_mqueues(ha, nxt_chain, &last_chain);
+	nxt_chain = qla2xxx_copy_atioqueues(ha, nxt_chain, &last_chain);
 	if (last_chain) {
 		ha->fw_dump->version |= __constant_htonl(DUMP_CHAIN_VARIANT);
 		*last_chain |= __constant_htonl(DUMP_CHAIN_LAST);
diff --git a/drivers/scsi/qla2xxx/qla_dbg.h b/drivers/scsi/qla2xxx/qla_dbg.h
index 2157bdf..f278df8 100644
--- a/drivers/scsi/qla2xxx/qla_dbg.h
+++ b/drivers/scsi/qla2xxx/qla_dbg.h
@@ -244,6 +244,7 @@
 	uint32_t queue;
 #define TYPE_REQUEST_QUEUE	0x1
 #define TYPE_RESPONSE_QUEUE	0x2
+#define TYPE_ATIO_QUEUE		0x3
 	uint32_t number;
 	uint32_t size;
 };
@@ -339,3 +340,11 @@
 #define ql_dbg_misc	0x00010000 /* For dumping everything that is not
 				    * not covered by upper categories
 				    */
+#define ql_dbg_verbose	0x00008000 /* More verbosity for each level
+				    * This is to be used with other levels where
+				    * more verbosity is required. It might not
+				    * be applicable to all the levels.
+				    */
+#define ql_dbg_tgt	0x00004000 /* Target mode */
+#define ql_dbg_tgt_mgt	0x00002000 /* Target mode management */
+#define ql_dbg_tgt_tmr	0x00001000 /* Target mode task management */
diff --git a/drivers/scsi/qla2xxx/qla_def.h b/drivers/scsi/qla2xxx/qla_def.h
index a244303..39007f5 100644
--- a/drivers/scsi/qla2xxx/qla_def.h
+++ b/drivers/scsi/qla2xxx/qla_def.h
@@ -186,6 +186,7 @@
 #define RESPONSE_ENTRY_CNT_2100		64	/* Number of response entries.*/
 #define RESPONSE_ENTRY_CNT_2300		512	/* Number of response entries.*/
 #define RESPONSE_ENTRY_CNT_MQ		128	/* Number of response entries.*/
+#define ATIO_ENTRY_CNT_24XX		4096	/* Number of ATIO entries. */
 
 struct req_que;
 
@@ -1234,11 +1235,27 @@
  * ISP queue - response queue entry definition.
  */
 typedef struct {
-	uint8_t		data[60];
+	uint8_t		entry_type;		/* Entry type. */
+	uint8_t		entry_count;		/* Entry count. */
+	uint8_t		sys_define;		/* System defined. */
+	uint8_t		entry_status;		/* Entry Status. */
+	uint32_t	handle;			/* System defined handle */
+	uint8_t		data[52];
 	uint32_t	signature;
 #define RESPONSE_PROCESSED	0xDEADDEAD	/* Signature */
 } response_t;
 
+/*
+ * ISP queue - ATIO queue entry definition.
+ */
+struct atio {
+	uint8_t		entry_type;		/* Entry type. */
+	uint8_t		entry_count;		/* Entry count. */
+	uint8_t		data[58];
+	uint32_t	signature;
+#define ATIO_PROCESSED 0xDEADDEAD		/* Signature */
+};
+
 typedef union {
 	uint16_t extended;
 	struct {
@@ -1719,11 +1736,13 @@
 	struct fc_rport *rport, *drport;
 	u32 supported_classes;
 
-	uint16_t vp_idx;
 	uint8_t fc4_type;
 	uint8_t scan_state;
 } fc_port_t;
 
+#define QLA_FCPORT_SCAN_NONE	0
+#define QLA_FCPORT_SCAN_FOUND	1
+
 /*
  * Fibre channel port/lun states.
  */
@@ -1747,6 +1766,7 @@
 #define FCF_LOGIN_NEEDED	BIT_1
 #define FCF_FCP2_DEVICE		BIT_2
 #define FCF_ASYNC_SENT		BIT_3
+#define FCF_CONF_COMP_SUPPORTED BIT_4
 
 /* No loop ID flag. */
 #define FC_NO_LOOP_ID		0x1000
@@ -2419,6 +2439,40 @@
 	uint32_t len;
 };
 
+struct qlt_hw_data {
+	/* Protected by hw lock */
+	uint32_t enable_class_2:1;
+	uint32_t enable_explicit_conf:1;
+	uint32_t ini_mode_force_reverse:1;
+	uint32_t node_name_set:1;
+
+	dma_addr_t atio_dma;	/* Physical address. */
+	struct atio *atio_ring;	/* Base virtual address */
+	struct atio *atio_ring_ptr;	/* Current address. */
+	uint16_t atio_ring_index; /* Current index. */
+	uint16_t atio_q_length;
+
+	void *target_lport_ptr;
+	struct qla_tgt_func_tmpl *tgt_ops;
+	struct qla_tgt *qla_tgt;
+	struct qla_tgt_cmd *cmds[MAX_OUTSTANDING_COMMANDS];
+	uint16_t current_handle;
+
+	struct qla_tgt_vp_map *tgt_vp_map;
+	struct mutex tgt_mutex;
+	struct mutex tgt_host_action_mutex;
+
+	int saved_set;
+	uint16_t saved_exchange_count;
+	uint32_t saved_firmware_options_1;
+	uint32_t saved_firmware_options_2;
+	uint32_t saved_firmware_options_3;
+	uint8_t saved_firmware_options[2];
+	uint8_t saved_add_firmware_options[2];
+
+	uint8_t tgt_node_name[WWN_SIZE];
+};
+
 /*
  * Qlogic host adapter specific data structure.
 */
@@ -2460,7 +2514,9 @@
 		uint32_t	thermal_supported:1;
 		uint32_t	isp82xx_reset_hdlr_active:1;
 		uint32_t	isp82xx_reset_owner:1;
-		/* 28 bits */
+		uint32_t	isp82xx_no_md_cap:1;
+		uint32_t	host_shutting_down:1;
+		/* 30 bits */
 	} flags;
 
 	/* This spinlock is used to protect "io transactions", you must
@@ -2804,7 +2860,6 @@
 					/* ISP2322: red, green, amber. */
 	uint16_t        zio_mode;
 	uint16_t        zio_timer;
-	struct fc_host_statistics fc_host_stat;
 
 	struct qla_msix_entry *msix_entries;
 
@@ -2817,7 +2872,6 @@
 	int             cur_vport_count;
 
 	struct qla_chip_state_84xx *cs84xx;
-	struct qla_statistics qla_stats;
 	struct isp_operations *isp_ops;
 	struct workqueue_struct *wq;
 	struct qlfc_fw fw_buf;
@@ -2863,6 +2917,8 @@
 	dma_addr_t      md_tmplt_hdr_dma;
 	void            *md_dump;
 	uint32_t	md_dump_size;
+
+	struct qlt_hw_data tgt;
 };
 
 /*
@@ -2920,6 +2976,7 @@
 #define FCOE_CTX_RESET_NEEDED	18	/* Initiate FCoE context reset */
 #define MPI_RESET_NEEDED	19	/* Initiate MPI FW reset */
 #define ISP_QUIESCE_NEEDED	20	/* Driver need some quiescence */
+#define SCR_PENDING		21	/* SCR in target mode */
 
 	uint32_t	device_flags;
 #define SWITCH_FOUND		BIT_0
@@ -2979,10 +3036,21 @@
 	struct req_que *req;
 	int		fw_heartbeat_counter;
 	int		seconds_since_last_heartbeat;
+	struct fc_host_statistics fc_host_stat;
+	struct qla_statistics qla_stats;
 
 	atomic_t	vref_count;
 } scsi_qla_host_t;
 
+#define SET_VP_IDX	1
+#define SET_AL_PA	2
+#define RESET_VP_IDX	3
+#define RESET_AL_PA	4
+struct qla_tgt_vp_map {
+	uint8_t	idx;
+	scsi_qla_host_t *vha;
+};
+
 /*
  * Macros to help code, maintain, etc.
  */
diff --git a/drivers/scsi/qla2xxx/qla_gbl.h b/drivers/scsi/qla2xxx/qla_gbl.h
index 9f06580..9eacd2d 100644
--- a/drivers/scsi/qla2xxx/qla_gbl.h
+++ b/drivers/scsi/qla2xxx/qla_gbl.h
@@ -175,6 +175,7 @@
 /*
  * Global Function Prototypes in qla_iocb.c source file.
  */
+
 extern uint16_t qla2x00_calc_iocbs_32(uint16_t);
 extern uint16_t qla2x00_calc_iocbs_64(uint16_t);
 extern void qla2x00_build_scsi_iocbs_32(srb_t *, cmd_entry_t *, uint16_t);
@@ -188,6 +189,8 @@
 extern void qla24xx_build_scsi_iocbs(srb_t *, struct cmd_type_7 *, uint16_t);
 extern int qla24xx_dif_start_scsi(srb_t *);
 
+extern void *qla2x00_alloc_iocbs(scsi_qla_host_t *, srb_t *);
+extern int qla2x00_issue_marker(scsi_qla_host_t *, int);
 
 /*
  * Global Function Prototypes in qla_mbx.c source file.
@@ -239,6 +242,9 @@
 qla2x00_init_firmware(scsi_qla_host_t *, uint16_t);
 
 extern int
+qla2x00_get_node_name_list(scsi_qla_host_t *, void **, int *);
+
+extern int
 qla2x00_get_port_database(scsi_qla_host_t *, fc_port_t *, uint8_t);
 
 extern int
@@ -383,6 +389,8 @@
 extern void qla2x00_free_irqs(scsi_qla_host_t *);
 
 extern int qla2x00_get_data_rate(scsi_qla_host_t *);
+extern char *qla2x00_get_link_speed_str(struct qla_hw_data *);
+
 /*
  * Global Function Prototypes in qla_sup.c source file.
  */
@@ -546,6 +554,7 @@
 extern void qla2x00_sp_timeout(unsigned long);
 extern void qla2x00_bsg_job_done(void *, void *, int);
 extern void qla2x00_bsg_sp_free(void *, void *);
+extern void qla2x00_start_iocbs(struct scsi_qla_host *, struct req_que *);
 
 /* Interrupt related */
 extern irqreturn_t qla82xx_intr_handler(int, void *);
diff --git a/drivers/scsi/qla2xxx/qla_gs.c b/drivers/scsi/qla2xxx/qla_gs.c
index 3128f80..05260d2 100644
--- a/drivers/scsi/qla2xxx/qla_gs.c
+++ b/drivers/scsi/qla2xxx/qla_gs.c
@@ -5,6 +5,7 @@
  * See LICENSE.qla2xxx for copyright and licensing details.
  */
 #include "qla_def.h"
+#include "qla_target.h"
 
 static int qla2x00_sns_ga_nxt(scsi_qla_host_t *, fc_port_t *);
 static int qla2x00_sns_gid_pt(scsi_qla_host_t *, sw_info_t *);
@@ -556,7 +557,8 @@
 	ct_req->req.rff_id.port_id[1] = vha->d_id.b.area;
 	ct_req->req.rff_id.port_id[2] = vha->d_id.b.al_pa;
 
-	ct_req->req.rff_id.fc4_feature = BIT_1;
+	qlt_rff_id(vha, ct_req);
+
 	ct_req->req.rff_id.fc4_type = 0x08;		/* SCSI - FCP */
 
 	/* Execute MS IOCB */
diff --git a/drivers/scsi/qla2xxx/qla_init.c b/drivers/scsi/qla2xxx/qla_init.c
index b946564..ca50847 100644
--- a/drivers/scsi/qla2xxx/qla_init.c
+++ b/drivers/scsi/qla2xxx/qla_init.c
@@ -17,6 +17,9 @@
 #include <asm/prom.h>
 #endif
 
+#include <target/target_core_base.h>
+#include "qla_target.h"
+
 /*
 *  QLogic ISP2x00 Hardware Support Function Prototypes.
 */
@@ -518,7 +521,10 @@
 			return QLA_FUNCTION_FAILED;
 		}
 	}
-	rval = qla2x00_init_rings(vha);
+
+	if (qla_ini_mode_enabled(vha))
+		rval = qla2x00_init_rings(vha);
+
 	ha->flags.chip_reset_done = 1;
 
 	if (rval == QLA_SUCCESS && IS_QLA84XX(ha)) {
@@ -1233,6 +1239,8 @@
 			mq_size += ha->max_rsp_queues *
 			    (rsp->length * sizeof(response_t));
 		}
+		if (ha->tgt.atio_q_length)
+			mq_size += ha->tgt.atio_q_length * sizeof(request_t);
 		/* Allocate memory for Fibre Channel Event Buffer. */
 		if (!IS_QLA25XX(ha) && !IS_QLA81XX(ha) && !IS_QLA83XX(ha))
 			goto try_eft;
@@ -1696,6 +1704,12 @@
 	icb->response_q_address[0] = cpu_to_le32(LSD(rsp->dma));
 	icb->response_q_address[1] = cpu_to_le32(MSD(rsp->dma));
 
+	/* Setup ATIO queue dma pointers for target mode */
+	icb->atio_q_inpointer = __constant_cpu_to_le16(0);
+	icb->atio_q_length = cpu_to_le16(ha->tgt.atio_q_length);
+	icb->atio_q_address[0] = cpu_to_le32(LSD(ha->tgt.atio_dma));
+	icb->atio_q_address[1] = cpu_to_le32(MSD(ha->tgt.atio_dma));
+
 	if (ha->mqenable || IS_QLA83XX(ha)) {
 		icb->qos = __constant_cpu_to_le16(QLA_DEFAULT_QUE_QOS);
 		icb->rid = __constant_cpu_to_le16(rid);
@@ -1739,6 +1753,8 @@
 		WRT_REG_DWORD(&reg->isp24.rsp_q_in, 0);
 		WRT_REG_DWORD(&reg->isp24.rsp_q_out, 0);
 	}
+	qlt_24xx_config_rings(vha, reg);
+
 	/* PCI posting */
 	RD_REG_DWORD(&ioreg->hccr);
 }
@@ -1794,6 +1810,11 @@
 
 	spin_unlock(&ha->vport_slock);
 
+	ha->tgt.atio_ring_ptr = ha->tgt.atio_ring;
+	ha->tgt.atio_ring_index = 0;
+	/* Initialize ATIO queue entries */
+	qlt_init_atio_q_entries(vha);
+
 	ha->isp_ops->config_rings(vha);
 
 	spin_unlock_irqrestore(&ha->hardware_lock, flags);
@@ -2051,6 +2072,10 @@
 	vha->d_id.b.area = area;
 	vha->d_id.b.al_pa = al_pa;
 
+	spin_lock(&ha->vport_slock);
+	qlt_update_vp_map(vha, SET_AL_PA);
+	spin_unlock(&ha->vport_slock);
+
 	if (!vha->flags.init_done)
 		ql_log(ql_log_info, vha, 0x2010,
 		    "Topology - %s, Host Loop address 0x%x.\n",
@@ -2185,7 +2210,7 @@
 	    nv->id[2] != 'P' || nv->id[3] != ' ' || nv->nvram_version < 1) {
 		/* Reset NVRAM data. */
 		ql_log(ql_log_warn, vha, 0x0064,
-		    "Inconisistent NVRAM "
+		    "Inconsistent NVRAM "
 		    "detected: checksum=0x%x id=%c version=0x%x.\n",
 		    chksum, nv->id[0], nv->nvram_version);
 		ql_log(ql_log_warn, vha, 0x0065,
@@ -2270,7 +2295,7 @@
 	if (IS_QLA23XX(ha)) {
 		nv->firmware_options[0] |= BIT_2;
 		nv->firmware_options[0] &= ~BIT_3;
-		nv->firmware_options[0] &= ~BIT_6;
+		nv->special_options[0] &= ~BIT_6;
 		nv->add_firmware_options[1] |= BIT_5 | BIT_4;
 
 		if (IS_QLA2300(ha)) {
@@ -2467,14 +2492,21 @@
 {
 	fc_port_t *fcport = data;
 	struct fc_rport *rport;
+	scsi_qla_host_t *vha = fcport->vha;
 	unsigned long flags;
 
 	spin_lock_irqsave(fcport->vha->host->host_lock, flags);
 	rport = fcport->drport ? fcport->drport: fcport->rport;
 	fcport->drport = NULL;
 	spin_unlock_irqrestore(fcport->vha->host->host_lock, flags);
-	if (rport)
+	if (rport) {
 		fc_remote_port_delete(rport);
+		/*
+		 * Release the target mode FC NEXUS in qla_target.c code
+		 * if target mod is enabled.
+		 */
+		qlt_fc_port_deleted(vha, fcport);
+	}
 }
 
 /**
@@ -2495,11 +2527,11 @@
 
 	/* Setup fcport template structure. */
 	fcport->vha = vha;
-	fcport->vp_idx = vha->vp_idx;
 	fcport->port_type = FCT_UNKNOWN;
 	fcport->loop_id = FC_NO_LOOP_ID;
 	qla2x00_set_fcport_state(fcport, FCS_UNCONFIGURED);
 	fcport->supported_classes = FC_COS_UNSPECIFIED;
+	fcport->scan_state = QLA_FCPORT_SCAN_NONE;
 
 	return fcport;
 }
@@ -2726,7 +2758,6 @@
 		new_fcport->d_id.b.area = area;
 		new_fcport->d_id.b.al_pa = al_pa;
 		new_fcport->loop_id = loop_id;
-		new_fcport->vp_idx = vha->vp_idx;
 		rval2 = qla2x00_get_port_database(vha, new_fcport, 0);
 		if (rval2 != QLA_SUCCESS) {
 			ql_dbg(ql_dbg_disc, vha, 0x201a,
@@ -2760,10 +2791,6 @@
 
 		if (!found) {
 			/* New device, add to fcports list. */
-			if (vha->vp_idx) {
-				new_fcport->vha = vha;
-				new_fcport->vp_idx = vha->vp_idx;
-			}
 			list_add_tail(&new_fcport->list, &vha->vp_fcports);
 
 			/* Allocate a new replacement fcport. */
@@ -2800,8 +2827,6 @@
 static void
 qla2x00_iidma_fcport(scsi_qla_host_t *vha, fc_port_t *fcport)
 {
-#define LS_UNKNOWN      2
-	static char *link_speeds[] = { "1", "2", "?", "4", "8", "10" };
 	char *link_speed;
 	int rval;
 	uint16_t mb[4];
@@ -2829,11 +2854,7 @@
 		    fcport->port_name[6], fcport->port_name[7], rval,
 		    fcport->fp_speed, mb[0], mb[1]);
 	} else {
-		link_speed = link_speeds[LS_UNKNOWN];
-		if (fcport->fp_speed < 5)
-			link_speed = link_speeds[fcport->fp_speed];
-		else if (fcport->fp_speed == 0x13)
-			link_speed = link_speeds[5];
+		link_speed = qla2x00_get_link_speed_str(ha);
 		ql_dbg(ql_dbg_disc, vha, 0x2005,
 		    "iIDMA adjusted to %s GB/s "
 		    "on %02x%02x%02x%02x%02x%02x%02x%02x.\n", link_speed,
@@ -2864,6 +2885,12 @@
 		    "Unable to allocate fc remote port.\n");
 		return;
 	}
+	/*
+	 * Create target mode FC NEXUS in qla_target.c if target mode is
+	 * enabled.
+	 */
+	qlt_fc_port_added(vha, fcport);
+
 	spin_lock_irqsave(fcport->vha->host->host_lock, flags);
 	*((fc_port_t **)rport->dd_data) = fcport;
 	spin_unlock_irqrestore(fcport->vha->host->host_lock, flags);
@@ -2921,7 +2948,7 @@
 qla2x00_configure_fabric(scsi_qla_host_t *vha)
 {
 	int	rval;
-	fc_port_t	*fcport, *fcptemp;
+	fc_port_t	*fcport;
 	uint16_t	next_loopid;
 	uint16_t	mb[MAILBOX_REGISTER_COUNT];
 	uint16_t	loop_id;
@@ -2959,7 +2986,7 @@
 		    0xfc, mb, BIT_1|BIT_0);
 		if (rval != QLA_SUCCESS) {
 			set_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags);
-			return rval;
+			break;
 		}
 		if (mb[0] != MBS_COMMAND_COMPLETE) {
 			ql_dbg(ql_dbg_disc, vha, 0x2042,
@@ -2991,21 +3018,16 @@
 			}
 		}
 
-#define QLA_FCPORT_SCAN		1
-#define QLA_FCPORT_FOUND	2
-
-		list_for_each_entry(fcport, &vha->vp_fcports, list) {
-			fcport->scan_state = QLA_FCPORT_SCAN;
-		}
-
 		rval = qla2x00_find_all_fabric_devs(vha, &new_fcports);
 		if (rval != QLA_SUCCESS)
 			break;
 
-		/*
-		 * Logout all previous fabric devices marked lost, except
-		 * FCP2 devices.
-		 */
+		/* Add new ports to existing port list */
+		list_splice_tail_init(&new_fcports, &vha->vp_fcports);
+
+		/* Starting free loop ID. */
+		next_loopid = ha->min_external_loopid;
+
 		list_for_each_entry(fcport, &vha->vp_fcports, list) {
 			if (test_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags))
 				break;
@@ -3013,7 +3035,8 @@
 			if ((fcport->flags & FCF_FABRIC_DEVICE) == 0)
 				continue;
 
-			if (fcport->scan_state == QLA_FCPORT_SCAN &&
+			/* Logout lost/gone fabric devices (non-FCP2) */
+			if (fcport->scan_state != QLA_FCPORT_SCAN_FOUND &&
 			    atomic_read(&fcport->state) == FCS_ONLINE) {
 				qla2x00_mark_device_lost(vha, fcport,
 				    ql2xplogiabsentdevice, 0);
@@ -3026,78 +3049,30 @@
 					    fcport->d_id.b.domain,
 					    fcport->d_id.b.area,
 					    fcport->d_id.b.al_pa);
-					fcport->loop_id = FC_NO_LOOP_ID;
 				}
-			}
-		}
-
-		/* Starting free loop ID. */
-		next_loopid = ha->min_external_loopid;
-
-		/*
-		 * Scan through our port list and login entries that need to be
-		 * logged in.
-		 */
-		list_for_each_entry(fcport, &vha->vp_fcports, list) {
-			if (atomic_read(&vha->loop_down_timer) ||
-			    test_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags))
-				break;
-
-			if ((fcport->flags & FCF_FABRIC_DEVICE) == 0 ||
-			    (fcport->flags & FCF_LOGIN_NEEDED) == 0)
 				continue;
+			}
+			fcport->scan_state = QLA_FCPORT_SCAN_NONE;
 
-			if (fcport->loop_id == FC_NO_LOOP_ID) {
-				fcport->loop_id = next_loopid;
-				rval = qla2x00_find_new_loop_id(
-				    base_vha, fcport);
-				if (rval != QLA_SUCCESS) {
-					/* Ran out of IDs to use */
-					break;
+			/* Login fabric devices that need a login */
+			if ((fcport->flags & FCF_LOGIN_NEEDED) != 0 &&
+			    atomic_read(&vha->loop_down_timer) == 0) {
+				if (fcport->loop_id == FC_NO_LOOP_ID) {
+					fcport->loop_id = next_loopid;
+					rval = qla2x00_find_new_loop_id(
+					    base_vha, fcport);
+					if (rval != QLA_SUCCESS) {
+						/* Ran out of IDs to use */
+						continue;
+					}
 				}
 			}
-			/* Login and update database */
-			qla2x00_fabric_dev_login(vha, fcport, &next_loopid);
-		}
-
-		/* Exit if out of loop IDs. */
-		if (rval != QLA_SUCCESS) {
-			break;
-		}
-
-		/*
-		 * Login and add the new devices to our port list.
-		 */
-		list_for_each_entry_safe(fcport, fcptemp, &new_fcports, list) {
-			if (atomic_read(&vha->loop_down_timer) ||
-			    test_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags))
-				break;
-
-			/* Find a new loop ID to use. */
-			fcport->loop_id = next_loopid;
-			rval = qla2x00_find_new_loop_id(base_vha, fcport);
-			if (rval != QLA_SUCCESS) {
-				/* Ran out of IDs to use */
-				break;
-			}
 
 			/* Login and update database */
 			qla2x00_fabric_dev_login(vha, fcport, &next_loopid);
-
-			if (vha->vp_idx) {
-				fcport->vha = vha;
-				fcport->vp_idx = vha->vp_idx;
-			}
-			list_move_tail(&fcport->list, &vha->vp_fcports);
 		}
 	} while (0);
 
-	/* Free all new device structures not processed. */
-	list_for_each_entry_safe(fcport, fcptemp, &new_fcports, list) {
-		list_del(&fcport->list);
-		kfree(fcport);
-	}
-
 	if (rval) {
 		ql_dbg(ql_dbg_disc, vha, 0x2068,
 		    "Configure fabric error exit rval=%d.\n", rval);
@@ -3287,7 +3262,7 @@
 			    WWN_SIZE))
 				continue;
 
-			fcport->scan_state = QLA_FCPORT_FOUND;
+			fcport->scan_state = QLA_FCPORT_SCAN_FOUND;
 
 			found++;
 
@@ -3595,6 +3570,12 @@
 			if (mb[10] & BIT_1)
 				fcport->supported_classes |= FC_COS_CLASS3;
 
+			if (IS_FWI2_CAPABLE(ha)) {
+				if (mb[10] & BIT_7)
+					fcport->flags |=
+					    FCF_CONF_COMP_SUPPORTED;
+			}
+
 			rval = QLA_SUCCESS;
 			break;
 		} else if (mb[0] == MBS_LOOP_ID_USED) {
@@ -3841,7 +3822,7 @@
 		vha->flags.online = 0;
 	ha->flags.chip_reset_done = 0;
 	clear_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
-	ha->qla_stats.total_isp_aborts++;
+	vha->qla_stats.total_isp_aborts++;
 
 	ql_log(ql_log_info, vha, 0x00af,
 	    "Performing ISP error recovery - ha=%p.\n", ha);
@@ -4066,6 +4047,7 @@
 	struct qla_hw_data *ha = vha->hw;
 	struct req_que *req = ha->req_q_map[0];
 	struct rsp_que *rsp = ha->rsp_q_map[0];
+	unsigned long flags;
 
 	/* If firmware needs to be loaded */
 	if (qla2x00_isp_firmware(vha)) {
@@ -4090,6 +4072,16 @@
 			qla2x00_marker(vha, req, rsp, 0, 0, MK_SYNC_ALL);
 
 			vha->flags.online = 1;
+
+			/*
+			 * Process any ATIO queue entries that came in
+			 * while we weren't online.
+			 */
+			spin_lock_irqsave(&ha->hardware_lock, flags);
+			if (qla_tgt_mode_enabled(vha))
+				qlt_24xx_process_atio_queue(vha);
+			spin_unlock_irqrestore(&ha->hardware_lock, flags);
+
 			/* Wait at most MAX_TARGET RSCNs for a stable link. */
 			wait_time = 256;
 			do {
@@ -4279,7 +4271,7 @@
 	    nv->nvram_version < __constant_cpu_to_le16(ICB_VERSION)) {
 		/* Reset NVRAM data. */
 		ql_log(ql_log_warn, vha, 0x006b,
-		    "Inconisistent NVRAM detected: checksum=0x%x id=%c "
+		    "Inconsistent NVRAM detected: checksum=0x%x id=%c "
 		    "version=0x%x.\n", chksum, nv->id[0], nv->nvram_version);
 		ql_log(ql_log_warn, vha, 0x006c,
 		    "Falling back to functioning (yet invalid -- WWPN) "
@@ -4330,6 +4322,15 @@
 		rval = 1;
 	}
 
+	if (!qla_ini_mode_enabled(vha)) {
+		/* Don't enable full login after initial LIP */
+		nv->firmware_options_1 &= __constant_cpu_to_le32(~BIT_13);
+		/* Don't enable LIP full login for initiator */
+		nv->host_p &= __constant_cpu_to_le32(~BIT_10);
+	}
+
+	qlt_24xx_config_nvram_stage1(vha, nv);
+
 	/* Reset Initialization control block */
 	memset(icb, 0, ha->init_cb_size);
 
@@ -4357,8 +4358,10 @@
 	qla2x00_set_model_info(vha, nv->model_name, sizeof(nv->model_name),
 	    "QLA2462");
 
-	/* Use alternate WWN? */
+	qlt_24xx_config_nvram_stage2(vha, icb);
+
 	if (nv->host_p & __constant_cpu_to_le32(BIT_15)) {
+		/* Use alternate WWN? */
 		memcpy(icb->node_name, nv->alternate_node_name, WWN_SIZE);
 		memcpy(icb->port_name, nv->alternate_port_name, WWN_SIZE);
 	}
@@ -5029,7 +5032,7 @@
 	    nv->nvram_version < __constant_cpu_to_le16(ICB_VERSION)) {
 		/* Reset NVRAM data. */
 		ql_log(ql_log_info, vha, 0x0073,
-		    "Inconisistent NVRAM detected: checksum=0x%x id=%c "
+		    "Inconsistent NVRAM detected: checksum=0x%x id=%c "
 		    "version=0x%x.\n", chksum, nv->id[0],
 		    le16_to_cpu(nv->nvram_version));
 		ql_log(ql_log_info, vha, 0x0074,
diff --git a/drivers/scsi/qla2xxx/qla_iocb.c b/drivers/scsi/qla2xxx/qla_iocb.c
index eac9509..70dbf53 100644
--- a/drivers/scsi/qla2xxx/qla_iocb.c
+++ b/drivers/scsi/qla2xxx/qla_iocb.c
@@ -5,6 +5,7 @@
  * See LICENSE.qla2xxx for copyright and licensing details.
  */
 #include "qla_def.h"
+#include "qla_target.h"
 
 #include <linux/blkdev.h>
 #include <linux/delay.h>
@@ -23,18 +24,17 @@
 {
 	uint16_t cflags;
 	struct scsi_cmnd *cmd = GET_CMD_SP(sp);
+	struct scsi_qla_host *vha = sp->fcport->vha;
 
 	cflags = 0;
 
 	/* Set transfer direction */
 	if (cmd->sc_data_direction == DMA_TO_DEVICE) {
 		cflags = CF_WRITE;
-		sp->fcport->vha->hw->qla_stats.output_bytes +=
-		    scsi_bufflen(cmd);
+		vha->qla_stats.output_bytes += scsi_bufflen(cmd);
 	} else if (cmd->sc_data_direction == DMA_FROM_DEVICE) {
 		cflags = CF_READ;
-		sp->fcport->vha->hw->qla_stats.input_bytes +=
-		    scsi_bufflen(cmd);
+		vha->qla_stats.input_bytes += scsi_bufflen(cmd);
 	}
 	return (cflags);
 }
@@ -385,9 +385,10 @@
 		else
 			req->cnt = req->length -
 			    (req->ring_index - cnt);
+		/* If still no head room then bail out */
+		if (req->cnt < (req_cnt + 2))
+			goto queuing_error;
 	}
-	if (req->cnt < (req_cnt + 2))
-		goto queuing_error;
 
 	/* Build command packet */
 	req->current_outstanding_cmd = handle;
@@ -470,7 +471,7 @@
 /**
  * qla2x00_start_iocbs() - Execute the IOCB command
  */
-static void
+void
 qla2x00_start_iocbs(struct scsi_qla_host *vha, struct req_que *req)
 {
 	struct qla_hw_data *ha = vha->hw;
@@ -571,6 +572,29 @@
 	return (ret);
 }
 
+/*
+ * qla2x00_issue_marker
+ *
+ * Issue marker
+ * Caller CAN have hardware lock held as specified by ha_locked parameter.
+ * Might release it, then reaquire.
+ */
+int qla2x00_issue_marker(scsi_qla_host_t *vha, int ha_locked)
+{
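+	/*
+	 * Pick the marker variant based on whether the caller already
+	 * holds the hardware lock.
+	 */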
+	if (ha_locked) {
+		if (__qla2x00_marker(vha, vha->req, vha->req->rsp, 0, 0,
+					MK_SYNC_ALL) != QLA_SUCCESS)
+			return QLA_FUNCTION_FAILED;
+	} else {
+		if (qla2x00_marker(vha, vha->req, vha->req->rsp, 0, 0,
+					MK_SYNC_ALL) != QLA_SUCCESS)
+			return QLA_FUNCTION_FAILED;
+	}
+	vha->marker_needed = 0;
+
+	return QLA_SUCCESS;
+}
+
 /**
  * qla24xx_calc_iocbs() - Determine number of Command Type 3 and
  * Continuation Type 1 IOCBs to allocate.
@@ -629,11 +653,11 @@
 	if (cmd->sc_data_direction == DMA_TO_DEVICE) {
 		cmd_pkt->control_flags =
 		    __constant_cpu_to_le16(CF_WRITE_DATA);
-		ha->qla_stats.output_bytes += scsi_bufflen(cmd);
+		vha->qla_stats.output_bytes += scsi_bufflen(cmd);
 	} else if (cmd->sc_data_direction == DMA_FROM_DEVICE) {
 		cmd_pkt->control_flags =
 		    __constant_cpu_to_le16(CF_READ_DATA);
-		ha->qla_stats.input_bytes += scsi_bufflen(cmd);
+		vha->qla_stats.input_bytes += scsi_bufflen(cmd);
 	}
 
 	cur_seg = scsi_sglist(cmd);
@@ -745,13 +769,11 @@
 	if (cmd->sc_data_direction == DMA_TO_DEVICE) {
 		cmd_pkt->task_mgmt_flags =
 		    __constant_cpu_to_le16(TMF_WRITE_DATA);
-		sp->fcport->vha->hw->qla_stats.output_bytes +=
-		    scsi_bufflen(cmd);
+		vha->qla_stats.output_bytes += scsi_bufflen(cmd);
 	} else if (cmd->sc_data_direction == DMA_FROM_DEVICE) {
 		cmd_pkt->task_mgmt_flags =
 		    __constant_cpu_to_le16(TMF_READ_DATA);
-		sp->fcport->vha->hw->qla_stats.input_bytes +=
-		    scsi_bufflen(cmd);
+		vha->qla_stats.input_bytes += scsi_bufflen(cmd);
 	}
 
 	/* One DSD is available in the Command Type 3 IOCB */
@@ -1245,7 +1267,7 @@
 		return QLA_SUCCESS;
 	}
 
-	cmd_pkt->vp_index = sp->fcport->vp_idx;
+	cmd_pkt->vp_index = sp->fcport->vha->vp_idx;
 
 	/* Set transfer direction */
 	if (cmd->sc_data_direction == DMA_TO_DEVICE) {
@@ -1502,9 +1524,9 @@
 		else
 			req->cnt = req->length -
 				(req->ring_index - cnt);
+		if (req->cnt < (req_cnt + 2))
+			goto queuing_error;
 	}
-	if (req->cnt < (req_cnt + 2))
-		goto queuing_error;
 
 	/* Build command packet. */
 	req->current_outstanding_cmd = handle;
@@ -1527,7 +1549,7 @@
 	cmd_pkt->port_id[0] = sp->fcport->d_id.b.al_pa;
 	cmd_pkt->port_id[1] = sp->fcport->d_id.b.area;
 	cmd_pkt->port_id[2] = sp->fcport->d_id.b.domain;
-	cmd_pkt->vp_index = sp->fcport->vp_idx;
+	cmd_pkt->vp_index = sp->fcport->vha->vp_idx;
 
 	int_to_scsilun(cmd->device->lun, &cmd_pkt->lun);
 	host_to_fcp_swap((uint8_t *)&cmd_pkt->lun, sizeof(cmd_pkt->lun));
@@ -1717,11 +1739,10 @@
 		else
 			req->cnt = req->length -
 				(req->ring_index - cnt);
+		if (req->cnt < (req_cnt + 2))
+			goto queuing_error;
 	}
 
-	if (req->cnt < (req_cnt + 2))
-		goto queuing_error;
-
 	status |= QDSS_GOT_Q_SPACE;
 
 	/* Build header part of command packet (excluding the OPCODE). */
@@ -1898,7 +1919,7 @@
 	logio->port_id[0] = sp->fcport->d_id.b.al_pa;
 	logio->port_id[1] = sp->fcport->d_id.b.area;
 	logio->port_id[2] = sp->fcport->d_id.b.domain;
-	logio->vp_index = sp->fcport->vp_idx;
+	logio->vp_index = sp->fcport->vha->vp_idx;
 }
 
 static void
@@ -1922,7 +1943,7 @@
 	mbx->mb2 = cpu_to_le16(sp->fcport->d_id.b.domain);
 	mbx->mb3 = cpu_to_le16(sp->fcport->d_id.b.area << 8 |
 	    sp->fcport->d_id.b.al_pa);
-	mbx->mb9 = cpu_to_le16(sp->fcport->vp_idx);
+	mbx->mb9 = cpu_to_le16(sp->fcport->vha->vp_idx);
 }
 
 static void
@@ -1935,7 +1956,7 @@
 	logio->port_id[0] = sp->fcport->d_id.b.al_pa;
 	logio->port_id[1] = sp->fcport->d_id.b.area;
 	logio->port_id[2] = sp->fcport->d_id.b.domain;
-	logio->vp_index = sp->fcport->vp_idx;
+	logio->vp_index = sp->fcport->vha->vp_idx;
 }
 
 static void
@@ -1952,7 +1973,7 @@
 	mbx->mb2 = cpu_to_le16(sp->fcport->d_id.b.domain);
 	mbx->mb3 = cpu_to_le16(sp->fcport->d_id.b.area << 8 |
 	    sp->fcport->d_id.b.al_pa);
-	mbx->mb9 = cpu_to_le16(sp->fcport->vp_idx);
+	mbx->mb9 = cpu_to_le16(sp->fcport->vha->vp_idx);
 	/* Implicit: mbx->mbx10 = 0. */
 }
 
@@ -1962,7 +1983,7 @@
 	logio->entry_type = LOGINOUT_PORT_IOCB_TYPE;
 	logio->control_flags = cpu_to_le16(LCF_COMMAND_ADISC);
 	logio->nport_handle = cpu_to_le16(sp->fcport->loop_id);
-	logio->vp_index = sp->fcport->vp_idx;
+	logio->vp_index = sp->fcport->vha->vp_idx;
 }
 
 static void
@@ -1983,7 +2004,7 @@
 	mbx->mb3 = cpu_to_le16(LSW(ha->async_pd_dma));
 	mbx->mb6 = cpu_to_le16(MSW(MSD(ha->async_pd_dma)));
 	mbx->mb7 = cpu_to_le16(LSW(MSD(ha->async_pd_dma)));
-	mbx->mb9 = cpu_to_le16(sp->fcport->vp_idx);
+	mbx->mb9 = cpu_to_le16(sp->fcport->vha->vp_idx);
 }
 
 static void
@@ -2009,7 +2030,7 @@
 	tsk->port_id[0] = fcport->d_id.b.al_pa;
 	tsk->port_id[1] = fcport->d_id.b.area;
 	tsk->port_id[2] = fcport->d_id.b.domain;
-	tsk->vp_index = fcport->vp_idx;
+	tsk->vp_index = fcport->vha->vp_idx;
 
 	if (flags == TCF_LUN_RESET) {
 		int_to_scsilun(lun, &tsk->lun);
@@ -2030,7 +2051,7 @@
         els_iocb->handle = sp->handle;
         els_iocb->nport_handle = cpu_to_le16(sp->fcport->loop_id);
         els_iocb->tx_dsd_count = __constant_cpu_to_le16(bsg_job->request_payload.sg_cnt);
-        els_iocb->vp_index = sp->fcport->vp_idx;
+	els_iocb->vp_index = sp->fcport->vha->vp_idx;
         els_iocb->sof_type = EST_SOFI3;
         els_iocb->rx_dsd_count = __constant_cpu_to_le16(bsg_job->reply_payload.sg_cnt);
 
@@ -2160,7 +2181,7 @@
         ct_iocb->handle = sp->handle;
 
 	ct_iocb->nport_handle = cpu_to_le16(sp->fcport->loop_id);
-	ct_iocb->vp_index = sp->fcport->vp_idx;
+	ct_iocb->vp_index = sp->fcport->vha->vp_idx;
         ct_iocb->comp_status = __constant_cpu_to_le16(0);
 
 	ct_iocb->cmd_dsd_count =
@@ -2343,11 +2364,10 @@
 			else
 				req->cnt = req->length -
 					(req->ring_index - cnt);
+			if (req->cnt < (req_cnt + 2))
+				goto queuing_error;
 		}
 
-		if (req->cnt < (req_cnt + 2))
-			goto queuing_error;
-
 		ctx = sp->u.scmd.ctx =
 		    mempool_alloc(ha->ctx_mempool, GFP_ATOMIC);
 		if (!ctx) {
@@ -2362,7 +2382,7 @@
 		if (!ctx->fcp_cmnd) {
 			ql_log(ql_log_fatal, vha, 0x3011,
 			    "Failed to allocate fcp_cmnd for cmd=%p.\n", cmd);
-			goto queuing_error_fcp_cmnd;
+			goto queuing_error;
 		}
 
 		/* Initialize the DSD list and dma handle */
@@ -2400,7 +2420,7 @@
 		cmd_pkt->port_id[0] = sp->fcport->d_id.b.al_pa;
 		cmd_pkt->port_id[1] = sp->fcport->d_id.b.area;
 		cmd_pkt->port_id[2] = sp->fcport->d_id.b.domain;
-		cmd_pkt->vp_index = sp->fcport->vp_idx;
+		cmd_pkt->vp_index = sp->fcport->vha->vp_idx;
 
 		/* Build IOCB segments */
 		if (qla24xx_build_scsi_type_6_iocbs(sp, cmd_pkt, tot_dsds))
@@ -2489,7 +2509,7 @@
 		cmd_pkt->port_id[0] = sp->fcport->d_id.b.al_pa;
 		cmd_pkt->port_id[1] = sp->fcport->d_id.b.area;
 		cmd_pkt->port_id[2] = sp->fcport->d_id.b.domain;
-		cmd_pkt->vp_index = sp->fcport->vp_idx;
+		cmd_pkt->vp_index = sp->fcport->vha->vp_idx;
 
 		int_to_scsilun(cmd->device->lun, &cmd_pkt->lun);
 		host_to_fcp_swap((uint8_t *)&cmd_pkt->lun,
diff --git a/drivers/scsi/qla2xxx/qla_isr.c b/drivers/scsi/qla2xxx/qla_isr.c
index ce42288..6f67a9d 100644
--- a/drivers/scsi/qla2xxx/qla_isr.c
+++ b/drivers/scsi/qla2xxx/qla_isr.c
@@ -5,6 +5,7 @@
  * See LICENSE.qla2xxx for copyright and licensing details.
  */
 #include "qla_def.h"
+#include "qla_target.h"
 
 #include <linux/delay.h>
 #include <linux/slab.h>
@@ -309,6 +310,28 @@
 		    "IDC failed to post ACK.\n");
 }
 
+#define LS_UNKNOWN	2
+char *
+qla2x00_get_link_speed_str(struct qla_hw_data *ha)
+{
+	static char *link_speeds[] = {"1", "2", "?", "4", "8", "16", "10"};
+	char *link_speed;
+	int fw_speed = ha->link_data_rate;
+
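+	/*
+	 * 0x13 is the firmware encoding for 10 Gbps; values below 6 index
+	 * the speed table directly, anything else is reported as unknown.
+	 */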
+	if (IS_QLA2100(ha) || IS_QLA2200(ha))
+		link_speed = link_speeds[0];
+	else if (fw_speed == 0x13)
+		link_speed = link_speeds[6];
+	else {
+		link_speed = link_speeds[LS_UNKNOWN];
+		if (fw_speed < 6)
+			link_speed =
+			    link_speeds[fw_speed];
+	}
+
+	return link_speed;
+}
+
 /**
  * qla2x00_async_event() - Process aynchronous events.
  * @ha: SCSI driver HA context
@@ -317,9 +340,6 @@
 void
 qla2x00_async_event(scsi_qla_host_t *vha, struct rsp_que *rsp, uint16_t *mb)
 {
-#define LS_UNKNOWN	2
-	static char *link_speeds[] = { "1", "2", "?", "4", "8", "16", "10" };
-	char		*link_speed;
 	uint16_t	handle_cnt;
 	uint16_t	cnt, mbx;
 	uint32_t	handles[5];
@@ -454,8 +474,8 @@
 	case MBA_WAKEUP_THRES:		/* Request Queue Wake-up */
 		ql_dbg(ql_dbg_async, vha, 0x5008,
 		    "Asynchronous WAKEUP_THRES.\n");
-		break;
 
+		break;
 	case MBA_LIP_OCCURRED:		/* Loop Initialization Procedure */
 		ql_dbg(ql_dbg_async, vha, 0x5009,
 		    "LIP occurred (%x).\n", mb[1]);
@@ -479,20 +499,14 @@
 		break;
 
 	case MBA_LOOP_UP:		/* Loop Up Event */
-		if (IS_QLA2100(ha) || IS_QLA2200(ha)) {
-			link_speed = link_speeds[0];
+		if (IS_QLA2100(ha) || IS_QLA2200(ha))
 			ha->link_data_rate = PORT_SPEED_1GB;
-		} else {
-			link_speed = link_speeds[LS_UNKNOWN];
-			if (mb[1] < 6)
-				link_speed = link_speeds[mb[1]];
-			else if (mb[1] == 0x13)
-				link_speed = link_speeds[6];
+		else
 			ha->link_data_rate = mb[1];
-		}
 
 		ql_dbg(ql_dbg_async, vha, 0x500a,
-		    "LOOP UP detected (%s Gbps).\n", link_speed);
+		    "LOOP UP detected (%s Gbps).\n",
+		    qla2x00_get_link_speed_str(ha));
 
 		vha->flags.management_server_logged_in = 0;
 		qla2x00_post_aen_work(vha, FCH_EVT_LINKUP, ha->link_data_rate);
@@ -638,6 +652,8 @@
 			ql_dbg(ql_dbg_async, vha, 0x5010,
 			    "Port unavailable %04x %04x %04x.\n",
 			    mb[1], mb[2], mb[3]);
+			ql_log(ql_log_warn, vha, 0x505e,
+			    "Link is offline.\n");
 
 			if (atomic_read(&vha->loop_state) != LOOP_DOWN) {
 				atomic_set(&vha->loop_state, LOOP_DOWN);
@@ -670,12 +686,17 @@
 			ql_dbg(ql_dbg_async, vha, 0x5011,
 			    "Asynchronous PORT UPDATE ignored %04x/%04x/%04x.\n",
 			    mb[1], mb[2], mb[3]);
+
+			qlt_async_event(mb[0], vha, mb);
 			break;
 		}
 
 		ql_dbg(ql_dbg_async, vha, 0x5012,
 		    "Port database changed %04x %04x %04x.\n",
 		    mb[1], mb[2], mb[3]);
+		ql_log(ql_log_warn, vha, 0x505f,
+		    "Link is operational (%s Gbps).\n",
+		    qla2x00_get_link_speed_str(ha));
 
 		/*
 		 * Mark all devices as missing so we will login again.
@@ -684,8 +705,13 @@
 
 		qla2x00_mark_all_devices_lost(vha, 1);
 
+		if (vha->vp_idx == 0 && !qla_ini_mode_enabled(vha))
+			set_bit(SCR_PENDING, &vha->dpc_flags);
+
 		set_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags);
 		set_bit(LOCAL_LOOP_UPDATE, &vha->dpc_flags);
+
+		qlt_async_event(mb[0], vha, mb);
 		break;
 
 	case MBA_RSCN_UPDATE:		/* State Change Registration */
@@ -807,6 +833,8 @@
 		    mb[0], mb[1], mb[2], mb[3]);
 	}
 
+	qlt_async_event(mb[0], vha, mb);
+
 	if (!vha->vp_idx && ha->num_vhosts)
 		qla2x00_alert_all_vps(rsp, mb);
 }
@@ -1172,6 +1200,9 @@
 		} else if (iop[0] & BIT_5)
 			fcport->port_type = FCT_INITIATOR;
 
+		if (iop[0] & BIT_7)
+			fcport->flags |= FCF_CONF_COMP_SUPPORTED;
+
 		if (logio->io_parameter[7] || logio->io_parameter[8])
 			fcport->supported_classes |= FC_COS_CLASS2;
 		if (logio->io_parameter[9] || logio->io_parameter[10])
@@ -1986,6 +2017,9 @@
 
 		if (pkt->entry_status != 0) {
 			qla2x00_error_entry(vha, rsp, (sts_entry_t *) pkt);
+
+			(void)qlt_24xx_process_response_error(vha, pkt);
+
 			((response_t *)pkt)->signature = RESPONSE_PROCESSED;
 			wmb();
 			continue;
@@ -2016,6 +2050,14 @@
                 case ELS_IOCB_TYPE:
 			qla24xx_els_ct_entry(vha, rsp->req, pkt, ELS_IOCB_TYPE);
 			break;
+		case ABTS_RECV_24XX:
+			/* ensure that the ATIO queue is empty */
+			qlt_24xx_process_atio_queue(vha);
+		case ABTS_RESP_24XX:
+		case CTIO_TYPE7:
+		case NOTIFY_ACK_TYPE:
+			qlt_response_pkt_all_vps(vha, (response_t *)pkt);
+			break;
 		case MARKER_TYPE:
 			/* Do nothing in this case, this check is to prevent it
 			 * from falling into default case
@@ -2168,6 +2210,13 @@
 		case 0x14:
 			qla24xx_process_response_queue(vha, rsp);
 			break;
+		case 0x1C: /* ATIO queue updated */
+			qlt_24xx_process_atio_queue(vha);
+			break;
+		case 0x1D: /* ATIO and response queues updated */
+			qlt_24xx_process_atio_queue(vha);
+			qla24xx_process_response_queue(vha, rsp);
+			break;
 		default:
 			ql_dbg(ql_dbg_async, vha, 0x504f,
 			    "Unrecognized interrupt type (%d).\n", stat * 0xff);
@@ -2312,6 +2361,13 @@
 		case 0x14:
 			qla24xx_process_response_queue(vha, rsp);
 			break;
+		case 0x1C: /* ATIO queue updated */
+			qlt_24xx_process_atio_queue(vha);
+			break;
+		case 0x1D: /* ATIO and response queues updated */
+			qlt_24xx_process_atio_queue(vha);
+			qla24xx_process_response_queue(vha, rsp);
+			break;
 		default:
 			ql_dbg(ql_dbg_async, vha, 0x5051,
 			    "Unrecognized interrupt type (%d).\n", stat & 0xff);
@@ -2564,7 +2620,15 @@
 qla2x00_free_irqs(scsi_qla_host_t *vha)
 {
 	struct qla_hw_data *ha = vha->hw;
-	struct rsp_que *rsp = ha->rsp_q_map[0];
+	struct rsp_que *rsp;
+
+	/*
+	 * We need to check that ha->rsp_q_map is valid in case we are called
+	 * from a probe failure context.
+	 */
+	if (!ha->rsp_q_map || !ha->rsp_q_map[0])
+		return;
+	rsp = ha->rsp_q_map[0];
 
 	if (ha->flags.msix_enabled)
 		qla24xx_disable_msix(ha);
diff --git a/drivers/scsi/qla2xxx/qla_mbx.c b/drivers/scsi/qla2xxx/qla_mbx.c
index b4a2339..d5ce92c 100644
--- a/drivers/scsi/qla2xxx/qla_mbx.c
+++ b/drivers/scsi/qla2xxx/qla_mbx.c
@@ -5,6 +5,7 @@
  * See LICENSE.qla2xxx for copyright and licensing details.
  */
 #include "qla_def.h"
+#include "qla_target.h"
 
 #include <linux/delay.h>
 #include <linux/gfp.h>
@@ -270,11 +271,8 @@
 			ictrl = RD_REG_WORD(&reg->isp.ictrl);
 		}
 		ql_dbg(ql_dbg_mbx + ql_dbg_buffer, vha, 0x1119,
-		    "MBX Command timeout for cmd %x.\n", command);
-		ql_dbg(ql_dbg_mbx + ql_dbg_buffer, vha, 0x111a,
-		    "iocontrol=%x jiffies=%lx.\n", ictrl, jiffies);
-		ql_dbg(ql_dbg_mbx + ql_dbg_buffer, vha, 0x111b,
-		    "mb[0] = 0x%x.\n", mb0);
+		    "MBX Command timeout for cmd %x, iocontrol=%x jiffies=%lx "
+		    "mb[0]=0x%x\n", command, ictrl, jiffies, mb0);
 		ql_dump_regs(ql_dbg_mbx + ql_dbg_buffer, vha, 0x1019);
 
 		/*
@@ -320,7 +318,7 @@
 					    CRB_NIU_XG_PAUSE_CTL_P1);
 				}
 				ql_log(ql_log_info, base_vha, 0x101c,
-				    "Mailbox cmd timeout occured, cmd=0x%x, "
+				    "Mailbox cmd timeout occurred, cmd=0x%x, "
 				    "mb[0]=0x%x, eeh_busy=0x%x. Scheduling ISP "
 				    "abort.\n", command, mcp->mb[0],
 				    ha->flags.eeh_busy);
@@ -345,7 +343,7 @@
 					    CRB_NIU_XG_PAUSE_CTL_P1);
 				}
 				ql_log(ql_log_info, base_vha, 0x101e,
-				    "Mailbox cmd timeout occured, cmd=0x%x, "
+				    "Mailbox cmd timeout occurred, cmd=0x%x, "
 				    "mb[0]=0x%x. Scheduling ISP abort ",
 				    command, mcp->mb[0]);
 				set_bit(ABORT_ISP_ACTIVE, &vha->dpc_flags);
@@ -390,7 +388,8 @@
 	mbx_cmd_t mc;
 	mbx_cmd_t *mcp = &mc;
 
-	ql_dbg(ql_dbg_mbx, vha, 0x1022, "Entered %s.\n", __func__);
+	ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1022,
+	    "Entered %s.\n", __func__);
 
 	if (MSW(risc_addr) || IS_FWI2_CAPABLE(ha)) {
 		mcp->mb[0] = MBC_LOAD_RISC_RAM_EXTENDED;
@@ -424,7 +423,8 @@
 		ql_dbg(ql_dbg_mbx, vha, 0x1023,
 		    "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]);
 	} else {
-		ql_dbg(ql_dbg_mbx, vha, 0x1024, "Done %s.\n", __func__);
+		ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1024,
+		    "Done %s.\n", __func__);
 	}
 
 	return rval;
@@ -454,7 +454,8 @@
 	mbx_cmd_t mc;
 	mbx_cmd_t *mcp = &mc;
 
-	ql_dbg(ql_dbg_mbx, vha, 0x1025, "Entered %s.\n", __func__);
+	ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1025,
+	    "Entered %s.\n", __func__);
 
 	mcp->mb[0] = MBC_EXECUTE_FIRMWARE;
 	mcp->out_mb = MBX_0;
@@ -489,10 +490,11 @@
 		    "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]);
 	} else {
 		if (IS_FWI2_CAPABLE(ha)) {
-			ql_dbg(ql_dbg_mbx, vha, 0x1027,
+			ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1027,
 			    "Done exchanges=%x.\n", mcp->mb[1]);
 		} else {
-			ql_dbg(ql_dbg_mbx, vha, 0x1028, "Done %s.\n", __func__);
+			ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1028,
+			    "Done %s.\n", __func__);
 		}
 	}
 
@@ -523,7 +525,8 @@
 	mbx_cmd_t	*mcp = &mc;
 	struct qla_hw_data *ha = vha->hw;
 
-	ql_dbg(ql_dbg_mbx, vha, 0x1029, "Entered %s.\n", __func__);
+	ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1029,
+	    "Entered %s.\n", __func__);
 
 	mcp->mb[0] = MBC_GET_FIRMWARE_VERSION;
 	mcp->out_mb = MBX_0;
@@ -561,11 +564,11 @@
 			ha->fw_attributes_h = mcp->mb[15];
 			ha->fw_attributes_ext[0] = mcp->mb[16];
 			ha->fw_attributes_ext[1] = mcp->mb[17];
-			ql_dbg(ql_dbg_mbx, vha, 0x1139,
+			ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1139,
 			    "%s: FW_attributes Upper: 0x%x, Lower: 0x%x.\n",
 			    __func__, mcp->mb[15], mcp->mb[6]);
 		} else
-			ql_dbg(ql_dbg_mbx, vha, 0x112f,
+			ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x112f,
 			    "%s: FwAttributes [Upper]  invalid, MB6:%04x\n",
 			    __func__, mcp->mb[6]);
 	}
@@ -576,7 +579,8 @@
 		ql_dbg(ql_dbg_mbx, vha, 0x102a, "Failed=%x.\n", rval);
 	} else {
 		/*EMPTY*/
-		ql_dbg(ql_dbg_mbx, vha, 0x102b, "Done %s.\n", __func__);
+		ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x102b,
+		    "Done %s.\n", __func__);
 	}
 	return rval;
 }
@@ -602,7 +606,8 @@
 	mbx_cmd_t mc;
 	mbx_cmd_t *mcp = &mc;
 
-	ql_dbg(ql_dbg_mbx, vha, 0x102c, "Entered %s.\n", __func__);
+	ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x102c,
+	    "Entered %s.\n", __func__);
 
 	mcp->mb[0] = MBC_GET_FIRMWARE_OPTION;
 	mcp->out_mb = MBX_0;
@@ -620,7 +625,8 @@
 		fwopts[2] = mcp->mb[2];
 		fwopts[3] = mcp->mb[3];
 
-		ql_dbg(ql_dbg_mbx, vha, 0x102e, "Done %s.\n", __func__);
+		ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x102e,
+		    "Done %s.\n", __func__);
 	}
 
 	return rval;
@@ -648,7 +654,8 @@
 	mbx_cmd_t mc;
 	mbx_cmd_t *mcp = &mc;
 
-	ql_dbg(ql_dbg_mbx, vha, 0x102f, "Entered %s.\n", __func__);
+	ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x102f,
+	    "Entered %s.\n", __func__);
 
 	mcp->mb[0] = MBC_SET_FIRMWARE_OPTION;
 	mcp->mb[1] = fwopts[1];
@@ -676,7 +683,8 @@
 		    "Failed=%x (%x/%x).\n", rval, mcp->mb[0], mcp->mb[1]);
 	} else {
 		/*EMPTY*/
-		ql_dbg(ql_dbg_mbx, vha, 0x1031, "Done %s.\n", __func__);
+		ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1031,
+		    "Done %s.\n", __func__);
 	}
 
 	return rval;
@@ -704,7 +712,8 @@
 	mbx_cmd_t mc;
 	mbx_cmd_t *mcp = &mc;
 
-	ql_dbg(ql_dbg_mbx, vha, 0x1032, "Entered %s.\n", __func__);
+	ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1032,
+	    "Entered %s.\n", __func__);
 
 	mcp->mb[0] = MBC_MAILBOX_REGISTER_TEST;
 	mcp->mb[1] = 0xAAAA;
@@ -734,7 +743,8 @@
 		ql_dbg(ql_dbg_mbx, vha, 0x1033, "Failed=%x.\n", rval);
 	} else {
 		/*EMPTY*/
-		ql_dbg(ql_dbg_mbx, vha, 0x1034, "Done %s.\n", __func__);
+		ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1034,
+		    "Done %s.\n", __func__);
 	}
 
 	return rval;
@@ -762,7 +772,8 @@
 	mbx_cmd_t mc;
 	mbx_cmd_t *mcp = &mc;
 
-	ql_dbg(ql_dbg_mbx, vha, 0x1035, "Entered %s.\n", __func__);
+	ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1035,
+	    "Entered %s.\n", __func__);
 
 	mcp->mb[0] = MBC_VERIFY_CHECKSUM;
 	mcp->out_mb = MBX_0;
@@ -787,7 +798,8 @@
 		    "Failed=%x chm sum=%x.\n", rval, IS_FWI2_CAPABLE(vha->hw) ?
 		    (mcp->mb[2] << 16) | mcp->mb[1] : mcp->mb[1]);
 	} else {
-		ql_dbg(ql_dbg_mbx, vha, 0x1037, "Done %s.\n", __func__);
+		ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1037,
+		    "Done %s.\n", __func__);
 	}
 
 	return rval;
@@ -819,7 +831,8 @@
 	mbx_cmd_t	mc;
 	mbx_cmd_t	*mcp = &mc;
 
-	ql_dbg(ql_dbg_mbx, vha, 0x1038, "Entered %s.\n", __func__);
+	ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1038,
+	    "Entered %s.\n", __func__);
 
 	mcp->mb[0] = MBC_IOCB_COMMAND_A64;
 	mcp->mb[1] = 0;
@@ -842,7 +855,8 @@
 		/* Mask reserved bits. */
 		sts_entry->entry_status &=
 		    IS_FWI2_CAPABLE(vha->hw) ? RF_MASK_24XX : RF_MASK;
-		ql_dbg(ql_dbg_mbx, vha, 0x103a, "Done %s.\n", __func__);
+		ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x103a,
+		    "Done %s.\n", __func__);
 	}
 
 	return rval;
@@ -884,7 +898,8 @@
 	struct req_que *req = vha->req;
 	struct scsi_cmnd *cmd = GET_CMD_SP(sp);
 
-	ql_dbg(ql_dbg_mbx, vha, 0x103b, "Entered %s.\n", __func__);
+	ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x103b,
+	    "Entered %s.\n", __func__);
 
 	spin_lock_irqsave(&ha->hardware_lock, flags);
 	for (handle = 1; handle < MAX_OUTSTANDING_COMMANDS; handle++) {
@@ -915,7 +930,8 @@
 	if (rval != QLA_SUCCESS) {
 		ql_dbg(ql_dbg_mbx, vha, 0x103c, "Failed=%x.\n", rval);
 	} else {
-		ql_dbg(ql_dbg_mbx, vha, 0x103d, "Done %s.\n", __func__);
+		ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x103d,
+		    "Done %s.\n", __func__);
 	}
 
 	return rval;
@@ -934,7 +950,8 @@
 	l = l;
 	vha = fcport->vha;
 
-	ql_dbg(ql_dbg_mbx, vha, 0x103e, "Entered %s.\n", __func__);
+	ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x103e,
+	    "Entered %s.\n", __func__);
 
 	req = vha->hw->req_q_map[0];
 	rsp = req->rsp;
@@ -955,7 +972,8 @@
 	mcp->flags = 0;
 	rval = qla2x00_mailbox_command(vha, mcp);
 	if (rval != QLA_SUCCESS) {
-		ql_dbg(ql_dbg_mbx, vha, 0x103f, "Failed=%x.\n", rval);
+		ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x103f,
+		    "Failed=%x.\n", rval);
 	}
 
 	/* Issue marker IOCB. */
@@ -965,7 +983,8 @@
 		ql_dbg(ql_dbg_mbx, vha, 0x1040,
 		    "Failed to issue marker IOCB (%x).\n", rval2);
 	} else {
-		ql_dbg(ql_dbg_mbx, vha, 0x1041, "Done %s.\n", __func__);
+		ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1041,
+		    "Done %s.\n", __func__);
 	}
 
 	return rval;
@@ -983,7 +1002,8 @@
 
 	vha = fcport->vha;
 
-	ql_dbg(ql_dbg_mbx, vha, 0x1042, "Entered %s.\n", __func__);
+	ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1042,
+	    "Entered %s.\n", __func__);
 
 	req = vha->hw->req_q_map[0];
 	rsp = req->rsp;
@@ -1012,7 +1032,8 @@
 		ql_dbg(ql_dbg_mbx, vha, 0x1044,
 		    "Failed to issue marker IOCB (%x).\n", rval2);
 	} else {
-		ql_dbg(ql_dbg_mbx, vha, 0x1045, "Done %s.\n", __func__);
+		ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1045,
+		    "Done %s.\n", __func__);
 	}
 
 	return rval;
@@ -1046,7 +1067,8 @@
 	mbx_cmd_t mc;
 	mbx_cmd_t *mcp = &mc;
 
-	ql_dbg(ql_dbg_mbx, vha, 0x1046, "Entered %s.\n", __func__);
+	ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1046,
+	    "Entered %s.\n", __func__);
 
 	mcp->mb[0] = MBC_GET_ADAPTER_LOOP_ID;
 	mcp->mb[9] = vha->vp_idx;
@@ -1074,7 +1096,8 @@
 		/*EMPTY*/
 		ql_dbg(ql_dbg_mbx, vha, 0x1047, "Failed=%x.\n", rval);
 	} else {
-		ql_dbg(ql_dbg_mbx, vha, 0x1048, "Done %s.\n", __func__);
+		ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1048,
+		    "Done %s.\n", __func__);
 
 		if (IS_CNA_CAPABLE(vha->hw)) {
 			vha->fcoe_vlan_id = mcp->mb[9] & 0xfff;
@@ -1115,7 +1138,8 @@
 	mbx_cmd_t mc;
 	mbx_cmd_t *mcp = &mc;
 
-	ql_dbg(ql_dbg_mbx, vha, 0x1049, "Entered %s.\n", __func__);
+	ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1049,
+	    "Entered %s.\n", __func__);
 
 	mcp->mb[0] = MBC_GET_RETRY_COUNT;
 	mcp->out_mb = MBX_0;
@@ -1138,7 +1162,7 @@
 			*tov = ratov;
 		}
 
-		ql_dbg(ql_dbg_mbx, vha, 0x104b,
+		ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x104b,
 		    "Done %s mb3=%d ratov=%d.\n", __func__, mcp->mb[3], ratov);
 	}
 
@@ -1170,7 +1194,8 @@
 	mbx_cmd_t *mcp = &mc;
 	struct qla_hw_data *ha = vha->hw;
 
-	ql_dbg(ql_dbg_mbx, vha, 0x104c, "Entered %s.\n", __func__);
+	ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x104c,
+	    "Entered %s.\n", __func__);
 
 	if (IS_QLA82XX(ha) && ql2xdbwr)
 		qla82xx_wr_32(ha, ha->nxdb_wr_ptr,
@@ -1213,13 +1238,104 @@
 		    rval, mcp->mb[0], mcp->mb[1], mcp->mb[2], mcp->mb[3]);
 	} else {
 		/*EMPTY*/
-		ql_dbg(ql_dbg_mbx, vha, 0x104e, "Done %s.\n", __func__);
+		ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x104e,
+		    "Done %s.\n", __func__);
 	}
 
 	return rval;
 }
 
 /*
+ * qla2x00_get_node_name_list
+ *      Issue get node name list mailbox command, kmalloc()
+ *      and return the resulting list. Caller must kfree() it!
+ *
+ * Input:
+ *      ha = adapter state pointer.
+ *      out_data = resulting list
+ *      out_len = length of the resulting list
+ *
+ * Returns:
+ *      qla2x00 local function return status code.
+ *
+ * Context:
+ *      Kernel context.
+ */
+int
+qla2x00_get_node_name_list(scsi_qla_host_t *vha, void **out_data, int *out_len)
+{
+	struct qla_hw_data *ha = vha->hw;
+	struct qla_port_24xx_data *list = NULL;
+	void *pmap;
+	mbx_cmd_t mc;
+	dma_addr_t pmap_dma;
+	ulong dma_size;
+	int rval, left;
+
+	left = 1;
+	while (left > 0) {
+		dma_size = left * sizeof(*list);
+		pmap = dma_alloc_coherent(&ha->pdev->dev, dma_size,
+					 &pmap_dma, GFP_KERNEL);
+		if (!pmap) {
+			ql_log(ql_log_warn, vha, 0x113f,
+			    "%s(%ld): DMA allocation failed, size %ld.\n",
+			    __func__, vha->host_no, dma_size);
+			rval = QLA_MEMORY_ALLOC_FAILED;
+			goto out;
+		}
+
+		mc.mb[0] = MBC_PORT_NODE_NAME_LIST;
+		mc.mb[1] = BIT_1 | BIT_3;
+		mc.mb[2] = MSW(pmap_dma);
+		mc.mb[3] = LSW(pmap_dma);
+		mc.mb[6] = MSW(MSD(pmap_dma));
+		mc.mb[7] = LSW(MSD(pmap_dma));
+		mc.mb[8] = dma_size;
+		mc.out_mb = MBX_0|MBX_1|MBX_2|MBX_3|MBX_6|MBX_7|MBX_8;
+		mc.in_mb = MBX_0|MBX_1;
+		mc.tov = 30;
+		mc.flags = MBX_DMA_IN;
+
+		rval = qla2x00_mailbox_command(vha, &mc);
+		if (rval != QLA_SUCCESS) {
+			if ((mc.mb[0] == MBS_COMMAND_ERROR) &&
+			    (mc.mb[1] == 0xA)) {
+				left += le16_to_cpu(mc.mb[2]) /
+				    sizeof(struct qla_port_24xx_data);
+				goto restart;
+			}
+			goto out_free;
+		}
+
+		left = 0;
+
+		list = kzalloc(dma_size, GFP_KERNEL);
+		if (!list) {
+			ql_log(ql_log_warn, vha, 0x1140,
+			    "%s(%ld): failed to allocate node names list "
+			    "structure.\n", __func__, vha->host_no);
+			rval = QLA_MEMORY_ALLOC_FAILED;
+			goto out_free;
+		}
+
+		memcpy(list, pmap, dma_size);
+restart:
+		dma_free_coherent(&ha->pdev->dev, dma_size, pmap, pmap_dma);
+	}
+
+	*out_data = list;
+	*out_len = dma_size;
+
+out:
+	return rval;
+
+out_free:
+	dma_free_coherent(&ha->pdev->dev, dma_size, pmap, pmap_dma);
+	return rval;
+}
+
+/*
  * qla2x00_get_port_database
  *	Issue normal/enhanced get port database mailbox command
  *	and copy device name as necessary.
@@ -1246,7 +1362,8 @@
 	dma_addr_t pd_dma;
 	struct qla_hw_data *ha = vha->hw;
 
-	ql_dbg(ql_dbg_mbx, vha, 0x104f, "Entered %s.\n", __func__);
+	ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x104f,
+	    "Entered %s.\n", __func__);
 
 	pd24 = NULL;
 	pd = dma_pool_alloc(ha->s_dma_pool, GFP_KERNEL, &pd_dma);
@@ -1326,6 +1443,13 @@
 			fcport->port_type = FCT_INITIATOR;
 		else
 			fcport->port_type = FCT_TARGET;
+
+		/* Passback COS information. */
+		fcport->supported_classes = (pd24->flags & PDF_CLASS_2) ?
+				FC_COS_CLASS2 : FC_COS_CLASS3;
+
+		if (pd24->prli_svc_param_word_3[0] & BIT_7)
+			fcport->flags |= FCF_CONF_COMP_SUPPORTED;
 	} else {
 		uint64_t zero = 0;
 
@@ -1378,7 +1502,8 @@
 		    "Failed=%x mb[0]=%x mb[1]=%x.\n", rval,
 		    mcp->mb[0], mcp->mb[1]);
 	} else {
-		ql_dbg(ql_dbg_mbx, vha, 0x1053, "Done %s.\n", __func__);
+		ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1053,
+		    "Done %s.\n", __func__);
 	}
 
 	return rval;
@@ -1407,7 +1532,8 @@
 	mbx_cmd_t mc;
 	mbx_cmd_t *mcp = &mc;
 
-	ql_dbg(ql_dbg_mbx, vha, 0x1054, "Entered %s.\n", __func__);
+	ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1054,
+	    "Entered %s.\n", __func__);
 
 	mcp->mb[0] = MBC_GET_FIRMWARE_STATE;
 	mcp->out_mb = MBX_0;
@@ -1433,7 +1559,8 @@
 		ql_dbg(ql_dbg_mbx, vha, 0x1055, "Failed=%x.\n", rval);
 	} else {
 		/*EMPTY*/
-		ql_dbg(ql_dbg_mbx, vha, 0x1056, "Done %s.\n", __func__);
+		ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1056,
+		    "Done %s.\n", __func__);
 	}
 
 	return rval;
@@ -1465,7 +1592,8 @@
 	mbx_cmd_t mc;
 	mbx_cmd_t *mcp = &mc;
 
-	ql_dbg(ql_dbg_mbx, vha, 0x1057, "Entered %s.\n", __func__);
+	ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1057,
+	    "Entered %s.\n", __func__);
 
 	mcp->mb[0] = MBC_GET_PORT_NAME;
 	mcp->mb[9] = vha->vp_idx;
@@ -1499,7 +1627,8 @@
 			name[7] = LSB(mcp->mb[7]);
 		}
 
-		ql_dbg(ql_dbg_mbx, vha, 0x1059, "Done %s.\n", __func__);
+		ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1059,
+		    "Done %s.\n", __func__);
 	}
 
 	return rval;
@@ -1527,7 +1656,8 @@
 	mbx_cmd_t mc;
 	mbx_cmd_t *mcp = &mc;
 
-	ql_dbg(ql_dbg_mbx, vha, 0x105a, "Entered %s.\n", __func__);
+	ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x105a,
+	    "Entered %s.\n", __func__);
 
 	if (IS_CNA_CAPABLE(vha->hw)) {
 		/* Logout across all FCFs. */
@@ -1564,7 +1694,8 @@
 		ql_dbg(ql_dbg_mbx, vha, 0x105b, "Failed=%x.\n", rval);
 	} else {
 		/*EMPTY*/
-		ql_dbg(ql_dbg_mbx, vha, 0x105c, "Done %s.\n", __func__);
+		ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x105c,
+		    "Done %s.\n", __func__);
 	}
 
 	return rval;
@@ -1596,9 +1727,10 @@
 	mbx_cmd_t mc;
 	mbx_cmd_t *mcp = &mc;
 
-	ql_dbg(ql_dbg_mbx, vha, 0x105d, "Entered %s.\n", __func__);
+	ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x105d,
+	    "Entered %s.\n", __func__);
 
-	ql_dbg(ql_dbg_mbx, vha, 0x105e,
+	ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x105e,
 	    "Retry cnt=%d ratov=%d total tov=%d.\n",
 	    vha->hw->retry_count, vha->hw->login_timeout, mcp->tov);
 
@@ -1622,7 +1754,8 @@
 		    rval, mcp->mb[0], mcp->mb[1]);
 	} else {
 		/*EMPTY*/
-		ql_dbg(ql_dbg_mbx, vha, 0x1060, "Done %s.\n", __func__);
+		ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1060,
+		    "Done %s.\n", __func__);
 	}
 
 	return rval;
@@ -1641,7 +1774,8 @@
 	struct req_que *req;
 	struct rsp_que *rsp;
 
-	ql_dbg(ql_dbg_mbx, vha, 0x1061, "Entered %s.\n", __func__);
+	ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1061,
+	    "Entered %s.\n", __func__);
 
 	if (ha->flags.cpu_affinity_enabled)
 		req = ha->req_q_map[0];
@@ -1715,7 +1849,8 @@
 			break;
 		}
 	} else {
-		ql_dbg(ql_dbg_mbx, vha, 0x1066, "Done %s.\n", __func__);
+		ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1066,
+		    "Done %s.\n", __func__);
 
 		iop[0] = le32_to_cpu(lg->io_parameter[0]);
 
@@ -1733,6 +1868,10 @@
 			mb[10] |= BIT_0;	/* Class 2. */
 		if (lg->io_parameter[9] || lg->io_parameter[10])
 			mb[10] |= BIT_1;	/* Class 3. */
+		if (lg->io_parameter[0] & __constant_cpu_to_le32(BIT_7))
+			mb[10] |= BIT_7;	/* Confirmed Completion
+						 * Allowed
+						 */
 	}
 
 	dma_pool_free(ha->s_dma_pool, lg, lg_dma);
@@ -1770,7 +1909,8 @@
 	mbx_cmd_t *mcp = &mc;
 	struct qla_hw_data *ha = vha->hw;
 
-	ql_dbg(ql_dbg_mbx, vha, 0x1067, "Entered %s.\n", __func__);
+	ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1067,
+	    "Entered %s.\n", __func__);
 
 	mcp->mb[0] = MBC_LOGIN_FABRIC_PORT;
 	mcp->out_mb = MBX_3|MBX_2|MBX_1|MBX_0;
@@ -1818,7 +1958,8 @@
 		    rval, mcp->mb[0], mcp->mb[1], mcp->mb[2]);
 	} else {
 		/*EMPTY*/
-		ql_dbg(ql_dbg_mbx, vha, 0x1069, "Done %s.\n", __func__);
+		ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1069,
+		    "Done %s.\n", __func__);
 	}
 
 	return rval;
@@ -1849,7 +1990,8 @@
 	mbx_cmd_t *mcp = &mc;
 	struct qla_hw_data *ha = vha->hw;
 
-	ql_dbg(ql_dbg_mbx, vha, 0x106a, "Entered %s.\n", __func__);
+	ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x106a,
+	    "Entered %s.\n", __func__);
 
 	if (IS_FWI2_CAPABLE(ha))
 		return qla24xx_login_fabric(vha, fcport->loop_id,
@@ -1891,7 +2033,8 @@
 		    rval, mcp->mb[0], mcp->mb[1], mcp->mb[6], mcp->mb[7]);
 	} else {
 		/*EMPTY*/
-		ql_dbg(ql_dbg_mbx, vha, 0x106c, "Done %s.\n", __func__);
+		ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x106c,
+		    "Done %s.\n", __func__);
 	}
 
 	return (rval);
@@ -1908,7 +2051,8 @@
 	struct req_que *req;
 	struct rsp_que *rsp;
 
-	ql_dbg(ql_dbg_mbx, vha, 0x106d, "Entered %s.\n", __func__);
+	ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x106d,
+	    "Entered %s.\n", __func__);
 
 	lg = dma_pool_alloc(ha->s_dma_pool, GFP_KERNEL, &lg_dma);
 	if (lg == NULL) {
@@ -1952,7 +2096,8 @@
 		    le32_to_cpu(lg->io_parameter[1]));
 	} else {
 		/*EMPTY*/
-		ql_dbg(ql_dbg_mbx, vha, 0x1072, "Done %s.\n", __func__);
+		ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1072,
+		    "Done %s.\n", __func__);
 	}
 
 	dma_pool_free(ha->s_dma_pool, lg, lg_dma);
@@ -1984,7 +2129,8 @@
 	mbx_cmd_t mc;
 	mbx_cmd_t *mcp = &mc;
 
-	ql_dbg(ql_dbg_mbx, vha, 0x1073, "Entered %s.\n", __func__);
+	ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1073,
+	    "Entered %s.\n", __func__);
 
 	mcp->mb[0] = MBC_LOGOUT_FABRIC_PORT;
 	mcp->out_mb = MBX_1|MBX_0;
@@ -2007,7 +2153,8 @@
 		    "Failed=%x mb[1]=%x.\n", rval, mcp->mb[1]);
 	} else {
 		/*EMPTY*/
-		ql_dbg(ql_dbg_mbx, vha, 0x1075, "Done %s.\n", __func__);
+		ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1075,
+		    "Done %s.\n", __func__);
 	}
 
 	return rval;
@@ -2035,7 +2182,8 @@
 	mbx_cmd_t mc;
 	mbx_cmd_t *mcp = &mc;
 
-	ql_dbg(ql_dbg_mbx, vha, 0x1076, "Entered %s.\n", __func__);
+	ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1076,
+	    "Entered %s.\n", __func__);
 
 	mcp->mb[0] = MBC_LIP_FULL_LOGIN;
 	mcp->mb[1] = IS_FWI2_CAPABLE(vha->hw) ? BIT_3 : 0;
@@ -2052,7 +2200,8 @@
 		ql_dbg(ql_dbg_mbx, vha, 0x1077, "Failed=%x.\n", rval);
 	} else {
 		/*EMPTY*/
-		ql_dbg(ql_dbg_mbx, vha, 0x1078, "Done %s.\n", __func__);
+		ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1078,
+		    "Done %s.\n", __func__);
 	}
 
 	return rval;
@@ -2078,7 +2227,8 @@
 	mbx_cmd_t mc;
 	mbx_cmd_t *mcp = &mc;
 
-	ql_dbg(ql_dbg_mbx, vha, 0x1079, "Entered %s.\n", __func__);
+	ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1079,
+	    "Entered %s.\n", __func__);
 
 	if (id_list == NULL)
 		return QLA_FUNCTION_FAILED;
@@ -2110,7 +2260,8 @@
 		ql_dbg(ql_dbg_mbx, vha, 0x107a, "Failed=%x.\n", rval);
 	} else {
 		*entries = mcp->mb[1];
-		ql_dbg(ql_dbg_mbx, vha, 0x107b, "Done %s.\n", __func__);
+		ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x107b,
+		    "Done %s.\n", __func__);
 	}
 
 	return rval;
@@ -2138,7 +2289,8 @@
 	mbx_cmd_t mc;
 	mbx_cmd_t *mcp = &mc;
 
-	ql_dbg(ql_dbg_mbx, vha, 0x107c, "Entered %s.\n", __func__);
+	ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x107c,
+	    "Entered %s.\n", __func__);
 
 	mcp->mb[0] = MBC_GET_RESOURCE_COUNTS;
 	mcp->out_mb = MBX_0;
@@ -2154,7 +2306,7 @@
 		ql_dbg(ql_dbg_mbx, vha, 0x107d,
 		    "Failed mb[0]=%x.\n", mcp->mb[0]);
 	} else {
-		ql_dbg(ql_dbg_mbx, vha, 0x107e,
+		ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x107e,
 		    "Done %s mb1=%x mb2=%x mb3=%x mb6=%x mb7=%x mb10=%x "
 		    "mb11=%x mb12=%x.\n", __func__, mcp->mb[1], mcp->mb[2],
 		    mcp->mb[3], mcp->mb[6], mcp->mb[7], mcp->mb[10],
@@ -2201,7 +2353,8 @@
 	dma_addr_t pmap_dma;
 	struct qla_hw_data *ha = vha->hw;
 
-	ql_dbg(ql_dbg_mbx, vha, 0x107f, "Entered %s.\n", __func__);
+	ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x107f,
+	    "Entered %s.\n", __func__);
 
 	pmap = dma_pool_alloc(ha->s_dma_pool, GFP_KERNEL, &pmap_dma);
 	if (pmap  == NULL) {
@@ -2224,7 +2377,7 @@
 	rval = qla2x00_mailbox_command(vha, mcp);
 
 	if (rval == QLA_SUCCESS) {
-		ql_dbg(ql_dbg_mbx, vha, 0x1081,
+		ql_dbg(ql_dbg_mbx + ql_dbg_buffer, vha, 0x1081,
 		    "mb0/mb1=%x/%X FC/AL position map size (%x).\n",
 		    mcp->mb[0], mcp->mb[1], (unsigned)pmap[0]);
 		ql_dump_buffer(ql_dbg_mbx + ql_dbg_buffer, vha, 0x111d,
@@ -2238,7 +2391,8 @@
 	if (rval != QLA_SUCCESS) {
 		ql_dbg(ql_dbg_mbx, vha, 0x1082, "Failed=%x.\n", rval);
 	} else {
-		ql_dbg(ql_dbg_mbx, vha, 0x1083, "Done %s.\n", __func__);
+		ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1083,
+		    "Done %s.\n", __func__);
 	}
 
 	return rval;
@@ -2267,7 +2421,8 @@
 	uint32_t *siter, *diter, dwords;
 	struct qla_hw_data *ha = vha->hw;
 
-	ql_dbg(ql_dbg_mbx, vha, 0x1084, "Entered %s.\n", __func__);
+	ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1084,
+	    "Entered %s.\n", __func__);
 
 	mcp->mb[0] = MBC_GET_LINK_STATUS;
 	mcp->mb[2] = MSW(stats_dma);
@@ -2301,7 +2456,8 @@
 			rval = QLA_FUNCTION_FAILED;
 		} else {
 			/* Copy over data -- firmware data is LE. */
-			ql_dbg(ql_dbg_mbx, vha, 0x1086, "Done %s.\n", __func__);
+			ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1086,
+			    "Done %s.\n", __func__);
 			dwords = offsetof(struct link_statistics, unused1) / 4;
 			siter = diter = &stats->link_fail_cnt;
 			while (dwords--)
@@ -2324,7 +2480,8 @@
 	mbx_cmd_t *mcp = &mc;
 	uint32_t *siter, *diter, dwords;
 
-	ql_dbg(ql_dbg_mbx, vha, 0x1088, "Entered %s.\n", __func__);
+	ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1088,
+	    "Entered %s.\n", __func__);
 
 	mcp->mb[0] = MBC_GET_LINK_PRIV_STATS;
 	mcp->mb[2] = MSW(stats_dma);
@@ -2346,7 +2503,8 @@
 			    "Failed mb[0]=%x.\n", mcp->mb[0]);
 			rval = QLA_FUNCTION_FAILED;
 		} else {
-			ql_dbg(ql_dbg_mbx, vha, 0x108a, "Done %s.\n", __func__);
+			ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x108a,
+			    "Done %s.\n", __func__);
 			/* Copy over data -- firmware data is LE. */
 			dwords = sizeof(struct link_statistics) / 4;
 			siter = diter = &stats->link_fail_cnt;
@@ -2375,7 +2533,8 @@
 	struct qla_hw_data *ha = vha->hw;
 	struct req_que *req = vha->req;
 
-	ql_dbg(ql_dbg_mbx, vha, 0x108c, "Entered %s.\n", __func__);
+	ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x108c,
+	    "Entered %s.\n", __func__);
 
 	spin_lock_irqsave(&ha->hardware_lock, flags);
 	for (handle = 1; handle < MAX_OUTSTANDING_COMMANDS; handle++) {
@@ -2404,7 +2563,7 @@
 	abt->port_id[0] = fcport->d_id.b.al_pa;
 	abt->port_id[1] = fcport->d_id.b.area;
 	abt->port_id[2] = fcport->d_id.b.domain;
-	abt->vp_index = fcport->vp_idx;
+	abt->vp_index = fcport->vha->vp_idx;
 
 	abt->req_que_no = cpu_to_le16(req->id);
 
@@ -2423,7 +2582,8 @@
 		    le16_to_cpu(abt->nport_handle));
 		rval = QLA_FUNCTION_FAILED;
 	} else {
-		ql_dbg(ql_dbg_mbx, vha, 0x1091, "Done %s.\n", __func__);
+		ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1091,
+		    "Done %s.\n", __func__);
 	}
 
 	dma_pool_free(ha->s_dma_pool, abt, abt_dma);
@@ -2455,7 +2615,8 @@
 	ha = vha->hw;
 	req = vha->req;
 
-	ql_dbg(ql_dbg_mbx, vha, 0x1092, "Entered %s.\n", __func__);
+	ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1092,
+	    "Entered %s.\n", __func__);
 
 	if (ha->flags.cpu_affinity_enabled)
 		rsp = ha->rsp_q_map[tag + 1];
@@ -2478,7 +2639,7 @@
 	tsk->p.tsk.port_id[0] = fcport->d_id.b.al_pa;
 	tsk->p.tsk.port_id[1] = fcport->d_id.b.area;
 	tsk->p.tsk.port_id[2] = fcport->d_id.b.domain;
-	tsk->p.tsk.vp_index = fcport->vp_idx;
+	tsk->p.tsk.vp_index = fcport->vha->vp_idx;
 	if (type == TCF_LUN_RESET) {
 		int_to_scsilun(l, &tsk->p.tsk.lun);
 		host_to_fcp_swap((uint8_t *)&tsk->p.tsk.lun,
@@ -2504,7 +2665,7 @@
 	} else if (le16_to_cpu(sts->scsi_status) &
 	    SS_RESPONSE_INFO_LEN_VALID) {
 		if (le32_to_cpu(sts->rsp_data_len) < 4) {
-			ql_dbg(ql_dbg_mbx, vha, 0x1097,
+			ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1097,
 			    "Ignoring inconsistent data length -- not enough "
 			    "response info (%d).\n",
 			    le32_to_cpu(sts->rsp_data_len));
@@ -2523,7 +2684,8 @@
 		ql_dbg(ql_dbg_mbx, vha, 0x1099,
 		    "Failed to issue marker IOCB (%x).\n", rval2);
 	} else {
-		ql_dbg(ql_dbg_mbx, vha, 0x109a, "Done %s.\n", __func__);
+		ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x109a,
+		    "Done %s.\n", __func__);
 	}
 
 	dma_pool_free(ha->s_dma_pool, tsk, tsk_dma);
@@ -2564,7 +2726,8 @@
 	if (!IS_QLA23XX(ha) && !IS_FWI2_CAPABLE(ha))
 		return QLA_FUNCTION_FAILED;
 
-	ql_dbg(ql_dbg_mbx, vha, 0x109b, "Entered %s.\n", __func__);
+	ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x109b,
+	    "Entered %s.\n", __func__);
 
 	mcp->mb[0] = MBC_GEN_SYSTEM_ERROR;
 	mcp->out_mb = MBX_0;
@@ -2576,7 +2739,8 @@
 	if (rval != QLA_SUCCESS) {
 		ql_dbg(ql_dbg_mbx, vha, 0x109c, "Failed=%x.\n", rval);
 	} else {
-		ql_dbg(ql_dbg_mbx, vha, 0x109d, "Done %s.\n", __func__);
+		ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x109d,
+		    "Done %s.\n", __func__);
 	}
 
 	return rval;
@@ -2596,7 +2760,8 @@
 	mbx_cmd_t mc;
 	mbx_cmd_t *mcp = &mc;
 
-	ql_dbg(ql_dbg_mbx, vha, 0x109e, "Entered %s.\n", __func__);
+	ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x109e,
+	    "Entered %s.\n", __func__);
 
 	mcp->mb[0] = MBC_SERDES_PARAMS;
 	mcp->mb[1] = BIT_0;
@@ -2615,7 +2780,8 @@
 		    "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]);
 	} else {
 		/*EMPTY*/
-		ql_dbg(ql_dbg_mbx, vha, 0x10a0, "Done %s.\n", __func__);
+		ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10a0,
+		    "Done %s.\n", __func__);
 	}
 
 	return rval;
@@ -2631,7 +2797,8 @@
 	if (!IS_FWI2_CAPABLE(vha->hw))
 		return QLA_FUNCTION_FAILED;
 
-	ql_dbg(ql_dbg_mbx, vha, 0x10a1, "Entered %s.\n", __func__);
+	ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10a1,
+	    "Entered %s.\n", __func__);
 
 	mcp->mb[0] = MBC_STOP_FIRMWARE;
 	mcp->mb[1] = 0;
@@ -2646,7 +2813,8 @@
 		if (mcp->mb[0] == MBS_INVALID_COMMAND)
 			rval = QLA_INVALID_COMMAND;
 	} else {
-		ql_dbg(ql_dbg_mbx, vha, 0x10a3, "Done %s.\n", __func__);
+		ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10a3,
+		    "Done %s.\n", __func__);
 	}
 
 	return rval;
@@ -2660,7 +2828,8 @@
 	mbx_cmd_t mc;
 	mbx_cmd_t *mcp = &mc;
 
-	ql_dbg(ql_dbg_mbx, vha, 0x10a4, "Entered %s.\n", __func__);
+	ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10a4,
+	    "Entered %s.\n", __func__);
 
 	if (!IS_FWI2_CAPABLE(vha->hw))
 		return QLA_FUNCTION_FAILED;
@@ -2686,7 +2855,8 @@
 		    "Failed=%x mb[0]=%x mb[1]=%x.\n",
 		    rval, mcp->mb[0], mcp->mb[1]);
 	} else {
-		ql_dbg(ql_dbg_mbx, vha, 0x10a6, "Done %s.\n", __func__);
+		ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10a6,
+		    "Done %s.\n", __func__);
 	}
 
 	return rval;
@@ -2699,7 +2869,8 @@
 	mbx_cmd_t mc;
 	mbx_cmd_t *mcp = &mc;
 
-	ql_dbg(ql_dbg_mbx, vha, 0x10a7, "Entered %s.\n", __func__);
+	ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10a7,
+	    "Entered %s.\n", __func__);
 
 	if (!IS_FWI2_CAPABLE(vha->hw))
 		return QLA_FUNCTION_FAILED;
@@ -2719,7 +2890,8 @@
 		    "Failed=%x mb[0]=%x mb[1]=%x.\n",
 		    rval, mcp->mb[0], mcp->mb[1]);
 	} else {
-		ql_dbg(ql_dbg_mbx, vha, 0x10a9, "Done %s.\n", __func__);
+		ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10a9,
+		    "Done %s.\n", __func__);
 	}
 
 	return rval;
@@ -2733,7 +2905,8 @@
 	mbx_cmd_t mc;
 	mbx_cmd_t *mcp = &mc;
 
-	ql_dbg(ql_dbg_mbx, vha, 0x10aa, "Entered %s.\n", __func__);
+	ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10aa,
+	    "Entered %s.\n", __func__);
 
 	if (!IS_QLA25XX(vha->hw) && !IS_QLA81XX(vha->hw) &&
 	    !IS_QLA83XX(vha->hw))
@@ -2764,7 +2937,8 @@
 		    "Failed=%x mb[0]=%x mb[1]=%x.\n",
 		    rval, mcp->mb[0], mcp->mb[1]);
 	} else {
-		ql_dbg(ql_dbg_mbx, vha, 0x10ac, "Done %s.\n", __func__);
+		ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10ac,
+		    "Done %s.\n", __func__);
 
 		if (mb)
 			memcpy(mb, mcp->mb, 8 * sizeof(*mb));
@@ -2782,7 +2956,8 @@
 	mbx_cmd_t mc;
 	mbx_cmd_t *mcp = &mc;
 
-	ql_dbg(ql_dbg_mbx, vha, 0x10ad, "Entered %s.\n", __func__);
+	ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10ad,
+	    "Entered %s.\n", __func__);
 
 	if (!IS_FWI2_CAPABLE(vha->hw))
 		return QLA_FUNCTION_FAILED;
@@ -2804,7 +2979,8 @@
 		    "Failed=%x mb[0]=%x mb[1]=%x.\n",
 		    rval, mcp->mb[0], mcp->mb[1]);
 	} else {
-		ql_dbg(ql_dbg_mbx, vha, 0x10af, "Done %s.\n", __func__);
+		ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10af,
+		    "Done %s.\n", __func__);
 
 		if (wr)
 			*wr = (uint64_t) mcp->mb[5] << 48 |
@@ -2829,7 +3005,8 @@
 	mbx_cmd_t mc;
 	mbx_cmd_t *mcp = &mc;
 
-	ql_dbg(ql_dbg_mbx, vha, 0x10b0, "Entered %s.\n", __func__);
+	ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10b0,
+	    "Entered %s.\n", __func__);
 
 	if (!IS_IIDMA_CAPABLE(vha->hw))
 		return QLA_FUNCTION_FAILED;
@@ -2854,7 +3031,8 @@
 	if (rval != QLA_SUCCESS) {
 		ql_dbg(ql_dbg_mbx, vha, 0x10b1, "Failed=%x.\n", rval);
 	} else {
-		ql_dbg(ql_dbg_mbx, vha, 0x10b2, "Done %s.\n", __func__);
+		ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10b2,
+		    "Done %s.\n", __func__);
 		if (port_speed)
 			*port_speed = mcp->mb[3];
 	}
@@ -2870,7 +3048,8 @@
 	mbx_cmd_t mc;
 	mbx_cmd_t *mcp = &mc;
 
-	ql_dbg(ql_dbg_mbx, vha, 0x10b3, "Entered %s.\n", __func__);
+	ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10b3,
+	    "Entered %s.\n", __func__);
 
 	if (!IS_IIDMA_CAPABLE(vha->hw))
 		return QLA_FUNCTION_FAILED;
@@ -2897,9 +3076,11 @@
 	}
 
 	if (rval != QLA_SUCCESS) {
-		ql_dbg(ql_dbg_mbx, vha, 0x10b4, "Failed=%x.\n", rval);
+		ql_dbg(ql_dbg_mbx, vha, 0x10b4,
+		    "Failed=%x.\n", rval);
 	} else {
-		ql_dbg(ql_dbg_mbx, vha, 0x10b5, "Done %s.\n", __func__);
+		ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10b5,
+		    "Done %s.\n", __func__);
 	}
 
 	return rval;
@@ -2915,24 +3096,25 @@
 	scsi_qla_host_t *vp;
 	unsigned long   flags;
 
-	ql_dbg(ql_dbg_mbx, vha, 0x10b6, "Entered %s.\n", __func__);
+	ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10b6,
+	    "Entered %s.\n", __func__);
 
 	if (rptid_entry->entry_status != 0)
 		return;
 
 	if (rptid_entry->format == 0) {
-		ql_dbg(ql_dbg_mbx, vha, 0x10b7,
+		ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10b7,
 		    "Format 0 : Number of VPs setup %d, number of "
 		    "VPs acquired %d.\n",
 		    MSB(le16_to_cpu(rptid_entry->vp_count)),
 		    LSB(le16_to_cpu(rptid_entry->vp_count)));
-		ql_dbg(ql_dbg_mbx, vha, 0x10b8,
+		ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10b8,
 		    "Primary port id %02x%02x%02x.\n",
 		    rptid_entry->port_id[2], rptid_entry->port_id[1],
 		    rptid_entry->port_id[0]);
 	} else if (rptid_entry->format == 1) {
 		vp_idx = LSB(stat);
-		ql_dbg(ql_dbg_mbx, vha, 0x10b9,
+		ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10b9,
 		    "Format 1: VP[%d] enabled - status %d - with "
 		    "port id %02x%02x%02x.\n", vp_idx, MSB(stat),
 		    rptid_entry->port_id[2], rptid_entry->port_id[1],
@@ -2999,7 +3181,8 @@
 
 	/* This can be called by the parent */
 
-	ql_dbg(ql_dbg_mbx, vha, 0x10bb, "Entered %s.\n", __func__);
+	ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10bb,
+	    "Entered %s.\n", __func__);
 
 	vpmod = dma_pool_alloc(ha->s_dma_pool, GFP_KERNEL, &vpmod_dma);
 	if (!vpmod) {
@@ -3015,6 +3198,9 @@
 	vpmod->vp_count = 1;
 	vpmod->vp_index1 = vha->vp_idx;
 	vpmod->options_idx1 = BIT_3|BIT_4|BIT_5;
+
+	qlt_modify_vp_config(vha, vpmod);
+
 	memcpy(vpmod->node_name_idx1, vha->node_name, WWN_SIZE);
 	memcpy(vpmod->port_name_idx1, vha->port_name, WWN_SIZE);
 	vpmod->entry_count = 1;
@@ -3035,7 +3221,8 @@
 		rval = QLA_FUNCTION_FAILED;
 	} else {
 		/* EMPTY */
-		ql_dbg(ql_dbg_mbx, vha, 0x10c0, "Done %s.\n", __func__);
+		ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10c0,
+		    "Done %s.\n", __func__);
 		fc_vport_set_state(vha->fc_vport, FC_VPORT_INITIALIZING);
 	}
 	dma_pool_free(ha->s_dma_pool, vpmod, vpmod_dma);
@@ -3069,7 +3256,7 @@
 	int	vp_index = vha->vp_idx;
 	struct scsi_qla_host *base_vha = pci_get_drvdata(ha->pdev);
 
-	ql_dbg(ql_dbg_mbx, vha, 0x10c1,
+	ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10c1,
 	    "Entered %s enabling index %d.\n", __func__, vp_index);
 
 	if (vp_index == 0 || vp_index >= ha->max_npiv_vports)
@@ -3112,7 +3299,8 @@
 		    le16_to_cpu(vce->comp_status));
 		rval = QLA_FUNCTION_FAILED;
 	} else {
-		ql_dbg(ql_dbg_mbx, vha, 0x10c6, "Done %s.\n", __func__);
+		ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10c6,
+		    "Done %s.\n", __func__);
 	}
 
 	dma_pool_free(ha->s_dma_pool, vce, vce_dma);
@@ -3149,14 +3337,8 @@
 	mbx_cmd_t mc;
 	mbx_cmd_t *mcp = &mc;
 
-	ql_dbg(ql_dbg_mbx, vha, 0x10c7, "Entered %s.\n", __func__);
-
-	/*
-	 * This command is implicitly executed by firmware during login for the
-	 * physical hosts
-	 */
-	if (vp_idx == 0)
-		return QLA_FUNCTION_FAILED;
+	ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10c7,
+	    "Entered %s.\n", __func__);
 
 	mcp->mb[0] = MBC_SEND_CHANGE_REQUEST;
 	mcp->mb[1] = format;
@@ -3185,7 +3367,8 @@
 	mbx_cmd_t mc;
 	mbx_cmd_t *mcp = &mc;
 
-	ql_dbg(ql_dbg_mbx, vha, 0x1009, "Entered %s.\n", __func__);
+	ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1009,
+	    "Entered %s.\n", __func__);
 
 	if (MSW(addr) || IS_FWI2_CAPABLE(vha->hw)) {
 		mcp->mb[0] = MBC_DUMP_RISC_RAM_EXTENDED;
@@ -3219,7 +3402,8 @@
 		ql_dbg(ql_dbg_mbx, vha, 0x1008,
 		    "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]);
 	} else {
-		ql_dbg(ql_dbg_mbx, vha, 0x1007, "Done %s.\n", __func__);
+		ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1007,
+		    "Done %s.\n", __func__);
 	}
 
 	return rval;
@@ -3244,7 +3428,8 @@
 	unsigned long flags;
 	struct qla_hw_data *ha = vha->hw;
 
-	ql_dbg(ql_dbg_mbx, vha, 0x10c8, "Entered %s.\n", __func__);
+	ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10c8,
+	    "Entered %s.\n", __func__);
 
 	mn = dma_pool_alloc(ha->s_dma_pool, GFP_KERNEL, &mn_dma);
 	if (mn == NULL) {
@@ -3285,7 +3470,7 @@
 		status[0] = le16_to_cpu(mn->p.rsp.comp_status);
 		status[1] = status[0] == CS_VCS_CHIP_FAILURE ?
 		    le16_to_cpu(mn->p.rsp.failure_code) : 0;
-		ql_dbg(ql_dbg_mbx, vha, 0x10ce,
+		ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10ce,
 		    "cs=%x fc=%x.\n", status[0], status[1]);
 
 		if (status[0] != CS_COMPLETE) {
@@ -3299,7 +3484,7 @@
 				retry = 1;
 			}
 		} else {
-			ql_dbg(ql_dbg_mbx, vha, 0x10d0,
+			ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10d0,
 			    "Firmware updated to %x.\n",
 			    le32_to_cpu(mn->p.rsp.fw_ver));
 
@@ -3316,9 +3501,11 @@
 	dma_pool_free(ha->s_dma_pool, mn, mn_dma);
 
 	if (rval != QLA_SUCCESS) {
-		ql_dbg(ql_dbg_mbx, vha, 0x10d1, "Failed=%x.\n", rval);
+		ql_dbg(ql_dbg_mbx, vha, 0x10d1,
+		    "Failed=%x.\n", rval);
 	} else {
-		ql_dbg(ql_dbg_mbx, vha, 0x10d2, "Done %s.\n", __func__);
+		ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10d2,
+		    "Done %s.\n", __func__);
 	}
 
 	return rval;
@@ -3334,7 +3521,8 @@
 	struct device_reg_25xxmq __iomem *reg;
 	struct qla_hw_data *ha = vha->hw;
 
-	ql_dbg(ql_dbg_mbx, vha, 0x10d3, "Entered %s.\n", __func__);
+	ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10d3,
+	    "Entered %s.\n", __func__);
 
 	mcp->mb[0] = MBC_INITIALIZE_MULTIQ;
 	mcp->mb[1] = req->options;
@@ -3388,7 +3576,8 @@
 		ql_dbg(ql_dbg_mbx, vha, 0x10d4,
 		    "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]);
 	} else {
-		ql_dbg(ql_dbg_mbx, vha, 0x10d5, "Done %s.\n", __func__);
+		ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10d5,
+		    "Done %s.\n", __func__);
 	}
 
 	return rval;
@@ -3404,7 +3593,8 @@
 	struct device_reg_25xxmq __iomem *reg;
 	struct qla_hw_data *ha = vha->hw;
 
-	ql_dbg(ql_dbg_mbx, vha, 0x10d6, "Entered %s.\n", __func__);
+	ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10d6,
+	    "Entered %s.\n", __func__);
 
 	mcp->mb[0] = MBC_INITIALIZE_MULTIQ;
 	mcp->mb[1] = rsp->options;
@@ -3456,7 +3646,8 @@
 		ql_dbg(ql_dbg_mbx, vha, 0x10d7,
 		    "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]);
 	} else {
-		ql_dbg(ql_dbg_mbx, vha, 0x10d8, "Done %s.\n", __func__);
+		ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10d8,
+		    "Done %s.\n", __func__);
 	}
 
 	return rval;
@@ -3469,7 +3660,8 @@
 	mbx_cmd_t mc;
 	mbx_cmd_t *mcp = &mc;
 
-	ql_dbg(ql_dbg_mbx, vha, 0x10d9, "Entered %s.\n", __func__);
+	ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10d9,
+	    "Entered %s.\n", __func__);
 
 	mcp->mb[0] = MBC_IDC_ACK;
 	memcpy(&mcp->mb[1], mb, QLA_IDC_ACK_REGS * sizeof(uint16_t));
@@ -3483,7 +3675,8 @@
 		ql_dbg(ql_dbg_mbx, vha, 0x10da,
 		    "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]);
 	} else {
-		ql_dbg(ql_dbg_mbx, vha, 0x10db, "Done %s.\n", __func__);
+		ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10db,
+		    "Done %s.\n", __func__);
 	}
 
 	return rval;
@@ -3496,7 +3689,8 @@
 	mbx_cmd_t mc;
 	mbx_cmd_t *mcp = &mc;
 
-	ql_dbg(ql_dbg_mbx, vha, 0x10dc, "Entered %s.\n", __func__);
+	ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10dc,
+	    "Entered %s.\n", __func__);
 
 	if (!IS_QLA81XX(vha->hw) && !IS_QLA83XX(vha->hw))
 		return QLA_FUNCTION_FAILED;
@@ -3514,7 +3708,8 @@
 		    "Failed=%x mb[0]=%x mb[1]=%x.\n",
 		    rval, mcp->mb[0], mcp->mb[1]);
 	} else {
-		ql_dbg(ql_dbg_mbx, vha, 0x10de, "Done %s.\n", __func__);
+		ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10de,
+		    "Done %s.\n", __func__);
 		*sector_size = mcp->mb[1];
 	}
 
@@ -3531,7 +3726,8 @@
 	if (!IS_QLA81XX(vha->hw) && !IS_QLA83XX(vha->hw))
 		return QLA_FUNCTION_FAILED;
 
-	ql_dbg(ql_dbg_mbx, vha, 0x10df, "Entered %s.\n", __func__);
+	ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10df,
+	    "Entered %s.\n", __func__);
 
 	mcp->mb[0] = MBC_FLASH_ACCESS_CTRL;
 	mcp->mb[1] = enable ? FAC_OPT_CMD_WRITE_ENABLE :
@@ -3547,7 +3743,8 @@
 		    "Failed=%x mb[0]=%x mb[1]=%x.\n",
 		    rval, mcp->mb[0], mcp->mb[1]);
 	} else {
-		ql_dbg(ql_dbg_mbx, vha, 0x10e1, "Done %s.\n", __func__);
+		ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10e1,
+		    "Done %s.\n", __func__);
 	}
 
 	return rval;
@@ -3563,7 +3760,8 @@
 	if (!IS_QLA81XX(vha->hw) && !IS_QLA83XX(vha->hw))
 		return QLA_FUNCTION_FAILED;
 
-	ql_dbg(ql_dbg_mbx, vha, 0x10e2, "Entered %s.\n", __func__);
+	ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10e2,
+	    "Entered %s.\n", __func__);
 
 	mcp->mb[0] = MBC_FLASH_ACCESS_CTRL;
 	mcp->mb[1] = FAC_OPT_CMD_ERASE_SECTOR;
@@ -3582,7 +3780,8 @@
 		    "Failed=%x mb[0]=%x mb[1]=%x mb[2]=%x.\n",
 		    rval, mcp->mb[0], mcp->mb[1], mcp->mb[2]);
 	} else {
-		ql_dbg(ql_dbg_mbx, vha, 0x10e4, "Done %s.\n", __func__);
+		ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10e4,
+		    "Done %s.\n", __func__);
 	}
 
 	return rval;
@@ -3595,7 +3794,8 @@
 	mbx_cmd_t mc;
 	mbx_cmd_t *mcp = &mc;
 
-	ql_dbg(ql_dbg_mbx, vha, 0x10e5, "Entered %s.\n", __func__);
+	ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10e5,
+	    "Entered %s.\n", __func__);
 
 	mcp->mb[0] = MBC_RESTART_MPI_FW;
 	mcp->out_mb = MBX_0;
@@ -3609,7 +3809,8 @@
 		    "Failed=%x mb[0]=%x mb[1]=%x.\n",
 		    rval, mcp->mb[0], mcp->mb[1]);
 	} else {
-		ql_dbg(ql_dbg_mbx, vha, 0x10e7, "Done %s.\n", __func__);
+		ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10e7,
+		    "Done %s.\n", __func__);
 	}
 
 	return rval;
@@ -3624,7 +3825,8 @@
 	mbx_cmd_t *mcp = &mc;
 	struct qla_hw_data *ha = vha->hw;
 
-	ql_dbg(ql_dbg_mbx, vha, 0x10e8, "Entered %s.\n", __func__);
+	ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10e8,
+	    "Entered %s.\n", __func__);
 
 	if (!IS_FWI2_CAPABLE(ha))
 		return QLA_FUNCTION_FAILED;
@@ -3654,7 +3856,8 @@
 		ql_dbg(ql_dbg_mbx, vha, 0x10e9,
 		    "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]);
 	} else {
-		ql_dbg(ql_dbg_mbx, vha, 0x10ea, "Done %s.\n", __func__);
+		ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10ea,
+		    "Done %s.\n", __func__);
 	}
 
 	return rval;
@@ -3669,7 +3872,8 @@
 	mbx_cmd_t *mcp = &mc;
 	struct qla_hw_data *ha = vha->hw;
 
-	ql_dbg(ql_dbg_mbx, vha, 0x10eb, "Entered %s.\n", __func__);
+	ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10eb,
+	    "Entered %s.\n", __func__);
 
 	if (!IS_FWI2_CAPABLE(ha))
 		return QLA_FUNCTION_FAILED;
@@ -3699,7 +3903,8 @@
 		ql_dbg(ql_dbg_mbx, vha, 0x10ec,
 		    "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]);
 	} else {
-		ql_dbg(ql_dbg_mbx, vha, 0x10ed, "Done %s.\n", __func__);
+		ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10ed,
+		    "Done %s.\n", __func__);
 	}
 
 	return rval;
@@ -3713,7 +3918,8 @@
 	mbx_cmd_t mc;
 	mbx_cmd_t *mcp = &mc;
 
-	ql_dbg(ql_dbg_mbx, vha, 0x10ee, "Entered %s.\n", __func__);
+	ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10ee,
+	    "Entered %s.\n", __func__);
 
 	if (!IS_CNA_CAPABLE(vha->hw))
 		return QLA_FUNCTION_FAILED;
@@ -3735,7 +3941,8 @@
 		    "Failed=%x mb[0]=%x mb[1]=%x mb[2]=%x.\n",
 		    rval, mcp->mb[0], mcp->mb[1], mcp->mb[2]);
 	} else {
-		ql_dbg(ql_dbg_mbx, vha, 0x10f0, "Done %s.\n", __func__);
+		ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10f0,
+		    "Done %s.\n", __func__);
 
 
 		*actual_size = mcp->mb[2] << 2;
@@ -3752,7 +3959,8 @@
 	mbx_cmd_t mc;
 	mbx_cmd_t *mcp = &mc;
 
-	ql_dbg(ql_dbg_mbx, vha, 0x10f1, "Entered %s.\n", __func__);
+	ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10f1,
+	    "Entered %s.\n", __func__);
 
 	if (!IS_CNA_CAPABLE(vha->hw))
 		return QLA_FUNCTION_FAILED;
@@ -3775,7 +3983,8 @@
 		    "Failed=%x mb[0]=%x mb[1]=%x mb[2]=%x.\n",
 		    rval, mcp->mb[0], mcp->mb[1], mcp->mb[2]);
 	} else {
-		ql_dbg(ql_dbg_mbx, vha, 0x10f3, "Done %s.\n", __func__);
+		ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10f3,
+		    "Done %s.\n", __func__);
 	}
 
 	return rval;
@@ -3788,7 +3997,8 @@
 	mbx_cmd_t mc;
 	mbx_cmd_t *mcp = &mc;
 
-	ql_dbg(ql_dbg_mbx, vha, 0x10f4, "Entered %s.\n", __func__);
+	ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10f4,
+	    "Entered %s.\n", __func__);
 
 	if (!IS_FWI2_CAPABLE(vha->hw))
 		return QLA_FUNCTION_FAILED;
@@ -3805,7 +4015,8 @@
 		ql_dbg(ql_dbg_mbx, vha, 0x10f5,
 		    "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]);
 	} else {
-		ql_dbg(ql_dbg_mbx, vha, 0x10f6, "Done %s.\n", __func__);
+		ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10f6,
+		    "Done %s.\n", __func__);
 		*data = mcp->mb[3] << 16 | mcp->mb[2];
 	}
 
@@ -3821,7 +4032,8 @@
 	mbx_cmd_t *mcp = &mc;
 	uint32_t iter_cnt = 0x1;
 
-	ql_dbg(ql_dbg_mbx, vha, 0x10f7, "Entered %s.\n", __func__);
+	ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10f7,
+	    "Entered %s.\n", __func__);
 
 	memset(mcp->mb, 0 , sizeof(mcp->mb));
 	mcp->mb[0] = MBC_DIAGNOSTIC_LOOP_BACK;
@@ -3865,7 +4077,8 @@
 		    "mb[19]=%x.\n", rval, mcp->mb[0], mcp->mb[1], mcp->mb[2],
 		    mcp->mb[3], mcp->mb[18], mcp->mb[19]);
 	} else {
-		ql_dbg(ql_dbg_mbx, vha, 0x10f9, "Done %s.\n", __func__);
+		ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10f9,
+		    "Done %s.\n", __func__);
 	}
 
 	/* Copy mailbox information */
@@ -3882,7 +4095,8 @@
 	mbx_cmd_t *mcp = &mc;
 	struct qla_hw_data *ha = vha->hw;
 
-	ql_dbg(ql_dbg_mbx, vha, 0x10fa, "Entered %s.\n", __func__);
+	ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10fa,
+	    "Entered %s.\n", __func__);
 
 	memset(mcp->mb, 0 , sizeof(mcp->mb));
 	mcp->mb[0] = MBC_DIAGNOSTIC_ECHO;
@@ -3926,7 +4140,8 @@
 		    "Failed=%x mb[0]=%x mb[1]=%x.\n",
 		    rval, mcp->mb[0], mcp->mb[1]);
 	} else {
-		ql_dbg(ql_dbg_mbx, vha, 0x10fc, "Done %s.\n", __func__);
+		ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10fc,
+		    "Done %s.\n", __func__);
 	}
 
 	/* Copy mailbox information */
@@ -3941,7 +4156,7 @@
 	mbx_cmd_t mc;
 	mbx_cmd_t *mcp = &mc;
 
-	ql_dbg(ql_dbg_mbx, vha, 0x10fd,
+	ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10fd,
 	    "Entered %s enable_diag=%d.\n", __func__, enable_diagnostic);
 
 	mcp->mb[0] = MBC_ISP84XX_RESET;
@@ -3955,7 +4170,8 @@
 	if (rval != QLA_SUCCESS)
 		ql_dbg(ql_dbg_mbx, vha, 0x10fe, "Failed=%x.\n", rval);
 	else
-		ql_dbg(ql_dbg_mbx, vha, 0x10ff, "Done %s.\n", __func__);
+		ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10ff,
+		    "Done %s.\n", __func__);
 
 	return rval;
 }
@@ -3967,7 +4183,8 @@
 	mbx_cmd_t mc;
 	mbx_cmd_t *mcp = &mc;
 
-	ql_dbg(ql_dbg_mbx, vha, 0x1100, "Entered %s.\n", __func__);
+	ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1100,
+	    "Entered %s.\n", __func__);
 
 	if (!IS_FWI2_CAPABLE(vha->hw))
 		return QLA_FUNCTION_FAILED;
@@ -3986,7 +4203,8 @@
 		ql_dbg(ql_dbg_mbx, vha, 0x1101,
 		    "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]);
 	} else {
-		ql_dbg(ql_dbg_mbx, vha, 0x1102, "Done %s.\n", __func__);
+		ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1102,
+		    "Done %s.\n", __func__);
 	}
 
 	return rval;
@@ -4003,7 +4221,8 @@
 
 	rval = QLA_SUCCESS;
 
-	ql_dbg(ql_dbg_mbx, vha, 0x1103, "Entered %s.\n", __func__);
+	ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1103,
+	    "Entered %s.\n", __func__);
 
 	clear_bit(MBX_INTERRUPT, &ha->mbx_cmd_flags);
 
@@ -4046,7 +4265,8 @@
 		ql_dbg(ql_dbg_mbx, vha, 0x1104,
 		    "Failed=%x mb[0]=%x.\n", rval, mb[0]);
 	} else {
-		ql_dbg(ql_dbg_mbx, vha, 0x1105, "Done %s.\n", __func__);
+		ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1105,
+		    "Done %s.\n", __func__);
 	}
 
 	return rval;
@@ -4060,7 +4280,8 @@
 	mbx_cmd_t *mcp = &mc;
 	struct qla_hw_data *ha = vha->hw;
 
-	ql_dbg(ql_dbg_mbx, vha, 0x1106, "Entered %s.\n", __func__);
+	ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1106,
+	    "Entered %s.\n", __func__);
 
 	if (!IS_FWI2_CAPABLE(ha))
 		return QLA_FUNCTION_FAILED;
@@ -4078,7 +4299,8 @@
 		ql_dbg(ql_dbg_mbx, vha, 0x1107,
 		    "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]);
 	} else {
-		ql_dbg(ql_dbg_mbx, vha, 0x1108, "Done %s.\n", __func__);
+		ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1108,
+		    "Done %s.\n", __func__);
 		if (mcp->mb[1] != 0x7)
 			ha->link_data_rate = mcp->mb[1];
 	}
@@ -4094,7 +4316,8 @@
 	mbx_cmd_t *mcp = &mc;
 	struct qla_hw_data *ha = vha->hw;
 
-	ql_dbg(ql_dbg_mbx, vha, 0x1109, "Entered %s.\n", __func__);
+	ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1109,
+	    "Entered %s.\n", __func__);
 
 	if (!IS_QLA81XX(ha) && !IS_QLA83XX(ha))
 		return QLA_FUNCTION_FAILED;
@@ -4113,7 +4336,8 @@
 		/* Copy all bits to preserve original value */
 		memcpy(mb, &mcp->mb[1], sizeof(uint16_t) * 4);
 
-		ql_dbg(ql_dbg_mbx, vha, 0x110b, "Done %s.\n", __func__);
+		ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x110b,
+		    "Done %s.\n", __func__);
 	}
 	return rval;
 }
@@ -4125,7 +4349,8 @@
 	mbx_cmd_t mc;
 	mbx_cmd_t *mcp = &mc;
 
-	ql_dbg(ql_dbg_mbx, vha, 0x110c, "Entered %s.\n", __func__);
+	ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x110c,
+	    "Entered %s.\n", __func__);
 
 	mcp->mb[0] = MBC_SET_PORT_CONFIG;
 	/* Copy all bits to preserve original setting */
@@ -4140,7 +4365,8 @@
 		ql_dbg(ql_dbg_mbx, vha, 0x110d,
 		    "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]);
 	} else
-		ql_dbg(ql_dbg_mbx, vha, 0x110e, "Done %s.\n", __func__);
+		ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x110e,
+		    "Done %s.\n", __func__);
 
 	return rval;
 }
@@ -4155,7 +4381,8 @@
 	mbx_cmd_t *mcp = &mc;
 	struct qla_hw_data *ha = vha->hw;
 
-	ql_dbg(ql_dbg_mbx, vha, 0x110f, "Entered %s.\n", __func__);
+	ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x110f,
+	    "Entered %s.\n", __func__);
 
 	if (!IS_QLA24XX_TYPE(ha) && !IS_QLA25XX(ha))
 		return QLA_FUNCTION_FAILED;
@@ -4183,7 +4410,8 @@
 	if (rval != QLA_SUCCESS) {
 		ql_dbg(ql_dbg_mbx, vha, 0x10cd, "Failed=%x.\n", rval);
 	} else {
-		ql_dbg(ql_dbg_mbx, vha, 0x10cc, "Done %s.\n", __func__);
+		ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10cc,
+		    "Done %s.\n", __func__);
 	}
 
 	return rval;
@@ -4196,7 +4424,8 @@
 	uint8_t byte;
 	struct qla_hw_data *ha = vha->hw;
 
-	ql_dbg(ql_dbg_mbx, vha, 0x10ca, "Entered %s.\n", __func__);
+	ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10ca,
+	    "Entered %s.\n", __func__);
 
 	/* Integer part */
 	rval = qla2x00_read_sfp(vha, 0, &byte, 0x98, 0x01, 1, BIT_13|BIT_0);
@@ -4216,7 +4445,8 @@
 	}
 	*frac = (byte >> 6) * 25;
 
-	ql_dbg(ql_dbg_mbx, vha, 0x1018, "Done %s.\n", __func__);
+	ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1018,
+	    "Done %s.\n", __func__);
 fail:
 	return rval;
 }
@@ -4229,7 +4459,8 @@
 	mbx_cmd_t mc;
 	mbx_cmd_t *mcp = &mc;
 
-	ql_dbg(ql_dbg_mbx, vha, 0x1017, "Entered %s.\n", __func__);
+	ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1017,
+	    "Entered %s.\n", __func__);
 
 	if (!IS_FWI2_CAPABLE(ha))
 		return QLA_FUNCTION_FAILED;
@@ -4248,7 +4479,8 @@
 		ql_dbg(ql_dbg_mbx, vha, 0x1016,
 		    "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]);
 	} else {
-		ql_dbg(ql_dbg_mbx, vha, 0x100e, "Done %s.\n", __func__);
+		ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x100e,
+		    "Done %s.\n", __func__);
 	}
 
 	return rval;
@@ -4262,7 +4494,8 @@
 	mbx_cmd_t mc;
 	mbx_cmd_t *mcp = &mc;
 
-	ql_dbg(ql_dbg_mbx, vha, 0x100d, "Entered %s.\n", __func__);
+	ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x100d,
+	    "Entered %s.\n", __func__);
 
 	if (!IS_QLA82XX(ha))
 		return QLA_FUNCTION_FAILED;
@@ -4281,7 +4514,8 @@
 		ql_dbg(ql_dbg_mbx, vha, 0x100c,
 		    "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]);
 	} else {
-		ql_dbg(ql_dbg_mbx, vha, 0x100b, "Done %s.\n", __func__);
+		ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x100b,
+		    "Done %s.\n", __func__);
 	}
 
 	return rval;
@@ -4295,7 +4529,8 @@
 	mbx_cmd_t *mcp = &mc;
 	int rval = QLA_FUNCTION_FAILED;
 
-	ql_dbg(ql_dbg_mbx, vha, 0x111f, "Entered %s.\n", __func__);
+	ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x111f,
+	    "Entered %s.\n", __func__);
 
 	memset(mcp->mb, 0 , sizeof(mcp->mb));
 	mcp->mb[0] = LSW(MBC_DIAGNOSTIC_MINIDUMP_TEMPLATE);
@@ -4318,7 +4553,8 @@
 		    (mcp->mb[1] << 16) | mcp->mb[0],
 		    (mcp->mb[3] << 16) | mcp->mb[2]);
 	} else {
-		ql_dbg(ql_dbg_mbx, vha, 0x1121, "Done %s.\n", __func__);
+		ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1121,
+		    "Done %s.\n", __func__);
 		ha->md_template_size = ((mcp->mb[3] << 16) | mcp->mb[2]);
 		if (!ha->md_template_size) {
 			ql_dbg(ql_dbg_mbx, vha, 0x1122,
@@ -4337,7 +4573,8 @@
 	mbx_cmd_t *mcp = &mc;
 	int rval = QLA_FUNCTION_FAILED;
 
-	ql_dbg(ql_dbg_mbx, vha, 0x1123, "Entered %s.\n", __func__);
+	ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1123,
+	    "Entered %s.\n", __func__);
 
 	ha->md_tmplt_hdr = dma_alloc_coherent(&ha->pdev->dev,
 	   ha->md_template_size, &ha->md_tmplt_hdr_dma, GFP_KERNEL);
@@ -4372,7 +4609,8 @@
 		    ((mcp->mb[1] << 16) | mcp->mb[0]),
 		    ((mcp->mb[3] << 16) | mcp->mb[2]));
 	} else
-		ql_dbg(ql_dbg_mbx, vha, 0x1126, "Done %s.\n", __func__);
+		ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1126,
+		    "Done %s.\n", __func__);
 	return rval;
 }
 
@@ -4387,7 +4625,8 @@
 	if (!IS_QLA81XX(ha) && !IS_QLA8031(ha))
 		return QLA_FUNCTION_FAILED;
 
-	ql_dbg(ql_dbg_mbx, vha, 0x1133, "Entered %s.\n", __func__);
+	ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1133,
+	    "Entered %s.\n", __func__);
 
 	memset(mcp, 0, sizeof(mbx_cmd_t));
 	mcp->mb[0] = MBC_SET_LED_CONFIG;
@@ -4412,7 +4651,8 @@
 		ql_dbg(ql_dbg_mbx, vha, 0x1134,
 		    "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]);
 	} else {
-		ql_dbg(ql_dbg_mbx, vha, 0x1135, "Done %s.\n", __func__);
+		ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1135,
+		    "Done %s.\n", __func__);
 	}
 
 	return rval;
@@ -4429,7 +4669,8 @@
 	if (!IS_QLA81XX(ha) && !IS_QLA8031(ha))
 		return QLA_FUNCTION_FAILED;
 
-	ql_dbg(ql_dbg_mbx, vha, 0x1136, "Entered %s.\n", __func__);
+	ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1136,
+	    "Entered %s.\n", __func__);
 
 	memset(mcp, 0, sizeof(mbx_cmd_t));
 	mcp->mb[0] = MBC_GET_LED_CONFIG;
@@ -4454,7 +4695,8 @@
 			led_cfg[4] = mcp->mb[5];
 			led_cfg[5] = mcp->mb[6];
 		}
-		ql_dbg(ql_dbg_mbx, vha, 0x1138, "Done %s.\n", __func__);
+		ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1138,
+		    "Done %s.\n", __func__);
 	}
 
 	return rval;
@@ -4471,7 +4713,7 @@
 	if (!IS_QLA82XX(ha))
 		return QLA_FUNCTION_FAILED;
 
-	ql_dbg(ql_dbg_mbx, vha, 0x1127,
+	ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1127,
 		"Entered %s.\n", __func__);
 
 	memset(mcp, 0, sizeof(mbx_cmd_t));
@@ -4491,7 +4733,7 @@
 		ql_dbg(ql_dbg_mbx, vha, 0x1128,
 		    "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]);
 	} else {
-		ql_dbg(ql_dbg_mbx, vha, 0x1129,
+		ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1129,
 		    "Done %s.\n", __func__);
 	}
 
@@ -4509,7 +4751,8 @@
 	if (!IS_QLA83XX(ha))
 		return QLA_FUNCTION_FAILED;
 
-	ql_dbg(ql_dbg_mbx, vha, 0x1130, "Entered %s.\n", __func__);
+	ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1130,
+	    "Entered %s.\n", __func__);
 
 	mcp->mb[0] = MBC_WRITE_REMOTE_REG;
 	mcp->mb[1] = LSW(reg);
@@ -4527,7 +4770,7 @@
 		ql_dbg(ql_dbg_mbx, vha, 0x1131,
 		    "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]);
 	} else {
-		ql_dbg(ql_dbg_mbx, vha, 0x1132,
+		ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1132,
 		    "Done %s.\n", __func__);
 	}
 
@@ -4543,13 +4786,14 @@
 	mbx_cmd_t *mcp = &mc;
 
 	if (IS_QLA2100(ha) || IS_QLA2200(ha)) {
-		ql_dbg(ql_dbg_mbx, vha, 0x113b,
+		ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x113b,
 		    "Implicit LOGO Unsupported.\n");
 		return QLA_FUNCTION_FAILED;
 	}
 
 
-	ql_dbg(ql_dbg_mbx, vha, 0x113c, "Done %s.\n",  __func__);
+	ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x113c,
+	    "Entering %s.\n",  __func__);
 
 	/* Perform Implicit LOGO. */
 	mcp->mb[0] = MBC_PORT_LOGOUT;
@@ -4564,7 +4808,8 @@
 		ql_dbg(ql_dbg_mbx, vha, 0x113d,
 		    "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]);
 	else
-		ql_dbg(ql_dbg_mbx, vha, 0x113e, "Done %s.\n", __func__);
+		ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x113e,
+		    "Done %s.\n", __func__);
 
 	return rval;
 }
diff --git a/drivers/scsi/qla2xxx/qla_mid.c b/drivers/scsi/qla2xxx/qla_mid.c
index aa062a1..3e8b324 100644
--- a/drivers/scsi/qla2xxx/qla_mid.c
+++ b/drivers/scsi/qla2xxx/qla_mid.c
@@ -6,6 +6,7 @@
  */
 #include "qla_def.h"
 #include "qla_gbl.h"
+#include "qla_target.h"
 
 #include <linux/moduleparam.h>
 #include <linux/vmalloc.h>
@@ -49,6 +50,9 @@
 
 	spin_lock_irqsave(&ha->vport_slock, flags);
 	list_add_tail(&vha->list, &ha->vp_list);
+
+	qlt_update_vp_map(vha, SET_VP_IDX);
+
 	spin_unlock_irqrestore(&ha->vport_slock, flags);
 
 	mutex_unlock(&ha->vport_lock);
@@ -79,6 +83,7 @@
 		spin_lock_irqsave(&ha->vport_slock, flags);
 	}
 	list_del(&vha->list);
+	qlt_update_vp_map(vha, RESET_VP_IDX);
 	spin_unlock_irqrestore(&ha->vport_slock, flags);
 
 	vp_id = vha->vp_idx;
@@ -134,7 +139,7 @@
 	list_for_each_entry(fcport, &vha->vp_fcports, list) {
 		ql_dbg(ql_dbg_vport, vha, 0xa001,
 		    "Marking port dead, loop_id=0x%04x : %x.\n",
-		    fcport->loop_id, fcport->vp_idx);
+		    fcport->loop_id, fcport->vha->vp_idx);
 
 		qla2x00_mark_device_lost(vha, fcport, 0, 0);
 		qla2x00_set_fcport_state(fcport, FCS_UNCONFIGURED);
@@ -150,6 +155,9 @@
 	atomic_set(&vha->loop_state, LOOP_DOWN);
 	atomic_set(&vha->loop_down_timer, LOOP_DOWN_TIME);
 
+	/* Remove port id from vp target map */
+	qlt_update_vp_map(vha, RESET_AL_PA);
+
 	qla2x00_mark_vp_devices_dead(vha);
 	atomic_set(&vha->vp_state, VP_FAILED);
 	vha->flags.management_server_logged_in = 0;
@@ -295,10 +303,8 @@
 static int
 qla2x00_do_dpc_vp(scsi_qla_host_t *vha)
 {
-	ql_dbg(ql_dbg_dpc, vha, 0x4012,
-	    "Entering %s.\n", __func__);
-	ql_dbg(ql_dbg_dpc, vha, 0x4013,
-	    "vp_flags: 0x%lx.\n", vha->vp_flags);
+	ql_dbg(ql_dbg_dpc + ql_dbg_verbose, vha, 0x4012,
+	    "Entering %s vp_flags: 0x%lx.\n", __func__, vha->vp_flags);
 
 	qla2x00_do_work(vha);
 
@@ -348,7 +354,7 @@
 		}
 	}
 
-	ql_dbg(ql_dbg_dpc, vha, 0x401c,
+	ql_dbg(ql_dbg_dpc + ql_dbg_verbose, vha, 0x401c,
 	    "Exiting %s.\n", __func__);
 	return 0;
 }
diff --git a/drivers/scsi/qla2xxx/qla_nx.c b/drivers/scsi/qla2xxx/qla_nx.c
index de722a9..caf627b 100644
--- a/drivers/scsi/qla2xxx/qla_nx.c
+++ b/drivers/scsi/qla2xxx/qla_nx.c
@@ -1190,12 +1190,12 @@
 	}
 
 	/* Offset in flash = lower 16 bits
-	 * Number of enteries = upper 16 bits
+	 * Number of entries = upper 16 bits
 	 */
 	offset = n & 0xffffU;
 	n = (n >> 16) & 0xffffU;
 
-	/* number of addr/value pair should not exceed 1024 enteries */
+	/* number of addr/value pair should not exceed 1024 entries */
 	if (n  >= 1024) {
 		ql_log(ql_log_fatal, vha, 0x0071,
 		    "Card flash not initialized:n=0x%x.\n", n);
@@ -2050,7 +2050,7 @@
 
 	rsp = (struct rsp_que *) dev_id;
 	if (!rsp) {
-		ql_log(ql_log_info, NULL, 0xb054,
+		ql_log(ql_log_info, NULL, 0xb053,
 		    "%s: NULL response queue pointer.\n", __func__);
 		return IRQ_NONE;
 	}
@@ -2446,7 +2446,7 @@
 
 	if (qla82xx_fw_load_from_flash(ha) == QLA_SUCCESS) {
 		ql_log(ql_log_info, vha, 0x00a1,
-		    "Firmware loaded successully from flash.\n");
+		    "Firmware loaded successfully from flash.\n");
 		return QLA_SUCCESS;
 	} else {
 		ql_log(ql_log_warn, vha, 0x0108,
@@ -2461,7 +2461,7 @@
 	blob = ha->hablob = qla2x00_request_firmware(vha);
 	if (!blob) {
 		ql_log(ql_log_fatal, vha, 0x00a3,
-		    "Firmware image not preset.\n");
+		    "Firmware image not present.\n");
 		goto fw_load_failed;
 	}
 
@@ -2689,7 +2689,7 @@
 		if (!optrom) {
 			ql_log(ql_log_warn, vha, 0xb01b,
 			    "Unable to allocate memory "
-			    "for optron burst write (%x KB).\n",
+			    "for optrom burst write (%x KB).\n",
 			    OPTROM_BURST_SIZE / 1024);
 		}
 	}
@@ -2960,9 +2960,8 @@
 			 * changing the state to DEV_READY
 			 */
 			ql_log(ql_log_info, vha, 0xb023,
-			    "%s : QUIESCENT TIMEOUT.\n", QLA2XXX_DRIVER_NAME);
-			ql_log(ql_log_info, vha, 0xb024,
-			    "DRV_ACTIVE:%d DRV_STATE:%d.\n",
+			    "%s : QUIESCENT TIMEOUT DRV_ACTIVE:%d "
+			    "DRV_STATE:%d.\n", QLA2XXX_DRIVER_NAME,
 			    drv_active, drv_state);
 			qla82xx_wr_32(ha, QLA82XX_CRB_DEV_STATE,
 			    QLA82XX_DEV_READY);
@@ -3129,7 +3128,7 @@
 		if (ql2xmdenable) {
 			if (qla82xx_md_collect(vha))
 				ql_log(ql_log_warn, vha, 0xb02c,
-				    "Not able to collect minidump.\n");
+				    "Minidump not collected.\n");
 		} else
 			ql_log(ql_log_warn, vha, 0xb04f,
 			    "Minidump disabled.\n");
@@ -3160,11 +3159,11 @@
 				    "Firmware version differs "
 				    "Previous version: %d:%d:%d - "
 				    "New version: %d:%d:%d\n",
+				    fw_major_version, fw_minor_version,
+				    fw_subminor_version,
 				    ha->fw_major_version,
 				    ha->fw_minor_version,
-				    ha->fw_subminor_version,
-				    fw_major_version, fw_minor_version,
-				    fw_subminor_version);
+				    ha->fw_subminor_version);
 				/* Release MiniDump resources */
 				qla82xx_md_free(vha);
 				/* ALlocate MiniDump resources */
@@ -3325,6 +3324,30 @@
 	return rval;
 }
 
+static int qla82xx_check_temp(scsi_qla_host_t *vha)
+{
+	uint32_t temp, temp_state, temp_val;
+	struct qla_hw_data *ha = vha->hw;
+
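+	/*
+	 * CRB_TEMP_STATE packs the temperature value in the upper 16 bits
+	 * and the temperature state in the lower 16 bits (see the
+	 * qla82xx_get_temp_val/_state macros).
+	 */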
+	temp = qla82xx_rd_32(ha, CRB_TEMP_STATE);
+	temp_state = qla82xx_get_temp_state(temp);
+	temp_val = qla82xx_get_temp_val(temp);
+
+	if (temp_state == QLA82XX_TEMP_PANIC) {
+		ql_log(ql_log_warn, vha, 0x600e,
+		    "Device temperature %d degrees C exceeds "
+		    "maximum allowed. Hardware has been shut down.\n",
+		    temp_val);
+		return 1;
+	} else if (temp_state == QLA82XX_TEMP_WARN) {
+		ql_log(ql_log_warn, vha, 0x600f,
+		    "Device temperature %d degrees C exceeds "
+		    "operating range. Immediate action needed.\n",
+		    temp_val);
+	}
+	return 0;
+}
+
 void qla82xx_clear_pending_mbx(scsi_qla_host_t *vha)
 {
 	struct qla_hw_data *ha = vha->hw;
@@ -3347,18 +3370,20 @@
 	/* don't poll if reset is going on */
 	if (!ha->flags.isp82xx_reset_hdlr_active) {
 		dev_state = qla82xx_rd_32(ha, QLA82XX_CRB_DEV_STATE);
-		if (dev_state == QLA82XX_DEV_NEED_RESET &&
+		if (qla82xx_check_temp(vha)) {
+			set_bit(ISP_UNRECOVERABLE, &vha->dpc_flags);
+			ha->flags.isp82xx_fw_hung = 1;
+			qla82xx_clear_pending_mbx(vha);
+		} else if (dev_state == QLA82XX_DEV_NEED_RESET &&
 		    !test_bit(ISP_ABORT_NEEDED, &vha->dpc_flags)) {
 			ql_log(ql_log_warn, vha, 0x6001,
 			    "Adapter reset needed.\n");
 			set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
-			qla2xxx_wake_dpc(vha);
 		} else if (dev_state == QLA82XX_DEV_NEED_QUIESCENT &&
 			!test_bit(ISP_QUIESCE_NEEDED, &vha->dpc_flags)) {
 			ql_log(ql_log_warn, vha, 0x6002,
 			    "Quiescent needed.\n");
 			set_bit(ISP_QUIESCE_NEEDED, &vha->dpc_flags);
-			qla2xxx_wake_dpc(vha);
 		} else {
 			if (qla82xx_check_fw_alive(vha)) {
 				ql_dbg(ql_dbg_timer, vha, 0x6011,
@@ -3398,7 +3423,6 @@
 					set_bit(ISP_ABORT_NEEDED,
 					    &vha->dpc_flags);
 				}
-				qla2xxx_wake_dpc(vha);
 				ha->flags.isp82xx_fw_hung = 1;
 				ql_log(ql_log_warn, vha, 0x6007, "Firmware hung.\n");
 				qla82xx_clear_pending_mbx(vha);
@@ -4113,6 +4137,14 @@
 		goto md_failed;
 	}
 
+	if (ha->flags.isp82xx_no_md_cap) {
+		ql_log(ql_log_warn, vha, 0xb054,
+		    "Forced reset from application, "
+		    "ignoring minidump capture\n");
+		ha->flags.isp82xx_no_md_cap = 0;
+		goto md_failed;
+	}
+
 	if (qla82xx_validate_template_chksum(vha)) {
 		ql_log(ql_log_info, vha, 0xb039,
 		    "Template checksum validation error\n");
diff --git a/drivers/scsi/qla2xxx/qla_nx.h b/drivers/scsi/qla2xxx/qla_nx.h
index 4ac50e2..6eb210e 100644
--- a/drivers/scsi/qla2xxx/qla_nx.h
+++ b/drivers/scsi/qla2xxx/qla_nx.h
@@ -26,6 +26,7 @@
 #define CRB_RCVPEG_STATE		QLA82XX_REG(0x13c)
 #define BOOT_LOADER_DIMM_STATUS		QLA82XX_REG(0x54)
 #define CRB_DMA_SHIFT			QLA82XX_REG(0xcc)
+#define CRB_TEMP_STATE			QLA82XX_REG(0x1b4)
 #define QLA82XX_DMA_SHIFT_VALUE		0x55555555
 
 #define QLA82XX_HW_H0_CH_HUB_ADR    0x05
@@ -561,7 +562,6 @@
 #define QLA82XX_FW_VERSION_SUB		(QLA82XX_CAM_RAM(0x158))
 #define QLA82XX_PCIE_REG(reg)		(QLA82XX_CRB_PCIE + (reg))
 
-#define PCIE_CHICKEN3			(0x120c8)
 #define PCIE_SETUP_FUNCTION		(0x12040)
 #define PCIE_SETUP_FUNCTION2		(0x12048)
 
@@ -1178,4 +1178,16 @@
 #define CRB_NIU_XG_PAUSE_CTL_P0        0x1
 #define CRB_NIU_XG_PAUSE_CTL_P1        0x8
 
+#define qla82xx_get_temp_val(x)          ((x) >> 16)
+#define qla82xx_get_temp_state(x)        ((x) & 0xffff)
+#define qla82xx_encode_temp(val, state)  (((val) << 16) | (state))
+
+/*
+ * Temperature control.
+ */
+enum {
+	QLA82XX_TEMP_NORMAL = 0x1, /* Normal operating range */
+	QLA82XX_TEMP_WARN,	   /* Sound alert, temperature getting high */
+	QLA82XX_TEMP_PANIC	   /* Fatal error, hardware has shut down. */
+};
 #endif
diff --git a/drivers/scsi/qla2xxx/qla_os.c b/drivers/scsi/qla2xxx/qla_os.c
index c9c56a8..6d1d873 100644
--- a/drivers/scsi/qla2xxx/qla_os.c
+++ b/drivers/scsi/qla2xxx/qla_os.c
@@ -13,12 +13,13 @@
 #include <linux/mutex.h>
 #include <linux/kobject.h>
 #include <linux/slab.h>
-
 #include <scsi/scsi_tcq.h>
 #include <scsi/scsicam.h>
 #include <scsi/scsi_transport.h>
 #include <scsi/scsi_transport_fc.h>
 
+#include "qla_target.h"
+
 /*
  * Driver version
  */
@@ -40,6 +41,12 @@
  */
 int ql_errlev = ql_log_all;
 
+int ql2xenableclass2;
+module_param(ql2xenableclass2, int, S_IRUGO|S_IRUSR);
+MODULE_PARM_DESC(ql2xenableclass2,
+		"Specify if Class 2 operations are supported from the very "
+		"beginning. Default is 0 - class 2 not supported.");
+
 int ql2xlogintimeout = 20;
 module_param(ql2xlogintimeout, int, S_IRUGO);
 MODULE_PARM_DESC(ql2xlogintimeout,
@@ -255,6 +262,8 @@
 
 	.max_sectors		= 0xFFFF,
 	.shost_attrs		= qla2x00_host_attrs,
+
+	.supported_mode		= MODE_INITIATOR,
 };
 
 static struct scsi_transport_template *qla2xxx_transport_template = NULL;
@@ -306,7 +315,8 @@
 static void qla2x00_mem_free(struct qla_hw_data *);
 
 /* -------------------------------------------------------------------------- */
-static int qla2x00_alloc_queues(struct qla_hw_data *ha)
+static int qla2x00_alloc_queues(struct qla_hw_data *ha, struct req_que *req,
+				struct rsp_que *rsp)
 {
 	scsi_qla_host_t *vha = pci_get_drvdata(ha->pdev);
 	ha->req_q_map = kzalloc(sizeof(struct req_que *) * ha->max_req_queues,
@@ -324,6 +334,12 @@
 		    "Unable to allocate memory for response queue ptrs.\n");
 		goto fail_rsp_map;
 	}
+	/*
+	 * Make sure we record at least the request and response queue zero in
+	 * case we need to free them if part of the probe fails.
+	 */
+	ha->rsp_q_map[0] = rsp;
+	ha->req_q_map[0] = req;
 	set_bit(0, ha->rsp_qid_map);
 	set_bit(0, ha->req_qid_map);
 	return 1;
@@ -642,12 +658,12 @@
 
 	if (ha->flags.eeh_busy) {
 		if (ha->flags.pci_channel_io_perm_failure) {
-			ql_dbg(ql_dbg_io, vha, 0x3001,
+			ql_dbg(ql_dbg_aer, vha, 0x9010,
 			    "PCI Channel IO permanent failure, exiting "
 			    "cmd=%p.\n", cmd);
 			cmd->result = DID_NO_CONNECT << 16;
 		} else {
-			ql_dbg(ql_dbg_io, vha, 0x3002,
+			ql_dbg(ql_dbg_aer, vha, 0x9011,
 			    "EEH_Busy, Requeuing the cmd=%p.\n", cmd);
 			cmd->result = DID_REQUEUE << 16;
 		}
@@ -657,7 +673,7 @@
 	rval = fc_remote_port_chkready(rport);
 	if (rval) {
 		cmd->result = rval;
-		ql_dbg(ql_dbg_io, vha, 0x3003,
+		ql_dbg(ql_dbg_io + ql_dbg_verbose, vha, 0x3003,
 		    "fc_remote_port_chkready failed for cmd=%p, rval=0x%x.\n",
 		    cmd, rval);
 		goto qc24_fail_command;
@@ -1136,7 +1152,7 @@
 	ret = FAILED;
 
 	ql_log(ql_log_info, vha, 0x8012,
-	    "BUS RESET ISSUED nexus=%ld:%d%d.\n", vha->host_no, id, lun);
+	    "BUS RESET ISSUED nexus=%ld:%d:%d.\n", vha->host_no, id, lun);
 
 	if (qla2x00_wait_for_hba_online(vha) != QLA_SUCCESS) {
 		ql_log(ql_log_fatal, vha, 0x8013,
@@ -2180,6 +2196,7 @@
 	ql_dbg_pci(ql_dbg_init, pdev, 0x000a,
 	    "Memory allocated for ha=%p.\n", ha);
 	ha->pdev = pdev;
+	ha->tgt.enable_class_2 = ql2xenableclass2;
 
 	/* Clear our data area */
 	ha->bars = bars;
@@ -2243,6 +2260,7 @@
 		ha->mbx_count = MAILBOX_REGISTER_COUNT;
 		req_length = REQUEST_ENTRY_CNT_24XX;
 		rsp_length = RESPONSE_ENTRY_CNT_2300;
+		ha->tgt.atio_q_length = ATIO_ENTRY_CNT_24XX;
 		ha->max_loop_id = SNS_LAST_LOOP_ID_2300;
 		ha->init_cb_size = sizeof(struct mid_init_cb_24xx);
 		ha->gid_list_info_size = 8;
@@ -2258,6 +2276,7 @@
 		ha->mbx_count = MAILBOX_REGISTER_COUNT;
 		req_length = REQUEST_ENTRY_CNT_24XX;
 		rsp_length = RESPONSE_ENTRY_CNT_2300;
+		ha->tgt.atio_q_length = ATIO_ENTRY_CNT_24XX;
 		ha->max_loop_id = SNS_LAST_LOOP_ID_2300;
 		ha->init_cb_size = sizeof(struct mid_init_cb_24xx);
 		ha->gid_list_info_size = 8;
@@ -2417,6 +2436,17 @@
 	    host->max_cmd_len, host->max_channel, host->max_lun,
 	    host->transportt, sht->vendor_id);
 
+que_init:
+	/* Alloc arrays of request and response ring ptrs */
+	if (!qla2x00_alloc_queues(ha, req, rsp)) {
+		ql_log(ql_log_fatal, base_vha, 0x003d,
+		    "Failed to allocate memory for queue pointers..."
+		    "aborting.\n");
+		goto probe_init_failed;
+	}
+
+	qlt_probe_one_stage1(base_vha, ha);
+
 	/* Set up the irqs */
 	ret = qla2x00_request_irqs(ha, rsp);
 	if (ret)
@@ -2424,20 +2454,10 @@
 
 	pci_save_state(pdev);
 
-	/* Alloc arrays of request and response ring ptrs */
-que_init:
-	if (!qla2x00_alloc_queues(ha)) {
-		ql_log(ql_log_fatal, base_vha, 0x003d,
-		    "Failed to allocate memory for queue pointers.. aborting.\n");
-		goto probe_init_failed;
-	}
-
-	ha->rsp_q_map[0] = rsp;
-	ha->req_q_map[0] = req;
+	/* Assign back pointers */
 	rsp->req = req;
 	req->rsp = rsp;
-	set_bit(0, ha->req_qid_map);
-	set_bit(0, ha->rsp_qid_map);
+
 	/* FWI2-capable only. */
 	req->req_q_in = &ha->iobase->isp24.req_q_in;
 	req->req_q_out = &ha->iobase->isp24.req_q_out;
@@ -2514,6 +2534,14 @@
 	ql_dbg(ql_dbg_init, base_vha, 0x00ee,
 	    "DPC thread started successfully.\n");
 
+	/*
+	 * If we're not coming up in initiator mode, we might sit for
+	 * a while without waking up the dpc thread, which leads to a
+	 * stuck process warning.  So just kick the dpc once here and
+	 * let the kthread start (and go back to sleep in qla2x00_do_dpc).
+	 */
+	qla2xxx_wake_dpc(base_vha);
+
 skip_dpc:
 	list_add_tail(&base_vha->list, &ha->vp_list);
 	base_vha->host->irq = ha->pdev->irq;
@@ -2559,7 +2587,11 @@
 	ql_dbg(ql_dbg_init, base_vha, 0x00f2,
 	    "Init done and hba is online.\n");
 
-	scsi_scan_host(host);
+	if (qla_ini_mode_enabled(base_vha))
+		scsi_scan_host(host);
+	else
+		ql_dbg(ql_dbg_init, base_vha, 0x0122,
+			"skipping scsi_scan_host() for non-initiator port\n");
 
 	qla2x00_alloc_sysfs_attr(base_vha);
 
@@ -2577,11 +2609,17 @@
 	    base_vha->host_no,
 	    ha->isp_ops->fw_version_str(base_vha, fw_str));
 
+	qlt_add_target(ha, base_vha);
+
 	return 0;
 
 probe_init_failed:
 	qla2x00_free_req_que(ha, req);
+	ha->req_q_map[0] = NULL;
+	clear_bit(0, ha->req_qid_map);
 	qla2x00_free_rsp_que(ha, rsp);
+	ha->rsp_q_map[0] = NULL;
+	clear_bit(0, ha->rsp_qid_map);
 	ha->max_req_queues = ha->max_rsp_queues = 0;
 
 probe_failed:
@@ -2621,6 +2659,22 @@
 }
 
 static void
+qla2x00_stop_dpc_thread(scsi_qla_host_t *vha)
+{
+	struct qla_hw_data *ha = vha->hw;
+	struct task_struct *t = ha->dpc_thread;
+
+	if (ha->dpc_thread == NULL)
+		return;
+	/*
+	 * qla2xxx_wake_dpc checks for ->dpc_thread
+	 * so we need to zero it out.
+	 */
+	ha->dpc_thread = NULL;
+	kthread_stop(t);
+}
+
+static void
 qla2x00_shutdown(struct pci_dev *pdev)
 {
 	scsi_qla_host_t *vha;
@@ -2663,9 +2717,18 @@
 	struct qla_hw_data  *ha;
 	unsigned long flags;
 
+	/*
+	 * If the PCI device is disabled, probe failed and any resources
+	 * have already been cleaned up on probe exit.
+	 */
+	if (!atomic_read(&pdev->enable_cnt))
+		return;
+
 	base_vha = pci_get_drvdata(pdev);
 	ha = base_vha->hw;
 
+	ha->flags.host_shutting_down = 1;
+
 	mutex_lock(&ha->vport_lock);
 	while (ha->cur_vport_count) {
 		struct Scsi_Host *scsi_host;
@@ -2719,6 +2782,7 @@
 		ha->dpc_thread = NULL;
 		kthread_stop(t);
 	}
+	qlt_remove_target(ha, base_vha);
 
 	qla2x00_free_sysfs_attr(base_vha);
 
@@ -2770,17 +2834,7 @@
 	if (vha->timer_active)
 		qla2x00_stop_timer(vha);
 
-	/* Kill the kernel thread for this host */
-	if (ha->dpc_thread) {
-		struct task_struct *t = ha->dpc_thread;
-
-		/*
-		 * qla2xxx_wake_dpc checks for ->dpc_thread
-		 * so we need to zero it out.
-		 */
-		ha->dpc_thread = NULL;
-		kthread_stop(t);
-	}
+	qla2x00_stop_dpc_thread(vha);
 
 	qla25xx_delete_queues(vha);
 
@@ -2842,8 +2896,10 @@
 		spin_unlock_irqrestore(vha->host->host_lock, flags);
 		set_bit(FCPORT_UPDATE_NEEDED, &base_vha->dpc_flags);
 		qla2xxx_wake_dpc(base_vha);
-	} else
+	} else {
 		fc_remote_port_delete(rport);
+		qlt_fc_port_deleted(vha, fcport);
+	}
 }
 
 /*
@@ -2859,7 +2915,7 @@
     int do_login, int defer)
 {
 	if (atomic_read(&fcport->state) == FCS_ONLINE &&
-	    vha->vp_idx == fcport->vp_idx) {
+	    vha->vp_idx == fcport->vha->vp_idx) {
 		qla2x00_set_fcport_state(fcport, FCS_DEVICE_LOST);
 		qla2x00_schedule_rport_del(vha, fcport, defer);
 	}
@@ -2908,7 +2964,7 @@
 	fc_port_t *fcport;
 
 	list_for_each_entry(fcport, &vha->vp_fcports, list) {
-		if (vha->vp_idx != 0 && vha->vp_idx != fcport->vp_idx)
+		if (vha->vp_idx != 0 && vha->vp_idx != fcport->vha->vp_idx)
 			continue;
 
 		/*
@@ -2921,7 +2977,7 @@
 			qla2x00_set_fcport_state(fcport, FCS_DEVICE_LOST);
 			if (defer)
 				qla2x00_schedule_rport_del(vha, fcport, defer);
-			else if (vha->vp_idx == fcport->vp_idx)
+			else if (vha->vp_idx == fcport->vha->vp_idx)
 				qla2x00_schedule_rport_del(vha, fcport, defer);
 		}
 	}
@@ -2946,10 +3002,13 @@
 	if (!ha->init_cb)
 		goto fail;
 
+	if (qlt_mem_alloc(ha) < 0)
+		goto fail_free_init_cb;
+
 	ha->gid_list = dma_alloc_coherent(&ha->pdev->dev,
 		qla2x00_gid_list_size(ha), &ha->gid_list_dma, GFP_KERNEL);
 	if (!ha->gid_list)
-		goto fail_free_init_cb;
+		goto fail_free_tgt_mem;
 
 	ha->srb_mempool = mempool_create_slab_pool(SRB_MIN_REQ, srb_cachep);
 	if (!ha->srb_mempool)
@@ -3167,6 +3226,8 @@
 	ha->gid_list_dma);
 	ha->gid_list = NULL;
 	ha->gid_list_dma = 0;
+fail_free_tgt_mem:
+	qlt_mem_free(ha);
 fail_free_init_cb:
 	dma_free_coherent(&ha->pdev->dev, ha->init_cb_size, ha->init_cb,
 	ha->init_cb_dma);
@@ -3282,6 +3343,8 @@
 	if (ha->ctx_mempool)
 		mempool_destroy(ha->ctx_mempool);
 
+	qlt_mem_free(ha);
+
 	if (ha->init_cb)
 		dma_free_coherent(&ha->pdev->dev, ha->init_cb_size,
 			ha->init_cb, ha->init_cb_dma);
@@ -3311,6 +3374,10 @@
 
 	ha->gid_list = NULL;
 	ha->gid_list_dma = 0;
+
+	ha->tgt.atio_ring = NULL;
+	ha->tgt.atio_dma = 0;
+	ha->tgt.tgt_vp_map = NULL;
 }
 
 struct scsi_qla_host *qla2x00_create_host(struct scsi_host_template *sht,
@@ -3671,10 +3738,9 @@
 
 		ha->dpc_active = 1;
 
-		ql_dbg(ql_dbg_dpc, base_vha, 0x4001,
-		    "DPC handler waking up.\n");
-		ql_dbg(ql_dbg_dpc, base_vha, 0x4002,
-		    "dpc_flags=0x%lx.\n", base_vha->dpc_flags);
+		ql_dbg(ql_dbg_dpc + ql_dbg_verbose, base_vha, 0x4001,
+		    "DPC handler waking up, dpc_flags=0x%lx.\n",
+		    base_vha->dpc_flags);
 
 		qla2x00_do_work(base_vha);
 
@@ -3740,6 +3806,16 @@
 			clear_bit(FCPORT_UPDATE_NEEDED, &base_vha->dpc_flags);
 		}
 
+		if (test_bit(SCR_PENDING, &base_vha->dpc_flags)) {
+			int ret;
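+			/* Re-register with the fabric so RSCN requests are delivered to us. */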
+			ret = qla2x00_send_change_request(base_vha, 0x3, 0);
+			if (ret != QLA_SUCCESS)
+				ql_log(ql_log_warn, base_vha, 0x121,
+				    "Failed to enable receiving of RSCN "
+				    "requests: 0x%x.\n", ret);
+			clear_bit(SCR_PENDING, &base_vha->dpc_flags);
+		}
+
 		if (test_bit(ISP_QUIESCE_NEEDED, &base_vha->dpc_flags)) {
 			ql_dbg(ql_dbg_dpc, base_vha, 0x4009,
 			    "Quiescence mode scheduled.\n");
@@ -4457,6 +4533,21 @@
 		return -ENOMEM;
 	}
 
+	/* Initialize target kmem_cache and mem_pools */
+	ret = qlt_init();
+	if (ret < 0) {
+		kmem_cache_destroy(srb_cachep);
+		return ret;
+	} else if (ret > 0) {
+		/*
+		 * If initiator mode is explicitly disabled by qlt_init(),
+		 * prevent scsi_transport_fc.c:fc_scsi_scan_rport() from
+		 * performing scsi_scan_target() during LOOP UP event.
+		 */
+		qla2xxx_transport_functions.disable_target_scan = 1;
+		qla2xxx_transport_vport_functions.disable_target_scan = 1;
+	}
+
 	/* Derive version string. */
 	strcpy(qla2x00_version_str, QLA2XXX_VERSION);
 	if (ql2xextended_error_logging)
@@ -4468,6 +4559,7 @@
 		kmem_cache_destroy(srb_cachep);
 		ql_log(ql_log_fatal, NULL, 0x0002,
 		    "fc_attach_transport failed...Failing load!.\n");
+		qlt_exit();
 		return -ENODEV;
 	}
 
@@ -4481,6 +4573,7 @@
 	    fc_attach_transport(&qla2xxx_transport_vport_functions);
 	if (!qla2xxx_transport_vport_template) {
 		kmem_cache_destroy(srb_cachep);
+		qlt_exit();
 		fc_release_transport(qla2xxx_transport_template);
 		ql_log(ql_log_fatal, NULL, 0x0004,
 		    "fc_attach_transport vport failed...Failing load!.\n");
@@ -4492,6 +4585,7 @@
 	ret = pci_register_driver(&qla2xxx_pci_driver);
 	if (ret) {
 		kmem_cache_destroy(srb_cachep);
+		qlt_exit();
 		fc_release_transport(qla2xxx_transport_template);
 		fc_release_transport(qla2xxx_transport_vport_template);
 		ql_log(ql_log_fatal, NULL, 0x0006,
@@ -4511,6 +4605,7 @@
 	pci_unregister_driver(&qla2xxx_pci_driver);
 	qla2x00_release_firmware();
 	kmem_cache_destroy(srb_cachep);
+	qlt_exit();
 	if (ctx_cachep)
 		kmem_cache_destroy(ctx_cachep);
 	fc_release_transport(qla2xxx_transport_template);
diff --git a/drivers/scsi/qla2xxx/qla_target.c b/drivers/scsi/qla2xxx/qla_target.c
new file mode 100644
index 0000000..6986552
--- /dev/null
+++ b/drivers/scsi/qla2xxx/qla_target.c
@@ -0,0 +1,4972 @@
+/*
+ *  qla_target.c SCSI LLD infrastructure for QLogic 22xx/23xx/24xx/25xx
+ *
+ *  based on qla2x00t.c code:
+ *
+ *  Copyright (C) 2004 - 2010 Vladislav Bolkhovitin <vst@vlnb.net>
+ *  Copyright (C) 2004 - 2005 Leonid Stoljar
+ *  Copyright (C) 2006 Nathaniel Clark <nate@misrule.us>
+ *  Copyright (C) 2006 - 2010 ID7 Ltd.
+ *
+ *  Forward port and refactoring to modern qla2xxx and target/configfs
+ *
+ *  Copyright (C) 2010-2011 Nicholas A. Bellinger <nab@kernel.org>
+ *
+ *  This program is free software; you can redistribute it and/or
+ *  modify it under the terms of the GNU General Public License
+ *  as published by the Free Software Foundation, version 2
+ *  of the License.
+ *
+ *  This program is distributed in the hope that it will be useful,
+ *  but WITHOUT ANY WARRANTY; without even the implied warranty of
+ *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ *  GNU General Public License for more details.
+ */
+
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/types.h>
+#include <linux/blkdev.h>
+#include <linux/interrupt.h>
+#include <linux/pci.h>
+#include <linux/delay.h>
+#include <linux/list.h>
+#include <linux/workqueue.h>
+#include <asm/unaligned.h>
+#include <scsi/scsi.h>
+#include <scsi/scsi_host.h>
+#include <scsi/scsi_tcq.h>
+#include <target/target_core_base.h>
+#include <target/target_core_fabric.h>
+
+#include "qla_def.h"
+#include "qla_target.h"
+
+static char *qlini_mode = QLA2XXX_INI_MODE_STR_ENABLED;
+module_param(qlini_mode, charp, S_IRUGO);
+MODULE_PARM_DESC(qlini_mode,
+	"Determines when initiator mode will be enabled. Possible values: "
+	"\"exclusive\" - initiator mode will be enabled on load, "
+	"disabled when target mode is enabled, and re-enabled when "
+	"target mode is disabled again; "
+	"\"disabled\" - initiator mode will never be enabled; "
+	"\"enabled\" (default) - initiator mode will always stay enabled.");
+
+static int ql2x_ini_mode = QLA2XXX_INI_MODE_EXCLUSIVE;
+
+/*
+ * From scsi/fc/fc_fcp.h
+ */
+enum fcp_resp_rsp_codes {
+	FCP_TMF_CMPL = 0,
+	FCP_DATA_LEN_INVALID = 1,
+	FCP_CMND_FIELDS_INVALID = 2,
+	FCP_DATA_PARAM_MISMATCH = 3,
+	FCP_TMF_REJECTED = 4,
+	FCP_TMF_FAILED = 5,
+	FCP_TMF_INVALID_LUN = 9,
+};
+
+/*
+ * fc_pri_ta from scsi/fc/fc_fcp.h
+ */
+#define FCP_PTA_SIMPLE      0   /* simple task attribute */
+#define FCP_PTA_HEADQ       1   /* head of queue task attribute */
+#define FCP_PTA_ORDERED     2   /* ordered task attribute */
+#define FCP_PTA_ACA         4   /* auto contingent allegiance */
+#define FCP_PTA_MASK        7   /* mask for task attribute field */
+#define FCP_PRI_SHIFT       3   /* priority field starts in bit 3 */
+#define FCP_PRI_RESVD_MASK  0x80        /* reserved bits in priority field */
+
+/*
+ * This driver calls qla2x00_alloc_iocbs() and qla2x00_issue_marker(), which
+ * must be called under the HW lock and may unlock/relock it internally.
+ * That is not an issue, since in the current implementation, at the time
+ * those functions are called:
+ *
+ *   - Either context is IRQ and only IRQ handler can modify HW data,
+ *     including rings related fields,
+ *
+ *   - Or access to target mode variables from struct qla_tgt doesn't
+ *     cross those functions' boundaries, except tgt_stop, which is
+ *     additionally protected by irq_cmd_count.
+ */
+/* Predefs for callbacks handed to qla2xxx LLD */
+static void qlt_24xx_atio_pkt(struct scsi_qla_host *ha,
+	struct atio_from_isp *pkt);
+static void qlt_response_pkt(struct scsi_qla_host *ha, response_t *pkt);
+static int qlt_issue_task_mgmt(struct qla_tgt_sess *sess, uint32_t lun,
+	int fn, void *iocb, int flags);
+static void qlt_send_term_exchange(struct scsi_qla_host *ha, struct qla_tgt_cmd
+	*cmd, struct atio_from_isp *atio, int ha_locked);
+static void qlt_reject_free_srr_imm(struct scsi_qla_host *ha,
+	struct qla_tgt_srr_imm *imm, int ha_lock);
+/*
+ * Global Variables
+ */
+static struct kmem_cache *qla_tgt_cmd_cachep;
+static struct kmem_cache *qla_tgt_mgmt_cmd_cachep;
+static mempool_t *qla_tgt_mgmt_cmd_mempool;
+static struct workqueue_struct *qla_tgt_wq;
+static DEFINE_MUTEX(qla_tgt_mutex);
+static LIST_HEAD(qla_tgt_glist);
+
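+	/* Build the NOTIFY ACK by echoing fields from the incoming notify. */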
+/* ha->hardware_lock supposed to be held on entry (to protect tgt->sess_list) */
+static struct qla_tgt_sess *qlt_find_sess_by_port_name(
+	struct qla_tgt *tgt,
+	const uint8_t *port_name)
+{
+	struct qla_tgt_sess *sess;
+
+	list_for_each_entry(sess, &tgt->sess_list, sess_list_entry) {
+		if (!memcmp(sess->port_name, port_name, WWN_SIZE))
+			return sess;
+	}
+
+	return NULL;
+}
+
+/* Might release hw lock, then reacquire!! */
+static inline int qlt_issue_marker(struct scsi_qla_host *vha, int vha_locked)
+{
+	/* Send marker if required */
+	if (unlikely(vha->marker_needed != 0)) {
+		int rc = qla2x00_issue_marker(vha, vha_locked);
+		if (rc != QLA_SUCCESS) {
+			ql_dbg(ql_dbg_tgt, vha, 0xe03d,
+			    "qla_target(%d): issue_marker() failed\n",
+			    vha->vp_idx);
+		}
+		return rc;
+	}
+	return QLA_SUCCESS;
+}
+
+static inline
+struct scsi_qla_host *qlt_find_host_by_d_id(struct scsi_qla_host *vha,
+	uint8_t *d_id)
+{
+	struct qla_hw_data *ha = vha->hw;
+	uint8_t vp_idx;
+
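+	/* d_id[] holds the 24-bit port ID as {domain, area, al_pa} bytes. */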
+	if ((vha->d_id.b.area != d_id[1]) || (vha->d_id.b.domain != d_id[0]))
+		return NULL;
+
+	if (vha->d_id.b.al_pa == d_id[2])
+		return vha;
+
+	BUG_ON(ha->tgt.tgt_vp_map == NULL);
+	vp_idx = ha->tgt.tgt_vp_map[d_id[2]].idx;
+	if (likely(test_bit(vp_idx, ha->vp_idx_map)))
+		return ha->tgt.tgt_vp_map[vp_idx].vha;
+
+	return NULL;
+}
+
+static inline
+struct scsi_qla_host *qlt_find_host_by_vp_idx(struct scsi_qla_host *vha,
+	uint16_t vp_idx)
+{
+	struct qla_hw_data *ha = vha->hw;
+
+	if (vha->vp_idx == vp_idx)
+		return vha;
+
+	BUG_ON(ha->tgt.tgt_vp_map == NULL);
+	if (likely(test_bit(vp_idx, ha->vp_idx_map)))
+		return ha->tgt.tgt_vp_map[vp_idx].vha;
+
+	return NULL;
+}
+
+void qlt_24xx_atio_pkt_all_vps(struct scsi_qla_host *vha,
+	struct atio_from_isp *atio)
+{
+	switch (atio->u.raw.entry_type) {
+	case ATIO_TYPE7:
+	{
+		struct scsi_qla_host *host = qlt_find_host_by_d_id(vha,
+		    atio->u.isp24.fcp_hdr.d_id);
+		if (unlikely(NULL == host)) {
+			ql_dbg(ql_dbg_tgt, vha, 0xe03e,
+			    "qla_target(%d): Received ATIO_TYPE7 "
+			    "with unknown d_id %x:%x:%x\n", vha->vp_idx,
+			    atio->u.isp24.fcp_hdr.d_id[0],
+			    atio->u.isp24.fcp_hdr.d_id[1],
+			    atio->u.isp24.fcp_hdr.d_id[2]);
+			break;
+		}
+		qlt_24xx_atio_pkt(host, atio);
+		break;
+	}
+
+	case IMMED_NOTIFY_TYPE:
+	{
+		struct scsi_qla_host *host = vha;
+		struct imm_ntfy_from_isp *entry =
+		    (struct imm_ntfy_from_isp *)atio;
+
+		if ((entry->u.isp24.vp_index != 0xFF) &&
+		    (entry->u.isp24.nport_handle != 0xFFFF)) {
+			host = qlt_find_host_by_vp_idx(vha,
+			    entry->u.isp24.vp_index);
+			if (unlikely(!host)) {
+				ql_dbg(ql_dbg_tgt, vha, 0xe03f,
+				    "qla_target(%d): Received "
+				    "ATIO (IMMED_NOTIFY_TYPE) "
+				    "with unknown vp_index %d\n",
+				    vha->vp_idx, entry->u.isp24.vp_index);
+				break;
+			}
+		}
+		qlt_24xx_atio_pkt(host, atio);
+		break;
+	}
+
+	default:
+		ql_dbg(ql_dbg_tgt, vha, 0xe040,
+		    "qla_target(%d): Received unknown ATIO atio "
+		    "type %x\n", vha->vp_idx, atio->u.raw.entry_type);
+		break;
+	}
+
+	return;
+}
+
+void qlt_response_pkt_all_vps(struct scsi_qla_host *vha, response_t *pkt)
+{
+	switch (pkt->entry_type) {
+	case CTIO_TYPE7:
+	{
+		struct ctio7_from_24xx *entry = (struct ctio7_from_24xx *)pkt;
+		struct scsi_qla_host *host = qlt_find_host_by_vp_idx(vha,
+		    entry->vp_index);
+		if (unlikely(!host)) {
+			ql_dbg(ql_dbg_tgt, vha, 0xe041,
+			    "qla_target(%d): Response pkt (CTIO_TYPE7) "
+			    "received, with unknown vp_index %d\n",
+			    vha->vp_idx, entry->vp_index);
+			break;
+		}
+		qlt_response_pkt(host, pkt);
+		break;
+	}
+
+	case IMMED_NOTIFY_TYPE:
+	{
+		struct scsi_qla_host *host = vha;
+		struct imm_ntfy_from_isp *entry =
+		    (struct imm_ntfy_from_isp *)pkt;
+
+		host = qlt_find_host_by_vp_idx(vha, entry->u.isp24.vp_index);
+		if (unlikely(!host)) {
+			ql_dbg(ql_dbg_tgt, vha, 0xe042,
+			    "qla_target(%d): Response pkt (IMMED_NOTIFY_TYPE) "
+			    "received, with unknown vp_index %d\n",
+			    vha->vp_idx, entry->u.isp24.vp_index);
+			break;
+		}
+		qlt_response_pkt(host, pkt);
+		break;
+	}
+
+	case NOTIFY_ACK_TYPE:
+	{
+		struct scsi_qla_host *host = vha;
+		struct nack_to_isp *entry = (struct nack_to_isp *)pkt;
+
+		if (0xFF != entry->u.isp24.vp_index) {
+			host = qlt_find_host_by_vp_idx(vha,
+			    entry->u.isp24.vp_index);
+			if (unlikely(!host)) {
+				ql_dbg(ql_dbg_tgt, vha, 0xe043,
+				    "qla_target(%d): Response "
+				    "pkt (NOTIFY_ACK_TYPE) "
+				    "received, with unknown "
+				    "vp_index %d\n", vha->vp_idx,
+				    entry->u.isp24.vp_index);
+				break;
+			}
+		}
+		qlt_response_pkt(host, pkt);
+		break;
+	}
+
+	case ABTS_RECV_24XX:
+	{
+		struct abts_recv_from_24xx *entry =
+		    (struct abts_recv_from_24xx *)pkt;
+		struct scsi_qla_host *host = qlt_find_host_by_vp_idx(vha,
+		    entry->vp_index);
+		if (unlikely(!host)) {
+			ql_dbg(ql_dbg_tgt, vha, 0xe044,
+			    "qla_target(%d): Response pkt "
+			    "(ABTS_RECV_24XX) received, with unknown "
+			    "vp_index %d\n", vha->vp_idx, entry->vp_index);
+			break;
+		}
+		qlt_response_pkt(host, pkt);
+		break;
+	}
+
+	case ABTS_RESP_24XX:
+	{
+		struct abts_resp_to_24xx *entry =
+		    (struct abts_resp_to_24xx *)pkt;
+		struct scsi_qla_host *host = qlt_find_host_by_vp_idx(vha,
+		    entry->vp_index);
+		if (unlikely(!host)) {
+			ql_dbg(ql_dbg_tgt, vha, 0xe045,
+			    "qla_target(%d): Response pkt "
+			    "(ABTS_RECV_24XX) received, with unknown "
+			    "vp_index %d\n", vha->vp_idx, entry->vp_index);
+			break;
+		}
+		qlt_response_pkt(host, pkt);
+		break;
+	}
+
+	default:
+		qlt_response_pkt(vha, pkt);
+		break;
+	}
+
+}
+
+static void qlt_free_session_done(struct work_struct *work)
+{
+	struct qla_tgt_sess *sess = container_of(work, struct qla_tgt_sess,
+	    free_work);
+	struct qla_tgt *tgt = sess->tgt;
+	struct scsi_qla_host *vha = sess->vha;
+	struct qla_hw_data *ha = vha->hw;
+
+	BUG_ON(!tgt);
+	/*
+	 * Release the target session for FC Nexus from fabric module code.
+	 */
+	if (sess->se_sess != NULL)
+		ha->tgt.tgt_ops->free_session(sess);
+
+	ql_dbg(ql_dbg_tgt_mgt, vha, 0xf001,
+	    "Unregistration of sess %p finished\n", sess);
+
+	kfree(sess);
+	/*
+	 * We need to protect against a race where tgt is freed before or
+	 * inside wake_up().
+	 */
+	tgt->sess_count--;
+	if (tgt->sess_count == 0)
+		wake_up_all(&tgt->waitQ);
+}
+
+/* ha->hardware_lock supposed to be held on entry */
+void qlt_unreg_sess(struct qla_tgt_sess *sess)
+{
+	struct scsi_qla_host *vha = sess->vha;
+
+	vha->hw->tgt.tgt_ops->clear_nacl_from_fcport_map(sess);
+
+	list_del(&sess->sess_list_entry);
+	if (sess->deleted)
+		list_del(&sess->del_list_entry);
+
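+	/*
+	 * Defer the actual teardown to process context; this function is
+	 * called with ha->hardware_lock held, while freeing the session
+	 * may need to sleep.
+	 */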
+	INIT_WORK(&sess->free_work, qlt_free_session_done);
+	schedule_work(&sess->free_work);
+}
+EXPORT_SYMBOL(qlt_unreg_sess);
+
+/* ha->hardware_lock supposed to be held on entry */
+static int qlt_reset(struct scsi_qla_host *vha, void *iocb, int mcmd)
+{
+	struct qla_hw_data *ha = vha->hw;
+	struct qla_tgt_sess *sess = NULL;
+	uint32_t unpacked_lun, lun = 0;
+	uint16_t loop_id;
+	int res = 0;
+	struct imm_ntfy_from_isp *n = (struct imm_ntfy_from_isp *)iocb;
+	struct atio_from_isp *a = (struct atio_from_isp *)iocb;
+
+	loop_id = le16_to_cpu(n->u.isp24.nport_handle);
+	if (loop_id == 0xFFFF) {
+#if 0 /* FIXME: Re-enable Global event handling.. */
+		/* Global event */
+		atomic_inc(&ha->tgt.qla_tgt->tgt_global_resets_count);
+		qlt_clear_tgt_db(ha->tgt.qla_tgt, 1);
+		if (!list_empty(&ha->tgt.qla_tgt->sess_list)) {
+			sess = list_entry(ha->tgt.qla_tgt->sess_list.next,
+			    typeof(*sess), sess_list_entry);
+			switch (mcmd) {
+			case QLA_TGT_NEXUS_LOSS_SESS:
+				mcmd = QLA_TGT_NEXUS_LOSS;
+				break;
+			case QLA_TGT_ABORT_ALL_SESS:
+				mcmd = QLA_TGT_ABORT_ALL;
+				break;
+			case QLA_TGT_NEXUS_LOSS:
+			case QLA_TGT_ABORT_ALL:
+				break;
+			default:
+				ql_dbg(ql_dbg_tgt, vha, 0xe046,
+				    "qla_target(%d): Not allowed "
+				    "command %x in %s", vha->vp_idx,
+				    mcmd, __func__);
+				sess = NULL;
+				break;
+			}
+		} else
+			sess = NULL;
+#endif
+	} else {
+		sess = ha->tgt.tgt_ops->find_sess_by_loop_id(vha, loop_id);
+	}
+
+	ql_dbg(ql_dbg_tgt, vha, 0xe000,
+	    "Using sess for qla_tgt_reset: %p\n", sess);
+	if (!sess) {
+		res = -ESRCH;
+		return res;
+	}
+
+	ql_dbg(ql_dbg_tgt, vha, 0xe047,
+	    "scsi(%ld): resetting (session %p from port "
+	    "%02x:%02x:%02x:%02x:%02x:%02x:%02x:%02x, "
+	    "mcmd %x, loop_id %d)\n", vha->host_no, sess,
+	    sess->port_name[0], sess->port_name[1],
+	    sess->port_name[2], sess->port_name[3],
+	    sess->port_name[4], sess->port_name[5],
+	    sess->port_name[6], sess->port_name[7],
+	    mcmd, loop_id);
+
+	lun = a->u.isp24.fcp_cmnd.lun;
+	unpacked_lun = scsilun_to_int((struct scsi_lun *)&lun);
+
+	return qlt_issue_task_mgmt(sess, unpacked_lun, mcmd,
+	    iocb, QLA24XX_MGMT_SEND_NACK);
+}
+
+/* ha->hardware_lock supposed to be held on entry */
+static void qlt_schedule_sess_for_deletion(struct qla_tgt_sess *sess,
+	bool immediate)
+{
+	struct qla_tgt *tgt = sess->tgt;
+	uint32_t dev_loss_tmo = tgt->ha->port_down_retry_count + 5;
+
+	if (sess->deleted)
+		return;
+
+	ql_dbg(ql_dbg_tgt, sess->vha, 0xe001,
+	    "Scheduling sess %p for deletion\n", sess);
+	list_add_tail(&sess->del_list_entry, &tgt->del_sess_list);
+	sess->deleted = 1;
+
+	if (immediate)
+		dev_loss_tmo = 0;
+
+	sess->expires = jiffies + dev_loss_tmo * HZ;
+
+	ql_dbg(ql_dbg_tgt, sess->vha, 0xe048,
+	    "qla_target(%d): session for port %02x:%02x:%02x:"
+	    "%02x:%02x:%02x:%02x:%02x (loop ID %d) scheduled for "
+	    "deletion in %u secs (expires: %lu) immed: %d\n",
+	    sess->vha->vp_idx,
+	    sess->port_name[0], sess->port_name[1],
+	    sess->port_name[2], sess->port_name[3],
+	    sess->port_name[4], sess->port_name[5],
+	    sess->port_name[6], sess->port_name[7],
+	    sess->loop_id, dev_loss_tmo, sess->expires, immediate);
+
+	if (immediate)
+		schedule_delayed_work(&tgt->sess_del_work, 0);
+	else
+		schedule_delayed_work(&tgt->sess_del_work,
+		    sess->expires - jiffies);
+}
+
+/* ha->hardware_lock supposed to be held on entry */
+static void qlt_clear_tgt_db(struct qla_tgt *tgt, bool local_only)
+{
+	struct qla_tgt_sess *sess;
+
+	list_for_each_entry(sess, &tgt->sess_list, sess_list_entry)
+		qlt_schedule_sess_for_deletion(sess, true);
+
+	/* At this point tgt could be already dead */
+}
+
+static int qla24xx_get_loop_id(struct scsi_qla_host *vha, const uint8_t *s_id,
+	uint16_t *loop_id)
+{
+	struct qla_hw_data *ha = vha->hw;
+	dma_addr_t gid_list_dma;
+	struct gid_list_info *gid_list;
+	char *id_iter;
+	int res, rc, i;
+	uint16_t entries;
+
+	gid_list = dma_alloc_coherent(&ha->pdev->dev, qla2x00_gid_list_size(ha),
+	    &gid_list_dma, GFP_KERNEL);
+	if (!gid_list) {
+		ql_dbg(ql_dbg_tgt_mgt, vha, 0xf044,
+		    "qla_target(%d): DMA allocation of %u bytes failed\n",
+		    vha->vp_idx, qla2x00_gid_list_size(ha));
+		return -ENOMEM;
+	}
+
+	/* Get list of logged in devices */
+	rc = qla2x00_get_id_list(vha, gid_list, gid_list_dma, &entries);
+	if (rc != QLA_SUCCESS) {
+		ql_dbg(ql_dbg_tgt_mgt, vha, 0xf045,
+		    "qla_target(%d): get_id_list() failed: %x\n",
+		    vha->vp_idx, rc);
+		res = -1;
+		goto out_free_id_list;
+	}
+
+	id_iter = (char *)gid_list;
+	res = -1;
+	for (i = 0; i < entries; i++) {
+		struct gid_list_info *gid = (struct gid_list_info *)id_iter;
+		if ((gid->al_pa == s_id[2]) &&
+		    (gid->area == s_id[1]) &&
+		    (gid->domain == s_id[0])) {
+			*loop_id = le16_to_cpu(gid->loop_id);
+			res = 0;
+			break;
+		}
+		id_iter += ha->gid_list_info_size;
+	}
+
+out_free_id_list:
+	dma_free_coherent(&ha->pdev->dev, qla2x00_gid_list_size(ha),
+	    gid_list, gid_list_dma);
+	return res;
+}
+
+static bool qlt_check_fcport_exist(struct scsi_qla_host *vha,
+	struct qla_tgt_sess *sess)
+{
+	struct qla_hw_data *ha = vha->hw;
+	struct qla_port_24xx_data *pmap24;
+	bool res, found = false;
+	int rc, i;
+	uint16_t loop_id = 0xFFFF; /* to eliminate compiler's warning */
+	uint16_t entries;
+	void *pmap;
+	int pmap_len;
+	fc_port_t *fcport;
+	int global_resets;
+
+retry:
+	global_resets = atomic_read(&ha->tgt.qla_tgt->tgt_global_resets_count);
+
+	rc = qla2x00_get_node_name_list(vha, &pmap, &pmap_len);
+	if (rc != QLA_SUCCESS) {
+		res = false;
+		goto out;
+	}
+
+	pmap24 = pmap;
+	entries = pmap_len/sizeof(*pmap24);
+
+	for (i = 0; i < entries; ++i) {
+		if (!memcmp(sess->port_name, pmap24[i].port_name, WWN_SIZE)) {
+			loop_id = le16_to_cpu(pmap24[i].loop_id);
+			found = true;
+			break;
+		}
+	}
+
+	kfree(pmap);
+
+	if (!found) {
+		res = false;
+		goto out;
+	}
+
+	ql_dbg(ql_dbg_tgt_mgt, vha, 0xf046,
+	    "qlt_check_fcport_exist(): loop_id %d", loop_id);
+
+	fcport = kzalloc(sizeof(*fcport), GFP_KERNEL);
+	if (fcport == NULL) {
+		ql_dbg(ql_dbg_tgt_mgt, vha, 0xf047,
+		    "qla_target(%d): Allocation of tmp FC port failed",
+		    vha->vp_idx);
+		res = false;
+		goto out;
+	}
+
+	fcport->loop_id = loop_id;
+
+	rc = qla2x00_get_port_database(vha, fcport, 0);
+	if (rc != QLA_SUCCESS) {
+		ql_dbg(ql_dbg_tgt_mgt, vha, 0xf048,
+		    "qla_target(%d): Failed to retrieve fcport "
+		    "information -- get_port_database() returned %x "
+		    "(loop_id=0x%04x)", vha->vp_idx, rc, loop_id);
+		res = false;
+		goto out_free_fcport;
+	}
+
+	if (global_resets !=
+	    atomic_read(&ha->tgt.qla_tgt->tgt_global_resets_count)) {
+		ql_dbg(ql_dbg_tgt_mgt, vha, 0xf002,
+		    "qla_target(%d): global reset during session discovery"
+		    " (counter was %d, new %d), retrying",
+		    vha->vp_idx, global_resets,
+		    atomic_read(&ha->tgt.qla_tgt->tgt_global_resets_count));
+		goto retry;
+	}
+
+	ql_dbg(ql_dbg_tgt_mgt, vha, 0xf003,
+	    "Updating sess %p s_id %x:%x:%x, loop_id %d) to d_id %x:%x:%x, "
+	    "Updating sess %p s_id %x:%x:%x, loop_id %d to d_id %x:%x:%x, "
+	    sess->s_id.b.area, sess->loop_id, fcport->d_id.b.domain,
+	    fcport->d_id.b.al_pa, fcport->d_id.b.area, fcport->loop_id);
+
+	sess->s_id = fcport->d_id;
+	sess->loop_id = fcport->loop_id;
+	sess->conf_compl_supported = !!(fcport->flags &
+	    FCF_CONF_COMP_SUPPORTED);
+
+	res = true;
+
+out_free_fcport:
+	kfree(fcport);
+
+out:
+	return res;
+}
+
+/* ha->hardware_lock supposed to be held on entry */
+static void qlt_undelete_sess(struct qla_tgt_sess *sess)
+{
+	BUG_ON(!sess->deleted);
+
+	list_del(&sess->del_list_entry);
+	sess->deleted = 0;
+}
+
+static void qlt_del_sess_work_fn(struct delayed_work *work)
+{
+	struct qla_tgt *tgt = container_of(work, struct qla_tgt,
+	    sess_del_work);
+	struct scsi_qla_host *vha = tgt->vha;
+	struct qla_hw_data *ha = vha->hw;
+	struct qla_tgt_sess *sess;
+	unsigned long flags;
+
+	spin_lock_irqsave(&ha->hardware_lock, flags);
+	while (!list_empty(&tgt->del_sess_list)) {
+		sess = list_entry(tgt->del_sess_list.next, typeof(*sess),
+		    del_list_entry);
+		if (time_after_eq(jiffies, sess->expires)) {
+			bool cancel;
+
+			qlt_undelete_sess(sess);
+
+			spin_unlock_irqrestore(&ha->hardware_lock, flags);
+			cancel = qlt_check_fcport_exist(vha, sess);
+
+			if (cancel) {
+				if (sess->deleted) {
+					/*
+					 * sess was again deleted while we were
+					 * discovering it
+					 */
+					spin_lock_irqsave(&ha->hardware_lock,
+					    flags);
+					continue;
+				}
+
+				ql_dbg(ql_dbg_tgt_mgt, vha, 0xf049,
+				    "qla_target(%d): cancel deletion of "
+				    "session for port %02x:%02x:%02x:%02x:%02x:"
+				    "%02x:%02x:%02x (loop ID %d), because "
+				    " it isn't deleted by firmware",
+				    vha->vp_idx, sess->port_name[0],
+				    sess->port_name[1], sess->port_name[2],
+				    sess->port_name[3], sess->port_name[4],
+				    sess->port_name[5], sess->port_name[6],
+				    sess->port_name[7], sess->loop_id);
+			} else {
+				ql_dbg(ql_dbg_tgt_mgt, vha, 0xf004,
+				    "Timeout: sess %p about to be deleted\n",
+				    sess);
+				ha->tgt.tgt_ops->shutdown_sess(sess);
+				ha->tgt.tgt_ops->put_sess(sess);
+			}
+
+			spin_lock_irqsave(&ha->hardware_lock, flags);
+		} else {
+			schedule_delayed_work(&tgt->sess_del_work,
+			    sess->expires - jiffies);
+			break;
+		}
+	}
+	spin_unlock_irqrestore(&ha->hardware_lock, flags);
+}
+
+/*
+ * Adds an extra ref so the hw lock can be dropped after adding the sess
+ * to the list. Caller must put it.
+ */
+static struct qla_tgt_sess *qlt_create_sess(
+	struct scsi_qla_host *vha,
+	fc_port_t *fcport,
+	bool local)
+{
+	struct qla_hw_data *ha = vha->hw;
+	struct qla_tgt_sess *sess;
+	unsigned long flags;
+	unsigned char be_sid[3];
+
+	/* Check to avoid double sessions */
+	spin_lock_irqsave(&ha->hardware_lock, flags);
+	list_for_each_entry(sess, &ha->tgt.qla_tgt->sess_list,
+				sess_list_entry) {
+		if (!memcmp(sess->port_name, fcport->port_name, WWN_SIZE)) {
+			ql_dbg(ql_dbg_tgt_mgt, vha, 0xf005,
+			    "Double sess %p found (s_id %x:%x:%x, "
+			    "loop_id %d), updating to d_id %x:%x:%x, "
+			    "loop_id %d", sess, sess->s_id.b.domain,
+			    sess->s_id.b.al_pa, sess->s_id.b.area,
+			    sess->loop_id, fcport->d_id.b.domain,
+			    fcport->d_id.b.al_pa, fcport->d_id.b.area,
+			    fcport->loop_id);
+
+			if (sess->deleted)
+				qlt_undelete_sess(sess);
+
+			kref_get(&sess->se_sess->sess_kref);
+			sess->s_id = fcport->d_id;
+			sess->loop_id = fcport->loop_id;
+			sess->conf_compl_supported = !!(fcport->flags &
+			    FCF_CONF_COMP_SUPPORTED);
+			if (sess->local && !local)
+				sess->local = 0;
+			spin_unlock_irqrestore(&ha->hardware_lock, flags);
+
+			return sess;
+		}
+	}
+	spin_unlock_irqrestore(&ha->hardware_lock, flags);
+
+	sess = kzalloc(sizeof(*sess), GFP_KERNEL);
+	if (!sess) {
+		ql_dbg(ql_dbg_tgt_mgt, vha, 0xf04a,
+		    "qla_target(%u): session allocation failed, "
+		    "all commands from port %02x:%02x:%02x:%02x:"
+		    "%02x:%02x:%02x:%02x will be refused", vha->vp_idx,
+		    fcport->port_name[0], fcport->port_name[1],
+		    fcport->port_name[2], fcport->port_name[3],
+		    fcport->port_name[4], fcport->port_name[5],
+		    fcport->port_name[6], fcport->port_name[7]);
+
+		return NULL;
+	}
+	sess->tgt = ha->tgt.qla_tgt;
+	sess->vha = vha;
+	sess->s_id = fcport->d_id;
+	sess->loop_id = fcport->loop_id;
+	sess->local = local;
+
+	ql_dbg(ql_dbg_tgt_mgt, vha, 0xf006,
+	    "Adding sess %p to tgt %p via ->check_initiator_node_acl()\n",
+	    sess, ha->tgt.qla_tgt);
+
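+	/* Pack the S_ID as {domain, area, al_pa} bytes for the fabric module. */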
+	be_sid[0] = sess->s_id.b.domain;
+	be_sid[1] = sess->s_id.b.area;
+	be_sid[2] = sess->s_id.b.al_pa;
+	/*
+	 * Determine if this fc_port->port_name is allowed to access
+	 * target mode using explicit NodeACLs+MappedLUNs, or using
+	 * TPG demo mode.  If this is successful a target mode FC nexus
+	 * is created.
+	 */
+	if (ha->tgt.tgt_ops->check_initiator_node_acl(vha,
+	    &fcport->port_name[0], sess, &be_sid[0], fcport->loop_id) < 0) {
+		kfree(sess);
+		return NULL;
+	}
+	/*
+	 * Take an extra reference to ->sess_kref here to handle qla_tgt_sess
+	 * access across ->hardware_lock reacquire.
+	 */
+	kref_get(&sess->se_sess->sess_kref);
+
+	sess->conf_compl_supported = !!(fcport->flags &
+	    FCF_CONF_COMP_SUPPORTED);
+	BUILD_BUG_ON(sizeof(sess->port_name) != sizeof(fcport->port_name));
+	memcpy(sess->port_name, fcport->port_name, sizeof(sess->port_name));
+
+	spin_lock_irqsave(&ha->hardware_lock, flags);
+	list_add_tail(&sess->sess_list_entry, &ha->tgt.qla_tgt->sess_list);
+	ha->tgt.qla_tgt->sess_count++;
+	spin_unlock_irqrestore(&ha->hardware_lock, flags);
+
+	ql_dbg(ql_dbg_tgt_mgt, vha, 0xf04b,
+	    "qla_target(%d): %ssession for wwn %02x:%02x:%02x:%02x:"
+	    "%02x:%02x:%02x:%02x (loop_id %d, s_id %x:%x:%x, confirmed"
+	    " completion %ssupported) added\n",
+	    vha->vp_idx, local ?  "local " : "", fcport->port_name[0],
+	    fcport->port_name[1], fcport->port_name[2], fcport->port_name[3],
+	    fcport->port_name[4], fcport->port_name[5], fcport->port_name[6],
+	    fcport->port_name[7], fcport->loop_id, sess->s_id.b.domain,
+	    sess->s_id.b.area, sess->s_id.b.al_pa, sess->conf_compl_supported ?
+	    "" : "not ");
+
+	return sess;
+}
+
+/*
+ * Called from drivers/scsi/qla2xxx/qla_init.c:qla2x00_reg_remote_port()
+ */
+void qlt_fc_port_added(struct scsi_qla_host *vha, fc_port_t *fcport)
+{
+	struct qla_hw_data *ha = vha->hw;
+	struct qla_tgt *tgt = ha->tgt.qla_tgt;
+	struct qla_tgt_sess *sess;
+	unsigned long flags;
+
+	if (!vha->hw->tgt.tgt_ops)
+		return;
+
+	if (!tgt || (fcport->port_type != FCT_INITIATOR))
+		return;
+
+	spin_lock_irqsave(&ha->hardware_lock, flags);
+	if (tgt->tgt_stop) {
+		spin_unlock_irqrestore(&ha->hardware_lock, flags);
+		return;
+	}
+	sess = qlt_find_sess_by_port_name(tgt, fcport->port_name);
+	if (!sess) {
+		spin_unlock_irqrestore(&ha->hardware_lock, flags);
+
+		mutex_lock(&ha->tgt.tgt_mutex);
+		sess = qlt_create_sess(vha, fcport, false);
+		mutex_unlock(&ha->tgt.tgt_mutex);
+
+		spin_lock_irqsave(&ha->hardware_lock, flags);
+	} else {
+		kref_get(&sess->se_sess->sess_kref);
+
+		if (sess->deleted) {
+			qlt_undelete_sess(sess);
+
+			ql_dbg(ql_dbg_tgt_mgt, vha, 0xf04c,
+			    "qla_target(%u): %ssession for port %02x:"
+			    "%02x:%02x:%02x:%02x:%02x:%02x:%02x (loop ID %d) "
+			    "reappeared\n", vha->vp_idx, sess->local ? "local "
+			    : "", sess->port_name[0], sess->port_name[1],
+			    sess->port_name[2], sess->port_name[3],
+			    sess->port_name[4], sess->port_name[5],
+			    sess->port_name[6], sess->port_name[7],
+			    sess->loop_id);
+
+			ql_dbg(ql_dbg_tgt_mgt, vha, 0xf007,
+			    "Reappeared sess %p\n", sess);
+		}
+		sess->s_id = fcport->d_id;
+		sess->loop_id = fcport->loop_id;
+		sess->conf_compl_supported = !!(fcport->flags &
+		    FCF_CONF_COMP_SUPPORTED);
+	}
+
+	if (sess && sess->local) {
+		ql_dbg(ql_dbg_tgt_mgt, vha, 0xf04d,
+		    "qla_target(%u): local session for "
+		    "port %02x:%02x:%02x:%02x:%02x:%02x:%02x:%02x "
+		    "(loop ID %d) became global\n", vha->vp_idx,
+		    fcport->port_name[0], fcport->port_name[1],
+		    fcport->port_name[2], fcport->port_name[3],
+		    fcport->port_name[4], fcport->port_name[5],
+		    fcport->port_name[6], fcport->port_name[7],
+		    sess->loop_id);
+		sess->local = 0;
+	}
+	spin_unlock_irqrestore(&ha->hardware_lock, flags);
+
+	ha->tgt.tgt_ops->put_sess(sess);
+}
+
+void qlt_fc_port_deleted(struct scsi_qla_host *vha, fc_port_t *fcport)
+{
+	struct qla_hw_data *ha = vha->hw;
+	struct qla_tgt *tgt = ha->tgt.qla_tgt;
+	struct qla_tgt_sess *sess;
+	unsigned long flags;
+
+	if (!vha->hw->tgt.tgt_ops)
+		return;
+
+	if (!tgt || (fcport->port_type != FCT_INITIATOR))
+		return;
+
+	spin_lock_irqsave(&ha->hardware_lock, flags);
+	if (tgt->tgt_stop) {
+		spin_unlock_irqrestore(&ha->hardware_lock, flags);
+		return;
+	}
+	sess = qlt_find_sess_by_port_name(tgt, fcport->port_name);
+	if (!sess) {
+		spin_unlock_irqrestore(&ha->hardware_lock, flags);
+		return;
+	}
+
+	ql_dbg(ql_dbg_tgt_mgt, vha, 0xf008, "qla_tgt_fc_port_deleted %p", sess);
+
+	sess->local = 1;
+	qlt_schedule_sess_for_deletion(sess, false);
+	spin_unlock_irqrestore(&ha->hardware_lock, flags);
+}
+
+static inline int test_tgt_sess_count(struct qla_tgt *tgt)
+{
+	struct qla_hw_data *ha = tgt->ha;
+	unsigned long flags;
+	int res;
+	/*
+	 * We need to protect against a race where tgt is freed before or
+	 * inside wake_up().
+	 */
+	spin_lock_irqsave(&ha->hardware_lock, flags);
+	ql_dbg(ql_dbg_tgt, tgt->vha, 0xe002,
+	    "tgt %p, empty(sess_list)=%d sess_count=%d\n",
+	    tgt, list_empty(&tgt->sess_list), tgt->sess_count);
+	res = (tgt->sess_count == 0);
+	spin_unlock_irqrestore(&ha->hardware_lock, flags);
+
+	return res;
+}
+
+/* Called by tcm_qla2xxx configfs code */
+void qlt_stop_phase1(struct qla_tgt *tgt)
+{
+	struct scsi_qla_host *vha = tgt->vha;
+	struct qla_hw_data *ha = tgt->ha;
+	unsigned long flags;
+
+	if (tgt->tgt_stop || tgt->tgt_stopped) {
+		ql_dbg(ql_dbg_tgt_mgt, vha, 0xf04e,
+		    "Already in tgt->tgt_stop or tgt_stopped state\n");
+		dump_stack();
+		return;
+	}
+
+	ql_dbg(ql_dbg_tgt, vha, 0xe003, "Stopping target for host %ld(%p)\n",
+	    vha->host_no, vha);
+	/*
+	 * Mutex needed to sync with qla_tgt_fc_port_[added,deleted].
+	 * Lock is needed, because we can still get an incoming packet.
+	 */
+	mutex_lock(&ha->tgt.tgt_mutex);
+	spin_lock_irqsave(&ha->hardware_lock, flags);
+	tgt->tgt_stop = 1;
+	qlt_clear_tgt_db(tgt, true);
+	spin_unlock_irqrestore(&ha->hardware_lock, flags);
+	mutex_unlock(&ha->tgt.tgt_mutex);
+
+	flush_delayed_work_sync(&tgt->sess_del_work);
+
+	ql_dbg(ql_dbg_tgt_mgt, vha, 0xf009,
+	    "Waiting for sess works (tgt %p)", tgt);
+	spin_lock_irqsave(&tgt->sess_work_lock, flags);
+	while (!list_empty(&tgt->sess_works_list)) {
+		spin_unlock_irqrestore(&tgt->sess_work_lock, flags);
+		flush_scheduled_work();
+		spin_lock_irqsave(&tgt->sess_work_lock, flags);
+	}
+	spin_unlock_irqrestore(&tgt->sess_work_lock, flags);
+
+	ql_dbg(ql_dbg_tgt_mgt, vha, 0xf00a,
+	    "Waiting for tgt %p: list_empty(sess_list)=%d "
+	    "sess_count=%d\n", tgt, list_empty(&tgt->sess_list),
+	    tgt->sess_count);
+
+	wait_event(tgt->waitQ, test_tgt_sess_count(tgt));
+
+	/* Big hammer */
+	if (!ha->flags.host_shutting_down && qla_tgt_mode_enabled(vha))
+		qlt_disable_vha(vha);
+
+	/* Wait for sessions to clear out (just in case) */
+	wait_event(tgt->waitQ, test_tgt_sess_count(tgt));
+}
+EXPORT_SYMBOL(qlt_stop_phase1);
+
+/* Called by tcm_qla2xxx configfs code */
+void qlt_stop_phase2(struct qla_tgt *tgt)
+{
+	struct qla_hw_data *ha = tgt->ha;
+	unsigned long flags;
+
+	if (tgt->tgt_stopped) {
+		ql_dbg(ql_dbg_tgt_mgt, tgt->vha, 0xf04f,
+		    "Already in tgt->tgt_stopped state\n");
+		dump_stack();
+		return;
+	}
+
+	ql_dbg(ql_dbg_tgt_mgt, tgt->vha, 0xf00b,
+	    "Waiting for %d IRQ commands to complete (tgt %p)",
+	    tgt->irq_cmd_count, tgt);
+
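+	/*
+	 * Busy-wait (dropping the hardware lock in between) until the IRQ
+	 * path has drained all in-flight target commands.
+	 */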
+	mutex_lock(&ha->tgt.tgt_mutex);
+	spin_lock_irqsave(&ha->hardware_lock, flags);
+	while (tgt->irq_cmd_count != 0) {
+		spin_unlock_irqrestore(&ha->hardware_lock, flags);
+		udelay(2);
+		spin_lock_irqsave(&ha->hardware_lock, flags);
+	}
+	tgt->tgt_stop = 0;
+	tgt->tgt_stopped = 1;
+	spin_unlock_irqrestore(&ha->hardware_lock, flags);
+	mutex_unlock(&ha->tgt.tgt_mutex);
+
+	ql_dbg(ql_dbg_tgt_mgt, tgt->vha, 0xf00c, "Stop of tgt %p finished",
+	    tgt);
+}
+EXPORT_SYMBOL(qlt_stop_phase2);
+
+/* Called from qlt_remove_target() -> qla2x00_remove_one() */
+void qlt_release(struct qla_tgt *tgt)
+{
+	struct qla_hw_data *ha = tgt->ha;
+
+	if ((ha->tgt.qla_tgt != NULL) && !tgt->tgt_stopped)
+		qlt_stop_phase2(tgt);
+
+	ha->tgt.qla_tgt = NULL;
+
+	ql_dbg(ql_dbg_tgt_mgt, tgt->vha, 0xf00d,
+	    "Release of tgt %p finished\n", tgt);
+
+	kfree(tgt);
+}
+
+/* ha->hardware_lock supposed to be held on entry */
+static int qlt_sched_sess_work(struct qla_tgt *tgt, int type,
+	const void *param, unsigned int param_size)
+{
+	struct qla_tgt_sess_work_param *prm;
+	unsigned long flags;
+
+	prm = kzalloc(sizeof(*prm), GFP_ATOMIC);
+	if (!prm) {
+		ql_dbg(ql_dbg_tgt_mgt, tgt->vha, 0xf050,
+		    "qla_target(%d): Unable to create session "
+		    "work, command will be refused", 0);
+		return -ENOMEM;
+	}
+
+	ql_dbg(ql_dbg_tgt_mgt, tgt->vha, 0xf00e,
+	    "Scheduling work (type %d, prm %p)"
+	    " to find session for param %p (size %d, tgt %p)\n",
+	    type, prm, param, param_size, tgt);
+
+	prm->type = type;
+	memcpy(&prm->tm_iocb, param, param_size);
+
+	spin_lock_irqsave(&tgt->sess_work_lock, flags);
+	list_add_tail(&prm->sess_works_list_entry, &tgt->sess_works_list);
+	spin_unlock_irqrestore(&tgt->sess_work_lock, flags);
+
+	schedule_work(&tgt->sess_work);
+
+	return 0;
+}
+
+/*
+ * ha->hardware_lock supposed to be held on entry. Might drop it, then reacquire
+ */
+static void qlt_send_notify_ack(struct scsi_qla_host *vha,
+	struct imm_ntfy_from_isp *ntfy,
+	uint32_t add_flags, uint16_t resp_code, int resp_code_valid,
+	uint16_t srr_flags, uint16_t srr_reject_code, uint8_t srr_explan)
+{
+	struct qla_hw_data *ha = vha->hw;
+	request_t *pkt;
+	struct nack_to_isp *nack;
+
+	ql_dbg(ql_dbg_tgt, vha, 0xe004, "Sending NOTIFY_ACK (ha=%p)\n", ha);
+
+	/* Send marker if required */
+	if (qlt_issue_marker(vha, 1) != QLA_SUCCESS)
+		return;
+
+	pkt = (request_t *)qla2x00_alloc_iocbs(vha, NULL);
+	if (!pkt) {
+		ql_dbg(ql_dbg_tgt, vha, 0xe049,
+		    "qla_target(%d): %s failed: unable to allocate "
+		    "request packet\n", vha->vp_idx, __func__);
+		return;
+	}
+
+	if (ha->tgt.qla_tgt != NULL)
+		ha->tgt.qla_tgt->notify_ack_expected++;
+
+	pkt->entry_type = NOTIFY_ACK_TYPE;
+	pkt->entry_count = 1;
+
+	nack = (struct nack_to_isp *)pkt;
+	nack->ox_id = ntfy->ox_id;
+
+	nack->u.isp24.nport_handle = ntfy->u.isp24.nport_handle;
+	if (le16_to_cpu(ntfy->u.isp24.status) == IMM_NTFY_ELS) {
+		nack->u.isp24.flags = ntfy->u.isp24.flags &
+			__constant_cpu_to_le32(NOTIFY24XX_FLAGS_PUREX_IOCB);
+	}
+	nack->u.isp24.srr_rx_id = ntfy->u.isp24.srr_rx_id;
+	nack->u.isp24.status = ntfy->u.isp24.status;
+	nack->u.isp24.status_subcode = ntfy->u.isp24.status_subcode;
+	nack->u.isp24.exchange_address = ntfy->u.isp24.exchange_address;
+	nack->u.isp24.srr_rel_offs = ntfy->u.isp24.srr_rel_offs;
+	nack->u.isp24.srr_ui = ntfy->u.isp24.srr_ui;
+	nack->u.isp24.srr_flags = cpu_to_le16(srr_flags);
+	nack->u.isp24.srr_reject_code = srr_reject_code;
+	nack->u.isp24.srr_reject_code_expl = srr_explan;
+	nack->u.isp24.vp_index = ntfy->u.isp24.vp_index;
+
+	ql_dbg(ql_dbg_tgt, vha, 0xe005,
+	    "qla_target(%d): Sending 24xx Notify Ack %d\n",
+	    vha->vp_idx, nack->u.isp24.status);
+
+	qla2x00_start_iocbs(vha, vha->req);
+}
+
+/*
+ * ha->hardware_lock supposed to be held on entry. Might drop it, then reacquire
+ */
+static void qlt_24xx_send_abts_resp(struct scsi_qla_host *vha,
+	struct abts_recv_from_24xx *abts, uint32_t status,
+	bool ids_reversed)
+{
+	struct qla_hw_data *ha = vha->hw;
+	struct abts_resp_to_24xx *resp;
+	uint32_t f_ctl;
+	uint8_t *p;
+
+	ql_dbg(ql_dbg_tgt, vha, 0xe006,
+	    "Sending task mgmt ABTS response (ha=%p, atio=%p, status=%x)\n",
+	    ha, abts, status);
+
+	/* Send marker if required */
+	if (qlt_issue_marker(vha, 1) != QLA_SUCCESS)
+		return;
+
+	resp = (struct abts_resp_to_24xx *)qla2x00_alloc_iocbs(vha, NULL);
+	if (!resp) {
+		ql_dbg(ql_dbg_tgt, vha, 0xe04a,
+		    "qla_target(%d): %s failed: unable to allocate "
+		    "request packet", vha->vp_idx, __func__);
+		return;
+	}
+
+	resp->entry_type = ABTS_RESP_24XX;
+	resp->entry_count = 1;
+	resp->nport_handle = abts->nport_handle;
+	resp->vp_index = vha->vp_idx;
+	resp->sof_type = abts->sof_type;
+	resp->exchange_address = abts->exchange_address;
+	resp->fcp_hdr_le = abts->fcp_hdr_le;
+	f_ctl = __constant_cpu_to_le32(F_CTL_EXCH_CONTEXT_RESP |
+	    F_CTL_LAST_SEQ | F_CTL_END_SEQ |
+	    F_CTL_SEQ_INITIATIVE);
+	p = (uint8_t *)&f_ctl;
+	resp->fcp_hdr_le.f_ctl[0] = *p++;
+	resp->fcp_hdr_le.f_ctl[1] = *p++;
+	resp->fcp_hdr_le.f_ctl[2] = *p;
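+	/*
+	 * The response normally swaps the received frame's s_id/d_id; when
+	 * ids_reversed is set the passed-in frame already carries reversed
+	 * IDs, so copy them through as-is.
+	 */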
+	if (ids_reversed) {
+		resp->fcp_hdr_le.d_id[0] = abts->fcp_hdr_le.d_id[0];
+		resp->fcp_hdr_le.d_id[1] = abts->fcp_hdr_le.d_id[1];
+		resp->fcp_hdr_le.d_id[2] = abts->fcp_hdr_le.d_id[2];
+		resp->fcp_hdr_le.s_id[0] = abts->fcp_hdr_le.s_id[0];
+		resp->fcp_hdr_le.s_id[1] = abts->fcp_hdr_le.s_id[1];
+		resp->fcp_hdr_le.s_id[2] = abts->fcp_hdr_le.s_id[2];
+	} else {
+		resp->fcp_hdr_le.d_id[0] = abts->fcp_hdr_le.s_id[0];
+		resp->fcp_hdr_le.d_id[1] = abts->fcp_hdr_le.s_id[1];
+		resp->fcp_hdr_le.d_id[2] = abts->fcp_hdr_le.s_id[2];
+		resp->fcp_hdr_le.s_id[0] = abts->fcp_hdr_le.d_id[0];
+		resp->fcp_hdr_le.s_id[1] = abts->fcp_hdr_le.d_id[1];
+		resp->fcp_hdr_le.s_id[2] = abts->fcp_hdr_le.d_id[2];
+	}
+	resp->exchange_addr_to_abort = abts->exchange_addr_to_abort;
+	if (status == FCP_TMF_CMPL) {
+		resp->fcp_hdr_le.r_ctl = R_CTL_BASIC_LINK_SERV | R_CTL_B_ACC;
+		resp->payload.ba_acct.seq_id_valid = SEQ_ID_INVALID;
+		resp->payload.ba_acct.low_seq_cnt = 0x0000;
+		resp->payload.ba_acct.high_seq_cnt = 0xFFFF;
+		resp->payload.ba_acct.ox_id = abts->fcp_hdr_le.ox_id;
+		resp->payload.ba_acct.rx_id = abts->fcp_hdr_le.rx_id;
+	} else {
+		resp->fcp_hdr_le.r_ctl = R_CTL_BASIC_LINK_SERV | R_CTL_B_RJT;
+		resp->payload.ba_rjt.reason_code =
+			BA_RJT_REASON_CODE_UNABLE_TO_PERFORM;
+		/* Other bytes are zero */
+	}
+
+	ha->tgt.qla_tgt->abts_resp_expected++;
+
+	qla2x00_start_iocbs(vha, vha->req);
+}
+
+/*
+ * ha->hardware_lock supposed to be held on entry. Might drop it, then reacquire
+ */
+static void qlt_24xx_retry_term_exchange(struct scsi_qla_host *vha,
+	struct abts_resp_from_24xx_fw *entry)
+{
+	struct ctio7_to_24xx *ctio;
+
+	ql_dbg(ql_dbg_tgt, vha, 0xe007,
+	    "Sending retry TERM EXCH CTIO7 (ha=%p)\n", vha->hw);
+	/* Send marker if required */
+	if (qlt_issue_marker(vha, 1) != QLA_SUCCESS)
+		return;
+
+	ctio = (struct ctio7_to_24xx *)qla2x00_alloc_iocbs(vha, NULL);
+	if (ctio == NULL) {
+		ql_dbg(ql_dbg_tgt, vha, 0xe04b,
+		    "qla_target(%d): %s failed: unable to allocate "
+		    "request packet\n", vha->vp_idx, __func__);
+		return;
+	}
+
+	/*
+	 * On entry we have the firmware's response to the ABTS response we
+	 * generated, so its ID fields are reversed.
+	 */
+
+	ctio->entry_type = CTIO_TYPE7;
+	ctio->entry_count = 1;
+	ctio->nport_handle = entry->nport_handle;
+	ctio->handle = QLA_TGT_SKIP_HANDLE | CTIO_COMPLETION_HANDLE_MARK;
+	ctio->timeout = __constant_cpu_to_le16(QLA_TGT_TIMEOUT);
+	ctio->vp_index = vha->vp_idx;
+	ctio->initiator_id[0] = entry->fcp_hdr_le.d_id[0];
+	ctio->initiator_id[1] = entry->fcp_hdr_le.d_id[1];
+	ctio->initiator_id[2] = entry->fcp_hdr_le.d_id[2];
+	ctio->exchange_addr = entry->exchange_addr_to_abort;
+	ctio->u.status1.flags =
+	    __constant_cpu_to_le16(CTIO7_FLAGS_STATUS_MODE_1 |
+		CTIO7_FLAGS_TERMINATE);
+	ctio->u.status1.ox_id = entry->fcp_hdr_le.ox_id;
+
+	qla2x00_start_iocbs(vha, vha->req);
+
+	qlt_24xx_send_abts_resp(vha, (struct abts_recv_from_24xx *)entry,
+	    FCP_TMF_CMPL, true);
+}
+
+/* ha->hardware_lock supposed to be held on entry */
+static int __qlt_24xx_handle_abts(struct scsi_qla_host *vha,
+	struct abts_recv_from_24xx *abts, struct qla_tgt_sess *sess)
+{
+	struct qla_hw_data *ha = vha->hw;
+	struct qla_tgt_mgmt_cmd *mcmd;
+	int rc;
+
+	ql_dbg(ql_dbg_tgt_mgt, vha, 0xf00f,
+	    "qla_target(%d): task abort (tag=%d)\n",
+	    vha->vp_idx, abts->exchange_addr_to_abort);
+
+	mcmd = mempool_alloc(qla_tgt_mgmt_cmd_mempool, GFP_ATOMIC);
+	if (mcmd == NULL) {
+		ql_dbg(ql_dbg_tgt_mgt, vha, 0xf051,
+		    "qla_target(%d): %s: Allocation of ABORT cmd failed",
+		    vha->vp_idx, __func__);
+		return -ENOMEM;
+	}
+	memset(mcmd, 0, sizeof(*mcmd));
+
+	mcmd->sess = sess;
+	memcpy(&mcmd->orig_iocb.abts, abts, sizeof(mcmd->orig_iocb.abts));
+
+	rc = ha->tgt.tgt_ops->handle_tmr(mcmd, 0, TMR_ABORT_TASK,
+	    abts->exchange_addr_to_abort);
+	if (rc != 0) {
+		ql_dbg(ql_dbg_tgt_mgt, vha, 0xf052,
+		    "qla_target(%d):  tgt_ops->handle_tmr()"
+		    " failed: %d", vha->vp_idx, rc);
+		mempool_free(mcmd, qla_tgt_mgmt_cmd_mempool);
+		return -EFAULT;
+	}
+
+	return 0;
+}
+
+/*
+ * ha->hardware_lock supposed to be held on entry. Might drop it, then reacquire
+ */
+static void qlt_24xx_handle_abts(struct scsi_qla_host *vha,
+	struct abts_recv_from_24xx *abts)
+{
+	struct qla_hw_data *ha = vha->hw;
+	struct qla_tgt_sess *sess;
+	uint32_t tag = abts->exchange_addr_to_abort;
+	uint8_t s_id[3];
+	int rc;
+
+	if (le32_to_cpu(abts->fcp_hdr_le.parameter) & ABTS_PARAM_ABORT_SEQ) {
+		ql_dbg(ql_dbg_tgt_mgt, vha, 0xf053,
+		    "qla_target(%d): ABTS: Abort Sequence not "
+		    "supported\n", vha->vp_idx);
+		qlt_24xx_send_abts_resp(vha, abts, FCP_TMF_REJECTED, false);
+		return;
+	}
+
+	if (tag == ATIO_EXCHANGE_ADDRESS_UNKNOWN) {
+		ql_dbg(ql_dbg_tgt_mgt, vha, 0xf010,
+		    "qla_target(%d): ABTS: Unknown Exchange "
+		    "Address received\n", vha->vp_idx);
+		qlt_24xx_send_abts_resp(vha, abts, FCP_TMF_REJECTED, false);
+		return;
+	}
+
+	ql_dbg(ql_dbg_tgt_mgt, vha, 0xf011,
+	    "qla_target(%d): task abort (s_id=%x:%x:%x, "
+	    "tag=%d, param=%x)\n", vha->vp_idx, abts->fcp_hdr_le.s_id[2],
+	    abts->fcp_hdr_le.s_id[1], abts->fcp_hdr_le.s_id[0], tag,
+	    le32_to_cpu(abts->fcp_hdr_le.parameter));
+
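+	/*
+	 * The S_ID bytes in the little-endian FCP header are stored in
+	 * reverse order; put them back in wire order for the session lookup.
+	 */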
+	s_id[0] = abts->fcp_hdr_le.s_id[2];
+	s_id[1] = abts->fcp_hdr_le.s_id[1];
+	s_id[2] = abts->fcp_hdr_le.s_id[0];
+
+	sess = ha->tgt.tgt_ops->find_sess_by_s_id(vha, s_id);
+	if (!sess) {
+		ql_dbg(ql_dbg_tgt_mgt, vha, 0xf012,
+		    "qla_target(%d): task abort for non-existant session\n",
+		    vha->vp_idx);
+		rc = qlt_sched_sess_work(ha->tgt.qla_tgt,
+		    QLA_TGT_SESS_WORK_ABORT, abts, sizeof(*abts));
+		if (rc != 0) {
+			qlt_24xx_send_abts_resp(vha, abts, FCP_TMF_REJECTED,
+			    false);
+		}
+		return;
+	}
+
+	rc = __qlt_24xx_handle_abts(vha, abts, sess);
+	if (rc != 0) {
+		ql_dbg(ql_dbg_tgt_mgt, vha, 0xf054,
+		    "qla_target(%d): __qlt_24xx_handle_abts() failed: %d\n",
+		    vha->vp_idx, rc);
+		qlt_24xx_send_abts_resp(vha, abts, FCP_TMF_REJECTED, false);
+		return;
+	}
+}
+
+/*
+ * ha->hardware_lock supposed to be held on entry. Might drop it, then reacquire
+ */
+static void qlt_24xx_send_task_mgmt_ctio(struct scsi_qla_host *ha,
+	struct qla_tgt_mgmt_cmd *mcmd, uint32_t resp_code)
+{
+	struct atio_from_isp *atio = &mcmd->orig_iocb.atio;
+	struct ctio7_to_24xx *ctio;
+
+	ql_dbg(ql_dbg_tgt, ha, 0xe008,
+	    "Sending task mgmt CTIO7 (ha=%p, atio=%p, resp_code=%x\n",
+	    ha, atio, resp_code);
+
+	/* Send marker if required */
+	if (qlt_issue_marker(ha, 1) != QLA_SUCCESS)
+		return;
+
+	ctio = (struct ctio7_to_24xx *)qla2x00_alloc_iocbs(ha, NULL);
+	if (ctio == NULL) {
+		ql_dbg(ql_dbg_tgt, ha, 0xe04c,
+		    "qla_target(%d): %s failed: unable to allocate "
+		    "request packet\n", ha->vp_idx, __func__);
+		return;
+	}
+
+	ctio->entry_type = CTIO_TYPE7;
+	ctio->entry_count = 1;
+	ctio->handle = QLA_TGT_SKIP_HANDLE | CTIO_COMPLETION_HANDLE_MARK;
+	ctio->nport_handle = mcmd->sess->loop_id;
+	ctio->timeout = __constant_cpu_to_le16(QLA_TGT_TIMEOUT);
+	ctio->vp_index = ha->vp_idx;
+	ctio->initiator_id[0] = atio->u.isp24.fcp_hdr.s_id[2];
+	ctio->initiator_id[1] = atio->u.isp24.fcp_hdr.s_id[1];
+	ctio->initiator_id[2] = atio->u.isp24.fcp_hdr.s_id[0];
+	ctio->exchange_addr = atio->u.isp24.exchange_addr;
+	ctio->u.status1.flags = (atio->u.isp24.attr << 9) |
+	    __constant_cpu_to_le16(CTIO7_FLAGS_STATUS_MODE_1 |
+		CTIO7_FLAGS_SEND_STATUS);
+	ctio->u.status1.ox_id = swab16(atio->u.isp24.fcp_hdr.ox_id);
+	ctio->u.status1.scsi_status =
+	    __constant_cpu_to_le16(SS_RESPONSE_INFO_LEN_VALID);
+	ctio->u.status1.response_len = __constant_cpu_to_le16(8);
+	((uint32_t *)ctio->u.status1.sense_data)[0] = cpu_to_be32(resp_code);
+
+	qla2x00_start_iocbs(ha, ha->req);
+}
+
+void qlt_free_mcmd(struct qla_tgt_mgmt_cmd *mcmd)
+{
+	mempool_free(mcmd, qla_tgt_mgmt_cmd_mempool);
+}
+EXPORT_SYMBOL(qlt_free_mcmd);
+
+/* callback from target fabric module code */
+void qlt_xmit_tm_rsp(struct qla_tgt_mgmt_cmd *mcmd)
+{
+	struct scsi_qla_host *vha = mcmd->sess->vha;
+	struct qla_hw_data *ha = vha->hw;
+	unsigned long flags;
+
+	ql_dbg(ql_dbg_tgt_mgt, vha, 0xf013,
+	    "TM response mcmd (%p) status %#x state %#x",
+	    mcmd, mcmd->fc_tm_rsp, mcmd->flags);
+
+	spin_lock_irqsave(&ha->hardware_lock, flags);
+	if (mcmd->flags == QLA24XX_MGMT_SEND_NACK)
+		qlt_send_notify_ack(vha, &mcmd->orig_iocb.imm_ntfy,
+		    0, 0, 0, 0, 0, 0);
+	else {
+		if (mcmd->se_cmd.se_tmr_req->function == TMR_ABORT_TASK)
+			qlt_24xx_send_abts_resp(vha, &mcmd->orig_iocb.abts,
+			    mcmd->fc_tm_rsp, false);
+		else
+			qlt_24xx_send_task_mgmt_ctio(vha, mcmd,
+			    mcmd->fc_tm_rsp);
+	}
+	/*
+	 * Make the callback for ->free_mcmd() to queue_work() and invoke
+	 * target_put_sess_cmd() to drop cmd_kref to 1.  The final
+	 * target_put_sess_cmd() call will be made from TFO->check_stop_free()
+	 * -> tcm_qla2xxx_check_stop_free() to release the TMR associated se_cmd
+	 * descriptor after TFO->queue_tm_rsp() -> tcm_qla2xxx_queue_tm_rsp() ->
+	 * qlt_xmit_tm_rsp() returns here..
+	 */
+	ha->tgt.tgt_ops->free_mcmd(mcmd);
+	spin_unlock_irqrestore(&ha->hardware_lock, flags);
+}
+EXPORT_SYMBOL(qlt_xmit_tm_rsp);
+
+/* No locks */
+static int qlt_pci_map_calc_cnt(struct qla_tgt_prm *prm)
+{
+	struct qla_tgt_cmd *cmd = prm->cmd;
+
+	BUG_ON(cmd->sg_cnt == 0);
+
+	prm->sg = (struct scatterlist *)cmd->sg;
+	prm->seg_cnt = pci_map_sg(prm->tgt->ha->pdev, cmd->sg,
+	    cmd->sg_cnt, cmd->dma_data_direction);
+	if (unlikely(prm->seg_cnt == 0))
+		goto out_err;
+
+	prm->cmd->sg_mapped = 1;
+
+	/*
+	 * If there are more sg entries than fit into the command IOCB, we
+	 * need to allocate continuation entries
+	 */
+	if (prm->seg_cnt > prm->tgt->datasegs_per_cmd)
+		prm->req_cnt += DIV_ROUND_UP(prm->seg_cnt -
+		    prm->tgt->datasegs_per_cmd, prm->tgt->datasegs_per_cont);
+
+	ql_dbg(ql_dbg_tgt, prm->cmd->vha, 0xe009, "seg_cnt=%d, req_cnt=%d\n",
+	    prm->seg_cnt, prm->req_cnt);
+	return 0;
+
+out_err:
+	ql_dbg(ql_dbg_tgt, prm->cmd->vha, 0xe04d,
+	    "qla_target(%d): PCI mapping failed: sg_cnt=%d",
+	    0, prm->cmd->sg_cnt);
+	return -1;
+}
+
+static inline void qlt_unmap_sg(struct scsi_qla_host *vha,
+	struct qla_tgt_cmd *cmd)
+{
+	struct qla_hw_data *ha = vha->hw;
+
+	BUG_ON(!cmd->sg_mapped);
+	pci_unmap_sg(ha->pdev, cmd->sg, cmd->sg_cnt, cmd->dma_data_direction);
+	cmd->sg_mapped = 0;
+}
+
+static int qlt_check_reserve_free_req(struct scsi_qla_host *vha,
+	uint32_t req_cnt)
+{
+	struct qla_hw_data *ha = vha->hw;
+	device_reg_t __iomem *reg = ha->iobase;
+	uint32_t cnt;
+
+	if (vha->req->cnt < (req_cnt + 2)) {
+		cnt = (uint16_t)RD_REG_DWORD(&reg->isp24.req_q_out);
+
+		ql_dbg(ql_dbg_tgt, vha, 0xe00a,
+		    "Request ring circled: cnt=%d, vha->->ring_index=%d, "
+		    "vha->req->cnt=%d, req_cnt=%d\n", cnt,
+		    vha->req->ring_index, vha->req->cnt, req_cnt);
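+		/*
+		 * Re-derive the free entry count from the firmware's request
+		 * queue out pointer, accounting for ring wrap-around.
+		 */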
+		if (vha->req->ring_index < cnt)
+			vha->req->cnt = cnt - vha->req->ring_index;
+		else
+			vha->req->cnt = vha->req->length -
+			    (vha->req->ring_index - cnt);
+	}
+
+	if (unlikely(vha->req->cnt < (req_cnt + 2))) {
+		ql_dbg(ql_dbg_tgt, vha, 0xe00b,
+		    "qla_target(%d): There is no room in the "
+		    "request ring: vha->req->ring_index=%d, vha->req->cnt=%d, "
+		    "req_cnt=%d\n", vha->vp_idx, vha->req->ring_index,
+		    vha->req->cnt, req_cnt);
+		return -EAGAIN;
+	}
+	vha->req->cnt -= req_cnt;
+
+	return 0;
+}
+
+/*
+ * ha->hardware_lock supposed to be held on entry. Might drop it, then reacquire
+ */
+static inline void *qlt_get_req_pkt(struct scsi_qla_host *vha)
+{
+	/* Adjust ring index. */
+	vha->req->ring_index++;
+	if (vha->req->ring_index == vha->req->length) {
+		vha->req->ring_index = 0;
+		vha->req->ring_ptr = vha->req->ring;
+	} else {
+		vha->req->ring_ptr++;
+	}
+	return (cont_entry_t *)vha->req->ring_ptr;
+}
+
+/* ha->hardware_lock supposed to be held on entry */
+static inline uint32_t qlt_make_handle(struct scsi_qla_host *vha)
+{
+	struct qla_hw_data *ha = vha->hw;
+	uint32_t h;
+
+	h = ha->tgt.current_handle;
+	/* always increment cmd handle */
+	do {
+		++h;
+		if (h > MAX_OUTSTANDING_COMMANDS)
+			h = 1; /* 0 is QLA_TGT_NULL_HANDLE */
+		if (h == ha->tgt.current_handle) {
+			ql_dbg(ql_dbg_tgt, vha, 0xe04e,
+			    "qla_target(%d): Ran out of "
+			    "empty cmd slots in ha %p\n", vha->vp_idx, ha);
+			h = QLA_TGT_NULL_HANDLE;
+			break;
+		}
+	} while ((h == QLA_TGT_NULL_HANDLE) ||
+	    (h == QLA_TGT_SKIP_HANDLE) ||
+	    (ha->tgt.cmds[h-1] != NULL));
+
+	if (h != QLA_TGT_NULL_HANDLE)
+		ha->tgt.current_handle = h;
+
+	return h;
+}
+
+/* ha->hardware_lock supposed to be held on entry */
+static int qlt_24xx_build_ctio_pkt(struct qla_tgt_prm *prm,
+	struct scsi_qla_host *vha)
+{
+	uint32_t h;
+	struct ctio7_to_24xx *pkt;
+	struct qla_hw_data *ha = vha->hw;
+	struct atio_from_isp *atio = &prm->cmd->atio;
+
+	pkt = (struct ctio7_to_24xx *)vha->req->ring_ptr;
+	prm->pkt = pkt;
+	memset(pkt, 0, sizeof(*pkt));
+
+	pkt->entry_type = CTIO_TYPE7;
+	pkt->entry_count = (uint8_t)prm->req_cnt;
+	pkt->vp_index = vha->vp_idx;
+
+	h = qlt_make_handle(vha);
+	if (unlikely(h == QLA_TGT_NULL_HANDLE)) {
+		/*
+		 * CTIO type 7 from the firmware doesn't provide a way to
+		 * know the initiator's LOOP ID, hence we can't find
+		 * the session and, therefore, the command.
+		 */
+		return -EAGAIN;
+	} else
+		ha->tgt.cmds[h-1] = prm->cmd;
+
+	pkt->handle = h | CTIO_COMPLETION_HANDLE_MARK;
+	pkt->nport_handle = prm->cmd->loop_id;
+	pkt->timeout = __constant_cpu_to_le16(QLA_TGT_TIMEOUT);
+	pkt->initiator_id[0] = atio->u.isp24.fcp_hdr.s_id[2];
+	pkt->initiator_id[1] = atio->u.isp24.fcp_hdr.s_id[1];
+	pkt->initiator_id[2] = atio->u.isp24.fcp_hdr.s_id[0];
+	pkt->exchange_addr = atio->u.isp24.exchange_addr;
+	pkt->u.status0.flags |= (atio->u.isp24.attr << 9);
+	pkt->u.status0.ox_id = swab16(atio->u.isp24.fcp_hdr.ox_id);
+	pkt->u.status0.relative_offset = cpu_to_le32(prm->cmd->offset);
+
+	ql_dbg(ql_dbg_tgt, vha, 0xe00c,
+	    "qla_target(%d): handle(cmd) -> %08x, timeout %d, ox_id %#x\n",
+	    vha->vp_idx, pkt->handle, QLA_TGT_TIMEOUT,
+	    le16_to_cpu(pkt->u.status0.ox_id));
+	return 0;
+}
+
+/*
+ * ha->hardware_lock supposed to be held on entry. We have already made sure
+ * that there are enough request entries so we do not need to drop it.
+ */
+static void qlt_load_cont_data_segments(struct qla_tgt_prm *prm,
+	struct scsi_qla_host *vha)
+{
+	int cnt;
+	uint32_t *dword_ptr;
+	int enable_64bit_addressing = prm->tgt->tgt_enable_64bit_addr;
+
+	/* Build continuation packets */
+	while (prm->seg_cnt > 0) {
+		cont_a64_entry_t *cont_pkt64 =
+			(cont_a64_entry_t *)qlt_get_req_pkt(vha);
+
+		/*
+		 * Make sure that none of the 64-bit specific fields of
+		 * cont_pkt64 are used for 32-bit addressing; cast to
+		 * (cont_entry_t *) for that.
+		 */
+
+		memset(cont_pkt64, 0, sizeof(*cont_pkt64));
+
+		cont_pkt64->entry_count = 1;
+		cont_pkt64->sys_define = 0;
+
+		if (enable_64bit_addressing) {
+			cont_pkt64->entry_type = CONTINUE_A64_TYPE;
+			dword_ptr =
+			    (uint32_t *)&cont_pkt64->dseg_0_address;
+		} else {
+			cont_pkt64->entry_type = CONTINUE_TYPE;
+			dword_ptr =
+			    (uint32_t *)&((cont_entry_t *)
+				cont_pkt64)->dseg_0_address;
+		}
+
+		/* Load continuation entry data segments */
+		for (cnt = 0;
+		    cnt < prm->tgt->datasegs_per_cont && prm->seg_cnt;
+		    cnt++, prm->seg_cnt--) {
+			*dword_ptr++ =
+			    cpu_to_le32(pci_dma_lo32
+				(sg_dma_address(prm->sg)));
+			if (enable_64bit_addressing) {
+				*dword_ptr++ =
+				    cpu_to_le32(pci_dma_hi32
+					(sg_dma_address
+					(prm->sg)));
+			}
+			*dword_ptr++ = cpu_to_le32(sg_dma_len(prm->sg));
+
+			ql_dbg(ql_dbg_tgt, vha, 0xe00d,
+			    "S/G Segment Cont. phys_addr=%llx:%llx, len=%d\n",
+			    (long long unsigned int)
+			    pci_dma_hi32(sg_dma_address(prm->sg)),
+			    (long long unsigned int)
+			    pci_dma_lo32(sg_dma_address(prm->sg)),
+			    (int)sg_dma_len(prm->sg));
+
+			prm->sg = sg_next(prm->sg);
+		}
+	}
+}
+
+/*
+ * ha->hardware_lock supposed to be held on entry. We have already made sure
+ * that there are enough request entries so we do not need to drop it.
+ */
+static void qlt_load_data_segments(struct qla_tgt_prm *prm,
+	struct scsi_qla_host *vha)
+{
+	int cnt;
+	uint32_t *dword_ptr;
+	int enable_64bit_addressing = prm->tgt->tgt_enable_64bit_addr;
+	struct ctio7_to_24xx *pkt24 = (struct ctio7_to_24xx *)prm->pkt;
+
+	ql_dbg(ql_dbg_tgt, vha, 0xe00e,
+	    "iocb->scsi_status=%x, iocb->flags=%x\n",
+	    le16_to_cpu(pkt24->u.status0.scsi_status),
+	    le16_to_cpu(pkt24->u.status0.flags));
+
+	pkt24->u.status0.transfer_length = cpu_to_le32(prm->cmd->bufflen);
+
+	/* Setup packet address segment pointer */
+	dword_ptr = pkt24->u.status0.dseg_0_address;
+
+	/* Set total data segment count */
+	if (prm->seg_cnt)
+		pkt24->dseg_count = cpu_to_le16(prm->seg_cnt);
+
+	if (prm->seg_cnt == 0) {
+		/* No data transfer */
+		*dword_ptr++ = 0;
+		*dword_ptr = 0;
+		return;
+	}
+
+	/* If scatter gather */
+	ql_dbg(ql_dbg_tgt, vha, 0xe00f, "%s", "Building S/G data segments...");
+
+	/* Load command entry data segments */
+	for (cnt = 0;
+	    (cnt < prm->tgt->datasegs_per_cmd) && prm->seg_cnt;
+	    cnt++, prm->seg_cnt--) {
+		*dword_ptr++ =
+		    cpu_to_le32(pci_dma_lo32(sg_dma_address(prm->sg)));
+		if (enable_64bit_addressing) {
+			*dword_ptr++ =
+			    cpu_to_le32(pci_dma_hi32(
+				sg_dma_address(prm->sg)));
+		}
+		*dword_ptr++ = cpu_to_le32(sg_dma_len(prm->sg));
+
+		ql_dbg(ql_dbg_tgt, vha, 0xe010,
+		    "S/G Segment phys_addr=%llx:%llx, len=%d\n",
+		    (long long unsigned int)pci_dma_hi32(sg_dma_address(
+		    prm->sg)),
+		    (long long unsigned int)pci_dma_lo32(sg_dma_address(
+		    prm->sg)),
+		    (int)sg_dma_len(prm->sg));
+
+		prm->sg = sg_next(prm->sg);
+	}
+
+	qlt_load_cont_data_segments(prm, vha);
+}
+
+static inline int qlt_has_data(struct qla_tgt_cmd *cmd)
+{
+	return cmd->bufflen > 0;
+}
+
+/*
+ * Called without ha->hardware_lock held
+ */
+static int qlt_pre_xmit_response(struct qla_tgt_cmd *cmd,
+	struct qla_tgt_prm *prm, int xmit_type, uint8_t scsi_status,
+	uint32_t *full_req_cnt)
+{
+	struct qla_tgt *tgt = cmd->tgt;
+	struct scsi_qla_host *vha = tgt->vha;
+	struct qla_hw_data *ha = vha->hw;
+	struct se_cmd *se_cmd = &cmd->se_cmd;
+
+	if (unlikely(cmd->aborted)) {
+		ql_dbg(ql_dbg_tgt_mgt, vha, 0xf014,
+		    "qla_target(%d): terminating exchange "
+		    "for aborted cmd=%p (se_cmd=%p, tag=%d)", vha->vp_idx, cmd,
+		    se_cmd, cmd->tag);
+
+		cmd->state = QLA_TGT_STATE_ABORTED;
+
+		qlt_send_term_exchange(vha, cmd, &cmd->atio, 0);
+
+		/* !! At this point cmd could be already freed !! */
+		return QLA_TGT_PRE_XMIT_RESP_CMD_ABORTED;
+	}
+
+	ql_dbg(ql_dbg_tgt, vha, 0xe011, "qla_target(%d): tag=%u\n",
+	    vha->vp_idx, cmd->tag);
+
+	prm->cmd = cmd;
+	prm->tgt = tgt;
+	prm->rq_result = scsi_status;
+	prm->sense_buffer = &cmd->sense_buffer[0];
+	prm->sense_buffer_len = TRANSPORT_SENSE_BUFFER;
+	prm->sg = NULL;
+	prm->seg_cnt = -1;
+	prm->req_cnt = 1;
+	prm->add_status_pkt = 0;
+
+	ql_dbg(ql_dbg_tgt, vha, 0xe012, "rq_result=%x, xmit_type=%x\n",
+	    prm->rq_result, xmit_type);
+
+	/* Send marker if required */
+	if (qlt_issue_marker(vha, 0) != QLA_SUCCESS)
+		return -EFAULT;
+
+	ql_dbg(ql_dbg_tgt, vha, 0xe013, "CTIO start: vha(%d)\n", vha->vp_idx);
+
+	if ((xmit_type & QLA_TGT_XMIT_DATA) && qlt_has_data(cmd)) {
+		if (qlt_pci_map_calc_cnt(prm) != 0)
+			return -EAGAIN;
+	}
+
+	*full_req_cnt = prm->req_cnt;
+
+	if (se_cmd->se_cmd_flags & SCF_UNDERFLOW_BIT) {
+		prm->residual = se_cmd->residual_count;
+		ql_dbg(ql_dbg_tgt, vha, 0xe014,
+		    "Residual underflow: %d (tag %d, "
+		    "op %x, bufflen %d, rq_result %x)\n", prm->residual,
+		    cmd->tag, se_cmd->t_task_cdb ? se_cmd->t_task_cdb[0] : 0,
+		    cmd->bufflen, prm->rq_result);
+		prm->rq_result |= SS_RESIDUAL_UNDER;
+	} else if (se_cmd->se_cmd_flags & SCF_OVERFLOW_BIT) {
+		prm->residual = se_cmd->residual_count;
+		ql_dbg(ql_dbg_tgt, vha, 0xe015,
+		    "Residual overflow: %d (tag %d, "
+		    "op %x, bufflen %d, rq_result %x)\n", prm->residual,
+		    cmd->tag, se_cmd->t_task_cdb ? se_cmd->t_task_cdb[0] : 0,
+		    cmd->bufflen, prm->rq_result);
+		prm->rq_result |= SS_RESIDUAL_OVER;
+	}
+
+	if (xmit_type & QLA_TGT_XMIT_STATUS) {
+		/*
+		 * If QLA_TGT_XMIT_DATA is not set, add_status_pkt will be
+		 * ignored in *xmit_response() below
+		 */
+		if (qlt_has_data(cmd)) {
+			if (QLA_TGT_SENSE_VALID(prm->sense_buffer) ||
+			    (IS_FWI2_CAPABLE(ha) &&
+			    (prm->rq_result != 0))) {
+				prm->add_status_pkt = 1;
+				(*full_req_cnt)++;
+			}
+		}
+	}
+
+	ql_dbg(ql_dbg_tgt, vha, 0xe016,
+	    "req_cnt=%d, full_req_cnt=%d, add_status_pkt=%d\n",
+	    prm->req_cnt, *full_req_cnt, prm->add_status_pkt);
+
+	return 0;
+}
+
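+/*
+ * Explicit confirmation is never requested when class 2 service is enabled.
+ * When sending sense it depends only on the initiator's confirmed-completion
+ * support; otherwise enable_explicit_conf must be set as well.
+ */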
+static inline int qlt_need_explicit_conf(struct qla_hw_data *ha,
+	struct qla_tgt_cmd *cmd, int sending_sense)
+{
+	if (ha->tgt.enable_class_2)
+		return 0;
+
+	if (sending_sense)
+		return cmd->conf_compl_supported;
+	else
+		return ha->tgt.enable_explicit_conf &&
+		    cmd->conf_compl_supported;
+}
+
+#ifdef CONFIG_QLA_TGT_DEBUG_SRR
+/*
+ *  Originally taken from the XFS code
+ */
+static unsigned long qlt_srr_random(void)
+{
+	static int Inited;
+	static unsigned long RandomValue;
+	static DEFINE_SPINLOCK(lock);
+	/* cycles pseudo-randomly through all values between 1 and 2^31 - 2 */
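+	/* (Park-Miller minimal standard LCG, evaluated with Schrage's method) */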
+	register long rv;
+	register long lo;
+	register long hi;
+	unsigned long flags;
+
+	spin_lock_irqsave(&lock, flags);
+	if (!Inited) {
+		RandomValue = jiffies;
+		Inited = 1;
+	}
+	rv = RandomValue;
+	hi = rv / 127773;
+	lo = rv % 127773;
+	rv = 16807 * lo - 2836 * hi;
+	if (rv <= 0)
+		rv += 2147483647;
+	RandomValue = rv;
+	spin_unlock_irqrestore(&lock, flags);
+	return rv;
+}
+
+static void qlt_check_srr_debug(struct qla_tgt_cmd *cmd, int *xmit_type)
+{
+#if 0 /* This does not simulate a real lost status packet, so it won't lead to an SRR */
+	if ((*xmit_type & QLA_TGT_XMIT_STATUS) && (qlt_srr_random() % 200)
+	    == 50) {
+		*xmit_type &= ~QLA_TGT_XMIT_STATUS;
+		ql_dbg(ql_dbg_tgt_mgt, cmd->vha, 0xf015,
+		    "Dropping cmd %p (tag %d) status", cmd, cmd->tag);
+	}
+#endif
+	/*
+	 * It's currently not possible to simulate SRRs for FCP_WRITE without
+	 * a physical link layer failure, so don't even try here.
+	 */
+	if (cmd->dma_data_direction != DMA_FROM_DEVICE)
+		return;
+
+	if (qlt_has_data(cmd) && (cmd->sg_cnt > 1) &&
+	    ((qlt_srr_random() % 100) == 20)) {
+		int i, leave = 0;
+		unsigned int tot_len = 0;
+
+		while (leave == 0)
+			leave = qlt_srr_random() % cmd->sg_cnt;
+
+		for (i = 0; i < leave; i++)
+			tot_len += cmd->sg[i].length;
+
+		ql_dbg(ql_dbg_tgt_mgt, cmd->vha, 0xf016,
+		    "Cutting cmd %p (tag %d) buffer"
+		    " tail to len %d, sg_cnt %d (cmd->bufflen %d,"
+		    " cmd->sg_cnt %d)", cmd, cmd->tag, tot_len, leave,
+		    cmd->bufflen, cmd->sg_cnt);
+
+		cmd->bufflen = tot_len;
+		cmd->sg_cnt = leave;
+	}
+
+	if (qlt_has_data(cmd) && ((qlt_srr_random() % 100) == 70)) {
+		unsigned int offset = qlt_srr_random() % cmd->bufflen;
+
+		ql_dbg(ql_dbg_tgt_mgt, cmd->vha, 0xf017,
+		    "Cutting cmd %p (tag %d) buffer head "
+		    "to offset %d (cmd->bufflen %d)", cmd, cmd->tag, offset,
+		    cmd->bufflen);
+		if (offset == 0)
+			*xmit_type &= ~QLA_TGT_XMIT_DATA;
+		else if (qlt_set_data_offset(cmd, offset)) {
+			ql_dbg(ql_dbg_tgt_mgt, cmd->vha, 0xf018,
+			    "qlt_set_data_offset() failed (tag %d)", cmd->tag);
+		}
+	}
+}
+#else
+static inline void qlt_check_srr_debug(struct qla_tgt_cmd *cmd, int *xmit_type)
+{}
+#endif
+
+static void qlt_24xx_init_ctio_to_isp(struct ctio7_to_24xx *ctio,
+	struct qla_tgt_prm *prm)
+{
+	prm->sense_buffer_len = min_t(uint32_t, prm->sense_buffer_len,
+	    (uint32_t)sizeof(ctio->u.status1.sense_data));
+	ctio->u.status0.flags |=
+	    __constant_cpu_to_le16(CTIO7_FLAGS_SEND_STATUS);
+	if (qlt_need_explicit_conf(prm->tgt->ha, prm->cmd, 0)) {
+		ctio->u.status0.flags |= __constant_cpu_to_le16(
+		    CTIO7_FLAGS_EXPLICIT_CONFORM |
+		    CTIO7_FLAGS_CONFORM_REQ);
+	}
+	ctio->u.status0.residual = cpu_to_le32(prm->residual);
+	ctio->u.status0.scsi_status = cpu_to_le16(prm->rq_result);
+	if (QLA_TGT_SENSE_VALID(prm->sense_buffer)) {
+		int i;
+
+		if (qlt_need_explicit_conf(prm->tgt->ha, prm->cmd, 1)) {
+			if (prm->cmd->se_cmd.scsi_status != 0) {
+				ql_dbg(ql_dbg_tgt, prm->cmd->vha, 0xe017,
+				    "Skipping EXPLICIT_CONFORM and "
+				    "CTIO7_FLAGS_CONFORM_REQ for FCP READ w/ "
+				    "non GOOD status\n");
+				goto skip_explict_conf;
+			}
+			ctio->u.status1.flags |= __constant_cpu_to_le16(
+			    CTIO7_FLAGS_EXPLICIT_CONFORM |
+			    CTIO7_FLAGS_CONFORM_REQ);
+		}
+skip_explict_conf:
+		ctio->u.status1.flags &=
+		    ~__constant_cpu_to_le16(CTIO7_FLAGS_STATUS_MODE_0);
+		ctio->u.status1.flags |=
+		    __constant_cpu_to_le16(CTIO7_FLAGS_STATUS_MODE_1);
+		ctio->u.status1.scsi_status |=
+		    __constant_cpu_to_le16(SS_SENSE_LEN_VALID);
+		ctio->u.status1.sense_length =
+		    cpu_to_le16(prm->sense_buffer_len);
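+		/* Copy the sense data into the CTIO as big-endian 32-bit words */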
+		for (i = 0; i < prm->sense_buffer_len/4; i++)
+			((uint32_t *)ctio->u.status1.sense_data)[i] =
+				cpu_to_be32(((uint32_t *)prm->sense_buffer)[i]);
+#if 0
+		if (unlikely((prm->sense_buffer_len % 4) != 0)) {
+			static int q;
+			if (q < 10) {
+				ql_dbg(ql_dbg_tgt, vha, 0xe04f,
+				    "qla_target(%d): %d bytes of sense "
+				    "lost", prm->tgt->ha->vp_idx,
+				    prm->sense_buffer_len % 4);
+				q++;
+			}
+		}
+#endif
+	} else {
+		ctio->u.status1.flags &=
+		    ~__constant_cpu_to_le16(CTIO7_FLAGS_STATUS_MODE_0);
+		ctio->u.status1.flags |=
+		    __constant_cpu_to_le16(CTIO7_FLAGS_STATUS_MODE_1);
+		ctio->u.status1.sense_length = 0;
+		memset(ctio->u.status1.sense_data, 0,
+		    sizeof(ctio->u.status1.sense_data));
+	}
+
+	/* Sense with len > 24: is that even possible? */
+}
+
+/*
+ * Callback to set up a response for an xmit_type of QLA_TGT_XMIT_DATA and
+ * QLA_TGT_XMIT_STATUS for >= 24xx silicon
+ */
+int qlt_xmit_response(struct qla_tgt_cmd *cmd, int xmit_type,
+	uint8_t scsi_status)
+{
+	struct scsi_qla_host *vha = cmd->vha;
+	struct qla_hw_data *ha = vha->hw;
+	struct ctio7_to_24xx *pkt;
+	struct qla_tgt_prm prm;
+	uint32_t full_req_cnt = 0;
+	unsigned long flags = 0;
+	int res;
+
+	memset(&prm, 0, sizeof(prm));
+	qlt_check_srr_debug(cmd, &xmit_type);
+
+	ql_dbg(ql_dbg_tgt, cmd->vha, 0xe018,
+	    "is_send_status=%d, cmd->bufflen=%d, cmd->sg_cnt=%d, "
+	    "cmd->dma_data_direction=%d\n", (xmit_type & QLA_TGT_XMIT_STATUS) ?
+	    1 : 0, cmd->bufflen, cmd->sg_cnt, cmd->dma_data_direction);
+
+	res = qlt_pre_xmit_response(cmd, &prm, xmit_type, scsi_status,
+	    &full_req_cnt);
+	if (unlikely(res != 0)) {
+		if (res == QLA_TGT_PRE_XMIT_RESP_CMD_ABORTED)
+			return 0;
+
+		return res;
+	}
+
+	spin_lock_irqsave(&ha->hardware_lock, flags);
+
+	/* Does the F/W have enough IOCBs for this request? */
+	res = qlt_check_reserve_free_req(vha, full_req_cnt);
+	if (unlikely(res))
+		goto out_unmap_unlock;
+
+	res = qlt_24xx_build_ctio_pkt(&prm, vha);
+	if (unlikely(res != 0))
+		goto out_unmap_unlock;
+
+	pkt = (struct ctio7_to_24xx *)prm.pkt;
+
+	if (qlt_has_data(cmd) && (xmit_type & QLA_TGT_XMIT_DATA)) {
+		pkt->u.status0.flags |=
+		    __constant_cpu_to_le16(CTIO7_FLAGS_DATA_IN |
+			CTIO7_FLAGS_STATUS_MODE_0);
+
+		qlt_load_data_segments(&prm, vha);
+
+		if (prm.add_status_pkt == 0) {
+			if (xmit_type & QLA_TGT_XMIT_STATUS) {
+				pkt->u.status0.scsi_status =
+				    cpu_to_le16(prm.rq_result);
+				pkt->u.status0.residual =
+				    cpu_to_le32(prm.residual);
+				pkt->u.status0.flags |= __constant_cpu_to_le16(
+				    CTIO7_FLAGS_SEND_STATUS);
+				if (qlt_need_explicit_conf(ha, cmd, 0)) {
+					pkt->u.status0.flags |=
+					    __constant_cpu_to_le16(
+						CTIO7_FLAGS_EXPLICIT_CONFORM |
+						CTIO7_FLAGS_CONFORM_REQ);
+				}
+			}
+
+		} else {
+			/*
+			 * We have already made sure that there are enough
+			 * request entries so we do not need to drop the HW
+			 * lock in req_pkt().
+			 */
+			struct ctio7_to_24xx *ctio =
+				(struct ctio7_to_24xx *)qlt_get_req_pkt(vha);
+
+			ql_dbg(ql_dbg_tgt, vha, 0xe019,
+			    "Building additional status packet\n");
+
+			memcpy(ctio, pkt, sizeof(*ctio));
+			ctio->entry_count = 1;
+			ctio->dseg_count = 0;
+			ctio->u.status1.flags &= ~__constant_cpu_to_le16(
+			    CTIO7_FLAGS_DATA_IN);
+
+			/* Real finish is ctio_m1's finish */
+			pkt->handle |= CTIO_INTERMEDIATE_HANDLE_MARK;
+			pkt->u.status0.flags |= __constant_cpu_to_le16(
+			    CTIO7_FLAGS_DONT_RET_CTIO);
+			qlt_24xx_init_ctio_to_isp((struct ctio7_to_24xx *)ctio,
+			    &prm);
+			pr_debug("Status CTIO7: %p\n", ctio);
+		}
+	} else
+		qlt_24xx_init_ctio_to_isp(pkt, &prm);
+
+	cmd->state = QLA_TGT_STATE_PROCESSED; /* Mid-level is done processing */
+
+	ql_dbg(ql_dbg_tgt, vha, 0xe01a,
+	    "Xmitting CTIO7 response pkt for 24xx: %p scsi_status: 0x%02x\n",
+	    pkt, scsi_status);
+
+	qla2x00_start_iocbs(vha, vha->req);
+	spin_unlock_irqrestore(&ha->hardware_lock, flags);
+
+	return 0;
+
+out_unmap_unlock:
+	if (cmd->sg_mapped)
+		qlt_unmap_sg(vha, cmd);
+	spin_unlock_irqrestore(&ha->hardware_lock, flags);
+
+	return res;
+}
+EXPORT_SYMBOL(qlt_xmit_response);
+
+int qlt_rdy_to_xfer(struct qla_tgt_cmd *cmd)
+{
+	struct ctio7_to_24xx *pkt;
+	struct scsi_qla_host *vha = cmd->vha;
+	struct qla_hw_data *ha = vha->hw;
+	struct qla_tgt *tgt = cmd->tgt;
+	struct qla_tgt_prm prm;
+	unsigned long flags;
+	int res = 0;
+
+	memset(&prm, 0, sizeof(prm));
+	prm.cmd = cmd;
+	prm.tgt = tgt;
+	prm.sg = NULL;
+	prm.req_cnt = 1;
+
+	/* Send marker if required */
+	if (qlt_issue_marker(vha, 0) != QLA_SUCCESS)
+		return -EIO;
+
+	ql_dbg(ql_dbg_tgt, vha, 0xe01b, "CTIO_start: vha(%d)",
+	    (int)vha->vp_idx);
+
+	/* Calculate number of entries and segments required */
+	if (qlt_pci_map_calc_cnt(&prm) != 0)
+		return -EAGAIN;
+
+	spin_lock_irqsave(&ha->hardware_lock, flags);
+
+	/* Does the F/W have enough IOCBs for this request? */
+	res = qlt_check_reserve_free_req(vha, prm.req_cnt);
+	if (res != 0)
+		goto out_unlock_free_unmap;
+
+	res = qlt_24xx_build_ctio_pkt(&prm, vha);
+	if (unlikely(res != 0))
+		goto out_unlock_free_unmap;
+	pkt = (struct ctio7_to_24xx *)prm.pkt;
+	pkt->u.status0.flags |= __constant_cpu_to_le16(CTIO7_FLAGS_DATA_OUT |
+	    CTIO7_FLAGS_STATUS_MODE_0);
+	qlt_load_data_segments(&prm, vha);
+
+	cmd->state = QLA_TGT_STATE_NEED_DATA;
+
+	qla2x00_start_iocbs(vha, vha->req);
+	spin_unlock_irqrestore(&ha->hardware_lock, flags);
+
+	return res;
+
+out_unlock_free_unmap:
+	if (cmd->sg_mapped)
+		qlt_unmap_sg(vha, cmd);
+	spin_unlock_irqrestore(&ha->hardware_lock, flags);
+
+	return res;
+}
+EXPORT_SYMBOL(qlt_rdy_to_xfer);
+
+/* If hardware_lock held on entry, might drop it, then reacquire */
+/* This function sends the appropriate CTIO to ISP 2xxx or 24xx */
+static int __qlt_send_term_exchange(struct scsi_qla_host *vha,
+	struct qla_tgt_cmd *cmd,
+	struct atio_from_isp *atio)
+{
+	struct ctio7_to_24xx *ctio24;
+	struct qla_hw_data *ha = vha->hw;
+	request_t *pkt;
+	int ret = 0;
+
+	ql_dbg(ql_dbg_tgt, vha, 0xe01c, "Sending TERM EXCH CTIO (ha=%p)\n", ha);
+
+	pkt = (request_t *)qla2x00_alloc_iocbs(vha, NULL);
+	if (pkt == NULL) {
+		ql_dbg(ql_dbg_tgt, vha, 0xe050,
+		    "qla_target(%d): %s failed: unable to allocate "
+		    "request packet\n", vha->vp_idx, __func__);
+		return -ENOMEM;
+	}
+
+	if (cmd != NULL) {
+		if (cmd->state < QLA_TGT_STATE_PROCESSED) {
+			ql_dbg(ql_dbg_tgt, vha, 0xe051,
+			    "qla_target(%d): Terminating cmd %p with "
+			    "incorrect state %d\n", vha->vp_idx, cmd,
+			    cmd->state);
+		} else
+			ret = 1;
+	}
+
+	pkt->entry_count = 1;
+	pkt->handle = QLA_TGT_SKIP_HANDLE | CTIO_COMPLETION_HANDLE_MARK;
+
+	ctio24 = (struct ctio7_to_24xx *)pkt;
+	ctio24->entry_type = CTIO_TYPE7;
+	ctio24->nport_handle = cmd ? cmd->loop_id : CTIO7_NHANDLE_UNRECOGNIZED;
+	ctio24->timeout = __constant_cpu_to_le16(QLA_TGT_TIMEOUT);
+	ctio24->vp_index = vha->vp_idx;
+	ctio24->initiator_id[0] = atio->u.isp24.fcp_hdr.s_id[2];
+	ctio24->initiator_id[1] = atio->u.isp24.fcp_hdr.s_id[1];
+	ctio24->initiator_id[2] = atio->u.isp24.fcp_hdr.s_id[0];
+	ctio24->exchange_addr = atio->u.isp24.exchange_addr;
+	ctio24->u.status1.flags = (atio->u.isp24.attr << 9) |
+	    __constant_cpu_to_le16(CTIO7_FLAGS_STATUS_MODE_1 |
+		CTIO7_FLAGS_TERMINATE);
+	ctio24->u.status1.ox_id = swab16(atio->u.isp24.fcp_hdr.ox_id);
+
+	/* Most likely, it isn't needed */
+	ctio24->u.status1.residual = get_unaligned((uint32_t *)
+	    &atio->u.isp24.fcp_cmnd.add_cdb[
+	    atio->u.isp24.fcp_cmnd.add_cdb_len]);
+	if (ctio24->u.status1.residual != 0)
+		ctio24->u.status1.scsi_status |= SS_RESIDUAL_UNDER;
+
+	qla2x00_start_iocbs(vha, vha->req);
+	return ret;
+}
+
+static void qlt_send_term_exchange(struct scsi_qla_host *vha,
+	struct qla_tgt_cmd *cmd, struct atio_from_isp *atio, int ha_locked)
+{
+	unsigned long flags;
+	int rc;
+
+	if (qlt_issue_marker(vha, ha_locked) < 0)
+		return;
+
+	if (ha_locked) {
+		rc = __qlt_send_term_exchange(vha, cmd, atio);
+		goto done;
+	}
+	spin_lock_irqsave(&vha->hw->hardware_lock, flags);
+	rc = __qlt_send_term_exchange(vha, cmd, atio);
+	spin_unlock_irqrestore(&vha->hw->hardware_lock, flags);
+done:
+	if (rc == 1) {
+		if (!ha_locked && !in_interrupt())
+			msleep(250); /* just in case */
+
+		vha->hw->tgt.tgt_ops->free_cmd(cmd);
+	}
+}
+
+void qlt_free_cmd(struct qla_tgt_cmd *cmd)
+{
+	BUG_ON(cmd->sg_mapped);
+
+	if (unlikely(cmd->free_sg))
+		kfree(cmd->sg);
+	kmem_cache_free(qla_tgt_cmd_cachep, cmd);
+}
+EXPORT_SYMBOL(qlt_free_cmd);
+
+/* ha->hardware_lock supposed to be held on entry */
+static int qlt_prepare_srr_ctio(struct scsi_qla_host *vha,
+	struct qla_tgt_cmd *cmd, void *ctio)
+{
+	struct qla_tgt_srr_ctio *sc;
+	struct qla_hw_data *ha = vha->hw;
+	struct qla_tgt *tgt = ha->tgt.qla_tgt;
+	struct qla_tgt_srr_imm *imm;
+
+	tgt->ctio_srr_id++;
+
+	ql_dbg(ql_dbg_tgt_mgt, vha, 0xf019,
+	    "qla_target(%d): CTIO with SRR status received\n", vha->vp_idx);
+
+	if (!ctio) {
+		ql_dbg(ql_dbg_tgt_mgt, vha, 0xf055,
+		    "qla_target(%d): SRR CTIO, but ctio is NULL\n",
+		    vha->vp_idx);
+		return -EINVAL;
+	}
+
+	sc = kzalloc(sizeof(*sc), GFP_ATOMIC);
+	if (sc != NULL) {
+		sc->cmd = cmd;
+		/* IRQ is already OFF */
+		spin_lock(&tgt->srr_lock);
+		sc->srr_id = tgt->ctio_srr_id;
+		list_add_tail(&sc->srr_list_entry,
+		    &tgt->srr_ctio_list);
+		ql_dbg(ql_dbg_tgt_mgt, vha, 0xf01a,
+		    "CTIO SRR %p added (id %d)\n", sc, sc->srr_id);
+		if (tgt->imm_srr_id == tgt->ctio_srr_id) {
+			int found = 0;
+			list_for_each_entry(imm, &tgt->srr_imm_list,
+			    srr_list_entry) {
+				if (imm->srr_id == sc->srr_id) {
+					found = 1;
+					break;
+				}
+			}
+			if (found) {
+				ql_dbg(ql_dbg_tgt_mgt, vha, 0xf01b,
+				    "Scheduling srr work\n");
+				schedule_work(&tgt->srr_work);
+			} else {
+				ql_dbg(ql_dbg_tgt_mgt, vha, 0xf056,
+				    "qla_target(%d): imm_srr_id "
+				    "== ctio_srr_id (%d), but there is no "
+				    "corresponding SRR IMM, deleting CTIO "
+				    "SRR %p\n", vha->vp_idx,
+				    tgt->ctio_srr_id, sc);
+				list_del(&sc->srr_list_entry);
+				spin_unlock(&tgt->srr_lock);
+
+				kfree(sc);
+				return -EINVAL;
+			}
+		}
+		spin_unlock(&tgt->srr_lock);
+	} else {
+		struct qla_tgt_srr_imm *ti;
+
+		ql_dbg(ql_dbg_tgt_mgt, vha, 0xf057,
+		    "qla_target(%d): Unable to allocate SRR CTIO entry\n",
+		    vha->vp_idx);
+		spin_lock(&tgt->srr_lock);
+		list_for_each_entry_safe(imm, ti, &tgt->srr_imm_list,
+		    srr_list_entry) {
+			if (imm->srr_id == tgt->ctio_srr_id) {
+				ql_dbg(ql_dbg_tgt_mgt, vha, 0xf01c,
+				    "IMM SRR %p deleted (id %d)\n",
+				    imm, imm->srr_id);
+				list_del(&imm->srr_list_entry);
+				qlt_reject_free_srr_imm(vha, imm, 1);
+			}
+		}
+		spin_unlock(&tgt->srr_lock);
+
+		return -ENOMEM;
+	}
+
+	return 0;
+}
+
+/*
+ * ha->hardware_lock supposed to be held on entry. Might drop it, then reacquire
+ */
+static int qlt_term_ctio_exchange(struct scsi_qla_host *vha, void *ctio,
+	struct qla_tgt_cmd *cmd, uint32_t status)
+{
+	int term = 0;
+
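+	/*
+	 * If the CTIO carries OF_TERM_EXCH the firmware has already terminated
+	 * the exchange, so only send a TERM EXCHANGE when that flag is clear.
+	 */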
+	if (ctio != NULL) {
+		struct ctio7_from_24xx *c = (struct ctio7_from_24xx *)ctio;
+		term = !(c->flags &
+		    __constant_cpu_to_le16(OF_TERM_EXCH));
+	} else
+		term = 1;
+
+	if (term)
+		qlt_send_term_exchange(vha, cmd, &cmd->atio, 1);
+
+	return term;
+}
+
+/* ha->hardware_lock supposed to be held on entry */
+static inline struct qla_tgt_cmd *qlt_get_cmd(struct scsi_qla_host *vha,
+	uint32_t handle)
+{
+	struct qla_hw_data *ha = vha->hw;
+
+	handle--;
+	if (ha->tgt.cmds[handle] != NULL) {
+		struct qla_tgt_cmd *cmd = ha->tgt.cmds[handle];
+		ha->tgt.cmds[handle] = NULL;
+		return cmd;
+	} else
+		return NULL;
+}
+
+/* ha->hardware_lock supposed to be held on entry */
+static struct qla_tgt_cmd *qlt_ctio_to_cmd(struct scsi_qla_host *vha,
+	uint32_t handle, void *ctio)
+{
+	struct qla_tgt_cmd *cmd = NULL;
+
+	/* Clear out internal marks */
+	handle &= ~(CTIO_COMPLETION_HANDLE_MARK |
+	    CTIO_INTERMEDIATE_HANDLE_MARK);
+
+	if (handle != QLA_TGT_NULL_HANDLE) {
+		if (unlikely(handle == QLA_TGT_SKIP_HANDLE)) {
+			ql_dbg(ql_dbg_tgt, vha, 0xe01d, "%s",
+			    "SKIP_HANDLE CTIO\n");
+			return NULL;
+		}
+		/* handle-1 is actually used */
+		if (unlikely(handle > MAX_OUTSTANDING_COMMANDS)) {
+			ql_dbg(ql_dbg_tgt, vha, 0xe052,
+			    "qla_target(%d): Wrong handle %x received\n",
+			    vha->vp_idx, handle);
+			return NULL;
+		}
+		cmd = qlt_get_cmd(vha, handle);
+		if (unlikely(cmd == NULL)) {
+			ql_dbg(ql_dbg_tgt, vha, 0xe053,
+			    "qla_target(%d): Suspicious: unable to "
+			    "find the command with handle %x\n", vha->vp_idx,
+			    handle);
+			return NULL;
+		}
+	} else if (ctio != NULL) {
+		/* We can't get loop ID from CTIO7 */
+		ql_dbg(ql_dbg_tgt, vha, 0xe054,
+		    "qla_target(%d): Wrong CTIO received: QLA24xx doesn't "
+		    "support NULL handles\n", vha->vp_idx);
+		return NULL;
+	}
+
+	return cmd;
+}
+
+/*
+ * ha->hardware_lock supposed to be held on entry. Might drop it, then reacquire
+ */
+static void qlt_do_ctio_completion(struct scsi_qla_host *vha, uint32_t handle,
+	uint32_t status, void *ctio)
+{
+	struct qla_hw_data *ha = vha->hw;
+	struct se_cmd *se_cmd;
+	struct target_core_fabric_ops *tfo;
+	struct qla_tgt_cmd *cmd;
+
+	ql_dbg(ql_dbg_tgt, vha, 0xe01e,
+	    "qla_target(%d): handle(ctio %p status %#x) <- %08x\n",
+	    vha->vp_idx, ctio, status, handle);
+
+	if (handle & CTIO_INTERMEDIATE_HANDLE_MARK) {
+		/* That could happen only in case of an error/reset/abort */
+		if (status != CTIO_SUCCESS) {
+			ql_dbg(ql_dbg_tgt_mgt, vha, 0xf01d,
+			    "Intermediate CTIO received"
+			    " (status %x)\n", status);
+		}
+		return;
+	}
+
+	cmd = qlt_ctio_to_cmd(vha, handle, ctio);
+	if (cmd == NULL)
+		return;
+
+	se_cmd = &cmd->se_cmd;
+	tfo = se_cmd->se_tfo;
+
+	if (cmd->sg_mapped)
+		qlt_unmap_sg(vha, cmd);
+
+	if (unlikely(status != CTIO_SUCCESS)) {
+		switch (status & 0xFFFF) {
+		case CTIO_LIP_RESET:
+		case CTIO_TARGET_RESET:
+		case CTIO_ABORTED:
+		case CTIO_TIMEOUT:
+		case CTIO_INVALID_RX_ID:
+			/* They are OK */
+			ql_dbg(ql_dbg_tgt_mgt, vha, 0xf058,
+			    "qla_target(%d): CTIO with "
+			    "status %#x received, state %x, se_cmd %p, "
+			    "(LIP_RESET=e, ABORTED=2, TARGET_RESET=17, "
+			    "TIMEOUT=b, INVALID_RX_ID=8)\n", vha->vp_idx,
+			    status, cmd->state, se_cmd);
+			break;
+
+		case CTIO_PORT_LOGGED_OUT:
+		case CTIO_PORT_UNAVAILABLE:
+			ql_dbg(ql_dbg_tgt_mgt, vha, 0xf059,
+			    "qla_target(%d): CTIO with PORT LOGGED "
+			    "OUT (29) or PORT UNAVAILABLE (28) status %x "
+			    "received (state %x, se_cmd %p)\n", vha->vp_idx,
+			    status, cmd->state, se_cmd);
+			break;
+
+		case CTIO_SRR_RECEIVED:
+			ql_dbg(ql_dbg_tgt_mgt, vha, 0xf05a,
+			    "qla_target(%d): CTIO with SRR_RECEIVED"
+			    " status %x received (state %x, se_cmd %p)\n",
+			    vha->vp_idx, status, cmd->state, se_cmd);
+			if (qlt_prepare_srr_ctio(vha, cmd, ctio) != 0)
+				break;
+			else
+				return;
+
+		default:
+			ql_dbg(ql_dbg_tgt_mgt, vha, 0xf05b,
+			    "qla_target(%d): CTIO with error status "
+			    "0x%x received (state %x, se_cmd %p\n",
+			    vha->vp_idx, status, cmd->state, se_cmd);
+			break;
+		}
+
+		if (cmd->state != QLA_TGT_STATE_NEED_DATA)
+			if (qlt_term_ctio_exchange(vha, ctio, cmd, status))
+				return;
+	}
+
+	if (cmd->state == QLA_TGT_STATE_PROCESSED) {
+		ql_dbg(ql_dbg_tgt, vha, 0xe01f, "Command %p finished\n", cmd);
+	} else if (cmd->state == QLA_TGT_STATE_NEED_DATA) {
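+		/* Data-out (write) phase has completed; pass the result up to
+		 * the fabric module via ->handle_data() */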
+		int rx_status = 0;
+
+		cmd->state = QLA_TGT_STATE_DATA_IN;
+
+		if (unlikely(status != CTIO_SUCCESS))
+			rx_status = -EIO;
+		else
+			cmd->write_data_transferred = 1;
+
+		ql_dbg(ql_dbg_tgt, vha, 0xe020,
+		    "Data received, context %x, rx_status %d\n",
+		    0x0, rx_status);
+
+		ha->tgt.tgt_ops->handle_data(cmd);
+		return;
+	} else if (cmd->state == QLA_TGT_STATE_ABORTED) {
+		ql_dbg(ql_dbg_tgt_mgt, vha, 0xf01e,
+		    "Aborted command %p (tag %d) finished\n", cmd, cmd->tag);
+	} else {
+		ql_dbg(ql_dbg_tgt_mgt, vha, 0xf05c,
+		    "qla_target(%d): A command in state (%d) should "
+		    "not return a CTIO complete\n", vha->vp_idx, cmd->state);
+	}
+
+	if (unlikely(status != CTIO_SUCCESS)) {
+		ql_dbg(ql_dbg_tgt_mgt, vha, 0xf01f, "Finishing failed CTIO\n");
+		dump_stack();
+	}
+
+	ha->tgt.tgt_ops->free_cmd(cmd);
+}
+
+/* ha->hardware_lock supposed to be held on entry */
+/* called via callback from qla2xxx */
+void qlt_ctio_completion(struct scsi_qla_host *vha, uint32_t handle)
+{
+	struct qla_hw_data *ha = vha->hw;
+	struct qla_tgt *tgt = ha->tgt.qla_tgt;
+
+	if (likely(tgt == NULL)) {
+		ql_dbg(ql_dbg_tgt, vha, 0xe021,
+		    "CTIO, but target mode not enabled"
+		    " (ha %d %p handle %#x)", vha->vp_idx, ha, handle);
+		return;
+	}
+
+	tgt->irq_cmd_count++;
+	qlt_do_ctio_completion(vha, handle, CTIO_SUCCESS, NULL);
+	tgt->irq_cmd_count--;
+}
+
+static inline int qlt_get_fcp_task_attr(struct scsi_qla_host *vha,
+	uint8_t task_codes)
+{
+	int fcp_task_attr;
+
+	switch (task_codes) {
+	case ATIO_SIMPLE_QUEUE:
+		fcp_task_attr = MSG_SIMPLE_TAG;
+		break;
+	case ATIO_HEAD_OF_QUEUE:
+		fcp_task_attr = MSG_HEAD_TAG;
+		break;
+	case ATIO_ORDERED_QUEUE:
+		fcp_task_attr = MSG_ORDERED_TAG;
+		break;
+	case ATIO_ACA_QUEUE:
+		fcp_task_attr = MSG_ACA_TAG;
+		break;
+	case ATIO_UNTAGGED:
+		fcp_task_attr = MSG_SIMPLE_TAG;
+		break;
+	default:
+		ql_dbg(ql_dbg_tgt_mgt, vha, 0xf05d,
+		    "qla_target: unknown task code %x, use ORDERED instead\n",
+		    task_codes);
+		fcp_task_attr = MSG_ORDERED_TAG;
+		break;
+	}
+
+	return fcp_task_attr;
+}
+
+static struct qla_tgt_sess *qlt_make_local_sess(struct scsi_qla_host *,
+					uint8_t *);
+/*
+ * Process context for I/O path into tcm_qla2xxx code
+ */
+static void qlt_do_work(struct work_struct *work)
+{
+	struct qla_tgt_cmd *cmd = container_of(work, struct qla_tgt_cmd, work);
+	scsi_qla_host_t *vha = cmd->vha;
+	struct qla_hw_data *ha = vha->hw;
+	struct qla_tgt *tgt = ha->tgt.qla_tgt;
+	struct qla_tgt_sess *sess = NULL;
+	struct atio_from_isp *atio = &cmd->atio;
+	unsigned char *cdb;
+	unsigned long flags;
+	uint32_t data_length;
+	int ret, fcp_task_attr, data_dir, bidi = 0;
+
+	if (tgt->tgt_stop)
+		goto out_term;
+
+	spin_lock_irqsave(&ha->hardware_lock, flags);
+	sess = ha->tgt.tgt_ops->find_sess_by_s_id(vha,
+	    atio->u.isp24.fcp_hdr.s_id);
+	if (sess) {
+		if (unlikely(sess->tearing_down)) {
+			sess = NULL;
+			spin_unlock_irqrestore(&ha->hardware_lock, flags);
+			goto out_term;
+		} else {
+			/*
+			 * Do the extra kref_get() before dropping
+			 * qla_hw_data->hardware_lock.
+			 */
+			kref_get(&sess->se_sess->sess_kref);
+		}
+	}
+	spin_unlock_irqrestore(&ha->hardware_lock, flags);
+
+	if (unlikely(!sess)) {
+		uint8_t *s_id =	atio->u.isp24.fcp_hdr.s_id;
+
+		ql_dbg(ql_dbg_tgt_mgt, vha, 0xf022,
+			"qla_target(%d): Unable to find wwn login"
+			" (s_id %x:%x:%x), trying to create it manually\n",
+			vha->vp_idx, s_id[0], s_id[1], s_id[2]);
+
+		if (atio->u.raw.entry_count > 1) {
+			ql_dbg(ql_dbg_tgt_mgt, vha, 0xf023,
+				"Dropping multy entry cmd %p\n", cmd);
+			goto out_term;
+		}
+
+		mutex_lock(&ha->tgt.tgt_mutex);
+		sess = qlt_make_local_sess(vha, s_id);
+		/* sess has an extra creation ref. */
+		mutex_unlock(&ha->tgt.tgt_mutex);
+
+		if (!sess)
+			goto out_term;
+	}
+
+	cmd->sess = sess;
+	cmd->loop_id = sess->loop_id;
+	cmd->conf_compl_supported = sess->conf_compl_supported;
+
+	cdb = &atio->u.isp24.fcp_cmnd.cdb[0];
+	cmd->tag = atio->u.isp24.exchange_addr;
+	cmd->unpacked_lun = scsilun_to_int(
+	    (struct scsi_lun *)&atio->u.isp24.fcp_cmnd.lun);
+
+	if (atio->u.isp24.fcp_cmnd.rddata &&
+	    atio->u.isp24.fcp_cmnd.wrdata) {
+		bidi = 1;
+		data_dir = DMA_TO_DEVICE;
+	} else if (atio->u.isp24.fcp_cmnd.rddata)
+		data_dir = DMA_FROM_DEVICE;
+	else if (atio->u.isp24.fcp_cmnd.wrdata)
+		data_dir = DMA_TO_DEVICE;
+	else
+		data_dir = DMA_NONE;
+
+	fcp_task_attr = qlt_get_fcp_task_attr(vha,
+	    atio->u.isp24.fcp_cmnd.task_attr);
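+	/* The FCP_DL data length field follows any additional CDB bytes in
+	 * the FCP_CMND */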
+	data_length = be32_to_cpu(get_unaligned((uint32_t *)
+	    &atio->u.isp24.fcp_cmnd.add_cdb[
+	    atio->u.isp24.fcp_cmnd.add_cdb_len]));
+
+	ql_dbg(ql_dbg_tgt, vha, 0xe022,
+	    "qla_target: START qla command: %p lun: 0x%04x (tag %d)\n",
+	    cmd, cmd->unpacked_lun, cmd->tag);
+
+	ret = vha->hw->tgt.tgt_ops->handle_cmd(vha, cmd, cdb, data_length,
+	    fcp_task_attr, data_dir, bidi);
+	if (ret != 0)
+		goto out_term;
+	/*
+	 * Drop the extra session reference from qla_tgt_handle_cmd_for_atio*().
+	 */
+	ha->tgt.tgt_ops->put_sess(sess);
+	return;
+
+out_term:
+	ql_dbg(ql_dbg_tgt_mgt, vha, 0xf020, "Terminating work cmd %p", cmd);
+	/*
+	 * cmd has not been sent to the target yet, so pass NULL as the second
+	 * argument to qlt_send_term_exchange() and free the memory here.
+	 */
+	spin_lock_irqsave(&ha->hardware_lock, flags);
+	qlt_send_term_exchange(vha, NULL, &cmd->atio, 1);
+	kmem_cache_free(qla_tgt_cmd_cachep, cmd);
+	spin_unlock_irqrestore(&ha->hardware_lock, flags);
+	if (sess)
+		ha->tgt.tgt_ops->put_sess(sess);
+}
+
+/* ha->hardware_lock supposed to be held on entry */
+static int qlt_handle_cmd_for_atio(struct scsi_qla_host *vha,
+	struct atio_from_isp *atio)
+{
+	struct qla_hw_data *ha = vha->hw;
+	struct qla_tgt *tgt = ha->tgt.qla_tgt;
+	struct qla_tgt_cmd *cmd;
+
+	if (unlikely(tgt->tgt_stop)) {
+		ql_dbg(ql_dbg_tgt_mgt, vha, 0xf021,
+		    "New command while device %p is shutting down\n", tgt);
+		return -EFAULT;
+	}
+
+	cmd = kmem_cache_zalloc(qla_tgt_cmd_cachep, GFP_ATOMIC);
+	if (!cmd) {
+		ql_dbg(ql_dbg_tgt_mgt, vha, 0xf05e,
+		    "qla_target(%d): Allocation of cmd failed\n", vha->vp_idx);
+		return -ENOMEM;
+	}
+
+	INIT_LIST_HEAD(&cmd->cmd_list);
+
+	memcpy(&cmd->atio, atio, sizeof(*atio));
+	cmd->state = QLA_TGT_STATE_NEW;
+	cmd->tgt = ha->tgt.qla_tgt;
+	cmd->vha = vha;
+
+	INIT_WORK(&cmd->work, qlt_do_work);
+	queue_work(qla_tgt_wq, &cmd->work);
+	return 0;
+
+}
+
+/* ha->hardware_lock supposed to be held on entry */
+static int qlt_issue_task_mgmt(struct qla_tgt_sess *sess, uint32_t lun,
+	int fn, void *iocb, int flags)
+{
+	struct scsi_qla_host *vha = sess->vha;
+	struct qla_hw_data *ha = vha->hw;
+	struct qla_tgt_mgmt_cmd *mcmd;
+	int res;
+	uint8_t tmr_func;
+
+	mcmd = mempool_alloc(qla_tgt_mgmt_cmd_mempool, GFP_ATOMIC);
+	if (!mcmd) {
+		ql_dbg(ql_dbg_tgt_tmr, vha, 0x10009,
+		    "qla_target(%d): Allocation of management "
+		    "command failed, some commands and their data could "
+		    "leak\n", vha->vp_idx);
+		return -ENOMEM;
+	}
+	memset(mcmd, 0, sizeof(*mcmd));
+	mcmd->sess = sess;
+
+	if (iocb) {
+		memcpy(&mcmd->orig_iocb.imm_ntfy, iocb,
+		    sizeof(mcmd->orig_iocb.imm_ntfy));
+	}
+	mcmd->tmr_func = fn;
+	mcmd->flags = flags;
+
+	switch (fn) {
+	case QLA_TGT_CLEAR_ACA:
+		ql_dbg(ql_dbg_tgt_tmr, vha, 0x10000,
+		    "qla_target(%d): CLEAR_ACA received\n", sess->vha->vp_idx);
+		tmr_func = TMR_CLEAR_ACA;
+		break;
+
+	case QLA_TGT_TARGET_RESET:
+		ql_dbg(ql_dbg_tgt_tmr, vha, 0x10001,
+		    "qla_target(%d): TARGET_RESET received\n",
+		    sess->vha->vp_idx);
+		tmr_func = TMR_TARGET_WARM_RESET;
+		break;
+
+	case QLA_TGT_LUN_RESET:
+		ql_dbg(ql_dbg_tgt_tmr, vha, 0x10002,
+		    "qla_target(%d): LUN_RESET received\n", sess->vha->vp_idx);
+		tmr_func = TMR_LUN_RESET;
+		break;
+
+	case QLA_TGT_CLEAR_TS:
+		ql_dbg(ql_dbg_tgt_tmr, vha, 0x10003,
+		    "qla_target(%d): CLEAR_TS received\n", sess->vha->vp_idx);
+		tmr_func = TMR_CLEAR_TASK_SET;
+		break;
+
+	case QLA_TGT_ABORT_TS:
+		ql_dbg(ql_dbg_tgt_tmr, vha, 0x10004,
+		    "qla_target(%d): ABORT_TS received\n", sess->vha->vp_idx);
+		tmr_func = TMR_ABORT_TASK_SET;
+		break;
+#if 0
+	case QLA_TGT_ABORT_ALL:
+		ql_dbg(ql_dbg_tgt_tmr, vha, 0x10005,
+		    "qla_target(%d): Doing ABORT_ALL_TASKS\n",
+		    sess->vha->vp_idx);
+		tmr_func = 0;
+		break;
+
+	case QLA_TGT_ABORT_ALL_SESS:
+		ql_dbg(ql_dbg_tgt_tmr, vha, 0x10006,
+		    "qla_target(%d): Doing ABORT_ALL_TASKS_SESS\n",
+		    sess->vha->vp_idx);
+		tmr_func = 0;
+		break;
+
+	case QLA_TGT_NEXUS_LOSS_SESS:
+		ql_dbg(ql_dbg_tgt_tmr, vha, 0x10007,
+		    "qla_target(%d): Doing NEXUS_LOSS_SESS\n",
+		    sess->vha->vp_idx);
+		tmr_func = 0;
+		break;
+
+	case QLA_TGT_NEXUS_LOSS:
+		ql_dbg(ql_dbg_tgt_tmr, vha, 0x10008,
+		    "qla_target(%d): Doing NEXUS_LOSS\n", sess->vha->vp_idx);
+		tmr_func = 0;
+		break;
+#endif
+	default:
+		ql_dbg(ql_dbg_tgt_tmr, vha, 0x1000a,
+		    "qla_target(%d): Unknown task mgmt fn 0x%x\n",
+		    sess->vha->vp_idx, fn);
+		mempool_free(mcmd, qla_tgt_mgmt_cmd_mempool);
+		return -ENOSYS;
+	}
+
+	res = ha->tgt.tgt_ops->handle_tmr(mcmd, lun, tmr_func, 0);
+	if (res != 0) {
+		ql_dbg(ql_dbg_tgt_tmr, vha, 0x1000b,
+		    "qla_target(%d): tgt.tgt_ops->handle_tmr() failed: %d\n",
+		    sess->vha->vp_idx, res);
+		mempool_free(mcmd, qla_tgt_mgmt_cmd_mempool);
+		return -EFAULT;
+	}
+
+	return 0;
+}
+
+/* ha->hardware_lock supposed to be held on entry */
+static int qlt_handle_task_mgmt(struct scsi_qla_host *vha, void *iocb)
+{
+	struct atio_from_isp *a = (struct atio_from_isp *)iocb;
+	struct qla_hw_data *ha = vha->hw;
+	struct qla_tgt *tgt;
+	struct qla_tgt_sess *sess;
+	uint32_t lun, unpacked_lun;
+	int lun_size, fn;
+
+	tgt = ha->tgt.qla_tgt;
+
+	lun = a->u.isp24.fcp_cmnd.lun;
+	lun_size = sizeof(a->u.isp24.fcp_cmnd.lun);
+	fn = a->u.isp24.fcp_cmnd.task_mgmt_flags;
+	sess = ha->tgt.tgt_ops->find_sess_by_s_id(vha,
+	    a->u.isp24.fcp_hdr.s_id);
+	unpacked_lun = scsilun_to_int((struct scsi_lun *)&lun);
+
+	if (!sess) {
+		ql_dbg(ql_dbg_tgt_mgt, vha, 0xf024,
+		    "qla_target(%d): task mgmt fn 0x%x for "
+		    "non-existant session\n", vha->vp_idx, fn);
+		return qlt_sched_sess_work(tgt, QLA_TGT_SESS_WORK_TM, iocb,
+		    sizeof(struct atio_from_isp));
+	}
+
+	return qlt_issue_task_mgmt(sess, unpacked_lun, fn, iocb, 0);
+}
+
+/* ha->hardware_lock supposed to be held on entry */
+static int __qlt_abort_task(struct scsi_qla_host *vha,
+	struct imm_ntfy_from_isp *iocb, struct qla_tgt_sess *sess)
+{
+	struct atio_from_isp *a = (struct atio_from_isp *)iocb;
+	struct qla_hw_data *ha = vha->hw;
+	struct qla_tgt_mgmt_cmd *mcmd;
+	uint32_t lun, unpacked_lun;
+	int rc;
+
+	mcmd = mempool_alloc(qla_tgt_mgmt_cmd_mempool, GFP_ATOMIC);
+	if (mcmd == NULL) {
+		ql_dbg(ql_dbg_tgt_mgt, vha, 0xf05f,
+		    "qla_target(%d): %s: Allocation of ABORT cmd failed\n",
+		    vha->vp_idx, __func__);
+		return -ENOMEM;
+	}
+	memset(mcmd, 0, sizeof(*mcmd));
+
+	mcmd->sess = sess;
+	memcpy(&mcmd->orig_iocb.imm_ntfy, iocb,
+	    sizeof(mcmd->orig_iocb.imm_ntfy));
+
+	lun = a->u.isp24.fcp_cmnd.lun;
+	unpacked_lun = scsilun_to_int((struct scsi_lun *)&lun);
+
+	rc = ha->tgt.tgt_ops->handle_tmr(mcmd, unpacked_lun, TMR_ABORT_TASK,
+	    le16_to_cpu(iocb->u.isp2x.seq_id));
+	if (rc != 0) {
+		ql_dbg(ql_dbg_tgt_mgt, vha, 0xf060,
+		    "qla_target(%d): tgt_ops->handle_tmr() failed: %d\n",
+		    vha->vp_idx, rc);
+		mempool_free(mcmd, qla_tgt_mgmt_cmd_mempool);
+		return -EFAULT;
+	}
+
+	return 0;
+}
+
+/* ha->hardware_lock supposed to be held on entry */
+static int qlt_abort_task(struct scsi_qla_host *vha,
+	struct imm_ntfy_from_isp *iocb)
+{
+	struct qla_hw_data *ha = vha->hw;
+	struct qla_tgt_sess *sess;
+	int loop_id;
+
+	loop_id = GET_TARGET_ID(ha, (struct atio_from_isp *)iocb);
+
+	sess = ha->tgt.tgt_ops->find_sess_by_loop_id(vha, loop_id);
+	if (sess == NULL) {
+		ql_dbg(ql_dbg_tgt_mgt, vha, 0xf025,
+		    "qla_target(%d): task abort for unexisting "
+		    "session\n", vha->vp_idx);
+		return qlt_sched_sess_work(ha->tgt.qla_tgt,
+		    QLA_TGT_SESS_WORK_ABORT, iocb, sizeof(*iocb));
+	}
+
+	return __qlt_abort_task(vha, iocb, sess);
+}
+
+/*
+ * ha->hardware_lock supposed to be held on entry. Might drop it, then reacquire
+ */
+static int qlt_24xx_handle_els(struct scsi_qla_host *vha,
+	struct imm_ntfy_from_isp *iocb)
+{
+	struct qla_hw_data *ha = vha->hw;
+	int res = 0;
+
+	ql_dbg(ql_dbg_tgt_mgt, vha, 0xf026,
+	    "qla_target(%d): Port ID: 0x%02x:%02x:%02x"
+	    " ELS opcode: 0x%02x\n", vha->vp_idx, iocb->u.isp24.port_id[0],
+	    iocb->u.isp24.port_id[1], iocb->u.isp24.port_id[2],
+	    iocb->u.isp24.status_subcode);
+
+	switch (iocb->u.isp24.status_subcode) {
+	case ELS_PLOGI:
+	case ELS_FLOGI:
+	case ELS_PRLI:
+	case ELS_LOGO:
+	case ELS_PRLO:
+		res = qlt_reset(vha, iocb, QLA_TGT_NEXUS_LOSS_SESS);
+		break;
+	case ELS_PDISC:
+	case ELS_ADISC:
+	{
+		struct qla_tgt *tgt = ha->tgt.qla_tgt;
+		if (tgt->link_reinit_iocb_pending) {
+			qlt_send_notify_ack(vha, &tgt->link_reinit_iocb,
+			    0, 0, 0, 0, 0, 0);
+			tgt->link_reinit_iocb_pending = 0;
+		}
+		res = 1; /* send notify ack */
+		break;
+	}
+
+	default:
+		ql_dbg(ql_dbg_tgt_mgt, vha, 0xf061,
+		    "qla_target(%d): Unsupported ELS command %x "
+		    "received\n", vha->vp_idx, iocb->u.isp24.status_subcode);
+		res = qlt_reset(vha, iocb, QLA_TGT_NEXUS_LOSS_SESS);
+		break;
+	}
+
+	return res;
+}
+
+static int qlt_set_data_offset(struct qla_tgt_cmd *cmd, uint32_t offset)
+{
+	struct scatterlist *sg, *sgp, *sg_srr, *sg_srr_start = NULL;
+	size_t first_offset = 0, rem_offset = offset, tmp = 0;
+	int i, sg_srr_cnt, bufflen = 0;
+
+	ql_dbg(ql_dbg_tgt, cmd->vha, 0xe023,
+	    "Entering qla_tgt_set_data_offset: cmd: %p, cmd->sg: %p, "
+	    "cmd->sg_cnt: %u, direction: %d\n",
+	    cmd, cmd->sg, cmd->sg_cnt, cmd->dma_data_direction);
+
+	/*
+	 * FIXME: Reject non zero SRR relative offset until we can test
+	 * this code properly.
+	 */
+	pr_debug("Rejecting non zero SRR rel_offs: %u\n", offset);
+	return -1;
+
+	if (!cmd->sg || !cmd->sg_cnt) {
+		ql_dbg(ql_dbg_tgt, cmd->vha, 0xe055,
+		    "Missing cmd->sg or zero cmd->sg_cnt in"
+		    " qla_tgt_set_data_offset\n");
+		return -EINVAL;
+	}
+	/*
+	 * Walk the current cmd->sg list until we locate the new sg_srr_start
+	 */
+	for_each_sg(cmd->sg, sg, cmd->sg_cnt, i) {
+		ql_dbg(ql_dbg_tgt, cmd->vha, 0xe024,
+		    "sg[%d]: %p page: %p, length: %d, offset: %d\n",
+		    i, sg, sg_page(sg), sg->length, sg->offset);
+
+		if ((sg->length + tmp) > offset) {
+			first_offset = rem_offset;
+			sg_srr_start = sg;
+			ql_dbg(ql_dbg_tgt, cmd->vha, 0xe025,
+			    "Found matching sg[%d], using %p as sg_srr_start, "
+			    "and using first_offset: %zu\n", i, sg,
+			    first_offset);
+			break;
+		}
+		tmp += sg->length;
+		rem_offset -= sg->length;
+	}
+
+	if (!sg_srr_start) {
+		ql_dbg(ql_dbg_tgt, cmd->vha, 0xe056,
+		    "Unable to locate sg_srr_start for offset: %u\n", offset);
+		return -EINVAL;
+	}
+	sg_srr_cnt = (cmd->sg_cnt - i);
+
+	sg_srr = kzalloc(sizeof(struct scatterlist) * sg_srr_cnt, GFP_KERNEL);
+	if (!sg_srr) {
+		ql_dbg(ql_dbg_tgt, cmd->vha, 0xe057,
+		    "Unable to allocate sgp\n");
+		return -ENOMEM;
+	}
+	sg_init_table(sg_srr, sg_srr_cnt);
+	sgp = &sg_srr[0];
+	/*
+	 * Walk the remaining list for sg_srr_start, mapping to the newly
+	 * allocated sg_srr taking first_offset into account.
+	 */
+	for_each_sg(sg_srr_start, sg, sg_srr_cnt, i) {
+		if (first_offset) {
+			sg_set_page(sgp, sg_page(sg),
+			    (sg->length - first_offset), first_offset);
+			first_offset = 0;
+		} else {
+			sg_set_page(sgp, sg_page(sg), sg->length, 0);
+		}
+		bufflen += sgp->length;
+
+		sgp = sg_next(sgp);
+		if (!sgp)
+			break;
+	}
+
+	cmd->sg = sg_srr;
+	cmd->sg_cnt = sg_srr_cnt;
+	cmd->bufflen = bufflen;
+	cmd->offset += offset;
+	cmd->free_sg = 1;
+
+	ql_dbg(ql_dbg_tgt, cmd->vha, 0xe026, "New cmd->sg: %p\n", cmd->sg);
+	ql_dbg(ql_dbg_tgt, cmd->vha, 0xe027, "New cmd->sg_cnt: %u\n",
+	    cmd->sg_cnt);
+	ql_dbg(ql_dbg_tgt, cmd->vha, 0xe028, "New cmd->bufflen: %u\n",
+	    cmd->bufflen);
+	ql_dbg(ql_dbg_tgt, cmd->vha, 0xe029, "New cmd->offset: %u\n",
+	    cmd->offset);
+
+	if (cmd->sg_cnt < 0)
+		BUG();
+
+	if (cmd->bufflen < 0)
+		BUG();
+
+	return 0;
+}
+
+static inline int qlt_srr_adjust_data(struct qla_tgt_cmd *cmd,
+	uint32_t srr_rel_offs, int *xmit_type)
+{
+	int res = 0, rel_offs;
+
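+	/* Offset requested by the SRR, relative to what this command has
+	 * already accounted for in cmd->offset */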
+	rel_offs = srr_rel_offs - cmd->offset;
+	ql_dbg(ql_dbg_tgt_mgt, cmd->vha, 0xf027, "srr_rel_offs=%d, rel_offs=%d",
+	    srr_rel_offs, rel_offs);
+
+	*xmit_type = QLA_TGT_XMIT_ALL;
+
+	if (rel_offs < 0) {
+		ql_dbg(ql_dbg_tgt_mgt, cmd->vha, 0xf062,
+		    "qla_target(%d): SRR rel_offs (%d) < 0",
+		    cmd->vha->vp_idx, rel_offs);
+		res = -1;
+	} else if (rel_offs == cmd->bufflen)
+		*xmit_type = QLA_TGT_XMIT_STATUS;
+	else if (rel_offs > 0)
+		res = qlt_set_data_offset(cmd, rel_offs);
+
+	return res;
+}
+
+/* No locks, thread context */
+static void qlt_handle_srr(struct scsi_qla_host *vha,
+	struct qla_tgt_srr_ctio *sctio, struct qla_tgt_srr_imm *imm)
+{
+	struct imm_ntfy_from_isp *ntfy =
+	    (struct imm_ntfy_from_isp *)&imm->imm_ntfy;
+	struct qla_hw_data *ha = vha->hw;
+	struct qla_tgt_cmd *cmd = sctio->cmd;
+	struct se_cmd *se_cmd = &cmd->se_cmd;
+	unsigned long flags;
+	int xmit_type = 0, resp = 0;
+	uint32_t offset;
+	uint16_t srr_ui;
+
+	offset = le32_to_cpu(ntfy->u.isp24.srr_rel_offs);
+	srr_ui = ntfy->u.isp24.srr_ui;
+
+	ql_dbg(ql_dbg_tgt_mgt, vha, 0xf028, "SRR cmd %p, srr_ui %x\n",
+	    cmd, srr_ui);
+
+	switch (srr_ui) {
+	case SRR_IU_STATUS:
+		spin_lock_irqsave(&ha->hardware_lock, flags);
+		qlt_send_notify_ack(vha, ntfy,
+		    0, 0, 0, NOTIFY_ACK_SRR_FLAGS_ACCEPT, 0, 0);
+		spin_unlock_irqrestore(&ha->hardware_lock, flags);
+		xmit_type = QLA_TGT_XMIT_STATUS;
+		resp = 1;
+		break;
+	case SRR_IU_DATA_IN:
+		if (!cmd->sg || !cmd->sg_cnt) {
+			ql_dbg(ql_dbg_tgt_mgt, vha, 0xf063,
+			    "Unable to process SRR_IU_DATA_IN due to"
+			    " missing cmd->sg, state: %d\n", cmd->state);
+			dump_stack();
+			goto out_reject;
+		}
+		if (se_cmd->scsi_status != 0) {
+			ql_dbg(ql_dbg_tgt, vha, 0xe02a,
+			    "Rejecting SRR_IU_DATA_IN with non GOOD "
+			    "scsi_status\n");
+			goto out_reject;
+		}
+		cmd->bufflen = se_cmd->data_length;
+
+		if (qlt_has_data(cmd)) {
+			if (qlt_srr_adjust_data(cmd, offset, &xmit_type) != 0)
+				goto out_reject;
+			spin_lock_irqsave(&ha->hardware_lock, flags);
+			qlt_send_notify_ack(vha, ntfy,
+			    0, 0, 0, NOTIFY_ACK_SRR_FLAGS_ACCEPT, 0, 0);
+			spin_unlock_irqrestore(&ha->hardware_lock, flags);
+			resp = 1;
+		} else {
+			ql_dbg(ql_dbg_tgt_mgt, vha, 0xf064,
+			    "qla_target(%d): SRR for data-in on a cmd "
+			    "without data (tag %d, SCSI status %d), "
+			    "rejecting", vha->vp_idx, cmd->tag,
+			    cmd->se_cmd.scsi_status);
+			goto out_reject;
+		}
+		break;
+	case SRR_IU_DATA_OUT:
+		if (!cmd->sg || !cmd->sg_cnt) {
+			ql_dbg(ql_dbg_tgt_mgt, vha, 0xf065,
+			    "Unable to process SRR_IU_DATA_OUT due to"
+			    " missing cmd->sg\n");
+			dump_stack();
+			goto out_reject;
+		}
+		if (se_cmd->scsi_status != 0) {
+			ql_dbg(ql_dbg_tgt, vha, 0xe02b,
+			    "Rejecting SRR_IU_DATA_OUT"
+			    " with non GOOD scsi_status\n");
+			goto out_reject;
+		}
+		cmd->bufflen = se_cmd->data_length;
+
+		if (qlt_has_data(cmd)) {
+			if (qlt_srr_adjust_data(cmd, offset, &xmit_type) != 0)
+				goto out_reject;
+			spin_lock_irqsave(&ha->hardware_lock, flags);
+			qlt_send_notify_ack(vha, ntfy,
+			    0, 0, 0, NOTIFY_ACK_SRR_FLAGS_ACCEPT, 0, 0);
+			spin_unlock_irqrestore(&ha->hardware_lock, flags);
+			if (xmit_type & QLA_TGT_XMIT_DATA)
+				qlt_rdy_to_xfer(cmd);
+		} else {
+			ql_dbg(ql_dbg_tgt_mgt, vha, 0xf066,
+			    "qla_target(%d): SRR for data-out on a cmd "
+			    "without data (tag %d, SCSI status %d), "
+			    "rejecting", vha->vp_idx, cmd->tag,
+			    cmd->se_cmd.scsi_status);
+			goto out_reject;
+		}
+		break;
+	default:
+		ql_dbg(ql_dbg_tgt_mgt, vha, 0xf067,
+		    "qla_target(%d): Unknown srr_ui value %x",
+		    vha->vp_idx, srr_ui);
+		goto out_reject;
+	}
+
+	/* Transmit response in case of status and data-in cases */
+	if (resp)
+		qlt_xmit_response(cmd, xmit_type, se_cmd->scsi_status);
+
+	return;
+
+out_reject:
+	spin_lock_irqsave(&ha->hardware_lock, flags);
+	qlt_send_notify_ack(vha, ntfy, 0, 0, 0,
+	    NOTIFY_ACK_SRR_FLAGS_REJECT,
+	    NOTIFY_ACK_SRR_REJECT_REASON_UNABLE_TO_PERFORM,
+	    NOTIFY_ACK_SRR_FLAGS_REJECT_EXPL_NO_EXPL);
+	if (cmd->state == QLA_TGT_STATE_NEED_DATA) {
+		cmd->state = QLA_TGT_STATE_DATA_IN;
+		dump_stack();
+	} else
+		qlt_send_term_exchange(vha, cmd, &cmd->atio, 1);
+	spin_unlock_irqrestore(&ha->hardware_lock, flags);
+}
+
+static void qlt_reject_free_srr_imm(struct scsi_qla_host *vha,
+	struct qla_tgt_srr_imm *imm, int ha_locked)
+{
+	struct qla_hw_data *ha = vha->hw;
+	unsigned long flags = 0;
+
+	if (!ha_locked)
+		spin_lock_irqsave(&ha->hardware_lock, flags);
+
+	qlt_send_notify_ack(vha, (void *)&imm->imm_ntfy, 0, 0, 0,
+	    NOTIFY_ACK_SRR_FLAGS_REJECT,
+	    NOTIFY_ACK_SRR_REJECT_REASON_UNABLE_TO_PERFORM,
+	    NOTIFY_ACK_SRR_FLAGS_REJECT_EXPL_NO_EXPL);
+
+	if (!ha_locked)
+		spin_unlock_irqrestore(&ha->hardware_lock, flags);
+
+	kfree(imm);
+}
+
+static void qlt_handle_srr_work(struct work_struct *work)
+{
+	struct qla_tgt *tgt = container_of(work, struct qla_tgt, srr_work);
+	struct scsi_qla_host *vha = tgt->vha;
+	struct qla_tgt_srr_ctio *sctio;
+	unsigned long flags;
+
+	ql_dbg(ql_dbg_tgt_mgt, vha, 0xf029, "Entering SRR work (tgt %p)\n",
+	    tgt);
+
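+	/*
+	 * srr_lock is dropped while each CTIO/IMM pair is handled, so rescan
+	 * the list from the head after every processed entry.
+	 */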
+restart:
+	spin_lock_irqsave(&tgt->srr_lock, flags);
+	list_for_each_entry(sctio, &tgt->srr_ctio_list, srr_list_entry) {
+		struct qla_tgt_srr_imm *imm, *i, *ti;
+		struct qla_tgt_cmd *cmd;
+		struct se_cmd *se_cmd;
+
+		imm = NULL;
+		list_for_each_entry_safe(i, ti, &tgt->srr_imm_list,
+						srr_list_entry) {
+			if (i->srr_id == sctio->srr_id) {
+				list_del(&i->srr_list_entry);
+				if (imm) {
+					ql_dbg(ql_dbg_tgt_mgt, vha, 0xf068,
+					  "qla_target(%d): There must be "
+					  "only one IMM SRR per CTIO SRR "
+					  "(IMM SRR %p, id %d, CTIO %p)\n",
+					  vha->vp_idx, i, i->srr_id, sctio);
+					qlt_reject_free_srr_imm(tgt->vha, i, 0);
+				} else
+					imm = i;
+			}
+		}
+
+		ql_dbg(ql_dbg_tgt_mgt, vha, 0xf02a,
+		    "IMM SRR %p, CTIO SRR %p (id %d)\n", imm, sctio,
+		    sctio->srr_id);
+
+		if (imm == NULL) {
+			ql_dbg(ql_dbg_tgt_mgt, vha, 0xf02b,
+			    "No matching IMM found for SRR CTIO (id %d)\n",
+			    sctio->srr_id);
+			continue;
+		} else
+			list_del(&sctio->srr_list_entry);
+
+		spin_unlock_irqrestore(&tgt->srr_lock, flags);
+
+		cmd = sctio->cmd;
+		/*
+		 * Reset qla_tgt_cmd SRR values and SGL pointer+count to follow
+		 * tcm_qla2xxx_write_pending() and tcm_qla2xxx_queue_data_in()
+		 * logic.
+		 */
+		cmd->offset = 0;
+		if (cmd->free_sg) {
+			kfree(cmd->sg);
+			cmd->sg = NULL;
+			cmd->free_sg = 0;
+		}
+		se_cmd = &cmd->se_cmd;
+
+		cmd->sg_cnt = se_cmd->t_data_nents;
+		cmd->sg = se_cmd->t_data_sg;
+
+		ql_dbg(ql_dbg_tgt_mgt, vha, 0xf02c,
+		    "SRR cmd %p (se_cmd %p, tag %d, op %x), "
+		    "sg_cnt=%d, offset=%d", cmd, &cmd->se_cmd, cmd->tag,
+		    se_cmd->t_task_cdb[0], cmd->sg_cnt, cmd->offset);
+
+		qlt_handle_srr(vha, sctio, imm);
+
+		kfree(imm);
+		kfree(sctio);
+		goto restart;
+	}
+	spin_unlock_irqrestore(&tgt->srr_lock, flags);
+}
+
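+/*
+ * Queue an immediate notify SRR and, once the matching CTIO SRR has arrived
+ * (imm_srr_id == ctio_srr_id), schedule srr_work to pair the two up.
+ */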
+/* ha->hardware_lock supposed to be held on entry */
+static void qlt_prepare_srr_imm(struct scsi_qla_host *vha,
+	struct imm_ntfy_from_isp *iocb)
+{
+	struct qla_tgt_srr_imm *imm;
+	struct qla_hw_data *ha = vha->hw;
+	struct qla_tgt *tgt = ha->tgt.qla_tgt;
+	struct qla_tgt_srr_ctio *sctio;
+
+	tgt->imm_srr_id++;
+
+	ql_dbg(ql_dbg_tgt_mgt, vha, 0xf02d, "qla_target(%d): SRR received\n",
+	    vha->vp_idx);
+
+	imm = kzalloc(sizeof(*imm), GFP_ATOMIC);
+	if (imm != NULL) {
+		memcpy(&imm->imm_ntfy, iocb, sizeof(imm->imm_ntfy));
+
+		/* IRQ is already OFF */
+		spin_lock(&tgt->srr_lock);
+		imm->srr_id = tgt->imm_srr_id;
+		list_add_tail(&imm->srr_list_entry,
+		    &tgt->srr_imm_list);
+		ql_dbg(ql_dbg_tgt_mgt, vha, 0xf02e,
+		    "IMM NTFY SRR %p added (id %d, ui %x)\n",
+		    imm, imm->srr_id, iocb->u.isp24.srr_ui);
+		if (tgt->imm_srr_id == tgt->ctio_srr_id) {
+			int found = 0;
+			list_for_each_entry(sctio, &tgt->srr_ctio_list,
+			    srr_list_entry) {
+				if (sctio->srr_id == imm->srr_id) {
+					found = 1;
+					break;
+				}
+			}
+			if (found) {
+				ql_dbg(ql_dbg_tgt_mgt, vha, 0xf02f, "%s",
+				    "Scheduling srr work\n");
+				schedule_work(&tgt->srr_work);
+			} else {
+				ql_dbg(ql_dbg_tgt_mgt, vha, 0xf030,
+				    "qla_target(%d): imm_srr_id "
+				    "== ctio_srr_id (%d), but there is no "
+				    "corresponding SRR CTIO, deleting IMM "
+				    "SRR %p\n", vha->vp_idx, tgt->ctio_srr_id,
+				    imm);
+				list_del(&imm->srr_list_entry);
+
+				kfree(imm);
+
+				spin_unlock(&tgt->srr_lock);
+				goto out_reject;
+			}
+		}
+		spin_unlock(&tgt->srr_lock);
+	} else {
+		struct qla_tgt_srr_ctio *ts;
+
+		ql_dbg(ql_dbg_tgt_mgt, vha, 0xf069,
+		    "qla_target(%d): Unable to allocate SRR IMM "
+		    "entry, SRR request will be rejected\n", vha->vp_idx);
+
+		/* IRQ is already OFF */
+		spin_lock(&tgt->srr_lock);
+		list_for_each_entry_safe(sctio, ts, &tgt->srr_ctio_list,
+		    srr_list_entry) {
+			if (sctio->srr_id == tgt->imm_srr_id) {
+				ql_dbg(ql_dbg_tgt_mgt, vha, 0xf031,
+				    "CTIO SRR %p deleted (id %d)\n",
+				    sctio, sctio->srr_id);
+				list_del(&sctio->srr_list_entry);
+				qlt_send_term_exchange(vha, sctio->cmd,
+				    &sctio->cmd->atio, 1);
+				kfree(sctio);
+			}
+		}
+		spin_unlock(&tgt->srr_lock);
+		goto out_reject;
+	}
+
+	return;
+
+out_reject:
+	qlt_send_notify_ack(vha, iocb, 0, 0, 0,
+	    NOTIFY_ACK_SRR_FLAGS_REJECT,
+	    NOTIFY_ACK_SRR_REJECT_REASON_UNABLE_TO_PERFORM,
+	    NOTIFY_ACK_SRR_FLAGS_REJECT_EXPL_NO_EXPL);
+}
+
+/*
+ * ha->hardware_lock supposed to be held on entry. Might drop it, then reacquire
+ */
+static void qlt_handle_imm_notify(struct scsi_qla_host *vha,
+	struct imm_ntfy_from_isp *iocb)
+{
+	struct qla_hw_data *ha = vha->hw;
+	uint32_t add_flags = 0;
+	int send_notify_ack = 1;
+	uint16_t status;
+
+	status = le16_to_cpu(iocb->u.isp2x.status);
+	switch (status) {
+	case IMM_NTFY_LIP_RESET:
+	{
+		ql_dbg(ql_dbg_tgt_mgt, vha, 0xf032,
+		    "qla_target(%d): LIP reset (loop %#x), subcode %x\n",
+		    vha->vp_idx, le16_to_cpu(iocb->u.isp24.nport_handle),
+		    iocb->u.isp24.status_subcode);
+
+		if (qlt_reset(vha, iocb, QLA_TGT_ABORT_ALL) == 0)
+			send_notify_ack = 0;
+		break;
+	}
+
+	case IMM_NTFY_LIP_LINK_REINIT:
+	{
+		struct qla_tgt *tgt = ha->tgt.qla_tgt;
+		ql_dbg(ql_dbg_tgt_mgt, vha, 0xf033,
+		    "qla_target(%d): LINK REINIT (loop %#x, "
+		    "subcode %x)\n", vha->vp_idx,
+		    le16_to_cpu(iocb->u.isp24.nport_handle),
+		    iocb->u.isp24.status_subcode);
+		if (tgt->link_reinit_iocb_pending) {
+			qlt_send_notify_ack(vha, &tgt->link_reinit_iocb,
+			    0, 0, 0, 0, 0, 0);
+		}
+		memcpy(&tgt->link_reinit_iocb, iocb, sizeof(*iocb));
+		tgt->link_reinit_iocb_pending = 1;
+		/*
+		 * QLogic requires waiting after LINK REINIT for possible
+		 * PDISC or ADISC ELS commands
+		 */
+		send_notify_ack = 0;
+		break;
+	}
+
+	case IMM_NTFY_PORT_LOGOUT:
+		ql_dbg(ql_dbg_tgt_mgt, vha, 0xf034,
+		    "qla_target(%d): Port logout (loop "
+		    "%#x, subcode %x)\n", vha->vp_idx,
+		    le16_to_cpu(iocb->u.isp24.nport_handle),
+		    iocb->u.isp24.status_subcode);
+
+		if (qlt_reset(vha, iocb, QLA_TGT_NEXUS_LOSS_SESS) == 0)
+			send_notify_ack = 0;
+		/* The sessions will be cleared in the callback, if needed */
+		break;
+
+	case IMM_NTFY_GLBL_TPRLO:
+		ql_dbg(ql_dbg_tgt_mgt, vha, 0xf035,
+		    "qla_target(%d): Global TPRLO (%x)\n", vha->vp_idx, status);
+		if (qlt_reset(vha, iocb, QLA_TGT_NEXUS_LOSS) == 0)
+			send_notify_ack = 0;
+		/* The sessions will be cleared in the callback, if needed */
+		break;
+
+	case IMM_NTFY_PORT_CONFIG:
+		ql_dbg(ql_dbg_tgt_mgt, vha, 0xf036,
+		    "qla_target(%d): Port config changed (%x)\n", vha->vp_idx,
+		    status);
+		if (qlt_reset(vha, iocb, QLA_TGT_ABORT_ALL) == 0)
+			send_notify_ack = 0;
+		/* The sessions will be cleared in the callback, if needed */
+		break;
+
+	case IMM_NTFY_GLBL_LOGO:
+		ql_dbg(ql_dbg_tgt_mgt, vha, 0xf06a,
+		    "qla_target(%d): Link failure detected\n",
+		    vha->vp_idx);
+		/* I_T nexus loss */
+		if (qlt_reset(vha, iocb, QLA_TGT_NEXUS_LOSS) == 0)
+			send_notify_ack = 0;
+		break;
+
+	case IMM_NTFY_IOCB_OVERFLOW:
+		ql_dbg(ql_dbg_tgt_mgt, vha, 0xf06b,
+		    "qla_target(%d): Cannot provide requested "
+		    "capability (IOCB overflowed the immediate notify "
+		    "resource count)\n", vha->vp_idx);
+		break;
+
+	case IMM_NTFY_ABORT_TASK:
+		ql_dbg(ql_dbg_tgt_mgt, vha, 0xf037,
+		    "qla_target(%d): Abort Task (S %08x I %#x -> "
+		    "L %#x)\n", vha->vp_idx,
+		    le16_to_cpu(iocb->u.isp2x.seq_id),
+		    GET_TARGET_ID(ha, (struct atio_from_isp *)iocb),
+		    le16_to_cpu(iocb->u.isp2x.lun));
+		if (qlt_abort_task(vha, iocb) == 0)
+			send_notify_ack = 0;
+		break;
+
+	case IMM_NTFY_RESOURCE:
+		ql_dbg(ql_dbg_tgt_mgt, vha, 0xf06c,
+		    "qla_target(%d): Out of resources, host %ld\n",
+		    vha->vp_idx, vha->host_no);
+		break;
+
+	case IMM_NTFY_MSG_RX:
+		ql_dbg(ql_dbg_tgt_mgt, vha, 0xf038,
+		    "qla_target(%d): Immediate notify task %x\n",
+		    vha->vp_idx, iocb->u.isp2x.task_flags);
+		if (qlt_handle_task_mgmt(vha, iocb) == 0)
+			send_notify_ack = 0;
+		break;
+
+	case IMM_NTFY_ELS:
+		if (qlt_24xx_handle_els(vha, iocb) == 0)
+			send_notify_ack = 0;
+		break;
+
+	case IMM_NTFY_SRR:
+		qlt_prepare_srr_imm(vha, iocb);
+		send_notify_ack = 0;
+		break;
+
+	default:
+		ql_dbg(ql_dbg_tgt_mgt, vha, 0xf06d,
+		    "qla_target(%d): Received unknown immediate "
+		    "notify status %x\n", vha->vp_idx, status);
+		break;
+	}
+
+	if (send_notify_ack)
+		qlt_send_notify_ack(vha, iocb, add_flags, 0, 0, 0, 0, 0);
+}
+
+/*
+ * ha->hardware_lock supposed to be held on entry. Might drop it, then reacquire
+ * This function sends busy to ISP 2xxx or 24xx.
+ */
+static void qlt_send_busy(struct scsi_qla_host *vha,
+	struct atio_from_isp *atio, uint16_t status)
+{
+	struct ctio7_to_24xx *ctio24;
+	struct qla_hw_data *ha = vha->hw;
+	request_t *pkt;
+	struct qla_tgt_sess *sess = NULL;
+
+	sess = ha->tgt.tgt_ops->find_sess_by_s_id(vha,
+	    atio->u.isp24.fcp_hdr.s_id);
+	if (!sess) {
+		qlt_send_term_exchange(vha, NULL, atio, 1);
+		return;
+	}
+	/* Sending a marker isn't necessary, since we are called from ISR */
+
+	pkt = (request_t *)qla2x00_alloc_iocbs(vha, NULL);
+	if (!pkt) {
+		ql_dbg(ql_dbg_tgt_mgt, vha, 0xf06e,
+		    "qla_target(%d): %s failed: unable to allocate "
+		    "request packet", vha->vp_idx, __func__);
+		return;
+	}
+
+	pkt->entry_count = 1;
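+	/* No command is associated; mark the handle as a target-mode CTIO */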
+	pkt->handle = QLA_TGT_SKIP_HANDLE | CTIO_COMPLETION_HANDLE_MARK;
+
+	ctio24 = (struct ctio7_to_24xx *)pkt;
+	ctio24->entry_type = CTIO_TYPE7;
+	ctio24->nport_handle = sess->loop_id;
+	ctio24->timeout = __constant_cpu_to_le16(QLA_TGT_TIMEOUT);
+	ctio24->vp_index = vha->vp_idx;
+	ctio24->initiator_id[0] = atio->u.isp24.fcp_hdr.s_id[2];
+	ctio24->initiator_id[1] = atio->u.isp24.fcp_hdr.s_id[1];
+	ctio24->initiator_id[2] = atio->u.isp24.fcp_hdr.s_id[0];
+	ctio24->exchange_addr = atio->u.isp24.exchange_addr;
+	ctio24->u.status1.flags = (atio->u.isp24.attr << 9) |
+	    __constant_cpu_to_le16(
+		CTIO7_FLAGS_STATUS_MODE_1 | CTIO7_FLAGS_SEND_STATUS |
+		CTIO7_FLAGS_DONT_RET_CTIO);
+	/*
+	 * CTIO from fw w/o se_cmd doesn't provide enough info to retry it,
+	 * if explicit confirmation is used.
+	 */
+	ctio24->u.status1.ox_id = swab16(atio->u.isp24.fcp_hdr.ox_id);
+	ctio24->u.status1.scsi_status = cpu_to_le16(status);
+	ctio24->u.status1.residual = get_unaligned((uint32_t *)
+	    &atio->u.isp24.fcp_cmnd.add_cdb[
+	    atio->u.isp24.fcp_cmnd.add_cdb_len]);
+	if (ctio24->u.status1.residual != 0)
+		ctio24->u.status1.scsi_status |= SS_RESIDUAL_UNDER;
+
+	qla2x00_start_iocbs(vha, vha->req);
+}
+
+/* ha->hardware_lock supposed to be held on entry */
+/* called via callback from qla2xxx */
+static void qlt_24xx_atio_pkt(struct scsi_qla_host *vha,
+	struct atio_from_isp *atio)
+{
+	struct qla_hw_data *ha = vha->hw;
+	struct qla_tgt *tgt = ha->tgt.qla_tgt;
+	int rc;
+
+	if (unlikely(tgt == NULL)) {
+		ql_dbg(ql_dbg_tgt_mgt, vha, 0xf039,
+		    "ATIO pkt, but no tgt (ha %p)", ha);
+		return;
+	}
+	ql_dbg(ql_dbg_tgt, vha, 0xe02c,
+	    "qla_target(%d): ATIO pkt %p: type %02x count %02x",
+	    vha->vp_idx, atio, atio->u.raw.entry_type,
+	    atio->u.raw.entry_count);
+	/*
+	 * In tgt_stop mode we should also allow all requests to pass.
+	 * Otherwise, some commands can get stuck.
+	 */
+
+	tgt->irq_cmd_count++;
+
+	switch (atio->u.raw.entry_type) {
+	case ATIO_TYPE7:
+		ql_dbg(ql_dbg_tgt, vha, 0xe02d,
+		    "ATIO_TYPE7 instance %d, lun %Lx, read/write %d/%d, "
+		    "add_cdb_len %d, data_length %04x, s_id %x:%x:%x\n",
+		    vha->vp_idx, atio->u.isp24.fcp_cmnd.lun,
+		    atio->u.isp24.fcp_cmnd.rddata,
+		    atio->u.isp24.fcp_cmnd.wrdata,
+		    atio->u.isp24.fcp_cmnd.add_cdb_len,
+		    be32_to_cpu(get_unaligned((uint32_t *)
+			&atio->u.isp24.fcp_cmnd.add_cdb[
+			atio->u.isp24.fcp_cmnd.add_cdb_len])),
+		    atio->u.isp24.fcp_hdr.s_id[0],
+		    atio->u.isp24.fcp_hdr.s_id[1],
+		    atio->u.isp24.fcp_hdr.s_id[2]);
+
+		if (unlikely(atio->u.isp24.exchange_addr ==
+		    ATIO_EXCHANGE_ADDRESS_UNKNOWN)) {
+			ql_dbg(ql_dbg_tgt, vha, 0xe058,
+			    "qla_target(%d): ATIO_TYPE7 "
+			    "received with UNKNOWN exchange address, "
+			    "sending QUEUE_FULL\n", vha->vp_idx);
+			qlt_send_busy(vha, atio, SAM_STAT_TASK_SET_FULL);
+			break;
+		}
+		if (likely(atio->u.isp24.fcp_cmnd.task_mgmt_flags == 0))
+			rc = qlt_handle_cmd_for_atio(vha, atio);
+		else
+			rc = qlt_handle_task_mgmt(vha, atio);
+		if (unlikely(rc != 0)) {
+			if (rc == -ESRCH) {
+#if 1 /* With TERM EXCHANGE some FC cards refuse to boot */
+				qlt_send_busy(vha, atio, SAM_STAT_BUSY);
+#else
+				qlt_send_term_exchange(vha, NULL, atio, 1);
+#endif
+			} else {
+				if (tgt->tgt_stop) {
+					ql_dbg(ql_dbg_tgt, vha, 0xe059,
+					    "qla_target: Unable to send "
+					    "command to target for req, "
+					    "ignoring.\n");
+				} else {
+					ql_dbg(ql_dbg_tgt, vha, 0xe05a,
+					    "qla_target(%d): Unable to send "
+					    "command to target, sending BUSY "
+					    "status.\n", vha->vp_idx);
+					qlt_send_busy(vha, atio, SAM_STAT_BUSY);
+				}
+			}
+		}
+		break;
+
+	case IMMED_NOTIFY_TYPE:
+	{
+		if (unlikely(atio->u.isp2x.entry_status != 0)) {
+			ql_dbg(ql_dbg_tgt, vha, 0xe05b,
+			    "qla_target(%d): Received ATIO packet %x "
+			    "with error status %x\n", vha->vp_idx,
+			    atio->u.raw.entry_type,
+			    atio->u.isp2x.entry_status);
+			break;
+		}
+		ql_dbg(ql_dbg_tgt, vha, 0xe02e, "%s", "IMMED_NOTIFY ATIO");
+		qlt_handle_imm_notify(vha, (struct imm_ntfy_from_isp *)atio);
+		break;
+	}
+
+	default:
+		ql_dbg(ql_dbg_tgt, vha, 0xe05c,
+		    "qla_target(%d): Received unknown ATIO packet "
+		    "type %x\n", vha->vp_idx, atio->u.raw.entry_type);
+		break;
+	}
+
+	tgt->irq_cmd_count--;
+}
+
+/* ha->hardware_lock supposed to be held on entry */
+/* called via callback from qla2xxx */
+static void qlt_response_pkt(struct scsi_qla_host *vha, response_t *pkt)
+{
+	struct qla_hw_data *ha = vha->hw;
+	struct qla_tgt *tgt = ha->tgt.qla_tgt;
+
+	if (unlikely(tgt == NULL)) {
+		ql_dbg(ql_dbg_tgt, vha, 0xe05d,
+		    "qla_target(%d): Response pkt %x received, but no "
+		    "tgt (ha %p)\n", vha->vp_idx, pkt->entry_type, ha);
+		return;
+	}
+
+	ql_dbg(ql_dbg_tgt, vha, 0xe02f,
+	    "qla_target(%d): response pkt %p: T %02x C %02x S %02x "
+	    "handle %#x\n", vha->vp_idx, pkt, pkt->entry_type,
+	    pkt->entry_count, pkt->entry_status, pkt->handle);
+
+	/*
+	 * In tgt_stop mode we should also allow all requests to pass.
+	 * Otherwise, some commands can get stuck.
+	 */
+
+	tgt->irq_cmd_count++;
+
+	switch (pkt->entry_type) {
+	case CTIO_TYPE7:
+	{
+		struct ctio7_from_24xx *entry = (struct ctio7_from_24xx *)pkt;
+		ql_dbg(ql_dbg_tgt, vha, 0xe030, "CTIO_TYPE7: instance %d\n",
+		    vha->vp_idx);
+		qlt_do_ctio_completion(vha, entry->handle,
+		    le16_to_cpu(entry->status)|(pkt->entry_status << 16),
+		    entry);
+		break;
+	}
+
+	case ACCEPT_TGT_IO_TYPE:
+	{
+		struct atio_from_isp *atio = (struct atio_from_isp *)pkt;
+		int rc;
+		ql_dbg(ql_dbg_tgt, vha, 0xe031,
+		    "ACCEPT_TGT_IO instance %d status %04x "
+		    "lun %04x read/write %d data_length %04x "
+		    "target_id %02x rx_id %04x\n ", vha->vp_idx,
+		    le16_to_cpu(atio->u.isp2x.status),
+		    le16_to_cpu(atio->u.isp2x.lun),
+		    atio->u.isp2x.execution_codes,
+		    le32_to_cpu(atio->u.isp2x.data_length), GET_TARGET_ID(ha,
+		    atio), atio->u.isp2x.rx_id);
+		if (atio->u.isp2x.status !=
+		    __constant_cpu_to_le16(ATIO_CDB_VALID)) {
+			ql_dbg(ql_dbg_tgt, vha, 0xe05e,
+			    "qla_target(%d): ATIO with error "
+			    "status %x received\n", vha->vp_idx,
+			    le16_to_cpu(atio->u.isp2x.status));
+			break;
+		}
+		ql_dbg(ql_dbg_tgt, vha, 0xe032,
+		    "FCP CDB: 0x%02x, sizeof(cdb): %lu",
+		    atio->u.isp2x.cdb[0], (unsigned long
+		    int)sizeof(atio->u.isp2x.cdb));
+
+		rc = qlt_handle_cmd_for_atio(vha, atio);
+		if (unlikely(rc != 0)) {
+			if (rc == -ESRCH) {
+#if 1 /* With TERM EXCHANGE some FC cards refuse to boot */
+				qlt_send_busy(vha, atio, 0);
+#else
+				qlt_send_term_exchange(vha, NULL, atio, 1);
+#endif
+			} else {
+				if (tgt->tgt_stop) {
+					ql_dbg(ql_dbg_tgt, vha, 0xe05f,
+					    "qla_target: Unable to send "
+					    "command to target, sending TERM "
+					    "EXCHANGE for rsp\n");
+					qlt_send_term_exchange(vha, NULL,
+					    atio, 1);
+				} else {
+					ql_dbg(ql_dbg_tgt, vha, 0xe060,
+					    "qla_target(%d): Unable to send "
+					    "command to target, sending BUSY "
+					    "status\n", vha->vp_idx);
+					qlt_send_busy(vha, atio, 0);
+				}
+			}
+		}
+	}
+	break;
+
+	case CONTINUE_TGT_IO_TYPE:
+	{
+		struct ctio_to_2xxx *entry = (struct ctio_to_2xxx *)pkt;
+		ql_dbg(ql_dbg_tgt, vha, 0xe033,
+		    "CONTINUE_TGT_IO: instance %d\n", vha->vp_idx);
+		qlt_do_ctio_completion(vha, entry->handle,
+		    le16_to_cpu(entry->status)|(pkt->entry_status << 16),
+		    entry);
+		break;
+	}
+
+	case CTIO_A64_TYPE:
+	{
+		struct ctio_to_2xxx *entry = (struct ctio_to_2xxx *)pkt;
+		ql_dbg(ql_dbg_tgt, vha, 0xe034, "CTIO_A64: instance %d\n",
+		    vha->vp_idx);
+		qlt_do_ctio_completion(vha, entry->handle,
+		    le16_to_cpu(entry->status)|(pkt->entry_status << 16),
+		    entry);
+		break;
+	}
+
+	case IMMED_NOTIFY_TYPE:
+		ql_dbg(ql_dbg_tgt, vha, 0xe035, "%s", "IMMED_NOTIFY\n");
+		qlt_handle_imm_notify(vha, (struct imm_ntfy_from_isp *)pkt);
+		break;
+
+	case NOTIFY_ACK_TYPE:
+		if (tgt->notify_ack_expected > 0) {
+			struct nack_to_isp *entry = (struct nack_to_isp *)pkt;
+			ql_dbg(ql_dbg_tgt, vha, 0xe036,
+			    "NOTIFY_ACK seq %08x status %x\n",
+			    le16_to_cpu(entry->u.isp2x.seq_id),
+			    le16_to_cpu(entry->u.isp2x.status));
+			tgt->notify_ack_expected--;
+			if (entry->u.isp2x.status !=
+			    __constant_cpu_to_le16(NOTIFY_ACK_SUCCESS)) {
+				ql_dbg(ql_dbg_tgt, vha, 0xe061,
+				    "qla_target(%d): NOTIFY_ACK "
+				    "failed %x\n", vha->vp_idx,
+				    le16_to_cpu(entry->u.isp2x.status));
+			}
+		} else {
+			ql_dbg(ql_dbg_tgt, vha, 0xe062,
+			    "qla_target(%d): Unexpected NOTIFY_ACK received\n",
+			    vha->vp_idx);
+		}
+		break;
+
+	case ABTS_RECV_24XX:
+		ql_dbg(ql_dbg_tgt, vha, 0xe037,
+		    "ABTS_RECV_24XX: instance %d\n", vha->vp_idx);
+		qlt_24xx_handle_abts(vha, (struct abts_recv_from_24xx *)pkt);
+		break;
+
+	case ABTS_RESP_24XX:
+		if (tgt->abts_resp_expected > 0) {
+			struct abts_resp_from_24xx_fw *entry =
+				(struct abts_resp_from_24xx_fw *)pkt;
+			ql_dbg(ql_dbg_tgt, vha, 0xe038,
+			    "ABTS_RESP_24XX: compl_status %x\n",
+			    entry->compl_status);
+			tgt->abts_resp_expected--;
+			if (le16_to_cpu(entry->compl_status) !=
+			    ABTS_RESP_COMPL_SUCCESS) {
+				if ((entry->error_subcode1 == 0x1E) &&
+				    (entry->error_subcode2 == 0)) {
+					/*
+					 * We've got a race here: the aborted
+					 * exchange was not terminated, i.e.
+					 * the response for the aborted command
+					 * was sent between the time the abort
+					 * request was received and the time it
+					 * was processed.
+					 * Unfortunately, the firmware has a
+					 * silly requirement that all aborted
+					 * exchanges must be explicitly
+					 * terminated, otherwise it refuses to
+					 * send responses for the abort
+					 * requests. So, we have to
+					 * (re)terminate the exchange and retry
+					 * the abort response.
+					 */
+					qlt_24xx_retry_term_exchange(vha,
+					    entry);
+				} else
+					ql_dbg(ql_dbg_tgt, vha, 0xe063,
+					    "qla_target(%d): ABTS_RESP_24XX "
+					    "failed %x (subcode %x:%x)",
+					    vha->vp_idx, entry->compl_status,
+					    entry->error_subcode1,
+					    entry->error_subcode2);
+			}
+		} else {
+			ql_dbg(ql_dbg_tgt, vha, 0xe064,
+			    "qla_target(%d): Unexpected ABTS_RESP_24XX "
+			    "received\n", vha->vp_idx);
+		}
+		break;
+
+	default:
+		ql_dbg(ql_dbg_tgt, vha, 0xe065,
+		    "qla_target(%d): Received unknown response pkt "
+		    "type %x\n", vha->vp_idx, pkt->entry_type);
+		break;
+	}
+
+	tgt->irq_cmd_count--;
+}
+
+/*
+ * ha->hardware_lock supposed to be held on entry. Might drop it, then reacquire
+ */
+void qlt_async_event(uint16_t code, struct scsi_qla_host *vha,
+	uint16_t *mailbox)
+{
+	struct qla_hw_data *ha = vha->hw;
+	struct qla_tgt *tgt = ha->tgt.qla_tgt;
+	int reason_code;
+
+	ql_dbg(ql_dbg_tgt, vha, 0xe039,
+	    "scsi(%ld): ha state %d init_done %d oper_mode %d topo %d\n",
+	    vha->host_no, atomic_read(&vha->loop_state), vha->flags.init_done,
+	    ha->operating_mode, ha->current_topology);
+
+	if (!ha->tgt.tgt_ops)
+		return;
+
+	if (unlikely(tgt == NULL)) {
+		ql_dbg(ql_dbg_tgt, vha, 0xe03a,
+		    "ASYNC EVENT %#x, but no tgt (ha %p)\n", code, ha);
+		return;
+	}
+
+	if (((code == MBA_POINT_TO_POINT) || (code == MBA_CHG_IN_CONNECTION)) &&
+	    IS_QLA2100(ha))
+		return;
+	/*
+	 * In tgt_stop mode we should also allow all requests to pass.
+	 * Otherwise, some commands can get stuck.
+	 */
+
+	tgt->irq_cmd_count++;
+
+	switch (code) {
+	case MBA_RESET:			/* Reset */
+	case MBA_SYSTEM_ERR:		/* System Error */
+	case MBA_REQ_TRANSFER_ERR:	/* Request Transfer Error */
+	case MBA_RSP_TRANSFER_ERR:	/* Response Transfer Error */
+		ql_dbg(ql_dbg_tgt_mgt, vha, 0xf03a,
+		    "qla_target(%d): System error async event %#x "
+		    "occurred", vha->vp_idx, code);
+		break;
+	case MBA_WAKEUP_THRES:		/* Request Queue Wake-up. */
+		set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
+		break;
+
+	case MBA_LOOP_UP:
+	{
+		ql_dbg(ql_dbg_tgt_mgt, vha, 0xf03b,
+		    "qla_target(%d): Async LOOP_UP occurred "
+		    "(m[1]=%x, m[2]=%x, m[3]=%x, m[4]=%x)", vha->vp_idx,
+		    le16_to_cpu(mailbox[1]), le16_to_cpu(mailbox[2]),
+		    le16_to_cpu(mailbox[3]), le16_to_cpu(mailbox[4]));
+		if (tgt->link_reinit_iocb_pending) {
+			qlt_send_notify_ack(vha, (void *)&tgt->link_reinit_iocb,
+			    0, 0, 0, 0, 0, 0);
+			tgt->link_reinit_iocb_pending = 0;
+		}
+		break;
+	}
+
+	case MBA_LIP_OCCURRED:
+	case MBA_LOOP_DOWN:
+	case MBA_LIP_RESET:
+	case MBA_RSCN_UPDATE:
+		ql_dbg(ql_dbg_tgt_mgt, vha, 0xf03c,
+		    "qla_target(%d): Async event %#x occurred "
+		    "(m[1]=%x, m[2]=%x, m[3]=%x, m[4]=%x)", vha->vp_idx, code,
+		    le16_to_cpu(mailbox[1]), le16_to_cpu(mailbox[2]),
+		    le16_to_cpu(mailbox[3]), le16_to_cpu(mailbox[4]));
+		break;
+
+	case MBA_PORT_UPDATE:
+		ql_dbg(ql_dbg_tgt_mgt, vha, 0xf03d,
+		    "qla_target(%d): Port update async event %#x "
+		    "occurred: updating the ports database (m[1]=%x, m[2]=%x, "
+		    "m[3]=%x, m[4]=%x)", vha->vp_idx, code,
+		    le16_to_cpu(mailbox[1]), le16_to_cpu(mailbox[2]),
+		    le16_to_cpu(mailbox[3]), le16_to_cpu(mailbox[4]));
+		reason_code = le16_to_cpu(mailbox[2]);
+		if (reason_code == 0x4)
+			ql_dbg(ql_dbg_tgt_mgt, vha, 0xf03e,
+			    "Async MB 2: Got PLOGI Complete\n");
+		else if (reason_code == 0x7)
+			ql_dbg(ql_dbg_tgt_mgt, vha, 0xf03f,
+			    "Async MB 2: Port Logged Out\n");
+		break;
+
+	default:
+		ql_dbg(ql_dbg_tgt_mgt, vha, 0xf040,
+		    "qla_target(%d): Async event %#x occurred: "
+		    "ignore (m[1]=%x, m[2]=%x, m[3]=%x, m[4]=%x)", vha->vp_idx,
+		    code, le16_to_cpu(mailbox[1]), le16_to_cpu(mailbox[2]),
+		    le16_to_cpu(mailbox[3]), le16_to_cpu(mailbox[4]));
+		break;
+	}
+
+	tgt->irq_cmd_count--;
+}
+
+static fc_port_t *qlt_get_port_database(struct scsi_qla_host *vha,
+	uint16_t loop_id)
+{
+	fc_port_t *fcport;
+	int rc;
+
+	fcport = kzalloc(sizeof(*fcport), GFP_KERNEL);
+	if (!fcport) {
+		ql_dbg(ql_dbg_tgt_mgt, vha, 0xf06f,
+		    "qla_target(%d): Allocation of tmp FC port failed",
+		    vha->vp_idx);
+		return NULL;
+	}
+
+	ql_dbg(ql_dbg_tgt_mgt, vha, 0xf041, "loop_id %d", loop_id);
+
+	fcport->loop_id = loop_id;
+
+	rc = qla2x00_get_port_database(vha, fcport, 0);
+	if (rc != QLA_SUCCESS) {
+		ql_dbg(ql_dbg_tgt_mgt, vha, 0xf070,
+		    "qla_target(%d): Failed to retrieve fcport "
+		    "information -- get_port_database() returned %x "
+		    "(loop_id=0x%04x)", vha->vp_idx, rc, loop_id);
+		kfree(fcport);
+		return NULL;
+	}
+
+	return fcport;
+}
+
+/* Must be called under tgt_mutex */
+static struct qla_tgt_sess *qlt_make_local_sess(struct scsi_qla_host *vha,
+	uint8_t *s_id)
+{
+	struct qla_hw_data *ha = vha->hw;
+	struct qla_tgt_sess *sess = NULL;
+	fc_port_t *fcport = NULL;
+	int rc, global_resets;
+	uint16_t loop_id = 0;
+
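+	/*
+	 * If a global target reset happens while the port database is being
+	 * fetched, the snapshot taken below goes stale and we start over.
+	 */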
+retry:
+	global_resets = atomic_read(&ha->tgt.qla_tgt->tgt_global_resets_count);
+
+	rc = qla24xx_get_loop_id(vha, s_id, &loop_id);
+	if (rc != 0) {
+		if ((s_id[0] == 0xFF) &&
+		    (s_id[1] == 0xFC)) {
+			/*
+			 * This is the Domain Controller, so it should be
+			 * OK to drop SCSI commands from it.
+			 */
+			ql_dbg(ql_dbg_tgt_mgt, vha, 0xf042,
+			    "Unable to find initiator with S_ID %x:%x:%x",
+			    s_id[0], s_id[1], s_id[2]);
+		} else
+			ql_dbg(ql_dbg_tgt_mgt, vha, 0xf071,
+			    "qla_target(%d): Unable to find "
+			    "initiator with S_ID %x:%x:%x",
+			    vha->vp_idx, s_id[0], s_id[1],
+			    s_id[2]);
+		return NULL;
+	}
+
+	fcport = qlt_get_port_database(vha, loop_id);
+	if (!fcport)
+		return NULL;
+
+	if (global_resets !=
+	    atomic_read(&ha->tgt.qla_tgt->tgt_global_resets_count)) {
+		ql_dbg(ql_dbg_tgt_mgt, vha, 0xf043,
+		    "qla_target(%d): global reset during session discovery "
+		    "(counter was %d, new %d), retrying", vha->vp_idx,
+		    global_resets,
+		    atomic_read(&ha->tgt.qla_tgt->tgt_global_resets_count));
+		goto retry;
+	}
+
+	sess = qlt_create_sess(vha, fcport, true);
+
+	kfree(fcport);
+	return sess;
+}
+
+static void qlt_abort_work(struct qla_tgt *tgt,
+	struct qla_tgt_sess_work_param *prm)
+{
+	struct scsi_qla_host *vha = tgt->vha;
+	struct qla_hw_data *ha = vha->hw;
+	struct qla_tgt_sess *sess = NULL;
+	unsigned long flags;
+	uint32_t be_s_id;
+	uint8_t s_id[3];
+	int rc;
+
+	spin_lock_irqsave(&ha->hardware_lock, flags);
+
+	if (tgt->tgt_stop)
+		goto out_term;
+
+	s_id[0] = prm->abts.fcp_hdr_le.s_id[2];
+	s_id[1] = prm->abts.fcp_hdr_le.s_id[1];
+	s_id[2] = prm->abts.fcp_hdr_le.s_id[0];
+
+	sess = ha->tgt.tgt_ops->find_sess_by_s_id(vha,
+	    (unsigned char *)&be_s_id);
+	if (!sess) {
+		spin_unlock_irqrestore(&ha->hardware_lock, flags);
+
+		mutex_lock(&ha->tgt.tgt_mutex);
+		sess = qlt_make_local_sess(vha, s_id);
+		/* sess has got an extra creation ref */
+		mutex_unlock(&ha->tgt.tgt_mutex);
+
+		spin_lock_irqsave(&ha->hardware_lock, flags);
+		if (!sess)
+			goto out_term;
+	} else {
+		kref_get(&sess->se_sess->sess_kref);
+	}
+
+	if (tgt->tgt_stop)
+		goto out_term;
+
+	rc = __qlt_24xx_handle_abts(vha, &prm->abts, sess);
+	if (rc != 0)
+		goto out_term;
+	spin_unlock_irqrestore(&ha->hardware_lock, flags);
+
+	ha->tgt.tgt_ops->put_sess(sess);
+	return;
+
+out_term:
+	qlt_24xx_send_abts_resp(vha, &prm->abts, FCP_TMF_REJECTED, false);
+	spin_unlock_irqrestore(&ha->hardware_lock, flags);
+	if (sess)
+		ha->tgt.tgt_ops->put_sess(sess);
+}
+
+static void qlt_tmr_work(struct qla_tgt *tgt,
+	struct qla_tgt_sess_work_param *prm)
+{
+	struct atio_from_isp *a = &prm->tm_iocb2;
+	struct scsi_qla_host *vha = tgt->vha;
+	struct qla_hw_data *ha = vha->hw;
+	struct qla_tgt_sess *sess = NULL;
+	unsigned long flags;
+	uint8_t *s_id = NULL; /* to hide compiler warnings */
+	int rc;
+	uint32_t lun, unpacked_lun;
+	int lun_size, fn;
+	void *iocb;
+
+	spin_lock_irqsave(&ha->hardware_lock, flags);
+
+	if (tgt->tgt_stop)
+		goto out_term;
+
+	s_id = prm->tm_iocb2.u.isp24.fcp_hdr.s_id;
+	sess = ha->tgt.tgt_ops->find_sess_by_s_id(vha, s_id);
+	if (!sess) {
+		spin_unlock_irqrestore(&ha->hardware_lock, flags);
+
+		mutex_lock(&ha->tgt.tgt_mutex);
+		sess = qlt_make_local_sess(vha, s_id);
+		/* sess has got an extra creation ref */
+		mutex_unlock(&ha->tgt.tgt_mutex);
+
+		spin_lock_irqsave(&ha->hardware_lock, flags);
+		if (!sess)
+			goto out_term;
+	} else {
+		kref_get(&sess->se_sess->sess_kref);
+	}
+
+	iocb = a;
+	lun = a->u.isp24.fcp_cmnd.lun;
+	lun_size = sizeof(lun);
+	fn = a->u.isp24.fcp_cmnd.task_mgmt_flags;
+	unpacked_lun = scsilun_to_int((struct scsi_lun *)&lun);
+
+	rc = qlt_issue_task_mgmt(sess, unpacked_lun, fn, iocb, 0);
+	if (rc != 0)
+		goto out_term;
+	spin_unlock_irqrestore(&ha->hardware_lock, flags);
+
+	ha->tgt.tgt_ops->put_sess(sess);
+	return;
+
+out_term:
+	qlt_send_term_exchange(vha, NULL, &prm->tm_iocb2, 1);
+	spin_unlock_irqrestore(&ha->hardware_lock, flags);
+	if (sess)
+		ha->tgt.tgt_ops->put_sess(sess);
+}
+
+static void qlt_sess_work_fn(struct work_struct *work)
+{
+	struct qla_tgt *tgt = container_of(work, struct qla_tgt, sess_work);
+	struct scsi_qla_host *vha = tgt->vha;
+	unsigned long flags;
+
+	ql_dbg(ql_dbg_tgt_mgt, vha, 0xf000, "Sess work (tgt %p)", tgt);
+
+	spin_lock_irqsave(&tgt->sess_work_lock, flags);
+	while (!list_empty(&tgt->sess_works_list)) {
+		struct qla_tgt_sess_work_param *prm = list_entry(
+		    tgt->sess_works_list.next, typeof(*prm),
+		    sess_works_list_entry);
+
+		/*
+		 * This work can be scheduled on several CPUs at a time, so we
+		 * must delete the entry to eliminate double processing
+		 */
+		list_del(&prm->sess_works_list_entry);
+
+		spin_unlock_irqrestore(&tgt->sess_work_lock, flags);
+
+		switch (prm->type) {
+		case QLA_TGT_SESS_WORK_ABORT:
+			qlt_abort_work(tgt, prm);
+			break;
+		case QLA_TGT_SESS_WORK_TM:
+			qlt_tmr_work(tgt, prm);
+			break;
+		default:
+			BUG_ON(1);
+			break;
+		}
+
+		spin_lock_irqsave(&tgt->sess_work_lock, flags);
+
+		kfree(prm);
+	}
+	spin_unlock_irqrestore(&tgt->sess_work_lock, flags);
+}
+
+/* Must be called under tgt_host_action_mutex */
+int qlt_add_target(struct qla_hw_data *ha, struct scsi_qla_host *base_vha)
+{
+	struct qla_tgt *tgt;
+
+	if (!QLA_TGT_MODE_ENABLED())
+		return 0;
+
+	ql_dbg(ql_dbg_tgt, base_vha, 0xe03b,
+	    "Registering target for host %ld(%p)", base_vha->host_no, ha);
+
+	BUG_ON((ha->tgt.qla_tgt != NULL) || (ha->tgt.tgt_ops != NULL));
+
+	tgt = kzalloc(sizeof(struct qla_tgt), GFP_KERNEL);
+	if (!tgt) {
+		ql_dbg(ql_dbg_tgt, base_vha, 0xe066,
+		    "Unable to allocate struct qla_tgt\n");
+		return -ENOMEM;
+	}
+
+	if (!(base_vha->host->hostt->supported_mode & MODE_TARGET))
+		base_vha->host->hostt->supported_mode |= MODE_TARGET;
+
+	tgt->ha = ha;
+	tgt->vha = base_vha;
+	init_waitqueue_head(&tgt->waitQ);
+	INIT_LIST_HEAD(&tgt->sess_list);
+	INIT_LIST_HEAD(&tgt->del_sess_list);
+	INIT_DELAYED_WORK(&tgt->sess_del_work,
+		(void (*)(struct work_struct *))qlt_del_sess_work_fn);
+	spin_lock_init(&tgt->sess_work_lock);
+	INIT_WORK(&tgt->sess_work, qlt_sess_work_fn);
+	INIT_LIST_HEAD(&tgt->sess_works_list);
+	spin_lock_init(&tgt->srr_lock);
+	INIT_LIST_HEAD(&tgt->srr_ctio_list);
+	INIT_LIST_HEAD(&tgt->srr_imm_list);
+	INIT_WORK(&tgt->srr_work, qlt_handle_srr_work);
+	atomic_set(&tgt->tgt_global_resets_count, 0);
+
+	ha->tgt.qla_tgt = tgt;
+
+	ql_dbg(ql_dbg_tgt, base_vha, 0xe067,
+		"qla_target(%d): using 64 Bit PCI addressing",
+		base_vha->vp_idx);
+	tgt->tgt_enable_64bit_addr = 1;
+	/* 3 is reserved */
+	tgt->sg_tablesize = QLA_TGT_MAX_SG_24XX(base_vha->req->length - 3);
+	tgt->datasegs_per_cmd = QLA_TGT_DATASEGS_PER_CMD_24XX;
+	tgt->datasegs_per_cont = QLA_TGT_DATASEGS_PER_CONT_24XX;
+
+	mutex_lock(&qla_tgt_mutex);
+	list_add_tail(&tgt->tgt_list_entry, &qla_tgt_glist);
+	mutex_unlock(&qla_tgt_mutex);
+
+	return 0;
+}
+
+/* Must be called under tgt_host_action_mutex */
+int qlt_remove_target(struct qla_hw_data *ha, struct scsi_qla_host *vha)
+{
+	if (!ha->tgt.qla_tgt)
+		return 0;
+
+	mutex_lock(&qla_tgt_mutex);
+	list_del(&ha->tgt.qla_tgt->tgt_list_entry);
+	mutex_unlock(&qla_tgt_mutex);
+
+	ql_dbg(ql_dbg_tgt, vha, 0xe03c, "Unregistering target for host %ld(%p)",
+	    vha->host_no, ha);
+	qlt_release(ha->tgt.qla_tgt);
+
+	return 0;
+}
+
+static void qlt_lport_dump(struct scsi_qla_host *vha, u64 wwpn,
+	unsigned char *b)
+{
+	int i;
+
+	pr_debug("qla2xxx HW vha->node_name: ");
+	for (i = 0; i < WWN_SIZE; i++)
+		pr_debug("%02x ", vha->node_name[i]);
+	pr_debug("\n");
+	pr_debug("qla2xxx HW vha->port_name: ");
+	for (i = 0; i < WWN_SIZE; i++)
+		pr_debug("%02x ", vha->port_name[i]);
+	pr_debug("\n");
+
+	pr_debug("qla2xxx passed configfs WWPN: ");
+	put_unaligned_be64(wwpn, b);
+	for (i = 0; i < WWN_SIZE; i++)
+		pr_debug("%02x ", b[i]);
+	pr_debug("\n");
+}
+
+/**
+ * qlt_lport_register - register lport with external module
+ *
+ * @qla_tgt_ops: Pointer for tcm_qla2xxx qla_tgt_ops
+ * @wwpn: Passed FC target WWPN
+ * @callback:  lport initialization callback for tcm_qla2xxx code
+ * @target_lport_ptr: pointer for tcm_qla2xxx specific lport data
+ */
+int qlt_lport_register(struct qla_tgt_func_tmpl *qla_tgt_ops, u64 wwpn,
+	int (*callback)(struct scsi_qla_host *), void *target_lport_ptr)
+{
+	struct qla_tgt *tgt;
+	struct scsi_qla_host *vha;
+	struct qla_hw_data *ha;
+	struct Scsi_Host *host;
+	unsigned long flags;
+	int rc;
+	u8 b[WWN_SIZE];
+
+	mutex_lock(&qla_tgt_mutex);
+	list_for_each_entry(tgt, &qla_tgt_glist, tgt_list_entry) {
+		vha = tgt->vha;
+		ha = vha->hw;
+
+		host = vha->host;
+		if (!host)
+			continue;
+
+		if (ha->tgt.tgt_ops != NULL)
+			continue;
+
+		if (!(host->hostt->supported_mode & MODE_TARGET))
+			continue;
+
+		spin_lock_irqsave(&ha->hardware_lock, flags);
+		if (host->active_mode & MODE_TARGET) {
+			pr_debug("MODE_TARGET already active on qla2xxx(%d)\n",
+			    host->host_no);
+			spin_unlock_irqrestore(&ha->hardware_lock, flags);
+			continue;
+		}
+		spin_unlock_irqrestore(&ha->hardware_lock, flags);
+
+		if (!scsi_host_get(host)) {
+			ql_dbg(ql_dbg_tgt, vha, 0xe068,
+			    "Unable to scsi_host_get() for"
+			    " qla2xxx scsi_host\n");
+			continue;
+		}
+		qlt_lport_dump(vha, wwpn, b);
+
+		if (memcmp(vha->port_name, b, WWN_SIZE)) {
+			scsi_host_put(host);
+			continue;
+		}
+		/*
+		 * Setup passed parameters ahead of invoking callback
+		 */
+		ha->tgt.tgt_ops = qla_tgt_ops;
+		ha->tgt.target_lport_ptr = target_lport_ptr;
+		rc = (*callback)(vha);
+		if (rc != 0) {
+			ha->tgt.tgt_ops = NULL;
+			ha->tgt.target_lport_ptr = NULL;
+		}
+		mutex_unlock(&qla_tgt_mutex);
+		return rc;
+	}
+	mutex_unlock(&qla_tgt_mutex);
+
+	return -ENODEV;
+}
+EXPORT_SYMBOL(qlt_lport_register);
+
+/**
+ * qlt_lport_deregister - Deregister lport
+ *
+ * @vha:  Registered scsi_qla_host pointer
+ */
+void qlt_lport_deregister(struct scsi_qla_host *vha)
+{
+	struct qla_hw_data *ha = vha->hw;
+	struct Scsi_Host *sh = vha->host;
+	/*
+	 * Clear the target_lport_ptr and tgt_ops pointers in qla_hw_data
+	 */
+	ha->tgt.target_lport_ptr = NULL;
+	ha->tgt.tgt_ops = NULL;
+	/*
+	 * Release the Scsi_Host reference for the underlying qla2xxx host
+	 */
+	scsi_host_put(sh);
+}
+EXPORT_SYMBOL(qlt_lport_deregister);
+
+/* Must be called under HW lock */
+void qlt_set_mode(struct scsi_qla_host *vha)
+{
+	struct qla_hw_data *ha = vha->hw;
+
+	switch (ql2x_ini_mode) {
+	case QLA2XXX_INI_MODE_DISABLED:
+	case QLA2XXX_INI_MODE_EXCLUSIVE:
+		vha->host->active_mode = MODE_TARGET;
+		break;
+	case QLA2XXX_INI_MODE_ENABLED:
+		vha->host->active_mode |= MODE_TARGET;
+		break;
+	default:
+		break;
+	}
+
+	if (ha->tgt.ini_mode_force_reverse)
+		qla_reverse_ini_mode(vha);
+}
+
+/* Must be called under HW lock */
+void qlt_clear_mode(struct scsi_qla_host *vha)
+{
+	struct qla_hw_data *ha = vha->hw;
+
+	switch (ql2x_ini_mode) {
+	case QLA2XXX_INI_MODE_DISABLED:
+		vha->host->active_mode = MODE_UNKNOWN;
+		break;
+	case QLA2XXX_INI_MODE_EXCLUSIVE:
+		vha->host->active_mode = MODE_INITIATOR;
+		break;
+	case QLA2XXX_INI_MODE_ENABLED:
+		vha->host->active_mode &= ~MODE_TARGET;
+		break;
+	default:
+		break;
+	}
+
+	if (ha->tgt.ini_mode_force_reverse)
+		qla_reverse_ini_mode(vha);
+}
+
+/*
+ * qlt_enable_vha - NO LOCK HELD
+ *
+ * host_reset, bring up w/ Target Mode Enabled
+ */
+void
+qlt_enable_vha(struct scsi_qla_host *vha)
+{
+	struct qla_hw_data *ha = vha->hw;
+	struct qla_tgt *tgt = ha->tgt.qla_tgt;
+	unsigned long flags;
+
+	if (!tgt) {
+		ql_dbg(ql_dbg_tgt, vha, 0xe069,
+		    "Unable to locate qla_tgt pointer from"
+		    " struct qla_hw_data\n");
+		dump_stack();
+		return;
+	}
+
+	spin_lock_irqsave(&ha->hardware_lock, flags);
+	tgt->tgt_stopped = 0;
+	qlt_set_mode(vha);
+	spin_unlock_irqrestore(&ha->hardware_lock, flags);
+
+	set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
+	qla2xxx_wake_dpc(vha);
+	qla2x00_wait_for_hba_online(vha);
+}
+EXPORT_SYMBOL(qlt_enable_vha);
+
+/*
+ * qlt_disable_vha - NO LOCK HELD
+ *
+ * Disable Target Mode and reset the adapter
+ */
+void
+qlt_disable_vha(struct scsi_qla_host *vha)
+{
+	struct qla_hw_data *ha = vha->hw;
+	struct qla_tgt *tgt = ha->tgt.qla_tgt;
+	unsigned long flags;
+
+	if (!tgt) {
+		ql_dbg(ql_dbg_tgt, vha, 0xe06a,
+		    "Unable to locate qla_tgt pointer from"
+		    " struct qla_hw_data\n");
+		dump_stack();
+		return;
+	}
+
+	spin_lock_irqsave(&ha->hardware_lock, flags);
+	qlt_clear_mode(vha);
+	spin_unlock_irqrestore(&ha->hardware_lock, flags);
+
+	set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
+	qla2xxx_wake_dpc(vha);
+	qla2x00_wait_for_hba_online(vha);
+}
+
+/*
+ * Called from qla_init.c:qla24xx_vport_create() context to set up
+ * the target mode specific struct scsi_qla_host and struct qla_hw_data
+ * members.
+ */
+void
+qlt_vport_create(struct scsi_qla_host *vha, struct qla_hw_data *ha)
+{
+	if (!qla_tgt_mode_enabled(vha))
+		return;
+
+	mutex_init(&ha->tgt.tgt_mutex);
+	mutex_init(&ha->tgt.tgt_host_action_mutex);
+
+	qlt_clear_mode(vha);
+
+	/*
+	 * NOTE: Currently the value is kept the same for <24xx and
+	 * >=24xx ISPs. If it ever needs to differ, a check for the
+	 * specific ISP type should be added here, assigning the
+	 * value appropriately.
+	 */
+	ha->tgt.atio_q_length = ATIO_ENTRY_CNT_24XX;
+}
+
+void
+qlt_rff_id(struct scsi_qla_host *vha, struct ct_sns_req *ct_req)
+{
+	/*
+	 * FC-4 Feature bit 0 indicates target functionality, bit 1 indicates
+	 * initiator functionality, when registering with the name server.
+	 */
+	if (qla_tgt_mode_enabled(vha)) {
+		if (qla_ini_mode_enabled(vha))
+			ct_req->req.rff_id.fc4_feature = BIT_0 | BIT_1;
+		else
+			ct_req->req.rff_id.fc4_feature = BIT_0;
+	} else if (qla_ini_mode_enabled(vha)) {
+		ct_req->req.rff_id.fc4_feature = BIT_1;
+	}
+}
+
+/*
+ * qlt_init_atio_q_entries() - Initializes ATIO queue entries.
+ * @vha: HA context
+ *
+ * Beginning of ATIO ring has initialization control block already built
+ * by nvram config routine.
+ */
+void
+qlt_init_atio_q_entries(struct scsi_qla_host *vha)
+{
+	struct qla_hw_data *ha = vha->hw;
+	uint16_t cnt;
+	struct atio_from_isp *pkt = (struct atio_from_isp *)ha->tgt.atio_ring;
+
+	if (!qla_tgt_mode_enabled(vha))
+		return;
+
+	for (cnt = 0; cnt < ha->tgt.atio_q_length; cnt++) {
+		pkt->u.raw.signature = ATIO_PROCESSED;
+		pkt++;
+	}
+
+}
+
+/*
+ * qlt_24xx_process_atio_queue() - Process ATIO queue entries.
+ * @vha: SCSI driver HA context
+ */
+void
+qlt_24xx_process_atio_queue(struct scsi_qla_host *vha)
+{
+	struct qla_hw_data *ha = vha->hw;
+	struct device_reg_24xx __iomem *reg = &ha->iobase->isp24;
+	struct atio_from_isp *pkt;
+	int cnt, i;
+
+	if (!vha->flags.online)
+		return;
+
+	while (ha->tgt.atio_ring_ptr->signature != ATIO_PROCESSED) {
+		pkt = (struct atio_from_isp *)ha->tgt.atio_ring_ptr;
+		cnt = pkt->u.raw.entry_count;
+
+		qlt_24xx_atio_pkt_all_vps(vha, (struct atio_from_isp *)pkt);
+
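+		/* A packet may span several ring entries; consume them all */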
+		for (i = 0; i < cnt; i++) {
+			ha->tgt.atio_ring_index++;
+			if (ha->tgt.atio_ring_index == ha->tgt.atio_q_length) {
+				ha->tgt.atio_ring_index = 0;
+				ha->tgt.atio_ring_ptr = ha->tgt.atio_ring;
+			} else
+				ha->tgt.atio_ring_ptr++;
+
+			pkt->u.raw.signature = ATIO_PROCESSED;
+			pkt = (struct atio_from_isp *)ha->tgt.atio_ring_ptr;
+		}
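+		/* Make the ATIO_PROCESSED markings visible before moving on */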
+		wmb();
+	}
+
+	/* Adjust ring index */
+	WRT_REG_DWORD(&reg->atio_q_out, ha->tgt.atio_ring_index);
+}
+
+void
+qlt_24xx_config_rings(struct scsi_qla_host *vha, device_reg_t __iomem *reg)
+{
+	struct qla_hw_data *ha = vha->hw;
+
+/* FIXME: atio_q in/out for ha->mqenable=1..? */
+	if (ha->mqenable) {
+#if 0
+		WRT_REG_DWORD(&reg->isp25mq.atio_q_in, 0);
+		WRT_REG_DWORD(&reg->isp25mq.atio_q_out, 0);
+		RD_REG_DWORD(&reg->isp25mq.atio_q_out);
+#endif
+	} else {
+		/* Set up ATIO registers for target mode */
+		WRT_REG_DWORD(&reg->isp24.atio_q_in, 0);
+		WRT_REG_DWORD(&reg->isp24.atio_q_out, 0);
+		RD_REG_DWORD(&reg->isp24.atio_q_out);
+	}
+}
+
+void
+qlt_24xx_config_nvram_stage1(struct scsi_qla_host *vha, struct nvram_24xx *nv)
+{
+	struct qla_hw_data *ha = vha->hw;
+
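+	/*
+	 * Save the stock NVRAM options once so they can be restored when
+	 * target mode is turned off again.
+	 */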
+	if (qla_tgt_mode_enabled(vha)) {
+		if (!ha->tgt.saved_set) {
+			/* We save only once */
+			ha->tgt.saved_exchange_count = nv->exchange_count;
+			ha->tgt.saved_firmware_options_1 =
+			    nv->firmware_options_1;
+			ha->tgt.saved_firmware_options_2 =
+			    nv->firmware_options_2;
+			ha->tgt.saved_firmware_options_3 =
+			    nv->firmware_options_3;
+			ha->tgt.saved_set = 1;
+		}
+
+		nv->exchange_count = __constant_cpu_to_le16(0xFFFF);
+
+		/* Enable target mode */
+		nv->firmware_options_1 |= __constant_cpu_to_le32(BIT_4);
+
+		/* Disable ini mode, if requested */
+		if (!qla_ini_mode_enabled(vha))
+			nv->firmware_options_1 |= __constant_cpu_to_le32(BIT_5);
+
+		/* Disable Full Login after LIP */
+		nv->firmware_options_1 &= __constant_cpu_to_le32(~BIT_13);
+		/* Enable initial LIP */
+		nv->firmware_options_1 &= __constant_cpu_to_le32(~BIT_9);
+		/* Enable FC tapes support */
+		nv->firmware_options_2 |= __constant_cpu_to_le32(BIT_12);
+		/* Disable Full Login after LIP */
+		nv->host_p &= __constant_cpu_to_le32(~BIT_10);
+		/* Enable target PRLI control */
+		nv->firmware_options_2 |= __constant_cpu_to_le32(BIT_14);
+	} else {
+		if (ha->tgt.saved_set) {
+			nv->exchange_count = ha->tgt.saved_exchange_count;
+			nv->firmware_options_1 =
+			    ha->tgt.saved_firmware_options_1;
+			nv->firmware_options_2 =
+			    ha->tgt.saved_firmware_options_2;
+			nv->firmware_options_3 =
+			    ha->tgt.saved_firmware_options_3;
+		}
+		return;
+	}
+
+	/* out-of-order frames reassembly */
+	nv->firmware_options_3 |= BIT_6|BIT_9;
+
+	if (ha->tgt.enable_class_2) {
+		if (vha->flags.init_done)
+			fc_host_supported_classes(vha->host) =
+				FC_COS_CLASS2 | FC_COS_CLASS3;
+
+		nv->firmware_options_2 |= __constant_cpu_to_le32(BIT_8);
+	} else {
+		if (vha->flags.init_done)
+			fc_host_supported_classes(vha->host) = FC_COS_CLASS3;
+
+		nv->firmware_options_2 &= ~__constant_cpu_to_le32(BIT_8);
+	}
+}
+
+void
+qlt_24xx_config_nvram_stage2(struct scsi_qla_host *vha,
+	struct init_cb_24xx *icb)
+{
+	struct qla_hw_data *ha = vha->hw;
+
+	if (ha->tgt.node_name_set) {
+		memcpy(icb->node_name, ha->tgt.tgt_node_name, WWN_SIZE);
+		icb->firmware_options_1 |= __constant_cpu_to_le32(BIT_14);
+	}
+}
+
+int
+qlt_24xx_process_response_error(struct scsi_qla_host *vha,
+	struct sts_entry_24xx *pkt)
+{
+	switch (pkt->entry_type) {
+	case ABTS_RECV_24XX:
+	case ABTS_RESP_24XX:
+	case CTIO_TYPE7:
+	case NOTIFY_ACK_TYPE:
+		return 1;
+	default:
+		return 0;
+	}
+}
+
+void
+qlt_modify_vp_config(struct scsi_qla_host *vha,
+	struct vp_config_entry_24xx *vpmod)
+{
+	if (qla_tgt_mode_enabled(vha))
+		vpmod->options_idx1 &= ~BIT_5;
+	/* Disable ini mode, if requested */
+	if (!qla_ini_mode_enabled(vha))
+		vpmod->options_idx1 &= ~BIT_4;
+}
+
+void
+qlt_probe_one_stage1(struct scsi_qla_host *base_vha, struct qla_hw_data *ha)
+{
+	if (!QLA_TGT_MODE_ENABLED())
+		return;
+
+	mutex_init(&ha->tgt.tgt_mutex);
+	mutex_init(&ha->tgt.tgt_host_action_mutex);
+	qlt_clear_mode(base_vha);
+}
+
+int
+qlt_mem_alloc(struct qla_hw_data *ha)
+{
+	if (!QLA_TGT_MODE_ENABLED())
+		return 0;
+
+	ha->tgt.tgt_vp_map = kzalloc(sizeof(struct qla_tgt_vp_map) *
+	    MAX_MULTI_ID_FABRIC, GFP_KERNEL);
+	if (!ha->tgt.tgt_vp_map)
+		return -ENOMEM;
+
+	ha->tgt.atio_ring = dma_alloc_coherent(&ha->pdev->dev,
+	    (ha->tgt.atio_q_length + 1) * sizeof(struct atio_from_isp),
+	    &ha->tgt.atio_dma, GFP_KERNEL);
+	if (!ha->tgt.atio_ring) {
+		kfree(ha->tgt.tgt_vp_map);
+		return -ENOMEM;
+	}
+	return 0;
+}
+
+void
+qlt_mem_free(struct qla_hw_data *ha)
+{
+	if (!QLA_TGT_MODE_ENABLED())
+		return;
+
+	if (ha->tgt.atio_ring) {
+		dma_free_coherent(&ha->pdev->dev, (ha->tgt.atio_q_length + 1) *
+		    sizeof(struct atio_from_isp), ha->tgt.atio_ring,
+		    ha->tgt.atio_dma);
+	}
+	kfree(ha->tgt.tgt_vp_map);
+}
+
+/* vport_slock to be held by the caller */
+void
+qlt_update_vp_map(struct scsi_qla_host *vha, int cmd)
+{
+	if (!QLA_TGT_MODE_ENABLED())
+		return;
+
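+	/* tgt_vp_map translates both vp_idx -> vha and AL_PA -> vp_idx */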
+	switch (cmd) {
+	case SET_VP_IDX:
+		vha->hw->tgt.tgt_vp_map[vha->vp_idx].vha = vha;
+		break;
+	case SET_AL_PA:
+		vha->hw->tgt.tgt_vp_map[vha->d_id.b.al_pa].idx = vha->vp_idx;
+		break;
+	case RESET_VP_IDX:
+		vha->hw->tgt.tgt_vp_map[vha->vp_idx].vha = NULL;
+		break;
+	case RESET_AL_PA:
+		vha->hw->tgt.tgt_vp_map[vha->d_id.b.al_pa].idx = 0;
+		break;
+	}
+}
+
+static int __init qlt_parse_ini_mode(void)
+{
+	if (strcasecmp(qlini_mode, QLA2XXX_INI_MODE_STR_EXCLUSIVE) == 0)
+		ql2x_ini_mode = QLA2XXX_INI_MODE_EXCLUSIVE;
+	else if (strcasecmp(qlini_mode, QLA2XXX_INI_MODE_STR_DISABLED) == 0)
+		ql2x_ini_mode = QLA2XXX_INI_MODE_DISABLED;
+	else if (strcasecmp(qlini_mode, QLA2XXX_INI_MODE_STR_ENABLED) == 0)
+		ql2x_ini_mode = QLA2XXX_INI_MODE_ENABLED;
+	else
+		return false;
+
+	return true;
+}
+
+int __init qlt_init(void)
+{
+	int ret;
+
+	if (!qlt_parse_ini_mode()) {
+		ql_log(ql_log_fatal, NULL, 0xe06b,
+		    "qlt_parse_ini_mode() failed\n");
+		return -EINVAL;
+	}
+
+	if (!QLA_TGT_MODE_ENABLED())
+		return 0;
+
+	qla_tgt_cmd_cachep = kmem_cache_create("qla_tgt_cmd_cachep",
+	    sizeof(struct qla_tgt_cmd), __alignof__(struct qla_tgt_cmd), 0,
+	    NULL);
+	if (!qla_tgt_cmd_cachep) {
+		ql_log(ql_log_fatal, NULL, 0xe06c,
+		    "kmem_cache_create for qla_tgt_cmd_cachep failed\n");
+		return -ENOMEM;
+	}
+
+	qla_tgt_mgmt_cmd_cachep = kmem_cache_create("qla_tgt_mgmt_cmd_cachep",
+	    sizeof(struct qla_tgt_mgmt_cmd), __alignof__(struct
+	    qla_tgt_mgmt_cmd), 0, NULL);
+	if (!qla_tgt_mgmt_cmd_cachep) {
+		ql_log(ql_log_fatal, NULL, 0xe06d,
+		    "kmem_cache_create for qla_tgt_mgmt_cmd_cachep failed\n");
+		ret = -ENOMEM;
+		goto out;
+	}
+
+	qla_tgt_mgmt_cmd_mempool = mempool_create(25, mempool_alloc_slab,
+	    mempool_free_slab, qla_tgt_mgmt_cmd_cachep);
+	if (!qla_tgt_mgmt_cmd_mempool) {
+		ql_log(ql_log_fatal, NULL, 0xe06e,
+		    "mempool_create for qla_tgt_mgmt_cmd_mempool failed\n");
+		ret = -ENOMEM;
+		goto out_mgmt_cmd_cachep;
+	}
+
+	qla_tgt_wq = alloc_workqueue("qla_tgt_wq", 0, 0);
+	if (!qla_tgt_wq) {
+		ql_log(ql_log_fatal, NULL, 0xe06f,
+		    "alloc_workqueue for qla_tgt_wq failed\n");
+		ret = -ENOMEM;
+		goto out_cmd_mempool;
+	}
+	/*
+	 * Return 1 to signal that initiator-mode is being disabled
+	 */
+	return (ql2x_ini_mode == QLA2XXX_INI_MODE_DISABLED) ? 1 : 0;
+
+out_cmd_mempool:
+	mempool_destroy(qla_tgt_mgmt_cmd_mempool);
+out_mgmt_cmd_cachep:
+	kmem_cache_destroy(qla_tgt_mgmt_cmd_cachep);
+out:
+	kmem_cache_destroy(qla_tgt_cmd_cachep);
+	return ret;
+}
+
+void qlt_exit(void)
+{
+	if (!QLA_TGT_MODE_ENABLED())
+		return;
+
+	destroy_workqueue(qla_tgt_wq);
+	mempool_destroy(qla_tgt_mgmt_cmd_mempool);
+	kmem_cache_destroy(qla_tgt_mgmt_cmd_cachep);
+	kmem_cache_destroy(qla_tgt_cmd_cachep);
+}
diff --git a/drivers/scsi/qla2xxx/qla_target.h b/drivers/scsi/qla2xxx/qla_target.h
new file mode 100644
index 0000000..9f9ef16
--- /dev/null
+++ b/drivers/scsi/qla2xxx/qla_target.h
@@ -0,0 +1,1004 @@
+/*
+ *  Copyright (C) 2004 - 2010 Vladislav Bolkhovitin <vst@vlnb.net>
+ *  Copyright (C) 2004 - 2005 Leonid Stoljar
+ *  Copyright (C) 2006 Nathaniel Clark <nate@misrule.us>
+ *  Copyright (C) 2007 - 2010 ID7 Ltd.
+ *
+ *  Forward port and refactoring to modern qla2xxx and target/configfs
+ *
+ *  Copyright (C) 2010-2011 Nicholas A. Bellinger <nab@kernel.org>
+ *
+ *  Additional file for the target driver support.
+ *
+ *  This program is free software; you can redistribute it and/or
+ *  modify it under the terms of the GNU General Public License
+ *  as published by the Free Software Foundation; either version 2
+ *  of the License, or (at your option) any later version.
+ *
+ *  This program is distributed in the hope that it will be useful,
+ *  but WITHOUT ANY WARRANTY; without even the implied warranty of
+ *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ *  GNU General Public License for more details.
+ */
+/*
+ * This is the global definitions file that is meant to be included from the
+ * target portion.
+ */
+
+#ifndef __QLA_TARGET_H
+#define __QLA_TARGET_H
+
+#include "qla_def.h"
+
+/*
+ * Must be changed on any change in any initiator visible interfaces or
+ * data in the target add-on
+ */
+#define QLA2XXX_TARGET_MAGIC	269
+
+/*
+ * Must be changed on any change in any target visible interfaces or
+ * data in the initiator
+ */
+#define QLA2XXX_INITIATOR_MAGIC   57222
+
+#define QLA2XXX_INI_MODE_STR_EXCLUSIVE	"exclusive"
+#define QLA2XXX_INI_MODE_STR_DISABLED	"disabled"
+#define QLA2XXX_INI_MODE_STR_ENABLED	"enabled"
+
+#define QLA2XXX_INI_MODE_EXCLUSIVE	0
+#define QLA2XXX_INI_MODE_DISABLED	1
+#define QLA2XXX_INI_MODE_ENABLED	2
+
+#define QLA2XXX_COMMAND_COUNT_INIT	250
+#define QLA2XXX_IMMED_NOTIFY_COUNT_INIT 250
+
+/*
+ * Used to mark which completion handles (for RIO Status) are for CTIOs
+ * vs. regular (non-target) info. This is checked in
+ * qla2x00_process_response_queue() to see whether a handle coming back in a
+ * multi-complete should go to the target driver or be handled by qla2xxx.
+ */
+#define CTIO_COMPLETION_HANDLE_MARK	BIT_29
+#if (CTIO_COMPLETION_HANDLE_MARK <= MAX_OUTSTANDING_COMMANDS)
+#error "CTIO_COMPLETION_HANDLE_MARK not larger than MAX_OUTSTANDING_COMMANDS"
+#endif
+#define HANDLE_IS_CTIO_COMP(h) (h & CTIO_COMPLETION_HANDLE_MARK)
+
+/* Used to mark CTIO as intermediate */
+#define CTIO_INTERMEDIATE_HANDLE_MARK	BIT_30
+
+#ifndef OF_SS_MODE_0
+/*
+ * ISP target entries - Flags bit definitions.
+ */
+#define OF_SS_MODE_0        0
+#define OF_SS_MODE_1        1
+#define OF_SS_MODE_2        2
+#define OF_SS_MODE_3        3
+
+#define OF_EXPL_CONF        BIT_5       /* Explicit Confirmation Requested */
+#define OF_DATA_IN          BIT_6       /* Data in to initiator */
+					/*  (data from target to initiator) */
+#define OF_DATA_OUT         BIT_7       /* Data out from initiator */
+					/*  (data from initiator to target) */
+#define OF_NO_DATA          (BIT_7 | BIT_6)
+#define OF_INC_RC           BIT_8       /* Increment command resource count */
+#define OF_FAST_POST        BIT_9       /* Enable mailbox fast posting. */
+#define OF_CONF_REQ         BIT_13      /* Confirmation Requested */
+#define OF_TERM_EXCH        BIT_14      /* Terminate exchange */
+#define OF_SSTS             BIT_15      /* Send SCSI status */
+#endif
+
+#ifndef QLA_TGT_DATASEGS_PER_CMD32
+#define QLA_TGT_DATASEGS_PER_CMD32	3
+#define QLA_TGT_DATASEGS_PER_CONT32	7
+#define QLA_TGT_MAX_SG32(ql) \
+	(((ql) > 0) ? (QLA_TGT_DATASEGS_PER_CMD32 + \
+		QLA_TGT_DATASEGS_PER_CONT32*((ql) - 1)) : 0)
+
+#define QLA_TGT_DATASEGS_PER_CMD64	2
+#define QLA_TGT_DATASEGS_PER_CONT64	5
+#define QLA_TGT_MAX_SG64(ql) \
+	(((ql) > 0) ? (QLA_TGT_DATASEGS_PER_CMD64 + \
+		QLA_TGT_DATASEGS_PER_CONT64*((ql) - 1)) : 0)
+#endif
+
+#ifndef QLA_TGT_DATASEGS_PER_CMD_24XX
+#define QLA_TGT_DATASEGS_PER_CMD_24XX	1
+#define QLA_TGT_DATASEGS_PER_CONT_24XX	5
+#define QLA_TGT_MAX_SG_24XX(ql) \
+	(min(1270, ((ql) > 0) ? (QLA_TGT_DATASEGS_PER_CMD_24XX + \
+		QLA_TGT_DATASEGS_PER_CONT_24XX*((ql) - 1)) : 0))
+#endif
+#endif
+
+#define GET_TARGET_ID(ha, iocb) ((HAS_EXTENDED_IDS(ha))			\
+			 ? le16_to_cpu((iocb)->u.isp2x.target.extended)	\
+			 : (uint16_t)(iocb)->u.isp2x.target.id.standard)
+
+#ifndef IMMED_NOTIFY_TYPE
+#define IMMED_NOTIFY_TYPE 0x0D		/* Immediate notify entry. */
+/*
+ * ISP queue -	immediate notify entry structure definition.
+ *		This is sent by the ISP to the Target driver.
+ *		This IOCB carries reports of events sent by the
+ *		initiator that need to be handled by the target
+ *		driver immediately.
+ */
+struct imm_ntfy_from_isp {
+	uint8_t	 entry_type;		    /* Entry type. */
+	uint8_t	 entry_count;		    /* Entry count. */
+	uint8_t	 sys_define;		    /* System defined. */
+	uint8_t	 entry_status;		    /* Entry Status. */
+	union {
+		struct {
+			uint32_t sys_define_2; /* System defined. */
+			target_id_t target;
+			uint16_t lun;
+			uint8_t  target_id;
+			uint8_t  reserved_1;
+			uint16_t status_modifier;
+			uint16_t status;
+			uint16_t task_flags;
+			uint16_t seq_id;
+			uint16_t srr_rx_id;
+			uint32_t srr_rel_offs;
+			uint16_t srr_ui;
+#define SRR_IU_DATA_IN	0x1
+#define SRR_IU_DATA_OUT	0x5
+#define SRR_IU_STATUS	0x7
+			uint16_t srr_ox_id;
+			uint8_t reserved_2[28];
+		} isp2x;
+		struct {
+			uint32_t reserved;
+			uint16_t nport_handle;
+			uint16_t reserved_2;
+			uint16_t flags;
+#define NOTIFY24XX_FLAGS_GLOBAL_TPRLO   BIT_1
+#define NOTIFY24XX_FLAGS_PUREX_IOCB     BIT_0
+			uint16_t srr_rx_id;
+			uint16_t status;
+			uint8_t  status_subcode;
+			uint8_t  reserved_3;
+			uint32_t exchange_address;
+			uint32_t srr_rel_offs;
+			uint16_t srr_ui;
+			uint16_t srr_ox_id;
+			uint8_t  reserved_4[19];
+			uint8_t  vp_index;
+			uint32_t reserved_5;
+			uint8_t  port_id[3];
+			uint8_t  reserved_6;
+		} isp24;
+	} u;
+	uint16_t reserved_7;
+	uint16_t ox_id;
+} __packed;
+#endif
+
+#ifndef NOTIFY_ACK_TYPE
+#define NOTIFY_ACK_TYPE 0x0E	  /* Notify acknowledge entry. */
+/*
+ * ISP queue -	notify acknowledge entry structure definition.
+ *		This is sent to the ISP from the target driver.
+ */
+struct nack_to_isp {
+	uint8_t	 entry_type;		    /* Entry type. */
+	uint8_t	 entry_count;		    /* Entry count. */
+	uint8_t	 sys_define;		    /* System defined. */
+	uint8_t	 entry_status;		    /* Entry Status. */
+	union {
+		struct {
+			uint32_t sys_define_2; /* System defined. */
+			target_id_t target;
+			uint8_t	 target_id;
+			uint8_t	 reserved_1;
+			uint16_t flags;
+			uint16_t resp_code;
+			uint16_t status;
+			uint16_t task_flags;
+			uint16_t seq_id;
+			uint16_t srr_rx_id;
+			uint32_t srr_rel_offs;
+			uint16_t srr_ui;
+			uint16_t srr_flags;
+			uint16_t srr_reject_code;
+			uint8_t  srr_reject_vendor_uniq;
+			uint8_t  srr_reject_code_expl;
+			uint8_t  reserved_2[24];
+		} isp2x;
+		struct {
+			uint32_t handle;
+			uint16_t nport_handle;
+			uint16_t reserved_1;
+			uint16_t flags;
+			uint16_t srr_rx_id;
+			uint16_t status;
+			uint8_t  status_subcode;
+			uint8_t  reserved_3;
+			uint32_t exchange_address;
+			uint32_t srr_rel_offs;
+			uint16_t srr_ui;
+			uint16_t srr_flags;
+			uint8_t  reserved_4[19];
+			uint8_t  vp_index;
+			uint8_t  srr_reject_vendor_uniq;
+			uint8_t  srr_reject_code_expl;
+			uint8_t  srr_reject_code;
+			uint8_t  reserved_5[5];
+		} isp24;
+	} u;
+	uint8_t  reserved[2];
+	uint16_t ox_id;
+} __packed;
+#define NOTIFY_ACK_SRR_FLAGS_ACCEPT	0
+#define NOTIFY_ACK_SRR_FLAGS_REJECT	1
+
+#define NOTIFY_ACK_SRR_REJECT_REASON_UNABLE_TO_PERFORM	0x9
+
+#define NOTIFY_ACK_SRR_FLAGS_REJECT_EXPL_NO_EXPL		0
+#define NOTIFY_ACK_SRR_FLAGS_REJECT_EXPL_UNABLE_TO_SUPPLY_DATA	0x2a
+
+#define NOTIFY_ACK_SUCCESS      0x01
+#endif
+
+#ifndef ACCEPT_TGT_IO_TYPE
+#define ACCEPT_TGT_IO_TYPE 0x16 /* Accept target I/O entry. */
+#endif
+
+#ifndef CONTINUE_TGT_IO_TYPE
+#define CONTINUE_TGT_IO_TYPE 0x17
+/*
+ * ISP queue -	Continue Target I/O (CTIO) entry for status mode 0 structure.
+ *		This structure is sent to the ISP 2xxx from target driver.
+ */
+struct ctio_to_2xxx {
+	uint8_t	 entry_type;		/* Entry type. */
+	uint8_t	 entry_count;		/* Entry count. */
+	uint8_t	 sys_define;		/* System defined. */
+	uint8_t	 entry_status;		/* Entry Status. */
+	uint32_t handle;		/* System defined handle */
+	target_id_t target;
+	uint16_t rx_id;
+	uint16_t flags;
+	uint16_t status;
+	uint16_t timeout;		/* 0 = 30 seconds, 0xFFFF = disable */
+	uint16_t dseg_count;		/* Data segment count. */
+	uint32_t relative_offset;
+	uint32_t residual;
+	uint16_t reserved_1[3];
+	uint16_t scsi_status;
+	uint32_t transfer_length;
+	uint32_t dseg_0_address;	/* Data segment 0 address. */
+	uint32_t dseg_0_length;		/* Data segment 0 length. */
+	uint32_t dseg_1_address;	/* Data segment 1 address. */
+	uint32_t dseg_1_length;		/* Data segment 1 length. */
+	uint32_t dseg_2_address;	/* Data segment 2 address. */
+	uint32_t dseg_2_length;		/* Data segment 2 length. */
+} __packed;
+#define ATIO_PATH_INVALID       0x07
+#define ATIO_CANT_PROV_CAP      0x16
+#define ATIO_CDB_VALID          0x3D
+
+#define ATIO_EXEC_READ          BIT_1
+#define ATIO_EXEC_WRITE         BIT_0
+#endif
+
+#ifndef CTIO_A64_TYPE
+#define CTIO_A64_TYPE 0x1F
+#define CTIO_SUCCESS			0x01
+#define CTIO_ABORTED			0x02
+#define CTIO_INVALID_RX_ID		0x08
+#define CTIO_TIMEOUT			0x0B
+#define CTIO_LIP_RESET			0x0E
+#define CTIO_TARGET_RESET		0x17
+#define CTIO_PORT_UNAVAILABLE		0x28
+#define CTIO_PORT_LOGGED_OUT		0x29
+#define CTIO_PORT_CONF_CHANGED		0x2A
+#define CTIO_SRR_RECEIVED		0x45
+#endif
+
+#ifndef CTIO_RET_TYPE
+#define CTIO_RET_TYPE	0x17		/* CTIO return entry */
+#define ATIO_TYPE7 0x06 /* Accept target I/O entry for 24xx */
+
+struct fcp_hdr {
+	uint8_t  r_ctl;
+	uint8_t  d_id[3];
+	uint8_t  cs_ctl;
+	uint8_t  s_id[3];
+	uint8_t  type;
+	uint8_t  f_ctl[3];
+	uint8_t  seq_id;
+	uint8_t  df_ctl;
+	uint16_t seq_cnt;
+	uint16_t ox_id;
+	uint16_t rx_id;
+	uint32_t parameter;
+} __packed;
+
+struct fcp_hdr_le {
+	uint8_t  d_id[3];
+	uint8_t  r_ctl;
+	uint8_t  s_id[3];
+	uint8_t  cs_ctl;
+	uint8_t  f_ctl[3];
+	uint8_t  type;
+	uint16_t seq_cnt;
+	uint8_t  df_ctl;
+	uint8_t  seq_id;
+	uint16_t rx_id;
+	uint16_t ox_id;
+	uint32_t parameter;
+} __packed;
+
+#define F_CTL_EXCH_CONTEXT_RESP	BIT_23
+#define F_CTL_SEQ_CONTEXT_RESIP	BIT_22
+#define F_CTL_LAST_SEQ		BIT_20
+#define F_CTL_END_SEQ		BIT_19
+#define F_CTL_SEQ_INITIATIVE	BIT_16
+
+#define R_CTL_BASIC_LINK_SERV	0x80
+#define R_CTL_B_ACC		0x4
+#define R_CTL_B_RJT		0x5
+
+struct atio7_fcp_cmnd {
+	uint64_t lun;
+	uint8_t  cmnd_ref;
+	uint8_t  task_attr:3;
+	uint8_t  reserved:5;
+	uint8_t  task_mgmt_flags;
+#define FCP_CMND_TASK_MGMT_CLEAR_ACA		6
+#define FCP_CMND_TASK_MGMT_TARGET_RESET		5
+#define FCP_CMND_TASK_MGMT_LU_RESET		4
+#define FCP_CMND_TASK_MGMT_CLEAR_TASK_SET	2
+#define FCP_CMND_TASK_MGMT_ABORT_TASK_SET	1
+	uint8_t  wrdata:1;
+	uint8_t  rddata:1;
+	uint8_t  add_cdb_len:6;
+	uint8_t  cdb[16];
+	/*
+	 * add_cdb is optional and can be absent from struct atio7_fcp_cmnd.
+	 * It is sized at 4 bytes only so that sizeof(struct atio7_fcp_cmnd)
+	 * matches what the BUILD_BUG_ON() in qlt_init() expects.
+	 */
+	uint8_t  add_cdb[4];
+	/* uint32_t data_length; */
+} __packed;
+
+/*
+ * ISP queue -	Accept Target I/O (ATIO) type entry IOCB structure.
+ *		This is sent from the ISP to the target driver.
+ */
+struct atio_from_isp {
+	union {
+		struct {
+			uint16_t entry_hdr;
+			uint8_t  sys_define;   /* System defined. */
+			uint8_t  entry_status; /* Entry Status.   */
+			uint32_t sys_define_2; /* System defined. */
+			target_id_t target;
+			uint16_t rx_id;
+			uint16_t flags;
+			uint16_t status;
+			uint8_t  command_ref;
+			uint8_t  task_codes;
+			uint8_t  task_flags;
+			uint8_t  execution_codes;
+			uint8_t  cdb[MAX_CMDSZ];
+			uint32_t data_length;
+			uint16_t lun;
+			uint8_t  initiator_port_name[WWN_SIZE]; /* on qla23xx */
+			uint16_t reserved_32[6];
+			uint16_t ox_id;
+		} isp2x;
+		struct {
+			uint16_t entry_hdr;
+			uint8_t  fcp_cmnd_len_low;
+			uint8_t  fcp_cmnd_len_high:4;
+			uint8_t  attr:4;
+			uint32_t exchange_addr;
+#define ATIO_EXCHANGE_ADDRESS_UNKNOWN	0xFFFFFFFF
+			struct fcp_hdr fcp_hdr;
+			struct atio7_fcp_cmnd fcp_cmnd;
+		} isp24;
+		struct {
+			uint8_t  entry_type;	/* Entry type. */
+			uint8_t  entry_count;	/* Entry count. */
+			uint8_t  data[58];
+			uint32_t signature;
+#define ATIO_PROCESSED 0xDEADDEAD		/* Signature */
+		} raw;
+	} u;
+} __packed;
+
+#define CTIO_TYPE7 0x12 /* Continue target I/O entry (for 24xx) */
+
+/*
+ * ISP queue -	Continue Target I/O (CTIO) type 7 entry (for 24xx) structure.
+ *		This structure is sent to the ISP 24xx from the target driver.
+ */
+
+struct ctio7_to_24xx {
+	uint8_t	 entry_type;		    /* Entry type. */
+	uint8_t	 entry_count;		    /* Entry count. */
+	uint8_t	 sys_define;		    /* System defined. */
+	uint8_t	 entry_status;		    /* Entry Status. */
+	uint32_t handle;		    /* System defined handle */
+	uint16_t nport_handle;
+#define CTIO7_NHANDLE_UNRECOGNIZED	0xFFFF
+	uint16_t timeout;
+	uint16_t dseg_count;		    /* Data segment count. */
+	uint8_t  vp_index;
+	uint8_t  add_flags;
+	uint8_t  initiator_id[3];
+	uint8_t  reserved;
+	uint32_t exchange_addr;
+	union {
+		struct {
+			uint16_t reserved1;
+			uint16_t flags;
+			uint32_t residual;
+			uint16_t ox_id;
+			uint16_t scsi_status;
+			uint32_t relative_offset;
+			uint32_t reserved2;
+			uint32_t transfer_length;
+			uint32_t reserved3;
+			/* Data segment 0 address. */
+			uint32_t dseg_0_address[2];
+			/* Data segment 0 length. */
+			uint32_t dseg_0_length;
+		} status0;
+		struct {
+			uint16_t sense_length;
+			uint16_t flags;
+			uint32_t residual;
+			uint16_t ox_id;
+			uint16_t scsi_status;
+			uint16_t response_len;
+			uint16_t reserved;
+			uint8_t sense_data[24];
+		} status1;
+	} u;
+} __packed;
+
+/*
+ * ISP queue - CTIO type 7 from ISP 24xx to target driver
+ * returned entry structure.
+ */
+struct ctio7_from_24xx {
+	uint8_t	 entry_type;		    /* Entry type. */
+	uint8_t	 entry_count;		    /* Entry count. */
+	uint8_t	 sys_define;		    /* System defined. */
+	uint8_t	 entry_status;		    /* Entry Status. */
+	uint32_t handle;		    /* System defined handle */
+	uint16_t status;
+	uint16_t timeout;
+	uint16_t dseg_count;		    /* Data segment count. */
+	uint8_t  vp_index;
+	uint8_t  reserved1[5];
+	uint32_t exchange_address;
+	uint16_t reserved2;
+	uint16_t flags;
+	uint32_t residual;
+	uint16_t ox_id;
+	uint16_t reserved3;
+	uint32_t relative_offset;
+	uint8_t  reserved4[24];
+} __packed;
+
+/* CTIO7 flags values */
+#define CTIO7_FLAGS_SEND_STATUS		BIT_15
+#define CTIO7_FLAGS_TERMINATE		BIT_14
+#define CTIO7_FLAGS_CONFORM_REQ		BIT_13
+#define CTIO7_FLAGS_DONT_RET_CTIO	BIT_8
+#define CTIO7_FLAGS_STATUS_MODE_0	0
+#define CTIO7_FLAGS_STATUS_MODE_1	BIT_6
+#define CTIO7_FLAGS_EXPLICIT_CONFORM	BIT_5
+#define CTIO7_FLAGS_CONFIRM_SATISF	BIT_4
+#define CTIO7_FLAGS_DSD_PTR		BIT_2
+#define CTIO7_FLAGS_DATA_IN		BIT_1
+#define CTIO7_FLAGS_DATA_OUT		BIT_0
+
+#define ELS_PLOGI			0x3
+#define ELS_FLOGI			0x4
+#define ELS_LOGO			0x5
+#define ELS_PRLI			0x20
+#define ELS_PRLO			0x21
+#define ELS_TPRLO			0x24
+#define ELS_PDISC			0x50
+#define ELS_ADISC			0x52
+
+/*
+ * ISP queue - ABTS received/response entries structure definition for 24xx.
+ */
+#define ABTS_RECV_24XX		0x54 /* ABTS received (for 24xx) */
+#define ABTS_RESP_24XX		0x55 /* ABTS response (for 24xx) */
+
+/*
+ * ISP queue -	ABTS received IOCB entry structure definition for 24xx.
+ *		The ABTS BLS received from the wire is sent to the
+ *		target driver by the ISP 24xx.
+ *		The IOCB is placed on the response queue.
+ */
+struct abts_recv_from_24xx {
+	uint8_t	 entry_type;		    /* Entry type. */
+	uint8_t	 entry_count;		    /* Entry count. */
+	uint8_t	 sys_define;		    /* System defined. */
+	uint8_t	 entry_status;		    /* Entry Status. */
+	uint8_t  reserved_1[6];
+	uint16_t nport_handle;
+	uint8_t  reserved_2[2];
+	uint8_t  vp_index;
+	uint8_t  reserved_3:4;
+	uint8_t  sof_type:4;
+	uint32_t exchange_address;
+	struct fcp_hdr_le fcp_hdr_le;
+	uint8_t  reserved_4[16];
+	uint32_t exchange_addr_to_abort;
+} __packed;
+
+#define ABTS_PARAM_ABORT_SEQ		BIT_0
+
+struct ba_acc_le {
+	uint16_t reserved;
+	uint8_t  seq_id_last;
+	uint8_t  seq_id_valid;
+#define SEQ_ID_VALID	0x80
+#define SEQ_ID_INVALID	0x00
+	uint16_t rx_id;
+	uint16_t ox_id;
+	uint16_t high_seq_cnt;
+	uint16_t low_seq_cnt;
+} __packed;
+
+struct ba_rjt_le {
+	uint8_t vendor_uniq;
+	uint8_t reason_expl;
+	uint8_t reason_code;
+#define BA_RJT_REASON_CODE_INVALID_COMMAND	0x1
+#define BA_RJT_REASON_CODE_UNABLE_TO_PERFORM	0x9
+	uint8_t reserved;
+} __packed;
+
+/*
+ * ISP queue -	ABTS Response IOCB entry structure definition for 24xx.
+ *		The ABTS response to the ABTS received is sent by the
+ *		target driver to the ISP 24xx.
+ *		The IOCB is placed on the request queue.
+ */
+struct abts_resp_to_24xx {
+	uint8_t	 entry_type;		    /* Entry type. */
+	uint8_t	 entry_count;		    /* Entry count. */
+	uint8_t	 sys_define;		    /* System defined. */
+	uint8_t	 entry_status;		    /* Entry Status. */
+	uint32_t handle;
+	uint16_t reserved_1;
+	uint16_t nport_handle;
+	uint16_t control_flags;
+#define ABTS_CONTR_FLG_TERM_EXCHG	BIT_0
+	uint8_t  vp_index;
+	uint8_t  reserved_3:4;
+	uint8_t  sof_type:4;
+	uint32_t exchange_address;
+	struct fcp_hdr_le fcp_hdr_le;
+	union {
+		struct ba_acc_le ba_acct;
+		struct ba_rjt_le ba_rjt;
+	} __packed payload;
+	uint32_t reserved_4;
+	uint32_t exchange_addr_to_abort;
+} __packed;
+
+/*
+ * ISP queue -	ABTS Response IOCB from ISP24xx Firmware entry structure.
+ *		The ABTS response with completion status to the ABTS response
+ *		(sent by the target driver to the ISP 24xx) is sent by the
+ *		ISP24xx firmware to the target driver.
+ *		The IOCB is placed on the response queue.
+ */
+struct abts_resp_from_24xx_fw {
+	uint8_t	 entry_type;		    /* Entry type. */
+	uint8_t	 entry_count;		    /* Entry count. */
+	uint8_t	 sys_define;		    /* System defined. */
+	uint8_t	 entry_status;		    /* Entry Status. */
+	uint32_t handle;
+	uint16_t compl_status;
+#define ABTS_RESP_COMPL_SUCCESS		0
+#define ABTS_RESP_COMPL_SUBCODE_ERROR	0x31
+	uint16_t nport_handle;
+	uint16_t reserved_1;
+	uint8_t  reserved_2;
+	uint8_t  reserved_3:4;
+	uint8_t  sof_type:4;
+	uint32_t exchange_address;
+	struct fcp_hdr_le fcp_hdr_le;
+	uint8_t reserved_4[8];
+	uint32_t error_subcode1;
+#define ABTS_RESP_SUBCODE_ERR_ABORTED_EXCH_NOT_TERM	0x1E
+	uint32_t error_subcode2;
+	uint32_t exchange_addr_to_abort;
+} __packed;
+
+/********************************************************************\
+ * Type Definitions used by initiator & target halves
+\********************************************************************/
+
+struct qla_tgt_mgmt_cmd;
+struct qla_tgt_sess;
+
+/*
+ * This structure provides a template of function calls that the
+ * target driver (from within qla_target.c) can issue to the
+ * target module (tcm_qla2xxx).
+ */
+struct qla_tgt_func_tmpl {
+
+	int (*handle_cmd)(struct scsi_qla_host *, struct qla_tgt_cmd *,
+			unsigned char *, uint32_t, int, int, int);
+	int (*handle_data)(struct qla_tgt_cmd *);
+	int (*handle_tmr)(struct qla_tgt_mgmt_cmd *, uint32_t, uint8_t,
+			uint32_t);
+	void (*free_cmd)(struct qla_tgt_cmd *);
+	void (*free_mcmd)(struct qla_tgt_mgmt_cmd *);
+	void (*free_session)(struct qla_tgt_sess *);
+
+	int (*check_initiator_node_acl)(struct scsi_qla_host *, unsigned char *,
+					void *, uint8_t *, uint16_t);
+	struct qla_tgt_sess *(*find_sess_by_loop_id)(struct scsi_qla_host *,
+						const uint16_t);
+	struct qla_tgt_sess *(*find_sess_by_s_id)(struct scsi_qla_host *,
+						const uint8_t *);
+	void (*clear_nacl_from_fcport_map)(struct qla_tgt_sess *);
+	void (*put_sess)(struct qla_tgt_sess *);
+	void (*shutdown_sess)(struct qla_tgt_sess *);
+};
+
+int qla2x00_wait_for_hba_online(struct scsi_qla_host *);
+
+#include <target/target_core_base.h>
+
+#define QLA_TGT_TIMEOUT			10	/* in seconds */
+
+#define QLA_TGT_MAX_HW_PENDING_TIME	60 /* in seconds */
+
+/* Immediate notify status constants */
+#define IMM_NTFY_LIP_RESET          0x000E
+#define IMM_NTFY_LIP_LINK_REINIT    0x000F
+#define IMM_NTFY_IOCB_OVERFLOW      0x0016
+#define IMM_NTFY_ABORT_TASK         0x0020
+#define IMM_NTFY_PORT_LOGOUT        0x0029
+#define IMM_NTFY_PORT_CONFIG        0x002A
+#define IMM_NTFY_GLBL_TPRLO         0x002D
+#define IMM_NTFY_GLBL_LOGO          0x002E
+#define IMM_NTFY_RESOURCE           0x0034
+#define IMM_NTFY_MSG_RX             0x0036
+#define IMM_NTFY_SRR                0x0045
+#define IMM_NTFY_ELS                0x0046
+
+/* Immediate notify task flags */
+#define IMM_NTFY_TASK_MGMT_SHIFT    8
+
+#define QLA_TGT_CLEAR_ACA               0x40
+#define QLA_TGT_TARGET_RESET            0x20
+#define QLA_TGT_LUN_RESET               0x10
+#define QLA_TGT_CLEAR_TS                0x04
+#define QLA_TGT_ABORT_TS                0x02
+#define QLA_TGT_ABORT_ALL_SESS          0xFFFF
+#define QLA_TGT_ABORT_ALL               0xFFFE
+#define QLA_TGT_NEXUS_LOSS_SESS         0xFFFD
+#define QLA_TGT_NEXUS_LOSS              0xFFFC
+
+/* Notify Acknowledge flags */
+#define NOTIFY_ACK_RES_COUNT        BIT_8
+#define NOTIFY_ACK_CLEAR_LIP_RESET  BIT_5
+#define NOTIFY_ACK_TM_RESP_CODE_VALID BIT_4
+
+/* Command's states */
+#define QLA_TGT_STATE_NEW		0 /* New command + target processing */
+#define QLA_TGT_STATE_NEED_DATA		1 /* target needs data to continue */
+#define QLA_TGT_STATE_DATA_IN		2 /* Data arrived + target processing */
+#define QLA_TGT_STATE_PROCESSED		3 /* target done processing */
+#define QLA_TGT_STATE_ABORTED		4 /* Command aborted */
+
+/* Special handles */
+#define QLA_TGT_NULL_HANDLE	0
+#define QLA_TGT_SKIP_HANDLE	(0xFFFFFFFF & ~CTIO_COMPLETION_HANDLE_MARK)
+
+/* ATIO task_codes field */
+#define ATIO_SIMPLE_QUEUE           0
+#define ATIO_HEAD_OF_QUEUE          1
+#define ATIO_ORDERED_QUEUE          2
+#define ATIO_ACA_QUEUE              4
+#define ATIO_UNTAGGED               5
+
+/* TM failed response codes, see FCP (9.4.11 FCP_RSP_INFO) */
+#define	FC_TM_SUCCESS               0
+#define	FC_TM_BAD_FCP_DATA          1
+#define	FC_TM_BAD_CMD               2
+#define	FC_TM_FCP_DATA_MISMATCH     3
+#define	FC_TM_REJECT                4
+#define FC_TM_FAILED                5
+
+/*
+ * Error code of qlt_pre_xmit_response() meaning that the cmd's exchange was
+ * terminated, so no further action is needed and success should be returned
+ * to the target.
+ */
+#define QLA_TGT_PRE_XMIT_RESP_CMD_ABORTED	0x1717
+
+#if (BITS_PER_LONG > 32) || defined(CONFIG_HIGHMEM64G)
+#define pci_dma_lo32(a) (a & 0xffffffff)
+#define pci_dma_hi32(a) ((((a) >> 16)>>16) & 0xffffffff)
+#else
+#define pci_dma_lo32(a) (a & 0xffffffff)
+#define pci_dma_hi32(a) 0
+#endif
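+
+/*
+ * For example, a 64-bit bus address of 0x0000001234abcd00 splits into
+ * pci_dma_lo32() == 0x34abcd00 and pci_dma_hi32() == 0x00000012, matching
+ * pairs of 32-bit address words such as dseg_0_address[2] in
+ * struct ctio7_to_24xx above.
+ */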
+
+#define QLA_TGT_SENSE_VALID(sense)  ((sense != NULL) && \
+				(((const uint8_t *)(sense))[0] & 0x70) == 0x70)
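+/*
+ * i.e. the sense data is treated as valid when its response code falls in
+ * the 0x70-0x7F range, covering both fixed (0x70/0x71) and descriptor
+ * (0x72/0x73) sense formats.
+ */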
+
+struct qla_port_24xx_data {
+	uint8_t port_name[WWN_SIZE];
+	uint16_t loop_id;
+	uint16_t reserved;
+};
+
+struct qla_tgt {
+	struct scsi_qla_host *vha;
+	struct qla_hw_data *ha;
+
+	/*
+	 * To sync between IRQ handlers and qlt_target_release(). Needed
+	 * because req_pkt() can drop/reacquire the HW lock internally.
+	 * Protected by the HW lock.
+	 */
+	int irq_cmd_count;
+
+	int datasegs_per_cmd, datasegs_per_cont, sg_tablesize;
+
+	/* Target's flags, serialized by pha->hardware_lock */
+	unsigned int tgt_enable_64bit_addr:1; /* 64-bits PCI addr enabled */
+	unsigned int link_reinit_iocb_pending:1;
+
+	/*
+	 * Protected by tgt_mutex AND hardware_lock for writing and tgt_mutex
+	 * OR hardware_lock for reading.
+	 */
+	int tgt_stop; /* the target mode driver is being stopped */
+	int tgt_stopped; /* the target mode driver has been stopped */
+
+	/* Count of sessions referring to qla_tgt. Protected by hardware_lock. */
+	int sess_count;
+
+	/* Protected by hardware_lock. Addition also protected by tgt_mutex. */
+	struct list_head sess_list;
+
+	/* Protected by hardware_lock */
+	struct list_head del_sess_list;
+	struct delayed_work sess_del_work;
+
+	spinlock_t sess_work_lock;
+	struct list_head sess_works_list;
+	struct work_struct sess_work;
+
+	struct imm_ntfy_from_isp link_reinit_iocb;
+	wait_queue_head_t waitQ;
+	int notify_ack_expected;
+	int abts_resp_expected;
+	int modify_lun_expected;
+
+	int ctio_srr_id;
+	int imm_srr_id;
+	spinlock_t srr_lock;
+	struct list_head srr_ctio_list;
+	struct list_head srr_imm_list;
+	struct work_struct srr_work;
+
+	atomic_t tgt_global_resets_count;
+
+	struct list_head tgt_list_entry;
+};
+
+/*
+ * Equivalent to an IT Nexus (Initiator-Target)
+ */
+struct qla_tgt_sess {
+	uint16_t loop_id;
+	port_id_t s_id;
+
+	unsigned int conf_compl_supported:1;
+	unsigned int deleted:1;
+	unsigned int local:1;
+	unsigned int tearing_down:1;
+
+	struct se_session *se_sess;
+	struct scsi_qla_host *vha;
+	struct qla_tgt *tgt;
+
+	struct list_head sess_list_entry;
+	unsigned long expires;
+	struct list_head del_list_entry;
+
+	uint8_t port_name[WWN_SIZE];
+	struct work_struct free_work;
+};
+
+struct qla_tgt_cmd {
+	struct qla_tgt_sess *sess;
+	int state;
+	struct se_cmd se_cmd;
+	struct work_struct free_work;
+	struct work_struct work;
+	/* Sense buffer that will be mapped into outgoing status */
+	unsigned char sense_buffer[TRANSPORT_SENSE_BUFFER];
+
+	/* to save extra sess dereferences */
+	unsigned int conf_compl_supported:1;
+	unsigned int sg_mapped:1;
+	unsigned int free_sg:1;
+	unsigned int aborted:1; /* Needed in case of SRR */
+	unsigned int write_data_transferred:1;
+
+	struct scatterlist *sg;	/* cmd data buffer SG vector */
+	int sg_cnt;		/* SG segments count */
+	int bufflen;		/* cmd buffer length */
+	int offset;
+	uint32_t tag;
+	uint32_t unpacked_lun;
+	enum dma_data_direction dma_data_direction;
+
+	uint16_t loop_id;	/* to save extra sess dereferences */
+	struct qla_tgt *tgt;	/* to save extra sess dereferences */
+	struct scsi_qla_host *vha;
+	struct list_head cmd_list;
+
+	struct atio_from_isp atio;
+};
+
+struct qla_tgt_sess_work_param {
+	struct list_head sess_works_list_entry;
+
+#define QLA_TGT_SESS_WORK_ABORT	1
+#define QLA_TGT_SESS_WORK_TM	2
+	int type;
+
+	union {
+		struct abts_recv_from_24xx abts;
+		struct imm_ntfy_from_isp tm_iocb;
+		struct atio_from_isp tm_iocb2;
+	};
+};
+
+struct qla_tgt_mgmt_cmd {
+	uint8_t tmr_func;
+	uint8_t fc_tm_rsp;
+	struct qla_tgt_sess *sess;
+	struct se_cmd se_cmd;
+	struct work_struct free_work;
+	unsigned int flags;
+#define QLA24XX_MGMT_SEND_NACK	1
+	union {
+		struct atio_from_isp atio;
+		struct imm_ntfy_from_isp imm_ntfy;
+		struct abts_recv_from_24xx abts;
+	} __packed orig_iocb;
+};
+
+struct qla_tgt_prm {
+	struct qla_tgt_cmd *cmd;
+	struct qla_tgt *tgt;
+	void *pkt;
+	struct scatterlist *sg;	/* cmd data buffer SG vector */
+	int seg_cnt;
+	int req_cnt;
+	uint16_t rq_result;
+	uint16_t scsi_status;
+	unsigned char *sense_buffer;
+	int sense_buffer_len;
+	int residual;
+	int add_status_pkt;
+};
+
+struct qla_tgt_srr_imm {
+	struct list_head srr_list_entry;
+	int srr_id;
+	struct imm_ntfy_from_isp imm_ntfy;
+};
+
+struct qla_tgt_srr_ctio {
+	struct list_head srr_list_entry;
+	int srr_id;
+	struct qla_tgt_cmd *cmd;
+};
+
+#define QLA_TGT_XMIT_DATA		1
+#define QLA_TGT_XMIT_STATUS		2
+#define QLA_TGT_XMIT_ALL		(QLA_TGT_XMIT_STATUS|QLA_TGT_XMIT_DATA)
+
+
+extern struct qla_tgt_data qla_target;
+/*
+ * Internal function prototypes
+ */
+void qlt_disable_vha(struct scsi_qla_host *);
+
+/*
+ * Function prototypes for qla_target.c logic used by qla2xxx LLD code.
+ */
+extern int qlt_add_target(struct qla_hw_data *, struct scsi_qla_host *);
+extern int qlt_remove_target(struct qla_hw_data *, struct scsi_qla_host *);
+extern int qlt_lport_register(struct qla_tgt_func_tmpl *, u64,
+			int (*callback)(struct scsi_qla_host *), void *);
+extern void qlt_lport_deregister(struct scsi_qla_host *);
+extern void qlt_unreg_sess(struct qla_tgt_sess *);
+extern void qlt_fc_port_added(struct scsi_qla_host *, fc_port_t *);
+extern void qlt_fc_port_deleted(struct scsi_qla_host *, fc_port_t *);
+extern void qlt_set_mode(struct scsi_qla_host *ha);
+extern void qlt_clear_mode(struct scsi_qla_host *ha);
+extern int __init qlt_init(void);
+extern void qlt_exit(void);
+extern void qlt_update_vp_map(struct scsi_qla_host *, int);
+
+/*
+ * This macro is used during early initializations when host->active_mode
+ * is not set. Right now, ha value is ignored.
+ */
+#define QLA_TGT_MODE_ENABLED() (ql2x_ini_mode != QLA2XXX_INI_MODE_ENABLED)
+
+static inline bool qla_tgt_mode_enabled(struct scsi_qla_host *ha)
+{
+	return ha->host->active_mode & MODE_TARGET;
+}
+
+static inline bool qla_ini_mode_enabled(struct scsi_qla_host *ha)
+{
+	return ha->host->active_mode & MODE_INITIATOR;
+}
+
+static inline void qla_reverse_ini_mode(struct scsi_qla_host *ha)
+{
+	if (ha->host->active_mode & MODE_INITIATOR)
+		ha->host->active_mode &= ~MODE_INITIATOR;
+	else
+		ha->host->active_mode |= MODE_INITIATOR;
+}
+
+/*
+ * Exported symbols from qla_target.c LLD logic used by qla2xxx code.
+ */
+extern void qlt_24xx_atio_pkt_all_vps(struct scsi_qla_host *,
+	struct atio_from_isp *);
+extern void qlt_response_pkt_all_vps(struct scsi_qla_host *, response_t *);
+extern int qlt_rdy_to_xfer(struct qla_tgt_cmd *);
+extern int qlt_xmit_response(struct qla_tgt_cmd *, int, uint8_t);
+extern void qlt_xmit_tm_rsp(struct qla_tgt_mgmt_cmd *);
+extern void qlt_free_mcmd(struct qla_tgt_mgmt_cmd *);
+extern void qlt_free_cmd(struct qla_tgt_cmd *cmd);
+extern void qlt_ctio_completion(struct scsi_qla_host *, uint32_t);
+extern void qlt_async_event(uint16_t, struct scsi_qla_host *, uint16_t *);
+extern void qlt_enable_vha(struct scsi_qla_host *);
+extern void qlt_vport_create(struct scsi_qla_host *, struct qla_hw_data *);
+extern void qlt_rff_id(struct scsi_qla_host *, struct ct_sns_req *);
+extern void qlt_init_atio_q_entries(struct scsi_qla_host *);
+extern void qlt_24xx_process_atio_queue(struct scsi_qla_host *);
+extern void qlt_24xx_config_rings(struct scsi_qla_host *,
+	device_reg_t __iomem *);
+extern void qlt_24xx_config_nvram_stage1(struct scsi_qla_host *,
+	struct nvram_24xx *);
+extern void qlt_24xx_config_nvram_stage2(struct scsi_qla_host *,
+	struct init_cb_24xx *);
+extern int qlt_24xx_process_response_error(struct scsi_qla_host *,
+	struct sts_entry_24xx *);
+extern void qlt_modify_vp_config(struct scsi_qla_host *,
+	struct vp_config_entry_24xx *);
+extern void qlt_probe_one_stage1(struct scsi_qla_host *, struct qla_hw_data *);
+extern int qlt_mem_alloc(struct qla_hw_data *);
+extern void qlt_mem_free(struct qla_hw_data *);
+extern void qlt_stop_phase1(struct qla_tgt *);
+extern void qlt_stop_phase2(struct qla_tgt *);
+
+#endif /* __QLA_TARGET_H */
diff --git a/drivers/scsi/qla2xxx/tcm_qla2xxx.c b/drivers/scsi/qla2xxx/tcm_qla2xxx.c
new file mode 100644
index 0000000..6e64314
--- /dev/null
+++ b/drivers/scsi/qla2xxx/tcm_qla2xxx.c
@@ -0,0 +1,1919 @@
+/*******************************************************************************
+ * This file contains the TCM implementation using the v4 configfs fabric
+ * infrastructure for QLogic target mode HBAs.
+ *
+ * (c) Copyright 2010-2011 RisingTide Systems LLC.
+ *
+ * Licensed to the Linux Foundation under the General Public License (GPL)
+ * version 2.
+ *
+ * Author: Nicholas A. Bellinger <nab@risingtidesystems.com>
+ *
+ * tcm_qla2xxx_parse_wwn() and tcm_qla2xxx_format_wwn() contain code from
+ * the TCM_FC / Open-FCoE.org fabric module.
+ *
+ * Copyright (c) 2010 Cisco Systems, Inc
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ ****************************************************************************/
+
+
+#include <linux/module.h>
+#include <linux/moduleparam.h>
+#include <generated/utsrelease.h>
+#include <linux/utsname.h>
+#include <linux/init.h>
+#include <linux/list.h>
+#include <linux/slab.h>
+#include <linux/kthread.h>
+#include <linux/types.h>
+#include <linux/string.h>
+#include <linux/configfs.h>
+#include <linux/ctype.h>
+#include <asm/unaligned.h>
+#include <scsi/scsi.h>
+#include <scsi/scsi_host.h>
+#include <scsi/scsi_device.h>
+#include <scsi/scsi_cmnd.h>
+#include <target/target_core_base.h>
+#include <target/target_core_fabric.h>
+#include <target/target_core_fabric_configfs.h>
+#include <target/target_core_configfs.h>
+#include <target/configfs_macros.h>
+
+#include "qla_def.h"
+#include "qla_target.h"
+#include "tcm_qla2xxx.h"
+
+struct workqueue_struct *tcm_qla2xxx_free_wq;
+struct workqueue_struct *tcm_qla2xxx_cmd_wq;
+
+static int tcm_qla2xxx_check_true(struct se_portal_group *se_tpg)
+{
+	return 1;
+}
+
+static int tcm_qla2xxx_check_false(struct se_portal_group *se_tpg)
+{
+	return 0;
+}
+
+/*
+ * Parse WWN.
+ * If strict, we require lower-case hex and colon separators to be sure
+ * the name is the same as what would be generated by ft_format_wwn()
+ * so the name and wwn are mapped one-to-one.
+ */
+static ssize_t tcm_qla2xxx_parse_wwn(const char *name, u64 *wwn, int strict)
+{
+	const char *cp;
+	char c;
+	u32 nibble;
+	u32 byte = 0;
+	u32 pos = 0;
+	u32 err;
+
+	*wwn = 0;
+	for (cp = name; cp < &name[TCM_QLA2XXX_NAMELEN - 1]; cp++) {
+		c = *cp;
+		if (c == '\n' && cp[1] == '\0')
+			continue;
+		if (strict && pos++ == 2 && byte++ < 7) {
+			pos = 0;
+			if (c == ':')
+				continue;
+			err = 1;
+			goto fail;
+		}
+		if (c == '\0') {
+			err = 2;
+			if (strict && byte != 8)
+				goto fail;
+			return cp - name;
+		}
+		err = 3;
+		if (isdigit(c))
+			nibble = c - '0';
+		else if (isxdigit(c) && (islower(c) || !strict))
+			nibble = tolower(c) - 'a' + 10;
+		else
+			goto fail;
+		*wwn = (*wwn << 4) | nibble;
+	}
+	err = 4;
+fail:
+	pr_debug("err %u len %zu pos %u byte %u\n",
+			err, cp - name, pos, byte);
+	return -1;
+}
+
+static ssize_t tcm_qla2xxx_format_wwn(char *buf, size_t len, u64 wwn)
+{
+	u8 b[8];
+
+	put_unaligned_be64(wwn, b);
+	return snprintf(buf, len,
+		"%2.2x:%2.2x:%2.2x:%2.2x:%2.2x:%2.2x:%2.2x:%2.2x",
+		b[0], b[1], b[2], b[3], b[4], b[5], b[6], b[7]);
+}
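+
+/*
+ * Example: a WWN of 0x2100000e1e09f268 is formatted by
+ * tcm_qla2xxx_format_wwn() as "21:00:00:0e:1e:09:f2:68", which is also the
+ * (strict) form accepted by tcm_qla2xxx_parse_wwn() above.
+ */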
+
+static char *tcm_qla2xxx_get_fabric_name(void)
+{
+	return "qla2xxx";
+}
+
+/*
+ * From drivers/scsi/scsi_transport_fc.c:fc_parse_wwn
+ */
+static int tcm_qla2xxx_npiv_extract_wwn(const char *ns, u64 *nm)
+{
+	unsigned int i, j;
+	u8 wwn[8];
+
+	memset(wwn, 0, sizeof(wwn));
+
+	/* Validate and store the new name */
+	for (i = 0, j = 0; i < 16; i++) {
+		int value;
+
+		value = hex_to_bin(*ns++);
+		if (value >= 0)
+			j = (j << 4) | value;
+		else
+			return -EINVAL;
+
+		if (i % 2) {
+			wwn[i/2] = j & 0xff;
+			j = 0;
+		}
+	}
+
+	*nm = wwn_to_u64(wwn);
+	return 0;
+}
+
+/*
+ * This parsing logic follows drivers/scsi/scsi_transport_fc.c:
+ * store_fc_host_vport_create()
+ */
+static int tcm_qla2xxx_npiv_parse_wwn(
+	const char *name,
+	size_t count,
+	u64 *wwpn,
+	u64 *wwnn)
+{
+	unsigned int cnt = count;
+	int rc;
+
+	*wwpn = 0;
+	*wwnn = 0;
+
+	/* count may include a LF at end of string */
+	if (name[cnt-1] == '\n')
+		cnt--;
+
+	/* validate we have the full "WWPN:WWNN" (16+1+16 characters) */
+	if ((cnt != (16+1+16)) || (name[16] != ':'))
+		return -EINVAL;
+
+	rc = tcm_qla2xxx_npiv_extract_wwn(&name[0], wwpn);
+	if (rc != 0)
+		return rc;
+
+	rc = tcm_qla2xxx_npiv_extract_wwn(&name[17], wwnn);
+	if (rc != 0)
+		return rc;
+
+	return 0;
+}
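+
+/*
+ * The expected input is a 33-character "WWPN:WWNN" pair, optionally
+ * newline-terminated, e.g. "2100000e1e09f268:2000000e1e09f268".
+ */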
+
+static ssize_t tcm_qla2xxx_npiv_format_wwn(char *buf, size_t len,
+					u64 wwpn, u64 wwnn)
+{
+	u8 b[8], b2[8];
+
+	put_unaligned_be64(wwpn, b);
+	put_unaligned_be64(wwnn, b2);
+	return snprintf(buf, len,
+		"%2.2x:%2.2x:%2.2x:%2.2x:%2.2x:%2.2x:%2.2x:%2.2x,"
+		"%2.2x:%2.2x:%2.2x:%2.2x:%2.2x:%2.2x:%2.2x:%2.2x",
+		b[0], b[1], b[2], b[3], b[4], b[5], b[6], b[7],
+		b2[0], b2[1], b2[2], b2[3], b2[4], b2[5], b2[6], b2[7]);
+}
+
+static char *tcm_qla2xxx_npiv_get_fabric_name(void)
+{
+	return "qla2xxx_npiv";
+}
+
+static u8 tcm_qla2xxx_get_fabric_proto_ident(struct se_portal_group *se_tpg)
+{
+	struct tcm_qla2xxx_tpg *tpg = container_of(se_tpg,
+				struct tcm_qla2xxx_tpg, se_tpg);
+	struct tcm_qla2xxx_lport *lport = tpg->lport;
+	u8 proto_id;
+
+	switch (lport->lport_proto_id) {
+	case SCSI_PROTOCOL_FCP:
+	default:
+		proto_id = fc_get_fabric_proto_ident(se_tpg);
+		break;
+	}
+
+	return proto_id;
+}
+
+static char *tcm_qla2xxx_get_fabric_wwn(struct se_portal_group *se_tpg)
+{
+	struct tcm_qla2xxx_tpg *tpg = container_of(se_tpg,
+				struct tcm_qla2xxx_tpg, se_tpg);
+	struct tcm_qla2xxx_lport *lport = tpg->lport;
+
+	return &lport->lport_name[0];
+}
+
+static char *tcm_qla2xxx_npiv_get_fabric_wwn(struct se_portal_group *se_tpg)
+{
+	struct tcm_qla2xxx_tpg *tpg = container_of(se_tpg,
+				struct tcm_qla2xxx_tpg, se_tpg);
+	struct tcm_qla2xxx_lport *lport = tpg->lport;
+
+	return &lport->lport_npiv_name[0];
+}
+
+static u16 tcm_qla2xxx_get_tag(struct se_portal_group *se_tpg)
+{
+	struct tcm_qla2xxx_tpg *tpg = container_of(se_tpg,
+				struct tcm_qla2xxx_tpg, se_tpg);
+	return tpg->lport_tpgt;
+}
+
+static u32 tcm_qla2xxx_get_default_depth(struct se_portal_group *se_tpg)
+{
+	return 1;
+}
+
+static u32 tcm_qla2xxx_get_pr_transport_id(
+	struct se_portal_group *se_tpg,
+	struct se_node_acl *se_nacl,
+	struct t10_pr_registration *pr_reg,
+	int *format_code,
+	unsigned char *buf)
+{
+	struct tcm_qla2xxx_tpg *tpg = container_of(se_tpg,
+				struct tcm_qla2xxx_tpg, se_tpg);
+	struct tcm_qla2xxx_lport *lport = tpg->lport;
+	int ret = 0;
+
+	switch (lport->lport_proto_id) {
+	case SCSI_PROTOCOL_FCP:
+	default:
+		ret = fc_get_pr_transport_id(se_tpg, se_nacl, pr_reg,
+					format_code, buf);
+		break;
+	}
+
+	return ret;
+}
+
+static u32 tcm_qla2xxx_get_pr_transport_id_len(
+	struct se_portal_group *se_tpg,
+	struct se_node_acl *se_nacl,
+	struct t10_pr_registration *pr_reg,
+	int *format_code)
+{
+	struct tcm_qla2xxx_tpg *tpg = container_of(se_tpg,
+				struct tcm_qla2xxx_tpg, se_tpg);
+	struct tcm_qla2xxx_lport *lport = tpg->lport;
+	int ret = 0;
+
+	switch (lport->lport_proto_id) {
+	case SCSI_PROTOCOL_FCP:
+	default:
+		ret = fc_get_pr_transport_id_len(se_tpg, se_nacl, pr_reg,
+					format_code);
+		break;
+	}
+
+	return ret;
+}
+
+static char *tcm_qla2xxx_parse_pr_out_transport_id(
+	struct se_portal_group *se_tpg,
+	const char *buf,
+	u32 *out_tid_len,
+	char **port_nexus_ptr)
+{
+	struct tcm_qla2xxx_tpg *tpg = container_of(se_tpg,
+				struct tcm_qla2xxx_tpg, se_tpg);
+	struct tcm_qla2xxx_lport *lport = tpg->lport;
+	char *tid = NULL;
+
+	switch (lport->lport_proto_id) {
+	case SCSI_PROTOCOL_FCP:
+	default:
+		tid = fc_parse_pr_out_transport_id(se_tpg, buf, out_tid_len,
+					port_nexus_ptr);
+		break;
+	}
+
+	return tid;
+}
+
+static int tcm_qla2xxx_check_demo_mode(struct se_portal_group *se_tpg)
+{
+	struct tcm_qla2xxx_tpg *tpg = container_of(se_tpg,
+				struct tcm_qla2xxx_tpg, se_tpg);
+
+	return QLA_TPG_ATTRIB(tpg)->generate_node_acls;
+}
+
+static int tcm_qla2xxx_check_demo_mode_cache(struct se_portal_group *se_tpg)
+{
+	struct tcm_qla2xxx_tpg *tpg = container_of(se_tpg,
+				struct tcm_qla2xxx_tpg, se_tpg);
+
+	return QLA_TPG_ATTRIB(tpg)->cache_dynamic_acls;
+}
+
+static int tcm_qla2xxx_check_demo_write_protect(struct se_portal_group *se_tpg)
+{
+	struct tcm_qla2xxx_tpg *tpg = container_of(se_tpg,
+				struct tcm_qla2xxx_tpg, se_tpg);
+
+	return QLA_TPG_ATTRIB(tpg)->demo_mode_write_protect;
+}
+
+static int tcm_qla2xxx_check_prod_write_protect(struct se_portal_group *se_tpg)
+{
+	struct tcm_qla2xxx_tpg *tpg = container_of(se_tpg,
+				struct tcm_qla2xxx_tpg, se_tpg);
+
+	return QLA_TPG_ATTRIB(tpg)->prod_mode_write_protect;
+}
+
+static struct se_node_acl *tcm_qla2xxx_alloc_fabric_acl(
+	struct se_portal_group *se_tpg)
+{
+	struct tcm_qla2xxx_nacl *nacl;
+
+	nacl = kzalloc(sizeof(struct tcm_qla2xxx_nacl), GFP_KERNEL);
+	if (!nacl) {
+		pr_err("Unable to allocate struct tcm_qla2xxx_nacl\n");
+		return NULL;
+	}
+
+	return &nacl->se_node_acl;
+}
+
+static void tcm_qla2xxx_release_fabric_acl(
+	struct se_portal_group *se_tpg,
+	struct se_node_acl *se_nacl)
+{
+	struct tcm_qla2xxx_nacl *nacl = container_of(se_nacl,
+			struct tcm_qla2xxx_nacl, se_node_acl);
+	kfree(nacl);
+}
+
+static u32 tcm_qla2xxx_tpg_get_inst_index(struct se_portal_group *se_tpg)
+{
+	struct tcm_qla2xxx_tpg *tpg = container_of(se_tpg,
+				struct tcm_qla2xxx_tpg, se_tpg);
+
+	return tpg->lport_tpgt;
+}
+
+static void tcm_qla2xxx_complete_mcmd(struct work_struct *work)
+{
+	struct qla_tgt_mgmt_cmd *mcmd = container_of(work,
+			struct qla_tgt_mgmt_cmd, free_work);
+
+	transport_generic_free_cmd(&mcmd->se_cmd, 0);
+}
+
+/*
+ * Called from qla_target_template->free_mcmd(), and will call
+ * tcm_qla2xxx_release_cmd() via normal struct target_core_fabric_ops
+ * release callback.  qla_hw_data->hardware_lock is expected to be held
+ */
+static void tcm_qla2xxx_free_mcmd(struct qla_tgt_mgmt_cmd *mcmd)
+{
+	INIT_WORK(&mcmd->free_work, tcm_qla2xxx_complete_mcmd);
+	queue_work(tcm_qla2xxx_free_wq, &mcmd->free_work);
+}
+
+static void tcm_qla2xxx_complete_free(struct work_struct *work)
+{
+	struct qla_tgt_cmd *cmd = container_of(work, struct qla_tgt_cmd, work);
+
+	transport_generic_free_cmd(&cmd->se_cmd, 0);
+}
+
+/*
+ * Called from qla_target_template->free_cmd(), and will call
+ * tcm_qla2xxx_release_cmd via normal struct target_core_fabric_ops
+ * release callback.  qla_hw_data->hardware_lock is expected to be held
+ */
+static void tcm_qla2xxx_free_cmd(struct qla_tgt_cmd *cmd)
+{
+	INIT_WORK(&cmd->work, tcm_qla2xxx_complete_free);
+	queue_work(tcm_qla2xxx_free_wq, &cmd->work);
+}
+
+/*
+ * Called from struct target_core_fabric_ops->check_stop_free() context
+ */
+static int tcm_qla2xxx_check_stop_free(struct se_cmd *se_cmd)
+{
+	return target_put_sess_cmd(se_cmd->se_sess, se_cmd);
+}
+
+/* tcm_qla2xxx_release_cmd - Callback from TCM Core to release the underlying
+ * fabric descriptor for @se_cmd, the command to release.
+ */
+static void tcm_qla2xxx_release_cmd(struct se_cmd *se_cmd)
+{
+	struct qla_tgt_cmd *cmd;
+
+	if (se_cmd->se_cmd_flags & SCF_SCSI_TMR_CDB) {
+		struct qla_tgt_mgmt_cmd *mcmd = container_of(se_cmd,
+				struct qla_tgt_mgmt_cmd, se_cmd);
+		qlt_free_mcmd(mcmd);
+		return;
+	}
+
+	cmd = container_of(se_cmd, struct qla_tgt_cmd, se_cmd);
+	qlt_free_cmd(cmd);
+}
+
+static int tcm_qla2xxx_shutdown_session(struct se_session *se_sess)
+{
+	struct qla_tgt_sess *sess = se_sess->fabric_sess_ptr;
+	struct scsi_qla_host *vha;
+	unsigned long flags;
+
+	BUG_ON(!sess);
+	vha = sess->vha;
+
+	spin_lock_irqsave(&vha->hw->hardware_lock, flags);
+	sess->tearing_down = 1;
+	target_splice_sess_cmd_list(se_sess);
+	spin_unlock_irqrestore(&vha->hw->hardware_lock, flags);
+
+	return 1;
+}
+
+static void tcm_qla2xxx_close_session(struct se_session *se_sess)
+{
+	struct qla_tgt_sess *sess = se_sess->fabric_sess_ptr;
+	struct scsi_qla_host *vha;
+	unsigned long flags;
+
+	BUG_ON(!sess);
+	vha = sess->vha;
+
+	spin_lock_irqsave(&vha->hw->hardware_lock, flags);
+	qlt_unreg_sess(sess);
+	spin_unlock_irqrestore(&vha->hw->hardware_lock, flags);
+}
+
+static u32 tcm_qla2xxx_sess_get_index(struct se_session *se_sess)
+{
+	return 0;
+}
+
+/*
+ * The LIO target core uses DMA_TO_DEVICE to mean that data is going
+ * to the target (eg handling a WRITE) and DMA_FROM_DEVICE to mean
+ * that data is coming from the target (eg handling a READ).  However,
+ * this is just the opposite of what we have to tell the DMA mapping
+ * layer -- eg when handling a READ, the HBA will have to DMA the data
+ * out of memory so it can send it to the initiator, which means we
+ * need to use DMA_TO_DEVICE when we map the data.
+ */
+static enum dma_data_direction tcm_qla2xxx_mapping_dir(struct se_cmd *se_cmd)
+{
+	if (se_cmd->se_cmd_flags & SCF_BIDI)
+		return DMA_BIDIRECTIONAL;
+
+	switch (se_cmd->data_direction) {
+	case DMA_TO_DEVICE:
+		return DMA_FROM_DEVICE;
+	case DMA_FROM_DEVICE:
+		return DMA_TO_DEVICE;
+	case DMA_NONE:
+	default:
+		return DMA_NONE;
+	}
+}
+
+static int tcm_qla2xxx_write_pending(struct se_cmd *se_cmd)
+{
+	struct qla_tgt_cmd *cmd = container_of(se_cmd,
+				struct qla_tgt_cmd, se_cmd);
+
+	cmd->bufflen = se_cmd->data_length;
+	cmd->dma_data_direction = tcm_qla2xxx_mapping_dir(se_cmd);
+
+	cmd->sg_cnt = se_cmd->t_data_nents;
+	cmd->sg = se_cmd->t_data_sg;
+
+	/*
+	 * qla_target.c:qlt_rdy_to_xfer() will call pci_map_sg() to set up
+	 * the SGL mappings into PCIe memory for incoming FCP WRITE data.
+	 */
+	return qlt_rdy_to_xfer(cmd);
+}
+
+static int tcm_qla2xxx_write_pending_status(struct se_cmd *se_cmd)
+{
+	unsigned long flags;
+	/*
+	 * Check for WRITE_PENDING status to determine if we need to wait for
+	 * CTIO aborts to be posted via hardware in tcm_qla2xxx_handle_data().
+	 */
+	spin_lock_irqsave(&se_cmd->t_state_lock, flags);
+	if (se_cmd->t_state == TRANSPORT_WRITE_PENDING ||
+	    se_cmd->t_state == TRANSPORT_COMPLETE_QF_WP) {
+		spin_unlock_irqrestore(&se_cmd->t_state_lock, flags);
+		wait_for_completion_timeout(&se_cmd->t_transport_stop_comp,
+						3000);
+		return 0;
+	}
+	spin_unlock_irqrestore(&se_cmd->t_state_lock, flags);
+
+	return 0;
+}
+
+static void tcm_qla2xxx_set_default_node_attrs(struct se_node_acl *nacl)
+{
+	return;
+}
+
+static u32 tcm_qla2xxx_get_task_tag(struct se_cmd *se_cmd)
+{
+	struct qla_tgt_cmd *cmd = container_of(se_cmd,
+				struct qla_tgt_cmd, se_cmd);
+
+	return cmd->tag;
+}
+
+static int tcm_qla2xxx_get_cmd_state(struct se_cmd *se_cmd)
+{
+	return 0;
+}
+
+/*
+ * Called from process context in qla_target.c:qlt_do_work() code
+ */
+static int tcm_qla2xxx_handle_cmd(scsi_qla_host_t *vha, struct qla_tgt_cmd *cmd,
+	unsigned char *cdb, uint32_t data_length, int fcp_task_attr,
+	int data_dir, int bidi)
+{
+	struct se_cmd *se_cmd = &cmd->se_cmd;
+	struct se_session *se_sess;
+	struct qla_tgt_sess *sess;
+	int flags = TARGET_SCF_ACK_KREF;
+
+	if (bidi)
+		flags |= TARGET_SCF_BIDI_OP;
+
+	sess = cmd->sess;
+	if (!sess) {
+		pr_err("Unable to locate struct qla_tgt_sess from qla_tgt_cmd\n");
+		return -EINVAL;
+	}
+
+	se_sess = sess->se_sess;
+	if (!se_sess) {
+		pr_err("Unable to locate active struct se_session\n");
+		return -EINVAL;
+	}
+
+	target_submit_cmd(se_cmd, se_sess, cdb, &cmd->sense_buffer[0],
+				cmd->unpacked_lun, data_length, fcp_task_attr,
+				data_dir, flags);
+	return 0;
+}
+
+static void tcm_qla2xxx_do_rsp(struct work_struct *work)
+{
+	struct qla_tgt_cmd *cmd = container_of(work, struct qla_tgt_cmd, work);
+	/*
+	 * Dispatch ->queue_status from workqueue process context
+	 */
+	transport_generic_request_failure(&cmd->se_cmd);
+}
+
+/*
+ * Called from qla_target.c:qlt_do_ctio_completion()
+ */
+static int tcm_qla2xxx_handle_data(struct qla_tgt_cmd *cmd)
+{
+	struct se_cmd *se_cmd = &cmd->se_cmd;
+	unsigned long flags;
+	/*
+	 * Ensure that the complete FCP WRITE payload has been received.
+	 * Otherwise return an exception via CHECK_CONDITION status.
+	 */
+	if (!cmd->write_data_transferred) {
+		/*
+		 * Check if se_cmd has already been aborted via LUN_RESET, and
+		 * waiting upon completion in tcm_qla2xxx_write_pending_status()
+		 */
+		spin_lock_irqsave(&se_cmd->t_state_lock, flags);
+		if (se_cmd->transport_state & CMD_T_ABORTED) {
+			spin_unlock_irqrestore(&se_cmd->t_state_lock, flags);
+			complete(&se_cmd->t_transport_stop_comp);
+			return 0;
+		}
+		spin_unlock_irqrestore(&se_cmd->t_state_lock, flags);
+
+		se_cmd->scsi_sense_reason = TCM_CHECK_CONDITION_ABORT_CMD;
+		INIT_WORK(&cmd->work, tcm_qla2xxx_do_rsp);
+		queue_work(tcm_qla2xxx_free_wq, &cmd->work);
+		return 0;
+	}
+	/*
+	 * We now tell TCM to queue this WRITE CDB with TRANSPORT_PROCESS_WRITE
+	 * status to the backstore processing thread.
+	 */
+	return transport_generic_handle_data(&cmd->se_cmd);
+}
+
+/*
+ * Called from qla_target.c:qlt_issue_task_mgmt()
+ */
+static int tcm_qla2xxx_handle_tmr(struct qla_tgt_mgmt_cmd *mcmd, uint32_t lun,
+	uint8_t tmr_func, uint32_t tag)
+{
+	struct qla_tgt_sess *sess = mcmd->sess;
+	struct se_cmd *se_cmd = &mcmd->se_cmd;
+
+	return target_submit_tmr(se_cmd, sess->se_sess, NULL, lun, mcmd,
+			tmr_func, GFP_ATOMIC, tag, TARGET_SCF_ACK_KREF);
+}
+
+static int tcm_qla2xxx_queue_data_in(struct se_cmd *se_cmd)
+{
+	struct qla_tgt_cmd *cmd = container_of(se_cmd,
+				struct qla_tgt_cmd, se_cmd);
+
+	cmd->bufflen = se_cmd->data_length;
+	cmd->dma_data_direction = tcm_qla2xxx_mapping_dir(se_cmd);
+	cmd->aborted = (se_cmd->transport_state & CMD_T_ABORTED);
+
+	cmd->sg_cnt = se_cmd->t_data_nents;
+	cmd->sg = se_cmd->t_data_sg;
+	cmd->offset = 0;
+
+	/*
+	 * Now queue the completed DATA_IN to the qla2xxx LLD and response ring
+	 */
+	return qlt_xmit_response(cmd, QLA_TGT_XMIT_DATA|QLA_TGT_XMIT_STATUS,
+				se_cmd->scsi_status);
+}
+
+static int tcm_qla2xxx_queue_status(struct se_cmd *se_cmd)
+{
+	struct qla_tgt_cmd *cmd = container_of(se_cmd,
+				struct qla_tgt_cmd, se_cmd);
+	int xmit_type = QLA_TGT_XMIT_STATUS;
+
+	cmd->bufflen = se_cmd->data_length;
+	cmd->sg = NULL;
+	cmd->sg_cnt = 0;
+	cmd->offset = 0;
+	cmd->dma_data_direction = tcm_qla2xxx_mapping_dir(se_cmd);
+	cmd->aborted = (se_cmd->transport_state & CMD_T_ABORTED);
+
+	if (se_cmd->data_direction == DMA_FROM_DEVICE) {
+		/*
+		 * For FCP_READ with CHECK_CONDITION status, clear cmd->bufflen
+		 * for qla_tgt_xmit_response LLD code
+		 */
+		se_cmd->se_cmd_flags |= SCF_UNDERFLOW_BIT;
+		se_cmd->residual_count = se_cmd->data_length;
+
+		cmd->bufflen = 0;
+	}
+	/*
+	 * Now queue status response to qla2xxx LLD code and response ring
+	 */
+	return qlt_xmit_response(cmd, xmit_type, se_cmd->scsi_status);
+}
+
+static int tcm_qla2xxx_queue_tm_rsp(struct se_cmd *se_cmd)
+{
+	struct se_tmr_req *se_tmr = se_cmd->se_tmr_req;
+	struct qla_tgt_mgmt_cmd *mcmd = container_of(se_cmd,
+				struct qla_tgt_mgmt_cmd, se_cmd);
+
+	pr_debug("queue_tm_rsp: mcmd: %p func: 0x%02x response: 0x%02x\n",
+			mcmd, se_tmr->function, se_tmr->response);
+	/*
+	 * Do translation between TCM TM response codes and
+	 * QLA2xxx FC TM response codes.
+	 */
+	switch (se_tmr->response) {
+	case TMR_FUNCTION_COMPLETE:
+		mcmd->fc_tm_rsp = FC_TM_SUCCESS;
+		break;
+	case TMR_TASK_DOES_NOT_EXIST:
+		mcmd->fc_tm_rsp = FC_TM_BAD_CMD;
+		break;
+	case TMR_FUNCTION_REJECTED:
+		mcmd->fc_tm_rsp = FC_TM_REJECT;
+		break;
+	case TMR_LUN_DOES_NOT_EXIST:
+	default:
+		mcmd->fc_tm_rsp = FC_TM_FAILED;
+		break;
+	}
+	/*
+	 * Queue the TM response to QLA2xxx LLD to build a
+	 * CTIO response packet.
+	 */
+	qlt_xmit_tm_rsp(mcmd);
+
+	return 0;
+}
+
+static u16 tcm_qla2xxx_get_fabric_sense_len(void)
+{
+	return 0;
+}
+
+static u16 tcm_qla2xxx_set_fabric_sense_len(struct se_cmd *se_cmd,
+					u32 sense_length)
+{
+	return 0;
+}
+
+/* Local pointer to allocated TCM configfs fabric module */
+struct target_fabric_configfs *tcm_qla2xxx_fabric_configfs;
+struct target_fabric_configfs *tcm_qla2xxx_npiv_fabric_configfs;
+
+static void tcm_qla2xxx_clear_sess_lookup(struct tcm_qla2xxx_lport *,
+			struct tcm_qla2xxx_nacl *, struct qla_tgt_sess *);
+/*
+ * Expected to be called with struct qla_hw_data->hardware_lock held
+ */
+static void tcm_qla2xxx_clear_nacl_from_fcport_map(struct qla_tgt_sess *sess)
+{
+	struct se_node_acl *se_nacl = sess->se_sess->se_node_acl;
+	struct se_portal_group *se_tpg = se_nacl->se_tpg;
+	struct se_wwn *se_wwn = se_tpg->se_tpg_wwn;
+	struct tcm_qla2xxx_lport *lport = container_of(se_wwn,
+				struct tcm_qla2xxx_lport, lport_wwn);
+	struct tcm_qla2xxx_nacl *nacl = container_of(se_nacl,
+				struct tcm_qla2xxx_nacl, se_node_acl);
+	void *node;
+
+	pr_debug("fc_rport domain: port_id 0x%06x\n", nacl->nport_id);
+
+	node = btree_remove32(&lport->lport_fcport_map, nacl->nport_id);
+	WARN_ON(node && (node != se_nacl));
+
+	pr_debug("Removed from fcport_map: %p for WWNN: 0x%016LX, port_id: 0x%06x\n",
+	    se_nacl, nacl->nport_wwnn, nacl->nport_id);
+	/*
+	 * Now clear the se_nacl and session pointers from our HW lport lookup
+	 * table mapping for this initiator's fabric S_ID and LOOP_ID entries.
+	 *
+	 * This is done ahead of callbacks into tcm_qla2xxx_free_session() ->
+	 * target_wait_for_sess_cmds() before the session waits for outstanding
+	 * I/O to complete, to avoid a race between session shutdown execution
+	 * and incoming ATIOs or TMRs picking up a stale se_node_acl reference.
+	 */
+	tcm_qla2xxx_clear_sess_lookup(lport, nacl, sess);
+}
+
+static void tcm_qla2xxx_release_session(struct kref *kref)
+{
+	struct se_session *se_sess = container_of(kref,
+			struct se_session, sess_kref);
+
+	qlt_unreg_sess(se_sess->fabric_sess_ptr);
+}
+
+static void tcm_qla2xxx_put_session(struct se_session *se_sess)
+{
+	struct qla_tgt_sess *sess = se_sess->fabric_sess_ptr;
+	struct qla_hw_data *ha = sess->vha->hw;
+	unsigned long flags;
+
+	spin_lock_irqsave(&ha->hardware_lock, flags);
+	kref_put(&se_sess->sess_kref, tcm_qla2xxx_release_session);
+	spin_unlock_irqrestore(&ha->hardware_lock, flags);
+}
+
+static void tcm_qla2xxx_put_sess(struct qla_tgt_sess *sess)
+{
+	tcm_qla2xxx_put_session(sess->se_sess);
+}
+
+static void tcm_qla2xxx_shutdown_sess(struct qla_tgt_sess *sess)
+{
+	tcm_qla2xxx_shutdown_session(sess->se_sess);
+}
+
+static struct se_node_acl *tcm_qla2xxx_make_nodeacl(
+	struct se_portal_group *se_tpg,
+	struct config_group *group,
+	const char *name)
+{
+	struct se_node_acl *se_nacl, *se_nacl_new;
+	struct tcm_qla2xxx_nacl *nacl;
+	u64 wwnn;
+	u32 qla2xxx_nexus_depth;
+
+	if (tcm_qla2xxx_parse_wwn(name, &wwnn, 1) < 0)
+		return ERR_PTR(-EINVAL);
+
+	se_nacl_new = tcm_qla2xxx_alloc_fabric_acl(se_tpg);
+	if (!se_nacl_new)
+		return ERR_PTR(-ENOMEM);
+/* #warning FIXME: Hardcoded qla2xxx_nexus depth in tcm_qla2xxx_make_nodeacl */
+	qla2xxx_nexus_depth = 1;
+
+	/*
+	 * se_nacl_new may be released by core_tpg_add_initiator_node_acl()
+	 * when converting a NodeACL from demo mode -> explicit
+	 */
+	se_nacl = core_tpg_add_initiator_node_acl(se_tpg, se_nacl_new,
+				name, qla2xxx_nexus_depth);
+	if (IS_ERR(se_nacl)) {
+		tcm_qla2xxx_release_fabric_acl(se_tpg, se_nacl_new);
+		return se_nacl;
+	}
+	/*
+	 * Locate our struct tcm_qla2xxx_nacl and set the FC Nport WWPN
+	 */
+	nacl = container_of(se_nacl, struct tcm_qla2xxx_nacl, se_node_acl);
+	nacl->nport_wwnn = wwnn;
+	tcm_qla2xxx_format_wwn(&nacl->nport_name[0], TCM_QLA2XXX_NAMELEN, wwnn);
+
+	return se_nacl;
+}
+
+static void tcm_qla2xxx_drop_nodeacl(struct se_node_acl *se_acl)
+{
+	struct se_portal_group *se_tpg = se_acl->se_tpg;
+	struct tcm_qla2xxx_nacl *nacl = container_of(se_acl,
+				struct tcm_qla2xxx_nacl, se_node_acl);
+
+	core_tpg_del_initiator_node_acl(se_tpg, se_acl, 1);
+	kfree(nacl);
+}
+
+/* Start items for tcm_qla2xxx_tpg_attrib_cit */
+
+#define DEF_QLA_TPG_ATTRIB(name)					\
+									\
+static ssize_t tcm_qla2xxx_tpg_attrib_show_##name(			\
+	struct se_portal_group *se_tpg,					\
+	char *page)							\
+{									\
+	struct tcm_qla2xxx_tpg *tpg = container_of(se_tpg,		\
+			struct tcm_qla2xxx_tpg, se_tpg);		\
+									\
+	return sprintf(page, "%u\n", QLA_TPG_ATTRIB(tpg)->name);	\
+}									\
+									\
+static ssize_t tcm_qla2xxx_tpg_attrib_store_##name(			\
+	struct se_portal_group *se_tpg,					\
+	const char *page,						\
+	size_t count)							\
+{									\
+	struct tcm_qla2xxx_tpg *tpg = container_of(se_tpg,		\
+			struct tcm_qla2xxx_tpg, se_tpg);		\
+	unsigned long val;						\
+	int ret;							\
+									\
+	ret = kstrtoul(page, 0, &val);					\
+	if (ret < 0) {							\
+		pr_err("kstrtoul() failed with"				\
+				" ret: %d\n", ret);			\
+		return -EINVAL;						\
+	}								\
+	ret = tcm_qla2xxx_set_attrib_##name(tpg, val);			\
+									\
+	return (!ret) ? count : -EINVAL;				\
+}
+
+#define DEF_QLA_TPG_ATTR_BOOL(_name)					\
+									\
+static int tcm_qla2xxx_set_attrib_##_name(				\
+	struct tcm_qla2xxx_tpg *tpg,					\
+	unsigned long val)						\
+{									\
+	struct tcm_qla2xxx_tpg_attrib *a = &tpg->tpg_attrib;		\
+									\
+	if ((val != 0) && (val != 1)) {					\
+		pr_err("Illegal boolean value %lu\n", val);		\
+		return -EINVAL;						\
+	}								\
+									\
+	a->_name = val;							\
+	return 0;							\
+}
+
+#define QLA_TPG_ATTR(_name, _mode) \
+	TF_TPG_ATTRIB_ATTR(tcm_qla2xxx, _name, _mode);
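+
+/*
+ * For each attribute below, DEF_QLA_TPG_ATTR_BOOL(name) defines
+ * tcm_qla2xxx_set_attrib_<name>(), DEF_QLA_TPG_ATTRIB(name) defines the
+ * matching configfs show/store handlers, and QLA_TPG_ATTR() registers the
+ * resulting tcm_qla2xxx_tpg_attrib_<name> attribute referenced in
+ * tcm_qla2xxx_tpg_attrib_attrs[] below.
+ */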
+
+/*
+ * Define tcm_qla2xxx_tpg_attrib_s_generate_node_acls
+ */
+DEF_QLA_TPG_ATTR_BOOL(generate_node_acls);
+DEF_QLA_TPG_ATTRIB(generate_node_acls);
+QLA_TPG_ATTR(generate_node_acls, S_IRUGO | S_IWUSR);
+
+/*
+ * Define tcm_qla2xxx_tpg_attrib_s_cache_dynamic_acls
+ */
+DEF_QLA_TPG_ATTR_BOOL(cache_dynamic_acls);
+DEF_QLA_TPG_ATTRIB(cache_dynamic_acls);
+QLA_TPG_ATTR(cache_dynamic_acls, S_IRUGO | S_IWUSR);
+
+/*
+ * Define tcm_qla2xxx_tpg_attrib_s_demo_mode_write_protect
+ */
+DEF_QLA_TPG_ATTR_BOOL(demo_mode_write_protect);
+DEF_QLA_TPG_ATTRIB(demo_mode_write_protect);
+QLA_TPG_ATTR(demo_mode_write_protect, S_IRUGO | S_IWUSR);
+
+/*
+ * Define tcm_qla2xxx_tpg_attrib_s_prod_mode_write_protect
+ */
+DEF_QLA_TPG_ATTR_BOOL(prod_mode_write_protect);
+DEF_QLA_TPG_ATTRIB(prod_mode_write_protect);
+QLA_TPG_ATTR(prod_mode_write_protect, S_IRUGO | S_IWUSR);
+
+static struct configfs_attribute *tcm_qla2xxx_tpg_attrib_attrs[] = {
+	&tcm_qla2xxx_tpg_attrib_generate_node_acls.attr,
+	&tcm_qla2xxx_tpg_attrib_cache_dynamic_acls.attr,
+	&tcm_qla2xxx_tpg_attrib_demo_mode_write_protect.attr,
+	&tcm_qla2xxx_tpg_attrib_prod_mode_write_protect.attr,
+	NULL,
+};
+
+/* End items for tcm_qla2xxx_tpg_attrib_cit */
+
+static ssize_t tcm_qla2xxx_tpg_show_enable(
+	struct se_portal_group *se_tpg,
+	char *page)
+{
+	struct tcm_qla2xxx_tpg *tpg = container_of(se_tpg,
+			struct tcm_qla2xxx_tpg, se_tpg);
+
+	return snprintf(page, PAGE_SIZE, "%d\n",
+			atomic_read(&tpg->lport_tpg_enabled));
+}
+
+static ssize_t tcm_qla2xxx_tpg_store_enable(
+	struct se_portal_group *se_tpg,
+	const char *page,
+	size_t count)
+{
+	struct se_wwn *se_wwn = se_tpg->se_tpg_wwn;
+	struct tcm_qla2xxx_lport *lport = container_of(se_wwn,
+			struct tcm_qla2xxx_lport, lport_wwn);
+	struct scsi_qla_host *vha = lport->qla_vha;
+	struct qla_hw_data *ha = vha->hw;
+	struct tcm_qla2xxx_tpg *tpg = container_of(se_tpg,
+			struct tcm_qla2xxx_tpg, se_tpg);
+	unsigned long op;
+	int rc;
+
+	rc = kstrtoul(page, 0, &op);
+	if (rc < 0) {
+		pr_err("kstrtoul() returned %d\n", rc);
+		return -EINVAL;
+	}
+	if ((op != 1) && (op != 0)) {
+		pr_err("Illegal value for tpg_enable: %lu\n", op);
+		return -EINVAL;
+	}
+
+	if (op) {
+		atomic_set(&tpg->lport_tpg_enabled, 1);
+		qlt_enable_vha(vha);
+	} else {
+		if (!ha->tgt.qla_tgt) {
+			pr_err("struct qla_hw_data *ha->tgt.qla_tgt is NULL\n");
+			return -ENODEV;
+		}
+		atomic_set(&tpg->lport_tpg_enabled, 0);
+		qlt_stop_phase1(ha->tgt.qla_tgt);
+	}
+
+	return count;
+}
+
+TF_TPG_BASE_ATTR(tcm_qla2xxx, enable, S_IRUGO | S_IWUSR);
+
+static struct configfs_attribute *tcm_qla2xxx_tpg_attrs[] = {
+	&tcm_qla2xxx_tpg_enable.attr,
+	NULL,
+};
+
+static struct se_portal_group *tcm_qla2xxx_make_tpg(
+	struct se_wwn *wwn,
+	struct config_group *group,
+	const char *name)
+{
+	struct tcm_qla2xxx_lport *lport = container_of(wwn,
+			struct tcm_qla2xxx_lport, lport_wwn);
+	struct tcm_qla2xxx_tpg *tpg;
+	unsigned long tpgt;
+	int ret;
+
+	if (strstr(name, "tpgt_") != name)
+		return ERR_PTR(-EINVAL);
+	if (kstrtoul(name + 5, 10, &tpgt) || tpgt > USHRT_MAX)
+		return ERR_PTR(-EINVAL);
+
+	if (!lport->qla_npiv_vp && (tpgt != 1)) {
+		pr_err("In non NPIV mode, a single TPG=1 is used for HW port mappings\n");
+		return ERR_PTR(-ENOSYS);
+	}
+
+	tpg = kzalloc(sizeof(struct tcm_qla2xxx_tpg), GFP_KERNEL);
+	if (!tpg) {
+		pr_err("Unable to allocate struct tcm_qla2xxx_tpg\n");
+		return ERR_PTR(-ENOMEM);
+	}
+	tpg->lport = lport;
+	tpg->lport_tpgt = tpgt;
+	/*
+	 * By default allow READ-ONLY TPG demo-mode access w/ cached dynamic
+	 * NodeACLs
+	 */
+	QLA_TPG_ATTRIB(tpg)->generate_node_acls = 1;
+	QLA_TPG_ATTRIB(tpg)->demo_mode_write_protect = 1;
+	QLA_TPG_ATTRIB(tpg)->cache_dynamic_acls = 1;
+
+	ret = core_tpg_register(&tcm_qla2xxx_fabric_configfs->tf_ops, wwn,
+				&tpg->se_tpg, tpg, TRANSPORT_TPG_TYPE_NORMAL);
+	if (ret < 0) {
+		kfree(tpg);
+		return NULL;
+	}
+	/*
+	 * Setup local TPG=1 pointer for non NPIV mode.
+	 */
+	if (lport->qla_npiv_vp == NULL)
+		lport->tpg_1 = tpg;
+
+	return &tpg->se_tpg;
+}
+
+static void tcm_qla2xxx_drop_tpg(struct se_portal_group *se_tpg)
+{
+	struct tcm_qla2xxx_tpg *tpg = container_of(se_tpg,
+			struct tcm_qla2xxx_tpg, se_tpg);
+	struct tcm_qla2xxx_lport *lport = tpg->lport;
+	struct scsi_qla_host *vha = lport->qla_vha;
+	struct qla_hw_data *ha = vha->hw;
+	/*
+	 * Call into qla_target.c LLD logic to shut down the active
+	 * FC Nexuses and disable target mode operation for this qla_hw_data
+	 */
+	if (ha->tgt.qla_tgt && !ha->tgt.qla_tgt->tgt_stop)
+		qlt_stop_phase1(ha->tgt.qla_tgt);
+
+	core_tpg_deregister(se_tpg);
+	/*
+	 * Clear local TPG=1 pointer for non NPIV mode.
+	 */
+	if (lport->qla_npiv_vp == NULL)
+		lport->tpg_1 = NULL;
+
+	kfree(tpg);
+}
+
+static struct se_portal_group *tcm_qla2xxx_npiv_make_tpg(
+	struct se_wwn *wwn,
+	struct config_group *group,
+	const char *name)
+{
+	struct tcm_qla2xxx_lport *lport = container_of(wwn,
+			struct tcm_qla2xxx_lport, lport_wwn);
+	struct tcm_qla2xxx_tpg *tpg;
+	unsigned long tpgt;
+	int ret;
+
+	if (strstr(name, "tpgt_") != name)
+		return ERR_PTR(-EINVAL);
+	if (kstrtoul(name + 5, 10, &tpgt) || tpgt > USHRT_MAX)
+		return ERR_PTR(-EINVAL);
+
+	tpg = kzalloc(sizeof(struct tcm_qla2xxx_tpg), GFP_KERNEL);
+	if (!tpg) {
+		pr_err("Unable to allocate struct tcm_qla2xxx_tpg\n");
+		return ERR_PTR(-ENOMEM);
+	}
+	tpg->lport = lport;
+	tpg->lport_tpgt = tpgt;
+
+	ret = core_tpg_register(&tcm_qla2xxx_npiv_fabric_configfs->tf_ops, wwn,
+				&tpg->se_tpg, tpg, TRANSPORT_TPG_TYPE_NORMAL);
+	if (ret < 0) {
+		kfree(tpg);
+		return NULL;
+	}
+	return &tpg->se_tpg;
+}
+
+/*
+ * Expected to be called with struct qla_hw_data->hardware_lock held
+ */
+static struct qla_tgt_sess *tcm_qla2xxx_find_sess_by_s_id(
+	scsi_qla_host_t *vha,
+	const uint8_t *s_id)
+{
+	struct qla_hw_data *ha = vha->hw;
+	struct tcm_qla2xxx_lport *lport;
+	struct se_node_acl *se_nacl;
+	struct tcm_qla2xxx_nacl *nacl;
+	u32 key;
+
+	lport = ha->tgt.target_lport_ptr;
+	if (!lport) {
+		pr_err("Unable to locate struct tcm_qla2xxx_lport\n");
+		dump_stack();
+		return NULL;
+	}
+
+	key = (((unsigned long)s_id[0] << 16) |
+	       ((unsigned long)s_id[1] << 8) |
+	       (unsigned long)s_id[2]);
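+	/* e.g. an s_id of { 0x01, 0x02, 0x03 } packs into btree key 0x010203 */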
+	pr_debug("find_sess_by_s_id: 0x%06x\n", key);
+
+	se_nacl = btree_lookup32(&lport->lport_fcport_map, key);
+	if (!se_nacl) {
+		pr_debug("Unable to locate s_id: 0x%06x\n", key);
+		return NULL;
+	}
+	pr_debug("find_sess_by_s_id: located se_nacl: %p, initiatorname: %s\n",
+	    se_nacl, se_nacl->initiatorname);
+
+	nacl = container_of(se_nacl, struct tcm_qla2xxx_nacl, se_node_acl);
+	if (!nacl->qla_tgt_sess) {
+		pr_err("Unable to locate struct qla_tgt_sess\n");
+		return NULL;
+	}
+
+	return nacl->qla_tgt_sess;
+}
+
+/*
+ * Expected to be called with struct qla_hw_data->hardware_lock held
+ */
+static void tcm_qla2xxx_set_sess_by_s_id(
+	struct tcm_qla2xxx_lport *lport,
+	struct se_node_acl *new_se_nacl,
+	struct tcm_qla2xxx_nacl *nacl,
+	struct se_session *se_sess,
+	struct qla_tgt_sess *qla_tgt_sess,
+	uint8_t *s_id)
+{
+	u32 key;
+	void *slot;
+	int rc;
+
+	key = (((unsigned long)s_id[0] << 16) |
+	       ((unsigned long)s_id[1] << 8) |
+	       (unsigned long)s_id[2]);
+	pr_debug("set_sess_by_s_id: %06x\n", key);
+
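+	/* Three cases follow: no existing mapping, an active nacl->qla_tgt_sess, or a stale entry */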
+	slot = btree_lookup32(&lport->lport_fcport_map, key);
+	if (!slot) {
+		if (new_se_nacl) {
+			pr_debug("Setting up new fc_port entry to new_se_nacl\n");
+			nacl->nport_id = key;
+			rc = btree_insert32(&lport->lport_fcport_map, key,
+					new_se_nacl, GFP_ATOMIC);
+			if (rc)
+				printk(KERN_ERR "Unable to insert s_id into fcport_map: %06x\n",
+				    (int)key);
+		} else {
+			pr_debug("Wiping nonexistent fc_port entry\n");
+		}
+
+		qla_tgt_sess->se_sess = se_sess;
+		nacl->qla_tgt_sess = qla_tgt_sess;
+		return;
+	}
+
+	if (nacl->qla_tgt_sess) {
+		if (new_se_nacl == NULL) {
+			pr_debug("Clearing existing nacl->qla_tgt_sess and fc_port entry\n");
+			btree_remove32(&lport->lport_fcport_map, key);
+			nacl->qla_tgt_sess = NULL;
+			return;
+		}
+		pr_debug("Replacing existing nacl->qla_tgt_sess and fc_port entry\n");
+		btree_update32(&lport->lport_fcport_map, key, new_se_nacl);
+		qla_tgt_sess->se_sess = se_sess;
+		nacl->qla_tgt_sess = qla_tgt_sess;
+		return;
+	}
+
+	if (new_se_nacl == NULL) {
+		pr_debug("Clearing existing fc_port entry\n");
+		btree_remove32(&lport->lport_fcport_map, key);
+		return;
+	}
+
+	pr_debug("Replacing existing fc_port entry w/o active nacl->qla_tgt_sess\n");
+	btree_update32(&lport->lport_fcport_map, key, new_se_nacl);
+	qla_tgt_sess->se_sess = se_sess;
+	nacl->qla_tgt_sess = qla_tgt_sess;
+
+	pr_debug("Setup nacl->qla_tgt_sess %p by s_id for se_nacl: %p, initiatorname: %s\n",
+	    nacl->qla_tgt_sess, new_se_nacl, new_se_nacl->initiatorname);
+}
+
+/*
+ * Expected to be called with struct qla_hw_data->hardware_lock held
+ */
+static struct qla_tgt_sess *tcm_qla2xxx_find_sess_by_loop_id(
+	scsi_qla_host_t *vha,
+	const uint16_t loop_id)
+{
+	struct qla_hw_data *ha = vha->hw;
+	struct tcm_qla2xxx_lport *lport;
+	struct se_node_acl *se_nacl;
+	struct tcm_qla2xxx_nacl *nacl;
+	struct tcm_qla2xxx_fc_loopid *fc_loopid;
+
+	lport = ha->tgt.target_lport_ptr;
+	if (!lport) {
+		pr_err("Unable to locate struct tcm_qla2xxx_lport\n");
+		dump_stack();
+		return NULL;
+	}
+
+	pr_debug("find_sess_by_loop_id: Using loop_id: 0x%04x\n", loop_id);
+
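+	/* lport_loopid_map is a flat table indexed directly by the 16-bit loop_id */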
+	fc_loopid = lport->lport_loopid_map + loop_id;
+	se_nacl = fc_loopid->se_nacl;
+	if (!se_nacl) {
+		pr_debug("Unable to locate se_nacl by loop_id: 0x%04x\n",
+		    loop_id);
+		return NULL;
+	}
+
+	nacl = container_of(se_nacl, struct tcm_qla2xxx_nacl, se_node_acl);
+
+	if (!nacl->qla_tgt_sess) {
+		pr_err("Unable to locate struct qla_tgt_sess\n");
+		return NULL;
+	}
+
+	return nacl->qla_tgt_sess;
+}
+
+/*
+ * Expected to be called with struct qla_hw_data->hardware_lock held
+ */
+static void tcm_qla2xxx_set_sess_by_loop_id(
+	struct tcm_qla2xxx_lport *lport,
+	struct se_node_acl *new_se_nacl,
+	struct tcm_qla2xxx_nacl *nacl,
+	struct se_session *se_sess,
+	struct qla_tgt_sess *qla_tgt_sess,
+	uint16_t loop_id)
+{
+	struct se_node_acl *saved_nacl;
+	struct tcm_qla2xxx_fc_loopid *fc_loopid;
+
+	pr_debug("set_sess_by_loop_id: Using loop_id: 0x%04x\n", loop_id);
+
+	fc_loopid = &((struct tcm_qla2xxx_fc_loopid *)
+			lport->lport_loopid_map)[loop_id];
+
+	saved_nacl = fc_loopid->se_nacl;
+	if (!saved_nacl) {
+		pr_debug("Setting up new fc_loopid->se_nacl to new_se_nacl\n");
+		fc_loopid->se_nacl = new_se_nacl;
+		if (qla_tgt_sess->se_sess != se_sess)
+			qla_tgt_sess->se_sess = se_sess;
+		if (nacl->qla_tgt_sess != qla_tgt_sess)
+			nacl->qla_tgt_sess = qla_tgt_sess;
+		return;
+	}
+
+	if (nacl->qla_tgt_sess) {
+		if (new_se_nacl == NULL) {
+			pr_debug("Clearing nacl->qla_tgt_sess and fc_loopid->se_nacl\n");
+			fc_loopid->se_nacl = NULL;
+			nacl->qla_tgt_sess = NULL;
+			return;
+		}
+
+		pr_debug("Replacing existing nacl->qla_tgt_sess and fc_loopid->se_nacl\n");
+		fc_loopid->se_nacl = new_se_nacl;
+		if (qla_tgt_sess->se_sess != se_sess)
+			qla_tgt_sess->se_sess = se_sess;
+		if (nacl->qla_tgt_sess != qla_tgt_sess)
+			nacl->qla_tgt_sess = qla_tgt_sess;
+		return;
+	}
+
+	if (new_se_nacl == NULL) {
+		pr_debug("Clearing fc_loopid->se_nacl\n");
+		fc_loopid->se_nacl = NULL;
+		return;
+	}
+
+	pr_debug("Replacing existing fc_loopid->se_nacl w/o active nacl->qla_tgt_sess\n");
+	fc_loopid->se_nacl = new_se_nacl;
+	if (qla_tgt_sess->se_sess != se_sess)
+		qla_tgt_sess->se_sess = se_sess;
+	if (nacl->qla_tgt_sess != qla_tgt_sess)
+		nacl->qla_tgt_sess = qla_tgt_sess;
+
+	pr_debug("Setup nacl->qla_tgt_sess %p by loop_id for se_nacl: %p, initiatorname: %s\n",
+	    nacl->qla_tgt_sess, new_se_nacl, new_se_nacl->initiatorname);
+}
+
+/*
+ * Should always be called with qla_hw_data->hardware_lock held.
+ */
+static void tcm_qla2xxx_clear_sess_lookup(struct tcm_qla2xxx_lport *lport,
+		struct tcm_qla2xxx_nacl *nacl, struct qla_tgt_sess *sess)
+{
+	struct se_session *se_sess = sess->se_sess;
+	unsigned char be_sid[3];
+
+	be_sid[0] = sess->s_id.b.domain;
+	be_sid[1] = sess->s_id.b.area;
+	be_sid[2] = sess->s_id.b.al_pa;
+
+	tcm_qla2xxx_set_sess_by_s_id(lport, NULL, nacl, se_sess,
+				sess, be_sid);
+	tcm_qla2xxx_set_sess_by_loop_id(lport, NULL, nacl, se_sess,
+				sess, sess->loop_id);
+}
+
+static void tcm_qla2xxx_free_session(struct qla_tgt_sess *sess)
+{
+	struct qla_tgt *tgt = sess->tgt;
+	struct qla_hw_data *ha = tgt->ha;
+	struct se_session *se_sess;
+	struct se_node_acl *se_nacl;
+	struct tcm_qla2xxx_lport *lport;
+	struct tcm_qla2xxx_nacl *nacl;
+
+	BUG_ON(in_interrupt());
+
+	se_sess = sess->se_sess;
+	if (!se_sess) {
+		pr_err("struct qla_tgt_sess->se_sess is NULL\n");
+		dump_stack();
+		return;
+	}
+	se_nacl = se_sess->se_node_acl;
+	nacl = container_of(se_nacl, struct tcm_qla2xxx_nacl, se_node_acl);
+
+	lport = ha->tgt.target_lport_ptr;
+	if (!lport) {
+		pr_err("Unable to locate struct tcm_qla2xxx_lport\n");
+		dump_stack();
+		return;
+	}
+	target_wait_for_sess_cmds(se_sess, 0);
+
+	transport_deregister_session_configfs(sess->se_sess);
+	transport_deregister_session(sess->se_sess);
+}
+
+/*
+ * Called via qlt_create_sess():ha->qla2x_tmpl->check_initiator_node_acl()
+ * to locate struct se_node_acl
+ */
+static int tcm_qla2xxx_check_initiator_node_acl(
+	scsi_qla_host_t *vha,
+	unsigned char *fc_wwpn,
+	void *qla_tgt_sess,
+	uint8_t *s_id,
+	uint16_t loop_id)
+{
+	struct qla_hw_data *ha = vha->hw;
+	struct tcm_qla2xxx_lport *lport;
+	struct tcm_qla2xxx_tpg *tpg;
+	struct tcm_qla2xxx_nacl *nacl;
+	struct se_portal_group *se_tpg;
+	struct se_node_acl *se_nacl;
+	struct se_session *se_sess;
+	struct qla_tgt_sess *sess = qla_tgt_sess;
+	unsigned char port_name[36];
+	unsigned long flags;
+
+	lport = ha->tgt.target_lport_ptr;
+	if (!lport) {
+		pr_err("Unable to locate struct tcm_qla2xxx_lport\n");
+		dump_stack();
+		return -EINVAL;
+	}
+	/*
+	 * Locate the TPG=1 reference..
+	 */
+	tpg = lport->tpg_1;
+	if (!tpg) {
+		pr_err("Unable to locate struct tcm_qla2xxx_lport->tpg_1\n");
+		return -EINVAL;
+	}
+	se_tpg = &tpg->se_tpg;
+
+	se_sess = transport_init_session();
+	if (IS_ERR(se_sess)) {
+		pr_err("Unable to initialize struct se_session\n");
+		return PTR_ERR(se_sess);
+	}
+	/*
+	 * Format the FCP Initiator port_name into colon separated values to
+	 * match the format used by tcm_qla2xxx explicit ConfigFS NodeACLs.
+	 */
+	memset(&port_name, 0, 36);
+	snprintf(port_name, 36, "%02x:%02x:%02x:%02x:%02x:%02x:%02x:%02x",
+		fc_wwpn[0], fc_wwpn[1], fc_wwpn[2], fc_wwpn[3], fc_wwpn[4],
+		fc_wwpn[5], fc_wwpn[6], fc_wwpn[7]);
+	/*
+	 * Locate our struct se_node_acl either from an explicit NodeACL created
+	 * via ConfigFS, or via running in TPG demo mode.
+	 */
+	se_sess->se_node_acl = core_tpg_check_initiator_node_acl(se_tpg,
+					port_name);
+	if (!se_sess->se_node_acl) {
+		transport_free_session(se_sess);
+		return -EINVAL;
+	}
+	se_nacl = se_sess->se_node_acl;
+	nacl = container_of(se_nacl, struct tcm_qla2xxx_nacl, se_node_acl);
+	/*
+	 * And now setup the new se_nacl and session pointers into our HW lport
+	 * mappings for fabric S_ID and LOOP_ID.
+	 */
+	spin_lock_irqsave(&ha->hardware_lock, flags);
+	tcm_qla2xxx_set_sess_by_s_id(lport, se_nacl, nacl, se_sess,
+			qla_tgt_sess, s_id);
+	tcm_qla2xxx_set_sess_by_loop_id(lport, se_nacl, nacl, se_sess,
+			qla_tgt_sess, loop_id);
+	spin_unlock_irqrestore(&ha->hardware_lock, flags);
+	/*
+	 * Finally register the new FC Nexus with TCM
+	 */
+	__transport_register_session(se_nacl->se_tpg, se_nacl, se_sess, sess);
+
+	return 0;
+}
+
+/*
+ * Calls into tcm_qla2xxx used by qla2xxx LLD I/O path.
+ */
+static struct qla_tgt_func_tmpl tcm_qla2xxx_template = {
+	.handle_cmd		= tcm_qla2xxx_handle_cmd,
+	.handle_data		= tcm_qla2xxx_handle_data,
+	.handle_tmr		= tcm_qla2xxx_handle_tmr,
+	.free_cmd		= tcm_qla2xxx_free_cmd,
+	.free_mcmd		= tcm_qla2xxx_free_mcmd,
+	.free_session		= tcm_qla2xxx_free_session,
+	.check_initiator_node_acl = tcm_qla2xxx_check_initiator_node_acl,
+	.find_sess_by_s_id	= tcm_qla2xxx_find_sess_by_s_id,
+	.find_sess_by_loop_id	= tcm_qla2xxx_find_sess_by_loop_id,
+	.clear_nacl_from_fcport_map = tcm_qla2xxx_clear_nacl_from_fcport_map,
+	.put_sess		= tcm_qla2xxx_put_sess,
+	.shutdown_sess		= tcm_qla2xxx_shutdown_sess,
+};
+
+static int tcm_qla2xxx_init_lport(struct tcm_qla2xxx_lport *lport)
+{
+	int rc;
+
+	rc = btree_init32(&lport->lport_fcport_map);
+	if (rc) {
+		pr_err("Unable to initialize lport->lport_fcport_map btree\n");
+		return rc;
+	}
+
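+	/* One lookup slot per possible 16-bit FC loop ID (65536 entries) */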
+	lport->lport_loopid_map = vmalloc(sizeof(struct tcm_qla2xxx_fc_loopid) *
+				65536);
+	if (!lport->lport_loopid_map) {
+		pr_err("Unable to allocate lport->lport_loopid_map of %zu bytes\n",
+		    sizeof(struct tcm_qla2xxx_fc_loopid) * 65536);
+		btree_destroy32(&lport->lport_fcport_map);
+		return -ENOMEM;
+	}
+	memset(lport->lport_loopid_map, 0, sizeof(struct tcm_qla2xxx_fc_loopid)
+	       * 65536);
+	pr_debug("qla2xxx: Allocated lport_loopid_map of %zu bytes\n",
+	       sizeof(struct tcm_qla2xxx_fc_loopid) * 65536);
+	return 0;
+}
+
+static int tcm_qla2xxx_lport_register_cb(struct scsi_qla_host *vha)
+{
+	struct qla_hw_data *ha = vha->hw;
+	struct tcm_qla2xxx_lport *lport;
+	/*
+	 * Setup local pointer to vha, NPIV VP pointer (if present) and
+	 * vha->tcm_lport pointer
+	 */
+	lport = (struct tcm_qla2xxx_lport *)ha->tgt.target_lport_ptr;
+	lport->qla_vha = vha;
+
+	return 0;
+}
+
+static struct se_wwn *tcm_qla2xxx_make_lport(
+	struct target_fabric_configfs *tf,
+	struct config_group *group,
+	const char *name)
+{
+	struct tcm_qla2xxx_lport *lport;
+	u64 wwpn;
+	int ret = -ENODEV;
+
+	if (tcm_qla2xxx_parse_wwn(name, &wwpn, 1) < 0)
+		return ERR_PTR(-EINVAL);
+
+	lport = kzalloc(sizeof(struct tcm_qla2xxx_lport), GFP_KERNEL);
+	if (!lport) {
+		pr_err("Unable to allocate struct tcm_qla2xxx_lport\n");
+		return ERR_PTR(-ENOMEM);
+	}
+	lport->lport_wwpn = wwpn;
+	tcm_qla2xxx_format_wwn(&lport->lport_name[0], TCM_QLA2XXX_NAMELEN,
+				wwpn);
+
+	ret = tcm_qla2xxx_init_lport(lport);
+	if (ret != 0)
+		goto out;
+
+	ret = qlt_lport_register(&tcm_qla2xxx_template, wwpn,
+				tcm_qla2xxx_lport_register_cb, lport);
+	if (ret != 0)
+		goto out_lport;
+
+	return &lport->lport_wwn;
+out_lport:
+	vfree(lport->lport_loopid_map);
+	btree_destroy32(&lport->lport_fcport_map);
+out:
+	kfree(lport);
+	return ERR_PTR(ret);
+}
+
+static void tcm_qla2xxx_drop_lport(struct se_wwn *wwn)
+{
+	struct tcm_qla2xxx_lport *lport = container_of(wwn,
+			struct tcm_qla2xxx_lport, lport_wwn);
+	struct scsi_qla_host *vha = lport->qla_vha;
+	struct qla_hw_data *ha = vha->hw;
+	struct se_node_acl *node;
+	u32 key = 0;
+
+	/*
+	 * Call into qla_target.c LLD logic to complete the
+	 * shutdown of struct qla_tgt after the call to
+	 * qlt_stop_phase1() from tcm_qla2xxx_drop_tpg() above.
+	 */
+	if (ha->tgt.qla_tgt && !ha->tgt.qla_tgt->tgt_stopped)
+		qlt_stop_phase2(ha->tgt.qla_tgt);
+
+	qlt_lport_deregister(vha);
+
+	vfree(lport->lport_loopid_map);
+	btree_for_each_safe32(&lport->lport_fcport_map, key, node)
+		btree_remove32(&lport->lport_fcport_map, key);
+	btree_destroy32(&lport->lport_fcport_map);
+	kfree(lport);
+}
+
+static struct se_wwn *tcm_qla2xxx_npiv_make_lport(
+	struct target_fabric_configfs *tf,
+	struct config_group *group,
+	const char *name)
+{
+	struct tcm_qla2xxx_lport *lport;
+	u64 npiv_wwpn, npiv_wwnn;
+	int ret;
+
+	if (tcm_qla2xxx_npiv_parse_wwn(name, strlen(name)+1,
+				&npiv_wwpn, &npiv_wwnn) < 0)
+		return ERR_PTR(-EINVAL);
+
+	lport = kzalloc(sizeof(struct tcm_qla2xxx_lport), GFP_KERNEL);
+	if (!lport) {
+		pr_err("Unable to allocate struct tcm_qla2xxx_lport for NPIV\n");
+		return ERR_PTR(-ENOMEM);
+	}
+	lport->lport_npiv_wwpn = npiv_wwpn;
+	lport->lport_npiv_wwnn = npiv_wwnn;
+	tcm_qla2xxx_npiv_format_wwn(&lport->lport_npiv_name[0],
+			TCM_QLA2XXX_NAMELEN, npiv_wwpn, npiv_wwnn);
+
+/* FIXME: tcm_qla2xxx_npiv_make_lport */
+	ret = -ENOSYS;
+	if (ret != 0)
+		goto out;
+
+	return &lport->lport_wwn;
+out:
+	kfree(lport);
+	return ERR_PTR(ret);
+}
+
+static void tcm_qla2xxx_npiv_drop_lport(struct se_wwn *wwn)
+{
+	struct tcm_qla2xxx_lport *lport = container_of(wwn,
+			struct tcm_qla2xxx_lport, lport_wwn);
+	struct scsi_qla_host *vha = lport->qla_vha;
+	struct Scsi_Host *sh = vha->host;
+	/*
+	 * Notify libfc that we want to release the lport->npiv_vport
+	 */
+	fc_vport_terminate(lport->npiv_vport);
+
+	scsi_host_put(sh);
+	kfree(lport);
+}
+
+
+static ssize_t tcm_qla2xxx_wwn_show_attr_version(
+	struct target_fabric_configfs *tf,
+	char *page)
+{
+	return sprintf(page,
+	    "TCM QLOGIC QLA2XXX NPIV capable fabric module %s on %s/%s on "
+	    UTS_RELEASE"\n", TCM_QLA2XXX_VERSION, utsname()->sysname,
+	    utsname()->machine);
+}
+
+TF_WWN_ATTR_RO(tcm_qla2xxx, version);
+
+static struct configfs_attribute *tcm_qla2xxx_wwn_attrs[] = {
+	&tcm_qla2xxx_wwn_version.attr,
+	NULL,
+};
+
+static struct target_core_fabric_ops tcm_qla2xxx_ops = {
+	.get_fabric_name		= tcm_qla2xxx_get_fabric_name,
+	.get_fabric_proto_ident		= tcm_qla2xxx_get_fabric_proto_ident,
+	.tpg_get_wwn			= tcm_qla2xxx_get_fabric_wwn,
+	.tpg_get_tag			= tcm_qla2xxx_get_tag,
+	.tpg_get_default_depth		= tcm_qla2xxx_get_default_depth,
+	.tpg_get_pr_transport_id	= tcm_qla2xxx_get_pr_transport_id,
+	.tpg_get_pr_transport_id_len	= tcm_qla2xxx_get_pr_transport_id_len,
+	.tpg_parse_pr_out_transport_id	= tcm_qla2xxx_parse_pr_out_transport_id,
+	.tpg_check_demo_mode		= tcm_qla2xxx_check_demo_mode,
+	.tpg_check_demo_mode_cache	= tcm_qla2xxx_check_demo_mode_cache,
+	.tpg_check_demo_mode_write_protect =
+					tcm_qla2xxx_check_demo_write_protect,
+	.tpg_check_prod_mode_write_protect =
+					tcm_qla2xxx_check_prod_write_protect,
+	.tpg_check_demo_mode_login_only = tcm_qla2xxx_check_true,
+	.tpg_alloc_fabric_acl		= tcm_qla2xxx_alloc_fabric_acl,
+	.tpg_release_fabric_acl		= tcm_qla2xxx_release_fabric_acl,
+	.tpg_get_inst_index		= tcm_qla2xxx_tpg_get_inst_index,
+	.new_cmd_map			= NULL,
+	.check_stop_free		= tcm_qla2xxx_check_stop_free,
+	.release_cmd			= tcm_qla2xxx_release_cmd,
+	.put_session			= tcm_qla2xxx_put_session,
+	.shutdown_session		= tcm_qla2xxx_shutdown_session,
+	.close_session			= tcm_qla2xxx_close_session,
+	.sess_get_index			= tcm_qla2xxx_sess_get_index,
+	.sess_get_initiator_sid		= NULL,
+	.write_pending			= tcm_qla2xxx_write_pending,
+	.write_pending_status		= tcm_qla2xxx_write_pending_status,
+	.set_default_node_attributes	= tcm_qla2xxx_set_default_node_attrs,
+	.get_task_tag			= tcm_qla2xxx_get_task_tag,
+	.get_cmd_state			= tcm_qla2xxx_get_cmd_state,
+	.queue_data_in			= tcm_qla2xxx_queue_data_in,
+	.queue_status			= tcm_qla2xxx_queue_status,
+	.queue_tm_rsp			= tcm_qla2xxx_queue_tm_rsp,
+	.get_fabric_sense_len		= tcm_qla2xxx_get_fabric_sense_len,
+	.set_fabric_sense_len		= tcm_qla2xxx_set_fabric_sense_len,
+	/*
+	 * Setup function pointers for generic logic in
+	 * target_core_fabric_configfs.c
+	 */
+	.fabric_make_wwn		= tcm_qla2xxx_make_lport,
+	.fabric_drop_wwn		= tcm_qla2xxx_drop_lport,
+	.fabric_make_tpg		= tcm_qla2xxx_make_tpg,
+	.fabric_drop_tpg		= tcm_qla2xxx_drop_tpg,
+	.fabric_post_link		= NULL,
+	.fabric_pre_unlink		= NULL,
+	.fabric_make_np			= NULL,
+	.fabric_drop_np			= NULL,
+	.fabric_make_nodeacl		= tcm_qla2xxx_make_nodeacl,
+	.fabric_drop_nodeacl		= tcm_qla2xxx_drop_nodeacl,
+};
+
+static struct target_core_fabric_ops tcm_qla2xxx_npiv_ops = {
+	.get_fabric_name		= tcm_qla2xxx_npiv_get_fabric_name,
+	.get_fabric_proto_ident		= tcm_qla2xxx_get_fabric_proto_ident,
+	.tpg_get_wwn			= tcm_qla2xxx_npiv_get_fabric_wwn,
+	.tpg_get_tag			= tcm_qla2xxx_get_tag,
+	.tpg_get_default_depth		= tcm_qla2xxx_get_default_depth,
+	.tpg_get_pr_transport_id	= tcm_qla2xxx_get_pr_transport_id,
+	.tpg_get_pr_transport_id_len	= tcm_qla2xxx_get_pr_transport_id_len,
+	.tpg_parse_pr_out_transport_id	= tcm_qla2xxx_parse_pr_out_transport_id,
+	.tpg_check_demo_mode		= tcm_qla2xxx_check_false,
+	.tpg_check_demo_mode_cache	= tcm_qla2xxx_check_true,
+	.tpg_check_demo_mode_write_protect = tcm_qla2xxx_check_true,
+	.tpg_check_prod_mode_write_protect = tcm_qla2xxx_check_false,
+	.tpg_check_demo_mode_login_only	= tcm_qla2xxx_check_true,
+	.tpg_alloc_fabric_acl		= tcm_qla2xxx_alloc_fabric_acl,
+	.tpg_release_fabric_acl		= tcm_qla2xxx_release_fabric_acl,
+	.tpg_get_inst_index		= tcm_qla2xxx_tpg_get_inst_index,
+	.release_cmd			= tcm_qla2xxx_release_cmd,
+	.put_session			= tcm_qla2xxx_put_session,
+	.shutdown_session		= tcm_qla2xxx_shutdown_session,
+	.close_session			= tcm_qla2xxx_close_session,
+	.sess_get_index			= tcm_qla2xxx_sess_get_index,
+	.sess_get_initiator_sid		= NULL,
+	.write_pending			= tcm_qla2xxx_write_pending,
+	.write_pending_status		= tcm_qla2xxx_write_pending_status,
+	.set_default_node_attributes	= tcm_qla2xxx_set_default_node_attrs,
+	.get_task_tag			= tcm_qla2xxx_get_task_tag,
+	.get_cmd_state			= tcm_qla2xxx_get_cmd_state,
+	.queue_data_in			= tcm_qla2xxx_queue_data_in,
+	.queue_status			= tcm_qla2xxx_queue_status,
+	.queue_tm_rsp			= tcm_qla2xxx_queue_tm_rsp,
+	.get_fabric_sense_len		= tcm_qla2xxx_get_fabric_sense_len,
+	.set_fabric_sense_len		= tcm_qla2xxx_set_fabric_sense_len,
+	/*
+	 * Setup function pointers for generic logic in
+	 * target_core_fabric_configfs.c
+	 */
+	.fabric_make_wwn		= tcm_qla2xxx_npiv_make_lport,
+	.fabric_drop_wwn		= tcm_qla2xxx_npiv_drop_lport,
+	.fabric_make_tpg		= tcm_qla2xxx_npiv_make_tpg,
+	.fabric_drop_tpg		= tcm_qla2xxx_drop_tpg,
+	.fabric_post_link		= NULL,
+	.fabric_pre_unlink		= NULL,
+	.fabric_make_np			= NULL,
+	.fabric_drop_np			= NULL,
+	.fabric_make_nodeacl		= tcm_qla2xxx_make_nodeacl,
+	.fabric_drop_nodeacl		= tcm_qla2xxx_drop_nodeacl,
+};
+
+static int tcm_qla2xxx_register_configfs(void)
+{
+	struct target_fabric_configfs *fabric, *npiv_fabric;
+	int ret;
+
+	pr_debug("TCM QLOGIC QLA2XXX fabric module %s on %s/%s on "
+	    UTS_RELEASE"\n", TCM_QLA2XXX_VERSION, utsname()->sysname,
+	    utsname()->machine);
+	/*
+	 * Register the top level struct config_item_type with TCM core
+	 */
+	fabric = target_fabric_configfs_init(THIS_MODULE, "qla2xxx");
+	if (IS_ERR(fabric)) {
+		pr_err("target_fabric_configfs_init() failed\n");
+		return PTR_ERR(fabric);
+	}
+	/*
+	 * Setup fabric->tf_ops from our local tcm_qla2xxx_ops
+	 */
+	fabric->tf_ops = tcm_qla2xxx_ops;
+	/*
+	 * Setup default attribute lists for various fabric->tf_cit_tmpl
+	 */
+	TF_CIT_TMPL(fabric)->tfc_wwn_cit.ct_attrs = tcm_qla2xxx_wwn_attrs;
+	TF_CIT_TMPL(fabric)->tfc_tpg_base_cit.ct_attrs = tcm_qla2xxx_tpg_attrs;
+	TF_CIT_TMPL(fabric)->tfc_tpg_attrib_cit.ct_attrs =
+						tcm_qla2xxx_tpg_attrib_attrs;
+	TF_CIT_TMPL(fabric)->tfc_tpg_param_cit.ct_attrs = NULL;
+	TF_CIT_TMPL(fabric)->tfc_tpg_np_base_cit.ct_attrs = NULL;
+	TF_CIT_TMPL(fabric)->tfc_tpg_nacl_base_cit.ct_attrs = NULL;
+	TF_CIT_TMPL(fabric)->tfc_tpg_nacl_attrib_cit.ct_attrs = NULL;
+	TF_CIT_TMPL(fabric)->tfc_tpg_nacl_auth_cit.ct_attrs = NULL;
+	TF_CIT_TMPL(fabric)->tfc_tpg_nacl_param_cit.ct_attrs = NULL;
+	/*
+	 * Register the fabric for use within TCM
+	 */
+	ret = target_fabric_configfs_register(fabric);
+	if (ret < 0) {
+		pr_err("target_fabric_configfs_register() failed for TCM_QLA2XXX\n");
+		return ret;
+	}
+	/*
+	 * Setup our local pointer to *fabric
+	 */
+	tcm_qla2xxx_fabric_configfs = fabric;
+	pr_debug("TCM_QLA2XXX[0] - Set fabric -> tcm_qla2xxx_fabric_configfs\n");
+
+	/*
+	 * Register the top level struct config_item_type for NPIV with TCM core
+	 */
+	npiv_fabric = target_fabric_configfs_init(THIS_MODULE, "qla2xxx_npiv");
+	if (IS_ERR(npiv_fabric)) {
+		pr_err("target_fabric_configfs_init() failed\n");
+		ret = PTR_ERR(npiv_fabric);
+		goto out_fabric;
+	}
+	/*
+	 * Setup fabric->tf_ops from our local tcm_qla2xxx_npiv_ops
+	 */
+	npiv_fabric->tf_ops = tcm_qla2xxx_npiv_ops;
+	/*
+	 * Setup default attribute lists for various npiv_fabric->tf_cit_tmpl
+	 */
+	TF_CIT_TMPL(npiv_fabric)->tfc_wwn_cit.ct_attrs = tcm_qla2xxx_wwn_attrs;
+	TF_CIT_TMPL(npiv_fabric)->tfc_tpg_base_cit.ct_attrs = NULL;
+	TF_CIT_TMPL(npiv_fabric)->tfc_tpg_attrib_cit.ct_attrs = NULL;
+	TF_CIT_TMPL(npiv_fabric)->tfc_tpg_param_cit.ct_attrs = NULL;
+	TF_CIT_TMPL(npiv_fabric)->tfc_tpg_np_base_cit.ct_attrs = NULL;
+	TF_CIT_TMPL(npiv_fabric)->tfc_tpg_nacl_base_cit.ct_attrs = NULL;
+	TF_CIT_TMPL(npiv_fabric)->tfc_tpg_nacl_attrib_cit.ct_attrs = NULL;
+	TF_CIT_TMPL(npiv_fabric)->tfc_tpg_nacl_auth_cit.ct_attrs = NULL;
+	TF_CIT_TMPL(npiv_fabric)->tfc_tpg_nacl_param_cit.ct_attrs = NULL;
+	/*
+	 * Register the npiv_fabric for use within TCM
+	 */
+	ret = target_fabric_configfs_register(npiv_fabric);
+	if (ret < 0) {
+		pr_err("target_fabric_configfs_register() failed for TCM_QLA2XXX\n");
+		goto out_fabric;
+	}
+	/*
+	 * Setup our local pointer to *npiv_fabric
+	 */
+	tcm_qla2xxx_npiv_fabric_configfs = npiv_fabric;
+	pr_debug("TCM_QLA2XXX[0] - Set fabric -> tcm_qla2xxx_npiv_fabric_configfs\n");
+
+	tcm_qla2xxx_free_wq = alloc_workqueue("tcm_qla2xxx_free",
+						WQ_MEM_RECLAIM, 0);
+	if (!tcm_qla2xxx_free_wq) {
+		ret = -ENOMEM;
+		goto out_fabric_npiv;
+	}
+
+	tcm_qla2xxx_cmd_wq = alloc_workqueue("tcm_qla2xxx_cmd", 0, 0);
+	if (!tcm_qla2xxx_cmd_wq) {
+		ret = -ENOMEM;
+		goto out_free_wq;
+	}
+
+	return 0;
+
+out_free_wq:
+	destroy_workqueue(tcm_qla2xxx_free_wq);
+out_fabric_npiv:
+	target_fabric_configfs_deregister(tcm_qla2xxx_npiv_fabric_configfs);
+out_fabric:
+	target_fabric_configfs_deregister(tcm_qla2xxx_fabric_configfs);
+	return ret;
+}
+
+static void tcm_qla2xxx_deregister_configfs(void)
+{
+	destroy_workqueue(tcm_qla2xxx_cmd_wq);
+	destroy_workqueue(tcm_qla2xxx_free_wq);
+
+	target_fabric_configfs_deregister(tcm_qla2xxx_fabric_configfs);
+	tcm_qla2xxx_fabric_configfs = NULL;
+	pr_debug("TCM_QLA2XXX[0] - Cleared tcm_qla2xxx_fabric_configfs\n");
+
+	target_fabric_configfs_deregister(tcm_qla2xxx_npiv_fabric_configfs);
+	tcm_qla2xxx_npiv_fabric_configfs = NULL;
+	pr_debug("TCM_QLA2XXX[0] - Cleared tcm_qla2xxx_npiv_fabric_configfs\n");
+}
+
+static int __init tcm_qla2xxx_init(void)
+{
+	int ret;
+
+	ret = tcm_qla2xxx_register_configfs();
+	if (ret < 0)
+		return ret;
+
+	return 0;
+}
+
+static void __exit tcm_qla2xxx_exit(void)
+{
+	tcm_qla2xxx_deregister_configfs();
+}
+
+MODULE_DESCRIPTION("TCM QLA2XXX series NPIV enabled fabric driver");
+MODULE_LICENSE("GPL");
+module_init(tcm_qla2xxx_init);
+module_exit(tcm_qla2xxx_exit);
diff --git a/drivers/scsi/qla2xxx/tcm_qla2xxx.h b/drivers/scsi/qla2xxx/tcm_qla2xxx.h
new file mode 100644
index 0000000..8254981
--- /dev/null
+++ b/drivers/scsi/qla2xxx/tcm_qla2xxx.h
@@ -0,0 +1,82 @@
+#include <target/target_core_base.h>
+#include <linux/btree.h>
+
+#define TCM_QLA2XXX_VERSION	"v0.1"
+/* length of ASCII WWPNs including pad */
+#define TCM_QLA2XXX_NAMELEN	32
+/* length of ASCII NPIV 'WWPN+WWNN' including pad */
+#define TCM_QLA2XXX_NPIV_NAMELEN 66
+
+#include "qla_target.h"
+
+struct tcm_qla2xxx_nacl {
+	/* From libfc struct fc_rport->port_id */
+	u32 nport_id;
+	/* Binary World Wide unique Node Name for remote FC Initiator Nport */
+	u64 nport_wwnn;
+	/* ASCII formatted WWPN for FC Initiator Nport */
+	char nport_name[TCM_QLA2XXX_NAMELEN];
+	/* Pointer to qla_tgt_sess */
+	struct qla_tgt_sess *qla_tgt_sess;
+	/* Pointer to TCM FC nexus */
+	struct se_session *nport_nexus;
+	/* Returned by tcm_qla2xxx_make_nodeacl() */
+	struct se_node_acl se_node_acl;
+};
+
+struct tcm_qla2xxx_tpg_attrib {
+	int generate_node_acls;
+	int cache_dynamic_acls;
+	int demo_mode_write_protect;
+	int prod_mode_write_protect;
+};
+
+struct tcm_qla2xxx_tpg {
+	/* FC lport target portal group tag for TCM */
+	u16 lport_tpgt;
+	/* Atomic bit to determine TPG active status */
+	atomic_t lport_tpg_enabled;
+	/* Pointer back to tcm_qla2xxx_lport */
+	struct tcm_qla2xxx_lport *lport;
+	/* Used by tcm_qla2xxx_tpg_attrib_cit */
+	struct tcm_qla2xxx_tpg_attrib tpg_attrib;
+	/* Returned by tcm_qla2xxx_make_tpg() */
+	struct se_portal_group se_tpg;
+};
+
+#define QLA_TPG_ATTRIB(tpg)	(&(tpg)->tpg_attrib)
+
+struct tcm_qla2xxx_fc_loopid {
+	struct se_node_acl *se_nacl;
+};
+
+struct tcm_qla2xxx_lport {
+	/* SCSI protocol the lport is providing */
+	u8 lport_proto_id;
+	/* Binary World Wide unique Port Name for FC Target Lport */
+	u64 lport_wwpn;
+	/* Binary World Wide unique Port Name for FC NPIV Target Lport */
+	u64 lport_npiv_wwpn;
+	/* Binary World Wide unique Node Name for FC NPIV Target Lport */
+	u64 lport_npiv_wwnn;
+	/* ASCII formatted WWPN for FC Target Lport */
+	char lport_name[TCM_QLA2XXX_NAMELEN];
+	/* ASCII formatted WWPN+WWNN for NPIV FC Target Lport */
+	char lport_npiv_name[TCM_QLA2XXX_NPIV_NAMELEN];
+	/* map for fc_port pointers in 24-bit FC Port ID space */
+	struct btree_head32 lport_fcport_map;
+	/* vmalloc-ed memory for fc_port pointers for 16-bit FC loop ID */
+	struct tcm_qla2xxx_fc_loopid *lport_loopid_map;
+	/* Pointer to struct scsi_qla_host from qla2xxx LLD */
+	struct scsi_qla_host *qla_vha;
+	/* Pointer to struct scsi_qla_host for NPIV VP from qla2xxx LLD */
+	struct scsi_qla_host *qla_npiv_vp;
+	/* Embedded struct qla_tgt for this lport */
+	struct qla_tgt lport_qla_tgt;
+	/* Pointer to struct fc_vport for NPIV vport from libfc */
+	struct fc_vport *npiv_vport;
+	/* Pointer to TPG=1 for non NPIV mode */
+	struct tcm_qla2xxx_tpg *tpg_1;
+	/* Returned by tcm_qla2xxx_make_lport() */
+	struct se_wwn lport_wwn;
+};
diff --git a/drivers/scsi/qla4xxx/ql4_attr.c b/drivers/scsi/qla4xxx/ql4_attr.c
index 0b0a7d4..c681b2a 100644
--- a/drivers/scsi/qla4xxx/ql4_attr.c
+++ b/drivers/scsi/qla4xxx/ql4_attr.c
@@ -9,6 +9,140 @@
 #include "ql4_glbl.h"
 #include "ql4_dbg.h"
 
+static ssize_t
+qla4_8xxx_sysfs_read_fw_dump(struct file *filep, struct kobject *kobj,
+			     struct bin_attribute *ba, char *buf, loff_t off,
+			     size_t count)
+{
+	struct scsi_qla_host *ha = to_qla_host(dev_to_shost(container_of(kobj,
+					       struct device, kobj)));
+
+	if (!is_qla8022(ha))
+		return -EINVAL;
+
+	if (!test_bit(AF_82XX_DUMP_READING, &ha->flags))
+		return 0;
+
+	return memory_read_from_buffer(buf, count, &off, ha->fw_dump,
+				       ha->fw_dump_size);
+}
+
+static ssize_t
+qla4_8xxx_sysfs_write_fw_dump(struct file *filep, struct kobject *kobj,
+			      struct bin_attribute *ba, char *buf, loff_t off,
+			      size_t count)
+{
+	struct scsi_qla_host *ha = to_qla_host(dev_to_shost(container_of(kobj,
+					       struct device, kobj)));
+	uint32_t dev_state;
+	long reading;
+	int ret = 0;
+
+	if (!is_qla8022(ha))
+		return -EINVAL;
+
+	if (off != 0)
+		return ret;
+
+	buf[1] = 0;
+	ret = kstrtol(buf, 10, &reading);
+	if (ret) {
+		ql4_printk(KERN_ERR, ha, "%s: Invalid input. Return err %d\n",
+			   __func__, ret);
+		return ret;
+	}
+
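+	/* 0 = clear dump state and reload template, 1 = mark dump ready for read, 2 = request HBA reset */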
+	switch (reading) {
+	case 0:
+		/* clear dump collection flags */
+		if (test_and_clear_bit(AF_82XX_DUMP_READING, &ha->flags)) {
+			clear_bit(AF_82XX_FW_DUMPED, &ha->flags);
+			/* Reload minidump template */
+			qla4xxx_alloc_fw_dump(ha);
+			DEBUG2(ql4_printk(KERN_INFO, ha,
+					  "Firmware template reloaded\n"));
+		}
+		break;
+	case 1:
+		/* Set flag to read dump */
+		if (test_bit(AF_82XX_FW_DUMPED, &ha->flags) &&
+		    !test_bit(AF_82XX_DUMP_READING, &ha->flags)) {
+			set_bit(AF_82XX_DUMP_READING, &ha->flags);
+			DEBUG2(ql4_printk(KERN_INFO, ha,
+					  "Raw firmware dump ready for read on (%ld).\n",
+					  ha->host_no));
+		}
+		break;
+	case 2:
+		/* Reset HBA */
+		qla4_8xxx_idc_lock(ha);
+		dev_state = qla4_8xxx_rd_32(ha, QLA82XX_CRB_DEV_STATE);
+		if (dev_state == QLA82XX_DEV_READY) {
+			ql4_printk(KERN_INFO, ha,
+				   "%s: Setting Need reset, reset_owner is 0x%x.\n",
+				   __func__, ha->func_num);
+			qla4_8xxx_wr_32(ha, QLA82XX_CRB_DEV_STATE,
+					QLA82XX_DEV_NEED_RESET);
+			set_bit(AF_82XX_RST_OWNER, &ha->flags);
+		} else
+			ql4_printk(KERN_INFO, ha,
+				   "%s: Reset not performed as device state is 0x%x\n",
+				   __func__, dev_state);
+
+		qla4_8xxx_idc_unlock(ha);
+		break;
+	default:
+		/* do nothing */
+		break;
+	}
+
+	return count;
+}
+
+static struct bin_attribute sysfs_fw_dump_attr = {
+	.attr = {
+		.name = "fw_dump",
+		.mode = S_IRUSR | S_IWUSR,
+	},
+	.size = 0,
+	.read = qla4_8xxx_sysfs_read_fw_dump,
+	.write = qla4_8xxx_sysfs_write_fw_dump,
+};
+
+static struct sysfs_entry {
+	char *name;
+	struct bin_attribute *attr;
+} bin_file_entries[] = {
+	{ "fw_dump", &sysfs_fw_dump_attr },
+	{ NULL },
+};
+
+void qla4_8xxx_alloc_sysfs_attr(struct scsi_qla_host *ha)
+{
+	struct Scsi_Host *host = ha->host;
+	struct sysfs_entry *iter;
+	int ret;
+
+	for (iter = bin_file_entries; iter->name; iter++) {
+		ret = sysfs_create_bin_file(&host->shost_gendev.kobj,
+					    iter->attr);
+		if (ret)
+			ql4_printk(KERN_ERR, ha,
+				   "Unable to create sysfs %s binary attribute (%d).\n",
+				   iter->name, ret);
+	}
+}
+
+void qla4_8xxx_free_sysfs_attr(struct scsi_qla_host *ha)
+{
+	struct Scsi_Host *host = ha->host;
+	struct sysfs_entry *iter;
+
+	for (iter = bin_file_entries; iter->name; iter++)
+		sysfs_remove_bin_file(&host->shost_gendev.kobj,
+				      iter->attr);
+}
+
 /* Scsi_Host attributes. */
 static ssize_t
 qla4xxx_fw_version_show(struct device *dev,
diff --git a/drivers/scsi/qla4xxx/ql4_def.h b/drivers/scsi/qla4xxx/ql4_def.h
index 7f2492e..96a5616 100644
--- a/drivers/scsi/qla4xxx/ql4_def.h
+++ b/drivers/scsi/qla4xxx/ql4_def.h
@@ -398,6 +398,16 @@
 	int (*get_sys_info) (struct scsi_qla_host *);
 };
 
+struct ql4_mdump_size_table {
+	uint32_t size;
+	uint32_t size_cmask_02;
+	uint32_t size_cmask_04;
+	uint32_t size_cmask_08;
+	uint32_t size_cmask_10;
+	uint32_t size_cmask_FF;
+	uint32_t version;
+};
+
 /*qla4xxx ipaddress configuration details */
 struct ipaddress_config {
 	uint16_t ipv4_options;
@@ -485,6 +495,10 @@
 #define AF_EEH_BUSY			20 /* 0x00100000 */
 #define AF_PCI_CHANNEL_IO_PERM_FAILURE	21 /* 0x00200000 */
 #define AF_BUILD_DDB_LIST		22 /* 0x00400000 */
+#define AF_82XX_FW_DUMPED		24 /* 0x01000000 */
+#define AF_82XX_RST_OWNER		25 /* 0x02000000 */
+#define AF_82XX_DUMP_READING		26 /* 0x04000000 */
+
 	unsigned long dpc_flags;
 
 #define DPC_RESET_HA			1 /* 0x00000002 */
@@ -662,6 +676,11 @@
 
 	uint32_t nx_dev_init_timeout;
 	uint32_t nx_reset_timeout;
+	void *fw_dump;
+	uint32_t fw_dump_size;
+	uint32_t fw_dump_capture_mask;
+	void *fw_dump_tmplt_hdr;
+	uint32_t fw_dump_tmplt_size;
 
 	struct completion mbx_intr_comp;
 
@@ -936,4 +955,7 @@
 #define PROCESS_ALL_AENS	 0
 #define FLUSH_DDB_CHANGED_AENS	 1
 
+/* Defines for udev events */
+#define QL4_UEVENT_CODE_FW_DUMP		0
+
 #endif	/*_QLA4XXX_H */
diff --git a/drivers/scsi/qla4xxx/ql4_fw.h b/drivers/scsi/qla4xxx/ql4_fw.h
index 210cd1d..7240948 100644
--- a/drivers/scsi/qla4xxx/ql4_fw.h
+++ b/drivers/scsi/qla4xxx/ql4_fw.h
@@ -385,6 +385,11 @@
 #define MBOX_CMD_GET_IP_ADDR_STATE		0x0091
 #define MBOX_CMD_SEND_IPV6_ROUTER_SOL		0x0092
 #define MBOX_CMD_GET_DB_ENTRY_CURRENT_IP_ADDR	0x0093
+#define MBOX_CMD_MINIDUMP			0x0129
+
+/* Minidump subcommand */
+#define MINIDUMP_GET_SIZE_SUBCOMMAND		0x00
+#define MINIDUMP_GET_TMPLT_SUBCOMMAND		0x01
 
 /* Mailbox 1 */
 #define FW_STATE_READY				0x0000
@@ -1190,4 +1195,27 @@
 	uint8_t reserved2[264]; /* 0x0308 - 0x040F */
 };
 
+#define QLA82XX_DBG_STATE_ARRAY_LEN		16
+#define QLA82XX_DBG_CAP_SIZE_ARRAY_LEN		8
+#define QLA82XX_DBG_RSVD_ARRAY_LEN		8
+
+struct qla4_8xxx_minidump_template_hdr {
+	uint32_t entry_type;
+	uint32_t first_entry_offset;
+	uint32_t size_of_template;
+	uint32_t capture_debug_level;
+	uint32_t num_of_entries;
+	uint32_t version;
+	uint32_t driver_timestamp;
+	uint32_t checksum;
+
+	uint32_t driver_capture_mask;
+	uint32_t driver_info_word2;
+	uint32_t driver_info_word3;
+	uint32_t driver_info_word4;
+
+	uint32_t saved_state_array[QLA82XX_DBG_STATE_ARRAY_LEN];
+	uint32_t capture_size_array[QLA82XX_DBG_CAP_SIZE_ARRAY_LEN];
+};
+
 #endif /*  _QLA4X_FW_H */
diff --git a/drivers/scsi/qla4xxx/ql4_glbl.h b/drivers/scsi/qla4xxx/ql4_glbl.h
index 9105366..20b49d0 100644
--- a/drivers/scsi/qla4xxx/ql4_glbl.h
+++ b/drivers/scsi/qla4xxx/ql4_glbl.h
@@ -196,10 +196,18 @@
 int qla4xxx_process_vendor_specific(struct bsg_job *bsg_job);
 
 void qla4xxx_arm_relogin_timer(struct ddb_entry *ddb_entry);
+int qla4xxx_get_minidump_template(struct scsi_qla_host *ha,
+				  dma_addr_t phys_addr);
+int qla4xxx_req_template_size(struct scsi_qla_host *ha);
+void qla4_8xxx_alloc_sysfs_attr(struct scsi_qla_host *ha);
+void qla4_8xxx_free_sysfs_attr(struct scsi_qla_host *ha);
+void qla4xxx_alloc_fw_dump(struct scsi_qla_host *ha);
 
 extern int ql4xextended_error_logging;
 extern int ql4xdontresethba;
 extern int ql4xenablemsix;
+extern int ql4xmdcapmask;
+extern int ql4xenablemd;
 
 extern struct device_attribute *qla4xxx_host_attrs[];
 #endif /* _QLA4x_GBL_H */
diff --git a/drivers/scsi/qla4xxx/ql4_init.c b/drivers/scsi/qla4xxx/ql4_init.c
index 90ee5d8..bf36723 100644
--- a/drivers/scsi/qla4xxx/ql4_init.c
+++ b/drivers/scsi/qla4xxx/ql4_init.c
@@ -277,6 +277,94 @@
 	return ipv4_wait|ipv6_wait;
 }
 
+/**
+ * qla4xxx_alloc_fw_dump - Allocate memory for minidump data.
+ * @ha: pointer to host adapter structure.
+ **/
+void qla4xxx_alloc_fw_dump(struct scsi_qla_host *ha)
+{
+	int status;
+	uint32_t capture_debug_level;
+	int hdr_entry_bit, k;
+	void *md_tmp;
+	dma_addr_t md_tmp_dma;
+	struct qla4_8xxx_minidump_template_hdr *md_hdr;
+
+	if (ha->fw_dump) {
+		ql4_printk(KERN_WARNING, ha,
+			   "Firmware dump previously allocated.\n");
+		return;
+	}
+
+	status = qla4xxx_req_template_size(ha);
+	if (status != QLA_SUCCESS) {
+		ql4_printk(KERN_INFO, ha,
+			   "scsi%ld: Failed to get template size\n",
+			   ha->host_no);
+		return;
+	}
+
+	clear_bit(AF_82XX_FW_DUMPED, &ha->flags);
+
+	/* Allocate memory for saving the template */
+	md_tmp = dma_alloc_coherent(&ha->pdev->dev, ha->fw_dump_tmplt_size,
+				    &md_tmp_dma, GFP_KERNEL);
+
+	/* Request template */
+	status =  qla4xxx_get_minidump_template(ha, md_tmp_dma);
+	if (status != QLA_SUCCESS) {
+		ql4_printk(KERN_INFO, ha,
+			   "scsi%ld: Failed to get minidump template\n",
+			   ha->host_no);
+		goto alloc_cleanup;
+	}
+
+	md_hdr = (struct qla4_8xxx_minidump_template_hdr *)md_tmp;
+
+	capture_debug_level = md_hdr->capture_debug_level;
+
+	/* Get capture mask based on module loadtime setting. */
+	if (ql4xmdcapmask >= 0x3 && ql4xmdcapmask <= 0x7F)
+		ha->fw_dump_capture_mask = ql4xmdcapmask;
+	else
+		ha->fw_dump_capture_mask = capture_debug_level;
+
+	md_hdr->driver_capture_mask = ha->fw_dump_capture_mask;
+
+	DEBUG2(ql4_printk(KERN_INFO, ha, "Minimum num of entries = %d\n",
+			  md_hdr->num_of_entries));
+	DEBUG2(ql4_printk(KERN_INFO, ha, "Dump template size = %d\n",
+			  ha->fw_dump_tmplt_size));
+	DEBUG2(ql4_printk(KERN_INFO, ha, "Selected Capture mask = 0x%x\n",
+			  ha->fw_dump_capture_mask));
+
+	/* Calculate fw_dump_size */
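+	/* Each capture-mask bit (0x02..0x80) selects the matching capture_size_array[] entry */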
+	for (hdr_entry_bit = 0x2, k = 1; (hdr_entry_bit & 0xFF);
+	     hdr_entry_bit <<= 1, k++) {
+		if (hdr_entry_bit & ha->fw_dump_capture_mask)
+			ha->fw_dump_size += md_hdr->capture_size_array[k];
+	}
+
+	/* Total firmware dump size including command header */
+	ha->fw_dump_size += ha->fw_dump_tmplt_size;
+	ha->fw_dump = vmalloc(ha->fw_dump_size);
+	if (!ha->fw_dump)
+		goto alloc_cleanup;
+
+	DEBUG2(ql4_printk(KERN_INFO, ha,
+			  "Minidump Template Size = 0x%x bytes\n",
+			  ha->fw_dump_tmplt_size));
+	DEBUG2(ql4_printk(KERN_INFO, ha,
+			  "Total Minidump size = 0x%x bytes\n", ha->fw_dump_size));
+
+	memcpy(ha->fw_dump, md_tmp, ha->fw_dump_tmplt_size);
+	ha->fw_dump_tmplt_hdr = ha->fw_dump;
+
+alloc_cleanup:
+	dma_free_coherent(&ha->pdev->dev, ha->fw_dump_tmplt_size,
+			  md_tmp, md_tmp_dma);
+}
+
 static int qla4xxx_fw_ready(struct scsi_qla_host *ha)
 {
 	uint32_t timeout_count;
@@ -445,9 +533,13 @@
 			      "control block\n", ha->host_no, __func__));
 		return status;
 	}
+
 	if (!qla4xxx_fw_ready(ha))
 		return status;
 
+	if (is_qla8022(ha) && !test_bit(AF_INIT_DONE, &ha->flags))
+		qla4xxx_alloc_fw_dump(ha);
+
 	return qla4xxx_get_firmware_status(ha);
 }
 
@@ -884,8 +976,8 @@
 		switch (state) {
 		case DDB_DS_SESSION_ACTIVE:
 		case DDB_DS_DISCOVERY:
-			ddb_entry->unblock_sess(ddb_entry->sess);
 			qla4xxx_update_session_conn_param(ha, ddb_entry);
+			ddb_entry->unblock_sess(ddb_entry->sess);
 			status = QLA_SUCCESS;
 			break;
 		case DDB_DS_SESSION_FAILED:
@@ -897,6 +989,7 @@
 		}
 		break;
 	case DDB_DS_SESSION_ACTIVE:
+	case DDB_DS_DISCOVERY:
 		switch (state) {
 		case DDB_DS_SESSION_FAILED:
 			/*
diff --git a/drivers/scsi/qla4xxx/ql4_mbx.c b/drivers/scsi/qla4xxx/ql4_mbx.c
index 7ac21da..cab8f66 100644
--- a/drivers/scsi/qla4xxx/ql4_mbx.c
+++ b/drivers/scsi/qla4xxx/ql4_mbx.c
@@ -51,25 +51,6 @@
 		}
 	}
 
-	if (is_qla8022(ha)) {
-		if (test_bit(AF_FW_RECOVERY, &ha->flags)) {
-			DEBUG2(ql4_printk(KERN_WARNING, ha, "scsi%ld: %s: "
-			    "prematurely completing mbx cmd as firmware "
-			    "recovery detected\n", ha->host_no, __func__));
-			return status;
-		}
-		/* Do not send any mbx cmd if h/w is in failed state*/
-		qla4_8xxx_idc_lock(ha);
-		dev_state = qla4_8xxx_rd_32(ha, QLA82XX_CRB_DEV_STATE);
-		qla4_8xxx_idc_unlock(ha);
-		if (dev_state == QLA82XX_DEV_FAILED) {
-			ql4_printk(KERN_WARNING, ha, "scsi%ld: %s: H/W is in "
-			    "failed state, do not send any mailbox commands\n",
-			    ha->host_no, __func__);
-			return status;
-		}
-	}
-
 	if ((is_aer_supported(ha)) &&
 	    (test_bit(AF_PCI_CHANNEL_IO_PERM_FAILURE, &ha->flags))) {
 		DEBUG2(printk(KERN_WARNING "scsi%ld: %s: Perm failure on EEH, "
@@ -96,6 +77,25 @@
 		msleep(10);
 	}
 
+	if (is_qla8022(ha)) {
+		if (test_bit(AF_FW_RECOVERY, &ha->flags)) {
+			DEBUG2(ql4_printk(KERN_WARNING, ha,
+					  "scsi%ld: %s: prematurely completing mbx cmd as firmware recovery detected\n",
+					  ha->host_no, __func__));
+			goto mbox_exit;
+		}
+		/* Do not send any mbx cmd if h/w is in failed state*/
+		qla4_8xxx_idc_lock(ha);
+		dev_state = qla4_8xxx_rd_32(ha, QLA82XX_CRB_DEV_STATE);
+		qla4_8xxx_idc_unlock(ha);
+		if (dev_state == QLA82XX_DEV_FAILED) {
+			ql4_printk(KERN_WARNING, ha,
+				   "scsi%ld: %s: H/W is in failed state, do not send any mailbox commands\n",
+				   ha->host_no, __func__);
+			goto mbox_exit;
+		}
+	}
+
 	spin_lock_irqsave(&ha->hardware_lock, flags);
 
 	ha->mbox_status_count = outCount;
@@ -270,6 +270,79 @@
 	return status;
 }
 
+/**
+ * qla4xxx_get_minidump_template - Get the firmware template
+ * @ha: Pointer to host adapter structure.
+ * @phys_addr: dma address for template
+ *
+ * Obtain the minidump template from firmware during initialization
+ * as it may not be available when minidump is desired.
+ **/
+int qla4xxx_get_minidump_template(struct scsi_qla_host *ha,
+				  dma_addr_t phys_addr)
+{
+	uint32_t mbox_cmd[MBOX_REG_COUNT];
+	uint32_t mbox_sts[MBOX_REG_COUNT];
+	int status;
+
+	memset(&mbox_cmd, 0, sizeof(mbox_cmd));
+	memset(&mbox_sts, 0, sizeof(mbox_sts));
+
+	mbox_cmd[0] = MBOX_CMD_MINIDUMP;
+	mbox_cmd[1] = MINIDUMP_GET_TMPLT_SUBCOMMAND;
+	mbox_cmd[2] = LSDW(phys_addr);
+	mbox_cmd[3] = MSDW(phys_addr);
+	mbox_cmd[4] = ha->fw_dump_tmplt_size;
+	mbox_cmd[5] = 0;
+
+	status = qla4xxx_mailbox_command(ha, MBOX_REG_COUNT, 2, &mbox_cmd[0],
+					 &mbox_sts[0]);
+	if (status != QLA_SUCCESS) {
+		DEBUG2(ql4_printk(KERN_INFO, ha,
+				  "scsi%ld: %s: Cmd = %08X, mbx[0] = 0x%04x, mbx[1] = 0x%04x\n",
+				  ha->host_no, __func__, mbox_cmd[0],
+				  mbox_sts[0], mbox_sts[1]));
+	}
+	return status;
+}
+
+/**
+ * qla4xxx_req_template_size - Get minidump template size from firmware.
+ * @ha: Pointer to host adapter structure.
+ **/
+int qla4xxx_req_template_size(struct scsi_qla_host *ha)
+{
+	uint32_t mbox_cmd[MBOX_REG_COUNT];
+	uint32_t mbox_sts[MBOX_REG_COUNT];
+	int status;
+
+	memset(&mbox_cmd, 0, sizeof(mbox_cmd));
+	memset(&mbox_sts, 0, sizeof(mbox_sts));
+
+	mbox_cmd[0] = MBOX_CMD_MINIDUMP;
+	mbox_cmd[1] = MINIDUMP_GET_SIZE_SUBCOMMAND;
+
+	status = qla4xxx_mailbox_command(ha, MBOX_REG_COUNT, 8, &mbox_cmd[0],
+					 &mbox_sts[0]);
+	if (status == QLA_SUCCESS) {
+		ha->fw_dump_tmplt_size = mbox_sts[1];
+		DEBUG2(ql4_printk(KERN_INFO, ha,
+				  "%s: sts[0]=0x%04x, template  size=0x%04x, size_cm_02=0x%04x, size_cm_04=0x%04x, size_cm_08=0x%04x, size_cm_10=0x%04x, size_cm_FF=0x%04x, version=0x%04x\n",
+				  __func__, mbox_sts[0], mbox_sts[1],
+				  mbox_sts[2], mbox_sts[3], mbox_sts[4],
+				  mbox_sts[5], mbox_sts[6], mbox_sts[7]));
+		if (ha->fw_dump_tmplt_size == 0)
+			status = QLA_ERROR;
+	} else {
+		ql4_printk(KERN_WARNING, ha,
+			   "%s: Error sts[0]=0x%04x, mbx[1]=0x%04x\n",
+			   __func__, mbox_sts[0], mbox_sts[1]);
+		status = QLA_ERROR;
+	}
+
+	return status;
+}
+
 void qla4xxx_mailbox_premature_completion(struct scsi_qla_host *ha)
 {
 	set_bit(AF_FW_RECOVERY, &ha->flags);
diff --git a/drivers/scsi/qla4xxx/ql4_nx.c b/drivers/scsi/qla4xxx/ql4_nx.c
index e1e46b6..228b670 100644
--- a/drivers/scsi/qla4xxx/ql4_nx.c
+++ b/drivers/scsi/qla4xxx/ql4_nx.c
@@ -7,6 +7,7 @@
 #include <linux/delay.h>
 #include <linux/io.h>
 #include <linux/pci.h>
+#include <linux/ratelimit.h>
 #include "ql4_def.h"
 #include "ql4_glbl.h"
 
@@ -420,6 +421,38 @@
 	return data;
 }
 
+/* Minidump related functions */
+static int qla4_8xxx_md_rw_32(struct scsi_qla_host *ha, uint32_t off,
+			      u32 data, uint8_t flag)
+{
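+	/* flag != 0: write 'data' to the CRB offset; flag == 0: return the value read back */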
+	uint32_t win_read, off_value, rval = QLA_SUCCESS;
+
+	off_value  = off & 0xFFFF0000;
+	writel(off_value, (void __iomem *)(CRB_WINDOW_2M + ha->nx_pcibase));
+
+	/* Read back value to make sure write has gone through before trying
+	 * to use it.
+	 */
+	win_read = readl((void __iomem *)(CRB_WINDOW_2M + ha->nx_pcibase));
+	if (win_read != off_value) {
+		DEBUG2(ql4_printk(KERN_INFO, ha,
+				  "%s: Written (0x%x) != Read (0x%x), off=0x%x\n",
+				   __func__, off_value, win_read, off));
+		return QLA_ERROR;
+	}
+
+	off_value  = off & 0x0000FFFF;
+
+	if (flag)
+		writel(data, (void __iomem *)(off_value + CRB_INDIRECT_2M +
+					      ha->nx_pcibase));
+	else
+		rval = readl((void __iomem *)(off_value + CRB_INDIRECT_2M +
+					      ha->nx_pcibase));
+
+	return rval;
+}
+
 #define CRB_WIN_LOCK_TIMEOUT 100000000
 
 int qla4_8xxx_crb_win_lock(struct scsi_qla_host *ha)
@@ -1252,9 +1285,9 @@
 		}
 
 		if (j >= MAX_CTL_CHECK) {
-			if (printk_ratelimit())
-				ql4_printk(KERN_ERR, ha,
-				    "failed to read through agent\n");
+			printk_ratelimited(KERN_ERR
+					   "%s: failed to read through agent\n",
+					   __func__);
 			break;
 		}
 
@@ -1390,7 +1423,8 @@
 		if (j >= MAX_CTL_CHECK) {
 			if (printk_ratelimit())
 				ql4_printk(KERN_ERR, ha,
-				    "failed to write through agent\n");
+					   "%s: failed to write through agent\n",
+					   __func__);
 			ret = -1;
 			break;
 		}
@@ -1462,6 +1496,8 @@
 
 	drv_active = qla4_8xxx_rd_32(ha, QLA82XX_CRB_DRV_ACTIVE);
 	drv_active |= (1 << (ha->func_num * 4));
+	ql4_printk(KERN_INFO, ha, "%s(%ld): drv_active: 0x%08x\n",
+		   __func__, ha->host_no, drv_active);
 	qla4_8xxx_wr_32(ha, QLA82XX_CRB_DRV_ACTIVE, drv_active);
 }
 
@@ -1472,6 +1508,8 @@
 
 	drv_active = qla4_8xxx_rd_32(ha, QLA82XX_CRB_DRV_ACTIVE);
 	drv_active &= ~(1 << (ha->func_num * 4));
+	ql4_printk(KERN_INFO, ha, "%s(%ld): drv_active: 0x%08x\n",
+		   __func__, ha->host_no, drv_active);
 	qla4_8xxx_wr_32(ha, QLA82XX_CRB_DRV_ACTIVE, drv_active);
 }
 
@@ -1497,6 +1535,8 @@
 
 	drv_state = qla4_8xxx_rd_32(ha, QLA82XX_CRB_DRV_STATE);
 	drv_state |= (1 << (ha->func_num * 4));
+	ql4_printk(KERN_INFO, ha, "%s(%ld): drv_state: 0x%08x\n",
+		   __func__, ha->host_no, drv_state);
 	qla4_8xxx_wr_32(ha, QLA82XX_CRB_DRV_STATE, drv_state);
 }
 
@@ -1507,6 +1547,8 @@
 
 	drv_state = qla4_8xxx_rd_32(ha, QLA82XX_CRB_DRV_STATE);
 	drv_state &= ~(1 << (ha->func_num * 4));
+	ql4_printk(KERN_INFO, ha, "%s(%ld): drv_state: 0x%08x\n",
+		   __func__, ha->host_no, drv_state);
 	qla4_8xxx_wr_32(ha, QLA82XX_CRB_DRV_STATE, drv_state);
 }
 
@@ -1601,6 +1643,629 @@
 	qla4_8xxx_rom_unlock(ha);
 }
 
+static void qla4_8xxx_minidump_process_rdcrb(struct scsi_qla_host *ha,
+				struct qla82xx_minidump_entry_hdr *entry_hdr,
+				uint32_t **d_ptr)
+{
+	uint32_t r_addr, r_stride, loop_cnt, i, r_value;
+	struct qla82xx_minidump_entry_crb *crb_hdr;
+	uint32_t *data_ptr = *d_ptr;
+
+	DEBUG2(ql4_printk(KERN_INFO, ha, "Entering fn: %s\n", __func__));
+	crb_hdr = (struct qla82xx_minidump_entry_crb *)entry_hdr;
+	r_addr = crb_hdr->addr;
+	r_stride = crb_hdr->crb_strd.addr_stride;
+	loop_cnt = crb_hdr->op_count;
+
+	for (i = 0; i < loop_cnt; i++) {
+		r_value = qla4_8xxx_md_rw_32(ha, r_addr, 0, 0);
+		*data_ptr++ = cpu_to_le32(r_addr);
+		*data_ptr++ = cpu_to_le32(r_value);
+		r_addr += r_stride;
+	}
+	*d_ptr = data_ptr;
+}
+
+static int qla4_8xxx_minidump_process_l2tag(struct scsi_qla_host *ha,
+				 struct qla82xx_minidump_entry_hdr *entry_hdr,
+				 uint32_t **d_ptr)
+{
+	uint32_t addr, r_addr, c_addr, t_r_addr;
+	uint32_t i, k, loop_count, t_value, r_cnt, r_value;
+	unsigned long p_wait, w_time, p_mask;
+	uint32_t c_value_w, c_value_r;
+	struct qla82xx_minidump_entry_cache *cache_hdr;
+	int rval = QLA_ERROR;
+	uint32_t *data_ptr = *d_ptr;
+
+	DEBUG2(ql4_printk(KERN_INFO, ha, "Entering fn: %s\n", __func__));
+	cache_hdr = (struct qla82xx_minidump_entry_cache *)entry_hdr;
+
+	loop_count = cache_hdr->op_count;
+	r_addr = cache_hdr->read_addr;
+	c_addr = cache_hdr->control_addr;
+	c_value_w = cache_hdr->cache_ctrl.write_value;
+
+	t_r_addr = cache_hdr->tag_reg_addr;
+	t_value = cache_hdr->addr_ctrl.init_tag_value;
+	r_cnt = cache_hdr->read_ctrl.read_addr_cnt;
+	p_wait = cache_hdr->cache_ctrl.poll_wait;
+	p_mask = cache_hdr->cache_ctrl.poll_mask;
+
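+	/* For each tag: select it, optionally kick the controller and poll, then read r_cnt words */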
+	for (i = 0; i < loop_count; i++) {
+		qla4_8xxx_md_rw_32(ha, t_r_addr, t_value, 1);
+
+		if (c_value_w)
+			qla4_8xxx_md_rw_32(ha, c_addr, c_value_w, 1);
+
+		if (p_mask) {
+			w_time = jiffies + p_wait;
+			do {
+				c_value_r = qla4_8xxx_md_rw_32(ha, c_addr,
+								0, 0);
+				if ((c_value_r & p_mask) == 0) {
+					break;
+				} else if (time_after_eq(jiffies, w_time)) {
+					/* capturing dump failed */
+					return rval;
+				}
+			} while (1);
+		}
+
+		addr = r_addr;
+		for (k = 0; k < r_cnt; k++) {
+			r_value = qla4_8xxx_md_rw_32(ha, addr, 0, 0);
+			*data_ptr++ = cpu_to_le32(r_value);
+			addr += cache_hdr->read_ctrl.read_addr_stride;
+		}
+
+		t_value += cache_hdr->addr_ctrl.tag_value_stride;
+	}
+	*d_ptr = data_ptr;
+	return QLA_SUCCESS;
+}
+
+static int qla4_8xxx_minidump_process_control(struct scsi_qla_host *ha,
+				struct qla82xx_minidump_entry_hdr *entry_hdr)
+{
+	struct qla82xx_minidump_entry_crb *crb_entry;
+	uint32_t read_value, opcode, poll_time, addr, index, rval = QLA_SUCCESS;
+	uint32_t crb_addr;
+	unsigned long wtime;
+	struct qla4_8xxx_minidump_template_hdr *tmplt_hdr;
+	int i;
+
+	DEBUG2(ql4_printk(KERN_INFO, ha, "Entering fn: %s\n", __func__));
+	tmplt_hdr = (struct qla4_8xxx_minidump_template_hdr *)
+						ha->fw_dump_tmplt_hdr;
+	crb_entry = (struct qla82xx_minidump_entry_crb *)entry_hdr;
+
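+	/* Opcodes are OR-able flags; each handled bit is cleared so one entry can chain operations */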
+	crb_addr = crb_entry->addr;
+	for (i = 0; i < crb_entry->op_count; i++) {
+		opcode = crb_entry->crb_ctrl.opcode;
+		if (opcode & QLA82XX_DBG_OPCODE_WR) {
+			qla4_8xxx_md_rw_32(ha, crb_addr,
+					   crb_entry->value_1, 1);
+			opcode &= ~QLA82XX_DBG_OPCODE_WR;
+		}
+		if (opcode & QLA82XX_DBG_OPCODE_RW) {
+			read_value = qla4_8xxx_md_rw_32(ha, crb_addr, 0, 0);
+			qla4_8xxx_md_rw_32(ha, crb_addr, read_value, 1);
+			opcode &= ~QLA82XX_DBG_OPCODE_RW;
+		}
+		if (opcode & QLA82XX_DBG_OPCODE_AND) {
+			read_value = qla4_8xxx_md_rw_32(ha, crb_addr, 0, 0);
+			read_value &= crb_entry->value_2;
+			opcode &= ~QLA82XX_DBG_OPCODE_AND;
+			if (opcode & QLA82XX_DBG_OPCODE_OR) {
+				read_value |= crb_entry->value_3;
+				opcode &= ~QLA82XX_DBG_OPCODE_OR;
+			}
+			qla4_8xxx_md_rw_32(ha, crb_addr, read_value, 1);
+		}
+		if (opcode & QLA82XX_DBG_OPCODE_OR) {
+			read_value = qla4_8xxx_md_rw_32(ha, crb_addr, 0, 0);
+			read_value |= crb_entry->value_3;
+			qla4_8xxx_md_rw_32(ha, crb_addr, read_value, 1);
+			opcode &= ~QLA82XX_DBG_OPCODE_OR;
+		}
+		if (opcode & QLA82XX_DBG_OPCODE_POLL) {
+			poll_time = crb_entry->crb_strd.poll_timeout;
+			wtime = jiffies + poll_time;
+			read_value = qla4_8xxx_md_rw_32(ha, crb_addr, 0, 0);
+
+			do {
+				if ((read_value & crb_entry->value_2) ==
+				    crb_entry->value_1)
+					break;
+				else if (time_after_eq(jiffies, wtime)) {
+					/* capturing dump failed */
+					rval = QLA_ERROR;
+					break;
+				} else
+					read_value = qla4_8xxx_md_rw_32(ha,
+								crb_addr, 0, 0);
+			} while (1);
+			opcode &= ~QLA82XX_DBG_OPCODE_POLL;
+		}
+
+		if (opcode & QLA82XX_DBG_OPCODE_RDSTATE) {
+			if (crb_entry->crb_strd.state_index_a) {
+				index = crb_entry->crb_strd.state_index_a;
+				addr = tmplt_hdr->saved_state_array[index];
+			} else {
+				addr = crb_addr;
+			}
+
+			read_value = qla4_8xxx_md_rw_32(ha, addr, 0, 0);
+			index = crb_entry->crb_ctrl.state_index_v;
+			tmplt_hdr->saved_state_array[index] = read_value;
+			opcode &= ~QLA82XX_DBG_OPCODE_RDSTATE;
+		}
+
+		if (opcode & QLA82XX_DBG_OPCODE_WRSTATE) {
+			if (crb_entry->crb_strd.state_index_a) {
+				index = crb_entry->crb_strd.state_index_a;
+				addr = tmplt_hdr->saved_state_array[index];
+			} else {
+				addr = crb_addr;
+			}
+
+			if (crb_entry->crb_ctrl.state_index_v) {
+				index = crb_entry->crb_ctrl.state_index_v;
+				read_value =
+					tmplt_hdr->saved_state_array[index];
+			} else {
+				read_value = crb_entry->value_1;
+			}
+
+			qla4_8xxx_md_rw_32(ha, addr, read_value, 1);
+			opcode &= ~QLA82XX_DBG_OPCODE_WRSTATE;
+		}
+
+		if (opcode & QLA82XX_DBG_OPCODE_MDSTATE) {
+			index = crb_entry->crb_ctrl.state_index_v;
+			read_value = tmplt_hdr->saved_state_array[index];
+			read_value <<= crb_entry->crb_ctrl.shl;
+			read_value >>= crb_entry->crb_ctrl.shr;
+			if (crb_entry->value_2)
+				read_value &= crb_entry->value_2;
+			read_value |= crb_entry->value_3;
+			read_value += crb_entry->value_1;
+			tmplt_hdr->saved_state_array[index] = read_value;
+			opcode &= ~QLA82XX_DBG_OPCODE_MDSTATE;
+		}
+		crb_addr += crb_entry->crb_strd.addr_stride;
+	}
+	DEBUG2(ql4_printk(KERN_INFO, ha, "Leaving fn: %s\n", __func__));
+	return rval;
+}
+
+static void qla4_8xxx_minidump_process_rdocm(struct scsi_qla_host *ha,
+				struct qla82xx_minidump_entry_hdr *entry_hdr,
+				uint32_t **d_ptr)
+{
+	uint32_t r_addr, r_stride, loop_cnt, i, r_value;
+	struct qla82xx_minidump_entry_rdocm *ocm_hdr;
+	uint32_t *data_ptr = *d_ptr;
+
+	DEBUG2(ql4_printk(KERN_INFO, ha, "Entering fn: %s\n", __func__));
+	ocm_hdr = (struct qla82xx_minidump_entry_rdocm *)entry_hdr;
+	r_addr = ocm_hdr->read_addr;
+	r_stride = ocm_hdr->read_addr_stride;
+	loop_cnt = ocm_hdr->op_count;
+
+	DEBUG2(ql4_printk(KERN_INFO, ha,
+			  "[%s]: r_addr: 0x%x, r_stride: 0x%x, loop_cnt: 0x%x\n",
+			  __func__, r_addr, r_stride, loop_cnt));
+
+	for (i = 0; i < loop_cnt; i++) {
+		r_value = readl((void __iomem *)(r_addr + ha->nx_pcibase));
+		*data_ptr++ = cpu_to_le32(r_value);
+		r_addr += r_stride;
+	}
+	DEBUG2(ql4_printk(KERN_INFO, ha, "Leaving fn: %s datacount: 0x%lx\n",
+			  __func__, (loop_cnt * sizeof(uint32_t))));
+	*d_ptr = data_ptr;
+}
+
+static void qla4_8xxx_minidump_process_rdmux(struct scsi_qla_host *ha,
+				struct qla82xx_minidump_entry_hdr *entry_hdr,
+				uint32_t **d_ptr)
+{
+	uint32_t r_addr, s_stride, s_addr, s_value, loop_cnt, i, r_value;
+	struct qla82xx_minidump_entry_mux *mux_hdr;
+	uint32_t *data_ptr = *d_ptr;
+
+	DEBUG2(ql4_printk(KERN_INFO, ha, "Entering fn: %s\n", __func__));
+	mux_hdr = (struct qla82xx_minidump_entry_mux *)entry_hdr;
+	r_addr = mux_hdr->read_addr;
+	s_addr = mux_hdr->select_addr;
+	s_stride = mux_hdr->select_value_stride;
+	s_value = mux_hdr->select_value;
+	loop_cnt = mux_hdr->op_count;
+
+	for (i = 0; i < loop_cnt; i++) {
+		qla4_8xxx_md_rw_32(ha, s_addr, s_value, 1);
+		r_value = qla4_8xxx_md_rw_32(ha, r_addr, 0, 0);
+		*data_ptr++ = cpu_to_le32(s_value);
+		*data_ptr++ = cpu_to_le32(r_value);
+		s_value += s_stride;
+	}
+	*d_ptr = data_ptr;
+}
+
+static void qla4_8xxx_minidump_process_l1cache(struct scsi_qla_host *ha,
+				struct qla82xx_minidump_entry_hdr *entry_hdr,
+				uint32_t **d_ptr)
+{
+	uint32_t addr, r_addr, c_addr, t_r_addr;
+	uint32_t i, k, loop_count, t_value, r_cnt, r_value;
+	uint32_t c_value_w;
+	struct qla82xx_minidump_entry_cache *cache_hdr;
+	uint32_t *data_ptr = *d_ptr;
+
+	cache_hdr = (struct qla82xx_minidump_entry_cache *)entry_hdr;
+	loop_count = cache_hdr->op_count;
+	r_addr = cache_hdr->read_addr;
+	c_addr = cache_hdr->control_addr;
+	c_value_w = cache_hdr->cache_ctrl.write_value;
+
+	t_r_addr = cache_hdr->tag_reg_addr;
+	t_value = cache_hdr->addr_ctrl.init_tag_value;
+	r_cnt = cache_hdr->read_ctrl.read_addr_cnt;
+
+	for (i = 0; i < loop_count; i++) {
+		qla4_8xxx_md_rw_32(ha, t_r_addr, t_value, 1);
+		qla4_8xxx_md_rw_32(ha, c_addr, c_value_w, 1);
+		addr = r_addr;
+		for (k = 0; k < r_cnt; k++) {
+			r_value = qla4_8xxx_md_rw_32(ha, addr, 0, 0);
+			*data_ptr++ = cpu_to_le32(r_value);
+			addr += cache_hdr->read_ctrl.read_addr_stride;
+		}
+		t_value += cache_hdr->addr_ctrl.tag_value_stride;
+	}
+	*d_ptr = data_ptr;
+}
+
+static void qla4_8xxx_minidump_process_queue(struct scsi_qla_host *ha,
+				struct qla82xx_minidump_entry_hdr *entry_hdr,
+				uint32_t **d_ptr)
+{
+	uint32_t s_addr, r_addr;
+	uint32_t r_stride, r_value, r_cnt, qid = 0;
+	uint32_t i, k, loop_cnt;
+	struct qla82xx_minidump_entry_queue *q_hdr;
+	uint32_t *data_ptr = *d_ptr;
+
+	DEBUG2(ql4_printk(KERN_INFO, ha, "Entering fn: %s\n", __func__));
+	q_hdr = (struct qla82xx_minidump_entry_queue *)entry_hdr;
+	s_addr = q_hdr->select_addr;
+	r_cnt = q_hdr->rd_strd.read_addr_cnt;
+	r_stride = q_hdr->rd_strd.read_addr_stride;
+	loop_cnt = q_hdr->op_count;
+
+	for (i = 0; i < loop_cnt; i++) {
+		qla4_8xxx_md_rw_32(ha, s_addr, qid, 1);
+		r_addr = q_hdr->read_addr;
+		for (k = 0; k < r_cnt; k++) {
+			r_value = qla4_8xxx_md_rw_32(ha, r_addr, 0, 0);
+			*data_ptr++ = cpu_to_le32(r_value);
+			r_addr += r_stride;
+		}
+		qid += q_hdr->q_strd.queue_id_stride;
+	}
+	*d_ptr = data_ptr;
+}
+
+#define MD_DIRECT_ROM_WINDOW		0x42110030
+#define MD_DIRECT_ROM_READ_BASE		0x42150000
+
+static void qla4_8xxx_minidump_process_rdrom(struct scsi_qla_host *ha,
+				struct qla82xx_minidump_entry_hdr *entry_hdr,
+				uint32_t **d_ptr)
+{
+	uint32_t r_addr, r_value;
+	uint32_t i, loop_cnt;
+	struct qla82xx_minidump_entry_rdrom *rom_hdr;
+	uint32_t *data_ptr = *d_ptr;
+
+	DEBUG2(ql4_printk(KERN_INFO, ha, "Entering fn: %s\n", __func__));
+	rom_hdr = (struct qla82xx_minidump_entry_rdrom *)entry_hdr;
+	r_addr = rom_hdr->read_addr;
+	loop_cnt = rom_hdr->read_data_size/sizeof(uint32_t);
+
+	DEBUG2(ql4_printk(KERN_INFO, ha,
+			  "[%s]: flash_addr: 0x%x, read_data_size: 0x%x\n",
+			   __func__, r_addr, loop_cnt));
+
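+	/* Select the 64KB flash window via the high address bits, then read within it */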
+	for (i = 0; i < loop_cnt; i++) {
+		qla4_8xxx_md_rw_32(ha, MD_DIRECT_ROM_WINDOW,
+				   (r_addr & 0xFFFF0000), 1);
+		r_value = qla4_8xxx_md_rw_32(ha,
+					     MD_DIRECT_ROM_READ_BASE +
+					     (r_addr & 0x0000FFFF), 0, 0);
+		*data_ptr++ = cpu_to_le32(r_value);
+		r_addr += sizeof(uint32_t);
+	}
+	*d_ptr = data_ptr;
+}
+
+#define MD_MIU_TEST_AGT_CTRL		0x41000090
+#define MD_MIU_TEST_AGT_ADDR_LO		0x41000094
+#define MD_MIU_TEST_AGT_ADDR_HI		0x41000098
+
+static int qla4_8xxx_minidump_process_rdmem(struct scsi_qla_host *ha,
+				struct qla82xx_minidump_entry_hdr *entry_hdr,
+				uint32_t **d_ptr)
+{
+	uint32_t r_addr, r_value, r_data;
+	uint32_t i, j, loop_cnt;
+	struct qla82xx_minidump_entry_rdmem *m_hdr;
+	unsigned long flags;
+	uint32_t *data_ptr = *d_ptr;
+
+	DEBUG2(ql4_printk(KERN_INFO, ha, "Entering fn: %s\n", __func__));
+	m_hdr = (struct qla82xx_minidump_entry_rdmem *)entry_hdr;
+	r_addr = m_hdr->read_addr;
+	loop_cnt = m_hdr->read_data_size/16;
+
+	DEBUG2(ql4_printk(KERN_INFO, ha,
+			  "[%s]: Read addr: 0x%x, read_data_size: 0x%x\n",
+			  __func__, r_addr, m_hdr->read_data_size));
+
+	if (r_addr & 0xf) {
+		DEBUG2(ql4_printk(KERN_INFO, ha,
+				  "[%s]: Read addr 0x%x not 16 bytes aligned\n",
+				  __func__, r_addr));
+		return QLA_ERROR;
+	}
+
+	if (m_hdr->read_data_size % 16) {
+		DEBUG2(ql4_printk(KERN_INFO, ha,
+				  "[%s]: Read data[0x%x] not multiple of 16 bytes\n",
+				  __func__, m_hdr->read_data_size));
+		return QLA_ERROR;
+	}
+
+	DEBUG2(ql4_printk(KERN_INFO, ha,
+			  "[%s]: rdmem_addr: 0x%x, read_data_size: 0x%x, loop_cnt: 0x%x\n",
+			  __func__, r_addr, m_hdr->read_data_size, loop_cnt));
+
+	write_lock_irqsave(&ha->hw_lock, flags);
+	for (i = 0; i < loop_cnt; i++) {
+		qla4_8xxx_md_rw_32(ha, MD_MIU_TEST_AGT_ADDR_LO, r_addr, 1);
+		r_value = 0;
+		qla4_8xxx_md_rw_32(ha, MD_MIU_TEST_AGT_ADDR_HI, r_value, 1);
+		r_value = MIU_TA_CTL_ENABLE;
+		qla4_8xxx_md_rw_32(ha, MD_MIU_TEST_AGT_CTRL, r_value, 1);
+		r_value = MIU_TA_CTL_START | MIU_TA_CTL_ENABLE;
+		qla4_8xxx_md_rw_32(ha, MD_MIU_TEST_AGT_CTRL, r_value, 1);
+
+		for (j = 0; j < MAX_CTL_CHECK; j++) {
+			r_value = qla4_8xxx_md_rw_32(ha, MD_MIU_TEST_AGT_CTRL,
+						     0, 0);
+			if ((r_value & MIU_TA_CTL_BUSY) == 0)
+				break;
+		}
+
+		if (j >= MAX_CTL_CHECK) {
+			printk_ratelimited(KERN_ERR
+					   "%s: failed to read through agent\n",
+					    __func__);
+			write_unlock_irqrestore(&ha->hw_lock, flags);
+			return QLA_SUCCESS;
+		}
+
+		for (j = 0; j < 4; j++) {
+			r_data = qla4_8xxx_md_rw_32(ha,
+						    MD_MIU_TEST_AGT_RDDATA[j],
+						    0, 0);
+			*data_ptr++ = cpu_to_le32(r_data);
+		}
+
+		r_addr += 16;
+	}
+	write_unlock_irqrestore(&ha->hw_lock, flags);
+
+	DEBUG2(ql4_printk(KERN_INFO, ha, "Leaving fn: %s datacount: 0x%x\n",
+			  __func__, (loop_cnt * 16)));
+
+	*d_ptr = data_ptr;
+	return QLA_SUCCESS;
+}
+
+static void ql4_8xxx_mark_entry_skipped(struct scsi_qla_host *ha,
+				struct qla82xx_minidump_entry_hdr *entry_hdr,
+				int index)
+{
+	entry_hdr->d_ctrl.driver_flags |= QLA82XX_DBG_SKIPPED_FLAG;
+	DEBUG2(ql4_printk(KERN_INFO, ha,
+			  "scsi(%ld): Skipping entry[%d]: ETYPE[0x%x]-ELEVEL[0x%x]\n",
+			  ha->host_no, index, entry_hdr->entry_type,
+			  entry_hdr->d_ctrl.entry_capture_mask));
+}
+
+/**
+ * qla4_8xxx_collect_md_data - Retrieve firmware minidump data.
+ * @ha: pointer to adapter structure
+ **/
+static int qla4_8xxx_collect_md_data(struct scsi_qla_host *ha)
+{
+	int num_entry_hdr = 0;
+	struct qla82xx_minidump_entry_hdr *entry_hdr;
+	struct qla4_8xxx_minidump_template_hdr *tmplt_hdr;
+	uint32_t *data_ptr;
+	uint32_t data_collected = 0;
+	int i, rval = QLA_ERROR;
+	uint64_t now;
+	uint32_t timestamp;
+
+	if (!ha->fw_dump) {
+		ql4_printk(KERN_INFO, ha, "%s(%ld) No buffer to dump\n",
+			   __func__, ha->host_no);
+		return rval;
+	}
+
+	tmplt_hdr = (struct qla4_8xxx_minidump_template_hdr *)
+						ha->fw_dump_tmplt_hdr;
+	data_ptr = (uint32_t *)((uint8_t *)ha->fw_dump +
+						ha->fw_dump_tmplt_size);
+	data_collected += ha->fw_dump_tmplt_size;
+
+	num_entry_hdr = tmplt_hdr->num_of_entries;
+	ql4_printk(KERN_INFO, ha, "[%s]: starting data ptr: %p\n",
+		   __func__, data_ptr);
+	ql4_printk(KERN_INFO, ha,
+		   "[%s]: no of entry headers in Template: 0x%x\n",
+		   __func__, num_entry_hdr);
+	ql4_printk(KERN_INFO, ha, "[%s]: Capture Mask obtained: 0x%x\n",
+		   __func__, ha->fw_dump_capture_mask);
+	ql4_printk(KERN_INFO, ha, "[%s]: Total_data_size 0x%x, %d obtained\n",
+		   __func__, ha->fw_dump_size, ha->fw_dump_size);
+
+	/* Update current timestamp before taking dump */
+	now = get_jiffies_64();
+	timestamp = (u32)(jiffies_to_msecs(now) / 1000);
+	tmplt_hdr->driver_timestamp = timestamp;
+
+	entry_hdr = (struct qla82xx_minidump_entry_hdr *)
+					(((uint8_t *)ha->fw_dump_tmplt_hdr) +
+					 tmplt_hdr->first_entry_offset);
+
+	/* Walk through the entry headers - validate/perform required action */
+	for (i = 0; i < num_entry_hdr; i++) {
+		if (data_collected >= ha->fw_dump_size) {
+			ql4_printk(KERN_INFO, ha,
+				   "Data collected: [0x%x], Total Dump size: [0x%x]\n",
+				   data_collected, ha->fw_dump_size);
+			return rval;
+		}
+
+		if (!(entry_hdr->d_ctrl.entry_capture_mask &
+		      ha->fw_dump_capture_mask)) {
+			entry_hdr->d_ctrl.driver_flags |=
+						QLA82XX_DBG_SKIPPED_FLAG;
+			goto skip_nxt_entry;
+		}
+
+		DEBUG2(ql4_printk(KERN_INFO, ha,
+				  "Data collected: [0x%x], Dump size left:[0x%x]\n",
+				  data_collected,
+				  (ha->fw_dump_size - data_collected)));
+
+		/* Decode the entry type and take required action to capture
+		 * debug data
+		 */
+		switch (entry_hdr->entry_type) {
+		case QLA82XX_RDEND:
+			ql4_8xxx_mark_entry_skipped(ha, entry_hdr, i);
+			break;
+		case QLA82XX_CNTRL:
+			rval = qla4_8xxx_minidump_process_control(ha,
+								  entry_hdr);
+			if (rval != QLA_SUCCESS) {
+				ql4_8xxx_mark_entry_skipped(ha, entry_hdr, i);
+				goto md_failed;
+			}
+			break;
+		case QLA82XX_RDCRB:
+			qla4_8xxx_minidump_process_rdcrb(ha, entry_hdr,
+							 &data_ptr);
+			break;
+		case QLA82XX_RDMEM:
+			rval = qla4_8xxx_minidump_process_rdmem(ha, entry_hdr,
+								&data_ptr);
+			if (rval != QLA_SUCCESS) {
+				ql4_8xxx_mark_entry_skipped(ha, entry_hdr, i);
+				goto md_failed;
+			}
+			break;
+		case QLA82XX_BOARD:
+		case QLA82XX_RDROM:
+			qla4_8xxx_minidump_process_rdrom(ha, entry_hdr,
+							 &data_ptr);
+			break;
+		case QLA82XX_L2DTG:
+		case QLA82XX_L2ITG:
+		case QLA82XX_L2DAT:
+		case QLA82XX_L2INS:
+			rval = qla4_8xxx_minidump_process_l2tag(ha, entry_hdr,
+								&data_ptr);
+			if (rval != QLA_SUCCESS) {
+				ql4_8xxx_mark_entry_skipped(ha, entry_hdr, i);
+				goto md_failed;
+			}
+			break;
+		case QLA82XX_L1DAT:
+		case QLA82XX_L1INS:
+			qla4_8xxx_minidump_process_l1cache(ha, entry_hdr,
+							   &data_ptr);
+			break;
+		case QLA82XX_RDOCM:
+			qla4_8xxx_minidump_process_rdocm(ha, entry_hdr,
+							 &data_ptr);
+			break;
+		case QLA82XX_RDMUX:
+			qla4_8xxx_minidump_process_rdmux(ha, entry_hdr,
+							 &data_ptr);
+			break;
+		case QLA82XX_QUEUE:
+			qla4_8xxx_minidump_process_queue(ha, entry_hdr,
+							 &data_ptr);
+			break;
+		case QLA82XX_RDNOP:
+		default:
+			ql4_8xxx_mark_entry_skipped(ha, entry_hdr, i);
+			break;
+		}
+
+		data_collected = (uint8_t *)data_ptr -
+				 ((uint8_t *)((uint8_t *)ha->fw_dump +
+						ha->fw_dump_tmplt_size));
+skip_nxt_entry:
+		/*  next entry in the template */
+		entry_hdr = (struct qla82xx_minidump_entry_hdr *)
+				(((uint8_t *)entry_hdr) +
+				 entry_hdr->entry_size);
+	}
+
+	if ((data_collected + ha->fw_dump_tmplt_size) != ha->fw_dump_size) {
+		ql4_printk(KERN_INFO, ha,
+			   "Dump data mismatch: Data collected: [0x%x], total_data_size:[0x%x]\n",
+			   data_collected, ha->fw_dump_size);
+		goto md_failed;
+	}
+
+	DEBUG2(ql4_printk(KERN_INFO, ha, "Leaving fn: %s Last entry: 0x%x\n",
+			  __func__, i));
+md_failed:
+	return rval;
+}
+
+/**
+ * qla4_8xxx_uevent_emit - Send uevent when the firmware dump is ready.
+ * @ha: pointer to adapter structure
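+ * @code: uevent code to emit (e.g. QL4_UEVENT_CODE_FW_DUMP)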
+ **/
+static void qla4_8xxx_uevent_emit(struct scsi_qla_host *ha, u32 code)
+{
+	char event_string[40];
+	char *envp[] = { event_string, NULL };
+
+	switch (code) {
+	case QL4_UEVENT_CODE_FW_DUMP:
+		snprintf(event_string, sizeof(event_string), "FW_DUMP=%ld",
+			 ha->host_no);
+		break;
+	default:
+		/*do nothing*/
+		break;
+	}
+
+	kobject_uevent_env(&(&ha->pdev->dev)->kobj, KOBJ_CHANGE, envp);
+}
+
 /**
  * qla4_8xxx_device_bootstrap - Initialize device, set DEV_READY, start fw
  * @ha: pointer to adapter structure
@@ -1659,6 +2324,15 @@
 	qla4_8xxx_wr_32(ha, QLA82XX_CRB_DRV_IDC_VERSION, QLA82XX_IDC_VERSION);
 
 	qla4_8xxx_idc_unlock(ha);
+	if (ql4xenablemd && test_bit(AF_FW_RECOVERY, &ha->flags) &&
+	    !test_and_set_bit(AF_82XX_FW_DUMPED, &ha->flags)) {
+		if (!qla4_8xxx_collect_md_data(ha)) {
+			qla4_8xxx_uevent_emit(ha, QL4_UEVENT_CODE_FW_DUMP);
+		} else {
+			ql4_printk(KERN_INFO, ha, "Unable to collect minidump\n");
+			clear_bit(AF_82XX_FW_DUMPED, &ha->flags);
+		}
+	}
 	rval = qla4_8xxx_try_start_fw(ha);
 	qla4_8xxx_idc_lock(ha);
 
@@ -1686,6 +2360,7 @@
 qla4_8xxx_need_reset_handler(struct scsi_qla_host *ha)
 {
 	uint32_t dev_state, drv_state, drv_active;
+	uint32_t active_mask = 0xFFFFFFFF;
 	unsigned long reset_timeout;
 
 	ql4_printk(KERN_INFO, ha,
@@ -1697,7 +2372,14 @@
 		qla4_8xxx_idc_lock(ha);
 	}
 
-	qla4_8xxx_set_rst_ready(ha);
+	if (!test_bit(AF_82XX_RST_OWNER, &ha->flags)) {
+		DEBUG2(ql4_printk(KERN_INFO, ha,
+				  "%s(%ld): reset acknowledged\n",
+				  __func__, ha->host_no));
+		qla4_8xxx_set_rst_ready(ha);
+	} else {
+		active_mask = (~(1 << (ha->func_num * 4)));
+	}
 
 	/* wait for 10 seconds for reset ack from all functions */
 	reset_timeout = jiffies + (ha->nx_reset_timeout * HZ);
@@ -1709,12 +2391,24 @@
 		"%s(%ld): drv_state = 0x%x, drv_active = 0x%x\n",
 		__func__, ha->host_no, drv_state, drv_active);
 
-	while (drv_state != drv_active) {
+	while (drv_state != (drv_active & active_mask)) {
 		if (time_after_eq(jiffies, reset_timeout)) {
-			printk("%s: RESET TIMEOUT!\n", DRIVER_NAME);
+			ql4_printk(KERN_INFO, ha,
+				   "%s: RESET TIMEOUT! drv_state: 0x%08x, drv_active: 0x%08x\n",
+				   DRIVER_NAME, drv_state, drv_active);
 			break;
 		}
 
+		/*
+		 * When reset_owner times out, check which functions
+		 * acked/did not ack
+		 */
+		if (test_bit(AF_82XX_RST_OWNER, &ha->flags)) {
+			ql4_printk(KERN_INFO, ha,
+				   "%s(%ld): drv_state = 0x%x, drv_active = 0x%x\n",
+				   __func__, ha->host_no, drv_state,
+				   drv_active);
+		}
 		qla4_8xxx_idc_unlock(ha);
 		msleep(1000);
 		qla4_8xxx_idc_lock(ha);
@@ -1723,14 +2417,18 @@
 		drv_active = qla4_8xxx_rd_32(ha, QLA82XX_CRB_DRV_ACTIVE);
 	}
 
+	/* Clear RESET OWNER as we are not going to use it any further */
+	clear_bit(AF_82XX_RST_OWNER, &ha->flags);
+
 	dev_state = qla4_8xxx_rd_32(ha, QLA82XX_CRB_DEV_STATE);
-	ql4_printk(KERN_INFO, ha, "3:Device state is 0x%x = %s\n", dev_state,
-		dev_state < MAX_STATES ? qdev_state[dev_state] : "Unknown");
+	ql4_printk(KERN_INFO, ha, "Device state is 0x%x = %s\n", dev_state,
+		   dev_state < MAX_STATES ? qdev_state[dev_state] : "Unknown");
 
 	/* Force to DEV_COLD unless someone else is starting a reset */
 	if (dev_state != QLA82XX_DEV_INITIALIZING) {
 		ql4_printk(KERN_INFO, ha, "HW State: COLD/RE-INIT\n");
 		qla4_8xxx_wr_32(ha, QLA82XX_CRB_DEV_STATE, QLA82XX_DEV_COLD);
+		qla4_8xxx_set_rst_ready(ha);
 	}
 }
 
@@ -1765,8 +2463,9 @@
 	}
 
 	dev_state = qla4_8xxx_rd_32(ha, QLA82XX_CRB_DEV_STATE);
-	ql4_printk(KERN_INFO, ha, "1:Device state is 0x%x = %s\n", dev_state,
-		dev_state < MAX_STATES ? qdev_state[dev_state] : "Unknown");
+	DEBUG2(ql4_printk(KERN_INFO, ha, "Device state is 0x%x = %s\n",
+			  dev_state, dev_state < MAX_STATES ?
+			  qdev_state[dev_state] : "Unknown"));
 
 	/* wait for 30 seconds for device to go ready */
 	dev_init_timeout = jiffies + (ha->nx_dev_init_timeout * HZ);
@@ -1775,15 +2474,19 @@
 	while (1) {
 
 		if (time_after_eq(jiffies, dev_init_timeout)) {
-			ql4_printk(KERN_WARNING, ha, "Device init failed!\n");
+			ql4_printk(KERN_WARNING, ha,
+				   "%s: Device Init Failed 0x%x = %s\n",
+				   DRIVER_NAME,
+				   dev_state, dev_state < MAX_STATES ?
+				   qdev_state[dev_state] : "Unknown");
 			qla4_8xxx_wr_32(ha, QLA82XX_CRB_DEV_STATE,
 				QLA82XX_DEV_FAILED);
 		}
 
 		dev_state = qla4_8xxx_rd_32(ha, QLA82XX_CRB_DEV_STATE);
-		ql4_printk(KERN_INFO, ha,
-		    "2:Device state is 0x%x = %s\n", dev_state,
-		    dev_state < MAX_STATES ? qdev_state[dev_state] : "Unknown");
+		ql4_printk(KERN_INFO, ha, "Device state is 0x%x = %s\n",
+			   dev_state, dev_state < MAX_STATES ?
+			   qdev_state[dev_state] : "Unknown");
 
 		/* NOTE: Make sure idc unlocked upon exit of switch statement */
 		switch (dev_state) {
@@ -2184,6 +2887,7 @@
 		ql4_printk(KERN_INFO, ha, "HW State: NEED RESET\n");
 		qla4_8xxx_wr_32(ha, QLA82XX_CRB_DEV_STATE,
 		    QLA82XX_DEV_NEED_RESET);
+		set_bit(AF_82XX_RST_OWNER, &ha->flags);
 	} else
 		ql4_printk(KERN_INFO, ha, "HW State: DEVICE INITIALIZING\n");
 
@@ -2195,8 +2899,10 @@
 	qla4_8xxx_clear_rst_ready(ha);
 	qla4_8xxx_idc_unlock(ha);
 
-	if (rval == QLA_SUCCESS)
+	if (rval == QLA_SUCCESS) {
+		ql4_printk(KERN_INFO, ha, "Clearing AF_FW_RECOVERY in qla4_8xxx_isp_reset\n");
 		clear_bit(AF_FW_RECOVERY, &ha->flags);
+	}
 
 	return rval;
 }
diff --git a/drivers/scsi/qla4xxx/ql4_nx.h b/drivers/scsi/qla4xxx/ql4_nx.h
index dc7500e..3025847 100644
--- a/drivers/scsi/qla4xxx/ql4_nx.h
+++ b/drivers/scsi/qla4xxx/ql4_nx.h
@@ -792,4 +792,196 @@
 #define MIU_TEST_AGT_WRDATA_UPPER_LO	(0x0b0)
 #define	MIU_TEST_AGT_WRDATA_UPPER_HI	(0x0b4)
 
+/* Minidump related */
+
+/* Entry Type Defines */
+#define QLA82XX_RDNOP	0
+#define QLA82XX_RDCRB	1
+#define QLA82XX_RDMUX	2
+#define QLA82XX_QUEUE	3
+#define QLA82XX_BOARD	4
+#define QLA82XX_RDOCM	6
+#define QLA82XX_PREGS	7
+#define QLA82XX_L1DTG	8
+#define QLA82XX_L1ITG	9
+#define QLA82XX_L1DAT	11
+#define QLA82XX_L1INS	12
+#define QLA82XX_L2DTG	21
+#define QLA82XX_L2ITG	22
+#define QLA82XX_L2DAT	23
+#define QLA82XX_L2INS	24
+#define QLA82XX_RDROM	71
+#define QLA82XX_RDMEM	72
+#define QLA82XX_CNTRL	98
+#define QLA82XX_RDEND	255
+
+/* Opcodes for Control Entries.
+ * These Flags are bit fields.
+ */
+#define QLA82XX_DBG_OPCODE_WR		0x01
+#define QLA82XX_DBG_OPCODE_RW		0x02
+#define QLA82XX_DBG_OPCODE_AND		0x04
+#define QLA82XX_DBG_OPCODE_OR		0x08
+#define QLA82XX_DBG_OPCODE_POLL		0x10
+#define QLA82XX_DBG_OPCODE_RDSTATE	0x20
+#define QLA82XX_DBG_OPCODE_WRSTATE	0x40
+#define QLA82XX_DBG_OPCODE_MDSTATE	0x80
+
+/* Driver Flags */
+#define QLA82XX_DBG_SKIPPED_FLAG	0x80 /* driver skipped this entry  */
+#define QLA82XX_DBG_SIZE_ERR_FLAG	0x40 /* Entry vs Capture size
+					      * mismatch */
+
+/* driver_code is for the driver to write some info about the entry;
+ * currently not used.
+ */
+struct qla82xx_minidump_entry_hdr {
+	uint32_t entry_type;
+	uint32_t entry_size;
+	uint32_t entry_capture_size;
+	struct {
+		uint8_t entry_capture_mask;
+		uint8_t entry_code;
+		uint8_t driver_code;
+		uint8_t driver_flags;
+	} d_ctrl;
+};
+
+/*  Read CRB entry header */
+struct qla82xx_minidump_entry_crb {
+	struct qla82xx_minidump_entry_hdr h;
+	uint32_t addr;
+	struct {
+		uint8_t addr_stride;
+		uint8_t state_index_a;
+		uint16_t poll_timeout;
+	} crb_strd;
+		 * Perform implicit vfs_fsync_range() for fd_do_writev() ops
+	uint32_t op_count;
+
+	struct {
+		uint8_t opcode;
+		uint8_t state_index_v;
+		uint8_t shl;
+		uint8_t shr;
+	} crb_ctrl;
+
+	uint32_t value_1;
+	uint32_t value_2;
+	uint32_t value_3;
+};
+
+struct qla82xx_minidump_entry_cache {
+	struct qla82xx_minidump_entry_hdr h;
+	uint32_t tag_reg_addr;
+	struct {
+		uint16_t tag_value_stride;
+		uint16_t init_tag_value;
+	} addr_ctrl;
+	uint32_t data_size;
+	uint32_t op_count;
+	uint32_t control_addr;
+	struct {
+		uint16_t write_value;
+		uint8_t poll_mask;
+		uint8_t poll_wait;
+	} cache_ctrl;
+	uint32_t read_addr;
+	struct {
+		uint8_t read_addr_stride;
+		uint8_t read_addr_cnt;
+		uint16_t rsvd_1;
+	} read_ctrl;
+};
+
+/* Read OCM */
+struct qla82xx_minidump_entry_rdocm {
+	struct qla82xx_minidump_entry_hdr h;
+	uint32_t rsvd_0;
+	uint32_t rsvd_1;
+	uint32_t data_size;
+	uint32_t op_count;
+	uint32_t rsvd_2;
+	uint32_t rsvd_3;
+	uint32_t read_addr;
+	uint32_t read_addr_stride;
+};
+
+/* Read Memory */
+struct qla82xx_minidump_entry_rdmem {
+	struct qla82xx_minidump_entry_hdr h;
+	uint32_t rsvd[6];
+	uint32_t read_addr;
+	uint32_t read_data_size;
+};
+
+/* Read ROM */
+struct qla82xx_minidump_entry_rdrom {
+	struct qla82xx_minidump_entry_hdr h;
+	uint32_t rsvd[6];
+	uint32_t read_addr;
+	uint32_t read_data_size;
+};
+
+/* Mux entry */
+struct qla82xx_minidump_entry_mux {
+	struct qla82xx_minidump_entry_hdr h;
+	uint32_t select_addr;
+	uint32_t rsvd_0;
+	uint32_t data_size;
+	uint32_t op_count;
+	uint32_t select_value;
+	uint32_t select_value_stride;
+	uint32_t read_addr;
+	uint32_t rsvd_1;
+};
+
+/* Queue entry */
+struct qla82xx_minidump_entry_queue {
+	struct qla82xx_minidump_entry_hdr h;
+	uint32_t select_addr;
+	struct {
+		uint16_t queue_id_stride;
+		uint16_t rsvd_0;
+	} q_strd;
+	uint32_t data_size;
+	uint32_t op_count;
+	uint32_t rsvd_1;
+	uint32_t rsvd_2;
+	uint32_t read_addr;
+	struct {
+		uint8_t read_addr_stride;
+		uint8_t read_addr_cnt;
+		uint16_t rsvd_3;
+	} rd_strd;
+};
+
+#define QLA82XX_MINIDUMP_OCM0_SIZE		(256 * 1024)
+#define QLA82XX_MINIDUMP_L1C_SIZE		(256 * 1024)
+#define QLA82XX_MINIDUMP_L2C_SIZE		1572864
+#define QLA82XX_MINIDUMP_COMMON_STR_SIZE	0
+#define QLA82XX_MINIDUMP_FCOE_STR_SIZE		0
+#define QLA82XX_MINIDUMP_MEM_SIZE		0
+#define QLA82XX_MAX_ENTRY_HDR			4
+
+struct qla82xx_minidump {
+	uint32_t md_ocm0_data[QLA82XX_MINIDUMP_OCM0_SIZE];
+	uint32_t md_l1c_data[QLA82XX_MINIDUMP_L1C_SIZE];
+	uint32_t md_l2c_data[QLA82XX_MINIDUMP_L2C_SIZE];
+	uint32_t md_cs_data[QLA82XX_MINIDUMP_COMMON_STR_SIZE];
+	uint32_t md_fcoes_data[QLA82XX_MINIDUMP_FCOE_STR_SIZE];
+	uint32_t md_mem_data[QLA82XX_MINIDUMP_MEM_SIZE];
+};
+
+#define MBC_DIAGNOSTIC_MINIDUMP_TEMPLATE	0x129
+#define RQST_TMPLT_SIZE				0x0
+#define RQST_TMPLT				0x1
+#define MD_DIRECT_ROM_WINDOW			0x42110030
+#define MD_DIRECT_ROM_READ_BASE			0x42150000
+#define MD_MIU_TEST_AGT_CTRL			0x41000090
+#define MD_MIU_TEST_AGT_ADDR_LO			0x41000094
+#define MD_MIU_TEST_AGT_ADDR_HI			0x41000098
+
+static const int MD_MIU_TEST_AGT_RDDATA[] = { 0x410000A8,
+				0x410000AC, 0x410000B8, 0x410000BC };
 #endif
diff --git a/drivers/scsi/qla4xxx/ql4_os.c b/drivers/scsi/qla4xxx/ql4_os.c
index ee47820..cd15678 100644
--- a/drivers/scsi/qla4xxx/ql4_os.c
+++ b/drivers/scsi/qla4xxx/ql4_os.c
@@ -68,12 +68,34 @@
 		 " Maximum queue depth to report for target devices.\n"
 		 "\t\t  Default: 32.");
 
+static int ql4xqfulltracking = 1;
+module_param(ql4xqfulltracking, int, S_IRUGO | S_IWUSR);
+MODULE_PARM_DESC(ql4xqfulltracking,
+		 " Enable or disable dynamic tracking and adjustment of\n"
+		 "\t\t scsi device queue depth.\n"
+		 "\t\t  0 - Disable.\n"
+		 "\t\t  1 - Enable. (Default)");
+
 static int ql4xsess_recovery_tmo = QL4_SESS_RECOVERY_TMO;
 module_param(ql4xsess_recovery_tmo, int, S_IRUGO);
 MODULE_PARM_DESC(ql4xsess_recovery_tmo,
 		" Target Session Recovery Timeout.\n"
 		"\t\t  Default: 120 sec.");
 
+int ql4xmdcapmask = 0x1F;
+module_param(ql4xmdcapmask, int, S_IRUGO);
+MODULE_PARM_DESC(ql4xmdcapmask,
+		 " Set the Minidump driver capture mask level.\n"
+		 "\t\t  Default is 0x1F.\n"
+		 "\t\t  Can be set to 0x3, 0x7, 0xF, 0x1F, 0x3F, 0x7F");
+
+int ql4xenablemd = 1;
+module_param(ql4xenablemd, int, S_IRUGO | S_IWUSR);
+MODULE_PARM_DESC(ql4xenablemd,
+		 " Set to enable minidump.\n"
+		 "\t\t  0 - disable minidump\n"
+		 "\t\t  1 - enable minidump (Default)");
+
 static int qla4xxx_wait_for_hba_online(struct scsi_qla_host *ha);
 /*
  * SCSI host template entry points
@@ -140,6 +162,8 @@
 static void qla4xxx_slave_destroy(struct scsi_device *sdev);
 static umode_t ql4_attr_is_visible(int param_type, int param);
 static int qla4xxx_host_reset(struct Scsi_Host *shost, int reset_type);
+static int qla4xxx_change_queue_depth(struct scsi_device *sdev, int qdepth,
+				      int reason);
 
 static struct qla4_8xxx_legacy_intr_set legacy_intr[] =
     QLA82XX_LEGACY_INTR_CONFIG;
@@ -159,6 +183,7 @@
 	.slave_configure	= qla4xxx_slave_configure,
 	.slave_alloc		= qla4xxx_slave_alloc,
 	.slave_destroy		= qla4xxx_slave_destroy,
+	.change_queue_depth	= qla4xxx_change_queue_depth,
 
 	.this_id		= -1,
 	.cmd_per_lun		= 3,
@@ -1555,19 +1580,53 @@
 	struct iscsi_session *sess;
 	struct ddb_entry *ddb_entry;
 	struct scsi_qla_host *ha;
-	unsigned long flags;
+	unsigned long flags, wtime;
+	struct dev_db_entry *fw_ddb_entry = NULL;
+	dma_addr_t fw_ddb_entry_dma;
+	uint32_t ddb_state;
+	int ret;
 
 	DEBUG2(printk(KERN_INFO "Func: %s\n", __func__));
 	sess = cls_sess->dd_data;
 	ddb_entry = sess->dd_data;
 	ha = ddb_entry->ha;
 
+	fw_ddb_entry = dma_alloc_coherent(&ha->pdev->dev, sizeof(*fw_ddb_entry),
+					  &fw_ddb_entry_dma, GFP_KERNEL);
+	if (!fw_ddb_entry) {
+		ql4_printk(KERN_ERR, ha,
+			   "%s: Unable to allocate dma buffer\n", __func__);
+		goto destroy_session;
+	}
+
+	wtime = jiffies + (HZ * LOGOUT_TOV);
+	do {
+		ret = qla4xxx_get_fwddb_entry(ha, ddb_entry->fw_ddb_index,
+					      fw_ddb_entry, fw_ddb_entry_dma,
+					      NULL, NULL, &ddb_state, NULL,
+					      NULL, NULL);
+		if (ret == QLA_ERROR)
+			goto destroy_session;
+
+		if ((ddb_state == DDB_DS_NO_CONNECTION_ACTIVE) ||
+		    (ddb_state == DDB_DS_SESSION_FAILED))
+			goto destroy_session;
+
+		schedule_timeout_uninterruptible(HZ);
+	} while (time_after(wtime, jiffies));
+
+destroy_session:
 	qla4xxx_clear_ddb_entry(ha, ddb_entry->fw_ddb_index);
 
 	spin_lock_irqsave(&ha->hardware_lock, flags);
 	qla4xxx_free_ddb(ha, ddb_entry);
 	spin_unlock_irqrestore(&ha->hardware_lock, flags);
+
 	iscsi_session_teardown(cls_sess);
+
+	if (fw_ddb_entry)
+		dma_free_coherent(&ha->pdev->dev, sizeof(*fw_ddb_entry),
+				  fw_ddb_entry, fw_ddb_entry_dma);
 }
 
 static struct iscsi_cls_conn *
@@ -2220,6 +2279,9 @@
 		dma_free_coherent(&ha->pdev->dev, ha->queues_len, ha->queues,
 				  ha->queues_dma);
 
+	 if (ha->fw_dump)
+		vfree(ha->fw_dump);
+
 	ha->queues_len = 0;
 	ha->queues = NULL;
 	ha->queues_dma = 0;
@@ -2229,6 +2291,8 @@
 	ha->response_dma = 0;
 	ha->shadow_regs = NULL;
 	ha->shadow_regs_dma = 0;
+	ha->fw_dump = NULL;
+	ha->fw_dump_size = 0;
 
 	/* Free srb pool. */
 	if (ha->srb_mempool)
@@ -5023,6 +5087,8 @@
 
 	set_bit(AF_INIT_DONE, &ha->flags);
 
+	qla4_8xxx_alloc_sysfs_attr(ha);
+
 	printk(KERN_INFO
 	       " QLogic iSCSI HBA Driver version: %s\n"
 	       "  QLogic ISP%04x @ %s, host#=%ld, fw=%02d.%02d.%02d.%02d\n",
@@ -5149,6 +5215,7 @@
 		iscsi_boot_destroy_kset(ha->boot_kset);
 
 	qla4xxx_destroy_fw_ddb_session(ha);
+	qla4_8xxx_free_sysfs_attr(ha);
 
 	scsi_remove_host(ha->host);
 
@@ -5217,6 +5284,15 @@
 	scsi_deactivate_tcq(sdev, 1);
 }
 
+static int qla4xxx_change_queue_depth(struct scsi_device *sdev, int qdepth,
+				      int reason)
+{
+	if (!ql4xqfulltracking)
+		return -EOPNOTSUPP;
+
+	return iscsi_change_queue_depth(sdev, qdepth, reason);
+}
+
 /**
  * qla4xxx_del_from_active_array - returns an active srb
  * @ha: Pointer to host adapter structure.
diff --git a/drivers/scsi/qla4xxx/ql4_version.h b/drivers/scsi/qla4xxx/ql4_version.h
index 97b30c1..cc1cc351 100644
--- a/drivers/scsi/qla4xxx/ql4_version.h
+++ b/drivers/scsi/qla4xxx/ql4_version.h
@@ -5,4 +5,4 @@
  * See LICENSE.qla4xxx for copyright and licensing details.
  */
 
-#define QLA4XXX_DRIVER_VERSION	"5.02.00-k16"
+#define QLA4XXX_DRIVER_VERSION	"5.02.00-k17"
diff --git a/drivers/scsi/scsi.c b/drivers/scsi/scsi.c
index 61c82a3..bbbc9c91 100644
--- a/drivers/scsi/scsi.c
+++ b/drivers/scsi/scsi.c
@@ -90,11 +90,9 @@
 EXPORT_SYMBOL(scsi_logging_level);
 #endif
 
-#if IS_ENABLED(CONFIG_PM) || IS_ENABLED(CONFIG_BLK_DEV_SD)
-/* sd and scsi_pm need to coordinate flushing async actions */
+/* sd, scsi core and power management need to coordinate flushing async actions */
 LIST_HEAD(scsi_sd_probe_domain);
 EXPORT_SYMBOL(scsi_sd_probe_domain);
-#endif
 
 /* NB: These are exposed through /proc/scsi/scsi and form part of the ABI.
  * You may not alter any existing entry (although adding new ones is
diff --git a/drivers/scsi/scsi_lib.c b/drivers/scsi/scsi_lib.c
index 62ddfd3..6dfb978 100644
--- a/drivers/scsi/scsi_lib.c
+++ b/drivers/scsi/scsi_lib.c
@@ -1378,16 +1378,19 @@
 {
 	struct scsi_device *sdev = q->queuedata;
 	struct Scsi_Host *shost;
-	struct scsi_target *starget;
 
 	if (!sdev)
 		return 0;
 
 	shost = sdev->host;
-	starget = scsi_target(sdev);
 
-	if (scsi_host_in_recovery(shost) || scsi_host_is_busy(shost) ||
-	    scsi_target_is_busy(starget) || scsi_device_is_busy(sdev))
+	/*
+	 * Ignore host/starget busy state.
+	 * Since the block layer does not have a concept of fairness across
+	 * multiple queues, congestion of the host/starget needs to be
+	 * handled in the SCSI layer.
+	 */
+	if (scsi_host_in_recovery(shost) || scsi_device_is_busy(sdev))
 		return 1;
 
 	return 0;
diff --git a/drivers/scsi/scsi_pm.c b/drivers/scsi/scsi_pm.c
index f661a41..d4201de 100644
--- a/drivers/scsi/scsi_pm.c
+++ b/drivers/scsi/scsi_pm.c
@@ -24,8 +24,11 @@
 	err = scsi_device_quiesce(to_scsi_device(dev));
 	if (err == 0) {
 		drv = dev->driver;
-		if (drv && drv->suspend)
+		if (drv && drv->suspend) {
 			err = drv->suspend(dev, msg);
+			if (err)
+				scsi_device_resume(to_scsi_device(dev));
+		}
 	}
 	dev_dbg(dev, "scsi suspend: %d\n", err);
 	return err;
diff --git a/drivers/scsi/scsi_scan.c b/drivers/scsi/scsi_scan.c
index 01b0374..2e5fe58 100644
--- a/drivers/scsi/scsi_scan.c
+++ b/drivers/scsi/scsi_scan.c
@@ -147,7 +147,7 @@
 
 	do {
 		if (list_empty(&scanning_hosts))
-			return 0;
+			goto out;
 		/* If we can't get memory immediately, that's OK.  Just
 		 * sleep a little.  Even if we never get memory, the async
 		 * scans will finish eventually.
@@ -179,8 +179,11 @@
 	}
  done:
 	spin_unlock(&async_scan_lock);
-
 	kfree(data);
+
+ out:
+	async_synchronize_full_domain(&scsi_sd_probe_domain);
+
 	return 0;
 }
 
diff --git a/drivers/scsi/scsi_wait_scan.c b/drivers/scsi/scsi_wait_scan.c
index 74708fc..ae78148 100644
--- a/drivers/scsi/scsi_wait_scan.c
+++ b/drivers/scsi/scsi_wait_scan.c
@@ -12,7 +12,7 @@
 
 #include <linux/module.h>
 #include <linux/device.h>
-#include <scsi/scsi_scan.h>
+#include "scsi_priv.h"
 
 static int __init wait_scan_init(void)
 {
diff --git a/drivers/scsi/sd.c b/drivers/scsi/sd.c
index 6f0a4c6..6f72b80 100644
--- a/drivers/scsi/sd.c
+++ b/drivers/scsi/sd.c
@@ -1899,6 +1899,8 @@
 {
 	if (sdp->host->max_cmd_len < 16)
 		return 0;
+	if (sdp->try_rc_10_first)
+		return 0;
 	if (sdp->scsi_level > SCSI_SPC_2)
 		return 1;
 	if (scsi_device_protection(sdp))
diff --git a/drivers/scsi/ufs/ufshcd.c b/drivers/scsi/ufs/ufshcd.c
index 4e010b7..6a4fd00 100644
--- a/drivers/scsi/ufs/ufshcd.c
+++ b/drivers/scsi/ufs/ufshcd.c
@@ -1836,7 +1836,7 @@
 	err = pci_request_regions(pdev, UFSHCD);
 	if (err < 0) {
 		dev_err(&pdev->dev, "request regions failed\n");
-		goto out_disable;
+		goto out_host_put;
 	}
 
 	hba->mmio_base = pci_ioremap_bar(pdev, 0);
@@ -1925,8 +1925,9 @@
 	iounmap(hba->mmio_base);
 out_release_regions:
 	pci_release_regions(pdev);
-out_disable:
+out_host_put:
 	scsi_host_put(host);
+out_disable:
 	pci_clear_master(pdev);
 	pci_disable_device(pdev);
 out_error:
diff --git a/drivers/spi/Kconfig b/drivers/spi/Kconfig
index 00c0240..cd2fe35 100644
--- a/drivers/spi/Kconfig
+++ b/drivers/spi/Kconfig
@@ -311,7 +311,7 @@
 
 config SPI_S3C64XX
 	tristate "Samsung S3C64XX series type SPI"
-	depends on (ARCH_S3C64XX || ARCH_S5P64X0 || ARCH_EXYNOS)
+	depends on (ARCH_S3C24XX || ARCH_S3C64XX || ARCH_S5P64X0 || ARCH_EXYNOS)
 	select S3C64XX_DMA if ARCH_S3C64XX
 	help
 	  SPI driver for Samsung S3C64XX and newer SoCs.
diff --git a/drivers/spi/spi-imx.c b/drivers/spi/spi-imx.c
index 69c9a66..47877d6 100644
--- a/drivers/spi/spi-imx.c
+++ b/drivers/spi/spi-imx.c
@@ -86,7 +86,8 @@
 	struct completion xfer_done;
 	void __iomem *base;
 	int irq;
-	struct clk *clk;
+	struct clk *clk_per;
+	struct clk *clk_ipg;
 	unsigned long spi_clk;
 
 	unsigned int count;
@@ -853,15 +854,22 @@
 		goto out_free_irq;
 	}
 
-	spi_imx->clk = clk_get(&pdev->dev, NULL);
-	if (IS_ERR(spi_imx->clk)) {
-		dev_err(&pdev->dev, "unable to get clock\n");
-		ret = PTR_ERR(spi_imx->clk);
+	spi_imx->clk_ipg = devm_clk_get(&pdev->dev, "ipg");
+	if (IS_ERR(spi_imx->clk_ipg)) {
+		ret = PTR_ERR(spi_imx->clk_ipg);
 		goto out_free_irq;
 	}
 
-	clk_enable(spi_imx->clk);
-	spi_imx->spi_clk = clk_get_rate(spi_imx->clk);
+	spi_imx->clk_per = devm_clk_get(&pdev->dev, "per");
+	if (IS_ERR(spi_imx->clk_per)) {
+		ret = PTR_ERR(spi_imx->clk_per);
+		goto out_free_irq;
+	}
+
+	clk_prepare_enable(spi_imx->clk_per);
+	clk_prepare_enable(spi_imx->clk_ipg);
+
+	spi_imx->spi_clk = clk_get_rate(spi_imx->clk_per);
 
 	spi_imx->devtype_data->reset(spi_imx);
 
@@ -879,8 +887,8 @@
 	return ret;
 
 out_clk_put:
-	clk_disable(spi_imx->clk);
-	clk_put(spi_imx->clk);
+	clk_disable_unprepare(spi_imx->clk_per);
+	clk_disable_unprepare(spi_imx->clk_ipg);
 out_free_irq:
 	free_irq(spi_imx->irq, spi_imx);
 out_iounmap:
@@ -908,8 +916,8 @@
 	spi_bitbang_stop(&spi_imx->bitbang);
 
 	writel(0, spi_imx->base + MXC_CSPICTRL);
-	clk_disable(spi_imx->clk);
-	clk_put(spi_imx->clk);
+	clk_disable_unprepare(spi_imx->clk_per);
+	clk_disable_unprepare(spi_imx->clk_ipg);
 	free_irq(spi_imx->irq, spi_imx);
 	iounmap(spi_imx->base);
 
diff --git a/drivers/spi/spi-omap2-mcspi.c b/drivers/spi/spi-omap2-mcspi.c
index 46ef5fe..0c73dd4 100644
--- a/drivers/spi/spi-omap2-mcspi.c
+++ b/drivers/spi/spi-omap2-mcspi.c
@@ -801,7 +801,7 @@
 	mcspi_dma = &mcspi->dma_channels[spi->chip_select];
 
 	if (!cs) {
-		cs = devm_kzalloc(&spi->dev , sizeof *cs, GFP_KERNEL);
+		cs = kzalloc(sizeof *cs, GFP_KERNEL);
 		if (!cs)
 			return -ENOMEM;
 		cs->base = mcspi->base + spi->chip_select * 0x14;
@@ -842,6 +842,7 @@
 		cs = spi->controller_state;
 		list_del(&cs->node);
 
+		kfree(cs);
 	}
 
 	if (spi->chip_select < spi->master->num_chipselect) {
diff --git a/drivers/spi/spi-orion.c b/drivers/spi/spi-orion.c
index e496f79..dfd04e9 100644
--- a/drivers/spi/spi-orion.c
+++ b/drivers/spi/spi-orion.c
@@ -16,8 +16,8 @@
 #include <linux/err.h>
 #include <linux/io.h>
 #include <linux/spi/spi.h>
-#include <linux/spi/orion_spi.h>
 #include <linux/module.h>
+#include <linux/clk.h>
 #include <asm/unaligned.h>
 
 #define DRIVER_NAME			"orion_spi"
@@ -46,6 +46,7 @@
 	unsigned int		max_speed;
 	unsigned int		min_speed;
 	struct orion_spi_info	*spi_info;
+	struct clk              *clk;
 };
 
 static struct workqueue_struct *orion_spi_wq;
@@ -104,7 +105,7 @@
 
 	orion_spi = spi_master_get_devdata(spi->master);
 
-	tclk_hz = orion_spi->spi_info->tclk;
+	tclk_hz = clk_get_rate(orion_spi->clk);
 
 	/*
 	 * the supported rates are: 4,6,8...30
@@ -450,6 +451,7 @@
 	struct orion_spi *spi;
 	struct resource *r;
 	struct orion_spi_info *spi_info;
+	unsigned long tclk_hz;
 	int status = 0;
 
 	spi_info = pdev->dev.platform_data;
@@ -476,19 +478,28 @@
 	spi->master = master;
 	spi->spi_info = spi_info;
 
-	spi->max_speed = DIV_ROUND_UP(spi_info->tclk, 4);
-	spi->min_speed = DIV_ROUND_UP(spi_info->tclk, 30);
+	spi->clk = clk_get(&pdev->dev, NULL);
+	if (IS_ERR(spi->clk)) {
+		status = PTR_ERR(spi->clk);
+		goto out;
+	}
+
+	clk_prepare(spi->clk);
+	clk_enable(spi->clk);
+	tclk_hz = clk_get_rate(spi->clk);
+	spi->max_speed = DIV_ROUND_UP(tclk_hz, 4);
+	spi->min_speed = DIV_ROUND_UP(tclk_hz, 30);
 
 	r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
 	if (r == NULL) {
 		status = -ENODEV;
-		goto out;
+		goto out_rel_clk;
 	}
 
 	if (!request_mem_region(r->start, resource_size(r),
 				dev_name(&pdev->dev))) {
 		status = -EBUSY;
-		goto out;
+		goto out_rel_clk;
 	}
 	spi->base = ioremap(r->start, SZ_1K);
 
@@ -508,7 +519,9 @@
 
 out_rel_mem:
 	release_mem_region(r->start, resource_size(r));
-
+out_rel_clk:
+	clk_disable_unprepare(spi->clk);
+	clk_put(spi->clk);
 out:
 	spi_master_put(master);
 	return status;
@@ -526,6 +539,9 @@
 
 	cancel_work_sync(&spi->work);
 
+	clk_disable_unprepare(spi->clk);
+	clk_put(spi->clk);
+
 	r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
 	release_mem_region(r->start, resource_size(r));
 
diff --git a/drivers/staging/android/ashmem.c b/drivers/staging/android/ashmem.c
index 4511420..e84dbec 100644
--- a/drivers/staging/android/ashmem.c
+++ b/drivers/staging/android/ashmem.c
@@ -19,6 +19,7 @@
 #include <linux/module.h>
 #include <linux/file.h>
 #include <linux/fs.h>
+#include <linux/falloc.h>
 #include <linux/miscdevice.h>
 #include <linux/security.h>
 #include <linux/mm.h>
@@ -363,11 +364,12 @@
 
 	mutex_lock(&ashmem_mutex);
 	list_for_each_entry_safe(range, next, &ashmem_lru_list, lru) {
-		struct inode *inode = range->asma->file->f_dentry->d_inode;
 		loff_t start = range->pgstart * PAGE_SIZE;
-		loff_t end = (range->pgend + 1) * PAGE_SIZE - 1;
+		loff_t end = (range->pgend + 1) * PAGE_SIZE;
 
-		vmtruncate_range(inode, start, end);
+		do_fallocate(range->asma->file,
+				FALLOC_FL_PUNCH_HOLE | FALLOC_FL_KEEP_SIZE,
+				start, end - start);
 		range->purged = ASHMEM_WAS_PURGED;
 		lru_del(range);
 
diff --git a/drivers/staging/comedi/drivers.c b/drivers/staging/comedi/drivers.c
index 1c3d638..aeac1ca 100644
--- a/drivers/staging/comedi/drivers.c
+++ b/drivers/staging/comedi/drivers.c
@@ -30,6 +30,7 @@
 #include <linux/pci.h>
 #include <linux/usb.h>
 #include <linux/errno.h>
+#include <linux/kconfig.h>
 #include <linux/kernel.h>
 #include <linux/sched.h>
 #include <linux/fcntl.h>
@@ -981,6 +982,8 @@
 }
 EXPORT_SYMBOL_GPL(comedi_pci_driver_unregister);
 
+#if IS_ENABLED(CONFIG_USB)
+
 static int comedi_old_usb_auto_config(struct usb_interface *intf,
 				      struct comedi_driver *driver)
 {
@@ -1043,3 +1046,5 @@
 	comedi_driver_unregister(comedi_driver);
 }
 EXPORT_SYMBOL_GPL(comedi_usb_driver_unregister);
+
+#endif
diff --git a/drivers/staging/gdm72xx/netlink_k.c b/drivers/staging/gdm72xx/netlink_k.c
index 292af0f..5166513 100644
--- a/drivers/staging/gdm72xx/netlink_k.c
+++ b/drivers/staging/gdm72xx/netlink_k.c
@@ -104,7 +104,7 @@
 
 void netlink_exit(struct sock *sock)
 {
-	sock_release(sock->sk_socket);
+	netlink_kernel_release(sock);
 }
 
 int netlink_send(struct sock *sock, int group, u16 type, void *msg, int len)
diff --git a/drivers/staging/iio/Documentation/device.txt b/drivers/staging/iio/Documentation/device.txt
index 0338c7c..f03fbd3 100644
--- a/drivers/staging/iio/Documentation/device.txt
+++ b/drivers/staging/iio/Documentation/device.txt
@@ -29,8 +29,6 @@
 	* info->driver_module:
 		Set to THIS_MODULE. Used to ensure correct ownership
 		of various resources allocate by the core.
-	* info->num_interrupt_lines:
-		Number of event triggering hardware lines the device has.
 	* info->event_attrs:
 		Attributes used to enable / disable hardware events.
 	* info->attrs:
diff --git a/drivers/staging/iio/adc/Kconfig b/drivers/staging/iio/adc/Kconfig
index 2490dd2..8f1b3af 100644
--- a/drivers/staging/iio/adc/Kconfig
+++ b/drivers/staging/iio/adc/Kconfig
@@ -13,6 +13,7 @@
 config AD7298
 	tristate "Analog Devices AD7298 ADC driver"
 	depends on SPI
+	select IIO_KFIFO_BUF if IIO_BUFFER
 	help
 	  Say yes here to build support for Analog Devices AD7298
 	  8 Channel ADC with temperature sensor.
diff --git a/drivers/staging/iio/adc/ad7606_core.c b/drivers/staging/iio/adc/ad7606_core.c
index 10ab6dc..a13afff 100644
--- a/drivers/staging/iio/adc/ad7606_core.c
+++ b/drivers/staging/iio/adc/ad7606_core.c
@@ -235,7 +235,8 @@
 		.indexed = 1,					\
 		.channel = num,					\
 		.address = num,					\
-		.info_mask = IIO_CHAN_INFO_RAW_SEPARATE_BIT,	\
+		.info_mask = IIO_CHAN_INFO_RAW_SEPARATE_BIT |	\
+				IIO_CHAN_INFO_SCALE_SHARED_BIT, \
 		.scan_index = num,				\
 		.scan_type = IIO_ST('s', 16, 16, 0),		\
 	}
diff --git a/drivers/staging/media/lirc/lirc_serial.c b/drivers/staging/media/lirc/lirc_serial.c
index 3295ea6..97ef670 100644
--- a/drivers/staging/media/lirc/lirc_serial.c
+++ b/drivers/staging/media/lirc/lirc_serial.c
@@ -129,6 +129,7 @@
 
 static struct lirc_serial hardware[] = {
 	[LIRC_HOMEBREW] = {
+		.lock = __SPIN_LOCK_UNLOCKED(hardware[LIRC_HOMEBREW].lock),
 		.signal_pin        = UART_MSR_DCD,
 		.signal_pin_change = UART_MSR_DDCD,
 		.on  = (UART_MCR_RTS | UART_MCR_OUT2 | UART_MCR_DTR),
@@ -145,6 +146,7 @@
 	},
 
 	[LIRC_IRDEO] = {
+		.lock = __SPIN_LOCK_UNLOCKED(hardware[LIRC_IRDEO].lock),
 		.signal_pin        = UART_MSR_DSR,
 		.signal_pin_change = UART_MSR_DDSR,
 		.on  = UART_MCR_OUT2,
@@ -156,6 +158,7 @@
 	},
 
 	[LIRC_IRDEO_REMOTE] = {
+		.lock = __SPIN_LOCK_UNLOCKED(hardware[LIRC_IRDEO_REMOTE].lock),
 		.signal_pin        = UART_MSR_DSR,
 		.signal_pin_change = UART_MSR_DDSR,
 		.on  = (UART_MCR_RTS | UART_MCR_DTR | UART_MCR_OUT2),
@@ -167,6 +170,7 @@
 	},
 
 	[LIRC_ANIMAX] = {
+		.lock = __SPIN_LOCK_UNLOCKED(hardware[LIRC_ANIMAX].lock),
 		.signal_pin        = UART_MSR_DCD,
 		.signal_pin_change = UART_MSR_DDCD,
 		.on  = 0,
@@ -177,6 +181,7 @@
 	},
 
 	[LIRC_IGOR] = {
+		.lock = __SPIN_LOCK_UNLOCKED(hardware[LIRC_IGOR].lock),
 		.signal_pin        = UART_MSR_DSR,
 		.signal_pin_change = UART_MSR_DDSR,
 		.on  = (UART_MCR_RTS | UART_MCR_OUT2 | UART_MCR_DTR),
@@ -201,6 +206,7 @@
 	 * See also http://www.nslu2-linux.org for this device
 	 */
 	[LIRC_NSLU2] = {
+		.lock = __SPIN_LOCK_UNLOCKED(hardware[LIRC_NSLU2].lock),
 		.signal_pin        = UART_MSR_CTS,
 		.signal_pin_change = UART_MSR_DCTS,
 		.on  = (UART_MCR_RTS | UART_MCR_OUT2 | UART_MCR_DTR),
diff --git a/drivers/staging/omapdrm/omap_fbdev.c b/drivers/staging/omapdrm/omap_fbdev.c
index 11acd4c..8c6ed3b 100644
--- a/drivers/staging/omapdrm/omap_fbdev.c
+++ b/drivers/staging/omapdrm/omap_fbdev.c
@@ -208,7 +208,8 @@
 	 */
 	ret = omap_gem_get_paddr(fbdev->bo, &paddr, true);
 	if (ret) {
-		dev_err(dev->dev, "could not map (paddr)!\n");
+		dev_err(dev->dev,
+			"could not map (paddr)!  Skipping framebuffer alloc\n");
 		ret = -ENOMEM;
 		goto fail;
 	}
@@ -388,8 +389,11 @@
 
 	fbi = helper->fbdev;
 
-	unregister_framebuffer(fbi);
-	framebuffer_release(fbi);
+	/* only cleanup framebuffer if it is present */
+	if (fbi) {
+		unregister_framebuffer(fbi);
+		framebuffer_release(fbi);
+	}
 
 	drm_fb_helper_fini(helper);
 
diff --git a/drivers/staging/ramster/zcache-main.c b/drivers/staging/ramster/zcache-main.c
index 4e7ef0e..d46764b 100644
--- a/drivers/staging/ramster/zcache-main.c
+++ b/drivers/staging/ramster/zcache-main.c
@@ -3002,7 +3002,7 @@
 	return oid;
 }
 
-static int zcache_frontswap_put_page(unsigned type, pgoff_t offset,
+static int zcache_frontswap_store(unsigned type, pgoff_t offset,
 				   struct page *page)
 {
 	u64 ind64 = (u64)offset;
@@ -3025,7 +3025,7 @@
 
 /* returns 0 if the page was successfully gotten from frontswap, -1 if
  * was not present (should never happen!) */
-static int zcache_frontswap_get_page(unsigned type, pgoff_t offset,
+static int zcache_frontswap_load(unsigned type, pgoff_t offset,
 				   struct page *page)
 {
 	u64 ind64 = (u64)offset;
@@ -3080,8 +3080,8 @@
 }
 
 static struct frontswap_ops zcache_frontswap_ops = {
-	.put_page = zcache_frontswap_put_page,
-	.get_page = zcache_frontswap_get_page,
+	.store = zcache_frontswap_store,
+	.load = zcache_frontswap_load,
 	.invalidate_page = zcache_frontswap_flush_page,
 	.invalidate_area = zcache_frontswap_flush_area,
 	.init = zcache_frontswap_init
diff --git a/drivers/staging/rtl8712/usb_intf.c b/drivers/staging/rtl8712/usb_intf.c
index 9bd18e2..69f616c 100644
--- a/drivers/staging/rtl8712/usb_intf.c
+++ b/drivers/staging/rtl8712/usb_intf.c
@@ -102,6 +102,8 @@
 	/* - */
 	{USB_DEVICE(0x20F4, 0x646B)},
 	{USB_DEVICE(0x083A, 0xC512)},
+	{USB_DEVICE(0x25D4, 0x4CA1)},
+	{USB_DEVICE(0x25D4, 0x4CAB)},
 
 /* RTL8191SU */
 	/* Realtek */
diff --git a/drivers/staging/zcache/zcache-main.c b/drivers/staging/zcache/zcache-main.c
index 2734dac..784c796 100644
--- a/drivers/staging/zcache/zcache-main.c
+++ b/drivers/staging/zcache/zcache-main.c
@@ -1835,7 +1835,7 @@
  * Swizzling increases objects per swaptype, increasing tmem concurrency
  * for heavy swaploads.  Later, larger nr_cpus -> larger SWIZ_BITS
  * Setting SWIZ_BITS to 27 basically reconstructs the swap entry from
- * frontswap_get_page(), but has side-effects. Hence using 8.
+ * frontswap_load(), but has side-effects. Hence using 8.
  */
 #define SWIZ_BITS		8
 #define SWIZ_MASK		((1 << SWIZ_BITS) - 1)
@@ -1849,7 +1849,7 @@
 	return oid;
 }
 
-static int zcache_frontswap_put_page(unsigned type, pgoff_t offset,
+static int zcache_frontswap_store(unsigned type, pgoff_t offset,
 				   struct page *page)
 {
 	u64 ind64 = (u64)offset;
@@ -1870,7 +1870,7 @@
 
 /* returns 0 if the page was successfully gotten from frontswap, -1 if
  * was not present (should never happen!) */
-static int zcache_frontswap_get_page(unsigned type, pgoff_t offset,
+static int zcache_frontswap_load(unsigned type, pgoff_t offset,
 				   struct page *page)
 {
 	u64 ind64 = (u64)offset;
@@ -1919,8 +1919,8 @@
 }
 
 static struct frontswap_ops zcache_frontswap_ops = {
-	.put_page = zcache_frontswap_put_page,
-	.get_page = zcache_frontswap_get_page,
+	.store = zcache_frontswap_store,
+	.load = zcache_frontswap_load,
 	.invalidate_page = zcache_frontswap_flush_page,
 	.invalidate_area = zcache_frontswap_flush_area,
 	.init = zcache_frontswap_init
diff --git a/drivers/target/sbp/sbp_target.c b/drivers/target/sbp/sbp_target.c
index 37c6098..7e6136e 100644
--- a/drivers/target/sbp/sbp_target.c
+++ b/drivers/target/sbp/sbp_target.c
@@ -587,14 +587,14 @@
 {
 	struct sbp_tport *tport = agent->tport;
 	struct sbp_tpg *tpg = tport->tpg;
-	int login_id;
+	int id;
 	struct sbp_login_descriptor *login;
 
-	login_id = LOGOUT_ORB_LOGIN_ID(be32_to_cpu(req->orb.misc));
+	id = LOGOUT_ORB_LOGIN_ID(be32_to_cpu(req->orb.misc));
 
-	login = sbp_login_find_by_id(tpg, login_id);
+	login = sbp_login_find_by_id(tpg, id);
 	if (!login) {
-		pr_warn("cannot find login: %d\n", login_id);
+		pr_warn("cannot find login: %d\n", id);
 
 		req->status.status = cpu_to_be32(
 			STATUS_BLOCK_RESP(STATUS_RESP_REQUEST_COMPLETE) |
diff --git a/drivers/target/target_core_alua.c b/drivers/target/target_core_alua.c
index e624b83..9179997 100644
--- a/drivers/target/target_core_alua.c
+++ b/drivers/target/target_core_alua.c
@@ -374,8 +374,9 @@
 
 out:
 	transport_kunmap_data_sg(cmd);
-	target_complete_cmd(cmd, GOOD);
-	return 0;
+	if (!rc)
+		target_complete_cmd(cmd, GOOD);
+	return rc;
 }
 
 static inline int core_alua_state_nonoptimized(
diff --git a/drivers/target/target_core_file.c b/drivers/target/target_core_file.c
index 686dba1..9f99d04 100644
--- a/drivers/target/target_core_file.c
+++ b/drivers/target/target_core_file.c
@@ -133,16 +133,11 @@
 		ret = PTR_ERR(dev_p);
 		goto fail;
 	}
-
-	/* O_DIRECT too? */
-	flags = O_RDWR | O_CREAT | O_LARGEFILE;
-
 	/*
-	 * If fd_buffered_io=1 has not been set explicitly (the default),
-	 * use O_SYNC to force FILEIO writes to disk.
+	 * Use O_DSYNC by default instead of O_SYNC to forgo syncing
+	 * of pure timestamp updates.
 	 */
-	if (!(fd_dev->fbd_flags & FDBD_USE_BUFFERED_IO))
-		flags |= O_SYNC;
+	flags = O_RDWR | O_CREAT | O_LARGEFILE | O_DSYNC;
 
 	file = filp_open(dev_p, flags, 0600);
 	if (IS_ERR(file)) {
@@ -380,23 +375,6 @@
 	}
 }
 
-static void fd_emulate_write_fua(struct se_cmd *cmd)
-{
-	struct se_device *dev = cmd->se_dev;
-	struct fd_dev *fd_dev = dev->dev_ptr;
-	loff_t start = cmd->t_task_lba *
-		dev->se_sub_dev->se_dev_attrib.block_size;
-	loff_t end = start + cmd->data_length;
-	int ret;
-
-	pr_debug("FILEIO: FUA WRITE LBA: %llu, bytes: %u\n",
-		cmd->t_task_lba, cmd->data_length);
-
-	ret = vfs_fsync_range(fd_dev->fd_file, start, end, 1);
-	if (ret != 0)
-		pr_err("FILEIO: vfs_fsync_range() failed: %d\n", ret);
-}
-
 static int fd_execute_cmd(struct se_cmd *cmd, struct scatterlist *sgl,
 		u32 sgl_nents, enum dma_data_direction data_direction)
 {
@@ -411,19 +389,21 @@
 		ret = fd_do_readv(cmd, sgl, sgl_nents);
 	} else {
 		ret = fd_do_writev(cmd, sgl, sgl_nents);
-
+		/*
+		 * Perform implict vfs_fsync_range() for fd_do_writev() ops
+		 * for SCSI WRITEs with Forced Unit Access (FUA) set.
+		 * Allow this to happen independent of WCE=0 setting.
+		 */
 		if (ret > 0 &&
-		    dev->se_sub_dev->se_dev_attrib.emulate_write_cache > 0 &&
 		    dev->se_sub_dev->se_dev_attrib.emulate_fua_write > 0 &&
 		    (cmd->se_cmd_flags & SCF_FUA)) {
-			/*
-			 * We might need to be a bit smarter here
-			 * and return some sense data to let the initiator
-			 * know the FUA WRITE cache sync failed..?
-			 */
-			fd_emulate_write_fua(cmd);
-		}
+			struct fd_dev *fd_dev = dev->dev_ptr;
+			loff_t start = cmd->t_task_lba *
+				dev->se_sub_dev->se_dev_attrib.block_size;
+			loff_t end = start + cmd->data_length;
 
+			vfs_fsync_range(fd_dev->fd_file, start, end, 1);
+		}
 	}
 
 	if (ret < 0) {
@@ -442,7 +422,6 @@
 static match_table_t tokens = {
 	{Opt_fd_dev_name, "fd_dev_name=%s"},
 	{Opt_fd_dev_size, "fd_dev_size=%s"},
-	{Opt_fd_buffered_io, "fd_buffered_io=%d"},
 	{Opt_err, NULL}
 };
 
@@ -454,7 +433,7 @@
 	struct fd_dev *fd_dev = se_dev->se_dev_su_ptr;
 	char *orig, *ptr, *arg_p, *opts;
 	substring_t args[MAX_OPT_ARGS];
-	int ret = 0, arg, token;
+	int ret = 0, token;
 
 	opts = kstrdup(page, GFP_KERNEL);
 	if (!opts)
@@ -498,19 +477,6 @@
 					" bytes\n", fd_dev->fd_dev_size);
 			fd_dev->fbd_flags |= FBDF_HAS_SIZE;
 			break;
-		case Opt_fd_buffered_io:
-			match_int(args, &arg);
-			if (arg != 1) {
-				pr_err("bogus fd_buffered_io=%d value\n", arg);
-				ret = -EINVAL;
-				goto out;
-			}
-
-			pr_debug("FILEIO: Using buffered I/O"
-				" operations for struct fd_dev\n");
-
-			fd_dev->fbd_flags |= FDBD_USE_BUFFERED_IO;
-			break;
 		default:
 			break;
 		}
@@ -542,10 +508,8 @@
 	ssize_t bl = 0;
 
 	bl = sprintf(b + bl, "TCM FILEIO ID: %u", fd_dev->fd_dev_id);
-	bl += sprintf(b + bl, "        File: %s  Size: %llu  Mode: %s\n",
-		fd_dev->fd_dev_name, fd_dev->fd_dev_size,
-		(fd_dev->fbd_flags & FDBD_USE_BUFFERED_IO) ?
-		"Buffered" : "Synchronous");
+	bl += sprintf(b + bl, "        File: %s  Size: %llu  Mode: O_DSYNC\n",
+		fd_dev->fd_dev_name, fd_dev->fd_dev_size);
 	return bl;
 }
 
diff --git a/drivers/target/target_core_file.h b/drivers/target/target_core_file.h
index fbd59ef..70ce7fd 100644
--- a/drivers/target/target_core_file.h
+++ b/drivers/target/target_core_file.h
@@ -14,7 +14,6 @@
 
 #define FBDF_HAS_PATH		0x01
 #define FBDF_HAS_SIZE		0x02
-#define FDBD_USE_BUFFERED_IO	0x04
 
 struct fd_dev {
 	u32		fbd_flags;
diff --git a/drivers/target/target_core_transport.c b/drivers/target/target_core_transport.c
index b05fdc0..634d0f3 100644
--- a/drivers/target/target_core_transport.c
+++ b/drivers/target/target_core_transport.c
@@ -315,7 +315,7 @@
 }
 EXPORT_SYMBOL(transport_register_session);
 
-static void target_release_session(struct kref *kref)
+void target_release_session(struct kref *kref)
 {
 	struct se_session *se_sess = container_of(kref,
 			struct se_session, sess_kref);
@@ -332,6 +332,12 @@
 
 void target_put_session(struct se_session *se_sess)
 {
+	struct se_portal_group *tpg = se_sess->se_tpg;
+
+	if (tpg->se_tpg_tfo->put_session != NULL) {
+		tpg->se_tpg_tfo->put_session(se_sess);
+		return;
+	}
 	kref_put(&se_sess->sess_kref, target_release_session);
 }
 EXPORT_SYMBOL(target_put_session);
diff --git a/drivers/tty/amiserial.c b/drivers/tty/amiserial.c
index 35819e3..6cc4358 100644
--- a/drivers/tty/amiserial.c
+++ b/drivers/tty/amiserial.c
@@ -1033,7 +1033,7 @@
 	if (!retinfo)
 		return -EFAULT;
 	memset(&tmp, 0, sizeof(tmp));
-	tty_lock(tty);
+	tty_lock();
 	tmp.line = tty->index;
 	tmp.port = state->port;
 	tmp.flags = state->tport.flags;
@@ -1042,7 +1042,7 @@
 	tmp.close_delay = state->tport.close_delay;
 	tmp.closing_wait = state->tport.closing_wait;
 	tmp.custom_divisor = state->custom_divisor;
-	tty_unlock(tty);
+	tty_unlock();
 	if (copy_to_user(retinfo,&tmp,sizeof(*retinfo)))
 		return -EFAULT;
 	return 0;
@@ -1059,12 +1059,12 @@
 	if (copy_from_user(&new_serial,new_info,sizeof(new_serial)))
 		return -EFAULT;
 
-	tty_lock(tty);
+	tty_lock();
 	change_spd = ((new_serial.flags ^ port->flags) & ASYNC_SPD_MASK) ||
 		new_serial.custom_divisor != state->custom_divisor;
 	if (new_serial.irq || new_serial.port != state->port ||
 			new_serial.xmit_fifo_size != state->xmit_fifo_size) {
-		tty_unlock(tty);
+		tty_unlock();
 		return -EINVAL;
 	}
   
@@ -1074,7 +1074,7 @@
 		    (new_serial.xmit_fifo_size != state->xmit_fifo_size) ||
 		    ((new_serial.flags & ~ASYNC_USR_MASK) !=
 		     (port->flags & ~ASYNC_USR_MASK))) {
-			tty_unlock(tty);
+			tty_unlock();
 			return -EPERM;
 		}
 		port->flags = ((port->flags & ~ASYNC_USR_MASK) |
@@ -1084,7 +1084,7 @@
 	}
 
 	if (new_serial.baud_base < 9600) {
-		tty_unlock(tty);
+		tty_unlock();
 		return -EINVAL;
 	}
 
@@ -1116,7 +1116,7 @@
 		}
 	} else
 		retval = startup(tty, state);
-	tty_unlock(tty);
+	tty_unlock();
 	return retval;
 }
 
diff --git a/drivers/tty/cyclades.c b/drivers/tty/cyclades.c
index 6984e1a..e61cabd 100644
--- a/drivers/tty/cyclades.c
+++ b/drivers/tty/cyclades.c
@@ -1599,7 +1599,7 @@
 	 * If the port is the middle of closing, bail out now
 	 */
 	if (tty_hung_up_p(filp) || (info->port.flags & ASYNC_CLOSING)) {
-		wait_event_interruptible_tty(tty, info->port.close_wait,
+		wait_event_interruptible_tty(info->port.close_wait,
 				!(info->port.flags & ASYNC_CLOSING));
 		return (info->port.flags & ASYNC_HUP_NOTIFY) ? -EAGAIN: -ERESTARTSYS;
 	}
diff --git a/drivers/tty/hvc/hvc_xen.c b/drivers/tty/hvc/hvc_xen.c
index d3d91da..944eaeb 100644
--- a/drivers/tty/hvc/hvc_xen.c
+++ b/drivers/tty/hvc/hvc_xen.c
@@ -214,24 +214,24 @@
 	/* already configured */
 	if (info->intf != NULL)
 		return 0;
-
+	/*
+	 * If the toolstack (or the hypervisor) hasn't set these values, the
+	 * default value is 0. Even though mfn = 0 and evtchn = 0 are
+	 * theoretically correct values, in practice they never are and they
+	 * mean that a legacy toolstack hasn't initialized the pv console correctly.
+	 */
 	r = hvm_get_parameter(HVM_PARAM_CONSOLE_EVTCHN, &v);
-	if (r < 0) {
-		kfree(info);
-		return -ENODEV;
-	}
+	if (r < 0 || v == 0)
+		goto err;
 	info->evtchn = v;
-	hvm_get_parameter(HVM_PARAM_CONSOLE_PFN, &v);
-	if (r < 0) {
-		kfree(info);
-		return -ENODEV;
-	}
+	v = 0;
+	r = hvm_get_parameter(HVM_PARAM_CONSOLE_PFN, &v);
+	if (r < 0 || v == 0)
+		goto err;
 	mfn = v;
 	info->intf = ioremap(mfn << PAGE_SHIFT, PAGE_SIZE);
-	if (info->intf == NULL) {
-		kfree(info);
-		return -ENODEV;
-	}
+	if (info->intf == NULL)
+		goto err;
 	info->vtermno = HVC_COOKIE;
 
 	spin_lock(&xencons_lock);
@@ -239,6 +239,9 @@
 	spin_unlock(&xencons_lock);
 
 	return 0;
+err:
+	kfree(info);
+	return -ENODEV;
 }
 
 static int xen_pv_console_init(void)
diff --git a/drivers/tty/n_r3964.c b/drivers/tty/n_r3964.c
index 656ad93..5c6c314 100644
--- a/drivers/tty/n_r3964.c
+++ b/drivers/tty/n_r3964.c
@@ -1065,8 +1065,7 @@
 
 	TRACE_L("read()");
 
-	/* FIXME: should use a private lock */
-	tty_lock(tty);
+	tty_lock();
 
 	pClient = findClient(pInfo, task_pid(current));
 	if (pClient) {
@@ -1078,7 +1077,7 @@
 				goto unlock;
 			}
 			/* block until there is a message: */
-			wait_event_interruptible_tty(tty, pInfo->read_wait,
+			wait_event_interruptible_tty(pInfo->read_wait,
 					(pMsg = remove_msg(pInfo, pClient)));
 		}
 
@@ -1108,7 +1107,7 @@
 	}
 	ret = -EPERM;
 unlock:
-	tty_unlock(tty);
+	tty_unlock();
 	return ret;
 }
 
@@ -1157,7 +1156,7 @@
 	pHeader->locks = 0;
 	pHeader->owner = NULL;
 
-	tty_lock(tty);
+	tty_lock();
 
 	pClient = findClient(pInfo, task_pid(current));
 	if (pClient) {
@@ -1176,7 +1175,7 @@
 	add_tx_queue(pInfo, pHeader);
 	trigger_transmit(pInfo);
 
-	tty_unlock(tty);
+	tty_unlock();
 
 	return 0;
 }
diff --git a/drivers/tty/pty.c b/drivers/tty/pty.c
index 59af394..5505ffc 100644
--- a/drivers/tty/pty.c
+++ b/drivers/tty/pty.c
@@ -47,7 +47,6 @@
 	wake_up_interruptible(&tty->read_wait);
 	wake_up_interruptible(&tty->write_wait);
 	tty->packet = 0;
-	/* Review - krefs on tty_link ?? */
 	if (!tty->link)
 		return;
 	tty->link->packet = 0;
@@ -63,9 +62,9 @@
 		        mutex_unlock(&devpts_mutex);
 		}
 #endif
-		tty_unlock(tty);
+		tty_unlock();
 		tty_vhangup(tty->link);
-		tty_lock(tty);
+		tty_lock();
 	}
 }
 
@@ -623,29 +622,26 @@
 		return retval;
 
 	/* find a device that is not in use. */
-	mutex_lock(&devpts_mutex);
+	tty_lock();
 	index = devpts_new_index(inode);
+	tty_unlock();
 	if (index < 0) {
 		retval = index;
 		goto err_file;
 	}
 
-	mutex_unlock(&devpts_mutex);
-
 	mutex_lock(&tty_mutex);
 	mutex_lock(&devpts_mutex);
 	tty = tty_init_dev(ptm_driver, index);
+	mutex_unlock(&devpts_mutex);
+	tty_lock();
+	mutex_unlock(&tty_mutex);
 
 	if (IS_ERR(tty)) {
 		retval = PTR_ERR(tty);
 		goto out;
 	}
 
-	/* The tty returned here is locked so we can safely
-	   drop the mutex */
-	mutex_unlock(&devpts_mutex);
-	mutex_unlock(&tty_mutex);
-
 	set_bit(TTY_PTY_LOCK, &tty->flags); /* LOCK THE SLAVE */
 
 	tty_add_file(tty, filp);
@@ -658,17 +654,16 @@
 	if (retval)
 		goto err_release;
 
-	tty_unlock(tty);
+	tty_unlock();
 	return 0;
 err_release:
-	tty_unlock(tty);
+	tty_unlock();
 	tty_release(inode, filp);
 	return retval;
 out:
-	mutex_unlock(&tty_mutex);
 	devpts_kill_index(inode, index);
+	tty_unlock();
 err_file:
-        mutex_unlock(&devpts_mutex);
 	tty_free_file(filp);
 	return retval;
 }
diff --git a/drivers/tty/serial/8250/8250.c b/drivers/tty/serial/8250/8250.c
index 47d061b..6e1958a 100644
--- a/drivers/tty/serial/8250/8250.c
+++ b/drivers/tty/serial/8250/8250.c
@@ -3113,7 +3113,7 @@
 
 /**
  *	serial8250_register_8250_port - register a serial port
- *	@port: serial port template
+ *	@up: serial port template
  *
  *	Configure the serial port specified by the request. If the
  *	port exists and is in use, it is hung up and unregistered
diff --git a/drivers/tty/serial/amba-pl011.c b/drivers/tty/serial/amba-pl011.c
index 4ad721f..c17923e 100644
--- a/drivers/tty/serial/amba-pl011.c
+++ b/drivers/tty/serial/amba-pl011.c
@@ -133,6 +133,10 @@
 struct uart_amba_port {
 	struct uart_port	port;
 	struct clk		*clk;
+	/* Two optional pin states - default & sleep */
+	struct pinctrl		*pinctrl;
+	struct pinctrl_state	*pins_default;
+	struct pinctrl_state	*pins_sleep;
 	const struct vendor_data *vendor;
 	unsigned int		dmacr;		/* dma control reg */
 	unsigned int		im;		/* interrupt mask */
@@ -1312,6 +1316,14 @@
 	unsigned int cr;
 	int retval;
 
+	/* Optionally enable pins to be muxed in and configured */
+	if (!IS_ERR(uap->pins_default)) {
+		retval = pinctrl_select_state(uap->pinctrl, uap->pins_default);
+		if (retval)
+			dev_err(port->dev,
+				"could not set default pins\n");
+	}
+
 	retval = clk_prepare(uap->clk);
 	if (retval)
 		goto out;
@@ -1420,6 +1432,7 @@
 {
 	struct uart_amba_port *uap = (struct uart_amba_port *)port;
 	unsigned int cr;
+	int retval;
 
 	/*
 	 * disable all interrupts
@@ -1462,6 +1475,14 @@
 	 */
 	clk_disable(uap->clk);
 	clk_unprepare(uap->clk);
+	/* Optionally let pins go into sleep states */
+	if (!IS_ERR(uap->pins_sleep)) {
+		retval = pinctrl_select_state(uap->pinctrl, uap->pins_sleep);
+		if (retval)
+			dev_err(port->dev,
+				"could not set pins to sleep state\n");
+	}
+
 
 	if (uap->port.dev->platform_data) {
 		struct amba_pl011_data *plat;
@@ -1792,6 +1813,14 @@
 	if (!uap)
 		return -ENODEV;
 
+	/* Allow pins to be muxed in and configured */
+	if (!IS_ERR(uap->pins_default)) {
+		ret = pinctrl_select_state(uap->pinctrl, uap->pins_default);
+		if (ret)
+			dev_err(uap->port.dev,
+				"could not set default pins\n");
+	}
+
 	ret = clk_prepare(uap->clk);
 	if (ret)
 		return ret;
@@ -1844,7 +1873,6 @@
 {
 	struct uart_amba_port *uap;
 	struct vendor_data *vendor = id->data;
-	struct pinctrl *pinctrl;
 	void __iomem *base;
 	int i, ret;
 
@@ -1869,11 +1897,20 @@
 		goto free;
 	}
 
-	pinctrl = devm_pinctrl_get_select_default(&dev->dev);
-	if (IS_ERR(pinctrl)) {
-		ret = PTR_ERR(pinctrl);
+	uap->pinctrl = devm_pinctrl_get(&dev->dev);
+	if (IS_ERR(uap->pinctrl)) {
+		ret = PTR_ERR(uap->pinctrl);
 		goto unmap;
 	}
+	uap->pins_default = pinctrl_lookup_state(uap->pinctrl,
+						 PINCTRL_STATE_DEFAULT);
+	if (IS_ERR(uap->pins_default))
+		dev_err(&dev->dev, "could not get default pinstate\n");
+
+	uap->pins_sleep = pinctrl_lookup_state(uap->pinctrl,
+					       PINCTRL_STATE_SLEEP);
+	if (IS_ERR(uap->pins_sleep))
+		dev_dbg(&dev->dev, "could not get sleep pinstate\n");
 
 	uap->clk = clk_get(&dev->dev, NULL);
 	if (IS_ERR(uap->clk)) {
diff --git a/drivers/tty/serial/crisv10.c b/drivers/tty/serial/crisv10.c
index 7264d4d..80b6b1b 100644
--- a/drivers/tty/serial/crisv10.c
+++ b/drivers/tty/serial/crisv10.c
@@ -3976,7 +3976,7 @@
 	 */
 	if (tty_hung_up_p(filp) ||
 	    (info->flags & ASYNC_CLOSING)) {
-		wait_event_interruptible_tty(tty, info->close_wait,
+		wait_event_interruptible_tty(info->close_wait,
 			!(info->flags & ASYNC_CLOSING));
 #ifdef SERIAL_DO_RESTART
 		if (info->flags & ASYNC_HUP_NOTIFY)
@@ -4052,9 +4052,9 @@
 		printk("block_til_ready blocking: ttyS%d, count = %d\n",
 		       info->line, info->count);
 #endif
-		tty_unlock(tty);
+		tty_unlock();
 		schedule();
-		tty_lock(tty);
+		tty_lock();
 	}
 	set_current_state(TASK_RUNNING);
 	remove_wait_queue(&info->open_wait, &wait);
@@ -4115,7 +4115,7 @@
 	 */
 	if (tty_hung_up_p(filp) ||
 	    (info->flags & ASYNC_CLOSING)) {
-		wait_event_interruptible_tty(tty, info->close_wait,
+		wait_event_interruptible_tty(info->close_wait,
 			!(info->flags & ASYNC_CLOSING));
 #ifdef SERIAL_DO_RESTART
 		return ((info->flags & ASYNC_HUP_NOTIFY) ?
diff --git a/drivers/tty/serial/imx.c b/drivers/tty/serial/imx.c
index ec20673..4ef7473 100644
--- a/drivers/tty/serial/imx.c
+++ b/drivers/tty/serial/imx.c
@@ -205,7 +205,8 @@
 	unsigned int		irda_inv_rx:1;
 	unsigned int		irda_inv_tx:1;
 	unsigned short		trcv_delay; /* transceiver delay */
-	struct clk		*clk;
+	struct clk		*clk_ipg;
+	struct clk		*clk_per;
 	struct imx_uart_data	*devdata;
 };
 
@@ -673,7 +674,7 @@
 	 * RFDIV is set such way to satisfy requested uartclk value
 	 */
 	val = TXTL << 10 | RXTL;
-	ufcr_rfdiv = (clk_get_rate(sport->clk) + sport->port.uartclk / 2)
+	ufcr_rfdiv = (clk_get_rate(sport->clk_per) + sport->port.uartclk / 2)
 			/ sport->port.uartclk;
 
 	if(!ufcr_rfdiv)
@@ -1286,7 +1287,7 @@
 		else
 			ucfr_rfdiv = 6 - ucfr_rfdiv;
 
-		uartclk = clk_get_rate(sport->clk);
+		uartclk = clk_get_rate(sport->clk_per);
 		uartclk /= ucfr_rfdiv;
 
 		{	/*
@@ -1511,14 +1512,22 @@
 		goto unmap;
 	}
 
-	sport->clk = clk_get(&pdev->dev, "uart");
-	if (IS_ERR(sport->clk)) {
-		ret = PTR_ERR(sport->clk);
+	sport->clk_ipg = devm_clk_get(&pdev->dev, "ipg");
+	if (IS_ERR(sport->clk_ipg)) {
+		ret = PTR_ERR(sport->clk_ipg);
 		goto unmap;
 	}
-	clk_prepare_enable(sport->clk);
 
-	sport->port.uartclk = clk_get_rate(sport->clk);
+	sport->clk_per = devm_clk_get(&pdev->dev, "per");
+	if (IS_ERR(sport->clk_per)) {
+		ret = PTR_ERR(sport->clk_per);
+		goto unmap;
+	}
+
+	clk_prepare_enable(sport->clk_per);
+	clk_prepare_enable(sport->clk_ipg);
+
+	sport->port.uartclk = clk_get_rate(sport->clk_per);
 
 	imx_ports[sport->port.line] = sport;
 
@@ -1539,8 +1548,8 @@
 	if (pdata && pdata->exit)
 		pdata->exit(pdev);
 clkput:
-	clk_disable_unprepare(sport->clk);
-	clk_put(sport->clk);
+	clk_disable_unprepare(sport->clk_per);
+	clk_disable_unprepare(sport->clk_ipg);
 unmap:
 	iounmap(sport->port.membase);
 free:
@@ -1558,11 +1567,10 @@
 
 	platform_set_drvdata(pdev, NULL);
 
-	if (sport) {
-		uart_remove_one_port(&imx_reg, &sport->port);
-		clk_disable_unprepare(sport->clk);
-		clk_put(sport->clk);
-	}
+	uart_remove_one_port(&imx_reg, &sport->port);
+
+	clk_disable_unprepare(sport->clk_per);
+	clk_disable_unprepare(sport->clk_ipg);
 
 	if (pdata && pdata->exit)
 		pdata->exit(pdev);
diff --git a/drivers/tty/serial/lantiq.c b/drivers/tty/serial/lantiq.c
index 96c1cac..02da071 100644
--- a/drivers/tty/serial/lantiq.c
+++ b/drivers/tty/serial/lantiq.c
@@ -31,16 +31,19 @@
 #include <linux/tty_flip.h>
 #include <linux/serial_core.h>
 #include <linux/serial.h>
-#include <linux/platform_device.h>
+#include <linux/of_platform.h>
+#include <linux/of_address.h>
+#include <linux/of_irq.h>
 #include <linux/io.h>
 #include <linux/clk.h>
+#include <linux/gpio.h>
 
 #include <lantiq_soc.h>
 
 #define PORT_LTQ_ASC		111
 #define MAXPORTS		2
 #define UART_DUMMY_UER_RX	1
-#define DRVNAME			"ltq_asc"
+#define DRVNAME			"lantiq,asc"
 #ifdef __BIG_ENDIAN
 #define LTQ_ASC_TBUF		(0x0020 + 3)
 #define LTQ_ASC_RBUF		(0x0024 + 3)
@@ -114,6 +117,9 @@
 
 struct ltq_uart_port {
 	struct uart_port	port;
+	/* clock used to derive divider */
+	struct clk		*fpiclk;
+	/* clock gating of the ASC core */
 	struct clk		*clk;
 	unsigned int		tx_irq;
 	unsigned int		rx_irq;
@@ -316,7 +322,9 @@
 	struct ltq_uart_port *ltq_port = to_ltq_uart_port(port);
 	int retval;
 
-	port->uartclk = clk_get_rate(ltq_port->clk);
+	if (ltq_port->clk)
+		clk_enable(ltq_port->clk);
+	port->uartclk = clk_get_rate(ltq_port->fpiclk);
 
 	ltq_w32_mask(ASCCLC_DISS | ASCCLC_RMCMASK, (1 << ASCCLC_RMCOFFSET),
 		port->membase + LTQ_ASC_CLC);
@@ -382,6 +390,8 @@
 		port->membase + LTQ_ASC_RXFCON);
 	ltq_w32_mask(ASCTXFCON_TXFEN, ASCTXFCON_TXFFLU,
 		port->membase + LTQ_ASC_TXFCON);
+	if (ltq_port->clk)
+		clk_disable(ltq_port->clk);
 }
 
 static void
@@ -630,7 +640,7 @@
 
 	port = &ltq_port->port;
 
-	port->uartclk = clk_get_rate(ltq_port->clk);
+	port->uartclk = clk_get_rate(ltq_port->fpiclk);
 
 	if (options)
 		uart_parse_options(options, &baud, &parity, &bits, &flow);
@@ -668,37 +678,32 @@
 static int __init
 lqasc_probe(struct platform_device *pdev)
 {
+	struct device_node *node = pdev->dev.of_node;
 	struct ltq_uart_port *ltq_port;
 	struct uart_port *port;
-	struct resource *mmres, *irqres;
-	int tx_irq, rx_irq, err_irq;
-	struct clk *clk;
+	struct resource *mmres, irqres[3];
+	int line = 0;
 	int ret;
 
 	mmres = platform_get_resource(pdev, IORESOURCE_MEM, 0);
-	irqres = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
-	if (!mmres || !irqres)
+	ret = of_irq_to_resource_table(node, irqres, 3);
+	if (!mmres || (ret != 3)) {
+		dev_err(&pdev->dev,
+			"failed to get memory/irq for serial port\n");
 		return -ENODEV;
-
-	if (pdev->id >= MAXPORTS)
-		return -EBUSY;
-
-	if (lqasc_port[pdev->id] != NULL)
-		return -EBUSY;
-
-	clk = clk_get(&pdev->dev, "fpi");
-	if (IS_ERR(clk)) {
-		pr_err("failed to get fpi clk\n");
-		return -ENOENT;
 	}
 
-	tx_irq = platform_get_irq_byname(pdev, "tx");
-	rx_irq = platform_get_irq_byname(pdev, "rx");
-	err_irq = platform_get_irq_byname(pdev, "err");
-	if ((tx_irq < 0) | (rx_irq < 0) | (err_irq < 0))
-		return -ENODEV;
+	/* check if this is the console port */
+	if (mmres->start != CPHYSADDR(LTQ_EARLY_ASC))
+		line = 1;
 
-	ltq_port = kzalloc(sizeof(struct ltq_uart_port), GFP_KERNEL);
+	if (lqasc_port[line]) {
+		dev_err(&pdev->dev, "port %d already allocated\n", line);
+		return -EBUSY;
+	}
+
+	ltq_port = devm_kzalloc(&pdev->dev, sizeof(struct ltq_uart_port),
+			GFP_KERNEL);
 	if (!ltq_port)
 		return -ENOMEM;
 
@@ -709,19 +714,26 @@
 	port->ops	= &lqasc_pops;
 	port->fifosize	= 16;
 	port->type	= PORT_LTQ_ASC,
-	port->line	= pdev->id;
+	port->line	= line;
 	port->dev	= &pdev->dev;
-
-	port->irq	= tx_irq; /* unused, just to be backward-compatibe */
+	/* unused, just to be backward-compatible */
+	port->irq	= irqres[0].start;
 	port->mapbase	= mmres->start;
 
-	ltq_port->clk	= clk;
+	ltq_port->fpiclk = clk_get_fpi();
+	if (IS_ERR(ltq_port->fpiclk)) {
+		pr_err("failed to get fpi clk\n");
+		return -ENOENT;
+	}
 
-	ltq_port->tx_irq = tx_irq;
-	ltq_port->rx_irq = rx_irq;
-	ltq_port->err_irq = err_irq;
+	/* not all asc ports have clock gates, let's ignore the return code */
+	ltq_port->clk = clk_get(&pdev->dev, NULL);
 
-	lqasc_port[pdev->id] = ltq_port;
+	ltq_port->tx_irq = irqres[0].start;
+	ltq_port->rx_irq = irqres[1].start;
+	ltq_port->err_irq = irqres[2].start;
+
+	lqasc_port[line] = ltq_port;
 	platform_set_drvdata(pdev, ltq_port);
 
 	ret = uart_add_one_port(&lqasc_reg, port);
@@ -729,10 +741,17 @@
 	return ret;
 }
 
+static const struct of_device_id ltq_asc_match[] = {
+	{ .compatible = DRVNAME },
+	{},
+};
+MODULE_DEVICE_TABLE(of, ltq_asc_match);
+
 static struct platform_driver lqasc_driver = {
 	.driver		= {
 		.name	= DRVNAME,
 		.owner	= THIS_MODULE,
+		.of_match_table = ltq_asc_match,
 	},
 };
 
diff --git a/drivers/tty/serial/sb1250-duart.c b/drivers/tty/serial/sb1250-duart.c
index 0be8a2f..f76b1688 100644
--- a/drivers/tty/serial/sb1250-duart.c
+++ b/drivers/tty/serial/sb1250-duart.c
@@ -31,6 +31,7 @@
 #include <linux/interrupt.h>
 #include <linux/ioport.h>
 #include <linux/kernel.h>
+#include <linux/module.h>
 #include <linux/major.h>
 #include <linux/serial.h>
 #include <linux/serial_core.h>
diff --git a/drivers/tty/serial/serial_txx9.c b/drivers/tty/serial/serial_txx9.c
index 34bd345..6ae2a58 100644
--- a/drivers/tty/serial/serial_txx9.c
+++ b/drivers/tty/serial/serial_txx9.c
@@ -466,7 +466,7 @@
 	spin_unlock_irqrestore(&up->port.lock, flags);
 }
 
-#if defined(CONFIG_SERIAL_TXX9_CONSOLE) || (CONFIG_CONSOLE_POLL)
+#if defined(CONFIG_SERIAL_TXX9_CONSOLE) || defined(CONFIG_CONSOLE_POLL)
 /*
  *	Wait for transmitter & holding register to empty
  */
diff --git a/drivers/tty/serial/sh-sci.c b/drivers/tty/serial/sh-sci.c
index 4604153..1bd9163 100644
--- a/drivers/tty/serial/sh-sci.c
+++ b/drivers/tty/serial/sh-sci.c
@@ -2179,6 +2179,16 @@
 	return 0;
 }
 
+static void sci_cleanup_single(struct sci_port *port)
+{
+	sci_free_gpios(port);
+
+	clk_put(port->iclk);
+	clk_put(port->fclk);
+
+	pm_runtime_disable(port->port.dev);
+}
+
 #ifdef CONFIG_SERIAL_SH_SCI_CONSOLE
 static void serial_console_putchar(struct uart_port *port, int ch)
 {
@@ -2360,14 +2370,10 @@
 	cpufreq_unregister_notifier(&port->freq_transition,
 				    CPUFREQ_TRANSITION_NOTIFIER);
 
-	sci_free_gpios(port);
-
 	uart_remove_one_port(&sci_uart_driver, &port->port);
 
-	clk_put(port->iclk);
-	clk_put(port->fclk);
+	sci_cleanup_single(port);
 
-	pm_runtime_disable(&dev->dev);
 	return 0;
 }
 
@@ -2385,14 +2391,20 @@
 			   index+1, SCI_NPORTS);
 		dev_notice(&dev->dev, "Consider bumping "
 			   "CONFIG_SERIAL_SH_SCI_NR_UARTS!\n");
-		return 0;
+		return -EINVAL;
 	}
 
 	ret = sci_init_single(dev, sciport, index, p);
 	if (ret)
 		return ret;
 
-	return uart_add_one_port(&sci_uart_driver, &sciport->port);
+	ret = uart_add_one_port(&sci_uart_driver, &sciport->port);
+	if (ret) {
+		sci_cleanup_single(sciport);
+		return ret;
+	}
+
+	return 0;
 }
 
 static int __devinit sci_probe(struct platform_device *dev)
@@ -2413,24 +2425,22 @@
 
 	ret = sci_probe_single(dev, dev->id, p, sp);
 	if (ret)
-		goto err_unreg;
+		return ret;
 
 	sp->freq_transition.notifier_call = sci_notifier;
 
 	ret = cpufreq_register_notifier(&sp->freq_transition,
 					CPUFREQ_TRANSITION_NOTIFIER);
-	if (unlikely(ret < 0))
-		goto err_unreg;
+	if (unlikely(ret < 0)) {
+		sci_cleanup_single(sp);
+		return ret;
+	}
 
 #ifdef CONFIG_SH_STANDARD_BIOS
 	sh_bios_gdb_detach();
 #endif
 
 	return 0;
-
-err_unreg:
-	sci_remove(dev);
-	return ret;
 }
 
 static int sci_suspend(struct device *dev)
diff --git a/drivers/tty/serial/zs.c b/drivers/tty/serial/zs.c
index 4001eee..92c00b2 100644
--- a/drivers/tty/serial/zs.c
+++ b/drivers/tty/serial/zs.c
@@ -57,6 +57,7 @@
 #include <linux/ioport.h>
 #include <linux/irqflags.h>
 #include <linux/kernel.h>
+#include <linux/module.h>
 #include <linux/major.h>
 #include <linux/serial.h>
 #include <linux/serial_core.h>
diff --git a/drivers/tty/synclink.c b/drivers/tty/synclink.c
index 5ed0daa..593d40a 100644
--- a/drivers/tty/synclink.c
+++ b/drivers/tty/synclink.c
@@ -3338,9 +3338,9 @@
 			printk("%s(%d):block_til_ready blocking on %s count=%d\n",
 				 __FILE__,__LINE__, tty->driver->name, port->count );
 				 
-		tty_unlock(tty);
+		tty_unlock();
 		schedule();
-		tty_lock(tty);
+		tty_lock();
 	}
 	
 	set_current_state(TASK_RUNNING);
diff --git a/drivers/tty/synclink_gt.c b/drivers/tty/synclink_gt.c
index 45b43f1..aa1debf 100644
--- a/drivers/tty/synclink_gt.c
+++ b/drivers/tty/synclink_gt.c
@@ -3336,9 +3336,9 @@
 		}
 
 		DBGINFO(("%s block_til_ready wait\n", tty->driver->name));
-		tty_unlock(tty);
+		tty_unlock();
 		schedule();
-		tty_lock(tty);
+		tty_lock();
 	}
 
 	set_current_state(TASK_RUNNING);
diff --git a/drivers/tty/synclinkmp.c b/drivers/tty/synclinkmp.c
index 4a1e4f0..a3dddc1 100644
--- a/drivers/tty/synclinkmp.c
+++ b/drivers/tty/synclinkmp.c
@@ -3357,9 +3357,9 @@
 			printk("%s(%d):%s block_til_ready() count=%d\n",
 				 __FILE__,__LINE__, tty->driver->name, port->count );
 
-		tty_unlock(tty);
+		tty_unlock();
 		schedule();
-		tty_lock(tty);
+		tty_lock();
 	}
 
 	set_current_state(TASK_RUNNING);
diff --git a/drivers/tty/tty_io.c b/drivers/tty/tty_io.c
index 9e930c0..b425c79 100644
--- a/drivers/tty/tty_io.c
+++ b/drivers/tty/tty_io.c
@@ -185,7 +185,6 @@
 		put_device(tty->dev);
 	kfree(tty->write_buf);
 	tty_buffer_free_all(tty);
-	tty->magic = 0xDEADDEAD;
 	kfree(tty);
 }
 
@@ -574,7 +573,7 @@
 	}
 	spin_unlock(&redirect_lock);
 
-	tty_lock(tty);
+	tty_lock();
 
 	/* some functions below drop BTM, so we need this bit */
 	set_bit(TTY_HUPPING, &tty->flags);
@@ -667,7 +666,7 @@
 	clear_bit(TTY_HUPPING, &tty->flags);
 	tty_ldisc_enable(tty);
 
-	tty_unlock(tty);
+	tty_unlock();
 
 	if (f)
 		fput(f);
@@ -1104,12 +1103,12 @@
 {
 	if (tty) {
 		mutex_lock(&tty->atomic_write_lock);
-		tty_lock(tty);
+		tty_lock();
 		if (tty->ops->write && !test_bit(TTY_CLOSING, &tty->flags)) {
-			tty_unlock(tty);
+			tty_unlock();
 			tty->ops->write(tty, msg, strlen(msg));
 		} else
-			tty_unlock(tty);
+			tty_unlock();
 		tty_write_unlock(tty);
 	}
 	return;
@@ -1404,7 +1403,6 @@
 	}
 	initialize_tty_struct(tty, driver, idx);
 
-	tty_lock(tty);
 	retval = tty_driver_install_tty(driver, tty);
 	if (retval < 0)
 		goto err_deinit_tty;
@@ -1417,11 +1415,9 @@
 	retval = tty_ldisc_setup(tty, tty->link);
 	if (retval)
 		goto err_release_tty;
-	/* Return the tty locked so that it cannot vanish under the caller */
 	return tty;
 
 err_deinit_tty:
-	tty_unlock(tty);
 	deinitialize_tty_struct(tty);
 	free_tty_struct(tty);
 err_module_put:
@@ -1430,7 +1426,6 @@
 
 	/* call the tty release_tty routine to clean out this slot */
 err_release_tty:
-	tty_unlock(tty);
 	printk_ratelimited(KERN_INFO "tty_init_dev: ldisc open failed, "
 				 "clearing slot %d\n", idx);
 	release_tty(tty, idx);
@@ -1633,7 +1628,7 @@
 	if (tty_paranoia_check(tty, inode, __func__))
 		return 0;
 
-	tty_lock(tty);
+	tty_lock();
 	check_tty_count(tty, __func__);
 
 	__tty_fasync(-1, filp, 0);
@@ -1642,11 +1637,10 @@
 	pty_master = (tty->driver->type == TTY_DRIVER_TYPE_PTY &&
 		      tty->driver->subtype == PTY_TYPE_MASTER);
 	devpts = (tty->driver->flags & TTY_DRIVER_DEVPTS_MEM) != 0;
-	/* Review: parallel close */
 	o_tty = tty->link;
 
 	if (tty_release_checks(tty, o_tty, idx)) {
-		tty_unlock(tty);
+		tty_unlock();
 		return 0;
 	}
 
@@ -1658,7 +1652,7 @@
 	if (tty->ops->close)
 		tty->ops->close(tty, filp);
 
-	tty_unlock(tty);
+	tty_unlock();
 	/*
 	 * Sanity check: if tty->count is going to zero, there shouldn't be
 	 * any waiters on tty->read_wait or tty->write_wait.  We test the
@@ -1681,7 +1675,7 @@
 		   opens on /dev/tty */
 
 		mutex_lock(&tty_mutex);
-		tty_lock_pair(tty, o_tty);
+		tty_lock();
 		tty_closing = tty->count <= 1;
 		o_tty_closing = o_tty &&
 			(o_tty->count <= (pty_master ? 1 : 0));
@@ -1712,7 +1706,7 @@
 
 		printk(KERN_WARNING "%s: %s: read/write wait queue active!\n",
 				__func__, tty_name(tty, buf));
-		tty_unlock_pair(tty, o_tty);
+		tty_unlock();
 		mutex_unlock(&tty_mutex);
 		schedule();
 	}
@@ -1775,7 +1769,7 @@
 
 	/* check whether both sides are closing ... */
 	if (!tty_closing || (o_tty && !o_tty_closing)) {
-		tty_unlock_pair(tty, o_tty);
+		tty_unlock();
 		return 0;
 	}
 
@@ -1788,16 +1782,14 @@
 	tty_ldisc_release(tty, o_tty);
 	/*
 	 * The release_tty function takes care of the details of clearing
-	 * the slots and preserving the termios structure. The tty_unlock_pair
-	 * should be safe as we keep a kref while the tty is locked (so the
-	 * unlock never unlocks a freed tty).
+	 * the slots and preserving the termios structure.
 	 */
 	release_tty(tty, idx);
-	tty_unlock_pair(tty, o_tty);
 
 	/* Make this pty number available for reallocation */
 	if (devpts)
 		devpts_kill_index(inode, idx);
+	tty_unlock();
 	return 0;
 }
 
@@ -1901,9 +1893,6 @@
  *	Locking: tty_mutex protects tty, tty_lookup_driver and tty_init_dev.
  *		 tty->count should protect the rest.
  *		 ->siglock protects ->signal/->sighand
- *
- *	Note: the tty_unlock/lock cases without a ref are only safe due to
- *	tty_mutex
  */
 
 static int tty_open(struct inode *inode, struct file *filp)
@@ -1927,7 +1916,8 @@
 	retval = 0;
 
 	mutex_lock(&tty_mutex);
-	/* This is protected by the tty_mutex */
+	tty_lock();
+
 	tty = tty_open_current_tty(device, filp);
 	if (IS_ERR(tty)) {
 		retval = PTR_ERR(tty);
@@ -1948,19 +1938,17 @@
 	}
 
 	if (tty) {
-		tty_lock(tty);
 		retval = tty_reopen(tty);
-		if (retval < 0) {
-			tty_unlock(tty);
+		if (retval)
 			tty = ERR_PTR(retval);
-		}
-	} else	/* Returns with the tty_lock held for now */
+	} else
 		tty = tty_init_dev(driver, index);
 
 	mutex_unlock(&tty_mutex);
 	if (driver)
 		tty_driver_kref_put(driver);
 	if (IS_ERR(tty)) {
+		tty_unlock();
 		retval = PTR_ERR(tty);
 		goto err_file;
 	}
@@ -1989,7 +1977,7 @@
 		printk(KERN_DEBUG "%s: error %d in opening %s...\n", __func__,
 				retval, tty->name);
 #endif
-		tty_unlock(tty); /* need to call tty_release without BTM */
+		tty_unlock(); /* need to call tty_release without BTM */
 		tty_release(inode, filp);
 		if (retval != -ERESTARTSYS)
 			return retval;
@@ -2001,15 +1989,17 @@
 		/*
 		 * Need to reset f_op in case a hangup happened.
 		 */
+		tty_lock();
 		if (filp->f_op == &hung_up_tty_fops)
 			filp->f_op = &tty_fops;
+		tty_unlock();
 		goto retry_open;
 	}
-	tty_unlock(tty);
+	tty_unlock();
 
 
 	mutex_lock(&tty_mutex);
-	tty_lock(tty);
+	tty_lock();
 	spin_lock_irq(&current->sighand->siglock);
 	if (!noctty &&
 	    current->signal->leader &&
@@ -2017,10 +2007,11 @@
 	    tty->session == NULL)
 		__proc_set_tty(current, tty);
 	spin_unlock_irq(&current->sighand->siglock);
-	tty_unlock(tty);
+	tty_unlock();
 	mutex_unlock(&tty_mutex);
 	return 0;
 err_unlock:
+	tty_unlock();
 	mutex_unlock(&tty_mutex);
 	/* after locks to avoid deadlock */
 	if (!IS_ERR_OR_NULL(driver))
@@ -2103,13 +2094,10 @@
 
 static int tty_fasync(int fd, struct file *filp, int on)
 {
-	struct tty_struct *tty = file_tty(filp);
 	int retval;
-
-	tty_lock(tty);
+	tty_lock();
 	retval = __tty_fasync(fd, filp, on);
-	tty_unlock(tty);
-
+	tty_unlock();
 	return retval;
 }
 
@@ -2946,7 +2934,6 @@
 	tty->pgrp = NULL;
 	tty->overrun_time = jiffies;
 	tty_buffer_init(tty);
-	mutex_init(&tty->legacy_mutex);
 	mutex_init(&tty->termios_mutex);
 	mutex_init(&tty->ldisc_mutex);
 	init_waitqueue_head(&tty->write_wait);
diff --git a/drivers/tty/tty_ldisc.c b/drivers/tty/tty_ldisc.c
index 173a900..9911eb6 100644
--- a/drivers/tty/tty_ldisc.c
+++ b/drivers/tty/tty_ldisc.c
@@ -568,7 +568,7 @@
 	if (IS_ERR(new_ldisc))
 		return PTR_ERR(new_ldisc);
 
-	tty_lock(tty);
+	tty_lock();
 	/*
 	 *	We need to look at the tty locking here for pty/tty pairs
 	 *	when both sides try to change in parallel.
@@ -582,12 +582,12 @@
 	 */
 
 	if (tty->ldisc->ops->num == ldisc) {
-		tty_unlock(tty);
+		tty_unlock();
 		tty_ldisc_put(new_ldisc);
 		return 0;
 	}
 
-	tty_unlock(tty);
+	tty_unlock();
 	/*
 	 *	Problem: What do we do if this blocks ?
 	 *	We could deadlock here
@@ -595,7 +595,7 @@
 
 	tty_wait_until_sent(tty, 0);
 
-	tty_lock(tty);
+	tty_lock();
 	mutex_lock(&tty->ldisc_mutex);
 
 	/*
@@ -605,10 +605,10 @@
 
 	while (test_bit(TTY_LDISC_CHANGING, &tty->flags)) {
 		mutex_unlock(&tty->ldisc_mutex);
-		tty_unlock(tty);
+		tty_unlock();
 		wait_event(tty_ldisc_wait,
 			test_bit(TTY_LDISC_CHANGING, &tty->flags) == 0);
-		tty_lock(tty);
+		tty_lock();
 		mutex_lock(&tty->ldisc_mutex);
 	}
 
@@ -623,7 +623,7 @@
 
 	o_ldisc = tty->ldisc;
 
-	tty_unlock(tty);
+	tty_unlock();
 	/*
 	 *	Make sure we don't change while someone holds a
 	 *	reference to the line discipline. The TTY_LDISC bit
@@ -650,7 +650,7 @@
 
 	retval = tty_ldisc_wait_idle(tty, 5 * HZ);
 
-	tty_lock(tty);
+	tty_lock();
 	mutex_lock(&tty->ldisc_mutex);
 
 	/* handle wait idle failure locked */
@@ -665,7 +665,7 @@
 		clear_bit(TTY_LDISC_CHANGING, &tty->flags);
 		mutex_unlock(&tty->ldisc_mutex);
 		tty_ldisc_put(new_ldisc);
-		tty_unlock(tty);
+		tty_unlock();
 		return -EIO;
 	}
 
@@ -708,7 +708,7 @@
 	if (o_work)
 		schedule_work(&o_tty->buf.work);
 	mutex_unlock(&tty->ldisc_mutex);
-	tty_unlock(tty);
+	tty_unlock();
 	return retval;
 }
 
@@ -816,11 +816,11 @@
 	 * need to wait for another function taking the BTM
 	 */
 	clear_bit(TTY_LDISC, &tty->flags);
-	tty_unlock(tty);
+	tty_unlock();
 	cancel_work_sync(&tty->buf.work);
 	mutex_unlock(&tty->ldisc_mutex);
 retry:
-	tty_lock(tty);
+	tty_lock();
 	mutex_lock(&tty->ldisc_mutex);
 
 	/* At this point we have a closed ldisc and we want to
@@ -831,7 +831,7 @@
 		if (atomic_read(&tty->ldisc->users) != 1) {
 			char cur_n[TASK_COMM_LEN], tty_n[64];
 			long timeout = 3 * HZ;
-			tty_unlock(tty);
+			tty_unlock();
 
 			while (tty_ldisc_wait_idle(tty, timeout) == -EBUSY) {
 				timeout = MAX_SCHEDULE_TIMEOUT;
@@ -912,10 +912,10 @@
 	 * race with the set_ldisc code path.
 	 */
 
-	tty_unlock(tty);
+	tty_unlock();
 	tty_ldisc_halt(tty);
 	tty_ldisc_flush_works(tty);
-	tty_lock(tty);
+	tty_lock();
 
 	mutex_lock(&tty->ldisc_mutex);
 	/*
diff --git a/drivers/tty/tty_mutex.c b/drivers/tty/tty_mutex.c
index 69adc80..9ff986c 100644
--- a/drivers/tty/tty_mutex.c
+++ b/drivers/tty/tty_mutex.c
@@ -4,59 +4,29 @@
 #include <linux/semaphore.h>
 #include <linux/sched.h>
 
-/* Legacy tty mutex glue */
+/*
+ * The 'big tty mutex'
+ *
+ * This mutex is taken and released by tty_lock() and tty_unlock(),
+ * replacing the older big kernel lock.
+ * It can no longer be taken recursively, and does not get
+ * released implicitly while sleeping.
+ *
+ * Don't use in new code.
+ */
+static DEFINE_MUTEX(big_tty_mutex);
 
 /*
  * Getting the big tty mutex.
  */
-
-void __lockfunc tty_lock(struct tty_struct *tty)
+void __lockfunc tty_lock(void)
 {
-	if (tty->magic != TTY_MAGIC) {
-		printk(KERN_ERR "L Bad %p\n", tty);
-		WARN_ON(1);
-		return;
-	}
-	tty_kref_get(tty);
-	mutex_lock(&tty->legacy_mutex);
+	mutex_lock(&big_tty_mutex);
 }
 EXPORT_SYMBOL(tty_lock);
 
-void __lockfunc tty_unlock(struct tty_struct *tty)
+void __lockfunc tty_unlock(void)
 {
-	if (tty->magic != TTY_MAGIC) {
-		printk(KERN_ERR "U Bad %p\n", tty);
-		WARN_ON(1);
-		return;
-	}
-	mutex_unlock(&tty->legacy_mutex);
-	tty_kref_put(tty);
+	mutex_unlock(&big_tty_mutex);
 }
 EXPORT_SYMBOL(tty_unlock);
-
-/*
- * Getting the big tty mutex for a pair of ttys with lock ordering
- * On a non pty/tty pair tty2 can be NULL which is just fine.
- */
-void __lockfunc tty_lock_pair(struct tty_struct *tty,
-					struct tty_struct *tty2)
-{
-	if (tty < tty2) {
-		tty_lock(tty);
-		tty_lock(tty2);
-	} else {
-		if (tty2 && tty2 != tty)
-			tty_lock(tty2);
-		tty_lock(tty);
-	}
-}
-EXPORT_SYMBOL(tty_lock_pair);
-
-void __lockfunc tty_unlock_pair(struct tty_struct *tty,
-						struct tty_struct *tty2)
-{
-	tty_unlock(tty);
-	if (tty2 && tty2 != tty)
-		tty_unlock(tty2);
-}
-EXPORT_SYMBOL(tty_unlock_pair);
diff --git a/drivers/tty/tty_port.c b/drivers/tty/tty_port.c
index d9cca95..bf6e238 100644
--- a/drivers/tty/tty_port.c
+++ b/drivers/tty/tty_port.c
@@ -230,7 +230,7 @@
 
 	/* block if port is in the process of being closed */
 	if (tty_hung_up_p(filp) || port->flags & ASYNC_CLOSING) {
-		wait_event_interruptible_tty(tty, port->close_wait,
+		wait_event_interruptible_tty(port->close_wait,
 				!(port->flags & ASYNC_CLOSING));
 		if (port->flags & ASYNC_HUP_NOTIFY)
 			return -EAGAIN;
@@ -296,9 +296,9 @@
 			retval = -ERESTARTSYS;
 			break;
 		}
-		tty_unlock(tty);
+		tty_unlock();
 		schedule();
-		tty_lock(tty);
+		tty_lock();
 	}
 	finish_wait(&port->open_wait, &wait);
 
diff --git a/drivers/usb/Makefile b/drivers/usb/Makefile
index c691eea..f5ed3d7 100644
--- a/drivers/usb/Makefile
+++ b/drivers/usb/Makefile
@@ -46,7 +46,7 @@
 obj-$(CONFIG_USB_SERIAL)	+= serial/
 
 obj-$(CONFIG_USB)		+= misc/
-obj-$(CONFIG_USB)		+= phy/
+obj-$(CONFIG_USB_COMMON)	+= phy/
 obj-$(CONFIG_EARLY_PRINTK_DBGP)	+= early/
 
 obj-$(CONFIG_USB_ATM)		+= atm/
diff --git a/drivers/usb/class/cdc-acm.c b/drivers/usb/class/cdc-acm.c
index f2a120e..36a2a0b 100644
--- a/drivers/usb/class/cdc-acm.c
+++ b/drivers/usb/class/cdc-acm.c
@@ -567,6 +567,14 @@
 
 	usb_autopm_put_interface(acm->control);
 
+	/*
+	 * Unthrottle device in case the TTY was closed while throttled.
+	 */
+	spin_lock_irq(&acm->read_lock);
+	acm->throttled = 0;
+	acm->throttle_req = 0;
+	spin_unlock_irq(&acm->read_lock);
+
 	if (acm_submit_read_urbs(acm, GFP_KERNEL))
 		goto error_submit_read_urbs;
 
diff --git a/drivers/usb/class/cdc-wdm.c b/drivers/usb/class/cdc-wdm.c
index ea8b304..8fd398d 100644
--- a/drivers/usb/class/cdc-wdm.c
+++ b/drivers/usb/class/cdc-wdm.c
@@ -55,6 +55,15 @@
 		.bInterfaceSubClass = 1,
 		.bInterfaceProtocol = 9, /* NOTE: CDC ECM control interface! */
 	},
+	{
+		 /* Vodafone/Huawei K5005 (12d1:14c8) and similar modems */
+		.match_flags        = USB_DEVICE_ID_MATCH_VENDOR |
+				      USB_DEVICE_ID_MATCH_INT_INFO,
+		.idVendor           = HUAWEI_VENDOR_ID,
+		.bInterfaceClass    = USB_CLASS_VENDOR_SPEC,
+		.bInterfaceSubClass = 1,
+		.bInterfaceProtocol = 57, /* NOTE: CDC ECM control interface! */
+	},
 	{ }
 };
 
diff --git a/drivers/usb/core/hcd-pci.c b/drivers/usb/core/hcd-pci.c
index 57ed9e4..622b4a4 100644
--- a/drivers/usb/core/hcd-pci.c
+++ b/drivers/usb/core/hcd-pci.c
@@ -493,15 +493,6 @@
 
 	pci_save_state(pci_dev);
 
-	/*
-	 * Some systems crash if an EHCI controller is in D3 during
-	 * a sleep transition.  We have to leave such controllers in D0.
-	 */
-	if (hcd->broken_pci_sleep) {
-		dev_dbg(dev, "Staying in PCI D0\n");
-		return retval;
-	}
-
 	/* If the root hub is dead rather than suspended, disallow remote
 	 * wakeup.  usb_hc_died() should ensure that both hosts are marked as
 	 * dying, so we only need to check the primary roothub.
diff --git a/drivers/usb/core/hub.c b/drivers/usb/core/hub.c
index 04fb834..25a7422 100644
--- a/drivers/usb/core/hub.c
+++ b/drivers/usb/core/hub.c
@@ -3379,7 +3379,7 @@
 		return 0;
 
 	udev->lpm_disable_count++;
-	if ((udev->u1_params.timeout == 0 && udev->u1_params.timeout == 0))
+	if ((udev->u1_params.timeout == 0 && udev->u2_params.timeout == 0))
 		return 0;
 
 	/* If LPM is enabled, attempt to disable it. */
diff --git a/drivers/usb/core/message.c b/drivers/usb/core/message.c
index b548cf1..bdd1c67 100644
--- a/drivers/usb/core/message.c
+++ b/drivers/usb/core/message.c
@@ -1838,7 +1838,6 @@
 		intfc = cp->intf_cache[i];
 		intf->altsetting = intfc->altsetting;
 		intf->num_altsetting = intfc->num_altsetting;
-		intf->intf_assoc = find_iad(dev, cp, i);
 		kref_get(&intfc->ref);
 
 		alt = usb_altnum_to_altsetting(intf, 0);
@@ -1851,6 +1850,8 @@
 		if (!alt)
 			alt = &intf->altsetting[0];
 
+		intf->intf_assoc =
+			find_iad(dev, cp, alt->desc.bInterfaceNumber);
 		intf->cur_altsetting = alt;
 		usb_enable_interface(dev, intf, true);
 		intf->dev.parent = &dev->dev;
diff --git a/drivers/usb/dwc3/gadget.c b/drivers/usb/dwc3/gadget.c
index 3df1a19..ec70df7 100644
--- a/drivers/usb/dwc3/gadget.c
+++ b/drivers/usb/dwc3/gadget.c
@@ -1091,7 +1091,7 @@
 		if (r == req) {
 			/* wait until it is processed */
 			dwc3_stop_active_transfer(dwc, dep->number);
-			goto out0;
+			goto out1;
 		}
 		dev_err(dwc->dev, "request %p was not queued to %s\n",
 				request, ep->name);
@@ -1099,6 +1099,7 @@
 		goto out0;
 	}
 
+out1:
 	/* giveback the request */
 	dwc3_gadget_giveback(dep, req, -ECONNRESET);
 
diff --git a/drivers/usb/gadget/atmel_usba_udc.c b/drivers/usb/gadget/atmel_usba_udc.c
index e23bf79..9a9bced 100644
--- a/drivers/usb/gadget/atmel_usba_udc.c
+++ b/drivers/usb/gadget/atmel_usba_udc.c
@@ -599,12 +599,6 @@
 
 	spin_lock_irqsave(&ep->udc->lock, flags);
 
-	if (ep->ep.desc) {
-		spin_unlock_irqrestore(&ep->udc->lock, flags);
-		DBG(DBG_ERR, "ep%d already enabled\n", ep->index);
-		return -EBUSY;
-	}
-
 	ep->ep.desc = desc;
 	ep->ep.maxpacket = maxpacket;
 
diff --git a/drivers/usb/gadget/fsl_qe_udc.c b/drivers/usb/gadget/fsl_qe_udc.c
index 51881f3..b09452d 100644
--- a/drivers/usb/gadget/fsl_qe_udc.c
+++ b/drivers/usb/gadget/fsl_qe_udc.c
@@ -1596,7 +1596,7 @@
 	ep = container_of(_ep, struct qe_ep, ep);
 
 	/* catch various bogus parameters */
-	if (!_ep || !desc || ep->ep.desc || _ep->name == ep_name[0] ||
+	if (!_ep || !desc || _ep->name == ep_name[0] ||
 			(desc->bDescriptorType != USB_DT_ENDPOINT))
 		return -EINVAL;
 
diff --git a/drivers/usb/gadget/fsl_qe_udc.h b/drivers/usb/gadget/fsl_qe_udc.h
index 4c07ca9..7026919 100644
--- a/drivers/usb/gadget/fsl_qe_udc.h
+++ b/drivers/usb/gadget/fsl_qe_udc.h
@@ -153,10 +153,10 @@
 #define USB_BUSMODE_DTB		0x02
 
 /* Endpoint basic handle */
-#define ep_index(EP)		((EP)->desc->bEndpointAddress & 0xF)
+#define ep_index(EP)		((EP)->ep.desc->bEndpointAddress & 0xF)
 #define ep_maxpacket(EP)	((EP)->ep.maxpacket)
 #define ep_is_in(EP)	((ep_index(EP) == 0) ? (EP->udc->ep0_dir == \
-			USB_DIR_IN) : ((EP)->desc->bEndpointAddress \
+			USB_DIR_IN) : ((EP)->ep.desc->bEndpointAddress \
 			& USB_DIR_IN) == USB_DIR_IN)
 
 /* ep0 transfer state */
diff --git a/drivers/usb/gadget/fsl_udc_core.c b/drivers/usb/gadget/fsl_udc_core.c
index 2831685..bc6f9bb 100644
--- a/drivers/usb/gadget/fsl_udc_core.c
+++ b/drivers/usb/gadget/fsl_udc_core.c
@@ -567,7 +567,7 @@
 	ep = container_of(_ep, struct fsl_ep, ep);
 
 	/* catch various bogus parameters */
-	if (!_ep || !desc || ep->ep.desc
+	if (!_ep || !desc
 			|| (desc->bDescriptorType != USB_DT_ENDPOINT))
 		return -EINVAL;
 
@@ -2575,7 +2575,7 @@
 	/* for ep0: the desc defined here;
 	 * for other eps, gadget layer called ep_enable with defined desc
 	 */
-	udc_controller->eps[0].desc = &fsl_ep0_desc;
+	udc_controller->eps[0].ep.desc = &fsl_ep0_desc;
 	udc_controller->eps[0].ep.maxpacket = USB_MAX_CTRL_PAYLOAD;
 
 	/* setup the udc->eps[] for non-control endpoints and link
diff --git a/drivers/usb/gadget/fsl_usb2_udc.h b/drivers/usb/gadget/fsl_usb2_udc.h
index 5cd7b7e..f61a967 100644
--- a/drivers/usb/gadget/fsl_usb2_udc.h
+++ b/drivers/usb/gadget/fsl_usb2_udc.h
@@ -568,10 +568,10 @@
 /*
  * ### internal used help routines.
  */
-#define ep_index(EP)		((EP)->desc->bEndpointAddress&0xF)
+#define ep_index(EP)		((EP)->ep.desc->bEndpointAddress&0xF)
 #define ep_maxpacket(EP)	((EP)->ep.maxpacket)
 #define ep_is_in(EP)	( (ep_index(EP) == 0) ? (EP->udc->ep0_dir == \
-			USB_DIR_IN ):((EP)->desc->bEndpointAddress \
+			USB_DIR_IN) : ((EP)->ep.desc->bEndpointAddress \
 			& USB_DIR_IN)==USB_DIR_IN)
 #define get_ep_by_pipe(udc, pipe)	((pipe == 1)? &udc->eps[0]: \
 					&udc->eps[pipe])
diff --git a/drivers/usb/gadget/goku_udc.c b/drivers/usb/gadget/goku_udc.c
index b241e6c..3d28fb9 100644
--- a/drivers/usb/gadget/goku_udc.c
+++ b/drivers/usb/gadget/goku_udc.c
@@ -102,7 +102,7 @@
 	unsigned long	flags;
 
 	ep = container_of(_ep, struct goku_ep, ep);
-	if (!_ep || !desc || ep->ep.desc
+	if (!_ep || !desc
 			|| desc->bDescriptorType != USB_DT_ENDPOINT)
 		return -EINVAL;
 	dev = ep->dev;
diff --git a/drivers/usb/gadget/lpc32xx_udc.c b/drivers/usb/gadget/lpc32xx_udc.c
index 262acfd..2ab0388 100644
--- a/drivers/usb/gadget/lpc32xx_udc.c
+++ b/drivers/usb/gadget/lpc32xx_udc.c
@@ -61,6 +61,7 @@
 #include <mach/irqs.h>
 #include <mach/board.h>
 #ifdef CONFIG_USB_GADGET_DEBUG_FILES
+#include <linux/debugfs.h>
 #include <linux/seq_file.h>
 #endif
 
diff --git a/drivers/usb/gadget/mv_udc_core.c b/drivers/usb/gadget/mv_udc_core.c
index dbcd132..117a4bb 100644
--- a/drivers/usb/gadget/mv_udc_core.c
+++ b/drivers/usb/gadget/mv_udc_core.c
@@ -464,7 +464,7 @@
 	ep = container_of(_ep, struct mv_ep, ep);
 	udc = ep->udc;
 
-	if (!_ep || !desc || ep->ep.desc
+	if (!_ep || !desc
 			|| desc->bDescriptorType != USB_DT_ENDPOINT)
 		return -EINVAL;
 
diff --git a/drivers/usb/gadget/omap_udc.c b/drivers/usb/gadget/omap_udc.c
index 7ba3246..a460e8c 100644
--- a/drivers/usb/gadget/omap_udc.c
+++ b/drivers/usb/gadget/omap_udc.c
@@ -153,7 +153,7 @@
 	u16		maxp;
 
 	/* catch various bogus parameters */
-	if (!_ep || !desc || ep->ep.desc
+	if (!_ep || !desc
 			|| desc->bDescriptorType != USB_DT_ENDPOINT
 			|| ep->bEndpointAddress != desc->bEndpointAddress
 			|| ep->maxpacket < usb_endpoint_maxp(desc)) {
diff --git a/drivers/usb/gadget/pxa25x_udc.c b/drivers/usb/gadget/pxa25x_udc.c
index d7c8cb3..f7ff9e8e 100644
--- a/drivers/usb/gadget/pxa25x_udc.c
+++ b/drivers/usb/gadget/pxa25x_udc.c
@@ -218,7 +218,7 @@
 	struct pxa25x_udc       *dev;
 
 	ep = container_of (_ep, struct pxa25x_ep, ep);
-	if (!_ep || !desc || ep->ep.desc || _ep->name == ep0name
+	if (!_ep || !desc || _ep->name == ep0name
 			|| desc->bDescriptorType != USB_DT_ENDPOINT
 			|| ep->bEndpointAddress != desc->bEndpointAddress
 			|| ep->fifo_size < usb_endpoint_maxp (desc)) {
diff --git a/drivers/usb/gadget/s3c-hsudc.c b/drivers/usb/gadget/s3c-hsudc.c
index 36c6836..236b271 100644
--- a/drivers/usb/gadget/s3c-hsudc.c
+++ b/drivers/usb/gadget/s3c-hsudc.c
@@ -760,7 +760,7 @@
 	u32 ecr = 0;
 
 	hsep = our_ep(_ep);
-	if (!_ep || !desc || hsep->ep.desc || _ep->name == ep0name
+	if (!_ep || !desc || _ep->name == ep0name
 		|| desc->bDescriptorType != USB_DT_ENDPOINT
 		|| hsep->bEndpointAddress != desc->bEndpointAddress
 		|| ep_maxpacket(hsep) < usb_endpoint_maxp(desc))
diff --git a/drivers/usb/gadget/s3c2410_udc.c b/drivers/usb/gadget/s3c2410_udc.c
index 3de71d3..f2e51f5 100644
--- a/drivers/usb/gadget/s3c2410_udc.c
+++ b/drivers/usb/gadget/s3c2410_udc.c
@@ -1062,7 +1062,7 @@
 
 	ep = to_s3c2410_ep(_ep);
 
-	if (!_ep || !desc || ep->ep.desc
+	if (!_ep || !desc
 			|| _ep->name == ep0name
 			|| desc->bDescriptorType != USB_DT_ENDPOINT)
 		return -EINVAL;
diff --git a/drivers/usb/host/ehci-hcd.c b/drivers/usb/host/ehci-hcd.c
index b100f5f..800be38 100644
--- a/drivers/usb/host/ehci-hcd.c
+++ b/drivers/usb/host/ehci-hcd.c
@@ -671,7 +671,9 @@
 	hw = ehci->async->hw;
 	hw->hw_next = QH_NEXT(ehci, ehci->async->qh_dma);
 	hw->hw_info1 = cpu_to_hc32(ehci, QH_HEAD);
+#if defined(CONFIG_PPC_PS3)
 	hw->hw_info1 |= cpu_to_hc32(ehci, (1 << 7));	/* I = 1 */
+#endif
 	hw->hw_token = cpu_to_hc32(ehci, QTD_STS_HALT);
 	hw->hw_qtd_next = EHCI_LIST_END(ehci);
 	ehci->async->qh_state = QH_STATE_LINKED;
diff --git a/drivers/usb/host/ehci-mxc.c b/drivers/usb/host/ehci-mxc.c
index a797d51..c778ffe 100644
--- a/drivers/usb/host/ehci-mxc.c
+++ b/drivers/usb/host/ehci-mxc.c
@@ -32,7 +32,7 @@
 #define ULPI_VIEWPORT_OFFSET	0x170
 
 struct ehci_mxc_priv {
-	struct clk *usbclk, *ahbclk, *phy1clk;
+	struct clk *usbclk, *ahbclk, *phyclk;
 	struct usb_hcd *hcd;
 };
 
@@ -166,31 +166,26 @@
 	}
 
 	/* enable clocks */
-	priv->usbclk = clk_get(dev, "usb");
+	priv->usbclk = clk_get(dev, "ipg");
 	if (IS_ERR(priv->usbclk)) {
 		ret = PTR_ERR(priv->usbclk);
 		goto err_clk;
 	}
-	clk_enable(priv->usbclk);
+	clk_prepare_enable(priv->usbclk);
 
-	if (!cpu_is_mx35() && !cpu_is_mx25()) {
-		priv->ahbclk = clk_get(dev, "usb_ahb");
-		if (IS_ERR(priv->ahbclk)) {
-			ret = PTR_ERR(priv->ahbclk);
-			goto err_clk_ahb;
-		}
-		clk_enable(priv->ahbclk);
+	priv->ahbclk = clk_get(dev, "ahb");
+	if (IS_ERR(priv->ahbclk)) {
+		ret = PTR_ERR(priv->ahbclk);
+		goto err_clk_ahb;
 	}
+	clk_prepare_enable(priv->ahbclk);
 
 	/* "dr" device has its own clock on i.MX51 */
-	if (cpu_is_mx51() && (pdev->id == 0)) {
-		priv->phy1clk = clk_get(dev, "usb_phy1");
-		if (IS_ERR(priv->phy1clk)) {
-			ret = PTR_ERR(priv->phy1clk);
-			goto err_clk_phy;
-		}
-		clk_enable(priv->phy1clk);
-	}
+	priv->phyclk = clk_get(dev, "phy");
+	if (IS_ERR(priv->phyclk))
+		priv->phyclk = NULL;
+	if (priv->phyclk)
+		clk_prepare_enable(priv->phyclk);
 
 
 	/* call platform specific init function */
@@ -265,17 +260,15 @@
 	if (pdata && pdata->exit)
 		pdata->exit(pdev);
 err_init:
-	if (priv->phy1clk) {
-		clk_disable(priv->phy1clk);
-		clk_put(priv->phy1clk);
+	if (priv->phyclk) {
+		clk_disable_unprepare(priv->phyclk);
+		clk_put(priv->phyclk);
 	}
-err_clk_phy:
-	if (priv->ahbclk) {
-		clk_disable(priv->ahbclk);
-		clk_put(priv->ahbclk);
-	}
+
+	clk_disable_unprepare(priv->ahbclk);
+	clk_put(priv->ahbclk);
 err_clk_ahb:
-	clk_disable(priv->usbclk);
+	clk_disable_unprepare(priv->usbclk);
 	clk_put(priv->usbclk);
 err_clk:
 	iounmap(hcd->regs);
@@ -307,15 +300,14 @@
 	usb_put_hcd(hcd);
 	platform_set_drvdata(pdev, NULL);
 
-	clk_disable(priv->usbclk);
+	clk_disable_unprepare(priv->usbclk);
 	clk_put(priv->usbclk);
-	if (priv->ahbclk) {
-		clk_disable(priv->ahbclk);
-		clk_put(priv->ahbclk);
-	}
-	if (priv->phy1clk) {
-		clk_disable(priv->phy1clk);
-		clk_put(priv->phy1clk);
+	clk_disable_unprepare(priv->ahbclk);
+	clk_put(priv->ahbclk);
+
+	if (priv->phyclk) {
+		clk_disable_unprepare(priv->phyclk);
+		clk_put(priv->phyclk);
 	}
 
 	kfree(priv);
diff --git a/drivers/usb/host/ehci-omap.c b/drivers/usb/host/ehci-omap.c
index a44294d..17cfb8a 100644
--- a/drivers/usb/host/ehci-omap.c
+++ b/drivers/usb/host/ehci-omap.c
@@ -43,6 +43,7 @@
 #include <linux/regulator/consumer.h>
 #include <linux/pm_runtime.h>
 #include <linux/gpio.h>
+#include <linux/clk.h>
 
 /* EHCI Register Set */
 #define EHCI_INSNREG04					(0xA0)
@@ -55,6 +56,15 @@
 #define	EHCI_INSNREG05_ULPI_EXTREGADD_SHIFT		8
 #define	EHCI_INSNREG05_ULPI_WRDATA_SHIFT		0
 
+/* Erratum i693 */
+static struct clk	*utmi_p1_fck;
+static struct clk	*utmi_p2_fck;
+static struct clk	*xclk60mhsp1_ck;
+static struct clk	*xclk60mhsp2_ck;
+static struct clk	*usbhost_p1_fck;
+static struct clk	*usbhost_p2_fck;
+static struct clk	*init_60m_fclk;
+
 /*-------------------------------------------------------------------------*/
 
 static const struct hc_driver ehci_omap_hc_driver;
@@ -70,6 +80,41 @@
 	return __raw_readl(base + reg);
 }
 
+/* Erratum i693 workaround sequence */
+static void omap_ehci_erratum_i693(struct ehci_hcd *ehci)
+{
+	int ret = 0;
+
+	/* Switch to the internal 60 MHz clock */
+	ret = clk_set_parent(utmi_p1_fck, init_60m_fclk);
+	if (ret != 0)
+		ehci_err(ehci, "init_60m_fclk set parent"
+			"failed error:%d\n", ret);
+
+	ret = clk_set_parent(utmi_p2_fck, init_60m_fclk);
+	if (ret != 0)
+		ehci_err(ehci, "init_60m_fclk set parent"
+			"failed error:%d\n", ret);
+
+	clk_enable(usbhost_p1_fck);
+	clk_enable(usbhost_p2_fck);
+
+	/* Wait 1ms and switch back to the external clock */
+	mdelay(1);
+	ret = clk_set_parent(utmi_p1_fck, xclk60mhsp1_ck);
+	if (ret != 0)
+		ehci_err(ehci, "xclk60mhsp1_ck set parent"
+			"failed error:%d\n", ret);
+
+	ret = clk_set_parent(utmi_p2_fck, xclk60mhsp2_ck);
+	if (ret != 0)
+		ehci_err(ehci, "xclk60mhsp2_ck set parent"
+			"failed error:%d\n", ret);
+
+	clk_disable(usbhost_p1_fck);
+	clk_disable(usbhost_p2_fck);
+}
+
 static void omap_ehci_soft_phy_reset(struct platform_device *pdev, u8 port)
 {
 	struct usb_hcd	*hcd = dev_get_drvdata(&pdev->dev);
@@ -100,6 +145,50 @@
 	}
 }
 
+static int omap_ehci_hub_control(
+	struct usb_hcd	*hcd,
+	u16		typeReq,
+	u16		wValue,
+	u16		wIndex,
+	char		*buf,
+	u16		wLength
+)
+{
+	struct ehci_hcd	*ehci = hcd_to_ehci(hcd);
+	u32 __iomem *status_reg = &ehci->regs->port_status[
+				(wIndex & 0xff) - 1];
+	u32		temp;
+	unsigned long	flags;
+	int		retval = 0;
+
+	spin_lock_irqsave(&ehci->lock, flags);
+
+	if (typeReq == SetPortFeature && wValue == USB_PORT_FEAT_SUSPEND) {
+		temp = ehci_readl(ehci, status_reg);
+		if ((temp & PORT_PE) == 0 || (temp & PORT_RESET) != 0) {
+			retval = -EPIPE;
+			goto done;
+		}
+
+		temp &= ~PORT_WKCONN_E;
+		temp |= PORT_WKDISC_E | PORT_WKOC_E;
+		ehci_writel(ehci, temp | PORT_SUSPEND, status_reg);
+
+		omap_ehci_erratum_i693(ehci);
+
+		set_bit((wIndex & 0xff) - 1, &ehci->suspended_ports);
+		goto done;
+	}
+
+	spin_unlock_irqrestore(&ehci->lock, flags);
+
+	/* Handle the hub control events here */
+	return ehci_hub_control(hcd, typeReq, wValue, wIndex, buf, wLength);
+done:
+	spin_unlock_irqrestore(&ehci->lock, flags);
+	return retval;
+}
+
 static void disable_put_regulator(
 		struct ehci_hcd_omap_platform_data *pdata)
 {
@@ -264,8 +353,76 @@
 	/* root ports should always stay powered */
 	ehci_port_power(omap_ehci, 1);
 
+	/* get clocks */
+	utmi_p1_fck = clk_get(dev, "utmi_p1_gfclk");
+	if (IS_ERR(utmi_p1_fck)) {
+		ret = PTR_ERR(utmi_p1_fck);
+		dev_err(dev, "utmi_p1_gfclk failed error:%d\n",	ret);
+		goto err_add_hcd;
+	}
+
+	xclk60mhsp1_ck = clk_get(dev, "xclk60mhsp1_ck");
+	if (IS_ERR(xclk60mhsp1_ck)) {
+		ret = PTR_ERR(xclk60mhsp1_ck);
+		dev_err(dev, "xclk60mhsp1_ck failed error:%d\n", ret);
+		goto err_utmi_p1_fck;
+	}
+
+	utmi_p2_fck = clk_get(dev, "utmi_p2_gfclk");
+	if (IS_ERR(utmi_p2_fck)) {
+		ret = PTR_ERR(utmi_p2_fck);
+		dev_err(dev, "utmi_p2_gfclk failed error:%d\n", ret);
+		goto err_xclk60mhsp1_ck;
+	}
+
+	xclk60mhsp2_ck = clk_get(dev, "xclk60mhsp2_ck");
+	if (IS_ERR(xclk60mhsp2_ck)) {
+		ret = PTR_ERR(xclk60mhsp2_ck);
+		dev_err(dev, "xclk60mhsp2_ck failed error:%d\n", ret);
+		goto err_utmi_p2_fck;
+	}
+
+	usbhost_p1_fck = clk_get(dev, "usb_host_hs_utmi_p1_clk");
+	if (IS_ERR(usbhost_p1_fck)) {
+		ret = PTR_ERR(usbhost_p1_fck);
+		dev_err(dev, "usbhost_p1_fck failed error:%d\n", ret);
+		goto err_xclk60mhsp2_ck;
+	}
+
+	usbhost_p2_fck = clk_get(dev, "usb_host_hs_utmi_p2_clk");
+	if (IS_ERR(usbhost_p2_fck)) {
+		ret = PTR_ERR(usbhost_p2_fck);
+		dev_err(dev, "usbhost_p2_fck failed error:%d\n", ret);
+		goto err_usbhost_p1_fck;
+	}
+
+	init_60m_fclk = clk_get(dev, "init_60m_fclk");
+	if (IS_ERR(init_60m_fclk)) {
+		ret = PTR_ERR(init_60m_fclk);
+		dev_err(dev, "init_60m_fclk failed error:%d\n", ret);
+		goto err_usbhost_p2_fck;
+	}
+
 	return 0;
 
+err_usbhost_p2_fck:
+	clk_put(usbhost_p2_fck);
+
+err_usbhost_p1_fck:
+	clk_put(usbhost_p1_fck);
+
+err_xclk60mhsp2_ck:
+	clk_put(xclk60mhsp2_ck);
+
+err_utmi_p2_fck:
+	clk_put(utmi_p2_fck);
+
+err_xclk60mhsp1_ck:
+	clk_put(xclk60mhsp1_ck);
+
+err_utmi_p1_fck:
+	clk_put(utmi_p1_fck);
+
 err_add_hcd:
 	disable_put_regulator(pdata);
 	pm_runtime_put_sync(dev);
@@ -294,6 +451,15 @@
 	disable_put_regulator(dev->platform_data);
 	iounmap(hcd->regs);
 	usb_put_hcd(hcd);
+
+	clk_put(utmi_p1_fck);
+	clk_put(utmi_p2_fck);
+	clk_put(xclk60mhsp1_ck);
+	clk_put(xclk60mhsp2_ck);
+	clk_put(usbhost_p1_fck);
+	clk_put(usbhost_p2_fck);
+	clk_put(init_60m_fclk);
+
 	pm_runtime_put_sync(dev);
 	pm_runtime_disable(dev);
 
@@ -364,7 +530,7 @@
 	 * root hub support
 	 */
 	.hub_status_data	= ehci_hub_status_data,
-	.hub_control		= ehci_hub_control,
+	.hub_control		= omap_ehci_hub_control,
 	.bus_suspend		= ehci_bus_suspend,
 	.bus_resume		= ehci_bus_resume,
 
diff --git a/drivers/usb/host/ehci-orion.c b/drivers/usb/host/ehci-orion.c
index 6c6a5a3..82de107 100644
--- a/drivers/usb/host/ehci-orion.c
+++ b/drivers/usb/host/ehci-orion.c
@@ -12,6 +12,7 @@
 #include <linux/module.h>
 #include <linux/platform_device.h>
 #include <linux/mbus.h>
+#include <linux/clk.h>
 #include <plat/ehci-orion.h>
 
 #define rdl(off)	__raw_readl(hcd->regs + (off))
@@ -198,6 +199,7 @@
 	struct resource *res;
 	struct usb_hcd *hcd;
 	struct ehci_hcd *ehci;
+	struct clk *clk;
 	void __iomem *regs;
 	int irq, err;
 
@@ -238,6 +240,14 @@
 		goto err2;
 	}
 
+	/* Not all platforms can gate the clock, so it is not
+	   an error if the clock does not exist. */
+	clk = clk_get(&pdev->dev, NULL);
+	if (!IS_ERR(clk)) {
+		clk_prepare_enable(clk);
+		clk_put(clk);
+	}
+
 	hcd = usb_create_hcd(&ehci_orion_hc_driver,
 			&pdev->dev, dev_name(&pdev->dev));
 	if (!hcd) {
@@ -301,12 +311,18 @@
 static int __exit ehci_orion_drv_remove(struct platform_device *pdev)
 {
 	struct usb_hcd *hcd = platform_get_drvdata(pdev);
+	struct clk *clk;
 
 	usb_remove_hcd(hcd);
 	iounmap(hcd->regs);
 	release_mem_region(hcd->rsrc_start, hcd->rsrc_len);
 	usb_put_hcd(hcd);
 
+	clk = clk_get(&pdev->dev, NULL);
+	if (!IS_ERR(clk)) {
+		clk_disable_unprepare(clk);
+		clk_put(clk);
+	}
 	return 0;
 }
 
diff --git a/drivers/usb/host/ehci-pci.c b/drivers/usb/host/ehci-pci.c
index bc94d7b..1234817 100644
--- a/drivers/usb/host/ehci-pci.c
+++ b/drivers/usb/host/ehci-pci.c
@@ -144,14 +144,6 @@
 			hcd->has_tt = 1;
 			tdi_reset(ehci);
 		}
-		if (pdev->subsystem_vendor == PCI_VENDOR_ID_ASUSTEK) {
-			/* EHCI #1 or #2 on 6 Series/C200 Series chipset */
-			if (pdev->device == 0x1c26 || pdev->device == 0x1c2d) {
-				ehci_info(ehci, "broken D3 during system sleep on ASUS\n");
-				hcd->broken_pci_sleep = 1;
-				device_set_wakeup_capable(&pdev->dev, false);
-			}
-		}
 		break;
 	case PCI_VENDOR_ID_TDI:
 		if (pdev->device == PCI_DEVICE_ID_TDI_EHCI) {
diff --git a/drivers/usb/host/ehci-sh.c b/drivers/usb/host/ehci-sh.c
index ca819cd..e7cb392 100644
--- a/drivers/usb/host/ehci-sh.c
+++ b/drivers/usb/host/ehci-sh.c
@@ -126,8 +126,7 @@
 		goto fail_create_hcd;
 	}
 
-	if (pdev->dev.platform_data != NULL)
-		pdata = pdev->dev.platform_data;
+	pdata = pdev->dev.platform_data;
 
 	/* initialize hcd */
 	hcd = usb_create_hcd(&ehci_sh_hc_driver, &pdev->dev,
diff --git a/drivers/usb/host/ehci-tegra.c b/drivers/usb/host/ehci-tegra.c
index 4a44bf8..6854823 100644
--- a/drivers/usb/host/ehci-tegra.c
+++ b/drivers/usb/host/ehci-tegra.c
@@ -722,8 +722,9 @@
 		}
 	}
 
-	tegra->phy = tegra_usb_phy_open(instance, hcd->regs, pdata->phy_config,
-						TEGRA_USB_PHY_MODE_HOST);
+	tegra->phy = tegra_usb_phy_open(&pdev->dev, instance, hcd->regs,
+					pdata->phy_config,
+					TEGRA_USB_PHY_MODE_HOST);
 	if (IS_ERR(tegra->phy)) {
 		dev_err(&pdev->dev, "Failed to open USB phy\n");
 		err = -ENXIO;
diff --git a/drivers/usb/host/ehci-xilinx-of.c b/drivers/usb/host/ehci-xilinx-of.c
index 9c2cc46..e9713d5 100644
--- a/drivers/usb/host/ehci-xilinx-of.c
+++ b/drivers/usb/host/ehci-xilinx-of.c
@@ -270,14 +270,12 @@
  *
  * Properly shutdown the hcd, call driver's shutdown routine.
  */
-static int ehci_hcd_xilinx_of_shutdown(struct platform_device *op)
+static void ehci_hcd_xilinx_of_shutdown(struct platform_device *op)
 {
 	struct usb_hcd *hcd = dev_get_drvdata(&op->dev);
 
 	if (hcd->driver->shutdown)
 		hcd->driver->shutdown(hcd);
-
-	return 0;
 }
 
 
diff --git a/drivers/usb/host/ohci-hub.c b/drivers/usb/host/ohci-hub.c
index 836772d..2f3619e 100644
--- a/drivers/usb/host/ohci-hub.c
+++ b/drivers/usb/host/ohci-hub.c
@@ -317,7 +317,7 @@
 }
 
 /* Carry out the final steps of resuming the controller device */
-static void ohci_finish_controller_resume(struct usb_hcd *hcd)
+static void __maybe_unused ohci_finish_controller_resume(struct usb_hcd *hcd)
 {
 	struct ohci_hcd		*ohci = hcd_to_ohci(hcd);
 	int			port;
diff --git a/drivers/usb/host/xhci-mem.c b/drivers/usb/host/xhci-mem.c
index ec4338e..77689bd 100644
--- a/drivers/usb/host/xhci-mem.c
+++ b/drivers/usb/host/xhci-mem.c
@@ -793,10 +793,9 @@
 		struct xhci_virt_device *virt_dev,
 		int slot_id)
 {
-	struct list_head *tt;
 	struct list_head *tt_list_head;
-	struct list_head *tt_next;
-	struct xhci_tt_bw_info *tt_info;
+	struct xhci_tt_bw_info *tt_info, *next;
+	bool slot_found = false;
 
 	/* If the device never made it past the Set Address stage,
 	 * it may not have the real_port set correctly.
@@ -808,34 +807,16 @@
 	}
 
 	tt_list_head = &(xhci->rh_bw[virt_dev->real_port - 1].tts);
-	if (list_empty(tt_list_head))
-		return;
-
-	list_for_each(tt, tt_list_head) {
-		tt_info = list_entry(tt, struct xhci_tt_bw_info, tt_list);
-		if (tt_info->slot_id == slot_id)
+	list_for_each_entry_safe(tt_info, next, tt_list_head, tt_list) {
+		/* Multi-TT hubs will have more than one entry */
+		if (tt_info->slot_id == slot_id) {
+			slot_found = true;
+			list_del(&tt_info->tt_list);
+			kfree(tt_info);
+		} else if (slot_found) {
 			break;
+		}
 	}
-	/* Cautionary measure in case the hub was disconnected before we
-	 * stored the TT information.
-	 */
-	if (tt_info->slot_id != slot_id)
-		return;
-
-	tt_next = tt->next;
-	tt_info = list_entry(tt, struct xhci_tt_bw_info,
-			tt_list);
-	/* Multi-TT hubs will have more than one entry */
-	do {
-		list_del(tt);
-		kfree(tt_info);
-		tt = tt_next;
-		if (list_empty(tt_list_head))
-			break;
-		tt_next = tt->next;
-		tt_info = list_entry(tt, struct xhci_tt_bw_info,
-				tt_list);
-	} while (tt_info->slot_id == slot_id);
 }
 
 int xhci_alloc_tt_info(struct xhci_hcd *xhci,
@@ -1791,17 +1772,9 @@
 {
 	struct pci_dev	*pdev = to_pci_dev(xhci_to_hcd(xhci)->self.controller);
 	struct dev_info	*dev_info, *next;
-	struct list_head *tt_list_head;
-	struct list_head *tt;
-	struct list_head *endpoints;
-	struct list_head *ep, *q;
-	struct xhci_tt_bw_info *tt_info;
-	struct xhci_interval_bw_table *bwt;
-	struct xhci_virt_ep *virt_ep;
-
 	unsigned long	flags;
 	int size;
-	int i;
+	int i, j, num_ports;
 
 	/* Free the Event Ring Segment Table and the actual Event Ring */
 	size = sizeof(struct xhci_erst_entry)*(xhci->erst.num_entries);
@@ -1860,21 +1833,22 @@
 	}
 	spin_unlock_irqrestore(&xhci->lock, flags);
 
-	bwt = &xhci->rh_bw->bw_table;
-	for (i = 0; i < XHCI_MAX_INTERVAL; i++) {
-		endpoints = &bwt->interval_bw[i].endpoints;
-		list_for_each_safe(ep, q, endpoints) {
-			virt_ep = list_entry(ep, struct xhci_virt_ep, bw_endpoint_list);
-			list_del(&virt_ep->bw_endpoint_list);
-			kfree(virt_ep);
+	num_ports = HCS_MAX_PORTS(xhci->hcs_params1);
+	for (i = 0; i < num_ports; i++) {
+		struct xhci_interval_bw_table *bwt = &xhci->rh_bw[i].bw_table;
+		for (j = 0; j < XHCI_MAX_INTERVAL; j++) {
+			struct list_head *ep = &bwt->interval_bw[j].endpoints;
+			while (!list_empty(ep))
+				list_del_init(ep->next);
 		}
 	}
 
-	tt_list_head = &xhci->rh_bw->tts;
-	list_for_each_safe(tt, q, tt_list_head) {
-		tt_info = list_entry(tt, struct xhci_tt_bw_info, tt_list);
-		list_del(tt);
-		kfree(tt_info);
+	for (i = 0; i < num_ports; i++) {
+		struct xhci_tt_bw_info *tt, *n;
+		list_for_each_entry_safe(tt, n, &xhci->rh_bw[i].tts, tt_list) {
+			list_del(&tt->tt_list);
+			kfree(tt);
+		}
 	}
 
 	xhci->num_usb2_ports = 0;
diff --git a/drivers/usb/host/xhci.c b/drivers/usb/host/xhci.c
index afdc73e..a979cd0 100644
--- a/drivers/usb/host/xhci.c
+++ b/drivers/usb/host/xhci.c
@@ -795,8 +795,8 @@
 	command = xhci_readl(xhci, &xhci->op_regs->command);
 	command |= CMD_CSS;
 	xhci_writel(xhci, command, &xhci->op_regs->command);
-	if (handshake(xhci, &xhci->op_regs->status, STS_SAVE, 0, 10*100)) {
-		xhci_warn(xhci, "WARN: xHC CMD_CSS timeout\n");
+	if (handshake(xhci, &xhci->op_regs->status, STS_SAVE, 0, 10 * 1000)) {
+		xhci_warn(xhci, "WARN: xHC save state timeout\n");
 		spin_unlock_irq(&xhci->lock);
 		return -ETIMEDOUT;
 	}
@@ -848,8 +848,8 @@
 		command |= CMD_CRS;
 		xhci_writel(xhci, command, &xhci->op_regs->command);
 		if (handshake(xhci, &xhci->op_regs->status,
-			      STS_RESTORE, 0, 10*100)) {
-			xhci_dbg(xhci, "WARN: xHC CMD_CSS timeout\n");
+			      STS_RESTORE, 0, 10 * 1000)) {
+			xhci_warn(xhci, "WARN: xHC restore state timeout\n");
 			spin_unlock_irq(&xhci->lock);
 			return -ETIMEDOUT;
 		}
@@ -3906,7 +3906,7 @@
 	default:
 		dev_warn(&udev->dev, "%s: Can't get timeout for non-U1 or U2 state.\n",
 				__func__);
-		return -EINVAL;
+		return USB3_LPM_DISABLED;
 	}
 
 	if (sel <= max_sel_pel && pel <= max_sel_pel)
diff --git a/drivers/usb/musb/davinci.c b/drivers/usb/musb/davinci.c
index 768b4b5..9d63ba4 100644
--- a/drivers/usb/musb/davinci.c
+++ b/drivers/usb/musb/davinci.c
@@ -34,6 +34,7 @@
 #include <linux/dma-mapping.h>
 
 #include <mach/cputype.h>
+#include <mach/hardware.h>
 
 #include <asm/mach-types.h>
 
diff --git a/drivers/usb/musb/davinci.h b/drivers/usb/musb/davinci.h
index 046c844..371baa0 100644
--- a/drivers/usb/musb/davinci.h
+++ b/drivers/usb/musb/davinci.h
@@ -15,7 +15,7 @@
  */
 
 /* Integrated highspeed/otg PHY */
-#define USBPHY_CTL_PADDR	(DAVINCI_SYSTEM_MODULE_BASE + 0x34)
+#define USBPHY_CTL_PADDR	0x01c40034
 #define USBPHY_DATAPOL		BIT(11)	/* (dm355) switch D+/D- */
 #define USBPHY_PHYCLKGD		BIT(8)
 #define USBPHY_SESNDEN		BIT(7)	/* v(sess_end) comparator */
@@ -27,7 +27,7 @@
 #define USBPHY_OTGPDWN		BIT(1)
 #define USBPHY_PHYPDWN		BIT(0)
 
-#define DM355_DEEPSLEEP_PADDR	(DAVINCI_SYSTEM_MODULE_BASE + 0x48)
+#define DM355_DEEPSLEEP_PADDR	0x01c40048
 #define DRVVBUS_FORCE		BIT(2)
 #define DRVVBUS_OVERRIDE	BIT(1)
 
diff --git a/drivers/usb/musb/musb_gadget.c b/drivers/usb/musb/musb_gadget.c
index f42c29b..95918da 100644
--- a/drivers/usb/musb/musb_gadget.c
+++ b/drivers/usb/musb/musb_gadget.c
@@ -1232,6 +1232,7 @@
 	}
 
 	musb_ep->desc = NULL;
+	musb_ep->end_point.desc = NULL;
 
 	/* abort all pending DMA and requests */
 	nuke(musb_ep, -ESHUTDOWN);
diff --git a/drivers/usb/musb/musb_host.c b/drivers/usb/musb/musb_host.c
index ef8d744..e090c79 100644
--- a/drivers/usb/musb/musb_host.c
+++ b/drivers/usb/musb/musb_host.c
@@ -375,11 +375,21 @@
 	 */
 	if (list_empty(&qh->hep->urb_list)) {
 		struct list_head	*head;
+		struct dma_controller	*dma = musb->dma_controller;
 
-		if (is_in)
+		if (is_in) {
 			ep->rx_reinit = 1;
-		else
+			if (ep->rx_channel) {
+				dma->channel_release(ep->rx_channel);
+				ep->rx_channel = NULL;
+			}
+		} else {
 			ep->tx_reinit = 1;
+			if (ep->tx_channel) {
+				dma->channel_release(ep->tx_channel);
+				ep->tx_channel = NULL;
+			}
+		}
 
 		/* Clobber old pointers to this qh */
 		musb_ep_set_qh(ep, is_in, NULL);
diff --git a/drivers/usb/otg/twl6030-usb.c b/drivers/usb/otg/twl6030-usb.c
index d2a9a8e..0eabb04 100644
--- a/drivers/usb/otg/twl6030-usb.c
+++ b/drivers/usb/otg/twl6030-usb.c
@@ -305,9 +305,8 @@
 
 		regulator_enable(twl->usb3v3);
 		twl->asleep = 1;
-		twl6030_writeb(twl, TWL_MODULE_USB, USB_ID_INT_EN_HI_CLR, 0x1);
-		twl6030_writeb(twl, TWL_MODULE_USB, USB_ID_INT_EN_HI_SET,
-								0x10);
+		twl6030_writeb(twl, TWL_MODULE_USB, 0x1, USB_ID_INT_EN_HI_CLR);
+		twl6030_writeb(twl, TWL_MODULE_USB, 0x10, USB_ID_INT_EN_HI_SET);
 		status = USB_EVENT_ID;
 		otg->default_a = true;
 		twl->phy.state = OTG_STATE_A_IDLE;
@@ -316,12 +315,10 @@
 		atomic_notifier_call_chain(&twl->phy.notifier, status,
 							otg->gadget);
 	} else  {
-		twl6030_writeb(twl, TWL_MODULE_USB, USB_ID_INT_EN_HI_CLR,
-								0x10);
-		twl6030_writeb(twl, TWL_MODULE_USB, USB_ID_INT_EN_HI_SET,
-								0x1);
+		twl6030_writeb(twl, TWL_MODULE_USB, 0x10, USB_ID_INT_EN_HI_CLR);
+		twl6030_writeb(twl, TWL_MODULE_USB, 0x1, USB_ID_INT_EN_HI_SET);
 	}
-	twl6030_writeb(twl, TWL_MODULE_USB, USB_ID_INT_LATCH_CLR, status);
+	twl6030_writeb(twl, TWL_MODULE_USB, status, USB_ID_INT_LATCH_CLR);
 
 	return IRQ_HANDLED;
 }
@@ -343,7 +340,7 @@
 {
 	struct twl6030_usb *twl = phy_to_twl(x);
 
-	twl6030_writeb(twl, TWL_MODULE_USB, USB_ID_INT_EN_HI_SET, 0x1);
+	twl6030_writeb(twl, TWL_MODULE_USB, 0x1, USB_ID_INT_EN_HI_SET);
 	twl6030_interrupt_unmask(0x05, REG_INT_MSK_LINE_C);
 	twl6030_interrupt_unmask(0x05, REG_INT_MSK_STS_C);
 
diff --git a/drivers/usb/phy/Kconfig b/drivers/usb/phy/Kconfig
index 3cfabcb..e7cf84f 100644
--- a/drivers/usb/phy/Kconfig
+++ b/drivers/usb/phy/Kconfig
@@ -2,11 +2,11 @@
 # Physical Layer USB driver configuration
 #
 comment "USB Physical Layer drivers"
-	depends on USB
+	depends on USB || USB_GADGET
 
 config USB_ISP1301
 	tristate "NXP ISP1301 USB transceiver support"
-	depends on USB
+	depends on USB || USB_GADGET
 	depends on I2C
 	help
 	  Say Y here to add support for the NXP ISP1301 USB transceiver driver.
diff --git a/drivers/usb/serial/cp210x.c b/drivers/usb/serial/cp210x.c
index 1b19262..1e71079 100644
--- a/drivers/usb/serial/cp210x.c
+++ b/drivers/usb/serial/cp210x.c
@@ -82,6 +82,7 @@
 	{ USB_DEVICE(0x10C4, 0x8066) }, /* Argussoft In-System Programmer */
 	{ USB_DEVICE(0x10C4, 0x806F) }, /* IMS USB to RS422 Converter Cable */
 	{ USB_DEVICE(0x10C4, 0x807A) }, /* Crumb128 board */
+	{ USB_DEVICE(0x10C4, 0x80C4) }, /* Cygnal Integrated Products, Inc., Optris infrared thermometer */
 	{ USB_DEVICE(0x10C4, 0x80CA) }, /* Degree Controls Inc */
 	{ USB_DEVICE(0x10C4, 0x80DD) }, /* Tracient RFID */
 	{ USB_DEVICE(0x10C4, 0x80F6) }, /* Suunto sports instrument */
@@ -92,6 +93,7 @@
 	{ USB_DEVICE(0x10C4, 0x814B) }, /* West Mountain Radio RIGtalk */
 	{ USB_DEVICE(0x10C4, 0x8156) }, /* B&G H3000 link cable */
 	{ USB_DEVICE(0x10C4, 0x815E) }, /* Helicomm IP-Link 1220-DVM */
+	{ USB_DEVICE(0x10C4, 0x815F) }, /* Timewave HamLinkUSB */
 	{ USB_DEVICE(0x10C4, 0x818B) }, /* AVIT Research USB to TTL */
 	{ USB_DEVICE(0x10C4, 0x819F) }, /* MJS USB Toslink Switcher */
 	{ USB_DEVICE(0x10C4, 0x81A6) }, /* ThinkOptics WavIt */
@@ -133,7 +135,13 @@
 	{ USB_DEVICE(0x10CE, 0xEA6A) }, /* Silicon Labs MobiData GPRS USB Modem 100EU */
 	{ USB_DEVICE(0x13AD, 0x9999) }, /* Baltech card reader */
 	{ USB_DEVICE(0x1555, 0x0004) }, /* Owen AC4 USB-RS485 Converter */
+	{ USB_DEVICE(0x166A, 0x0201) }, /* Clipsal 5500PACA C-Bus Pascal Automation Controller */
+	{ USB_DEVICE(0x166A, 0x0301) }, /* Clipsal 5800PC C-Bus Wireless PC Interface */
 	{ USB_DEVICE(0x166A, 0x0303) }, /* Clipsal 5500PCU C-Bus USB interface */
+	{ USB_DEVICE(0x166A, 0x0304) }, /* Clipsal 5000CT2 C-Bus Black and White Touchscreen */
+	{ USB_DEVICE(0x166A, 0x0305) }, /* Clipsal C-5000CT2 C-Bus Spectrum Colour Touchscreen */
+	{ USB_DEVICE(0x166A, 0x0401) }, /* Clipsal L51xx C-Bus Architectural Dimmer */
+	{ USB_DEVICE(0x166A, 0x0101) }, /* Clipsal 5560884 C-Bus Multi-room Audio Matrix Switcher */
 	{ USB_DEVICE(0x16D6, 0x0001) }, /* Jablotron serial interface */
 	{ USB_DEVICE(0x16DC, 0x0010) }, /* W-IE-NE-R Plein & Baus GmbH PL512 Power Supply */
 	{ USB_DEVICE(0x16DC, 0x0011) }, /* W-IE-NE-R Plein & Baus GmbH RCM Remote Control for MARATON Power Supply */
@@ -145,7 +153,11 @@
 	{ USB_DEVICE(0x1843, 0x0200) }, /* Vaisala USB Instrument Cable */
 	{ USB_DEVICE(0x18EF, 0xE00F) }, /* ELV USB-I2C-Interface */
 	{ USB_DEVICE(0x1BE3, 0x07A6) }, /* WAGO 750-923 USB Service Cable */
+	{ USB_DEVICE(0x1E29, 0x0102) }, /* Festo CPX-USB */
+	{ USB_DEVICE(0x1E29, 0x0501) }, /* Festo CMSP */
 	{ USB_DEVICE(0x3195, 0xF190) }, /* Link Instruments MSO-19 */
+	{ USB_DEVICE(0x3195, 0xF280) }, /* Link Instruments MSO-28 */
+	{ USB_DEVICE(0x3195, 0xF281) }, /* Link Instruments MSO-28 */
 	{ USB_DEVICE(0x413C, 0x9500) }, /* DW700 GPS USB interface */
 	{ } /* Terminating Entry */
 };
diff --git a/drivers/usb/serial/ftdi_sio.c b/drivers/usb/serial/ftdi_sio.c
index 8c084ea..bc912e5 100644
--- a/drivers/usb/serial/ftdi_sio.c
+++ b/drivers/usb/serial/ftdi_sio.c
@@ -737,6 +737,7 @@
 	{ USB_DEVICE(TELLDUS_VID, TELLDUS_TELLSTICK_PID) },
 	{ USB_DEVICE(RTSYSTEMS_VID, RTSYSTEMS_SERIAL_VX7_PID) },
 	{ USB_DEVICE(RTSYSTEMS_VID, RTSYSTEMS_CT29B_PID) },
+	{ USB_DEVICE(RTSYSTEMS_VID, RTSYSTEMS_RTS01_PID) },
 	{ USB_DEVICE(FTDI_VID, FTDI_MAXSTREAM_PID) },
 	{ USB_DEVICE(FTDI_VID, FTDI_PHI_FISCO_PID) },
 	{ USB_DEVICE(TML_VID, TML_USB_SERIAL_PID) },
diff --git a/drivers/usb/serial/ftdi_sio_ids.h b/drivers/usb/serial/ftdi_sio_ids.h
index f3c7c78..5661c7e 100644
--- a/drivers/usb/serial/ftdi_sio_ids.h
+++ b/drivers/usb/serial/ftdi_sio_ids.h
@@ -784,6 +784,7 @@
 #define RTSYSTEMS_VID			0x2100	/* Vendor ID */
 #define RTSYSTEMS_SERIAL_VX7_PID	0x9e52	/* Serial converter for VX-7 Radios using FT232RL */
 #define RTSYSTEMS_CT29B_PID		0x9e54	/* CT29B Radio Cable */
+#define RTSYSTEMS_RTS01_PID		0x9e57	/* USB-RTS01 Radio Cable */
 
 
 /*
diff --git a/drivers/usb/serial/generic.c b/drivers/usb/serial/generic.c
index 105a6d8..9b026bf 100644
--- a/drivers/usb/serial/generic.c
+++ b/drivers/usb/serial/generic.c
@@ -39,13 +39,6 @@
 
 static struct usb_device_id generic_device_ids[2]; /* Initially all zeroes. */
 
-/* we want to look at all devices, as the vendor/product id can change
- * depending on the command line argument */
-static const struct usb_device_id generic_serial_ids[] = {
-	{.driver_info = 42},
-	{}
-};
-
 /* All of the device info needed for the Generic Serial Converter */
 struct usb_serial_driver usb_serial_generic_device = {
 	.driver = {
@@ -79,7 +72,8 @@
 		USB_DEVICE_ID_MATCH_VENDOR | USB_DEVICE_ID_MATCH_PRODUCT;
 
 	/* register our generic driver with ourselves */
-	retval = usb_serial_register_drivers(serial_drivers, "usbserial_generic", generic_serial_ids);
+	retval = usb_serial_register_drivers(serial_drivers,
+			"usbserial_generic", generic_device_ids);
 #endif
 	return retval;
 }
diff --git a/drivers/usb/serial/mct_u232.c b/drivers/usb/serial/mct_u232.c
index d0ec1aa..a71fa0a 100644
--- a/drivers/usb/serial/mct_u232.c
+++ b/drivers/usb/serial/mct_u232.c
@@ -309,13 +309,16 @@
 			MCT_U232_SET_REQUEST_TYPE,
 			0, 0, buf, MCT_U232_SET_MODEM_CTRL_SIZE,
 			WDR_TIMEOUT);
-	if (rc < 0)
-		dev_err(&serial->dev->dev,
-			"Set MODEM CTRL 0x%x failed (error = %d)\n", mcr, rc);
+	kfree(buf);
+
 	dbg("set_modem_ctrl: state=0x%x ==> mcr=0x%x", control_state, mcr);
 
-	kfree(buf);
-	return rc;
+	if (rc < 0) {
+		dev_err(&serial->dev->dev,
+			"Set MODEM CTRL 0x%x failed (error = %d)\n", mcr, rc);
+		return rc;
+	}
+	return 0;
 } /* mct_u232_set_modem_ctrl */
 
 static int mct_u232_get_modem_stat(struct usb_serial *serial,
diff --git a/drivers/usb/serial/mos7840.c b/drivers/usb/serial/mos7840.c
index 29160f8..57eca24 100644
--- a/drivers/usb/serial/mos7840.c
+++ b/drivers/usb/serial/mos7840.c
@@ -190,7 +190,7 @@
 
 static int device_type;
 
-static const struct usb_device_id id_table[] __devinitconst = {
+static const struct usb_device_id id_table[] = {
 	{USB_DEVICE(USB_VENDOR_ID_MOSCHIP, MOSCHIP_DEVICE_ID_7840)},
 	{USB_DEVICE(USB_VENDOR_ID_MOSCHIP, MOSCHIP_DEVICE_ID_7820)},
 	{USB_DEVICE(USB_VENDOR_ID_MOSCHIP, MOSCHIP_DEVICE_ID_7810)},
diff --git a/drivers/usb/serial/option.c b/drivers/usb/serial/option.c
index 1aae902..adf8ce7 100644
--- a/drivers/usb/serial/option.c
+++ b/drivers/usb/serial/option.c
@@ -47,6 +47,7 @@
 /* Function prototypes */
 static int  option_probe(struct usb_serial *serial,
 			const struct usb_device_id *id);
+static void option_release(struct usb_serial *serial);
 static int option_send_setup(struct usb_serial_port *port);
 static void option_instat_callback(struct urb *urb);
 
@@ -150,6 +151,7 @@
 #define HUAWEI_PRODUCT_E14AC			0x14AC
 #define HUAWEI_PRODUCT_K3806			0x14AE
 #define HUAWEI_PRODUCT_K4605			0x14C6
+#define HUAWEI_PRODUCT_K5005			0x14C8
 #define HUAWEI_PRODUCT_K3770			0x14C9
 #define HUAWEI_PRODUCT_K3771			0x14CA
 #define HUAWEI_PRODUCT_K4510			0x14CB
@@ -234,6 +236,7 @@
 #define NOVATELWIRELESS_PRODUCT_G1		0xA001
 #define NOVATELWIRELESS_PRODUCT_G1_M		0xA002
 #define NOVATELWIRELESS_PRODUCT_G2		0xA010
+#define NOVATELWIRELESS_PRODUCT_MC551		0xB001
 
 /* AMOI PRODUCTS */
 #define AMOI_VENDOR_ID				0x1614
@@ -425,7 +428,7 @@
 #define SAMSUNG_VENDOR_ID                       0x04e8
 #define SAMSUNG_PRODUCT_GT_B3730                0x6889
 
-/* YUGA products  www.yuga-info.com*/
+/* YUGA products  www.yuga-info.com gavin.kx@qq.com */
 #define YUGA_VENDOR_ID				0x257A
 #define YUGA_PRODUCT_CEM600			0x1601
 #define YUGA_PRODUCT_CEM610			0x1602
@@ -442,6 +445,8 @@
 #define YUGA_PRODUCT_CEU516			0x160C
 #define YUGA_PRODUCT_CEU528			0x160D
 #define YUGA_PRODUCT_CEU526			0x160F
+#define YUGA_PRODUCT_CEU881			0x161F
+#define YUGA_PRODUCT_CEU882			0x162F
 
 #define YUGA_PRODUCT_CWM600			0x2601
 #define YUGA_PRODUCT_CWM610			0x2602
@@ -457,23 +462,26 @@
 #define YUGA_PRODUCT_CWU518			0x260B
 #define YUGA_PRODUCT_CWU516			0x260C
 #define YUGA_PRODUCT_CWU528			0x260D
+#define YUGA_PRODUCT_CWU581			0x260E
 #define YUGA_PRODUCT_CWU526			0x260F
+#define YUGA_PRODUCT_CWU582			0x261F
+#define YUGA_PRODUCT_CWU583			0x262F
 
-#define YUGA_PRODUCT_CLM600			0x2601
-#define YUGA_PRODUCT_CLM610			0x2602
-#define YUGA_PRODUCT_CLM500			0x2603
-#define YUGA_PRODUCT_CLM510			0x2604
-#define YUGA_PRODUCT_CLM800			0x2605
-#define YUGA_PRODUCT_CLM900			0x2606
+#define YUGA_PRODUCT_CLM600			0x3601
+#define YUGA_PRODUCT_CLM610			0x3602
+#define YUGA_PRODUCT_CLM500			0x3603
+#define YUGA_PRODUCT_CLM510			0x3604
+#define YUGA_PRODUCT_CLM800			0x3605
+#define YUGA_PRODUCT_CLM900			0x3606
 
-#define YUGA_PRODUCT_CLU718			0x2607
-#define YUGA_PRODUCT_CLU716			0x2608
-#define YUGA_PRODUCT_CLU728			0x2609
-#define YUGA_PRODUCT_CLU726			0x260A
-#define YUGA_PRODUCT_CLU518			0x260B
-#define YUGA_PRODUCT_CLU516			0x260C
-#define YUGA_PRODUCT_CLU528			0x260D
-#define YUGA_PRODUCT_CLU526			0x260F
+#define YUGA_PRODUCT_CLU718			0x3607
+#define YUGA_PRODUCT_CLU716			0x3608
+#define YUGA_PRODUCT_CLU728			0x3609
+#define YUGA_PRODUCT_CLU726			0x360A
+#define YUGA_PRODUCT_CLU518			0x360B
+#define YUGA_PRODUCT_CLU516			0x360C
+#define YUGA_PRODUCT_CLU528			0x360D
+#define YUGA_PRODUCT_CLU526			0x360F
 
 /* Viettel products */
 #define VIETTEL_VENDOR_ID			0x2262
@@ -490,6 +498,10 @@
 /* MediaTek products */
 #define MEDIATEK_VENDOR_ID			0x0e8d
 
+/* Cellient products */
+#define CELLIENT_VENDOR_ID			0x2692
+#define CELLIENT_PRODUCT_MEN200			0x9005
+
 /* some devices interfaces need special handling due to a number of reasons */
 enum option_blacklist_reason {
 		OPTION_BLACKLIST_NONE = 0,
@@ -666,6 +678,11 @@
 	{ USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_K3806, 0xff, 0xff, 0xff) },
 	{ USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_K4605, 0xff, 0xff, 0xff),
 		.driver_info = (kernel_ulong_t) &huawei_cdc12_blacklist },
+	{ USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_K4605, 0xff, 0x01, 0x31) },
+	{ USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_K4605, 0xff, 0x01, 0x32) },
+	{ USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_K5005, 0xff, 0x01, 0x31) },
+	{ USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_K5005, 0xff, 0x01, 0x32) },
+	{ USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_K5005, 0xff, 0x01, 0x33) },
 	{ USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_K3770, 0xff, 0x02, 0x31) },
 	{ USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_K3770, 0xff, 0x02, 0x32) },
 	{ USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_K3771, 0xff, 0x02, 0x31) },
@@ -722,6 +739,8 @@
 	{ USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_G1) },
 	{ USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_G1_M) },
 	{ USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_G2) },
+	/* Novatel Ovation MC551 a.k.a. Verizon USB551L */
+	{ USB_DEVICE_AND_INTERFACE_INFO(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_MC551, 0xff, 0xff, 0xff) },
 
 	{ USB_DEVICE(AMOI_VENDOR_ID, AMOI_PRODUCT_H01) },
 	{ USB_DEVICE(AMOI_VENDOR_ID, AMOI_PRODUCT_H01A) },
@@ -1209,6 +1228,11 @@
 	{ USB_DEVICE(YUGA_VENDOR_ID, YUGA_PRODUCT_CLU516) },
 	{ USB_DEVICE(YUGA_VENDOR_ID, YUGA_PRODUCT_CLU528) },
 	{ USB_DEVICE(YUGA_VENDOR_ID, YUGA_PRODUCT_CLU526) },
+	{ USB_DEVICE(YUGA_VENDOR_ID, YUGA_PRODUCT_CEU881) },
+	{ USB_DEVICE(YUGA_VENDOR_ID, YUGA_PRODUCT_CEU882) },
+	{ USB_DEVICE(YUGA_VENDOR_ID, YUGA_PRODUCT_CWU581) },
+	{ USB_DEVICE(YUGA_VENDOR_ID, YUGA_PRODUCT_CWU582) },
+	{ USB_DEVICE(YUGA_VENDOR_ID, YUGA_PRODUCT_CWU583) },
 	{ USB_DEVICE_AND_INTERFACE_INFO(VIETTEL_VENDOR_ID, VIETTEL_PRODUCT_VT1000, 0xff, 0xff, 0xff) },
 	{ USB_DEVICE_AND_INTERFACE_INFO(ZD_VENDOR_ID, ZD_PRODUCT_7000, 0xff, 0xff, 0xff) },
 	{ USB_DEVICE(LG_VENDOR_ID, LG_PRODUCT_L02C) }, /* docomo L-02C modem */
@@ -1216,6 +1240,7 @@
 	{ USB_DEVICE_AND_INTERFACE_INFO(MEDIATEK_VENDOR_ID, 0x00a1, 0xff, 0x02, 0x01) },
 	{ USB_DEVICE_AND_INTERFACE_INFO(MEDIATEK_VENDOR_ID, 0x00a2, 0xff, 0x00, 0x00) },
 	{ USB_DEVICE_AND_INTERFACE_INFO(MEDIATEK_VENDOR_ID, 0x00a2, 0xff, 0x02, 0x01) },        /* MediaTek MT6276M modem & app port */
+	{ USB_DEVICE(CELLIENT_VENDOR_ID, CELLIENT_PRODUCT_MEN200) },
 	{ } /* Terminating entry */
 };
 MODULE_DEVICE_TABLE(usb, option_ids);
@@ -1245,7 +1270,7 @@
 	.ioctl             = usb_wwan_ioctl,
 	.attach            = usb_wwan_startup,
 	.disconnect        = usb_wwan_disconnect,
-	.release           = usb_wwan_release,
+	.release           = option_release,
 	.read_int_callback = option_instat_callback,
 #ifdef CONFIG_PM
 	.suspend           = usb_wwan_suspend,
@@ -1259,35 +1284,6 @@
 
 static bool debug;
 
-/* per port private data */
-
-#define N_IN_URB 4
-#define N_OUT_URB 4
-#define IN_BUFLEN 4096
-#define OUT_BUFLEN 4096
-
-struct option_port_private {
-	/* Input endpoints and buffer for this port */
-	struct urb *in_urbs[N_IN_URB];
-	u8 *in_buffer[N_IN_URB];
-	/* Output endpoints and buffer for this port */
-	struct urb *out_urbs[N_OUT_URB];
-	u8 *out_buffer[N_OUT_URB];
-	unsigned long out_busy;		/* Bit vector of URBs in use */
-	int opened;
-	struct usb_anchor delayed;
-
-	/* Settings for the port */
-	int rts_state;	/* Handshaking pins (outputs) */
-	int dtr_state;
-	int cts_state;	/* Handshaking pins (inputs) */
-	int dsr_state;
-	int dcd_state;
-	int ri_state;
-
-	unsigned long tx_start_time[N_OUT_URB];
-};
-
 module_usb_serial_driver(serial_drivers, option_ids);
 
 static bool is_blacklisted(const u8 ifnum, enum option_blacklist_reason reason,
@@ -1356,12 +1352,22 @@
 	return 0;
 }
 
+static void option_release(struct usb_serial *serial)
+{
+	struct usb_wwan_intf_private *priv = usb_get_serial_data(serial);
+
+	usb_wwan_release(serial);
+
+	kfree(priv);
+}
+
 static void option_instat_callback(struct urb *urb)
 {
 	int err;
 	int status = urb->status;
 	struct usb_serial_port *port =  urb->context;
-	struct option_port_private *portdata = usb_get_serial_port_data(port);
+	struct usb_wwan_port_private *portdata =
+					usb_get_serial_port_data(port);
 
 	dbg("%s: urb %p port %p has data %p", __func__, urb, port, portdata);
 
@@ -1421,7 +1427,7 @@
 	struct usb_serial *serial = port->serial;
 	struct usb_wwan_intf_private *intfdata =
 		(struct usb_wwan_intf_private *) serial->private;
-	struct option_port_private *portdata;
+	struct usb_wwan_port_private *portdata;
 	int ifNum = serial->interface->cur_altsetting->desc.bInterfaceNumber;
 	int val = 0;
 
diff --git a/drivers/usb/serial/qcserial.c b/drivers/usb/serial/qcserial.c
index 0d5fe59..996015c 100644
--- a/drivers/usb/serial/qcserial.c
+++ b/drivers/usb/serial/qcserial.c
@@ -105,7 +105,13 @@
 	{USB_DEVICE(0x1410, 0xa021)},	/* Novatel Gobi 3000 Composite */
 	{USB_DEVICE(0x413c, 0x8193)},	/* Dell Gobi 3000 QDL */
 	{USB_DEVICE(0x413c, 0x8194)},	/* Dell Gobi 3000 Composite */
+	{USB_DEVICE(0x1199, 0x9010)},	/* Sierra Wireless Gobi 3000 QDL */
+	{USB_DEVICE(0x1199, 0x9012)},	/* Sierra Wireless Gobi 3000 QDL */
 	{USB_DEVICE(0x1199, 0x9013)},	/* Sierra Wireless Gobi 3000 Modem device (MC8355) */
+	{USB_DEVICE(0x1199, 0x9014)},	/* Sierra Wireless Gobi 3000 QDL */
+	{USB_DEVICE(0x1199, 0x9015)},	/* Sierra Wireless Gobi 3000 Modem device */
+	{USB_DEVICE(0x1199, 0x9018)},	/* Sierra Wireless Gobi 3000 QDL */
+	{USB_DEVICE(0x1199, 0x9019)},	/* Sierra Wireless Gobi 3000 Modem device */
 	{USB_DEVICE(0x12D1, 0x14F0)},	/* Sony Gobi 3000 QDL */
 	{USB_DEVICE(0x12D1, 0x14F1)},	/* Sony Gobi 3000 Composite */
 	{ }				/* Terminating entry */
diff --git a/drivers/usb/serial/sierra.c b/drivers/usb/serial/sierra.c
index ba54a0a..d423d36 100644
--- a/drivers/usb/serial/sierra.c
+++ b/drivers/usb/serial/sierra.c
@@ -294,6 +294,10 @@
 	{ USB_DEVICE(0x1199, 0x68A3), 	/* Sierra Wireless Direct IP modems */
 	  .driver_info = (kernel_ulong_t)&direct_ip_interface_blacklist
 	},
+	/* AT&T Direct IP LTE modems */
+	{ USB_DEVICE_AND_INTERFACE_INFO(0x0F3D, 0x68AA, 0xFF, 0xFF, 0xFF),
+	  .driver_info = (kernel_ulong_t)&direct_ip_interface_blacklist
+	},
 	{ USB_DEVICE(0x0f3d, 0x68A3), 	/* Airprime/Sierra Wireless Direct IP modems */
 	  .driver_info = (kernel_ulong_t)&direct_ip_interface_blacklist
 	},
diff --git a/drivers/usb/serial/usb-serial.c b/drivers/usb/serial/usb-serial.c
index 6a1b609..27483f9 100644
--- a/drivers/usb/serial/usb-serial.c
+++ b/drivers/usb/serial/usb-serial.c
@@ -659,12 +659,14 @@
 static struct usb_serial_driver *search_serial_device(
 					struct usb_interface *iface)
 {
-	const struct usb_device_id *id;
+	const struct usb_device_id *id = NULL;
 	struct usb_serial_driver *drv;
+	struct usb_driver *driver = to_usb_driver(iface->dev.driver);
 
 	/* Check if the usb id matches a known device */
 	list_for_each_entry(drv, &usb_serial_driver_list, driver_list) {
-		id = get_iface_id(drv, iface);
+		if (drv->usb_driver == driver)
+			id = get_iface_id(drv, iface);
 		if (id)
 			return drv;
 	}
@@ -755,7 +757,7 @@
 
 		if (retval) {
 			dbg("sub driver rejected device");
-			kfree(serial);
+			usb_serial_put(serial);
 			module_put(type->driver.owner);
 			return retval;
 		}
@@ -827,7 +829,7 @@
 		 */
 		if (num_bulk_in == 0 || num_bulk_out == 0) {
 			dev_info(&interface->dev, "PL-2303 hack: descriptors matched but endpoints did not\n");
-			kfree(serial);
+			usb_serial_put(serial);
 			module_put(type->driver.owner);
 			return -ENODEV;
 		}
@@ -841,7 +843,7 @@
 		if (num_ports == 0) {
 			dev_err(&interface->dev,
 			    "Generic device with no bulk out, not allowed.\n");
-			kfree(serial);
+			usb_serial_put(serial);
 			module_put(type->driver.owner);
 			return -EIO;
 		}
diff --git a/drivers/usb/storage/scsiglue.c b/drivers/usb/storage/scsiglue.c
index a324a5d..11418da 100644
--- a/drivers/usb/storage/scsiglue.c
+++ b/drivers/usb/storage/scsiglue.c
@@ -202,6 +202,12 @@
 		if (us->fflags & US_FL_NO_READ_CAPACITY_16)
 			sdev->no_read_capacity_16 = 1;
 
+		/*
+		 * Many devices do not respond properly to READ_CAPACITY_16.
+		 * Tell the SCSI layer to try READ_CAPACITY_10 first.
+		 */
+		sdev->try_rc_10_first = 1;
+
 		/* assume SPC3 or latter devices support sense size > 18 */
 		if (sdev->scsi_level > SCSI_SPC_2)
 			us->fflags |= US_FL_SANE_SENSE;
diff --git a/drivers/vhost/vhost.c b/drivers/vhost/vhost.c
index 94dbd25..112156f 100644
--- a/drivers/vhost/vhost.c
+++ b/drivers/vhost/vhost.c
@@ -191,7 +191,9 @@
 	struct vhost_dev *dev = data;
 	struct vhost_work *work = NULL;
 	unsigned uninitialized_var(seq);
+	mm_segment_t oldfs = get_fs();
 
+	set_fs(USER_DS);
 	use_mm(dev->mm);
 
 	for (;;) {
@@ -229,6 +231,7 @@
 
 	}
 	unuse_mm(dev->mm);
+	set_fs(oldfs);
 	return 0;
 }
 
diff --git a/drivers/video/Kconfig b/drivers/video/Kconfig
index a290be5..0217f74 100644
--- a/drivers/video/Kconfig
+++ b/drivers/video/Kconfig
@@ -2210,7 +2210,7 @@
 
 config FB_COBALT
 	tristate "Cobalt server LCD frame buffer support"
-	depends on FB && MIPS_COBALT
+	depends on FB && (MIPS_COBALT || MIPS_SEAD3)
 
 config FB_SH7760
 	bool "SH7760/SH7763/SH7720/SH7721 LCDC support"
@@ -2382,6 +2382,39 @@
 	  and could also have been called by other names when coupled with
 	  a bridge adapter.
 
+config FB_AUO_K190X
+	tristate "AUO-K190X EPD controller support"
+	depends on FB
+	select FB_SYS_FILLRECT
+	select FB_SYS_COPYAREA
+	select FB_SYS_IMAGEBLIT
+	select FB_SYS_FOPS
+	select FB_DEFERRED_IO
+	help
+	  Provides support for epaper controllers from AUO's K190X series.
+	  These controllers can be used to drive epaper displays from
+	  Sipix.
+
+	  This option enables the common support, shared by the individual
+	  controller drivers. You will also have to enable the driver
+	  for the controller type used in your device.
+
+config FB_AUO_K1900
+	tristate "AUO-K1900 EPD controller support"
+	depends on FB && FB_AUO_K190X
+	help
+	  This driver implements support for the AUO K1900 epd-controller.
+	  This controller can drive Sipix epaper displays but can only do
+	  serial updates, reducing the number of possible frames per second.
+
+config FB_AUO_K1901
+	tristate "AUO-K1901 EPD controller support"
+	depends on FB && FB_AUO_K190X
+	help
+	  This driver implements support for the AUO K1901 epd-controller.
+	  This controller can drive Sipix epaper displays and supports
+	  concurrent updates, making higher frames per second possible.
+
 config FB_JZ4740
 	tristate "JZ4740 LCD framebuffer support"
 	depends on FB && MACH_JZ4740
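Editor's note, an orientation aid rather than part of the patch: the AUO-K190X support added below is split between a common module and the per-controller drivers, and the help text above points out that a board specific driver has to provide the physical IO. The sketch that follows is a hypothetical example of such board glue; the auok190x_board field and callback names are inferred from auok190x_common_probe() later in this patch, the authoritative declarations live in include/video/auo_k190xfb.h (not shown in this hunk), and every example_* / EXAMPLE_* identifier is a placeholder the board code would have to define.

/* hypothetical board glue, a minimal sketch only -- not part of this patch */
#include <linux/platform_device.h>
#include <video/auo_k190xfb.h>

/* the example_* callbacks are placeholders: they would drive the I80
 * control lines, the 16bit data bus and the ready/busy handshake of
 * this particular machine.
 */
static struct auok190x_board example_board = {
	.init		= example_board_init,
	.cleanup	= example_board_cleanup,
	.wait_for_rdy	= example_wait_for_rdy,
	.set_ctl	= example_set_ctl,
	.set_hdb	= example_set_hdb,
	.get_hdb	= example_get_hdb,
	.setup_irq	= example_setup_irq,
	.gpio_nrst	= EXAMPLE_GPIO_NRST,
	.gpio_nsleep	= EXAMPLE_GPIO_NSLEEP,
	.resolution	= AUOK190X_RESOLUTION_800_600,
	.rotation	= 0,
	.fps		= 4,
};

static struct platform_device example_epd_device = {
	.name	= "auo_k1900fb",	/* or "auo_k1901fb" */
	.id	= -1,
	.dev	= {
		.platform_data = &example_board,
	},
};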
diff --git a/drivers/video/Makefile b/drivers/video/Makefile
index 9356add..ee8dafb 100644
--- a/drivers/video/Makefile
+++ b/drivers/video/Makefile
@@ -118,6 +118,9 @@
 obj-$(CONFIG_FB_MAXINE)		  += maxinefb.o
 obj-$(CONFIG_FB_METRONOME)        += metronomefb.o
 obj-$(CONFIG_FB_BROADSHEET)       += broadsheetfb.o
+obj-$(CONFIG_FB_AUO_K190X)	  += auo_k190x.o
+obj-$(CONFIG_FB_AUO_K1900)	  += auo_k1900fb.o
+obj-$(CONFIG_FB_AUO_K1901)	  += auo_k1901fb.o
 obj-$(CONFIG_FB_S1D13XXX)	  += s1d13xxxfb.o
 obj-$(CONFIG_FB_SH7760)		  += sh7760fb.o
 obj-$(CONFIG_FB_IMX)              += imxfb.o
diff --git a/drivers/video/auo_k1900fb.c b/drivers/video/auo_k1900fb.c
new file mode 100644
index 0000000..c36cf96
--- /dev/null
+++ b/drivers/video/auo_k1900fb.c
@@ -0,0 +1,198 @@
+/*
+ * auok190xfb.c -- FB driver for AUO-K1900 controllers
+ *
+ * Copyright (C) 2011, 2012 Heiko Stuebner <heiko@sntech.de>
+ *
+ * based on broadsheetfb.c
+ *
+ * Copyright (C) 2008, Jaya Kumar
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * Layout is based on skeletonfb.c by James Simmons and Geert Uytterhoeven.
+ *
+ * This driver is written to be used with the AUO-K1900 display controller.
+ *
+ * It is intended to be architecture independent. A board specific driver
+ * must be used to perform all the physical IO interactions.
+ *
+ * The controller supports different update modes:
+ * mode0+1 16 step gray (4bit)
+ * mode2 4 step gray (2bit) - FIXME: add strange refresh
+ * mode3 2 step gray (1bit) - FIXME: add strange refresh
+ * mode4 handwriting mode (strange behaviour)
+ * mode5 automatic selection of update mode
+ */
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/errno.h>
+#include <linux/string.h>
+#include <linux/mm.h>
+#include <linux/slab.h>
+#include <linux/delay.h>
+#include <linux/interrupt.h>
+#include <linux/fb.h>
+#include <linux/init.h>
+#include <linux/platform_device.h>
+#include <linux/list.h>
+#include <linux/firmware.h>
+#include <linux/gpio.h>
+#include <linux/pm_runtime.h>
+
+#include <video/auo_k190xfb.h>
+
+#include "auo_k190x.h"
+
+/*
+ * AUO-K1900 specific commands
+ */
+
+#define AUOK1900_CMD_PARTIALDISP	0x1001
+#define AUOK1900_CMD_ROTATION		0x1006
+#define AUOK1900_CMD_LUT_STOP		0x1009
+
+#define AUOK1900_INIT_TEMP_AVERAGE	(1 << 13)
+#define AUOK1900_INIT_ROTATE(_x)	((_x & 0x3) << 10)
+#define AUOK1900_INIT_RESOLUTION(_res)	((_res & 0x7) << 2)
+
+static void auok1900_init(struct auok190xfb_par *par)
+{
+	struct auok190x_board *board = par->board;
+	u16 init_param = 0;
+
+	init_param |= AUOK1900_INIT_TEMP_AVERAGE;
+	init_param |= AUOK1900_INIT_ROTATE(par->rotation);
+	init_param |= AUOK190X_INIT_INVERSE_WHITE;
+	init_param |= AUOK190X_INIT_FORMAT0;
+	init_param |= AUOK1900_INIT_RESOLUTION(par->resolution);
+	init_param |= AUOK190X_INIT_SHIFT_RIGHT;
+
+	auok190x_send_cmdargs(par, AUOK190X_CMD_INIT, 1, &init_param);
+
+	/* let the controller finish */
+	board->wait_for_rdy(par);
+}
+
+static void auok1900_update_region(struct auok190xfb_par *par, int mode,
+						u16 y1, u16 y2)
+{
+	struct device *dev = par->info->device;
+	unsigned char *buf = (unsigned char *)par->info->screen_base;
+	int xres = par->info->var.xres;
+	u16 args[4];
+
+	pm_runtime_get_sync(dev);
+
+	mutex_lock(&(par->io_lock));
+
+	/* y1 and y2 must be a multiple of 2 so drop the lowest bit */
+	y1 &= 0xfffe;
+	y2 &= 0xfffe;
+
+	dev_dbg(dev, "update (x,y,w,h,mode)=(%d,%d,%d,%d,%d)\n",
+		1, y1+1, xres, y2-y1, mode);
+
+	/* FIXME: handle different partial update modes */
+	args[0] = mode | 1;
+	args[1] = y1 + 1;
+	args[2] = xres;
+	args[3] = y2 - y1;
+	buf += y1 * xres;
+	auok190x_send_cmdargs_pixels(par, AUOK1900_CMD_PARTIALDISP, 4, args,
+				     ((y2 - y1) * xres)/2, (u16 *) buf);
+	auok190x_send_command(par, AUOK190X_CMD_DATA_STOP);
+
+	par->update_cnt++;
+
+	mutex_unlock(&(par->io_lock));
+
+	pm_runtime_mark_last_busy(dev);
+	pm_runtime_put_autosuspend(dev);
+}
+
+static void auok1900fb_dpy_update_pages(struct auok190xfb_par *par,
+						u16 y1, u16 y2)
+{
+	int mode;
+
+	if (par->update_mode < 0) {
+		mode = AUOK190X_UPDATE_MODE(1);
+		par->last_mode = -1;
+	} else {
+		mode = AUOK190X_UPDATE_MODE(par->update_mode);
+		par->last_mode = par->update_mode;
+	}
+
+	if (par->flash)
+		mode |= AUOK190X_UPDATE_NONFLASH;
+
+	auok1900_update_region(par, mode, y1, y2);
+}
+
+static void auok1900fb_dpy_update(struct auok190xfb_par *par)
+{
+	int mode;
+
+	if (par->update_mode < 0) {
+		mode = AUOK190X_UPDATE_MODE(0);
+		par->last_mode = -1;
+	} else {
+		mode = AUOK190X_UPDATE_MODE(par->update_mode);
+		par->last_mode = par->update_mode;
+	}
+
+	if (par->flash)
+		mode |= AUOK190X_UPDATE_NONFLASH;
+
+	auok1900_update_region(par, mode, 0, par->info->var.yres);
+	par->update_cnt = 0;
+}
+
+static bool auok1900fb_need_refresh(struct auok190xfb_par *par)
+{
+	return (par->update_cnt > 10);
+}
+
+static int __devinit auok1900fb_probe(struct platform_device *pdev)
+{
+	struct auok190x_init_data init;
+	struct auok190x_board *board;
+
+	/* pick up board specific routines */
+	board = pdev->dev.platform_data;
+	if (!board)
+		return -EINVAL;
+
+	/* fill temporary init struct for common init */
+	init.id = "auo_k1900fb";
+	init.board = board;
+	init.update_partial = auok1900fb_dpy_update_pages;
+	init.update_all = auok1900fb_dpy_update;
+	init.need_refresh = auok1900fb_need_refresh;
+	init.init = auok1900_init;
+
+	return auok190x_common_probe(pdev, &init);
+}
+
+static int __devexit auok1900fb_remove(struct platform_device *pdev)
+{
+	return auok190x_common_remove(pdev);
+}
+
+static struct platform_driver auok1900fb_driver = {
+	.probe	= auok1900fb_probe,
+	.remove = __devexit_p(auok1900fb_remove),
+	.driver	= {
+		.owner	= THIS_MODULE,
+		.name	= "auo_k1900fb",
+		.pm = &auok190x_pm,
+	},
+};
+module_platform_driver(auok1900fb_driver);
+
+MODULE_DESCRIPTION("framebuffer driver for the AUO-K1900 EPD controller");
+MODULE_AUTHOR("Heiko Stuebner <heiko@sntech.de>");
+MODULE_LICENSE("GPL");
diff --git a/drivers/video/auo_k1901fb.c b/drivers/video/auo_k1901fb.c
new file mode 100644
index 0000000..1c054c1
--- /dev/null
+++ b/drivers/video/auo_k1901fb.c
@@ -0,0 +1,251 @@
+/*
+ * auok190xfb.c -- FB driver for AUO-K1901 controllers
+ *
+ * Copyright (C) 2011, 2012 Heiko Stuebner <heiko@sntech.de>
+ *
+ * based on broadsheetfb.c
+ *
+ * Copyright (C) 2008, Jaya Kumar
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * Layout is based on skeletonfb.c by James Simmons and Geert Uytterhoeven.
+ *
+ * This driver is written to be used with the AUO-K1901 display controller.
+ *
+ * It is intended to be architecture independent. A board specific driver
+ * must be used to perform all the physical IO interactions.
+ *
+ * The controller supports different update modes:
+ * mode0+1 16 step gray (4bit)
+ * mode2+3 4 step gray (2bit)
+ * mode4+5 2 step gray (1bit)
+ * - mode4 is described as "without LUT"
+ * mode7 automatic selection of update mode
+ *
+ * The most interesting difference from the K1900 is the ability to do screen
+ * updates in an asynchronous fashion. Where the K1900 needs to wait for the
+ * current update to complete, the K1901 can already process subsequent updates.
+ */
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/errno.h>
+#include <linux/string.h>
+#include <linux/mm.h>
+#include <linux/slab.h>
+#include <linux/delay.h>
+#include <linux/interrupt.h>
+#include <linux/fb.h>
+#include <linux/init.h>
+#include <linux/platform_device.h>
+#include <linux/list.h>
+#include <linux/firmware.h>
+#include <linux/gpio.h>
+#include <linux/pm_runtime.h>
+
+#include <video/auo_k190xfb.h>
+
+#include "auo_k190x.h"
+
+/*
+ * AUO-K1901 specific commands
+ */
+
+#define AUOK1901_CMD_LUT_INTERFACE	0x0005
+#define AUOK1901_CMD_DMA_START		0x1001
+#define AUOK1901_CMD_CURSOR_START	0x1007
+#define AUOK1901_CMD_CURSOR_STOP	AUOK190X_CMD_DATA_STOP
+#define AUOK1901_CMD_DDMA_START		0x1009
+
+#define AUOK1901_INIT_GATE_PULSE_LOW	(0 << 14)
+#define AUOK1901_INIT_GATE_PULSE_HIGH	(1 << 14)
+#define AUOK1901_INIT_SINGLE_GATE	(0 << 13)
+#define AUOK1901_INIT_DOUBLE_GATE	(1 << 13)
+
+/* Bits to pixels
+ *   Mode	15-12	11-8	7-4	3-0
+ *   format2	2	T	1	T
+ *   format3	1	T	2	T
+ *   format4	T	2	T	1
+ *   format5	T	1	T	2
+ *
+ *   halftone modes:
+ *   format6	2	2	1	1
+ *   format7	1	1	2	2
+ */
+#define AUOK1901_INIT_FORMAT2		(1 << 7)
+#define AUOK1901_INIT_FORMAT3		((1 << 7) | (1 << 6))
+#define AUOK1901_INIT_FORMAT4		(1 << 8)
+#define AUOK1901_INIT_FORMAT5		((1 << 8) | (1 << 6))
+#define AUOK1901_INIT_FORMAT6		((1 << 8) | (1 << 7))
+#define AUOK1901_INIT_FORMAT7		((1 << 8) | (1 << 7) | (1 << 6))
+
+/* res[4] to bit 10
+ * res[3-0] to bits 5-2
+ */
+#define AUOK1901_INIT_RESOLUTION(_res)	(((_res & (1 << 4)) << 6) \
+					 | ((_res & 0xf) << 2))
+
+/*
+ * portrait / landscape orientation in AUOK1901_CMD_DMA_START
+ */
+#define AUOK1901_DMA_ROTATE90(_rot)		((_rot & 1) << 13)
+
+/*
+ * equivalent to 1 << 11, needs the ~ to get the same rotation as the K1900
+ */
+#define AUOK1901_DDMA_ROTATE180(_rot)		((~_rot & 2) << 10)
+
+static void auok1901_init(struct auok190xfb_par *par)
+{
+	struct auok190x_board *board = par->board;
+	u16 init_param = 0;
+
+	init_param |= AUOK190X_INIT_INVERSE_WHITE;
+	init_param |= AUOK190X_INIT_FORMAT0;
+	init_param |= AUOK1901_INIT_RESOLUTION(par->resolution);
+	init_param |= AUOK190X_INIT_SHIFT_LEFT;
+
+	auok190x_send_cmdargs(par, AUOK190X_CMD_INIT, 1, &init_param);
+
+	/* let the controller finish */
+	board->wait_for_rdy(par);
+}
+
+static void auok1901_update_region(struct auok190xfb_par *par, int mode,
+						u16 y1, u16 y2)
+{
+	struct device *dev = par->info->device;
+	unsigned char *buf = (unsigned char *)par->info->screen_base;
+	int xres = par->info->var.xres;
+	u16 args[5];
+
+	pm_runtime_get_sync(dev);
+
+	mutex_lock(&(par->io_lock));
+
+	/* y1 and y2 must be a multiple of 2 so drop the lowest bit */
+	y1 &= 0xfffe;
+	y2 &= 0xfffe;
+
+	dev_dbg(dev, "update (x,y,w,h,mode)=(%d,%d,%d,%d,%d)\n",
+		1, y1+1, xres, y2-y1, mode);
+
+	/* K1901: first transfer the region data */
+	args[0] = AUOK1901_DMA_ROTATE90(par->rotation) | 1;
+	args[1] = y1 + 1;
+	args[2] = xres;
+	args[3] = y2 - y1;
+	buf += y1 * xres;
+	auok190x_send_cmdargs_pixels_nowait(par, AUOK1901_CMD_DMA_START, 4,
+					    args, ((y2 - y1) * xres)/2,
+					    (u16 *) buf);
+	auok190x_send_command_nowait(par, AUOK190X_CMD_DATA_STOP);
+
+	/* K1901: second tell the controller to update the region with mode */
+	args[0] = mode | AUOK1901_DDMA_ROTATE180(par->rotation);
+	args[1] = 1;
+	args[2] = y1 + 1;
+	args[3] = xres;
+	args[4] = y2 - y1;
+	auok190x_send_cmdargs_nowait(par, AUOK1901_CMD_DDMA_START, 5, args);
+
+	par->update_cnt++;
+
+	mutex_unlock(&(par->io_lock));
+
+	pm_runtime_mark_last_busy(dev);
+	pm_runtime_put_autosuspend(dev);
+}
+
+static void auok1901fb_dpy_update_pages(struct auok190xfb_par *par,
+						u16 y1, u16 y2)
+{
+	int mode;
+
+	if (par->update_mode < 0) {
+		mode = AUOK190X_UPDATE_MODE(1);
+		par->last_mode = -1;
+	} else {
+		mode = AUOK190X_UPDATE_MODE(par->update_mode);
+		par->last_mode = par->update_mode;
+	}
+
+	if (par->flash)
+		mode |= AUOK190X_UPDATE_NONFLASH;
+
+	auok1901_update_region(par, mode, y1, y2);
+}
+
+static void auok1901fb_dpy_update(struct auok190xfb_par *par)
+{
+	int mode;
+
+	/* When doing full updates, wait for the controller to be ready
+	 * This will hopefully catch some hangs of the K1901
+	 */
+	par->board->wait_for_rdy(par);
+
+	if (par->update_mode < 0) {
+		mode = AUOK190X_UPDATE_MODE(0);
+		par->last_mode = -1;
+	} else {
+		mode = AUOK190X_UPDATE_MODE(par->update_mode);
+		par->last_mode = par->update_mode;
+	}
+
+	if (par->flash)
+		mode |= AUOK190X_UPDATE_NONFLASH;
+
+	auok1901_update_region(par, mode, 0, par->info->var.yres);
+	par->update_cnt = 0;
+}
+
+static bool auok1901fb_need_refresh(struct auok190xfb_par *par)
+{
+	return (par->update_cnt > 10);
+}
+
+static int __devinit auok1901fb_probe(struct platform_device *pdev)
+{
+	struct auok190x_init_data init;
+	struct auok190x_board *board;
+
+	/* pick up board specific routines */
+	board = pdev->dev.platform_data;
+	if (!board)
+		return -EINVAL;
+
+	/* fill temporary init struct for common init */
+	init.id = "auo_k1901fb";
+	init.board = board;
+	init.update_partial = auok1901fb_dpy_update_pages;
+	init.update_all = auok1901fb_dpy_update;
+	init.need_refresh = auok1901fb_need_refresh;
+	init.init = auok1901_init;
+
+	return auok190x_common_probe(pdev, &init);
+}
+
+static int __devexit auok1901fb_remove(struct platform_device *pdev)
+{
+	return auok190x_common_remove(pdev);
+}
+
+static struct platform_driver auok1901fb_driver = {
+	.probe	= auok1901fb_probe,
+	.remove = __devexit_p(auok1901fb_remove),
+	.driver	= {
+		.owner	= THIS_MODULE,
+		.name	= "auo_k1901fb",
+		.pm = &auok190x_pm,
+	},
+};
+module_platform_driver(auok1901fb_driver);
+
+MODULE_DESCRIPTION("framebuffer driver for the AUO-K1901 EPD controller");
+MODULE_AUTHOR("Heiko Stuebner <heiko@sntech.de>");
+MODULE_LICENSE("GPL");
diff --git a/drivers/video/auo_k190x.c b/drivers/video/auo_k190x.c
new file mode 100644
index 0000000..77da6a2
--- /dev/null
+++ b/drivers/video/auo_k190x.c
@@ -0,0 +1,1046 @@
+/*
+ * Common code for AUO-K190X framebuffer drivers
+ *
+ * Copyright (C) 2012 Heiko Stuebner <heiko@sntech.de>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/gpio.h>
+#include <linux/pm_runtime.h>
+#include <linux/fb.h>
+#include <linux/delay.h>
+#include <linux/uaccess.h>
+#include <linux/vmalloc.h>
+#include <linux/regulator/consumer.h>
+
+#include <video/auo_k190xfb.h>
+
+#include "auo_k190x.h"
+
+struct panel_info {
+	int w;
+	int h;
+};
+
+/* table of panel specific parameters to be indexed into by the board drivers */
+static struct panel_info panel_table[] = {
+	/* standard 6" */
+	[AUOK190X_RESOLUTION_800_600] = {
+		.w = 800,
+		.h = 600,
+	},
+	/* standard 9" */
+	[AUOK190X_RESOLUTION_1024_768] = {
+		.w = 1024,
+		.h = 768,
+	},
+};
+
+/*
+ * private I80 interface to the board driver
+ */
+
+static void auok190x_issue_data(struct auok190xfb_par *par, u16 data)
+{
+	par->board->set_ctl(par, AUOK190X_I80_WR, 0);
+	par->board->set_hdb(par, data);
+	par->board->set_ctl(par, AUOK190X_I80_WR, 1);
+}
+
+static void auok190x_issue_cmd(struct auok190xfb_par *par, u16 data)
+{
+	par->board->set_ctl(par, AUOK190X_I80_DC, 0);
+	auok190x_issue_data(par, data);
+	par->board->set_ctl(par, AUOK190X_I80_DC, 1);
+}
+
+static int auok190x_issue_pixels(struct auok190xfb_par *par, int size,
+				 u16 *data)
+{
+	struct device *dev = par->info->device;
+	int i;
+	u16 tmp;
+
+	if (size & 3) {
+		dev_err(dev, "issue_pixels: size %d must be a multiple of 4\n",
+			size);
+		return -EINVAL;
+	}
+
+	for (i = 0; i < (size >> 1); i++) {
+		par->board->set_ctl(par, AUOK190X_I80_WR, 0);
+
+		/* simple reduction of 8bit staticgray to 4bit gray
+		 * combines 4 * 4bit pixel values into a 16bit value
+		 */
+		tmp  = (data[2*i] & 0xF0) >> 4;
+		tmp |= (data[2*i] & 0xF000) >> 8;
+		tmp |= (data[2*i+1] & 0xF0) << 4;
+		tmp |= (data[2*i+1] & 0xF000);
+
+		par->board->set_hdb(par, tmp);
+		par->board->set_ctl(par, AUOK190X_I80_WR, 1);
+	}
+
+	return 0;
+}
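To make the nibble packing above concrete, here is a small, self-contained userspace illustration (not driver code). It assumes the framebuffer bytes are accessed as little-endian 16bit words, exactly as auok190x_issue_pixels() reads them, and shows the four 8bit pixels 0x10, 0x20, 0x30, 0x40 being reduced to the single word 0x4321.

#include <assert.h>
#include <stdint.h>

int main(void)
{
	/* two little-endian u16 words holding the framebuffer bytes
	 * 0x10, 0x20, 0x30, 0x40 (one byte per pixel) */
	uint16_t data[2] = { 0x2010, 0x4030 };
	uint16_t tmp;

	/* keep only the high nibble of each 8bit pixel and pack four
	 * of them into one 16bit word, as in the loop above */
	tmp  = (data[0] & 0x00F0) >> 4;	/* pixel 0 -> bits  3-0 */
	tmp |= (data[0] & 0xF000) >> 8;	/* pixel 1 -> bits  7-4 */
	tmp |= (data[1] & 0x00F0) << 4;	/* pixel 2 -> bits 11-8 */
	tmp |= (data[1] & 0xF000);	/* pixel 3 -> bits 15-12 */

	assert(tmp == 0x4321);
	return 0;
}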
+
+static u16 auok190x_read_data(struct auok190xfb_par *par)
+{
+	u16 data;
+
+	par->board->set_ctl(par, AUOK190X_I80_OE, 0);
+	data = par->board->get_hdb(par);
+	par->board->set_ctl(par, AUOK190X_I80_OE, 1);
+
+	return data;
+}
+
+/*
+ * Command interface for the controller drivers
+ */
+
+void auok190x_send_command_nowait(struct auok190xfb_par *par, u16 data)
+{
+	par->board->set_ctl(par, AUOK190X_I80_CS, 0);
+	auok190x_issue_cmd(par, data);
+	par->board->set_ctl(par, AUOK190X_I80_CS, 1);
+}
+EXPORT_SYMBOL_GPL(auok190x_send_command_nowait);
+
+void auok190x_send_cmdargs_nowait(struct auok190xfb_par *par, u16 cmd,
+				  int argc, u16 *argv)
+{
+	int i;
+
+	par->board->set_ctl(par, AUOK190X_I80_CS, 0);
+	auok190x_issue_cmd(par, cmd);
+
+	for (i = 0; i < argc; i++)
+		auok190x_issue_data(par, argv[i]);
+	par->board->set_ctl(par, AUOK190X_I80_CS, 1);
+}
+EXPORT_SYMBOL_GPL(auok190x_send_cmdargs_nowait);
+
+int auok190x_send_command(struct auok190xfb_par *par, u16 data)
+{
+	int ret;
+
+	ret = par->board->wait_for_rdy(par);
+	if (ret)
+		return ret;
+
+	auok190x_send_command_nowait(par, data);
+	return 0;
+}
+EXPORT_SYMBOL_GPL(auok190x_send_command);
+
+int auok190x_send_cmdargs(struct auok190xfb_par *par, u16 cmd,
+			   int argc, u16 *argv)
+{
+	int ret;
+
+	ret = par->board->wait_for_rdy(par);
+	if (ret)
+		return ret;
+
+	auok190x_send_cmdargs_nowait(par, cmd, argc, argv);
+	return 0;
+}
+EXPORT_SYMBOL_GPL(auok190x_send_cmdargs);
+
+int auok190x_read_cmdargs(struct auok190xfb_par *par, u16 cmd,
+			   int argc, u16 *argv)
+{
+	int i, ret;
+
+	ret = par->board->wait_for_rdy(par);
+	if (ret)
+		return ret;
+
+	par->board->set_ctl(par, AUOK190X_I80_CS, 0);
+	auok190x_issue_cmd(par, cmd);
+
+	for (i = 0; i < argc; i++)
+		argv[i] = auok190x_read_data(par);
+	par->board->set_ctl(par, AUOK190X_I80_CS, 1);
+
+	return 0;
+}
+EXPORT_SYMBOL_GPL(auok190x_read_cmdargs);
+
+void auok190x_send_cmdargs_pixels_nowait(struct auok190xfb_par *par, u16 cmd,
+				  int argc, u16 *argv, int size, u16 *data)
+{
+	int i;
+
+	par->board->set_ctl(par, AUOK190X_I80_CS, 0);
+
+	auok190x_issue_cmd(par, cmd);
+
+	for (i = 0; i < argc; i++)
+		auok190x_issue_data(par, argv[i]);
+
+	auok190x_issue_pixels(par, size, data);
+
+	par->board->set_ctl(par, AUOK190X_I80_CS, 1);
+}
+EXPORT_SYMBOL_GPL(auok190x_send_cmdargs_pixels_nowait);
+
+int auok190x_send_cmdargs_pixels(struct auok190xfb_par *par, u16 cmd,
+				  int argc, u16 *argv, int size, u16 *data)
+{
+	int ret;
+
+	ret = par->board->wait_for_rdy(par);
+	if (ret)
+		return ret;
+
+	auok190x_send_cmdargs_pixels_nowait(par, cmd, argc, argv, size, data);
+
+	return 0;
+}
+EXPORT_SYMBOL_GPL(auok190x_send_cmdargs_pixels);
+
+/*
+ * fbdefio callbacks - common on both controllers.
+ */
+
+static void auok190xfb_dpy_first_io(struct fb_info *info)
+{
+	/* tell runtime-pm that we wish to use the device in a short time */
+	pm_runtime_get(info->device);
+}
+
+/* this is called back from the deferred io workqueue */
+static void auok190xfb_dpy_deferred_io(struct fb_info *info,
+				struct list_head *pagelist)
+{
+	struct fb_deferred_io *fbdefio = info->fbdefio;
+	struct auok190xfb_par *par = info->par;
+	u16 yres = info->var.yres;
+	u16 xres = info->var.xres;
+	u16 y1 = 0, h = 0;
+	int prev_index = -1;
+	struct page *cur;
+	int h_inc;
+	int threshold;
+
+	if (!list_empty(pagelist))
+		/* the device resume should've been requested through first_io,
+		 * if the resume has not finished yet, wait for it.
+		 */
+		pm_runtime_barrier(info->device);
+	else
+		/* We reached this via fsync or some other way.
+		 * In either case the first_io function did not run,
+		 * so we runtime_resume the device here synchronously.
+		 */
+		pm_runtime_get_sync(info->device);
+
+	/* Do a full screen update every n updates to prevent
+	 * excessive darkening of the Sipix display.
+	 * If we do this, there is no need to walk the pages.
+	 */
+	if (par->need_refresh(par)) {
+		par->update_all(par);
+		goto out;
+	}
+
+	/* height increment is fixed per page */
+	h_inc = DIV_ROUND_UP(PAGE_SIZE , xres);
+
+	/* calculate number of pages from pixel height */
+	threshold = par->consecutive_threshold / h_inc;
+	if (threshold < 1)
+		threshold = 1;
+
+	/* walk the written page list and swizzle the data */
+	list_for_each_entry(cur, &fbdefio->pagelist, lru) {
+		if (prev_index < 0) {
+			/* just starting so assign first page */
+			y1 = (cur->index << PAGE_SHIFT) / xres;
+			h = h_inc;
+		} else if ((cur->index - prev_index) <= threshold) {
+			/* page is within our threshold for single updates */
+			h += h_inc * (cur->index - prev_index);
+		} else {
+			/* page not consecutive, issue previous update first */
+			par->update_partial(par, y1, y1 + h);
+
+			/* start over with our non consecutive page */
+			y1 = (cur->index << PAGE_SHIFT) / xres;
+			h = h_inc;
+		}
+		prev_index = cur->index;
+	}
+
+	/* if we still have any pages to update we do so now */
+	if (h >= yres)
+		/* it's a full screen update, just do it */
+		par->update_all(par);
+	else
+		par->update_partial(par, y1, min((u16) (y1 + h), yres));
+
+out:
+	pm_runtime_mark_last_busy(info->device);
+	pm_runtime_put_autosuspend(info->device);
+}
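As a worked example of the page-to-scanline conversion above (an illustration only, assuming 4096 byte pages and the 800x600 panel from panel_table at 8 bits per pixel): each written page covers DIV_ROUND_UP(4096, 800) = 6 lines, and with the default consecutive_threshold of 100 set in the common probe code, pages up to 100 / 6 = 16 indices apart are still merged into one partial update.

#include <assert.h>

#define EXAMPLE_PAGE_SIZE	4096	/* assumed page size */
#define DIV_ROUND_UP(n, d)	(((n) + (d) - 1) / (d))

int main(void)
{
	int xres = 800;				/* 800x600 panel, 8bpp */
	int consecutive_threshold = 100;	/* default from common probe */

	int h_inc = DIV_ROUND_UP(EXAMPLE_PAGE_SIZE, xres);
	int threshold = consecutive_threshold / h_inc;

	assert(h_inc == 6);		/* lines covered by one written page */
	assert(threshold == 16);	/* largest page-index gap still merged */
	return 0;
}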
+
+/*
+ * framebuffer operations
+ */
+
+/*
+ * this is the slow path from userspace. they can seek and write to
+ * the fb. it's inefficient to do anything less than a full screen draw
+ */
+static ssize_t auok190xfb_write(struct fb_info *info, const char __user *buf,
+				size_t count, loff_t *ppos)
+{
+	struct auok190xfb_par *par = info->par;
+	unsigned long p = *ppos;
+	void *dst;
+	int err = 0;
+	unsigned long total_size;
+
+	if (info->state != FBINFO_STATE_RUNNING)
+		return -EPERM;
+
+	total_size = info->fix.smem_len;
+
+	if (p > total_size)
+		return -EFBIG;
+
+	if (count > total_size) {
+		err = -EFBIG;
+		count = total_size;
+	}
+
+	if (count + p > total_size) {
+		if (!err)
+			err = -ENOSPC;
+
+		count = total_size - p;
+	}
+
+	dst = (void *)(info->screen_base + p);
+
+	if (copy_from_user(dst, buf, count))
+		err = -EFAULT;
+
+	if  (!err)
+		*ppos += count;
+
+	par->update_all(par);
+
+	return (err) ? err : count;
+}
+
+static void auok190xfb_fillrect(struct fb_info *info,
+				   const struct fb_fillrect *rect)
+{
+	struct auok190xfb_par *par = info->par;
+
+	sys_fillrect(info, rect);
+
+	par->update_all(par);
+}
+
+static void auok190xfb_copyarea(struct fb_info *info,
+				   const struct fb_copyarea *area)
+{
+	struct auok190xfb_par *par = info->par;
+
+	sys_copyarea(info, area);
+
+	par->update_all(par);
+}
+
+static void auok190xfb_imageblit(struct fb_info *info,
+				const struct fb_image *image)
+{
+	struct auok190xfb_par *par = info->par;
+
+	sys_imageblit(info, image);
+
+	par->update_all(par);
+}
+
+static int auok190xfb_check_var(struct fb_var_screeninfo *var,
+				   struct fb_info *info)
+{
+	if (info->var.xres != var->xres || info->var.yres != var->yres ||
+	    info->var.xres_virtual != var->xres_virtual ||
+	    info->var.yres_virtual != var->yres_virtual) {
+		pr_info("%s: Resolution not supported: X%u x Y%u\n",
+			 __func__, var->xres, var->yres);
+		return -EINVAL;
+	}
+
+	/*
+	 *  Memory limit
+	 */
+
+	if ((info->fix.line_length * var->yres_virtual) > info->fix.smem_len) {
+		pr_info("%s: Memory Limit requested yres_virtual = %u\n",
+			 __func__, var->yres_virtual);
+		return -ENOMEM;
+	}
+
+	return 0;
+}
+
+static struct fb_ops auok190xfb_ops = {
+	.owner		= THIS_MODULE,
+	.fb_read	= fb_sys_read,
+	.fb_write	= auok190xfb_write,
+	.fb_fillrect	= auok190xfb_fillrect,
+	.fb_copyarea	= auok190xfb_copyarea,
+	.fb_imageblit	= auok190xfb_imageblit,
+	.fb_check_var	= auok190xfb_check_var,
+};
+
+/*
+ * Controller-functions common to both K1900 and K1901
+ */
+
+static int auok190x_read_temperature(struct auok190xfb_par *par)
+{
+	struct device *dev = par->info->device;
+	u16 data[4];
+	int temp;
+
+	pm_runtime_get_sync(dev);
+
+	mutex_lock(&(par->io_lock));
+
+	auok190x_read_cmdargs(par, AUOK190X_CMD_READ_VERSION, 4, data);
+
+	mutex_unlock(&(par->io_lock));
+
+	pm_runtime_mark_last_busy(dev);
+	pm_runtime_put_autosuspend(dev);
+
+	/* sanitize and strip off the half-degree bit for now */
+	temp = ((data[0] & AUOK190X_VERSION_TEMP_MASK) >> 1);
+
+	/* handle positive and negative temperatures */
+	if (temp >= 201)
+		return (255 - temp + 1) * (-1);
+	else
+		return temp;
+}
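The sign handling above can be restated in isolation; the snippet below is a userspace re-statement of that mapping (not driver code): after masking and dropping the half-degree bit, raw values 0..200 are positive degrees and 201..255 wrap around to -55..-1.

#include <assert.h>

/* same mapping as the tail of auok190x_read_temperature() above */
static int decode_temp(int temp)
{
	if (temp >= 201)
		return (255 - temp + 1) * (-1);
	return temp;
}

int main(void)
{
	assert(decode_temp(25) == 25);		/* positive readings pass through */
	assert(decode_temp(254) == -2);		/* wrapped negative reading */
	assert(decode_temp(201) == -55);	/* most negative representable value */
	return 0;
}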
+
+static void auok190x_identify(struct auok190xfb_par *par)
+{
+	struct device *dev = par->info->device;
+	u16 data[4];
+
+	pm_runtime_get_sync(dev);
+
+	mutex_lock(&(par->io_lock));
+
+	auok190x_read_cmdargs(par, AUOK190X_CMD_READ_VERSION, 4, data);
+
+	mutex_unlock(&(par->io_lock));
+
+	par->epd_type = data[1] & AUOK190X_VERSION_TEMP_MASK;
+
+	par->panel_size_int = AUOK190X_VERSION_SIZE_INT(data[2]);
+	par->panel_size_float = AUOK190X_VERSION_SIZE_FLOAT(data[2]);
+	par->panel_model = AUOK190X_VERSION_MODEL(data[2]);
+
+	par->tcon_version = AUOK190X_VERSION_TCON(data[3]);
+	par->lut_version = AUOK190X_VERSION_LUT(data[3]);
+
+	dev_dbg(dev, "panel %d.%din, model 0x%x, EPD 0x%x TCON-rev 0x%x, LUT-rev 0x%x",
+		par->panel_size_int, par->panel_size_float, par->panel_model,
+		par->epd_type, par->tcon_version, par->lut_version);
+
+	pm_runtime_mark_last_busy(dev);
+	pm_runtime_put_autosuspend(dev);
+}
+
+/*
+ * Sysfs functions
+ */
+
+static ssize_t update_mode_show(struct device *dev,
+				struct device_attribute *attr, char *buf)
+{
+	struct fb_info *info = dev_get_drvdata(dev);
+	struct auok190xfb_par *par = info->par;
+
+	return sprintf(buf, "%d\n", par->update_mode);
+}
+
+static ssize_t update_mode_store(struct device *dev,
+				 struct device_attribute *attr,
+				 const char *buf, size_t count)
+{
+	struct fb_info *info = dev_get_drvdata(dev);
+	struct auok190xfb_par *par = info->par;
+	int mode, ret;
+
+	ret = kstrtoint(buf, 10, &mode);
+	if (ret)
+		return ret;
+
+	par->update_mode = mode;
+
+	/* if we enter a better mode, do a full update */
+	if (par->last_mode > 1 && mode < par->last_mode)
+		par->update_all(par);
+
+	return count;
+}
+
+static ssize_t flash_show(struct device *dev, struct device_attribute *attr,
+			  char *buf)
+{
+	struct fb_info *info = dev_get_drvdata(dev);
+	struct auok190xfb_par *par = info->par;
+
+	return sprintf(buf, "%d\n", par->flash);
+}
+
+static ssize_t flash_store(struct device *dev, struct device_attribute *attr,
+			   const char *buf, size_t count)
+{
+	struct fb_info *info = dev_get_drvdata(dev);
+	struct auok190xfb_par *par = info->par;
+	int flash, ret;
+
+	ret = kstrtoint(buf, 10, &flash);
+	if (ret)
+		return ret;
+
+	if (flash > 0)
+		par->flash = 1;
+	else
+		par->flash = 0;
+
+	return count;
+}
+
+static ssize_t temp_show(struct device *dev, struct device_attribute *attr,
+			 char *buf)
+{
+	struct fb_info *info = dev_get_drvdata(dev);
+	struct auok190xfb_par *par = info->par;
+	int temp;
+
+	temp = auok190x_read_temperature(par);
+	return sprintf(buf, "%d\n", temp);
+}
+
+static DEVICE_ATTR(update_mode, 0644, update_mode_show, update_mode_store);
+static DEVICE_ATTR(flash, 0644, flash_show, flash_store);
+static DEVICE_ATTR(temp, 0644, temp_show, NULL);
+
+static struct attribute *auok190x_attributes[] = {
+	&dev_attr_update_mode.attr,
+	&dev_attr_flash.attr,
+	&dev_attr_temp.attr,
+	NULL
+};
+
+static const struct attribute_group auok190x_attr_group = {
+	.attrs		= auok190x_attributes,
+};
+
+static int auok190x_power(struct auok190xfb_par *par, bool on)
+{
+	struct auok190x_board *board = par->board;
+	int ret;
+
+	if (on) {
+		/* We should keep POWER up for at least 80ms before setting
+		 * RST_N and SLP_N high (TCON spec 20100803_v35 p59)
+		 */
+		ret = regulator_enable(par->regulator);
+		if (ret)
+			return ret;
+
+		msleep(200);
+		gpio_set_value(board->gpio_nrst, 1);
+		gpio_set_value(board->gpio_nsleep, 1);
+		msleep(200);
+	} else {
+		regulator_disable(par->regulator);
+		gpio_set_value(board->gpio_nrst, 0);
+		gpio_set_value(board->gpio_nsleep, 0);
+	}
+
+	return 0;
+}
+
+/*
+ * Recovery - powercycle the controller
+ */
+
+static void auok190x_recover(struct auok190xfb_par *par)
+{
+	auok190x_power(par, 0);
+	msleep(100);
+	auok190x_power(par, 1);
+
+	par->init(par);
+
+	/* wait for init to complete */
+	par->board->wait_for_rdy(par);
+}
+
+/*
+ * Power-management
+ */
+
+#ifdef CONFIG_PM
+static int auok190x_runtime_suspend(struct device *dev)
+{
+	struct platform_device *pdev = to_platform_device(dev);
+	struct fb_info *info = platform_get_drvdata(pdev);
+	struct auok190xfb_par *par = info->par;
+	struct auok190x_board *board = par->board;
+	u16 standby_param;
+
+	/* take and keep the lock until we are resumed, as the controller
+	 * will never reach the non-busy state when in standby mode
+	 */
+	mutex_lock(&(par->io_lock));
+
+	if (par->standby) {
+		dev_warn(dev, "already in standby, runtime-pm pairing mismatch\n");
+		mutex_unlock(&(par->io_lock));
+		return 0;
+	}
+
+	/* according to runtime_pm.txt, runtime_suspend only means that the
+	 * device will not process data and will not communicate with the CPU.
+	 * As we hold the lock, this stays true even without standby.
+	 */
+	if (board->quirks & AUOK190X_QUIRK_STANDBYBROKEN) {
+		dev_dbg(dev, "runtime suspend without standby\n");
+		goto finish;
+	} else if (board->quirks & AUOK190X_QUIRK_STANDBYPARAM) {
+		/* for some TCON versions STANDBY expects a parameter (0), but
+		 * it seems the exact TCON version still has to be determined.
+		 */
+		dev_dbg(dev, "runtime suspend with additional empty param\n");
+		standby_param = 0;
+		auok190x_send_cmdargs(par, AUOK190X_CMD_STANDBY, 1,
+				      &standby_param);
+	} else {
+		dev_dbg(dev, "runtime suspend without param\n");
+		auok190x_send_command(par, AUOK190X_CMD_STANDBY);
+	}
+
+	msleep(64);
+
+finish:
+	par->standby = 1;
+
+	return 0;
+}
+
+static int auok190x_runtime_resume(struct device *dev)
+{
+	struct platform_device *pdev = to_platform_device(dev);
+	struct fb_info *info = platform_get_drvdata(pdev);
+	struct auok190xfb_par *par = info->par;
+	struct auok190x_board *board = par->board;
+
+	if (!par->standby) {
+		dev_warn(dev, "not in standby, runtime-pm pairing mismatch\n");
+		return 0;
+	}
+
+	if (board->quirks & AUOK190X_QUIRK_STANDBYBROKEN) {
+		dev_dbg(dev, "runtime resume without standby\n");
+	} else {
+		/* when in standby, controller is always busy
+		 * and only accepts the wakeup command
+		 */
+		dev_dbg(dev, "runtime resume from standby\n");
+		auok190x_send_command_nowait(par, AUOK190X_CMD_WAKEUP);
+
+		msleep(160);
+
+		/* wait for the controller to be ready and release the lock */
+		board->wait_for_rdy(par);
+	}
+
+	par->standby = 0;
+
+	mutex_unlock(&(par->io_lock));
+
+	return 0;
+}
+
+static int auok190x_suspend(struct device *dev)
+{
+	struct platform_device *pdev = to_platform_device(dev);
+	struct fb_info *info = platform_get_drvdata(pdev);
+	struct auok190xfb_par *par = info->par;
+	struct auok190x_board *board = par->board;
+	int ret;
+
+	dev_dbg(dev, "suspend\n");
+	if (board->quirks & AUOK190X_QUIRK_STANDBYBROKEN) {
+		/* suspend via powering off the ic */
+		dev_dbg(dev, "suspend with broken standby\n");
+
+		auok190x_power(par, 0);
+	} else {
+		dev_dbg(dev, "suspend using sleep\n");
+
+		/* the sleep state can only be entered from the standby state.
+		 * pm_runtime_get_noresume gets called before the suspend call.
+		 * So the device's usage count is >0 but it is not necessarily
+		 * active.
+		 */
+		if (!pm_runtime_status_suspended(dev)) {
+			ret = auok190x_runtime_suspend(dev);
+			if (ret < 0) {
+				dev_err(dev, "auok190x_runtime_suspend failed with %d\n",
+					ret);
+				return ret;
+			}
+			par->manual_standby = 1;
+		}
+
+		gpio_direction_output(board->gpio_nsleep, 0);
+	}
+
+	msleep(100);
+
+	return 0;
+}
+
+static int auok190x_resume(struct device *dev)
+{
+	struct platform_device *pdev = to_platform_device(dev);
+	struct fb_info *info = platform_get_drvdata(pdev);
+	struct auok190xfb_par *par = info->par;
+	struct auok190x_board *board = par->board;
+
+	dev_dbg(dev, "resume\n");
+	if (board->quirks & AUOK190X_QUIRK_STANDBYBROKEN) {
+		dev_dbg(dev, "resume with broken standby\n");
+
+		auok190x_power(par, 1);
+
+		par->init(par);
+	} else {
+		dev_dbg(dev, "resume from sleep\n");
+
+		/* device should be in runtime suspend when we were suspended
+		 * and pm_runtime_put_sync gets called after this function.
+		 * So there is no need to touch the standby mode here at all.
+		 */
+		gpio_direction_output(board->gpio_nsleep, 1);
+		msleep(100);
+
+		/* an additional init call seems to be necessary after sleep */
+		auok190x_runtime_resume(dev);
+		par->init(par);
+
+		/* if we were runtime-suspended before, suspend again */
+		if (!par->manual_standby)
+			auok190x_runtime_suspend(dev);
+		else
+			par->manual_standby = 0;
+	}
+
+	return 0;
+}
+#endif
+
+const struct dev_pm_ops auok190x_pm = {
+	SET_RUNTIME_PM_OPS(auok190x_runtime_suspend, auok190x_runtime_resume,
+			   NULL)
+	SET_SYSTEM_SLEEP_PM_OPS(auok190x_suspend, auok190x_resume)
+};
+EXPORT_SYMBOL_GPL(auok190x_pm);
+
+/*
+ * Common probe and remove code
+ */
+
+int __devinit auok190x_common_probe(struct platform_device *pdev,
+				    struct auok190x_init_data *init)
+{
+	struct auok190x_board *board = init->board;
+	struct auok190xfb_par *par;
+	struct fb_info *info;
+	struct panel_info *panel;
+	int videomemorysize, ret;
+	unsigned char *videomemory;
+
+	/* check board contents */
+	if (!board->init || !board->cleanup || !board->wait_for_rdy
+	    || !board->set_ctl || !board->set_hdb || !board->get_hdb
+	    || !board->setup_irq)
+		return -EINVAL;
+
+	info = framebuffer_alloc(sizeof(struct auok190xfb_par), &pdev->dev);
+	if (!info)
+		return -ENOMEM;
+
+	par = info->par;
+	par->info = info;
+	par->board = board;
+	par->recover = auok190x_recover;
+	par->update_partial = init->update_partial;
+	par->update_all = init->update_all;
+	par->need_refresh = init->need_refresh;
+	par->init = init->init;
+
+	/* init update modes */
+	par->update_cnt = 0;
+	par->update_mode = -1;
+	par->last_mode = -1;
+	par->flash = 0;
+
+	par->regulator = regulator_get(info->device, "vdd");
+	if (IS_ERR(par->regulator)) {
+		ret = PTR_ERR(par->regulator);
+		dev_err(info->device, "Failed to get regulator: %d\n", ret);
+		goto err_reg;
+	}
+
+	ret = board->init(par);
+	if (ret) {
+		dev_err(info->device, "board init failed, %d\n", ret);
+		goto err_board;
+	}
+
+	ret = gpio_request(board->gpio_nsleep, "AUOK190x sleep");
+	if (ret) {
+		dev_err(info->device, "could not request sleep gpio, %d\n",
+			ret);
+		goto err_gpio1;
+	}
+
+	ret = gpio_direction_output(board->gpio_nsleep, 0);
+	if (ret) {
+		dev_err(info->device, "could not set sleep gpio, %d\n", ret);
+		goto err_gpio2;
+	}
+
+	ret = gpio_request(board->gpio_nrst, "AUOK190x reset");
+	if (ret) {
+		dev_err(info->device, "could not request reset gpio, %d\n",
+			ret);
+		goto err_gpio2;
+	}
+
+	ret = gpio_direction_output(board->gpio_nrst, 0);
+	if (ret) {
+		dev_err(info->device, "could not set reset gpio, %d\n", ret);
+		goto err_gpio3;
+	}
+
+	ret = auok190x_power(par, 1);
+	if (ret) {
+		dev_err(info->device, "could not power on the device, %d\n",
+			ret);
+		goto err_gpio3;
+	}
+
+	mutex_init(&par->io_lock);
+
+	init_waitqueue_head(&par->waitq);
+
+	ret = par->board->setup_irq(par->info);
+	if (ret) {
+		dev_err(info->device, "could not setup ready-irq, %d\n", ret);
+		goto err_irq;
+	}
+
+	/* wait for init to complete */
+	par->board->wait_for_rdy(par);
+
+	/*
+	 * From here on the controller can talk to us
+	 */
+
+	/* initialise fix, var, resolution and rotation */
+
+	strlcpy(info->fix.id, init->id, 16);
+	info->fix.type = FB_TYPE_PACKED_PIXELS;
+	info->fix.visual = FB_VISUAL_STATIC_PSEUDOCOLOR;
+	info->fix.xpanstep = 0;
+	info->fix.ypanstep = 0;
+	info->fix.ywrapstep = 0;
+	info->fix.accel = FB_ACCEL_NONE;
+
+	info->var.bits_per_pixel = 8;
+	info->var.grayscale = 1;
+	info->var.red.length = 8;
+	info->var.green.length = 8;
+	info->var.blue.length = 8;
+
+	panel = &panel_table[board->resolution];
+
+	/* if 90 degree rotation, switch width and height */
+	if (board->rotation & 1) {
+		info->var.xres = panel->h;
+		info->var.yres = panel->w;
+		info->var.xres_virtual = panel->h;
+		info->var.yres_virtual = panel->w;
+		info->fix.line_length = panel->h;
+	} else {
+		info->var.xres = panel->w;
+		info->var.yres = panel->h;
+		info->var.xres_virtual = panel->w;
+		info->var.yres_virtual = panel->h;
+		info->fix.line_length = panel->w;
+	}
+
+	par->resolution = board->resolution;
+	par->rotation = board->rotation;
+
+	/* videomemory handling */
+
+	videomemorysize = roundup((panel->w * panel->h), PAGE_SIZE);
+	videomemory = vmalloc(videomemorysize);
+	if (!videomemory) {
+		ret = -ENOMEM;
+		goto err_irq;
+	}
+
+	memset(videomemory, 0, videomemorysize);
+	info->screen_base = (char *)videomemory;
+	info->fix.smem_len = videomemorysize;
+
+	info->flags = FBINFO_FLAG_DEFAULT | FBINFO_VIRTFB;
+	info->fbops = &auok190xfb_ops;
+
+	/* deferred io init */
+
+	info->fbdefio = devm_kzalloc(info->device,
+				     sizeof(struct fb_deferred_io),
+				     GFP_KERNEL);
+	if (!info->fbdefio) {
+		dev_err(info->device, "Failed to allocate memory\n");
+		ret = -ENOMEM;
+		goto err_defio;
+	}
+
+	dev_dbg(info->device, "targetting %d frames per second\n", board->fps);
+	info->fbdefio->delay = HZ / board->fps;
+	info->fbdefio->first_io = auok190xfb_dpy_first_io;
+	info->fbdefio->deferred_io = auok190xfb_dpy_deferred_io;
+	fb_deferred_io_init(info);
+
+	/* color map */
+
+	ret = fb_alloc_cmap(&info->cmap, 256, 0);
+	if (ret < 0) {
+		dev_err(info->device, "Failed to allocate colormap\n");
+		goto err_cmap;
+	}
+
+	/* controller init */
+
+	par->consecutive_threshold = 100;
+	par->init(par);
+	auok190x_identify(par);
+
+	platform_set_drvdata(pdev, info);
+
+	ret = register_framebuffer(info);
+	if (ret < 0)
+		goto err_regfb;
+
+	ret = sysfs_create_group(&info->device->kobj, &auok190x_attr_group);
+	if (ret)
+		goto err_sysfs;
+
+	dev_info(info->device, "fb%d: %dx%d using %dK of video memory\n",
+		 info->node, info->var.xres, info->var.yres,
+		 videomemorysize >> 10);
+
+	/* increase autosuspend_delay when we use alternative methods
+	 * for runtime_pm
+	 */
+	par->autosuspend_delay = (board->quirks & AUOK190X_QUIRK_STANDBYBROKEN)
+					? 1000 : 200;
+
+	pm_runtime_set_active(info->device);
+	pm_runtime_enable(info->device);
+	pm_runtime_set_autosuspend_delay(info->device, par->autosuspend_delay);
+	pm_runtime_use_autosuspend(info->device);
+
+	return 0;
+
+err_sysfs:
+	unregister_framebuffer(info);
+err_regfb:
+	fb_dealloc_cmap(&info->cmap);
+err_cmap:
+	fb_deferred_io_cleanup(info);
+	kfree(info->fbdefio);
+err_defio:
+	vfree((void *)info->screen_base);
+err_irq:
+	auok190x_power(par, 0);
+err_gpio3:
+	gpio_free(board->gpio_nrst);
+err_gpio2:
+	gpio_free(board->gpio_nsleep);
+err_gpio1:
+	board->cleanup(par);
+err_board:
+	regulator_put(par->regulator);
+err_reg:
+	framebuffer_release(info);
+
+	return ret;
+}
+EXPORT_SYMBOL_GPL(auok190x_common_probe);
+
+int  __devexit auok190x_common_remove(struct platform_device *pdev)
+{
+	struct fb_info *info = platform_get_drvdata(pdev);
+	struct auok190xfb_par *par = info->par;
+	struct auok190x_board *board = par->board;
+
+	pm_runtime_disable(info->device);
+
+	sysfs_remove_group(&info->device->kobj, &auok190x_attr_group);
+
+	unregister_framebuffer(info);
+
+	fb_dealloc_cmap(&info->cmap);
+
+	fb_deferred_io_cleanup(info);
+	kfree(info->fbdefio);
+
+	vfree((void *)info->screen_base);
+
+	auok190x_power(par, 0);
+
+	gpio_free(board->gpio_nrst);
+	gpio_free(board->gpio_nsleep);
+
+	board->cleanup(par);
+
+	regulator_put(par->regulator);
+
+	framebuffer_release(info);
+
+	return 0;
+}
+EXPORT_SYMBOL_GPL(auok190x_common_remove);
+
+MODULE_DESCRIPTION("Common code for AUO-K190X controllers");
+MODULE_AUTHOR("Heiko Stuebner <heiko@sntech.de>");
+MODULE_LICENSE("GPL");
diff --git a/drivers/video/auo_k190x.h b/drivers/video/auo_k190x.h
new file mode 100644
index 0000000..e35af1f
--- /dev/null
+++ b/drivers/video/auo_k190x.h
@@ -0,0 +1,129 @@
+/*
+ * Private common definitions for AUO-K190X framebuffer drivers
+ *
+ * Copyright (C) 2012 Heiko Stuebner <heiko@sntech.de>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+/*
+ * I80 interface specific defines
+ */
+
+#define AUOK190X_I80_CS			0x01
+#define AUOK190X_I80_DC			0x02
+#define AUOK190X_I80_WR			0x03
+#define AUOK190X_I80_OE			0x04
+
+/*
+ * AUOK190x commands, common to both controllers
+ */
+
+#define AUOK190X_CMD_INIT		0x0000
+#define AUOK190X_CMD_STANDBY		0x0001
+#define AUOK190X_CMD_WAKEUP		0x0002
+#define AUOK190X_CMD_TCON_RESET		0x0003
+#define AUOK190X_CMD_DATA_STOP		0x1002
+#define AUOK190X_CMD_LUT_START		0x1003
+#define AUOK190X_CMD_DISP_REFRESH	0x1004
+#define AUOK190X_CMD_DISP_RESET		0x1005
+#define AUOK190X_CMD_PRE_DISPLAY_START	0x100D
+#define AUOK190X_CMD_PRE_DISPLAY_STOP	0x100F
+#define AUOK190X_CMD_FLASH_W		0x2000
+#define AUOK190X_CMD_FLASH_E		0x2001
+#define AUOK190X_CMD_FLASH_STS		0x2002
+#define AUOK190X_CMD_FRAMERATE		0x3000
+#define AUOK190X_CMD_READ_VERSION	0x4000
+#define AUOK190X_CMD_READ_STATUS	0x4001
+#define AUOK190X_CMD_READ_LUT		0x4003
+#define AUOK190X_CMD_DRIVERTIMING	0x5000
+#define AUOK190X_CMD_LBALANCE		0x5001
+#define AUOK190X_CMD_AGINGMODE		0x6000
+#define AUOK190X_CMD_AGINGEXIT		0x6001
+
+/*
+ * Common settings for AUOK190X_CMD_INIT
+ */
+
+#define AUOK190X_INIT_DATA_FILTER	(0 << 12)
+#define AUOK190X_INIT_DATA_BYPASS	(1 << 12)
+#define AUOK190X_INIT_INVERSE_WHITE	(0 << 9)
+#define AUOK190X_INIT_INVERSE_BLACK	(1 << 9)
+#define AUOK190X_INIT_SCAN_DOWN		(0 << 1)
+#define AUOK190X_INIT_SCAN_UP		(1 << 1)
+#define AUOK190X_INIT_SHIFT_LEFT	(0 << 0)
+#define AUOK190X_INIT_SHIFT_RIGHT	(1 << 0)
+
+/* Common bits to pixels
+ *   Mode	15-12	11-8	7-4	3-0
+ *   format0	4	3	2	1
+ *   format1	3	4	1	2
+ */
+
+#define AUOK190X_INIT_FORMAT0		0
+#define AUOK190X_INIT_FORMAT1		(1 << 6)
+
+/*
+ * settings for AUOK190X_CMD_RESET
+ */
+
+#define AUOK190X_RESET_TCON		(0 << 0)
+#define AUOK190X_RESET_NORMAL		(1 << 0)
+#define AUOK190X_RESET_PON		(1 << 1)
+
+/*
+ * AUOK190X_CMD_VERSION
+ */
+
+#define AUOK190X_VERSION_TEMP_MASK		(0x1ff)
+#define AUOK190X_VERSION_EPD_MASK		(0xff)
+#define AUOK190X_VERSION_SIZE_INT(_val)		(((_val) & 0xfc00) >> 10)
+#define AUOK190X_VERSION_SIZE_FLOAT(_val)	(((_val) & 0x3c0) >> 6)
+#define AUOK190X_VERSION_MODEL(_val)		((_val) & 0x3f)
+#define AUOK190X_VERSION_LUT(_val)		((_val) & 0xff)
+#define AUOK190X_VERSION_TCON(_val)		(((_val) & 0xff00) >> 8)
+
+/*
+ * update modes for CMD_PARTIALDISP on K1900 and CMD_DDMA on K1901
+ */
+
+#define AUOK190X_UPDATE_MODE(_res)		(((_res) & 0x7) << 12)
+#define AUOK190X_UPDATE_NONFLASH		(1 << 15)
+
+/*
+ * track panel specific parameters for common init
+ */
+
+struct auok190x_init_data {
+	char *id;
+	struct auok190x_board *board;
+
+	void (*update_partial)(struct auok190xfb_par *par, u16 y1, u16 y2);
+	void (*update_all)(struct auok190xfb_par *par);
+	bool (*need_refresh)(struct auok190xfb_par *par);
+	void (*init)(struct auok190xfb_par *par);
+};
+
+
+extern void auok190x_send_command_nowait(struct auok190xfb_par *par, u16 data);
+extern int auok190x_send_command(struct auok190xfb_par *par, u16 data);
+extern void auok190x_send_cmdargs_nowait(struct auok190xfb_par *par, u16 cmd,
+					 int argc, u16 *argv);
+extern int auok190x_send_cmdargs(struct auok190xfb_par *par, u16 cmd,
+				  int argc, u16 *argv);
+extern void auok190x_send_cmdargs_pixels_nowait(struct auok190xfb_par *par,
+						u16 cmd, int argc, u16 *argv,
+						int size, u16 *data);
+extern int auok190x_send_cmdargs_pixels(struct auok190xfb_par *par, u16 cmd,
+					int argc, u16 *argv, int size,
+					u16 *data);
+extern int auok190x_read_cmdargs(struct auok190xfb_par *par, u16 cmd,
+				  int argc, u16 *argv);
+
+extern int auok190x_common_probe(struct platform_device *pdev,
+				 struct auok190x_init_data *init);
+extern int auok190x_common_remove(struct platform_device *pdev);
+
+extern const struct dev_pm_ops auok190x_pm;
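The header above is the contract between this common core and the K1900/K1901 front-end drivers: each front end fills a struct auok190x_init_data with its controller-specific callbacks and hands it to auok190x_common_probe(). A minimal sketch of such a front-end probe, assuming hypothetical auok1900_* callbacks that are not part of this patch:

	static int __devinit auok1900fb_probe(struct platform_device *pdev)
	{
		struct auok190x_init_data init = {
			.id		= "auo_k1900fb",
			.board		= pdev->dev.platform_data,
			.init		= auok1900_init,		/* hypothetical */
			.update_partial	= auok1900_update_partial,	/* hypothetical */
			.update_all	= auok1900_update_all,		/* hypothetical */
			.need_refresh	= auok1900_need_refresh,	/* hypothetical */
		};

		return auok190x_common_probe(pdev, &init);
	}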
diff --git a/drivers/video/backlight/Kconfig b/drivers/video/backlight/Kconfig
index af16884..2979292 100644
--- a/drivers/video/backlight/Kconfig
+++ b/drivers/video/backlight/Kconfig
@@ -88,7 +88,7 @@
 
 config LCD_TOSA
 	tristate "Sharp SL-6000 LCD Driver"
-	depends on SPI && MACH_TOSA
+	depends on I2C && SPI && MACH_TOSA
 	help
 	  If you have an Sharp SL-6000 Zaurus say Y to enable a driver
 	  for its LCD.
@@ -184,6 +184,18 @@
 	  known as the Corgi backlight driver. If you have a Sharp Zaurus
 	  SL-C7xx, SL-Cxx00 or SL-6000x say y.
 
+config BACKLIGHT_LM3533
+	tristate "Backlight Driver for LM3533"
+	depends on BACKLIGHT_CLASS_DEVICE
+	depends on MFD_LM3533
+	help
+	  Say Y to enable the backlight driver for National Semiconductor / TI
+	  LM3533 Lighting Power chips.
+
+	  The backlights can be controlled directly, through PWM input, or by
+	  the ambient-light-sensor interface. The chip supports 256 brightness
+	  levels.
+
 config BACKLIGHT_LOCOMO
 	tristate "Sharp LOCOMO LCD/Backlight Driver"
 	depends on SHARP_LOCOMO
diff --git a/drivers/video/backlight/Makefile b/drivers/video/backlight/Makefile
index 36855ae..a2ac9cf 100644
--- a/drivers/video/backlight/Makefile
+++ b/drivers/video/backlight/Makefile
@@ -21,6 +21,7 @@
 obj-$(CONFIG_BACKLIGHT_GENERIC)	+= generic_bl.o
 obj-$(CONFIG_BACKLIGHT_HP700)	+= jornada720_bl.o
 obj-$(CONFIG_BACKLIGHT_HP680)	+= hp680_bl.o
+obj-$(CONFIG_BACKLIGHT_LM3533)	+= lm3533_bl.o
 obj-$(CONFIG_BACKLIGHT_LOCOMO)	+= locomolcd.o
 obj-$(CONFIG_BACKLIGHT_LP855X)	+= lp855x_bl.o
 obj-$(CONFIG_BACKLIGHT_OMAP1)	+= omap1_bl.o
diff --git a/drivers/video/backlight/adp5520_bl.c b/drivers/video/backlight/adp5520_bl.c
index 4911ea7..df5db99 100644
--- a/drivers/video/backlight/adp5520_bl.c
+++ b/drivers/video/backlight/adp5520_bl.c
@@ -160,7 +160,7 @@
 	unsigned long val;
 	int ret;
 
-	ret = strict_strtoul(buf, 10, &val);
+	ret = kstrtoul(buf, 10, &val);
 	if (ret)
 		return ret;
 
@@ -214,7 +214,7 @@
 	struct adp5520_bl *data = dev_get_drvdata(dev);
 	int ret;
 
-	ret = strict_strtoul(buf, 10, &data->cached_daylight_max);
+	ret = kstrtoul(buf, 10, &data->cached_daylight_max);
 	if (ret < 0)
 		return ret;
 
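This and the following backlight patches replace the deprecated strict_strtoul() with kstrtoul() in the sysfs store handlers. The resulting pattern, sketched with an illustrative attribute name:

	static ssize_t example_store(struct device *dev,
				     struct device_attribute *attr,
				     const char *buf, size_t count)
	{
		unsigned long val;
		int ret;

		/* kstrtoul() rejects overflow and trailing garbage, as strict_strtoul() did */
		ret = kstrtoul(buf, 10, &val);
		if (ret)
			return ret;

		/* ... program val into the hardware ... */
		return count;
	}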
diff --git a/drivers/video/backlight/adp8860_bl.c b/drivers/video/backlight/adp8860_bl.c
index 550dbf0..77d1fdb 100644
--- a/drivers/video/backlight/adp8860_bl.c
+++ b/drivers/video/backlight/adp8860_bl.c
@@ -222,7 +222,8 @@
 	struct led_info *cur_led;
 	int ret, i;
 
-	led = kzalloc(sizeof(*led) * pdata->num_leds, GFP_KERNEL);
+	led = devm_kzalloc(&client->dev, sizeof(*led) * pdata->num_leds,
+				GFP_KERNEL);
 	if (led == NULL) {
 		dev_err(&client->dev, "failed to alloc memory\n");
 		return -ENOMEM;
@@ -236,7 +237,7 @@
 
 	if (ret) {
 		dev_err(&client->dev, "failed to write\n");
-		goto err_free;
+		return ret;
 	}
 
 	for (i = 0; i < pdata->num_leds; ++i) {
@@ -291,9 +292,6 @@
 		cancel_work_sync(&led[i].work);
 	}
 
- err_free:
-	kfree(led);
-
 	return ret;
 }
 
@@ -309,7 +307,6 @@
 		cancel_work_sync(&data->led[i].work);
 	}
 
-	kfree(data->led);
 	return 0;
 }
 #else
@@ -451,7 +448,7 @@
 	unsigned long val;
 	int ret;
 
-	ret = strict_strtoul(buf, 10, &val);
+	ret = kstrtoul(buf, 10, &val);
 	if (ret)
 		return ret;
 
@@ -501,7 +498,7 @@
 		struct device_attribute *attr, const char *buf, size_t count)
 {
 	struct adp8860_bl *data = dev_get_drvdata(dev);
-	int ret = strict_strtoul(buf, 10, &data->cached_daylight_max);
+	int ret = kstrtoul(buf, 10, &data->cached_daylight_max);
 	if (ret)
 		return ret;
 
@@ -608,7 +605,7 @@
 	uint8_t reg_val;
 	int ret;
 
-	ret = strict_strtoul(buf, 10, &val);
+	ret = kstrtoul(buf, 10, &val);
 	if (ret)
 		return ret;
 
@@ -675,13 +672,13 @@
 		return -EINVAL;
 	}
 
-	data = kzalloc(sizeof(*data), GFP_KERNEL);
+	data = devm_kzalloc(&client->dev, sizeof(*data), GFP_KERNEL);
 	if (data == NULL)
 		return -ENOMEM;
 
 	ret = adp8860_read(client, ADP8860_MFDVID, &reg_val);
 	if (ret < 0)
-		goto out2;
+		return ret;
 
 	switch (ADP8860_MANID(reg_val)) {
 	case ADP8863_MANUFID:
@@ -694,8 +691,7 @@
 		break;
 	default:
 		dev_err(&client->dev, "failed to probe\n");
-		ret = -ENODEV;
-		goto out2;
+		return -ENODEV;
 	}
 
 	/* It's confirmed that the DEVID field is actually a REVID */
@@ -717,8 +713,7 @@
 			&client->dev, data, &adp8860_bl_ops, &props);
 	if (IS_ERR(bl)) {
 		dev_err(&client->dev, "failed to register backlight\n");
-		ret = PTR_ERR(bl);
-		goto out2;
+		return PTR_ERR(bl);
 	}
 
 	bl->props.brightness = ADP8860_MAX_BRIGHTNESS;
@@ -756,8 +751,6 @@
 			&adp8860_bl_attr_group);
 out1:
 	backlight_device_unregister(bl);
-out2:
-	kfree(data);
 
 	return ret;
 }
@@ -776,7 +769,6 @@
 			&adp8860_bl_attr_group);
 
 	backlight_device_unregister(data->bl);
-	kfree(data);
 
 	return 0;
 }
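The allocation changes above switch the driver to managed memory: buffers obtained with devm_kzalloc() are released automatically when the device detaches, which is why the kfree() calls disappear from the error paths and from remove(). A minimal sketch of the idiom for an I2C driver, with illustrative names:

	struct example_data {
		int brightness;
	};

	static int example_probe(struct i2c_client *client,
				 const struct i2c_device_id *id)
	{
		struct example_data *data;

		/* freed by the driver core on detach; no explicit kfree() needed */
		data = devm_kzalloc(&client->dev, sizeof(*data), GFP_KERNEL);
		if (!data)
			return -ENOMEM;

		i2c_set_clientdata(client, data);
		return 0;
	}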
diff --git a/drivers/video/backlight/adp8870_bl.c b/drivers/video/backlight/adp8870_bl.c
index 9be58c6..edf7f91 100644
--- a/drivers/video/backlight/adp8870_bl.c
+++ b/drivers/video/backlight/adp8870_bl.c
@@ -244,8 +244,8 @@
 	struct led_info *cur_led;
 	int ret, i;
 
-
-	led = kcalloc(pdata->num_leds, sizeof(*led), GFP_KERNEL);
+	led = devm_kzalloc(&client->dev, pdata->num_leds * sizeof(*led),
+				GFP_KERNEL);
 	if (led == NULL) {
 		dev_err(&client->dev, "failed to alloc memory\n");
 		return -ENOMEM;
@@ -253,17 +253,17 @@
 
 	ret = adp8870_write(client, ADP8870_ISCLAW, pdata->led_fade_law);
 	if (ret)
-		goto err_free;
+		return ret;
 
 	ret = adp8870_write(client, ADP8870_ISCT1,
 			(pdata->led_on_time & 0x3) << 6);
 	if (ret)
-		goto err_free;
+		return ret;
 
 	ret = adp8870_write(client, ADP8870_ISCF,
 			FADE_VAL(pdata->led_fade_in, pdata->led_fade_out));
 	if (ret)
-		goto err_free;
+		return ret;
 
 	for (i = 0; i < pdata->num_leds; ++i) {
 		cur_led = &pdata->leds[i];
@@ -317,9 +317,6 @@
 		cancel_work_sync(&led[i].work);
 	}
 
- err_free:
-	kfree(led);
-
 	return ret;
 }
 
@@ -335,7 +332,6 @@
 		cancel_work_sync(&data->led[i].work);
 	}
 
-	kfree(data->led);
 	return 0;
 }
 #else
@@ -572,7 +568,7 @@
 	unsigned long val;
 	int ret;
 
-	ret = strict_strtoul(buf, 10, &val);
+	ret = kstrtoul(buf, 10, &val);
 	if (ret)
 		return ret;
 
@@ -652,7 +648,7 @@
 		struct device_attribute *attr, const char *buf, size_t count)
 {
 	struct adp8870_bl *data = dev_get_drvdata(dev);
-	int ret = strict_strtoul(buf, 10, &data->cached_daylight_max);
+	int ret = kstrtoul(buf, 10, &data->cached_daylight_max);
 	if (ret)
 		return ret;
 
@@ -794,7 +790,7 @@
 	uint8_t reg_val;
 	int ret;
 
-	ret = strict_strtoul(buf, 10, &val);
+	ret = kstrtoul(buf, 10, &val);
 	if (ret)
 		return ret;
 
@@ -874,7 +870,7 @@
 		return -ENODEV;
 	}
 
-	data = kzalloc(sizeof(*data), GFP_KERNEL);
+	data = devm_kzalloc(&client->dev, sizeof(*data), GFP_KERNEL);
 	if (data == NULL)
 		return -ENOMEM;
 
@@ -894,8 +890,7 @@
 			&client->dev, data, &adp8870_bl_ops, &props);
 	if (IS_ERR(bl)) {
 		dev_err(&client->dev, "failed to register backlight\n");
-		ret = PTR_ERR(bl);
-		goto out2;
+		return PTR_ERR(bl);
 	}
 
 	data->bl = bl;
@@ -930,8 +925,6 @@
 			&adp8870_bl_attr_group);
 out1:
 	backlight_device_unregister(bl);
-out2:
-	kfree(data);
 
 	return ret;
 }
@@ -950,7 +943,6 @@
 			&adp8870_bl_attr_group);
 
 	backlight_device_unregister(data->bl);
-	kfree(data);
 
 	return 0;
 }
diff --git a/drivers/video/backlight/ams369fg06.c b/drivers/video/backlight/ams369fg06.c
index 7bdadc7..3729238 100644
--- a/drivers/video/backlight/ams369fg06.c
+++ b/drivers/video/backlight/ams369fg06.c
@@ -482,7 +482,7 @@
 	struct backlight_device *bd = NULL;
 	struct backlight_properties props;
 
-	lcd = kzalloc(sizeof(struct ams369fg06), GFP_KERNEL);
+	lcd = devm_kzalloc(&spi->dev, sizeof(struct ams369fg06), GFP_KERNEL);
 	if (!lcd)
 		return -ENOMEM;
 
@@ -492,7 +492,7 @@
 	ret = spi_setup(spi);
 	if (ret < 0) {
 		dev_err(&spi->dev, "spi setup failed.\n");
-		goto out_free_lcd;
+		return ret;
 	}
 
 	lcd->spi = spi;
@@ -501,15 +501,13 @@
 	lcd->lcd_pd = spi->dev.platform_data;
 	if (!lcd->lcd_pd) {
 		dev_err(&spi->dev, "platform data is NULL\n");
-		goto out_free_lcd;
+		return -EFAULT;
 	}
 
 	ld = lcd_device_register("ams369fg06", &spi->dev, lcd,
 		&ams369fg06_lcd_ops);
-	if (IS_ERR(ld)) {
-		ret = PTR_ERR(ld);
-		goto out_free_lcd;
-	}
+	if (IS_ERR(ld))
+		return PTR_ERR(ld);
 
 	lcd->ld = ld;
 
@@ -547,8 +545,6 @@
 
 out_lcd_unregister:
 	lcd_device_unregister(ld);
-out_free_lcd:
-	kfree(lcd);
 	return ret;
 }
 
@@ -559,7 +555,6 @@
 	ams369fg06_power(lcd, FB_BLANK_POWERDOWN);
 	backlight_device_unregister(lcd->bd);
 	lcd_device_unregister(lcd->ld);
-	kfree(lcd);
 
 	return 0;
 }
@@ -619,7 +614,6 @@
 static struct spi_driver ams369fg06_driver = {
 	.driver = {
 		.name	= "ams369fg06",
-		.bus	= &spi_bus_type,
 		.owner	= THIS_MODULE,
 	},
 	.probe		= ams369fg06_probe,
diff --git a/drivers/video/backlight/apple_bl.c b/drivers/video/backlight/apple_bl.c
index a523b25..9dc73ac 100644
--- a/drivers/video/backlight/apple_bl.c
+++ b/drivers/video/backlight/apple_bl.c
@@ -16,6 +16,8 @@
  *  get at the firmware code in order to figure out what it's actually doing.
  */
 
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
 #include <linux/module.h>
 #include <linux/kernel.h>
 #include <linux/init.h>
@@ -25,6 +27,7 @@
 #include <linux/pci.h>
 #include <linux/acpi.h>
 #include <linux/atomic.h>
+#include <linux/apple_bl.h>
 
 static struct backlight_device *apple_backlight_device;
 
@@ -39,8 +42,6 @@
 
 static const struct hw_data *hw_data;
 
-#define DRIVER "apple_backlight: "
-
 /* Module parameters. */
 static int debug;
 module_param_named(debug, debug, int, 0644);
@@ -60,8 +61,7 @@
 	int intensity = bd->props.brightness;
 
 	if (debug)
-		printk(KERN_DEBUG DRIVER "setting brightness to %d\n",
-		       intensity);
+		pr_debug("setting brightness to %d\n", intensity);
 
 	intel_chipset_set_brightness(intensity);
 	return 0;
@@ -76,8 +76,7 @@
 	intensity = inb(0xb3) >> 4;
 
 	if (debug)
-		printk(KERN_DEBUG DRIVER "read brightness of %d\n",
-		       intensity);
+		pr_debug("read brightness of %d\n", intensity);
 
 	return intensity;
 }
@@ -107,8 +106,7 @@
 	int intensity = bd->props.brightness;
 
 	if (debug)
-		printk(KERN_DEBUG DRIVER "setting brightness to %d\n",
-		       intensity);
+		pr_debug("setting brightness to %d\n", intensity);
 
 	nvidia_chipset_set_brightness(intensity);
 	return 0;
@@ -123,8 +121,7 @@
 	intensity = inb(0x52f) >> 4;
 
 	if (debug)
-		printk(KERN_DEBUG DRIVER "read brightness of %d\n",
-		       intensity);
+		pr_debug("read brightness of %d\n", intensity);
 
 	return intensity;
 }
@@ -149,7 +146,7 @@
 	host = pci_get_bus_and_slot(0, 0);
 
 	if (!host) {
-		printk(KERN_ERR DRIVER "unable to find PCI host\n");
+		pr_err("unable to find PCI host\n");
 		return -ENODEV;
 	}
 
@@ -161,7 +158,7 @@
 	pci_dev_put(host);
 
 	if (!hw_data) {
-		printk(KERN_ERR DRIVER "unknown hardware\n");
+		pr_err("unknown hardware\n");
 		return -ENODEV;
 	}
 
diff --git a/drivers/video/backlight/backlight.c b/drivers/video/backlight/backlight.c
index bf5b1ec..297db2f 100644
--- a/drivers/video/backlight/backlight.c
+++ b/drivers/video/backlight/backlight.c
@@ -5,6 +5,8 @@
  *
  */
 
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
 #include <linux/module.h>
 #include <linux/init.h>
 #include <linux/device.h>
@@ -123,7 +125,7 @@
 	rc = -ENXIO;
 	mutex_lock(&bd->ops_lock);
 	if (bd->ops) {
-		pr_debug("backlight: set power to %lu\n", power);
+		pr_debug("set power to %lu\n", power);
 		if (bd->props.power != power) {
 			bd->props.power = power;
 			backlight_update_status(bd);
@@ -161,8 +163,7 @@
 		if (brightness > bd->props.max_brightness)
 			rc = -EINVAL;
 		else {
-			pr_debug("backlight: set brightness to %lu\n",
-				 brightness);
+			pr_debug("set brightness to %lu\n", brightness);
 			bd->props.brightness = brightness;
 			backlight_update_status(bd);
 			rc = count;
@@ -378,8 +379,8 @@
 {
 	backlight_class = class_create(THIS_MODULE, "backlight");
 	if (IS_ERR(backlight_class)) {
-		printk(KERN_WARNING "Unable to create backlight class; errno = %ld\n",
-				PTR_ERR(backlight_class));
+		pr_warn("Unable to create backlight class; errno = %ld\n",
+			PTR_ERR(backlight_class));
 		return PTR_ERR(backlight_class);
 	}
 
diff --git a/drivers/video/backlight/corgi_lcd.c b/drivers/video/backlight/corgi_lcd.c
index 6dab13f..23d7326 100644
--- a/drivers/video/backlight/corgi_lcd.c
+++ b/drivers/video/backlight/corgi_lcd.c
@@ -544,7 +544,7 @@
 		return -EINVAL;
 	}
 
-	lcd = kzalloc(sizeof(struct corgi_lcd), GFP_KERNEL);
+	lcd = devm_kzalloc(&spi->dev, sizeof(struct corgi_lcd), GFP_KERNEL);
 	if (!lcd) {
 		dev_err(&spi->dev, "failed to allocate memory\n");
 		return -ENOMEM;
@@ -554,10 +554,9 @@
 
 	lcd->lcd_dev = lcd_device_register("corgi_lcd", &spi->dev,
 					lcd, &corgi_lcd_ops);
-	if (IS_ERR(lcd->lcd_dev)) {
-		ret = PTR_ERR(lcd->lcd_dev);
-		goto err_free_lcd;
-	}
+	if (IS_ERR(lcd->lcd_dev))
+		return PTR_ERR(lcd->lcd_dev);
+
 	lcd->power = FB_BLANK_POWERDOWN;
 	lcd->mode = (pdata) ? pdata->init_mode : CORGI_LCD_MODE_VGA;
 
@@ -591,8 +590,6 @@
 	backlight_device_unregister(lcd->bl_dev);
 err_unregister_lcd:
 	lcd_device_unregister(lcd->lcd_dev);
-err_free_lcd:
-	kfree(lcd);
 	return ret;
 }
 
@@ -613,7 +610,6 @@
 
 	corgi_lcd_set_power(lcd->lcd_dev, FB_BLANK_POWERDOWN);
 	lcd_device_unregister(lcd->lcd_dev);
-	kfree(lcd);
 
 	return 0;
 }
diff --git a/drivers/video/backlight/cr_bllcd.c b/drivers/video/backlight/cr_bllcd.c
index 22489eb..37bae80 100644
--- a/drivers/video/backlight/cr_bllcd.c
+++ b/drivers/video/backlight/cr_bllcd.c
@@ -27,6 +27,8 @@
  *   Alan Hourihane <alanh-at-tungstengraphics-dot-com>
  */
 
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
 #include <linux/module.h>
 #include <linux/kernel.h>
 #include <linux/init.h>
@@ -180,14 +182,13 @@
 	lpc_dev = pci_get_device(PCI_VENDOR_ID_INTEL,
 					CRVML_DEVICE_LPC, NULL);
 	if (!lpc_dev) {
-		printk("INTEL CARILLO RANCH LPC not found.\n");
+		pr_err("INTEL CARILLO RANCH LPC not found.\n");
 		return -ENODEV;
 	}
 
 	pci_read_config_byte(lpc_dev, CRVML_REG_GPIOEN, &dev_en);
 	if (!(dev_en & CRVML_GPIOEN_BIT)) {
-		printk(KERN_ERR
-		       "Carillo Ranch GPIO device was not enabled.\n");
+		pr_err("Carillo Ranch GPIO device was not enabled.\n");
 		pci_dev_put(lpc_dev);
 		return -ENODEV;
 	}
@@ -270,7 +271,7 @@
 		return PTR_ERR(crp);
 	}
 
-	printk("Carillo Ranch Backlight Driver Initialized.\n");
+	pr_info("Carillo Ranch Backlight Driver Initialized.\n");
 
 	return 0;
 }
diff --git a/drivers/video/backlight/da903x_bl.c b/drivers/video/backlight/da903x_bl.c
index 30e1968..573c7ec 100644
--- a/drivers/video/backlight/da903x_bl.c
+++ b/drivers/video/backlight/da903x_bl.c
@@ -136,6 +136,7 @@
 		da903x_write(data->da903x_dev, DA9034_WLED_CONTROL2,
 				DA9034_WLED_ISET(pdata->output_current));
 
+	memset(&props, 0, sizeof(props));
 	props.type = BACKLIGHT_RAW;
 	props.max_brightness = max_brightness;
 	bl = backlight_device_register(pdev->name, data->da903x_dev, data,
diff --git a/drivers/video/backlight/generic_bl.c b/drivers/video/backlight/generic_bl.c
index 9ce6170..8c660fc 100644
--- a/drivers/video/backlight/generic_bl.c
+++ b/drivers/video/backlight/generic_bl.c
@@ -9,6 +9,8 @@
  *
  */
 
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
 #include <linux/module.h>
 #include <linux/kernel.h>
 #include <linux/init.h>
@@ -106,7 +108,7 @@
 
 	generic_backlight_device = bd;
 
-	printk("Generic Backlight Driver Initialized.\n");
+	pr_info("Generic Backlight Driver Initialized.\n");
 	return 0;
 }
 
@@ -120,7 +122,7 @@
 
 	backlight_device_unregister(bd);
 
-	printk("Generic Backlight Driver Unloaded\n");
+	pr_info("Generic Backlight Driver Unloaded\n");
 	return 0;
 }
 
diff --git a/drivers/video/backlight/ili9320.c b/drivers/video/backlight/ili9320.c
index 5118a9f..9327cd1 100644
--- a/drivers/video/backlight/ili9320.c
+++ b/drivers/video/backlight/ili9320.c
@@ -220,7 +220,7 @@
 
 	/* allocate and initialse our state */
 
-	ili = kzalloc(sizeof(struct ili9320), GFP_KERNEL);
+	ili = devm_kzalloc(&spi->dev, sizeof(struct ili9320), GFP_KERNEL);
 	if (ili == NULL) {
 		dev_err(dev, "no memory for device\n");
 		return -ENOMEM;
@@ -240,8 +240,7 @@
 	lcd = lcd_device_register("ili9320", dev, ili, &ili9320_ops);
 	if (IS_ERR(lcd)) {
 		dev_err(dev, "failed to register lcd device\n");
-		ret = PTR_ERR(lcd);
-		goto err_free;
+		return PTR_ERR(lcd);
 	}
 
 	ili->lcd = lcd;
@@ -259,20 +258,16 @@
  err_unregister:
 	lcd_device_unregister(lcd);
 
- err_free:
-	kfree(ili);
-
 	return ret;
 }
 
 EXPORT_SYMBOL_GPL(ili9320_probe_spi);
 
-int __devexit ili9320_remove(struct ili9320 *ili)
+int ili9320_remove(struct ili9320 *ili)
 {
 	ili9320_power(ili, FB_BLANK_POWERDOWN);
 
 	lcd_device_unregister(ili->lcd);
-	kfree(ili);
 
 	return 0;
 }
diff --git a/drivers/video/backlight/jornada720_bl.c b/drivers/video/backlight/jornada720_bl.c
index 2f8af5d..16f593b 100644
--- a/drivers/video/backlight/jornada720_bl.c
+++ b/drivers/video/backlight/jornada720_bl.c
@@ -9,6 +9,8 @@
  *
  */
 
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
 #include <linux/backlight.h>
 #include <linux/device.h>
 #include <linux/fb.h>
@@ -38,7 +40,7 @@
 	ret = jornada_ssp_byte(GETBRIGHTNESS);
 
 	if (jornada_ssp_byte(GETBRIGHTNESS) != TXDUMMY) {
-		printk(KERN_ERR "bl : get brightness timeout\n");
+		pr_err("get brightness timeout\n");
 		jornada_ssp_end();
 		return -ETIMEDOUT;
 	} else /* exchange txdummy for value */
@@ -59,7 +61,7 @@
 	if ((bd->props.power != FB_BLANK_UNBLANK) || (bd->props.fb_blank != FB_BLANK_UNBLANK)) {
 		ret = jornada_ssp_byte(BRIGHTNESSOFF);
 		if (ret != TXDUMMY) {
-			printk(KERN_INFO "bl : brightness off timeout\n");
+			pr_info("brightness off timeout\n");
 			/* turn off backlight */
 			PPSR &= ~PPC_LDD1;
 			PPDR |= PPC_LDD1;
@@ -70,7 +72,7 @@
 
 		/* send command to our mcu */
 		if (jornada_ssp_byte(SETBRIGHTNESS) != TXDUMMY) {
-			printk(KERN_INFO "bl : failed to set brightness\n");
+			pr_info("failed to set brightness\n");
 			ret = -ETIMEDOUT;
 			goto out;
 		}
@@ -81,7 +83,7 @@
 		   but due to physical layout it is equal to 0, so we simply
 		   invert the value (MAX VALUE - NEW VALUE). */
 		if (jornada_ssp_byte(BL_MAX_BRIGHT - bd->props.brightness) != TXDUMMY) {
-			printk(KERN_ERR "bl : set brightness failed\n");
+			pr_err("set brightness failed\n");
 			ret = -ETIMEDOUT;
 		}
 
@@ -113,7 +115,7 @@
 
 	if (IS_ERR(bd)) {
 		ret = PTR_ERR(bd);
-		printk(KERN_ERR "bl : failed to register device, err=%x\n", ret);
+		pr_err("failed to register device, err=%x\n", ret);
 		return ret;
 	}
 
@@ -125,7 +127,7 @@
 	jornada_bl_update_status(bd);
 
 	platform_set_drvdata(pdev, bd);
-	printk(KERN_INFO "HP Jornada 700 series backlight driver\n");
+	pr_info("HP Jornada 700 series backlight driver\n");
 
 	return 0;
 }
diff --git a/drivers/video/backlight/jornada720_lcd.c b/drivers/video/backlight/jornada720_lcd.c
index 22d231a..635b305 100644
--- a/drivers/video/backlight/jornada720_lcd.c
+++ b/drivers/video/backlight/jornada720_lcd.c
@@ -9,6 +9,8 @@
  *
  */
 
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
 #include <linux/device.h>
 #include <linux/fb.h>
 #include <linux/kernel.h>
@@ -44,7 +46,7 @@
 	jornada_ssp_start();
 
 	if (jornada_ssp_byte(GETCONTRAST) != TXDUMMY) {
-		printk(KERN_ERR "lcd: get contrast failed\n");
+		pr_err("get contrast failed\n");
 		jornada_ssp_end();
 		return -ETIMEDOUT;
 	} else {
@@ -65,7 +67,7 @@
 
 	/* push the new value */
 	if (jornada_ssp_byte(value) != TXDUMMY) {
-		printk(KERN_ERR "lcd : set contrast failed\n");
+		pr_err("set contrast failed\n");
 		jornada_ssp_end();
 		return -ETIMEDOUT;
 	}
@@ -103,7 +105,7 @@
 
 	if (IS_ERR(lcd_device)) {
 		ret = PTR_ERR(lcd_device);
-		printk(KERN_ERR "lcd : failed to register device\n");
+		pr_err("failed to register device\n");
 		return ret;
 	}
 
diff --git a/drivers/video/backlight/l4f00242t03.c b/drivers/video/backlight/l4f00242t03.c
index 6022b67..40f606a 100644
--- a/drivers/video/backlight/l4f00242t03.c
+++ b/drivers/video/backlight/l4f00242t03.c
@@ -11,6 +11,8 @@
  * published by the Free Software Foundation.
  */
 
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
 #include <linux/device.h>
 #include <linux/kernel.h>
 #include <linux/delay.h>
@@ -159,7 +161,8 @@
 		return -EINVAL;
 	}
 
-	priv = kzalloc(sizeof(struct l4f00242t03_priv), GFP_KERNEL);
+	priv = devm_kzalloc(&spi->dev, sizeof(struct l4f00242t03_priv),
+				GFP_KERNEL);
 
 	if (priv == NULL) {
 		dev_err(&spi->dev, "No memory for this device.\n");
@@ -177,7 +180,7 @@
 	if (ret) {
 		dev_err(&spi->dev,
 			"Unable to get the lcd l4f00242t03 reset gpio.\n");
-		goto err;
+		return ret;
 	}
 
 	ret = gpio_request_one(pdata->data_enable_gpio, GPIOF_OUT_INIT_LOW,
@@ -185,7 +188,7 @@
 	if (ret) {
 		dev_err(&spi->dev,
 			"Unable to get the lcd l4f00242t03 data en gpio.\n");
-		goto err2;
+		goto err;
 	}
 
 	priv->io_reg = regulator_get(&spi->dev, "vdd");
@@ -193,7 +196,7 @@
 		ret = PTR_ERR(priv->io_reg);
 		dev_err(&spi->dev, "%s: Unable to get the IO regulator\n",
 		       __func__);
-		goto err3;
+		goto err2;
 	}
 
 	priv->core_reg = regulator_get(&spi->dev, "vcore");
@@ -201,14 +204,14 @@
 		ret = PTR_ERR(priv->core_reg);
 		dev_err(&spi->dev, "%s: Unable to get the core regulator\n",
 		       __func__);
-		goto err4;
+		goto err3;
 	}
 
 	priv->ld = lcd_device_register("l4f00242t03",
 					&spi->dev, priv, &l4f_ops);
 	if (IS_ERR(priv->ld)) {
 		ret = PTR_ERR(priv->ld);
-		goto err5;
+		goto err4;
 	}
 
 	/* Init the LCD */
@@ -220,16 +223,14 @@
 
 	return 0;
 
-err5:
-	regulator_put(priv->core_reg);
 err4:
-	regulator_put(priv->io_reg);
+	regulator_put(priv->core_reg);
 err3:
-	gpio_free(pdata->data_enable_gpio);
+	regulator_put(priv->io_reg);
 err2:
-	gpio_free(pdata->reset_gpio);
+	gpio_free(pdata->data_enable_gpio);
 err:
-	kfree(priv);
+	gpio_free(pdata->reset_gpio);
 
 	return ret;
 }
@@ -250,8 +251,6 @@
 	regulator_put(priv->io_reg);
 	regulator_put(priv->core_reg);
 
-	kfree(priv);
-
 	return 0;
 }
 
diff --git a/drivers/video/backlight/lcd.c b/drivers/video/backlight/lcd.c
index 79c1b0d..a5d0d02 100644
--- a/drivers/video/backlight/lcd.c
+++ b/drivers/video/backlight/lcd.c
@@ -5,6 +5,8 @@
  *
  */
 
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
 #include <linux/module.h>
 #include <linux/init.h>
 #include <linux/device.h>
@@ -32,6 +34,8 @@
 	case FB_EVENT_BLANK:
 	case FB_EVENT_MODE_CHANGE:
 	case FB_EVENT_MODE_CHANGE_ALL:
+	case FB_EARLY_EVENT_BLANK:
+	case FB_R_EARLY_EVENT_BLANK:
 		break;
 	default:
 		return 0;
@@ -46,6 +50,14 @@
 		if (event == FB_EVENT_BLANK) {
 			if (ld->ops->set_power)
 				ld->ops->set_power(ld, *(int *)evdata->data);
+		} else if (event == FB_EARLY_EVENT_BLANK) {
+			if (ld->ops->early_set_power)
+				ld->ops->early_set_power(ld,
+						*(int *)evdata->data);
+		} else if (event == FB_R_EARLY_EVENT_BLANK) {
+			if (ld->ops->r_early_set_power)
+				ld->ops->r_early_set_power(ld,
+						*(int *)evdata->data);
 		} else {
 			if (ld->ops->set_mode)
 				ld->ops->set_mode(ld, evdata->data);
@@ -106,7 +118,7 @@
 
 	mutex_lock(&ld->ops_lock);
 	if (ld->ops && ld->ops->set_power) {
-		pr_debug("lcd: set power to %lu\n", power);
+		pr_debug("set power to %lu\n", power);
 		ld->ops->set_power(ld, power);
 		rc = count;
 	}
@@ -142,7 +154,7 @@
 
 	mutex_lock(&ld->ops_lock);
 	if (ld->ops && ld->ops->set_contrast) {
-		pr_debug("lcd: set contrast to %lu\n", contrast);
+		pr_debug("set contrast to %lu\n", contrast);
 		ld->ops->set_contrast(ld, contrast);
 		rc = count;
 	}
@@ -253,8 +265,8 @@
 {
 	lcd_class = class_create(THIS_MODULE, "lcd");
 	if (IS_ERR(lcd_class)) {
-		printk(KERN_WARNING "Unable to create backlight class; errno = %ld\n",
-				PTR_ERR(lcd_class));
+		pr_warn("Unable to create backlight class; errno = %ld\n",
+			PTR_ERR(lcd_class));
 		return PTR_ERR(lcd_class);
 	}
 
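lcd.c now also forwards FB_EARLY_EVENT_BLANK and FB_R_EARLY_EVENT_BLANK to two optional lcd_ops callbacks (added to struct lcd_ops elsewhere in this series). A panel driver that needs to react before the framebuffer actually blanks could hook them roughly as follows; the names are illustrative:

	static int example_early_set_power(struct lcd_device *ld, int power)
	{
		/* runs before the framebuffer blank takes effect */
		return 0;
	}

	static int example_r_early_set_power(struct lcd_device *ld, int power)
	{
		/* runs when an early blank is rolled back after a failure */
		return 0;
	}

	static struct lcd_ops example_lcd_ops = {
		.early_set_power	= example_early_set_power,
		.r_early_set_power	= example_r_early_set_power,
	};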
diff --git a/drivers/video/backlight/ld9040.c b/drivers/video/backlight/ld9040.c
index efd352b..58f517f 100644
--- a/drivers/video/backlight/ld9040.c
+++ b/drivers/video/backlight/ld9040.c
@@ -707,7 +707,7 @@
 	struct backlight_device *bd = NULL;
 	struct backlight_properties props;
 
-	lcd = kzalloc(sizeof(struct ld9040), GFP_KERNEL);
+	lcd = devm_kzalloc(&spi->dev, sizeof(struct ld9040), GFP_KERNEL);
 	if (!lcd)
 		return -ENOMEM;
 
@@ -717,7 +717,7 @@
 	ret = spi_setup(spi);
 	if (ret < 0) {
 		dev_err(&spi->dev, "spi setup failed.\n");
-		goto out_free_lcd;
+		return ret;
 	}
 
 	lcd->spi = spi;
@@ -726,7 +726,7 @@
 	lcd->lcd_pd = spi->dev.platform_data;
 	if (!lcd->lcd_pd) {
 		dev_err(&spi->dev, "platform data is NULL.\n");
-		goto out_free_lcd;
+		return -EFAULT;
 	}
 
 	mutex_init(&lcd->lock);
@@ -734,13 +734,13 @@
 	ret = regulator_bulk_get(lcd->dev, ARRAY_SIZE(supplies), supplies);
 	if (ret) {
 		dev_err(lcd->dev, "Failed to get regulators: %d\n", ret);
-		goto out_free_lcd;
+		return ret;
 	}
 
 	ld = lcd_device_register("ld9040", &spi->dev, lcd, &ld9040_lcd_ops);
 	if (IS_ERR(ld)) {
 		ret = PTR_ERR(ld);
-		goto out_free_lcd;
+		goto out_free_regulator;
 	}
 
 	lcd->ld = ld;
@@ -782,10 +782,9 @@
 
 out_unregister_lcd:
 	lcd_device_unregister(lcd->ld);
-out_free_lcd:
+out_free_regulator:
 	regulator_bulk_free(ARRAY_SIZE(supplies), supplies);
 
-	kfree(lcd);
 	return ret;
 }
 
@@ -797,7 +796,6 @@
 	backlight_device_unregister(lcd->bd);
 	lcd_device_unregister(lcd->ld);
 	regulator_bulk_free(ARRAY_SIZE(supplies), supplies);
-	kfree(lcd);
 
 	return 0;
 }
@@ -846,7 +844,6 @@
 static struct spi_driver ld9040_driver = {
 	.driver = {
 		.name	= "ld9040",
-		.bus	= &spi_bus_type,
 		.owner	= THIS_MODULE,
 	},
 	.probe		= ld9040_probe,
diff --git a/drivers/video/backlight/lm3533_bl.c b/drivers/video/backlight/lm3533_bl.c
new file mode 100644
index 0000000..bebeb63
--- /dev/null
+++ b/drivers/video/backlight/lm3533_bl.c
@@ -0,0 +1,423 @@
+/*
+ * lm3533_bl.c -- LM3533 Backlight driver
+ *
+ * Copyright (C) 2011-2012 Texas Instruments
+ *
+ * Author: Johan Hovold <jhovold@gmail.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under  the terms of the GNU General  Public License as published by the
+ * Free Software Foundation;  either version 2 of the License, or (at your
+ * option) any later version.
+ */
+
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/platform_device.h>
+#include <linux/backlight.h>
+#include <linux/fb.h>
+#include <linux/slab.h>
+
+#include <linux/mfd/lm3533.h>
+
+
+#define LM3533_HVCTRLBANK_COUNT		2
+#define LM3533_BL_MAX_BRIGHTNESS	255
+
+#define LM3533_REG_CTRLBANK_AB_BCONF	0x1a
+
+
+struct lm3533_bl {
+	struct lm3533 *lm3533;
+	struct lm3533_ctrlbank cb;
+	struct backlight_device *bd;
+	int id;
+};
+
+
+static inline int lm3533_bl_get_ctrlbank_id(struct lm3533_bl *bl)
+{
+	return bl->id;
+}
+
+static int lm3533_bl_update_status(struct backlight_device *bd)
+{
+	struct lm3533_bl *bl = bl_get_data(bd);
+	int brightness = bd->props.brightness;
+
+	if (bd->props.power != FB_BLANK_UNBLANK)
+		brightness = 0;
+	if (bd->props.fb_blank != FB_BLANK_UNBLANK)
+		brightness = 0;
+
+	return lm3533_ctrlbank_set_brightness(&bl->cb, (u8)brightness);
+}
+
+static int lm3533_bl_get_brightness(struct backlight_device *bd)
+{
+	struct lm3533_bl *bl = bl_get_data(bd);
+	u8 val;
+	int ret;
+
+	ret = lm3533_ctrlbank_get_brightness(&bl->cb, &val);
+	if (ret)
+		return ret;
+
+	return val;
+}
+
+static const struct backlight_ops lm3533_bl_ops = {
+	.get_brightness	= lm3533_bl_get_brightness,
+	.update_status	= lm3533_bl_update_status,
+};
+
+static ssize_t show_id(struct device *dev,
+				struct device_attribute *attr, char *buf)
+{
+	struct lm3533_bl *bl = dev_get_drvdata(dev);
+
+	return scnprintf(buf, PAGE_SIZE, "%d\n", bl->id);
+}
+
+static ssize_t show_als_channel(struct device *dev,
+				struct device_attribute *attr, char *buf)
+{
+	struct lm3533_bl *bl = dev_get_drvdata(dev);
+	unsigned channel = lm3533_bl_get_ctrlbank_id(bl);
+
+	return scnprintf(buf, PAGE_SIZE, "%u\n", channel);
+}
+
+static ssize_t show_als_en(struct device *dev,
+				struct device_attribute *attr, char *buf)
+{
+	struct lm3533_bl *bl = dev_get_drvdata(dev);
+	int ctrlbank = lm3533_bl_get_ctrlbank_id(bl);
+	u8 val;
+	u8 mask;
+	bool enable;
+	int ret;
+
+	ret = lm3533_read(bl->lm3533, LM3533_REG_CTRLBANK_AB_BCONF, &val);
+	if (ret)
+		return ret;
+
+	mask = 1 << (2 * ctrlbank);
+	enable = val & mask;
+
+	return scnprintf(buf, PAGE_SIZE, "%d\n", enable);
+}
+
+static ssize_t store_als_en(struct device *dev,
+					struct device_attribute *attr,
+					const char *buf, size_t len)
+{
+	struct lm3533_bl *bl = dev_get_drvdata(dev);
+	int ctrlbank = lm3533_bl_get_ctrlbank_id(bl);
+	int enable;
+	u8 val;
+	u8 mask;
+	int ret;
+
+	if (kstrtoint(buf, 0, &enable))
+		return -EINVAL;
+
+	mask = 1 << (2 * ctrlbank);
+
+	if (enable)
+		val = mask;
+	else
+		val = 0;
+
+	ret = lm3533_update(bl->lm3533, LM3533_REG_CTRLBANK_AB_BCONF, val,
+									mask);
+	if (ret)
+		return ret;
+
+	return len;
+}
+
+static ssize_t show_linear(struct device *dev,
+				struct device_attribute *attr, char *buf)
+{
+	struct lm3533_bl *bl = dev_get_drvdata(dev);
+	u8 val;
+	u8 mask;
+	int linear;
+	int ret;
+
+	ret = lm3533_read(bl->lm3533, LM3533_REG_CTRLBANK_AB_BCONF, &val);
+	if (ret)
+		return ret;
+
+	mask = 1 << (2 * lm3533_bl_get_ctrlbank_id(bl) + 1);
+
+	if (val & mask)
+		linear = 1;
+	else
+		linear = 0;
+
+	return scnprintf(buf, PAGE_SIZE, "%x\n", linear);
+}
+
+static ssize_t store_linear(struct device *dev,
+					struct device_attribute *attr,
+					const char *buf, size_t len)
+{
+	struct lm3533_bl *bl = dev_get_drvdata(dev);
+	unsigned long linear;
+	u8 mask;
+	u8 val;
+	int ret;
+
+	if (kstrtoul(buf, 0, &linear))
+		return -EINVAL;
+
+	mask = 1 << (2 * lm3533_bl_get_ctrlbank_id(bl) + 1);
+
+	if (linear)
+		val = mask;
+	else
+		val = 0;
+
+	ret = lm3533_update(bl->lm3533, LM3533_REG_CTRLBANK_AB_BCONF, val,
+									mask);
+	if (ret)
+		return ret;
+
+	return len;
+}
+
+static ssize_t show_pwm(struct device *dev,
+					struct device_attribute *attr,
+					char *buf)
+{
+	struct lm3533_bl *bl = dev_get_drvdata(dev);
+	u8 val;
+	int ret;
+
+	ret = lm3533_ctrlbank_get_pwm(&bl->cb, &val);
+	if (ret)
+		return ret;
+
+	return scnprintf(buf, PAGE_SIZE, "%u\n", val);
+}
+
+static ssize_t store_pwm(struct device *dev,
+					struct device_attribute *attr,
+					const char *buf, size_t len)
+{
+	struct lm3533_bl *bl = dev_get_drvdata(dev);
+	u8 val;
+	int ret;
+
+	if (kstrtou8(buf, 0, &val))
+		return -EINVAL;
+
+	ret = lm3533_ctrlbank_set_pwm(&bl->cb, val);
+	if (ret)
+		return ret;
+
+	return len;
+}
+
+static LM3533_ATTR_RO(als_channel);
+static LM3533_ATTR_RW(als_en);
+static LM3533_ATTR_RO(id);
+static LM3533_ATTR_RW(linear);
+static LM3533_ATTR_RW(pwm);
+
+static struct attribute *lm3533_bl_attributes[] = {
+	&dev_attr_als_channel.attr,
+	&dev_attr_als_en.attr,
+	&dev_attr_id.attr,
+	&dev_attr_linear.attr,
+	&dev_attr_pwm.attr,
+	NULL,
+};
+
+static umode_t lm3533_bl_attr_is_visible(struct kobject *kobj,
+					     struct attribute *attr, int n)
+{
+	struct device *dev = container_of(kobj, struct device, kobj);
+	struct lm3533_bl *bl = dev_get_drvdata(dev);
+	umode_t mode = attr->mode;
+
+	if (attr == &dev_attr_als_channel.attr ||
+					attr == &dev_attr_als_en.attr) {
+		if (!bl->lm3533->have_als)
+			mode = 0;
+	}
+
+	return mode;
+}
+
+static struct attribute_group lm3533_bl_attribute_group = {
+	.is_visible	= lm3533_bl_attr_is_visible,
+	.attrs		= lm3533_bl_attributes
+};
+
+static int __devinit lm3533_bl_setup(struct lm3533_bl *bl,
+					struct lm3533_bl_platform_data *pdata)
+{
+	int ret;
+
+	ret = lm3533_ctrlbank_set_max_current(&bl->cb, pdata->max_current);
+	if (ret)
+		return ret;
+
+	return lm3533_ctrlbank_set_pwm(&bl->cb, pdata->pwm);
+}
+
+static int __devinit lm3533_bl_probe(struct platform_device *pdev)
+{
+	struct lm3533 *lm3533;
+	struct lm3533_bl_platform_data *pdata;
+	struct lm3533_bl *bl;
+	struct backlight_device *bd;
+	struct backlight_properties props;
+	int ret;
+
+	dev_dbg(&pdev->dev, "%s\n", __func__);
+
+	lm3533 = dev_get_drvdata(pdev->dev.parent);
+	if (!lm3533)
+		return -EINVAL;
+
+	pdata = pdev->dev.platform_data;
+	if (!pdata) {
+		dev_err(&pdev->dev, "no platform data\n");
+		return -EINVAL;
+	}
+
+	if (pdev->id < 0 || pdev->id >= LM3533_HVCTRLBANK_COUNT) {
+		dev_err(&pdev->dev, "illegal backlight id %d\n", pdev->id);
+		return -EINVAL;
+	}
+
+	bl = kzalloc(sizeof(*bl), GFP_KERNEL);
+	if (!bl) {
+		dev_err(&pdev->dev,
+				"failed to allocate memory for backlight\n");
+		return -ENOMEM;
+	}
+
+	bl->lm3533 = lm3533;
+	bl->id = pdev->id;
+
+	bl->cb.lm3533 = lm3533;
+	bl->cb.id = lm3533_bl_get_ctrlbank_id(bl);
+	bl->cb.dev = NULL;			/* until registered */
+
+	memset(&props, 0, sizeof(props));
+	props.type = BACKLIGHT_RAW;
+	props.max_brightness = LM3533_BL_MAX_BRIGHTNESS;
+	props.brightness = pdata->default_brightness;
+	bd = backlight_device_register(pdata->name, pdev->dev.parent, bl,
+						&lm3533_bl_ops, &props);
+	if (IS_ERR(bd)) {
+		dev_err(&pdev->dev, "failed to register backlight device\n");
+		ret = PTR_ERR(bd);
+		goto err_free;
+	}
+
+	bl->bd = bd;
+	bl->cb.dev = &bl->bd->dev;
+
+	platform_set_drvdata(pdev, bl);
+
+	ret = sysfs_create_group(&bd->dev.kobj, &lm3533_bl_attribute_group);
+	if (ret < 0) {
+		dev_err(&pdev->dev, "failed to create sysfs attributes\n");
+		goto err_unregister;
+	}
+
+	backlight_update_status(bd);
+
+	ret = lm3533_bl_setup(bl, pdata);
+	if (ret)
+		goto err_sysfs_remove;
+
+	ret = lm3533_ctrlbank_enable(&bl->cb);
+	if (ret)
+		goto err_sysfs_remove;
+
+	return 0;
+
+err_sysfs_remove:
+	sysfs_remove_group(&bd->dev.kobj, &lm3533_bl_attribute_group);
+err_unregister:
+	backlight_device_unregister(bd);
+err_free:
+	kfree(bl);
+
+	return ret;
+}
+
+static int __devexit lm3533_bl_remove(struct platform_device *pdev)
+{
+	struct lm3533_bl *bl = platform_get_drvdata(pdev);
+	struct backlight_device *bd = bl->bd;
+
+	dev_dbg(&bd->dev, "%s\n", __func__);
+
+	bd->props.power = FB_BLANK_POWERDOWN;
+	bd->props.brightness = 0;
+
+	lm3533_ctrlbank_disable(&bl->cb);
+	sysfs_remove_group(&bd->dev.kobj, &lm3533_bl_attribute_group);
+	backlight_device_unregister(bd);
+	kfree(bl);
+
+	return 0;
+}
+
+#ifdef CONFIG_PM
+static int lm3533_bl_suspend(struct platform_device *pdev, pm_message_t state)
+{
+	struct lm3533_bl *bl = platform_get_drvdata(pdev);
+
+	dev_dbg(&pdev->dev, "%s\n", __func__);
+
+	return lm3533_ctrlbank_disable(&bl->cb);
+}
+
+static int lm3533_bl_resume(struct platform_device *pdev)
+{
+	struct lm3533_bl *bl = platform_get_drvdata(pdev);
+
+	dev_dbg(&pdev->dev, "%s\n", __func__);
+
+	return lm3533_ctrlbank_enable(&bl->cb);
+}
+#else
+#define lm3533_bl_suspend	NULL
+#define lm3533_bl_resume	NULL
+#endif
+
+static void lm3533_bl_shutdown(struct platform_device *pdev)
+{
+	struct lm3533_bl *bl = platform_get_drvdata(pdev);
+
+	dev_dbg(&pdev->dev, "%s\n", __func__);
+
+	lm3533_ctrlbank_disable(&bl->cb);
+}
+
+static struct platform_driver lm3533_bl_driver = {
+	.driver = {
+		.name	= "lm3533-backlight",
+		.owner	= THIS_MODULE,
+	},
+	.probe		= lm3533_bl_probe,
+	.remove		= __devexit_p(lm3533_bl_remove),
+	.shutdown	= lm3533_bl_shutdown,
+	.suspend	= lm3533_bl_suspend,
+	.resume		= lm3533_bl_resume,
+};
+module_platform_driver(lm3533_bl_driver);
+
+MODULE_AUTHOR("Johan Hovold <jhovold@gmail.com>");
+MODULE_DESCRIPTION("LM3533 Backlight driver");
+MODULE_LICENSE("GPL");
+MODULE_ALIAS("platform:lm3533-backlight");
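The new driver binds to "lm3533-backlight" platform devices that the LM3533 MFD core instantiates, one per high-voltage control bank (pdev->id 0 or 1), and takes its configuration from lm3533_bl_platform_data. A hedged board-level sketch of that platform data; the values are arbitrary examples and the max_current units follow whatever lm3533_ctrlbank_set_max_current() expects:

	static struct lm3533_bl_platform_data example_bl_pdata = {
		.name			= "lcd-backlight",
		.default_brightness	= 200,	/* out of LM3533_BL_MAX_BRIGHTNESS (255) */
		.max_current		= 5000,	/* passed to lm3533_ctrlbank_set_max_current() */
		.pwm			= 0,	/* PWM-input zones disabled */
	};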
diff --git a/drivers/video/backlight/lms283gf05.c b/drivers/video/backlight/lms283gf05.c
index 4161f9e..a9f2c36 100644
--- a/drivers/video/backlight/lms283gf05.c
+++ b/drivers/video/backlight/lms283gf05.c
@@ -168,7 +168,8 @@
 			goto err;
 	}
 
-	st = kzalloc(sizeof(struct lms283gf05_state), GFP_KERNEL);
+	st = devm_kzalloc(&spi->dev, sizeof(struct lms283gf05_state),
+				GFP_KERNEL);
 	if (st == NULL) {
 		dev_err(&spi->dev, "No memory for device state\n");
 		ret = -ENOMEM;
@@ -178,7 +179,7 @@
 	ld = lcd_device_register("lms283gf05", &spi->dev, st, &lms_ops);
 	if (IS_ERR(ld)) {
 		ret = PTR_ERR(ld);
-		goto err2;
+		goto err;
 	}
 
 	st->spi = spi;
@@ -193,8 +194,6 @@
 
 	return 0;
 
-err2:
-	kfree(st);
 err:
 	if (pdata != NULL)
 		gpio_free(pdata->reset_gpio);
@@ -212,8 +211,6 @@
 	if (pdata != NULL)
 		gpio_free(pdata->reset_gpio);
 
-	kfree(st);
-
 	return 0;
 }
 
diff --git a/drivers/video/backlight/ltv350qv.c b/drivers/video/backlight/ltv350qv.c
index 333949f..6c0f1ac 100644
--- a/drivers/video/backlight/ltv350qv.c
+++ b/drivers/video/backlight/ltv350qv.c
@@ -232,23 +232,20 @@
 	struct lcd_device *ld;
 	int ret;
 
-	lcd = kzalloc(sizeof(struct ltv350qv), GFP_KERNEL);
+	lcd = devm_kzalloc(&spi->dev, sizeof(struct ltv350qv), GFP_KERNEL);
 	if (!lcd)
 		return -ENOMEM;
 
 	lcd->spi = spi;
 	lcd->power = FB_BLANK_POWERDOWN;
-	lcd->buffer = kzalloc(8, GFP_KERNEL);
-	if (!lcd->buffer) {
-		ret = -ENOMEM;
-		goto out_free_lcd;
-	}
+	lcd->buffer = devm_kzalloc(&spi->dev, 8, GFP_KERNEL);
+	if (!lcd->buffer)
+		return -ENOMEM;
 
 	ld = lcd_device_register("ltv350qv", &spi->dev, lcd, &ltv_ops);
-	if (IS_ERR(ld)) {
-		ret = PTR_ERR(ld);
-		goto out_free_buffer;
-	}
+	if (IS_ERR(ld))
+		return PTR_ERR(ld);
+
 	lcd->ld = ld;
 
 	ret = ltv350qv_power(lcd, FB_BLANK_UNBLANK);
@@ -261,10 +258,6 @@
 
 out_unregister:
 	lcd_device_unregister(ld);
-out_free_buffer:
-	kfree(lcd->buffer);
-out_free_lcd:
-	kfree(lcd);
 	return ret;
 }
 
@@ -274,8 +267,6 @@
 
 	ltv350qv_power(lcd, FB_BLANK_POWERDOWN);
 	lcd_device_unregister(lcd->ld);
-	kfree(lcd->buffer);
-	kfree(lcd);
 
 	return 0;
 }
@@ -310,7 +301,6 @@
 static struct spi_driver ltv350qv_driver = {
 	.driver = {
 		.name		= "ltv350qv",
-		.bus		= &spi_bus_type,
 		.owner		= THIS_MODULE,
 	},
 
diff --git a/drivers/video/backlight/omap1_bl.c b/drivers/video/backlight/omap1_bl.c
index 0175bfb..bfdc5fb 100644
--- a/drivers/video/backlight/omap1_bl.c
+++ b/drivers/video/backlight/omap1_bl.c
@@ -18,6 +18,8 @@
  * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
  */
 
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
 #include <linux/module.h>
 #include <linux/kernel.h>
 #include <linux/init.h>
@@ -168,7 +170,7 @@
 	dev->props.brightness = pdata->default_intensity;
 	omapbl_update_status(dev);
 
-	printk(KERN_INFO "OMAP LCD backlight initialised\n");
+	pr_info("OMAP LCD backlight initialised\n");
 
 	return 0;
 }
diff --git a/drivers/video/backlight/pcf50633-backlight.c b/drivers/video/backlight/pcf50633-backlight.c
index c65853c..c092159 100644
--- a/drivers/video/backlight/pcf50633-backlight.c
+++ b/drivers/video/backlight/pcf50633-backlight.c
@@ -111,6 +111,7 @@
 	if (!pcf_bl)
 		return -ENOMEM;
 
+	memset(&bl_props, 0, sizeof(bl_props));
 	bl_props.type = BACKLIGHT_RAW;
 	bl_props.max_brightness = 0x3f;
 	bl_props.power = FB_BLANK_UNBLANK;
diff --git a/drivers/video/backlight/progear_bl.c b/drivers/video/backlight/progear_bl.c
index 6af183d..69b35f0 100644
--- a/drivers/video/backlight/progear_bl.c
+++ b/drivers/video/backlight/progear_bl.c
@@ -15,6 +15,8 @@
  *
  */
 
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
 #include <linux/module.h>
 #include <linux/kernel.h>
 #include <linux/init.h>
@@ -68,13 +70,13 @@
 
 	pmu_dev = pci_get_device(PCI_VENDOR_ID_AL, PCI_DEVICE_ID_AL_M7101, NULL);
 	if (!pmu_dev) {
-		printk("ALI M7101 PMU not found.\n");
+		pr_err("ALI M7101 PMU not found.\n");
 		return -ENODEV;
 	}
 
 	sb_dev = pci_get_device(PCI_VENDOR_ID_AL, PCI_DEVICE_ID_AL_M1533, NULL);
 	if (!sb_dev) {
-		printk("ALI 1533 SB not found.\n");
+		pr_err("ALI 1533 SB not found.\n");
 		ret = -ENODEV;
 		goto put_pmu;
 	}
diff --git a/drivers/video/backlight/s6e63m0.c b/drivers/video/backlight/s6e63m0.c
index e264f55..6437ae4 100644
--- a/drivers/video/backlight/s6e63m0.c
+++ b/drivers/video/backlight/s6e63m0.c
@@ -741,7 +741,7 @@
 	struct backlight_device *bd = NULL;
 	struct backlight_properties props;
 
-	lcd = kzalloc(sizeof(struct s6e63m0), GFP_KERNEL);
+	lcd = devm_kzalloc(&spi->dev, sizeof(struct s6e63m0), GFP_KERNEL);
 	if (!lcd)
 		return -ENOMEM;
 
@@ -751,7 +751,7 @@
 	ret = spi_setup(spi);
 	if (ret < 0) {
 		dev_err(&spi->dev, "spi setup failed.\n");
-		goto out_free_lcd;
+		return ret;
 	}
 
 	lcd->spi = spi;
@@ -760,14 +760,12 @@
 	lcd->lcd_pd = (struct lcd_platform_data *)spi->dev.platform_data;
 	if (!lcd->lcd_pd) {
 		dev_err(&spi->dev, "platform data is NULL.\n");
-		goto out_free_lcd;
+		return -EFAULT;
 	}
 
 	ld = lcd_device_register("s6e63m0", &spi->dev, lcd, &s6e63m0_lcd_ops);
-	if (IS_ERR(ld)) {
-		ret = PTR_ERR(ld);
-		goto out_free_lcd;
-	}
+	if (IS_ERR(ld))
+		return PTR_ERR(ld);
 
 	lcd->ld = ld;
 
@@ -824,8 +822,6 @@
 
 out_lcd_unregister:
 	lcd_device_unregister(ld);
-out_free_lcd:
-	kfree(lcd);
 	return ret;
 }
 
@@ -838,7 +834,6 @@
 	device_remove_file(&spi->dev, &dev_attr_gamma_mode);
 	backlight_device_unregister(lcd->bd);
 	lcd_device_unregister(lcd->ld);
-	kfree(lcd);
 
 	return 0;
 }
@@ -899,7 +894,6 @@
 static struct spi_driver s6e63m0_driver = {
 	.driver = {
 		.name	= "s6e63m0",
-		.bus	= &spi_bus_type,
 		.owner	= THIS_MODULE,
 	},
 	.probe		= s6e63m0_probe,
diff --git a/drivers/video/backlight/tdo24m.c b/drivers/video/backlight/tdo24m.c
index 2368b8e..02444d0 100644
--- a/drivers/video/backlight/tdo24m.c
+++ b/drivers/video/backlight/tdo24m.c
@@ -349,7 +349,7 @@
 	if (err)
 		return err;
 
-	lcd = kzalloc(sizeof(struct tdo24m), GFP_KERNEL);
+	lcd = devm_kzalloc(&spi->dev, sizeof(struct tdo24m), GFP_KERNEL);
 	if (!lcd)
 		return -ENOMEM;
 
@@ -357,11 +357,9 @@
 	lcd->power = FB_BLANK_POWERDOWN;
 	lcd->mode = MODE_VGA;	/* default to VGA */
 
-	lcd->buf = kmalloc(TDO24M_SPI_BUFF_SIZE, GFP_KERNEL);
-	if (lcd->buf == NULL) {
-		kfree(lcd);
+	lcd->buf = devm_kzalloc(&spi->dev, TDO24M_SPI_BUFF_SIZE, GFP_KERNEL);
+	if (lcd->buf == NULL)
 		return -ENOMEM;
-	}
 
 	m = &lcd->msg;
 	x = &lcd->xfer;
@@ -383,15 +381,13 @@
 		break;
 	default:
 		dev_err(&spi->dev, "Unsupported model");
-		goto out_free;
+		return -EINVAL;
 	}
 
 	lcd->lcd_dev = lcd_device_register("tdo24m", &spi->dev,
 					lcd, &tdo24m_ops);
-	if (IS_ERR(lcd->lcd_dev)) {
-		err = PTR_ERR(lcd->lcd_dev);
-		goto out_free;
-	}
+	if (IS_ERR(lcd->lcd_dev))
+		return PTR_ERR(lcd->lcd_dev);
 
 	dev_set_drvdata(&spi->dev, lcd);
 	err = tdo24m_power(lcd, FB_BLANK_UNBLANK);
@@ -402,9 +398,6 @@
 
 out_unregister:
 	lcd_device_unregister(lcd->lcd_dev);
-out_free:
-	kfree(lcd->buf);
-	kfree(lcd);
 	return err;
 }
 
@@ -414,8 +407,6 @@
 
 	tdo24m_power(lcd, FB_BLANK_POWERDOWN);
 	lcd_device_unregister(lcd->lcd_dev);
-	kfree(lcd->buf);
-	kfree(lcd);
 
 	return 0;
 }
diff --git a/drivers/video/backlight/tosa_bl.c b/drivers/video/backlight/tosa_bl.c
index 2b241ab..0d54e60 100644
--- a/drivers/video/backlight/tosa_bl.c
+++ b/drivers/video/backlight/tosa_bl.c
@@ -82,8 +82,11 @@
 		const struct i2c_device_id *id)
 {
 	struct backlight_properties props;
-	struct tosa_bl_data *data = kzalloc(sizeof(struct tosa_bl_data), GFP_KERNEL);
+	struct tosa_bl_data *data;
 	int ret = 0;
+
+	data = devm_kzalloc(&client->dev, sizeof(struct tosa_bl_data),
+				GFP_KERNEL);
 	if (!data)
 		return -ENOMEM;
 
@@ -92,7 +95,7 @@
 	ret = gpio_request(TOSA_GPIO_BL_C20MA, "backlight");
 	if (ret) {
 		dev_dbg(&data->bl->dev, "Unable to request gpio!\n");
-		goto err_gpio_bl;
+		return ret;
 	}
 	ret = gpio_direction_output(TOSA_GPIO_BL_C20MA, 0);
 	if (ret)
@@ -122,8 +125,6 @@
 	data->bl = NULL;
 err_gpio_dir:
 	gpio_free(TOSA_GPIO_BL_C20MA);
-err_gpio_bl:
-	kfree(data);
 	return ret;
 }
 
@@ -136,8 +137,6 @@
 
 	gpio_free(TOSA_GPIO_BL_C20MA);
 
-	kfree(data);
-
 	return 0;
 }
 
diff --git a/drivers/video/backlight/tosa_lcd.c b/drivers/video/backlight/tosa_lcd.c
index 2231aec..47823b8 100644
--- a/drivers/video/backlight/tosa_lcd.c
+++ b/drivers/video/backlight/tosa_lcd.c
@@ -174,7 +174,8 @@
 	int ret;
 	struct tosa_lcd_data *data;
 
-	data = kzalloc(sizeof(struct tosa_lcd_data), GFP_KERNEL);
+	data = devm_kzalloc(&spi->dev, sizeof(struct tosa_lcd_data),
+				GFP_KERNEL);
 	if (!data)
 		return -ENOMEM;
 
@@ -187,7 +188,7 @@
 
 	ret = spi_setup(spi);
 	if (ret < 0)
-		goto err_spi;
+		return ret;
 
 	data->spi = spi;
 	dev_set_drvdata(&spi->dev, data);
@@ -224,8 +225,6 @@
 	gpio_free(TOSA_GPIO_TG_ON);
 err_gpio_tg:
 	dev_set_drvdata(&spi->dev, NULL);
-err_spi:
-	kfree(data);
 	return ret;
 }
 
@@ -242,7 +241,6 @@
 
 	gpio_free(TOSA_GPIO_TG_ON);
 	dev_set_drvdata(&spi->dev, NULL);
-	kfree(data);
 
 	return 0;
 }
diff --git a/drivers/video/backlight/wm831x_bl.c b/drivers/video/backlight/wm831x_bl.c
index 5d365de..9e5517a 100644
--- a/drivers/video/backlight/wm831x_bl.c
+++ b/drivers/video/backlight/wm831x_bl.c
@@ -194,6 +194,7 @@
 	data->current_brightness = 0;
 	data->isink_reg = isink_reg;
 
+	memset(&props, 0, sizeof(props));
 	props.type = BACKLIGHT_RAW;
 	props.max_brightness = max_isel;
 	bl = backlight_device_register("wm831x", &pdev->dev, data,
diff --git a/drivers/video/bfin_adv7393fb.c b/drivers/video/bfin_adv7393fb.c
index 1a268a2..9bdd4b0 100644
--- a/drivers/video/bfin_adv7393fb.c
+++ b/drivers/video/bfin_adv7393fb.c
@@ -353,18 +353,16 @@
 
 static int
 adv7393_write_proc(struct file *file, const char __user * buffer,
-		   unsigned long count, void *data)
+		   size_t count, void *data)
 {
 	struct adv7393fb_device *fbdev = data;
-	char line[8];
 	unsigned int val;
 	int ret;
 
-	ret = copy_from_user(line, buffer, count);
+	ret = kstrtouint_from_user(buffer, count, 0, &val);
 	if (ret)
 		return -EFAULT;
 
-	val = simple_strtoul(line, NULL, 0);
 	adv7393_write(fbdev->client, val >> 8, val & 0xff);
 
 	return count;
@@ -414,14 +412,14 @@
 		if (ret) {
 			dev_err(&client->dev, "PPI0_FS3 GPIO request failed\n");
 			ret = -EBUSY;
-			goto out_8;
+			goto free_fbdev;
 		}
 	}
 
 	if (peripheral_request_list(ppi_pins, DRIVER_NAME)) {
 		dev_err(&client->dev, "requesting PPI peripheral failed\n");
 		ret = -EFAULT;
-		goto out_8;
+		goto free_gpio;
 	}
 
 	fbdev->fb_mem =
@@ -432,7 +430,7 @@
 		dev_err(&client->dev, "couldn't allocate dma buffer (%d bytes)\n",
 		       (u32) fbdev->fb_len);
 		ret = -ENOMEM;
-		goto out_7;
+		goto free_ppi_pins;
 	}
 
 	fbdev->info.screen_base = (void *)fbdev->fb_mem;
@@ -464,27 +462,27 @@
 	if (!fbdev->info.pseudo_palette) {
 		dev_err(&client->dev, "failed to allocate pseudo_palette\n");
 		ret = -ENOMEM;
-		goto out_6;
+		goto free_fb_mem;
 	}
 
 	if (fb_alloc_cmap(&fbdev->info.cmap, BFIN_LCD_NBR_PALETTE_ENTRIES, 0) < 0) {
 		dev_err(&client->dev, "failed to allocate colormap (%d entries)\n",
 			   BFIN_LCD_NBR_PALETTE_ENTRIES);
 		ret = -EFAULT;
-		goto out_5;
+		goto free_palette;
 	}
 
 	if (request_dma(CH_PPI, "BF5xx_PPI_DMA") < 0) {
 		dev_err(&client->dev, "unable to request PPI DMA\n");
 		ret = -EFAULT;
-		goto out_4;
+		goto free_cmap;
 	}
 
 	if (request_irq(IRQ_PPI_ERROR, ppi_irq_error, 0,
 			"PPI ERROR", fbdev) < 0) {
 		dev_err(&client->dev, "unable to request PPI ERROR IRQ\n");
 		ret = -EFAULT;
-		goto out_3;
+		goto free_ch_ppi;
 	}
 
 	fbdev->open = 0;
@@ -494,14 +492,14 @@
 
 	if (ret) {
 		dev_err(&client->dev, "i2c attach: init error\n");
-		goto out_1;
+		goto free_irq_ppi;
 	}
 
 
 	if (register_framebuffer(&fbdev->info) < 0) {
 		dev_err(&client->dev, "unable to register framebuffer\n");
 		ret = -EFAULT;
-		goto out_1;
+		goto free_irq_ppi;
 	}
 
 	dev_info(&client->dev, "fb%d: %s frame buffer device\n",
@@ -512,7 +510,7 @@
 	if (!entry) {
 		dev_err(&client->dev, "unable to create /proc entry\n");
 		ret = -EFAULT;
-		goto out_0;
+		goto free_fb;
 	}
 
 	entry->read_proc = adv7393_read_proc;
@@ -521,22 +519,25 @@
 
 	return 0;
 
- out_0:
+free_fb:
 	unregister_framebuffer(&fbdev->info);
- out_1:
+free_irq_ppi:
 	free_irq(IRQ_PPI_ERROR, fbdev);
- out_3:
+free_ch_ppi:
 	free_dma(CH_PPI);
- out_4:
+free_cmap:
+	fb_dealloc_cmap(&fbdev->info.cmap);
+free_palette:
+	kfree(fbdev->info.pseudo_palette);
+free_fb_mem:
 	dma_free_coherent(NULL, fbdev->fb_len, fbdev->fb_mem,
 			  fbdev->dma_handle);
- out_5:
-	fb_dealloc_cmap(&fbdev->info.cmap);
- out_6:
-	kfree(fbdev->info.pseudo_palette);
- out_7:
+free_ppi_pins:
 	peripheral_free_list(ppi_pins);
- out_8:
+free_gpio:
+	if (ANOMALY_05000400)
+		gpio_free(P_IDENT(P_PPI0_FS3));
+free_fbdev:
 	kfree(fbdev);
 
 	return ret;
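The proc write handler above now parses the user buffer with kstrtouint_from_user() instead of copying into an 8-byte stack buffer and running simple_strtoul() on it, which simplifies the code and removes the overflow risk for longer writes. The pattern in isolation, with an illustrative handler name:

	static int example_write_proc(struct file *file, const char __user *buffer,
				      size_t count, void *data)
	{
		unsigned int val;
		int ret;

		/* copy from user space and parse in one bounds-checked step */
		ret = kstrtouint_from_user(buffer, count, 0, &val);
		if (ret)
			return -EFAULT;

		/* ... use val ... */
		return count;
	}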
diff --git a/drivers/video/broadsheetfb.c b/drivers/video/broadsheetfb.c
index 377dde3..c95b417 100644
--- a/drivers/video/broadsheetfb.c
+++ b/drivers/video/broadsheetfb.c
@@ -1211,7 +1211,7 @@
 
 static struct platform_driver broadsheetfb_driver = {
 	.probe	= broadsheetfb_probe,
-	.remove = broadsheetfb_remove,
+	.remove = __devexit_p(broadsheetfb_remove),
 	.driver	= {
 		.owner	= THIS_MODULE,
 		.name	= "broadsheetfb",
diff --git a/drivers/video/cobalt_lcdfb.c b/drivers/video/cobalt_lcdfb.c
index f56699d..eae46f6 100644
--- a/drivers/video/cobalt_lcdfb.c
+++ b/drivers/video/cobalt_lcdfb.c
@@ -1,7 +1,8 @@
 /*
- *  Cobalt server LCD frame buffer driver.
+ *  Cobalt/SEAD3 LCD frame buffer driver.
  *
  *  Copyright (C) 2008  Yoichi Yuasa <yuasa@linux-mips.org>
+ *  Copyright (C) 2012  MIPS Technologies, Inc.
  *
  *  This program is free software; you can redistribute it and/or modify
  *  it under the terms of the GNU General Public License as published by
@@ -62,6 +63,7 @@
 #define LCD_CUR_POS(x)		((x) & LCD_CUR_POS_MASK)
 #define LCD_TEXT_POS(x)		((x) | LCD_TEXT_MODE)
 
+#ifdef CONFIG_MIPS_COBALT
 static inline void lcd_write_control(struct fb_info *info, u8 control)
 {
 	writel((u32)control << 24, info->screen_base);
@@ -81,6 +83,47 @@
 {
 	return readl(info->screen_base + LCD_DATA_REG_OFFSET) >> 24;
 }
+#else
+
+#define LCD_CTL			0x00
+#define LCD_DATA		0x08
+#define CPLD_STATUS		0x10
+#define CPLD_DATA		0x18
+
+static inline void cpld_wait(struct fb_info *info)
+{
+	do {
+	} while (readl(info->screen_base + CPLD_STATUS) & 1);
+}
+
+static inline void lcd_write_control(struct fb_info *info, u8 control)
+{
+	cpld_wait(info);
+	writel(control, info->screen_base + LCD_CTL);
+}
+
+static inline u8 lcd_read_control(struct fb_info *info)
+{
+	cpld_wait(info);
+	readl(info->screen_base + LCD_CTL);
+	cpld_wait(info);
+	return readl(info->screen_base + CPLD_DATA) & 0xff;
+}
+
+static inline void lcd_write_data(struct fb_info *info, u8 data)
+{
+	cpld_wait(info);
+	writel(data, info->screen_base + LCD_DATA);
+}
+
+static inline u8 lcd_read_data(struct fb_info *info)
+{
+	cpld_wait(info);
+	readl(info->screen_base + LCD_DATA);
+	cpld_wait(info);
+	return readl(info->screen_base + CPLD_DATA) & 0xff;
+}
+#endif
 
 static int lcd_busy_wait(struct fb_info *info)
 {
diff --git a/drivers/video/console/Kconfig b/drivers/video/console/Kconfig
index c2d11fe..e2c96d0 100644
--- a/drivers/video/console/Kconfig
+++ b/drivers/video/console/Kconfig
@@ -224,5 +224,19 @@
 	  big letters. It fits between the sun 12x22 and the normal 8x16 font.
 	  If other fonts are too big or too small for you, say Y, otherwise say N.
 
+config FONT_AUTOSELECT
+	def_bool y
+	depends on FRAMEBUFFER_CONSOLE || SGI_NEWPORT_CONSOLE || STI_CONSOLE || USB_SISUSBVGA_CON
+	depends on !FONT_8x8
+	depends on !FONT_6x11
+	depends on !FONT_7x14
+	depends on !FONT_PEARL_8x8
+	depends on !FONT_ACORN_8x8
+	depends on !FONT_MINI_4x6
+	depends on !FONT_SUN8x16
+	depends on !FONT_SUN12x22
+	depends on !FONT_10x18
+	select FONT_8x16
+
 endmenu
 
diff --git a/drivers/video/ep93xx-fb.c b/drivers/video/ep93xx-fb.c
index f8babbe..345d962 100644
--- a/drivers/video/ep93xx-fb.c
+++ b/drivers/video/ep93xx-fb.c
@@ -507,16 +507,16 @@
 
 	err = fb_alloc_cmap(&info->cmap, 256, 0);
 	if (err)
-		goto failed;
+		goto failed_cmap;
 
 	err = ep93xxfb_alloc_videomem(info);
 	if (err)
-		goto failed;
+		goto failed_videomem;
 
 	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
 	if (!res) {
 		err = -ENXIO;
-		goto failed;
+		goto failed_resource;
 	}
 
 	/*
@@ -532,7 +532,7 @@
 	fbi->mmio_base = ioremap(res->start, resource_size(res));
 	if (!fbi->mmio_base) {
 		err = -ENXIO;
-		goto failed;
+		goto failed_resource;
 	}
 
 	strcpy(info->fix.id, pdev->name);
@@ -553,24 +553,24 @@
 	if (err == 0) {
 		dev_err(info->dev, "No suitable video mode found\n");
 		err = -EINVAL;
-		goto failed;
+		goto failed_mode;
 	}
 
 	if (mach_info->setup) {
 		err = mach_info->setup(pdev);
 		if (err)
-			return err;
+			goto failed_mode;
 	}
 
 	err = ep93xxfb_check_var(&info->var, info);
 	if (err)
-		goto failed;
+		goto failed_check;
 
 	fbi->clk = clk_get(info->dev, NULL);
 	if (IS_ERR(fbi->clk)) {
 		err = PTR_ERR(fbi->clk);
 		fbi->clk = NULL;
-		goto failed;
+		goto failed_check;
 	}
 
 	ep93xxfb_set_par(info);
@@ -585,15 +585,17 @@
 	return 0;
 
 failed:
-	if (fbi->clk)
-		clk_put(fbi->clk);
-	if (fbi->mmio_base)
-		iounmap(fbi->mmio_base);
-	ep93xxfb_dealloc_videomem(info);
-	if (&info->cmap)
-		fb_dealloc_cmap(&info->cmap);
+	clk_put(fbi->clk);
+failed_check:
 	if (fbi->mach_info->teardown)
 		fbi->mach_info->teardown(pdev);
+failed_mode:
+	iounmap(fbi->mmio_base);
+failed_resource:
+	ep93xxfb_dealloc_videomem(info);
+failed_videomem:
+	fb_dealloc_cmap(&info->cmap);
+failed_cmap:
 	kfree(info);
 	platform_set_drvdata(pdev, NULL);
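The ep93xx-fb rework above replaces a single catch-all "failed:" label with one label per acquired resource, so each error path only unwinds what was actually set up. A minimal sketch of that layered-goto pattern, using hypothetical resources rather than the driver's real ones:

#include <linux/errno.h>
#include <linux/slab.h>

/* Hypothetical two-step setup; each label releases exactly what was
 * acquired before the failing step. */
static int example_setup(void **pa, void **pb)
{
	int err;

	*pa = kzalloc(16, GFP_KERNEL);
	if (!*pa)
		return -ENOMEM;		/* nothing to unwind yet */

	*pb = kzalloc(32, GFP_KERNEL);
	if (!*pb) {
		err = -ENOMEM;
		goto err_free_a;	/* undo step one only */
	}

	return 0;

err_free_a:
	kfree(*pa);
	return err;
}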
 
diff --git a/drivers/video/exynos/exynos_dp_core.c b/drivers/video/exynos/exynos_dp_core.c
index 2a4481c..a36b2d2 100644
--- a/drivers/video/exynos/exynos_dp_core.c
+++ b/drivers/video/exynos/exynos_dp_core.c
@@ -21,14 +21,14 @@
 
 #include <video/exynos_dp.h>
 
-#include <plat/cpu.h>
-
 #include "exynos_dp_core.h"
 
 static int exynos_dp_init_dp(struct exynos_dp_device *dp)
 {
 	exynos_dp_reset(dp);
 
+	exynos_dp_swreset(dp);
+
 	/* SW defined function Normal operation */
 	exynos_dp_enable_sw_function(dp);
 
@@ -478,7 +478,7 @@
 	int lane_count;
 	u8 buf[5];
 
-	u8 *adjust_request;
+	u8 adjust_request[2];
 	u8 voltage_swing;
 	u8 pre_emphasis;
 	u8 training_lane;
@@ -493,8 +493,8 @@
 		/* set training pattern 2 for EQ */
 		exynos_dp_set_training_pattern(dp, TRAINING_PTN2);
 
-		adjust_request = link_status + (DPCD_ADDR_ADJUST_REQUEST_LANE0_1
-						- DPCD_ADDR_LANE0_1_STATUS);
+		adjust_request[0] = link_status[4];
+		adjust_request[1] = link_status[5];
 
 		exynos_dp_get_adjust_train(dp, adjust_request);
 
@@ -566,7 +566,7 @@
 	u8 buf[5];
 	u32 reg;
 
-	u8 *adjust_request;
+	u8 adjust_request[2];
 
 	udelay(400);
 
@@ -575,8 +575,8 @@
 	lane_count = dp->link_train.lane_count;
 
 	if (exynos_dp_clock_recovery_ok(link_status, lane_count) == 0) {
-		adjust_request = link_status + (DPCD_ADDR_ADJUST_REQUEST_LANE0_1
-						- DPCD_ADDR_LANE0_1_STATUS);
+		adjust_request[0] = link_status[4];
+		adjust_request[1] = link_status[5];
 
 		if (exynos_dp_channel_eq_ok(link_status, lane_count) == 0) {
 			/* training pattern set to Normal */
@@ -770,7 +770,7 @@
 			return -ETIMEDOUT;
 		}
 
-		mdelay(100);
+		udelay(1);
 	}
 
 	/* Set to use the register calculated M/N video */
@@ -804,7 +804,7 @@
 			return -ETIMEDOUT;
 		}
 
-		mdelay(100);
+		mdelay(1);
 	}
 
 	if (retval != 0)
@@ -860,7 +860,8 @@
 		return -EINVAL;
 	}
 
-	dp = kzalloc(sizeof(struct exynos_dp_device), GFP_KERNEL);
+	dp = devm_kzalloc(&pdev->dev, sizeof(struct exynos_dp_device),
+				GFP_KERNEL);
 	if (!dp) {
 		dev_err(&pdev->dev, "no memory for device data\n");
 		return -ENOMEM;
@@ -871,8 +872,7 @@
 	dp->clock = clk_get(&pdev->dev, "dp");
 	if (IS_ERR(dp->clock)) {
 		dev_err(&pdev->dev, "failed to get clock\n");
-		ret = PTR_ERR(dp->clock);
-		goto err_dp;
+		return PTR_ERR(dp->clock);
 	}
 
 	clk_enable(dp->clock);
@@ -884,35 +884,25 @@
 		goto err_clock;
 	}
 
-	res = request_mem_region(res->start, resource_size(res),
-				dev_name(&pdev->dev));
-	if (!res) {
-		dev_err(&pdev->dev, "failed to request registers region\n");
-		ret = -EINVAL;
-		goto err_clock;
-	}
-
-	dp->res = res;
-
-	dp->reg_base = ioremap(res->start, resource_size(res));
+	dp->reg_base = devm_request_and_ioremap(&pdev->dev, res);
 	if (!dp->reg_base) {
 		dev_err(&pdev->dev, "failed to ioremap\n");
 		ret = -ENOMEM;
-		goto err_req_region;
+		goto err_clock;
 	}
 
 	dp->irq = platform_get_irq(pdev, 0);
 	if (!dp->irq) {
 		dev_err(&pdev->dev, "failed to get irq\n");
 		ret = -ENODEV;
-		goto err_ioremap;
+		goto err_clock;
 	}
 
-	ret = request_irq(dp->irq, exynos_dp_irq_handler, 0,
-			"exynos-dp", dp);
+	ret = devm_request_irq(&pdev->dev, dp->irq, exynos_dp_irq_handler, 0,
+				"exynos-dp", dp);
 	if (ret) {
 		dev_err(&pdev->dev, "failed to request irq\n");
-		goto err_ioremap;
+		goto err_clock;
 	}
 
 	dp->video_info = pdata->video_info;
@@ -924,7 +914,7 @@
 	ret = exynos_dp_detect_hpd(dp);
 	if (ret) {
 		dev_err(&pdev->dev, "unable to detect hpd\n");
-		goto err_irq;
+		goto err_clock;
 	}
 
 	exynos_dp_handle_edid(dp);
@@ -933,7 +923,7 @@
 				dp->video_info->link_rate);
 	if (ret) {
 		dev_err(&pdev->dev, "unable to do link train\n");
-		goto err_irq;
+		goto err_clock;
 	}
 
 	exynos_dp_enable_scramble(dp, 1);
@@ -947,23 +937,15 @@
 	ret = exynos_dp_config_video(dp, dp->video_info);
 	if (ret) {
 		dev_err(&pdev->dev, "unable to config video\n");
-		goto err_irq;
+		goto err_clock;
 	}
 
 	platform_set_drvdata(pdev, dp);
 
 	return 0;
 
-err_irq:
-	free_irq(dp->irq, dp);
-err_ioremap:
-	iounmap(dp->reg_base);
-err_req_region:
-	release_mem_region(res->start, resource_size(res));
 err_clock:
 	clk_put(dp->clock);
-err_dp:
-	kfree(dp);
 
 	return ret;
 }
@@ -976,16 +958,9 @@
 	if (pdata && pdata->phy_exit)
 		pdata->phy_exit();
 
-	free_irq(dp->irq, dp);
-	iounmap(dp->reg_base);
-
 	clk_disable(dp->clock);
 	clk_put(dp->clock);
 
-	release_mem_region(dp->res->start, resource_size(dp->res));
-
-	kfree(dp);
-
 	return 0;
 }
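The exynos_dp_core.c hunks above convert kzalloc()/request_mem_region()/ioremap()/request_irq() to their devm_* managed counterparts, which is why most error paths collapse into a single goto err_clock and the matching frees disappear from the remove path. A hedged sketch of the same probe structure; the device struct, IRQ handler and register layout below are made up for illustration:

#include <linux/device.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/platform_device.h>
#include <linux/slab.h>

struct example_dp {			/* hypothetical driver data */
	void __iomem	*regs;
	int		irq;
};

static irqreturn_t example_irq(int irq, void *dev_id)
{
	return IRQ_HANDLED;
}

static int example_probe(struct platform_device *pdev)
{
	struct example_dp *dp;
	struct resource *res;
	int ret;

	/* freed automatically when the device is unbound */
	dp = devm_kzalloc(&pdev->dev, sizeof(*dp), GFP_KERNEL);
	if (!dp)
		return -ENOMEM;

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	/* requests and maps the region in one call, released on unbind
	 * (this is the 3.x-era helper used by the patch) */
	dp->regs = devm_request_and_ioremap(&pdev->dev, res);
	if (!dp->regs)
		return -ENOMEM;

	dp->irq = platform_get_irq(pdev, 0);
	if (dp->irq < 0)
		return dp->irq;

	ret = devm_request_irq(&pdev->dev, dp->irq, example_irq, 0,
			       dev_name(&pdev->dev), dp);
	if (ret)
		return ret;

	platform_set_drvdata(pdev, dp);
	return 0;	/* no explicit unwinding needed for devm_* resources */
}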
 
diff --git a/drivers/video/exynos/exynos_dp_core.h b/drivers/video/exynos/exynos_dp_core.h
index 90ceaca..1e0f998 100644
--- a/drivers/video/exynos/exynos_dp_core.h
+++ b/drivers/video/exynos/exynos_dp_core.h
@@ -26,7 +26,6 @@
 
 struct exynos_dp_device {
 	struct device		*dev;
-	struct resource		*res;
 	struct clk		*clock;
 	unsigned int		irq;
 	void __iomem		*reg_base;
@@ -39,8 +38,10 @@
 void exynos_dp_enable_video_mute(struct exynos_dp_device *dp, bool enable);
 void exynos_dp_stop_video(struct exynos_dp_device *dp);
 void exynos_dp_lane_swap(struct exynos_dp_device *dp, bool enable);
+void exynos_dp_init_analog_param(struct exynos_dp_device *dp);
 void exynos_dp_init_interrupt(struct exynos_dp_device *dp);
 void exynos_dp_reset(struct exynos_dp_device *dp);
+void exynos_dp_swreset(struct exynos_dp_device *dp);
 void exynos_dp_config_interrupt(struct exynos_dp_device *dp);
 u32 exynos_dp_get_pll_lock_status(struct exynos_dp_device *dp);
 void exynos_dp_set_pll_power_down(struct exynos_dp_device *dp, bool enable);
diff --git a/drivers/video/exynos/exynos_dp_reg.c b/drivers/video/exynos/exynos_dp_reg.c
index 6548afa..6ce76d5 100644
--- a/drivers/video/exynos/exynos_dp_reg.c
+++ b/drivers/video/exynos/exynos_dp_reg.c
@@ -16,8 +16,6 @@
 
 #include <video/exynos_dp.h>
 
-#include <plat/cpu.h>
-
 #include "exynos_dp_core.h"
 #include "exynos_dp_reg.h"
 
@@ -65,6 +63,28 @@
 	writel(reg, dp->reg_base + EXYNOS_DP_LANE_MAP);
 }
 
+void exynos_dp_init_analog_param(struct exynos_dp_device *dp)
+{
+	u32 reg;
+
+	reg = TX_TERMINAL_CTRL_50_OHM;
+	writel(reg, dp->reg_base + EXYNOS_DP_ANALOG_CTL_1);
+
+	reg = SEL_24M | TX_DVDD_BIT_1_0625V;
+	writel(reg, dp->reg_base + EXYNOS_DP_ANALOG_CTL_2);
+
+	reg = DRIVE_DVDD_BIT_1_0625V | VCO_BIT_600_MICRO;
+	writel(reg, dp->reg_base + EXYNOS_DP_ANALOG_CTL_3);
+
+	reg = PD_RING_OSC | AUX_TERMINAL_CTRL_50_OHM |
+		TX_CUR1_2X | TX_CUR_8_MA;
+	writel(reg, dp->reg_base + EXYNOS_DP_PLL_FILTER_CTL_1);
+
+	reg = CH3_AMP_400_MV | CH2_AMP_400_MV |
+		CH1_AMP_400_MV | CH0_AMP_400_MV;
+	writel(reg, dp->reg_base + EXYNOS_DP_TX_AMP_TUNING_CTL);
+}
+
 void exynos_dp_init_interrupt(struct exynos_dp_device *dp)
 {
 	/* Set interrupt pin assertion polarity as high */
@@ -89,8 +109,6 @@
 {
 	u32 reg;
 
-	writel(RESET_DP_TX, dp->reg_base + EXYNOS_DP_TX_SW_RESET);
-
 	exynos_dp_stop_video(dp);
 	exynos_dp_enable_video_mute(dp, 0);
 
@@ -131,9 +149,15 @@
 
 	writel(0x00000101, dp->reg_base + EXYNOS_DP_SOC_GENERAL_CTL);
 
+	exynos_dp_init_analog_param(dp);
 	exynos_dp_init_interrupt(dp);
 }
 
+void exynos_dp_swreset(struct exynos_dp_device *dp)
+{
+	writel(RESET_DP_TX, dp->reg_base + EXYNOS_DP_TX_SW_RESET);
+}
+
 void exynos_dp_config_interrupt(struct exynos_dp_device *dp)
 {
 	u32 reg;
@@ -271,6 +295,7 @@
 void exynos_dp_init_analog_func(struct exynos_dp_device *dp)
 {
 	u32 reg;
+	int timeout_loop = 0;
 
 	exynos_dp_set_analog_power_down(dp, POWER_ALL, 0);
 
@@ -282,9 +307,19 @@
 	writel(reg, dp->reg_base + EXYNOS_DP_DEBUG_CTL);
 
 	/* Power up PLL */
-	if (exynos_dp_get_pll_lock_status(dp) == PLL_UNLOCKED)
+	if (exynos_dp_get_pll_lock_status(dp) == PLL_UNLOCKED) {
 		exynos_dp_set_pll_power_down(dp, 0);
 
+		while (exynos_dp_get_pll_lock_status(dp) == PLL_UNLOCKED) {
+			timeout_loop++;
+			if (DP_TIMEOUT_LOOP_COUNT < timeout_loop) {
+				dev_err(dp->dev, "failed to get pll lock status\n");
+				return;
+			}
+			usleep_range(10, 20);
+		}
+	}
+
 	/* Enable Serdes FIFO function and Link symbol clock domain module */
 	reg = readl(dp->reg_base + EXYNOS_DP_FUNC_EN_2);
 	reg &= ~(SERDES_FIFO_FUNC_EN_N | LS_CLK_DOMAIN_FUNC_EN_N
diff --git a/drivers/video/exynos/exynos_dp_reg.h b/drivers/video/exynos/exynos_dp_reg.h
index 42f608e..125b27c 100644
--- a/drivers/video/exynos/exynos_dp_reg.h
+++ b/drivers/video/exynos/exynos_dp_reg.h
@@ -24,6 +24,12 @@
 
 #define EXYNOS_DP_LANE_MAP			0x35C
 
+#define EXYNOS_DP_ANALOG_CTL_1			0x370
+#define EXYNOS_DP_ANALOG_CTL_2			0x374
+#define EXYNOS_DP_ANALOG_CTL_3			0x378
+#define EXYNOS_DP_PLL_FILTER_CTL_1		0x37C
+#define EXYNOS_DP_TX_AMP_TUNING_CTL		0x380
+
 #define EXYNOS_DP_AUX_HW_RETRY_CTL		0x390
 
 #define EXYNOS_DP_COMMON_INT_STA_1		0x3C4
@@ -166,6 +172,29 @@
 #define LANE0_MAP_LOGIC_LANE_2			(0x2 << 0)
 #define LANE0_MAP_LOGIC_LANE_3			(0x3 << 0)
 
+/* EXYNOS_DP_ANALOG_CTL_1 */
+#define TX_TERMINAL_CTRL_50_OHM			(0x1 << 4)
+
+/* EXYNOS_DP_ANALOG_CTL_2 */
+#define SEL_24M					(0x1 << 3)
+#define TX_DVDD_BIT_1_0625V			(0x4 << 0)
+
+/* EXYNOS_DP_ANALOG_CTL_3 */
+#define DRIVE_DVDD_BIT_1_0625V			(0x4 << 5)
+#define VCO_BIT_600_MICRO			(0x5 << 0)
+
+/* EXYNOS_DP_PLL_FILTER_CTL_1 */
+#define PD_RING_OSC				(0x1 << 6)
+#define AUX_TERMINAL_CTRL_50_OHM		(0x2 << 4)
+#define TX_CUR1_2X				(0x1 << 2)
+#define TX_CUR_8_MA				(0x2 << 0)
+
+/* EXYNOS_DP_TX_AMP_TUNING_CTL */
+#define CH3_AMP_400_MV				(0x0 << 24)
+#define CH2_AMP_400_MV				(0x0 << 16)
+#define CH1_AMP_400_MV				(0x0 << 8)
+#define CH0_AMP_400_MV				(0x0 << 0)
+
 /* EXYNOS_DP_AUX_HW_RETRY_CTL */
 #define AUX_BIT_PERIOD_EXPECTED_DELAY(x)	(((x) & 0x7) << 8)
 #define AUX_HW_RETRY_INTERVAL_MASK		(0x3 << 3)
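The exynos_dp_init_analog_func() change above no longer just powers the PLL up; it polls the lock status in a bounded loop with a short sleep between reads. A minimal, hypothetical version of that bounded-poll idiom (register bit and loop count are assumptions, not the driver's real values):

#include <linux/delay.h>
#include <linux/device.h>
#include <linux/errno.h>
#include <linux/io.h>

#define EXAMPLE_TIMEOUT_LOOPS	100	/* assumed bound, like DP_TIMEOUT_LOOP_COUNT */
#define EXAMPLE_READY_BIT	(1 << 0)

static int example_wait_ready(struct device *dev, void __iomem *status_reg)
{
	int timeout_loop = 0;

	while (!(readl(status_reg) & EXAMPLE_READY_BIT)) {
		if (++timeout_loop > EXAMPLE_TIMEOUT_LOOPS) {
			dev_err(dev, "timed out waiting for ready bit\n");
			return -ETIMEDOUT;
		}
		usleep_range(10, 20);	/* sleep between polls instead of spinning */
	}

	return 0;
}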
diff --git a/drivers/video/exynos/exynos_mipi_dsi.c b/drivers/video/exynos/exynos_mipi_dsi.c
index 557091d..6c1f5c3 100644
--- a/drivers/video/exynos/exynos_mipi_dsi.c
+++ b/drivers/video/exynos/exynos_mipi_dsi.c
@@ -58,7 +58,7 @@
 }
 
 static struct regulator_bulk_data supplies[] = {
-	{ .supply = "vdd10", },
+	{ .supply = "vdd11", },
 	{ .supply = "vdd18", },
 };
 
@@ -102,6 +102,8 @@
 	/* set display timing. */
 	exynos_mipi_dsi_set_display_mode(dsim, dsim->dsim_config);
 
+	exynos_mipi_dsi_init_interrupt(dsim);
+
 	/*
 	 * data from the display controller (FIMD) is transferred in video mode
 	 * but in command mode, all settings are updated to registers.
@@ -413,27 +415,30 @@
 		goto err_platform_get_irq;
 	}
 
+	init_completion(&dsim_wr_comp);
+	init_completion(&dsim_rd_comp);
+	platform_set_drvdata(pdev, dsim);
+
 	ret = request_irq(dsim->irq, exynos_mipi_dsi_interrupt_handler,
-			IRQF_SHARED, pdev->name, dsim);
+			IRQF_SHARED, dev_name(&pdev->dev), dsim);
 	if (ret != 0) {
 		dev_err(&pdev->dev, "failed to request dsim irq\n");
 		ret = -EINVAL;
 		goto err_bind;
 	}
 
-	init_completion(&dsim_wr_comp);
-	init_completion(&dsim_rd_comp);
-
-	/* enable interrupt */
+	/* enable interrupts */
 	exynos_mipi_dsi_init_interrupt(dsim);
 
 	/* initialize mipi-dsi client(lcd panel). */
 	if (dsim_ddi->dsim_lcd_drv && dsim_ddi->dsim_lcd_drv->probe)
 		dsim_ddi->dsim_lcd_drv->probe(dsim_ddi->dsim_lcd_dev);
 
-	/* in case that mipi got enabled at bootloader. */
-	if (dsim_pd->enabled)
-		goto out;
+	/* in case mipi-dsi has been enabled by bootloader */
+	if (dsim_pd->enabled) {
+		exynos_mipi_regulator_enable(dsim);
+		goto done;
+	}
 
 	/* lcd panel power on. */
 	if (dsim_ddi->dsim_lcd_drv && dsim_ddi->dsim_lcd_drv->power_on)
@@ -453,12 +458,11 @@
 
 	dsim->suspended = false;
 
-out:
+done:
 	platform_set_drvdata(pdev, dsim);
 
-	dev_dbg(&pdev->dev, "mipi-dsi driver(%s mode) has been probed.\n",
-		(dsim_config->e_interface == DSIM_COMMAND) ?
-			"CPU" : "RGB");
+	dev_dbg(&pdev->dev, "%s() completed successfully (%s mode)\n", __func__,
+		dsim_config->e_interface == DSIM_COMMAND ? "CPU" : "RGB");
 
 	return 0;
 
@@ -515,10 +519,10 @@
 	return 0;
 }
 
-#ifdef CONFIG_PM
-static int exynos_mipi_dsi_suspend(struct platform_device *pdev,
-		pm_message_t state)
+#ifdef CONFIG_PM_SLEEP
+static int exynos_mipi_dsi_suspend(struct device *dev)
 {
+	struct platform_device *pdev = to_platform_device(dev);
 	struct mipi_dsim_device *dsim = platform_get_drvdata(pdev);
 	struct mipi_dsim_lcd_driver *client_drv = dsim->dsim_lcd_drv;
 	struct mipi_dsim_lcd_device *client_dev = dsim->dsim_lcd_dev;
@@ -544,8 +548,9 @@
 	return 0;
 }
 
-static int exynos_mipi_dsi_resume(struct platform_device *pdev)
+static int exynos_mipi_dsi_resume(struct device *dev)
 {
+	struct platform_device *pdev = to_platform_device(dev);
 	struct mipi_dsim_device *dsim = platform_get_drvdata(pdev);
 	struct mipi_dsim_lcd_driver *client_drv = dsim->dsim_lcd_drv;
 	struct mipi_dsim_lcd_device *client_dev = dsim->dsim_lcd_dev;
@@ -577,19 +582,19 @@
 
 	return 0;
 }
-#else
-#define exynos_mipi_dsi_suspend NULL
-#define exynos_mipi_dsi_resume NULL
 #endif
 
+static const struct dev_pm_ops exynos_mipi_dsi_pm_ops = {
+	SET_SYSTEM_SLEEP_PM_OPS(exynos_mipi_dsi_suspend, exynos_mipi_dsi_resume)
+};
+
 static struct platform_driver exynos_mipi_dsi_driver = {
 	.probe = exynos_mipi_dsi_probe,
 	.remove = __devexit_p(exynos_mipi_dsi_remove),
-	.suspend = exynos_mipi_dsi_suspend,
-	.resume = exynos_mipi_dsi_resume,
 	.driver = {
 		   .name = "exynos-mipi-dsim",
 		   .owner = THIS_MODULE,
+		   .pm = &exynos_mipi_dsi_pm_ops,
 	},
 };
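The exynos_mipi_dsi.c change above drops the legacy platform_driver .suspend/.resume hooks in favour of a dev_pm_ops table, which is why the callbacks now take a struct device and sit under CONFIG_PM_SLEEP. A hedged sketch of the same conversion for a made-up driver:

#include <linux/device.h>
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/pm.h>

#ifdef CONFIG_PM_SLEEP
static int example_suspend(struct device *dev)
{
	/* dev_pm_ops callbacks receive a struct device, not a
	 * platform_device; convert if platform data is needed */
	struct platform_device *pdev = to_platform_device(dev);

	dev_dbg(dev, "suspending %s\n", pdev->name);
	return 0;
}

static int example_resume(struct device *dev)
{
	dev_dbg(dev, "resuming\n");
	return 0;
}
#endif

/* SET_SYSTEM_SLEEP_PM_OPS() expands to nothing when CONFIG_PM_SLEEP is off */
static const struct dev_pm_ops example_pm_ops = {
	SET_SYSTEM_SLEEP_PM_OPS(example_suspend, example_resume)
};

static struct platform_driver example_driver = {
	.driver = {
		.name	= "example-device",
		.owner	= THIS_MODULE,
		.pm	= &example_pm_ops,	/* replaces .suspend/.resume */
	},
};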
 
diff --git a/drivers/video/exynos/exynos_mipi_dsi_common.c b/drivers/video/exynos/exynos_mipi_dsi_common.c
index 14909c1..47b533a 100644
--- a/drivers/video/exynos/exynos_mipi_dsi_common.c
+++ b/drivers/video/exynos/exynos_mipi_dsi_common.c
@@ -76,33 +76,25 @@
 
 irqreturn_t exynos_mipi_dsi_interrupt_handler(int irq, void *dev_id)
 {
-	unsigned int intsrc = 0;
-	unsigned int intmsk = 0;
-	struct mipi_dsim_device *dsim = NULL;
+	struct mipi_dsim_device *dsim = dev_id;
+	unsigned int intsrc, intmsk;
 
-	dsim = dev_id;
-	if (!dsim) {
-		dev_dbg(dsim->dev, KERN_ERR "%s:error: wrong parameter\n",
-							__func__);
-		return IRQ_HANDLED;
+	if (dsim == NULL) {
+		dev_err(dsim->dev, "%s: wrong parameter\n", __func__);
+		return IRQ_NONE;
 	}
 
 	intsrc = exynos_mipi_dsi_read_interrupt(dsim);
 	intmsk = exynos_mipi_dsi_read_interrupt_mask(dsim);
+	intmsk = ~intmsk & intsrc;
 
-	intmsk = ~(intmsk) & intsrc;
-
-	switch (intmsk) {
-	case INTMSK_RX_DONE:
+	if (intsrc & INTMSK_RX_DONE) {
 		complete(&dsim_rd_comp);
 		dev_dbg(dsim->dev, "MIPI INTMSK_RX_DONE\n");
-		break;
-	case INTMSK_FIFO_EMPTY:
+	}
+	if (intsrc & INTMSK_FIFO_EMPTY) {
 		complete(&dsim_wr_comp);
 		dev_dbg(dsim->dev, "MIPI INTMSK_FIFO_EMPTY\n");
-		break;
-	default:
-		break;
 	}
 
 	exynos_mipi_dsi_clear_interrupt(dsim, intmsk);
@@ -738,11 +730,11 @@
 		if (dsim_config->auto_vertical_cnt == 0) {
 			exynos_mipi_dsi_set_main_disp_vporch(dsim,
 				dsim_config->cmd_allow,
-				timing->upper_margin,
-				timing->lower_margin);
+				timing->lower_margin,
+				timing->upper_margin);
 			exynos_mipi_dsi_set_main_disp_hporch(dsim,
-				timing->left_margin,
-				timing->right_margin);
+				timing->right_margin,
+				timing->left_margin);
 			exynos_mipi_dsi_set_main_disp_sync_area(dsim,
 				timing->vsync_len,
 				timing->hsync_len);
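The interrupt-handler rewrite in exynos_mipi_dsi_common.c above replaces the switch on the combined mask with independent per-bit checks, so several pending sources are handled in one invocation, and a spurious call can return IRQ_NONE. A rough sketch of that shape, with made-up register offsets, bits and driver data:

#include <linux/completion.h>
#include <linux/interrupt.h>
#include <linux/io.h>

#define EXAMPLE_INTSRC		0x00	/* hypothetical status register */
#define EXAMPLE_INTMSK		0x04	/* hypothetical mask register */
#define EXAMPLE_INT_RX_DONE	(1 << 0)
#define EXAMPLE_INT_TX_EMPTY	(1 << 1)

struct example_dev {
	void __iomem		*regs;
	struct completion	rx_done;
	struct completion	tx_done;
};

static irqreturn_t example_irq_handler(int irq, void *dev_id)
{
	struct example_dev *edev = dev_id;
	u32 status, mask, pending;

	status = readl(edev->regs + EXAMPLE_INTSRC);
	mask = readl(edev->regs + EXAMPLE_INTMSK);
	pending = status & ~mask;	/* only sources that are not masked */

	if (!pending)
		return IRQ_NONE;	/* not ours */

	/* handle each source independently; several may be set at once */
	if (pending & EXAMPLE_INT_RX_DONE)
		complete(&edev->rx_done);
	if (pending & EXAMPLE_INT_TX_EMPTY)
		complete(&edev->tx_done);

	writel(pending, edev->regs + EXAMPLE_INTSRC);	/* ack what was handled */
	return IRQ_HANDLED;
}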
diff --git a/drivers/video/exynos/s6e8ax0.c b/drivers/video/exynos/s6e8ax0.c
index 4aa9ac6..05d080b 100644
--- a/drivers/video/exynos/s6e8ax0.c
+++ b/drivers/video/exynos/s6e8ax0.c
@@ -293,9 +293,20 @@
 		0x6e, 0x00, 0x00, 0x00, 0x02, 0x08, 0x08, 0x23, 0x23, 0xc0,
 		0xc8, 0x08, 0x48, 0xc1, 0x00, 0xc1, 0xff, 0xff, 0xc8
 	};
+	static const unsigned char data_to_send_panel_reverse[] = {
+		0xf8, 0x19, 0x35, 0x00, 0x00, 0x00, 0x93, 0x00, 0x3c, 0x7d,
+		0x08, 0x27, 0x7d, 0x3f, 0x00, 0x00, 0x00, 0x20, 0x04, 0x08,
+		0x6e, 0x00, 0x00, 0x00, 0x02, 0x08, 0x08, 0x23, 0x23, 0xc0,
+		0xc1, 0x01, 0x41, 0xc1, 0x00, 0xc1, 0xf6, 0xf6, 0xc1
+	};
 
-	ops->cmd_write(lcd_to_master(lcd), MIPI_DSI_DCS_LONG_WRITE,
-		data_to_send, ARRAY_SIZE(data_to_send));
+	if (lcd->dsim_dev->panel_reverse)
+		ops->cmd_write(lcd_to_master(lcd), MIPI_DSI_DCS_LONG_WRITE,
+				data_to_send_panel_reverse,
+				ARRAY_SIZE(data_to_send_panel_reverse));
+	else
+		ops->cmd_write(lcd_to_master(lcd), MIPI_DSI_DCS_LONG_WRITE,
+				data_to_send, ARRAY_SIZE(data_to_send));
 }
 
 static void s6e8ax0_display_cond(struct s6e8ax0 *lcd)
diff --git a/drivers/video/fb_defio.c b/drivers/video/fb_defio.c
index c27e153..1ddeb11 100644
--- a/drivers/video/fb_defio.c
+++ b/drivers/video/fb_defio.c
@@ -23,7 +23,7 @@
 #include <linux/rmap.h>
 #include <linux/pagemap.h>
 
-struct page *fb_deferred_io_page(struct fb_info *info, unsigned long offs)
+static struct page *fb_deferred_io_page(struct fb_info *info, unsigned long offs)
 {
 	void *screen_base = (void __force *) info->screen_base;
 	struct page *page;
@@ -107,6 +107,10 @@
 	/* protect against the workqueue changing the page list */
 	mutex_lock(&fbdefio->lock);
 
+	/* first write in this cycle, notify the driver */
+	if (fbdefio->first_io && list_empty(&fbdefio->pagelist))
+		fbdefio->first_io(info);
+
 	/*
 	 * We want the page to remain locked from ->page_mkwrite until
 	 * the PTE is marked dirty to avoid page_mkclean() being called
diff --git a/drivers/video/fbmem.c b/drivers/video/fbmem.c
index c6ce416..0dff12a 100644
--- a/drivers/video/fbmem.c
+++ b/drivers/video/fbmem.c
@@ -1046,20 +1046,29 @@
 int
 fb_blank(struct fb_info *info, int blank)
 {	
- 	int ret = -EINVAL;
+	struct fb_event event;
+	int ret = -EINVAL, early_ret;
 
  	if (blank > FB_BLANK_POWERDOWN)
  		blank = FB_BLANK_POWERDOWN;
 
+	event.info = info;
+	event.data = &blank;
+
+	early_ret = fb_notifier_call_chain(FB_EARLY_EVENT_BLANK, &event);
+
 	if (info->fbops->fb_blank)
  		ret = info->fbops->fb_blank(blank, info);
 
- 	if (!ret) {
-		struct fb_event event;
-
-		event.info = info;
-		event.data = &blank;
+	if (!ret)
 		fb_notifier_call_chain(FB_EVENT_BLANK, &event);
+	else {
+		/*
+		 * if fb_blank() failed, revert the effects of
+		 * the early blank event.
+		 */
+		if (!early_ret)
+			fb_notifier_call_chain(FB_R_EARLY_EVENT_BLANK, &event);
 	}
 
  	return ret;
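The fb_blank() change above adds an FB_EARLY_EVENT_BLANK notification before the driver's fb_blank hook runs, with FB_R_EARLY_EVENT_BLANK sent to undo it if the hook fails; FB_EVENT_BLANK still fires only on success. A hedged sketch of how a notifier client might distinguish the two (handler and registration names are illustrative):

#include <linux/fb.h>
#include <linux/notifier.h>
#include <linux/printk.h>

static int example_fb_notify(struct notifier_block *nb, unsigned long action,
			     void *data)
{
	struct fb_event *event = data;
	int blank = *(int *)event->data;

	switch (action) {
	case FB_EARLY_EVENT_BLANK:
		/* before fbops->fb_blank(); may be reverted by
		 * FB_R_EARLY_EVENT_BLANK if that call fails */
		break;
	case FB_EVENT_BLANK:
		/* only after fbops->fb_blank() succeeded */
		pr_debug("blank state is now %d\n", blank);
		break;
	}

	return NOTIFY_OK;
}

static struct notifier_block example_fb_nb = {
	.notifier_call = example_fb_notify,
};

/* registered elsewhere with: fb_register_client(&example_fb_nb); */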
diff --git a/drivers/video/fbsysfs.c b/drivers/video/fbsysfs.c
index 67afa9c..a55e366 100644
--- a/drivers/video/fbsysfs.c
+++ b/drivers/video/fbsysfs.c
@@ -80,6 +80,8 @@
  */
 void framebuffer_release(struct fb_info *info)
 {
+	if (!info)
+		return;
 	kfree(info->apertures);
 	kfree(info);
 }
diff --git a/drivers/video/fsl-diu-fb.c b/drivers/video/fsl-diu-fb.c
index 6af3f16..458c006 100644
--- a/drivers/video/fsl-diu-fb.c
+++ b/drivers/video/fsl-diu-fb.c
@@ -834,7 +834,6 @@
 	diu_ops.set_pixel_clock(var->pixclock);
 
 	out_be32(&hw->syn_pol, 0);	/* SYNC SIGNALS POLARITY */
-	out_be32(&hw->thresholds, 0x00037800); /* The Thresholds */
 	out_be32(&hw->int_status, 0);	/* INTERRUPT STATUS */
 	out_be32(&hw->plut, 0x01F5F666);
 
diff --git a/drivers/video/imxfb.c b/drivers/video/imxfb.c
index f135dbe..caad368 100644
--- a/drivers/video/imxfb.c
+++ b/drivers/video/imxfb.c
@@ -131,7 +131,9 @@
 struct imxfb_info {
 	struct platform_device  *pdev;
 	void __iomem		*regs;
-	struct clk		*clk;
+	struct clk		*clk_ipg;
+	struct clk		*clk_ahb;
+	struct clk		*clk_per;
 
 	/*
 	 * These are the addresses we mapped
@@ -340,7 +342,7 @@
 
 	pr_debug("var->bits_per_pixel=%d\n", var->bits_per_pixel);
 
-	lcd_clk = clk_get_rate(fbi->clk);
+	lcd_clk = clk_get_rate(fbi->clk_per);
 
 	tmp = var->pixclock * (unsigned long long)lcd_clk;
 
@@ -455,11 +457,17 @@
 
 	fbi->pwmr = (fbi->pwmr & ~0xFF) | brightness;
 
-	if (bl->props.fb_blank != FB_BLANK_UNBLANK)
-		clk_enable(fbi->clk);
+	if (bl->props.fb_blank != FB_BLANK_UNBLANK) {
+		clk_prepare_enable(fbi->clk_ipg);
+		clk_prepare_enable(fbi->clk_ahb);
+		clk_prepare_enable(fbi->clk_per);
+	}
 	writel(fbi->pwmr, fbi->regs + LCDC_PWMR);
-	if (bl->props.fb_blank != FB_BLANK_UNBLANK)
-		clk_disable(fbi->clk);
+	if (bl->props.fb_blank != FB_BLANK_UNBLANK) {
+		clk_disable_unprepare(fbi->clk_per);
+		clk_disable_unprepare(fbi->clk_ahb);
+		clk_disable_unprepare(fbi->clk_ipg);
+	}
 
 	return 0;
 }
@@ -522,7 +530,9 @@
 	 */
 	writel(RMCR_LCDC_EN_MX1, fbi->regs + LCDC_RMCR);
 
-	clk_enable(fbi->clk);
+	clk_prepare_enable(fbi->clk_ipg);
+	clk_prepare_enable(fbi->clk_ahb);
+	clk_prepare_enable(fbi->clk_per);
 
 	if (fbi->backlight_power)
 		fbi->backlight_power(1);
@@ -539,7 +549,9 @@
 	if (fbi->lcd_power)
 		fbi->lcd_power(0);
 
-	clk_disable(fbi->clk);
+	clk_disable_unprepare(fbi->clk_per);
+	clk_disable_unprepare(fbi->clk_ipg);
+	clk_disable_unprepare(fbi->clk_ahb);
 
 	writel(0, fbi->regs + LCDC_RMCR);
 }
@@ -770,10 +782,21 @@
 		goto failed_req;
 	}
 
-	fbi->clk = clk_get(&pdev->dev, NULL);
-	if (IS_ERR(fbi->clk)) {
-		ret = PTR_ERR(fbi->clk);
-		dev_err(&pdev->dev, "unable to get clock: %d\n", ret);
+	fbi->clk_ipg = devm_clk_get(&pdev->dev, "ipg");
+	if (IS_ERR(fbi->clk_ipg)) {
+		ret = PTR_ERR(fbi->clk_ipg);
+		goto failed_getclock;
+	}
+
+	fbi->clk_ahb = devm_clk_get(&pdev->dev, "ahb");
+	if (IS_ERR(fbi->clk_ahb)) {
+		ret = PTR_ERR(fbi->clk_ahb);
+		goto failed_getclock;
+	}
+
+	fbi->clk_per = devm_clk_get(&pdev->dev, "per");
+	if (IS_ERR(fbi->clk_per)) {
+		ret = PTR_ERR(fbi->clk_per);
 		goto failed_getclock;
 	}
 
@@ -858,7 +881,6 @@
 failed_map:
 	iounmap(fbi->regs);
 failed_ioremap:
-	clk_put(fbi->clk);
 failed_getclock:
 	release_mem_region(res->start, resource_size(res));
 failed_req:
@@ -895,8 +917,6 @@
 
 	iounmap(fbi->regs);
 	release_mem_region(res->start, resource_size(res));
-	clk_disable(fbi->clk);
-	clk_put(fbi->clk);
 
 	platform_set_drvdata(pdev, NULL);
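The imxfb conversion above splits the single clock into the named ipg/ahb/per clocks and switches from clk_enable()/clk_disable() to clk_prepare_enable()/clk_disable_unprepare(), as the common clock framework requires. A simplified sketch of that pattern (the clock names match the patch, the helper itself is made up):

#include <linux/clk.h>
#include <linux/err.h>
#include <linux/platform_device.h>

static int example_clocks_on(struct platform_device *pdev,
			     struct clk **ipg, struct clk **ahb,
			     struct clk **per)
{
	/* devm_clk_get(): no clk_put() needed in error or remove paths */
	*ipg = devm_clk_get(&pdev->dev, "ipg");
	if (IS_ERR(*ipg))
		return PTR_ERR(*ipg);

	*ahb = devm_clk_get(&pdev->dev, "ahb");
	if (IS_ERR(*ahb))
		return PTR_ERR(*ahb);

	*per = devm_clk_get(&pdev->dev, "per");
	if (IS_ERR(*per))
		return PTR_ERR(*per);

	/* prepare + enable in one call, as the common clock framework expects */
	clk_prepare_enable(*ipg);
	clk_prepare_enable(*ahb);
	clk_prepare_enable(*per);

	return 0;
}

static void example_clocks_off(struct clk *ipg, struct clk *ahb, struct clk *per)
{
	/* disable in reverse order of enabling */
	clk_disable_unprepare(per);
	clk_disable_unprepare(ahb);
	clk_disable_unprepare(ipg);
}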
 
diff --git a/drivers/video/intelfb/intelfbdrv.c b/drivers/video/intelfb/intelfbdrv.c
index 02fd226..bdcbfba 100644
--- a/drivers/video/intelfb/intelfbdrv.c
+++ b/drivers/video/intelfb/intelfbdrv.c
@@ -680,6 +680,7 @@
 		 + dinfo->fb.size);
 	if (!dinfo->aperture.virtual) {
 		ERR_MSG("Cannot remap FB region.\n");
+		agp_backend_release(bridge);
 		cleanup(dinfo);
 		return -ENODEV;
 	}
@@ -689,6 +690,7 @@
 					      INTEL_REG_SIZE);
 	if (!dinfo->mmio_base) {
 		ERR_MSG("Cannot remap MMIO region.\n");
+		agp_backend_release(bridge);
 		cleanup(dinfo);
 		return -ENODEV;
 	}
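The intelfbdrv fix above makes the two remap-failure paths release the AGP backend that was acquired earlier, instead of leaking the acquisition. A small, hypothetical illustration of keeping agp_backend_acquire()/agp_backend_release() balanced on every exit:

#include <linux/agp_backend.h>
#include <linux/errno.h>
#include <linux/io.h>
#include <linux/pci.h>

static int example_map_regs(struct pci_dev *pdev)
{
	struct agp_bridge_data *bridge;
	void __iomem *mmio;

	bridge = agp_backend_acquire(pdev);
	if (!bridge)
		return -ENODEV;

	mmio = ioremap_nocache(pci_resource_start(pdev, 0),
			       pci_resource_len(pdev, 0));
	if (!mmio) {
		agp_backend_release(bridge);	/* balance the acquire on failure */
		return -ENODEV;
	}

	/* ... use the mapping ... */

	iounmap(mmio);
	agp_backend_release(bridge);
	return 0;
}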
diff --git a/drivers/video/matrox/matroxfb_maven.c b/drivers/video/matrox/matroxfb_maven.c
index 31b8f67..217678e 100644
--- a/drivers/video/matrox/matroxfb_maven.c
+++ b/drivers/video/matrox/matroxfb_maven.c
@@ -1243,6 +1243,7 @@
 
 	if (!i2c_check_functionality(adapter, I2C_FUNC_SMBUS_WRITE_WORD_DATA |
 					      I2C_FUNC_SMBUS_BYTE_DATA |
+					      I2C_FUNC_NOSTART |
 					      I2C_FUNC_PROTOCOL_MANGLING))
 		goto ERROR0;
 	if (!(data = kzalloc(sizeof(*data), GFP_KERNEL))) {
diff --git a/drivers/video/mb862xx/mb862xx-i2c.c b/drivers/video/mb862xx/mb862xx-i2c.c
index 273769b..c87e17a 100644
--- a/drivers/video/mb862xx/mb862xx-i2c.c
+++ b/drivers/video/mb862xx/mb862xx-i2c.c
@@ -68,7 +68,7 @@
 	return 1;
 }
 
-void mb862xx_i2c_stop(struct i2c_adapter *adap)
+static void mb862xx_i2c_stop(struct i2c_adapter *adap)
 {
 	struct mb862xxfb_par *par = adap->algo_data;
 
diff --git a/drivers/video/mb862xx/mb862xxfbdrv.c b/drivers/video/mb862xx/mb862xxfbdrv.c
index 11a7a33..00ce1f3 100644
--- a/drivers/video/mb862xx/mb862xxfbdrv.c
+++ b/drivers/video/mb862xx/mb862xxfbdrv.c
@@ -579,7 +579,7 @@
 
 static DEVICE_ATTR(dispregs, 0444, mb862xxfb_show_dispregs, NULL);
 
-irqreturn_t mb862xx_intr(int irq, void *dev_id)
+static irqreturn_t mb862xx_intr(int irq, void *dev_id)
 {
 	struct mb862xxfb_par *par = (struct mb862xxfb_par *) dev_id;
 	unsigned long reg_ist, mask;
diff --git a/drivers/video/mbx/mbxfb.c b/drivers/video/mbx/mbxfb.c
index 55bf619..85e4f44 100644
--- a/drivers/video/mbx/mbxfb.c
+++ b/drivers/video/mbx/mbxfb.c
@@ -950,7 +950,7 @@
 
 	mfbi->fb_virt_addr = ioremap_nocache(mfbi->fb_phys_addr,
 					     res_size(mfbi->fb_req));
-	if (!mfbi->reg_virt_addr) {
+	if (!mfbi->fb_virt_addr) {
 		dev_err(&dev->dev, "failed to ioremap frame buffer\n");
 		ret = -EINVAL;
 		goto err4;
@@ -1045,7 +1045,7 @@
 
 static struct platform_driver mbxfb_driver = {
 	.probe = mbxfb_probe,
-	.remove = mbxfb_remove,
+	.remove = __devexit_p(mbxfb_remove),
 	.suspend = mbxfb_suspend,
 	.resume = mbxfb_resume,
 	.driver = {
diff --git a/drivers/video/mxsfb.c b/drivers/video/mxsfb.c
index 6c6bc57..abbe691 100644
--- a/drivers/video/mxsfb.c
+++ b/drivers/video/mxsfb.c
@@ -889,6 +889,18 @@
 	return 0;
 }
 
+static void mxsfb_shutdown(struct platform_device *pdev)
+{
+	struct fb_info *fb_info = platform_get_drvdata(pdev);
+	struct mxsfb_info *host = to_imxfb_host(fb_info);
+
+	/*
+	 * Force stop the LCD controller as keeping it running during reboot
+	 * might interfere with the BootROM's boot mode pads sampling.
+	 */
+	writel(CTRL_RUN, host->base + LCDC_CTRL + REG_CLR);
+}
+
 static struct platform_device_id mxsfb_devtype[] = {
 	{
 		.name = "imx23-fb",
@@ -905,6 +917,7 @@
 static struct platform_driver mxsfb_driver = {
 	.probe = mxsfb_probe,
 	.remove = __devexit_p(mxsfb_remove),
+	.shutdown = mxsfb_shutdown,
 	.id_table = mxsfb_devtype,
 	.driver = {
 		   .name = DRIVER_NAME,
diff --git a/drivers/video/omap/Kconfig b/drivers/video/omap/Kconfig
index 1e7536d..b48f95f 100644
--- a/drivers/video/omap/Kconfig
+++ b/drivers/video/omap/Kconfig
@@ -39,14 +39,6 @@
 	  the Mobile Industry Processor Interface DBI-C/DCS
 	  specification. (Supported LCDs: Philips LPH8923, Sharp LS041Y3)
 
-config FB_OMAP_BOOTLOADER_INIT
-	bool "Check bootloader initialization"
-	depends on FB_OMAP
-	help
-	  Say Y here if you want to enable checking if the bootloader has
-	  already initialized the display controller. In this case the
-	  driver will skip the initialization.
-
 config FB_OMAP_CONSISTENT_DMA_SIZE
 	int "Consistent DMA memory size (MB)"
 	depends on FB_OMAP
diff --git a/drivers/video/omap2/displays/panel-acx565akm.c b/drivers/video/omap2/displays/panel-acx565akm.c
index d26f37a..ad741c3 100644
--- a/drivers/video/omap2/displays/panel-acx565akm.c
+++ b/drivers/video/omap2/displays/panel-acx565akm.c
@@ -532,6 +532,7 @@
 
 	/*------- Backlight control --------*/
 
+	memset(&props, 0, sizeof(props));
 	props.fb_blank = FB_BLANK_UNBLANK;
 	props.power = FB_BLANK_UNBLANK;
 	props.type = BACKLIGHT_RAW;
@@ -738,12 +739,6 @@
 	}
 }
 
-static void acx_panel_get_timings(struct omap_dss_device *dssdev,
-		struct omap_video_timings *timings)
-{
-	*timings = dssdev->panel.timings;
-}
-
 static int acx_panel_check_timings(struct omap_dss_device *dssdev,
 		struct omap_video_timings *timings)
 {
@@ -761,7 +756,6 @@
 	.resume		= acx_panel_resume,
 
 	.set_timings	= acx_panel_set_timings,
-	.get_timings	= acx_panel_get_timings,
 	.check_timings	= acx_panel_check_timings,
 
 	.get_recommended_bpp = acx_get_recommended_bpp,
diff --git a/drivers/video/omap2/displays/panel-generic-dpi.c b/drivers/video/omap2/displays/panel-generic-dpi.c
index 30fe4df..e42f9dc 100644
--- a/drivers/video/omap2/displays/panel-generic-dpi.c
+++ b/drivers/video/omap2/displays/panel-generic-dpi.c
@@ -386,6 +386,106 @@
 
 		.name			= "innolux_at080tn52",
 	},
+
+	/* Mitsubishi AA084SB01 */
+	{
+		{
+			.x_res		= 800,
+			.y_res		= 600,
+			.pixel_clock	= 40000,
+
+			.hsw		= 1,
+			.hfp		= 254,
+			.hbp		= 1,
+
+			.vsw		= 1,
+			.vfp		= 26,
+			.vbp		= 1,
+		},
+		.config			= OMAP_DSS_LCD_TFT,
+		.name			= "mitsubishi_aa084sb01",
+	},
+	/* EDT ET0500G0DH6 */
+	{
+		{
+			.x_res		= 800,
+			.y_res		= 480,
+			.pixel_clock	= 33260,
+
+			.hsw		= 128,
+			.hfp		= 216,
+			.hbp		= 40,
+
+			.vsw		= 2,
+			.vfp		= 35,
+			.vbp		= 10,
+		},
+		.config			= OMAP_DSS_LCD_TFT,
+		.name			= "edt_et0500g0dh6",
+	},
+
+	/* Prime-View PD050VL1 */
+	{
+		{
+			.x_res		= 640,
+			.y_res		= 480,
+
+			.pixel_clock	= 25000,
+
+			.hsw		= 96,
+			.hfp		= 18,
+			.hbp		= 46,
+
+			.vsw		= 2,
+			.vfp		= 10,
+			.vbp		= 33,
+		},
+		.config			= OMAP_DSS_LCD_TFT | OMAP_DSS_LCD_IVS |
+					  OMAP_DSS_LCD_IHS | OMAP_DSS_LCD_IPC,
+		.name			= "primeview_pd050vl1",
+	},
+
+	/* Prime-View PM070WL4 */
+	{
+		{
+			.x_res		= 800,
+			.y_res		= 480,
+
+			.pixel_clock	= 32000,
+
+			.hsw		= 128,
+			.hfp		= 42,
+			.hbp		= 86,
+
+			.vsw		= 2,
+			.vfp		= 10,
+			.vbp		= 33,
+		},
+		.config			= OMAP_DSS_LCD_TFT | OMAP_DSS_LCD_IVS |
+					  OMAP_DSS_LCD_IHS | OMAP_DSS_LCD_IPC,
+		.name			= "primeview_pm070wl4",
+	},
+
+	/* Prime-View PD104SLF */
+	{
+		{
+			.x_res		= 800,
+			.y_res		= 600,
+
+			.pixel_clock	= 40000,
+
+			.hsw		= 128,
+			.hfp		= 42,
+			.hbp		= 86,
+
+			.vsw		= 4,
+			.vfp		= 1,
+			.vbp		= 23,
+		},
+		.config			= OMAP_DSS_LCD_TFT | OMAP_DSS_LCD_IVS |
+					  OMAP_DSS_LCD_IHS | OMAP_DSS_LCD_IPC,
+		.name			= "primeview_pd104slf",
+	},
 };
 
 struct panel_drv_data {
@@ -549,12 +649,6 @@
 	dpi_set_timings(dssdev, timings);
 }
 
-static void generic_dpi_panel_get_timings(struct omap_dss_device *dssdev,
-		struct omap_video_timings *timings)
-{
-	*timings = dssdev->panel.timings;
-}
-
 static int generic_dpi_panel_check_timings(struct omap_dss_device *dssdev,
 		struct omap_video_timings *timings)
 {
@@ -571,7 +665,6 @@
 	.resume		= generic_dpi_panel_resume,
 
 	.set_timings	= generic_dpi_panel_set_timings,
-	.get_timings	= generic_dpi_panel_get_timings,
 	.check_timings	= generic_dpi_panel_check_timings,
 
 	.driver         = {
diff --git a/drivers/video/omap2/displays/panel-n8x0.c b/drivers/video/omap2/displays/panel-n8x0.c
index dc9408d..4a34cdc 100644
--- a/drivers/video/omap2/displays/panel-n8x0.c
+++ b/drivers/video/omap2/displays/panel-n8x0.c
@@ -610,12 +610,6 @@
 	return 0;
 }
 
-static void n8x0_panel_get_timings(struct omap_dss_device *dssdev,
-		struct omap_video_timings *timings)
-{
-	*timings = dssdev->panel.timings;
-}
-
 static void n8x0_panel_get_resolution(struct omap_dss_device *dssdev,
 		u16 *xres, u16 *yres)
 {
@@ -678,8 +672,6 @@
 	.get_resolution	= n8x0_panel_get_resolution,
 	.get_recommended_bpp = omapdss_default_get_recommended_bpp,
 
-	.get_timings	= n8x0_panel_get_timings,
-
 	.driver         = {
 		.name   = "n8x0_panel",
 		.owner  = THIS_MODULE,
diff --git a/drivers/video/omap2/displays/panel-taal.c b/drivers/video/omap2/displays/panel-taal.c
index b2dd88b..901576e 100644
--- a/drivers/video/omap2/displays/panel-taal.c
+++ b/drivers/video/omap2/displays/panel-taal.c
@@ -30,7 +30,6 @@
 #include <linux/gpio.h>
 #include <linux/workqueue.h>
 #include <linux/slab.h>
-#include <linux/regulator/consumer.h>
 #include <linux/mutex.h>
 
 #include <video/omapdss.h>
@@ -55,73 +54,6 @@
 
 static int taal_panel_reset(struct omap_dss_device *dssdev);
 
-struct panel_regulator {
-	struct regulator *regulator;
-	const char *name;
-	int min_uV;
-	int max_uV;
-};
-
-static void free_regulators(struct panel_regulator *regulators, int n)
-{
-	int i;
-
-	for (i = 0; i < n; i++) {
-		/* disable/put in reverse order */
-		regulator_disable(regulators[n - i - 1].regulator);
-		regulator_put(regulators[n - i - 1].regulator);
-	}
-}
-
-static int init_regulators(struct omap_dss_device *dssdev,
-			struct panel_regulator *regulators, int n)
-{
-	int r, i, v;
-
-	for (i = 0; i < n; i++) {
-		struct regulator *reg;
-
-		reg = regulator_get(&dssdev->dev, regulators[i].name);
-		if (IS_ERR(reg)) {
-			dev_err(&dssdev->dev, "failed to get regulator %s\n",
-				regulators[i].name);
-			r = PTR_ERR(reg);
-			goto err;
-		}
-
-		/* FIXME: better handling of fixed vs. variable regulators */
-		v = regulator_get_voltage(reg);
-		if (v < regulators[i].min_uV || v > regulators[i].max_uV) {
-			r = regulator_set_voltage(reg, regulators[i].min_uV,
-						regulators[i].max_uV);
-			if (r) {
-				dev_err(&dssdev->dev,
-					"failed to set regulator %s voltage\n",
-					regulators[i].name);
-				regulator_put(reg);
-				goto err;
-			}
-		}
-
-		r = regulator_enable(reg);
-		if (r) {
-			dev_err(&dssdev->dev, "failed to enable regulator %s\n",
-				regulators[i].name);
-			regulator_put(reg);
-			goto err;
-		}
-
-		regulators[i].regulator = reg;
-	}
-
-	return 0;
-
-err:
-	free_regulators(regulators, i);
-
-	return r;
-}
-
 /**
  * struct panel_config - panel configuration
  * @name: panel name
@@ -150,8 +82,6 @@
 		unsigned int low;
 	} reset_sequence;
 
-	struct panel_regulator *regulators;
-	int num_regulators;
 };
 
 enum {
@@ -577,12 +507,6 @@
 	.update_status  = taal_bl_update_status,
 };
 
-static void taal_get_timings(struct omap_dss_device *dssdev,
-			struct omap_video_timings *timings)
-{
-	*timings = dssdev->panel.timings;
-}
-
 static void taal_get_resolution(struct omap_dss_device *dssdev,
 		u16 *xres, u16 *yres)
 {
@@ -602,7 +526,7 @@
 {
 	struct omap_dss_device *dssdev = to_dss_device(dev);
 	struct taal_data *td = dev_get_drvdata(&dssdev->dev);
-	u8 errors;
+	u8 errors = 0;
 	int r;
 
 	mutex_lock(&td->lock);
@@ -977,11 +901,6 @@
 
 	atomic_set(&td->do_update, 0);
 
-	r = init_regulators(dssdev, panel_config->regulators,
-			panel_config->num_regulators);
-	if (r)
-		goto err_reg;
-
 	td->workqueue = create_singlethread_workqueue("taal_esd");
 	if (td->workqueue == NULL) {
 		dev_err(&dssdev->dev, "can't create ESD workqueue\n");
@@ -1087,8 +1006,6 @@
 err_rst_gpio:
 	destroy_workqueue(td->workqueue);
 err_wq:
-	free_regulators(panel_config->regulators, panel_config->num_regulators);
-err_reg:
 	kfree(td);
 err:
 	return r;
@@ -1125,9 +1042,6 @@
 	/* reset, to be sure that the panel is in a valid state */
 	taal_hw_reset(dssdev);
 
-	free_regulators(td->panel_config->regulators,
-			td->panel_config->num_regulators);
-
 	if (gpio_is_valid(panel_data->reset_gpio))
 		gpio_free(panel_data->reset_gpio);
 
@@ -1909,8 +1823,6 @@
 	.run_test	= taal_run_test,
 	.memory_read	= taal_memory_read,
 
-	.get_timings	= taal_get_timings,
-
 	.driver         = {
 		.name   = "taal",
 		.owner  = THIS_MODULE,
diff --git a/drivers/video/omap2/displays/panel-tfp410.c b/drivers/video/omap2/displays/panel-tfp410.c
index 52637fa..bff306e 100644
--- a/drivers/video/omap2/displays/panel-tfp410.c
+++ b/drivers/video/omap2/displays/panel-tfp410.c
@@ -47,13 +47,9 @@
 	struct mutex lock;
 
 	int pd_gpio;
-};
 
-static inline struct tfp410_platform_data
-*get_pdata(const struct omap_dss_device *dssdev)
-{
-	return dssdev->data;
-}
+	struct i2c_adapter *i2c_adapter;
+};
 
 static int tfp410_power_on(struct omap_dss_device *dssdev)
 {
@@ -68,7 +64,7 @@
 		goto err0;
 
 	if (gpio_is_valid(ddata->pd_gpio))
-		gpio_set_value(ddata->pd_gpio, 1);
+		gpio_set_value_cansleep(ddata->pd_gpio, 1);
 
 	return 0;
 err0:
@@ -83,18 +79,18 @@
 		return;
 
 	if (gpio_is_valid(ddata->pd_gpio))
-		gpio_set_value(ddata->pd_gpio, 0);
+		gpio_set_value_cansleep(ddata->pd_gpio, 0);
 
 	omapdss_dpi_display_disable(dssdev);
 }
 
 static int tfp410_probe(struct omap_dss_device *dssdev)
 {
-	struct tfp410_platform_data *pdata = get_pdata(dssdev);
 	struct panel_drv_data *ddata;
 	int r;
+	int i2c_bus_num;
 
-	ddata = kzalloc(sizeof(*ddata), GFP_KERNEL);
+	ddata = devm_kzalloc(&dssdev->dev, sizeof(*ddata), GFP_KERNEL);
 	if (!ddata)
 		return -ENOMEM;
 
@@ -104,10 +100,15 @@
 	ddata->dssdev = dssdev;
 	mutex_init(&ddata->lock);
 
-	if (pdata)
+	if (dssdev->data) {
+		struct tfp410_platform_data *pdata = dssdev->data;
+
 		ddata->pd_gpio = pdata->power_down_gpio;
-	else
+		i2c_bus_num = pdata->i2c_bus_num;
+	} else {
 		ddata->pd_gpio = -1;
+		i2c_bus_num = -1;
+	}
 
 	if (gpio_is_valid(ddata->pd_gpio)) {
 		r = gpio_request_one(ddata->pd_gpio, GPIOF_OUT_INIT_LOW,
@@ -115,13 +116,31 @@
 		if (r) {
 			dev_err(&dssdev->dev, "Failed to request PD GPIO %d\n",
 					ddata->pd_gpio);
-			ddata->pd_gpio = -1;
+			return r;
 		}
 	}
 
+	if (i2c_bus_num != -1) {
+		struct i2c_adapter *adapter;
+
+		adapter = i2c_get_adapter(i2c_bus_num);
+		if (!adapter) {
+			dev_err(&dssdev->dev, "Failed to get I2C adapter, bus %d\n",
+					i2c_bus_num);
+			r = -EINVAL;
+			goto err_i2c;
+		}
+
+		ddata->i2c_adapter = adapter;
+	}
+
 	dev_set_drvdata(&dssdev->dev, ddata);
 
 	return 0;
+err_i2c:
+	if (gpio_is_valid(ddata->pd_gpio))
+		gpio_free(ddata->pd_gpio);
+	return r;
 }
 
 static void __exit tfp410_remove(struct omap_dss_device *dssdev)
@@ -130,14 +149,15 @@
 
 	mutex_lock(&ddata->lock);
 
+	if (ddata->i2c_adapter)
+		i2c_put_adapter(ddata->i2c_adapter);
+
 	if (gpio_is_valid(ddata->pd_gpio))
 		gpio_free(ddata->pd_gpio);
 
 	dev_set_drvdata(&dssdev->dev, NULL);
 
 	mutex_unlock(&ddata->lock);
-
-	kfree(ddata);
 }
 
 static int tfp410_enable(struct omap_dss_device *dssdev)
@@ -269,27 +289,17 @@
 		u8 *edid, int len)
 {
 	struct panel_drv_data *ddata = dev_get_drvdata(&dssdev->dev);
-	struct tfp410_platform_data *pdata = get_pdata(dssdev);
-	struct i2c_adapter *adapter;
 	int r, l, bytes_read;
 
 	mutex_lock(&ddata->lock);
 
-	if (pdata->i2c_bus_num == 0) {
+	if (!ddata->i2c_adapter) {
 		r = -ENODEV;
 		goto err;
 	}
 
-	adapter = i2c_get_adapter(pdata->i2c_bus_num);
-	if (!adapter) {
-		dev_err(&dssdev->dev, "Failed to get I2C adapter, bus %d\n",
-				pdata->i2c_bus_num);
-		r = -EINVAL;
-		goto err;
-	}
-
 	l = min(EDID_LENGTH, len);
-	r = tfp410_ddc_read(adapter, edid, l, 0);
+	r = tfp410_ddc_read(ddata->i2c_adapter, edid, l, 0);
 	if (r)
 		goto err;
 
@@ -299,7 +309,7 @@
 	if (len > EDID_LENGTH && edid[0x7e] > 0) {
 		l = min(EDID_LENGTH, len - EDID_LENGTH);
 
-		r = tfp410_ddc_read(adapter, edid + EDID_LENGTH,
+		r = tfp410_ddc_read(ddata->i2c_adapter, edid + EDID_LENGTH,
 				l, EDID_LENGTH);
 		if (r)
 			goto err;
@@ -319,21 +329,15 @@
 static bool tfp410_detect(struct omap_dss_device *dssdev)
 {
 	struct panel_drv_data *ddata = dev_get_drvdata(&dssdev->dev);
-	struct tfp410_platform_data *pdata = get_pdata(dssdev);
-	struct i2c_adapter *adapter;
 	unsigned char out;
 	int r;
 
 	mutex_lock(&ddata->lock);
 
-	if (pdata->i2c_bus_num == 0)
+	if (!ddata->i2c_adapter)
 		goto out;
 
-	adapter = i2c_get_adapter(pdata->i2c_bus_num);
-	if (!adapter)
-		goto out;
-
-	r = tfp410_ddc_read(adapter, &out, 1, 0);
+	r = tfp410_ddc_read(ddata->i2c_adapter, &out, 1, 0);
 
 	mutex_unlock(&ddata->lock);
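The tfp410 change above looks the DDC bus up once in probe() with i2c_get_adapter() and stores it in the driver data, so the EDID read and detect() paths no longer grab (and previously leaked) the adapter on every call; remove() drops the reference with i2c_put_adapter(). A hedged sketch of that ownership pattern:

#include <linux/device.h>
#include <linux/i2c.h>

/* hypothetical probe-time helper: take one reference for the driver's lifetime */
static struct i2c_adapter *example_get_ddc_bus(struct device *dev, int bus_num)
{
	struct i2c_adapter *adapter;

	if (bus_num < 0)
		return NULL;		/* board provides no DDC bus */

	adapter = i2c_get_adapter(bus_num);	/* takes a reference */
	if (!adapter)
		dev_err(dev, "failed to get I2C adapter, bus %d\n", bus_num);

	return adapter;
}

/* at remove time, the reference must be dropped again:
 *	if (ddata->i2c_adapter)
 *		i2c_put_adapter(ddata->i2c_adapter);
 */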
 
diff --git a/drivers/video/omap2/displays/panel-tpo-td043mtea1.c b/drivers/video/omap2/displays/panel-tpo-td043mtea1.c
index 32f3fcd..4b6448b 100644
--- a/drivers/video/omap2/displays/panel-tpo-td043mtea1.c
+++ b/drivers/video/omap2/displays/panel-tpo-td043mtea1.c
@@ -272,13 +272,16 @@
 static int tpo_td043_power_on(struct tpo_td043_device *tpo_td043)
 {
 	int nreset_gpio = tpo_td043->nreset_gpio;
+	int r;
 
 	if (tpo_td043->powered_on)
 		return 0;
 
-	regulator_enable(tpo_td043->vcc_reg);
+	r = regulator_enable(tpo_td043->vcc_reg);
+	if (r != 0)
+		return r;
 
-	/* wait for regulator to stabilize */
+	/* wait for panel to stabilize */
 	msleep(160);
 
 	if (gpio_is_valid(nreset_gpio))
@@ -470,6 +473,18 @@
 		gpio_free(nreset_gpio);
 }
 
+static void tpo_td043_set_timings(struct omap_dss_device *dssdev,
+		struct omap_video_timings *timings)
+{
+	dpi_set_timings(dssdev, timings);
+}
+
+static int tpo_td043_check_timings(struct omap_dss_device *dssdev,
+		struct omap_video_timings *timings)
+{
+	return dpi_check_timings(dssdev, timings);
+}
+
 static struct omap_dss_driver tpo_td043_driver = {
 	.probe		= tpo_td043_probe,
 	.remove		= tpo_td043_remove,
@@ -481,6 +496,9 @@
 	.set_mirror	= tpo_td043_set_hmirror,
 	.get_mirror	= tpo_td043_get_hmirror,
 
+	.set_timings	= tpo_td043_set_timings,
+	.check_timings	= tpo_td043_check_timings,
+
 	.driver         = {
 		.name	= "tpo_td043mtea1_panel",
 		.owner  = THIS_MODULE,
diff --git a/drivers/video/omap2/dss/Kconfig b/drivers/video/omap2/dss/Kconfig
index 7be7c06..43324e5 100644
--- a/drivers/video/omap2/dss/Kconfig
+++ b/drivers/video/omap2/dss/Kconfig
@@ -68,6 +68,10 @@
 	  HDMI Interface. This adds the High Definition Multimedia Interface.
 	  See http://www.hdmi.org/ for HDMI specification.
 
+config OMAP4_DSS_HDMI_AUDIO
+	bool
+	depends on OMAP4_DSS_HDMI
+
 config OMAP2_DSS_SDI
 	bool "SDI support"
 	depends on ARCH_OMAP3
@@ -90,15 +94,6 @@
 
 	  See http://www.mipi.org/ for DSI specifications.
 
-config OMAP2_DSS_FAKE_VSYNC
-	bool "Fake VSYNC irq from manual update displays"
-	default n
-	help
-	  If this is selected, DSI will generate a fake DISPC VSYNC interrupt
-	  when DSI has sent a frame. This is only needed with DSI or RFBI
-	  displays using manual mode, and you want VSYNC to, for example,
-	  time animation.
-
 config OMAP2_DSS_MIN_FCK_PER_PCK
 	int "Minimum FCK/PCK ratio (for scaling)"
 	range 0 32
diff --git a/drivers/video/omap2/dss/apply.c b/drivers/video/omap2/dss/apply.c
index b10b3bc..ab22cc2 100644
--- a/drivers/video/omap2/dss/apply.c
+++ b/drivers/video/omap2/dss/apply.c
@@ -99,6 +99,11 @@
 
 	/* If true, a display is enabled using this manager */
 	bool enabled;
+
+	bool extra_info_dirty;
+	bool shadow_extra_info_dirty;
+
+	struct omap_video_timings timings;
 };
 
 static struct {
@@ -176,7 +181,7 @@
 }
 
 static int dss_check_settings_low(struct omap_overlay_manager *mgr,
-		struct omap_dss_device *dssdev, bool applying)
+		bool applying)
 {
 	struct omap_overlay_info *oi;
 	struct omap_overlay_manager_info *mi;
@@ -187,6 +192,9 @@
 
 	mp = get_mgr_priv(mgr);
 
+	if (!mp->enabled)
+		return 0;
+
 	if (applying && mp->user_info_dirty)
 		mi = &mp->user_info;
 	else
@@ -206,26 +214,24 @@
 		ois[ovl->id] = oi;
 	}
 
-	return dss_mgr_check(mgr, dssdev, mi, ois);
+	return dss_mgr_check(mgr, mi, &mp->timings, ois);
 }
 
 /*
  * check manager and overlay settings using overlay_info from data->info
  */
-static int dss_check_settings(struct omap_overlay_manager *mgr,
-		struct omap_dss_device *dssdev)
+static int dss_check_settings(struct omap_overlay_manager *mgr)
 {
-	return dss_check_settings_low(mgr, dssdev, false);
+	return dss_check_settings_low(mgr, false);
 }
 
 /*
  * check manager and overlay settings using overlay_info from ovl->info if
  * dirty and from data->info otherwise
  */
-static int dss_check_settings_apply(struct omap_overlay_manager *mgr,
-		struct omap_dss_device *dssdev)
+static int dss_check_settings_apply(struct omap_overlay_manager *mgr)
 {
-	return dss_check_settings_low(mgr, dssdev, true);
+	return dss_check_settings_low(mgr, true);
 }
 
 static bool need_isr(void)
@@ -261,6 +267,20 @@
 			if (mp->shadow_info_dirty)
 				return true;
 
+			/*
+			 * NOTE: we don't check extra_info flags for disabled
+			 * managers; once the manager is enabled, the extra_info
+			 * related manager changes will be taken in by HW.
+			 */
+
+			/* to write new values to registers */
+			if (mp->extra_info_dirty)
+				return true;
+
+			/* to set GO bit */
+			if (mp->shadow_extra_info_dirty)
+				return true;
+
 			list_for_each_entry(ovl, &mgr->overlays, list) {
 				struct ovl_priv_data *op;
 
@@ -305,7 +325,7 @@
 
 	mp = get_mgr_priv(mgr);
 
-	if (mp->shadow_info_dirty)
+	if (mp->shadow_info_dirty || mp->shadow_extra_info_dirty)
 		return true;
 
 	list_for_each_entry(ovl, &mgr->overlays, list) {
@@ -320,20 +340,16 @@
 /* returns true if an extra_info field is currently being updated */
 static bool extra_info_update_ongoing(void)
 {
-	const int num_ovls = omap_dss_get_num_overlays();
-	struct ovl_priv_data *op;
-	struct omap_overlay *ovl;
-	struct mgr_priv_data *mp;
+	const int num_mgrs = dss_feat_get_num_mgrs();
 	int i;
 
-	for (i = 0; i < num_ovls; ++i) {
-		ovl = omap_dss_get_overlay(i);
-		op = get_ovl_priv(ovl);
+	for (i = 0; i < num_mgrs; ++i) {
+		struct omap_overlay_manager *mgr;
+		struct omap_overlay *ovl;
+		struct mgr_priv_data *mp;
 
-		if (!ovl->manager)
-			continue;
-
-		mp = get_mgr_priv(ovl->manager);
+		mgr = omap_dss_get_overlay_manager(i);
+		mp = get_mgr_priv(mgr);
 
 		if (!mp->enabled)
 			continue;
@@ -341,8 +357,15 @@
 		if (!mp->updating)
 			continue;
 
-		if (op->extra_info_dirty || op->shadow_extra_info_dirty)
+		if (mp->extra_info_dirty || mp->shadow_extra_info_dirty)
 			return true;
+
+		list_for_each_entry(ovl, &mgr->overlays, list) {
+			struct ovl_priv_data *op = get_ovl_priv(ovl);
+
+			if (op->extra_info_dirty || op->shadow_extra_info_dirty)
+				return true;
+		}
 	}
 
 	return false;
@@ -525,11 +548,13 @@
 
 	oi = &op->info;
 
+	mp = get_mgr_priv(ovl->manager);
+
 	replication = dss_use_replication(ovl->manager->device, oi->color_mode);
 
 	ilace = ovl->manager->device->type == OMAP_DISPLAY_TYPE_VENC;
 
-	r = dispc_ovl_setup(ovl->id, oi, ilace, replication);
+	r = dispc_ovl_setup(ovl->id, oi, ilace, replication, &mp->timings);
 	if (r) {
 		/*
 		 * We can't do much here, as this function can be called from
@@ -543,8 +568,6 @@
 		return;
 	}
 
-	mp = get_mgr_priv(ovl->manager);
-
 	op->info_dirty = false;
 	if (mp->updating)
 		op->shadow_info_dirty = true;
@@ -601,6 +624,22 @@
 	}
 }
 
+static void dss_mgr_write_regs_extra(struct omap_overlay_manager *mgr)
+{
+	struct mgr_priv_data *mp = get_mgr_priv(mgr);
+
+	DSSDBGF("%d", mgr->id);
+
+	if (!mp->extra_info_dirty)
+		return;
+
+	dispc_mgr_set_timings(mgr->id, &mp->timings);
+
+	mp->extra_info_dirty = false;
+	if (mp->updating)
+		mp->shadow_extra_info_dirty = true;
+}
+
 static void dss_write_regs_common(void)
 {
 	const int num_mgrs = omap_dss_get_num_overlay_managers();
@@ -646,7 +685,7 @@
 		if (!mp->enabled || mgr_manual_update(mgr) || mp->busy)
 			continue;
 
-		r = dss_check_settings(mgr, mgr->device);
+		r = dss_check_settings(mgr);
 		if (r) {
 			DSSERR("cannot write registers for manager %s: "
 					"illegal configuration\n", mgr->name);
@@ -654,6 +693,7 @@
 		}
 
 		dss_mgr_write_regs(mgr);
+		dss_mgr_write_regs_extra(mgr);
 	}
 }
 
@@ -693,6 +733,7 @@
 
 	mp = get_mgr_priv(mgr);
 	mp->shadow_info_dirty = false;
+	mp->shadow_extra_info_dirty = false;
 
 	list_for_each_entry(ovl, &mgr->overlays, list) {
 		op = get_ovl_priv(ovl);
@@ -711,7 +752,7 @@
 
 	WARN_ON(mp->updating);
 
-	r = dss_check_settings(mgr, mgr->device);
+	r = dss_check_settings(mgr);
 	if (r) {
 		DSSERR("cannot start manual update: illegal configuration\n");
 		spin_unlock_irqrestore(&data_lock, flags);
@@ -719,6 +760,7 @@
 	}
 
 	dss_mgr_write_regs(mgr);
+	dss_mgr_write_regs_extra(mgr);
 
 	dss_write_regs_common();
 
@@ -857,7 +899,7 @@
 
 	spin_lock_irqsave(&data_lock, flags);
 
-	r = dss_check_settings_apply(mgr, mgr->device);
+	r = dss_check_settings_apply(mgr);
 	if (r) {
 		spin_unlock_irqrestore(&data_lock, flags);
 		DSSERR("failed to apply settings: illegal configuration.\n");
@@ -918,16 +960,13 @@
 		bool use_fifo_merge)
 {
 	struct ovl_priv_data *op = get_ovl_priv(ovl);
-	struct omap_dss_device *dssdev;
 	u32 fifo_low, fifo_high;
 
 	if (!op->enabled && !op->enabling)
 		return;
 
-	dssdev = ovl->manager->device;
-
 	dispc_ovl_compute_fifo_thresholds(ovl->id, &fifo_low, &fifo_high,
-			use_fifo_merge);
+			use_fifo_merge, ovl_manual_update(ovl));
 
 	dss_apply_ovl_fifo_thresholds(ovl, fifo_low, fifo_high);
 }
@@ -1050,7 +1089,7 @@
 
 	mp->enabled = true;
 
-	r = dss_check_settings(mgr, mgr->device);
+	r = dss_check_settings(mgr);
 	if (r) {
 		DSSERR("failed to enable manager %d: check_settings failed\n",
 				mgr->id);
@@ -1225,6 +1264,35 @@
 	return r;
 }
 
+static void dss_apply_mgr_timings(struct omap_overlay_manager *mgr,
+		struct omap_video_timings *timings)
+{
+	struct mgr_priv_data *mp = get_mgr_priv(mgr);
+
+	mp->timings = *timings;
+	mp->extra_info_dirty = true;
+}
+
+void dss_mgr_set_timings(struct omap_overlay_manager *mgr,
+		struct omap_video_timings *timings)
+{
+	unsigned long flags;
+
+	mutex_lock(&apply_lock);
+
+	spin_lock_irqsave(&data_lock, flags);
+
+	dss_apply_mgr_timings(mgr, timings);
+
+	dss_write_regs();
+	dss_set_go_bits();
+
+	spin_unlock_irqrestore(&data_lock, flags);
+
+	wait_pending_extra_info_updates();
+
+	mutex_unlock(&apply_lock);
+}
 
 int dss_ovl_set_info(struct omap_overlay *ovl,
 		struct omap_overlay_info *info)
@@ -1393,7 +1461,7 @@
 
 	op->enabling = true;
 
-	r = dss_check_settings(ovl->manager, ovl->manager->device);
+	r = dss_check_settings(ovl->manager);
 	if (r) {
 		DSSERR("failed to enable overlay %d: check_settings failed\n",
 				ovl->id);
diff --git a/drivers/video/omap2/dss/core.c b/drivers/video/omap2/dss/core.c
index e8a1207..5066eee 100644
--- a/drivers/video/omap2/dss/core.c
+++ b/drivers/video/omap2/dss/core.c
@@ -43,6 +43,8 @@
 
 	struct regulator *vdds_dsi_reg;
 	struct regulator *vdds_sdi_reg;
+
+	const char *default_display_name;
 } core;
 
 static char *def_disp_name;
@@ -54,9 +56,6 @@
 module_param_named(debug, dss_debug, bool, 0644);
 #endif
 
-static int omap_dss_register_device(struct omap_dss_device *);
-static void omap_dss_unregister_device(struct omap_dss_device *);
-
 /* REGULATORS */
 
 struct regulator *dss_get_vdds_dsi(void)
@@ -87,6 +86,51 @@
 	return reg;
 }
 
+int dss_get_ctx_loss_count(struct device *dev)
+{
+	struct omap_dss_board_info *board_data = core.pdev->dev.platform_data;
+	int cnt;
+
+	if (!board_data->get_context_loss_count)
+		return -ENOENT;
+
+	cnt = board_data->get_context_loss_count(dev);
+
+	WARN_ONCE(cnt < 0, "get_context_loss_count failed: %d\n", cnt);
+
+	return cnt;
+}
+
+int dss_dsi_enable_pads(int dsi_id, unsigned lane_mask)
+{
+	struct omap_dss_board_info *board_data = core.pdev->dev.platform_data;
+
+	if (!board_data->dsi_enable_pads)
+		return -ENOENT;
+
+	return board_data->dsi_enable_pads(dsi_id, lane_mask);
+}
+
+void dss_dsi_disable_pads(int dsi_id, unsigned lane_mask)
+{
+	struct omap_dss_board_info *board_data = core.pdev->dev.platform_data;
+
+	if (!board_data->dsi_enable_pads)
+		return;
+
+	return board_data->dsi_disable_pads(dsi_id, lane_mask);
+}
+
+int dss_set_min_bus_tput(struct device *dev, unsigned long tput)
+{
+	struct omap_dss_board_info *pdata = core.pdev->dev.platform_data;
+
+	if (pdata->set_min_bus_tput)
+		return pdata->set_min_bus_tput(dev, tput);
+	else
+		return 0;
+}
+
 #if defined(CONFIG_DEBUG_FS) && defined(CONFIG_OMAP2_DSS_DEBUG_SUPPORT)
 static int dss_debug_show(struct seq_file *s, void *unused)
 {
@@ -121,34 +165,6 @@
 	debugfs_create_file("clk", S_IRUGO, dss_debugfs_dir,
 			&dss_debug_dump_clocks, &dss_debug_fops);
 
-#ifdef CONFIG_OMAP2_DSS_COLLECT_IRQ_STATS
-	debugfs_create_file("dispc_irq", S_IRUGO, dss_debugfs_dir,
-			&dispc_dump_irqs, &dss_debug_fops);
-#endif
-
-#if defined(CONFIG_OMAP2_DSS_DSI) && defined(CONFIG_OMAP2_DSS_COLLECT_IRQ_STATS)
-	dsi_create_debugfs_files_irq(dss_debugfs_dir, &dss_debug_fops);
-#endif
-
-	debugfs_create_file("dss", S_IRUGO, dss_debugfs_dir,
-			&dss_dump_regs, &dss_debug_fops);
-	debugfs_create_file("dispc", S_IRUGO, dss_debugfs_dir,
-			&dispc_dump_regs, &dss_debug_fops);
-#ifdef CONFIG_OMAP2_DSS_RFBI
-	debugfs_create_file("rfbi", S_IRUGO, dss_debugfs_dir,
-			&rfbi_dump_regs, &dss_debug_fops);
-#endif
-#ifdef CONFIG_OMAP2_DSS_DSI
-	dsi_create_debugfs_files_reg(dss_debugfs_dir, &dss_debug_fops);
-#endif
-#ifdef CONFIG_OMAP2_DSS_VENC
-	debugfs_create_file("venc", S_IRUGO, dss_debugfs_dir,
-			&venc_dump_regs, &dss_debug_fops);
-#endif
-#ifdef CONFIG_OMAP4_DSS_HDMI
-	debugfs_create_file("hdmi", S_IRUGO, dss_debugfs_dir,
-			&hdmi_dump_regs, &dss_debug_fops);
-#endif
 	return 0;
 }
 
@@ -157,6 +173,19 @@
 	if (dss_debugfs_dir)
 		debugfs_remove_recursive(dss_debugfs_dir);
 }
+
+int dss_debugfs_create_file(const char *name, void (*write)(struct seq_file *))
+{
+	struct dentry *d;
+
+	d = debugfs_create_file(name, S_IRUGO, dss_debugfs_dir,
+			write, &dss_debug_fops);
+
+	if (IS_ERR(d))
+		return PTR_ERR(d);
+
+	return 0;
+}
 #else /* CONFIG_DEBUG_FS && CONFIG_OMAP2_DSS_DEBUG_SUPPORT */
 static inline int dss_initialize_debugfs(void)
 {
@@ -165,14 +194,17 @@
 static inline void dss_uninitialize_debugfs(void)
 {
 }
+int dss_debugfs_create_file(const char *name, void (*write)(struct seq_file *))
+{
+	return 0;
+}
 #endif /* CONFIG_DEBUG_FS && CONFIG_OMAP2_DSS_DEBUG_SUPPORT */
 
 /* PLATFORM DEVICE */
-static int omap_dss_probe(struct platform_device *pdev)
+static int __init omap_dss_probe(struct platform_device *pdev)
 {
 	struct omap_dss_board_info *pdata = pdev->dev.platform_data;
 	int r;
-	int i;
 
 	core.pdev = pdev;
 
@@ -187,28 +219,13 @@
 	if (r)
 		goto err_debugfs;
 
-	for (i = 0; i < pdata->num_devices; ++i) {
-		struct omap_dss_device *dssdev = pdata->devices[i];
-
-		r = omap_dss_register_device(dssdev);
-		if (r) {
-			DSSERR("device %d %s register failed %d\n", i,
-				dssdev->name ?: "unnamed", r);
-
-			while (--i >= 0)
-				omap_dss_unregister_device(pdata->devices[i]);
-
-			goto err_register;
-		}
-
-		if (def_disp_name && strcmp(def_disp_name, dssdev->name) == 0)
-			pdata->default_device = dssdev;
-	}
+	if (def_disp_name)
+		core.default_display_name = def_disp_name;
+	else if (pdata->default_device)
+		core.default_display_name = pdata->default_device->name;
 
 	return 0;
 
-err_register:
-	dss_uninitialize_debugfs();
 err_debugfs:
 
 	return r;
@@ -216,17 +233,11 @@
 
 static int omap_dss_remove(struct platform_device *pdev)
 {
-	struct omap_dss_board_info *pdata = pdev->dev.platform_data;
-	int i;
-
 	dss_uninitialize_debugfs();
 
 	dss_uninit_overlays(pdev);
 	dss_uninit_overlay_managers(pdev);
 
-	for (i = 0; i < pdata->num_devices; ++i)
-		omap_dss_unregister_device(pdata->devices[i]);
-
 	return 0;
 }
 
@@ -251,7 +262,6 @@
 }
 
 static struct platform_driver omap_dss_driver = {
-	.probe          = omap_dss_probe,
 	.remove         = omap_dss_remove,
 	.shutdown	= omap_dss_shutdown,
 	.suspend	= omap_dss_suspend,
@@ -326,7 +336,6 @@
 	int r;
 	struct omap_dss_driver *dssdrv = to_dss_driver(dev->driver);
 	struct omap_dss_device *dssdev = to_dss_device(dev);
-	struct omap_dss_board_info *pdata = core.pdev->dev.platform_data;
 	bool force;
 
 	DSSDBG("driver_probe: dev %s/%s, drv %s\n",
@@ -335,7 +344,8 @@
 
 	dss_init_device(core.pdev, dssdev);
 
-	force = pdata->default_device == dssdev;
+	force = core.default_display_name &&
+		strcmp(core.default_display_name, dssdev->name) == 0;
 	dss_recheck_connections(dssdev, force);
 
 	r = dssdrv->probe(dssdev);
@@ -381,6 +391,8 @@
 	if (dssdriver->get_recommended_bpp == NULL)
 		dssdriver->get_recommended_bpp =
 			omapdss_default_get_recommended_bpp;
+	if (dssdriver->get_timings == NULL)
+		dssdriver->get_timings = omapdss_default_get_timings;
 
 	return driver_register(&dssdriver->driver);
 }
@@ -427,27 +439,38 @@
 	reset_device(dev, 0);
 }
 
-static int omap_dss_register_device(struct omap_dss_device *dssdev)
+int omap_dss_register_device(struct omap_dss_device *dssdev,
+		struct device *parent, int disp_num)
 {
-	static int dev_num;
-
 	WARN_ON(!dssdev->driver_name);
 
 	reset_device(&dssdev->dev, 1);
 	dssdev->dev.bus = &dss_bus_type;
-	dssdev->dev.parent = &dss_bus;
+	dssdev->dev.parent = parent;
 	dssdev->dev.release = omap_dss_dev_release;
-	dev_set_name(&dssdev->dev, "display%d", dev_num++);
+	dev_set_name(&dssdev->dev, "display%d", disp_num);
 	return device_register(&dssdev->dev);
 }
 
-static void omap_dss_unregister_device(struct omap_dss_device *dssdev)
+void omap_dss_unregister_device(struct omap_dss_device *dssdev)
 {
 	device_unregister(&dssdev->dev);
 }
 
+static int dss_unregister_dss_dev(struct device *dev, void *data)
+{
+	struct omap_dss_device *dssdev = to_dss_device(dev);
+	omap_dss_unregister_device(dssdev);
+	return 0;
+}
+
+void omap_dss_unregister_child_devices(struct device *parent)
+{
+	device_for_each_child(parent, NULL, dss_unregister_dss_dev);
+}
+
 /* BUS */
-static int omap_dss_bus_register(void)
+static int __init omap_dss_bus_register(void)
 {
 	int r;
 
@@ -469,12 +492,56 @@
 }
 
 /* INIT */
+static int (*dss_output_drv_reg_funcs[])(void) __initdata = {
+#ifdef CONFIG_OMAP2_DSS_DPI
+	dpi_init_platform_driver,
+#endif
+#ifdef CONFIG_OMAP2_DSS_SDI
+	sdi_init_platform_driver,
+#endif
+#ifdef CONFIG_OMAP2_DSS_RFBI
+	rfbi_init_platform_driver,
+#endif
+#ifdef CONFIG_OMAP2_DSS_VENC
+	venc_init_platform_driver,
+#endif
+#ifdef CONFIG_OMAP2_DSS_DSI
+	dsi_init_platform_driver,
+#endif
+#ifdef CONFIG_OMAP4_DSS_HDMI
+	hdmi_init_platform_driver,
+#endif
+};
+
+static void (*dss_output_drv_unreg_funcs[])(void) __exitdata = {
+#ifdef CONFIG_OMAP2_DSS_DPI
+	dpi_uninit_platform_driver,
+#endif
+#ifdef CONFIG_OMAP2_DSS_SDI
+	sdi_uninit_platform_driver,
+#endif
+#ifdef CONFIG_OMAP2_DSS_RFBI
+	rfbi_uninit_platform_driver,
+#endif
+#ifdef CONFIG_OMAP2_DSS_VENC
+	venc_uninit_platform_driver,
+#endif
+#ifdef CONFIG_OMAP2_DSS_DSI
+	dsi_uninit_platform_driver,
+#endif
+#ifdef CONFIG_OMAP4_DSS_HDMI
+	hdmi_uninit_platform_driver,
+#endif
+};
+
+static bool dss_output_drv_loaded[ARRAY_SIZE(dss_output_drv_reg_funcs)];
 
 static int __init omap_dss_register_drivers(void)
 {
 	int r;
+	int i;
 
-	r = platform_driver_register(&omap_dss_driver);
+	r = platform_driver_probe(&omap_dss_driver, omap_dss_probe);
 	if (r)
 		return r;
 
@@ -490,40 +557,18 @@
 		goto err_dispc;
 	}
 
-	r = rfbi_init_platform_driver();
-	if (r) {
-		DSSERR("Failed to initialize rfbi platform driver\n");
-		goto err_rfbi;
-	}
-
-	r = venc_init_platform_driver();
-	if (r) {
-		DSSERR("Failed to initialize venc platform driver\n");
-		goto err_venc;
-	}
-
-	r = dsi_init_platform_driver();
-	if (r) {
-		DSSERR("Failed to initialize DSI platform driver\n");
-		goto err_dsi;
-	}
-
-	r = hdmi_init_platform_driver();
-	if (r) {
-		DSSERR("Failed to initialize hdmi\n");
-		goto err_hdmi;
+	/*
+	 * It's ok if registering an output driver fails. It happens, for
+	 * example, when the output device is not present (e.g. SDI on OMAP4).
+	 */
+	for (i = 0; i < ARRAY_SIZE(dss_output_drv_reg_funcs); ++i) {
+		r = dss_output_drv_reg_funcs[i]();
+		if (r == 0)
+			dss_output_drv_loaded[i] = true;
 	}
 
 	return 0;
 
-err_hdmi:
-	dsi_uninit_platform_driver();
-err_dsi:
-	venc_uninit_platform_driver();
-err_venc:
-	rfbi_uninit_platform_driver();
-err_rfbi:
-	dispc_uninit_platform_driver();
 err_dispc:
 	dss_uninit_platform_driver();
 err_dss:
@@ -534,10 +579,13 @@
 
 static void __exit omap_dss_unregister_drivers(void)
 {
-	hdmi_uninit_platform_driver();
-	dsi_uninit_platform_driver();
-	venc_uninit_platform_driver();
-	rfbi_uninit_platform_driver();
+	int i;
+
+	for (i = 0; i < ARRAY_SIZE(dss_output_drv_unreg_funcs); ++i) {
+		if (dss_output_drv_loaded[i])
+			dss_output_drv_unreg_funcs[i]();
+	}
+
 	dispc_uninit_platform_driver();
 	dss_uninit_platform_driver();
 
diff --git a/drivers/video/omap2/dss/dispc.c b/drivers/video/omap2/dss/dispc.c
index ee30937..4749ac3 100644
--- a/drivers/video/omap2/dss/dispc.c
+++ b/drivers/video/omap2/dss/dispc.c
@@ -131,23 +131,6 @@
 	return __raw_readl(dispc.base + idx);
 }
 
-static int dispc_get_ctx_loss_count(void)
-{
-	struct device *dev = &dispc.pdev->dev;
-	struct omap_display_platform_data *pdata = dev->platform_data;
-	struct omap_dss_board_info *board_data = pdata->board_data;
-	int cnt;
-
-	if (!board_data->get_context_loss_count)
-		return -ENOENT;
-
-	cnt = board_data->get_context_loss_count(dev);
-
-	WARN_ONCE(cnt < 0, "get_context_loss_count failed: %d\n", cnt);
-
-	return cnt;
-}
-
 #define SR(reg) \
 	dispc.ctx[DISPC_##reg / sizeof(u32)] = dispc_read_reg(DISPC_##reg)
 #define RR(reg) \
@@ -251,7 +234,7 @@
 	if (dss_has_feature(FEAT_CORE_CLK_DIV))
 		SR(DIVISOR);
 
-	dispc.ctx_loss_cnt = dispc_get_ctx_loss_count();
+	dispc.ctx_loss_cnt = dss_get_ctx_loss_count(&dispc.pdev->dev);
 	dispc.ctx_valid = true;
 
 	DSSDBG("context saved, ctx_loss_count %d\n", dispc.ctx_loss_cnt);
@@ -266,7 +249,7 @@
 	if (!dispc.ctx_valid)
 		return;
 
-	ctx = dispc_get_ctx_loss_count();
+	ctx = dss_get_ctx_loss_count(&dispc.pdev->dev);
 
 	if (ctx >= 0 && ctx == dispc.ctx_loss_cnt)
 		return;
@@ -413,14 +396,6 @@
 		return false;
 }
 
-static struct omap_dss_device *dispc_mgr_get_device(enum omap_channel channel)
-{
-	struct omap_overlay_manager *mgr =
-		omap_dss_get_overlay_manager(channel);
-
-	return mgr ? mgr->device : NULL;
-}
-
 u32 dispc_mgr_get_vsync_irq(enum omap_channel channel)
 {
 	switch (channel) {
@@ -432,6 +407,7 @@
 		return DISPC_IRQ_EVSYNC_ODD | DISPC_IRQ_EVSYNC_EVEN;
 	default:
 		BUG();
+		return 0;
 	}
 }
 
@@ -446,6 +422,7 @@
 		return 0;
 	default:
 		BUG();
+		return 0;
 	}
 }
 
@@ -764,7 +741,7 @@
 		case OMAP_DSS_COLOR_XRGB16_1555:
 			m = 0xf; break;
 		default:
-			BUG(); break;
+			BUG(); return;
 		}
 	} else {
 		switch (color_mode) {
@@ -801,13 +778,25 @@
 		case OMAP_DSS_COLOR_XRGB16_1555:
 			m = 0xf; break;
 		default:
-			BUG(); break;
+			BUG(); return;
 		}
 	}
 
 	REG_FLD_MOD(DISPC_OVL_ATTRIBUTES(plane), m, 4, 1);
 }
 
+static void dispc_ovl_configure_burst_type(enum omap_plane plane,
+		enum omap_dss_rotation_type rotation_type)
+{
+	if (dss_has_feature(FEAT_BURST_2D) == 0)
+		return;
+
+	if (rotation_type == OMAP_DSS_ROT_TILER)
+		REG_FLD_MOD(DISPC_OVL_ATTRIBUTES(plane), 1, 29, 29);
+	else
+		REG_FLD_MOD(DISPC_OVL_ATTRIBUTES(plane), 0, 29, 29);
+}
+
 void dispc_ovl_set_channel_out(enum omap_plane plane, enum omap_channel channel)
 {
 	int shift;
@@ -845,6 +834,7 @@
 			break;
 		default:
 			BUG();
+			return;
 		}
 
 		val = FLD_MOD(val, chan, shift, shift);
@@ -872,6 +862,7 @@
 		break;
 	default:
 		BUG();
+		return 0;
 	}
 
 	val = dispc_read_reg(DISPC_OVL_ATTRIBUTES(plane));
@@ -983,22 +974,15 @@
 	REG_FLD_MOD(DISPC_OVL_ATTRIBUTES(plane), enable, shift, shift);
 }
 
-void dispc_mgr_set_lcd_size(enum omap_channel channel, u16 width, u16 height)
+static void dispc_mgr_set_size(enum omap_channel channel, u16 width,
+		u16 height)
 {
 	u32 val;
-	BUG_ON((width > (1 << 11)) || (height > (1 << 11)));
+
 	val = FLD_VAL(height - 1, 26, 16) | FLD_VAL(width - 1, 10, 0);
 	dispc_write_reg(DISPC_SIZE_MGR(channel), val);
 }
 
-void dispc_set_digit_size(u16 width, u16 height)
-{
-	u32 val;
-	BUG_ON((width > (1 << 11)) || (height > (1 << 11)));
-	val = FLD_VAL(height - 1, 26, 16) | FLD_VAL(width - 1, 10, 0);
-	dispc_write_reg(DISPC_SIZE_MGR(OMAP_DSS_CHANNEL_DIGIT), val);
-}
-
 static void dispc_read_plane_fifo_sizes(void)
 {
 	u32 size;
@@ -1063,7 +1047,8 @@
 }
 
 void dispc_ovl_compute_fifo_thresholds(enum omap_plane plane,
-		u32 *fifo_low, u32 *fifo_high, bool use_fifomerge)
+		u32 *fifo_low, u32 *fifo_high, bool use_fifomerge,
+		bool manual_update)
 {
 	/*
 	 * All sizes are in bytes. Both the buffer and burst are made of
@@ -1091,7 +1076,7 @@
 	 * combined fifo size
 	 */
 
-	if (dss_has_feature(FEAT_OMAP3_DSI_FIFO_BUG)) {
+	if (manual_update && dss_has_feature(FEAT_OMAP3_DSI_FIFO_BUG)) {
 		*fifo_low = ovl_fifo_size - burst_size * 2;
 		*fifo_high = total_fifo_size - burst_size;
 	} else {
@@ -1185,6 +1170,94 @@
 	dispc_ovl_set_fir(plane, fir_hinc, fir_vinc, color_comp);
 }
 
+static void dispc_ovl_set_accu_uv(enum omap_plane plane,
+		u16 orig_width,	u16 orig_height, u16 out_width, u16 out_height,
+		bool ilace, enum omap_color_mode color_mode, u8 rotation)
+{
+	int h_accu2_0, h_accu2_1;
+	int v_accu2_0, v_accu2_1;
+	int chroma_hinc, chroma_vinc;
+	int idx;
+
+	struct accu {
+		s8 h0_m, h0_n;
+		s8 h1_m, h1_n;
+		s8 v0_m, v0_n;
+		s8 v1_m, v1_n;
+	};
+
+	const struct accu *accu_table;
+	const struct accu *accu_val;
+
+	static const struct accu accu_nv12[4] = {
+		{  0, 1,  0, 1 , -1, 2, 0, 1 },
+		{  1, 2, -3, 4 ,  0, 1, 0, 1 },
+		{ -1, 1,  0, 1 , -1, 2, 0, 1 },
+		{ -1, 2, -1, 2 , -1, 1, 0, 1 },
+	};
+
+	static const struct accu accu_nv12_ilace[4] = {
+		{  0, 1,  0, 1 , -3, 4, -1, 4 },
+		{ -1, 4, -3, 4 ,  0, 1,  0, 1 },
+		{ -1, 1,  0, 1 , -1, 4, -3, 4 },
+		{ -3, 4, -3, 4 , -1, 1,  0, 1 },
+	};
+
+	static const struct accu accu_yuv[4] = {
+		{  0, 1, 0, 1,  0, 1, 0, 1 },
+		{  0, 1, 0, 1,  0, 1, 0, 1 },
+		{ -1, 1, 0, 1,  0, 1, 0, 1 },
+		{  0, 1, 0, 1, -1, 1, 0, 1 },
+	};
+
+	switch (rotation) {
+	case OMAP_DSS_ROT_0:
+		idx = 0;
+		break;
+	case OMAP_DSS_ROT_90:
+		idx = 1;
+		break;
+	case OMAP_DSS_ROT_180:
+		idx = 2;
+		break;
+	case OMAP_DSS_ROT_270:
+		idx = 3;
+		break;
+	default:
+		BUG();
+		return;
+	}
+
+	switch (color_mode) {
+	case OMAP_DSS_COLOR_NV12:
+		if (ilace)
+			accu_table = accu_nv12_ilace;
+		else
+			accu_table = accu_nv12;
+		break;
+	case OMAP_DSS_COLOR_YUV2:
+	case OMAP_DSS_COLOR_UYVY:
+		accu_table = accu_yuv;
+		break;
+	default:
+		BUG();
+		return;
+	}
+
+	accu_val = &accu_table[idx];
+
+	chroma_hinc = 1024 * orig_width / out_width;
+	chroma_vinc = 1024 * orig_height / out_height;
+
+	h_accu2_0 = (accu_val->h0_m * chroma_hinc / accu_val->h0_n) % 1024;
+	h_accu2_1 = (accu_val->h1_m * chroma_hinc / accu_val->h1_n) % 1024;
+	v_accu2_0 = (accu_val->v0_m * chroma_vinc / accu_val->v0_n) % 1024;
+	v_accu2_1 = (accu_val->v1_m * chroma_vinc / accu_val->v1_n) % 1024;
+
+	dispc_ovl_set_vid_accu2_0(plane, h_accu2_0, v_accu2_0);
+	dispc_ovl_set_vid_accu2_1(plane, h_accu2_1, v_accu2_1);
+}
+
 static void dispc_ovl_set_scaling_common(enum omap_plane plane,
 		u16 orig_width, u16 orig_height,
 		u16 out_width, u16 out_height,
@@ -1258,6 +1331,10 @@
 		REG_FLD_MOD(DISPC_OVL_ATTRIBUTES2(plane), 0, 8, 8);
 		return;
 	}
+
+	dispc_ovl_set_accu_uv(plane, orig_width, orig_height, out_width,
+			out_height, ilace, color_mode, rotation);
+
 	switch (color_mode) {
 	case OMAP_DSS_COLOR_NV12:
 		/* UV is subsampled by 2 vertically*/
@@ -1280,6 +1357,7 @@
 		break;
 	default:
 		BUG();
+		return;
 	}
 
 	if (out_width != orig_width)
@@ -1297,9 +1375,6 @@
 	REG_FLD_MOD(DISPC_OVL_ATTRIBUTES(plane), scale_x ? 1 : 0, 5, 5);
 	/* set V scaling */
 	REG_FLD_MOD(DISPC_OVL_ATTRIBUTES(plane), scale_y ? 1 : 0, 6, 6);
-
-	dispc_ovl_set_vid_accu2_0(plane, 0x80, 0);
-	dispc_ovl_set_vid_accu2_1(plane, 0x80, 0);
 }
 
 static void dispc_ovl_set_scaling(enum omap_plane plane,
@@ -1410,6 +1485,7 @@
 		return 32;
 	default:
 		BUG();
+		return 0;
 	}
 }
 
@@ -1423,6 +1499,7 @@
 		return 1 - (-pixels + 1) * ps;
 	else
 		BUG();
+		return 0;
 }
 
 static void calc_vrfb_rotation_offset(u8 rotation, bool mirror,
@@ -1431,7 +1508,7 @@
 		enum omap_color_mode color_mode, bool fieldmode,
 		unsigned int field_offset,
 		unsigned *offset0, unsigned *offset1,
-		s32 *row_inc, s32 *pix_inc)
+		s32 *row_inc, s32 *pix_inc, int x_predecim, int y_predecim)
 {
 	u8 ps;
 
@@ -1477,10 +1554,10 @@
 		else
 			*offset0 = 0;
 
-		*row_inc = pixinc(1 + (screen_width - width) +
-				(fieldmode ? screen_width : 0),
-				ps);
-		*pix_inc = pixinc(1, ps);
+		*row_inc = pixinc(1 +
+			(y_predecim * screen_width - x_predecim * width) +
+			(fieldmode ? screen_width : 0), ps);
+		*pix_inc = pixinc(x_predecim, ps);
 		break;
 
 	case OMAP_DSS_ROT_0 + 4:
@@ -1498,14 +1575,15 @@
 			*offset0 = field_offset * screen_width * ps;
 		else
 			*offset0 = 0;
-		*row_inc = pixinc(1 - (screen_width + width) -
-				(fieldmode ? screen_width : 0),
-				ps);
-		*pix_inc = pixinc(1, ps);
+		*row_inc = pixinc(1 -
+			(y_predecim * screen_width + x_predecim * width) -
+			(fieldmode ? screen_width : 0), ps);
+		*pix_inc = pixinc(x_predecim, ps);
 		break;
 
 	default:
 		BUG();
+		return;
 	}
 }
 
@@ -1515,7 +1593,7 @@
 		enum omap_color_mode color_mode, bool fieldmode,
 		unsigned int field_offset,
 		unsigned *offset0, unsigned *offset1,
-		s32 *row_inc, s32 *pix_inc)
+		s32 *row_inc, s32 *pix_inc, int x_predecim, int y_predecim)
 {
 	u8 ps;
 	u16 fbw, fbh;
@@ -1557,10 +1635,14 @@
 			*offset0 = *offset1 + field_offset * screen_width * ps;
 		else
 			*offset0 = *offset1;
-		*row_inc = pixinc(1 + (screen_width - fbw) +
-				(fieldmode ? screen_width : 0),
-				ps);
-		*pix_inc = pixinc(1, ps);
+		*row_inc = pixinc(1 +
+			(y_predecim * screen_width - fbw * x_predecim) +
+			(fieldmode ? screen_width : 0), ps);
+		if (color_mode == OMAP_DSS_COLOR_YUV2 ||
+			color_mode == OMAP_DSS_COLOR_UYVY)
+			*pix_inc = pixinc(x_predecim, 2 * ps);
+		else
+			*pix_inc = pixinc(x_predecim, ps);
 		break;
 	case OMAP_DSS_ROT_90:
 		*offset1 = screen_width * (fbh - 1) * ps;
@@ -1568,9 +1650,9 @@
 			*offset0 = *offset1 + field_offset * ps;
 		else
 			*offset0 = *offset1;
-		*row_inc = pixinc(screen_width * (fbh - 1) + 1 +
-				(fieldmode ? 1 : 0), ps);
-		*pix_inc = pixinc(-screen_width, ps);
+		*row_inc = pixinc(screen_width * (fbh * x_predecim - 1) +
+				y_predecim + (fieldmode ? 1 : 0), ps);
+		*pix_inc = pixinc(-x_predecim * screen_width, ps);
 		break;
 	case OMAP_DSS_ROT_180:
 		*offset1 = (screen_width * (fbh - 1) + fbw - 1) * ps;
@@ -1579,10 +1661,13 @@
 		else
 			*offset0 = *offset1;
 		*row_inc = pixinc(-1 -
-				(screen_width - fbw) -
-				(fieldmode ? screen_width : 0),
-				ps);
-		*pix_inc = pixinc(-1, ps);
+			(y_predecim * screen_width - fbw * x_predecim) -
+			(fieldmode ? screen_width : 0), ps);
+		if (color_mode == OMAP_DSS_COLOR_YUV2 ||
+			color_mode == OMAP_DSS_COLOR_UYVY)
+			*pix_inc = pixinc(-x_predecim, 2 * ps);
+		else
+			*pix_inc = pixinc(-x_predecim, ps);
 		break;
 	case OMAP_DSS_ROT_270:
 		*offset1 = (fbw - 1) * ps;
@@ -1590,9 +1675,9 @@
 			*offset0 = *offset1 - field_offset * ps;
 		else
 			*offset0 = *offset1;
-		*row_inc = pixinc(-screen_width * (fbh - 1) - 1 -
-				(fieldmode ? 1 : 0), ps);
-		*pix_inc = pixinc(screen_width, ps);
+		*row_inc = pixinc(-screen_width * (fbh * x_predecim - 1) -
+				y_predecim - (fieldmode ? 1 : 0), ps);
+		*pix_inc = pixinc(x_predecim * screen_width, ps);
 		break;
 
 	/* mirroring */
@@ -1602,10 +1687,14 @@
 			*offset0 = *offset1 + field_offset * screen_width * ps;
 		else
 			*offset0 = *offset1;
-		*row_inc = pixinc(screen_width * 2 - 1 +
+		*row_inc = pixinc(y_predecim * screen_width * 2 - 1 +
 				(fieldmode ? screen_width : 0),
 				ps);
-		*pix_inc = pixinc(-1, ps);
+		if (color_mode == OMAP_DSS_COLOR_YUV2 ||
+			color_mode == OMAP_DSS_COLOR_UYVY)
+			*pix_inc = pixinc(-x_predecim, 2 * ps);
+		else
+			*pix_inc = pixinc(-x_predecim, ps);
 		break;
 
 	case OMAP_DSS_ROT_90 + 4:
@@ -1614,10 +1703,10 @@
 			*offset0 = *offset1 + field_offset * ps;
 		else
 			*offset0 = *offset1;
-		*row_inc = pixinc(-screen_width * (fbh - 1) + 1 +
-				(fieldmode ? 1 : 0),
+		*row_inc = pixinc(-screen_width * (fbh * x_predecim - 1) +
+				y_predecim + (fieldmode ? 1 : 0),
 				ps);
-		*pix_inc = pixinc(screen_width, ps);
+		*pix_inc = pixinc(x_predecim * screen_width, ps);
 		break;
 
 	case OMAP_DSS_ROT_180 + 4:
@@ -1626,10 +1715,14 @@
 			*offset0 = *offset1 - field_offset * screen_width * ps;
 		else
 			*offset0 = *offset1;
-		*row_inc = pixinc(1 - screen_width * 2 -
+		*row_inc = pixinc(1 - y_predecim * screen_width * 2 -
 				(fieldmode ? screen_width : 0),
 				ps);
-		*pix_inc = pixinc(1, ps);
+		if (color_mode == OMAP_DSS_COLOR_YUV2 ||
+			color_mode == OMAP_DSS_COLOR_UYVY)
+			*pix_inc = pixinc(x_predecim, 2 * ps);
+		else
+			*pix_inc = pixinc(x_predecim, ps);
 		break;
 
 	case OMAP_DSS_ROT_270 + 4:
@@ -1638,34 +1731,130 @@
 			*offset0 = *offset1 - field_offset * ps;
 		else
 			*offset0 = *offset1;
-		*row_inc = pixinc(screen_width * (fbh - 1) - 1 -
-				(fieldmode ? 1 : 0),
+		*row_inc = pixinc(screen_width * (fbh * x_predecim - 1) -
+				y_predecim - (fieldmode ? 1 : 0),
 				ps);
-		*pix_inc = pixinc(-screen_width, ps);
+		*pix_inc = pixinc(-x_predecim * screen_width, ps);
 		break;
 
 	default:
 		BUG();
+		return;
 	}
 }
 
-static unsigned long calc_fclk_five_taps(enum omap_channel channel, u16 width,
+static void calc_tiler_rotation_offset(u16 screen_width, u16 width,
+		enum omap_color_mode color_mode, bool fieldmode,
+		unsigned int field_offset, unsigned *offset0, unsigned *offset1,
+		s32 *row_inc, s32 *pix_inc, int x_predecim, int y_predecim)
+{
+	u8 ps;
+
+	switch (color_mode) {
+	case OMAP_DSS_COLOR_CLUT1:
+	case OMAP_DSS_COLOR_CLUT2:
+	case OMAP_DSS_COLOR_CLUT4:
+	case OMAP_DSS_COLOR_CLUT8:
+		BUG();
+		return;
+	default:
+		ps = color_mode_to_bpp(color_mode) / 8;
+		break;
+	}
+
+	DSSDBG("scrw %d, width %d\n", screen_width, width);
+
+	/*
+	 * field 0 = even field = bottom field
+	 * field 1 = odd field = top field
+	 */
+	*offset1 = 0;
+	if (field_offset)
+		*offset0 = *offset1 + field_offset * screen_width * ps;
+	else
+		*offset0 = *offset1;
+	*row_inc = pixinc(1 + (y_predecim * screen_width - width * x_predecim) +
+			(fieldmode ? screen_width : 0), ps);
+	if (color_mode == OMAP_DSS_COLOR_YUV2 ||
+		color_mode == OMAP_DSS_COLOR_UYVY)
+		*pix_inc = pixinc(x_predecim, 2 * ps);
+	else
+		*pix_inc = pixinc(x_predecim, ps);
+}
+
+/*
+ * This function is used to avoid SYNC_LOST errors on OMAP3, caused by some
+ * undocumented horizontal position and timing-related limitations.
+ */
+static int check_horiz_timing_omap3(enum omap_channel channel,
+		const struct omap_video_timings *t, u16 pos_x,
+		u16 width, u16 height, u16 out_width, u16 out_height)
+{
+	int DS = DIV_ROUND_UP(height, out_height);
+	unsigned long nonactive, lclk, pclk;
+	static const u8 limits[3] = { 8, 10, 20 };
+	u64 val, blank;
+	int i;
+
+	nonactive = t->x_res + t->hfp + t->hsw + t->hbp - out_width;
+	pclk = dispc_mgr_pclk_rate(channel);
+	if (dispc_mgr_is_lcd(channel))
+		lclk = dispc_mgr_lclk_rate(channel);
+	else
+		lclk = dispc_fclk_rate();
+
+	i = 0;
+	if (out_height < height)
+		i++;
+	if (out_width < width)
+		i++;
+	blank = div_u64((u64)(t->hbp + t->hsw + t->hfp) * lclk, pclk);
+	DSSDBG("blanking period + ppl = %llu (limit = %u)\n", blank, limits[i]);
+	if (blank <= limits[i])
+		return -EINVAL;
+
+	/*
+	 * Pixel data should be prepared before visible display point starts.
+	 * So, at least DS-2 lines must have already been fetched by DISPC
+	 * during the nonactive - pos_x period.
+	 */
+	val = div_u64((u64)(nonactive - pos_x) * lclk, pclk);
+	DSSDBG("(nonactive - pos_x) * pcd = %llu max(0, DS - 2) * width = %d\n",
+		val, max(0, DS - 2) * width);
+	if (val < max(0, DS - 2) * width)
+		return -EINVAL;
+
+	/*
+	 * All lines need to be refilled during the nonactive period of which
+	 * only one line can be loaded during the active period. So, at least
+	 * DS - 1 lines should be loaded during the nonactive period.
+	 */
+	val = div_u64((u64)nonactive * lclk, pclk);
+	DSSDBG("nonactive * pcd  = %llu, max(0, DS - 1) * width = %d\n",
+		val, max(0, DS - 1) * width);
+	if (val < max(0, DS - 1) * width)
+		return -EINVAL;
+
+	return 0;
+}
+
+static unsigned long calc_core_clk_five_taps(enum omap_channel channel,
+		const struct omap_video_timings *mgr_timings, u16 width,
 		u16 height, u16 out_width, u16 out_height,
 		enum omap_color_mode color_mode)
 {
-	u32 fclk = 0;
+	u32 core_clk = 0;
 	u64 tmp, pclk = dispc_mgr_pclk_rate(channel);
 
 	if (height <= out_height && width <= out_width)
 		return (unsigned long) pclk;
 
 	if (height > out_height) {
-		struct omap_dss_device *dssdev = dispc_mgr_get_device(channel);
-		unsigned int ppl = dssdev->panel.timings.x_res;
+		unsigned int ppl = mgr_timings->x_res;
 
 		tmp = pclk * height * out_width;
 		do_div(tmp, 2 * out_height * ppl);
-		fclk = tmp;
+		core_clk = tmp;
 
 		if (height > 2 * out_height) {
 			if (ppl == out_width)
@@ -1673,23 +1862,23 @@
 
 			tmp = pclk * (height - 2 * out_height) * out_width;
 			do_div(tmp, 2 * out_height * (ppl - out_width));
-			fclk = max(fclk, (u32) tmp);
+			core_clk = max_t(u32, core_clk, tmp);
 		}
 	}
 
 	if (width > out_width) {
 		tmp = pclk * width;
 		do_div(tmp, out_width);
-		fclk = max(fclk, (u32) tmp);
+		core_clk = max_t(u32, core_clk, tmp);
 
 		if (color_mode == OMAP_DSS_COLOR_RGB24U)
-			fclk <<= 1;
+			core_clk <<= 1;
 	}
 
-	return fclk;
+	return core_clk;
 }
 
-static unsigned long calc_fclk(enum omap_channel channel, u16 width,
+static unsigned long calc_core_clk(enum omap_channel channel, u16 width,
 		u16 height, u16 out_width, u16 out_height)
 {
 	unsigned int hf, vf;
@@ -1730,15 +1919,20 @@
 }
 
 static int dispc_ovl_calc_scaling(enum omap_plane plane,
-		enum omap_channel channel, u16 width, u16 height,
-		u16 out_width, u16 out_height,
-		enum omap_color_mode color_mode, bool *five_taps)
+		enum omap_channel channel,
+		const struct omap_video_timings *mgr_timings,
+		u16 width, u16 height, u16 out_width, u16 out_height,
+		enum omap_color_mode color_mode, bool *five_taps,
+		int *x_predecim, int *y_predecim, u16 pos_x)
 {
 	struct omap_overlay *ovl = omap_dss_get_overlay(plane);
 	const int maxdownscale = dss_feat_get_param_max(FEAT_PARAM_DOWNSCALE);
 	const int maxsinglelinewidth =
 				dss_feat_get_param_max(FEAT_PARAM_LINEWIDTH);
-	unsigned long fclk = 0;
+	const int max_decim_limit = 16;
+	unsigned long core_clk = 0;
+	int decim_x, decim_y, error, min_factor;
+	u16 in_width, in_height, in_width_max = 0;
 
 	if (width == out_width && height == out_height)
 		return 0;
@@ -1746,64 +1940,154 @@
 	if ((ovl->caps & OMAP_DSS_OVL_CAP_SCALE) == 0)
 		return -EINVAL;
 
-	if (out_width < width / maxdownscale ||
-			out_width > width * 8)
+	*x_predecim = max_decim_limit;
+	*y_predecim = max_decim_limit;
+
+	if (color_mode == OMAP_DSS_COLOR_CLUT1 ||
+	    color_mode == OMAP_DSS_COLOR_CLUT2 ||
+	    color_mode == OMAP_DSS_COLOR_CLUT4 ||
+	    color_mode == OMAP_DSS_COLOR_CLUT8) {
+		*x_predecim = 1;
+		*y_predecim = 1;
+		*five_taps = false;
+		return 0;
+	}
+
+	decim_x = DIV_ROUND_UP(DIV_ROUND_UP(width, out_width), maxdownscale);
+	decim_y = DIV_ROUND_UP(DIV_ROUND_UP(height, out_height), maxdownscale);
+
+	min_factor = min(decim_x, decim_y);
+
+	if (decim_x > *x_predecim || out_width > width * 8)
 		return -EINVAL;
 
-	if (out_height < height / maxdownscale ||
-			out_height > height * 8)
+	if (decim_y > *y_predecim || out_height > height * 8)
 		return -EINVAL;
 
 	if (cpu_is_omap24xx()) {
-		if (width > maxsinglelinewidth)
-			DSSERR("Cannot scale max input width exceeded");
 		*five_taps = false;
-		fclk = calc_fclk(channel, width, height, out_width,
-								out_height);
+
+		do {
+			in_height = DIV_ROUND_UP(height, decim_y);
+			in_width = DIV_ROUND_UP(width, decim_x);
+			core_clk = calc_core_clk(channel, in_width, in_height,
+					out_width, out_height);
+			error = (in_width > maxsinglelinewidth || !core_clk ||
+				core_clk > dispc_core_clk_rate());
+			if (error) {
+				if (decim_x == decim_y) {
+					decim_x = min_factor;
+					decim_y++;
+				} else {
+					swap(decim_x, decim_y);
+					if (decim_x < decim_y)
+						decim_x++;
+				}
+			}
+		} while (decim_x <= *x_predecim && decim_y <= *y_predecim &&
+				error);
+
+		if (in_width > maxsinglelinewidth) {
+			DSSERR("Cannot scale: max input width exceeded\n");
+			return -EINVAL;
+		}
 	} else if (cpu_is_omap34xx()) {
-		if (width > (maxsinglelinewidth * 2)) {
+
+		do {
+			in_height = DIV_ROUND_UP(height, decim_y);
+			in_width = DIV_ROUND_UP(width, decim_x);
+			core_clk = calc_core_clk_five_taps(channel, mgr_timings,
+				in_width, in_height, out_width, out_height,
+				color_mode);
+
+			error = check_horiz_timing_omap3(channel, mgr_timings,
+				pos_x, in_width, in_height, out_width,
+				out_height);
+
+			if (in_width > maxsinglelinewidth)
+				if (in_height > out_height &&
+					in_height < out_height * 2)
+					*five_taps = false;
+			if (!*five_taps)
+				core_clk = calc_core_clk(channel, in_width,
+					in_height, out_width, out_height);
+			error = (error || in_width > maxsinglelinewidth * 2 ||
+				(in_width > maxsinglelinewidth && *five_taps) ||
+				!core_clk || core_clk > dispc_core_clk_rate());
+			if (error) {
+				if (decim_x == decim_y) {
+					decim_x = min_factor;
+					decim_y++;
+				} else {
+					swap(decim_x, decim_y);
+					if (decim_x < decim_y)
+						decim_x++;
+				}
+			}
+		} while (decim_x <= *x_predecim && decim_y <= *y_predecim
+			&& error);
+
+		if (check_horiz_timing_omap3(channel, mgr_timings, pos_x, width,
+			height, out_width, out_height)) {
+			DSSERR("horizontal timing too tight\n");
+			return -EINVAL;
+		}
+
+		if (in_width > (maxsinglelinewidth * 2)) {
 			DSSERR("Cannot setup scaling");
 			DSSERR("width exceeds maximum width possible");
 			return -EINVAL;
 		}
-		fclk = calc_fclk_five_taps(channel, width, height, out_width,
-						out_height, color_mode);
-		if (width > maxsinglelinewidth) {
-			if (height > out_height && height < out_height * 2)
-				*five_taps = false;
-			else {
-				DSSERR("cannot setup scaling with five taps");
-				return -EINVAL;
-			}
+
+		if (in_width > maxsinglelinewidth && *five_taps) {
+			DSSERR("cannot setup scaling with five taps");
+			return -EINVAL;
 		}
-		if (!*five_taps)
-			fclk = calc_fclk(channel, width, height, out_width,
-					out_height);
 	} else {
-		if (width > maxsinglelinewidth) {
+		int decim_x_min = decim_x;
+		in_height = DIV_ROUND_UP(height, decim_y);
+		in_width_max = dispc_core_clk_rate() /
+				DIV_ROUND_UP(dispc_mgr_pclk_rate(channel),
+						out_width);
+		decim_x = DIV_ROUND_UP(width, in_width_max);
+
+		decim_x = decim_x > decim_x_min ? decim_x : decim_x_min;
+		if (decim_x > *x_predecim)
+			return -EINVAL;
+
+		do {
+			in_width = DIV_ROUND_UP(width, decim_x);
+		} while (decim_x <= *x_predecim &&
+				in_width > maxsinglelinewidth && decim_x++);
+
+		if (in_width > maxsinglelinewidth) {
 			DSSERR("Cannot scale width exceeds max line width");
 			return -EINVAL;
 		}
-		fclk = calc_fclk(channel, width, height, out_width,
-				out_height);
+
+		core_clk = calc_core_clk(channel, in_width, in_height,
+				out_width, out_height);
 	}
 
-	DSSDBG("required fclk rate = %lu Hz\n", fclk);
-	DSSDBG("current fclk rate = %lu Hz\n", dispc_fclk_rate());
+	DSSDBG("required core clk rate = %lu Hz\n", core_clk);
+	DSSDBG("current core clk rate = %lu Hz\n", dispc_core_clk_rate());
 
-	if (!fclk || fclk > dispc_fclk_rate()) {
+	if (!core_clk || core_clk > dispc_core_clk_rate()) {
 		DSSERR("failed to set up scaling, "
-			"required fclk rate = %lu Hz, "
-			"current fclk rate = %lu Hz\n",
-			fclk, dispc_fclk_rate());
+			"required core clk rate = %lu Hz, "
+			"current core clk rate = %lu Hz\n",
+			core_clk, dispc_core_clk_rate());
 		return -EINVAL;
 	}
 
+	*x_predecim = decim_x;
+	*y_predecim = decim_y;
 	return 0;
 }
 
 int dispc_ovl_setup(enum omap_plane plane, struct omap_overlay_info *oi,
-		bool ilace, bool replication)
+		bool ilace, bool replication,
+		const struct omap_video_timings *mgr_timings)
 {
 	struct omap_overlay *ovl = omap_dss_get_overlay(plane);
 	bool five_taps = true;
@@ -1814,8 +2098,11 @@
 	s32 pix_inc;
 	u16 frame_height = oi->height;
 	unsigned int field_offset = 0;
-	u16 outw, outh;
+	u16 in_height = oi->height;
+	u16 in_width = oi->width;
+	u16 out_width, out_height;
 	enum omap_channel channel;
+	int x_predecim = 1, y_predecim = 1;
 
 	channel = dispc_ovl_get_channel_out(plane);
 
@@ -1829,32 +2116,35 @@
 	if (oi->paddr == 0)
 		return -EINVAL;
 
-	outw = oi->out_width == 0 ? oi->width : oi->out_width;
-	outh = oi->out_height == 0 ? oi->height : oi->out_height;
+	out_width = oi->out_width == 0 ? oi->width : oi->out_width;
+	out_height = oi->out_height == 0 ? oi->height : oi->out_height;
 
-	if (ilace && oi->height == outh)
+	if (ilace && oi->height == out_height)
 		fieldmode = 1;
 
 	if (ilace) {
 		if (fieldmode)
-			oi->height /= 2;
+			in_height /= 2;
 		oi->pos_y /= 2;
-		outh /= 2;
+		out_height /= 2;
 
 		DSSDBG("adjusting for ilace: height %d, pos_y %d, "
 				"out_height %d\n",
-				oi->height, oi->pos_y, outh);
+				in_height, oi->pos_y, out_height);
 	}
 
 	if (!dss_feat_color_mode_supported(plane, oi->color_mode))
 		return -EINVAL;
 
-	r = dispc_ovl_calc_scaling(plane, channel, oi->width, oi->height,
-			outw, outh, oi->color_mode,
-			&five_taps);
+	r = dispc_ovl_calc_scaling(plane, channel, mgr_timings, in_width,
+			in_height, out_width, out_height, oi->color_mode,
+			&five_taps, &x_predecim, &y_predecim, oi->pos_x);
 	if (r)
 		return r;
 
+	in_width = DIV_ROUND_UP(in_width, x_predecim);
+	in_height = DIV_ROUND_UP(in_height, y_predecim);
+
 	if (oi->color_mode == OMAP_DSS_COLOR_YUV2 ||
 			oi->color_mode == OMAP_DSS_COLOR_UYVY ||
 			oi->color_mode == OMAP_DSS_COLOR_NV12)
@@ -1868,32 +2158,46 @@
 		 * so the integer part must be added to the base address of the
 		 * bottom field.
 		 */
-		if (!oi->height || oi->height == outh)
+		if (!in_height || in_height == out_height)
 			field_offset = 0;
 		else
-			field_offset = oi->height / outh / 2;
+			field_offset = in_height / out_height / 2;
 	}
 
 	/* Fields are independent but interleaved in memory. */
 	if (fieldmode)
 		field_offset = 1;
 
-	if (oi->rotation_type == OMAP_DSS_ROT_DMA)
-		calc_dma_rotation_offset(oi->rotation, oi->mirror,
-				oi->screen_width, oi->width, frame_height,
+	offset0 = 0;
+	offset1 = 0;
+	row_inc = 0;
+	pix_inc = 0;
+
+	if (oi->rotation_type == OMAP_DSS_ROT_TILER)
+		calc_tiler_rotation_offset(oi->screen_width, in_width,
 				oi->color_mode, fieldmode, field_offset,
-				&offset0, &offset1, &row_inc, &pix_inc);
+				&offset0, &offset1, &row_inc, &pix_inc,
+				x_predecim, y_predecim);
+	else if (oi->rotation_type == OMAP_DSS_ROT_DMA)
+		calc_dma_rotation_offset(oi->rotation, oi->mirror,
+				oi->screen_width, in_width, frame_height,
+				oi->color_mode, fieldmode, field_offset,
+				&offset0, &offset1, &row_inc, &pix_inc,
+				x_predecim, y_predecim);
 	else
 		calc_vrfb_rotation_offset(oi->rotation, oi->mirror,
-				oi->screen_width, oi->width, frame_height,
+				oi->screen_width, in_width, frame_height,
 				oi->color_mode, fieldmode, field_offset,
-				&offset0, &offset1, &row_inc, &pix_inc);
+				&offset0, &offset1, &row_inc, &pix_inc,
+				x_predecim, y_predecim);
 
 	DSSDBG("offset0 %u, offset1 %u, row_inc %d, pix_inc %d\n",
 			offset0, offset1, row_inc, pix_inc);
 
 	dispc_ovl_set_color_mode(plane, oi->color_mode);
 
+	dispc_ovl_configure_burst_type(plane, oi->rotation_type);
+
 	dispc_ovl_set_ba0(plane, oi->paddr + offset0);
 	dispc_ovl_set_ba1(plane, oi->paddr + offset1);
 
@@ -1906,19 +2210,18 @@
 	dispc_ovl_set_row_inc(plane, row_inc);
 	dispc_ovl_set_pix_inc(plane, pix_inc);
 
-	DSSDBG("%d,%d %dx%d -> %dx%d\n", oi->pos_x, oi->pos_y, oi->width,
-			oi->height, outw, outh);
+	DSSDBG("%d,%d %dx%d -> %dx%d\n", oi->pos_x, oi->pos_y, in_width,
+			in_height, out_width, out_height);
 
 	dispc_ovl_set_pos(plane, oi->pos_x, oi->pos_y);
 
-	dispc_ovl_set_pic_size(plane, oi->width, oi->height);
+	dispc_ovl_set_pic_size(plane, in_width, in_height);
 
 	if (ovl->caps & OMAP_DSS_OVL_CAP_SCALE) {
-		dispc_ovl_set_scaling(plane, oi->width, oi->height,
-				   outw, outh,
-				   ilace, five_taps, fieldmode,
+		dispc_ovl_set_scaling(plane, in_width, in_height, out_width,
+				   out_height, ilace, five_taps, fieldmode,
 				   oi->color_mode, oi->rotation);
-		dispc_ovl_set_vid_size(plane, outw, outh);
+		dispc_ovl_set_vid_size(plane, out_width, out_height);
 		dispc_ovl_set_vid_color_conv(plane, cconv);
 	}
 
@@ -2087,8 +2390,10 @@
 		return !!REG_GET(DISPC_CONTROL, 1, 1);
 	else if (channel == OMAP_DSS_CHANNEL_LCD2)
 		return !!REG_GET(DISPC_CONTROL2, 0, 0);
-	else
+	else {
 		BUG();
+		return false;
+	}
 }
 
 void dispc_mgr_enable(enum omap_channel channel, bool enable)
@@ -2285,6 +2590,12 @@
 		REG_FLD_MOD(DISPC_CONTROL, enable, 11, 11);
 }
 
+static bool _dispc_mgr_size_ok(u16 width, u16 height)
+{
+	return width <= dss_feat_get_param_max(FEAT_PARAM_MGR_WIDTH) &&
+		height <= dss_feat_get_param_max(FEAT_PARAM_MGR_HEIGHT);
+}
+
 static bool _dispc_lcd_timings_ok(int hsw, int hfp, int hbp,
 		int vsw, int vfp, int vbp)
 {
@@ -2309,11 +2620,20 @@
 	return true;
 }
 
-bool dispc_lcd_timings_ok(struct omap_video_timings *timings)
+bool dispc_mgr_timings_ok(enum omap_channel channel,
+		const struct omap_video_timings *timings)
 {
-	return _dispc_lcd_timings_ok(timings->hsw, timings->hfp,
-			timings->hbp, timings->vsw,
-			timings->vfp, timings->vbp);
+	bool timings_ok;
+
+	timings_ok = _dispc_mgr_size_ok(timings->x_res, timings->y_res);
+
+	if (dispc_mgr_is_lcd(channel))
+		timings_ok =  timings_ok && _dispc_lcd_timings_ok(timings->hsw,
+						timings->hfp, timings->hbp,
+						timings->vsw, timings->vfp,
+						timings->vbp);
+
+	return timings_ok;
 }
 
 static void _dispc_mgr_set_lcd_timings(enum omap_channel channel, int hsw,
@@ -2340,37 +2660,45 @@
 }
 
 /* change name to mode? */
-void dispc_mgr_set_lcd_timings(enum omap_channel channel,
+void dispc_mgr_set_timings(enum omap_channel channel,
 		struct omap_video_timings *timings)
 {
 	unsigned xtot, ytot;
 	unsigned long ht, vt;
+	struct omap_video_timings t = *timings;
 
-	if (!_dispc_lcd_timings_ok(timings->hsw, timings->hfp,
-				timings->hbp, timings->vsw,
-				timings->vfp, timings->vbp))
+	DSSDBG("channel %d xres %u yres %u\n", channel, t.x_res, t.y_res);
+
+	if (!dispc_mgr_timings_ok(channel, &t)) {
 		BUG();
+		return;
+	}
 
-	_dispc_mgr_set_lcd_timings(channel, timings->hsw, timings->hfp,
-			timings->hbp, timings->vsw, timings->vfp,
-			timings->vbp);
+	if (dispc_mgr_is_lcd(channel)) {
+		_dispc_mgr_set_lcd_timings(channel, t.hsw, t.hfp, t.hbp, t.vsw,
+				t.vfp, t.vbp);
 
-	dispc_mgr_set_lcd_size(channel, timings->x_res, timings->y_res);
+		xtot = t.x_res + t.hfp + t.hsw + t.hbp;
+		ytot = t.y_res + t.vfp + t.vsw + t.vbp;
 
-	xtot = timings->x_res + timings->hfp + timings->hsw + timings->hbp;
-	ytot = timings->y_res + timings->vfp + timings->vsw + timings->vbp;
+		ht = (timings->pixel_clock * 1000) / xtot;
+		vt = (timings->pixel_clock * 1000) / xtot / ytot;
 
-	ht = (timings->pixel_clock * 1000) / xtot;
-	vt = (timings->pixel_clock * 1000) / xtot / ytot;
+		DSSDBG("pck %u\n", timings->pixel_clock);
+		DSSDBG("hsw %d hfp %d hbp %d vsw %d vfp %d vbp %d\n",
+			t.hsw, t.hfp, t.hbp, t.vsw, t.vfp, t.vbp);
 
-	DSSDBG("channel %d xres %u yres %u\n", channel, timings->x_res,
-			timings->y_res);
-	DSSDBG("pck %u\n", timings->pixel_clock);
-	DSSDBG("hsw %d hfp %d hbp %d vsw %d vfp %d vbp %d\n",
-			timings->hsw, timings->hfp, timings->hbp,
-			timings->vsw, timings->vfp, timings->vbp);
+		DSSDBG("hsync %luHz, vsync %luHz\n", ht, vt);
+	} else {
+		enum dss_hdmi_venc_clk_source_select source;
 
-	DSSDBG("hsync %luHz, vsync %luHz\n", ht, vt);
+		source = dss_get_hdmi_venc_clk_source();
+
+		if (source == DSS_VENC_TV_CLK)
+			t.y_res /= 2;
+	}
+
+	dispc_mgr_set_size(channel, t.x_res, t.y_res);
 }
 
 static void dispc_mgr_set_lcd_divisor(enum omap_channel channel, u16 lck_div,
@@ -2411,6 +2739,7 @@
 		break;
 	default:
 		BUG();
+		return 0;
 	}
 
 	return r;
@@ -2441,6 +2770,7 @@
 		break;
 	default:
 		BUG();
+		return 0;
 	}
 
 	return r / lcd;
@@ -2462,20 +2792,35 @@
 
 		return r / pcd;
 	} else {
-		struct omap_dss_device *dssdev =
-			dispc_mgr_get_device(channel);
+		enum dss_hdmi_venc_clk_source_select source;
 
-		switch (dssdev->type) {
-		case OMAP_DISPLAY_TYPE_VENC:
+		source = dss_get_hdmi_venc_clk_source();
+
+		switch (source) {
+		case DSS_VENC_TV_CLK:
 			return venc_get_pixel_clock();
-		case OMAP_DISPLAY_TYPE_HDMI:
+		case DSS_HDMI_M_PCLK:
 			return hdmi_get_pixel_clock();
 		default:
 			BUG();
+			return 0;
 		}
 	}
 }
 
+unsigned long dispc_core_clk_rate(void)
+{
+	int lcd;
+	unsigned long fclk = dispc_fclk_rate();
+
+	if (dss_has_feature(FEAT_CORE_CLK_DIV))
+		lcd = REG_GET(DISPC_DIVISOR, 23, 16);
+	else
+		lcd = REG_GET(DISPC_DIVISORo(OMAP_DSS_CHANNEL_LCD), 23, 16);
+
+	return fclk / lcd;
+}
+
 void dispc_dump_clocks(struct seq_file *s)
 {
 	int lcd, pcd;
@@ -2588,7 +2933,7 @@
 }
 #endif
 
-void dispc_dump_regs(struct seq_file *s)
+static void dispc_dump_regs(struct seq_file *s)
 {
 	int i, j;
 	const char *mgr_names[] = {
@@ -3247,27 +3592,6 @@
 	return 0;
 }
 
-#ifdef CONFIG_OMAP2_DSS_FAKE_VSYNC
-void dispc_fake_vsync_irq(void)
-{
-	u32 irqstatus = DISPC_IRQ_VSYNC;
-	int i;
-
-	WARN_ON(!in_interrupt());
-
-	for (i = 0; i < DISPC_MAX_NR_ISRS; i++) {
-		struct omap_dispc_isr_data *isr_data;
-		isr_data = &dispc.registered_isr[i];
-
-		if (!isr_data->isr)
-			continue;
-
-		if (isr_data->mask & irqstatus)
-			isr_data->isr(isr_data->arg, irqstatus);
-	}
-}
-#endif
-
 static void _omap_dispc_initialize_irq(void)
 {
 	unsigned long flags;
@@ -3330,7 +3654,7 @@
 }
 
 /* DISPC HW IP initialisation */
-static int omap_dispchw_probe(struct platform_device *pdev)
+static int __init omap_dispchw_probe(struct platform_device *pdev)
 {
 	u32 rev;
 	int r = 0;
@@ -3399,6 +3723,11 @@
 
 	dispc_runtime_put();
 
+	dss_debugfs_create_file("dispc", dispc_dump_regs);
+
+#ifdef CONFIG_OMAP2_DSS_COLLECT_IRQ_STATS
+	dss_debugfs_create_file("dispc_irq", dispc_dump_irqs);
+#endif
 	return 0;
 
 err_runtime_get:
@@ -3407,7 +3736,7 @@
 	return r;
 }
 
-static int omap_dispchw_remove(struct platform_device *pdev)
+static int __exit omap_dispchw_remove(struct platform_device *pdev)
 {
 	pm_runtime_disable(&pdev->dev);
 
@@ -3419,19 +3748,12 @@
 static int dispc_runtime_suspend(struct device *dev)
 {
 	dispc_save_context();
-	dss_runtime_put();
 
 	return 0;
 }
 
 static int dispc_runtime_resume(struct device *dev)
 {
-	int r;
-
-	r = dss_runtime_get();
-	if (r < 0)
-		return r;
-
 	dispc_restore_context();
 
 	return 0;
@@ -3443,8 +3765,7 @@
 };
 
 static struct platform_driver omap_dispchw_driver = {
-	.probe          = omap_dispchw_probe,
-	.remove         = omap_dispchw_remove,
+	.remove         = __exit_p(omap_dispchw_remove),
 	.driver         = {
 		.name   = "omapdss_dispc",
 		.owner  = THIS_MODULE,
@@ -3452,12 +3773,12 @@
 	},
 };
 
-int dispc_init_platform_driver(void)
+int __init dispc_init_platform_driver(void)
 {
-	return platform_driver_register(&omap_dispchw_driver);
+	return platform_driver_probe(&omap_dispchw_driver, omap_dispchw_probe);
 }
 
-void dispc_uninit_platform_driver(void)
+void __exit dispc_uninit_platform_driver(void)
 {
-	return platform_driver_unregister(&omap_dispchw_driver);
+	platform_driver_unregister(&omap_dispchw_driver);
 }
diff --git a/drivers/video/omap2/dss/dispc.h b/drivers/video/omap2/dss/dispc.h
index 5836bd1..f278080 100644
--- a/drivers/video/omap2/dss/dispc.h
+++ b/drivers/video/omap2/dss/dispc.h
@@ -120,6 +120,7 @@
 		return 0x03AC;
 	default:
 		BUG();
+		return 0;
 	}
 }
 
@@ -134,6 +135,7 @@
 		return 0x03B0;
 	default:
 		BUG();
+		return 0;
 	}
 }
 
@@ -144,10 +146,12 @@
 		return 0x0064;
 	case OMAP_DSS_CHANNEL_DIGIT:
 		BUG();
+		return 0;
 	case OMAP_DSS_CHANNEL_LCD2:
 		return 0x0400;
 	default:
 		BUG();
+		return 0;
 	}
 }
 
@@ -158,10 +162,12 @@
 		return 0x0068;
 	case OMAP_DSS_CHANNEL_DIGIT:
 		BUG();
+		return 0;
 	case OMAP_DSS_CHANNEL_LCD2:
 		return 0x0404;
 	default:
 		BUG();
+		return 0;
 	}
 }
 
@@ -172,10 +178,12 @@
 		return 0x006C;
 	case OMAP_DSS_CHANNEL_DIGIT:
 		BUG();
+		return 0;
 	case OMAP_DSS_CHANNEL_LCD2:
 		return 0x0408;
 	default:
 		BUG();
+		return 0;
 	}
 }
 
@@ -186,10 +194,12 @@
 		return 0x0070;
 	case OMAP_DSS_CHANNEL_DIGIT:
 		BUG();
+		return 0;
 	case OMAP_DSS_CHANNEL_LCD2:
 		return 0x040C;
 	default:
 		BUG();
+		return 0;
 	}
 }
 
@@ -205,6 +215,7 @@
 		return 0x03CC;
 	default:
 		BUG();
+		return 0;
 	}
 }
 
@@ -215,10 +226,12 @@
 		return 0x01D4;
 	case OMAP_DSS_CHANNEL_DIGIT:
 		BUG();
+		return 0;
 	case OMAP_DSS_CHANNEL_LCD2:
 		return 0x03C0;
 	default:
 		BUG();
+		return 0;
 	}
 }
 
@@ -229,10 +242,12 @@
 		return 0x01D8;
 	case OMAP_DSS_CHANNEL_DIGIT:
 		BUG();
+		return 0;
 	case OMAP_DSS_CHANNEL_LCD2:
 		return 0x03C4;
 	default:
 		BUG();
+		return 0;
 	}
 }
 
@@ -243,10 +258,12 @@
 		return 0x01DC;
 	case OMAP_DSS_CHANNEL_DIGIT:
 		BUG();
+		return 0;
 	case OMAP_DSS_CHANNEL_LCD2:
 		return 0x03C8;
 	default:
 		BUG();
+		return 0;
 	}
 }
 
@@ -257,10 +274,12 @@
 		return 0x0220;
 	case OMAP_DSS_CHANNEL_DIGIT:
 		BUG();
+		return 0;
 	case OMAP_DSS_CHANNEL_LCD2:
 		return 0x03BC;
 	default:
 		BUG();
+		return 0;
 	}
 }
 
@@ -271,10 +290,12 @@
 		return 0x0224;
 	case OMAP_DSS_CHANNEL_DIGIT:
 		BUG();
+		return 0;
 	case OMAP_DSS_CHANNEL_LCD2:
 		return 0x03B8;
 	default:
 		BUG();
+		return 0;
 	}
 }
 
@@ -285,10 +306,12 @@
 		return 0x0228;
 	case OMAP_DSS_CHANNEL_DIGIT:
 		BUG();
+		return 0;
 	case OMAP_DSS_CHANNEL_LCD2:
 		return 0x03B4;
 	default:
 		BUG();
+		return 0;
 	}
 }
 
@@ -306,6 +329,7 @@
 		return 0x0300;
 	default:
 		BUG();
+		return 0;
 	}
 }
 
@@ -321,6 +345,7 @@
 		return 0x0008;
 	default:
 		BUG();
+		return 0;
 	}
 }
 
@@ -335,6 +360,7 @@
 		return 0x000C;
 	default:
 		BUG();
+		return 0;
 	}
 }
 
@@ -343,6 +369,7 @@
 	switch (plane) {
 	case OMAP_DSS_GFX:
 		BUG();
+		return 0;
 	case OMAP_DSS_VIDEO1:
 		return 0x0544;
 	case OMAP_DSS_VIDEO2:
@@ -351,6 +378,7 @@
 		return 0x0310;
 	default:
 		BUG();
+		return 0;
 	}
 }
 
@@ -359,6 +387,7 @@
 	switch (plane) {
 	case OMAP_DSS_GFX:
 		BUG();
+		return 0;
 	case OMAP_DSS_VIDEO1:
 		return 0x0548;
 	case OMAP_DSS_VIDEO2:
@@ -367,6 +396,7 @@
 		return 0x0314;
 	default:
 		BUG();
+		return 0;
 	}
 }
 
@@ -381,6 +411,7 @@
 		return 0x009C;
 	default:
 		BUG();
+		return 0;
 	}
 }
 
@@ -395,6 +426,7 @@
 		return 0x00A8;
 	default:
 		BUG();
+		return 0;
 	}
 }
 
@@ -410,6 +442,7 @@
 		return 0x0070;
 	default:
 		BUG();
+		return 0;
 	}
 }
 
@@ -418,6 +451,7 @@
 	switch (plane) {
 	case OMAP_DSS_GFX:
 		BUG();
+		return 0;
 	case OMAP_DSS_VIDEO1:
 		return 0x0568;
 	case OMAP_DSS_VIDEO2:
@@ -426,6 +460,7 @@
 		return 0x032C;
 	default:
 		BUG();
+		return 0;
 	}
 }
 
@@ -441,6 +476,7 @@
 		return 0x008C;
 	default:
 		BUG();
+		return 0;
 	}
 }
 
@@ -456,6 +492,7 @@
 		return 0x0088;
 	default:
 		BUG();
+		return 0;
 	}
 }
 
@@ -471,6 +508,7 @@
 		return 0x00A4;
 	default:
 		BUG();
+		return 0;
 	}
 }
 
@@ -486,6 +524,7 @@
 		return 0x0098;
 	default:
 		BUG();
+		return 0;
 	}
 }
 
@@ -498,8 +537,10 @@
 	case OMAP_DSS_VIDEO2:
 	case OMAP_DSS_VIDEO3:
 		BUG();
+		return 0;
 	default:
 		BUG();
+		return 0;
 	}
 }
 
@@ -512,8 +553,10 @@
 	case OMAP_DSS_VIDEO2:
 	case OMAP_DSS_VIDEO3:
 		BUG();
+		return 0;
 	default:
 		BUG();
+		return 0;
 	}
 }
 
@@ -522,6 +565,7 @@
 	switch (plane) {
 	case OMAP_DSS_GFX:
 		BUG();
+		return 0;
 	case OMAP_DSS_VIDEO1:
 	case OMAP_DSS_VIDEO2:
 		return 0x0024;
@@ -529,6 +573,7 @@
 		return 0x0090;
 	default:
 		BUG();
+		return 0;
 	}
 }
 
@@ -537,6 +582,7 @@
 	switch (plane) {
 	case OMAP_DSS_GFX:
 		BUG();
+		return 0;
 	case OMAP_DSS_VIDEO1:
 		return 0x0580;
 	case OMAP_DSS_VIDEO2:
@@ -545,6 +591,7 @@
 		return 0x0424;
 	default:
 		BUG();
+		return 0;
 	}
 }
 
@@ -553,6 +600,7 @@
 	switch (plane) {
 	case OMAP_DSS_GFX:
 		BUG();
+		return 0;
 	case OMAP_DSS_VIDEO1:
 	case OMAP_DSS_VIDEO2:
 		return 0x0028;
@@ -560,6 +608,7 @@
 		return 0x0094;
 	default:
 		BUG();
+		return 0;
 	}
 }
 
@@ -569,6 +618,7 @@
 	switch (plane) {
 	case OMAP_DSS_GFX:
 		BUG();
+		return 0;
 	case OMAP_DSS_VIDEO1:
 	case OMAP_DSS_VIDEO2:
 		return 0x002C;
@@ -576,6 +626,7 @@
 		return 0x0000;
 	default:
 		BUG();
+		return 0;
 	}
 }
 
@@ -584,6 +635,7 @@
 	switch (plane) {
 	case OMAP_DSS_GFX:
 		BUG();
+		return 0;
 	case OMAP_DSS_VIDEO1:
 		return 0x0584;
 	case OMAP_DSS_VIDEO2:
@@ -592,6 +644,7 @@
 		return 0x0428;
 	default:
 		BUG();
+		return 0;
 	}
 }
 
@@ -600,6 +653,7 @@
 	switch (plane) {
 	case OMAP_DSS_GFX:
 		BUG();
+		return 0;
 	case OMAP_DSS_VIDEO1:
 	case OMAP_DSS_VIDEO2:
 		return 0x0030;
@@ -607,6 +661,7 @@
 		return 0x0004;
 	default:
 		BUG();
+		return 0;
 	}
 }
 
@@ -615,6 +670,7 @@
 	switch (plane) {
 	case OMAP_DSS_GFX:
 		BUG();
+		return 0;
 	case OMAP_DSS_VIDEO1:
 		return 0x0588;
 	case OMAP_DSS_VIDEO2:
@@ -623,6 +679,7 @@
 		return 0x042C;
 	default:
 		BUG();
+		return 0;
 	}
 }
 
@@ -632,6 +689,7 @@
 	switch (plane) {
 	case OMAP_DSS_GFX:
 		BUG();
+		return 0;
 	case OMAP_DSS_VIDEO1:
 	case OMAP_DSS_VIDEO2:
 		return 0x0034 + i * 0x8;
@@ -639,6 +697,7 @@
 		return 0x0010 + i * 0x8;
 	default:
 		BUG();
+		return 0;
 	}
 }
 
@@ -648,6 +707,7 @@
 	switch (plane) {
 	case OMAP_DSS_GFX:
 		BUG();
+		return 0;
 	case OMAP_DSS_VIDEO1:
 		return 0x058C + i * 0x8;
 	case OMAP_DSS_VIDEO2:
@@ -656,6 +716,7 @@
 		return 0x0430 + i * 0x8;
 	default:
 		BUG();
+		return 0;
 	}
 }
 
@@ -665,6 +726,7 @@
 	switch (plane) {
 	case OMAP_DSS_GFX:
 		BUG();
+		return 0;
 	case OMAP_DSS_VIDEO1:
 	case OMAP_DSS_VIDEO2:
 		return 0x0038 + i * 0x8;
@@ -672,6 +734,7 @@
 		return 0x0014 + i * 0x8;
 	default:
 		BUG();
+		return 0;
 	}
 }
 
@@ -681,6 +744,7 @@
 	switch (plane) {
 	case OMAP_DSS_GFX:
 		BUG();
+		return 0;
 	case OMAP_DSS_VIDEO1:
 		return 0x0590 + i * 8;
 	case OMAP_DSS_VIDEO2:
@@ -689,6 +753,7 @@
 		return 0x0434 + i * 0x8;
 	default:
 		BUG();
+		return 0;
 	}
 }
 
@@ -698,12 +763,14 @@
 	switch (plane) {
 	case OMAP_DSS_GFX:
 		BUG();
+		return 0;
 	case OMAP_DSS_VIDEO1:
 	case OMAP_DSS_VIDEO2:
 	case OMAP_DSS_VIDEO3:
 		return 0x0074 + i * 0x4;
 	default:
 		BUG();
+		return 0;
 	}
 }
 
@@ -713,6 +780,7 @@
 	switch (plane) {
 	case OMAP_DSS_GFX:
 		BUG();
+		return 0;
 	case OMAP_DSS_VIDEO1:
 		return 0x0124 + i * 0x4;
 	case OMAP_DSS_VIDEO2:
@@ -721,6 +789,7 @@
 		return 0x0050 + i * 0x4;
 	default:
 		BUG();
+		return 0;
 	}
 }
 
@@ -730,6 +799,7 @@
 	switch (plane) {
 	case OMAP_DSS_GFX:
 		BUG();
+		return 0;
 	case OMAP_DSS_VIDEO1:
 		return 0x05CC + i * 0x4;
 	case OMAP_DSS_VIDEO2:
@@ -738,6 +808,7 @@
 		return 0x0470 + i * 0x4;
 	default:
 		BUG();
+		return 0;
 	}
 }
 
@@ -754,6 +825,7 @@
 		return 0x00A0;
 	default:
 		BUG();
+		return 0;
 	}
 }
 #endif
diff --git a/drivers/video/omap2/dss/display.c b/drivers/video/omap2/dss/display.c
index 4424c19..2490106 100644
--- a/drivers/video/omap2/dss/display.c
+++ b/drivers/video/omap2/dss/display.c
@@ -304,10 +304,18 @@
 		return 24;
 	default:
 		BUG();
+		return 0;
 	}
 }
 EXPORT_SYMBOL(omapdss_default_get_recommended_bpp);
 
+void omapdss_default_get_timings(struct omap_dss_device *dssdev,
+		struct omap_video_timings *timings)
+{
+	*timings = dssdev->panel.timings;
+}
+EXPORT_SYMBOL(omapdss_default_get_timings);
+
 /* Checks if replication logic should be used. Only use for active matrix,
  * when overlay is in RGB12U or RGB16 mode, and LCD interface is
  * 18bpp or 24bpp */
@@ -340,6 +348,7 @@
 		break;
 	default:
 		BUG();
+		return false;
 	}
 
 	return bpp > 16;
@@ -352,46 +361,6 @@
 	int i;
 	int r;
 
-	switch (dssdev->type) {
-#ifdef CONFIG_OMAP2_DSS_DPI
-	case OMAP_DISPLAY_TYPE_DPI:
-		r = dpi_init_display(dssdev);
-		break;
-#endif
-#ifdef CONFIG_OMAP2_DSS_RFBI
-	case OMAP_DISPLAY_TYPE_DBI:
-		r = rfbi_init_display(dssdev);
-		break;
-#endif
-#ifdef CONFIG_OMAP2_DSS_VENC
-	case OMAP_DISPLAY_TYPE_VENC:
-		r = venc_init_display(dssdev);
-		break;
-#endif
-#ifdef CONFIG_OMAP2_DSS_SDI
-	case OMAP_DISPLAY_TYPE_SDI:
-		r = sdi_init_display(dssdev);
-		break;
-#endif
-#ifdef CONFIG_OMAP2_DSS_DSI
-	case OMAP_DISPLAY_TYPE_DSI:
-		r = dsi_init_display(dssdev);
-		break;
-#endif
-	case OMAP_DISPLAY_TYPE_HDMI:
-		r = hdmi_init_display(dssdev);
-		break;
-	default:
-		DSSERR("Support for display '%s' not compiled in.\n",
-				dssdev->name);
-		return;
-	}
-
-	if (r) {
-		DSSERR("failed to init display %s\n", dssdev->name);
-		return;
-	}
-
 	/* create device sysfs files */
 	i = 0;
 	while ((attr = display_sysfs_attrs[i++]) != NULL) {
diff --git a/drivers/video/omap2/dss/dpi.c b/drivers/video/omap2/dss/dpi.c
index faaf305..8c2056c 100644
--- a/drivers/video/omap2/dss/dpi.c
+++ b/drivers/video/omap2/dss/dpi.c
@@ -156,7 +156,7 @@
 		t->pixel_clock = pck;
 	}
 
-	dispc_mgr_set_lcd_timings(dssdev->manager->id, t);
+	dss_mgr_set_timings(dssdev->manager, t);
 
 	return 0;
 }
@@ -202,10 +202,6 @@
 			goto err_reg_enable;
 	}
 
-	r = dss_runtime_get();
-	if (r)
-		goto err_get_dss;
-
 	r = dispc_runtime_get();
 	if (r)
 		goto err_get_dispc;
@@ -244,8 +240,6 @@
 err_get_dsi:
 	dispc_runtime_put();
 err_get_dispc:
-	dss_runtime_put();
-err_get_dss:
 	if (cpu_is_omap34xx())
 		regulator_disable(dpi.vdds_dsi_reg);
 err_reg_enable:
@@ -266,7 +260,6 @@
 	}
 
 	dispc_runtime_put();
-	dss_runtime_put();
 
 	if (cpu_is_omap34xx())
 		regulator_disable(dpi.vdds_dsi_reg);
@@ -283,21 +276,15 @@
 	DSSDBG("dpi_set_timings\n");
 	dssdev->panel.timings = *timings;
 	if (dssdev->state == OMAP_DSS_DISPLAY_ACTIVE) {
-		r = dss_runtime_get();
+		r = dispc_runtime_get();
 		if (r)
 			return;
 
-		r = dispc_runtime_get();
-		if (r) {
-			dss_runtime_put();
-			return;
-		}
-
 		dpi_set_mode(dssdev);
-		dispc_mgr_go(dssdev->manager->id);
 
 		dispc_runtime_put();
-		dss_runtime_put();
+	} else {
+		dss_mgr_set_timings(dssdev->manager, timings);
 	}
 }
 EXPORT_SYMBOL(dpi_set_timings);
@@ -312,7 +299,7 @@
 	unsigned long pck;
 	struct dispc_clock_info dispc_cinfo;
 
-	if (!dispc_lcd_timings_ok(timings))
+	if (dss_mgr_check_timings(dssdev->manager, timings))
 		return -EINVAL;
 
 	if (timings->pixel_clock == 0)
@@ -352,7 +339,7 @@
 }
 EXPORT_SYMBOL(dpi_check_timings);
 
-int dpi_init_display(struct omap_dss_device *dssdev)
+static int __init dpi_init_display(struct omap_dss_device *dssdev)
 {
 	DSSDBG("init_display\n");
 
@@ -378,12 +365,58 @@
 	return 0;
 }
 
-int dpi_init(void)
+static void __init dpi_probe_pdata(struct platform_device *pdev)
 {
+	struct omap_dss_board_info *pdata = pdev->dev.platform_data;
+	int i, r;
+
+	for (i = 0; i < pdata->num_devices; ++i) {
+		struct omap_dss_device *dssdev = pdata->devices[i];
+
+		if (dssdev->type != OMAP_DISPLAY_TYPE_DPI)
+			continue;
+
+		r = dpi_init_display(dssdev);
+		if (r) {
+			DSSERR("device %s init failed: %d\n", dssdev->name, r);
+			continue;
+		}
+
+		r = omap_dss_register_device(dssdev, &pdev->dev, i);
+		if (r)
+			DSSERR("device %s register failed: %d\n",
+					dssdev->name, r);
+	}
+}
+
+static int __init omap_dpi_probe(struct platform_device *pdev)
+{
+	dpi_probe_pdata(pdev);
+
 	return 0;
 }
 
-void dpi_exit(void)
+static int __exit omap_dpi_remove(struct platform_device *pdev)
 {
+	omap_dss_unregister_child_devices(&pdev->dev);
+
+	return 0;
 }
 
+static struct platform_driver omap_dpi_driver = {
+	.remove         = __exit_p(omap_dpi_remove),
+	.driver         = {
+		.name   = "omapdss_dpi",
+		.owner  = THIS_MODULE,
+	},
+};
+
+int __init dpi_init_platform_driver(void)
+{
+	return platform_driver_probe(&omap_dpi_driver, omap_dpi_probe);
+}
+
+void __exit dpi_uninit_platform_driver(void)
+{
+	platform_driver_unregister(&omap_dpi_driver);
+}
diff --git a/drivers/video/omap2/dss/dsi.c b/drivers/video/omap2/dss/dsi.c
index 210a3c4..ca8382d 100644
--- a/drivers/video/omap2/dss/dsi.c
+++ b/drivers/video/omap2/dss/dsi.c
@@ -256,14 +256,13 @@
 	struct platform_device *pdev;
 	void __iomem	*base;
 
+	int module_id;
+
 	int irq;
 
 	struct clk *dss_clk;
 	struct clk *sys_clk;
 
-	int (*enable_pads)(int dsi_id, unsigned lane_mask);
-	void (*disable_pads)(int dsi_id, unsigned lane_mask);
-
 	struct dsi_clock_info current_cinfo;
 
 	bool vdds_dsi_enabled;
@@ -361,11 +360,6 @@
 	return dsi_pdev_map[module];
 }
 
-static inline int dsi_get_dsidev_id(struct platform_device *dsidev)
-{
-	return dsidev->id;
-}
-
 static inline void dsi_write_reg(struct platform_device *dsidev,
 		const struct dsi_reg idx, u32 val)
 {
@@ -452,6 +446,7 @@
 		return 16;
 	default:
 		BUG();
+		return 0;
 	}
 }
 
@@ -1184,10 +1179,9 @@
 static unsigned long dsi_fclk_rate(struct platform_device *dsidev)
 {
 	unsigned long r;
-	int dsi_module = dsi_get_dsidev_id(dsidev);
 	struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);
 
-	if (dss_get_dsi_clk_source(dsi_module) == OMAP_DSS_CLK_SRC_FCK) {
+	if (dss_get_dsi_clk_source(dsi->module_id) == OMAP_DSS_CLK_SRC_FCK) {
 		/* DSI FCLK source is DSS_CLK_FCK */
 		r = clk_get_rate(dsi->dss_clk);
 	} else {
@@ -1279,10 +1273,9 @@
 }
 
 /* calculate clock rates using dividers in cinfo */
-static int dsi_calc_clock_rates(struct omap_dss_device *dssdev,
+static int dsi_calc_clock_rates(struct platform_device *dsidev,
 		struct dsi_clock_info *cinfo)
 {
-	struct platform_device *dsidev = dsi_get_dsidev_from_dssdev(dssdev);
 	struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);
 
 	if (cinfo->regn == 0 || cinfo->regn > dsi->regn_max)
@@ -1297,21 +1290,8 @@
 	if (cinfo->regm_dsi > dsi->regm_dsi_max)
 		return -EINVAL;
 
-	if (cinfo->use_sys_clk) {
-		cinfo->clkin = clk_get_rate(dsi->sys_clk);
-		/* XXX it is unclear if highfreq should be used
-		 * with DSS_SYS_CLK source also */
-		cinfo->highfreq = 0;
-	} else {
-		cinfo->clkin = dispc_mgr_pclk_rate(dssdev->manager->id);
-
-		if (cinfo->clkin < 32000000)
-			cinfo->highfreq = 0;
-		else
-			cinfo->highfreq = 1;
-	}
-
-	cinfo->fint = cinfo->clkin / (cinfo->regn * (cinfo->highfreq ? 2 : 1));
+	cinfo->clkin = clk_get_rate(dsi->sys_clk);
+	cinfo->fint = cinfo->clkin / cinfo->regn;
 
 	if (cinfo->fint > dsi->fint_max || cinfo->fint < dsi->fint_min)
 		return -EINVAL;
@@ -1378,27 +1358,21 @@
 
 	memset(&cur, 0, sizeof(cur));
 	cur.clkin = dss_sys_clk;
-	cur.use_sys_clk = 1;
-	cur.highfreq = 0;
 
-	/* no highfreq: 0.75MHz < Fint = clkin / regn < 2.1MHz */
-	/* highfreq: 0.75MHz < Fint = clkin / (2*regn) < 2.1MHz */
+	/* 0.75MHz < Fint = clkin / regn < 2.1MHz */
 	/* To reduce PLL lock time, keep Fint high (around 2 MHz) */
 	for (cur.regn = 1; cur.regn < dsi->regn_max; ++cur.regn) {
-		if (cur.highfreq == 0)
-			cur.fint = cur.clkin / cur.regn;
-		else
-			cur.fint = cur.clkin / (2 * cur.regn);
+		cur.fint = cur.clkin / cur.regn;
 
 		if (cur.fint > dsi->fint_max || cur.fint < dsi->fint_min)
 			continue;
 
-		/* DSIPHY(MHz) = (2 * regm / regn) * (clkin / (highfreq + 1)) */
+		/* DSIPHY(MHz) = (2 * regm / regn) * clkin */
 		for (cur.regm = 1; cur.regm < dsi->regm_max; ++cur.regm) {
 			unsigned long a, b;
 
 			a = 2 * cur.regm * (cur.clkin/1000);
-			b = cur.regn * (cur.highfreq + 1);
+			b = cur.regn;
 			cur.clkin4ddr = a / b * 1000;
 
 			if (cur.clkin4ddr > 1800 * 1000 * 1000)
@@ -1486,9 +1460,7 @@
 
 	DSSDBGF();
 
-	dsi->current_cinfo.use_sys_clk = cinfo->use_sys_clk;
-	dsi->current_cinfo.highfreq = cinfo->highfreq;
-
+	dsi->current_cinfo.clkin = cinfo->clkin;
 	dsi->current_cinfo.fint = cinfo->fint;
 	dsi->current_cinfo.clkin4ddr = cinfo->clkin4ddr;
 	dsi->current_cinfo.dsi_pll_hsdiv_dispc_clk =
@@ -1503,17 +1475,13 @@
 
 	DSSDBG("DSI Fint %ld\n", cinfo->fint);
 
-	DSSDBG("clkin (%s) rate %ld, highfreq %d\n",
-			cinfo->use_sys_clk ? "dss_sys_clk" : "pclkfree",
-			cinfo->clkin,
-			cinfo->highfreq);
+	DSSDBG("clkin rate %ld\n", cinfo->clkin);
 
 	/* DSIPHY == CLKIN4DDR */
-	DSSDBG("CLKIN4DDR = 2 * %d / %d * %lu / %d = %lu\n",
+	DSSDBG("CLKIN4DDR = 2 * %d / %d * %lu = %lu\n",
 			cinfo->regm,
 			cinfo->regn,
 			cinfo->clkin,
-			cinfo->highfreq + 1,
 			cinfo->clkin4ddr);
 
 	DSSDBG("Data rate on 1 DSI lane %ld Mbps\n",
@@ -1568,10 +1536,6 @@
 
 	if (dss_has_feature(FEAT_DSI_PLL_FREQSEL))
 		l = FLD_MOD(l, f, 4, 1);	/* DSI_PLL_FREQSEL */
-	l = FLD_MOD(l, cinfo->use_sys_clk ? 0 : 1,
-			11, 11);		/* DSI_PLL_CLKSEL */
-	l = FLD_MOD(l, cinfo->highfreq,
-			12, 12);		/* DSI_PLL_HIGHFREQ */
 	l = FLD_MOD(l, 1, 13, 13);		/* DSI_PLL_REFEN */
 	l = FLD_MOD(l, 0, 14, 14);		/* DSIPHY_CLKINEN */
 	l = FLD_MOD(l, 1, 20, 20);		/* DSI_HSDIVBYPASS */
@@ -1716,7 +1680,7 @@
 	struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);
 	struct dsi_clock_info *cinfo = &dsi->current_cinfo;
 	enum omap_dss_clk_source dispc_clk_src, dsi_clk_src;
-	int dsi_module = dsi_get_dsidev_id(dsidev);
+	int dsi_module = dsi->module_id;
 
 	dispc_clk_src = dss_get_dispc_clk_source();
 	dsi_clk_src = dss_get_dsi_clk_source(dsi_module);
@@ -1726,8 +1690,7 @@
 
 	seq_printf(s,	"- DSI%d PLL -\n", dsi_module + 1);
 
-	seq_printf(s,	"dsi pll source = %s\n",
-			cinfo->use_sys_clk ? "dss_sys_clk" : "pclkfree");
+	seq_printf(s,	"dsi pll clkin\t%lu\n", cinfo->clkin);
 
 	seq_printf(s,	"Fint\t\t%-16luregn %u\n", cinfo->fint, cinfo->regn);
 
@@ -1789,7 +1752,6 @@
 	struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);
 	unsigned long flags;
 	struct dsi_irq_stats stats;
-	int dsi_module = dsi_get_dsidev_id(dsidev);
 
 	spin_lock_irqsave(&dsi->irq_stats_lock, flags);
 
@@ -1806,7 +1768,7 @@
 #define PIS(x) \
 	seq_printf(s, "%-20s %10d\n", #x, stats.dsi_irqs[ffs(DSI_IRQ_##x)-1]);
 
-	seq_printf(s, "-- DSI%d interrupts --\n", dsi_module + 1);
+	seq_printf(s, "-- DSI%d interrupts --\n", dsi->module_id + 1);
 	PIS(VC0);
 	PIS(VC1);
 	PIS(VC2);
@@ -1886,22 +1848,6 @@
 
 	dsi_dump_dsidev_irqs(dsidev, s);
 }
-
-void dsi_create_debugfs_files_irq(struct dentry *debugfs_dir,
-		const struct file_operations *debug_fops)
-{
-	struct platform_device *dsidev;
-
-	dsidev = dsi_get_dsidev_from_id(0);
-	if (dsidev)
-		debugfs_create_file("dsi1_irqs", S_IRUGO, debugfs_dir,
-			&dsi1_dump_irqs, debug_fops);
-
-	dsidev = dsi_get_dsidev_from_id(1);
-	if (dsidev)
-		debugfs_create_file("dsi2_irqs", S_IRUGO, debugfs_dir,
-			&dsi2_dump_irqs, debug_fops);
-}
 #endif
 
 static void dsi_dump_dsidev_regs(struct platform_device *dsidev,
@@ -2002,21 +1948,6 @@
 	dsi_dump_dsidev_regs(dsidev, s);
 }
 
-void dsi_create_debugfs_files_reg(struct dentry *debugfs_dir,
-		const struct file_operations *debug_fops)
-{
-	struct platform_device *dsidev;
-
-	dsidev = dsi_get_dsidev_from_id(0);
-	if (dsidev)
-		debugfs_create_file("dsi1_regs", S_IRUGO, debugfs_dir,
-			&dsi1_dump_regs, debug_fops);
-
-	dsidev = dsi_get_dsidev_from_id(1);
-	if (dsidev)
-		debugfs_create_file("dsi2_regs", S_IRUGO, debugfs_dir,
-			&dsi2_dump_regs, debug_fops);
-}
 enum dsi_cio_power_state {
 	DSI_COMPLEXIO_POWER_OFF		= 0x0,
 	DSI_COMPLEXIO_POWER_ON		= 0x1,
@@ -2073,6 +2004,7 @@
 		return 1365 * 3;	/* 1365x24 bits */
 	default:
 		BUG();
+		return 0;
 	}
 }
 
@@ -2337,7 +2269,7 @@
 
 	DSSDBGF();
 
-	r = dsi->enable_pads(dsidev->id, dsi_get_lane_mask(dssdev));
+	r = dss_dsi_enable_pads(dsi->module_id, dsi_get_lane_mask(dssdev));
 	if (r)
 		return r;
 
@@ -2447,7 +2379,7 @@
 		dsi_cio_disable_lane_override(dsidev);
 err_scp_clk_dom:
 	dsi_disable_scp_clk(dsidev);
-	dsi->disable_pads(dsidev->id, dsi_get_lane_mask(dssdev));
+	dss_dsi_disable_pads(dsi->module_id, dsi_get_lane_mask(dssdev));
 	return r;
 }
 
@@ -2461,7 +2393,7 @@
 
 	dsi_cio_power(dsidev, DSI_COMPLEXIO_POWER_OFF);
 	dsi_disable_scp_clk(dsidev);
-	dsi->disable_pads(dsidev->id, dsi_get_lane_mask(dssdev));
+	dss_dsi_disable_pads(dsi->module_id, dsi_get_lane_mask(dssdev));
 }
 
 static void dsi_config_tx_fifo(struct platform_device *dsidev,
@@ -2485,6 +2417,7 @@
 		if (add + size > 4) {
 			DSSERR("Illegal FIFO configuration\n");
 			BUG();
+			return;
 		}
 
 		v = FLD_VAL(add, 2, 0) | FLD_VAL(size, 7, 4);
@@ -2517,6 +2450,7 @@
 		if (add + size > 4) {
 			DSSERR("Illegal FIFO configuration\n");
 			BUG();
+			return;
 		}
 
 		v = FLD_VAL(add, 2, 0) | FLD_VAL(size, 7, 4);
@@ -2658,6 +2592,7 @@
 		return dsi_sync_vc_l4(dsidev, channel);
 	default:
 		BUG();
+		return -EINVAL;
 	}
 }
 
@@ -3226,6 +3161,7 @@
 		data = reqdata[0] | (reqdata[1] << 8);
 	} else {
 		BUG();
+		return -EINVAL;
 	}
 
 	r = dsi_vc_send_short(dsidev, channel, data_type, data, 0);
@@ -3340,7 +3276,6 @@
 		goto err;
 	}
 
-	BUG();
 err:
 	DSSERR("dsi_vc_read_rx_fifo(ch %d type %s) failed\n", channel,
 		type == DSS_DSI_CONTENT_GENERIC ? "GENERIC" : "DCS");
@@ -3735,6 +3670,186 @@
 	dsi_write_reg(dsidev, DSI_CTRL, r);
 }
 
+/*
+ * According to section 'HS Command Mode Interleaving' in OMAP TRM, Scenario 3
+ * results in maximum transition time for data and clock lanes to enter and
+ * exit HS mode. Hence, this is the scenario where the least amount of command
+ * mode data can be interleaved. We program the minimum amount of TXBYTECLKHS
+ * clock cycles that can be used to interleave command mode data in HS so that
+ * all scenarios are satisfied.
+ */
+static int dsi_compute_interleave_hs(int blank, bool ddr_alwon, int enter_hs,
+		int exit_hs, int exiths_clk, int ddr_pre, int ddr_post)
+{
+	int transition;
+
+	/*
+	 * If DDR_CLK_ALWAYS_ON is set, we need to consider HS mode transition
+	 * time of data lanes only; if it isn't set, we need to consider HS
+	 * transition time of both data and clock lanes. HS transition time
+	 * of Scenario 3 is considered.
+	 */
+	if (ddr_alwon) {
+		transition = enter_hs + exit_hs + max(enter_hs, 2) + 1;
+	} else {
+		int trans1, trans2;
+		trans1 = ddr_pre + enter_hs + exit_hs + max(enter_hs, 2) + 1;
+		trans2 = ddr_pre + enter_hs + exiths_clk + ddr_post + ddr_pre +
+				enter_hs + 1;
+		transition = max(trans1, trans2);
+	}
+
+	return blank > transition ? blank - transition : 0;
+}
+
+/*
+ * According to section 'LP Command Mode Interleaving' in OMAP TRM, Scenario 1
+ * results in maximum transition time for data lanes to enter and exit LP mode.
+ * Hence, this is the scenario where the least amount of command mode data can
+ * be interleaved. We program the minimum amount of bytes that can be
+ * interleaved in LP so that all scenarios are satisfied.
+ */
+static int dsi_compute_interleave_lp(int blank, int enter_hs, int exit_hs,
+		int lp_clk_div, int tdsi_fclk)
+{
+	int trans_lp;	/* time required for a LP transition, in TXBYTECLKHS */
+	int tlp_avail;	/* time left for interleaving commands, in CLKIN4DDR */
+	int ttxclkesc;	/* period of LP transmit escape clock, in CLKIN4DDR */
+	int thsbyte_clk = 16;	/* Period of TXBYTECLKHS clock, in CLKIN4DDR */
+	int lp_inter;	/* cmd mode data that can be interleaved, in bytes */
+
+	/* maximum LP transition time according to Scenario 1 */
+	trans_lp = exit_hs + max(enter_hs, 2) + 1;
+
+	/* CLKIN4DDR = 16 * TXBYTECLKHS */
+	tlp_avail = thsbyte_clk * (blank - trans_lp);
+
+	ttxclkesc = tdsi_fclk * lp_clk_div;
+
+	lp_inter = ((tlp_avail - 8 * thsbyte_clk - 5 * tdsi_fclk) / ttxclkesc -
+			26) / 16;
+
+	return max(lp_inter, 0);
+}
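
As an editorial aside (not part of the patch itself), the arithmetic of the two helpers above can be checked in isolation. The sketch below mirrors their formulas; every timing value in it is hypothetical and chosen only to give a readable worked example.

/* Standalone sketch of the interleave math introduced above; all inputs are
 * hypothetical and only the formulas mirror the patch. */
#include <stdio.h>

#define MAX(a, b) ((a) > (b) ? (a) : (b))

static int interleave_hs(int blank, int ddr_alwon, int enter_hs, int exit_hs,
			 int exiths_clk, int ddr_pre, int ddr_post)
{
	int transition;

	if (ddr_alwon) {
		transition = enter_hs + exit_hs + MAX(enter_hs, 2) + 1;
	} else {
		int trans1 = ddr_pre + enter_hs + exit_hs + MAX(enter_hs, 2) + 1;
		int trans2 = ddr_pre + enter_hs + exiths_clk + ddr_post +
			     ddr_pre + enter_hs + 1;
		transition = MAX(trans1, trans2);
	}

	return blank > transition ? blank - transition : 0;
}

static int interleave_lp(int blank, int enter_hs, int exit_hs, int lp_clk_div,
			 int tdsi_fclk)
{
	int thsbyte_clk = 16;	/* CLKIN4DDR = 16 * TXBYTECLKHS */
	int trans_lp = exit_hs + MAX(enter_hs, 2) + 1;
	int tlp_avail = thsbyte_clk * (blank - trans_lp);
	int ttxclkesc = tdsi_fclk * lp_clk_div;
	int lp_inter = ((tlp_avail - 8 * thsbyte_clk - 5 * tdsi_fclk) /
			ttxclkesc - 26) / 16;

	return MAX(lp_inter, 0);
}

int main(void)
{
	/* hypothetical HFP of 200 TXBYTECLKHS, DDR clock gated between bursts */
	printf("HS interleave: %d TXBYTECLKHS\n",
	       interleave_hs(200, 0, 7, 10, 20, 8, 15));	/* prints 134 */
	/* hypothetical lp_clk_div = 10, dsi_fclk period = 4 CLKIN4DDR */
	printf("LP interleave: %d bytes\n",
	       interleave_lp(200, 7, 10, 10, 4));		/* prints 2 */
	return 0;
}

With DDR_CLK_ALWAYS_ON cleared, the longer of the two HS transition scenarios is charged against the blanking period, which is why the ddr_pre/ddr_post terms only appear in that branch.
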
+
+static void dsi_config_cmd_mode_interleaving(struct omap_dss_device *dssdev)
+{
+	struct platform_device *dsidev = dsi_get_dsidev_from_dssdev(dssdev);
+	struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);
+	int blanking_mode;
+	int hfp_blanking_mode, hbp_blanking_mode, hsa_blanking_mode;
+	int hsa, hfp, hbp, width_bytes, bllp, lp_clk_div;
+	int ddr_clk_pre, ddr_clk_post, enter_hs_mode_lat, exit_hs_mode_lat;
+	int tclk_trail, ths_exit, exiths_clk;
+	bool ddr_alwon;
+	struct omap_video_timings *timings = &dssdev->panel.timings;
+	int bpp = dsi_get_pixel_size(dssdev->panel.dsi_pix_fmt);
+	int ndl = dsi->num_lanes_used - 1;
+	int dsi_fclk_hsdiv = dssdev->clocks.dsi.regm_dsi + 1;
+	int hsa_interleave_hs = 0, hsa_interleave_lp = 0;
+	int hfp_interleave_hs = 0, hfp_interleave_lp = 0;
+	int hbp_interleave_hs = 0, hbp_interleave_lp = 0;
+	int bl_interleave_hs = 0, bl_interleave_lp = 0;
+	u32 r;
+
+	r = dsi_read_reg(dsidev, DSI_CTRL);
+	blanking_mode = FLD_GET(r, 20, 20);
+	hfp_blanking_mode = FLD_GET(r, 21, 21);
+	hbp_blanking_mode = FLD_GET(r, 22, 22);
+	hsa_blanking_mode = FLD_GET(r, 23, 23);
+
+	r = dsi_read_reg(dsidev, DSI_VM_TIMING1);
+	hbp = FLD_GET(r, 11, 0);
+	hfp = FLD_GET(r, 23, 12);
+	hsa = FLD_GET(r, 31, 24);
+
+	r = dsi_read_reg(dsidev, DSI_CLK_TIMING);
+	ddr_clk_post = FLD_GET(r, 7, 0);
+	ddr_clk_pre = FLD_GET(r, 15, 8);
+
+	r = dsi_read_reg(dsidev, DSI_VM_TIMING7);
+	exit_hs_mode_lat = FLD_GET(r, 15, 0);
+	enter_hs_mode_lat = FLD_GET(r, 31, 16);
+
+	r = dsi_read_reg(dsidev, DSI_CLK_CTRL);
+	lp_clk_div = FLD_GET(r, 12, 0);
+	ddr_alwon = FLD_GET(r, 13, 13);
+
+	r = dsi_read_reg(dsidev, DSI_DSIPHY_CFG0);
+	ths_exit = FLD_GET(r, 7, 0);
+
+	r = dsi_read_reg(dsidev, DSI_DSIPHY_CFG1);
+	tclk_trail = FLD_GET(r, 15, 8);
+
+	exiths_clk = ths_exit + tclk_trail;
+
+	width_bytes = DIV_ROUND_UP(timings->x_res * bpp, 8);
+	bllp = hbp + hfp + hsa + DIV_ROUND_UP(width_bytes + 6, ndl);
+
+	if (!hsa_blanking_mode) {
+		hsa_interleave_hs = dsi_compute_interleave_hs(hsa, ddr_alwon,
+					enter_hs_mode_lat, exit_hs_mode_lat,
+					exiths_clk, ddr_clk_pre, ddr_clk_post);
+		hsa_interleave_lp = dsi_compute_interleave_lp(hsa,
+					enter_hs_mode_lat, exit_hs_mode_lat,
+					lp_clk_div, dsi_fclk_hsdiv);
+	}
+
+	if (!hfp_blanking_mode) {
+		hfp_interleave_hs = dsi_compute_interleave_hs(hfp, ddr_alwon,
+					enter_hs_mode_lat, exit_hs_mode_lat,
+					exiths_clk, ddr_clk_pre, ddr_clk_post);
+		hfp_interleave_lp = dsi_compute_interleave_lp(hfp,
+					enter_hs_mode_lat, exit_hs_mode_lat,
+					lp_clk_div, dsi_fclk_hsdiv);
+	}
+
+	if (!hbp_blanking_mode) {
+		hbp_interleave_hs = dsi_compute_interleave_hs(hbp, ddr_alwon,
+					enter_hs_mode_lat, exit_hs_mode_lat,
+					exiths_clk, ddr_clk_pre, ddr_clk_post);
+
+		hbp_interleave_lp = dsi_compute_interleave_lp(hbp,
+					enter_hs_mode_lat, exit_hs_mode_lat,
+					lp_clk_div, dsi_fclk_hsdiv);
+	}
+
+	if (!blanking_mode) {
+		bl_interleave_hs = dsi_compute_interleave_hs(bllp, ddr_alwon,
+					enter_hs_mode_lat, exit_hs_mode_lat,
+					exiths_clk, ddr_clk_pre, ddr_clk_post);
+
+		bl_interleave_lp = dsi_compute_interleave_lp(bllp,
+					enter_hs_mode_lat, exit_hs_mode_lat,
+					lp_clk_div, dsi_fclk_hsdiv);
+	}
+
+	DSSDBG("DSI HS interleaving(TXBYTECLKHS) HSA %d, HFP %d, HBP %d, BLLP %d\n",
+		hsa_interleave_hs, hfp_interleave_hs, hbp_interleave_hs,
+		bl_interleave_hs);
+
+	DSSDBG("DSI LP interleaving(bytes) HSA %d, HFP %d, HBP %d, BLLP %d\n",
+		hsa_interleave_lp, hfp_interleave_lp, hbp_interleave_lp,
+		bl_interleave_lp);
+
+	r = dsi_read_reg(dsidev, DSI_VM_TIMING4);
+	r = FLD_MOD(r, hsa_interleave_hs, 23, 16);
+	r = FLD_MOD(r, hfp_interleave_hs, 15, 8);
+	r = FLD_MOD(r, hbp_interleave_hs, 7, 0);
+	dsi_write_reg(dsidev, DSI_VM_TIMING4, r);
+
+	r = dsi_read_reg(dsidev, DSI_VM_TIMING5);
+	r = FLD_MOD(r, hsa_interleave_lp, 23, 16);
+	r = FLD_MOD(r, hfp_interleave_lp, 15, 8);
+	r = FLD_MOD(r, hbp_interleave_lp, 7, 0);
+	dsi_write_reg(dsidev, DSI_VM_TIMING5, r);
+
+	r = dsi_read_reg(dsidev, DSI_VM_TIMING6);
+	r = FLD_MOD(r, bl_interleave_hs, 31, 15);
+	r = FLD_MOD(r, bl_interleave_lp, 16, 0);
+	dsi_write_reg(dsidev, DSI_VM_TIMING6, r);
+}
+
 static int dsi_proto_config(struct omap_dss_device *dssdev)
 {
 	struct platform_device *dsidev = dsi_get_dsidev_from_dssdev(dssdev);
@@ -3769,6 +3884,7 @@
 		break;
 	default:
 		BUG();
+		return -EINVAL;
 	}
 
 	r = dsi_read_reg(dsidev, DSI_CTRL);
@@ -3793,6 +3909,7 @@
 	if (dssdev->panel.dsi_mode == OMAP_DSS_DSI_VIDEO_MODE) {
 		dsi_config_vp_sync_events(dssdev);
 		dsi_config_blanking_modes(dssdev);
+		dsi_config_cmd_mode_interleaving(dssdev);
 	}
 
 	dsi_vc_initial_config(dsidev, 0);
@@ -4008,6 +4125,7 @@
 			break;
 		default:
 			BUG();
+			return -EINVAL;
 		};
 
 		dsi_if_enable(dsidev, false);
@@ -4192,10 +4310,6 @@
 	__cancel_delayed_work(&dsi->framedone_timeout_work);
 
 	dsi_handle_framedone(dsidev, 0);
-
-#ifdef CONFIG_OMAP2_DSS_FAKE_VSYNC
-	dispc_fake_vsync_irq();
-#endif
 }
 
 int omap_dsi_update(struct omap_dss_device *dssdev, int channel,
@@ -4259,13 +4373,12 @@
 		dispc_mgr_enable_stallmode(dssdev->manager->id, true);
 		dispc_mgr_enable_fifohandcheck(dssdev->manager->id, 1);
 
-		dispc_mgr_set_lcd_timings(dssdev->manager->id, &timings);
+		dss_mgr_set_timings(dssdev->manager, &timings);
 	} else {
 		dispc_mgr_enable_stallmode(dssdev->manager->id, false);
 		dispc_mgr_enable_fifohandcheck(dssdev->manager->id, 0);
 
-		dispc_mgr_set_lcd_timings(dssdev->manager->id,
-			&dssdev->panel.timings);
+		dss_mgr_set_timings(dssdev->manager, &dssdev->panel.timings);
 	}
 
 		dispc_mgr_set_lcd_display_type(dssdev->manager->id,
@@ -4294,13 +4407,11 @@
 	struct dsi_clock_info cinfo;
 	int r;
 
-	/* we always use DSS_CLK_SYSCK as input clock */
-	cinfo.use_sys_clk = true;
 	cinfo.regn  = dssdev->clocks.dsi.regn;
 	cinfo.regm  = dssdev->clocks.dsi.regm;
 	cinfo.regm_dispc = dssdev->clocks.dsi.regm_dispc;
 	cinfo.regm_dsi = dssdev->clocks.dsi.regm_dsi;
-	r = dsi_calc_clock_rates(dssdev, &cinfo);
+	r = dsi_calc_clock_rates(dsidev, &cinfo);
 	if (r) {
 		DSSERR("Failed to calc dsi clocks\n");
 		return r;
@@ -4345,7 +4456,7 @@
 static int dsi_display_init_dsi(struct omap_dss_device *dssdev)
 {
 	struct platform_device *dsidev = dsi_get_dsidev_from_dssdev(dssdev);
-	int dsi_module = dsi_get_dsidev_id(dsidev);
+	struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);
 	int r;
 
 	r = dsi_pll_init(dsidev, true, true);
@@ -4357,7 +4468,7 @@
 		goto err1;
 
 	dss_select_dispc_clk_source(dssdev->clocks.dispc.dispc_fclk_src);
-	dss_select_dsi_clk_source(dsi_module, dssdev->clocks.dsi.dsi_fclk_src);
+	dss_select_dsi_clk_source(dsi->module_id, dssdev->clocks.dsi.dsi_fclk_src);
 	dss_select_lcd_clk_source(dssdev->manager->id,
 			dssdev->clocks.dispc.channel.lcd_clk_src);
 
@@ -4396,7 +4507,7 @@
 	dsi_cio_uninit(dssdev);
 err2:
 	dss_select_dispc_clk_source(OMAP_DSS_CLK_SRC_FCK);
-	dss_select_dsi_clk_source(dsi_module, OMAP_DSS_CLK_SRC_FCK);
+	dss_select_dsi_clk_source(dsi->module_id, OMAP_DSS_CLK_SRC_FCK);
 	dss_select_lcd_clk_source(dssdev->manager->id, OMAP_DSS_CLK_SRC_FCK);
 
 err1:
@@ -4410,7 +4521,6 @@
 {
 	struct platform_device *dsidev = dsi_get_dsidev_from_dssdev(dssdev);
 	struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);
-	int dsi_module = dsi_get_dsidev_id(dsidev);
 
 	if (enter_ulps && !dsi->ulps_enabled)
 		dsi_enter_ulps(dsidev);
@@ -4423,7 +4533,7 @@
 	dsi_vc_enable(dsidev, 3, 0);
 
 	dss_select_dispc_clk_source(OMAP_DSS_CLK_SRC_FCK);
-	dss_select_dsi_clk_source(dsi_module, OMAP_DSS_CLK_SRC_FCK);
+	dss_select_dsi_clk_source(dsi->module_id, OMAP_DSS_CLK_SRC_FCK);
 	dss_select_lcd_clk_source(dssdev->manager->id, OMAP_DSS_CLK_SRC_FCK);
 	dsi_cio_uninit(dssdev);
 	dsi_pll_uninit(dsidev, disconnect_lanes);
@@ -4527,7 +4637,7 @@
 }
 EXPORT_SYMBOL(omapdss_dsi_enable_te);
 
-int dsi_init_display(struct omap_dss_device *dssdev)
+static int __init dsi_init_display(struct omap_dss_device *dssdev)
 {
 	struct platform_device *dsidev = dsi_get_dsidev_from_dssdev(dssdev);
 	struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);
@@ -4680,13 +4790,39 @@
 		clk_put(dsi->sys_clk);
 }
 
-/* DSI1 HW IP initialisation */
-static int omap_dsihw_probe(struct platform_device *dsidev)
+static void __init dsi_probe_pdata(struct platform_device *dsidev)
 {
-	struct omap_display_platform_data *dss_plat_data;
-	struct omap_dss_board_info *board_info;
+	struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);
+	struct omap_dss_board_info *pdata = dsidev->dev.platform_data;
+	int i, r;
+
+	for (i = 0; i < pdata->num_devices; ++i) {
+		struct omap_dss_device *dssdev = pdata->devices[i];
+
+		if (dssdev->type != OMAP_DISPLAY_TYPE_DSI)
+			continue;
+
+		if (dssdev->phy.dsi.module != dsi->module_id)
+			continue;
+
+		r = dsi_init_display(dssdev);
+		if (r) {
+			DSSERR("device %s init failed: %d\n", dssdev->name, r);
+			continue;
+		}
+
+		r = omap_dss_register_device(dssdev, &dsidev->dev, i);
+		if (r)
+			DSSERR("device %s register failed: %d\n",
+					dssdev->name, r);
+	}
+}
+
+/* DSI1 HW IP initialisation */
+static int __init omap_dsihw_probe(struct platform_device *dsidev)
+{
 	u32 rev;
-	int r, i, dsi_module = dsi_get_dsidev_id(dsidev);
+	int r, i;
 	struct resource *dsi_mem;
 	struct dsi_data *dsi;
 
@@ -4694,15 +4830,11 @@
 	if (!dsi)
 		return -ENOMEM;
 
+	dsi->module_id = dsidev->id;
 	dsi->pdev = dsidev;
-	dsi_pdev_map[dsi_module] = dsidev;
+	dsi_pdev_map[dsi->module_id] = dsidev;
 	dev_set_drvdata(&dsidev->dev, dsi);
 
-	dss_plat_data = dsidev->dev.platform_data;
-	board_info = dss_plat_data->board_data;
-	dsi->enable_pads = board_info->dsi_enable_pads;
-	dsi->disable_pads = board_info->dsi_disable_pads;
-
 	spin_lock_init(&dsi->irq_lock);
 	spin_lock_init(&dsi->errors_lock);
 	dsi->errors = 0;
@@ -4780,8 +4912,21 @@
 	else
 		dsi->num_lanes_supported = 3;
 
+	dsi_probe_pdata(dsidev);
+
 	dsi_runtime_put(dsidev);
 
+	if (dsi->module_id == 0)
+		dss_debugfs_create_file("dsi1_regs", dsi1_dump_regs);
+	else if (dsi->module_id == 1)
+		dss_debugfs_create_file("dsi2_regs", dsi2_dump_regs);
+
+#ifdef CONFIG_OMAP2_DSS_COLLECT_IRQ_STATS
+	if (dsi->module_id == 0)
+		dss_debugfs_create_file("dsi1_irqs", dsi1_dump_irqs);
+	else if (dsi->module_id == 1)
+		dss_debugfs_create_file("dsi2_irqs", dsi2_dump_irqs);
+#endif
 	return 0;
 
 err_runtime_get:
@@ -4790,12 +4935,14 @@
 	return r;
 }
 
-static int omap_dsihw_remove(struct platform_device *dsidev)
+static int __exit omap_dsihw_remove(struct platform_device *dsidev)
 {
 	struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);
 
 	WARN_ON(dsi->scp_clk_refcount > 0);
 
+	omap_dss_unregister_child_devices(&dsidev->dev);
+
 	pm_runtime_disable(&dsidev->dev);
 
 	dsi_put_clocks(dsidev);
@@ -4816,7 +4963,6 @@
 static int dsi_runtime_suspend(struct device *dev)
 {
 	dispc_runtime_put();
-	dss_runtime_put();
 
 	return 0;
 }
@@ -4825,20 +4971,11 @@
 {
 	int r;
 
-	r = dss_runtime_get();
-	if (r)
-		goto err_get_dss;
-
 	r = dispc_runtime_get();
 	if (r)
-		goto err_get_dispc;
+		return r;
 
 	return 0;
-
-err_get_dispc:
-	dss_runtime_put();
-err_get_dss:
-	return r;
 }
 
 static const struct dev_pm_ops dsi_pm_ops = {
@@ -4847,8 +4984,7 @@
 };
 
 static struct platform_driver omap_dsihw_driver = {
-	.probe          = omap_dsihw_probe,
-	.remove         = omap_dsihw_remove,
+	.remove         = __exit_p(omap_dsihw_remove),
 	.driver         = {
 		.name   = "omapdss_dsi",
 		.owner  = THIS_MODULE,
@@ -4856,12 +4992,12 @@
 	},
 };
 
-int dsi_init_platform_driver(void)
+int __init dsi_init_platform_driver(void)
 {
-	return platform_driver_register(&omap_dsihw_driver);
+	return platform_driver_probe(&omap_dsihw_driver, omap_dsihw_probe);
 }
 
-void dsi_uninit_platform_driver(void)
+void __exit dsi_uninit_platform_driver(void)
 {
-	return platform_driver_unregister(&omap_dsihw_driver);
+	platform_driver_unregister(&omap_dsihw_driver);
 }
diff --git a/drivers/video/omap2/dss/dss.c b/drivers/video/omap2/dss/dss.c
index bd2d5e1..7706323 100644
--- a/drivers/video/omap2/dss/dss.c
+++ b/drivers/video/omap2/dss/dss.c
@@ -62,6 +62,9 @@
 #define REG_FLD_MOD(idx, val, start, end) \
 	dss_write_reg(idx, FLD_MOD(dss_read_reg(idx), val, start, end))
 
+static int dss_runtime_get(void);
+static void dss_runtime_put(void);
+
 static struct {
 	struct platform_device *pdev;
 	void __iomem    *base;
@@ -277,7 +280,7 @@
 	dss_runtime_put();
 }
 
-void dss_dump_regs(struct seq_file *s)
+static void dss_dump_regs(struct seq_file *s)
 {
 #define DUMPREG(r) seq_printf(s, "%-35s %08x\n", #r, dss_read_reg(r))
 
@@ -322,6 +325,7 @@
 		break;
 	default:
 		BUG();
+		return;
 	}
 
 	dss_feat_get_reg_field(FEAT_REG_DISPC_CLK_SWITCH, &start, &end);
@@ -335,7 +339,7 @@
 		enum omap_dss_clk_source clk_src)
 {
 	struct platform_device *dsidev;
-	int b;
+	int b, pos;
 
 	switch (clk_src) {
 	case OMAP_DSS_CLK_SRC_FCK:
@@ -355,9 +359,11 @@
 		break;
 	default:
 		BUG();
+		return;
 	}
 
-	REG_FLD_MOD(DSS_CONTROL, b, 1, 1);	/* DSI_CLK_SWITCH */
+	pos = dsi_module == 0 ? 1 : 10;
+	REG_FLD_MOD(DSS_CONTROL, b, pos, pos);	/* DSIx_CLK_SWITCH */
 
 	dss.dsi_clk_source[dsi_module] = clk_src;
 }
@@ -389,6 +395,7 @@
 		break;
 	default:
 		BUG();
+		return;
 	}
 
 	pos = channel == OMAP_DSS_CHANNEL_LCD ? 0 : 12;
@@ -706,7 +713,7 @@
 	clk_put(dss.dss_clk);
 }
 
-int dss_runtime_get(void)
+static int dss_runtime_get(void)
 {
 	int r;
 
@@ -717,14 +724,14 @@
 	return r < 0 ? r : 0;
 }
 
-void dss_runtime_put(void)
+static void dss_runtime_put(void)
 {
 	int r;
 
 	DSSDBG("dss_runtime_put\n");
 
 	r = pm_runtime_put_sync(&dss.pdev->dev);
-	WARN_ON(r < 0);
+	WARN_ON(r < 0 && r != -EBUSY);
 }
 
 /* DEBUGFS */
@@ -740,7 +747,7 @@
 #endif
 
 /* DSS HW IP initialisation */
-static int omap_dsshw_probe(struct platform_device *pdev)
+static int __init omap_dsshw_probe(struct platform_device *pdev)
 {
 	struct resource *dss_mem;
 	u32 rev;
@@ -785,40 +792,24 @@
 	dss.lcd_clk_source[0] = OMAP_DSS_CLK_SRC_FCK;
 	dss.lcd_clk_source[1] = OMAP_DSS_CLK_SRC_FCK;
 
-	r = dpi_init();
-	if (r) {
-		DSSERR("Failed to initialize DPI\n");
-		goto err_dpi;
-	}
-
-	r = sdi_init();
-	if (r) {
-		DSSERR("Failed to initialize SDI\n");
-		goto err_sdi;
-	}
-
 	rev = dss_read_reg(DSS_REVISION);
 	printk(KERN_INFO "OMAP DSS rev %d.%d\n",
 			FLD_GET(rev, 7, 4), FLD_GET(rev, 3, 0));
 
 	dss_runtime_put();
 
+	dss_debugfs_create_file("dss", dss_dump_regs);
+
 	return 0;
-err_sdi:
-	dpi_exit();
-err_dpi:
-	dss_runtime_put();
+
 err_runtime_get:
 	pm_runtime_disable(&pdev->dev);
 	dss_put_clocks();
 	return r;
 }
 
-static int omap_dsshw_remove(struct platform_device *pdev)
+static int __exit omap_dsshw_remove(struct platform_device *pdev)
 {
-	dpi_exit();
-	sdi_exit();
-
 	pm_runtime_disable(&pdev->dev);
 
 	dss_put_clocks();
@@ -829,11 +820,24 @@
 static int dss_runtime_suspend(struct device *dev)
 {
 	dss_save_context();
+	dss_set_min_bus_tput(dev, 0);
 	return 0;
 }
 
 static int dss_runtime_resume(struct device *dev)
 {
+	int r;
+	/*
+	 * Set an arbitrarily high tput request to ensure OPP100.
+	 * What we should really do is to make a request to stay in OPP100,
+	 * without any tput requirements, but that is not currently possible
+	 * via the PM layer.
+	 */
+
+	r = dss_set_min_bus_tput(dev, 1000000000);
+	if (r)
+		return r;
+
 	dss_restore_context();
 	return 0;
 }
@@ -844,8 +848,7 @@
 };
 
 static struct platform_driver omap_dsshw_driver = {
-	.probe          = omap_dsshw_probe,
-	.remove         = omap_dsshw_remove,
+	.remove         = __exit_p(omap_dsshw_remove),
 	.driver         = {
 		.name   = "omapdss_dss",
 		.owner  = THIS_MODULE,
@@ -853,12 +856,12 @@
 	},
 };
 
-int dss_init_platform_driver(void)
+int __init dss_init_platform_driver(void)
 {
-	return platform_driver_register(&omap_dsshw_driver);
+	return platform_driver_probe(&omap_dsshw_driver, omap_dsshw_probe);
 }
 
 void dss_uninit_platform_driver(void)
 {
-	return platform_driver_unregister(&omap_dsshw_driver);
+	platform_driver_unregister(&omap_dsshw_driver);
 }
diff --git a/drivers/video/omap2/dss/dss.h b/drivers/video/omap2/dss/dss.h
index d4b3dff..dd1092c 100644
--- a/drivers/video/omap2/dss/dss.h
+++ b/drivers/video/omap2/dss/dss.h
@@ -150,9 +150,6 @@
 	u16 regm_dsi;	/* OMAP3: REGM4
 			 * OMAP4: REGM5 */
 	u16 lp_clk_div;
-
-	u8 highfreq;
-	bool use_sys_clk;
 };
 
 struct seq_file;
@@ -162,6 +159,16 @@
 struct bus_type *dss_get_bus(void);
 struct regulator *dss_get_vdds_dsi(void);
 struct regulator *dss_get_vdds_sdi(void);
+int dss_get_ctx_loss_count(struct device *dev);
+int dss_dsi_enable_pads(int dsi_id, unsigned lane_mask);
+void dss_dsi_disable_pads(int dsi_id, unsigned lane_mask);
+int dss_set_min_bus_tput(struct device *dev, unsigned long tput);
+int dss_debugfs_create_file(const char *name, void (*write)(struct seq_file *));
+
+int omap_dss_register_device(struct omap_dss_device *dssdev,
+		struct device *parent, int disp_num);
+void omap_dss_unregister_device(struct omap_dss_device *dssdev);
+void omap_dss_unregister_child_devices(struct device *parent);
 
 /* apply */
 void dss_apply_init(void);
@@ -179,6 +186,9 @@
 int dss_mgr_set_device(struct omap_overlay_manager *mgr,
 		struct omap_dss_device *dssdev);
 int dss_mgr_unset_device(struct omap_overlay_manager *mgr);
+void dss_mgr_set_timings(struct omap_overlay_manager *mgr,
+		struct omap_video_timings *timings);
+const struct omap_video_timings *dss_mgr_get_timings(struct omap_overlay_manager *mgr);
 
 bool dss_ovl_is_enabled(struct omap_overlay *ovl);
 int dss_ovl_enable(struct omap_overlay *ovl);
@@ -208,9 +218,11 @@
 void dss_uninit_overlay_managers(struct platform_device *pdev);
 int dss_mgr_simple_check(struct omap_overlay_manager *mgr,
 		const struct omap_overlay_manager_info *info);
+int dss_mgr_check_timings(struct omap_overlay_manager *mgr,
+		const struct omap_video_timings *timings);
 int dss_mgr_check(struct omap_overlay_manager *mgr,
-		struct omap_dss_device *dssdev,
 		struct omap_overlay_manager_info *info,
+		const struct omap_video_timings *mgr_timings,
 		struct omap_overlay_info **overlay_infos);
 
 /* overlay */
@@ -220,22 +232,18 @@
 void dss_recheck_connections(struct omap_dss_device *dssdev, bool force);
 int dss_ovl_simple_check(struct omap_overlay *ovl,
 		const struct omap_overlay_info *info);
-int dss_ovl_check(struct omap_overlay *ovl,
-		struct omap_overlay_info *info, struct omap_dss_device *dssdev);
+int dss_ovl_check(struct omap_overlay *ovl, struct omap_overlay_info *info,
+		const struct omap_video_timings *mgr_timings);
 
 /* DSS */
-int dss_init_platform_driver(void);
+int dss_init_platform_driver(void) __init;
 void dss_uninit_platform_driver(void);
 
-int dss_runtime_get(void);
-void dss_runtime_put(void);
-
 void dss_select_hdmi_venc_clk_source(enum dss_hdmi_venc_clk_source_select);
 enum dss_hdmi_venc_clk_source_select dss_get_hdmi_venc_clk_source(void);
 const char *dss_get_generic_clk_source_name(enum omap_dss_clk_source clk_src);
 void dss_dump_clocks(struct seq_file *s);
 
-void dss_dump_regs(struct seq_file *s);
 #if defined(CONFIG_DEBUG_FS) && defined(CONFIG_OMAP2_DSS_DEBUG_SUPPORT)
 void dss_debug_dump_clocks(struct seq_file *s);
 #endif
@@ -265,19 +273,8 @@
 		struct dispc_clock_info *dispc_cinfo);
 
 /* SDI */
-#ifdef CONFIG_OMAP2_DSS_SDI
-int sdi_init(void);
-void sdi_exit(void);
-int sdi_init_display(struct omap_dss_device *display);
-#else
-static inline int sdi_init(void)
-{
-	return 0;
-}
-static inline void sdi_exit(void)
-{
-}
-#endif
+int sdi_init_platform_driver(void) __init;
+void sdi_uninit_platform_driver(void) __exit;
 
 /* DSI */
 #ifdef CONFIG_OMAP2_DSS_DSI
@@ -285,19 +282,14 @@
 struct dentry;
 struct file_operations;
 
-int dsi_init_platform_driver(void);
-void dsi_uninit_platform_driver(void);
+int dsi_init_platform_driver(void) __init;
+void dsi_uninit_platform_driver(void) __exit;
 
 int dsi_runtime_get(struct platform_device *dsidev);
 void dsi_runtime_put(struct platform_device *dsidev);
 
 void dsi_dump_clocks(struct seq_file *s);
-void dsi_create_debugfs_files_irq(struct dentry *debugfs_dir,
-		const struct file_operations *debug_fops);
-void dsi_create_debugfs_files_reg(struct dentry *debugfs_dir,
-		const struct file_operations *debug_fops);
 
-int dsi_init_display(struct omap_dss_device *display);
 void dsi_irq_handler(void);
 u8 dsi_get_pixel_size(enum omap_dss_dsi_pixel_format fmt);
 
@@ -314,13 +306,6 @@
 void dsi_wait_pll_hsdiv_dsi_active(struct platform_device *dsidev);
 struct platform_device *dsi_get_dsidev_from_id(int module);
 #else
-static inline int dsi_init_platform_driver(void)
-{
-	return 0;
-}
-static inline void dsi_uninit_platform_driver(void)
-{
-}
 static inline int dsi_runtime_get(struct platform_device *dsidev)
 {
 	return 0;
@@ -377,28 +362,14 @@
 #endif
 
 /* DPI */
-#ifdef CONFIG_OMAP2_DSS_DPI
-int dpi_init(void);
-void dpi_exit(void);
-int dpi_init_display(struct omap_dss_device *dssdev);
-#else
-static inline int dpi_init(void)
-{
-	return 0;
-}
-static inline void dpi_exit(void)
-{
-}
-#endif
+int dpi_init_platform_driver(void) __init;
+void dpi_uninit_platform_driver(void) __exit;
 
 /* DISPC */
-int dispc_init_platform_driver(void);
-void dispc_uninit_platform_driver(void);
+int dispc_init_platform_driver(void) __init;
+void dispc_uninit_platform_driver(void) __exit;
 void dispc_dump_clocks(struct seq_file *s);
-void dispc_dump_irqs(struct seq_file *s);
-void dispc_dump_regs(struct seq_file *s);
 void dispc_irq_handler(void);
-void dispc_fake_vsync_irq(void);
 
 int dispc_runtime_get(void);
 void dispc_runtime_put(void);
@@ -409,12 +380,12 @@
 void dispc_lcd_enable_signal_polarity(bool act_high);
 void dispc_lcd_enable_signal(bool enable);
 void dispc_pck_free_enable(bool enable);
-void dispc_set_digit_size(u16 width, u16 height);
 void dispc_enable_fifomerge(bool enable);
 void dispc_enable_gamma_table(bool enable);
 void dispc_set_loadmode(enum omap_dss_load_mode mode);
 
-bool dispc_lcd_timings_ok(struct omap_video_timings *timings);
+bool dispc_mgr_timings_ok(enum omap_channel channel,
+		const struct omap_video_timings *timings);
 unsigned long dispc_fclk_rate(void);
 void dispc_find_clk_divs(bool is_tft, unsigned long req_pck, unsigned long fck,
 		struct dispc_clock_info *cinfo);
@@ -424,15 +395,16 @@
 
 void dispc_ovl_set_fifo_threshold(enum omap_plane plane, u32 low, u32 high);
 void dispc_ovl_compute_fifo_thresholds(enum omap_plane plane,
-		u32 *fifo_low, u32 *fifo_high, bool use_fifomerge);
+		u32 *fifo_low, u32 *fifo_high, bool use_fifomerge,
+		bool manual_update);
 int dispc_ovl_setup(enum omap_plane plane, struct omap_overlay_info *oi,
-		bool ilace, bool replication);
+		bool ilace, bool replication,
+		const struct omap_video_timings *mgr_timings);
 int dispc_ovl_enable(enum omap_plane plane, bool enable);
 void dispc_ovl_set_channel_out(enum omap_plane plane,
 		enum omap_channel channel);
 
 void dispc_mgr_enable_fifohandcheck(enum omap_channel channel, bool enable);
-void dispc_mgr_set_lcd_size(enum omap_channel channel, u16 width, u16 height);
 u32 dispc_mgr_get_vsync_irq(enum omap_channel channel);
 u32 dispc_mgr_get_framedone_irq(enum omap_channel channel);
 bool dispc_mgr_go_busy(enum omap_channel channel);
@@ -445,12 +417,13 @@
 void dispc_mgr_set_tft_data_lines(enum omap_channel channel, u8 data_lines);
 void dispc_mgr_set_lcd_display_type(enum omap_channel channel,
 		enum omap_lcd_display_type type);
-void dispc_mgr_set_lcd_timings(enum omap_channel channel,
+void dispc_mgr_set_timings(enum omap_channel channel,
 		struct omap_video_timings *timings);
 void dispc_mgr_set_pol_freq(enum omap_channel channel,
 		enum omap_panel_config config, u8 acbi, u8 acb);
 unsigned long dispc_mgr_lclk_rate(enum omap_channel channel);
 unsigned long dispc_mgr_pclk_rate(enum omap_channel channel);
+unsigned long dispc_core_clk_rate(void);
 int dispc_mgr_set_clock_div(enum omap_channel channel,
 		struct dispc_clock_info *cinfo);
 int dispc_mgr_get_clock_div(enum omap_channel channel,
@@ -460,19 +433,10 @@
 
 /* VENC */
 #ifdef CONFIG_OMAP2_DSS_VENC
-int venc_init_platform_driver(void);
-void venc_uninit_platform_driver(void);
-void venc_dump_regs(struct seq_file *s);
-int venc_init_display(struct omap_dss_device *display);
+int venc_init_platform_driver(void) __init;
+void venc_uninit_platform_driver(void) __exit;
 unsigned long venc_get_pixel_clock(void);
 #else
-static inline int venc_init_platform_driver(void)
-{
-	return 0;
-}
-static inline void venc_uninit_platform_driver(void)
-{
-}
 static inline unsigned long venc_get_pixel_clock(void)
 {
 	WARN("%s: VENC not compiled in, returning pclk as 0\n", __func__);
@@ -482,23 +446,10 @@
 
 /* HDMI */
 #ifdef CONFIG_OMAP4_DSS_HDMI
-int hdmi_init_platform_driver(void);
-void hdmi_uninit_platform_driver(void);
-int hdmi_init_display(struct omap_dss_device *dssdev);
+int hdmi_init_platform_driver(void) __init;
+void hdmi_uninit_platform_driver(void) __exit;
 unsigned long hdmi_get_pixel_clock(void);
-void hdmi_dump_regs(struct seq_file *s);
 #else
-static inline int hdmi_init_display(struct omap_dss_device *dssdev)
-{
-	return 0;
-}
-static inline int hdmi_init_platform_driver(void)
-{
-	return 0;
-}
-static inline void hdmi_uninit_platform_driver(void)
-{
-}
 static inline unsigned long hdmi_get_pixel_clock(void)
 {
 	WARN("%s: HDMI not compiled in, returning pclk as 0\n", __func__);
@@ -514,22 +465,18 @@
 bool omapdss_hdmi_detect(void);
 int hdmi_panel_init(void);
 void hdmi_panel_exit(void);
+#ifdef CONFIG_OMAP4_DSS_HDMI_AUDIO
+int hdmi_audio_enable(void);
+void hdmi_audio_disable(void);
+int hdmi_audio_start(void);
+void hdmi_audio_stop(void);
+bool hdmi_mode_has_audio(void);
+int hdmi_audio_config(struct omap_dss_audio *audio);
+#endif
 
 /* RFBI */
-#ifdef CONFIG_OMAP2_DSS_RFBI
-int rfbi_init_platform_driver(void);
-void rfbi_uninit_platform_driver(void);
-void rfbi_dump_regs(struct seq_file *s);
-int rfbi_init_display(struct omap_dss_device *display);
-#else
-static inline int rfbi_init_platform_driver(void)
-{
-	return 0;
-}
-static inline void rfbi_uninit_platform_driver(void)
-{
-}
-#endif
+int rfbi_init_platform_driver(void) __init;
+void rfbi_uninit_platform_driver(void) __exit;
 
 
 #ifdef CONFIG_OMAP2_DSS_COLLECT_IRQ_STATS
diff --git a/drivers/video/omap2/dss/dss_features.c b/drivers/video/omap2/dss/dss_features.c
index ce14aa6..9387097 100644
--- a/drivers/video/omap2/dss/dss_features.c
+++ b/drivers/video/omap2/dss/dss_features.c
@@ -52,6 +52,8 @@
 	const char * const *clksrc_names;
 	const struct dss_param_range *dss_params;
 
+	const enum omap_dss_rotation_type supported_rotation_types;
+
 	const u32 buffer_size_unit;
 	const u32 burst_size_unit;
 };
@@ -311,6 +313,8 @@
 	 * scaler cannot scale a image with width more than 768.
 	 */
 	[FEAT_PARAM_LINEWIDTH]			= { 1, 768 },
+	[FEAT_PARAM_MGR_WIDTH]			= { 1, 2048 },
+	[FEAT_PARAM_MGR_HEIGHT]			= { 1, 2048 },
 };
 
 static const struct dss_param_range omap3_dss_param_range[] = {
@@ -324,6 +328,8 @@
 	[FEAT_PARAM_DSIPLL_LPDIV]		= { 1, (1 << 13) - 1},
 	[FEAT_PARAM_DOWNSCALE]			= { 1, 4 },
 	[FEAT_PARAM_LINEWIDTH]			= { 1, 1024 },
+	[FEAT_PARAM_MGR_WIDTH]			= { 1, 2048 },
+	[FEAT_PARAM_MGR_HEIGHT]			= { 1, 2048 },
 };
 
 static const struct dss_param_range omap4_dss_param_range[] = {
@@ -337,6 +343,8 @@
 	[FEAT_PARAM_DSIPLL_LPDIV]		= { 0, (1 << 13) - 1 },
 	[FEAT_PARAM_DOWNSCALE]			= { 1, 4 },
 	[FEAT_PARAM_LINEWIDTH]			= { 1, 2048 },
+	[FEAT_PARAM_MGR_WIDTH]			= { 1, 2048 },
+	[FEAT_PARAM_MGR_HEIGHT]			= { 1, 2048 },
 };
 
 static const enum dss_feat_id omap2_dss_feat_list[] = {
@@ -399,6 +407,7 @@
 	FEAT_FIR_COEF_V,
 	FEAT_ALPHA_FREE_ZORDER,
 	FEAT_FIFO_MERGE,
+	FEAT_BURST_2D,
 };
 
 static const enum dss_feat_id omap4430_es2_0_1_2_dss_feat_list[] = {
@@ -416,6 +425,7 @@
 	FEAT_FIR_COEF_V,
 	FEAT_ALPHA_FREE_ZORDER,
 	FEAT_FIFO_MERGE,
+	FEAT_BURST_2D,
 };
 
 static const enum dss_feat_id omap4_dss_feat_list[] = {
@@ -434,6 +444,7 @@
 	FEAT_FIR_COEF_V,
 	FEAT_ALPHA_FREE_ZORDER,
 	FEAT_FIFO_MERGE,
+	FEAT_BURST_2D,
 };
 
 /* OMAP2 DSS Features */
@@ -451,6 +462,7 @@
 	.overlay_caps = omap2_dss_overlay_caps,
 	.clksrc_names = omap2_dss_clk_source_names,
 	.dss_params = omap2_dss_param_range,
+	.supported_rotation_types = OMAP_DSS_ROT_DMA | OMAP_DSS_ROT_VRFB,
 	.buffer_size_unit = 1,
 	.burst_size_unit = 8,
 };
@@ -470,6 +482,7 @@
 	.overlay_caps = omap3430_dss_overlay_caps,
 	.clksrc_names = omap3_dss_clk_source_names,
 	.dss_params = omap3_dss_param_range,
+	.supported_rotation_types = OMAP_DSS_ROT_DMA | OMAP_DSS_ROT_VRFB,
 	.buffer_size_unit = 1,
 	.burst_size_unit = 8,
 };
@@ -488,6 +501,7 @@
 	.overlay_caps = omap3630_dss_overlay_caps,
 	.clksrc_names = omap3_dss_clk_source_names,
 	.dss_params = omap3_dss_param_range,
+	.supported_rotation_types = OMAP_DSS_ROT_DMA | OMAP_DSS_ROT_VRFB,
 	.buffer_size_unit = 1,
 	.burst_size_unit = 8,
 };
@@ -508,6 +522,7 @@
 	.overlay_caps = omap4_dss_overlay_caps,
 	.clksrc_names = omap4_dss_clk_source_names,
 	.dss_params = omap4_dss_param_range,
+	.supported_rotation_types = OMAP_DSS_ROT_DMA | OMAP_DSS_ROT_TILER,
 	.buffer_size_unit = 16,
 	.burst_size_unit = 16,
 };
@@ -527,6 +542,7 @@
 	.overlay_caps = omap4_dss_overlay_caps,
 	.clksrc_names = omap4_dss_clk_source_names,
 	.dss_params = omap4_dss_param_range,
+	.supported_rotation_types = OMAP_DSS_ROT_DMA | OMAP_DSS_ROT_TILER,
 	.buffer_size_unit = 16,
 	.burst_size_unit = 16,
 };
@@ -546,6 +562,7 @@
 	.overlay_caps = omap4_dss_overlay_caps,
 	.clksrc_names = omap4_dss_clk_source_names,
 	.dss_params = omap4_dss_param_range,
+	.supported_rotation_types = OMAP_DSS_ROT_DMA | OMAP_DSS_ROT_TILER,
 	.buffer_size_unit = 16,
 	.burst_size_unit = 16,
 };
@@ -562,13 +579,17 @@
 	.pll_enable		=	ti_hdmi_4xxx_pll_enable,
 	.pll_disable		=	ti_hdmi_4xxx_pll_disable,
 	.video_enable		=	ti_hdmi_4xxx_wp_video_start,
+	.video_disable		=	ti_hdmi_4xxx_wp_video_stop,
 	.dump_wrapper		=	ti_hdmi_4xxx_wp_dump,
 	.dump_core		=	ti_hdmi_4xxx_core_dump,
 	.dump_pll		=	ti_hdmi_4xxx_pll_dump,
 	.dump_phy		=	ti_hdmi_4xxx_phy_dump,
-#if defined(CONFIG_SND_OMAP_SOC_OMAP4_HDMI) || \
-	defined(CONFIG_SND_OMAP_SOC_OMAP4_HDMI_MODULE)
+#if defined(CONFIG_OMAP4_DSS_HDMI_AUDIO)
 	.audio_enable		=       ti_hdmi_4xxx_wp_audio_enable,
+	.audio_disable		=       ti_hdmi_4xxx_wp_audio_disable,
+	.audio_start		=       ti_hdmi_4xxx_audio_start,
+	.audio_stop		=       ti_hdmi_4xxx_audio_stop,
+	.audio_config		=	ti_hdmi_4xxx_audio_config,
 #endif
 
 };
@@ -662,6 +683,11 @@
 	*end = omap_current_dss_features->reg_fields[id].end;
 }
 
+bool dss_feat_rotation_type_supported(enum omap_dss_rotation_type rot_type)
+{
+	return omap_current_dss_features->supported_rotation_types & rot_type;
+}
+
 void dss_features_init(void)
 {
 	if (cpu_is_omap24xx())
diff --git a/drivers/video/omap2/dss/dss_features.h b/drivers/video/omap2/dss/dss_features.h
index c332e7d..bdf469f0 100644
--- a/drivers/video/omap2/dss/dss_features.h
+++ b/drivers/video/omap2/dss/dss_features.h
@@ -62,6 +62,7 @@
 	FEAT_FIFO_MERGE,
 	/* An unknown HW bug causing the normal FIFO thresholds not to work */
 	FEAT_OMAP3_DSI_FIFO_BUG,
+	FEAT_BURST_2D,
 };
 
 /* DSS register field id */
@@ -91,6 +92,8 @@
 	FEAT_PARAM_DSIPLL_LPDIV,
 	FEAT_PARAM_DOWNSCALE,
 	FEAT_PARAM_LINEWIDTH,
+	FEAT_PARAM_MGR_WIDTH,
+	FEAT_PARAM_MGR_HEIGHT,
 };
 
 /* DSS Feature Functions */
@@ -108,6 +111,8 @@
 u32 dss_feat_get_buffer_size_unit(void);	/* in bytes */
 u32 dss_feat_get_burst_size_unit(void);		/* in bytes */
 
+bool dss_feat_rotation_type_supported(enum omap_dss_rotation_type rot_type);
+
 bool dss_has_feature(enum dss_feat_id id);
 void dss_feat_get_reg_field(enum dss_feat_reg_field id, u8 *start, u8 *end);
 void dss_features_init(void);
diff --git a/drivers/video/omap2/dss/hdmi.c b/drivers/video/omap2/dss/hdmi.c
index c4b4f69..8195c71 100644
--- a/drivers/video/omap2/dss/hdmi.c
+++ b/drivers/video/omap2/dss/hdmi.c
@@ -33,12 +33,6 @@
 #include <linux/pm_runtime.h>
 #include <linux/clk.h>
 #include <video/omapdss.h>
-#if defined(CONFIG_SND_OMAP_SOC_OMAP4_HDMI) || \
-	defined(CONFIG_SND_OMAP_SOC_OMAP4_HDMI_MODULE)
-#include <sound/soc.h>
-#include <sound/pcm_params.h>
-#include "ti_hdmi_4xxx_ip.h"
-#endif
 
 #include "ti_hdmi.h"
 #include "dss.h"
@@ -63,7 +57,6 @@
 
 static struct {
 	struct mutex lock;
-	struct omap_display_platform_data *pdata;
 	struct platform_device *pdev;
 	struct hdmi_ip_data ip_data;
 
@@ -130,25 +123,12 @@
 
 	DSSDBG("hdmi_runtime_get\n");
 
-	/*
-	 * HACK: Add dss_runtime_get() to ensure DSS clock domain is enabled.
-	 * This should be removed later.
-	 */
-	r = dss_runtime_get();
-	if (r < 0)
-		goto err_get_dss;
-
 	r = pm_runtime_get_sync(&hdmi.pdev->dev);
 	WARN_ON(r < 0);
 	if (r < 0)
-		goto err_get_hdmi;
+		return r;
 
 	return 0;
-
-err_get_hdmi:
-	dss_runtime_put();
-err_get_dss:
-	return r;
 }
 
 static void hdmi_runtime_put(void)
@@ -159,15 +139,9 @@
 
 	r = pm_runtime_put_sync(&hdmi.pdev->dev);
 	WARN_ON(r < 0);
-
-	/*
-	 * HACK: This is added to complement the dss_runtime_get() call in
-	 * hdmi_runtime_get(). This should be removed later.
-	 */
-	dss_runtime_put();
 }
 
-int hdmi_init_display(struct omap_dss_device *dssdev)
+static int __init hdmi_init_display(struct omap_dss_device *dssdev)
 {
 	DSSDBG("init_display\n");
 
@@ -344,7 +318,7 @@
 
 	hdmi_compute_pll(dssdev, phy, &hdmi.ip_data.pll_data);
 
-	hdmi.ip_data.ops->video_enable(&hdmi.ip_data, 0);
+	hdmi.ip_data.ops->video_disable(&hdmi.ip_data);
 
 	/* config the PLL and PHY hdmi_set_pll_pwrfirst */
 	r = hdmi.ip_data.ops->pll_enable(&hdmi.ip_data);
@@ -376,10 +350,11 @@
 	dispc_enable_gamma_table(0);
 
 	/* tv size */
-	dispc_set_digit_size(dssdev->panel.timings.x_res,
-			dssdev->panel.timings.y_res);
+	dss_mgr_set_timings(dssdev->manager, &dssdev->panel.timings);
 
-	hdmi.ip_data.ops->video_enable(&hdmi.ip_data, 1);
+	r = hdmi.ip_data.ops->video_enable(&hdmi.ip_data);
+	if (r)
+		goto err_vid_enable;
 
 	r = dss_mgr_enable(dssdev->manager);
 	if (r)
@@ -388,7 +363,8 @@
 	return 0;
 
 err_mgr_enable:
-	hdmi.ip_data.ops->video_enable(&hdmi.ip_data, 0);
+	hdmi.ip_data.ops->video_disable(&hdmi.ip_data);
+err_vid_enable:
 	hdmi.ip_data.ops->phy_disable(&hdmi.ip_data);
 	hdmi.ip_data.ops->pll_disable(&hdmi.ip_data);
 err:
@@ -400,7 +376,7 @@
 {
 	dss_mgr_disable(dssdev->manager);
 
-	hdmi.ip_data.ops->video_enable(&hdmi.ip_data, 0);
+	hdmi.ip_data.ops->video_disable(&hdmi.ip_data);
 	hdmi.ip_data.ops->phy_disable(&hdmi.ip_data);
 	hdmi.ip_data.ops->pll_disable(&hdmi.ip_data);
 	hdmi_runtime_put();
@@ -436,10 +412,12 @@
 		r = hdmi_power_on(dssdev);
 		if (r)
 			DSSERR("failed to power on device\n");
+	} else {
+		dss_mgr_set_timings(dssdev->manager, &dssdev->panel.timings);
 	}
 }
 
-void hdmi_dump_regs(struct seq_file *s)
+static void hdmi_dump_regs(struct seq_file *s)
 {
 	mutex_lock(&hdmi.lock);
 
@@ -555,220 +533,6 @@
 	mutex_unlock(&hdmi.lock);
 }
 
-#if defined(CONFIG_SND_OMAP_SOC_OMAP4_HDMI) || \
-	defined(CONFIG_SND_OMAP_SOC_OMAP4_HDMI_MODULE)
-
-static int hdmi_audio_trigger(struct snd_pcm_substream *substream, int cmd,
-				struct snd_soc_dai *dai)
-{
-	struct snd_soc_pcm_runtime *rtd = substream->private_data;
-	struct snd_soc_codec *codec = rtd->codec;
-	struct platform_device *pdev = to_platform_device(codec->dev);
-	struct hdmi_ip_data *ip_data = snd_soc_codec_get_drvdata(codec);
-	int err = 0;
-
-	if (!(ip_data->ops) && !(ip_data->ops->audio_enable)) {
-		dev_err(&pdev->dev, "Cannot enable/disable audio\n");
-		return -ENODEV;
-	}
-
-	switch (cmd) {
-	case SNDRV_PCM_TRIGGER_START:
-	case SNDRV_PCM_TRIGGER_RESUME:
-	case SNDRV_PCM_TRIGGER_PAUSE_RELEASE:
-		ip_data->ops->audio_enable(ip_data, true);
-		break;
-	case SNDRV_PCM_TRIGGER_STOP:
-	case SNDRV_PCM_TRIGGER_SUSPEND:
-	case SNDRV_PCM_TRIGGER_PAUSE_PUSH:
-		ip_data->ops->audio_enable(ip_data, false);
-		break;
-	default:
-		err = -EINVAL;
-	}
-	return err;
-}
-
-static int hdmi_audio_hw_params(struct snd_pcm_substream *substream,
-				    struct snd_pcm_hw_params *params,
-				    struct snd_soc_dai *dai)
-{
-	struct snd_soc_pcm_runtime *rtd = substream->private_data;
-	struct snd_soc_codec *codec = rtd->codec;
-	struct hdmi_ip_data *ip_data = snd_soc_codec_get_drvdata(codec);
-	struct hdmi_audio_format audio_format;
-	struct hdmi_audio_dma audio_dma;
-	struct hdmi_core_audio_config core_cfg;
-	struct hdmi_core_infoframe_audio aud_if_cfg;
-	int err, n, cts;
-	enum hdmi_core_audio_sample_freq sample_freq;
-
-	switch (params_format(params)) {
-	case SNDRV_PCM_FORMAT_S16_LE:
-		core_cfg.i2s_cfg.word_max_length =
-			HDMI_AUDIO_I2S_MAX_WORD_20BITS;
-		core_cfg.i2s_cfg.word_length = HDMI_AUDIO_I2S_CHST_WORD_16_BITS;
-		core_cfg.i2s_cfg.in_length_bits =
-			HDMI_AUDIO_I2S_INPUT_LENGTH_16;
-		core_cfg.i2s_cfg.justification = HDMI_AUDIO_JUSTIFY_LEFT;
-		audio_format.samples_per_word = HDMI_AUDIO_ONEWORD_TWOSAMPLES;
-		audio_format.sample_size = HDMI_AUDIO_SAMPLE_16BITS;
-		audio_format.justification = HDMI_AUDIO_JUSTIFY_LEFT;
-		audio_dma.transfer_size = 0x10;
-		break;
-	case SNDRV_PCM_FORMAT_S24_LE:
-		core_cfg.i2s_cfg.word_max_length =
-			HDMI_AUDIO_I2S_MAX_WORD_24BITS;
-		core_cfg.i2s_cfg.word_length = HDMI_AUDIO_I2S_CHST_WORD_24_BITS;
-		core_cfg.i2s_cfg.in_length_bits =
-			HDMI_AUDIO_I2S_INPUT_LENGTH_24;
-		audio_format.samples_per_word = HDMI_AUDIO_ONEWORD_ONESAMPLE;
-		audio_format.sample_size = HDMI_AUDIO_SAMPLE_24BITS;
-		audio_format.justification = HDMI_AUDIO_JUSTIFY_RIGHT;
-		core_cfg.i2s_cfg.justification = HDMI_AUDIO_JUSTIFY_RIGHT;
-		audio_dma.transfer_size = 0x20;
-		break;
-	default:
-		return -EINVAL;
-	}
-
-	switch (params_rate(params)) {
-	case 32000:
-		sample_freq = HDMI_AUDIO_FS_32000;
-		break;
-	case 44100:
-		sample_freq = HDMI_AUDIO_FS_44100;
-		break;
-	case 48000:
-		sample_freq = HDMI_AUDIO_FS_48000;
-		break;
-	default:
-		return -EINVAL;
-	}
-
-	err = hdmi_config_audio_acr(ip_data, params_rate(params), &n, &cts);
-	if (err < 0)
-		return err;
-
-	/* Audio wrapper config */
-	audio_format.stereo_channels = HDMI_AUDIO_STEREO_ONECHANNEL;
-	audio_format.active_chnnls_msk = 0x03;
-	audio_format.type = HDMI_AUDIO_TYPE_LPCM;
-	audio_format.sample_order = HDMI_AUDIO_SAMPLE_LEFT_FIRST;
-	/* Disable start/stop signals of IEC 60958 blocks */
-	audio_format.en_sig_blk_strt_end = HDMI_AUDIO_BLOCK_SIG_STARTEND_OFF;
-
-	audio_dma.block_size = 0xC0;
-	audio_dma.mode = HDMI_AUDIO_TRANSF_DMA;
-	audio_dma.fifo_threshold = 0x20; /* in number of samples */
-
-	hdmi_wp_audio_config_dma(ip_data, &audio_dma);
-	hdmi_wp_audio_config_format(ip_data, &audio_format);
-
-	/*
-	 * I2S config
-	 */
-	core_cfg.i2s_cfg.en_high_bitrate_aud = false;
-	/* Only used with high bitrate audio */
-	core_cfg.i2s_cfg.cbit_order = false;
-	/* Serial data and word select should change on sck rising edge */
-	core_cfg.i2s_cfg.sck_edge_mode = HDMI_AUDIO_I2S_SCK_EDGE_RISING;
-	core_cfg.i2s_cfg.vbit = HDMI_AUDIO_I2S_VBIT_FOR_PCM;
-	/* Set I2S word select polarity */
-	core_cfg.i2s_cfg.ws_polarity = HDMI_AUDIO_I2S_WS_POLARITY_LOW_IS_LEFT;
-	core_cfg.i2s_cfg.direction = HDMI_AUDIO_I2S_MSB_SHIFTED_FIRST;
-	/* Set serial data to word select shift. See Phillips spec. */
-	core_cfg.i2s_cfg.shift = HDMI_AUDIO_I2S_FIRST_BIT_SHIFT;
-	/* Enable one of the four available serial data channels */
-	core_cfg.i2s_cfg.active_sds = HDMI_AUDIO_I2S_SD0_EN;
-
-	/* Core audio config */
-	core_cfg.freq_sample = sample_freq;
-	core_cfg.n = n;
-	core_cfg.cts = cts;
-	if (dss_has_feature(FEAT_HDMI_CTS_SWMODE)) {
-		core_cfg.aud_par_busclk = 0;
-		core_cfg.cts_mode = HDMI_AUDIO_CTS_MODE_SW;
-		core_cfg.use_mclk = dss_has_feature(FEAT_HDMI_AUDIO_USE_MCLK);
-	} else {
-		core_cfg.aud_par_busclk = (((128 * 31) - 1) << 8);
-		core_cfg.cts_mode = HDMI_AUDIO_CTS_MODE_HW;
-		core_cfg.use_mclk = true;
-	}
-
-	if (core_cfg.use_mclk)
-		core_cfg.mclk_mode = HDMI_AUDIO_MCLK_128FS;
-	core_cfg.layout = HDMI_AUDIO_LAYOUT_2CH;
-	core_cfg.en_spdif = false;
-	/* Use sample frequency from channel status word */
-	core_cfg.fs_override = true;
-	/* Enable ACR packets */
-	core_cfg.en_acr_pkt = true;
-	/* Disable direct streaming digital audio */
-	core_cfg.en_dsd_audio = false;
-	/* Use parallel audio interface */
-	core_cfg.en_parallel_aud_input = true;
-
-	hdmi_core_audio_config(ip_data, &core_cfg);
-
-	/*
-	 * Configure packet
-	 * info frame audio see doc CEA861-D page 74
-	 */
-	aud_if_cfg.db1_coding_type = HDMI_INFOFRAME_AUDIO_DB1CT_FROM_STREAM;
-	aud_if_cfg.db1_channel_count = 2;
-	aud_if_cfg.db2_sample_freq = HDMI_INFOFRAME_AUDIO_DB2SF_FROM_STREAM;
-	aud_if_cfg.db2_sample_size = HDMI_INFOFRAME_AUDIO_DB2SS_FROM_STREAM;
-	aud_if_cfg.db4_channel_alloc = 0x00;
-	aud_if_cfg.db5_downmix_inh = false;
-	aud_if_cfg.db5_lsv = 0;
-
-	hdmi_core_audio_infoframe_config(ip_data, &aud_if_cfg);
-	return 0;
-}
-
-static int hdmi_audio_startup(struct snd_pcm_substream *substream,
-				  struct snd_soc_dai *dai)
-{
-	if (!hdmi.ip_data.cfg.cm.mode) {
-		pr_err("Current video settings do not support audio.\n");
-		return -EIO;
-	}
-	return 0;
-}
-
-static int hdmi_audio_codec_probe(struct snd_soc_codec *codec)
-{
-	struct hdmi_ip_data *priv = &hdmi.ip_data;
-
-	snd_soc_codec_set_drvdata(codec, priv);
-	return 0;
-}
-
-static struct snd_soc_codec_driver hdmi_audio_codec_drv = {
-	.probe = hdmi_audio_codec_probe,
-};
-
-static struct snd_soc_dai_ops hdmi_audio_codec_ops = {
-	.hw_params = hdmi_audio_hw_params,
-	.trigger = hdmi_audio_trigger,
-	.startup = hdmi_audio_startup,
-};
-
-static struct snd_soc_dai_driver hdmi_codec_dai_drv = {
-		.name = "hdmi-audio-codec",
-		.playback = {
-			.channels_min = 2,
-			.channels_max = 2,
-			.rates = SNDRV_PCM_RATE_32000 |
-				SNDRV_PCM_RATE_44100 | SNDRV_PCM_RATE_48000,
-			.formats = SNDRV_PCM_FMTBIT_S16_LE |
-				SNDRV_PCM_FMTBIT_S24_LE,
-		},
-		.ops = &hdmi_audio_codec_ops,
-};
-#endif
-
 static int hdmi_get_clocks(struct platform_device *pdev)
 {
 	struct clk *clk;
@@ -790,13 +554,180 @@
 		clk_put(hdmi.sys_clk);
 }
 
+#if defined(CONFIG_OMAP4_DSS_HDMI_AUDIO)
+int hdmi_compute_acr(u32 sample_freq, u32 *n, u32 *cts)
+{
+	u32 deep_color;
+	bool deep_color_correct = false;
+	u32 pclk = hdmi.ip_data.cfg.timings.pixel_clock;
+
+	if (n == NULL || cts == NULL)
+		return -EINVAL;
+
+	/* TODO: When implemented, query deep color mode here. */
+	deep_color = 100;
+
+	/*
+	 * When using deep color, the default N value (as in the HDMI
+	 * specification) yields to an non-integer CTS. Hence, we
+	 * specification) yields a non-integer CTS. Hence, we
+	 * section 7.2.1 of the HDMI 1.4a specification.
+	 */
+	switch (sample_freq) {
+	case 32000:
+	case 48000:
+	case 96000:
+	case 192000:
+		if (deep_color == 125)
+			if (pclk == 27027 || pclk == 74250)
+				deep_color_correct = true;
+		if (deep_color == 150)
+			if (pclk == 27027)
+				deep_color_correct = true;
+		break;
+	case 44100:
+	case 88200:
+	case 176400:
+		if (deep_color == 125)
+			if (pclk == 27027)
+				deep_color_correct = true;
+		break;
+	default:
+		return -EINVAL;
+	}
+
+	if (deep_color_correct) {
+		switch (sample_freq) {
+		case 32000:
+			*n = 8192;
+			break;
+		case 44100:
+			*n = 12544;
+			break;
+		case 48000:
+			*n = 8192;
+			break;
+		case 88200:
+			*n = 25088;
+			break;
+		case 96000:
+			*n = 16384;
+			break;
+		case 176400:
+			*n = 50176;
+			break;
+		case 192000:
+			*n = 32768;
+			break;
+		default:
+			return -EINVAL;
+		}
+	} else {
+		switch (sample_freq) {
+		case 32000:
+			*n = 4096;
+			break;
+		case 44100:
+			*n = 6272;
+			break;
+		case 48000:
+			*n = 6144;
+			break;
+		case 88200:
+			*n = 12544;
+			break;
+		case 96000:
+			*n = 12288;
+			break;
+		case 176400:
+			*n = 25088;
+			break;
+		case 192000:
+			*n = 24576;
+			break;
+		default:
+			return -EINVAL;
+		}
+	}
+	/* Calculate CTS. See HDMI 1.3a or 1.4a specifications */
+	*cts = pclk * (*n / 128) * deep_color / (sample_freq / 10);
+
+	return 0;
+}
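
As a quick editorial sanity check (again, not part of the patch), the non-deep-color branch of the CTS formula above reproduces the familiar N/CTS pair from the HDMI specification for 48 kHz audio on a 74.25 MHz pixel clock; the pixel clock is assumed to be expressed in kHz here, matching the unit used by the timings structure.

/* Illustration only: one worked case of the CTS formula used above. */
#include <stdio.h>

int main(void)
{
	unsigned int pclk = 74250;	/* pixel clock in kHz (assumed unit) */
	unsigned int sample_freq = 48000;
	unsigned int deep_color = 100;	/* no deep color -> 100 (percent) */
	unsigned int n = 6144;		/* default N for 48 kHz, no correction */
	unsigned int cts;

	cts = pclk * (n / 128) * deep_color / (sample_freq / 10);
	printf("N = %u, CTS = %u\n", n, cts);	/* N = 6144, CTS = 74250 */
	return 0;
}
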
+
+int hdmi_audio_enable(void)
+{
+	DSSDBG("audio_enable\n");
+
+	return hdmi.ip_data.ops->audio_enable(&hdmi.ip_data);
+}
+
+void hdmi_audio_disable(void)
+{
+	DSSDBG("audio_disable\n");
+
+	hdmi.ip_data.ops->audio_disable(&hdmi.ip_data);
+}
+
+int hdmi_audio_start(void)
+{
+	DSSDBG("audio_start\n");
+
+	return hdmi.ip_data.ops->audio_start(&hdmi.ip_data);
+}
+
+void hdmi_audio_stop(void)
+{
+	DSSDBG("audio_stop\n");
+
+	hdmi.ip_data.ops->audio_stop(&hdmi.ip_data);
+}
+
+bool hdmi_mode_has_audio(void)
+{
+	if (hdmi.ip_data.cfg.cm.mode == HDMI_HDMI)
+		return true;
+	else
+		return false;
+}
+
+int hdmi_audio_config(struct omap_dss_audio *audio)
+{
+	return hdmi.ip_data.ops->audio_config(&hdmi.ip_data, audio);
+}
+
+#endif
+
+static void __init hdmi_probe_pdata(struct platform_device *pdev)
+{
+	struct omap_dss_board_info *pdata = pdev->dev.platform_data;
+	int r, i;
+
+	for (i = 0; i < pdata->num_devices; ++i) {
+		struct omap_dss_device *dssdev = pdata->devices[i];
+
+		if (dssdev->type != OMAP_DISPLAY_TYPE_HDMI)
+			continue;
+
+		r = hdmi_init_display(dssdev);
+		if (r) {
+			DSSERR("device %s init failed: %d\n", dssdev->name, r);
+			continue;
+		}
+
+		r = omap_dss_register_device(dssdev, &pdev->dev, i);
+		if (r)
+			DSSERR("device %s register failed: %d\n",
+					dssdev->name, r);
+	}
+}
+
 /* HDMI HW IP initialisation */
-static int omapdss_hdmihw_probe(struct platform_device *pdev)
+static int __init omapdss_hdmihw_probe(struct platform_device *pdev)
 {
 	struct resource *hdmi_mem;
 	int r;
 
-	hdmi.pdata = pdev->dev.platform_data;
 	hdmi.pdev = pdev;
 
 	mutex_init(&hdmi.lock);
@@ -830,28 +761,18 @@
 
 	hdmi_panel_init();
 
-#if defined(CONFIG_SND_OMAP_SOC_OMAP4_HDMI) || \
-	defined(CONFIG_SND_OMAP_SOC_OMAP4_HDMI_MODULE)
+	dss_debugfs_create_file("hdmi", hdmi_dump_regs);
 
-	/* Register ASoC codec DAI */
-	r = snd_soc_register_codec(&pdev->dev, &hdmi_audio_codec_drv,
-					&hdmi_codec_dai_drv, 1);
-	if (r) {
-		DSSERR("can't register ASoC HDMI audio codec\n");
-		return r;
-	}
-#endif
+	hdmi_probe_pdata(pdev);
+
 	return 0;
 }
 
-static int omapdss_hdmihw_remove(struct platform_device *pdev)
+static int __exit omapdss_hdmihw_remove(struct platform_device *pdev)
 {
-	hdmi_panel_exit();
+	omap_dss_unregister_child_devices(&pdev->dev);
 
-#if defined(CONFIG_SND_OMAP_SOC_OMAP4_HDMI) || \
-	defined(CONFIG_SND_OMAP_SOC_OMAP4_HDMI_MODULE)
-	snd_soc_unregister_codec(&pdev->dev);
-#endif
+	hdmi_panel_exit();
 
 	pm_runtime_disable(&pdev->dev);
 
@@ -867,7 +788,6 @@
 	clk_disable(hdmi.sys_clk);
 
 	dispc_runtime_put();
-	dss_runtime_put();
 
 	return 0;
 }
@@ -876,23 +796,13 @@
 {
 	int r;
 
-	r = dss_runtime_get();
-	if (r < 0)
-		goto err_get_dss;
-
 	r = dispc_runtime_get();
 	if (r < 0)
-		goto err_get_dispc;
-
+		return r;
 
 	clk_enable(hdmi.sys_clk);
 
 	return 0;
-
-err_get_dispc:
-	dss_runtime_put();
-err_get_dss:
-	return r;
 }
 
 static const struct dev_pm_ops hdmi_pm_ops = {
@@ -901,8 +811,7 @@
 };
 
 static struct platform_driver omapdss_hdmihw_driver = {
-	.probe          = omapdss_hdmihw_probe,
-	.remove         = omapdss_hdmihw_remove,
+	.remove         = __exit_p(omapdss_hdmihw_remove),
 	.driver         = {
 		.name   = "omapdss_hdmi",
 		.owner  = THIS_MODULE,
@@ -910,12 +819,12 @@
 	},
 };
 
-int hdmi_init_platform_driver(void)
+int __init hdmi_init_platform_driver(void)
 {
-	return platform_driver_register(&omapdss_hdmihw_driver);
+	return platform_driver_probe(&omapdss_hdmihw_driver, omapdss_hdmihw_probe);
 }
 
-void hdmi_uninit_platform_driver(void)
+void __exit hdmi_uninit_platform_driver(void)
 {
-	return platform_driver_unregister(&omapdss_hdmihw_driver);
+	platform_driver_unregister(&omapdss_hdmihw_driver);
 }
diff --git a/drivers/video/omap2/dss/hdmi_panel.c b/drivers/video/omap2/dss/hdmi_panel.c
index 533d5dc..1179e3c 100644
--- a/drivers/video/omap2/dss/hdmi_panel.c
+++ b/drivers/video/omap2/dss/hdmi_panel.c
@@ -30,7 +30,12 @@
 #include "dss.h"
 
 static struct {
-	struct mutex hdmi_lock;
+	/* This protects the panel ops, mainly when accessing the HDMI IP. */
+	struct mutex lock;
+#if defined(CONFIG_OMAP4_DSS_HDMI_AUDIO)
+	/* This protects the audio ops, specifically. */
+	spinlock_t audio_lock;
+#endif
 } hdmi;
 
 
@@ -54,12 +59,168 @@
 
 }
 
+#if defined(CONFIG_OMAP4_DSS_HDMI_AUDIO)
+static int hdmi_panel_audio_enable(struct omap_dss_device *dssdev)
+{
+	unsigned long flags;
+	int r;
+
+	mutex_lock(&hdmi.lock);
+	spin_lock_irqsave(&hdmi.audio_lock, flags);
+
+	/* enable audio only if the display is active and supports audio */
+	if (dssdev->state != OMAP_DSS_DISPLAY_ACTIVE ||
+	    !hdmi_mode_has_audio()) {
+		DSSERR("audio not supported or display is off\n");
+		r = -EPERM;
+		goto err;
+	}
+
+	r = hdmi_audio_enable();
+
+	if (!r)
+		dssdev->audio_state = OMAP_DSS_AUDIO_ENABLED;
+
+err:
+	spin_unlock_irqrestore(&hdmi.audio_lock, flags);
+	mutex_unlock(&hdmi.lock);
+	return r;
+}
+
+static void hdmi_panel_audio_disable(struct omap_dss_device *dssdev)
+{
+	unsigned long flags;
+
+	spin_lock_irqsave(&hdmi.audio_lock, flags);
+
+	hdmi_audio_disable();
+
+	dssdev->audio_state = OMAP_DSS_AUDIO_DISABLED;
+
+	spin_unlock_irqrestore(&hdmi.audio_lock, flags);
+}
+
+static int hdmi_panel_audio_start(struct omap_dss_device *dssdev)
+{
+	unsigned long flags;
+	int r;
+
+	spin_lock_irqsave(&hdmi.audio_lock, flags);
+	/*
+	 * No need to check the panel state. It was checked when transitioning
+	 * to AUDIO_ENABLED.
+	 */
+	if (dssdev->audio_state != OMAP_DSS_AUDIO_ENABLED) {
+		DSSERR("audio start from invalid state\n");
+		r = -EPERM;
+		goto err;
+	}
+
+	r = hdmi_audio_start();
+
+	if (!r)
+		dssdev->audio_state = OMAP_DSS_AUDIO_PLAYING;
+
+err:
+	spin_unlock_irqrestore(&hdmi.audio_lock, flags);
+	return r;
+}
+
+static void hdmi_panel_audio_stop(struct omap_dss_device *dssdev)
+{
+	unsigned long flags;
+
+	spin_lock_irqsave(&hdmi.audio_lock, flags);
+
+	hdmi_audio_stop();
+	dssdev->audio_state = OMAP_DSS_AUDIO_ENABLED;
+
+	spin_unlock_irqrestore(&hdmi.audio_lock, flags);
+}
+
+static bool hdmi_panel_audio_supported(struct omap_dss_device *dssdev)
+{
+	bool r = false;
+
+	mutex_lock(&hdmi.lock);
+
+	if (dssdev->state != OMAP_DSS_DISPLAY_ACTIVE)
+		goto err;
+
+	if (!hdmi_mode_has_audio())
+		goto err;
+
+	r = true;
+err:
+	mutex_unlock(&hdmi.lock);
+	return r;
+}
+
+static int hdmi_panel_audio_config(struct omap_dss_device *dssdev,
+		struct omap_dss_audio *audio)
+{
+	unsigned long flags;
+	int r;
+
+	mutex_lock(&hdmi.lock);
+	spin_lock_irqsave(&hdmi.audio_lock, flags);
+
+	/* config audio only if the display is active and supports audio */
+	if (dssdev->state != OMAP_DSS_DISPLAY_ACTIVE ||
+	    !hdmi_mode_has_audio()) {
+		DSSERR("audio not supported or display is off\n");
+		r = -EPERM;
+		goto err;
+	}
+
+	r = hdmi_audio_config(audio);
+
+	if (!r)
+		dssdev->audio_state = OMAP_DSS_AUDIO_CONFIGURED;
+
+err:
+	spin_unlock_irqrestore(&hdmi.audio_lock, flags);
+	mutex_unlock(&hdmi.lock);
+	return r;
+}
+
+#else
+static int hdmi_panel_audio_enable(struct omap_dss_device *dssdev)
+{
+	return -EPERM;
+}
+
+static void hdmi_panel_audio_disable(struct omap_dss_device *dssdev)
+{
+}
+
+static int hdmi_panel_audio_start(struct omap_dss_device *dssdev)
+{
+	return -EPERM;
+}
+
+static void hdmi_panel_audio_stop(struct omap_dss_device *dssdev)
+{
+}
+
+static bool hdmi_panel_audio_supported(struct omap_dss_device *dssdev)
+{
+	return false;
+}
+
+static int hdmi_panel_audio_config(struct omap_dss_device *dssdev,
+		struct omap_dss_audio *audio)
+{
+	return -EPERM;
+}
+#endif
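
To make the state handling in the ops above easier to follow, here is an editorial sketch (not part of the patch) of the audio_state transitions they perform. The enum below is local to the example and only mirrors the OMAP_DSS_AUDIO_* names used in the code; the call sequence shown is one that the checks above permit, not the only possible one.

/* Local illustration of the audio_state transitions driven by the panel ops. */
#include <stdio.h>

enum audio_state {		/* mirrors the OMAP_DSS_AUDIO_* names above */
	AUDIO_DISABLED,
	AUDIO_ENABLED,
	AUDIO_CONFIGURED,
	AUDIO_PLAYING,
};

static const char *name(enum audio_state s)
{
	static const char *names[] = {
		"DISABLED", "ENABLED", "CONFIGURED", "PLAYING",
	};
	return names[s];
}

int main(void)
{
	enum audio_state s = AUDIO_DISABLED;

	s = AUDIO_CONFIGURED;	/* audio_config: display active, HDMI mode */
	s = AUDIO_ENABLED;	/* audio_enable: display active, HDMI mode */
	s = AUDIO_PLAYING;	/* audio_start: only allowed from ENABLED */
	s = AUDIO_ENABLED;	/* audio_stop */
	s = AUDIO_DISABLED;	/* audio_disable */

	printf("final state: %s\n", name(s));
	return 0;
}
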
+
 static int hdmi_panel_enable(struct omap_dss_device *dssdev)
 {
 	int r = 0;
 	DSSDBG("ENTER hdmi_panel_enable\n");
 
-	mutex_lock(&hdmi.hdmi_lock);
+	mutex_lock(&hdmi.lock);
 
 	if (dssdev->state != OMAP_DSS_DISPLAY_DISABLED) {
 		r = -EINVAL;
@@ -75,40 +236,52 @@
 	dssdev->state = OMAP_DSS_DISPLAY_ACTIVE;
 
 err:
-	mutex_unlock(&hdmi.hdmi_lock);
+	mutex_unlock(&hdmi.lock);
 
 	return r;
 }
 
 static void hdmi_panel_disable(struct omap_dss_device *dssdev)
 {
-	mutex_lock(&hdmi.hdmi_lock);
+	mutex_lock(&hdmi.lock);
 
-	if (dssdev->state == OMAP_DSS_DISPLAY_ACTIVE)
+	if (dssdev->state == OMAP_DSS_DISPLAY_ACTIVE) {
+		/*
+		 * TODO: notify audio users that the display was disabled. For
+		 * now, disable audio locally to not break our audio state
+		 * machine.
+		 */
+		hdmi_panel_audio_disable(dssdev);
 		omapdss_hdmi_display_disable(dssdev);
+	}
 
 	dssdev->state = OMAP_DSS_DISPLAY_DISABLED;
 
-	mutex_unlock(&hdmi.hdmi_lock);
+	mutex_unlock(&hdmi.lock);
 }
 
 static int hdmi_panel_suspend(struct omap_dss_device *dssdev)
 {
 	int r = 0;
 
-	mutex_lock(&hdmi.hdmi_lock);
+	mutex_lock(&hdmi.lock);
 
 	if (dssdev->state != OMAP_DSS_DISPLAY_ACTIVE) {
 		r = -EINVAL;
 		goto err;
 	}
 
-	dssdev->state = OMAP_DSS_DISPLAY_SUSPENDED;
+	/*
+	 * TODO: notify audio users that the display was suspended. For now,
+	 * disable audio locally to not break our audio state machine.
+	 */
+	hdmi_panel_audio_disable(dssdev);
 
+	dssdev->state = OMAP_DSS_DISPLAY_SUSPENDED;
 	omapdss_hdmi_display_disable(dssdev);
 
 err:
-	mutex_unlock(&hdmi.hdmi_lock);
+	mutex_unlock(&hdmi.lock);
 
 	return r;
 }
@@ -117,7 +290,7 @@
 {
 	int r = 0;
 
-	mutex_lock(&hdmi.hdmi_lock);
+	mutex_lock(&hdmi.lock);
 
 	if (dssdev->state != OMAP_DSS_DISPLAY_SUSPENDED) {
 		r = -EINVAL;
@@ -129,11 +302,12 @@
 		DSSERR("failed to power on\n");
 		goto err;
 	}
+	/* TODO: notify audio users that the panel resumed. */
 
 	dssdev->state = OMAP_DSS_DISPLAY_ACTIVE;
 
 err:
-	mutex_unlock(&hdmi.hdmi_lock);
+	mutex_unlock(&hdmi.lock);
 
 	return r;
 }
@@ -141,11 +315,11 @@
 static void hdmi_get_timings(struct omap_dss_device *dssdev,
 			struct omap_video_timings *timings)
 {
-	mutex_lock(&hdmi.hdmi_lock);
+	mutex_lock(&hdmi.lock);
 
 	*timings = dssdev->panel.timings;
 
-	mutex_unlock(&hdmi.hdmi_lock);
+	mutex_unlock(&hdmi.lock);
 }
 
 static void hdmi_set_timings(struct omap_dss_device *dssdev,
@@ -153,12 +327,18 @@
 {
 	DSSDBG("hdmi_set_timings\n");
 
-	mutex_lock(&hdmi.hdmi_lock);
+	mutex_lock(&hdmi.lock);
+
+	/*
+	 * TODO: notify audio users that there was a timings change. For
+	 * now, disable audio locally to not break our audio state machine.
+	 */
+	hdmi_panel_audio_disable(dssdev);
 
 	dssdev->panel.timings = *timings;
 	omapdss_hdmi_display_set_timing(dssdev);
 
-	mutex_unlock(&hdmi.hdmi_lock);
+	mutex_unlock(&hdmi.lock);
 }
 
 static int hdmi_check_timings(struct omap_dss_device *dssdev,
@@ -168,11 +348,11 @@
 
 	DSSDBG("hdmi_check_timings\n");
 
-	mutex_lock(&hdmi.hdmi_lock);
+	mutex_lock(&hdmi.lock);
 
 	r = omapdss_hdmi_display_check_timing(dssdev, timings);
 
-	mutex_unlock(&hdmi.hdmi_lock);
+	mutex_unlock(&hdmi.lock);
 	return r;
 }
 
@@ -180,7 +360,7 @@
 {
 	int r;
 
-	mutex_lock(&hdmi.hdmi_lock);
+	mutex_lock(&hdmi.lock);
 
 	if (dssdev->state != OMAP_DSS_DISPLAY_ACTIVE) {
 		r = omapdss_hdmi_display_enable(dssdev);
@@ -194,7 +374,7 @@
 			dssdev->state == OMAP_DSS_DISPLAY_SUSPENDED)
 		omapdss_hdmi_display_disable(dssdev);
 err:
-	mutex_unlock(&hdmi.hdmi_lock);
+	mutex_unlock(&hdmi.lock);
 
 	return r;
 }
@@ -203,7 +383,7 @@
 {
 	int r;
 
-	mutex_lock(&hdmi.hdmi_lock);
+	mutex_lock(&hdmi.lock);
 
 	if (dssdev->state != OMAP_DSS_DISPLAY_ACTIVE) {
 		r = omapdss_hdmi_display_enable(dssdev);
@@ -217,7 +397,7 @@
 			dssdev->state == OMAP_DSS_DISPLAY_SUSPENDED)
 		omapdss_hdmi_display_disable(dssdev);
 err:
-	mutex_unlock(&hdmi.hdmi_lock);
+	mutex_unlock(&hdmi.lock);
 
 	return r;
 }
@@ -234,6 +414,12 @@
 	.check_timings	= hdmi_check_timings,
 	.read_edid	= hdmi_read_edid,
 	.detect		= hdmi_detect,
+	.audio_enable	= hdmi_panel_audio_enable,
+	.audio_disable	= hdmi_panel_audio_disable,
+	.audio_start	= hdmi_panel_audio_start,
+	.audio_stop	= hdmi_panel_audio_stop,
+	.audio_supported	= hdmi_panel_audio_supported,
+	.audio_config	= hdmi_panel_audio_config,
 	.driver			= {
 		.name   = "hdmi_panel",
 		.owner  = THIS_MODULE,
@@ -242,7 +428,11 @@
 
 int hdmi_panel_init(void)
 {
-	mutex_init(&hdmi.hdmi_lock);
+	mutex_init(&hdmi.lock);
+
+#if defined(CONFIG_OMAP4_DSS_HDMI_AUDIO)
+	spin_lock_init(&hdmi.audio_lock);
+#endif
 
 	omap_dss_register_driver(&hdmi_driver);
 
diff --git a/drivers/video/omap2/dss/manager.c b/drivers/video/omap2/dss/manager.c
index e736460..0cbcde4 100644
--- a/drivers/video/omap2/dss/manager.c
+++ b/drivers/video/omap2/dss/manager.c
@@ -654,9 +654,20 @@
 	return 0;
 }
 
+int dss_mgr_check_timings(struct omap_overlay_manager *mgr,
+		const struct omap_video_timings *timings)
+{
+	if (!dispc_mgr_timings_ok(mgr->id, timings)) {
+		DSSERR("check_manager: invalid timings\n");
+		return -EINVAL;
+	}
+
+	return 0;
+}
+
 int dss_mgr_check(struct omap_overlay_manager *mgr,
-		struct omap_dss_device *dssdev,
 		struct omap_overlay_manager_info *info,
+		const struct omap_video_timings *mgr_timings,
 		struct omap_overlay_info **overlay_infos)
 {
 	struct omap_overlay *ovl;
@@ -668,6 +679,10 @@
 			return r;
 	}
 
+	r = dss_mgr_check_timings(mgr, mgr_timings);
+	if (r)
+		return r;
+
 	list_for_each_entry(ovl, &mgr->overlays, list) {
 		struct omap_overlay_info *oi;
 		int r;
@@ -677,7 +692,7 @@
 		if (oi == NULL)
 			continue;
 
-		r = dss_ovl_check(ovl, oi, dssdev);
+		r = dss_ovl_check(ovl, oi, mgr_timings);
 		if (r)
 			return r;
 	}
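
dss_mgr_check_timings() gives interface drivers a single place to validate manager timings against dispc before applying them, and dss_mgr_check() now runs the same validation on the apply path. A hedged sketch of the intended calling pattern (the example_* function is hypothetical; the real call sites live elsewhere in this series):

static int example_apply_timings(struct omap_overlay_manager *mgr,
				 struct omap_video_timings *t)
{
	int r;

	r = dss_mgr_check_timings(mgr, t);
	if (r)
		return r;

	dss_mgr_set_timings(mgr, t);

	return 0;
}
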
diff --git a/drivers/video/omap2/dss/overlay.c b/drivers/video/omap2/dss/overlay.c
index 6e82181..b0ba60f 100644
--- a/drivers/video/omap2/dss/overlay.c
+++ b/drivers/video/omap2/dss/overlay.c
@@ -628,19 +628,23 @@
 		return -EINVAL;
 	}
 
+	if (dss_feat_rotation_type_supported(info->rotation_type) == 0) {
+		DSSERR("check_overlay: rotation type %d not supported\n",
+				info->rotation_type);
+		return -EINVAL;
+	}
+
 	return 0;
 }
 
-int dss_ovl_check(struct omap_overlay *ovl,
-		struct omap_overlay_info *info, struct omap_dss_device *dssdev)
+int dss_ovl_check(struct omap_overlay *ovl, struct omap_overlay_info *info,
+		const struct omap_video_timings *mgr_timings)
 {
 	u16 outw, outh;
 	u16 dw, dh;
 
-	if (dssdev == NULL)
-		return 0;
-
-	dssdev->driver->get_resolution(dssdev, &dw, &dh);
+	dw = mgr_timings->x_res;
+	dh = mgr_timings->y_res;
 
 	if ((ovl->caps & OMAP_DSS_OVL_CAP_SCALE) == 0) {
 		outw = info->width;
diff --git a/drivers/video/omap2/dss/rfbi.c b/drivers/video/omap2/dss/rfbi.c
index 788a0ef..3d8c206 100644
--- a/drivers/video/omap2/dss/rfbi.c
+++ b/drivers/video/omap2/dss/rfbi.c
@@ -304,13 +304,23 @@
 		u16 height, void (*callback)(void *data), void *data)
 {
 	u32 l;
+	struct omap_video_timings timings = {
+		.hsw		= 1,
+		.hfp		= 1,
+		.hbp		= 1,
+		.vsw		= 1,
+		.vfp		= 0,
+		.vbp		= 0,
+		.x_res		= width,
+		.y_res		= height,
+	};
 
 	/*BUG_ON(callback == 0);*/
 	BUG_ON(rfbi.framedone_callback != NULL);
 
 	DSSDBG("rfbi_transfer_area %dx%d\n", width, height);
 
-	dispc_mgr_set_lcd_size(dssdev->manager->id, width, height);
+	dss_mgr_set_timings(dssdev->manager, &timings);
 
 	dispc_mgr_enable(dssdev->manager->id, true);
 
@@ -766,6 +776,16 @@
 		u16 *x, u16 *y, u16 *w, u16 *h)
 {
 	u16 dw, dh;
+	struct omap_video_timings timings = {
+		.hsw		= 1,
+		.hfp		= 1,
+		.hbp		= 1,
+		.vsw		= 1,
+		.vfp		= 0,
+		.vbp		= 0,
+		.x_res		= *w,
+		.y_res		= *h,
+	};
 
 	dssdev->driver->get_resolution(dssdev, &dw, &dh);
 
@@ -784,7 +804,7 @@
 	if (*w == 0 || *h == 0)
 		return -EINVAL;
 
-	dispc_mgr_set_lcd_size(dssdev->manager->id, *w, *h);
+	dss_mgr_set_timings(dssdev->manager, &timings);
 
 	return 0;
 }
@@ -799,7 +819,7 @@
 }
 EXPORT_SYMBOL(omap_rfbi_update);
 
-void rfbi_dump_regs(struct seq_file *s)
+static void rfbi_dump_regs(struct seq_file *s)
 {
 #define DUMPREG(r) seq_printf(s, "%-35s %08x\n", #r, rfbi_read_reg(r))
 
@@ -900,15 +920,39 @@
 }
 EXPORT_SYMBOL(omapdss_rfbi_display_disable);
 
-int rfbi_init_display(struct omap_dss_device *dssdev)
+static int __init rfbi_init_display(struct omap_dss_device *dssdev)
 {
 	rfbi.dssdev[dssdev->phy.rfbi.channel] = dssdev;
 	dssdev->caps = OMAP_DSS_DISPLAY_CAP_MANUAL_UPDATE;
 	return 0;
 }
 
+static void __init rfbi_probe_pdata(struct platform_device *pdev)
+{
+	struct omap_dss_board_info *pdata = pdev->dev.platform_data;
+	int i, r;
+
+	for (i = 0; i < pdata->num_devices; ++i) {
+		struct omap_dss_device *dssdev = pdata->devices[i];
+
+		if (dssdev->type != OMAP_DISPLAY_TYPE_DBI)
+			continue;
+
+		r = rfbi_init_display(dssdev);
+		if (r) {
+			DSSERR("device %s init failed: %d\n", dssdev->name, r);
+			continue;
+		}
+
+		r = omap_dss_register_device(dssdev, &pdev->dev, i);
+		if (r)
+			DSSERR("device %s register failed: %d\n",
+				dssdev->name, r);
+	}
+}
+
 /* RFBI HW IP initialisation */
-static int omap_rfbihw_probe(struct platform_device *pdev)
+static int __init omap_rfbihw_probe(struct platform_device *pdev)
 {
 	u32 rev;
 	struct resource *rfbi_mem;
@@ -956,6 +1000,10 @@
 
 	rfbi_runtime_put();
 
+	dss_debugfs_create_file("rfbi", rfbi_dump_regs);
+
+	rfbi_probe_pdata(pdev);
+
 	return 0;
 
 err_runtime_get:
@@ -963,8 +1011,9 @@
 	return r;
 }
 
-static int omap_rfbihw_remove(struct platform_device *pdev)
+static int __exit omap_rfbihw_remove(struct platform_device *pdev)
 {
+	omap_dss_unregister_child_devices(&pdev->dev);
 	pm_runtime_disable(&pdev->dev);
 	return 0;
 }
@@ -972,7 +1021,6 @@
 static int rfbi_runtime_suspend(struct device *dev)
 {
 	dispc_runtime_put();
-	dss_runtime_put();
 
 	return 0;
 }
@@ -981,20 +1029,11 @@
 {
 	int r;
 
-	r = dss_runtime_get();
-	if (r < 0)
-		goto err_get_dss;
-
 	r = dispc_runtime_get();
 	if (r < 0)
-		goto err_get_dispc;
+		return r;
 
 	return 0;
-
-err_get_dispc:
-	dss_runtime_put();
-err_get_dss:
-	return r;
 }
 
 static const struct dev_pm_ops rfbi_pm_ops = {
@@ -1003,8 +1042,7 @@
 };
 
 static struct platform_driver omap_rfbihw_driver = {
-	.probe          = omap_rfbihw_probe,
-	.remove         = omap_rfbihw_remove,
+	.remove         = __exit_p(omap_rfbihw_remove),
 	.driver         = {
 		.name   = "omapdss_rfbi",
 		.owner  = THIS_MODULE,
@@ -1012,12 +1050,12 @@
 	},
 };
 
-int rfbi_init_platform_driver(void)
+int __init rfbi_init_platform_driver(void)
 {
-	return platform_driver_register(&omap_rfbihw_driver);
+	return platform_driver_probe(&omap_rfbihw_driver, omap_rfbihw_probe);
 }
 
-void rfbi_uninit_platform_driver(void)
+void __exit rfbi_uninit_platform_driver(void)
 {
-	return platform_driver_unregister(&omap_rfbihw_driver);
+	platform_driver_unregister(&omap_rfbihw_driver);
 }
diff --git a/drivers/video/omap2/dss/sdi.c b/drivers/video/omap2/dss/sdi.c
index 8266ca0..3a43dc2 100644
--- a/drivers/video/omap2/dss/sdi.c
+++ b/drivers/video/omap2/dss/sdi.c
@@ -24,6 +24,7 @@
 #include <linux/err.h>
 #include <linux/regulator/consumer.h>
 #include <linux/export.h>
+#include <linux/platform_device.h>
 
 #include <video/omapdss.h>
 #include "dss.h"
@@ -71,10 +72,6 @@
 	if (r)
 		goto err_reg_enable;
 
-	r = dss_runtime_get();
-	if (r)
-		goto err_get_dss;
-
 	r = dispc_runtime_get();
 	if (r)
 		goto err_get_dispc;
@@ -107,7 +104,7 @@
 	}
 
 
-	dispc_mgr_set_lcd_timings(dssdev->manager->id, t);
+	dss_mgr_set_timings(dssdev->manager, t);
 
 	r = dss_set_clock_div(&dss_cinfo);
 	if (r)
@@ -137,8 +134,6 @@
 err_calc_clock_div:
 	dispc_runtime_put();
 err_get_dispc:
-	dss_runtime_put();
-err_get_dss:
 	regulator_disable(sdi.vdds_sdi_reg);
 err_reg_enable:
 	omap_dss_stop_device(dssdev);
@@ -154,7 +149,6 @@
 	dss_sdi_disable();
 
 	dispc_runtime_put();
-	dss_runtime_put();
 
 	regulator_disable(sdi.vdds_sdi_reg);
 
@@ -162,7 +156,7 @@
 }
 EXPORT_SYMBOL(omapdss_sdi_display_disable);
 
-int sdi_init_display(struct omap_dss_device *dssdev)
+static int __init sdi_init_display(struct omap_dss_device *dssdev)
 {
 	DSSDBG("SDI init\n");
 
@@ -182,11 +176,58 @@
 	return 0;
 }
 
-int sdi_init(void)
+static void __init sdi_probe_pdata(struct platform_device *pdev)
 {
+	struct omap_dss_board_info *pdata = pdev->dev.platform_data;
+	int i, r;
+
+	for (i = 0; i < pdata->num_devices; ++i) {
+		struct omap_dss_device *dssdev = pdata->devices[i];
+
+		if (dssdev->type != OMAP_DISPLAY_TYPE_SDI)
+			continue;
+
+		r = sdi_init_display(dssdev);
+		if (r) {
+			DSSERR("device %s init failed: %d\n", dssdev->name, r);
+			continue;
+		}
+
+		r = omap_dss_register_device(dssdev, &pdev->dev, i);
+		if (r)
+			DSSERR("device %s register failed: %d\n",
+					dssdev->name, r);
+	}
+}
+
+static int __init omap_sdi_probe(struct platform_device *pdev)
+{
+	sdi_probe_pdata(pdev);
+
 	return 0;
 }
 
-void sdi_exit(void)
+static int __exit omap_sdi_remove(struct platform_device *pdev)
 {
+	omap_dss_unregister_child_devices(&pdev->dev);
+
+	return 0;
+}
+
+static struct platform_driver omap_sdi_driver = {
+	.remove         = __exit_p(omap_sdi_remove),
+	.driver         = {
+		.name   = "omapdss_sdi",
+		.owner  = THIS_MODULE,
+	},
+};
+
+int __init sdi_init_platform_driver(void)
+{
+	return platform_driver_probe(&omap_sdi_driver, omap_sdi_probe);
+}
+
+void __exit sdi_uninit_platform_driver(void)
+{
+	platform_driver_unregister(&omap_sdi_driver);
 }
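
rfbi and sdi above (and venc further below) now register their displays from platform data in an __init probe and hand that probe to platform_driver_probe() instead of setting .probe in the platform_driver; the probe code can then be discarded after boot, at the cost of the device never being re-bound later. The skeleton of that pattern, with hypothetical example_* names (omap_dss_unregister_child_devices() is the dss-internal helper already used above):

#include <linux/init.h>
#include <linux/module.h>
#include <linux/platform_device.h>

static int __init example_dss_probe(struct platform_device *pdev)
{
	/* walk pdev->dev.platform_data and register the child displays */
	return 0;
}

static int __exit example_dss_remove(struct platform_device *pdev)
{
	omap_dss_unregister_child_devices(&pdev->dev);
	return 0;
}

static struct platform_driver example_dss_driver = {
	/* no .probe here: it is passed to platform_driver_probe() below */
	.remove	= __exit_p(example_dss_remove),
	.driver	= {
		.name	= "omapdss_example",
		.owner	= THIS_MODULE,
	},
};

int __init example_init_platform_driver(void)
{
	return platform_driver_probe(&example_dss_driver, example_dss_probe);
}
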
diff --git a/drivers/video/omap2/dss/ti_hdmi.h b/drivers/video/omap2/dss/ti_hdmi.h
index 1f58b84..e734cb4 100644
--- a/drivers/video/omap2/dss/ti_hdmi.h
+++ b/drivers/video/omap2/dss/ti_hdmi.h
@@ -96,7 +96,9 @@
 
 	void (*pll_disable)(struct hdmi_ip_data *ip_data);
 
-	void (*video_enable)(struct hdmi_ip_data *ip_data, bool start);
+	int (*video_enable)(struct hdmi_ip_data *ip_data);
+
+	void (*video_disable)(struct hdmi_ip_data *ip_data);
 
 	void (*dump_wrapper)(struct hdmi_ip_data *ip_data, struct seq_file *s);
 
@@ -106,9 +108,17 @@
 
 	void (*dump_phy)(struct hdmi_ip_data *ip_data, struct seq_file *s);
 
-#if defined(CONFIG_SND_OMAP_SOC_OMAP4_HDMI) || \
-	defined(CONFIG_SND_OMAP_SOC_OMAP4_HDMI_MODULE)
-	void (*audio_enable)(struct hdmi_ip_data *ip_data, bool start);
+#if defined(CONFIG_OMAP4_DSS_HDMI_AUDIO)
+	int (*audio_enable)(struct hdmi_ip_data *ip_data);
+
+	void (*audio_disable)(struct hdmi_ip_data *ip_data);
+
+	int (*audio_start)(struct hdmi_ip_data *ip_data);
+
+	void (*audio_stop)(struct hdmi_ip_data *ip_data);
+
+	int (*audio_config)(struct hdmi_ip_data *ip_data,
+		struct omap_dss_audio *audio);
 #endif
 
 };
@@ -173,7 +183,8 @@
 void ti_hdmi_4xxx_phy_disable(struct hdmi_ip_data *ip_data);
 int ti_hdmi_4xxx_read_edid(struct hdmi_ip_data *ip_data, u8 *edid, int len);
 bool ti_hdmi_4xxx_detect(struct hdmi_ip_data *ip_data);
-void ti_hdmi_4xxx_wp_video_start(struct hdmi_ip_data *ip_data, bool start);
+int ti_hdmi_4xxx_wp_video_start(struct hdmi_ip_data *ip_data);
+void ti_hdmi_4xxx_wp_video_stop(struct hdmi_ip_data *ip_data);
 int ti_hdmi_4xxx_pll_enable(struct hdmi_ip_data *ip_data);
 void ti_hdmi_4xxx_pll_disable(struct hdmi_ip_data *ip_data);
 void ti_hdmi_4xxx_basic_configure(struct hdmi_ip_data *ip_data);
@@ -181,8 +192,13 @@
 void ti_hdmi_4xxx_pll_dump(struct hdmi_ip_data *ip_data, struct seq_file *s);
 void ti_hdmi_4xxx_core_dump(struct hdmi_ip_data *ip_data, struct seq_file *s);
 void ti_hdmi_4xxx_phy_dump(struct hdmi_ip_data *ip_data, struct seq_file *s);
-#if defined(CONFIG_SND_OMAP_SOC_OMAP4_HDMI) || \
-	defined(CONFIG_SND_OMAP_SOC_OMAP4_HDMI_MODULE)
-void ti_hdmi_4xxx_wp_audio_enable(struct hdmi_ip_data *ip_data, bool enable);
+#if defined(CONFIG_OMAP4_DSS_HDMI_AUDIO)
+int hdmi_compute_acr(u32 sample_freq, u32 *n, u32 *cts);
+int ti_hdmi_4xxx_wp_audio_enable(struct hdmi_ip_data *ip_data);
+void ti_hdmi_4xxx_wp_audio_disable(struct hdmi_ip_data *ip_data);
+int ti_hdmi_4xxx_audio_start(struct hdmi_ip_data *ip_data);
+void ti_hdmi_4xxx_audio_stop(struct hdmi_ip_data *ip_data);
+int ti_hdmi_4xxx_audio_config(struct hdmi_ip_data *ip_data,
+		struct omap_dss_audio *audio);
 #endif
 #endif
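
With video_enable split into separate enable/disable hooks and the audio hook reworked into enable/disable/start/stop/config, the IP-agnostic layer can drive each step separately and unwind on error. A hedged sketch of such a caller, assuming the ops table is reachable as ip_data->ops as in the existing wrappers (the example_* function is hypothetical):

static int example_hdmi_audio_on(struct hdmi_ip_data *ip_data,
				 struct omap_dss_audio *audio)
{
	int r;

	r = ip_data->ops->audio_config(ip_data, audio);
	if (r)
		return r;

	r = ip_data->ops->audio_enable(ip_data);
	if (r)
		return r;

	r = ip_data->ops->audio_start(ip_data);
	if (r)
		ip_data->ops->audio_disable(ip_data);

	return r;
}
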
diff --git a/drivers/video/omap2/dss/ti_hdmi_4xxx_ip.c b/drivers/video/omap2/dss/ti_hdmi_4xxx_ip.c
index bfe6fe6..4dae1b2 100644
--- a/drivers/video/omap2/dss/ti_hdmi_4xxx_ip.c
+++ b/drivers/video/omap2/dss/ti_hdmi_4xxx_ip.c
@@ -29,9 +29,14 @@
 #include <linux/string.h>
 #include <linux/seq_file.h>
 #include <linux/gpio.h>
+#if defined(CONFIG_OMAP4_DSS_HDMI_AUDIO)
+#include <sound/asound.h>
+#include <sound/asoundef.h>
+#endif
 
 #include "ti_hdmi_4xxx_ip.h"
 #include "dss.h"
+#include "dss_features.h"
 
 static inline void hdmi_write_reg(void __iomem *base_addr,
 				const u16 idx, u32 val)
@@ -298,9 +303,9 @@
 	REG_FLD_MOD(phy_base, HDMI_TXPHY_PAD_CFG_CTRL, 0x1, 27, 27);
 
 	r = request_threaded_irq(gpio_to_irq(ip_data->hpd_gpio),
-			NULL, hpd_irq_handler,
-			IRQF_DISABLED | IRQF_TRIGGER_RISING |
-			IRQF_TRIGGER_FALLING, "hpd", ip_data);
+				 NULL, hpd_irq_handler,
+				 IRQF_TRIGGER_RISING | IRQF_TRIGGER_FALLING |
+				 IRQF_ONESHOT, "hpd", ip_data);
 	if (r) {
 		DSSERR("HPD IRQ request failed\n");
 		hdmi_set_phy_pwr(ip_data, HDMI_PHYPWRCMD_OFF);
@@ -699,9 +704,15 @@
 
 }
 
-void ti_hdmi_4xxx_wp_video_start(struct hdmi_ip_data *ip_data, bool start)
+int ti_hdmi_4xxx_wp_video_start(struct hdmi_ip_data *ip_data)
 {
-	REG_FLD_MOD(hdmi_wp_base(ip_data), HDMI_WP_VIDEO_CFG, start, 31, 31);
+	REG_FLD_MOD(hdmi_wp_base(ip_data), HDMI_WP_VIDEO_CFG, true, 31, 31);
+	return 0;
+}
+
+void ti_hdmi_4xxx_wp_video_stop(struct hdmi_ip_data *ip_data)
+{
+	REG_FLD_MOD(hdmi_wp_base(ip_data), HDMI_WP_VIDEO_CFG, false, 31, 31);
 }
 
 static void hdmi_wp_video_init_format(struct hdmi_video_format *video_fmt,
@@ -886,10 +897,12 @@
 
 #define CORE_REG(i, name) name(i)
 #define DUMPCORE(r) seq_printf(s, "%-35s %08x\n", #r,\
-		hdmi_read_reg(hdmi_pll_base(ip_data), r))
-#define DUMPCOREAV(i, r) seq_printf(s, "%s[%d]%*s %08x\n", #r, i, \
+		hdmi_read_reg(hdmi_core_sys_base(ip_data), r))
+#define DUMPCOREAV(r) seq_printf(s, "%-35s %08x\n", #r,\
+		hdmi_read_reg(hdmi_av_base(ip_data), r))
+#define DUMPCOREAV2(i, r) seq_printf(s, "%s[%d]%*s %08x\n", #r, i, \
 		(i < 10) ? 32 - strlen(#r) : 31 - strlen(#r), " ", \
-		hdmi_read_reg(hdmi_pll_base(ip_data), CORE_REG(i, r)))
+		hdmi_read_reg(hdmi_av_base(ip_data), CORE_REG(i, r)))
 
 	DUMPCORE(HDMI_CORE_SYS_VND_IDL);
 	DUMPCORE(HDMI_CORE_SYS_DEV_IDL);
@@ -898,6 +911,13 @@
 	DUMPCORE(HDMI_CORE_SYS_SRST);
 	DUMPCORE(HDMI_CORE_CTRL1);
 	DUMPCORE(HDMI_CORE_SYS_SYS_STAT);
+	DUMPCORE(HDMI_CORE_SYS_DE_DLY);
+	DUMPCORE(HDMI_CORE_SYS_DE_CTRL);
+	DUMPCORE(HDMI_CORE_SYS_DE_TOP);
+	DUMPCORE(HDMI_CORE_SYS_DE_CNTL);
+	DUMPCORE(HDMI_CORE_SYS_DE_CNTH);
+	DUMPCORE(HDMI_CORE_SYS_DE_LINL);
+	DUMPCORE(HDMI_CORE_SYS_DE_LINH_1);
 	DUMPCORE(HDMI_CORE_SYS_VID_ACEN);
 	DUMPCORE(HDMI_CORE_SYS_VID_MODE);
 	DUMPCORE(HDMI_CORE_SYS_INTR_STATE);
@@ -907,102 +927,91 @@
 	DUMPCORE(HDMI_CORE_SYS_INTR4);
 	DUMPCORE(HDMI_CORE_SYS_UMASK1);
 	DUMPCORE(HDMI_CORE_SYS_TMDS_CTRL);
-	DUMPCORE(HDMI_CORE_SYS_DE_DLY);
-	DUMPCORE(HDMI_CORE_SYS_DE_CTRL);
-	DUMPCORE(HDMI_CORE_SYS_DE_TOP);
-	DUMPCORE(HDMI_CORE_SYS_DE_CNTL);
-	DUMPCORE(HDMI_CORE_SYS_DE_CNTH);
-	DUMPCORE(HDMI_CORE_SYS_DE_LINL);
-	DUMPCORE(HDMI_CORE_SYS_DE_LINH_1);
 
-	DUMPCORE(HDMI_CORE_DDC_CMD);
-	DUMPCORE(HDMI_CORE_DDC_STATUS);
 	DUMPCORE(HDMI_CORE_DDC_ADDR);
+	DUMPCORE(HDMI_CORE_DDC_SEGM);
 	DUMPCORE(HDMI_CORE_DDC_OFFSET);
 	DUMPCORE(HDMI_CORE_DDC_COUNT1);
 	DUMPCORE(HDMI_CORE_DDC_COUNT2);
+	DUMPCORE(HDMI_CORE_DDC_STATUS);
+	DUMPCORE(HDMI_CORE_DDC_CMD);
 	DUMPCORE(HDMI_CORE_DDC_DATA);
-	DUMPCORE(HDMI_CORE_DDC_SEGM);
 
-	DUMPCORE(HDMI_CORE_AV_HDMI_CTRL);
-	DUMPCORE(HDMI_CORE_AV_DPD);
-	DUMPCORE(HDMI_CORE_AV_PB_CTRL1);
-	DUMPCORE(HDMI_CORE_AV_PB_CTRL2);
-	DUMPCORE(HDMI_CORE_AV_AVI_TYPE);
-	DUMPCORE(HDMI_CORE_AV_AVI_VERS);
-	DUMPCORE(HDMI_CORE_AV_AVI_LEN);
-	DUMPCORE(HDMI_CORE_AV_AVI_CHSUM);
+	DUMPCOREAV(HDMI_CORE_AV_ACR_CTRL);
+	DUMPCOREAV(HDMI_CORE_AV_FREQ_SVAL);
+	DUMPCOREAV(HDMI_CORE_AV_N_SVAL1);
+	DUMPCOREAV(HDMI_CORE_AV_N_SVAL2);
+	DUMPCOREAV(HDMI_CORE_AV_N_SVAL3);
+	DUMPCOREAV(HDMI_CORE_AV_CTS_SVAL1);
+	DUMPCOREAV(HDMI_CORE_AV_CTS_SVAL2);
+	DUMPCOREAV(HDMI_CORE_AV_CTS_SVAL3);
+	DUMPCOREAV(HDMI_CORE_AV_CTS_HVAL1);
+	DUMPCOREAV(HDMI_CORE_AV_CTS_HVAL2);
+	DUMPCOREAV(HDMI_CORE_AV_CTS_HVAL3);
+	DUMPCOREAV(HDMI_CORE_AV_AUD_MODE);
+	DUMPCOREAV(HDMI_CORE_AV_SPDIF_CTRL);
+	DUMPCOREAV(HDMI_CORE_AV_HW_SPDIF_FS);
+	DUMPCOREAV(HDMI_CORE_AV_SWAP_I2S);
+	DUMPCOREAV(HDMI_CORE_AV_SPDIF_ERTH);
+	DUMPCOREAV(HDMI_CORE_AV_I2S_IN_MAP);
+	DUMPCOREAV(HDMI_CORE_AV_I2S_IN_CTRL);
+	DUMPCOREAV(HDMI_CORE_AV_I2S_CHST0);
+	DUMPCOREAV(HDMI_CORE_AV_I2S_CHST1);
+	DUMPCOREAV(HDMI_CORE_AV_I2S_CHST2);
+	DUMPCOREAV(HDMI_CORE_AV_I2S_CHST4);
+	DUMPCOREAV(HDMI_CORE_AV_I2S_CHST5);
+	DUMPCOREAV(HDMI_CORE_AV_ASRC);
+	DUMPCOREAV(HDMI_CORE_AV_I2S_IN_LEN);
+	DUMPCOREAV(HDMI_CORE_AV_HDMI_CTRL);
+	DUMPCOREAV(HDMI_CORE_AV_AUDO_TXSTAT);
+	DUMPCOREAV(HDMI_CORE_AV_AUD_PAR_BUSCLK_1);
+	DUMPCOREAV(HDMI_CORE_AV_AUD_PAR_BUSCLK_2);
+	DUMPCOREAV(HDMI_CORE_AV_AUD_PAR_BUSCLK_3);
+	DUMPCOREAV(HDMI_CORE_AV_TEST_TXCTRL);
+	DUMPCOREAV(HDMI_CORE_AV_DPD);
+	DUMPCOREAV(HDMI_CORE_AV_PB_CTRL1);
+	DUMPCOREAV(HDMI_CORE_AV_PB_CTRL2);
+	DUMPCOREAV(HDMI_CORE_AV_AVI_TYPE);
+	DUMPCOREAV(HDMI_CORE_AV_AVI_VERS);
+	DUMPCOREAV(HDMI_CORE_AV_AVI_LEN);
+	DUMPCOREAV(HDMI_CORE_AV_AVI_CHSUM);
 
 	for (i = 0; i < HDMI_CORE_AV_AVI_DBYTE_NELEMS; i++)
-		DUMPCOREAV(i, HDMI_CORE_AV_AVI_DBYTE);
+		DUMPCOREAV2(i, HDMI_CORE_AV_AVI_DBYTE);
+
+	DUMPCOREAV(HDMI_CORE_AV_SPD_TYPE);
+	DUMPCOREAV(HDMI_CORE_AV_SPD_VERS);
+	DUMPCOREAV(HDMI_CORE_AV_SPD_LEN);
+	DUMPCOREAV(HDMI_CORE_AV_SPD_CHSUM);
 
 	for (i = 0; i < HDMI_CORE_AV_SPD_DBYTE_NELEMS; i++)
-		DUMPCOREAV(i, HDMI_CORE_AV_SPD_DBYTE);
+		DUMPCOREAV2(i, HDMI_CORE_AV_SPD_DBYTE);
+
+	DUMPCOREAV(HDMI_CORE_AV_AUDIO_TYPE);
+	DUMPCOREAV(HDMI_CORE_AV_AUDIO_VERS);
+	DUMPCOREAV(HDMI_CORE_AV_AUDIO_LEN);
+	DUMPCOREAV(HDMI_CORE_AV_AUDIO_CHSUM);
 
 	for (i = 0; i < HDMI_CORE_AV_AUD_DBYTE_NELEMS; i++)
-		DUMPCOREAV(i, HDMI_CORE_AV_AUD_DBYTE);
+		DUMPCOREAV2(i, HDMI_CORE_AV_AUD_DBYTE);
+
+	DUMPCOREAV(HDMI_CORE_AV_MPEG_TYPE);
+	DUMPCOREAV(HDMI_CORE_AV_MPEG_VERS);
+	DUMPCOREAV(HDMI_CORE_AV_MPEG_LEN);
+	DUMPCOREAV(HDMI_CORE_AV_MPEG_CHSUM);
 
 	for (i = 0; i < HDMI_CORE_AV_MPEG_DBYTE_NELEMS; i++)
-		DUMPCOREAV(i, HDMI_CORE_AV_MPEG_DBYTE);
+		DUMPCOREAV2(i, HDMI_CORE_AV_MPEG_DBYTE);
 
 	for (i = 0; i < HDMI_CORE_AV_GEN_DBYTE_NELEMS; i++)
-		DUMPCOREAV(i, HDMI_CORE_AV_GEN_DBYTE);
+		DUMPCOREAV2(i, HDMI_CORE_AV_GEN_DBYTE);
+
+	DUMPCOREAV(HDMI_CORE_AV_CP_BYTE1);
 
 	for (i = 0; i < HDMI_CORE_AV_GEN2_DBYTE_NELEMS; i++)
-		DUMPCOREAV(i, HDMI_CORE_AV_GEN2_DBYTE);
+		DUMPCOREAV2(i, HDMI_CORE_AV_GEN2_DBYTE);
 
-	DUMPCORE(HDMI_CORE_AV_ACR_CTRL);
-	DUMPCORE(HDMI_CORE_AV_FREQ_SVAL);
-	DUMPCORE(HDMI_CORE_AV_N_SVAL1);
-	DUMPCORE(HDMI_CORE_AV_N_SVAL2);
-	DUMPCORE(HDMI_CORE_AV_N_SVAL3);
-	DUMPCORE(HDMI_CORE_AV_CTS_SVAL1);
-	DUMPCORE(HDMI_CORE_AV_CTS_SVAL2);
-	DUMPCORE(HDMI_CORE_AV_CTS_SVAL3);
-	DUMPCORE(HDMI_CORE_AV_CTS_HVAL1);
-	DUMPCORE(HDMI_CORE_AV_CTS_HVAL2);
-	DUMPCORE(HDMI_CORE_AV_CTS_HVAL3);
-	DUMPCORE(HDMI_CORE_AV_AUD_MODE);
-	DUMPCORE(HDMI_CORE_AV_SPDIF_CTRL);
-	DUMPCORE(HDMI_CORE_AV_HW_SPDIF_FS);
-	DUMPCORE(HDMI_CORE_AV_SWAP_I2S);
-	DUMPCORE(HDMI_CORE_AV_SPDIF_ERTH);
-	DUMPCORE(HDMI_CORE_AV_I2S_IN_MAP);
-	DUMPCORE(HDMI_CORE_AV_I2S_IN_CTRL);
-	DUMPCORE(HDMI_CORE_AV_I2S_CHST0);
-	DUMPCORE(HDMI_CORE_AV_I2S_CHST1);
-	DUMPCORE(HDMI_CORE_AV_I2S_CHST2);
-	DUMPCORE(HDMI_CORE_AV_I2S_CHST4);
-	DUMPCORE(HDMI_CORE_AV_I2S_CHST5);
-	DUMPCORE(HDMI_CORE_AV_ASRC);
-	DUMPCORE(HDMI_CORE_AV_I2S_IN_LEN);
-	DUMPCORE(HDMI_CORE_AV_HDMI_CTRL);
-	DUMPCORE(HDMI_CORE_AV_AUDO_TXSTAT);
-	DUMPCORE(HDMI_CORE_AV_AUD_PAR_BUSCLK_1);
-	DUMPCORE(HDMI_CORE_AV_AUD_PAR_BUSCLK_2);
-	DUMPCORE(HDMI_CORE_AV_AUD_PAR_BUSCLK_3);
-	DUMPCORE(HDMI_CORE_AV_TEST_TXCTRL);
-	DUMPCORE(HDMI_CORE_AV_DPD);
-	DUMPCORE(HDMI_CORE_AV_PB_CTRL1);
-	DUMPCORE(HDMI_CORE_AV_PB_CTRL2);
-	DUMPCORE(HDMI_CORE_AV_AVI_TYPE);
-	DUMPCORE(HDMI_CORE_AV_AVI_VERS);
-	DUMPCORE(HDMI_CORE_AV_AVI_LEN);
-	DUMPCORE(HDMI_CORE_AV_AVI_CHSUM);
-	DUMPCORE(HDMI_CORE_AV_SPD_TYPE);
-	DUMPCORE(HDMI_CORE_AV_SPD_VERS);
-	DUMPCORE(HDMI_CORE_AV_SPD_LEN);
-	DUMPCORE(HDMI_CORE_AV_SPD_CHSUM);
-	DUMPCORE(HDMI_CORE_AV_AUDIO_TYPE);
-	DUMPCORE(HDMI_CORE_AV_AUDIO_VERS);
-	DUMPCORE(HDMI_CORE_AV_AUDIO_LEN);
-	DUMPCORE(HDMI_CORE_AV_AUDIO_CHSUM);
-	DUMPCORE(HDMI_CORE_AV_MPEG_TYPE);
-	DUMPCORE(HDMI_CORE_AV_MPEG_VERS);
-	DUMPCORE(HDMI_CORE_AV_MPEG_LEN);
-	DUMPCORE(HDMI_CORE_AV_MPEG_CHSUM);
-	DUMPCORE(HDMI_CORE_AV_CP_BYTE1);
-	DUMPCORE(HDMI_CORE_AV_CEC_ADDR_ID);
+	DUMPCOREAV(HDMI_CORE_AV_CEC_ADDR_ID);
 }
 
 void ti_hdmi_4xxx_phy_dump(struct hdmi_ip_data *ip_data, struct seq_file *s)
@@ -1016,9 +1025,8 @@
 	DUMPPHY(HDMI_TXPHY_PAD_CFG_CTRL);
 }
 
-#if defined(CONFIG_SND_OMAP_SOC_OMAP4_HDMI) || \
-	defined(CONFIG_SND_OMAP_SOC_OMAP4_HDMI_MODULE)
-void hdmi_wp_audio_config_format(struct hdmi_ip_data *ip_data,
+#if defined(CONFIG_OMAP4_DSS_HDMI_AUDIO)
+static void ti_hdmi_4xxx_wp_audio_config_format(struct hdmi_ip_data *ip_data,
 					struct hdmi_audio_format *aud_fmt)
 {
 	u32 r;
@@ -1037,7 +1045,7 @@
 	hdmi_write_reg(hdmi_wp_base(ip_data), HDMI_WP_AUDIO_CFG, r);
 }
 
-void hdmi_wp_audio_config_dma(struct hdmi_ip_data *ip_data,
+static void ti_hdmi_4xxx_wp_audio_config_dma(struct hdmi_ip_data *ip_data,
 					struct hdmi_audio_dma *aud_dma)
 {
 	u32 r;
@@ -1055,7 +1063,7 @@
 	hdmi_write_reg(hdmi_wp_base(ip_data), HDMI_WP_AUDIO_CTRL, r);
 }
 
-void hdmi_core_audio_config(struct hdmi_ip_data *ip_data,
+static void ti_hdmi_4xxx_core_audio_config(struct hdmi_ip_data *ip_data,
 					struct hdmi_core_audio_config *cfg)
 {
 	u32 r;
@@ -1106,27 +1114,33 @@
 	REG_FLD_MOD(av_base, HDMI_CORE_AV_SPDIF_CTRL,
 						cfg->fs_override, 1, 1);
 
-	/* I2S parameters */
-	REG_FLD_MOD(av_base, HDMI_CORE_AV_I2S_CHST4,
-						cfg->freq_sample, 3, 0);
+	/*
+	 * Set IEC-60958-3 channel status word. It is passed to the IP
+	 * just as it is received. The user of the driver is responsible
+	 * for its contents.
+	 */
+	hdmi_write_reg(av_base, HDMI_CORE_AV_I2S_CHST0,
+		       cfg->iec60958_cfg->status[0]);
+	hdmi_write_reg(av_base, HDMI_CORE_AV_I2S_CHST1,
+		       cfg->iec60958_cfg->status[1]);
+	hdmi_write_reg(av_base, HDMI_CORE_AV_I2S_CHST2,
+		       cfg->iec60958_cfg->status[2]);
+	/* yes, this is correct: status[3] goes to CHST4 register */
+	hdmi_write_reg(av_base, HDMI_CORE_AV_I2S_CHST4,
+		       cfg->iec60958_cfg->status[3]);
+	/* yes, this is correct: status[4] goes to CHST5 register */
+	hdmi_write_reg(av_base, HDMI_CORE_AV_I2S_CHST5,
+		       cfg->iec60958_cfg->status[4]);
 
+	/* set I2S parameters */
 	r = hdmi_read_reg(av_base, HDMI_CORE_AV_I2S_IN_CTRL);
-	r = FLD_MOD(r, cfg->i2s_cfg.en_high_bitrate_aud, 7, 7);
 	r = FLD_MOD(r, cfg->i2s_cfg.sck_edge_mode, 6, 6);
-	r = FLD_MOD(r, cfg->i2s_cfg.cbit_order, 5, 5);
 	r = FLD_MOD(r, cfg->i2s_cfg.vbit, 4, 4);
-	r = FLD_MOD(r, cfg->i2s_cfg.ws_polarity, 3, 3);
 	r = FLD_MOD(r, cfg->i2s_cfg.justification, 2, 2);
 	r = FLD_MOD(r, cfg->i2s_cfg.direction, 1, 1);
 	r = FLD_MOD(r, cfg->i2s_cfg.shift, 0, 0);
 	hdmi_write_reg(av_base, HDMI_CORE_AV_I2S_IN_CTRL, r);
 
-	r = hdmi_read_reg(av_base, HDMI_CORE_AV_I2S_CHST5);
-	r = FLD_MOD(r, cfg->freq_sample, 7, 4);
-	r = FLD_MOD(r, cfg->i2s_cfg.word_length, 3, 1);
-	r = FLD_MOD(r, cfg->i2s_cfg.word_max_length, 0, 0);
-	hdmi_write_reg(av_base, HDMI_CORE_AV_I2S_CHST5, r);
-
 	REG_FLD_MOD(av_base, HDMI_CORE_AV_I2S_IN_LEN,
 			cfg->i2s_cfg.in_length_bits, 3, 0);
 
@@ -1138,12 +1152,19 @@
 	r = FLD_MOD(r, cfg->en_parallel_aud_input, 2, 2);
 	r = FLD_MOD(r, cfg->en_spdif, 1, 1);
 	hdmi_write_reg(av_base, HDMI_CORE_AV_AUD_MODE, r);
+
+	/*
+	 * Audio channel mappings.
+	 * TODO: Make channel mapping dynamic. For now, map channels in the
+	 * ALSA order: FL/FR/RL/RR/C/LFE/SL/SR. Remapping is needed as the
+	 * HDMI speaker order is different. See CEA-861 Section 6.6.2.
+	 */
+	hdmi_write_reg(av_base, HDMI_CORE_AV_I2S_IN_MAP, 0x78);
+	REG_FLD_MOD(av_base, HDMI_CORE_AV_SWAP_I2S, 1, 5, 5);
 }
 
-void hdmi_core_audio_infoframe_config(struct hdmi_ip_data *ip_data,
-		struct hdmi_core_infoframe_audio *info_aud)
+static void ti_hdmi_4xxx_core_audio_infoframe_cfg(struct hdmi_ip_data *ip_data,
+		struct snd_cea_861_aud_if *info_aud)
 {
-	u8 val;
 	u8 sum = 0, checksum = 0;
 	void __iomem *av_base = hdmi_av_base(ip_data);
 
@@ -1157,24 +1178,23 @@
 	hdmi_write_reg(av_base, HDMI_CORE_AV_AUDIO_LEN, 0x0a);
 	sum += 0x84 + 0x001 + 0x00a;
 
-	val = (info_aud->db1_coding_type << 4)
-			| (info_aud->db1_channel_count - 1);
-	hdmi_write_reg(av_base, HDMI_CORE_AV_AUD_DBYTE(0), val);
-	sum += val;
+	hdmi_write_reg(av_base, HDMI_CORE_AV_AUD_DBYTE(0),
+		       info_aud->db1_ct_cc);
+	sum += info_aud->db1_ct_cc;
 
-	val = (info_aud->db2_sample_freq << 2) | info_aud->db2_sample_size;
-	hdmi_write_reg(av_base, HDMI_CORE_AV_AUD_DBYTE(1), val);
-	sum += val;
+	hdmi_write_reg(av_base, HDMI_CORE_AV_AUD_DBYTE(1),
+		       info_aud->db2_sf_ss);
+	sum += info_aud->db2_sf_ss;
 
-	hdmi_write_reg(av_base, HDMI_CORE_AV_AUD_DBYTE(2), 0x00);
+	hdmi_write_reg(av_base, HDMI_CORE_AV_AUD_DBYTE(2), info_aud->db3);
+	sum += info_aud->db3;
 
-	val = info_aud->db4_channel_alloc;
-	hdmi_write_reg(av_base, HDMI_CORE_AV_AUD_DBYTE(3), val);
-	sum += val;
+	hdmi_write_reg(av_base, HDMI_CORE_AV_AUD_DBYTE(3), info_aud->db4_ca);
+	sum += info_aud->db4_ca;
 
-	val = (info_aud->db5_downmix_inh << 7) | (info_aud->db5_lsv << 3);
-	hdmi_write_reg(av_base, HDMI_CORE_AV_AUD_DBYTE(4), val);
-	sum += val;
+	hdmi_write_reg(av_base, HDMI_CORE_AV_AUD_DBYTE(4),
+		       info_aud->db5_dminh_lsv);
+	sum += info_aud->db5_dminh_lsv;
 
 	hdmi_write_reg(av_base, HDMI_CORE_AV_AUD_DBYTE(5), 0x00);
 	hdmi_write_reg(av_base, HDMI_CORE_AV_AUD_DBYTE(6), 0x00);
@@ -1192,70 +1212,212 @@
 	 */
 }
 
-int hdmi_config_audio_acr(struct hdmi_ip_data *ip_data,
-				u32 sample_freq, u32 *n, u32 *cts)
+int ti_hdmi_4xxx_audio_config(struct hdmi_ip_data *ip_data,
+		struct omap_dss_audio *audio)
 {
-	u32 r;
-	u32 deep_color = 0;
-	u32 pclk = ip_data->cfg.timings.pixel_clock;
+	struct hdmi_audio_format audio_format;
+	struct hdmi_audio_dma audio_dma;
+	struct hdmi_core_audio_config core;
+	int err, n, cts, channel_count;
+	unsigned int fs_nr;
+	bool word_length_16b = false;
 
-	if (n == NULL || cts == NULL)
+	if (!audio || !audio->iec || !audio->cea || !ip_data)
 		return -EINVAL;
+
+	core.iec60958_cfg = audio->iec;
 	/*
-	 * Obtain current deep color configuration. This needed
-	 * to calculate the TMDS clock based on the pixel clock.
+	 * In the IEC-60958 status word, check if the audio sample word length
+	 * is 16-bit as several optimizations can be performed in such case.
 	 */
-	r = REG_GET(hdmi_wp_base(ip_data), HDMI_WP_VIDEO_CFG, 1, 0);
-	switch (r) {
-	case 1: /* No deep color selected */
-		deep_color = 100;
+	if (!(audio->iec->status[4] & IEC958_AES4_CON_MAX_WORDLEN_24))
+		if (audio->iec->status[4] & IEC958_AES4_CON_WORDLEN_20_16)
+			word_length_16b = true;
+
+	/* I2S configuration. See the Philips I2S bus specification */
+	if (word_length_16b)
+		core.i2s_cfg.justification = HDMI_AUDIO_JUSTIFY_LEFT;
+	else
+		core.i2s_cfg.justification = HDMI_AUDIO_JUSTIFY_RIGHT;
+	/*
+	 * The I2S input word length is twice the length given in the
+	 * IEC-60958 status word. If the word size is greater than 20 bits,
+	 * increment by one.
+	 */
+	core.i2s_cfg.in_length_bits = audio->iec->status[4]
+		& IEC958_AES4_CON_WORDLEN;
+	if (audio->iec->status[4] & IEC958_AES4_CON_MAX_WORDLEN_24)
+		core.i2s_cfg.in_length_bits++;
+	core.i2s_cfg.sck_edge_mode = HDMI_AUDIO_I2S_SCK_EDGE_RISING;
+	core.i2s_cfg.vbit = HDMI_AUDIO_I2S_VBIT_FOR_PCM;
+	core.i2s_cfg.direction = HDMI_AUDIO_I2S_MSB_SHIFTED_FIRST;
+	core.i2s_cfg.shift = HDMI_AUDIO_I2S_FIRST_BIT_SHIFT;
+
+	/* convert sample frequency to a number */
+	switch (audio->iec->status[3] & IEC958_AES3_CON_FS) {
+	case IEC958_AES3_CON_FS_32000:
+		fs_nr = 32000;
 		break;
-	case 2: /* 10-bit deep color selected */
-		deep_color = 125;
+	case IEC958_AES3_CON_FS_44100:
+		fs_nr = 44100;
 		break;
-	case 3: /* 12-bit deep color selected */
-		deep_color = 150;
+	case IEC958_AES3_CON_FS_48000:
+		fs_nr = 48000;
+		break;
+	case IEC958_AES3_CON_FS_88200:
+		fs_nr = 88200;
+		break;
+	case IEC958_AES3_CON_FS_96000:
+		fs_nr = 96000;
+		break;
+	case IEC958_AES3_CON_FS_176400:
+		fs_nr = 176400;
+		break;
+	case IEC958_AES3_CON_FS_192000:
+		fs_nr = 192000;
 		break;
 	default:
 		return -EINVAL;
 	}
 
-	switch (sample_freq) {
-	case 32000:
-		if ((deep_color == 125) && ((pclk == 54054)
-				|| (pclk == 74250)))
-			*n = 8192;
-		else
-			*n = 4096;
+	err = hdmi_compute_acr(fs_nr, &n, &cts);
+	if (err)
+		return err;
+
+	/* Audio clock regeneration settings */
+	core.n = n;
+	core.cts = cts;
+	if (dss_has_feature(FEAT_HDMI_CTS_SWMODE)) {
+		core.aud_par_busclk = 0;
+		core.cts_mode = HDMI_AUDIO_CTS_MODE_SW;
+		core.use_mclk = dss_has_feature(FEAT_HDMI_AUDIO_USE_MCLK);
+	} else {
+		core.aud_par_busclk = (((128 * 31) - 1) << 8);
+		core.cts_mode = HDMI_AUDIO_CTS_MODE_HW;
+		core.use_mclk = true;
+	}
+
+	if (core.use_mclk)
+		core.mclk_mode = HDMI_AUDIO_MCLK_128FS;
+
+	/* Audio channels settings */
+	channel_count = (audio->cea->db1_ct_cc &
+			 CEA861_AUDIO_INFOFRAME_DB1CC) + 1;
+
+	switch (channel_count) {
+	case 2:
+		audio_format.active_chnnls_msk = 0x03;
 		break;
-	case 44100:
-		*n = 6272;
+	case 3:
+		audio_format.active_chnnls_msk = 0x07;
 		break;
-	case 48000:
-		if ((deep_color == 125) && ((pclk == 54054)
-				|| (pclk == 74250)))
-			*n = 8192;
-		else
-			*n = 6144;
+	case 4:
+		audio_format.active_chnnls_msk = 0x0f;
+		break;
+	case 5:
+		audio_format.active_chnnls_msk = 0x1f;
+		break;
+	case 6:
+		audio_format.active_chnnls_msk = 0x3f;
+		break;
+	case 7:
+		audio_format.active_chnnls_msk = 0x7f;
+		break;
+	case 8:
+		audio_format.active_chnnls_msk = 0xff;
 		break;
 	default:
-		*n = 0;
 		return -EINVAL;
 	}
 
-	/* Calculate CTS. See HDMI 1.3a or 1.4a specifications */
-	*cts = pclk * (*n / 128) * deep_color / (sample_freq / 10);
+	/*
+	 * The HDMI IP needs to enable four stereo channels when transmitting
+	 * more than two audio channels.
+	 */
+	if (channel_count == 2) {
+		audio_format.stereo_channels = HDMI_AUDIO_STEREO_ONECHANNEL;
+		core.i2s_cfg.active_sds = HDMI_AUDIO_I2S_SD0_EN;
+		core.layout = HDMI_AUDIO_LAYOUT_2CH;
+	} else {
+		audio_format.stereo_channels = HDMI_AUDIO_STEREO_FOURCHANNELS;
+		core.i2s_cfg.active_sds = HDMI_AUDIO_I2S_SD0_EN |
+				HDMI_AUDIO_I2S_SD1_EN | HDMI_AUDIO_I2S_SD2_EN |
+				HDMI_AUDIO_I2S_SD3_EN;
+		core.layout = HDMI_AUDIO_LAYOUT_8CH;
+	}
+
+	core.en_spdif = false;
+	/* use sample frequency from channel status word */
+	core.fs_override = true;
+	/* enable ACR packets */
+	core.en_acr_pkt = true;
+	/* disable direct streaming digital audio */
+	core.en_dsd_audio = false;
+	/* use parallel audio interface */
+	core.en_parallel_aud_input = true;
+
+	/* DMA settings */
+	if (word_length_16b)
+		audio_dma.transfer_size = 0x10;
+	else
+		audio_dma.transfer_size = 0x20;
+	audio_dma.block_size = 0xC0;
+	audio_dma.mode = HDMI_AUDIO_TRANSF_DMA;
+	audio_dma.fifo_threshold = 0x20; /* in number of samples */
+
+	/* audio FIFO format settings */
+	if (word_length_16b) {
+		audio_format.samples_per_word = HDMI_AUDIO_ONEWORD_TWOSAMPLES;
+		audio_format.sample_size = HDMI_AUDIO_SAMPLE_16BITS;
+		audio_format.justification = HDMI_AUDIO_JUSTIFY_LEFT;
+	} else {
+		audio_format.samples_per_word = HDMI_AUDIO_ONEWORD_ONESAMPLE;
+		audio_format.sample_size = HDMI_AUDIO_SAMPLE_24BITS;
+		audio_format.justification = HDMI_AUDIO_JUSTIFY_RIGHT;
+	}
+	audio_format.type = HDMI_AUDIO_TYPE_LPCM;
+	audio_format.sample_order = HDMI_AUDIO_SAMPLE_LEFT_FIRST;
+	/* disable start/stop signals of IEC 60958 blocks */
+	audio_format.en_sig_blk_strt_end = HDMI_AUDIO_BLOCK_SIG_STARTEND_ON;
+
+	/* configure DMA and audio FIFO format */
+	ti_hdmi_4xxx_wp_audio_config_dma(ip_data, &audio_dma);
+	ti_hdmi_4xxx_wp_audio_config_format(ip_data, &audio_format);
+
+	/* configure the core */
+	ti_hdmi_4xxx_core_audio_config(ip_data, &core);
+
+	/* configure CEA 861 audio infoframe */
+	ti_hdmi_4xxx_core_audio_infoframe_cfg(ip_data, audio->cea);
 
 	return 0;
 }
 
-void ti_hdmi_4xxx_wp_audio_enable(struct hdmi_ip_data *ip_data, bool enable)
+int ti_hdmi_4xxx_wp_audio_enable(struct hdmi_ip_data *ip_data)
+{
+	REG_FLD_MOD(hdmi_wp_base(ip_data),
+		    HDMI_WP_AUDIO_CTRL, true, 31, 31);
+	return 0;
+}
+
+void ti_hdmi_4xxx_wp_audio_disable(struct hdmi_ip_data *ip_data)
+{
+	REG_FLD_MOD(hdmi_wp_base(ip_data),
+		    HDMI_WP_AUDIO_CTRL, false, 31, 31);
+}
+
+int ti_hdmi_4xxx_audio_start(struct hdmi_ip_data *ip_data)
 {
 	REG_FLD_MOD(hdmi_av_base(ip_data),
-				HDMI_CORE_AV_AUD_MODE, enable, 0, 0);
+		    HDMI_CORE_AV_AUD_MODE, true, 0, 0);
 	REG_FLD_MOD(hdmi_wp_base(ip_data),
-				HDMI_WP_AUDIO_CTRL, enable, 31, 31);
+		    HDMI_WP_AUDIO_CTRL, true, 30, 30);
+	return 0;
+}
+
+void ti_hdmi_4xxx_audio_stop(struct hdmi_ip_data *ip_data)
+{
+	REG_FLD_MOD(hdmi_av_base(ip_data),
+		    HDMI_CORE_AV_AUD_MODE, false, 0, 0);
 	REG_FLD_MOD(hdmi_wp_base(ip_data),
-				HDMI_WP_AUDIO_CTRL, enable, 30, 30);
+		    HDMI_WP_AUDIO_CTRL, false, 30, 30);
 }
 #endif
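
hdmi_compute_acr() (implemented outside this file) supplies the N and CTS values programmed above. Per the HDMI 1.4a Audio Clock Regeneration rules, 128 * fs = f_TMDS * N / CTS; for example, 48 kHz audio on a 74.25 MHz TMDS clock with the recommended N = 6144 gives CTS = 74250000 * 6144 / (128 * 48000) = 74250. A hedged helper illustrating only that arithmetic (not the driver's actual implementation):

#include <linux/math64.h>

/* CTS = f_TMDS * N / (128 * fs); illustrative only */
static u32 example_hdmi_cts(u32 tmds_clk_hz, u32 n, u32 fs)
{
	return (u32)div_u64((u64)tmds_clk_hz * n, 128 * fs);
}
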
diff --git a/drivers/video/omap2/dss/ti_hdmi_4xxx_ip.h b/drivers/video/omap2/dss/ti_hdmi_4xxx_ip.h
index a14d1a0..8366ae1 100644
--- a/drivers/video/omap2/dss/ti_hdmi_4xxx_ip.h
+++ b/drivers/video/omap2/dss/ti_hdmi_4xxx_ip.h
@@ -24,11 +24,6 @@
 #include <linux/string.h>
 #include <video/omapdss.h>
 #include "ti_hdmi.h"
-#if defined(CONFIG_SND_OMAP_SOC_OMAP4_HDMI) || \
-	defined(CONFIG_SND_OMAP_SOC_OMAP4_HDMI_MODULE)
-#include <sound/soc.h>
-#include <sound/pcm_params.h>
-#endif
 
 /* HDMI Wrapper */
 
@@ -57,6 +52,13 @@
 #define HDMI_CORE_SYS_SRST			0x14
 #define HDMI_CORE_CTRL1				0x20
 #define HDMI_CORE_SYS_SYS_STAT			0x24
+#define HDMI_CORE_SYS_DE_DLY			0xC8
+#define HDMI_CORE_SYS_DE_CTRL			0xCC
+#define HDMI_CORE_SYS_DE_TOP			0xD0
+#define HDMI_CORE_SYS_DE_CNTL			0xD8
+#define HDMI_CORE_SYS_DE_CNTH			0xDC
+#define HDMI_CORE_SYS_DE_LINL			0xE0
+#define HDMI_CORE_SYS_DE_LINH_1			0xE4
 #define HDMI_CORE_SYS_VID_ACEN			0x124
 #define HDMI_CORE_SYS_VID_MODE			0x128
 #define HDMI_CORE_SYS_INTR_STATE		0x1C0
@@ -66,50 +68,24 @@
 #define HDMI_CORE_SYS_INTR4			0x1D0
 #define HDMI_CORE_SYS_UMASK1			0x1D4
 #define HDMI_CORE_SYS_TMDS_CTRL			0x208
-#define HDMI_CORE_SYS_DE_DLY			0xC8
-#define HDMI_CORE_SYS_DE_CTRL			0xCC
-#define HDMI_CORE_SYS_DE_TOP			0xD0
-#define HDMI_CORE_SYS_DE_CNTL			0xD8
-#define HDMI_CORE_SYS_DE_CNTH			0xDC
-#define HDMI_CORE_SYS_DE_LINL			0xE0
-#define HDMI_CORE_SYS_DE_LINH_1			0xE4
+
 #define HDMI_CORE_CTRL1_VEN_FOLLOWVSYNC	0x1
 #define HDMI_CORE_CTRL1_HEN_FOLLOWHSYNC	0x1
-#define HDMI_CORE_CTRL1_BSEL_24BITBUS		0x1
+#define HDMI_CORE_CTRL1_BSEL_24BITBUS	0x1
 #define HDMI_CORE_CTRL1_EDGE_RISINGEDGE	0x1
 
 /* HDMI DDC E-DID */
-#define HDMI_CORE_DDC_CMD			0x3CC
-#define HDMI_CORE_DDC_STATUS			0x3C8
 #define HDMI_CORE_DDC_ADDR			0x3B4
+#define HDMI_CORE_DDC_SEGM			0x3B8
 #define HDMI_CORE_DDC_OFFSET			0x3BC
 #define HDMI_CORE_DDC_COUNT1			0x3C0
 #define HDMI_CORE_DDC_COUNT2			0x3C4
+#define HDMI_CORE_DDC_STATUS			0x3C8
+#define HDMI_CORE_DDC_CMD			0x3CC
 #define HDMI_CORE_DDC_DATA			0x3D0
-#define HDMI_CORE_DDC_SEGM			0x3B8
 
 /* HDMI IP Core Audio Video */
 
-#define HDMI_CORE_AV_HDMI_CTRL			0xBC
-#define HDMI_CORE_AV_DPD			0xF4
-#define HDMI_CORE_AV_PB_CTRL1			0xF8
-#define HDMI_CORE_AV_PB_CTRL2			0xFC
-#define HDMI_CORE_AV_AVI_TYPE			0x100
-#define HDMI_CORE_AV_AVI_VERS			0x104
-#define HDMI_CORE_AV_AVI_LEN			0x108
-#define HDMI_CORE_AV_AVI_CHSUM			0x10C
-#define HDMI_CORE_AV_AVI_DBYTE(n)		(n * 4 + 0x110)
-#define HDMI_CORE_AV_AVI_DBYTE_NELEMS		15
-#define HDMI_CORE_AV_SPD_DBYTE(n)		(n * 4 + 0x190)
-#define HDMI_CORE_AV_SPD_DBYTE_NELEMS		27
-#define HDMI_CORE_AV_AUD_DBYTE(n)		(n * 4 + 0x210)
-#define HDMI_CORE_AV_AUD_DBYTE_NELEMS		10
-#define HDMI_CORE_AV_MPEG_DBYTE(n)		(n * 4 + 0x290)
-#define HDMI_CORE_AV_MPEG_DBYTE_NELEMS		27
-#define HDMI_CORE_AV_GEN_DBYTE(n)		(n * 4 + 0x300)
-#define HDMI_CORE_AV_GEN_DBYTE_NELEMS		31
-#define HDMI_CORE_AV_GEN2_DBYTE(n)		(n * 4 + 0x380)
-#define HDMI_CORE_AV_GEN2_DBYTE_NELEMS		31
 #define HDMI_CORE_AV_ACR_CTRL			0x4
 #define HDMI_CORE_AV_FREQ_SVAL			0x8
 #define HDMI_CORE_AV_N_SVAL1			0xC
@@ -148,25 +124,39 @@
 #define HDMI_CORE_AV_AVI_VERS			0x104
 #define HDMI_CORE_AV_AVI_LEN			0x108
 #define HDMI_CORE_AV_AVI_CHSUM			0x10C
+#define HDMI_CORE_AV_AVI_DBYTE(n)		(n * 4 + 0x110)
 #define HDMI_CORE_AV_SPD_TYPE			0x180
 #define HDMI_CORE_AV_SPD_VERS			0x184
 #define HDMI_CORE_AV_SPD_LEN			0x188
 #define HDMI_CORE_AV_SPD_CHSUM			0x18C
+#define HDMI_CORE_AV_SPD_DBYTE(n)		(n * 4 + 0x190)
 #define HDMI_CORE_AV_AUDIO_TYPE			0x200
 #define HDMI_CORE_AV_AUDIO_VERS			0x204
 #define HDMI_CORE_AV_AUDIO_LEN			0x208
 #define HDMI_CORE_AV_AUDIO_CHSUM		0x20C
+#define HDMI_CORE_AV_AUD_DBYTE(n)		(n * 4 + 0x210)
 #define HDMI_CORE_AV_MPEG_TYPE			0x280
 #define HDMI_CORE_AV_MPEG_VERS			0x284
 #define HDMI_CORE_AV_MPEG_LEN			0x288
 #define HDMI_CORE_AV_MPEG_CHSUM			0x28C
+#define HDMI_CORE_AV_MPEG_DBYTE(n)		(n * 4 + 0x290)
+#define HDMI_CORE_AV_GEN_DBYTE(n)		(n * 4 + 0x300)
 #define HDMI_CORE_AV_CP_BYTE1			0x37C
+#define HDMI_CORE_AV_GEN2_DBYTE(n)		(n * 4 + 0x380)
 #define HDMI_CORE_AV_CEC_ADDR_ID		0x3FC
+
 #define HDMI_CORE_AV_SPD_DBYTE_ELSIZE		0x4
 #define HDMI_CORE_AV_GEN2_DBYTE_ELSIZE		0x4
 #define HDMI_CORE_AV_MPEG_DBYTE_ELSIZE		0x4
 #define HDMI_CORE_AV_GEN_DBYTE_ELSIZE		0x4
 
+#define HDMI_CORE_AV_AVI_DBYTE_NELEMS		15
+#define HDMI_CORE_AV_SPD_DBYTE_NELEMS		27
+#define HDMI_CORE_AV_AUD_DBYTE_NELEMS		10
+#define HDMI_CORE_AV_MPEG_DBYTE_NELEMS		27
+#define HDMI_CORE_AV_GEN_DBYTE_NELEMS		31
+#define HDMI_CORE_AV_GEN2_DBYTE_NELEMS		31
+
 /* PLL */
 
 #define PLLCTRL_PLL_CONTROL			0x0
@@ -284,35 +274,6 @@
 	HDMI_INFOFRAME_AVI_DB5PR_8 = 7,
 	HDMI_INFOFRAME_AVI_DB5PR_9 = 8,
 	HDMI_INFOFRAME_AVI_DB5PR_10 = 9,
-	HDMI_INFOFRAME_AUDIO_DB1CT_FROM_STREAM = 0,
-	HDMI_INFOFRAME_AUDIO_DB1CT_IEC60958 = 1,
-	HDMI_INFOFRAME_AUDIO_DB1CT_AC3 = 2,
-	HDMI_INFOFRAME_AUDIO_DB1CT_MPEG1 = 3,
-	HDMI_INFOFRAME_AUDIO_DB1CT_MP3 = 4,
-	HDMI_INFOFRAME_AUDIO_DB1CT_MPEG2_MULTICH = 5,
-	HDMI_INFOFRAME_AUDIO_DB1CT_AAC = 6,
-	HDMI_INFOFRAME_AUDIO_DB1CT_DTS = 7,
-	HDMI_INFOFRAME_AUDIO_DB1CT_ATRAC = 8,
-	HDMI_INFOFRAME_AUDIO_DB1CT_ONEBIT = 9,
-	HDMI_INFOFRAME_AUDIO_DB1CT_DOLBY_DIGITAL_PLUS = 10,
-	HDMI_INFOFRAME_AUDIO_DB1CT_DTS_HD = 11,
-	HDMI_INFOFRAME_AUDIO_DB1CT_MAT = 12,
-	HDMI_INFOFRAME_AUDIO_DB1CT_DST = 13,
-	HDMI_INFOFRAME_AUDIO_DB1CT_WMA_PRO = 14,
-	HDMI_INFOFRAME_AUDIO_DB2SF_FROM_STREAM = 0,
-	HDMI_INFOFRAME_AUDIO_DB2SF_32000 = 1,
-	HDMI_INFOFRAME_AUDIO_DB2SF_44100 = 2,
-	HDMI_INFOFRAME_AUDIO_DB2SF_48000 = 3,
-	HDMI_INFOFRAME_AUDIO_DB2SF_88200 = 4,
-	HDMI_INFOFRAME_AUDIO_DB2SF_96000 = 5,
-	HDMI_INFOFRAME_AUDIO_DB2SF_176400 = 6,
-	HDMI_INFOFRAME_AUDIO_DB2SF_192000 = 7,
-	HDMI_INFOFRAME_AUDIO_DB2SS_FROM_STREAM = 0,
-	HDMI_INFOFRAME_AUDIO_DB2SS_16BIT = 1,
-	HDMI_INFOFRAME_AUDIO_DB2SS_20BIT = 2,
-	HDMI_INFOFRAME_AUDIO_DB2SS_24BIT = 3,
-	HDMI_INFOFRAME_AUDIO_DB5_DM_INH_PERMITTED = 0,
-	HDMI_INFOFRAME_AUDIO_DB5_DM_INH_PROHIBITED = 1
 };
 
 enum hdmi_packing_mode {
@@ -322,17 +283,6 @@
 	HDMI_PACK_ALREADYPACKED = 7
 };
 
-enum hdmi_core_audio_sample_freq {
-	HDMI_AUDIO_FS_32000 = 0x3,
-	HDMI_AUDIO_FS_44100 = 0x0,
-	HDMI_AUDIO_FS_48000 = 0x2,
-	HDMI_AUDIO_FS_88200 = 0x8,
-	HDMI_AUDIO_FS_96000 = 0xA,
-	HDMI_AUDIO_FS_176400 = 0xC,
-	HDMI_AUDIO_FS_192000 = 0xE,
-	HDMI_AUDIO_FS_NOT_INDICATED = 0x1
-};
-
 enum hdmi_core_audio_layout {
 	HDMI_AUDIO_LAYOUT_2CH = 0,
 	HDMI_AUDIO_LAYOUT_8CH = 1
@@ -387,37 +337,12 @@
 };
 
 enum hdmi_audio_i2s_config {
-	HDMI_AUDIO_I2S_WS_POLARITY_LOW_IS_LEFT = 0,
-	HDMI_AUDIO_I2S_WS_POLARIT_YLOW_IS_RIGHT = 1,
 	HDMI_AUDIO_I2S_MSB_SHIFTED_FIRST = 0,
 	HDMI_AUDIO_I2S_LSB_SHIFTED_FIRST = 1,
-	HDMI_AUDIO_I2S_MAX_WORD_20BITS = 0,
-	HDMI_AUDIO_I2S_MAX_WORD_24BITS = 1,
-	HDMI_AUDIO_I2S_CHST_WORD_NOT_SPECIFIED = 0,
-	HDMI_AUDIO_I2S_CHST_WORD_16_BITS = 1,
-	HDMI_AUDIO_I2S_CHST_WORD_17_BITS = 6,
-	HDMI_AUDIO_I2S_CHST_WORD_18_BITS = 2,
-	HDMI_AUDIO_I2S_CHST_WORD_19_BITS = 4,
-	HDMI_AUDIO_I2S_CHST_WORD_20_BITS_20MAX = 5,
-	HDMI_AUDIO_I2S_CHST_WORD_20_BITS_24MAX = 1,
-	HDMI_AUDIO_I2S_CHST_WORD_21_BITS = 6,
-	HDMI_AUDIO_I2S_CHST_WORD_22_BITS = 2,
-	HDMI_AUDIO_I2S_CHST_WORD_23_BITS = 4,
-	HDMI_AUDIO_I2S_CHST_WORD_24_BITS = 5,
 	HDMI_AUDIO_I2S_SCK_EDGE_FALLING = 0,
 	HDMI_AUDIO_I2S_SCK_EDGE_RISING = 1,
 	HDMI_AUDIO_I2S_VBIT_FOR_PCM = 0,
 	HDMI_AUDIO_I2S_VBIT_FOR_COMPRESSED = 1,
-	HDMI_AUDIO_I2S_INPUT_LENGTH_NA = 0,
-	HDMI_AUDIO_I2S_INPUT_LENGTH_16 = 2,
-	HDMI_AUDIO_I2S_INPUT_LENGTH_17 = 12,
-	HDMI_AUDIO_I2S_INPUT_LENGTH_18 = 4,
-	HDMI_AUDIO_I2S_INPUT_LENGTH_19 = 8,
-	HDMI_AUDIO_I2S_INPUT_LENGTH_20 = 10,
-	HDMI_AUDIO_I2S_INPUT_LENGTH_21 = 13,
-	HDMI_AUDIO_I2S_INPUT_LENGTH_22 = 5,
-	HDMI_AUDIO_I2S_INPUT_LENGTH_23 = 9,
-	HDMI_AUDIO_I2S_INPUT_LENGTH_24 = 11,
 	HDMI_AUDIO_I2S_FIRST_BIT_SHIFT = 0,
 	HDMI_AUDIO_I2S_FIRST_BIT_NO_SHIFT = 1,
 	HDMI_AUDIO_I2S_SD0_EN = 1,
@@ -446,20 +371,6 @@
 	enum hdmi_core_tclkselclkmult	tclk_sel_clkmult;
 };
 
-/*
- * Refer to section 8.2 in HDMI 1.3 specification for
- * details about infoframe databytes
- */
-struct hdmi_core_infoframe_audio {
-	u8 db1_coding_type;
-	u8 db1_channel_count;
-	u8 db2_sample_freq;
-	u8 db2_sample_size;
-	u8 db4_channel_alloc;
-	bool db5_downmix_inh;
-	u8 db5_lsv;	/* Level shift values for downmix */
-};
-
 struct hdmi_core_packet_enable_repeat {
 	u32	audio_pkt;
 	u32	audio_pkt_repeat;
@@ -496,15 +407,10 @@
 };
 
 struct hdmi_core_audio_i2s_config {
-	u8 word_max_length;
-	u8 word_length;
 	u8 in_length_bits;
 	u8 justification;
-	u8 en_high_bitrate_aud;
 	u8 sck_edge_mode;
-	u8 cbit_order;
 	u8 vbit;
-	u8 ws_polarity;
 	u8 direction;
 	u8 shift;
 	u8 active_sds;
@@ -512,7 +418,7 @@
 
 struct hdmi_core_audio_config {
 	struct hdmi_core_audio_i2s_config	i2s_cfg;
-	enum hdmi_core_audio_sample_freq	freq_sample;
+	struct snd_aes_iec958			*iec60958_cfg;
 	bool					fs_override;
 	u32					n;
 	u32					cts;
@@ -527,17 +433,4 @@
 	bool					en_spdif;
 };
 
-#if defined(CONFIG_SND_OMAP_SOC_OMAP4_HDMI) || \
-	defined(CONFIG_SND_OMAP_SOC_OMAP4_HDMI_MODULE)
-int hdmi_config_audio_acr(struct hdmi_ip_data *ip_data,
-				u32 sample_freq, u32 *n, u32 *cts);
-void hdmi_core_audio_infoframe_config(struct hdmi_ip_data *ip_data,
-		struct hdmi_core_infoframe_audio *info_aud);
-void hdmi_core_audio_config(struct hdmi_ip_data *ip_data,
-					struct hdmi_core_audio_config *cfg);
-void hdmi_wp_audio_config_dma(struct hdmi_ip_data *ip_data,
-					struct hdmi_audio_dma *aud_dma);
-void hdmi_wp_audio_config_format(struct hdmi_ip_data *ip_data,
-					struct hdmi_audio_format *aud_fmt);
-#endif
 #endif
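
hdmi_core_audio_config now consumes the raw IEC-60958 channel status (struct snd_aes_iec958) instead of driver-private sample-rate/word-length enums, and derives the I2S settings from it. A hedged sketch of how an audio user might fill that status block for 16-bit, 48 kHz PCM using the standard <sound/asoundef.h> bit definitions (the producer side is not part of this patch):

#include <linux/string.h>
#include <sound/asound.h>
#include <sound/asoundef.h>

static void example_fill_iec60958(struct snd_aes_iec958 *iec)
{
	memset(iec, 0, sizeof(*iec));
	iec->status[0] = IEC958_AES0_CON_NOT_COPYRIGHT;
	iec->status[3] = IEC958_AES3_CON_FS_48000;
	/* 16-bit samples: maximum word length 20 bits, 16 bits used */
	iec->status[4] = IEC958_AES4_CON_WORDLEN_20_16;
}
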
diff --git a/drivers/video/omap2/dss/venc.c b/drivers/video/omap2/dss/venc.c
index 9c3daf7..2b89739 100644
--- a/drivers/video/omap2/dss/venc.c
+++ b/drivers/video/omap2/dss/venc.c
@@ -415,6 +415,7 @@
 		return &venc_config_ntsc_trm;
 
 	BUG();
+	return NULL;
 }
 
 static int venc_power_on(struct omap_dss_device *dssdev)
@@ -440,10 +441,11 @@
 
 	venc_write_reg(VENC_OUTPUT_CONTROL, l);
 
-	dispc_set_digit_size(dssdev->panel.timings.x_res,
-			dssdev->panel.timings.y_res/2);
+	dss_mgr_set_timings(dssdev->manager, &dssdev->panel.timings);
 
-	regulator_enable(venc.vdda_dac_reg);
+	r = regulator_enable(venc.vdda_dac_reg);
+	if (r)
+		goto err;
 
 	if (dssdev->platform_enable)
 		dssdev->platform_enable(dssdev);
@@ -485,16 +487,68 @@
 	return 13500000;
 }
 
+static ssize_t display_output_type_show(struct device *dev,
+		struct device_attribute *attr, char *buf)
+{
+	struct omap_dss_device *dssdev = to_dss_device(dev);
+	const char *ret;
+
+	switch (dssdev->phy.venc.type) {
+	case OMAP_DSS_VENC_TYPE_COMPOSITE:
+		ret = "composite";
+		break;
+	case OMAP_DSS_VENC_TYPE_SVIDEO:
+		ret = "svideo";
+		break;
+	default:
+		return -EINVAL;
+	}
+
+	return snprintf(buf, PAGE_SIZE, "%s\n", ret);
+}
+
+static ssize_t display_output_type_store(struct device *dev,
+		struct device_attribute *attr, const char *buf, size_t size)
+{
+	struct omap_dss_device *dssdev = to_dss_device(dev);
+	enum omap_dss_venc_type new_type;
+
+	if (sysfs_streq("composite", buf))
+		new_type = OMAP_DSS_VENC_TYPE_COMPOSITE;
+	else if (sysfs_streq("svideo", buf))
+		new_type = OMAP_DSS_VENC_TYPE_SVIDEO;
+	else
+		return -EINVAL;
+
+	mutex_lock(&venc.venc_lock);
+
+	if (dssdev->phy.venc.type != new_type) {
+		dssdev->phy.venc.type = new_type;
+		if (dssdev->state == OMAP_DSS_DISPLAY_ACTIVE) {
+			venc_power_off(dssdev);
+			venc_power_on(dssdev);
+		}
+	}
+
+	mutex_unlock(&venc.venc_lock);
+
+	return size;
+}
+
+static DEVICE_ATTR(output_type, S_IRUGO | S_IWUSR,
+		display_output_type_show, display_output_type_store);
+
 /* driver */
 static int venc_panel_probe(struct omap_dss_device *dssdev)
 {
 	dssdev->panel.timings = omap_dss_pal_timings;
 
-	return 0;
+	return device_create_file(&dssdev->dev, &dev_attr_output_type);
 }
 
 static void venc_panel_remove(struct omap_dss_device *dssdev)
 {
+	device_remove_file(&dssdev->dev, &dev_attr_output_type);
 }
 
 static int venc_panel_enable(struct omap_dss_device *dssdev)
@@ -577,12 +631,6 @@
 	return venc_panel_enable(dssdev);
 }
 
-static void venc_get_timings(struct omap_dss_device *dssdev,
-			struct omap_video_timings *timings)
-{
-	*timings = dssdev->panel.timings;
-}
-
 static void venc_set_timings(struct omap_dss_device *dssdev,
 			struct omap_video_timings *timings)
 {
@@ -597,6 +645,8 @@
 		/* turn the venc off and on to get new timings to use */
 		venc_panel_disable(dssdev);
 		venc_panel_enable(dssdev);
+	} else {
+		dss_mgr_set_timings(dssdev->manager, timings);
 	}
 }
 
@@ -661,7 +711,6 @@
 	.get_resolution	= omapdss_default_get_resolution,
 	.get_recommended_bpp = omapdss_default_get_recommended_bpp,
 
-	.get_timings	= venc_get_timings,
 	.set_timings	= venc_set_timings,
 	.check_timings	= venc_check_timings,
 
@@ -675,7 +724,7 @@
 };
 /* driver end */
 
-int venc_init_display(struct omap_dss_device *dssdev)
+static int __init venc_init_display(struct omap_dss_device *dssdev)
 {
 	DSSDBG("init_display\n");
 
@@ -695,7 +744,7 @@
 	return 0;
 }
 
-void venc_dump_regs(struct seq_file *s)
+static void venc_dump_regs(struct seq_file *s)
 {
 #define DUMPREG(r) seq_printf(s, "%-35s %08x\n", #r, venc_read_reg(r))
 
@@ -779,8 +828,32 @@
 		clk_put(venc.tv_dac_clk);
 }
 
+static void __init venc_probe_pdata(struct platform_device *pdev)
+{
+	struct omap_dss_board_info *pdata = pdev->dev.platform_data;
+	int r, i;
+
+	for (i = 0; i < pdata->num_devices; ++i) {
+		struct omap_dss_device *dssdev = pdata->devices[i];
+
+		if (dssdev->type != OMAP_DISPLAY_TYPE_VENC)
+			continue;
+
+		r = venc_init_display(dssdev);
+		if (r) {
+			DSSERR("device %s init failed: %d\n", dssdev->name, r);
+			continue;
+		}
+
+		r = omap_dss_register_device(dssdev, &pdev->dev, i);
+		if (r)
+			DSSERR("device %s register failed: %d\n",
+					dssdev->name, r);
+	}
+}
+
 /* VENC HW IP initialisation */
-static int omap_venchw_probe(struct platform_device *pdev)
+static int __init omap_venchw_probe(struct platform_device *pdev)
 {
 	u8 rev_id;
 	struct resource *venc_mem;
@@ -824,6 +897,10 @@
 	if (r)
 		goto err_reg_panel_driver;
 
+	dss_debugfs_create_file("venc", venc_dump_regs);
+
+	venc_probe_pdata(pdev);
+
 	return 0;
 
 err_reg_panel_driver:
@@ -833,12 +910,15 @@
 	return r;
 }
 
-static int omap_venchw_remove(struct platform_device *pdev)
+static int __exit omap_venchw_remove(struct platform_device *pdev)
 {
+	omap_dss_unregister_child_devices(&pdev->dev);
+
 	if (venc.vdda_dac_reg != NULL) {
 		regulator_put(venc.vdda_dac_reg);
 		venc.vdda_dac_reg = NULL;
 	}
+
 	omap_dss_unregister_driver(&venc_driver);
 
 	pm_runtime_disable(&pdev->dev);
@@ -853,7 +933,6 @@
 		clk_disable(venc.tv_dac_clk);
 
 	dispc_runtime_put();
-	dss_runtime_put();
 
 	return 0;
 }
@@ -862,23 +941,14 @@
 {
 	int r;
 
-	r = dss_runtime_get();
-	if (r < 0)
-		goto err_get_dss;
-
 	r = dispc_runtime_get();
 	if (r < 0)
-		goto err_get_dispc;
+		return r;
 
 	if (venc.tv_dac_clk)
 		clk_enable(venc.tv_dac_clk);
 
 	return 0;
-
-err_get_dispc:
-	dss_runtime_put();
-err_get_dss:
-	return r;
 }
 
 static const struct dev_pm_ops venc_pm_ops = {
@@ -887,8 +957,7 @@
 };
 
 static struct platform_driver omap_venchw_driver = {
-	.probe          = omap_venchw_probe,
-	.remove         = omap_venchw_remove,
+	.remove         = __exit_p(omap_venchw_remove),
 	.driver         = {
 		.name   = "omapdss_venc",
 		.owner  = THIS_MODULE,
@@ -896,18 +965,18 @@
 	},
 };
 
-int venc_init_platform_driver(void)
+int __init venc_init_platform_driver(void)
 {
 	if (cpu_is_omap44xx())
 		return 0;
 
-	return platform_driver_register(&omap_venchw_driver);
+	return platform_driver_probe(&omap_venchw_driver, omap_venchw_probe);
 }
 
-void venc_uninit_platform_driver(void)
+void __exit venc_uninit_platform_driver(void)
 {
 	if (cpu_is_omap44xx())
 		return;
 
-	return platform_driver_unregister(&omap_venchw_driver);
+	platform_driver_unregister(&omap_venchw_driver);
 }
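
The new output_type attribute exposes the composite/S-Video selection at run time and re-powers the VENC when the type changes while the display is active. A hedged userspace sketch; the sysfs path depends on the board and display numbering and is only illustrative:

#include <fcntl.h>
#include <string.h>
#include <unistd.h>

static int example_set_tv_output(const char *type) /* "composite" or "svideo" */
{
	int fd, r;

	fd = open("/sys/devices/platform/omapdss/display1/output_type",
		  O_WRONLY);
	if (fd < 0)
		return -1;

	r = write(fd, type, strlen(type)) < 0 ? -1 : 0;
	close(fd);

	return r;
}
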
diff --git a/drivers/video/omap2/omapfb/omapfb-ioctl.c b/drivers/video/omap2/omapfb/omapfb-ioctl.c
index 6a09ef8..c6cf372 100644
--- a/drivers/video/omap2/omapfb/omapfb-ioctl.c
+++ b/drivers/video/omap2/omapfb/omapfb-ioctl.c
@@ -70,7 +70,7 @@
 
 	DBG("omapfb_setup_plane\n");
 
-	if (ofbi->num_overlays != 1) {
+	if (ofbi->num_overlays == 0) {
 		r = -EINVAL;
 		goto out;
 	}
@@ -185,7 +185,7 @@
 {
 	struct omapfb_info *ofbi = FB2OFB(fbi);
 
-	if (ofbi->num_overlays != 1) {
+	if (ofbi->num_overlays == 0) {
 		memset(pi, 0, sizeof(*pi));
 	} else {
 		struct omap_overlay *ovl;
@@ -225,6 +225,9 @@
 	down_write_nested(&rg->lock, rg->id);
 	atomic_inc(&rg->lock_count);
 
+	if (rg->size == size && rg->type == mi->type)
+		goto out;
+
 	if (atomic_read(&rg->map_count)) {
 		r = -EBUSY;
 		goto out;
@@ -247,12 +250,10 @@
 		}
 	}
 
-	if (rg->size != size || rg->type != mi->type) {
-		r = omapfb_realloc_fbmem(fbi, size, mi->type);
-		if (r) {
-			dev_err(fbdev->dev, "realloc fbmem failed\n");
-			goto out;
-		}
+	r = omapfb_realloc_fbmem(fbi, size, mi->type);
+	if (r) {
+		dev_err(fbdev->dev, "realloc fbmem failed\n");
+		goto out;
 	}
 
  out:
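
With the early-out added above, OMAPFB_SETUP_MEM becomes idempotent: re-requesting the current size and type now succeeds even while the framebuffer is mapped, instead of failing with -EBUSY. A hedged userspace sketch (the memory type and size are illustrative):

#include <sys/ioctl.h>
#include <linux/omapfb.h>

static int example_setup_fb_mem(int fd, __u32 size)
{
	struct omapfb_mem_info mi = {
		.size = size,
		.type = OMAPFB_MEMTYPE_SDRAM,
	};

	return ioctl(fd, OMAPFB_SETUP_MEM, &mi);
}
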
diff --git a/drivers/video/omap2/omapfb/omapfb-main.c b/drivers/video/omap2/omapfb/omapfb-main.c
index b00db40..3450ea0 100644
--- a/drivers/video/omap2/omapfb/omapfb-main.c
+++ b/drivers/video/omap2/omapfb/omapfb-main.c
@@ -179,6 +179,7 @@
 		break;
 	default:
 		BUG();
+		return 0;
 	}
 
 	offset *= vrfb->bytespp;
@@ -1502,7 +1503,7 @@
 
 		fbnum = simple_strtoul(p, &p, 10);
 
-		if (p == param)
+		if (p == start)
 			return -EINVAL;
 
 		if (*p != ':')
@@ -2307,7 +2308,7 @@
 	return 0;
 }
 
-static int omapfb_probe(struct platform_device *pdev)
+static int __init omapfb_probe(struct platform_device *pdev)
 {
 	struct omapfb2_device *fbdev = NULL;
 	int r = 0;
@@ -2448,7 +2449,7 @@
 	return r;
 }
 
-static int omapfb_remove(struct platform_device *pdev)
+static int __exit omapfb_remove(struct platform_device *pdev)
 {
 	struct omapfb2_device *fbdev = platform_get_drvdata(pdev);
 
@@ -2462,8 +2463,7 @@
 }
 
 static struct platform_driver omapfb_driver = {
-	.probe          = omapfb_probe,
-	.remove         = omapfb_remove,
+	.remove         = __exit_p(omapfb_remove),
 	.driver         = {
 		.name   = "omapfb",
 		.owner  = THIS_MODULE,
@@ -2474,7 +2474,7 @@
 {
 	DBG("omapfb_init\n");
 
-	if (platform_driver_register(&omapfb_driver)) {
+	if (platform_driver_probe(&omapfb_driver, omapfb_probe)) {
 		printk(KERN_ERR "failed to register omapfb driver\n");
 		return -ENODEV;
 	}
diff --git a/drivers/video/omap2/omapfb/omapfb.h b/drivers/video/omap2/omapfb/omapfb.h
index c0bdc9b..30361a0 100644
--- a/drivers/video/omap2/omapfb/omapfb.h
+++ b/drivers/video/omap2/omapfb/omapfb.h
@@ -166,6 +166,7 @@
 
 	/* This should never happen */
 	BUG();
+	return NULL;
 }
 
 static inline void omapfb_lock(struct omapfb2_device *fbdev)
diff --git a/drivers/video/omap2/vrfb.c b/drivers/video/omap2/vrfb.c
index 4e5b960..7e99022 100644
--- a/drivers/video/omap2/vrfb.c
+++ b/drivers/video/omap2/vrfb.c
@@ -179,8 +179,10 @@
 		pixel_size_exp = 2;
 	else if (bytespp == 2)
 		pixel_size_exp = 1;
-	else
+	else {
 		BUG();
+		return;
+	}
 
 	vrfb_width = ALIGN(width * bytespp, VRFB_PAGE_WIDTH) / bytespp;
 	vrfb_height = ALIGN(height, VRFB_PAGE_HEIGHT);
diff --git a/drivers/video/pxa3xx-gcu.c b/drivers/video/pxa3xx-gcu.c
index 1d71c08..0b4ae0c 100644
--- a/drivers/video/pxa3xx-gcu.c
+++ b/drivers/video/pxa3xx-gcu.c
@@ -316,12 +316,9 @@
 		ret = wait_event_interruptible_timeout(priv->wait_idle,
 					!priv->shared->hw_running, HZ*4);
 
-		if (ret < 0)
+		if (ret != 0)
 			break;
 
-		if (ret > 0)
-			continue;
-
 		if (gc_readl(priv, REG_GCRBEXHR) == rbexhr &&
 		    priv->shared->num_interrupts == num) {
 			QERROR("TIMEOUT");
diff --git a/drivers/video/s3c-fb.c b/drivers/video/s3c-fb.c
index f310516..ea7b661 100644
--- a/drivers/video/s3c-fb.c
+++ b/drivers/video/s3c-fb.c
@@ -47,7 +47,7 @@
 #ifdef CONFIG_FB_S3C_DEBUG_REGWRITE
 #undef writel
 #define writel(v, r) do { \
-	printk(KERN_DEBUG "%s: %08x => %p\n", __func__, (unsigned int)v, r); \
+	pr_debug("%s: %08x => %p\n", __func__, (unsigned int)v, r); \
 	__raw_writel(v, r); \
 } while (0)
 #endif /* FB_S3C_DEBUG_REGWRITE */
@@ -361,7 +361,7 @@
 	result = (unsigned int)tmp / 1000;
 
 	dev_dbg(sfb->dev, "pixclk=%u, clk=%lu, div=%d (%lu)\n",
-		pixclk, clk, result, clk / result);
+		pixclk, clk, result, result ? clk / result : clk);
 
 	return result;
 }
@@ -495,7 +495,6 @@
 	u32 alpha = 0;
 	u32 data;
 	u32 pagewidth;
-	int clkdiv;
 
 	dev_dbg(sfb->dev, "setting framebuffer parameters\n");
 
@@ -532,48 +531,9 @@
 	/* disable the window whilst we update it */
 	writel(0, regs + WINCON(win_no));
 
-	/* use platform specified window as the basis for the lcd timings */
-
-	if (win_no == sfb->pdata->default_win) {
-		clkdiv = s3c_fb_calc_pixclk(sfb, var->pixclock);
-
-		data = sfb->pdata->vidcon0;
-		data &= ~(VIDCON0_CLKVAL_F_MASK | VIDCON0_CLKDIR);
-
-		if (clkdiv > 1)
-			data |= VIDCON0_CLKVAL_F(clkdiv-1) | VIDCON0_CLKDIR;
-		else
-			data &= ~VIDCON0_CLKDIR;	/* 1:1 clock */
-
-		/* write the timing data to the panel */
-
-		if (sfb->variant.is_2443)
-			data |= (1 << 5);
-
-		writel(data, regs + VIDCON0);
-
+	if (!sfb->output_on)
 		s3c_fb_enable(sfb, 1);
 
-		data = VIDTCON0_VBPD(var->upper_margin - 1) |
-		       VIDTCON0_VFPD(var->lower_margin - 1) |
-		       VIDTCON0_VSPW(var->vsync_len - 1);
-
-		writel(data, regs + sfb->variant.vidtcon);
-
-		data = VIDTCON1_HBPD(var->left_margin - 1) |
-		       VIDTCON1_HFPD(var->right_margin - 1) |
-		       VIDTCON1_HSPW(var->hsync_len - 1);
-
-		/* VIDTCON1 */
-		writel(data, regs + sfb->variant.vidtcon + 4);
-
-		data = VIDTCON2_LINEVAL(var->yres - 1) |
-		       VIDTCON2_HOZVAL(var->xres - 1) |
-		       VIDTCON2_LINEVAL_E(var->yres - 1) |
-		       VIDTCON2_HOZVAL_E(var->xres - 1);
-		writel(data, regs + sfb->variant.vidtcon + 8);
-	}
-
 	/* write the buffer address */
 
 	/* start and end registers stride is 8 */
@@ -839,6 +799,7 @@
 	struct s3c_fb *sfb = win->parent;
 	unsigned int index = win->index;
 	u32 wincon;
+	u32 output_on = sfb->output_on;
 
 	dev_dbg(sfb->dev, "blank mode %d\n", blank_mode);
 
@@ -877,34 +838,18 @@
 
 	shadow_protect_win(win, 1);
 	writel(wincon, sfb->regs + sfb->variant.wincon + (index * 4));
-	shadow_protect_win(win, 0);
 
 	/* Check the enabled state to see if we need to be running the
 	 * main LCD interface, as if there are no active windows then
 	 * it is highly likely that we also do not need to output
 	 * anything.
 	 */
-
-	/* We could do something like the following code, but the current
-	 * system of using framebuffer events means that we cannot make
-	 * the distinction between just window 0 being inactive and all
-	 * the windows being down.
-	 *
-	 * s3c_fb_enable(sfb, sfb->enabled ? 1 : 0);
-	*/
-
-	/* we're stuck with this until we can do something about overriding
-	 * the power control using the blanking event for a single fb.
-	 */
-	if (index == sfb->pdata->default_win) {
-		shadow_protect_win(win, 1);
-		s3c_fb_enable(sfb, blank_mode != FB_BLANK_POWERDOWN ? 1 : 0);
-		shadow_protect_win(win, 0);
-	}
+	s3c_fb_enable(sfb, sfb->enabled ? 1 : 0);
+	shadow_protect_win(win, 0);
 
 	pm_runtime_put_sync(sfb->dev);
 
-	return 0;
+	return output_on == sfb->output_on;
 }
 
 /**
@@ -1111,7 +1056,7 @@
  *
  * Calculate the pixel clock when none has been given through platform data.
  */
-static void __devinit s3c_fb_missing_pixclock(struct fb_videomode *mode)
+static void s3c_fb_missing_pixclock(struct fb_videomode *mode)
 {
 	u64 pixclk = 1000000000000ULL;
 	u32 div;
@@ -1144,11 +1089,11 @@
 
 	dev_dbg(sfb->dev, "allocating memory for display\n");
 
-	real_size = windata->win_mode.xres * windata->win_mode.yres;
+	real_size = windata->xres * windata->yres;
 	virt_size = windata->virtual_x * windata->virtual_y;
 
 	dev_dbg(sfb->dev, "real_size=%u (%u.%u), virt_size=%u (%u.%u)\n",
-		real_size, windata->win_mode.xres, windata->win_mode.yres,
+		real_size, windata->xres, windata->yres,
 		virt_size, windata->virtual_x, windata->virtual_y);
 
 	size = (real_size > virt_size) ? real_size : virt_size;
@@ -1230,7 +1175,7 @@
 				      struct s3c_fb_win **res)
 {
 	struct fb_var_screeninfo *var;
-	struct fb_videomode *initmode;
+	struct fb_videomode initmode;
 	struct s3c_fb_pd_win *windata;
 	struct s3c_fb_win *win;
 	struct fb_info *fbinfo;
@@ -1251,11 +1196,11 @@
 	}
 
 	windata = sfb->pdata->win[win_no];
-	initmode = &windata->win_mode;
+	initmode = *sfb->pdata->vtiming;
 
 	WARN_ON(windata->max_bpp == 0);
-	WARN_ON(windata->win_mode.xres == 0);
-	WARN_ON(windata->win_mode.yres == 0);
+	WARN_ON(windata->xres == 0);
+	WARN_ON(windata->yres == 0);
 
 	win = fbinfo->par;
 	*res = win;
@@ -1294,7 +1239,9 @@
 	}
 
 	/* setup the initial video mode from the window */
-	fb_videomode_to_var(&fbinfo->var, initmode);
+	initmode.xres = windata->xres;
+	initmode.yres = windata->yres;
+	fb_videomode_to_var(&fbinfo->var, &initmode);
 
 	fbinfo->fix.type	= FB_TYPE_PACKED_PIXELS;
 	fbinfo->fix.accel	= FB_ACCEL_NONE;
@@ -1339,6 +1286,53 @@
 }
 
 /**
+ * s3c_fb_set_rgb_timing() - set video timing for rgb interface.
+ * @sfb: The base resources for the hardware.
+ *
+ * Set horizontal and vertical lcd rgb interface timing.
+ */
+static void s3c_fb_set_rgb_timing(struct s3c_fb *sfb)
+{
+	struct fb_videomode *vmode = sfb->pdata->vtiming;
+	void __iomem *regs = sfb->regs;
+	int clkdiv;
+	u32 data;
+
+	if (!vmode->pixclock)
+		s3c_fb_missing_pixclock(vmode);
+
+	clkdiv = s3c_fb_calc_pixclk(sfb, vmode->pixclock);
+
+	data = sfb->pdata->vidcon0;
+	data &= ~(VIDCON0_CLKVAL_F_MASK | VIDCON0_CLKDIR);
+
+	if (clkdiv > 1)
+		data |= VIDCON0_CLKVAL_F(clkdiv-1) | VIDCON0_CLKDIR;
+	else
+		data &= ~VIDCON0_CLKDIR;	/* 1:1 clock */
+
+	if (sfb->variant.is_2443)
+		data |= (1 << 5);
+	writel(data, regs + VIDCON0);
+
+	data = VIDTCON0_VBPD(vmode->upper_margin - 1) |
+	       VIDTCON0_VFPD(vmode->lower_margin - 1) |
+	       VIDTCON0_VSPW(vmode->vsync_len - 1);
+	writel(data, regs + sfb->variant.vidtcon);
+
+	data = VIDTCON1_HBPD(vmode->left_margin - 1) |
+	       VIDTCON1_HFPD(vmode->right_margin - 1) |
+	       VIDTCON1_HSPW(vmode->hsync_len - 1);
+	writel(data, regs + sfb->variant.vidtcon + 4);
+
+	data = VIDTCON2_LINEVAL(vmode->yres - 1) |
+	       VIDTCON2_HOZVAL(vmode->xres - 1) |
+	       VIDTCON2_LINEVAL_E(vmode->yres - 1) |
+	       VIDTCON2_HOZVAL_E(vmode->xres - 1);
+	writel(data, regs + sfb->variant.vidtcon + 8);
+}
+
+/**
  * s3c_fb_clear_win() - clear hardware window registers.
  * @sfb: The base resources for the hardware.
  * @win: The window to process.
@@ -1354,8 +1348,14 @@
 	writel(0, regs + VIDOSD_A(win, sfb->variant));
 	writel(0, regs + VIDOSD_B(win, sfb->variant));
 	writel(0, regs + VIDOSD_C(win, sfb->variant));
-	reg = readl(regs + SHADOWCON);
-	writel(reg & ~SHADOWCON_WINx_PROTECT(win), regs + SHADOWCON);
+
+	if (sfb->variant.has_shadowcon) {
+		reg = readl(sfb->regs + SHADOWCON);
+		reg &= ~(SHADOWCON_WINx_PROTECT(win) |
+			SHADOWCON_CHx_ENABLE(win) |
+			SHADOWCON_CHx_LOCAL_ENABLE(win));
+		writel(reg, sfb->regs + SHADOWCON);
+	}
 }
 
 static int __devinit s3c_fb_probe(struct platform_device *pdev)
@@ -1481,15 +1481,14 @@
 		writel(0xffffff, regs + WKEYCON1);
 	}
 
+	s3c_fb_set_rgb_timing(sfb);
+
 	/* we have the register setup, start allocating framebuffers */
 
 	for (win = 0; win < fbdrv->variant.nr_windows; win++) {
 		if (!pd->win[win])
 			continue;
 
-		if (!pd->win[win]->win_mode.pixclock)
-			s3c_fb_missing_pixclock(&pd->win[win]->win_mode);
-
 		ret = s3c_fb_probe_win(sfb, win, fbdrv->win[win],
 				       &sfb->windows[win]);
 		if (ret < 0) {
@@ -1564,6 +1563,8 @@
 	struct s3c_fb_win *win;
 	int win_no;
 
+	pm_runtime_get_sync(sfb->dev);
+
 	for (win_no = S3C_FB_MAX_WIN - 1; win_no >= 0; win_no--) {
 		win = sfb->windows[win_no];
 		if (!win)
@@ -1577,6 +1578,9 @@
 		clk_disable(sfb->lcd_clk);
 
 	clk_disable(sfb->bus_clk);
+
+	pm_runtime_put_sync(sfb->dev);
+
 	return 0;
 }
 
@@ -1589,6 +1593,8 @@
 	int win_no;
 	u32 reg;
 
+	pm_runtime_get_sync(sfb->dev);
+
 	clk_enable(sfb->bus_clk);
 
 	if (!sfb->variant.has_clksel)
@@ -1623,6 +1629,8 @@
 		shadow_protect_win(win, 0);
 	}
 
+	s3c_fb_set_rgb_timing(sfb);
+
 	/* restore framebuffers */
 	for (win_no = 0; win_no < S3C_FB_MAX_WIN; win_no++) {
 		win = sfb->windows[win_no];
@@ -1633,6 +1641,8 @@
 		s3c_fb_set_par(win->fbinfo);
 	}
 
+	pm_runtime_put_sync(sfb->dev);
+
 	return 0;
 }
 #endif
diff --git a/drivers/video/savage/savagefb_driver.c b/drivers/video/savage/savagefb_driver.c
index cee7803..f3d3b9c 100644
--- a/drivers/video/savage/savagefb_driver.c
+++ b/drivers/video/savage/savagefb_driver.c
@@ -1351,7 +1351,7 @@
 	/* following part not present in X11 driver */
 	cr67 = vga_in8(0x3d5, par) & 0xf;
 	vga_out8(0x3d5, 0x50 | cr67, par);
-	udelay(10000);
+	mdelay(10);
 	vga_out8(0x3d4, 0x67, par);
 	/* end of part */
 	vga_out8(0x3d5, reg->CR67 & ~0x0c, par);
@@ -1904,11 +1904,11 @@
 	vga_out8(0x3d4, 0x66, par);
 	cr66 = vga_in8(0x3d5, par);
 	vga_out8(0x3d5, cr66 | 0x02, par);
-	udelay(10000);
+	mdelay(10);
 
 	vga_out8(0x3d4, 0x66, par);
 	vga_out8(0x3d5, cr66 & ~0x02, par);	/* clear reset flag */
-	udelay(10000);
+	mdelay(10);
 
 
 	/*
@@ -1918,11 +1918,11 @@
 	vga_out8(0x3d4, 0x3f, par);
 	cr3f = vga_in8(0x3d5, par);
 	vga_out8(0x3d5, cr3f | 0x08, par);
-	udelay(10000);
+	mdelay(10);
 
 	vga_out8(0x3d4, 0x3f, par);
 	vga_out8(0x3d5, cr3f & ~0x08, par);	/* clear reset flags */
-	udelay(10000);
+	mdelay(10);
 
 	/* Savage ramdac speeds */
 	par->numClocks = 4;
diff --git a/drivers/video/sh_mobile_hdmi.c b/drivers/video/sh_mobile_hdmi.c
index eafb19d..930e550 100644
--- a/drivers/video/sh_mobile_hdmi.c
+++ b/drivers/video/sh_mobile_hdmi.c
@@ -31,6 +31,7 @@
 
 #include "sh_mobile_lcdcfb.h"
 
+/* HDMI Core Control Register (HTOP0) */
 #define HDMI_SYSTEM_CTRL			0x00 /* System control */
 #define HDMI_L_R_DATA_SWAP_CTRL_RPKT		0x01 /* L/R data swap control,
 							bits 19..16 of 20-bit N for Audio Clock Regeneration packet */
@@ -201,6 +202,68 @@
 #define HDMI_REVISION_ID			0xF1 /* Revision ID */
 #define HDMI_TEST_MODE				0xFE /* Test mode */
 
+/* HDMI Control Register (HTOP1) */
+#define HDMI_HTOP1_TEST_MODE			0x0000 /* Test mode */
+#define HDMI_HTOP1_VIDEO_INPUT			0x0008 /* VideoInput */
+#define HDMI_HTOP1_CORE_RSTN			0x000C /* CoreResetn */
+#define HDMI_HTOP1_PLLBW			0x0018 /* PLLBW */
+#define HDMI_HTOP1_CLK_TO_PHY			0x001C /* Clk to Phy */
+#define HDMI_HTOP1_VIDEO_INPUT2			0x0020 /* VideoInput2 */
+#define HDMI_HTOP1_TISEMP0_1			0x0024 /* tisemp0-1 */
+#define HDMI_HTOP1_TISEMP2_C			0x0028 /* tisemp2-c */
+#define HDMI_HTOP1_TISIDRV			0x002C /* tisidrv */
+#define HDMI_HTOP1_TISEN			0x0034 /* tisen */
+#define HDMI_HTOP1_TISDREN			0x0038 /* tisdren  */
+#define HDMI_HTOP1_CISRANGE			0x003C /* cisrange  */
+#define HDMI_HTOP1_ENABLE_SELECTOR		0x0040 /* Enable Selector */
+#define HDMI_HTOP1_MACRO_RESET			0x0044 /* Macro reset */
+#define HDMI_HTOP1_PLL_CALIBRATION		0x0048 /* PLL calibration */
+#define HDMI_HTOP1_RE_CALIBRATION		0x004C /* Re-calibration */
+#define HDMI_HTOP1_CURRENT			0x0050 /* Current */
+#define HDMI_HTOP1_PLL_LOCK_DETECT		0x0054 /* PLL lock detect */
+#define HDMI_HTOP1_PHY_TEST_MODE		0x0058 /* PHY Test Mode */
+#define HDMI_HTOP1_CLK_SET			0x0080 /* Clock Set */
+#define HDMI_HTOP1_DDC_FAIL_SAFE		0x0084 /* DDC fail safe */
+#define HDMI_HTOP1_PRBS				0x0088 /* PRBS */
+#define HDMI_HTOP1_EDID_AINC_CONTROL		0x008C /* EDID ainc Control */
+#define HDMI_HTOP1_HTOP_DCL_MODE		0x00FC /* Deep Color Mode */
+#define HDMI_HTOP1_HTOP_DCL_FRC_COEF0		0x0100 /* Deep Color:FRC COEF0 */
+#define HDMI_HTOP1_HTOP_DCL_FRC_COEF1		0x0104 /* Deep Color:FRC COEF1 */
+#define HDMI_HTOP1_HTOP_DCL_FRC_COEF2		0x0108 /* Deep Color:FRC COEF2 */
+#define HDMI_HTOP1_HTOP_DCL_FRC_COEF3		0x010C /* Deep Color:FRC COEF3 */
+#define HDMI_HTOP1_HTOP_DCL_FRC_COEF0_C		0x0110 /* Deep Color:FRC COEF0C */
+#define HDMI_HTOP1_HTOP_DCL_FRC_COEF1_C		0x0114 /* Deep Color:FRC COEF1C */
+#define HDMI_HTOP1_HTOP_DCL_FRC_COEF2_C		0x0118 /* Deep Color:FRC COEF2C */
+#define HDMI_HTOP1_HTOP_DCL_FRC_COEF3_C		0x011C /* Deep Color:FRC COEF3C */
+#define HDMI_HTOP1_HTOP_DCL_FRC_MODE		0x0120 /* Deep Color:FRC Mode */
+#define HDMI_HTOP1_HTOP_DCL_RECT_START1		0x0124 /* Deep Color:Rect Start1 */
+#define HDMI_HTOP1_HTOP_DCL_RECT_SIZE1		0x0128 /* Deep Color:Rect Size1 */
+#define HDMI_HTOP1_HTOP_DCL_RECT_START2		0x012C /* Deep Color:Rect Start2 */
+#define HDMI_HTOP1_HTOP_DCL_RECT_SIZE2		0x0130 /* Deep Color:Rect Size2 */
+#define HDMI_HTOP1_HTOP_DCL_RECT_START3		0x0134 /* Deep Color:Rect Start3 */
+#define HDMI_HTOP1_HTOP_DCL_RECT_SIZE3		0x0138 /* Deep Color:Rect Size3 */
+#define HDMI_HTOP1_HTOP_DCL_RECT_START4		0x013C /* Deep Color:Rect Start4 */
+#define HDMI_HTOP1_HTOP_DCL_RECT_SIZE4		0x0140 /* Deep Color:Rect Size4 */
+#define HDMI_HTOP1_HTOP_DCL_FIL_PARA_Y1_1	0x0144 /* Deep Color:Fil Para Y1_1 */
+#define HDMI_HTOP1_HTOP_DCL_FIL_PARA_Y1_2	0x0148 /* Deep Color:Fil Para Y1_2 */
+#define HDMI_HTOP1_HTOP_DCL_FIL_PARA_CB1_1	0x014C /* Deep Color:Fil Para CB1_1 */
+#define HDMI_HTOP1_HTOP_DCL_FIL_PARA_CB1_2	0x0150 /* Deep Color:Fil Para CB1_2 */
+#define HDMI_HTOP1_HTOP_DCL_FIL_PARA_CR1_1	0x0154 /* Deep Color:Fil Para CR1_1 */
+#define HDMI_HTOP1_HTOP_DCL_FIL_PARA_CR1_2	0x0158 /* Deep Color:Fil Para CR1_2 */
+#define HDMI_HTOP1_HTOP_DCL_FIL_PARA_Y2_1	0x015C /* Deep Color:Fil Para Y2_1 */
+#define HDMI_HTOP1_HTOP_DCL_FIL_PARA_Y2_2	0x0160 /* Deep Color:Fil Para Y2_2 */
+#define HDMI_HTOP1_HTOP_DCL_FIL_PARA_CB2_1	0x0164 /* Deep Color:Fil Para CB2_1 */
+#define HDMI_HTOP1_HTOP_DCL_FIL_PARA_CB2_2	0x0168 /* Deep Color:Fil Para CB2_2 */
+#define HDMI_HTOP1_HTOP_DCL_FIL_PARA_CR2_1	0x016C /* Deep Color:Fil Para CR2_1 */
+#define HDMI_HTOP1_HTOP_DCL_FIL_PARA_CR2_2	0x0170 /* Deep Color:Fil Para CR2_2 */
+#define HDMI_HTOP1_HTOP_DCL_COR_PARA_Y1		0x0174 /* Deep Color:Cor Para Y1 */
+#define HDMI_HTOP1_HTOP_DCL_COR_PARA_CB1	0x0178 /* Deep Color:Cor Para CB1 */
+#define HDMI_HTOP1_HTOP_DCL_COR_PARA_CR1	0x017C /* Deep Color:Cor Para CR1 */
+#define HDMI_HTOP1_HTOP_DCL_COR_PARA_Y2		0x0180 /* Deep Color:Cor Para Y2 */
+#define HDMI_HTOP1_HTOP_DCL_COR_PARA_CB2	0x0184 /* Deep Color:Cor Para CB2 */
+#define HDMI_HTOP1_HTOP_DCL_COR_PARA_CR2	0x0188 /* Deep Color:Cor Para CR2 */
+#define HDMI_HTOP1_EDID_DATA_READ		0x0200 /* EDID Data Read 128Byte:0x03FC */
+
 enum hotplug_state {
 	HDMI_HOTPLUG_DISCONNECTED,
 	HDMI_HOTPLUG_CONNECTED,
@@ -211,6 +274,7 @@
 	struct sh_mobile_lcdc_entity entity;
 
 	void __iomem *base;
+	void __iomem *htop1;
 	enum hotplug_state hp_state;	/* hot-plug status */
 	u8 preprogrammed_vic;		/* use a pre-programmed VIC or
 					   the external mode */
@@ -222,20 +286,66 @@
 	struct delayed_work edid_work;
 	struct fb_videomode mode;
 	struct fb_monspecs monspec;
+
+	/* register access functions */
+	void (*write)(struct sh_hdmi *hdmi, u8 data, u8 reg);
+	u8 (*read)(struct sh_hdmi *hdmi, u8 reg);
 };
 
 #define entity_to_sh_hdmi(e)	container_of(e, struct sh_hdmi, entity)
 
-static void hdmi_write(struct sh_hdmi *hdmi, u8 data, u8 reg)
+static void __hdmi_write8(struct sh_hdmi *hdmi, u8 data, u8 reg)
 {
 	iowrite8(data, hdmi->base + reg);
 }
 
-static u8 hdmi_read(struct sh_hdmi *hdmi, u8 reg)
+static u8 __hdmi_read8(struct sh_hdmi *hdmi, u8 reg)
 {
 	return ioread8(hdmi->base + reg);
 }
 
+static void __hdmi_write32(struct sh_hdmi *hdmi, u8 data, u8 reg)
+{
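+	/* registers on the 32-bit bus sit on a 4-byte stride, so scale the 8-bit offset */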
+	iowrite32((u32)data, hdmi->base + (reg * 4));
+	udelay(100);
+}
+
+static u8 __hdmi_read32(struct sh_hdmi *hdmi, u8 reg)
+{
+	return (u8)ioread32(hdmi->base + (reg * 4));
+}
+
+static void hdmi_write(struct sh_hdmi *hdmi, u8 data, u8 reg)
+{
+	hdmi->write(hdmi, data, reg);
+}
+
+static u8 hdmi_read(struct sh_hdmi *hdmi, u8 reg)
+{
+	return hdmi->read(hdmi, reg);
+}
+
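+/* read-modify-write helper: update only the bits selected by mask */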
+static void hdmi_bit_set(struct sh_hdmi *hdmi, u8 mask, u8 data, u8 reg)
+{
+	u8 val = hdmi_read(hdmi, reg);
+
+	val &= ~mask;
+	val |= (data & mask);
+
+	hdmi_write(hdmi, val, reg);
+}
+
+static void hdmi_htop1_write(struct sh_hdmi *hdmi, u32 data, u32 reg)
+{
+	iowrite32(data, hdmi->htop1 + reg);
+	udelay(100);
+}
+
+static u32 hdmi_htop1_read(struct sh_hdmi *hdmi, u32 reg)
+{
+	return ioread32(hdmi->htop1 + reg);
+}
+
 /*
  *	HDMI sound
  */
@@ -693,11 +803,11 @@
 	msleep(10);
 
 	/* PS mode b->d, reset PLLA and PLLB */
-	hdmi_write(hdmi, 0x4C, HDMI_SYSTEM_CTRL);
+	hdmi_bit_set(hdmi, 0xFC, 0x4C, HDMI_SYSTEM_CTRL);
 
 	udelay(10);
 
-	hdmi_write(hdmi, 0x40, HDMI_SYSTEM_CTRL);
+	hdmi_bit_set(hdmi, 0xFC, 0x40, HDMI_SYSTEM_CTRL);
 }
 
 static unsigned long sh_hdmi_rate_error(struct sh_hdmi *hdmi,
@@ -746,7 +856,9 @@
 	/* Read EDID */
 	dev_dbg(hdmi->dev, "Read back EDID code:");
 	for (i = 0; i < 128; i++) {
-		edid[i] = hdmi_read(hdmi, HDMI_EDID_KSV_FIFO_ACCESS_WINDOW);
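+		/* HTOP1-equipped controllers expose the EDID bytes at a 4-byte stride */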
+		edid[i] = (hdmi->htop1) ?
+			(u8)hdmi_htop1_read(hdmi, HDMI_HTOP1_EDID_DATA_READ + (i * 4)) :
+			hdmi_read(hdmi, HDMI_EDID_KSV_FIFO_ACCESS_WINDOW);
 #ifdef DEBUG
 		if ((i % 16) == 0) {
 			printk(KERN_CONT "\n");
@@ -917,13 +1029,13 @@
 	u8 status1, status2, mask1, mask2;
 
 	/* mode_b and PLLA and PLLB reset */
-	hdmi_write(hdmi, 0x2C, HDMI_SYSTEM_CTRL);
+	hdmi_bit_set(hdmi, 0xFC, 0x2C, HDMI_SYSTEM_CTRL);
 
 	/* How long shall reset be held? */
 	udelay(10);
 
 	/* mode_b and PLLA and PLLB reset release */
-	hdmi_write(hdmi, 0x20, HDMI_SYSTEM_CTRL);
+	hdmi_bit_set(hdmi, 0xFC, 0x20, HDMI_SYSTEM_CTRL);
 
 	status1 = hdmi_read(hdmi, HDMI_INTERRUPT_STATUS_1);
 	status2 = hdmi_read(hdmi, HDMI_INTERRUPT_STATUS_2);
@@ -1001,7 +1113,7 @@
 	 */
 	if (hdmi->hp_state == HDMI_HOTPLUG_EDID_DONE) {
 		/* PS mode d->e. All functions are active */
-		hdmi_write(hdmi, 0x80, HDMI_SYSTEM_CTRL);
+		hdmi_bit_set(hdmi, 0xFC, 0x80, HDMI_SYSTEM_CTRL);
 		dev_dbg(hdmi->dev, "HDMI running\n");
 	}
 
@@ -1016,7 +1128,7 @@
 
 	dev_dbg(hdmi->dev, "%s(%p)\n", __func__, hdmi);
 	/* PS mode e->a */
-	hdmi_write(hdmi, 0x10, HDMI_SYSTEM_CTRL);
+	hdmi_bit_set(hdmi, 0xFC, 0x10, HDMI_SYSTEM_CTRL);
 }
 
 static const struct sh_mobile_lcdc_entity_ops sh_hdmi_ops = {
@@ -1110,10 +1222,58 @@
 	dev_dbg(hdmi->dev, "%s(%p): end\n", __func__, hdmi);
 }
 
+static void sh_hdmi_htop1_init(struct sh_hdmi *hdmi)
+{
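+	/* HTOP1 bring-up: deep-colour defaults, PHY drive/PLL settings and a macro reset sequence */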
+	hdmi_htop1_write(hdmi, 0x00000000, HDMI_HTOP1_HTOP_DCL_MODE);
+	hdmi_htop1_write(hdmi, 0x0000000b, 0x0010);
+	hdmi_htop1_write(hdmi, 0x00006710, HDMI_HTOP1_HTOP_DCL_FRC_MODE);
+	hdmi_htop1_write(hdmi, 0x01020406, HDMI_HTOP1_HTOP_DCL_FIL_PARA_Y1_1);
+	hdmi_htop1_write(hdmi, 0x07080806, HDMI_HTOP1_HTOP_DCL_FIL_PARA_Y1_2);
+	hdmi_htop1_write(hdmi, 0x01020406, HDMI_HTOP1_HTOP_DCL_FIL_PARA_CB1_1);
+	hdmi_htop1_write(hdmi, 0x07080806, HDMI_HTOP1_HTOP_DCL_FIL_PARA_CB1_2);
+	hdmi_htop1_write(hdmi, 0x01020406, HDMI_HTOP1_HTOP_DCL_FIL_PARA_CR1_1);
+	hdmi_htop1_write(hdmi, 0x07080806, HDMI_HTOP1_HTOP_DCL_FIL_PARA_CR1_2);
+	hdmi_htop1_write(hdmi, 0x01020406, HDMI_HTOP1_HTOP_DCL_FIL_PARA_Y2_1);
+	hdmi_htop1_write(hdmi, 0x07080806, HDMI_HTOP1_HTOP_DCL_FIL_PARA_Y2_2);
+	hdmi_htop1_write(hdmi, 0x01020406, HDMI_HTOP1_HTOP_DCL_FIL_PARA_CB2_1);
+	hdmi_htop1_write(hdmi, 0x07080806, HDMI_HTOP1_HTOP_DCL_FIL_PARA_CB2_2);
+	hdmi_htop1_write(hdmi, 0x01020406, HDMI_HTOP1_HTOP_DCL_FIL_PARA_CR2_1);
+	hdmi_htop1_write(hdmi, 0x07080806, HDMI_HTOP1_HTOP_DCL_FIL_PARA_CR2_2);
+	hdmi_htop1_write(hdmi, 0x00000000, HDMI_HTOP1_HTOP_DCL_COR_PARA_Y1);
+	hdmi_htop1_write(hdmi, 0x00000000, HDMI_HTOP1_HTOP_DCL_COR_PARA_CB1);
+	hdmi_htop1_write(hdmi, 0x00000000, HDMI_HTOP1_HTOP_DCL_COR_PARA_CR1);
+	hdmi_htop1_write(hdmi, 0x00000000, HDMI_HTOP1_HTOP_DCL_COR_PARA_Y2);
+	hdmi_htop1_write(hdmi, 0x00000000, HDMI_HTOP1_HTOP_DCL_COR_PARA_CB2);
+	hdmi_htop1_write(hdmi, 0x00000000, HDMI_HTOP1_HTOP_DCL_COR_PARA_CR2);
+	hdmi_htop1_write(hdmi, 0x00000008, HDMI_HTOP1_CURRENT);
+	hdmi_htop1_write(hdmi, 0x00000000, HDMI_HTOP1_TISEMP0_1);
+	hdmi_htop1_write(hdmi, 0x00000000, HDMI_HTOP1_TISEMP2_C);
+	hdmi_htop1_write(hdmi, 0x00000000, HDMI_HTOP1_PHY_TEST_MODE);
+	hdmi_htop1_write(hdmi, 0x00000081, HDMI_HTOP1_TISIDRV);
+	hdmi_htop1_write(hdmi, 0x00000000, HDMI_HTOP1_PLLBW);
+	hdmi_htop1_write(hdmi, 0x0000000f, HDMI_HTOP1_TISEN);
+	hdmi_htop1_write(hdmi, 0x0000000f, HDMI_HTOP1_TISDREN);
+	hdmi_htop1_write(hdmi, 0x00000003, HDMI_HTOP1_ENABLE_SELECTOR);
+	hdmi_htop1_write(hdmi, 0x00000001, HDMI_HTOP1_MACRO_RESET);
+	hdmi_htop1_write(hdmi, 0x00000016, HDMI_HTOP1_CISRANGE);
+	msleep(100);
+	hdmi_htop1_write(hdmi, 0x00000001, HDMI_HTOP1_ENABLE_SELECTOR);
+	msleep(100);
+	hdmi_htop1_write(hdmi, 0x00000003, HDMI_HTOP1_ENABLE_SELECTOR);
+	hdmi_htop1_write(hdmi, 0x00000001, HDMI_HTOP1_MACRO_RESET);
+	hdmi_htop1_write(hdmi, 0x0000000f, HDMI_HTOP1_TISEN);
+	hdmi_htop1_write(hdmi, 0x0000000f, HDMI_HTOP1_TISDREN);
+	hdmi_htop1_write(hdmi, 0x00000000, HDMI_HTOP1_VIDEO_INPUT);
+	hdmi_htop1_write(hdmi, 0x00000000, HDMI_HTOP1_CLK_TO_PHY);
+	hdmi_htop1_write(hdmi, 0x00000000, HDMI_HTOP1_VIDEO_INPUT2);
+	hdmi_htop1_write(hdmi, 0x0000000a, HDMI_HTOP1_CLK_SET);
+}
+
 static int __init sh_hdmi_probe(struct platform_device *pdev)
 {
 	struct sh_mobile_hdmi_info *pdata = pdev->dev.platform_data;
 	struct resource *res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+	struct resource *htop1_res;
 	int irq = platform_get_irq(pdev, 0), ret;
 	struct sh_hdmi *hdmi;
 	long rate;
@@ -1121,6 +1281,15 @@
 	if (!res || !pdata || irq < 0)
 		return -ENODEV;
 
+	htop1_res = NULL;
+	if (pdata->flags & HDMI_HAS_HTOP1) {
+		htop1_res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
+		if (!htop1_res) {
+			dev_err(&pdev->dev, "htop1 needs register base\n");
+			return -EINVAL;
+		}
+	}
+
 	hdmi = kzalloc(sizeof(*hdmi), GFP_KERNEL);
 	if (!hdmi) {
 		dev_err(&pdev->dev, "Cannot allocate device data\n");
@@ -1138,6 +1307,15 @@
 		goto egetclk;
 	}
 
+	/* select register access functions */
+	if (pdata->flags & HDMI_32BIT_REG) {
+		hdmi->write	= __hdmi_write32;
+		hdmi->read	= __hdmi_read32;
+	} else {
+		hdmi->write	= __hdmi_write8;
+		hdmi->read	= __hdmi_read8;
+	}
+
 	/* An arbitrary relaxed pixclock just to get things started: from standard 480p */
 	rate = clk_round_rate(hdmi->hdmi_clk, PICOS2KHZ(37037));
 	if (rate > 0)
@@ -1176,6 +1354,24 @@
 	pm_runtime_enable(&pdev->dev);
 	pm_runtime_get_sync(&pdev->dev);
 
+	/* init interrupt polarity */
+	if (pdata->flags & HDMI_OUTPUT_PUSH_PULL)
+		hdmi_bit_set(hdmi, 0x02, 0x02, HDMI_SYSTEM_CTRL);
+
+	if (pdata->flags & HDMI_OUTPUT_POLARITY_HI)
+		hdmi_bit_set(hdmi, 0x01, 0x01, HDMI_SYSTEM_CTRL);
+
+	/* map and initialize the htop1 register block if present */
+	if (htop1_res) {
+		hdmi->htop1 = ioremap(htop1_res->start, resource_size(htop1_res));
+		if (!hdmi->htop1) {
+			dev_err(&pdev->dev, "control register region already claimed\n");
+			ret = -ENOMEM;
+			goto emap_htop1;
+		}
+		sh_hdmi_htop1_init(hdmi);
+	}
+
 	/* Product and revision IDs are 0 in sh-mobile version */
 	dev_info(&pdev->dev, "Detected HDMI controller 0x%x:0x%x\n",
 		 hdmi_read(hdmi, HDMI_PRODUCT_ID), hdmi_read(hdmi, HDMI_REVISION_ID));
@@ -1199,6 +1395,9 @@
 ecodec:
 	free_irq(irq, hdmi);
 ereqirq:
+	if (hdmi->htop1)
+		iounmap(hdmi->htop1);
+emap_htop1:
 	pm_runtime_put(&pdev->dev);
 	pm_runtime_disable(&pdev->dev);
 	iounmap(hdmi->base);
@@ -1230,6 +1429,8 @@
 	pm_runtime_disable(&pdev->dev);
 	clk_disable(hdmi->hdmi_clk);
 	clk_put(hdmi->hdmi_clk);
+	if (hdmi->htop1)
+		iounmap(hdmi->htop1);
 	iounmap(hdmi->base);
 	release_mem_region(res->start, resource_size(res));
 	kfree(hdmi);
diff --git a/drivers/video/sis/init.h b/drivers/video/sis/init.h
index aff7384..85d6738 100644
--- a/drivers/video/sis/init.h
+++ b/drivers/video/sis/init.h
@@ -105,51 +105,6 @@
 static const unsigned short ModeIndex_300_2048x1536[]= {0x6c, 0x6d, 0x00, 0x00};
 static const unsigned short ModeIndex_310_2048x1536[]= {0x6c, 0x6d, 0x00, 0x6e};
 
-static const unsigned short SiS_DRAMType[17][5]={
-	{0x0C,0x0A,0x02,0x40,0x39},
-	{0x0D,0x0A,0x01,0x40,0x48},
-	{0x0C,0x09,0x02,0x20,0x35},
-	{0x0D,0x09,0x01,0x20,0x44},
-	{0x0C,0x08,0x02,0x10,0x31},
-	{0x0D,0x08,0x01,0x10,0x40},
-	{0x0C,0x0A,0x01,0x20,0x34},
-	{0x0C,0x09,0x01,0x08,0x32},
-	{0x0B,0x08,0x02,0x08,0x21},
-	{0x0C,0x08,0x01,0x08,0x30},
-	{0x0A,0x08,0x02,0x04,0x11},
-	{0x0B,0x0A,0x01,0x10,0x28},
-	{0x09,0x08,0x02,0x02,0x01},
-	{0x0B,0x09,0x01,0x08,0x24},
-	{0x0B,0x08,0x01,0x04,0x20},
-	{0x0A,0x08,0x01,0x02,0x10},
-	{0x09,0x08,0x01,0x01,0x00}
-};
-
-static const unsigned short SiS_SDRDRAM_TYPE[13][5] =
-{
-	{ 2,12, 9,64,0x35},
-	{ 1,13, 9,64,0x44},
-	{ 2,12, 8,32,0x31},
-	{ 2,11, 9,32,0x25},
-	{ 1,12, 9,32,0x34},
-	{ 1,13, 8,32,0x40},
-	{ 2,11, 8,16,0x21},
-	{ 1,12, 8,16,0x30},
-	{ 1,11, 9,16,0x24},
-	{ 1,11, 8, 8,0x20},
-	{ 2, 9, 8, 4,0x01},
-	{ 1,10, 8, 4,0x10},
-	{ 1, 9, 8, 2,0x00}
-};
-
-static const unsigned short SiS_DDRDRAM_TYPE[4][5] =
-{
-	{ 2,12, 9,64,0x35},
-	{ 2,12, 8,32,0x31},
-	{ 2,11, 8,16,0x21},
-	{ 2, 9, 8, 4,0x01}
-};
-
 static const unsigned char SiS_MDA_DAC[] =
 {
 	0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
diff --git a/drivers/video/sis/sis_main.c b/drivers/video/sis/sis_main.c
index 078ca21..a7a48db 100644
--- a/drivers/video/sis/sis_main.c
+++ b/drivers/video/sis/sis_main.c
@@ -4222,6 +4222,26 @@
 	return 1;			/* 32bit */
 }
 
+static const unsigned short __devinitconst SiS_DRAMType[17][5] = {
+	{0x0C,0x0A,0x02,0x40,0x39},
+	{0x0D,0x0A,0x01,0x40,0x48},
+	{0x0C,0x09,0x02,0x20,0x35},
+	{0x0D,0x09,0x01,0x20,0x44},
+	{0x0C,0x08,0x02,0x10,0x31},
+	{0x0D,0x08,0x01,0x10,0x40},
+	{0x0C,0x0A,0x01,0x20,0x34},
+	{0x0C,0x09,0x01,0x08,0x32},
+	{0x0B,0x08,0x02,0x08,0x21},
+	{0x0C,0x08,0x01,0x08,0x30},
+	{0x0A,0x08,0x02,0x04,0x11},
+	{0x0B,0x0A,0x01,0x10,0x28},
+	{0x09,0x08,0x02,0x02,0x01},
+	{0x0B,0x09,0x01,0x08,0x24},
+	{0x0B,0x08,0x01,0x04,0x20},
+	{0x0A,0x08,0x01,0x02,0x10},
+	{0x09,0x08,0x01,0x01,0x00}
+};
+
 static int __devinit
 sisfb_post_300_rwtest(struct sis_video_info *ivideo, int iteration, int buswidth,
 			int PseudoRankCapacity, int PseudoAdrPinCount,
@@ -4231,27 +4251,8 @@
 	unsigned short sr14;
 	unsigned int k, RankCapacity, PageCapacity, BankNumHigh, BankNumMid;
 	unsigned int PhysicalAdrOtherPage, PhysicalAdrHigh, PhysicalAdrHalfPage;
-	static const unsigned short SiS_DRAMType[17][5] = {
-		{0x0C,0x0A,0x02,0x40,0x39},
-		{0x0D,0x0A,0x01,0x40,0x48},
-		{0x0C,0x09,0x02,0x20,0x35},
-		{0x0D,0x09,0x01,0x20,0x44},
-		{0x0C,0x08,0x02,0x10,0x31},
-		{0x0D,0x08,0x01,0x10,0x40},
-		{0x0C,0x0A,0x01,0x20,0x34},
-		{0x0C,0x09,0x01,0x08,0x32},
-		{0x0B,0x08,0x02,0x08,0x21},
-		{0x0C,0x08,0x01,0x08,0x30},
-		{0x0A,0x08,0x02,0x04,0x11},
-		{0x0B,0x0A,0x01,0x10,0x28},
-		{0x09,0x08,0x02,0x02,0x01},
-		{0x0B,0x09,0x01,0x08,0x24},
-		{0x0B,0x08,0x01,0x04,0x20},
-		{0x0A,0x08,0x01,0x02,0x10},
-		{0x09,0x08,0x01,0x01,0x00}
-	};
 
-	 for(k = 0; k <= 16; k++) {
+	 for(k = 0; k < ARRAY_SIZE(SiS_DRAMType); k++) {
 
 		RankCapacity = buswidth * SiS_DRAMType[k][3];
 
diff --git a/drivers/video/skeletonfb.c b/drivers/video/skeletonfb.c
index 30f7a81..5b6abc6 100644
--- a/drivers/video/skeletonfb.c
+++ b/drivers/video/skeletonfb.c
@@ -1036,6 +1036,6 @@
      */
 
 module_init(xxxfb_init);
-module_exit(xxxfb_remove);
+module_exit(xxxfb_exit);
 
 MODULE_LICENSE("GPL");
diff --git a/drivers/video/smscufx.c b/drivers/video/smscufx.c
index ccbfef5..af3ef27 100644
--- a/drivers/video/smscufx.c
+++ b/drivers/video/smscufx.c
@@ -846,7 +846,7 @@
 	}
 }
 
-int ufx_handle_damage(struct ufx_data *dev, int x, int y,
+static int ufx_handle_damage(struct ufx_data *dev, int x, int y,
 	int width, int height)
 {
 	size_t packed_line_len = ALIGN((width * 2), 4);
@@ -1083,7 +1083,7 @@
 
 		struct fb_deferred_io *fbdefio;
 
-		fbdefio = kmalloc(sizeof(struct fb_deferred_io), GFP_KERNEL);
+		fbdefio = kzalloc(sizeof(struct fb_deferred_io), GFP_KERNEL);
 
 		if (fbdefio) {
 			fbdefio->delay = UFX_DEFIO_WRITE_DELAY;
diff --git a/drivers/video/udlfb.c b/drivers/video/udlfb.c
index 7af1e81..8af6414 100644
--- a/drivers/video/udlfb.c
+++ b/drivers/video/udlfb.c
@@ -893,7 +893,7 @@
 
 		struct fb_deferred_io *fbdefio;
 
-		fbdefio = kmalloc(sizeof(struct fb_deferred_io), GFP_KERNEL);
+		fbdefio = kzalloc(sizeof(struct fb_deferred_io), GFP_KERNEL);
 
 		if (fbdefio) {
 			fbdefio->delay = DL_DEFIO_WRITE_DELAY;
diff --git a/drivers/video/via/viafbdev.c b/drivers/video/via/viafbdev.c
index 0c88375..c80e770 100644
--- a/drivers/video/via/viafbdev.c
+++ b/drivers/video/via/viafbdev.c
@@ -1276,17 +1276,12 @@
 static ssize_t viafb_dfph_proc_write(struct file *file,
 	const char __user *buffer, size_t count, loff_t *pos)
 {
-	char buf[20];
-	u8 reg_val = 0;
-	unsigned long length;
-	if (count < 1)
-		return -EINVAL;
-	length = count > 20 ? 20 : count;
-	if (copy_from_user(&buf[0], buffer, length))
-		return -EFAULT;
-	buf[length - 1] = '\0';	/*Ensure end string */
-	if (kstrtou8(buf, 0, &reg_val) < 0)
-		return -EINVAL;
+	int err;
+	u8 reg_val;
+	err = kstrtou8_from_user(buffer, count, 0, &reg_val);
+	if (err)
+		return err;
+
 	viafb_write_reg_mask(CR97, VIACR, reg_val, 0x0f);
 	return count;
 }
@@ -1316,17 +1311,12 @@
 static ssize_t viafb_dfpl_proc_write(struct file *file,
 	const char __user *buffer, size_t count, loff_t *pos)
 {
-	char buf[20];
-	u8 reg_val = 0;
-	unsigned long length;
-	if (count < 1)
-		return -EINVAL;
-	length = count > 20 ? 20 : count;
-	if (copy_from_user(&buf[0], buffer, length))
-		return -EFAULT;
-	buf[length - 1] = '\0';	/*Ensure end string */
-	if (kstrtou8(buf, 0, &reg_val) < 0)
-		return -EINVAL;
+	int err;
+	u8 reg_val;
+	err = kstrtou8_from_user(buffer, count, 0, &reg_val);
+	if (err)
+		return err;
+
 	viafb_write_reg_mask(CR99, VIACR, reg_val, 0x0f);
 	return count;
 }
diff --git a/drivers/w1/masters/mxc_w1.c b/drivers/w1/masters/mxc_w1.c
index a3b6a74..1cc61a7 100644
--- a/drivers/w1/masters/mxc_w1.c
+++ b/drivers/w1/masters/mxc_w1.c
@@ -138,7 +138,7 @@
 		goto failed_ioremap;
 	}
 
-	clk_enable(mdev->clk);
+	clk_prepare_enable(mdev->clk);
 	__raw_writeb(mdev->clkdiv, mdev->regs + MXC_W1_TIME_DIVIDER);
 
 	mdev->bus_master.data = mdev;
@@ -178,7 +178,7 @@
 
 	iounmap(mdev->regs);
 	release_mem_region(res->start, resource_size(res));
-	clk_disable(mdev->clk);
+	clk_disable_unprepare(mdev->clk);
 	clk_put(mdev->clk);
 
 	platform_set_drvdata(pdev, NULL);
diff --git a/drivers/watchdog/Kconfig b/drivers/watchdog/Kconfig
index a18bf63..fe819b7 100644
--- a/drivers/watchdog/Kconfig
+++ b/drivers/watchdog/Kconfig
@@ -64,6 +64,18 @@
 	  To compile this driver as a module, choose M here: the
 	  module will be called softdog.
 
+config DA9052_WATCHDOG
+        tristate "Dialog DA9052 Watchdog"
+        depends on PMIC_DA9052
+        select WATCHDOG_CORE
+        help
+          Support for the watchdog in the DA9052 PMIC. Watchdog trigger
+          cause system reset.
+
+          Say Y here to include support for the DA9052 watchdog.
+          Alternatively say M to compile the driver as a module,
+          which will be called da9052_wdt.
+
 config WM831X_WATCHDOG
 	tristate "WM831x watchdog"
 	depends on MFD_WM831X
@@ -87,6 +99,7 @@
 config ARM_SP805_WATCHDOG
 	tristate "ARM SP805 Watchdog"
 	depends on ARM_AMBA
+	select WATCHDOG_CORE
 	help
 	  ARM Primecell SP805 Watchdog timer. This will reboot your system when
 	  the timeout is reached.
@@ -565,6 +578,7 @@
 config ITCO_WDT
 	tristate "Intel TCO Timer/Watchdog"
 	depends on (X86 || IA64) && PCI
+	select LPC_ICH
 	---help---
 	  Hardware driver for the intel TCO timer based watchdog devices.
 	  These drivers are included in the Intel 82801 I/O Controller
diff --git a/drivers/watchdog/Makefile b/drivers/watchdog/Makefile
index 442bfbe..572b39b 100644
--- a/drivers/watchdog/Makefile
+++ b/drivers/watchdog/Makefile
@@ -163,6 +163,7 @@
 obj-$(CONFIG_XEN_WDT) += xen_wdt.o
 
 # Architecture Independent
+obj-$(CONFIG_DA9052_WATCHDOG) += da9052_wdt.o
 obj-$(CONFIG_WM831X_WATCHDOG) += wm831x_wdt.o
 obj-$(CONFIG_WM8350_WATCHDOG) += wm8350_wdt.o
 obj-$(CONFIG_MAX63XX_WATCHDOG) += max63xx_wdt.o
diff --git a/drivers/watchdog/da9052_wdt.c b/drivers/watchdog/da9052_wdt.c
new file mode 100644
index 0000000..3f75129
--- /dev/null
+++ b/drivers/watchdog/da9052_wdt.c
@@ -0,0 +1,251 @@
+/*
+ * System monitoring driver for DA9052 PMICs.
+ *
+ * Copyright(c) 2012 Dialog Semiconductor Ltd.
+ *
+ * Author: Anthony Olech <Anthony.Olech@diasemi.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ */
+
+#include <linux/module.h>
+#include <linux/delay.h>
+#include <linux/uaccess.h>
+#include <linux/platform_device.h>
+#include <linux/time.h>
+#include <linux/watchdog.h>
+#include <linux/types.h>
+#include <linux/kernel.h>
+#include <linux/jiffies.h>
+
+#include <linux/mfd/da9052/reg.h>
+#include <linux/mfd/da9052/da9052.h>
+
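+/* default timeout in seconds; TWDMIN is the minimum strobe window in milliseconds */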
+#define DA9052_DEF_TIMEOUT	4
+#define DA9052_TWDMIN		256
+
+struct da9052_wdt_data {
+	struct watchdog_device wdt;
+	struct da9052 *da9052;
+	struct kref kref;
+	unsigned long jpast;
+};
+
+static const struct {
+	u8 reg_val;
+	int time;  /* Seconds */
+} da9052_wdt_maps[] = {
+	{ 1, 2 },
+	{ 2, 4 },
+	{ 3, 8 },
+	{ 4, 16 },
+	{ 5, 32 },
+	{ 5, 33 },  /* Actual time 32.768s, so include both 32s and 33s */
+	{ 6, 65 },
+	{ 6, 66 },  /* Actual time 65.536s, so include both 65s and 66s */
+	{ 7, 131 },
+};
+
+
+static void da9052_wdt_release_resources(struct kref *r)
+{
+	struct da9052_wdt_data *driver_data =
+		container_of(r, struct da9052_wdt_data, kref);
+
+	kfree(driver_data);
+}
+
+static int da9052_wdt_set_timeout(struct watchdog_device *wdt_dev,
+				  unsigned int timeout)
+{
+	struct da9052_wdt_data *driver_data = watchdog_get_drvdata(wdt_dev);
+	struct da9052 *da9052 = driver_data->da9052;
+	int ret, i;
+
+	/*
+	 * Disable the watchdog timer before setting
+	 * the new timeout.
+	 */
+	ret = da9052_reg_update(da9052, DA9052_CONTROL_D_REG,
+				DA9052_CONTROLD_TWDSCALE, 0);
+	if (ret < 0) {
+		dev_err(da9052->dev, "Failed to disable watchdog bit, %d\n",
+			ret);
+		return ret;
+	}
+	if (timeout) {
+		/*
+		 * To change the timeout, da9052 needs to
+		 * be disabled for at least 150 us.
+		 */
+		udelay(150);
+
+		/* Set the desired timeout */
+		for (i = 0; i < ARRAY_SIZE(da9052_wdt_maps); i++)
+			if (da9052_wdt_maps[i].time == timeout)
+				break;
+
+		if (i == ARRAY_SIZE(da9052_wdt_maps))
+			ret = -EINVAL;
+		else
+			ret = da9052_reg_update(da9052, DA9052_CONTROL_D_REG,
+						DA9052_CONTROLD_TWDSCALE,
+						da9052_wdt_maps[i].reg_val);
+		if (ret < 0) {
+			dev_err(da9052->dev,
+				"Failed to update timescale bit, %d\n", ret);
+			return ret;
+		}
+
+		wdt_dev->timeout = timeout;
+		driver_data->jpast = jiffies;
+	}
+
+	return 0;
+}
+
+static void da9052_wdt_ref(struct watchdog_device *wdt_dev)
+{
+	struct da9052_wdt_data *driver_data = watchdog_get_drvdata(wdt_dev);
+
+	kref_get(&driver_data->kref);
+}
+
+static void da9052_wdt_unref(struct watchdog_device *wdt_dev)
+{
+	struct da9052_wdt_data *driver_data = watchdog_get_drvdata(wdt_dev);
+
+	kref_put(&driver_data->kref, da9052_wdt_release_resources);
+}
+
+static int da9052_wdt_start(struct watchdog_device *wdt_dev)
+{
+	return da9052_wdt_set_timeout(wdt_dev, wdt_dev->timeout);
+}
+
+static int da9052_wdt_stop(struct watchdog_device *wdt_dev)
+{
+	return da9052_wdt_set_timeout(wdt_dev, 0);
+}
+
+static int da9052_wdt_ping(struct watchdog_device *wdt_dev)
+{
+	struct da9052_wdt_data *driver_data = watchdog_get_drvdata(wdt_dev);
+	struct da9052 *da9052 = driver_data->da9052;
+	unsigned long msec, jnow = jiffies;
+	int ret;
+
+	/*
+	 * The watchdog has a minimum strobe window, TWDMIN. A write to the
+	 * watchdog before this window has elapsed causes an error.
+	 */
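+	/* time since the last timer update, converted from jiffies to milliseconds */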
+	msec = (jnow - driver_data->jpast) * 1000/HZ;
+	if (msec < DA9052_TWDMIN)
+		mdelay(DA9052_TWDMIN - msec);
+
+	/* Reset the watchdog timer */
+	ret = da9052_reg_update(da9052, DA9052_CONTROL_D_REG,
+				DA9052_CONTROLD_WATCHDOG, 1 << 7);
+	if (ret < 0)
+		goto err_strobe;
+
+	/*
+	 * FIXME: Reset the watchdog core, in general PMIC
+	 * is supposed to do this
+	 */
+	ret = da9052_reg_update(da9052, DA9052_CONTROL_D_REG,
+				DA9052_CONTROLD_WATCHDOG, 0 << 7);
+err_strobe:
+	return ret;
+}
+
+static struct watchdog_info da9052_wdt_info = {
+	.options	= WDIOF_SETTIMEOUT | WDIOF_KEEPALIVEPING,
+	.identity	= "DA9052 Watchdog",
+};
+
+static const struct watchdog_ops da9052_wdt_ops = {
+	.owner = THIS_MODULE,
+	.start = da9052_wdt_start,
+	.stop = da9052_wdt_stop,
+	.ping = da9052_wdt_ping,
+	.set_timeout = da9052_wdt_set_timeout,
+	.ref = da9052_wdt_ref,
+	.unref = da9052_wdt_unref,
+};
+
+
+static int __devinit da9052_wdt_probe(struct platform_device *pdev)
+{
+	struct da9052 *da9052 = dev_get_drvdata(pdev->dev.parent);
+	struct da9052_wdt_data *driver_data;
+	struct watchdog_device *da9052_wdt;
+	int ret;
+
+	driver_data = devm_kzalloc(&pdev->dev, sizeof(*driver_data),
+				   GFP_KERNEL);
+	if (!driver_data) {
+		dev_err(da9052->dev, "Unable to alloacate watchdog device\n");
+		ret = -ENOMEM;
+		goto err;
+	}
+	driver_data->da9052 = da9052;
+
+	da9052_wdt = &driver_data->wdt;
+
+	da9052_wdt->timeout = DA9052_DEF_TIMEOUT;
+	da9052_wdt->info = &da9052_wdt_info;
+	da9052_wdt->ops = &da9052_wdt_ops;
+	watchdog_set_drvdata(da9052_wdt, driver_data);
+
+	kref_init(&driver_data->kref);
+
+	ret = da9052_reg_update(da9052, DA9052_CONTROL_D_REG,
+				DA9052_CONTROLD_TWDSCALE, 0);
+	if (ret < 0) {
+		dev_err(&pdev->dev, "Failed to disable watchdog bits, %d\n",
+			ret);
+		goto err;
+	}
+
+	ret = watchdog_register_device(&driver_data->wdt);
+	if (ret != 0) {
+		dev_err(da9052->dev, "watchdog_register_device() failed: %d\n",
+			ret);
+		goto err;
+	}
+
+	dev_set_drvdata(&pdev->dev, driver_data);
+err:
+	return ret;
+}
+
+static int __devexit da9052_wdt_remove(struct platform_device *pdev)
+{
+	struct da9052_wdt_data *driver_data = dev_get_drvdata(&pdev->dev);
+
+	watchdog_unregister_device(&driver_data->wdt);
+	kref_put(&driver_data->kref, da9052_wdt_release_resources);
+
+	return 0;
+}
+
+static struct platform_driver da9052_wdt_driver = {
+	.probe = da9052_wdt_probe,
+	.remove = __devexit_p(da9052_wdt_remove),
+	.driver = {
+		.name	= "da9052-watchdog",
+	},
+};
+
+module_platform_driver(da9052_wdt_driver);
+
+MODULE_AUTHOR("Anthony Olech <Anthony.Olech@diasemi.com>");
+MODULE_DESCRIPTION("DA9052 SM Device Driver");
+MODULE_LICENSE("GPL");
+MODULE_ALIAS("platform:da9052-watchdog");
diff --git a/drivers/watchdog/hpwdt.c b/drivers/watchdog/hpwdt.c
index 2b76381..1eff743 100644
--- a/drivers/watchdog/hpwdt.c
+++ b/drivers/watchdog/hpwdt.c
@@ -146,7 +146,7 @@
 }  __attribute__((packed));
 
 static unsigned int hpwdt_nmi_decoding;
-static unsigned int allow_kdump;
+static unsigned int allow_kdump = 1;
 static unsigned int is_icru;
 static DEFINE_SPINLOCK(rom_lock);
 static void *cru_rom_addr;
@@ -756,6 +756,8 @@
 static void hpwdt_exit_nmi_decoding(void)
 {
 	unregister_nmi_handler(NMI_UNKNOWN, "hpwdt");
+	unregister_nmi_handler(NMI_SERR, "hpwdt");
+	unregister_nmi_handler(NMI_IO_CHECK, "hpwdt");
 	if (cru_rom_addr)
 		iounmap(cru_rom_addr);
 }
diff --git a/drivers/watchdog/iTCO_vendor.h b/drivers/watchdog/iTCO_vendor.h
index 9e27e64..3c57b455 100644
--- a/drivers/watchdog/iTCO_vendor.h
+++ b/drivers/watchdog/iTCO_vendor.h
@@ -1,8 +1,8 @@
 /* iTCO Vendor Specific Support hooks */
 #ifdef CONFIG_ITCO_VENDOR_SUPPORT
-extern void iTCO_vendor_pre_start(unsigned long, unsigned int);
-extern void iTCO_vendor_pre_stop(unsigned long);
-extern void iTCO_vendor_pre_keepalive(unsigned long, unsigned int);
+extern void iTCO_vendor_pre_start(struct resource *, unsigned int);
+extern void iTCO_vendor_pre_stop(struct resource *);
+extern void iTCO_vendor_pre_keepalive(struct resource *, unsigned int);
 extern void iTCO_vendor_pre_set_heartbeat(unsigned int);
 extern int iTCO_vendor_check_noreboot_on(void);
 #else
diff --git a/drivers/watchdog/iTCO_vendor_support.c b/drivers/watchdog/iTCO_vendor_support.c
index 2721d29..b6b2f90 100644
--- a/drivers/watchdog/iTCO_vendor_support.c
+++ b/drivers/watchdog/iTCO_vendor_support.c
@@ -35,11 +35,6 @@
 
 #include "iTCO_vendor.h"
 
-/* iTCO defines */
-#define	SMI_EN		(acpibase + 0x30) /* SMI Control and Enable Register */
-#define	TCOBASE		(acpibase + 0x60) /* TCO base address */
-#define	TCO1_STS	(TCOBASE + 0x04)  /* TCO1 Status Register */
-
 /* List of vendor support modes */
 /* SuperMicro Pentium 3 Era 370SSE+-OEM1/P3TSSE */
 #define SUPERMICRO_OLD_BOARD	1
@@ -82,24 +77,24 @@
  *	    20.6 seconds.
  */
 
-static void supermicro_old_pre_start(unsigned long acpibase)
+static void supermicro_old_pre_start(struct resource *smires)
 {
 	unsigned long val32;
 
 	/* Bit 13: TCO_EN -> 0 = Disables TCO logic generating an SMI# */
-	val32 = inl(SMI_EN);
+	val32 = inl(smires->start);
 	val32 &= 0xffffdfff;	/* Turn off SMI clearing watchdog */
-	outl(val32, SMI_EN);	/* Needed to activate watchdog */
+	outl(val32, smires->start);	/* Needed to activate watchdog */
 }
 
-static void supermicro_old_pre_stop(unsigned long acpibase)
+static void supermicro_old_pre_stop(struct resource *smires)
 {
 	unsigned long val32;
 
 	/* Bit 13: TCO_EN -> 1 = Enables the TCO logic to generate SMI# */
-	val32 = inl(SMI_EN);
+	val32 = inl(smires->start);
 	val32 |= 0x00002000;	/* Turn on SMI clearing watchdog */
-	outl(val32, SMI_EN);	/* Needed to deactivate watchdog */
+	outl(val32, smires->start);	/* Needed to deactivate watchdog */
 }
 
 /*
@@ -270,66 +265,66 @@
  *	Don't use this fix if you don't need to!!!
  */
 
-static void broken_bios_start(unsigned long acpibase)
+static void broken_bios_start(struct resource *smires)
 {
 	unsigned long val32;
 
-	val32 = inl(SMI_EN);
+	val32 = inl(smires->start);
 	/* Bit 13: TCO_EN     -> 0 = Disables TCO logic generating an SMI#
 	   Bit  0: GBL_SMI_EN -> 0 = No SMI# will be generated by ICH. */
 	val32 &= 0xffffdffe;
-	outl(val32, SMI_EN);
+	outl(val32, smires->start);
 }
 
-static void broken_bios_stop(unsigned long acpibase)
+static void broken_bios_stop(struct resource *smires)
 {
 	unsigned long val32;
 
-	val32 = inl(SMI_EN);
+	val32 = inl(smires->start);
 	/* Bit 13: TCO_EN     -> 1 = Enables TCO logic generating an SMI#
 	   Bit  0: GBL_SMI_EN -> 1 = Turn global SMI on again. */
 	val32 |= 0x00002001;
-	outl(val32, SMI_EN);
+	outl(val32, smires->start);
 }
 
 /*
  *	Generic Support Functions
  */
 
-void iTCO_vendor_pre_start(unsigned long acpibase,
+void iTCO_vendor_pre_start(struct resource *smires,
 			   unsigned int heartbeat)
 {
 	switch (vendorsupport) {
 	case SUPERMICRO_OLD_BOARD:
-		supermicro_old_pre_start(acpibase);
+		supermicro_old_pre_start(smires);
 		break;
 	case SUPERMICRO_NEW_BOARD:
 		supermicro_new_pre_start(heartbeat);
 		break;
 	case BROKEN_BIOS:
-		broken_bios_start(acpibase);
+		broken_bios_start(smires);
 		break;
 	}
 }
 EXPORT_SYMBOL(iTCO_vendor_pre_start);
 
-void iTCO_vendor_pre_stop(unsigned long acpibase)
+void iTCO_vendor_pre_stop(struct resource *smires)
 {
 	switch (vendorsupport) {
 	case SUPERMICRO_OLD_BOARD:
-		supermicro_old_pre_stop(acpibase);
+		supermicro_old_pre_stop(smires);
 		break;
 	case SUPERMICRO_NEW_BOARD:
 		supermicro_new_pre_stop();
 		break;
 	case BROKEN_BIOS:
-		broken_bios_stop(acpibase);
+		broken_bios_stop(smires);
 		break;
 	}
 }
 EXPORT_SYMBOL(iTCO_vendor_pre_stop);
 
-void iTCO_vendor_pre_keepalive(unsigned long acpibase, unsigned int heartbeat)
+void iTCO_vendor_pre_keepalive(struct resource *smires, unsigned int heartbeat)
 {
 	if (vendorsupport == SUPERMICRO_NEW_BOARD)
 		supermicro_new_pre_set_heartbeat(heartbeat);
diff --git a/drivers/watchdog/iTCO_wdt.c b/drivers/watchdog/iTCO_wdt.c
index 9fecb95..9c2c27c 100644
--- a/drivers/watchdog/iTCO_wdt.c
+++ b/drivers/watchdog/iTCO_wdt.c
@@ -66,316 +66,16 @@
 #include <linux/spinlock.h>		/* For spin_lock/spin_unlock/... */
 #include <linux/uaccess.h>		/* For copy_to_user/put_user/... */
 #include <linux/io.h>			/* For inb/outb/... */
+#include <linux/mfd/core.h>
+#include <linux/mfd/lpc_ich.h>
 
 #include "iTCO_vendor.h"
 
-/* TCO related info */
-enum iTCO_chipsets {
-	TCO_ICH = 0,	/* ICH */
-	TCO_ICH0,	/* ICH0 */
-	TCO_ICH2,	/* ICH2 */
-	TCO_ICH2M,	/* ICH2-M */
-	TCO_ICH3,	/* ICH3-S */
-	TCO_ICH3M,	/* ICH3-M */
-	TCO_ICH4,	/* ICH4 */
-	TCO_ICH4M,	/* ICH4-M */
-	TCO_CICH,	/* C-ICH */
-	TCO_ICH5,	/* ICH5 & ICH5R */
-	TCO_6300ESB,	/* 6300ESB */
-	TCO_ICH6,	/* ICH6 & ICH6R */
-	TCO_ICH6M,	/* ICH6-M */
-	TCO_ICH6W,	/* ICH6W & ICH6RW */
-	TCO_631XESB,	/* 631xESB/632xESB */
-	TCO_ICH7,	/* ICH7 & ICH7R */
-	TCO_ICH7DH,	/* ICH7DH */
-	TCO_ICH7M,	/* ICH7-M & ICH7-U */
-	TCO_ICH7MDH,	/* ICH7-M DH */
-	TCO_NM10,	/* NM10 */
-	TCO_ICH8,	/* ICH8 & ICH8R */
-	TCO_ICH8DH,	/* ICH8DH */
-	TCO_ICH8DO,	/* ICH8DO */
-	TCO_ICH8M,	/* ICH8M */
-	TCO_ICH8ME,	/* ICH8M-E */
-	TCO_ICH9,	/* ICH9 */
-	TCO_ICH9R,	/* ICH9R */
-	TCO_ICH9DH,	/* ICH9DH */
-	TCO_ICH9DO,	/* ICH9DO */
-	TCO_ICH9M,	/* ICH9M */
-	TCO_ICH9ME,	/* ICH9M-E */
-	TCO_ICH10,	/* ICH10 */
-	TCO_ICH10R,	/* ICH10R */
-	TCO_ICH10D,	/* ICH10D */
-	TCO_ICH10DO,	/* ICH10DO */
-	TCO_PCH,	/* PCH Desktop Full Featured */
-	TCO_PCHM,	/* PCH Mobile Full Featured */
-	TCO_P55,	/* P55 */
-	TCO_PM55,	/* PM55 */
-	TCO_H55,	/* H55 */
-	TCO_QM57,	/* QM57 */
-	TCO_H57,	/* H57 */
-	TCO_HM55,	/* HM55 */
-	TCO_Q57,	/* Q57 */
-	TCO_HM57,	/* HM57 */
-	TCO_PCHMSFF,	/* PCH Mobile SFF Full Featured */
-	TCO_QS57,	/* QS57 */
-	TCO_3400,	/* 3400 */
-	TCO_3420,	/* 3420 */
-	TCO_3450,	/* 3450 */
-	TCO_EP80579,	/* EP80579 */
-	TCO_CPT,	/* Cougar Point */
-	TCO_CPTD,	/* Cougar Point Desktop */
-	TCO_CPTM,	/* Cougar Point Mobile */
-	TCO_PBG,	/* Patsburg */
-	TCO_DH89XXCC,	/* DH89xxCC */
-	TCO_PPT,	/* Panther Point */
-	TCO_LPT,	/* Lynx Point */
-};
-
-static struct {
-	char *name;
-	unsigned int iTCO_version;
-} iTCO_chipset_info[] __devinitdata = {
-	{"ICH", 1},
-	{"ICH0", 1},
-	{"ICH2", 1},
-	{"ICH2-M", 1},
-	{"ICH3-S", 1},
-	{"ICH3-M", 1},
-	{"ICH4", 1},
-	{"ICH4-M", 1},
-	{"C-ICH", 1},
-	{"ICH5 or ICH5R", 1},
-	{"6300ESB", 1},
-	{"ICH6 or ICH6R", 2},
-	{"ICH6-M", 2},
-	{"ICH6W or ICH6RW", 2},
-	{"631xESB/632xESB", 2},
-	{"ICH7 or ICH7R", 2},
-	{"ICH7DH", 2},
-	{"ICH7-M or ICH7-U", 2},
-	{"ICH7-M DH", 2},
-	{"NM10", 2},
-	{"ICH8 or ICH8R", 2},
-	{"ICH8DH", 2},
-	{"ICH8DO", 2},
-	{"ICH8M", 2},
-	{"ICH8M-E", 2},
-	{"ICH9", 2},
-	{"ICH9R", 2},
-	{"ICH9DH", 2},
-	{"ICH9DO", 2},
-	{"ICH9M", 2},
-	{"ICH9M-E", 2},
-	{"ICH10", 2},
-	{"ICH10R", 2},
-	{"ICH10D", 2},
-	{"ICH10DO", 2},
-	{"PCH Desktop Full Featured", 2},
-	{"PCH Mobile Full Featured", 2},
-	{"P55", 2},
-	{"PM55", 2},
-	{"H55", 2},
-	{"QM57", 2},
-	{"H57", 2},
-	{"HM55", 2},
-	{"Q57", 2},
-	{"HM57", 2},
-	{"PCH Mobile SFF Full Featured", 2},
-	{"QS57", 2},
-	{"3400", 2},
-	{"3420", 2},
-	{"3450", 2},
-	{"EP80579", 2},
-	{"Cougar Point", 2},
-	{"Cougar Point Desktop", 2},
-	{"Cougar Point Mobile", 2},
-	{"Patsburg", 2},
-	{"DH89xxCC", 2},
-	{"Panther Point", 2},
-	{"Lynx Point", 2},
-	{NULL, 0}
-};
-
-/*
- * This data only exists for exporting the supported PCI ids
- * via MODULE_DEVICE_TABLE.  We do not actually register a
- * pci_driver, because the I/O Controller Hub has also other
- * functions that probably will be registered by other drivers.
- */
-static DEFINE_PCI_DEVICE_TABLE(iTCO_wdt_pci_tbl) = {
-	{ PCI_VDEVICE(INTEL, 0x2410), TCO_ICH},
-	{ PCI_VDEVICE(INTEL, 0x2420), TCO_ICH0},
-	{ PCI_VDEVICE(INTEL, 0x2440), TCO_ICH2},
-	{ PCI_VDEVICE(INTEL, 0x244c), TCO_ICH2M},
-	{ PCI_VDEVICE(INTEL, 0x2480), TCO_ICH3},
-	{ PCI_VDEVICE(INTEL, 0x248c), TCO_ICH3M},
-	{ PCI_VDEVICE(INTEL, 0x24c0), TCO_ICH4},
-	{ PCI_VDEVICE(INTEL, 0x24cc), TCO_ICH4M},
-	{ PCI_VDEVICE(INTEL, 0x2450), TCO_CICH},
-	{ PCI_VDEVICE(INTEL, 0x24d0), TCO_ICH5},
-	{ PCI_VDEVICE(INTEL, 0x25a1), TCO_6300ESB},
-	{ PCI_VDEVICE(INTEL, 0x2640), TCO_ICH6},
-	{ PCI_VDEVICE(INTEL, 0x2641), TCO_ICH6M},
-	{ PCI_VDEVICE(INTEL, 0x2642), TCO_ICH6W},
-	{ PCI_VDEVICE(INTEL, 0x2670), TCO_631XESB},
-	{ PCI_VDEVICE(INTEL, 0x2671), TCO_631XESB},
-	{ PCI_VDEVICE(INTEL, 0x2672), TCO_631XESB},
-	{ PCI_VDEVICE(INTEL, 0x2673), TCO_631XESB},
-	{ PCI_VDEVICE(INTEL, 0x2674), TCO_631XESB},
-	{ PCI_VDEVICE(INTEL, 0x2675), TCO_631XESB},
-	{ PCI_VDEVICE(INTEL, 0x2676), TCO_631XESB},
-	{ PCI_VDEVICE(INTEL, 0x2677), TCO_631XESB},
-	{ PCI_VDEVICE(INTEL, 0x2678), TCO_631XESB},
-	{ PCI_VDEVICE(INTEL, 0x2679), TCO_631XESB},
-	{ PCI_VDEVICE(INTEL, 0x267a), TCO_631XESB},
-	{ PCI_VDEVICE(INTEL, 0x267b), TCO_631XESB},
-	{ PCI_VDEVICE(INTEL, 0x267c), TCO_631XESB},
-	{ PCI_VDEVICE(INTEL, 0x267d), TCO_631XESB},
-	{ PCI_VDEVICE(INTEL, 0x267e), TCO_631XESB},
-	{ PCI_VDEVICE(INTEL, 0x267f), TCO_631XESB},
-	{ PCI_VDEVICE(INTEL, 0x27b8), TCO_ICH7},
-	{ PCI_VDEVICE(INTEL, 0x27b0), TCO_ICH7DH},
-	{ PCI_VDEVICE(INTEL, 0x27b9), TCO_ICH7M},
-	{ PCI_VDEVICE(INTEL, 0x27bd), TCO_ICH7MDH},
-	{ PCI_VDEVICE(INTEL, 0x27bc), TCO_NM10},
-	{ PCI_VDEVICE(INTEL, 0x2810), TCO_ICH8},
-	{ PCI_VDEVICE(INTEL, 0x2812), TCO_ICH8DH},
-	{ PCI_VDEVICE(INTEL, 0x2814), TCO_ICH8DO},
-	{ PCI_VDEVICE(INTEL, 0x2815), TCO_ICH8M},
-	{ PCI_VDEVICE(INTEL, 0x2811), TCO_ICH8ME},
-	{ PCI_VDEVICE(INTEL, 0x2918), TCO_ICH9},
-	{ PCI_VDEVICE(INTEL, 0x2916), TCO_ICH9R},
-	{ PCI_VDEVICE(INTEL, 0x2912), TCO_ICH9DH},
-	{ PCI_VDEVICE(INTEL, 0x2914), TCO_ICH9DO},
-	{ PCI_VDEVICE(INTEL, 0x2919), TCO_ICH9M},
-	{ PCI_VDEVICE(INTEL, 0x2917), TCO_ICH9ME},
-	{ PCI_VDEVICE(INTEL, 0x3a18), TCO_ICH10},
-	{ PCI_VDEVICE(INTEL, 0x3a16), TCO_ICH10R},
-	{ PCI_VDEVICE(INTEL, 0x3a1a), TCO_ICH10D},
-	{ PCI_VDEVICE(INTEL, 0x3a14), TCO_ICH10DO},
-	{ PCI_VDEVICE(INTEL, 0x3b00), TCO_PCH},
-	{ PCI_VDEVICE(INTEL, 0x3b01), TCO_PCHM},
-	{ PCI_VDEVICE(INTEL, 0x3b02), TCO_P55},
-	{ PCI_VDEVICE(INTEL, 0x3b03), TCO_PM55},
-	{ PCI_VDEVICE(INTEL, 0x3b06), TCO_H55},
-	{ PCI_VDEVICE(INTEL, 0x3b07), TCO_QM57},
-	{ PCI_VDEVICE(INTEL, 0x3b08), TCO_H57},
-	{ PCI_VDEVICE(INTEL, 0x3b09), TCO_HM55},
-	{ PCI_VDEVICE(INTEL, 0x3b0a), TCO_Q57},
-	{ PCI_VDEVICE(INTEL, 0x3b0b), TCO_HM57},
-	{ PCI_VDEVICE(INTEL, 0x3b0d), TCO_PCHMSFF},
-	{ PCI_VDEVICE(INTEL, 0x3b0f), TCO_QS57},
-	{ PCI_VDEVICE(INTEL, 0x3b12), TCO_3400},
-	{ PCI_VDEVICE(INTEL, 0x3b14), TCO_3420},
-	{ PCI_VDEVICE(INTEL, 0x3b16), TCO_3450},
-	{ PCI_VDEVICE(INTEL, 0x5031), TCO_EP80579},
-	{ PCI_VDEVICE(INTEL, 0x1c41), TCO_CPT},
-	{ PCI_VDEVICE(INTEL, 0x1c42), TCO_CPTD},
-	{ PCI_VDEVICE(INTEL, 0x1c43), TCO_CPTM},
-	{ PCI_VDEVICE(INTEL, 0x1c44), TCO_CPT},
-	{ PCI_VDEVICE(INTEL, 0x1c45), TCO_CPT},
-	{ PCI_VDEVICE(INTEL, 0x1c46), TCO_CPT},
-	{ PCI_VDEVICE(INTEL, 0x1c47), TCO_CPT},
-	{ PCI_VDEVICE(INTEL, 0x1c48), TCO_CPT},
-	{ PCI_VDEVICE(INTEL, 0x1c49), TCO_CPT},
-	{ PCI_VDEVICE(INTEL, 0x1c4a), TCO_CPT},
-	{ PCI_VDEVICE(INTEL, 0x1c4b), TCO_CPT},
-	{ PCI_VDEVICE(INTEL, 0x1c4c), TCO_CPT},
-	{ PCI_VDEVICE(INTEL, 0x1c4d), TCO_CPT},
-	{ PCI_VDEVICE(INTEL, 0x1c4e), TCO_CPT},
-	{ PCI_VDEVICE(INTEL, 0x1c4f), TCO_CPT},
-	{ PCI_VDEVICE(INTEL, 0x1c50), TCO_CPT},
-	{ PCI_VDEVICE(INTEL, 0x1c51), TCO_CPT},
-	{ PCI_VDEVICE(INTEL, 0x1c52), TCO_CPT},
-	{ PCI_VDEVICE(INTEL, 0x1c53), TCO_CPT},
-	{ PCI_VDEVICE(INTEL, 0x1c54), TCO_CPT},
-	{ PCI_VDEVICE(INTEL, 0x1c55), TCO_CPT},
-	{ PCI_VDEVICE(INTEL, 0x1c56), TCO_CPT},
-	{ PCI_VDEVICE(INTEL, 0x1c57), TCO_CPT},
-	{ PCI_VDEVICE(INTEL, 0x1c58), TCO_CPT},
-	{ PCI_VDEVICE(INTEL, 0x1c59), TCO_CPT},
-	{ PCI_VDEVICE(INTEL, 0x1c5a), TCO_CPT},
-	{ PCI_VDEVICE(INTEL, 0x1c5b), TCO_CPT},
-	{ PCI_VDEVICE(INTEL, 0x1c5c), TCO_CPT},
-	{ PCI_VDEVICE(INTEL, 0x1c5d), TCO_CPT},
-	{ PCI_VDEVICE(INTEL, 0x1c5e), TCO_CPT},
-	{ PCI_VDEVICE(INTEL, 0x1c5f), TCO_CPT},
-	{ PCI_VDEVICE(INTEL, 0x1d40), TCO_PBG},
-	{ PCI_VDEVICE(INTEL, 0x1d41), TCO_PBG},
-	{ PCI_VDEVICE(INTEL, 0x2310), TCO_DH89XXCC},
-	{ PCI_VDEVICE(INTEL, 0x1e40), TCO_PPT},
-	{ PCI_VDEVICE(INTEL, 0x1e41), TCO_PPT},
-	{ PCI_VDEVICE(INTEL, 0x1e42), TCO_PPT},
-	{ PCI_VDEVICE(INTEL, 0x1e43), TCO_PPT},
-	{ PCI_VDEVICE(INTEL, 0x1e44), TCO_PPT},
-	{ PCI_VDEVICE(INTEL, 0x1e45), TCO_PPT},
-	{ PCI_VDEVICE(INTEL, 0x1e46), TCO_PPT},
-	{ PCI_VDEVICE(INTEL, 0x1e47), TCO_PPT},
-	{ PCI_VDEVICE(INTEL, 0x1e48), TCO_PPT},
-	{ PCI_VDEVICE(INTEL, 0x1e49), TCO_PPT},
-	{ PCI_VDEVICE(INTEL, 0x1e4a), TCO_PPT},
-	{ PCI_VDEVICE(INTEL, 0x1e4b), TCO_PPT},
-	{ PCI_VDEVICE(INTEL, 0x1e4c), TCO_PPT},
-	{ PCI_VDEVICE(INTEL, 0x1e4d), TCO_PPT},
-	{ PCI_VDEVICE(INTEL, 0x1e4e), TCO_PPT},
-	{ PCI_VDEVICE(INTEL, 0x1e4f), TCO_PPT},
-	{ PCI_VDEVICE(INTEL, 0x1e50), TCO_PPT},
-	{ PCI_VDEVICE(INTEL, 0x1e51), TCO_PPT},
-	{ PCI_VDEVICE(INTEL, 0x1e52), TCO_PPT},
-	{ PCI_VDEVICE(INTEL, 0x1e53), TCO_PPT},
-	{ PCI_VDEVICE(INTEL, 0x1e54), TCO_PPT},
-	{ PCI_VDEVICE(INTEL, 0x1e55), TCO_PPT},
-	{ PCI_VDEVICE(INTEL, 0x1e56), TCO_PPT},
-	{ PCI_VDEVICE(INTEL, 0x1e57), TCO_PPT},
-	{ PCI_VDEVICE(INTEL, 0x1e58), TCO_PPT},
-	{ PCI_VDEVICE(INTEL, 0x1e59), TCO_PPT},
-	{ PCI_VDEVICE(INTEL, 0x1e5a), TCO_PPT},
-	{ PCI_VDEVICE(INTEL, 0x1e5b), TCO_PPT},
-	{ PCI_VDEVICE(INTEL, 0x1e5c), TCO_PPT},
-	{ PCI_VDEVICE(INTEL, 0x1e5d), TCO_PPT},
-	{ PCI_VDEVICE(INTEL, 0x1e5e), TCO_PPT},
-	{ PCI_VDEVICE(INTEL, 0x1e5f), TCO_PPT},
-	{ PCI_VDEVICE(INTEL, 0x8c40), TCO_LPT},
-	{ PCI_VDEVICE(INTEL, 0x8c41), TCO_LPT},
-	{ PCI_VDEVICE(INTEL, 0x8c42), TCO_LPT},
-	{ PCI_VDEVICE(INTEL, 0x8c43), TCO_LPT},
-	{ PCI_VDEVICE(INTEL, 0x8c44), TCO_LPT},
-	{ PCI_VDEVICE(INTEL, 0x8c45), TCO_LPT},
-	{ PCI_VDEVICE(INTEL, 0x8c46), TCO_LPT},
-	{ PCI_VDEVICE(INTEL, 0x8c47), TCO_LPT},
-	{ PCI_VDEVICE(INTEL, 0x8c48), TCO_LPT},
-	{ PCI_VDEVICE(INTEL, 0x8c49), TCO_LPT},
-	{ PCI_VDEVICE(INTEL, 0x8c4a), TCO_LPT},
-	{ PCI_VDEVICE(INTEL, 0x8c4b), TCO_LPT},
-	{ PCI_VDEVICE(INTEL, 0x8c4c), TCO_LPT},
-	{ PCI_VDEVICE(INTEL, 0x8c4d), TCO_LPT},
-	{ PCI_VDEVICE(INTEL, 0x8c4e), TCO_LPT},
-	{ PCI_VDEVICE(INTEL, 0x8c4f), TCO_LPT},
-	{ PCI_VDEVICE(INTEL, 0x8c50), TCO_LPT},
-	{ PCI_VDEVICE(INTEL, 0x8c51), TCO_LPT},
-	{ PCI_VDEVICE(INTEL, 0x8c52), TCO_LPT},
-	{ PCI_VDEVICE(INTEL, 0x8c53), TCO_LPT},
-	{ PCI_VDEVICE(INTEL, 0x8c54), TCO_LPT},
-	{ PCI_VDEVICE(INTEL, 0x8c55), TCO_LPT},
-	{ PCI_VDEVICE(INTEL, 0x8c56), TCO_LPT},
-	{ PCI_VDEVICE(INTEL, 0x8c57), TCO_LPT},
-	{ PCI_VDEVICE(INTEL, 0x8c58), TCO_LPT},
-	{ PCI_VDEVICE(INTEL, 0x8c59), TCO_LPT},
-	{ PCI_VDEVICE(INTEL, 0x8c5a), TCO_LPT},
-	{ PCI_VDEVICE(INTEL, 0x8c5b), TCO_LPT},
-	{ PCI_VDEVICE(INTEL, 0x8c5c), TCO_LPT},
-	{ PCI_VDEVICE(INTEL, 0x8c5d), TCO_LPT},
-	{ PCI_VDEVICE(INTEL, 0x8c5e), TCO_LPT},
-	{ PCI_VDEVICE(INTEL, 0x8c5f), TCO_LPT},
-	{ 0, },			/* End of list */
-};
-MODULE_DEVICE_TABLE(pci, iTCO_wdt_pci_tbl);
-
 /* Address definitions for the TCO */
 /* TCO base address */
-#define TCOBASE		(iTCO_wdt_private.ACPIBASE + 0x60)
+#define TCOBASE		(iTCO_wdt_private.tco_res->start)
 /* SMI Control and Enable Register */
-#define SMI_EN		(iTCO_wdt_private.ACPIBASE + 0x30)
+#define SMI_EN		(iTCO_wdt_private.smi_res->start)
 
 #define TCO_RLD		(TCOBASE + 0x00) /* TCO Timer Reload and Curr. Value */
 #define TCOv1_TMR	(TCOBASE + 0x01) /* TCOv1 Timer Initial Value	*/
@@ -393,19 +93,18 @@
 static struct {		/* this is private data for the iTCO_wdt device */
 	/* TCO version/generation */
 	unsigned int iTCO_version;
-	/* The device's ACPIBASE address (TCOBASE = ACPIBASE+0x60) */
-	unsigned long ACPIBASE;
+	struct resource *tco_res;
+	struct resource *smi_res;
+	struct resource *gcs_res;
 	/* NO_REBOOT flag is Memory-Mapped GCS register bit 5 (TCO version 2)*/
 	unsigned long __iomem *gcs;
 	/* the lock for io operations */
 	spinlock_t io_lock;
+	struct platform_device *dev;
 	/* the PCI-device */
 	struct pci_dev *pdev;
 } iTCO_wdt_private;
 
-/* the watchdog platform device */
-static struct platform_device *iTCO_wdt_platform_device;
-
 /* module parameters */
 #define WATCHDOG_HEARTBEAT 30	/* 30 sec default heartbeat */
 static int heartbeat = WATCHDOG_HEARTBEAT;  /* in seconds */
@@ -485,7 +184,7 @@
 
 	spin_lock(&iTCO_wdt_private.io_lock);
 
-	iTCO_vendor_pre_start(iTCO_wdt_private.ACPIBASE, heartbeat);
+	iTCO_vendor_pre_start(iTCO_wdt_private.smi_res, heartbeat);
 
 	/* disable chipset's NO_REBOOT bit */
 	if (iTCO_wdt_unset_NO_REBOOT_bit()) {
@@ -519,7 +218,7 @@
 
 	spin_lock(&iTCO_wdt_private.io_lock);
 
-	iTCO_vendor_pre_stop(iTCO_wdt_private.ACPIBASE);
+	iTCO_vendor_pre_stop(iTCO_wdt_private.smi_res);
 
 	/* Bit 11: TCO Timer Halt -> 1 = The TCO timer is disabled */
 	val = inw(TCO1_CNT);
@@ -541,7 +240,7 @@
 {
 	spin_lock(&iTCO_wdt_private.io_lock);
 
-	iTCO_vendor_pre_keepalive(iTCO_wdt_private.ACPIBASE, heartbeat);
+	iTCO_vendor_pre_keepalive(iTCO_wdt_private.smi_res, heartbeat);
 
 	/* Reload the timer by writing to the TCO Timer Counter register */
 	if (iTCO_wdt_private.iTCO_version == 2)
@@ -786,83 +485,120 @@
  *	Init & exit routines
  */
 
-static int __devinit iTCO_wdt_init(struct pci_dev *pdev,
-		const struct pci_device_id *ent, struct platform_device *dev)
+static void __devexit iTCO_wdt_cleanup(void)
 {
-	int ret;
-	u32 base_address;
-	unsigned long RCBA;
+	/* Stop the timer before we leave */
+	if (!nowayout)
+		iTCO_wdt_stop();
+
+	/* Deregister */
+	misc_deregister(&iTCO_wdt_miscdev);
+
+	/* release resources */
+	release_region(iTCO_wdt_private.tco_res->start,
+			resource_size(iTCO_wdt_private.tco_res));
+	release_region(iTCO_wdt_private.smi_res->start,
+			resource_size(iTCO_wdt_private.smi_res));
+	if (iTCO_wdt_private.iTCO_version == 2) {
+		iounmap(iTCO_wdt_private.gcs);
+		release_mem_region(iTCO_wdt_private.gcs_res->start,
+				resource_size(iTCO_wdt_private.gcs_res));
+	}
+
+	iTCO_wdt_private.tco_res = NULL;
+	iTCO_wdt_private.smi_res = NULL;
+	iTCO_wdt_private.gcs_res = NULL;
+	iTCO_wdt_private.gcs = NULL;
+}
+
+static int __devinit iTCO_wdt_probe(struct platform_device *dev)
+{
+	int ret = -ENODEV;
 	unsigned long val32;
+	struct lpc_ich_info *ich_info = dev->dev.platform_data;
+
+	if (!ich_info)
+		goto out;
+
+	spin_lock_init(&iTCO_wdt_private.io_lock);
+
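+	/* the TCO and SMI I/O ranges are supplied as resources by the lpc_ich MFD parent */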
+	iTCO_wdt_private.tco_res =
+		platform_get_resource(dev, IORESOURCE_IO, ICH_RES_IO_TCO);
+	if (!iTCO_wdt_private.tco_res)
+		goto out;
+
+	iTCO_wdt_private.smi_res =
+		platform_get_resource(dev, IORESOURCE_IO, ICH_RES_IO_SMI);
+	if (!iTCO_wdt_private.smi_res)
+		goto out;
+
+	iTCO_wdt_private.iTCO_version = ich_info->iTCO_version;
+	iTCO_wdt_private.dev = dev;
+	iTCO_wdt_private.pdev = to_pci_dev(dev->dev.parent);
 
 	/*
-	 *      Find the ACPI/PM base I/O address which is the base
-	 *      for the TCO registers (TCOBASE=ACPIBASE + 0x60)
-	 *      ACPIBASE is bits [15:7] from 0x40-0x43
+	 * Get the Memory-Mapped GCS register; we need it for the
+	 * NO_REBOOT flag (TCO v2).
 	 */
-	pci_read_config_dword(pdev, 0x40, &base_address);
-	base_address &= 0x0000ff80;
-	if (base_address == 0x00000000) {
-		/* Something's wrong here, ACPIBASE has to be set */
-		pr_err("failed to get TCOBASE address, device disabled by hardware/BIOS\n");
-		return -ENODEV;
-	}
-	iTCO_wdt_private.iTCO_version =
-			iTCO_chipset_info[ent->driver_data].iTCO_version;
-	iTCO_wdt_private.ACPIBASE = base_address;
-	iTCO_wdt_private.pdev = pdev;
-
-	/* Get the Memory-Mapped GCS register, we need it for the
-	   NO_REBOOT flag (TCO v2). To get access to it you have to
-	   read RCBA from PCI Config space 0xf0 and use it as base.
-	   GCS = RCBA + ICH6_GCS(0x3410). */
 	if (iTCO_wdt_private.iTCO_version == 2) {
-		pci_read_config_dword(pdev, 0xf0, &base_address);
-		if ((base_address & 1) == 0) {
-			pr_err("RCBA is disabled by hardware/BIOS, device disabled\n");
-			ret = -ENODEV;
+		iTCO_wdt_private.gcs_res = platform_get_resource(dev,
+							IORESOURCE_MEM,
+							ICH_RES_MEM_GCS);
+
+		if (!iTCO_wdt_private.gcs_res)
+			goto out;
+
+		if (!request_mem_region(iTCO_wdt_private.gcs_res->start,
+			resource_size(iTCO_wdt_private.gcs_res), dev->name)) {
+			ret = -EBUSY;
 			goto out;
 		}
-		RCBA = base_address & 0xffffc000;
-		iTCO_wdt_private.gcs = ioremap((RCBA + 0x3410), 4);
+		iTCO_wdt_private.gcs = ioremap(iTCO_wdt_private.gcs_res->start,
+			resource_size(iTCO_wdt_private.gcs_res));
+		if (!iTCO_wdt_private.gcs) {
+			ret = -EIO;
+			goto unreg_gcs;
+		}
 	}
 
 	/* Check chipset's NO_REBOOT bit */
 	if (iTCO_wdt_unset_NO_REBOOT_bit() && iTCO_vendor_check_noreboot_on()) {
 		pr_info("unable to reset NO_REBOOT flag, device disabled by hardware/BIOS\n");
 		ret = -ENODEV;	/* Cannot reset NO_REBOOT bit */
-		goto out_unmap;
+		goto unmap_gcs;
 	}
 
 	/* Set the NO_REBOOT bit to prevent later reboots, just for sure */
 	iTCO_wdt_set_NO_REBOOT_bit();
 
 	/* The TCO logic uses the TCO_EN bit in the SMI_EN register */
-	if (!request_region(SMI_EN, 4, "iTCO_wdt")) {
-		pr_err("I/O address 0x%04lx already in use, device disabled\n",
-		       SMI_EN);
-		ret = -EIO;
-		goto out_unmap;
+	if (!request_region(iTCO_wdt_private.smi_res->start,
+			resource_size(iTCO_wdt_private.smi_res), dev->name)) {
+		pr_err("I/O address 0x%04llx already in use, device disabled\n",
+		       (u64)SMI_EN);
+		ret = -EBUSY;
+		goto unmap_gcs;
 	}
 	if (turn_SMI_watchdog_clear_off >= iTCO_wdt_private.iTCO_version) {
-		/* Bit 13: TCO_EN -> 0 = Disables TCO logic generating an SMI# */
+		/*
+		 * Bit 13: TCO_EN -> 0
+		 * Disables TCO logic generating an SMI#
+		 */
 		val32 = inl(SMI_EN);
 		val32 &= 0xffffdfff;	/* Turn off SMI clearing watchdog */
 		outl(val32, SMI_EN);
 	}
 
-	/* The TCO I/O registers reside in a 32-byte range pointed to
-	   by the TCOBASE value */
-	if (!request_region(TCOBASE, 0x20, "iTCO_wdt")) {
-		pr_err("I/O address 0x%04lx already in use, device disabled\n",
-		       TCOBASE);
-		ret = -EIO;
-		goto unreg_smi_en;
+	if (!request_region(iTCO_wdt_private.tco_res->start,
+			resource_size(iTCO_wdt_private.tco_res), dev->name)) {
+		pr_err("I/O address 0x%04llx already in use, device disabled\n",
+		       (u64)TCOBASE);
+		ret = -EBUSY;
+		goto unreg_smi;
 	}
 
-	pr_info("Found a %s TCO device (Version=%d, TCOBASE=0x%04lx)\n",
-		iTCO_chipset_info[ent->driver_data].name,
-		iTCO_chipset_info[ent->driver_data].iTCO_version,
-		TCOBASE);
+	pr_info("Found a %s TCO device (Version=%d, TCOBASE=0x%04llx)\n",
+		ich_info->name, ich_info->iTCO_version, (u64)TCOBASE);
 
 	/* Clear out the (probably old) status */
 	outw(0x0008, TCO1_STS);	/* Clear the Time Out Status bit */
@@ -883,7 +619,7 @@
 	if (ret != 0) {
 		pr_err("cannot register miscdev on minor=%d (err=%d)\n",
 		       WATCHDOG_MINOR, ret);
-		goto unreg_region;
+		goto unreg_tco;
 	}
 
 	pr_info("initialized. heartbeat=%d sec (nowayout=%d)\n",
@@ -891,62 +627,31 @@
 
 	return 0;
 
-unreg_region:
-	release_region(TCOBASE, 0x20);
-unreg_smi_en:
-	release_region(SMI_EN, 4);
-out_unmap:
+unreg_tco:
+	release_region(iTCO_wdt_private.tco_res->start,
+			resource_size(iTCO_wdt_private.tco_res));
+unreg_smi:
+	release_region(iTCO_wdt_private.smi_res->start,
+			resource_size(iTCO_wdt_private.smi_res));
+unmap_gcs:
 	if (iTCO_wdt_private.iTCO_version == 2)
 		iounmap(iTCO_wdt_private.gcs);
+unreg_gcs:
+	if (iTCO_wdt_private.iTCO_version == 2)
+		release_mem_region(iTCO_wdt_private.gcs_res->start,
+				resource_size(iTCO_wdt_private.gcs_res));
 out:
-	iTCO_wdt_private.ACPIBASE = 0;
-	return ret;
-}
-
-static void __devexit iTCO_wdt_cleanup(void)
-{
-	/* Stop the timer before we leave */
-	if (!nowayout)
-		iTCO_wdt_stop();
-
-	/* Deregister */
-	misc_deregister(&iTCO_wdt_miscdev);
-	release_region(TCOBASE, 0x20);
-	release_region(SMI_EN, 4);
-	if (iTCO_wdt_private.iTCO_version == 2)
-		iounmap(iTCO_wdt_private.gcs);
-	pci_dev_put(iTCO_wdt_private.pdev);
-	iTCO_wdt_private.ACPIBASE = 0;
-}
-
-static int __devinit iTCO_wdt_probe(struct platform_device *dev)
-{
-	int ret = -ENODEV;
-	int found = 0;
-	struct pci_dev *pdev = NULL;
-	const struct pci_device_id *ent;
-
-	spin_lock_init(&iTCO_wdt_private.io_lock);
-
-	for_each_pci_dev(pdev) {
-		ent = pci_match_id(iTCO_wdt_pci_tbl, pdev);
-		if (ent) {
-			found++;
-			ret = iTCO_wdt_init(pdev, ent, dev);
-			if (!ret)
-				break;
-		}
-	}
-
-	if (!found)
-		pr_info("No device detected\n");
+	iTCO_wdt_private.tco_res = NULL;
+	iTCO_wdt_private.smi_res = NULL;
+	iTCO_wdt_private.gcs_res = NULL;
+	iTCO_wdt_private.gcs = NULL;
 
 	return ret;
 }
 
 static int __devexit iTCO_wdt_remove(struct platform_device *dev)
 {
-	if (iTCO_wdt_private.ACPIBASE)
+	if (iTCO_wdt_private.tco_res || iTCO_wdt_private.smi_res)
 		iTCO_wdt_cleanup();
 
 	return 0;
@@ -977,23 +682,11 @@
 	if (err)
 		return err;
 
-	iTCO_wdt_platform_device = platform_device_register_simple(DRV_NAME,
-								-1, NULL, 0);
-	if (IS_ERR(iTCO_wdt_platform_device)) {
-		err = PTR_ERR(iTCO_wdt_platform_device);
-		goto unreg_platform_driver;
-	}
-
 	return 0;
-
-unreg_platform_driver:
-	platform_driver_unregister(&iTCO_wdt_driver);
-	return err;
 }
 
 static void __exit iTCO_wdt_cleanup_module(void)
 {
-	platform_device_unregister(iTCO_wdt_platform_device);
 	platform_driver_unregister(&iTCO_wdt_driver);
 	pr_info("Watchdog Module Unloaded\n");
 }
@@ -1006,3 +699,4 @@
 MODULE_VERSION(DRV_VERSION);
 MODULE_LICENSE("GPL");
 MODULE_ALIAS_MISCDEV(WATCHDOG_MINOR);
+MODULE_ALIAS("platform:" DRV_NAME);
diff --git a/drivers/watchdog/imx2_wdt.c b/drivers/watchdog/imx2_wdt.c
index 7a2b734..bcfab2b 100644
--- a/drivers/watchdog/imx2_wdt.c
+++ b/drivers/watchdog/imx2_wdt.c
@@ -121,7 +121,7 @@
 {
 	if (!test_and_set_bit(IMX2_WDT_STATUS_STARTED, &imx2_wdt.status)) {
 		/* at our first start we enable clock and do initialisations */
-		clk_enable(imx2_wdt.clk);
+		clk_prepare_enable(imx2_wdt.clk);
 
 		imx2_wdt_setup();
 	} else	/* delete the timer that pings the watchdog after close */
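
The one-line imx2 change reflects the common-clock rule that a clock must be prepared before it may be enabled; clk_prepare_enable() bundles both steps and clk_disable_unprepare() undoes them. A small sketch of the usual pairing (the NULL clock id is an assumption, not the imx2 binding):

    #include <linux/clk.h>
    #include <linux/err.h>
    #include <linux/device.h>

    static struct clk *example_clk;

    static int example_clock_on(struct device *dev)
    {
            example_clk = devm_clk_get(dev, NULL);
            if (IS_ERR(example_clk))
                    return PTR_ERR(example_clk);

            /* prepare (may sleep) and enable (atomic) in one call */
            return clk_prepare_enable(example_clk);
    }

    static void example_clock_off(void)
    {
            clk_disable_unprepare(example_clk);
    }
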
diff --git a/drivers/watchdog/lantiq_wdt.c b/drivers/watchdog/lantiq_wdt.c
index a9593a3..2e74c3a 100644
--- a/drivers/watchdog/lantiq_wdt.c
+++ b/drivers/watchdog/lantiq_wdt.c
@@ -13,14 +13,15 @@
 #include <linux/fs.h>
 #include <linux/miscdevice.h>
 #include <linux/watchdog.h>
-#include <linux/platform_device.h>
+#include <linux/of_platform.h>
 #include <linux/uaccess.h>
 #include <linux/clk.h>
 #include <linux/io.h>
 
-#include <lantiq.h>
+#include <lantiq_soc.h>
 
-/* Section 3.4 of the datasheet
+/*
+ * Section 3.4 of the datasheet
  * The password sequence protects the WDT control register from unintended
  * write actions, which might cause malfunction of the WDT.
  *
@@ -70,7 +71,8 @@
 {
 	/* write the first password magic */
 	ltq_w32(LTQ_WDT_PW1, ltq_wdt_membase + LTQ_WDT_CR);
-	/* write the second password magic with no config
+	/*
+	 * write the second password magic with no config
 	 * this turns the watchdog off
 	 */
 	ltq_w32(LTQ_WDT_PW2, ltq_wdt_membase + LTQ_WDT_CR);
@@ -184,7 +186,7 @@
 	.fops	= &ltq_wdt_fops,
 };
 
-static int __init
+static int __devinit
 ltq_wdt_probe(struct platform_device *pdev)
 {
 	struct resource *res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
@@ -194,28 +196,27 @@
 		dev_err(&pdev->dev, "cannot obtain I/O memory region");
 		return -ENOENT;
 	}
-	res = devm_request_mem_region(&pdev->dev, res->start,
-		resource_size(res), dev_name(&pdev->dev));
-	if (!res) {
-		dev_err(&pdev->dev, "cannot request I/O memory region");
-		return -EBUSY;
-	}
-	ltq_wdt_membase = devm_ioremap_nocache(&pdev->dev, res->start,
-		resource_size(res));
+
+	ltq_wdt_membase = devm_request_and_ioremap(&pdev->dev, res);
 	if (!ltq_wdt_membase) {
 		dev_err(&pdev->dev, "cannot remap I/O memory region\n");
 		return -ENOMEM;
 	}
 
 	/* we do not need to enable the clock as it is always running */
-	clk = clk_get(&pdev->dev, "io");
-	WARN_ON(!clk);
+	clk = clk_get_io();
+	if (IS_ERR(clk)) {
+		dev_err(&pdev->dev, "Failed to get clock\n");
+		return -ENOENT;
+	}
 	ltq_io_region_clk_rate = clk_get_rate(clk);
 	clk_put(clk);
 
+	/* find out if the watchdog caused the last reboot */
 	if (ltq_reset_cause() == LTQ_RST_CAUSE_WDTRST)
 		ltq_wdt_bootstatus = WDIOF_CARDRESET;
 
+	dev_info(&pdev->dev, "Init done\n");
 	return misc_register(&ltq_wdt_miscdev);
 }
 
@@ -227,33 +228,26 @@
 	return 0;
 }
 
+static const struct of_device_id ltq_wdt_match[] = {
+	{ .compatible = "lantiq,wdt" },
+	{},
+};
+MODULE_DEVICE_TABLE(of, ltq_wdt_match);
 
 static struct platform_driver ltq_wdt_driver = {
+	.probe = ltq_wdt_probe,
 	.remove = __devexit_p(ltq_wdt_remove),
 	.driver = {
-		.name = "ltq_wdt",
+		.name = "wdt",
 		.owner = THIS_MODULE,
+		.of_match_table = ltq_wdt_match,
 	},
 };
 
-static int __init
-init_ltq_wdt(void)
-{
-	return platform_driver_probe(&ltq_wdt_driver, ltq_wdt_probe);
-}
-
-static void __exit
-exit_ltq_wdt(void)
-{
-	return platform_driver_unregister(&ltq_wdt_driver);
-}
-
-module_init(init_ltq_wdt);
-module_exit(exit_ltq_wdt);
+module_platform_driver(ltq_wdt_driver);
 
 module_param(nowayout, bool, 0);
 MODULE_PARM_DESC(nowayout, "Watchdog cannot be stopped once started");
-
 MODULE_AUTHOR("John Crispin <blogic@openwrt.org>");
 MODULE_DESCRIPTION("Lantiq SoC Watchdog");
 MODULE_LICENSE("GPL");
diff --git a/drivers/watchdog/orion_wdt.c b/drivers/watchdog/orion_wdt.c
index 788aa15..0f57369 100644
--- a/drivers/watchdog/orion_wdt.c
+++ b/drivers/watchdog/orion_wdt.c
@@ -24,8 +24,8 @@
 #include <linux/uaccess.h>
 #include <linux/io.h>
 #include <linux/spinlock.h>
+#include <linux/clk.h>
 #include <mach/bridge-regs.h>
-#include <plat/orion_wdt.h>
 
 /*
  * Watchdog timer block registers.
@@ -41,6 +41,7 @@
 static bool nowayout = WATCHDOG_NOWAYOUT;
 static int heartbeat = -1;		/* module parameter (seconds) */
 static unsigned int wdt_max_duration;	/* (seconds) */
+static struct clk *clk;
 static unsigned int wdt_tclk;
 static void __iomem *wdt_reg;
 static unsigned long wdt_status;
@@ -237,16 +238,16 @@
 
 static int __devinit orion_wdt_probe(struct platform_device *pdev)
 {
-	struct orion_wdt_platform_data *pdata = pdev->dev.platform_data;
 	struct resource *res;
 	int ret;
 
-	if (pdata) {
-		wdt_tclk = pdata->tclk;
-	} else {
-		pr_err("misses platform data\n");
+	clk = clk_get(&pdev->dev, NULL);
+	if (IS_ERR(clk)) {
+		printk(KERN_ERR "Orion Watchdog missing clock\n");
 		return -ENODEV;
 	}
+	clk_prepare_enable(clk);
+	wdt_tclk = clk_get_rate(clk);
 
 	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
 
@@ -282,6 +283,9 @@
 	if (!ret)
 		orion_wdt_miscdev.parent = NULL;
 
+	clk_disable_unprepare(clk);
+	clk_put(clk);
+
 	return ret;
 }
 
diff --git a/drivers/watchdog/sp805_wdt.c b/drivers/watchdog/sp805_wdt.c
index bbb170e..e4841c3 100644
--- a/drivers/watchdog/sp805_wdt.c
+++ b/drivers/watchdog/sp805_wdt.c
@@ -4,7 +4,7 @@
  * Watchdog driver for ARM SP805 watchdog module
  *
  * Copyright (C) 2010 ST Microelectronics
- * Viresh Kumar<viresh.kumar@st.com>
+ * Viresh Kumar <viresh.linux@gmail.com>
  *
  * This file is licensed under the terms of the GNU General Public
  * License version 2 or later. This program is licensed "as is" without any
@@ -16,20 +16,17 @@
 #include <linux/amba/bus.h>
 #include <linux/bitops.h>
 #include <linux/clk.h>
-#include <linux/fs.h>
 #include <linux/init.h>
 #include <linux/io.h>
 #include <linux/ioport.h>
 #include <linux/kernel.h>
 #include <linux/math64.h>
-#include <linux/miscdevice.h>
 #include <linux/module.h>
 #include <linux/moduleparam.h>
 #include <linux/pm.h>
 #include <linux/slab.h>
 #include <linux/spinlock.h>
 #include <linux/types.h>
-#include <linux/uaccess.h>
 #include <linux/watchdog.h>
 
 /* default timeout in seconds */
@@ -56,6 +53,7 @@
 
 /**
  * struct sp805_wdt: sp805 wdt device structure
+ * @wdd: instance of struct watchdog_device
  * @lock: spin lock protecting dev structure and io access
  * @base: base address of wdt
  * @clk: clock structure of wdt
@@ -65,24 +63,24 @@
  * @timeout: current programmed timeout
  */
 struct sp805_wdt {
+	struct watchdog_device		wdd;
 	spinlock_t			lock;
 	void __iomem			*base;
 	struct clk			*clk;
 	struct amba_device		*adev;
-	unsigned long			status;
-	#define WDT_BUSY		0
-	#define WDT_CAN_BE_CLOSED	1
 	unsigned int			load_val;
 	unsigned int			timeout;
 };
 
-/* local variables */
-static struct sp805_wdt *wdt;
 static bool nowayout = WATCHDOG_NOWAYOUT;
+module_param(nowayout, bool, 0);
+MODULE_PARM_DESC(nowayout,
+		"Set to 1 to keep watchdog running after device release");
 
 /* This routine finds load value that will reset system in required timeout */
-static void wdt_setload(unsigned int timeout)
+static int wdt_setload(struct watchdog_device *wdd, unsigned int timeout)
 {
+	struct sp805_wdt *wdt = watchdog_get_drvdata(wdd);
 	u64 load, rate;
 
 	rate = clk_get_rate(wdt->clk);
@@ -103,11 +101,14 @@
 	/* roundup timeout to closest positive integer value */
 	wdt->timeout = div_u64((load + 1) * 2 + (rate / 2), rate);
 	spin_unlock(&wdt->lock);
+
+	return 0;
 }
 
 /* returns number of seconds left for reset to occur */
-static u32 wdt_timeleft(void)
+static unsigned int wdt_timeleft(struct watchdog_device *wdd)
 {
+	struct sp805_wdt *wdt = watchdog_get_drvdata(wdd);
 	u64 load, rate;
 
 	rate = clk_get_rate(wdt->clk);
@@ -123,25 +124,62 @@
 	return div_u64(load, rate);
 }
 
-/* enables watchdog timers reset */
-static void wdt_enable(void)
+static int wdt_config(struct watchdog_device *wdd, bool ping)
 {
+	struct sp805_wdt *wdt = watchdog_get_drvdata(wdd);
+	int ret;
+
+	if (!ping) {
+		ret = clk_prepare(wdt->clk);
+		if (ret) {
+			dev_err(&wdt->adev->dev, "clock prepare fail");
+			return ret;
+		}
+
+		ret = clk_enable(wdt->clk);
+		if (ret) {
+			dev_err(&wdt->adev->dev, "clock enable fail");
+			clk_unprepare(wdt->clk);
+			return ret;
+		}
+	}
+
 	spin_lock(&wdt->lock);
 
 	writel_relaxed(UNLOCK, wdt->base + WDTLOCK);
 	writel_relaxed(wdt->load_val, wdt->base + WDTLOAD);
-	writel_relaxed(INT_MASK, wdt->base + WDTINTCLR);
-	writel_relaxed(INT_ENABLE | RESET_ENABLE, wdt->base + WDTCONTROL);
+
+	if (!ping) {
+		writel_relaxed(INT_MASK, wdt->base + WDTINTCLR);
+		writel_relaxed(INT_ENABLE | RESET_ENABLE, wdt->base +
+				WDTCONTROL);
+	}
+
 	writel_relaxed(LOCK, wdt->base + WDTLOCK);
 
 	/* Flush posted writes. */
 	readl_relaxed(wdt->base + WDTLOCK);
 	spin_unlock(&wdt->lock);
+
+	return 0;
+}
+
+static int wdt_ping(struct watchdog_device *wdd)
+{
+	return wdt_config(wdd, true);
+}
+
+/* enables watchdog timers reset */
+static int wdt_enable(struct watchdog_device *wdd)
+{
+	return wdt_config(wdd, false);
 }
 
 /* disables watchdog timers reset */
-static void wdt_disable(void)
+static int wdt_disable(struct watchdog_device *wdd)
 {
+	struct sp805_wdt *wdt = watchdog_get_drvdata(wdd);
+
 	spin_lock(&wdt->lock);
 
 	writel_relaxed(UNLOCK, wdt->base + WDTLOCK);
@@ -151,138 +189,31 @@
 	/* Flush posted writes. */
 	readl_relaxed(wdt->base + WDTLOCK);
 	spin_unlock(&wdt->lock);
-}
 
-static ssize_t sp805_wdt_write(struct file *file, const char *data,
-		size_t len, loff_t *ppos)
-{
-	if (len) {
-		if (!nowayout) {
-			size_t i;
-
-			clear_bit(WDT_CAN_BE_CLOSED, &wdt->status);
-
-			for (i = 0; i != len; i++) {
-				char c;
-
-				if (get_user(c, data + i))
-					return -EFAULT;
-				/* Check for Magic Close character */
-				if (c == 'V') {
-					set_bit(WDT_CAN_BE_CLOSED,
-							&wdt->status);
-					break;
-				}
-			}
-		}
-		wdt_enable();
-	}
-	return len;
-}
-
-static const struct watchdog_info ident = {
-	.options = WDIOF_MAGICCLOSE | WDIOF_SETTIMEOUT | WDIOF_KEEPALIVEPING,
-	.identity = MODULE_NAME,
-};
-
-static long sp805_wdt_ioctl(struct file *file, unsigned int cmd,
-		unsigned long arg)
-{
-	int ret = -ENOTTY;
-	unsigned int timeout;
-
-	switch (cmd) {
-	case WDIOC_GETSUPPORT:
-		ret = copy_to_user((struct watchdog_info *)arg, &ident,
-				sizeof(ident)) ? -EFAULT : 0;
-		break;
-
-	case WDIOC_GETSTATUS:
-		ret = put_user(0, (int *)arg);
-		break;
-
-	case WDIOC_KEEPALIVE:
-		wdt_enable();
-		ret = 0;
-		break;
-
-	case WDIOC_SETTIMEOUT:
-		ret = get_user(timeout, (unsigned int *)arg);
-		if (ret)
-			break;
-
-		wdt_setload(timeout);
-
-		wdt_enable();
-		/* Fall through */
-
-	case WDIOC_GETTIMEOUT:
-		ret = put_user(wdt->timeout, (unsigned int *)arg);
-		break;
-	case WDIOC_GETTIMELEFT:
-		ret = put_user(wdt_timeleft(), (unsigned int *)arg);
-		break;
-	}
-	return ret;
-}
-
-static int sp805_wdt_open(struct inode *inode, struct file *file)
-{
-	int ret = 0;
-
-	if (test_and_set_bit(WDT_BUSY, &wdt->status))
-		return -EBUSY;
-
-	ret = clk_enable(wdt->clk);
-	if (ret) {
-		dev_err(&wdt->adev->dev, "clock enable fail");
-		goto err;
-	}
-
-	wdt_enable();
-
-	/* can not be closed, once enabled */
-	clear_bit(WDT_CAN_BE_CLOSED, &wdt->status);
-	return nonseekable_open(inode, file);
-
-err:
-	clear_bit(WDT_BUSY, &wdt->status);
-	return ret;
-}
-
-static int sp805_wdt_release(struct inode *inode, struct file *file)
-{
-	if (!test_bit(WDT_CAN_BE_CLOSED, &wdt->status)) {
-		clear_bit(WDT_BUSY, &wdt->status);
-		dev_warn(&wdt->adev->dev, "Device closed unexpectedly\n");
-		return 0;
-	}
-
-	wdt_disable();
 	clk_disable(wdt->clk);
-	clear_bit(WDT_BUSY, &wdt->status);
+	clk_unprepare(wdt->clk);
 
 	return 0;
 }
 
-static const struct file_operations sp805_wdt_fops = {
-	.owner = THIS_MODULE,
-	.llseek = no_llseek,
-	.write = sp805_wdt_write,
-	.unlocked_ioctl = sp805_wdt_ioctl,
-	.open = sp805_wdt_open,
-	.release = sp805_wdt_release,
+static const struct watchdog_info wdt_info = {
+	.options = WDIOF_MAGICCLOSE | WDIOF_SETTIMEOUT | WDIOF_KEEPALIVEPING,
+	.identity = MODULE_NAME,
 };
 
-static struct miscdevice sp805_wdt_miscdev = {
-	.minor = WATCHDOG_MINOR,
-	.name = "watchdog",
-	.fops = &sp805_wdt_fops,
+static const struct watchdog_ops wdt_ops = {
+	.owner		= THIS_MODULE,
+	.start		= wdt_enable,
+	.stop		= wdt_disable,
+	.ping		= wdt_ping,
+	.set_timeout	= wdt_setload,
+	.get_timeleft	= wdt_timeleft,
 };
 
 static int __devinit
 sp805_wdt_probe(struct amba_device *adev, const struct amba_id *id)
 {
+	struct sp805_wdt *wdt;
 	int ret = 0;
 
 	if (!devm_request_mem_region(&adev->dev, adev->res.start,
@@ -315,19 +246,26 @@
 	}
 
 	wdt->adev = adev;
-	spin_lock_init(&wdt->lock);
-	wdt_setload(DEFAULT_TIMEOUT);
+	wdt->wdd.info = &wdt_info;
+	wdt->wdd.ops = &wdt_ops;
 
-	ret = misc_register(&sp805_wdt_miscdev);
-	if (ret < 0) {
-		dev_warn(&adev->dev, "cannot register misc device\n");
-		goto err_misc_register;
+	spin_lock_init(&wdt->lock);
+	watchdog_set_nowayout(&wdt->wdd, nowayout);
+	watchdog_set_drvdata(&wdt->wdd, wdt);
+	wdt_setload(&wdt->wdd, DEFAULT_TIMEOUT);
+
+	ret = watchdog_register_device(&wdt->wdd);
+	if (ret) {
+		dev_err(&adev->dev, "watchdog_register_device() failed: %d\n",
+				ret);
+		goto err_register;
 	}
+	amba_set_drvdata(adev, wdt);
 
 	dev_info(&adev->dev, "registration successful\n");
 	return 0;
 
-err_misc_register:
+err_register:
 	clk_put(wdt->clk);
 err:
 	dev_err(&adev->dev, "Probe Failed!!!\n");
@@ -336,7 +274,11 @@
 
 static int __devexit sp805_wdt_remove(struct amba_device *adev)
 {
-	misc_deregister(&sp805_wdt_miscdev);
+	struct sp805_wdt *wdt = amba_get_drvdata(adev);
+
+	watchdog_unregister_device(&wdt->wdd);
+	amba_set_drvdata(adev, NULL);
+	watchdog_set_drvdata(&wdt->wdd, NULL);
 	clk_put(wdt->clk);
 
 	return 0;
@@ -345,28 +287,22 @@
 #ifdef CONFIG_PM
 static int sp805_wdt_suspend(struct device *dev)
 {
-	if (test_bit(WDT_BUSY, &wdt->status)) {
-		wdt_disable();
-		clk_disable(wdt->clk);
-	}
+	struct sp805_wdt *wdt = dev_get_drvdata(dev);
+
+	if (watchdog_active(&wdt->wdd))
+		return wdt_disable(&wdt->wdd);
 
 	return 0;
 }
 
 static int sp805_wdt_resume(struct device *dev)
 {
-	int ret = 0;
+	struct sp805_wdt *wdt = dev_get_drvdata(dev);
 
-	if (test_bit(WDT_BUSY, &wdt->status)) {
-		ret = clk_enable(wdt->clk);
-		if (ret) {
-			dev_err(dev, "clock enable fail");
-			return ret;
-		}
-		wdt_enable();
-	}
+	if (watchdog_active(&wdt->wdd))
+		return wdt_enable(&wdt->wdd);
 
-	return ret;
+	return 0;
 }
 #endif /* CONFIG_PM */
 
@@ -395,11 +331,6 @@
 
 module_amba_driver(sp805_wdt_driver);
 
-module_param(nowayout, bool, 0);
-MODULE_PARM_DESC(nowayout,
-		"Set to 1 to keep watchdog running after device release");
-
-MODULE_AUTHOR("Viresh Kumar <viresh.kumar@st.com>");
+MODULE_AUTHOR("Viresh Kumar <viresh.linux@gmail.com>");
 MODULE_DESCRIPTION("ARM SP805 Watchdog Driver");
 MODULE_LICENSE("GPL");
-MODULE_ALIAS_MISCDEV(WATCHDOG_MINOR);
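
With the misc-device plumbing gone, sp805 only fills in a watchdog_device and watchdog_ops; the generic core (reworked later in this series) supplies /dev/watchdog, the ioctls and the magic-close handling. A minimal registration sketch against that API, with illustrative names and no real hardware behind the hooks:

    #include <linux/module.h>
    #include <linux/watchdog.h>

    static int example_wdt_start(struct watchdog_device *wdd)
    {
            /* kick the hardware here */
            return 0;
    }

    static int example_wdt_stop(struct watchdog_device *wdd)
    {
            /* halt the hardware here */
            return 0;
    }

    static const struct watchdog_info example_wdt_info = {
            .options  = WDIOF_KEEPALIVEPING | WDIOF_MAGICCLOSE,
            .identity = "example_wdt",
    };

    static const struct watchdog_ops example_wdt_ops = {
            .owner = THIS_MODULE,
            .start = example_wdt_start,
            .stop  = example_wdt_stop,
    };

    static struct watchdog_device example_wdd = {
            .info = &example_wdt_info,
            .ops  = &example_wdt_ops,
    };

    static int __init example_wdt_init(void)
    {
            return watchdog_register_device(&example_wdd);
    }
    module_init(example_wdt_init);

    static void __exit example_wdt_exit(void)
    {
            watchdog_unregister_device(&example_wdd);
    }
    module_exit(example_wdt_exit);

    MODULE_LICENSE("GPL");
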
diff --git a/drivers/watchdog/via_wdt.c b/drivers/watchdog/via_wdt.c
index 5603e31..aa50da3 100644
--- a/drivers/watchdog/via_wdt.c
+++ b/drivers/watchdog/via_wdt.c
@@ -91,7 +91,7 @@
 static void wdt_timer_tick(unsigned long data)
 {
 	if (time_before(jiffies, next_heartbeat) ||
-	   (!test_bit(WDOG_ACTIVE, &wdt_dev.status))) {
+	   (!watchdog_active(&wdt_dev))) {
 		wdt_reset();
 		mod_timer(&timer, jiffies + WDT_HEARTBEAT);
 	} else
diff --git a/drivers/watchdog/watchdog_core.c b/drivers/watchdog/watchdog_core.c
index 14d768b..6aa46a9 100644
--- a/drivers/watchdog/watchdog_core.c
+++ b/drivers/watchdog/watchdog_core.c
@@ -34,8 +34,13 @@
 #include <linux/kernel.h>	/* For printk/panic/... */
 #include <linux/watchdog.h>	/* For watchdog specific items */
 #include <linux/init.h>		/* For __init/__exit/... */
+#include <linux/idr.h>		/* For ida_* macros */
+#include <linux/err.h>		/* For IS_ERR macros */
 
-#include "watchdog_dev.h"	/* For watchdog_dev_register/... */
+#include "watchdog_core.h"	/* For watchdog_dev_register/... */
+
+static DEFINE_IDA(watchdog_ida);
+static struct class *watchdog_class;
 
 /**
  * watchdog_register_device() - register a watchdog device
@@ -49,7 +54,7 @@
  */
 int watchdog_register_device(struct watchdog_device *wdd)
 {
-	int ret;
+	int ret, id, devno;
 
 	if (wdd == NULL || wdd->info == NULL || wdd->ops == NULL)
 		return -EINVAL;
@@ -74,10 +79,38 @@
 	 * corrupted in a later stage then we expect a kernel panic!
 	 */
 
-	/* We only support 1 watchdog device via the /dev/watchdog interface */
+	mutex_init(&wdd->lock);
+	id = ida_simple_get(&watchdog_ida, 0, MAX_DOGS, GFP_KERNEL);
+	if (id < 0)
+		return id;
+	wdd->id = id;
+
 	ret = watchdog_dev_register(wdd);
 	if (ret) {
-		pr_err("error registering /dev/watchdog (err=%d)\n", ret);
+		ida_simple_remove(&watchdog_ida, id);
+		if (!(id == 0 && ret == -EBUSY))
+			return ret;
+
+		/* Retry in case a legacy watchdog module exists */
+		id = ida_simple_get(&watchdog_ida, 1, MAX_DOGS, GFP_KERNEL);
+		if (id < 0)
+			return id;
+		wdd->id = id;
+
+		ret = watchdog_dev_register(wdd);
+		if (ret) {
+			ida_simple_remove(&watchdog_ida, id);
+			return ret;
+		}
+	}
+
+	devno = wdd->cdev.dev;
+	wdd->dev = device_create(watchdog_class, wdd->parent, devno,
+					NULL, "watchdog%d", wdd->id);
+	if (IS_ERR(wdd->dev)) {
+		watchdog_dev_unregister(wdd);
+		ida_simple_remove(&watchdog_ida, id);
+		ret = PTR_ERR(wdd->dev);
 		return ret;
 	}
 
@@ -95,6 +128,7 @@
 void watchdog_unregister_device(struct watchdog_device *wdd)
 {
 	int ret;
+	int devno = wdd->cdev.dev;
 
 	if (wdd == NULL)
 		return;
@@ -102,9 +136,41 @@
 	ret = watchdog_dev_unregister(wdd);
 	if (ret)
 		pr_err("error unregistering /dev/watchdog (err=%d)\n", ret);
+	device_destroy(watchdog_class, devno);
+	ida_simple_remove(&watchdog_ida, wdd->id);
+	wdd->dev = NULL;
 }
 EXPORT_SYMBOL_GPL(watchdog_unregister_device);
 
+static int __init watchdog_init(void)
+{
+	int err;
+
+	watchdog_class = class_create(THIS_MODULE, "watchdog");
+	if (IS_ERR(watchdog_class)) {
+		pr_err("couldn't create class\n");
+		return PTR_ERR(watchdog_class);
+	}
+
+	err = watchdog_dev_init();
+	if (err < 0) {
+		class_destroy(watchdog_class);
+		return err;
+	}
+
+	return 0;
+}
+
+static void __exit watchdog_exit(void)
+{
+	watchdog_dev_exit();
+	class_destroy(watchdog_class);
+	ida_destroy(&watchdog_ida);
+}
+
+subsys_initcall(watchdog_init);
+module_exit(watchdog_exit);
+
 MODULE_AUTHOR("Alan Cox <alan@lxorguk.ukuu.org.uk>");
 MODULE_AUTHOR("Wim Van Sebroeck <wim@iguana.be>");
 MODULE_DESCRIPTION("WatchDog Timer Driver Core");
diff --git a/drivers/watchdog/watchdog_dev.h b/drivers/watchdog/watchdog_core.h
similarity index 79%
rename from drivers/watchdog/watchdog_dev.h
rename to drivers/watchdog/watchdog_core.h
index bc7612b..6c95141 100644
--- a/drivers/watchdog/watchdog_dev.h
+++ b/drivers/watchdog/watchdog_core.h
@@ -26,8 +26,12 @@
  *	This material is provided "AS-IS" and at no charge.
  */
 
+#define MAX_DOGS	32	/* Maximum number of watchdog devices */
+
 /*
  *	Functions/procedures to be called by the core
  */
-int watchdog_dev_register(struct watchdog_device *);
-int watchdog_dev_unregister(struct watchdog_device *);
+extern int watchdog_dev_register(struct watchdog_device *);
+extern int watchdog_dev_unregister(struct watchdog_device *);
+extern int __init watchdog_dev_init(void);
+extern void __exit watchdog_dev_exit(void);
diff --git a/drivers/watchdog/watchdog_dev.c b/drivers/watchdog/watchdog_dev.c
index 8558da9..ef8edec 100644
--- a/drivers/watchdog/watchdog_dev.c
+++ b/drivers/watchdog/watchdog_dev.c
@@ -42,10 +42,12 @@
 #include <linux/init.h>		/* For __init/__exit/... */
 #include <linux/uaccess.h>	/* For copy_to_user/put_user/... */
 
-/* make sure we only register one /dev/watchdog device */
-static unsigned long watchdog_dev_busy;
+#include "watchdog_core.h"
+
+/* the dev_t structure to store the dynamically allocated watchdog devices */
+static dev_t watchdog_devt;
 /* the watchdog device behind /dev/watchdog */
-static struct watchdog_device *wdd;
+static struct watchdog_device *old_wdd;
 
 /*
  *	watchdog_ping: ping the watchdog.
@@ -59,13 +61,26 @@
 
 static int watchdog_ping(struct watchdog_device *wddev)
 {
-	if (test_bit(WDOG_ACTIVE, &wddev->status)) {
-		if (wddev->ops->ping)
-			return wddev->ops->ping(wddev);  /* ping the watchdog */
-		else
-			return wddev->ops->start(wddev); /* restart watchdog */
+	int err = 0;
+
+	mutex_lock(&wddev->lock);
+
+	if (test_bit(WDOG_UNREGISTERED, &wddev->status)) {
+		err = -ENODEV;
+		goto out_ping;
 	}
-	return 0;
+
+	if (!watchdog_active(wddev))
+		goto out_ping;
+
+	if (wddev->ops->ping)
+		err = wddev->ops->ping(wddev);  /* ping the watchdog */
+	else
+		err = wddev->ops->start(wddev); /* restart watchdog */
+
+out_ping:
+	mutex_unlock(&wddev->lock);
+	return err;
 }
 
 /*
@@ -79,16 +94,25 @@
 
 static int watchdog_start(struct watchdog_device *wddev)
 {
-	int err;
+	int err = 0;
 
-	if (!test_bit(WDOG_ACTIVE, &wddev->status)) {
-		err = wddev->ops->start(wddev);
-		if (err < 0)
-			return err;
+	mutex_lock(&wddev->lock);
 
-		set_bit(WDOG_ACTIVE, &wddev->status);
+	if (test_bit(WDOG_UNREGISTERED, &wddev->status)) {
+		err = -ENODEV;
+		goto out_start;
 	}
-	return 0;
+
+	if (watchdog_active(wddev))
+		goto out_start;
+
+	err = wddev->ops->start(wddev);
+	if (err == 0)
+		set_bit(WDOG_ACTIVE, &wddev->status);
+
+out_start:
+	mutex_unlock(&wddev->lock);
+	return err;
 }
 
 /*
@@ -103,22 +127,155 @@
 
 static int watchdog_stop(struct watchdog_device *wddev)
 {
-	int err = -EBUSY;
+	int err = 0;
+
+	mutex_lock(&wddev->lock);
+
+	if (test_bit(WDOG_UNREGISTERED, &wddev->status)) {
+		err = -ENODEV;
+		goto out_stop;
+	}
+
+	if (!watchdog_active(wddev))
+		goto out_stop;
 
 	if (test_bit(WDOG_NO_WAY_OUT, &wddev->status)) {
-		pr_info("%s: nowayout prevents watchdog to be stopped!\n",
-							wddev->info->identity);
-		return err;
+		dev_info(wddev->dev, "nowayout prevents watchdog being stopped!\n");
+		err = -EBUSY;
+		goto out_stop;
 	}
 
-	if (test_bit(WDOG_ACTIVE, &wddev->status)) {
-		err = wddev->ops->stop(wddev);
-		if (err < 0)
-			return err;
-
+	err = wddev->ops->stop(wddev);
+	if (err == 0)
 		clear_bit(WDOG_ACTIVE, &wddev->status);
+
+out_stop:
+	mutex_unlock(&wddev->lock);
+	return err;
+}
+
+/*
+ *	watchdog_get_status: wrapper to get the watchdog status
+ *	@wddev: the watchdog device to get the status from
+ *	@status: the status of the watchdog device
+ *
+ *	Get the watchdog's status flags.
+ */
+
+static int watchdog_get_status(struct watchdog_device *wddev,
+							unsigned int *status)
+{
+	int err = 0;
+
+	*status = 0;
+	if (!wddev->ops->status)
+		return -EOPNOTSUPP;
+
+	mutex_lock(&wddev->lock);
+
+	if (test_bit(WDOG_UNREGISTERED, &wddev->status)) {
+		err = -ENODEV;
+		goto out_status;
 	}
-	return 0;
+
+	*status = wddev->ops->status(wddev);
+
+out_status:
+	mutex_unlock(&wddev->lock);
+	return err;
+}
+
+/*
+ *	watchdog_set_timeout: set the watchdog timer timeout
+ *	@wddev: the watchdog device to set the timeout for
+ *	@timeout: timeout to set in seconds
+ */
+
+static int watchdog_set_timeout(struct watchdog_device *wddev,
+							unsigned int timeout)
+{
+	int err;
+
+	if ((wddev->ops->set_timeout == NULL) ||
+	    !(wddev->info->options & WDIOF_SETTIMEOUT))
+		return -EOPNOTSUPP;
+
+	if ((wddev->max_timeout != 0) &&
+	    (timeout < wddev->min_timeout || timeout > wddev->max_timeout))
+		return -EINVAL;
+
+	mutex_lock(&wddev->lock);
+
+	if (test_bit(WDOG_UNREGISTERED, &wddev->status)) {
+		err = -ENODEV;
+		goto out_timeout;
+	}
+
+	err = wddev->ops->set_timeout(wddev, timeout);
+
+out_timeout:
+	mutex_unlock(&wddev->lock);
+	return err;
+}
+
+/*
+ *	watchdog_get_timeleft: wrapper to get the time left before a reboot
+ *	@wddev: the watchdog device to get the remaining time from
+ *	@timeleft: the time that's left
+ *
+ *	Get the time before a watchdog will reboot (if not pinged).
+ */
+
+static int watchdog_get_timeleft(struct watchdog_device *wddev,
+							unsigned int *timeleft)
+{
+	int err = 0;
+
+	*timeleft = 0;
+	if (!wddev->ops->get_timeleft)
+		return -EOPNOTSUPP;
+
+	mutex_lock(&wddev->lock);
+
+	if (test_bit(WDOG_UNREGISTERED, &wddev->status)) {
+		err = -ENODEV;
+		goto out_timeleft;
+	}
+
+	*timeleft = wddev->ops->get_timeleft(wddev);
+
+out_timeleft:
+	mutex_unlock(&wddev->lock);
+	return err;
+}
+
+/*
+ *	watchdog_ioctl_op: call the watchdog drivers ioctl op if defined
+ *	@wddev: the watchdog device to do the ioctl on
+ *	@cmd: watchdog command
+ *	@arg: argument pointer
+ */
+
+static int watchdog_ioctl_op(struct watchdog_device *wddev, unsigned int cmd,
+							unsigned long arg)
+{
+	int err;
+
+	if (!wddev->ops->ioctl)
+		return -ENOIOCTLCMD;
+
+	mutex_lock(&wddev->lock);
+
+	if (test_bit(WDOG_UNREGISTERED, &wddev->status)) {
+		err = -ENODEV;
+		goto out_ioctl;
+	}
+
+	err = wddev->ops->ioctl(wddev, cmd, arg);
+
+out_ioctl:
+	mutex_unlock(&wddev->lock);
+	return err;
 }
 
 /*
@@ -136,6 +293,7 @@
 static ssize_t watchdog_write(struct file *file, const char __user *data,
 						size_t len, loff_t *ppos)
 {
+	struct watchdog_device *wdd = file->private_data;
 	size_t i;
 	char c;
 
@@ -175,23 +333,24 @@
 static long watchdog_ioctl(struct file *file, unsigned int cmd,
 							unsigned long arg)
 {
+	struct watchdog_device *wdd = file->private_data;
 	void __user *argp = (void __user *)arg;
 	int __user *p = argp;
 	unsigned int val;
 	int err;
 
-	if (wdd->ops->ioctl) {
-		err = wdd->ops->ioctl(wdd, cmd, arg);
-		if (err != -ENOIOCTLCMD)
-			return err;
-	}
+	err = watchdog_ioctl_op(wdd, cmd, arg);
+	if (err != -ENOIOCTLCMD)
+		return err;
 
 	switch (cmd) {
 	case WDIOC_GETSUPPORT:
 		return copy_to_user(argp, wdd->info,
 			sizeof(struct watchdog_info)) ? -EFAULT : 0;
 	case WDIOC_GETSTATUS:
-		val = wdd->ops->status ? wdd->ops->status(wdd) : 0;
+		err = watchdog_get_status(wdd, &val);
+		if (err == -ENODEV)
+			return err;
 		return put_user(val, p);
 	case WDIOC_GETBOOTSTATUS:
 		return put_user(wdd->bootstatus, p);
@@ -215,15 +374,9 @@
 		watchdog_ping(wdd);
 		return 0;
 	case WDIOC_SETTIMEOUT:
-		if ((wdd->ops->set_timeout == NULL) ||
-		    !(wdd->info->options & WDIOF_SETTIMEOUT))
-			return -EOPNOTSUPP;
 		if (get_user(val, p))
 			return -EFAULT;
-		if ((wdd->max_timeout != 0) &&
-		    (val < wdd->min_timeout || val > wdd->max_timeout))
-				return -EINVAL;
-		err = wdd->ops->set_timeout(wdd, val);
+		err = watchdog_set_timeout(wdd, val);
 		if (err < 0)
 			return err;
 		/* If the watchdog is active then we send a keepalive ping
@@ -237,21 +390,21 @@
 			return -EOPNOTSUPP;
 		return put_user(wdd->timeout, p);
 	case WDIOC_GETTIMELEFT:
-		if (!wdd->ops->get_timeleft)
-			return -EOPNOTSUPP;
-
-		return put_user(wdd->ops->get_timeleft(wdd), p);
+		err = watchdog_get_timeleft(wdd, &val);
+		if (err)
+			return err;
+		return put_user(val, p);
 	default:
 		return -ENOTTY;
 	}
 }
 
 /*
- *	watchdog_open: open the /dev/watchdog device.
+ *	watchdog_open: open the /dev/watchdog* devices.
  *	@inode: inode of device
  *	@file: file handle to device
  *
- *	When the /dev/watchdog device gets opened, we start the watchdog.
+ *	When the /dev/watchdog* device gets opened, we start the watchdog.
  *	Watch out: the /dev/watchdog device is single open, so we make sure
  *	it can only be opened once.
  */
@@ -259,6 +412,13 @@
 static int watchdog_open(struct inode *inode, struct file *file)
 {
 	int err = -EBUSY;
+	struct watchdog_device *wdd;
+
+	/* Get the corresponding watchdog device */
+	if (imajor(inode) == MISC_MAJOR)
+		wdd = old_wdd;
+	else
+		wdd = container_of(inode->i_cdev, struct watchdog_device, cdev);
 
 	/* the watchdog is single open! */
 	if (test_and_set_bit(WDOG_DEV_OPEN, &wdd->status))
@@ -275,6 +435,11 @@
 	if (err < 0)
 		goto out_mod;
 
+	file->private_data = wdd;
+
+	if (wdd->ops->ref)
+		wdd->ops->ref(wdd);
+
 	/* dev/watchdog is a virtual (and thus non-seekable) filesystem */
 	return nonseekable_open(inode, file);
 
@@ -286,9 +451,9 @@
 }
 
 /*
- *      watchdog_release: release the /dev/watchdog device.
- *      @inode: inode of device
- *      @file: file handle to device
+ *	watchdog_release: release the watchdog device.
+ *	@inode: inode of device
+ *	@file: file handle to device
  *
  *	This is the code for when /dev/watchdog gets closed. We will only
  *	stop the watchdog when we have received the magic char (and nowayout
@@ -297,6 +462,7 @@
 
 static int watchdog_release(struct inode *inode, struct file *file)
 {
+	struct watchdog_device *wdd = file->private_data;
 	int err = -EBUSY;
 
 	/*
@@ -310,7 +476,10 @@
 
 	/* If the watchdog was not stopped, send a keepalive ping */
 	if (err < 0) {
-		pr_crit("%s: watchdog did not stop!\n", wdd->info->identity);
+		mutex_lock(&wdd->lock);
+		if (!test_bit(WDOG_UNREGISTERED, &wdd->status))
+			dev_crit(wdd->dev, "watchdog did not stop!\n");
+		mutex_unlock(&wdd->lock);
 		watchdog_ping(wdd);
 	}
 
@@ -320,6 +489,10 @@
 	/* make sure that /dev/watchdog can be re-opened */
 	clear_bit(WDOG_DEV_OPEN, &wdd->status);
 
+	/* Note wdd may be gone after this, do not use after this! */
+	if (wdd->ops->unref)
+		wdd->ops->unref(wdd);
+
 	return 0;
 }
 
@@ -338,62 +511,92 @@
 };
 
 /*
- *	watchdog_dev_register:
+ *	watchdog_dev_register: register a watchdog device
  *	@watchdog: watchdog device
  *
- *	Register a watchdog device as /dev/watchdog. /dev/watchdog
- *	is actually a miscdevice and thus we set it up like that.
+ *	Register a watchdog device including handling the legacy
+ *	/dev/watchdog node. /dev/watchdog is actually a miscdevice and
+ *	thus we set it up like that.
  */
 
 int watchdog_dev_register(struct watchdog_device *watchdog)
 {
-	int err;
+	int err, devno;
 
-	/* Only one device can register for /dev/watchdog */
-	if (test_and_set_bit(0, &watchdog_dev_busy)) {
-		pr_err("only one watchdog can use /dev/watchdog\n");
-		return -EBUSY;
+	if (watchdog->id == 0) {
+		watchdog_miscdev.parent = watchdog->parent;
+		err = misc_register(&watchdog_miscdev);
+		if (err != 0) {
+			pr_err("%s: cannot register miscdev on minor=%d (err=%d).\n",
+				watchdog->info->identity, WATCHDOG_MINOR, err);
+			if (err == -EBUSY)
+				pr_err("%s: a legacy watchdog module is probably present.\n",
+					watchdog->info->identity);
+			return err;
+		}
+		old_wdd = watchdog;
 	}
 
-	wdd = watchdog;
+	/* Fill in the data structures */
+	devno = MKDEV(MAJOR(watchdog_devt), watchdog->id);
+	cdev_init(&watchdog->cdev, &watchdog_fops);
+	watchdog->cdev.owner = watchdog->ops->owner;
 
-	err = misc_register(&watchdog_miscdev);
-	if (err != 0) {
-		pr_err("%s: cannot register miscdev on minor=%d (err=%d)\n",
-		       watchdog->info->identity, WATCHDOG_MINOR, err);
-		goto out;
+	/* Add the device */
+	err  = cdev_add(&watchdog->cdev, devno, 1);
+	if (err) {
+		pr_err("watchdog%d unable to add device %d:%d\n",
+			watchdog->id,  MAJOR(watchdog_devt), watchdog->id);
+		if (watchdog->id == 0) {
+			misc_deregister(&watchdog_miscdev);
+			old_wdd = NULL;
+		}
 	}
-
-	return 0;
-
-out:
-	wdd = NULL;
-	clear_bit(0, &watchdog_dev_busy);
 	return err;
 }
 
 /*
- *	watchdog_dev_unregister:
+ *	watchdog_dev_unregister: unregister a watchdog device
  *	@watchdog: watchdog device
  *
- *	Deregister the /dev/watchdog device.
+ *	Unregister the watchdog and if needed the legacy /dev/watchdog device.
  */
 
 int watchdog_dev_unregister(struct watchdog_device *watchdog)
 {
-	/* Check that a watchdog device was registered in the past */
-	if (!test_bit(0, &watchdog_dev_busy) || !wdd)
-		return -ENODEV;
+	mutex_lock(&watchdog->lock);
+	set_bit(WDOG_UNREGISTERED, &watchdog->status);
+	mutex_unlock(&watchdog->lock);
 
-	/* We can only unregister the watchdog device that was registered */
-	if (watchdog != wdd) {
-		pr_err("%s: watchdog was not registered as /dev/watchdog\n",
-		       watchdog->info->identity);
-		return -ENODEV;
+	cdev_del(&watchdog->cdev);
+	if (watchdog->id == 0) {
+		misc_deregister(&watchdog_miscdev);
+		old_wdd = NULL;
 	}
-
-	misc_deregister(&watchdog_miscdev);
-	wdd = NULL;
-	clear_bit(0, &watchdog_dev_busy);
 	return 0;
 }
+
+/*
+ *	watchdog_dev_init: init dev part of watchdog core
+ *
+ *	Allocate a range of chardev nodes to use for watchdog devices
+ */
+
+int __init watchdog_dev_init(void)
+{
+	int err = alloc_chrdev_region(&watchdog_devt, 0, MAX_DOGS, "watchdog");
+	if (err < 0)
+		pr_err("watchdog: unable to allocate char dev region\n");
+	return err;
+}
+
+/*
+ *	watchdog_dev_exit: exit dev part of watchdog core
+ *
+ *	Release the range of chardev nodes used for watchdog devices
+ */
+
+void __exit watchdog_dev_exit(void)
+{
+	unregister_chrdev_region(watchdog_devt, MAX_DOGS);
+}
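
Every wrapper added above follows the same guard: take the per-device mutex, bail out if the driver has already unregistered, and only then call into the driver ops. In isolation (illustrative only; it relies on the lock and WDOG_UNREGISTERED bit this series adds to struct watchdog_device):

    #include <linux/errno.h>
    #include <linux/bitops.h>
    #include <linux/mutex.h>
    #include <linux/watchdog.h>

    static int example_guarded_ping(struct watchdog_device *wdd)
    {
            int err = 0;

            mutex_lock(&wdd->lock);
            if (test_bit(WDOG_UNREGISTERED, &wdd->status)) {
                    err = -ENODEV;
                    goto out;
            }
            if (watchdog_active(wdd))
                    err = wdd->ops->ping ? wdd->ops->ping(wdd)
                                         : wdd->ops->start(wdd);
    out:
            mutex_unlock(&wdd->lock);
            return err;
    }
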
diff --git a/drivers/xen/events.c b/drivers/xen/events.c
index 6908e4c..7595581 100644
--- a/drivers/xen/events.c
+++ b/drivers/xen/events.c
@@ -827,6 +827,9 @@
 					      handle_edge_irq, "event");
 
 		xen_irq_info_evtchn_init(irq, evtchn);
+	} else {
+		struct irq_info *info = info_for_irq(irq);
+		WARN_ON(info == NULL || info->type != IRQT_EVTCHN);
 	}
 
 out:
@@ -862,6 +865,9 @@
 		xen_irq_info_ipi_init(cpu, irq, evtchn, ipi);
 
 		bind_evtchn_to_cpu(evtchn, cpu);
+	} else {
+		struct irq_info *info = info_for_irq(irq);
+		WARN_ON(info == NULL || info->type != IRQT_IPI);
 	}
 
  out:
@@ -939,6 +945,9 @@
 		xen_irq_info_virq_init(cpu, irq, evtchn, virq);
 
 		bind_evtchn_to_cpu(evtchn, cpu);
+	} else {
+		struct irq_info *info = info_for_irq(irq);
+		WARN_ON(info == NULL || info->type != IRQT_VIRQ);
 	}
 
 out:
diff --git a/drivers/xen/pci.c b/drivers/xen/pci.c
index b84bf0b..18fff88 100644
--- a/drivers/xen/pci.c
+++ b/drivers/xen/pci.c
@@ -59,7 +59,7 @@
 
 #ifdef CONFIG_ACPI
 		handle = DEVICE_ACPI_HANDLE(&pci_dev->dev);
-		if (!handle)
+		if (!handle && pci_dev->bus->bridge)
 			handle = DEVICE_ACPI_HANDLE(pci_dev->bus->bridge);
 #ifdef CONFIG_PCI_IOV
 		if (!handle && pci_dev->is_virtfn)
diff --git a/drivers/xen/tmem.c b/drivers/xen/tmem.c
index dcb79521..89f264c 100644
--- a/drivers/xen/tmem.c
+++ b/drivers/xen/tmem.c
@@ -269,7 +269,7 @@
 }
 
 /* returns 0 if the page was successfully put into frontswap, -1 if not */
-static int tmem_frontswap_put_page(unsigned type, pgoff_t offset,
+static int tmem_frontswap_store(unsigned type, pgoff_t offset,
 				   struct page *page)
 {
 	u64 ind64 = (u64)offset;
@@ -295,7 +295,7 @@
  * returns 0 if the page was successfully gotten from frontswap, -1 if
  * was not present (should never happen!)
  */
-static int tmem_frontswap_get_page(unsigned type, pgoff_t offset,
+static int tmem_frontswap_load(unsigned type, pgoff_t offset,
 				   struct page *page)
 {
 	u64 ind64 = (u64)offset;
@@ -362,8 +362,8 @@
 __setup("nofrontswap", no_frontswap);
 
 static struct frontswap_ops __initdata tmem_frontswap_ops = {
-	.put_page = tmem_frontswap_put_page,
-	.get_page = tmem_frontswap_get_page,
+	.store = tmem_frontswap_store,
+	.load = tmem_frontswap_load,
 	.invalidate_page = tmem_frontswap_flush_page,
 	.invalidate_area = tmem_frontswap_flush_area,
 	.init = tmem_frontswap_init
diff --git a/drivers/xen/xen-acpi-processor.c b/drivers/xen/xen-acpi-processor.c
index 0b48579..7ff2569 100644
--- a/drivers/xen/xen-acpi-processor.c
+++ b/drivers/xen/xen-acpi-processor.c
@@ -29,6 +29,7 @@
 #include <acpi/acpi_drivers.h>
 #include <acpi/processor.h>
 
+#include <xen/xen.h>
 #include <xen/interface/platform.h>
 #include <asm/xen/hypercall.h>
 
diff --git a/fs/9p/vfs_inode.c b/fs/9p/vfs_inode.c
index 014c8dd..57ccb75 100644
--- a/fs/9p/vfs_inode.c
+++ b/fs/9p/vfs_inode.c
@@ -448,7 +448,7 @@
 	struct v9fs_inode *v9inode = V9FS_I(inode);
 
 	truncate_inode_pages(inode->i_mapping, 0);
-	end_writeback(inode);
+	clear_inode(inode);
 	filemap_fdatawrite(inode->i_mapping);
 
 #ifdef CONFIG_9P_FSCACHE
diff --git a/fs/9p/vfs_inode_dotl.c b/fs/9p/vfs_inode_dotl.c
index a1e6c99..e3dd2a1 100644
--- a/fs/9p/vfs_inode_dotl.c
+++ b/fs/9p/vfs_inode_dotl.c
@@ -68,24 +68,6 @@
 	return current_fsgid();
 }
 
-/**
- * v9fs_dentry_from_dir_inode - helper function to get the dentry from
- * dir inode.
- *
- */
-
-static struct dentry *v9fs_dentry_from_dir_inode(struct inode *inode)
-{
-	struct dentry *dentry;
-
-	spin_lock(&inode->i_lock);
-	/* Directory should have only one entry. */
-	BUG_ON(S_ISDIR(inode->i_mode) && !list_is_singular(&inode->i_dentry));
-	dentry = list_entry(inode->i_dentry.next, struct dentry, d_alias);
-	spin_unlock(&inode->i_lock);
-	return dentry;
-}
-
 static int v9fs_test_inode_dotl(struct inode *inode, void *data)
 {
 	struct v9fs_inode *v9inode = V9FS_I(inode);
@@ -415,7 +397,7 @@
 	if (dir->i_mode & S_ISGID)
 		omode |= S_ISGID;
 
-	dir_dentry = v9fs_dentry_from_dir_inode(dir);
+	dir_dentry = dentry->d_parent;
 	dfid = v9fs_fid_lookup(dir_dentry);
 	if (IS_ERR(dfid)) {
 		err = PTR_ERR(dfid);
@@ -793,7 +775,7 @@
 		 dir->i_ino, old_dentry->d_name.name, dentry->d_name.name);
 
 	v9ses = v9fs_inode2v9ses(dir);
-	dir_dentry = v9fs_dentry_from_dir_inode(dir);
+	dir_dentry = dentry->d_parent;
 	dfid = v9fs_fid_lookup(dir_dentry);
 	if (IS_ERR(dfid))
 		return PTR_ERR(dfid);
@@ -858,7 +840,7 @@
 		return -EINVAL;
 
 	v9ses = v9fs_inode2v9ses(dir);
-	dir_dentry = v9fs_dentry_from_dir_inode(dir);
+	dir_dentry = dentry->d_parent;
 	dfid = v9fs_fid_lookup(dir_dentry);
 	if (IS_ERR(dfid)) {
 		err = PTR_ERR(dfid);
diff --git a/fs/affs/affs.h b/fs/affs/affs.h
index 45a0ce4..1fceb32 100644
--- a/fs/affs/affs.h
+++ b/fs/affs/affs.h
@@ -18,14 +18,6 @@
 #define AFFS_GET_HASHENTRY(data,hashkey) be32_to_cpu(((struct dir_front *)data)->hashtable[hashkey])
 #define AFFS_BLOCK(sb, bh, blk)		(AFFS_HEAD(bh)->table[AFFS_SB(sb)->s_hashsize-1-(blk)])
 
-#ifdef __LITTLE_ENDIAN
-#define BO_EXBITS	0x18UL
-#elif defined(__BIG_ENDIAN)
-#define BO_EXBITS	0x00UL
-#else
-#error Endianness must be known for affs to work.
-#endif
-
 #define AFFS_HEAD(bh)		((struct affs_head *)(bh)->b_data)
 #define AFFS_TAIL(sb, bh)	((struct affs_tail *)((bh)->b_data+(sb)->s_blocksize-sizeof(struct affs_tail)))
 #define AFFS_ROOT_HEAD(bh)	((struct affs_root_head *)(bh)->b_data)
diff --git a/fs/affs/inode.c b/fs/affs/inode.c
index 88a4b0b..8bc4a59 100644
--- a/fs/affs/inode.c
+++ b/fs/affs/inode.c
@@ -264,7 +264,7 @@
 	}
 
 	invalidate_inode_buffers(inode);
-	end_writeback(inode);
+	clear_inode(inode);
 	affs_free_prealloc(inode);
 	cache_page = (unsigned long)AFFS_I(inode)->i_lc;
 	if (cache_page) {
diff --git a/fs/afs/inode.c b/fs/afs/inode.c
index d890ae3..95cffd3 100644
--- a/fs/afs/inode.c
+++ b/fs/afs/inode.c
@@ -423,7 +423,7 @@
 	ASSERTCMP(inode->i_ino, ==, vnode->fid.vnode);
 
 	truncate_inode_pages(&inode->i_data, 0);
-	end_writeback(inode);
+	clear_inode(inode);
 
 	afs_give_up_callback(vnode);
 
diff --git a/fs/aio.c b/fs/aio.c
index e7f2fad..55c4c76 100644
--- a/fs/aio.c
+++ b/fs/aio.c
@@ -134,9 +134,9 @@
 	info->mmap_size = nr_pages * PAGE_SIZE;
 	dprintk("attempting mmap of %lu bytes\n", info->mmap_size);
 	down_write(&ctx->mm->mmap_sem);
-	info->mmap_base = do_mmap(NULL, 0, info->mmap_size, 
-				  PROT_READ|PROT_WRITE, MAP_ANONYMOUS|MAP_PRIVATE,
-				  0);
+	info->mmap_base = do_mmap_pgoff(NULL, 0, info->mmap_size, 
+					PROT_READ|PROT_WRITE,
+					MAP_ANONYMOUS|MAP_PRIVATE, 0);
 	if (IS_ERR((void *)info->mmap_base)) {
 		up_write(&ctx->mm->mmap_sem);
 		info->mmap_size = 0;
@@ -1446,13 +1446,13 @@
 		ret = compat_rw_copy_check_uvector(type,
 				(struct compat_iovec __user *)kiocb->ki_buf,
 				kiocb->ki_nbytes, 1, &kiocb->ki_inline_vec,
-				&kiocb->ki_iovec, 1);
+				&kiocb->ki_iovec);
 	else
 #endif
 		ret = rw_copy_check_uvector(type,
 				(struct iovec __user *)kiocb->ki_buf,
 				kiocb->ki_nbytes, 1, &kiocb->ki_inline_vec,
-				&kiocb->ki_iovec, 1);
+				&kiocb->ki_iovec);
 	if (ret < 0)
 		goto out;
 
diff --git a/fs/attr.c b/fs/attr.c
index 584620e..0da9095 100644
--- a/fs/attr.c
+++ b/fs/attr.c
@@ -176,6 +176,11 @@
 			return -EPERM;
 	}
 
+	if ((ia_valid & ATTR_SIZE) && IS_I_VERSION(inode)) {
+		if (attr->ia_size != inode->i_size)
+			inode_inc_iversion(inode);
+	}
+
 	if ((ia_valid & ATTR_MODE)) {
 		umode_t amode = attr->ia_mode;
 		/* Flag setting protected by i_mutex */
diff --git a/fs/autofs4/inode.c b/fs/autofs4/inode.c
index 6e488eb..8a4fed8 100644
--- a/fs/autofs4/inode.c
+++ b/fs/autofs4/inode.c
@@ -100,7 +100,7 @@
 
 static void autofs4_evict_inode(struct inode *inode)
 {
-	end_writeback(inode);
+	clear_inode(inode);
 	kfree(inode->i_private);
 }
 
diff --git a/fs/bad_inode.c b/fs/bad_inode.c
index 37268c5..1b35d6b 100644
--- a/fs/bad_inode.c
+++ b/fs/bad_inode.c
@@ -292,7 +292,6 @@
 	.getxattr	= bad_inode_getxattr,
 	.listxattr	= bad_inode_listxattr,
 	.removexattr	= bad_inode_removexattr,
-	/* truncate_range returns void */
 };
 
 
diff --git a/fs/bfs/inode.c b/fs/bfs/inode.c
index e23dc7c..9870417 100644
--- a/fs/bfs/inode.c
+++ b/fs/bfs/inode.c
@@ -174,7 +174,7 @@
 
 	truncate_inode_pages(&inode->i_data, 0);
 	invalidate_inode_buffers(inode);
-	end_writeback(inode);
+	clear_inode(inode);
 
 	if (inode->i_nlink)
 		return;
diff --git a/fs/binfmt_elf.c b/fs/binfmt_elf.c
index e658dd1..1b52956 100644
--- a/fs/binfmt_elf.c
+++ b/fs/binfmt_elf.c
@@ -329,7 +329,6 @@
 	if (!size)
 		return addr;
 
-	down_write(&current->mm->mmap_sem);
 	/*
 	* total_size is the size of the ELF (interpreter) image.
 	* The _first_ mmap needs to know the full size, otherwise
@@ -340,13 +339,12 @@
 	*/
 	if (total_size) {
 		total_size = ELF_PAGEALIGN(total_size);
-		map_addr = do_mmap(filep, addr, total_size, prot, type, off);
+		map_addr = vm_mmap(filep, addr, total_size, prot, type, off);
 		if (!BAD_ADDR(map_addr))
-			do_munmap(current->mm, map_addr+size, total_size-size);
+			vm_munmap(map_addr+size, total_size-size);
 	} else
-		map_addr = do_mmap(filep, addr, size, prot, type, off);
+		map_addr = vm_mmap(filep, addr, size, prot, type, off);
 
-	up_write(&current->mm->mmap_sem);
 	return(map_addr);
 }
 
diff --git a/fs/binfmt_flat.c b/fs/binfmt_flat.c
index 6b2daf9..178cb70 100644
--- a/fs/binfmt_flat.c
+++ b/fs/binfmt_flat.c
@@ -562,7 +562,7 @@
 				realdatastart = (unsigned long) -ENOMEM;
 			printk("Unable to allocate RAM for process data, errno %d\n",
 					(int)-realdatastart);
-			do_munmap(current->mm, textpos, text_len);
+			vm_munmap(textpos, text_len);
 			ret = realdatastart;
 			goto err;
 		}
@@ -586,8 +586,8 @@
 		}
 		if (IS_ERR_VALUE(result)) {
 			printk("Unable to read data+bss, errno %d\n", (int)-result);
-			do_munmap(current->mm, textpos, text_len);
-			do_munmap(current->mm, realdatastart, len);
+			vm_munmap(textpos, text_len);
+			vm_munmap(realdatastart, len);
 			ret = result;
 			goto err;
 		}
@@ -654,7 +654,7 @@
 		}
 		if (IS_ERR_VALUE(result)) {
 			printk("Unable to read code+data+bss, errno %d\n",(int)-result);
-			do_munmap(current->mm, textpos, text_len + data_len + extra +
+			vm_munmap(textpos, text_len + data_len + extra +
 				MAX_SHARED_LIBS * sizeof(unsigned long));
 			ret = result;
 			goto err;
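
Both binfmt conversions above lean on the fact that vm_mmap() and vm_munmap() take mmap_sem themselves, which is why the explicit down_write()/up_write() pair disappears. A sketch of the caller-side contract (purely illustrative):

    #include <linux/mm.h>
    #include <linux/mman.h>
    #include <linux/fs.h>
    #include <linux/err.h>

    /* caller must NOT hold current->mm->mmap_sem */
    static unsigned long example_map(struct file *filp, unsigned long len)
    {
            unsigned long addr;

            addr = vm_mmap(filp, 0, len, PROT_READ, MAP_PRIVATE, 0);
            if (IS_ERR_VALUE(addr))
                    return addr;    /* negative errno encoded in the value */

            /* ... use the mapping ... */
            vm_munmap(addr, len);
            return 0;
    }
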
diff --git a/fs/binfmt_misc.c b/fs/binfmt_misc.c
index 613aa06..790b3cd 100644
--- a/fs/binfmt_misc.c
+++ b/fs/binfmt_misc.c
@@ -505,7 +505,7 @@
 
 static void bm_evict_inode(struct inode *inode)
 {
-	end_writeback(inode);
+	clear_inode(inode);
 	kfree(inode->i_private);
 }
 
diff --git a/fs/bio.c b/fs/bio.c
index 84da885..73922ab 100644
--- a/fs/bio.c
+++ b/fs/bio.c
@@ -19,12 +19,14 @@
 #include <linux/swap.h>
 #include <linux/bio.h>
 #include <linux/blkdev.h>
+#include <linux/iocontext.h>
 #include <linux/slab.h>
 #include <linux/init.h>
 #include <linux/kernel.h>
 #include <linux/export.h>
 #include <linux/mempool.h>
 #include <linux/workqueue.h>
+#include <linux/cgroup.h>
 #include <scsi/sg.h>		/* for struct sg_iovec */
 
 #include <trace/events/block.h>
@@ -418,6 +420,7 @@
 	 * last put frees it
 	 */
 	if (atomic_dec_and_test(&bio->bi_cnt)) {
+		bio_disassociate_task(bio);
 		bio->bi_next = NULL;
 		bio->bi_destructor(bio);
 	}
@@ -1646,6 +1649,64 @@
 }
 EXPORT_SYMBOL(bioset_create);
 
+#ifdef CONFIG_BLK_CGROUP
+/**
+ * bio_associate_current - associate a bio with %current
+ * @bio: target bio
+ *
+ * Associate @bio with %current if it hasn't been associated yet.  Block
+ * layer will treat @bio as if it were issued by %current no matter which
+ * task actually issues it.
+ *
+ * This function takes an extra reference of @task's io_context and blkcg
+ * which will be put when @bio is released.  The caller must own @bio,
+ * ensure %current->io_context exists, and is responsible for synchronizing
+ * calls to this function.
+ */
+int bio_associate_current(struct bio *bio)
+{
+	struct io_context *ioc;
+	struct cgroup_subsys_state *css;
+
+	if (bio->bi_ioc)
+		return -EBUSY;
+
+	ioc = current->io_context;
+	if (!ioc)
+		return -ENOENT;
+
+	/* acquire active ref on @ioc and associate */
+	get_io_context_active(ioc);
+	bio->bi_ioc = ioc;
+
+	/* associate blkcg if exists */
+	rcu_read_lock();
+	css = task_subsys_state(current, blkio_subsys_id);
+	if (css && css_tryget(css))
+		bio->bi_css = css;
+	rcu_read_unlock();
+
+	return 0;
+}
+
+/**
+ * bio_disassociate_task - undo bio_associate_current()
+ * @bio: target bio
+ */
+void bio_disassociate_task(struct bio *bio)
+{
+	if (bio->bi_ioc) {
+		put_io_context(bio->bi_ioc);
+		bio->bi_ioc = NULL;
+	}
+	if (bio->bi_css) {
+		css_put(bio->bi_css);
+		bio->bi_css = NULL;
+	}
+}
+
+#endif /* CONFIG_BLK_CGROUP */
+
 static void __init biovec_init_slabs(void)
 {
 	int i;
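
The new helpers let a bio be charged to the submitting task's io_context and blkcg even when it is later issued by another thread. A hedged usage sketch, assuming CONFIG_BLK_CGROUP and that the caller has made sure current->io_context exists, as the comment above requires:

    #include <linux/kernel.h>
    #include <linux/bio.h>

    /* Illustrative: tag the bio with the submitting task before handing
     * it to a worker that will eventually call submit_bio(). */
    static void example_tag_bio(struct bio *bio)
    {
            /* -ENOENT if current has no io_context, -EBUSY if already tagged */
            if (bio_associate_current(bio))
                    pr_debug("bio stays attributed to the issuing task\n");
    }
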
diff --git a/fs/block_dev.c b/fs/block_dev.c
index ba11c30..c2bbe1f 100644
--- a/fs/block_dev.c
+++ b/fs/block_dev.c
@@ -487,7 +487,7 @@
 	struct list_head *p;
 	truncate_inode_pages(&inode->i_data, 0);
 	invalidate_inode_buffers(inode); /* is it needed here? */
-	end_writeback(inode);
+	clear_inode(inode);
 	spin_lock(&bdev_lock);
 	while ( (p = bdev->bd_inodes.next) != &bdev->bd_inodes ) {
 		__bd_forget(list_entry(p, struct inode, i_devices));
diff --git a/fs/btrfs/acl.c b/fs/btrfs/acl.c
index 89b156d..761e2cd 100644
--- a/fs/btrfs/acl.c
+++ b/fs/btrfs/acl.c
@@ -227,7 +227,11 @@
 		if (ret > 0) {
 			/* we need an acl */
 			ret = btrfs_set_acl(trans, inode, acl, ACL_TYPE_ACCESS);
+		} else {
+			cache_no_acl(inode);
 		}
+	} else {
+		cache_no_acl(inode);
 	}
 failed:
 	posix_acl_release(acl);
diff --git a/fs/btrfs/backref.c b/fs/btrfs/backref.c
index bcec067..7301cdb 100644
--- a/fs/btrfs/backref.c
+++ b/fs/btrfs/backref.c
@@ -24,22 +24,135 @@
 #include "delayed-ref.h"
 #include "locking.h"
 
+struct extent_inode_elem {
+	u64 inum;
+	u64 offset;
+	struct extent_inode_elem *next;
+};
+
+static int check_extent_in_eb(struct btrfs_key *key, struct extent_buffer *eb,
+				struct btrfs_file_extent_item *fi,
+				u64 extent_item_pos,
+				struct extent_inode_elem **eie)
+{
+	u64 data_offset;
+	u64 data_len;
+	struct extent_inode_elem *e;
+
+	data_offset = btrfs_file_extent_offset(eb, fi);
+	data_len = btrfs_file_extent_num_bytes(eb, fi);
+
+	if (extent_item_pos < data_offset ||
+	    extent_item_pos >= data_offset + data_len)
+		return 1;
+
+	e = kmalloc(sizeof(*e), GFP_NOFS);
+	if (!e)
+		return -ENOMEM;
+
+	e->next = *eie;
+	e->inum = key->objectid;
+	e->offset = key->offset + (extent_item_pos - data_offset);
+	*eie = e;
+
+	return 0;
+}
+
+static int find_extent_in_eb(struct extent_buffer *eb, u64 wanted_disk_byte,
+				u64 extent_item_pos,
+				struct extent_inode_elem **eie)
+{
+	u64 disk_byte;
+	struct btrfs_key key;
+	struct btrfs_file_extent_item *fi;
+	int slot;
+	int nritems;
+	int extent_type;
+	int ret;
+
+	/*
+	 * from the shared data ref, we only have the leaf but we need
+	 * the key. thus, we must look into all items and see that we
+	 * find one (some) with a reference to our extent item.
+	 */
+	nritems = btrfs_header_nritems(eb);
+	for (slot = 0; slot < nritems; ++slot) {
+		btrfs_item_key_to_cpu(eb, &key, slot);
+		if (key.type != BTRFS_EXTENT_DATA_KEY)
+			continue;
+		fi = btrfs_item_ptr(eb, slot, struct btrfs_file_extent_item);
+		extent_type = btrfs_file_extent_type(eb, fi);
+		if (extent_type == BTRFS_FILE_EXTENT_INLINE)
+			continue;
+		/* don't skip BTRFS_FILE_EXTENT_PREALLOC, we can handle that */
+		disk_byte = btrfs_file_extent_disk_bytenr(eb, fi);
+		if (disk_byte != wanted_disk_byte)
+			continue;
+
+		ret = check_extent_in_eb(&key, eb, fi, extent_item_pos, eie);
+		if (ret < 0)
+			return ret;
+	}
+
+	return 0;
+}
+
 /*
  * this structure records all encountered refs on the way up to the root
  */
 struct __prelim_ref {
 	struct list_head list;
 	u64 root_id;
-	struct btrfs_key key;
+	struct btrfs_key key_for_search;
 	int level;
 	int count;
+	struct extent_inode_elem *inode_list;
 	u64 parent;
 	u64 wanted_disk_byte;
 };
 
+/*
+ * the rules for all callers of this function are:
+ * - obtaining the parent is the goal
+ * - if you add a key, you must know that it is a correct key
+ * - if you cannot add the parent or a correct key, then we will look into the
+ *   block later to set a correct key
+ *
+ * delayed refs
+ * ============
+ *        backref type | shared | indirect | shared | indirect
+ * information         |   tree |     tree |   data |     data
+ * --------------------+--------+----------+--------+----------
+ *      parent logical |    y   |     -    |    -   |     -
+ *      key to resolve |    -   |     y    |    y   |     y
+ *  tree block logical |    -   |     -    |    -   |     -
+ *  root for resolving |    y   |     y    |    y   |     y
+ *
+ * - column 1:       we've the parent -> done
+ * - column 2, 3, 4: we use the key to find the parent
+ *
+ * on disk refs (inline or keyed)
+ * ==============================
+ *        backref type | shared | indirect | shared | indirect
+ * information         |   tree |     tree |   data |     data
+ * --------------------+--------+----------+--------+----------
+ *      parent logical |    y   |     -    |    y   |     -
+ *      key to resolve |    -   |     -    |    -   |     y
+ *  tree block logical |    y   |     y    |    y   |     y
+ *  root for resolving |    -   |     y    |    y   |     y
+ *
+ * - column 1, 3: we've the parent -> done
+ * - column 2:    we take the first key from the block to find the parent
+ *                (see __add_missing_keys)
+ * - column 4:    we use the key to find the parent
+ *
+ * additional information that's available but not required to find the parent
+ * block might help in merging entries to gain some speed.
+ */
+
 static int __add_prelim_ref(struct list_head *head, u64 root_id,
-			    struct btrfs_key *key, int level, u64 parent,
-			    u64 wanted_disk_byte, int count)
+			    struct btrfs_key *key, int level,
+			    u64 parent, u64 wanted_disk_byte, int count)
 {
 	struct __prelim_ref *ref;
 
@@ -50,10 +163,11 @@
 
 	ref->root_id = root_id;
 	if (key)
-		ref->key = *key;
+		ref->key_for_search = *key;
 	else
-		memset(&ref->key, 0, sizeof(ref->key));
+		memset(&ref->key_for_search, 0, sizeof(ref->key_for_search));
 
+	ref->inode_list = NULL;
 	ref->level = level;
 	ref->count = count;
 	ref->parent = parent;
@@ -64,52 +178,75 @@
 }
 
 static int add_all_parents(struct btrfs_root *root, struct btrfs_path *path,
-				struct ulist *parents,
-				struct extent_buffer *eb, int level,
-				u64 wanted_objectid, u64 wanted_disk_byte)
+				struct ulist *parents, int level,
+				struct btrfs_key *key_for_search, u64 time_seq,
+				u64 wanted_disk_byte,
+				const u64 *extent_item_pos)
 {
-	int ret;
+	int ret = 0;
 	int slot;
-	struct btrfs_file_extent_item *fi;
+	struct extent_buffer *eb;
 	struct btrfs_key key;
+	struct btrfs_file_extent_item *fi;
+	struct extent_inode_elem *eie = NULL;
 	u64 disk_byte;
 
-add_parent:
-	ret = ulist_add(parents, eb->start, 0, GFP_NOFS);
-	if (ret < 0)
-		return ret;
-
-	if (level != 0)
-		return 0;
-
-	/*
-	 * if the current leaf is full with EXTENT_DATA items, we must
-	 * check the next one if that holds a reference as well.
-	 * ref->count cannot be used to skip this check.
-	 * repeat this until we don't find any additional EXTENT_DATA items.
-	 */
-	while (1) {
-		ret = btrfs_next_leaf(root, path);
+	if (level != 0) {
+		eb = path->nodes[level];
+		ret = ulist_add(parents, eb->start, 0, GFP_NOFS);
 		if (ret < 0)
 			return ret;
-		if (ret)
-			return 0;
-
-		eb = path->nodes[0];
-		for (slot = 0; slot < btrfs_header_nritems(eb); ++slot) {
-			btrfs_item_key_to_cpu(eb, &key, slot);
-			if (key.objectid != wanted_objectid ||
-			    key.type != BTRFS_EXTENT_DATA_KEY)
-				return 0;
-			fi = btrfs_item_ptr(eb, slot,
-						struct btrfs_file_extent_item);
-			disk_byte = btrfs_file_extent_disk_bytenr(eb, fi);
-			if (disk_byte == wanted_disk_byte)
-				goto add_parent;
-		}
+		return 0;
 	}
 
-	return 0;
+	/*
+	 * We normally enter this function with the path already pointing to
+	 * the first item to check. But sometimes, we may enter it with
+	 * slot==nritems. In that case, go to the next leaf before we continue.
+	 */
+	if (path->slots[0] >= btrfs_header_nritems(path->nodes[0]))
+		ret = btrfs_next_old_leaf(root, path, time_seq);
+
+	while (!ret) {
+		eb = path->nodes[0];
+		slot = path->slots[0];
+
+		btrfs_item_key_to_cpu(eb, &key, slot);
+
+		if (key.objectid != key_for_search->objectid ||
+		    key.type != BTRFS_EXTENT_DATA_KEY)
+			break;
+
+		fi = btrfs_item_ptr(eb, slot, struct btrfs_file_extent_item);
+		disk_byte = btrfs_file_extent_disk_bytenr(eb, fi);
+
+		if (disk_byte == wanted_disk_byte) {
+			eie = NULL;
+			if (extent_item_pos) {
+				ret = check_extent_in_eb(&key, eb, fi,
+						*extent_item_pos,
+						&eie);
+				if (ret < 0)
+					break;
+			}
+			if (!ret) {
+				ret = ulist_add(parents, eb->start,
+						(unsigned long)eie, GFP_NOFS);
+				if (ret < 0)
+					break;
+				if (!extent_item_pos) {
+					ret = btrfs_next_old_leaf(root, path,
+							time_seq);
+					continue;
+				}
+			}
+		}
+		ret = btrfs_next_old_item(root, path, time_seq);
+	}
+
+	if (ret > 0)
+		ret = 0;
+	return ret;
 }
 
 /*
@@ -118,13 +255,14 @@
  */
 static int __resolve_indirect_ref(struct btrfs_fs_info *fs_info,
 					int search_commit_root,
+					u64 time_seq,
 					struct __prelim_ref *ref,
-					struct ulist *parents)
+					struct ulist *parents,
+					const u64 *extent_item_pos)
 {
 	struct btrfs_path *path;
 	struct btrfs_root *root;
 	struct btrfs_key root_key;
-	struct btrfs_key key = {0};
 	struct extent_buffer *eb;
 	int ret = 0;
 	int root_level;
@@ -152,12 +290,13 @@
 		goto out;
 
 	path->lowest_level = level;
-	ret = btrfs_search_slot(NULL, root, &ref->key, path, 0, 0);
+	ret = btrfs_search_old_slot(root, &ref->key_for_search, path, time_seq);
 	pr_debug("search slot in root %llu (level %d, ref count %d) returned "
 		 "%d for key (%llu %u %llu)\n",
 		 (unsigned long long)ref->root_id, level, ref->count, ret,
-		 (unsigned long long)ref->key.objectid, ref->key.type,
-		 (unsigned long long)ref->key.offset);
+		 (unsigned long long)ref->key_for_search.objectid,
+		 ref->key_for_search.type,
+		 (unsigned long long)ref->key_for_search.offset);
 	if (ret < 0)
 		goto out;
 
@@ -168,20 +307,9 @@
 		goto out;
 	}
 
-	if (level == 0) {
-		if (ret == 1 && path->slots[0] >= btrfs_header_nritems(eb)) {
-			ret = btrfs_next_leaf(root, path);
-			if (ret)
-				goto out;
-			eb = path->nodes[0];
-		}
-
-		btrfs_item_key_to_cpu(eb, &key, path->slots[0]);
-	}
-
-	/* the last two parameters will only be used for level == 0 */
-	ret = add_all_parents(root, path, parents, eb, level, key.objectid,
-				ref->wanted_disk_byte);
+	ret = add_all_parents(root, path, parents, level, &ref->key_for_search,
+				time_seq, ref->wanted_disk_byte,
+				extent_item_pos);
 out:
 	btrfs_free_path(path);
 	return ret;
@@ -191,8 +319,9 @@
  * resolve all indirect backrefs from the list
  */
 static int __resolve_indirect_refs(struct btrfs_fs_info *fs_info,
-				   int search_commit_root,
-				   struct list_head *head)
+				   int search_commit_root, u64 time_seq,
+				   struct list_head *head,
+				   const u64 *extent_item_pos)
 {
 	int err;
 	int ret = 0;
@@ -201,6 +330,7 @@
 	struct __prelim_ref *new_ref;
 	struct ulist *parents;
 	struct ulist_node *node;
+	struct ulist_iterator uiter;
 
 	parents = ulist_alloc(GFP_NOFS);
 	if (!parents)
@@ -217,7 +347,8 @@
 		if (ref->count == 0)
 			continue;
 		err = __resolve_indirect_ref(fs_info, search_commit_root,
-					     ref, parents);
+					     time_seq, ref, parents,
+					     extent_item_pos);
 		if (err) {
 			if (ret == 0)
 				ret = err;
@@ -225,11 +356,14 @@
 		}
 
 		/* we put the first parent into the ref at hand */
-		node = ulist_next(parents, NULL);
+		ULIST_ITER_INIT(&uiter);
+		node = ulist_next(parents, &uiter);
 		ref->parent = node ? node->val : 0;
+		ref->inode_list =
+			node ? (struct extent_inode_elem *)node->aux : 0;
 
 		/* additional parents require new refs being added here */
-		while ((node = ulist_next(parents, node))) {
+		while ((node = ulist_next(parents, &uiter))) {
 			new_ref = kmalloc(sizeof(*new_ref), GFP_NOFS);
 			if (!new_ref) {
 				ret = -ENOMEM;
@@ -237,6 +371,8 @@
 			}
 			memcpy(new_ref, ref, sizeof(*ref));
 			new_ref->parent = node->val;
+			new_ref->inode_list =
+					(struct extent_inode_elem *)node->aux;
 			list_add(&new_ref->list, &ref->list);
 		}
 		ulist_reinit(parents);
@@ -246,10 +382,65 @@
 	return ret;
 }
 
+static inline int ref_for_same_block(struct __prelim_ref *ref1,
+				     struct __prelim_ref *ref2)
+{
+	if (ref1->level != ref2->level)
+		return 0;
+	if (ref1->root_id != ref2->root_id)
+		return 0;
+	if (ref1->key_for_search.type != ref2->key_for_search.type)
+		return 0;
+	if (ref1->key_for_search.objectid != ref2->key_for_search.objectid)
+		return 0;
+	if (ref1->key_for_search.offset != ref2->key_for_search.offset)
+		return 0;
+	if (ref1->parent != ref2->parent)
+		return 0;
+
+	return 1;
+}
+
+/*
+ * read tree blocks and add keys where required.
+ */
+static int __add_missing_keys(struct btrfs_fs_info *fs_info,
+			      struct list_head *head)
+{
+	struct list_head *pos;
+	struct extent_buffer *eb;
+
+	list_for_each(pos, head) {
+		struct __prelim_ref *ref;
+		ref = list_entry(pos, struct __prelim_ref, list);
+
+		if (ref->parent)
+			continue;
+		if (ref->key_for_search.type)
+			continue;
+		BUG_ON(!ref->wanted_disk_byte);
+		eb = read_tree_block(fs_info->tree_root, ref->wanted_disk_byte,
+				     fs_info->tree_root->leafsize, 0);
+		BUG_ON(!eb);
+		btrfs_tree_read_lock(eb);
+		if (btrfs_header_level(eb) == 0)
+			btrfs_item_key_to_cpu(eb, &ref->key_for_search, 0);
+		else
+			btrfs_node_key_to_cpu(eb, &ref->key_for_search, 0);
+		btrfs_tree_read_unlock(eb);
+		free_extent_buffer(eb);
+	}
+	return 0;
+}
+
 /*
  * merge two lists of backrefs and adjust counts accordingly
  *
  * mode = 1: merge identical keys, if key is set
+ *    FIXME: if we add more keys in __add_prelim_ref, we can merge more here.
+ *           additionally, we could even add a key range for the blocks we
+ *           looked into to merge even more (-> replace unresolved refs by those
+ *           having a parent).
  * mode = 2: merge identical parents
  */
 static int __merge_refs(struct list_head *head, int mode)
@@ -263,20 +454,21 @@
 
 		ref1 = list_entry(pos1, struct __prelim_ref, list);
 
-		if (mode == 1 && ref1->key.type == 0)
-			continue;
 		for (pos2 = pos1->next, n2 = pos2->next; pos2 != head;
 		     pos2 = n2, n2 = pos2->next) {
 			struct __prelim_ref *ref2;
+			struct __prelim_ref *xchg;
 
 			ref2 = list_entry(pos2, struct __prelim_ref, list);
 
 			if (mode == 1) {
-				if (memcmp(&ref1->key, &ref2->key,
-					   sizeof(ref1->key)) ||
-				    ref1->level != ref2->level ||
-				    ref1->root_id != ref2->root_id)
+				if (!ref_for_same_block(ref1, ref2))
 					continue;
+				if (!ref1->parent && ref2->parent) {
+					xchg = ref1;
+					ref1 = ref2;
+					ref2 = xchg;
+				}
 				ref1->count += ref2->count;
 			} else {
 				if (ref1->parent != ref2->parent)
@@ -296,16 +488,17 @@
  * smaller or equal that seq to the list
  */
 static int __add_delayed_refs(struct btrfs_delayed_ref_head *head, u64 seq,
-			      struct btrfs_key *info_key,
 			      struct list_head *prefs)
 {
 	struct btrfs_delayed_extent_op *extent_op = head->extent_op;
 	struct rb_node *n = &head->node.rb_node;
+	struct btrfs_key key;
+	struct btrfs_key op_key = {0};
 	int sgn;
 	int ret = 0;
 
 	if (extent_op && extent_op->update_key)
-		btrfs_disk_key_to_cpu(info_key, &extent_op->key);
+		btrfs_disk_key_to_cpu(&op_key, &extent_op->key);
 
 	while ((n = rb_prev(n))) {
 		struct btrfs_delayed_ref_node *node;
@@ -337,7 +530,7 @@
 			struct btrfs_delayed_tree_ref *ref;
 
 			ref = btrfs_delayed_node_to_tree_ref(node);
-			ret = __add_prelim_ref(prefs, ref->root, info_key,
+			ret = __add_prelim_ref(prefs, ref->root, &op_key,
 					       ref->level + 1, 0, node->bytenr,
 					       node->ref_mod * sgn);
 			break;
@@ -346,7 +539,7 @@
 			struct btrfs_delayed_tree_ref *ref;
 
 			ref = btrfs_delayed_node_to_tree_ref(node);
-			ret = __add_prelim_ref(prefs, ref->root, info_key,
+			ret = __add_prelim_ref(prefs, ref->root, NULL,
 					       ref->level + 1, ref->parent,
 					       node->bytenr,
 					       node->ref_mod * sgn);
@@ -354,8 +547,6 @@
 		}
 		case BTRFS_EXTENT_DATA_REF_KEY: {
 			struct btrfs_delayed_data_ref *ref;
-			struct btrfs_key key;
-
 			ref = btrfs_delayed_node_to_data_ref(node);
 
 			key.objectid = ref->objectid;
@@ -368,7 +559,6 @@
 		}
 		case BTRFS_SHARED_DATA_REF_KEY: {
 			struct btrfs_delayed_data_ref *ref;
-			struct btrfs_key key;
 
 			ref = btrfs_delayed_node_to_data_ref(node);
 
@@ -394,8 +584,7 @@
  */
 static int __add_inline_refs(struct btrfs_fs_info *fs_info,
 			     struct btrfs_path *path, u64 bytenr,
-			     struct btrfs_key *info_key, int *info_level,
-			     struct list_head *prefs)
+			     int *info_level, struct list_head *prefs)
 {
 	int ret = 0;
 	int slot;
@@ -411,7 +600,7 @@
 	 * enumerate all inline refs
 	 */
 	leaf = path->nodes[0];
-	slot = path->slots[0] - 1;
+	slot = path->slots[0];
 
 	item_size = btrfs_item_size_nr(leaf, slot);
 	BUG_ON(item_size < sizeof(*ei));
@@ -424,12 +613,9 @@
 
 	if (flags & BTRFS_EXTENT_FLAG_TREE_BLOCK) {
 		struct btrfs_tree_block_info *info;
-		struct btrfs_disk_key disk_key;
 
 		info = (struct btrfs_tree_block_info *)ptr;
 		*info_level = btrfs_tree_block_level(leaf, info);
-		btrfs_tree_block_key(leaf, info, &disk_key);
-		btrfs_disk_key_to_cpu(info_key, &disk_key);
 		ptr += sizeof(struct btrfs_tree_block_info);
 		BUG_ON(ptr > end);
 	} else {
@@ -447,7 +633,7 @@
 
 		switch (type) {
 		case BTRFS_SHARED_BLOCK_REF_KEY:
-			ret = __add_prelim_ref(prefs, 0, info_key,
+			ret = __add_prelim_ref(prefs, 0, NULL,
 						*info_level + 1, offset,
 						bytenr, 1);
 			break;
@@ -462,8 +648,9 @@
 			break;
 		}
 		case BTRFS_TREE_BLOCK_REF_KEY:
-			ret = __add_prelim_ref(prefs, offset, info_key,
-					       *info_level + 1, 0, bytenr, 1);
+			ret = __add_prelim_ref(prefs, offset, NULL,
+					       *info_level + 1, 0,
+					       bytenr, 1);
 			break;
 		case BTRFS_EXTENT_DATA_REF_KEY: {
 			struct btrfs_extent_data_ref *dref;
@@ -477,8 +664,8 @@
 			key.type = BTRFS_EXTENT_DATA_KEY;
 			key.offset = btrfs_extent_data_ref_offset(leaf, dref);
 			root = btrfs_extent_data_ref_root(leaf, dref);
-			ret = __add_prelim_ref(prefs, root, &key, 0, 0, bytenr,
-						count);
+			ret = __add_prelim_ref(prefs, root, &key, 0, 0,
+					       bytenr, count);
 			break;
 		}
 		default:
@@ -496,8 +683,7 @@
  */
 static int __add_keyed_refs(struct btrfs_fs_info *fs_info,
 			    struct btrfs_path *path, u64 bytenr,
-			    struct btrfs_key *info_key, int info_level,
-			    struct list_head *prefs)
+			    int info_level, struct list_head *prefs)
 {
 	struct btrfs_root *extent_root = fs_info->extent_root;
 	int ret;
@@ -527,7 +713,7 @@
 
 		switch (key.type) {
 		case BTRFS_SHARED_BLOCK_REF_KEY:
-			ret = __add_prelim_ref(prefs, 0, info_key,
+			ret = __add_prelim_ref(prefs, 0, NULL,
 						info_level + 1, key.offset,
 						bytenr, 1);
 			break;
@@ -543,8 +729,9 @@
 			break;
 		}
 		case BTRFS_TREE_BLOCK_REF_KEY:
-			ret = __add_prelim_ref(prefs, key.offset, info_key,
-						info_level + 1, 0, bytenr, 1);
+			ret = __add_prelim_ref(prefs, key.offset, NULL,
+					       info_level + 1, 0,
+					       bytenr, 1);
 			break;
 		case BTRFS_EXTENT_DATA_REF_KEY: {
 			struct btrfs_extent_data_ref *dref;
@@ -560,7 +747,7 @@
 			key.offset = btrfs_extent_data_ref_offset(leaf, dref);
 			root = btrfs_extent_data_ref_root(leaf, dref);
 			ret = __add_prelim_ref(prefs, root, &key, 0, 0,
-						bytenr, count);
+					       bytenr, count);
 			break;
 		}
 		default:
@@ -582,11 +769,12 @@
  */
 static int find_parent_nodes(struct btrfs_trans_handle *trans,
 			     struct btrfs_fs_info *fs_info, u64 bytenr,
-			     u64 seq, struct ulist *refs, struct ulist *roots)
+			     u64 delayed_ref_seq, u64 time_seq,
+			     struct ulist *refs, struct ulist *roots,
+			     const u64 *extent_item_pos)
 {
 	struct btrfs_key key;
 	struct btrfs_path *path;
-	struct btrfs_key info_key = { 0 };
 	struct btrfs_delayed_ref_root *delayed_refs = NULL;
 	struct btrfs_delayed_ref_head *head;
 	int info_level = 0;
@@ -645,7 +833,7 @@
 				btrfs_put_delayed_ref(&head->node);
 				goto again;
 			}
-			ret = __add_delayed_refs(head, seq, &info_key,
+			ret = __add_delayed_refs(head, delayed_ref_seq,
 						 &prefs_delayed);
 			if (ret) {
 				spin_unlock(&delayed_refs->lock);
@@ -659,16 +847,17 @@
 		struct extent_buffer *leaf;
 		int slot;
 
+		path->slots[0]--;
 		leaf = path->nodes[0];
-		slot = path->slots[0] - 1;
+		slot = path->slots[0];
 		btrfs_item_key_to_cpu(leaf, &key, slot);
 		if (key.objectid == bytenr &&
 		    key.type == BTRFS_EXTENT_ITEM_KEY) {
 			ret = __add_inline_refs(fs_info, path, bytenr,
-						&info_key, &info_level, &prefs);
+						&info_level, &prefs);
 			if (ret)
 				goto out;
-			ret = __add_keyed_refs(fs_info, path, bytenr, &info_key,
+			ret = __add_keyed_refs(fs_info, path, bytenr,
 					       info_level, &prefs);
 			if (ret)
 				goto out;
@@ -676,21 +865,18 @@
 	}
 	btrfs_release_path(path);
 
-	/*
-	 * when adding the delayed refs above, the info_key might not have
-	 * been known yet. Go over the list and replace the missing keys
-	 */
-	list_for_each_entry(ref, &prefs_delayed, list) {
-		if ((ref->key.offset | ref->key.type | ref->key.objectid) == 0)
-			memcpy(&ref->key, &info_key, sizeof(ref->key));
-	}
 	list_splice_init(&prefs_delayed, &prefs);
 
+	ret = __add_missing_keys(fs_info, &prefs);
+	if (ret)
+		goto out;
+
 	ret = __merge_refs(&prefs, 1);
 	if (ret)
 		goto out;
 
-	ret = __resolve_indirect_refs(fs_info, search_commit_root, &prefs);
+	ret = __resolve_indirect_refs(fs_info, search_commit_root, time_seq,
+				      &prefs, extent_item_pos);
 	if (ret)
 		goto out;
 
@@ -709,7 +895,33 @@
 			BUG_ON(ret < 0);
 		}
 		if (ref->count && ref->parent) {
-			ret = ulist_add(refs, ref->parent, 0, GFP_NOFS);
+			struct extent_inode_elem *eie = NULL;
+			if (extent_item_pos && !ref->inode_list) {
+				u32 bsz;
+				struct extent_buffer *eb;
+				bsz = btrfs_level_size(fs_info->extent_root,
+							info_level);
+				eb = read_tree_block(fs_info->extent_root,
+							   ref->parent, bsz, 0);
+				BUG_ON(!eb);
+				ret = find_extent_in_eb(eb, bytenr,
+							*extent_item_pos, &eie);
+				ref->inode_list = eie;
+				free_extent_buffer(eb);
+			}
+			ret = ulist_add_merge(refs, ref->parent,
+					      (unsigned long)ref->inode_list,
+					      (unsigned long *)&eie, GFP_NOFS);
+			if (!ret && extent_item_pos) {
+				/*
+				 * we've recorded that parent, so we must extend
+				 * its inode list here
+				 */
+				BUG_ON(!eie);
+				while (eie->next)
+					eie = eie->next;
+				eie->next = ref->inode_list;
+			}
 			BUG_ON(ret < 0);
 		}
 		kfree(ref);
@@ -734,6 +946,28 @@
 	return ret;
 }
 
+static void free_leaf_list(struct ulist *blocks)
+{
+	struct ulist_node *node = NULL;
+	struct extent_inode_elem *eie;
+	struct extent_inode_elem *eie_next;
+	struct ulist_iterator uiter;
+
+	ULIST_ITER_INIT(&uiter);
+	while ((node = ulist_next(blocks, &uiter))) {
+		if (!node->aux)
+			continue;
+		eie = (struct extent_inode_elem *)node->aux;
+		for (; eie; eie = eie_next) {
+			eie_next = eie->next;
+			kfree(eie);
+		}
+		node->aux = 0;
+	}
+
+	ulist_free(blocks);
+}
+
 /*
  * Finds all leafs with a reference to the specified combination of bytenr and
  * offset. key_list_head will point to a list of corresponding keys (caller must
@@ -744,7 +978,9 @@
  */
 static int btrfs_find_all_leafs(struct btrfs_trans_handle *trans,
 				struct btrfs_fs_info *fs_info, u64 bytenr,
-				u64 num_bytes, u64 seq, struct ulist **leafs)
+				u64 delayed_ref_seq, u64 time_seq,
+				struct ulist **leafs,
+				const u64 *extent_item_pos)
 {
 	struct ulist *tmp;
 	int ret;
@@ -758,11 +994,12 @@
 		return -ENOMEM;
 	}
 
-	ret = find_parent_nodes(trans, fs_info, bytenr, seq, *leafs, tmp);
+	ret = find_parent_nodes(trans, fs_info, bytenr, delayed_ref_seq,
+				time_seq, *leafs, tmp, extent_item_pos);
 	ulist_free(tmp);
 
 	if (ret < 0 && ret != -ENOENT) {
-		ulist_free(*leafs);
+		free_leaf_list(*leafs);
 		return ret;
 	}
 
@@ -784,10 +1021,12 @@
  */
 int btrfs_find_all_roots(struct btrfs_trans_handle *trans,
 				struct btrfs_fs_info *fs_info, u64 bytenr,
-				u64 num_bytes, u64 seq, struct ulist **roots)
+				u64 delayed_ref_seq, u64 time_seq,
+				struct ulist **roots)
 {
 	struct ulist *tmp;
 	struct ulist_node *node = NULL;
+	struct ulist_iterator uiter;
 	int ret;
 
 	tmp = ulist_alloc(GFP_NOFS);
@@ -799,15 +1038,16 @@
 		return -ENOMEM;
 	}
 
+	ULIST_ITER_INIT(&uiter);
 	while (1) {
-		ret = find_parent_nodes(trans, fs_info, bytenr, seq,
-					tmp, *roots);
+		ret = find_parent_nodes(trans, fs_info, bytenr, delayed_ref_seq,
+					time_seq, tmp, *roots, NULL);
 		if (ret < 0 && ret != -ENOENT) {
 			ulist_free(tmp);
 			ulist_free(*roots);
 			return ret;
 		}
-		node = ulist_next(tmp, node);
+		node = ulist_next(tmp, &uiter);
 		if (!node)
 			break;
 		bytenr = node->val;
@@ -1093,67 +1333,25 @@
 	return 0;
 }
 
-static int iterate_leaf_refs(struct btrfs_fs_info *fs_info, u64 logical,
-				u64 orig_extent_item_objectid,
-				u64 extent_item_pos, u64 root,
+static int iterate_leaf_refs(struct extent_inode_elem *inode_list,
+				u64 root, u64 extent_item_objectid,
 				iterate_extent_inodes_t *iterate, void *ctx)
 {
-	u64 disk_byte;
-	struct btrfs_key key;
-	struct btrfs_file_extent_item *fi;
-	struct extent_buffer *eb;
-	int slot;
-	int nritems;
+	struct extent_inode_elem *eie;
 	int ret = 0;
-	int extent_type;
-	u64 data_offset;
-	u64 data_len;
 
-	eb = read_tree_block(fs_info->tree_root, logical,
-				fs_info->tree_root->leafsize, 0);
-	if (!eb)
-		return -EIO;
-
-	/*
-	 * from the shared data ref, we only have the leaf but we need
-	 * the key. thus, we must look into all items and see that we
-	 * find one (some) with a reference to our extent item.
-	 */
-	nritems = btrfs_header_nritems(eb);
-	for (slot = 0; slot < nritems; ++slot) {
-		btrfs_item_key_to_cpu(eb, &key, slot);
-		if (key.type != BTRFS_EXTENT_DATA_KEY)
-			continue;
-		fi = btrfs_item_ptr(eb, slot, struct btrfs_file_extent_item);
-		extent_type = btrfs_file_extent_type(eb, fi);
-		if (extent_type == BTRFS_FILE_EXTENT_INLINE)
-			continue;
-		/* don't skip BTRFS_FILE_EXTENT_PREALLOC, we can handle that */
-		disk_byte = btrfs_file_extent_disk_bytenr(eb, fi);
-		if (disk_byte != orig_extent_item_objectid)
-			continue;
-
-		data_offset = btrfs_file_extent_offset(eb, fi);
-		data_len = btrfs_file_extent_num_bytes(eb, fi);
-
-		if (extent_item_pos < data_offset ||
-		    extent_item_pos >= data_offset + data_len)
-			continue;
-
+	for (eie = inode_list; eie; eie = eie->next) {
 		pr_debug("ref for %llu resolved, key (%llu EXTEND_DATA %llu), "
-				"root %llu\n", orig_extent_item_objectid,
-				key.objectid, key.offset, root);
-		ret = iterate(key.objectid,
-				key.offset + (extent_item_pos - data_offset),
-				root, ctx);
+			 "root %llu\n", extent_item_objectid,
+			 eie->inum, eie->offset, root);
+		ret = iterate(eie->inum, eie->offset, root, ctx);
 		if (ret) {
-			pr_debug("stopping iteration because ret=%d\n", ret);
+			pr_debug("stopping iteration for %llu due to ret=%d\n",
+				 extent_item_objectid, ret);
 			break;
 		}
 	}
 
-	free_extent_buffer(eb);
-
 	return ret;
 }
 
@@ -1175,7 +1373,10 @@
 	struct ulist *roots = NULL;
 	struct ulist_node *ref_node = NULL;
 	struct ulist_node *root_node = NULL;
-	struct seq_list seq_elem;
+	struct seq_list seq_elem = {};
+	struct seq_list tree_mod_seq_elem = {};
+	struct ulist_iterator ref_uiter;
+	struct ulist_iterator root_uiter;
 	struct btrfs_delayed_ref_root *delayed_refs = NULL;
 
 	pr_debug("resolving all inodes for extent %llu\n",
@@ -1192,34 +1393,41 @@
 		spin_lock(&delayed_refs->lock);
 		btrfs_get_delayed_seq(delayed_refs, &seq_elem);
 		spin_unlock(&delayed_refs->lock);
+		btrfs_get_tree_mod_seq(fs_info, &tree_mod_seq_elem);
 	}
 
 	ret = btrfs_find_all_leafs(trans, fs_info, extent_item_objectid,
-				   extent_item_pos, seq_elem.seq,
-				   &refs);
-
+				   seq_elem.seq, tree_mod_seq_elem.seq, &refs,
+				   &extent_item_pos);
 	if (ret)
 		goto out;
 
-	while (!ret && (ref_node = ulist_next(refs, ref_node))) {
-		ret = btrfs_find_all_roots(trans, fs_info, ref_node->val, -1,
-						seq_elem.seq, &roots);
+	ULIST_ITER_INIT(&ref_uiter);
+	while (!ret && (ref_node = ulist_next(refs, &ref_uiter))) {
+		ret = btrfs_find_all_roots(trans, fs_info, ref_node->val,
+						seq_elem.seq,
+						tree_mod_seq_elem.seq, &roots);
 		if (ret)
 			break;
-		while (!ret && (root_node = ulist_next(roots, root_node))) {
-			pr_debug("root %llu references leaf %llu\n",
-					root_node->val, ref_node->val);
-			ret = iterate_leaf_refs(fs_info, ref_node->val,
-						extent_item_objectid,
-						extent_item_pos, root_node->val,
-						iterate, ctx);
+		ULIST_ITER_INIT(&root_uiter);
+		while (!ret && (root_node = ulist_next(roots, &root_uiter))) {
+			pr_debug("root %llu references leaf %llu, data list "
+				 "%#lx\n", root_node->val, ref_node->val,
+				 ref_node->aux);
+			ret = iterate_leaf_refs(
+				(struct extent_inode_elem *)ref_node->aux,
+				root_node->val, extent_item_objectid,
+				iterate, ctx);
 		}
+		ulist_free(roots);
+		roots = NULL;
 	}
 
-	ulist_free(refs);
+	free_leaf_list(refs);
 	ulist_free(roots);
 out:
 	if (!search_commit_root) {
+		btrfs_put_tree_mod_seq(fs_info, &tree_mod_seq_elem);
 		btrfs_put_delayed_seq(delayed_refs, &seq_elem);
 		btrfs_end_transaction(trans, fs_info->extent_root);
 	}
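(Editorial aside, not part of the patch: a minimal sketch of how a caller consumes the result of btrfs_find_all_leafs()/find_parent_nodes() with the new explicit ulist iterator, much like iterate_extent_inodes() above; the helper name is hypothetical.)

static void example_walk_leaves(struct ulist *refs)
{
	struct ulist_iterator uiter;
	struct ulist_node *node;

	ULIST_ITER_INIT(&uiter);
	while ((node = ulist_next(refs, &uiter))) {
		struct extent_inode_elem *eie;

		/* node->val is the leaf bytenr; node->aux carries the
		 * inode list collected while resolving the backrefs */
		for (eie = (struct extent_inode_elem *)node->aux;
		     eie; eie = eie->next)
			pr_debug("leaf %llu -> inode %llu, offset %llu\n",
				 (unsigned long long)node->val,
				 (unsigned long long)eie->inum,
				 (unsigned long long)eie->offset);
	}
}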
diff --git a/fs/btrfs/backref.h b/fs/btrfs/backref.h
index 57ea2e9..c18d8ac 100644
--- a/fs/btrfs/backref.h
+++ b/fs/btrfs/backref.h
@@ -58,7 +58,8 @@
 
 int btrfs_find_all_roots(struct btrfs_trans_handle *trans,
 				struct btrfs_fs_info *fs_info, u64 bytenr,
-				u64 num_bytes, u64 seq, struct ulist **roots);
+				u64 delayed_ref_seq, u64 time_seq,
+				struct ulist **roots);
 
 struct btrfs_data_container *init_data_container(u32 total_bytes);
 struct inode_fs_paths *init_ipath(s32 total_bytes, struct btrfs_root *fs_root,
diff --git a/fs/btrfs/btrfs_inode.h b/fs/btrfs/btrfs_inode.h
index 9b9b15f..12394a9 100644
--- a/fs/btrfs/btrfs_inode.h
+++ b/fs/btrfs/btrfs_inode.h
@@ -24,6 +24,21 @@
 #include "ordered-data.h"
 #include "delayed-inode.h"
 
+/*
+ * ordered_data_close is set by truncate when a file that used
+ * to have good data has been truncated to zero.  When it is set
+ * the btrfs file release call will add this inode to the
+ * ordered operations list so that we make sure to flush out any
+ * new data the application may have written before commit.
+ */
+#define BTRFS_INODE_ORDERED_DATA_CLOSE		0
+#define BTRFS_INODE_ORPHAN_META_RESERVED	1
+#define BTRFS_INODE_DUMMY			2
+#define BTRFS_INODE_IN_DEFRAG			3
+#define BTRFS_INODE_DELALLOC_META_RESERVED	4
+#define BTRFS_INODE_HAS_ORPHAN_ITEM		5
+#define BTRFS_INODE_HAS_ASYNC_EXTENT		6
+
 /* in memory btrfs inode */
 struct btrfs_inode {
 	/* which subvolume this inode belongs to */
@@ -57,9 +72,6 @@
 	/* used to order data wrt metadata */
 	struct btrfs_ordered_inode_tree ordered_tree;
 
-	/* for keeping track of orphaned inodes */
-	struct list_head i_orphan;
-
 	/* list of all the delalloc inodes in the FS.  There are times we need
 	 * to write all the delalloc pages to disk, and this list is used
 	 * to walk them all.
@@ -78,14 +90,13 @@
 	/* the space_info for where this inode's data allocations are done */
 	struct btrfs_space_info *space_info;
 
+	unsigned long runtime_flags;
+
 	/* full 64 bit generation number, struct vfs_inode doesn't have a big
 	 * enough field for this.
 	 */
 	u64 generation;
 
-	/* sequence number for NFS changes */
-	u64 sequence;
-
 	/*
 	 * transid of the trans_handle that last modified this inode
 	 */
@@ -145,22 +156,9 @@
 	unsigned reserved_extents;
 
 	/*
-	 * ordered_data_close is set by truncate when a file that used
-	 * to have good data has been truncated to zero.  When it is set
-	 * the btrfs file release call will add this inode to the
-	 * ordered operations list so that we make sure to flush out any
-	 * new data the application may have written before commit.
-	 */
-	unsigned ordered_data_close:1;
-	unsigned orphan_meta_reserved:1;
-	unsigned dummy_inode:1;
-	unsigned in_defrag:1;
-	unsigned delalloc_meta_reserved:1;
-
-	/*
 	 * always compress this one file
 	 */
-	unsigned force_compress:4;
+	unsigned force_compress;
 
 	struct btrfs_delayed_node *delayed_node;
 
@@ -202,4 +200,17 @@
 	return false;
 }
 
+static inline int btrfs_inode_in_log(struct inode *inode, u64 generation)
+{
+	struct btrfs_root *root = BTRFS_I(inode)->root;
+	int ret = 0;
+
+	mutex_lock(&root->log_mutex);
+	if (BTRFS_I(inode)->logged_trans == generation &&
+	    BTRFS_I(inode)->last_sub_trans <= root->last_log_commit)
+		ret = 1;
+	mutex_unlock(&root->log_mutex);
+	return ret;
+}
+
 #endif
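(Editorial aside, not part of the patch: the per-inode single-bit bitfields removed above become bit numbers operated on with the kernel's atomic bitops against runtime_flags; hypothetical helper, usage pattern only.)

static void example_runtime_flags(struct inode *inode)
{
	struct btrfs_inode *bi = BTRFS_I(inode);

	/* replaces the old "bi->in_defrag = 1" bitfield write */
	set_bit(BTRFS_INODE_IN_DEFRAG, &bi->runtime_flags);

	/* atomic test-and-clear, e.g. in the file release path */
	if (test_and_clear_bit(BTRFS_INODE_ORDERED_DATA_CLOSE,
			       &bi->runtime_flags))
		pr_debug("flush ordered data before commit\n");
}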
diff --git a/fs/btrfs/check-integrity.c b/fs/btrfs/check-integrity.c
index c053e90..da6e936 100644
--- a/fs/btrfs/check-integrity.c
+++ b/fs/btrfs/check-integrity.c
@@ -93,6 +93,7 @@
 #include "print-tree.h"
 #include "locking.h"
 #include "check-integrity.h"
+#include "rcu-string.h"
 
 #define BTRFSIC_BLOCK_HASHTABLE_SIZE 0x10000
 #define BTRFSIC_BLOCK_LINK_HASHTABLE_SIZE 0x10000
@@ -103,8 +104,6 @@
 #define BTRFSIC_BLOCK_STACK_FRAME_MAGIC_NUMBER 20111300
 #define BTRFSIC_TREE_DUMP_MAX_INDENT_LEVEL (200 - 6)	/* in characters,
 							 * excluding " [...]" */
-#define BTRFSIC_BLOCK_SIZE PAGE_SIZE
-
 #define BTRFSIC_GENERATION_UNKNOWN ((u64)-1)
 
 /*
@@ -210,8 +209,9 @@
 	u64 dev_bytenr;		/* physical bytenr on device */
 	u32 len;
 	struct btrfsic_dev_state *dev;
-	char *data;
-	struct buffer_head *bh;	/* do not use if set to NULL */
+	char **datav;
+	struct page **pagev;
+	void *mem_to_free;
 };
 
 /* This structure is used to implement recursion without occupying
@@ -243,6 +243,8 @@
 	struct btrfs_root *root;
 	u64 max_superblock_generation;
 	struct btrfsic_block *latest_superblock;
+	u32 metablock_size;
+	u32 datablock_size;
 };
 
 static void btrfsic_block_init(struct btrfsic_block *b);
@@ -290,8 +292,10 @@
 static int btrfsic_process_metablock(struct btrfsic_state *state,
 				     struct btrfsic_block *block,
 				     struct btrfsic_block_data_ctx *block_ctx,
-				     struct btrfs_header *hdr,
 				     int limit_nesting, int force_iodone_flag);
+static void btrfsic_read_from_block_data(
+	struct btrfsic_block_data_ctx *block_ctx,
+	void *dst, u32 offset, size_t len);
 static int btrfsic_create_link_to_next_block(
 		struct btrfsic_state *state,
 		struct btrfsic_block *block,
@@ -318,12 +322,13 @@
 static int btrfsic_read_block(struct btrfsic_state *state,
 			      struct btrfsic_block_data_ctx *block_ctx);
 static void btrfsic_dump_database(struct btrfsic_state *state);
+static void btrfsic_complete_bio_end_io(struct bio *bio, int err);
 static int btrfsic_test_for_metadata(struct btrfsic_state *state,
-				     const u8 *data, unsigned int size);
+				     char **datav, unsigned int num_pages);
 static void btrfsic_process_written_block(struct btrfsic_dev_state *dev_state,
-					  u64 dev_bytenr, u8 *mapped_data,
-					  unsigned int len, struct bio *bio,
-					  int *bio_is_patched,
+					  u64 dev_bytenr, char **mapped_datav,
+					  unsigned int num_pages,
+					  struct bio *bio, int *bio_is_patched,
 					  struct buffer_head *bh,
 					  int submit_bio_bh_rw);
 static int btrfsic_process_written_superblock(
@@ -375,7 +380,7 @@
 static void btrfsic_cmp_log_and_dev_bytenr(struct btrfsic_state *state,
 					   u64 bytenr,
 					   struct btrfsic_dev_state *dev_state,
-					   u64 dev_bytenr, char *data);
+					   u64 dev_bytenr);
 
 static struct mutex btrfsic_mutex;
 static int btrfsic_is_initialized;
@@ -651,7 +656,7 @@
 	int pass;
 
 	BUG_ON(NULL == state);
-	selected_super = kmalloc(sizeof(*selected_super), GFP_NOFS);
+	selected_super = kzalloc(sizeof(*selected_super), GFP_NOFS);
 	if (NULL == selected_super) {
 		printk(KERN_INFO "btrfsic: error, kmalloc failed!\n");
 		return -1;
@@ -718,7 +723,7 @@
 
 		num_copies =
 		    btrfs_num_copies(&state->root->fs_info->mapping_tree,
-				     next_bytenr, PAGE_SIZE);
+				     next_bytenr, state->metablock_size);
 		if (state->print_mask & BTRFSIC_PRINT_MASK_NUM_COPIES)
 			printk(KERN_INFO "num_copies(log_bytenr=%llu) = %d\n",
 			       (unsigned long long)next_bytenr, num_copies);
@@ -727,9 +732,9 @@
 			struct btrfsic_block *next_block;
 			struct btrfsic_block_data_ctx tmp_next_block_ctx;
 			struct btrfsic_block_link *l;
-			struct btrfs_header *hdr;
 
-			ret = btrfsic_map_block(state, next_bytenr, PAGE_SIZE,
+			ret = btrfsic_map_block(state, next_bytenr,
+						state->metablock_size,
 						&tmp_next_block_ctx,
 						mirror_num);
 			if (ret) {
@@ -758,7 +763,7 @@
 			BUG_ON(NULL == l);
 
 			ret = btrfsic_read_block(state, &tmp_next_block_ctx);
-			if (ret < (int)BTRFSIC_BLOCK_SIZE) {
+			if (ret < (int)PAGE_CACHE_SIZE) {
 				printk(KERN_INFO
 				       "btrfsic: read @logical %llu failed!\n",
 				       (unsigned long long)
@@ -768,11 +773,9 @@
 				return -1;
 			}
 
-			hdr = (struct btrfs_header *)tmp_next_block_ctx.data;
 			ret = btrfsic_process_metablock(state,
 							next_block,
 							&tmp_next_block_ctx,
-							hdr,
 							BTRFS_MAX_LEVEL + 3, 1);
 			btrfsic_release_block_ctx(&tmp_next_block_ctx);
 		}
@@ -799,7 +802,10 @@
 
 	/* super block bytenr is always the unmapped device bytenr */
 	dev_bytenr = btrfs_sb_offset(superblock_mirror_num);
-	bh = __bread(superblock_bdev, dev_bytenr / 4096, 4096);
+	if (dev_bytenr + BTRFS_SUPER_INFO_SIZE > device->total_bytes)
+		return -1;
+	bh = __bread(superblock_bdev, dev_bytenr / 4096,
+		     BTRFS_SUPER_INFO_SIZE);
 	if (NULL == bh)
 		return -1;
 	super_tmp = (struct btrfs_super_block *)
@@ -808,7 +814,10 @@
 	if (btrfs_super_bytenr(super_tmp) != dev_bytenr ||
 	    strncmp((char *)(&(super_tmp->magic)), BTRFS_MAGIC,
 		    sizeof(super_tmp->magic)) ||
-	    memcmp(device->uuid, super_tmp->dev_item.uuid, BTRFS_UUID_SIZE)) {
+	    memcmp(device->uuid, super_tmp->dev_item.uuid, BTRFS_UUID_SIZE) ||
+	    btrfs_super_nodesize(super_tmp) != state->metablock_size ||
+	    btrfs_super_leafsize(super_tmp) != state->metablock_size ||
+	    btrfs_super_sectorsize(super_tmp) != state->datablock_size) {
 		brelse(bh);
 		return 0;
 	}
@@ -835,13 +844,14 @@
 		superblock_tmp->never_written = 0;
 		superblock_tmp->mirror_num = 1 + superblock_mirror_num;
 		if (state->print_mask & BTRFSIC_PRINT_MASK_SUPERBLOCK_WRITE)
-			printk(KERN_INFO "New initial S-block (bdev %p, %s)"
-			       " @%llu (%s/%llu/%d)\n",
-			       superblock_bdev, device->name,
-			       (unsigned long long)dev_bytenr,
-			       dev_state->name,
-			       (unsigned long long)dev_bytenr,
-			       superblock_mirror_num);
+			printk_in_rcu(KERN_INFO "New initial S-block (bdev %p, %s)"
+				     " @%llu (%s/%llu/%d)\n",
+				     superblock_bdev,
+				     rcu_str_deref(device->name),
+				     (unsigned long long)dev_bytenr,
+				     dev_state->name,
+				     (unsigned long long)dev_bytenr,
+				     superblock_mirror_num);
 		list_add(&superblock_tmp->all_blocks_node,
 			 &state->all_blocks_list);
 		btrfsic_block_hashtable_add(superblock_tmp,
@@ -893,7 +903,7 @@
 
 		num_copies =
 		    btrfs_num_copies(&state->root->fs_info->mapping_tree,
-				     next_bytenr, PAGE_SIZE);
+				     next_bytenr, state->metablock_size);
 		if (state->print_mask & BTRFSIC_PRINT_MASK_NUM_COPIES)
 			printk(KERN_INFO "num_copies(log_bytenr=%llu) = %d\n",
 			       (unsigned long long)next_bytenr, num_copies);
@@ -902,7 +912,8 @@
 			struct btrfsic_block_data_ctx tmp_next_block_ctx;
 			struct btrfsic_block_link *l;
 
-			if (btrfsic_map_block(state, next_bytenr, PAGE_SIZE,
+			if (btrfsic_map_block(state, next_bytenr,
+					      state->metablock_size,
 					      &tmp_next_block_ctx,
 					      mirror_num)) {
 				printk(KERN_INFO "btrfsic: btrfsic_map_block("
@@ -966,13 +977,15 @@
 		struct btrfsic_state *state,
 		struct btrfsic_block *const first_block,
 		struct btrfsic_block_data_ctx *const first_block_ctx,
-		struct btrfs_header *const first_hdr,
 		int first_limit_nesting, int force_iodone_flag)
 {
 	struct btrfsic_stack_frame initial_stack_frame = { 0 };
 	struct btrfsic_stack_frame *sf;
 	struct btrfsic_stack_frame *next_stack;
+	struct btrfs_header *const first_hdr =
+		(struct btrfs_header *)first_block_ctx->datav[0];
 
+	BUG_ON(!first_hdr);
 	sf = &initial_stack_frame;
 	sf->error = 0;
 	sf->i = -1;
@@ -1012,21 +1025,47 @@
 		}
 
 		if (sf->i < sf->nr) {
-			struct btrfs_item *disk_item = leafhdr->items + sf->i;
-			struct btrfs_disk_key *disk_key = &disk_item->key;
+			struct btrfs_item disk_item;
+			u32 disk_item_offset =
+				(uintptr_t)(leafhdr->items + sf->i) -
+				(uintptr_t)leafhdr;
+			struct btrfs_disk_key *disk_key;
 			u8 type;
-			const u32 item_offset = le32_to_cpu(disk_item->offset);
+			u32 item_offset;
 
+			if (disk_item_offset + sizeof(struct btrfs_item) >
+			    sf->block_ctx->len) {
+leaf_item_out_of_bounds_error:
+				printk(KERN_INFO
+				       "btrfsic: leaf item out of bounds at logical %llu, dev %s\n",
+				       sf->block_ctx->start,
+				       sf->block_ctx->dev->name);
+				goto one_stack_frame_backwards;
+			}
+			btrfsic_read_from_block_data(sf->block_ctx,
+						     &disk_item,
+						     disk_item_offset,
+						     sizeof(struct btrfs_item));
+			item_offset = le32_to_cpu(disk_item.offset);
+			disk_key = &disk_item.key;
 			type = disk_key->type;
 
 			if (BTRFS_ROOT_ITEM_KEY == type) {
-				const struct btrfs_root_item *const root_item =
-				    (struct btrfs_root_item *)
-				    (sf->block_ctx->data +
-				     offsetof(struct btrfs_leaf, items) +
-				     item_offset);
-				const u64 next_bytenr =
-				    le64_to_cpu(root_item->bytenr);
+				struct btrfs_root_item root_item;
+				u32 root_item_offset;
+				u64 next_bytenr;
+
+				root_item_offset = item_offset +
+					offsetof(struct btrfs_leaf, items);
+				if (root_item_offset +
+				    sizeof(struct btrfs_root_item) >
+				    sf->block_ctx->len)
+					goto leaf_item_out_of_bounds_error;
+				btrfsic_read_from_block_data(
+					sf->block_ctx, &root_item,
+					root_item_offset,
+					sizeof(struct btrfs_root_item));
+				next_bytenr = le64_to_cpu(root_item.bytenr);
 
 				sf->error =
 				    btrfsic_create_link_to_next_block(
@@ -1041,7 +1080,7 @@
 						&sf->num_copies,
 						&sf->mirror_num,
 						disk_key,
-						le64_to_cpu(root_item->
+						le64_to_cpu(root_item.
 						generation));
 				if (sf->error)
 					goto one_stack_frame_backwards;
@@ -1049,7 +1088,7 @@
 				if (NULL != sf->next_block) {
 					struct btrfs_header *const next_hdr =
 					    (struct btrfs_header *)
-					    sf->next_block_ctx.data;
+					    sf->next_block_ctx.datav[0];
 
 					next_stack =
 					    btrfsic_stack_frame_alloc();
@@ -1111,10 +1150,24 @@
 		}
 
 		if (sf->i < sf->nr) {
-			struct btrfs_key_ptr *disk_key_ptr =
-			    nodehdr->ptrs + sf->i;
-			const u64 next_bytenr =
-			    le64_to_cpu(disk_key_ptr->blockptr);
+			struct btrfs_key_ptr key_ptr;
+			u32 key_ptr_offset;
+			u64 next_bytenr;
+
+			key_ptr_offset = (uintptr_t)(nodehdr->ptrs + sf->i) -
+					  (uintptr_t)nodehdr;
+			if (key_ptr_offset + sizeof(struct btrfs_key_ptr) >
+			    sf->block_ctx->len) {
+				printk(KERN_INFO
+				       "btrfsic: node item out of bounds at logical %llu, dev %s\n",
+				       sf->block_ctx->start,
+				       sf->block_ctx->dev->name);
+				goto one_stack_frame_backwards;
+			}
+			btrfsic_read_from_block_data(
+				sf->block_ctx, &key_ptr, key_ptr_offset,
+				sizeof(struct btrfs_key_ptr));
+			next_bytenr = le64_to_cpu(key_ptr.blockptr);
 
 			sf->error = btrfsic_create_link_to_next_block(
 					state,
@@ -1127,15 +1180,15 @@
 					force_iodone_flag,
 					&sf->num_copies,
 					&sf->mirror_num,
-					&disk_key_ptr->key,
-					le64_to_cpu(disk_key_ptr->generation));
+					&key_ptr.key,
+					le64_to_cpu(key_ptr.generation));
 			if (sf->error)
 				goto one_stack_frame_backwards;
 
 			if (NULL != sf->next_block) {
 				struct btrfs_header *const next_hdr =
 				    (struct btrfs_header *)
-				    sf->next_block_ctx.data;
+				    sf->next_block_ctx.datav[0];
 
 				next_stack = btrfsic_stack_frame_alloc();
 				if (NULL == next_stack)
@@ -1181,6 +1234,35 @@
 	return sf->error;
 }
 
+static void btrfsic_read_from_block_data(
+	struct btrfsic_block_data_ctx *block_ctx,
+	void *dstv, u32 offset, size_t len)
+{
+	size_t cur;
+	size_t offset_in_page;
+	char *kaddr;
+	char *dst = (char *)dstv;
+	size_t start_offset = block_ctx->start & ((u64)PAGE_CACHE_SIZE - 1);
+	unsigned long i = (start_offset + offset) >> PAGE_CACHE_SHIFT;
+
+	WARN_ON(offset + len > block_ctx->len);
+	offset_in_page = (start_offset + offset) &
+			 ((unsigned long)PAGE_CACHE_SIZE - 1);
+
+	while (len > 0) {
+		cur = min(len, ((size_t)PAGE_CACHE_SIZE - offset_in_page));
+		BUG_ON(i >= (block_ctx->len + PAGE_CACHE_SIZE - 1) >>
+			    PAGE_CACHE_SHIFT);
+		kaddr = block_ctx->datav[i];
+		memcpy(dst, kaddr + offset_in_page, cur);
+
+		dst += cur;
+		len -= cur;
+		offset_in_page = 0;
+		i++;
+	}
+}
+
 static int btrfsic_create_link_to_next_block(
 		struct btrfsic_state *state,
 		struct btrfsic_block *block,
@@ -1204,7 +1286,7 @@
 	if (0 == *num_copiesp) {
 		*num_copiesp =
 		    btrfs_num_copies(&state->root->fs_info->mapping_tree,
-				     next_bytenr, PAGE_SIZE);
+				     next_bytenr, state->metablock_size);
 		if (state->print_mask & BTRFSIC_PRINT_MASK_NUM_COPIES)
 			printk(KERN_INFO "num_copies(log_bytenr=%llu) = %d\n",
 			       (unsigned long long)next_bytenr, *num_copiesp);
@@ -1219,7 +1301,7 @@
 		       "btrfsic_create_link_to_next_block(mirror_num=%d)\n",
 		       *mirror_nump);
 	ret = btrfsic_map_block(state, next_bytenr,
-				BTRFSIC_BLOCK_SIZE,
+				state->metablock_size,
 				next_block_ctx, *mirror_nump);
 	if (ret) {
 		printk(KERN_INFO
@@ -1314,7 +1396,7 @@
 
 	if (limit_nesting > 0 && did_alloc_block_link) {
 		ret = btrfsic_read_block(state, next_block_ctx);
-		if (ret < (int)BTRFSIC_BLOCK_SIZE) {
+		if (ret < (int)next_block_ctx->len) {
 			printk(KERN_INFO
 			       "btrfsic: read block @logical %llu failed!\n",
 			       (unsigned long long)next_bytenr);
@@ -1339,43 +1421,74 @@
 		u32 item_offset, int force_iodone_flag)
 {
 	int ret;
-	struct btrfs_file_extent_item *file_extent_item =
-	    (struct btrfs_file_extent_item *)(block_ctx->data +
-					      offsetof(struct btrfs_leaf,
-						       items) + item_offset);
-	u64 next_bytenr =
-	    le64_to_cpu(file_extent_item->disk_bytenr) +
-	    le64_to_cpu(file_extent_item->offset);
-	u64 num_bytes = le64_to_cpu(file_extent_item->num_bytes);
-	u64 generation = le64_to_cpu(file_extent_item->generation);
+	struct btrfs_file_extent_item file_extent_item;
+	u64 file_extent_item_offset;
+	u64 next_bytenr;
+	u64 num_bytes;
+	u64 generation;
 	struct btrfsic_block_link *l;
 
+	file_extent_item_offset = offsetof(struct btrfs_leaf, items) +
+				  item_offset;
+	if (file_extent_item_offset +
+	    offsetof(struct btrfs_file_extent_item, disk_num_bytes) >
+	    block_ctx->len) {
+		printk(KERN_INFO
+		       "btrfsic: file item out of bounds at logical %llu, dev %s\n",
+		       block_ctx->start, block_ctx->dev->name);
+		return -1;
+	}
+
+	btrfsic_read_from_block_data(block_ctx, &file_extent_item,
+		file_extent_item_offset,
+		offsetof(struct btrfs_file_extent_item, disk_num_bytes));
+	if (BTRFS_FILE_EXTENT_REG != file_extent_item.type ||
+	    ((u64)0) == le64_to_cpu(file_extent_item.disk_bytenr)) {
+		if (state->print_mask & BTRFSIC_PRINT_MASK_VERY_VERBOSE)
+			printk(KERN_INFO "extent_data: type %u, disk_bytenr = %llu\n",
+			       file_extent_item.type,
+			       (unsigned long long)
+			       le64_to_cpu(file_extent_item.disk_bytenr));
+		return 0;
+	}
+
+	if (file_extent_item_offset + sizeof(struct btrfs_file_extent_item) >
+	    block_ctx->len) {
+		printk(KERN_INFO
+		       "btrfsic: file item out of bounds at logical %llu, dev %s\n",
+		       block_ctx->start, block_ctx->dev->name);
+		return -1;
+	}
+	btrfsic_read_from_block_data(block_ctx, &file_extent_item,
+				     file_extent_item_offset,
+				     sizeof(struct btrfs_file_extent_item));
+	next_bytenr = le64_to_cpu(file_extent_item.disk_bytenr) +
+		      le64_to_cpu(file_extent_item.offset);
+	generation = le64_to_cpu(file_extent_item.generation);
+	num_bytes = le64_to_cpu(file_extent_item.num_bytes);
+
 	if (state->print_mask & BTRFSIC_PRINT_MASK_VERY_VERBOSE)
 		printk(KERN_INFO "extent_data: type %u, disk_bytenr = %llu,"
 		       " offset = %llu, num_bytes = %llu\n",
-		       file_extent_item->type,
+		       file_extent_item.type,
 		       (unsigned long long)
-		       le64_to_cpu(file_extent_item->disk_bytenr),
-		       (unsigned long long)
-		       le64_to_cpu(file_extent_item->offset),
-		       (unsigned long long)
-		       le64_to_cpu(file_extent_item->num_bytes));
-	if (BTRFS_FILE_EXTENT_REG != file_extent_item->type ||
-	    ((u64)0) == le64_to_cpu(file_extent_item->disk_bytenr))
-		return 0;
+		       le64_to_cpu(file_extent_item.disk_bytenr),
+		       (unsigned long long)le64_to_cpu(file_extent_item.offset),
+		       (unsigned long long)num_bytes);
 	while (num_bytes > 0) {
 		u32 chunk_len;
 		int num_copies;
 		int mirror_num;
 
-		if (num_bytes > BTRFSIC_BLOCK_SIZE)
-			chunk_len = BTRFSIC_BLOCK_SIZE;
+		if (num_bytes > state->datablock_size)
+			chunk_len = state->datablock_size;
 		else
 			chunk_len = num_bytes;
 
 		num_copies =
 		    btrfs_num_copies(&state->root->fs_info->mapping_tree,
-				     next_bytenr, PAGE_SIZE);
+				     next_bytenr, state->datablock_size);
 		if (state->print_mask & BTRFSIC_PRINT_MASK_NUM_COPIES)
 			printk(KERN_INFO "num_copies(log_bytenr=%llu) = %d\n",
 			       (unsigned long long)next_bytenr, num_copies);
@@ -1475,8 +1588,9 @@
 	block_ctx_out->dev_bytenr = multi->stripes[0].physical;
 	block_ctx_out->start = bytenr;
 	block_ctx_out->len = len;
-	block_ctx_out->data = NULL;
-	block_ctx_out->bh = NULL;
+	block_ctx_out->datav = NULL;
+	block_ctx_out->pagev = NULL;
+	block_ctx_out->mem_to_free = NULL;
 
 	if (0 == ret)
 		kfree(multi);
@@ -1496,8 +1610,9 @@
 	block_ctx_out->dev_bytenr = bytenr;
 	block_ctx_out->start = bytenr;
 	block_ctx_out->len = len;
-	block_ctx_out->data = NULL;
-	block_ctx_out->bh = NULL;
+	block_ctx_out->datav = NULL;
+	block_ctx_out->pagev = NULL;
+	block_ctx_out->mem_to_free = NULL;
 	if (NULL != block_ctx_out->dev) {
 		return 0;
 	} else {
@@ -1508,38 +1623,127 @@
 
 static void btrfsic_release_block_ctx(struct btrfsic_block_data_ctx *block_ctx)
 {
-	if (NULL != block_ctx->bh) {
-		brelse(block_ctx->bh);
-		block_ctx->bh = NULL;
+	if (block_ctx->mem_to_free) {
+		unsigned int num_pages;
+
+		BUG_ON(!block_ctx->datav);
+		BUG_ON(!block_ctx->pagev);
+		num_pages = (block_ctx->len + (u64)PAGE_CACHE_SIZE - 1) >>
+			    PAGE_CACHE_SHIFT;
+		while (num_pages > 0) {
+			num_pages--;
+			if (block_ctx->datav[num_pages]) {
+				kunmap(block_ctx->pagev[num_pages]);
+				block_ctx->datav[num_pages] = NULL;
+			}
+			if (block_ctx->pagev[num_pages]) {
+				__free_page(block_ctx->pagev[num_pages]);
+				block_ctx->pagev[num_pages] = NULL;
+			}
+		}
+
+		kfree(block_ctx->mem_to_free);
+		block_ctx->mem_to_free = NULL;
+		block_ctx->pagev = NULL;
+		block_ctx->datav = NULL;
 	}
 }
 
 static int btrfsic_read_block(struct btrfsic_state *state,
 			      struct btrfsic_block_data_ctx *block_ctx)
 {
-	block_ctx->bh = NULL;
-	if (block_ctx->dev_bytenr & 4095) {
+	unsigned int num_pages;
+	unsigned int i;
+	u64 dev_bytenr;
+	int ret;
+
+	BUG_ON(block_ctx->datav);
+	BUG_ON(block_ctx->pagev);
+	BUG_ON(block_ctx->mem_to_free);
+	if (block_ctx->dev_bytenr & ((u64)PAGE_CACHE_SIZE - 1)) {
 		printk(KERN_INFO
 		       "btrfsic: read_block() with unaligned bytenr %llu\n",
 		       (unsigned long long)block_ctx->dev_bytenr);
 		return -1;
 	}
-	if (block_ctx->len > 4096) {
-		printk(KERN_INFO
-		       "btrfsic: read_block() with too huge size %d\n",
-		       block_ctx->len);
+
+	num_pages = (block_ctx->len + (u64)PAGE_CACHE_SIZE - 1) >>
+		    PAGE_CACHE_SHIFT;
+	block_ctx->mem_to_free = kzalloc((sizeof(*block_ctx->datav) +
+					  sizeof(*block_ctx->pagev)) *
+					 num_pages, GFP_NOFS);
+	if (!block_ctx->mem_to_free)
 		return -1;
+	block_ctx->datav = block_ctx->mem_to_free;
+	block_ctx->pagev = (struct page **)(block_ctx->datav + num_pages);
+	for (i = 0; i < num_pages; i++) {
+		block_ctx->pagev[i] = alloc_page(GFP_NOFS);
+		if (!block_ctx->pagev[i])
+			return -1;
 	}
 
-	block_ctx->bh = __bread(block_ctx->dev->bdev,
-				block_ctx->dev_bytenr >> 12, 4096);
-	if (NULL == block_ctx->bh)
-		return -1;
-	block_ctx->data = block_ctx->bh->b_data;
+	dev_bytenr = block_ctx->dev_bytenr;
+	for (i = 0; i < num_pages;) {
+		struct bio *bio;
+		unsigned int j;
+		DECLARE_COMPLETION_ONSTACK(complete);
+
+		bio = bio_alloc(GFP_NOFS, num_pages - i);
+		if (!bio) {
+			printk(KERN_INFO
+			       "btrfsic: bio_alloc() for %u pages failed!\n",
+			       num_pages - i);
+			return -1;
+		}
+		bio->bi_bdev = block_ctx->dev->bdev;
+		bio->bi_sector = dev_bytenr >> 9;
+		bio->bi_end_io = btrfsic_complete_bio_end_io;
+		bio->bi_private = &complete;
+
+		for (j = i; j < num_pages; j++) {
+			ret = bio_add_page(bio, block_ctx->pagev[j],
+					   PAGE_CACHE_SIZE, 0);
+			if (PAGE_CACHE_SIZE != ret)
+				break;
+		}
+		if (j == i) {
+			printk(KERN_INFO
+			       "btrfsic: error, failed to add a single page!\n");
+			return -1;
+		}
+		submit_bio(READ, bio);
+
+		/* this will also unplug the queue */
+		wait_for_completion(&complete);
+
+		if (!test_bit(BIO_UPTODATE, &bio->bi_flags)) {
+			printk(KERN_INFO
+			       "btrfsic: read error at logical %llu dev %s!\n",
+			       block_ctx->start, block_ctx->dev->name);
+			bio_put(bio);
+			return -1;
+		}
+		bio_put(bio);
+		dev_bytenr += (j - i) * PAGE_CACHE_SIZE;
+		i = j;
+	}
+	for (i = 0; i < num_pages; i++) {
+		block_ctx->datav[i] = kmap(block_ctx->pagev[i]);
+		if (!block_ctx->datav[i]) {
+			printk(KERN_INFO "btrfsic: kmap() failed (dev %s)!\n",
+			       block_ctx->dev->name);
+			return -1;
+		}
+	}
 
 	return block_ctx->len;
 }
 
+static void btrfsic_complete_bio_end_io(struct bio *bio, int err)
+{
+	complete((struct completion *)bio->bi_private);
+}
+
 static void btrfsic_dump_database(struct btrfsic_state *state)
 {
 	struct list_head *elem_all;
@@ -1617,32 +1821,39 @@
  * (note that this test fails for the super block)
  */
 static int btrfsic_test_for_metadata(struct btrfsic_state *state,
-				     const u8 *data, unsigned int size)
+				     char **datav, unsigned int num_pages)
 {
 	struct btrfs_header *h;
 	u8 csum[BTRFS_CSUM_SIZE];
 	u32 crc = ~(u32)0;
-	int fail = 0;
-	int crc_fail = 0;
+	unsigned int i;
 
-	h = (struct btrfs_header *)data;
+	if (num_pages * PAGE_CACHE_SIZE < state->metablock_size)
+		return 1; /* not metadata */
+	num_pages = state->metablock_size >> PAGE_CACHE_SHIFT;
+	h = (struct btrfs_header *)datav[0];
 
 	if (memcmp(h->fsid, state->root->fs_info->fsid, BTRFS_UUID_SIZE))
-		fail++;
+		return 1;
 
-	crc = crc32c(crc, data + BTRFS_CSUM_SIZE, PAGE_SIZE - BTRFS_CSUM_SIZE);
+	for (i = 0; i < num_pages; i++) {
+		u8 *data = i ? datav[i] : (datav[i] + BTRFS_CSUM_SIZE);
+		size_t sublen = i ? PAGE_CACHE_SIZE :
+				    (PAGE_CACHE_SIZE - BTRFS_CSUM_SIZE);
+
+		crc = crc32c(crc, data, sublen);
+	}
 	btrfs_csum_final(crc, csum);
 	if (memcmp(csum, h->csum, state->csum_size))
-		crc_fail++;
+		return 1;
 
-	return fail || crc_fail;
+	return 0; /* is metadata */
 }
 
 static void btrfsic_process_written_block(struct btrfsic_dev_state *dev_state,
-					  u64 dev_bytenr,
-					  u8 *mapped_data, unsigned int len,
-					  struct bio *bio,
-					  int *bio_is_patched,
+					  u64 dev_bytenr, char **mapped_datav,
+					  unsigned int num_pages,
+					  struct bio *bio, int *bio_is_patched,
 					  struct buffer_head *bh,
 					  int submit_bio_bh_rw)
 {
@@ -1652,12 +1863,19 @@
 	int ret;
 	struct btrfsic_state *state = dev_state->state;
 	struct block_device *bdev = dev_state->bdev;
+	unsigned int processed_len;
 
-	WARN_ON(len > PAGE_SIZE);
-	is_metadata = (0 == btrfsic_test_for_metadata(state, mapped_data, len));
 	if (NULL != bio_is_patched)
 		*bio_is_patched = 0;
 
+again:
+	if (num_pages == 0)
+		return;
+
+	processed_len = 0;
+	is_metadata = (0 == btrfsic_test_for_metadata(state, mapped_datav,
+						      num_pages));
+
 	block = btrfsic_block_hashtable_lookup(bdev, dev_bytenr,
 					       &state->block_hashtable);
 	if (NULL != block) {
@@ -1667,8 +1885,16 @@
 
 		if (block->is_superblock) {
 			bytenr = le64_to_cpu(((struct btrfs_super_block *)
-					      mapped_data)->bytenr);
+					      mapped_datav[0])->bytenr);
+			if (num_pages * PAGE_CACHE_SIZE <
+			    BTRFS_SUPER_INFO_SIZE) {
+				printk(KERN_INFO
+				       "btrfsic: cannot work with too short bios!\n");
+				return;
+			}
 			is_metadata = 1;
+			BUG_ON(BTRFS_SUPER_INFO_SIZE & (PAGE_CACHE_SIZE - 1));
+			processed_len = BTRFS_SUPER_INFO_SIZE;
 			if (state->print_mask &
 			    BTRFSIC_PRINT_MASK_TREE_BEFORE_SB_WRITE) {
 				printk(KERN_INFO
@@ -1678,12 +1904,18 @@
 		}
 		if (is_metadata) {
 			if (!block->is_superblock) {
+				if (num_pages * PAGE_CACHE_SIZE <
+				    state->metablock_size) {
+					printk(KERN_INFO
+					       "btrfsic: cannot work with too short bios!\n");
+					return;
+				}
+				processed_len = state->metablock_size;
 				bytenr = le64_to_cpu(((struct btrfs_header *)
-						      mapped_data)->bytenr);
+						      mapped_datav[0])->bytenr);
 				btrfsic_cmp_log_and_dev_bytenr(state, bytenr,
 							       dev_state,
-							       dev_bytenr,
-							       mapped_data);
+							       dev_bytenr);
 			}
 			if (block->logical_bytenr != bytenr) {
 				printk(KERN_INFO
@@ -1710,6 +1942,13 @@
 				       block->mirror_num,
 				       btrfsic_get_block_type(state, block));
 		} else {
+			if (num_pages * PAGE_CACHE_SIZE <
+			    state->datablock_size) {
+				printk(KERN_INFO
+				       "btrfsic: cannot work with too short bios!\n");
+				return;
+			}
+			processed_len = state->datablock_size;
 			bytenr = block->logical_bytenr;
 			if (state->print_mask & BTRFSIC_PRINT_MASK_VERBOSE)
 				printk(KERN_INFO
@@ -1747,7 +1986,7 @@
 			       le64_to_cpu(block->disk_key.offset),
 			       (unsigned long long)
 			       le64_to_cpu(((struct btrfs_header *)
-					    mapped_data)->generation),
+					    mapped_datav[0])->generation),
 			       (unsigned long long)
 			       state->max_superblock_generation);
 			btrfsic_dump_tree(state);
@@ -1765,10 +2004,10 @@
 			       (unsigned long long)block->generation,
 			       (unsigned long long)
 			       le64_to_cpu(((struct btrfs_header *)
-					    mapped_data)->generation));
+					    mapped_datav[0])->generation));
 			/* it would not be safe to go on */
 			btrfsic_dump_tree(state);
-			return;
+			goto continue_loop;
 		}
 
 		/*
@@ -1796,18 +2035,19 @@
 		}
 
 		if (block->is_superblock)
-			ret = btrfsic_map_superblock(state, bytenr, len,
+			ret = btrfsic_map_superblock(state, bytenr,
+						     processed_len,
 						     bdev, &block_ctx);
 		else
-			ret = btrfsic_map_block(state, bytenr, len,
+			ret = btrfsic_map_block(state, bytenr, processed_len,
 						&block_ctx, 0);
 		if (ret) {
 			printk(KERN_INFO
 			       "btrfsic: btrfsic_map_block(root @%llu)"
 			       " failed!\n", (unsigned long long)bytenr);
-			return;
+			goto continue_loop;
 		}
-		block_ctx.data = mapped_data;
+		block_ctx.datav = mapped_datav;
 		/* the following is required in case of writes to mirrors,
 		 * use the same that was used for the lookup */
 		block_ctx.dev = dev_state;
@@ -1863,11 +2103,13 @@
 			block->logical_bytenr = bytenr;
 			block->is_metadata = 1;
 			if (block->is_superblock) {
+				BUG_ON(PAGE_CACHE_SIZE !=
+				       BTRFS_SUPER_INFO_SIZE);
 				ret = btrfsic_process_written_superblock(
 						state,
 						block,
 						(struct btrfs_super_block *)
-						mapped_data);
+						mapped_datav[0]);
 				if (state->print_mask &
 				    BTRFSIC_PRINT_MASK_TREE_AFTER_SB_WRITE) {
 					printk(KERN_INFO
@@ -1880,8 +2122,6 @@
 						state,
 						block,
 						&block_ctx,
-						(struct btrfs_header *)
-						block_ctx.data,
 						0, 0);
 			}
 			if (ret)
@@ -1912,26 +2152,30 @@
 		u64 bytenr;
 
 		if (!is_metadata) {
+			processed_len = state->datablock_size;
 			if (state->print_mask & BTRFSIC_PRINT_MASK_VERBOSE)
 				printk(KERN_INFO "Written block (%s/%llu/?)"
 				       " !found in hash table, D.\n",
 				       dev_state->name,
 				       (unsigned long long)dev_bytenr);
-			if (!state->include_extent_data)
-				return;	/* ignore that written D block */
+			if (!state->include_extent_data) {
+				/* ignore that written D block */
+				goto continue_loop;
+			}
 
 			/* this is getting ugly for the
 			 * include_extent_data case... */
 			bytenr = 0;	/* unknown */
 			block_ctx.start = bytenr;
-			block_ctx.len = len;
-			block_ctx.bh = NULL;
+			block_ctx.len = processed_len;
+			block_ctx.mem_to_free = NULL;
+			block_ctx.pagev = NULL;
 		} else {
+			processed_len = state->metablock_size;
 			bytenr = le64_to_cpu(((struct btrfs_header *)
-					      mapped_data)->bytenr);
+					      mapped_datav[0])->bytenr);
 			btrfsic_cmp_log_and_dev_bytenr(state, bytenr, dev_state,
-						       dev_bytenr,
-						       mapped_data);
+						       dev_bytenr);
 			if (state->print_mask & BTRFSIC_PRINT_MASK_VERBOSE)
 				printk(KERN_INFO
 				       "Written block @%llu (%s/%llu/?)"
@@ -1940,17 +2184,17 @@
 				       dev_state->name,
 				       (unsigned long long)dev_bytenr);
 
-			ret = btrfsic_map_block(state, bytenr, len, &block_ctx,
-						0);
+			ret = btrfsic_map_block(state, bytenr, processed_len,
+						&block_ctx, 0);
 			if (ret) {
 				printk(KERN_INFO
 				       "btrfsic: btrfsic_map_block(root @%llu)"
 				       " failed!\n",
 				       (unsigned long long)dev_bytenr);
-				return;
+				goto continue_loop;
 			}
 		}
-		block_ctx.data = mapped_data;
+		block_ctx.datav = mapped_datav;
 		/* the following is required in case of writes to mirrors,
 		 * use the same that was used for the lookup */
 		block_ctx.dev = dev_state;
@@ -1960,7 +2204,7 @@
 		if (NULL == block) {
 			printk(KERN_INFO "btrfsic: error, kmalloc failed!\n");
 			btrfsic_release_block_ctx(&block_ctx);
-			return;
+			goto continue_loop;
 		}
 		block->dev_state = dev_state;
 		block->dev_bytenr = dev_bytenr;
@@ -2020,9 +2264,7 @@
 
 		if (is_metadata) {
 			ret = btrfsic_process_metablock(state, block,
-							&block_ctx,
-							(struct btrfs_header *)
-							block_ctx.data, 0, 0);
+							&block_ctx, 0, 0);
 			if (ret)
 				printk(KERN_INFO
 				       "btrfsic: process_metablock(root @%llu)"
@@ -2031,6 +2273,13 @@
 		}
 		btrfsic_release_block_ctx(&block_ctx);
 	}
+
+continue_loop:
+	BUG_ON(!processed_len);
+	dev_bytenr += processed_len;
+	mapped_datav += processed_len >> PAGE_CACHE_SHIFT;
+	num_pages -= processed_len >> PAGE_CACHE_SHIFT;
+	goto again;
 }
 
 static void btrfsic_bio_end_io(struct bio *bp, int bio_error_status)
@@ -2213,7 +2462,7 @@
 
 		num_copies =
 		    btrfs_num_copies(&state->root->fs_info->mapping_tree,
-				     next_bytenr, PAGE_SIZE);
+				     next_bytenr, BTRFS_SUPER_INFO_SIZE);
 		if (state->print_mask & BTRFSIC_PRINT_MASK_NUM_COPIES)
 			printk(KERN_INFO "num_copies(log_bytenr=%llu) = %d\n",
 			       (unsigned long long)next_bytenr, num_copies);
@@ -2224,7 +2473,8 @@
 				printk(KERN_INFO
 				       "btrfsic_process_written_superblock("
 				       "mirror_num=%d)\n", mirror_num);
-			ret = btrfsic_map_block(state, next_bytenr, PAGE_SIZE,
+			ret = btrfsic_map_block(state, next_bytenr,
+						BTRFS_SUPER_INFO_SIZE,
 						&tmp_next_block_ctx,
 						mirror_num);
 			if (ret) {
@@ -2689,7 +2939,7 @@
 static void btrfsic_cmp_log_and_dev_bytenr(struct btrfsic_state *state,
 					   u64 bytenr,
 					   struct btrfsic_dev_state *dev_state,
-					   u64 dev_bytenr, char *data)
+					   u64 dev_bytenr)
 {
 	int num_copies;
 	int mirror_num;
@@ -2698,10 +2948,10 @@
 	int match = 0;
 
 	num_copies = btrfs_num_copies(&state->root->fs_info->mapping_tree,
-				      bytenr, PAGE_SIZE);
+				      bytenr, state->metablock_size);
 
 	for (mirror_num = 1; mirror_num <= num_copies; mirror_num++) {
-		ret = btrfsic_map_block(state, bytenr, PAGE_SIZE,
+		ret = btrfsic_map_block(state, bytenr, state->metablock_size,
 					&block_ctx, mirror_num);
 		if (ret) {
 			printk(KERN_INFO "btrfsic:"
@@ -2727,7 +2977,8 @@
 		       (unsigned long long)bytenr, dev_state->name,
 		       (unsigned long long)dev_bytenr);
 		for (mirror_num = 1; mirror_num <= num_copies; mirror_num++) {
-			ret = btrfsic_map_block(state, bytenr, PAGE_SIZE,
+			ret = btrfsic_map_block(state, bytenr,
+						state->metablock_size,
 						&block_ctx, mirror_num);
 			if (ret)
 				continue;
@@ -2781,13 +3032,13 @@
 			       (unsigned long)bh->b_size, bh->b_data,
 			       bh->b_bdev);
 		btrfsic_process_written_block(dev_state, dev_bytenr,
-					      bh->b_data, bh->b_size, NULL,
+					      &bh->b_data, 1, NULL,
 					      NULL, bh, rw);
 	} else if (NULL != dev_state && (rw & REQ_FLUSH)) {
 		if (dev_state->state->print_mask &
 		    BTRFSIC_PRINT_MASK_SUBMIT_BIO_BH)
 			printk(KERN_INFO
-			       "submit_bh(rw=0x%x) FLUSH, bdev=%p)\n",
+			       "submit_bh(rw=0x%x FLUSH, bdev=%p)\n",
 			       rw, bh->b_bdev);
 		if (!dev_state->dummy_block_for_bio_bh_flush.is_iodone) {
 			if ((dev_state->state->print_mask &
@@ -2836,6 +3087,7 @@
 		unsigned int i;
 		u64 dev_bytenr;
 		int bio_is_patched;
+		char **mapped_datav;
 
 		dev_bytenr = 512 * bio->bi_sector;
 		bio_is_patched = 0;
@@ -2848,35 +3100,46 @@
 			       (unsigned long long)dev_bytenr,
 			       bio->bi_bdev);
 
+		mapped_datav = kmalloc(sizeof(*mapped_datav) * bio->bi_vcnt,
+				       GFP_NOFS);
+		if (!mapped_datav)
+			goto leave;
 		for (i = 0; i < bio->bi_vcnt; i++) {
-			u8 *mapped_data;
-
-			mapped_data = kmap(bio->bi_io_vec[i].bv_page);
+			BUG_ON(bio->bi_io_vec[i].bv_len != PAGE_CACHE_SIZE);
+			mapped_datav[i] = kmap(bio->bi_io_vec[i].bv_page);
+			if (!mapped_datav[i]) {
+				while (i > 0) {
+					i--;
+					kunmap(bio->bi_io_vec[i].bv_page);
+				}
+				kfree(mapped_datav);
+				goto leave;
+			}
 			if ((BTRFSIC_PRINT_MASK_SUBMIT_BIO_BH |
 			     BTRFSIC_PRINT_MASK_VERBOSE) ==
 			    (dev_state->state->print_mask &
 			     (BTRFSIC_PRINT_MASK_SUBMIT_BIO_BH |
 			      BTRFSIC_PRINT_MASK_VERBOSE)))
 				printk(KERN_INFO
-				       "#%u: page=%p, mapped=%p, len=%u,"
-				       " offset=%u\n",
+				       "#%u: page=%p, len=%u, offset=%u\n",
 				       i, bio->bi_io_vec[i].bv_page,
-				       mapped_data,
 				       bio->bi_io_vec[i].bv_len,
 				       bio->bi_io_vec[i].bv_offset);
-			btrfsic_process_written_block(dev_state, dev_bytenr,
-						      mapped_data,
-						      bio->bi_io_vec[i].bv_len,
-						      bio, &bio_is_patched,
-						      NULL, rw);
-			kunmap(bio->bi_io_vec[i].bv_page);
-			dev_bytenr += bio->bi_io_vec[i].bv_len;
 		}
+		btrfsic_process_written_block(dev_state, dev_bytenr,
+					      mapped_datav, bio->bi_vcnt,
+					      bio, &bio_is_patched,
+					      NULL, rw);
+		while (i > 0) {
+			i--;
+			kunmap(bio->bi_io_vec[i].bv_page);
+		}
+		kfree(mapped_datav);
 	} else if (NULL != dev_state && (rw & REQ_FLUSH)) {
 		if (dev_state->state->print_mask &
 		    BTRFSIC_PRINT_MASK_SUBMIT_BIO_BH)
 			printk(KERN_INFO
-			       "submit_bio(rw=0x%x) FLUSH, bdev=%p)\n",
+			       "submit_bio(rw=0x%x FLUSH, bdev=%p)\n",
 			       rw, bio->bi_bdev);
 		if (!dev_state->dummy_block_for_bio_bh_flush.is_iodone) {
 			if ((dev_state->state->print_mask &
@@ -2903,6 +3166,7 @@
 			bio->bi_end_io = btrfsic_bio_end_io;
 		}
 	}
+leave:
 	mutex_unlock(&btrfsic_mutex);
 
 	submit_bio(rw, bio);
@@ -2917,6 +3181,30 @@
 	struct list_head *dev_head = &fs_devices->devices;
 	struct btrfs_device *device;
 
+	if (root->nodesize != root->leafsize) {
+		printk(KERN_INFO
+		       "btrfsic: cannot handle nodesize %d != leafsize %d!\n",
+		       root->nodesize, root->leafsize);
+		return -1;
+	}
+	if (root->nodesize & ((u64)PAGE_CACHE_SIZE - 1)) {
+		printk(KERN_INFO
+		       "btrfsic: cannot handle nodesize %d not being a multiple of PAGE_CACHE_SIZE %ld!\n",
+		       root->nodesize, (unsigned long)PAGE_CACHE_SIZE);
+		return -1;
+	}
+	if (root->leafsize & ((u64)PAGE_CACHE_SIZE - 1)) {
+		printk(KERN_INFO
+		       "btrfsic: cannot handle leafsize %d not being a multiple of PAGE_CACHE_SIZE %ld!\n",
+		       root->leafsize, (unsigned long)PAGE_CACHE_SIZE);
+		return -1;
+	}
+	if (root->sectorsize & ((u64)PAGE_CACHE_SIZE - 1)) {
+		printk(KERN_INFO
+		       "btrfsic: cannot handle sectorsize %d not being a multiple of PAGE_CACHE_SIZE %ld!\n",
+		       root->sectorsize, (unsigned long)PAGE_CACHE_SIZE);
+		return -1;
+	}
 	state = kzalloc(sizeof(*state), GFP_NOFS);
 	if (NULL == state) {
 		printk(KERN_INFO "btrfs check-integrity: kmalloc() failed!\n");
@@ -2933,6 +3221,8 @@
 	state->print_mask = print_mask;
 	state->include_extent_data = including_extent_data;
 	state->csum_size = 0;
+	state->metablock_size = root->nodesize;
+	state->datablock_size = root->sectorsize;
 	INIT_LIST_HEAD(&state->all_blocks_list);
 	btrfsic_block_hashtable_init(&state->block_hashtable);
 	btrfsic_block_link_hashtable_init(&state->block_link_hashtable);
@@ -3049,7 +3339,7 @@
 				btrfsic_block_link_free(l);
 		}
 
-		if (b_all->is_iodone)
+		if (b_all->is_iodone || b_all->never_written)
 			btrfsic_block_free(b_all);
 		else
 			printk(KERN_INFO "btrfs: attempt to free %c-block"
diff --git a/fs/btrfs/ctree.c b/fs/btrfs/ctree.c
index 4106264..15cbc2b 100644
--- a/fs/btrfs/ctree.c
+++ b/fs/btrfs/ctree.c
@@ -18,6 +18,7 @@
 
 #include <linux/sched.h>
 #include <linux/slab.h>
+#include <linux/rbtree.h>
 #include "ctree.h"
 #include "disk-io.h"
 #include "transaction.h"
@@ -37,7 +38,16 @@
 			      struct extent_buffer *dst_buf,
 			      struct extent_buffer *src_buf);
 static void del_ptr(struct btrfs_trans_handle *trans, struct btrfs_root *root,
-		   struct btrfs_path *path, int level, int slot);
+		    struct btrfs_path *path, int level, int slot,
+		    int tree_mod_log);
+static void tree_mod_log_free_eb(struct btrfs_fs_info *fs_info,
+				 struct extent_buffer *eb);
+struct extent_buffer *read_old_tree_block(struct btrfs_root *root, u64 bytenr,
+					  u32 blocksize, u64 parent_transid,
+					  u64 time_seq);
+struct extent_buffer *btrfs_find_old_tree_block(struct btrfs_root *root,
+						u64 bytenr, u32 blocksize,
+						u64 time_seq);
 
 struct btrfs_path *btrfs_alloc_path(void)
 {
@@ -255,7 +265,7 @@
 
 	cow = btrfs_alloc_free_block(trans, root, buf->len, 0,
 				     new_root_objectid, &disk_key, level,
-				     buf->start, 0, 1);
+				     buf->start, 0);
 	if (IS_ERR(cow))
 		return PTR_ERR(cow);
 
@@ -288,6 +298,449 @@
 	return 0;
 }
 
+enum mod_log_op {
+	MOD_LOG_KEY_REPLACE,
+	MOD_LOG_KEY_ADD,
+	MOD_LOG_KEY_REMOVE,
+	MOD_LOG_KEY_REMOVE_WHILE_FREEING,
+	MOD_LOG_KEY_REMOVE_WHILE_MOVING,
+	MOD_LOG_MOVE_KEYS,
+	MOD_LOG_ROOT_REPLACE,
+};
+
+struct tree_mod_move {
+	int dst_slot;
+	int nr_items;
+};
+
+struct tree_mod_root {
+	u64 logical;
+	u8 level;
+};
+
+struct tree_mod_elem {
+	struct rb_node node;
+	u64 index;		/* shifted logical */
+	struct seq_list elem;
+	enum mod_log_op op;
+
+	/* this is used for MOD_LOG_KEY_* and MOD_LOG_MOVE_KEYS operations */
+	int slot;
+
+	/* this is used for MOD_LOG_KEY* and MOD_LOG_ROOT_REPLACE */
+	u64 generation;
+
+	/* those are used for op == MOD_LOG_KEY_{REPLACE,REMOVE} */
+	struct btrfs_disk_key key;
+	u64 blockptr;
+
+	/* this is used for op == MOD_LOG_MOVE_KEYS */
+	struct tree_mod_move move;
+
+	/* this is used for op == MOD_LOG_ROOT_REPLACE */
+	struct tree_mod_root old_root;
+};
+
+static inline void
+__get_tree_mod_seq(struct btrfs_fs_info *fs_info, struct seq_list *elem)
+{
+	elem->seq = atomic_inc_return(&fs_info->tree_mod_seq);
+	list_add_tail(&elem->list, &fs_info->tree_mod_seq_list);
+}
+
+void btrfs_get_tree_mod_seq(struct btrfs_fs_info *fs_info,
+			    struct seq_list *elem)
+{
+	elem->flags = 1;
+	spin_lock(&fs_info->tree_mod_seq_lock);
+	__get_tree_mod_seq(fs_info, elem);
+	spin_unlock(&fs_info->tree_mod_seq_lock);
+}
+
+void btrfs_put_tree_mod_seq(struct btrfs_fs_info *fs_info,
+			    struct seq_list *elem)
+{
+	struct rb_root *tm_root;
+	struct rb_node *node;
+	struct rb_node *next;
+	struct seq_list *cur_elem;
+	struct tree_mod_elem *tm;
+	u64 min_seq = (u64)-1;
+	u64 seq_putting = elem->seq;
+
+	if (!seq_putting)
+		return;
+
+	BUG_ON(!(elem->flags & 1));
+	spin_lock(&fs_info->tree_mod_seq_lock);
+	list_del(&elem->list);
+
+	list_for_each_entry(cur_elem, &fs_info->tree_mod_seq_list, list) {
+		if ((cur_elem->flags & 1) && cur_elem->seq < min_seq) {
+			if (seq_putting > cur_elem->seq) {
+				/*
+				 * blocker with lower sequence number exists, we
+				 * cannot remove anything from the log
+				 */
+				goto out;
+			}
+			min_seq = cur_elem->seq;
+		}
+	}
+
+	/*
+	 * anything that's lower than the lowest existing (read: blocked)
+	 * sequence number can be removed from the tree.
+	 */
+	write_lock(&fs_info->tree_mod_log_lock);
+	tm_root = &fs_info->tree_mod_log;
+	for (node = rb_first(tm_root); node; node = next) {
+		next = rb_next(node);
+		tm = container_of(node, struct tree_mod_elem, node);
+		if (tm->elem.seq > min_seq)
+			continue;
+		rb_erase(node, tm_root);
+		list_del(&tm->elem.list);
+		kfree(tm);
+	}
+	write_unlock(&fs_info->tree_mod_log_lock);
+out:
+	spin_unlock(&fs_info->tree_mod_seq_lock);
+}
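
For orientation, a minimal user-space sketch (illustrative names, not the kernel API) of the cleanup decision made above: if any still-active blocker holds a lower sequence number than the one being released, nothing may be purged; otherwise everything at or below the smallest remaining blocker sequence can be freed from the log.

#include <stdint.h>
#include <stddef.h>
#include <stdio.h>

/* returns the highest seq that may be purged, or 0 if nothing may be removed */
static uint64_t removable_up_to(const uint64_t *active_seqs, size_t n,
				uint64_t seq_putting)
{
	uint64_t min_seq = UINT64_MAX;
	size_t i;

	for (i = 0; i < n; i++) {
		if (active_seqs[i] < seq_putting)
			return 0;	/* an older blocker exists: keep the log */
		if (active_seqs[i] < min_seq)
			min_seq = active_seqs[i];
	}
	return min_seq;	/* entries with seq <= min_seq are no longer needed */
}

int main(void)
{
	const uint64_t active[] = { 12, 15 };	/* blockers still registered */

	/* releasing seq 10: both remaining blockers are newer, purge up to 12 */
	printf("%llu\n", (unsigned long long)removable_up_to(active, 2, 10));
	return 0;
}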
+
+/*
+ * key order of the log:
+ *       index -> sequence
+ *
+ * the index is the shifted logical of the *new* root node for root replace
+ * operations, or the shifted logical of the affected block for all other
+ * operations.
+ */
+static noinline int
+__tree_mod_log_insert(struct btrfs_fs_info *fs_info, struct tree_mod_elem *tm)
+{
+	struct rb_root *tm_root;
+	struct rb_node **new;
+	struct rb_node *parent = NULL;
+	struct tree_mod_elem *cur;
+	int ret = 0;
+
+	BUG_ON(!tm || !tm->elem.seq);
+
+	write_lock(&fs_info->tree_mod_log_lock);
+	tm_root = &fs_info->tree_mod_log;
+	new = &tm_root->rb_node;
+	while (*new) {
+		cur = container_of(*new, struct tree_mod_elem, node);
+		parent = *new;
+		if (cur->index < tm->index)
+			new = &((*new)->rb_left);
+		else if (cur->index > tm->index)
+			new = &((*new)->rb_right);
+		else if (cur->elem.seq < tm->elem.seq)
+			new = &((*new)->rb_left);
+		else if (cur->elem.seq > tm->elem.seq)
+			new = &((*new)->rb_right);
+		else {
+			kfree(tm);
+			ret = -EEXIST;
+			goto unlock;
+		}
+	}
+
+	rb_link_node(&tm->node, parent, new);
+	rb_insert_color(&tm->node, tm_root);
+unlock:
+	write_unlock(&fs_info->tree_mod_log_lock);
+	return ret;
+}
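
A minimal user-space illustration (assumed names, not kernel code) of the two-part key described above: elements are compared by block index first and sequence number second, and an exact (index, seq) duplicate is rejected. The rb-tree descent direction itself is a detail of the patch; only the ordering is shown here.

#include <stdint.h>
#include <stdio.h>

struct mod_key {
	uint64_t index;	/* shifted logical of the affected block */
	uint64_t seq;	/* tree modification sequence number */
};

/* <0 / 0 / >0, analogous to the comparisons in __tree_mod_log_insert() */
static int mod_key_cmp(const struct mod_key *a, const struct mod_key *b)
{
	if (a->index != b->index)
		return a->index < b->index ? -1 : 1;
	if (a->seq != b->seq)
		return a->seq < b->seq ? -1 : 1;
	return 0;	/* duplicate: the real code frees tm and returns -EEXIST */
}

int main(void)
{
	struct mod_key a = { .index = 5, .seq = 10 };
	struct mod_key b = { .index = 5, .seq = 12 };

	printf("%d\n", mod_key_cmp(&a, &b));	/* -1: same block, older change first */
	return 0;
}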
+
+static inline int tree_mod_dont_log(struct btrfs_fs_info *fs_info,
+				    struct extent_buffer *eb) {
+	smp_mb();
+	if (list_empty(&(fs_info)->tree_mod_seq_list))
+		return 1;
+	if (!eb)
+		return 0;
+	if (btrfs_header_level(eb) == 0)
+		return 1;
+	return 0;
+}
+
+/*
+ * This allocates memory and gets a tree modification sequence number when
+ * needed.
+ *
+ * Returns 0 when no sequence number is needed, < 0 on error.
+ * Returns the sequence number (> 0) when one was added. In this case,
+ * fs_info->tree_mod_seq_lock is held and must be released by the caller
+ * after inserting into the rb tree.
+ */
+static inline int tree_mod_alloc(struct btrfs_fs_info *fs_info, gfp_t flags,
+				 struct tree_mod_elem **tm_ret)
+{
+	struct tree_mod_elem *tm;
+	int seq;
+
+	if (tree_mod_dont_log(fs_info, NULL))
+		return 0;
+
+	tm = *tm_ret = kzalloc(sizeof(*tm), flags);
+	if (!tm)
+		return -ENOMEM;
+
+	tm->elem.flags = 0;
+	spin_lock(&fs_info->tree_mod_seq_lock);
+	if (list_empty(&fs_info->tree_mod_seq_list)) {
+		/*
+		 * someone emptied the list while we were waiting for the lock.
+		 * we must not add to the list, because no blocker exists. items
+		 * are removed from the list only when the existing blocker is
+		 * removed from the list.
+		 */
+		kfree(tm);
+		seq = 0;
+		spin_unlock(&fs_info->tree_mod_seq_lock);
+	} else {
+		__get_tree_mod_seq(fs_info, &tm->elem);
+		seq = tm->elem.seq;
+	}
+
+	return seq;
+}
+
+static noinline int
+tree_mod_log_insert_key_mask(struct btrfs_fs_info *fs_info,
+			     struct extent_buffer *eb, int slot,
+			     enum mod_log_op op, gfp_t flags)
+{
+	struct tree_mod_elem *tm;
+	int ret;
+
+	ret = tree_mod_alloc(fs_info, flags, &tm);
+	if (ret <= 0)
+		return ret;
+
+	tm->index = eb->start >> PAGE_CACHE_SHIFT;
+	if (op != MOD_LOG_KEY_ADD) {
+		btrfs_node_key(eb, &tm->key, slot);
+		tm->blockptr = btrfs_node_blockptr(eb, slot);
+	}
+	tm->op = op;
+	tm->slot = slot;
+	tm->generation = btrfs_node_ptr_generation(eb, slot);
+
+	ret = __tree_mod_log_insert(fs_info, tm);
+	spin_unlock(&fs_info->tree_mod_seq_lock);
+	return ret;
+}
+
+static noinline int
+tree_mod_log_insert_key(struct btrfs_fs_info *fs_info, struct extent_buffer *eb,
+			int slot, enum mod_log_op op)
+{
+	return tree_mod_log_insert_key_mask(fs_info, eb, slot, op, GFP_NOFS);
+}
+
+static noinline int
+tree_mod_log_insert_move(struct btrfs_fs_info *fs_info,
+			 struct extent_buffer *eb, int dst_slot, int src_slot,
+			 int nr_items, gfp_t flags)
+{
+	struct tree_mod_elem *tm;
+	int ret;
+	int i;
+
+	if (tree_mod_dont_log(fs_info, eb))
+		return 0;
+
+	for (i = 0; i + dst_slot < src_slot && i < nr_items; i++) {
+		ret = tree_mod_log_insert_key(fs_info, eb, i + dst_slot,
+					      MOD_LOG_KEY_REMOVE_WHILE_MOVING);
+		BUG_ON(ret < 0);
+	}
+
+	ret = tree_mod_alloc(fs_info, flags, &tm);
+	if (ret <= 0)
+		return ret;
+
+	tm->index = eb->start >> PAGE_CACHE_SHIFT;
+	tm->slot = src_slot;
+	tm->move.dst_slot = dst_slot;
+	tm->move.nr_items = nr_items;
+	tm->op = MOD_LOG_MOVE_KEYS;
+
+	ret = __tree_mod_log_insert(fs_info, tm);
+	spin_unlock(&fs_info->tree_mod_seq_lock);
+	return ret;
+}
+
+static noinline int
+tree_mod_log_insert_root(struct btrfs_fs_info *fs_info,
+			 struct extent_buffer *old_root,
+			 struct extent_buffer *new_root, gfp_t flags)
+{
+	struct tree_mod_elem *tm;
+	int ret;
+
+	ret = tree_mod_alloc(fs_info, flags, &tm);
+	if (ret <= 0)
+		return ret;
+
+	tm->index = new_root->start >> PAGE_CACHE_SHIFT;
+	tm->old_root.logical = old_root->start;
+	tm->old_root.level = btrfs_header_level(old_root);
+	tm->generation = btrfs_header_generation(old_root);
+	tm->op = MOD_LOG_ROOT_REPLACE;
+
+	ret = __tree_mod_log_insert(fs_info, tm);
+	spin_unlock(&fs_info->tree_mod_seq_lock);
+	return ret;
+}
+
+static struct tree_mod_elem *
+__tree_mod_log_search(struct btrfs_fs_info *fs_info, u64 start, u64 min_seq,
+		      int smallest)
+{
+	struct rb_root *tm_root;
+	struct rb_node *node;
+	struct tree_mod_elem *cur = NULL;
+	struct tree_mod_elem *found = NULL;
+	u64 index = start >> PAGE_CACHE_SHIFT;
+
+	read_lock(&fs_info->tree_mod_log_lock);
+	tm_root = &fs_info->tree_mod_log;
+	node = tm_root->rb_node;
+	while (node) {
+		cur = container_of(node, struct tree_mod_elem, node);
+		if (cur->index < index) {
+			node = node->rb_left;
+		} else if (cur->index > index) {
+			node = node->rb_right;
+		} else if (cur->elem.seq < min_seq) {
+			node = node->rb_left;
+		} else if (!smallest) {
+			/* we want the node with the highest seq */
+			if (found)
+				BUG_ON(found->elem.seq > cur->elem.seq);
+			found = cur;
+			node = node->rb_left;
+		} else if (cur->elem.seq > min_seq) {
+			/* we want the node with the smallest seq */
+			if (found)
+				BUG_ON(found->elem.seq < cur->elem.seq);
+			found = cur;
+			node = node->rb_right;
+		} else {
+			found = cur;
+			break;
+		}
+	}
+	read_unlock(&fs_info->tree_mod_log_lock);
+
+	return found;
+}
+
+/*
+ * this returns the element from the log with the smallest time sequence
+ * value that's in the log (the oldest log item). any element with a time
+ * sequence lower than min_seq will be ignored.
+ */
+static struct tree_mod_elem *
+tree_mod_log_search_oldest(struct btrfs_fs_info *fs_info, u64 start,
+			   u64 min_seq)
+{
+	return __tree_mod_log_search(fs_info, start, min_seq, 1);
+}
+
+/*
+ * this returns the element from the log with the largest time sequence
+ * value that's in the log (the most recent log item). any element with
+ * a time sequence lower than min_seq will be ignored.
+ */
+static struct tree_mod_elem *
+tree_mod_log_search(struct btrfs_fs_info *fs_info, u64 start, u64 min_seq)
+{
+	return __tree_mod_log_search(fs_info, start, min_seq, 0);
+}
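
A small user-space sketch (assumed types, a linear scan in place of the rb-tree) of the search contract documented above: for a given index, return the qualifying element (seq >= min_seq) with either the smallest or the largest sequence number, or NULL when none exists.

#include <stdint.h>
#include <stddef.h>
#include <stdio.h>

struct log_elem {
	uint64_t index;	/* shifted logical of the block */
	uint64_t seq;	/* modification sequence number */
};

static const struct log_elem *
log_search(const struct log_elem *log, size_t n, uint64_t index,
	   uint64_t min_seq, int smallest)
{
	const struct log_elem *found = NULL;
	size_t i;

	for (i = 0; i < n; i++) {
		if (log[i].index != index || log[i].seq < min_seq)
			continue;
		if (!found ||
		    (smallest ? log[i].seq < found->seq : log[i].seq > found->seq))
			found = &log[i];
	}
	return found;
}

int main(void)
{
	const struct log_elem log[] = {
		{ .index = 7, .seq = 3 }, { .index = 7, .seq = 9 },
		{ .index = 8, .seq = 5 },
	};
	const struct log_elem *e = log_search(log, 3, 7, 2, 0);

	printf("%llu\n", e ? (unsigned long long)e->seq : 0ULL);	/* 9 */
	return 0;
}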
+
+static inline void
+tree_mod_log_eb_copy(struct btrfs_fs_info *fs_info, struct extent_buffer *dst,
+		     struct extent_buffer *src, unsigned long dst_offset,
+		     unsigned long src_offset, int nr_items)
+{
+	int ret;
+	int i;
+
+	if (tree_mod_dont_log(fs_info, NULL))
+		return;
+
+	if (btrfs_header_level(dst) == 0 && btrfs_header_level(src) == 0)
+		return;
+
+	/* speed this up by using a single seq for all operations? */
+	for (i = 0; i < nr_items; i++) {
+		ret = tree_mod_log_insert_key(fs_info, src, i + src_offset,
+					      MOD_LOG_KEY_REMOVE);
+		BUG_ON(ret < 0);
+		ret = tree_mod_log_insert_key(fs_info, dst, i + dst_offset,
+					      MOD_LOG_KEY_ADD);
+		BUG_ON(ret < 0);
+	}
+}
+
+static inline void
+tree_mod_log_eb_move(struct btrfs_fs_info *fs_info, struct extent_buffer *dst,
+		     int dst_offset, int src_offset, int nr_items)
+{
+	int ret;
+	ret = tree_mod_log_insert_move(fs_info, dst, dst_offset, src_offset,
+				       nr_items, GFP_NOFS);
+	BUG_ON(ret < 0);
+}
+
+static inline void
+tree_mod_log_set_node_key(struct btrfs_fs_info *fs_info,
+			  struct extent_buffer *eb,
+			  struct btrfs_disk_key *disk_key, int slot, int atomic)
+{
+	int ret;
+
+	ret = tree_mod_log_insert_key_mask(fs_info, eb, slot,
+					   MOD_LOG_KEY_REPLACE,
+					   atomic ? GFP_ATOMIC : GFP_NOFS);
+	BUG_ON(ret < 0);
+}
+
+static void tree_mod_log_free_eb(struct btrfs_fs_info *fs_info,
+				 struct extent_buffer *eb)
+{
+	int i;
+	int ret;
+	u32 nritems;
+
+	if (tree_mod_dont_log(fs_info, eb))
+		return;
+
+	nritems = btrfs_header_nritems(eb);
+	for (i = nritems - 1; i >= 0; i--) {
+		ret = tree_mod_log_insert_key(fs_info, eb, i,
+					      MOD_LOG_KEY_REMOVE_WHILE_FREEING);
+		BUG_ON(ret < 0);
+	}
+}
+
+static inline void
+tree_mod_log_set_root_pointer(struct btrfs_root *root,
+			      struct extent_buffer *new_root_node)
+{
+	int ret;
+	tree_mod_log_free_eb(root->fs_info, root->node);
+	ret = tree_mod_log_insert_root(root->fs_info, root->node,
+				       new_root_node, GFP_NOFS);
+	BUG_ON(ret < 0);
+}
+
 /*
  * check if the tree block can be shared by multiple trees
  */
@@ -409,6 +862,12 @@
 			ret = btrfs_dec_ref(trans, root, buf, 1, 1);
 			BUG_ON(ret); /* -ENOMEM */
 		}
+		/*
+		 * don't log freeing in case we're freeing the root node, this
+		 * is done by tree_mod_log_set_root_pointer later
+		 */
+		if (buf != root->node && btrfs_header_level(buf) != 0)
+			tree_mod_log_free_eb(root->fs_info, buf);
 		clean_tree_block(trans, root, buf);
 		*last_ref = 1;
 	}
@@ -467,7 +926,7 @@
 
 	cow = btrfs_alloc_free_block(trans, root, buf->len, parent_start,
 				     root->root_key.objectid, &disk_key,
-				     level, search_start, empty_size, 1);
+				     level, search_start, empty_size);
 	if (IS_ERR(cow))
 		return PTR_ERR(cow);
 
@@ -506,10 +965,11 @@
 			parent_start = 0;
 
 		extent_buffer_get(cow);
+		tree_mod_log_set_root_pointer(root, cow);
 		rcu_assign_pointer(root->node, cow);
 
 		btrfs_free_tree_block(trans, root, buf, parent_start,
-				      last_ref, 1);
+				      last_ref);
 		free_extent_buffer(buf);
 		add_root_to_dirty_list(root);
 	} else {
@@ -519,13 +979,15 @@
 			parent_start = 0;
 
 		WARN_ON(trans->transid != btrfs_header_generation(parent));
+		tree_mod_log_insert_key(root->fs_info, parent, parent_slot,
+					MOD_LOG_KEY_REPLACE);
 		btrfs_set_node_blockptr(parent, parent_slot,
 					cow->start);
 		btrfs_set_node_ptr_generation(parent, parent_slot,
 					      trans->transid);
 		btrfs_mark_buffer_dirty(parent);
 		btrfs_free_tree_block(trans, root, buf, parent_start,
-				      last_ref, 1);
+				      last_ref);
 	}
 	if (unlock_orig)
 		btrfs_tree_unlock(buf);
@@ -535,6 +997,231 @@
 	return 0;
 }
 
+/*
+ * returns the logical address of the oldest predecessor of the given root.
+ * entries older than time_seq are ignored.
+ */
+static struct tree_mod_elem *
+__tree_mod_log_oldest_root(struct btrfs_fs_info *fs_info,
+			   struct btrfs_root *root, u64 time_seq)
+{
+	struct tree_mod_elem *tm;
+	struct tree_mod_elem *found = NULL;
+	u64 root_logical = root->node->start;
+	int looped = 0;
+
+	if (!time_seq)
+		return 0;
+
+	/*
+	 * the very last operation that's logged for a root is the replacement
+	 * operation (if it is replaced at all). this has the index of the *new*
+	 * root, making it the very first operation that's logged for this root.
+	 */
+	while (1) {
+		tm = tree_mod_log_search_oldest(fs_info, root_logical,
+						time_seq);
+		if (!looped && !tm)
+			return 0;
+		/*
+		 * we must have key remove operations in the log before the
+		 * replace operation.
+		 */
+		BUG_ON(!tm);
+
+		if (tm->op != MOD_LOG_ROOT_REPLACE)
+			break;
+
+		found = tm;
+		root_logical = tm->old_root.logical;
+		BUG_ON(root_logical == root->node->start);
+		looped = 1;
+	}
+
+	/* if there's no old root to return, return what we found instead */
+	if (!found)
+		found = tm;
+
+	return found;
+}
+
+/*
+ * tm is a pointer to the first operation to rewind within eb. then, all
+ * previous operations will be rewound (until we reach something older than
+ * time_seq).
+ */
+static void
+__tree_mod_log_rewind(struct extent_buffer *eb, u64 time_seq,
+		      struct tree_mod_elem *first_tm)
+{
+	u32 n;
+	struct rb_node *next;
+	struct tree_mod_elem *tm = first_tm;
+	unsigned long o_dst;
+	unsigned long o_src;
+	unsigned long p_size = sizeof(struct btrfs_key_ptr);
+
+	n = btrfs_header_nritems(eb);
+	while (tm && tm->elem.seq >= time_seq) {
+		/*
+		 * all the operations are recorded with the operator used for
+		 * the modification. as we're going backwards, we do the
+		 * opposite of each operation here.
+		 */
+		switch (tm->op) {
+		case MOD_LOG_KEY_REMOVE_WHILE_FREEING:
+			BUG_ON(tm->slot < n);
+		case MOD_LOG_KEY_REMOVE_WHILE_MOVING:
+		case MOD_LOG_KEY_REMOVE:
+			btrfs_set_node_key(eb, &tm->key, tm->slot);
+			btrfs_set_node_blockptr(eb, tm->slot, tm->blockptr);
+			btrfs_set_node_ptr_generation(eb, tm->slot,
+						      tm->generation);
+			n++;
+			break;
+		case MOD_LOG_KEY_REPLACE:
+			BUG_ON(tm->slot >= n);
+			btrfs_set_node_key(eb, &tm->key, tm->slot);
+			btrfs_set_node_blockptr(eb, tm->slot, tm->blockptr);
+			btrfs_set_node_ptr_generation(eb, tm->slot,
+						      tm->generation);
+			break;
+		case MOD_LOG_KEY_ADD:
+			if (tm->slot != n - 1) {
+				o_dst = btrfs_node_key_ptr_offset(tm->slot);
+				o_src = btrfs_node_key_ptr_offset(tm->slot + 1);
+				memmove_extent_buffer(eb, o_dst, o_src, p_size);
+			}
+			n--;
+			break;
+		case MOD_LOG_MOVE_KEYS:
+			o_dst = btrfs_node_key_ptr_offset(tm->slot);
+			o_src = btrfs_node_key_ptr_offset(tm->move.dst_slot);
+			memmove_extent_buffer(eb, o_dst, o_src,
+					      tm->move.nr_items * p_size);
+			break;
+		case MOD_LOG_ROOT_REPLACE:
+			/*
+			 * this operation is special. for roots, this must be
+			 * handled explicitly before rewinding.
+			 * for non-roots, this operation may exist if the node
+			 * was a root: root A -> child B; then A becomes empty
+			 * and B is promoted to the new root. in the mod log,
+			 * we'll have a root-replace operation for B, a tree
+			 * block that is not a root. we simply ignore that
+			 * operation.
+			 */
+			break;
+		}
+		next = rb_next(&tm->node);
+		if (!next)
+			break;
+		tm = container_of(next, struct tree_mod_elem, node);
+		if (tm->index != first_tm->index)
+			break;
+	}
+	btrfs_set_header_nritems(eb, n);
+}
+
+static struct extent_buffer *
+tree_mod_log_rewind(struct btrfs_fs_info *fs_info, struct extent_buffer *eb,
+		    u64 time_seq)
+{
+	struct extent_buffer *eb_rewin;
+	struct tree_mod_elem *tm;
+
+	if (!time_seq)
+		return eb;
+
+	if (btrfs_header_level(eb) == 0)
+		return eb;
+
+	tm = tree_mod_log_search(fs_info, eb->start, time_seq);
+	if (!tm)
+		return eb;
+
+	if (tm->op == MOD_LOG_KEY_REMOVE_WHILE_FREEING) {
+		BUG_ON(tm->slot != 0);
+		eb_rewin = alloc_dummy_extent_buffer(eb->start,
+						fs_info->tree_root->nodesize);
+		BUG_ON(!eb_rewin);
+		btrfs_set_header_bytenr(eb_rewin, eb->start);
+		btrfs_set_header_backref_rev(eb_rewin,
+					     btrfs_header_backref_rev(eb));
+		btrfs_set_header_owner(eb_rewin, btrfs_header_owner(eb));
+		btrfs_set_header_level(eb_rewin, btrfs_header_level(eb));
+	} else {
+		eb_rewin = btrfs_clone_extent_buffer(eb);
+		BUG_ON(!eb_rewin);
+	}
+
+	extent_buffer_get(eb_rewin);
+	free_extent_buffer(eb);
+
+	__tree_mod_log_rewind(eb_rewin, time_seq, tm);
+
+	return eb_rewin;
+}
+
+/*
+ * get_old_root() rewinds the state of @root's root node to the given @time_seq
+ * value. If there are no changes, the current root->node is returned. If
+ * anything changed in between, there's a fresh buffer allocated on which the
+ * rewind operations are done. In any case, the returned buffer is read locked.
+ * Returns NULL on error (with no locks held).
+ */
+static inline struct extent_buffer *
+get_old_root(struct btrfs_root *root, u64 time_seq)
+{
+	struct tree_mod_elem *tm;
+	struct extent_buffer *eb;
+	struct tree_mod_root *old_root = NULL;
+	u64 old_generation = 0;
+	u64 logical;
+
+	eb = btrfs_read_lock_root_node(root);
+	tm = __tree_mod_log_oldest_root(root->fs_info, root, time_seq);
+	if (!tm)
+		return root->node;
+
+	if (tm->op == MOD_LOG_ROOT_REPLACE) {
+		old_root = &tm->old_root;
+		old_generation = tm->generation;
+		logical = old_root->logical;
+	} else {
+		logical = root->node->start;
+	}
+
+	tm = tree_mod_log_search(root->fs_info, logical, time_seq);
+	/*
+	 * there was an item in the log when __tree_mod_log_oldest_root
+	 * returned. this one must not go away, because the time_seq passed to
+	 * us must be blocking its removal.
+	 */
+	BUG_ON(!tm);
+
+	if (old_root)
+		eb = alloc_dummy_extent_buffer(tm->index << PAGE_CACHE_SHIFT,
+					       root->nodesize);
+	else
+		eb = btrfs_clone_extent_buffer(root->node);
+	btrfs_tree_read_unlock(root->node);
+	free_extent_buffer(root->node);
+	if (!eb)
+		return NULL;
+	btrfs_tree_read_lock(eb);
+	if (old_root) {
+		btrfs_set_header_bytenr(eb, eb->start);
+		btrfs_set_header_backref_rev(eb, BTRFS_MIXED_BACKREF_REV);
+		btrfs_set_header_owner(eb, root->root_key.objectid);
+		btrfs_set_header_level(eb, old_root->level);
+		btrfs_set_header_generation(eb, old_generation);
+	}
+	__tree_mod_log_rewind(eb, time_seq, tm);
+	extent_buffer_get(eb);
+
+	return eb;
+}
+
 static inline int should_cow_block(struct btrfs_trans_handle *trans,
 				   struct btrfs_root *root,
 				   struct extent_buffer *buf)
@@ -739,7 +1426,11 @@
 				if (!cur)
 					return -EIO;
 			} else if (!uptodate) {
-				btrfs_read_buffer(cur, gen);
+				err = btrfs_read_buffer(cur, gen);
+				if (err) {
+					free_extent_buffer(cur);
+					return err;
+				}
 			}
 		}
 		if (search_start == 0)
@@ -854,20 +1545,18 @@
 static int bin_search(struct extent_buffer *eb, struct btrfs_key *key,
 		      int level, int *slot)
 {
-	if (level == 0) {
+	if (level == 0)
 		return generic_bin_search(eb,
 					  offsetof(struct btrfs_leaf, items),
 					  sizeof(struct btrfs_item),
 					  key, btrfs_header_nritems(eb),
 					  slot);
-	} else {
+	else
 		return generic_bin_search(eb,
 					  offsetof(struct btrfs_node, ptrs),
 					  sizeof(struct btrfs_key_ptr),
 					  key, btrfs_header_nritems(eb),
 					  slot);
-	}
-	return -1;
 }
 
 int btrfs_bin_search(struct extent_buffer *eb, struct btrfs_key *key,
@@ -974,6 +1663,7 @@
 			goto enospc;
 		}
 
+		tree_mod_log_set_root_pointer(root, child);
 		rcu_assign_pointer(root->node, child);
 
 		add_root_to_dirty_list(root);
@@ -987,7 +1677,7 @@
 		free_extent_buffer(mid);
 
 		root_sub_used(root, mid->len);
-		btrfs_free_tree_block(trans, root, mid, 0, 1, 0);
+		btrfs_free_tree_block(trans, root, mid, 0, 1);
 		/* once for the root ptr */
 		free_extent_buffer_stale(mid);
 		return 0;
@@ -996,8 +1686,6 @@
 	    BTRFS_NODEPTRS_PER_BLOCK(root) / 4)
 		return 0;
 
-	btrfs_header_nritems(mid);
-
 	left = read_node_slot(root, parent, pslot - 1);
 	if (left) {
 		btrfs_tree_lock(left);
@@ -1027,7 +1715,6 @@
 		wret = push_node_left(trans, root, left, mid, 1);
 		if (wret < 0)
 			ret = wret;
-		btrfs_header_nritems(mid);
 	}
 
 	/*
@@ -1040,14 +1727,16 @@
 		if (btrfs_header_nritems(right) == 0) {
 			clean_tree_block(trans, root, right);
 			btrfs_tree_unlock(right);
-			del_ptr(trans, root, path, level + 1, pslot + 1);
+			del_ptr(trans, root, path, level + 1, pslot + 1, 1);
 			root_sub_used(root, right->len);
-			btrfs_free_tree_block(trans, root, right, 0, 1, 0);
+			btrfs_free_tree_block(trans, root, right, 0, 1);
 			free_extent_buffer_stale(right);
 			right = NULL;
 		} else {
 			struct btrfs_disk_key right_key;
 			btrfs_node_key(right, &right_key, 0);
+			tree_mod_log_set_node_key(root->fs_info, parent,
+						  &right_key, pslot + 1, 0);
 			btrfs_set_node_key(parent, &right_key, pslot + 1);
 			btrfs_mark_buffer_dirty(parent);
 		}
@@ -1082,15 +1771,17 @@
 	if (btrfs_header_nritems(mid) == 0) {
 		clean_tree_block(trans, root, mid);
 		btrfs_tree_unlock(mid);
-		del_ptr(trans, root, path, level + 1, pslot);
+		del_ptr(trans, root, path, level + 1, pslot, 1);
 		root_sub_used(root, mid->len);
-		btrfs_free_tree_block(trans, root, mid, 0, 1, 0);
+		btrfs_free_tree_block(trans, root, mid, 0, 1);
 		free_extent_buffer_stale(mid);
 		mid = NULL;
 	} else {
 		/* update the parent key to reflect our changes */
 		struct btrfs_disk_key mid_key;
 		btrfs_node_key(mid, &mid_key, 0);
+		tree_mod_log_set_node_key(root->fs_info, parent, &mid_key,
+					  pslot, 0);
 		btrfs_set_node_key(parent, &mid_key, pslot);
 		btrfs_mark_buffer_dirty(parent);
 	}
@@ -1188,6 +1879,8 @@
 			struct btrfs_disk_key disk_key;
 			orig_slot += left_nr;
 			btrfs_node_key(mid, &disk_key, 0);
+			tree_mod_log_set_node_key(root->fs_info, parent,
+						  &disk_key, pslot, 0);
 			btrfs_set_node_key(parent, &disk_key, pslot);
 			btrfs_mark_buffer_dirty(parent);
 			if (btrfs_header_nritems(left) > orig_slot) {
@@ -1239,6 +1932,8 @@
 			struct btrfs_disk_key disk_key;
 
 			btrfs_node_key(right, &disk_key, 0);
+			tree_mod_log_set_node_key(root->fs_info, parent,
+						  &disk_key, pslot + 1, 0);
 			btrfs_set_node_key(parent, &disk_key, pslot + 1);
 			btrfs_mark_buffer_dirty(parent);
 
@@ -1496,7 +2191,7 @@
 read_block_for_search(struct btrfs_trans_handle *trans,
 		       struct btrfs_root *root, struct btrfs_path *p,
 		       struct extent_buffer **eb_ret, int level, int slot,
-		       struct btrfs_key *key)
+		       struct btrfs_key *key, u64 time_seq)
 {
 	u64 blocknr;
 	u64 gen;
@@ -1850,7 +2545,7 @@
 			}
 
 			err = read_block_for_search(trans, root, p,
-						    &b, level, slot, key);
+						    &b, level, slot, key, 0);
 			if (err == -EAGAIN)
 				goto again;
 			if (err) {
@@ -1922,6 +2617,113 @@
 }
 
 /*
+ * Like btrfs_search_slot, this looks for a key in the given tree. It uses the
+ * current state of the tree together with the operations recorded in the tree
+ * modification log to search for the key in a previous version of this tree, as
+ * denoted by the time_seq parameter.
+ *
+ * Naturally, there is no support for insert, delete or cow operations.
+ *
+ * The resulting path and return value will be set up as if we called
+ * btrfs_search_slot at that point in time with ins_len and cow both set to 0.
+ */
+int btrfs_search_old_slot(struct btrfs_root *root, struct btrfs_key *key,
+			  struct btrfs_path *p, u64 time_seq)
+{
+	struct extent_buffer *b;
+	int slot;
+	int ret;
+	int err;
+	int level;
+	int lowest_unlock = 1;
+	u8 lowest_level = 0;
+
+	lowest_level = p->lowest_level;
+	WARN_ON(p->nodes[0] != NULL);
+
+	if (p->search_commit_root) {
+		BUG_ON(time_seq);
+		return btrfs_search_slot(NULL, root, key, p, 0, 0);
+	}
+
+again:
+	b = get_old_root(root, time_seq);
+	level = btrfs_header_level(b);
+	p->locks[level] = BTRFS_READ_LOCK;
+
+	while (b) {
+		level = btrfs_header_level(b);
+		p->nodes[level] = b;
+		btrfs_clear_path_blocking(p, NULL, 0);
+
+		/*
+		 * we have a lock on b and as long as we aren't changing
+		 * the tree, there is no way for the items in b to change.
+		 * It is safe to drop the lock on our parent before we
+		 * go through the expensive btree search on b.
+		 */
+		btrfs_unlock_up_safe(p, level + 1);
+
+		ret = bin_search(b, key, level, &slot);
+
+		if (level != 0) {
+			int dec = 0;
+			if (ret && slot > 0) {
+				dec = 1;
+				slot -= 1;
+			}
+			p->slots[level] = slot;
+			unlock_up(p, level, lowest_unlock, 0, NULL);
+
+			if (level == lowest_level) {
+				if (dec)
+					p->slots[level]++;
+				goto done;
+			}
+
+			err = read_block_for_search(NULL, root, p, &b, level,
+						    slot, key, time_seq);
+			if (err == -EAGAIN)
+				goto again;
+			if (err) {
+				ret = err;
+				goto done;
+			}
+
+			level = btrfs_header_level(b);
+			err = btrfs_try_tree_read_lock(b);
+			if (!err) {
+				btrfs_set_path_blocking(p);
+				btrfs_tree_read_lock(b);
+				btrfs_clear_path_blocking(p, b,
+							  BTRFS_READ_LOCK);
+			}
+			p->locks[level] = BTRFS_READ_LOCK;
+			p->nodes[level] = b;
+			b = tree_mod_log_rewind(root->fs_info, b, time_seq);
+			if (b != p->nodes[level]) {
+				btrfs_tree_unlock_rw(p->nodes[level],
+						     p->locks[level]);
+				p->locks[level] = 0;
+				p->nodes[level] = b;
+			}
+		} else {
+			p->slots[level] = slot;
+			unlock_up(p, level, lowest_unlock, 0, NULL);
+			goto done;
+		}
+	}
+	ret = 1;
+done:
+	if (!p->leave_spinning)
+		btrfs_set_path_blocking(p);
+	if (ret < 0)
+		btrfs_release_path(p);
+
+	return ret;
+}
+
+/*
  * adjust the pointers going up the tree, starting at level
  * making sure the right key of each node points to 'key'.
  * This is used after shifting pointers to the left, so it stops
@@ -1941,6 +2743,7 @@
 		if (!path->nodes[i])
 			break;
 		t = path->nodes[i];
+		tree_mod_log_set_node_key(root->fs_info, t, key, tslot, 1);
 		btrfs_set_node_key(t, key, tslot);
 		btrfs_mark_buffer_dirty(path->nodes[i]);
 		if (tslot != 0)
@@ -2023,12 +2826,16 @@
 	} else
 		push_items = min(src_nritems - 8, push_items);
 
+	tree_mod_log_eb_copy(root->fs_info, dst, src, dst_nritems, 0,
+			     push_items);
 	copy_extent_buffer(dst, src,
 			   btrfs_node_key_ptr_offset(dst_nritems),
 			   btrfs_node_key_ptr_offset(0),
 			   push_items * sizeof(struct btrfs_key_ptr));
 
 	if (push_items < src_nritems) {
+		tree_mod_log_eb_move(root->fs_info, src, 0, push_items,
+				     src_nritems - push_items);
 		memmove_extent_buffer(src, btrfs_node_key_ptr_offset(0),
 				      btrfs_node_key_ptr_offset(push_items),
 				      (src_nritems - push_items) *
@@ -2082,11 +2889,14 @@
 	if (max_push < push_items)
 		push_items = max_push;
 
+	tree_mod_log_eb_move(root->fs_info, dst, push_items, 0, dst_nritems);
 	memmove_extent_buffer(dst, btrfs_node_key_ptr_offset(push_items),
 				      btrfs_node_key_ptr_offset(0),
 				      (dst_nritems) *
 				      sizeof(struct btrfs_key_ptr));
 
+	tree_mod_log_eb_copy(root->fs_info, dst, src, 0,
+			     src_nritems - push_items, push_items);
 	copy_extent_buffer(dst, src,
 			   btrfs_node_key_ptr_offset(0),
 			   btrfs_node_key_ptr_offset(src_nritems - push_items),
@@ -2129,7 +2939,7 @@
 
 	c = btrfs_alloc_free_block(trans, root, root->nodesize, 0,
 				   root->root_key.objectid, &lower_key,
-				   level, root->node->start, 0, 0);
+				   level, root->node->start, 0);
 	if (IS_ERR(c))
 		return PTR_ERR(c);
 
@@ -2161,6 +2971,7 @@
 	btrfs_mark_buffer_dirty(c);
 
 	old = root->node;
+	tree_mod_log_set_root_pointer(root, c);
 	rcu_assign_pointer(root->node, c);
 
 	/* the super has an extra ref to root->node */
@@ -2184,10 +2995,11 @@
 static void insert_ptr(struct btrfs_trans_handle *trans,
 		       struct btrfs_root *root, struct btrfs_path *path,
 		       struct btrfs_disk_key *key, u64 bytenr,
-		       int slot, int level)
+		       int slot, int level, int tree_mod_log)
 {
 	struct extent_buffer *lower;
 	int nritems;
+	int ret;
 
 	BUG_ON(!path->nodes[level]);
 	btrfs_assert_tree_locked(path->nodes[level]);
@@ -2196,11 +3008,19 @@
 	BUG_ON(slot > nritems);
 	BUG_ON(nritems == BTRFS_NODEPTRS_PER_BLOCK(root));
 	if (slot != nritems) {
+		if (tree_mod_log && level)
+			tree_mod_log_eb_move(root->fs_info, lower, slot + 1,
+					     slot, nritems - slot);
 		memmove_extent_buffer(lower,
 			      btrfs_node_key_ptr_offset(slot + 1),
 			      btrfs_node_key_ptr_offset(slot),
 			      (nritems - slot) * sizeof(struct btrfs_key_ptr));
 	}
+	if (tree_mod_log && level) {
+		ret = tree_mod_log_insert_key(root->fs_info, lower, slot,
+					      MOD_LOG_KEY_ADD);
+		BUG_ON(ret < 0);
+	}
 	btrfs_set_node_key(lower, key, slot);
 	btrfs_set_node_blockptr(lower, slot, bytenr);
 	WARN_ON(trans->transid == 0);
@@ -2252,7 +3072,7 @@
 
 	split = btrfs_alloc_free_block(trans, root, root->nodesize, 0,
 					root->root_key.objectid,
-					&disk_key, level, c->start, 0, 0);
+					&disk_key, level, c->start, 0);
 	if (IS_ERR(split))
 		return PTR_ERR(split);
 
@@ -2271,7 +3091,7 @@
 			    (unsigned long)btrfs_header_chunk_tree_uuid(split),
 			    BTRFS_UUID_SIZE);
 
-
+	tree_mod_log_eb_copy(root->fs_info, split, c, 0, mid, c_nritems - mid);
 	copy_extent_buffer(split, c,
 			   btrfs_node_key_ptr_offset(0),
 			   btrfs_node_key_ptr_offset(mid),
@@ -2284,7 +3104,7 @@
 	btrfs_mark_buffer_dirty(split);
 
 	insert_ptr(trans, root, path, &disk_key, split->start,
-		   path->slots[level + 1] + 1, level + 1);
+		   path->slots[level + 1] + 1, level + 1, 1);
 
 	if (path->slots[level] >= mid) {
 		path->slots[level] -= mid;
@@ -2821,7 +3641,7 @@
 	btrfs_set_header_nritems(l, mid);
 	btrfs_item_key(right, &disk_key, 0);
 	insert_ptr(trans, root, path, &disk_key, right->start,
-		   path->slots[1] + 1, 1);
+		   path->slots[1] + 1, 1, 0);
 
 	btrfs_mark_buffer_dirty(right);
 	btrfs_mark_buffer_dirty(l);
@@ -3004,7 +3824,7 @@
 
 	right = btrfs_alloc_free_block(trans, root, root->leafsize, 0,
 					root->root_key.objectid,
-					&disk_key, 0, l->start, 0, 0);
+					&disk_key, 0, l->start, 0);
 	if (IS_ERR(right))
 		return PTR_ERR(right);
 
@@ -3028,7 +3848,7 @@
 		if (mid <= slot) {
 			btrfs_set_header_nritems(right, 0);
 			insert_ptr(trans, root, path, &disk_key, right->start,
-				   path->slots[1] + 1, 1);
+				   path->slots[1] + 1, 1, 0);
 			btrfs_tree_unlock(path->nodes[0]);
 			free_extent_buffer(path->nodes[0]);
 			path->nodes[0] = right;
@@ -3037,7 +3857,7 @@
 		} else {
 			btrfs_set_header_nritems(right, 0);
 			insert_ptr(trans, root, path, &disk_key, right->start,
-					  path->slots[1], 1);
+					  path->slots[1], 1, 0);
 			btrfs_tree_unlock(path->nodes[0]);
 			free_extent_buffer(path->nodes[0]);
 			path->nodes[0] = right;
@@ -3749,19 +4569,29 @@
  * empty a node.
  */
 static void del_ptr(struct btrfs_trans_handle *trans, struct btrfs_root *root,
-		    struct btrfs_path *path, int level, int slot)
+		    struct btrfs_path *path, int level, int slot,
+		    int tree_mod_log)
 {
 	struct extent_buffer *parent = path->nodes[level];
 	u32 nritems;
+	int ret;
 
 	nritems = btrfs_header_nritems(parent);
 	if (slot != nritems - 1) {
+		if (tree_mod_log && level)
+			tree_mod_log_eb_move(root->fs_info, parent, slot,
+					     slot + 1, nritems - slot - 1);
 		memmove_extent_buffer(parent,
 			      btrfs_node_key_ptr_offset(slot),
 			      btrfs_node_key_ptr_offset(slot + 1),
 			      sizeof(struct btrfs_key_ptr) *
 			      (nritems - slot - 1));
+	} else if (tree_mod_log && level) {
+		ret = tree_mod_log_insert_key(root->fs_info, parent, slot,
+					      MOD_LOG_KEY_REMOVE);
+		BUG_ON(ret < 0);
 	}
+
 	nritems--;
 	btrfs_set_header_nritems(parent, nritems);
 	if (nritems == 0 && parent == root->node) {
@@ -3793,7 +4623,7 @@
 				    struct extent_buffer *leaf)
 {
 	WARN_ON(btrfs_header_generation(leaf) != trans->transid);
-	del_ptr(trans, root, path, 1, path->slots[1]);
+	del_ptr(trans, root, path, 1, path->slots[1], 1);
 
 	/*
 	 * btrfs_free_extent is expensive, we want to make sure we
@@ -3804,7 +4634,7 @@
 	root_sub_used(root, leaf->len);
 
 	extent_buffer_get(leaf);
-	btrfs_free_tree_block(trans, root, leaf, 0, 1, 0);
+	btrfs_free_tree_block(trans, root, leaf, 0, 1);
 	free_extent_buffer_stale(leaf);
 }
 /*
@@ -4202,6 +5032,12 @@
  */
 int btrfs_next_leaf(struct btrfs_root *root, struct btrfs_path *path)
 {
+	return btrfs_next_old_leaf(root, path, 0);
+}
+
+int btrfs_next_old_leaf(struct btrfs_root *root, struct btrfs_path *path,
+			u64 time_seq)
+{
 	int slot;
 	int level;
 	struct extent_buffer *c;
@@ -4226,7 +5062,10 @@
 	path->keep_locks = 1;
 	path->leave_spinning = 1;
 
-	ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
+	if (time_seq)
+		ret = btrfs_search_old_slot(root, &key, path, time_seq);
+	else
+		ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
 	path->keep_locks = 0;
 
 	if (ret < 0)
@@ -4271,7 +5110,7 @@
 		next = c;
 		next_rw_lock = path->locks[level];
 		ret = read_block_for_search(NULL, root, path, &next, level,
-					    slot, &key);
+					    slot, &key, 0);
 		if (ret == -EAGAIN)
 			goto again;
 
@@ -4308,7 +5147,7 @@
 			break;
 
 		ret = read_block_for_search(NULL, root, path, &next, level,
-					    0, &key);
+					    0, &key, 0);
 		if (ret == -EAGAIN)
 			goto again;
 
diff --git a/fs/btrfs/ctree.h b/fs/btrfs/ctree.h
index 8fd7233..fa5c45b 100644
--- a/fs/btrfs/ctree.h
+++ b/fs/btrfs/ctree.h
@@ -173,6 +173,9 @@
 #define BTRFS_FT_XATTR		8
 #define BTRFS_FT_MAX		9
 
+/* ioprio of readahead is set to idle */
+#define BTRFS_IOPRIO_READA (IOPRIO_PRIO_VALUE(IOPRIO_CLASS_IDLE, 0))
+
 /*
  * The key defines the order in the tree, and so it also defines (optimal)
  * block layout.
@@ -823,6 +826,14 @@
 	u8 csum;
 } __attribute__ ((__packed__));
 
+struct btrfs_dev_stats_item {
+	/*
+	 * grow this item struct at the end for future enhancements and keep
+	 * the existing values unchanged
+	 */
+	__le64 values[BTRFS_DEV_STAT_VALUES_MAX];
+} __attribute__ ((__packed__));
+
 /* different types of block groups (and chunks) */
 #define BTRFS_BLOCK_GROUP_DATA		(1ULL << 0)
 #define BTRFS_BLOCK_GROUP_SYSTEM	(1ULL << 1)
@@ -1129,6 +1140,15 @@
 	spinlock_t delayed_iput_lock;
 	struct list_head delayed_iputs;
 
+	/* this protects tree_mod_seq_list */
+	spinlock_t tree_mod_seq_lock;
+	atomic_t tree_mod_seq;
+	struct list_head tree_mod_seq_list;
+
+	/* this protects tree_mod_log */
+	rwlock_t tree_mod_log_lock;
+	struct rb_root tree_mod_log;
+
 	atomic_t nr_async_submits;
 	atomic_t async_submit_draining;
 	atomic_t nr_async_bios;
@@ -1375,7 +1395,7 @@
 	struct list_head root_list;
 
 	spinlock_t orphan_lock;
-	struct list_head orphan_list;
+	atomic_t orphan_inodes;
 	struct btrfs_block_rsv *orphan_block_rsv;
 	int orphan_item_inserted;
 	int orphan_cleanup_state;
@@ -1508,6 +1528,12 @@
 #define BTRFS_BALANCE_ITEM_KEY	248
 
 /*
+ * Persistently stores the io stats in the device tree.
+ * One key for all stats, (0, BTRFS_DEV_STATS_KEY, devid).
+ */
+#define BTRFS_DEV_STATS_KEY	249
+
+/*
  * string items are for debugging.  They just store a short string of
  * data in the FS
  */
@@ -2415,6 +2441,30 @@
 	return btrfs_item_size(eb, e) - offset;
 }
 
+/* btrfs_dev_stats_item */
+static inline u64 btrfs_dev_stats_value(struct extent_buffer *eb,
+					struct btrfs_dev_stats_item *ptr,
+					int index)
+{
+	u64 val;
+
+	read_extent_buffer(eb, &val,
+			   offsetof(struct btrfs_dev_stats_item, values) +
+			    ((unsigned long)ptr) + (index * sizeof(u64)),
+			   sizeof(val));
+	return val;
+}
+
+static inline void btrfs_set_dev_stats_value(struct extent_buffer *eb,
+					     struct btrfs_dev_stats_item *ptr,
+					     int index, u64 val)
+{
+	write_extent_buffer(eb, &val,
+			    offsetof(struct btrfs_dev_stats_item, values) +
+			     ((unsigned long)ptr) + (index * sizeof(u64)),
+			    sizeof(val));
+}
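
A stand-alone sketch (illustrative array size and buffer layout) of the accessor arithmetic above: one u64 counter is read out of a flat byte buffer at offsetof(values) + index * sizeof(u64), which is what btrfs_dev_stats_value() does against the extent buffer; the on-disk little-endian conversion is omitted.

#include <stdint.h>
#include <stddef.h>
#include <string.h>
#include <stdio.h>

#define DEV_STAT_VALUES_MAX 5	/* illustrative size, not the kernel constant */

struct dev_stats_item {
	uint64_t values[DEV_STAT_VALUES_MAX];
} __attribute__ ((__packed__));

/* read counter @index of an item serialized at @item_off inside @buf */
static uint64_t stats_value(const unsigned char *buf, size_t item_off, int index)
{
	uint64_t val;

	memcpy(&val, buf + item_off +
		     offsetof(struct dev_stats_item, values) +
		     index * sizeof(uint64_t), sizeof(val));
	return val;
}

int main(void)
{
	unsigned char buf[sizeof(struct dev_stats_item)];
	struct dev_stats_item item = { .values = { 1, 2, 3, 4, 5 } };

	memcpy(buf, &item, sizeof(item));
	printf("%llu\n", (unsigned long long)stats_value(buf, 0, 2));	/* 3 */
	return 0;
}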
+
 static inline struct btrfs_fs_info *btrfs_sb(struct super_block *sb)
 {
 	return sb->s_fs_info;
@@ -2496,11 +2546,11 @@
 					struct btrfs_root *root, u32 blocksize,
 					u64 parent, u64 root_objectid,
 					struct btrfs_disk_key *key, int level,
-					u64 hint, u64 empty_size, int for_cow);
+					u64 hint, u64 empty_size);
 void btrfs_free_tree_block(struct btrfs_trans_handle *trans,
 			   struct btrfs_root *root,
 			   struct extent_buffer *buf,
-			   u64 parent, int last_ref, int for_cow);
+			   u64 parent, int last_ref);
 struct extent_buffer *btrfs_init_new_buffer(struct btrfs_trans_handle *trans,
 					    struct btrfs_root *root,
 					    u64 bytenr, u32 blocksize,
@@ -2659,6 +2709,8 @@
 int btrfs_search_slot(struct btrfs_trans_handle *trans, struct btrfs_root
 		      *root, struct btrfs_key *key, struct btrfs_path *p, int
 		      ins_len, int cow);
+int btrfs_search_old_slot(struct btrfs_root *root, struct btrfs_key *key,
+			  struct btrfs_path *p, u64 time_seq);
 int btrfs_realloc_node(struct btrfs_trans_handle *trans,
 		       struct btrfs_root *root, struct extent_buffer *parent,
 		       int start_slot, int cache_only, u64 *last_ret,
@@ -2701,13 +2753,20 @@
 }
 
 int btrfs_next_leaf(struct btrfs_root *root, struct btrfs_path *path);
-static inline int btrfs_next_item(struct btrfs_root *root, struct btrfs_path *p)
+int btrfs_next_old_leaf(struct btrfs_root *root, struct btrfs_path *path,
+			u64 time_seq);
+static inline int btrfs_next_old_item(struct btrfs_root *root,
+				      struct btrfs_path *p, u64 time_seq)
 {
 	++p->slots[0];
 	if (p->slots[0] >= btrfs_header_nritems(p->nodes[0]))
-		return btrfs_next_leaf(root, p);
+		return btrfs_next_old_leaf(root, p, time_seq);
 	return 0;
 }
+static inline int btrfs_next_item(struct btrfs_root *root, struct btrfs_path *p)
+{
+	return btrfs_next_old_item(root, p, 0);
+}
 int btrfs_prev_leaf(struct btrfs_root *root, struct btrfs_path *path);
 int btrfs_leaf_free_space(struct btrfs_root *root, struct extent_buffer *leaf);
 int __must_check btrfs_drop_snapshot(struct btrfs_root *root,
@@ -2922,7 +2981,6 @@
 void btrfs_evict_inode(struct inode *inode);
 int btrfs_write_inode(struct inode *inode, struct writeback_control *wbc);
 int btrfs_dirty_inode(struct inode *inode);
-int btrfs_update_time(struct file *file);
 struct inode *btrfs_alloc_inode(struct super_block *sb);
 void btrfs_destroy_inode(struct inode *inode);
 int btrfs_drop_inode(struct inode *inode);
@@ -3098,4 +3156,23 @@
 int btree_readahead_hook(struct btrfs_root *root, struct extent_buffer *eb,
 			 u64 start, int err);
 
+/* delayed seq elem */
+struct seq_list {
+	struct list_head list;
+	u64 seq;
+	u32 flags;
+};
+
+void btrfs_get_tree_mod_seq(struct btrfs_fs_info *fs_info,
+			    struct seq_list *elem);
+void btrfs_put_tree_mod_seq(struct btrfs_fs_info *fs_info,
+			    struct seq_list *elem);
+
+static inline int is_fstree(u64 rootid)
+{
+	if (rootid == BTRFS_FS_TREE_OBJECTID ||
+	    (s64)rootid >= (s64)BTRFS_FIRST_FREE_OBJECTID)
+		return 1;
+	return 0;
+}
 #endif
diff --git a/fs/btrfs/delayed-inode.c b/fs/btrfs/delayed-inode.c
index 03e3748..2399f40 100644
--- a/fs/btrfs/delayed-inode.c
+++ b/fs/btrfs/delayed-inode.c
@@ -669,8 +669,8 @@
 		return ret;
 	} else if (src_rsv == &root->fs_info->delalloc_block_rsv) {
 		spin_lock(&BTRFS_I(inode)->lock);
-		if (BTRFS_I(inode)->delalloc_meta_reserved) {
-			BTRFS_I(inode)->delalloc_meta_reserved = 0;
+		if (test_and_clear_bit(BTRFS_INODE_DELALLOC_META_RESERVED,
+				       &BTRFS_I(inode)->runtime_flags)) {
 			spin_unlock(&BTRFS_I(inode)->lock);
 			release = true;
 			goto migrate;
@@ -1706,7 +1706,7 @@
 	btrfs_set_stack_inode_nbytes(inode_item, inode_get_bytes(inode));
 	btrfs_set_stack_inode_generation(inode_item,
 					 BTRFS_I(inode)->generation);
-	btrfs_set_stack_inode_sequence(inode_item, BTRFS_I(inode)->sequence);
+	btrfs_set_stack_inode_sequence(inode_item, inode->i_version);
 	btrfs_set_stack_inode_transid(inode_item, trans->transid);
 	btrfs_set_stack_inode_rdev(inode_item, inode->i_rdev);
 	btrfs_set_stack_inode_flags(inode_item, BTRFS_I(inode)->flags);
@@ -1754,7 +1754,7 @@
 	set_nlink(inode, btrfs_stack_inode_nlink(inode_item));
 	inode_set_bytes(inode, btrfs_stack_inode_nbytes(inode_item));
 	BTRFS_I(inode)->generation = btrfs_stack_inode_generation(inode_item);
-	BTRFS_I(inode)->sequence = btrfs_stack_inode_sequence(inode_item);
+	inode->i_version = btrfs_stack_inode_sequence(inode_item);
 	inode->i_rdev = 0;
 	*rdev = btrfs_stack_inode_rdev(inode_item);
 	BTRFS_I(inode)->flags = btrfs_stack_inode_flags(inode_item);
@@ -1879,3 +1879,21 @@
 		}
 	}
 }
+
+void btrfs_destroy_delayed_inodes(struct btrfs_root *root)
+{
+	struct btrfs_delayed_root *delayed_root;
+	struct btrfs_delayed_node *curr_node, *prev_node;
+
+	delayed_root = btrfs_get_delayed_root(root);
+
+	curr_node = btrfs_first_delayed_node(delayed_root);
+	while (curr_node) {
+		__btrfs_kill_delayed_node(curr_node);
+
+		prev_node = curr_node;
+		curr_node = btrfs_next_delayed_node(curr_node);
+		btrfs_release_delayed_node(prev_node);
+	}
+}
+
diff --git a/fs/btrfs/delayed-inode.h b/fs/btrfs/delayed-inode.h
index 7083d08..f5aa402 100644
--- a/fs/btrfs/delayed-inode.h
+++ b/fs/btrfs/delayed-inode.h
@@ -124,6 +124,9 @@
 /* Used for drop dead root */
 void btrfs_kill_all_delayed_nodes(struct btrfs_root *root);
 
+/* Used for cleaning up the transaction */
+void btrfs_destroy_delayed_inodes(struct btrfs_root *root);
+
 /* Used for readdir() */
 void btrfs_get_delayed_items(struct inode *inode, struct list_head *ins_list,
 			     struct list_head *del_list);
diff --git a/fs/btrfs/delayed-ref.c b/fs/btrfs/delayed-ref.c
index 69f22e3..13ae7b0 100644
--- a/fs/btrfs/delayed-ref.c
+++ b/fs/btrfs/delayed-ref.c
@@ -525,7 +525,7 @@
 	ref->is_head = 0;
 	ref->in_tree = 1;
 
-	if (need_ref_seq(for_cow, ref_root))
+	if (is_fstree(ref_root))
 		seq = inc_delayed_seq(delayed_refs);
 	ref->seq = seq;
 
@@ -584,7 +584,7 @@
 	ref->is_head = 0;
 	ref->in_tree = 1;
 
-	if (need_ref_seq(for_cow, ref_root))
+	if (is_fstree(ref_root))
 		seq = inc_delayed_seq(delayed_refs);
 	ref->seq = seq;
 
@@ -658,10 +658,11 @@
 	add_delayed_tree_ref(fs_info, trans, &ref->node, bytenr,
 				   num_bytes, parent, ref_root, level, action,
 				   for_cow);
-	if (!need_ref_seq(for_cow, ref_root) &&
+	if (!is_fstree(ref_root) &&
 	    waitqueue_active(&delayed_refs->seq_wait))
 		wake_up(&delayed_refs->seq_wait);
 	spin_unlock(&delayed_refs->lock);
+
 	return 0;
 }
 
@@ -706,10 +707,11 @@
 	add_delayed_data_ref(fs_info, trans, &ref->node, bytenr,
 				   num_bytes, parent, ref_root, owner, offset,
 				   action, for_cow);
-	if (!need_ref_seq(for_cow, ref_root) &&
+	if (!is_fstree(ref_root) &&
 	    waitqueue_active(&delayed_refs->seq_wait))
 		wake_up(&delayed_refs->seq_wait);
 	spin_unlock(&delayed_refs->lock);
+
 	return 0;
 }
 
diff --git a/fs/btrfs/delayed-ref.h b/fs/btrfs/delayed-ref.h
index d8f244d..413927f 100644
--- a/fs/btrfs/delayed-ref.h
+++ b/fs/btrfs/delayed-ref.h
@@ -195,11 +195,6 @@
 int btrfs_find_ref_cluster(struct btrfs_trans_handle *trans,
 			   struct list_head *cluster, u64 search_start);
 
-struct seq_list {
-	struct list_head list;
-	u64 seq;
-};
-
 static inline u64 inc_delayed_seq(struct btrfs_delayed_ref_root *delayed_refs)
 {
 	assert_spin_locked(&delayed_refs->lock);
@@ -230,25 +225,6 @@
 			    u64 seq);
 
 /*
- * delayed refs with a ref_seq > 0 must be held back during backref walking.
- * this only applies to items in one of the fs-trees. for_cow items never need
- * to be held back, so they won't get a ref_seq number.
- */
-static inline int need_ref_seq(int for_cow, u64 rootid)
-{
-	if (for_cow)
-		return 0;
-
-	if (rootid == BTRFS_FS_TREE_OBJECTID)
-		return 1;
-
-	if ((s64)rootid >= (s64)BTRFS_FIRST_FREE_OBJECTID)
-		return 1;
-
-	return 0;
-}
-
-/*
  * a node might live in a head or a regular ref, this lets you
  * test for the proper type to use.
  */
diff --git a/fs/btrfs/disk-io.c b/fs/btrfs/disk-io.c
index e1fe74a..7b845ff 100644
--- a/fs/btrfs/disk-io.c
+++ b/fs/btrfs/disk-io.c
@@ -44,6 +44,7 @@
 #include "free-space-cache.h"
 #include "inode-map.h"
 #include "check-integrity.h"
+#include "rcu-string.h"
 
 static struct extent_io_ops btree_extent_io_ops;
 static void end_workqueue_fn(struct btrfs_work *work);
@@ -1153,7 +1154,6 @@
 	root->orphan_block_rsv = NULL;
 
 	INIT_LIST_HEAD(&root->dirty_list);
-	INIT_LIST_HEAD(&root->orphan_list);
 	INIT_LIST_HEAD(&root->root_list);
 	spin_lock_init(&root->orphan_lock);
 	spin_lock_init(&root->inode_lock);
@@ -1166,6 +1166,7 @@
 	atomic_set(&root->log_commit[0], 0);
 	atomic_set(&root->log_commit[1], 0);
 	atomic_set(&root->log_writers, 0);
+	atomic_set(&root->orphan_inodes, 0);
 	root->log_batch = 0;
 	root->log_transid = 0;
 	root->last_log_commit = 0;
@@ -1252,7 +1253,7 @@
 
 	leaf = btrfs_alloc_free_block(trans, root, root->leafsize, 0,
 				      BTRFS_TREE_LOG_OBJECTID, NULL,
-				      0, 0, 0, 0);
+				      0, 0, 0);
 	if (IS_ERR(leaf)) {
 		kfree(root);
 		return ERR_CAST(leaf);
@@ -1914,11 +1915,14 @@
 	spin_lock_init(&fs_info->delayed_iput_lock);
 	spin_lock_init(&fs_info->defrag_inodes_lock);
 	spin_lock_init(&fs_info->free_chunk_lock);
+	spin_lock_init(&fs_info->tree_mod_seq_lock);
+	rwlock_init(&fs_info->tree_mod_log_lock);
 	mutex_init(&fs_info->reloc_mutex);
 
 	init_completion(&fs_info->kobj_unregister);
 	INIT_LIST_HEAD(&fs_info->dirty_cowonly_roots);
 	INIT_LIST_HEAD(&fs_info->space_info);
+	INIT_LIST_HEAD(&fs_info->tree_mod_seq_list);
 	btrfs_mapping_init(&fs_info->mapping_tree);
 	btrfs_init_block_rsv(&fs_info->global_block_rsv);
 	btrfs_init_block_rsv(&fs_info->delalloc_block_rsv);
@@ -1931,12 +1935,14 @@
 	atomic_set(&fs_info->async_submit_draining, 0);
 	atomic_set(&fs_info->nr_async_bios, 0);
 	atomic_set(&fs_info->defrag_running, 0);
+	atomic_set(&fs_info->tree_mod_seq, 0);
 	fs_info->sb = sb;
 	fs_info->max_inline = 8192 * 1024;
 	fs_info->metadata_ratio = 0;
 	fs_info->defrag_inodes = RB_ROOT;
 	fs_info->trans_no_join = 0;
 	fs_info->free_chunk_space = 0;
+	fs_info->tree_mod_log = RB_ROOT;
 
 	/* readahead state */
 	INIT_RADIX_TREE(&fs_info->reada_tree, GFP_NOFS & ~__GFP_WAIT);
@@ -2001,7 +2007,8 @@
 	BTRFS_I(fs_info->btree_inode)->root = tree_root;
 	memset(&BTRFS_I(fs_info->btree_inode)->location, 0,
 	       sizeof(struct btrfs_key));
-	BTRFS_I(fs_info->btree_inode)->dummy_inode = 1;
+	set_bit(BTRFS_INODE_DUMMY,
+		&BTRFS_I(fs_info->btree_inode)->runtime_flags);
 	insert_inode_hash(fs_info->btree_inode);
 
 	spin_lock_init(&fs_info->block_group_cache_lock);
@@ -2112,7 +2119,7 @@
 
 	features = btrfs_super_incompat_flags(disk_super);
 	features |= BTRFS_FEATURE_INCOMPAT_MIXED_BACKREF;
-	if (tree_root->fs_info->compress_type & BTRFS_COMPRESS_LZO)
+	if (tree_root->fs_info->compress_type == BTRFS_COMPRESS_LZO)
 		features |= BTRFS_FEATURE_INCOMPAT_COMPRESS_LZO;
 
 	/*
@@ -2353,6 +2360,13 @@
 	fs_info->generation = generation;
 	fs_info->last_trans_committed = generation;
 
+	ret = btrfs_init_dev_stats(fs_info);
+	if (ret) {
+		printk(KERN_ERR "btrfs: failed to init dev_stats: %d\n",
+		       ret);
+		goto fail_block_groups;
+	}
+
 	ret = btrfs_init_space_info(fs_info);
 	if (ret) {
 		printk(KERN_ERR "Failed to initial space info: %d\n", ret);
@@ -2556,18 +2570,20 @@
 
 static void btrfs_end_buffer_write_sync(struct buffer_head *bh, int uptodate)
 {
-	char b[BDEVNAME_SIZE];
-
 	if (uptodate) {
 		set_buffer_uptodate(bh);
 	} else {
-		printk_ratelimited(KERN_WARNING "lost page write due to "
-					"I/O error on %s\n",
-				       bdevname(bh->b_bdev, b));
+		struct btrfs_device *device = (struct btrfs_device *)
+			bh->b_private;
+
+		printk_ratelimited_in_rcu(KERN_WARNING "lost page write due to "
+					  "I/O error on %s\n",
+					  rcu_str_deref(device->name));
 		/* note, we don't set_buffer_write_io_error because we have
 		 * our own ways of dealing with the IO errors
 		 */
 		clear_buffer_uptodate(bh);
+		btrfs_dev_stat_inc_and_print(device, BTRFS_DEV_STAT_WRITE_ERRS);
 	}
 	unlock_buffer(bh);
 	put_bh(bh);
@@ -2682,6 +2698,7 @@
 			set_buffer_uptodate(bh);
 			lock_buffer(bh);
 			bh->b_end_io = btrfs_end_buffer_write_sync;
+			bh->b_private = device;
 		}
 
 		/*
@@ -2734,12 +2751,15 @@
 		wait_for_completion(&device->flush_wait);
 
 		if (bio_flagged(bio, BIO_EOPNOTSUPP)) {
-			printk("btrfs: disabling barriers on dev %s\n",
-			       device->name);
+			printk_in_rcu("btrfs: disabling barriers on dev %s\n",
+				      rcu_str_deref(device->name));
 			device->nobarriers = 1;
 		}
 		if (!bio_flagged(bio, BIO_UPTODATE)) {
 			ret = -EIO;
+			if (!bio_flagged(bio, BIO_EOPNOTSUPP))
+				btrfs_dev_stat_inc_and_print(device,
+					BTRFS_DEV_STAT_FLUSH_ERRS);
 		}
 
 		/* drop the reference from the wait == 0 run */
@@ -2902,19 +2922,6 @@
 	return ret;
 }
 
-/* Kill all outstanding I/O */
-void btrfs_abort_devices(struct btrfs_root *root)
-{
-	struct list_head *head;
-	struct btrfs_device *dev;
-	mutex_lock(&root->fs_info->fs_devices->device_list_mutex);
-	head = &root->fs_info->fs_devices->devices;
-	list_for_each_entry_rcu(dev, head, dev_list) {
-		blk_abort_queue(dev->bdev->bd_disk->queue);
-	}
-	mutex_unlock(&root->fs_info->fs_devices->device_list_mutex);
-}
-
 void btrfs_free_fs_root(struct btrfs_fs_info *fs_info, struct btrfs_root *root)
 {
 	spin_lock(&fs_info->fs_roots_radix_lock);
@@ -3395,7 +3402,6 @@
 
 	delayed_refs = &trans->delayed_refs;
 
-again:
 	spin_lock(&delayed_refs->lock);
 	if (delayed_refs->num_entries == 0) {
 		spin_unlock(&delayed_refs->lock);
@@ -3403,31 +3409,37 @@
 		return ret;
 	}
 
-	node = rb_first(&delayed_refs->root);
-	while (node) {
+	while ((node = rb_first(&delayed_refs->root)) != NULL) {
 		ref = rb_entry(node, struct btrfs_delayed_ref_node, rb_node);
-		node = rb_next(node);
-
-		ref->in_tree = 0;
-		rb_erase(&ref->rb_node, &delayed_refs->root);
-		delayed_refs->num_entries--;
 
 		atomic_set(&ref->refs, 1);
 		if (btrfs_delayed_ref_is_head(ref)) {
 			struct btrfs_delayed_ref_head *head;
 
 			head = btrfs_delayed_node_to_head(ref);
-			spin_unlock(&delayed_refs->lock);
-			mutex_lock(&head->mutex);
+			if (!mutex_trylock(&head->mutex)) {
+				atomic_inc(&ref->refs);
+				spin_unlock(&delayed_refs->lock);
+
+				/* Need to wait for the delayed ref to run */
+				mutex_lock(&head->mutex);
+				mutex_unlock(&head->mutex);
+				btrfs_put_delayed_ref(ref);
+
+				spin_lock(&delayed_refs->lock);
+				continue;
+			}
+
 			kfree(head->extent_op);
 			delayed_refs->num_heads--;
 			if (list_empty(&head->cluster))
 				delayed_refs->num_heads_ready--;
 			list_del_init(&head->cluster);
-			mutex_unlock(&head->mutex);
-			btrfs_put_delayed_ref(ref);
-			goto again;
 		}
+		ref->in_tree = 0;
+		rb_erase(&ref->rb_node, &delayed_refs->root);
+		delayed_refs->num_entries--;
+
 		spin_unlock(&delayed_refs->lock);
 		btrfs_put_delayed_ref(ref);
 
@@ -3515,11 +3527,9 @@
 			     &(&BTRFS_I(page->mapping->host)->io_tree)->buffer,
 					       offset >> PAGE_CACHE_SHIFT);
 			spin_unlock(&dirty_pages->buffer_lock);
-			if (eb) {
+			if (eb)
 				ret = test_and_clear_bit(EXTENT_BUFFER_DIRTY,
 							 &eb->bflags);
-				atomic_set(&eb->refs, 1);
-			}
 			if (PageWriteback(page))
 				end_page_writeback(page);
 
@@ -3533,8 +3543,8 @@
 				spin_unlock_irq(&page->mapping->tree_lock);
 			}
 
-			page->mapping->a_ops->invalidatepage(page, 0);
 			unlock_page(page);
+			page_cache_release(page);
 		}
 	}
 
@@ -3548,8 +3558,10 @@
 	u64 start;
 	u64 end;
 	int ret;
+	bool loop = true;
 
 	unpin = pinned_extents;
+again:
 	while (1) {
 		ret = find_first_extent_bit(unpin, 0, &start, &end,
 					    EXTENT_DIRTY);
@@ -3567,6 +3579,15 @@
 		cond_resched();
 	}
 
+	if (loop) {
+		if (unpin == &root->fs_info->freed_extents[0])
+			unpin = &root->fs_info->freed_extents[1];
+		else
+			unpin = &root->fs_info->freed_extents[0];
+		loop = false;
+		goto again;
+	}
+
 	return 0;
 }
 
@@ -3580,21 +3601,23 @@
 	/* FIXME: cleanup wait for commit */
 	cur_trans->in_commit = 1;
 	cur_trans->blocked = 1;
-	if (waitqueue_active(&root->fs_info->transaction_blocked_wait))
-		wake_up(&root->fs_info->transaction_blocked_wait);
+	wake_up(&root->fs_info->transaction_blocked_wait);
 
 	cur_trans->blocked = 0;
-	if (waitqueue_active(&root->fs_info->transaction_wait))
-		wake_up(&root->fs_info->transaction_wait);
+	wake_up(&root->fs_info->transaction_wait);
 
 	cur_trans->commit_done = 1;
-	if (waitqueue_active(&cur_trans->commit_wait))
-		wake_up(&cur_trans->commit_wait);
+	wake_up(&cur_trans->commit_wait);
+
+	btrfs_destroy_delayed_inodes(root);
+	btrfs_assert_delayed_root_empty(root);
 
 	btrfs_destroy_pending_snapshots(cur_trans);
 
 	btrfs_destroy_marked_extents(root, &cur_trans->dirty_pages,
 				     EXTENT_DIRTY);
+	btrfs_destroy_pinned_extent(root,
+				    root->fs_info->pinned_extents);
 
 	/*
 	memset(cur_trans, 0, sizeof(*cur_trans));
@@ -3643,6 +3666,9 @@
 		if (waitqueue_active(&t->commit_wait))
 			wake_up(&t->commit_wait);
 
+		btrfs_destroy_delayed_inodes(root);
+		btrfs_assert_delayed_root_empty(root);
+
 		btrfs_destroy_pending_snapshots(t);
 
 		btrfs_destroy_delalloc_inodes(root);
@@ -3671,17 +3697,6 @@
 	return 0;
 }
 
-static int btree_writepage_io_failed_hook(struct bio *bio, struct page *page,
-					  u64 start, u64 end,
-					  struct extent_state *state)
-{
-	struct super_block *sb = page->mapping->host->i_sb;
-	struct btrfs_fs_info *fs_info = btrfs_sb(sb);
-	btrfs_error(fs_info, -EIO,
-		    "Error occured while writing out btree at %llu", start);
-	return -EIO;
-}
-
 static struct extent_io_ops btree_extent_io_ops = {
 	.write_cache_pages_lock_hook = btree_lock_page_hook,
 	.readpage_end_io_hook = btree_readpage_end_io_hook,
@@ -3689,5 +3704,4 @@
 	.submit_bio_hook = btree_submit_bio_hook,
 	/* note we're sharing with inode.c for the merge bio hook */
 	.merge_bio_hook = btrfs_merge_bio_hook,
-	.writepage_io_failed_hook = btree_writepage_io_failed_hook,
 };
diff --git a/fs/btrfs/disk-io.h b/fs/btrfs/disk-io.h
index ab1830a..05b3fab 100644
--- a/fs/btrfs/disk-io.h
+++ b/fs/btrfs/disk-io.h
@@ -89,7 +89,6 @@
 int btrfs_cleanup_transaction(struct btrfs_root *root);
 void btrfs_cleanup_one_transaction(struct btrfs_transaction *trans,
 				  struct btrfs_root *root);
-void btrfs_abort_devices(struct btrfs_root *root);
 
 #ifdef CONFIG_DEBUG_LOCK_ALLOC
 void btrfs_init_lockdep(void);
diff --git a/fs/btrfs/export.c b/fs/btrfs/export.c
index e887ee6..614f34a 100644
--- a/fs/btrfs/export.c
+++ b/fs/btrfs/export.c
@@ -13,15 +13,14 @@
 					     parent_root_objectid) / 4)
 #define BTRFS_FID_SIZE_CONNECTABLE_ROOT (sizeof(struct btrfs_fid) / 4)
 
-static int btrfs_encode_fh(struct dentry *dentry, u32 *fh, int *max_len,
-			   int connectable)
+static int btrfs_encode_fh(struct inode *inode, u32 *fh, int *max_len,
+			   struct inode *parent)
 {
 	struct btrfs_fid *fid = (struct btrfs_fid *)fh;
-	struct inode *inode = dentry->d_inode;
 	int len = *max_len;
 	int type;
 
-	if (connectable && (len < BTRFS_FID_SIZE_CONNECTABLE)) {
+	if (parent && (len < BTRFS_FID_SIZE_CONNECTABLE)) {
 		*max_len = BTRFS_FID_SIZE_CONNECTABLE;
 		return 255;
 	} else if (len < BTRFS_FID_SIZE_NON_CONNECTABLE) {
@@ -36,19 +35,13 @@
 	fid->root_objectid = BTRFS_I(inode)->root->objectid;
 	fid->gen = inode->i_generation;
 
-	if (connectable && !S_ISDIR(inode->i_mode)) {
-		struct inode *parent;
+	if (parent) {
 		u64 parent_root_id;
 
-		spin_lock(&dentry->d_lock);
-
-		parent = dentry->d_parent->d_inode;
 		fid->parent_objectid = BTRFS_I(parent)->location.objectid;
 		fid->parent_gen = parent->i_generation;
 		parent_root_id = BTRFS_I(parent)->root->objectid;
 
-		spin_unlock(&dentry->d_lock);
-
 		if (parent_root_id != fid->root_objectid) {
 			fid->parent_root_objectid = parent_root_id;
 			len = BTRFS_FID_SIZE_CONNECTABLE_ROOT;
diff --git a/fs/btrfs/extent-tree.c b/fs/btrfs/extent-tree.c
index 49fd7b6..4b5a1e1 100644
--- a/fs/btrfs/extent-tree.c
+++ b/fs/btrfs/extent-tree.c
@@ -3578,7 +3578,7 @@
 	space_info->chunk_alloc = 0;
 	spin_unlock(&space_info->lock);
 out:
-	mutex_unlock(&extent_root->fs_info->chunk_mutex);
+	mutex_unlock(&fs_info->chunk_mutex);
 	return ret;
 }
 
@@ -4355,10 +4355,9 @@
 	BTRFS_I(inode)->outstanding_extents--;
 
 	if (BTRFS_I(inode)->outstanding_extents == 0 &&
-	    BTRFS_I(inode)->delalloc_meta_reserved) {
+	    test_and_clear_bit(BTRFS_INODE_DELALLOC_META_RESERVED,
+			       &BTRFS_I(inode)->runtime_flags))
 		drop_inode_space = 1;
-		BTRFS_I(inode)->delalloc_meta_reserved = 0;
-	}
 
 	/*
 	 * If we have more or the same amount of outsanding extents than we have
@@ -4465,7 +4464,8 @@
 	 * Add an item to reserve for updating the inode when we complete the
 	 * delalloc io.
 	 */
-	if (!BTRFS_I(inode)->delalloc_meta_reserved) {
+	if (!test_bit(BTRFS_INODE_DELALLOC_META_RESERVED,
+		      &BTRFS_I(inode)->runtime_flags)) {
 		nr_extents++;
 		extra_reserve = 1;
 	}
@@ -4511,7 +4511,8 @@
 
 	spin_lock(&BTRFS_I(inode)->lock);
 	if (extra_reserve) {
-		BTRFS_I(inode)->delalloc_meta_reserved = 1;
+		set_bit(BTRFS_INODE_DELALLOC_META_RESERVED,
+			&BTRFS_I(inode)->runtime_flags);
 		nr_extents--;
 	}
 	BTRFS_I(inode)->reserved_extents += nr_extents;
@@ -5217,7 +5218,7 @@
 void btrfs_free_tree_block(struct btrfs_trans_handle *trans,
 			   struct btrfs_root *root,
 			   struct extent_buffer *buf,
-			   u64 parent, int last_ref, int for_cow)
+			   u64 parent, int last_ref)
 {
 	struct btrfs_block_group_cache *cache = NULL;
 	int ret;
@@ -5227,7 +5228,7 @@
 					buf->start, buf->len,
 					parent, root->root_key.objectid,
 					btrfs_header_level(buf),
-					BTRFS_DROP_DELAYED_REF, NULL, for_cow);
+					BTRFS_DROP_DELAYED_REF, NULL, 0);
 		BUG_ON(ret); /* -ENOMEM */
 	}
 
@@ -6249,7 +6250,7 @@
 					struct btrfs_root *root, u32 blocksize,
 					u64 parent, u64 root_objectid,
 					struct btrfs_disk_key *key, int level,
-					u64 hint, u64 empty_size, int for_cow)
+					u64 hint, u64 empty_size)
 {
 	struct btrfs_key ins;
 	struct btrfs_block_rsv *block_rsv;
@@ -6297,7 +6298,7 @@
 					ins.objectid,
 					ins.offset, parent, root_objectid,
 					level, BTRFS_ADD_DELAYED_EXTENT,
-					extent_op, for_cow);
+					extent_op, 0);
 		BUG_ON(ret); /* -ENOMEM */
 	}
 	return buf;
@@ -6715,7 +6716,7 @@
 			       btrfs_header_owner(path->nodes[level + 1]));
 	}
 
-	btrfs_free_tree_block(trans, root, eb, parent, wc->refs[level] == 1, 0);
+	btrfs_free_tree_block(trans, root, eb, parent, wc->refs[level] == 1);
 out:
 	wc->refs[level] = 0;
 	wc->flags[level] = 0;
diff --git a/fs/btrfs/extent_io.c b/fs/btrfs/extent_io.c
index c9018a0..aaa12c1 100644
--- a/fs/btrfs/extent_io.c
+++ b/fs/btrfs/extent_io.c
@@ -20,6 +20,7 @@
 #include "volumes.h"
 #include "check-integrity.h"
 #include "locking.h"
+#include "rcu-string.h"
 
 static struct kmem_cache *extent_state_cache;
 static struct kmem_cache *extent_buffer_cache;
@@ -186,7 +187,6 @@
 			return parent;
 	}
 
-	entry = rb_entry(node, struct tree_entry, rb_node);
 	rb_link_node(node, parent, p);
 	rb_insert_color(node, root);
 	return NULL;
@@ -413,7 +413,7 @@
 
 /*
  * utility function to clear some bits in an extent state struct.
- * it will optionally wake up any one waiting on this state (wake == 1)
+ * it will optionally wake up any one waiting on this state (wake == 1).
  *
  * If no bits are set on the state struct after clearing things, the
  * struct is freed and removed from the tree
@@ -570,10 +570,8 @@
 		if (err)
 			goto out;
 		if (state->end <= end) {
-			clear_state_bit(tree, state, &bits, wake);
-			if (last_end == (u64)-1)
-				goto out;
-			start = last_end + 1;
+			state = clear_state_bit(tree, state, &bits, wake);
+			goto next;
 		}
 		goto search_again;
 	}
@@ -781,7 +779,6 @@
 	 * Just lock what we found and keep going
 	 */
 	if (state->start == start && state->end <= end) {
-		struct rb_node *next_node;
 		if (state->state & exclusive_bits) {
 			*failed_start = state->start;
 			err = -EEXIST;
@@ -789,20 +786,15 @@
 		}
 
 		set_state_bits(tree, state, &bits);
-
 		cache_state(state, cached_state);
 		merge_state(tree, state);
 		if (last_end == (u64)-1)
 			goto out;
-
 		start = last_end + 1;
-		next_node = rb_next(&state->rb_node);
-		if (next_node && start < end && prealloc && !need_resched()) {
-			state = rb_entry(next_node, struct extent_state,
-					 rb_node);
-			if (state->start == start)
-				goto hit_next;
-		}
+		state = next_state(state);
+		if (start < end && state && state->start == start &&
+		    !need_resched())
+			goto hit_next;
 		goto search_again;
 	}
 
@@ -845,6 +837,10 @@
 			if (last_end == (u64)-1)
 				goto out;
 			start = last_end + 1;
+			state = next_state(state);
+			if (start < end && state && state->start == start &&
+			    !need_resched())
+				goto hit_next;
 		}
 		goto search_again;
 	}
@@ -994,21 +990,14 @@
 	 * Just lock what we found and keep going
 	 */
 	if (state->start == start && state->end <= end) {
-		struct rb_node *next_node;
-
 		set_state_bits(tree, state, &bits);
-		clear_state_bit(tree, state, &clear_bits, 0);
+		state = clear_state_bit(tree, state, &clear_bits, 0);
 		if (last_end == (u64)-1)
 			goto out;
-
 		start = last_end + 1;
-		next_node = rb_next(&state->rb_node);
-		if (next_node && start < end && prealloc && !need_resched()) {
-			state = rb_entry(next_node, struct extent_state,
-					 rb_node);
-			if (state->start == start)
-				goto hit_next;
-		}
+		if (start < end && state && state->start == start &&
+		    !need_resched())
+			goto hit_next;
 		goto search_again;
 	}
 
@@ -1042,10 +1031,13 @@
 			goto out;
 		if (state->end <= end) {
 			set_state_bits(tree, state, &bits);
-			clear_state_bit(tree, state, &clear_bits, 0);
+			state = clear_state_bit(tree, state, &clear_bits, 0);
 			if (last_end == (u64)-1)
 				goto out;
 			start = last_end + 1;
+			if (start < end && state && state->start == start &&
+			    !need_resched())
+				goto hit_next;
 		}
 		goto search_again;
 	}
@@ -1173,9 +1165,8 @@
 			      cached_state, mask);
 }
 
-static int clear_extent_uptodate(struct extent_io_tree *tree, u64 start,
-				 u64 end, struct extent_state **cached_state,
-				 gfp_t mask)
+int clear_extent_uptodate(struct extent_io_tree *tree, u64 start, u64 end,
+			  struct extent_state **cached_state, gfp_t mask)
 {
 	return clear_extent_bit(tree, start, end, EXTENT_UPTODATE, 0, 0,
 				cached_state, mask);
@@ -1293,7 +1284,7 @@
  * returned if we find something, and *start_ret and *end_ret are
  * set to reflect the state struct that was found.
  *
- * If nothing was found, 1 is returned, < 0 on error
+ * If nothing was found, 1 is returned. If found something, return 0.
  */
 int find_first_extent_bit(struct extent_io_tree *tree, u64 start,
 			  u64 *start_ret, u64 *end_ret, int bits)
@@ -1923,12 +1914,13 @@
 	if (!test_bit(BIO_UPTODATE, &bio->bi_flags)) {
 		/* try to remap that extent elsewhere? */
 		bio_put(bio);
+		btrfs_dev_stat_inc_and_print(dev, BTRFS_DEV_STAT_WRITE_ERRS);
 		return -EIO;
 	}
 
-	printk(KERN_INFO "btrfs read error corrected: ino %lu off %llu (dev %s "
-			"sector %llu)\n", page->mapping->host->i_ino, start,
-			dev->name, sector);
+	printk_in_rcu(KERN_INFO "btrfs read error corrected: ino %lu off %llu "
+		      "(dev %s sector %llu)\n", page->mapping->host->i_ino,
+		      start, rcu_str_deref(dev->name), sector);
 
 	bio_put(bio);
 	return 0;
@@ -2222,17 +2214,7 @@
 			uptodate = 0;
 	}
 
-	if (!uptodate && tree->ops &&
-	    tree->ops->writepage_io_failed_hook) {
-		ret = tree->ops->writepage_io_failed_hook(NULL, page,
-						 start, end, NULL);
-		/* Writeback already completed */
-		if (ret == 0)
-			return 1;
-	}
-
 	if (!uptodate) {
-		clear_extent_uptodate(tree, start, end, NULL, GFP_NOFS);
 		ClearPageUptodate(page);
 		SetPageError(page);
 	}
@@ -2347,10 +2329,23 @@
 		if (uptodate && tree->ops && tree->ops->readpage_end_io_hook) {
 			ret = tree->ops->readpage_end_io_hook(page, start, end,
 							      state, mirror);
-			if (ret)
+			if (ret) {
+				/* no IO indicated but software detected errors
+				 * in the block, either checksum errors or
+				 * issues with the contents */
+				struct btrfs_root *root =
+					BTRFS_I(page->mapping->host)->root;
+				struct btrfs_device *device;
+
 				uptodate = 0;
-			else
+				device = btrfs_find_device_for_logical(
+						root, start, mirror);
+				if (device)
+					btrfs_dev_stat_inc_and_print(device,
+						BTRFS_DEV_STAT_CORRUPTION_ERRS);
+			} else {
 				clean_io_failure(start, page);
+			}
 		}
 
 		if (!uptodate && tree->ops && tree->ops->readpage_io_failed_hook) {
@@ -3164,7 +3159,7 @@
 	u64 offset = eb->start;
 	unsigned long i, num_pages;
 	int rw = (epd->sync_io ? WRITE_SYNC : WRITE);
-	int ret;
+	int ret = 0;
 
 	clear_bit(EXTENT_BUFFER_IOERR, &eb->bflags);
 	num_pages = num_extent_pages(eb->start, eb->len);
@@ -3930,6 +3925,7 @@
 	eb->start = start;
 	eb->len = len;
 	eb->tree = tree;
+	eb->bflags = 0;
 	rwlock_init(&eb->lock);
 	atomic_set(&eb->write_locks, 0);
 	atomic_set(&eb->read_locks, 0);
@@ -3967,6 +3963,60 @@
 	return eb;
 }
 
+struct extent_buffer *btrfs_clone_extent_buffer(struct extent_buffer *src)
+{
+	unsigned long i;
+	struct page *p;
+	struct extent_buffer *new;
+	unsigned long num_pages = num_extent_pages(src->start, src->len);
+
+	new = __alloc_extent_buffer(NULL, src->start, src->len, GFP_ATOMIC);
+	if (new == NULL)
+		return NULL;
+
+	for (i = 0; i < num_pages; i++) {
+		p = alloc_page(GFP_ATOMIC);
+		BUG_ON(!p);
+		attach_extent_buffer_page(new, p);
+		WARN_ON(PageDirty(p));
+		SetPageUptodate(p);
+		new->pages[i] = p;
+	}
+
+	copy_extent_buffer(new, src, 0, 0, src->len);
+	set_bit(EXTENT_BUFFER_UPTODATE, &new->bflags);
+	set_bit(EXTENT_BUFFER_DUMMY, &new->bflags);
+
+	return new;
+}
+
+struct extent_buffer *alloc_dummy_extent_buffer(u64 start, unsigned long len)
+{
+	struct extent_buffer *eb;
+	unsigned long num_pages = num_extent_pages(0, len);
+	unsigned long i;
+
+	eb = __alloc_extent_buffer(NULL, start, len, GFP_ATOMIC);
+	if (!eb)
+		return NULL;
+
+	for (i = 0; i < num_pages; i++) {
+		eb->pages[i] = alloc_page(GFP_ATOMIC);
+		if (!eb->pages[i])
+			goto err;
+	}
+	set_extent_buffer_uptodate(eb);
+	btrfs_set_header_nritems(eb, 0);
+	set_bit(EXTENT_BUFFER_DUMMY, &eb->bflags);
+
+	return eb;
+err:
+	for (; i > 0; i--)
+		__free_page(eb->pages[i - 1]);
+	__free_extent_buffer(eb);
+	return NULL;
+}
+
 static int extent_buffer_under_io(struct extent_buffer *eb)
 {
 	return (atomic_read(&eb->io_pages) ||
@@ -3981,18 +4031,21 @@
 						unsigned long start_idx)
 {
 	unsigned long index;
+	unsigned long num_pages;
 	struct page *page;
+	int mapped = !test_bit(EXTENT_BUFFER_DUMMY, &eb->bflags);
 
 	BUG_ON(extent_buffer_under_io(eb));
 
-	index = num_extent_pages(eb->start, eb->len);
+	num_pages = num_extent_pages(eb->start, eb->len);
+	index = start_idx + num_pages;
 	if (start_idx >= index)
 		return;
 
 	do {
 		index--;
 		page = extent_buffer_page(eb, index);
-		if (page) {
+		if (page && mapped) {
 			spin_lock(&page->mapping->private_lock);
 			/*
 			 * We do this since we'll remove the pages after we've
@@ -4017,6 +4070,8 @@
 			}
 			spin_unlock(&page->mapping->private_lock);
 
+		}
+		if (page) {
 			/* One for when we alloced the page */
 			page_cache_release(page);
 		}
@@ -4235,14 +4290,18 @@
 {
 	WARN_ON(atomic_read(&eb->refs) == 0);
 	if (atomic_dec_and_test(&eb->refs)) {
-		struct extent_io_tree *tree = eb->tree;
+		if (test_bit(EXTENT_BUFFER_DUMMY, &eb->bflags)) {
+			spin_unlock(&eb->refs_lock);
+		} else {
+			struct extent_io_tree *tree = eb->tree;
 
-		spin_unlock(&eb->refs_lock);
+			spin_unlock(&eb->refs_lock);
 
-		spin_lock(&tree->buffer_lock);
-		radix_tree_delete(&tree->buffer,
-				  eb->start >> PAGE_CACHE_SHIFT);
-		spin_unlock(&tree->buffer_lock);
+			spin_lock(&tree->buffer_lock);
+			radix_tree_delete(&tree->buffer,
+					  eb->start >> PAGE_CACHE_SHIFT);
+			spin_unlock(&tree->buffer_lock);
+		}
 
 		/* Should be safe to release our pages at this point */
 		btrfs_release_extent_buffer_page(eb, 0);
@@ -4260,6 +4319,10 @@
 
 	spin_lock(&eb->refs_lock);
 	if (atomic_read(&eb->refs) == 2 &&
+	    test_bit(EXTENT_BUFFER_DUMMY, &eb->bflags))
+		atomic_dec(&eb->refs);
+
+	if (atomic_read(&eb->refs) == 2 &&
 	    test_bit(EXTENT_BUFFER_STALE, &eb->bflags) &&
 	    !extent_buffer_under_io(eb) &&
 	    test_and_clear_bit(EXTENT_BUFFER_TREE_REF, &eb->bflags))
diff --git a/fs/btrfs/extent_io.h b/fs/btrfs/extent_io.h
index b516c3b..25900af 100644
--- a/fs/btrfs/extent_io.h
+++ b/fs/btrfs/extent_io.h
@@ -39,6 +39,7 @@
 #define EXTENT_BUFFER_STALE 6
 #define EXTENT_BUFFER_WRITEBACK 7
 #define EXTENT_BUFFER_IOERR 8
+#define EXTENT_BUFFER_DUMMY 9
 
 /* these are flags for extent_clear_unlock_delalloc */
 #define EXTENT_CLEAR_UNLOCK_PAGE 0x1
@@ -75,9 +76,6 @@
 			      unsigned long bio_flags);
 	int (*readpage_io_hook)(struct page *page, u64 start, u64 end);
 	int (*readpage_io_failed_hook)(struct page *page, int failed_mirror);
-	int (*writepage_io_failed_hook)(struct bio *bio, struct page *page,
-					u64 start, u64 end,
-				       struct extent_state *state);
 	int (*readpage_end_io_hook)(struct page *page, u64 start, u64 end,
 				    struct extent_state *state, int mirror);
 	int (*writepage_end_io_hook)(struct page *page, u64 start, u64 end,
@@ -225,6 +223,8 @@
 		   struct extent_state **cached_state, gfp_t mask);
 int set_extent_uptodate(struct extent_io_tree *tree, u64 start, u64 end,
 			struct extent_state **cached_state, gfp_t mask);
+int clear_extent_uptodate(struct extent_io_tree *tree, u64 start, u64 end,
+			  struct extent_state **cached_state, gfp_t mask);
 int set_extent_new(struct extent_io_tree *tree, u64 start, u64 end,
 		   gfp_t mask);
 int set_extent_dirty(struct extent_io_tree *tree, u64 start, u64 end,
@@ -265,6 +265,8 @@
 
 struct extent_buffer *alloc_extent_buffer(struct extent_io_tree *tree,
 					  u64 start, unsigned long len);
+struct extent_buffer *alloc_dummy_extent_buffer(u64 start, unsigned long len);
+struct extent_buffer *btrfs_clone_extent_buffer(struct extent_buffer *src);
 struct extent_buffer *find_extent_buffer(struct extent_io_tree *tree,
 					 u64 start, unsigned long len);
 void free_extent_buffer(struct extent_buffer *eb);
diff --git a/fs/btrfs/file.c b/fs/btrfs/file.c
index 53bf2d7..70dc8ca 100644
--- a/fs/btrfs/file.c
+++ b/fs/btrfs/file.c
@@ -65,6 +65,21 @@
 	int cycled;
 };
 
+static int __compare_inode_defrag(struct inode_defrag *defrag1,
+				  struct inode_defrag *defrag2)
+{
+	if (defrag1->root > defrag2->root)
+		return 1;
+	else if (defrag1->root < defrag2->root)
+		return -1;
+	else if (defrag1->ino > defrag2->ino)
+		return 1;
+	else if (defrag1->ino < defrag2->ino)
+		return -1;
+	else
+		return 0;
+}
+
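As a rough illustration of the new ordering (a minimal, hypothetical standalone sketch; the in-tree comparator is __compare_inode_defrag() above, and these names are not part of the patch), defrag records are now totally ordered by the pair (root, ino), with root as the major key:

#include <stdio.h>

struct defrag_key {
	unsigned long long root;
	unsigned long long ino;
};

/* Same total order as __compare_inode_defrag(): root first, then ino. */
static int compare_defrag_key(const struct defrag_key *a,
			      const struct defrag_key *b)
{
	if (a->root != b->root)
		return a->root < b->root ? -1 : 1;
	if (a->ino != b->ino)
		return a->ino < b->ino ? -1 : 1;
	return 0;
}

int main(void)
{
	struct defrag_key a = { .root = 5, .ino = 260 };
	struct defrag_key b = { .root = 256, .ino = 1 };

	/* root dominates: (5, 260) sorts before (256, 1), so this prints -1 */
	printf("%d\n", compare_defrag_key(&a, &b));
	return 0;
}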
 /* pop a record for an inode into the defrag tree.  The lock
  * must be held already
  *
@@ -81,15 +96,17 @@
 	struct inode_defrag *entry;
 	struct rb_node **p;
 	struct rb_node *parent = NULL;
+	int ret;
 
 	p = &root->fs_info->defrag_inodes.rb_node;
 	while (*p) {
 		parent = *p;
 		entry = rb_entry(parent, struct inode_defrag, rb_node);
 
-		if (defrag->ino < entry->ino)
+		ret = __compare_inode_defrag(defrag, entry);
+		if (ret < 0)
 			p = &parent->rb_left;
-		else if (defrag->ino > entry->ino)
+		else if (ret > 0)
 			p = &parent->rb_right;
 		else {
 			/* if we're reinserting an entry for
@@ -103,7 +120,7 @@
 			goto exists;
 		}
 	}
-	BTRFS_I(inode)->in_defrag = 1;
+	set_bit(BTRFS_INODE_IN_DEFRAG, &BTRFS_I(inode)->runtime_flags);
 	rb_link_node(&defrag->rb_node, parent, p);
 	rb_insert_color(&defrag->rb_node, &root->fs_info->defrag_inodes);
 	return;
@@ -131,7 +148,7 @@
 	if (btrfs_fs_closing(root->fs_info))
 		return 0;
 
-	if (BTRFS_I(inode)->in_defrag)
+	if (test_bit(BTRFS_INODE_IN_DEFRAG, &BTRFS_I(inode)->runtime_flags))
 		return 0;
 
 	if (trans)
@@ -148,7 +165,7 @@
 	defrag->root = root->root_key.objectid;
 
 	spin_lock(&root->fs_info->defrag_inodes_lock);
-	if (!BTRFS_I(inode)->in_defrag)
+	if (!test_bit(BTRFS_INODE_IN_DEFRAG, &BTRFS_I(inode)->runtime_flags))
 		__btrfs_add_inode_defrag(inode, defrag);
 	else
 		kfree(defrag);
@@ -159,28 +176,35 @@
 /*
  * must be called with the defrag_inodes lock held
  */
-struct inode_defrag *btrfs_find_defrag_inode(struct btrfs_fs_info *info, u64 ino,
+struct inode_defrag *btrfs_find_defrag_inode(struct btrfs_fs_info *info,
+					     u64 root, u64 ino,
 					     struct rb_node **next)
 {
 	struct inode_defrag *entry = NULL;
+	struct inode_defrag tmp;
 	struct rb_node *p;
 	struct rb_node *parent = NULL;
+	int ret;
+
+	tmp.ino = ino;
+	tmp.root = root;
 
 	p = info->defrag_inodes.rb_node;
 	while (p) {
 		parent = p;
 		entry = rb_entry(parent, struct inode_defrag, rb_node);
 
-		if (ino < entry->ino)
+		ret = __compare_inode_defrag(&tmp, entry);
+		if (ret < 0)
 			p = parent->rb_left;
-		else if (ino > entry->ino)
+		else if (ret > 0)
 			p = parent->rb_right;
 		else
 			return entry;
 	}
 
 	if (next) {
-		while (parent && ino > entry->ino) {
+		while (parent && __compare_inode_defrag(&tmp, entry) > 0) {
 			parent = rb_next(parent);
 			entry = rb_entry(parent, struct inode_defrag, rb_node);
 		}
@@ -202,6 +226,7 @@
 	struct btrfs_key key;
 	struct btrfs_ioctl_defrag_range_args range;
 	u64 first_ino = 0;
+	u64 root_objectid = 0;
 	int num_defrag;
 	int defrag_batch = 1024;
 
@@ -214,11 +239,14 @@
 		n = NULL;
 
 		/* find an inode to defrag */
-		defrag = btrfs_find_defrag_inode(fs_info, first_ino, &n);
+		defrag = btrfs_find_defrag_inode(fs_info, root_objectid,
+						 first_ino, &n);
 		if (!defrag) {
-			if (n)
-				defrag = rb_entry(n, struct inode_defrag, rb_node);
-			else if (first_ino) {
+			if (n) {
+				defrag = rb_entry(n, struct inode_defrag,
+						  rb_node);
+			} else if (root_objectid || first_ino) {
+				root_objectid = 0;
 				first_ino = 0;
 				continue;
 			} else {
@@ -228,6 +256,7 @@
 
 		/* remove it from the rbtree */
 		first_ino = defrag->ino + 1;
+		root_objectid = defrag->root;
 		rb_erase(&defrag->rb_node, &fs_info->defrag_inodes);
 
 		if (btrfs_fs_closing(fs_info))
@@ -252,7 +281,7 @@
 			goto next;
 
 		/* do a chunk of defrag */
-		BTRFS_I(inode)->in_defrag = 0;
+		clear_bit(BTRFS_INODE_IN_DEFRAG, &BTRFS_I(inode)->runtime_flags);
 		range.start = defrag->last_offset;
 		num_defrag = btrfs_defrag_file(inode, NULL, &range, defrag->transid,
 					       defrag_batch);
@@ -1404,12 +1433,11 @@
 		goto out;
 	}
 
-	err = btrfs_update_time(file);
+	err = file_update_time(file);
 	if (err) {
 		mutex_unlock(&inode->i_mutex);
 		goto out;
 	}
-	BTRFS_I(inode)->sequence++;
 
 	start_pos = round_down(pos, root->sectorsize);
 	if (start_pos > i_size_read(inode)) {
@@ -1466,8 +1494,8 @@
 	 * flush down new bytes that may have been written if the
 	 * application were using truncate to replace a file in place.
 	 */
-	if (BTRFS_I(inode)->ordered_data_close) {
-		BTRFS_I(inode)->ordered_data_close = 0;
+	if (test_and_clear_bit(BTRFS_INODE_ORDERED_DATA_CLOSE,
+			       &BTRFS_I(inode)->runtime_flags)) {
 		btrfs_add_ordered_operation(NULL, BTRFS_I(inode)->root, inode);
 		if (inode->i_size > BTRFS_ORDERED_OPERATIONS_FLUSH_LIMIT)
 			filemap_flush(inode->i_mapping);
@@ -1498,14 +1526,15 @@
 
 	trace_btrfs_sync_file(file, datasync);
 
-	ret = filemap_write_and_wait_range(inode->i_mapping, start, end);
-	if (ret)
-		return ret;
 	mutex_lock(&inode->i_mutex);
 
-	/* we wait first, since the writeback may change the inode */
+	/*
+	 * We wait first, since the writeback may change the inode; also,
+	 * btrfs_wait_ordered_range does a filemap_write_and_wait_range, which
+	 * is why we don't do it above like other filesystems.
+	 */
 	root->log_batch++;
-	btrfs_wait_ordered_range(inode, 0, (u64)-1);
+	btrfs_wait_ordered_range(inode, start, end);
 	root->log_batch++;
 
 	/*
@@ -1523,7 +1552,8 @@
 	 * syncing
 	 */
 	smp_mb();
-	if (BTRFS_I(inode)->last_trans <=
+	if (btrfs_inode_in_log(inode, root->fs_info->generation) ||
+	    BTRFS_I(inode)->last_trans <=
 	    root->fs_info->last_trans_committed) {
 		BTRFS_I(inode)->last_trans = 0;
 		mutex_unlock(&inode->i_mutex);
diff --git a/fs/btrfs/free-space-cache.c b/fs/btrfs/free-space-cache.c
index 202008e..81296c5 100644
--- a/fs/btrfs/free-space-cache.c
+++ b/fs/btrfs/free-space-cache.c
@@ -33,6 +33,8 @@
 
 static int link_free_space(struct btrfs_free_space_ctl *ctl,
 			   struct btrfs_free_space *info);
+static void unlink_free_space(struct btrfs_free_space_ctl *ctl,
+			      struct btrfs_free_space *info);
 
 static struct inode *__lookup_free_space_inode(struct btrfs_root *root,
 					       struct btrfs_path *path,
@@ -75,7 +77,8 @@
 		return ERR_PTR(-ENOENT);
 	}
 
-	inode->i_mapping->flags &= ~__GFP_FS;
+	mapping_set_gfp_mask(inode->i_mapping,
+			mapping_gfp_mask(inode->i_mapping) & ~__GFP_FS);
 
 	return inode;
 }
@@ -365,7 +368,7 @@
 
 static void io_ctl_set_generation(struct io_ctl *io_ctl, u64 generation)
 {
-	u64 *val;
+	__le64 *val;
 
 	io_ctl_map_page(io_ctl, 1);
 
@@ -388,7 +391,7 @@
 
 static int io_ctl_check_generation(struct io_ctl *io_ctl, u64 generation)
 {
-	u64 *gen;
+	__le64 *gen;
 
 	/*
 	 * Skip the crc area.  If we don't check crcs then we just have a 64bit
@@ -584,6 +587,44 @@
 	return 0;
 }
 
+/*
+ * Since we attach pinned extents after the fact we can have contiguous sections
+ * of free space that are split up in entries.  This poses a problem with the
+ * tree logging stuff since it could have allocated across what appears to be 2
+ * entries since we would have merged the entries when adding the pinned extents
+ * back to the free space cache.  So run through the space cache that we just
+ * loaded and merge contiguous entries.  This will make the log replay stuff not
+ * blow up and it will make for nicer allocator behavior.
+ */
+static void merge_space_tree(struct btrfs_free_space_ctl *ctl)
+{
+	struct btrfs_free_space *e, *prev = NULL;
+	struct rb_node *n;
+
+again:
+	spin_lock(&ctl->tree_lock);
+	for (n = rb_first(&ctl->free_space_offset); n; n = rb_next(n)) {
+		e = rb_entry(n, struct btrfs_free_space, offset_index);
+		if (!prev)
+			goto next;
+		if (e->bitmap || prev->bitmap)
+			goto next;
+		if (prev->offset + prev->bytes == e->offset) {
+			unlink_free_space(ctl, prev);
+			unlink_free_space(ctl, e);
+			prev->bytes += e->bytes;
+			kmem_cache_free(btrfs_free_space_cachep, e);
+			link_free_space(ctl, prev);
+			prev = NULL;
+			spin_unlock(&ctl->tree_lock);
+			goto again;
+		}
+next:
+		prev = e;
+	}
+	spin_unlock(&ctl->tree_lock);
+}
+
 int __load_free_space_cache(struct btrfs_root *root, struct inode *inode,
 			    struct btrfs_free_space_ctl *ctl,
 			    struct btrfs_path *path, u64 offset)
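As a rough sketch of the merge described in the comment above merge_space_tree() (hypothetical standalone code, not from the kernel tree): adjacent non-bitmap entries whose ranges touch are collapsed into a single entry, e.g. { {0,4096}, {4096,4096}, {16384,4096} } collapses to { {0,8192}, {16384,4096} }.

#include <stddef.h>

struct fs_entry {
	unsigned long long offset;	/* start of the free range */
	unsigned long long bytes;	/* length of the free range */
};

/* entries[] must be sorted by offset; returns the merged count. */
static size_t merge_adjacent(struct fs_entry *entries, size_t count)
{
	size_t out = 0, i;

	for (i = 0; i < count; i++) {
		if (out && entries[out - 1].offset + entries[out - 1].bytes ==
			   entries[i].offset)
			entries[out - 1].bytes += entries[i].bytes;
		else
			entries[out++] = entries[i];
	}
	return out;
}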
@@ -726,6 +767,7 @@
 	}
 
 	io_ctl_drop_pages(&io_ctl);
+	merge_space_tree(ctl);
 	ret = 1;
 out:
 	io_ctl_free(&io_ctl);
@@ -972,9 +1014,7 @@
 		goto out;
 
 
-	ret = filemap_write_and_wait(inode->i_mapping);
-	if (ret)
-		goto out;
+	btrfs_wait_ordered_range(inode, 0, (u64)-1);
 
 	key.objectid = BTRFS_FREE_SPACE_OBJECTID;
 	key.offset = offset;
diff --git a/fs/btrfs/inode.c b/fs/btrfs/inode.c
index 61b16c6..d8bb0db 100644
--- a/fs/btrfs/inode.c
+++ b/fs/btrfs/inode.c
@@ -89,7 +89,7 @@
 
 static int btrfs_setsize(struct inode *inode, loff_t newsize);
 static int btrfs_truncate(struct inode *inode);
-static int btrfs_finish_ordered_io(struct inode *inode, u64 start, u64 end);
+static int btrfs_finish_ordered_io(struct btrfs_ordered_extent *ordered_extent);
 static noinline int cow_file_range(struct inode *inode,
 				   struct page *locked_page,
 				   u64 start, u64 end, int *page_started,
@@ -257,10 +257,13 @@
 	ret = insert_inline_extent(trans, root, inode, start,
 				   inline_len, compressed_size,
 				   compress_type, compressed_pages);
-	if (ret) {
+	if (ret && ret != -ENOSPC) {
 		btrfs_abort_transaction(trans, root, ret);
 		return ret;
+	} else if (ret == -ENOSPC) {
+		return 1;
 	}
+
 	btrfs_delalloc_release_metadata(inode, end + 1 - start);
 	btrfs_drop_extent_cache(inode, start, aligned_end - 1, 0);
 	return 0;
@@ -827,7 +830,7 @@
 	if (IS_ERR(trans)) {
 		extent_clear_unlock_delalloc(inode,
 			     &BTRFS_I(inode)->io_tree,
-			     start, end, NULL,
+			     start, end, locked_page,
 			     EXTENT_CLEAR_UNLOCK_PAGE |
 			     EXTENT_CLEAR_UNLOCK |
 			     EXTENT_CLEAR_DELALLOC |
@@ -960,7 +963,7 @@
 out_unlock:
 	extent_clear_unlock_delalloc(inode,
 		     &BTRFS_I(inode)->io_tree,
-		     start, end, NULL,
+		     start, end, locked_page,
 		     EXTENT_CLEAR_UNLOCK_PAGE |
 		     EXTENT_CLEAR_UNLOCK |
 		     EXTENT_CLEAR_DELALLOC |
@@ -983,8 +986,10 @@
 	compress_file_range(async_cow->inode, async_cow->locked_page,
 			    async_cow->start, async_cow->end, async_cow,
 			    &num_added);
-	if (num_added == 0)
+	if (num_added == 0) {
+		btrfs_add_delayed_iput(async_cow->inode);
 		async_cow->inode = NULL;
+	}
 }
 
 /*
@@ -1017,6 +1022,8 @@
 {
 	struct async_cow *async_cow;
 	async_cow = container_of(work, struct async_cow, work);
+	if (async_cow->inode)
+		btrfs_add_delayed_iput(async_cow->inode);
 	kfree(async_cow);
 }
 
@@ -1035,7 +1042,7 @@
 	while (start < end) {
 		async_cow = kmalloc(sizeof(*async_cow), GFP_NOFS);
 		BUG_ON(!async_cow); /* -ENOMEM */
-		async_cow->inode = inode;
+		async_cow->inode = igrab(inode);
 		async_cow->root = root;
 		async_cow->locked_page = locked_page;
 		async_cow->start = start;
@@ -1133,8 +1140,18 @@
 	u64 ino = btrfs_ino(inode);
 
 	path = btrfs_alloc_path();
-	if (!path)
+	if (!path) {
+		extent_clear_unlock_delalloc(inode,
+			     &BTRFS_I(inode)->io_tree,
+			     start, end, locked_page,
+			     EXTENT_CLEAR_UNLOCK_PAGE |
+			     EXTENT_CLEAR_UNLOCK |
+			     EXTENT_CLEAR_DELALLOC |
+			     EXTENT_CLEAR_DIRTY |
+			     EXTENT_SET_WRITEBACK |
+			     EXTENT_END_WRITEBACK);
 		return -ENOMEM;
+	}
 
 	nolock = btrfs_is_free_space_inode(root, inode);
 
@@ -1144,6 +1161,15 @@
 		trans = btrfs_join_transaction(root);
 
 	if (IS_ERR(trans)) {
+		extent_clear_unlock_delalloc(inode,
+			     &BTRFS_I(inode)->io_tree,
+			     start, end, locked_page,
+			     EXTENT_CLEAR_UNLOCK_PAGE |
+			     EXTENT_CLEAR_UNLOCK |
+			     EXTENT_CLEAR_DELALLOC |
+			     EXTENT_CLEAR_DIRTY |
+			     EXTENT_SET_WRITEBACK |
+			     EXTENT_END_WRITEBACK);
 		btrfs_free_path(path);
 		return PTR_ERR(trans);
 	}
@@ -1324,8 +1350,11 @@
 	}
 	btrfs_release_path(path);
 
-	if (cur_offset <= end && cow_start == (u64)-1)
+	if (cur_offset <= end && cow_start == (u64)-1) {
 		cow_start = cur_offset;
+		cur_offset = end;
+	}
+
 	if (cow_start != (u64)-1) {
 		ret = cow_file_range(inode, locked_page, cow_start, end,
 				     page_started, nr_written, 1);
@@ -1344,6 +1373,17 @@
 	if (!ret)
 		ret = err;
 
+	if (ret && cur_offset < end)
+		extent_clear_unlock_delalloc(inode,
+			     &BTRFS_I(inode)->io_tree,
+			     cur_offset, end, locked_page,
+			     EXTENT_CLEAR_UNLOCK_PAGE |
+			     EXTENT_CLEAR_UNLOCK |
+			     EXTENT_CLEAR_DELALLOC |
+			     EXTENT_CLEAR_DIRTY |
+			     EXTENT_SET_WRITEBACK |
+			     EXTENT_END_WRITEBACK);
+
 	btrfs_free_path(path);
 	return ret;
 }
@@ -1358,20 +1398,23 @@
 	int ret;
 	struct btrfs_root *root = BTRFS_I(inode)->root;
 
-	if (BTRFS_I(inode)->flags & BTRFS_INODE_NODATACOW)
+	if (BTRFS_I(inode)->flags & BTRFS_INODE_NODATACOW) {
 		ret = run_delalloc_nocow(inode, locked_page, start, end,
 					 page_started, 1, nr_written);
-	else if (BTRFS_I(inode)->flags & BTRFS_INODE_PREALLOC)
+	} else if (BTRFS_I(inode)->flags & BTRFS_INODE_PREALLOC) {
 		ret = run_delalloc_nocow(inode, locked_page, start, end,
 					 page_started, 0, nr_written);
-	else if (!btrfs_test_opt(root, COMPRESS) &&
-		 !(BTRFS_I(inode)->force_compress) &&
-		 !(BTRFS_I(inode)->flags & BTRFS_INODE_COMPRESS))
+	} else if (!btrfs_test_opt(root, COMPRESS) &&
+		   !(BTRFS_I(inode)->force_compress) &&
+		   !(BTRFS_I(inode)->flags & BTRFS_INODE_COMPRESS)) {
 		ret = cow_file_range(inode, locked_page, start, end,
 				      page_started, nr_written, 1);
-	else
+	} else {
+		set_bit(BTRFS_INODE_HAS_ASYNC_EXTENT,
+			&BTRFS_I(inode)->runtime_flags);
 		ret = cow_file_range_async(inode, locked_page, start, end,
 					   page_started, nr_written);
+	}
 	return ret;
 }
 
@@ -1572,11 +1615,11 @@
 	if (btrfs_is_free_space_inode(root, inode))
 		metadata = 2;
 
-	ret = btrfs_bio_wq_end_io(root->fs_info, bio, metadata);
-	if (ret)
-		return ret;
-
 	if (!(rw & REQ_WRITE)) {
+		ret = btrfs_bio_wq_end_io(root->fs_info, bio, metadata);
+		if (ret)
+			return ret;
+
 		if (bio_flags & EXTENT_BIO_COMPRESSED) {
 			return btrfs_submit_compressed_read(inode, bio,
 						    mirror_num, bio_flags);
@@ -1815,25 +1858,24 @@
  * an ordered extent if the range of bytes in the file it covers are
  * fully written.
  */
-static int btrfs_finish_ordered_io(struct inode *inode, u64 start, u64 end)
+static int btrfs_finish_ordered_io(struct btrfs_ordered_extent *ordered_extent)
 {
+	struct inode *inode = ordered_extent->inode;
 	struct btrfs_root *root = BTRFS_I(inode)->root;
 	struct btrfs_trans_handle *trans = NULL;
-	struct btrfs_ordered_extent *ordered_extent = NULL;
 	struct extent_io_tree *io_tree = &BTRFS_I(inode)->io_tree;
 	struct extent_state *cached_state = NULL;
 	int compress_type = 0;
 	int ret;
 	bool nolock;
 
-	ret = btrfs_dec_test_ordered_pending(inode, &ordered_extent, start,
-					     end - start + 1);
-	if (!ret)
-		return 0;
-	BUG_ON(!ordered_extent); /* Logic error */
-
 	nolock = btrfs_is_free_space_inode(root, inode);
 
+	if (test_bit(BTRFS_ORDERED_IOERR, &ordered_extent->flags)) {
+		ret = -EIO;
+		goto out;
+	}
+
 	if (test_bit(BTRFS_ORDERED_NOCOW, &ordered_extent->flags)) {
 		BUG_ON(!list_empty(&ordered_extent->list)); /* Logic error */
 		ret = btrfs_ordered_update_i_size(inode, 0, ordered_extent);
@@ -1889,12 +1931,10 @@
 				   ordered_extent->file_offset,
 				   ordered_extent->len);
 	}
-	unlock_extent_cached(io_tree, ordered_extent->file_offset,
-			     ordered_extent->file_offset +
-			     ordered_extent->len - 1, &cached_state, GFP_NOFS);
+
 	if (ret < 0) {
 		btrfs_abort_transaction(trans, root, ret);
-		goto out;
+		goto out_unlock;
 	}
 
 	add_pending_csums(trans, inode, ordered_extent->file_offset,
@@ -1905,10 +1945,14 @@
 		ret = btrfs_update_inode_fallback(trans, root, inode);
 		if (ret) { /* -ENOMEM or corruption */
 			btrfs_abort_transaction(trans, root, ret);
-			goto out;
+			goto out_unlock;
 		}
 	}
 	ret = 0;
+out_unlock:
+	unlock_extent_cached(io_tree, ordered_extent->file_offset,
+			     ordered_extent->file_offset +
+			     ordered_extent->len - 1, &cached_state, GFP_NOFS);
 out:
 	if (root != root->fs_info->tree_root)
 		btrfs_delalloc_release_metadata(inode, ordered_extent->len);
@@ -1919,26 +1963,57 @@
 			btrfs_end_transaction(trans, root);
 	}
 
+	if (ret)
+		clear_extent_uptodate(io_tree, ordered_extent->file_offset,
+				      ordered_extent->file_offset +
+				      ordered_extent->len - 1, NULL, GFP_NOFS);
+
+	/*
+	 * This needs to be done to make sure anybody waiting knows we are
+	 * done updating everything for this ordered extent.
+	 */
+	btrfs_remove_ordered_extent(inode, ordered_extent);
+
 	/* once for us */
 	btrfs_put_ordered_extent(ordered_extent);
 	/* once for the tree */
 	btrfs_put_ordered_extent(ordered_extent);
 
-	return 0;
-out_unlock:
-	unlock_extent_cached(io_tree, ordered_extent->file_offset,
-			     ordered_extent->file_offset +
-			     ordered_extent->len - 1, &cached_state, GFP_NOFS);
-	goto out;
+	return ret;
+}
+
+static void finish_ordered_fn(struct btrfs_work *work)
+{
+	struct btrfs_ordered_extent *ordered_extent;
+	ordered_extent = container_of(work, struct btrfs_ordered_extent, work);
+	btrfs_finish_ordered_io(ordered_extent);
 }
 
 static int btrfs_writepage_end_io_hook(struct page *page, u64 start, u64 end,
 				struct extent_state *state, int uptodate)
 {
+	struct inode *inode = page->mapping->host;
+	struct btrfs_root *root = BTRFS_I(inode)->root;
+	struct btrfs_ordered_extent *ordered_extent = NULL;
+	struct btrfs_workers *workers;
+
 	trace_btrfs_writepage_end_io_hook(page, start, end, uptodate);
 
 	ClearPagePrivate2(page);
-	return btrfs_finish_ordered_io(page->mapping->host, start, end);
+	if (!btrfs_dec_test_ordered_pending(inode, &ordered_extent, start,
+					    end - start + 1, uptodate))
+		return 0;
+
+	ordered_extent->work.func = finish_ordered_fn;
+	ordered_extent->work.flags = 0;
+
+	if (btrfs_is_free_space_inode(root, inode))
+		workers = &root->fs_info->endio_freespace_worker;
+	else
+		workers = &root->fs_info->endio_write_workers;
+	btrfs_queue_worker(workers, &ordered_extent->work);
+
+	return 0;
 }
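The hook above only hands the ordered extent off to a worker; the heavy lifting (joining a transaction, taking locks) happens later in process context via finish_ordered_fn(). A generic sketch of the same deferral pattern, using the standard workqueue API purely for illustration (the patch itself uses btrfs_queue_worker() and struct btrfs_work):

#include <linux/workqueue.h>
#include <linux/slab.h>

struct deferred_completion {
	struct work_struct work;
	u64 start;
	u64 len;
};

static void deferred_completion_fn(struct work_struct *work)
{
	struct deferred_completion *dc =
		container_of(work, struct deferred_completion, work);

	/* ... finish the range [dc->start, dc->start + dc->len) here ... */
	kfree(dc);
}

/* Called from the end_io path: do no real work, just queue it. */
static void defer_completion(struct deferred_completion *dc)
{
	INIT_WORK(&dc->work, deferred_completion_fn);
	schedule_work(&dc->work);
}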
 
 /*
@@ -2072,12 +2147,12 @@
 	struct btrfs_block_rsv *block_rsv;
 	int ret;
 
-	if (!list_empty(&root->orphan_list) ||
+	if (atomic_read(&root->orphan_inodes) ||
 	    root->orphan_cleanup_state != ORPHAN_CLEANUP_DONE)
 		return;
 
 	spin_lock(&root->orphan_lock);
-	if (!list_empty(&root->orphan_list)) {
+	if (atomic_read(&root->orphan_inodes)) {
 		spin_unlock(&root->orphan_lock);
 		return;
 	}
@@ -2134,8 +2209,8 @@
 		block_rsv = NULL;
 	}
 
-	if (list_empty(&BTRFS_I(inode)->i_orphan)) {
-		list_add(&BTRFS_I(inode)->i_orphan, &root->orphan_list);
+	if (!test_and_set_bit(BTRFS_INODE_HAS_ORPHAN_ITEM,
+			      &BTRFS_I(inode)->runtime_flags)) {
 #if 0
 		/*
 		 * For proper ENOSPC handling, we should do orphan
@@ -2148,12 +2223,12 @@
 			insert = 1;
 #endif
 		insert = 1;
+		atomic_dec(&root->orphan_inodes);
 	}
 
-	if (!BTRFS_I(inode)->orphan_meta_reserved) {
-		BTRFS_I(inode)->orphan_meta_reserved = 1;
+	if (!test_and_set_bit(BTRFS_INODE_ORPHAN_META_RESERVED,
+			      &BTRFS_I(inode)->runtime_flags))
 		reserve = 1;
-	}
 	spin_unlock(&root->orphan_lock);
 
 	/* grab metadata reservation from transaction handle */
@@ -2166,6 +2241,8 @@
 	if (insert >= 1) {
 		ret = btrfs_insert_orphan_item(trans, root, btrfs_ino(inode));
 		if (ret && ret != -EEXIST) {
+			clear_bit(BTRFS_INODE_HAS_ORPHAN_ITEM,
+				  &BTRFS_I(inode)->runtime_flags);
 			btrfs_abort_transaction(trans, root, ret);
 			return ret;
 		}
@@ -2196,15 +2273,13 @@
 	int ret = 0;
 
 	spin_lock(&root->orphan_lock);
-	if (!list_empty(&BTRFS_I(inode)->i_orphan)) {
-		list_del_init(&BTRFS_I(inode)->i_orphan);
+	if (test_and_clear_bit(BTRFS_INODE_HAS_ORPHAN_ITEM,
+			       &BTRFS_I(inode)->runtime_flags))
 		delete_item = 1;
-	}
 
-	if (BTRFS_I(inode)->orphan_meta_reserved) {
-		BTRFS_I(inode)->orphan_meta_reserved = 0;
+	if (test_and_clear_bit(BTRFS_INODE_ORPHAN_META_RESERVED,
+			       &BTRFS_I(inode)->runtime_flags))
 		release_rsv = 1;
-	}
 	spin_unlock(&root->orphan_lock);
 
 	if (trans && delete_item) {
@@ -2212,8 +2287,10 @@
 		BUG_ON(ret); /* -ENOMEM or corruption (JDM: Recheck) */
 	}
 
-	if (release_rsv)
+	if (release_rsv) {
 		btrfs_orphan_release_metadata(inode);
+		atomic_dec(&root->orphan_inodes);
+	}
 
 	return 0;
 }
@@ -2341,6 +2418,8 @@
 				ret = PTR_ERR(trans);
 				goto out;
 			}
+			printk(KERN_ERR "auto deleting %Lu\n",
+			       found_key.objectid);
 			ret = btrfs_del_orphan_item(trans, root,
 						    found_key.objectid);
 			BUG_ON(ret); /* -ENOMEM or corruption (JDM: Recheck) */
@@ -2352,9 +2431,8 @@
 		 * add this inode to the orphan list so btrfs_orphan_del does
 		 * the proper thing when we hit it
 		 */
-		spin_lock(&root->orphan_lock);
-		list_add(&BTRFS_I(inode)->i_orphan, &root->orphan_list);
-		spin_unlock(&root->orphan_lock);
+		set_bit(BTRFS_INODE_HAS_ORPHAN_ITEM,
+			&BTRFS_I(inode)->runtime_flags);
 
 		/* if we have links, this was a truncate, lets do that */
 		if (inode->i_nlink) {
@@ -2510,7 +2588,7 @@
 
 	inode_set_bytes(inode, btrfs_inode_nbytes(leaf, inode_item));
 	BTRFS_I(inode)->generation = btrfs_inode_generation(leaf, inode_item);
-	BTRFS_I(inode)->sequence = btrfs_inode_sequence(leaf, inode_item);
+	inode->i_version = btrfs_inode_sequence(leaf, inode_item);
 	inode->i_generation = BTRFS_I(inode)->generation;
 	inode->i_rdev = 0;
 	rdev = btrfs_inode_rdev(leaf, inode_item);
@@ -2594,7 +2672,7 @@
 
 	btrfs_set_inode_nbytes(leaf, item, inode_get_bytes(inode));
 	btrfs_set_inode_generation(leaf, item, BTRFS_I(inode)->generation);
-	btrfs_set_inode_sequence(leaf, item, BTRFS_I(inode)->sequence);
+	btrfs_set_inode_sequence(leaf, item, inode->i_version);
 	btrfs_set_inode_transid(leaf, item, trans->transid);
 	btrfs_set_inode_rdev(leaf, item, inode->i_rdev);
 	btrfs_set_inode_flags(leaf, item, BTRFS_I(inode)->flags);
@@ -2752,6 +2830,8 @@
 		goto out;
 
 	btrfs_i_size_write(dir, dir->i_size - name_len * 2);
+	inode_inc_iversion(inode);
+	inode_inc_iversion(dir);
 	inode->i_ctime = dir->i_mtime = dir->i_ctime = CURRENT_TIME;
 	btrfs_update_inode(trans, root, dir);
 out:
@@ -3089,6 +3169,7 @@
 	}
 
 	btrfs_i_size_write(dir, dir->i_size - name_len * 2);
+	inode_inc_iversion(dir);
 	dir->i_mtime = dir->i_ctime = CURRENT_TIME;
 	ret = btrfs_update_inode(trans, root, dir);
 	if (ret)
@@ -3607,7 +3688,8 @@
 		 * any new writes get down to disk quickly.
 		 */
 		if (newsize == 0)
-			BTRFS_I(inode)->ordered_data_close = 1;
+			set_bit(BTRFS_INODE_ORDERED_DATA_CLOSE,
+				&BTRFS_I(inode)->runtime_flags);
 
 		/* we don't support swapfiles, so vmtruncate shouldn't fail */
 		truncate_setsize(inode, newsize);
@@ -3638,6 +3720,7 @@
 
 	if (attr->ia_valid) {
 		setattr_copy(inode, attr);
+		inode_inc_iversion(inode);
 		err = btrfs_dirty_inode(inode);
 
 		if (!err && attr->ia_valid & ATTR_MODE)
@@ -3671,7 +3754,8 @@
 	btrfs_wait_ordered_range(inode, 0, (u64)-1);
 
 	if (root->fs_info->log_root_recovering) {
-		BUG_ON(!list_empty(&BTRFS_I(inode)->i_orphan));
+		BUG_ON(!test_bit(BTRFS_INODE_HAS_ORPHAN_ITEM,
+				 &BTRFS_I(inode)->runtime_flags));
 		goto no_delete;
 	}
 
@@ -3756,7 +3840,7 @@
 	btrfs_end_transaction(trans, root);
 	btrfs_btree_balance_dirty(root, nr);
 no_delete:
-	end_writeback(inode);
+	clear_inode(inode);
 	return;
 }
 
@@ -4066,7 +4150,7 @@
 
 	BTRFS_I(inode)->root = root;
 	memcpy(&BTRFS_I(inode)->location, key, sizeof(*key));
-	BTRFS_I(inode)->dummy_inode = 1;
+	set_bit(BTRFS_INODE_DUMMY, &BTRFS_I(inode)->runtime_flags);
 
 	inode->i_ino = BTRFS_EMPTY_SUBVOL_DIR_OBJECTID;
 	inode->i_op = &btrfs_dir_ro_inode_operations;
@@ -4370,7 +4454,7 @@
 	int ret = 0;
 	bool nolock = false;
 
-	if (BTRFS_I(inode)->dummy_inode)
+	if (test_bit(BTRFS_INODE_DUMMY, &BTRFS_I(inode)->runtime_flags))
 		return 0;
 
 	if (btrfs_fs_closing(root->fs_info) && btrfs_is_free_space_inode(root, inode))
@@ -4403,7 +4487,7 @@
 	struct btrfs_trans_handle *trans;
 	int ret;
 
-	if (BTRFS_I(inode)->dummy_inode)
+	if (test_bit(BTRFS_INODE_DUMMY, &BTRFS_I(inode)->runtime_flags))
 		return 0;
 
 	trans = btrfs_join_transaction(root);
@@ -4431,46 +4515,18 @@
  * This is a copy of file_update_time.  We need this so we can return error on
  * ENOSPC for updating the inode in the case of file write and mmap writes.
  */
-int btrfs_update_time(struct file *file)
+static int btrfs_update_time(struct inode *inode, struct timespec *now,
+			     int flags)
 {
-	struct inode *inode = file->f_path.dentry->d_inode;
-	struct timespec now;
-	int ret;
-	enum { S_MTIME = 1, S_CTIME = 2, S_VERSION = 4 } sync_it = 0;
-
-	/* First try to exhaust all avenues to not sync */
-	if (IS_NOCMTIME(inode))
-		return 0;
-
-	now = current_fs_time(inode->i_sb);
-	if (!timespec_equal(&inode->i_mtime, &now))
-		sync_it = S_MTIME;
-
-	if (!timespec_equal(&inode->i_ctime, &now))
-		sync_it |= S_CTIME;
-
-	if (IS_I_VERSION(inode))
-		sync_it |= S_VERSION;
-
-	if (!sync_it)
-		return 0;
-
-	/* Finally allowed to write? Takes lock. */
-	if (mnt_want_write_file(file))
-		return 0;
-
-	/* Only change inode inside the lock region */
-	if (sync_it & S_VERSION)
+	if (flags & S_VERSION)
 		inode_inc_iversion(inode);
-	if (sync_it & S_CTIME)
-		inode->i_ctime = now;
-	if (sync_it & S_MTIME)
-		inode->i_mtime = now;
-	ret = btrfs_dirty_inode(inode);
-	if (!ret)
-		mark_inode_dirty_sync(inode);
-	mnt_drop_write(file->f_path.mnt);
-	return ret;
+	if (flags & S_CTIME)
+		inode->i_ctime = *now;
+	if (flags & S_MTIME)
+		inode->i_mtime = *now;
+	if (flags & S_ATIME)
+		inode->i_atime = *now;
+	return btrfs_dirty_inode(inode);
 }
 
 /*
@@ -4730,6 +4786,7 @@
 
 	btrfs_i_size_write(parent_inode, parent_inode->i_size +
 			   name_len * 2);
+	inode_inc_iversion(parent_inode);
 	parent_inode->i_mtime = parent_inode->i_ctime = CURRENT_TIME;
 	ret = btrfs_update_inode(trans, root, parent_inode);
 	if (ret)
@@ -4937,6 +4994,7 @@
 	}
 
 	btrfs_inc_nlink(inode);
+	inode_inc_iversion(inode);
 	inode->i_ctime = CURRENT_TIME;
 	ihold(inode);
 
@@ -5903,9 +5961,7 @@
 	struct btrfs_dio_private *dip = bio->bi_private;
 	struct inode *inode = dip->inode;
 	struct btrfs_root *root = BTRFS_I(inode)->root;
-	struct btrfs_trans_handle *trans;
 	struct btrfs_ordered_extent *ordered = NULL;
-	struct extent_state *cached_state = NULL;
 	u64 ordered_offset = dip->logical_offset;
 	u64 ordered_bytes = dip->bytes;
 	int ret;
@@ -5915,73 +5971,14 @@
 again:
 	ret = btrfs_dec_test_first_ordered_pending(inode, &ordered,
 						   &ordered_offset,
-						   ordered_bytes);
+						   ordered_bytes, !err);
 	if (!ret)
 		goto out_test;
 
-	BUG_ON(!ordered);
-
-	trans = btrfs_join_transaction(root);
-	if (IS_ERR(trans)) {
-		err = -ENOMEM;
-		goto out;
-	}
-	trans->block_rsv = &root->fs_info->delalloc_block_rsv;
-
-	if (test_bit(BTRFS_ORDERED_NOCOW, &ordered->flags)) {
-		ret = btrfs_ordered_update_i_size(inode, 0, ordered);
-		if (!ret)
-			err = btrfs_update_inode_fallback(trans, root, inode);
-		goto out;
-	}
-
-	lock_extent_bits(&BTRFS_I(inode)->io_tree, ordered->file_offset,
-			 ordered->file_offset + ordered->len - 1, 0,
-			 &cached_state);
-
-	if (test_bit(BTRFS_ORDERED_PREALLOC, &ordered->flags)) {
-		ret = btrfs_mark_extent_written(trans, inode,
-						ordered->file_offset,
-						ordered->file_offset +
-						ordered->len);
-		if (ret) {
-			err = ret;
-			goto out_unlock;
-		}
-	} else {
-		ret = insert_reserved_file_extent(trans, inode,
-						  ordered->file_offset,
-						  ordered->start,
-						  ordered->disk_len,
-						  ordered->len,
-						  ordered->len,
-						  0, 0, 0,
-						  BTRFS_FILE_EXTENT_REG);
-		unpin_extent_cache(&BTRFS_I(inode)->extent_tree,
-				   ordered->file_offset, ordered->len);
-		if (ret) {
-			err = ret;
-			WARN_ON(1);
-			goto out_unlock;
-		}
-	}
-
-	add_pending_csums(trans, inode, ordered->file_offset, &ordered->list);
-	ret = btrfs_ordered_update_i_size(inode, 0, ordered);
-	if (!ret || !test_bit(BTRFS_ORDERED_PREALLOC, &ordered->flags))
-		btrfs_update_inode_fallback(trans, root, inode);
-	ret = 0;
-out_unlock:
-	unlock_extent_cached(&BTRFS_I(inode)->io_tree, ordered->file_offset,
-			     ordered->file_offset + ordered->len - 1,
-			     &cached_state, GFP_NOFS);
-out:
-	btrfs_delalloc_release_metadata(inode, ordered->len);
-	btrfs_end_transaction(trans, root);
-	ordered_offset = ordered->file_offset + ordered->len;
-	btrfs_put_ordered_extent(ordered);
-	btrfs_put_ordered_extent(ordered);
-
+	ordered->work.func = finish_ordered_fn;
+	ordered->work.flags = 0;
+	btrfs_queue_worker(&root->fs_info->endio_write_workers,
+			   &ordered->work);
 out_test:
 	/*
 	 * our bio might span multiple ordered extents.  If we haven't
@@ -5990,12 +5987,12 @@
 	if (ordered_offset < dip->logical_offset + dip->bytes) {
 		ordered_bytes = dip->logical_offset + dip->bytes -
 			ordered_offset;
+		ordered = NULL;
 		goto again;
 	}
 out_done:
 	bio->bi_private = dip->private;
 
-	kfree(dip->csums);
 	kfree(dip);
 
 	/* If we had an error make sure to clear the uptodate flag */
@@ -6063,9 +6060,12 @@
 	int ret;
 
 	bio_get(bio);
-	ret = btrfs_bio_wq_end_io(root->fs_info, bio, 0);
-	if (ret)
-		goto err;
+
+	if (!write) {
+		ret = btrfs_bio_wq_end_io(root->fs_info, bio, 0);
+		if (ret)
+			goto err;
+	}
 
 	if (skip_sum)
 		goto map;
@@ -6485,13 +6485,13 @@
 
 static void btrfs_invalidatepage(struct page *page, unsigned long offset)
 {
+	struct inode *inode = page->mapping->host;
 	struct extent_io_tree *tree;
 	struct btrfs_ordered_extent *ordered;
 	struct extent_state *cached_state = NULL;
 	u64 page_start = page_offset(page);
 	u64 page_end = page_start + PAGE_CACHE_SIZE - 1;
 
-
 	/*
 	 * we have the page locked, so new writeback can't start,
 	 * and the dirty bit won't be cleared while we are here.
@@ -6501,13 +6501,13 @@
 	 */
 	wait_on_page_writeback(page);
 
-	tree = &BTRFS_I(page->mapping->host)->io_tree;
+	tree = &BTRFS_I(inode)->io_tree;
 	if (offset) {
 		btrfs_releasepage(page, GFP_NOFS);
 		return;
 	}
 	lock_extent_bits(tree, page_start, page_end, 0, &cached_state);
-	ordered = btrfs_lookup_ordered_extent(page->mapping->host,
+	ordered = btrfs_lookup_ordered_extent(inode,
 					   page_offset(page));
 	if (ordered) {
 		/*
@@ -6522,9 +6522,10 @@
 		 * whoever cleared the private bit is responsible
 		 * for the finish_ordered_io
 		 */
-		if (TestClearPagePrivate2(page)) {
-			btrfs_finish_ordered_io(page->mapping->host,
-						page_start, page_end);
+		if (TestClearPagePrivate2(page) &&
+		    btrfs_dec_test_ordered_pending(inode, &ordered, page_start,
+						   PAGE_CACHE_SIZE, 1)) {
+			btrfs_finish_ordered_io(ordered);
 		}
 		btrfs_put_ordered_extent(ordered);
 		cached_state = NULL;
@@ -6576,7 +6577,7 @@
 
 	ret  = btrfs_delalloc_reserve_space(inode, PAGE_CACHE_SIZE);
 	if (!ret) {
-		ret = btrfs_update_time(vma->vm_file);
+		ret = file_update_time(vma->vm_file);
 		reserved = 1;
 	}
 	if (ret) {
@@ -6771,7 +6772,8 @@
 	 * using truncate to replace the contents of the file will
 	 * end up with a zero length file after a crash.
 	 */
-	if (inode->i_size == 0 && BTRFS_I(inode)->ordered_data_close)
+	if (inode->i_size == 0 && test_bit(BTRFS_INODE_ORDERED_DATA_CLOSE,
+					   &BTRFS_I(inode)->runtime_flags))
 		btrfs_add_ordered_operation(trans, root, inode);
 
 	while (1) {
@@ -6894,7 +6896,6 @@
 	ei->root = NULL;
 	ei->space_info = NULL;
 	ei->generation = 0;
-	ei->sequence = 0;
 	ei->last_trans = 0;
 	ei->last_sub_trans = 0;
 	ei->logged_trans = 0;
@@ -6909,11 +6910,7 @@
 	ei->outstanding_extents = 0;
 	ei->reserved_extents = 0;
 
-	ei->ordered_data_close = 0;
-	ei->orphan_meta_reserved = 0;
-	ei->dummy_inode = 0;
-	ei->in_defrag = 0;
-	ei->delalloc_meta_reserved = 0;
+	ei->runtime_flags = 0;
 	ei->force_compress = BTRFS_COMPRESS_NONE;
 
 	ei->delayed_node = NULL;
@@ -6927,7 +6924,6 @@
 	mutex_init(&ei->log_mutex);
 	mutex_init(&ei->delalloc_mutex);
 	btrfs_ordered_inode_tree_init(&ei->ordered_tree);
-	INIT_LIST_HEAD(&ei->i_orphan);
 	INIT_LIST_HEAD(&ei->delalloc_inodes);
 	INIT_LIST_HEAD(&ei->ordered_operations);
 	RB_CLEAR_NODE(&ei->rb_node);
@@ -6972,13 +6968,12 @@
 		spin_unlock(&root->fs_info->ordered_extent_lock);
 	}
 
-	spin_lock(&root->orphan_lock);
-	if (!list_empty(&BTRFS_I(inode)->i_orphan)) {
+	if (test_bit(BTRFS_INODE_HAS_ORPHAN_ITEM,
+		     &BTRFS_I(inode)->runtime_flags)) {
 		printk(KERN_INFO "BTRFS: inode %llu still on the orphan list\n",
 		       (unsigned long long)btrfs_ino(inode));
-		list_del_init(&BTRFS_I(inode)->i_orphan);
+		atomic_dec(&root->orphan_inodes);
 	}
-	spin_unlock(&root->orphan_lock);
 
 	while (1) {
 		ordered = btrfs_lookup_first_ordered_extent(inode, (u64)-1);
@@ -7099,10 +7094,13 @@
 	else
 		b_inode->flags &= ~BTRFS_INODE_NODATACOW;
 
-	if (b_dir->flags & BTRFS_INODE_COMPRESS)
+	if (b_dir->flags & BTRFS_INODE_COMPRESS) {
 		b_inode->flags |= BTRFS_INODE_COMPRESS;
-	else
-		b_inode->flags &= ~BTRFS_INODE_COMPRESS;
+		b_inode->flags &= ~BTRFS_INODE_NOCOMPRESS;
+	} else {
+		b_inode->flags &= ~(BTRFS_INODE_COMPRESS |
+				    BTRFS_INODE_NOCOMPRESS);
+	}
 }
 
 static int btrfs_rename(struct inode *old_dir, struct dentry *old_dentry,
@@ -7193,6 +7191,9 @@
 	if (new_inode && new_inode->i_size && S_ISREG(old_inode->i_mode))
 		btrfs_add_ordered_operation(trans, root, old_inode);
 
+	inode_inc_iversion(old_dir);
+	inode_inc_iversion(new_dir);
+	inode_inc_iversion(old_inode);
 	old_dir->i_ctime = old_dir->i_mtime = ctime;
 	new_dir->i_ctime = new_dir->i_mtime = ctime;
 	old_inode->i_ctime = ctime;
@@ -7219,6 +7220,7 @@
 	}
 
 	if (new_inode) {
+		inode_inc_iversion(new_inode);
 		new_inode->i_ctime = CURRENT_TIME;
 		if (unlikely(btrfs_ino(new_inode) ==
 			     BTRFS_EMPTY_SUBVOL_DIR_OBJECTID)) {
@@ -7490,6 +7492,7 @@
 		cur_offset += ins.offset;
 		*alloc_hint = ins.objectid + ins.offset;
 
+		inode_inc_iversion(inode);
 		inode->i_ctime = CURRENT_TIME;
 		BTRFS_I(inode)->flags |= BTRFS_INODE_PREALLOC;
 		if (!(mode & FALLOC_FL_KEEP_SIZE) &&
@@ -7647,6 +7650,7 @@
 	.permission	= btrfs_permission,
 	.fiemap		= btrfs_fiemap,
 	.get_acl	= btrfs_get_acl,
+	.update_time	= btrfs_update_time,
 };
 static const struct inode_operations btrfs_special_inode_operations = {
 	.getattr	= btrfs_getattr,
@@ -7657,6 +7661,7 @@
 	.listxattr	= btrfs_listxattr,
 	.removexattr	= btrfs_removexattr,
 	.get_acl	= btrfs_get_acl,
+	.update_time	= btrfs_update_time,
 };
 static const struct inode_operations btrfs_symlink_inode_operations = {
 	.readlink	= generic_readlink,
@@ -7670,6 +7675,7 @@
 	.listxattr	= btrfs_listxattr,
 	.removexattr	= btrfs_removexattr,
 	.get_acl	= btrfs_get_acl,
+	.update_time	= btrfs_update_time,
 };
 
 const struct dentry_operations btrfs_dentry_operations = {
diff --git a/fs/btrfs/ioctl.c b/fs/btrfs/ioctl.c
index 14f8e1f..0e92e57 100644
--- a/fs/btrfs/ioctl.c
+++ b/fs/btrfs/ioctl.c
@@ -52,6 +52,7 @@
 #include "locking.h"
 #include "inode-map.h"
 #include "backref.h"
+#include "rcu-string.h"
 
 /* Mask out flags that are inappropriate for the given type of inode. */
 static inline __u32 btrfs_mask_flags(umode_t mode, __u32 flags)
@@ -261,6 +262,7 @@
 	}
 
 	btrfs_update_iflags(inode);
+	inode_inc_iversion(inode);
 	inode->i_ctime = CURRENT_TIME;
 	ret = btrfs_update_inode(trans, root, inode);
 
@@ -367,7 +369,7 @@
 		return PTR_ERR(trans);
 
 	leaf = btrfs_alloc_free_block(trans, root, root->leafsize,
-				      0, objectid, NULL, 0, 0, 0, 0);
+				      0, objectid, NULL, 0, 0, 0);
 	if (IS_ERR(leaf)) {
 		ret = PTR_ERR(leaf);
 		goto fail;
@@ -784,48 +786,12 @@
 	return -ENOENT;
 }
 
-/*
- * Validaty check of prev em and next em:
- * 1) no prev/next em
- * 2) prev/next em is an hole/inline extent
- */
-static int check_adjacent_extents(struct inode *inode, struct extent_map *em)
+static struct extent_map *defrag_lookup_extent(struct inode *inode, u64 start)
 {
 	struct extent_map_tree *em_tree = &BTRFS_I(inode)->extent_tree;
-	struct extent_map *prev = NULL, *next = NULL;
-	int ret = 0;
-
-	read_lock(&em_tree->lock);
-	prev = lookup_extent_mapping(em_tree, em->start - 1, (u64)-1);
-	next = lookup_extent_mapping(em_tree, em->start + em->len, (u64)-1);
-	read_unlock(&em_tree->lock);
-
-	if ((!prev || prev->block_start >= EXTENT_MAP_LAST_BYTE) &&
-	    (!next || next->block_start >= EXTENT_MAP_LAST_BYTE))
-		ret = 1;
-	free_extent_map(prev);
-	free_extent_map(next);
-
-	return ret;
-}
-
-static int should_defrag_range(struct inode *inode, u64 start, u64 len,
-			       int thresh, u64 *last_len, u64 *skip,
-			       u64 *defrag_end)
-{
 	struct extent_io_tree *io_tree = &BTRFS_I(inode)->io_tree;
-	struct extent_map *em = NULL;
-	struct extent_map_tree *em_tree = &BTRFS_I(inode)->extent_tree;
-	int ret = 1;
-
-	/*
-	 * make sure that once we start defragging an extent, we keep on
-	 * defragging it
-	 */
-	if (start < *defrag_end)
-		return 1;
-
-	*skip = 0;
+	struct extent_map *em;
+	u64 len = PAGE_CACHE_SIZE;
 
 	/*
 	 * hopefully we have this extent in the tree already, try without
@@ -842,27 +808,64 @@
 		unlock_extent(io_tree, start, start + len - 1);
 
 		if (IS_ERR(em))
-			return 0;
+			return NULL;
 	}
 
+	return em;
+}
+
+static bool defrag_check_next_extent(struct inode *inode, struct extent_map *em)
+{
+	struct extent_map *next;
+	bool ret = true;
+
+	/* this is the last extent */
+	if (em->start + em->len >= i_size_read(inode))
+		return false;
+
+	next = defrag_lookup_extent(inode, em->start + em->len);
+	if (!next || next->block_start >= EXTENT_MAP_LAST_BYTE)
+		ret = false;
+
+	free_extent_map(next);
+	return ret;
+}
+
+static int should_defrag_range(struct inode *inode, u64 start, int thresh,
+			       u64 *last_len, u64 *skip, u64 *defrag_end)
+{
+	struct extent_map *em;
+	int ret = 1;
+	bool next_mergeable = true;
+
+	/*
+	 * make sure that once we start defragging an extent, we keep on
+	 * defragging it
+	 */
+	if (start < *defrag_end)
+		return 1;
+
+	*skip = 0;
+
+	em = defrag_lookup_extent(inode, start);
+	if (!em)
+		return 0;
+
 	/* this will cover holes, and inline extents */
 	if (em->block_start >= EXTENT_MAP_LAST_BYTE) {
 		ret = 0;
 		goto out;
 	}
 
-	/* If we have nothing to merge with us, just skip. */
-	if (check_adjacent_extents(inode, em)) {
-		ret = 0;
-		goto out;
-	}
+	next_mergeable = defrag_check_next_extent(inode, em);
 
 	/*
-	 * we hit a real extent, if it is big don't bother defragging it again
+	 * we hit a real extent, if it is big or the next extent is not a
+	 * real extent, don't bother defragging it
 	 */
-	if ((*last_len == 0 || *last_len >= thresh) && em->len >= thresh)
+	if ((*last_len == 0 || *last_len >= thresh) &&
+	    (em->len >= thresh || !next_mergeable))
 		ret = 0;
-
 out:
 	/*
 	 * last_len ends up being a counter of how many bytes we've defragged.
@@ -1141,8 +1144,8 @@
 			break;
 
 		if (!should_defrag_range(inode, (u64)i << PAGE_CACHE_SHIFT,
-					 PAGE_CACHE_SIZE, extent_thresh,
-					 &last_len, &skip, &defrag_end)) {
+					 extent_thresh, &last_len, &skip,
+					 &defrag_end)) {
 			unsigned long next;
 			/*
 			 * the should_defrag function tells us how much to skip
@@ -1303,6 +1306,14 @@
 		ret = -EINVAL;
 		goto out_free;
 	}
+	if (device->fs_devices && device->fs_devices->seeding) {
+		printk(KERN_INFO "btrfs: resizer unable to apply on "
+		       "seeding device %llu\n",
+		       (unsigned long long)devid);
+		ret = -EINVAL;
+		goto out_free;
+	}
+
 	if (!strcmp(sizestr, "max"))
 		new_size = device->bdev->bd_inode->i_size;
 	else {
@@ -1344,8 +1355,9 @@
 	do_div(new_size, root->sectorsize);
 	new_size *= root->sectorsize;
 
-	printk(KERN_INFO "btrfs: new size for %s is %llu\n",
-		device->name, (unsigned long long)new_size);
+	printk_in_rcu(KERN_INFO "btrfs: new size for %s is %llu\n",
+		      rcu_str_deref(device->name),
+		      (unsigned long long)new_size);
 
 	if (new_size > old_size) {
 		trans = btrfs_start_transaction(root, 0);
@@ -2262,10 +2274,17 @@
 	di_args->bytes_used = dev->bytes_used;
 	di_args->total_bytes = dev->total_bytes;
 	memcpy(di_args->uuid, dev->uuid, sizeof(di_args->uuid));
-	if (dev->name)
-		strncpy(di_args->path, dev->name, sizeof(di_args->path));
-	else
+	if (dev->name) {
+		struct rcu_string *name;
+
+		rcu_read_lock();
+		name = rcu_dereference(dev->name);
+		strncpy(di_args->path, name->str, sizeof(di_args->path));
+		rcu_read_unlock();
+		di_args->path[sizeof(di_args->path) - 1] = 0;
+	} else {
 		di_args->path[0] = '\0';
+	}
 
 out:
 	if (ret == 0 && copy_to_user(arg, di_args, sizeof(*di_args)))
@@ -2622,6 +2641,7 @@
 			btrfs_mark_buffer_dirty(leaf);
 			btrfs_release_path(path);
 
+			inode_inc_iversion(inode);
 			inode->i_mtime = inode->i_ctime = CURRENT_TIME;
 
 			/*
@@ -2914,7 +2934,7 @@
 		up_read(&info->groups_sem);
 	}
 
-	user_dest = (struct btrfs_ioctl_space_info *)
+	user_dest = (struct btrfs_ioctl_space_info __user *)
 		(arg + sizeof(struct btrfs_ioctl_space_args));
 
 	if (copy_to_user(user_dest, dest_orig, alloc_size))
@@ -3042,6 +3062,28 @@
 	return ret;
 }
 
+static long btrfs_ioctl_get_dev_stats(struct btrfs_root *root,
+				      void __user *arg, int reset_after_read)
+{
+	struct btrfs_ioctl_get_dev_stats *sa;
+	int ret;
+
+	if (reset_after_read && !capable(CAP_SYS_ADMIN))
+		return -EPERM;
+
+	sa = memdup_user(arg, sizeof(*sa));
+	if (IS_ERR(sa))
+		return PTR_ERR(sa);
+
+	ret = btrfs_get_dev_stats(root, sa, reset_after_read);
+
+	if (copy_to_user(arg, sa, sizeof(*sa)))
+		ret = -EFAULT;
+
+	kfree(sa);
+	return ret;
+}
+
 static long btrfs_ioctl_ino_to_path(struct btrfs_root *root, void __user *arg)
 {
 	int ret = 0;
@@ -3212,8 +3254,9 @@
 	}
 }
 
-static long btrfs_ioctl_balance(struct btrfs_root *root, void __user *arg)
+static long btrfs_ioctl_balance(struct file *file, void __user *arg)
 {
+	struct btrfs_root *root = BTRFS_I(fdentry(file)->d_inode)->root;
 	struct btrfs_fs_info *fs_info = root->fs_info;
 	struct btrfs_ioctl_balance_args *bargs;
 	struct btrfs_balance_control *bctl;
@@ -3225,6 +3268,10 @@
 	if (fs_info->sb->s_flags & MS_RDONLY)
 		return -EROFS;
 
+	ret = mnt_want_write(file->f_path.mnt);
+	if (ret)
+		return ret;
+
 	mutex_lock(&fs_info->volume_mutex);
 	mutex_lock(&fs_info->balance_mutex);
 
@@ -3291,6 +3338,7 @@
 out:
 	mutex_unlock(&fs_info->balance_mutex);
 	mutex_unlock(&fs_info->volume_mutex);
+	mnt_drop_write(file->f_path.mnt);
 	return ret;
 }
 
@@ -3386,7 +3434,7 @@
 	case BTRFS_IOC_DEV_INFO:
 		return btrfs_ioctl_dev_info(root, argp);
 	case BTRFS_IOC_BALANCE:
-		return btrfs_ioctl_balance(root, NULL);
+		return btrfs_ioctl_balance(file, NULL);
 	case BTRFS_IOC_CLONE:
 		return btrfs_ioctl_clone(file, arg, 0, 0, 0);
 	case BTRFS_IOC_CLONE_RANGE:
@@ -3419,11 +3467,15 @@
 	case BTRFS_IOC_SCRUB_PROGRESS:
 		return btrfs_ioctl_scrub_progress(root, argp);
 	case BTRFS_IOC_BALANCE_V2:
-		return btrfs_ioctl_balance(root, argp);
+		return btrfs_ioctl_balance(file, argp);
 	case BTRFS_IOC_BALANCE_CTL:
 		return btrfs_ioctl_balance_ctl(root, arg);
 	case BTRFS_IOC_BALANCE_PROGRESS:
 		return btrfs_ioctl_balance_progress(root, argp);
+	case BTRFS_IOC_GET_DEV_STATS:
+		return btrfs_ioctl_get_dev_stats(root, argp, 0);
+	case BTRFS_IOC_GET_AND_RESET_DEV_STATS:
+		return btrfs_ioctl_get_dev_stats(root, argp, 1);
 	}
 
 	return -ENOTTY;
diff --git a/fs/btrfs/ioctl.h b/fs/btrfs/ioctl.h
index 086e6bd..497c530 100644
--- a/fs/btrfs/ioctl.h
+++ b/fs/btrfs/ioctl.h
@@ -266,6 +266,35 @@
 	__u64				inodes;
 };
 
+enum btrfs_dev_stat_values {
+	/* disk I/O failure stats */
+	BTRFS_DEV_STAT_WRITE_ERRS, /* EIO or EREMOTEIO from lower layers */
+	BTRFS_DEV_STAT_READ_ERRS, /* EIO or EREMOTEIO from lower layers */
+	BTRFS_DEV_STAT_FLUSH_ERRS, /* EIO or EREMOTEIO from lower layers */
+
+	/* stats for indirect indications for I/O failures */
+	BTRFS_DEV_STAT_CORRUPTION_ERRS, /* checksum error, bytenr error or
+					 * contents is illegal: this is an
+					 * indication that the block was damaged
+					 * during read or write, or written to
+					 * wrong location or read from wrong
+					 * location */
+	BTRFS_DEV_STAT_GENERATION_ERRS, /* an indication that blocks have not
+					 * been written */
+
+	BTRFS_DEV_STAT_VALUES_MAX
+};
+
+struct btrfs_ioctl_get_dev_stats {
+	__u64 devid;				/* in */
+	__u64 nr_items;				/* in/out */
+
+	/* out values: */
+	__u64 values[BTRFS_DEV_STAT_VALUES_MAX];
+
+	__u64 unused[128 - 2 - BTRFS_DEV_STAT_VALUES_MAX]; /* pad to 1k */
+};
+
 #define BTRFS_IOC_SNAP_CREATE _IOW(BTRFS_IOCTL_MAGIC, 1, \
 				   struct btrfs_ioctl_vol_args)
 #define BTRFS_IOC_DEFRAG _IOW(BTRFS_IOCTL_MAGIC, 2, \
@@ -330,5 +359,9 @@
 					struct btrfs_ioctl_ino_path_args)
 #define BTRFS_IOC_LOGICAL_INO _IOWR(BTRFS_IOCTL_MAGIC, 36, \
 					struct btrfs_ioctl_ino_path_args)
+#define BTRFS_IOC_GET_DEV_STATS _IOWR(BTRFS_IOCTL_MAGIC, 52, \
+				      struct btrfs_ioctl_get_dev_stats)
+#define BTRFS_IOC_GET_AND_RESET_DEV_STATS _IOWR(BTRFS_IOCTL_MAGIC, 53, \
+					struct btrfs_ioctl_get_dev_stats)
 
 #endif
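
A minimal userspace sketch (not part of this patch) of how the two new ioctls above could be exercised, assuming a copied "ioctl.h" with these definitions and a devid learned elsewhere (e.g. via BTRFS_IOC_DEV_INFO); nr_items is treated here as the number of value slots the caller provides.

#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <fcntl.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <linux/types.h>
#include "ioctl.h"	/* userspace copy of the btrfs ioctl definitions above */

int main(int argc, char **argv)
{
	struct btrfs_ioctl_get_dev_stats args;
	int fd, i;

	if (argc != 3) {
		fprintf(stderr, "usage: %s <path-on-btrfs> <devid>\n", argv[0]);
		return 1;
	}

	fd = open(argv[1], O_RDONLY);
	if (fd < 0) {
		perror("open");
		return 1;
	}

	memset(&args, 0, sizeof(args));
	args.devid = strtoull(argv[2], NULL, 0);
	args.nr_items = BTRFS_DEV_STAT_VALUES_MAX;	/* slots provided by the caller */

	/* use BTRFS_IOC_GET_AND_RESET_DEV_STATS to also clear the counters */
	if (ioctl(fd, BTRFS_IOC_GET_DEV_STATS, &args) < 0) {
		perror("BTRFS_IOC_GET_DEV_STATS");
		close(fd);
		return 1;
	}

	for (i = 0; i < BTRFS_DEV_STAT_VALUES_MAX; i++)
		printf("stat[%d] = %llu\n", i, (unsigned long long)args.values[i]);

	close(fd);
	return 0;
}
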
diff --git a/fs/btrfs/ordered-data.c b/fs/btrfs/ordered-data.c
index bbf6d0d..643335a 100644
--- a/fs/btrfs/ordered-data.c
+++ b/fs/btrfs/ordered-data.c
@@ -196,7 +196,7 @@
 	entry->len = len;
 	entry->disk_len = disk_len;
 	entry->bytes_left = len;
-	entry->inode = inode;
+	entry->inode = igrab(inode);
 	entry->compress_type = compress_type;
 	if (type != BTRFS_ORDERED_IO_DONE && type != BTRFS_ORDERED_COMPLETE)
 		set_bit(type, &entry->flags);
@@ -212,12 +212,12 @@
 
 	trace_btrfs_ordered_extent_add(inode, entry);
 
-	spin_lock(&tree->lock);
+	spin_lock_irq(&tree->lock);
 	node = tree_insert(&tree->tree, file_offset,
 			   &entry->rb_node);
 	if (node)
 		ordered_data_tree_panic(inode, -EEXIST, file_offset);
-	spin_unlock(&tree->lock);
+	spin_unlock_irq(&tree->lock);
 
 	spin_lock(&BTRFS_I(inode)->root->fs_info->ordered_extent_lock);
 	list_add_tail(&entry->root_extent_list,
@@ -264,9 +264,9 @@
 	struct btrfs_ordered_inode_tree *tree;
 
 	tree = &BTRFS_I(inode)->ordered_tree;
-	spin_lock(&tree->lock);
+	spin_lock_irq(&tree->lock);
 	list_add_tail(&sum->list, &entry->list);
-	spin_unlock(&tree->lock);
+	spin_unlock_irq(&tree->lock);
 }
 
 /*
@@ -283,18 +283,19 @@
  */
 int btrfs_dec_test_first_ordered_pending(struct inode *inode,
 				   struct btrfs_ordered_extent **cached,
-				   u64 *file_offset, u64 io_size)
+				   u64 *file_offset, u64 io_size, int uptodate)
 {
 	struct btrfs_ordered_inode_tree *tree;
 	struct rb_node *node;
 	struct btrfs_ordered_extent *entry = NULL;
 	int ret;
+	unsigned long flags;
 	u64 dec_end;
 	u64 dec_start;
 	u64 to_dec;
 
 	tree = &BTRFS_I(inode)->ordered_tree;
-	spin_lock(&tree->lock);
+	spin_lock_irqsave(&tree->lock, flags);
 	node = tree_search(tree, *file_offset);
 	if (!node) {
 		ret = 1;
@@ -323,6 +324,9 @@
 		       (unsigned long long)to_dec);
 	}
 	entry->bytes_left -= to_dec;
+	if (!uptodate)
+		set_bit(BTRFS_ORDERED_IOERR, &entry->flags);
+
 	if (entry->bytes_left == 0)
 		ret = test_and_set_bit(BTRFS_ORDERED_IO_DONE, &entry->flags);
 	else
@@ -332,7 +336,7 @@
 		*cached = entry;
 		atomic_inc(&entry->refs);
 	}
-	spin_unlock(&tree->lock);
+	spin_unlock_irqrestore(&tree->lock, flags);
 	return ret == 0;
 }
 
@@ -347,15 +351,21 @@
  */
 int btrfs_dec_test_ordered_pending(struct inode *inode,
 				   struct btrfs_ordered_extent **cached,
-				   u64 file_offset, u64 io_size)
+				   u64 file_offset, u64 io_size, int uptodate)
 {
 	struct btrfs_ordered_inode_tree *tree;
 	struct rb_node *node;
 	struct btrfs_ordered_extent *entry = NULL;
+	unsigned long flags;
 	int ret;
 
 	tree = &BTRFS_I(inode)->ordered_tree;
-	spin_lock(&tree->lock);
+	spin_lock_irqsave(&tree->lock, flags);
+	if (cached && *cached) {
+		entry = *cached;
+		goto have_entry;
+	}
+
 	node = tree_search(tree, file_offset);
 	if (!node) {
 		ret = 1;
@@ -363,6 +373,7 @@
 	}
 
 	entry = rb_entry(node, struct btrfs_ordered_extent, rb_node);
+have_entry:
 	if (!offset_in_entry(entry, file_offset)) {
 		ret = 1;
 		goto out;
@@ -374,6 +385,9 @@
 		       (unsigned long long)io_size);
 	}
 	entry->bytes_left -= io_size;
+	if (!uptodate)
+		set_bit(BTRFS_ORDERED_IOERR, &entry->flags);
+
 	if (entry->bytes_left == 0)
 		ret = test_and_set_bit(BTRFS_ORDERED_IO_DONE, &entry->flags);
 	else
@@ -383,7 +397,7 @@
 		*cached = entry;
 		atomic_inc(&entry->refs);
 	}
-	spin_unlock(&tree->lock);
+	spin_unlock_irqrestore(&tree->lock, flags);
 	return ret == 0;
 }
 
@@ -399,6 +413,8 @@
 	trace_btrfs_ordered_extent_put(entry->inode, entry);
 
 	if (atomic_dec_and_test(&entry->refs)) {
+		if (entry->inode)
+			btrfs_add_delayed_iput(entry->inode);
 		while (!list_empty(&entry->list)) {
 			cur = entry->list.next;
 			sum = list_entry(cur, struct btrfs_ordered_sum, list);
@@ -411,21 +427,22 @@
 
 /*
  * remove an ordered extent from the tree.  No references are dropped
- * and you must wake_up entry->wait.  You must hold the tree lock
- * while you call this function.
+ * and waiters are woken up.
  */
-static void __btrfs_remove_ordered_extent(struct inode *inode,
-					  struct btrfs_ordered_extent *entry)
+void btrfs_remove_ordered_extent(struct inode *inode,
+				 struct btrfs_ordered_extent *entry)
 {
 	struct btrfs_ordered_inode_tree *tree;
 	struct btrfs_root *root = BTRFS_I(inode)->root;
 	struct rb_node *node;
 
 	tree = &BTRFS_I(inode)->ordered_tree;
+	spin_lock_irq(&tree->lock);
 	node = &entry->rb_node;
 	rb_erase(node, &tree->tree);
 	tree->last = NULL;
 	set_bit(BTRFS_ORDERED_COMPLETE, &entry->flags);
+	spin_unlock_irq(&tree->lock);
 
 	spin_lock(&root->fs_info->ordered_extent_lock);
 	list_del_init(&entry->root_extent_list);
@@ -442,21 +459,6 @@
 		list_del_init(&BTRFS_I(inode)->ordered_operations);
 	}
 	spin_unlock(&root->fs_info->ordered_extent_lock);
-}
-
-/*
- * remove an ordered extent from the tree.  No references are dropped
- * but any waiters are woken.
- */
-void btrfs_remove_ordered_extent(struct inode *inode,
-				 struct btrfs_ordered_extent *entry)
-{
-	struct btrfs_ordered_inode_tree *tree;
-
-	tree = &BTRFS_I(inode)->ordered_tree;
-	spin_lock(&tree->lock);
-	__btrfs_remove_ordered_extent(inode, entry);
-	spin_unlock(&tree->lock);
 	wake_up(&entry->wait);
 }
 
@@ -621,17 +623,29 @@
 		if (orig_end > INT_LIMIT(loff_t))
 			orig_end = INT_LIMIT(loff_t);
 	}
-again:
+
 	/* start IO across the range first to instantiate any delalloc
 	 * extents
 	 */
 	filemap_fdatawrite_range(inode->i_mapping, start, orig_end);
 
-	/* The compression code will leave pages locked but return from
-	 * writepage without setting the page writeback.  Starting again
-	 * with WB_SYNC_ALL will end up waiting for the IO to actually start.
+	/*
+	 * So with compression we will find and lock a dirty page and clear the
+	 * first one as dirty, setup an async extent, and immediately return
+	 * with the entire range locked but with nobody actually marked with
+	 * writeback.  So we can't just filemap_write_and_wait_range() and
+	 * expect it to work since it will just kick off a thread to do the
+	 * actual work.  So we need to call filemap_fdatawrite_range _again_
+	 * since it will wait on the page lock, which won't be unlocked until
+	 * after the pages have been marked as writeback and so we're good to go
+	 * from there.  We have to do this otherwise we'll miss the ordered
+	 * extents and that results in badness.  Please Josef, do not think you
+	 * know better and pull this out at some point in the future, it is
+	 * right and you are wrong.
 	 */
-	filemap_fdatawrite_range(inode->i_mapping, start, orig_end);
+	if (test_bit(BTRFS_INODE_HAS_ASYNC_EXTENT,
+		     &BTRFS_I(inode)->runtime_flags))
+		filemap_fdatawrite_range(inode->i_mapping, start, orig_end);
 
 	filemap_fdatawait_range(inode->i_mapping, start, orig_end);
 
@@ -657,11 +671,6 @@
 			break;
 		end--;
 	}
-	if (found || test_range_bit(&BTRFS_I(inode)->io_tree, start, orig_end,
-			   EXTENT_DELALLOC, 0, NULL)) {
-		schedule_timeout(1);
-		goto again;
-	}
 }
 
 /*
@@ -676,7 +685,7 @@
 	struct btrfs_ordered_extent *entry = NULL;
 
 	tree = &BTRFS_I(inode)->ordered_tree;
-	spin_lock(&tree->lock);
+	spin_lock_irq(&tree->lock);
 	node = tree_search(tree, file_offset);
 	if (!node)
 		goto out;
@@ -687,7 +696,7 @@
 	if (entry)
 		atomic_inc(&entry->refs);
 out:
-	spin_unlock(&tree->lock);
+	spin_unlock_irq(&tree->lock);
 	return entry;
 }
 
@@ -703,7 +712,7 @@
 	struct btrfs_ordered_extent *entry = NULL;
 
 	tree = &BTRFS_I(inode)->ordered_tree;
-	spin_lock(&tree->lock);
+	spin_lock_irq(&tree->lock);
 	node = tree_search(tree, file_offset);
 	if (!node) {
 		node = tree_search(tree, file_offset + len);
@@ -728,7 +737,7 @@
 out:
 	if (entry)
 		atomic_inc(&entry->refs);
-	spin_unlock(&tree->lock);
+	spin_unlock_irq(&tree->lock);
 	return entry;
 }
 
@@ -744,7 +753,7 @@
 	struct btrfs_ordered_extent *entry = NULL;
 
 	tree = &BTRFS_I(inode)->ordered_tree;
-	spin_lock(&tree->lock);
+	spin_lock_irq(&tree->lock);
 	node = tree_search(tree, file_offset);
 	if (!node)
 		goto out;
@@ -752,7 +761,7 @@
 	entry = rb_entry(node, struct btrfs_ordered_extent, rb_node);
 	atomic_inc(&entry->refs);
 out:
-	spin_unlock(&tree->lock);
+	spin_unlock_irq(&tree->lock);
 	return entry;
 }
 
@@ -764,7 +773,6 @@
 				struct btrfs_ordered_extent *ordered)
 {
 	struct btrfs_ordered_inode_tree *tree = &BTRFS_I(inode)->ordered_tree;
-	struct extent_io_tree *io_tree = &BTRFS_I(inode)->io_tree;
 	u64 disk_i_size;
 	u64 new_i_size;
 	u64 i_size_test;
@@ -779,7 +787,7 @@
 	else
 		offset = ALIGN(offset, BTRFS_I(inode)->root->sectorsize);
 
-	spin_lock(&tree->lock);
+	spin_lock_irq(&tree->lock);
 	disk_i_size = BTRFS_I(inode)->disk_i_size;
 
 	/* truncate file */
@@ -798,14 +806,6 @@
 	}
 
 	/*
-	 * we can't update the disk_isize if there are delalloc bytes
-	 * between disk_i_size and  this ordered extent
-	 */
-	if (test_range_bit(io_tree, disk_i_size, offset - 1,
-			   EXTENT_DELALLOC, 0, NULL)) {
-		goto out;
-	}
-	/*
 	 * walk backward from this ordered extent to disk_i_size.
 	 * if we find an ordered extent then we can't update disk i_size
 	 * yet
@@ -825,15 +825,18 @@
 		}
 		node = prev;
 	}
-	while (node) {
+	for (; node; node = rb_prev(node)) {
 		test = rb_entry(node, struct btrfs_ordered_extent, rb_node);
+
+		/* We treat this entry as if it doesn't exist */
+		if (test_bit(BTRFS_ORDERED_UPDATED_ISIZE, &test->flags))
+			continue;
 		if (test->file_offset + test->len <= disk_i_size)
 			break;
 		if (test->file_offset >= i_size)
 			break;
 		if (test->file_offset >= disk_i_size)
 			goto out;
-		node = rb_prev(node);
 	}
 	new_i_size = min_t(u64, offset, i_size);
 
@@ -851,43 +854,49 @@
 		else
 			node = rb_first(&tree->tree);
 	}
-	i_size_test = 0;
-	if (node) {
-		/*
-		 * do we have an area where IO might have finished
-		 * between our ordered extent and the next one.
-		 */
+
+	/*
+	 * We are looking for an area between our current extent and the next
+	 * ordered extent to update the i_size to.  There are 3 cases here
+	 *
+	 * 1) We don't actually have anything and we can update to i_size.
+	 * 2) We have stuff but they already did their i_size update so again we
+	 * can just update to i_size.
+	 * 3) We have an outstanding ordered extent so the most we can update
+	 * our disk_i_size to is the start of the next offset.
+	 */
+	i_size_test = i_size;
+	for (; node; node = rb_next(node)) {
 		test = rb_entry(node, struct btrfs_ordered_extent, rb_node);
-		if (test->file_offset > offset)
+
+		if (test_bit(BTRFS_ORDERED_UPDATED_ISIZE, &test->flags))
+			continue;
+		if (test->file_offset > offset) {
 			i_size_test = test->file_offset;
-	} else {
-		i_size_test = i_size;
+			break;
+		}
 	}
 
 	/*
 	 * i_size_test is the end of a region after this ordered
-	 * extent where there are no ordered extents.  As long as there
-	 * are no delalloc bytes in this area, it is safe to update
-	 * disk_i_size to the end of the region.
+	 * extent where there are no ordered extents, we can safely set
+	 * disk_i_size to this.
 	 */
-	if (i_size_test > offset &&
-	    !test_range_bit(io_tree, offset, i_size_test - 1,
-			    EXTENT_DELALLOC, 0, NULL)) {
+	if (i_size_test > offset)
 		new_i_size = min_t(u64, i_size_test, i_size);
-	}
 	BTRFS_I(inode)->disk_i_size = new_i_size;
 	ret = 0;
 out:
 	/*
-	 * we need to remove the ordered extent with the tree lock held
-	 * so that other people calling this function don't find our fully
-	 * processed ordered entry and skip updating the i_size
+	 * We need to do this because we can't remove ordered extents until
+	 * after the i_disk_size has been updated and then the inode has been
+	 * updated to reflect the change, so we need to tell anybody who finds
+	 * this ordered extent that we've already done all the real work, we
+	 * just haven't completed all the other work.
 	 */
 	if (ordered)
-		__btrfs_remove_ordered_extent(inode, ordered);
-	spin_unlock(&tree->lock);
-	if (ordered)
-		wake_up(&ordered->wait);
+		set_bit(BTRFS_ORDERED_UPDATED_ISIZE, &ordered->flags);
+	spin_unlock_irq(&tree->lock);
 	return ret;
 }
 
@@ -912,7 +921,7 @@
 	if (!ordered)
 		return 1;
 
-	spin_lock(&tree->lock);
+	spin_lock_irq(&tree->lock);
 	list_for_each_entry_reverse(ordered_sum, &ordered->list, list) {
 		if (disk_bytenr >= ordered_sum->bytenr) {
 			num_sectors = ordered_sum->len / sectorsize;
@@ -927,7 +936,7 @@
 		}
 	}
 out:
-	spin_unlock(&tree->lock);
+	spin_unlock_irq(&tree->lock);
 	btrfs_put_ordered_extent(ordered);
 	return ret;
 }
diff --git a/fs/btrfs/ordered-data.h b/fs/btrfs/ordered-data.h
index c355ad4..e03c560 100644
--- a/fs/btrfs/ordered-data.h
+++ b/fs/btrfs/ordered-data.h
@@ -74,6 +74,12 @@
 
 #define BTRFS_ORDERED_DIRECT 5 /* set when we're doing DIO with this extent */
 
+#define BTRFS_ORDERED_IOERR 6 /* We had an io error when writing this out */
+
+#define BTRFS_ORDERED_UPDATED_ISIZE 7 /* indicates whether this ordered extent
+				       * has done its due diligence in updating
+				       * the isize. */
+
 struct btrfs_ordered_extent {
 	/* logical offset in the file */
 	u64 file_offset;
@@ -113,6 +119,8 @@
 
 	/* a per root list of all the pending ordered extents */
 	struct list_head root_extent_list;
+
+	struct btrfs_work work;
 };
 
 
@@ -143,10 +151,11 @@
 				struct btrfs_ordered_extent *entry);
 int btrfs_dec_test_ordered_pending(struct inode *inode,
 				   struct btrfs_ordered_extent **cached,
-				   u64 file_offset, u64 io_size);
+				   u64 file_offset, u64 io_size, int uptodate);
 int btrfs_dec_test_first_ordered_pending(struct inode *inode,
 				   struct btrfs_ordered_extent **cached,
-				   u64 *file_offset, u64 io_size);
+				   u64 *file_offset, u64 io_size,
+				   int uptodate);
 int btrfs_add_ordered_extent(struct inode *inode, u64 file_offset,
 			     u64 start, u64 len, u64 disk_len, int type);
 int btrfs_add_ordered_extent_dio(struct inode *inode, u64 file_offset,
diff --git a/fs/btrfs/print-tree.c b/fs/btrfs/print-tree.c
index f38e452..5e23684 100644
--- a/fs/btrfs/print-tree.c
+++ b/fs/btrfs/print-tree.c
@@ -294,6 +294,9 @@
 			       btrfs_dev_extent_chunk_offset(l, dev_extent),
 			       (unsigned long long)
 			       btrfs_dev_extent_length(l, dev_extent));
+		case BTRFS_DEV_STATS_KEY:
+			printk(KERN_INFO "\t\tdevice stats\n");
+			break;
 		};
 	}
 }
diff --git a/fs/btrfs/rcu-string.h b/fs/btrfs/rcu-string.h
new file mode 100644
index 0000000..9e111e4
--- /dev/null
+++ b/fs/btrfs/rcu-string.h
@@ -0,0 +1,56 @@
+/*
+ * Copyright (C) 2012 Red Hat.  All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public
+ * License v2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public
+ * License along with this program; if not, write to the
+ * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
+ * Boston, MA 021110-1307, USA.
+ */
+
+struct rcu_string {
+	struct rcu_head rcu;
+	char str[0];
+};
+
+static inline struct rcu_string *rcu_string_strdup(const char *src, gfp_t mask)
+{
+	size_t len = strlen(src) + 1;
+	struct rcu_string *ret = kzalloc(sizeof(struct rcu_string) +
+					 (len * sizeof(char)), mask);
+	if (!ret)
+		return ret;
+	strncpy(ret->str, src, len);
+	return ret;
+}
+
+static inline void rcu_string_free(struct rcu_string *str)
+{
+	if (str)
+		kfree_rcu(str, rcu);
+}
+
+#define printk_in_rcu(fmt, ...) do {	\
+	rcu_read_lock();		\
+	printk(fmt, __VA_ARGS__);	\
+	rcu_read_unlock();		\
+} while (0)
+
+#define printk_ratelimited_in_rcu(fmt, ...) do {	\
+	rcu_read_lock();				\
+	printk_ratelimited(fmt, __VA_ARGS__);		\
+	rcu_read_unlock();				\
+} while (0)
+
+#define rcu_str_deref(rcu_str) ({				\
+	struct rcu_string *__str = rcu_dereference(rcu_str);	\
+	__str->str;						\
+})
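
A sketch of the intended publish/read pattern for the helpers above, with a hypothetical "struct demo" standing in for a structure such as btrfs_device; it is illustrative only, not part of the patch, and the update side is assumed to be serialized by the caller.

#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/slab.h>
#include <linux/rcupdate.h>
#include <linux/errno.h>
#include "rcu-string.h"

struct demo {
	struct rcu_string __rcu *name;
};

static int demo_set_name(struct demo *d, const char *path)
{
	struct rcu_string *new_name, *old_name;

	new_name = rcu_string_strdup(path, GFP_NOFS);
	if (!new_name)
		return -ENOMEM;

	/* updates are assumed to be serialized by the caller */
	old_name = rcu_dereference_protected(d->name, 1);
	rcu_assign_pointer(d->name, new_name);
	rcu_string_free(old_name);	/* old string freed after a grace period */
	return 0;
}

static void demo_print_name(struct demo *d)
{
	/* readers only need rcu_read_lock() around the dereference */
	printk_in_rcu(KERN_INFO "demo: name is %s\n", rcu_str_deref(d->name));
}
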
diff --git a/fs/btrfs/reada.c b/fs/btrfs/reada.c
index ac5d010..48a4882 100644
--- a/fs/btrfs/reada.c
+++ b/fs/btrfs/reada.c
@@ -718,13 +718,18 @@
 {
 	struct reada_machine_work *rmw;
 	struct btrfs_fs_info *fs_info;
+	int old_ioprio;
 
 	rmw = container_of(work, struct reada_machine_work, work);
 	fs_info = rmw->fs_info;
 
 	kfree(rmw);
 
+	old_ioprio = IOPRIO_PRIO_VALUE(task_nice_ioclass(current),
+				       task_nice_ioprio(current));
+	set_task_ioprio(current, BTRFS_IOPRIO_READA);
 	__reada_start_machine(fs_info);
+	set_task_ioprio(current, old_ioprio);
 }
 
 static void __reada_start_machine(struct btrfs_fs_info *fs_info)
diff --git a/fs/btrfs/scrub.c b/fs/btrfs/scrub.c
index 2f3d6f9..b223620 100644
--- a/fs/btrfs/scrub.c
+++ b/fs/btrfs/scrub.c
@@ -26,6 +26,7 @@
 #include "backref.h"
 #include "extent_io.h"
 #include "check-integrity.h"
+#include "rcu-string.h"
 
 /*
  * This is only the first step towards a full-features scrub. It reads all
@@ -50,7 +51,7 @@
 struct scrub_page {
 	struct scrub_block	*sblock;
 	struct page		*page;
-	struct block_device	*bdev;
+	struct btrfs_device	*dev;
 	u64			flags;  /* extent flags */
 	u64			generation;
 	u64			logical;
@@ -86,6 +87,7 @@
 		unsigned int	header_error:1;
 		unsigned int	checksum_error:1;
 		unsigned int	no_io_error_seen:1;
+		unsigned int	generation_error:1; /* also sets header_error */
 	};
 };
 
@@ -319,10 +321,10 @@
 	 * hold all of the paths here
 	 */
 	for (i = 0; i < ipath->fspath->elem_cnt; ++i)
-		printk(KERN_WARNING "btrfs: %s at logical %llu on dev "
+		printk_in_rcu(KERN_WARNING "btrfs: %s at logical %llu on dev "
 			"%s, sector %llu, root %llu, inode %llu, offset %llu, "
 			"length %llu, links %u (path: %s)\n", swarn->errstr,
-			swarn->logical, swarn->dev->name,
+			swarn->logical, rcu_str_deref(swarn->dev->name),
 			(unsigned long long)swarn->sector, root, inum, offset,
 			min(isize - offset, (u64)PAGE_SIZE), nlink,
 			(char *)(unsigned long)ipath->fspath->val[i]);
@@ -331,10 +333,10 @@
 	return 0;
 
 err:
-	printk(KERN_WARNING "btrfs: %s at logical %llu on dev "
+	printk_in_rcu(KERN_WARNING "btrfs: %s at logical %llu on dev "
 		"%s, sector %llu, root %llu, inode %llu, offset %llu: path "
 		"resolving failed with ret=%d\n", swarn->errstr,
-		swarn->logical, swarn->dev->name,
+		swarn->logical, rcu_str_deref(swarn->dev->name),
 		(unsigned long long)swarn->sector, root, inum, offset, ret);
 
 	free_ipath(ipath);
@@ -389,10 +391,11 @@
 		do {
 			ret = tree_backref_for_extent(&ptr, eb, ei, item_size,
 							&ref_root, &ref_level);
-			printk(KERN_WARNING
+			printk_in_rcu(KERN_WARNING
 				"btrfs: %s at logical %llu on dev %s, "
 				"sector %llu: metadata %s (level %d) in tree "
-				"%llu\n", errstr, swarn.logical, dev->name,
+				"%llu\n", errstr, swarn.logical,
+				rcu_str_deref(dev->name),
 				(unsigned long long)swarn.sector,
 				ref_level ? "node" : "leaf",
 				ret < 0 ? -1 : ref_level,
@@ -579,9 +582,11 @@
 		spin_lock(&sdev->stat_lock);
 		++sdev->stat.uncorrectable_errors;
 		spin_unlock(&sdev->stat_lock);
-		printk_ratelimited(KERN_ERR
+
+		printk_ratelimited_in_rcu(KERN_ERR
 			"btrfs: unable to fixup (nodatasum) error at logical %llu on dev %s\n",
-			(unsigned long long)fixup->logical, sdev->dev->name);
+			(unsigned long long)fixup->logical,
+			rcu_str_deref(sdev->dev->name));
 	}
 
 	btrfs_free_path(path);
@@ -675,6 +680,8 @@
 		sdev->stat.read_errors++;
 		sdev->stat.uncorrectable_errors++;
 		spin_unlock(&sdev->stat_lock);
+		btrfs_dev_stat_inc_and_print(sdev->dev,
+					     BTRFS_DEV_STAT_READ_ERRS);
 		goto out;
 	}
 
@@ -686,6 +693,8 @@
 		sdev->stat.read_errors++;
 		sdev->stat.uncorrectable_errors++;
 		spin_unlock(&sdev->stat_lock);
+		btrfs_dev_stat_inc_and_print(sdev->dev,
+					     BTRFS_DEV_STAT_READ_ERRS);
 		goto out;
 	}
 	BUG_ON(failed_mirror_index >= BTRFS_MAX_MIRRORS);
@@ -699,6 +708,8 @@
 		sdev->stat.read_errors++;
 		sdev->stat.uncorrectable_errors++;
 		spin_unlock(&sdev->stat_lock);
+		btrfs_dev_stat_inc_and_print(sdev->dev,
+					     BTRFS_DEV_STAT_READ_ERRS);
 		goto out;
 	}
 
@@ -725,12 +736,16 @@
 		spin_unlock(&sdev->stat_lock);
 		if (__ratelimit(&_rs))
 			scrub_print_warning("i/o error", sblock_to_check);
+		btrfs_dev_stat_inc_and_print(sdev->dev,
+					     BTRFS_DEV_STAT_READ_ERRS);
 	} else if (sblock_bad->checksum_error) {
 		spin_lock(&sdev->stat_lock);
 		sdev->stat.csum_errors++;
 		spin_unlock(&sdev->stat_lock);
 		if (__ratelimit(&_rs))
 			scrub_print_warning("checksum error", sblock_to_check);
+		btrfs_dev_stat_inc_and_print(sdev->dev,
+					     BTRFS_DEV_STAT_CORRUPTION_ERRS);
 	} else if (sblock_bad->header_error) {
 		spin_lock(&sdev->stat_lock);
 		sdev->stat.verify_errors++;
@@ -738,6 +753,12 @@
 		if (__ratelimit(&_rs))
 			scrub_print_warning("checksum/header error",
 					    sblock_to_check);
+		if (sblock_bad->generation_error)
+			btrfs_dev_stat_inc_and_print(sdev->dev,
+				BTRFS_DEV_STAT_GENERATION_ERRS);
+		else
+			btrfs_dev_stat_inc_and_print(sdev->dev,
+				BTRFS_DEV_STAT_CORRUPTION_ERRS);
 	}
 
 	if (sdev->readonly)
@@ -919,18 +940,20 @@
 			spin_lock(&sdev->stat_lock);
 			sdev->stat.corrected_errors++;
 			spin_unlock(&sdev->stat_lock);
-			printk_ratelimited(KERN_ERR
+			printk_ratelimited_in_rcu(KERN_ERR
 				"btrfs: fixed up error at logical %llu on dev %s\n",
-				(unsigned long long)logical, sdev->dev->name);
+				(unsigned long long)logical,
+				rcu_str_deref(sdev->dev->name));
 		}
 	} else {
 did_not_correct_error:
 		spin_lock(&sdev->stat_lock);
 		sdev->stat.uncorrectable_errors++;
 		spin_unlock(&sdev->stat_lock);
-		printk_ratelimited(KERN_ERR
+		printk_ratelimited_in_rcu(KERN_ERR
 			"btrfs: unable to fixup (regular) error at logical %llu on dev %s\n",
-			(unsigned long long)logical, sdev->dev->name);
+			(unsigned long long)logical,
+			rcu_str_deref(sdev->dev->name));
 	}
 
 out:
@@ -998,8 +1021,8 @@
 			page = sblock->pagev + page_index;
 			page->logical = logical;
 			page->physical = bbio->stripes[mirror_index].physical;
-			/* for missing devices, bdev is NULL */
-			page->bdev = bbio->stripes[mirror_index].dev->bdev;
+			/* for missing devices, dev->bdev is NULL */
+			page->dev = bbio->stripes[mirror_index].dev;
 			page->mirror_num = mirror_index + 1;
 			page->page = alloc_page(GFP_NOFS);
 			if (!page->page) {
@@ -1043,7 +1066,7 @@
 		struct scrub_page *page = sblock->pagev + page_num;
 		DECLARE_COMPLETION_ONSTACK(complete);
 
-		if (page->bdev == NULL) {
+		if (page->dev->bdev == NULL) {
 			page->io_error = 1;
 			sblock->no_io_error_seen = 0;
 			continue;
@@ -1053,7 +1076,7 @@
 		bio = bio_alloc(GFP_NOFS, 1);
 		if (!bio)
 			return -EIO;
-		bio->bi_bdev = page->bdev;
+		bio->bi_bdev = page->dev->bdev;
 		bio->bi_sector = page->physical >> 9;
 		bio->bi_end_io = scrub_complete_bio_end_io;
 		bio->bi_private = &complete;
@@ -1102,11 +1125,14 @@
 		h = (struct btrfs_header *)mapped_buffer;
 
 		if (sblock->pagev[0].logical != le64_to_cpu(h->bytenr) ||
-		    generation != le64_to_cpu(h->generation) ||
 		    memcmp(h->fsid, fs_info->fsid, BTRFS_UUID_SIZE) ||
 		    memcmp(h->chunk_tree_uuid, fs_info->chunk_tree_uuid,
-			   BTRFS_UUID_SIZE))
+			   BTRFS_UUID_SIZE)) {
 			sblock->header_error = 1;
+		} else if (generation != le64_to_cpu(h->generation)) {
+			sblock->header_error = 1;
+			sblock->generation_error = 1;
+		}
 		csum = h->csum;
 	} else {
 		if (!have_csum)
@@ -1182,7 +1208,7 @@
 		bio = bio_alloc(GFP_NOFS, 1);
 		if (!bio)
 			return -EIO;
-		bio->bi_bdev = page_bad->bdev;
+		bio->bi_bdev = page_bad->dev->bdev;
 		bio->bi_sector = page_bad->physical >> 9;
 		bio->bi_end_io = scrub_complete_bio_end_io;
 		bio->bi_private = &complete;
@@ -1196,6 +1222,12 @@
 
 		/* this will also unplug the queue */
 		wait_for_completion(&complete);
+		if (!bio_flagged(bio, BIO_UPTODATE)) {
+			btrfs_dev_stat_inc_and_print(page_bad->dev,
+				BTRFS_DEV_STAT_WRITE_ERRS);
+			bio_put(bio);
+			return -EIO;
+		}
 		bio_put(bio);
 	}
 
@@ -1352,7 +1384,8 @@
 	u64 mapped_size;
 	void *p;
 	u32 crc = ~(u32)0;
-	int fail = 0;
+	int fail_gen = 0;
+	int fail_cor = 0;
 	u64 len;
 	int index;
 
@@ -1363,13 +1396,13 @@
 	memcpy(on_disk_csum, s->csum, sdev->csum_size);
 
 	if (sblock->pagev[0].logical != le64_to_cpu(s->bytenr))
-		++fail;
+		++fail_cor;
 
 	if (sblock->pagev[0].generation != le64_to_cpu(s->generation))
-		++fail;
+		++fail_gen;
 
 	if (memcmp(s->fsid, fs_info->fsid, BTRFS_UUID_SIZE))
-		++fail;
+		++fail_cor;
 
 	len = BTRFS_SUPER_INFO_SIZE - BTRFS_CSUM_SIZE;
 	mapped_size = PAGE_SIZE - BTRFS_CSUM_SIZE;
@@ -1394,9 +1427,9 @@
 
 	btrfs_csum_final(crc, calculated_csum);
 	if (memcmp(calculated_csum, on_disk_csum, sdev->csum_size))
-		++fail;
+		++fail_cor;
 
-	if (fail) {
+	if (fail_cor + fail_gen) {
 		/*
 		 * if we find an error in a super block, we just report it.
 		 * They will get written with the next transaction commit
@@ -1405,9 +1438,15 @@
 		spin_lock(&sdev->stat_lock);
 		++sdev->stat.super_errors;
 		spin_unlock(&sdev->stat_lock);
+		if (fail_cor)
+			btrfs_dev_stat_inc_and_print(sdev->dev,
+				BTRFS_DEV_STAT_CORRUPTION_ERRS);
+		else
+			btrfs_dev_stat_inc_and_print(sdev->dev,
+				BTRFS_DEV_STAT_GENERATION_ERRS);
 	}
 
-	return fail;
+	return fail_cor + fail_gen;
 }
 
 static void scrub_block_get(struct scrub_block *sblock)
@@ -1551,7 +1590,7 @@
 			return -ENOMEM;
 		}
 		spage->sblock = sblock;
-		spage->bdev = sdev->dev->bdev;
+		spage->dev = sdev->dev;
 		spage->flags = flags;
 		spage->generation = gen;
 		spage->logical = logical;
diff --git a/fs/btrfs/super.c b/fs/btrfs/super.c
index c5f8fca..0eb9a4d 100644
--- a/fs/btrfs/super.c
+++ b/fs/btrfs/super.c
@@ -54,6 +54,7 @@
 #include "version.h"
 #include "export.h"
 #include "compression.h"
+#include "rcu-string.h"
 
 #define CREATE_TRACE_POINTS
 #include <trace/events/btrfs.h>
@@ -188,7 +189,8 @@
 	va_start(args, fmt);
 
 	if (fmt[0] == '<' && isdigit(fmt[1]) && fmt[2] == '>') {
-		strncpy(lvl, fmt, 3);
+		memcpy(lvl, fmt, 3);
+		lvl[3] = '\0';
 		fmt += 3;
 		type = logtypes[fmt[1] - '0'];
 	} else
@@ -435,11 +437,8 @@
 		case Opt_thread_pool:
 			intarg = 0;
 			match_int(&args[0], &intarg);
-			if (intarg) {
+			if (intarg)
 				info->thread_pool_size = intarg;
-				printk(KERN_INFO "btrfs: thread pool %d\n",
-				       info->thread_pool_size);
-			}
 			break;
 		case Opt_max_inline:
 			num = match_strdup(&args[0]);
@@ -769,7 +768,7 @@
 #ifdef CONFIG_BTRFS_FS_POSIX_ACL
 	sb->s_flags |= MS_POSIXACL;
 #endif
-
+	sb->s_flags |= MS_I_VERSION;
 	err = open_ctree(sb, fs_devices, (char *)data);
 	if (err) {
 		printk("btrfs: open_ctree failed\n");
@@ -925,63 +924,48 @@
  */
 static char *setup_root_args(char *args)
 {
-	unsigned copied = 0;
-	unsigned len = strlen(args) + 2;
-	char *pos;
-	char *ret;
+	unsigned len = strlen(args) + 2 + 1;
+	char *src, *dst, *buf;
 
 	/*
-	 * We need the same args as before, but minus
+	 * We need the same args as before, but with this substitution:
+	 * s!subvol=[^,]+!subvolid=0!
 	 *
-	 * subvol=a
-	 *
-	 * and add
-	 *
-	 * subvolid=0
-	 *
-	 * which is a difference of 2 characters, so we allocate strlen(args) +
-	 * 2 characters.
+	 * Since the replacement string is up to 2 bytes longer than the
+	 * original, allocate strlen(args) + 2 + 1 bytes.
 	 */
-	ret = kzalloc(len * sizeof(char), GFP_NOFS);
-	if (!ret)
-		return NULL;
-	pos = strstr(args, "subvol=");
 
+	src = strstr(args, "subvol=");
 	/* This shouldn't happen, but just in case.. */
-	if (!pos) {
-		kfree(ret);
+	if (!src)
 		return NULL;
-	}
+
+	buf = dst = kmalloc(len, GFP_NOFS);
+	if (!buf)
+		return NULL;
 
 	/*
-	 * The subvol=<> arg is not at the front of the string, copy everybody
-	 * up to that into ret.
+	 * If the subvol= arg is not at the start of the string,
+	 * copy whatever precedes it into buf.
 	 */
-	if (pos != args) {
-		*pos = '\0';
-		strcpy(ret, args);
-		copied += strlen(args);
-		pos++;
+	if (src != args) {
+		*src++ = '\0';
+		strcpy(buf, args);
+		dst += strlen(args);
 	}
 
-	strncpy(ret + copied, "subvolid=0", len - copied);
-
-	/* Length of subvolid=0 */
-	copied += 10;
+	strcpy(dst, "subvolid=0");
+	dst += strlen("subvolid=0");
 
 	/*
-	 * If there is no , after the subvol= option then we know there's no
-	 * other options and we can just return.
+	 * If there is a "," after the original subvol=... string,
+	 * copy that suffix into our buffer.  Otherwise, we're done.
 	 */
-	pos = strchr(pos, ',');
-	if (!pos)
-		return ret;
+	src = strchr(src, ',');
+	if (src)
+		strcpy(dst, src);
 
-	/* Copy the rest of the arguments into our buffer */
-	strncpy(ret + copied, pos, len - copied);
-	copied += strlen(pos);
-
-	return ret;
+	return buf;
 }
 
 static struct dentry *mount_subvol(const char *subvol_name, int flags,
@@ -1118,6 +1102,40 @@
 	return ERR_PTR(error);
 }
 
+static void btrfs_set_max_workers(struct btrfs_workers *workers, int new_limit)
+{
+	spin_lock_irq(&workers->lock);
+	workers->max_workers = new_limit;
+	spin_unlock_irq(&workers->lock);
+}
+
+static void btrfs_resize_thread_pool(struct btrfs_fs_info *fs_info,
+				     int new_pool_size, int old_pool_size)
+{
+	if (new_pool_size == old_pool_size)
+		return;
+
+	fs_info->thread_pool_size = new_pool_size;
+
+	printk(KERN_INFO "btrfs: resize thread pool %d -> %d\n",
+	       old_pool_size, new_pool_size);
+
+	btrfs_set_max_workers(&fs_info->generic_worker, new_pool_size);
+	btrfs_set_max_workers(&fs_info->workers, new_pool_size);
+	btrfs_set_max_workers(&fs_info->delalloc_workers, new_pool_size);
+	btrfs_set_max_workers(&fs_info->submit_workers, new_pool_size);
+	btrfs_set_max_workers(&fs_info->caching_workers, new_pool_size);
+	btrfs_set_max_workers(&fs_info->fixup_workers, new_pool_size);
+	btrfs_set_max_workers(&fs_info->endio_workers, new_pool_size);
+	btrfs_set_max_workers(&fs_info->endio_meta_workers, new_pool_size);
+	btrfs_set_max_workers(&fs_info->endio_meta_write_workers, new_pool_size);
+	btrfs_set_max_workers(&fs_info->endio_write_workers, new_pool_size);
+	btrfs_set_max_workers(&fs_info->endio_freespace_worker, new_pool_size);
+	btrfs_set_max_workers(&fs_info->delayed_workers, new_pool_size);
+	btrfs_set_max_workers(&fs_info->readahead_workers, new_pool_size);
+	btrfs_set_max_workers(&fs_info->scrub_workers, new_pool_size);
+}
+
 static int btrfs_remount(struct super_block *sb, int *flags, char *data)
 {
 	struct btrfs_fs_info *fs_info = btrfs_sb(sb);
@@ -1137,6 +1155,9 @@
 		goto restore;
 	}
 
+	btrfs_resize_thread_pool(fs_info,
+		fs_info->thread_pool_size, old_thread_pool_size);
+
 	if ((*flags & MS_RDONLY) == (sb->s_flags & MS_RDONLY))
 		return 0;
 
@@ -1180,7 +1201,8 @@
 	fs_info->compress_type = old_compress_type;
 	fs_info->max_inline = old_max_inline;
 	fs_info->alloc_start = old_alloc_start;
-	fs_info->thread_pool_size = old_thread_pool_size;
+	btrfs_resize_thread_pool(fs_info,
+		old_thread_pool_size, fs_info->thread_pool_size);
 	fs_info->metadata_ratio = old_metadata_ratio;
 	return ret;
 }
@@ -1461,12 +1483,44 @@
 				   "error %d\n", btrfs_ino(inode), ret);
 }
 
+static int btrfs_show_devname(struct seq_file *m, struct dentry *root)
+{
+	struct btrfs_fs_info *fs_info = btrfs_sb(root->d_sb);
+	struct btrfs_fs_devices *cur_devices;
+	struct btrfs_device *dev, *first_dev = NULL;
+	struct list_head *head;
+	struct rcu_string *name;
+
+	mutex_lock(&fs_info->fs_devices->device_list_mutex);
+	cur_devices = fs_info->fs_devices;
+	while (cur_devices) {
+		head = &cur_devices->devices;
+		list_for_each_entry(dev, head, dev_list) {
+			if (!first_dev || dev->devid < first_dev->devid)
+				first_dev = dev;
+		}
+		cur_devices = cur_devices->seed;
+	}
+
+	if (first_dev) {
+		rcu_read_lock();
+		name = rcu_dereference(first_dev->name);
+		seq_escape(m, name->str, " \t\n\\");
+		rcu_read_unlock();
+	} else {
+		WARN_ON(1);
+	}
+	mutex_unlock(&fs_info->fs_devices->device_list_mutex);
+	return 0;
+}
+
 static const struct super_operations btrfs_super_ops = {
 	.drop_inode	= btrfs_drop_inode,
 	.evict_inode	= btrfs_evict_inode,
 	.put_super	= btrfs_put_super,
 	.sync_fs	= btrfs_sync_fs,
 	.show_options	= btrfs_show_options,
+	.show_devname	= btrfs_show_devname,
 	.write_inode	= btrfs_write_inode,
 	.dirty_inode	= btrfs_fs_dirty_inode,
 	.alloc_inode	= btrfs_alloc_inode,
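
For illustration only (not part of the patch), the rewrite rule that setup_root_args() implements, s!subvol=[^,]+!subvolid=0!, reproduced as a small standalone userspace program so the intended before/after of the mount-option string is easy to see; the buffer here is sized generously rather than with the kernel's strlen(args)+3 bound.

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

static char *rewrite_subvol(const char *args)
{
	const char *src = strstr(args, "subvol=");
	const char *rest;
	size_t prefix;
	char *buf;

	if (!src)
		return NULL;

	/* always enough room for the "subvolid=0" substitution */
	buf = malloc(strlen(args) + strlen("subvolid=0") + 1);
	if (!buf)
		return NULL;

	prefix = src - args;
	memcpy(buf, args, prefix);		/* options before subvol=, if any */
	strcpy(buf + prefix, "subvolid=0");

	rest = strchr(src, ',');		/* options after subvol=..., if any */
	if (rest)
		strcat(buf, rest);
	return buf;
}

int main(void)
{
	char *out = rewrite_subvol("device=/dev/sdb,subvol=snap/a,compress");

	/* prints: device=/dev/sdb,subvolid=0,compress */
	printf("%s\n", out ? out : "(no subvol= option)");
	free(out);
	return 0;
}
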
diff --git a/fs/btrfs/transaction.c b/fs/btrfs/transaction.c
index 3642225..b72b068 100644
--- a/fs/btrfs/transaction.c
+++ b/fs/btrfs/transaction.c
@@ -28,6 +28,7 @@
 #include "locking.h"
 #include "tree-log.h"
 #include "inode-map.h"
+#include "volumes.h"
 
 #define BTRFS_ROOT_TRANS_TAG 0
 
@@ -55,49 +56,54 @@
 static noinline int join_transaction(struct btrfs_root *root, int nofail)
 {
 	struct btrfs_transaction *cur_trans;
+	struct btrfs_fs_info *fs_info = root->fs_info;
 
-	spin_lock(&root->fs_info->trans_lock);
+	spin_lock(&fs_info->trans_lock);
 loop:
 	/* The file system has been taken offline. No new transactions. */
-	if (root->fs_info->fs_state & BTRFS_SUPER_FLAG_ERROR) {
-		spin_unlock(&root->fs_info->trans_lock);
+	if (fs_info->fs_state & BTRFS_SUPER_FLAG_ERROR) {
+		spin_unlock(&fs_info->trans_lock);
 		return -EROFS;
 	}
 
-	if (root->fs_info->trans_no_join) {
+	if (fs_info->trans_no_join) {
 		if (!nofail) {
-			spin_unlock(&root->fs_info->trans_lock);
+			spin_unlock(&fs_info->trans_lock);
 			return -EBUSY;
 		}
 	}
 
-	cur_trans = root->fs_info->running_transaction;
+	cur_trans = fs_info->running_transaction;
 	if (cur_trans) {
 		if (cur_trans->aborted) {
-			spin_unlock(&root->fs_info->trans_lock);
+			spin_unlock(&fs_info->trans_lock);
 			return cur_trans->aborted;
 		}
 		atomic_inc(&cur_trans->use_count);
 		atomic_inc(&cur_trans->num_writers);
 		cur_trans->num_joined++;
-		spin_unlock(&root->fs_info->trans_lock);
+		spin_unlock(&fs_info->trans_lock);
 		return 0;
 	}
-	spin_unlock(&root->fs_info->trans_lock);
+	spin_unlock(&fs_info->trans_lock);
 
 	cur_trans = kmem_cache_alloc(btrfs_transaction_cachep, GFP_NOFS);
 	if (!cur_trans)
 		return -ENOMEM;
 
-	spin_lock(&root->fs_info->trans_lock);
-	if (root->fs_info->running_transaction) {
+	spin_lock(&fs_info->trans_lock);
+	if (fs_info->running_transaction) {
 		/*
 		 * someone started a transaction after we unlocked.  Make sure
 		 * to redo the trans_no_join checks above
 		 */
 		kmem_cache_free(btrfs_transaction_cachep, cur_trans);
-		cur_trans = root->fs_info->running_transaction;
+		cur_trans = fs_info->running_transaction;
 		goto loop;
+	} else if (root->fs_info->fs_state & BTRFS_SUPER_FLAG_ERROR) {
+		spin_unlock(&root->fs_info->trans_lock);
+		kmem_cache_free(btrfs_transaction_cachep, cur_trans);
+		return -EROFS;
 	}
 
 	atomic_set(&cur_trans->num_writers, 1);
@@ -121,20 +127,38 @@
 	cur_trans->delayed_refs.flushing = 0;
 	cur_trans->delayed_refs.run_delayed_start = 0;
 	cur_trans->delayed_refs.seq = 1;
+
+	/*
+	 * although the tree mod log is per file system and not per transaction,
+	 * the log must never go across transaction boundaries.
+	 */
+	smp_mb();
+	if (!list_empty(&fs_info->tree_mod_seq_list)) {
+		printk(KERN_ERR "btrfs: tree_mod_seq_list not empty when "
+			"creating a fresh transaction\n");
+		WARN_ON(1);
+	}
+	if (!RB_EMPTY_ROOT(&fs_info->tree_mod_log)) {
+		printk(KERN_ERR "btrfs: tree_mod_log rb tree not empty when "
+			"creating a fresh transaction\n");
+		WARN_ON(1);
+	}
+	atomic_set(&fs_info->tree_mod_seq, 0);
+
 	init_waitqueue_head(&cur_trans->delayed_refs.seq_wait);
 	spin_lock_init(&cur_trans->commit_lock);
 	spin_lock_init(&cur_trans->delayed_refs.lock);
 	INIT_LIST_HEAD(&cur_trans->delayed_refs.seq_head);
 
 	INIT_LIST_HEAD(&cur_trans->pending_snapshots);
-	list_add_tail(&cur_trans->list, &root->fs_info->trans_list);
+	list_add_tail(&cur_trans->list, &fs_info->trans_list);
 	extent_io_tree_init(&cur_trans->dirty_pages,
-			     root->fs_info->btree_inode->i_mapping);
-	root->fs_info->generation++;
-	cur_trans->transid = root->fs_info->generation;
-	root->fs_info->running_transaction = cur_trans;
+			     fs_info->btree_inode->i_mapping);
+	fs_info->generation++;
+	cur_trans->transid = fs_info->generation;
+	fs_info->running_transaction = cur_trans;
 	cur_trans->aborted = 0;
-	spin_unlock(&root->fs_info->trans_lock);
+	spin_unlock(&fs_info->trans_lock);
 
 	return 0;
 }
@@ -758,6 +782,9 @@
 	if (ret)
 		return ret;
 
+	ret = btrfs_run_dev_stats(trans, root->fs_info);
+	BUG_ON(ret);
+
 	while (!list_empty(&fs_info->dirty_cowonly_roots)) {
 		next = fs_info->dirty_cowonly_roots.next;
 		list_del_init(next);
@@ -1190,14 +1217,20 @@
 
 
 static void cleanup_transaction(struct btrfs_trans_handle *trans,
-				struct btrfs_root *root)
+				struct btrfs_root *root, int err)
 {
 	struct btrfs_transaction *cur_trans = trans->transaction;
 
 	WARN_ON(trans->use_count > 1);
 
+	btrfs_abort_transaction(trans, root, err);
+
 	spin_lock(&root->fs_info->trans_lock);
 	list_del_init(&cur_trans->list);
+	if (cur_trans == root->fs_info->running_transaction) {
+		root->fs_info->running_transaction = NULL;
+		root->fs_info->trans_no_join = 0;
+	}
 	spin_unlock(&root->fs_info->trans_lock);
 
 	btrfs_cleanup_one_transaction(trans->transaction, root);
@@ -1503,7 +1536,7 @@
 //	WARN_ON(1);
 	if (current->journal_info == trans)
 		current->journal_info = NULL;
-	cleanup_transaction(trans, root);
+	cleanup_transaction(trans, root, ret);
 
 	return ret;
 }
diff --git a/fs/btrfs/tree-log.c b/fs/btrfs/tree-log.c
index eb1ae90..2017d0f 100644
--- a/fs/btrfs/tree-log.c
+++ b/fs/btrfs/tree-log.c
@@ -1628,7 +1628,9 @@
 	int i;
 	int ret;
 
-	btrfs_read_buffer(eb, gen);
+	ret = btrfs_read_buffer(eb, gen);
+	if (ret)
+		return ret;
 
 	level = btrfs_header_level(eb);
 
@@ -1749,7 +1751,11 @@
 
 			path->slots[*level]++;
 			if (wc->free) {
-				btrfs_read_buffer(next, ptr_gen);
+				ret = btrfs_read_buffer(next, ptr_gen);
+				if (ret) {
+					free_extent_buffer(next);
+					return ret;
+				}
 
 				btrfs_tree_lock(next);
 				btrfs_set_lock_blocking(next);
@@ -1766,7 +1772,11 @@
 			free_extent_buffer(next);
 			continue;
 		}
-		btrfs_read_buffer(next, ptr_gen);
+		ret = btrfs_read_buffer(next, ptr_gen);
+		if (ret) {
+			free_extent_buffer(next);
+			return ret;
+		}
 
 		WARN_ON(*level <= 0);
 		if (path->nodes[*level-1])
@@ -2657,6 +2667,8 @@
 		btrfs_release_path(path);
 	}
 	btrfs_release_path(path);
+	if (ret > 0)
+		ret = 0;
 	return ret;
 }
 
@@ -3028,21 +3040,6 @@
 	return ret;
 }
 
-static int inode_in_log(struct btrfs_trans_handle *trans,
-		 struct inode *inode)
-{
-	struct btrfs_root *root = BTRFS_I(inode)->root;
-	int ret = 0;
-
-	mutex_lock(&root->log_mutex);
-	if (BTRFS_I(inode)->logged_trans == trans->transid &&
-	    BTRFS_I(inode)->last_sub_trans <= root->last_log_commit)
-		ret = 1;
-	mutex_unlock(&root->log_mutex);
-	return ret;
-}
-
-
 /*
  * helper function around btrfs_log_inode to make sure newly created
  * parent directories also end up in the log.  A minimal inode and backref
@@ -3083,7 +3080,7 @@
 	if (ret)
 		goto end_no_trans;
 
-	if (inode_in_log(trans, inode)) {
+	if (btrfs_inode_in_log(inode, trans->transid)) {
 		ret = BTRFS_NO_LOG_SYNC;
 		goto end_no_trans;
 	}
diff --git a/fs/btrfs/ulist.c b/fs/btrfs/ulist.c
index 12f5147..ab942f4 100644
--- a/fs/btrfs/ulist.c
+++ b/fs/btrfs/ulist.c
@@ -23,9 +23,9 @@
  *
  * ulist = ulist_alloc();
  * ulist_add(ulist, root);
- * elem = NULL;
+ * ULIST_ITER_INIT(&uiter);
  *
- * while ((elem = ulist_next(ulist, elem)) {
+ * while ((elem = ulist_next(ulist, &uiter)) {
  * 	for (all child nodes n in elem)
  *		ulist_add(ulist, n);
  *	do something useful with the node;
@@ -95,7 +95,7 @@
  *
  * The allocated ulist will be returned in an initialized state.
  */
-struct ulist *ulist_alloc(unsigned long gfp_mask)
+struct ulist *ulist_alloc(gfp_t gfp_mask)
 {
 	struct ulist *ulist = kmalloc(sizeof(*ulist), gfp_mask);
 
@@ -144,13 +144,22 @@
  * unaltered.
  */
 int ulist_add(struct ulist *ulist, u64 val, unsigned long aux,
-	      unsigned long gfp_mask)
+	      gfp_t gfp_mask)
+{
+	return ulist_add_merge(ulist, val, aux, NULL, gfp_mask);
+}
+
+int ulist_add_merge(struct ulist *ulist, u64 val, unsigned long aux,
+		    unsigned long *old_aux, gfp_t gfp_mask)
 {
 	int i;
 
 	for (i = 0; i < ulist->nnodes; ++i) {
-		if (ulist->nodes[i].val == val)
+		if (ulist->nodes[i].val == val) {
+			if (old_aux)
+				*old_aux = ulist->nodes[i].aux;
 			return 0;
+		}
 	}
 
 	if (ulist->nnodes >= ulist->nodes_alloced) {
@@ -188,33 +197,26 @@
 /**
  * ulist_next - iterate ulist
  * @ulist:	ulist to iterate
- * @prev:	previously returned element or %NULL to start iteration
+ * @uiter:	iterator variable, initialized with ULIST_ITER_INIT(&iterator)
  *
  * Note: locking must be provided by the caller. In case of rwlocks only read
  *       locking is needed
  *
- * This function is used to iterate an ulist. The iteration is started with
- * @prev = %NULL. It returns the next element from the ulist or %NULL when the
+ * This function is used to iterate an ulist.
+ * It returns the next element from the ulist or %NULL when the
  * end is reached. No guarantee is made with respect to the order in which
  * the elements are returned. They might neither be returned in order of
  * addition nor in ascending order.
  * It is allowed to call ulist_add during an enumeration. Newly added items
  * are guaranteed to show up in the running enumeration.
  */
-struct ulist_node *ulist_next(struct ulist *ulist, struct ulist_node *prev)
+struct ulist_node *ulist_next(struct ulist *ulist, struct ulist_iterator *uiter)
 {
-	int next;
-
 	if (ulist->nnodes == 0)
 		return NULL;
-
-	if (!prev)
-		return &ulist->nodes[0];
-
-	next = (prev - ulist->nodes) + 1;
-	if (next < 0 || next >= ulist->nnodes)
+	if (uiter->i < 0 || uiter->i >= ulist->nnodes)
 		return NULL;
 
-	return &ulist->nodes[next];
+	return &ulist->nodes[uiter->i++];
 }
 EXPORT_SYMBOL(ulist_next);
diff --git a/fs/btrfs/ulist.h b/fs/btrfs/ulist.h
index 2e25dec..21bdc8e 100644
--- a/fs/btrfs/ulist.h
+++ b/fs/btrfs/ulist.h
@@ -24,6 +24,10 @@
  */
 #define ULIST_SIZE 16
 
+struct ulist_iterator {
+	int i;
+};
+
 /*
  * element of the list
  */
@@ -59,10 +63,15 @@
 void ulist_init(struct ulist *ulist);
 void ulist_fini(struct ulist *ulist);
 void ulist_reinit(struct ulist *ulist);
-struct ulist *ulist_alloc(unsigned long gfp_mask);
+struct ulist *ulist_alloc(gfp_t gfp_mask);
 void ulist_free(struct ulist *ulist);
 int ulist_add(struct ulist *ulist, u64 val, unsigned long aux,
-	      unsigned long gfp_mask);
-struct ulist_node *ulist_next(struct ulist *ulist, struct ulist_node *prev);
+	      gfp_t gfp_mask);
+int ulist_add_merge(struct ulist *ulist, u64 val, unsigned long aux,
+		    unsigned long *old_aux, gfp_t gfp_mask);
+struct ulist_node *ulist_next(struct ulist *ulist,
+			      struct ulist_iterator *uiter);
+
+#define ULIST_ITER_INIT(uiter) ((uiter)->i = 0)
 
 #endif
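
A short sketch of the new iterator API declared above, mirroring the usage comment in ulist.c; the demo function and its arbitrary bound are made up for illustration and are not part of the patch.

#include <linux/types.h>
#include <linux/slab.h>
#include "ulist.h"

static void demo_walk(u64 root_val)
{
	struct ulist *ul = ulist_alloc(GFP_NOFS);
	struct ulist_iterator uiter;
	struct ulist_node *node;

	if (!ul)
		return;

	/* seed the list, then walk it; items added during the walk show up too */
	ulist_add(ul, root_val, 0, GFP_NOFS);

	ULIST_ITER_INIT(&uiter);
	while ((node = ulist_next(ul, &uiter))) {
		if (node->val < root_val + 8)
			ulist_add(ul, node->val + 1, 0, GFP_NOFS);
	}

	ulist_free(ul);
}
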
diff --git a/fs/btrfs/volumes.c b/fs/btrfs/volumes.c
index 1411b99..8a3d259 100644
--- a/fs/btrfs/volumes.c
+++ b/fs/btrfs/volumes.c
@@ -23,6 +23,7 @@
 #include <linux/random.h>
 #include <linux/iocontext.h>
 #include <linux/capability.h>
+#include <linux/ratelimit.h>
 #include <linux/kthread.h>
 #include <asm/div64.h>
 #include "compat.h"
@@ -34,11 +35,14 @@
 #include "volumes.h"
 #include "async-thread.h"
 #include "check-integrity.h"
+#include "rcu-string.h"
 
 static int init_first_rw_device(struct btrfs_trans_handle *trans,
 				struct btrfs_root *root,
 				struct btrfs_device *device);
 static int btrfs_relocate_sys_chunks(struct btrfs_root *root);
+static void __btrfs_reset_dev_stats(struct btrfs_device *dev);
+static void btrfs_dev_stat_print_on_load(struct btrfs_device *device);
 
 static DEFINE_MUTEX(uuid_mutex);
 static LIST_HEAD(fs_uuids);
@@ -61,7 +65,7 @@
 		device = list_entry(fs_devices->devices.next,
 				    struct btrfs_device, dev_list);
 		list_del(&device->dev_list);
-		kfree(device->name);
+		rcu_string_free(device->name);
 		kfree(device);
 	}
 	kfree(fs_devices);
@@ -331,8 +335,8 @@
 {
 	struct btrfs_device *device;
 	struct btrfs_fs_devices *fs_devices;
+	struct rcu_string *name;
 	u64 found_transid = btrfs_super_generation(disk_super);
-	char *name;
 
 	fs_devices = find_fsid(disk_super->fsid);
 	if (!fs_devices) {
@@ -361,15 +365,18 @@
 			return -ENOMEM;
 		}
 		device->devid = devid;
+		device->dev_stats_valid = 0;
 		device->work.func = pending_bios_fn;
 		memcpy(device->uuid, disk_super->dev_item.uuid,
 		       BTRFS_UUID_SIZE);
 		spin_lock_init(&device->io_lock);
-		device->name = kstrdup(path, GFP_NOFS);
-		if (!device->name) {
+
+		name = rcu_string_strdup(path, GFP_NOFS);
+		if (!name) {
 			kfree(device);
 			return -ENOMEM;
 		}
+		rcu_assign_pointer(device->name, name);
 		INIT_LIST_HEAD(&device->dev_alloc_list);
 
 		/* init readahead state */
@@ -386,12 +393,12 @@
 
 		device->fs_devices = fs_devices;
 		fs_devices->num_devices++;
-	} else if (!device->name || strcmp(device->name, path)) {
-		name = kstrdup(path, GFP_NOFS);
+	} else if (!device->name || strcmp(device->name->str, path)) {
+		name = rcu_string_strdup(path, GFP_NOFS);
 		if (!name)
 			return -ENOMEM;
-		kfree(device->name);
-		device->name = name;
+		rcu_string_free(device->name);
+		rcu_assign_pointer(device->name, name);
 		if (device->missing) {
 			fs_devices->missing_devices--;
 			device->missing = 0;
@@ -426,15 +433,22 @@
 
 	/* We have held the volume lock, it is safe to get the devices. */
 	list_for_each_entry(orig_dev, &orig->devices, dev_list) {
+		struct rcu_string *name;
+
 		device = kzalloc(sizeof(*device), GFP_NOFS);
 		if (!device)
 			goto error;
 
-		device->name = kstrdup(orig_dev->name, GFP_NOFS);
-		if (!device->name) {
+		/*
+		 * This is ok to do without the RCU read lock held because the
+		 * uuid mutex ensures nothing we touch here can disappear.
+		 */
+		name = rcu_string_strdup(orig_dev->name->str, GFP_NOFS);
+		if (!name) {
 			kfree(device);
 			goto error;
 		}
+		rcu_assign_pointer(device->name, name);
 
 		device->devid = orig_dev->devid;
 		device->work.func = pending_bios_fn;
@@ -487,7 +501,7 @@
 		}
 		list_del_init(&device->dev_list);
 		fs_devices->num_devices--;
-		kfree(device->name);
+		rcu_string_free(device->name);
 		kfree(device);
 	}
 
@@ -512,7 +526,7 @@
 	if (device->bdev)
 		blkdev_put(device->bdev, device->mode);
 
-	kfree(device->name);
+	rcu_string_free(device->name);
 	kfree(device);
 }
 
@@ -536,6 +550,7 @@
 	mutex_lock(&fs_devices->device_list_mutex);
 	list_for_each_entry(device, &fs_devices->devices, dev_list) {
 		struct btrfs_device *new_device;
+		struct rcu_string *name;
 
 		if (device->bdev)
 			fs_devices->open_devices--;
@@ -551,8 +566,11 @@
 		new_device = kmalloc(sizeof(*new_device), GFP_NOFS);
 		BUG_ON(!new_device); /* -ENOMEM */
 		memcpy(new_device, device, sizeof(*new_device));
-		new_device->name = kstrdup(device->name, GFP_NOFS);
-		BUG_ON(device->name && !new_device->name); /* -ENOMEM */
+
+		/* Safe because we are under uuid_mutex */
+		name = rcu_string_strdup(device->name->str, GFP_NOFS);
+		BUG_ON(device->name && !name); /* -ENOMEM */
+		rcu_assign_pointer(new_device->name, name);
 		new_device->bdev = NULL;
 		new_device->writeable = 0;
 		new_device->in_fs_metadata = 0;
@@ -617,9 +635,9 @@
 		if (!device->name)
 			continue;
 
-		bdev = blkdev_get_by_path(device->name, flags, holder);
+		bdev = blkdev_get_by_path(device->name->str, flags, holder);
 		if (IS_ERR(bdev)) {
-			printk(KERN_INFO "open %s failed\n", device->name);
+			printk(KERN_INFO "open %s failed\n", device->name->str);
 			goto error;
 		}
 		filemap_write_and_wait(bdev->bd_inode->i_mapping);
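
The device->name churn above leans on the small rcu_string helper that this
series introduces in fs/btrfs/rcu-string.h (not shown in this hunk).  A minimal,
free-standing sketch of that pattern follows; the struct layout and the helper
body are assumptions inferred from how rcu_string_strdup(), rcu_assign_pointer()
and rcu_str_deref() are used above, not a copy of the real header:

    #include <linux/printk.h>
    #include <linux/rcupdate.h>
    #include <linux/slab.h>
    #include <linux/string.h>

    /* assumed layout: an rcu_head followed by the NUL-terminated copy */
    struct rcu_string {
            struct rcu_head rcu;
            char str[];
    };

    static struct rcu_string *rcu_string_strdup(const char *src, gfp_t mask)
    {
            size_t len = strlen(src) + 1;
            struct rcu_string *ret = kmalloc(sizeof(*ret) + len, mask);

            if (ret)
                    memcpy(ret->str, src, len);
            return ret;
    }

    /* readers never copy or free the name; they dereference it under RCU */
    static void print_device_name(struct rcu_string __rcu *name)
    {
            rcu_read_lock();
            pr_info("btrfs device: %s\n", rcu_dereference(name)->str);
            rcu_read_unlock();
    }

Writers (the device add/rename/close paths above) publish a new string with
rcu_assign_pointer() while holding uuid_mutex or device_list_mutex and release
the old one with rcu_string_free(), presumably via kfree_rcu(), so readers in
the I/O paths never see a half-freed name.
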
@@ -1628,12 +1646,13 @@
 	struct block_device *bdev;
 	struct list_head *devices;
 	struct super_block *sb = root->fs_info->sb;
+	struct rcu_string *name;
 	u64 total_bytes;
 	int seeding_dev = 0;
 	int ret = 0;
 
 	if ((sb->s_flags & MS_RDONLY) && !root->fs_info->fs_devices->seeding)
-		return -EINVAL;
+		return -EROFS;
 
 	bdev = blkdev_get_by_path(device_path, FMODE_WRITE | FMODE_EXCL,
 				  root->fs_info->bdev_holder);
@@ -1667,23 +1686,24 @@
 		goto error;
 	}
 
-	device->name = kstrdup(device_path, GFP_NOFS);
-	if (!device->name) {
+	name = rcu_string_strdup(device_path, GFP_NOFS);
+	if (!name) {
 		kfree(device);
 		ret = -ENOMEM;
 		goto error;
 	}
+	rcu_assign_pointer(device->name, name);
 
 	ret = find_next_devid(root, &device->devid);
 	if (ret) {
-		kfree(device->name);
+		rcu_string_free(device->name);
 		kfree(device);
 		goto error;
 	}
 
 	trans = btrfs_start_transaction(root, 0);
 	if (IS_ERR(trans)) {
-		kfree(device->name);
+		rcu_string_free(device->name);
 		kfree(device);
 		ret = PTR_ERR(trans);
 		goto error;
@@ -1792,7 +1812,7 @@
 	unlock_chunks(root);
 	btrfs_abort_transaction(trans, root, ret);
 	btrfs_end_transaction(trans, root);
-	kfree(device->name);
+	rcu_string_free(device->name);
 	kfree(device);
 error:
 	blkdev_put(bdev, FMODE_EXCL);
@@ -4001,13 +4021,58 @@
 	return 0;
 }
 
+static void *merge_stripe_index_into_bio_private(void *bi_private,
+						 unsigned int stripe_index)
+{
+	/*
+	 * with single, dup, RAID0, RAID1 and RAID10, stripe_index is
+	 * at most 1.
+	 * The alternative solution (instead of stealing bits from the
+	 * pointer) would be to allocate an intermediate structure
+	 * that contains the old private pointer plus the stripe_index.
+	 */
+	BUG_ON((((uintptr_t)bi_private) & 3) != 0);
+	BUG_ON(stripe_index > 3);
+	return (void *)(((uintptr_t)bi_private) | stripe_index);
+}
+
+static struct btrfs_bio *extract_bbio_from_bio_private(void *bi_private)
+{
+	return (struct btrfs_bio *)(((uintptr_t)bi_private) & ~((uintptr_t)3));
+}
+
+static unsigned int extract_stripe_index_from_bio_private(void *bi_private)
+{
+	return (unsigned int)((uintptr_t)bi_private) & 3;
+}
+
 static void btrfs_end_bio(struct bio *bio, int err)
 {
-	struct btrfs_bio *bbio = bio->bi_private;
+	struct btrfs_bio *bbio = extract_bbio_from_bio_private(bio->bi_private);
 	int is_orig_bio = 0;
 
-	if (err)
+	if (err) {
 		atomic_inc(&bbio->error);
+		if (err == -EIO || err == -EREMOTEIO) {
+			unsigned int stripe_index =
+				extract_stripe_index_from_bio_private(
+					bio->bi_private);
+			struct btrfs_device *dev;
+
+			BUG_ON(stripe_index >= bbio->num_stripes);
+			dev = bbio->stripes[stripe_index].dev;
+			if (bio->bi_rw & WRITE)
+				btrfs_dev_stat_inc(dev,
+						   BTRFS_DEV_STAT_WRITE_ERRS);
+			else
+				btrfs_dev_stat_inc(dev,
+						   BTRFS_DEV_STAT_READ_ERRS);
+			if ((bio->bi_rw & WRITE_FLUSH) == WRITE_FLUSH)
+				btrfs_dev_stat_inc(dev,
+						   BTRFS_DEV_STAT_FLUSH_ERRS);
+			btrfs_dev_stat_print_on_error(dev);
+		}
+	}
 
 	if (bio == bbio->orig_bio)
 		is_orig_bio = 1;
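
The three helpers above pack a stripe index into the two low-order bits of
bi_private, relying on the pointer being at least 4-byte aligned.  The same
trick, stripped of the btrfs types so it compiles and runs stand-alone (names
here are illustrative, not kernel symbols):

    #include <assert.h>
    #include <stdint.h>
    #include <stdio.h>

    /* pack a small tag (0..3) into the low bits of a 4-byte-aligned pointer */
    static void *ptr_pack(void *p, unsigned int tag)
    {
            assert(((uintptr_t)p & 3) == 0);   /* pointer must be aligned */
            assert(tag <= 3);                  /* only two spare bits */
            return (void *)((uintptr_t)p | tag);
    }

    static void *ptr_addr(void *packed)
    {
            return (void *)((uintptr_t)packed & ~(uintptr_t)3);
    }

    static unsigned int ptr_tag(void *packed)
    {
            return (unsigned int)((uintptr_t)packed & 3);
    }

    int main(void)
    {
            int payload = 42;
            void *packed = ptr_pack(&payload, 2);

            assert(ptr_addr(packed) == &payload);
            printf("tag=%u value=%d\n", ptr_tag(packed),
                   *(int *)ptr_addr(packed));
            return 0;
    }

The alternative named in the comment, an intermediate structure carrying the
original private pointer plus the stripe index, would cost an allocation per
bio, which is why the bit-stealing variant is used here.
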
@@ -4149,14 +4214,23 @@
 			bio = first_bio;
 		}
 		bio->bi_private = bbio;
+		bio->bi_private = merge_stripe_index_into_bio_private(
+				bio->bi_private, (unsigned int)dev_nr);
 		bio->bi_end_io = btrfs_end_bio;
 		bio->bi_sector = bbio->stripes[dev_nr].physical >> 9;
 		dev = bbio->stripes[dev_nr].dev;
 		if (dev && dev->bdev && (rw != WRITE || dev->writeable)) {
+#ifdef DEBUG
+			struct rcu_string *name;
+
+			rcu_read_lock();
+			name = rcu_dereference(dev->name);
 			pr_debug("btrfs_map_bio: rw %d, secor=%llu, dev=%lu "
 				 "(%s id %llu), size=%u\n", rw,
 				 (u64)bio->bi_sector, (u_long)dev->bdev->bd_dev,
-				 dev->name, dev->devid, bio->bi_size);
+				 name->str, dev->devid, bio->bi_size);
+			rcu_read_unlock();
+#endif
 			bio->bi_bdev = dev->bdev;
 			if (async_submit)
 				schedule_bio(root, dev, rw, bio);
@@ -4509,6 +4583,28 @@
 	return ret;
 }
 
+struct btrfs_device *btrfs_find_device_for_logical(struct btrfs_root *root,
+						   u64 logical, int mirror_num)
+{
+	struct btrfs_mapping_tree *map_tree = &root->fs_info->mapping_tree;
+	int ret;
+	u64 map_length = 0;
+	struct btrfs_bio *bbio = NULL;
+	struct btrfs_device *device;
+
+	BUG_ON(mirror_num == 0);
+	ret = btrfs_map_block(map_tree, WRITE, logical, &map_length, &bbio,
+			      mirror_num);
+	if (ret) {
+		BUG_ON(bbio != NULL);
+		return NULL;
+	}
+	BUG_ON(mirror_num != bbio->mirror_num);
+	device = bbio->stripes[mirror_num - 1].dev;
+	kfree(bbio);
+	return device;
+}
+
 int btrfs_read_chunk_tree(struct btrfs_root *root)
 {
 	struct btrfs_path *path;
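
btrfs_find_device_for_logical() maps a (logical address, mirror number) pair
back to the btrfs_device that served it.  The actual call sites live outside
this hunk; a hedged sketch of how a corruption-detection path might combine it
with the counters added below (account_corruption() is an illustrative name,
not a function in this patch):

    #include "volumes.h"

    /* charge a detected corruption to the device backing the mirror read */
    static void account_corruption(struct btrfs_root *root, u64 logical,
                                   int mirror_num)
    {
            struct btrfs_device *dev;

            dev = btrfs_find_device_for_logical(root, logical, mirror_num);
            if (dev)
                    btrfs_dev_stat_inc_and_print(dev,
                                    BTRFS_DEV_STAT_CORRUPTION_ERRS);
    }

btrfs_dev_stat_inc_and_print() and BTRFS_DEV_STAT_CORRUPTION_ERRS are both
introduced further down in this patch.
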
@@ -4583,3 +4679,231 @@
 	btrfs_free_path(path);
 	return ret;
 }
+
+static void __btrfs_reset_dev_stats(struct btrfs_device *dev)
+{
+	int i;
+
+	for (i = 0; i < BTRFS_DEV_STAT_VALUES_MAX; i++)
+		btrfs_dev_stat_reset(dev, i);
+}
+
+int btrfs_init_dev_stats(struct btrfs_fs_info *fs_info)
+{
+	struct btrfs_key key;
+	struct btrfs_key found_key;
+	struct btrfs_root *dev_root = fs_info->dev_root;
+	struct btrfs_fs_devices *fs_devices = fs_info->fs_devices;
+	struct extent_buffer *eb;
+	int slot;
+	int ret = 0;
+	struct btrfs_device *device;
+	struct btrfs_path *path = NULL;
+	int i;
+
+	path = btrfs_alloc_path();
+	if (!path) {
+		ret = -ENOMEM;
+		goto out;
+	}
+
+	mutex_lock(&fs_devices->device_list_mutex);
+	list_for_each_entry(device, &fs_devices->devices, dev_list) {
+		int item_size;
+		struct btrfs_dev_stats_item *ptr;
+
+		key.objectid = 0;
+		key.type = BTRFS_DEV_STATS_KEY;
+		key.offset = device->devid;
+		ret = btrfs_search_slot(NULL, dev_root, &key, path, 0, 0);
+		if (ret) {
+			printk_in_rcu(KERN_WARNING "btrfs: no dev_stats entry found for device %s (devid %llu) (OK on first mount after mkfs)\n",
+				      rcu_str_deref(device->name),
+				      (unsigned long long)device->devid);
+			__btrfs_reset_dev_stats(device);
+			device->dev_stats_valid = 1;
+			btrfs_release_path(path);
+			continue;
+		}
+		slot = path->slots[0];
+		eb = path->nodes[0];
+		btrfs_item_key_to_cpu(eb, &found_key, slot);
+		item_size = btrfs_item_size_nr(eb, slot);
+
+		ptr = btrfs_item_ptr(eb, slot,
+				     struct btrfs_dev_stats_item);
+
+		for (i = 0; i < BTRFS_DEV_STAT_VALUES_MAX; i++) {
+			if (item_size >= (1 + i) * sizeof(__le64))
+				btrfs_dev_stat_set(device, i,
+					btrfs_dev_stats_value(eb, ptr, i));
+			else
+				btrfs_dev_stat_reset(device, i);
+		}
+
+		device->dev_stats_valid = 1;
+		btrfs_dev_stat_print_on_load(device);
+		btrfs_release_path(path);
+	}
+	mutex_unlock(&fs_devices->device_list_mutex);
+
+out:
+	btrfs_free_path(path);
+	return ret < 0 ? ret : 0;
+}
+
+static int update_dev_stat_item(struct btrfs_trans_handle *trans,
+				struct btrfs_root *dev_root,
+				struct btrfs_device *device)
+{
+	struct btrfs_path *path;
+	struct btrfs_key key;
+	struct extent_buffer *eb;
+	struct btrfs_dev_stats_item *ptr;
+	int ret;
+	int i;
+
+	key.objectid = 0;
+	key.type = BTRFS_DEV_STATS_KEY;
+	key.offset = device->devid;
+
+	path = btrfs_alloc_path();
+	BUG_ON(!path);
+	ret = btrfs_search_slot(trans, dev_root, &key, path, -1, 1);
+	if (ret < 0) {
+		printk_in_rcu(KERN_WARNING "btrfs: error %d while searching for dev_stats item for device %s!\n",
+			      ret, rcu_str_deref(device->name));
+		goto out;
+	}
+
+	if (ret == 0 &&
+	    btrfs_item_size_nr(path->nodes[0], path->slots[0]) < sizeof(*ptr)) {
+		/* need to delete old one and insert a new one */
+		ret = btrfs_del_item(trans, dev_root, path);
+		if (ret != 0) {
+			printk_in_rcu(KERN_WARNING "btrfs: delete too small dev_stats item for device %s failed %d!\n",
+				      rcu_str_deref(device->name), ret);
+			goto out;
+		}
+		ret = 1;
+	}
+
+	if (ret == 1) {
+		/* need to insert a new item */
+		btrfs_release_path(path);
+		ret = btrfs_insert_empty_item(trans, dev_root, path,
+					      &key, sizeof(*ptr));
+		if (ret < 0) {
+			printk_in_rcu(KERN_WARNING "btrfs: insert dev_stats item for device %s failed %d!\n",
+				      rcu_str_deref(device->name), ret);
+			goto out;
+		}
+	}
+
+	eb = path->nodes[0];
+	ptr = btrfs_item_ptr(eb, path->slots[0], struct btrfs_dev_stats_item);
+	for (i = 0; i < BTRFS_DEV_STAT_VALUES_MAX; i++)
+		btrfs_set_dev_stats_value(eb, ptr, i,
+					  btrfs_dev_stat_read(device, i));
+	btrfs_mark_buffer_dirty(eb);
+
+out:
+	btrfs_free_path(path);
+	return ret;
+}
+
+/*
+ * called from commit_transaction. Writes all changed device stats to disk.
+ */
+int btrfs_run_dev_stats(struct btrfs_trans_handle *trans,
+			struct btrfs_fs_info *fs_info)
+{
+	struct btrfs_root *dev_root = fs_info->dev_root;
+	struct btrfs_fs_devices *fs_devices = fs_info->fs_devices;
+	struct btrfs_device *device;
+	int ret = 0;
+
+	mutex_lock(&fs_devices->device_list_mutex);
+	list_for_each_entry(device, &fs_devices->devices, dev_list) {
+		if (!device->dev_stats_valid || !device->dev_stats_dirty)
+			continue;
+
+		ret = update_dev_stat_item(trans, dev_root, device);
+		if (!ret)
+			device->dev_stats_dirty = 0;
+	}
+	mutex_unlock(&fs_devices->device_list_mutex);
+
+	return ret;
+}
+
+void btrfs_dev_stat_inc_and_print(struct btrfs_device *dev, int index)
+{
+	btrfs_dev_stat_inc(dev, index);
+	btrfs_dev_stat_print_on_error(dev);
+}
+
+void btrfs_dev_stat_print_on_error(struct btrfs_device *dev)
+{
+	if (!dev->dev_stats_valid)
+		return;
+	printk_ratelimited_in_rcu(KERN_ERR
+			   "btrfs: bdev %s errs: wr %u, rd %u, flush %u, corrupt %u, gen %u\n",
+			   rcu_str_deref(dev->name),
+			   btrfs_dev_stat_read(dev, BTRFS_DEV_STAT_WRITE_ERRS),
+			   btrfs_dev_stat_read(dev, BTRFS_DEV_STAT_READ_ERRS),
+			   btrfs_dev_stat_read(dev, BTRFS_DEV_STAT_FLUSH_ERRS),
+			   btrfs_dev_stat_read(dev,
+					       BTRFS_DEV_STAT_CORRUPTION_ERRS),
+			   btrfs_dev_stat_read(dev,
+					       BTRFS_DEV_STAT_GENERATION_ERRS));
+}
+
+static void btrfs_dev_stat_print_on_load(struct btrfs_device *dev)
+{
+	printk_in_rcu(KERN_INFO "btrfs: bdev %s errs: wr %u, rd %u, flush %u, corrupt %u, gen %u\n",
+	       rcu_str_deref(dev->name),
+	       btrfs_dev_stat_read(dev, BTRFS_DEV_STAT_WRITE_ERRS),
+	       btrfs_dev_stat_read(dev, BTRFS_DEV_STAT_READ_ERRS),
+	       btrfs_dev_stat_read(dev, BTRFS_DEV_STAT_FLUSH_ERRS),
+	       btrfs_dev_stat_read(dev, BTRFS_DEV_STAT_CORRUPTION_ERRS),
+	       btrfs_dev_stat_read(dev, BTRFS_DEV_STAT_GENERATION_ERRS));
+}
+
+int btrfs_get_dev_stats(struct btrfs_root *root,
+			struct btrfs_ioctl_get_dev_stats *stats,
+			int reset_after_read)
+{
+	struct btrfs_device *dev;
+	struct btrfs_fs_devices *fs_devices = root->fs_info->fs_devices;
+	int i;
+
+	mutex_lock(&fs_devices->device_list_mutex);
+	dev = btrfs_find_device(root, stats->devid, NULL, NULL);
+	mutex_unlock(&fs_devices->device_list_mutex);
+
+	if (!dev) {
+		printk(KERN_WARNING
+		       "btrfs: get dev_stats failed, device not found\n");
+		return -ENODEV;
+	} else if (!dev->dev_stats_valid) {
+		printk(KERN_WARNING
+		       "btrfs: get dev_stats failed, not yet valid\n");
+		return -ENODEV;
+	} else if (reset_after_read) {
+		for (i = 0; i < BTRFS_DEV_STAT_VALUES_MAX; i++) {
+			if (stats->nr_items > i)
+				stats->values[i] =
+					btrfs_dev_stat_read_and_reset(dev, i);
+			else
+				btrfs_dev_stat_reset(dev, i);
+		}
+	} else {
+		for (i = 0; i < BTRFS_DEV_STAT_VALUES_MAX; i++)
+			if (stats->nr_items > i)
+				stats->values[i] = btrfs_dev_stat_read(dev, i);
+	}
+	if (stats->nr_items > BTRFS_DEV_STAT_VALUES_MAX)
+		stats->nr_items = BTRFS_DEV_STAT_VALUES_MAX;
+	return 0;
+}
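
btrfs_get_dev_stats() is the kernel half of the new dev-stats ioctl.  A hedged
userspace sketch of reading the counters follows; the ioctl name/number and the
struct layout come from the accompanying fs/btrfs/ioctl.h changes, which are
not part of this hunk, so treat the BTRFS_IOC_GET_DEV_STATS definition, the
counter count and the padding below as assumptions:

    #include <fcntl.h>
    #include <stdint.h>
    #include <stdio.h>
    #include <sys/ioctl.h>
    #include <unistd.h>

    #define BTRFS_DEV_STAT_VALUES_MAX 5            /* assumed counter count */

    struct btrfs_ioctl_get_dev_stats {             /* assumed layout */
            uint64_t devid;                        /* in */
            uint64_t nr_items;                     /* in/out */
            uint64_t values[BTRFS_DEV_STAT_VALUES_MAX];
            uint64_t unused[128 - 2 - BTRFS_DEV_STAT_VALUES_MAX]; /* pad to 1K */
    };

    /* assumed to match the definition added to fs/btrfs/ioctl.h */
    #define BTRFS_IOC_GET_DEV_STATS \
            _IOWR(0x94, 52, struct btrfs_ioctl_get_dev_stats)

    int main(int argc, char **argv)
    {
            struct btrfs_ioctl_get_dev_stats args = {
                    .devid = 1,
                    .nr_items = BTRFS_DEV_STAT_VALUES_MAX,
            };
            int fd;
            uint64_t i;

            if (argc < 2)
                    return 1;
            fd = open(argv[1], O_RDONLY);          /* any path on the fs */
            if (fd < 0 || ioctl(fd, BTRFS_IOC_GET_DEV_STATS, &args) < 0) {
                    perror("dev stats");
                    return 1;
            }
            for (i = 0; i < args.nr_items; i++)
                    printf("stat[%llu] = %llu\n", (unsigned long long)i,
                           (unsigned long long)args.values[i]);
            close(fd);
            return 0;
    }

The kernel caps nr_items at BTRFS_DEV_STAT_VALUES_MAX on return, so a caller
that passes a larger buffer still only gets the counters it understands.
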
diff --git a/fs/btrfs/volumes.h b/fs/btrfs/volumes.h
index bb6b03f..74366f2 100644
--- a/fs/btrfs/volumes.h
+++ b/fs/btrfs/volumes.h
@@ -22,6 +22,7 @@
 #include <linux/bio.h>
 #include <linux/sort.h>
 #include "async-thread.h"
+#include "ioctl.h"
 
 #define BTRFS_STRIPE_LEN	(64 * 1024)
 
@@ -57,7 +58,7 @@
 	/* the mode sent to blkdev_get */
 	fmode_t mode;
 
-	char *name;
+	struct rcu_string *name;
 
 	/* the internal btrfs device id */
 	u64 devid;
@@ -106,6 +107,11 @@
 	struct completion flush_wait;
 	int nobarriers;
 
+	/* disk I/O failure stats. For detailed description refer to
+	 * enum btrfs_dev_stat_values in ioctl.h */
+	int dev_stats_valid;
+	int dev_stats_dirty; /* counters need to be written to disk */
+	atomic_t dev_stat_values[BTRFS_DEV_STAT_VALUES_MAX];
 };
 
 struct btrfs_fs_devices {
@@ -281,4 +287,50 @@
 int btrfs_chunk_readonly(struct btrfs_root *root, u64 chunk_offset);
 int find_free_dev_extent(struct btrfs_device *device, u64 num_bytes,
 			 u64 *start, u64 *max_avail);
+struct btrfs_device *btrfs_find_device_for_logical(struct btrfs_root *root,
+						   u64 logical, int mirror_num);
+void btrfs_dev_stat_print_on_error(struct btrfs_device *device);
+void btrfs_dev_stat_inc_and_print(struct btrfs_device *dev, int index);
+int btrfs_get_dev_stats(struct btrfs_root *root,
+			struct btrfs_ioctl_get_dev_stats *stats,
+			int reset_after_read);
+int btrfs_init_dev_stats(struct btrfs_fs_info *fs_info);
+int btrfs_run_dev_stats(struct btrfs_trans_handle *trans,
+			struct btrfs_fs_info *fs_info);
+
+static inline void btrfs_dev_stat_inc(struct btrfs_device *dev,
+				      int index)
+{
+	atomic_inc(dev->dev_stat_values + index);
+	dev->dev_stats_dirty = 1;
+}
+
+static inline int btrfs_dev_stat_read(struct btrfs_device *dev,
+				      int index)
+{
+	return atomic_read(dev->dev_stat_values + index);
+}
+
+static inline int btrfs_dev_stat_read_and_reset(struct btrfs_device *dev,
+						int index)
+{
+	int ret;
+
+	ret = atomic_xchg(dev->dev_stat_values + index, 0);
+	dev->dev_stats_dirty = 1;
+	return ret;
+}
+
+static inline void btrfs_dev_stat_set(struct btrfs_device *dev,
+				      int index, unsigned long val)
+{
+	atomic_set(dev->dev_stat_values + index, val);
+	dev->dev_stats_dirty = 1;
+}
+
+static inline void btrfs_dev_stat_reset(struct btrfs_device *dev,
+					int index)
+{
+	btrfs_dev_stat_set(dev, index, 0);
+}
 #endif
diff --git a/fs/btrfs/xattr.c b/fs/btrfs/xattr.c
index e7a5659..3f4e2d6 100644
--- a/fs/btrfs/xattr.c
+++ b/fs/btrfs/xattr.c
@@ -196,6 +196,7 @@
 	if (ret)
 		goto out;
 
+	inode_inc_iversion(inode);
 	inode->i_ctime = CURRENT_TIME;
 	ret = btrfs_update_inode(trans, root, inode);
 	BUG_ON(ret);
diff --git a/fs/buffer.c b/fs/buffer.c
index ad5938c..838a9cf 100644
--- a/fs/buffer.c
+++ b/fs/buffer.c
@@ -3152,7 +3152,7 @@
 /*
  * Buffer-head allocation
  */
-static struct kmem_cache *bh_cachep;
+static struct kmem_cache *bh_cachep __read_mostly;
 
 /*
  * Once the number of bh's in the machine exceeds this level, we start
diff --git a/fs/ceph/addr.c b/fs/ceph/addr.c
index 173b1d2..8b67304 100644
--- a/fs/ceph/addr.c
+++ b/fs/ceph/addr.c
@@ -54,7 +54,12 @@
 	(CONGESTION_ON_THRESH(congestion_kb) -				\
 	 (CONGESTION_ON_THRESH(congestion_kb) >> 2))
 
-
+static inline struct ceph_snap_context *page_snap_context(struct page *page)
+{
+	if (PagePrivate(page))
+		return (void *)page->private;
+	return NULL;
+}
 
 /*
  * Dirty a page.  Optimistically adjust accounting, on the assumption
@@ -142,10 +147,9 @@
 {
 	struct inode *inode;
 	struct ceph_inode_info *ci;
-	struct ceph_snap_context *snapc = (void *)page->private;
+	struct ceph_snap_context *snapc = page_snap_context(page);
 
 	BUG_ON(!PageLocked(page));
-	BUG_ON(!page->private);
 	BUG_ON(!PagePrivate(page));
 	BUG_ON(!page->mapping);
 
@@ -182,7 +186,6 @@
 	struct inode *inode = page->mapping ? page->mapping->host : NULL;
 	dout("%p releasepage %p idx %lu\n", inode, page, page->index);
 	WARN_ON(PageDirty(page));
-	WARN_ON(page->private);
 	WARN_ON(PagePrivate(page));
 	return 0;
 }
@@ -443,7 +446,7 @@
 	osdc = &fsc->client->osdc;
 
 	/* verify this is a writeable snap context */
-	snapc = (void *)page->private;
+	snapc = page_snap_context(page);
 	if (snapc == NULL) {
 		dout("writepage %p page %p not dirty?\n", inode, page);
 		goto out;
@@ -451,7 +454,7 @@
 	oldest = get_oldest_context(inode, &snap_size);
 	if (snapc->seq > oldest->seq) {
 		dout("writepage %p page %p snapc %p not writeable - noop\n",
-		     inode, page, (void *)page->private);
+		     inode, page, snapc);
 		/* we should only noop if called by kswapd */
 		WARN_ON((current->flags & PF_MEMALLOC) == 0);
 		ceph_put_snap_context(oldest);
@@ -591,7 +594,7 @@
 			clear_bdi_congested(&fsc->backing_dev_info,
 					    BLK_RW_ASYNC);
 
-		ceph_put_snap_context((void *)page->private);
+		ceph_put_snap_context(page_snap_context(page));
 		page->private = 0;
 		ClearPagePrivate(page);
 		dout("unlocking %d %p\n", i, page);
@@ -795,7 +798,7 @@
 			}
 
 			/* only if matching snap context */
-			pgsnapc = (void *)page->private;
+			pgsnapc = page_snap_context(page);
 			if (pgsnapc->seq > snapc->seq) {
 				dout("page snapc %p %lld > oldest %p %lld\n",
 				     pgsnapc, pgsnapc->seq, snapc, snapc->seq);
@@ -984,7 +987,7 @@
 	BUG_ON(!ci->i_snap_realm);
 	down_read(&mdsc->snap_rwsem);
 	BUG_ON(!ci->i_snap_realm->cached_context);
-	snapc = (void *)page->private;
+	snapc = page_snap_context(page);
 	if (snapc && snapc != ci->i_head_snapc) {
 		/*
 		 * this page is already dirty in another (older) snap
diff --git a/fs/ceph/export.c b/fs/ceph/export.c
index fbb2a64..8e1b60e 100644
--- a/fs/ceph/export.c
+++ b/fs/ceph/export.c
@@ -40,38 +40,49 @@
 	u32 parent_name_hash;
 } __attribute__ ((packed));
 
-static int ceph_encode_fh(struct dentry *dentry, u32 *rawfh, int *max_len,
-			  int connectable)
+/*
+ * The presence of @parent_inode here tells us whether NFS wants a
+ * connectable file handle.  However, we want to make a connectable
+ * file handle unconditionally so that the MDS gets as much of a hint
+ * as possible.  That means we only use @parent_dentry to indicate
+ * whether nfsd wants a connectable fh, and whether we should indicate
+ * failure from a too-small @max_len.
+ */
+static int ceph_encode_fh(struct inode *inode, u32 *rawfh, int *max_len,
+			  struct inode *parent_inode)
 {
 	int type;
 	struct ceph_nfs_fh *fh = (void *)rawfh;
 	struct ceph_nfs_confh *cfh = (void *)rawfh;
-	struct dentry *parent;
-	struct inode *inode = dentry->d_inode;
 	int connected_handle_length = sizeof(*cfh)/4;
 	int handle_length = sizeof(*fh)/4;
+	struct dentry *dentry = d_find_alias(inode);
+	struct dentry *parent;
 
 	/* don't re-export snaps */
 	if (ceph_snap(inode) != CEPH_NOSNAP)
 		return -EINVAL;
 
-	spin_lock(&dentry->d_lock);
-	parent = dentry->d_parent;
-	if (*max_len >= connected_handle_length) {
+	/* if we found an alias, generate a connectable fh */
+	if (*max_len >= connected_handle_length && dentry) {
 		dout("encode_fh %p connectable\n", dentry);
-		cfh->ino = ceph_ino(dentry->d_inode);
+		spin_lock(&dentry->d_lock);
+		parent = dentry->d_parent;
+		cfh->ino = ceph_ino(inode);
 		cfh->parent_ino = ceph_ino(parent->d_inode);
 		cfh->parent_name_hash = ceph_dentry_hash(parent->d_inode,
 							 dentry);
 		*max_len = connected_handle_length;
 		type = 2;
+		spin_unlock(&dentry->d_lock);
 	} else if (*max_len >= handle_length) {
-		if (connectable) {
+		if (parent_inode) {
+			/* nfsd wants connectable */
 			*max_len = connected_handle_length;
 			type = 255;
 		} else {
 			dout("encode_fh %p\n", dentry);
-			fh->ino = ceph_ino(dentry->d_inode);
+			fh->ino = ceph_ino(inode);
 			*max_len = handle_length;
 			type = 1;
 		}
@@ -79,7 +90,6 @@
 		*max_len = handle_length;
 		type = 255;
 	}
-	spin_unlock(&dentry->d_lock);
 	return type;
 }
 
diff --git a/fs/ceph/file.c b/fs/ceph/file.c
index ed72428..988d4f3 100644
--- a/fs/ceph/file.c
+++ b/fs/ceph/file.c
@@ -54,7 +54,6 @@
 	req->r_fmode = ceph_flags_to_mode(flags);
 	req->r_args.open.flags = cpu_to_le32(flags);
 	req->r_args.open.mode = cpu_to_le32(create_mode);
-	req->r_args.open.preferred = cpu_to_le32(-1);
 out:
 	return req;
 }
diff --git a/fs/ceph/ioctl.c b/fs/ceph/ioctl.c
index 790914a59..8e3fb69 100644
--- a/fs/ceph/ioctl.c
+++ b/fs/ceph/ioctl.c
@@ -26,8 +26,7 @@
 		l.stripe_count = ceph_file_layout_stripe_count(ci->i_layout);
 		l.object_size = ceph_file_layout_object_size(ci->i_layout);
 		l.data_pool = le32_to_cpu(ci->i_layout.fl_pg_pool);
-		l.preferred_osd =
-			(s32)le32_to_cpu(ci->i_layout.fl_pg_preferred);
+		l.preferred_osd = (s32)-1;
 		if (copy_to_user(arg, &l, sizeof(l)))
 			return -EFAULT;
 	}
@@ -35,6 +34,32 @@
 	return err;
 }
 
+static long __validate_layout(struct ceph_mds_client *mdsc,
+			      struct ceph_ioctl_layout *l)
+{
+	int i, err;
+
+	/* validate striping parameters */
+	if ((l->object_size & ~PAGE_MASK) ||
+	    (l->stripe_unit & ~PAGE_MASK) ||
+	    ((unsigned)l->object_size % (unsigned)l->stripe_unit))
+		return -EINVAL;
+
+	/* make sure it's a valid data pool */
+	mutex_lock(&mdsc->mutex);
+	err = -EINVAL;
+	for (i = 0; i < mdsc->mdsmap->m_num_data_pg_pools; i++)
+		if (mdsc->mdsmap->m_data_pg_pools[i] == l->data_pool) {
+			err = 0;
+			break;
+		}
+	mutex_unlock(&mdsc->mutex);
+	if (err)
+		return err;
+
+	return 0;
+}
+
 static long ceph_ioctl_set_layout(struct file *file, void __user *arg)
 {
 	struct inode *inode = file->f_dentry->d_inode;
@@ -44,52 +69,40 @@
 	struct ceph_ioctl_layout l;
 	struct ceph_inode_info *ci = ceph_inode(file->f_dentry->d_inode);
 	struct ceph_ioctl_layout nl;
-	int err, i;
+	int err;
 
 	if (copy_from_user(&l, arg, sizeof(l)))
 		return -EFAULT;
 
 	/* validate changed params against current layout */
 	err = ceph_do_getattr(file->f_dentry->d_inode, CEPH_STAT_CAP_LAYOUT);
-	if (!err) {
-		nl.stripe_unit = ceph_file_layout_su(ci->i_layout);
-		nl.stripe_count = ceph_file_layout_stripe_count(ci->i_layout);
-		nl.object_size = ceph_file_layout_object_size(ci->i_layout);
-		nl.data_pool = le32_to_cpu(ci->i_layout.fl_pg_pool);
-		nl.preferred_osd =
-				(s32)le32_to_cpu(ci->i_layout.fl_pg_preferred);
-	} else
+	if (err)
 		return err;
 
+	memset(&nl, 0, sizeof(nl));
 	if (l.stripe_count)
 		nl.stripe_count = l.stripe_count;
+	else
+		nl.stripe_count = ceph_file_layout_stripe_count(ci->i_layout);
 	if (l.stripe_unit)
 		nl.stripe_unit = l.stripe_unit;
+	else
+		nl.stripe_unit = ceph_file_layout_su(ci->i_layout);
 	if (l.object_size)
 		nl.object_size = l.object_size;
+	else
+		nl.object_size = ceph_file_layout_object_size(ci->i_layout);
 	if (l.data_pool)
 		nl.data_pool = l.data_pool;
-	if (l.preferred_osd)
-		nl.preferred_osd = l.preferred_osd;
+	else
+		nl.data_pool = ceph_file_layout_pg_pool(ci->i_layout);
 
-	if ((nl.object_size & ~PAGE_MASK) ||
-	    (nl.stripe_unit & ~PAGE_MASK) ||
-	    ((unsigned)nl.object_size % (unsigned)nl.stripe_unit))
-		return -EINVAL;
+	/* this is obsolete, and always -1 */
+	nl.preferred_osd = le64_to_cpu(-1);
 
-	/* make sure it's a valid data pool */
-	if (l.data_pool > 0) {
-		mutex_lock(&mdsc->mutex);
-		err = -EINVAL;
-		for (i = 0; i < mdsc->mdsmap->m_num_data_pg_pools; i++)
-			if (mdsc->mdsmap->m_data_pg_pools[i] == l.data_pool) {
-				err = 0;
-				break;
-			}
-		mutex_unlock(&mdsc->mutex);
-		if (err)
-			return err;
-	}
+	err = __validate_layout(mdsc, &nl);
+	if (err)
+		return err;
 
 	req = ceph_mdsc_create_request(mdsc, CEPH_MDS_OP_SETLAYOUT,
 				       USE_AUTH_MDS);
@@ -106,8 +119,6 @@
 	req->r_args.setlayout.layout.fl_object_size =
 		cpu_to_le32(l.object_size);
 	req->r_args.setlayout.layout.fl_pg_pool = cpu_to_le32(l.data_pool);
-	req->r_args.setlayout.layout.fl_pg_preferred =
-		cpu_to_le32(l.preferred_osd);
 
 	parent_inode = ceph_get_dentry_parent_inode(file->f_dentry);
 	err = ceph_mdsc_do_request(mdsc, parent_inode, req);
@@ -127,33 +138,16 @@
 	struct inode *inode = file->f_dentry->d_inode;
 	struct ceph_mds_request *req;
 	struct ceph_ioctl_layout l;
-	int err, i;
+	int err;
 	struct ceph_mds_client *mdsc = ceph_sb_to_client(inode->i_sb)->mdsc;
 
 	/* copy and validate */
 	if (copy_from_user(&l, arg, sizeof(l)))
 		return -EFAULT;
 
-	if ((l.object_size & ~PAGE_MASK) ||
-	    (l.stripe_unit & ~PAGE_MASK) ||
-	    !l.stripe_unit ||
-	    (l.object_size &&
-	        (unsigned)l.object_size % (unsigned)l.stripe_unit))
-		return -EINVAL;
-
-	/* make sure it's a valid data pool */
-	if (l.data_pool > 0) {
-		mutex_lock(&mdsc->mutex);
-		err = -EINVAL;
-		for (i = 0; i < mdsc->mdsmap->m_num_data_pg_pools; i++)
-			if (mdsc->mdsmap->m_data_pg_pools[i] == l.data_pool) {
-				err = 0;
-				break;
-			}
-		mutex_unlock(&mdsc->mutex);
-		if (err)
-			return err;
-	}
+	err = __validate_layout(mdsc, &l);
+	if (err)
+		return err;
 
 	req = ceph_mdsc_create_request(mdsc, CEPH_MDS_OP_SETDIRLAYOUT,
 				       USE_AUTH_MDS);
@@ -171,8 +165,6 @@
 			cpu_to_le32(l.object_size);
 	req->r_args.setlayout.layout.fl_pg_pool =
 			cpu_to_le32(l.data_pool);
-	req->r_args.setlayout.layout.fl_pg_preferred =
-			cpu_to_le32(l.preferred_osd);
 
 	err = ceph_mdsc_do_request(mdsc, inode, req);
 	ceph_mdsc_put_request(req);
diff --git a/fs/ceph/ioctl.h b/fs/ceph/ioctl.h
index be4a604..c77028a 100644
--- a/fs/ceph/ioctl.h
+++ b/fs/ceph/ioctl.h
@@ -34,6 +34,8 @@
 struct ceph_ioctl_layout {
 	__u64 stripe_unit, stripe_count, object_size;
 	__u64 data_pool;
+
+	/* obsolete.  new values ignored, always return -1 */
 	__s64 preferred_osd;
 };
 
diff --git a/fs/ceph/mds_client.c b/fs/ceph/mds_client.c
index 89971e1..200bc87 100644
--- a/fs/ceph/mds_client.c
+++ b/fs/ceph/mds_client.c
@@ -334,10 +334,10 @@
 	dout("mdsc put_session %p %d -> %d\n", s,
 	     atomic_read(&s->s_ref), atomic_read(&s->s_ref)-1);
 	if (atomic_dec_and_test(&s->s_ref)) {
-		if (s->s_authorizer)
+		if (s->s_auth.authorizer)
 		     s->s_mdsc->fsc->client->monc.auth->ops->destroy_authorizer(
 			     s->s_mdsc->fsc->client->monc.auth,
-			     s->s_authorizer);
+			     s->s_auth.authorizer);
 		kfree(s);
 	}
 }
@@ -3395,39 +3395,33 @@
 /*
  * authentication
  */
-static int get_authorizer(struct ceph_connection *con,
-			  void **buf, int *len, int *proto,
-			  void **reply_buf, int *reply_len, int force_new)
+
+/*
+ * Note: returned pointer is the address of a structure that's
+ * managed separately.  Caller must *not* attempt to free it.
+ */
+static struct ceph_auth_handshake *get_authorizer(struct ceph_connection *con,
+					int *proto, int force_new)
 {
 	struct ceph_mds_session *s = con->private;
 	struct ceph_mds_client *mdsc = s->s_mdsc;
 	struct ceph_auth_client *ac = mdsc->fsc->client->monc.auth;
-	int ret = 0;
+	struct ceph_auth_handshake *auth = &s->s_auth;
 
-	if (force_new && s->s_authorizer) {
-		ac->ops->destroy_authorizer(ac, s->s_authorizer);
-		s->s_authorizer = NULL;
+	if (force_new && auth->authorizer) {
+		if (ac->ops && ac->ops->destroy_authorizer)
+			ac->ops->destroy_authorizer(ac, auth->authorizer);
+		auth->authorizer = NULL;
 	}
-	if (s->s_authorizer == NULL) {
-		if (ac->ops->create_authorizer) {
-			ret = ac->ops->create_authorizer(
-				ac, CEPH_ENTITY_TYPE_MDS,
-				&s->s_authorizer,
-				&s->s_authorizer_buf,
-				&s->s_authorizer_buf_len,
-				&s->s_authorizer_reply_buf,
-				&s->s_authorizer_reply_buf_len);
-			if (ret)
-				return ret;
-		}
+	if (!auth->authorizer && ac->ops && ac->ops->create_authorizer) {
+		int ret = ac->ops->create_authorizer(ac, CEPH_ENTITY_TYPE_MDS,
+							auth);
+		if (ret)
+			return ERR_PTR(ret);
 	}
-
 	*proto = ac->protocol;
-	*buf = s->s_authorizer_buf;
-	*len = s->s_authorizer_buf_len;
-	*reply_buf = s->s_authorizer_reply_buf;
-	*reply_len = s->s_authorizer_reply_buf_len;
-	return 0;
+
+	return auth;
 }
 
 
@@ -3437,7 +3431,7 @@
 	struct ceph_mds_client *mdsc = s->s_mdsc;
 	struct ceph_auth_client *ac = mdsc->fsc->client->monc.auth;
 
-	return ac->ops->verify_authorizer_reply(ac, s->s_authorizer, len);
+	return ac->ops->verify_authorizer_reply(ac, s->s_auth.authorizer, len);
 }
 
 static int invalidate_authorizer(struct ceph_connection *con)
diff --git a/fs/ceph/mds_client.h b/fs/ceph/mds_client.h
index 8c7c04e..dd26846 100644
--- a/fs/ceph/mds_client.h
+++ b/fs/ceph/mds_client.h
@@ -11,6 +11,7 @@
 #include <linux/ceph/types.h>
 #include <linux/ceph/messenger.h>
 #include <linux/ceph/mdsmap.h>
+#include <linux/ceph/auth.h>
 
 /*
  * Some lock dependencies:
@@ -113,9 +114,7 @@
 
 	struct ceph_connection s_con;
 
-	struct ceph_authorizer *s_authorizer;
-	void             *s_authorizer_buf, *s_authorizer_reply_buf;
-	size_t            s_authorizer_buf_len, s_authorizer_reply_buf_len;
+	struct ceph_auth_handshake s_auth;
 
 	/* protected by s_gen_ttl_lock */
 	spinlock_t        s_gen_ttl_lock;
diff --git a/fs/ceph/snap.c b/fs/ceph/snap.c
index f04c096..e5206fc 100644
--- a/fs/ceph/snap.c
+++ b/fs/ceph/snap.c
@@ -331,7 +331,7 @@
 
 	/* alloc new snap context */
 	err = -ENOMEM;
-	if (num > (ULONG_MAX - sizeof(*snapc)) / sizeof(u64))
+	if (num > (SIZE_MAX - sizeof(*snapc)) / sizeof(u64))
 		goto fail;
 	snapc = kzalloc(sizeof(*snapc) + num*sizeof(u64), GFP_NOFS);
 	if (!snapc)
diff --git a/fs/ceph/xattr.c b/fs/ceph/xattr.c
index 35b8633..785cb30 100644
--- a/fs/ceph/xattr.c
+++ b/fs/ceph/xattr.c
@@ -118,15 +118,6 @@
 		(unsigned long long)ceph_file_layout_su(ci->i_layout),
 		(unsigned long long)ceph_file_layout_stripe_count(ci->i_layout),
 		(unsigned long long)ceph_file_layout_object_size(ci->i_layout));
-
-	if (ceph_file_layout_pg_preferred(ci->i_layout) >= 0) {
-		val += ret;
-		size -= ret;
-		ret += snprintf(val, size, "preferred_osd=%lld\n",
-			    (unsigned long long)ceph_file_layout_pg_preferred(
-				    ci->i_layout));
-	}
-
 	return ret;
 }
 
diff --git a/fs/cifs/Kconfig b/fs/cifs/Kconfig
index 2b243af..a08306a 100644
--- a/fs/cifs/Kconfig
+++ b/fs/cifs/Kconfig
@@ -158,3 +158,23 @@
 	  depends on CIFS && EXPERIMENTAL && BROKEN
 	  help
 	   Allows NFS server to export a CIFS mounted share (nfsd over cifs)
+
+config CIFS_SMB2
+	bool "SMB2 network file system support (EXPERIMENTAL)"
+	depends on EXPERIMENTAL && INET && BROKEN
+	select NLS
+	select KEYS
+	select FSCACHE
+	select DNS_RESOLVER
+
+	help
+	  This enables experimental support for the SMB2 (Server Message Block
+	  version 2) protocol. The SMB2 protocol is the successor to the
+	  popular CIFS and SMB network file sharing protocols. SMB2 is the
+	  native file sharing mechanism for recent versions of Windows
+	  operating systems (since Vista).  SMB2 enablement will eventually
+	  allow users better performance, security and features, than would be
+	  possible with cifs. Note that smb2 mount options also are simpler
+	  (compared to cifs) due to protocol improvements.
+
+	  Unless you are a developer or tester, say N.
diff --git a/fs/cifs/Makefile b/fs/cifs/Makefile
index 005d524..4b41275 100644
--- a/fs/cifs/Makefile
+++ b/fs/cifs/Makefile
@@ -6,7 +6,7 @@
 cifs-y := cifsfs.o cifssmb.o cifs_debug.o connect.o dir.o file.o inode.o \
 	  link.o misc.o netmisc.o smbencrypt.o transport.o asn1.o \
 	  cifs_unicode.o nterr.o xattr.o cifsencrypt.o \
-	  readdir.o ioctl.o sess.o export.o
+	  readdir.o ioctl.o sess.o export.o smb1ops.o
 
 cifs-$(CONFIG_CIFS_ACL) += cifsacl.o
 
@@ -15,3 +15,5 @@
 cifs-$(CONFIG_CIFS_DFS_UPCALL) += dns_resolve.o cifs_dfs_ref.o
 
 cifs-$(CONFIG_CIFS_FSCACHE) += fscache.o cache.o
+
+cifs-$(CONFIG_CIFS_SMB2) += smb2ops.o
diff --git a/fs/cifs/README b/fs/cifs/README
index b7d782b..22ab7b5 100644
--- a/fs/cifs/README
+++ b/fs/cifs/README
@@ -608,11 +608,6 @@
 			in the kernel configuration.
 
 Configuration pseudo-files:
-MultiuserMount		If set to one, more than one CIFS session to 
-			the same server ip address can be established
-			if more than one uid accesses the same mount
-			point and if the uids user/password mapping
-			information is available. (default is 0)
 PacketSigningEnabled	If set to one, cifs packet signing is enabled
 			and will be used if the server requires 
 			it.  If set to two, cifs packet signing is
diff --git a/fs/cifs/cifs_debug.c b/fs/cifs/cifs_debug.c
index 2704646..e814052 100644
--- a/fs/cifs/cifs_debug.c
+++ b/fs/cifs/cifs_debug.c
@@ -57,19 +57,21 @@
 	}
 }
 
-#ifdef CONFIG_CIFS_DEBUG2
 void cifs_dump_detail(void *buf)
 {
+#ifdef CONFIG_CIFS_DEBUG2
 	struct smb_hdr *smb = (struct smb_hdr *)buf;
 
 	cERROR(1, "Cmd: %d Err: 0x%x Flags: 0x%x Flgs2: 0x%x Mid: %d Pid: %d",
 		  smb->Command, smb->Status.CifsError,
 		  smb->Flags, smb->Flags2, smb->Mid, smb->Pid);
 	cERROR(1, "smb buf %p len %d", smb, smbCalcSize(smb));
+#endif /* CONFIG_CIFS_DEBUG2 */
 }
 
 void cifs_dump_mids(struct TCP_Server_Info *server)
 {
+#ifdef CONFIG_CIFS_DEBUG2
 	struct list_head *tmp;
 	struct mid_q_entry *mid_entry;
 
@@ -102,8 +104,8 @@
 		}
 	}
 	spin_unlock(&GlobalMid_Lock);
-}
 #endif /* CONFIG_CIFS_DEBUG2 */
+}
 
 #ifdef CONFIG_PROC_FS
 static int cifs_debug_data_proc_show(struct seq_file *m, void *v)
@@ -420,7 +422,6 @@
 static const struct file_operations cifsFYI_proc_fops;
 static const struct file_operations cifs_lookup_cache_proc_fops;
 static const struct file_operations traceSMB_proc_fops;
-static const struct file_operations cifs_multiuser_mount_proc_fops;
 static const struct file_operations cifs_security_flags_proc_fops;
 static const struct file_operations cifs_linux_ext_proc_fops;
 
@@ -440,8 +441,6 @@
 	proc_create("traceSMB", 0, proc_fs_cifs, &traceSMB_proc_fops);
 	proc_create("LinuxExtensionsEnabled", 0, proc_fs_cifs,
 		    &cifs_linux_ext_proc_fops);
-	proc_create("MultiuserMount", 0, proc_fs_cifs,
-		    &cifs_multiuser_mount_proc_fops);
 	proc_create("SecurityFlags", 0, proc_fs_cifs,
 		    &cifs_security_flags_proc_fops);
 	proc_create("LookupCacheEnabled", 0, proc_fs_cifs,
@@ -460,7 +459,6 @@
 #ifdef CONFIG_CIFS_STATS
 	remove_proc_entry("Stats", proc_fs_cifs);
 #endif
-	remove_proc_entry("MultiuserMount", proc_fs_cifs);
 	remove_proc_entry("SecurityFlags", proc_fs_cifs);
 	remove_proc_entry("LinuxExtensionsEnabled", proc_fs_cifs);
 	remove_proc_entry("LookupCacheEnabled", proc_fs_cifs);
@@ -617,52 +615,6 @@
 	.write		= traceSMB_proc_write,
 };
 
-static int cifs_multiuser_mount_proc_show(struct seq_file *m, void *v)
-{
-	seq_printf(m, "%d\n", multiuser_mount);
-	return 0;
-}
-
-static int cifs_multiuser_mount_proc_open(struct inode *inode, struct file *fh)
-{
-	return single_open(fh, cifs_multiuser_mount_proc_show, NULL);
-}
-
-static ssize_t cifs_multiuser_mount_proc_write(struct file *file,
-		const char __user *buffer, size_t count, loff_t *ppos)
-{
-	char c;
-	int rc;
-	static bool warned;
-
-	rc = get_user(c, buffer);
-	if (rc)
-		return rc;
-	if (c == '0' || c == 'n' || c == 'N')
-		multiuser_mount = 0;
-	else if (c == '1' || c == 'y' || c == 'Y') {
-		multiuser_mount = 1;
-		if (!warned) {
-			warned = true;
-			printk(KERN_WARNING "CIFS VFS: The legacy multiuser "
-				"mount code is scheduled to be deprecated in "
-				"3.5. Please switch to using the multiuser "
-				"mount option.");
-		}
-	}
-
-	return count;
-}
-
-static const struct file_operations cifs_multiuser_mount_proc_fops = {
-	.owner		= THIS_MODULE,
-	.open		= cifs_multiuser_mount_proc_open,
-	.read		= seq_read,
-	.llseek		= seq_lseek,
-	.release	= single_release,
-	.write		= cifs_multiuser_mount_proc_write,
-};
-
 static int cifs_security_flags_proc_show(struct seq_file *m, void *v)
 {
 	seq_printf(m, "0x%x\n", global_secflags);
diff --git a/fs/cifs/cifs_debug.h b/fs/cifs/cifs_debug.h
index 566e0ae..c0c68bb 100644
--- a/fs/cifs/cifs_debug.h
+++ b/fs/cifs/cifs_debug.h
@@ -24,10 +24,10 @@
 #define _H_CIFS_DEBUG
 
 void cifs_dump_mem(char *label, void *data, int length);
-#ifdef CONFIG_CIFS_DEBUG2
-#define DBG2 2
 void cifs_dump_detail(void *);
 void cifs_dump_mids(struct TCP_Server_Info *);
+#ifdef CONFIG_CIFS_DEBUG2
+#define DBG2 2
 #else
 #define DBG2 0
 #endif
diff --git a/fs/cifs/cifsfs.c b/fs/cifs/cifsfs.c
index 541ef81..8b6e344 100644
--- a/fs/cifs/cifsfs.c
+++ b/fs/cifs/cifsfs.c
@@ -56,7 +56,6 @@
 bool enable_oplocks = true;
 unsigned int linuxExtEnabled = 1;
 unsigned int lookupCacheEnabled = 1;
-unsigned int multiuser_mount = 0;
 unsigned int global_secflags = CIFSSEC_DEF;
 /* unsigned int ntlmv2_support = 0; */
 unsigned int sign_CIFS_PDUs = 1;
@@ -125,7 +124,7 @@
 		goto out_no_root;
 	}
 
-	/* do that *after* d_alloc_root() - we want NULL ->d_op for root here */
+	/* do that *after* d_make_root() - we want NULL ->d_op for root here */
 	if (cifs_sb_master_tcon(cifs_sb)->nocase)
 		sb->s_d_op = &cifs_ci_dentry_ops;
 	else
@@ -272,7 +271,7 @@
 cifs_evict_inode(struct inode *inode)
 {
 	truncate_inode_pages(&inode->i_data, 0);
-	end_writeback(inode);
+	clear_inode(inode);
 	cifs_fscache_release_inode_cookie(inode);
 }
 
@@ -329,6 +328,19 @@
 		seq_printf(s, "i");
 }
 
+static void
+cifs_show_cache_flavor(struct seq_file *s, struct cifs_sb_info *cifs_sb)
+{
+	seq_printf(s, ",cache=");
+
+	if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_STRICT_IO)
+		seq_printf(s, "strict");
+	else if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_DIRECT_IO)
+		seq_printf(s, "none");
+	else
+		seq_printf(s, "loose");
+}
+
 /*
  * cifs_show_options() is for displaying mount options in /proc/mounts.
  * Not all settable options are displayed but most of the important
@@ -342,7 +354,9 @@
 	struct sockaddr *srcaddr;
 	srcaddr = (struct sockaddr *)&tcon->ses->server->srcaddr;
 
+	seq_printf(s, ",vers=%s", tcon->ses->server->vals->version_string);
 	cifs_show_security(s, tcon->ses->server);
+	cifs_show_cache_flavor(s, cifs_sb);
 
 	seq_printf(s, ",unc=%s", tcon->treeName);
 
@@ -408,8 +422,6 @@
 		seq_printf(s, ",rwpidforward");
 	if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NOPOSIXBRL)
 		seq_printf(s, ",forcemand");
-	if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_DIRECT_IO)
-		seq_printf(s, ",directio");
 	if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NO_XATTR)
 		seq_printf(s, ",nouser_xattr");
 	if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_MAP_SPECIAL_CHR)
@@ -432,8 +444,6 @@
 		seq_printf(s, ",nostrictsync");
 	if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NO_PERM)
 		seq_printf(s, ",noperm");
-	if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_STRICT_IO)
-		seq_printf(s, ",strictcache");
 	if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_CIFS_BACKUPUID)
 		seq_printf(s, ",backupuid=%u", cifs_sb->mnt_backupuid);
 	if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_CIFS_BACKUPGID)
@@ -945,7 +955,6 @@
 	struct cifsInodeInfo *cifsi = inode;
 
 	inode_init_once(&cifsi->vfs_inode);
-	INIT_LIST_HEAD(&cifsi->llist);
 	mutex_init(&cifsi->lock_mutex);
 }
 
diff --git a/fs/cifs/cifsglob.h b/fs/cifs/cifsglob.h
index 4ff6313..6df0cbe 100644
--- a/fs/cifs/cifsglob.h
+++ b/fs/cifs/cifsglob.h
@@ -43,6 +43,7 @@
 
 #define CIFS_MIN_RCV_POOL 4
 
+#define MAX_REOPEN_ATT	5 /* maximum number of attempts to reopen a file */
 /*
  * default attribute cache timeout (jiffies)
  */
@@ -150,6 +151,58 @@
  *****************************************************************
  */
 
+enum smb_version {
+	Smb_1 = 1,
+	Smb_21,
+};
+
+struct mid_q_entry;
+struct TCP_Server_Info;
+struct cifsFileInfo;
+struct cifs_ses;
+
+struct smb_version_operations {
+	int (*send_cancel)(struct TCP_Server_Info *, void *,
+			   struct mid_q_entry *);
+	bool (*compare_fids)(struct cifsFileInfo *, struct cifsFileInfo *);
+	/* setup request: allocate mid, sign message */
+	int (*setup_request)(struct cifs_ses *, struct kvec *, unsigned int,
+			     struct mid_q_entry **);
+	/* check response: verify signature, map error */
+	int (*check_receive)(struct mid_q_entry *, struct TCP_Server_Info *,
+			     bool);
+	void (*add_credits)(struct TCP_Server_Info *, const unsigned int);
+	void (*set_credits)(struct TCP_Server_Info *, const int);
+	int * (*get_credits_field)(struct TCP_Server_Info *);
+	__u64 (*get_next_mid)(struct TCP_Server_Info *);
+	/* data offset from read response message */
+	unsigned int (*read_data_offset)(char *);
+	/* data length from read response message */
+	unsigned int (*read_data_length)(char *);
+	/* map smb to linux error */
+	int (*map_error)(char *, bool);
+	/* find mid corresponding to the response message */
+	struct mid_q_entry * (*find_mid)(struct TCP_Server_Info *, char *);
+	void (*dump_detail)(void *);
+	/* verify the message */
+	int (*check_message)(char *, unsigned int);
+	bool (*is_oplock_break)(char *, struct TCP_Server_Info *);
+};
+
+struct smb_version_values {
+	char		*version_string;
+	__u32		large_lock_type;
+	__u32		exclusive_lock_type;
+	__u32		shared_lock_type;
+	__u32		unlock_lock_type;
+	size_t		header_size;
+	size_t		max_header_size;
+	size_t		read_rsp_size;
+};
+
+#define HEADER_SIZE(server) (server->vals->header_size)
+#define MAX_HEADER_SIZE(server) (server->vals->max_header_size)
+
 struct smb_vol {
 	char *username;
 	char *password;
@@ -205,6 +258,8 @@
 	bool sockopt_tcp_nodelay:1;
 	unsigned short int port;
 	unsigned long actimeo; /* attribute cache timeout (jiffies) */
+	struct smb_version_operations *ops;
+	struct smb_version_values *vals;
 	char *prepath;
 	struct sockaddr_storage srcaddr; /* allow binding to a local IP */
 	struct nls_table *local_nls;
@@ -242,6 +297,8 @@
 	int srv_count; /* reference counter */
 	/* 15 character server name + 0x20 16th byte indicating type = srv */
 	char server_RFC1001_name[RFC1001_NAME_LEN_WITH_NULL];
+	struct smb_version_operations	*ops;
+	struct smb_version_values	*vals;
 	enum statusEnum tcpStatus; /* what we think the status is */
 	char *hostname; /* hostname portion of UNC string */
 	struct socket *ssocket;
@@ -321,16 +378,6 @@
 	return num;
 }
 
-static inline int*
-get_credits_field(struct TCP_Server_Info *server)
-{
-	/*
-	 * This will change to switch statement when we reserve slots for echos
-	 * and oplock breaks.
-	 */
-	return &server->credits;
-}
-
 static inline bool
 has_credits(struct TCP_Server_Info *server, int *credits)
 {
@@ -341,16 +388,22 @@
 	return num > 0;
 }
 
-static inline size_t
-header_size(void)
+static inline void
+add_credits(struct TCP_Server_Info *server, const unsigned int add)
 {
-	return sizeof(struct smb_hdr);
+	server->ops->add_credits(server, add);
 }
 
-static inline size_t
-max_header_size(void)
+static inline void
+set_credits(struct TCP_Server_Info *server, const int val)
 {
-	return MAX_CIFS_HDR_SIZE;
+	server->ops->set_credits(server, val);
+}
+
+static inline __u64
+get_next_mid(struct TCP_Server_Info *server)
+{
+	return server->ops->get_next_mid(server);
 }
 
 /*
@@ -547,8 +600,7 @@
 	__u64 offset;
 	__u64 length;
 	__u32 pid;
-	__u8 type;
-	__u16 netfid;
+	__u32 type;
 };
 
 /*
@@ -573,6 +625,10 @@
 struct cifsFileInfo {
 	struct list_head tlist;	/* pointer to next fid owned by tcon */
 	struct list_head flist;	/* next fid (file instance) for this inode */
+	struct list_head llist;	/*
+				 * brlocks held by this fid, protected by
+				 * lock_mutex from cifsInodeInfo structure
+				 */
 	unsigned int uid;	/* allows finding which FileInfo structure */
 	__u32 pid;		/* process id who opened file */
 	__u16 netfid;		/* file id from remote */
@@ -615,9 +671,12 @@
  */
 
 struct cifsInodeInfo {
-	struct list_head llist;		/* brlocks for this inode */
 	bool can_cache_brlcks;
-	struct mutex lock_mutex;	/* protect two fields above */
+	struct mutex lock_mutex;	/*
+					 * protect the field above and the
+					 * llist of every cifsFileInfo structure
+					 * on openFileList
+					 */
 	/* BB add in lists for dirty pages i.e. write caching info for oplock */
 	struct list_head openFileList;
 	__u32 cifsAttrs; /* e.g. DOS archive bit, sparse, compressed, system */
@@ -703,7 +762,6 @@
 
 #endif
 
-struct mid_q_entry;
 
 /*
  * This is the prototype for the mid receive function. This function is for
@@ -1042,12 +1100,7 @@
 GLOBAL_EXTERN atomic_t midCount;
 
 /* Misc globals */
-GLOBAL_EXTERN unsigned int multiuser_mount; /* if enabled allows new sessions
-				to be established on existing mount if we
-				have the uid/password or Kerberos credential
-				or equivalent for current user */
-/* enable or disable oplocks */
-GLOBAL_EXTERN bool enable_oplocks;
+GLOBAL_EXTERN bool enable_oplocks; /* enable or disable oplocks */
 GLOBAL_EXTERN unsigned int lookupCacheEnabled;
 GLOBAL_EXTERN unsigned int global_secflags;	/* if on, session setup sent
 				with more secure ntlmssp2 challenge/resp */
@@ -1074,4 +1127,11 @@
 extern const struct slow_work_ops cifs_oplock_break_ops;
 extern struct workqueue_struct *cifsiod_wq;
 
+/* Operations for different SMB versions */
+#define SMB1_VERSION_STRING	"1.0"
+extern struct smb_version_operations smb1_operations;
+extern struct smb_version_values smb1_values;
+#define SMB21_VERSION_STRING	"2.1"
+extern struct smb_version_operations smb21_operations;
+extern struct smb_version_values smb21_values;
 #endif	/* _CIFS_GLOB_H */
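
The smb_version_operations/smb_version_values pair and the inline wrappers
added above replace hard-coded SMB1 behaviour with per-dialect dispatch: the
generic server code only ever calls through server->ops and server->vals, and
smb1ops.c / smb2ops.c fill in the tables.  A compilable, free-standing sketch
of the pattern (names and values below are simplified stand-ins, not the cifs
definitions):

    #include <stddef.h>
    #include <stdio.h>

    struct server;

    struct smb_version_operations {
            unsigned long long (*get_next_mid)(struct server *);
    };

    struct smb_version_values {
            const char *version_string;
            size_t header_size;
    };

    struct server {
            const struct smb_version_operations *ops;
            const struct smb_version_values *vals;
            unsigned long long cur_mid;
    };

    static unsigned long long smb1_get_next_mid(struct server *srv)
    {
            return ++srv->cur_mid;  /* simplified; the real code is more careful */
    }

    static const struct smb_version_operations smb1_ops = {
            .get_next_mid = smb1_get_next_mid,
    };

    static const struct smb_version_values smb1_vals = {
            .version_string = "1.0",
            .header_size    = 32,   /* illustrative value only */
    };

    /* generic code never names a dialect directly */
    static unsigned long long get_next_mid(struct server *srv)
    {
            return srv->ops->get_next_mid(srv);
    }

    int main(void)
    {
            struct server srv = { &smb1_ops, &smb1_vals, 0 };

            printf("vers=%s mid=%llu\n", srv.vals->version_string,
                   get_next_mid(&srv));
            return 0;
    }

Supporting the SMB2.1 dialect then amounts to supplying a second pair of tables
and pointing server->ops/vals at them, which is what the new vers= mount option
in connect.c selects.
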
diff --git a/fs/cifs/cifsproto.h b/fs/cifs/cifsproto.h
index 96192c1..0a6cbfe 100644
--- a/fs/cifs/cifsproto.h
+++ b/fs/cifs/cifsproto.h
@@ -78,6 +78,8 @@
 			int * /* bytes returned */ , const int long_op);
 extern int SendReceiveNoRsp(const unsigned int xid, struct cifs_ses *ses,
 			    char *in_buf, int flags);
+extern int cifs_setup_request(struct cifs_ses *, struct kvec *, unsigned int,
+			      struct mid_q_entry **);
 extern int cifs_check_receive(struct mid_q_entry *mid,
 			struct TCP_Server_Info *server, bool log_error);
 extern int SendReceive2(const unsigned int /* xid */ , struct cifs_ses *,
@@ -88,9 +90,6 @@
 			struct smb_hdr *in_buf ,
 			struct smb_hdr *out_buf,
 			int *bytes_returned);
-extern void cifs_add_credits(struct TCP_Server_Info *server,
-			     const unsigned int add);
-extern void cifs_set_credits(struct TCP_Server_Info *server, const int val);
 extern int checkSMB(char *buf, unsigned int length);
 extern bool is_valid_oplock_break(char *, struct TCP_Server_Info *);
 extern bool backup_cred(struct cifs_sb_info *);
@@ -115,7 +114,6 @@
 				void **request_buf);
 extern int CIFS_SessSetup(unsigned int xid, struct cifs_ses *ses,
 			     const struct nls_table *nls_cp);
-extern __u64 GetNextMid(struct TCP_Server_Info *server);
 extern struct timespec cifs_NTtimeToUnix(__le64 utc_nanoseconds_since_1601);
 extern u64 cifs_UnixTimeToNT(struct timespec);
 extern struct timespec cnvrtDosUnixTm(__le16 le_date, __le16 le_time,
@@ -192,11 +190,13 @@
 
 extern int CIFSFindFirst(const int xid, struct cifs_tcon *tcon,
 		const char *searchName, const struct nls_table *nls_codepage,
-		__u16 *searchHandle, struct cifs_search_info *psrch_inf,
+		__u16 *searchHandle, __u16 search_flags,
+		struct cifs_search_info *psrch_inf,
 		int map, const char dirsep);
 
 extern int CIFSFindNext(const int xid, struct cifs_tcon *tcon,
-		__u16 searchHandle, struct cifs_search_info *psrch_inf);
+		__u16 searchHandle, __u16 search_flags,
+		struct cifs_search_info *psrch_inf);
 
 extern int CIFSFindClose(const int, struct cifs_tcon *tcon,
 			const __u16 search_handle);
@@ -464,6 +464,9 @@
 
 /* asynchronous read support */
 struct cifs_readdata {
+	struct kref			refcount;
+	struct list_head		list;
+	struct completion		done;
 	struct cifsFileInfo		*cfile;
 	struct address_space		*mapping;
 	__u64				offset;
@@ -472,12 +475,13 @@
 	int				result;
 	struct list_head		pages;
 	struct work_struct		work;
+	int (*marshal_iov) (struct cifs_readdata *rdata,
+			    unsigned int remaining);
 	unsigned int			nr_iov;
 	struct kvec			iov[1];
 };
 
-struct cifs_readdata *cifs_readdata_alloc(unsigned int nr_pages);
-void cifs_readdata_free(struct cifs_readdata *rdata);
+void cifs_readdata_release(struct kref *refcount);
 int cifs_async_readv(struct cifs_readdata *rdata);
 
 /* asynchronous write support */
diff --git a/fs/cifs/cifssmb.c b/fs/cifs/cifssmb.c
index da2f544..5b40073 100644
--- a/fs/cifs/cifssmb.c
+++ b/fs/cifs/cifssmb.c
@@ -87,7 +87,6 @@
 #endif /* CIFS_POSIX */
 
 /* Forward declarations */
-static void cifs_readv_complete(struct work_struct *work);
 
 /* Mark as invalid, all open files on tree connections since they
    were closed when session to server was lost */
@@ -269,7 +268,7 @@
 		return rc;
 
 	buffer = (struct smb_hdr *)*request_buf;
-	buffer->Mid = GetNextMid(ses->server);
+	buffer->Mid = get_next_mid(ses->server);
 	if (ses->capabilities & CAP_UNICODE)
 		buffer->Flags2 |= SMBFLG2_UNICODE;
 	if (ses->capabilities & CAP_STATUS32)
@@ -403,7 +402,7 @@
 
 	cFYI(1, "secFlags 0x%x", secFlags);
 
-	pSMB->hdr.Mid = GetNextMid(server);
+	pSMB->hdr.Mid = get_next_mid(server);
 	pSMB->hdr.Flags2 |= (SMBFLG2_UNICODE | SMBFLG2_ERR_STATUS);
 
 	if ((secFlags & CIFSSEC_MUST_KRB5) == CIFSSEC_MUST_KRB5)
@@ -461,7 +460,7 @@
 		server->maxReq = min_t(unsigned int,
 				       le16_to_cpu(rsp->MaxMpxCount),
 				       cifs_max_pending);
-		cifs_set_credits(server, server->maxReq);
+		set_credits(server, server->maxReq);
 		server->maxBuf = le16_to_cpu(rsp->MaxBufSize);
 		server->max_vcs = le16_to_cpu(rsp->MaxNumberVcs);
 		/* even though we do not use raw we might as well set this
@@ -569,7 +568,7 @@
 	   little endian */
 	server->maxReq = min_t(unsigned int, le16_to_cpu(pSMBr->MaxMpxCount),
 			       cifs_max_pending);
-	cifs_set_credits(server, server->maxReq);
+	set_credits(server, server->maxReq);
 	/* probably no need to store and check maxvcs */
 	server->maxBuf = le32_to_cpu(pSMBr->MaxBufferSize);
 	server->max_rw = le32_to_cpu(pSMBr->MaxRawSize);
@@ -721,7 +720,7 @@
 	struct TCP_Server_Info *server = mid->callback_data;
 
 	DeleteMidQEntry(mid);
-	cifs_add_credits(server, 1);
+	add_credits(server, 1);
 }
 
 int
@@ -783,7 +782,7 @@
 		return rc;
 	}
 
-	pSMB->hdr.Mid = GetNextMid(ses->server);
+	pSMB->hdr.Mid = get_next_mid(ses->server);
 
 	if (ses->server->sec_mode &
 		   (SECMODE_SIGN_REQUIRED | SECMODE_SIGN_ENABLED))
@@ -1385,28 +1384,6 @@
 	return rc;
 }
 
-struct cifs_readdata *
-cifs_readdata_alloc(unsigned int nr_pages)
-{
-	struct cifs_readdata *rdata;
-
-	/* readdata + 1 kvec for each page */
-	rdata = kzalloc(sizeof(*rdata) +
-			sizeof(struct kvec) * nr_pages, GFP_KERNEL);
-	if (rdata != NULL) {
-		INIT_WORK(&rdata->work, cifs_readv_complete);
-		INIT_LIST_HEAD(&rdata->pages);
-	}
-	return rdata;
-}
-
-void
-cifs_readdata_free(struct cifs_readdata *rdata)
-{
-	cifsFileInfo_put(rdata->cfile);
-	kfree(rdata);
-}
-
 /*
  * Discard any remaining data in the current SMB. To do this, we borrow the
  * current bigbuf.
@@ -1423,7 +1400,7 @@
 
 		length = cifs_read_from_socket(server, server->bigbuf,
 				min_t(unsigned int, remaining,
-					CIFSMaxBufSize + max_header_size()));
+				    CIFSMaxBufSize + MAX_HEADER_SIZE(server)));
 		if (length < 0)
 			return length;
 		server->total_read += length;
@@ -1434,38 +1411,14 @@
 	return 0;
 }
 
-static inline size_t
-read_rsp_size(void)
-{
-	return sizeof(READ_RSP);
-}
-
-static inline unsigned int
-read_data_offset(char *buf)
-{
-	READ_RSP *rsp = (READ_RSP *)buf;
-	return le16_to_cpu(rsp->DataOffset);
-}
-
-static inline unsigned int
-read_data_length(char *buf)
-{
-	READ_RSP *rsp = (READ_RSP *)buf;
-	return (le16_to_cpu(rsp->DataLengthHigh) << 16) +
-	       le16_to_cpu(rsp->DataLength);
-}
-
 static int
 cifs_readv_receive(struct TCP_Server_Info *server, struct mid_q_entry *mid)
 {
 	int length, len;
-	unsigned int data_offset, remaining, data_len;
+	unsigned int data_offset, data_len;
 	struct cifs_readdata *rdata = mid->callback_data;
 	char *buf = server->smallbuf;
 	unsigned int buflen = get_rfc1002_length(buf) + 4;
-	u64 eof;
-	pgoff_t eof_index;
-	struct page *page, *tpage;
 
 	cFYI(1, "%s: mid=%llu offset=%llu bytes=%u", __func__,
 		mid->mid, rdata->offset, rdata->bytes);
@@ -1475,9 +1428,10 @@
 	 * can if there's not enough data. At this point, we've read down to
 	 * the Mid.
 	 */
-	len = min_t(unsigned int, buflen, read_rsp_size()) - header_size() + 1;
+	len = min_t(unsigned int, buflen, server->vals->read_rsp_size) -
+							HEADER_SIZE(server) + 1;
 
-	rdata->iov[0].iov_base = buf + header_size() - 1;
+	rdata->iov[0].iov_base = buf + HEADER_SIZE(server) - 1;
 	rdata->iov[0].iov_len = len;
 
 	length = cifs_readv_from_socket(server, rdata->iov, 1, len);
@@ -1486,7 +1440,7 @@
 	server->total_read += length;
 
 	/* Was the SMB read successful? */
-	rdata->result = map_smb_to_linux_error(buf, false);
+	rdata->result = server->ops->map_error(buf, false);
 	if (rdata->result != 0) {
 		cFYI(1, "%s: server returned error %d", __func__,
 			rdata->result);
@@ -1494,14 +1448,15 @@
 	}
 
 	/* Is there enough to get to the rest of the READ_RSP header? */
-	if (server->total_read < read_rsp_size()) {
+	if (server->total_read < server->vals->read_rsp_size) {
 		cFYI(1, "%s: server returned short header. got=%u expected=%zu",
-			__func__, server->total_read, read_rsp_size());
+			__func__, server->total_read,
+			server->vals->read_rsp_size);
 		rdata->result = -EIO;
 		return cifs_readv_discard(server, mid);
 	}
 
-	data_offset = read_data_offset(buf) + 4;
+	data_offset = server->ops->read_data_offset(buf) + 4;
 	if (data_offset < server->total_read) {
 		/*
 		 * win2k8 sometimes sends an offset of 0 when the read
@@ -1540,7 +1495,7 @@
 		rdata->iov[0].iov_base, rdata->iov[0].iov_len);
 
 	/* how much data is in the response? */
-	data_len = read_data_length(buf);
+	data_len = server->ops->read_data_length(buf);
 	if (data_offset + data_len > buflen) {
 		/* data_len is corrupt -- discard frame */
 		rdata->result = -EIO;
@@ -1548,64 +1503,8 @@
 	}
 
 	/* marshal up the page array */
-	len = 0;
-	remaining = data_len;
-	rdata->nr_iov = 1;
-
-	/* determine the eof that the server (probably) has */
-	eof = CIFS_I(rdata->mapping->host)->server_eof;
-	eof_index = eof ? (eof - 1) >> PAGE_CACHE_SHIFT : 0;
-	cFYI(1, "eof=%llu eof_index=%lu", eof, eof_index);
-
-	list_for_each_entry_safe(page, tpage, &rdata->pages, lru) {
-		if (remaining >= PAGE_CACHE_SIZE) {
-			/* enough data to fill the page */
-			rdata->iov[rdata->nr_iov].iov_base = kmap(page);
-			rdata->iov[rdata->nr_iov].iov_len = PAGE_CACHE_SIZE;
-			cFYI(1, "%u: idx=%lu iov_base=%p iov_len=%zu",
-				rdata->nr_iov, page->index,
-				rdata->iov[rdata->nr_iov].iov_base,
-				rdata->iov[rdata->nr_iov].iov_len);
-			++rdata->nr_iov;
-			len += PAGE_CACHE_SIZE;
-			remaining -= PAGE_CACHE_SIZE;
-		} else if (remaining > 0) {
-			/* enough for partial page, fill and zero the rest */
-			rdata->iov[rdata->nr_iov].iov_base = kmap(page);
-			rdata->iov[rdata->nr_iov].iov_len = remaining;
-			cFYI(1, "%u: idx=%lu iov_base=%p iov_len=%zu",
-				rdata->nr_iov, page->index,
-				rdata->iov[rdata->nr_iov].iov_base,
-				rdata->iov[rdata->nr_iov].iov_len);
-			memset(rdata->iov[rdata->nr_iov].iov_base + remaining,
-				'\0', PAGE_CACHE_SIZE - remaining);
-			++rdata->nr_iov;
-			len += remaining;
-			remaining = 0;
-		} else if (page->index > eof_index) {
-			/*
-			 * The VFS will not try to do readahead past the
-			 * i_size, but it's possible that we have outstanding
-			 * writes with gaps in the middle and the i_size hasn't
-			 * caught up yet. Populate those with zeroed out pages
-			 * to prevent the VFS from repeatedly attempting to
-			 * fill them until the writes are flushed.
-			 */
-			zero_user(page, 0, PAGE_CACHE_SIZE);
-			list_del(&page->lru);
-			lru_cache_add_file(page);
-			flush_dcache_page(page);
-			SetPageUptodate(page);
-			unlock_page(page);
-			page_cache_release(page);
-		} else {
-			/* no need to hold page hostage */
-			list_del(&page->lru);
-			lru_cache_add_file(page);
-			unlock_page(page);
-			page_cache_release(page);
-		}
-	}
+	len = rdata->marshal_iov(rdata, data_len);
+	data_len -= len;
 
 	/* issue the read if we have any iovecs left to fill */
 	if (rdata->nr_iov > 1) {
@@ -1621,7 +1520,7 @@
 	rdata->bytes = length;
 
 	cFYI(1, "total_read=%u buflen=%u remaining=%u", server->total_read,
-		buflen, remaining);
+		buflen, data_len);
 
 	/* discard anything left over */
 	if (server->total_read < buflen)
@@ -1632,33 +1531,6 @@
 }
 
 static void
-cifs_readv_complete(struct work_struct *work)
-{
-	struct cifs_readdata *rdata = container_of(work,
-						struct cifs_readdata, work);
-	struct page *page, *tpage;
-
-	list_for_each_entry_safe(page, tpage, &rdata->pages, lru) {
-		list_del(&page->lru);
-		lru_cache_add_file(page);
-
-		if (rdata->result == 0) {
-			kunmap(page);
-			flush_dcache_page(page);
-			SetPageUptodate(page);
-		}
-
-		unlock_page(page);
-
-		if (rdata->result == 0)
-			cifs_readpage_to_fscache(rdata->mapping->host, page);
-
-		page_cache_release(page);
-	}
-	cifs_readdata_free(rdata);
-}
-
-static void
 cifs_readv_callback(struct mid_q_entry *mid)
 {
 	struct cifs_readdata *rdata = mid->callback_data;
@@ -1691,7 +1563,7 @@
 
 	queue_work(cifsiod_wq, &rdata->work);
 	DeleteMidQEntry(mid);
-	cifs_add_credits(server, 1);
+	add_credits(server, 1);
 }
 
 /* cifs_async_readv - send an async read, and set up mid to handle result */
@@ -1744,12 +1616,15 @@
 	rdata->iov[0].iov_base = smb;
 	rdata->iov[0].iov_len = be32_to_cpu(smb->hdr.smb_buf_length) + 4;
 
+	kref_get(&rdata->refcount);
 	rc = cifs_call_async(tcon->ses->server, rdata->iov, 1,
 			     cifs_readv_receive, cifs_readv_callback,
 			     rdata, false);
 
 	if (rc == 0)
 		cifs_stats_inc(&tcon->num_reads);
+	else
+		kref_put(&rdata->refcount, cifs_readdata_release);
 
 	cifs_small_buf_release(smb);
 	return rc;
@@ -2135,7 +2010,7 @@
 
 	queue_work(cifsiod_wq, &wdata->work);
 	DeleteMidQEntry(mid);
-	cifs_add_credits(tcon->ses->server, 1);
+	add_credits(tcon->ses->server, 1);
 }
 
 /* cifs_async_writev - send an async write, and set up mid to handle result */
@@ -4344,7 +4219,7 @@
 CIFSFindFirst(const int xid, struct cifs_tcon *tcon,
 	      const char *searchName,
 	      const struct nls_table *nls_codepage,
-	      __u16 *pnetfid,
+	      __u16 *pnetfid, __u16 search_flags,
 	      struct cifs_search_info *psrch_inf, int remap, const char dirsep)
 {
 /* level 257 SMB_ */
@@ -4416,8 +4291,7 @@
 	    cpu_to_le16(ATTR_READONLY | ATTR_HIDDEN | ATTR_SYSTEM |
 			ATTR_DIRECTORY);
 	pSMB->SearchCount = cpu_to_le16(CIFSMaxBufSize/sizeof(FILE_UNIX_INFO));
-	pSMB->SearchFlags = cpu_to_le16(CIFS_SEARCH_CLOSE_AT_END |
-		CIFS_SEARCH_RETURN_RESUME);
+	pSMB->SearchFlags = cpu_to_le16(search_flags);
 	pSMB->InformationLevel = cpu_to_le16(psrch_inf->info_level);
 
 	/* BB what should we set StorageType to? Does it matter? BB */
@@ -4487,8 +4361,8 @@
 	return rc;
 }
 
-int CIFSFindNext(const int xid, struct cifs_tcon *tcon,
-		 __u16 searchHandle, struct cifs_search_info *psrch_inf)
+int CIFSFindNext(const int xid, struct cifs_tcon *tcon, __u16 searchHandle,
+		 __u16 search_flags, struct cifs_search_info *psrch_inf)
 {
 	TRANSACTION2_FNEXT_REQ *pSMB = NULL;
 	TRANSACTION2_FNEXT_RSP *pSMBr = NULL;
@@ -4531,8 +4405,7 @@
 		cpu_to_le16(CIFSMaxBufSize / sizeof(FILE_UNIX_INFO));
 	pSMB->InformationLevel = cpu_to_le16(psrch_inf->info_level);
 	pSMB->ResumeKey = psrch_inf->resume_key;
-	pSMB->SearchFlags =
-	      cpu_to_le16(CIFS_SEARCH_CLOSE_AT_END | CIFS_SEARCH_RETURN_RESUME);
+	pSMB->SearchFlags = cpu_to_le16(search_flags);
 
 	name_len = psrch_inf->resume_name_len;
 	params += name_len;
@@ -4889,7 +4762,7 @@
 
 	/* server pointer checked in called function,
 	but should never be null here anyway */
-	pSMB->hdr.Mid = GetNextMid(ses->server);
+	pSMB->hdr.Mid = get_next_mid(ses->server);
 	pSMB->hdr.Tid = ses->ipc_tid;
 	pSMB->hdr.Uid = ses->Suid;
 	if (ses->capabilities & CAP_STATUS32)
diff --git a/fs/cifs/connect.c b/fs/cifs/connect.c
index e0b56d7..78db68a 100644
--- a/fs/cifs/connect.c
+++ b/fs/cifs/connect.c
@@ -1,7 +1,7 @@
 /*
  *   fs/cifs/connect.c
  *
- *   Copyright (C) International Business Machines  Corp., 2002,2009
+ *   Copyright (C) International Business Machines  Corp., 2002,2011
  *   Author(s): Steve French (sfrench@us.ibm.com)
  *
  *   This library is free software; you can redistribute it and/or modify
@@ -102,7 +102,7 @@
 	Opt_srcaddr, Opt_prefixpath,
 	Opt_iocharset, Opt_sockopt,
 	Opt_netbiosname, Opt_servern,
-	Opt_ver, Opt_sec,
+	Opt_ver, Opt_vers, Opt_sec, Opt_cache,
 
 	/* Mount options to be ignored */
 	Opt_ignore,
@@ -210,9 +210,9 @@
 	{ Opt_netbiosname, "netbiosname=%s" },
 	{ Opt_servern, "servern=%s" },
 	{ Opt_ver, "ver=%s" },
-	{ Opt_ver, "vers=%s" },
-	{ Opt_ver, "version=%s" },
+	{ Opt_vers, "vers=%s" },
 	{ Opt_sec, "sec=%s" },
+	{ Opt_cache, "cache=%s" },
 
 	{ Opt_ignore, "cred" },
 	{ Opt_ignore, "credentials" },
@@ -261,6 +261,26 @@
 	{ Opt_sec_err, NULL }
 };
 
+/* cache flavors */
+enum {
+	Opt_cache_loose,
+	Opt_cache_strict,
+	Opt_cache_none,
+	Opt_cache_err
+};
+
+static const match_table_t cifs_cacheflavor_tokens = {
+	{ Opt_cache_loose, "loose" },
+	{ Opt_cache_strict, "strict" },
+	{ Opt_cache_none, "none" },
+	{ Opt_cache_err, NULL }
+};
+
+static const match_table_t cifs_smb_version_tokens = {
+	{ Smb_1, SMB1_VERSION_STRING },
+	{ Smb_21, SMB21_VERSION_STRING },
+};
+
 static int ip_connect(struct TCP_Server_Info *server);
 static int generic_ip_connect(struct TCP_Server_Info *server);
 static void tlink_rb_insert(struct rb_root *root, struct tcon_link *new_tlink);
@@ -549,7 +569,7 @@
 		}
 	} else if (server->large_buf) {
 		/* we are reusing a dirty large buf, clear its start */
-		memset(server->bigbuf, 0, header_size());
+		memset(server->bigbuf, 0, HEADER_SIZE(server));
 	}
 
 	if (!server->smallbuf) {
@@ -563,7 +583,7 @@
 		/* beginning of smb buffer is cleared in our buf_get */
 	} else {
 		/* if existing small buf clear beginning */
-		memset(server->smallbuf, 0, header_size());
+		memset(server->smallbuf, 0, HEADER_SIZE(server));
 	}
 
 	return true;
@@ -764,25 +784,6 @@
 	return false;
 }
 
-static struct mid_q_entry *
-find_mid(struct TCP_Server_Info *server, char *buffer)
-{
-	struct smb_hdr *buf = (struct smb_hdr *)buffer;
-	struct mid_q_entry *mid;
-
-	spin_lock(&GlobalMid_Lock);
-	list_for_each_entry(mid, &server->pending_mid_q, qhead) {
-		if (mid->mid == buf->Mid &&
-		    mid->mid_state == MID_REQUEST_SUBMITTED &&
-		    le16_to_cpu(mid->command) == buf->Command) {
-			spin_unlock(&GlobalMid_Lock);
-			return mid;
-		}
-	}
-	spin_unlock(&GlobalMid_Lock);
-	return NULL;
-}
-
 void
 dequeue_mid(struct mid_q_entry *mid, bool malformed)
 {
@@ -934,7 +935,7 @@
 	unsigned int pdu_length = get_rfc1002_length(buf);
 
 	/* make sure this will fit in a large buffer */
-	if (pdu_length > CIFSMaxBufSize + max_header_size() - 4) {
+	if (pdu_length > CIFSMaxBufSize + MAX_HEADER_SIZE(server) - 4) {
 		cERROR(1, "SMB response too long (%u bytes)",
 			pdu_length);
 		cifs_reconnect(server);
@@ -950,8 +951,8 @@
 	}
 
 	/* now read the rest */
-	length = cifs_read_from_socket(server, buf + header_size() - 1,
-				       pdu_length - header_size() + 1 + 4);
+	length = cifs_read_from_socket(server, buf + HEADER_SIZE(server) - 1,
+				pdu_length - HEADER_SIZE(server) + 1 + 4);
 	if (length < 0)
 		return length;
 	server->total_read += length;
@@ -967,7 +968,7 @@
 	 * 48 bytes is enough to display the header and a little bit
 	 * into the payload for debugging purposes.
 	 */
-	length = checkSMB(buf, server->total_read);
+	length = server->ops->check_message(buf, server->total_read);
 	if (length != 0)
 		cifs_dump_mem("Bad SMB: ", buf,
 			min_t(unsigned int, server->total_read, 48));
@@ -1025,7 +1026,7 @@
 			continue;
 
 		/* make sure we have enough to get to the MID */
-		if (pdu_length < header_size() - 1 - 4) {
+		if (pdu_length < HEADER_SIZE(server) - 1 - 4) {
 			cERROR(1, "SMB response too short (%u bytes)",
 				pdu_length);
 			cifs_reconnect(server);
@@ -1035,12 +1036,12 @@
 
 		/* read down to the MID */
 		length = cifs_read_from_socket(server, buf + 4,
-					       header_size() - 1 - 4);
+					       HEADER_SIZE(server) - 1 - 4);
 		if (length < 0)
 			continue;
 		server->total_read += length;
 
-		mid_entry = find_mid(server, buf);
+		mid_entry = server->ops->find_mid(server, buf);
 
 		if (!mid_entry || !mid_entry->receive)
 			length = standard_receive3(server, mid_entry);
@@ -1057,12 +1058,15 @@
 		if (mid_entry != NULL) {
 			if (!mid_entry->multiRsp || mid_entry->multiEnd)
 				mid_entry->callback(mid_entry);
-		} else if (!is_valid_oplock_break(buf, server)) {
+		} else if (!server->ops->is_oplock_break ||
+			   !server->ops->is_oplock_break(buf, server)) {
 			cERROR(1, "No task to wake, unknown frame received! "
 				   "NumMids %d", atomic_read(&midCount));
-			cifs_dump_mem("Received Data is: ", buf, header_size());
+			cifs_dump_mem("Received Data is: ", buf,
+				      HEADER_SIZE(server));
 #ifdef CONFIG_CIFS_DEBUG2
-			cifs_dump_detail(buf);
+			if (server->ops->dump_detail)
+				server->ops->dump_detail(buf);
 			cifs_dump_mids(server);
 #endif /* CIFS_DEBUG2 */
 
@@ -1186,6 +1190,54 @@
 }
 
 static int
+cifs_parse_cache_flavor(char *value, struct smb_vol *vol)
+{
+	substring_t args[MAX_OPT_ARGS];
+
+	switch (match_token(value, cifs_cacheflavor_tokens, args)) {
+	case Opt_cache_loose:
+		vol->direct_io = false;
+		vol->strict_io = false;
+		break;
+	case Opt_cache_strict:
+		vol->direct_io = false;
+		vol->strict_io = true;
+		break;
+	case Opt_cache_none:
+		vol->direct_io = true;
+		vol->strict_io = false;
+		break;
+	default:
+		cERROR(1, "bad cache= option: %s", value);
+		return 1;
+	}
+	return 0;
+}
+
+static int
+cifs_parse_smb_version(char *value, struct smb_vol *vol)
+{
+	substring_t args[MAX_OPT_ARGS];
+
+	switch (match_token(value, cifs_smb_version_tokens, args)) {
+	case Smb_1:
+		vol->ops = &smb1_operations;
+		vol->vals = &smb1_values;
+		break;
+#ifdef CONFIG_CIFS_SMB2
+	case Smb_21:
+		vol->ops = &smb21_operations;
+		vol->vals = &smb21_values;
+		break;
+#endif
+	default:
+		cERROR(1, "Unknown vers= option specified: %s", value);
+		return 1;
+	}
+	return 0;
+}
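/*
 * Taken together, the two parsers above let a mount pick the protocol
 * dialect (vers=) and the data caching behaviour (cache=) explicitly,
 * for example (illustrative only -- the exact vers= strings come from
 * SMB1_VERSION_STRING and SMB21_VERSION_STRING, assumed here to be "1"
 * and "2.1", and the latter also requires CONFIG_CIFS_SMB2):
 *
 *	mount -t cifs //server/share /mnt -o username=foo,vers=1,cache=strict
 *
 * cache=loose clears both direct_io and strict_io, cache=strict sets
 * strict_io, and cache=none sets direct_io, mirroring the deprecated
 * "strictcache" and "directio" options handled further down.
 */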
+
+static int
 cifs_parse_mount_options(const char *mountdata, const char *devname,
 			 struct smb_vol *vol)
 {
@@ -1203,6 +1255,8 @@
 	char *string = NULL;
 	char *tmp_end, *value;
 	char delim;
+	bool cache_specified = false;
+	static bool cache_warned = false;
 
 	separator[0] = ',';
 	separator[1] = 0;
@@ -1236,6 +1290,10 @@
 
 	vol->actimeo = CIFS_DEF_ACTIMEO;
 
+	/* FIXME: add autonegotiation -- for now, SMB1 is default */
+	vol->ops = &smb1_operations;
+	vol->vals = &smb1_values;
+
 	if (!mountdata)
 		goto cifs_parse_mount_err;
 
@@ -1414,10 +1472,20 @@
 			vol->seal = 1;
 			break;
 		case Opt_direct:
-			vol->direct_io = 1;
+			cache_specified = true;
+			vol->direct_io = true;
+			vol->strict_io = false;
+			cERROR(1, "The \"directio\" option will be removed in "
+				  "3.7. Please switch to the \"cache=none\" "
+				  "option.");
 			break;
 		case Opt_strictcache:
-			vol->strict_io = 1;
+			cache_specified = true;
+			vol->direct_io = false;
+			vol->strict_io = true;
+			cERROR(1, "The \"strictcache\" option will be removed "
+				"in 3.7. Please switch to the \"cache=strict\" "
+				"option.");
 			break;
 		case Opt_noac:
 			printk(KERN_WARNING "CIFS: Mount option noac not "
@@ -1821,8 +1889,7 @@
 			if (string == NULL)
 				goto out_nomem;
 
-			if (strnicmp(string, "cifs", 4) == 0 ||
-			    strnicmp(string, "1", 1) == 0) {
+			if (strnicmp(string, "1", 1) == 0) {
 				/* This is the default */
 				break;
 			}
@@ -1830,6 +1897,14 @@
 			printk(KERN_WARNING "CIFS: Invalid version"
 					    " specified\n");
 			goto cifs_parse_mount_err;
+		case Opt_vers:
+			string = match_strdup(args);
+			if (string == NULL)
+				goto out_nomem;
+
+			if (cifs_parse_smb_version(string, vol) != 0)
+				goto cifs_parse_mount_err;
+			break;
 		case Opt_sec:
 			string = match_strdup(args);
 			if (string == NULL)
@@ -1838,6 +1913,15 @@
 			if (cifs_parse_security_flavors(string, vol) != 0)
 				goto cifs_parse_mount_err;
 			break;
+		case Opt_cache:
+			cache_specified = true;
+			string = match_strdup(args);
+			if (string == NULL)
+				goto out_nomem;
+
+			if (cifs_parse_cache_flavor(string, vol) != 0)
+				goto cifs_parse_mount_err;
+			break;
 		default:
 			/*
 			 * An option we don't recognize. Save it off for later
@@ -1881,6 +1965,14 @@
 		printk(KERN_NOTICE "CIFS: ignoring forcegid mount option "
 				   "specified with no gid= option.\n");
 
+	/* FIXME: remove this block in 3.7 */
+	if (!cache_specified && !cache_warned) {
+		cache_warned = true;
+		printk(KERN_NOTICE "CIFS: no cache= option specified, using "
+				   "\"cache=loose\". This default will change "
+				   "to \"cache=strict\" in 3.7.\n");
+	}
+
 	kfree(mountdata_copy);
 	return 0;
 
@@ -2041,6 +2133,9 @@
 static int match_server(struct TCP_Server_Info *server, struct sockaddr *addr,
 			 struct smb_vol *vol)
 {
+	if ((server->vals != vol->vals) || (server->ops != vol->ops))
+		return 0;
+
 	if (!net_eq(cifs_net_ns(server), current->nsproxy->net_ns))
 		return 0;
 
@@ -2163,6 +2258,8 @@
 		goto out_err;
 	}
 
+	tcp_ses->ops = volume_info->ops;
+	tcp_ses->vals = volume_info->vals;
 	cifs_set_net_ns(tcp_ses, get_net(current->nsproxy->net_ns));
 	tcp_ses->hostname = extract_hostname(volume_info->UNC);
 	if (IS_ERR(tcp_ses->hostname)) {
@@ -3569,6 +3666,7 @@
 	if (cifs_parse_mount_options(mount_data, devname, volume_info))
 		return -EINVAL;
 
+
 	if (volume_info->nullauth) {
 		cFYI(1, "Anonymous login");
 		kfree(volume_info->username);
@@ -3842,7 +3940,7 @@
 	header_assemble(smb_buffer, SMB_COM_TREE_CONNECT_ANDX,
 			NULL /*no tid */ , 4 /*wct */ );
 
-	smb_buffer->Mid = GetNextMid(ses->server);
+	smb_buffer->Mid = get_next_mid(ses->server);
 	smb_buffer->Uid = ses->Suid;
 	pSMB = (TCONX_REQ *) smb_buffer;
 	pSMBr = (TCONX_RSP *) smb_buffer_response;
@@ -4010,11 +4108,11 @@
 	if (server->maxBuf != 0)
 		return 0;
 
-	cifs_set_credits(server, 1);
+	set_credits(server, 1);
 	rc = CIFSSMBNegotiate(xid, ses);
 	if (rc == -EAGAIN) {
 		/* retry only once on 1st time connection */
-		cifs_set_credits(server, 1);
+		set_credits(server, 1);
 		rc = CIFSSMBNegotiate(xid, ses);
 		if (rc == -EAGAIN)
 			rc = -EHOSTDOWN;
diff --git a/fs/cifs/file.c b/fs/cifs/file.c
index 81725e9..513adbc 100644
--- a/fs/cifs/file.c
+++ b/fs/cifs/file.c
@@ -264,6 +264,7 @@
 	pCifsFile->tlink = cifs_get_tlink(tlink);
 	mutex_init(&pCifsFile->fh_mutex);
 	INIT_WORK(&pCifsFile->oplock_break, cifs_oplock_break);
+	INIT_LIST_HEAD(&pCifsFile->llist);
 
 	spin_lock(&cifs_file_list_lock);
 	list_add(&pCifsFile->tlist, &(tlink_tcon(tlink)->openFileList));
@@ -334,9 +335,7 @@
 	 * is closed anyway.
 	 */
 	mutex_lock(&cifsi->lock_mutex);
-	list_for_each_entry_safe(li, tmp, &cifsi->llist, llist) {
-		if (li->netfid != cifs_file->netfid)
-			continue;
+	list_for_each_entry_safe(li, tmp, &cifs_file->llist, llist) {
 		list_del(&li->llist);
 		cifs_del_lock_waiters(li);
 		kfree(li);
@@ -645,7 +644,7 @@
 }
 
 static struct cifsLockInfo *
-cifs_lock_init(__u64 offset, __u64 length, __u8 type, __u16 netfid)
+cifs_lock_init(__u64 offset, __u64 length, __u8 type)
 {
 	struct cifsLockInfo *lock =
 		kmalloc(sizeof(struct cifsLockInfo), GFP_KERNEL);
@@ -654,7 +653,6 @@
 	lock->offset = offset;
 	lock->length = length;
 	lock->type = type;
-	lock->netfid = netfid;
 	lock->pid = current->tgid;
 	INIT_LIST_HEAD(&lock->blist);
 	init_waitqueue_head(&lock->block_q);
@@ -672,19 +670,20 @@
 }
 
 static bool
-__cifs_find_lock_conflict(struct cifsInodeInfo *cinode, __u64 offset,
-			__u64 length, __u8 type, __u16 netfid,
-			struct cifsLockInfo **conf_lock)
+cifs_find_fid_lock_conflict(struct cifsFileInfo *cfile, __u64 offset,
+			    __u64 length, __u8 type, struct cifsFileInfo *cur,
+			    struct cifsLockInfo **conf_lock)
 {
-	struct cifsLockInfo *li, *tmp;
+	struct cifsLockInfo *li;
+	struct TCP_Server_Info *server = tlink_tcon(cfile->tlink)->ses->server;
 
-	list_for_each_entry_safe(li, tmp, &cinode->llist, llist) {
+	list_for_each_entry(li, &cfile->llist, llist) {
 		if (offset + length <= li->offset ||
 		    offset >= li->offset + li->length)
 			continue;
-		else if ((type & LOCKING_ANDX_SHARED_LOCK) &&
-			 ((netfid == li->netfid && current->tgid == li->pid) ||
-			  type == li->type))
+		else if ((type & server->vals->shared_lock_type) &&
+			 ((server->ops->compare_fids(cur, cfile) &&
+			   current->tgid == li->pid) || type == li->type))
 			continue;
 		else {
 			*conf_lock = li;
@@ -695,11 +694,23 @@
 }
 
 static bool
-cifs_find_lock_conflict(struct cifsInodeInfo *cinode, struct cifsLockInfo *lock,
-			struct cifsLockInfo **conf_lock)
+cifs_find_lock_conflict(struct cifsFileInfo *cfile, __u64 offset, __u64 length,
+			__u8 type, struct cifsLockInfo **conf_lock)
 {
-	return __cifs_find_lock_conflict(cinode, lock->offset, lock->length,
-					 lock->type, lock->netfid, conf_lock);
+	bool rc = false;
+	struct cifsFileInfo *fid, *tmp;
+	struct cifsInodeInfo *cinode = CIFS_I(cfile->dentry->d_inode);
+
+	spin_lock(&cifs_file_list_lock);
+	list_for_each_entry_safe(fid, tmp, &cinode->openFileList, flist) {
+		rc = cifs_find_fid_lock_conflict(fid, offset, length, type,
+						 cfile, conf_lock);
+		if (rc)
+			break;
+	}
+	spin_unlock(&cifs_file_list_lock);
+
+	return rc;
 }
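/*
 * With byte-range locks now tracked per open file (cfile->llist) rather
 * than per inode, a conflict check has to walk every open fid on the
 * inode (cinode->openFileList) and compare handles through the
 * dialect-specific server->ops->compare_fids() hook -- plain netfid
 * equality for SMB1, see cifs_compare_fids() in smb1ops.c -- instead of
 * comparing a netfid stored in the lock itself.
 */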
 
 /*
@@ -710,22 +721,24 @@
  * the server or 1 otherwise.
  */
 static int
-cifs_lock_test(struct cifsInodeInfo *cinode, __u64 offset, __u64 length,
-	       __u8 type, __u16 netfid, struct file_lock *flock)
+cifs_lock_test(struct cifsFileInfo *cfile, __u64 offset, __u64 length,
+	       __u8 type, struct file_lock *flock)
 {
 	int rc = 0;
 	struct cifsLockInfo *conf_lock;
+	struct cifsInodeInfo *cinode = CIFS_I(cfile->dentry->d_inode);
+	struct TCP_Server_Info *server = tlink_tcon(cfile->tlink)->ses->server;
 	bool exist;
 
 	mutex_lock(&cinode->lock_mutex);
 
-	exist = __cifs_find_lock_conflict(cinode, offset, length, type, netfid,
-					  &conf_lock);
+	exist = cifs_find_lock_conflict(cfile, offset, length, type,
+					&conf_lock);
 	if (exist) {
 		flock->fl_start = conf_lock->offset;
 		flock->fl_end = conf_lock->offset + conf_lock->length - 1;
 		flock->fl_pid = conf_lock->pid;
-		if (conf_lock->type & LOCKING_ANDX_SHARED_LOCK)
+		if (conf_lock->type & server->vals->shared_lock_type)
 			flock->fl_type = F_RDLCK;
 		else
 			flock->fl_type = F_WRLCK;
@@ -739,10 +752,11 @@
 }
 
 static void
-cifs_lock_add(struct cifsInodeInfo *cinode, struct cifsLockInfo *lock)
+cifs_lock_add(struct cifsFileInfo *cfile, struct cifsLockInfo *lock)
 {
+	struct cifsInodeInfo *cinode = CIFS_I(cfile->dentry->d_inode);
 	mutex_lock(&cinode->lock_mutex);
-	list_add_tail(&lock->llist, &cinode->llist);
+	list_add_tail(&lock->llist, &cfile->llist);
 	mutex_unlock(&cinode->lock_mutex);
 }
 
@@ -753,10 +767,11 @@
  * 3) -EACCES, if there is a lock that prevents us and wait is false.
  */
 static int
-cifs_lock_add_if(struct cifsInodeInfo *cinode, struct cifsLockInfo *lock,
+cifs_lock_add_if(struct cifsFileInfo *cfile, struct cifsLockInfo *lock,
 		 bool wait)
 {
 	struct cifsLockInfo *conf_lock;
+	struct cifsInodeInfo *cinode = CIFS_I(cfile->dentry->d_inode);
 	bool exist;
 	int rc = 0;
 
@@ -764,9 +779,10 @@
 	exist = false;
 	mutex_lock(&cinode->lock_mutex);
 
-	exist = cifs_find_lock_conflict(cinode, lock, &conf_lock);
+	exist = cifs_find_lock_conflict(cfile, lock->offset, lock->length,
+					lock->type, &conf_lock);
 	if (!exist && cinode->can_cache_brlcks) {
-		list_add_tail(&lock->llist, &cinode->llist);
+		list_add_tail(&lock->llist, &cfile->llist);
 		mutex_unlock(&cinode->lock_mutex);
 		return rc;
 	}
@@ -860,7 +876,7 @@
 	struct cifsLockInfo *li, *tmp;
 	struct cifs_tcon *tcon;
 	struct cifsInodeInfo *cinode = CIFS_I(cfile->dentry->d_inode);
-	unsigned int num, max_num;
+	unsigned int num, max_num, max_buf;
 	LOCKING_ANDX_RANGE *buf, *cur;
 	int types[] = {LOCKING_ANDX_LARGE_FILES,
 		       LOCKING_ANDX_SHARED_LOCK | LOCKING_ANDX_LARGE_FILES};
@@ -876,8 +892,19 @@
 		return rc;
 	}
 
-	max_num = (tcon->ses->server->maxBuf - sizeof(struct smb_hdr)) /
-		  sizeof(LOCKING_ANDX_RANGE);
+	/*
+	 * Accessing maxBuf is racy with cifs_reconnect - need to store value
+	 * and check it for zero before using.
+	 */
+	max_buf = tcon->ses->server->maxBuf;
+	if (!max_buf) {
+		mutex_unlock(&cinode->lock_mutex);
+		FreeXid(xid);
+		return -EINVAL;
+	}
+
+	max_num = (max_buf - sizeof(struct smb_hdr)) /
+						sizeof(LOCKING_ANDX_RANGE);
 	buf = kzalloc(max_num * sizeof(LOCKING_ANDX_RANGE), GFP_KERNEL);
 	if (!buf) {
 		mutex_unlock(&cinode->lock_mutex);
@@ -888,7 +915,7 @@
 	for (i = 0; i < 2; i++) {
 		cur = buf;
 		num = 0;
-		list_for_each_entry_safe(li, tmp, &cinode->llist, llist) {
+		list_for_each_entry_safe(li, tmp, &cfile->llist, llist) {
 			if (li->type != types[i])
 				continue;
 			cur->Pid = cpu_to_le16(li->pid);
@@ -898,7 +925,8 @@
 			cur->OffsetHigh = cpu_to_le32((u32)(li->offset>>32));
 			if (++num == max_num) {
 				stored_rc = cifs_lockv(xid, tcon, cfile->netfid,
-						       li->type, 0, num, buf);
+						       (__u8)li->type, 0, num,
+						       buf);
 				if (stored_rc)
 					rc = stored_rc;
 				cur = buf;
@@ -909,7 +937,7 @@
 
 		if (num) {
 			stored_rc = cifs_lockv(xid, tcon, cfile->netfid,
-					       types[i], 0, num, buf);
+					       (__u8)types[i], 0, num, buf);
 			if (stored_rc)
 				rc = stored_rc;
 		}
@@ -1053,8 +1081,8 @@
 }
 
 static void
-cifs_read_flock(struct file_lock *flock, __u8 *type, int *lock, int *unlock,
-		bool *wait_flag)
+cifs_read_flock(struct file_lock *flock, __u32 *type, int *lock, int *unlock,
+		bool *wait_flag, struct TCP_Server_Info *server)
 {
 	if (flock->fl_flags & FL_POSIX)
 		cFYI(1, "Posix");
@@ -1073,38 +1101,50 @@
 	    (~(FL_POSIX | FL_FLOCK | FL_SLEEP | FL_ACCESS | FL_LEASE)))
 		cFYI(1, "Unknown lock flags 0x%x", flock->fl_flags);
 
-	*type = LOCKING_ANDX_LARGE_FILES;
+	*type = server->vals->large_lock_type;
 	if (flock->fl_type == F_WRLCK) {
 		cFYI(1, "F_WRLCK ");
+		*type |= server->vals->exclusive_lock_type;
 		*lock = 1;
 	} else if (flock->fl_type == F_UNLCK) {
 		cFYI(1, "F_UNLCK");
+		*type |= server->vals->unlock_lock_type;
 		*unlock = 1;
 		/* Check if unlock includes more than one lock range */
 	} else if (flock->fl_type == F_RDLCK) {
 		cFYI(1, "F_RDLCK");
-		*type |= LOCKING_ANDX_SHARED_LOCK;
+		*type |= server->vals->shared_lock_type;
 		*lock = 1;
 	} else if (flock->fl_type == F_EXLCK) {
 		cFYI(1, "F_EXLCK");
+		*type |= server->vals->exclusive_lock_type;
 		*lock = 1;
 	} else if (flock->fl_type == F_SHLCK) {
 		cFYI(1, "F_SHLCK");
-		*type |= LOCKING_ANDX_SHARED_LOCK;
+		*type |= server->vals->shared_lock_type;
 		*lock = 1;
 	} else
 		cFYI(1, "Unknown type of lock");
 }
 
 static int
-cifs_getlk(struct file *file, struct file_lock *flock, __u8 type,
+cifs_mandatory_lock(int xid, struct cifsFileInfo *cfile, __u64 offset,
+		    __u64 length, __u32 type, int lock, int unlock, bool wait)
+{
+	return CIFSSMBLock(xid, tlink_tcon(cfile->tlink), cfile->netfid,
+			   current->tgid, length, offset, unlock, lock,
+			   (__u8)type, wait, 0);
+}
+
+static int
+cifs_getlk(struct file *file, struct file_lock *flock, __u32 type,
 	   bool wait_flag, bool posix_lck, int xid)
 {
 	int rc = 0;
 	__u64 length = 1 + flock->fl_end - flock->fl_start;
 	struct cifsFileInfo *cfile = (struct cifsFileInfo *)file->private_data;
 	struct cifs_tcon *tcon = tlink_tcon(cfile->tlink);
-	struct cifsInodeInfo *cinode = CIFS_I(cfile->dentry->d_inode);
+	struct TCP_Server_Info *server = tcon->ses->server;
 	__u16 netfid = cfile->netfid;
 
 	if (posix_lck) {
@@ -1114,7 +1154,7 @@
 		if (!rc)
 			return rc;
 
-		if (type & LOCKING_ANDX_SHARED_LOCK)
+		if (type & server->vals->shared_lock_type)
 			posix_lock_type = CIFS_RDLCK;
 		else
 			posix_lock_type = CIFS_WRLCK;
@@ -1124,38 +1164,35 @@
 		return rc;
 	}
 
-	rc = cifs_lock_test(cinode, flock->fl_start, length, type, netfid,
-			    flock);
+	rc = cifs_lock_test(cfile, flock->fl_start, length, type, flock);
 	if (!rc)
 		return rc;
 
 	/* BB we could chain these into one lock request BB */
-	rc = CIFSSMBLock(xid, tcon, netfid, current->tgid, length,
-			 flock->fl_start, 0, 1, type, 0, 0);
+	rc = cifs_mandatory_lock(xid, cfile, flock->fl_start, length, type,
+				 1, 0, false);
 	if (rc == 0) {
-		rc = CIFSSMBLock(xid, tcon, netfid, current->tgid,
-				 length, flock->fl_start, 1, 0,
-				 type, 0, 0);
+		rc = cifs_mandatory_lock(xid, cfile, flock->fl_start, length,
+					 type, 0, 1, false);
 		flock->fl_type = F_UNLCK;
 		if (rc != 0)
 			cERROR(1, "Error unlocking previously locked "
-				   "range %d during test of lock", rc);
+				  "range %d during test of lock", rc);
 		return 0;
 	}
 
-	if (type & LOCKING_ANDX_SHARED_LOCK) {
+	if (type & server->vals->shared_lock_type) {
 		flock->fl_type = F_WRLCK;
 		return 0;
 	}
 
-	rc = CIFSSMBLock(xid, tcon, netfid, current->tgid, length,
-			 flock->fl_start, 0, 1,
-			 type | LOCKING_ANDX_SHARED_LOCK, 0, 0);
+	rc = cifs_mandatory_lock(xid, cfile, flock->fl_start, length,
+				 type | server->vals->shared_lock_type, 1, 0,
+				 false);
 	if (rc == 0) {
-		rc = CIFSSMBLock(xid, tcon, netfid, current->tgid,
-				 length, flock->fl_start, 1, 0,
-				 type | LOCKING_ANDX_SHARED_LOCK,
-				 0, 0);
+		rc = cifs_mandatory_lock(xid, cfile, flock->fl_start, length,
+					 type | server->vals->shared_lock_type,
+					 0, 1, false);
 		flock->fl_type = F_RDLCK;
 		if (rc != 0)
 			cERROR(1, "Error unlocking previously locked "
@@ -1192,7 +1229,7 @@
 	int types[] = {LOCKING_ANDX_LARGE_FILES,
 		       LOCKING_ANDX_SHARED_LOCK | LOCKING_ANDX_LARGE_FILES};
 	unsigned int i;
-	unsigned int max_num, num;
+	unsigned int max_num, num, max_buf;
 	LOCKING_ANDX_RANGE *buf, *cur;
 	struct cifs_tcon *tcon = tlink_tcon(cfile->tlink);
 	struct cifsInodeInfo *cinode = CIFS_I(cfile->dentry->d_inode);
@@ -1202,8 +1239,16 @@
 
 	INIT_LIST_HEAD(&tmp_llist);
 
-	max_num = (tcon->ses->server->maxBuf - sizeof(struct smb_hdr)) /
-		  sizeof(LOCKING_ANDX_RANGE);
+	/*
+	 * Accessing maxBuf is racy with cifs_reconnect - need to store value
+	 * and check it for zero before using.
+	 */
+	max_buf = tcon->ses->server->maxBuf;
+	if (!max_buf)
+		return -EINVAL;
+
+	max_num = (max_buf - sizeof(struct smb_hdr)) /
+						sizeof(LOCKING_ANDX_RANGE);
 	buf = kzalloc(max_num * sizeof(LOCKING_ANDX_RANGE), GFP_KERNEL);
 	if (!buf)
 		return -ENOMEM;
@@ -1212,71 +1257,64 @@
 	for (i = 0; i < 2; i++) {
 		cur = buf;
 		num = 0;
-		list_for_each_entry_safe(li, tmp, &cinode->llist, llist) {
+		list_for_each_entry_safe(li, tmp, &cfile->llist, llist) {
 			if (flock->fl_start > li->offset ||
 			    (flock->fl_start + length) <
 			    (li->offset + li->length))
 				continue;
 			if (current->tgid != li->pid)
 				continue;
-			if (cfile->netfid != li->netfid)
-				continue;
 			if (types[i] != li->type)
 				continue;
-			if (!cinode->can_cache_brlcks) {
-				cur->Pid = cpu_to_le16(li->pid);
-				cur->LengthLow = cpu_to_le32((u32)li->length);
-				cur->LengthHigh =
-					cpu_to_le32((u32)(li->length>>32));
-				cur->OffsetLow = cpu_to_le32((u32)li->offset);
-				cur->OffsetHigh =
-					cpu_to_le32((u32)(li->offset>>32));
-				/*
-				 * We need to save a lock here to let us add
-				 * it again to the inode list if the unlock
-				 * range request fails on the server.
-				 */
-				list_move(&li->llist, &tmp_llist);
-				if (++num == max_num) {
-					stored_rc = cifs_lockv(xid, tcon,
-							       cfile->netfid,
-							       li->type, num,
-							       0, buf);
-					if (stored_rc) {
-						/*
-						 * We failed on the unlock range
-						 * request - add all locks from
-						 * the tmp list to the head of
-						 * the inode list.
-						 */
-						cifs_move_llist(&tmp_llist,
-								&cinode->llist);
-						rc = stored_rc;
-					} else
-						/*
-						 * The unlock range request
-						 * succeed - free the tmp list.
-						 */
-						cifs_free_llist(&tmp_llist);
-					cur = buf;
-					num = 0;
-				} else
-					cur++;
-			} else {
+			if (cinode->can_cache_brlcks) {
 				/*
 				 * We can cache brlock requests - simply remove
-				 * a lock from the inode list.
+				 * a lock from the file's list.
 				 */
 				list_del(&li->llist);
 				cifs_del_lock_waiters(li);
 				kfree(li);
+				continue;
 			}
+			cur->Pid = cpu_to_le16(li->pid);
+			cur->LengthLow = cpu_to_le32((u32)li->length);
+			cur->LengthHigh = cpu_to_le32((u32)(li->length>>32));
+			cur->OffsetLow = cpu_to_le32((u32)li->offset);
+			cur->OffsetHigh = cpu_to_le32((u32)(li->offset>>32));
+			/*
+			 * We need to save a lock here to let us add it again to
+			 * the file's list if the unlock range request fails on
+			 * the server.
+			 */
+			list_move(&li->llist, &tmp_llist);
+			if (++num == max_num) {
+				stored_rc = cifs_lockv(xid, tcon, cfile->netfid,
+						       li->type, num, 0, buf);
+				if (stored_rc) {
+					/*
+					 * We failed on the unlock range
+					 * request - add all locks from the tmp
+					 * list to the head of the file's list.
+					 */
+					cifs_move_llist(&tmp_llist,
+							&cfile->llist);
+					rc = stored_rc;
+				} else
+					/*
+					 * The unlock range request succeeded -
+					 * free the tmp list.
+					 */
+					cifs_free_llist(&tmp_llist);
+				cur = buf;
+				num = 0;
+			} else
+				cur++;
 		}
 		if (num) {
 			stored_rc = cifs_lockv(xid, tcon, cfile->netfid,
 					       types[i], num, 0, buf);
 			if (stored_rc) {
-				cifs_move_llist(&tmp_llist, &cinode->llist);
+				cifs_move_llist(&tmp_llist, &cfile->llist);
 				rc = stored_rc;
 			} else
 				cifs_free_llist(&tmp_llist);
@@ -1289,14 +1327,14 @@
 }
 
 static int
-cifs_setlk(struct file *file,  struct file_lock *flock, __u8 type,
+cifs_setlk(struct file *file,  struct file_lock *flock, __u32 type,
 	   bool wait_flag, bool posix_lck, int lock, int unlock, int xid)
 {
 	int rc = 0;
 	__u64 length = 1 + flock->fl_end - flock->fl_start;
 	struct cifsFileInfo *cfile = (struct cifsFileInfo *)file->private_data;
 	struct cifs_tcon *tcon = tlink_tcon(cfile->tlink);
-	struct cifsInodeInfo *cinode = CIFS_I(file->f_path.dentry->d_inode);
+	struct TCP_Server_Info *server = tcon->ses->server;
 	__u16 netfid = cfile->netfid;
 
 	if (posix_lck) {
@@ -1306,7 +1344,7 @@
 		if (!rc || rc < 0)
 			return rc;
 
-		if (type & LOCKING_ANDX_SHARED_LOCK)
+		if (type & server->vals->shared_lock_type)
 			posix_lock_type = CIFS_RDLCK;
 		else
 			posix_lock_type = CIFS_WRLCK;
@@ -1323,24 +1361,24 @@
 	if (lock) {
 		struct cifsLockInfo *lock;
 
-		lock = cifs_lock_init(flock->fl_start, length, type, netfid);
+		lock = cifs_lock_init(flock->fl_start, length, type);
 		if (!lock)
 			return -ENOMEM;
 
-		rc = cifs_lock_add_if(cinode, lock, wait_flag);
+		rc = cifs_lock_add_if(cfile, lock, wait_flag);
 		if (rc < 0)
 			kfree(lock);
 		if (rc <= 0)
 			goto out;
 
-		rc = CIFSSMBLock(xid, tcon, netfid, current->tgid, length,
-				 flock->fl_start, 0, 1, type, wait_flag, 0);
+		rc = cifs_mandatory_lock(xid, cfile, flock->fl_start, length,
+					 type, 1, 0, wait_flag);
 		if (rc) {
 			kfree(lock);
 			goto out;
 		}
 
-		cifs_lock_add(cinode, lock);
+		cifs_lock_add(cfile, lock);
 	} else if (unlock)
 		rc = cifs_unlock_range(cfile, flock, xid);
 
@@ -1361,7 +1399,7 @@
 	struct cifsInodeInfo *cinode;
 	struct cifsFileInfo *cfile;
 	__u16 netfid;
-	__u8 type;
+	__u32 type;
 
 	rc = -EACCES;
 	xid = GetXid();
@@ -1370,11 +1408,13 @@
 		"end: %lld", cmd, flock->fl_flags, flock->fl_type,
 		flock->fl_start, flock->fl_end);
 
-	cifs_read_flock(flock, &type, &lock, &unlock, &wait_flag);
-
-	cifs_sb = CIFS_SB(file->f_path.dentry->d_sb);
 	cfile = (struct cifsFileInfo *)file->private_data;
 	tcon = tlink_tcon(cfile->tlink);
+
+	cifs_read_flock(flock, &type, &lock, &unlock, &wait_flag,
+			tcon->ses->server);
+
+	cifs_sb = CIFS_SB(file->f_path.dentry->d_sb);
 	netfid = cfile->netfid;
 	cinode = CIFS_I(file->f_path.dentry->d_inode);
 
@@ -1539,10 +1579,11 @@
 struct cifsFileInfo *find_writable_file(struct cifsInodeInfo *cifs_inode,
 					bool fsuid_only)
 {
-	struct cifsFileInfo *open_file;
+	struct cifsFileInfo *open_file, *inv_file = NULL;
 	struct cifs_sb_info *cifs_sb;
 	bool any_available = false;
 	int rc;
+	unsigned int refind = 0;
 
 	/* Having a null inode here (because mapping->host was set to zero by
 	the VFS or MM) should not happen but we had reports of an oops (due to
@@ -1562,40 +1603,25 @@
 
 	spin_lock(&cifs_file_list_lock);
 refind_writable:
+	if (refind > MAX_REOPEN_ATT) {
+		spin_unlock(&cifs_file_list_lock);
+		return NULL;
+	}
 	list_for_each_entry(open_file, &cifs_inode->openFileList, flist) {
 		if (!any_available && open_file->pid != current->tgid)
 			continue;
 		if (fsuid_only && open_file->uid != current_fsuid())
 			continue;
 		if (OPEN_FMODE(open_file->f_flags) & FMODE_WRITE) {
-			cifsFileInfo_get(open_file);
-
 			if (!open_file->invalidHandle) {
 				/* found a good writable file */
+				cifsFileInfo_get(open_file);
 				spin_unlock(&cifs_file_list_lock);
 				return open_file;
+			} else {
+				if (!inv_file)
+					inv_file = open_file;
 			}
-
-			spin_unlock(&cifs_file_list_lock);
-
-			/* Had to unlock since following call can block */
-			rc = cifs_reopen_file(open_file, false);
-			if (!rc)
-				return open_file;
-
-			/* if it fails, try another handle if possible */
-			cFYI(1, "wp failed on reopen file");
-			cifsFileInfo_put(open_file);
-
-			spin_lock(&cifs_file_list_lock);
-
-			/* else we simply continue to the next entry. Thus
-			   we do not loop on reopen errors.  If we
-			   can not reopen the file, for example if we
-			   reconnected to a server with another client
-			   racing to delete or lock the file we would not
-			   make progress if we restarted before the beginning
-			   of the loop here. */
 		}
 	}
 	/* couldn't find usable FH with same pid, try any available */
@@ -1603,7 +1629,30 @@
 		any_available = true;
 		goto refind_writable;
 	}
+
+	if (inv_file) {
+		any_available = false;
+		cifsFileInfo_get(inv_file);
+	}
+
 	spin_unlock(&cifs_file_list_lock);
+
+	if (inv_file) {
+		rc = cifs_reopen_file(inv_file, false);
+		if (!rc)
+			return inv_file;
+		else {
+			spin_lock(&cifs_file_list_lock);
+			list_move_tail(&inv_file->flist,
+					&cifs_inode->openFileList);
+			spin_unlock(&cifs_file_list_lock);
+			cifsFileInfo_put(inv_file);
+			spin_lock(&cifs_file_list_lock);
+			++refind;
+			goto refind_writable;
+		}
+	}
+
 	return NULL;
 }
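/*
 * Reopening an invalid handle now happens at most once per pass, outside
 * cifs_file_list_lock, and the number of passes is capped by
 * MAX_REOPEN_ATT, so a handle that keeps failing to reopen can no longer
 * stall find_writable_file() indefinitely; the stale entry is simply
 * moved to the tail of openFileList and the search is retried.
 */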
 
@@ -2339,24 +2388,224 @@
 	return cifs_user_writev(iocb, iov, nr_segs, pos);
 }
 
+static struct cifs_readdata *
+cifs_readdata_alloc(unsigned int nr_vecs, work_func_t complete)
+{
+	struct cifs_readdata *rdata;
+
+	rdata = kzalloc(sizeof(*rdata) +
+			sizeof(struct kvec) * nr_vecs, GFP_KERNEL);
+	if (rdata != NULL) {
+		kref_init(&rdata->refcount);
+		INIT_LIST_HEAD(&rdata->list);
+		init_completion(&rdata->done);
+		INIT_WORK(&rdata->work, complete);
+		INIT_LIST_HEAD(&rdata->pages);
+	}
+	return rdata;
+}
+
+void
+cifs_readdata_release(struct kref *refcount)
+{
+	struct cifs_readdata *rdata = container_of(refcount,
+					struct cifs_readdata, refcount);
+
+	if (rdata->cfile)
+		cifsFileInfo_put(rdata->cfile);
+
+	kfree(rdata);
+}
+
+static int
+cifs_read_allocate_pages(struct list_head *list, unsigned int npages)
+{
+	int rc = 0;
+	struct page *page, *tpage;
+	unsigned int i;
+
+	for (i = 0; i < npages; i++) {
+		page = alloc_page(GFP_KERNEL|__GFP_HIGHMEM);
+		if (!page) {
+			rc = -ENOMEM;
+			break;
+		}
+		list_add(&page->lru, list);
+	}
+
+	if (rc) {
+		list_for_each_entry_safe(page, tpage, list, lru) {
+			list_del(&page->lru);
+			put_page(page);
+		}
+	}
+	return rc;
+}
+
+static void
+cifs_uncached_readdata_release(struct kref *refcount)
+{
+	struct page *page, *tpage;
+	struct cifs_readdata *rdata = container_of(refcount,
+					struct cifs_readdata, refcount);
+
+	list_for_each_entry_safe(page, tpage, &rdata->pages, lru) {
+		list_del(&page->lru);
+		put_page(page);
+	}
+	cifs_readdata_release(refcount);
+}
+
+static int
+cifs_retry_async_readv(struct cifs_readdata *rdata)
+{
+	int rc;
+
+	do {
+		if (rdata->cfile->invalidHandle) {
+			rc = cifs_reopen_file(rdata->cfile, true);
+			if (rc != 0)
+				continue;
+		}
+		rc = cifs_async_readv(rdata);
+	} while (rc == -EAGAIN);
+
+	return rc;
+}
+
+/**
+ * cifs_readdata_to_iov - copy data from pages in response to an iovec
+ * @rdata:	the readdata response with list of pages holding data
+ * @iov:	vector in which we should copy the data
+ * @nr_segs:	number of segments in vector
+ * @offset:	offset into file of the first iovec
+ * @copied:	used to return the amount of data copied to the iov
+ *
+ * This function copies data from a list of pages in a readdata response into
+ * an array of iovecs. It will first calculate where the data should go
+ * based on the info in the readdata and then copy the data into that spot.
+ */
+static ssize_t
+cifs_readdata_to_iov(struct cifs_readdata *rdata, const struct iovec *iov,
+			unsigned long nr_segs, loff_t offset, ssize_t *copied)
+{
+	int rc = 0;
+	struct iov_iter ii;
+	size_t pos = rdata->offset - offset;
+	struct page *page, *tpage;
+	ssize_t remaining = rdata->bytes;
+	unsigned char *pdata;
+
+	/* set up iov_iter and advance to the correct offset */
+	iov_iter_init(&ii, iov, nr_segs, iov_length(iov, nr_segs), 0);
+	iov_iter_advance(&ii, pos);
+
+	*copied = 0;
+	list_for_each_entry_safe(page, tpage, &rdata->pages, lru) {
+		ssize_t copy;
+
+		/* copy a whole page or whatever's left */
+		copy = min_t(ssize_t, remaining, PAGE_SIZE);
+
+		/* ...but limit it to whatever space is left in the iov */
+		copy = min_t(ssize_t, copy, iov_iter_count(&ii));
+
+		/* go while there's data to be copied and no errors */
+		if (copy && !rc) {
+			pdata = kmap(page);
+			rc = memcpy_toiovecend(ii.iov, pdata, ii.iov_offset,
+						(int)copy);
+			kunmap(page);
+			if (!rc) {
+				*copied += copy;
+				remaining -= copy;
+				iov_iter_advance(&ii, copy);
+			}
+		}
+
+		list_del(&page->lru);
+		put_page(page);
+	}
+
+	return rc;
+}
+
+static void
+cifs_uncached_readv_complete(struct work_struct *work)
+{
+	struct cifs_readdata *rdata = container_of(work,
+						struct cifs_readdata, work);
+
+	/* if the result is non-zero then the pages weren't kmapped */
+	if (rdata->result == 0) {
+		struct page *page;
+
+		list_for_each_entry(page, &rdata->pages, lru)
+			kunmap(page);
+	}
+
+	complete(&rdata->done);
+	kref_put(&rdata->refcount, cifs_uncached_readdata_release);
+}
+
+static int
+cifs_uncached_read_marshal_iov(struct cifs_readdata *rdata,
+				unsigned int remaining)
+{
+	int len = 0;
+	struct page *page, *tpage;
+
+	rdata->nr_iov = 1;
+	list_for_each_entry_safe(page, tpage, &rdata->pages, lru) {
+		if (remaining >= PAGE_SIZE) {
+			/* enough data to fill the page */
+			rdata->iov[rdata->nr_iov].iov_base = kmap(page);
+			rdata->iov[rdata->nr_iov].iov_len = PAGE_SIZE;
+			cFYI(1, "%u: idx=%lu iov_base=%p iov_len=%zu",
+				rdata->nr_iov, page->index,
+				rdata->iov[rdata->nr_iov].iov_base,
+				rdata->iov[rdata->nr_iov].iov_len);
+			++rdata->nr_iov;
+			len += PAGE_SIZE;
+			remaining -= PAGE_SIZE;
+		} else if (remaining > 0) {
+			/* enough for partial page, fill and zero the rest */
+			rdata->iov[rdata->nr_iov].iov_base = kmap(page);
+			rdata->iov[rdata->nr_iov].iov_len = remaining;
+			cFYI(1, "%u: idx=%lu iov_base=%p iov_len=%zu",
+				rdata->nr_iov, page->index,
+				rdata->iov[rdata->nr_iov].iov_base,
+				rdata->iov[rdata->nr_iov].iov_len);
+			memset(rdata->iov[rdata->nr_iov].iov_base + remaining,
+				'\0', PAGE_SIZE - remaining);
+			++rdata->nr_iov;
+			len += remaining;
+			remaining = 0;
+		} else {
+			/* no need to hold page hostage */
+			list_del(&page->lru);
+			put_page(page);
+		}
+	}
+
+	return len;
+}
+
 static ssize_t
 cifs_iovec_read(struct file *file, const struct iovec *iov,
 		 unsigned long nr_segs, loff_t *poffset)
 {
-	int rc;
-	int xid;
-	ssize_t total_read;
-	unsigned int bytes_read = 0;
+	ssize_t rc;
 	size_t len, cur_len;
-	int iov_offset = 0;
+	ssize_t total_read = 0;
+	loff_t offset = *poffset;
+	unsigned int npages;
 	struct cifs_sb_info *cifs_sb;
-	struct cifs_tcon *pTcon;
+	struct cifs_tcon *tcon;
 	struct cifsFileInfo *open_file;
-	struct smb_com_read_rsp *pSMBr;
-	struct cifs_io_parms io_parms;
-	char *read_data;
-	unsigned int rsize;
-	__u32 pid;
+	struct cifs_readdata *rdata, *tmp;
+	struct list_head rdata_list;
+	pid_t pid;
 
 	if (!nr_segs)
 		return 0;
@@ -2365,14 +2614,10 @@
 	if (!len)
 		return 0;
 
-	xid = GetXid();
+	INIT_LIST_HEAD(&rdata_list);
 	cifs_sb = CIFS_SB(file->f_path.dentry->d_sb);
-
-	/* FIXME: set up handlers for larger reads and/or convert to async */
-	rsize = min_t(unsigned int, cifs_sb->rsize, CIFSMaxBufSize);
-
 	open_file = file->private_data;
-	pTcon = tlink_tcon(open_file->tlink);
+	tcon = tlink_tcon(open_file->tlink);
 
 	if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_RWPIDFORWARD)
 		pid = open_file->pid;
@@ -2382,56 +2627,78 @@
 	if ((file->f_flags & O_ACCMODE) == O_WRONLY)
 		cFYI(1, "attempting read on write only file instance");
 
-	for (total_read = 0; total_read < len; total_read += bytes_read) {
-		cur_len = min_t(const size_t, len - total_read, rsize);
-		rc = -EAGAIN;
-		read_data = NULL;
+	do {
+		cur_len = min_t(const size_t, len - total_read, cifs_sb->rsize);
+		npages = DIV_ROUND_UP(cur_len, PAGE_SIZE);
 
-		while (rc == -EAGAIN) {
-			int buf_type = CIFS_NO_BUFFER;
-			if (open_file->invalidHandle) {
-				rc = cifs_reopen_file(open_file, true);
-				if (rc != 0)
-					break;
-			}
-			io_parms.netfid = open_file->netfid;
-			io_parms.pid = pid;
-			io_parms.tcon = pTcon;
-			io_parms.offset = *poffset;
-			io_parms.length = cur_len;
-			rc = CIFSSMBRead(xid, &io_parms, &bytes_read,
-					 &read_data, &buf_type);
-			pSMBr = (struct smb_com_read_rsp *)read_data;
-			if (read_data) {
-				char *data_offset = read_data + 4 +
-						le16_to_cpu(pSMBr->DataOffset);
-				if (memcpy_toiovecend(iov, data_offset,
-						      iov_offset, bytes_read))
-					rc = -EFAULT;
-				if (buf_type == CIFS_SMALL_BUFFER)
-					cifs_small_buf_release(read_data);
-				else if (buf_type == CIFS_LARGE_BUFFER)
-					cifs_buf_release(read_data);
-				read_data = NULL;
-				iov_offset += bytes_read;
-			}
+		/* allocate a readdata struct */
+		rdata = cifs_readdata_alloc(npages,
+					    cifs_uncached_readv_complete);
+		if (!rdata) {
+			rc = -ENOMEM;
+			goto error;
 		}
 
-		if (rc || (bytes_read == 0)) {
-			if (total_read) {
-				break;
-			} else {
-				FreeXid(xid);
-				return rc;
-			}
-		} else {
-			cifs_stats_bytes_read(pTcon, bytes_read);
-			*poffset += bytes_read;
+		rc = cifs_read_allocate_pages(&rdata->pages, npages);
+		if (rc)
+			goto error;
+
+		rdata->cfile = cifsFileInfo_get(open_file);
+		rdata->offset = offset;
+		rdata->bytes = cur_len;
+		rdata->pid = pid;
+		rdata->marshal_iov = cifs_uncached_read_marshal_iov;
+
+		rc = cifs_retry_async_readv(rdata);
+error:
+		if (rc) {
+			kref_put(&rdata->refcount,
+				 cifs_uncached_readdata_release);
+			break;
 		}
+
+		list_add_tail(&rdata->list, &rdata_list);
+		offset += cur_len;
+		len -= cur_len;
+	} while (len > 0);
+
+	/* if at least one read request was sent successfully, then reset rc */
+	if (!list_empty(&rdata_list))
+		rc = 0;
+
+	/* the loop below should proceed in the order of increasing offsets */
+restart_loop:
+	list_for_each_entry_safe(rdata, tmp, &rdata_list, list) {
+		if (!rc) {
+			ssize_t copied;
+
+			/* FIXME: freezable sleep too? */
+			rc = wait_for_completion_killable(&rdata->done);
+			if (rc)
+				rc = -EINTR;
+			else if (rdata->result)
+				rc = rdata->result;
+			else {
+				rc = cifs_readdata_to_iov(rdata, iov,
+							nr_segs, *poffset,
+							&copied);
+				total_read += copied;
+			}
+
+			/* resend call if it's a retryable error */
+			if (rc == -EAGAIN) {
+				rc = cifs_retry_async_readv(rdata);
+				goto restart_loop;
+			}
+		}
+		list_del_init(&rdata->list);
+		kref_put(&rdata->refcount, cifs_uncached_readdata_release);
 	}
 
-	FreeXid(xid);
-	return total_read;
+	cifs_stats_bytes_read(tcon, total_read);
+	*poffset += total_read;
+
+	return total_read ? total_read : rc;
 }
 
 ssize_t cifs_user_readv(struct kiocb *iocb, const struct iovec *iov,
@@ -2606,6 +2873,100 @@
 	return rc;
 }
 
+static void
+cifs_readv_complete(struct work_struct *work)
+{
+	struct cifs_readdata *rdata = container_of(work,
+						struct cifs_readdata, work);
+	struct page *page, *tpage;
+
+	list_for_each_entry_safe(page, tpage, &rdata->pages, lru) {
+		list_del(&page->lru);
+		lru_cache_add_file(page);
+
+		if (rdata->result == 0) {
+			kunmap(page);
+			flush_dcache_page(page);
+			SetPageUptodate(page);
+		}
+
+		unlock_page(page);
+
+		if (rdata->result == 0)
+			cifs_readpage_to_fscache(rdata->mapping->host, page);
+
+		page_cache_release(page);
+	}
+	kref_put(&rdata->refcount, cifs_readdata_release);
+}
+
+static int
+cifs_readpages_marshal_iov(struct cifs_readdata *rdata, unsigned int remaining)
+{
+	int len = 0;
+	struct page *page, *tpage;
+	u64 eof;
+	pgoff_t eof_index;
+
+	/* determine the eof that the server (probably) has */
+	eof = CIFS_I(rdata->mapping->host)->server_eof;
+	eof_index = eof ? (eof - 1) >> PAGE_CACHE_SHIFT : 0;
+	cFYI(1, "eof=%llu eof_index=%lu", eof, eof_index);
+
+	rdata->nr_iov = 1;
+	list_for_each_entry_safe(page, tpage, &rdata->pages, lru) {
+		if (remaining >= PAGE_CACHE_SIZE) {
+			/* enough data to fill the page */
+			rdata->iov[rdata->nr_iov].iov_base = kmap(page);
+			rdata->iov[rdata->nr_iov].iov_len = PAGE_CACHE_SIZE;
+			cFYI(1, "%u: idx=%lu iov_base=%p iov_len=%zu",
+				rdata->nr_iov, page->index,
+				rdata->iov[rdata->nr_iov].iov_base,
+				rdata->iov[rdata->nr_iov].iov_len);
+			++rdata->nr_iov;
+			len += PAGE_CACHE_SIZE;
+			remaining -= PAGE_CACHE_SIZE;
+		} else if (remaining > 0) {
+			/* enough for partial page, fill and zero the rest */
+			rdata->iov[rdata->nr_iov].iov_base = kmap(page);
+			rdata->iov[rdata->nr_iov].iov_len = remaining;
+			cFYI(1, "%u: idx=%lu iov_base=%p iov_len=%zu",
+				rdata->nr_iov, page->index,
+				rdata->iov[rdata->nr_iov].iov_base,
+				rdata->iov[rdata->nr_iov].iov_len);
+			memset(rdata->iov[rdata->nr_iov].iov_base + remaining,
+				'\0', PAGE_CACHE_SIZE - remaining);
+			++rdata->nr_iov;
+			len += remaining;
+			remaining = 0;
+		} else if (page->index > eof_index) {
+			/*
+			 * The VFS will not try to do readahead past the
+			 * i_size, but it's possible that we have outstanding
+			 * writes with gaps in the middle and the i_size hasn't
+			 * caught up yet. Populate those with zeroed out pages
+			 * to prevent the VFS from repeatedly attempting to
+			 * fill them until the writes are flushed.
+			 */
+			zero_user(page, 0, PAGE_CACHE_SIZE);
+			list_del(&page->lru);
+			lru_cache_add_file(page);
+			flush_dcache_page(page);
+			SetPageUptodate(page);
+			unlock_page(page);
+			page_cache_release(page);
+		} else {
+			/* no need to hold page hostage */
+			list_del(&page->lru);
+			lru_cache_add_file(page);
+			unlock_page(page);
+			page_cache_release(page);
+		}
+	}
+
+	return len;
+}
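/*
 * Both marshal_iov variants are called from the read receive path
 * (cifs_readv_receive) as
 *
 *	len = rdata->marshal_iov(rdata, data_len);
 *	data_len -= len;
 *
 * The uncached variant earlier in this file simply drops pages beyond the
 * returned data, while this readpages variant must also zero-fill, mark
 * uptodate and unlock the pages the VFS handed in, including pages past
 * the server's idea of EOF.
 */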
+
 static int cifs_readpages(struct file *file, struct address_space *mapping,
 	struct list_head *page_list, unsigned num_pages)
 {
@@ -2708,7 +3069,7 @@
 			nr_pages++;
 		}
 
-		rdata = cifs_readdata_alloc(nr_pages);
+		rdata = cifs_readdata_alloc(nr_pages, cifs_readv_complete);
 		if (!rdata) {
 			/* best to give up if we're out of mem */
 			list_for_each_entry_safe(page, tpage, &tmplist, lru) {
@@ -2722,24 +3083,16 @@
 		}
 
 		spin_lock(&cifs_file_list_lock);
-		cifsFileInfo_get(open_file);
 		spin_unlock(&cifs_file_list_lock);
-		rdata->cfile = open_file;
+		rdata->cfile = cifsFileInfo_get(open_file);
 		rdata->mapping = mapping;
 		rdata->offset = offset;
 		rdata->bytes = bytes;
 		rdata->pid = pid;
+		rdata->marshal_iov = cifs_readpages_marshal_iov;
 		list_splice_init(&tmplist, &rdata->pages);
 
-		do {
-			if (open_file->invalidHandle) {
-				rc = cifs_reopen_file(open_file, true);
-				if (rc != 0)
-					continue;
-			}
-			rc = cifs_async_readv(rdata);
-		} while (rc == -EAGAIN);
-
+		rc = cifs_retry_async_readv(rdata);
 		if (rc != 0) {
 			list_for_each_entry_safe(page, tpage, &rdata->pages,
 						 lru) {
@@ -2748,9 +3101,11 @@
 				unlock_page(page);
 				page_cache_release(page);
 			}
-			cifs_readdata_free(rdata);
+			kref_put(&rdata->refcount, cifs_readdata_release);
 			break;
 		}
+
+		kref_put(&rdata->refcount, cifs_readdata_release);
 	}
 
 	return rc;
diff --git a/fs/cifs/ioctl.c b/fs/cifs/ioctl.c
index 4221b5e..6d2667f 100644
--- a/fs/cifs/ioctl.c
+++ b/fs/cifs/ioctl.c
@@ -51,7 +51,15 @@
 	cifs_sb = CIFS_SB(inode->i_sb);
 
 	switch (command) {
+		static bool warned = false;
 		case CIFS_IOC_CHECKUMOUNT:
+			if (!warned) {
+				warned = true;
+				cERROR(1, "the CIFS_IOC_CHECKUMOUNT ioctl will "
+					  "be deprecated in 3.7. Please "
+					  "migrate away from the use of "
+					  "umount.cifs");
+			}
 			cFYI(1, "User unmount attempted");
 			if (cifs_sb->mnt_uid == current_uid())
 				rc = 0;
diff --git a/fs/cifs/misc.c b/fs/cifs/misc.c
index c29d1aa..557506a 100644
--- a/fs/cifs/misc.c
+++ b/fs/cifs/misc.c
@@ -212,93 +212,6 @@
 	return;
 }
 
-/*
- * Find a free multiplex id (SMB mid). Otherwise there could be
- * mid collisions which might cause problems, demultiplexing the
- * wrong response to this request. Multiplex ids could collide if
- * one of a series requests takes much longer than the others, or
- * if a very large number of long lived requests (byte range
- * locks or FindNotify requests) are pending. No more than
- * 64K-1 requests can be outstanding at one time. If no
- * mids are available, return zero. A future optimization
- * could make the combination of mids and uid the key we use
- * to demultiplex on (rather than mid alone).
- * In addition to the above check, the cifs demultiplex
- * code already used the command code as a secondary
- * check of the frame and if signing is negotiated the
- * response would be discarded if the mid were the same
- * but the signature was wrong. Since the mid is not put in the
- * pending queue until later (when it is about to be dispatched)
- * we do have to limit the number of outstanding requests
- * to somewhat less than 64K-1 although it is hard to imagine
- * so many threads being in the vfs at one time.
- */
-__u64 GetNextMid(struct TCP_Server_Info *server)
-{
-	__u64 mid = 0;
-	__u16 last_mid, cur_mid;
-	bool collision;
-
-	spin_lock(&GlobalMid_Lock);
-
-	/* mid is 16 bit only for CIFS/SMB */
-	cur_mid = (__u16)((server->CurrentMid) & 0xffff);
-	/* we do not want to loop forever */
-	last_mid = cur_mid;
-	cur_mid++;
-
-	/*
-	 * This nested loop looks more expensive than it is.
-	 * In practice the list of pending requests is short,
-	 * fewer than 50, and the mids are likely to be unique
-	 * on the first pass through the loop unless some request
-	 * takes longer than the 64 thousand requests before it
-	 * (and it would also have to have been a request that
-	 * did not time out).
-	 */
-	while (cur_mid != last_mid) {
-		struct mid_q_entry *mid_entry;
-		unsigned int num_mids;
-
-		collision = false;
-		if (cur_mid == 0)
-			cur_mid++;
-
-		num_mids = 0;
-		list_for_each_entry(mid_entry, &server->pending_mid_q, qhead) {
-			++num_mids;
-			if (mid_entry->mid == cur_mid &&
-			    mid_entry->mid_state == MID_REQUEST_SUBMITTED) {
-				/* This mid is in use, try a different one */
-				collision = true;
-				break;
-			}
-		}
-
-		/*
-		 * if we have more than 32k mids in the list, then something
-		 * is very wrong. Possibly a local user is trying to DoS the
-		 * box by issuing long-running calls and SIGKILL'ing them. If
-		 * we get to 2^16 mids then we're in big trouble as this
-		 * function could loop forever.
-		 *
-		 * Go ahead and assign out the mid in this situation, but force
-		 * an eventual reconnect to clean out the pending_mid_q.
-		 */
-		if (num_mids > 32768)
-			server->tcpStatus = CifsNeedReconnect;
-
-		if (!collision) {
-			mid = (__u64)cur_mid;
-			server->CurrentMid = mid;
-			break;
-		}
-		cur_mid++;
-	}
-	spin_unlock(&GlobalMid_Lock);
-	return mid;
-}
-
 /* NB: MID cannot be set if treeCon is not passed in; in that
    case it is the responsibility of the caller to set the mid */
 void
@@ -306,8 +219,6 @@
 		const struct cifs_tcon *treeCon, int word_count
 		/* length of fixed section (word count) in two byte units  */)
 {
-	struct list_head *temp_item;
-	struct cifs_ses *ses;
 	char *temp = (char *) buffer;
 
 	memset(temp, 0, 256); /* bigger than MAX_CIFS_HDR_SIZE */
@@ -336,52 +247,7 @@
 
 			/* Uid is not converted */
 			buffer->Uid = treeCon->ses->Suid;
-			buffer->Mid = GetNextMid(treeCon->ses->server);
-			if (multiuser_mount != 0) {
-		/* For the multiuser case, there are few obvious technically  */
-		/* possible mechanisms to match the local linux user (uid)    */
-		/* to a valid remote smb user (smb_uid):		      */
-		/* 	1) Query Winbind (or other local pam/nss daemon       */
-		/* 	  for userid/password/logon_domain or credential      */
-		/*      2) Query Winbind for uid to sid to username mapping   */
-		/* 	   and see if we have a matching password for existing*/
-		/*         session for that user perhas getting password by   */
-		/*         adding a new pam_cifs module that stores passwords */
-		/*         so that the cifs vfs can get at that for all logged*/
-		/*	   on users					      */
-		/*	3) (Which is the mechanism we have chosen)	      */
-		/*	   Search through sessions to the same server for a   */
-		/*	   a match on the uid that was passed in on mount     */
-		/*         with the current processes uid (or euid?) and use  */
-		/* 	   that smb uid.   If no existing smb session for     */
-		/* 	   that uid found, use the default smb session ie     */
-		/*         the smb session for the volume mounted which is    */
-		/* 	   the same as would be used if the multiuser mount   */
-		/* 	   flag were disabled.  */
-
-		/*  BB Add support for establishing new tCon and SMB Session  */
-		/*      with userid/password pairs found on the smb session   */
-		/*	for other target tcp/ip addresses 		BB    */
-				if (current_fsuid() != treeCon->ses->linux_uid) {
-					cFYI(1, "Multiuser mode and UID "
-						 "did not match tcon uid");
-					spin_lock(&cifs_tcp_ses_lock);
-					list_for_each(temp_item, &treeCon->ses->server->smb_ses_list) {
-						ses = list_entry(temp_item, struct cifs_ses, smb_ses_list);
-						if (ses->linux_uid == current_fsuid()) {
-							if (ses->server == treeCon->ses->server) {
-								cFYI(1, "found matching uid substitute right smb_uid");
-								buffer->Uid = ses->Suid;
-								break;
-							} else {
-				/* BB eventually call cifs_setup_session here */
-								cFYI(1, "local UID found but no smb sess with this server exists");
-							}
-						}
-					}
-					spin_unlock(&cifs_tcp_ses_lock);
-				}
-			}
+			buffer->Mid = get_next_mid(treeCon->ses->server);
 		}
 		if (treeCon->Flags & SMB_SHARE_IS_IN_DFS)
 			buffer->Flags2 |= SMBFLG2_DFS;
@@ -700,22 +566,3 @@
 
 	return false;
 }
-
-void
-cifs_add_credits(struct TCP_Server_Info *server, const unsigned int add)
-{
-	spin_lock(&server->req_lock);
-	server->credits += add;
-	server->in_flight--;
-	spin_unlock(&server->req_lock);
-	wake_up(&server->request_q);
-}
-
-void
-cifs_set_credits(struct TCP_Server_Info *server, const int val)
-{
-	spin_lock(&server->req_lock);
-	server->credits = val;
-	server->oplocks = val > 1 ? enable_oplocks : false;
-	spin_unlock(&server->req_lock);
-}
diff --git a/fs/cifs/readdir.c b/fs/cifs/readdir.c
index e2bbc68..0a8224d 100644
--- a/fs/cifs/readdir.c
+++ b/fs/cifs/readdir.c
@@ -219,6 +219,7 @@
 
 static int initiate_cifs_search(const int xid, struct file *file)
 {
+	__u16 search_flags;
 	int rc = 0;
 	char *full_path = NULL;
 	struct cifsFileInfo *cifsFile;
@@ -270,8 +271,12 @@
 		cifsFile->srch_inf.info_level = SMB_FIND_FILE_DIRECTORY_INFO;
 	}
 
+	search_flags = CIFS_SEARCH_CLOSE_AT_END | CIFS_SEARCH_RETURN_RESUME;
+	if (backup_cred(cifs_sb))
+		search_flags |= CIFS_SEARCH_BACKUP_SEARCH;
+
 	rc = CIFSFindFirst(xid, pTcon, full_path, cifs_sb->local_nls,
-		&cifsFile->netfid, &cifsFile->srch_inf,
+		&cifsFile->netfid, search_flags, &cifsFile->srch_inf,
 		cifs_sb->mnt_cifs_flags &
 			CIFS_MOUNT_MAP_SPECIAL_CHR, CIFS_DIR_SEP(cifs_sb));
 	if (rc == 0)
@@ -502,11 +507,13 @@
 static int find_cifs_entry(const int xid, struct cifs_tcon *pTcon,
 	struct file *file, char **ppCurrentEntry, int *num_to_ret)
 {
+	__u16 search_flags;
 	int rc = 0;
 	int pos_in_buf = 0;
 	loff_t first_entry_in_buffer;
 	loff_t index_to_find = file->f_pos;
 	struct cifsFileInfo *cifsFile = file->private_data;
+	struct cifs_sb_info *cifs_sb = CIFS_SB(file->f_path.dentry->d_sb);
 	/* check if index in the buffer */
 
 	if ((cifsFile == NULL) || (ppCurrentEntry == NULL) ||
@@ -560,10 +567,14 @@
 						cifsFile);
 	}
 
+	search_flags = CIFS_SEARCH_CLOSE_AT_END | CIFS_SEARCH_RETURN_RESUME;
+	if (backup_cred(cifs_sb))
+		search_flags |= CIFS_SEARCH_BACKUP_SEARCH;
+
 	while ((index_to_find >= cifsFile->srch_inf.index_of_last_entry) &&
 	      (rc == 0) && !cifsFile->srch_inf.endOfSearch) {
 		cFYI(1, "calling findnext2");
-		rc = CIFSFindNext(xid, pTcon, cifsFile->netfid,
+		rc = CIFSFindNext(xid, pTcon, cifsFile->netfid, search_flags,
 				  &cifsFile->srch_inf);
 		/* FindFirst/Next set last_entry to NULL on malformed reply */
 		if (cifsFile->srch_inf.last_entry)
diff --git a/fs/cifs/smb1ops.c b/fs/cifs/smb1ops.c
new file mode 100644
index 0000000..6dec38f
--- /dev/null
+++ b/fs/cifs/smb1ops.c
@@ -0,0 +1,243 @@
+/*
+ *  SMB1 (CIFS) version specific operations
+ *
+ *  Copyright (c) 2012, Jeff Layton <jlayton@redhat.com>
+ *
+ *  This library is free software; you can redistribute it and/or modify
+ *  it under the terms of the GNU General Public License v2 as published
+ *  by the Free Software Foundation.
+ *
+ *  This library is distributed in the hope that it will be useful,
+ *  but WITHOUT ANY WARRANTY; without even the implied warranty of
+ *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See
+ *  the GNU Lesser General Public License for more details.
+ *
+ *  You should have received a copy of the GNU Lesser General Public License
+ *  along with this library; if not, write to the Free Software
+ *  Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ */
+
+#include "cifsglob.h"
+#include "cifsproto.h"
+#include "cifs_debug.h"
+#include "cifspdu.h"
+
+/*
+ * An NT cancel request header looks just like the original request except:
+ *
+ * The Command is SMB_COM_NT_CANCEL
+ * The WordCount is zeroed out
+ * The ByteCount is zeroed out
+ *
+ * This function mangles an existing request buffer into a
+ * SMB_COM_NT_CANCEL request and then sends it.
+ */
+static int
+send_nt_cancel(struct TCP_Server_Info *server, void *buf,
+	       struct mid_q_entry *mid)
+{
+	int rc = 0;
+	struct smb_hdr *in_buf = (struct smb_hdr *)buf;
+
+	/* -4 for RFC1001 length and +2 for BCC field */
+	in_buf->smb_buf_length = cpu_to_be32(sizeof(struct smb_hdr) - 4  + 2);
+	in_buf->Command = SMB_COM_NT_CANCEL;
+	in_buf->WordCount = 0;
+	put_bcc(0, in_buf);
+
+	mutex_lock(&server->srv_mutex);
+	rc = cifs_sign_smb(in_buf, server, &mid->sequence_number);
+	if (rc) {
+		mutex_unlock(&server->srv_mutex);
+		return rc;
+	}
+	rc = smb_send(server, in_buf, be32_to_cpu(in_buf->smb_buf_length));
+	mutex_unlock(&server->srv_mutex);
+
+	cFYI(1, "issued NT_CANCEL for mid %u, rc = %d",
+		in_buf->Mid, rc);
+
+	return rc;
+}
+
+static bool
+cifs_compare_fids(struct cifsFileInfo *ob1, struct cifsFileInfo *ob2)
+{
+	return ob1->netfid == ob2->netfid;
+}
+
+static unsigned int
+cifs_read_data_offset(char *buf)
+{
+	READ_RSP *rsp = (READ_RSP *)buf;
+	return le16_to_cpu(rsp->DataOffset);
+}
+
+static unsigned int
+cifs_read_data_length(char *buf)
+{
+	READ_RSP *rsp = (READ_RSP *)buf;
+	return (le16_to_cpu(rsp->DataLengthHigh) << 16) +
+	       le16_to_cpu(rsp->DataLength);
+}
+
+static struct mid_q_entry *
+cifs_find_mid(struct TCP_Server_Info *server, char *buffer)
+{
+	struct smb_hdr *buf = (struct smb_hdr *)buffer;
+	struct mid_q_entry *mid;
+
+	spin_lock(&GlobalMid_Lock);
+	list_for_each_entry(mid, &server->pending_mid_q, qhead) {
+		if (mid->mid == buf->Mid &&
+		    mid->mid_state == MID_REQUEST_SUBMITTED &&
+		    le16_to_cpu(mid->command) == buf->Command) {
+			spin_unlock(&GlobalMid_Lock);
+			return mid;
+		}
+	}
+	spin_unlock(&GlobalMid_Lock);
+	return NULL;
+}
+
+static void
+cifs_add_credits(struct TCP_Server_Info *server, const unsigned int add)
+{
+	spin_lock(&server->req_lock);
+	server->credits += add;
+	server->in_flight--;
+	spin_unlock(&server->req_lock);
+	wake_up(&server->request_q);
+}
+
+static void
+cifs_set_credits(struct TCP_Server_Info *server, const int val)
+{
+	spin_lock(&server->req_lock);
+	server->credits = val;
+	server->oplocks = val > 1 ? enable_oplocks : false;
+	spin_unlock(&server->req_lock);
+}
+
+static int *
+cifs_get_credits_field(struct TCP_Server_Info *server)
+{
+	return &server->credits;
+}
+
+/*
+ * Find a free multiplex id (SMB mid). Otherwise there could be
+ * mid collisions which might cause problems demultiplexing the
+ * wrong response to this request. Multiplex ids could collide if
+ * one of a series of requests takes much longer than the others, or
+ * if a very large number of long lived requests (byte range
+ * locks or FindNotify requests) are pending. No more than
+ * 64K-1 requests can be outstanding at one time. If no
+ * mids are available, return zero. A future optimization
+ * could make the combination of mids and uid the key we use
+ * to demultiplex on (rather than mid alone).
+ * In addition to the above check, the cifs demultiplex
+ * code already used the command code as a secondary
+ * check of the frame and if signing is negotiated the
+ * response would be discarded if the mid were the same
+ * but the signature was wrong. Since the mid is not put in the
+ * pending queue until later (when it is about to be dispatched)
+ * we do have to limit the number of outstanding requests
+ * to somewhat less than 64K-1 although it is hard to imagine
+ * so many threads being in the vfs at one time.
+ */
+static __u64
+cifs_get_next_mid(struct TCP_Server_Info *server)
+{
+	__u64 mid = 0;
+	__u16 last_mid, cur_mid;
+	bool collision;
+
+	spin_lock(&GlobalMid_Lock);
+
+	/* mid is 16 bit only for CIFS/SMB */
+	cur_mid = (__u16)((server->CurrentMid) & 0xffff);
+	/* we do not want to loop forever */
+	last_mid = cur_mid;
+	cur_mid++;
+
+	/*
+	 * This nested loop looks more expensive than it is.
+	 * In practice the list of pending requests is short,
+	 * fewer than 50, and the mids are likely to be unique
+	 * on the first pass through the loop unless some request
+	 * takes longer than the 64 thousand requests before it
+	 * (and it would also have to have been a request that
+	 * did not time out).
+	 */
+	while (cur_mid != last_mid) {
+		struct mid_q_entry *mid_entry;
+		unsigned int num_mids;
+
+		collision = false;
+		if (cur_mid == 0)
+			cur_mid++;
+
+		num_mids = 0;
+		list_for_each_entry(mid_entry, &server->pending_mid_q, qhead) {
+			++num_mids;
+			if (mid_entry->mid == cur_mid &&
+			    mid_entry->mid_state == MID_REQUEST_SUBMITTED) {
+				/* This mid is in use, try a different one */
+				collision = true;
+				break;
+			}
+		}
+
+		/*
+		 * if we have more than 32k mids in the list, then something
+		 * is very wrong. Possibly a local user is trying to DoS the
+		 * box by issuing long-running calls and SIGKILL'ing them. If
+		 * we get to 2^16 mids then we're in big trouble as this
+		 * function could loop forever.
+		 *
+		 * Go ahead and assign out the mid in this situation, but force
+		 * an eventual reconnect to clean out the pending_mid_q.
+		 */
+		if (num_mids > 32768)
+			server->tcpStatus = CifsNeedReconnect;
+
+		if (!collision) {
+			mid = (__u64)cur_mid;
+			server->CurrentMid = mid;
+			break;
+		}
+		cur_mid++;
+	}
+	spin_unlock(&GlobalMid_Lock);
+	return mid;
+}
+
+struct smb_version_operations smb1_operations = {
+	.send_cancel = send_nt_cancel,
+	.compare_fids = cifs_compare_fids,
+	.setup_request = cifs_setup_request,
+	.check_receive = cifs_check_receive,
+	.add_credits = cifs_add_credits,
+	.set_credits = cifs_set_credits,
+	.get_credits_field = cifs_get_credits_field,
+	.get_next_mid = cifs_get_next_mid,
+	.read_data_offset = cifs_read_data_offset,
+	.read_data_length = cifs_read_data_length,
+	.map_error = map_smb_to_linux_error,
+	.find_mid = cifs_find_mid,
+	.check_message = checkSMB,
+	.dump_detail = cifs_dump_detail,
+	.is_oplock_break = is_valid_oplock_break,
+};
+
+struct smb_version_values smb1_values = {
+	.version_string = SMB1_VERSION_STRING,
+	.large_lock_type = LOCKING_ANDX_LARGE_FILES,
+	.exclusive_lock_type = 0,
+	.shared_lock_type = LOCKING_ANDX_SHARED_LOCK,
+	.unlock_lock_type = 0,
+	.header_size = sizeof(struct smb_hdr),
+	.max_header_size = MAX_CIFS_HDR_SIZE,
+	.read_rsp_size = sizeof(READ_RSP),
+};
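
For illustration only (not part of the patch), the same 16-bit wrap-around search that cifs_get_next_mid() performs above can be reduced to a stand-alone helper. The names pick_next_id(), in_use and n_in_use are hypothetical, and the 32k-pending sanity check that forces a reconnect is omitted:

#include <stdbool.h>
#include <stddef.h>
#include <stdint.h>

static uint16_t pick_next_id(uint16_t current, const uint16_t *in_use,
			     size_t n_in_use)
{
	uint16_t last = current;	/* remember where we started */
	uint16_t cand = current + 1;	/* first candidate, wraps at 2^16 */

	while (cand != last) {
		bool collision = false;
		size_t i;

		if (cand == 0)		/* id 0 is reserved, skip it */
			cand++;

		for (i = 0; i < n_in_use; i++) {
			if (in_use[i] == cand) {
				collision = true;	/* id busy, try next */
				break;
			}
		}
		if (!collision)
			return cand;	/* free id found */
		cand++;
	}
	return 0;			/* all 64K-1 ids are in use */
}
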
diff --git a/fs/cifs/smb2ops.c b/fs/cifs/smb2ops.c
new file mode 100644
index 0000000..f065e89
--- /dev/null
+++ b/fs/cifs/smb2ops.c
@@ -0,0 +1,27 @@
+/*
+ *  SMB2 version specific operations
+ *
+ *  Copyright (c) 2012, Jeff Layton <jlayton@redhat.com>
+ *
+ *  This library is free software; you can redistribute it and/or modify
+ *  it under the terms of the GNU General Public License v2 as published
+ *  by the Free Software Foundation.
+ *
+ *  This library is distributed in the hope that it will be useful,
+ *  but WITHOUT ANY WARRANTY; without even the implied warranty of
+ *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See
+ *  the GNU Lesser General Public License for more details.
+ *
+ *  You should have received a copy of the GNU Lesser General Public License
+ *  along with this library; if not, write to the Free Software
+ *  Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ */
+
+#include "cifsglob.h"
+
+struct smb_version_operations smb21_operations = {
+};
+
+struct smb_version_values smb21_values = {
+	.version_string = SMB21_VERSION_STRING,
+};
diff --git a/fs/cifs/transport.c b/fs/cifs/transport.c
index 0961336..3097ee5 100644
--- a/fs/cifs/transport.c
+++ b/fs/cifs/transport.c
@@ -304,7 +304,8 @@
 static int
 wait_for_free_request(struct TCP_Server_Info *server, const int optype)
 {
-	return wait_for_free_credits(server, optype, get_credits_field(server));
+	return wait_for_free_credits(server, optype,
+				     server->ops->get_credits_field(server));
 }
 
 static int allocate_mid(struct cifs_ses *ses, struct smb_hdr *in_buf,
@@ -396,7 +397,7 @@
 	rc = cifs_setup_async_request(server, iov, nvec, &mid);
 	if (rc) {
 		mutex_unlock(&server->srv_mutex);
-		cifs_add_credits(server, 1);
+		add_credits(server, 1);
 		wake_up(&server->request_q);
 		return rc;
 	}
@@ -418,7 +419,7 @@
 	return rc;
 out_err:
 	delete_mid(mid);
-	cifs_add_credits(server, 1);
+	add_credits(server, 1);
 	wake_up(&server->request_q);
 	return rc;
 }
@@ -483,41 +484,11 @@
 	return rc;
 }
 
-/*
- * An NT cancel request header looks just like the original request except:
- *
- * The Command is SMB_COM_NT_CANCEL
- * The WordCount is zeroed out
- * The ByteCount is zeroed out
- *
- * This function mangles an existing request buffer into a
- * SMB_COM_NT_CANCEL request and then sends it.
- */
-static int
-send_nt_cancel(struct TCP_Server_Info *server, struct smb_hdr *in_buf,
-		struct mid_q_entry *mid)
+static inline int
+send_cancel(struct TCP_Server_Info *server, void *buf, struct mid_q_entry *mid)
 {
-	int rc = 0;
-
-	/* -4 for RFC1001 length and +2 for BCC field */
-	in_buf->smb_buf_length = cpu_to_be32(sizeof(struct smb_hdr) - 4  + 2);
-	in_buf->Command = SMB_COM_NT_CANCEL;
-	in_buf->WordCount = 0;
-	put_bcc(0, in_buf);
-
-	mutex_lock(&server->srv_mutex);
-	rc = cifs_sign_smb(in_buf, server, &mid->sequence_number);
-	if (rc) {
-		mutex_unlock(&server->srv_mutex);
-		return rc;
-	}
-	rc = smb_send(server, in_buf, be32_to_cpu(in_buf->smb_buf_length));
-	mutex_unlock(&server->srv_mutex);
-
-	cFYI(1, "issued NT_CANCEL for mid %u, rc = %d",
-		in_buf->Mid, rc);
-
-	return rc;
+	return server->ops->send_cancel ?
+				server->ops->send_cancel(server, buf, mid) : 0;
 }
 
 int
@@ -544,7 +515,7 @@
 	return map_smb_to_linux_error(mid->resp_buf, log_error);
 }
 
-static int
+int
 cifs_setup_request(struct cifs_ses *ses, struct kvec *iov,
 		   unsigned int nvec, struct mid_q_entry **ret_mid)
 {
@@ -607,12 +578,12 @@
 
 	mutex_lock(&ses->server->srv_mutex);
 
-	rc = cifs_setup_request(ses, iov, n_vec, &midQ);
+	rc = ses->server->ops->setup_request(ses, iov, n_vec, &midQ);
 	if (rc) {
 		mutex_unlock(&ses->server->srv_mutex);
 		cifs_small_buf_release(buf);
 		/* Update # of requests on wire to server */
-		cifs_add_credits(ses->server, 1);
+		add_credits(ses->server, 1);
 		return rc;
 	}
 
@@ -636,13 +607,13 @@
 
 	rc = wait_for_response(ses->server, midQ);
 	if (rc != 0) {
-		send_nt_cancel(ses->server, (struct smb_hdr *)buf, midQ);
+		send_cancel(ses->server, buf, midQ);
 		spin_lock(&GlobalMid_Lock);
 		if (midQ->mid_state == MID_REQUEST_SUBMITTED) {
 			midQ->callback = DeleteMidQEntry;
 			spin_unlock(&GlobalMid_Lock);
 			cifs_small_buf_release(buf);
-			cifs_add_credits(ses->server, 1);
+			add_credits(ses->server, 1);
 			return rc;
 		}
 		spin_unlock(&GlobalMid_Lock);
@@ -652,7 +623,7 @@
 
 	rc = cifs_sync_mid_result(midQ, ses->server);
 	if (rc != 0) {
-		cifs_add_credits(ses->server, 1);
+		add_credits(ses->server, 1);
 		return rc;
 	}
 
@@ -670,14 +641,15 @@
 	else
 		*pRespBufType = CIFS_SMALL_BUFFER;
 
-	rc = cifs_check_receive(midQ, ses->server, flags & CIFS_LOG_ERROR);
+	rc = ses->server->ops->check_receive(midQ, ses->server,
+					     flags & CIFS_LOG_ERROR);
 
 	/* mark it so buf will not be freed by delete_mid */
 	if ((flags & CIFS_NO_RESP) == 0)
 		midQ->resp_buf = NULL;
 out:
 	delete_mid(midQ);
-	cifs_add_credits(ses->server, 1);
+	add_credits(ses->server, 1);
 
 	return rc;
 }
@@ -727,7 +699,7 @@
 	if (rc) {
 		mutex_unlock(&ses->server->srv_mutex);
 		/* Update # of requests on wire to server */
-		cifs_add_credits(ses->server, 1);
+		add_credits(ses->server, 1);
 		return rc;
 	}
 
@@ -753,13 +725,13 @@
 
 	rc = wait_for_response(ses->server, midQ);
 	if (rc != 0) {
-		send_nt_cancel(ses->server, in_buf, midQ);
+		send_cancel(ses->server, in_buf, midQ);
 		spin_lock(&GlobalMid_Lock);
 		if (midQ->mid_state == MID_REQUEST_SUBMITTED) {
 			/* no longer considered to be "in-flight" */
 			midQ->callback = DeleteMidQEntry;
 			spin_unlock(&GlobalMid_Lock);
-			cifs_add_credits(ses->server, 1);
+			add_credits(ses->server, 1);
 			return rc;
 		}
 		spin_unlock(&GlobalMid_Lock);
@@ -767,7 +739,7 @@
 
 	rc = cifs_sync_mid_result(midQ, ses->server);
 	if (rc != 0) {
-		cifs_add_credits(ses->server, 1);
+		add_credits(ses->server, 1);
 		return rc;
 	}
 
@@ -783,7 +755,7 @@
 	rc = cifs_check_receive(midQ, ses->server, 0);
 out:
 	delete_mid(midQ);
-	cifs_add_credits(ses->server, 1);
+	add_credits(ses->server, 1);
 
 	return rc;
 }
@@ -807,7 +779,7 @@
 
 	pSMB->LockType = LOCKING_ANDX_CANCEL_LOCK|LOCKING_ANDX_LARGE_FILES;
 	pSMB->Timeout = 0;
-	pSMB->hdr.Mid = GetNextMid(ses->server);
+	pSMB->hdr.Mid = get_next_mid(ses->server);
 
 	return SendReceive(xid, ses, in_buf, out_buf,
 			&bytes_returned, 0);
@@ -898,7 +870,7 @@
 		if (in_buf->Command == SMB_COM_TRANSACTION2) {
 			/* POSIX lock. We send a NT_CANCEL SMB to cause the
 			   blocking lock to return. */
-			rc = send_nt_cancel(ses->server, in_buf, midQ);
+			rc = send_cancel(ses->server, in_buf, midQ);
 			if (rc) {
 				delete_mid(midQ);
 				return rc;
@@ -919,7 +891,7 @@
 
 		rc = wait_for_response(ses->server, midQ);
 		if (rc) {
-			send_nt_cancel(ses->server, in_buf, midQ);
+			send_cancel(ses->server, in_buf, midQ);
 			spin_lock(&GlobalMid_Lock);
 			if (midQ->mid_state == MID_REQUEST_SUBMITTED) {
 				/* no longer considered to be "in-flight" */
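
A minimal sketch (not part of the patch) of the optional-hook dispatch that send_cancel() now uses; struct proto_ops and do_send_cancel() are simplified stand-ins for the real smb_version_operations wiring:

struct proto_ops {
	int (*send_cancel)(void *server, void *buf, void *mid);
};

static int do_send_cancel(const struct proto_ops *ops,
			  void *server, void *buf, void *mid)
{
	/* optional hook: protocols without a cancel op report success */
	return ops->send_cancel ? ops->send_cancel(server, buf, mid) : 0;
}
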
diff --git a/fs/coda/inode.c b/fs/coda/inode.c
index 2870597..f181312 100644
--- a/fs/coda/inode.c
+++ b/fs/coda/inode.c
@@ -244,7 +244,7 @@
 static void coda_evict_inode(struct inode *inode)
 {
 	truncate_inode_pages(&inode->i_data, 0);
-	end_writeback(inode);
+	clear_inode(inode);
 	coda_cache_clear_inode(inode);
 }
 
diff --git a/fs/compat.c b/fs/compat.c
index 0781e61..6161255 100644
--- a/fs/compat.c
+++ b/fs/compat.c
@@ -532,7 +532,7 @@
 ssize_t compat_rw_copy_check_uvector(int type,
 		const struct compat_iovec __user *uvector, unsigned long nr_segs,
 		unsigned long fast_segs, struct iovec *fast_pointer,
-		struct iovec **ret_pointer, int check_access)
+		struct iovec **ret_pointer)
 {
 	compat_ssize_t tot_len;
 	struct iovec *iov = *ret_pointer = fast_pointer;
@@ -579,7 +579,7 @@
 		}
 		if (len < 0)	/* size_t not fitting in compat_ssize_t .. */
 			goto out;
-		if (check_access &&
+		if (type >= 0 &&
 		    !access_ok(vrfy_dir(type), compat_ptr(buf), len)) {
 			ret = -EFAULT;
 			goto out;
@@ -871,12 +871,12 @@
 {
 	int error;
 	struct file *file;
+	int fput_needed;
 	struct compat_readdir_callback buf;
 
-	error = -EBADF;
-	file = fget(fd);
+	file = fget_light(fd, &fput_needed);
 	if (!file)
-		goto out;
+		return -EBADF;
 
 	buf.result = 0;
 	buf.dirent = dirent;
@@ -885,8 +885,7 @@
 	if (buf.result)
 		error = buf.result;
 
-	fput(file);
-out:
+	fput_light(file, fput_needed);
 	return error;
 }
 
@@ -953,16 +952,15 @@
 	struct file * file;
 	struct compat_linux_dirent __user * lastdirent;
 	struct compat_getdents_callback buf;
+	int fput_needed;
 	int error;
 
-	error = -EFAULT;
 	if (!access_ok(VERIFY_WRITE, dirent, count))
-		goto out;
+		return -EFAULT;
 
-	error = -EBADF;
-	file = fget(fd);
+	file = fget_light(fd, &fput_needed);
 	if (!file)
-		goto out;
+		return -EBADF;
 
 	buf.current_dir = dirent;
 	buf.previous = NULL;
@@ -979,8 +977,7 @@
 		else
 			error = count - buf.count;
 	}
-	fput(file);
-out:
+	fput_light(file, fput_needed);
 	return error;
 }
 
@@ -1041,16 +1038,15 @@
 	struct file * file;
 	struct linux_dirent64 __user * lastdirent;
 	struct compat_getdents_callback64 buf;
+	int fput_needed;
 	int error;
 
-	error = -EFAULT;
 	if (!access_ok(VERIFY_WRITE, dirent, count))
-		goto out;
+		return -EFAULT;
 
-	error = -EBADF;
-	file = fget(fd);
+	file = fget_light(fd, &fput_needed);
 	if (!file)
-		goto out;
+		return -EBADF;
 
 	buf.current_dir = dirent;
 	buf.previous = NULL;
@@ -1068,8 +1064,7 @@
 		else
 			error = count - buf.count;
 	}
-	fput(file);
-out:
+	fput_light(file, fput_needed);
 	return error;
 }
 #endif /* ! __ARCH_OMIT_COMPAT_SYS_GETDENTS64 */
@@ -1094,7 +1089,7 @@
 		goto out;
 
 	tot_len = compat_rw_copy_check_uvector(type, uvector, nr_segs,
-					       UIO_FASTIOV, iovstack, &iov, 1);
+					       UIO_FASTIOV, iovstack, &iov);
 	if (tot_len == 0) {
 		ret = 0;
 		goto out;
@@ -1547,7 +1542,6 @@
 				 compat_ptr(a.exp), compat_ptr(a.tvp));
 }
 
-#ifdef HAVE_SET_RESTORE_SIGMASK
 static long do_compat_pselect(int n, compat_ulong_t __user *inp,
 	compat_ulong_t __user *outp, compat_ulong_t __user *exp,
 	struct compat_timespec __user *tsp, compat_sigset_t __user *sigmask,
@@ -1670,11 +1664,9 @@
 
 	return ret;
 }
-#endif /* HAVE_SET_RESTORE_SIGMASK */
 
 #ifdef CONFIG_EPOLL
 
-#ifdef HAVE_SET_RESTORE_SIGMASK
 asmlinkage long compat_sys_epoll_pwait(int epfd,
 			struct compat_epoll_event __user *events,
 			int maxevents, int timeout,
@@ -1718,7 +1710,6 @@
 
 	return err;
 }
-#endif /* HAVE_SET_RESTORE_SIGMASK */
 
 #endif /* CONFIG_EPOLL */
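
As a kernel-context sketch (not part of the patch), this is the fget_light()/fput_light() pattern the compat getdents paths switch to; with_fd() is a hypothetical caller, and fput_needed records whether a real reference was taken so the common case avoids the atomic get/put:

#include <linux/file.h>
#include <linux/errno.h>

static int with_fd(unsigned int fd)
{
	int fput_needed;
	struct file *file;

	file = fget_light(fd, &fput_needed);
	if (!file)
		return -EBADF;

	/* ... operate on file ... */

	fput_light(file, fput_needed);
	return 0;
}
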
 
diff --git a/fs/dcache.c b/fs/dcache.c
index 4435d8b..4046904 100644
--- a/fs/dcache.c
+++ b/fs/dcache.c
@@ -2575,7 +2575,7 @@
 	bool slash = false;
 	int error = 0;
 
-	br_read_lock(vfsmount_lock);
+	br_read_lock(&vfsmount_lock);
 	while (dentry != root->dentry || vfsmnt != root->mnt) {
 		struct dentry * parent;
 
@@ -2606,7 +2606,7 @@
 		error = prepend(buffer, buflen, "/", 1);
 
 out:
-	br_read_unlock(vfsmount_lock);
+	br_read_unlock(&vfsmount_lock);
 	return error;
 
 global_root:
diff --git a/fs/direct-io.c b/fs/direct-io.c
index f4aadd1..0c85fae 100644
--- a/fs/direct-io.c
+++ b/fs/direct-io.c
@@ -145,50 +145,6 @@
 
 static struct kmem_cache *dio_cache __read_mostly;
 
-static void __inode_dio_wait(struct inode *inode)
-{
-	wait_queue_head_t *wq = bit_waitqueue(&inode->i_state, __I_DIO_WAKEUP);
-	DEFINE_WAIT_BIT(q, &inode->i_state, __I_DIO_WAKEUP);
-
-	do {
-		prepare_to_wait(wq, &q.wait, TASK_UNINTERRUPTIBLE);
-		if (atomic_read(&inode->i_dio_count))
-			schedule();
-	} while (atomic_read(&inode->i_dio_count));
-	finish_wait(wq, &q.wait);
-}
-
-/**
- * inode_dio_wait - wait for outstanding DIO requests to finish
- * @inode: inode to wait for
- *
- * Waits for all pending direct I/O requests to finish so that we can
- * proceed with a truncate or equivalent operation.
- *
- * Must be called under a lock that serializes taking new references
- * to i_dio_count, usually by inode->i_mutex.
- */
-void inode_dio_wait(struct inode *inode)
-{
-	if (atomic_read(&inode->i_dio_count))
-		__inode_dio_wait(inode);
-}
-EXPORT_SYMBOL(inode_dio_wait);
-
-/*
- * inode_dio_done - signal finish of a direct I/O requests
- * @inode: inode the direct I/O happens on
- *
- * This is called once we've finished processing a direct I/O request,
- * and is used to wake up callers waiting for direct I/O to be quiesced.
- */
-void inode_dio_done(struct inode *inode)
-{
-	if (atomic_dec_and_test(&inode->i_dio_count))
-		wake_up_bit(&inode->i_state, __I_DIO_WAKEUP);
-}
-EXPORT_SYMBOL(inode_dio_done);
-
 /*
  * How many pages are in the queue?
  */
diff --git a/fs/ecryptfs/inode.c b/fs/ecryptfs/inode.c
index ab35b11..a07441a 100644
--- a/fs/ecryptfs/inode.c
+++ b/fs/ecryptfs/inode.c
@@ -660,11 +660,10 @@
 {
 	struct dentry *lower_dentry = ecryptfs_dentry_to_lower(dentry);
 	char *lower_buf;
-	size_t lower_bufsiz = PATH_MAX;
 	mm_segment_t old_fs;
 	int rc;
 
-	lower_buf = kmalloc(lower_bufsiz, GFP_KERNEL);
+	lower_buf = kmalloc(PATH_MAX, GFP_KERNEL);
 	if (!lower_buf) {
 		rc = -ENOMEM;
 		goto out;
@@ -673,58 +672,29 @@
 	set_fs(get_ds());
 	rc = lower_dentry->d_inode->i_op->readlink(lower_dentry,
 						   (char __user *)lower_buf,
-						   lower_bufsiz);
+						   PATH_MAX);
 	set_fs(old_fs);
 	if (rc < 0)
 		goto out;
-	lower_bufsiz = rc;
 	rc = ecryptfs_decode_and_decrypt_filename(buf, bufsiz, dentry,
-						  lower_buf, lower_bufsiz);
+						  lower_buf, rc);
 out:
 	kfree(lower_buf);
 	return rc;
 }
 
-static int
-ecryptfs_readlink(struct dentry *dentry, char __user *buf, int bufsiz)
-{
-	char *kbuf;
-	size_t kbufsiz, copied;
-	int rc;
-
-	rc = ecryptfs_readlink_lower(dentry, &kbuf, &kbufsiz);
-	if (rc)
-		goto out;
-	copied = min_t(size_t, bufsiz, kbufsiz);
-	rc = copy_to_user(buf, kbuf, copied) ? -EFAULT : copied;
-	kfree(kbuf);
-	fsstack_copy_attr_atime(dentry->d_inode,
-				ecryptfs_dentry_to_lower(dentry)->d_inode);
-out:
-	return rc;
-}
-
 static void *ecryptfs_follow_link(struct dentry *dentry, struct nameidata *nd)
 {
 	char *buf;
-	int len = PAGE_SIZE, rc;
-	mm_segment_t old_fs;
+	size_t len = PATH_MAX;
+	int rc;
 
-	/* Released in ecryptfs_put_link(); only release here on error */
-	buf = kmalloc(len, GFP_KERNEL);
-	if (!buf) {
-		buf = ERR_PTR(-ENOMEM);
+	rc = ecryptfs_readlink_lower(dentry, &buf, &len);
+	if (rc)
 		goto out;
-	}
-	old_fs = get_fs();
-	set_fs(get_ds());
-	rc = dentry->d_inode->i_op->readlink(dentry, (char __user *)buf, len);
-	set_fs(old_fs);
-	if (rc < 0) {
-		kfree(buf);
-		buf = ERR_PTR(rc);
-	} else
-		buf[rc] = '\0';
+	fsstack_copy_attr_atime(dentry->d_inode,
+				ecryptfs_dentry_to_lower(dentry)->d_inode);
+	buf[len] = '\0';
 out:
 	nd_set_link(nd, buf);
 	return NULL;
@@ -1153,7 +1123,7 @@
 }
 
 const struct inode_operations ecryptfs_symlink_iops = {
-	.readlink = ecryptfs_readlink,
+	.readlink = generic_readlink,
 	.follow_link = ecryptfs_follow_link,
 	.put_link = ecryptfs_put_link,
 	.permission = ecryptfs_permission,
diff --git a/fs/ecryptfs/super.c b/fs/ecryptfs/super.c
index 2dd946b..e879cf8 100644
--- a/fs/ecryptfs/super.c
+++ b/fs/ecryptfs/super.c
@@ -133,7 +133,7 @@
 static void ecryptfs_evict_inode(struct inode *inode)
 {
 	truncate_inode_pages(&inode->i_data, 0);
-	end_writeback(inode);
+	clear_inode(inode);
 	iput(ecryptfs_inode_to_lower(inode));
 }
 
diff --git a/fs/eventfd.c b/fs/eventfd.c
index dba15fe..d81b9f6 100644
--- a/fs/eventfd.c
+++ b/fs/eventfd.c
@@ -46,20 +46,16 @@
 * value, and we signal this as an overflow condition by returning a POLLERR
  * to poll(2).
  *
- * Returns @n in case of success, a non-negative number lower than @n in case
- * of overflow, or the following error codes:
- *
- * -EINVAL    : The value of @n is negative.
+ * Returns the amount by which the counter was incremented.  This will be less
+ * than @n if the counter has overflowed.
  */
-int eventfd_signal(struct eventfd_ctx *ctx, int n)
+__u64 eventfd_signal(struct eventfd_ctx *ctx, __u64 n)
 {
 	unsigned long flags;
 
-	if (n < 0)
-		return -EINVAL;
 	spin_lock_irqsave(&ctx->wqh.lock, flags);
 	if (ULLONG_MAX - ctx->count < n)
-		n = (int) (ULLONG_MAX - ctx->count);
+		n = ULLONG_MAX - ctx->count;
 	ctx->count += n;
 	if (waitqueue_active(&ctx->wqh))
 		wake_up_locked_poll(&ctx->wqh, POLLIN);
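
A minimal user-space sketch (not part of the patch) of the saturation rule the reworked eventfd_signal() applies: the increment is clamped so the counter never passes ULLONG_MAX, and the amount actually added is returned to the caller:

#include <stdint.h>

static uint64_t saturating_add(uint64_t *counter, uint64_t n)
{
	if (UINT64_MAX - *counter < n)	/* would overflow: clamp */
		n = UINT64_MAX - *counter;
	*counter += n;
	return n;			/* may be less than requested */
}
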
diff --git a/fs/eventpoll.c b/fs/eventpoll.c
index 079d1be..74598f6 100644
--- a/fs/eventpoll.c
+++ b/fs/eventpoll.c
@@ -1853,8 +1853,6 @@
 	return error;
 }
 
-#ifdef HAVE_SET_RESTORE_SIGMASK
-
 /*
  * Implement the event wait interface for the eventpoll file. It is the kernel
  * part of the user space epoll_pwait(2).
@@ -1899,8 +1897,6 @@
 	return error;
 }
 
-#endif /* HAVE_SET_RESTORE_SIGMASK */
-
 static int __init eventpoll_init(void)
 {
 	struct sysinfo si;
diff --git a/fs/exec.c b/fs/exec.c
index 52c9e2f..da27b91 100644
--- a/fs/exec.c
+++ b/fs/exec.c
@@ -280,10 +280,6 @@
 	vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
 	INIT_LIST_HEAD(&vma->anon_vma_chain);
 
-	err = security_file_mmap(NULL, 0, 0, 0, vma->vm_start, 1);
-	if (err)
-		goto err;
-
 	err = insert_vm_struct(mm, vma);
 	if (err)
 		goto err;
@@ -823,10 +819,10 @@
 	/* Notify parent that we're no longer interested in the old VM */
 	tsk = current;
 	old_mm = current->mm;
-	sync_mm_rss(old_mm);
 	mm_release(tsk, old_mm);
 
 	if (old_mm) {
+		sync_mm_rss(old_mm);
 		/*
 		 * Make sure that if there is a core dump in progress
 		 * for the old mm, we get out and die instead of going
diff --git a/fs/exofs/Kbuild b/fs/exofs/Kbuild
index 352ba14..389ba83 100644
--- a/fs/exofs/Kbuild
+++ b/fs/exofs/Kbuild
@@ -16,5 +16,5 @@
 libore-y := ore.o ore_raid.o
 obj-$(CONFIG_ORE) += libore.o
 
-exofs-y := inode.o file.o symlink.o namei.o dir.o super.o
+exofs-y := inode.o file.o symlink.o namei.o dir.o super.o sys.o
 obj-$(CONFIG_EXOFS_FS) += exofs.o
diff --git a/fs/exofs/exofs.h b/fs/exofs/exofs.h
index ca9d496..fffe86f 100644
--- a/fs/exofs/exofs.h
+++ b/fs/exofs/exofs.h
@@ -56,6 +56,9 @@
 struct exofs_dev {
 	struct ore_dev ored;
 	unsigned did;
+	unsigned urilen;
+	uint8_t *uri;
+	struct kobject ed_kobj;
 };
 /*
  * our extension to the in-memory superblock
@@ -73,6 +76,7 @@
 	struct ore_layout	layout;		/* Default files layout       */
 	struct ore_comp one_comp;		/* id & cred of partition id=0*/
 	struct ore_components oc;		/* comps for the partition    */
+	struct kobject	s_kobj;			/* holds per-sbi kobject      */
 };
 
 /*
@@ -176,6 +180,16 @@
 			   const struct osd_obj_id *obj);
 int exofs_sbi_write_stats(struct exofs_sb_info *sbi);
 
+/* sys.c                 */
+int exofs_sysfs_init(void);
+void exofs_sysfs_uninit(void);
+int exofs_sysfs_sb_add(struct exofs_sb_info *sbi,
+		       struct exofs_dt_device_info *dt_dev);
+void exofs_sysfs_sb_del(struct exofs_sb_info *sbi);
+int exofs_sysfs_odev_add(struct exofs_dev *edev,
+			 struct exofs_sb_info *sbi);
+void exofs_sysfs_dbg_print(void);
+
 /*********************
  * operation vectors *
  *********************/
diff --git a/fs/exofs/inode.c b/fs/exofs/inode.c
index ea5e1f9..5badb0c 100644
--- a/fs/exofs/inode.c
+++ b/fs/exofs/inode.c
@@ -1473,7 +1473,7 @@
 		goto no_delete;
 
 	inode->i_size = 0;
-	end_writeback(inode);
+	clear_inode(inode);
 
 	/* if we are deleting an obj that hasn't been created yet, wait.
 	 * This also makes sure that create_done cannot be called with an
@@ -1503,5 +1503,5 @@
 	return;
 
 no_delete:
-	end_writeback(inode);
+	clear_inode(inode);
 }
diff --git a/fs/exofs/super.c b/fs/exofs/super.c
index 735ca06..4337836 100644
--- a/fs/exofs/super.c
+++ b/fs/exofs/super.c
@@ -472,6 +472,7 @@
 	_exofs_print_device("Unmounting", NULL, ore_comp_dev(&sbi->oc, 0),
 			    sbi->one_comp.obj.partition);
 
+	exofs_sysfs_sb_del(sbi);
 	bdi_destroy(&sbi->bdi);
 	exofs_free_sbi(sbi);
 	sb->s_fs_info = NULL;
@@ -632,6 +633,12 @@
 	memcpy(&sbi->oc.ods[numdevs], &sbi->oc.ods[0],
 		(numdevs - 1) * sizeof(sbi->oc.ods[0]));
 
+	/* create a sysfs subdir under which we put the device table
+	 * and cluster layout. A superblock is identified by the string:
+	 *	"dev[0].osdname"_"pid"
+	 */
+	exofs_sysfs_sb_add(sbi, &dt->dt_dev_table[0]);
+
 	for (i = 0; i < numdevs; i++) {
 		struct exofs_fscb fscb;
 		struct osd_dev_info odi;
@@ -657,6 +664,7 @@
 			eds[i].ored.od = fscb_od;
 			++sbi->oc.numdevs;
 			fscb_od = NULL;
+			exofs_sysfs_odev_add(&eds[i], sbi);
 			continue;
 		}
 
@@ -682,6 +690,7 @@
 				  odi.osdname);
 			goto out;
 		}
+		exofs_sysfs_odev_add(&eds[i], sbi);
 
 		/* TODO: verify other information is correct and FS-uuid
 		 *	 matches. Benny what did you say about device table
@@ -745,7 +754,6 @@
 	sbi->one_comp.obj.partition = opts->pid;
 	sbi->one_comp.obj.id = 0;
 	exofs_make_credential(sbi->one_comp.cred, &sbi->one_comp.obj);
-	sbi->oc.numdevs = 1;
 	sbi->oc.single_comp = EC_SINGLE_COMP;
 	sbi->oc.comps = &sbi->one_comp;
 
@@ -804,6 +812,7 @@
 			goto free_sbi;
 
 		ore_comp_set_dev(&sbi->oc, 0, od);
+		sbi->oc.numdevs = 1;
 	}
 
 	__sbi_read_stats(sbi);
@@ -844,6 +853,7 @@
 		goto free_sbi;
 	}
 
+	exofs_sysfs_dbg_print();
 	_exofs_print_device("Mounting", opts->dev_name,
 			    ore_comp_dev(&sbi->oc, 0),
 			    sbi->one_comp.obj.partition);
@@ -1023,6 +1033,9 @@
 	if (err)
 		goto out_d;
 
+	/* We don't fail if sysfs creation fails */
+	exofs_sysfs_init();
+
 	return 0;
 out_d:
 	destroy_inodecache();
@@ -1032,6 +1045,7 @@
 
 static void __exit exit_exofs(void)
 {
+	exofs_sysfs_uninit();
 	unregister_filesystem(&exofs_type);
 	destroy_inodecache();
 }
diff --git a/fs/exofs/sys.c b/fs/exofs/sys.c
new file mode 100644
index 0000000..5a7b691
--- /dev/null
+++ b/fs/exofs/sys.c
@@ -0,0 +1,200 @@
+/*
+ * Copyright (C) 2012
+ * Sachin Bhamare <sbhamare@panasas.com>
+ * Boaz Harrosh <bharrosh@panasas.com>
+ *
+ * This file is part of exofs.
+ *
+ * exofs is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License 2 as published by
+ * the Free Software Foundation.
+ *
+ * exofs is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with exofs; if not, write to the:
+ *	Free Software Foundation <licensing@fsf.org>
+ */
+
+#include <linux/kobject.h>
+#include <linux/device.h>
+
+#include "exofs.h"
+
+struct odev_attr {
+	struct attribute attr;
+	ssize_t (*show)(struct exofs_dev *, char *);
+	ssize_t (*store)(struct exofs_dev *, const char *, size_t);
+};
+
+static ssize_t odev_attr_show(struct kobject *kobj, struct attribute *attr,
+		char *buf)
+{
+	struct exofs_dev *edp = container_of(kobj, struct exofs_dev, ed_kobj);
+	struct odev_attr *a = container_of(attr, struct odev_attr, attr);
+
+	return a->show ? a->show(edp, buf) : 0;
+}
+
+static ssize_t odev_attr_store(struct kobject *kobj, struct attribute *attr,
+		const char *buf, size_t len)
+{
+	struct exofs_dev *edp = container_of(kobj, struct exofs_dev, ed_kobj);
+	struct odev_attr *a = container_of(attr, struct odev_attr, attr);
+
+	return a->store ? a->store(edp, buf, len) : len;
+}
+
+static const struct sysfs_ops odev_attr_ops = {
+	.show  = odev_attr_show,
+	.store = odev_attr_store,
+};
+
+
+static struct kset *exofs_kset;
+
+static ssize_t osdname_show(struct exofs_dev *edp, char *buf)
+{
+	struct osd_dev *odev = edp->ored.od;
+	const struct osd_dev_info *odi = osduld_device_info(odev);
+
+	return snprintf(buf, odi->osdname_len + 1, "%s", odi->osdname);
+}
+
+static ssize_t systemid_show(struct exofs_dev *edp, char *buf)
+{
+	struct osd_dev *odev = edp->ored.od;
+	const struct osd_dev_info *odi = osduld_device_info(odev);
+
+	memcpy(buf, odi->systemid, odi->systemid_len);
+	return odi->systemid_len;
+}
+
+static ssize_t uri_show(struct exofs_dev *edp, char *buf)
+{
+	return snprintf(buf, edp->urilen, "%s", edp->uri);
+}
+
+static ssize_t uri_store(struct exofs_dev *edp, const char *buf, size_t len)
+{
+	edp->urilen = strlen(buf) + 1;
+	edp->uri = krealloc(edp->uri, edp->urilen, GFP_KERNEL);
+	strncpy(edp->uri, buf, edp->urilen);
+	return edp->urilen;
+}
+
+#define OSD_ATTR(name, mode, show, store) \
+	static struct odev_attr odev_attr_##name = \
+					__ATTR(name, mode, show, store)
+
+OSD_ATTR(osdname, S_IRUGO, osdname_show, NULL);
+OSD_ATTR(systemid, S_IRUGO, systemid_show, NULL);
+OSD_ATTR(uri, S_IRWXU, uri_show, uri_store);
+
+static struct attribute *odev_attrs[] = {
+	&odev_attr_osdname.attr,
+	&odev_attr_systemid.attr,
+	&odev_attr_uri.attr,
+	NULL,
+};
+
+static struct kobj_type odev_ktype = {
+	.default_attrs	= odev_attrs,
+	.sysfs_ops	= &odev_attr_ops,
+};
+
+static struct kobj_type uuid_ktype = {
+};
+
+void exofs_sysfs_dbg_print(void)
+{
+#ifdef CONFIG_EXOFS_DEBUG
+	struct kobject *k_name, *k_tmp;
+
+	list_for_each_entry_safe(k_name, k_tmp, &exofs_kset->list, entry) {
+		printk(KERN_INFO "%s: name %s ref %d\n",
+			__func__, kobject_name(k_name),
+			(int)atomic_read(&k_name->kref.refcount));
+	}
+#endif
+}
+/*
+ * This function removes all kobjects under exofs_kset.
+ * At the end of it, the exofs_kset kobject will have a refcount
+ * of 1, which gets decremented only on exofs module unload.
+ */
+void exofs_sysfs_sb_del(struct exofs_sb_info *sbi)
+{
+	struct kobject *k_name, *k_tmp;
+	struct kobject *s_kobj = &sbi->s_kobj;
+
+	list_for_each_entry_safe(k_name, k_tmp, &exofs_kset->list, entry) {
+		/* Remove all that are children of this SBI */
+		if (k_name->parent == s_kobj)
+			kobject_put(k_name);
+	}
+	kobject_put(s_kobj);
+}
+
+/*
+ * This function creates sysfs entries to hold the current exofs cluster
+ * instance (uniquely identified by osdname,pid tuple).
+ * This function gets called once per exofs mount instance.
+ */
+int exofs_sysfs_sb_add(struct exofs_sb_info *sbi,
+		       struct exofs_dt_device_info *dt_dev)
+{
+	struct kobject *s_kobj;
+	int retval = 0;
+	uint64_t pid = sbi->one_comp.obj.partition;
+
+	/* allocate new uuid dirent */
+	s_kobj = &sbi->s_kobj;
+	s_kobj->kset = exofs_kset;
+	retval = kobject_init_and_add(s_kobj, &uuid_ktype,
+			&exofs_kset->kobj,  "%s_%llx", dt_dev->osdname, pid);
+	if (retval) {
+		EXOFS_ERR("ERROR: Failed to create sysfs entry for "
+			  "uuid-%s_%llx => %d\n", dt_dev->osdname, pid, retval);
+		return -ENOMEM;
+	}
+	return 0;
+}
+
+int exofs_sysfs_odev_add(struct exofs_dev *edev, struct exofs_sb_info *sbi)
+{
+	struct kobject *d_kobj;
+	int retval = 0;
+
+	/* create an osd device group which contains the following attributes:
+	 * osdname, systemid & uri
+	 */
+	d_kobj = &edev->ed_kobj;
+	d_kobj->kset = exofs_kset;
+	retval = kobject_init_and_add(d_kobj, &odev_ktype,
+			&sbi->s_kobj, "dev%u", edev->did);
+	if (retval) {
+		EXOFS_ERR("ERROR: Failed to create sysfs entry for "
+				"device dev%u\n", edev->did);
+		return retval;
+	}
+	return 0;
+}
+
+int exofs_sysfs_init(void)
+{
+	exofs_kset = kset_create_and_add("exofs", NULL, fs_kobj);
+	if (!exofs_kset) {
+		EXOFS_ERR("ERROR: kset_create_and_add exofs failed\n");
+		return -ENOMEM;
+	}
+	return 0;
+}
+
+void exofs_sysfs_uninit(void)
+{
+	kset_unregister(exofs_kset);
+}
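
For orientation (not part of the patch), the sysfs layout these helpers create looks roughly like the following, assuming a single mounted cluster and one device; the osdname/pid pair comes from exofs_sysfs_sb_add() and the per-device directory from exofs_sysfs_odev_add():

/sys/fs/exofs/                     kset created by exofs_sysfs_init()
    <osdname0>_<pid>/              added by exofs_sysfs_sb_add()
        dev0/                      added by exofs_sysfs_odev_add()
            osdname                read-only (S_IRUGO)
            systemid               read-only (S_IRUGO)
            uri                    owner read/write (S_IRWXU, as in the patch)
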
diff --git a/fs/exportfs/expfs.c b/fs/exportfs/expfs.c
index b05acb7..b0201ca 100644
--- a/fs/exportfs/expfs.c
+++ b/fs/exportfs/expfs.c
@@ -304,24 +304,23 @@
 
 /**
  * export_encode_fh - default export_operations->encode_fh function
- * @dentry:  the dentry to encode
+ * @inode:   the object to encode
  * @fh:      where to store the file handle fragment
  * @max_len: maximum length to store there
- * @connectable: whether to store parent information
+ * @parent:  parent directory inode, if wanted
  *
  * This default encode_fh function assumes that the 32 inode number
  * is suitable for locating an inode, and that the generation number
  * can be used to check that it is still valid.  It places them in the
  * filehandle fragment where export_decode_fh expects to find them.
  */
-static int export_encode_fh(struct dentry *dentry, struct fid *fid,
-		int *max_len, int connectable)
+static int export_encode_fh(struct inode *inode, struct fid *fid,
+		int *max_len, struct inode *parent)
 {
-	struct inode * inode = dentry->d_inode;
 	int len = *max_len;
 	int type = FILEID_INO32_GEN;
 
-	if (connectable && (len < 4)) {
+	if (parent && (len < 4)) {
 		*max_len = 4;
 		return 255;
 	} else if (len < 2) {
@@ -332,14 +331,9 @@
 	len = 2;
 	fid->i32.ino = inode->i_ino;
 	fid->i32.gen = inode->i_generation;
-	if (connectable && !S_ISDIR(inode->i_mode)) {
-		struct inode *parent;
-
-		spin_lock(&dentry->d_lock);
-		parent = dentry->d_parent->d_inode;
+	if (parent) {
 		fid->i32.parent_ino = parent->i_ino;
 		fid->i32.parent_gen = parent->i_generation;
-		spin_unlock(&dentry->d_lock);
 		len = 4;
 		type = FILEID_INO32_GEN_PARENT;
 	}
@@ -352,11 +346,22 @@
 {
 	const struct export_operations *nop = dentry->d_sb->s_export_op;
 	int error;
+	struct dentry *p = NULL;
+	struct inode *inode = dentry->d_inode, *parent = NULL;
 
+	if (connectable && !S_ISDIR(inode->i_mode)) {
+		p = dget_parent(dentry);
+		/*
+		 * note that while p might've ceased to be our parent already,
+		 * it's still pinned by us and still positive.
+		 */
+		parent = p->d_inode;
+	}
 	if (nop->encode_fh)
-		error = nop->encode_fh(dentry, fid->raw, max_len, connectable);
+		error = nop->encode_fh(inode, fid->raw, max_len, parent);
 	else
-		error = export_encode_fh(dentry, fid, max_len, connectable);
+		error = export_encode_fh(inode, fid, max_len, parent);
+	dput(p);
 
 	return error;
 }
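
For reference (not part of the patch), these are the two handle layouts export_encode_fh() emits, with lengths counted in 32-bit words, which is why a connectable handle needs len >= 4; the struct names below are illustrative:

#include <stdint.h>

struct fh_ino32_gen {		/* FILEID_INO32_GEN, len = 2 */
	uint32_t ino;
	uint32_t gen;
};

struct fh_ino32_gen_parent {	/* FILEID_INO32_GEN_PARENT, len = 4 */
	uint32_t ino;
	uint32_t gen;
	uint32_t parent_ino;
	uint32_t parent_gen;
};
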
diff --git a/fs/ext2/balloc.c b/fs/ext2/balloc.c
index 030c6d2..1c36139 100644
--- a/fs/ext2/balloc.c
+++ b/fs/ext2/balloc.c
@@ -165,7 +165,6 @@
 		struct ext2_sb_info *sbi = EXT2_SB(sb);
 
 		percpu_counter_add(&sbi->s_freeblocks_counter, count);
-		sb->s_dirt = 1;
 	}
 }
 
@@ -180,7 +179,6 @@
 		free_blocks = le16_to_cpu(desc->bg_free_blocks_count);
 		desc->bg_free_blocks_count = cpu_to_le16(free_blocks + count);
 		spin_unlock(sb_bgl_lock(sbi, group_no));
-		sb->s_dirt = 1;
 		mark_buffer_dirty(bh);
 	}
 }
@@ -479,7 +477,7 @@
 }
 
 /**
- * ext2_free_blocks_sb() -- Free given blocks and update quota and i_blocks
+ * ext2_free_blocks() -- Free given blocks and update quota and i_blocks
  * @inode:		inode
 * @block:		start physical block to free
  * @count:		number of blocks to free
diff --git a/fs/ext2/ialloc.c b/fs/ext2/ialloc.c
index 8b15cf8..c13eb7b 100644
--- a/fs/ext2/ialloc.c
+++ b/fs/ext2/ialloc.c
@@ -81,7 +81,6 @@
 	spin_unlock(sb_bgl_lock(EXT2_SB(sb), group));
 	if (dir)
 		percpu_counter_dec(&EXT2_SB(sb)->s_dirs_counter);
-	sb->s_dirt = 1;
 	mark_buffer_dirty(bh);
 }
 
@@ -543,7 +542,6 @@
 	}
 	spin_unlock(sb_bgl_lock(sbi, group));
 
-	sb->s_dirt = 1;
 	mark_buffer_dirty(bh2);
 	if (test_opt(sb, GRPID)) {
 		inode->i_mode = mode;
diff --git a/fs/ext2/inode.c b/fs/ext2/inode.c
index f9fa95f..264d315 100644
--- a/fs/ext2/inode.c
+++ b/fs/ext2/inode.c
@@ -90,7 +90,7 @@
 	}
 
 	invalidate_inode_buffers(inode);
-	end_writeback(inode);
+	clear_inode(inode);
 
 	ext2_discard_reservation(inode);
 	rsv = EXT2_I(inode)->i_block_alloc_info;
diff --git a/fs/ext2/super.c b/fs/ext2/super.c
index 38f8160..b3621cb 100644
--- a/fs/ext2/super.c
+++ b/fs/ext2/super.c
@@ -130,9 +130,6 @@
 
 	dquot_disable(sb, -1, DQUOT_USAGE_ENABLED | DQUOT_LIMITS_ENABLED);
 
-	if (sb->s_dirt)
-		ext2_write_super(sb);
-
 	ext2_xattr_put_super(sb);
 	if (!(sb->s_flags & MS_RDONLY)) {
 		struct ext2_super_block *es = sbi->s_es;
@@ -307,7 +304,6 @@
 	.write_inode	= ext2_write_inode,
 	.evict_inode	= ext2_evict_inode,
 	.put_super	= ext2_put_super,
-	.write_super	= ext2_write_super,
 	.sync_fs	= ext2_sync_fs,
 	.statfs		= ext2_statfs,
 	.remount_fs	= ext2_remount,
@@ -358,11 +354,6 @@
 				    ext2_nfs_get_inode);
 }
 
-/* Yes, most of these are left as NULL!!
- * A NULL value implies the default, which works with ext2-like file
- * systems, but can be improved upon.
- * Currently only get_parent is required.
- */
 static const struct export_operations ext2_export_ops = {
 	.fh_to_dentry = ext2_fh_to_dentry,
 	.fh_to_parent = ext2_fh_to_parent,
@@ -1176,7 +1167,6 @@
 	mark_buffer_dirty(EXT2_SB(sb)->s_sbh);
 	if (wait)
 		sync_dirty_buffer(EXT2_SB(sb)->s_sbh);
-	sb->s_dirt = 0;
 }
 
 /*
@@ -1209,8 +1199,6 @@
 {
 	if (!(sb->s_flags & MS_RDONLY))
 		ext2_sync_fs(sb, 1);
-	else
-		sb->s_dirt = 0;
 }
 
 static int ext2_remount (struct super_block * sb, int * flags, char * data)
@@ -1456,7 +1444,6 @@
 	struct buffer_head tmp_bh;
 	struct buffer_head *bh;
 
-	mutex_lock_nested(&inode->i_mutex, I_MUTEX_QUOTA);
 	while (towrite > 0) {
 		tocopy = sb->s_blocksize - offset < towrite ?
 				sb->s_blocksize - offset : towrite;
@@ -1486,16 +1473,13 @@
 		blk++;
 	}
 out:
-	if (len == towrite) {
-		mutex_unlock(&inode->i_mutex);
+	if (len == towrite)
 		return err;
-	}
 	if (inode->i_size < off+len-towrite)
 		i_size_write(inode, off+len-towrite);
 	inode->i_version++;
 	inode->i_mtime = inode->i_ctime = CURRENT_TIME;
 	mark_inode_dirty(inode);
-	mutex_unlock(&inode->i_mutex);
 	return len - towrite;
 }
 
diff --git a/fs/ext2/xattr.c b/fs/ext2/xattr.c
index 6dcafc7..b6754db 100644
--- a/fs/ext2/xattr.c
+++ b/fs/ext2/xattr.c
@@ -339,7 +339,6 @@
 	spin_lock(&EXT2_SB(sb)->s_lock);
 	EXT2_SET_COMPAT_FEATURE(sb, EXT2_FEATURE_COMPAT_EXT_ATTR);
 	spin_unlock(&EXT2_SB(sb)->s_lock);
-	sb->s_dirt = 1;
 	mark_buffer_dirty(EXT2_SB(sb)->s_sbh);
 }
 
diff --git a/fs/ext3/dir.c b/fs/ext3/dir.c
index cc761ad8..92490e9 100644
--- a/fs/ext3/dir.c
+++ b/fs/ext3/dir.c
@@ -21,30 +21,15 @@
  *
  */
 
+#include <linux/compat.h>
 #include "ext3.h"
 
 static unsigned char ext3_filetype_table[] = {
 	DT_UNKNOWN, DT_REG, DT_DIR, DT_CHR, DT_BLK, DT_FIFO, DT_SOCK, DT_LNK
 };
 
-static int ext3_readdir(struct file *, void *, filldir_t);
 static int ext3_dx_readdir(struct file * filp,
 			   void * dirent, filldir_t filldir);
-static int ext3_release_dir (struct inode * inode,
-				struct file * filp);
-
-const struct file_operations ext3_dir_operations = {
-	.llseek		= generic_file_llseek,
-	.read		= generic_read_dir,
-	.readdir	= ext3_readdir,		/* we take BKL. needed?*/
-	.unlocked_ioctl	= ext3_ioctl,
-#ifdef CONFIG_COMPAT
-	.compat_ioctl	= ext3_compat_ioctl,
-#endif
-	.fsync		= ext3_sync_file,	/* BKL held */
-	.release	= ext3_release_dir,
-};
-
 
 static unsigned char get_dtype(struct super_block *sb, int filetype)
 {
@@ -55,6 +40,25 @@
 	return (ext3_filetype_table[filetype]);
 }
 
+/**
+ * Check if the given dir-inode refers to an htree-indexed directory
+ * (or a directory which could potentially get converted to use htree
+ * indexing).
+ *
+ * Return 1 if it is a dx dir, 0 if not
+ */
+static int is_dx_dir(struct inode *inode)
+{
+	struct super_block *sb = inode->i_sb;
+
+	if (EXT3_HAS_COMPAT_FEATURE(inode->i_sb,
+		     EXT3_FEATURE_COMPAT_DIR_INDEX) &&
+	    ((EXT3_I(inode)->i_flags & EXT3_INDEX_FL) ||
+	     ((inode->i_size >> sb->s_blocksize_bits) == 1)))
+		return 1;
+
+	return 0;
+}
 
 int ext3_check_dir_entry (const char * function, struct inode * dir,
 			  struct ext3_dir_entry_2 * de,
@@ -94,18 +98,13 @@
 	unsigned long offset;
 	int i, stored;
 	struct ext3_dir_entry_2 *de;
-	struct super_block *sb;
 	int err;
 	struct inode *inode = filp->f_path.dentry->d_inode;
+	struct super_block *sb = inode->i_sb;
 	int ret = 0;
 	int dir_has_error = 0;
 
-	sb = inode->i_sb;
-
-	if (EXT3_HAS_COMPAT_FEATURE(inode->i_sb,
-				    EXT3_FEATURE_COMPAT_DIR_INDEX) &&
-	    ((EXT3_I(inode)->i_flags & EXT3_INDEX_FL) ||
-	     ((inode->i_size >> sb->s_blocksize_bits) == 1))) {
+	if (is_dx_dir(inode)) {
 		err = ext3_dx_readdir(filp, dirent, filldir);
 		if (err != ERR_BAD_DX_DIR) {
 			ret = err;
@@ -227,22 +226,87 @@
 	return ret;
 }
 
+static inline int is_32bit_api(void)
+{
+#ifdef CONFIG_COMPAT
+	return is_compat_task();
+#else
+	return (BITS_PER_LONG == 32);
+#endif
+}
+
 /*
  * These functions convert from the major/minor hash to an f_pos
- * value.
+ * value for dx directories
  *
- * Currently we only use major hash numer.  This is unfortunate, but
- * on 32-bit machines, the same VFS interface is used for lseek and
- * llseek, so if we use the 64 bit offset, then the 32-bit versions of
- * lseek/telldir/seekdir will blow out spectacularly, and from within
- * the ext2 low-level routine, we don't know if we're being called by
- * a 64-bit version of the system call or the 32-bit version of the
- * system call.  Worse yet, NFSv2 only allows for a 32-bit readdir
- * cookie.  Sigh.
+ * The upper layer (for example NFS) should specify FMODE_32BITHASH or
+ * FMODE_64BITHASH explicitly. On the other hand, we allow ext3 to be mounted
+ * directly on both 32-bit and 64-bit nodes, in which case neither
+ * FMODE_32BITHASH nor FMODE_64BITHASH is specified.
  */
-#define hash2pos(major, minor)	(major >> 1)
-#define pos2maj_hash(pos)	((pos << 1) & 0xffffffff)
-#define pos2min_hash(pos)	(0)
+static inline loff_t hash2pos(struct file *filp, __u32 major, __u32 minor)
+{
+	if ((filp->f_mode & FMODE_32BITHASH) ||
+	    (!(filp->f_mode & FMODE_64BITHASH) && is_32bit_api()))
+		return major >> 1;
+	else
+		return ((__u64)(major >> 1) << 32) | (__u64)minor;
+}
+
+static inline __u32 pos2maj_hash(struct file *filp, loff_t pos)
+{
+	if ((filp->f_mode & FMODE_32BITHASH) ||
+	    (!(filp->f_mode & FMODE_64BITHASH) && is_32bit_api()))
+		return (pos << 1) & 0xffffffff;
+	else
+		return ((pos >> 32) << 1) & 0xffffffff;
+}
+
+static inline __u32 pos2min_hash(struct file *filp, loff_t pos)
+{
+	if ((filp->f_mode & FMODE_32BITHASH) ||
+	    (!(filp->f_mode & FMODE_64BITHASH) && is_32bit_api()))
+		return 0;
+	else
+		return pos & 0xffffffff;
+}
+
+/*
+ * Return 32- or 64-bit end-of-file for dx directories
+ */
+static inline loff_t ext3_get_htree_eof(struct file *filp)
+{
+	if ((filp->f_mode & FMODE_32BITHASH) ||
+	    (!(filp->f_mode & FMODE_64BITHASH) && is_32bit_api()))
+		return EXT3_HTREE_EOF_32BIT;
+	else
+		return EXT3_HTREE_EOF_64BIT;
+}
+
+
+/*
+ * ext3_dir_llseek() calls generic_file_llseek[_size]() to handle both
+ * non-htree and htree directories, where the "offset" is in terms
+ * of the filename hash value instead of the byte offset.
+ *
+ * Because we may return a 64-bit hash that is well beyond s_maxbytes,
+ * we need to pass the max hash as the maximum allowable offset in
+ * the htree directory case.
+ *
+ * NOTE: offsets obtained *before* ext3_set_inode_flag(dir, EXT3_INODE_INDEX)
+ *       will be invalid once the directory has been converted into a dx directory
+ */
+loff_t ext3_dir_llseek(struct file *file, loff_t offset, int origin)
+{
+	struct inode *inode = file->f_mapping->host;
+	int dx_dir = is_dx_dir(inode);
+
+	if (likely(dx_dir))
+		return generic_file_llseek_size(file, offset, origin,
+					        ext3_get_htree_eof(file));
+	else
+		return generic_file_llseek(file, offset, origin);
+}
 
 /*
  * This structure holds the nodes of the red-black tree used to store
@@ -303,15 +367,16 @@
 }
 
 
-static struct dir_private_info *ext3_htree_create_dir_info(loff_t pos)
+static struct dir_private_info *ext3_htree_create_dir_info(struct file *filp,
+							   loff_t pos)
 {
 	struct dir_private_info *p;
 
 	p = kzalloc(sizeof(struct dir_private_info), GFP_KERNEL);
 	if (!p)
 		return NULL;
-	p->curr_hash = pos2maj_hash(pos);
-	p->curr_minor_hash = pos2min_hash(pos);
+	p->curr_hash = pos2maj_hash(filp, pos);
+	p->curr_minor_hash = pos2min_hash(filp, pos);
 	return p;
 }
 
@@ -401,7 +466,7 @@
 		printk("call_filldir: called with null fname?!?\n");
 		return 0;
 	}
-	curr_pos = hash2pos(fname->hash, fname->minor_hash);
+	curr_pos = hash2pos(filp, fname->hash, fname->minor_hash);
 	while (fname) {
 		error = filldir(dirent, fname->name,
 				fname->name_len, curr_pos,
@@ -426,13 +491,13 @@
 	int	ret;
 
 	if (!info) {
-		info = ext3_htree_create_dir_info(filp->f_pos);
+		info = ext3_htree_create_dir_info(filp, filp->f_pos);
 		if (!info)
 			return -ENOMEM;
 		filp->private_data = info;
 	}
 
-	if (filp->f_pos == EXT3_HTREE_EOF)
+	if (filp->f_pos == ext3_get_htree_eof(filp))
 		return 0;	/* EOF */
 
 	/* Some one has messed with f_pos; reset the world */
@@ -440,8 +505,8 @@
 		free_rb_tree_fname(&info->root);
 		info->curr_node = NULL;
 		info->extra_fname = NULL;
-		info->curr_hash = pos2maj_hash(filp->f_pos);
-		info->curr_minor_hash = pos2min_hash(filp->f_pos);
+		info->curr_hash = pos2maj_hash(filp, filp->f_pos);
+		info->curr_minor_hash = pos2min_hash(filp, filp->f_pos);
 	}
 
 	/*
@@ -473,7 +538,7 @@
 			if (ret < 0)
 				return ret;
 			if (ret == 0) {
-				filp->f_pos = EXT3_HTREE_EOF;
+				filp->f_pos = ext3_get_htree_eof(filp);
 				break;
 			}
 			info->curr_node = rb_first(&info->root);
@@ -493,7 +558,7 @@
 			info->curr_minor_hash = fname->minor_hash;
 		} else {
 			if (info->next_hash == ~0) {
-				filp->f_pos = EXT3_HTREE_EOF;
+				filp->f_pos = ext3_get_htree_eof(filp);
 				break;
 			}
 			info->curr_hash = info->next_hash;
@@ -512,3 +577,15 @@
 
 	return 0;
 }
+
+const struct file_operations ext3_dir_operations = {
+	.llseek		= ext3_dir_llseek,
+	.read		= generic_read_dir,
+	.readdir	= ext3_readdir,
+	.unlocked_ioctl = ext3_ioctl,
+#ifdef CONFIG_COMPAT
+	.compat_ioctl	= ext3_compat_ioctl,
+#endif
+	.fsync		= ext3_sync_file,
+	.release	= ext3_release_dir,
+};
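
A small user-space sketch (not part of the patch) of how the 64-bit htree position packs and unpacks the (major, minor) hash pair when neither FMODE_32BITHASH nor a 32-bit caller forces the legacy scheme; note that bit 0 of the major hash is never stored, matching hash2pos()/pos2maj_hash() above:

#include <assert.h>
#include <stdint.h>

int main(void)
{
	uint32_t major = 0x8765432e;	/* bit 0 of the major hash is unused */
	uint32_t minor = 0x12345678;

	/* hash2pos(): pack major and minor into one 64-bit f_pos */
	uint64_t pos = ((uint64_t)(major >> 1) << 32) | minor;

	/* pos2maj_hash() / pos2min_hash(): unpack */
	uint32_t maj_back = ((pos >> 32) << 1) & 0xffffffff;
	uint32_t min_back = pos & 0xffffffff;

	assert(maj_back == (major & ~1u));
	assert(min_back == minor);
	return 0;
}
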
diff --git a/fs/ext3/ext3.h b/fs/ext3/ext3.h
index 7977973..e85ff15 100644
--- a/fs/ext3/ext3.h
+++ b/fs/ext3/ext3.h
@@ -920,7 +920,11 @@
 	u32		*seed;
 };
 
-#define EXT3_HTREE_EOF	0x7fffffff
+
+/* 32 and 64 bit signed EOF for dx directories */
+#define EXT3_HTREE_EOF_32BIT   ((1UL  << (32 - 1)) - 1)
+#define EXT3_HTREE_EOF_64BIT   ((1ULL << (64 - 1)) - 1)
+
 
 /*
  * Control parameters used by ext3_htree_next_block
diff --git a/fs/ext3/hash.c b/fs/ext3/hash.c
index d10231d..ede315c 100644
--- a/fs/ext3/hash.c
+++ b/fs/ext3/hash.c
@@ -198,8 +198,8 @@
 		return -1;
 	}
 	hash = hash & ~1;
-	if (hash == (EXT3_HTREE_EOF << 1))
-		hash = (EXT3_HTREE_EOF-1) << 1;
+	if (hash == (EXT3_HTREE_EOF_32BIT << 1))
+		hash = (EXT3_HTREE_EOF_32BIT - 1) << 1;
 	hinfo->hash = hash;
 	hinfo->minor_hash = minor_hash;
 	return 0;
diff --git a/fs/ext3/ialloc.c b/fs/ext3/ialloc.c
index e3c39e4..082afd7 100644
--- a/fs/ext3/ialloc.c
+++ b/fs/ext3/ialloc.c
@@ -180,8 +180,7 @@
  * It's OK to put directory into a group unless
  * it has too many directories already (max_dirs) or
  * it has too few free inodes left (min_inodes) or
- * it has too few free blocks left (min_blocks) or
- * it's already running too large debt (max_debt).
+ * it has too few free blocks left (min_blocks).
  * Parent's group is preferred, if it doesn't satisfy these
  * conditions we search cyclically through the rest. If none
  * of the groups look good we just look for a group with more
@@ -191,21 +190,16 @@
  * when we allocate an inode, within 0--255.
  */
 
-#define INODE_COST 64
-#define BLOCK_COST 256
-
 static int find_group_orlov(struct super_block *sb, struct inode *parent)
 {
 	int parent_group = EXT3_I(parent)->i_block_group;
 	struct ext3_sb_info *sbi = EXT3_SB(sb);
-	struct ext3_super_block *es = sbi->s_es;
 	int ngroups = sbi->s_groups_count;
 	int inodes_per_group = EXT3_INODES_PER_GROUP(sb);
 	unsigned int freei, avefreei;
 	ext3_fsblk_t freeb, avefreeb;
-	ext3_fsblk_t blocks_per_dir;
 	unsigned int ndirs;
-	int max_debt, max_dirs, min_inodes;
+	int max_dirs, min_inodes;
 	ext3_grpblk_t min_blocks;
 	int group = -1, i;
 	struct ext3_group_desc *desc;
@@ -242,20 +236,10 @@
 		goto fallback;
 	}
 
-	blocks_per_dir = (le32_to_cpu(es->s_blocks_count) - freeb) / ndirs;
-
 	max_dirs = ndirs / ngroups + inodes_per_group / 16;
 	min_inodes = avefreei - inodes_per_group / 4;
 	min_blocks = avefreeb - EXT3_BLOCKS_PER_GROUP(sb) / 4;
 
-	max_debt = EXT3_BLOCKS_PER_GROUP(sb) / max(blocks_per_dir, (ext3_fsblk_t)BLOCK_COST);
-	if (max_debt * INODE_COST > inodes_per_group)
-		max_debt = inodes_per_group / INODE_COST;
-	if (max_debt > 255)
-		max_debt = 255;
-	if (max_debt == 0)
-		max_debt = 1;
-
 	for (i = 0; i < ngroups; i++) {
 		group = (parent_group + i) % ngroups;
 		desc = ext3_get_group_desc (sb, group, NULL);
diff --git a/fs/ext3/inode.c b/fs/ext3/inode.c
index a09790a..9a4a5c4 100644
--- a/fs/ext3/inode.c
+++ b/fs/ext3/inode.c
@@ -272,18 +272,18 @@
 	if (ext3_mark_inode_dirty(handle, inode)) {
 		/* If that failed, just dquot_drop() and be done with that */
 		dquot_drop(inode);
-		end_writeback(inode);
+		clear_inode(inode);
 	} else {
 		ext3_xattr_delete_inode(handle, inode);
 		dquot_free_inode(inode);
 		dquot_drop(inode);
-		end_writeback(inode);
+		clear_inode(inode);
 		ext3_free_inode(handle, inode);
 	}
 	ext3_journal_stop(handle);
 	return;
 no_delete:
-	end_writeback(inode);
+	clear_inode(inode);
 	dquot_drop(inode);
 }
 
diff --git a/fs/ext3/super.c b/fs/ext3/super.c
index 94ef7e6..8c3a44b 100644
--- a/fs/ext3/super.c
+++ b/fs/ext3/super.c
@@ -3015,7 +3015,6 @@
 			(unsigned long long)off, (unsigned long long)len);
 		return -EIO;
 	}
-	mutex_lock_nested(&inode->i_mutex, I_MUTEX_QUOTA);
 	bh = ext3_bread(handle, inode, blk, 1, &err);
 	if (!bh)
 		goto out;
@@ -3039,10 +3038,8 @@
 	}
 	brelse(bh);
 out:
-	if (err) {
-		mutex_unlock(&inode->i_mutex);
+	if (err)
 		return err;
-	}
 	if (inode->i_size < off + len) {
 		i_size_write(inode, off + len);
 		EXT3_I(inode)->i_disksize = inode->i_size;
@@ -3050,7 +3047,6 @@
 	inode->i_version++;
 	inode->i_mtime = inode->i_ctime = CURRENT_TIME;
 	ext3_mark_inode_dirty(handle, inode);
-	mutex_unlock(&inode->i_mutex);
 	return len;
 }
 
diff --git a/fs/ext4/Kconfig b/fs/ext4/Kconfig
index 9ed1bb1..c22f170 100644
--- a/fs/ext4/Kconfig
+++ b/fs/ext4/Kconfig
@@ -2,6 +2,8 @@
 	tristate "The Extended 4 (ext4) filesystem"
 	select JBD2
 	select CRC16
+	select CRYPTO
+	select CRYPTO_CRC32C
 	help
 	  This is the next generation of the ext3 filesystem.
 
diff --git a/fs/ext4/balloc.c b/fs/ext4/balloc.c
index c45c411..cee7812 100644
--- a/fs/ext4/balloc.c
+++ b/fs/ext4/balloc.c
@@ -90,8 +90,8 @@
 	 * unusual file system layouts.
 	 */
 	if (ext4_block_in_group(sb, ext4_block_bitmap(sb, gdp), block_group)) {
-		block_cluster = EXT4_B2C(sbi, (start -
-					       ext4_block_bitmap(sb, gdp)));
+		block_cluster = EXT4_B2C(sbi,
+					 ext4_block_bitmap(sb, gdp) - start);
 		if (block_cluster < num_clusters)
 			block_cluster = -1;
 		else if (block_cluster == num_clusters) {
@@ -102,7 +102,7 @@
 
 	if (ext4_block_in_group(sb, ext4_inode_bitmap(sb, gdp), block_group)) {
 		inode_cluster = EXT4_B2C(sbi,
-					 start - ext4_inode_bitmap(sb, gdp));
+					 ext4_inode_bitmap(sb, gdp) - start);
 		if (inode_cluster < num_clusters)
 			inode_cluster = -1;
 		else if (inode_cluster == num_clusters) {
@@ -114,7 +114,7 @@
 	itbl_blk = ext4_inode_table(sb, gdp);
 	for (i = 0; i < sbi->s_itb_per_group; i++) {
 		if (ext4_block_in_group(sb, itbl_blk + i, block_group)) {
-			c = EXT4_B2C(sbi, start - itbl_blk + i);
+			c = EXT4_B2C(sbi, itbl_blk + i - start);
 			if ((c < num_clusters) || (c == inode_cluster) ||
 			    (c == block_cluster) || (c == itbl_cluster))
 				continue;
@@ -168,12 +168,14 @@
 
 	/* If checksum is bad mark all blocks used to prevent allocation
 	 * essentially implementing a per-group read-only flag. */
-	if (!ext4_group_desc_csum_verify(sbi, block_group, gdp)) {
+	if (!ext4_group_desc_csum_verify(sb, block_group, gdp)) {
 		ext4_error(sb, "Checksum bad for group %u", block_group);
 		ext4_free_group_clusters_set(sb, gdp, 0);
 		ext4_free_inodes_set(sb, gdp, 0);
 		ext4_itable_unused_set(sb, gdp, 0);
 		memset(bh->b_data, 0xff, sb->s_blocksize);
+		ext4_block_bitmap_csum_set(sb, block_group, gdp, bh,
+					   EXT4_BLOCKS_PER_GROUP(sb) / 8);
 		return;
 	}
 	memset(bh->b_data, 0, sb->s_blocksize);
@@ -210,6 +212,9 @@
 	 */
 	ext4_mark_bitmap_end(num_clusters_in_group(sb, block_group),
 			     sb->s_blocksize * 8, bh->b_data);
+	ext4_block_bitmap_csum_set(sb, block_group, gdp, bh,
+				   EXT4_BLOCKS_PER_GROUP(sb) / 8);
+	ext4_group_desc_csum_set(sb, block_group, gdp);
 }
 
 /* Return the number of free blocks in a block group.  It is used when
@@ -276,9 +281,9 @@
 }
 
 static int ext4_valid_block_bitmap(struct super_block *sb,
-					struct ext4_group_desc *desc,
-					unsigned int block_group,
-					struct buffer_head *bh)
+				   struct ext4_group_desc *desc,
+				   unsigned int block_group,
+				   struct buffer_head *bh)
 {
 	ext4_grpblk_t offset;
 	ext4_grpblk_t next_zero_bit;
@@ -325,6 +330,23 @@
 			block_group, bitmap_blk);
 	return 0;
 }
+
+void ext4_validate_block_bitmap(struct super_block *sb,
+			       struct ext4_group_desc *desc,
+			       unsigned int block_group,
+			       struct buffer_head *bh)
+{
+	if (buffer_verified(bh))
+		return;
+
+	ext4_lock_group(sb, block_group);
+	if (ext4_valid_block_bitmap(sb, desc, block_group, bh) &&
+	    ext4_block_bitmap_csum_verify(sb, block_group, desc, bh,
+					  EXT4_BLOCKS_PER_GROUP(sb) / 8))
+		set_buffer_verified(bh);
+	ext4_unlock_group(sb, block_group);
+}
+
 /**
  * ext4_read_block_bitmap()
  * @sb:			super block
@@ -355,12 +377,12 @@
 	}
 
 	if (bitmap_uptodate(bh))
-		return bh;
+		goto verify;
 
 	lock_buffer(bh);
 	if (bitmap_uptodate(bh)) {
 		unlock_buffer(bh);
-		return bh;
+		goto verify;
 	}
 	ext4_lock_group(sb, block_group);
 	if (desc->bg_flags & cpu_to_le16(EXT4_BG_BLOCK_UNINIT)) {
@@ -379,7 +401,7 @@
 		 */
 		set_bitmap_uptodate(bh);
 		unlock_buffer(bh);
-		return bh;
+		goto verify;
 	}
 	/*
 	 * submit the buffer_head for reading
@@ -390,6 +412,9 @@
 	get_bh(bh);
 	submit_bh(READ, bh);
 	return bh;
+verify:
+	ext4_validate_block_bitmap(sb, desc, block_group, bh);
+	return bh;
 }
 
 /* Returns 0 on success, 1 on error */
@@ -412,7 +437,7 @@
 	}
 	clear_buffer_new(bh);
 	/* Panic or remount fs read-only if block bitmap is invalid */
-	ext4_valid_block_bitmap(sb, desc, block_group, bh);
+	ext4_validate_block_bitmap(sb, desc, block_group, bh);
 	return 0;
 }
 
diff --git a/fs/ext4/bitmap.c b/fs/ext4/bitmap.c
index fa3af81..b319721 100644
--- a/fs/ext4/bitmap.c
+++ b/fs/ext4/bitmap.c
@@ -29,3 +29,86 @@
 
 #endif  /*  EXT4FS_DEBUG  */
 
+int ext4_inode_bitmap_csum_verify(struct super_block *sb, ext4_group_t group,
+				  struct ext4_group_desc *gdp,
+				  struct buffer_head *bh, int sz)
+{
+	__u32 hi;
+	__u32 provided, calculated;
+	struct ext4_sb_info *sbi = EXT4_SB(sb);
+
+	if (!EXT4_HAS_RO_COMPAT_FEATURE(sb,
+					EXT4_FEATURE_RO_COMPAT_METADATA_CSUM))
+		return 1;
+
+	provided = le16_to_cpu(gdp->bg_inode_bitmap_csum_lo);
+	calculated = ext4_chksum(sbi, sbi->s_csum_seed, (__u8 *)bh->b_data, sz);
+	if (sbi->s_desc_size >= EXT4_BG_INODE_BITMAP_CSUM_HI_END) {
+		hi = le16_to_cpu(gdp->bg_inode_bitmap_csum_hi);
+		provided |= (hi << 16);
+	} else
+		calculated &= 0xFFFF;
+
+	return provided == calculated;
+}
+
+void ext4_inode_bitmap_csum_set(struct super_block *sb, ext4_group_t group,
+				struct ext4_group_desc *gdp,
+				struct buffer_head *bh, int sz)
+{
+	__u32 csum;
+	struct ext4_sb_info *sbi = EXT4_SB(sb);
+
+	if (!EXT4_HAS_RO_COMPAT_FEATURE(sb,
+					EXT4_FEATURE_RO_COMPAT_METADATA_CSUM))
+		return;
+
+	csum = ext4_chksum(sbi, sbi->s_csum_seed, (__u8 *)bh->b_data, sz);
+	gdp->bg_inode_bitmap_csum_lo = cpu_to_le16(csum & 0xFFFF);
+	if (sbi->s_desc_size >= EXT4_BG_INODE_BITMAP_CSUM_HI_END)
+		gdp->bg_inode_bitmap_csum_hi = cpu_to_le16(csum >> 16);
+}
+
+int ext4_block_bitmap_csum_verify(struct super_block *sb, ext4_group_t group,
+				  struct ext4_group_desc *gdp,
+				  struct buffer_head *bh, int sz)
+{
+	__u32 hi;
+	__u32 provided, calculated;
+	struct ext4_sb_info *sbi = EXT4_SB(sb);
+
+	if (!EXT4_HAS_RO_COMPAT_FEATURE(sb,
+					EXT4_FEATURE_RO_COMPAT_METADATA_CSUM))
+		return 1;
+
+	provided = le16_to_cpu(gdp->bg_block_bitmap_csum_lo);
+	calculated = ext4_chksum(sbi, sbi->s_csum_seed, (__u8 *)bh->b_data, sz);
+	if (sbi->s_desc_size >= EXT4_BG_BLOCK_BITMAP_CSUM_HI_END) {
+		hi = le16_to_cpu(gdp->bg_block_bitmap_csum_hi);
+		provided |= (hi << 16);
+	} else
+		calculated &= 0xFFFF;
+
+	if (provided == calculated)
+		return 1;
+
+	ext4_error(sb, "Bad block bitmap checksum: block_group = %u", group);
+	return 0;
+}
+
+void ext4_block_bitmap_csum_set(struct super_block *sb, ext4_group_t group,
+				struct ext4_group_desc *gdp,
+				struct buffer_head *bh, int sz)
+{
+	__u32 csum;
+	struct ext4_sb_info *sbi = EXT4_SB(sb);
+
+	if (!EXT4_HAS_RO_COMPAT_FEATURE(sb,
+			EXT4_FEATURE_RO_COMPAT_METADATA_CSUM))
+		return;
+
+	csum = ext4_chksum(sbi, sbi->s_csum_seed, (__u8 *)bh->b_data, sz);
+	gdp->bg_block_bitmap_csum_lo = cpu_to_le16(csum & 0xFFFF);
+	if (sbi->s_desc_size >= EXT4_BG_BLOCK_BITMAP_CSUM_HI_END)
+		gdp->bg_block_bitmap_csum_hi = cpu_to_le16(csum >> 16);
+}
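
The new bitmap.c helpers store a 32-bit crc32c of each bitmap in the group
descriptor, but only the low 16 bits unless the descriptor is large enough to
reach the *_csum_hi field; with a small descriptor the computed value is
masked to 16 bits before comparison. A self-contained sketch of that lo/hi
split, using a plain bitwise crc32c (reflected polynomial 0x82F63B78) as a
stand-in for the kernel's crypto-API driver and its per-filesystem seeding:

/* Split a 32-bit bitmap checksum across lo/hi 16-bit descriptor fields. */
#include <stdint.h>
#include <stdio.h>
#include <string.h>

static uint32_t crc32c(uint32_t crc, const void *buf, size_t len)
{
	const uint8_t *p = buf;

	while (len--) {
		crc ^= *p++;
		for (int i = 0; i < 8; i++)
			crc = (crc >> 1) ^ (0x82F63B78 & -(crc & 1));
	}
	return crc;
}

struct demo_group_desc {	/* illustrative only, not the on-disk layout */
	uint16_t bitmap_csum_lo;
	uint16_t bitmap_csum_hi;
};

static void csum_set(struct demo_group_desc *gdp, uint32_t seed,
		     const void *bitmap, size_t sz, int have_hi_field)
{
	uint32_t csum = crc32c(seed, bitmap, sz);

	gdp->bitmap_csum_lo = csum & 0xFFFF;
	if (have_hi_field)
		gdp->bitmap_csum_hi = csum >> 16;
}

static int csum_verify(const struct demo_group_desc *gdp, uint32_t seed,
		       const void *bitmap, size_t sz, int have_hi_field)
{
	uint32_t provided = gdp->bitmap_csum_lo;
	uint32_t calculated = crc32c(seed, bitmap, sz);

	if (have_hi_field)
		provided |= (uint32_t)gdp->bitmap_csum_hi << 16;
	else
		calculated &= 0xFFFF;	/* only 16 bits were ever stored */
	return provided == calculated;
}

int main(void)
{
	uint8_t bitmap[512];
	struct demo_group_desc gdp = { 0, 0 };
	uint32_t seed = 0xdeadbeef;	/* the kernel uses sbi->s_csum_seed */

	memset(bitmap, 0xA5, sizeof(bitmap));
	csum_set(&gdp, seed, bitmap, sizeof(bitmap), 1);
	printf("clean:     %d\n", csum_verify(&gdp, seed, bitmap, sizeof(bitmap), 1));
	bitmap[0] ^= 1;			/* flip one bit */
	printf("corrupted: %d\n", csum_verify(&gdp, seed, bitmap, sizeof(bitmap), 1));
	return 0;
}
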
diff --git a/fs/ext4/dir.c b/fs/ext4/dir.c
index b867862..aa39e60 100644
--- a/fs/ext4/dir.c
+++ b/fs/ext4/dir.c
@@ -179,6 +179,18 @@
 			continue;
 		}
 
+		/* Check the checksum */
+		if (!buffer_verified(bh) &&
+		    !ext4_dirent_csum_verify(inode,
+				(struct ext4_dir_entry *)bh->b_data)) {
+			EXT4_ERROR_FILE(filp, 0, "directory fails checksum "
+					"at offset %llu",
+					(unsigned long long)filp->f_pos);
+			filp->f_pos += sb->s_blocksize - offset;
+			continue;
+		}
+		set_buffer_verified(bh);
+
 revalidate:
 		/* If the dir block has changed since the last call to
 		 * readdir(2), then we might be pointing to an invalid
diff --git a/fs/ext4/ext4.h b/fs/ext4/ext4.h
index c21b1de5..cfc4e01 100644
--- a/fs/ext4/ext4.h
+++ b/fs/ext4/ext4.h
@@ -29,6 +29,7 @@
 #include <linux/wait.h>
 #include <linux/blockgroup_lock.h>
 #include <linux/percpu_counter.h>
+#include <crypto/hash.h>
 #ifdef __KERNEL__
 #include <linux/compat.h>
 #endif
@@ -298,7 +299,9 @@
 	__le16	bg_free_inodes_count_lo;/* Free inodes count */
 	__le16	bg_used_dirs_count_lo;	/* Directories count */
 	__le16	bg_flags;		/* EXT4_BG_flags (INODE_UNINIT, etc) */
-	__u32	bg_reserved[2];		/* Likely block/inode bitmap checksum */
+	__le32  bg_exclude_bitmap_lo;   /* Exclude bitmap for snapshots */
+	__le16  bg_block_bitmap_csum_lo;/* crc32c(s_uuid+grp_num+bbitmap) LE */
+	__le16  bg_inode_bitmap_csum_lo;/* crc32c(s_uuid+grp_num+ibitmap) LE */
 	__le16  bg_itable_unused_lo;	/* Unused inodes count */
 	__le16  bg_checksum;		/* crc16(sb_uuid+group+desc) */
 	__le32	bg_block_bitmap_hi;	/* Blocks bitmap block MSB */
@@ -308,9 +311,19 @@
 	__le16	bg_free_inodes_count_hi;/* Free inodes count MSB */
 	__le16	bg_used_dirs_count_hi;	/* Directories count MSB */
 	__le16  bg_itable_unused_hi;    /* Unused inodes count MSB */
-	__u32	bg_reserved2[3];
+	__le32  bg_exclude_bitmap_hi;   /* Exclude bitmap block MSB */
+	__le16  bg_block_bitmap_csum_hi;/* crc32c(s_uuid+grp_num+bbitmap) BE */
+	__le16  bg_inode_bitmap_csum_hi;/* crc32c(s_uuid+grp_num+ibitmap) BE */
+	__u32   bg_reserved;
 };
 
+#define EXT4_BG_INODE_BITMAP_CSUM_HI_END	\
+	(offsetof(struct ext4_group_desc, bg_inode_bitmap_csum_hi) + \
+	 sizeof(__le16))
+#define EXT4_BG_BLOCK_BITMAP_CSUM_HI_END	\
+	(offsetof(struct ext4_group_desc, bg_block_bitmap_csum_hi) + \
+	 sizeof(__le16))
+
 /*
  * Structure of a flex block group info
  */
@@ -650,7 +663,8 @@
 			__le16	l_i_file_acl_high;
 			__le16	l_i_uid_high;	/* these 2 fields */
 			__le16	l_i_gid_high;	/* were reserved2[0] */
-			__u32	l_i_reserved2;
+			__le16	l_i_checksum_lo;/* crc32c(uuid+inum+inode) LE */
+			__le16	l_i_reserved;
 		} linux2;
 		struct {
 			__le16	h_i_reserved1;	/* Obsoleted fragment number/size which are removed in ext4 */
@@ -666,7 +680,7 @@
 		} masix2;
 	} osd2;				/* OS dependent 2 */
 	__le16	i_extra_isize;
-	__le16	i_pad1;
+	__le16	i_checksum_hi;	/* crc32c(uuid+inum+inode) BE */
 	__le32  i_ctime_extra;  /* extra Change time      (nsec << 2 | epoch) */
 	__le32  i_mtime_extra;  /* extra Modification time(nsec << 2 | epoch) */
 	__le32  i_atime_extra;  /* extra Access time      (nsec << 2 | epoch) */
@@ -768,7 +782,7 @@
 #define i_gid_low	i_gid
 #define i_uid_high	osd2.linux2.l_i_uid_high
 #define i_gid_high	osd2.linux2.l_i_gid_high
-#define i_reserved2	osd2.linux2.l_i_reserved2
+#define i_checksum_lo	osd2.linux2.l_i_checksum_lo
 
 #elif defined(__GNU__)
 
@@ -908,6 +922,9 @@
 	 */
 	tid_t i_sync_tid;
 	tid_t i_datasync_tid;
+
+	/* Precomputed uuid+inum+igen checksum for seeding inode checksums */
+	__u32 i_csum_seed;
 };
 
 /*
@@ -1001,6 +1018,9 @@
 #define EXT4_ERRORS_PANIC		3	/* Panic */
 #define EXT4_ERRORS_DEFAULT		EXT4_ERRORS_CONTINUE
 
+/* Metadata checksum algorithm codes */
+#define EXT4_CRC32C_CHKSUM		1
+
 /*
  * Structure of the super block
  */
@@ -1087,7 +1107,7 @@
 	__le64  s_mmp_block;            /* Block for multi-mount protection */
 	__le32  s_raid_stripe_width;    /* blocks on all data disks (N*stride)*/
 	__u8	s_log_groups_per_flex;  /* FLEX_BG group size */
-	__u8	s_reserved_char_pad;
+	__u8	s_checksum_type;	/* metadata checksum algorithm used */
 	__le16  s_reserved_pad;
 	__le64	s_kbytes_written;	/* nr of lifetime kilobytes written */
 	__le32	s_snapshot_inum;	/* Inode number of active snapshot */
@@ -1113,7 +1133,8 @@
 	__le32	s_usr_quota_inum;	/* inode for tracking user quota */
 	__le32	s_grp_quota_inum;	/* inode for tracking group quota */
 	__le32	s_overhead_clusters;	/* overhead blocks/clusters in fs */
-	__le32  s_reserved[109];        /* Padding to the end of the block */
+	__le32	s_reserved[108];	/* Padding to the end of the block */
+	__le32	s_checksum;		/* crc32c(superblock) */
 };
 
 #define EXT4_S_ERR_LEN (EXT4_S_ERR_END - EXT4_S_ERR_START)
@@ -1176,6 +1197,7 @@
 	struct proc_dir_entry *s_proc;
 	struct kobject s_kobj;
 	struct completion s_kobj_unregister;
+	struct super_block *s_sb;
 
 	/* Journaling */
 	struct journal_s *s_journal;
@@ -1266,6 +1288,12 @@
 
 	/* record the last minlen when FITRIM is called. */
 	atomic_t s_last_trim_minblks;
+
+	/* Reference to checksum algorithm driver via cryptoapi */
+	struct crypto_shash *s_chksum_driver;
+
+	/* Precomputed FS UUID checksum for seeding other checksums */
+	__u32 s_csum_seed;
 };
 
 static inline struct ext4_sb_info *EXT4_SB(struct super_block *sb)
@@ -1414,6 +1442,12 @@
 #define EXT4_FEATURE_RO_COMPAT_EXTRA_ISIZE	0x0040
 #define EXT4_FEATURE_RO_COMPAT_QUOTA		0x0100
 #define EXT4_FEATURE_RO_COMPAT_BIGALLOC		0x0200
+/*
+ * METADATA_CSUM also enables group descriptor checksums (GDT_CSUM).  When
+ * METADATA_CSUM is set, group descriptor checksums use the same algorithm as
+ * all other data structures' checksums.  However, the METADATA_CSUM and
+ * GDT_CSUM bits are mutually exclusive.
+ */
 #define EXT4_FEATURE_RO_COMPAT_METADATA_CSUM	0x0400
 
 #define EXT4_FEATURE_INCOMPAT_COMPRESSION	0x0001
@@ -1461,7 +1495,8 @@
 					 EXT4_FEATURE_RO_COMPAT_EXTRA_ISIZE | \
 					 EXT4_FEATURE_RO_COMPAT_BTREE_DIR |\
 					 EXT4_FEATURE_RO_COMPAT_HUGE_FILE |\
-					 EXT4_FEATURE_RO_COMPAT_BIGALLOC)
+					 EXT4_FEATURE_RO_COMPAT_BIGALLOC |\
+					 EXT4_FEATURE_RO_COMPAT_METADATA_CSUM)
 
 /*
  * Default values for user and/or group using reserved blocks
@@ -1527,6 +1562,18 @@
 };
 
 /*
+ * This is a bogus directory entry at the end of each leaf block that
+ * records checksums.
+ */
+struct ext4_dir_entry_tail {
+	__le32	det_reserved_zero1;	/* Pretend to be unused */
+	__le16	det_rec_len;		/* 12 */
+	__u8	det_reserved_zero2;	/* Zero name length */
+	__u8	det_reserved_ft;	/* 0xDE, fake file type */
+	__le32	det_checksum;		/* crc32c(uuid+inum+dirblock) */
+};
+
+/*
  * Ext4 directory file types.  Only the low 3 bits are used.  The
  * other bits are reserved for now.
  */
@@ -1541,6 +1588,8 @@
 
 #define EXT4_FT_MAX		8
 
+#define EXT4_FT_DIR_CSUM	0xDE
+
 /*
  * EXT4_DIR_PAD defines the directory entries boundaries
  *
@@ -1609,6 +1658,25 @@
 #define DX_HASH_HALF_MD4_UNSIGNED	4
 #define DX_HASH_TEA_UNSIGNED		5
 
+static inline u32 ext4_chksum(struct ext4_sb_info *sbi, u32 crc,
+			      const void *address, unsigned int length)
+{
+	struct {
+		struct shash_desc shash;
+		char ctx[crypto_shash_descsize(sbi->s_chksum_driver)];
+	} desc;
+	int err;
+
+	desc.shash.tfm = sbi->s_chksum_driver;
+	desc.shash.flags = 0;
+	*(u32 *)desc.ctx = crc;
+
+	err = crypto_shash_update(&desc.shash, address, length);
+	BUG_ON(err);
+
+	return *(u32 *)desc.ctx;
+}
+
 #ifdef __KERNEL__
 
 /* hash info structure used by the directory hash */
@@ -1741,7 +1809,8 @@
 	__le16	mmp_check_interval;
 
 	__le16	mmp_pad1;
-	__le32	mmp_pad2[227];
+	__le32	mmp_pad2[226];
+	__le32	mmp_checksum;		/* crc32c(uuid+mmp_block) */
 };
 
 /* arguments passed to the mmp thread */
@@ -1784,8 +1853,24 @@
 
 /* bitmap.c */
 extern unsigned int ext4_count_free(struct buffer_head *, unsigned);
+void ext4_inode_bitmap_csum_set(struct super_block *sb, ext4_group_t group,
+				struct ext4_group_desc *gdp,
+				struct buffer_head *bh, int sz);
+int ext4_inode_bitmap_csum_verify(struct super_block *sb, ext4_group_t group,
+				  struct ext4_group_desc *gdp,
+				  struct buffer_head *bh, int sz);
+void ext4_block_bitmap_csum_set(struct super_block *sb, ext4_group_t group,
+				struct ext4_group_desc *gdp,
+				struct buffer_head *bh, int sz);
+int ext4_block_bitmap_csum_verify(struct super_block *sb, ext4_group_t group,
+				  struct ext4_group_desc *gdp,
+				  struct buffer_head *bh, int sz);
 
 /* balloc.c */
+extern void ext4_validate_block_bitmap(struct super_block *sb,
+				       struct ext4_group_desc *desc,
+				       unsigned int block_group,
+				       struct buffer_head *bh);
 extern unsigned int ext4_block_group(struct super_block *sb,
 			ext4_fsblk_t blocknr);
 extern ext4_grpblk_t ext4_block_group_offset(struct super_block *sb,
@@ -1864,7 +1949,7 @@
 /* mballoc.c */
 extern long ext4_mb_stats;
 extern long ext4_mb_max_to_scan;
-extern int ext4_mb_init(struct super_block *, int);
+extern int ext4_mb_init(struct super_block *);
 extern int ext4_mb_release(struct super_block *);
 extern ext4_fsblk_t ext4_mb_new_blocks(handle_t *,
 				struct ext4_allocation_request *, int *);
@@ -1936,6 +2021,8 @@
 extern int ext4_ext_migrate(struct inode *);
 
 /* namei.c */
+extern int ext4_dirent_csum_verify(struct inode *inode,
+				   struct ext4_dir_entry *dirent);
 extern int ext4_orphan_add(handle_t *, struct inode *);
 extern int ext4_orphan_del(handle_t *, struct inode *);
 extern int ext4_htree_fill_tree(struct file *dir_file, __u32 start_hash,
@@ -1950,6 +2037,10 @@
 extern int ext4_resize_fs(struct super_block *sb, ext4_fsblk_t n_blocks_count);
 
 /* super.c */
+extern int ext4_superblock_csum_verify(struct super_block *sb,
+				       struct ext4_super_block *es);
+extern void ext4_superblock_csum_set(struct super_block *sb,
+				     struct ext4_super_block *es);
 extern void *ext4_kvmalloc(size_t size, gfp_t flags);
 extern void *ext4_kvzalloc(size_t size, gfp_t flags);
 extern void ext4_kvfree(void *ptr);
@@ -2025,10 +2116,17 @@
 				struct ext4_group_desc *bg, __u32 count);
 extern void ext4_itable_unused_set(struct super_block *sb,
 				   struct ext4_group_desc *bg, __u32 count);
-extern __le16 ext4_group_desc_csum(struct ext4_sb_info *sbi, __u32 group,
-				   struct ext4_group_desc *gdp);
-extern int ext4_group_desc_csum_verify(struct ext4_sb_info *sbi, __u32 group,
+extern int ext4_group_desc_csum_verify(struct super_block *sb, __u32 group,
 				       struct ext4_group_desc *gdp);
+extern void ext4_group_desc_csum_set(struct super_block *sb, __u32 group,
+				     struct ext4_group_desc *gdp);
+
+static inline int ext4_has_group_desc_csum(struct super_block *sb)
+{
+	return EXT4_HAS_RO_COMPAT_FEATURE(sb,
+					  EXT4_FEATURE_RO_COMPAT_GDT_CSUM |
+					  EXT4_FEATURE_RO_COMPAT_METADATA_CSUM);
+}
 
 static inline ext4_fsblk_t ext4_blocks_count(struct ext4_super_block *es)
 {
@@ -2225,6 +2323,9 @@
 
 static inline void ext4_mark_super_dirty(struct super_block *sb)
 {
+	struct ext4_super_block *es = EXT4_SB(sb)->s_es;
+
+	ext4_superblock_csum_set(sb, es);
 	if (EXT4_SB(sb)->s_journal == NULL)
 		sb->s_dirt = 1;
 }
@@ -2314,6 +2415,9 @@
 
 /* mmp.c */
 extern int ext4_multi_mount_protect(struct super_block *, ext4_fsblk_t);
+extern void ext4_mmp_csum_set(struct super_block *sb, struct mmp_struct *mmp);
+extern int ext4_mmp_csum_verify(struct super_block *sb,
+				struct mmp_struct *mmp);
 
 /* BH_Uninit flag: blocks are allocated but uninitialized on disk */
 enum ext4_state_bits {
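
The EXT4_BG_*_CSUM_HI_END macros added above express "does this descriptor
reach the end of the hi checksum field" as offsetof(field) + sizeof(field), so
32-byte descriptors quietly fall back to 16-bit checksums while 64-byte ones
keep all 32 bits. A small sketch of that size test; the layout below is
illustrative, not the real ext4_group_desc:

/* offsetof()-based "is the descriptor big enough for the hi field" test. */
#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

struct demo_group_desc {
	uint32_t block_bitmap_lo;
	uint32_t inode_bitmap_lo;
	uint16_t block_bitmap_csum_lo;
	uint16_t inode_bitmap_csum_lo;
	/* fields below exist only when the descriptor is the large variant */
	uint16_t block_bitmap_csum_hi;
	uint16_t inode_bitmap_csum_hi;
};

#define DEMO_BLOCK_BITMAP_CSUM_HI_END \
	(offsetof(struct demo_group_desc, block_bitmap_csum_hi) + \
	 sizeof(uint16_t))

static int desc_has_hi_csum(size_t desc_size)
{
	/* same shape as: s_desc_size >= EXT4_BG_BLOCK_BITMAP_CSUM_HI_END */
	return desc_size >= DEMO_BLOCK_BITMAP_CSUM_HI_END;
}

int main(void)
{
	printf("hi field ends at byte %zu\n", (size_t)DEMO_BLOCK_BITMAP_CSUM_HI_END);
	printf("12-byte descriptor has hi field: %d\n", desc_has_hi_csum(12));
	printf("%zu-byte descriptor has hi field: %d\n",
	       sizeof(struct demo_group_desc),
	       desc_has_hi_csum(sizeof(struct demo_group_desc)));
	return 0;
}
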
diff --git a/fs/ext4/ext4_extents.h b/fs/ext4/ext4_extents.h
index 0f58b86..cb1b2c9 100644
--- a/fs/ext4/ext4_extents.h
+++ b/fs/ext4/ext4_extents.h
@@ -63,9 +63,22 @@
  * ext4_inode has i_block array (60 bytes total).
  * The first 12 bytes store ext4_extent_header;
  * the remainder stores an array of ext4_extent.
+ * For non-inode extent blocks, ext4_extent_tail
+ * follows the array.
  */
 
 /*
+ * This is the extent tail on-disk structure.
+ * All other extent structures are 12 bytes long.  It turns out that
+ * block_size % 12 >= 4 for at least all powers of 2 greater than 512, which
+ * covers all valid ext4 block sizes.  Therefore, this tail structure can be
+ * crammed into the end of the block without having to rebalance the tree.
+ */
+struct ext4_extent_tail {
+	__le32	et_checksum;	/* crc32c(uuid+inum+extent_block) */
+};
+
+/*
  * This is the extent on-disk structure.
  * It's used at the bottom of the tree.
  */
@@ -101,6 +114,17 @@
 
 #define EXT4_EXT_MAGIC		cpu_to_le16(0xf30a)
 
+#define EXT4_EXTENT_TAIL_OFFSET(hdr) \
+	(sizeof(struct ext4_extent_header) + \
+	 (sizeof(struct ext4_extent) * le16_to_cpu((hdr)->eh_max)))
+
+static inline struct ext4_extent_tail *
+find_ext4_extent_tail(struct ext4_extent_header *eh)
+{
+	return (struct ext4_extent_tail *)(((void *)eh) +
+					   EXT4_EXTENT_TAIL_OFFSET(eh));
+}
+
 /*
  * Array of ext4_ext_path contains path to some extent.
  * Creation/lookup routines use it for traversal/splitting/etc.
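
The ext4_extent_tail comment above leans on block_size % 12 >= 4 holding for
every power-of-two block size, so the 4-byte tail always fits in the slack
after a full array of 12-byte entries, and find_ext4_extent_tail() can locate
it at header size plus eh_max entries. A quick check of both claims, assuming
a 12-byte header and entries and a 4-byte tail as in the patch:

/* Confirm a 4-byte extent tail fits after eh_max 12-byte entries for all
 * power-of-two block sizes ext4 supports (1 KiB .. 64 KiB). */
#include <stdio.h>

#define ENTRY_SIZE	12	/* extent header and each extent entry */
#define TAIL_SIZE	4	/* struct ext4_extent_tail */

int main(void)
{
	for (unsigned bs = 1024; bs <= 65536; bs <<= 1) {
		unsigned eh_max = (bs - ENTRY_SIZE) / ENTRY_SIZE;
		unsigned tail_off = ENTRY_SIZE + eh_max * ENTRY_SIZE;
		unsigned slack = bs - tail_off;

		printf("block %5u: bs %% 12 = %u, eh_max = %4u, tail at %5u, "
		       "slack = %u -> tail fits: %d\n",
		       bs, bs % ENTRY_SIZE, eh_max, tail_off, slack,
		       slack >= TAIL_SIZE);
	}
	return 0;
}
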
diff --git a/fs/ext4/ext4_jbd2.c b/fs/ext4/ext4_jbd2.c
index aca1790..90f7c2e 100644
--- a/fs/ext4/ext4_jbd2.c
+++ b/fs/ext4/ext4_jbd2.c
@@ -138,16 +138,23 @@
 }
 
 int __ext4_handle_dirty_super(const char *where, unsigned int line,
-			      handle_t *handle, struct super_block *sb)
+			      handle_t *handle, struct super_block *sb,
+			      int now)
 {
 	struct buffer_head *bh = EXT4_SB(sb)->s_sbh;
 	int err = 0;
 
 	if (ext4_handle_valid(handle)) {
+		ext4_superblock_csum_set(sb,
+				(struct ext4_super_block *)bh->b_data);
 		err = jbd2_journal_dirty_metadata(handle, bh);
 		if (err)
 			ext4_journal_abort_handle(where, line, __func__,
 						  bh, handle, err);
+	} else if (now) {
+		ext4_superblock_csum_set(sb,
+				(struct ext4_super_block *)bh->b_data);
+		mark_buffer_dirty(bh);
 	} else
 		sb->s_dirt = 1;
 	return err;
diff --git a/fs/ext4/ext4_jbd2.h b/fs/ext4/ext4_jbd2.h
index 83b20fc..f440e8f1 100644
--- a/fs/ext4/ext4_jbd2.h
+++ b/fs/ext4/ext4_jbd2.h
@@ -213,7 +213,8 @@
 				 struct buffer_head *bh);
 
 int __ext4_handle_dirty_super(const char *where, unsigned int line,
-			      handle_t *handle, struct super_block *sb);
+			      handle_t *handle, struct super_block *sb,
+			      int now);
 
 #define ext4_journal_get_write_access(handle, bh) \
 	__ext4_journal_get_write_access(__func__, __LINE__, (handle), (bh))
@@ -225,8 +226,10 @@
 #define ext4_handle_dirty_metadata(handle, inode, bh) \
 	__ext4_handle_dirty_metadata(__func__, __LINE__, (handle), (inode), \
 				     (bh))
+#define ext4_handle_dirty_super_now(handle, sb) \
+	__ext4_handle_dirty_super(__func__, __LINE__, (handle), (sb), 1)
 #define ext4_handle_dirty_super(handle, sb) \
-	__ext4_handle_dirty_super(__func__, __LINE__, (handle), (sb))
+	__ext4_handle_dirty_super(__func__, __LINE__, (handle), (sb), 0)
 
 handle_t *ext4_journal_start_sb(struct super_block *sb, int nblocks);
 int __ext4_journal_stop(const char *where, unsigned int line, handle_t *handle);
diff --git a/fs/ext4/extents.c b/fs/ext4/extents.c
index abcdeab..91341ec 100644
--- a/fs/ext4/extents.c
+++ b/fs/ext4/extents.c
@@ -52,6 +52,46 @@
 #define EXT4_EXT_MARK_UNINIT1	0x2  /* mark first half uninitialized */
 #define EXT4_EXT_MARK_UNINIT2	0x4  /* mark second half uninitialized */
 
+static __le32 ext4_extent_block_csum(struct inode *inode,
+				     struct ext4_extent_header *eh)
+{
+	struct ext4_inode_info *ei = EXT4_I(inode);
+	struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
+	__u32 csum;
+
+	csum = ext4_chksum(sbi, ei->i_csum_seed, (__u8 *)eh,
+			   EXT4_EXTENT_TAIL_OFFSET(eh));
+	return cpu_to_le32(csum);
+}
+
+static int ext4_extent_block_csum_verify(struct inode *inode,
+					 struct ext4_extent_header *eh)
+{
+	struct ext4_extent_tail *et;
+
+	if (!EXT4_HAS_RO_COMPAT_FEATURE(inode->i_sb,
+		EXT4_FEATURE_RO_COMPAT_METADATA_CSUM))
+		return 1;
+
+	et = find_ext4_extent_tail(eh);
+	if (et->et_checksum != ext4_extent_block_csum(inode, eh))
+		return 0;
+	return 1;
+}
+
+static void ext4_extent_block_csum_set(struct inode *inode,
+				       struct ext4_extent_header *eh)
+{
+	struct ext4_extent_tail *et;
+
+	if (!EXT4_HAS_RO_COMPAT_FEATURE(inode->i_sb,
+		EXT4_FEATURE_RO_COMPAT_METADATA_CSUM))
+		return;
+
+	et = find_ext4_extent_tail(eh);
+	et->et_checksum = ext4_extent_block_csum(inode, eh);
+}
+
 static int ext4_split_extent(handle_t *handle,
 				struct inode *inode,
 				struct ext4_ext_path *path,
@@ -117,6 +157,7 @@
 {
 	int err;
 	if (path->p_bh) {
+		ext4_extent_block_csum_set(inode, ext_block_hdr(path->p_bh));
 		/* path points to block */
 		err = __ext4_handle_dirty_metadata(where, line, handle,
 						   inode, path->p_bh);
@@ -391,6 +432,12 @@
 		error_msg = "invalid extent entries";
 		goto corrupted;
 	}
+	/* Verify checksum on non-root extent tree nodes */
+	if (ext_depth(inode) != depth &&
+	    !ext4_extent_block_csum_verify(inode, eh)) {
+		error_msg = "extent tree corrupted";
+		goto corrupted;
+	}
 	return 0;
 
 corrupted:
@@ -412,6 +459,26 @@
 	return ext4_ext_check(inode, ext_inode_hdr(inode), ext_depth(inode));
 }
 
+static int __ext4_ext_check_block(const char *function, unsigned int line,
+				  struct inode *inode,
+				  struct ext4_extent_header *eh,
+				  int depth,
+				  struct buffer_head *bh)
+{
+	int ret;
+
+	if (buffer_verified(bh))
+		return 0;
+	ret = ext4_ext_check(inode, eh, depth);
+	if (ret)
+		return ret;
+	set_buffer_verified(bh);
+	return ret;
+}
+
+#define ext4_ext_check_block(inode, eh, depth, bh)	\
+	__ext4_ext_check_block(__func__, __LINE__, inode, eh, depth, bh)
+
 #ifdef EXT_DEBUG
 static void ext4_ext_show_path(struct inode *inode, struct ext4_ext_path *path)
 {
@@ -536,7 +603,7 @@
 	}
 
 	path->p_idx = l - 1;
-	ext_debug("  -> %d->%lld ", le32_to_cpu(path->p_idx->ei_block),
+	ext_debug("  -> %u->%lld ", le32_to_cpu(path->p_idx->ei_block),
 		  ext4_idx_pblock(path->p_idx));
 
 #ifdef CHECK_BINSEARCH
@@ -668,8 +735,6 @@
 	i = depth;
 	/* walk through the tree */
 	while (i) {
-		int need_to_validate = 0;
-
 		ext_debug("depth %d: num %d, max %d\n",
 			  ppos, le16_to_cpu(eh->eh_entries), le16_to_cpu(eh->eh_max));
 
@@ -688,8 +753,6 @@
 				put_bh(bh);
 				goto err;
 			}
-			/* validate the extent entries */
-			need_to_validate = 1;
 		}
 		eh = ext_block_hdr(bh);
 		ppos++;
@@ -703,7 +766,7 @@
 		path[ppos].p_hdr = eh;
 		i--;
 
-		if (need_to_validate && ext4_ext_check(inode, eh, i))
+		if (ext4_ext_check_block(inode, eh, i, bh))
 			goto err;
 	}
 
@@ -914,6 +977,7 @@
 		le16_add_cpu(&neh->eh_entries, m);
 	}
 
+	ext4_extent_block_csum_set(inode, neh);
 	set_buffer_uptodate(bh);
 	unlock_buffer(bh);
 
@@ -992,6 +1056,7 @@
 				sizeof(struct ext4_extent_idx) * m);
 			le16_add_cpu(&neh->eh_entries, m);
 		}
+		ext4_extent_block_csum_set(inode, neh);
 		set_buffer_uptodate(bh);
 		unlock_buffer(bh);
 
@@ -1089,6 +1154,7 @@
 	else
 		neh->eh_max = cpu_to_le16(ext4_ext_space_block(inode, 0));
 	neh->eh_magic = EXT4_EXT_MAGIC;
+	ext4_extent_block_csum_set(inode, neh);
 	set_buffer_uptodate(bh);
 	unlock_buffer(bh);
 
@@ -1344,7 +1410,8 @@
 			return -EIO;
 		eh = ext_block_hdr(bh);
 		/* subtract from p_depth to get proper eh_depth */
-		if (ext4_ext_check(inode, eh, path->p_depth - depth)) {
+		if (ext4_ext_check_block(inode, eh,
+					 path->p_depth - depth, bh)) {
 			put_bh(bh);
 			return -EIO;
 		}
@@ -1357,7 +1424,7 @@
 	if (bh == NULL)
 		return -EIO;
 	eh = ext_block_hdr(bh);
-	if (ext4_ext_check(inode, eh, path->p_depth - depth)) {
+	if (ext4_ext_check_block(inode, eh, path->p_depth - depth, bh)) {
 		put_bh(bh);
 		return -EIO;
 	}
@@ -2644,8 +2711,8 @@
 				err = -EIO;
 				break;
 			}
-			if (ext4_ext_check(inode, ext_block_hdr(bh),
-							depth - i - 1)) {
+			if (ext4_ext_check_block(inode, ext_block_hdr(bh),
+							depth - i - 1, bh)) {
 				err = -EIO;
 				break;
 			}
@@ -4722,8 +4789,8 @@
 
 	/* Now release the pages */
 	if (last_page_offset > first_page_offset) {
-		truncate_inode_pages_range(mapping, first_page_offset,
-					   last_page_offset-1);
+		truncate_pagecache_range(inode, first_page_offset,
+					 last_page_offset - 1);
 	}
 
 	/* finish any pending end_io work */
diff --git a/fs/ext4/file.c b/fs/ext4/file.c
index cb70f18..8c7642a 100644
--- a/fs/ext4/file.c
+++ b/fs/ext4/file.c
@@ -95,7 +95,7 @@
 {
 	struct inode *inode = iocb->ki_filp->f_path.dentry->d_inode;
 	int unaligned_aio = 0;
-	int ret;
+	ssize_t ret;
 
 	/*
 	 * If we have encountered a bitmap-format file, the size limit
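
The one-line file.c change widens ret from int to ssize_t: the generic write
paths hand back byte counts (or negative errnos) as ssize_t, and narrowing
that to int silently mangles any value outside the int range. A tiny
illustration of the narrowing itself, not of the ext4 call chain (assumes a
64-bit ssize_t, as on LP64 systems):

/* Why an int is the wrong type for an ssize_t byte count. */
#include <limits.h>
#include <stdio.h>
#include <sys/types.h>

int main(void)
{
	ssize_t written = (ssize_t)INT_MAX + 512;	/* a count above INT_MAX */
	int truncated = (int)written;			/* the old 'int ret' */

	printf("ssize_t count:        %zd\n", written);
	printf("after int truncation: %d\n", truncated);  /* implementation-defined */
	return 0;
}
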
diff --git a/fs/ext4/ialloc.c b/fs/ext4/ialloc.c
index 9f9acac..d48e8b1 100644
--- a/fs/ext4/ialloc.c
+++ b/fs/ext4/ialloc.c
@@ -70,24 +70,27 @@
 				       ext4_group_t block_group,
 				       struct ext4_group_desc *gdp)
 {
-	struct ext4_sb_info *sbi = EXT4_SB(sb);
-
 	J_ASSERT_BH(bh, buffer_locked(bh));
 
 	/* If the checksum is bad, mark all blocks and inodes used to prevent
 	 * allocation, essentially implementing a per-group read-only flag. */
-	if (!ext4_group_desc_csum_verify(sbi, block_group, gdp)) {
+	if (!ext4_group_desc_csum_verify(sb, block_group, gdp)) {
 		ext4_error(sb, "Checksum bad for group %u", block_group);
 		ext4_free_group_clusters_set(sb, gdp, 0);
 		ext4_free_inodes_set(sb, gdp, 0);
 		ext4_itable_unused_set(sb, gdp, 0);
 		memset(bh->b_data, 0xff, sb->s_blocksize);
+		ext4_inode_bitmap_csum_set(sb, block_group, gdp, bh,
+					   EXT4_INODES_PER_GROUP(sb) / 8);
 		return 0;
 	}
 
 	memset(bh->b_data, 0, (EXT4_INODES_PER_GROUP(sb) + 7) / 8);
 	ext4_mark_bitmap_end(EXT4_INODES_PER_GROUP(sb), sb->s_blocksize * 8,
 			bh->b_data);
+	ext4_inode_bitmap_csum_set(sb, block_group, gdp, bh,
+				   EXT4_INODES_PER_GROUP(sb) / 8);
+	ext4_group_desc_csum_set(sb, block_group, gdp);
 
 	return EXT4_INODES_PER_GROUP(sb);
 }
@@ -128,12 +131,12 @@
 		return NULL;
 	}
 	if (bitmap_uptodate(bh))
-		return bh;
+		goto verify;
 
 	lock_buffer(bh);
 	if (bitmap_uptodate(bh)) {
 		unlock_buffer(bh);
-		return bh;
+		goto verify;
 	}
 
 	ext4_lock_group(sb, block_group);
@@ -141,6 +144,7 @@
 		ext4_init_inode_bitmap(sb, bh, block_group, desc);
 		set_bitmap_uptodate(bh);
 		set_buffer_uptodate(bh);
+		set_buffer_verified(bh);
 		ext4_unlock_group(sb, block_group);
 		unlock_buffer(bh);
 		return bh;
@@ -154,7 +158,7 @@
 		 */
 		set_bitmap_uptodate(bh);
 		unlock_buffer(bh);
-		return bh;
+		goto verify;
 	}
 	/*
 	 * submit the buffer_head for reading
@@ -171,6 +175,20 @@
 			   block_group, bitmap_blk);
 		return NULL;
 	}
+
+verify:
+	ext4_lock_group(sb, block_group);
+	if (!buffer_verified(bh) &&
+	    !ext4_inode_bitmap_csum_verify(sb, block_group, desc, bh,
+					   EXT4_INODES_PER_GROUP(sb) / 8)) {
+		ext4_unlock_group(sb, block_group);
+		put_bh(bh);
+		ext4_error(sb, "Corrupt inode bitmap - block_group = %u, "
+			   "inode_bitmap = %llu", block_group, bitmap_blk);
+		return NULL;
+	}
+	ext4_unlock_group(sb, block_group);
+	set_buffer_verified(bh);
 	return bh;
 }
 
@@ -276,7 +294,9 @@
 		ext4_used_dirs_set(sb, gdp, count);
 		percpu_counter_dec(&sbi->s_dirs_counter);
 	}
-	gdp->bg_checksum = ext4_group_desc_csum(sbi, block_group, gdp);
+	ext4_inode_bitmap_csum_set(sb, block_group, gdp, bitmap_bh,
+				   EXT4_INODES_PER_GROUP(sb) / 8);
+	ext4_group_desc_csum_set(sb, block_group, gdp);
 	ext4_unlock_group(sb, block_group);
 
 	percpu_counter_inc(&sbi->s_freeinodes_counter);
@@ -488,10 +508,12 @@
 	for (i = 0; i < ngroups; i++) {
 		grp = (parent_group + i) % ngroups;
 		desc = ext4_get_group_desc(sb, grp, NULL);
-		grp_free = ext4_free_inodes_count(sb, desc);
-		if (desc && grp_free && grp_free >= avefreei) {
-			*group = grp;
-			return 0;
+		if (desc) {
+			grp_free = ext4_free_inodes_count(sb, desc);
+			if (grp_free && grp_free >= avefreei) {
+				*group = grp;
+				return 0;
+			}
 		}
 	}
 
@@ -709,7 +731,7 @@
 
 got:
 	/* We may have to initialize the block bitmap if it isn't already */
-	if (EXT4_HAS_RO_COMPAT_FEATURE(sb, EXT4_FEATURE_RO_COMPAT_GDT_CSUM) &&
+	if (ext4_has_group_desc_csum(sb) &&
 	    gdp->bg_flags & cpu_to_le16(EXT4_BG_BLOCK_UNINIT)) {
 		struct buffer_head *block_bitmap_bh;
 
@@ -731,8 +753,11 @@
 			gdp->bg_flags &= cpu_to_le16(~EXT4_BG_BLOCK_UNINIT);
 			ext4_free_group_clusters_set(sb, gdp,
 				ext4_free_clusters_after_init(sb, group, gdp));
-			gdp->bg_checksum = ext4_group_desc_csum(sbi, group,
-								gdp);
+			ext4_block_bitmap_csum_set(sb, group, gdp,
+						   block_bitmap_bh,
+						   EXT4_BLOCKS_PER_GROUP(sb) /
+						   8);
+			ext4_group_desc_csum_set(sb, group, gdp);
 		}
 		ext4_unlock_group(sb, group);
 
@@ -751,7 +776,7 @@
 		goto fail;
 
 	/* Update the relevant bg descriptor fields */
-	if (EXT4_HAS_RO_COMPAT_FEATURE(sb, EXT4_FEATURE_RO_COMPAT_GDT_CSUM)) {
+	if (ext4_has_group_desc_csum(sb)) {
 		int free;
 		struct ext4_group_info *grp = ext4_get_group_info(sb, group);
 
@@ -772,7 +797,10 @@
 			ext4_itable_unused_set(sb, gdp,
 					(EXT4_INODES_PER_GROUP(sb) - ino));
 		up_read(&grp->alloc_sem);
+	} else {
+		ext4_lock_group(sb, group);
 	}
+
 	ext4_free_inodes_set(sb, gdp, ext4_free_inodes_count(sb, gdp) - 1);
 	if (S_ISDIR(mode)) {
 		ext4_used_dirs_set(sb, gdp, ext4_used_dirs_count(sb, gdp) + 1);
@@ -782,10 +810,12 @@
 			atomic_inc(&sbi->s_flex_groups[f].used_dirs);
 		}
 	}
-	if (EXT4_HAS_RO_COMPAT_FEATURE(sb, EXT4_FEATURE_RO_COMPAT_GDT_CSUM)) {
-		gdp->bg_checksum = ext4_group_desc_csum(sbi, group, gdp);
-		ext4_unlock_group(sb, group);
+	if (ext4_has_group_desc_csum(sb)) {
+		ext4_inode_bitmap_csum_set(sb, group, gdp, inode_bitmap_bh,
+					   EXT4_INODES_PER_GROUP(sb) / 8);
+		ext4_group_desc_csum_set(sb, group, gdp);
 	}
+	ext4_unlock_group(sb, group);
 
 	BUFFER_TRACE(inode_bitmap_bh, "call ext4_handle_dirty_metadata");
 	err = ext4_handle_dirty_metadata(handle, NULL, inode_bitmap_bh);
@@ -850,6 +880,19 @@
 	inode->i_generation = sbi->s_next_generation++;
 	spin_unlock(&sbi->s_next_gen_lock);
 
+	/* Precompute checksum seed for inode metadata */
+	if (EXT4_HAS_RO_COMPAT_FEATURE(sb,
+			EXT4_FEATURE_RO_COMPAT_METADATA_CSUM)) {
+		__u32 csum;
+		struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
+		__le32 inum = cpu_to_le32(inode->i_ino);
+		__le32 gen = cpu_to_le32(inode->i_generation);
+		csum = ext4_chksum(sbi, sbi->s_csum_seed, (__u8 *)&inum,
+				   sizeof(inum));
+		ei->i_csum_seed = ext4_chksum(sbi, csum, (__u8 *)&gen,
+					      sizeof(gen));
+	}
+
 	ext4_clear_state_flags(ei); /* Only relevant on 32-bit archs */
 	ext4_set_inode_state(inode, EXT4_STATE_NEW);
 
@@ -1140,7 +1183,7 @@
 skip_zeroout:
 	ext4_lock_group(sb, group);
 	gdp->bg_flags |= cpu_to_le16(EXT4_BG_INODE_ZEROED);
-	gdp->bg_checksum = ext4_group_desc_csum(sbi, group, gdp);
+	ext4_group_desc_csum_set(sb, group, gdp);
 	ext4_unlock_group(sb, group);
 
 	BUFFER_TRACE(group_desc_bh,
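
The ialloc.c hunk above precomputes ei->i_csum_seed by folding the inode
number and then the generation into the per-filesystem seed (inode.c repeats
the same computation when an inode is read), so later per-inode checksums only
have to hash the object itself. A sketch of that chaining; the bitwise crc32c
is the same stand-in as in the earlier bitmap example, and the little-endian
conversion of inum/gen is glossed over:

/* Chain per-fs seed -> +inode number -> +generation into a per-inode seed. */
#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

static uint32_t crc32c(uint32_t crc, const void *buf, size_t len)
{
	const uint8_t *p = buf;

	while (len--) {
		crc ^= *p++;
		for (int i = 0; i < 8; i++)
			crc = (crc >> 1) ^ (0x82F63B78 & -(crc & 1));
	}
	return crc;
}

static uint32_t inode_csum_seed(uint32_t fs_seed, uint32_t inum, uint32_t gen)
{
	uint32_t seed;

	seed = crc32c(fs_seed, &inum, sizeof(inum));	/* fold in inode number */
	seed = crc32c(seed, &gen, sizeof(gen));		/* then the generation  */
	return seed;
}

int main(void)
{
	uint8_t uuid[16] = { 0xde, 0xad, 0xbe, 0xef };	/* toy filesystem UUID */
	uint32_t fs_seed = crc32c(~0u, uuid, sizeof(uuid));

	printf("fs seed:    0x%08x\n", fs_seed);
	printf("inode seed: 0x%08x\n", inode_csum_seed(fs_seed, 12, 1));
	return 0;
}
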
diff --git a/fs/ext4/inode.c b/fs/ext4/inode.c
index 07eaf56..02bc8cb 100644
--- a/fs/ext4/inode.c
+++ b/fs/ext4/inode.c
@@ -47,6 +47,73 @@
 
 #define MPAGE_DA_EXTENT_TAIL 0x01
 
+static __u32 ext4_inode_csum(struct inode *inode, struct ext4_inode *raw,
+			      struct ext4_inode_info *ei)
+{
+	struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
+	__u16 csum_lo;
+	__u16 csum_hi = 0;
+	__u32 csum;
+
+	csum_lo = raw->i_checksum_lo;
+	raw->i_checksum_lo = 0;
+	if (EXT4_INODE_SIZE(inode->i_sb) > EXT4_GOOD_OLD_INODE_SIZE &&
+	    EXT4_FITS_IN_INODE(raw, ei, i_checksum_hi)) {
+		csum_hi = raw->i_checksum_hi;
+		raw->i_checksum_hi = 0;
+	}
+
+	csum = ext4_chksum(sbi, ei->i_csum_seed, (__u8 *)raw,
+			   EXT4_INODE_SIZE(inode->i_sb));
+
+	raw->i_checksum_lo = csum_lo;
+	if (EXT4_INODE_SIZE(inode->i_sb) > EXT4_GOOD_OLD_INODE_SIZE &&
+	    EXT4_FITS_IN_INODE(raw, ei, i_checksum_hi))
+		raw->i_checksum_hi = csum_hi;
+
+	return csum;
+}
+
+static int ext4_inode_csum_verify(struct inode *inode, struct ext4_inode *raw,
+				  struct ext4_inode_info *ei)
+{
+	__u32 provided, calculated;
+
+	if (EXT4_SB(inode->i_sb)->s_es->s_creator_os !=
+	    cpu_to_le32(EXT4_OS_LINUX) ||
+	    !EXT4_HAS_RO_COMPAT_FEATURE(inode->i_sb,
+		EXT4_FEATURE_RO_COMPAT_METADATA_CSUM))
+		return 1;
+
+	provided = le16_to_cpu(raw->i_checksum_lo);
+	calculated = ext4_inode_csum(inode, raw, ei);
+	if (EXT4_INODE_SIZE(inode->i_sb) > EXT4_GOOD_OLD_INODE_SIZE &&
+	    EXT4_FITS_IN_INODE(raw, ei, i_checksum_hi))
+		provided |= ((__u32)le16_to_cpu(raw->i_checksum_hi)) << 16;
+	else
+		calculated &= 0xFFFF;
+
+	return provided == calculated;
+}
+
+static void ext4_inode_csum_set(struct inode *inode, struct ext4_inode *raw,
+				struct ext4_inode_info *ei)
+{
+	__u32 csum;
+
+	if (EXT4_SB(inode->i_sb)->s_es->s_creator_os !=
+	    cpu_to_le32(EXT4_OS_LINUX) ||
+	    !EXT4_HAS_RO_COMPAT_FEATURE(inode->i_sb,
+		EXT4_FEATURE_RO_COMPAT_METADATA_CSUM))
+		return;
+
+	csum = ext4_inode_csum(inode, raw, ei);
+	raw->i_checksum_lo = cpu_to_le16(csum & 0xFFFF);
+	if (EXT4_INODE_SIZE(inode->i_sb) > EXT4_GOOD_OLD_INODE_SIZE &&
+	    EXT4_FITS_IN_INODE(raw, ei, i_checksum_hi))
+		raw->i_checksum_hi = cpu_to_le16(csum >> 16);
+}
+
 static inline int ext4_begin_ordered_truncate(struct inode *inode,
 					      loff_t new_size)
 {
@@ -3517,8 +3584,7 @@
 				b = table;
 			end = b + EXT4_SB(sb)->s_inode_readahead_blks;
 			num = EXT4_INODES_PER_GROUP(sb);
-			if (EXT4_HAS_RO_COMPAT_FEATURE(sb,
-				       EXT4_FEATURE_RO_COMPAT_GDT_CSUM))
+			if (ext4_has_group_desc_csum(sb))
 				num -= ext4_itable_unused_count(sb, gdp);
 			table += num / inodes_per_block;
 			if (end > table)
@@ -3646,6 +3712,39 @@
 	if (ret < 0)
 		goto bad_inode;
 	raw_inode = ext4_raw_inode(&iloc);
+
+	if (EXT4_INODE_SIZE(inode->i_sb) > EXT4_GOOD_OLD_INODE_SIZE) {
+		ei->i_extra_isize = le16_to_cpu(raw_inode->i_extra_isize);
+		if (EXT4_GOOD_OLD_INODE_SIZE + ei->i_extra_isize >
+		    EXT4_INODE_SIZE(inode->i_sb)) {
+			EXT4_ERROR_INODE(inode, "bad extra_isize (%u != %u)",
+				EXT4_GOOD_OLD_INODE_SIZE + ei->i_extra_isize,
+				EXT4_INODE_SIZE(inode->i_sb));
+			ret = -EIO;
+			goto bad_inode;
+		}
+	} else
+		ei->i_extra_isize = 0;
+
+	/* Precompute checksum seed for inode metadata */
+	if (EXT4_HAS_RO_COMPAT_FEATURE(sb,
+			EXT4_FEATURE_RO_COMPAT_METADATA_CSUM)) {
+		struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
+		__u32 csum;
+		__le32 inum = cpu_to_le32(inode->i_ino);
+		__le32 gen = raw_inode->i_generation;
+		csum = ext4_chksum(sbi, sbi->s_csum_seed, (__u8 *)&inum,
+				   sizeof(inum));
+		ei->i_csum_seed = ext4_chksum(sbi, csum, (__u8 *)&gen,
+					      sizeof(gen));
+	}
+
+	if (!ext4_inode_csum_verify(inode, raw_inode, ei)) {
+		EXT4_ERROR_INODE(inode, "checksum invalid");
+		ret = -EIO;
+		goto bad_inode;
+	}
+
 	inode->i_mode = le16_to_cpu(raw_inode->i_mode);
 	i_uid = (uid_t)le16_to_cpu(raw_inode->i_uid_low);
 	i_gid = (gid_t)le16_to_cpu(raw_inode->i_gid_low);
@@ -3725,12 +3824,6 @@
 	}
 
 	if (EXT4_INODE_SIZE(inode->i_sb) > EXT4_GOOD_OLD_INODE_SIZE) {
-		ei->i_extra_isize = le16_to_cpu(raw_inode->i_extra_isize);
-		if (EXT4_GOOD_OLD_INODE_SIZE + ei->i_extra_isize >
-		    EXT4_INODE_SIZE(inode->i_sb)) {
-			ret = -EIO;
-			goto bad_inode;
-		}
 		if (ei->i_extra_isize == 0) {
 			/* The extra space is currently unused. Use it. */
 			ei->i_extra_isize = sizeof(struct ext4_inode) -
@@ -3742,8 +3835,7 @@
 			if (*magic == cpu_to_le32(EXT4_XATTR_MAGIC))
 				ext4_set_inode_state(inode, EXT4_STATE_XATTR);
 		}
-	} else
-		ei->i_extra_isize = 0;
+	}
 
 	EXT4_INODE_GET_XTIME(i_ctime, inode, raw_inode);
 	EXT4_INODE_GET_XTIME(i_mtime, inode, raw_inode);
@@ -3942,7 +4034,7 @@
 			EXT4_SET_RO_COMPAT_FEATURE(sb,
 					EXT4_FEATURE_RO_COMPAT_LARGE_FILE);
 			ext4_handle_sync(handle);
-			err = ext4_handle_dirty_super(handle, sb);
+			err = ext4_handle_dirty_super_now(handle, sb);
 		}
 	}
 	raw_inode->i_generation = cpu_to_le32(inode->i_generation);
@@ -3969,6 +4061,8 @@
 		raw_inode->i_extra_isize = cpu_to_le16(ei->i_extra_isize);
 	}
 
+	ext4_inode_csum_set(inode, raw_inode, ei);
+
 	BUFFER_TRACE(bh, "call ext4_handle_dirty_metadata");
 	rc = ext4_handle_dirty_metadata(handle, NULL, bh);
 	if (!err)
@@ -4213,7 +4307,8 @@
 	 * will return the blocks that include the delayed allocation
 	 * blocks for this file.
 	 */
-	delalloc_blocks = EXT4_I(inode)->i_reserved_data_blocks;
+	delalloc_blocks = EXT4_C2B(EXT4_SB(inode->i_sb),
+				EXT4_I(inode)->i_reserved_data_blocks);
 
 	stat->blocks += (delalloc_blocks << inode->i_sb->s_blocksize_bits)>>9;
 	return 0;
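
ext4_inode_csum() above has to checksum a structure that embeds its own
checksum: it saves the lo/hi fields, zeroes them, hashes the whole raw inode,
then restores the saved values. The same save-zero-compute-restore idiom in
miniature, with a trivial placeholder hash rather than crc32c:

/* Checksumming a struct that contains its own checksum field. */
#include <stdint.h>
#include <stdio.h>

struct record {
	uint32_t payload[4];
	uint16_t checksum_lo;	/* must read as zero while hashing */
	uint16_t checksum_hi;
};

static uint32_t toy_hash(const void *buf, unsigned len)
{
	const uint8_t *p = buf;
	uint32_t h = 0;

	while (len--)
		h = h * 31 + *p++;
	return h;
}

static uint32_t record_csum(struct record *r)
{
	uint16_t lo = r->checksum_lo, hi = r->checksum_hi;
	uint32_t csum;

	r->checksum_lo = 0;			/* zero the embedded fields  */
	r->checksum_hi = 0;
	csum = toy_hash(r, sizeof(*r));		/* hash sees them as zero    */
	r->checksum_lo = lo;			/* restore the stored values */
	r->checksum_hi = hi;
	return csum;
}

int main(void)
{
	struct record r = { { 1, 2, 3, 4 }, 0, 0 };
	uint32_t csum = record_csum(&r);

	r.checksum_lo = csum & 0xFFFF;
	r.checksum_hi = csum >> 16;
	printf("stored 0x%08x, verifies: %d\n", csum, record_csum(&r) == csum);
	return 0;
}
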
diff --git a/fs/ext4/ioctl.c b/fs/ext4/ioctl.c
index 6eee255..e34deac 100644
--- a/fs/ext4/ioctl.c
+++ b/fs/ext4/ioctl.c
@@ -38,7 +38,7 @@
 		handle_t *handle = NULL;
 		int err, migrate = 0;
 		struct ext4_iloc iloc;
-		unsigned int oldflags;
+		unsigned int oldflags, mask, i;
 		unsigned int jflag;
 
 		if (!inode_owner_or_capable(inode))
@@ -115,9 +115,14 @@
 		if (err)
 			goto flags_err;
 
-		flags = flags & EXT4_FL_USER_MODIFIABLE;
-		flags |= oldflags & ~EXT4_FL_USER_MODIFIABLE;
-		ei->i_flags = flags;
+		for (i = 0, mask = 1; i < 32; i++, mask <<= 1) {
+			if (!(mask & EXT4_FL_USER_MODIFIABLE))
+				continue;
+			if (mask & flags)
+				ext4_set_inode_flag(inode, i);
+			else
+				ext4_clear_inode_flag(inode, i);
+		}
 
 		ext4_set_inode_flags(inode);
 		inode->i_ctime = ext4_current_time(inode);
@@ -152,6 +157,13 @@
 		if (!inode_owner_or_capable(inode))
 			return -EPERM;
 
+		if (EXT4_HAS_RO_COMPAT_FEATURE(inode->i_sb,
+				EXT4_FEATURE_RO_COMPAT_METADATA_CSUM)) {
+			ext4_warning(sb, "Setting inode version is not "
+				     "supported with metadata_csum enabled.");
+			return -ENOTTY;
+		}
+
 		err = mnt_want_write_file(filp);
 		if (err)
 			return err;
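
The ioctl.c change replaces a bulk mask-and-or of i_flags with a bit-by-bit
loop, so each user-modifiable flag is applied through the proper set/clear
helper while protected flags are never touched. The same loop shape in a
self-contained form; the mask value and the helpers are illustrative stand-ins
for EXT4_FL_USER_MODIFIABLE and ext4_{set,clear}_inode_flag():

/* Apply only user-modifiable flag bits, one bit position at a time. */
#include <stdint.h>
#include <stdio.h>

#define USER_MODIFIABLE	0x000000ffu	/* hypothetical modifiable mask */

static uint32_t inode_flags = 0x00000f0fu;	/* some bits are protected */

static void set_flag(int bit)   { inode_flags |=  (1u << bit); }
static void clear_flag(int bit) { inode_flags &= ~(1u << bit); }

static void apply_user_flags(uint32_t requested)
{
	uint32_t mask;
	int i;

	for (i = 0, mask = 1; i < 32; i++, mask <<= 1) {
		if (!(mask & USER_MODIFIABLE))
			continue;		/* never touch protected bits */
		if (mask & requested)
			set_flag(i);
		else
			clear_flag(i);
	}
}

int main(void)
{
	apply_user_flags(0x000000aau);
	/* protected 0x0f00 bits survive, the low byte becomes 0xaa */
	printf("flags = 0x%08x\n", inode_flags);
	return 0;
}
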
diff --git a/fs/ext4/mballoc.c b/fs/ext4/mballoc.c
index 99ab428..1cd6994 100644
--- a/fs/ext4/mballoc.c
+++ b/fs/ext4/mballoc.c
@@ -788,7 +788,7 @@
 	int first_block;
 	struct super_block *sb;
 	struct buffer_head *bhs;
-	struct buffer_head **bh;
+	struct buffer_head **bh = NULL;
 	struct inode *inode;
 	char *data;
 	char *bitmap;
@@ -2375,7 +2375,7 @@
 	return 0;
 }
 
-int ext4_mb_init(struct super_block *sb, int needs_recovery)
+int ext4_mb_init(struct super_block *sb)
 {
 	struct ext4_sb_info *sbi = EXT4_SB(sb);
 	unsigned i, j;
@@ -2517,6 +2517,9 @@
 	struct ext4_sb_info *sbi = EXT4_SB(sb);
 	struct kmem_cache *cachep = get_groupinfo_cache(sb->s_blocksize_bits);
 
+	if (sbi->s_proc)
+		remove_proc_entry("mb_groups", sbi->s_proc);
+
 	if (sbi->s_group_info) {
 		for (i = 0; i < ngroups; i++) {
 			grinfo = ext4_get_group_info(sb, i);
@@ -2564,8 +2567,6 @@
 	}
 
 	free_percpu(sbi->s_locality_groups);
-	if (sbi->s_proc)
-		remove_proc_entry("mb_groups", sbi->s_proc);
 
 	return 0;
 }
@@ -2797,7 +2798,9 @@
 	}
 	len = ext4_free_group_clusters(sb, gdp) - ac->ac_b_ex.fe_len;
 	ext4_free_group_clusters_set(sb, gdp, len);
-	gdp->bg_checksum = ext4_group_desc_csum(sbi, ac->ac_b_ex.fe_group, gdp);
+	ext4_block_bitmap_csum_set(sb, ac->ac_b_ex.fe_group, gdp, bitmap_bh,
+				   EXT4_BLOCKS_PER_GROUP(sb) / 8);
+	ext4_group_desc_csum_set(sb, ac->ac_b_ex.fe_group, gdp);
 
 	ext4_unlock_group(sb, ac->ac_b_ex.fe_group);
 	percpu_counter_sub(&sbi->s_freeclusters_counter, ac->ac_b_ex.fe_len);
@@ -3071,13 +3074,9 @@
 static void ext4_discard_allocated_blocks(struct ext4_allocation_context *ac)
 {
 	struct ext4_prealloc_space *pa = ac->ac_pa;
-	int len;
 
-	if (pa && pa->pa_type == MB_INODE_PA) {
-		len = ac->ac_b_ex.fe_len;
-		pa->pa_free += len;
-	}
-
+	if (pa && pa->pa_type == MB_INODE_PA)
+		pa->pa_free += ac->ac_b_ex.fe_len;
 }
 
 /*
@@ -4636,6 +4635,7 @@
 		 */
 		new_entry = kmem_cache_alloc(ext4_free_data_cachep, GFP_NOFS);
 		if (!new_entry) {
+			ext4_mb_unload_buddy(&e4b);
 			err = -ENOMEM;
 			goto error_return;
 		}
@@ -4659,7 +4659,9 @@
 
 	ret = ext4_free_group_clusters(sb, gdp) + count_clusters;
 	ext4_free_group_clusters_set(sb, gdp, ret);
-	gdp->bg_checksum = ext4_group_desc_csum(sbi, block_group, gdp);
+	ext4_block_bitmap_csum_set(sb, block_group, gdp, bitmap_bh,
+				   EXT4_BLOCKS_PER_GROUP(sb) / 8);
+	ext4_group_desc_csum_set(sb, block_group, gdp);
 	ext4_unlock_group(sb, block_group);
 	percpu_counter_add(&sbi->s_freeclusters_counter, count_clusters);
 
@@ -4803,7 +4805,9 @@
 	mb_free_blocks(NULL, &e4b, bit, count);
 	blk_free_count = blocks_freed + ext4_free_group_clusters(sb, desc);
 	ext4_free_group_clusters_set(sb, desc, blk_free_count);
-	desc->bg_checksum = ext4_group_desc_csum(sbi, block_group, desc);
+	ext4_block_bitmap_csum_set(sb, block_group, desc, bitmap_bh,
+				   EXT4_BLOCKS_PER_GROUP(sb) / 8);
+	ext4_group_desc_csum_set(sb, block_group, desc);
 	ext4_unlock_group(sb, block_group);
 	percpu_counter_add(&sbi->s_freeclusters_counter,
 			   EXT4_B2C(sbi, blocks_freed));
diff --git a/fs/ext4/mmp.c b/fs/ext4/mmp.c
index ed6548d..f99a131 100644
--- a/fs/ext4/mmp.c
+++ b/fs/ext4/mmp.c
@@ -6,12 +6,45 @@
 
 #include "ext4.h"
 
+/* Checksumming functions */
+static __u32 ext4_mmp_csum(struct super_block *sb, struct mmp_struct *mmp)
+{
+	struct ext4_sb_info *sbi = EXT4_SB(sb);
+	int offset = offsetof(struct mmp_struct, mmp_checksum);
+	__u32 csum;
+
+	csum = ext4_chksum(sbi, sbi->s_csum_seed, (char *)mmp, offset);
+
+	return cpu_to_le32(csum);
+}
+
+int ext4_mmp_csum_verify(struct super_block *sb, struct mmp_struct *mmp)
+{
+	if (!EXT4_HAS_RO_COMPAT_FEATURE(sb,
+				       EXT4_FEATURE_RO_COMPAT_METADATA_CSUM))
+		return 1;
+
+	return mmp->mmp_checksum == ext4_mmp_csum(sb, mmp);
+}
+
+void ext4_mmp_csum_set(struct super_block *sb, struct mmp_struct *mmp)
+{
+	if (!EXT4_HAS_RO_COMPAT_FEATURE(sb,
+				       EXT4_FEATURE_RO_COMPAT_METADATA_CSUM))
+		return;
+
+	mmp->mmp_checksum = ext4_mmp_csum(sb, mmp);
+}
+
 /*
  * Write the MMP block using WRITE_SYNC to try to get the block on-disk
  * faster.
  */
-static int write_mmp_block(struct buffer_head *bh)
+static int write_mmp_block(struct super_block *sb, struct buffer_head *bh)
 {
+	struct mmp_struct *mmp = (struct mmp_struct *)(bh->b_data);
+
+	ext4_mmp_csum_set(sb, mmp);
 	mark_buffer_dirty(bh);
 	lock_buffer(bh);
 	bh->b_end_io = end_buffer_write_sync;
@@ -59,7 +92,8 @@
 	}
 
 	mmp = (struct mmp_struct *)((*bh)->b_data);
-	if (le32_to_cpu(mmp->mmp_magic) != EXT4_MMP_MAGIC)
+	if (le32_to_cpu(mmp->mmp_magic) != EXT4_MMP_MAGIC ||
+	    !ext4_mmp_csum_verify(sb, mmp))
 		return -EINVAL;
 
 	return 0;
@@ -120,7 +154,7 @@
 		mmp->mmp_time = cpu_to_le64(get_seconds());
 		last_update_time = jiffies;
 
-		retval = write_mmp_block(bh);
+		retval = write_mmp_block(sb, bh);
 		/*
 		 * Don't spew too many error messages. Print one every
 		 * (s_mmp_update_interval * 60) seconds.
@@ -200,7 +234,7 @@
 	mmp->mmp_seq = cpu_to_le32(EXT4_MMP_SEQ_CLEAN);
 	mmp->mmp_time = cpu_to_le64(get_seconds());
 
-	retval = write_mmp_block(bh);
+	retval = write_mmp_block(sb, bh);
 
 failed:
 	kfree(data);
@@ -299,7 +333,7 @@
 	seq = mmp_new_seq();
 	mmp->mmp_seq = cpu_to_le32(seq);
 
-	retval = write_mmp_block(bh);
+	retval = write_mmp_block(sb, bh);
 	if (retval)
 		goto failed;
 
diff --git a/fs/ext4/namei.c b/fs/ext4/namei.c
index e2a3f4b..5845cd9 100644
--- a/fs/ext4/namei.c
+++ b/fs/ext4/namei.c
@@ -145,6 +145,14 @@
 	u16 size;
 };
 
+/*
+ * This goes at the end of each htree block.
+ */
+struct dx_tail {
+	u32 dt_reserved;
+	__le32 dt_checksum;	/* crc32c(uuid+inum+dirblock) */
+};
+
 static inline ext4_lblk_t dx_get_block(struct dx_entry *entry);
 static void dx_set_block(struct dx_entry *entry, ext4_lblk_t value);
 static inline unsigned dx_get_hash(struct dx_entry *entry);
@@ -180,6 +188,230 @@
 static int ext4_dx_add_entry(handle_t *handle, struct dentry *dentry,
 			     struct inode *inode);
 
+/* checksumming functions */
+#define EXT4_DIRENT_TAIL(block, blocksize) \
+	((struct ext4_dir_entry_tail *)(((void *)(block)) + \
+					((blocksize) - \
+					 sizeof(struct ext4_dir_entry_tail))))
+
+static void initialize_dirent_tail(struct ext4_dir_entry_tail *t,
+				   unsigned int blocksize)
+{
+	memset(t, 0, sizeof(struct ext4_dir_entry_tail));
+	t->det_rec_len = ext4_rec_len_to_disk(
+			sizeof(struct ext4_dir_entry_tail), blocksize);
+	t->det_reserved_ft = EXT4_FT_DIR_CSUM;
+}
+
+/* Walk through a dirent block to find a checksum "dirent" at the tail */
+static struct ext4_dir_entry_tail *get_dirent_tail(struct inode *inode,
+						   struct ext4_dir_entry *de)
+{
+	struct ext4_dir_entry_tail *t;
+
+#ifdef PARANOID
+	struct ext4_dir_entry *d, *top;
+
+	d = de;
+	top = (struct ext4_dir_entry *)(((void *)de) +
+		(EXT4_BLOCK_SIZE(inode->i_sb) -
+		sizeof(struct ext4_dir_entry_tail)));
+	while (d < top && d->rec_len)
+		d = (struct ext4_dir_entry *)(((void *)d) +
+		    le16_to_cpu(d->rec_len));
+
+	if (d != top)
+		return NULL;
+
+	t = (struct ext4_dir_entry_tail *)d;
+#else
+	t = EXT4_DIRENT_TAIL(de, EXT4_BLOCK_SIZE(inode->i_sb));
+#endif
+
+	if (t->det_reserved_zero1 ||
+	    le16_to_cpu(t->det_rec_len) != sizeof(struct ext4_dir_entry_tail) ||
+	    t->det_reserved_zero2 ||
+	    t->det_reserved_ft != EXT4_FT_DIR_CSUM)
+		return NULL;
+
+	return t;
+}
+
+static __le32 ext4_dirent_csum(struct inode *inode,
+			       struct ext4_dir_entry *dirent, int size)
+{
+	struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
+	struct ext4_inode_info *ei = EXT4_I(inode);
+	__u32 csum;
+
+	csum = ext4_chksum(sbi, ei->i_csum_seed, (__u8 *)dirent, size);
+	return cpu_to_le32(csum);
+}
+
+int ext4_dirent_csum_verify(struct inode *inode, struct ext4_dir_entry *dirent)
+{
+	struct ext4_dir_entry_tail *t;
+
+	if (!EXT4_HAS_RO_COMPAT_FEATURE(inode->i_sb,
+					EXT4_FEATURE_RO_COMPAT_METADATA_CSUM))
+		return 1;
+
+	t = get_dirent_tail(inode, dirent);
+	if (!t) {
+		EXT4_ERROR_INODE(inode, "metadata_csum set but no space in dir "
+				 "leaf for checksum.  Please run e2fsck -D.");
+		return 0;
+	}
+
+	if (t->det_checksum != ext4_dirent_csum(inode, dirent,
+						(void *)t - (void *)dirent))
+		return 0;
+
+	return 1;
+}
+
+static void ext4_dirent_csum_set(struct inode *inode,
+				 struct ext4_dir_entry *dirent)
+{
+	struct ext4_dir_entry_tail *t;
+
+	if (!EXT4_HAS_RO_COMPAT_FEATURE(inode->i_sb,
+					EXT4_FEATURE_RO_COMPAT_METADATA_CSUM))
+		return;
+
+	t = get_dirent_tail(inode, dirent);
+	if (!t) {
+		EXT4_ERROR_INODE(inode, "metadata_csum set but no space in dir "
+				 "leaf for checksum.  Please run e2fsck -D.");
+		return;
+	}
+
+	t->det_checksum = ext4_dirent_csum(inode, dirent,
+					   (void *)t - (void *)dirent);
+}
+
+static inline int ext4_handle_dirty_dirent_node(handle_t *handle,
+						struct inode *inode,
+						struct buffer_head *bh)
+{
+	ext4_dirent_csum_set(inode, (struct ext4_dir_entry *)bh->b_data);
+	return ext4_handle_dirty_metadata(handle, inode, bh);
+}
+
+static struct dx_countlimit *get_dx_countlimit(struct inode *inode,
+					       struct ext4_dir_entry *dirent,
+					       int *offset)
+{
+	struct ext4_dir_entry *dp;
+	struct dx_root_info *root;
+	int count_offset;
+
+	if (le16_to_cpu(dirent->rec_len) == EXT4_BLOCK_SIZE(inode->i_sb))
+		count_offset = 8;
+	else if (le16_to_cpu(dirent->rec_len) == 12) {
+		dp = (struct ext4_dir_entry *)(((void *)dirent) + 12);
+		if (le16_to_cpu(dp->rec_len) !=
+		    EXT4_BLOCK_SIZE(inode->i_sb) - 12)
+			return NULL;
+		root = (struct dx_root_info *)(((void *)dp + 12));
+		if (root->reserved_zero ||
+		    root->info_length != sizeof(struct dx_root_info))
+			return NULL;
+		count_offset = 32;
+	} else
+		return NULL;
+
+	if (offset)
+		*offset = count_offset;
+	return (struct dx_countlimit *)(((void *)dirent) + count_offset);
+}
+
+static __le32 ext4_dx_csum(struct inode *inode, struct ext4_dir_entry *dirent,
+			   int count_offset, int count, struct dx_tail *t)
+{
+	struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
+	struct ext4_inode_info *ei = EXT4_I(inode);
+	__u32 csum, old_csum;
+	int size;
+
+	size = count_offset + (count * sizeof(struct dx_entry));
+	old_csum = t->dt_checksum;
+	t->dt_checksum = 0;
+	csum = ext4_chksum(sbi, ei->i_csum_seed, (__u8 *)dirent, size);
+	csum = ext4_chksum(sbi, csum, (__u8 *)t, sizeof(struct dx_tail));
+	t->dt_checksum = old_csum;
+
+	return cpu_to_le32(csum);
+}
+
+static int ext4_dx_csum_verify(struct inode *inode,
+			       struct ext4_dir_entry *dirent)
+{
+	struct dx_countlimit *c;
+	struct dx_tail *t;
+	int count_offset, limit, count;
+
+	if (!EXT4_HAS_RO_COMPAT_FEATURE(inode->i_sb,
+					EXT4_FEATURE_RO_COMPAT_METADATA_CSUM))
+		return 1;
+
+	c = get_dx_countlimit(inode, dirent, &count_offset);
+	if (!c) {
+		EXT4_ERROR_INODE(inode, "dir seems corrupt?  Run e2fsck -D.");
+		return 1;
+	}
+	limit = le16_to_cpu(c->limit);
+	count = le16_to_cpu(c->count);
+	if (count_offset + (limit * sizeof(struct dx_entry)) >
+	    EXT4_BLOCK_SIZE(inode->i_sb) - sizeof(struct dx_tail)) {
+		EXT4_ERROR_INODE(inode, "metadata_csum set but no space for "
+				 "tree checksum found.  Run e2fsck -D.");
+		return 1;
+	}
+	t = (struct dx_tail *)(((struct dx_entry *)c) + limit);
+
+	if (t->dt_checksum != ext4_dx_csum(inode, dirent, count_offset,
+					    count, t))
+		return 0;
+	return 1;
+}
+
+static void ext4_dx_csum_set(struct inode *inode, struct ext4_dir_entry *dirent)
+{
+	struct dx_countlimit *c;
+	struct dx_tail *t;
+	int count_offset, limit, count;
+
+	if (!EXT4_HAS_RO_COMPAT_FEATURE(inode->i_sb,
+					EXT4_FEATURE_RO_COMPAT_METADATA_CSUM))
+		return;
+
+	c = get_dx_countlimit(inode, dirent, &count_offset);
+	if (!c) {
+		EXT4_ERROR_INODE(inode, "dir seems corrupt?  Run e2fsck -D.");
+		return;
+	}
+	limit = le16_to_cpu(c->limit);
+	count = le16_to_cpu(c->count);
+	if (count_offset + (limit * sizeof(struct dx_entry)) >
+	    EXT4_BLOCK_SIZE(inode->i_sb) - sizeof(struct dx_tail)) {
+		EXT4_ERROR_INODE(inode, "metadata_csum set but no space for "
+				 "tree checksum.  Run e2fsck -D.");
+		return;
+	}
+	t = (struct dx_tail *)(((struct dx_entry *)c) + limit);
+
+	t->dt_checksum = ext4_dx_csum(inode, dirent, count_offset, count, t);
+}
+
+static inline int ext4_handle_dirty_dx_node(handle_t *handle,
+					    struct inode *inode,
+					    struct buffer_head *bh)
+{
+	ext4_dx_csum_set(inode, (struct ext4_dir_entry *)bh->b_data);
+	return ext4_handle_dirty_metadata(handle, inode, bh);
+}
+
 /*
  * p is at least 6 bytes before the end of page
  */
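
The checksumming hunk above closes every directory leaf block with a fake
12-byte dirent (inode 0, name length 0, file type 0xDE) whose last field holds
the block checksum, located simply at blocksize minus the tail size. A
userspace sketch of laying out and recognizing that tail; the struct mirrors
ext4_dir_entry_tail but uses host-endian fields and a dummy checksum value:

/* Place and recognize a checksum-carrying fake dirent at a block's end. */
#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define BLOCKSIZE	1024
#define FT_DIR_CSUM	0xDE		/* fake file type marking the tail */

struct dirent_tail {
	uint32_t reserved_zero1;	/* reads as inode 0, i.e. unused entry */
	uint16_t rec_len;		/* always 12 */
	uint8_t  reserved_zero2;	/* zero name length */
	uint8_t  reserved_ft;		/* FT_DIR_CSUM */
	uint32_t checksum;
};

#define DIRENT_TAIL(block) \
	((struct dirent_tail *)((uint8_t *)(block) + BLOCKSIZE - \
				sizeof(struct dirent_tail)))

static void init_tail(struct dirent_tail *t)
{
	memset(t, 0, sizeof(*t));
	t->rec_len = sizeof(*t);
	t->reserved_ft = FT_DIR_CSUM;
}

static int looks_like_tail(const struct dirent_tail *t)
{
	return !t->reserved_zero1 && t->rec_len == sizeof(*t) &&
	       !t->reserved_zero2 && t->reserved_ft == FT_DIR_CSUM;
}

int main(void)
{
	uint8_t block[BLOCKSIZE] = { 0 };
	struct dirent_tail *t = DIRENT_TAIL(block);

	init_tail(t);
	t->checksum = 0x12345678;	/* would be crc32c(seed, block body) */
	printf("tail at offset %zu, recognized: %d\n",
	       (size_t)((uint8_t *)t - block), looks_like_tail(t));
	return 0;
}
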
@@ -239,12 +471,20 @@
 {
 	unsigned entry_space = dir->i_sb->s_blocksize - EXT4_DIR_REC_LEN(1) -
 		EXT4_DIR_REC_LEN(2) - infosize;
+
+	if (EXT4_HAS_RO_COMPAT_FEATURE(dir->i_sb,
+				       EXT4_FEATURE_RO_COMPAT_METADATA_CSUM))
+		entry_space -= sizeof(struct dx_tail);
 	return entry_space / sizeof(struct dx_entry);
 }
 
 static inline unsigned dx_node_limit(struct inode *dir)
 {
 	unsigned entry_space = dir->i_sb->s_blocksize - EXT4_DIR_REC_LEN(0);
+
+	if (EXT4_HAS_RO_COMPAT_FEATURE(dir->i_sb,
+				       EXT4_FEATURE_RO_COMPAT_METADATA_CSUM))
+		entry_space -= sizeof(struct dx_tail);
 	return entry_space / sizeof(struct dx_entry);
 }
 
@@ -390,6 +630,15 @@
 		goto fail;
 	}
 
+	if (!buffer_verified(bh) &&
+	    !ext4_dx_csum_verify(dir, (struct ext4_dir_entry *)bh->b_data)) {
+		ext4_warning(dir->i_sb, "Root failed checksum");
+		brelse(bh);
+		*err = ERR_BAD_DX_DIR;
+		goto fail;
+	}
+	set_buffer_verified(bh);
+
 	entries = (struct dx_entry *) (((char *)&root->info) +
 				       root->info.info_length);
 
@@ -450,6 +699,17 @@
 		if (!(bh = ext4_bread (NULL,dir, dx_get_block(at), 0, err)))
 			goto fail2;
 		at = entries = ((struct dx_node *) bh->b_data)->entries;
+
+		if (!buffer_verified(bh) &&
+		    !ext4_dx_csum_verify(dir,
+					 (struct ext4_dir_entry *)bh->b_data)) {
+			ext4_warning(dir->i_sb, "Node failed checksum");
+			brelse(bh);
+			*err = ERR_BAD_DX_DIR;
+			goto fail;
+		}
+		set_buffer_verified(bh);
+
 		if (dx_get_limit(entries) != dx_node_limit (dir)) {
 			ext4_warning(dir->i_sb,
 				     "dx entry: limit != node limit");
@@ -549,6 +809,15 @@
 		if (!(bh = ext4_bread(NULL, dir, dx_get_block(p->at),
 				      0, &err)))
 			return err; /* Failure */
+
+		if (!buffer_verified(bh) &&
+		    !ext4_dx_csum_verify(dir,
+					 (struct ext4_dir_entry *)bh->b_data)) {
+			ext4_warning(dir->i_sb, "Node failed checksum");
+			return -EIO;
+		}
+		set_buffer_verified(bh);
+
 		p++;
 		brelse(p->bh);
 		p->bh = bh;
@@ -577,6 +846,11 @@
 	if (!(bh = ext4_bread (NULL, dir, block, 0, &err)))
 		return err;
 
+	if (!buffer_verified(bh) &&
+	    !ext4_dirent_csum_verify(dir, (struct ext4_dir_entry *)bh->b_data))
+		return -EIO;
+	set_buffer_verified(bh);
+
 	de = (struct ext4_dir_entry_2 *) bh->b_data;
 	top = (struct ext4_dir_entry_2 *) ((char *) de +
 					   dir->i_sb->s_blocksize -
@@ -936,6 +1210,15 @@
 			brelse(bh);
 			goto next;
 		}
+		if (!buffer_verified(bh) &&
+		    !ext4_dirent_csum_verify(dir,
+				(struct ext4_dir_entry *)bh->b_data)) {
+			EXT4_ERROR_INODE(dir, "checksumming directory "
+					 "block %lu", (unsigned long)block);
+			brelse(bh);
+			goto next;
+		}
+		set_buffer_verified(bh);
 		i = search_dirblock(bh, dir, d_name,
 			    block << EXT4_BLOCK_SIZE_BITS(sb), res_dir);
 		if (i == 1) {
@@ -987,6 +1270,16 @@
 		if (!(bh = ext4_bread(NULL, dir, block, 0, err)))
 			goto errout;
 
+		if (!buffer_verified(bh) &&
+		    !ext4_dirent_csum_verify(dir,
+				(struct ext4_dir_entry *)bh->b_data)) {
+			EXT4_ERROR_INODE(dir, "checksumming directory "
+					 "block %lu", (unsigned long)block);
+			brelse(bh);
+			*err = -EIO;
+			goto errout;
+		}
+		set_buffer_verified(bh);
 		retval = search_dirblock(bh, dir, d_name,
 					 block << EXT4_BLOCK_SIZE_BITS(sb),
 					 res_dir);
@@ -1037,6 +1330,12 @@
 			EXT4_ERROR_INODE(dir, "bad inode number: %u", ino);
 			return ERR_PTR(-EIO);
 		}
+		if (unlikely(ino == dir->i_ino)) {
+			EXT4_ERROR_INODE(dir, "'%.*s' linked to parent dir",
+					 dentry->d_name.len,
+					 dentry->d_name.name);
+			return ERR_PTR(-EIO);
+		}
 		inode = ext4_iget(dir->i_sb, ino);
 		if (inode == ERR_PTR(-ESTALE)) {
 			EXT4_ERROR_INODE(dir,
@@ -1156,8 +1455,14 @@
 	char *data1 = (*bh)->b_data, *data2;
 	unsigned split, move, size;
 	struct ext4_dir_entry_2 *de = NULL, *de2;
+	struct ext4_dir_entry_tail *t;
+	int	csum_size = 0;
 	int	err = 0, i;
 
+	if (EXT4_HAS_RO_COMPAT_FEATURE(dir->i_sb,
+				       EXT4_FEATURE_RO_COMPAT_METADATA_CSUM))
+		csum_size = sizeof(struct ext4_dir_entry_tail);
+
 	bh2 = ext4_append (handle, dir, &newblock, &err);
 	if (!(bh2)) {
 		brelse(*bh);
@@ -1204,10 +1509,20 @@
 	/* Fancy dance to stay within two buffers */
 	de2 = dx_move_dirents(data1, data2, map + split, count - split, blocksize);
 	de = dx_pack_dirents(data1, blocksize);
-	de->rec_len = ext4_rec_len_to_disk(data1 + blocksize - (char *) de,
+	de->rec_len = ext4_rec_len_to_disk(data1 + (blocksize - csum_size) -
+					   (char *) de,
 					   blocksize);
-	de2->rec_len = ext4_rec_len_to_disk(data2 + blocksize - (char *) de2,
+	de2->rec_len = ext4_rec_len_to_disk(data2 + (blocksize - csum_size) -
+					    (char *) de2,
 					    blocksize);
+	if (csum_size) {
+		t = EXT4_DIRENT_TAIL(data2, blocksize);
+		initialize_dirent_tail(t, blocksize);
+
+		t = EXT4_DIRENT_TAIL(data1, blocksize);
+		initialize_dirent_tail(t, blocksize);
+	}
+
 	dxtrace(dx_show_leaf (hinfo, (struct ext4_dir_entry_2 *) data1, blocksize, 1));
 	dxtrace(dx_show_leaf (hinfo, (struct ext4_dir_entry_2 *) data2, blocksize, 1));
 
@@ -1218,10 +1533,10 @@
 		de = de2;
 	}
 	dx_insert_block(frame, hash2 + continued, newblock);
-	err = ext4_handle_dirty_metadata(handle, dir, bh2);
+	err = ext4_handle_dirty_dirent_node(handle, dir, bh2);
 	if (err)
 		goto journal_error;
-	err = ext4_handle_dirty_metadata(handle, dir, frame->bh);
+	err = ext4_handle_dirty_dx_node(handle, dir, frame->bh);
 	if (err)
 		goto journal_error;
 	brelse(bh2);
@@ -1258,11 +1573,16 @@
 	unsigned short	reclen;
 	int		nlen, rlen, err;
 	char		*top;
+	int		csum_size = 0;
+
+	if (EXT4_HAS_RO_COMPAT_FEATURE(inode->i_sb,
+				       EXT4_FEATURE_RO_COMPAT_METADATA_CSUM))
+		csum_size = sizeof(struct ext4_dir_entry_tail);
 
 	reclen = EXT4_DIR_REC_LEN(namelen);
 	if (!de) {
 		de = (struct ext4_dir_entry_2 *)bh->b_data;
-		top = bh->b_data + blocksize - reclen;
+		top = bh->b_data + (blocksize - csum_size) - reclen;
 		while ((char *) de <= top) {
 			if (ext4_check_dir_entry(dir, NULL, de, bh, offset))
 				return -EIO;
@@ -1295,11 +1615,8 @@
 		de = de1;
 	}
 	de->file_type = EXT4_FT_UNKNOWN;
-	if (inode) {
-		de->inode = cpu_to_le32(inode->i_ino);
-		ext4_set_de_type(dir->i_sb, de, inode->i_mode);
-	} else
-		de->inode = 0;
+	de->inode = cpu_to_le32(inode->i_ino);
+	ext4_set_de_type(dir->i_sb, de, inode->i_mode);
 	de->name_len = namelen;
 	memcpy(de->name, name, namelen);
 	/*
@@ -1318,7 +1635,7 @@
 	dir->i_version++;
 	ext4_mark_inode_dirty(handle, dir);
 	BUFFER_TRACE(bh, "call ext4_handle_dirty_metadata");
-	err = ext4_handle_dirty_metadata(handle, dir, bh);
+	err = ext4_handle_dirty_dirent_node(handle, dir, bh);
 	if (err)
 		ext4_std_error(dir->i_sb, err);
 	return 0;
@@ -1339,6 +1656,7 @@
 	struct dx_frame	frames[2], *frame;
 	struct dx_entry *entries;
 	struct ext4_dir_entry_2	*de, *de2;
+	struct ext4_dir_entry_tail *t;
 	char		*data1, *top;
 	unsigned	len;
 	int		retval;
@@ -1346,6 +1664,11 @@
 	struct dx_hash_info hinfo;
 	ext4_lblk_t  block;
 	struct fake_dirent *fde;
+	int		csum_size = 0;
+
+	if (EXT4_HAS_RO_COMPAT_FEATURE(inode->i_sb,
+				       EXT4_FEATURE_RO_COMPAT_METADATA_CSUM))
+		csum_size = sizeof(struct ext4_dir_entry_tail);
 
 	blocksize =  dir->i_sb->s_blocksize;
 	dxtrace(printk(KERN_DEBUG "Creating index: inode %lu\n", dir->i_ino));
@@ -1366,7 +1689,7 @@
 		brelse(bh);
 		return -EIO;
 	}
-	len = ((char *) root) + blocksize - (char *) de;
+	len = ((char *) root) + (blocksize - csum_size) - (char *) de;
 
 	/* Allocate new block for the 0th block's dirents */
 	bh2 = ext4_append(handle, dir, &block, &retval);
@@ -1382,8 +1705,15 @@
 	top = data1 + len;
 	while ((char *)(de2 = ext4_next_entry(de, blocksize)) < top)
 		de = de2;
-	de->rec_len = ext4_rec_len_to_disk(data1 + blocksize - (char *) de,
+	de->rec_len = ext4_rec_len_to_disk(data1 + (blocksize - csum_size) -
+					   (char *) de,
 					   blocksize);
+
+	if (csum_size) {
+		t = EXT4_DIRENT_TAIL(data1, blocksize);
+		initialize_dirent_tail(t, blocksize);
+	}
+
 	/* Initialize the root; the dot dirents already exist */
 	de = (struct ext4_dir_entry_2 *) (&root->dotdot);
 	de->rec_len = ext4_rec_len_to_disk(blocksize - EXT4_DIR_REC_LEN(2),
@@ -1408,8 +1738,8 @@
 	frame->bh = bh;
 	bh = bh2;
 
-	ext4_handle_dirty_metadata(handle, dir, frame->bh);
-	ext4_handle_dirty_metadata(handle, dir, bh);
+	ext4_handle_dirty_dx_node(handle, dir, frame->bh);
+	ext4_handle_dirty_dirent_node(handle, dir, bh);
 
 	de = do_split(handle,dir, &bh, frame, &hinfo, &retval);
 	if (!de) {
@@ -1445,11 +1775,17 @@
 	struct inode *dir = dentry->d_parent->d_inode;
 	struct buffer_head *bh;
 	struct ext4_dir_entry_2 *de;
+	struct ext4_dir_entry_tail *t;
 	struct super_block *sb;
 	int	retval;
 	int	dx_fallback=0;
 	unsigned blocksize;
 	ext4_lblk_t block, blocks;
+	int	csum_size = 0;
+
+	if (EXT4_HAS_RO_COMPAT_FEATURE(inode->i_sb,
+				       EXT4_FEATURE_RO_COMPAT_METADATA_CSUM))
+		csum_size = sizeof(struct ext4_dir_entry_tail);
 
 	sb = dir->i_sb;
 	blocksize = sb->s_blocksize;
@@ -1468,6 +1804,11 @@
 		bh = ext4_bread(handle, dir, block, 0, &retval);
 		if(!bh)
 			return retval;
+		if (!buffer_verified(bh) &&
+		    !ext4_dirent_csum_verify(dir,
+				(struct ext4_dir_entry *)bh->b_data))
+			return -EIO;
+		set_buffer_verified(bh);
 		retval = add_dirent_to_buf(handle, dentry, inode, NULL, bh);
 		if (retval != -ENOSPC) {
 			brelse(bh);
@@ -1484,7 +1825,13 @@
 		return retval;
 	de = (struct ext4_dir_entry_2 *) bh->b_data;
 	de->inode = 0;
-	de->rec_len = ext4_rec_len_to_disk(blocksize, blocksize);
+	de->rec_len = ext4_rec_len_to_disk(blocksize - csum_size, blocksize);
+
+	if (csum_size) {
+		t = EXT4_DIRENT_TAIL(bh->b_data, blocksize);
+		initialize_dirent_tail(t, blocksize);
+	}
+
 	retval = add_dirent_to_buf(handle, dentry, inode, de, bh);
 	brelse(bh);
 	if (retval == 0)
@@ -1516,6 +1863,11 @@
 	if (!(bh = ext4_bread(handle,dir, dx_get_block(frame->at), 0, &err)))
 		goto cleanup;
 
+	if (!buffer_verified(bh) &&
+	    !ext4_dirent_csum_verify(dir, (struct ext4_dir_entry *)bh->b_data))
+		goto journal_error;
+	set_buffer_verified(bh);
+
 	BUFFER_TRACE(bh, "get_write_access");
 	err = ext4_journal_get_write_access(handle, bh);
 	if (err)
@@ -1583,7 +1935,7 @@
 			dxtrace(dx_show_index("node", frames[1].entries));
 			dxtrace(dx_show_index("node",
 			       ((struct dx_node *) bh2->b_data)->entries));
-			err = ext4_handle_dirty_metadata(handle, dir, bh2);
+			err = ext4_handle_dirty_dx_node(handle, dir, bh2);
 			if (err)
 				goto journal_error;
 			brelse (bh2);
@@ -1609,7 +1961,7 @@
 			if (err)
 				goto journal_error;
 		}
-		err = ext4_handle_dirty_metadata(handle, dir, frames[0].bh);
+		err = ext4_handle_dirty_dx_node(handle, dir, frames[0].bh);
 		if (err) {
 			ext4_std_error(inode->i_sb, err);
 			goto cleanup;
@@ -1641,12 +1993,17 @@
 {
 	struct ext4_dir_entry_2 *de, *pde;
 	unsigned int blocksize = dir->i_sb->s_blocksize;
+	int csum_size = 0;
 	int i, err;
 
+	if (EXT4_HAS_RO_COMPAT_FEATURE(dir->i_sb,
+				       EXT4_FEATURE_RO_COMPAT_METADATA_CSUM))
+		csum_size = sizeof(struct ext4_dir_entry_tail);
+
 	i = 0;
 	pde = NULL;
 	de = (struct ext4_dir_entry_2 *) bh->b_data;
-	while (i < bh->b_size) {
+	while (i < bh->b_size - csum_size) {
 		if (ext4_check_dir_entry(dir, NULL, de, bh, i))
 			return -EIO;
 		if (de == de_del)  {
@@ -1667,7 +2024,7 @@
 				de->inode = 0;
 			dir->i_version++;
 			BUFFER_TRACE(bh, "call ext4_handle_dirty_metadata");
-			err = ext4_handle_dirty_metadata(handle, dir, bh);
+			err = ext4_handle_dirty_dirent_node(handle, dir, bh);
 			if (unlikely(err)) {
 				ext4_std_error(dir->i_sb, err);
 				return err;
@@ -1809,9 +2166,15 @@
 	struct inode *inode;
 	struct buffer_head *dir_block = NULL;
 	struct ext4_dir_entry_2 *de;
+	struct ext4_dir_entry_tail *t;
 	unsigned int blocksize = dir->i_sb->s_blocksize;
+	int csum_size = 0;
 	int err, retries = 0;
 
+	if (EXT4_HAS_RO_COMPAT_FEATURE(dir->i_sb,
+				       EXT4_FEATURE_RO_COMPAT_METADATA_CSUM))
+		csum_size = sizeof(struct ext4_dir_entry_tail);
+
 	if (EXT4_DIR_LINK_MAX(dir))
 		return -EMLINK;
 
@@ -1852,16 +2215,24 @@
 	ext4_set_de_type(dir->i_sb, de, S_IFDIR);
 	de = ext4_next_entry(de, blocksize);
 	de->inode = cpu_to_le32(dir->i_ino);
-	de->rec_len = ext4_rec_len_to_disk(blocksize - EXT4_DIR_REC_LEN(1),
+	de->rec_len = ext4_rec_len_to_disk(blocksize -
+					   (csum_size + EXT4_DIR_REC_LEN(1)),
 					   blocksize);
 	de->name_len = 2;
 	strcpy(de->name, "..");
 	ext4_set_de_type(dir->i_sb, de, S_IFDIR);
 	set_nlink(inode, 2);
+
+	if (csum_size) {
+		t = EXT4_DIRENT_TAIL(dir_block->b_data, blocksize);
+		initialize_dirent_tail(t, blocksize);
+	}
+
 	BUFFER_TRACE(dir_block, "call ext4_handle_dirty_metadata");
-	err = ext4_handle_dirty_metadata(handle, inode, dir_block);
+	err = ext4_handle_dirty_dirent_node(handle, inode, dir_block);
 	if (err)
 		goto out_clear_inode;
+	set_buffer_verified(dir_block);
 	err = ext4_mark_inode_dirty(handle, inode);
 	if (!err)
 		err = ext4_add_entry(handle, dentry, inode);
@@ -1911,6 +2282,14 @@
 				     inode->i_ino);
 		return 1;
 	}
+	if (!buffer_verified(bh) &&
+	    !ext4_dirent_csum_verify(inode,
+			(struct ext4_dir_entry *)bh->b_data)) {
+		EXT4_ERROR_INODE(inode, "checksum error reading directory "
+				 "lblock 0");
+		return -EIO;
+	}
+	set_buffer_verified(bh);
 	de = (struct ext4_dir_entry_2 *) bh->b_data;
 	de1 = ext4_next_entry(de, sb->s_blocksize);
 	if (le32_to_cpu(de->inode) != inode->i_ino ||
@@ -1942,6 +2321,14 @@
 				offset += sb->s_blocksize;
 				continue;
 			}
+			if (!buffer_verified(bh) &&
+			    !ext4_dirent_csum_verify(inode,
+					(struct ext4_dir_entry *)bh->b_data)) {
+				EXT4_ERROR_INODE(inode, "checksum error "
+						 "reading directory lblock 0");
+				return -EIO;
+			}
+			set_buffer_verified(bh);
 			de = (struct ext4_dir_entry_2 *) bh->b_data;
 		}
 		if (ext4_check_dir_entry(inode, NULL, de, bh, offset)) {
@@ -2010,7 +2397,7 @@
 	/* Insert this inode at the head of the on-disk orphan list... */
 	NEXT_ORPHAN(inode) = le32_to_cpu(EXT4_SB(sb)->s_es->s_last_orphan);
 	EXT4_SB(sb)->s_es->s_last_orphan = cpu_to_le32(inode->i_ino);
-	err = ext4_handle_dirty_metadata(handle, NULL, EXT4_SB(sb)->s_sbh);
+	err = ext4_handle_dirty_super_now(handle, sb);
 	rc = ext4_mark_iloc_dirty(handle, inode, &iloc);
 	if (!err)
 		err = rc;
@@ -2083,7 +2470,7 @@
 		if (err)
 			goto out_brelse;
 		sbi->s_es->s_last_orphan = cpu_to_le32(ino_next);
-		err = ext4_handle_dirty_metadata(handle, NULL, sbi->s_sbh);
+		err = ext4_handle_dirty_super_now(handle, inode->i_sb);
 	} else {
 		struct ext4_iloc iloc2;
 		struct inode *i_prev =
@@ -2442,6 +2829,11 @@
 		dir_bh = ext4_bread(handle, old_inode, 0, 0, &retval);
 		if (!dir_bh)
 			goto end_rename;
+		if (!buffer_verified(dir_bh) &&
+		    !ext4_dirent_csum_verify(old_inode,
+				(struct ext4_dir_entry *)dir_bh->b_data))
+			goto end_rename;
+		set_buffer_verified(dir_bh);
 		if (le32_to_cpu(PARENT_INO(dir_bh->b_data,
 				old_dir->i_sb->s_blocksize)) != old_dir->i_ino)
 			goto end_rename;
@@ -2472,7 +2864,7 @@
 					ext4_current_time(new_dir);
 		ext4_mark_inode_dirty(handle, new_dir);
 		BUFFER_TRACE(new_bh, "call ext4_handle_dirty_metadata");
-		retval = ext4_handle_dirty_metadata(handle, new_dir, new_bh);
+		retval = ext4_handle_dirty_dirent_node(handle, new_dir, new_bh);
 		if (unlikely(retval)) {
 			ext4_std_error(new_dir->i_sb, retval);
 			goto end_rename;
@@ -2526,7 +2918,8 @@
 		PARENT_INO(dir_bh->b_data, new_dir->i_sb->s_blocksize) =
 						cpu_to_le32(new_dir->i_ino);
 		BUFFER_TRACE(dir_bh, "call ext4_handle_dirty_metadata");
-		retval = ext4_handle_dirty_metadata(handle, old_inode, dir_bh);
+		retval = ext4_handle_dirty_dirent_node(handle, old_inode,
+						       dir_bh);
 		if (retval) {
 			ext4_std_error(old_dir->i_sb, retval);
 			goto end_rename;
diff --git a/fs/ext4/resize.c b/fs/ext4/resize.c
index 59fa0be..7ea6cbb 100644
--- a/fs/ext4/resize.c
+++ b/fs/ext4/resize.c
@@ -161,6 +161,8 @@
 	if (flex_gd == NULL)
 		goto out3;
 
+	if (flexbg_size >= UINT_MAX / sizeof(struct ext4_new_flex_group_data))
+		goto out2;
 	flex_gd->count = flexbg_size;
 
 	flex_gd->groups = kmalloc(sizeof(struct ext4_new_group_data) *
@@ -796,7 +798,7 @@
 	ext4_kvfree(o_group_desc);
 
 	le16_add_cpu(&es->s_reserved_gdt_blocks, -1);
-	err = ext4_handle_dirty_metadata(handle, NULL, EXT4_SB(sb)->s_sbh);
+	err = ext4_handle_dirty_super_now(handle, sb);
 	if (err)
 		ext4_std_error(sb, err);
 
@@ -968,6 +970,8 @@
 		goto exit_err;
 	}
 
+	ext4_superblock_csum_set(sb, (struct ext4_super_block *)data);
+
 	while ((group = ext4_list_backups(sb, &three, &five, &seven)) < last) {
 		struct buffer_head *bh;
 
@@ -1067,6 +1071,54 @@
 	return err;
 }
 
+static struct buffer_head *ext4_get_bitmap(struct super_block *sb, __u64 block)
+{
+	struct buffer_head *bh = sb_getblk(sb, block);
+	if (!bh)
+		return NULL;
+
+	if (bitmap_uptodate(bh))
+		return bh;
+
+	lock_buffer(bh);
+	if (bh_submit_read(bh) < 0) {
+		unlock_buffer(bh);
+		brelse(bh);
+		return NULL;
+	}
+	unlock_buffer(bh);
+
+	return bh;
+}
+
+static int ext4_set_bitmap_checksums(struct super_block *sb,
+				     ext4_group_t group,
+				     struct ext4_group_desc *gdp,
+				     struct ext4_new_group_data *group_data)
+{
+	struct buffer_head *bh;
+
+	if (!EXT4_HAS_RO_COMPAT_FEATURE(sb,
+					EXT4_FEATURE_RO_COMPAT_METADATA_CSUM))
+		return 0;
+
+	bh = ext4_get_bitmap(sb, group_data->inode_bitmap);
+	if (!bh)
+		return -EIO;
+	ext4_inode_bitmap_csum_set(sb, group, gdp, bh,
+				   EXT4_INODES_PER_GROUP(sb) / 8);
+	brelse(bh);
+
+	bh = ext4_get_bitmap(sb, group_data->block_bitmap);
+	if (!bh)
+		return -EIO;
+	ext4_block_bitmap_csum_set(sb, group, gdp, bh,
+				   EXT4_BLOCKS_PER_GROUP(sb) / 8);
+	brelse(bh);
+
+	return 0;
+}
+
 /*
  * ext4_setup_new_descs() will set up the group descriptors of a flex bg
  */
@@ -1093,18 +1145,24 @@
 		 */
 		gdb_bh = sbi->s_group_desc[gdb_num];
 		/* Update group descriptor block for new group */
-		gdp = (struct ext4_group_desc *)((char *)gdb_bh->b_data +
+		gdp = (struct ext4_group_desc *)(gdb_bh->b_data +
 						 gdb_off * EXT4_DESC_SIZE(sb));
 
 		memset(gdp, 0, EXT4_DESC_SIZE(sb));
 		ext4_block_bitmap_set(sb, gdp, group_data->block_bitmap);
 		ext4_inode_bitmap_set(sb, gdp, group_data->inode_bitmap);
+		err = ext4_set_bitmap_checksums(sb, group, gdp, group_data);
+		if (err) {
+			ext4_std_error(sb, err);
+			break;
+		}
+
 		ext4_inode_table_set(sb, gdp, group_data->inode_table);
 		ext4_free_group_clusters_set(sb, gdp,
 					     EXT4_B2C(sbi, group_data->free_blocks_count));
 		ext4_free_inodes_set(sb, gdp, EXT4_INODES_PER_GROUP(sb));
 		gdp->bg_flags = cpu_to_le16(*bg_flags);
-		gdp->bg_checksum = ext4_group_desc_csum(sbi, group, gdp);
+		ext4_group_desc_csum_set(sb, group, gdp);
 
 		err = ext4_handle_dirty_metadata(handle, NULL, gdb_bh);
 		if (unlikely(err)) {
@@ -1343,17 +1401,14 @@
 			   (1 + ext4_bg_num_gdb(sb, group + i) +
 			    le16_to_cpu(es->s_reserved_gdt_blocks)) : 0;
 		group_data[i].free_blocks_count = blocks_per_group - overhead;
-		if (EXT4_HAS_RO_COMPAT_FEATURE(sb,
-					       EXT4_FEATURE_RO_COMPAT_GDT_CSUM))
+		if (ext4_has_group_desc_csum(sb))
 			flex_gd->bg_flags[i] = EXT4_BG_BLOCK_UNINIT |
 					       EXT4_BG_INODE_UNINIT;
 		else
 			flex_gd->bg_flags[i] = EXT4_BG_INODE_ZEROED;
 	}
 
-	if (last_group == n_group &&
-	    EXT4_HAS_RO_COMPAT_FEATURE(sb,
-				       EXT4_FEATURE_RO_COMPAT_GDT_CSUM))
+	if (last_group == n_group && ext4_has_group_desc_csum(sb))
 		/* We need to initialize block bitmap of last group. */
 		flex_gd->bg_flags[i - 1] &= ~EXT4_BG_BLOCK_UNINIT;
 
diff --git a/fs/ext4/super.c b/fs/ext4/super.c
index 436b422..eb7aa3e 100644
--- a/fs/ext4/super.c
+++ b/fs/ext4/super.c
@@ -112,6 +112,48 @@
 #define IS_EXT3_SB(sb) (0)
 #endif
 
+static int ext4_verify_csum_type(struct super_block *sb,
+				 struct ext4_super_block *es)
+{
+	if (!EXT4_HAS_RO_COMPAT_FEATURE(sb,
+					EXT4_FEATURE_RO_COMPAT_METADATA_CSUM))
+		return 1;
+
+	return es->s_checksum_type == EXT4_CRC32C_CHKSUM;
+}
+
+static __le32 ext4_superblock_csum(struct super_block *sb,
+				   struct ext4_super_block *es)
+{
+	struct ext4_sb_info *sbi = EXT4_SB(sb);
+	int offset = offsetof(struct ext4_super_block, s_checksum);
+	__u32 csum;
+
+	csum = ext4_chksum(sbi, ~0, (char *)es, offset);
+
+	return cpu_to_le32(csum);
+}
+
+int ext4_superblock_csum_verify(struct super_block *sb,
+				struct ext4_super_block *es)
+{
+	if (!EXT4_HAS_RO_COMPAT_FEATURE(sb,
+				       EXT4_FEATURE_RO_COMPAT_METADATA_CSUM))
+		return 1;
+
+	return es->s_checksum == ext4_superblock_csum(sb, es);
+}
+
+void ext4_superblock_csum_set(struct super_block *sb,
+			      struct ext4_super_block *es)
+{
+	if (!EXT4_HAS_RO_COMPAT_FEATURE(sb,
+		EXT4_FEATURE_RO_COMPAT_METADATA_CSUM))
+		return;
+
+	es->s_checksum = ext4_superblock_csum(sb, es);
+}
+
 void *ext4_kvmalloc(size_t size, gfp_t flags)
 {
 	void *ret;
@@ -497,6 +539,7 @@
 	printk(KERN_CRIT "EXT4-fs error (device %s): %s:%d: comm %s: %pV\n",
 	       sb->s_id, function, line, current->comm, &vaf);
 	va_end(args);
+	save_error_info(sb, function, line);
 
 	ext4_handle_error(sb);
 }
@@ -905,6 +948,8 @@
 	unlock_super(sb);
 	kobject_put(&sbi->s_kobj);
 	wait_for_completion(&sbi->s_kobj_unregister);
+	if (sbi->s_chksum_driver)
+		crypto_free_shash(sbi->s_chksum_driver);
 	kfree(sbi->s_blockgroup_lock);
 	kfree(sbi);
 }
@@ -1007,7 +1052,7 @@
 void ext4_clear_inode(struct inode *inode)
 {
 	invalidate_inode_buffers(inode);
-	end_writeback(inode);
+	clear_inode(inode);
 	dquot_drop(inode);
 	ext4_discard_preallocations(inode);
 	if (EXT4_I(inode)->jinode) {
@@ -1922,43 +1967,69 @@
 	return 0;
 }
 
-__le16 ext4_group_desc_csum(struct ext4_sb_info *sbi, __u32 block_group,
-			    struct ext4_group_desc *gdp)
+static __le16 ext4_group_desc_csum(struct ext4_sb_info *sbi, __u32 block_group,
+				   struct ext4_group_desc *gdp)
 {
+	int offset;
 	__u16 crc = 0;
+	__le32 le_group = cpu_to_le32(block_group);
 
-	if (sbi->s_es->s_feature_ro_compat &
-	    cpu_to_le32(EXT4_FEATURE_RO_COMPAT_GDT_CSUM)) {
-		int offset = offsetof(struct ext4_group_desc, bg_checksum);
-		__le32 le_group = cpu_to_le32(block_group);
+	if ((sbi->s_es->s_feature_ro_compat &
+	     cpu_to_le32(EXT4_FEATURE_RO_COMPAT_METADATA_CSUM))) {
+		/* Use new metadata_csum algorithm */
+		__u16 old_csum;
+		__u32 csum32;
 
-		crc = crc16(~0, sbi->s_es->s_uuid, sizeof(sbi->s_es->s_uuid));
-		crc = crc16(crc, (__u8 *)&le_group, sizeof(le_group));
-		crc = crc16(crc, (__u8 *)gdp, offset);
-		offset += sizeof(gdp->bg_checksum); /* skip checksum */
-		/* for checksum of struct ext4_group_desc do the rest...*/
-		if ((sbi->s_es->s_feature_incompat &
-		     cpu_to_le32(EXT4_FEATURE_INCOMPAT_64BIT)) &&
-		    offset < le16_to_cpu(sbi->s_es->s_desc_size))
-			crc = crc16(crc, (__u8 *)gdp + offset,
-				    le16_to_cpu(sbi->s_es->s_desc_size) -
-					offset);
+		old_csum = gdp->bg_checksum;
+		gdp->bg_checksum = 0;
+		csum32 = ext4_chksum(sbi, sbi->s_csum_seed, (__u8 *)&le_group,
+				     sizeof(le_group));
+		csum32 = ext4_chksum(sbi, csum32, (__u8 *)gdp,
+				     sbi->s_desc_size);
+		gdp->bg_checksum = old_csum;
+
+		crc = csum32 & 0xFFFF;
+		goto out;
 	}
 
+	/* old crc16 code */
+	offset = offsetof(struct ext4_group_desc, bg_checksum);
+
+	crc = crc16(~0, sbi->s_es->s_uuid, sizeof(sbi->s_es->s_uuid));
+	crc = crc16(crc, (__u8 *)&le_group, sizeof(le_group));
+	crc = crc16(crc, (__u8 *)gdp, offset);
+	offset += sizeof(gdp->bg_checksum); /* skip checksum */
+	/* for checksum of struct ext4_group_desc do the rest...*/
+	if ((sbi->s_es->s_feature_incompat &
+	     cpu_to_le32(EXT4_FEATURE_INCOMPAT_64BIT)) &&
+	    offset < le16_to_cpu(sbi->s_es->s_desc_size))
+		crc = crc16(crc, (__u8 *)gdp + offset,
+			    le16_to_cpu(sbi->s_es->s_desc_size) -
+				offset);
+
+out:
 	return cpu_to_le16(crc);
 }
 
-int ext4_group_desc_csum_verify(struct ext4_sb_info *sbi, __u32 block_group,
+int ext4_group_desc_csum_verify(struct super_block *sb, __u32 block_group,
 				struct ext4_group_desc *gdp)
 {
-	if ((sbi->s_es->s_feature_ro_compat &
-	     cpu_to_le32(EXT4_FEATURE_RO_COMPAT_GDT_CSUM)) &&
-	    (gdp->bg_checksum != ext4_group_desc_csum(sbi, block_group, gdp)))
+	if (ext4_has_group_desc_csum(sb) &&
+	    (gdp->bg_checksum != ext4_group_desc_csum(EXT4_SB(sb),
+						      block_group, gdp)))
 		return 0;
 
 	return 1;
 }
 
+void ext4_group_desc_csum_set(struct super_block *sb, __u32 block_group,
+			      struct ext4_group_desc *gdp)
+{
+	if (!ext4_has_group_desc_csum(sb))
+		return;
+	gdp->bg_checksum = ext4_group_desc_csum(EXT4_SB(sb), block_group, gdp);
+}
+
 /* Called at mount-time, super-block is locked */
 static int ext4_check_descriptors(struct super_block *sb,
 				  ext4_group_t *first_not_zeroed)
@@ -2013,7 +2084,7 @@
 			return 0;
 		}
 		ext4_lock_group(sb, i);
-		if (!ext4_group_desc_csum_verify(sbi, i, gdp)) {
+		if (!ext4_group_desc_csum_verify(sb, i, gdp)) {
 			ext4_msg(sb, KERN_ERR, "ext4_check_descriptors: "
 				 "Checksum for group %u failed (%u!=%u)",
 				 i, le16_to_cpu(ext4_group_desc_csum(sbi, i,
@@ -2417,6 +2488,23 @@
 	return count;
 }
 
+static ssize_t trigger_test_error(struct ext4_attr *a,
+				  struct ext4_sb_info *sbi,
+				  const char *buf, size_t count)
+{
+	int len = count;
+
+	if (!capable(CAP_SYS_ADMIN))
+		return -EPERM;
+
+	if (len && buf[len-1] == '\n')
+		len--;
+
+	if (len)
+		ext4_error(sbi->s_sb, "%.*s", len, buf);
+	return count;
+}
+
 #define EXT4_ATTR_OFFSET(_name,_mode,_show,_store,_elname) \
 static struct ext4_attr ext4_attr_##_name = {			\
 	.attr = {.name = __stringify(_name), .mode = _mode },	\
@@ -2447,6 +2535,7 @@
 EXT4_RW_ATTR_SBI_UI(mb_stream_req, s_mb_stream_request);
 EXT4_RW_ATTR_SBI_UI(mb_group_prealloc, s_mb_group_prealloc);
 EXT4_RW_ATTR_SBI_UI(max_writeback_mb_bump, s_max_writeback_mb_bump);
+EXT4_ATTR(trigger_fs_error, 0200, NULL, trigger_test_error);
 
 static struct attribute *ext4_attrs[] = {
 	ATTR_LIST(delayed_allocation_blocks),
@@ -2461,6 +2550,7 @@
 	ATTR_LIST(mb_stream_req),
 	ATTR_LIST(mb_group_prealloc),
 	ATTR_LIST(max_writeback_mb_bump),
+	ATTR_LIST(trigger_fs_error),
 	NULL,
 };
 
@@ -2957,6 +3047,44 @@
 	kthread_stop(ext4_lazyinit_task);
 }
 
+static int set_journal_csum_feature_set(struct super_block *sb)
+{
+	int ret = 1;
+	int compat, incompat;
+	struct ext4_sb_info *sbi = EXT4_SB(sb);
+
+	if (EXT4_HAS_RO_COMPAT_FEATURE(sb,
+				       EXT4_FEATURE_RO_COMPAT_METADATA_CSUM)) {
+		/* journal checksum v2 */
+		compat = 0;
+		incompat = JBD2_FEATURE_INCOMPAT_CSUM_V2;
+	} else {
+		/* journal checksum v1 */
+		compat = JBD2_FEATURE_COMPAT_CHECKSUM;
+		incompat = 0;
+	}
+
+	if (test_opt(sb, JOURNAL_ASYNC_COMMIT)) {
+		ret = jbd2_journal_set_features(sbi->s_journal,
+				compat, 0,
+				JBD2_FEATURE_INCOMPAT_ASYNC_COMMIT |
+				incompat);
+	} else if (test_opt(sb, JOURNAL_CHECKSUM)) {
+		ret = jbd2_journal_set_features(sbi->s_journal,
+				compat, 0,
+				incompat);
+		jbd2_journal_clear_features(sbi->s_journal, 0, 0,
+				JBD2_FEATURE_INCOMPAT_ASYNC_COMMIT);
+	} else {
+		jbd2_journal_clear_features(sbi->s_journal,
+				JBD2_FEATURE_COMPAT_CHECKSUM, 0,
+				JBD2_FEATURE_INCOMPAT_ASYNC_COMMIT |
+				JBD2_FEATURE_INCOMPAT_CSUM_V2);
+	}
+
+	return ret;
+}
+
 static int ext4_fill_super(struct super_block *sb, void *data, int silent)
 {
 	char *orig_data = kstrdup(data, GFP_KERNEL);
@@ -2993,6 +3121,7 @@
 		goto out_free_orig;
 	}
 	sb->s_fs_info = sbi;
+	sbi->s_sb = sb;
 	sbi->s_mount_opt = 0;
 	sbi->s_resuid = make_kuid(&init_user_ns, EXT4_DEF_RESUID);
 	sbi->s_resgid = make_kgid(&init_user_ns, EXT4_DEF_RESGID);
@@ -3032,13 +3161,54 @@
 	 * Note: s_es must be initialized as soon as possible because
 	 *       some ext4 macro-instructions depend on its value
 	 */
-	es = (struct ext4_super_block *) (((char *)bh->b_data) + offset);
+	es = (struct ext4_super_block *) (bh->b_data + offset);
 	sbi->s_es = es;
 	sb->s_magic = le16_to_cpu(es->s_magic);
 	if (sb->s_magic != EXT4_SUPER_MAGIC)
 		goto cantfind_ext4;
 	sbi->s_kbytes_written = le64_to_cpu(es->s_kbytes_written);
 
+	/* Warn if metadata_csum and gdt_csum are both set. */
+	if (EXT4_HAS_RO_COMPAT_FEATURE(sb,
+				       EXT4_FEATURE_RO_COMPAT_METADATA_CSUM) &&
+	    EXT4_HAS_RO_COMPAT_FEATURE(sb, EXT4_FEATURE_RO_COMPAT_GDT_CSUM))
+		ext4_warning(sb, KERN_INFO "metadata_csum and uninit_bg are "
+			     "redundant flags; please run fsck.");
+
+	/* Check for a known checksum algorithm */
+	if (!ext4_verify_csum_type(sb, es)) {
+		ext4_msg(sb, KERN_ERR, "VFS: Found ext4 filesystem with "
+			 "unknown checksum algorithm.");
+		silent = 1;
+		goto cantfind_ext4;
+	}
+
+	/* Load the checksum driver */
+	if (EXT4_HAS_RO_COMPAT_FEATURE(sb,
+				       EXT4_FEATURE_RO_COMPAT_METADATA_CSUM)) {
+		sbi->s_chksum_driver = crypto_alloc_shash("crc32c", 0, 0);
+		if (IS_ERR(sbi->s_chksum_driver)) {
+			ext4_msg(sb, KERN_ERR, "Cannot load crc32c driver.");
+			ret = PTR_ERR(sbi->s_chksum_driver);
+			sbi->s_chksum_driver = NULL;
+			goto failed_mount;
+		}
+	}
+
+	/* Check superblock checksum */
+	if (!ext4_superblock_csum_verify(sb, es)) {
+		ext4_msg(sb, KERN_ERR, "VFS: Found ext4 filesystem with "
+			 "invalid superblock checksum.  Run e2fsck?");
+		silent = 1;
+		goto cantfind_ext4;
+	}
+
+	/* Precompute checksum seed for all metadata */
+	if (EXT4_HAS_RO_COMPAT_FEATURE(sb,
+			EXT4_FEATURE_RO_COMPAT_METADATA_CSUM))
+		sbi->s_csum_seed = ext4_chksum(sbi, ~0, es->s_uuid,
+					       sizeof(es->s_uuid));
+
 	/* Set defaults before we parse the mount options */
 	def_mount_opts = le32_to_cpu(es->s_default_mount_opts);
 	set_opt(sb, INIT_INODE_TABLE);
@@ -3200,7 +3370,7 @@
 			       "Can't read superblock on 2nd try");
 			goto failed_mount;
 		}
-		es = (struct ext4_super_block *)(((char *)bh->b_data) + offset);
+		es = (struct ext4_super_block *)(bh->b_data + offset);
 		sbi->s_es = es;
 		if (es->s_magic != cpu_to_le16(EXT4_SUPER_MAGIC)) {
 			ext4_msg(sb, KERN_ERR,
@@ -3392,6 +3562,7 @@
 					  GFP_KERNEL);
 	if (sbi->s_group_desc == NULL) {
 		ext4_msg(sb, KERN_ERR, "not enough memory");
+		ret = -ENOMEM;
 		goto failed_mount;
 	}
 
@@ -3449,6 +3620,7 @@
 	}
 	if (err) {
 		ext4_msg(sb, KERN_ERR, "insufficient memory");
+		ret = err;
 		goto failed_mount3;
 	}
 
@@ -3506,26 +3678,17 @@
 		goto no_journal;
 	}
 
-	if (ext4_blocks_count(es) > 0xffffffffULL &&
+	if (EXT4_HAS_INCOMPAT_FEATURE(sb, EXT4_FEATURE_INCOMPAT_64BIT) &&
 	    !jbd2_journal_set_features(EXT4_SB(sb)->s_journal, 0, 0,
 				       JBD2_FEATURE_INCOMPAT_64BIT)) {
 		ext4_msg(sb, KERN_ERR, "Failed to set 64-bit journal feature");
 		goto failed_mount_wq;
 	}
 
-	if (test_opt(sb, JOURNAL_ASYNC_COMMIT)) {
-		jbd2_journal_set_features(sbi->s_journal,
-				JBD2_FEATURE_COMPAT_CHECKSUM, 0,
-				JBD2_FEATURE_INCOMPAT_ASYNC_COMMIT);
-	} else if (test_opt(sb, JOURNAL_CHECKSUM)) {
-		jbd2_journal_set_features(sbi->s_journal,
-				JBD2_FEATURE_COMPAT_CHECKSUM, 0, 0);
-		jbd2_journal_clear_features(sbi->s_journal, 0, 0,
-				JBD2_FEATURE_INCOMPAT_ASYNC_COMMIT);
-	} else {
-		jbd2_journal_clear_features(sbi->s_journal,
-				JBD2_FEATURE_COMPAT_CHECKSUM, 0,
-				JBD2_FEATURE_INCOMPAT_ASYNC_COMMIT);
+	if (!set_journal_csum_feature_set(sb)) {
+		ext4_msg(sb, KERN_ERR, "Failed to set journal checksum "
+			 "feature set");
+		goto failed_mount_wq;
 	}
 
 	/* We have now updated the journal if required, so we can
@@ -3606,7 +3769,8 @@
 		goto failed_mount4;
 	}
 
-	ext4_setup_super(sb, es, sb->s_flags & MS_RDONLY);
+	if (ext4_setup_super(sb, es, sb->s_flags & MS_RDONLY))
+		sb->s_flags |= MS_RDONLY;
 
 	/* determine the minimum size of new large inodes, if present */
 	if (sbi->s_inode_size > EXT4_GOOD_OLD_INODE_SIZE) {
@@ -3641,7 +3805,7 @@
 	}
 
 	ext4_ext_init(sb);
-	err = ext4_mb_init(sb, needs_recovery);
+	err = ext4_mb_init(sb);
 	if (err) {
 		ext4_msg(sb, KERN_ERR, "failed to initialize mballoc (%d)",
 			 err);
@@ -3724,6 +3888,8 @@
 		brelse(sbi->s_group_desc[i]);
 	ext4_kvfree(sbi->s_group_desc);
 failed_mount:
+	if (sbi->s_chksum_driver)
+		crypto_free_shash(sbi->s_chksum_driver);
 	if (sbi->s_proc) {
 		remove_proc_entry("options", sbi->s_proc);
 		remove_proc_entry(sb->s_id, ext4_proc_root);
@@ -3847,7 +4013,7 @@
 		goto out_bdev;
 	}
 
-	es = (struct ext4_super_block *) (((char *)bh->b_data) + offset);
+	es = (struct ext4_super_block *) (bh->b_data + offset);
 	if ((le16_to_cpu(es->s_magic) != EXT4_SUPER_MAGIC) ||
 	    !(le32_to_cpu(es->s_feature_incompat) &
 	      EXT4_FEATURE_INCOMPAT_JOURNAL_DEV)) {
@@ -4039,6 +4205,7 @@
 				&EXT4_SB(sb)->s_freeinodes_counter));
 	sb->s_dirt = 0;
 	BUFFER_TRACE(sbh, "marking dirty");
+	ext4_superblock_csum_set(sb, es);
 	mark_buffer_dirty(sbh);
 	if (sync) {
 		error = sync_dirty_buffer(sbh);
@@ -4333,7 +4500,7 @@
 				struct ext4_group_desc *gdp =
 					ext4_get_group_desc(sb, g, NULL);
 
-				if (!ext4_group_desc_csum_verify(sbi, g, gdp)) {
+				if (!ext4_group_desc_csum_verify(sb, g, gdp)) {
 					ext4_msg(sb, KERN_ERR,
 	       "ext4_remount: Checksum for group %u failed (%u!=%u)",
 		g, le16_to_cpu(ext4_group_desc_csum(sbi, g, gdp)),
@@ -4758,7 +4925,6 @@
 		return -EIO;
 	}
 
-	mutex_lock_nested(&inode->i_mutex, I_MUTEX_QUOTA);
 	bh = ext4_bread(handle, inode, blk, 1, &err);
 	if (!bh)
 		goto out;
@@ -4774,16 +4940,13 @@
 	err = ext4_handle_dirty_metadata(handle, NULL, bh);
 	brelse(bh);
 out:
-	if (err) {
-		mutex_unlock(&inode->i_mutex);
+	if (err)
 		return err;
-	}
 	if (inode->i_size < off + len) {
 		i_size_write(inode, off + len);
 		EXT4_I(inode)->i_disksize = inode->i_size;
 		ext4_mark_inode_dirty(handle, inode);
 	}
-	mutex_unlock(&inode->i_mutex);
 	return len;
 }
 
diff --git a/fs/ext4/xattr.c b/fs/ext4/xattr.c
index e88748e..e56c9ed 100644
--- a/fs/ext4/xattr.c
+++ b/fs/ext4/xattr.c
@@ -122,6 +122,58 @@
 	NULL
 };
 
+static __le32 ext4_xattr_block_csum(struct inode *inode,
+				    sector_t block_nr,
+				    struct ext4_xattr_header *hdr)
+{
+	struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
+	struct ext4_inode_info *ei = EXT4_I(inode);
+	__u32 csum, old;
+
+	old = hdr->h_checksum;
+	hdr->h_checksum = 0;
+	if (le32_to_cpu(hdr->h_refcount) != 1) {
+		block_nr = cpu_to_le64(block_nr);
+		csum = ext4_chksum(sbi, sbi->s_csum_seed, (__u8 *)&block_nr,
+				   sizeof(block_nr));
+	} else
+		csum = ei->i_csum_seed;
+	csum = ext4_chksum(sbi, csum, (__u8 *)hdr,
+			   EXT4_BLOCK_SIZE(inode->i_sb));
+	hdr->h_checksum = old;
+	return cpu_to_le32(csum);
+}
+
+static int ext4_xattr_block_csum_verify(struct inode *inode,
+					sector_t block_nr,
+					struct ext4_xattr_header *hdr)
+{
+	if (EXT4_HAS_RO_COMPAT_FEATURE(inode->i_sb,
+		EXT4_FEATURE_RO_COMPAT_METADATA_CSUM) &&
+	    (hdr->h_checksum != ext4_xattr_block_csum(inode, block_nr, hdr)))
+		return 0;
+	return 1;
+}
+
+static void ext4_xattr_block_csum_set(struct inode *inode,
+				      sector_t block_nr,
+				      struct ext4_xattr_header *hdr)
+{
+	if (!EXT4_HAS_RO_COMPAT_FEATURE(inode->i_sb,
+		EXT4_FEATURE_RO_COMPAT_METADATA_CSUM))
+		return;
+
+	hdr->h_checksum = ext4_xattr_block_csum(inode, block_nr, hdr);
+}
+
+static inline int ext4_handle_dirty_xattr_block(handle_t *handle,
+						struct inode *inode,
+						struct buffer_head *bh)
+{
+	ext4_xattr_block_csum_set(inode, bh->b_blocknr, BHDR(bh));
+	return ext4_handle_dirty_metadata(handle, inode, bh);
+}
+
 static inline const struct xattr_handler *
 ext4_xattr_handler(int name_index)
 {
@@ -156,12 +208,22 @@
 }
 
 static inline int
-ext4_xattr_check_block(struct buffer_head *bh)
+ext4_xattr_check_block(struct inode *inode, struct buffer_head *bh)
 {
+	int error;
+
+	if (buffer_verified(bh))
+		return 0;
+
 	if (BHDR(bh)->h_magic != cpu_to_le32(EXT4_XATTR_MAGIC) ||
 	    BHDR(bh)->h_blocks != cpu_to_le32(1))
 		return -EIO;
-	return ext4_xattr_check_names(BFIRST(bh), bh->b_data + bh->b_size);
+	if (!ext4_xattr_block_csum_verify(inode, bh->b_blocknr, BHDR(bh)))
+		return -EIO;
+	error = ext4_xattr_check_names(BFIRST(bh), bh->b_data + bh->b_size);
+	if (!error)
+		set_buffer_verified(bh);
+	return error;
 }
 
 static inline int
@@ -224,7 +286,7 @@
 		goto cleanup;
 	ea_bdebug(bh, "b_count=%d, refcount=%d",
 		atomic_read(&(bh->b_count)), le32_to_cpu(BHDR(bh)->h_refcount));
-	if (ext4_xattr_check_block(bh)) {
+	if (ext4_xattr_check_block(inode, bh)) {
 bad_block:
 		EXT4_ERROR_INODE(inode, "bad block %llu",
 				 EXT4_I(inode)->i_file_acl);
@@ -369,7 +431,7 @@
 		goto cleanup;
 	ea_bdebug(bh, "b_count=%d, refcount=%d",
 		atomic_read(&(bh->b_count)), le32_to_cpu(BHDR(bh)->h_refcount));
-	if (ext4_xattr_check_block(bh)) {
+	if (ext4_xattr_check_block(inode, bh)) {
 		EXT4_ERROR_INODE(inode, "bad block %llu",
 				 EXT4_I(inode)->i_file_acl);
 		error = -EIO;
@@ -492,7 +554,7 @@
 		if (ce)
 			mb_cache_entry_release(ce);
 		unlock_buffer(bh);
-		error = ext4_handle_dirty_metadata(handle, inode, bh);
+		error = ext4_handle_dirty_xattr_block(handle, inode, bh);
 		if (IS_SYNC(inode))
 			ext4_handle_sync(handle);
 		dquot_free_block(inode, 1);
@@ -662,7 +724,7 @@
 		ea_bdebug(bs->bh, "b_count=%d, refcount=%d",
 			atomic_read(&(bs->bh->b_count)),
 			le32_to_cpu(BHDR(bs->bh)->h_refcount));
-		if (ext4_xattr_check_block(bs->bh)) {
+		if (ext4_xattr_check_block(inode, bs->bh)) {
 			EXT4_ERROR_INODE(inode, "bad block %llu",
 					 EXT4_I(inode)->i_file_acl);
 			error = -EIO;
@@ -725,9 +787,9 @@
 			if (error == -EIO)
 				goto bad_block;
 			if (!error)
-				error = ext4_handle_dirty_metadata(handle,
-								   inode,
-								   bs->bh);
+				error = ext4_handle_dirty_xattr_block(handle,
+								      inode,
+								      bs->bh);
 			if (error)
 				goto cleanup;
 			goto inserted;
@@ -796,9 +858,9 @@
 				ea_bdebug(new_bh, "reusing; refcount now=%d",
 					le32_to_cpu(BHDR(new_bh)->h_refcount));
 				unlock_buffer(new_bh);
-				error = ext4_handle_dirty_metadata(handle,
-								   inode,
-								   new_bh);
+				error = ext4_handle_dirty_xattr_block(handle,
+								      inode,
+								      new_bh);
 				if (error)
 					goto cleanup_dquot;
 			}
@@ -855,8 +917,8 @@
 			set_buffer_uptodate(new_bh);
 			unlock_buffer(new_bh);
 			ext4_xattr_cache_insert(new_bh);
-			error = ext4_handle_dirty_metadata(handle,
-							   inode, new_bh);
+			error = ext4_handle_dirty_xattr_block(handle,
+							      inode, new_bh);
 			if (error)
 				goto cleanup;
 		}
@@ -1193,7 +1255,7 @@
 		error = -EIO;
 		if (!bh)
 			goto cleanup;
-		if (ext4_xattr_check_block(bh)) {
+		if (ext4_xattr_check_block(inode, bh)) {
 			EXT4_ERROR_INODE(inode, "bad block %llu",
 					 EXT4_I(inode)->i_file_acl);
 			error = -EIO;
diff --git a/fs/ext4/xattr.h b/fs/ext4/xattr.h
index 25b7387..91f31ca7 100644
--- a/fs/ext4/xattr.h
+++ b/fs/ext4/xattr.h
@@ -27,7 +27,9 @@
 	__le32	h_refcount;	/* reference count */
 	__le32	h_blocks;	/* number of disk blocks used */
 	__le32	h_hash;		/* hash value of all attributes */
-	__u32	h_reserved[4];	/* zero right now */
+	__le32	h_checksum;	/* crc32c(uuid+id+xattrblock) */
+				/* id = inum if refcount=1, blknum otherwise */
+	__u32	h_reserved[3];	/* zero right now */
 };
 
 struct ext4_xattr_ibody_header {
diff --git a/fs/fat/dir.c b/fs/fat/dir.c
index aca191b..6eaa28c 100644
--- a/fs/fat/dir.c
+++ b/fs/fat/dir.c
@@ -98,8 +98,8 @@
 
 	*bh = sb_bread(sb, phys);
 	if (*bh == NULL) {
-		fat_msg(sb, KERN_ERR, "Directory bread(block %llu) failed",
-		       (llu)phys);
+		fat_msg_ratelimit(sb, KERN_ERR,
+			"Directory bread(block %llu) failed", (llu)phys);
 		/* skip this block */
 		*pos = (iblock + 1) << sb->s_blocksize_bits;
 		goto next;
diff --git a/fs/fat/fat.h b/fs/fat/fat.h
index 66994f3..fc35c5c 100644
--- a/fs/fat/fat.h
+++ b/fs/fat/fat.h
@@ -82,6 +82,7 @@
 	int fatent_shift;
 	struct fatent_operations *fatent_ops;
 	struct inode *fat_inode;
+	struct inode *fsinfo_inode;
 
 	struct ratelimit_state ratelimit;
 
@@ -334,6 +335,11 @@
 	__fat_fs_error(sb, __ratelimit(&MSDOS_SB(sb)->ratelimit), fmt , ## args)
 __printf(3, 4) __cold
 void fat_msg(struct super_block *sb, const char *level, const char *fmt, ...);
+#define fat_msg_ratelimit(sb, level, fmt, args...)	\
+	do {	\
+			if (__ratelimit(&MSDOS_SB(sb)->ratelimit))	\
+				fat_msg(sb, level, fmt, ## args);	\
+	 } while (0)
 extern int fat_clusters_flush(struct super_block *sb);
 extern int fat_chain_add(struct inode *inode, int new_dclus, int nr_cluster);
 extern void fat_time_fat2unix(struct msdos_sb_info *sbi, struct timespec *ts,
diff --git a/fs/fat/fatent.c b/fs/fat/fatent.c
index 2e81ac0..31f08ab 100644
--- a/fs/fat/fatent.c
+++ b/fs/fat/fatent.c
@@ -308,6 +308,16 @@
 	}
 }
 
+static void mark_fsinfo_dirty(struct super_block *sb)
+{
+	struct msdos_sb_info *sbi = MSDOS_SB(sb);
+
+	if (sb->s_flags & MS_RDONLY || sbi->fat_bits != 32)
+		return;
+
+	__mark_inode_dirty(sbi->fsinfo_inode, I_DIRTY_SYNC);
+}
+
 static inline int fat_ent_update_ptr(struct super_block *sb,
 				     struct fat_entry *fatent,
 				     int offset, sector_t blocknr)
@@ -498,7 +508,6 @@
 				sbi->prev_free = entry;
 				if (sbi->free_clusters != -1)
 					sbi->free_clusters--;
-				sb->s_dirt = 1;
 
 				cluster[idx_clus] = entry;
 				idx_clus++;
@@ -520,11 +529,11 @@
 	/* Couldn't allocate the free entries */
 	sbi->free_clusters = 0;
 	sbi->free_clus_valid = 1;
-	sb->s_dirt = 1;
 	err = -ENOSPC;
 
 out:
 	unlock_fat(sbi);
+	mark_fsinfo_dirty(sb);
 	fatent_brelse(&fatent);
 	if (!err) {
 		if (inode_needs_sync(inode))
@@ -549,7 +558,7 @@
 	struct fat_entry fatent;
 	struct buffer_head *bhs[MAX_BUF_PER_PAGE];
 	int i, err, nr_bhs;
-	int first_cl = cluster;
+	int first_cl = cluster, dirty_fsinfo = 0;
 
 	nr_bhs = 0;
 	fatent_init(&fatent);
@@ -587,7 +596,7 @@
 		ops->ent_put(&fatent, FAT_ENT_FREE);
 		if (sbi->free_clusters != -1) {
 			sbi->free_clusters++;
-			sb->s_dirt = 1;
+			dirty_fsinfo = 1;
 		}
 
 		if (nr_bhs + fatent.nr_bhs > MAX_BUF_PER_PAGE) {
@@ -617,6 +626,8 @@
 	for (i = 0; i < nr_bhs; i++)
 		brelse(bhs[i]);
 	unlock_fat(sbi);
+	if (dirty_fsinfo)
+		mark_fsinfo_dirty(sb);
 
 	return err;
 }
@@ -677,7 +688,7 @@
 	}
 	sbi->free_clusters = free;
 	sbi->free_clus_valid = 1;
-	sb->s_dirt = 1;
+	mark_fsinfo_dirty(sb);
 	fatent_brelse(&fatent);
 out:
 	unlock_fat(sbi);
diff --git a/fs/fat/inode.c b/fs/fat/inode.c
index 21687e3..a3d81eb 100644
--- a/fs/fat/inode.c
+++ b/fs/fat/inode.c
@@ -454,42 +454,16 @@
 		fat_truncate_blocks(inode, 0);
 	}
 	invalidate_inode_buffers(inode);
-	end_writeback(inode);
+	clear_inode(inode);
 	fat_cache_inval_inode(inode);
 	fat_detach(inode);
 }
 
-static void fat_write_super(struct super_block *sb)
-{
-	lock_super(sb);
-	sb->s_dirt = 0;
-
-	if (!(sb->s_flags & MS_RDONLY))
-		fat_clusters_flush(sb);
-	unlock_super(sb);
-}
-
-static int fat_sync_fs(struct super_block *sb, int wait)
-{
-	int err = 0;
-
-	if (sb->s_dirt) {
-		lock_super(sb);
-		sb->s_dirt = 0;
-		err = fat_clusters_flush(sb);
-		unlock_super(sb);
-	}
-
-	return err;
-}
-
 static void fat_put_super(struct super_block *sb)
 {
 	struct msdos_sb_info *sbi = MSDOS_SB(sb);
 
-	if (sb->s_dirt)
-		fat_write_super(sb);
-
+	iput(sbi->fsinfo_inode);
 	iput(sbi->fat_inode);
 
 	unload_nls(sbi->nls_disk);
@@ -661,7 +635,18 @@
 
 static int fat_write_inode(struct inode *inode, struct writeback_control *wbc)
 {
-	return __fat_write_inode(inode, wbc->sync_mode == WB_SYNC_ALL);
+	int err;
+
+	if (inode->i_ino == MSDOS_FSINFO_INO) {
+		struct super_block *sb = inode->i_sb;
+
+		lock_super(sb);
+		err = fat_clusters_flush(sb);
+		unlock_super(sb);
+	} else
+		err = __fat_write_inode(inode, wbc->sync_mode == WB_SYNC_ALL);
+
+	return err;
 }
 
 int fat_sync_inode(struct inode *inode)
@@ -678,8 +663,6 @@
 	.write_inode	= fat_write_inode,
 	.evict_inode	= fat_evict_inode,
 	.put_super	= fat_put_super,
-	.write_super	= fat_write_super,
-	.sync_fs	= fat_sync_fs,
 	.statfs		= fat_statfs,
 	.remount_fs	= fat_remount,
 
@@ -752,10 +735,9 @@
 }
 
 static int
-fat_encode_fh(struct dentry *de, __u32 *fh, int *lenp, int connectable)
+fat_encode_fh(struct inode *inode, __u32 *fh, int *lenp, struct inode *parent)
 {
 	int len = *lenp;
-	struct inode *inode =  de->d_inode;
 	u32 ipos_h, ipos_m, ipos_l;
 
 	if (len < 5) {
@@ -771,9 +753,9 @@
 	fh[1] = inode->i_generation;
 	fh[2] = ipos_h;
 	fh[3] = ipos_m | MSDOS_I(inode)->i_logstart;
-	spin_lock(&de->d_lock);
-	fh[4] = ipos_l | MSDOS_I(de->d_parent->d_inode)->i_logstart;
-	spin_unlock(&de->d_lock);
+	fh[4] = ipos_l;
+	if (parent)
+		fh[4] |= MSDOS_I(parent)->i_logstart;
 	return 3;
 }
 
@@ -1244,6 +1226,7 @@
 		   void (*setup)(struct super_block *))
 {
 	struct inode *root_inode = NULL, *fat_inode = NULL;
+	struct inode *fsinfo_inode = NULL;
 	struct buffer_head *bh;
 	struct fat_boot_sector *b;
 	struct msdos_sb_info *sbi;
@@ -1490,6 +1473,14 @@
 		goto out_fail;
 	MSDOS_I(fat_inode)->i_pos = 0;
 	sbi->fat_inode = fat_inode;
+
+	fsinfo_inode = new_inode(sb);
+	if (!fsinfo_inode)
+		goto out_fail;
+	fsinfo_inode->i_ino = MSDOS_FSINFO_INO;
+	sbi->fsinfo_inode = fsinfo_inode;
+	insert_inode_hash(fsinfo_inode);
+
 	root_inode = new_inode(sb);
 	if (!root_inode)
 		goto out_fail;
@@ -1516,6 +1507,8 @@
 		fat_msg(sb, KERN_INFO, "Can't find a valid FAT filesystem");
 
 out_fail:
+	if (fsinfo_inode)
+		iput(fsinfo_inode);
 	if (fat_inode)
 		iput(fat_inode);
 	unload_nls(sbi->nls_io);
diff --git a/fs/fcntl.c b/fs/fcntl.c
index d078b75..81b70e6 100644
--- a/fs/fcntl.c
+++ b/fs/fcntl.c
@@ -442,28 +442,24 @@
 SYSCALL_DEFINE3(fcntl, unsigned int, fd, unsigned int, cmd, unsigned long, arg)
 {	
 	struct file *filp;
+	int fput_needed;
 	long err = -EBADF;
 
-	filp = fget_raw(fd);
+	filp = fget_raw_light(fd, &fput_needed);
 	if (!filp)
 		goto out;
 
 	if (unlikely(filp->f_mode & FMODE_PATH)) {
-		if (!check_fcntl_cmd(cmd)) {
-			fput(filp);
-			goto out;
-		}
+		if (!check_fcntl_cmd(cmd))
+			goto out1;
 	}
 
 	err = security_file_fcntl(filp, cmd, arg);
-	if (err) {
-		fput(filp);
-		return err;
-	}
+	if (!err)
+		err = do_fcntl(fd, cmd, arg, filp);
 
-	err = do_fcntl(fd, cmd, arg, filp);
-
- 	fput(filp);
+out1:
+ 	fput_light(filp, fput_needed);
 out:
 	return err;
 }
@@ -473,26 +469,21 @@
 		unsigned long, arg)
 {	
 	struct file * filp;
-	long err;
+	long err = -EBADF;
+	int fput_needed;
 
-	err = -EBADF;
-	filp = fget_raw(fd);
+	filp = fget_raw_light(fd, &fput_needed);
 	if (!filp)
 		goto out;
 
 	if (unlikely(filp->f_mode & FMODE_PATH)) {
-		if (!check_fcntl_cmd(cmd)) {
-			fput(filp);
-			goto out;
-		}
+		if (!check_fcntl_cmd(cmd))
+			goto out1;
 	}
 
 	err = security_file_fcntl(filp, cmd, arg);
-	if (err) {
-		fput(filp);
-		return err;
-	}
-	err = -EBADF;
+	if (err)
+		goto out1;
 	
 	switch (cmd) {
 		case F_GETLK64:
@@ -507,7 +498,8 @@
 			err = do_fcntl(fd, cmd, arg, filp);
 			break;
 	}
-	fput(filp);
+out1:
+	fput_light(filp, fput_needed);
 out:
 	return err;
 }
diff --git a/fs/file_table.c b/fs/file_table.c
index 70f2a0f..a305d9e 100644
--- a/fs/file_table.c
+++ b/fs/file_table.c
@@ -34,7 +34,6 @@
 	.max_files = NR_FILE
 };
 
-DECLARE_LGLOCK(files_lglock);
 DEFINE_LGLOCK(files_lglock);
 
 /* SLAB cache for file structures */
@@ -421,9 +420,9 @@
  */
 void file_sb_list_add(struct file *file, struct super_block *sb)
 {
-	lg_local_lock(files_lglock);
+	lg_local_lock(&files_lglock);
 	__file_sb_list_add(file, sb);
-	lg_local_unlock(files_lglock);
+	lg_local_unlock(&files_lglock);
 }
 
 /**
@@ -436,9 +435,9 @@
 void file_sb_list_del(struct file *file)
 {
 	if (!list_empty(&file->f_u.fu_list)) {
-		lg_local_lock_cpu(files_lglock, file_list_cpu(file));
+		lg_local_lock_cpu(&files_lglock, file_list_cpu(file));
 		list_del_init(&file->f_u.fu_list);
-		lg_local_unlock_cpu(files_lglock, file_list_cpu(file));
+		lg_local_unlock_cpu(&files_lglock, file_list_cpu(file));
 	}
 }
 
@@ -485,7 +484,7 @@
 	struct file *f;
 
 retry:
-	lg_global_lock(files_lglock);
+	lg_global_lock(&files_lglock);
 	do_file_list_for_each_entry(sb, f) {
 		struct vfsmount *mnt;
 		if (!S_ISREG(f->f_path.dentry->d_inode->i_mode))
@@ -502,12 +501,12 @@
 		file_release_write(f);
 		mnt = mntget(f->f_path.mnt);
 		/* This can sleep, so we can't hold the spinlock. */
-		lg_global_unlock(files_lglock);
+		lg_global_unlock(&files_lglock);
 		mnt_drop_write(mnt);
 		mntput(mnt);
 		goto retry;
 	} while_file_list_for_each_entry;
-	lg_global_unlock(files_lglock);
+	lg_global_unlock(&files_lglock);
 }
 
 void __init files_init(unsigned long mempages)
@@ -525,6 +524,6 @@
 	n = (mempages * (PAGE_SIZE / 1024)) / 10;
 	files_stat.max_files = max_t(unsigned long, n, NR_FILE);
 	files_defer_init();
-	lg_lock_init(files_lglock);
+	lg_lock_init(&files_lglock, "files_lglock");
 	percpu_counter_init(&nr_files, 0);
 } 
diff --git a/fs/freevxfs/vxfs_inode.c b/fs/freevxfs/vxfs_inode.c
index cf9ef91..ef67c95 100644
--- a/fs/freevxfs/vxfs_inode.c
+++ b/fs/freevxfs/vxfs_inode.c
@@ -355,6 +355,6 @@
 vxfs_evict_inode(struct inode *ip)
 {
 	truncate_inode_pages(&ip->i_data, 0);
-	end_writeback(ip);
+	clear_inode(ip);
 	call_rcu(&ip->i_rcu, vxfs_i_callback);
 }
diff --git a/fs/fs-writeback.c b/fs/fs-writeback.c
index 539f36c..41a3ccf 100644
--- a/fs/fs-writeback.c
+++ b/fs/fs-writeback.c
@@ -231,11 +231,8 @@
 
 static void inode_sync_complete(struct inode *inode)
 {
-	/*
-	 * Prevent speculative execution through
-	 * spin_unlock(&wb->list_lock);
-	 */
-
+	inode->i_state &= ~I_SYNC;
+	/* Waiters must see I_SYNC cleared before being woken up */
 	smp_mb();
 	wake_up_bit(&inode->i_state, __I_SYNC);
 }
@@ -329,10 +326,12 @@
 }
 
 /*
- * Wait for writeback on an inode to complete.
+ * Wait for writeback on an inode to complete. Called with i_lock held.
+ * Caller must make sure inode cannot go away when we drop i_lock.
  */
-static void inode_wait_for_writeback(struct inode *inode,
-				     struct bdi_writeback *wb)
+static void __inode_wait_for_writeback(struct inode *inode)
+	__releases(inode->i_lock)
+	__acquires(inode->i_lock)
 {
 	DEFINE_WAIT_BIT(wq, &inode->i_state, __I_SYNC);
 	wait_queue_head_t *wqh;
@@ -340,70 +339,119 @@
 	wqh = bit_waitqueue(&inode->i_state, __I_SYNC);
 	while (inode->i_state & I_SYNC) {
 		spin_unlock(&inode->i_lock);
-		spin_unlock(&wb->list_lock);
 		__wait_on_bit(wqh, &wq, inode_wait, TASK_UNINTERRUPTIBLE);
-		spin_lock(&wb->list_lock);
 		spin_lock(&inode->i_lock);
 	}
 }
 
 /*
- * Write out an inode's dirty pages.  Called under wb->list_lock and
- * inode->i_lock.  Either the caller has an active reference on the inode or
- * the inode has I_WILL_FREE set.
- *
- * If `wait' is set, wait on the writeout.
- *
- * The whole writeout design is quite complex and fragile.  We want to avoid
- * starvation of particular inodes when others are being redirtied, prevent
- * livelocks, etc.
+ * Wait for writeback on an inode to complete. Caller must have inode pinned.
+ */
+void inode_wait_for_writeback(struct inode *inode)
+{
+	spin_lock(&inode->i_lock);
+	__inode_wait_for_writeback(inode);
+	spin_unlock(&inode->i_lock);
+}
+
+/*
+ * Sleep until I_SYNC is cleared. This function must be called with i_lock
+ * held and drops it. It is aimed for callers not holding any inode reference
+ * so once i_lock is dropped, inode can go away.
+ */
+static void inode_sleep_on_writeback(struct inode *inode)
+	__releases(inode->i_lock)
+{
+	DEFINE_WAIT(wait);
+	wait_queue_head_t *wqh = bit_waitqueue(&inode->i_state, __I_SYNC);
+	int sleep;
+
+	prepare_to_wait(wqh, &wait, TASK_UNINTERRUPTIBLE);
+	sleep = inode->i_state & I_SYNC;
+	spin_unlock(&inode->i_lock);
+	if (sleep)
+		schedule();
+	finish_wait(wqh, &wait);
+}
+
+/*
+ * Find proper writeback list for the inode depending on its current state and
+ * possibly also change of its state while we were doing writeback.  Here we
+ * handle things such as livelock prevention or fairness of writeback among
+ * inodes. This function can be called only by the flusher thread - no one else
+ * processes all inodes in writeback lists, and requeueing inodes behind the
+ * flusher thread's back can have unexpected consequences.
+ */
+static void requeue_inode(struct inode *inode, struct bdi_writeback *wb,
+			  struct writeback_control *wbc)
+{
+	if (inode->i_state & I_FREEING)
+		return;
+
+	/*
+	 * Sync livelock prevention. Each inode is tagged and synced in one
+	 * shot. If still dirty, it will be redirty_tail()'ed below.  Update
+	 * the dirty time to prevent enqueue and sync it again.
+	 */
+	if ((inode->i_state & I_DIRTY) &&
+	    (wbc->sync_mode == WB_SYNC_ALL || wbc->tagged_writepages))
+		inode->dirtied_when = jiffies;
+
+	if (wbc->pages_skipped) {
+		/*
+		 * writeback is not making progress due to locked
+		 * buffers. Skip this inode for now.
+		 */
+		redirty_tail(inode, wb);
+		return;
+	}
+
+	if (mapping_tagged(inode->i_mapping, PAGECACHE_TAG_DIRTY)) {
+		/*
+		 * We didn't write back all the pages.  nfs_writepages()
+		 * sometimes bales out without doing anything.
+		 */
+		if (wbc->nr_to_write <= 0) {
+			/* Slice used up. Queue for next turn. */
+			requeue_io(inode, wb);
+		} else {
+			/*
+			 * Writeback blocked by something other than
+			 * congestion. Delay the inode for some time to
+			 * avoid spinning on the CPU (100% iowait)
+			 * retrying writeback of the dirty page/inode
+			 * that cannot be performed immediately.
+			 */
+			redirty_tail(inode, wb);
+		}
+	} else if (inode->i_state & I_DIRTY) {
+		/*
+		 * Filesystems can dirty the inode during writeback operations,
+		 * such as delayed allocation during submission or metadata
+		 * updates after data IO completion.
+		 */
+		redirty_tail(inode, wb);
+	} else {
+		/* The inode is clean. Remove from writeback lists. */
+		list_del_init(&inode->i_wb_list);
+	}
+}
+
+/*
+ * Write out an inode and its dirty pages. Do not update the writeback list
+ * linkage. That is left to the caller. The caller is also responsible for
+ * setting I_SYNC flag and calling inode_sync_complete() to clear it.
  */
 static int
-writeback_single_inode(struct inode *inode, struct bdi_writeback *wb,
-		       struct writeback_control *wbc)
+__writeback_single_inode(struct inode *inode, struct bdi_writeback *wb,
+			 struct writeback_control *wbc)
 {
 	struct address_space *mapping = inode->i_mapping;
 	long nr_to_write = wbc->nr_to_write;
 	unsigned dirty;
 	int ret;
 
-	assert_spin_locked(&wb->list_lock);
-	assert_spin_locked(&inode->i_lock);
-
-	if (!atomic_read(&inode->i_count))
-		WARN_ON(!(inode->i_state & (I_WILL_FREE|I_FREEING)));
-	else
-		WARN_ON(inode->i_state & I_WILL_FREE);
-
-	if (inode->i_state & I_SYNC) {
-		/*
-		 * If this inode is locked for writeback and we are not doing
-		 * writeback-for-data-integrity, move it to b_more_io so that
-		 * writeback can proceed with the other inodes on s_io.
-		 *
-		 * We'll have another go at writing back this inode when we
-		 * completed a full scan of b_io.
-		 */
-		if (wbc->sync_mode != WB_SYNC_ALL) {
-			requeue_io(inode, wb);
-			trace_writeback_single_inode_requeue(inode, wbc,
-							     nr_to_write);
-			return 0;
-		}
-
-		/*
-		 * It's a data-integrity sync.  We must wait.
-		 */
-		inode_wait_for_writeback(inode, wb);
-	}
-
-	BUG_ON(inode->i_state & I_SYNC);
-
-	/* Set I_SYNC, reset I_DIRTY_PAGES */
-	inode->i_state |= I_SYNC;
-	inode->i_state &= ~I_DIRTY_PAGES;
-	spin_unlock(&inode->i_lock);
-	spin_unlock(&wb->list_lock);
+	WARN_ON(!(inode->i_state & I_SYNC));
 
 	ret = do_writepages(mapping, wbc);
 
@@ -424,6 +472,9 @@
 	 * write_inode()
 	 */
 	spin_lock(&inode->i_lock);
+	/* Clear I_DIRTY_PAGES if we've written out all dirty pages */
+	if (!mapping_tagged(mapping, PAGECACHE_TAG_DIRTY))
+		inode->i_state &= ~I_DIRTY_PAGES;
 	dirty = inode->i_state & I_DIRTY;
 	inode->i_state &= ~(I_DIRTY_SYNC | I_DIRTY_DATASYNC);
 	spin_unlock(&inode->i_lock);
@@ -433,60 +484,67 @@
 		if (ret == 0)
 			ret = err;
 	}
+	trace_writeback_single_inode(inode, wbc, nr_to_write);
+	return ret;
+}
+
+/*
+ * Write out an inode's dirty pages. Either the caller has an active reference
+ * on the inode or the inode has I_WILL_FREE set.
+ *
+ * This function is designed to be called for writing back one inode which
+ * we go e.g. from filesystem. Flusher thread uses __writeback_single_inode()
+ * and does more profound writeback list handling in writeback_sb_inodes().
+ */
+static int
+writeback_single_inode(struct inode *inode, struct bdi_writeback *wb,
+		       struct writeback_control *wbc)
+{
+	int ret = 0;
+
+	spin_lock(&inode->i_lock);
+	if (!atomic_read(&inode->i_count))
+		WARN_ON(!(inode->i_state & (I_WILL_FREE|I_FREEING)));
+	else
+		WARN_ON(inode->i_state & I_WILL_FREE);
+
+	if (inode->i_state & I_SYNC) {
+		if (wbc->sync_mode != WB_SYNC_ALL)
+			goto out;
+		/*
+		 * It's a data-integrity sync. We must wait. Since callers hold
+		 * inode reference or inode has I_WILL_FREE set, it cannot go
+		 * away under us.
+		 */
+		__inode_wait_for_writeback(inode);
+	}
+	WARN_ON(inode->i_state & I_SYNC);
+	/*
+	 * Skip the inode if it is clean. We don't want to mess with writeback
+	 * lists in this function since the flusher thread may be running e.g. sync
+	 * in parallel, and if we move the inode, it could get skipped. So here we
+	 * make sure the inode is on some writeback list and leave it there unless
+	 * we have completely cleaned the inode.
+	 */
+	if (!(inode->i_state & I_DIRTY))
+		goto out;
+	inode->i_state |= I_SYNC;
+	spin_unlock(&inode->i_lock);
+
+	ret = __writeback_single_inode(inode, wb, wbc);
 
 	spin_lock(&wb->list_lock);
 	spin_lock(&inode->i_lock);
-	inode->i_state &= ~I_SYNC;
-	if (!(inode->i_state & I_FREEING)) {
-		/*
-		 * Sync livelock prevention. Each inode is tagged and synced in
-		 * one shot. If still dirty, it will be redirty_tail()'ed below.
-		 * Update the dirty time to prevent enqueue and sync it again.
-		 */
-		if ((inode->i_state & I_DIRTY) &&
-		    (wbc->sync_mode == WB_SYNC_ALL || wbc->tagged_writepages))
-			inode->dirtied_when = jiffies;
-
-		if (mapping_tagged(mapping, PAGECACHE_TAG_DIRTY)) {
-			/*
-			 * We didn't write back all the pages.  nfs_writepages()
-			 * sometimes bales out without doing anything.
-			 */
-			inode->i_state |= I_DIRTY_PAGES;
-			if (wbc->nr_to_write <= 0) {
-				/*
-				 * slice used up: queue for next turn
-				 */
-				requeue_io(inode, wb);
-			} else {
-				/*
-				 * Writeback blocked by something other than
-				 * congestion. Delay the inode for some time to
-				 * avoid spinning on the CPU (100% iowait)
-				 * retrying writeback of the dirty page/inode
-				 * that cannot be performed immediately.
-				 */
-				redirty_tail(inode, wb);
-			}
-		} else if (inode->i_state & I_DIRTY) {
-			/*
-			 * Filesystems can dirty the inode during writeback
-			 * operations, such as delayed allocation during
-			 * submission or metadata updates after data IO
-			 * completion.
-			 */
-			redirty_tail(inode, wb);
-		} else {
-			/*
-			 * The inode is clean.  At this point we either have
-			 * a reference to the inode or it's on it's way out.
-			 * No need to add it back to the LRU.
-			 */
-			list_del_init(&inode->i_wb_list);
-		}
-	}
+	/*
+	 * If inode is clean, remove it from writeback lists. Otherwise don't
+	 * touch it. See comment above for explanation.
+	 */
+	if (!(inode->i_state & I_DIRTY))
+		list_del_init(&inode->i_wb_list);
+	spin_unlock(&wb->list_lock);
 	inode_sync_complete(inode);
-	trace_writeback_single_inode(inode, wbc, nr_to_write);
+out:
+	spin_unlock(&inode->i_lock);
 	return ret;
 }
 
@@ -580,29 +638,58 @@
 			redirty_tail(inode, wb);
 			continue;
 		}
-		__iget(inode);
+		if ((inode->i_state & I_SYNC) && wbc.sync_mode != WB_SYNC_ALL) {
+			/*
+			 * If this inode is locked for writeback and we are not
+			 * doing writeback-for-data-integrity, move it to
+			 * b_more_io so that writeback can proceed with the
+			 * other inodes on s_io.
+			 *
+			 * We'll have another go at writing back this inode
+			 * when we completed a full scan of b_io.
+			 */
+			spin_unlock(&inode->i_lock);
+			requeue_io(inode, wb);
+			trace_writeback_sb_inodes_requeue(inode);
+			continue;
+		}
+		spin_unlock(&wb->list_lock);
+
+		/*
+		 * We already requeued the inode if it had I_SYNC set and we
+		 * are doing WB_SYNC_NONE writeback. So this catches only the
+		 * WB_SYNC_ALL case.
+		 */
+		if (inode->i_state & I_SYNC) {
+			/* Wait for I_SYNC. This function drops i_lock... */
+			inode_sleep_on_writeback(inode);
+			/* Inode may be gone, start again */
+			spin_lock(&wb->list_lock);
+			continue;
+		}
+		inode->i_state |= I_SYNC;
+		spin_unlock(&inode->i_lock);
+
 		write_chunk = writeback_chunk_size(wb->bdi, work);
 		wbc.nr_to_write = write_chunk;
 		wbc.pages_skipped = 0;
 
-		writeback_single_inode(inode, wb, &wbc);
+		/*
+		 * We use I_SYNC to pin the inode in memory. While it is set
+		 * evict_inode() will wait so the inode cannot be freed.
+		 */
+		__writeback_single_inode(inode, wb, &wbc);
 
 		work->nr_pages -= write_chunk - wbc.nr_to_write;
 		wrote += write_chunk - wbc.nr_to_write;
+		spin_lock(&wb->list_lock);
+		spin_lock(&inode->i_lock);
 		if (!(inode->i_state & I_DIRTY))
 			wrote++;
-		if (wbc.pages_skipped) {
-			/*
-			 * writeback is not making progress due to locked
-			 * buffers.  Skip this inode for now.
-			 */
-			redirty_tail(inode, wb);
-		}
+		requeue_inode(inode, wb, &wbc);
+		inode_sync_complete(inode);
 		spin_unlock(&inode->i_lock);
-		spin_unlock(&wb->list_lock);
-		iput(inode);
-		cond_resched();
-		spin_lock(&wb->list_lock);
+		cond_resched_lock(&wb->list_lock);
 		/*
 		 * bail out to wb_writeback() often enough to check
 		 * background threshold and other termination conditions.
@@ -796,8 +883,10 @@
 			trace_writeback_wait(wb->bdi, work);
 			inode = wb_inode(wb->b_more_io.prev);
 			spin_lock(&inode->i_lock);
-			inode_wait_for_writeback(inode, wb);
-			spin_unlock(&inode->i_lock);
+			spin_unlock(&wb->list_lock);
+			/* This function drops i_lock... */
+			inode_sleep_on_writeback(inode);
+			spin_lock(&wb->list_lock);
 		}
 	}
 	spin_unlock(&wb->list_lock);
@@ -1331,7 +1420,6 @@
 int write_inode_now(struct inode *inode, int sync)
 {
 	struct bdi_writeback *wb = &inode_to_bdi(inode)->wb;
-	int ret;
 	struct writeback_control wbc = {
 		.nr_to_write = LONG_MAX,
 		.sync_mode = sync ? WB_SYNC_ALL : WB_SYNC_NONE,
@@ -1343,12 +1431,7 @@
 		wbc.nr_to_write = 0;
 
 	might_sleep();
-	spin_lock(&wb->list_lock);
-	spin_lock(&inode->i_lock);
-	ret = writeback_single_inode(inode, wb, &wbc);
-	spin_unlock(&inode->i_lock);
-	spin_unlock(&wb->list_lock);
-	return ret;
+	return writeback_single_inode(inode, wb, &wbc);
 }
 EXPORT_SYMBOL(write_inode_now);
 
@@ -1365,15 +1448,7 @@
  */
 int sync_inode(struct inode *inode, struct writeback_control *wbc)
 {
-	struct bdi_writeback *wb = &inode_to_bdi(inode)->wb;
-	int ret;
-
-	spin_lock(&wb->list_lock);
-	spin_lock(&inode->i_lock);
-	ret = writeback_single_inode(inode, wb, wbc);
-	spin_unlock(&inode->i_lock);
-	spin_unlock(&wb->list_lock);
-	return ret;
+	return writeback_single_inode(inode, &inode_to_bdi(inode)->wb, wbc);
 }
 EXPORT_SYMBOL(sync_inode);
 
diff --git a/fs/fuse/control.c b/fs/fuse/control.c
index 42593c5..03ff5b1 100644
--- a/fs/fuse/control.c
+++ b/fs/fuse/control.c
@@ -75,19 +75,13 @@
 				     unsigned global_limit)
 {
 	unsigned long t;
-	char tmp[32];
 	unsigned limit = (1 << 16) - 1;
 	int err;
 
-	if (*ppos || count >= sizeof(tmp) - 1)
+	if (*ppos)
 		return -EINVAL;
 
-	if (copy_from_user(tmp, buf, count))
-		return -EINVAL;
-
-	tmp[count] = '\0';
-
-	err = strict_strtoul(tmp, 0, &t);
+	err = kstrtoul_from_user(buf, count, 0, &t);
 	if (err)
 		return err;
 
diff --git a/fs/fuse/dir.c b/fs/fuse/dir.c
index df5ac04..334e0b1 100644
--- a/fs/fuse/dir.c
+++ b/fs/fuse/dir.c
@@ -775,6 +775,8 @@
 static void fuse_fillattr(struct inode *inode, struct fuse_attr *attr,
 			  struct kstat *stat)
 {
+	unsigned int blkbits;
+
 	stat->dev = inode->i_sb->s_dev;
 	stat->ino = attr->ino;
 	stat->mode = (inode->i_mode & S_IFMT) | (attr->mode & 07777);
@@ -790,7 +792,13 @@
 	stat->ctime.tv_nsec = attr->ctimensec;
 	stat->size = attr->size;
 	stat->blocks = attr->blocks;
-	stat->blksize = (1 << inode->i_blkbits);
+
+	if (attr->blksize != 0)
+		blkbits = ilog2(attr->blksize);
+	else
+		blkbits = inode->i_sb->s_blocksize_bits;
+
+	stat->blksize = 1 << blkbits;
 }
 
 static int fuse_do_getattr(struct inode *inode, struct kstat *stat,
@@ -863,6 +871,7 @@
 		if (stat) {
 			generic_fillattr(inode, stat);
 			stat->mode = fi->orig_i_mode;
+			stat->ino = fi->orig_ino;
 		}
 	}
 
diff --git a/fs/fuse/file.c b/fs/fuse/file.c
index 504e61b..b321a68 100644
--- a/fs/fuse/file.c
+++ b/fs/fuse/file.c
@@ -962,7 +962,9 @@
 	if (err)
 		goto out;
 
-	file_update_time(file);
+	err = file_update_time(file);
+	if (err)
+		goto out;
 
 	if (file->f_flags & O_DIRECT) {
 		written = generic_file_direct_write(iocb, iov, &nr_segs,
@@ -2171,6 +2173,44 @@
 	return ret;
 }
 
+long fuse_file_fallocate(struct file *file, int mode, loff_t offset,
+			    loff_t length)
+{
+	struct fuse_file *ff = file->private_data;
+	struct fuse_conn *fc = ff->fc;
+	struct fuse_req *req;
+	struct fuse_fallocate_in inarg = {
+		.fh = ff->fh,
+		.offset = offset,
+		.length = length,
+		.mode = mode
+	};
+	int err;
+
+	if (fc->no_fallocate)
+		return -EOPNOTSUPP;
+
+	req = fuse_get_req(fc);
+	if (IS_ERR(req))
+		return PTR_ERR(req);
+
+	req->in.h.opcode = FUSE_FALLOCATE;
+	req->in.h.nodeid = ff->nodeid;
+	req->in.numargs = 1;
+	req->in.args[0].size = sizeof(inarg);
+	req->in.args[0].value = &inarg;
+	fuse_request_send(fc, req);
+	err = req->out.h.error;
+	if (err == -ENOSYS) {
+		fc->no_fallocate = 1;
+		err = -EOPNOTSUPP;
+	}
+	fuse_put_request(fc, req);
+
+	return err;
+}
+EXPORT_SYMBOL_GPL(fuse_file_fallocate);
+
 static const struct file_operations fuse_file_operations = {
 	.llseek		= fuse_file_llseek,
 	.read		= do_sync_read,
@@ -2188,6 +2228,7 @@
 	.unlocked_ioctl	= fuse_file_ioctl,
 	.compat_ioctl	= fuse_file_compat_ioctl,
 	.poll		= fuse_file_poll,
+	.fallocate	= fuse_file_fallocate,
 };
 
 static const struct file_operations fuse_direct_io_file_operations = {
@@ -2204,6 +2245,7 @@
 	.unlocked_ioctl	= fuse_file_ioctl,
 	.compat_ioctl	= fuse_file_compat_ioctl,
 	.poll		= fuse_file_poll,
+	.fallocate	= fuse_file_fallocate,
 	/* no splice_read */
 };
 
diff --git a/fs/fuse/fuse_i.h b/fs/fuse/fuse_i.h
index 572cefc..771fb63 100644
--- a/fs/fuse/fuse_i.h
+++ b/fs/fuse/fuse_i.h
@@ -82,6 +82,9 @@
 	    preserve the original mode */
 	umode_t orig_i_mode;
 
+	/** 64 bit inode number */
+	u64 orig_ino;
+
 	/** Version of last attribute change */
 	u64 attr_version;
 
@@ -478,6 +481,9 @@
 	/** Are BSD file locking primitives not implemented by fs? */
 	unsigned no_flock:1;
 
+	/** Is fallocate not implemented by fs? */
+	unsigned no_fallocate:1;
+
 	/** The number of requests waiting for completion */
 	atomic_t num_waiting;
 
diff --git a/fs/fuse/inode.c b/fs/fuse/inode.c
index 26783eb..1cd6165 100644
--- a/fs/fuse/inode.c
+++ b/fs/fuse/inode.c
@@ -91,6 +91,7 @@
 	fi->nlookup = 0;
 	fi->attr_version = 0;
 	fi->writectr = 0;
+	fi->orig_ino = 0;
 	INIT_LIST_HEAD(&fi->write_files);
 	INIT_LIST_HEAD(&fi->queued_writes);
 	INIT_LIST_HEAD(&fi->writepages);
@@ -122,7 +123,7 @@
 static void fuse_evict_inode(struct inode *inode)
 {
 	truncate_inode_pages(&inode->i_data, 0);
-	end_writeback(inode);
+	clear_inode(inode);
 	if (inode->i_sb->s_flags & MS_ACTIVE) {
 		struct fuse_conn *fc = get_fuse_conn(inode);
 		struct fuse_inode *fi = get_fuse_inode(inode);
@@ -139,6 +140,18 @@
 	return 0;
 }
 
+/*
+ * ino_t is 32-bits on 32-bit arch. We have to squash the 64-bit value down
+ * so that it will fit.
+ */
+static ino_t fuse_squash_ino(u64 ino64)
+{
+	ino_t ino = (ino_t) ino64;
+	if (sizeof(ino_t) < sizeof(u64))
+		ino ^= ino64 >> (sizeof(u64) - sizeof(ino_t)) * 8;
+	return ino;
+}
+
 void fuse_change_attributes_common(struct inode *inode, struct fuse_attr *attr,
 				   u64 attr_valid)
 {
@@ -148,7 +161,7 @@
 	fi->attr_version = ++fc->attr_version;
 	fi->i_time = attr_valid;
 
-	inode->i_ino     = attr->ino;
+	inode->i_ino     = fuse_squash_ino(attr->ino);
 	inode->i_mode    = (inode->i_mode & S_IFMT) | (attr->mode & 07777);
 	set_nlink(inode, attr->nlink);
 	inode->i_uid     = attr->uid;
@@ -174,6 +187,8 @@
 	fi->orig_i_mode = inode->i_mode;
 	if (!(fc->flags & FUSE_DEFAULT_PERMISSIONS))
 		inode->i_mode &= ~S_ISVTX;
+
+	fi->orig_ino = attr->ino;
 }
 
 void fuse_change_attributes(struct inode *inode, struct fuse_attr *attr,
@@ -627,12 +642,10 @@
 	return ERR_PTR(err);
 }
 
-static int fuse_encode_fh(struct dentry *dentry, u32 *fh, int *max_len,
-			   int connectable)
+static int fuse_encode_fh(struct inode *inode, u32 *fh, int *max_len,
+			   struct inode *parent)
 {
-	struct inode *inode = dentry->d_inode;
-	bool encode_parent = connectable && !S_ISDIR(inode->i_mode);
-	int len = encode_parent ? 6 : 3;
+	int len = parent ? 6 : 3;
 	u64 nodeid;
 	u32 generation;
 
@@ -648,14 +661,9 @@
 	fh[1] = (u32)(nodeid & 0xffffffff);
 	fh[2] = generation;
 
-	if (encode_parent) {
-		struct inode *parent;
-
-		spin_lock(&dentry->d_lock);
-		parent = dentry->d_parent->d_inode;
+	if (parent) {
 		nodeid = get_fuse_inode(parent)->nodeid;
 		generation = parent->i_generation;
-		spin_unlock(&dentry->d_lock);
 
 		fh[3] = (u32)(nodeid >> 32);
 		fh[4] = (u32)(nodeid & 0xffffffff);
@@ -663,7 +671,7 @@
 	}
 
 	*max_len = len;
-	return encode_parent ? 0x82 : 0x81;
+	return parent ? 0x82 : 0x81;
 }
 
 static struct dentry *fuse_fh_to_dentry(struct super_block *sb,
diff --git a/fs/gfs2/export.c b/fs/gfs2/export.c
index 70ba891..e8ed6d4 100644
--- a/fs/gfs2/export.c
+++ b/fs/gfs2/export.c
@@ -28,15 +28,14 @@
 #define GFS2_LARGE_FH_SIZE 8
 #define GFS2_OLD_FH_SIZE 10
 
-static int gfs2_encode_fh(struct dentry *dentry, __u32 *p, int *len,
-			  int connectable)
+static int gfs2_encode_fh(struct inode *inode, __u32 *p, int *len,
+			  struct inode *parent)
 {
 	__be32 *fh = (__force __be32 *)p;
-	struct inode *inode = dentry->d_inode;
 	struct super_block *sb = inode->i_sb;
 	struct gfs2_inode *ip = GFS2_I(inode);
 
-	if (connectable && (*len < GFS2_LARGE_FH_SIZE)) {
+	if (parent && (*len < GFS2_LARGE_FH_SIZE)) {
 		*len = GFS2_LARGE_FH_SIZE;
 		return 255;
 	} else if (*len < GFS2_SMALL_FH_SIZE) {
@@ -50,14 +49,10 @@
 	fh[3] = cpu_to_be32(ip->i_no_addr & 0xFFFFFFFF);
 	*len = GFS2_SMALL_FH_SIZE;
 
-	if (!connectable || inode == sb->s_root->d_inode)
+	if (!parent || inode == sb->s_root->d_inode)
 		return *len;
 
-	spin_lock(&dentry->d_lock);
-	inode = dentry->d_parent->d_inode;
-	ip = GFS2_I(inode);
-	igrab(inode);
-	spin_unlock(&dentry->d_lock);
+	ip = GFS2_I(parent);
 
 	fh[4] = cpu_to_be32(ip->i_no_formal_ino >> 32);
 	fh[5] = cpu_to_be32(ip->i_no_formal_ino & 0xFFFFFFFF);
@@ -65,8 +60,6 @@
 	fh[7] = cpu_to_be32(ip->i_no_addr & 0xFFFFFFFF);
 	*len = GFS2_LARGE_FH_SIZE;
 
-	iput(inode);
-
 	return *len;
 }
 
diff --git a/fs/gfs2/super.c b/fs/gfs2/super.c
index 6172fa7..713e621 100644
--- a/fs/gfs2/super.c
+++ b/fs/gfs2/super.c
@@ -1554,7 +1554,7 @@
 out:
 	/* Case 3 starts here */
 	truncate_inode_pages(&inode->i_data, 0);
-	end_writeback(inode);
+	clear_inode(inode);
 	gfs2_dir_hash_inval(ip);
 	ip->i_gl->gl_object = NULL;
 	flush_delayed_work_sync(&ip->i_gl->gl_work);
diff --git a/fs/hfs/inode.c b/fs/hfs/inode.c
index 737dbeb..761ec06 100644
--- a/fs/hfs/inode.c
+++ b/fs/hfs/inode.c
@@ -532,7 +532,7 @@
 void hfs_evict_inode(struct inode *inode)
 {
 	truncate_inode_pages(&inode->i_data, 0);
-	end_writeback(inode);
+	clear_inode(inode);
 	if (HFS_IS_RSRC(inode) && HFS_I(inode)->rsrc_inode) {
 		HFS_I(HFS_I(inode)->rsrc_inode)->rsrc_inode = NULL;
 		iput(HFS_I(inode)->rsrc_inode);
diff --git a/fs/hfsplus/ioctl.c b/fs/hfsplus/ioctl.c
index c640ba5..09addc8 100644
--- a/fs/hfsplus/ioctl.c
+++ b/fs/hfsplus/ioctl.c
@@ -31,6 +31,7 @@
 	struct hfsplus_sb_info *sbi = HFSPLUS_SB(inode->i_sb);
 	struct hfsplus_vh *vh = sbi->s_vhdr;
 	struct hfsplus_vh *bvh = sbi->s_backup_vhdr;
+	u32 cnid = (unsigned long)dentry->d_fsdata;
 
 	if (!capable(CAP_SYS_ADMIN))
 		return -EPERM;
@@ -41,8 +42,12 @@
 	vh->finder_info[0] = bvh->finder_info[0] =
 		cpu_to_be32(parent_ino(dentry));
 
-	/* Bootloader */
-	vh->finder_info[1] = bvh->finder_info[1] = cpu_to_be32(inode->i_ino);
+	/*
+	 * Bootloader. Just using the inode here breaks in the case of
+	 * hard links - the firmware wants the ID of the hard link file,
+	 * but the inode points at the indirect inode
+	 */
+	vh->finder_info[1] = bvh->finder_info[1] = cpu_to_be32(cnid);
 
 	/* Per spec, the OS X system folder - same as finder_info[0] here */
 	vh->finder_info[5] = bvh->finder_info[5] =
diff --git a/fs/hfsplus/super.c b/fs/hfsplus/super.c
index ceb1c28..a9bca4b 100644
--- a/fs/hfsplus/super.c
+++ b/fs/hfsplus/super.c
@@ -154,7 +154,7 @@
 {
 	dprint(DBG_INODE, "hfsplus_evict_inode: %lu\n", inode->i_ino);
 	truncate_inode_pages(&inode->i_data, 0);
-	end_writeback(inode);
+	clear_inode(inode);
 	if (HFSPLUS_IS_RSRC(inode)) {
 		HFSPLUS_I(HFSPLUS_I(inode)->rsrc_inode)->rsrc_inode = NULL;
 		iput(HFSPLUS_I(inode)->rsrc_inode);
diff --git a/fs/hfsplus/wrapper.c b/fs/hfsplus/wrapper.c
index 7daf4b8..90effcc 100644
--- a/fs/hfsplus/wrapper.c
+++ b/fs/hfsplus/wrapper.c
@@ -56,7 +56,7 @@
 	DECLARE_COMPLETION_ONSTACK(wait);
 	struct bio *bio;
 	int ret = 0;
-	unsigned int io_size;
+	u64 io_size;
 	loff_t start;
 	int offset;
 
diff --git a/fs/hostfs/hostfs_kern.c b/fs/hostfs/hostfs_kern.c
index 07c516b..2afa5bb 100644
--- a/fs/hostfs/hostfs_kern.c
+++ b/fs/hostfs/hostfs_kern.c
@@ -240,7 +240,7 @@
 static void hostfs_evict_inode(struct inode *inode)
 {
 	truncate_inode_pages(&inode->i_data, 0);
-	end_writeback(inode);
+	clear_inode(inode);
 	if (HOSTFS_I(inode)->fd != -1) {
 		close_file(&HOSTFS_I(inode)->fd);
 		HOSTFS_I(inode)->fd = -1;
diff --git a/fs/hpfs/alloc.c b/fs/hpfs/alloc.c
index 7a5eb2c..cdb84a8 100644
--- a/fs/hpfs/alloc.c
+++ b/fs/hpfs/alloc.c
@@ -16,9 +16,9 @@
 static int chk_if_allocated(struct super_block *s, secno sec, char *msg)
 {
 	struct quad_buffer_head qbh;
-	u32 *bmp;
+	__le32 *bmp;
 	if (!(bmp = hpfs_map_bitmap(s, sec >> 14, &qbh, "chk"))) goto fail;
-	if ((cpu_to_le32(bmp[(sec & 0x3fff) >> 5]) >> (sec & 0x1f)) & 1) {
+	if ((le32_to_cpu(bmp[(sec & 0x3fff) >> 5]) >> (sec & 0x1f)) & 1) {
 		hpfs_error(s, "sector '%s' - %08x not allocated in bitmap", msg, sec);
 		goto fail1;
 	}
@@ -62,7 +62,7 @@
 static secno alloc_in_bmp(struct super_block *s, secno near, unsigned n, unsigned forward)
 {
 	struct quad_buffer_head qbh;
-	unsigned *bmp;
+	__le32 *bmp;
 	unsigned bs = near & ~0x3fff;
 	unsigned nr = (near & 0x3fff) & ~(n - 1);
 	/*unsigned mnr;*/
@@ -236,7 +236,7 @@
 int hpfs_alloc_if_possible(struct super_block *s, secno sec)
 {
 	struct quad_buffer_head qbh;
-	u32 *bmp;
+	__le32 *bmp;
 	if (!(bmp = hpfs_map_bitmap(s, sec >> 14, &qbh, "aip"))) goto end;
 	if (le32_to_cpu(bmp[(sec & 0x3fff) >> 5]) & (1 << (sec & 0x1f))) {
 		bmp[(sec & 0x3fff) >> 5] &= cpu_to_le32(~(1 << (sec & 0x1f)));
@@ -254,7 +254,7 @@
 void hpfs_free_sectors(struct super_block *s, secno sec, unsigned n)
 {
 	struct quad_buffer_head qbh;
-	u32 *bmp;
+	__le32 *bmp;
 	struct hpfs_sb_info *sbi = hpfs_sb(s);
 	/*printk("2 - ");*/
 	if (!n) return;
@@ -299,7 +299,7 @@
 	int n_bmps = (hpfs_sb(s)->sb_fs_size + 0x4000 - 1) >> 14;
 	int b = hpfs_sb(s)->sb_c_bitmap & 0x0fffffff;
 	int i, j;
-	u32 *bmp;
+	__le32 *bmp;
 	struct quad_buffer_head qbh;
 	if ((bmp = hpfs_map_dnode_bitmap(s, &qbh))) {
 		for (j = 0; j < 512; j++) {
@@ -351,7 +351,7 @@
 		hpfs_free_sectors(s, dno, 4);
 	} else {
 		struct quad_buffer_head qbh;
-		u32 *bmp;
+		__le32 *bmp;
 		unsigned ssec = (dno - hpfs_sb(s)->sb_dirband_start) / 4;
 		if (!(bmp = hpfs_map_dnode_bitmap(s, &qbh))) {
 			return;
diff --git a/fs/hpfs/anode.c b/fs/hpfs/anode.c
index 08b503e..4bae4a4a 100644
--- a/fs/hpfs/anode.c
+++ b/fs/hpfs/anode.c
@@ -20,7 +20,7 @@
 	int c1, c2 = 0;
 	go_down:
 	if (hpfs_sb(s)->sb_chk) if (hpfs_stop_cycles(s, a, &c1, &c2, "hpfs_bplus_lookup")) return -1;
-	if (btree->internal) {
+	if (bp_internal(btree)) {
 		for (i = 0; i < btree->n_used_nodes; i++)
 			if (le32_to_cpu(btree->u.internal[i].file_secno) > sec) {
 				a = le32_to_cpu(btree->u.internal[i].down);
@@ -82,7 +82,7 @@
 		brelse(bh);
 		return -1;
 	}
-	if (btree->internal) {
+	if (bp_internal(btree)) {
 		a = le32_to_cpu(btree->u.internal[n].down);
 		btree->u.internal[n].file_secno = cpu_to_le32(-1);
 		mark_buffer_dirty(bh);
@@ -129,12 +129,12 @@
 		}
 		if (a == node && fnod) {
 			anode->up = cpu_to_le32(node);
-			anode->btree.fnode_parent = 1;
+			anode->btree.flags |= BP_fnode_parent;
 			anode->btree.n_used_nodes = btree->n_used_nodes;
 			anode->btree.first_free = btree->first_free;
 			anode->btree.n_free_nodes = 40 - anode->btree.n_used_nodes;
 			memcpy(&anode->u, &btree->u, btree->n_used_nodes * 12);
-			btree->internal = 1;
+			btree->flags |= BP_internal;
 			btree->n_free_nodes = 11;
 			btree->n_used_nodes = 1;
 			btree->first_free = cpu_to_le16((char *)&(btree->u.internal[1]) - (char *)btree);
@@ -184,7 +184,10 @@
 			hpfs_free_sectors(s, ra, 1);
 			if ((anode = hpfs_map_anode(s, na, &bh))) {
 				anode->up = cpu_to_le32(up);
-				anode->btree.fnode_parent = up == node && fnod;
+				if (up == node && fnod)
+					anode->btree.flags |= BP_fnode_parent;
+				else
+					anode->btree.flags &= ~BP_fnode_parent;
 				mark_buffer_dirty(bh);
 				brelse(bh);
 			}
@@ -198,7 +201,7 @@
 		if ((new_anode = hpfs_alloc_anode(s, a, &na, &bh))) {
 			anode = new_anode;
 			/*anode->up = cpu_to_le32(up != -1 ? up : ra);*/
-			anode->btree.internal = 1;
+			anode->btree.flags |= BP_internal;
 			anode->btree.n_used_nodes = 1;
 			anode->btree.n_free_nodes = 59;
 			anode->btree.first_free = cpu_to_le16(16);
@@ -215,7 +218,8 @@
 	}
 	if ((anode = hpfs_map_anode(s, na, &bh))) {
 		anode->up = cpu_to_le32(node);
-		if (fnod) anode->btree.fnode_parent = 1;
+		if (fnod)
+			anode->btree.flags |= BP_fnode_parent;
 		mark_buffer_dirty(bh);
 		brelse(bh);
 	}
@@ -234,18 +238,19 @@
 	}
 	ranode->up = cpu_to_le32(node);
 	memcpy(&ranode->btree, btree, le16_to_cpu(btree->first_free));
-	if (fnod) ranode->btree.fnode_parent = 1;
-	ranode->btree.n_free_nodes = (ranode->btree.internal ? 60 : 40) - ranode->btree.n_used_nodes;
-	if (ranode->btree.internal) for (n = 0; n < ranode->btree.n_used_nodes; n++) {
+	if (fnod)
+		ranode->btree.flags |= BP_fnode_parent;
+	ranode->btree.n_free_nodes = (bp_internal(&ranode->btree) ? 60 : 40) - ranode->btree.n_used_nodes;
+	if (bp_internal(&ranode->btree)) for (n = 0; n < ranode->btree.n_used_nodes; n++) {
 		struct anode *unode;
 		if ((unode = hpfs_map_anode(s, le32_to_cpu(ranode->u.internal[n].down), &bh1))) {
 			unode->up = cpu_to_le32(ra);
-			unode->btree.fnode_parent = 0;
+			unode->btree.flags &= ~BP_fnode_parent;
 			mark_buffer_dirty(bh1);
 			brelse(bh1);
 		}
 	}
-	btree->internal = 1;
+	btree->flags |= BP_internal;
 	btree->n_free_nodes = fnod ? 10 : 58;
 	btree->n_used_nodes = 2;
 	btree->first_free = cpu_to_le16((char *)&btree->u.internal[2] - (char *)btree);
@@ -278,7 +283,7 @@
 	int d1, d2;
 	go_down:
 	d2 = 0;
-	while (btree1->internal) {
+	while (bp_internal(btree1)) {
 		ano = le32_to_cpu(btree1->u.internal[pos].down);
 		if (level) brelse(bh);
 		if (hpfs_sb(s)->sb_chk)
@@ -412,13 +417,13 @@
 			btree->n_free_nodes = 8;
 			btree->n_used_nodes = 0;
 			btree->first_free = cpu_to_le16(8);
-			btree->internal = 0;
+			btree->flags &= ~BP_internal;
 			mark_buffer_dirty(bh);
 		} else hpfs_free_sectors(s, f, 1);
 		brelse(bh);
 		return;
 	}
-	while (btree->internal) {
+	while (bp_internal(btree)) {
 		nodes = btree->n_used_nodes + btree->n_free_nodes;
 		for (i = 0; i < btree->n_used_nodes; i++)
 			if (le32_to_cpu(btree->u.internal[i].file_secno) >= secs) goto f;
@@ -479,13 +484,13 @@
 	struct extended_attribute *ea;
 	struct extended_attribute *ea_end;
 	if (!(fnode = hpfs_map_fnode(s, fno, &bh))) return;
-	if (!fnode->dirflag) hpfs_remove_btree(s, &fnode->btree);
+	if (!fnode_is_dir(fnode)) hpfs_remove_btree(s, &fnode->btree);
 	else hpfs_remove_dtree(s, le32_to_cpu(fnode->u.external[0].disk_secno));
 	ea_end = fnode_end_ea(fnode);
 	for (ea = fnode_ea(fnode); ea < ea_end; ea = next_ea(ea))
-		if (ea->indirect)
-			hpfs_ea_remove(s, ea_sec(ea), ea->anode, ea_len(ea));
-	hpfs_ea_ext_remove(s, le32_to_cpu(fnode->ea_secno), fnode->ea_anode, le32_to_cpu(fnode->ea_size_l));
+		if (ea_indirect(ea))
+			hpfs_ea_remove(s, ea_sec(ea), ea_in_anode(ea), ea_len(ea));
+	hpfs_ea_ext_remove(s, le32_to_cpu(fnode->ea_secno), fnode_in_anode(fnode), le32_to_cpu(fnode->ea_size_l));
 	brelse(bh);
 	hpfs_free_sectors(s, fno, 1);
 }
diff --git a/fs/hpfs/buffer.c b/fs/hpfs/buffer.c
index 9ecde27..f49d149 100644
--- a/fs/hpfs/buffer.c
+++ b/fs/hpfs/buffer.c
@@ -156,7 +156,6 @@
 
 void hpfs_mark_4buffers_dirty(struct quad_buffer_head *qbh)
 {
-	PRINTK(("hpfs_mark_4buffers_dirty\n"));
 	memcpy(qbh->bh[0]->b_data, qbh->data, 512);
 	memcpy(qbh->bh[1]->b_data, qbh->data + 512, 512);
 	memcpy(qbh->bh[2]->b_data, qbh->data + 2 * 512, 512);
diff --git a/fs/hpfs/dir.c b/fs/hpfs/dir.c
index 2fa0089..b8472f8 100644
--- a/fs/hpfs/dir.c
+++ b/fs/hpfs/dir.c
@@ -87,7 +87,7 @@
 			ret = -EIOERROR;
 			goto out;
 		}
-		if (!fno->dirflag) {
+		if (!fnode_is_dir(fno)) {
 			e = 1;
 			hpfs_error(inode->i_sb, "not a directory, fnode %08lx",
 					(unsigned long)inode->i_ino);
diff --git a/fs/hpfs/dnode.c b/fs/hpfs/dnode.c
index 1e0e2ac..3228c52 100644
--- a/fs/hpfs/dnode.c
+++ b/fs/hpfs/dnode.c
@@ -153,7 +153,7 @@
 		}
 		de->length = cpu_to_le16(36);
 		de->down = 1;
-		*(dnode_secno *)((char *)de + 32) = cpu_to_le32(ptr);
+		*(__le32 *)((char *)de + 32) = cpu_to_le32(ptr);
 	}
 }
 
@@ -177,7 +177,7 @@
 	memmove((char *)de + d_size, de, (char *)de_end - (char *)de);
 	memset(de, 0, d_size);
 	if (down_ptr) {
-		*(dnode_secno *)((char *)de + d_size - 4) = cpu_to_le32(down_ptr);
+		*(__le32 *)((char *)de + d_size - 4) = cpu_to_le32(down_ptr);
 		de->down = 1;
 	}
 	de->length = cpu_to_le16(d_size);
@@ -656,7 +656,7 @@
 				del->down = 0;
 				d1->first_free = cpu_to_le32(le32_to_cpu(d1->first_free) - 4);
 			} else if (down)
-				*(dnode_secno *) ((void *) del + le16_to_cpu(del->length) - 4) = cpu_to_le32(down);
+				*(__le32 *) ((void *) del + le16_to_cpu(del->length) - 4) = cpu_to_le32(down);
 		} else goto endm;
 		if (!(de_cp = kmalloc(le16_to_cpu(de_prev->length), GFP_NOFS))) {
 			printk("HPFS: out of memory for dtree balancing\n");
@@ -672,7 +672,7 @@
 			de_prev->down = 1;
 			dnode->first_free = cpu_to_le32(le32_to_cpu(dnode->first_free) + 4);
 		}
-		*(dnode_secno *) ((void *) de_prev + le16_to_cpu(de_prev->length) - 4) = cpu_to_le32(ndown);
+		*(__le32 *) ((void *) de_prev + le16_to_cpu(de_prev->length) - 4) = cpu_to_le32(ndown);
 		hpfs_mark_4buffers_dirty(&qbh);
 		hpfs_brelse4(&qbh);
 		for_all_poss(i, hpfs_pos_subst, ((loff_t)up << 4) | (p - 1), 4);
@@ -1015,7 +1015,7 @@
 		kfree(name2);
 		return NULL;
 	}	
-	if (!upf->dirflag) {
+	if (!fnode_is_dir(upf)) {
 		brelse(bh);
 		hpfs_error(s, "fnode %08x has non-directory parent %08x", fno, le32_to_cpu(f->up));
 		kfree(name2);
diff --git a/fs/hpfs/ea.c b/fs/hpfs/ea.c
index d8b84d1..bcaafcd 100644
--- a/fs/hpfs/ea.c
+++ b/fs/hpfs/ea.c
@@ -23,15 +23,15 @@
 			return;
 		}
 		if (hpfs_ea_read(s, a, ano, pos, 4, ex)) return;
-		if (ea->indirect) {
+		if (ea_indirect(ea)) {
 			if (ea_valuelen(ea) != 8) {
-				hpfs_error(s, "ea->indirect set while ea->valuelen!=8, %s %08x, pos %08x",
+				hpfs_error(s, "ea_indirect(ea) set while ea->valuelen!=8, %s %08x, pos %08x",
 					ano ? "anode" : "sectors", a, pos);
 				return;
 			}
 			if (hpfs_ea_read(s, a, ano, pos + 4, ea->namelen + 9, ex+4))
 				return;
-			hpfs_ea_remove(s, ea_sec(ea), ea->anode, ea_len(ea));
+			hpfs_ea_remove(s, ea_sec(ea), ea_in_anode(ea), ea_len(ea));
 		}
 		pos += ea->namelen + ea_valuelen(ea) + 5;
 	}
@@ -81,7 +81,7 @@
 	struct extended_attribute *ea_end = fnode_end_ea(fnode);
 	for (ea = fnode_ea(fnode); ea < ea_end; ea = next_ea(ea))
 		if (!strcmp(ea->name, key)) {
-			if (ea->indirect)
+			if (ea_indirect(ea))
 				goto indirect;
 			if (ea_valuelen(ea) >= size)
 				return -EINVAL;
@@ -91,7 +91,7 @@
 		}
 	a = le32_to_cpu(fnode->ea_secno);
 	len = le32_to_cpu(fnode->ea_size_l);
-	ano = fnode->ea_anode;
+	ano = fnode_in_anode(fnode);
 	pos = 0;
 	while (pos < len) {
 		ea = (struct extended_attribute *)ex;
@@ -101,10 +101,10 @@
 			return -EIO;
 		}
 		if (hpfs_ea_read(s, a, ano, pos, 4, ex)) return -EIO;
-		if (hpfs_ea_read(s, a, ano, pos + 4, ea->namelen + 1 + (ea->indirect ? 8 : 0), ex + 4))
+		if (hpfs_ea_read(s, a, ano, pos + 4, ea->namelen + 1 + (ea_indirect(ea) ? 8 : 0), ex + 4))
 			return -EIO;
 		if (!strcmp(ea->name, key)) {
-			if (ea->indirect)
+			if (ea_indirect(ea))
 				goto indirect;
 			if (ea_valuelen(ea) >= size)
 				return -EINVAL;
@@ -119,7 +119,7 @@
 indirect:
 	if (ea_len(ea) >= size)
 		return -EINVAL;
-	if (hpfs_ea_read(s, ea_sec(ea), ea->anode, 0, ea_len(ea), buf))
+	if (hpfs_ea_read(s, ea_sec(ea), ea_in_anode(ea), 0, ea_len(ea), buf))
 		return -EIO;
 	buf[ea_len(ea)] = 0;
 	return 0;
@@ -136,8 +136,8 @@
 	struct extended_attribute *ea_end = fnode_end_ea(fnode);
 	for (ea = fnode_ea(fnode); ea < ea_end; ea = next_ea(ea))
 		if (!strcmp(ea->name, key)) {
-			if (ea->indirect)
-				return get_indirect_ea(s, ea->anode, ea_sec(ea), *size = ea_len(ea));
+			if (ea_indirect(ea))
+				return get_indirect_ea(s, ea_in_anode(ea), ea_sec(ea), *size = ea_len(ea));
 			if (!(ret = kmalloc((*size = ea_valuelen(ea)) + 1, GFP_NOFS))) {
 				printk("HPFS: out of memory for EA\n");
 				return NULL;
@@ -148,7 +148,7 @@
 		}
 	a = le32_to_cpu(fnode->ea_secno);
 	len = le32_to_cpu(fnode->ea_size_l);
-	ano = fnode->ea_anode;
+	ano = fnode_in_anode(fnode);
 	pos = 0;
 	while (pos < len) {
 		char ex[4 + 255 + 1 + 8];
@@ -159,11 +159,11 @@
 			return NULL;
 		}
 		if (hpfs_ea_read(s, a, ano, pos, 4, ex)) return NULL;
-		if (hpfs_ea_read(s, a, ano, pos + 4, ea->namelen + 1 + (ea->indirect ? 8 : 0), ex + 4))
+		if (hpfs_ea_read(s, a, ano, pos + 4, ea->namelen + 1 + (ea_indirect(ea) ? 8 : 0), ex + 4))
 			return NULL;
 		if (!strcmp(ea->name, key)) {
-			if (ea->indirect)
-				return get_indirect_ea(s, ea->anode, ea_sec(ea), *size = ea_len(ea));
+			if (ea_indirect(ea))
+				return get_indirect_ea(s, ea_in_anode(ea), ea_sec(ea), *size = ea_len(ea));
 			if (!(ret = kmalloc((*size = ea_valuelen(ea)) + 1, GFP_NOFS))) {
 				printk("HPFS: out of memory for EA\n");
 				return NULL;
@@ -199,9 +199,9 @@
 	struct extended_attribute *ea_end = fnode_end_ea(fnode);
 	for (ea = fnode_ea(fnode); ea < ea_end; ea = next_ea(ea))
 		if (!strcmp(ea->name, key)) {
-			if (ea->indirect) {
+			if (ea_indirect(ea)) {
 				if (ea_len(ea) == size)
-					set_indirect_ea(s, ea->anode, ea_sec(ea), data, size);
+					set_indirect_ea(s, ea_in_anode(ea), ea_sec(ea), data, size);
 			} else if (ea_valuelen(ea) == size) {
 				memcpy(ea_data(ea), data, size);
 			}
@@ -209,7 +209,7 @@
 		}
 	a = le32_to_cpu(fnode->ea_secno);
 	len = le32_to_cpu(fnode->ea_size_l);
-	ano = fnode->ea_anode;
+	ano = fnode_in_anode(fnode);
 	pos = 0;
 	while (pos < len) {
 		char ex[4 + 255 + 1 + 8];
@@ -220,12 +220,12 @@
 			return;
 		}
 		if (hpfs_ea_read(s, a, ano, pos, 4, ex)) return;
-		if (hpfs_ea_read(s, a, ano, pos + 4, ea->namelen + 1 + (ea->indirect ? 8 : 0), ex + 4))
+		if (hpfs_ea_read(s, a, ano, pos + 4, ea->namelen + 1 + (ea_indirect(ea) ? 8 : 0), ex + 4))
 			return;
 		if (!strcmp(ea->name, key)) {
-			if (ea->indirect) {
+			if (ea_indirect(ea)) {
 				if (ea_len(ea) == size)
-					set_indirect_ea(s, ea->anode, ea_sec(ea), data, size);
+					set_indirect_ea(s, ea_in_anode(ea), ea_sec(ea), data, size);
 			}
 			else {
 				if (ea_valuelen(ea) == size)
@@ -246,7 +246,7 @@
 	if (le16_to_cpu(fnode->ea_offs) < 0xc4 || le16_to_cpu(fnode->ea_offs) + le16_to_cpu(fnode->acl_size_s) + le16_to_cpu(fnode->ea_size_s) > 0x200) {
 		hpfs_error(s, "fnode %08lx: ea_offs == %03x, ea_size_s == %03x",
 			(unsigned long)inode->i_ino,
-			le32_to_cpu(fnode->ea_offs), le16_to_cpu(fnode->ea_size_s));
+			le16_to_cpu(fnode->ea_offs), le16_to_cpu(fnode->ea_size_s));
 		return;
 	}
 	if ((le16_to_cpu(fnode->ea_size_s) || !le32_to_cpu(fnode->ea_size_l)) &&
@@ -276,7 +276,7 @@
 		fnode->ea_size_l = cpu_to_le32(le16_to_cpu(fnode->ea_size_s));
 		fnode->ea_size_s = cpu_to_le16(0);
 		fnode->ea_secno = cpu_to_le32(n);
-		fnode->ea_anode = cpu_to_le32(0);
+		fnode->flags &= ~FNODE_anode;
 		mark_buffer_dirty(bh);
 		brelse(bh);
 	}
@@ -288,9 +288,9 @@
 			secno q = hpfs_alloc_sector(s, fno, 1, 0);
 			if (!q) goto bail;
 			fnode->ea_secno = cpu_to_le32(q);
-			fnode->ea_anode = 0;
+			fnode->flags &= ~FNODE_anode;
 			len++;
-		} else if (!fnode->ea_anode) {
+		} else if (!fnode_in_anode(fnode)) {
 			if (hpfs_alloc_if_possible(s, le32_to_cpu(fnode->ea_secno) + len)) {
 				len++;
 			} else {
@@ -310,7 +310,7 @@
 				anode->u.external[0].length = cpu_to_le32(len);
 				mark_buffer_dirty(bh);
 				brelse(bh);
-				fnode->ea_anode = 1;
+				fnode->flags |= FNODE_anode;
 				fnode->ea_secno = cpu_to_le32(a_s);*/
 				secno new_sec;
 				int i;
@@ -338,7 +338,7 @@
 				len = (pos + 511) >> 9;
 			}
 		}
-		if (fnode->ea_anode) {
+		if (fnode_in_anode(fnode)) {
 			if (hpfs_add_sector_to_btree(s, le32_to_cpu(fnode->ea_secno),
 						     0, len) != -1) {
 				len++;
@@ -351,16 +351,16 @@
 	h[1] = strlen(key);
 	h[2] = size & 0xff;
 	h[3] = size >> 8;
-	if (hpfs_ea_write(s, le32_to_cpu(fnode->ea_secno), fnode->ea_anode, le32_to_cpu(fnode->ea_size_l), 4, h)) goto bail;
-	if (hpfs_ea_write(s, le32_to_cpu(fnode->ea_secno), fnode->ea_anode, le32_to_cpu(fnode->ea_size_l) + 4, h[1] + 1, key)) goto bail;
-	if (hpfs_ea_write(s, le32_to_cpu(fnode->ea_secno), fnode->ea_anode, le32_to_cpu(fnode->ea_size_l) + 5 + h[1], size, data)) goto bail;
+	if (hpfs_ea_write(s, le32_to_cpu(fnode->ea_secno), fnode_in_anode(fnode), le32_to_cpu(fnode->ea_size_l), 4, h)) goto bail;
+	if (hpfs_ea_write(s, le32_to_cpu(fnode->ea_secno), fnode_in_anode(fnode), le32_to_cpu(fnode->ea_size_l) + 4, h[1] + 1, key)) goto bail;
+	if (hpfs_ea_write(s, le32_to_cpu(fnode->ea_secno), fnode_in_anode(fnode), le32_to_cpu(fnode->ea_size_l) + 5 + h[1], size, data)) goto bail;
 	fnode->ea_size_l = cpu_to_le32(pos);
 	ret:
 	hpfs_i(inode)->i_ea_size += 5 + strlen(key) + size;
 	return;
 	bail:
 	if (le32_to_cpu(fnode->ea_secno))
-		if (fnode->ea_anode) hpfs_truncate_btree(s, le32_to_cpu(fnode->ea_secno), 1, (le32_to_cpu(fnode->ea_size_l) + 511) >> 9);
+		if (fnode_in_anode(fnode)) hpfs_truncate_btree(s, le32_to_cpu(fnode->ea_secno), 1, (le32_to_cpu(fnode->ea_size_l) + 511) >> 9);
 		else hpfs_free_sectors(s, le32_to_cpu(fnode->ea_secno) + ((le32_to_cpu(fnode->ea_size_l) + 511) >> 9), len - ((le32_to_cpu(fnode->ea_size_l) + 511) >> 9));
 	else fnode->ea_secno = fnode->ea_size_l = cpu_to_le32(0);
 }
diff --git a/fs/hpfs/hpfs.h b/fs/hpfs/hpfs.h
index 8b0650a..cce025a 100644
--- a/fs/hpfs/hpfs.h
+++ b/fs/hpfs/hpfs.h
@@ -51,11 +51,11 @@
   u8 n_rootdir_entries[2];
   u8 n_sectors_s[2];
   u8 media_byte;
-  u16 sectors_per_fat;
-  u16 sectors_per_track;
-  u16 heads_per_cyl;
-  u32 n_hidden_sectors;
-  u32 n_sectors_l;		/* size of partition */
+  __le16 sectors_per_fat;
+  __le16 sectors_per_track;
+  __le16 heads_per_cyl;
+  __le32 n_hidden_sectors;
+  __le32 n_sectors_l;		/* size of partition */
   u8 drive_number;
   u8 mbz;
   u8 sig_28h;			/* 28h */
@@ -63,7 +63,7 @@
   u8 vol_label[11];
   u8 sig_hpfs[8];		/* "HPFS    " */
   u8 pad[448];
-  u16 magic;			/* aa55 */
+  __le16 magic;			/* aa55 */
 };
 
 
@@ -75,28 +75,28 @@
 
 struct hpfs_super_block
 {
-  u32 magic;				/* f995 e849 */
-  u32 magic1;				/* fa53 e9c5, more magic? */
+  __le32 magic;				/* f995 e849 */
+  __le32 magic1;			/* fa53 e9c5, more magic? */
   u8 version;				/* version of a filesystem  usually 2 */
   u8 funcversion;			/* functional version - oldest version
   					   of filesystem that can understand
 					   this disk */
-  u16 zero;				/* 0 */
-  fnode_secno root;			/* fnode of root directory */
-  secno n_sectors;			/* size of filesystem */
-  u32 n_badblocks;			/* number of bad blocks */
-  secno bitmaps;			/* pointers to free space bit maps */
-  u32 zero1;				/* 0 */
-  secno badblocks;			/* bad block list */
-  u32 zero3;				/* 0 */
-  time32_t last_chkdsk;			/* date last checked, 0 if never */
-  time32_t last_optimize;		/* date last optimized, 0 if never */
-  secno n_dir_band;			/* number of sectors in dir band */
-  secno dir_band_start;			/* first sector in dir band */
-  secno dir_band_end;			/* last sector in dir band */
-  secno dir_band_bitmap;		/* free space map, 1 dnode per bit */
+  __le16 zero;				/* 0 */
+  __le32 root;				/* fnode of root directory */
+  __le32 n_sectors;			/* size of filesystem */
+  __le32 n_badblocks;			/* number of bad blocks */
+  __le32 bitmaps;			/* pointers to free space bit maps */
+  __le32 zero1;				/* 0 */
+  __le32 badblocks;			/* bad block list */
+  __le32 zero3;				/* 0 */
+  __le32 last_chkdsk;			/* date last checked, 0 if never */
+  __le32 last_optimize;			/* date last optimized, 0 if never */
+  __le32 n_dir_band;			/* number of sectors in dir band */
+  __le32 dir_band_start;			/* first sector in dir band */
+  __le32 dir_band_end;			/* last sector in dir band */
+  __le32 dir_band_bitmap;		/* free space map, 1 dnode per bit */
   u8 volume_name[32];			/* not used */
-  secno user_id_table;			/* 8 preallocated sectors - user id */
+  __le32 user_id_table;			/* 8 preallocated sectors - user id */
   u32 zero6[103];			/* 0 */
 };
 
@@ -109,8 +109,8 @@
 
 struct hpfs_spare_block
 {
-  u32 magic;				/* f991 1849 */
-  u32 magic1;				/* fa52 29c5, more magic? */
+  __le32 magic;				/* f991 1849 */
+  __le32 magic1;				/* fa52 29c5, more magic? */
 
 #ifdef __LITTLE_ENDIAN
   u8 dirty: 1;				/* 0 clean, 1 "improperly stopped" */
@@ -153,21 +153,21 @@
   u8 mm_contlgulty;
   u8 unused;
 
-  secno hotfix_map;			/* info about remapped bad sectors */
-  u32 n_spares_used;			/* number of hotfixes */
-  u32 n_spares;				/* number of spares in hotfix map */
-  u32 n_dnode_spares_free;		/* spare dnodes unused */
-  u32 n_dnode_spares;			/* length of spare_dnodes[] list,
+  __le32 hotfix_map;			/* info about remapped bad sectors */
+  __le32 n_spares_used;			/* number of hotfixes */
+  __le32 n_spares;			/* number of spares in hotfix map */
+  __le32 n_dnode_spares_free;		/* spare dnodes unused */
+  __le32 n_dnode_spares;		/* length of spare_dnodes[] list,
 					   follows in this block*/
-  secno code_page_dir;			/* code page directory block */
-  u32 n_code_pages;			/* number of code pages */
-  u32 super_crc;			/* on HPFS386 and LAN Server this is
+  __le32 code_page_dir;			/* code page directory block */
+  __le32 n_code_pages;			/* number of code pages */
+  __le32 super_crc;			/* on HPFS386 and LAN Server this is
   					   checksum of superblock, on normal
 					   OS/2 unused */
-  u32 spare_crc;			/* on HPFS386 checksum of spareblock */
-  u32 zero1[15];			/* unused */
-  dnode_secno spare_dnodes[100];	/* emergency free dnode list */
-  u32 zero2[1];				/* room for more? */
+  __le32 spare_crc;			/* on HPFS386 checksum of spareblock */
+  __le32 zero1[15];			/* unused */
+  __le32 spare_dnodes[100];		/* emergency free dnode list */
+  __le32 zero2[1];			/* room for more? */
 };
 
 /* The bad block list is 4 sectors long.  The first word must be zero,
@@ -202,18 +202,18 @@
 
 struct code_page_directory
 {
-  u32 magic;				/* 4945 21f7 */
-  u32 n_code_pages;			/* number of pointers following */
-  u32 zero1[2];
+  __le32 magic;				/* 4945 21f7 */
+  __le32 n_code_pages;			/* number of pointers following */
+  __le32 zero1[2];
   struct {
-    u16 ix;				/* index */
-    u16 code_page_number;		/* code page number */
-    u32 bounds;				/* matches corresponding word
+    __le16 ix;				/* index */
+    __le16 code_page_number;		/* code page number */
+    __le32 bounds;			/* matches corresponding word
 					   in data block */
-    secno code_page_data;		/* sector number of a code_page_data
+    __le32 code_page_data;		/* sector number of a code_page_data
 					   containing c.p. array */
-    u16 index;				/* index in c.p. array in that sector*/
-    u16 unknown;			/* some unknown value; usually 0;
+    __le16 index;			/* index in c.p. array in that sector*/
+    __le16 unknown;			/* some unknown value; usually 0;
     					   2 in Japanese version */
   } array[31];				/* unknown length */
 };
@@ -224,19 +224,19 @@
 
 struct code_page_data
 {
-  u32 magic;				/* 8945 21f7 */
-  u32 n_used;				/* # elements used in c_p_data[] */
-  u32 bounds[3];			/* looks a bit like
+  __le32 magic;				/* 8945 21f7 */
+  __le32 n_used;			/* # elements used in c_p_data[] */
+  __le32 bounds[3];			/* looks a bit like
 					     (beg1,end1), (beg2,end2)
 					   one byte each */
-  u16 offs[3];				/* offsets from start of sector
+  __le16 offs[3];			/* offsets from start of sector
 					   to start of c_p_data[ix] */
   struct {
-    u16 ix;				/* index */
-    u16 code_page_number;		/* code page number */
-    u16 unknown;			/* the same as in cp directory */
+    __le16 ix;				/* index */
+    __le16 code_page_number;		/* code page number */
+    __le16 unknown;			/* the same as in cp directory */
     u8 map[128];			/* upcase table for chars 80..ff */
-    u16 zero2;
+    __le16 zero2;
   } code_page[3];
   u8 incognita[78];
 };
@@ -278,8 +278,8 @@
 #define DNODE_MAGIC   0x77e40aae
 
 struct dnode {
-  u32 magic;				/* 77e4 0aae */
-  u32 first_free;			/* offset from start of dnode to
+  __le32 magic;				/* 77e4 0aae */
+  __le32 first_free;			/* offset from start of dnode to
 					   first free dir entry */
 #ifdef __LITTLE_ENDIAN
   u8 root_dnode: 1;			/* Is it root dnode? */
@@ -293,14 +293,14 @@
   u8 root_dnode: 1;			/* Is it root dnode? */
 #endif
   u8 increment_me2[3];
-  secno up;				/* (root dnode) directory's fnode
+  __le32 up;				/* (root dnode) directory's fnode
 					   (nonroot) parent dnode */
-  dnode_secno self;			/* pointer to this dnode */
+  __le32 self;			/* pointer to this dnode */
   u8 dirent[2028];			/* one or more dirents */
 };
 
 struct hpfs_dirent {
-  u16 length;				/* offset to next dirent */
+  __le16 length;			/* offset to next dirent */
 
 #ifdef __LITTLE_ENDIAN
   u8 first: 1;				/* set on phony ^A^A (".") entry */
@@ -346,12 +346,12 @@
   u8 read_only: 1;			/* dos attrib */
 #endif
 
-  fnode_secno fnode;			/* fnode giving allocation info */
-  time32_t write_date;			/* mtime */
-  u32 file_size;			/* file length, bytes */
-  time32_t read_date;			/* atime */
-  time32_t creation_date;			/* ctime */
-  u32 ea_size;				/* total EA length, bytes */
+  __le32 fnode;				/* fnode giving allocation info */
+  __le32 write_date;			/* mtime */
+  __le32 file_size;			/* file length, bytes */
+  __le32 read_date;			/* atime */
+  __le32 creation_date;			/* ctime */
+  __le32 ea_size;			/* total EA length, bytes */
   u8 no_of_acls;			/* number of ACL's (low 3 bits) */
   u8 ix;				/* code page index (of filename), see
 					   struct code_page_data */
@@ -375,50 +375,36 @@
 
 struct bplus_leaf_node
 {
-  u32 file_secno;			/* first file sector in extent */
-  u32 length;				/* length, sectors */
-  secno disk_secno;			/* first corresponding disk sector */
+  __le32 file_secno;			/* first file sector in extent */
+  __le32 length;			/* length, sectors */
+  __le32 disk_secno;			/* first corresponding disk sector */
 };
 
 struct bplus_internal_node
 {
-  u32 file_secno;			/* subtree maps sectors < this  */
-  anode_secno down;			/* pointer to subtree */
+  __le32 file_secno;			/* subtree maps sectors < this  */
+  __le32 down;				/* pointer to subtree */
 };
 
+enum {
+	BP_hbff = 1,
+	BP_fnode_parent = 0x20,
+	BP_binary_search = 0x40,
+	BP_internal = 0x80
+};
 struct bplus_header
 {
-#ifdef __LITTLE_ENDIAN
-  u8 hbff: 1;			/* high bit of first free entry offset */
-  u8 flag1234: 4;
-  u8 fnode_parent: 1;			/* ? we're pointed to by an fnode,
+  u8 flags;				/* bit 0 - high bit of first free entry offset
+					   bit 5 - we're pointed to by an fnode,
 					   the data btree or some ea or the
-					   main ea bootage pointer ea_secno */
-					/* also can get set in fnodes, which
-					   may be a chkdsk glitch or may mean
-					   this bit is irrelevant in fnodes,
-					   or this interpretation is all wet */
-  u8 binary_search: 1;			/* suggest binary search (unused) */
-  u8 internal: 1;			/* 1 -> (internal) tree of anodes
-					   0 -> (leaf) list of extents */
-#else
-  u8 internal: 1;			/* 1 -> (internal) tree of anodes
-					   0 -> (leaf) list of extents */
-  u8 binary_search: 1;			/* suggest binary search (unused) */
-  u8 fnode_parent: 1;			/* ? we're pointed to by an fnode,
-					   the data btree or some ea or the
-					   main ea bootage pointer ea_secno */
-					/* also can get set in fnodes, which
-					   may be a chkdsk glitch or may mean
-					   this bit is irrelevant in fnodes,
-					   or this interpretation is all wet */
-  u8 flag1234: 4;
-  u8 hbff: 1;			/* high bit of first free entry offset */
-#endif
+					   main ea bootage pointer ea_secno
+					   bit 6 - suggest binary search (unused)
+					   bit 7 - 1 -> (internal) tree of anodes
+						   0 -> (leaf) list of extents */
   u8 fill[3];
   u8 n_free_nodes;			/* free nodes in following array */
   u8 n_used_nodes;			/* used nodes in following array */
-  u16 first_free;			/* offset from start of header to
+  __le16 first_free;			/* offset from start of header to
 					   first free node in array */
   union {
     struct bplus_internal_node internal[0]; /* (internal) 2-word entries giving
@@ -428,6 +414,16 @@
   } u;
 };
 
+static inline bool bp_internal(struct bplus_header *bp)
+{
+	return bp->flags & BP_internal;
+}
+
+static inline bool bp_fnode_parent(struct bplus_header *bp)
+{
+	return bp->flags & BP_fnode_parent;
+}
+
 /* fnode: root of allocation b+ tree, and EA's */
 
 /* Every file and every directory has one fnode, pointed to by the directory
@@ -436,62 +432,56 @@
 
 #define FNODE_MAGIC 0xf7e40aae
 
+enum {FNODE_anode = cpu_to_le16(2), FNODE_dir = cpu_to_le16(256)};
 struct fnode
 {
-  u32 magic;				/* f7e4 0aae */
-  u32 zero1[2];				/* read history */
+  __le32 magic;				/* f7e4 0aae */
+  __le32 zero1[2];			/* read history */
   u8 len, name[15];			/* true length, truncated name */
-  fnode_secno up;			/* pointer to file's directory fnode */
-  secno acl_size_l;
-  secno acl_secno;
-  u16 acl_size_s;
+  __le32 up;				/* pointer to file's directory fnode */
+  __le32 acl_size_l;
+  __le32 acl_secno;
+  __le16 acl_size_s;
   u8 acl_anode;
   u8 zero2;				/* history bit count */
-  u32 ea_size_l;			/* length of disk-resident ea's */
-  secno ea_secno;			/* first sector of disk-resident ea's*/
-  u16 ea_size_s;			/* length of fnode-resident ea's */
+  __le32 ea_size_l;			/* length of disk-resident ea's */
+  __le32 ea_secno;			/* first sector of disk-resident ea's*/
+  __le16 ea_size_s;			/* length of fnode-resident ea's */
 
-#ifdef __LITTLE_ENDIAN
-  u8 flag0: 1;
-  u8 ea_anode: 1;			/* 1 -> ea_secno is an anode */
-  u8 flag234567: 6;
-#else
-  u8 flag234567: 6;
-  u8 ea_anode: 1;			/* 1 -> ea_secno is an anode */
-  u8 flag0: 1;
-#endif
-
-#ifdef __LITTLE_ENDIAN
-  u8 dirflag: 1;			/* 1 -> directory.  first & only extent
+  __le16 flags;				/* bit 1 set -> ea_secno is an anode */
+					/* bit 8 set -> directory.  first & only extent
 					   points to dnode. */
-  u8 flag9012345: 7;
-#else
-  u8 flag9012345: 7;
-  u8 dirflag: 1;			/* 1 -> directory.  first & only extent
-					   points to dnode. */
-#endif
-
   struct bplus_header btree;		/* b+ tree, 8 extents or 12 subtrees */
   union {
     struct bplus_leaf_node external[8];
     struct bplus_internal_node internal[12];
   } u;
 
-  u32 file_size;			/* file length, bytes */
-  u32 n_needea;				/* number of EA's with NEEDEA set */
+  __le32 file_size;			/* file length, bytes */
+  __le32 n_needea;			/* number of EA's with NEEDEA set */
   u8 user_id[16];			/* unused */
-  u16 ea_offs;				/* offset from start of fnode
+  __le16 ea_offs;			/* offset from start of fnode
 					   to first fnode-resident ea */
   u8 dasd_limit_treshhold;
   u8 dasd_limit_delta;
-  u32 dasd_limit;
-  u32 dasd_usage;
+  __le32 dasd_limit;
+  __le32 dasd_usage;
   u8 ea[316];				/* zero or more EA's, packed together
 					   with no alignment padding.
 					   (Do not use this name, get here
 					   via fnode + ea_offs. I think.) */
 };
 
+static inline bool fnode_in_anode(struct fnode *p)
+{
+	return (p->flags & FNODE_anode) != 0;
+}
+
+static inline bool fnode_is_dir(struct fnode *p)
+{
+	return (p->flags & FNODE_dir) != 0;
+}
+
 
 /* anode: 99.44% pure allocation tree */
 
@@ -499,9 +489,9 @@
 
 struct anode
 {
-  u32 magic;				/* 37e4 0aae */
-  anode_secno self;			/* pointer to this anode */
-  secno up;				/* parent anode or fnode */
+  __le32 magic;				/* 37e4 0aae */
+  __le32 self;				/* pointer to this anode */
+  __le32 up;				/* parent anode or fnode */
 
   struct bplus_header btree;		/* b+tree, 40 extents or 60 subtrees */
   union {
@@ -509,7 +499,7 @@
     struct bplus_internal_node internal[60];
   } u;
 
-  u32 fill[3];				/* unused */
+  __le32 fill[3];			/* unused */
 };
 
 
@@ -528,32 +518,23 @@
    run, or in multiple runs.  Flags in the fnode tell whether the EA list
    is immediate, in a single run, or in multiple runs. */
 
+enum {EA_indirect = 1, EA_anode = 2, EA_needea = 128 };
 struct extended_attribute
 {
-#ifdef __LITTLE_ENDIAN
-  u8 indirect: 1;			/* 1 -> value gives sector number
+  u8 flags;				/* bit 0 set -> value gives sector number
 					   where real value starts */
-  u8 anode: 1;				/* 1 -> sector is an anode
+					/* bit 1 set -> sector is an anode
 					   that points to fragmented value */
-  u8 flag23456: 5;
-  u8 needea: 1;				/* required ea */
-#else
-  u8 needea: 1;				/* required ea */
-  u8 flag23456: 5;
-  u8 anode: 1;				/* 1 -> sector is an anode
-					   that points to fragmented value */
-  u8 indirect: 1;			/* 1 -> value gives sector number
-					   where real value starts */
-#endif
+					/* bit 7 set -> required ea */
   u8 namelen;				/* length of name, bytes */
   u8 valuelen_lo;			/* length of value, bytes */
   u8 valuelen_hi;			/* length of value, bytes */
-  u8 name[0];
+  u8 name[];
   /*
     u8 name[namelen];			ascii attrib name
     u8 nul;				terminating '\0', not counted
     u8 value[valuelen];			value, arbitrary
-      if this.indirect, valuelen is 8 and the value is
+      if this.flags & 1, valuelen is 8 and the value is
         u32 length;			real length of value, bytes
         secno secno;			sector address where it starts
       if this.anode, the above sector number is the root of an anode tree
@@ -561,6 +542,16 @@
   */
 };
 
+static inline bool ea_indirect(struct extended_attribute *ea)
+{
+	return ea->flags & EA_indirect;
+}
+
+static inline bool ea_in_anode(struct extended_attribute *ea)
+{
+	return ea->flags & EA_anode;
+}
+
 /*
    Local Variables:
    comment-column: 40
diff --git a/fs/hpfs/hpfs_fn.h b/fs/hpfs/hpfs_fn.h
index de94617..c07ef1f 100644
--- a/fs/hpfs/hpfs_fn.h
+++ b/fs/hpfs/hpfs_fn.h
@@ -35,13 +35,6 @@
 
 #define CHKCOND(x,y) if (!(x)) printk y
 
-#ifdef DBG
-#define PRINTK(x) printk x
-#else
-#undef PRINTK
-#define PRINTK(x)
-#endif
-
 struct hpfs_inode_info {
 	loff_t mmu_private;
 	ino_t i_parent_dir;	/* (directories) gives fnode of parent dir */
@@ -82,7 +75,7 @@
 	unsigned char *sb_cp_table;	/* code page tables: */
 					/* 	128 bytes uppercasing table & */
 					/*	128 bytes lowercasing table */
-	unsigned *sb_bmp_dir;		/* main bitmap directory */
+	__le32 *sb_bmp_dir;		/* main bitmap directory */
 	unsigned sb_c_bitmap;		/* current bitmap */
 	unsigned sb_max_fwd_alloc;	/* max forwad allocation */
 	int sb_timeshift;
@@ -100,7 +93,7 @@
 static inline dnode_secno de_down_pointer (struct hpfs_dirent *de)
 {
   CHKCOND(de->down,("HPFS: de_down_pointer: !de->down\n"));
-  return le32_to_cpu(*(dnode_secno *) ((void *) de + le16_to_cpu(de->length) - 4));
+  return le32_to_cpu(*(__le32 *) ((void *) de + le16_to_cpu(de->length) - 4));
 }
 
 /* The first dir entry in a dnode */
@@ -148,12 +141,12 @@
 
 static inline secno ea_sec(struct extended_attribute *ea)
 {
-	return le32_to_cpu(get_unaligned((secno *)((char *)ea + 9 + ea->namelen)));
+	return le32_to_cpu(get_unaligned((__le32 *)((char *)ea + 9 + ea->namelen)));
 }
 
 static inline secno ea_len(struct extended_attribute *ea)
 {
-	return le32_to_cpu(get_unaligned((secno *)((char *)ea + 5 + ea->namelen)));
+	return le32_to_cpu(get_unaligned((__le32 *)((char *)ea + 5 + ea->namelen)));
 }
 
 static inline char *ea_data(struct extended_attribute *ea)
@@ -178,7 +171,7 @@
 	dst->not_8x3 = n;
 }
 
-static inline unsigned tstbits(u32 *bmp, unsigned b, unsigned n)
+static inline unsigned tstbits(__le32 *bmp, unsigned b, unsigned n)
 {
 	int i;
 	if ((b >= 0x4000) || (b + n - 1 >= 0x4000)) return n;
@@ -275,10 +268,10 @@
 
 /* map.c */
 
-unsigned *hpfs_map_dnode_bitmap(struct super_block *, struct quad_buffer_head *);
-unsigned *hpfs_map_bitmap(struct super_block *, unsigned, struct quad_buffer_head *, char *);
+__le32 *hpfs_map_dnode_bitmap(struct super_block *, struct quad_buffer_head *);
+__le32 *hpfs_map_bitmap(struct super_block *, unsigned, struct quad_buffer_head *, char *);
 unsigned char *hpfs_load_code_page(struct super_block *, secno);
-secno *hpfs_load_bitmap_directory(struct super_block *, secno bmp);
+__le32 *hpfs_load_bitmap_directory(struct super_block *, secno bmp);
 struct fnode *hpfs_map_fnode(struct super_block *s, ino_t, struct buffer_head **);
 struct anode *hpfs_map_anode(struct super_block *s, anode_secno, struct buffer_head **);
 struct dnode *hpfs_map_dnode(struct super_block *s, dnode_secno, struct quad_buffer_head *);
diff --git a/fs/hpfs/inode.c b/fs/hpfs/inode.c
index 3b2cec2..ed671e0 100644
--- a/fs/hpfs/inode.c
+++ b/fs/hpfs/inode.c
@@ -110,7 +110,7 @@
 			}
 		}
 	}
-	if (fnode->dirflag) {
+	if (fnode_is_dir(fnode)) {
 		int n_dnodes, n_subdirs;
 		i->i_mode |= S_IFDIR;
 		i->i_op = &hpfs_dir_iops;
@@ -299,7 +299,7 @@
 void hpfs_evict_inode(struct inode *inode)
 {
 	truncate_inode_pages(&inode->i_data, 0);
-	end_writeback(inode);
+	clear_inode(inode);
 	if (!inode->i_nlink) {
 		hpfs_lock(inode->i_sb);
 		hpfs_remove_fnode(inode->i_sb, inode->i_ino);
diff --git a/fs/hpfs/map.c b/fs/hpfs/map.c
index a790821..4acb19d 100644
--- a/fs/hpfs/map.c
+++ b/fs/hpfs/map.c
@@ -8,12 +8,12 @@
 
 #include "hpfs_fn.h"
 
-unsigned *hpfs_map_dnode_bitmap(struct super_block *s, struct quad_buffer_head *qbh)
+__le32 *hpfs_map_dnode_bitmap(struct super_block *s, struct quad_buffer_head *qbh)
 {
 	return hpfs_map_4sectors(s, hpfs_sb(s)->sb_dmap, qbh, 0);
 }
 
-unsigned int *hpfs_map_bitmap(struct super_block *s, unsigned bmp_block,
+__le32 *hpfs_map_bitmap(struct super_block *s, unsigned bmp_block,
 			 struct quad_buffer_head *qbh, char *id)
 {
 	secno sec;
@@ -89,18 +89,18 @@
 	return cp_table;
 }
 
-secno *hpfs_load_bitmap_directory(struct super_block *s, secno bmp)
+__le32 *hpfs_load_bitmap_directory(struct super_block *s, secno bmp)
 {
 	struct buffer_head *bh;
 	int n = (hpfs_sb(s)->sb_fs_size + 0x200000 - 1) >> 21;
 	int i;
-	secno *b;
+	__le32 *b;
 	if (!(b = kmalloc(n * 512, GFP_KERNEL))) {
 		printk("HPFS: can't allocate memory for bitmap directory\n");
 		return NULL;
 	}	
 	for (i=0;i<n;i++) {
-		secno *d = hpfs_map_sector(s, bmp+i, &bh, n - i - 1);
+		__le32 *d = hpfs_map_sector(s, bmp+i, &bh, n - i - 1);
 		if (!d) {
 			kfree(b);
 			return NULL;
@@ -130,16 +130,16 @@
 					(unsigned long)ino);
 				goto bail;
 			}
-			if (!fnode->dirflag) {
+			if (!fnode_is_dir(fnode)) {
 				if ((unsigned)fnode->btree.n_used_nodes + (unsigned)fnode->btree.n_free_nodes !=
-				    (fnode->btree.internal ? 12 : 8)) {
+				    (bp_internal(&fnode->btree) ? 12 : 8)) {
 					hpfs_error(s,
 					   "bad number of nodes in fnode %08lx",
 					    (unsigned long)ino);
 					goto bail;
 				}
 				if (le16_to_cpu(fnode->btree.first_free) !=
-				    8 + fnode->btree.n_used_nodes * (fnode->btree.internal ? 8 : 12)) {
+				    8 + fnode->btree.n_used_nodes * (bp_internal(&fnode->btree) ? 8 : 12)) {
 					hpfs_error(s,
 					    "bad first_free pointer in fnode %08lx",
 					    (unsigned long)ino);
@@ -187,12 +187,12 @@
 				goto bail;
 			}
 			if ((unsigned)anode->btree.n_used_nodes + (unsigned)anode->btree.n_free_nodes !=
-			    (anode->btree.internal ? 60 : 40)) {
+			    (bp_internal(&anode->btree) ? 60 : 40)) {
 				hpfs_error(s, "bad number of nodes in anode %08x", ano);
 				goto bail;
 			}
 			if (le16_to_cpu(anode->btree.first_free) !=
-			    8 + anode->btree.n_used_nodes * (anode->btree.internal ? 8 : 12)) {
+			    8 + anode->btree.n_used_nodes * (bp_internal(&anode->btree) ? 8 : 12)) {
 				hpfs_error(s, "bad first_free pointer in anode %08x", ano);
 				goto bail;
 			}
diff --git a/fs/hpfs/namei.c b/fs/hpfs/namei.c
index 30dd7b1..9083ef8 100644
--- a/fs/hpfs/namei.c
+++ b/fs/hpfs/namei.c
@@ -70,7 +70,7 @@
 	fnode->len = len;
 	memcpy(fnode->name, name, len > 15 ? 15 : len);
 	fnode->up = cpu_to_le32(dir->i_ino);
-	fnode->dirflag = 1;
+	fnode->flags |= FNODE_dir;
 	fnode->btree.n_free_nodes = 7;
 	fnode->btree.n_used_nodes = 1;
 	fnode->btree.first_free = cpu_to_le16(0x14);
diff --git a/fs/hpfs/super.c b/fs/hpfs/super.c
index 54f6ecc..706a12c 100644
--- a/fs/hpfs/super.c
+++ b/fs/hpfs/super.c
@@ -572,7 +572,7 @@
 		mark_buffer_dirty(bh2);
 	}
 
-	if (le32_to_cpu(spareblock->hotfixes_used) || le32_to_cpu(spareblock->n_spares_used)) {
+	if (spareblock->hotfixes_used || spareblock->n_spares_used) {
 		if (errs >= 2) {
 			printk("HPFS: Hotfixes not supported here, try chkdsk\n");
 			mark_dirty(s, 0);
@@ -645,7 +645,7 @@
 		root->i_mtime.tv_nsec = 0;
 		root->i_ctime.tv_sec = local_to_gmt(s, le32_to_cpu(de->creation_date));
 		root->i_ctime.tv_nsec = 0;
-		hpfs_i(root)->i_ea_size = le16_to_cpu(de->ea_size);
+		hpfs_i(root)->i_ea_size = le32_to_cpu(de->ea_size);
 		hpfs_i(root)->i_parent_dir = root->i_ino;
 		if (root->i_size == -1)
 			root->i_size = 2048;
diff --git a/fs/hppfs/hppfs.c b/fs/hppfs/hppfs.c
index a80e45a..d4f93b5 100644
--- a/fs/hppfs/hppfs.c
+++ b/fs/hppfs/hppfs.c
@@ -614,7 +614,7 @@
 
 void hppfs_evict_inode(struct inode *ino)
 {
-	end_writeback(ino);
+	clear_inode(ino);
 	dput(HPPFS_I(ino)->proc_dentry);
 	mntput(ino->i_sb->s_fs_info);
 }
diff --git a/fs/hugetlbfs/inode.c b/fs/hugetlbfs/inode.c
index 001ef01..cc9281b 100644
--- a/fs/hugetlbfs/inode.c
+++ b/fs/hugetlbfs/inode.c
@@ -393,7 +393,7 @@
 static void hugetlbfs_evict_inode(struct inode *inode)
 {
 	truncate_hugepages(inode, 0);
-	end_writeback(inode);
+	clear_inode(inode);
 }
 
 static inline void
diff --git a/fs/inode.c b/fs/inode.c
index da93f7d..c99163b 100644
--- a/fs/inode.c
+++ b/fs/inode.c
@@ -486,7 +486,7 @@
 }
 EXPORT_SYMBOL(__remove_inode_hash);
 
-void end_writeback(struct inode *inode)
+void clear_inode(struct inode *inode)
 {
 	might_sleep();
 	/*
@@ -500,11 +500,10 @@
 	BUG_ON(!list_empty(&inode->i_data.private_list));
 	BUG_ON(!(inode->i_state & I_FREEING));
 	BUG_ON(inode->i_state & I_CLEAR);
-	inode_sync_wait(inode);
 	/* don't need i_lock here, no concurrent mods to i_state */
 	inode->i_state = I_FREEING | I_CLEAR;
 }
-EXPORT_SYMBOL(end_writeback);
+EXPORT_SYMBOL(clear_inode);
 
 /*
  * Free the inode passed in, removing it from the lists it is still connected
@@ -531,12 +530,20 @@
 
 	inode_sb_list_del(inode);
 
+	/*
+	 * Wait for flusher thread to be done with the inode so that filesystem
+	 * does not start destroying it while writeback is still running. Since
+	 * the inode has I_FREEING set, flusher thread won't start new work on
+	 * the inode.  We just have to wait for running writeback to finish.
+	 */
+	inode_wait_for_writeback(inode);
+
 	if (op->evict_inode) {
 		op->evict_inode(inode);
 	} else {
 		if (inode->i_data.nrpages)
 			truncate_inode_pages(&inode->i_data, 0);
-		end_writeback(inode);
+		clear_inode(inode);
 	}
 	if (S_ISBLK(inode->i_mode) && inode->i_bdev)
 		bd_forget(inode);
@@ -1480,10 +1487,30 @@
 	return 0;
 }
 
+/*
+ * This does the actual work of updating an inode's time or version.  The
+ * caller must have called mnt_want_write() before calling this.
+ */
+static int update_time(struct inode *inode, struct timespec *time, int flags)
+{
+	if (inode->i_op->update_time)
+		return inode->i_op->update_time(inode, time, flags);
+
+	if (flags & S_ATIME)
+		inode->i_atime = *time;
+	if (flags & S_VERSION)
+		inode_inc_iversion(inode);
+	if (flags & S_CTIME)
+		inode->i_ctime = *time;
+	if (flags & S_MTIME)
+		inode->i_mtime = *time;
+	mark_inode_dirty_sync(inode);
+	return 0;
+}
+
 /**
  *	touch_atime	-	update the access time
- *	@mnt: mount the inode is accessed on
- *	@dentry: dentry accessed
+ *	@path: the &struct path to update
  *
  *	Update the accessed time on an inode and mark it for writeback.
  *	This function automatically handles read only file systems and media,
@@ -1518,12 +1545,83 @@
 	if (mnt_want_write(mnt))
 		return;
 
-	inode->i_atime = now;
-	mark_inode_dirty_sync(inode);
+	/*
+	 * File systems can error out when updating inodes if they need to
+	 * allocate new space to modify an inode (such is the case for
+	 * Btrfs), but since we touch atime while walking down the path we
+	 * really don't care if we failed to update the atime of the file,
+	 * so just ignore the return value.
+	 */
+	update_time(inode, &now, S_ATIME);
 	mnt_drop_write(mnt);
 }
 EXPORT_SYMBOL(touch_atime);
 
+/*
+ * The logic we want is
+ *
+ *	if suid or (sgid and xgrp)
+ *		remove privs
+ */
+int should_remove_suid(struct dentry *dentry)
+{
+	umode_t mode = dentry->d_inode->i_mode;
+	int kill = 0;
+
+	/* suid always must be killed */
+	if (unlikely(mode & S_ISUID))
+		kill = ATTR_KILL_SUID;
+
+	/*
+	 * sgid without any exec bits is just a mandatory locking mark; leave
+	 * it alone.  If some exec bits are set, it's a real sgid; kill it.
+	 */
+	if (unlikely((mode & S_ISGID) && (mode & S_IXGRP)))
+		kill |= ATTR_KILL_SGID;
+
+	if (unlikely(kill && !capable(CAP_FSETID) && S_ISREG(mode)))
+		return kill;
+
+	return 0;
+}
+EXPORT_SYMBOL(should_remove_suid);
+
+static int __remove_suid(struct dentry *dentry, int kill)
+{
+	struct iattr newattrs;
+
+	newattrs.ia_valid = ATTR_FORCE | kill;
+	return notify_change(dentry, &newattrs);
+}
+
+int file_remove_suid(struct file *file)
+{
+	struct dentry *dentry = file->f_path.dentry;
+	struct inode *inode = dentry->d_inode;
+	int killsuid;
+	int killpriv;
+	int error = 0;
+
+	/* Fast path for nothing security related */
+	if (IS_NOSEC(inode))
+		return 0;
+
+	killsuid = should_remove_suid(dentry);
+	killpriv = security_inode_need_killpriv(dentry);
+
+	if (killpriv < 0)
+		return killpriv;
+	if (killpriv)
+		error = security_inode_killpriv(dentry);
+	if (!error && killsuid)
+		error = __remove_suid(dentry, killsuid);
+	if (!error && (inode->i_sb->s_flags & MS_NOSEC))
+		inode->i_flags |= S_NOSEC;
+
+	return error;
+}
+EXPORT_SYMBOL(file_remove_suid);
+
 /**
  *	file_update_time	-	update mtime and ctime time
  *	@file: file accessed
@@ -1533,18 +1631,20 @@
  *	usage in the file write path of filesystems, and filesystems may
  *	choose to explicitly ignore update via this function with the
  *	S_NOCMTIME inode flag, e.g. for network filesystem where these
- *	timestamps are handled by the server.
+ *	timestamps are handled by the server.  This can return an error for
+ *	file systems that need to allocate space in order to update an inode.
  */
 
-void file_update_time(struct file *file)
+int file_update_time(struct file *file)
 {
 	struct inode *inode = file->f_path.dentry->d_inode;
 	struct timespec now;
-	enum { S_MTIME = 1, S_CTIME = 2, S_VERSION = 4 } sync_it = 0;
+	int sync_it = 0;
+	int ret;
 
 	/* First try to exhaust all avenues to not sync */
 	if (IS_NOCMTIME(inode))
-		return;
+		return 0;
 
 	now = current_fs_time(inode->i_sb);
 	if (!timespec_equal(&inode->i_mtime, &now))
@@ -1557,21 +1657,16 @@
 		sync_it |= S_VERSION;
 
 	if (!sync_it)
-		return;
+		return 0;
 
 	/* Finally allowed to write? Takes lock. */
 	if (mnt_want_write_file(file))
-		return;
+		return 0;
 
-	/* Only change inode inside the lock region */
-	if (sync_it & S_VERSION)
-		inode_inc_iversion(inode);
-	if (sync_it & S_CTIME)
-		inode->i_ctime = now;
-	if (sync_it & S_MTIME)
-		inode->i_mtime = now;
-	mark_inode_dirty_sync(inode);
+	ret = update_time(inode, &now, sync_it);
 	mnt_drop_write_file(file);
+
+	return ret;
 }
 EXPORT_SYMBOL(file_update_time);
 
@@ -1741,3 +1836,50 @@
 	return false;
 }
 EXPORT_SYMBOL(inode_owner_or_capable);
+
+/*
+ * Direct i/o helper functions
+ */
+static void __inode_dio_wait(struct inode *inode)
+{
+	wait_queue_head_t *wq = bit_waitqueue(&inode->i_state, __I_DIO_WAKEUP);
+	DEFINE_WAIT_BIT(q, &inode->i_state, __I_DIO_WAKEUP);
+
+	do {
+		prepare_to_wait(wq, &q.wait, TASK_UNINTERRUPTIBLE);
+		if (atomic_read(&inode->i_dio_count))
+			schedule();
+	} while (atomic_read(&inode->i_dio_count));
+	finish_wait(wq, &q.wait);
+}
+
+/**
+ * inode_dio_wait - wait for outstanding DIO requests to finish
+ * @inode: inode to wait for
+ *
+ * Waits for all pending direct I/O requests to finish so that we can
+ * proceed with a truncate or equivalent operation.
+ *
+ * Must be called under a lock that serializes taking new references
+ * to i_dio_count, usually by inode->i_mutex.
+ */
+void inode_dio_wait(struct inode *inode)
+{
+	if (atomic_read(&inode->i_dio_count))
+		__inode_dio_wait(inode);
+}
+EXPORT_SYMBOL(inode_dio_wait);
+
+/*
+ * inode_dio_done - signal finish of a direct I/O request
+ * @inode: inode the direct I/O happens on
+ *
+ * This is called once we've finished processing a direct I/O request,
+ * and is used to wake up callers waiting for direct I/O to be quiesced.
+ */
+void inode_dio_done(struct inode *inode)
+{
+	if (atomic_dec_and_test(&inode->i_dio_count))
+		wake_up_bit(&inode->i_state, __I_DIO_WAKEUP);
+}
+EXPORT_SYMBOL(inode_dio_done);
diff --git a/fs/internal.h b/fs/internal.h
index 9962c59..18bc216 100644
--- a/fs/internal.h
+++ b/fs/internal.h
@@ -56,7 +56,7 @@
 
 extern void __init mnt_init(void);
 
-DECLARE_BRLOCK(vfsmount_lock);
+extern struct lglock vfsmount_lock;
 
 
 /*
@@ -100,6 +100,7 @@
 
 extern long do_handle_open(int mountdirfd,
 			   struct file_handle __user *ufh, int open_flag);
+extern int open_check_o_direct(struct file *f);
 
 /*
  * inode.c
diff --git a/fs/ioprio.c b/fs/ioprio.c
index 5e6dbe89..e50170c 100644
--- a/fs/ioprio.c
+++ b/fs/ioprio.c
@@ -50,7 +50,7 @@
 
 	ioc = get_task_io_context(task, GFP_ATOMIC, NUMA_NO_NODE);
 	if (ioc) {
-		ioc_ioprio_changed(ioc, ioprio);
+		ioc->ioprio = ioprio;
 		put_io_context(ioc);
 	}
 
diff --git a/fs/isofs/export.c b/fs/isofs/export.c
index dd4687f..aa4356d 100644
--- a/fs/isofs/export.c
+++ b/fs/isofs/export.c
@@ -107,12 +107,11 @@
 }
 
 static int
-isofs_export_encode_fh(struct dentry *dentry,
+isofs_export_encode_fh(struct inode *inode,
 		       __u32 *fh32,
 		       int *max_len,
-		       int connectable)
+		       struct inode *parent)
 {
-	struct inode * inode = dentry->d_inode;
 	struct iso_inode_info * ei = ISOFS_I(inode);
 	int len = *max_len;
 	int type = 1;
@@ -124,7 +123,7 @@
 	 * offset of the inode and the upper 16 bits of fh32[1] to
 	 * hold the offset of the parent.
 	 */
-	if (connectable && (len < 5)) {
+	if (parent && (len < 5)) {
 		*max_len = 5;
 		return 255;
 	} else if (len < 3) {
@@ -136,16 +135,12 @@
 	fh32[0] = ei->i_iget5_block;
  	fh16[2] = (__u16)ei->i_iget5_offset;  /* fh16 [sic] */
 	fh32[2] = inode->i_generation;
-	if (connectable && !S_ISDIR(inode->i_mode)) {
-		struct inode *parent;
+	if (parent) {
 		struct iso_inode_info *eparent;
-		spin_lock(&dentry->d_lock);
-		parent = dentry->d_parent->d_inode;
 		eparent = ISOFS_I(parent);
 		fh32[3] = eparent->i_iget5_block;
 		fh16[3] = (__u16)eparent->i_iget5_offset;  /* fh16 [sic] */
 		fh32[4] = parent->i_generation;
-		spin_unlock(&dentry->d_lock);
 		len = 5;
 		type = 2;
 	}
diff --git a/fs/jbd/checkpoint.c b/fs/jbd/checkpoint.c
index 05f0754..08c0304 100644
--- a/fs/jbd/checkpoint.c
+++ b/fs/jbd/checkpoint.c
@@ -508,20 +508,19 @@
 	/*
 	 * We need to make sure that any blocks that were recently written out
 	 * --- perhaps by log_do_checkpoint() --- are flushed out before we
-	 * drop the transactions from the journal. It's unlikely this will be
-	 * necessary, especially with an appropriately sized journal, but we
-	 * need this to guarantee correctness.  Fortunately
-	 * cleanup_journal_tail() doesn't get called all that often.
+	 * drop the transactions from the journal. Similarly we need to be sure
+	 * the superblock reaches disk before the next transaction starts reusing
+	 * freed space (otherwise we could replay some blocks of the new
+	 * transaction thinking they belong to the old one). So we use
+	 * WRITE_FLUSH_FUA. It's unlikely this will be necessary, especially
+	 * with an appropriately sized journal, but we need this to guarantee
+	 * correctness.  Fortunately cleanup_journal_tail() doesn't get called
+	 * all that often.
 	 */
-	if (journal->j_flags & JFS_BARRIER)
-		blkdev_issue_flush(journal->j_fs_dev, GFP_KERNEL, NULL);
+	journal_update_sb_log_tail(journal, first_tid, blocknr,
+				   WRITE_FLUSH_FUA);
 
 	spin_lock(&journal->j_state_lock);
-	if (!tid_gt(first_tid, journal->j_tail_sequence)) {
-		spin_unlock(&journal->j_state_lock);
-		/* Someone else cleaned up journal so return 0 */
-		return 0;
-	}
 	/* OK, update the superblock to recover the freed space.
 	 * Physical blocks come first: have we wrapped beyond the end of
 	 * the log?  */
@@ -539,8 +538,6 @@
 	journal->j_tail_sequence = first_tid;
 	journal->j_tail = blocknr;
 	spin_unlock(&journal->j_state_lock);
-	if (!(journal->j_flags & JFS_ABORT))
-		journal_update_superblock(journal, 1);
 	return 0;
 }
 
diff --git a/fs/jbd/commit.c b/fs/jbd/commit.c
index f2b9a57..52c15c7 100644
--- a/fs/jbd/commit.c
+++ b/fs/jbd/commit.c
@@ -298,6 +298,7 @@
 	int tag_flag;
 	int i;
 	struct blk_plug plug;
+	int write_op = WRITE;
 
 	/*
 	 * First job: lock down the current transaction and wait for
@@ -307,7 +308,16 @@
 	/* Do we need to erase the effects of a prior journal_flush? */
 	if (journal->j_flags & JFS_FLUSHED) {
 		jbd_debug(3, "super block updated\n");
-		journal_update_superblock(journal, 1);
+		mutex_lock(&journal->j_checkpoint_mutex);
+		/*
+		 * We hold j_checkpoint_mutex so the tail cannot change under
+		 * us. We don't need any special data guarantees for writing
+		 * sb since the journal is empty and it is ok for the write to
+		 * be flushed only with the transaction commit.
+		 */
+		journal_update_sb_log_tail(journal, journal->j_tail_sequence,
+					   journal->j_tail, WRITE_SYNC);
+		mutex_unlock(&journal->j_checkpoint_mutex);
 	} else {
 		jbd_debug(3, "superblock not updated\n");
 	}
@@ -413,13 +423,16 @@
 
 	jbd_debug (3, "JBD: commit phase 2\n");
 
+	if (tid_geq(journal->j_commit_waited, commit_transaction->t_tid))
+		write_op = WRITE_SYNC;
+
 	/*
 	 * Now start flushing things to disk, in the order they appear
 	 * on the transaction lists.  Data blocks go first.
 	 */
 	blk_start_plug(&plug);
 	err = journal_submit_data_buffers(journal, commit_transaction,
-					  WRITE_SYNC);
+					  write_op);
 	blk_finish_plug(&plug);
 
 	/*
@@ -478,7 +491,7 @@
 
 	blk_start_plug(&plug);
 
-	journal_write_revoke_records(journal, commit_transaction, WRITE_SYNC);
+	journal_write_revoke_records(journal, commit_transaction, write_op);
 
 	/*
 	 * If we found any dirty or locked buffers, then we should have
@@ -649,7 +662,7 @@
 				clear_buffer_dirty(bh);
 				set_buffer_uptodate(bh);
 				bh->b_end_io = journal_end_buffer_io_sync;
-				submit_bh(WRITE_SYNC, bh);
+				submit_bh(write_op, bh);
 			}
 			cond_resched();
 
diff --git a/fs/jbd/journal.c b/fs/jbd/journal.c
index 0971e92..425c2f2 100644
--- a/fs/jbd/journal.c
+++ b/fs/jbd/journal.c
@@ -563,6 +563,8 @@
 	spin_unlock(&journal->j_state_lock);
 #endif
 	spin_lock(&journal->j_state_lock);
+	if (!tid_geq(journal->j_commit_waited, tid))
+		journal->j_commit_waited = tid;
 	while (tid_gt(tid, journal->j_commit_sequence)) {
 		jbd_debug(1, "JBD: want %d, j_commit_sequence=%d\n",
 				  tid, journal->j_commit_sequence);
@@ -921,8 +923,33 @@
 
 	journal->j_max_transaction_buffers = journal->j_maxlen / 4;
 
-	/* Add the dynamic fields and write it to disk. */
-	journal_update_superblock(journal, 1);
+	/*
+	 * As a special case, if the on-disk copy is already marked as needing
+	 * no recovery (s_start == 0), then we can safely defer the superblock
+	 * update until the next commit by setting JFS_FLUSHED.  This avoids
+	 * attempting a write to a potentially read-only device.
+	 */
+	if (sb->s_start == 0) {
+		jbd_debug(1,"JBD: Skipping superblock update on recovered sb "
+			"(start %u, seq %d, errno %d)\n",
+			journal->j_tail, journal->j_tail_sequence,
+			journal->j_errno);
+		journal->j_flags |= JFS_FLUSHED;
+	} else {
+		/* Lock here to make assertions happy... */
+		mutex_lock(&journal->j_checkpoint_mutex);
+		/*
+		 * Update log tail information. We use WRITE_FUA since the new
+		 * transaction will start reusing journal space and so we
+		 * must make sure information about current log tail is on
+		 * disk before that.
+		 */
+		journal_update_sb_log_tail(journal,
+					   journal->j_tail_sequence,
+					   journal->j_tail,
+					   WRITE_FUA);
+		mutex_unlock(&journal->j_checkpoint_mutex);
+	}
 	return journal_start_thread(journal);
 }
 
@@ -999,35 +1026,15 @@
 	return journal_reset(journal);
 }
 
-/**
- * void journal_update_superblock() - Update journal sb on disk.
- * @journal: The journal to update.
- * @wait: Set to '0' if you don't want to wait for IO completion.
- *
- * Update a journal's dynamic superblock fields and write it to disk,
- * optionally waiting for the IO to complete.
- */
-void journal_update_superblock(journal_t *journal, int wait)
+static void journal_write_superblock(journal_t *journal, int write_op)
 {
-	journal_superblock_t *sb = journal->j_superblock;
 	struct buffer_head *bh = journal->j_sb_buffer;
+	int ret;
 
-	/*
-	 * As a special case, if the on-disk copy is already marked as needing
-	 * no recovery (s_start == 0) and there are no outstanding transactions
-	 * in the filesystem, then we can safely defer the superblock update
-	 * until the next commit by setting JFS_FLUSHED.  This avoids
-	 * attempting a write to a potential-readonly device.
-	 */
-	if (sb->s_start == 0 && journal->j_tail_sequence ==
-				journal->j_transaction_sequence) {
-		jbd_debug(1,"JBD: Skipping superblock update on recovered sb "
-			"(start %u, seq %d, errno %d)\n",
-			journal->j_tail, journal->j_tail_sequence,
-			journal->j_errno);
-		goto out;
-	}
-
+	trace_journal_write_superblock(journal, write_op);
+	if (!(journal->j_flags & JFS_BARRIER))
+		write_op &= ~(REQ_FUA | REQ_FLUSH);
+	lock_buffer(bh);
 	if (buffer_write_io_error(bh)) {
 		char b[BDEVNAME_SIZE];
 		/*
@@ -1045,44 +1052,102 @@
 		set_buffer_uptodate(bh);
 	}
 
+	get_bh(bh);
+	bh->b_end_io = end_buffer_write_sync;
+	ret = submit_bh(write_op, bh);
+	wait_on_buffer(bh);
+	if (buffer_write_io_error(bh)) {
+		clear_buffer_write_io_error(bh);
+		set_buffer_uptodate(bh);
+		ret = -EIO;
+	}
+	if (ret) {
+		char b[BDEVNAME_SIZE];
+		printk(KERN_ERR "JBD: Error %d detected "
+		       "when updating journal superblock for %s.\n",
+		       ret, journal_dev_name(journal, b));
+	}
+}
+
+/**
+ * journal_update_sb_log_tail() - Update log tail in journal sb on disk.
+ * @journal: The journal to update.
+ * @tail_tid: TID of the new transaction at the tail of the log
+ * @tail_block: The first block of the transaction at the tail of the log
+ * @write_op: With which operation should we write the journal sb
+ *
+ * Update a journal's superblock information about log tail and write it to
+ * disk, waiting for the IO to complete.
+ */
+void journal_update_sb_log_tail(journal_t *journal, tid_t tail_tid,
+				unsigned int tail_block, int write_op)
+{
+	journal_superblock_t *sb = journal->j_superblock;
+
+	BUG_ON(!mutex_is_locked(&journal->j_checkpoint_mutex));
+	jbd_debug(1,"JBD: updating superblock (start %u, seq %u)\n",
+		  tail_block, tail_tid);
+
+	sb->s_sequence = cpu_to_be32(tail_tid);
+	sb->s_start    = cpu_to_be32(tail_block);
+
+	journal_write_superblock(journal, write_op);
+
+	/* Log is no longer empty */
 	spin_lock(&journal->j_state_lock);
-	jbd_debug(1,"JBD: updating superblock (start %u, seq %d, errno %d)\n",
-		  journal->j_tail, journal->j_tail_sequence, journal->j_errno);
+	WARN_ON(!sb->s_sequence);
+	journal->j_flags &= ~JFS_FLUSHED;
+	spin_unlock(&journal->j_state_lock);
+}
+
+/**
+ * mark_journal_empty() - Mark on disk journal as empty.
+ * @journal: The journal to update.
+ *
+ * Update a journal's dynamic superblock fields to show that the journal is
+ * empty. Write the updated superblock to disk, waiting for the IO to complete.
+ */
+static void mark_journal_empty(journal_t *journal)
+{
+	journal_superblock_t *sb = journal->j_superblock;
+
+	BUG_ON(!mutex_is_locked(&journal->j_checkpoint_mutex));
+	spin_lock(&journal->j_state_lock);
+	jbd_debug(1, "JBD: Marking journal as empty (seq %d)\n",
+		  journal->j_tail_sequence);
 
 	sb->s_sequence = cpu_to_be32(journal->j_tail_sequence);
-	sb->s_start    = cpu_to_be32(journal->j_tail);
-	sb->s_errno    = cpu_to_be32(journal->j_errno);
+	sb->s_start    = cpu_to_be32(0);
 	spin_unlock(&journal->j_state_lock);
 
-	BUFFER_TRACE(bh, "marking dirty");
-	mark_buffer_dirty(bh);
-	if (wait) {
-		sync_dirty_buffer(bh);
-		if (buffer_write_io_error(bh)) {
-			char b[BDEVNAME_SIZE];
-			printk(KERN_ERR "JBD: I/O error detected "
-			       "when updating journal superblock for %s.\n",
-			       journal_dev_name(journal, b));
-			clear_buffer_write_io_error(bh);
-			set_buffer_uptodate(bh);
-		}
-	} else
-		write_dirty_buffer(bh, WRITE);
-
-	trace_jbd_update_superblock_end(journal, wait);
-out:
-	/* If we have just flushed the log (by marking s_start==0), then
-	 * any future commit will have to be careful to update the
-	 * superblock again to re-record the true start of the log. */
+	journal_write_superblock(journal, WRITE_FUA);
 
 	spin_lock(&journal->j_state_lock);
-	if (sb->s_start)
-		journal->j_flags &= ~JFS_FLUSHED;
-	else
-		journal->j_flags |= JFS_FLUSHED;
+	/* Log is empty */
+	journal->j_flags |= JFS_FLUSHED;
 	spin_unlock(&journal->j_state_lock);
 }
 
+/**
+ * journal_update_sb_errno() - Update error in the journal.
+ * @journal: The journal to update.
+ *
+ * Update a journal's errno. Write the updated superblock to disk, waiting
+ * for the IO to complete.
+ */
+static void journal_update_sb_errno(journal_t *journal)
+{
+	journal_superblock_t *sb = journal->j_superblock;
+
+	spin_lock(&journal->j_state_lock);
+	jbd_debug(1, "JBD: updating superblock error (errno %d)\n",
+		  journal->j_errno);
+	sb->s_errno = cpu_to_be32(journal->j_errno);
+	spin_unlock(&journal->j_state_lock);
+
+	journal_write_superblock(journal, WRITE_SYNC);
+}
+
 /*
  * Read the superblock for a given journal, performing initial
  * validation of the format.
@@ -1251,6 +1316,8 @@
 
 	/* Force any old transactions to disk */
 
+	/* We cannot race with anybody but must keep assertions happy */
+	mutex_lock(&journal->j_checkpoint_mutex);
 	/* Totally anal locking here... */
 	spin_lock(&journal->j_list_lock);
 	while (journal->j_checkpoint_transactions != NULL) {
@@ -1266,16 +1333,14 @@
 
 	if (journal->j_sb_buffer) {
 		if (!is_journal_aborted(journal)) {
-			/* We can now mark the journal as empty. */
-			journal->j_tail = 0;
 			journal->j_tail_sequence =
 				++journal->j_transaction_sequence;
-			journal_update_superblock(journal, 1);
-		} else {
+			mark_journal_empty(journal);
+		} else
 			err = -EIO;
-		}
 		brelse(journal->j_sb_buffer);
 	}
+	mutex_unlock(&journal->j_checkpoint_mutex);
 
 	if (journal->j_inode)
 		iput(journal->j_inode);
@@ -1455,7 +1520,6 @@
 {
 	int err = 0;
 	transaction_t *transaction = NULL;
-	unsigned int old_tail;
 
 	spin_lock(&journal->j_state_lock);
 
@@ -1490,6 +1554,7 @@
 	if (is_journal_aborted(journal))
 		return -EIO;
 
+	mutex_lock(&journal->j_checkpoint_mutex);
 	cleanup_journal_tail(journal);
 
 	/* Finally, mark the journal as really needing no recovery.
@@ -1497,14 +1562,9 @@
 	 * the magic code for a fully-recovered superblock.  Any future
 	 * commits of data to the journal will restore the current
 	 * s_start value. */
+	mark_journal_empty(journal);
+	mutex_unlock(&journal->j_checkpoint_mutex);
 	spin_lock(&journal->j_state_lock);
-	old_tail = journal->j_tail;
-	journal->j_tail = 0;
-	spin_unlock(&journal->j_state_lock);
-	journal_update_superblock(journal, 1);
-	spin_lock(&journal->j_state_lock);
-	journal->j_tail = old_tail;
-
 	J_ASSERT(!journal->j_running_transaction);
 	J_ASSERT(!journal->j_committing_transaction);
 	J_ASSERT(!journal->j_checkpoint_transactions);
@@ -1544,8 +1604,12 @@
 		write ? "Clearing" : "Ignoring");
 
 	err = journal_skip_recovery(journal);
-	if (write)
-		journal_update_superblock(journal, 1);
+	if (write) {
+		/* Lock to make assertions happy... */
+		mutex_lock(&journal->j_checkpoint_mutex);
+		mark_journal_empty(journal);
+		mutex_unlock(&journal->j_checkpoint_mutex);
+	}
 
  no_recovery:
 	return err;
@@ -1613,7 +1677,7 @@
 	__journal_abort_hard(journal);
 
 	if (errno)
-		journal_update_superblock(journal, 1);
+		journal_update_sb_errno(journal);
 }
 
 /**
diff --git a/fs/jbd/transaction.c b/fs/jbd/transaction.c
index b2a7e52..febc10d 100644
--- a/fs/jbd/transaction.c
+++ b/fs/jbd/transaction.c
@@ -1433,8 +1433,6 @@
 		}
 	}
 
-	if (handle->h_sync)
-		transaction->t_synchronous_commit = 1;
 	current->journal_info = NULL;
 	spin_lock(&journal->j_state_lock);
 	spin_lock(&transaction->t_handle_lock);
diff --git a/fs/jbd2/Kconfig b/fs/jbd2/Kconfig
index f32f346..69a48c2 100644
--- a/fs/jbd2/Kconfig
+++ b/fs/jbd2/Kconfig
@@ -1,6 +1,8 @@
 config JBD2
 	tristate
 	select CRC32
+	select CRYPTO
+	select CRYPTO_CRC32C
 	help
 	  This is a generic journaling layer for block devices that support
 	  both 32-bit and 64-bit block numbers.  It is currently used by
diff --git a/fs/jbd2/commit.c b/fs/jbd2/commit.c
index 840f70f..216f429 100644
--- a/fs/jbd2/commit.c
+++ b/fs/jbd2/commit.c
@@ -85,6 +85,24 @@
 	__brelse(bh);
 }
 
+static void jbd2_commit_block_csum_set(journal_t *j,
+				       struct journal_head *descriptor)
+{
+	struct commit_header *h;
+	__u32 csum;
+
+	if (!JBD2_HAS_INCOMPAT_FEATURE(j, JBD2_FEATURE_INCOMPAT_CSUM_V2))
+		return;
+
+	h = (struct commit_header *)(jh2bh(descriptor)->b_data);
+	h->h_chksum_type = 0;
+	h->h_chksum_size = 0;
+	h->h_chksum[0] = 0;
+	csum = jbd2_chksum(j, j->j_csum_seed, jh2bh(descriptor)->b_data,
+			   j->j_blocksize);
+	h->h_chksum[0] = cpu_to_be32(csum);
+}
+
 /*
  * Done it all: now submit the commit record.  We should have
  * cleaned up our previous buffers by now, so if we are in abort
@@ -128,6 +146,7 @@
 		tmp->h_chksum_size 	= JBD2_CRC32_CHKSUM_SIZE;
 		tmp->h_chksum[0] 	= cpu_to_be32(crc32_sum);
 	}
+	jbd2_commit_block_csum_set(journal, descriptor);
 
 	JBUFFER_TRACE(descriptor, "submit commit block");
 	lock_buffer(bh);
@@ -301,6 +320,44 @@
 		tag->t_blocknr_high = cpu_to_be32((block >> 31) >> 1);
 }
 
+static void jbd2_descr_block_csum_set(journal_t *j,
+				      struct journal_head *descriptor)
+{
+	struct jbd2_journal_block_tail *tail;
+	__u32 csum;
+
+	if (!JBD2_HAS_INCOMPAT_FEATURE(j, JBD2_FEATURE_INCOMPAT_CSUM_V2))
+		return;
+
+	tail = (struct jbd2_journal_block_tail *)
+			(jh2bh(descriptor)->b_data + j->j_blocksize -
+			sizeof(struct jbd2_journal_block_tail));
+	tail->t_checksum = 0;
+	csum = jbd2_chksum(j, j->j_csum_seed, jh2bh(descriptor)->b_data,
+			   j->j_blocksize);
+	tail->t_checksum = cpu_to_be32(csum);
+}
+
+static void jbd2_block_tag_csum_set(journal_t *j, journal_block_tag_t *tag,
+				    struct buffer_head *bh, __u32 sequence)
+{
+	struct page *page = bh->b_page;
+	__u8 *addr;
+	__u32 csum;
+
+	if (!JBD2_HAS_INCOMPAT_FEATURE(j, JBD2_FEATURE_INCOMPAT_CSUM_V2))
+		return;
+
+	sequence = cpu_to_be32(sequence);
+	addr = kmap_atomic(page, KM_USER0);
+	csum = jbd2_chksum(j, j->j_csum_seed, (__u8 *)&sequence,
+			  sizeof(sequence));
+	csum = jbd2_chksum(j, csum, addr + offset_in_page(bh->b_data),
+			  bh->b_size);
+	kunmap_atomic(addr, KM_USER0);
+
+	tag->t_checksum = cpu_to_be32(csum);
+}
 /*
  * jbd2_journal_commit_transaction
  *
@@ -334,6 +391,10 @@
 	unsigned long first_block;
 	tid_t first_tid;
 	int update_tail;
+	int csum_size = 0;
+
+	if (JBD2_HAS_INCOMPAT_FEATURE(journal, JBD2_FEATURE_INCOMPAT_CSUM_V2))
+		csum_size = sizeof(struct jbd2_journal_block_tail);
 
 	/*
 	 * First job: lock down the current transaction and wait for
@@ -627,7 +688,9 @@
 
 		tag = (journal_block_tag_t *) tagp;
 		write_tag_block(tag_bytes, tag, jh2bh(jh)->b_blocknr);
-		tag->t_flags = cpu_to_be32(tag_flag);
+		tag->t_flags = cpu_to_be16(tag_flag);
+		jbd2_block_tag_csum_set(journal, tag, jh2bh(new_jh),
+					commit_transaction->t_tid);
 		tagp += tag_bytes;
 		space_left -= tag_bytes;
 
@@ -643,7 +706,7 @@
 
 		if (bufs == journal->j_wbufsize ||
 		    commit_transaction->t_buffers == NULL ||
-		    space_left < tag_bytes + 16) {
+		    space_left < tag_bytes + 16 + csum_size) {
 
 			jbd_debug(4, "JBD2: Submit %d IOs\n", bufs);
 
@@ -651,8 +714,9 @@
                            submitting the IOs.  "tag" still points to
                            the last tag we set up. */
 
-			tag->t_flags |= cpu_to_be32(JBD2_FLAG_LAST_TAG);
+			tag->t_flags |= cpu_to_be16(JBD2_FLAG_LAST_TAG);
 
+			jbd2_descr_block_csum_set(journal, descriptor);
 start_journal_io:
 			for (i = 0; i < bufs; i++) {
 				struct buffer_head *bh = wbuf[i];
diff --git a/fs/jbd2/journal.c b/fs/jbd2/journal.c
index 1afb701..e9a3c4c 100644
--- a/fs/jbd2/journal.c
+++ b/fs/jbd2/journal.c
@@ -97,6 +97,43 @@
 static void __journal_abort_soft (journal_t *journal, int errno);
 static int jbd2_journal_create_slab(size_t slab_size);
 
+/* Checksumming functions */
+int jbd2_verify_csum_type(journal_t *j, journal_superblock_t *sb)
+{
+	if (!JBD2_HAS_INCOMPAT_FEATURE(j, JBD2_FEATURE_INCOMPAT_CSUM_V2))
+		return 1;
+
+	return sb->s_checksum_type == JBD2_CRC32C_CHKSUM;
+}
+
+static __u32 jbd2_superblock_csum(journal_t *j, journal_superblock_t *sb)
+{
+	__u32 csum, old_csum;
+
+	old_csum = sb->s_checksum;
+	sb->s_checksum = 0;
+	csum = jbd2_chksum(j, ~0, (char *)sb, sizeof(journal_superblock_t));
+	sb->s_checksum = old_csum;
+
+	return cpu_to_be32(csum);
+}
+
+int jbd2_superblock_csum_verify(journal_t *j, journal_superblock_t *sb)
+{
+	if (!JBD2_HAS_INCOMPAT_FEATURE(j, JBD2_FEATURE_INCOMPAT_CSUM_V2))
+		return 1;
+
+	return sb->s_checksum == jbd2_superblock_csum(j, sb);
+}
+
+void jbd2_superblock_csum_set(journal_t *j, journal_superblock_t *sb)
+{
+	if (!JBD2_HAS_INCOMPAT_FEATURE(j, JBD2_FEATURE_INCOMPAT_CSUM_V2))
+		return;
+
+	sb->s_checksum = jbd2_superblock_csum(j, sb);
+}
+
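
For reference, a standalone userspace sketch of the checksum scheme these helpers implement: the per-journal seed is a crc32c over the superblock UUID, and the superblock checksum is a crc32c taken with the checksum field itself zeroed. The struct layout and the bitwise CRC routine below are illustrative stand-ins, not the kernel's journal_superblock_t or its crypto-API crc32c driver.

#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

/* Bitwise CRC-32C state update; a stand-in for the kernel crc32c shash. */
static uint32_t crc32c_update(uint32_t crc, const void *buf, size_t len)
{
	const uint8_t *p = buf;

	while (len--) {
		crc ^= *p++;
		for (int i = 0; i < 8; i++)
			crc = (crc >> 1) ^ ((crc & 1) ? 0x82F63B78u : 0);
	}
	return crc;
}

struct sb_sketch {                      /* toy stand-in for journal_superblock_t */
	uint8_t  uuid[16];
	uint32_t checksum;
	uint8_t  rest[64];
};

int main(void)
{
	struct sb_sketch sb = { .uuid = "journal-uuid-00" };
	uint32_t seed, csum;

	/* Per-journal seed: CRC state primed with ~0 and run over the UUID. */
	seed = crc32c_update(~0u, sb.uuid, sizeof(sb.uuid));

	/* Superblock checksum: computed with the checksum field zeroed. */
	sb.checksum = 0;
	csum = crc32c_update(~0u, &sb, sizeof(sb));
	sb.checksum = csum;

	printf("csum seed %08x, sb checksum %08x\n",
	       (unsigned)seed, (unsigned)csum);
	return 0;
}

Verification is the mirror image: remember the stored value, zero the field, recompute, compare.
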
 /*
  * Helper function used to manage commit timeouts
  */
@@ -1348,6 +1385,7 @@
 	jbd_debug(1, "JBD2: updating superblock error (errno %d)\n",
 		  journal->j_errno);
 	sb->s_errno    = cpu_to_be32(journal->j_errno);
+	jbd2_superblock_csum_set(journal, sb);
 	read_unlock(&journal->j_state_lock);
 
 	jbd2_write_superblock(journal, WRITE_SYNC);
@@ -1376,6 +1414,9 @@
 		}
 	}
 
+	if (buffer_verified(bh))
+		return 0;
+
 	sb = journal->j_superblock;
 
 	err = -EINVAL;
@@ -1413,6 +1454,43 @@
 		goto out;
 	}
 
+	if (JBD2_HAS_COMPAT_FEATURE(journal, JBD2_FEATURE_COMPAT_CHECKSUM) &&
+	    JBD2_HAS_INCOMPAT_FEATURE(journal, JBD2_FEATURE_INCOMPAT_CSUM_V2)) {
+		/* Can't have checksum v1 and v2 on at the same time! */
+		printk(KERN_ERR "JBD: Can't enable checksumming v1 and v2 "
+		       "at the same time!\n");
+		goto out;
+	}
+
+	if (!jbd2_verify_csum_type(journal, sb)) {
+		printk(KERN_ERR "JBD: Unknown checksum type\n");
+		goto out;
+	}
+
+	/* Load the checksum driver */
+	if (JBD2_HAS_INCOMPAT_FEATURE(journal, JBD2_FEATURE_INCOMPAT_CSUM_V2)) {
+		journal->j_chksum_driver = crypto_alloc_shash("crc32c", 0, 0);
+		if (IS_ERR(journal->j_chksum_driver)) {
+			printk(KERN_ERR "JBD: Cannot load crc32c driver.\n");
+			err = PTR_ERR(journal->j_chksum_driver);
+			journal->j_chksum_driver = NULL;
+			goto out;
+		}
+	}
+
+	/* Check superblock checksum */
+	if (!jbd2_superblock_csum_verify(journal, sb)) {
+		printk(KERN_ERR "JBD: journal checksum error\n");
+		goto out;
+	}
+
+	/* Precompute checksum seed for all metadata */
+	if (JBD2_HAS_INCOMPAT_FEATURE(journal, JBD2_FEATURE_INCOMPAT_CSUM_V2))
+		journal->j_csum_seed = jbd2_chksum(journal, ~0, sb->s_uuid,
+						   sizeof(sb->s_uuid));
+
+	set_buffer_verified(bh);
+
 	return 0;
 
 out:
@@ -1564,6 +1642,8 @@
 		iput(journal->j_inode);
 	if (journal->j_revoke)
 		jbd2_journal_destroy_revoke(journal);
+	if (journal->j_chksum_driver)
+		crypto_free_shash(journal->j_chksum_driver);
 	kfree(journal->j_wbuf);
 	kfree(journal);
 
@@ -1653,6 +1733,10 @@
 int jbd2_journal_set_features (journal_t *journal, unsigned long compat,
 			  unsigned long ro, unsigned long incompat)
 {
+#define INCOMPAT_FEATURE_ON(f) \
+		((incompat & (f)) && !(sb->s_feature_incompat & cpu_to_be32(f)))
+#define COMPAT_FEATURE_ON(f) \
+		((compat & (f)) && !(sb->s_feature_compat & cpu_to_be32(f)))
 	journal_superblock_t *sb;
 
 	if (jbd2_journal_check_used_features(journal, compat, ro, incompat))
@@ -1661,16 +1745,54 @@
 	if (!jbd2_journal_check_available_features(journal, compat, ro, incompat))
 		return 0;
 
+	/* Asking for checksumming v2 and v1?  Only give them v2. */
+	if (incompat & JBD2_FEATURE_INCOMPAT_CSUM_V2 &&
+	    compat & JBD2_FEATURE_COMPAT_CHECKSUM)
+		compat &= ~JBD2_FEATURE_COMPAT_CHECKSUM;
+
 	jbd_debug(1, "Setting new features 0x%lx/0x%lx/0x%lx\n",
 		  compat, ro, incompat);
 
 	sb = journal->j_superblock;
 
+	/* If enabling v2 checksums, update superblock */
+	if (INCOMPAT_FEATURE_ON(JBD2_FEATURE_INCOMPAT_CSUM_V2)) {
+		sb->s_checksum_type = JBD2_CRC32C_CHKSUM;
+		sb->s_feature_compat &=
+			~cpu_to_be32(JBD2_FEATURE_COMPAT_CHECKSUM);
+
+		/* Load the checksum driver */
+		if (journal->j_chksum_driver == NULL) {
+			journal->j_chksum_driver = crypto_alloc_shash("crc32c",
+								      0, 0);
+			if (IS_ERR(journal->j_chksum_driver)) {
+				printk(KERN_ERR "JBD: Cannot load crc32c "
+				       "driver.\n");
+				journal->j_chksum_driver = NULL;
+				return 0;
+			}
+		}
+
+		/* Precompute checksum seed for all metadata */
+		if (JBD2_HAS_INCOMPAT_FEATURE(journal,
+					      JBD2_FEATURE_INCOMPAT_CSUM_V2))
+			journal->j_csum_seed = jbd2_chksum(journal, ~0,
+							   sb->s_uuid,
+							   sizeof(sb->s_uuid));
+	}
+
+	/* If enabling v1 checksums, downgrade superblock */
+	if (COMPAT_FEATURE_ON(JBD2_FEATURE_COMPAT_CHECKSUM))
+		sb->s_feature_incompat &=
+			~cpu_to_be32(JBD2_FEATURE_INCOMPAT_CSUM_V2);
+
 	sb->s_feature_compat    |= cpu_to_be32(compat);
 	sb->s_feature_ro_compat |= cpu_to_be32(ro);
 	sb->s_feature_incompat  |= cpu_to_be32(incompat);
 
 	return 1;
+#undef COMPAT_FEATURE_ON
+#undef INCOMPAT_FEATURE_ON
 }
 
 /*
@@ -1975,10 +2097,16 @@
  */
 size_t journal_tag_bytes(journal_t *journal)
 {
+	journal_block_tag_t tag;
+	size_t x = 0;
+
+	if (JBD2_HAS_INCOMPAT_FEATURE(journal, JBD2_FEATURE_INCOMPAT_CSUM_V2))
+		x += sizeof(tag.t_checksum);
+
 	if (JBD2_HAS_INCOMPAT_FEATURE(journal, JBD2_FEATURE_INCOMPAT_64BIT))
-		return JBD2_TAG_SIZE64;
+		return x + JBD2_TAG_SIZE64;
 	else
-		return JBD2_TAG_SIZE32;
+		return x + JBD2_TAG_SIZE32;
 }
 
 /*
diff --git a/fs/jbd2/recovery.c b/fs/jbd2/recovery.c
index c1a0335..0131e43 100644
--- a/fs/jbd2/recovery.c
+++ b/fs/jbd2/recovery.c
@@ -174,6 +174,25 @@
 	return 0;
 }
 
+static int jbd2_descr_block_csum_verify(journal_t *j,
+					void *buf)
+{
+	struct jbd2_journal_block_tail *tail;
+	__u32 provided, calculated;
+
+	if (!JBD2_HAS_INCOMPAT_FEATURE(j, JBD2_FEATURE_INCOMPAT_CSUM_V2))
+		return 1;
+
+	tail = (struct jbd2_journal_block_tail *)(buf + j->j_blocksize -
+			sizeof(struct jbd2_journal_block_tail));
+	provided = tail->t_checksum;
+	tail->t_checksum = 0;
+	calculated = jbd2_chksum(j, j->j_csum_seed, buf, j->j_blocksize);
+	tail->t_checksum = provided;
+
+	provided = be32_to_cpu(provided);
+	return provided == calculated;
+}
 
 /*
  * Count the number of in-use tags in a journal descriptor block.
@@ -186,6 +205,9 @@
 	int			nr = 0, size = journal->j_blocksize;
 	int			tag_bytes = journal_tag_bytes(journal);
 
+	if (JBD2_HAS_INCOMPAT_FEATURE(journal, JBD2_FEATURE_INCOMPAT_CSUM_V2))
+		size -= sizeof(struct jbd2_journal_block_tail);
+
 	tagp = &bh->b_data[sizeof(journal_header_t)];
 
 	while ((tagp - bh->b_data + tag_bytes) <= size) {
@@ -193,10 +215,10 @@
 
 		nr++;
 		tagp += tag_bytes;
-		if (!(tag->t_flags & cpu_to_be32(JBD2_FLAG_SAME_UUID)))
+		if (!(tag->t_flags & cpu_to_be16(JBD2_FLAG_SAME_UUID)))
 			tagp += 16;
 
-		if (tag->t_flags & cpu_to_be32(JBD2_FLAG_LAST_TAG))
+		if (tag->t_flags & cpu_to_be16(JBD2_FLAG_LAST_TAG))
 			break;
 	}
 
@@ -353,6 +375,41 @@
 	return 0;
 }
 
+static int jbd2_commit_block_csum_verify(journal_t *j, void *buf)
+{
+	struct commit_header *h;
+	__u32 provided, calculated;
+
+	if (!JBD2_HAS_INCOMPAT_FEATURE(j, JBD2_FEATURE_INCOMPAT_CSUM_V2))
+		return 1;
+
+	h = buf;
+	provided = h->h_chksum[0];
+	h->h_chksum[0] = 0;
+	calculated = jbd2_chksum(j, j->j_csum_seed, buf, j->j_blocksize);
+	h->h_chksum[0] = provided;
+
+	provided = be32_to_cpu(provided);
+	return provided == calculated;
+}
+
+static int jbd2_block_tag_csum_verify(journal_t *j, journal_block_tag_t *tag,
+				      void *buf, __u32 sequence)
+{
+	__u32 provided, calculated;
+
+	if (!JBD2_HAS_INCOMPAT_FEATURE(j, JBD2_FEATURE_INCOMPAT_CSUM_V2))
+		return 1;
+
+	sequence = cpu_to_be32(sequence);
+	calculated = jbd2_chksum(j, j->j_csum_seed, (__u8 *)&sequence,
+				 sizeof(sequence));
+	calculated = jbd2_chksum(j, calculated, buf, j->j_blocksize);
+	provided = be32_to_cpu(tag->t_checksum);
+
+	return provided == cpu_to_be32(calculated);
+}
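
jbd2_block_tag_csum_set() on the commit side and the verifier above chain the commit sequence number into the crc32c state before the block contents, so a stale block replayed under the wrong sequence fails the check. Here is a self-contained sketch of that chaining; the bitwise CRC, the seed value and the buffer contents are illustrative assumptions, not the kernel implementation.

#include <stddef.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>
#include <arpa/inet.h>          /* htonl(): the sequence is checksummed big-endian */

/* Same bitwise CRC-32C stand-in as in the earlier superblock sketch. */
static uint32_t crc32c_update(uint32_t crc, const void *buf, size_t len)
{
	const uint8_t *p = buf;

	while (len--) {
		crc ^= *p++;
		for (int i = 0; i < 8; i++)
			crc = (crc >> 1) ^ ((crc & 1) ? 0x82F63B78u : 0);
	}
	return crc;
}

int main(void)
{
	uint8_t  block[4096];
	uint32_t seed = 0xdeadbeef;     /* stands in for j_csum_seed */
	uint32_t seq  = htonl(42);      /* commit sequence, big-endian on disk */
	uint32_t csum;

	memset(block, 0xa5, sizeof(block));

	/* Fold the sequence number into the state first, then the block data. */
	csum = crc32c_update(seed, &seq, sizeof(seq));
	csum = crc32c_update(csum, block, sizeof(block));

	printf("tag checksum %08x\n", (unsigned)csum);
	return 0;
}
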
+
 static int do_one_pass(journal_t *journal,
 			struct recovery_info *info, enum passtype pass)
 {
@@ -366,6 +423,7 @@
 	int			blocktype;
 	int			tag_bytes = journal_tag_bytes(journal);
 	__u32			crc32_sum = ~0; /* Transactional Checksums */
+	int			descr_csum_size = 0;
 
 	/*
 	 * First thing is to establish what we expect to find in the log
@@ -451,6 +509,18 @@
 
 		switch(blocktype) {
 		case JBD2_DESCRIPTOR_BLOCK:
+			/* Verify checksum first */
+			if (JBD2_HAS_INCOMPAT_FEATURE(journal,
+					JBD2_FEATURE_INCOMPAT_CSUM_V2))
+				descr_csum_size =
+					sizeof(struct jbd2_journal_block_tail);
+			if (descr_csum_size > 0 &&
+			    !jbd2_descr_block_csum_verify(journal,
+							  bh->b_data)) {
+				err = -EIO;
+				goto failed;
+			}
+
 			/* If it is a valid descriptor block, replay it
 			 * in pass REPLAY; if journal_checksums enabled, then
 			 * calculate checksums in PASS_SCAN, otherwise,
@@ -481,11 +551,11 @@
 
 			tagp = &bh->b_data[sizeof(journal_header_t)];
 			while ((tagp - bh->b_data + tag_bytes)
-			       <= journal->j_blocksize) {
+			       <= journal->j_blocksize - descr_csum_size) {
 				unsigned long io_block;
 
 				tag = (journal_block_tag_t *) tagp;
-				flags = be32_to_cpu(tag->t_flags);
+				flags = be16_to_cpu(tag->t_flags);
 
 				io_block = next_log_block++;
 				wrap(journal, next_log_block);
@@ -516,6 +586,19 @@
 						goto skip_write;
 					}
 
+					/* Look for block corruption */
+					if (!jbd2_block_tag_csum_verify(
+						journal, tag, obh->b_data,
+						be32_to_cpu(tmp->h_sequence))) {
+						brelse(obh);
+						success = -EIO;
+						printk(KERN_ERR "JBD: Invalid "
+						       "checksum recovering "
+						       "block %llu in log\n",
+						       blocknr);
+						continue;
+					}
+
 					/* Find a buffer for the new
 					 * data being restored */
 					nbh = __getblk(journal->j_fs_dev,
@@ -650,6 +733,19 @@
 				}
 				crc32_sum = ~0;
 			}
+			if (pass == PASS_SCAN &&
+			    !jbd2_commit_block_csum_verify(journal,
+							   bh->b_data)) {
+				info->end_transaction = next_commit_ID;
+
+				if (!JBD2_HAS_INCOMPAT_FEATURE(journal,
+				     JBD2_FEATURE_INCOMPAT_ASYNC_COMMIT)) {
+					journal->j_failed_commit =
+						next_commit_ID;
+					brelse(bh);
+					break;
+				}
+			}
 			brelse(bh);
 			next_commit_ID++;
 			continue;
@@ -706,6 +802,25 @@
 	return err;
 }
 
+static int jbd2_revoke_block_csum_verify(journal_t *j,
+					 void *buf)
+{
+	struct jbd2_journal_revoke_tail *tail;
+	__u32 provided, calculated;
+
+	if (!JBD2_HAS_INCOMPAT_FEATURE(j, JBD2_FEATURE_INCOMPAT_CSUM_V2))
+		return 1;
+
+	tail = (struct jbd2_journal_revoke_tail *)(buf + j->j_blocksize -
+			sizeof(struct jbd2_journal_revoke_tail));
+	provided = tail->r_checksum;
+	tail->r_checksum = 0;
+	calculated = jbd2_chksum(j, j->j_csum_seed, buf, j->j_blocksize);
+	tail->r_checksum = provided;
+
+	provided = be32_to_cpu(provided);
+	return provided == calculated;
+}
 
 /* Scan a revoke record, marking all blocks mentioned as revoked. */
 
@@ -720,6 +835,9 @@
 	offset = sizeof(jbd2_journal_revoke_header_t);
 	max = be32_to_cpu(header->r_count);
 
+	if (!jbd2_revoke_block_csum_verify(journal, header))
+		return -EINVAL;
+
 	if (JBD2_HAS_INCOMPAT_FEATURE(journal, JBD2_FEATURE_INCOMPAT_64BIT))
 		record_len = 8;
 
diff --git a/fs/jbd2/revoke.c b/fs/jbd2/revoke.c
index 6973705..f30b80b 100644
--- a/fs/jbd2/revoke.c
+++ b/fs/jbd2/revoke.c
@@ -578,6 +578,7 @@
 				    struct jbd2_revoke_record_s *record,
 				    int write_op)
 {
+	int csum_size = 0;
 	struct journal_head *descriptor;
 	int offset;
 	journal_header_t *header;
@@ -592,9 +593,13 @@
 	descriptor = *descriptorp;
 	offset = *offsetp;
 
+	/* Do we need to leave space at the end for a checksum? */
+	if (JBD2_HAS_INCOMPAT_FEATURE(journal, JBD2_FEATURE_INCOMPAT_CSUM_V2))
+		csum_size = sizeof(struct jbd2_journal_revoke_tail);
+
 	/* Make sure we have a descriptor with space left for the record */
 	if (descriptor) {
-		if (offset == journal->j_blocksize) {
+		if (offset >= journal->j_blocksize - csum_size) {
 			flush_descriptor(journal, descriptor, offset, write_op);
 			descriptor = NULL;
 		}
@@ -631,6 +636,24 @@
 	*offsetp = offset;
 }
 
+static void jbd2_revoke_csum_set(journal_t *j,
+				 struct journal_head *descriptor)
+{
+	struct jbd2_journal_revoke_tail *tail;
+	__u32 csum;
+
+	if (!JBD2_HAS_INCOMPAT_FEATURE(j, JBD2_FEATURE_INCOMPAT_CSUM_V2))
+		return;
+
+	tail = (struct jbd2_journal_revoke_tail *)
+			(jh2bh(descriptor)->b_data + j->j_blocksize -
+			sizeof(struct jbd2_journal_revoke_tail));
+	tail->r_checksum = 0;
+	csum = jbd2_chksum(j, j->j_csum_seed, jh2bh(descriptor)->b_data,
+			   j->j_blocksize);
+	tail->r_checksum = cpu_to_be32(csum);
+}
+
 /*
  * Flush a revoke descriptor out to the journal.  If we are aborting,
  * this is a noop; otherwise we are generating a buffer which needs to
@@ -652,6 +675,8 @@
 
 	header = (jbd2_journal_revoke_header_t *) jh2bh(descriptor)->b_data;
 	header->r_count = cpu_to_be32(offset);
+	jbd2_revoke_csum_set(journal, descriptor);
+
 	set_buffer_jwrite(bh);
 	BUFFER_TRACE(bh, "write");
 	set_buffer_dirty(bh);
diff --git a/fs/jbd2/transaction.c b/fs/jbd2/transaction.c
index ddcd354..fb1ab953 100644
--- a/fs/jbd2/transaction.c
+++ b/fs/jbd2/transaction.c
@@ -162,8 +162,8 @@
 
 alloc_transaction:
 	if (!journal->j_running_transaction) {
-		new_transaction = kmem_cache_alloc(transaction_cache,
-						   gfp_mask | __GFP_ZERO);
+		new_transaction = kmem_cache_zalloc(transaction_cache,
+						    gfp_mask);
 		if (!new_transaction) {
 			/*
 			 * If __GFP_FS is not present, then we may be
diff --git a/fs/jffs2/fs.c b/fs/jffs2/fs.c
index bb6f993..3d3092e 100644
--- a/fs/jffs2/fs.c
+++ b/fs/jffs2/fs.c
@@ -240,7 +240,7 @@
 	jffs2_dbg(1, "%s(): ino #%lu mode %o\n",
 		  __func__, inode->i_ino, inode->i_mode);
 	truncate_inode_pages(&inode->i_data, 0);
-	end_writeback(inode);
+	clear_inode(inode);
 	jffs2_do_clear_inode(c, f);
 }
 
diff --git a/fs/jffs2/jffs2_fs_sb.h b/fs/jffs2/jffs2_fs_sb.h
index 55a0c1d..413ef89 100644
--- a/fs/jffs2/jffs2_fs_sb.h
+++ b/fs/jffs2/jffs2_fs_sb.h
@@ -32,6 +32,13 @@
 struct jffs2_mount_opts {
 	bool override_compr;
 	unsigned int compr;
+
+	/* The size of the reserved pool. The reserved pool is the JFFS2 flash
+	 * space which may only be used by root and cannot be used by other
+	 * users. This is implemented simply by not allowing the latter to
+	 * write to the file system if the amount of available space is less
+	 * than 'rp_size'. */
+	unsigned int rp_size;
 };
 
 /* A struct for the overall file system control.  Pointers to
@@ -126,6 +133,10 @@
 	struct jffs2_inodirty *wbuf_inodes;
 	struct rw_semaphore wbuf_sem;	/* Protects the write buffer */
 
+	struct delayed_work wbuf_dwork; /* write-buffer write-out work */
+	int wbuf_queued;                /* non-zero if delayed work is queued */
+	spinlock_t wbuf_dwork_lock;     /* protects wbuf_dwork and wbuf_queued */
+
 	unsigned char *oobbuf;
 	int oobavail; /* How many bytes are available for JFFS2 in OOB */
 #endif
diff --git a/fs/jffs2/nodemgmt.c b/fs/jffs2/nodemgmt.c
index 6784d1e..0c96eb5 100644
--- a/fs/jffs2/nodemgmt.c
+++ b/fs/jffs2/nodemgmt.c
@@ -18,6 +18,37 @@
 #include "nodelist.h"
 #include "debug.h"
 
+/*
+ * Check whether the user is allowed to write.
+ */
+static int jffs2_rp_can_write(struct jffs2_sb_info *c)
+{
+	uint32_t avail;
+	struct jffs2_mount_opts *opts = &c->mount_opts;
+
+	avail = c->dirty_size + c->free_size + c->unchecked_size +
+		c->erasing_size - c->resv_blocks_write * c->sector_size
+		- c->nospc_dirty_size;
+
+	if (avail < 2 * opts->rp_size)
+		jffs2_dbg(1, "rpsize %u, dirty_size %u, free_size %u, "
+			  "erasing_size %u, unchecked_size %u, "
+			  "nr_erasing_blocks %u, avail %u, resrv %u\n",
+			  opts->rp_size, c->dirty_size, c->free_size,
+			  c->erasing_size, c->unchecked_size,
+			  c->nr_erasing_blocks, avail, c->nospc_dirty_size);
+
+	if (avail > opts->rp_size)
+		return 1;
+
+	/* Always allow root */
+	if (capable(CAP_SYS_RESOURCE))
+		return 1;
+
+	jffs2_dbg(1, "forbid writing\n");
+	return 0;
+}
+
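+
The admission check above is plain arithmetic: add up the space that is, or will become, writable, subtract the blocks held back for internal use, and refuse ordinary writers once the result is no larger than rp_size. A standalone sketch of that calculation follows; the struct and the field values are illustrative, not kernel code.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

struct fs_space {                       /* illustrative subset of jffs2_sb_info */
	uint32_t dirty_size, free_size, unchecked_size, erasing_size;
	uint32_t resv_blocks_write, sector_size, nospc_dirty_size;
};

static bool rp_can_write(const struct fs_space *c, uint32_t rp_size,
			 bool privileged)
{
	uint32_t avail = c->dirty_size + c->free_size + c->unchecked_size +
			 c->erasing_size -
			 c->resv_blocks_write * c->sector_size -
			 c->nospc_dirty_size;

	if (avail > rp_size)
		return true;            /* plenty of space left for everyone */
	return privileged;              /* only root may eat into the pool */
}

int main(void)
{
	struct fs_space c = {
		.dirty_size = 4096, .free_size = 65536,
		.erasing_size = 65536, .resv_blocks_write = 2,
		.sector_size = 65536,
	};

	/* avail works out to 4 KiB, below the 64 KiB pool: user no, root yes. */
	printf("user: %d, root: %d\n",
	       rp_can_write(&c, 64 * 1024, false),
	       rp_can_write(&c, 64 * 1024, true));
	return 0;
}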
 /**
  *	jffs2_reserve_space - request physical space to write nodes to flash
  *	@c: superblock info
@@ -55,6 +86,15 @@
 
 	spin_lock(&c->erase_completion_lock);
 
+	/*
+	 * Check if the free space is greater than the size of the reserved pool.
+	 * If not, only allow root to proceed with writing.
+	 */
+	if (prio != ALLOC_DELETION && !jffs2_rp_can_write(c)) {
+		ret = -ENOSPC;
+		goto out;
+	}
+
 	/* this needs a little more thought (true <tglx> :)) */
 	while(ret == -EAGAIN) {
 		while(c->nr_free_blocks + c->nr_erasing_blocks < blocksneeded) {
@@ -158,6 +198,8 @@
 			jffs2_dbg(1, "%s(): ret is %d\n", __func__, ret);
 		}
 	}
+
+out:
 	spin_unlock(&c->erase_completion_lock);
 	if (!ret)
 		ret = jffs2_prealloc_raw_node_refs(c, c->nextblock, 1);
diff --git a/fs/jffs2/os-linux.h b/fs/jffs2/os-linux.h
index 1cd3aec..bcd983d 100644
--- a/fs/jffs2/os-linux.h
+++ b/fs/jffs2/os-linux.h
@@ -95,6 +95,7 @@
 #define jffs2_ubivol(c) (0)
 #define jffs2_ubivol_setup(c) (0)
 #define jffs2_ubivol_cleanup(c) do {} while (0)
+#define jffs2_dirty_trigger(c) do {} while (0)
 
 #else /* NAND and/or ECC'd NOR support present */
 
@@ -135,14 +136,10 @@
 #define jffs2_nor_wbuf_flash(c) (c->mtd->type == MTD_NORFLASH && ! (c->mtd->flags & MTD_BIT_WRITEABLE))
 int jffs2_nor_wbuf_flash_setup(struct jffs2_sb_info *c);
 void jffs2_nor_wbuf_flash_cleanup(struct jffs2_sb_info *c);
+void jffs2_dirty_trigger(struct jffs2_sb_info *c);
 
 #endif /* WRITEBUFFER */
 
-static inline void jffs2_dirty_trigger(struct jffs2_sb_info *c)
-{
-	OFNI_BS_2SFFJ(c)->s_dirt = 1;
-}
-
 /* background.c */
 int jffs2_start_garbage_collect_thread(struct jffs2_sb_info *c);
 void jffs2_stop_garbage_collect_thread(struct jffs2_sb_info *c);
diff --git a/fs/jffs2/readinode.c b/fs/jffs2/readinode.c
index dc0437e..1ea349f 100644
--- a/fs/jffs2/readinode.c
+++ b/fs/jffs2/readinode.c
@@ -1266,19 +1266,25 @@
 			/* Symlink's inode data is the target path. Read it and
 			 * keep in RAM to facilitate quick follow symlink
 			 * operation. */
-			f->target = kmalloc(je32_to_cpu(latest_node->csize) + 1, GFP_KERNEL);
+			uint32_t csize = je32_to_cpu(latest_node->csize);
+			if (csize > JFFS2_MAX_NAME_LEN) {
+				mutex_unlock(&f->sem);
+				jffs2_do_clear_inode(c, f);
+				return -ENAMETOOLONG;
+			}
+			f->target = kmalloc(csize + 1, GFP_KERNEL);
 			if (!f->target) {
-				JFFS2_ERROR("can't allocate %d bytes of memory for the symlink target path cache\n", je32_to_cpu(latest_node->csize));
+				JFFS2_ERROR("can't allocate %u bytes of memory for the symlink target path cache\n", csize);
 				mutex_unlock(&f->sem);
 				jffs2_do_clear_inode(c, f);
 				return -ENOMEM;
 			}
 
 			ret = jffs2_flash_read(c, ref_offset(rii.latest_ref) + sizeof(*latest_node),
-						je32_to_cpu(latest_node->csize), &retlen, (char *)f->target);
+					       csize, &retlen, (char *)f->target);
 
-			if (ret  || retlen != je32_to_cpu(latest_node->csize)) {
-				if (retlen != je32_to_cpu(latest_node->csize))
+			if (ret || retlen != csize) {
+				if (retlen != csize)
 					ret = -EIO;
 				kfree(f->target);
 				f->target = NULL;
@@ -1287,7 +1293,7 @@
 				return ret;
 			}
 
-			f->target[je32_to_cpu(latest_node->csize)] = '\0';
+			f->target[csize] = '\0';
 			dbg_readinode("symlink's target '%s' cached\n", f->target);
 		}
 
@@ -1415,6 +1421,7 @@
 		mutex_unlock(&f->sem);
 		jffs2_do_clear_inode(c, f);
 	}
+	jffs2_xattr_do_crccheck_inode(c, ic);
 	kfree (f);
 	return ret;
 }
diff --git a/fs/jffs2/super.c b/fs/jffs2/super.c
index f9916f3..61ea413 100644
--- a/fs/jffs2/super.c
+++ b/fs/jffs2/super.c
@@ -63,21 +63,6 @@
 	inode_init_once(&f->vfs_inode);
 }
 
-static void jffs2_write_super(struct super_block *sb)
-{
-	struct jffs2_sb_info *c = JFFS2_SB_INFO(sb);
-
-	lock_super(sb);
-	sb->s_dirt = 0;
-
-	if (!(sb->s_flags & MS_RDONLY)) {
-		jffs2_dbg(1, "%s()\n", __func__);
-		jffs2_flush_wbuf_gc(c, 0);
-	}
-
-	unlock_super(sb);
-}
-
 static const char *jffs2_compr_name(unsigned int compr)
 {
 	switch (compr) {
@@ -105,6 +90,8 @@
 
 	if (opts->override_compr)
 		seq_printf(s, ",compr=%s", jffs2_compr_name(opts->compr));
+	if (opts->rp_size)
+		seq_printf(s, ",rp_size=%u", opts->rp_size / 1024);
 
 	return 0;
 }
@@ -113,8 +100,6 @@
 {
 	struct jffs2_sb_info *c = JFFS2_SB_INFO(sb);
 
-	jffs2_write_super(sb);
-
 	mutex_lock(&c->alloc_sem);
 	jffs2_flush_wbuf_pad(c);
 	mutex_unlock(&c->alloc_sem);
@@ -171,15 +156,18 @@
  * JFFS2 mount options.
  *
  * Opt_override_compr: override default compressor
+ * Opt_rp_size: size of reserved pool in KiB
  * Opt_err: just end of array marker
  */
 enum {
 	Opt_override_compr,
+	Opt_rp_size,
 	Opt_err,
 };
 
 static const match_table_t tokens = {
 	{Opt_override_compr, "compr=%s"},
+	{Opt_rp_size, "rp_size=%u"},
 	{Opt_err, NULL},
 };
 
@@ -187,6 +175,7 @@
 {
 	substring_t args[MAX_OPT_ARGS];
 	char *p, *name;
+	unsigned int opt;
 
 	if (!data)
 		return 0;
@@ -224,6 +213,17 @@
 			kfree(name);
 			c->mount_opts.override_compr = true;
 			break;
+		case Opt_rp_size:
+			if (match_int(&args[0], &opt))
+				return -EINVAL;
+			opt *= 1024;
+			if (opt > c->mtd->size) {
+				pr_warn("Too large reserve pool specified, max "
+					"is %llu KB\n", c->mtd->size / 1024);
+				return -EINVAL;
+			}
+			c->mount_opts.rp_size = opt;
+			break;
 		default:
 			pr_err("Error: unrecognized mount option '%s' or missing value\n",
 			       p);
@@ -251,7 +251,6 @@
 	.alloc_inode =	jffs2_alloc_inode,
 	.destroy_inode =jffs2_destroy_inode,
 	.put_super =	jffs2_put_super,
-	.write_super =	jffs2_write_super,
 	.statfs =	jffs2_statfs,
 	.remount_fs =	jffs2_remount_fs,
 	.evict_inode =	jffs2_evict_inode,
@@ -319,9 +318,6 @@
 
 	jffs2_dbg(2, "%s()\n", __func__);
 
-	if (sb->s_dirt)
-		jffs2_write_super(sb);
-
 	mutex_lock(&c->alloc_sem);
 	jffs2_flush_wbuf_pad(c);
 	mutex_unlock(&c->alloc_sem);
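
The rp_size= handling above takes a KiB count from the user, scales it to bytes and rejects anything larger than the medium. Below is a hedged sketch of that conversion as a standalone helper; parse_rp_size() is a hypothetical name, not a kernel function.

#include <stdint.h>
#include <stdio.h>

static int parse_rp_size(unsigned int kib, uint64_t mtd_size,
			 unsigned int *rp_size)
{
	uint64_t bytes = (uint64_t)kib * 1024;          /* option is given in KiB */

	if (bytes > mtd_size) {
		fprintf(stderr, "Too large reserve pool, max is %llu KiB\n",
			(unsigned long long)(mtd_size / 1024));
		return -1;
	}
	*rp_size = (unsigned int)bytes;
	return 0;
}

int main(void)
{
	unsigned int rp;

	if (!parse_rp_size(128, 16 * 1024 * 1024, &rp))
		printf("rp_size=%u bytes\n", rp);       /* 131072 */
	return 0;
}
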
diff --git a/fs/jffs2/wbuf.c b/fs/jffs2/wbuf.c
index 74d9be1..6f4529d 100644
--- a/fs/jffs2/wbuf.c
+++ b/fs/jffs2/wbuf.c
@@ -20,6 +20,7 @@
 #include <linux/mtd/nand.h>
 #include <linux/jiffies.h>
 #include <linux/sched.h>
+#include <linux/writeback.h>
 
 #include "nodelist.h"
 
@@ -85,7 +86,7 @@
 {
 	struct jffs2_inodirty *new;
 
-	/* Mark the superblock dirty so that kupdated will flush... */
+	/* Schedule delayed write-buffer write-out */
 	jffs2_dirty_trigger(c);
 
 	if (jffs2_wbuf_pending_for_ino(c, ino))
@@ -1148,6 +1149,47 @@
 	return 1;
 }
 
+static struct jffs2_sb_info *work_to_sb(struct work_struct *work)
+{
+	struct delayed_work *dwork;
+
+	dwork = container_of(work, struct delayed_work, work);
+	return container_of(dwork, struct jffs2_sb_info, wbuf_dwork);
+}
+
+static void delayed_wbuf_sync(struct work_struct *work)
+{
+	struct jffs2_sb_info *c = work_to_sb(work);
+	struct super_block *sb = OFNI_BS_2SFFJ(c);
+
+	spin_lock(&c->wbuf_dwork_lock);
+	c->wbuf_queued = 0;
+	spin_unlock(&c->wbuf_dwork_lock);
+
+	if (!(sb->s_flags & MS_RDONLY)) {
+		jffs2_dbg(1, "%s()\n", __func__);
+		jffs2_flush_wbuf_gc(c, 0);
+	}
+}
+
+void jffs2_dirty_trigger(struct jffs2_sb_info *c)
+{
+	struct super_block *sb = OFNI_BS_2SFFJ(c);
+	unsigned long delay;
+
+	if (sb->s_flags & MS_RDONLY)
+		return;
+
+	spin_lock(&c->wbuf_dwork_lock);
+	if (!c->wbuf_queued) {
+		jffs2_dbg(1, "%s()\n", __func__);
+		delay = msecs_to_jiffies(dirty_writeback_interval * 10);
+		queue_delayed_work(system_long_wq, &c->wbuf_dwork, delay);
+		c->wbuf_queued = 1;
+	}
+	spin_unlock(&c->wbuf_dwork_lock);
+}
+
 int jffs2_nand_flash_setup(struct jffs2_sb_info *c)
 {
 	struct nand_ecclayout *oinfo = c->mtd->ecclayout;
@@ -1169,6 +1211,8 @@
 
 	/* Initialise write buffer */
 	init_rwsem(&c->wbuf_sem);
+	spin_lock_init(&c->wbuf_dwork_lock);
+	INIT_DELAYED_WORK(&c->wbuf_dwork, delayed_wbuf_sync);
 	c->wbuf_pagesize = c->mtd->writesize;
 	c->wbuf_ofs = 0xFFFFFFFF;
 
@@ -1207,8 +1251,8 @@
 
 	/* Initialize write buffer */
 	init_rwsem(&c->wbuf_sem);
-
-
+	spin_lock_init(&c->wbuf_dwork_lock);
+	INIT_DELAYED_WORK(&c->wbuf_dwork, delayed_wbuf_sync);
 	c->wbuf_pagesize =  c->mtd->erasesize;
 
 	/* Find a suitable c->sector_size
@@ -1267,6 +1311,9 @@
 
 	/* Initialize write buffer */
 	init_rwsem(&c->wbuf_sem);
+	spin_lock_init(&c->wbuf_dwork_lock);
+	INIT_DELAYED_WORK(&c->wbuf_dwork, delayed_wbuf_sync);
+
 	c->wbuf_pagesize = c->mtd->writesize;
 	c->wbuf_ofs = 0xFFFFFFFF;
 
@@ -1299,6 +1346,8 @@
 		return 0;
 
 	init_rwsem(&c->wbuf_sem);
+	spin_lock_init(&c->wbuf_dwork_lock);
+	INIT_DELAYED_WORK(&c->wbuf_dwork, delayed_wbuf_sync);
 
 	c->wbuf_pagesize =  c->mtd->writesize;
 	c->wbuf_ofs = 0xFFFFFFFF;
diff --git a/fs/jffs2/xattr.c b/fs/jffs2/xattr.c
index b55b803..3034e97 100644
--- a/fs/jffs2/xattr.c
+++ b/fs/jffs2/xattr.c
@@ -11,6 +11,8 @@
 
 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
 
+#define JFFS2_XATTR_IS_CORRUPTED	1
+
 #include <linux/kernel.h>
 #include <linux/slab.h>
 #include <linux/fs.h>
@@ -153,7 +155,7 @@
 		JFFS2_ERROR("node CRC failed at %#08x, read=%#08x, calc=%#08x\n",
 			    offset, je32_to_cpu(rx.hdr_crc), crc);
 		xd->flags |= JFFS2_XFLAGS_INVALID;
-		return -EIO;
+		return JFFS2_XATTR_IS_CORRUPTED;
 	}
 	totlen = PAD(sizeof(rx) + rx.name_len + 1 + je16_to_cpu(rx.value_len));
 	if (je16_to_cpu(rx.magic) != JFFS2_MAGIC_BITMASK
@@ -169,7 +171,7 @@
 			    je32_to_cpu(rx.xid), xd->xid,
 			    je32_to_cpu(rx.version), xd->version);
 		xd->flags |= JFFS2_XFLAGS_INVALID;
-		return -EIO;
+		return JFFS2_XATTR_IS_CORRUPTED;
 	}
 	xd->xprefix = rx.xprefix;
 	xd->name_len = rx.name_len;
@@ -227,12 +229,12 @@
 	data[xd->name_len] = '\0';
 	crc = crc32(0, data, length);
 	if (crc != xd->data_crc) {
-		JFFS2_WARNING("node CRC failed (JFFS2_NODETYPE_XREF)"
+		JFFS2_WARNING("node CRC failed (JFFS2_NODETYPE_XATTR)"
 			      " at %#08x, read: 0x%08x calculated: 0x%08x\n",
 			      ref_offset(xd->node), xd->data_crc, crc);
 		kfree(data);
 		xd->flags |= JFFS2_XFLAGS_INVALID;
-		return -EIO;
+		return JFFS2_XATTR_IS_CORRUPTED;
 	}
 
 	xd->flags |= JFFS2_XFLAGS_HOT;
@@ -270,7 +272,7 @@
 	if (xd->xname)
 		return 0;
 	if (xd->flags & JFFS2_XFLAGS_INVALID)
-		return -EIO;
+		return JFFS2_XATTR_IS_CORRUPTED;
 	if (unlikely(is_xattr_datum_unchecked(c, xd)))
 		rc = do_verify_xattr_datum(c, xd);
 	if (!rc)
@@ -435,6 +437,8 @@
  *   is called to release xattr related objects when unmounting. 
  * check_xattr_ref_inode(c, ic)
  *   is used to confirm inode does not have duplicate xattr name/value pair.
+ * jffs2_xattr_do_crccheck_inode(c, ic)
+ *   is used to force an xattr data integrity check during the initial gc scan.
  * -------------------------------------------------- */
 static int verify_xattr_ref(struct jffs2_sb_info *c, struct jffs2_xattr_ref *ref)
 {
@@ -462,7 +466,7 @@
 	if (crc != je32_to_cpu(rr.node_crc)) {
 		JFFS2_ERROR("node CRC failed at %#08x, read=%#08x, calc=%#08x\n",
 			    offset, je32_to_cpu(rr.node_crc), crc);
-		return -EIO;
+		return JFFS2_XATTR_IS_CORRUPTED;
 	}
 	if (je16_to_cpu(rr.magic) != JFFS2_MAGIC_BITMASK
 	    || je16_to_cpu(rr.nodetype) != JFFS2_NODETYPE_XREF
@@ -472,7 +476,7 @@
 			    offset, je16_to_cpu(rr.magic), JFFS2_MAGIC_BITMASK,
 			    je16_to_cpu(rr.nodetype), JFFS2_NODETYPE_XREF,
 			    je32_to_cpu(rr.totlen), PAD(sizeof(rr)));
-		return -EIO;
+		return JFFS2_XATTR_IS_CORRUPTED;
 	}
 	ref->ino = je32_to_cpu(rr.ino);
 	ref->xid = je32_to_cpu(rr.xid);
@@ -682,6 +686,11 @@
 	return rc;
 }
 
+void jffs2_xattr_do_crccheck_inode(struct jffs2_sb_info *c, struct jffs2_inode_cache *ic)
+{
+	check_xattr_ref_inode(c, ic);
+}
+
 /* -------- xattr subsystem functions ---------------
  * jffs2_init_xattr_subsystem(c)
  *   is used to initialize semaphore and list_head, and some variables.
diff --git a/fs/jffs2/xattr.h b/fs/jffs2/xattr.h
index 7be4beb..467ff37 100644
--- a/fs/jffs2/xattr.h
+++ b/fs/jffs2/xattr.h
@@ -77,6 +77,7 @@
 extern struct jffs2_xattr_datum *jffs2_setup_xattr_datum(struct jffs2_sb_info *c,
 							 uint32_t xid, uint32_t version);
 
+extern void jffs2_xattr_do_crccheck_inode(struct jffs2_sb_info *c, struct jffs2_inode_cache *ic);
 extern void jffs2_xattr_delete_inode(struct jffs2_sb_info *c, struct jffs2_inode_cache *ic);
 extern void jffs2_xattr_free_inode(struct jffs2_sb_info *c, struct jffs2_inode_cache *ic);
 
@@ -108,6 +109,7 @@
 #define jffs2_build_xattr_subsystem(c)
 #define jffs2_clear_xattr_subsystem(c)
 
+#define jffs2_xattr_do_crccheck_inode(c, ic)
 #define jffs2_xattr_delete_inode(c, ic)
 #define jffs2_xattr_free_inode(c, ic)
 #define jffs2_verify_xattr(c)			(1)
diff --git a/fs/jfs/inode.c b/fs/jfs/inode.c
index 77b69b2..4692bf3 100644
--- a/fs/jfs/inode.c
+++ b/fs/jfs/inode.c
@@ -169,7 +169,7 @@
 	} else {
 		truncate_inode_pages(&inode->i_data, 0);
 	}
-	end_writeback(inode);
+	clear_inode(inode);
 	dquot_drop(inode);
 }
 
diff --git a/fs/lockd/clntlock.c b/fs/lockd/clntlock.c
index ba1dc2e..ca0a080 100644
--- a/fs/lockd/clntlock.c
+++ b/fs/lockd/clntlock.c
@@ -56,7 +56,7 @@
 	u32 nlm_version = (nlm_init->nfs_version == 2) ? 1 : 4;
 	int status;
 
-	status = lockd_up();
+	status = lockd_up(nlm_init->net);
 	if (status < 0)
 		return ERR_PTR(status);
 
@@ -65,7 +65,7 @@
 				   nlm_init->hostname, nlm_init->noresvport,
 				   nlm_init->net);
 	if (host == NULL) {
-		lockd_down();
+		lockd_down(nlm_init->net);
 		return ERR_PTR(-ENOLCK);
 	}
 
@@ -80,8 +80,10 @@
  */
 void nlmclnt_done(struct nlm_host *host)
 {
+	struct net *net = host->net;
+
 	nlmclnt_release_host(host);
-	lockd_down();
+	lockd_down(net);
 }
 EXPORT_SYMBOL_GPL(nlmclnt_done);
 
@@ -220,11 +222,12 @@
 	struct nlm_wait	  *block;
 	struct file_lock *fl, *next;
 	u32 nsmstate;
+	struct net *net = host->net;
 
 	allow_signal(SIGKILL);
 
 	down_write(&host->h_rwsem);
-	lockd_up();	/* note: this cannot fail as lockd is already running */
+	lockd_up(net);	/* note: this cannot fail as lockd is already running */
 
 	dprintk("lockd: reclaiming locks for host %s\n", host->h_name);
 
@@ -275,6 +278,6 @@
 
 	/* Release host handle after use */
 	nlmclnt_release_host(host);
-	lockd_down();
+	lockd_down(net);
 	return 0;
 }
diff --git a/fs/lockd/svc.c b/fs/lockd/svc.c
index f49b9af..80938fd 100644
--- a/fs/lockd/svc.c
+++ b/fs/lockd/svc.c
@@ -251,39 +251,40 @@
 	return err;
 }
 
-static int lockd_up_net(struct net *net)
+static int lockd_up_net(struct svc_serv *serv, struct net *net)
 {
 	struct lockd_net *ln = net_generic(net, lockd_net_id);
-	struct svc_serv *serv = nlmsvc_rqst->rq_server;
 	int error;
 
-	if (ln->nlmsvc_users)
+	if (ln->nlmsvc_users++)
 		return 0;
 
-	error = svc_rpcb_setup(serv, net);
+	error = svc_bind(serv, net);
 	if (error)
-		goto err_rpcb;
+		goto err_bind;
 
 	error = make_socks(serv, net);
 	if (error < 0)
 		goto err_socks;
+	dprintk("lockd_up_net: per-net data created; net=%p\n", net);
 	return 0;
 
 err_socks:
 	svc_rpcb_cleanup(serv, net);
-err_rpcb:
+err_bind:
+	ln->nlmsvc_users--;
 	return error;
 }
 
-static void lockd_down_net(struct net *net)
+static void lockd_down_net(struct svc_serv *serv, struct net *net)
 {
 	struct lockd_net *ln = net_generic(net, lockd_net_id);
-	struct svc_serv *serv = nlmsvc_rqst->rq_server;
 
 	if (ln->nlmsvc_users) {
 		if (--ln->nlmsvc_users == 0) {
 			nlm_shutdown_hosts_net(net);
 			svc_shutdown_net(serv, net);
+			dprintk("lockd_down_net: per-net data destroyed; net=%p\n", net);
 		}
 	} else {
 		printk(KERN_ERR "lockd_down_net: no users! task=%p, net=%p\n",
@@ -292,22 +293,60 @@
 	}
 }
 
-/*
- * Bring up the lockd process if it's not already up.
- */
-int lockd_up(void)
+static int lockd_start_svc(struct svc_serv *serv)
+{
+	int error;
+
+	if (nlmsvc_rqst)
+		return 0;
+
+	/*
+	 * Create the kernel thread and wait for it to start.
+	 */
+	nlmsvc_rqst = svc_prepare_thread(serv, &serv->sv_pools[0], NUMA_NO_NODE);
+	if (IS_ERR(nlmsvc_rqst)) {
+		error = PTR_ERR(nlmsvc_rqst);
+		printk(KERN_WARNING
+			"lockd_up: svc_rqst allocation failed, error=%d\n",
+			error);
+		goto out_rqst;
+	}
+
+	svc_sock_update_bufs(serv);
+	serv->sv_maxconn = nlm_max_connections;
+
+	nlmsvc_task = kthread_run(lockd, nlmsvc_rqst, serv->sv_name);
+	if (IS_ERR(nlmsvc_task)) {
+		error = PTR_ERR(nlmsvc_task);
+		printk(KERN_WARNING
+			"lockd_up: kthread_run failed, error=%d\n", error);
+		goto out_task;
+	}
+	dprintk("lockd_up: service started\n");
+	return 0;
+
+out_task:
+	svc_exit_thread(nlmsvc_rqst);
+	nlmsvc_task = NULL;
+out_rqst:
+	nlmsvc_rqst = NULL;
+	return error;
+}
+
+static struct svc_serv *lockd_create_svc(void)
 {
 	struct svc_serv *serv;
-	int		error = 0;
-	struct net *net = current->nsproxy->net_ns;
 
-	mutex_lock(&nlmsvc_mutex);
 	/*
 	 * Check whether we're already up and running.
 	 */
 	if (nlmsvc_rqst) {
-		error = lockd_up_net(net);
-		goto out;
+		/*
+		 * Note: bump the service reference count because svc_destroy()
+		 * will be called later, even in the error case.
+		 */
+		svc_get(nlmsvc_rqst->rq_server);
+		return nlmsvc_rqst->rq_server;
 	}
 
 	/*
@@ -318,59 +357,53 @@
 		printk(KERN_WARNING
 			"lockd_up: no pid, %d users??\n", nlmsvc_users);
 
-	error = -ENOMEM;
 	serv = svc_create(&nlmsvc_program, LOCKD_BUFSIZE, NULL);
 	if (!serv) {
 		printk(KERN_WARNING "lockd_up: create service failed\n");
-		goto out;
+		return ERR_PTR(-ENOMEM);
+	}
+	dprintk("lockd_up: service created\n");
+	return serv;
+}
+
+/*
+ * Bring up the lockd process if it's not already up.
+ */
+int lockd_up(struct net *net)
+{
+	struct svc_serv *serv;
+	int error;
+
+	mutex_lock(&nlmsvc_mutex);
+
+	serv = lockd_create_svc();
+	if (IS_ERR(serv)) {
+		error = PTR_ERR(serv);
+		goto err_create;
 	}
 
-	error = make_socks(serv, net);
+	error = lockd_up_net(serv, net);
 	if (error < 0)
-		goto destroy_and_out;
+		goto err_net;
 
-	/*
-	 * Create the kernel thread and wait for it to start.
-	 */
-	nlmsvc_rqst = svc_prepare_thread(serv, &serv->sv_pools[0], NUMA_NO_NODE);
-	if (IS_ERR(nlmsvc_rqst)) {
-		error = PTR_ERR(nlmsvc_rqst);
-		nlmsvc_rqst = NULL;
-		printk(KERN_WARNING
-			"lockd_up: svc_rqst allocation failed, error=%d\n",
-			error);
-		goto destroy_and_out;
-	}
+	error = lockd_start_svc(serv);
+	if (error < 0)
+		goto err_start;
 
-	svc_sock_update_bufs(serv);
-	serv->sv_maxconn = nlm_max_connections;
-
-	nlmsvc_task = kthread_run(lockd, nlmsvc_rqst, serv->sv_name);
-	if (IS_ERR(nlmsvc_task)) {
-		error = PTR_ERR(nlmsvc_task);
-		svc_exit_thread(nlmsvc_rqst);
-		nlmsvc_task = NULL;
-		nlmsvc_rqst = NULL;
-		printk(KERN_WARNING
-			"lockd_up: kthread_run failed, error=%d\n", error);
-		goto destroy_and_out;
-	}
-
+	nlmsvc_users++;
 	/*
 	 * Note: svc_serv structures have an initial use count of 1,
 	 * so we exit through here on both success and failure.
 	 */
-destroy_and_out:
+err_net:
 	svc_destroy(serv);
-out:
-	if (!error) {
-		struct lockd_net *ln = net_generic(net, lockd_net_id);
-
-		ln->nlmsvc_users++;
-		nlmsvc_users++;
-	}
+err_create:
 	mutex_unlock(&nlmsvc_mutex);
 	return error;
+
+err_start:
+	lockd_down_net(serv, net);
+	goto err_net;
 }
 EXPORT_SYMBOL_GPL(lockd_up);
 
@@ -378,14 +411,13 @@
  * Decrement the user count and bring down lockd if we're the last.
  */
 void
-lockd_down(void)
+lockd_down(struct net *net)
 {
 	mutex_lock(&nlmsvc_mutex);
+	lockd_down_net(nlmsvc_rqst->rq_server, net);
 	if (nlmsvc_users) {
-		if (--nlmsvc_users) {
-			lockd_down_net(current->nsproxy->net_ns);
+		if (--nlmsvc_users)
 			goto out;
-		}
 	} else {
 		printk(KERN_ERR "lockd_down: no users! task=%p\n",
 			nlmsvc_task);
@@ -397,7 +429,9 @@
 		BUG();
 	}
 	kthread_stop(nlmsvc_task);
+	dprintk("lockd_down: service stopped\n");
 	svc_exit_thread(nlmsvc_rqst);
+	dprintk("lockd_down: service destroyed\n");
 	nlmsvc_task = NULL;
 	nlmsvc_rqst = NULL;
 out:
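
The reshuffled lockd_up()/lockd_down() above separate two questions that used to be tangled together: whether the global service is running at all, and whether this particular network namespace still has users, each tracked by its own count. Below is a plain-C sketch of that two-level reference counting; the names are hypothetical, there is no locking, and this is not the kernel API.

#include <stdio.h>

struct netns { int users; };            /* stand-in for struct lockd_net */

static int svc_users;                   /* stand-in for nlmsvc_users */

static void service_up(struct netns *net)
{
	if (net->users++ == 0)
		printf("creating listeners for namespace %p\n", (void *)net);
	if (svc_users++ == 0)
		printf("starting service thread\n");
}

static void service_down(struct netns *net)
{
	if (--net->users == 0)
		printf("destroying listeners for namespace %p\n", (void *)net);
	if (--svc_users == 0)
		printf("stopping service thread\n");
}

int main(void)
{
	struct netns a = { 0 }, b = { 0 };

	service_up(&a);
	service_up(&b);         /* second namespace reuses the running service */
	service_down(&a);       /* namespace a torn down, service stays up */
	service_down(&b);       /* last user: service thread stopped too */
	return 0;
}
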
diff --git a/fs/locks.c b/fs/locks.c
index 4f441e4..814c51d 100644
--- a/fs/locks.c
+++ b/fs/locks.c
@@ -1636,12 +1636,13 @@
 SYSCALL_DEFINE2(flock, unsigned int, fd, unsigned int, cmd)
 {
 	struct file *filp;
+	int fput_needed;
 	struct file_lock *lock;
 	int can_sleep, unlock;
 	int error;
 
 	error = -EBADF;
-	filp = fget(fd);
+	filp = fget_light(fd, &fput_needed);
 	if (!filp)
 		goto out;
 
@@ -1674,7 +1675,7 @@
 	locks_free_lock(lock);
 
  out_putf:
-	fput(filp);
+	fput_light(filp, fput_needed);
  out:
 	return error;
 }
diff --git a/fs/logfs/readwrite.c b/fs/logfs/readwrite.c
index e3ab5e5..f1cb512c50 100644
--- a/fs/logfs/readwrite.c
+++ b/fs/logfs/readwrite.c
@@ -2175,7 +2175,7 @@
 		}
 	}
 	truncate_inode_pages(&inode->i_data, 0);
-	end_writeback(inode);
+	clear_inode(inode);
 
 	/* Cheaper version of write_inode.  All changes are concealed in
 	 * aliases, which are moved back.  No write to the medium happens.
diff --git a/fs/minix/inode.c b/fs/minix/inode.c
index fcb05d2..2a503ad 100644
--- a/fs/minix/inode.c
+++ b/fs/minix/inode.c
@@ -32,7 +32,7 @@
 		minix_truncate(inode);
 	}
 	invalidate_inode_buffers(inode);
-	end_writeback(inode);
+	clear_inode(inode);
 	if (!inode->i_nlink)
 		minix_free_inode(inode);
 }
diff --git a/fs/namei.c b/fs/namei.c
index 93ff12b..7d69419 100644
--- a/fs/namei.c
+++ b/fs/namei.c
@@ -449,7 +449,7 @@
 	mntget(nd->path.mnt);
 
 	rcu_read_unlock();
-	br_read_unlock(vfsmount_lock);
+	br_read_unlock(&vfsmount_lock);
 	nd->flags &= ~LOOKUP_RCU;
 	return 0;
 
@@ -507,14 +507,14 @@
 		if (unlikely(!__d_rcu_to_refcount(dentry, nd->seq))) {
 			spin_unlock(&dentry->d_lock);
 			rcu_read_unlock();
-			br_read_unlock(vfsmount_lock);
+			br_read_unlock(&vfsmount_lock);
 			return -ECHILD;
 		}
 		BUG_ON(nd->inode != dentry->d_inode);
 		spin_unlock(&dentry->d_lock);
 		mntget(nd->path.mnt);
 		rcu_read_unlock();
-		br_read_unlock(vfsmount_lock);
+		br_read_unlock(&vfsmount_lock);
 	}
 
 	if (likely(!(nd->flags & LOOKUP_JUMPED)))
@@ -681,15 +681,15 @@
 	struct mount *parent;
 	struct dentry *mountpoint;
 
-	br_read_lock(vfsmount_lock);
+	br_read_lock(&vfsmount_lock);
 	parent = mnt->mnt_parent;
 	if (&parent->mnt == path->mnt) {
-		br_read_unlock(vfsmount_lock);
+		br_read_unlock(&vfsmount_lock);
 		return 0;
 	}
 	mntget(&parent->mnt);
 	mountpoint = dget(mnt->mnt_mountpoint);
-	br_read_unlock(vfsmount_lock);
+	br_read_unlock(&vfsmount_lock);
 	dput(path->dentry);
 	path->dentry = mountpoint;
 	mntput(path->mnt);
@@ -947,7 +947,7 @@
 	if (!(nd->flags & LOOKUP_ROOT))
 		nd->root.mnt = NULL;
 	rcu_read_unlock();
-	br_read_unlock(vfsmount_lock);
+	br_read_unlock(&vfsmount_lock);
 	return -ECHILD;
 }
 
@@ -1125,8 +1125,8 @@
  *  small and for now I'd prefer to have fast path as straight as possible.
  *  It _is_ time-critical.
  */
-static int do_lookup(struct nameidata *nd, struct qstr *name,
-			struct path *path, struct inode **inode)
+static int lookup_fast(struct nameidata *nd, struct qstr *name,
+		       struct path *path, struct inode **inode)
 {
 	struct vfsmount *mnt = nd->path.mnt;
 	struct dentry *dentry, *parent = nd->path.dentry;
@@ -1208,7 +1208,7 @@
 			goto need_lookup;
 		}
 	}
-done:
+
 	path->mnt = mnt;
 	path->dentry = dentry;
 	err = follow_managed(path, nd->flags);
@@ -1222,6 +1222,17 @@
 	return 0;
 
 need_lookup:
+	return 1;
+}
+
+/* Fast lookup failed, do it the slow way */
+static int lookup_slow(struct nameidata *nd, struct qstr *name,
+		       struct path *path)
+{
+	struct dentry *dentry, *parent;
+	int err;
+
+	parent = nd->path.dentry;
 	BUG_ON(nd->inode != parent->d_inode);
 
 	mutex_lock(&parent->d_inode->i_mutex);
@@ -1229,7 +1240,16 @@
 	mutex_unlock(&parent->d_inode->i_mutex);
 	if (IS_ERR(dentry))
 		return PTR_ERR(dentry);
-	goto done;
+	path->mnt = nd->path.mnt;
+	path->dentry = dentry;
+	err = follow_managed(path, nd->flags);
+	if (unlikely(err < 0)) {
+		path_put_conditional(path, nd);
+		return err;
+	}
+	if (err)
+		nd->flags |= LOOKUP_JUMPED;
+	return 0;
 }
 
 static inline int may_lookup(struct nameidata *nd)
@@ -1265,7 +1285,7 @@
 		if (!(nd->flags & LOOKUP_ROOT))
 			nd->root.mnt = NULL;
 		rcu_read_unlock();
-		br_read_unlock(vfsmount_lock);
+		br_read_unlock(&vfsmount_lock);
 	}
 }
 
@@ -1301,21 +1321,26 @@
 	 */
 	if (unlikely(type != LAST_NORM))
 		return handle_dots(nd, type);
-	err = do_lookup(nd, name, path, &inode);
+	err = lookup_fast(nd, name, path, &inode);
 	if (unlikely(err)) {
-		terminate_walk(nd);
-		return err;
+		if (err < 0)
+			goto out_err;
+
+		err = lookup_slow(nd, name, path);
+		if (err < 0)
+			goto out_err;
+
+		inode = path->dentry->d_inode;
 	}
-	if (!inode) {
-		path_to_nameidata(path, nd);
-		terminate_walk(nd);
-		return -ENOENT;
-	}
+	err = -ENOENT;
+	if (!inode)
+		goto out_path_put;
+
 	if (should_follow_link(inode, follow)) {
 		if (nd->flags & LOOKUP_RCU) {
 			if (unlikely(unlazy_walk(nd, path->dentry))) {
-				terminate_walk(nd);
-				return -ECHILD;
+				err = -ECHILD;
+				goto out_err;
 			}
 		}
 		BUG_ON(inode != path->dentry->d_inode);
@@ -1324,6 +1349,12 @@
 	path_to_nameidata(path, nd);
 	nd->inode = inode;
 	return 0;
+
+out_path_put:
+	path_to_nameidata(path, nd);
+out_err:
+	terminate_walk(nd);
+	return err;
 }
 
 /*
@@ -1452,7 +1483,8 @@
  */
 static inline unsigned long hash_name(const char *name, unsigned int *hashp)
 {
-	unsigned long a, mask, hash, len;
+	unsigned long a, b, adata, bdata, mask, hash, len;
+	const struct word_at_a_time constants = WORD_AT_A_TIME_CONSTANTS;
 
 	hash = a = 0;
 	len = -sizeof(unsigned long);
@@ -1460,17 +1492,18 @@
 		hash = (hash + a) * 9;
 		len += sizeof(unsigned long);
 		a = load_unaligned_zeropad(name+len);
-		/* Do we have any NUL or '/' bytes in this word? */
-		mask = has_zero(a) | has_zero(a ^ REPEAT_BYTE('/'));
-	} while (!mask);
+		b = a ^ REPEAT_BYTE('/');
+	} while (!(has_zero(a, &adata, &constants) | has_zero(b, &bdata, &constants)));
 
-	/* The mask *below* the first high bit set */
-	mask = (mask - 1) & ~mask;
-	mask >>= 7;
-	hash += a & mask;
+	adata = prep_zero_mask(a, adata, &constants);
+	bdata = prep_zero_mask(b, bdata, &constants);
+
+	mask = create_zero_mask(adata | bdata);
+
+	hash += a & zero_bytemask(mask);
 	*hashp = fold_hash(hash);
 
-	return len + count_masked_bytes(mask);
+	return len + find_zero(mask);
 }
 
 #else
@@ -1618,7 +1651,7 @@
 		nd->path = nd->root;
 		nd->inode = inode;
 		if (flags & LOOKUP_RCU) {
-			br_read_lock(vfsmount_lock);
+			br_read_lock(&vfsmount_lock);
 			rcu_read_lock();
 			nd->seq = __read_seqcount_begin(&nd->path.dentry->d_seq);
 		} else {
@@ -1631,7 +1664,7 @@
 
 	if (*name=='/') {
 		if (flags & LOOKUP_RCU) {
-			br_read_lock(vfsmount_lock);
+			br_read_lock(&vfsmount_lock);
 			rcu_read_lock();
 			set_root_rcu(nd);
 		} else {
@@ -1644,7 +1677,7 @@
 			struct fs_struct *fs = current->fs;
 			unsigned seq;
 
-			br_read_lock(vfsmount_lock);
+			br_read_lock(&vfsmount_lock);
 			rcu_read_lock();
 
 			do {
@@ -1680,7 +1713,7 @@
 			if (fput_needed)
 				*fp = file;
 			nd->seq = __read_seqcount_begin(&nd->path.dentry->d_seq);
-			br_read_lock(vfsmount_lock);
+			br_read_lock(&vfsmount_lock);
 			rcu_read_lock();
 		} else {
 			path_get(&file->f_path);
@@ -2167,6 +2200,10 @@
 	int want_write = 0;
 	int acc_mode = op->acc_mode;
 	struct file *filp;
+	struct inode *inode;
+	int symlink_ok = 0;
+	struct path save_parent = { .dentry = NULL, .mnt = NULL };
+	bool retried = false;
 	int error;
 
 	nd->flags &= ~LOOKUP_PARENT;
@@ -2198,30 +2235,23 @@
 	}
 
 	if (!(open_flag & O_CREAT)) {
-		int symlink_ok = 0;
 		if (nd->last.name[nd->last.len])
 			nd->flags |= LOOKUP_FOLLOW | LOOKUP_DIRECTORY;
 		if (open_flag & O_PATH && !(nd->flags & LOOKUP_FOLLOW))
 			symlink_ok = 1;
 		/* we _can_ be in RCU mode here */
-		error = walk_component(nd, path, &nd->last, LAST_NORM,
-					!symlink_ok);
-		if (error < 0)
-			return ERR_PTR(error);
-		if (error) /* symlink */
-			return NULL;
-		/* sayonara */
-		error = complete_walk(nd);
-		if (error)
-			return ERR_PTR(error);
-
-		error = -ENOTDIR;
-		if (nd->flags & LOOKUP_DIRECTORY) {
-			if (!nd->inode->i_op->lookup)
+		error = lookup_fast(nd, &nd->last, path, &inode);
+		if (unlikely(error)) {
+			if (error < 0)
 				goto exit;
+
+			error = lookup_slow(nd, &nd->last, path);
+			if (error < 0)
+				goto exit;
+
+			inode = path->dentry->d_inode;
 		}
-		audit_inode(pathname, nd->path.dentry);
-		goto ok;
+		goto finish_lookup;
 	}
 
 	/* create side of things */
@@ -2239,6 +2269,7 @@
 	if (nd->last.name[nd->last.len])
 		goto exit;
 
+retry_lookup:
 	mutex_lock(&dir->d_inode->i_mutex);
 
 	dentry = lookup_hash(nd);
@@ -2300,22 +2331,49 @@
 	if (error)
 		nd->flags |= LOOKUP_JUMPED;
 
+	BUG_ON(nd->flags & LOOKUP_RCU);
+	inode = path->dentry->d_inode;
+finish_lookup:
+	/* we _can_ be in RCU mode here */
 	error = -ENOENT;
-	if (!path->dentry->d_inode)
-		goto exit_dput;
+	if (!inode) {
+		path_to_nameidata(path, nd);
+		goto exit;
+	}
 
-	if (path->dentry->d_inode->i_op->follow_link)
+	if (should_follow_link(inode, !symlink_ok)) {
+		if (nd->flags & LOOKUP_RCU) {
+			if (unlikely(unlazy_walk(nd, path->dentry))) {
+				error = -ECHILD;
+				goto exit;
+			}
+		}
+		BUG_ON(inode != path->dentry->d_inode);
 		return NULL;
+	}
 
-	path_to_nameidata(path, nd);
-	nd->inode = path->dentry->d_inode;
+	if ((nd->flags & LOOKUP_RCU) || nd->path.mnt != path->mnt) {
+		path_to_nameidata(path, nd);
+	} else {
+		save_parent.dentry = nd->path.dentry;
+		save_parent.mnt = mntget(path->mnt);
+		nd->path.dentry = path->dentry;
+
+	}
+	nd->inode = inode;
 	/* Why this, you ask?  _Now_ we might have grown LOOKUP_JUMPED... */
 	error = complete_walk(nd);
-	if (error)
+	if (error) {
+		path_put(&save_parent);
 		return ERR_PTR(error);
+	}
 	error = -EISDIR;
-	if (S_ISDIR(nd->inode->i_mode))
+	if ((open_flag & O_CREAT) && S_ISDIR(nd->inode->i_mode))
 		goto exit;
+	error = -ENOTDIR;
+	if ((nd->flags & LOOKUP_DIRECTORY) && !nd->inode->i_op->lookup)
+		goto exit;
+	audit_inode(pathname, nd->path.dentry);
 ok:
 	if (!S_ISREG(nd->inode->i_mode))
 		will_truncate = 0;
@@ -2331,6 +2389,20 @@
 	if (error)
 		goto exit;
 	filp = nameidata_to_filp(nd);
+	if (filp == ERR_PTR(-EOPENSTALE) && save_parent.dentry && !retried) {
+		BUG_ON(save_parent.dentry != dir);
+		path_put(&nd->path);
+		nd->path = save_parent;
+		nd->inode = dir->d_inode;
+		save_parent.mnt = NULL;
+		save_parent.dentry = NULL;
+		if (want_write) {
+			mnt_drop_write(nd->path.mnt);
+			want_write = 0;
+		}
+		retried = true;
+		goto retry_lookup;
+	}
 	if (!IS_ERR(filp)) {
 		error = ima_file_check(filp, op->acc_mode);
 		if (error) {
@@ -2350,7 +2422,8 @@
 out:
 	if (want_write)
 		mnt_drop_write(nd->path.mnt);
-	path_put(&nd->path);
+	path_put(&save_parent);
+	terminate_walk(nd);
 	return filp;
 
 exit_mutex_unlock:
@@ -2413,6 +2486,12 @@
 	if (base)
 		fput(base);
 	release_open_intent(nd);
+	if (filp == ERR_PTR(-EOPENSTALE)) {
+		if (flags & LOOKUP_RCU)
+			filp = ERR_PTR(-ECHILD);
+		else
+			filp = ERR_PTR(-ESTALE);
+	}
 	return filp;
 
 out_filp:
diff --git a/fs/namespace.c b/fs/namespace.c
index e608199..1e4a5fe 100644
--- a/fs/namespace.c
+++ b/fs/namespace.c
@@ -397,7 +397,7 @@
 {
 	int ret = 0;
 
-	br_write_lock(vfsmount_lock);
+	br_write_lock(&vfsmount_lock);
 	mnt->mnt.mnt_flags |= MNT_WRITE_HOLD;
 	/*
 	 * After storing MNT_WRITE_HOLD, we'll read the counters. This store
@@ -431,15 +431,15 @@
 	 */
 	smp_wmb();
 	mnt->mnt.mnt_flags &= ~MNT_WRITE_HOLD;
-	br_write_unlock(vfsmount_lock);
+	br_write_unlock(&vfsmount_lock);
 	return ret;
 }
 
 static void __mnt_unmake_readonly(struct mount *mnt)
 {
-	br_write_lock(vfsmount_lock);
+	br_write_lock(&vfsmount_lock);
 	mnt->mnt.mnt_flags &= ~MNT_READONLY;
-	br_write_unlock(vfsmount_lock);
+	br_write_unlock(&vfsmount_lock);
 }
 
 int sb_prepare_remount_readonly(struct super_block *sb)
@@ -451,7 +451,7 @@
 	if (atomic_long_read(&sb->s_remove_count))
 		return -EBUSY;
 
-	br_write_lock(vfsmount_lock);
+	br_write_lock(&vfsmount_lock);
 	list_for_each_entry(mnt, &sb->s_mounts, mnt_instance) {
 		if (!(mnt->mnt.mnt_flags & MNT_READONLY)) {
 			mnt->mnt.mnt_flags |= MNT_WRITE_HOLD;
@@ -473,7 +473,7 @@
 		if (mnt->mnt.mnt_flags & MNT_WRITE_HOLD)
 			mnt->mnt.mnt_flags &= ~MNT_WRITE_HOLD;
 	}
-	br_write_unlock(vfsmount_lock);
+	br_write_unlock(&vfsmount_lock);
 
 	return err;
 }
@@ -522,14 +522,14 @@
 {
 	struct mount *child_mnt;
 
-	br_read_lock(vfsmount_lock);
+	br_read_lock(&vfsmount_lock);
 	child_mnt = __lookup_mnt(path->mnt, path->dentry, 1);
 	if (child_mnt) {
 		mnt_add_count(child_mnt, 1);
-		br_read_unlock(vfsmount_lock);
+		br_read_unlock(&vfsmount_lock);
 		return &child_mnt->mnt;
 	} else {
-		br_read_unlock(vfsmount_lock);
+		br_read_unlock(&vfsmount_lock);
 		return NULL;
 	}
 }
@@ -714,9 +714,9 @@
 	mnt->mnt.mnt_sb = root->d_sb;
 	mnt->mnt_mountpoint = mnt->mnt.mnt_root;
 	mnt->mnt_parent = mnt;
-	br_write_lock(vfsmount_lock);
+	br_write_lock(&vfsmount_lock);
 	list_add_tail(&mnt->mnt_instance, &root->d_sb->s_mounts);
-	br_write_unlock(vfsmount_lock);
+	br_write_unlock(&vfsmount_lock);
 	return &mnt->mnt;
 }
 EXPORT_SYMBOL_GPL(vfs_kern_mount);
@@ -745,9 +745,9 @@
 		mnt->mnt.mnt_root = dget(root);
 		mnt->mnt_mountpoint = mnt->mnt.mnt_root;
 		mnt->mnt_parent = mnt;
-		br_write_lock(vfsmount_lock);
+		br_write_lock(&vfsmount_lock);
 		list_add_tail(&mnt->mnt_instance, &sb->s_mounts);
-		br_write_unlock(vfsmount_lock);
+		br_write_unlock(&vfsmount_lock);
 
 		if (flag & CL_SLAVE) {
 			list_add(&mnt->mnt_slave, &old->mnt_slave_list);
@@ -803,35 +803,36 @@
 {
 put_again:
 #ifdef CONFIG_SMP
-	br_read_lock(vfsmount_lock);
+	br_read_lock(&vfsmount_lock);
 	if (likely(atomic_read(&mnt->mnt_longterm))) {
 		mnt_add_count(mnt, -1);
-		br_read_unlock(vfsmount_lock);
+		br_read_unlock(&vfsmount_lock);
 		return;
 	}
-	br_read_unlock(vfsmount_lock);
+	br_read_unlock(&vfsmount_lock);
 
-	br_write_lock(vfsmount_lock);
+	br_write_lock(&vfsmount_lock);
 	mnt_add_count(mnt, -1);
 	if (mnt_get_count(mnt)) {
-		br_write_unlock(vfsmount_lock);
+		br_write_unlock(&vfsmount_lock);
 		return;
 	}
 #else
 	mnt_add_count(mnt, -1);
 	if (likely(mnt_get_count(mnt)))
 		return;
-	br_write_lock(vfsmount_lock);
+	br_write_lock(&vfsmount_lock);
 #endif
 	if (unlikely(mnt->mnt_pinned)) {
 		mnt_add_count(mnt, mnt->mnt_pinned + 1);
 		mnt->mnt_pinned = 0;
-		br_write_unlock(vfsmount_lock);
+		br_write_unlock(&vfsmount_lock);
 		acct_auto_close_mnt(&mnt->mnt);
 		goto put_again;
 	}
+
 	list_del(&mnt->mnt_instance);
-	br_write_unlock(vfsmount_lock);
+	br_write_unlock(&vfsmount_lock);
 	mntfree(mnt);
 }
 
@@ -857,21 +858,21 @@
 
 void mnt_pin(struct vfsmount *mnt)
 {
-	br_write_lock(vfsmount_lock);
+	br_write_lock(&vfsmount_lock);
 	real_mount(mnt)->mnt_pinned++;
-	br_write_unlock(vfsmount_lock);
+	br_write_unlock(&vfsmount_lock);
 }
 EXPORT_SYMBOL(mnt_pin);
 
 void mnt_unpin(struct vfsmount *m)
 {
 	struct mount *mnt = real_mount(m);
-	br_write_lock(vfsmount_lock);
+	br_write_lock(&vfsmount_lock);
 	if (mnt->mnt_pinned) {
 		mnt_add_count(mnt, 1);
 		mnt->mnt_pinned--;
 	}
-	br_write_unlock(vfsmount_lock);
+	br_write_unlock(&vfsmount_lock);
 }
 EXPORT_SYMBOL(mnt_unpin);
 
@@ -988,12 +989,12 @@
 	BUG_ON(!m);
 
 	/* write lock needed for mnt_get_count */
-	br_write_lock(vfsmount_lock);
+	br_write_lock(&vfsmount_lock);
 	for (p = mnt; p; p = next_mnt(p, mnt)) {
 		actual_refs += mnt_get_count(p);
 		minimum_refs += 2;
 	}
-	br_write_unlock(vfsmount_lock);
+	br_write_unlock(&vfsmount_lock);
 
 	if (actual_refs > minimum_refs)
 		return 0;
@@ -1020,10 +1021,10 @@
 {
 	int ret = 1;
 	down_read(&namespace_sem);
-	br_write_lock(vfsmount_lock);
+	br_write_lock(&vfsmount_lock);
 	if (propagate_mount_busy(real_mount(mnt), 2))
 		ret = 0;
-	br_write_unlock(vfsmount_lock);
+	br_write_unlock(&vfsmount_lock);
 	up_read(&namespace_sem);
 	return ret;
 }
@@ -1040,13 +1041,13 @@
 			struct dentry *dentry;
 			struct mount *m;
 
-			br_write_lock(vfsmount_lock);
+			br_write_lock(&vfsmount_lock);
 			dentry = mnt->mnt_mountpoint;
 			m = mnt->mnt_parent;
 			mnt->mnt_mountpoint = mnt->mnt.mnt_root;
 			mnt->mnt_parent = mnt;
 			m->mnt_ghosts--;
-			br_write_unlock(vfsmount_lock);
+			br_write_unlock(&vfsmount_lock);
 			dput(dentry);
 			mntput(&m->mnt);
 		}
@@ -1073,8 +1074,9 @@
 		list_del_init(&p->mnt_expire);
 		list_del_init(&p->mnt_list);
 		__touch_mnt_namespace(p->mnt_ns);
+		if (p->mnt_ns)
+			__mnt_make_shortterm(p);
 		p->mnt_ns = NULL;
-		__mnt_make_shortterm(p);
 		list_del_init(&p->mnt_child);
 		if (mnt_has_parent(p)) {
 			p->mnt_parent->mnt_ghosts++;
@@ -1112,12 +1114,12 @@
 		 * probably don't strictly need the lock here if we examined
 		 * all race cases, but it's a slowpath.
 		 */
-		br_write_lock(vfsmount_lock);
+		br_write_lock(&vfsmount_lock);
 		if (mnt_get_count(mnt) != 2) {
-			br_write_unlock(vfsmount_lock);
+			br_write_unlock(&vfsmount_lock);
 			return -EBUSY;
 		}
-		br_write_unlock(vfsmount_lock);
+		br_write_unlock(&vfsmount_lock);
 
 		if (!xchg(&mnt->mnt_expiry_mark, 1))
 			return -EAGAIN;
@@ -1159,7 +1161,7 @@
 	}
 
 	down_write(&namespace_sem);
-	br_write_lock(vfsmount_lock);
+	br_write_lock(&vfsmount_lock);
 	event++;
 
 	if (!(flags & MNT_DETACH))
@@ -1171,7 +1173,7 @@
 			umount_tree(mnt, 1, &umount_list);
 		retval = 0;
 	}
-	br_write_unlock(vfsmount_lock);
+	br_write_unlock(&vfsmount_lock);
 	up_write(&namespace_sem);
 	release_mounts(&umount_list);
 	return retval;
@@ -1286,19 +1288,19 @@
 			q = clone_mnt(p, p->mnt.mnt_root, flag);
 			if (!q)
 				goto Enomem;
-			br_write_lock(vfsmount_lock);
+			br_write_lock(&vfsmount_lock);
 			list_add_tail(&q->mnt_list, &res->mnt_list);
 			attach_mnt(q, &path);
-			br_write_unlock(vfsmount_lock);
+			br_write_unlock(&vfsmount_lock);
 		}
 	}
 	return res;
 Enomem:
 	if (res) {
 		LIST_HEAD(umount_list);
-		br_write_lock(vfsmount_lock);
+		br_write_lock(&vfsmount_lock);
 		umount_tree(res, 0, &umount_list);
-		br_write_unlock(vfsmount_lock);
+		br_write_unlock(&vfsmount_lock);
 		release_mounts(&umount_list);
 	}
 	return NULL;
@@ -1318,9 +1320,9 @@
 {
 	LIST_HEAD(umount_list);
 	down_write(&namespace_sem);
-	br_write_lock(vfsmount_lock);
+	br_write_lock(&vfsmount_lock);
 	umount_tree(real_mount(mnt), 0, &umount_list);
-	br_write_unlock(vfsmount_lock);
+	br_write_unlock(&vfsmount_lock);
 	up_write(&namespace_sem);
 	release_mounts(&umount_list);
 }
@@ -1448,7 +1450,7 @@
 	if (err)
 		goto out_cleanup_ids;
 
-	br_write_lock(vfsmount_lock);
+	br_write_lock(&vfsmount_lock);
 
 	if (IS_MNT_SHARED(dest_mnt)) {
 		for (p = source_mnt; p; p = next_mnt(p, source_mnt))
@@ -1467,7 +1469,7 @@
 		list_del_init(&child->mnt_hash);
 		commit_tree(child);
 	}
-	br_write_unlock(vfsmount_lock);
+	br_write_unlock(&vfsmount_lock);
 
 	return 0;
 
@@ -1565,10 +1567,10 @@
 			goto out_unlock;
 	}
 
-	br_write_lock(vfsmount_lock);
+	br_write_lock(&vfsmount_lock);
 	for (m = mnt; m; m = (recurse ? next_mnt(m, mnt) : NULL))
 		change_mnt_propagation(m, type);
-	br_write_unlock(vfsmount_lock);
+	br_write_unlock(&vfsmount_lock);
 
  out_unlock:
 	up_write(&namespace_sem);
@@ -1617,9 +1619,9 @@
 
 	err = graft_tree(mnt, path);
 	if (err) {
-		br_write_lock(vfsmount_lock);
+		br_write_lock(&vfsmount_lock);
 		umount_tree(mnt, 0, &umount_list);
-		br_write_unlock(vfsmount_lock);
+		br_write_unlock(&vfsmount_lock);
 	}
 out2:
 	unlock_mount(path);
@@ -1677,16 +1679,16 @@
 	else
 		err = do_remount_sb(sb, flags, data, 0);
 	if (!err) {
-		br_write_lock(vfsmount_lock);
+		br_write_lock(&vfsmount_lock);
 		mnt_flags |= mnt->mnt.mnt_flags & MNT_PROPAGATION_MASK;
 		mnt->mnt.mnt_flags = mnt_flags;
-		br_write_unlock(vfsmount_lock);
+		br_write_unlock(&vfsmount_lock);
 	}
 	up_write(&sb->s_umount);
 	if (!err) {
-		br_write_lock(vfsmount_lock);
+		br_write_lock(&vfsmount_lock);
 		touch_mnt_namespace(mnt->mnt_ns);
-		br_write_unlock(vfsmount_lock);
+		br_write_unlock(&vfsmount_lock);
 	}
 	return err;
 }
@@ -1893,9 +1895,9 @@
 	/* remove m from any expiration list it may be on */
 	if (!list_empty(&mnt->mnt_expire)) {
 		down_write(&namespace_sem);
-		br_write_lock(vfsmount_lock);
+		br_write_lock(&vfsmount_lock);
 		list_del_init(&mnt->mnt_expire);
-		br_write_unlock(vfsmount_lock);
+		br_write_unlock(&vfsmount_lock);
 		up_write(&namespace_sem);
 	}
 	mntput(m);
@@ -1911,11 +1913,11 @@
 void mnt_set_expiry(struct vfsmount *mnt, struct list_head *expiry_list)
 {
 	down_write(&namespace_sem);
-	br_write_lock(vfsmount_lock);
+	br_write_lock(&vfsmount_lock);
 
 	list_add_tail(&real_mount(mnt)->mnt_expire, expiry_list);
 
-	br_write_unlock(vfsmount_lock);
+	br_write_unlock(&vfsmount_lock);
 	up_write(&namespace_sem);
 }
 EXPORT_SYMBOL(mnt_set_expiry);
@@ -1935,7 +1937,7 @@
 		return;
 
 	down_write(&namespace_sem);
-	br_write_lock(vfsmount_lock);
+	br_write_lock(&vfsmount_lock);
 
 	/* extract from the expiration list every vfsmount that matches the
 	 * following criteria:
@@ -1954,7 +1956,7 @@
 		touch_mnt_namespace(mnt->mnt_ns);
 		umount_tree(mnt, 1, &umounts);
 	}
-	br_write_unlock(vfsmount_lock);
+	br_write_unlock(&vfsmount_lock);
 	up_write(&namespace_sem);
 
 	release_mounts(&umounts);
@@ -2218,9 +2220,9 @@
 	struct mount *mnt = real_mount(m);
 	if (atomic_add_unless(&mnt->mnt_longterm, -1, 1))
 		return;
-	br_write_lock(vfsmount_lock);
+	br_write_lock(&vfsmount_lock);
 	atomic_dec(&mnt->mnt_longterm);
-	br_write_unlock(vfsmount_lock);
+	br_write_unlock(&vfsmount_lock);
 #endif
 }
 
@@ -2250,9 +2252,9 @@
 		return ERR_PTR(-ENOMEM);
 	}
 	new_ns->root = new;
-	br_write_lock(vfsmount_lock);
+	br_write_lock(&vfsmount_lock);
 	list_add_tail(&new_ns->list, &new->mnt_list);
-	br_write_unlock(vfsmount_lock);
+	br_write_unlock(&vfsmount_lock);
 
 	/*
 	 * Second pass: switch the tsk->fs->* elements and mark new vfsmounts
@@ -2416,9 +2418,9 @@
 int path_is_under(struct path *path1, struct path *path2)
 {
 	int res;
-	br_read_lock(vfsmount_lock);
+	br_read_lock(&vfsmount_lock);
 	res = is_path_reachable(real_mount(path1->mnt), path1->dentry, path2);
-	br_read_unlock(vfsmount_lock);
+	br_read_unlock(&vfsmount_lock);
 	return res;
 }
 EXPORT_SYMBOL(path_is_under);
@@ -2505,7 +2507,7 @@
 	/* make sure we can reach put_old from new_root */
 	if (!is_path_reachable(real_mount(old.mnt), old.dentry, &new))
 		goto out4;
-	br_write_lock(vfsmount_lock);
+	br_write_lock(&vfsmount_lock);
 	detach_mnt(new_mnt, &parent_path);
 	detach_mnt(root_mnt, &root_parent);
 	/* mount old root on put_old */
@@ -2513,7 +2515,7 @@
 	/* mount new_root on / */
 	attach_mnt(new_mnt, &root_parent);
 	touch_mnt_namespace(current->nsproxy->mnt_ns);
-	br_write_unlock(vfsmount_lock);
+	br_write_unlock(&vfsmount_lock);
 	chroot_fs_refs(&root, &new);
 	error = 0;
 out4:
@@ -2576,7 +2578,7 @@
 	for (u = 0; u < HASH_SIZE; u++)
 		INIT_LIST_HEAD(&mount_hashtable[u]);
 
-	br_lock_init(vfsmount_lock);
+	br_lock_init(&vfsmount_lock);
 
 	err = sysfs_init();
 	if (err)
@@ -2596,9 +2598,9 @@
 	if (!atomic_dec_and_test(&ns->count))
 		return;
 	down_write(&namespace_sem);
-	br_write_lock(vfsmount_lock);
+	br_write_lock(&vfsmount_lock);
 	umount_tree(ns->root, 0, &umount_list);
-	br_write_unlock(vfsmount_lock);
+	br_write_unlock(&vfsmount_lock);
 	up_write(&namespace_sem);
 	release_mounts(&umount_list);
 	kfree(ns);
diff --git a/fs/ncpfs/file.c b/fs/ncpfs/file.c
index 3ff5fcc..122e260 100644
--- a/fs/ncpfs/file.c
+++ b/fs/ncpfs/file.c
@@ -221,6 +221,10 @@
 
 	already_written = 0;
 
+	errno = file_update_time(file);
+	if (errno)
+		goto outrel;
+
 	bouncebuffer = vmalloc(bufsize);
 	if (!bouncebuffer) {
 		errno = -EIO;	/* -ENOMEM */
@@ -252,8 +256,6 @@
 	}
 	vfree(bouncebuffer);
 
-	file_update_time(file);
-
 	*ppos = pos;
 
 	if (pos > i_size_read(inode)) {
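
The reordering in the ncpfs write path above reflects that file_update_time() now returns an error code, so the caller checks it before doing any I/O instead of invoking it unconditionally after the write. A minimal hedged sketch of the new convention (example_write is hypothetical):

/* Sketch only: fail the write early if the timestamp update cannot be
 * recorded, rather than ignoring the error after the data is written. */
static ssize_t example_write(struct file *file, const char __user *buf,
			     size_t count, loff_t *ppos)
{
	int err = file_update_time(file);

	if (err)
		return err;

	/* ... perform the actual write ... */
	return count;
}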
diff --git a/fs/ncpfs/inode.c b/fs/ncpfs/inode.c
index 87484fb..333df07 100644
--- a/fs/ncpfs/inode.c
+++ b/fs/ncpfs/inode.c
@@ -292,7 +292,7 @@
 ncp_evict_inode(struct inode *inode)
 {
 	truncate_inode_pages(&inode->i_data, 0);
-	end_writeback(inode);
+	clear_inode(inode);
 
 	if (S_ISDIR(inode->i_mode)) {
 		DDPRINTK("ncp_evict_inode: put directory %ld\n", inode->i_ino);
diff --git a/fs/ncpfs/ncp_fs_sb.h b/fs/ncpfs/ncp_fs_sb.h
index 4af803f..54cc0cd 100644
--- a/fs/ncpfs/ncp_fs_sb.h
+++ b/fs/ncpfs/ncp_fs_sb.h
@@ -23,17 +23,17 @@
 	unsigned long    flags;		/* NCP_MOUNT_* flags */
 	unsigned int	 int_flags;	/* internal flags */
 #define NCP_IMOUNT_LOGGEDIN_POSSIBLE	0x0001
-	__kernel_uid32_t mounted_uid;	/* Who may umount() this filesystem? */
+	uid_t		 mounted_uid;	/* Who may umount() this filesystem? */
 	struct pid      *wdog_pid;	/* Who cares for our watchdog packets? */
 	unsigned int     ncp_fd;	/* The socket to the ncp port */
 	unsigned int     time_out;	/* How long should I wait after
 					   sending a NCP request? */
 	unsigned int     retry_count;	/* And how often should I retry? */
 	unsigned char	 mounted_vol[NCP_VOLNAME_LEN + 1];
-	__kernel_uid32_t uid;
-	__kernel_gid32_t gid;
-	__kernel_mode_t  file_mode;
-	__kernel_mode_t  dir_mode;
+	uid_t		 uid;
+	gid_t		 gid;
+	umode_t		 file_mode;
+	umode_t		 dir_mode;
 	int		 info_fd;
 };
 
diff --git a/fs/nfs/Kconfig b/fs/nfs/Kconfig
index 2a0e6c5..f90f4f5 100644
--- a/fs/nfs/Kconfig
+++ b/fs/nfs/Kconfig
@@ -29,9 +29,20 @@
 
 	  If unsure, say N.
 
+config NFS_V2
+	bool "NFS client support for NFS version 2"
+	depends on NFS_FS
+	default y
+	help
+	  This option enables support for version 2 of the NFS protocol
+	  (RFC 1094) in the kernel's NFS client.
+
+	  If unsure, say Y.
+
 config NFS_V3
 	bool "NFS client support for NFS version 3"
 	depends on NFS_FS
+	default y
 	help
 	  This option enables support for version 3 of the NFS protocol
 	  (RFC 1813) in the kernel's NFS client.
diff --git a/fs/nfs/Makefile b/fs/nfs/Makefile
index b58613d..7ddd45d 100644
--- a/fs/nfs/Makefile
+++ b/fs/nfs/Makefile
@@ -4,11 +4,12 @@
 
 obj-$(CONFIG_NFS_FS) += nfs.o
 
-nfs-y 			:= client.o dir.o file.o getroot.o inode.o super.o nfs2xdr.o \
-			   direct.o pagelist.o proc.o read.o symlink.o unlink.o \
+nfs-y 			:= client.o dir.o file.o getroot.o inode.o super.o \
+			   direct.o pagelist.o read.o symlink.o unlink.o \
 			   write.o namespace.o mount_clnt.o \
 			   dns_resolve.o cache_lib.o
 nfs-$(CONFIG_ROOT_NFS)	+= nfsroot.o
+nfs-$(CONFIG_NFS_V2)	+= proc.o nfs2xdr.o
 nfs-$(CONFIG_NFS_V3)	+= nfs3proc.o nfs3xdr.o
 nfs-$(CONFIG_NFS_V3_ACL)	+= nfs3acl.o
 nfs-$(CONFIG_NFS_V4)	+= nfs4proc.o nfs4xdr.o nfs4state.o nfs4renewd.o \
diff --git a/fs/nfs/blocklayout/blocklayout.c b/fs/nfs/blocklayout/blocklayout.c
index 7f6a23f..7ae8a60 100644
--- a/fs/nfs/blocklayout/blocklayout.c
+++ b/fs/nfs/blocklayout/blocklayout.c
@@ -187,7 +187,6 @@
 	struct parallel_io *par = bio->bi_private;
 	const int uptodate = test_bit(BIO_UPTODATE, &bio->bi_flags);
 	struct bio_vec *bvec = bio->bi_io_vec + bio->bi_vcnt - 1;
-	struct nfs_read_data *rdata = (struct nfs_read_data *)par->data;
 
 	do {
 		struct page *page = bvec->bv_page;
@@ -198,9 +197,12 @@
 			SetPageUptodate(page);
 	} while (bvec >= bio->bi_io_vec);
 	if (!uptodate) {
-		if (!rdata->pnfs_error)
-			rdata->pnfs_error = -EIO;
-		pnfs_set_lo_fail(rdata->lseg);
+		struct nfs_read_data *rdata = par->data;
+		struct nfs_pgio_header *header = rdata->header;
+
+		if (!header->pnfs_error)
+			header->pnfs_error = -EIO;
+		pnfs_set_lo_fail(header->lseg);
 	}
 	bio_put(bio);
 	put_parallel(par);
@@ -221,7 +223,7 @@
 {
 	struct nfs_read_data *rdata = data;
 
-	rdata->task.tk_status = rdata->pnfs_error;
+	rdata->task.tk_status = rdata->header->pnfs_error;
 	INIT_WORK(&rdata->task.u.tk_work, bl_read_cleanup);
 	schedule_work(&rdata->task.u.tk_work);
 }
@@ -229,6 +231,7 @@
 static enum pnfs_try_status
 bl_read_pagelist(struct nfs_read_data *rdata)
 {
+	struct nfs_pgio_header *header = rdata->header;
 	int i, hole;
 	struct bio *bio = NULL;
 	struct pnfs_block_extent *be = NULL, *cow_read = NULL;
@@ -239,7 +242,7 @@
 	int pg_index = rdata->args.pgbase >> PAGE_CACHE_SHIFT;
 
 	dprintk("%s enter nr_pages %u offset %lld count %u\n", __func__,
-	       rdata->npages, f_offset, (unsigned int)rdata->args.count);
+	       rdata->pages.npages, f_offset, (unsigned int)rdata->args.count);
 
 	par = alloc_parallel(rdata);
 	if (!par)
@@ -249,17 +252,17 @@
 
 	isect = (sector_t) (f_offset >> SECTOR_SHIFT);
 	/* Code assumes extents are page-aligned */
-	for (i = pg_index; i < rdata->npages; i++) {
+	for (i = pg_index; i < rdata->pages.npages; i++) {
 		if (!extent_length) {
 			/* We've used up the previous extent */
 			bl_put_extent(be);
 			bl_put_extent(cow_read);
 			bio = bl_submit_bio(READ, bio);
 			/* Get the next one */
-			be = bl_find_get_extent(BLK_LSEG2EXT(rdata->lseg),
+			be = bl_find_get_extent(BLK_LSEG2EXT(header->lseg),
 					     isect, &cow_read);
 			if (!be) {
-				rdata->pnfs_error = -EIO;
+				header->pnfs_error = -EIO;
 				goto out;
 			}
 			extent_length = be->be_length -
@@ -282,11 +285,12 @@
 			struct pnfs_block_extent *be_read;
 
 			be_read = (hole && cow_read) ? cow_read : be;
-			bio = bl_add_page_to_bio(bio, rdata->npages - i, READ,
+			bio = bl_add_page_to_bio(bio, rdata->pages.npages - i,
+						 READ,
 						 isect, pages[i], be_read,
 						 bl_end_io_read, par);
 			if (IS_ERR(bio)) {
-				rdata->pnfs_error = PTR_ERR(bio);
+				header->pnfs_error = PTR_ERR(bio);
 				bio = NULL;
 				goto out;
 			}
@@ -294,9 +298,9 @@
 		isect += PAGE_CACHE_SECTORS;
 		extent_length -= PAGE_CACHE_SECTORS;
 	}
-	if ((isect << SECTOR_SHIFT) >= rdata->inode->i_size) {
+	if ((isect << SECTOR_SHIFT) >= header->inode->i_size) {
 		rdata->res.eof = 1;
-		rdata->res.count = rdata->inode->i_size - f_offset;
+		rdata->res.count = header->inode->i_size - f_offset;
 	} else {
 		rdata->res.count = (isect << SECTOR_SHIFT) - f_offset;
 	}
@@ -345,7 +349,6 @@
 	struct parallel_io *par = bio->bi_private;
 	const int uptodate = test_bit(BIO_UPTODATE, &bio->bi_flags);
 	struct bio_vec *bvec = bio->bi_io_vec + bio->bi_vcnt - 1;
-	struct nfs_write_data *wdata = (struct nfs_write_data *)par->data;
 
 	do {
 		struct page *page = bvec->bv_page;
@@ -358,9 +361,12 @@
 	} while (bvec >= bio->bi_io_vec);
 
 	if (unlikely(!uptodate)) {
-		if (!wdata->pnfs_error)
-			wdata->pnfs_error = -EIO;
-		pnfs_set_lo_fail(wdata->lseg);
+		struct nfs_write_data *data = par->data;
+		struct nfs_pgio_header *header = data->header;
+
+		if (!header->pnfs_error)
+			header->pnfs_error = -EIO;
+		pnfs_set_lo_fail(header->lseg);
 	}
 	bio_put(bio);
 	put_parallel(par);
@@ -370,12 +376,13 @@
 {
 	struct parallel_io *par = bio->bi_private;
 	const int uptodate = test_bit(BIO_UPTODATE, &bio->bi_flags);
-	struct nfs_write_data *wdata = (struct nfs_write_data *)par->data;
+	struct nfs_write_data *data = par->data;
+	struct nfs_pgio_header *header = data->header;
 
 	if (!uptodate) {
-		if (!wdata->pnfs_error)
-			wdata->pnfs_error = -EIO;
-		pnfs_set_lo_fail(wdata->lseg);
+		if (!header->pnfs_error)
+			header->pnfs_error = -EIO;
+		pnfs_set_lo_fail(header->lseg);
 	}
 	bio_put(bio);
 	put_parallel(par);
@@ -391,9 +398,9 @@
 	dprintk("%s enter\n", __func__);
 	task = container_of(work, struct rpc_task, u.tk_work);
 	wdata = container_of(task, struct nfs_write_data, task);
-	if (likely(!wdata->pnfs_error)) {
+	if (likely(!wdata->header->pnfs_error)) {
 		/* Marks for LAYOUTCOMMIT */
-		mark_extents_written(BLK_LSEG2EXT(wdata->lseg),
+		mark_extents_written(BLK_LSEG2EXT(wdata->header->lseg),
 				     wdata->args.offset, wdata->args.count);
 	}
 	pnfs_ld_write_done(wdata);
@@ -404,12 +411,12 @@
 {
 	struct nfs_write_data *wdata = data;
 
-	if (unlikely(wdata->pnfs_error)) {
-		bl_free_short_extents(&BLK_LSEG2EXT(wdata->lseg)->bl_inval,
+	if (unlikely(wdata->header->pnfs_error)) {
+		bl_free_short_extents(&BLK_LSEG2EXT(wdata->header->lseg)->bl_inval,
 					num_se);
 	}
 
-	wdata->task.tk_status = wdata->pnfs_error;
+	wdata->task.tk_status = wdata->header->pnfs_error;
 	wdata->verf.committed = NFS_FILE_SYNC;
 	INIT_WORK(&wdata->task.u.tk_work, bl_write_cleanup);
 	schedule_work(&wdata->task.u.tk_work);
@@ -540,6 +547,7 @@
 static enum pnfs_try_status
 bl_write_pagelist(struct nfs_write_data *wdata, int sync)
 {
+	struct nfs_pgio_header *header = wdata->header;
 	int i, ret, npg_zero, pg_index, last = 0;
 	struct bio *bio = NULL;
 	struct pnfs_block_extent *be = NULL, *cow_read = NULL;
@@ -552,7 +560,7 @@
 	pgoff_t index;
 	u64 temp;
 	int npg_per_block =
-	    NFS_SERVER(wdata->inode)->pnfs_blksize >> PAGE_CACHE_SHIFT;
+	    NFS_SERVER(header->inode)->pnfs_blksize >> PAGE_CACHE_SHIFT;
 
 	dprintk("%s enter, %Zu@%lld\n", __func__, count, offset);
 	/* At this point, wdata->pages is a (sequential) list of nfs_pages.
@@ -566,7 +574,7 @@
 	/* At this point, have to be more careful with error handling */
 
 	isect = (sector_t) ((offset & (long)PAGE_CACHE_MASK) >> SECTOR_SHIFT);
-	be = bl_find_get_extent(BLK_LSEG2EXT(wdata->lseg), isect, &cow_read);
+	be = bl_find_get_extent(BLK_LSEG2EXT(header->lseg), isect, &cow_read);
 	if (!be || !is_writable(be, isect)) {
 		dprintk("%s no matching extents!\n", __func__);
 		goto out_mds;
@@ -597,10 +605,10 @@
 			dprintk("%s zero %dth page: index %lu isect %llu\n",
 				__func__, npg_zero, index,
 				(unsigned long long)isect);
-			page = bl_find_get_zeroing_page(wdata->inode, index,
+			page = bl_find_get_zeroing_page(header->inode, index,
 							cow_read);
 			if (unlikely(IS_ERR(page))) {
-				wdata->pnfs_error = PTR_ERR(page);
+				header->pnfs_error = PTR_ERR(page);
 				goto out;
 			} else if (page == NULL)
 				goto next_page;
@@ -612,7 +620,7 @@
 					__func__, ret);
 				end_page_writeback(page);
 				page_cache_release(page);
-				wdata->pnfs_error = ret;
+				header->pnfs_error = ret;
 				goto out;
 			}
 			if (likely(!bl_push_one_short_extent(be->be_inval)))
@@ -620,11 +628,11 @@
 			else {
 				end_page_writeback(page);
 				page_cache_release(page);
-				wdata->pnfs_error = -ENOMEM;
+				header->pnfs_error = -ENOMEM;
 				goto out;
 			}
 			/* FIXME: This should be done in bi_end_io */
-			mark_extents_written(BLK_LSEG2EXT(wdata->lseg),
+			mark_extents_written(BLK_LSEG2EXT(header->lseg),
 					     page->index << PAGE_CACHE_SHIFT,
 					     PAGE_CACHE_SIZE);
 
@@ -632,7 +640,7 @@
 						 isect, page, be,
 						 bl_end_io_write_zero, par);
 			if (IS_ERR(bio)) {
-				wdata->pnfs_error = PTR_ERR(bio);
+				header->pnfs_error = PTR_ERR(bio);
 				bio = NULL;
 				goto out;
 			}
@@ -647,16 +655,16 @@
 
 	/* Middle pages */
 	pg_index = wdata->args.pgbase >> PAGE_CACHE_SHIFT;
-	for (i = pg_index; i < wdata->npages; i++) {
+	for (i = pg_index; i < wdata->pages.npages; i++) {
 		if (!extent_length) {
 			/* We've used up the previous extent */
 			bl_put_extent(be);
 			bio = bl_submit_bio(WRITE, bio);
 			/* Get the next one */
-			be = bl_find_get_extent(BLK_LSEG2EXT(wdata->lseg),
+			be = bl_find_get_extent(BLK_LSEG2EXT(header->lseg),
 					     isect, NULL);
 			if (!be || !is_writable(be, isect)) {
-				wdata->pnfs_error = -EINVAL;
+				header->pnfs_error = -EINVAL;
 				goto out;
 			}
 			if (be->be_state == PNFS_BLOCK_INVALID_DATA) {
@@ -664,7 +672,7 @@
 								be->be_inval)))
 					par->bse_count++;
 				else {
-					wdata->pnfs_error = -ENOMEM;
+					header->pnfs_error = -ENOMEM;
 					goto out;
 				}
 			}
@@ -677,15 +685,15 @@
 			if (unlikely(ret)) {
 				dprintk("%s bl_mark_sectors_init fail %d\n",
 					__func__, ret);
-				wdata->pnfs_error = ret;
+				header->pnfs_error = ret;
 				goto out;
 			}
 		}
-		bio = bl_add_page_to_bio(bio, wdata->npages - i, WRITE,
+		bio = bl_add_page_to_bio(bio, wdata->pages.npages - i, WRITE,
 					 isect, pages[i], be,
 					 bl_end_io_write, par);
 		if (IS_ERR(bio)) {
-			wdata->pnfs_error = PTR_ERR(bio);
+			header->pnfs_error = PTR_ERR(bio);
 			bio = NULL;
 			goto out;
 		}
diff --git a/fs/nfs/blocklayout/blocklayoutdev.c b/fs/nfs/blocklayout/blocklayoutdev.c
index a5c88a5..c965542 100644
--- a/fs/nfs/blocklayout/blocklayoutdev.c
+++ b/fs/nfs/blocklayout/blocklayoutdev.c
@@ -123,7 +123,7 @@
 	uint8_t *dataptr;
 	DECLARE_WAITQUEUE(wq, current);
 	int offset, len, i, rc;
-	struct net *net = server->nfs_client->net;
+	struct net *net = server->nfs_client->cl_net;
 	struct nfs_net *nn = net_generic(net, nfs_net_id);
 	struct bl_dev_msg *reply = &nn->bl_mount_reply;
 
diff --git a/fs/nfs/callback.c b/fs/nfs/callback.c
index eb95f50..23ff18f 100644
--- a/fs/nfs/callback.c
+++ b/fs/nfs/callback.c
@@ -106,7 +106,7 @@
 {
 	int ret;
 
-	ret = svc_create_xprt(serv, "tcp", xprt->xprt_net, PF_INET,
+	ret = svc_create_xprt(serv, "tcp", &init_net, PF_INET,
 				nfs_callback_set_tcpport, SVC_SOCK_ANONYMOUS);
 	if (ret <= 0)
 		goto out_err;
@@ -114,7 +114,7 @@
 	dprintk("NFS: Callback listener port = %u (af %u)\n",
 			nfs_callback_tcpport, PF_INET);
 
-	ret = svc_create_xprt(serv, "tcp", xprt->xprt_net, PF_INET6,
+	ret = svc_create_xprt(serv, "tcp", &init_net, PF_INET6,
 				nfs_callback_set_tcpport, SVC_SOCK_ANONYMOUS);
 	if (ret > 0) {
 		nfs_callback_tcpport6 = ret;
@@ -183,7 +183,7 @@
 	 * fore channel connection.
 	 * Returns the input port (0) and sets the svc_serv bc_xprt on success
 	 */
-	ret = svc_create_xprt(serv, "tcp-bc", xprt->xprt_net, PF_INET, 0,
+	ret = svc_create_xprt(serv, "tcp-bc", &init_net, PF_INET, 0,
 			      SVC_SOCK_ANONYMOUS);
 	if (ret < 0) {
 		rqstp = ERR_PTR(ret);
@@ -253,6 +253,7 @@
 	char svc_name[12];
 	int ret = 0;
 	int minorversion_setup;
+	struct net *net = &init_net;
 
 	mutex_lock(&nfs_callback_mutex);
 	if (cb_info->users++ || cb_info->task != NULL) {
@@ -265,6 +266,12 @@
 		goto out_err;
 	}
 
+	ret = svc_bind(serv, net);
+	if (ret < 0) {
+		printk(KERN_WARNING "NFS: bind callback service failed\n");
+		goto out_err;
+	}
+
 	minorversion_setup =  nfs_minorversion_callback_svc_setup(minorversion,
 					serv, xprt, &rqstp, &callback_svc);
 	if (!minorversion_setup) {
@@ -306,6 +313,8 @@
 	dprintk("NFS: Couldn't create callback socket or server thread; "
 		"err = %d\n", ret);
 	cb_info->users--;
+	if (serv)
+		svc_shutdown_net(serv, net);
 	goto out;
 }
 
@@ -320,6 +329,7 @@
 	cb_info->users--;
 	if (cb_info->users == 0 && cb_info->task != NULL) {
 		kthread_stop(cb_info->task);
+		svc_shutdown_net(cb_info->serv, &init_net);
 		svc_exit_thread(cb_info->rqst);
 		cb_info->serv = NULL;
 		cb_info->rqst = NULL;
@@ -332,7 +342,7 @@
 int
 check_gss_callback_principal(struct nfs_client *clp, struct svc_rqst *rqstp)
 {
-	char *p = svc_gss_principal(rqstp);
+	char *p = rqstp->rq_cred.cr_principal;
 
 	if (rqstp->rq_authop->flavour != RPC_AUTH_GSS)
 		return 1;
diff --git a/fs/nfs/callback_xdr.c b/fs/nfs/callback_xdr.c
index 95bfc24..e64b01d 100644
--- a/fs/nfs/callback_xdr.c
+++ b/fs/nfs/callback_xdr.c
@@ -455,9 +455,9 @@
 	args->csa_nrclists = ntohl(*p++);
 	args->csa_rclists = NULL;
 	if (args->csa_nrclists) {
-		args->csa_rclists = kmalloc(args->csa_nrclists *
-					    sizeof(*args->csa_rclists),
-					    GFP_KERNEL);
+		args->csa_rclists = kmalloc_array(args->csa_nrclists,
+						  sizeof(*args->csa_rclists),
+						  GFP_KERNEL);
 		if (unlikely(args->csa_rclists == NULL))
 			goto out;
 
@@ -696,7 +696,7 @@
 				       const struct cb_sequenceres *res)
 {
 	__be32 *p;
-	unsigned status = res->csr_status;
+	__be32 status = res->csr_status;
 
 	if (unlikely(status != 0))
 		goto out;
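
The kmalloc_array() conversion in the callback_xdr hunk above replaces an open-coded size multiplication with the overflow-checked helper: if the element count times the element size would overflow, the allocation fails instead of silently returning a short buffer. A small hedged sketch (example_entry and alloc_entries are hypothetical):

/* Sketch only.  kmalloc_array() returns NULL when n * size would
 * overflow, unlike a bare kmalloc(n * size, flags). */
struct example_entry {
	u32 id;
};

static struct example_entry *alloc_entries(unsigned int nr_entries)
{
	return kmalloc_array(nr_entries, sizeof(struct example_entry),
			     GFP_KERNEL);
}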
diff --git a/fs/nfs/client.c b/fs/nfs/client.c
index 60f7e4e..f005b5b 100644
--- a/fs/nfs/client.c
+++ b/fs/nfs/client.c
@@ -65,7 +65,7 @@
 static int nfs_get_cb_ident_idr(struct nfs_client *clp, int minorversion)
 {
 	int ret = 0;
-	struct nfs_net *nn = net_generic(clp->net, nfs_net_id);
+	struct nfs_net *nn = net_generic(clp->cl_net, nfs_net_id);
 
 	if (clp->rpc_ops->version != 4 || minorversion != 0)
 		return ret;
@@ -90,7 +90,9 @@
  * RPC cruft for NFS
  */
 static const struct rpc_version *nfs_version[5] = {
+#ifdef CONFIG_NFS_V2
 	[2]			= &nfs_version2,
+#endif
 #ifdef CONFIG_NFS_V3
 	[3]			= &nfs_version3,
 #endif
@@ -129,6 +131,7 @@
 #endif  /* CONFIG_NFS_V3_ACL */
 
 struct nfs_client_initdata {
+	unsigned long init_flags;
 	const char *hostname;
 	const struct sockaddr *addr;
 	size_t addrlen;
@@ -172,7 +175,7 @@
 	clp->cl_rpcclient = ERR_PTR(-EINVAL);
 
 	clp->cl_proto = cl_init->proto;
-	clp->net = get_net(cl_init->net);
+	clp->cl_net = get_net(cl_init->net);
 
 #ifdef CONFIG_NFS_V4
 	err = nfs_get_cb_ident_idr(clp, cl_init->minorversion);
@@ -182,7 +185,6 @@
 	spin_lock_init(&clp->cl_lock);
 	INIT_DELAYED_WORK(&clp->cl_renewd, nfs4_renew_state);
 	rpc_init_wait_queue(&clp->cl_rpcwaitq, "NFS client");
-	clp->cl_boot_time = CURRENT_TIME;
 	clp->cl_state = 1 << NFS4CLNT_LEASE_EXPIRED;
 	clp->cl_minorversion = cl_init->minorversion;
 	clp->cl_mvops = nfs_v4_minor_ops[cl_init->minorversion];
@@ -205,8 +207,8 @@
 static void nfs4_shutdown_session(struct nfs_client *clp)
 {
 	if (nfs4_has_session(clp)) {
-		nfs4_deviceid_purge_client(clp);
 		nfs4_destroy_session(clp->cl_session);
+		nfs4_destroy_clientid(clp);
 	}
 
 }
@@ -235,6 +237,9 @@
 		nfs_idmap_delete(clp);
 
 	rpc_destroy_wait_queue(&clp->cl_rpcwaitq);
+	kfree(clp->cl_serverowner);
+	kfree(clp->cl_serverscope);
+	kfree(clp->cl_implid);
 }
 
 /* idr_remove_all is not needed as all id's are removed by nfs_put_client */
@@ -248,7 +253,7 @@
 /* nfs_client_lock held */
 static void nfs_cb_idr_remove_locked(struct nfs_client *clp)
 {
-	struct nfs_net *nn = net_generic(clp->net, nfs_net_id);
+	struct nfs_net *nn = net_generic(clp->cl_net, nfs_net_id);
 
 	if (clp->cl_cb_ident)
 		idr_remove(&nn->cb_ident_idr, clp->cl_cb_ident);
@@ -301,10 +306,8 @@
 	if (clp->cl_machine_cred != NULL)
 		put_rpccred(clp->cl_machine_cred);
 
-	put_net(clp->net);
+	put_net(clp->cl_net);
 	kfree(clp->cl_hostname);
-	kfree(clp->server_scope);
-	kfree(clp->impl_id);
 	kfree(clp);
 
 	dprintk("<-- nfs_free_client()\n");
@@ -321,7 +324,7 @@
 		return;
 
 	dprintk("--> nfs_put_client({%d})\n", atomic_read(&clp->cl_count));
-	nn = net_generic(clp->net, nfs_net_id);
+	nn = net_generic(clp->cl_net, nfs_net_id);
 
 	if (atomic_dec_and_lock(&clp->cl_count, &nn->nfs_client_lock)) {
 		list_del(&clp->cl_share_link);
@@ -456,6 +459,8 @@
 	    clp->cl_cons_state == NFS_CS_SESSION_INITING))
 		return false;
 
+	smp_rmb();
+
 	/* Match the version and minorversion */
 	if (clp->rpc_ops->version != 4 ||
 	    clp->cl_minorversion != minorversion)
@@ -504,68 +509,27 @@
 	return NULL;
 }
 
+static bool nfs_client_init_is_complete(const struct nfs_client *clp)
+{
+	return clp->cl_cons_state != NFS_CS_INITING;
+}
+
+int nfs_wait_client_init_complete(const struct nfs_client *clp)
+{
+	return wait_event_killable(nfs_client_active_wq,
+			nfs_client_init_is_complete(clp));
+}
+
 /*
- * Look up a client by IP address and protocol version
- * - creates a new record if one doesn't yet exist
+ * Found an existing client.  Make sure it's ready before returning.
  */
 static struct nfs_client *
-nfs_get_client(const struct nfs_client_initdata *cl_init,
-	       const struct rpc_timeout *timeparms,
-	       const char *ip_addr,
-	       rpc_authflavor_t authflavour,
-	       int noresvport)
+nfs_found_client(const struct nfs_client_initdata *cl_init,
+		 struct nfs_client *clp)
 {
-	struct nfs_client *clp, *new = NULL;
 	int error;
-	struct nfs_net *nn = net_generic(cl_init->net, nfs_net_id);
 
-	dprintk("--> nfs_get_client(%s,v%u)\n",
-		cl_init->hostname ?: "", cl_init->rpc_ops->version);
-
-	/* see if the client already exists */
-	do {
-		spin_lock(&nn->nfs_client_lock);
-
-		clp = nfs_match_client(cl_init);
-		if (clp)
-			goto found_client;
-		if (new)
-			goto install_client;
-
-		spin_unlock(&nn->nfs_client_lock);
-
-		new = nfs_alloc_client(cl_init);
-	} while (!IS_ERR(new));
-
-	dprintk("--> nfs_get_client() = %ld [failed]\n", PTR_ERR(new));
-	return new;
-
-	/* install a new client and return with it unready */
-install_client:
-	clp = new;
-	list_add(&clp->cl_share_link, &nn->nfs_client_list);
-	spin_unlock(&nn->nfs_client_lock);
-
-	error = cl_init->rpc_ops->init_client(clp, timeparms, ip_addr,
-					      authflavour, noresvport);
-	if (error < 0) {
-		nfs_put_client(clp);
-		return ERR_PTR(error);
-	}
-	dprintk("--> nfs_get_client() = %p [new]\n", clp);
-	return clp;
-
-	/* found an existing client
-	 * - make sure it's ready before returning
-	 */
-found_client:
-	spin_unlock(&nn->nfs_client_lock);
-
-	if (new)
-		nfs_free_client(new);
-
-	error = wait_event_killable(nfs_client_active_wq,
-				clp->cl_cons_state < NFS_CS_INITING);
+	error = nfs_wait_client_init_complete(clp);
 	if (error < 0) {
 		nfs_put_client(clp);
 		return ERR_PTR(-ERESTARTSYS);
@@ -577,38 +541,70 @@
 		return ERR_PTR(error);
 	}
 
-	BUG_ON(clp->cl_cons_state != NFS_CS_READY);
+	smp_rmb();
 
-	dprintk("--> nfs_get_client() = %p [share]\n", clp);
+	dprintk("<-- %s found nfs_client %p for %s\n",
+		__func__, clp, cl_init->hostname ?: "");
 	return clp;
 }
 
 /*
+ * Look up a client by IP address and protocol version
+ * - creates a new record if one doesn't yet exist
+ */
+static struct nfs_client *
+nfs_get_client(const struct nfs_client_initdata *cl_init,
+	       const struct rpc_timeout *timeparms,
+	       const char *ip_addr,
+	       rpc_authflavor_t authflavour)
+{
+	struct nfs_client *clp, *new = NULL;
+	struct nfs_net *nn = net_generic(cl_init->net, nfs_net_id);
+
+	dprintk("--> nfs_get_client(%s,v%u)\n",
+		cl_init->hostname ?: "", cl_init->rpc_ops->version);
+
+	/* see if the client already exists */
+	do {
+		spin_lock(&nn->nfs_client_lock);
+
+		clp = nfs_match_client(cl_init);
+		if (clp) {
+			spin_unlock(&nn->nfs_client_lock);
+			if (new)
+				nfs_free_client(new);
+			return nfs_found_client(cl_init, clp);
+		}
+		if (new) {
+			list_add(&new->cl_share_link, &nn->nfs_client_list);
+			spin_unlock(&nn->nfs_client_lock);
+			new->cl_flags = cl_init->init_flags;
+			return cl_init->rpc_ops->init_client(new,
+						timeparms, ip_addr,
+						authflavour);
+		}
+
+		spin_unlock(&nn->nfs_client_lock);
+
+		new = nfs_alloc_client(cl_init);
+	} while (!IS_ERR(new));
+
+	dprintk("<-- nfs_get_client() Failed to find %s (%ld)\n",
+		cl_init->hostname ?: "", PTR_ERR(new));
+	return new;
+}
+
+/*
  * Mark a server as ready or failed
  */
 void nfs_mark_client_ready(struct nfs_client *clp, int state)
 {
+	smp_wmb();
 	clp->cl_cons_state = state;
 	wake_up_all(&nfs_client_active_wq);
 }
 
 /*
- * With sessions, the client is not marked ready until after a
- * successful EXCHANGE_ID and CREATE_SESSION.
- *
- * Map errors cl_cons_state errors to EPROTONOSUPPORT to indicate
- * other versions of NFS can be tried.
- */
-int nfs4_check_client_ready(struct nfs_client *clp)
-{
-	if (!nfs4_has_session(clp))
-		return 0;
-	if (clp->cl_cons_state < NFS_CS_READY)
-		return -EPROTONOSUPPORT;
-	return 0;
-}
-
-/*
  * Initialise the timeout values for a connection
  */
 static void nfs_init_timeout_values(struct rpc_timeout *to, int proto,
@@ -654,12 +650,11 @@
  */
 static int nfs_create_rpc_client(struct nfs_client *clp,
 				 const struct rpc_timeout *timeparms,
-				 rpc_authflavor_t flavor,
-				 int discrtry, int noresvport)
+				 rpc_authflavor_t flavor)
 {
 	struct rpc_clnt		*clnt = NULL;
 	struct rpc_create_args args = {
-		.net		= clp->net,
+		.net		= clp->cl_net,
 		.protocol	= clp->cl_proto,
 		.address	= (struct sockaddr *)&clp->cl_addr,
 		.addrsize	= clp->cl_addrlen,
@@ -670,9 +665,9 @@
 		.authflavor	= flavor,
 	};
 
-	if (discrtry)
+	if (test_bit(NFS_CS_DISCRTRY, &clp->cl_flags))
 		args.flags |= RPC_CLNT_CREATE_DISCRTRY;
-	if (noresvport)
+	if (test_bit(NFS_CS_NORESVPORT, &clp->cl_flags))
 		args.flags |= RPC_CLNT_CREATE_NONPRIVPORT;
 
 	if (!IS_ERR(clp->cl_rpcclient))
@@ -713,7 +708,7 @@
 		.nfs_version	= clp->rpc_ops->version,
 		.noresvport	= server->flags & NFS_MOUNT_NORESVPORT ?
 					1 : 0,
-		.net		= clp->net,
+		.net		= clp->cl_net,
 	};
 
 	if (nlm_init.nfs_version > 3)
@@ -805,36 +800,43 @@
 	return 0;
 }
 
-/*
- * Initialise an NFS2 or NFS3 client
+/**
+ * nfs_init_client - Initialise an NFS2 or NFS3 client
+ *
+ * @clp: nfs_client to initialise
+ * @timeparms: timeout parameters for underlying RPC transport
+ * @ip_addr: IP presentation address (not used)
+ * @authflavor: authentication flavor for underlying RPC transport
+ *
+ * Returns pointer to an NFS client, or an ERR_PTR value.
  */
-int nfs_init_client(struct nfs_client *clp, const struct rpc_timeout *timeparms,
-		    const char *ip_addr, rpc_authflavor_t authflavour,
-		    int noresvport)
+struct nfs_client *nfs_init_client(struct nfs_client *clp,
+		    const struct rpc_timeout *timeparms,
+		    const char *ip_addr, rpc_authflavor_t authflavour)
 {
 	int error;
 
 	if (clp->cl_cons_state == NFS_CS_READY) {
 		/* the client is already initialised */
 		dprintk("<-- nfs_init_client() = 0 [already %p]\n", clp);
-		return 0;
+		return clp;
 	}
 
 	/*
 	 * Create a client RPC handle for doing FSSTAT with UNIX auth only
 	 * - RFC 2623, sec 2.3.2
 	 */
-	error = nfs_create_rpc_client(clp, timeparms, RPC_AUTH_UNIX,
-				      0, noresvport);
+	error = nfs_create_rpc_client(clp, timeparms, RPC_AUTH_UNIX);
 	if (error < 0)
 		goto error;
 	nfs_mark_client_ready(clp, NFS_CS_READY);
-	return 0;
+	return clp;
 
 error:
 	nfs_mark_client_ready(clp, error);
+	nfs_put_client(clp);
 	dprintk("<-- nfs_init_client() = xerror %d\n", error);
-	return error;
+	return ERR_PTR(error);
 }
 
 /*
@@ -847,7 +849,7 @@
 		.hostname = data->nfs_server.hostname,
 		.addr = (const struct sockaddr *)&data->nfs_server.address,
 		.addrlen = data->nfs_server.addrlen,
-		.rpc_ops = &nfs_v2_clientops,
+		.rpc_ops = NULL,
 		.proto = data->nfs_server.protocol,
 		.net = data->net,
 	};
@@ -857,17 +859,28 @@
 
 	dprintk("--> nfs_init_server()\n");
 
-#ifdef CONFIG_NFS_V3
-	if (data->version == 3)
-		cl_init.rpc_ops = &nfs_v3_clientops;
+	switch (data->version) {
+#ifdef CONFIG_NFS_V2
+	case 2:
+		cl_init.rpc_ops = &nfs_v2_clientops;
+		break;
 #endif
+#ifdef CONFIG_NFS_V3
+	case 3:
+		cl_init.rpc_ops = &nfs_v3_clientops;
+		break;
+#endif
+	default:
+		return -EPROTONOSUPPORT;
+	}
 
 	nfs_init_timeout_values(&timeparms, data->nfs_server.protocol,
 			data->timeo, data->retrans);
+	if (data->flags & NFS_MOUNT_NORESVPORT)
+		set_bit(NFS_CS_NORESVPORT, &cl_init.init_flags);
 
 	/* Allocate or find a client reference we can use */
-	clp = nfs_get_client(&cl_init, &timeparms, NULL, RPC_AUTH_UNIX,
-			     data->flags & NFS_MOUNT_NORESVPORT);
+	clp = nfs_get_client(&cl_init, &timeparms, NULL, RPC_AUTH_UNIX);
 	if (IS_ERR(clp)) {
 		dprintk("<-- nfs_init_server() = error %ld\n", PTR_ERR(clp));
 		return PTR_ERR(clp);
@@ -880,7 +893,7 @@
 	server->options = data->options;
 	server->caps |= NFS_CAP_HARDLINKS|NFS_CAP_SYMLINKS|NFS_CAP_FILEID|
 		NFS_CAP_MODE|NFS_CAP_NLINK|NFS_CAP_OWNER|NFS_CAP_OWNER_GROUP|
-		NFS_CAP_ATIME|NFS_CAP_CTIME|NFS_CAP_MTIME;
+		NFS_CAP_ATIME|NFS_CAP_CTIME|NFS_CAP_MTIME|NFS_CAP_CHANGE_ATTR;
 
 	if (data->rsize)
 		server->rsize = nfs_block_size(data->rsize, NULL);
@@ -1048,7 +1061,7 @@
 static void nfs_server_insert_lists(struct nfs_server *server)
 {
 	struct nfs_client *clp = server->nfs_client;
-	struct nfs_net *nn = net_generic(clp->net, nfs_net_id);
+	struct nfs_net *nn = net_generic(clp->cl_net, nfs_net_id);
 
 	spin_lock(&nn->nfs_client_lock);
 	list_add_tail_rcu(&server->client_link, &clp->cl_superblocks);
@@ -1065,7 +1078,7 @@
 
 	if (clp == NULL)
 		return;
-	nn = net_generic(clp->net, nfs_net_id);
+	nn = net_generic(clp->cl_net, nfs_net_id);
 	spin_lock(&nn->nfs_client_lock);
 	list_del_rcu(&server->client_link);
 	if (list_empty(&clp->cl_superblocks))
@@ -1333,21 +1346,27 @@
 		 * so that the client back channel can find the
 		 * nfs_client struct
 		 */
-		clp->cl_cons_state = NFS_CS_SESSION_INITING;
+		nfs_mark_client_ready(clp, NFS_CS_SESSION_INITING);
 	}
 #endif /* CONFIG_NFS_V4_1 */
 
 	return nfs4_init_callback(clp);
 }
 
-/*
- * Initialise an NFS4 client record
+/**
+ * nfs4_init_client - Initialise an NFS4 client record
+ *
+ * @clp: nfs_client to initialise
+ * @timeparms: timeout parameters for underlying RPC transport
+ * @ip_addr: callback IP address in presentation format
+ * @authflavor: authentication flavor for underlying RPC transport
+ *
+ * Returns pointer to an NFS client, or an ERR_PTR value.
  */
-int nfs4_init_client(struct nfs_client *clp,
-		     const struct rpc_timeout *timeparms,
-		     const char *ip_addr,
-		     rpc_authflavor_t authflavour,
-		     int noresvport)
+struct nfs_client *nfs4_init_client(struct nfs_client *clp,
+				    const struct rpc_timeout *timeparms,
+				    const char *ip_addr,
+				    rpc_authflavor_t authflavour)
 {
 	char buf[INET6_ADDRSTRLEN + 1];
 	int error;
@@ -1355,14 +1374,14 @@
 	if (clp->cl_cons_state == NFS_CS_READY) {
 		/* the client is initialised already */
 		dprintk("<-- nfs4_init_client() = 0 [already %p]\n", clp);
-		return 0;
+		return clp;
 	}
 
 	/* Check NFS protocol revision and initialize RPC op vector */
 	clp->rpc_ops = &nfs_v4_clientops;
 
-	error = nfs_create_rpc_client(clp, timeparms, authflavour,
-				      1, noresvport);
+	__set_bit(NFS_CS_DISCRTRY, &clp->cl_flags);
+	error = nfs_create_rpc_client(clp, timeparms, authflavour);
 	if (error < 0)
 		goto error;
 
@@ -1395,12 +1414,13 @@
 
 	if (!nfs4_has_session(clp))
 		nfs_mark_client_ready(clp, NFS_CS_READY);
-	return 0;
+	return clp;
 
 error:
 	nfs_mark_client_ready(clp, error);
+	nfs_put_client(clp);
 	dprintk("<-- nfs4_init_client() = xerror %d\n", error);
-	return error;
+	return ERR_PTR(error);
 }
 
 /*
@@ -1429,9 +1449,11 @@
 
 	dprintk("--> nfs4_set_client()\n");
 
+	if (server->flags & NFS_MOUNT_NORESVPORT)
+		set_bit(NFS_CS_NORESVPORT, &cl_init.init_flags);
+
 	/* Allocate or find a client reference we can use */
-	clp = nfs_get_client(&cl_init, timeparms, ip_addr, authflavour,
-			     server->flags & NFS_MOUNT_NORESVPORT);
+	clp = nfs_get_client(&cl_init, timeparms, ip_addr, authflavour);
 	if (IS_ERR(clp)) {
 		error = PTR_ERR(clp);
 		goto error;
@@ -1465,8 +1487,8 @@
  * the MDS.
  */
 struct nfs_client *nfs4_set_ds_client(struct nfs_client* mds_clp,
-		const struct sockaddr *ds_addr,
-		int ds_addrlen, int ds_proto)
+		const struct sockaddr *ds_addr, int ds_addrlen,
+		int ds_proto, unsigned int ds_timeo, unsigned int ds_retrans)
 {
 	struct nfs_client_initdata cl_init = {
 		.addr = ds_addr,
@@ -1474,14 +1496,9 @@
 		.rpc_ops = &nfs_v4_clientops,
 		.proto = ds_proto,
 		.minorversion = mds_clp->cl_minorversion,
-		.net = mds_clp->net,
+		.net = mds_clp->cl_net,
 	};
-	struct rpc_timeout ds_timeout = {
-		.to_initval = 15 * HZ,
-		.to_maxval = 15 * HZ,
-		.to_retries = 1,
-		.to_exponential = 1,
-	};
+	struct rpc_timeout ds_timeout;
 	struct nfs_client *clp;
 
 	/*
@@ -1489,8 +1506,9 @@
 	 * cl_ipaddr so as to use the same EXCHANGE_ID co_ownerid as the MDS
 	 * (section 13.1 RFC 5661).
 	 */
+	nfs_init_timeout_values(&ds_timeout, ds_proto, ds_timeo, ds_retrans);
 	clp = nfs_get_client(&cl_init, &ds_timeout, mds_clp->cl_ipaddr,
-			     mds_clp->cl_rpcclient->cl_auth->au_flavor, 0);
+			     mds_clp->cl_rpcclient->cl_auth->au_flavor);
 
 	dprintk("<-- %s %p\n", __func__, clp);
 	return clp;
@@ -1701,7 +1719,7 @@
 				rpc_protocol(parent_server->client),
 				parent_server->client->cl_timeout,
 				parent_client->cl_mvops->minor_version,
-				parent_client->net);
+				parent_client->cl_net);
 	if (error < 0)
 		goto error;
 
@@ -1805,6 +1823,7 @@
 	idr_init(&nn->cb_ident_idr);
 #endif
 	spin_lock_init(&nn->nfs_client_lock);
+	nn->boot_time = CURRENT_TIME;
 }
 
 #ifdef CONFIG_PROC_FS
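
The nfs_wait_client_init_complete()/nfs_mark_client_ready() pair introduced above is the standard wait-queue handshake: the thread that finishes (or fails) initialisation publishes the new state and wakes all waiters, while other mounts sleep killably until the predicate holds. A generic, hedged sketch of that handshake with hypothetical example_* names (the patch additionally pairs the state update with smp_wmb()/smp_rmb(), omitted here for brevity):

/* Sketch only; example_* names are hypothetical. */
static DECLARE_WAIT_QUEUE_HEAD(example_wq);
static int example_state;	/* 0 = initialising, non-zero = ready or error */

static void example_mark_ready(int state)
{
	example_state = state;
	wake_up_all(&example_wq);
}

static int example_wait_ready(void)
{
	/* Returns 0 once example_state is non-zero, or -ERESTARTSYS if a
	 * fatal signal is delivered to the waiting task first. */
	return wait_event_killable(example_wq, example_state != 0);
}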
diff --git a/fs/nfs/delegation.c b/fs/nfs/delegation.c
index 89af1d2..bd3a960 100644
--- a/fs/nfs/delegation.c
+++ b/fs/nfs/delegation.c
@@ -316,6 +316,10 @@
  * nfs_client_return_marked_delegations - return previously marked delegations
  * @clp: nfs_client to process
  *
+ * Note that this function is designed to be called by the state
+ * manager thread. For this reason, it cannot flush the dirty data,
+ * since that could deadlock in case of a state recovery error.
+ *
  * Returns zero on success, or a negative errno value.
  */
 int nfs_client_return_marked_delegations(struct nfs_client *clp)
@@ -340,11 +344,9 @@
 								server);
 			rcu_read_unlock();
 
-			if (delegation != NULL) {
-				filemap_flush(inode->i_mapping);
+			if (delegation != NULL)
 				err = __nfs_inode_return_delegation(inode,
 								delegation, 0);
-			}
 			iput(inode);
 			if (!err)
 				goto restart;
@@ -380,6 +382,10 @@
  * nfs_inode_return_delegation - synchronously return a delegation
  * @inode: inode to process
  *
+ * This routine will always flush any dirty data to disk on the
+ * assumption that if we need to return the delegation, then
+ * we should stop caching.
+ *
  * Returns zero on success, or a negative errno value.
  */
 int nfs_inode_return_delegation(struct inode *inode)
@@ -389,10 +395,10 @@
 	struct nfs_delegation *delegation;
 	int err = 0;
 
+	nfs_wb_all(inode);
 	if (rcu_access_pointer(nfsi->delegation) != NULL) {
 		delegation = nfs_detach_delegation(nfsi, server);
 		if (delegation != NULL) {
-			nfs_wb_all(inode);
 			err = __nfs_inode_return_delegation(inode, delegation, 1);
 		}
 	}
@@ -538,6 +544,8 @@
 	struct nfs_client *clp = server->nfs_client;
 	struct nfs_delegation *delegation;
 
+	filemap_flush(inode->i_mapping);
+
 	rcu_read_lock();
 	delegation = rcu_dereference(NFS_I(inode)->delegation);
 
diff --git a/fs/nfs/delegation.h b/fs/nfs/delegation.h
index cd6a7a8..72709c4 100644
--- a/fs/nfs/delegation.h
+++ b/fs/nfs/delegation.h
@@ -66,6 +66,7 @@
 
 static inline int nfs_inode_return_delegation(struct inode *inode)
 {
+	nfs_wb_all(inode);
 	return 0;
 }
 #endif
diff --git a/fs/nfs/dir.c b/fs/nfs/dir.c
index eedd24d..f430057 100644
--- a/fs/nfs/dir.c
+++ b/fs/nfs/dir.c
@@ -475,6 +475,29 @@
 }
 
 static
+bool nfs_use_readdirplus(struct inode *dir, struct file *filp)
+{
+	if (!nfs_server_capable(dir, NFS_CAP_READDIRPLUS))
+		return false;
+	if (test_and_clear_bit(NFS_INO_ADVISE_RDPLUS, &NFS_I(dir)->flags))
+		return true;
+	if (filp->f_pos == 0)
+		return true;
+	return false;
+}
+
+/*
+ * This function is called by the lookup code to request the use of
+ * readdirplus to accelerate any future lookups in the same
+ * directory.
+ */
+static
+void nfs_advise_use_readdirplus(struct inode *dir)
+{
+	set_bit(NFS_INO_ADVISE_RDPLUS, &NFS_I(dir)->flags);
+}
+
+static
 void nfs_prime_dcache(struct dentry *parent, struct nfs_entry *entry)
 {
 	struct qstr filename = QSTR_INIT(entry->name, entry->len);
@@ -871,7 +894,7 @@
 	desc->file = filp;
 	desc->dir_cookie = &dir_ctx->dir_cookie;
 	desc->decode = NFS_PROTO(inode)->decode_dirent;
-	desc->plus = NFS_USE_READDIRPLUS(inode);
+	desc->plus = nfs_use_readdirplus(inode, filp) ? 1 : 0;
 
 	nfs_block_sillyrename(dentry);
 	res = nfs_revalidate_mapping(inode, filp->f_mapping);
@@ -1111,7 +1134,7 @@
 	if (!inode) {
 		if (nfs_neg_need_reval(dir, dentry, nd))
 			goto out_bad;
-		goto out_valid;
+		goto out_valid_noent;
 	}
 
 	if (is_bad_inode(inode)) {
@@ -1140,7 +1163,7 @@
 	if (fhandle == NULL || fattr == NULL)
 		goto out_error;
 
-	error = NFS_PROTO(dir)->lookup(NFS_SERVER(dir)->client, dir, &dentry->d_name, fhandle, fattr);
+	error = NFS_PROTO(dir)->lookup(dir, &dentry->d_name, fhandle, fattr);
 	if (error)
 		goto out_bad;
 	if (nfs_compare_fh(NFS_FH(inode), fhandle))
@@ -1153,6 +1176,9 @@
 out_set_verifier:
 	nfs_set_verifier(dentry, nfs_save_change_attribute(dir));
  out_valid:
+	/* Success: notify readdir to use READDIRPLUS */
+	nfs_advise_use_readdirplus(dir);
+ out_valid_noent:
 	dput(parent);
 	dfprintk(LOOKUPCACHE, "NFS: %s(%s/%s) is valid\n",
 			__func__, dentry->d_parent->d_name.name,
@@ -1296,7 +1322,7 @@
 	parent = dentry->d_parent;
 	/* Protect against concurrent sillydeletes */
 	nfs_block_sillyrename(parent);
-	error = NFS_PROTO(dir)->lookup(NFS_SERVER(dir)->client, dir, &dentry->d_name, fhandle, fattr);
+	error = NFS_PROTO(dir)->lookup(dir, &dentry->d_name, fhandle, fattr);
 	if (error == -ENOENT)
 		goto no_entry;
 	if (error < 0) {
@@ -1308,6 +1334,9 @@
 	if (IS_ERR(res))
 		goto out_unblock_sillyrename;
 
+	/* Success: notify readdir to use READDIRPLUS */
+	nfs_advise_use_readdirplus(dir);
+
 no_entry:
 	res = d_materialise_unique(dentry, inode);
 	if (res != NULL) {
@@ -1325,10 +1354,10 @@
 }
 
 #ifdef CONFIG_NFS_V4
-static int nfs_open_revalidate(struct dentry *, struct nameidata *);
+static int nfs4_lookup_revalidate(struct dentry *, struct nameidata *);
 
 const struct dentry_operations nfs4_dentry_operations = {
-	.d_revalidate	= nfs_open_revalidate,
+	.d_revalidate	= nfs4_lookup_revalidate,
 	.d_delete	= nfs_dentry_delete,
 	.d_iput		= nfs_dentry_iput,
 	.d_automount	= nfs_d_automount,
@@ -1490,13 +1519,11 @@
 	return nfs_lookup(dir, dentry, nd);
 }
 
-static int nfs_open_revalidate(struct dentry *dentry, struct nameidata *nd)
+static int nfs4_lookup_revalidate(struct dentry *dentry, struct nameidata *nd)
 {
 	struct dentry *parent = NULL;
 	struct inode *inode;
 	struct inode *dir;
-	struct nfs_open_context *ctx;
-	struct iattr attr;
 	int openflags, ret = 0;
 
 	if (nd->flags & LOOKUP_RCU)
@@ -1525,57 +1552,13 @@
 	/* We cannot do exclusive creation on a positive dentry */
 	if ((openflags & (O_CREAT|O_EXCL)) == (O_CREAT|O_EXCL))
 		goto no_open_dput;
-	/* We can't create new files here */
-	openflags &= ~(O_CREAT|O_EXCL);
 
-	ctx = create_nfs_open_context(dentry, openflags);
-	ret = PTR_ERR(ctx);
-	if (IS_ERR(ctx))
-		goto out;
+	/* Let f_op->open() actually open (and revalidate) the file */
+	ret = 1;
 
-	attr.ia_valid = ATTR_OPEN;
-	if (openflags & O_TRUNC) {
-		attr.ia_valid |= ATTR_SIZE;
-		attr.ia_size = 0;
-		nfs_wb_all(inode);
-	}
-
-	/*
-	 * Note: we're not holding inode->i_mutex and so may be racing with
-	 * operations that change the directory. We therefore save the
-	 * change attribute *before* we do the RPC call.
-	 */
-	inode = NFS_PROTO(dir)->open_context(dir, ctx, openflags, &attr);
-	if (IS_ERR(inode)) {
-		ret = PTR_ERR(inode);
-		switch (ret) {
-		case -EPERM:
-		case -EACCES:
-		case -EDQUOT:
-		case -ENOSPC:
-		case -EROFS:
-			goto out_put_ctx;
-		default:
-			goto out_drop;
-		}
-	}
-	iput(inode);
-	if (inode != dentry->d_inode)
-		goto out_drop;
-
-	nfs_set_verifier(dentry, nfs_save_change_attribute(dir));
-	ret = nfs_intent_set_file(nd, ctx);
-	if (ret >= 0)
-		ret = 1;
 out:
 	dput(parent);
 	return ret;
-out_drop:
-	d_drop(dentry);
-	ret = 0;
-out_put_ctx:
-	put_nfs_open_context(ctx);
-	goto out;
 
 no_open_dput:
 	dput(parent);
@@ -1643,7 +1626,7 @@
 	if (dentry->d_inode)
 		goto out;
 	if (fhandle->size == 0) {
-		error = NFS_PROTO(dir)->lookup(NFS_SERVER(dir)->client, dir, &dentry->d_name, fhandle, fattr);
+		error = NFS_PROTO(dir)->lookup(dir, &dentry->d_name, fhandle, fattr);
 		if (error)
 			goto out_error;
 	}
diff --git a/fs/nfs/direct.c b/fs/nfs/direct.c
index 481be7f..9a4cbfc 100644
--- a/fs/nfs/direct.c
+++ b/fs/nfs/direct.c
@@ -56,6 +56,7 @@
 
 #include "internal.h"
 #include "iostat.h"
+#include "pnfs.h"
 
 #define NFSDBG_FACILITY		NFSDBG_VFS
 
@@ -81,16 +82,19 @@
 	struct completion	completion;	/* wait for i/o completion */
 
 	/* commit state */
-	struct list_head	rewrite_list;	/* saved nfs_write_data structs */
-	struct nfs_write_data *	commit_data;	/* special write_data for commits */
+	struct nfs_mds_commit_info mds_cinfo;	/* Storage for cinfo */
+	struct pnfs_ds_commit_info ds_cinfo;	/* Storage for cinfo */
+	struct work_struct	work;
 	int			flags;
 #define NFS_ODIRECT_DO_COMMIT		(1)	/* an unstable reply was received */
 #define NFS_ODIRECT_RESCHED_WRITES	(2)	/* write verification failed */
 	struct nfs_writeverf	verf;		/* unstable write verifier */
 };
 
+static const struct nfs_pgio_completion_ops nfs_direct_write_completion_ops;
+static const struct nfs_commit_completion_ops nfs_direct_commit_completion_ops;
 static void nfs_direct_write_complete(struct nfs_direct_req *dreq, struct inode *inode);
-static const struct rpc_call_ops nfs_write_direct_ops;
+static void nfs_direct_write_schedule_work(struct work_struct *work);
 
 static inline void get_dreq(struct nfs_direct_req *dreq)
 {
@@ -124,22 +128,6 @@
 	return -EINVAL;
 }
 
-static void nfs_direct_dirty_pages(struct page **pages, unsigned int pgbase, size_t count)
-{
-	unsigned int npages;
-	unsigned int i;
-
-	if (count == 0)
-		return;
-	pages += (pgbase >> PAGE_SHIFT);
-	npages = (count + (pgbase & ~PAGE_MASK) + PAGE_SIZE - 1) >> PAGE_SHIFT;
-	for (i = 0; i < npages; i++) {
-		struct page *page = pages[i];
-		if (!PageCompound(page))
-			set_page_dirty(page);
-	}
-}
-
 static void nfs_direct_release_pages(struct page **pages, unsigned int npages)
 {
 	unsigned int i;
@@ -147,26 +135,30 @@
 		page_cache_release(pages[i]);
 }
 
+void nfs_init_cinfo_from_dreq(struct nfs_commit_info *cinfo,
+			      struct nfs_direct_req *dreq)
+{
+	cinfo->lock = &dreq->lock;
+	cinfo->mds = &dreq->mds_cinfo;
+	cinfo->ds = &dreq->ds_cinfo;
+	cinfo->dreq = dreq;
+	cinfo->completion_ops = &nfs_direct_commit_completion_ops;
+}
+
 static inline struct nfs_direct_req *nfs_direct_req_alloc(void)
 {
 	struct nfs_direct_req *dreq;
 
-	dreq = kmem_cache_alloc(nfs_direct_cachep, GFP_KERNEL);
+	dreq = kmem_cache_zalloc(nfs_direct_cachep, GFP_KERNEL);
 	if (!dreq)
 		return NULL;
 
 	kref_init(&dreq->kref);
 	kref_get(&dreq->kref);
 	init_completion(&dreq->completion);
-	INIT_LIST_HEAD(&dreq->rewrite_list);
-	dreq->iocb = NULL;
-	dreq->ctx = NULL;
-	dreq->l_ctx = NULL;
+	INIT_LIST_HEAD(&dreq->mds_cinfo.list);
+	INIT_WORK(&dreq->work, nfs_direct_write_schedule_work);
 	spin_lock_init(&dreq->lock);
-	atomic_set(&dreq->io_count, 0);
-	dreq->count = 0;
-	dreq->error = 0;
-	dreq->flags = 0;
 
 	return dreq;
 }
@@ -226,47 +218,80 @@
 	nfs_direct_req_release(dreq);
 }
 
-/*
- * We must hold a reference to all the pages in this direct read request
- * until the RPCs complete.  This could be long *after* we are woken up in
- * nfs_direct_wait (for instance, if someone hits ^C on a slow server).
- */
-static void nfs_direct_read_result(struct rpc_task *task, void *calldata)
+static void nfs_direct_readpage_release(struct nfs_page *req)
 {
-	struct nfs_read_data *data = calldata;
-
-	nfs_readpage_result(task, data);
+	dprintk("NFS: direct read done (%s/%lld %d@%lld)\n",
+		req->wb_context->dentry->d_inode->i_sb->s_id,
+		(long long)NFS_FILEID(req->wb_context->dentry->d_inode),
+		req->wb_bytes,
+		(long long)req_offset(req));
+	nfs_release_request(req);
 }
 
-static void nfs_direct_read_release(void *calldata)
+static void nfs_direct_read_completion(struct nfs_pgio_header *hdr)
 {
+	unsigned long bytes = 0;
+	struct nfs_direct_req *dreq = hdr->dreq;
 
-	struct nfs_read_data *data = calldata;
-	struct nfs_direct_req *dreq = (struct nfs_direct_req *) data->req;
-	int status = data->task.tk_status;
+	if (test_bit(NFS_IOHDR_REDO, &hdr->flags))
+		goto out_put;
 
 	spin_lock(&dreq->lock);
-	if (unlikely(status < 0)) {
-		dreq->error = status;
-		spin_unlock(&dreq->lock);
-	} else {
-		dreq->count += data->res.count;
-		spin_unlock(&dreq->lock);
-		nfs_direct_dirty_pages(data->pagevec,
-				data->args.pgbase,
-				data->res.count);
-	}
-	nfs_direct_release_pages(data->pagevec, data->npages);
+	if (test_bit(NFS_IOHDR_ERROR, &hdr->flags) && (hdr->good_bytes == 0))
+		dreq->error = hdr->error;
+	else
+		dreq->count += hdr->good_bytes;
+	spin_unlock(&dreq->lock);
 
+	while (!list_empty(&hdr->pages)) {
+		struct nfs_page *req = nfs_list_entry(hdr->pages.next);
+		struct page *page = req->wb_page;
+
+		if (test_bit(NFS_IOHDR_EOF, &hdr->flags)) {
+			if (bytes > hdr->good_bytes)
+				zero_user(page, 0, PAGE_SIZE);
+			else if (hdr->good_bytes - bytes < PAGE_SIZE)
+				zero_user_segment(page,
+					hdr->good_bytes & ~PAGE_MASK,
+					PAGE_SIZE);
+		}
+		if (!PageCompound(page)) {
+			if (test_bit(NFS_IOHDR_ERROR, &hdr->flags)) {
+				if (bytes < hdr->good_bytes)
+					set_page_dirty(page);
+			} else
+				set_page_dirty(page);
+		}
+		bytes += req->wb_bytes;
+		nfs_list_remove_request(req);
+		nfs_direct_readpage_release(req);
+	}
+out_put:
 	if (put_dreq(dreq))
 		nfs_direct_complete(dreq);
-	nfs_readdata_free(data);
+	hdr->release(hdr);
 }
 
-static const struct rpc_call_ops nfs_read_direct_ops = {
-	.rpc_call_prepare = nfs_read_prepare,
-	.rpc_call_done = nfs_direct_read_result,
-	.rpc_release = nfs_direct_read_release,
+static void nfs_read_sync_pgio_error(struct list_head *head)
+{
+	struct nfs_page *req;
+
+	while (!list_empty(head)) {
+		req = nfs_list_entry(head->next);
+		nfs_list_remove_request(req);
+		nfs_release_request(req);
+	}
+}
+
+static void nfs_direct_pgio_init(struct nfs_pgio_header *hdr)
+{
+	get_dreq(hdr->dreq);
+}
+
+static const struct nfs_pgio_completion_ops nfs_direct_read_completion_ops = {
+	.error_cleanup = nfs_read_sync_pgio_error,
+	.init_hdr = nfs_direct_pgio_init,
+	.completion = nfs_direct_read_completion,
 };
 
 /*
@@ -276,107 +301,82 @@
  * handled automatically by nfs_direct_read_result().  Otherwise, if
  * no requests have been sent, just return an error.
  */
-static ssize_t nfs_direct_read_schedule_segment(struct nfs_direct_req *dreq,
+static ssize_t nfs_direct_read_schedule_segment(struct nfs_pageio_descriptor *desc,
 						const struct iovec *iov,
 						loff_t pos)
 {
+	struct nfs_direct_req *dreq = desc->pg_dreq;
 	struct nfs_open_context *ctx = dreq->ctx;
 	struct inode *inode = ctx->dentry->d_inode;
 	unsigned long user_addr = (unsigned long)iov->iov_base;
 	size_t count = iov->iov_len;
 	size_t rsize = NFS_SERVER(inode)->rsize;
-	struct rpc_task *task;
-	struct rpc_message msg = {
-		.rpc_cred = ctx->cred,
-	};
-	struct rpc_task_setup task_setup_data = {
-		.rpc_client = NFS_CLIENT(inode),
-		.rpc_message = &msg,
-		.callback_ops = &nfs_read_direct_ops,
-		.workqueue = nfsiod_workqueue,
-		.flags = RPC_TASK_ASYNC,
-	};
 	unsigned int pgbase;
 	int result;
 	ssize_t started = 0;
+	struct page **pagevec = NULL;
+	unsigned int npages;
 
 	do {
-		struct nfs_read_data *data;
 		size_t bytes;
+		int i;
 
 		pgbase = user_addr & ~PAGE_MASK;
-		bytes = min(rsize,count);
+		bytes = min(max_t(size_t, rsize, PAGE_SIZE), count);
 
 		result = -ENOMEM;
-		data = nfs_readdata_alloc(nfs_page_array_len(pgbase, bytes));
-		if (unlikely(!data))
+		npages = nfs_page_array_len(pgbase, bytes);
+		if (!pagevec)
+			pagevec = kmalloc(npages * sizeof(struct page *),
+					  GFP_KERNEL);
+		if (!pagevec)
 			break;
-
 		down_read(&current->mm->mmap_sem);
 		result = get_user_pages(current, current->mm, user_addr,
-					data->npages, 1, 0, data->pagevec, NULL);
+					npages, 1, 0, pagevec, NULL);
 		up_read(&current->mm->mmap_sem);
-		if (result < 0) {
-			nfs_readdata_free(data);
+		if (result < 0)
 			break;
-		}
-		if ((unsigned)result < data->npages) {
+		if ((unsigned)result < npages) {
 			bytes = result * PAGE_SIZE;
 			if (bytes <= pgbase) {
-				nfs_direct_release_pages(data->pagevec, result);
-				nfs_readdata_free(data);
+				nfs_direct_release_pages(pagevec, result);
 				break;
 			}
 			bytes -= pgbase;
-			data->npages = result;
+			npages = result;
 		}
 
-		get_dreq(dreq);
+		for (i = 0; i < npages; i++) {
+			struct nfs_page *req;
+			unsigned int req_len = min_t(size_t, bytes, PAGE_SIZE - pgbase);
+			/* XXX do we need to do the eof zeroing found in async_filler? */
+			req = nfs_create_request(dreq->ctx, dreq->inode,
+						 pagevec[i],
+						 pgbase, req_len);
+			if (IS_ERR(req)) {
+				result = PTR_ERR(req);
+				break;
+			}
+			req->wb_index = pos >> PAGE_SHIFT;
+			req->wb_offset = pos & ~PAGE_MASK;
+			if (!nfs_pageio_add_request(desc, req)) {
+				result = desc->pg_error;
+				nfs_release_request(req);
+				break;
+			}
+			pgbase = 0;
+			bytes -= req_len;
+			started += req_len;
+			user_addr += req_len;
+			pos += req_len;
+			count -= req_len;
+		}
+		/* The nfs_page structures now hold references to these pages */
+		nfs_direct_release_pages(pagevec, npages);
+	} while (count != 0 && result >= 0);
 
-		data->req = (struct nfs_page *) dreq;
-		data->inode = inode;
-		data->cred = msg.rpc_cred;
-		data->args.fh = NFS_FH(inode);
-		data->args.context = ctx;
-		data->args.lock_context = dreq->l_ctx;
-		data->args.offset = pos;
-		data->args.pgbase = pgbase;
-		data->args.pages = data->pagevec;
-		data->args.count = bytes;
-		data->res.fattr = &data->fattr;
-		data->res.eof = 0;
-		data->res.count = bytes;
-		nfs_fattr_init(&data->fattr);
-		msg.rpc_argp = &data->args;
-		msg.rpc_resp = &data->res;
-
-		task_setup_data.task = &data->task;
-		task_setup_data.callback_data = data;
-		NFS_PROTO(inode)->read_setup(data, &msg);
-
-		task = rpc_run_task(&task_setup_data);
-		if (IS_ERR(task))
-			break;
-		rpc_put_task(task);
-
-		dprintk("NFS: %5u initiated direct read call "
-			"(req %s/%Ld, %zu bytes @ offset %Lu)\n",
-				data->task.tk_pid,
-				inode->i_sb->s_id,
-				(long long)NFS_FILEID(inode),
-				bytes,
-				(unsigned long long)data->args.offset);
-
-		started += bytes;
-		user_addr += bytes;
-		pos += bytes;
-		/* FIXME: Remove this unnecessary math from final patch */
-		pgbase += bytes;
-		pgbase &= ~PAGE_MASK;
-		BUG_ON(pgbase != (user_addr & ~PAGE_MASK));
-
-		count -= bytes;
-	} while (count != 0);
+	kfree(pagevec);
 
 	if (started)
 		return started;
@@ -388,15 +388,19 @@
 					      unsigned long nr_segs,
 					      loff_t pos)
 {
+	struct nfs_pageio_descriptor desc;
 	ssize_t result = -EINVAL;
 	size_t requested_bytes = 0;
 	unsigned long seg;
 
+	nfs_pageio_init_read(&desc, dreq->inode,
+			     &nfs_direct_read_completion_ops);
 	get_dreq(dreq);
+	desc.pg_dreq = dreq;
 
 	for (seg = 0; seg < nr_segs; seg++) {
 		const struct iovec *vec = &iov[seg];
-		result = nfs_direct_read_schedule_segment(dreq, vec, pos);
+		result = nfs_direct_read_schedule_segment(&desc, vec, pos);
 		if (result < 0)
 			break;
 		requested_bytes += result;
@@ -405,6 +409,8 @@
 		pos += vec->iov_len;
 	}
 
+	nfs_pageio_complete(&desc);
+
 	/*
 	 * If no bytes were started, return the error, and let the
 	 * generic layer handle the completion.
@@ -441,104 +447,71 @@
 	result = nfs_direct_read_schedule_iovec(dreq, iov, nr_segs, pos);
 	if (!result)
 		result = nfs_direct_wait(dreq);
+	NFS_I(inode)->read_io += result;
 out_release:
 	nfs_direct_req_release(dreq);
 out:
 	return result;
 }
 
-static void nfs_direct_free_writedata(struct nfs_direct_req *dreq)
+static void nfs_inode_dio_write_done(struct inode *inode)
 {
-	while (!list_empty(&dreq->rewrite_list)) {
-		struct nfs_write_data *data = list_entry(dreq->rewrite_list.next, struct nfs_write_data, pages);
-		list_del(&data->pages);
-		nfs_direct_release_pages(data->pagevec, data->npages);
-		nfs_writedata_free(data);
-	}
+	nfs_zap_mapping(inode, inode->i_mapping);
+	inode_dio_done(inode);
 }
 
 #if defined(CONFIG_NFS_V3) || defined(CONFIG_NFS_V4)
 static void nfs_direct_write_reschedule(struct nfs_direct_req *dreq)
 {
-	struct inode *inode = dreq->inode;
-	struct list_head *p;
-	struct nfs_write_data *data;
-	struct rpc_task *task;
-	struct rpc_message msg = {
-		.rpc_cred = dreq->ctx->cred,
-	};
-	struct rpc_task_setup task_setup_data = {
-		.rpc_client = NFS_CLIENT(inode),
-		.rpc_message = &msg,
-		.callback_ops = &nfs_write_direct_ops,
-		.workqueue = nfsiod_workqueue,
-		.flags = RPC_TASK_ASYNC,
-	};
+	struct nfs_pageio_descriptor desc;
+	struct nfs_page *req, *tmp;
+	LIST_HEAD(reqs);
+	struct nfs_commit_info cinfo;
+	LIST_HEAD(failed);
+
+	nfs_init_cinfo_from_dreq(&cinfo, dreq);
+	pnfs_recover_commit_reqs(dreq->inode, &reqs, &cinfo);
+	spin_lock(cinfo.lock);
+	nfs_scan_commit_list(&cinfo.mds->list, &reqs, &cinfo, 0);
+	spin_unlock(cinfo.lock);
 
 	dreq->count = 0;
 	get_dreq(dreq);
 
-	list_for_each(p, &dreq->rewrite_list) {
-		data = list_entry(p, struct nfs_write_data, pages);
+	nfs_pageio_init_write(&desc, dreq->inode, FLUSH_STABLE,
+			      &nfs_direct_write_completion_ops);
+	desc.pg_dreq = dreq;
 
-		get_dreq(dreq);
-
-		/* Use stable writes */
-		data->args.stable = NFS_FILE_SYNC;
-
-		/*
-		 * Reset data->res.
-		 */
-		nfs_fattr_init(&data->fattr);
-		data->res.count = data->args.count;
-		memset(&data->verf, 0, sizeof(data->verf));
-
-		/*
-		 * Reuse data->task; data->args should not have changed
-		 * since the original request was sent.
-		 */
-		task_setup_data.task = &data->task;
-		task_setup_data.callback_data = data;
-		msg.rpc_argp = &data->args;
-		msg.rpc_resp = &data->res;
-		NFS_PROTO(inode)->write_setup(data, &msg);
-
-		/*
-		 * We're called via an RPC callback, so BKL is already held.
-		 */
-		task = rpc_run_task(&task_setup_data);
-		if (!IS_ERR(task))
-			rpc_put_task(task);
-
-		dprintk("NFS: %5u rescheduled direct write call (req %s/%Ld, %u bytes @ offset %Lu)\n",
-				data->task.tk_pid,
-				inode->i_sb->s_id,
-				(long long)NFS_FILEID(inode),
-				data->args.count,
-				(unsigned long long)data->args.offset);
+	list_for_each_entry_safe(req, tmp, &reqs, wb_list) {
+		if (!nfs_pageio_add_request(&desc, req)) {
+			nfs_list_add_request(req, &failed);
+			spin_lock(cinfo.lock);
+			dreq->flags = 0;
+			dreq->error = -EIO;
+			spin_unlock(cinfo.lock);
+		}
+		nfs_release_request(req);
 	}
+	nfs_pageio_complete(&desc);
+
+	while (!list_empty(&failed))
+		nfs_unlock_and_release_request(req);
 
 	if (put_dreq(dreq))
-		nfs_direct_write_complete(dreq, inode);
+		nfs_direct_write_complete(dreq, dreq->inode);
 }
 
-static void nfs_direct_commit_result(struct rpc_task *task, void *calldata)
+static void nfs_direct_commit_complete(struct nfs_commit_data *data)
 {
-	struct nfs_write_data *data = calldata;
-
-	/* Call the NFS version-specific code */
-	NFS_PROTO(data->inode)->commit_done(task, data);
-}
-
-static void nfs_direct_commit_release(void *calldata)
-{
-	struct nfs_write_data *data = calldata;
-	struct nfs_direct_req *dreq = (struct nfs_direct_req *) data->req;
+	struct nfs_direct_req *dreq = data->dreq;
+	struct nfs_commit_info cinfo;
+	struct nfs_page *req;
 	int status = data->task.tk_status;
 
+	nfs_init_cinfo_from_dreq(&cinfo, dreq);
 	if (status < 0) {
 		dprintk("NFS: %5u commit failed with error %d.\n",
-				data->task.tk_pid, status);
+			data->task.tk_pid, status);
 		dreq->flags = NFS_ODIRECT_RESCHED_WRITES;
 	} else if (memcmp(&dreq->verf, &data->verf, sizeof(data->verf))) {
 		dprintk("NFS: %5u commit verify failed\n", data->task.tk_pid);
@@ -546,62 +519,47 @@
 	}
 
 	dprintk("NFS: %5u commit returned %d\n", data->task.tk_pid, status);
-	nfs_direct_write_complete(dreq, data->inode);
-	nfs_commit_free(data);
+	while (!list_empty(&data->pages)) {
+		req = nfs_list_entry(data->pages.next);
+		nfs_list_remove_request(req);
+		if (dreq->flags == NFS_ODIRECT_RESCHED_WRITES) {
+			/* Note: the rewrite will go through the MDS */
+			nfs_mark_request_commit(req, NULL, &cinfo);
+		} else
+			nfs_release_request(req);
+		nfs_unlock_and_release_request(req);
+	}
+
+	if (atomic_dec_and_test(&cinfo.mds->rpcs_out))
+		nfs_direct_write_complete(dreq, data->inode);
 }
 
-static const struct rpc_call_ops nfs_commit_direct_ops = {
-	.rpc_call_prepare = nfs_write_prepare,
-	.rpc_call_done = nfs_direct_commit_result,
-	.rpc_release = nfs_direct_commit_release,
+static void nfs_direct_error_cleanup(struct nfs_inode *nfsi)
+{
+	/* There is no lock to clear */
+}
+
+static const struct nfs_commit_completion_ops nfs_direct_commit_completion_ops = {
+	.completion = nfs_direct_commit_complete,
+	.error_cleanup = nfs_direct_error_cleanup,
 };
 
 static void nfs_direct_commit_schedule(struct nfs_direct_req *dreq)
 {
-	struct nfs_write_data *data = dreq->commit_data;
-	struct rpc_task *task;
-	struct rpc_message msg = {
-		.rpc_argp = &data->args,
-		.rpc_resp = &data->res,
-		.rpc_cred = dreq->ctx->cred,
-	};
-	struct rpc_task_setup task_setup_data = {
-		.task = &data->task,
-		.rpc_client = NFS_CLIENT(dreq->inode),
-		.rpc_message = &msg,
-		.callback_ops = &nfs_commit_direct_ops,
-		.callback_data = data,
-		.workqueue = nfsiod_workqueue,
-		.flags = RPC_TASK_ASYNC,
-	};
+	int res;
+	struct nfs_commit_info cinfo;
+	LIST_HEAD(mds_list);
 
-	data->inode = dreq->inode;
-	data->cred = msg.rpc_cred;
-
-	data->args.fh = NFS_FH(data->inode);
-	data->args.offset = 0;
-	data->args.count = 0;
-	data->args.context = dreq->ctx;
-	data->args.lock_context = dreq->l_ctx;
-	data->res.count = 0;
-	data->res.fattr = &data->fattr;
-	data->res.verf = &data->verf;
-	nfs_fattr_init(&data->fattr);
-
-	NFS_PROTO(data->inode)->commit_setup(data, &msg);
-
-	/* Note: task.tk_ops->rpc_release will free dreq->commit_data */
-	dreq->commit_data = NULL;
-
-	dprintk("NFS: %5u initiated commit call\n", data->task.tk_pid);
-
-	task = rpc_run_task(&task_setup_data);
-	if (!IS_ERR(task))
-		rpc_put_task(task);
+	nfs_init_cinfo_from_dreq(&cinfo, dreq);
+	nfs_scan_commit(dreq->inode, &mds_list, &cinfo);
+	res = nfs_generic_commit_list(dreq->inode, &mds_list, 0, &cinfo);
+	if (res < 0) /* res == -ENOMEM */
+		nfs_direct_write_reschedule(dreq);
 }
 
-static void nfs_direct_write_complete(struct nfs_direct_req *dreq, struct inode *inode)
+static void nfs_direct_write_schedule_work(struct work_struct *work)
 {
+	struct nfs_direct_req *dreq = container_of(work, struct nfs_direct_req, work);
 	int flags = dreq->flags;
 
 	dreq->flags = 0;
@@ -613,89 +571,32 @@
 			nfs_direct_write_reschedule(dreq);
 			break;
 		default:
-			if (dreq->commit_data != NULL)
-				nfs_commit_free(dreq->commit_data);
-			nfs_direct_free_writedata(dreq);
-			nfs_zap_mapping(inode, inode->i_mapping);
+			nfs_inode_dio_write_done(dreq->inode);
 			nfs_direct_complete(dreq);
 	}
 }
 
-static void nfs_alloc_commit_data(struct nfs_direct_req *dreq)
+static void nfs_direct_write_complete(struct nfs_direct_req *dreq, struct inode *inode)
 {
-	dreq->commit_data = nfs_commitdata_alloc();
-	if (dreq->commit_data != NULL)
-		dreq->commit_data->req = (struct nfs_page *) dreq;
+	schedule_work(&dreq->work); /* Calls nfs_direct_write_schedule_work */
 }
+
 #else
-static inline void nfs_alloc_commit_data(struct nfs_direct_req *dreq)
+static void nfs_direct_write_schedule_work(struct work_struct *work)
 {
-	dreq->commit_data = NULL;
 }
 
 static void nfs_direct_write_complete(struct nfs_direct_req *dreq, struct inode *inode)
 {
-	nfs_direct_free_writedata(dreq);
-	nfs_zap_mapping(inode, inode->i_mapping);
+	nfs_inode_dio_write_done(inode);
 	nfs_direct_complete(dreq);
 }
 #endif
 
-static void nfs_direct_write_result(struct rpc_task *task, void *calldata)
-{
-	struct nfs_write_data *data = calldata;
-
-	nfs_writeback_done(task, data);
-}
-
 /*
  * NB: Return the value of the first error return code.  Subsequent
  *     errors after the first one are ignored.
  */
-static void nfs_direct_write_release(void *calldata)
-{
-	struct nfs_write_data *data = calldata;
-	struct nfs_direct_req *dreq = (struct nfs_direct_req *) data->req;
-	int status = data->task.tk_status;
-
-	spin_lock(&dreq->lock);
-
-	if (unlikely(status < 0)) {
-		/* An error has occurred, so we should not commit */
-		dreq->flags = 0;
-		dreq->error = status;
-	}
-	if (unlikely(dreq->error != 0))
-		goto out_unlock;
-
-	dreq->count += data->res.count;
-
-	if (data->res.verf->committed != NFS_FILE_SYNC) {
-		switch (dreq->flags) {
-			case 0:
-				memcpy(&dreq->verf, &data->verf, sizeof(dreq->verf));
-				dreq->flags = NFS_ODIRECT_DO_COMMIT;
-				break;
-			case NFS_ODIRECT_DO_COMMIT:
-				if (memcmp(&dreq->verf, &data->verf, sizeof(dreq->verf))) {
-					dprintk("NFS: %5u write verify failed\n", data->task.tk_pid);
-					dreq->flags = NFS_ODIRECT_RESCHED_WRITES;
-				}
-		}
-	}
-out_unlock:
-	spin_unlock(&dreq->lock);
-
-	if (put_dreq(dreq))
-		nfs_direct_write_complete(dreq, data->inode);
-}
-
-static const struct rpc_call_ops nfs_write_direct_ops = {
-	.rpc_call_prepare = nfs_write_prepare,
-	.rpc_call_done = nfs_direct_write_result,
-	.rpc_release = nfs_direct_write_release,
-};
-
 /*
  * For each wsize'd chunk of the user's buffer, dispatch an NFS WRITE
  * operation.  If nfs_writedata_alloc() or get_user_pages() fails,
@@ -703,132 +604,189 @@
  * handled automatically by nfs_direct_write_result().  Otherwise, if
  * no requests have been sent, just return an error.
  */
-static ssize_t nfs_direct_write_schedule_segment(struct nfs_direct_req *dreq,
+static ssize_t nfs_direct_write_schedule_segment(struct nfs_pageio_descriptor *desc,
 						 const struct iovec *iov,
-						 loff_t pos, int sync)
+						 loff_t pos)
 {
+	struct nfs_direct_req *dreq = desc->pg_dreq;
 	struct nfs_open_context *ctx = dreq->ctx;
 	struct inode *inode = ctx->dentry->d_inode;
 	unsigned long user_addr = (unsigned long)iov->iov_base;
 	size_t count = iov->iov_len;
-	struct rpc_task *task;
-	struct rpc_message msg = {
-		.rpc_cred = ctx->cred,
-	};
-	struct rpc_task_setup task_setup_data = {
-		.rpc_client = NFS_CLIENT(inode),
-		.rpc_message = &msg,
-		.callback_ops = &nfs_write_direct_ops,
-		.workqueue = nfsiod_workqueue,
-		.flags = RPC_TASK_ASYNC,
-	};
 	size_t wsize = NFS_SERVER(inode)->wsize;
 	unsigned int pgbase;
 	int result;
 	ssize_t started = 0;
+	struct page **pagevec = NULL;
+	unsigned int npages;
 
 	do {
-		struct nfs_write_data *data;
 		size_t bytes;
+		int i;
 
 		pgbase = user_addr & ~PAGE_MASK;
-		bytes = min(wsize,count);
+		bytes = min(max_t(size_t, wsize, PAGE_SIZE), count);
 
 		result = -ENOMEM;
-		data = nfs_writedata_alloc(nfs_page_array_len(pgbase, bytes));
-		if (unlikely(!data))
+		npages = nfs_page_array_len(pgbase, bytes);
+		if (!pagevec)
+			pagevec = kmalloc(npages * sizeof(struct page *), GFP_KERNEL);
+		if (!pagevec)
 			break;
 
 		down_read(&current->mm->mmap_sem);
 		result = get_user_pages(current, current->mm, user_addr,
-					data->npages, 0, 0, data->pagevec, NULL);
+					npages, 0, 0, pagevec, NULL);
 		up_read(&current->mm->mmap_sem);
-		if (result < 0) {
-			nfs_writedata_free(data);
+		if (result < 0)
 			break;
-		}
-		if ((unsigned)result < data->npages) {
+
+		if ((unsigned)result < npages) {
 			bytes = result * PAGE_SIZE;
 			if (bytes <= pgbase) {
-				nfs_direct_release_pages(data->pagevec, result);
-				nfs_writedata_free(data);
+				nfs_direct_release_pages(pagevec, result);
 				break;
 			}
 			bytes -= pgbase;
-			data->npages = result;
+			npages = result;
 		}
 
-		get_dreq(dreq);
+		for (i = 0; i < npages; i++) {
+			struct nfs_page *req;
+			unsigned int req_len = min_t(size_t, bytes, PAGE_SIZE - pgbase);
 
-		list_move_tail(&data->pages, &dreq->rewrite_list);
+			req = nfs_create_request(dreq->ctx, dreq->inode,
+						 pagevec[i],
+						 pgbase, req_len);
+			if (IS_ERR(req)) {
+				result = PTR_ERR(req);
+				break;
+			}
+			nfs_lock_request(req);
+			req->wb_index = pos >> PAGE_SHIFT;
+			req->wb_offset = pos & ~PAGE_MASK;
+			if (!nfs_pageio_add_request(desc, req)) {
+				result = desc->pg_error;
+				nfs_unlock_and_release_request(req);
+				break;
+			}
+			pgbase = 0;
+			bytes -= req_len;
+			started += req_len;
+			user_addr += req_len;
+			pos += req_len;
+			count -= req_len;
+		}
+		/* The nfs_page structures now hold references to these pages */
+		nfs_direct_release_pages(pagevec, npages);
+	} while (count != 0 && result >= 0);
 
-		data->req = (struct nfs_page *) dreq;
-		data->inode = inode;
-		data->cred = msg.rpc_cred;
-		data->args.fh = NFS_FH(inode);
-		data->args.context = ctx;
-		data->args.lock_context = dreq->l_ctx;
-		data->args.offset = pos;
-		data->args.pgbase = pgbase;
-		data->args.pages = data->pagevec;
-		data->args.count = bytes;
-		data->args.stable = sync;
-		data->res.fattr = &data->fattr;
-		data->res.count = bytes;
-		data->res.verf = &data->verf;
-		nfs_fattr_init(&data->fattr);
-
-		task_setup_data.task = &data->task;
-		task_setup_data.callback_data = data;
-		msg.rpc_argp = &data->args;
-		msg.rpc_resp = &data->res;
-		NFS_PROTO(inode)->write_setup(data, &msg);
-
-		task = rpc_run_task(&task_setup_data);
-		if (IS_ERR(task))
-			break;
-		rpc_put_task(task);
-
-		dprintk("NFS: %5u initiated direct write call "
-			"(req %s/%Ld, %zu bytes @ offset %Lu)\n",
-				data->task.tk_pid,
-				inode->i_sb->s_id,
-				(long long)NFS_FILEID(inode),
-				bytes,
-				(unsigned long long)data->args.offset);
-
-		started += bytes;
-		user_addr += bytes;
-		pos += bytes;
-
-		/* FIXME: Remove this useless math from the final patch */
-		pgbase += bytes;
-		pgbase &= ~PAGE_MASK;
-		BUG_ON(pgbase != (user_addr & ~PAGE_MASK));
-
-		count -= bytes;
-	} while (count != 0);
+	kfree(pagevec);
 
 	if (started)
 		return started;
 	return result < 0 ? (ssize_t) result : -EFAULT;
 }
 
+static void nfs_direct_write_completion(struct nfs_pgio_header *hdr)
+{
+	struct nfs_direct_req *dreq = hdr->dreq;
+	struct nfs_commit_info cinfo;
+	int bit = -1;
+	struct nfs_page *req = nfs_list_entry(hdr->pages.next);
+
+	if (test_bit(NFS_IOHDR_REDO, &hdr->flags))
+		goto out_put;
+
+	nfs_init_cinfo_from_dreq(&cinfo, dreq);
+
+	spin_lock(&dreq->lock);
+
+	if (test_bit(NFS_IOHDR_ERROR, &hdr->flags)) {
+		dreq->flags = 0;
+		dreq->error = hdr->error;
+	}
+	if (dreq->error != 0)
+		bit = NFS_IOHDR_ERROR;
+	else {
+		dreq->count += hdr->good_bytes;
+		if (test_bit(NFS_IOHDR_NEED_RESCHED, &hdr->flags)) {
+			dreq->flags = NFS_ODIRECT_RESCHED_WRITES;
+			bit = NFS_IOHDR_NEED_RESCHED;
+		} else if (test_bit(NFS_IOHDR_NEED_COMMIT, &hdr->flags)) {
+			if (dreq->flags == NFS_ODIRECT_RESCHED_WRITES)
+				bit = NFS_IOHDR_NEED_RESCHED;
+			else if (dreq->flags == 0) {
+				memcpy(&dreq->verf, hdr->verf,
+				       sizeof(dreq->verf));
+				bit = NFS_IOHDR_NEED_COMMIT;
+				dreq->flags = NFS_ODIRECT_DO_COMMIT;
+			} else if (dreq->flags == NFS_ODIRECT_DO_COMMIT) {
+				if (memcmp(&dreq->verf, hdr->verf, sizeof(dreq->verf))) {
+					dreq->flags = NFS_ODIRECT_RESCHED_WRITES;
+					bit = NFS_IOHDR_NEED_RESCHED;
+				} else
+					bit = NFS_IOHDR_NEED_COMMIT;
+			}
+		}
+	}
+	spin_unlock(&dreq->lock);
+
+	while (!list_empty(&hdr->pages)) {
+		req = nfs_list_entry(hdr->pages.next);
+		nfs_list_remove_request(req);
+		switch (bit) {
+		case NFS_IOHDR_NEED_RESCHED:
+		case NFS_IOHDR_NEED_COMMIT:
+			kref_get(&req->wb_kref);
+			nfs_mark_request_commit(req, hdr->lseg, &cinfo);
+		}
+		nfs_unlock_and_release_request(req);
+	}
+
+out_put:
+	if (put_dreq(dreq))
+		nfs_direct_write_complete(dreq, hdr->inode);
+	hdr->release(hdr);
+}
+
+static void nfs_write_sync_pgio_error(struct list_head *head)
+{
+	struct nfs_page *req;
+
+	while (!list_empty(head)) {
+		req = nfs_list_entry(head->next);
+		nfs_list_remove_request(req);
+		nfs_unlock_and_release_request(req);
+	}
+}
+
+static const struct nfs_pgio_completion_ops nfs_direct_write_completion_ops = {
+	.error_cleanup = nfs_write_sync_pgio_error,
+	.init_hdr = nfs_direct_pgio_init,
+	.completion = nfs_direct_write_completion,
+};
+
 static ssize_t nfs_direct_write_schedule_iovec(struct nfs_direct_req *dreq,
 					       const struct iovec *iov,
 					       unsigned long nr_segs,
-					       loff_t pos, int sync)
+					       loff_t pos)
 {
+	struct nfs_pageio_descriptor desc;
+	struct inode *inode = dreq->inode;
 	ssize_t result = 0;
 	size_t requested_bytes = 0;
 	unsigned long seg;
 
+	nfs_pageio_init_write(&desc, inode, FLUSH_COND_STABLE,
+			      &nfs_direct_write_completion_ops);
+	desc.pg_dreq = dreq;
 	get_dreq(dreq);
+	atomic_inc(&inode->i_dio_count);
 
 	for (seg = 0; seg < nr_segs; seg++) {
 		const struct iovec *vec = &iov[seg];
-		result = nfs_direct_write_schedule_segment(dreq, vec,
-							   pos, sync);
+		result = nfs_direct_write_schedule_segment(&desc, vec, pos);
 		if (result < 0)
 			break;
 		requested_bytes += result;
@@ -836,12 +794,15 @@
 			break;
 		pos += vec->iov_len;
 	}
+	nfs_pageio_complete(&desc);
+	NFS_I(dreq->inode)->write_io += desc.pg_bytes_written;
 
 	/*
 	 * If no bytes were started, return the error, and let the
 	 * generic layer handle the completion.
 	 */
 	if (requested_bytes == 0) {
+		inode_dio_done(inode);
 		nfs_direct_req_release(dreq);
 		return result < 0 ? result : -EIO;
 	}
@@ -858,16 +819,10 @@
 	ssize_t result = -ENOMEM;
 	struct inode *inode = iocb->ki_filp->f_mapping->host;
 	struct nfs_direct_req *dreq;
-	size_t wsize = NFS_SERVER(inode)->wsize;
-	int sync = NFS_UNSTABLE;
 
 	dreq = nfs_direct_req_alloc();
 	if (!dreq)
 		goto out;
-	nfs_alloc_commit_data(dreq);
-
-	if (dreq->commit_data == NULL || count <= wsize)
-		sync = NFS_FILE_SYNC;
 
 	dreq->inode = inode;
 	dreq->ctx = get_nfs_open_context(nfs_file_open_context(iocb->ki_filp));
@@ -877,7 +832,7 @@
 	if (!is_sync_kiocb(iocb))
 		dreq->iocb = iocb;
 
-	result = nfs_direct_write_schedule_iovec(dreq, iov, nr_segs, pos, sync);
+	result = nfs_direct_write_schedule_iovec(dreq, iov, nr_segs, pos);
 	if (!result)
 		result = nfs_direct_wait(dreq);
 out_release:
@@ -997,10 +952,15 @@
 	task_io_account_write(count);
 
 	retval = nfs_direct_write(iocb, iov, nr_segs, pos, count);
+	if (retval > 0) {
+		struct inode *inode = mapping->host;
 
-	if (retval > 0)
 		iocb->ki_pos = pos + retval;
-
+		spin_lock(&inode->i_lock);
+		if (i_size_read(inode) < iocb->ki_pos)
+			i_size_write(inode, iocb->ki_pos);
+		spin_unlock(&inode->i_lock);
+	}
 out:
 	return retval;
 }
diff --git a/fs/nfs/file.c b/fs/nfs/file.c
index aa9b709..a6708e6b 100644
--- a/fs/nfs/file.c
+++ b/fs/nfs/file.c
@@ -174,6 +174,13 @@
 	if ((file->f_mode & FMODE_WRITE) == 0)
 		return 0;
 
+	/*
+	 * If we're holding a write delegation, then just start the i/o
+	 * but don't wait for completion (or send a commit).
+	 */
+	if (nfs_have_delegation(inode, FMODE_WRITE))
+		return filemap_fdatawrite(file->f_mapping);
+
 	/* Flush writes to the server and return any errors */
 	return vfs_fsync(file, 0);
 }
@@ -417,6 +424,7 @@
 
 	if (status < 0)
 		return status;
+	NFS_I(mapping->host)->write_io += copied;
 	return copied;
 }
 
@@ -871,12 +879,81 @@
 static int
 nfs4_file_open(struct inode *inode, struct file *filp)
 {
+	struct nfs_open_context *ctx;
+	struct dentry *dentry = filp->f_path.dentry;
+	struct dentry *parent = NULL;
+	struct inode *dir;
+	unsigned openflags = filp->f_flags;
+	struct iattr attr;
+	int err;
+
+	BUG_ON(inode != dentry->d_inode);
 	/*
-	 * NFSv4 opens are handled in d_lookup and d_revalidate. If we get to
-	 * this point, then something is very wrong
+	 * If no cached dentry exists or if it's negative, NFSv4 handled the
+	 * opens in ->lookup() or ->create().
+	 *
+	 * We only get this far for a cached positive dentry.  We skipped
+	 * revalidation, so handle it here by dropping the dentry and returning
+	 * -EOPENSTALE.  The VFS will retry the lookup/create/open.
 	 */
-	dprintk("NFS: %s called! inode=%p filp=%p\n", __func__, inode, filp);
-	return -ENOTDIR;
+
+	dprintk("NFS: open file(%s/%s)\n",
+		dentry->d_parent->d_name.name,
+		dentry->d_name.name);
+
+	if ((openflags & O_ACCMODE) == 3)
+		openflags--;
+
+	/* We can't create new files here */
+	openflags &= ~(O_CREAT|O_EXCL);
+
+	parent = dget_parent(dentry);
+	dir = parent->d_inode;
+
+	ctx = alloc_nfs_open_context(filp->f_path.dentry, filp->f_mode);
+	err = PTR_ERR(ctx);
+	if (IS_ERR(ctx))
+		goto out;
+
+	attr.ia_valid = ATTR_OPEN;
+	if (openflags & O_TRUNC) {
+		attr.ia_valid |= ATTR_SIZE;
+		attr.ia_size = 0;
+		nfs_wb_all(inode);
+	}
+
+	inode = NFS_PROTO(dir)->open_context(dir, ctx, openflags, &attr);
+	if (IS_ERR(inode)) {
+		err = PTR_ERR(inode);
+		switch (err) {
+		case -EPERM:
+		case -EACCES:
+		case -EDQUOT:
+		case -ENOSPC:
+		case -EROFS:
+			goto out_put_ctx;
+		default:
+			goto out_drop;
+		}
+	}
+	iput(inode);
+	if (inode != dentry->d_inode)
+		goto out_drop;
+
+	nfs_set_verifier(dentry, nfs_save_change_attribute(dir));
+	nfs_file_set_open_context(filp, ctx);
+	err = 0;
+
+out_put_ctx:
+	put_nfs_open_context(ctx);
+out:
+	dput(parent);
+	return err;
+
+out_drop:
+	d_drop(dentry);
+	err = -EOPENSTALE;
+	goto out_put_ctx;
 }
 
 const struct file_operations nfs4_file_operations = {
diff --git a/fs/nfs/fscache.c b/fs/nfs/fscache.c
index ae65c16..c817787 100644
--- a/fs/nfs/fscache.c
+++ b/fs/nfs/fscache.c
@@ -64,23 +64,12 @@
  * either by the 'fsc=xxx' option to mount, or by inheriting it from the parent
  * superblock across an automount point of some nature.
  */
-void nfs_fscache_get_super_cookie(struct super_block *sb, const char *uniq,
-				  struct nfs_clone_mount *mntdata)
+void nfs_fscache_get_super_cookie(struct super_block *sb, const char *uniq, int ulen)
 {
 	struct nfs_fscache_key *key, *xkey;
 	struct nfs_server *nfss = NFS_SB(sb);
 	struct rb_node **p, *parent;
-	int diff, ulen;
-
-	if (uniq) {
-		ulen = strlen(uniq);
-	} else if (mntdata) {
-		struct nfs_server *mnt_s = NFS_SB(mntdata->sb);
-		if (mnt_s->fscache_key) {
-			uniq = mnt_s->fscache_key->key.uniquifier;
-			ulen = mnt_s->fscache_key->key.uniq_len;
-		}
-	}
+	int diff;
 
 	if (!uniq) {
 		uniq = "";
diff --git a/fs/nfs/fscache.h b/fs/nfs/fscache.h
index b9c572d..c5b11b5 100644
--- a/fs/nfs/fscache.h
+++ b/fs/nfs/fscache.h
@@ -73,9 +73,7 @@
 extern void nfs_fscache_get_client_cookie(struct nfs_client *);
 extern void nfs_fscache_release_client_cookie(struct nfs_client *);
 
-extern void nfs_fscache_get_super_cookie(struct super_block *,
-					 const char *,
-					 struct nfs_clone_mount *);
+extern void nfs_fscache_get_super_cookie(struct super_block *, const char *, int);
 extern void nfs_fscache_release_super_cookie(struct super_block *);
 
 extern void nfs_fscache_init_inode_cookie(struct inode *);
@@ -172,12 +170,6 @@
 static inline void nfs_fscache_get_client_cookie(struct nfs_client *clp) {}
 static inline void nfs_fscache_release_client_cookie(struct nfs_client *clp) {}
 
-static inline void nfs_fscache_get_super_cookie(
-	struct super_block *sb,
-	const char *uniq,
-	struct nfs_clone_mount *mntdata)
-{
-}
 static inline void nfs_fscache_release_super_cookie(struct super_block *sb) {}
 
 static inline void nfs_fscache_init_inode_cookie(struct inode *inode) {}
diff --git a/fs/nfs/getroot.c b/fs/nfs/getroot.c
index 4ca6f5c..8abfb19 100644
--- a/fs/nfs/getroot.c
+++ b/fs/nfs/getroot.c
@@ -150,7 +150,7 @@
 		goto out;
 
 	/* Start by getting the root filehandle from the server */
-	ret = server->nfs_client->rpc_ops->getroot(server, mntfh, &fsinfo);
+	ret = nfs4_proc_get_rootfh(server, mntfh, &fsinfo);
 	if (ret < 0) {
 		dprintk("nfs4_get_rootfh: getroot error = %d\n", -ret);
 		goto out;
@@ -178,87 +178,4 @@
 	return ret;
 }
 
-/*
- * get an NFS4 root dentry from the root filehandle
- */
-struct dentry *nfs4_get_root(struct super_block *sb, struct nfs_fh *mntfh,
-			     const char *devname)
-{
-	struct nfs_server *server = NFS_SB(sb);
-	struct nfs_fattr *fattr = NULL;
-	struct dentry *ret;
-	struct inode *inode;
-	void *name = kstrdup(devname, GFP_KERNEL);
-	int error;
-
-	dprintk("--> nfs4_get_root()\n");
-
-	if (!name)
-		return ERR_PTR(-ENOMEM);
-
-	/* get the info about the server and filesystem */
-	error = nfs4_server_capabilities(server, mntfh);
-	if (error < 0) {
-		dprintk("nfs_get_root: getcaps error = %d\n",
-			-error);
-		kfree(name);
-		return ERR_PTR(error);
-	}
-
-	fattr = nfs_alloc_fattr();
-	if (fattr == NULL) {
-		kfree(name);
-		return ERR_PTR(-ENOMEM);
-	}
-
-	/* get the actual root for this mount */
-	error = server->nfs_client->rpc_ops->getattr(server, mntfh, fattr);
-	if (error < 0) {
-		dprintk("nfs_get_root: getattr error = %d\n", -error);
-		ret = ERR_PTR(error);
-		goto out;
-	}
-
-	if (fattr->valid & NFS_ATTR_FATTR_FSID &&
-	    !nfs_fsid_equal(&server->fsid, &fattr->fsid))
-		memcpy(&server->fsid, &fattr->fsid, sizeof(server->fsid));
-
-	inode = nfs_fhget(sb, mntfh, fattr);
-	if (IS_ERR(inode)) {
-		dprintk("nfs_get_root: get root inode failed\n");
-		ret = ERR_CAST(inode);
-		goto out;
-	}
-
-	error = nfs_superblock_set_dummy_root(sb, inode);
-	if (error != 0) {
-		ret = ERR_PTR(error);
-		goto out;
-	}
-
-	/* root dentries normally start off anonymous and get spliced in later
-	 * if the dentry tree reaches them; however if the dentry already
-	 * exists, we'll pick it up at this point and use it as the root
-	 */
-	ret = d_obtain_alias(inode);
-	if (IS_ERR(ret)) {
-		dprintk("nfs_get_root: get root dentry failed\n");
-		goto out;
-	}
-
-	security_d_instantiate(ret, inode);
-	spin_lock(&ret->d_lock);
-	if (IS_ROOT(ret) && !(ret->d_flags & DCACHE_NFSFS_RENAMED)) {
-		ret->d_fsdata = name;
-		name = NULL;
-	}
-	spin_unlock(&ret->d_lock);
-out:
-	if (name)
-		kfree(name);
-	nfs_free_fattr(fattr);
-	dprintk("<-- nfs4_get_root()\n");
-	return ret;
-}
-
 #endif /* CONFIG_NFS_V4 */
diff --git a/fs/nfs/idmap.c b/fs/nfs/idmap.c
index ba3019f..864c51e 100644
--- a/fs/nfs/idmap.c
+++ b/fs/nfs/idmap.c
@@ -57,6 +57,11 @@
 static const struct cred *id_resolver_cache;
 static struct key_type key_type_id_resolver_legacy;
 
+struct idmap {
+	struct rpc_pipe		*idmap_pipe;
+	struct key_construction	*idmap_key_cons;
+	struct mutex		idmap_mutex;
+};
 
 /**
  * nfs_fattr_init_names - initialise the nfs_fattr owner_name/group_name fields
@@ -310,9 +315,11 @@
 					    name, namelen, type, data,
 					    data_size, NULL);
 	if (ret < 0) {
+		mutex_lock(&idmap->idmap_mutex);
 		ret = nfs_idmap_request_key(&key_type_id_resolver_legacy,
 					    name, namelen, type, data,
 					    data_size, idmap);
+		mutex_unlock(&idmap->idmap_mutex);
 	}
 	return ret;
 }
@@ -354,11 +361,6 @@
 /* idmap classic begins here */
 module_param(nfs_idmap_cache_timeout, int, 0644);
 
-struct idmap {
-	struct rpc_pipe		*idmap_pipe;
-	struct key_construction	*idmap_key_cons;
-};
-
 enum {
 	Opt_find_uid, Opt_find_gid, Opt_find_user, Opt_find_group, Opt_find_err
 };
@@ -415,7 +417,7 @@
 static void nfs_idmap_unregister(struct nfs_client *clp,
 				      struct rpc_pipe *pipe)
 {
-	struct net *net = clp->net;
+	struct net *net = clp->cl_net;
 	struct super_block *pipefs_sb;
 
 	pipefs_sb = rpc_get_sb_net(net);
@@ -429,7 +431,7 @@
 				   struct idmap *idmap,
 				   struct rpc_pipe *pipe)
 {
-	struct net *net = clp->net;
+	struct net *net = clp->cl_net;
 	struct super_block *pipefs_sb;
 	int err = 0;
 
@@ -469,6 +471,7 @@
 		return error;
 	}
 	idmap->idmap_pipe = pipe;
+	mutex_init(&idmap->idmap_mutex);
 
 	clp->cl_idmap = idmap;
 	return 0;
@@ -530,9 +533,25 @@
 	struct nfs_net *nn = net_generic(net, nfs_net_id);
 	struct dentry *cl_dentry;
 	struct nfs_client *clp;
+	int err;
 
+restart:
 	spin_lock(&nn->nfs_client_lock);
 	list_for_each_entry(clp, &nn->nfs_client_list, cl_share_link) {
+		/* Wait for initialisation to finish */
+		if (clp->cl_cons_state == NFS_CS_INITING) {
+			atomic_inc(&clp->cl_count);
+			spin_unlock(&nn->nfs_client_lock);
+			err = nfs_wait_client_init_complete(clp);
+			nfs_put_client(clp);
+			if (err)
+				return NULL;
+			goto restart;
+		}
+		/* Skip nfs_clients that failed to initialise */
+		if (clp->cl_cons_state < 0)
+			continue;
+		smp_rmb();
 		if (clp->rpc_ops != &nfs_v4_clientops)
 			continue;
 		cl_dentry = clp->cl_idmap->idmap_pipe->dentry;
@@ -640,20 +659,16 @@
 	struct idmap_msg *im;
 	struct idmap *idmap = (struct idmap *)aux;
 	struct key *key = cons->key;
-	int ret;
+	int ret = -ENOMEM;
 
 	/* msg and im are freed in idmap_pipe_destroy_msg */
 	msg = kmalloc(sizeof(*msg), GFP_KERNEL);
-	if (IS_ERR(msg)) {
-		ret = PTR_ERR(msg);
+	if (!msg)
 		goto out0;
-	}
 
 	im = kmalloc(sizeof(*im), GFP_KERNEL);
-	if (IS_ERR(im)) {
-		ret = PTR_ERR(im);
+	if (!im)
 		goto out1;
-	}
 
 	ret = nfs_idmap_prepare_message(key->description, im, msg);
 	if (ret < 0)
diff --git a/fs/nfs/inode.c b/fs/nfs/inode.c
index e8bbfa5..f729698 100644
--- a/fs/nfs/inode.c
+++ b/fs/nfs/inode.c
@@ -121,7 +121,7 @@
 void nfs_evict_inode(struct inode *inode)
 {
 	truncate_inode_pages(&inode->i_data, 0);
-	end_writeback(inode);
+	clear_inode(inode);
 	nfs_clear_inode(inode);
 }
 
@@ -285,9 +285,7 @@
 		inode->i_mode = fattr->mode;
 		if ((fattr->valid & NFS_ATTR_FATTR_MODE) == 0
 				&& nfs_server_capable(inode, NFS_CAP_MODE))
-			nfsi->cache_validity |= NFS_INO_INVALID_ATTR
-				| NFS_INO_INVALID_ACCESS
-				| NFS_INO_INVALID_ACL;
+			nfsi->cache_validity |= NFS_INO_INVALID_ATTR;
 		/* Why so? Because we want revalidate for devices/FIFOs, and
 		 * that's precisely what we have in nfs_file_inode_operations.
 		 */
@@ -300,8 +298,6 @@
 			inode->i_op = NFS_SB(sb)->nfs_client->rpc_ops->dir_inode_ops;
 			inode->i_fop = &nfs_dir_operations;
 			inode->i_data.a_ops = &nfs_dir_aops;
-			if (nfs_server_capable(inode, NFS_CAP_READDIRPLUS))
-				set_bit(NFS_INO_ADVISE_RDPLUS, &NFS_I(inode)->flags);
 			/* Deal with crossing mountpoints */
 			if (fattr->valid & NFS_ATTR_FATTR_MOUNTPOINT ||
 					fattr->valid & NFS_ATTR_FATTR_V4_REFERRAL) {
@@ -327,6 +323,8 @@
 		inode->i_gid = -2;
 		inode->i_blocks = 0;
 		memset(nfsi->cookieverf, 0, sizeof(nfsi->cookieverf));
+		nfsi->write_io = 0;
+		nfsi->read_io = 0;
 
 		nfsi->read_cache_jiffies = fattr->time_start;
 		nfsi->attr_gencount = fattr->gencount;
@@ -337,24 +335,19 @@
 		if (fattr->valid & NFS_ATTR_FATTR_MTIME)
 			inode->i_mtime = fattr->mtime;
 		else if (nfs_server_capable(inode, NFS_CAP_MTIME))
-			nfsi->cache_validity |= NFS_INO_INVALID_ATTR
-				| NFS_INO_INVALID_DATA;
+			nfsi->cache_validity |= NFS_INO_INVALID_ATTR;
 		if (fattr->valid & NFS_ATTR_FATTR_CTIME)
 			inode->i_ctime = fattr->ctime;
 		else if (nfs_server_capable(inode, NFS_CAP_CTIME))
-			nfsi->cache_validity |= NFS_INO_INVALID_ATTR
-				| NFS_INO_INVALID_ACCESS
-				| NFS_INO_INVALID_ACL;
+			nfsi->cache_validity |= NFS_INO_INVALID_ATTR;
 		if (fattr->valid & NFS_ATTR_FATTR_CHANGE)
 			inode->i_version = fattr->change_attr;
 		else if (nfs_server_capable(inode, NFS_CAP_CHANGE_ATTR))
-			nfsi->cache_validity |= NFS_INO_INVALID_ATTR
-				| NFS_INO_INVALID_DATA;
+			nfsi->cache_validity |= NFS_INO_INVALID_ATTR;
 		if (fattr->valid & NFS_ATTR_FATTR_SIZE)
 			inode->i_size = nfs_size_to_loff_t(fattr->size);
 		else
 			nfsi->cache_validity |= NFS_INO_INVALID_ATTR
-				| NFS_INO_INVALID_DATA
 				| NFS_INO_REVAL_PAGECACHE;
 		if (fattr->valid & NFS_ATTR_FATTR_NLINK)
 			set_nlink(inode, fattr->nlink);
@@ -363,15 +356,11 @@
 		if (fattr->valid & NFS_ATTR_FATTR_OWNER)
 			inode->i_uid = fattr->uid;
 		else if (nfs_server_capable(inode, NFS_CAP_OWNER))
-			nfsi->cache_validity |= NFS_INO_INVALID_ATTR
-				| NFS_INO_INVALID_ACCESS
-				| NFS_INO_INVALID_ACL;
+			nfsi->cache_validity |= NFS_INO_INVALID_ATTR;
 		if (fattr->valid & NFS_ATTR_FATTR_GROUP)
 			inode->i_gid = fattr->gid;
 		else if (nfs_server_capable(inode, NFS_CAP_OWNER_GROUP))
-			nfsi->cache_validity |= NFS_INO_INVALID_ATTR
-				| NFS_INO_INVALID_ACCESS
-				| NFS_INO_INVALID_ACL;
+			nfsi->cache_validity |= NFS_INO_INVALID_ATTR;
 		if (fattr->valid & NFS_ATTR_FATTR_BLOCKS_USED)
 			inode->i_blocks = fattr->du.nfs2.blocks;
 		if (fattr->valid & NFS_ATTR_FATTR_SPACE_USED) {
@@ -429,8 +418,10 @@
 		return 0;
 
 	/* Write all dirty data */
-	if (S_ISREG(inode->i_mode))
+	if (S_ISREG(inode->i_mode)) {
+		nfs_inode_dio_wait(inode);
 		nfs_wb_all(inode);
+	}
 
 	fattr = nfs_alloc_fattr();
 	if (fattr == NULL)
@@ -514,6 +505,7 @@
 
 	/* Flush out writes to the server in order to update c/mtime.  */
 	if (S_ISREG(inode->i_mode)) {
+		nfs_inode_dio_wait(inode);
 		err = filemap_write_and_wait(inode->i_mapping);
 		if (err)
 			goto out;
@@ -654,6 +646,7 @@
 	nfs_init_lock_context(&ctx->lock_context);
 	ctx->lock_context.open_context = ctx;
 	INIT_LIST_HEAD(&ctx->list);
+	ctx->mdsthreshold = NULL;
 	return ctx;
 }
 
@@ -682,6 +675,7 @@
 		put_rpccred(ctx->cred);
 	dput(ctx->dentry);
 	nfs_sb_deactive(sb);
+	kfree(ctx->mdsthreshold);
 	kfree(ctx);
 }
 
@@ -870,6 +864,15 @@
 	return 0;
 }
 
+static bool nfs_mapping_need_revalidate_inode(struct inode *inode)
+{
+	if (nfs_have_delegated_attributes(inode))
+		return false;
+	return (NFS_I(inode)->cache_validity & NFS_INO_REVAL_PAGECACHE)
+		|| nfs_attribute_timeout(inode)
+		|| NFS_STALE(inode);
+}
+
 /**
  * nfs_revalidate_mapping - Revalidate the pagecache
  * @inode - pointer to host inode
@@ -880,9 +883,7 @@
 	struct nfs_inode *nfsi = NFS_I(inode);
 	int ret = 0;
 
-	if ((nfsi->cache_validity & NFS_INO_REVAL_PAGECACHE)
-			|| nfs_attribute_cache_expired(inode)
-			|| NFS_STALE(inode)) {
+	if (nfs_mapping_need_revalidate_inode(inode)) {
 		ret = __nfs_revalidate_inode(NFS_SERVER(inode), inode);
 		if (ret < 0)
 			goto out;
@@ -948,6 +949,8 @@
 	unsigned long invalid = 0;
 
 
+	if (nfs_have_delegated_attributes(inode))
+		return 0;
 	/* Has the inode gone and changed behind our back? */
 	if ((fattr->valid & NFS_ATTR_FATTR_FILEID) && nfsi->fileid != fattr->fileid)
 		return -EIO;
@@ -960,7 +963,7 @@
 
 	/* Verify a few of the more important attributes */
 	if ((fattr->valid & NFS_ATTR_FATTR_MTIME) && !timespec_equal(&inode->i_mtime, &fattr->mtime))
-		invalid |= NFS_INO_INVALID_ATTR|NFS_INO_REVAL_PAGECACHE;
+		invalid |= NFS_INO_INVALID_ATTR;
 
 	if (fattr->valid & NFS_ATTR_FATTR_SIZE) {
 		cur_size = i_size_read(inode);
@@ -1279,14 +1282,26 @@
 			nfs_display_fhandle_hash(NFS_FH(inode)),
 			atomic_read(&inode->i_count), fattr->valid);
 
-	if ((fattr->valid & NFS_ATTR_FATTR_FILEID) && nfsi->fileid != fattr->fileid)
-		goto out_fileid;
+	if ((fattr->valid & NFS_ATTR_FATTR_FILEID) && nfsi->fileid != fattr->fileid) {
+		printk(KERN_ERR "NFS: server %s error: fileid changed\n"
+			"fsid %s: expected fileid 0x%Lx, got 0x%Lx\n",
+			NFS_SERVER(inode)->nfs_client->cl_hostname,
+			inode->i_sb->s_id, (long long)nfsi->fileid,
+			(long long)fattr->fileid);
+		goto out_err;
+	}
 
 	/*
 	 * Make sure the inode's type hasn't changed.
 	 */
-	if ((fattr->valid & NFS_ATTR_FATTR_TYPE) && (inode->i_mode & S_IFMT) != (fattr->mode & S_IFMT))
-		goto out_changed;
+	if ((fattr->valid & NFS_ATTR_FATTR_TYPE) && (inode->i_mode & S_IFMT) != (fattr->mode & S_IFMT)) {
+		/*
+		* Big trouble! The inode has become a different object.
+		*/
+		printk(KERN_DEBUG "NFS: %s: inode %ld mode changed, %07o to %07o\n",
+				__func__, inode->i_ino, inode->i_mode, fattr->mode);
+		goto out_err;
+	}
 
 	server = NFS_SERVER(inode);
 	/* Update the fsid? */
@@ -1314,7 +1329,11 @@
 		if (inode->i_version != fattr->change_attr) {
 			dprintk("NFS: change_attr change on server for file %s/%ld\n",
 					inode->i_sb->s_id, inode->i_ino);
-			invalid |= NFS_INO_INVALID_ATTR|NFS_INO_INVALID_DATA|NFS_INO_INVALID_ACCESS|NFS_INO_INVALID_ACL;
+			invalid |= NFS_INO_INVALID_ATTR
+				| NFS_INO_INVALID_DATA
+				| NFS_INO_INVALID_ACCESS
+				| NFS_INO_INVALID_ACL
+				| NFS_INO_REVAL_PAGECACHE;
 			if (S_ISDIR(inode->i_mode))
 				nfs_force_lookup_revalidate(inode);
 			inode->i_version = fattr->change_attr;
@@ -1323,38 +1342,15 @@
 		invalid |= save_cache_validity;
 
 	if (fattr->valid & NFS_ATTR_FATTR_MTIME) {
-		/* NFSv2/v3: Check if the mtime agrees */
-		if (!timespec_equal(&inode->i_mtime, &fattr->mtime)) {
-			dprintk("NFS: mtime change on server for file %s/%ld\n",
-					inode->i_sb->s_id, inode->i_ino);
-			invalid |= NFS_INO_INVALID_ATTR|NFS_INO_INVALID_DATA;
-			if (S_ISDIR(inode->i_mode))
-				nfs_force_lookup_revalidate(inode);
-			memcpy(&inode->i_mtime, &fattr->mtime, sizeof(inode->i_mtime));
-		}
+		memcpy(&inode->i_mtime, &fattr->mtime, sizeof(inode->i_mtime));
 	} else if (server->caps & NFS_CAP_MTIME)
 		invalid |= save_cache_validity & (NFS_INO_INVALID_ATTR
-				| NFS_INO_INVALID_DATA
-				| NFS_INO_REVAL_PAGECACHE
 				| NFS_INO_REVAL_FORCED);
 
 	if (fattr->valid & NFS_ATTR_FATTR_CTIME) {
-		/* If ctime has changed we should definitely clear access+acl caches */
-		if (!timespec_equal(&inode->i_ctime, &fattr->ctime)) {
-			invalid |= NFS_INO_INVALID_ATTR|NFS_INO_INVALID_ACCESS|NFS_INO_INVALID_ACL;
-			/* and probably clear data for a directory too as utimes can cause
-			 * havoc with our cache.
-			 */
-			if (S_ISDIR(inode->i_mode)) {
-				invalid |= NFS_INO_INVALID_DATA;
-				nfs_force_lookup_revalidate(inode);
-			}
-			memcpy(&inode->i_ctime, &fattr->ctime, sizeof(inode->i_ctime));
-		}
+		memcpy(&inode->i_ctime, &fattr->ctime, sizeof(inode->i_ctime));
 	} else if (server->caps & NFS_CAP_CTIME)
 		invalid |= save_cache_validity & (NFS_INO_INVALID_ATTR
-				| NFS_INO_INVALID_ACCESS
-				| NFS_INO_INVALID_ACL
 				| NFS_INO_REVAL_FORCED);
 
 	/* Check if our cached file size is stale */
@@ -1466,12 +1462,6 @@
 		nfsi->cache_validity |= invalid;
 
 	return 0;
- out_changed:
-	/*
-	 * Big trouble! The inode has become a different object.
-	 */
-	printk(KERN_DEBUG "NFS: %s: inode %ld mode changed, %07o to %07o\n",
-			__func__, inode->i_ino, inode->i_mode, fattr->mode);
  out_err:
 	/*
 	 * No need to worry about unhashing the dentry, as the
@@ -1480,13 +1470,6 @@
 	 */
 	nfs_invalidate_inode(inode);
 	return -ESTALE;
-
- out_fileid:
-	printk(KERN_ERR "NFS: server %s error: fileid changed\n"
-		"fsid %s: expected fileid 0x%Lx, got 0x%Lx\n",
-		NFS_SERVER(inode)->nfs_client->cl_hostname, inode->i_sb->s_id,
-		(long long)nfsi->fileid, (long long)fattr->fileid);
-	goto out_err;
 }
 
 
@@ -1500,7 +1483,7 @@
 void nfs4_evict_inode(struct inode *inode)
 {
 	truncate_inode_pages(&inode->i_data, 0);
-	end_writeback(inode);
+	clear_inode(inode);
 	pnfs_return_layout(inode);
 	pnfs_destroy_layout(NFS_I(inode));
 	/* If we are holding a delegation, return it! */
@@ -1547,7 +1530,6 @@
 	nfsi->delegation_state = 0;
 	init_rwsem(&nfsi->rwsem);
 	nfsi->layout = NULL;
-	atomic_set(&nfsi->commits_outstanding, 0);
 #endif
 }
 
@@ -1559,9 +1541,10 @@
 	INIT_LIST_HEAD(&nfsi->open_files);
 	INIT_LIST_HEAD(&nfsi->access_cache_entry_lru);
 	INIT_LIST_HEAD(&nfsi->access_cache_inode_lru);
-	INIT_LIST_HEAD(&nfsi->commit_list);
+	INIT_LIST_HEAD(&nfsi->commit_info.list);
 	nfsi->npages = 0;
-	nfsi->ncommit = 0;
+	nfsi->commit_info.ncommit = 0;
+	atomic_set(&nfsi->commit_info.rpcs_out, 0);
 	atomic_set(&nfsi->silly_count, 1);
 	INIT_HLIST_HEAD(&nfsi->silly_list);
 	init_waitqueue_head(&nfsi->waitqueue);
diff --git a/fs/nfs/internal.h b/fs/nfs/internal.h
index b777bda..18f99ef 100644
--- a/fs/nfs/internal.h
+++ b/fs/nfs/internal.h
@@ -103,6 +103,7 @@
 	unsigned int		version;
 	unsigned int		minorversion;
 	char			*fscache_uniq;
+	bool			need_mount;
 
 	struct {
 		struct sockaddr_storage	address;
@@ -167,11 +168,13 @@
 					   struct nfs_fh *,
 					   struct nfs_fattr *,
 					   rpc_authflavor_t);
+extern int nfs_wait_client_init_complete(const struct nfs_client *clp);
 extern void nfs_mark_client_ready(struct nfs_client *clp, int state);
-extern int nfs4_check_client_ready(struct nfs_client *clp);
 extern struct nfs_client *nfs4_set_ds_client(struct nfs_client* mds_clp,
 					     const struct sockaddr *ds_addr,
-					     int ds_addrlen, int ds_proto);
+					     int ds_addrlen, int ds_proto,
+					     unsigned int ds_timeo,
+					     unsigned int ds_retrans);
 #ifdef CONFIG_PROC_FS
 extern int __init nfs_fs_proc_init(void);
 extern void nfs_fs_proc_exit(void);
@@ -185,21 +188,11 @@
 }
 #endif
 
-/* nfs4namespace.c */
-#ifdef CONFIG_NFS_V4
-extern struct vfsmount *nfs_do_refmount(struct rpc_clnt *client, struct dentry *dentry);
-#else
-static inline
-struct vfsmount *nfs_do_refmount(struct rpc_clnt *client, struct dentry *dentry)
-{
-	return ERR_PTR(-ENOENT);
-}
-#endif
-
 /* callback_xdr.c */
 extern struct svc_version nfs4_callback_version1;
 extern struct svc_version nfs4_callback_version4;
 
+struct nfs_pageio_descriptor;
 /* pagelist.c */
 extern int __init nfs_init_nfspagecache(void);
 extern void nfs_destroy_nfspagecache(void);
@@ -210,9 +203,13 @@
 
 extern int __init nfs_init_directcache(void);
 extern void nfs_destroy_directcache(void);
+extern bool nfs_pgarray_set(struct nfs_page_array *p, unsigned int pagecount);
+extern void nfs_pgheader_init(struct nfs_pageio_descriptor *desc,
+			      struct nfs_pgio_header *hdr,
+			      void (*release)(struct nfs_pgio_header *hdr));
+void nfs_set_pgio_error(struct nfs_pgio_header *hdr, int error, loff_t pos);
 
 /* nfs2xdr.c */
-extern int nfs_stat_to_errno(enum nfs_stat);
 extern struct rpc_procinfo nfs_procedures[];
 extern int nfs2_decode_dirent(struct xdr_stream *,
 				struct nfs_entry *, int);
@@ -237,14 +234,13 @@
 extern struct rpc_procinfo nfs4_procedures[];
 #endif
 
-extern int nfs4_init_ds_session(struct nfs_client *clp);
+extern int nfs4_init_ds_session(struct nfs_client *, unsigned long);
 
 /* proc.c */
 void nfs_close_context(struct nfs_open_context *ctx, int is_sync);
-extern int nfs_init_client(struct nfs_client *clp,
+extern struct nfs_client *nfs_init_client(struct nfs_client *clp,
 			   const struct rpc_timeout *timeparms,
-			   const char *ip_addr, rpc_authflavor_t authflavour,
-			   int noresvport);
+			   const char *ip_addr, rpc_authflavor_t authflavour);
 
 /* dir.c */
 extern int nfs_access_cache_shrinker(struct shrinker *shrink,
@@ -280,9 +276,10 @@
 extern char *nfs_path(char **p, struct dentry *dentry,
 		      char *buffer, ssize_t buflen);
 extern struct vfsmount *nfs_d_automount(struct path *path);
-#ifdef CONFIG_NFS_V4
-rpc_authflavor_t nfs_find_best_sec(struct nfs4_secinfo_flavors *);
-#endif
+struct vfsmount *nfs_submount(struct nfs_server *, struct dentry *,
+			      struct nfs_fh *, struct nfs_fattr *);
+struct vfsmount *nfs_do_submount(struct dentry *, struct nfs_fh *,
+				 struct nfs_fattr *, rpc_authflavor_t);
 
 /* getroot.c */
 extern struct dentry *nfs_get_root(struct super_block *, struct nfs_fh *,
@@ -294,46 +291,73 @@
 extern int nfs4_get_rootfh(struct nfs_server *server, struct nfs_fh *mntfh);
 #endif
 
-struct nfs_pageio_descriptor;
+struct nfs_pgio_completion_ops;
 /* read.c */
-extern int nfs_initiate_read(struct nfs_read_data *data, struct rpc_clnt *clnt,
-			     const struct rpc_call_ops *call_ops);
+extern struct nfs_read_header *nfs_readhdr_alloc(void);
+extern void nfs_readhdr_free(struct nfs_pgio_header *hdr);
+extern void nfs_pageio_init_read(struct nfs_pageio_descriptor *pgio,
+			struct inode *inode,
+			const struct nfs_pgio_completion_ops *compl_ops);
+extern int nfs_initiate_read(struct rpc_clnt *clnt,
+			     struct nfs_read_data *data,
+			     const struct rpc_call_ops *call_ops, int flags);
 extern void nfs_read_prepare(struct rpc_task *task, void *calldata);
 extern int nfs_generic_pagein(struct nfs_pageio_descriptor *desc,
-		struct list_head *head);
-
+			      struct nfs_pgio_header *hdr);
 extern void nfs_pageio_init_read_mds(struct nfs_pageio_descriptor *pgio,
-		struct inode *inode);
+			struct inode *inode,
+			const struct nfs_pgio_completion_ops *compl_ops);
 extern void nfs_pageio_reset_read_mds(struct nfs_pageio_descriptor *pgio);
 extern void nfs_readdata_release(struct nfs_read_data *rdata);
 
 /* write.c */
+extern void nfs_pageio_init_write(struct nfs_pageio_descriptor *pgio,
+			struct inode *inode, int ioflags,
+			const struct nfs_pgio_completion_ops *compl_ops);
+extern struct nfs_write_header *nfs_writehdr_alloc(void);
+extern void nfs_writehdr_free(struct nfs_pgio_header *hdr);
 extern int nfs_generic_flush(struct nfs_pageio_descriptor *desc,
-		struct list_head *head);
+			     struct nfs_pgio_header *hdr);
 extern void nfs_pageio_init_write_mds(struct nfs_pageio_descriptor *pgio,
-				  struct inode *inode, int ioflags);
+			struct inode *inode, int ioflags,
+			const struct nfs_pgio_completion_ops *compl_ops);
 extern void nfs_pageio_reset_write_mds(struct nfs_pageio_descriptor *pgio);
 extern void nfs_writedata_release(struct nfs_write_data *wdata);
-extern void nfs_commit_free(struct nfs_write_data *p);
-extern int nfs_initiate_write(struct nfs_write_data *data,
-			      struct rpc_clnt *clnt,
+extern void nfs_commit_free(struct nfs_commit_data *p);
+extern int nfs_initiate_write(struct rpc_clnt *clnt,
+			      struct nfs_write_data *data,
 			      const struct rpc_call_ops *call_ops,
-			      int how);
+			      int how, int flags);
 extern void nfs_write_prepare(struct rpc_task *task, void *calldata);
-extern int nfs_initiate_commit(struct nfs_write_data *data,
-			       struct rpc_clnt *clnt,
+extern void nfs_commit_prepare(struct rpc_task *task, void *calldata);
+extern int nfs_initiate_commit(struct rpc_clnt *clnt,
+			       struct nfs_commit_data *data,
 			       const struct rpc_call_ops *call_ops,
-			       int how);
-extern void nfs_init_commit(struct nfs_write_data *data,
+			       int how, int flags);
+extern void nfs_init_commit(struct nfs_commit_data *data,
 			    struct list_head *head,
-			    struct pnfs_layout_segment *lseg);
+			    struct pnfs_layout_segment *lseg,
+			    struct nfs_commit_info *cinfo);
+int nfs_scan_commit_list(struct list_head *src, struct list_head *dst,
+			 struct nfs_commit_info *cinfo, int max);
+int nfs_scan_commit(struct inode *inode, struct list_head *dst,
+		    struct nfs_commit_info *cinfo);
+void nfs_mark_request_commit(struct nfs_page *req,
+			     struct pnfs_layout_segment *lseg,
+			     struct nfs_commit_info *cinfo);
+int nfs_generic_commit_list(struct inode *inode, struct list_head *head,
+			    int how, struct nfs_commit_info *cinfo);
 void nfs_retry_commit(struct list_head *page_list,
-		      struct pnfs_layout_segment *lseg);
-void nfs_commit_clear_lock(struct nfs_inode *nfsi);
-void nfs_commitdata_release(void *data);
-void nfs_commit_release_pages(struct nfs_write_data *data);
-void nfs_request_add_commit_list(struct nfs_page *req, struct list_head *head);
-void nfs_request_remove_commit_list(struct nfs_page *req);
+		      struct pnfs_layout_segment *lseg,
+		      struct nfs_commit_info *cinfo);
+void nfs_commitdata_release(struct nfs_commit_data *data);
+void nfs_request_add_commit_list(struct nfs_page *req, struct list_head *dst,
+				 struct nfs_commit_info *cinfo);
+void nfs_request_remove_commit_list(struct nfs_page *req,
+				    struct nfs_commit_info *cinfo);
+void nfs_init_cinfo(struct nfs_commit_info *cinfo,
+		    struct inode *inode,
+		    struct nfs_direct_req *dreq);
 
 #ifdef CONFIG_MIGRATION
 extern int nfs_migrate_page(struct address_space *,
@@ -342,15 +366,20 @@
 #define nfs_migrate_page NULL
 #endif
 
+/* direct.c */
+void nfs_init_cinfo_from_dreq(struct nfs_commit_info *cinfo,
+			      struct nfs_direct_req *dreq);
+static inline void nfs_inode_dio_wait(struct inode *inode)
+{
+	inode_dio_wait(inode);
+}
+
 /* nfs4proc.c */
 extern void __nfs4_read_done_cb(struct nfs_read_data *);
-extern void nfs4_reset_read(struct rpc_task *task, struct nfs_read_data *data);
-extern int nfs4_init_client(struct nfs_client *clp,
+extern struct nfs_client *nfs4_init_client(struct nfs_client *clp,
 			    const struct rpc_timeout *timeparms,
 			    const char *ip_addr,
-			    rpc_authflavor_t authflavour,
-			    int noresvport);
-extern void nfs4_reset_write(struct rpc_task *task, struct nfs_write_data *data);
+			    rpc_authflavor_t authflavour);
 extern int _nfs4_call_sync(struct rpc_clnt *clnt,
 			   struct nfs_server *server,
 			   struct rpc_message *msg,
@@ -466,3 +495,15 @@
 		PAGE_SIZE - 1) >> PAGE_SHIFT;
 }
 
+/*
+ * Convert a struct timespec into a 64-bit change attribute
+ *
+ * This does approximately the same thing as timespec_to_ns(),
+ * but for calculation efficiency, we multiply the seconds by
+ * 1024*1024*1024.
+ */
+static inline
+u64 nfs_timespec_to_change_attr(const struct timespec *ts)
+{
+	return ((u64)ts->tv_sec << 30) + ts->tv_nsec;
+}
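As an aside on the helper added above: because the seconds are scaled by 2^30 rather than 10^9, the synthetic change attribute only approximates timespec_to_ns(), but it stays monotonic whenever ctime moves forward, which is all the client-side revalidation needs. A small user-space sketch of the same arithmetic (sample values are purely illustrative and not taken from this patch):

#include <stdio.h>
#include <stdint.h>

/* Mirror of nfs_timespec_to_change_attr(): seconds shifted by 30 bits
 * plus nanoseconds, so a later ctime always compares greater. */
static uint64_t change_attr(int64_t sec, long nsec)
{
	return ((uint64_t)sec << 30) + nsec;
}

int main(void)
{
	uint64_t before = change_attr(1337000000, 100);	/* hypothetical ctime */
	uint64_t after  = change_attr(1337000000, 250);	/* slightly later ctime */

	printf("%llu -> %llu (monotonic: %d)\n",
	       (unsigned long long)before, (unsigned long long)after,
	       after > before);
	return 0;
}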
diff --git a/fs/nfs/namespace.c b/fs/nfs/namespace.c
index d51868e..08b9c93 100644
--- a/fs/nfs/namespace.c
+++ b/fs/nfs/namespace.c
@@ -26,11 +26,6 @@
 static DECLARE_DELAYED_WORK(nfs_automount_task, nfs_expire_automounts);
 int nfs_mountpoint_expiry_timeout = 500 * HZ;
 
-static struct vfsmount *nfs_do_submount(struct dentry *dentry,
-					struct nfs_fh *fh,
-					struct nfs_fattr *fattr,
-					rpc_authflavor_t authflavor);
-
 /*
  * nfs_path - reconstruct the path given an arbitrary dentry
  * @base - used to return pointer to the end of devname part of path
@@ -118,64 +113,6 @@
 	return ERR_PTR(-ENAMETOOLONG);
 }
 
-#ifdef CONFIG_NFS_V4
-rpc_authflavor_t nfs_find_best_sec(struct nfs4_secinfo_flavors *flavors)
-{
-	struct gss_api_mech *mech;
-	struct xdr_netobj oid;
-	int i;
-	rpc_authflavor_t pseudoflavor = RPC_AUTH_UNIX;
-
-	for (i = 0; i < flavors->num_flavors; i++) {
-		struct nfs4_secinfo_flavor *flavor;
-		flavor = &flavors->flavors[i];
-
-		if (flavor->flavor == RPC_AUTH_NULL || flavor->flavor == RPC_AUTH_UNIX) {
-			pseudoflavor = flavor->flavor;
-			break;
-		} else if (flavor->flavor == RPC_AUTH_GSS) {
-			oid.len  = flavor->gss.sec_oid4.len;
-			oid.data = flavor->gss.sec_oid4.data;
-			mech = gss_mech_get_by_OID(&oid);
-			if (!mech)
-				continue;
-			pseudoflavor = gss_svc_to_pseudoflavor(mech, flavor->gss.service);
-			gss_mech_put(mech);
-			break;
-		}
-	}
-
-	return pseudoflavor;
-}
-
-static struct rpc_clnt *nfs_lookup_mountpoint(struct inode *dir,
-					      struct qstr *name,
-					      struct nfs_fh *fh,
-					      struct nfs_fattr *fattr)
-{
-	int err;
-
-	if (NFS_PROTO(dir)->version == 4)
-		return nfs4_proc_lookup_mountpoint(dir, name, fh, fattr);
-
-	err = NFS_PROTO(dir)->lookup(NFS_SERVER(dir)->client, dir, name, fh, fattr);
-	if (err)
-		return ERR_PTR(err);
-	return rpc_clone_client(NFS_SERVER(dir)->client);
-}
-#else /* CONFIG_NFS_V4 */
-static inline struct rpc_clnt *nfs_lookup_mountpoint(struct inode *dir,
-						     struct qstr *name,
-						     struct nfs_fh *fh,
-						     struct nfs_fattr *fattr)
-{
-	int err = NFS_PROTO(dir)->lookup(NFS_SERVER(dir)->client, dir, name, fh, fattr);
-	if (err)
-		return ERR_PTR(err);
-	return rpc_clone_client(NFS_SERVER(dir)->client);
-}
-#endif /* CONFIG_NFS_V4 */
-
 /*
  * nfs_d_automount - Handle crossing a mountpoint on the server
  * @path - The mountpoint
@@ -191,10 +128,9 @@
 struct vfsmount *nfs_d_automount(struct path *path)
 {
 	struct vfsmount *mnt;
-	struct dentry *parent;
+	struct nfs_server *server = NFS_SERVER(path->dentry->d_inode);
 	struct nfs_fh *fh = NULL;
 	struct nfs_fattr *fattr = NULL;
-	struct rpc_clnt *client;
 
 	dprintk("--> nfs_d_automount()\n");
 
@@ -210,21 +146,7 @@
 
 	dprintk("%s: enter\n", __func__);
 
-	/* Look it up again to get its attributes */
-	parent = dget_parent(path->dentry);
-	client = nfs_lookup_mountpoint(parent->d_inode, &path->dentry->d_name, fh, fattr);
-	dput(parent);
-	if (IS_ERR(client)) {
-		mnt = ERR_CAST(client);
-		goto out;
-	}
-
-	if (fattr->valid & NFS_ATTR_FATTR_V4_REFERRAL)
-		mnt = nfs_do_refmount(client, path->dentry);
-	else
-		mnt = nfs_do_submount(path->dentry, fh, fattr, client->cl_auth->au_flavor);
-	rpc_shutdown_client(client);
-
+	mnt = server->nfs_client->rpc_ops->submount(server, path->dentry, fh, fattr);
 	if (IS_ERR(mnt))
 		goto out;
 
@@ -297,10 +219,8 @@
  * @authflavor - security flavor to use when performing the mount
  *
  */
-static struct vfsmount *nfs_do_submount(struct dentry *dentry,
-					struct nfs_fh *fh,
-					struct nfs_fattr *fattr,
-					rpc_authflavor_t authflavor)
+struct vfsmount *nfs_do_submount(struct dentry *dentry, struct nfs_fh *fh,
+				 struct nfs_fattr *fattr, rpc_authflavor_t authflavor)
 {
 	struct nfs_clone_mount mountdata = {
 		.sb = dentry->d_sb,
@@ -333,3 +253,18 @@
 	dprintk("<-- nfs_do_submount() = %p\n", mnt);
 	return mnt;
 }
+
+struct vfsmount *nfs_submount(struct nfs_server *server, struct dentry *dentry,
+			      struct nfs_fh *fh, struct nfs_fattr *fattr)
+{
+	int err;
+	struct dentry *parent = dget_parent(dentry);
+
+	/* Look it up again to get its attributes */
+	err = server->nfs_client->rpc_ops->lookup(parent->d_inode, &dentry->d_name, fh, fattr);
+	dput(parent);
+	if (err != 0)
+		return ERR_PTR(err);
+
+	return nfs_do_submount(dentry, fh, fattr, server->client->cl_auth->au_flavor);
+}
diff --git a/fs/nfs/netns.h b/fs/nfs/netns.h
index aa14ec3..8a6394e 100644
--- a/fs/nfs/netns.h
+++ b/fs/nfs/netns.h
@@ -1,3 +1,7 @@
+/*
+ * NFS-private data for each "struct net".  Accessed with net_generic().
+ */
+
 #ifndef __NFS_NETNS_H__
 #define __NFS_NETNS_H__
 
@@ -20,6 +24,7 @@
 	struct idr cb_ident_idr; /* Protected by nfs_client_lock */
 #endif
 	spinlock_t nfs_client_lock;
+	struct timespec boot_time;
 };
 
 extern int nfs_net_id;
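For context on the comment added above: nfs_net_id is the key registered for this subsystem, and net_generic() maps it to the calling namespace's nfs_net. A hedged sketch of the access pattern (the wrapper name is hypothetical and not part of this patch; the driver open-codes the net_generic() call):

#include <net/net_namespace.h>
#include <net/netns/generic.h>
/* assumes fs/nfs/netns.h is in scope for struct nfs_net and nfs_net_id */

/* Hypothetical convenience wrapper, for illustration only. */
static inline struct nfs_net *nfs_net(struct net *net)
{
	return net_generic(net, nfs_net_id);
}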
diff --git a/fs/nfs/nfs2xdr.c b/fs/nfs/nfs2xdr.c
index 1f56000..baf759b 100644
--- a/fs/nfs/nfs2xdr.c
+++ b/fs/nfs/nfs2xdr.c
@@ -61,6 +61,7 @@
 #define NFS_readdirres_sz	(1)
 #define NFS_statfsres_sz	(1+NFS_info_sz)
 
+static int nfs_stat_to_errno(enum nfs_stat);
 
 /*
  * While encoding arguments, set up the reply buffer in advance to
@@ -313,6 +314,8 @@
 	p = xdr_decode_time(p, &fattr->atime);
 	p = xdr_decode_time(p, &fattr->mtime);
 	xdr_decode_time(p, &fattr->ctime);
+	fattr->change_attr = nfs_timespec_to_change_attr(&fattr->ctime);
+
 	return 0;
 out_overflow:
 	print_overflow_msg(__func__, xdr);
@@ -1109,7 +1112,7 @@
  * Returns a local errno value, or -EIO if the NFS status code is
  * not recognized.  This function is used jointly by NFSv2 and NFSv3.
  */
-int nfs_stat_to_errno(enum nfs_stat status)
+static int nfs_stat_to_errno(enum nfs_stat status)
 {
 	int i;
 
diff --git a/fs/nfs/nfs3proc.c b/fs/nfs/nfs3proc.c
index 75c6829..2292a0f 100644
--- a/fs/nfs/nfs3proc.c
+++ b/fs/nfs/nfs3proc.c
@@ -142,7 +142,7 @@
 }
 
 static int
-nfs3_proc_lookup(struct rpc_clnt *clnt, struct inode *dir, struct qstr *name,
+nfs3_proc_lookup(struct inode *dir, struct qstr *name,
 		 struct nfs_fh *fhandle, struct nfs_fattr *fattr)
 {
 	struct nfs3_diropargs	arg = {
@@ -810,11 +810,13 @@
 
 static int nfs3_read_done(struct rpc_task *task, struct nfs_read_data *data)
 {
-	if (nfs3_async_handle_jukebox(task, data->inode))
+	struct inode *inode = data->header->inode;
+
+	if (nfs3_async_handle_jukebox(task, inode))
 		return -EAGAIN;
 
-	nfs_invalidate_atime(data->inode);
-	nfs_refresh_inode(data->inode, &data->fattr);
+	nfs_invalidate_atime(inode);
+	nfs_refresh_inode(inode, &data->fattr);
 	return 0;
 }
 
@@ -830,10 +832,12 @@
 
 static int nfs3_write_done(struct rpc_task *task, struct nfs_write_data *data)
 {
-	if (nfs3_async_handle_jukebox(task, data->inode))
+	struct inode *inode = data->header->inode;
+
+	if (nfs3_async_handle_jukebox(task, inode))
 		return -EAGAIN;
 	if (task->tk_status >= 0)
-		nfs_post_op_update_inode_force_wcc(data->inode, data->res.fattr);
+		nfs_post_op_update_inode_force_wcc(inode, data->res.fattr);
 	return 0;
 }
 
@@ -847,7 +851,12 @@
 	rpc_call_start(task);
 }
 
-static int nfs3_commit_done(struct rpc_task *task, struct nfs_write_data *data)
+static void nfs3_proc_commit_rpc_prepare(struct rpc_task *task, struct nfs_commit_data *data)
+{
+	rpc_call_start(task);
+}
+
+static int nfs3_commit_done(struct rpc_task *task, struct nfs_commit_data *data)
 {
 	if (nfs3_async_handle_jukebox(task, data->inode))
 		return -EAGAIN;
@@ -855,7 +864,7 @@
 	return 0;
 }
 
-static void nfs3_proc_commit_setup(struct nfs_write_data *data, struct rpc_message *msg)
+static void nfs3_proc_commit_setup(struct nfs_commit_data *data, struct rpc_message *msg)
 {
 	msg->rpc_proc = &nfs3_procedures[NFS3PROC_COMMIT];
 }
@@ -875,6 +884,7 @@
 	.file_inode_ops	= &nfs3_file_inode_operations,
 	.file_ops	= &nfs_file_operations,
 	.getroot	= nfs3_proc_get_root,
+	.submount	= nfs_submount,
 	.getattr	= nfs3_proc_getattr,
 	.setattr	= nfs3_proc_setattr,
 	.lookup		= nfs3_proc_lookup,
@@ -906,6 +916,7 @@
 	.write_rpc_prepare = nfs3_proc_write_rpc_prepare,
 	.write_done	= nfs3_write_done,
 	.commit_setup	= nfs3_proc_commit_setup,
+	.commit_rpc_prepare = nfs3_proc_commit_rpc_prepare,
 	.commit_done	= nfs3_commit_done,
 	.lock		= nfs3_proc_lock,
 	.clear_acl_cache = nfs3_forget_cached_acls,
diff --git a/fs/nfs/nfs3xdr.c b/fs/nfs/nfs3xdr.c
index a77cc9a..902de48 100644
--- a/fs/nfs/nfs3xdr.c
+++ b/fs/nfs/nfs3xdr.c
@@ -86,6 +86,8 @@
 				XDR_QUADLEN(NFS_ACL_INLINE_BUFSIZE))
 #define ACL3_setaclres_sz	(1+NFS3_post_op_attr_sz)
 
+static int nfs3_stat_to_errno(enum nfs_stat);
+
 /*
  * Map file type to S_IFMT bits
  */
@@ -675,6 +677,7 @@
 	p = xdr_decode_nfstime3(p, &fattr->atime);
 	p = xdr_decode_nfstime3(p, &fattr->mtime);
 	xdr_decode_nfstime3(p, &fattr->ctime);
+	fattr->change_attr = nfs_timespec_to_change_attr(&fattr->ctime);
 
 	fattr->valid |= NFS_ATTR_FATTR_V3;
 	return 0;
@@ -725,12 +728,14 @@
 		goto out_overflow;
 
 	fattr->valid |= NFS_ATTR_FATTR_PRESIZE
+		| NFS_ATTR_FATTR_PRECHANGE
 		| NFS_ATTR_FATTR_PREMTIME
 		| NFS_ATTR_FATTR_PRECTIME;
 
 	p = xdr_decode_size3(p, &fattr->pre_size);
 	p = xdr_decode_nfstime3(p, &fattr->pre_mtime);
 	xdr_decode_nfstime3(p, &fattr->pre_ctime);
+	fattr->pre_change_attr = nfs_timespec_to_change_attr(&fattr->pre_ctime);
 
 	return 0;
 out_overflow:
@@ -1287,7 +1292,7 @@
  *	};
  */
 static void encode_commit3args(struct xdr_stream *xdr,
-			       const struct nfs_writeargs *args)
+			       const struct nfs_commitargs *args)
 {
 	__be32 *p;
 
@@ -1300,7 +1305,7 @@
 
 static void nfs3_xdr_enc_commit3args(struct rpc_rqst *req,
 				     struct xdr_stream *xdr,
-				     const struct nfs_writeargs *args)
+				     const struct nfs_commitargs *args)
 {
 	encode_commit3args(xdr, args);
 }
@@ -1385,7 +1390,7 @@
 out:
 	return error;
 out_default:
-	return nfs_stat_to_errno(status);
+	return nfs3_stat_to_errno(status);
 }
 
 /*
@@ -1424,7 +1429,7 @@
 out:
 	return error;
 out_status:
-	return nfs_stat_to_errno(status);
+	return nfs3_stat_to_errno(status);
 }
 
 /*
@@ -1472,7 +1477,7 @@
 	error = decode_post_op_attr(xdr, result->dir_attr);
 	if (unlikely(error))
 		goto out;
-	return nfs_stat_to_errno(status);
+	return nfs3_stat_to_errno(status);
 }
 
 /*
@@ -1513,7 +1518,7 @@
 out:
 	return error;
 out_default:
-	return nfs_stat_to_errno(status);
+	return nfs3_stat_to_errno(status);
 }
 
 /*
@@ -1554,7 +1559,7 @@
 out:
 	return error;
 out_default:
-	return nfs_stat_to_errno(status);
+	return nfs3_stat_to_errno(status);
 }
 
 /*
@@ -1636,7 +1641,7 @@
 out:
 	return error;
 out_status:
-	return nfs_stat_to_errno(status);
+	return nfs3_stat_to_errno(status);
 }
 
 /*
@@ -1706,7 +1711,7 @@
 out:
 	return error;
 out_status:
-	return nfs_stat_to_errno(status);
+	return nfs3_stat_to_errno(status);
 }
 
 /*
@@ -1770,7 +1775,7 @@
 	error = decode_wcc_data(xdr, result->dir_attr);
 	if (unlikely(error))
 		goto out;
-	return nfs_stat_to_errno(status);
+	return nfs3_stat_to_errno(status);
 }
 
 /*
@@ -1809,7 +1814,7 @@
 out:
 	return error;
 out_status:
-	return nfs_stat_to_errno(status);
+	return nfs3_stat_to_errno(status);
 }
 
 /*
@@ -1853,7 +1858,7 @@
 out:
 	return error;
 out_status:
-	return nfs_stat_to_errno(status);
+	return nfs3_stat_to_errno(status);
 }
 
 /*
@@ -1896,7 +1901,7 @@
 out:
 	return error;
 out_status:
-	return nfs_stat_to_errno(status);
+	return nfs3_stat_to_errno(status);
 }
 
 /**
@@ -2088,7 +2093,7 @@
 	error = decode_post_op_attr(xdr, result->dir_attr);
 	if (unlikely(error))
 		goto out;
-	return nfs_stat_to_errno(status);
+	return nfs3_stat_to_errno(status);
 }
 
 /*
@@ -2156,7 +2161,7 @@
 out:
 	return error;
 out_status:
-	return nfs_stat_to_errno(status);
+	return nfs3_stat_to_errno(status);
 }
 
 /*
@@ -2232,7 +2237,7 @@
 out:
 	return error;
 out_status:
-	return nfs_stat_to_errno(status);
+	return nfs3_stat_to_errno(status);
 }
 
 /*
@@ -2295,7 +2300,7 @@
 out:
 	return error;
 out_status:
-	return nfs_stat_to_errno(status);
+	return nfs3_stat_to_errno(status);
 }
 
 /*
@@ -2319,7 +2324,7 @@
  */
 static int nfs3_xdr_dec_commit3res(struct rpc_rqst *req,
 				   struct xdr_stream *xdr,
-				   struct nfs_writeres *result)
+				   struct nfs_commitres *result)
 {
 	enum nfs_stat status;
 	int error;
@@ -2336,7 +2341,7 @@
 out:
 	return error;
 out_status:
-	return nfs_stat_to_errno(status);
+	return nfs3_stat_to_errno(status);
 }
 
 #ifdef CONFIG_NFS_V3_ACL
@@ -2401,7 +2406,7 @@
 out:
 	return error;
 out_default:
-	return nfs_stat_to_errno(status);
+	return nfs3_stat_to_errno(status);
 }
 
 static int nfs3_xdr_dec_setacl3res(struct rpc_rqst *req,
@@ -2420,11 +2425,76 @@
 out:
 	return error;
 out_default:
-	return nfs_stat_to_errno(status);
+	return nfs3_stat_to_errno(status);
 }
 
 #endif  /* CONFIG_NFS_V3_ACL */
 
+
+/*
+ * We need to translate between nfs status return values and
+ * the local errno values which may not be the same.
+ */
+static const struct {
+	int stat;
+	int errno;
+} nfs_errtbl[] = {
+	{ NFS_OK,		0		},
+	{ NFSERR_PERM,		-EPERM		},
+	{ NFSERR_NOENT,		-ENOENT		},
+	{ NFSERR_IO,		-errno_NFSERR_IO},
+	{ NFSERR_NXIO,		-ENXIO		},
+/*	{ NFSERR_EAGAIN,	-EAGAIN		}, */
+	{ NFSERR_ACCES,		-EACCES		},
+	{ NFSERR_EXIST,		-EEXIST		},
+	{ NFSERR_XDEV,		-EXDEV		},
+	{ NFSERR_NODEV,		-ENODEV		},
+	{ NFSERR_NOTDIR,	-ENOTDIR	},
+	{ NFSERR_ISDIR,		-EISDIR		},
+	{ NFSERR_INVAL,		-EINVAL		},
+	{ NFSERR_FBIG,		-EFBIG		},
+	{ NFSERR_NOSPC,		-ENOSPC		},
+	{ NFSERR_ROFS,		-EROFS		},
+	{ NFSERR_MLINK,		-EMLINK		},
+	{ NFSERR_NAMETOOLONG,	-ENAMETOOLONG	},
+	{ NFSERR_NOTEMPTY,	-ENOTEMPTY	},
+	{ NFSERR_DQUOT,		-EDQUOT		},
+	{ NFSERR_STALE,		-ESTALE		},
+	{ NFSERR_REMOTE,	-EREMOTE	},
+#ifdef EWFLUSH
+	{ NFSERR_WFLUSH,	-EWFLUSH	},
+#endif
+	{ NFSERR_BADHANDLE,	-EBADHANDLE	},
+	{ NFSERR_NOT_SYNC,	-ENOTSYNC	},
+	{ NFSERR_BAD_COOKIE,	-EBADCOOKIE	},
+	{ NFSERR_NOTSUPP,	-ENOTSUPP	},
+	{ NFSERR_TOOSMALL,	-ETOOSMALL	},
+	{ NFSERR_SERVERFAULT,	-EREMOTEIO	},
+	{ NFSERR_BADTYPE,	-EBADTYPE	},
+	{ NFSERR_JUKEBOX,	-EJUKEBOX	},
+	{ -1,			-EIO		}
+};
+
+/**
+ * nfs3_stat_to_errno - convert an NFS status code to a local errno
+ * @status: NFS status code to convert
+ *
+ * Returns a local errno value, or -EIO if the NFS status code is
+ * not recognized.  This function is used by the NFSv3 XDR decoders.
+ */
+static int nfs3_stat_to_errno(enum nfs_stat status)
+{
+	int i;
+
+	for (i = 0; nfs_errtbl[i].stat != -1; i++) {
+		if (nfs_errtbl[i].stat == (int)status)
+			return nfs_errtbl[i].errno;
+	}
+	dprintk("NFS: Unrecognized nfs status value: %u\n", status);
+	return nfs_errtbl[i].errno;
+}
+
+
 #define PROC(proc, argtype, restype, timer)				\
 [NFS3PROC_##proc] = {							\
 	.p_proc      = NFS3PROC_##proc,					\
diff --git a/fs/nfs/nfs4_fs.h b/fs/nfs/nfs4_fs.h
index 8d75021..cc5900a 100644
--- a/fs/nfs/nfs4_fs.h
+++ b/fs/nfs/nfs4_fs.h
@@ -24,6 +24,8 @@
 	NFS4CLNT_RECALL_SLOT,
 	NFS4CLNT_LEASE_CONFIRM,
 	NFS4CLNT_SERVER_SCOPE_MISMATCH,
+	NFS4CLNT_PURGE_STATE,
+	NFS4CLNT_BIND_CONN_TO_SESSION,
 };
 
 enum nfs4_session_state {
@@ -52,11 +54,6 @@
 	const struct nfs4_state_maintenance_ops *state_renewal_ops;
 };
 
-struct nfs_unique_id {
-	struct rb_node rb_node;
-	__u64 id;
-};
-
 #define NFS_SEQID_CONFIRMED 1
 struct nfs_seqid_counter {
 	ktime_t create_time;
@@ -206,12 +203,18 @@
 extern const struct inode_operations nfs4_dir_inode_operations;
 
 /* nfs4namespace.c */
+rpc_authflavor_t nfs_find_best_sec(struct nfs4_secinfo_flavors *);
 struct rpc_clnt *nfs4_create_sec_client(struct rpc_clnt *, struct inode *, struct qstr *);
+struct vfsmount *nfs4_submount(struct nfs_server *, struct dentry *,
+			       struct nfs_fh *, struct nfs_fattr *);
 
 /* nfs4proc.c */
 extern int nfs4_proc_setclientid(struct nfs_client *, u32, unsigned short, struct rpc_cred *, struct nfs4_setclientid_res *);
 extern int nfs4_proc_setclientid_confirm(struct nfs_client *, struct nfs4_setclientid_res *arg, struct rpc_cred *);
+extern int nfs4_proc_get_rootfh(struct nfs_server *, struct nfs_fh *, struct nfs_fsinfo *);
+extern int nfs4_proc_bind_conn_to_session(struct nfs_client *, struct rpc_cred *cred);
 extern int nfs4_proc_exchange_id(struct nfs_client *clp, struct rpc_cred *cred);
+extern int nfs4_destroy_clientid(struct nfs_client *clp);
 extern int nfs4_init_clientid(struct nfs_client *, struct rpc_cred *);
 extern int nfs41_init_clientid(struct nfs_client *, struct rpc_cred *);
 extern int nfs4_do_close(struct nfs4_state *state, gfp_t gfp_mask, int wait, bool roc);
@@ -239,8 +242,8 @@
 		struct rpc_task *task);
 extern void nfs4_destroy_session(struct nfs4_session *session);
 extern struct nfs4_session *nfs4_alloc_session(struct nfs_client *clp);
-extern int nfs4_proc_create_session(struct nfs_client *);
-extern int nfs4_proc_destroy_session(struct nfs4_session *);
+extern int nfs4_proc_create_session(struct nfs_client *, struct rpc_cred *);
+extern int nfs4_proc_destroy_session(struct nfs4_session *, struct rpc_cred *);
 extern int nfs4_init_session(struct nfs_server *server);
 extern int nfs4_proc_get_lease_time(struct nfs_client *clp,
 		struct nfs_fsinfo *fsinfo);
@@ -292,7 +295,7 @@
 
 extern const struct nfs4_minor_version_ops *nfs_v4_minor_ops[];
 
-extern const u32 nfs4_fattr_bitmap[2];
+extern const u32 nfs4_fattr_bitmap[3];
 extern const u32 nfs4_statfs_bitmap[2];
 extern const u32 nfs4_pathconf_bitmap[2];
 extern const u32 nfs4_fsinfo_bitmap[3];
@@ -310,9 +313,9 @@
 #if defined(CONFIG_NFS_V4_1)
 struct rpc_cred *nfs4_get_machine_cred_locked(struct nfs_client *clp);
 struct rpc_cred *nfs4_get_exchange_id_cred(struct nfs_client *clp);
-extern void nfs4_schedule_session_recovery(struct nfs4_session *);
+extern void nfs4_schedule_session_recovery(struct nfs4_session *, int);
 #else
-static inline void nfs4_schedule_session_recovery(struct nfs4_session *session)
+static inline void nfs4_schedule_session_recovery(struct nfs4_session *session, int err)
 {
 }
 #endif /* CONFIG_NFS_V4_1 */
@@ -334,7 +337,7 @@
 extern void nfs41_handle_sequence_flag_errors(struct nfs_client *clp, u32 flags);
 extern void nfs41_handle_recall_slot(struct nfs_client *clp);
 extern void nfs41_handle_server_scope(struct nfs_client *,
-				      struct server_scope **);
+				      struct nfs41_server_scope **);
 extern void nfs4_put_lock_state(struct nfs4_lock_state *lsp);
 extern int nfs4_set_lock_state(struct nfs4_state *state, struct file_lock *fl);
 extern void nfs4_select_rw_stateid(nfs4_stateid *, struct nfs4_state *,
diff --git a/fs/nfs/nfs4filelayout.c b/fs/nfs/nfs4filelayout.c
index 5acfd9e..e134029 100644
--- a/fs/nfs/nfs4filelayout.c
+++ b/fs/nfs/nfs4filelayout.c
@@ -82,29 +82,76 @@
 	BUG();
 }
 
+static void filelayout_reset_write(struct nfs_write_data *data)
+{
+	struct nfs_pgio_header *hdr = data->header;
+	struct rpc_task *task = &data->task;
+
+	if (!test_and_set_bit(NFS_IOHDR_REDO, &hdr->flags)) {
+		dprintk("%s Reset task %5u for i/o through MDS "
+			"(req %s/%lld, %u bytes @ offset %llu)\n", __func__,
+			data->task.tk_pid,
+			hdr->inode->i_sb->s_id,
+			(long long)NFS_FILEID(hdr->inode),
+			data->args.count,
+			(unsigned long long)data->args.offset);
+
+		task->tk_status = pnfs_write_done_resend_to_mds(hdr->inode,
+							&hdr->pages,
+							hdr->completion_ops);
+	}
+}
+
+static void filelayout_reset_read(struct nfs_read_data *data)
+{
+	struct nfs_pgio_header *hdr = data->header;
+	struct rpc_task *task = &data->task;
+
+	if (!test_and_set_bit(NFS_IOHDR_REDO, &hdr->flags)) {
+		dprintk("%s Reset task %5u for i/o through MDS "
+			"(req %s/%lld, %u bytes @ offset %llu)\n", __func__,
+			data->task.tk_pid,
+			hdr->inode->i_sb->s_id,
+			(long long)NFS_FILEID(hdr->inode),
+			data->args.count,
+			(unsigned long long)data->args.offset);
+
+		task->tk_status = pnfs_read_done_resend_to_mds(hdr->inode,
+							&hdr->pages,
+							hdr->completion_ops);
+	}
+}
+
 static int filelayout_async_handle_error(struct rpc_task *task,
 					 struct nfs4_state *state,
 					 struct nfs_client *clp,
-					 int *reset)
+					 struct pnfs_layout_segment *lseg)
 {
-	struct nfs_server *mds_server = NFS_SERVER(state->inode);
+	struct inode *inode = lseg->pls_layout->plh_inode;
+	struct nfs_server *mds_server = NFS_SERVER(inode);
+	struct nfs4_deviceid_node *devid = FILELAYOUT_DEVID_NODE(lseg);
 	struct nfs_client *mds_client = mds_server->nfs_client;
+	struct nfs4_slot_table *tbl = &clp->cl_session->fc_slot_table;
 
 	if (task->tk_status >= 0)
 		return 0;
-	*reset = 0;
 
 	switch (task->tk_status) {
 	/* MDS state errors */
 	case -NFS4ERR_DELEG_REVOKED:
 	case -NFS4ERR_ADMIN_REVOKED:
 	case -NFS4ERR_BAD_STATEID:
+		if (state == NULL)
+			break;
 		nfs_remove_bad_delegation(state->inode);
 	case -NFS4ERR_OPENMODE:
+		if (state == NULL)
+			break;
 		nfs4_schedule_stateid_recovery(mds_server, state);
 		goto wait_on_recovery;
 	case -NFS4ERR_EXPIRED:
-		nfs4_schedule_stateid_recovery(mds_server, state);
+		if (state != NULL)
+			nfs4_schedule_stateid_recovery(mds_server, state);
 		nfs4_schedule_lease_recovery(mds_client);
 		goto wait_on_recovery;
 	/* DS session errors */
@@ -118,7 +165,7 @@
 		dprintk("%s ERROR %d, Reset session. Exchangeid "
 			"flags 0x%x\n", __func__, task->tk_status,
 			clp->cl_exchange_flags);
-		nfs4_schedule_session_recovery(clp->cl_session);
+		nfs4_schedule_session_recovery(clp->cl_session, task->tk_status);
 		break;
 	case -NFS4ERR_DELAY:
 	case -NFS4ERR_GRACE:
@@ -127,11 +174,48 @@
 		break;
 	case -NFS4ERR_RETRY_UNCACHED_REP:
 		break;
-	default:
-		dprintk("%s DS error. Retry through MDS %d\n", __func__,
+	/* Invalidate Layout errors */
+	case -NFS4ERR_PNFS_NO_LAYOUT:
+	case -ESTALE:           /* mapped NFS4ERR_STALE */
+	case -EBADHANDLE:       /* mapped NFS4ERR_BADHANDLE */
+	case -EISDIR:           /* mapped NFS4ERR_ISDIR */
+	case -NFS4ERR_FHEXPIRED:
+	case -NFS4ERR_WRONG_TYPE:
+		dprintk("%s Invalid layout error %d\n", __func__,
 			task->tk_status);
-		*reset = 1;
-		break;
+		/*
+		 * Destroy layout so new i/o will get a new layout.
+		 * Layout will not be destroyed until all current lseg
+		 * references are put. Mark layout as invalid to resend failed
+		 * i/o and all i/o waiting on the slot table to the MDS until
+		 * layout is destroyed and a new valid layout is obtained.
+		 */
+		set_bit(NFS_LAYOUT_INVALID,
+				&NFS_I(inode)->layout->plh_flags);
+		pnfs_destroy_layout(NFS_I(inode));
+		rpc_wake_up(&tbl->slot_tbl_waitq);
+		goto reset;
+	/* RPC connection errors */
+	case -ECONNREFUSED:
+	case -EHOSTDOWN:
+	case -EHOSTUNREACH:
+	case -ENETUNREACH:
+	case -EIO:
+	case -ETIMEDOUT:
+	case -EPIPE:
+		dprintk("%s DS connection error %d\n", __func__,
+			task->tk_status);
+		if (!filelayout_test_devid_invalid(devid))
+			_pnfs_return_layout(inode);
+		filelayout_mark_devid_invalid(devid);
+		rpc_wake_up(&tbl->slot_tbl_waitq);
+		nfs4_ds_disconnect(clp);
+		/* fall through */
+	default:
+reset:
+		dprintk("%s Retry through MDS. Error %d\n", __func__,
+			task->tk_status);
+		return -NFS4ERR_RESET_TO_MDS;
 	}
 out:
 	task->tk_status = 0;
@@ -148,18 +232,17 @@
 static int filelayout_read_done_cb(struct rpc_task *task,
 				struct nfs_read_data *data)
 {
-	int reset = 0;
+	struct nfs_pgio_header *hdr = data->header;
+	int err;
 
-	dprintk("%s DS read\n", __func__);
+	err = filelayout_async_handle_error(task, data->args.context->state,
+					    data->ds_clp, hdr->lseg);
 
-	if (filelayout_async_handle_error(task, data->args.context->state,
-					  data->ds_clp, &reset) == -EAGAIN) {
-		dprintk("%s calling restart ds_clp %p ds_clp->cl_session %p\n",
-			__func__, data->ds_clp, data->ds_clp->cl_session);
-		if (reset) {
-			pnfs_set_lo_fail(data->lseg);
-			nfs4_reset_read(task, data);
-		}
+	switch (err) {
+	case -NFS4ERR_RESET_TO_MDS:
+		filelayout_reset_read(data);
+		return task->tk_status;
+	case -EAGAIN:
 		rpc_restart_call_prepare(task);
 		return -EAGAIN;
 	}
@@ -175,13 +258,15 @@
 static void
 filelayout_set_layoutcommit(struct nfs_write_data *wdata)
 {
-	if (FILELAYOUT_LSEG(wdata->lseg)->commit_through_mds ||
+	struct nfs_pgio_header *hdr = wdata->header;
+
+	if (FILELAYOUT_LSEG(hdr->lseg)->commit_through_mds ||
 	    wdata->res.verf->committed == NFS_FILE_SYNC)
 		return;
 
 	pnfs_set_layoutcommit(wdata);
-	dprintk("%s ionde %lu pls_end_pos %lu\n", __func__, wdata->inode->i_ino,
-		(unsigned long) NFS_I(wdata->inode)->layout->plh_lwb);
+	dprintk("%s ionde %lu pls_end_pos %lu\n", __func__, hdr->inode->i_ino,
+		(unsigned long) NFS_I(hdr->inode)->layout->plh_lwb);
 }
 
 /*
@@ -191,8 +276,14 @@
  */
 static void filelayout_read_prepare(struct rpc_task *task, void *data)
 {
-	struct nfs_read_data *rdata = (struct nfs_read_data *)data;
+	struct nfs_read_data *rdata = data;
 
+	if (filelayout_reset_to_mds(rdata->header->lseg)) {
+		dprintk("%s task %u reset io to MDS\n", __func__, task->tk_pid);
+		filelayout_reset_read(rdata);
+		rpc_exit(task, 0);
+		return;
+	}
 	rdata->read_done_cb = filelayout_read_done_cb;
 
 	if (nfs41_setup_sequence(rdata->ds_clp->cl_session,
@@ -205,42 +296,47 @@
 
 static void filelayout_read_call_done(struct rpc_task *task, void *data)
 {
-	struct nfs_read_data *rdata = (struct nfs_read_data *)data;
+	struct nfs_read_data *rdata = data;
 
 	dprintk("--> %s task->tk_status %d\n", __func__, task->tk_status);
 
+	if (test_bit(NFS_IOHDR_REDO, &rdata->header->flags) &&
+	    task->tk_status == 0)
+		return;
+
 	/* Note this may cause RPC to be resent */
-	rdata->mds_ops->rpc_call_done(task, data);
+	rdata->header->mds_ops->rpc_call_done(task, data);
 }
 
 static void filelayout_read_count_stats(struct rpc_task *task, void *data)
 {
-	struct nfs_read_data *rdata = (struct nfs_read_data *)data;
+	struct nfs_read_data *rdata = data;
 
-	rpc_count_iostats(task, NFS_SERVER(rdata->inode)->client->cl_metrics);
+	rpc_count_iostats(task, NFS_SERVER(rdata->header->inode)->client->cl_metrics);
 }
 
 static void filelayout_read_release(void *data)
 {
-	struct nfs_read_data *rdata = (struct nfs_read_data *)data;
+	struct nfs_read_data *rdata = data;
 
-	put_lseg(rdata->lseg);
-	rdata->mds_ops->rpc_release(data);
+	nfs_put_client(rdata->ds_clp);
+	rdata->header->mds_ops->rpc_release(data);
 }
 
 static int filelayout_write_done_cb(struct rpc_task *task,
 				struct nfs_write_data *data)
 {
-	int reset = 0;
+	struct nfs_pgio_header *hdr = data->header;
+	int err;
 
-	if (filelayout_async_handle_error(task, data->args.context->state,
-					  data->ds_clp, &reset) == -EAGAIN) {
-		dprintk("%s calling restart ds_clp %p ds_clp->cl_session %p\n",
-			__func__, data->ds_clp, data->ds_clp->cl_session);
-		if (reset) {
-			pnfs_set_lo_fail(data->lseg);
-			nfs4_reset_write(task, data);
-		}
+	err = filelayout_async_handle_error(task, data->args.context->state,
+					    data->ds_clp, hdr->lseg);
+
+	switch (err) {
+	case -NFS4ERR_RESET_TO_MDS:
+		filelayout_reset_write(data);
+		return task->tk_status;
+	case -EAGAIN:
 		rpc_restart_call_prepare(task);
 		return -EAGAIN;
 	}
@@ -250,7 +346,7 @@
 }
 
 /* Fake up some data that will cause nfs_commit_release to retry the writes. */
-static void prepare_to_resend_writes(struct nfs_write_data *data)
+static void prepare_to_resend_writes(struct nfs_commit_data *data)
 {
 	struct nfs_page *first = nfs_list_entry(data->pages.next);
 
@@ -261,19 +357,19 @@
 }
 
 static int filelayout_commit_done_cb(struct rpc_task *task,
-				     struct nfs_write_data *data)
+				     struct nfs_commit_data *data)
 {
-	int reset = 0;
+	int err;
 
-	if (filelayout_async_handle_error(task, data->args.context->state,
-					  data->ds_clp, &reset) == -EAGAIN) {
-		dprintk("%s calling restart ds_clp %p ds_clp->cl_session %p\n",
-			__func__, data->ds_clp, data->ds_clp->cl_session);
-		if (reset) {
-			prepare_to_resend_writes(data);
-			pnfs_set_lo_fail(data->lseg);
-		} else
-			rpc_restart_call_prepare(task);
+	err = filelayout_async_handle_error(task, NULL, data->ds_clp,
+					    data->lseg);
+
+	switch (err) {
+	case -NFS4ERR_RESET_TO_MDS:
+		prepare_to_resend_writes(data);
+		return -EAGAIN;
+	case -EAGAIN:
+		rpc_restart_call_prepare(task);
 		return -EAGAIN;
 	}
 
@@ -282,8 +378,14 @@
 
 static void filelayout_write_prepare(struct rpc_task *task, void *data)
 {
-	struct nfs_write_data *wdata = (struct nfs_write_data *)data;
+	struct nfs_write_data *wdata = data;
 
+	if (filelayout_reset_to_mds(wdata->header->lseg)) {
+		dprintk("%s task %u reset io to MDS\n", __func__, task->tk_pid);
+		filelayout_reset_write(wdata);
+		rpc_exit(task, 0);
+		return;
+	}
 	if (nfs41_setup_sequence(wdata->ds_clp->cl_session,
 				&wdata->args.seq_args, &wdata->res.seq_res,
 				task))
@@ -294,36 +396,66 @@
 
 static void filelayout_write_call_done(struct rpc_task *task, void *data)
 {
-	struct nfs_write_data *wdata = (struct nfs_write_data *)data;
+	struct nfs_write_data *wdata = data;
+
+	if (test_bit(NFS_IOHDR_REDO, &wdata->header->flags) &&
+	    task->tk_status == 0)
+		return;
+
+	/* Note this may cause RPC to be resent */
+	wdata->header->mds_ops->rpc_call_done(task, data);
+}
+
+static void filelayout_write_count_stats(struct rpc_task *task, void *data)
+{
+	struct nfs_write_data *wdata = data;
+
+	rpc_count_iostats(task, NFS_SERVER(wdata->header->inode)->client->cl_metrics);
+}
+
+static void filelayout_write_release(void *data)
+{
+	struct nfs_write_data *wdata = data;
+
+	nfs_put_client(wdata->ds_clp);
+	wdata->header->mds_ops->rpc_release(data);
+}
+
+static void filelayout_commit_prepare(struct rpc_task *task, void *data)
+{
+	struct nfs_commit_data *wdata = data;
+
+	if (nfs41_setup_sequence(wdata->ds_clp->cl_session,
+				&wdata->args.seq_args, &wdata->res.seq_res,
+				task))
+		return;
+
+	rpc_call_start(task);
+}
+
+static void filelayout_write_commit_done(struct rpc_task *task, void *data)
+{
+	struct nfs_commit_data *wdata = data;
 
 	/* Note this may cause RPC to be resent */
 	wdata->mds_ops->rpc_call_done(task, data);
 }
 
-static void filelayout_write_count_stats(struct rpc_task *task, void *data)
+static void filelayout_commit_count_stats(struct rpc_task *task, void *data)
 {
-	struct nfs_write_data *wdata = (struct nfs_write_data *)data;
+	struct nfs_commit_data *cdata = data;
 
-	rpc_count_iostats(task, NFS_SERVER(wdata->inode)->client->cl_metrics);
+	rpc_count_iostats(task, NFS_SERVER(cdata->inode)->client->cl_metrics);
 }
 
-static void filelayout_write_release(void *data)
+static void filelayout_commit_release(void *calldata)
 {
-	struct nfs_write_data *wdata = (struct nfs_write_data *)data;
+	struct nfs_commit_data *data = calldata;
 
-	put_lseg(wdata->lseg);
-	wdata->mds_ops->rpc_release(data);
-}
-
-static void filelayout_commit_release(void *data)
-{
-	struct nfs_write_data *wdata = (struct nfs_write_data *)data;
-
-	nfs_commit_release_pages(wdata);
-	if (atomic_dec_and_test(&NFS_I(wdata->inode)->commits_outstanding))
-		nfs_commit_clear_lock(NFS_I(wdata->inode));
-	put_lseg(wdata->lseg);
-	nfs_commitdata_release(wdata);
+	data->completion_ops->completion(data);
+	put_lseg(data->lseg);
+	nfs_put_client(data->ds_clp);
+	nfs_commitdata_release(data);
 }
 
 static const struct rpc_call_ops filelayout_read_call_ops = {
@@ -341,16 +473,17 @@
 };
 
 static const struct rpc_call_ops filelayout_commit_call_ops = {
-	.rpc_call_prepare = filelayout_write_prepare,
-	.rpc_call_done = filelayout_write_call_done,
-	.rpc_count_stats = filelayout_write_count_stats,
+	.rpc_call_prepare = filelayout_commit_prepare,
+	.rpc_call_done = filelayout_write_commit_done,
+	.rpc_count_stats = filelayout_commit_count_stats,
 	.rpc_release = filelayout_commit_release,
 };
 
 static enum pnfs_try_status
 filelayout_read_pagelist(struct nfs_read_data *data)
 {
-	struct pnfs_layout_segment *lseg = data->lseg;
+	struct nfs_pgio_header *hdr = data->header;
+	struct pnfs_layout_segment *lseg = hdr->lseg;
 	struct nfs4_pnfs_ds *ds;
 	loff_t offset = data->args.offset;
 	u32 j, idx;
@@ -358,25 +491,20 @@
 	int status;
 
 	dprintk("--> %s ino %lu pgbase %u req %Zu@%llu\n",
-		__func__, data->inode->i_ino,
+		__func__, hdr->inode->i_ino,
 		data->args.pgbase, (size_t)data->args.count, offset);
 
-	if (test_bit(NFS_DEVICEID_INVALID, &FILELAYOUT_DEVID_NODE(lseg)->flags))
-		return PNFS_NOT_ATTEMPTED;
-
 	/* Retrieve the correct rpc_client for the byte range */
 	j = nfs4_fl_calc_j_index(lseg, offset);
 	idx = nfs4_fl_calc_ds_index(lseg, j);
 	ds = nfs4_fl_prepare_ds(lseg, idx);
-	if (!ds) {
-		/* Either layout fh index faulty, or ds connect failed */
-		set_bit(lo_fail_bit(IOMODE_RW), &lseg->pls_layout->plh_flags);
-		set_bit(lo_fail_bit(IOMODE_READ), &lseg->pls_layout->plh_flags);
+	if (!ds)
 		return PNFS_NOT_ATTEMPTED;
-	}
-	dprintk("%s USE DS: %s\n", __func__, ds->ds_remotestr);
+	dprintk("%s USE DS: %s cl_count %d\n", __func__,
+		ds->ds_remotestr, atomic_read(&ds->ds_clp->cl_count));
 
 	/* No multipath support. Use first DS */
+	atomic_inc(&ds->ds_clp->cl_count);
 	data->ds_clp = ds->ds_clp;
 	fh = nfs4_fl_select_ds_fh(lseg, j);
 	if (fh)
@@ -386,8 +514,8 @@
 	data->mds_offset = offset;
 
 	/* Perform an asynchronous read to ds */
-	status = nfs_initiate_read(data, ds->ds_clp->cl_rpcclient,
-				   &filelayout_read_call_ops);
+	status = nfs_initiate_read(ds->ds_clp->cl_rpcclient, data,
+				  &filelayout_read_call_ops, RPC_TASK_SOFTCONN);
 	BUG_ON(status != 0);
 	return PNFS_ATTEMPTED;
 }
@@ -396,32 +524,26 @@
 static enum pnfs_try_status
 filelayout_write_pagelist(struct nfs_write_data *data, int sync)
 {
-	struct pnfs_layout_segment *lseg = data->lseg;
+	struct nfs_pgio_header *hdr = data->header;
+	struct pnfs_layout_segment *lseg = hdr->lseg;
 	struct nfs4_pnfs_ds *ds;
 	loff_t offset = data->args.offset;
 	u32 j, idx;
 	struct nfs_fh *fh;
 	int status;
 
-	if (test_bit(NFS_DEVICEID_INVALID, &FILELAYOUT_DEVID_NODE(lseg)->flags))
-		return PNFS_NOT_ATTEMPTED;
-
 	/* Retrieve the correct rpc_client for the byte range */
 	j = nfs4_fl_calc_j_index(lseg, offset);
 	idx = nfs4_fl_calc_ds_index(lseg, j);
 	ds = nfs4_fl_prepare_ds(lseg, idx);
-	if (!ds) {
-		printk(KERN_ERR "NFS: %s: prepare_ds failed, use MDS\n",
-			__func__);
-		set_bit(lo_fail_bit(IOMODE_RW), &lseg->pls_layout->plh_flags);
-		set_bit(lo_fail_bit(IOMODE_READ), &lseg->pls_layout->plh_flags);
+	if (!ds)
 		return PNFS_NOT_ATTEMPTED;
-	}
-	dprintk("%s ino %lu sync %d req %Zu@%llu DS: %s\n", __func__,
-		data->inode->i_ino, sync, (size_t) data->args.count, offset,
-		ds->ds_remotestr);
+	dprintk("%s ino %lu sync %d req %Zu@%llu DS: %s cl_count %d\n",
+		__func__, hdr->inode->i_ino, sync, (size_t) data->args.count,
+		offset, ds->ds_remotestr, atomic_read(&ds->ds_clp->cl_count));
 
 	data->write_done_cb = filelayout_write_done_cb;
+	atomic_inc(&ds->ds_clp->cl_count);
 	data->ds_clp = ds->ds_clp;
 	fh = nfs4_fl_select_ds_fh(lseg, j);
 	if (fh)
@@ -433,8 +555,9 @@
 	data->args.offset = filelayout_get_dserver_offset(lseg, offset);
 
 	/* Perform an asynchronous write */
-	status = nfs_initiate_write(data, ds->ds_clp->cl_rpcclient,
-				    &filelayout_write_call_ops, sync);
+	status = nfs_initiate_write(ds->ds_clp->cl_rpcclient, data,
+				    &filelayout_write_call_ops, sync,
+				    RPC_TASK_SOFTCONN);
 	BUG_ON(status != 0);
 	return PNFS_ATTEMPTED;
 }
@@ -650,10 +773,65 @@
 
 	dprintk("--> %s\n", __func__);
 	nfs4_fl_put_deviceid(fl->dsaddr);
-	kfree(fl->commit_buckets);
+	/* This assumes a single RW lseg */
+	if (lseg->pls_range.iomode == IOMODE_RW) {
+		struct nfs4_filelayout *flo;
+
+		flo = FILELAYOUT_FROM_HDR(lseg->pls_layout);
+		flo->commit_info.nbuckets = 0;
+		kfree(flo->commit_info.buckets);
+		flo->commit_info.buckets = NULL;
+	}
 	_filelayout_free_lseg(fl);
 }
 
+static int
+filelayout_alloc_commit_info(struct pnfs_layout_segment *lseg,
+			     struct nfs_commit_info *cinfo,
+			     gfp_t gfp_flags)
+{
+	struct nfs4_filelayout_segment *fl = FILELAYOUT_LSEG(lseg);
+	struct pnfs_commit_bucket *buckets;
+	int size;
+
+	if (fl->commit_through_mds)
+		return 0;
+	if (cinfo->ds->nbuckets != 0) {
+		/* This assumes there is only one IOMODE_RW lseg.  What
+		 * we really want to do is have a layout_hdr level
+		 * dictionary of <multipath_list4, fh> keys, each
+		 * associated with a struct list_head, populated by calls
+		 * to filelayout_write_pagelist().
+		 * */
+		return 0;
+	}
+
+	size = (fl->stripe_type == STRIPE_SPARSE) ?
+		fl->dsaddr->ds_num : fl->dsaddr->stripe_count;
+
+	buckets = kcalloc(size, sizeof(struct pnfs_commit_bucket),
+			  gfp_flags);
+	if (!buckets)
+		return -ENOMEM;
+	else {
+		int i;
+
+		spin_lock(cinfo->lock);
+		if (cinfo->ds->nbuckets != 0)
+			kfree(buckets);
+		else {
+			cinfo->ds->buckets = buckets;
+			cinfo->ds->nbuckets = size;
+			for (i = 0; i < size; i++) {
+				INIT_LIST_HEAD(&buckets[i].written);
+				INIT_LIST_HEAD(&buckets[i].committing);
+			}
+		}
+		spin_unlock(cinfo->lock);
+		return 0;
+	}
+}
+
 static struct pnfs_layout_segment *
 filelayout_alloc_lseg(struct pnfs_layout_hdr *layoutid,
 		      struct nfs4_layoutget_res *lgr,
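The bucket setup in filelayout_alloc_commit_info() above allocates with GFP flags that may sleep, takes cinfo->lock only to publish the array, and frees the allocation again if another writer got there first. A stripped-down sketch of that allocate-then-recheck pattern (names are hypothetical, not from this patch):

#include <linux/slab.h>
#include <linux/spinlock.h>

/* Sketch only: publish a freshly allocated array under 'lock' unless
 * another thread already installed one; the loser frees its copy. */
static int install_array(spinlock_t *lock, void **slot, size_t size, gfp_t gfp)
{
	void *mem = kzalloc(size, gfp);		/* may sleep, so not under the lock */

	if (!mem)
		return -ENOMEM;
	spin_lock(lock);
	if (*slot)
		kfree(mem);			/* lost the race, keep the winner's array */
	else
		*slot = mem;
	spin_unlock(lock);
	return 0;
}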
@@ -673,29 +851,6 @@
 		_filelayout_free_lseg(fl);
 		return NULL;
 	}
-
-	/* This assumes there is only one IOMODE_RW lseg.  What
-	 * we really want to do is have a layout_hdr level
-	 * dictionary of <multipath_list4, fh> keys, each
-	 * associated with a struct list_head, populated by calls
-	 * to filelayout_write_pagelist().
-	 * */
-	if ((!fl->commit_through_mds) && (lgr->range.iomode == IOMODE_RW)) {
-		int i;
-		int size = (fl->stripe_type == STRIPE_SPARSE) ?
-			fl->dsaddr->ds_num : fl->dsaddr->stripe_count;
-
-		fl->commit_buckets = kcalloc(size, sizeof(struct nfs4_fl_commit_bucket), gfp_flags);
-		if (!fl->commit_buckets) {
-			filelayout_free_lseg(&fl->generic_hdr);
-			return NULL;
-		}
-		fl->number_of_buckets = size;
-		for (i = 0; i < size; i++) {
-			INIT_LIST_HEAD(&fl->commit_buckets[i].written);
-			INIT_LIST_HEAD(&fl->commit_buckets[i].committing);
-		}
-	}
 	return &fl->generic_hdr;
 }
 
@@ -716,8 +871,8 @@
 	    !nfs_generic_pg_test(pgio, prev, req))
 		return false;
 
-	p_stripe = (u64)prev->wb_index << PAGE_CACHE_SHIFT;
-	r_stripe = (u64)req->wb_index << PAGE_CACHE_SHIFT;
+	p_stripe = (u64)req_offset(prev);
+	r_stripe = (u64)req_offset(req);
 	stripe_unit = FILELAYOUT_LSEG(pgio->pg_lseg)->stripe_unit;
 
 	do_div(p_stripe, stripe_unit);
@@ -732,6 +887,16 @@
 {
 	BUG_ON(pgio->pg_lseg != NULL);
 
+	if (req->wb_offset != req->wb_pgbase) {
+		/*
+		 * Handling unaligned pages is difficult, because we have to
+		 * somehow split a req in two in certain cases in the
+		 * pg.test code.  Avoid this by just not using pnfs
+		 * in this case.
+		 */
+		nfs_pageio_reset_read_mds(pgio);
+		return;
+	}
 	pgio->pg_lseg = pnfs_update_layout(pgio->pg_inode,
 					   req->wb_context,
 					   0,
@@ -747,8 +912,13 @@
 filelayout_pg_init_write(struct nfs_pageio_descriptor *pgio,
 			 struct nfs_page *req)
 {
+	struct nfs_commit_info cinfo;
+	int status;
+
 	BUG_ON(pgio->pg_lseg != NULL);
 
+	if (req->wb_offset != req->wb_pgbase)
+		goto out_mds;
 	pgio->pg_lseg = pnfs_update_layout(pgio->pg_inode,
 					   req->wb_context,
 					   0,
@@ -757,7 +927,17 @@
 					   GFP_NOFS);
 	/* If no lseg, fall back to write through mds */
 	if (pgio->pg_lseg == NULL)
-		nfs_pageio_reset_write_mds(pgio);
+		goto out_mds;
+	nfs_init_cinfo(&cinfo, pgio->pg_inode, pgio->pg_dreq);
+	status = filelayout_alloc_commit_info(pgio->pg_lseg, &cinfo, GFP_NOFS);
+	if (status < 0) {
+		put_lseg(pgio->pg_lseg);
+		pgio->pg_lseg = NULL;
+		goto out_mds;
+	}
+	return;
+out_mds:
+	nfs_pageio_reset_write_mds(pgio);
 }
 
 static const struct nfs_pageio_ops filelayout_pg_read_ops = {
@@ -784,43 +964,42 @@
  * If this will make the bucket empty, it will need to put the lseg reference.
  */
 static void
-filelayout_clear_request_commit(struct nfs_page *req)
+filelayout_clear_request_commit(struct nfs_page *req,
+				struct nfs_commit_info *cinfo)
 {
 	struct pnfs_layout_segment *freeme = NULL;
-	struct inode *inode = req->wb_context->dentry->d_inode;
 
-	spin_lock(&inode->i_lock);
+	spin_lock(cinfo->lock);
 	if (!test_and_clear_bit(PG_COMMIT_TO_DS, &req->wb_flags))
 		goto out;
+	cinfo->ds->nwritten--;
 	if (list_is_singular(&req->wb_list)) {
-		struct pnfs_layout_segment *lseg;
+		struct pnfs_commit_bucket *bucket;
 
-		/* From here we can find the bucket, but for the moment,
-		 * since there is only one relevant lseg...
-		 */
-		list_for_each_entry(lseg, &NFS_I(inode)->layout->plh_segs, pls_list) {
-			if (lseg->pls_range.iomode == IOMODE_RW) {
-				freeme = lseg;
-				break;
-			}
-		}
+		bucket = list_first_entry(&req->wb_list,
+					  struct pnfs_commit_bucket,
+					  written);
+		freeme = bucket->wlseg;
+		bucket->wlseg = NULL;
 	}
 out:
-	nfs_request_remove_commit_list(req);
-	spin_unlock(&inode->i_lock);
+	nfs_request_remove_commit_list(req, cinfo);
+	spin_unlock(cinfo->lock);
 	put_lseg(freeme);
 }
 
 static struct list_head *
 filelayout_choose_commit_list(struct nfs_page *req,
-			      struct pnfs_layout_segment *lseg)
+			      struct pnfs_layout_segment *lseg,
+			      struct nfs_commit_info *cinfo)
 {
 	struct nfs4_filelayout_segment *fl = FILELAYOUT_LSEG(lseg);
 	u32 i, j;
 	struct list_head *list;
+	struct pnfs_commit_bucket *buckets;
 
 	if (fl->commit_through_mds)
-		return &NFS_I(req->wb_context->dentry->d_inode)->commit_list;
+		return &cinfo->mds->list;
 
 	/* Note that we are calling nfs4_fl_calc_j_index on each page
 	 * that ends up being committed to a data server.  An attractive
@@ -828,31 +1007,33 @@
 	 * to store the value calculated in filelayout_write_pagelist
 	 * and just use that here.
 	 */
-	j = nfs4_fl_calc_j_index(lseg,
-				 (loff_t)req->wb_index << PAGE_CACHE_SHIFT);
+	j = nfs4_fl_calc_j_index(lseg, req_offset(req));
 	i = select_bucket_index(fl, j);
-	list = &fl->commit_buckets[i].written;
+	buckets = cinfo->ds->buckets;
+	list = &buckets[i].written;
 	if (list_empty(list)) {
 		/* Non-empty buckets hold a reference on the lseg.  That ref
 		 * is normally transferred to the COMMIT call and released
 		 * there.  It could also be released if the last req is pulled
 		 * off due to a rewrite, in which case it will be done in
-		 * filelayout_remove_commit_req
+		 * filelayout_clear_request_commit
 		 */
-		get_lseg(lseg);
+		buckets[i].wlseg = get_lseg(lseg);
 	}
 	set_bit(PG_COMMIT_TO_DS, &req->wb_flags);
+	cinfo->ds->nwritten++;
 	return list;
 }
 
 static void
 filelayout_mark_request_commit(struct nfs_page *req,
-		struct pnfs_layout_segment *lseg)
+			       struct pnfs_layout_segment *lseg,
+			       struct nfs_commit_info *cinfo)
 {
 	struct list_head *list;
 
-	list = filelayout_choose_commit_list(req, lseg);
-	nfs_request_add_commit_list(req, list);
+	list = filelayout_choose_commit_list(req, lseg, cinfo);
+	nfs_request_add_commit_list(req, list, cinfo);
 }
 
 static u32 calc_ds_index_from_commit(struct pnfs_layout_segment *lseg, u32 i)
@@ -880,7 +1061,7 @@
 	return flseg->fh_array[i];
 }
 
-static int filelayout_initiate_commit(struct nfs_write_data *data, int how)
+static int filelayout_initiate_commit(struct nfs_commit_data *data, int how)
 {
 	struct pnfs_layout_segment *lseg = data->lseg;
 	struct nfs4_pnfs_ds *ds;
@@ -890,135 +1071,138 @@
 	idx = calc_ds_index_from_commit(lseg, data->ds_commit_index);
 	ds = nfs4_fl_prepare_ds(lseg, idx);
 	if (!ds) {
-		printk(KERN_ERR "NFS: %s: prepare_ds failed, use MDS\n",
-			__func__);
-		set_bit(lo_fail_bit(IOMODE_RW), &lseg->pls_layout->plh_flags);
-		set_bit(lo_fail_bit(IOMODE_READ), &lseg->pls_layout->plh_flags);
 		prepare_to_resend_writes(data);
 		filelayout_commit_release(data);
 		return -EAGAIN;
 	}
-	dprintk("%s ino %lu, how %d\n", __func__, data->inode->i_ino, how);
-	data->write_done_cb = filelayout_commit_done_cb;
+	dprintk("%s ino %lu, how %d cl_count %d\n", __func__,
+		data->inode->i_ino, how, atomic_read(&ds->ds_clp->cl_count));
+	data->commit_done_cb = filelayout_commit_done_cb;
+	atomic_inc(&ds->ds_clp->cl_count);
 	data->ds_clp = ds->ds_clp;
 	fh = select_ds_fh_from_commit(lseg, data->ds_commit_index);
 	if (fh)
 		data->args.fh = fh;
-	return nfs_initiate_commit(data, ds->ds_clp->cl_rpcclient,
-				   &filelayout_commit_call_ops, how);
-}
-
-/*
- * This is only useful while we are using whole file layouts.
- */
-static struct pnfs_layout_segment *
-find_only_write_lseg_locked(struct inode *inode)
-{
-	struct pnfs_layout_segment *lseg;
-
-	list_for_each_entry(lseg, &NFS_I(inode)->layout->plh_segs, pls_list)
-		if (lseg->pls_range.iomode == IOMODE_RW)
-			return lseg;
-	return NULL;
-}
-
-static struct pnfs_layout_segment *find_only_write_lseg(struct inode *inode)
-{
-	struct pnfs_layout_segment *rv;
-
-	spin_lock(&inode->i_lock);
-	rv = find_only_write_lseg_locked(inode);
-	if (rv)
-		get_lseg(rv);
-	spin_unlock(&inode->i_lock);
-	return rv;
+	return nfs_initiate_commit(ds->ds_clp->cl_rpcclient, data,
+				   &filelayout_commit_call_ops, how,
+				   RPC_TASK_SOFTCONN);
 }
 
 static int
-filelayout_scan_ds_commit_list(struct nfs4_fl_commit_bucket *bucket, int max,
-		spinlock_t *lock)
+transfer_commit_list(struct list_head *src, struct list_head *dst,
+		     struct nfs_commit_info *cinfo, int max)
 {
-	struct list_head *src = &bucket->written;
-	struct list_head *dst = &bucket->committing;
 	struct nfs_page *req, *tmp;
 	int ret = 0;
 
 	list_for_each_entry_safe(req, tmp, src, wb_list) {
 		if (!nfs_lock_request(req))
 			continue;
-		if (cond_resched_lock(lock))
+		kref_get(&req->wb_kref);
+		if (cond_resched_lock(cinfo->lock))
 			list_safe_reset_next(req, tmp, wb_list);
-		nfs_request_remove_commit_list(req);
+		nfs_request_remove_commit_list(req, cinfo);
 		clear_bit(PG_COMMIT_TO_DS, &req->wb_flags);
 		nfs_list_add_request(req, dst);
 		ret++;
-		if (ret == max)
+		if ((ret == max) && !cinfo->dreq)
 			break;
 	}
 	return ret;
 }
 
-/* Move reqs from written to committing lists, returning count of number moved.
- * Note called with i_lock held.
- */
-static int filelayout_scan_commit_lists(struct inode *inode, int max,
-		spinlock_t *lock)
+static int
+filelayout_scan_ds_commit_list(struct pnfs_commit_bucket *bucket,
+			       struct nfs_commit_info *cinfo,
+			       int max)
 {
-	struct pnfs_layout_segment *lseg;
-	struct nfs4_filelayout_segment *fl;
+	struct list_head *src = &bucket->written;
+	struct list_head *dst = &bucket->committing;
+	int ret;
+
+	ret = transfer_commit_list(src, dst, cinfo, max);
+	if (ret) {
+		cinfo->ds->nwritten -= ret;
+		cinfo->ds->ncommitting += ret;
+		bucket->clseg = bucket->wlseg;
+		if (list_empty(src))
+			bucket->wlseg = NULL;
+		else
+			get_lseg(bucket->clseg);
+	}
+	return ret;
+}
+
+/* Move reqs from written to committing lists, returning count of number moved.
+ * Note called with cinfo->lock held.
+ */
+static int filelayout_scan_commit_lists(struct nfs_commit_info *cinfo,
+					int max)
+{
 	int i, rv = 0, cnt;
 
-	lseg = find_only_write_lseg_locked(inode);
-	if (!lseg)
-		goto out_done;
-	fl = FILELAYOUT_LSEG(lseg);
-	if (fl->commit_through_mds)
-		goto out_done;
-	for (i = 0; i < fl->number_of_buckets && max != 0; i++) {
-		cnt = filelayout_scan_ds_commit_list(&fl->commit_buckets[i],
-				max, lock);
+	for (i = 0; i < cinfo->ds->nbuckets && max != 0; i++) {
+		cnt = filelayout_scan_ds_commit_list(&cinfo->ds->buckets[i],
+						     cinfo, max);
 		max -= cnt;
 		rv += cnt;
 	}
-out_done:
 	return rv;
 }
 
-static unsigned int
-alloc_ds_commits(struct inode *inode, struct list_head *list)
+/* Pull everything off the committing lists and dump into @dst */
+static void filelayout_recover_commit_reqs(struct list_head *dst,
+					   struct nfs_commit_info *cinfo)
 {
-	struct pnfs_layout_segment *lseg;
-	struct nfs4_filelayout_segment *fl;
-	struct nfs_write_data *data;
+	struct pnfs_commit_bucket *b;
+	int i;
+
+	/* NOTE cinfo->lock is NOT held, relying on the fact that this is
+	 * only called on a single thread per dreq.
+	 * Can't take the lock because we need to do put_lseg
+	 */
+	for (i = 0, b = cinfo->ds->buckets; i < cinfo->ds->nbuckets; i++, b++) {
+		if (transfer_commit_list(&b->written, dst, cinfo, 0)) {
+			BUG_ON(!list_empty(&b->written));
+			put_lseg(b->wlseg);
+			b->wlseg = NULL;
+		}
+	}
+	cinfo->ds->nwritten = 0;
+}
+
+static unsigned int
+alloc_ds_commits(struct nfs_commit_info *cinfo, struct list_head *list)
+{
+	struct pnfs_ds_commit_info *fl_cinfo;
+	struct pnfs_commit_bucket *bucket;
+	struct nfs_commit_data *data;
 	int i, j;
 	unsigned int nreq = 0;
 
-	/* Won't need this when non-whole file layout segments are supported
-	 * instead we will use a pnfs_layout_hdr structure */
-	lseg = find_only_write_lseg(inode);
-	if (!lseg)
-		return 0;
-	fl = FILELAYOUT_LSEG(lseg);
-	for (i = 0; i < fl->number_of_buckets; i++) {
-		if (list_empty(&fl->commit_buckets[i].committing))
+	fl_cinfo = cinfo->ds;
+	bucket = fl_cinfo->buckets;
+	for (i = 0; i < fl_cinfo->nbuckets; i++, bucket++) {
+		if (list_empty(&bucket->committing))
 			continue;
 		data = nfs_commitdata_alloc();
 		if (!data)
 			break;
 		data->ds_commit_index = i;
-		data->lseg = lseg;
+		data->lseg = bucket->clseg;
+		bucket->clseg = NULL;
 		list_add(&data->pages, list);
 		nreq++;
 	}
 
 	/* Clean up on error */
-	for (j = i; j < fl->number_of_buckets; j++) {
-		if (list_empty(&fl->commit_buckets[i].committing))
+	for (j = i; j < fl_cinfo->nbuckets; j++, bucket++) {
+		if (list_empty(&bucket->committing))
 			continue;
-		nfs_retry_commit(&fl->commit_buckets[i].committing, lseg);
-		put_lseg(lseg);  /* associated with emptying bucket */
+		nfs_retry_commit(&bucket->committing, bucket->clseg, cinfo);
+		put_lseg(bucket->clseg);
+		bucket->clseg = NULL;
 	}
-	put_lseg(lseg);
 	/* Caller will clean up entries put on list */
 	return nreq;
 }
@@ -1026,9 +1210,9 @@
 /* This follows nfs_commit_list pretty closely */
 static int
 filelayout_commit_pagelist(struct inode *inode, struct list_head *mds_pages,
-			   int how)
+			   int how, struct nfs_commit_info *cinfo)
 {
-	struct nfs_write_data	*data, *tmp;
+	struct nfs_commit_data *data, *tmp;
 	LIST_HEAD(list);
 	unsigned int nreq = 0;
 
@@ -1039,30 +1223,34 @@
 			list_add(&data->pages, &list);
 			nreq++;
 		} else
-			nfs_retry_commit(mds_pages, NULL);
+			nfs_retry_commit(mds_pages, NULL, cinfo);
 	}
 
-	nreq += alloc_ds_commits(inode, &list);
+	nreq += alloc_ds_commits(cinfo, &list);
 
 	if (nreq == 0) {
-		nfs_commit_clear_lock(NFS_I(inode));
+		cinfo->completion_ops->error_cleanup(NFS_I(inode));
 		goto out;
 	}
 
-	atomic_add(nreq, &NFS_I(inode)->commits_outstanding);
+	atomic_add(nreq, &cinfo->mds->rpcs_out);
 
 	list_for_each_entry_safe(data, tmp, &list, pages) {
 		list_del_init(&data->pages);
 		if (!data->lseg) {
-			nfs_init_commit(data, mds_pages, NULL);
-			nfs_initiate_commit(data, NFS_CLIENT(inode),
-					    data->mds_ops, how);
+			nfs_init_commit(data, mds_pages, NULL, cinfo);
+			nfs_initiate_commit(NFS_CLIENT(inode), data,
+					    data->mds_ops, how, 0);
 		} else {
-			nfs_init_commit(data, &FILELAYOUT_LSEG(data->lseg)->commit_buckets[data->ds_commit_index].committing, data->lseg);
+			struct pnfs_commit_bucket *buckets;
+
+			buckets = cinfo->ds->buckets;
+			nfs_init_commit(data, &buckets[data->ds_commit_index].committing, data->lseg, cinfo);
 			filelayout_initiate_commit(data, how);
 		}
 	}
 out:
+	cinfo->ds->ncommitting = 0;
 	return PNFS_ATTEMPTED;
 }
 
@@ -1072,17 +1260,47 @@
 	nfs4_fl_free_deviceid(container_of(d, struct nfs4_file_layout_dsaddr, id_node));
 }
 
+static struct pnfs_layout_hdr *
+filelayout_alloc_layout_hdr(struct inode *inode, gfp_t gfp_flags)
+{
+	struct nfs4_filelayout *flo;
+
+	flo = kzalloc(sizeof(*flo), gfp_flags);
+	return &flo->generic_hdr;
+}
+
+static void
+filelayout_free_layout_hdr(struct pnfs_layout_hdr *lo)
+{
+	kfree(FILELAYOUT_FROM_HDR(lo));
+}
+
+static struct pnfs_ds_commit_info *
+filelayout_get_ds_info(struct inode *inode)
+{
+	struct pnfs_layout_hdr *layout = NFS_I(inode)->layout;
+
+	if (layout == NULL)
+		return NULL;
+	else
+		return &FILELAYOUT_FROM_HDR(layout)->commit_info;
+}
+
 static struct pnfs_layoutdriver_type filelayout_type = {
 	.id			= LAYOUT_NFSV4_1_FILES,
 	.name			= "LAYOUT_NFSV4_1_FILES",
 	.owner			= THIS_MODULE,
+	.alloc_layout_hdr	= filelayout_alloc_layout_hdr,
+	.free_layout_hdr	= filelayout_free_layout_hdr,
 	.alloc_lseg		= filelayout_alloc_lseg,
 	.free_lseg		= filelayout_free_lseg,
 	.pg_read_ops		= &filelayout_pg_read_ops,
 	.pg_write_ops		= &filelayout_pg_write_ops,
+	.get_ds_info		= &filelayout_get_ds_info,
 	.mark_request_commit	= filelayout_mark_request_commit,
 	.clear_request_commit	= filelayout_clear_request_commit,
 	.scan_commit_lists	= filelayout_scan_commit_lists,
+	.recover_commit_reqs	= filelayout_recover_commit_reqs,
 	.commit_pagelist	= filelayout_commit_pagelist,
 	.read_pagelist		= filelayout_read_pagelist,
 	.write_pagelist		= filelayout_write_pagelist,
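
As background for the commit-bucket rework above: when filelayout_scan_ds_commit_list() moves requests from a bucket's written list to its committing list, the lseg reference that backed the written list is handed over to the committing list, and a second reference is taken only if requests remain on the written side. A minimal standalone sketch of that rule follows; struct lseg, bucket_t and lseg_get() are simplified stand-ins for the kernel's pnfs types and get_lseg()/put_lseg(), not the real definitions.

struct lseg { int refcount; };

struct bucket_t {
	struct lseg *wlseg;	/* reference held on behalf of the written list */
	struct lseg *clseg;	/* reference held on behalf of the committing list */
	int nwritten;
	int ncommitting;
};

static void lseg_get(struct lseg *l)
{
	l->refcount++;
}

/* Move @moved requests from written to committing.  The bucket must end up
 * holding one lseg reference per non-empty list, which is why an extra
 * reference is taken only when some requests stay on the written list.
 */
static void bucket_move_to_committing(struct bucket_t *b, int moved)
{
	if (moved == 0)
		return;
	b->nwritten -= moved;
	b->ncommitting += moved;
	b->clseg = b->wlseg;		/* committing list reuses the reference */
	if (b->nwritten == 0)
		b->wlseg = NULL;	/* written list gave its reference away */
	else
		lseg_get(b->clseg);	/* both lists populated: take another */
}

filelayout_recover_commit_reqs() above then drops the written-side reference with put_lseg() once a bucket's written list has been drained.
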
diff --git a/fs/nfs/nfs4filelayout.h b/fs/nfs/nfs4filelayout.h
index 21190bb..43fe802 100644
--- a/fs/nfs/nfs4filelayout.h
+++ b/fs/nfs/nfs4filelayout.h
@@ -33,6 +33,13 @@
 #include "pnfs.h"
 
 /*
+ * Default data server connection timeout and retrans values.
+ * Set by the module parameters dataserver_timeo and dataserver_retrans.
+ */
+#define NFS4_DEF_DS_TIMEO   60
+#define NFS4_DEF_DS_RETRANS 5
+
+/*
  * Field testing shows we need to support up to 4096 stripe indices.
  * We store each index as a u8 (u32 on the wire) to keep the memory footprint
  * reasonable. This in turn means we support a maximum of 256
@@ -41,6 +48,9 @@
 #define NFS4_PNFS_MAX_STRIPE_CNT 4096
 #define NFS4_PNFS_MAX_MULTI_CNT  256 /* 256 fit into a u8 stripe_index */
 
+/* error codes for internal use */
+#define NFS4ERR_RESET_TO_MDS   12001
+
 enum stripetype4 {
 	STRIPE_SPARSE = 1,
 	STRIPE_DENSE = 2
@@ -62,23 +72,14 @@
 	atomic_t		ds_count;
 };
 
-/* nfs4_file_layout_dsaddr flags */
-#define NFS4_DEVICE_ID_NEG_ENTRY	0x00000001
-
 struct nfs4_file_layout_dsaddr {
 	struct nfs4_deviceid_node	id_node;
-	unsigned long			flags;
 	u32				stripe_count;
 	u8				*stripe_indices;
 	u32				ds_num;
 	struct nfs4_pnfs_ds		*ds_list[1];
 };
 
-struct nfs4_fl_commit_bucket {
-	struct list_head written;
-	struct list_head committing;
-};
-
 struct nfs4_filelayout_segment {
 	struct pnfs_layout_segment generic_hdr;
 	u32 stripe_type;
@@ -89,10 +90,19 @@
 	struct nfs4_file_layout_dsaddr *dsaddr; /* Point to GETDEVINFO data */
 	unsigned int num_fh;
 	struct nfs_fh **fh_array;
-	struct nfs4_fl_commit_bucket *commit_buckets; /* Sort commits to ds */
-	int number_of_buckets;
 };
 
+struct nfs4_filelayout {
+	struct pnfs_layout_hdr generic_hdr;
+	struct pnfs_ds_commit_info commit_info;
+};
+
+static inline struct nfs4_filelayout *
+FILELAYOUT_FROM_HDR(struct pnfs_layout_hdr *lo)
+{
+	return container_of(lo, struct nfs4_filelayout, generic_hdr);
+}
+
 static inline struct nfs4_filelayout_segment *
 FILELAYOUT_LSEG(struct pnfs_layout_segment *lseg)
 {
@@ -107,6 +117,36 @@
 	return &FILELAYOUT_LSEG(lseg)->dsaddr->id_node;
 }
 
+static inline void
+filelayout_mark_devid_invalid(struct nfs4_deviceid_node *node)
+{
+	u32 *p = (u32 *)&node->deviceid;
+
+	printk(KERN_WARNING "NFS: Deviceid [%x%x%x%x] marked out of use.\n",
+		p[0], p[1], p[2], p[3]);
+
+	set_bit(NFS_DEVICEID_INVALID, &node->flags);
+}
+
+static inline bool
+filelayout_test_layout_invalid(struct pnfs_layout_hdr *lo)
+{
+	return test_bit(NFS_LAYOUT_INVALID, &lo->plh_flags);
+}
+
+static inline bool
+filelayout_test_devid_invalid(struct nfs4_deviceid_node *node)
+{
+	return test_bit(NFS_DEVICEID_INVALID, &node->flags);
+}
+
+static inline bool
+filelayout_reset_to_mds(struct pnfs_layout_segment *lseg)
+{
+	return filelayout_test_devid_invalid(FILELAYOUT_DEVID_NODE(lseg)) ||
+		filelayout_test_layout_invalid(lseg->pls_layout);
+}
+
 extern struct nfs_fh *
 nfs4_fl_select_ds_fh(struct pnfs_layout_segment *lseg, u32 j);
 
@@ -119,5 +159,6 @@
 extern void nfs4_fl_free_deviceid(struct nfs4_file_layout_dsaddr *dsaddr);
 struct nfs4_file_layout_dsaddr *
 get_device_info(struct inode *inode, struct nfs4_deviceid *dev_id, gfp_t gfp_flags);
+void nfs4_ds_disconnect(struct nfs_client *clp);
 
 #endif /* FS_NFS_NFS4FILELAYOUT_H */
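
The new struct nfs4_filelayout above embeds the generic pnfs_layout_hdr and recovers the containing structure with container_of() in FILELAYOUT_FROM_HDR(). A minimal sketch of the same embed-and-recover pattern, using made-up stand-in types so it is self-contained; the kernel's container_of() additionally performs type checking.

#include <stddef.h>

#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

struct generic_hdr { int refcount; };

/* Stand-in for struct nfs4_filelayout: driver-private state rides along
 * with the generic header that the common pNFS code knows about.
 */
struct filelayout_like {
	struct generic_hdr hdr;
	int commit_info;
};

/* Common code only ever sees the embedded header... */
static struct generic_hdr *alloc_hdr(struct filelayout_like *flo)
{
	return &flo->hdr;
}

/* ...and the layout driver gets its private state back via container_of(). */
static struct filelayout_like *from_hdr(struct generic_hdr *hdr)
{
	return container_of(hdr, struct filelayout_like, hdr);
}

This is what lets filelayout_get_ds_info() hand the per-layout commit_info to the generic commit code without widening struct pnfs_layout_hdr itself.
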
diff --git a/fs/nfs/nfs4filelayoutdev.c b/fs/nfs/nfs4filelayoutdev.c
index c9cff9a..a1fab8d 100644
--- a/fs/nfs/nfs4filelayoutdev.c
+++ b/fs/nfs/nfs4filelayoutdev.c
@@ -30,12 +30,16 @@
 
 #include <linux/nfs_fs.h>
 #include <linux/vmalloc.h>
+#include <linux/module.h>
 
 #include "internal.h"
 #include "nfs4filelayout.h"
 
 #define NFSDBG_FACILITY		NFSDBG_PNFS_LD
 
+static unsigned int dataserver_timeo = NFS4_DEF_DS_TIMEO;
+static unsigned int dataserver_retrans = NFS4_DEF_DS_RETRANS;
+
 /*
  * Data server cache
  *
@@ -145,6 +149,28 @@
 }
 
 /*
+ * Look up the DS by nfs_client pointer and zero the data server's client pointer.
+ */
+void nfs4_ds_disconnect(struct nfs_client *clp)
+{
+	struct nfs4_pnfs_ds *ds;
+	struct nfs_client *found = NULL;
+
+	dprintk("%s clp %p\n", __func__, clp);
+	spin_lock(&nfs4_ds_cache_lock);
+	list_for_each_entry(ds, &nfs4_data_server_cache, ds_node)
+		if (ds->ds_clp && ds->ds_clp == clp) {
+			found = ds->ds_clp;
+			ds->ds_clp = NULL;
+		}
+	spin_unlock(&nfs4_ds_cache_lock);
+	if (found) {
+		set_bit(NFS_CS_STOP_RENEW, &clp->cl_res_state);
+		nfs_put_client(clp);
+	}
+}
+
+/*
  * Create an rpc connection to the nfs4_pnfs_ds data server
  * Currently only supports IPv4 and IPv6 addresses
  */
@@ -165,8 +191,9 @@
 			__func__, ds->ds_remotestr, da->da_remotestr);
 
 		clp = nfs4_set_ds_client(mds_srv->nfs_client,
-				 (struct sockaddr *)&da->da_addr,
-				 da->da_addrlen, IPPROTO_TCP);
+					(struct sockaddr *)&da->da_addr,
+					da->da_addrlen, IPPROTO_TCP,
+					dataserver_timeo, dataserver_retrans);
 		if (!IS_ERR(clp))
 			break;
 	}
@@ -176,28 +203,7 @@
 		goto out;
 	}
 
-	if ((clp->cl_exchange_flags & EXCHGID4_FLAG_MASK_PNFS) != 0) {
-		if (!is_ds_client(clp)) {
-			status = -ENODEV;
-			goto out_put;
-		}
-		ds->ds_clp = clp;
-		dprintk("%s [existing] server=%s\n", __func__,
-			ds->ds_remotestr);
-		goto out;
-	}
-
-	/*
-	 * Do not set NFS_CS_CHECK_LEASE_TIME instead set the DS lease to
-	 * be equal to the MDS lease. Renewal is scheduled in create_session.
-	 */
-	spin_lock(&mds_srv->nfs_client->cl_lock);
-	clp->cl_lease_time = mds_srv->nfs_client->cl_lease_time;
-	spin_unlock(&mds_srv->nfs_client->cl_lock);
-	clp->cl_last_renewal = jiffies;
-
-	/* New nfs_client */
-	status = nfs4_init_ds_session(clp);
+	status = nfs4_init_ds_session(clp, mds_srv->nfs_client->cl_lease_time);
 	if (status)
 		goto out_put;
 
@@ -602,7 +608,7 @@
 
 		mp_count = be32_to_cpup(p); /* multipath count */
 		for (j = 0; j < mp_count; j++) {
-			da = decode_ds_addr(NFS_SERVER(ino)->nfs_client->net,
+			da = decode_ds_addr(NFS_SERVER(ino)->nfs_client->cl_net,
 					    &stream, gfp_flags);
 			if (da)
 				list_add_tail(&da->da_node, &dsaddrs);
@@ -791,48 +797,42 @@
 	return flseg->fh_array[i];
 }
 
-static void
-filelayout_mark_devid_negative(struct nfs4_file_layout_dsaddr *dsaddr,
-			       int err, const char *ds_remotestr)
-{
-	u32 *p = (u32 *)&dsaddr->id_node.deviceid;
-
-	printk(KERN_ERR "NFS: data server %s connection error %d."
-		" Deviceid [%x%x%x%x] marked out of use.\n",
-		ds_remotestr, err, p[0], p[1], p[2], p[3]);
-
-	spin_lock(&nfs4_ds_cache_lock);
-	dsaddr->flags |= NFS4_DEVICE_ID_NEG_ENTRY;
-	spin_unlock(&nfs4_ds_cache_lock);
-}
-
 struct nfs4_pnfs_ds *
 nfs4_fl_prepare_ds(struct pnfs_layout_segment *lseg, u32 ds_idx)
 {
 	struct nfs4_file_layout_dsaddr *dsaddr = FILELAYOUT_LSEG(lseg)->dsaddr;
 	struct nfs4_pnfs_ds *ds = dsaddr->ds_list[ds_idx];
+	struct nfs4_deviceid_node *devid = FILELAYOUT_DEVID_NODE(lseg);
+
+	if (filelayout_test_devid_invalid(devid))
+		return NULL;
 
 	if (ds == NULL) {
 		printk(KERN_ERR "NFS: %s: No data server for offset index %d\n",
 			__func__, ds_idx);
-		return NULL;
+		goto mark_dev_invalid;
 	}
 
 	if (!ds->ds_clp) {
 		struct nfs_server *s = NFS_SERVER(lseg->pls_layout->plh_inode);
 		int err;
 
-		if (dsaddr->flags & NFS4_DEVICE_ID_NEG_ENTRY) {
-			/* Already tried to connect, don't try again */
-			dprintk("%s Deviceid marked out of use\n", __func__);
-			return NULL;
-		}
 		err = nfs4_ds_connect(s, ds);
-		if (err) {
-			filelayout_mark_devid_negative(dsaddr, err,
-						       ds->ds_remotestr);
-			return NULL;
-		}
+		if (err)
+			goto mark_dev_invalid;
 	}
 	return ds;
+
+mark_dev_invalid:
+	filelayout_mark_devid_invalid(devid);
+	return NULL;
 }
+
+module_param(dataserver_retrans, uint, 0644);
+MODULE_PARM_DESC(dataserver_retrans, "The number of times the NFSv4.1 client "
+			"retries a request before it attempts further "
+			"recovery action.");
+module_param(dataserver_timeo, uint, 0644);
+MODULE_PARM_DESC(dataserver_timeo, "The time (in tenths of a second) the "
+			"NFSv4.1 client waits for a response from a "
+			"data server before it retries an NFS request.");
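
dataserver_timeo is expressed in tenths of a second, matching the existing NFS timeo convention. A rough sketch of how such a value would translate into an RPC timeout follows; the struct, helper name and HZ value are illustrative assumptions for this note, not the kernel's actual rpc_timeout plumbing.

#define HZ 100				/* assumed tick rate for the example */

struct ds_timeout {
	unsigned long to_initval;	/* initial timeout, in jiffies */
	unsigned int to_retries;	/* retransmissions before further recovery */
};

static void ds_timeout_init(struct ds_timeout *to,
			    unsigned int timeo_tenths, unsigned int retrans)
{
	/* e.g. dataserver_timeo=60 -> 6 seconds -> 600 jiffies at HZ=100 */
	to->to_initval = timeo_tenths * HZ / 10;
	to->to_retries = retrans;
}

With the defaults from nfs4filelayout.h (NFS4_DEF_DS_TIMEO of 60 and NFS4_DEF_DS_RETRANS of 5) that works out to a 6 second initial timeout and 5 retransmissions before the client gives up on the data server and falls back to recovery.
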
diff --git a/fs/nfs/nfs4namespace.c b/fs/nfs/nfs4namespace.c
index a7f3ded..017b4b0 100644
--- a/fs/nfs/nfs4namespace.c
+++ b/fs/nfs/nfs4namespace.c
@@ -132,6 +132,35 @@
 	return ret;
 }
 
+rpc_authflavor_t nfs_find_best_sec(struct nfs4_secinfo_flavors *flavors)
+{
+	struct gss_api_mech *mech;
+	struct xdr_netobj oid;
+	int i;
+	rpc_authflavor_t pseudoflavor = RPC_AUTH_UNIX;
+
+	for (i = 0; i < flavors->num_flavors; i++) {
+		struct nfs4_secinfo_flavor *flavor;
+		flavor = &flavors->flavors[i];
+
+		if (flavor->flavor == RPC_AUTH_NULL || flavor->flavor == RPC_AUTH_UNIX) {
+			pseudoflavor = flavor->flavor;
+			break;
+		} else if (flavor->flavor == RPC_AUTH_GSS) {
+			oid.len  = flavor->gss.sec_oid4.len;
+			oid.data = flavor->gss.sec_oid4.data;
+			mech = gss_mech_get_by_OID(&oid);
+			if (!mech)
+				continue;
+			pseudoflavor = gss_svc_to_pseudoflavor(mech, flavor->gss.service);
+			gss_mech_put(mech);
+			break;
+		}
+	}
+
+	return pseudoflavor;
+}
+
 static rpc_authflavor_t nfs4_negotiate_security(struct inode *inode, struct qstr *name)
 {
 	struct page *page;
@@ -168,7 +197,7 @@
 	rpc_authflavor_t flavor;
 
 	flavor = nfs4_negotiate_security(inode, name);
-	if (flavor < 0)
+	if ((int)flavor < 0)
 		return ERR_PTR(flavor);
 
 	clone = rpc_clone_client(clnt);
@@ -300,7 +329,7 @@
  * @dentry - dentry of referral
  *
  */
-struct vfsmount *nfs_do_refmount(struct rpc_clnt *client, struct dentry *dentry)
+static struct vfsmount *nfs_do_refmount(struct rpc_clnt *client, struct dentry *dentry)
 {
 	struct vfsmount *mnt = ERR_PTR(-ENOMEM);
 	struct dentry *parent;
@@ -341,3 +370,25 @@
 	dprintk("%s: done\n", __func__);
 	return mnt;
 }
+
+struct vfsmount *nfs4_submount(struct nfs_server *server, struct dentry *dentry,
+			       struct nfs_fh *fh, struct nfs_fattr *fattr)
+{
+	struct dentry *parent = dget_parent(dentry);
+	struct rpc_clnt *client;
+	struct vfsmount *mnt;
+
+	/* Look it up again to get its attributes and sec flavor */
+	client = nfs4_proc_lookup_mountpoint(parent->d_inode, &dentry->d_name, fh, fattr);
+	dput(parent);
+	if (IS_ERR(client))
+		return ERR_CAST(client);
+
+	if (fattr->valid & NFS_ATTR_FATTR_V4_REFERRAL)
+		mnt = nfs_do_refmount(client, dentry);
+	else
+		mnt = nfs_do_submount(dentry, fh, fattr, client->cl_auth->au_flavor);
+
+	rpc_shutdown_client(client);
+	return mnt;
+}
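
nfs_find_best_sec() above walks the SECINFO reply in the server's order and settles on the first flavor the client can actually use: AUTH_NULL or AUTH_SYS (RPC_AUTH_UNIX) directly, or a GSS flavor whose mechanism is loaded, defaulting to AUTH_SYS if nothing matches. A minimal sketch of that selection, with simplified types standing in for nfs4_secinfo_flavors and the GSS mechanism lookup:

enum { AUTH_NONE = 0, AUTH_SYS = 1, AUTH_GSS = 6 };

struct secinfo_entry {
	int flavor;
	int gss_mech_known;	/* stand-in for gss_mech_get_by_OID() success */
	int gss_pseudoflavor;	/* stand-in for gss_svc_to_pseudoflavor() */
};

/* Return the first usable flavor in server order, defaulting to AUTH_SYS. */
static int pick_sec_flavor(const struct secinfo_entry *list, int n)
{
	int chosen = AUTH_SYS;
	int i;

	for (i = 0; i < n; i++) {
		if (list[i].flavor == AUTH_NONE || list[i].flavor == AUTH_SYS) {
			chosen = list[i].flavor;
			break;
		}
		if (list[i].flavor == AUTH_GSS && list[i].gss_mech_known) {
			chosen = list[i].gss_pseudoflavor;
			break;
		}
	}
	return chosen;
}

nfs4_submount() then mounts the crossed filesystem with whichever flavor the lookup negotiated, via client->cl_auth->au_flavor.
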
diff --git a/fs/nfs/nfs4proc.c b/fs/nfs/nfs4proc.c
index ab985f6..15fc7e4 100644
--- a/fs/nfs/nfs4proc.c
+++ b/fs/nfs/nfs4proc.c
@@ -64,6 +64,7 @@
 #include "iostat.h"
 #include "callback.h"
 #include "pnfs.h"
+#include "netns.h"
 
 #define NFSDBG_FACILITY		NFSDBG_PROC
 
@@ -80,6 +81,7 @@
 static int nfs4_do_fsinfo(struct nfs_server *, struct nfs_fh *, struct nfs_fsinfo *);
 static int nfs4_async_handle_error(struct rpc_task *, const struct nfs_server *, struct nfs4_state *);
 static void nfs_fixup_referral_attributes(struct nfs_fattr *fattr);
+static int nfs4_proc_getattr(struct nfs_server *, struct nfs_fh *, struct nfs_fattr *);
 static int _nfs4_proc_getattr(struct nfs_server *server, struct nfs_fh *fhandle, struct nfs_fattr *fattr);
 static int nfs4_do_setattr(struct inode *inode, struct rpc_cred *cred,
 			    struct nfs_fattr *fattr, struct iattr *sattr,
@@ -101,6 +103,10 @@
 	case -NFS4ERR_BADOWNER:
 	case -NFS4ERR_BADNAME:
 		return -EINVAL;
+	case -NFS4ERR_SHARE_DENIED:
+		return -EACCES;
+	case -NFS4ERR_MINOR_VERS_MISMATCH:
+		return -EPROTONOSUPPORT;
 	default:
 		dprintk("%s could not handle NFSv4 error %d\n",
 				__func__, -err);
@@ -112,7 +118,7 @@
 /*
  * This is our standard bitmap for GETATTR requests.
  */
-const u32 nfs4_fattr_bitmap[2] = {
+const u32 nfs4_fattr_bitmap[3] = {
 	FATTR4_WORD0_TYPE
 	| FATTR4_WORD0_CHANGE
 	| FATTR4_WORD0_SIZE
@@ -129,6 +135,24 @@
 	| FATTR4_WORD1_TIME_MODIFY
 };
 
+static const u32 nfs4_pnfs_open_bitmap[3] = {
+	FATTR4_WORD0_TYPE
+	| FATTR4_WORD0_CHANGE
+	| FATTR4_WORD0_SIZE
+	| FATTR4_WORD0_FSID
+	| FATTR4_WORD0_FILEID,
+	FATTR4_WORD1_MODE
+	| FATTR4_WORD1_NUMLINKS
+	| FATTR4_WORD1_OWNER
+	| FATTR4_WORD1_OWNER_GROUP
+	| FATTR4_WORD1_RAWDEV
+	| FATTR4_WORD1_SPACE_USED
+	| FATTR4_WORD1_TIME_ACCESS
+	| FATTR4_WORD1_TIME_METADATA
+	| FATTR4_WORD1_TIME_MODIFY,
+	FATTR4_WORD2_MDSTHRESHOLD
+};
+
 const u32 nfs4_statfs_bitmap[2] = {
 	FATTR4_WORD0_FILES_AVAIL
 	| FATTR4_WORD0_FILES_FREE
@@ -304,7 +328,7 @@
 		case -NFS4ERR_SEQ_MISORDERED:
 			dprintk("%s ERROR: %d Reset session\n", __func__,
 				errorcode);
-			nfs4_schedule_session_recovery(clp->cl_session);
+			nfs4_schedule_session_recovery(clp->cl_session, errorcode);
 			exception->retry = 1;
 			break;
 #endif /* defined(CONFIG_NFS_V4_1) */
@@ -772,7 +796,7 @@
 	struct nfs_inode *nfsi = NFS_I(dir);
 
 	spin_lock(&dir->i_lock);
-	nfsi->cache_validity |= NFS_INO_INVALID_ATTR|NFS_INO_REVAL_PAGECACHE|NFS_INO_INVALID_DATA;
+	nfsi->cache_validity |= NFS_INO_INVALID_ATTR|NFS_INO_INVALID_DATA;
 	if (!cinfo->atomic || cinfo->before != dir->i_version)
 		nfs_force_lookup_revalidate(dir);
 	dir->i_version = cinfo->after;
@@ -788,7 +812,6 @@
 	struct nfs4_string owner_name;
 	struct nfs4_string group_name;
 	struct nfs_fattr f_attr;
-	struct nfs_fattr dir_attr;
 	struct dentry *dir;
 	struct dentry *dentry;
 	struct nfs4_state_owner *owner;
@@ -804,12 +827,10 @@
 static void nfs4_init_opendata_res(struct nfs4_opendata *p)
 {
 	p->o_res.f_attr = &p->f_attr;
-	p->o_res.dir_attr = &p->dir_attr;
 	p->o_res.seqid = p->o_arg.seqid;
 	p->c_res.seqid = p->c_arg.seqid;
 	p->o_res.server = p->o_arg.server;
 	nfs_fattr_init(&p->f_attr);
-	nfs_fattr_init(&p->dir_attr);
 	nfs_fattr_init_names(&p->f_attr, &p->owner_name, &p->group_name);
 }
 
@@ -843,7 +864,7 @@
 	p->o_arg.name = &dentry->d_name;
 	p->o_arg.server = server;
 	p->o_arg.bitmask = server->attr_bitmask;
-	p->o_arg.dir_bitmask = server->cache_consistency_bitmask;
+	p->o_arg.open_bitmap = &nfs4_fattr_bitmap[0];
 	p->o_arg.claim = NFS4_OPEN_CLAIM_NULL;
 	if (attrs != NULL && attrs->ia_valid != 0) {
 		__be32 verf[2];
@@ -1332,7 +1353,7 @@
 			case -NFS4ERR_BAD_HIGH_SLOT:
 			case -NFS4ERR_CONN_NOT_BOUND_TO_SESSION:
 			case -NFS4ERR_DEADSESSION:
-				nfs4_schedule_session_recovery(server->nfs_client->cl_session);
+				nfs4_schedule_session_recovery(server->nfs_client->cl_session, err);
 				goto out;
 			case -NFS4ERR_STALE_CLIENTID:
 			case -NFS4ERR_STALE_STATEID:
@@ -1611,8 +1632,6 @@
 
 	nfs_fattr_map_and_free_names(NFS_SERVER(dir), &data->f_attr);
 
-	nfs_refresh_inode(dir, o_res->dir_attr);
-
 	if (o_res->rflags & NFS4_OPEN_RESULT_CONFIRM) {
 		status = _nfs4_proc_open_confirm(data);
 		if (status != 0)
@@ -1645,11 +1664,8 @@
 
 	nfs_fattr_map_and_free_names(server, &data->f_attr);
 
-	if (o_arg->open_flags & O_CREAT) {
+	if (o_arg->open_flags & O_CREAT)
 		update_changeattr(dir, &o_res->cinfo);
-		nfs_post_op_update_inode(dir, o_res->dir_attr);
-	} else
-		nfs_refresh_inode(dir, o_res->dir_attr);
 	if ((o_res->rflags & NFS4_OPEN_RESULT_LOCKTYPE_POSIX) == 0)
 		server->caps &= ~NFS_CAP_POSIX_LOCK;
 	if(o_res->rflags & NFS4_OPEN_RESULT_CONFIRM) {
@@ -1789,7 +1805,14 @@
 /*
  * Returns a referenced nfs4_state
  */
-static int _nfs4_do_open(struct inode *dir, struct dentry *dentry, fmode_t fmode, int flags, struct iattr *sattr, struct rpc_cred *cred, struct nfs4_state **res)
+static int _nfs4_do_open(struct inode *dir,
+			struct dentry *dentry,
+			fmode_t fmode,
+			int flags,
+			struct iattr *sattr,
+			struct rpc_cred *cred,
+			struct nfs4_state **res,
+			struct nfs4_threshold **ctx_th)
 {
 	struct nfs4_state_owner  *sp;
 	struct nfs4_state     *state = NULL;
@@ -1814,6 +1837,12 @@
 	if (opendata == NULL)
 		goto err_put_state_owner;
 
+	if (ctx_th && server->attr_bitmask[2] & FATTR4_WORD2_MDSTHRESHOLD) {
+		opendata->f_attr.mdsthreshold = pnfs_mdsthreshold_alloc();
+		if (!opendata->f_attr.mdsthreshold)
+			goto err_opendata_put;
+		opendata->o_arg.open_bitmap = &nfs4_pnfs_open_bitmap[0];
+	}
 	if (dentry->d_inode != NULL)
 		opendata->state = nfs4_get_open_state(dentry->d_inode, sp);
 
@@ -1839,11 +1868,19 @@
 			nfs_setattr_update_inode(state->inode, sattr);
 		nfs_post_op_update_inode(state->inode, opendata->o_res.f_attr);
 	}
+
+	if (pnfs_use_threshold(ctx_th, opendata->f_attr.mdsthreshold, server))
+		*ctx_th = opendata->f_attr.mdsthreshold;
+	else
+		kfree(opendata->f_attr.mdsthreshold);
+	opendata->f_attr.mdsthreshold = NULL;
+
 	nfs4_opendata_put(opendata);
 	nfs4_put_state_owner(sp);
 	*res = state;
 	return 0;
 err_opendata_put:
+	kfree(opendata->f_attr.mdsthreshold);
 	nfs4_opendata_put(opendata);
 err_put_state_owner:
 	nfs4_put_state_owner(sp);
@@ -1853,14 +1890,22 @@
 }
 
 
-static struct nfs4_state *nfs4_do_open(struct inode *dir, struct dentry *dentry, fmode_t fmode, int flags, struct iattr *sattr, struct rpc_cred *cred)
+static struct nfs4_state *nfs4_do_open(struct inode *dir,
+					struct dentry *dentry,
+					fmode_t fmode,
+					int flags,
+					struct iattr *sattr,
+					struct rpc_cred *cred,
+					struct nfs4_threshold **ctx_th)
 {
 	struct nfs4_exception exception = { };
 	struct nfs4_state *res;
 	int status;
 
+	fmode &= FMODE_READ|FMODE_WRITE;
 	do {
-		status = _nfs4_do_open(dir, dentry, fmode, flags, sattr, cred, &res);
+		status = _nfs4_do_open(dir, dentry, fmode, flags, sattr, cred,
+				       &res, ctx_th);
 		if (status == 0)
 			break;
 		/* NOTE: BAD_SEQID means the server and client disagree about the
@@ -2184,7 +2229,8 @@
 	struct nfs4_state *state;
 
 	/* Protect against concurrent sillydeletes */
-	state = nfs4_do_open(dir, ctx->dentry, ctx->mode, open_flags, attr, ctx->cred);
+	state = nfs4_do_open(dir, ctx->dentry, ctx->mode, open_flags, attr,
+			     ctx->cred, &ctx->mdsthreshold);
 	if (IS_ERR(state))
 		return ERR_CAST(state);
 	ctx->state = state;
@@ -2354,8 +2400,8 @@
 /*
  * get the file handle for the "/" directory on the server
  */
-static int nfs4_proc_get_root(struct nfs_server *server, struct nfs_fh *fhandle,
-			      struct nfs_fsinfo *info)
+int nfs4_proc_get_rootfh(struct nfs_server *server, struct nfs_fh *fhandle,
+			 struct nfs_fsinfo *info)
 {
 	int minor_version = server->nfs_client->cl_minorversion;
 	int status = nfs4_lookup_root(server, fhandle, info);
@@ -2372,6 +2418,31 @@
 	return nfs4_map_errors(status);
 }
 
+static int nfs4_proc_get_root(struct nfs_server *server, struct nfs_fh *mntfh,
+			      struct nfs_fsinfo *info)
+{
+	int error;
+	struct nfs_fattr *fattr = info->fattr;
+
+	error = nfs4_server_capabilities(server, mntfh);
+	if (error < 0) {
+		dprintk("nfs4_get_root: getcaps error = %d\n", -error);
+		return error;
+	}
+
+	error = nfs4_proc_getattr(server, mntfh, fattr);
+	if (error < 0) {
+		dprintk("nfs4_get_root: getattr error = %d\n", -error);
+		return error;
+	}
+
+	if (fattr->valid & NFS_ATTR_FATTR_FSID &&
+	    !nfs_fsid_equal(&server->fsid, &fattr->fsid))
+		memcpy(&server->fsid, &fattr->fsid, sizeof(server->fsid));
+
+	return error;
+}
+
 /*
  * Get locations and (maybe) other attributes of a referral.
  * Note that we'll actually follow the referral later when
@@ -2478,6 +2549,14 @@
 
 	nfs_fattr_init(fattr);
 	
+	/* Deal with open(O_TRUNC) */
+	if (sattr->ia_valid & ATTR_OPEN)
+		sattr->ia_valid &= ~(ATTR_MTIME|ATTR_CTIME|ATTR_OPEN);
+
+	/* Optimization: if the end result is no change, don't RPC */
+	if ((sattr->ia_valid & ~(ATTR_FILE)) == 0)
+		return 0;
+
 	/* Search for an existing open(O_WRITE) file */
 	if (sattr->ia_valid & ATTR_FILE) {
 		struct nfs_open_context *ctx;
@@ -2489,10 +2568,6 @@
 		}
 	}
 
-	/* Deal with open(O_TRUNC) */
-	if (sattr->ia_valid & ATTR_OPEN)
-		sattr->ia_valid &= ~(ATTR_MTIME|ATTR_CTIME|ATTR_OPEN);
-
 	status = nfs4_do_setattr(inode, cred, fattr, sattr, state);
 	if (status == 0)
 		nfs_setattr_update_inode(inode, sattr);
@@ -2578,7 +2653,7 @@
 	return err;
 }
 
-static int nfs4_proc_lookup(struct rpc_clnt *clnt, struct inode *dir, struct qstr *name,
+static int nfs4_proc_lookup(struct inode *dir, struct qstr *name,
 			    struct nfs_fh *fhandle, struct nfs_fattr *fattr)
 {
 	int status;
@@ -2761,7 +2836,7 @@
 		fmode = ctx->mode;
 	}
 	sattr->ia_mode &= ~current_umask();
-	state = nfs4_do_open(dir, de, fmode, flags, sattr, cred);
+	state = nfs4_do_open(dir, de, fmode, flags, sattr, cred, NULL);
 	d_drop(dentry);
 	if (IS_ERR(state)) {
 		status = PTR_ERR(state);
@@ -2783,7 +2858,6 @@
 	struct nfs_removeargs args = {
 		.fh = NFS_FH(dir),
 		.name = *name,
-		.bitmask = server->attr_bitmask,
 	};
 	struct nfs_removeres res = {
 		.server = server,
@@ -2793,19 +2867,11 @@
 		.rpc_argp = &args,
 		.rpc_resp = &res,
 	};
-	int status = -ENOMEM;
-
-	res.dir_attr = nfs_alloc_fattr();
-	if (res.dir_attr == NULL)
-		goto out;
+	int status;
 
 	status = nfs4_call_sync(server->client, server, &msg, &args.seq_args, &res.seq_res, 1);
-	if (status == 0) {
+	if (status == 0)
 		update_changeattr(dir, &res.cinfo);
-		nfs_post_op_update_inode(dir, res.dir_attr);
-	}
-	nfs_free_fattr(res.dir_attr);
-out:
 	return status;
 }
 
@@ -2827,7 +2893,6 @@
 	struct nfs_removeargs *args = msg->rpc_argp;
 	struct nfs_removeres *res = msg->rpc_resp;
 
-	args->bitmask = server->cache_consistency_bitmask;
 	res->server = server;
 	msg->rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_REMOVE];
 	nfs41_init_sequence(&args->seq_args, &res->seq_res, 1);
@@ -2852,7 +2917,6 @@
 	if (nfs4_async_handle_error(task, res->server, NULL) == -EAGAIN)
 		return 0;
 	update_changeattr(dir, &res->cinfo);
-	nfs_post_op_update_inode(dir, res->dir_attr);
 	return 1;
 }
 
@@ -2863,7 +2927,6 @@
 	struct nfs_renameres *res = msg->rpc_resp;
 
 	msg->rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_RENAME];
-	arg->bitmask = server->attr_bitmask;
 	res->server = server;
 	nfs41_init_sequence(&arg->seq_args, &res->seq_res, 1);
 }
@@ -2889,9 +2952,7 @@
 		return 0;
 
 	update_changeattr(old_dir, &res->old_cinfo);
-	nfs_post_op_update_inode(old_dir, res->old_fattr);
 	update_changeattr(new_dir, &res->new_cinfo);
-	nfs_post_op_update_inode(new_dir, res->new_fattr);
 	return 1;
 }
 
@@ -2904,7 +2965,6 @@
 		.new_dir = NFS_FH(new_dir),
 		.old_name = old_name,
 		.new_name = new_name,
-		.bitmask = server->attr_bitmask,
 	};
 	struct nfs_renameres res = {
 		.server = server,
@@ -2916,21 +2976,11 @@
 	};
 	int status = -ENOMEM;
 	
-	res.old_fattr = nfs_alloc_fattr();
-	res.new_fattr = nfs_alloc_fattr();
-	if (res.old_fattr == NULL || res.new_fattr == NULL)
-		goto out;
-
 	status = nfs4_call_sync(server->client, server, &msg, &arg.seq_args, &res.seq_res, 1);
 	if (!status) {
 		update_changeattr(old_dir, &res.old_cinfo);
-		nfs_post_op_update_inode(old_dir, res.old_fattr);
 		update_changeattr(new_dir, &res.new_cinfo);
-		nfs_post_op_update_inode(new_dir, res.new_fattr);
 	}
-out:
-	nfs_free_fattr(res.new_fattr);
-	nfs_free_fattr(res.old_fattr);
 	return status;
 }
 
@@ -2968,18 +3018,15 @@
 	int status = -ENOMEM;
 
 	res.fattr = nfs_alloc_fattr();
-	res.dir_attr = nfs_alloc_fattr();
-	if (res.fattr == NULL || res.dir_attr == NULL)
+	if (res.fattr == NULL)
 		goto out;
 
 	status = nfs4_call_sync(server->client, server, &msg, &arg.seq_args, &res.seq_res, 1);
 	if (!status) {
 		update_changeattr(dir, &res.cinfo);
-		nfs_post_op_update_inode(dir, res.dir_attr);
 		nfs_post_op_update_inode(inode, res.fattr);
 	}
 out:
-	nfs_free_fattr(res.dir_attr);
 	nfs_free_fattr(res.fattr);
 	return status;
 }
@@ -3002,7 +3049,6 @@
 	struct nfs4_create_res res;
 	struct nfs_fh fh;
 	struct nfs_fattr fattr;
-	struct nfs_fattr dir_fattr;
 };
 
 static struct nfs4_createdata *nfs4_alloc_createdata(struct inode *dir,
@@ -3026,9 +3072,7 @@
 		data->res.server = server;
 		data->res.fh = &data->fh;
 		data->res.fattr = &data->fattr;
-		data->res.dir_fattr = &data->dir_fattr;
 		nfs_fattr_init(data->res.fattr);
-		nfs_fattr_init(data->res.dir_fattr);
 	}
 	return data;
 }
@@ -3039,7 +3083,6 @@
 				    &data->arg.seq_args, &data->res.seq_res, 1);
 	if (status == 0) {
 		update_changeattr(dir, &data->res.dir_cinfo);
-		nfs_post_op_update_inode(dir, data->res.dir_fattr);
 		status = nfs_instantiate(dentry, data->res.fh, data->res.fattr);
 	}
 	return status;
@@ -3335,12 +3378,12 @@
 
 void __nfs4_read_done_cb(struct nfs_read_data *data)
 {
-	nfs_invalidate_atime(data->inode);
+	nfs_invalidate_atime(data->header->inode);
 }
 
 static int nfs4_read_done_cb(struct rpc_task *task, struct nfs_read_data *data)
 {
-	struct nfs_server *server = NFS_SERVER(data->inode);
+	struct nfs_server *server = NFS_SERVER(data->header->inode);
 
 	if (nfs4_async_handle_error(task, server, data->args.context->state) == -EAGAIN) {
 		rpc_restart_call_prepare(task);
@@ -3375,7 +3418,7 @@
 
 static void nfs4_proc_read_rpc_prepare(struct rpc_task *task, struct nfs_read_data *data)
 {
-	if (nfs4_setup_sequence(NFS_SERVER(data->inode),
+	if (nfs4_setup_sequence(NFS_SERVER(data->header->inode),
 				&data->args.seq_args,
 				&data->res.seq_res,
 				task))
@@ -3383,25 +3426,9 @@
 	rpc_call_start(task);
 }
 
-/* Reset the the nfs_read_data to send the read to the MDS. */
-void nfs4_reset_read(struct rpc_task *task, struct nfs_read_data *data)
-{
-	dprintk("%s Reset task for i/o through\n", __func__);
-	put_lseg(data->lseg);
-	data->lseg = NULL;
-	/* offsets will differ in the dense stripe case */
-	data->args.offset = data->mds_offset;
-	data->ds_clp = NULL;
-	data->args.fh     = NFS_FH(data->inode);
-	data->read_done_cb = nfs4_read_done_cb;
-	task->tk_ops = data->mds_ops;
-	rpc_task_reset_client(task, NFS_CLIENT(data->inode));
-}
-EXPORT_SYMBOL_GPL(nfs4_reset_read);
-
 static int nfs4_write_done_cb(struct rpc_task *task, struct nfs_write_data *data)
 {
-	struct inode *inode = data->inode;
+	struct inode *inode = data->header->inode;
 	
 	if (nfs4_async_handle_error(task, NFS_SERVER(inode), data->args.context->state) == -EAGAIN) {
 		rpc_restart_call_prepare(task);
@@ -3409,7 +3436,7 @@
 	}
 	if (task->tk_status >= 0) {
 		renew_lease(NFS_SERVER(inode), data->timestamp);
-		nfs_post_op_update_inode_force_wcc(inode, data->res.fattr);
+		nfs_post_op_update_inode_force_wcc(inode, &data->fattr);
 	}
 	return 0;
 }
@@ -3422,32 +3449,30 @@
 		nfs4_write_done_cb(task, data);
 }
 
-/* Reset the the nfs_write_data to send the write to the MDS. */
-void nfs4_reset_write(struct rpc_task *task, struct nfs_write_data *data)
+static
+bool nfs4_write_need_cache_consistency_data(const struct nfs_write_data *data)
 {
-	dprintk("%s Reset task for i/o through\n", __func__);
-	put_lseg(data->lseg);
-	data->lseg          = NULL;
-	data->ds_clp        = NULL;
-	data->write_done_cb = nfs4_write_done_cb;
-	data->args.fh       = NFS_FH(data->inode);
-	data->args.bitmask  = data->res.server->cache_consistency_bitmask;
-	data->args.offset   = data->mds_offset;
-	data->res.fattr     = &data->fattr;
-	task->tk_ops        = data->mds_ops;
-	rpc_task_reset_client(task, NFS_CLIENT(data->inode));
+	const struct nfs_pgio_header *hdr = data->header;
+
+	/* Don't request attributes for pNFS or O_DIRECT writes */
+	if (data->ds_clp != NULL || hdr->dreq != NULL)
+		return false;
+	/* Otherwise, request attributes if and only if we don't hold
+	 * a delegation
+	 */
+	return nfs_have_delegation(hdr->inode, FMODE_READ) == 0;
 }
-EXPORT_SYMBOL_GPL(nfs4_reset_write);
 
 static void nfs4_proc_write_setup(struct nfs_write_data *data, struct rpc_message *msg)
 {
-	struct nfs_server *server = NFS_SERVER(data->inode);
+	struct nfs_server *server = NFS_SERVER(data->header->inode);
 
-	if (data->lseg) {
+	if (!nfs4_write_need_cache_consistency_data(data)) {
 		data->args.bitmask = NULL;
 		data->res.fattr = NULL;
 	} else
 		data->args.bitmask = server->cache_consistency_bitmask;
+
 	if (!data->write_done_cb)
 		data->write_done_cb = nfs4_write_done_cb;
 	data->res.server = server;
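
The helper nfs4_write_need_cache_consistency_data() above decides whether a WRITE should also request cache-consistency attributes: never for pNFS data-server or O_DIRECT writes, and otherwise only when no delegation is held. A minimal restatement of that decision with stand-in types, kept here only to make the three conditions explicit:

struct write_ctx {
	int is_pnfs_ds_write;	/* data goes to a DS rather than the MDS */
	int is_odirect;		/* O_DIRECT write, no page cache to revalidate */
	int holds_delegation;	/* a delegation already covers consistency */
};

static int want_post_write_attrs(const struct write_ctx *w)
{
	if (w->is_pnfs_ds_write || w->is_odirect)
		return 0;
	return !w->holds_delegation;
}
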
@@ -3459,6 +3484,16 @@
 
 static void nfs4_proc_write_rpc_prepare(struct rpc_task *task, struct nfs_write_data *data)
 {
+	if (nfs4_setup_sequence(NFS_SERVER(data->header->inode),
+				&data->args.seq_args,
+				&data->res.seq_res,
+				task))
+		return;
+	rpc_call_start(task);
+}
+
+static void nfs4_proc_commit_rpc_prepare(struct rpc_task *task, struct nfs_commit_data *data)
+{
 	if (nfs4_setup_sequence(NFS_SERVER(data->inode),
 				&data->args.seq_args,
 				&data->res.seq_res,
@@ -3467,7 +3502,7 @@
 	rpc_call_start(task);
 }
 
-static int nfs4_commit_done_cb(struct rpc_task *task, struct nfs_write_data *data)
+static int nfs4_commit_done_cb(struct rpc_task *task, struct nfs_commit_data *data)
 {
 	struct inode *inode = data->inode;
 
@@ -3475,28 +3510,22 @@
 		rpc_restart_call_prepare(task);
 		return -EAGAIN;
 	}
-	nfs_refresh_inode(inode, data->res.fattr);
 	return 0;
 }
 
-static int nfs4_commit_done(struct rpc_task *task, struct nfs_write_data *data)
+static int nfs4_commit_done(struct rpc_task *task, struct nfs_commit_data *data)
 {
 	if (!nfs4_sequence_done(task, &data->res.seq_res))
 		return -EAGAIN;
-	return data->write_done_cb(task, data);
+	return data->commit_done_cb(task, data);
 }
 
-static void nfs4_proc_commit_setup(struct nfs_write_data *data, struct rpc_message *msg)
+static void nfs4_proc_commit_setup(struct nfs_commit_data *data, struct rpc_message *msg)
 {
 	struct nfs_server *server = NFS_SERVER(data->inode);
 
-	if (data->lseg) {
-		data->args.bitmask = NULL;
-		data->res.fattr = NULL;
-	} else
-		data->args.bitmask = server->cache_consistency_bitmask;
-	if (!data->write_done_cb)
-		data->write_done_cb = nfs4_commit_done_cb;
+	if (data->commit_done_cb == NULL)
+		data->commit_done_cb = nfs4_commit_done_cb;
 	data->res.server = server;
 	msg->rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_COMMIT];
 	nfs41_init_sequence(&data->args.seq_args, &data->res.seq_res, 1);
@@ -3905,7 +3934,7 @@
 		case -NFS4ERR_SEQ_MISORDERED:
 			dprintk("%s ERROR %d, Reset session\n", __func__,
 				task->tk_status);
-			nfs4_schedule_session_recovery(clp->cl_session);
+			nfs4_schedule_session_recovery(clp->cl_session, task->tk_status);
 			task->tk_status = 0;
 			return -EAGAIN;
 #endif /* CONFIG_NFS_V4_1 */
@@ -3931,13 +3960,21 @@
 	return -EAGAIN;
 }
 
-static void nfs4_construct_boot_verifier(struct nfs_client *clp,
-					 nfs4_verifier *bootverf)
+static void nfs4_init_boot_verifier(const struct nfs_client *clp,
+				    nfs4_verifier *bootverf)
 {
 	__be32 verf[2];
 
-	verf[0] = htonl((u32)clp->cl_boot_time.tv_sec);
-	verf[1] = htonl((u32)clp->cl_boot_time.tv_nsec);
+	if (test_bit(NFS4CLNT_PURGE_STATE, &clp->cl_state)) {
+		/* An impossible timestamp guarantees this value
+		 * will never match a generated boot time. */
+		verf[0] = 0;
+		verf[1] = (__be32)(NSEC_PER_SEC + 1);
+	} else {
+		struct nfs_net *nn = net_generic(clp->cl_net, nfs_net_id);
+		verf[0] = (__be32)nn->boot_time.tv_sec;
+		verf[1] = (__be32)nn->boot_time.tv_nsec;
+	}
 	memcpy(bootverf->data, verf, sizeof(bootverf->data));
 }
 
@@ -3960,7 +3997,7 @@
 	int loop = 0;
 	int status;
 
-	nfs4_construct_boot_verifier(clp, &sc_verifier);
+	nfs4_init_boot_verifier(clp, &sc_verifier);
 
 	for(;;) {
 		rcu_read_lock();
@@ -4104,7 +4141,7 @@
 	nfs41_init_sequence(&data->args.seq_args, &data->res.seq_res, 1);
 	data->args.fhandle = &data->fh;
 	data->args.stateid = &data->stateid;
-	data->args.bitmask = server->attr_bitmask;
+	data->args.bitmask = server->cache_consistency_bitmask;
 	nfs_copy_fh(&data->fh, NFS_FH(inode));
 	nfs4_stateid_copy(&data->stateid, stateid);
 	data->res.fattr = &data->fattr;
@@ -4125,9 +4162,10 @@
 	if (status != 0)
 		goto out;
 	status = data->rpc_status;
-	if (status != 0)
-		goto out;
-	nfs_refresh_inode(inode, &data->fattr);
+	if (status == 0)
+		nfs_post_op_update_inode_force_wcc(inode, &data->fattr);
+	else
+		nfs_refresh_inode(inode, &data->fattr);
 out:
 	rpc_put_task(task);
 	return status;
@@ -4837,7 +4875,7 @@
 			case -NFS4ERR_BAD_HIGH_SLOT:
 			case -NFS4ERR_CONN_NOT_BOUND_TO_SESSION:
 			case -NFS4ERR_DEADSESSION:
-				nfs4_schedule_session_recovery(server->nfs_client->cl_session);
+				nfs4_schedule_session_recovery(server->nfs_client->cl_session, err);
 				goto out;
 			case -ERESTARTSYS:
 				/*
@@ -5079,7 +5117,8 @@
 }
 
 static bool
-nfs41_same_server_scope(struct server_scope *a, struct server_scope *b)
+nfs41_same_server_scope(struct nfs41_server_scope *a,
+			struct nfs41_server_scope *b)
 {
 	if (a->server_scope_sz == b->server_scope_sz &&
 	    memcmp(a->server_scope, b->server_scope, a->server_scope_sz) == 0)
@@ -5089,6 +5128,61 @@
 }
 
 /*
+ * nfs4_proc_bind_conn_to_session()
+ *
+ * The 4.1 client currently uses the same TCP connection for the
+ * fore and backchannel.
+ */
+int nfs4_proc_bind_conn_to_session(struct nfs_client *clp, struct rpc_cred *cred)
+{
+	int status;
+	struct nfs41_bind_conn_to_session_res res;
+	struct rpc_message msg = {
+		.rpc_proc =
+			&nfs4_procedures[NFSPROC4_CLNT_BIND_CONN_TO_SESSION],
+		.rpc_argp = clp,
+		.rpc_resp = &res,
+		.rpc_cred = cred,
+	};
+
+	dprintk("--> %s\n", __func__);
+	BUG_ON(clp == NULL);
+
+	res.session = kzalloc(sizeof(struct nfs4_session), GFP_NOFS);
+	if (unlikely(res.session == NULL)) {
+		status = -ENOMEM;
+		goto out;
+	}
+
+	status = rpc_call_sync(clp->cl_rpcclient, &msg, RPC_TASK_TIMEOUT);
+	if (status == 0) {
+		if (memcmp(res.session->sess_id.data,
+		    clp->cl_session->sess_id.data, NFS4_MAX_SESSIONID_LEN)) {
+			dprintk("NFS: %s: Session ID mismatch\n", __func__);
+			status = -EIO;
+			goto out_session;
+		}
+		if (res.dir != NFS4_CDFS4_BOTH) {
+			dprintk("NFS: %s: Unexpected direction from server\n",
+				__func__);
+			status = -EIO;
+			goto out_session;
+		}
+		if (res.use_conn_in_rdma_mode) {
+			dprintk("NFS: %s: Server returned RDMA mode = true\n",
+				__func__);
+			status = -EIO;
+			goto out_session;
+		}
+	}
+out_session:
+	kfree(res.session);
+out:
+	dprintk("<-- %s status= %d\n", __func__, status);
+	return status;
+}
+
+/*
  * nfs4_proc_exchange_id()
  *
  * Since the clientid has expired, all compounds using sessions
@@ -5105,7 +5199,7 @@
 		.flags = EXCHGID4_FLAG_SUPP_MOVED_REFER,
 	};
 	struct nfs41_exchange_id_res res = {
-		.client = clp,
+		0
 	};
 	int status;
 	struct rpc_message msg = {
@@ -5118,7 +5212,7 @@
 	dprintk("--> %s\n", __func__);
 	BUG_ON(clp == NULL);
 
-	nfs4_construct_boot_verifier(clp, &verifier);
+	nfs4_init_boot_verifier(clp, &verifier);
 
 	args.id_len = scnprintf(args.id, sizeof(args.id),
 				"%s/%s/%u",
@@ -5126,59 +5220,135 @@
 				clp->cl_rpcclient->cl_nodename,
 				clp->cl_rpcclient->cl_auth->au_flavor);
 
-	res.server_scope = kzalloc(sizeof(struct server_scope), GFP_KERNEL);
-	if (unlikely(!res.server_scope)) {
+	res.server_owner = kzalloc(sizeof(struct nfs41_server_owner),
+					GFP_NOFS);
+	if (unlikely(res.server_owner == NULL)) {
 		status = -ENOMEM;
 		goto out;
 	}
 
-	res.impl_id = kzalloc(sizeof(struct nfs41_impl_id), GFP_KERNEL);
-	if (unlikely(!res.impl_id)) {
+	res.server_scope = kzalloc(sizeof(struct nfs41_server_scope),
+					GFP_NOFS);
+	if (unlikely(res.server_scope == NULL)) {
+		status = -ENOMEM;
+		goto out_server_owner;
+	}
+
+	res.impl_id = kzalloc(sizeof(struct nfs41_impl_id), GFP_NOFS);
+	if (unlikely(res.impl_id == NULL)) {
 		status = -ENOMEM;
 		goto out_server_scope;
 	}
 
 	status = rpc_call_sync(clp->cl_rpcclient, &msg, RPC_TASK_TIMEOUT);
-	if (!status)
-		status = nfs4_check_cl_exchange_flags(clp->cl_exchange_flags);
+	if (status == 0)
+		status = nfs4_check_cl_exchange_flags(res.flags);
 
-	if (!status) {
+	if (status == 0) {
+		clp->cl_clientid = res.clientid;
+		clp->cl_exchange_flags = (res.flags & ~EXCHGID4_FLAG_CONFIRMED_R);
+		if (!(res.flags & EXCHGID4_FLAG_CONFIRMED_R))
+			clp->cl_seqid = res.seqid;
+
+		kfree(clp->cl_serverowner);
+		clp->cl_serverowner = res.server_owner;
+		res.server_owner = NULL;
+
 		/* use the most recent implementation id */
-		kfree(clp->impl_id);
-		clp->impl_id = res.impl_id;
-	} else
-		kfree(res.impl_id);
+		kfree(clp->cl_implid);
+		clp->cl_implid = res.impl_id;
 
-	if (!status) {
-		if (clp->server_scope &&
-		    !nfs41_same_server_scope(clp->server_scope,
+		if (clp->cl_serverscope != NULL &&
+		    !nfs41_same_server_scope(clp->cl_serverscope,
 					     res.server_scope)) {
 			dprintk("%s: server_scope mismatch detected\n",
 				__func__);
 			set_bit(NFS4CLNT_SERVER_SCOPE_MISMATCH, &clp->cl_state);
-			kfree(clp->server_scope);
-			clp->server_scope = NULL;
+			kfree(clp->cl_serverscope);
+			clp->cl_serverscope = NULL;
 		}
 
-		if (!clp->server_scope) {
-			clp->server_scope = res.server_scope;
+		if (clp->cl_serverscope == NULL) {
+			clp->cl_serverscope = res.server_scope;
 			goto out;
 		}
-	}
+	} else
+		kfree(res.impl_id);
 
+out_server_owner:
+	kfree(res.server_owner);
 out_server_scope:
 	kfree(res.server_scope);
 out:
-	if (clp->impl_id)
+	if (clp->cl_implid != NULL)
 		dprintk("%s: Server Implementation ID: "
 			"domain: %s, name: %s, date: %llu,%u\n",
-			__func__, clp->impl_id->domain, clp->impl_id->name,
-			clp->impl_id->date.seconds,
-			clp->impl_id->date.nseconds);
+			__func__, clp->cl_implid->domain, clp->cl_implid->name,
+			clp->cl_implid->date.seconds,
+			clp->cl_implid->date.nseconds);
 	dprintk("<-- %s status= %d\n", __func__, status);
 	return status;
 }
 
+static int _nfs4_proc_destroy_clientid(struct nfs_client *clp,
+		struct rpc_cred *cred)
+{
+	struct rpc_message msg = {
+		.rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_DESTROY_CLIENTID],
+		.rpc_argp = clp,
+		.rpc_cred = cred,
+	};
+	int status;
+
+	status = rpc_call_sync(clp->cl_rpcclient, &msg, RPC_TASK_TIMEOUT);
+	if (status)
+		dprintk("NFS: Got error %d from the server %s on "
+			"DESTROY_CLIENTID.", status, clp->cl_hostname);
+	return status;
+}
+
+static int nfs4_proc_destroy_clientid(struct nfs_client *clp,
+		struct rpc_cred *cred)
+{
+	unsigned int loop;
+	int ret;
+
+	for (loop = NFS4_MAX_LOOP_ON_RECOVER; loop != 0; loop--) {
+		ret = _nfs4_proc_destroy_clientid(clp, cred);
+		switch (ret) {
+		case -NFS4ERR_DELAY:
+		case -NFS4ERR_CLIENTID_BUSY:
+			ssleep(1);
+			break;
+		default:
+			return ret;
+		}
+	}
+	return 0;
+}
+
+int nfs4_destroy_clientid(struct nfs_client *clp)
+{
+	struct rpc_cred *cred;
+	int ret = 0;
+
+	if (clp->cl_mvops->minor_version < 1)
+		goto out;
+	if (clp->cl_exchange_flags == 0)
+		goto out;
+	cred = nfs4_get_exchange_id_cred(clp);
+	ret = nfs4_proc_destroy_clientid(clp, cred);
+	if (cred)
+		put_rpccred(cred);
+	switch (ret) {
+	case 0:
+	case -NFS4ERR_STALE_CLIENTID:
+		clp->cl_exchange_flags = 0;
+	}
+out:
+	return ret;
+}
+
 struct nfs4_get_lease_time_data {
 	struct nfs4_get_lease_time_args *args;
 	struct nfs4_get_lease_time_res *res;
@@ -5399,8 +5569,12 @@
 void nfs4_destroy_session(struct nfs4_session *session)
 {
 	struct rpc_xprt *xprt;
+	struct rpc_cred *cred;
 
-	nfs4_proc_destroy_session(session);
+	cred = nfs4_get_exchange_id_cred(session->clp);
+	nfs4_proc_destroy_session(session, cred);
+	if (cred)
+		put_rpccred(cred);
 
 	rcu_read_lock();
 	xprt = rcu_dereference(session->clp->cl_rpcclient->cl_xprt);
@@ -5510,7 +5684,8 @@
 	return nfs4_verify_back_channel_attrs(args, session);
 }
 
-static int _nfs4_proc_create_session(struct nfs_client *clp)
+static int _nfs4_proc_create_session(struct nfs_client *clp,
+		struct rpc_cred *cred)
 {
 	struct nfs4_session *session = clp->cl_session;
 	struct nfs41_create_session_args args = {
@@ -5524,6 +5699,7 @@
 		.rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_CREATE_SESSION],
 		.rpc_argp = &args,
 		.rpc_resp = &res,
+		.rpc_cred = cred,
 	};
 	int status;
 
@@ -5548,7 +5724,7 @@
  * It is the responsibility of the caller to verify the session is
  * expired before calling this routine.
  */
-int nfs4_proc_create_session(struct nfs_client *clp)
+int nfs4_proc_create_session(struct nfs_client *clp, struct rpc_cred *cred)
 {
 	int status;
 	unsigned *ptr;
@@ -5556,7 +5732,7 @@
 
 	dprintk("--> %s clp=%p session=%p\n", __func__, clp, session);
 
-	status = _nfs4_proc_create_session(clp);
+	status = _nfs4_proc_create_session(clp, cred);
 	if (status)
 		goto out;
 
@@ -5578,10 +5754,15 @@
  * Issue the over-the-wire RPC DESTROY_SESSION.
  * The caller must serialize access to this routine.
  */
-int nfs4_proc_destroy_session(struct nfs4_session *session)
+int nfs4_proc_destroy_session(struct nfs4_session *session,
+		struct rpc_cred *cred)
 {
+	struct rpc_message msg = {
+		.rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_DESTROY_SESSION],
+		.rpc_argp = session,
+		.rpc_cred = cred,
+	};
 	int status = 0;
-	struct rpc_message msg;
 
 	dprintk("--> nfs4_proc_destroy_session\n");
 
@@ -5589,68 +5770,89 @@
 	if (session->clp->cl_cons_state != NFS_CS_READY)
 		return status;
 
-	msg.rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_DESTROY_SESSION];
-	msg.rpc_argp = session;
-	msg.rpc_resp = NULL;
-	msg.rpc_cred = NULL;
 	status = rpc_call_sync(session->clp->cl_rpcclient, &msg, RPC_TASK_TIMEOUT);
 
 	if (status)
-		printk(KERN_WARNING
-			"NFS: Got error %d from the server on DESTROY_SESSION. "
+		dprintk("NFS: Got error %d from the server on DESTROY_SESSION. "
 			"Session has been destroyed regardless...\n", status);
 
 	dprintk("<-- nfs4_proc_destroy_session\n");
 	return status;
 }
 
+/*
+ * With sessions, the client is not marked ready until after a
+ * successful EXCHANGE_ID and CREATE_SESSION.
+ *
+ * Map cl_cons_state errors to EPROTONOSUPPORT to indicate that
+ * other versions of NFS can be tried.
+ */
+static int nfs41_check_session_ready(struct nfs_client *clp)
+{
+	int ret;
+
+	if (clp->cl_cons_state == NFS_CS_SESSION_INITING) {
+		ret = nfs4_client_recover_expired_lease(clp);
+		if (ret)
+			return ret;
+	}
+	if (clp->cl_cons_state < NFS_CS_READY)
+		return -EPROTONOSUPPORT;
+	smp_rmb();
+	return 0;
+}
+
 int nfs4_init_session(struct nfs_server *server)
 {
 	struct nfs_client *clp = server->nfs_client;
 	struct nfs4_session *session;
 	unsigned int rsize, wsize;
-	int ret;
 
 	if (!nfs4_has_session(clp))
 		return 0;
 
 	session = clp->cl_session;
-	if (!test_and_clear_bit(NFS4_SESSION_INITING, &session->session_state))
-		return 0;
+	spin_lock(&clp->cl_lock);
+	if (test_and_clear_bit(NFS4_SESSION_INITING, &session->session_state)) {
 
-	rsize = server->rsize;
-	if (rsize == 0)
-		rsize = NFS_MAX_FILE_IO_SIZE;
-	wsize = server->wsize;
-	if (wsize == 0)
-		wsize = NFS_MAX_FILE_IO_SIZE;
+		rsize = server->rsize;
+		if (rsize == 0)
+			rsize = NFS_MAX_FILE_IO_SIZE;
+		wsize = server->wsize;
+		if (wsize == 0)
+			wsize = NFS_MAX_FILE_IO_SIZE;
 
-	session->fc_attrs.max_rqst_sz = wsize + nfs41_maxwrite_overhead;
-	session->fc_attrs.max_resp_sz = rsize + nfs41_maxread_overhead;
+		session->fc_attrs.max_rqst_sz = wsize + nfs41_maxwrite_overhead;
+		session->fc_attrs.max_resp_sz = rsize + nfs41_maxread_overhead;
+	}
+	spin_unlock(&clp->cl_lock);
 
-	ret = nfs4_recover_expired_lease(server);
-	if (!ret)
-		ret = nfs4_check_client_ready(clp);
-	return ret;
+	return nfs41_check_session_ready(clp);
 }
 
-int nfs4_init_ds_session(struct nfs_client *clp)
+int nfs4_init_ds_session(struct nfs_client *clp, unsigned long lease_time)
 {
 	struct nfs4_session *session = clp->cl_session;
 	int ret;
 
-	if (!test_and_clear_bit(NFS4_SESSION_INITING, &session->session_state))
-		return 0;
+	spin_lock(&clp->cl_lock);
+	if (test_and_clear_bit(NFS4_SESSION_INITING, &session->session_state)) {
+		/*
+		 * Do not set NFS_CS_CHECK_LEASE_TIME instead set the
+		 * Do not set NFS_CS_CHECK_LEASE_TIME; instead set the
+		 * DS lease to be equal to the MDS lease.
+		clp->cl_lease_time = lease_time;
+		clp->cl_last_renewal = jiffies;
+	}
+	spin_unlock(&clp->cl_lock);
 
-	ret = nfs4_client_recover_expired_lease(clp);
-	if (!ret)
-		/* Test for the DS role */
-		if (!is_ds_client(clp))
-			ret = -ENODEV;
-	if (!ret)
-		ret = nfs4_check_client_ready(clp);
-	return ret;
-
+	ret = nfs41_check_session_ready(clp);
+	if (ret)
+		return ret;
+	/* Test for the DS role */
+	if (!is_ds_client(clp))
+		return -ENODEV;
+	return 0;
 }
 EXPORT_SYMBOL_GPL(nfs4_init_ds_session);
 
@@ -6557,6 +6759,7 @@
 	.file_inode_ops	= &nfs4_file_inode_operations,
 	.file_ops	= &nfs4_file_operations,
 	.getroot	= nfs4_proc_get_root,
+	.submount	= nfs4_submount,
 	.getattr	= nfs4_proc_getattr,
 	.setattr	= nfs4_proc_setattr,
 	.lookup		= nfs4_proc_lookup,
@@ -6589,13 +6792,13 @@
 	.write_rpc_prepare = nfs4_proc_write_rpc_prepare,
 	.write_done	= nfs4_write_done,
 	.commit_setup	= nfs4_proc_commit_setup,
+	.commit_rpc_prepare = nfs4_proc_commit_rpc_prepare,
 	.commit_done	= nfs4_commit_done,
 	.lock		= nfs4_proc_lock,
 	.clear_acl_cache = nfs4_zap_acl_attr,
 	.close_context  = nfs4_close_context,
 	.open_context	= nfs4_atomic_open,
 	.init_client	= nfs4_init_client,
-	.secinfo	= nfs4_proc_secinfo,
 };
 
 static const struct xattr_handler nfs4_xattr_nfs4_acl_handler = {
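
On the boot-verifier change above: nfs4_init_boot_verifier() packs the per-net boot time into the 8-byte verifier, except when state is being purged, where it substitutes a timestamp no real clock can produce (tv_nsec can never reach NSEC_PER_SEC), so the purge verifier cannot collide with a generated one. A compact standalone sketch, with the net-namespace lookup replaced by plain arguments and byte-order handling simplified (the kernel stores the two words as __be32):

#include <string.h>

#define NSEC_PER_SEC 1000000000L

struct boot_verifier { unsigned char data[8]; };

static void init_boot_verifier(struct boot_verifier *bv,
			       long boot_sec, long boot_nsec, int purging)
{
	unsigned int verf[2];

	if (purging) {
		verf[0] = 0;
		verf[1] = (unsigned int)(NSEC_PER_SEC + 1);	/* impossible tv_nsec */
	} else {
		verf[0] = (unsigned int)boot_sec;
		verf[1] = (unsigned int)boot_nsec;
	}
	memcpy(bv->data, verf, sizeof(bv->data));
}

Presenting a changed verifier in EXCHANGE_ID (or SETCLIENTID) tells the server the client considers itself rebooted, so the old lease and its state get discarded; that is the effect NFS4CLNT_PURGE_STATE is after.
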
diff --git a/fs/nfs/nfs4renewd.c b/fs/nfs/nfs4renewd.c
index dc484c0..6930bec 100644
--- a/fs/nfs/nfs4renewd.c
+++ b/fs/nfs/nfs4renewd.c
@@ -49,7 +49,7 @@
 #include "nfs4_fs.h"
 #include "delegation.h"
 
-#define NFSDBG_FACILITY	NFSDBG_PROC
+#define NFSDBG_FACILITY		NFSDBG_STATE
 
 void
 nfs4_renew_state(struct work_struct *work)
diff --git a/fs/nfs/nfs4state.c b/fs/nfs/nfs4state.c
index 7f0fcfc..f38300e 100644
--- a/fs/nfs/nfs4state.c
+++ b/fs/nfs/nfs4state.c
@@ -57,6 +57,8 @@
 #include "internal.h"
 #include "pnfs.h"
 
+#define NFSDBG_FACILITY		NFSDBG_STATE
+
 #define OPENOWNER_POOL_SIZE	8
 
 const nfs4_stateid zero_stateid;
@@ -242,6 +244,16 @@
 	return nfs4_wait_on_slot_tbl(&ses->fc_slot_table);
 }
 
+static void nfs41_finish_session_reset(struct nfs_client *clp)
+{
+	clear_bit(NFS4CLNT_LEASE_CONFIRM, &clp->cl_state);
+	clear_bit(NFS4CLNT_SESSION_RESET, &clp->cl_state);
+	/* create_session negotiated new slot table */
+	clear_bit(NFS4CLNT_RECALL_SLOT, &clp->cl_state);
+	clear_bit(NFS4CLNT_BIND_CONN_TO_SESSION, &clp->cl_state);
+	nfs41_setup_state_renewal(clp);
+}
+
 int nfs41_init_clientid(struct nfs_client *clp, struct rpc_cred *cred)
 {
 	int status;
@@ -254,11 +266,10 @@
 		goto out;
 	set_bit(NFS4CLNT_LEASE_CONFIRM, &clp->cl_state);
 do_confirm:
-	status = nfs4_proc_create_session(clp);
+	status = nfs4_proc_create_session(clp, cred);
 	if (status != 0)
 		goto out;
-	clear_bit(NFS4CLNT_LEASE_CONFIRM, &clp->cl_state);
-	nfs41_setup_state_renewal(clp);
+	nfs41_finish_session_reset(clp);
 	nfs_mark_client_ready(clp, NFS_CS_READY);
 out:
 	return status;
@@ -1106,6 +1117,8 @@
 		return;
 	if (!test_bit(NFS4CLNT_LEASE_EXPIRED, &clp->cl_state))
 		set_bit(NFS4CLNT_CHECK_LEASE, &clp->cl_state);
+	dprintk("%s: scheduling lease recovery for server %s\n", __func__,
+			clp->cl_hostname);
 	nfs4_schedule_state_manager(clp);
 }
 EXPORT_SYMBOL_GPL(nfs4_schedule_lease_recovery);
@@ -1122,6 +1135,8 @@
 {
 	set_bit(NFS4CLNT_LEASE_EXPIRED, &clp->cl_state);
 	nfs_expire_all_delegations(clp);
+	dprintk("%s: handling CB_PATHDOWN recovery for server %s\n", __func__,
+			clp->cl_hostname);
 }
 
 void nfs4_schedule_path_down_recovery(struct nfs_client *clp)
@@ -1158,6 +1173,8 @@
 	struct nfs_client *clp = server->nfs_client;
 
 	nfs4_state_mark_reclaim_nograce(clp, state);
+	dprintk("%s: scheduling stateid recovery for server %s\n", __func__,
+			clp->cl_hostname);
 	nfs4_schedule_state_manager(clp);
 }
 EXPORT_SYMBOL_GPL(nfs4_schedule_stateid_recovery);
@@ -1491,19 +1508,25 @@
 		case -NFS4ERR_BADSLOT:
 		case -NFS4ERR_BAD_HIGH_SLOT:
 		case -NFS4ERR_DEADSESSION:
-		case -NFS4ERR_CONN_NOT_BOUND_TO_SESSION:
 		case -NFS4ERR_SEQ_FALSE_RETRY:
 		case -NFS4ERR_SEQ_MISORDERED:
 			set_bit(NFS4CLNT_SESSION_RESET, &clp->cl_state);
 			/* Zero session reset errors */
 			break;
+		case -NFS4ERR_CONN_NOT_BOUND_TO_SESSION:
+			set_bit(NFS4CLNT_BIND_CONN_TO_SESSION, &clp->cl_state);
+			break;
 		case -EKEYEXPIRED:
 			/* Nothing we can do */
 			nfs4_warn_keyexpired(clp->cl_hostname);
 			break;
 		default:
+			dprintk("%s: failed to handle error %d for server %s\n",
+					__func__, error, clp->cl_hostname);
 			return error;
 	}
+	dprintk("%s: handled error %d for server %s\n", __func__, error,
+			clp->cl_hostname);
 	return 0;
 }
 
@@ -1572,34 +1595,82 @@
 	return nfs4_recovery_handle_error(clp, status);
 }
 
+/* Set NFS4CLNT_LEASE_EXPIRED for all v4.0 errors and for recoverable errors
+ * on EXCHANGE_ID for v4.1
+ */
+static int nfs4_handle_reclaim_lease_error(struct nfs_client *clp, int status)
+{
+	switch (status) {
+	case -NFS4ERR_SEQ_MISORDERED:
+		if (test_and_set_bit(NFS4CLNT_PURGE_STATE, &clp->cl_state))
+			return -ESERVERFAULT;
+		/* Lease confirmation error: retry after purging the lease */
+		ssleep(1);
+	case -NFS4ERR_CLID_INUSE:
+	case -NFS4ERR_STALE_CLIENTID:
+		clear_bit(NFS4CLNT_LEASE_CONFIRM, &clp->cl_state);
+		break;
+	case -EACCES:
+		if (clp->cl_machine_cred == NULL)
+			return -EACCES;
+		/* Handle case where the user hasn't set up machine creds */
+		nfs4_clear_machine_cred(clp);
+	case -NFS4ERR_DELAY:
+	case -ETIMEDOUT:
+	case -EAGAIN:
+		ssleep(1);
+		break;
+
+	case -NFS4ERR_MINOR_VERS_MISMATCH:
+		if (clp->cl_cons_state == NFS_CS_SESSION_INITING)
+			nfs_mark_client_ready(clp, -EPROTONOSUPPORT);
+		dprintk("%s: exit with error %d for server %s\n",
+				__func__, -EPROTONOSUPPORT, clp->cl_hostname);
+		return -EPROTONOSUPPORT;
+	case -EKEYEXPIRED:
+		nfs4_warn_keyexpired(clp->cl_hostname);
+	case -NFS4ERR_NOT_SAME: /* FixMe: implement recovery
+				 * in nfs4_exchange_id */
+	default:
+		dprintk("%s: exit with error %d for server %s\n", __func__,
+				status, clp->cl_hostname);
+		return status;
+	}
+	set_bit(NFS4CLNT_LEASE_EXPIRED, &clp->cl_state);
+	dprintk("%s: handled error %d for server %s\n", __func__, status,
+			clp->cl_hostname);
+	return 0;
+}
+
 static int nfs4_reclaim_lease(struct nfs_client *clp)
 {
 	struct rpc_cred *cred;
 	const struct nfs4_state_recovery_ops *ops =
 		clp->cl_mvops->reboot_recovery_ops;
-	int status = -ENOENT;
+	int status;
 
 	cred = ops->get_clid_cred(clp);
-	if (cred != NULL) {
-		status = ops->establish_clid(clp, cred);
-		put_rpccred(cred);
-		/* Handle case where the user hasn't set up machine creds */
-		if (status == -EACCES && cred == clp->cl_machine_cred) {
-			nfs4_clear_machine_cred(clp);
-			status = -EAGAIN;
-		}
-		if (status == -NFS4ERR_MINOR_VERS_MISMATCH)
-			status = -EPROTONOSUPPORT;
-	}
-	return status;
+	if (cred == NULL)
+		return -ENOENT;
+	status = ops->establish_clid(clp, cred);
+	put_rpccred(cred);
+	if (status != 0)
+		return nfs4_handle_reclaim_lease_error(clp, status);
+	return 0;
 }
 
 #ifdef CONFIG_NFS_V4_1
-void nfs4_schedule_session_recovery(struct nfs4_session *session)
+void nfs4_schedule_session_recovery(struct nfs4_session *session, int err)
 {
 	struct nfs_client *clp = session->clp;
 
-	set_bit(NFS4CLNT_SESSION_RESET, &clp->cl_state);
+	switch (err) {
+	default:
+		set_bit(NFS4CLNT_SESSION_RESET, &clp->cl_state);
+		break;
+	case -NFS4ERR_CONN_NOT_BOUND_TO_SESSION:
+		set_bit(NFS4CLNT_BIND_CONN_TO_SESSION, &clp->cl_state);
+	}
 	nfs4_schedule_lease_recovery(clp);
 }
 EXPORT_SYMBOL_GPL(nfs4_schedule_session_recovery);
@@ -1607,14 +1678,19 @@
 void nfs41_handle_recall_slot(struct nfs_client *clp)
 {
 	set_bit(NFS4CLNT_RECALL_SLOT, &clp->cl_state);
+	dprintk("%s: scheduling slot recall for server %s\n", __func__,
+			clp->cl_hostname);
 	nfs4_schedule_state_manager(clp);
 }
 
 static void nfs4_reset_all_state(struct nfs_client *clp)
 {
 	if (test_and_set_bit(NFS4CLNT_LEASE_EXPIRED, &clp->cl_state) == 0) {
-		clp->cl_boot_time = CURRENT_TIME;
+		set_bit(NFS4CLNT_PURGE_STATE, &clp->cl_state);
+		clear_bit(NFS4CLNT_LEASE_CONFIRM, &clp->cl_state);
 		nfs4_state_start_reclaim_nograce(clp);
+		dprintk("%s: scheduling reset of all state for server %s!\n",
+				__func__, clp->cl_hostname);
 		nfs4_schedule_state_manager(clp);
 	}
 }
@@ -1623,26 +1699,39 @@
 {
 	if (test_and_set_bit(NFS4CLNT_LEASE_EXPIRED, &clp->cl_state) == 0) {
 		nfs4_state_start_reclaim_reboot(clp);
+		dprintk("%s: server %s rebooted!\n", __func__,
+				clp->cl_hostname);
 		nfs4_schedule_state_manager(clp);
 	}
 }
 
 static void nfs41_handle_state_revoked(struct nfs_client *clp)
 {
-	/* Temporary */
 	nfs4_reset_all_state(clp);
+	dprintk("%s: state revoked on server %s\n", __func__, clp->cl_hostname);
 }
 
 static void nfs41_handle_recallable_state_revoked(struct nfs_client *clp)
 {
 	/* This will need to handle layouts too */
 	nfs_expire_all_delegations(clp);
+	dprintk("%s: Recallable state revoked on server %s!\n", __func__,
+			clp->cl_hostname);
+}
+
+static void nfs41_handle_backchannel_fault(struct nfs_client *clp)
+{
+	nfs_expire_all_delegations(clp);
+	if (test_and_set_bit(NFS4CLNT_SESSION_RESET, &clp->cl_state) == 0)
+		nfs4_schedule_state_manager(clp);
+	dprintk("%s: server %s declared a backchannel fault\n", __func__,
+			clp->cl_hostname);
 }
 
 static void nfs41_handle_cb_path_down(struct nfs_client *clp)
 {
-	nfs_expire_all_delegations(clp);
-	if (test_and_set_bit(NFS4CLNT_SESSION_RESET, &clp->cl_state) == 0)
+	if (test_and_set_bit(NFS4CLNT_BIND_CONN_TO_SESSION,
+		&clp->cl_state) == 0)
 		nfs4_schedule_state_manager(clp);
 }
 
@@ -1650,6 +1739,10 @@
 {
 	if (!flags)
 		return;
+
+	dprintk("%s: \"%s\" (client ID %llx) flags=0x%08x\n",
+		__func__, clp->cl_hostname, clp->cl_clientid, flags);
+
 	if (flags & SEQ4_STATUS_RESTART_RECLAIM_NEEDED)
 		nfs41_handle_server_reboot(clp);
 	if (flags & (SEQ4_STATUS_EXPIRED_ALL_STATE_REVOKED |
@@ -1659,18 +1752,21 @@
 		nfs41_handle_state_revoked(clp);
 	if (flags & SEQ4_STATUS_RECALLABLE_STATE_REVOKED)
 		nfs41_handle_recallable_state_revoked(clp);
-	if (flags & (SEQ4_STATUS_CB_PATH_DOWN |
-			    SEQ4_STATUS_BACKCHANNEL_FAULT |
-			    SEQ4_STATUS_CB_PATH_DOWN_SESSION))
+	if (flags & SEQ4_STATUS_BACKCHANNEL_FAULT)
+		nfs41_handle_backchannel_fault(clp);
+	else if (flags & (SEQ4_STATUS_CB_PATH_DOWN |
+				SEQ4_STATUS_CB_PATH_DOWN_SESSION))
 		nfs41_handle_cb_path_down(clp);
 }
 
 static int nfs4_reset_session(struct nfs_client *clp)
 {
+	struct rpc_cred *cred;
 	int status;
 
 	nfs4_begin_drain_session(clp);
-	status = nfs4_proc_destroy_session(clp->cl_session);
+	cred = nfs4_get_exchange_id_cred(clp);
+	status = nfs4_proc_destroy_session(clp->cl_session, cred);
 	if (status && status != -NFS4ERR_BADSESSION &&
 	    status != -NFS4ERR_DEADSESSION) {
 		status = nfs4_recovery_handle_error(clp, status);
@@ -1678,19 +1774,19 @@
 	}
 
 	memset(clp->cl_session->sess_id.data, 0, NFS4_MAX_SESSIONID_LEN);
-	status = nfs4_proc_create_session(clp);
+	status = nfs4_proc_create_session(clp, cred);
 	if (status) {
-		status = nfs4_recovery_handle_error(clp, status);
+		dprintk("%s: session reset failed with status %d for server %s!\n",
+			__func__, status, clp->cl_hostname);
+		status = nfs4_handle_reclaim_lease_error(clp, status);
 		goto out;
 	}
-	clear_bit(NFS4CLNT_SESSION_RESET, &clp->cl_state);
-	/* create_session negotiated new slot table */
-	clear_bit(NFS4CLNT_RECALL_SLOT, &clp->cl_state);
-
-	 /* Let the state manager reestablish state */
-	if (!test_bit(NFS4CLNT_LEASE_EXPIRED, &clp->cl_state))
-		nfs41_setup_state_renewal(clp);
+	nfs41_finish_session_reset(clp);
+	dprintk("%s: session reset was successful for server %s!\n",
+			__func__, clp->cl_hostname);
 out:
+	if (cred)
+		put_rpccred(cred);
 	return status;
 }
 
@@ -1722,37 +1818,41 @@
 	return 0;
 }
 
+static int nfs4_bind_conn_to_session(struct nfs_client *clp)
+{
+	struct rpc_cred *cred;
+	int ret;
+
+	nfs4_begin_drain_session(clp);
+	cred = nfs4_get_exchange_id_cred(clp);
+	ret = nfs4_proc_bind_conn_to_session(clp, cred);
+	if (cred)
+		put_rpccred(cred);
+	clear_bit(NFS4CLNT_BIND_CONN_TO_SESSION, &clp->cl_state);
+	switch (ret) {
+	case 0:
+		dprintk("%s: bind_conn_to_session was successful for server %s!\n",
+			__func__, clp->cl_hostname);
+		break;
+	case -NFS4ERR_DELAY:
+		ssleep(1);
+		set_bit(NFS4CLNT_BIND_CONN_TO_SESSION, &clp->cl_state);
+		break;
+	default:
+		return nfs4_recovery_handle_error(clp, ret);
+	}
+	return 0;
+}
 #else /* CONFIG_NFS_V4_1 */
 static int nfs4_reset_session(struct nfs_client *clp) { return 0; }
 static int nfs4_end_drain_session(struct nfs_client *clp) { return 0; }
 static int nfs4_recall_slot(struct nfs_client *clp) { return 0; }
-#endif /* CONFIG_NFS_V4_1 */
 
-/* Set NFS4CLNT_LEASE_EXPIRED for all v4.0 errors and for recoverable errors
- * on EXCHANGE_ID for v4.1
- */
-static void nfs4_set_lease_expired(struct nfs_client *clp, int status)
+static int nfs4_bind_conn_to_session(struct nfs_client *clp)
 {
-	switch (status) {
-	case -NFS4ERR_CLID_INUSE:
-	case -NFS4ERR_STALE_CLIENTID:
-		clear_bit(NFS4CLNT_LEASE_CONFIRM, &clp->cl_state);
-		break;
-	case -NFS4ERR_DELAY:
-	case -ETIMEDOUT:
-	case -EAGAIN:
-		ssleep(1);
-		break;
-
-	case -EKEYEXPIRED:
-		nfs4_warn_keyexpired(clp->cl_hostname);
-	case -NFS4ERR_NOT_SAME: /* FixMe: implement recovery
-				 * in nfs4_exchange_id */
-	default:
-		return;
-	}
-	set_bit(NFS4CLNT_LEASE_EXPIRED, &clp->cl_state);
+	return 0;
 }
+#endif /* CONFIG_NFS_V4_1 */
 
 static void nfs4_state_manager(struct nfs_client *clp)
 {
@@ -1760,19 +1860,21 @@
 
 	/* Ensure exclusive access to NFSv4 state */
 	do {
+		if (test_bit(NFS4CLNT_PURGE_STATE, &clp->cl_state)) {
+			status = nfs4_reclaim_lease(clp);
+			if (status < 0)
+				goto out_error;
+			clear_bit(NFS4CLNT_PURGE_STATE, &clp->cl_state);
+			set_bit(NFS4CLNT_LEASE_EXPIRED, &clp->cl_state);
+		}
+
 		if (test_and_clear_bit(NFS4CLNT_LEASE_EXPIRED, &clp->cl_state)) {
 			/* We're going to have to re-establish a clientid */
 			status = nfs4_reclaim_lease(clp);
-			if (status) {
-				nfs4_set_lease_expired(clp, status);
-				if (test_bit(NFS4CLNT_LEASE_EXPIRED,
-							&clp->cl_state))
-					continue;
-				if (clp->cl_cons_state ==
-							NFS_CS_SESSION_INITING)
-					nfs_mark_client_ready(clp, status);
+			if (status < 0)
 				goto out_error;
-			}
+			if (test_bit(NFS4CLNT_LEASE_EXPIRED, &clp->cl_state))
+				continue;
 			clear_bit(NFS4CLNT_CHECK_LEASE, &clp->cl_state);
 
 			if (test_and_clear_bit(NFS4CLNT_SERVER_SCOPE_MISMATCH,
@@ -1803,6 +1905,15 @@
 				goto out_error;
 		}
 
+		/* Send BIND_CONN_TO_SESSION */
+		if (test_and_clear_bit(NFS4CLNT_BIND_CONN_TO_SESSION,
+				&clp->cl_state) && nfs4_has_session(clp)) {
+			status = nfs4_bind_conn_to_session(clp);
+			if (status < 0)
+				goto out_error;
+			continue;
+		}
+
 		/* First recover reboot state... */
 		if (test_bit(NFS4CLNT_RECLAIM_REBOOT, &clp->cl_state)) {
 			status = nfs4_do_reclaim(clp,
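
The state manager handles these events in a fixed priority order (purge state, then lease expiry, then check lease, session reset, connection rebinding, and finally reboot reclaim), and each continue restarts the scan so newly raised higher-priority work is seen first. Below is a simplified, self-contained model of that restart-from-the-top loop; the bit names and handler are made up, not the kernel's.

#include <stdio.h>

/* hypothetical state bits, highest priority first */
enum { PURGE_STATE, LEASE_EXPIRED, BIND_CONN, RECLAIM_REBOOT, NR_BITS };

static void handle(int bit) { printf("handling bit %d\n", bit); }

/* one simplified state-manager pass: after any handled event the scan
 * restarts from the top, mirroring the kernel's `continue` statements */
static void state_manager(unsigned long *state)
{
	int bit, progress = 1;

	while (progress) {
		progress = 0;
		for (bit = 0; bit < NR_BITS; bit++) {
			if (*state & (1UL << bit)) {
				*state &= ~(1UL << bit);
				handle(bit);
				progress = 1;
				break;	/* restart scan at highest priority */
			}
		}
	}
}

int main(void)
{
	unsigned long state = (1UL << RECLAIM_REBOOT) | (1UL << LEASE_EXPIRED);

	state_manager(&state);	/* lease expiry is handled before reboot reclaim */
	return 0;
}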
diff --git a/fs/nfs/nfs4xdr.c b/fs/nfs/nfs4xdr.c
index c54aae3..18fae29 100644
--- a/fs/nfs/nfs4xdr.c
+++ b/fs/nfs/nfs4xdr.c
@@ -53,9 +53,11 @@
 #include <linux/nfs4.h>
 #include <linux/nfs_fs.h>
 #include <linux/nfs_idmap.h>
+
 #include "nfs4_fs.h"
 #include "internal.h"
 #include "pnfs.h"
+#include "netns.h"
 
 #define NFSDBG_FACILITY		NFSDBG_XDR
 
@@ -99,9 +101,12 @@
 #define nfs4_path_maxsz		(1 + ((3 + NFS4_MAXPATHLEN) >> 2))
 #define nfs4_owner_maxsz	(1 + XDR_QUADLEN(IDMAP_NAMESZ))
 #define nfs4_group_maxsz	(1 + XDR_QUADLEN(IDMAP_NAMESZ))
+/* We support only one layout type per file system */
+#define decode_mdsthreshold_maxsz (1 + 1 + nfs4_fattr_bitmap_maxsz + 1 + 8)
 /* This is based on getfattr, which uses the most attributes: */
 #define nfs4_fattr_value_maxsz	(1 + (1 + 2 + 2 + 4 + 2 + 1 + 1 + 2 + 2 + \
-				3 + 3 + 3 + nfs4_owner_maxsz + nfs4_group_maxsz))
+				3 + 3 + 3 + nfs4_owner_maxsz + \
+				nfs4_group_maxsz + decode_mdsthreshold_maxsz))
 #define nfs4_fattr_maxsz	(nfs4_fattr_bitmap_maxsz + \
 				nfs4_fattr_value_maxsz)
 #define decode_getattr_maxsz    (op_decode_hdr_maxsz + nfs4_fattr_maxsz)
@@ -321,8 +326,20 @@
 				     1 /* csr_flags */ + \
 				     decode_channel_attrs_maxsz + \
 				     decode_channel_attrs_maxsz)
+#define encode_bind_conn_to_session_maxsz  (op_encode_hdr_maxsz + \
+				     /* bctsa_sessid */ \
+				     XDR_QUADLEN(NFS4_MAX_SESSIONID_LEN) + \
+				     1 /* bctsa_dir */ + \
+				     1 /* bctsa_use_conn_in_rdma_mode */)
+#define decode_bind_conn_to_session_maxsz  (op_decode_hdr_maxsz +	\
+				     /* bctsr_sessid */ \
+				     XDR_QUADLEN(NFS4_MAX_SESSIONID_LEN) + \
+				     1 /* bctsr_dir */ + \
+				     1 /* bctsr_use_conn_in_rdma_mode */)
 #define encode_destroy_session_maxsz    (op_encode_hdr_maxsz + 4)
 #define decode_destroy_session_maxsz    (op_decode_hdr_maxsz)
+#define encode_destroy_clientid_maxsz   (op_encode_hdr_maxsz + 2)
+#define decode_destroy_clientid_maxsz   (op_decode_hdr_maxsz)
 #define encode_sequence_maxsz	(op_encode_hdr_maxsz + \
 				XDR_QUADLEN(NFS4_MAX_SESSIONID_LEN) + 4)
 #define decode_sequence_maxsz	(op_decode_hdr_maxsz + \
@@ -421,30 +438,22 @@
 #define NFS4_enc_commit_sz	(compound_encode_hdr_maxsz + \
 				encode_sequence_maxsz + \
 				encode_putfh_maxsz + \
-				encode_commit_maxsz + \
-				encode_getattr_maxsz)
+				encode_commit_maxsz)
 #define NFS4_dec_commit_sz	(compound_decode_hdr_maxsz + \
 				decode_sequence_maxsz + \
 				decode_putfh_maxsz + \
-				decode_commit_maxsz + \
-				decode_getattr_maxsz)
+				decode_commit_maxsz)
 #define NFS4_enc_open_sz        (compound_encode_hdr_maxsz + \
 				encode_sequence_maxsz + \
 				encode_putfh_maxsz + \
-				encode_savefh_maxsz + \
 				encode_open_maxsz + \
 				encode_getfh_maxsz + \
-				encode_getattr_maxsz + \
-				encode_restorefh_maxsz + \
 				encode_getattr_maxsz)
 #define NFS4_dec_open_sz        (compound_decode_hdr_maxsz + \
 				decode_sequence_maxsz + \
 				decode_putfh_maxsz + \
-				decode_savefh_maxsz + \
 				decode_open_maxsz + \
 				decode_getfh_maxsz + \
-				decode_getattr_maxsz + \
-				decode_restorefh_maxsz + \
 				decode_getattr_maxsz)
 #define NFS4_enc_open_confirm_sz \
 				(compound_encode_hdr_maxsz + \
@@ -595,47 +604,37 @@
 #define NFS4_enc_remove_sz	(compound_encode_hdr_maxsz + \
 				encode_sequence_maxsz + \
 				encode_putfh_maxsz + \
-				encode_remove_maxsz + \
-				encode_getattr_maxsz)
+				encode_remove_maxsz)
 #define NFS4_dec_remove_sz	(compound_decode_hdr_maxsz + \
 				decode_sequence_maxsz + \
 				decode_putfh_maxsz + \
-				decode_remove_maxsz + \
-				decode_getattr_maxsz)
+				decode_remove_maxsz)
 #define NFS4_enc_rename_sz	(compound_encode_hdr_maxsz + \
 				encode_sequence_maxsz + \
 				encode_putfh_maxsz + \
 				encode_savefh_maxsz + \
 				encode_putfh_maxsz + \
-				encode_rename_maxsz + \
-				encode_getattr_maxsz + \
-				encode_restorefh_maxsz + \
-				encode_getattr_maxsz)
+				encode_rename_maxsz)
 #define NFS4_dec_rename_sz	(compound_decode_hdr_maxsz + \
 				decode_sequence_maxsz + \
 				decode_putfh_maxsz + \
 				decode_savefh_maxsz + \
 				decode_putfh_maxsz + \
-				decode_rename_maxsz + \
-				decode_getattr_maxsz + \
-				decode_restorefh_maxsz + \
-				decode_getattr_maxsz)
+				decode_rename_maxsz)
 #define NFS4_enc_link_sz	(compound_encode_hdr_maxsz + \
 				encode_sequence_maxsz + \
 				encode_putfh_maxsz + \
 				encode_savefh_maxsz + \
 				encode_putfh_maxsz + \
 				encode_link_maxsz + \
-				decode_getattr_maxsz + \
 				encode_restorefh_maxsz + \
-				decode_getattr_maxsz)
+				encode_getattr_maxsz)
 #define NFS4_dec_link_sz	(compound_decode_hdr_maxsz + \
 				decode_sequence_maxsz + \
 				decode_putfh_maxsz + \
 				decode_savefh_maxsz + \
 				decode_putfh_maxsz + \
 				decode_link_maxsz + \
-				decode_getattr_maxsz + \
 				decode_restorefh_maxsz + \
 				decode_getattr_maxsz)
 #define NFS4_enc_symlink_sz	(compound_encode_hdr_maxsz + \
@@ -653,20 +652,14 @@
 #define NFS4_enc_create_sz	(compound_encode_hdr_maxsz + \
 				encode_sequence_maxsz + \
 				encode_putfh_maxsz + \
-				encode_savefh_maxsz + \
 				encode_create_maxsz + \
 				encode_getfh_maxsz + \
-				encode_getattr_maxsz + \
-				encode_restorefh_maxsz + \
 				encode_getattr_maxsz)
 #define NFS4_dec_create_sz	(compound_decode_hdr_maxsz + \
 				decode_sequence_maxsz + \
 				decode_putfh_maxsz + \
-				decode_savefh_maxsz + \
 				decode_create_maxsz + \
 				decode_getfh_maxsz + \
-				decode_getattr_maxsz + \
-				decode_restorefh_maxsz + \
 				decode_getattr_maxsz)
 #define NFS4_enc_pathconf_sz	(compound_encode_hdr_maxsz + \
 				encode_sequence_maxsz + \
@@ -738,6 +731,12 @@
 				decode_putfh_maxsz + \
 				decode_secinfo_maxsz)
 #if defined(CONFIG_NFS_V4_1)
+#define NFS4_enc_bind_conn_to_session_sz \
+				(compound_encode_hdr_maxsz + \
+				 encode_bind_conn_to_session_maxsz)
+#define NFS4_dec_bind_conn_to_session_sz \
+				(compound_decode_hdr_maxsz + \
+				 decode_bind_conn_to_session_maxsz)
 #define NFS4_enc_exchange_id_sz \
 				(compound_encode_hdr_maxsz + \
 				 encode_exchange_id_maxsz)
@@ -754,6 +753,10 @@
 					 encode_destroy_session_maxsz)
 #define NFS4_dec_destroy_session_sz	(compound_decode_hdr_maxsz + \
 					 decode_destroy_session_maxsz)
+#define NFS4_enc_destroy_clientid_sz	(compound_encode_hdr_maxsz + \
+					 encode_destroy_clientid_maxsz)
+#define NFS4_dec_destroy_clientid_sz	(compound_decode_hdr_maxsz + \
+					 decode_destroy_clientid_maxsz)
 #define NFS4_enc_sequence_sz \
 				(compound_decode_hdr_maxsz + \
 				 encode_sequence_maxsz)
@@ -1103,7 +1106,7 @@
 	encode_nfs4_stateid(xdr, arg->stateid);
 }
 
-static void encode_commit(struct xdr_stream *xdr, const struct nfs_writeargs *args, struct compound_hdr *hdr)
+static void encode_commit(struct xdr_stream *xdr, const struct nfs_commitargs *args, struct compound_hdr *hdr)
 {
 	__be32 *p;
 
@@ -1194,6 +1197,17 @@
 			   bitmask[1] & nfs4_fattr_bitmap[1], hdr);
 }
 
+static void encode_getfattr_open(struct xdr_stream *xdr, const u32 *bitmask,
+				 const u32 *open_bitmap,
+				 struct compound_hdr *hdr)
+{
+	encode_getattr_three(xdr,
+			     bitmask[0] & open_bitmap[0],
+			     bitmask[1] & open_bitmap[1],
+			     bitmask[2] & open_bitmap[2],
+			     hdr);
+}
+
 static void encode_fsinfo(struct xdr_stream *xdr, const u32* bitmask, struct compound_hdr *hdr)
 {
 	encode_getattr_three(xdr,
@@ -1678,6 +1692,20 @@
 
 #if defined(CONFIG_NFS_V4_1)
 /* NFSv4.1 operations */
+static void encode_bind_conn_to_session(struct xdr_stream *xdr,
+				   struct nfs4_session *session,
+				   struct compound_hdr *hdr)
+{
+	__be32 *p;
+
+	encode_op_hdr(xdr, OP_BIND_CONN_TO_SESSION,
+		decode_bind_conn_to_session_maxsz, hdr);
+	encode_opaque_fixed(xdr, session->sess_id.data, NFS4_MAX_SESSIONID_LEN);
+	p = xdr_reserve_space(xdr, 8);
+	*p++ = cpu_to_be32(NFS4_CDFC4_BACK_OR_BOTH);
+	*p = 0;	/* use_conn_in_rdma_mode = False */
+}
+
 static void encode_exchange_id(struct xdr_stream *xdr,
 			       struct nfs41_exchange_id_args *args,
 			       struct compound_hdr *hdr)
@@ -1726,6 +1754,7 @@
 	char machine_name[NFS4_MAX_MACHINE_NAME_LEN];
 	uint32_t len;
 	struct nfs_client *clp = args->client;
+	struct nfs_net *nn = net_generic(clp->cl_net, nfs_net_id);
 	u32 max_resp_sz_cached;
 
 	/*
@@ -1767,7 +1796,7 @@
 	*p++ = cpu_to_be32(RPC_AUTH_UNIX);			/* auth_sys */
 
 	/* authsys_parms rfc1831 */
-	*p++ = cpu_to_be32((u32)clp->cl_boot_time.tv_nsec);	/* stamp */
+	*p++ = (__be32)nn->boot_time.tv_nsec;		/* stamp */
 	p = xdr_encode_opaque(p, machine_name, len);
 	*p++ = cpu_to_be32(0);				/* UID */
 	*p++ = cpu_to_be32(0);				/* GID */
@@ -1782,6 +1811,14 @@
 	encode_opaque_fixed(xdr, session->sess_id.data, NFS4_MAX_SESSIONID_LEN);
 }
 
+static void encode_destroy_clientid(struct xdr_stream *xdr,
+				   uint64_t clientid,
+				   struct compound_hdr *hdr)
+{
+	encode_op_hdr(xdr, OP_DESTROY_CLIENTID, decode_destroy_clientid_maxsz, hdr);
+	encode_uint64(xdr, clientid);
+}
+
 static void encode_reclaim_complete(struct xdr_stream *xdr,
 				    struct nfs41_reclaim_complete_args *args,
 				    struct compound_hdr *hdr)
@@ -2064,7 +2101,6 @@
 	encode_sequence(xdr, &args->seq_args, &hdr);
 	encode_putfh(xdr, args->fh, &hdr);
 	encode_remove(xdr, &args->name, &hdr);
-	encode_getfattr(xdr, args->bitmask, &hdr);
 	encode_nops(&hdr);
 }
 
@@ -2084,9 +2120,6 @@
 	encode_savefh(xdr, &hdr);
 	encode_putfh(xdr, args->new_dir, &hdr);
 	encode_rename(xdr, args->old_name, args->new_name, &hdr);
-	encode_getfattr(xdr, args->bitmask, &hdr);
-	encode_restorefh(xdr, &hdr);
-	encode_getfattr(xdr, args->bitmask, &hdr);
 	encode_nops(&hdr);
 }
 
@@ -2106,7 +2139,6 @@
 	encode_savefh(xdr, &hdr);
 	encode_putfh(xdr, args->dir_fh, &hdr);
 	encode_link(xdr, args->name, &hdr);
-	encode_getfattr(xdr, args->bitmask, &hdr);
 	encode_restorefh(xdr, &hdr);
 	encode_getfattr(xdr, args->bitmask, &hdr);
 	encode_nops(&hdr);
@@ -2125,12 +2157,9 @@
 	encode_compound_hdr(xdr, req, &hdr);
 	encode_sequence(xdr, &args->seq_args, &hdr);
 	encode_putfh(xdr, args->dir_fh, &hdr);
-	encode_savefh(xdr, &hdr);
 	encode_create(xdr, args, &hdr);
 	encode_getfh(xdr, &hdr);
 	encode_getfattr(xdr, args->bitmask, &hdr);
-	encode_restorefh(xdr, &hdr);
-	encode_getfattr(xdr, args->bitmask, &hdr);
 	encode_nops(&hdr);
 }
 
@@ -2191,12 +2220,9 @@
 	encode_compound_hdr(xdr, req, &hdr);
 	encode_sequence(xdr, &args->seq_args, &hdr);
 	encode_putfh(xdr, args->fh, &hdr);
-	encode_savefh(xdr, &hdr);
 	encode_open(xdr, args, &hdr);
 	encode_getfh(xdr, &hdr);
-	encode_getfattr(xdr, args->bitmask, &hdr);
-	encode_restorefh(xdr, &hdr);
-	encode_getfattr(xdr, args->dir_bitmask, &hdr);
+	encode_getfattr_open(xdr, args->bitmask, args->open_bitmap, &hdr);
 	encode_nops(&hdr);
 }
 
@@ -2448,7 +2474,7 @@
  *  a COMMIT request
  */
 static void nfs4_xdr_enc_commit(struct rpc_rqst *req, struct xdr_stream *xdr,
-				struct nfs_writeargs *args)
+				struct nfs_commitargs *args)
 {
 	struct compound_hdr hdr = {
 		.minorversion = nfs4_xdr_minorversion(&args->seq_args),
@@ -2458,8 +2484,6 @@
 	encode_sequence(xdr, &args->seq_args, &hdr);
 	encode_putfh(xdr, args->fh, &hdr);
 	encode_commit(xdr, args, &hdr);
-	if (args->bitmask)
-		encode_getfattr(xdr, args->bitmask, &hdr);
 	encode_nops(&hdr);
 }
 
@@ -2602,8 +2626,8 @@
 	encode_compound_hdr(xdr, req, &hdr);
 	encode_sequence(xdr, &args->seq_args, &hdr);
 	encode_putfh(xdr, args->fhandle, &hdr);
-	encode_delegreturn(xdr, args->stateid, &hdr);
 	encode_getfattr(xdr, args->bitmask, &hdr);
+	encode_delegreturn(xdr, args->stateid, &hdr);
 	encode_nops(&hdr);
 }
 
@@ -2651,6 +2675,22 @@
 
 #if defined(CONFIG_NFS_V4_1)
 /*
+ * BIND_CONN_TO_SESSION request
+ */
+static void nfs4_xdr_enc_bind_conn_to_session(struct rpc_rqst *req,
+				struct xdr_stream *xdr,
+				struct nfs_client *clp)
+{
+	struct compound_hdr hdr = {
+		.minorversion = clp->cl_mvops->minor_version,
+	};
+
+	encode_compound_hdr(xdr, req, &hdr);
+	encode_bind_conn_to_session(xdr, clp->cl_session, &hdr);
+	encode_nops(&hdr);
+}
+
+/*
  * EXCHANGE_ID request
  */
 static void nfs4_xdr_enc_exchange_id(struct rpc_rqst *req,
@@ -2699,6 +2739,22 @@
 }
 
 /*
+ * a DESTROY_CLIENTID request
+ */
+static void nfs4_xdr_enc_destroy_clientid(struct rpc_rqst *req,
+					 struct xdr_stream *xdr,
+					 struct nfs_client *clp)
+{
+	struct compound_hdr hdr = {
+		.minorversion = clp->cl_mvops->minor_version,
+	};
+
+	encode_compound_hdr(xdr, req, &hdr);
+	encode_destroy_clientid(xdr, clp->cl_clientid, &hdr);
+	encode_nops(&hdr);
+}
+
+/*
  * a SEQUENCE request
  */
 static void nfs4_xdr_enc_sequence(struct rpc_rqst *req, struct xdr_stream *xdr,
@@ -4102,7 +4158,7 @@
 	return decode_opaque_fixed(xdr, verifier, NFS4_VERIFIER_SIZE);
 }
 
-static int decode_commit(struct xdr_stream *xdr, struct nfs_writeres *res)
+static int decode_commit(struct xdr_stream *xdr, struct nfs_commitres *res)
 {
 	int status;
 
@@ -4220,6 +4276,114 @@
 	return status;
 }
 
+static int decode_threshold_hint(struct xdr_stream *xdr,
+				  uint32_t *bitmap,
+				  uint64_t *res,
+				  uint32_t hint_bit)
+{
+	__be32 *p;
+
+	*res = 0;
+	if (likely(bitmap[0] & hint_bit)) {
+		p = xdr_inline_decode(xdr, 8);
+		if (unlikely(!p))
+			goto out_overflow;
+		xdr_decode_hyper(p, res);
+	}
+	return 0;
+out_overflow:
+	print_overflow_msg(__func__, xdr);
+	return -EIO;
+}
+
+static int decode_first_threshold_item4(struct xdr_stream *xdr,
+					struct nfs4_threshold *res)
+{
+	__be32 *p, *savep;
+	uint32_t bitmap[3] = {0,}, attrlen;
+	int status;
+
+	/* layout type */
+	p = xdr_inline_decode(xdr, 4);
+	if (unlikely(!p)) {
+		print_overflow_msg(__func__, xdr);
+		return -EIO;
+	}
+	res->l_type = be32_to_cpup(p);
+
+	/* thi_hintset bitmap */
+	status = decode_attr_bitmap(xdr, bitmap);
+	if (status < 0)
+		goto xdr_error;
+
+	/* thi_hintlist length */
+	status = decode_attr_length(xdr, &attrlen, &savep);
+	if (status < 0)
+		goto xdr_error;
+	/* thi_hintlist */
+	status = decode_threshold_hint(xdr, bitmap, &res->rd_sz, THRESHOLD_RD);
+	if (status < 0)
+		goto xdr_error;
+	status = decode_threshold_hint(xdr, bitmap, &res->wr_sz, THRESHOLD_WR);
+	if (status < 0)
+		goto xdr_error;
+	status = decode_threshold_hint(xdr, bitmap, &res->rd_io_sz,
+				       THRESHOLD_RD_IO);
+	if (status < 0)
+		goto xdr_error;
+	status = decode_threshold_hint(xdr, bitmap, &res->wr_io_sz,
+				       THRESHOLD_WR_IO);
+	if (status < 0)
+		goto xdr_error;
+
+	status = verify_attr_len(xdr, savep, attrlen);
+	res->bm = bitmap[0];
+
+	dprintk("%s bm=0x%x rd_sz=%llu wr_sz=%llu rd_io=%llu wr_io=%llu\n",
+		 __func__, res->bm, res->rd_sz, res->wr_sz, res->rd_io_sz,
+		res->wr_io_sz);
+xdr_error:
+	dprintk("%s ret=%d!\n", __func__, status);
+	return status;
+}
+
+/*
+ * Thresholds on pNFS direct I/O vs. MDS I/O
+ */
+static int decode_attr_mdsthreshold(struct xdr_stream *xdr,
+				    uint32_t *bitmap,
+				    struct nfs4_threshold *res)
+{
+	__be32 *p;
+	int status = 0;
+	uint32_t num;
+
+	if (unlikely(bitmap[2] & (FATTR4_WORD2_MDSTHRESHOLD - 1U)))
+		return -EIO;
+	if (bitmap[2] & FATTR4_WORD2_MDSTHRESHOLD) {
+		/* Did the server return an unrequested attribute? */
+		if (unlikely(res == NULL))
+			return -EREMOTEIO;
+		p = xdr_inline_decode(xdr, 4);
+		if (unlikely(!p))
+			goto out_overflow;
+		num = be32_to_cpup(p);
+		if (num == 0)
+			return 0;
+		if (num > 1)
+			printk(KERN_INFO "%s: Warning: Multiple pNFS layout "
+				"drivers per filesystem not supported\n",
+				__func__);
+
+		status = decode_first_threshold_item4(xdr, res);
+		bitmap[2] &= ~FATTR4_WORD2_MDSTHRESHOLD;
+	}
+	return status;
+out_overflow:
+	print_overflow_msg(__func__, xdr);
+	return -EIO;
+}
+
 static int decode_getfattr_attrs(struct xdr_stream *xdr, uint32_t *bitmap,
 		struct nfs_fattr *fattr, struct nfs_fh *fh,
 		struct nfs4_fs_locations *fs_loc,
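
decode_threshold_hint() consumes 8 bytes of XDR only for a hint whose bit is set in the thi_hintset bitmap, so absent hints take no space on the wire. A self-contained sketch of the same bitmap-gated decode over a plain byte buffer follows; the bit names and values are illustrative, not the kernel's THRESHOLD_* definitions.

#include <stdint.h>
#include <stdio.h>

#define HINT_RD_SZ 0x1	/* hypothetical stand-ins for the hint bits */
#define HINT_WR_SZ 0x2

/* pull one big-endian u64 hint from buf only if its bit is set */
static int get_hint(const uint8_t **buf, size_t *len, uint32_t bitmap,
		    uint32_t bit, uint64_t *out)
{
	int i;

	*out = 0;
	if (!(bitmap & bit))
		return 0;		/* hint absent: nothing on the wire */
	if (*len < 8)
		return -1;		/* would overflow the buffer */
	for (i = 0; i < 8; i++)
		*out = (*out << 8) | (*buf)[i];
	*buf += 8;
	*len -= 8;
	return 0;
}

int main(void)
{
	/* wire data carries only the read-size hint */
	uint8_t wire[8] = { 0, 0, 0, 0, 0, 0x10, 0, 0 };	/* 0x100000 = 1 MiB */
	const uint8_t *p = wire;
	size_t len = sizeof(wire);
	uint32_t bitmap = HINT_RD_SZ;
	uint64_t rd_sz, wr_sz;

	get_hint(&p, &len, bitmap, HINT_RD_SZ, &rd_sz);
	get_hint(&p, &len, bitmap, HINT_WR_SZ, &wr_sz);
	printf("rd_sz=%llu wr_sz=%llu\n",
	       (unsigned long long)rd_sz, (unsigned long long)wr_sz);
	return 0;
}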
@@ -4326,6 +4490,10 @@
 		goto xdr_error;
 	fattr->valid |= status;
 
+	status = decode_attr_mdsthreshold(xdr, bitmap, fattr->mdsthreshold);
+	if (status < 0)
+		goto xdr_error;
+
 xdr_error:
 	dprintk("%s: xdr returned %d\n", __func__, -status);
 	return status;
@@ -5156,7 +5324,6 @@
 	uint32_t dummy;
 	char *dummy_str;
 	int status;
-	struct nfs_client *clp = res->client;
 	uint32_t impl_id_count;
 
 	status = decode_op_hdr(xdr, OP_EXCHANGE_ID);
@@ -5166,36 +5333,39 @@
 	p = xdr_inline_decode(xdr, 8);
 	if (unlikely(!p))
 		goto out_overflow;
-	xdr_decode_hyper(p, &clp->cl_clientid);
+	xdr_decode_hyper(p, &res->clientid);
 	p = xdr_inline_decode(xdr, 12);
 	if (unlikely(!p))
 		goto out_overflow;
-	clp->cl_seqid = be32_to_cpup(p++);
-	clp->cl_exchange_flags = be32_to_cpup(p++);
+	res->seqid = be32_to_cpup(p++);
+	res->flags = be32_to_cpup(p++);
 
 	/* We ask for SP4_NONE */
 	dummy = be32_to_cpup(p);
 	if (dummy != SP4_NONE)
 		return -EIO;
 
-	/* Throw away minor_id */
+	/* server_owner4.so_minor_id */
 	p = xdr_inline_decode(xdr, 8);
 	if (unlikely(!p))
 		goto out_overflow;
+	p = xdr_decode_hyper(p, &res->server_owner->minor_id);
 
-	/* Throw away Major id */
+	/* server_owner4.so_major_id */
 	status = decode_opaque_inline(xdr, &dummy, &dummy_str);
 	if (unlikely(status))
 		return status;
-
-	/* Save server_scope */
-	status = decode_opaque_inline(xdr, &dummy, &dummy_str);
-	if (unlikely(status))
-		return status;
-
 	if (unlikely(dummy > NFS4_OPAQUE_LIMIT))
 		return -EIO;
+	memcpy(res->server_owner->major_id, dummy_str, dummy);
+	res->server_owner->major_id_sz = dummy;
 
+	/* server_scope4 */
+	status = decode_opaque_inline(xdr, &dummy, &dummy_str);
+	if (unlikely(status))
+		return status;
+	if (unlikely(dummy > NFS4_OPAQUE_LIMIT))
+		return -EIO;
 	memcpy(res->server_scope->server_scope, dummy_str, dummy);
 	res->server_scope->server_scope_sz = dummy;
 
@@ -5276,6 +5446,37 @@
 	return decode_opaque_fixed(xdr, sid->data, NFS4_MAX_SESSIONID_LEN);
 }
 
+static int decode_bind_conn_to_session(struct xdr_stream *xdr,
+				struct nfs41_bind_conn_to_session_res *res)
+{
+	__be32 *p;
+	int status;
+
+	status = decode_op_hdr(xdr, OP_BIND_CONN_TO_SESSION);
+	if (!status)
+		status = decode_sessionid(xdr, &res->session->sess_id);
+	if (unlikely(status))
+		return status;
+
+	/* dir flags, rdma mode bool */
+	p = xdr_inline_decode(xdr, 8);
+	if (unlikely(!p))
+		goto out_overflow;
+
+	res->dir = be32_to_cpup(p++);
+	if (res->dir == 0 || res->dir > NFS4_CDFS4_BOTH)
+		return -EIO;
+	if (be32_to_cpup(p) == 0)
+		res->use_conn_in_rdma_mode = false;
+	else
+		res->use_conn_in_rdma_mode = true;
+
+	return 0;
+out_overflow:
+	print_overflow_msg(__func__, xdr);
+	return -EIO;
+}
+
 static int decode_create_session(struct xdr_stream *xdr,
 				 struct nfs41_create_session_res *res)
 {
@@ -5312,6 +5513,11 @@
 	return decode_op_hdr(xdr, OP_DESTROY_SESSION);
 }
 
+static int decode_destroy_clientid(struct xdr_stream *xdr, void *dummy)
+{
+	return decode_op_hdr(xdr, OP_DESTROY_CLIENTID);
+}
+
 static int decode_reclaim_complete(struct xdr_stream *xdr, void *dummy)
 {
 	return decode_op_hdr(xdr, OP_RECLAIM_COMPLETE);
@@ -5800,9 +6006,6 @@
 	if (status)
 		goto out;
 	status = decode_remove(xdr, &res->cinfo);
-	if (status)
-		goto out;
-	decode_getfattr(xdr, res->dir_attr, res->server);
 out:
 	return status;
 }
@@ -5832,15 +6035,6 @@
 	if (status)
 		goto out;
 	status = decode_rename(xdr, &res->old_cinfo, &res->new_cinfo);
-	if (status)
-		goto out;
-	/* Current FH is target directory */
-	if (decode_getfattr(xdr, res->new_fattr, res->server))
-		goto out;
-	status = decode_restorefh(xdr);
-	if (status)
-		goto out;
-	decode_getfattr(xdr, res->old_fattr, res->server);
 out:
 	return status;
 }
@@ -5876,8 +6070,6 @@
 	 * Note order: OP_LINK leaves the directory as the current
 	 *             filehandle.
 	 */
-	if (decode_getfattr(xdr, res->dir_attr, res->server))
-		goto out;
 	status = decode_restorefh(xdr);
 	if (status)
 		goto out;
@@ -5904,21 +6096,13 @@
 	status = decode_putfh(xdr);
 	if (status)
 		goto out;
-	status = decode_savefh(xdr);
-	if (status)
-		goto out;
 	status = decode_create(xdr, &res->dir_cinfo);
 	if (status)
 		goto out;
 	status = decode_getfh(xdr, res->fh);
 	if (status)
 		goto out;
-	if (decode_getfattr(xdr, res->fattr, res->server))
-		goto out;
-	status = decode_restorefh(xdr);
-	if (status)
-		goto out;
-	decode_getfattr(xdr, res->dir_fattr, res->server);
+	decode_getfattr(xdr, res->fattr, res->server);
 out:
 	return status;
 }
@@ -6075,19 +6259,12 @@
 	status = decode_putfh(xdr);
 	if (status)
 		goto out;
-	status = decode_savefh(xdr);
-	if (status)
-		goto out;
 	status = decode_open(xdr, res);
 	if (status)
 		goto out;
 	if (decode_getfh(xdr, &res->fh) != 0)
 		goto out;
-	if (decode_getfattr(xdr, res->f_attr, res->server) != 0)
-		goto out;
-	if (decode_restorefh(xdr) != 0)
-		goto out;
-	decode_getfattr(xdr, res->dir_attr, res->server);
+	decode_getfattr(xdr, res->f_attr, res->server);
 out:
 	return status;
 }
@@ -6353,7 +6530,7 @@
  * Decode COMMIT response
  */
 static int nfs4_xdr_dec_commit(struct rpc_rqst *rqstp, struct xdr_stream *xdr,
-			       struct nfs_writeres *res)
+			       struct nfs_commitres *res)
 {
 	struct compound_hdr hdr;
 	int status;
@@ -6368,10 +6545,6 @@
 	if (status)
 		goto out;
 	status = decode_commit(xdr, res);
-	if (status)
-		goto out;
-	if (res->fattr)
-		decode_getfattr(xdr, res->fattr, res->server);
 out:
 	return status;
 }
@@ -6527,10 +6700,10 @@
 	status = decode_putfh(xdr);
 	if (status != 0)
 		goto out;
-	status = decode_delegreturn(xdr);
+	status = decode_getfattr(xdr, res->fattr, res->server);
 	if (status != 0)
 		goto out;
-	decode_getfattr(xdr, res->fattr, res->server);
+	status = decode_delegreturn(xdr);
 out:
 	return status;
 }
@@ -6591,6 +6764,22 @@
 
 #if defined(CONFIG_NFS_V4_1)
 /*
+ * Decode BIND_CONN_TO_SESSION response
+ */
+static int nfs4_xdr_dec_bind_conn_to_session(struct rpc_rqst *rqstp,
+					struct xdr_stream *xdr,
+					void *res)
+{
+	struct compound_hdr hdr;
+	int status;
+
+	status = decode_compound_hdr(xdr, &hdr);
+	if (!status)
+		status = decode_bind_conn_to_session(xdr, res);
+	return status;
+}
+
+/*
  * Decode EXCHANGE_ID response
  */
 static int nfs4_xdr_dec_exchange_id(struct rpc_rqst *rqstp,
@@ -6639,6 +6828,22 @@
 }
 
 /*
+ * Decode DESTROY_CLIENTID response
+ */
+static int nfs4_xdr_dec_destroy_clientid(struct rpc_rqst *rqstp,
+					struct xdr_stream *xdr,
+					void *res)
+{
+	struct compound_hdr hdr;
+	int status;
+
+	status = decode_compound_hdr(xdr, &hdr);
+	if (!status)
+		status = decode_destroy_clientid(xdr, res);
+	return status;
+}
+
+/*
  * Decode SEQUENCE response
  */
 static int nfs4_xdr_dec_sequence(struct rpc_rqst *rqstp,
@@ -7085,6 +7290,9 @@
 	PROC(TEST_STATEID,	enc_test_stateid,	dec_test_stateid),
 	PROC(FREE_STATEID,	enc_free_stateid,	dec_free_stateid),
 	PROC(GETDEVICELIST,	enc_getdevicelist,	dec_getdevicelist),
+	PROC(BIND_CONN_TO_SESSION,
+			enc_bind_conn_to_session, dec_bind_conn_to_session),
+	PROC(DESTROY_CLIENTID,	enc_destroy_clientid,	dec_destroy_clientid),
 #endif /* CONFIG_NFS_V4_1 */
 };
 
diff --git a/fs/nfs/objlayout/objio_osd.c b/fs/nfs/objlayout/objio_osd.c
index 4bff4a3..b47277ba 100644
--- a/fs/nfs/objlayout/objio_osd.c
+++ b/fs/nfs/objlayout/objio_osd.c
@@ -211,7 +211,7 @@
 	memcpy(ocomp->cred, src_comp->oc_cap.cred, sizeof(ocomp->cred));
 }
 
-int __alloc_objio_seg(unsigned numdevs, gfp_t gfp_flags,
+static int __alloc_objio_seg(unsigned numdevs, gfp_t gfp_flags,
 		       struct objio_segment **pseg)
 {
 /*	This is the in memory structure of the objio_segment
@@ -440,11 +440,12 @@
 
 int objio_read_pagelist(struct nfs_read_data *rdata)
 {
+	struct nfs_pgio_header *hdr = rdata->header;
 	struct objio_state *objios;
 	int ret;
 
-	ret = objio_alloc_io_state(NFS_I(rdata->inode)->layout, true,
-			rdata->lseg, rdata->args.pages, rdata->args.pgbase,
+	ret = objio_alloc_io_state(NFS_I(hdr->inode)->layout, true,
+			hdr->lseg, rdata->args.pages, rdata->args.pgbase,
 			rdata->args.offset, rdata->args.count, rdata,
 			GFP_KERNEL, &objios);
 	if (unlikely(ret))
@@ -483,12 +484,12 @@
 {
 	struct objio_state *objios = priv;
 	struct nfs_write_data *wdata = objios->oir.rpcdata;
+	struct address_space *mapping = wdata->header->inode->i_mapping;
 	pgoff_t index = offset / PAGE_SIZE;
-	struct page *page = find_get_page(wdata->inode->i_mapping, index);
+	struct page *page = find_get_page(mapping, index);
 
 	if (!page) {
-		page = find_or_create_page(wdata->inode->i_mapping,
-						index, GFP_NOFS);
+		page = find_or_create_page(mapping, index, GFP_NOFS);
 		if (unlikely(!page)) {
 			dprintk("%s: grab_cache_page Failed index=0x%lx\n",
 				__func__, index);
@@ -518,11 +519,12 @@
 
 int objio_write_pagelist(struct nfs_write_data *wdata, int how)
 {
+	struct nfs_pgio_header *hdr = wdata->header;
 	struct objio_state *objios;
 	int ret;
 
-	ret = objio_alloc_io_state(NFS_I(wdata->inode)->layout, false,
-			wdata->lseg, wdata->args.pages, wdata->args.pgbase,
+	ret = objio_alloc_io_state(NFS_I(hdr->inode)->layout, false,
+			hdr->lseg, wdata->args.pages, wdata->args.pgbase,
 			wdata->args.offset, wdata->args.count, wdata, GFP_NOFS,
 			&objios);
 	if (unlikely(ret))
diff --git a/fs/nfs/objlayout/objlayout.c b/fs/nfs/objlayout/objlayout.c
index 595c5fc..8746135 100644
--- a/fs/nfs/objlayout/objlayout.c
+++ b/fs/nfs/objlayout/objlayout.c
@@ -258,7 +258,7 @@
 	if (status >= 0)
 		rdata->res.count = status;
 	else
-		rdata->pnfs_error = status;
+		rdata->header->pnfs_error = status;
 	objlayout_iodone(oir);
 	/* must not use oir after this point */
 
@@ -279,12 +279,14 @@
 enum pnfs_try_status
 objlayout_read_pagelist(struct nfs_read_data *rdata)
 {
+	struct nfs_pgio_header *hdr = rdata->header;
+	struct inode *inode = hdr->inode;
 	loff_t offset = rdata->args.offset;
 	size_t count = rdata->args.count;
 	int err;
 	loff_t eof;
 
-	eof = i_size_read(rdata->inode);
+	eof = i_size_read(inode);
 	if (unlikely(offset + count > eof)) {
 		if (offset >= eof) {
 			err = 0;
@@ -297,17 +299,17 @@
 	}
 
 	rdata->res.eof = (offset + count) >= eof;
-	_fix_verify_io_params(rdata->lseg, &rdata->args.pages,
+	_fix_verify_io_params(hdr->lseg, &rdata->args.pages,
 			      &rdata->args.pgbase,
 			      rdata->args.offset, rdata->args.count);
 
 	dprintk("%s: inode(%lx) offset 0x%llx count 0x%Zx eof=%d\n",
-		__func__, rdata->inode->i_ino, offset, count, rdata->res.eof);
+		__func__, inode->i_ino, offset, count, rdata->res.eof);
 
 	err = objio_read_pagelist(rdata);
  out:
 	if (unlikely(err)) {
-		rdata->pnfs_error = err;
+		hdr->pnfs_error = err;
 		dprintk("%s: Returned Error %d\n", __func__, err);
 		return PNFS_NOT_ATTEMPTED;
 	}
@@ -340,7 +342,7 @@
 		wdata->res.count = status;
 		wdata->verf.committed = oir->committed;
 	} else {
-		wdata->pnfs_error = status;
+		wdata->header->pnfs_error = status;
 	}
 	objlayout_iodone(oir);
 	/* must not use oir after this point */
@@ -363,15 +365,16 @@
 objlayout_write_pagelist(struct nfs_write_data *wdata,
 			 int how)
 {
+	struct nfs_pgio_header *hdr = wdata->header;
 	int err;
 
-	_fix_verify_io_params(wdata->lseg, &wdata->args.pages,
+	_fix_verify_io_params(hdr->lseg, &wdata->args.pages,
 			      &wdata->args.pgbase,
 			      wdata->args.offset, wdata->args.count);
 
 	err = objio_write_pagelist(wdata, how);
 	if (unlikely(err)) {
-		wdata->pnfs_error = err;
+		hdr->pnfs_error = err;
 		dprintk("%s: Returned Error %d\n", __func__, err);
 		return PNFS_NOT_ATTEMPTED;
 	}
diff --git a/fs/nfs/pagelist.c b/fs/nfs/pagelist.c
index d21fcea..aed913c 100644
--- a/fs/nfs/pagelist.c
+++ b/fs/nfs/pagelist.c
@@ -26,6 +26,47 @@
 
 static struct kmem_cache *nfs_page_cachep;
 
+bool nfs_pgarray_set(struct nfs_page_array *p, unsigned int pagecount)
+{
+	p->npages = pagecount;
+	if (pagecount <= ARRAY_SIZE(p->page_array))
+		p->pagevec = p->page_array;
+	else {
+		p->pagevec = kcalloc(pagecount, sizeof(struct page *), GFP_KERNEL);
+		if (!p->pagevec)
+			p->npages = 0;
+	}
+	return p->pagevec != NULL;
+}
+
+void nfs_pgheader_init(struct nfs_pageio_descriptor *desc,
+		       struct nfs_pgio_header *hdr,
+		       void (*release)(struct nfs_pgio_header *hdr))
+{
+	hdr->req = nfs_list_entry(desc->pg_list.next);
+	hdr->inode = desc->pg_inode;
+	hdr->cred = hdr->req->wb_context->cred;
+	hdr->io_start = req_offset(hdr->req);
+	hdr->good_bytes = desc->pg_count;
+	hdr->dreq = desc->pg_dreq;
+	hdr->release = release;
+	hdr->completion_ops = desc->pg_completion_ops;
+	if (hdr->completion_ops->init_hdr)
+		hdr->completion_ops->init_hdr(hdr);
+}
+
+void nfs_set_pgio_error(struct nfs_pgio_header *hdr, int error, loff_t pos)
+{
+	spin_lock(&hdr->lock);
+	if (pos < hdr->io_start + hdr->good_bytes) {
+		set_bit(NFS_IOHDR_ERROR, &hdr->flags);
+		clear_bit(NFS_IOHDR_EOF, &hdr->flags);
+		hdr->good_bytes = pos - hdr->io_start;
+		hdr->error = error;
+	}
+	spin_unlock(&hdr->lock);
+}
+
 static inline struct nfs_page *
 nfs_page_alloc(void)
 {
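
nfs_set_pgio_error() keeps only the lowest failing offset: good_bytes always describes a clean prefix of the I/O, and later errors at higher offsets are ignored. A toy model of that bookkeeping, without the locking and with a made-up struct:

#include <stdio.h>

struct pgio_hdr {
	long long io_start;
	long long good_bytes;
	int error;
};

static void set_pgio_error(struct pgio_hdr *hdr, int error, long long pos)
{
	if (pos < hdr->io_start + hdr->good_bytes) {
		hdr->good_bytes = pos - hdr->io_start;	/* keep only the clean prefix */
		hdr->error = error;
	}
}

int main(void)
{
	struct pgio_hdr hdr = { .io_start = 4096, .good_bytes = 16384, .error = 0 };

	set_pgio_error(&hdr, -5, 12288);	/* error at offset 12288 */
	set_pgio_error(&hdr, -5, 16384);	/* later error: ignored, not the lowest */
	printf("good_bytes=%lld error=%d\n", hdr.good_bytes, hdr.error);
	return 0;
}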
@@ -76,12 +117,8 @@
 	 * long write-back delay. This will be adjusted in
 	 * update_nfs_request below if the region is not locked. */
 	req->wb_page    = page;
-	atomic_set(&req->wb_complete, 0);
 	req->wb_index	= page->index;
 	page_cache_get(page);
-	BUG_ON(PagePrivate(page));
-	BUG_ON(!PageLocked(page));
-	BUG_ON(page->mapping->host != inode);
 	req->wb_offset  = offset;
 	req->wb_pgbase	= offset;
 	req->wb_bytes   = count;
@@ -104,6 +141,15 @@
 	clear_bit(PG_BUSY, &req->wb_flags);
 	smp_mb__after_clear_bit();
 	wake_up_bit(&req->wb_flags, PG_BUSY);
+}
+
+/**
+ * nfs_unlock_and_release_request - Unlock request and release the nfs_page
+ * @req: request to unlock and release
+ */
+void nfs_unlock_and_release_request(struct nfs_page *req)
+{
+	nfs_unlock_request(req);
 	nfs_release_request(req);
 }
 
@@ -203,6 +249,7 @@
 void nfs_pageio_init(struct nfs_pageio_descriptor *desc,
 		     struct inode *inode,
 		     const struct nfs_pageio_ops *pg_ops,
+		     const struct nfs_pgio_completion_ops *compl_ops,
 		     size_t bsize,
 		     int io_flags)
 {
@@ -215,9 +262,11 @@
 	desc->pg_recoalesce = 0;
 	desc->pg_inode = inode;
 	desc->pg_ops = pg_ops;
+	desc->pg_completion_ops = compl_ops;
 	desc->pg_ioflags = io_flags;
 	desc->pg_error = 0;
 	desc->pg_lseg = NULL;
+	desc->pg_dreq = NULL;
 }
 
 /**
@@ -241,12 +290,12 @@
 		return false;
 	if (req->wb_context->state != prev->wb_context->state)
 		return false;
-	if (req->wb_index != (prev->wb_index + 1))
-		return false;
 	if (req->wb_pgbase != 0)
 		return false;
 	if (prev->wb_pgbase + prev->wb_bytes != PAGE_CACHE_SIZE)
 		return false;
+	if (req_offset(req) != req_offset(prev) + prev->wb_bytes)
+		return false;
 	return pgio->pg_ops->pg_test(pgio, prev, req);
 }
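
The coalescing test now asks whether the new request starts exactly where the previous one ended in byte terms (req_offset(prev) + prev->wb_bytes == req_offset(req)) instead of requiring consecutive page indices, so two sub-page requests within the same page can still be merged. A small illustrative check with a made-up request struct:

#include <stdbool.h>
#include <stdio.h>

/* simplified request: absolute byte offset and length */
struct req {
	unsigned long long offset;
	unsigned int bytes;
};

static bool can_coalesce(const struct req *prev, const struct req *cur)
{
	return prev->offset + prev->bytes == cur->offset;
}

int main(void)
{
	struct req a = { .offset = 0,    .bytes = 2048 };	/* first half of page 0 */
	struct req b = { .offset = 2048, .bytes = 2048 };	/* second half of page 0 */
	struct req c = { .offset = 8192, .bytes = 4096 };	/* page 2: not contiguous */

	printf("a+b: %d, b+c: %d\n", can_coalesce(&a, &b), can_coalesce(&b, &c));
	return 0;
}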
 
diff --git a/fs/nfs/pnfs.c b/fs/nfs/pnfs.c
index 38512bc..bbc49ca 100644
--- a/fs/nfs/pnfs.c
+++ b/fs/nfs/pnfs.c
@@ -70,6 +70,10 @@
 
 	spin_lock(&pnfs_spinlock);
 	local = find_pnfs_driver_locked(id);
+	if (local != NULL && !try_module_get(local->owner)) {
+		dprintk("%s: Could not grab reference on module\n", __func__);
+		local = NULL;
+	}
 	spin_unlock(&pnfs_spinlock);
 	return local;
 }
@@ -80,6 +84,9 @@
 	if (nfss->pnfs_curr_ld) {
 		if (nfss->pnfs_curr_ld->clear_layoutdriver)
 			nfss->pnfs_curr_ld->clear_layoutdriver(nfss);
+		/* Decrement the MDS count. Purge the deviceid cache if zero */
+		if (atomic_dec_and_test(&nfss->nfs_client->cl_mds_count))
+			nfs4_deviceid_purge_client(nfss->nfs_client);
 		module_put(nfss->pnfs_curr_ld->owner);
 	}
 	nfss->pnfs_curr_ld = NULL;
@@ -115,10 +122,6 @@
 			goto out_no_driver;
 		}
 	}
-	if (!try_module_get(ld_type->owner)) {
-		dprintk("%s: Could not grab reference on module\n", __func__);
-		goto out_no_driver;
-	}
 	server->pnfs_curr_ld = ld_type;
 	if (ld_type->set_layoutdriver
 	    && ld_type->set_layoutdriver(server, mntfh)) {
@@ -127,6 +130,8 @@
 		module_put(ld_type->owner);
 		goto out_no_driver;
 	}
+	/* Bump the MDS count */
+	atomic_inc(&server->nfs_client->cl_mds_count);
 
 	dprintk("%s: pNFS module for %u set\n", __func__, id);
 	return;
@@ -395,6 +400,9 @@
 	dprintk("%s:Begin lo %p\n", __func__, lo);
 
 	if (list_empty(&lo->plh_segs)) {
+		/* Reset MDS Threshold I/O counters */
+		NFS_I(lo->plh_inode)->write_io = 0;
+		NFS_I(lo->plh_inode)->read_io = 0;
 		if (!test_and_set_bit(NFS_LAYOUT_DESTROYED, &lo->plh_flags))
 			put_layout_hdr_locked(lo);
 		return 0;
@@ -455,6 +463,7 @@
 	spin_unlock(&nfsi->vfs_inode.i_lock);
 	pnfs_free_lseg_list(&tmp_list);
 }
+EXPORT_SYMBOL_GPL(pnfs_destroy_layout);
 
 /*
  * Called by the state manager to remove all layouts established under an
@@ -692,6 +701,7 @@
 	dprintk("<-- %s status: %d\n", __func__, status);
 	return status;
 }
+EXPORT_SYMBOL_GPL(_pnfs_return_layout);
 
 bool pnfs_roc(struct inode *ino)
 {
@@ -931,6 +941,81 @@
 }
 
 /*
+ * Use mdsthreshold hints set at each OPEN to determine if I/O should go
+ * to the MDS or over pNFS
+ *
+ * The nfs_inode read_io and write_io fields are cumulative counters reset
+ * when there are no layout segments. Note that in pnfs_update_layout iomode
+ * is set to IOMODE_READ for a READ request, and set to IOMODE_RW for a
+ * WRITE request.
+ *
+ * A return of true means use MDS I/O.
+ *
+ * From rfc 5661:
+ * If a file's size is smaller than the file size threshold, data accesses
+ * SHOULD be sent to the metadata server.  If an I/O request has a length that
+ * is below the I/O size threshold, the I/O SHOULD be sent to the metadata
+ * server.  If both file size and I/O size are provided, the client SHOULD
+ * reach or exceed both thresholds before sending its read or write
+ * requests to the data server.
+ */
+static bool pnfs_within_mdsthreshold(struct nfs_open_context *ctx,
+				     struct inode *ino, int iomode)
+{
+	struct nfs4_threshold *t = ctx->mdsthreshold;
+	struct nfs_inode *nfsi = NFS_I(ino);
+	loff_t fsize = i_size_read(ino);
+	bool size = false, size_set = false, io = false, io_set = false, ret = false;
+
+	if (t == NULL)
+		return ret;
+
+	dprintk("%s bm=0x%x rd_sz=%llu wr_sz=%llu rd_io=%llu wr_io=%llu\n",
+		__func__, t->bm, t->rd_sz, t->wr_sz, t->rd_io_sz, t->wr_io_sz);
+
+	switch (iomode) {
+	case IOMODE_READ:
+		if (t->bm & THRESHOLD_RD) {
+			dprintk("%s fsize %llu\n", __func__, fsize);
+			size_set = true;
+			if (fsize < t->rd_sz)
+				size = true;
+		}
+		if (t->bm & THRESHOLD_RD_IO) {
+			dprintk("%s nfsi->read_io %llu\n", __func__,
+				nfsi->read_io);
+			io_set = true;
+			if (nfsi->read_io < t->rd_io_sz)
+				io = true;
+		}
+		break;
+	case IOMODE_RW:
+		if (t->bm & THRESHOLD_WR) {
+			dprintk("%s fsize %llu\n", __func__, fsize);
+			size_set = true;
+			if (fsize < t->wr_sz)
+				size = true;
+		}
+		if (t->bm & THRESHOLD_WR_IO) {
+			dprintk("%s nfsi->write_io %llu\n", __func__,
+				nfsi->write_io);
+			io_set = true;
+			if (nfsi->write_io < t->wr_io_sz)
+				io = true;
+		}
+		break;
+	}
+	if (size_set && io_set) {
+		if (size && io)
+			ret = true;
+	} else if (size || io)
+		ret = true;
+
+	dprintk("<-- %s size %d io %d ret %d\n", __func__, size, io, ret);
+	return ret;
+}
+
+/*
  * Layout segment is retrieved from the server if not cached.
  * The appropriate layout segment is referenced and returned to the caller.
  */
@@ -957,6 +1042,10 @@
 
 	if (!pnfs_enabled_sb(NFS_SERVER(ino)))
 		return NULL;
+
+	if (pnfs_within_mdsthreshold(ctx, ino, iomode))
+		return NULL;
+
 	spin_lock(&ino->i_lock);
 	lo = pnfs_find_alloc_layout(ino, ctx, gfp_flags);
 	if (lo == NULL) {
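
The threshold rule above: when the server supplied both a size hint and an I/O hint for the chosen iomode, I/O goes to the MDS only if the file size and the cumulative I/O counter are both under their limits; with a single hint, that hint alone decides. A standalone sketch of that decision with sample outcomes (the values and names are invented):

#include <stdbool.h>
#include <stdio.h>

/* simplified decision: "use MDS" if below the supplied thresholds.
 * the *_set flags say whether the server provided each hint. */
static bool use_mds(bool size_set, bool size_below, bool io_set, bool io_below)
{
	if (size_set && io_set)
		return size_below && io_below;	/* both hints present: need both */
	return (size_set && size_below) || (io_set && io_below);
}

int main(void)
{
	/* file is under the size threshold, but cumulative I/O already
	 * reached the I/O threshold: send the I/O to the data servers */
	printf("both set, io over:  %d\n", use_mds(true, true, true, false));
	/* only the size hint present and the file is small: use the MDS */
	printf("size only, small:   %d\n", use_mds(true, true, false, false));
	return 0;
}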
@@ -1082,6 +1171,10 @@
 {
 	BUG_ON(pgio->pg_lseg != NULL);
 
+	if (req->wb_offset != req->wb_pgbase) {
+		nfs_pageio_reset_read_mds(pgio);
+		return;
+	}
 	pgio->pg_lseg = pnfs_update_layout(pgio->pg_inode,
 					   req->wb_context,
 					   req_offset(req),
@@ -1100,6 +1193,10 @@
 {
 	BUG_ON(pgio->pg_lseg != NULL);
 
+	if (req->wb_offset != req->wb_pgbase) {
+		nfs_pageio_reset_write_mds(pgio);
+		return;
+	}
 	pgio->pg_lseg = pnfs_update_layout(pgio->pg_inode,
 					   req->wb_context,
 					   req_offset(req),
@@ -1113,26 +1210,31 @@
 EXPORT_SYMBOL_GPL(pnfs_generic_pg_init_write);
 
 bool
-pnfs_pageio_init_read(struct nfs_pageio_descriptor *pgio, struct inode *inode)
+pnfs_pageio_init_read(struct nfs_pageio_descriptor *pgio, struct inode *inode,
+		      const struct nfs_pgio_completion_ops *compl_ops)
 {
 	struct nfs_server *server = NFS_SERVER(inode);
 	struct pnfs_layoutdriver_type *ld = server->pnfs_curr_ld;
 
 	if (ld == NULL)
 		return false;
-	nfs_pageio_init(pgio, inode, ld->pg_read_ops, server->rsize, 0);
+	nfs_pageio_init(pgio, inode, ld->pg_read_ops, compl_ops,
+			server->rsize, 0);
 	return true;
 }
 
 bool
-pnfs_pageio_init_write(struct nfs_pageio_descriptor *pgio, struct inode *inode, int ioflags)
+pnfs_pageio_init_write(struct nfs_pageio_descriptor *pgio, struct inode *inode,
+		       int ioflags,
+		       const struct nfs_pgio_completion_ops *compl_ops)
 {
 	struct nfs_server *server = NFS_SERVER(inode);
 	struct pnfs_layoutdriver_type *ld = server->pnfs_curr_ld;
 
 	if (ld == NULL)
 		return false;
-	nfs_pageio_init(pgio, inode, ld->pg_write_ops, server->wsize, ioflags);
+	nfs_pageio_init(pgio, inode, ld->pg_write_ops, compl_ops,
+			server->wsize, ioflags);
 	return true;
 }
 
@@ -1162,13 +1264,15 @@
 }
 EXPORT_SYMBOL_GPL(pnfs_generic_pg_test);
 
-static int pnfs_write_done_resend_to_mds(struct inode *inode, struct list_head *head)
+int pnfs_write_done_resend_to_mds(struct inode *inode,
+				struct list_head *head,
+				const struct nfs_pgio_completion_ops *compl_ops)
 {
 	struct nfs_pageio_descriptor pgio;
 	LIST_HEAD(failed);
 
 	/* Resend all requests through the MDS */
-	nfs_pageio_init_write_mds(&pgio, inode, FLUSH_STABLE);
+	nfs_pageio_init_write_mds(&pgio, inode, FLUSH_STABLE, compl_ops);
 	while (!list_empty(head)) {
 		struct nfs_page *req = nfs_list_entry(head->next);
 
@@ -1188,30 +1292,37 @@
 	}
 	return 0;
 }
+EXPORT_SYMBOL_GPL(pnfs_write_done_resend_to_mds);
+
+static void pnfs_ld_handle_write_error(struct nfs_write_data *data)
+{
+	struct nfs_pgio_header *hdr = data->header;
+
+	dprintk("pnfs write error = %d\n", hdr->pnfs_error);
+	if (NFS_SERVER(hdr->inode)->pnfs_curr_ld->flags &
+	    PNFS_LAYOUTRET_ON_ERROR) {
+		clear_bit(NFS_INO_LAYOUTCOMMIT, &NFS_I(hdr->inode)->flags);
+		pnfs_return_layout(hdr->inode);
+	}
+	if (!test_and_set_bit(NFS_IOHDR_REDO, &hdr->flags))
+		data->task.tk_status = pnfs_write_done_resend_to_mds(hdr->inode,
+							&hdr->pages,
+							hdr->completion_ops);
+}
 
 /*
  * Called by non rpc-based layout drivers
  */
 void pnfs_ld_write_done(struct nfs_write_data *data)
 {
-	if (likely(!data->pnfs_error)) {
+	struct nfs_pgio_header *hdr = data->header;
+
+	if (!hdr->pnfs_error) {
 		pnfs_set_layoutcommit(data);
-		data->mds_ops->rpc_call_done(&data->task, data);
-	} else {
-		dprintk("pnfs write error = %d\n", data->pnfs_error);
-		if (NFS_SERVER(data->inode)->pnfs_curr_ld->flags &
-						PNFS_LAYOUTRET_ON_ERROR) {
-			/* Don't lo_commit on error, Server will needs to
-			 * preform a file recovery.
-			 */
-			clear_bit(NFS_INO_LAYOUTCOMMIT,
-				  &NFS_I(data->inode)->flags);
-			pnfs_return_layout(data->inode);
-		}
-		data->task.tk_status = pnfs_write_done_resend_to_mds(data->inode, &data->pages);
-	}
-	put_lseg(data->lseg);
-	data->mds_ops->rpc_release(data);
+		hdr->mds_ops->rpc_call_done(&data->task, data);
+	} else
+		pnfs_ld_handle_write_error(data);
+	hdr->mds_ops->rpc_release(data);
 }
 EXPORT_SYMBOL_GPL(pnfs_ld_write_done);
 
@@ -1219,12 +1330,13 @@
 pnfs_write_through_mds(struct nfs_pageio_descriptor *desc,
 		struct nfs_write_data *data)
 {
-	list_splice_tail_init(&data->pages, &desc->pg_list);
-	if (data->req && list_empty(&data->req->wb_list))
-		nfs_list_add_request(data->req, &desc->pg_list);
-	nfs_pageio_reset_write_mds(desc);
-	desc->pg_recoalesce = 1;
-	put_lseg(data->lseg);
+	struct nfs_pgio_header *hdr = data->header;
+
+	if (!test_and_set_bit(NFS_IOHDR_REDO, &hdr->flags)) {
+		list_splice_tail_init(&hdr->pages, &desc->pg_list);
+		nfs_pageio_reset_write_mds(desc);
+		desc->pg_recoalesce = 1;
+	}
 	nfs_writedata_release(data);
 }
 
@@ -1234,23 +1346,18 @@
 			struct pnfs_layout_segment *lseg,
 			int how)
 {
-	struct inode *inode = wdata->inode;
+	struct nfs_pgio_header *hdr = wdata->header;
+	struct inode *inode = hdr->inode;
 	enum pnfs_try_status trypnfs;
 	struct nfs_server *nfss = NFS_SERVER(inode);
 
-	wdata->mds_ops = call_ops;
-	wdata->lseg = get_lseg(lseg);
+	hdr->mds_ops = call_ops;
 
 	dprintk("%s: Writing ino:%lu %u@%llu (how %d)\n", __func__,
 		inode->i_ino, wdata->args.count, wdata->args.offset, how);
-
 	trypnfs = nfss->pnfs_curr_ld->write_pagelist(wdata, how);
-	if (trypnfs == PNFS_NOT_ATTEMPTED) {
-		put_lseg(wdata->lseg);
-		wdata->lseg = NULL;
-	} else
+	if (trypnfs != PNFS_NOT_ATTEMPTED)
 		nfs_inc_stats(inode, NFSIOS_PNFS_WRITE);
-
 	dprintk("%s End (trypnfs:%d)\n", __func__, trypnfs);
 	return trypnfs;
 }
@@ -1266,7 +1373,7 @@
 	while (!list_empty(head)) {
 		enum pnfs_try_status trypnfs;
 
-		data = list_entry(head->next, struct nfs_write_data, list);
+		data = list_first_entry(head, struct nfs_write_data, list);
 		list_del_init(&data->list);
 
 		trypnfs = pnfs_try_to_write_data(data, call_ops, lseg, how);
@@ -1276,43 +1383,82 @@
 	put_lseg(lseg);
 }
 
+static void pnfs_writehdr_free(struct nfs_pgio_header *hdr)
+{
+	put_lseg(hdr->lseg);
+	nfs_writehdr_free(hdr);
+}
+
 int
 pnfs_generic_pg_writepages(struct nfs_pageio_descriptor *desc)
 {
-	LIST_HEAD(head);
+	struct nfs_write_header *whdr;
+	struct nfs_pgio_header *hdr;
 	int ret;
 
-	ret = nfs_generic_flush(desc, &head);
+	whdr = nfs_writehdr_alloc();
+	if (!whdr) {
+		desc->pg_completion_ops->error_cleanup(&desc->pg_list);
+		put_lseg(desc->pg_lseg);
+		desc->pg_lseg = NULL;
+		return -ENOMEM;
+	}
+	hdr = &whdr->header;
+	nfs_pgheader_init(desc, hdr, pnfs_writehdr_free);
+	hdr->lseg = get_lseg(desc->pg_lseg);
+	atomic_inc(&hdr->refcnt);
+	ret = nfs_generic_flush(desc, hdr);
 	if (ret != 0) {
 		put_lseg(desc->pg_lseg);
 		desc->pg_lseg = NULL;
-		return ret;
-	}
-	pnfs_do_multiple_writes(desc, &head, desc->pg_ioflags);
-	return 0;
+	} else
+		pnfs_do_multiple_writes(desc, &hdr->rpc_list, desc->pg_ioflags);
+	if (atomic_dec_and_test(&hdr->refcnt))
+		hdr->completion_ops->completion(hdr);
+	return ret;
 }
 EXPORT_SYMBOL_GPL(pnfs_generic_pg_writepages);
 
-static void pnfs_ld_handle_read_error(struct nfs_read_data *data)
+int pnfs_read_done_resend_to_mds(struct inode *inode,
+				struct list_head *head,
+				const struct nfs_pgio_completion_ops *compl_ops)
 {
 	struct nfs_pageio_descriptor pgio;
+	LIST_HEAD(failed);
 
-	put_lseg(data->lseg);
-	data->lseg = NULL;
-	dprintk("pnfs write error = %d\n", data->pnfs_error);
-	if (NFS_SERVER(data->inode)->pnfs_curr_ld->flags &
-						PNFS_LAYOUTRET_ON_ERROR)
-		pnfs_return_layout(data->inode);
-
-	nfs_pageio_init_read_mds(&pgio, data->inode);
-
-	while (!list_empty(&data->pages)) {
-		struct nfs_page *req = nfs_list_entry(data->pages.next);
+	/* Resend all requests through the MDS */
+	nfs_pageio_init_read_mds(&pgio, inode, compl_ops);
+	while (!list_empty(head)) {
+		struct nfs_page *req = nfs_list_entry(head->next);
 
 		nfs_list_remove_request(req);
-		nfs_pageio_add_request(&pgio, req);
+		if (!nfs_pageio_add_request(&pgio, req))
+			nfs_list_add_request(req, &failed);
 	}
 	nfs_pageio_complete(&pgio);
+
+	if (!list_empty(&failed)) {
+		list_move(&failed, head);
+		return -EIO;
+	}
+	return 0;
+}
+EXPORT_SYMBOL_GPL(pnfs_read_done_resend_to_mds);
+
+static void pnfs_ld_handle_read_error(struct nfs_read_data *data)
+{
+	struct nfs_pgio_header *hdr = data->header;
+
+	dprintk("pnfs read error = %d\n", hdr->pnfs_error);
+	if (NFS_SERVER(hdr->inode)->pnfs_curr_ld->flags &
+	    PNFS_LAYOUTRET_ON_ERROR) {
+		clear_bit(NFS_INO_LAYOUTCOMMIT, &NFS_I(hdr->inode)->flags);
+		pnfs_return_layout(hdr->inode);
+	}
+	if (!test_and_set_bit(NFS_IOHDR_REDO, &hdr->flags))
+		data->task.tk_status = pnfs_read_done_resend_to_mds(hdr->inode,
+							&hdr->pages,
+							hdr->completion_ops);
 }
 
 /*
@@ -1320,13 +1466,14 @@
  */
 void pnfs_ld_read_done(struct nfs_read_data *data)
 {
-	if (likely(!data->pnfs_error)) {
+	struct nfs_pgio_header *hdr = data->header;
+
+	if (likely(!hdr->pnfs_error)) {
 		__nfs4_read_done_cb(data);
-		data->mds_ops->rpc_call_done(&data->task, data);
+		hdr->mds_ops->rpc_call_done(&data->task, data);
 	} else
 		pnfs_ld_handle_read_error(data);
-	put_lseg(data->lseg);
-	data->mds_ops->rpc_release(data);
+	hdr->mds_ops->rpc_release(data);
 }
 EXPORT_SYMBOL_GPL(pnfs_ld_read_done);
 
@@ -1334,11 +1481,13 @@
 pnfs_read_through_mds(struct nfs_pageio_descriptor *desc,
 		struct nfs_read_data *data)
 {
-	list_splice_tail_init(&data->pages, &desc->pg_list);
-	if (data->req && list_empty(&data->req->wb_list))
-		nfs_list_add_request(data->req, &desc->pg_list);
-	nfs_pageio_reset_read_mds(desc);
-	desc->pg_recoalesce = 1;
+	struct nfs_pgio_header *hdr = data->header;
+
+	if (!test_and_set_bit(NFS_IOHDR_REDO, &hdr->flags)) {
+		list_splice_tail_init(&hdr->pages, &desc->pg_list);
+		nfs_pageio_reset_read_mds(desc);
+		desc->pg_recoalesce = 1;
+	}
 	nfs_readdata_release(data);
 }
 
@@ -1350,23 +1499,19 @@
 		       const struct rpc_call_ops *call_ops,
 		       struct pnfs_layout_segment *lseg)
 {
-	struct inode *inode = rdata->inode;
+	struct nfs_pgio_header *hdr = rdata->header;
+	struct inode *inode = hdr->inode;
 	struct nfs_server *nfss = NFS_SERVER(inode);
 	enum pnfs_try_status trypnfs;
 
-	rdata->mds_ops = call_ops;
-	rdata->lseg = get_lseg(lseg);
+	hdr->mds_ops = call_ops;
 
 	dprintk("%s: Reading ino:%lu %u@%llu\n",
 		__func__, inode->i_ino, rdata->args.count, rdata->args.offset);
 
 	trypnfs = nfss->pnfs_curr_ld->read_pagelist(rdata);
-	if (trypnfs == PNFS_NOT_ATTEMPTED) {
-		put_lseg(rdata->lseg);
-		rdata->lseg = NULL;
-	} else {
+	if (trypnfs != PNFS_NOT_ATTEMPTED)
 		nfs_inc_stats(inode, NFSIOS_PNFS_READ);
-	}
 	dprintk("%s End (trypnfs:%d)\n", __func__, trypnfs);
 	return trypnfs;
 }
@@ -1382,7 +1527,7 @@
 	while (!list_empty(head)) {
 		enum pnfs_try_status trypnfs;
 
-		data = list_entry(head->next, struct nfs_read_data, list);
+		data = list_first_entry(head, struct nfs_read_data, list);
 		list_del_init(&data->list);
 
 		trypnfs = pnfs_try_to_read_data(data, call_ops, lseg);
@@ -1392,20 +1537,40 @@
 	put_lseg(lseg);
 }
 
+static void pnfs_readhdr_free(struct nfs_pgio_header *hdr)
+{
+	put_lseg(hdr->lseg);
+	nfs_readhdr_free(hdr);
+}
+
 int
 pnfs_generic_pg_readpages(struct nfs_pageio_descriptor *desc)
 {
-	LIST_HEAD(head);
+	struct nfs_read_header *rhdr;
+	struct nfs_pgio_header *hdr;
 	int ret;
 
-	ret = nfs_generic_pagein(desc, &head);
-	if (ret != 0) {
+	rhdr = nfs_readhdr_alloc();
+	if (!rhdr) {
+		desc->pg_completion_ops->error_cleanup(&desc->pg_list);
+		ret = -ENOMEM;
 		put_lseg(desc->pg_lseg);
 		desc->pg_lseg = NULL;
 		return ret;
 	}
-	pnfs_do_multiple_reads(desc, &head);
-	return 0;
+	hdr = &rhdr->header;
+	nfs_pgheader_init(desc, hdr, pnfs_readhdr_free);
+	hdr->lseg = get_lseg(desc->pg_lseg);
+	atomic_inc(&hdr->refcnt);
+	ret = nfs_generic_pagein(desc, hdr);
+	if (ret != 0) {
+		put_lseg(desc->pg_lseg);
+		desc->pg_lseg = NULL;
+	} else
+		pnfs_do_multiple_reads(desc, &hdr->rpc_list);
+	if (atomic_dec_and_test(&hdr->refcnt))
+		hdr->completion_ops->completion(hdr);
+	return ret;
 }
 EXPORT_SYMBOL_GPL(pnfs_generic_pg_readpages);
 
@@ -1438,30 +1603,32 @@
 void
 pnfs_set_layoutcommit(struct nfs_write_data *wdata)
 {
-	struct nfs_inode *nfsi = NFS_I(wdata->inode);
+	struct nfs_pgio_header *hdr = wdata->header;
+	struct inode *inode = hdr->inode;
+	struct nfs_inode *nfsi = NFS_I(inode);
 	loff_t end_pos = wdata->mds_offset + wdata->res.count;
 	bool mark_as_dirty = false;
 
-	spin_lock(&nfsi->vfs_inode.i_lock);
+	spin_lock(&inode->i_lock);
 	if (!test_and_set_bit(NFS_INO_LAYOUTCOMMIT, &nfsi->flags)) {
 		mark_as_dirty = true;
 		dprintk("%s: Set layoutcommit for inode %lu ",
-			__func__, wdata->inode->i_ino);
+			__func__, inode->i_ino);
 	}
-	if (!test_and_set_bit(NFS_LSEG_LAYOUTCOMMIT, &wdata->lseg->pls_flags)) {
+	if (!test_and_set_bit(NFS_LSEG_LAYOUTCOMMIT, &hdr->lseg->pls_flags)) {
 		/* references matched in nfs4_layoutcommit_release */
-		get_lseg(wdata->lseg);
+		get_lseg(hdr->lseg);
 	}
 	if (end_pos > nfsi->layout->plh_lwb)
 		nfsi->layout->plh_lwb = end_pos;
-	spin_unlock(&nfsi->vfs_inode.i_lock);
+	spin_unlock(&inode->i_lock);
 	dprintk("%s: lseg %p end_pos %llu\n",
-		__func__, wdata->lseg, nfsi->layout->plh_lwb);
+		__func__, hdr->lseg, nfsi->layout->plh_lwb);
 
 	/* if pnfs_layoutcommit_inode() runs between inode locks, the next one
 	 * will be a noop because NFS_INO_LAYOUTCOMMIT will not be set */
 	if (mark_as_dirty)
-		mark_inode_dirty_sync(wdata->inode);
+		mark_inode_dirty_sync(inode);
 }
 EXPORT_SYMBOL_GPL(pnfs_set_layoutcommit);
 
@@ -1550,3 +1717,15 @@
 	kfree(data);
 	goto out;
 }
+
+struct nfs4_threshold *pnfs_mdsthreshold_alloc(void)
+{
+	struct nfs4_threshold *thp;
+
+	thp = kzalloc(sizeof(*thp), GFP_NOFS);
+	if (!thp) {
+		dprintk("%s mdsthreshold allocation failed\n", __func__);
+		return NULL;
+	}
+	return thp;
+}
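
pnfs_generic_pg_readpages()/pnfs_generic_pg_writepages() pin the header with an extra reference before dispatching, and the completion callback runs only when the last reference is dropped, i.e. after every queued RPC has released the header. A toy refcount model of that pattern, using a plain int instead of atomics and invented names:

#include <stdio.h>

struct hdr {
	int refcnt;		/* plain int stands in for atomic_t */
};

static void hdr_get(struct hdr *h) { h->refcnt++; }

static void hdr_put(struct hdr *h)
{
	if (--h->refcnt == 0)
		printf("completion runs exactly once, after the last put\n");
}

int main(void)
{
	struct hdr h = { .refcnt = 0 };
	int i;

	hdr_get(&h);			/* dispatcher pins the header */
	for (i = 0; i < 3; i++)
		hdr_get(&h);		/* each queued RPC takes a reference */
	for (i = 0; i < 3; i++)
		hdr_put(&h);		/* RPC completions drop theirs */
	hdr_put(&h);			/* dispatcher's put is last: completion fires */
	return 0;
}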
diff --git a/fs/nfs/pnfs.h b/fs/nfs/pnfs.h
index 442ebf6..64f90d8 100644
--- a/fs/nfs/pnfs.h
+++ b/fs/nfs/pnfs.h
@@ -63,6 +63,7 @@
 	NFS_LAYOUT_BULK_RECALL,		/* bulk recall affecting layout */
 	NFS_LAYOUT_ROC,			/* some lseg had roc bit set */
 	NFS_LAYOUT_DESTROYED,		/* no new use of layout allowed */
+	NFS_LAYOUT_INVALID,		/* layout is being destroyed */
 };
 
 enum layoutdriver_policy_flags {
@@ -94,11 +95,20 @@
 	const struct nfs_pageio_ops *pg_read_ops;
 	const struct nfs_pageio_ops *pg_write_ops;
 
+	struct pnfs_ds_commit_info *(*get_ds_info) (struct inode *inode);
 	void (*mark_request_commit) (struct nfs_page *req,
-					struct pnfs_layout_segment *lseg);
-	void (*clear_request_commit) (struct nfs_page *req);
-	int (*scan_commit_lists) (struct inode *inode, int max, spinlock_t *lock);
-	int (*commit_pagelist)(struct inode *inode, struct list_head *mds_pages, int how);
+				     struct pnfs_layout_segment *lseg,
+				     struct nfs_commit_info *cinfo);
+	void (*clear_request_commit) (struct nfs_page *req,
+				      struct nfs_commit_info *cinfo);
+	int (*scan_commit_lists) (struct nfs_commit_info *cinfo,
+				  int max);
+	void (*recover_commit_reqs) (struct list_head *list,
+				     struct nfs_commit_info *cinfo);
+	int (*commit_pagelist)(struct inode *inode,
+			       struct list_head *mds_pages,
+			       int how,
+			       struct nfs_commit_info *cinfo);
 
 	/*
 	 * Return PNFS_ATTEMPTED to indicate the layout code has attempted
@@ -168,8 +178,10 @@
 void get_layout_hdr(struct pnfs_layout_hdr *lo);
 void put_lseg(struct pnfs_layout_segment *lseg);
 
-bool pnfs_pageio_init_read(struct nfs_pageio_descriptor *, struct inode *);
-bool pnfs_pageio_init_write(struct nfs_pageio_descriptor *, struct inode *, int);
+bool pnfs_pageio_init_read(struct nfs_pageio_descriptor *, struct inode *,
+			   const struct nfs_pgio_completion_ops *);
+bool pnfs_pageio_init_write(struct nfs_pageio_descriptor *, struct inode *,
+			    int, const struct nfs_pgio_completion_ops *);
 
 void set_pnfs_layoutdriver(struct nfs_server *, const struct nfs_fh *, u32);
 void unset_pnfs_layoutdriver(struct nfs_server *);
@@ -211,6 +223,11 @@
 					       gfp_t gfp_flags);
 
 void nfs4_deviceid_mark_client_invalid(struct nfs_client *clp);
+int pnfs_read_done_resend_to_mds(struct inode *inode, struct list_head *head,
+			const struct nfs_pgio_completion_ops *compl_ops);
+int pnfs_write_done_resend_to_mds(struct inode *inode, struct list_head *head,
+			const struct nfs_pgio_completion_ops *compl_ops);
+struct nfs4_threshold *pnfs_mdsthreshold_alloc(void);
 
 /* nfs4_deviceid_flags */
 enum {
@@ -261,49 +278,66 @@
 }
 
 static inline int
-pnfs_commit_list(struct inode *inode, struct list_head *mds_pages, int how)
+pnfs_commit_list(struct inode *inode, struct list_head *mds_pages, int how,
+		 struct nfs_commit_info *cinfo)
 {
-	if (!test_and_clear_bit(NFS_INO_PNFS_COMMIT, &NFS_I(inode)->flags))
+	if (cinfo->ds == NULL || cinfo->ds->ncommitting == 0)
 		return PNFS_NOT_ATTEMPTED;
-	return NFS_SERVER(inode)->pnfs_curr_ld->commit_pagelist(inode, mds_pages, how);
+	return NFS_SERVER(inode)->pnfs_curr_ld->commit_pagelist(inode, mds_pages, how, cinfo);
+}
+
+static inline struct pnfs_ds_commit_info *
+pnfs_get_ds_info(struct inode *inode)
+{
+	struct pnfs_layoutdriver_type *ld = NFS_SERVER(inode)->pnfs_curr_ld;
+
+	if (ld == NULL || ld->get_ds_info == NULL)
+		return NULL;
+	return ld->get_ds_info(inode);
 }
 
 static inline bool
-pnfs_mark_request_commit(struct nfs_page *req, struct pnfs_layout_segment *lseg)
+pnfs_mark_request_commit(struct nfs_page *req, struct pnfs_layout_segment *lseg,
+			 struct nfs_commit_info *cinfo)
 {
 	struct inode *inode = req->wb_context->dentry->d_inode;
 	struct pnfs_layoutdriver_type *ld = NFS_SERVER(inode)->pnfs_curr_ld;
 
 	if (lseg == NULL || ld->mark_request_commit == NULL)
 		return false;
-	ld->mark_request_commit(req, lseg);
+	ld->mark_request_commit(req, lseg, cinfo);
 	return true;
 }
 
 static inline bool
-pnfs_clear_request_commit(struct nfs_page *req)
+pnfs_clear_request_commit(struct nfs_page *req, struct nfs_commit_info *cinfo)
 {
 	struct inode *inode = req->wb_context->dentry->d_inode;
 	struct pnfs_layoutdriver_type *ld = NFS_SERVER(inode)->pnfs_curr_ld;
 
 	if (ld == NULL || ld->clear_request_commit == NULL)
 		return false;
-	ld->clear_request_commit(req);
+	ld->clear_request_commit(req, cinfo);
 	return true;
 }
 
 static inline int
-pnfs_scan_commit_lists(struct inode *inode, int max, spinlock_t *lock)
+pnfs_scan_commit_lists(struct inode *inode, struct nfs_commit_info *cinfo,
+		       int max)
 {
-	struct pnfs_layoutdriver_type *ld = NFS_SERVER(inode)->pnfs_curr_ld;
-	int ret;
-
-	if (ld == NULL || ld->scan_commit_lists == NULL)
+	if (cinfo->ds == NULL || cinfo->ds->nwritten == 0)
 		return 0;
-	ret = ld->scan_commit_lists(inode, max, lock);
-	if (ret != 0)
-		set_bit(NFS_INO_PNFS_COMMIT, &NFS_I(inode)->flags);
-	return ret;
+	else
+		return NFS_SERVER(inode)->pnfs_curr_ld->scan_commit_lists(cinfo, max);
+}
+
+static inline void
+pnfs_recover_commit_reqs(struct inode *inode, struct list_head *list,
+			 struct nfs_commit_info *cinfo)
+{
+	if (cinfo->ds == NULL || cinfo->ds->nwritten == 0)
+		return;
+	NFS_SERVER(inode)->pnfs_curr_ld->recover_commit_reqs(list, cinfo);
 }
 
 /* Should the pNFS client commit and return the layout upon a setattr */
@@ -327,6 +361,14 @@
 	return 0;
 }
 
+static inline bool
+pnfs_use_threshold(struct nfs4_threshold **dst, struct nfs4_threshold *src,
+		   struct nfs_server *nfss)
+{
+	return (dst && src && src->bm != 0 && nfss->pnfs_curr_ld &&
+					nfss->pnfs_curr_ld->id == src->l_type);
+}
+
 #ifdef NFS_DEBUG
 void nfs4_print_deviceid(const struct nfs4_deviceid *dev_id);
 #else
@@ -396,45 +438,74 @@
 {
 }
 
-static inline bool pnfs_pageio_init_read(struct nfs_pageio_descriptor *pgio, struct inode *inode)
+static inline bool pnfs_pageio_init_read(struct nfs_pageio_descriptor *pgio, struct inode *inode,
+					 const struct nfs_pgio_completion_ops *compl_ops)
 {
 	return false;
 }
 
-static inline bool pnfs_pageio_init_write(struct nfs_pageio_descriptor *pgio, struct inode *inode, int ioflags)
+static inline bool pnfs_pageio_init_write(struct nfs_pageio_descriptor *pgio, struct inode *inode, int ioflags,
+					  const struct nfs_pgio_completion_ops *compl_ops)
 {
 	return false;
 }
 
 static inline int
-pnfs_commit_list(struct inode *inode, struct list_head *mds_pages, int how)
+pnfs_commit_list(struct inode *inode, struct list_head *mds_pages, int how,
+		 struct nfs_commit_info *cinfo)
 {
 	return PNFS_NOT_ATTEMPTED;
 }
 
+static inline struct pnfs_ds_commit_info *
+pnfs_get_ds_info(struct inode *inode)
+{
+	return NULL;
+}
+
 static inline bool
-pnfs_mark_request_commit(struct nfs_page *req, struct pnfs_layout_segment *lseg)
+pnfs_mark_request_commit(struct nfs_page *req, struct pnfs_layout_segment *lseg,
+			 struct nfs_commit_info *cinfo)
 {
 	return false;
 }
 
 static inline bool
-pnfs_clear_request_commit(struct nfs_page *req)
+pnfs_clear_request_commit(struct nfs_page *req, struct nfs_commit_info *cinfo)
 {
 	return false;
 }
 
 static inline int
-pnfs_scan_commit_lists(struct inode *inode, int max, spinlock_t *lock)
+pnfs_scan_commit_lists(struct inode *inode, struct nfs_commit_info *cinfo,
+		       int max)
 {
 	return 0;
 }
 
+static inline void
+pnfs_recover_commit_reqs(struct inode *inode, struct list_head *list,
+			 struct nfs_commit_info *cinfo)
+{
+}
+
 static inline int pnfs_layoutcommit_inode(struct inode *inode, bool sync)
 {
 	return 0;
 }
 
+static inline bool
+pnfs_use_threshold(struct nfs4_threshold **dst, struct nfs4_threshold *src,
+		   struct nfs_server *nfss)
+{
+	return false;
+}
+
+static inline struct nfs4_threshold *pnfs_mdsthreshold_alloc(void)
+{
+	return NULL;
+}
+
 #endif /* CONFIG_NFS_V4_1 */
 
 #endif /* FS_NFS_PNFS_H */
diff --git a/fs/nfs/proc.c b/fs/nfs/proc.c
index d6408b6..617c741 100644
--- a/fs/nfs/proc.c
+++ b/fs/nfs/proc.c
@@ -178,7 +178,7 @@
 }
 
 static int
-nfs_proc_lookup(struct rpc_clnt *clnt, struct inode *dir, struct qstr *name,
+nfs_proc_lookup(struct inode *dir, struct qstr *name,
 		struct nfs_fh *fhandle, struct nfs_fattr *fattr)
 {
 	struct nfs_diropargs	arg = {
@@ -640,16 +640,18 @@
 
 static int nfs_read_done(struct rpc_task *task, struct nfs_read_data *data)
 {
+	struct inode *inode = data->header->inode;
+
 	if (nfs_async_handle_expired_key(task))
 		return -EAGAIN;
 
-	nfs_invalidate_atime(data->inode);
+	nfs_invalidate_atime(inode);
 	if (task->tk_status >= 0) {
-		nfs_refresh_inode(data->inode, data->res.fattr);
+		nfs_refresh_inode(inode, data->res.fattr);
 		/* Emulate the eof flag, which isn't normally needed in NFSv2
 		 * as it is guaranteed to always return the file attributes
 		 */
-		if (data->args.offset + data->args.count >= data->res.fattr->size)
+		if (data->args.offset + data->res.count >= data->res.fattr->size)
 			data->res.eof = 1;
 	}
 	return 0;
@@ -667,11 +669,13 @@
 
 static int nfs_write_done(struct rpc_task *task, struct nfs_write_data *data)
 {
+	struct inode *inode = data->header->inode;
+
 	if (nfs_async_handle_expired_key(task))
 		return -EAGAIN;
 
 	if (task->tk_status >= 0)
-		nfs_post_op_update_inode_force_wcc(data->inode, data->res.fattr);
+		nfs_post_op_update_inode_force_wcc(inode, data->res.fattr);
 	return 0;
 }
 
@@ -687,8 +691,13 @@
 	rpc_call_start(task);
 }
 
+static void nfs_proc_commit_rpc_prepare(struct rpc_task *task, struct nfs_commit_data *data)
+{
+	BUG();
+}
+
 static void
-nfs_proc_commit_setup(struct nfs_write_data *data, struct rpc_message *msg)
+nfs_proc_commit_setup(struct nfs_commit_data *data, struct rpc_message *msg)
 {
 	BUG();
 }
@@ -732,6 +741,7 @@
 	.file_inode_ops	= &nfs_file_inode_operations,
 	.file_ops	= &nfs_file_operations,
 	.getroot	= nfs_proc_get_root,
+	.submount	= nfs_submount,
 	.getattr	= nfs_proc_getattr,
 	.setattr	= nfs_proc_setattr,
 	.lookup		= nfs_proc_lookup,
@@ -763,6 +773,7 @@
 	.write_rpc_prepare = nfs_proc_write_rpc_prepare,
 	.write_done	= nfs_write_done,
 	.commit_setup	= nfs_proc_commit_setup,
+	.commit_rpc_prepare = nfs_proc_commit_rpc_prepare,
 	.lock		= nfs_proc_lock,
 	.lock_check_bounds = nfs_lock_check_bounds,
 	.close_context	= nfs_close_context,
diff --git a/fs/nfs/read.c b/fs/nfs/read.c
index 0a4be28..86ced78 100644
--- a/fs/nfs/read.c
+++ b/fs/nfs/read.c
@@ -30,43 +30,73 @@
 #define NFSDBG_FACILITY		NFSDBG_PAGECACHE
 
 static const struct nfs_pageio_ops nfs_pageio_read_ops;
-static const struct rpc_call_ops nfs_read_partial_ops;
-static const struct rpc_call_ops nfs_read_full_ops;
+static const struct rpc_call_ops nfs_read_common_ops;
+static const struct nfs_pgio_completion_ops nfs_async_read_completion_ops;
 
 static struct kmem_cache *nfs_rdata_cachep;
 
-struct nfs_read_data *nfs_readdata_alloc(unsigned int pagecount)
+struct nfs_read_header *nfs_readhdr_alloc(void)
 {
-	struct nfs_read_data *p;
+	struct nfs_read_header *rhdr;
 
-	p = kmem_cache_zalloc(nfs_rdata_cachep, GFP_KERNEL);
-	if (p) {
-		INIT_LIST_HEAD(&p->pages);
-		p->npages = pagecount;
-		if (pagecount <= ARRAY_SIZE(p->page_array))
-			p->pagevec = p->page_array;
-		else {
-			p->pagevec = kcalloc(pagecount, sizeof(struct page *), GFP_KERNEL);
-			if (!p->pagevec) {
-				kmem_cache_free(nfs_rdata_cachep, p);
-				p = NULL;
-			}
-		}
+	rhdr = kmem_cache_zalloc(nfs_rdata_cachep, GFP_KERNEL);
+	if (rhdr) {
+		struct nfs_pgio_header *hdr = &rhdr->header;
+
+		INIT_LIST_HEAD(&hdr->pages);
+		INIT_LIST_HEAD(&hdr->rpc_list);
+		spin_lock_init(&hdr->lock);
+		atomic_set(&hdr->refcnt, 0);
 	}
-	return p;
+	return rhdr;
 }
 
-void nfs_readdata_free(struct nfs_read_data *p)
+static struct nfs_read_data *nfs_readdata_alloc(struct nfs_pgio_header *hdr,
+						unsigned int pagecount)
 {
-	if (p && (p->pagevec != &p->page_array[0]))
-		kfree(p->pagevec);
-	kmem_cache_free(nfs_rdata_cachep, p);
+	struct nfs_read_data *data, *prealloc;
+
+	prealloc = &container_of(hdr, struct nfs_read_header, header)->rpc_data;
+	if (prealloc->header == NULL)
+		data = prealloc;
+	else
+		data = kzalloc(sizeof(*data), GFP_KERNEL);
+	if (!data)
+		goto out;
+
+	if (nfs_pgarray_set(&data->pages, pagecount)) {
+		data->header = hdr;
+		atomic_inc(&hdr->refcnt);
+	} else {
+		if (data != prealloc)
+			kfree(data);
+		data = NULL;
+	}
+out:
+	return data;
+}
+
+void nfs_readhdr_free(struct nfs_pgio_header *hdr)
+{
+	struct nfs_read_header *rhdr = container_of(hdr, struct nfs_read_header, header);
+
+	kmem_cache_free(nfs_rdata_cachep, rhdr);
 }
 
 void nfs_readdata_release(struct nfs_read_data *rdata)
 {
+	struct nfs_pgio_header *hdr = rdata->header;
+	struct nfs_read_header *read_header = container_of(hdr, struct nfs_read_header, header);
+
 	put_nfs_open_context(rdata->args.context);
-	nfs_readdata_free(rdata);
+	if (rdata->pages.pagevec != rdata->pages.page_array)
+		kfree(rdata->pages.pagevec);
+	if (rdata != &read_header->rpc_data)
+		kfree(rdata);
+	else
+		rdata->header = NULL;
+	if (atomic_dec_and_test(&hdr->refcnt))
+		hdr->completion_ops->completion(hdr);
 }
 
 static
@@ -78,39 +108,11 @@
 	return 0;
 }
 
-static void nfs_readpage_truncate_uninitialised_page(struct nfs_read_data *data)
-{
-	unsigned int remainder = data->args.count - data->res.count;
-	unsigned int base = data->args.pgbase + data->res.count;
-	unsigned int pglen;
-	struct page **pages;
-
-	if (data->res.eof == 0 || remainder == 0)
-		return;
-	/*
-	 * Note: "remainder" can never be negative, since we check for
-	 * 	this in the XDR code.
-	 */
-	pages = &data->args.pages[base >> PAGE_CACHE_SHIFT];
-	base &= ~PAGE_CACHE_MASK;
-	pglen = PAGE_CACHE_SIZE - base;
-	for (;;) {
-		if (remainder <= pglen) {
-			zero_user(*pages, base, remainder);
-			break;
-		}
-		zero_user(*pages, base, pglen);
-		pages++;
-		remainder -= pglen;
-		pglen = PAGE_CACHE_SIZE;
-		base = 0;
-	}
-}
-
 void nfs_pageio_init_read_mds(struct nfs_pageio_descriptor *pgio,
-		struct inode *inode)
+			      struct inode *inode,
+			      const struct nfs_pgio_completion_ops *compl_ops)
 {
-	nfs_pageio_init(pgio, inode, &nfs_pageio_read_ops,
+	nfs_pageio_init(pgio, inode, &nfs_pageio_read_ops, compl_ops,
 			NFS_SERVER(inode)->rsize, 0);
 }
 
@@ -121,11 +123,12 @@
 }
 EXPORT_SYMBOL_GPL(nfs_pageio_reset_read_mds);
 
-static void nfs_pageio_init_read(struct nfs_pageio_descriptor *pgio,
-		struct inode *inode)
+void nfs_pageio_init_read(struct nfs_pageio_descriptor *pgio,
+			  struct inode *inode,
+			  const struct nfs_pgio_completion_ops *compl_ops)
 {
-	if (!pnfs_pageio_init_read(pgio, inode))
-		nfs_pageio_init_read_mds(pgio, inode);
+	if (!pnfs_pageio_init_read(pgio, inode, compl_ops))
+		nfs_pageio_init_read_mds(pgio, inode, compl_ops);
 }
 
 int nfs_readpage_async(struct nfs_open_context *ctx, struct inode *inode,
@@ -146,9 +149,10 @@
 	if (len < PAGE_CACHE_SIZE)
 		zero_user_segment(page, len, PAGE_CACHE_SIZE);
 
-	nfs_pageio_init_read(&pgio, inode);
+	nfs_pageio_init_read(&pgio, inode, &nfs_async_read_completion_ops);
 	nfs_pageio_add_request(&pgio, new);
 	nfs_pageio_complete(&pgio);
+	NFS_I(inode)->read_io += pgio.pg_bytes_written;
 	return 0;
 }
 
@@ -169,16 +173,49 @@
 	nfs_release_request(req);
 }
 
-int nfs_initiate_read(struct nfs_read_data *data, struct rpc_clnt *clnt,
-		      const struct rpc_call_ops *call_ops)
+/* Note: I/O was page-aligned */
+static void nfs_read_completion(struct nfs_pgio_header *hdr)
 {
-	struct inode *inode = data->inode;
+	unsigned long bytes = 0;
+
+	if (test_bit(NFS_IOHDR_REDO, &hdr->flags))
+		goto out;
+	while (!list_empty(&hdr->pages)) {
+		struct nfs_page *req = nfs_list_entry(hdr->pages.next);
+		struct page *page = req->wb_page;
+
+		if (test_bit(NFS_IOHDR_EOF, &hdr->flags)) {
+			if (bytes > hdr->good_bytes)
+				zero_user(page, 0, PAGE_SIZE);
+			else if (hdr->good_bytes - bytes < PAGE_SIZE)
+				zero_user_segment(page,
+					hdr->good_bytes & ~PAGE_MASK,
+					PAGE_SIZE);
+		}
+		bytes += req->wb_bytes;
+		if (test_bit(NFS_IOHDR_ERROR, &hdr->flags)) {
+			if (bytes <= hdr->good_bytes)
+				SetPageUptodate(page);
+		} else
+			SetPageUptodate(page);
+		nfs_list_remove_request(req);
+		nfs_readpage_release(req);
+	}
+out:
+	hdr->release(hdr);
+}
+
+int nfs_initiate_read(struct rpc_clnt *clnt,
+		      struct nfs_read_data *data,
+		      const struct rpc_call_ops *call_ops, int flags)
+{
+	struct inode *inode = data->header->inode;
 	int swap_flags = IS_SWAPFILE(inode) ? NFS_RPC_SWAPFLAGS : 0;
 	struct rpc_task *task;
 	struct rpc_message msg = {
 		.rpc_argp = &data->args,
 		.rpc_resp = &data->res,
-		.rpc_cred = data->cred,
+		.rpc_cred = data->header->cred,
 	};
 	struct rpc_task_setup task_setup_data = {
 		.task = &data->task,
@@ -187,7 +224,7 @@
 		.callback_ops = call_ops,
 		.callback_data = data,
 		.workqueue = nfsiod_workqueue,
-		.flags = RPC_TASK_ASYNC | swap_flags,
+		.flags = RPC_TASK_ASYNC | swap_flags | flags,
 	};
 
 	/* Set up the initial task struct. */
@@ -212,19 +249,15 @@
 /*
  * Set up the NFS read request struct
  */
-static void nfs_read_rpcsetup(struct nfs_page *req, struct nfs_read_data *data,
+static void nfs_read_rpcsetup(struct nfs_read_data *data,
 		unsigned int count, unsigned int offset)
 {
-	struct inode *inode = req->wb_context->dentry->d_inode;
+	struct nfs_page *req = data->header->req;
 
-	data->req	  = req;
-	data->inode	  = inode;
-	data->cred	  = req->wb_context->cred;
-
-	data->args.fh     = NFS_FH(inode);
+	data->args.fh     = NFS_FH(data->header->inode);
 	data->args.offset = req_offset(req) + offset;
 	data->args.pgbase = req->wb_pgbase + offset;
-	data->args.pages  = data->pagevec;
+	data->args.pages  = data->pages.pagevec;
 	data->args.count  = count;
 	data->args.context = get_nfs_open_context(req->wb_context);
 	data->args.lock_context = req->wb_lock_context;
@@ -238,9 +271,9 @@
 static int nfs_do_read(struct nfs_read_data *data,
 		const struct rpc_call_ops *call_ops)
 {
-	struct inode *inode = data->args.context->dentry->d_inode;
+	struct inode *inode = data->header->inode;
 
-	return nfs_initiate_read(data, NFS_CLIENT(inode), call_ops);
+	return nfs_initiate_read(NFS_CLIENT(inode), data, call_ops, 0);
 }
 
 static int
@@ -253,7 +286,7 @@
 	while (!list_empty(head)) {
 		int ret2;
 
-		data = list_entry(head->next, struct nfs_read_data, list);
+		data = list_first_entry(head, struct nfs_read_data, list);
 		list_del_init(&data->list);
 
 		ret2 = nfs_do_read(data, call_ops);
@@ -275,6 +308,24 @@
 	}
 }
 
+static const struct nfs_pgio_completion_ops nfs_async_read_completion_ops = {
+	.error_cleanup = nfs_async_read_error,
+	.completion = nfs_read_completion,
+};
+
+static void nfs_pagein_error(struct nfs_pageio_descriptor *desc,
+		struct nfs_pgio_header *hdr)
+{
+	set_bit(NFS_IOHDR_REDO, &hdr->flags);
+	while (!list_empty(&hdr->rpc_list)) {
+		struct nfs_read_data *data = list_first_entry(&hdr->rpc_list,
+				struct nfs_read_data, list);
+		list_del(&data->list);
+		nfs_readdata_release(data);
+	}
+	desc->pg_completion_ops->error_cleanup(&desc->pg_list);
+}
+
 /*
  * Generate multiple requests to fill a single page.
  *
@@ -288,93 +339,95 @@
  * won't see the new data until our attribute cache is updated.  This is more
  * or less conventional NFS client behavior.
  */
-static int nfs_pagein_multi(struct nfs_pageio_descriptor *desc, struct list_head *res)
+static int nfs_pagein_multi(struct nfs_pageio_descriptor *desc,
+			    struct nfs_pgio_header *hdr)
 {
-	struct nfs_page *req = nfs_list_entry(desc->pg_list.next);
+	struct nfs_page *req = hdr->req;
 	struct page *page = req->wb_page;
 	struct nfs_read_data *data;
 	size_t rsize = desc->pg_bsize, nbytes;
 	unsigned int offset;
-	int requests = 0;
-	int ret = 0;
-
-	nfs_list_remove_request(req);
 
 	offset = 0;
 	nbytes = desc->pg_count;
 	do {
 		size_t len = min(nbytes,rsize);
 
-		data = nfs_readdata_alloc(1);
-		if (!data)
-			goto out_bad;
-		data->pagevec[0] = page;
-		nfs_read_rpcsetup(req, data, len, offset);
-		list_add(&data->list, res);
-		requests++;
+		data = nfs_readdata_alloc(hdr, 1);
+		if (!data) {
+			nfs_pagein_error(desc, hdr);
+			return -ENOMEM;
+		}
+		data->pages.pagevec[0] = page;
+		nfs_read_rpcsetup(data, len, offset);
+		list_add(&data->list, &hdr->rpc_list);
 		nbytes -= len;
 		offset += len;
-	} while(nbytes != 0);
-	atomic_set(&req->wb_complete, requests);
-	desc->pg_rpc_callops = &nfs_read_partial_ops;
-	return ret;
-out_bad:
-	while (!list_empty(res)) {
-		data = list_entry(res->next, struct nfs_read_data, list);
-		list_del(&data->list);
-		nfs_readdata_release(data);
-	}
-	nfs_readpage_release(req);
-	return -ENOMEM;
+	} while (nbytes != 0);
+
+	nfs_list_remove_request(req);
+	nfs_list_add_request(req, &hdr->pages);
+	desc->pg_rpc_callops = &nfs_read_common_ops;
+	return 0;
 }
 
-static int nfs_pagein_one(struct nfs_pageio_descriptor *desc, struct list_head *res)
+static int nfs_pagein_one(struct nfs_pageio_descriptor *desc,
+			  struct nfs_pgio_header *hdr)
 {
 	struct nfs_page		*req;
 	struct page		**pages;
-	struct nfs_read_data	*data;
+	struct nfs_read_data    *data;
 	struct list_head *head = &desc->pg_list;
-	int ret = 0;
 
-	data = nfs_readdata_alloc(nfs_page_array_len(desc->pg_base,
-						     desc->pg_count));
+	data = nfs_readdata_alloc(hdr, nfs_page_array_len(desc->pg_base,
+							  desc->pg_count));
 	if (!data) {
-		nfs_async_read_error(head);
-		ret = -ENOMEM;
-		goto out;
+		nfs_pagein_error(desc, hdr);
+		return -ENOMEM;
 	}
 
-	pages = data->pagevec;
+	pages = data->pages.pagevec;
 	while (!list_empty(head)) {
 		req = nfs_list_entry(head->next);
 		nfs_list_remove_request(req);
-		nfs_list_add_request(req, &data->pages);
+		nfs_list_add_request(req, &hdr->pages);
 		*pages++ = req->wb_page;
 	}
-	req = nfs_list_entry(data->pages.next);
 
-	nfs_read_rpcsetup(req, data, desc->pg_count, 0);
-	list_add(&data->list, res);
-	desc->pg_rpc_callops = &nfs_read_full_ops;
-out:
-	return ret;
+	nfs_read_rpcsetup(data, desc->pg_count, 0);
+	list_add(&data->list, &hdr->rpc_list);
+	desc->pg_rpc_callops = &nfs_read_common_ops;
+	return 0;
 }
 
-int nfs_generic_pagein(struct nfs_pageio_descriptor *desc, struct list_head *head)
+int nfs_generic_pagein(struct nfs_pageio_descriptor *desc,
+		       struct nfs_pgio_header *hdr)
 {
 	if (desc->pg_bsize < PAGE_CACHE_SIZE)
-		return nfs_pagein_multi(desc, head);
-	return nfs_pagein_one(desc, head);
+		return nfs_pagein_multi(desc, hdr);
+	return nfs_pagein_one(desc, hdr);
 }
 
 static int nfs_generic_pg_readpages(struct nfs_pageio_descriptor *desc)
 {
-	LIST_HEAD(head);
+	struct nfs_read_header *rhdr;
+	struct nfs_pgio_header *hdr;
 	int ret;
 
-	ret = nfs_generic_pagein(desc, &head);
+	rhdr = nfs_readhdr_alloc();
+	if (!rhdr) {
+		desc->pg_completion_ops->error_cleanup(&desc->pg_list);
+		return -ENOMEM;
+	}
+	hdr = &rhdr->header;
+	nfs_pgheader_init(desc, hdr, nfs_readhdr_free);
+	atomic_inc(&hdr->refcnt);
+	ret = nfs_generic_pagein(desc, hdr);
 	if (ret == 0)
-		ret = nfs_do_multiple_reads(&head, desc->pg_rpc_callops);
+		ret = nfs_do_multiple_reads(&hdr->rpc_list,
+					    desc->pg_rpc_callops);
+	if (atomic_dec_and_test(&hdr->refcnt))
+		hdr->completion_ops->completion(hdr);
 	return ret;
 }
 
@@ -389,20 +442,21 @@
  */
 int nfs_readpage_result(struct rpc_task *task, struct nfs_read_data *data)
 {
+	struct inode *inode = data->header->inode;
 	int status;
 
 	dprintk("NFS: %s: %5u, (status %d)\n", __func__, task->tk_pid,
 			task->tk_status);
 
-	status = NFS_PROTO(data->inode)->read_done(task, data);
+	status = NFS_PROTO(inode)->read_done(task, data);
 	if (status != 0)
 		return status;
 
-	nfs_add_stats(data->inode, NFSIOS_SERVERREADBYTES, data->res.count);
+	nfs_add_stats(inode, NFSIOS_SERVERREADBYTES, data->res.count);
 
 	if (task->tk_status == -ESTALE) {
-		set_bit(NFS_INO_STALE, &NFS_I(data->inode)->flags);
-		nfs_mark_for_revalidate(data->inode);
+		set_bit(NFS_INO_STALE, &NFS_I(inode)->flags);
+		nfs_mark_for_revalidate(inode);
 	}
 	return 0;
 }
@@ -412,15 +466,13 @@
 	struct nfs_readargs *argp = &data->args;
 	struct nfs_readres *resp = &data->res;
 
-	if (resp->eof || resp->count == argp->count)
-		return;
-
 	/* This is a short read! */
-	nfs_inc_stats(data->inode, NFSIOS_SHORTREAD);
+	nfs_inc_stats(data->header->inode, NFSIOS_SHORTREAD);
 	/* Has the server at least made some progress? */
-	if (resp->count == 0)
+	if (resp->count == 0) {
+		nfs_set_pgio_error(data->header, -EIO, argp->offset);
 		return;
-
+	}
 	/* Yes, so retry the read at the end of the data */
 	data->mds_offset += resp->count;
 	argp->offset += resp->count;
@@ -429,114 +481,46 @@
 	rpc_restart_call_prepare(task);
 }
 
-/*
- * Handle a read reply that fills part of a page.
- */
-static void nfs_readpage_result_partial(struct rpc_task *task, void *calldata)
+static void nfs_readpage_result_common(struct rpc_task *task, void *calldata)
 {
 	struct nfs_read_data *data = calldata;
- 
+	struct nfs_pgio_header *hdr = data->header;
+
+	/* Note: nfs_readpage_result only returns 0 or -EAGAIN */
 	if (nfs_readpage_result(task, data) != 0)
 		return;
 	if (task->tk_status < 0)
-		return;
+		nfs_set_pgio_error(hdr, task->tk_status, data->args.offset);
+	else if (data->res.eof) {
+		loff_t bound;
 
-	nfs_readpage_truncate_uninitialised_page(data);
-	nfs_readpage_retry(task, data);
+		bound = data->args.offset + data->res.count;
+		spin_lock(&hdr->lock);
+		if (bound < hdr->io_start + hdr->good_bytes) {
+			set_bit(NFS_IOHDR_EOF, &hdr->flags);
+			clear_bit(NFS_IOHDR_ERROR, &hdr->flags);
+			hdr->good_bytes = bound - hdr->io_start;
+		}
+		spin_unlock(&hdr->lock);
+	} else if (data->res.count != data->args.count)
+		nfs_readpage_retry(task, data);
 }
 
-static void nfs_readpage_release_partial(void *calldata)
+static void nfs_readpage_release_common(void *calldata)
 {
-	struct nfs_read_data *data = calldata;
-	struct nfs_page *req = data->req;
-	struct page *page = req->wb_page;
-	int status = data->task.tk_status;
-
-	if (status < 0)
-		set_bit(PG_PARTIAL_READ_FAILED, &req->wb_flags);
-
-	if (atomic_dec_and_test(&req->wb_complete)) {
-		if (!test_bit(PG_PARTIAL_READ_FAILED, &req->wb_flags))
-			SetPageUptodate(page);
-		nfs_readpage_release(req);
-	}
 	nfs_readdata_release(calldata);
 }
 
 void nfs_read_prepare(struct rpc_task *task, void *calldata)
 {
 	struct nfs_read_data *data = calldata;
-	NFS_PROTO(data->inode)->read_rpc_prepare(task, data);
+	NFS_PROTO(data->header->inode)->read_rpc_prepare(task, data);
 }
 
-static const struct rpc_call_ops nfs_read_partial_ops = {
+static const struct rpc_call_ops nfs_read_common_ops = {
 	.rpc_call_prepare = nfs_read_prepare,
-	.rpc_call_done = nfs_readpage_result_partial,
-	.rpc_release = nfs_readpage_release_partial,
-};
-
-static void nfs_readpage_set_pages_uptodate(struct nfs_read_data *data)
-{
-	unsigned int count = data->res.count;
-	unsigned int base = data->args.pgbase;
-	struct page **pages;
-
-	if (data->res.eof)
-		count = data->args.count;
-	if (unlikely(count == 0))
-		return;
-	pages = &data->args.pages[base >> PAGE_CACHE_SHIFT];
-	base &= ~PAGE_CACHE_MASK;
-	count += base;
-	for (;count >= PAGE_CACHE_SIZE; count -= PAGE_CACHE_SIZE, pages++)
-		SetPageUptodate(*pages);
-	if (count == 0)
-		return;
-	/* Was this a short read? */
-	if (data->res.eof || data->res.count == data->args.count)
-		SetPageUptodate(*pages);
-}
-
-/*
- * This is the callback from RPC telling us whether a reply was
- * received or some error occurred (timeout or socket shutdown).
- */
-static void nfs_readpage_result_full(struct rpc_task *task, void *calldata)
-{
-	struct nfs_read_data *data = calldata;
-
-	if (nfs_readpage_result(task, data) != 0)
-		return;
-	if (task->tk_status < 0)
-		return;
-	/*
-	 * Note: nfs_readpage_retry may change the values of
-	 * data->args. In the multi-page case, we therefore need
-	 * to ensure that we call nfs_readpage_set_pages_uptodate()
-	 * first.
-	 */
-	nfs_readpage_truncate_uninitialised_page(data);
-	nfs_readpage_set_pages_uptodate(data);
-	nfs_readpage_retry(task, data);
-}
-
-static void nfs_readpage_release_full(void *calldata)
-{
-	struct nfs_read_data *data = calldata;
-
-	while (!list_empty(&data->pages)) {
-		struct nfs_page *req = nfs_list_entry(data->pages.next);
-
-		nfs_list_remove_request(req);
-		nfs_readpage_release(req);
-	}
-	nfs_readdata_release(calldata);
-}
-
-static const struct rpc_call_ops nfs_read_full_ops = {
-	.rpc_call_prepare = nfs_read_prepare,
-	.rpc_call_done = nfs_readpage_result_full,
-	.rpc_release = nfs_readpage_release_full,
+	.rpc_call_done = nfs_readpage_result_common,
+	.rpc_release = nfs_readpage_release_common,
 };
 
 /*
@@ -668,11 +652,12 @@
 	if (ret == 0)
 		goto read_complete; /* all pages were read */
 
-	nfs_pageio_init_read(&pgio, inode);
+	nfs_pageio_init_read(&pgio, inode, &nfs_async_read_completion_ops);
 
 	ret = read_cache_pages(mapping, pages, readpage_async_filler, &desc);
 
 	nfs_pageio_complete(&pgio);
+	NFS_I(inode)->read_io += pgio.pg_bytes_written;
 	npages = (pgio.pg_bytes_written + PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT;
 	nfs_add_stats(inode, NFSIOS_READPAGES, npages);
 read_complete:
@@ -684,7 +669,7 @@
 int __init nfs_init_readpagecache(void)
 {
 	nfs_rdata_cachep = kmem_cache_create("nfs_read_data",
-					     sizeof(struct nfs_read_data),
+					     sizeof(struct nfs_read_header),
 					     0, SLAB_HWCACHE_ALIGN,
 					     NULL);
 	if (nfs_rdata_cachep == NULL)
diff --git a/fs/nfs/super.c b/fs/nfs/super.c
index 4ac7fca..906f09c 100644
--- a/fs/nfs/super.c
+++ b/fs/nfs/super.c
@@ -66,6 +66,7 @@
 #include "pnfs.h"
 
 #define NFSDBG_FACILITY		NFSDBG_VFS
+#define NFS_TEXT_DATA		1
 
 #ifdef CONFIG_NFS_V3
 #define NFS_DEFAULT_VERSION 3
@@ -277,12 +278,22 @@
 	{ Opt_vers_err, NULL }
 };
 
+struct nfs_mount_info {
+	void (*fill_super)(struct super_block *, struct nfs_mount_info *);
+	int (*set_security)(struct super_block *, struct dentry *, struct nfs_mount_info *);
+	struct nfs_parsed_mount_data *parsed;
+	struct nfs_clone_mount *cloned;
+	struct nfs_fh *mntfh;
+};
+
 static void nfs_umount_begin(struct super_block *);
 static int  nfs_statfs(struct dentry *, struct kstatfs *);
 static int  nfs_show_options(struct seq_file *, struct dentry *);
 static int  nfs_show_devname(struct seq_file *, struct dentry *);
 static int  nfs_show_path(struct seq_file *, struct dentry *);
 static int  nfs_show_stats(struct seq_file *, struct dentry *);
+static struct dentry *nfs_fs_mount_common(struct file_system_type *,
+		struct nfs_server *, int, const char *, struct nfs_mount_info *);
 static struct dentry *nfs_fs_mount(struct file_system_type *,
 		int, const char *, void *);
 static struct dentry *nfs_xdev_mount(struct file_system_type *fs_type,
@@ -323,12 +334,11 @@
 };
 
 #ifdef CONFIG_NFS_V4
-static int nfs4_validate_text_mount_data(void *options,
+static void nfs4_validate_mount_flags(struct nfs_parsed_mount_data *);
+static int nfs4_validate_mount_data(void *options,
 	struct nfs_parsed_mount_data *args, const char *dev_name);
 static struct dentry *nfs4_try_mount(int flags, const char *dev_name,
-	struct nfs_parsed_mount_data *data);
-static struct dentry *nfs4_mount(struct file_system_type *fs_type,
-	int flags, const char *dev_name, void *raw_data);
+	struct nfs_mount_info *mount_info);
 static struct dentry *nfs4_remote_mount(struct file_system_type *fs_type,
 	int flags, const char *dev_name, void *raw_data);
 static struct dentry *nfs4_xdev_mount(struct file_system_type *fs_type,
@@ -342,7 +352,7 @@
 static struct file_system_type nfs4_fs_type = {
 	.owner		= THIS_MODULE,
 	.name		= "nfs4",
-	.mount		= nfs4_mount,
+	.mount		= nfs_fs_mount,
 	.kill_sb	= nfs4_kill_super,
 	.fs_flags	= FS_RENAME_DOES_D_MOVE|FS_REVAL_DOT|FS_BINARY_MOUNTDATA,
 };
@@ -786,8 +796,8 @@
 
 static void show_implementation_id(struct seq_file *m, struct nfs_server *nfss)
 {
-	if (nfss->nfs_client && nfss->nfs_client->impl_id) {
-		struct nfs41_impl_id *impl_id = nfss->nfs_client->impl_id;
+	if (nfss->nfs_client && nfss->nfs_client->cl_implid) {
+		struct nfs41_impl_id *impl_id = nfss->nfs_client->cl_implid;
 		seq_printf(m, "\n\timpl_id:\tname='%s',domain='%s',"
 			   "date='%llu,%u'",
 			   impl_id->name, impl_id->domain,
@@ -938,7 +948,7 @@
 		rpc_killall_tasks(rpc);
 }
 
-static struct nfs_parsed_mount_data *nfs_alloc_parsed_mount_data(unsigned int version)
+static struct nfs_parsed_mount_data *nfs_alloc_parsed_mount_data(void)
 {
 	struct nfs_parsed_mount_data *data;
 
@@ -953,8 +963,8 @@
 		data->nfs_server.protocol = XPRT_TRANSPORT_TCP;
 		data->auth_flavors[0]	= RPC_AUTH_UNIX;
 		data->auth_flavor_len	= 1;
-		data->version		= version;
 		data->minorversion	= 0;
+		data->need_mount	= true;
 		data->net		= current->nsproxy->net_ns;
 		security_init_mnt_opts(&data->lsm_opts);
 	}
@@ -1674,8 +1684,8 @@
  * Use the remote server's MOUNT service to request the NFS file handle
  * corresponding to the provided path.
  */
-static int nfs_try_mount(struct nfs_parsed_mount_data *args,
-			 struct nfs_fh *root_fh)
+static int nfs_request_mount(struct nfs_parsed_mount_data *args,
+			     struct nfs_fh *root_fh)
 {
 	rpc_authflavor_t server_authlist[NFS_MAX_SECFLAVORS];
 	unsigned int server_authlist_len = ARRAY_SIZE(server_authlist);
@@ -1738,6 +1748,26 @@
 	return nfs_walk_authlist(args, &request);
 }
 
+static struct dentry *nfs_try_mount(int flags, const char *dev_name,
+				    struct nfs_mount_info *mount_info)
+{
+	int status;
+	struct nfs_server *server;
+
+	if (mount_info->parsed->need_mount) {
+		status = nfs_request_mount(mount_info->parsed, mount_info->mntfh);
+		if (status)
+			return ERR_PTR(status);
+	}
+
+	/* Get a volume representation */
+	server = nfs_create_server(mount_info->parsed, mount_info->mntfh);
+	if (IS_ERR(server))
+		return ERR_CAST(server);
+
+	return nfs_fs_mount_common(&nfs_fs_type, server, flags, dev_name, mount_info);
+}
+
 /*
  * Split "dev_name" into "hostname:export_path".
  *
@@ -1826,10 +1856,10 @@
  * + breaking back: trying proto=udp after proto=tcp, v2 after v3,
  *   mountproto=tcp after mountproto=udp, and so on
  */
-static int nfs_validate_mount_data(void *options,
-				   struct nfs_parsed_mount_data *args,
-				   struct nfs_fh *mntfh,
-				   const char *dev_name)
+static int nfs23_validate_mount_data(void *options,
+				     struct nfs_parsed_mount_data *args,
+				     struct nfs_fh *mntfh,
+				     const char *dev_name)
 {
 	struct nfs_mount_data *data = (struct nfs_mount_data *)options;
 	struct sockaddr *sap = (struct sockaddr *)&args->nfs_server.address;
@@ -1837,6 +1867,7 @@
 	if (data == NULL)
 		goto out_no_data;
 
+	args->version = NFS_DEFAULT_VERSION;
 	switch (data->version) {
 	case 1:
 		data->namlen = 0;
@@ -1883,6 +1914,7 @@
 		args->acregmax		= data->acregmax;
 		args->acdirmin		= data->acdirmin;
 		args->acdirmax		= data->acdirmax;
+		args->need_mount	= false;
 
 		memcpy(sap, &data->addr, sizeof(data->addr));
 		args->nfs_server.addrlen = sizeof(data->addr);
@@ -1934,43 +1966,8 @@
 		}
 
 		break;
-	default: {
-		int status;
-
-		if (nfs_parse_mount_options((char *)options, args) == 0)
-			return -EINVAL;
-
-		if (!nfs_verify_server_address(sap))
-			goto out_no_address;
-
-		if (args->version == 4)
-#ifdef CONFIG_NFS_V4
-			return nfs4_validate_text_mount_data(options,
-							     args, dev_name);
-#else
-			goto out_v4_not_compiled;
-#endif
-
-		nfs_set_port(sap, &args->nfs_server.port, 0);
-
-		nfs_set_mount_transport_protocol(args);
-
-		status = nfs_parse_devname(dev_name,
-					   &args->nfs_server.hostname,
-					   PAGE_SIZE,
-					   &args->nfs_server.export_path,
-					   NFS_MAXPATHLEN);
-		if (!status)
-			status = nfs_try_mount(args, mntfh);
-
-		kfree(args->nfs_server.export_path);
-		args->nfs_server.export_path = NULL;
-
-		if (status)
-			return status;
-
-		break;
-		}
+	default:
+		return NFS_TEXT_DATA;
 	}
 
 #ifndef CONFIG_NFS_V3
@@ -1999,12 +1996,6 @@
 	return -EPROTONOSUPPORT;
 #endif /* !CONFIG_NFS_V3 */
 
-#ifndef CONFIG_NFS_V4
-out_v4_not_compiled:
-	dfprintk(MOUNT, "NFS: NFSv4 is not compiled into kernel\n");
-	return -EPROTONOSUPPORT;
-#endif /* !CONFIG_NFS_V4 */
-
 out_nomem:
 	dfprintk(MOUNT, "NFS: not enough memory to handle mount options\n");
 	return -ENOMEM;
@@ -2018,6 +2009,82 @@
 	return -EINVAL;
 }
 
+#ifdef CONFIG_NFS_V4
+static int nfs_validate_mount_data(struct file_system_type *fs_type,
+				   void *options,
+				   struct nfs_parsed_mount_data *args,
+				   struct nfs_fh *mntfh,
+				   const char *dev_name)
+{
+	if (fs_type == &nfs_fs_type)
+		return nfs23_validate_mount_data(options, args, mntfh, dev_name);
+	return nfs4_validate_mount_data(options, args, dev_name);
+}
+#else
+static int nfs_validate_mount_data(struct file_system_type *fs_type,
+				   void *options,
+				   struct nfs_parsed_mount_data *args,
+				   struct nfs_fh *mntfh,
+				   const char *dev_name)
+{
+	return nfs23_validate_mount_data(options, args, mntfh, dev_name);
+}
+#endif
+
+static int nfs_validate_text_mount_data(void *options,
+					struct nfs_parsed_mount_data *args,
+					const char *dev_name)
+{
+	int port = 0;
+	int max_namelen = PAGE_SIZE;
+	int max_pathlen = NFS_MAXPATHLEN;
+	struct sockaddr *sap = (struct sockaddr *)&args->nfs_server.address;
+
+	if (nfs_parse_mount_options((char *)options, args) == 0)
+		return -EINVAL;
+
+	if (!nfs_verify_server_address(sap))
+		goto out_no_address;
+
+	if (args->version == 4) {
+#ifdef CONFIG_NFS_V4
+		port = NFS_PORT;
+		max_namelen = NFS4_MAXNAMLEN;
+		max_pathlen = NFS4_MAXPATHLEN;
+		nfs_validate_transport_protocol(args);
+		nfs4_validate_mount_flags(args);
+#else
+		goto out_v4_not_compiled;
+#endif /* CONFIG_NFS_V4 */
+	} else
+		nfs_set_mount_transport_protocol(args);
+
+	nfs_set_port(sap, &args->nfs_server.port, port);
+
+	if (args->auth_flavor_len > 1)
+		goto out_bad_auth;
+
+	return nfs_parse_devname(dev_name,
+				   &args->nfs_server.hostname,
+				   max_namelen,
+				   &args->nfs_server.export_path,
+				   max_pathlen);
+
+#ifndef CONFIG_NFS_V4
+out_v4_not_compiled:
+	dfprintk(MOUNT, "NFS: NFSv4 is not compiled into kernel\n");
+	return -EPROTONOSUPPORT;
+#endif /* !CONFIG_NFS_V4 */
+
+out_no_address:
+	dfprintk(MOUNT, "NFS: mount program didn't pass remote address\n");
+	return -EINVAL;
+
+out_bad_auth:
+	dfprintk(MOUNT, "NFS: Too many RPC auth flavours specified\n");
+	return -EINVAL;
+}
+
 static int
 nfs_compare_remount_data(struct nfs_server *nfss,
 			 struct nfs_parsed_mount_data *data)
@@ -2129,8 +2196,9 @@
  * Finish setting up an NFS2/3 superblock
  */
 static void nfs_fill_super(struct super_block *sb,
-			   struct nfs_parsed_mount_data *data)
+			   struct nfs_mount_info *mount_info)
 {
+	struct nfs_parsed_mount_data *data = mount_info->parsed;
 	struct nfs_server *server = NFS_SB(sb);
 
 	sb->s_blocksize_bits = 0;
@@ -2154,8 +2222,9 @@
  * Finish setting up a cloned NFS2/3 superblock
  */
 static void nfs_clone_super(struct super_block *sb,
-			    const struct super_block *old_sb)
+			    struct nfs_mount_info *mount_info)
 {
+	const struct super_block *old_sb = mount_info->cloned->sb;
 	struct nfs_server *server = NFS_SB(sb);
 
 	sb->s_blocksize_bits = old_sb->s_blocksize_bits;
@@ -2278,52 +2347,70 @@
 	return nfs_compare_mount_options(sb, server, mntflags);
 }
 
+#ifdef CONFIG_NFS_FSCACHE
+static void nfs_get_cache_cookie(struct super_block *sb,
+				 struct nfs_parsed_mount_data *parsed,
+				 struct nfs_clone_mount *cloned)
+{
+	char *uniq = NULL;
+	int ulen = 0;
+
+	if (parsed && parsed->fscache_uniq) {
+		uniq = parsed->fscache_uniq;
+		ulen = strlen(parsed->fscache_uniq);
+	} else if (cloned) {
+		struct nfs_server *mnt_s = NFS_SB(cloned->sb);
+		if (mnt_s->fscache_key) {
+			uniq = mnt_s->fscache_key->key.uniquifier;
+			ulen = mnt_s->fscache_key->key.uniq_len;
+		}
+	}
+
+	nfs_fscache_get_super_cookie(sb, uniq, ulen);
+}
+#else
+static void nfs_get_cache_cookie(struct super_block *sb,
+				 struct nfs_parsed_mount_data *parsed,
+				 struct nfs_clone_mount *cloned)
+{
+}
+#endif
+
 static int nfs_bdi_register(struct nfs_server *server)
 {
 	return bdi_register_dev(&server->backing_dev_info, server->s_dev);
 }
 
-static struct dentry *nfs_fs_mount(struct file_system_type *fs_type,
-	int flags, const char *dev_name, void *raw_data)
+static int nfs_set_sb_security(struct super_block *s, struct dentry *mntroot,
+			       struct nfs_mount_info *mount_info)
 {
-	struct nfs_server *server = NULL;
+	return security_sb_set_mnt_opts(s, &mount_info->parsed->lsm_opts);
+}
+
+static int nfs_clone_sb_security(struct super_block *s, struct dentry *mntroot,
+				 struct nfs_mount_info *mount_info)
+{
+	/* clone any lsm security options from the parent to the new sb */
+	security_sb_clone_mnt_opts(mount_info->cloned->sb, s);
+	if (mntroot->d_inode->i_op != NFS_SB(s)->nfs_client->rpc_ops->dir_inode_ops)
+		return -ESTALE;
+	return 0;
+}
+
+static struct dentry *nfs_fs_mount_common(struct file_system_type *fs_type,
+					  struct nfs_server *server,
+					  int flags, const char *dev_name,
+					  struct nfs_mount_info *mount_info)
+{
 	struct super_block *s;
-	struct nfs_parsed_mount_data *data;
-	struct nfs_fh *mntfh;
 	struct dentry *mntroot = ERR_PTR(-ENOMEM);
 	int (*compare_super)(struct super_block *, void *) = nfs_compare_super;
 	struct nfs_sb_mountdata sb_mntdata = {
 		.mntflags = flags,
+		.server = server,
 	};
 	int error;
 
-	data = nfs_alloc_parsed_mount_data(NFS_DEFAULT_VERSION);
-	mntfh = nfs_alloc_fhandle();
-	if (data == NULL || mntfh == NULL)
-		goto out;
-
-	/* Validate the mount data */
-	error = nfs_validate_mount_data(raw_data, data, mntfh, dev_name);
-	if (error < 0) {
-		mntroot = ERR_PTR(error);
-		goto out;
-	}
-
-#ifdef CONFIG_NFS_V4
-	if (data->version == 4) {
-		mntroot = nfs4_try_mount(flags, dev_name, data);
-		goto out;
-	}
-#endif	/* CONFIG_NFS_V4 */
-
-	/* Get a volume representation */
-	server = nfs_create_server(data, mntfh);
-	if (IS_ERR(server)) {
-		mntroot = ERR_CAST(server);
-		goto out;
-	}
-	sb_mntdata.server = server;
-
 	if (server->flags & NFS_MOUNT_UNSHARED)
 		compare_super = NULL;
 
@@ -2351,23 +2438,21 @@
 
 	if (!s->s_root) {
 		/* initial superblock/root creation */
-		nfs_fill_super(s, data);
-		nfs_fscache_get_super_cookie(s, data->fscache_uniq, NULL);
+		mount_info->fill_super(s, mount_info);
+		nfs_get_cache_cookie(s, mount_info->parsed, mount_info->cloned);
 	}
 
-	mntroot = nfs_get_root(s, mntfh, dev_name);
+	mntroot = nfs_get_root(s, mount_info->mntfh, dev_name);
 	if (IS_ERR(mntroot))
 		goto error_splat_super;
 
-	error = security_sb_set_mnt_opts(s, &data->lsm_opts);
+	error = mount_info->set_security(s, mntroot, mount_info);
 	if (error)
 		goto error_splat_root;
 
 	s->s_flags |= MS_ACTIVE;
 
 out:
-	nfs_free_parsed_mount_data(data);
-	nfs_free_fhandle(mntfh);
 	return mntroot;
 
 out_err_nosb:
@@ -2385,6 +2470,43 @@
 	goto out;
 }
 
+static struct dentry *nfs_fs_mount(struct file_system_type *fs_type,
+	int flags, const char *dev_name, void *raw_data)
+{
+	struct nfs_mount_info mount_info = {
+		.fill_super = nfs_fill_super,
+		.set_security = nfs_set_sb_security,
+	};
+	struct dentry *mntroot = ERR_PTR(-ENOMEM);
+	int error;
+
+	mount_info.parsed = nfs_alloc_parsed_mount_data();
+	mount_info.mntfh = nfs_alloc_fhandle();
+	if (mount_info.parsed == NULL || mount_info.mntfh == NULL)
+		goto out;
+
+	/* Validate the mount data */
+	error = nfs_validate_mount_data(fs_type, raw_data, mount_info.parsed, mount_info.mntfh, dev_name);
+	if (error == NFS_TEXT_DATA)
+		error = nfs_validate_text_mount_data(raw_data, mount_info.parsed, dev_name);
+	if (error < 0) {
+		mntroot = ERR_PTR(error);
+		goto out;
+	}
+
+#ifdef CONFIG_NFS_V4
+	if (mount_info.parsed->version == 4)
+		mntroot = nfs4_try_mount(flags, dev_name, &mount_info);
+	else
+#endif	/* CONFIG_NFS_V4 */
+		mntroot = nfs_try_mount(flags, dev_name, &mount_info);
+
+out:
+	nfs_free_parsed_mount_data(mount_info.parsed);
+	nfs_free_fhandle(mount_info.mntfh);
+	return mntroot;
+}
+
 /*
  * Ensure that we unregister the bdi before kill_anon_super
  * releases the device name
@@ -2409,93 +2531,51 @@
 }
 
 /*
+ * Clone an NFS2/3/4 server record on xdev traversal (FSID-change)
+ */
+static struct dentry *
+nfs_xdev_mount_common(struct file_system_type *fs_type, int flags,
+		const char *dev_name, struct nfs_mount_info *mount_info)
+{
+	struct nfs_clone_mount *data = mount_info->cloned;
+	struct nfs_server *server;
+	struct dentry *mntroot = ERR_PTR(-ENOMEM);
+	int error;
+
+	dprintk("--> nfs_xdev_mount_common()\n");
+
+	mount_info->mntfh = data->fh;
+
+	/* create a new volume representation */
+	server = nfs_clone_server(NFS_SB(data->sb), data->fh, data->fattr, data->authflavor);
+	if (IS_ERR(server)) {
+		error = PTR_ERR(server);
+		goto out_err;
+	}
+
+	mntroot = nfs_fs_mount_common(fs_type, server, flags, dev_name, mount_info);
+	dprintk("<-- nfs_xdev_mount_common() = 0\n");
+out:
+	return mntroot;
+
+out_err:
+	dprintk("<-- nfs_xdev_mount_common() = %d [error]\n", error);
+	goto out;
+}
+
+/*
  * Clone an NFS2/3 server record on xdev traversal (FSID-change)
  */
 static struct dentry *
 nfs_xdev_mount(struct file_system_type *fs_type, int flags,
 		const char *dev_name, void *raw_data)
 {
-	struct nfs_clone_mount *data = raw_data;
-	struct super_block *s;
-	struct nfs_server *server;
-	struct dentry *mntroot;
-	int (*compare_super)(struct super_block *, void *) = nfs_compare_super;
-	struct nfs_sb_mountdata sb_mntdata = {
-		.mntflags = flags,
+	struct nfs_mount_info mount_info = {
+		.fill_super = nfs_clone_super,
+		.set_security = nfs_clone_sb_security,
+		.cloned   = raw_data,
 	};
-	int error;
-
-	dprintk("--> nfs_xdev_mount()\n");
-
-	/* create a new volume representation */
-	server = nfs_clone_server(NFS_SB(data->sb), data->fh, data->fattr, data->authflavor);
-	if (IS_ERR(server)) {
-		error = PTR_ERR(server);
-		goto out_err_noserver;
-	}
-	sb_mntdata.server = server;
-
-	if (server->flags & NFS_MOUNT_UNSHARED)
-		compare_super = NULL;
-
-	/* -o noac implies -o sync */
-	if (server->flags & NFS_MOUNT_NOAC)
-		sb_mntdata.mntflags |= MS_SYNCHRONOUS;
-
-	/* Get a superblock - note that we may end up sharing one that already exists */
-	s = sget(&nfs_fs_type, compare_super, nfs_set_super, &sb_mntdata);
-	if (IS_ERR(s)) {
-		error = PTR_ERR(s);
-		goto out_err_nosb;
-	}
-
-	if (s->s_fs_info != server) {
-		nfs_free_server(server);
-		server = NULL;
-	} else {
-		error = nfs_bdi_register(server);
-		if (error)
-			goto error_splat_bdi;
-	}
-
-	if (!s->s_root) {
-		/* initial superblock/root creation */
-		nfs_clone_super(s, data->sb);
-		nfs_fscache_get_super_cookie(s, NULL, data);
-	}
-
-	mntroot = nfs_get_root(s, data->fh, dev_name);
-	if (IS_ERR(mntroot)) {
-		error = PTR_ERR(mntroot);
-		goto error_splat_super;
-	}
-	if (mntroot->d_inode->i_op != NFS_SB(s)->nfs_client->rpc_ops->dir_inode_ops) {
-		dput(mntroot);
-		error = -ESTALE;
-		goto error_splat_super;
-	}
-
-	s->s_flags |= MS_ACTIVE;
-
-	/* clone any lsm security options from the parent to the new sb */
-	security_sb_clone_mnt_opts(data->sb, s);
-
-	dprintk("<-- nfs_xdev_mount() = 0\n");
-	return mntroot;
-
-out_err_nosb:
-	nfs_free_server(server);
-out_err_noserver:
-	dprintk("<-- nfs_xdev_mount() = %d [error]\n", error);
-	return ERR_PTR(error);
-
-error_splat_super:
-	if (server && !s->s_root)
-		bdi_unregister(&server->backing_dev_info);
-error_splat_bdi:
-	deactivate_locked_super(s);
-	dprintk("<-- nfs_xdev_mount() = %d [splat]\n", error);
-	return ERR_PTR(error);
+	return nfs_xdev_mount_common(&nfs_fs_type, flags, dev_name, &mount_info);
 }
 
 #ifdef CONFIG_NFS_V4
@@ -2504,8 +2584,9 @@
  * Finish setting up a cloned NFS4 superblock
  */
 static void nfs4_clone_super(struct super_block *sb,
-			    const struct super_block *old_sb)
+			     struct nfs_mount_info *mount_info)
 {
+	const struct super_block *old_sb = mount_info->cloned->sb;
 	sb->s_blocksize_bits = old_sb->s_blocksize_bits;
 	sb->s_blocksize = old_sb->s_blocksize;
 	sb->s_maxbytes = old_sb->s_maxbytes;
@@ -2523,7 +2604,8 @@
 /*
  * Set up an NFS4 superblock
  */
-static void nfs4_fill_super(struct super_block *sb)
+static void nfs4_fill_super(struct super_block *sb,
+			    struct nfs_mount_info *mount_info)
 {
 	sb->s_time_gran = 1;
 	sb->s_op = &nfs4_sops;
@@ -2542,37 +2624,6 @@
 			 NFS_MOUNT_LOCAL_FLOCK|NFS_MOUNT_LOCAL_FCNTL);
 }
 
-static int nfs4_validate_text_mount_data(void *options,
-					 struct nfs_parsed_mount_data *args,
-					 const char *dev_name)
-{
-	struct sockaddr *sap = (struct sockaddr *)&args->nfs_server.address;
-
-	nfs_set_port(sap, &args->nfs_server.port, NFS_PORT);
-
-	nfs_validate_transport_protocol(args);
-
-	nfs4_validate_mount_flags(args);
-
-	if (args->version != 4) {
-		dfprintk(MOUNT,
-			 "NFS4: Illegal mount version\n");
-		return -EINVAL;
-	}
-
-	if (args->auth_flavor_len > 1) {
-		dfprintk(MOUNT,
-			 "NFS4: Too many RPC auth flavours specified\n");
-		return -EINVAL;
-	}
-
-	return nfs_parse_devname(dev_name,
-				   &args->nfs_server.hostname,
-				   NFS4_MAXNAMLEN,
-				   &args->nfs_server.export_path,
-				   NFS4_MAXPATHLEN);
-}
-
 /*
  * Validate NFSv4 mount options
  */
@@ -2587,6 +2638,8 @@
 	if (data == NULL)
 		goto out_no_data;
 
+	args->version = 4;
+
 	switch (data->version) {
 	case 1:
 		if (data->host_addrlen > sizeof(args->nfs_server.address))
@@ -2643,13 +2696,7 @@
 
 		break;
 	default:
-		if (nfs_parse_mount_options((char *)options, args) == 0)
-			return -EINVAL;
-
-		if (!nfs_verify_server_address(sap))
-			return -EINVAL;
-
-		return nfs4_validate_text_mount_data(options, args, dev_name);
+		return NFS_TEXT_DATA;
 	}
 
 	return 0;
@@ -2673,91 +2720,26 @@
  */
 static struct dentry *
 nfs4_remote_mount(struct file_system_type *fs_type, int flags,
-		  const char *dev_name, void *raw_data)
+		  const char *dev_name, void *info)
 {
-	struct nfs_parsed_mount_data *data = raw_data;
-	struct super_block *s;
+	struct nfs_mount_info *mount_info = info;
 	struct nfs_server *server;
-	struct nfs_fh *mntfh;
-	struct dentry *mntroot;
-	int (*compare_super)(struct super_block *, void *) = nfs_compare_super;
-	struct nfs_sb_mountdata sb_mntdata = {
-		.mntflags = flags,
-	};
-	int error = -ENOMEM;
+	struct dentry *mntroot = ERR_PTR(-ENOMEM);
 
-	mntfh = nfs_alloc_fhandle();
-	if (data == NULL || mntfh == NULL)
-		goto out;
+	mount_info->fill_super = nfs4_fill_super;
+	mount_info->set_security = nfs_set_sb_security;
 
 	/* Get a volume representation */
-	server = nfs4_create_server(data, mntfh);
+	server = nfs4_create_server(mount_info->parsed, mount_info->mntfh);
 	if (IS_ERR(server)) {
-		error = PTR_ERR(server);
+		mntroot = ERR_CAST(server);
 		goto out;
 	}
-	sb_mntdata.server = server;
 
-	if (server->flags & NFS4_MOUNT_UNSHARED)
-		compare_super = NULL;
-
-	/* -o noac implies -o sync */
-	if (server->flags & NFS_MOUNT_NOAC)
-		sb_mntdata.mntflags |= MS_SYNCHRONOUS;
-
-	/* Get a superblock - note that we may end up sharing one that already exists */
-	s = sget(&nfs4_fs_type, compare_super, nfs_set_super, &sb_mntdata);
-	if (IS_ERR(s)) {
-		error = PTR_ERR(s);
-		goto out_free;
-	}
-
-	if (s->s_fs_info != server) {
-		nfs_free_server(server);
-		server = NULL;
-	} else {
-		error = nfs_bdi_register(server);
-		if (error)
-			goto error_splat_bdi;
-	}
-
-	if (!s->s_root) {
-		/* initial superblock/root creation */
-		nfs4_fill_super(s);
-		nfs_fscache_get_super_cookie(s, data->fscache_uniq, NULL);
-	}
-
-	mntroot = nfs4_get_root(s, mntfh, dev_name);
-	if (IS_ERR(mntroot)) {
-		error = PTR_ERR(mntroot);
-		goto error_splat_super;
-	}
-
-	error = security_sb_set_mnt_opts(s, &data->lsm_opts);
-	if (error)
-		goto error_splat_root;
-
-	s->s_flags |= MS_ACTIVE;
-
-	nfs_free_fhandle(mntfh);
-	return mntroot;
+	mntroot = nfs_fs_mount_common(fs_type, server, flags, dev_name, mount_info);
 
 out:
-	nfs_free_fhandle(mntfh);
-	return ERR_PTR(error);
-
-out_free:
-	nfs_free_server(server);
-	goto out;
-
-error_splat_root:
-	dput(mntroot);
-error_splat_super:
-	if (server && !s->s_root)
-		bdi_unregister(&server->backing_dev_info);
-error_splat_bdi:
-	deactivate_locked_super(s);
-	goto out;
+	return mntroot;
 }
 
 static struct vfsmount *nfs_do_root_mount(struct file_system_type *fs_type,
@@ -2869,17 +2851,18 @@
 }
 
 static struct dentry *nfs4_try_mount(int flags, const char *dev_name,
-			 struct nfs_parsed_mount_data *data)
+			 struct nfs_mount_info *mount_info)
 {
 	char *export_path;
 	struct vfsmount *root_mnt;
 	struct dentry *res;
+	struct nfs_parsed_mount_data *data = mount_info->parsed;
 
 	dfprintk(MOUNT, "--> nfs4_try_mount()\n");
 
 	export_path = data->nfs_server.export_path;
 	data->nfs_server.export_path = "/";
-	root_mnt = nfs_do_root_mount(&nfs4_remote_fs_type, flags, data,
+	root_mnt = nfs_do_root_mount(&nfs4_remote_fs_type, flags, mount_info,
 			data->nfs_server.hostname);
 	data->nfs_server.export_path = export_path;
 
@@ -2891,38 +2874,6 @@
 	return res;
 }
 
-/*
- * Get the superblock for an NFS4 mountpoint
- */
-static struct dentry *nfs4_mount(struct file_system_type *fs_type,
-	int flags, const char *dev_name, void *raw_data)
-{
-	struct nfs_parsed_mount_data *data;
-	int error = -ENOMEM;
-	struct dentry *res = ERR_PTR(-ENOMEM);
-
-	data = nfs_alloc_parsed_mount_data(4);
-	if (data == NULL)
-		goto out;
-
-	/* Validate the mount data */
-	error = nfs4_validate_mount_data(raw_data, data, dev_name);
-	if (error < 0) {
-		res = ERR_PTR(error);
-		goto out;
-	}
-
-	res = nfs4_try_mount(flags, dev_name, data);
-	if (IS_ERR(res))
-		error = PTR_ERR(res);
-
-out:
-	nfs_free_parsed_mount_data(data);
-	dprintk("<-- nfs4_mount() = %d%s\n", error,
-			error != 0 ? " [error]" : "");
-	return res;
-}
-
 static void nfs4_kill_super(struct super_block *sb)
 {
 	struct nfs_server *server = NFS_SB(sb);
@@ -2942,181 +2893,43 @@
 nfs4_xdev_mount(struct file_system_type *fs_type, int flags,
 		 const char *dev_name, void *raw_data)
 {
-	struct nfs_clone_mount *data = raw_data;
-	struct super_block *s;
-	struct nfs_server *server;
-	struct dentry *mntroot;
-	int (*compare_super)(struct super_block *, void *) = nfs_compare_super;
-	struct nfs_sb_mountdata sb_mntdata = {
-		.mntflags = flags,
+	struct nfs_mount_info mount_info = {
+		.fill_super = nfs4_clone_super,
+		.set_security = nfs_clone_sb_security,
+		.cloned = raw_data,
 	};
-	int error;
-
-	dprintk("--> nfs4_xdev_mount()\n");
-
-	/* create a new volume representation */
-	server = nfs_clone_server(NFS_SB(data->sb), data->fh, data->fattr, data->authflavor);
-	if (IS_ERR(server)) {
-		error = PTR_ERR(server);
-		goto out_err_noserver;
-	}
-	sb_mntdata.server = server;
-
-	if (server->flags & NFS4_MOUNT_UNSHARED)
-		compare_super = NULL;
-
-	/* -o noac implies -o sync */
-	if (server->flags & NFS_MOUNT_NOAC)
-		sb_mntdata.mntflags |= MS_SYNCHRONOUS;
-
-	/* Get a superblock - note that we may end up sharing one that already exists */
-	s = sget(&nfs4_fs_type, compare_super, nfs_set_super, &sb_mntdata);
-	if (IS_ERR(s)) {
-		error = PTR_ERR(s);
-		goto out_err_nosb;
-	}
-
-	if (s->s_fs_info != server) {
-		nfs_free_server(server);
-		server = NULL;
-	} else {
-		error = nfs_bdi_register(server);
-		if (error)
-			goto error_splat_bdi;
-	}
-
-	if (!s->s_root) {
-		/* initial superblock/root creation */
-		nfs4_clone_super(s, data->sb);
-		nfs_fscache_get_super_cookie(s, NULL, data);
-	}
-
-	mntroot = nfs4_get_root(s, data->fh, dev_name);
-	if (IS_ERR(mntroot)) {
-		error = PTR_ERR(mntroot);
-		goto error_splat_super;
-	}
-	if (mntroot->d_inode->i_op != NFS_SB(s)->nfs_client->rpc_ops->dir_inode_ops) {
-		dput(mntroot);
-		error = -ESTALE;
-		goto error_splat_super;
-	}
-
-	s->s_flags |= MS_ACTIVE;
-
-	security_sb_clone_mnt_opts(data->sb, s);
-
-	dprintk("<-- nfs4_xdev_mount() = 0\n");
-	return mntroot;
-
-out_err_nosb:
-	nfs_free_server(server);
-out_err_noserver:
-	dprintk("<-- nfs4_xdev_mount() = %d [error]\n", error);
-	return ERR_PTR(error);
-
-error_splat_super:
-	if (server && !s->s_root)
-		bdi_unregister(&server->backing_dev_info);
-error_splat_bdi:
-	deactivate_locked_super(s);
-	dprintk("<-- nfs4_xdev_mount() = %d [splat]\n", error);
-	return ERR_PTR(error);
+	return nfs_xdev_mount_common(&nfs4_fs_type, flags, dev_name, &mount_info);
 }
 
 static struct dentry *
 nfs4_remote_referral_mount(struct file_system_type *fs_type, int flags,
 			   const char *dev_name, void *raw_data)
 {
-	struct nfs_clone_mount *data = raw_data;
-	struct super_block *s;
-	struct nfs_server *server;
-	struct dentry *mntroot;
-	struct nfs_fh *mntfh;
-	int (*compare_super)(struct super_block *, void *) = nfs_compare_super;
-	struct nfs_sb_mountdata sb_mntdata = {
-		.mntflags = flags,
+	struct nfs_mount_info mount_info = {
+		.fill_super = nfs4_fill_super,
+		.set_security = nfs_clone_sb_security,
+		.cloned = raw_data,
 	};
-	int error = -ENOMEM;
+	struct nfs_server *server;
+	struct dentry *mntroot = ERR_PTR(-ENOMEM);
 
 	dprintk("--> nfs4_referral_get_sb()\n");
 
-	mntfh = nfs_alloc_fhandle();
-	if (mntfh == NULL)
-		goto out_err_nofh;
+	mount_info.mntfh = nfs_alloc_fhandle();
+	if (mount_info.cloned == NULL || mount_info.mntfh == NULL)
+		goto out;
 
 	/* create a new volume representation */
-	server = nfs4_create_referral_server(data, mntfh);
+	server = nfs4_create_referral_server(mount_info.cloned, mount_info.mntfh);
 	if (IS_ERR(server)) {
-		error = PTR_ERR(server);
-		goto out_err_noserver;
-	}
-	sb_mntdata.server = server;
-
-	if (server->flags & NFS4_MOUNT_UNSHARED)
-		compare_super = NULL;
-
-	/* -o noac implies -o sync */
-	if (server->flags & NFS_MOUNT_NOAC)
-		sb_mntdata.mntflags |= MS_SYNCHRONOUS;
-
-	/* Get a superblock - note that we may end up sharing one that already exists */
-	s = sget(&nfs4_fs_type, compare_super, nfs_set_super, &sb_mntdata);
-	if (IS_ERR(s)) {
-		error = PTR_ERR(s);
-		goto out_err_nosb;
+		mntroot = ERR_CAST(server);
+		goto out;
 	}
 
-	if (s->s_fs_info != server) {
-		nfs_free_server(server);
-		server = NULL;
-	} else {
-		error = nfs_bdi_register(server);
-		if (error)
-			goto error_splat_bdi;
-	}
-
-	if (!s->s_root) {
-		/* initial superblock/root creation */
-		nfs4_fill_super(s);
-		nfs_fscache_get_super_cookie(s, NULL, data);
-	}
-
-	mntroot = nfs4_get_root(s, mntfh, dev_name);
-	if (IS_ERR(mntroot)) {
-		error = PTR_ERR(mntroot);
-		goto error_splat_super;
-	}
-	if (mntroot->d_inode->i_op != NFS_SB(s)->nfs_client->rpc_ops->dir_inode_ops) {
-		dput(mntroot);
-		error = -ESTALE;
-		goto error_splat_super;
-	}
-
-	s->s_flags |= MS_ACTIVE;
-
-	security_sb_clone_mnt_opts(data->sb, s);
-
-	nfs_free_fhandle(mntfh);
-	dprintk("<-- nfs4_referral_get_sb() = 0\n");
+	mntroot = nfs_fs_mount_common(&nfs4_fs_type, server, flags, dev_name, &mount_info);
+out:
+	nfs_free_fhandle(mount_info.mntfh);
 	return mntroot;
-
-out_err_nosb:
-	nfs_free_server(server);
-out_err_noserver:
-	nfs_free_fhandle(mntfh);
-out_err_nofh:
-	dprintk("<-- nfs4_referral_get_sb() = %d [error]\n", error);
-	return ERR_PTR(error);
-
-error_splat_super:
-	if (server && !s->s_root)
-		bdi_unregister(&server->backing_dev_info);
-error_splat_bdi:
-	deactivate_locked_super(s);
-	nfs_free_fhandle(mntfh);
-	dprintk("<-- nfs4_referral_get_sb() = %d [splat]\n", error);
-	return ERR_PTR(error);
 }
 
 /*
diff --git a/fs/nfs/write.c b/fs/nfs/write.c
index c074623..4d6861c 100644
--- a/fs/nfs/write.c
+++ b/fs/nfs/write.c
@@ -39,20 +39,20 @@
 /*
  * Local function declarations
  */
-static void nfs_pageio_init_write(struct nfs_pageio_descriptor *desc,
-				  struct inode *inode, int ioflags);
 static void nfs_redirty_request(struct nfs_page *req);
-static const struct rpc_call_ops nfs_write_partial_ops;
-static const struct rpc_call_ops nfs_write_full_ops;
+static const struct rpc_call_ops nfs_write_common_ops;
 static const struct rpc_call_ops nfs_commit_ops;
+static const struct nfs_pgio_completion_ops nfs_async_write_completion_ops;
+static const struct nfs_commit_completion_ops nfs_commit_completion_ops;
 
 static struct kmem_cache *nfs_wdata_cachep;
 static mempool_t *nfs_wdata_mempool;
+static struct kmem_cache *nfs_cdata_cachep;
 static mempool_t *nfs_commit_mempool;
 
-struct nfs_write_data *nfs_commitdata_alloc(void)
+struct nfs_commit_data *nfs_commitdata_alloc(void)
 {
-	struct nfs_write_data *p = mempool_alloc(nfs_commit_mempool, GFP_NOFS);
+	struct nfs_commit_data *p = mempool_alloc(nfs_commit_mempool, GFP_NOFS);
 
 	if (p) {
 		memset(p, 0, sizeof(*p));
@@ -62,46 +62,74 @@
 }
 EXPORT_SYMBOL_GPL(nfs_commitdata_alloc);
 
-void nfs_commit_free(struct nfs_write_data *p)
+void nfs_commit_free(struct nfs_commit_data *p)
 {
-	if (p && (p->pagevec != &p->page_array[0]))
-		kfree(p->pagevec);
 	mempool_free(p, nfs_commit_mempool);
 }
 EXPORT_SYMBOL_GPL(nfs_commit_free);
 
-struct nfs_write_data *nfs_writedata_alloc(unsigned int pagecount)
+struct nfs_write_header *nfs_writehdr_alloc(void)
 {
-	struct nfs_write_data *p = mempool_alloc(nfs_wdata_mempool, GFP_NOFS);
+	struct nfs_write_header *p = mempool_alloc(nfs_wdata_mempool, GFP_NOFS);
 
 	if (p) {
+		struct nfs_pgio_header *hdr = &p->header;
+
 		memset(p, 0, sizeof(*p));
-		INIT_LIST_HEAD(&p->pages);
-		p->npages = pagecount;
-		if (pagecount <= ARRAY_SIZE(p->page_array))
-			p->pagevec = p->page_array;
-		else {
-			p->pagevec = kcalloc(pagecount, sizeof(struct page *), GFP_NOFS);
-			if (!p->pagevec) {
-				mempool_free(p, nfs_wdata_mempool);
-				p = NULL;
-			}
-		}
+		INIT_LIST_HEAD(&hdr->pages);
+		INIT_LIST_HEAD(&hdr->rpc_list);
+		spin_lock_init(&hdr->lock);
+		atomic_set(&hdr->refcnt, 0);
+		hdr->verf = &p->verf;
 	}
 	return p;
 }
 
-void nfs_writedata_free(struct nfs_write_data *p)
+static struct nfs_write_data *nfs_writedata_alloc(struct nfs_pgio_header *hdr,
+						  unsigned int pagecount)
 {
-	if (p && (p->pagevec != &p->page_array[0]))
-		kfree(p->pagevec);
-	mempool_free(p, nfs_wdata_mempool);
+	struct nfs_write_data *data, *prealloc;
+
+	prealloc = &container_of(hdr, struct nfs_write_header, header)->rpc_data;
+	if (prealloc->header == NULL)
+		data = prealloc;
+	else
+		data = kzalloc(sizeof(*data), GFP_KERNEL);
+	if (!data)
+		goto out;
+
+	if (nfs_pgarray_set(&data->pages, pagecount)) {
+		data->header = hdr;
+		atomic_inc(&hdr->refcnt);
+	} else {
+		if (data != prealloc)
+			kfree(data);
+		data = NULL;
+	}
+out:
+	return data;
+}
+
+void nfs_writehdr_free(struct nfs_pgio_header *hdr)
+{
+	struct nfs_write_header *whdr = container_of(hdr, struct nfs_write_header, header);
+	mempool_free(whdr, nfs_wdata_mempool);
 }
 
 void nfs_writedata_release(struct nfs_write_data *wdata)
 {
+	struct nfs_pgio_header *hdr = wdata->header;
+	struct nfs_write_header *write_header = container_of(hdr, struct nfs_write_header, header);
+
 	put_nfs_open_context(wdata->args.context);
-	nfs_writedata_free(wdata);
+	if (wdata->pages.pagevec != wdata->pages.page_array)
+		kfree(wdata->pages.pagevec);
+	if (wdata != &write_header->rpc_data)
+		kfree(wdata);
+	else
+		wdata->header = NULL;
+	if (atomic_dec_and_test(&hdr->refcnt))
+		hdr->completion_ops->completion(hdr);
 }
 
 static void nfs_context_set_write_error(struct nfs_open_context *ctx, int error)
@@ -203,7 +231,6 @@
 		struct inode *inode = page->mapping->host;
 		struct nfs_server *nfss = NFS_SERVER(inode);
 
-		page_cache_get(page);
 		if (atomic_long_inc_return(&nfss->writeback) >
 				NFS_CONGESTION_ON_THRESH) {
 			set_bdi_congested(&nfss->backing_dev_info,
@@ -219,7 +246,6 @@
 	struct nfs_server *nfss = NFS_SERVER(inode);
 
 	end_page_writeback(page);
-	page_cache_release(page);
 	if (atomic_long_dec_return(&nfss->writeback) < NFS_CONGESTION_OFF_THRESH)
 		clear_bdi_congested(&nfss->backing_dev_info, BLK_RW_ASYNC);
 }
@@ -235,10 +261,10 @@
 		req = nfs_page_find_request_locked(page);
 		if (req == NULL)
 			break;
-		if (nfs_lock_request_dontget(req))
+		if (nfs_lock_request(req))
 			break;
 		/* Note: If we hold the page lock, as is the case in nfs_writepage,
-		 *	 then the call to nfs_lock_request_dontget() will always
+		 *	 then the call to nfs_lock_request() will always
 		 *	 succeed provided that someone hasn't already marked the
 		 *	 request as dirty (in which case we don't care).
 		 */
@@ -310,7 +336,8 @@
 	struct nfs_pageio_descriptor pgio;
 	int err;
 
-	nfs_pageio_init_write(&pgio, page->mapping->host, wb_priority(wbc));
+	nfs_pageio_init_write(&pgio, page->mapping->host, wb_priority(wbc),
+			      &nfs_async_write_completion_ops);
 	err = nfs_do_writepage(page, wbc, &pgio);
 	nfs_pageio_complete(&pgio);
 	if (err < 0)
@@ -353,7 +380,8 @@
 
 	nfs_inc_stats(inode, NFSIOS_VFSWRITEPAGES);
 
-	nfs_pageio_init_write(&pgio, inode, wb_priority(wbc));
+	nfs_pageio_init_write(&pgio, inode, wb_priority(wbc),
+			      &nfs_async_write_completion_ops);
 	err = write_cache_pages(mapping, wbc, nfs_writepages_callback, &pgio);
 	nfs_pageio_complete(&pgio);
 
@@ -379,7 +407,7 @@
 	struct nfs_inode *nfsi = NFS_I(inode);
 
 	/* Lock the request! */
-	nfs_lock_request_dontget(req);
+	nfs_lock_request(req);
 
 	spin_lock(&inode->i_lock);
 	if (!nfsi->npages && nfs_have_delegation(inode, FMODE_WRITE))
@@ -421,65 +449,88 @@
 /**
  * nfs_request_add_commit_list - add request to a commit list
  * @req: pointer to a struct nfs_page
- * @head: commit list head
+ * @dst: commit list head
+ * @cinfo: holds list lock and accounting info
  *
- * This sets the PG_CLEAN bit, updates the inode global count of
+ * This sets the PG_CLEAN bit, updates the cinfo count of
  * number of outstanding requests requiring a commit as well as
  * the MM page stats.
  *
- * The caller must _not_ hold the inode->i_lock, but must be
+ * The caller must _not_ hold the cinfo->lock, but must be
  * holding the nfs_page lock.
  */
 void
-nfs_request_add_commit_list(struct nfs_page *req, struct list_head *head)
+nfs_request_add_commit_list(struct nfs_page *req, struct list_head *dst,
+			    struct nfs_commit_info *cinfo)
 {
-	struct inode *inode = req->wb_context->dentry->d_inode;
-
 	set_bit(PG_CLEAN, &(req)->wb_flags);
-	spin_lock(&inode->i_lock);
-	nfs_list_add_request(req, head);
-	NFS_I(inode)->ncommit++;
-	spin_unlock(&inode->i_lock);
-	inc_zone_page_state(req->wb_page, NR_UNSTABLE_NFS);
-	inc_bdi_stat(req->wb_page->mapping->backing_dev_info, BDI_RECLAIMABLE);
-	__mark_inode_dirty(inode, I_DIRTY_DATASYNC);
+	spin_lock(cinfo->lock);
+	nfs_list_add_request(req, dst);
+	cinfo->mds->ncommit++;
+	spin_unlock(cinfo->lock);
+	if (!cinfo->dreq) {
+		inc_zone_page_state(req->wb_page, NR_UNSTABLE_NFS);
+		inc_bdi_stat(req->wb_page->mapping->backing_dev_info,
+			     BDI_RECLAIMABLE);
+		__mark_inode_dirty(req->wb_context->dentry->d_inode,
+				   I_DIRTY_DATASYNC);
+	}
 }
 EXPORT_SYMBOL_GPL(nfs_request_add_commit_list);
 
 /**
  * nfs_request_remove_commit_list - Remove request from a commit list
  * @req: pointer to a nfs_page
+ * @cinfo: holds list lock and accounting info
  *
- * This clears the PG_CLEAN bit, and updates the inode global count of
+ * This clears the PG_CLEAN bit, and updates the cinfo's count of
  * number of outstanding requests requiring a commit
  * It does not update the MM page stats.
  *
- * The caller _must_ hold the inode->i_lock and the nfs_page lock.
+ * The caller _must_ hold the cinfo->lock and the nfs_page lock.
  */
 void
-nfs_request_remove_commit_list(struct nfs_page *req)
+nfs_request_remove_commit_list(struct nfs_page *req,
+			       struct nfs_commit_info *cinfo)
 {
-	struct inode *inode = req->wb_context->dentry->d_inode;
-
 	if (!test_and_clear_bit(PG_CLEAN, &(req)->wb_flags))
 		return;
 	nfs_list_remove_request(req);
-	NFS_I(inode)->ncommit--;
+	cinfo->mds->ncommit--;
 }
 EXPORT_SYMBOL_GPL(nfs_request_remove_commit_list);
 
+static void nfs_init_cinfo_from_inode(struct nfs_commit_info *cinfo,
+				      struct inode *inode)
+{
+	cinfo->lock = &inode->i_lock;
+	cinfo->mds = &NFS_I(inode)->commit_info;
+	cinfo->ds = pnfs_get_ds_info(inode);
+	cinfo->dreq = NULL;
+	cinfo->completion_ops = &nfs_commit_completion_ops;
+}
+
+void nfs_init_cinfo(struct nfs_commit_info *cinfo,
+		    struct inode *inode,
+		    struct nfs_direct_req *dreq)
+{
+	if (dreq)
+		nfs_init_cinfo_from_dreq(cinfo, dreq);
+	else
+		nfs_init_cinfo_from_inode(cinfo, inode);
+}
+EXPORT_SYMBOL_GPL(nfs_init_cinfo);
 
 /*
  * Add a request to the inode's commit list.
  */
-static void
-nfs_mark_request_commit(struct nfs_page *req, struct pnfs_layout_segment *lseg)
+void
+nfs_mark_request_commit(struct nfs_page *req, struct pnfs_layout_segment *lseg,
+			struct nfs_commit_info *cinfo)
 {
-	struct inode *inode = req->wb_context->dentry->d_inode;
-
-	if (pnfs_mark_request_commit(req, lseg))
+	if (pnfs_mark_request_commit(req, lseg, cinfo))
 		return;
-	nfs_request_add_commit_list(req, &NFS_I(inode)->commit_list);
+	nfs_request_add_commit_list(req, &cinfo->mds->list, cinfo);
 }
 
 static void
@@ -494,11 +545,13 @@
 {
 	if (test_bit(PG_CLEAN, &req->wb_flags)) {
 		struct inode *inode = req->wb_context->dentry->d_inode;
+		struct nfs_commit_info cinfo;
 
-		if (!pnfs_clear_request_commit(req)) {
-			spin_lock(&inode->i_lock);
-			nfs_request_remove_commit_list(req);
-			spin_unlock(&inode->i_lock);
+		nfs_init_cinfo_from_inode(&cinfo, inode);
+		if (!pnfs_clear_request_commit(req, &cinfo)) {
+			spin_lock(cinfo.lock);
+			nfs_request_remove_commit_list(req, &cinfo);
+			spin_unlock(cinfo.lock);
 		}
 		nfs_clear_page_commit(req->wb_page);
 	}
@@ -508,28 +561,25 @@
 int nfs_write_need_commit(struct nfs_write_data *data)
 {
 	if (data->verf.committed == NFS_DATA_SYNC)
-		return data->lseg == NULL;
-	else
-		return data->verf.committed != NFS_FILE_SYNC;
+		return data->header->lseg == NULL;
+	return data->verf.committed != NFS_FILE_SYNC;
 }
 
-static inline
-int nfs_reschedule_unstable_write(struct nfs_page *req,
-				  struct nfs_write_data *data)
-{
-	if (test_and_clear_bit(PG_NEED_COMMIT, &req->wb_flags)) {
-		nfs_mark_request_commit(req, data->lseg);
-		return 1;
-	}
-	if (test_and_clear_bit(PG_NEED_RESCHED, &req->wb_flags)) {
-		nfs_mark_request_dirty(req);
-		return 1;
-	}
-	return 0;
-}
 #else
-static void
-nfs_mark_request_commit(struct nfs_page *req, struct pnfs_layout_segment *lseg)
+static void nfs_init_cinfo_from_inode(struct nfs_commit_info *cinfo,
+				      struct inode *inode)
+{
+}
+
+void nfs_init_cinfo(struct nfs_commit_info *cinfo,
+		    struct inode *inode,
+		    struct nfs_direct_req *dreq)
+{
+}
+
+void
+nfs_mark_request_commit(struct nfs_page *req, struct pnfs_layout_segment *lseg,
+			struct nfs_commit_info *cinfo)
 {
 }
 
@@ -544,25 +594,58 @@
 	return 0;
 }
 
-static inline
-int nfs_reschedule_unstable_write(struct nfs_page *req,
-				  struct nfs_write_data *data)
-{
-	return 0;
-}
 #endif
 
-#if defined(CONFIG_NFS_V3) || defined(CONFIG_NFS_V4)
-static int
-nfs_need_commit(struct nfs_inode *nfsi)
+static void nfs_write_completion(struct nfs_pgio_header *hdr)
 {
-	return nfsi->ncommit > 0;
+	struct nfs_commit_info cinfo;
+	unsigned long bytes = 0;
+
+	if (test_bit(NFS_IOHDR_REDO, &hdr->flags))
+		goto out;
+	nfs_init_cinfo_from_inode(&cinfo, hdr->inode);
+	while (!list_empty(&hdr->pages)) {
+		struct nfs_page *req = nfs_list_entry(hdr->pages.next);
+
+		bytes += req->wb_bytes;
+		nfs_list_remove_request(req);
+		if (test_bit(NFS_IOHDR_ERROR, &hdr->flags) &&
+		    (hdr->good_bytes < bytes)) {
+			nfs_set_pageerror(req->wb_page);
+			nfs_context_set_write_error(req->wb_context, hdr->error);
+			goto remove_req;
+		}
+		if (test_bit(NFS_IOHDR_NEED_RESCHED, &hdr->flags)) {
+			nfs_mark_request_dirty(req);
+			goto next;
+		}
+		if (test_bit(NFS_IOHDR_NEED_COMMIT, &hdr->flags)) {
+			memcpy(&req->wb_verf, hdr->verf, sizeof(req->wb_verf));
+			nfs_mark_request_commit(req, hdr->lseg, &cinfo);
+			goto next;
+		}
+remove_req:
+		nfs_inode_remove_request(req);
+next:
+		nfs_unlock_request(req);
+		nfs_end_page_writeback(req->wb_page);
+		nfs_release_request(req);
+	}
+out:
+	hdr->release(hdr);
 }
 
-/* i_lock held by caller */
-static int
-nfs_scan_commit_list(struct list_head *src, struct list_head *dst, int max,
-		spinlock_t *lock)
+#if defined(CONFIG_NFS_V3) || defined(CONFIG_NFS_V4)
+static unsigned long
+nfs_reqs_to_commit(struct nfs_commit_info *cinfo)
+{
+	return cinfo->mds->ncommit;
+}
+
+/* cinfo->lock held by caller */
+int
+nfs_scan_commit_list(struct list_head *src, struct list_head *dst,
+		     struct nfs_commit_info *cinfo, int max)
 {
 	struct nfs_page *req, *tmp;
 	int ret = 0;
@@ -570,12 +653,13 @@
 	list_for_each_entry_safe(req, tmp, src, wb_list) {
 		if (!nfs_lock_request(req))
 			continue;
-		if (cond_resched_lock(lock))
+		kref_get(&req->wb_kref);
+		if (cond_resched_lock(cinfo->lock))
 			list_safe_reset_next(req, tmp, wb_list);
-		nfs_request_remove_commit_list(req);
+		nfs_request_remove_commit_list(req, cinfo);
 		nfs_list_add_request(req, dst);
 		ret++;
-		if (ret == max)
+		if ((ret == max) && !cinfo->dreq)
 			break;
 	}
 	return ret;
@@ -584,37 +668,38 @@
 /*
  * nfs_scan_commit - Scan an inode for commit requests
  * @inode: NFS inode to scan
- * @dst: destination list
+ * @dst: mds destination list
+ * @cinfo: mds and ds lists of reqs ready to commit
  *
  * Moves requests from the inode's 'commit' request list.
  * The requests are *not* checked to ensure that they form a contiguous set.
  */
-static int
-nfs_scan_commit(struct inode *inode, struct list_head *dst)
+int
+nfs_scan_commit(struct inode *inode, struct list_head *dst,
+		struct nfs_commit_info *cinfo)
 {
-	struct nfs_inode *nfsi = NFS_I(inode);
 	int ret = 0;
 
-	spin_lock(&inode->i_lock);
-	if (nfsi->ncommit > 0) {
+	spin_lock(cinfo->lock);
+	if (cinfo->mds->ncommit > 0) {
 		const int max = INT_MAX;
 
-		ret = nfs_scan_commit_list(&nfsi->commit_list, dst, max,
-				&inode->i_lock);
-		ret += pnfs_scan_commit_lists(inode, max - ret,
-				&inode->i_lock);
+		ret = nfs_scan_commit_list(&cinfo->mds->list, dst,
+					   cinfo, max);
+		ret += pnfs_scan_commit_lists(inode, cinfo, max - ret);
 	}
-	spin_unlock(&inode->i_lock);
+	spin_unlock(cinfo->lock);
 	return ret;
 }
 
 #else
-static inline int nfs_need_commit(struct nfs_inode *nfsi)
+static unsigned long nfs_reqs_to_commit(struct nfs_commit_info *cinfo)
 {
 	return 0;
 }
 
-static inline int nfs_scan_commit(struct inode *inode, struct list_head *dst)
+int nfs_scan_commit(struct inode *inode, struct list_head *dst,
+		    struct nfs_commit_info *cinfo)
 {
 	return 0;
 }
@@ -659,7 +744,7 @@
 		    || end < req->wb_offset)
 			goto out_flushme;
 
-		if (nfs_lock_request_dontget(req))
+		if (nfs_lock_request(req))
 			break;
 
 		/* The request is locked, so wait and then retry */
@@ -729,7 +814,7 @@
 	nfs_grow_file(page, offset, count);
 	nfs_mark_uptodate(page, req->wb_pgbase, req->wb_bytes);
 	nfs_mark_request_dirty(req);
-	nfs_unlock_request(req);
+	nfs_unlock_and_release_request(req);
 	return 0;
 }
 
@@ -766,10 +851,14 @@
  * the PageUptodate() flag. In this case, we will need to turn off
  * write optimisations that depend on the page contents being correct.
  */
-static int nfs_write_pageuptodate(struct page *page, struct inode *inode)
+static bool nfs_write_pageuptodate(struct page *page, struct inode *inode)
 {
-	return PageUptodate(page) &&
-		!(NFS_I(inode)->cache_validity & (NFS_INO_REVAL_PAGECACHE|NFS_INO_INVALID_DATA));
+	if (nfs_have_delegated_attributes(inode))
+		goto out;
+	if (NFS_I(inode)->cache_validity & NFS_INO_REVAL_PAGECACHE)
+		return false;
+out:
+	return PageUptodate(page) != 0;
 }
 
 /*
@@ -815,17 +904,6 @@
 	return status;
 }
 
-static void nfs_writepage_release(struct nfs_page *req,
-				  struct nfs_write_data *data)
-{
-	struct page *page = req->wb_page;
-
-	if (PageError(req->wb_page) || !nfs_reschedule_unstable_write(req, data))
-		nfs_inode_remove_request(req);
-	nfs_unlock_request(req);
-	nfs_end_page_writeback(page);
-}
-
 static int flush_task_priority(int how)
 {
 	switch (how & (FLUSH_HIGHPRI|FLUSH_LOWPRI)) {
@@ -837,18 +915,18 @@
 	return RPC_PRIORITY_NORMAL;
 }
 
-int nfs_initiate_write(struct nfs_write_data *data,
-		       struct rpc_clnt *clnt,
+int nfs_initiate_write(struct rpc_clnt *clnt,
+		       struct nfs_write_data *data,
 		       const struct rpc_call_ops *call_ops,
-		       int how)
+		       int how, int flags)
 {
-	struct inode *inode = data->inode;
+	struct inode *inode = data->header->inode;
 	int priority = flush_task_priority(how);
 	struct rpc_task *task;
 	struct rpc_message msg = {
 		.rpc_argp = &data->args,
 		.rpc_resp = &data->res,
-		.rpc_cred = data->cred,
+		.rpc_cred = data->header->cred,
 	};
 	struct rpc_task_setup task_setup_data = {
 		.rpc_client = clnt,
@@ -857,7 +935,7 @@
 		.callback_ops = call_ops,
 		.callback_data = data,
 		.workqueue = nfsiod_workqueue,
-		.flags = RPC_TASK_ASYNC,
+		.flags = RPC_TASK_ASYNC | flags,
 		.priority = priority,
 	};
 	int ret = 0;
@@ -892,26 +970,21 @@
 /*
  * Set up the argument/result storage required for the RPC call.
  */
-static void nfs_write_rpcsetup(struct nfs_page *req,
-		struct nfs_write_data *data,
+static void nfs_write_rpcsetup(struct nfs_write_data *data,
 		unsigned int count, unsigned int offset,
-		int how)
+		int how, struct nfs_commit_info *cinfo)
 {
-	struct inode *inode = req->wb_context->dentry->d_inode;
+	struct nfs_page *req = data->header->req;
 
 	/* Set up the RPC argument and reply structs
 	 * NB: take care not to mess about with data->commit et al. */
 
-	data->req = req;
-	data->inode = inode = req->wb_context->dentry->d_inode;
-	data->cred = req->wb_context->cred;
-
-	data->args.fh     = NFS_FH(inode);
+	data->args.fh     = NFS_FH(data->header->inode);
 	data->args.offset = req_offset(req) + offset;
 	/* pnfs_set_layoutcommit needs this */
 	data->mds_offset = data->args.offset;
 	data->args.pgbase = req->wb_pgbase + offset;
-	data->args.pages  = data->pagevec;
+	data->args.pages  = data->pages.pagevec;
 	data->args.count  = count;
 	data->args.context = get_nfs_open_context(req->wb_context);
 	data->args.lock_context = req->wb_lock_context;
@@ -920,7 +993,7 @@
 	case 0:
 		break;
 	case FLUSH_COND_STABLE:
-		if (nfs_need_commit(NFS_I(inode)))
+		if (nfs_reqs_to_commit(cinfo))
 			break;
 	default:
 		data->args.stable = NFS_FILE_SYNC;
@@ -936,9 +1009,9 @@
 		const struct rpc_call_ops *call_ops,
 		int how)
 {
-	struct inode *inode = data->args.context->dentry->d_inode;
+	struct inode *inode = data->header->inode;
 
-	return nfs_initiate_write(data, NFS_CLIENT(inode), call_ops, how);
+	return nfs_initiate_write(NFS_CLIENT(inode), data, call_ops, how, 0);
 }
 
 static int nfs_do_multiple_writes(struct list_head *head,
@@ -951,7 +1024,7 @@
 	while (!list_empty(head)) {
 		int ret2;
 
-		data = list_entry(head->next, struct nfs_write_data, list);
+		data = list_first_entry(head, struct nfs_write_data, list);
 		list_del_init(&data->list);
 		
 		ret2 = nfs_do_write(data, call_ops, how);
@@ -967,31 +1040,60 @@
  */
 static void nfs_redirty_request(struct nfs_page *req)
 {
-	struct page *page = req->wb_page;
-
 	nfs_mark_request_dirty(req);
 	nfs_unlock_request(req);
-	nfs_end_page_writeback(page);
+	nfs_end_page_writeback(req->wb_page);
+	nfs_release_request(req);
+}
+
+static void nfs_async_write_error(struct list_head *head)
+{
+	struct nfs_page	*req;
+
+	while (!list_empty(head)) {
+		req = nfs_list_entry(head->next);
+		nfs_list_remove_request(req);
+		nfs_redirty_request(req);
+	}
+}
+
+static const struct nfs_pgio_completion_ops nfs_async_write_completion_ops = {
+	.error_cleanup = nfs_async_write_error,
+	.completion = nfs_write_completion,
+};
+
+static void nfs_flush_error(struct nfs_pageio_descriptor *desc,
+		struct nfs_pgio_header *hdr)
+{
+	set_bit(NFS_IOHDR_REDO, &hdr->flags);
+	while (!list_empty(&hdr->rpc_list)) {
+		struct nfs_write_data *data = list_first_entry(&hdr->rpc_list,
+				struct nfs_write_data, list);
+		list_del(&data->list);
+		nfs_writedata_release(data);
+	}
+	desc->pg_completion_ops->error_cleanup(&desc->pg_list);
 }
 
 /*
  * Generate multiple small requests to write out a single
  * contiguous dirty area on one page.
  */
-static int nfs_flush_multi(struct nfs_pageio_descriptor *desc, struct list_head *res)
+static int nfs_flush_multi(struct nfs_pageio_descriptor *desc,
+			   struct nfs_pgio_header *hdr)
 {
-	struct nfs_page *req = nfs_list_entry(desc->pg_list.next);
+	struct nfs_page *req = hdr->req;
 	struct page *page = req->wb_page;
 	struct nfs_write_data *data;
 	size_t wsize = desc->pg_bsize, nbytes;
 	unsigned int offset;
 	int requests = 0;
-	int ret = 0;
+	struct nfs_commit_info cinfo;
 
-	nfs_list_remove_request(req);
+	nfs_init_cinfo(&cinfo, desc->pg_inode, desc->pg_dreq);
 
 	if ((desc->pg_ioflags & FLUSH_COND_STABLE) &&
-	    (desc->pg_moreio || NFS_I(desc->pg_inode)->ncommit ||
+	    (desc->pg_moreio || nfs_reqs_to_commit(&cinfo) ||
 	     desc->pg_count > wsize))
 		desc->pg_ioflags &= ~FLUSH_COND_STABLE;
 
@@ -1001,28 +1103,22 @@
 	do {
 		size_t len = min(nbytes, wsize);
 
-		data = nfs_writedata_alloc(1);
-		if (!data)
-			goto out_bad;
-		data->pagevec[0] = page;
-		nfs_write_rpcsetup(req, data, len, offset, desc->pg_ioflags);
-		list_add(&data->list, res);
+		data = nfs_writedata_alloc(hdr, 1);
+		if (!data) {
+			nfs_flush_error(desc, hdr);
+			return -ENOMEM;
+		}
+		data->pages.pagevec[0] = page;
+		nfs_write_rpcsetup(data, len, offset, desc->pg_ioflags, &cinfo);
+		list_add(&data->list, &hdr->rpc_list);
 		requests++;
 		nbytes -= len;
 		offset += len;
 	} while (nbytes != 0);
-	atomic_set(&req->wb_complete, requests);
-	desc->pg_rpc_callops = &nfs_write_partial_ops;
-	return ret;
-
-out_bad:
-	while (!list_empty(res)) {
-		data = list_entry(res->next, struct nfs_write_data, list);
-		list_del(&data->list);
-		nfs_writedata_release(data);
-	}
-	nfs_redirty_request(req);
-	return -ENOMEM;
+	nfs_list_remove_request(req);
+	nfs_list_add_request(req, &hdr->pages);
+	desc->pg_rpc_callops = &nfs_write_common_ops;
+	return 0;
 }
 
 /*
@@ -1033,62 +1129,71 @@
  * This is the case if nfs_updatepage detects a conflicting request
  * that has been written but not committed.
  */
-static int nfs_flush_one(struct nfs_pageio_descriptor *desc, struct list_head *res)
+static int nfs_flush_one(struct nfs_pageio_descriptor *desc,
+			 struct nfs_pgio_header *hdr)
 {
 	struct nfs_page		*req;
 	struct page		**pages;
 	struct nfs_write_data	*data;
 	struct list_head *head = &desc->pg_list;
-	int ret = 0;
+	struct nfs_commit_info cinfo;
 
-	data = nfs_writedata_alloc(nfs_page_array_len(desc->pg_base,
-						      desc->pg_count));
+	data = nfs_writedata_alloc(hdr, nfs_page_array_len(desc->pg_base,
+							   desc->pg_count));
 	if (!data) {
-		while (!list_empty(head)) {
-			req = nfs_list_entry(head->next);
-			nfs_list_remove_request(req);
-			nfs_redirty_request(req);
-		}
-		ret = -ENOMEM;
-		goto out;
+		nfs_flush_error(desc, hdr);
+		return -ENOMEM;
 	}
-	pages = data->pagevec;
+
+	nfs_init_cinfo(&cinfo, desc->pg_inode, desc->pg_dreq);
+	pages = data->pages.pagevec;
 	while (!list_empty(head)) {
 		req = nfs_list_entry(head->next);
 		nfs_list_remove_request(req);
-		nfs_list_add_request(req, &data->pages);
+		nfs_list_add_request(req, &hdr->pages);
 		*pages++ = req->wb_page;
 	}
-	req = nfs_list_entry(data->pages.next);
 
 	if ((desc->pg_ioflags & FLUSH_COND_STABLE) &&
-	    (desc->pg_moreio || NFS_I(desc->pg_inode)->ncommit))
+	    (desc->pg_moreio || nfs_reqs_to_commit(&cinfo)))
 		desc->pg_ioflags &= ~FLUSH_COND_STABLE;
 
 	/* Set up the argument struct */
-	nfs_write_rpcsetup(req, data, desc->pg_count, 0, desc->pg_ioflags);
-	list_add(&data->list, res);
-	desc->pg_rpc_callops = &nfs_write_full_ops;
-out:
-	return ret;
+	nfs_write_rpcsetup(data, desc->pg_count, 0, desc->pg_ioflags, &cinfo);
+	list_add(&data->list, &hdr->rpc_list);
+	desc->pg_rpc_callops = &nfs_write_common_ops;
+	return 0;
 }
 
-int nfs_generic_flush(struct nfs_pageio_descriptor *desc, struct list_head *head)
+int nfs_generic_flush(struct nfs_pageio_descriptor *desc,
+		      struct nfs_pgio_header *hdr)
 {
 	if (desc->pg_bsize < PAGE_CACHE_SIZE)
-		return nfs_flush_multi(desc, head);
-	return nfs_flush_one(desc, head);
+		return nfs_flush_multi(desc, hdr);
+	return nfs_flush_one(desc, hdr);
 }
 
 static int nfs_generic_pg_writepages(struct nfs_pageio_descriptor *desc)
 {
-	LIST_HEAD(head);
+	struct nfs_write_header *whdr;
+	struct nfs_pgio_header *hdr;
 	int ret;
 
-	ret = nfs_generic_flush(desc, &head);
+	whdr = nfs_writehdr_alloc();
+	if (!whdr) {
+		desc->pg_completion_ops->error_cleanup(&desc->pg_list);
+		return -ENOMEM;
+	}
+	hdr = &whdr->header;
+	nfs_pgheader_init(desc, hdr, nfs_writehdr_free);
+	atomic_inc(&hdr->refcnt);
+	ret = nfs_generic_flush(desc, hdr);
 	if (ret == 0)
-		ret = nfs_do_multiple_writes(&head, desc->pg_rpc_callops,
-				desc->pg_ioflags);
+		ret = nfs_do_multiple_writes(&hdr->rpc_list,
+					     desc->pg_rpc_callops,
+					     desc->pg_ioflags);
+	if (atomic_dec_and_test(&hdr->refcnt))
+		hdr->completion_ops->completion(hdr);
 	return ret;
 }
 
@@ -1098,9 +1203,10 @@
 };
 
 void nfs_pageio_init_write_mds(struct nfs_pageio_descriptor *pgio,
-				  struct inode *inode, int ioflags)
+			       struct inode *inode, int ioflags,
+			       const struct nfs_pgio_completion_ops *compl_ops)
 {
-	nfs_pageio_init(pgio, inode, &nfs_pageio_write_ops,
+	nfs_pageio_init(pgio, inode, &nfs_pageio_write_ops, compl_ops,
 				NFS_SERVER(inode)->wsize, ioflags);
 }
 
@@ -1111,79 +1217,26 @@
 }
 EXPORT_SYMBOL_GPL(nfs_pageio_reset_write_mds);
 
-static void nfs_pageio_init_write(struct nfs_pageio_descriptor *pgio,
-				  struct inode *inode, int ioflags)
+void nfs_pageio_init_write(struct nfs_pageio_descriptor *pgio,
+			   struct inode *inode, int ioflags,
+			   const struct nfs_pgio_completion_ops *compl_ops)
 {
-	if (!pnfs_pageio_init_write(pgio, inode, ioflags))
-		nfs_pageio_init_write_mds(pgio, inode, ioflags);
-}
-
-/*
- * Handle a write reply that flushed part of a page.
- */
-static void nfs_writeback_done_partial(struct rpc_task *task, void *calldata)
-{
-	struct nfs_write_data	*data = calldata;
-
-	dprintk("NFS: %5u write(%s/%lld %d@%lld)",
-		task->tk_pid,
-		data->req->wb_context->dentry->d_inode->i_sb->s_id,
-		(long long)
-		  NFS_FILEID(data->req->wb_context->dentry->d_inode),
-		data->req->wb_bytes, (long long)req_offset(data->req));
-
-	nfs_writeback_done(task, data);
-}
-
-static void nfs_writeback_release_partial(void *calldata)
-{
-	struct nfs_write_data	*data = calldata;
-	struct nfs_page		*req = data->req;
-	struct page		*page = req->wb_page;
-	int status = data->task.tk_status;
-
-	if (status < 0) {
-		nfs_set_pageerror(page);
-		nfs_context_set_write_error(req->wb_context, status);
-		dprintk(", error = %d\n", status);
-		goto out;
-	}
-
-	if (nfs_write_need_commit(data)) {
-		struct inode *inode = page->mapping->host;
-
-		spin_lock(&inode->i_lock);
-		if (test_bit(PG_NEED_RESCHED, &req->wb_flags)) {
-			/* Do nothing we need to resend the writes */
-		} else if (!test_and_set_bit(PG_NEED_COMMIT, &req->wb_flags)) {
-			memcpy(&req->wb_verf, &data->verf, sizeof(req->wb_verf));
-			dprintk(" defer commit\n");
-		} else if (memcmp(&req->wb_verf, &data->verf, sizeof(req->wb_verf))) {
-			set_bit(PG_NEED_RESCHED, &req->wb_flags);
-			clear_bit(PG_NEED_COMMIT, &req->wb_flags);
-			dprintk(" server reboot detected\n");
-		}
-		spin_unlock(&inode->i_lock);
-	} else
-		dprintk(" OK\n");
-
-out:
-	if (atomic_dec_and_test(&req->wb_complete))
-		nfs_writepage_release(req, data);
-	nfs_writedata_release(calldata);
+	if (!pnfs_pageio_init_write(pgio, inode, ioflags, compl_ops))
+		nfs_pageio_init_write_mds(pgio, inode, ioflags, compl_ops);
 }
 
 void nfs_write_prepare(struct rpc_task *task, void *calldata)
 {
 	struct nfs_write_data *data = calldata;
-	NFS_PROTO(data->inode)->write_rpc_prepare(task, data);
+	NFS_PROTO(data->header->inode)->write_rpc_prepare(task, data);
 }
 
-static const struct rpc_call_ops nfs_write_partial_ops = {
-	.rpc_call_prepare = nfs_write_prepare,
-	.rpc_call_done = nfs_writeback_done_partial,
-	.rpc_release = nfs_writeback_release_partial,
-};
+void nfs_commit_prepare(struct rpc_task *task, void *calldata)
+{
+	struct nfs_commit_data *data = calldata;
+
+	NFS_PROTO(data->inode)->commit_rpc_prepare(task, data);
+}
 
 /*
  * Handle a write reply that flushes a whole page.
@@ -1192,59 +1245,36 @@
  *	  writebacks since the page->count is kept > 1 for as long
  *	  as the page has a write request pending.
  */
-static void nfs_writeback_done_full(struct rpc_task *task, void *calldata)
+static void nfs_writeback_done_common(struct rpc_task *task, void *calldata)
 {
 	struct nfs_write_data	*data = calldata;
 
 	nfs_writeback_done(task, data);
 }
 
-static void nfs_writeback_release_full(void *calldata)
+static void nfs_writeback_release_common(void *calldata)
 {
 	struct nfs_write_data	*data = calldata;
+	struct nfs_pgio_header *hdr = data->header;
 	int status = data->task.tk_status;
 
-	/* Update attributes as result of writeback. */
-	while (!list_empty(&data->pages)) {
-		struct nfs_page *req = nfs_list_entry(data->pages.next);
-		struct page *page = req->wb_page;
-
-		nfs_list_remove_request(req);
-
-		dprintk("NFS: %5u write (%s/%lld %d@%lld)",
-			data->task.tk_pid,
-			req->wb_context->dentry->d_inode->i_sb->s_id,
-			(long long)NFS_FILEID(req->wb_context->dentry->d_inode),
-			req->wb_bytes,
-			(long long)req_offset(req));
-
-		if (status < 0) {
-			nfs_set_pageerror(page);
-			nfs_context_set_write_error(req->wb_context, status);
-			dprintk(", error = %d\n", status);
-			goto remove_request;
-		}
-
-		if (nfs_write_need_commit(data)) {
-			memcpy(&req->wb_verf, &data->verf, sizeof(req->wb_verf));
-			nfs_mark_request_commit(req, data->lseg);
-			dprintk(" marked for commit\n");
-			goto next;
-		}
-		dprintk(" OK\n");
-remove_request:
-		nfs_inode_remove_request(req);
-	next:
-		nfs_unlock_request(req);
-		nfs_end_page_writeback(page);
+	if ((status >= 0) && nfs_write_need_commit(data)) {
+		spin_lock(&hdr->lock);
+		if (test_bit(NFS_IOHDR_NEED_RESCHED, &hdr->flags))
+			; /* Do nothing */
+		else if (!test_and_set_bit(NFS_IOHDR_NEED_COMMIT, &hdr->flags))
+			memcpy(hdr->verf, &data->verf, sizeof(*hdr->verf));
+		else if (memcmp(hdr->verf, &data->verf, sizeof(*hdr->verf)))
+			set_bit(NFS_IOHDR_NEED_RESCHED, &hdr->flags);
+		spin_unlock(&hdr->lock);
 	}
-	nfs_writedata_release(calldata);
+	nfs_writedata_release(data);
 }
 
-static const struct rpc_call_ops nfs_write_full_ops = {
+static const struct rpc_call_ops nfs_write_common_ops = {
 	.rpc_call_prepare = nfs_write_prepare,
-	.rpc_call_done = nfs_writeback_done_full,
-	.rpc_release = nfs_writeback_release_full,
+	.rpc_call_done = nfs_writeback_done_common,
+	.rpc_release = nfs_writeback_release_common,
 };
 
 
@@ -1255,6 +1285,7 @@
 {
 	struct nfs_writeargs	*argp = &data->args;
 	struct nfs_writeres	*resp = &data->res;
+	struct inode		*inode = data->header->inode;
 	int status;
 
 	dprintk("NFS: %5u nfs_writeback_done (status %d)\n",
@@ -1267,10 +1298,10 @@
 	 * another writer had changed the file, but some applications
 	 * depend on tighter cache coherency when writing.
 	 */
-	status = NFS_PROTO(data->inode)->write_done(task, data);
+	status = NFS_PROTO(inode)->write_done(task, data);
 	if (status != 0)
 		return;
-	nfs_add_stats(data->inode, NFSIOS_SERVERWRITTENBYTES, resp->count);
+	nfs_add_stats(inode, NFSIOS_SERVERWRITTENBYTES, resp->count);
 
 #if defined(CONFIG_NFS_V3) || defined(CONFIG_NFS_V4)
 	if (resp->verf->committed < argp->stable && task->tk_status >= 0) {
@@ -1288,46 +1319,47 @@
 		if (time_before(complain, jiffies)) {
 			dprintk("NFS:       faulty NFS server %s:"
 				" (committed = %d) != (stable = %d)\n",
-				NFS_SERVER(data->inode)->nfs_client->cl_hostname,
+				NFS_SERVER(inode)->nfs_client->cl_hostname,
 				resp->verf->committed, argp->stable);
 			complain = jiffies + 300 * HZ;
 		}
 	}
 #endif
-	/* Is this a short write? */
-	if (task->tk_status >= 0 && resp->count < argp->count) {
+	if (task->tk_status < 0)
+		nfs_set_pgio_error(data->header, task->tk_status, argp->offset);
+	else if (resp->count < argp->count) {
 		static unsigned long    complain;
 
-		nfs_inc_stats(data->inode, NFSIOS_SHORTWRITE);
+		/* This is a short write! */
+		nfs_inc_stats(inode, NFSIOS_SHORTWRITE);
 
 		/* Has the server at least made some progress? */
-		if (resp->count != 0) {
-			/* Was this an NFSv2 write or an NFSv3 stable write? */
-			if (resp->verf->committed != NFS_UNSTABLE) {
-				/* Resend from where the server left off */
-				data->mds_offset += resp->count;
-				argp->offset += resp->count;
-				argp->pgbase += resp->count;
-				argp->count -= resp->count;
-			} else {
-				/* Resend as a stable write in order to avoid
-				 * headaches in the case of a server crash.
-				 */
-				argp->stable = NFS_FILE_SYNC;
+		if (resp->count == 0) {
+			if (time_before(complain, jiffies)) {
+				printk(KERN_WARNING
+				       "NFS: Server wrote zero bytes, expected %u.\n",
+				       argp->count);
+				complain = jiffies + 300 * HZ;
 			}
-			rpc_restart_call_prepare(task);
+			nfs_set_pgio_error(data->header, -EIO, argp->offset);
+			task->tk_status = -EIO;
 			return;
 		}
-		if (time_before(complain, jiffies)) {
-			printk(KERN_WARNING
-			       "NFS: Server wrote zero bytes, expected %u.\n",
-					argp->count);
-			complain = jiffies + 300 * HZ;
+		/* Was this an NFSv2 write or an NFSv3 stable write? */
+		if (resp->verf->committed != NFS_UNSTABLE) {
+			/* Resend from where the server left off */
+			data->mds_offset += resp->count;
+			argp->offset += resp->count;
+			argp->pgbase += resp->count;
+			argp->count -= resp->count;
+		} else {
+			/* Resend as a stable write in order to avoid
+			 * headaches in the case of a server crash.
+			 */
+			argp->stable = NFS_FILE_SYNC;
 		}
-		/* Can't do anything about it except throw an error. */
-		task->tk_status = -EIO;
+		rpc_restart_call_prepare(task);
 	}
-	return;
 }
 
 
@@ -1347,26 +1379,23 @@
 	return (ret < 0) ? ret : 1;
 }
 
-void nfs_commit_clear_lock(struct nfs_inode *nfsi)
+static void nfs_commit_clear_lock(struct nfs_inode *nfsi)
 {
 	clear_bit(NFS_INO_COMMIT, &nfsi->flags);
 	smp_mb__after_clear_bit();
 	wake_up_bit(&nfsi->flags, NFS_INO_COMMIT);
 }
-EXPORT_SYMBOL_GPL(nfs_commit_clear_lock);
 
-void nfs_commitdata_release(void *data)
+void nfs_commitdata_release(struct nfs_commit_data *data)
 {
-	struct nfs_write_data *wdata = data;
-
-	put_nfs_open_context(wdata->args.context);
-	nfs_commit_free(wdata);
+	put_nfs_open_context(data->context);
+	nfs_commit_free(data);
 }
 EXPORT_SYMBOL_GPL(nfs_commitdata_release);
 
-int nfs_initiate_commit(struct nfs_write_data *data, struct rpc_clnt *clnt,
+int nfs_initiate_commit(struct rpc_clnt *clnt, struct nfs_commit_data *data,
 			const struct rpc_call_ops *call_ops,
-			int how)
+			int how, int flags)
 {
 	struct rpc_task *task;
 	int priority = flush_task_priority(how);
@@ -1382,7 +1411,7 @@
 		.callback_ops = call_ops,
 		.callback_data = data,
 		.workqueue = nfsiod_workqueue,
-		.flags = RPC_TASK_ASYNC,
+		.flags = RPC_TASK_ASYNC | flags,
 		.priority = priority,
 	};
 	/* Set up the initial task struct.  */
@@ -1403,9 +1432,10 @@
 /*
  * Set up the argument/result storage required for the RPC call.
  */
-void nfs_init_commit(struct nfs_write_data *data,
-			    struct list_head *head,
-			    struct pnfs_layout_segment *lseg)
+void nfs_init_commit(struct nfs_commit_data *data,
+		     struct list_head *head,
+		     struct pnfs_layout_segment *lseg,
+		     struct nfs_commit_info *cinfo)
 {
 	struct nfs_page *first = nfs_list_entry(head->next);
 	struct inode *inode = first->wb_context->dentry->d_inode;
@@ -1419,13 +1449,14 @@
 	data->cred	  = first->wb_context->cred;
 	data->lseg	  = lseg; /* reference transferred */
 	data->mds_ops     = &nfs_commit_ops;
+	data->completion_ops = cinfo->completion_ops;
+	data->dreq	  = cinfo->dreq;
 
 	data->args.fh     = NFS_FH(data->inode);
 	/* Note: we always request a commit of the entire inode */
 	data->args.offset = 0;
 	data->args.count  = 0;
-	data->args.context = get_nfs_open_context(first->wb_context);
-	data->res.count   = 0;
+	data->context     = get_nfs_open_context(first->wb_context);
 	data->res.fattr   = &data->fattr;
 	data->res.verf    = &data->verf;
 	nfs_fattr_init(&data->fattr);
@@ -1433,18 +1464,21 @@
 EXPORT_SYMBOL_GPL(nfs_init_commit);
 
 void nfs_retry_commit(struct list_head *page_list,
-		      struct pnfs_layout_segment *lseg)
+		      struct pnfs_layout_segment *lseg,
+		      struct nfs_commit_info *cinfo)
 {
 	struct nfs_page *req;
 
 	while (!list_empty(page_list)) {
 		req = nfs_list_entry(page_list->next);
 		nfs_list_remove_request(req);
-		nfs_mark_request_commit(req, lseg);
-		dec_zone_page_state(req->wb_page, NR_UNSTABLE_NFS);
-		dec_bdi_stat(req->wb_page->mapping->backing_dev_info,
-			     BDI_RECLAIMABLE);
-		nfs_unlock_request(req);
+		nfs_mark_request_commit(req, lseg, cinfo);
+		if (!cinfo->dreq) {
+			dec_zone_page_state(req->wb_page, NR_UNSTABLE_NFS);
+			dec_bdi_stat(req->wb_page->mapping->backing_dev_info,
+				     BDI_RECLAIMABLE);
+		}
+		nfs_unlock_and_release_request(req);
 	}
 }
 EXPORT_SYMBOL_GPL(nfs_retry_commit);
@@ -1453,9 +1487,10 @@
  * Commit dirty pages
  */
 static int
-nfs_commit_list(struct inode *inode, struct list_head *head, int how)
+nfs_commit_list(struct inode *inode, struct list_head *head, int how,
+		struct nfs_commit_info *cinfo)
 {
-	struct nfs_write_data	*data;
+	struct nfs_commit_data	*data;
 
 	data = nfs_commitdata_alloc();
 
@@ -1463,11 +1498,13 @@
 		goto out_bad;
 
 	/* Set up the argument struct */
-	nfs_init_commit(data, head, NULL);
-	return nfs_initiate_commit(data, NFS_CLIENT(inode), data->mds_ops, how);
+	nfs_init_commit(data, head, NULL, cinfo);
+	atomic_inc(&cinfo->mds->rpcs_out);
+	return nfs_initiate_commit(NFS_CLIENT(inode), data, data->mds_ops,
+				   how, 0);
  out_bad:
-	nfs_retry_commit(head, NULL);
-	nfs_commit_clear_lock(NFS_I(inode));
+	nfs_retry_commit(head, NULL, cinfo);
+	cinfo->completion_ops->error_cleanup(NFS_I(inode));
 	return -ENOMEM;
 }
 
@@ -1476,7 +1513,7 @@
  */
 static void nfs_commit_done(struct rpc_task *task, void *calldata)
 {
-	struct nfs_write_data	*data = calldata;
+	struct nfs_commit_data	*data = calldata;
 
         dprintk("NFS: %5u nfs_commit_done (status %d)\n",
                                 task->tk_pid, task->tk_status);
@@ -1485,10 +1522,11 @@
 	NFS_PROTO(data->inode)->commit_done(task, data);
 }
 
-void nfs_commit_release_pages(struct nfs_write_data *data)
+static void nfs_commit_release_pages(struct nfs_commit_data *data)
 {
 	struct nfs_page	*req;
 	int status = data->task.tk_status;
+	struct nfs_commit_info cinfo;
 
 	while (!list_empty(&data->pages)) {
 		req = nfs_list_entry(data->pages.next);
@@ -1519,42 +1557,59 @@
 		dprintk(" mismatch\n");
 		nfs_mark_request_dirty(req);
 	next:
-		nfs_unlock_request(req);
+		nfs_unlock_and_release_request(req);
 	}
+	nfs_init_cinfo(&cinfo, data->inode, data->dreq);
+	if (atomic_dec_and_test(&cinfo.mds->rpcs_out))
+		nfs_commit_clear_lock(NFS_I(data->inode));
 }
-EXPORT_SYMBOL_GPL(nfs_commit_release_pages);
 
 static void nfs_commit_release(void *calldata)
 {
-	struct nfs_write_data *data = calldata;
+	struct nfs_commit_data *data = calldata;
 
-	nfs_commit_release_pages(data);
-	nfs_commit_clear_lock(NFS_I(data->inode));
+	data->completion_ops->completion(data);
 	nfs_commitdata_release(calldata);
 }
 
 static const struct rpc_call_ops nfs_commit_ops = {
-	.rpc_call_prepare = nfs_write_prepare,
+	.rpc_call_prepare = nfs_commit_prepare,
 	.rpc_call_done = nfs_commit_done,
 	.rpc_release = nfs_commit_release,
 };
 
+static const struct nfs_commit_completion_ops nfs_commit_completion_ops = {
+	.completion = nfs_commit_release_pages,
+	.error_cleanup = nfs_commit_clear_lock,
+};
+
+int nfs_generic_commit_list(struct inode *inode, struct list_head *head,
+			    int how, struct nfs_commit_info *cinfo)
+{
+	int status;
+
+	status = pnfs_commit_list(inode, head, how, cinfo);
+	if (status == PNFS_NOT_ATTEMPTED)
+		status = nfs_commit_list(inode, head, how, cinfo);
+	return status;
+}
+
 int nfs_commit_inode(struct inode *inode, int how)
 {
 	LIST_HEAD(head);
+	struct nfs_commit_info cinfo;
 	int may_wait = how & FLUSH_SYNC;
 	int res;
 
 	res = nfs_commit_set_lock(NFS_I(inode), may_wait);
 	if (res <= 0)
 		goto out_mark_dirty;
-	res = nfs_scan_commit(inode, &head);
+	nfs_init_cinfo_from_inode(&cinfo, inode);
+	res = nfs_scan_commit(inode, &head, &cinfo);
 	if (res) {
 		int error;
 
-		error = pnfs_commit_list(inode, &head, how);
-		if (error == PNFS_NOT_ATTEMPTED)
-			error = nfs_commit_list(inode, &head, how);
+		error = nfs_generic_commit_list(inode, &head, how, &cinfo);
 		if (error < 0)
 			return error;
 		if (!may_wait)
@@ -1585,14 +1640,14 @@
 	int ret = 0;
 
 	/* no commits means nothing needs to be done */
-	if (!nfsi->ncommit)
+	if (!nfsi->commit_info.ncommit)
 		return ret;
 
 	if (wbc->sync_mode == WB_SYNC_NONE) {
 		/* Don't commit yet if this is a non-blocking flush and there
 		 * are a lot of outstanding writes for this mapping.
 		 */
-		if (nfsi->ncommit <= (nfsi->npages >> 1))
+		if (nfsi->commit_info.ncommit <= (nfsi->npages >> 1))
 			goto out_mark_dirty;
 
 		/* don't wait for the COMMIT response */
@@ -1665,7 +1720,7 @@
 		req = nfs_page_find_request(page);
 		if (req == NULL)
 			break;
-		if (nfs_lock_request_dontget(req)) {
+		if (nfs_lock_request(req)) {
 			nfs_clear_request_commit(req);
 			nfs_inode_remove_request(req);
 			/*
@@ -1673,7 +1728,7 @@
 			 * page as being dirty
 			 */
 			cancel_dirty_page(page, PAGE_CACHE_SIZE);
-			nfs_unlock_request(req);
+			nfs_unlock_and_release_request(req);
 			break;
 		}
 		ret = nfs_wait_on_request(req);
@@ -1742,7 +1797,7 @@
 int __init nfs_init_writepagecache(void)
 {
 	nfs_wdata_cachep = kmem_cache_create("nfs_write_data",
-					     sizeof(struct nfs_write_data),
+					     sizeof(struct nfs_write_header),
 					     0, SLAB_HWCACHE_ALIGN,
 					     NULL);
 	if (nfs_wdata_cachep == NULL)
@@ -1753,6 +1808,13 @@
 	if (nfs_wdata_mempool == NULL)
 		return -ENOMEM;
 
+	nfs_cdata_cachep = kmem_cache_create("nfs_commit_data",
+					     sizeof(struct nfs_commit_data),
+					     0, SLAB_HWCACHE_ALIGN,
+					     NULL);
+	if (nfs_cdata_cachep == NULL)
+		return -ENOMEM;
+
 	nfs_commit_mempool = mempool_create_slab_pool(MIN_POOL_COMMIT,
 						      nfs_wdata_cachep);
 	if (nfs_commit_mempool == NULL)
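The fs/nfs/write.c changes above hang every per-RPC nfs_write_data off a shared nfs_pgio_header: nfs_writedata_alloc() takes a reference on the header, nfs_writedata_release() drops it, and the final put from either the RPC releases or nfs_generic_pg_writepages() triggers hdr->completion_ops->completion(), which walks hdr->pages once for the whole batch of RPCs. A minimal user-space sketch of that shared-header refcounting pattern (simplified and non-atomic, with placeholder names rather than the kernel structures):

/* Illustrative model: several RPC "data" objects share one header, and
 * the completion callback fires only when the last reference drops. */
#include <stdio.h>
#include <stdlib.h>

struct pgio_header {
	int refcnt;
	void (*completion)(struct pgio_header *hdr);
};

struct write_data {
	struct pgio_header *header;
	size_t count;
};

static void write_completion(struct pgio_header *hdr)
{
	printf("all writes for this header are done\n");
}

static struct write_data *writedata_alloc(struct pgio_header *hdr, size_t count)
{
	struct write_data *data = calloc(1, sizeof(*data));

	if (!data)
		return NULL;
	data->header = hdr;
	data->count = count;
	hdr->refcnt++;			/* like atomic_inc(&hdr->refcnt) */
	return data;
}

static void writedata_release(struct write_data *data)
{
	struct pgio_header *hdr = data->header;

	free(data);
	if (--hdr->refcnt == 0)		/* like atomic_dec_and_test() */
		hdr->completion(hdr);
}

int main(void)
{
	struct pgio_header hdr = { .refcnt = 1, .completion = write_completion };
	struct write_data *a = writedata_alloc(&hdr, 4096);
	struct write_data *b = writedata_alloc(&hdr, 4096);

	if (!a || !b)
		return 1;
	writedata_release(a);
	writedata_release(b);
	/* The caller's own reference mirrors nfs_generic_pg_writepages(). */
	if (--hdr.refcnt == 0)
		hdr.completion(&hdr);
	return 0;
}
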
diff --git a/fs/nfsd/auth.c b/fs/nfsd/auth.c
index 204438c..34a10d7 100644
--- a/fs/nfsd/auth.c
+++ b/fs/nfsd/auth.c
@@ -11,7 +11,7 @@
 	struct exp_flavor_info *end = exp->ex_flavors + exp->ex_nflavors;
 
 	for (f = exp->ex_flavors; f < end; f++) {
-		if (f->pseudoflavor == rqstp->rq_flavor)
+		if (f->pseudoflavor == rqstp->rq_cred.cr_flavor)
 			return f->flags;
 	}
 	return exp->ex_flags;
diff --git a/fs/nfsd/export.c b/fs/nfsd/export.c
index 8e9689a..ba23349 100644
--- a/fs/nfsd/export.c
+++ b/fs/nfsd/export.c
@@ -15,11 +15,13 @@
 #include <linux/namei.h>
 #include <linux/module.h>
 #include <linux/exportfs.h>
+#include <linux/sunrpc/svc_xprt.h>
 
 #include <net/ipv6.h>
 
 #include "nfsd.h"
 #include "nfsfh.h"
+#include "netns.h"
 
 #define NFSDDBG_FACILITY	NFSDDBG_EXPORT
 
@@ -38,7 +40,6 @@
 #define	EXPKEY_HASHBITS		8
 #define	EXPKEY_HASHMAX		(1 << EXPKEY_HASHBITS)
 #define	EXPKEY_HASHMASK		(EXPKEY_HASHMAX -1)
-static struct cache_head *expkey_table[EXPKEY_HASHMAX];
 
 static void expkey_put(struct kref *ref)
 {
@@ -71,9 +72,9 @@
 	return sunrpc_cache_pipe_upcall(cd, h, expkey_request);
 }
 
-static struct svc_expkey *svc_expkey_update(struct svc_expkey *new, struct svc_expkey *old);
-static struct svc_expkey *svc_expkey_lookup(struct svc_expkey *);
-static struct cache_detail svc_expkey_cache;
+static struct svc_expkey *svc_expkey_update(struct cache_detail *cd, struct svc_expkey *new,
+					    struct svc_expkey *old);
+static struct svc_expkey *svc_expkey_lookup(struct cache_detail *cd, struct svc_expkey *);
 
 static int expkey_parse(struct cache_detail *cd, char *mesg, int mlen)
 {
@@ -131,7 +132,7 @@
 	key.ek_fsidtype = fsidtype;
 	memcpy(key.ek_fsid, buf, len);
 
-	ek = svc_expkey_lookup(&key);
+	ek = svc_expkey_lookup(cd, &key);
 	err = -ENOMEM;
 	if (!ek)
 		goto out;
@@ -145,7 +146,7 @@
 	err = 0;
 	if (len == 0) {
 		set_bit(CACHE_NEGATIVE, &key.h.flags);
-		ek = svc_expkey_update(&key, ek);
+		ek = svc_expkey_update(cd, &key, ek);
 		if (!ek)
 			err = -ENOMEM;
 	} else {
@@ -155,7 +156,7 @@
 
 		dprintk("Found the path %s\n", buf);
 
-		ek = svc_expkey_update(&key, ek);
+		ek = svc_expkey_update(cd, &key, ek);
 		if (!ek)
 			err = -ENOMEM;
 		path_put(&key.ek_path);
@@ -163,7 +164,7 @@
 	cache_flush();
  out:
 	if (ek)
-		cache_put(&ek->h, &svc_expkey_cache);
+		cache_put(&ek->h, cd);
 	if (dom)
 		auth_domain_put(dom);
 	kfree(buf);
@@ -239,10 +240,9 @@
 		return NULL;
 }
 
-static struct cache_detail svc_expkey_cache = {
+static struct cache_detail svc_expkey_cache_template = {
 	.owner		= THIS_MODULE,
 	.hash_size	= EXPKEY_HASHMAX,
-	.hash_table	= expkey_table,
 	.name		= "nfsd.fh",
 	.cache_put	= expkey_put,
 	.cache_upcall	= expkey_upcall,
@@ -268,13 +268,12 @@
 }
 
 static struct svc_expkey *
-svc_expkey_lookup(struct svc_expkey *item)
+svc_expkey_lookup(struct cache_detail *cd, struct svc_expkey *item)
 {
 	struct cache_head *ch;
 	int hash = svc_expkey_hash(item);
 
-	ch = sunrpc_cache_lookup(&svc_expkey_cache, &item->h,
-				 hash);
+	ch = sunrpc_cache_lookup(cd, &item->h, hash);
 	if (ch)
 		return container_of(ch, struct svc_expkey, h);
 	else
@@ -282,13 +281,13 @@
 }
 
 static struct svc_expkey *
-svc_expkey_update(struct svc_expkey *new, struct svc_expkey *old)
+svc_expkey_update(struct cache_detail *cd, struct svc_expkey *new,
+		  struct svc_expkey *old)
 {
 	struct cache_head *ch;
 	int hash = svc_expkey_hash(new);
 
-	ch = sunrpc_cache_update(&svc_expkey_cache, &new->h,
-				 &old->h, hash);
+	ch = sunrpc_cache_update(cd, &new->h, &old->h, hash);
 	if (ch)
 		return container_of(ch, struct svc_expkey, h);
 	else
@@ -299,8 +298,6 @@
 #define	EXPORT_HASHBITS		8
 #define	EXPORT_HASHMAX		(1<< EXPORT_HASHBITS)
 
-static struct cache_head *export_table[EXPORT_HASHMAX];
-
 static void nfsd4_fslocs_free(struct nfsd4_fs_locations *fsloc)
 {
 	int i;
@@ -525,6 +522,7 @@
 		goto out1;
 
 	exp.ex_client = dom;
+	exp.cd = cd;
 
 	/* expiry */
 	err = -EINVAL;
@@ -672,6 +670,7 @@
 	new->ex_fslocs.locations = NULL;
 	new->ex_fslocs.locations_count = 0;
 	new->ex_fslocs.migrated = 0;
+	new->cd = item->cd;
 }
 
 static void export_update(struct cache_head *cnew, struct cache_head *citem)
@@ -707,10 +706,9 @@
 		return NULL;
 }
 
-struct cache_detail svc_export_cache = {
+static struct cache_detail svc_export_cache_template = {
 	.owner		= THIS_MODULE,
 	.hash_size	= EXPORT_HASHMAX,
-	.hash_table	= export_table,
 	.name		= "nfsd.export",
 	.cache_put	= svc_export_put,
 	.cache_upcall	= svc_export_upcall,
@@ -739,8 +737,7 @@
 	struct cache_head *ch;
 	int hash = svc_export_hash(exp);
 
-	ch = sunrpc_cache_lookup(&svc_export_cache, &exp->h,
-				 hash);
+	ch = sunrpc_cache_lookup(exp->cd, &exp->h, hash);
 	if (ch)
 		return container_of(ch, struct svc_export, h);
 	else
@@ -753,9 +750,7 @@
 	struct cache_head *ch;
 	int hash = svc_export_hash(old);
 
-	ch = sunrpc_cache_update(&svc_export_cache, &new->h,
-				 &old->h,
-				 hash);
+	ch = sunrpc_cache_update(old->cd, &new->h, &old->h, hash);
 	if (ch)
 		return container_of(ch, struct svc_export, h);
 	else
@@ -764,7 +759,8 @@
 
 
 static struct svc_expkey *
-exp_find_key(svc_client *clp, int fsid_type, u32 *fsidv, struct cache_req *reqp)
+exp_find_key(struct cache_detail *cd, svc_client *clp, int fsid_type,
+	     u32 *fsidv, struct cache_req *reqp)
 {
 	struct svc_expkey key, *ek;
 	int err;
@@ -776,18 +772,18 @@
 	key.ek_fsidtype = fsid_type;
 	memcpy(key.ek_fsid, fsidv, key_len(fsid_type));
 
-	ek = svc_expkey_lookup(&key);
+	ek = svc_expkey_lookup(cd, &key);
 	if (ek == NULL)
 		return ERR_PTR(-ENOMEM);
-	err = cache_check(&svc_expkey_cache, &ek->h, reqp);
+	err = cache_check(cd, &ek->h, reqp);
 	if (err)
 		return ERR_PTR(err);
 	return ek;
 }
 
 
-static svc_export *exp_get_by_name(svc_client *clp, const struct path *path,
-				     struct cache_req *reqp)
+static svc_export *exp_get_by_name(struct cache_detail *cd, svc_client *clp,
+				   const struct path *path, struct cache_req *reqp)
 {
 	struct svc_export *exp, key;
 	int err;
@@ -797,11 +793,12 @@
 
 	key.ex_client = clp;
 	key.ex_path = *path;
+	key.cd = cd;
 
 	exp = svc_export_lookup(&key);
 	if (exp == NULL)
 		return ERR_PTR(-ENOMEM);
-	err = cache_check(&svc_export_cache, &exp->h, reqp);
+	err = cache_check(cd, &exp->h, reqp);
 	if (err)
 		return ERR_PTR(err);
 	return exp;
@@ -810,16 +807,17 @@
 /*
  * Find the export entry for a given dentry.
  */
-static struct svc_export *exp_parent(svc_client *clp, struct path *path)
+static struct svc_export *exp_parent(struct cache_detail *cd, svc_client *clp,
+				     struct path *path)
 {
 	struct dentry *saved = dget(path->dentry);
-	svc_export *exp = exp_get_by_name(clp, path, NULL);
+	svc_export *exp = exp_get_by_name(cd, clp, path, NULL);
 
 	while (PTR_ERR(exp) == -ENOENT && !IS_ROOT(path->dentry)) {
 		struct dentry *parent = dget_parent(path->dentry);
 		dput(path->dentry);
 		path->dentry = parent;
-		exp = exp_get_by_name(clp, path, NULL);
+		exp = exp_get_by_name(cd, clp, path, NULL);
 	}
 	dput(path->dentry);
 	path->dentry = saved;
@@ -834,13 +832,16 @@
  * since its harder to fool a kernel module than a user space program.
  */
 int
-exp_rootfh(svc_client *clp, char *name, struct knfsd_fh *f, int maxsize)
+exp_rootfh(struct net *net, svc_client *clp, char *name,
+	   struct knfsd_fh *f, int maxsize)
 {
 	struct svc_export	*exp;
 	struct path		path;
 	struct inode		*inode;
 	struct svc_fh		fh;
 	int			err;
+	struct nfsd_net		*nn = net_generic(net, nfsd_net_id);
+	struct cache_detail	*cd = nn->svc_export_cache;
 
 	err = -EPERM;
 	/* NB: we probably ought to check that it's NUL-terminated */
@@ -853,7 +854,7 @@
 	dprintk("nfsd: exp_rootfh(%s [%p] %s:%s/%ld)\n",
 		 name, path.dentry, clp->name,
 		 inode->i_sb->s_id, inode->i_ino);
-	exp = exp_parent(clp, &path);
+	exp = exp_parent(cd, clp, &path);
 	if (IS_ERR(exp)) {
 		err = PTR_ERR(exp);
 		goto out;
@@ -875,16 +876,18 @@
 	return err;
 }
 
-static struct svc_export *exp_find(struct auth_domain *clp, int fsid_type,
+static struct svc_export *exp_find(struct cache_detail *cd,
+				   struct auth_domain *clp, int fsid_type,
 				   u32 *fsidv, struct cache_req *reqp)
 {
 	struct svc_export *exp;
-	struct svc_expkey *ek = exp_find_key(clp, fsid_type, fsidv, reqp);
+	struct nfsd_net *nn = net_generic(cd->net, nfsd_net_id);
+	struct svc_expkey *ek = exp_find_key(nn->svc_expkey_cache, clp, fsid_type, fsidv, reqp);
 	if (IS_ERR(ek))
 		return ERR_CAST(ek);
 
-	exp = exp_get_by_name(clp, &ek->ek_path, reqp);
-	cache_put(&ek->h, &svc_expkey_cache);
+	exp = exp_get_by_name(cd, clp, &ek->ek_path, reqp);
+	cache_put(&ek->h, nn->svc_expkey_cache);
 
 	if (IS_ERR(exp))
 		return ERR_CAST(exp);
@@ -901,13 +904,13 @@
 		return 0;
 	/* ip-address based client; check sec= export option: */
 	for (f = exp->ex_flavors; f < end; f++) {
-		if (f->pseudoflavor == rqstp->rq_flavor)
+		if (f->pseudoflavor == rqstp->rq_cred.cr_flavor)
 			return 0;
 	}
 	/* defaults in absence of sec= options: */
 	if (exp->ex_nflavors == 0) {
-		if (rqstp->rq_flavor == RPC_AUTH_NULL ||
-		    rqstp->rq_flavor == RPC_AUTH_UNIX)
+		if (rqstp->rq_cred.cr_flavor == RPC_AUTH_NULL ||
+		    rqstp->rq_cred.cr_flavor == RPC_AUTH_UNIX)
 			return 0;
 	}
 	return nfserr_wrongsec;
@@ -926,12 +929,14 @@
 rqst_exp_get_by_name(struct svc_rqst *rqstp, struct path *path)
 {
 	struct svc_export *gssexp, *exp = ERR_PTR(-ENOENT);
+	struct nfsd_net *nn = net_generic(rqstp->rq_xprt->xpt_net, nfsd_net_id);
+	struct cache_detail *cd = nn->svc_export_cache;
 
 	if (rqstp->rq_client == NULL)
 		goto gss;
 
 	/* First try the auth_unix client: */
-	exp = exp_get_by_name(rqstp->rq_client, path, &rqstp->rq_chandle);
+	exp = exp_get_by_name(cd, rqstp->rq_client, path, &rqstp->rq_chandle);
 	if (PTR_ERR(exp) == -ENOENT)
 		goto gss;
 	if (IS_ERR(exp))
@@ -943,7 +948,7 @@
 	/* Otherwise, try falling back on gss client */
 	if (rqstp->rq_gssclient == NULL)
 		return exp;
-	gssexp = exp_get_by_name(rqstp->rq_gssclient, path, &rqstp->rq_chandle);
+	gssexp = exp_get_by_name(cd, rqstp->rq_gssclient, path, &rqstp->rq_chandle);
 	if (PTR_ERR(gssexp) == -ENOENT)
 		return exp;
 	if (!IS_ERR(exp))
@@ -955,12 +960,15 @@
 rqst_exp_find(struct svc_rqst *rqstp, int fsid_type, u32 *fsidv)
 {
 	struct svc_export *gssexp, *exp = ERR_PTR(-ENOENT);
+	struct nfsd_net *nn = net_generic(rqstp->rq_xprt->xpt_net, nfsd_net_id);
+	struct cache_detail *cd = nn->svc_export_cache;
 
 	if (rqstp->rq_client == NULL)
 		goto gss;
 
 	/* First try the auth_unix client: */
-	exp = exp_find(rqstp->rq_client, fsid_type, fsidv, &rqstp->rq_chandle);
+	exp = exp_find(cd, rqstp->rq_client, fsid_type,
+		       fsidv, &rqstp->rq_chandle);
 	if (PTR_ERR(exp) == -ENOENT)
 		goto gss;
 	if (IS_ERR(exp))
@@ -972,7 +980,7 @@
 	/* Otherwise, try falling back on gss client */
 	if (rqstp->rq_gssclient == NULL)
 		return exp;
-	gssexp = exp_find(rqstp->rq_gssclient, fsid_type, fsidv,
+	gssexp = exp_find(cd, rqstp->rq_gssclient, fsid_type, fsidv,
 						&rqstp->rq_chandle);
 	if (PTR_ERR(gssexp) == -ENOENT)
 		return exp;
@@ -1029,13 +1037,15 @@
 /* Iterator */
 
 static void *e_start(struct seq_file *m, loff_t *pos)
-	__acquires(svc_export_cache.hash_lock)
+	__acquires(((struct cache_detail *)m->private)->hash_lock)
 {
 	loff_t n = *pos;
 	unsigned hash, export;
 	struct cache_head *ch;
-	
-	read_lock(&svc_export_cache.hash_lock);
+	struct cache_detail *cd = m->private;
+	struct cache_head **export_table = cd->hash_table;
+
+	read_lock(&cd->hash_lock);
 	if (!n--)
 		return SEQ_START_TOKEN;
 	hash = n >> 32;
@@ -1060,6 +1070,8 @@
 {
 	struct cache_head *ch = p;
 	int hash = (*pos >> 32);
+	struct cache_detail *cd = m->private;
+	struct cache_head **export_table = cd->hash_table;
 
 	if (p == SEQ_START_TOKEN)
 		hash = 0;
@@ -1082,9 +1094,11 @@
 }
 
 static void e_stop(struct seq_file *m, void *p)
-	__releases(svc_export_cache.hash_lock)
+	__releases(((struct cache_detail *)m->private)->hash_lock)
 {
-	read_unlock(&svc_export_cache.hash_lock);
+	struct cache_detail *cd = m->private;
+
+	read_unlock(&cd->hash_lock);
 }
 
 static struct flags {
@@ -1195,6 +1209,7 @@
 {
 	struct cache_head *cp = p;
 	struct svc_export *exp = container_of(cp, struct svc_export, h);
+	struct cache_detail *cd = m->private;
 
 	if (p == SEQ_START_TOKEN) {
 		seq_puts(m, "# Version 1.1\n");
@@ -1203,10 +1218,10 @@
 	}
 
 	cache_get(&exp->h);
-	if (cache_check(&svc_export_cache, &exp->h, NULL))
+	if (cache_check(cd, &exp->h, NULL))
 		return 0;
-	cache_put(&exp->h, &svc_export_cache);
-	return svc_export_show(m, &svc_export_cache, cp);
+	exp_put(exp);
+	return svc_export_show(m, cd, cp);
 }
 
 const struct seq_operations nfs_exports_op = {
@@ -1216,48 +1231,70 @@
 	.show	= e_show,
 };
 
-
 /*
  * Initialize the exports module.
  */
 int
-nfsd_export_init(void)
+nfsd_export_init(struct net *net)
 {
 	int rv;
-	dprintk("nfsd: initializing export module.\n");
+	struct nfsd_net *nn = net_generic(net, nfsd_net_id);
 
-	rv = cache_register_net(&svc_export_cache, &init_net);
+	dprintk("nfsd: initializing export module (net: %p).\n", net);
+
+	nn->svc_export_cache = cache_create_net(&svc_export_cache_template, net);
+	if (IS_ERR(nn->svc_export_cache))
+		return PTR_ERR(nn->svc_export_cache);
+	rv = cache_register_net(nn->svc_export_cache, net);
 	if (rv)
-		return rv;
-	rv = cache_register_net(&svc_expkey_cache, &init_net);
+		goto destroy_export_cache;
+
+	nn->svc_expkey_cache = cache_create_net(&svc_expkey_cache_template, net);
+	if (IS_ERR(nn->svc_expkey_cache)) {
+		rv = PTR_ERR(nn->svc_expkey_cache);
+		goto unregister_export_cache;
+	}
+	rv = cache_register_net(nn->svc_expkey_cache, net);
 	if (rv)
-		cache_unregister_net(&svc_export_cache, &init_net);
+		goto destroy_expkey_cache;
+	return 0;
+
+destroy_expkey_cache:
+	cache_destroy_net(nn->svc_expkey_cache, net);
+unregister_export_cache:
+	cache_unregister_net(nn->svc_export_cache, net);
+destroy_export_cache:
+	cache_destroy_net(nn->svc_export_cache, net);
 	return rv;
-
 }
 
 /*
  * Flush exports table - called when last nfsd thread is killed
  */
 void
-nfsd_export_flush(void)
+nfsd_export_flush(struct net *net)
 {
-	cache_purge(&svc_expkey_cache);
-	cache_purge(&svc_export_cache);
+	struct nfsd_net *nn = net_generic(net, nfsd_net_id);
+
+	cache_purge(nn->svc_expkey_cache);
+	cache_purge(nn->svc_export_cache);
 }
 
 /*
  * Shutdown the exports module.
  */
 void
-nfsd_export_shutdown(void)
+nfsd_export_shutdown(struct net *net)
 {
+	struct nfsd_net *nn = net_generic(net, nfsd_net_id);
 
-	dprintk("nfsd: shutting down export module.\n");
+	dprintk("nfsd: shutting down export module (net: %p).\n", net);
 
-	cache_unregister_net(&svc_expkey_cache, &init_net);
-	cache_unregister_net(&svc_export_cache, &init_net);
-	svcauth_unix_purge();
+	cache_unregister_net(nn->svc_expkey_cache, net);
+	cache_unregister_net(nn->svc_export_cache, net);
+	cache_destroy_net(nn->svc_expkey_cache, net);
+	cache_destroy_net(nn->svc_export_cache, net);
+	svcauth_unix_purge(net);
 
-	dprintk("nfsd: export shutdown complete.\n");
+	dprintk("nfsd: export shutdown complete (net: %p).\n", net);
 }
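nfsd_export_init() above now allocates and registers both caches per network namespace and unwinds partially completed setup in reverse order through dedicated labels, mirroring how nfsd_export_shutdown() later unregisters and destroys them. A small stand-alone C sketch of that goto-based unwind pattern (the cache helpers here are placeholders, not the sunrpc cache API):

/* Illustrative model of goto-based error unwinding: each setup step has a
 * matching teardown label, executed in reverse order on failure. */
#include <errno.h>
#include <stdio.h>
#include <stdlib.h>

struct cache { const char *name; };

static struct cache *cache_create(const char *name)
{
	struct cache *c = malloc(sizeof(*c));

	if (c)
		c->name = name;
	return c;
}

static int cache_register(struct cache *c)
{
	printf("registered %s\n", c->name);
	return 0;			/* return -errno on failure */
}

static void cache_unregister(struct cache *c) { printf("unregistered %s\n", c->name); }
static void cache_destroy(struct cache *c) { free(c); }

static int export_init(struct cache **export, struct cache **expkey)
{
	int rv;

	*export = cache_create("export");
	if (!*export)
		return -ENOMEM;
	rv = cache_register(*export);
	if (rv)
		goto destroy_export;

	*expkey = cache_create("expkey");
	if (!*expkey) {
		rv = -ENOMEM;
		goto unregister_export;
	}
	rv = cache_register(*expkey);
	if (rv)
		goto destroy_expkey;
	return 0;

destroy_expkey:
	cache_destroy(*expkey);
unregister_export:
	cache_unregister(*export);
destroy_export:
	cache_destroy(*export);
	return rv;
}

int main(void)
{
	struct cache *export, *expkey;

	return export_init(&export, &expkey) ? 1 : 0;
}
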
diff --git a/fs/nfsd/fault_inject.c b/fs/nfsd/fault_inject.c
index 9559ce4..e6c3815 100644
--- a/fs/nfsd/fault_inject.c
+++ b/fs/nfsd/fault_inject.c
@@ -58,6 +58,7 @@
 
 static int nfsd_inject_get(void *data, u64 *val)
 {
+	*val = 0;
 	return 0;
 }
 
diff --git a/fs/nfsd/idmap.h b/fs/nfsd/idmap.h
index 2f3be13..9d513ef 100644
--- a/fs/nfsd/idmap.h
+++ b/fs/nfsd/idmap.h
@@ -42,14 +42,14 @@
 #define IDMAP_NAMESZ 128
 
 #ifdef CONFIG_NFSD_V4
-int nfsd_idmap_init(void);
-void nfsd_idmap_shutdown(void);
+int nfsd_idmap_init(struct net *);
+void nfsd_idmap_shutdown(struct net *);
 #else
-static inline int nfsd_idmap_init(void)
+static inline int nfsd_idmap_init(struct net *net)
 {
 	return 0;
 }
-static inline void nfsd_idmap_shutdown(void)
+static inline void nfsd_idmap_shutdown(struct net *net)
 {
 }
 #endif
diff --git a/fs/nfsd/netns.h b/fs/nfsd/netns.h
index 12e0cff..3936563 100644
--- a/fs/nfsd/netns.h
+++ b/fs/nfsd/netns.h
@@ -28,6 +28,12 @@
 
 struct nfsd_net {
 	struct cld_net *cld_net;
+
+	struct cache_detail *svc_expkey_cache;
+	struct cache_detail *svc_export_cache;
+
+	struct cache_detail *idtoname_cache;
+	struct cache_detail *nametoid_cache;
 };
 
 extern int nfsd_net_id;
diff --git a/fs/nfsd/nfs4callback.c b/fs/nfsd/nfs4callback.c
index c8e9f63..a5fd6b98 100644
--- a/fs/nfsd/nfs4callback.c
+++ b/fs/nfsd/nfs4callback.c
@@ -650,9 +650,10 @@
 	struct rpc_clnt *client;
 
 	if (clp->cl_minorversion == 0) {
-		if (!clp->cl_principal && (clp->cl_flavor >= RPC_AUTH_GSS_KRB5))
+		if (!clp->cl_cred.cr_principal &&
+				(clp->cl_flavor >= RPC_AUTH_GSS_KRB5))
 			return -EINVAL;
-		args.client_name = clp->cl_principal;
+		args.client_name = clp->cl_cred.cr_principal;
 		args.prognumber	= conn->cb_prog,
 		args.protocol = XPRT_TRANSPORT_TCP;
 		args.authflavor = clp->cl_flavor;
diff --git a/fs/nfsd/nfs4idmap.c b/fs/nfsd/nfs4idmap.c
index 322d11c..dae36f1 100644
--- a/fs/nfsd/nfs4idmap.c
+++ b/fs/nfsd/nfs4idmap.c
@@ -36,9 +36,11 @@
 #include <linux/seq_file.h>
 #include <linux/sched.h>
 #include <linux/slab.h>
+#include <linux/sunrpc/svc_xprt.h>
 #include <net/net_namespace.h>
 #include "idmap.h"
 #include "nfsd.h"
+#include "netns.h"
 
 /*
  * Turn off idmapping when using AUTH_SYS.
@@ -107,8 +109,6 @@
  * ID -> Name cache
  */
 
-static struct cache_head *idtoname_table[ENT_HASHMAX];
-
 static uint32_t
 idtoname_hash(struct ent *ent)
 {
@@ -183,13 +183,13 @@
 
 
 static int         idtoname_parse(struct cache_detail *, char *, int);
-static struct ent *idtoname_lookup(struct ent *);
-static struct ent *idtoname_update(struct ent *, struct ent *);
+static struct ent *idtoname_lookup(struct cache_detail *, struct ent *);
+static struct ent *idtoname_update(struct cache_detail *, struct ent *,
+				   struct ent *);
 
-static struct cache_detail idtoname_cache = {
+static struct cache_detail idtoname_cache_template = {
 	.owner		= THIS_MODULE,
 	.hash_size	= ENT_HASHMAX,
-	.hash_table	= idtoname_table,
 	.name		= "nfs4.idtoname",
 	.cache_put	= ent_put,
 	.cache_upcall	= idtoname_upcall,
@@ -244,7 +244,7 @@
 		goto out;
 
 	error = -ENOMEM;
-	res = idtoname_lookup(&ent);
+	res = idtoname_lookup(cd, &ent);
 	if (!res)
 		goto out;
 
@@ -260,11 +260,11 @@
 	else
 		memcpy(ent.name, buf1, sizeof(ent.name));
 	error = -ENOMEM;
-	res = idtoname_update(&ent, res);
+	res = idtoname_update(cd, &ent, res);
 	if (res == NULL)
 		goto out;
 
-	cache_put(&res->h, &idtoname_cache);
+	cache_put(&res->h, cd);
 
 	error = 0;
 out:
@@ -275,10 +275,9 @@
 
 
 static struct ent *
-idtoname_lookup(struct ent *item)
+idtoname_lookup(struct cache_detail *cd, struct ent *item)
 {
-	struct cache_head *ch = sunrpc_cache_lookup(&idtoname_cache,
-						    &item->h,
+	struct cache_head *ch = sunrpc_cache_lookup(cd, &item->h,
 						    idtoname_hash(item));
 	if (ch)
 		return container_of(ch, struct ent, h);
@@ -287,10 +286,9 @@
 }
 
 static struct ent *
-idtoname_update(struct ent *new, struct ent *old)
+idtoname_update(struct cache_detail *cd, struct ent *new, struct ent *old)
 {
-	struct cache_head *ch = sunrpc_cache_update(&idtoname_cache,
-						    &new->h, &old->h,
+	struct cache_head *ch = sunrpc_cache_update(cd, &new->h, &old->h,
 						    idtoname_hash(new));
 	if (ch)
 		return container_of(ch, struct ent, h);
@@ -303,8 +301,6 @@
  * Name -> ID cache
  */
 
-static struct cache_head *nametoid_table[ENT_HASHMAX];
-
 static inline int
 nametoid_hash(struct ent *ent)
 {
@@ -359,14 +355,14 @@
 	return 0;
 }
 
-static struct ent *nametoid_lookup(struct ent *);
-static struct ent *nametoid_update(struct ent *, struct ent *);
+static struct ent *nametoid_lookup(struct cache_detail *, struct ent *);
+static struct ent *nametoid_update(struct cache_detail *, struct ent *,
+				   struct ent *);
 static int         nametoid_parse(struct cache_detail *, char *, int);
 
-static struct cache_detail nametoid_cache = {
+static struct cache_detail nametoid_cache_template = {
 	.owner		= THIS_MODULE,
 	.hash_size	= ENT_HASHMAX,
-	.hash_table	= nametoid_table,
 	.name		= "nfs4.nametoid",
 	.cache_put	= ent_put,
 	.cache_upcall	= nametoid_upcall,
@@ -426,14 +422,14 @@
 		set_bit(CACHE_NEGATIVE, &ent.h.flags);
 
 	error = -ENOMEM;
-	res = nametoid_lookup(&ent);
+	res = nametoid_lookup(cd, &ent);
 	if (res == NULL)
 		goto out;
-	res = nametoid_update(&ent, res);
+	res = nametoid_update(cd, &ent, res);
 	if (res == NULL)
 		goto out;
 
-	cache_put(&res->h, &nametoid_cache);
+	cache_put(&res->h, cd);
 	error = 0;
 out:
 	kfree(buf1);
@@ -443,10 +439,9 @@
 
 
 static struct ent *
-nametoid_lookup(struct ent *item)
+nametoid_lookup(struct cache_detail *cd, struct ent *item)
 {
-	struct cache_head *ch = sunrpc_cache_lookup(&nametoid_cache,
-						    &item->h,
+	struct cache_head *ch = sunrpc_cache_lookup(cd, &item->h,
 						    nametoid_hash(item));
 	if (ch)
 		return container_of(ch, struct ent, h);
@@ -455,10 +450,9 @@
 }
 
 static struct ent *
-nametoid_update(struct ent *new, struct ent *old)
+nametoid_update(struct cache_detail *cd, struct ent *new, struct ent *old)
 {
-	struct cache_head *ch = sunrpc_cache_update(&nametoid_cache,
-						    &new->h, &old->h,
+	struct cache_head *ch = sunrpc_cache_update(cd, &new->h, &old->h,
 						    nametoid_hash(new));
 	if (ch)
 		return container_of(ch, struct ent, h);
@@ -471,34 +465,55 @@
  */
 
 int
-nfsd_idmap_init(void)
+nfsd_idmap_init(struct net *net)
 {
 	int rv;
+	struct nfsd_net *nn = net_generic(net, nfsd_net_id);
 
-	rv = cache_register_net(&idtoname_cache, &init_net);
+	nn->idtoname_cache = cache_create_net(&idtoname_cache_template, net);
+	if (IS_ERR(nn->idtoname_cache))
+		return PTR_ERR(nn->idtoname_cache);
+	rv = cache_register_net(nn->idtoname_cache, net);
 	if (rv)
-		return rv;
-	rv = cache_register_net(&nametoid_cache, &init_net);
+		goto destroy_idtoname_cache;
+	nn->nametoid_cache = cache_create_net(&nametoid_cache_template, net);
+	if (IS_ERR(nn->nametoid_cache)) {
+		rv = PTR_ERR(nn->nametoid_cache);
+		goto unregister_idtoname_cache;
+	}
+	rv = cache_register_net(nn->nametoid_cache, net);
 	if (rv)
-		cache_unregister_net(&idtoname_cache, &init_net);
+		goto destroy_nametoid_cache;
+	return 0;
+
+destroy_nametoid_cache:
+	cache_destroy_net(nn->nametoid_cache, net);
+unregister_idtoname_cache:
+	cache_unregister_net(nn->idtoname_cache, net);
+destroy_idtoname_cache:
+	cache_destroy_net(nn->idtoname_cache, net);
 	return rv;
 }
 
 void
-nfsd_idmap_shutdown(void)
+nfsd_idmap_shutdown(struct net *net)
 {
-	cache_unregister_net(&idtoname_cache, &init_net);
-	cache_unregister_net(&nametoid_cache, &init_net);
+	struct nfsd_net *nn = net_generic(net, nfsd_net_id);
+
+	cache_unregister_net(nn->idtoname_cache, net);
+	cache_unregister_net(nn->nametoid_cache, net);
+	cache_destroy_net(nn->idtoname_cache, net);
+	cache_destroy_net(nn->nametoid_cache, net);
 }
 
 static int
 idmap_lookup(struct svc_rqst *rqstp,
-		struct ent *(*lookup_fn)(struct ent *), struct ent *key,
-		struct cache_detail *detail, struct ent **item)
+		struct ent *(*lookup_fn)(struct cache_detail *, struct ent *),
+		struct ent *key, struct cache_detail *detail, struct ent **item)
 {
 	int ret;
 
-	*item = lookup_fn(key);
+	*item = lookup_fn(detail, key);
 	if (!*item)
 		return -ENOMEM;
  retry:
@@ -506,7 +521,7 @@
 
 	if (ret == -ETIMEDOUT) {
 		struct ent *prev_item = *item;
-		*item = lookup_fn(key);
+		*item = lookup_fn(detail, key);
 		if (*item != prev_item)
 			goto retry;
 		cache_put(&(*item)->h, detail);
@@ -531,19 +546,20 @@
 		.type = type,
 	};
 	int ret;
+	struct nfsd_net *nn = net_generic(rqstp->rq_xprt->xpt_net, nfsd_net_id);
 
 	if (namelen + 1 > sizeof(key.name))
 		return nfserr_badowner;
 	memcpy(key.name, name, namelen);
 	key.name[namelen] = '\0';
 	strlcpy(key.authname, rqst_authname(rqstp), sizeof(key.authname));
-	ret = idmap_lookup(rqstp, nametoid_lookup, &key, &nametoid_cache, &item);
+	ret = idmap_lookup(rqstp, nametoid_lookup, &key, nn->nametoid_cache, &item);
 	if (ret == -ENOENT)
 		return nfserr_badowner;
 	if (ret)
 		return nfserrno(ret);
 	*id = item->id;
-	cache_put(&item->h, &nametoid_cache);
+	cache_put(&item->h, nn->nametoid_cache);
 	return 0;
 }
 
@@ -555,9 +571,10 @@
 		.type = type,
 	};
 	int ret;
+	struct nfsd_net *nn = net_generic(rqstp->rq_xprt->xpt_net, nfsd_net_id);
 
 	strlcpy(key.authname, rqst_authname(rqstp), sizeof(key.authname));
-	ret = idmap_lookup(rqstp, idtoname_lookup, &key, &idtoname_cache, &item);
+	ret = idmap_lookup(rqstp, idtoname_lookup, &key, nn->idtoname_cache, &item);
 	if (ret == -ENOENT)
 		return sprintf(name, "%u", id);
 	if (ret)
@@ -565,7 +582,7 @@
 	ret = strlen(item->name);
 	BUG_ON(ret > IDMAP_NAMESZ);
 	memcpy(name, item->name, ret);
-	cache_put(&item->h, &idtoname_cache);
+	cache_put(&item->h, nn->idtoname_cache);
 	return ret;
 }
 
@@ -588,7 +605,7 @@
 static __be32
 do_name_to_id(struct svc_rqst *rqstp, int type, const char *name, u32 namelen, uid_t *id)
 {
-	if (nfs4_disable_idmapping && rqstp->rq_flavor < RPC_AUTH_GSS)
+	if (nfs4_disable_idmapping && rqstp->rq_cred.cr_flavor < RPC_AUTH_GSS)
 		if (numeric_name_to_id(rqstp, type, name, namelen, id))
 			return 0;
 		/*
@@ -601,7 +618,7 @@
 static int
 do_id_to_name(struct svc_rqst *rqstp, int type, uid_t id, char *name)
 {
-	if (nfs4_disable_idmapping && rqstp->rq_flavor < RPC_AUTH_GSS)
+	if (nfs4_disable_idmapping && rqstp->rq_cred.cr_flavor < RPC_AUTH_GSS)
 		return sprintf(name, "%u", id);
 	return idmap_id_to_name(rqstp, type, id, name);
 }
diff --git a/fs/nfsd/nfs4recover.c b/fs/nfsd/nfs4recover.c
index ed3f920..5ff0b7b 100644
--- a/fs/nfsd/nfs4recover.c
+++ b/fs/nfsd/nfs4recover.c
@@ -570,7 +570,7 @@
 cld_pipe_downcall(struct file *filp, const char __user *src, size_t mlen)
 {
 	struct cld_upcall *tmp, *cup;
-	struct cld_msg *cmsg = (struct cld_msg *)src;
+	struct cld_msg __user *cmsg = (struct cld_msg __user *)src;
 	uint32_t xid;
 	struct nfsd_net *nn = net_generic(filp->f_dentry->d_sb->s_fs_info,
 						nfsd_net_id);
@@ -1029,7 +1029,7 @@
 	return ret;
 }
 
-struct notifier_block nfsd4_cld_block = {
+static struct notifier_block nfsd4_cld_block = {
 	.notifier_call = rpc_pipefs_event,
 };
 
diff --git a/fs/nfsd/nfs4state.c b/fs/nfsd/nfs4state.c
index 7f71c69..94effd5 100644
--- a/fs/nfsd/nfs4state.c
+++ b/fs/nfsd/nfs4state.c
@@ -42,6 +42,7 @@
 #include <linux/sunrpc/clnt.h>
 #include "xdr4.h"
 #include "vfs.h"
+#include "current_stateid.h"
 
 #define NFSDDBG_FACILITY                NFSDDBG_PROC
 
@@ -447,37 +448,69 @@
  *
  * which we should reject.
  */
-static void
-set_access(unsigned int *access, unsigned long bmap) {
+static unsigned int
+bmap_to_share_mode(unsigned long bmap) {
 	int i;
+	unsigned int access = 0;
 
-	*access = 0;
 	for (i = 1; i < 4; i++) {
 		if (test_bit(i, &bmap))
-			*access |= i;
+			access |= i;
 	}
+	return access;
 }
 
-static void
-set_deny(unsigned int *deny, unsigned long bmap) {
-	int i;
-
-	*deny = 0;
-	for (i = 0; i < 4; i++) {
-		if (test_bit(i, &bmap))
-			*deny |= i ;
-	}
-}
-
-static int
+static bool
 test_share(struct nfs4_ol_stateid *stp, struct nfsd4_open *open) {
 	unsigned int access, deny;
 
-	set_access(&access, stp->st_access_bmap);
-	set_deny(&deny, stp->st_deny_bmap);
+	access = bmap_to_share_mode(stp->st_access_bmap);
+	deny = bmap_to_share_mode(stp->st_deny_bmap);
 	if ((access & open->op_share_deny) || (deny & open->op_share_access))
-		return 0;
-	return 1;
+		return false;
+	return true;
+}
+
+/* set share access for a given stateid */
+static inline void
+set_access(u32 access, struct nfs4_ol_stateid *stp)
+{
+	__set_bit(access, &stp->st_access_bmap);
+}
+
+/* clear share access for a given stateid */
+static inline void
+clear_access(u32 access, struct nfs4_ol_stateid *stp)
+{
+	__clear_bit(access, &stp->st_access_bmap);
+}
+
+/* test whether a given stateid has access */
+static inline bool
+test_access(u32 access, struct nfs4_ol_stateid *stp)
+{
+	return test_bit(access, &stp->st_access_bmap);
+}
+
+/* set share deny for a given stateid */
+static inline void
+set_deny(u32 access, struct nfs4_ol_stateid *stp)
+{
+	__set_bit(access, &stp->st_deny_bmap);
+}
+
+/* clear share deny for a given stateid */
+static inline void
+clear_deny(u32 access, struct nfs4_ol_stateid *stp)
+{
+	__clear_bit(access, &stp->st_deny_bmap);
+}
+
+/* test whether a given stateid is denying specific access */
+static inline bool
+test_deny(u32 access, struct nfs4_ol_stateid *stp)
+{
+	return test_bit(access, &stp->st_deny_bmap);
 }
 
 static int nfs4_access_to_omode(u32 access)
@@ -493,6 +526,20 @@
 	BUG();
 }
 
+/* release all access and file references for a given stateid */
+static void
+release_all_access(struct nfs4_ol_stateid *stp)
+{
+	int i;
+
+	for (i = 1; i < 4; i++) {
+		if (test_access(i, stp))
+			nfs4_file_put_access(stp->st_file,
+					     nfs4_access_to_omode(i));
+		clear_access(i, stp);
+	}
+}
+
 static void unhash_generic_stateid(struct nfs4_ol_stateid *stp)
 {
 	list_del(&stp->st_perfile);
@@ -501,16 +548,7 @@
 
 static void close_generic_stateid(struct nfs4_ol_stateid *stp)
 {
-	int i;
-
-	if (stp->st_access_bmap) {
-		for (i = 1; i < 4; i++) {
-			if (test_bit(i, &stp->st_access_bmap))
-				nfs4_file_put_access(stp->st_file,
-						nfs4_access_to_omode(i));
-			__clear_bit(i, &stp->st_access_bmap);
-		}
-	}
+	release_all_access(stp);
 	put_nfs4_file(stp->st_file);
 	stp->st_file = NULL;
 }
@@ -862,7 +900,7 @@
 	struct nfsd4_session *ses;
 	int mem;
 
-	BUG_ON(!spin_is_locked(&client_lock));
+	lockdep_assert_held(&client_lock);
 	ses = container_of(kref, struct nfsd4_session, se_ref);
 	nfsd4_del_conns(ses);
 	spin_lock(&nfsd_drc_lock);
@@ -885,7 +923,7 @@
 	struct nfsd4_session *new;
 	struct nfsd4_channel_attrs *fchan = &cses->fore_channel;
 	int numslots, slotsize;
-	int status;
+	__be32 status;
 	int idx;
 
 	/*
@@ -984,7 +1022,8 @@
 renew_client_locked(struct nfs4_client *clp)
 {
 	if (is_client_expired(clp)) {
-		dprintk("%s: client (clientid %08x/%08x) already expired\n",
+		WARN_ON(1);
+		printk("%s: client (clientid %08x/%08x) already expired\n",
 			__func__,
 			clp->cl_clientid.cl_boot,
 			clp->cl_clientid.cl_id);
@@ -1041,7 +1080,7 @@
 static inline void
 free_client(struct nfs4_client *clp)
 {
-	BUG_ON(!spin_is_locked(&client_lock));
+	lockdep_assert_held(&client_lock);
 	while (!list_empty(&clp->cl_sessions)) {
 		struct nfsd4_session *ses;
 		ses = list_entry(clp->cl_sessions.next, struct nfsd4_session,
@@ -1049,9 +1088,7 @@
 		list_del(&ses->se_perclnt);
 		nfsd4_put_session_locked(ses);
 	}
-	if (clp->cl_cred.cr_group_info)
-		put_group_info(clp->cl_cred.cr_group_info);
-	kfree(clp->cl_principal);
+	free_svc_cred(&clp->cl_cred);
 	kfree(clp->cl_name.data);
 	kfree(clp);
 }
@@ -1132,12 +1169,21 @@
 	target->cl_clientid.cl_id = source->cl_clientid.cl_id; 
 }
 
-static void copy_cred(struct svc_cred *target, struct svc_cred *source)
+static int copy_cred(struct svc_cred *target, struct svc_cred *source)
 {
+	if (source->cr_principal) {
+		target->cr_principal =
+				kstrdup(source->cr_principal, GFP_KERNEL);
+		if (target->cr_principal == NULL)
+			return -ENOMEM;
+	} else
+		target->cr_principal = NULL;
+	target->cr_flavor = source->cr_flavor;
 	target->cr_uid = source->cr_uid;
 	target->cr_gid = source->cr_gid;
 	target->cr_group_info = source->cr_group_info;
 	get_group_info(target->cr_group_info);
+	return 0;
 }
 
 static int same_name(const char *n1, const char *n2)
@@ -1157,11 +1203,31 @@
 	return (cl1->cl_boot == cl2->cl_boot) && (cl1->cl_id == cl2->cl_id);
 }
 
-/* XXX what about NGROUP */
+static bool groups_equal(struct group_info *g1, struct group_info *g2)
+{
+	int i;
+
+	if (g1->ngroups != g2->ngroups)
+		return false;
+	for (i = 0; i < g1->ngroups; i++)
+		if (GROUP_AT(g1, i) != GROUP_AT(g2, i))
+			return false;
+	return true;
+}
+
 static int
 same_creds(struct svc_cred *cr1, struct svc_cred *cr2)
 {
-	return cr1->cr_uid == cr2->cr_uid;
+	if ((cr1->cr_flavor != cr2->cr_flavor)
+		|| (cr1->cr_uid != cr2->cr_uid)
+		|| (cr1->cr_gid != cr2->cr_gid)
+		|| !groups_equal(cr1->cr_group_info, cr2->cr_group_info))
+		return false;
+	if (cr1->cr_principal == cr2->cr_principal)
+		return true;
+	if (!cr1->cr_principal || !cr2->cr_principal)
+		return false;
+	return 0 == strcmp(cr1->cr_principal, cr2->cr_principal);
 }
 
 static void gen_clid(struct nfs4_client *clp)
@@ -1204,25 +1270,20 @@
 {
 	struct nfs4_client *clp;
 	struct sockaddr *sa = svc_addr(rqstp);
-	char *princ;
+	int ret;
 
 	clp = alloc_client(name);
 	if (clp == NULL)
 		return NULL;
 
 	INIT_LIST_HEAD(&clp->cl_sessions);
-
-	princ = svc_gss_principal(rqstp);
-	if (princ) {
-		clp->cl_principal = kstrdup(princ, GFP_KERNEL);
-		if (clp->cl_principal == NULL) {
-			spin_lock(&client_lock);
-			free_client(clp);
-			spin_unlock(&client_lock);
-			return NULL;
-		}
+	ret = copy_cred(&clp->cl_cred, &rqstp->rq_cred);
+	if (ret) {
+		spin_lock(&client_lock);
+		free_client(clp);
+		spin_unlock(&client_lock);
+		return NULL;
 	}
-
 	idr_init(&clp->cl_stateids);
 	memcpy(clp->cl_recdir, recdir, HEXDIR_LEN);
 	atomic_set(&clp->cl_refcount, 0);
@@ -1240,8 +1301,6 @@
 	rpc_init_wait_queue(&clp->cl_cb_waitq, "Backchannel slot table");
 	copy_verf(clp, verf);
 	rpc_copy_addr((struct sockaddr *) &clp->cl_addr, sa);
-	clp->cl_flavor = rqstp->rq_flavor;
-	copy_cred(&clp->cl_cred, &rqstp->rq_cred);
 	gen_confirm(clp);
 	clp->cl_cb_session = NULL;
 	return clp;
@@ -1470,18 +1529,32 @@
 	clid->flags = new->cl_exchange_flags;
 }
 
+static bool client_has_state(struct nfs4_client *clp)
+{
+	/*
+	 * Note clp->cl_openowners check isn't quite right: there's no
+	 * need to count owners without stateid's.
+	 *
+	 * Also note we should probably be using this in 4.0 case too.
+	 */
+	return !list_empty(&clp->cl_openowners)
+		|| !list_empty(&clp->cl_delegations)
+		|| !list_empty(&clp->cl_sessions);
+}
+
 __be32
 nfsd4_exchange_id(struct svc_rqst *rqstp,
 		  struct nfsd4_compound_state *cstate,
 		  struct nfsd4_exchange_id *exid)
 {
 	struct nfs4_client *unconf, *conf, *new;
-	int status;
+	__be32 status;
 	unsigned int		strhashval;
 	char			dname[HEXDIR_LEN];
 	char			addr_str[INET6_ADDRSTRLEN];
 	nfs4_verifier		verf = exid->verifier;
 	struct sockaddr		*sa = svc_addr(rqstp);
+	bool	update = exid->flags & EXCHGID4_FLAG_UPD_CONFIRMED_REC_A;
 
 	rpc_ntop(sa, addr_str, sizeof(addr_str));
 	dprintk("%s rqstp=%p exid=%p clname.len=%u clname.data=%p "
@@ -1507,71 +1580,63 @@
 	status = nfs4_make_rec_clidname(dname, &exid->clname);
 
 	if (status)
-		goto error;
+		return status;
 
 	strhashval = clientstr_hashval(dname);
 
+	/* Cases below refer to rfc 5661 section 18.35.4: */
 	nfs4_lock_state();
-	status = nfs_ok;
-
 	conf = find_confirmed_client_by_str(dname, strhashval);
 	if (conf) {
-		if (!clp_used_exchangeid(conf)) {
-			status = nfserr_clid_inuse; /* XXX: ? */
-			goto out;
-		}
-		if (!same_verf(&verf, &conf->cl_verifier)) {
-			/* 18.35.4 case 8 */
-			if (exid->flags & EXCHGID4_FLAG_UPD_CONFIRMED_REC_A) {
-				status = nfserr_not_same;
+		bool creds_match = same_creds(&conf->cl_cred, &rqstp->rq_cred);
+		bool verfs_match = same_verf(&verf, &conf->cl_verifier);
+
+		if (update) {
+			if (!clp_used_exchangeid(conf)) { /* buggy client */
+				status = nfserr_inval;
 				goto out;
 			}
-			/* Client reboot: destroy old state */
-			expire_client(conf);
-			goto out_new;
-		}
-		if (!same_creds(&conf->cl_cred, &rqstp->rq_cred)) {
-			/* 18.35.4 case 9 */
-			if (exid->flags & EXCHGID4_FLAG_UPD_CONFIRMED_REC_A) {
+			if (!creds_match) { /* case 9 */
 				status = nfserr_perm;
 				goto out;
 			}
+			if (!verfs_match) { /* case 8 */
+				status = nfserr_not_same;
+				goto out;
+			}
+			/* case 6 */
+			exid->flags |= EXCHGID4_FLAG_CONFIRMED_R;
+			new = conf;
+			goto out_copy;
+		}
+		if (!creds_match) { /* case 3 */
+			if (client_has_state(conf)) {
+				status = nfserr_clid_inuse;
+				goto out;
+			}
 			expire_client(conf);
 			goto out_new;
 		}
-		/*
-		 * Set bit when the owner id and verifier map to an already
-		 * confirmed client id (18.35.3).
-		 */
-		exid->flags |= EXCHGID4_FLAG_CONFIRMED_R;
-
-		/*
-		 * Falling into 18.35.4 case 2, possible router replay.
-		 * Leave confirmed record intact and return same result.
-		 */
-		copy_verf(conf, &verf);
-		new = conf;
-		goto out_copy;
+		if (verfs_match) { /* case 2 */
+			conf->cl_exchange_flags |= EXCHGID4_FLAG_CONFIRMED_R;
+			new = conf;
+			goto out_copy;
+		}
+		/* case 5, client reboot */
+		goto out_new;
 	}
 
-	/* 18.35.4 case 7 */
-	if (exid->flags & EXCHGID4_FLAG_UPD_CONFIRMED_REC_A) {
+	if (update) { /* case 7 */
 		status = nfserr_noent;
 		goto out;
 	}
 
 	unconf  = find_unconfirmed_client_by_str(dname, strhashval);
-	if (unconf) {
-		/*
-		 * Possible retry or client restart.  Per 18.35.4 case 4,
-		 * a new unconfirmed record should be generated regardless
-		 * of whether any properties have changed.
-		 */
+	if (unconf) /* case 4, possible retry or client restart */
 		expire_client(unconf);
-	}
 
+	/* case 1 (normal case) */
 out_new:
-	/* Normal case */
 	new = create_client(exid->clname, dname, rqstp, &verf);
 	if (new == NULL) {
 		status = nfserr_jukebox;
@@ -1584,7 +1649,7 @@
 	exid->clientid.cl_boot = new->cl_clientid.cl_boot;
 	exid->clientid.cl_id = new->cl_clientid.cl_id;
 
-	exid->seqid = 1;
+	exid->seqid = new->cl_cs_slot.sl_seqid + 1;
 	nfsd4_set_ex_flags(new, exid);
 
 	dprintk("nfsd4_exchange_id seqid %d flags %x\n",
@@ -1593,12 +1658,10 @@
 
 out:
 	nfs4_unlock_state();
-error:
-	dprintk("nfsd4_exchange_id returns %d\n", ntohl(status));
 	return status;
 }
 
-static int
+static __be32
 check_slot_seqid(u32 seqid, u32 slot_seqid, int slot_inuse)
 {
 	dprintk("%s enter. seqid %d slot_seqid %d\n", __func__, seqid,
@@ -1626,7 +1689,7 @@
  */
 static void
 nfsd4_cache_create_session(struct nfsd4_create_session *cr_ses,
-			   struct nfsd4_clid_slot *slot, int nfserr)
+			   struct nfsd4_clid_slot *slot, __be32 nfserr)
 {
 	slot->sl_status = nfserr;
 	memcpy(&slot->sl_cr_ses, cr_ses, sizeof(*cr_ses));
@@ -1657,7 +1720,7 @@
 				/* seqid, slotID, slotID, slotID, status */ \
 			5 ) * sizeof(__be32))
 
-static __be32 check_forechannel_attrs(struct nfsd4_channel_attrs fchannel)
+static bool check_forechannel_attrs(struct nfsd4_channel_attrs fchannel)
 {
 	return fchannel.maxreq_sz < NFSD_MIN_REQ_HDR_SEQ_SZ
 		|| fchannel.maxresp_sz < NFSD_MIN_RESP_HDR_SEQ_SZ;
@@ -1673,7 +1736,7 @@
 	struct nfsd4_session *new;
 	struct nfsd4_clid_slot *cs_slot = NULL;
 	bool confirm_me = false;
-	int status = 0;
+	__be32 status = 0;
 
 	if (cr_ses->flags & ~SESSION4_FLAG_MASK_A)
 		return nfserr_inval;
@@ -1686,16 +1749,10 @@
 		cs_slot = &conf->cl_cs_slot;
 		status = check_slot_seqid(cr_ses->seqid, cs_slot->sl_seqid, 0);
 		if (status == nfserr_replay_cache) {
-			dprintk("Got a create_session replay! seqid= %d\n",
-				cs_slot->sl_seqid);
-			/* Return the cached reply status */
 			status = nfsd4_replay_create_session(cr_ses, cs_slot);
 			goto out;
 		} else if (cr_ses->seqid != cs_slot->sl_seqid + 1) {
 			status = nfserr_seq_misordered;
-			dprintk("Sequence misordered!\n");
-			dprintk("Expected seqid= %d but got seqid= %d\n",
-				cs_slot->sl_seqid, cr_ses->seqid);
 			goto out;
 		}
 	} else if (unconf) {
@@ -1704,7 +1761,6 @@
 			status = nfserr_clid_inuse;
 			goto out;
 		}
-
 		cs_slot = &unconf->cl_cs_slot;
 		status = check_slot_seqid(cr_ses->seqid, cs_slot->sl_seqid, 0);
 		if (status) {
@@ -1712,7 +1768,6 @@
 			status = nfserr_seq_misordered;
 			goto out;
 		}
-
 		confirm_me = true;
 		conf = unconf;
 	} else {
@@ -1749,8 +1804,14 @@
 
 	/* cache solo and embedded create sessions under the state lock */
 	nfsd4_cache_create_session(cr_ses, cs_slot, status);
-	if (confirm_me)
+	if (confirm_me) {
+		unsigned int hash = clientstr_hashval(unconf->cl_recdir);
+		struct nfs4_client *old =
+			find_confirmed_client_by_str(conf->cl_recdir, hash);
+		if (old)
+			expire_client(old);
 		move_to_confirmed(conf);
+	}
 out:
 	nfs4_unlock_state();
 	dprintk("%s returns %d\n", __func__, ntohl(status));
@@ -1818,7 +1879,7 @@
 		      struct nfsd4_destroy_session *sessionid)
 {
 	struct nfsd4_session *ses;
-	u32 status = nfserr_badsession;
+	__be32 status = nfserr_badsession;
 
 	/* Notes:
 	 * - The confirmed nfs4_client->cl_sessionid holds destroyed sessinid
@@ -1914,7 +1975,7 @@
 	struct nfsd4_session *session;
 	struct nfsd4_slot *slot;
 	struct nfsd4_conn *conn;
-	int status;
+	__be32 status;
 
 	if (resp->opcnt != 1)
 		return nfserr_sequence_pos;
@@ -2008,18 +2069,11 @@
 	return status;
 }
 
-static inline bool has_resources(struct nfs4_client *clp)
-{
-	return !list_empty(&clp->cl_openowners)
-		|| !list_empty(&clp->cl_delegations)
-		|| !list_empty(&clp->cl_sessions);
-}
-
 __be32
 nfsd4_destroy_clientid(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate, struct nfsd4_destroy_clientid *dc)
 {
 	struct nfs4_client *conf, *unconf, *clp;
-	int status = 0;
+	__be32 status = 0;
 
 	nfs4_lock_state();
 	unconf = find_unconfirmed_client(&dc->clientid);
@@ -2028,7 +2082,7 @@
 	if (conf) {
 		clp = conf;
 
-		if (!is_client_expired(conf) && has_resources(conf)) {
+		if (!is_client_expired(conf) && client_has_state(conf)) {
 			status = nfserr_clientid_busy;
 			goto out;
 		}
@@ -2055,7 +2109,7 @@
 __be32
 nfsd4_reclaim_complete(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate, struct nfsd4_reclaim_complete *rc)
 {
-	int status = 0;
+	__be32 status = 0;
 
 	if (rc->rca_one_fs) {
 		if (!cstate->current_fh.fh_dentry)
@@ -2106,17 +2160,13 @@
 	if (status)
 		return status;
 
-	/* 
-	 * XXX The Duplicate Request Cache (DRC) has been checked (??)
-	 * We get here on a DRC miss.
-	 */
-
 	strhashval = clientstr_hashval(dname);
 
+	/* Cases below refer to rfc 3530 section 14.2.33: */
 	nfs4_lock_state();
 	conf = find_confirmed_client_by_str(dname, strhashval);
 	if (conf) {
-		/* RFC 3530 14.2.33 CASE 0: */
+		/* case 0: */
 		status = nfserr_clid_inuse;
 		if (clp_used_exchangeid(conf))
 			goto out;
@@ -2129,63 +2179,18 @@
 			goto out;
 		}
 	}
-	/*
-	 * section 14.2.33 of RFC 3530 (under the heading "IMPLEMENTATION")
-	 * has a description of SETCLIENTID request processing consisting
-	 * of 5 bullet points, labeled as CASE0 - CASE4 below.
-	 */
 	unconf = find_unconfirmed_client_by_str(dname, strhashval);
-	status = nfserr_jukebox;
-	if (!conf) {
-		/*
-		 * RFC 3530 14.2.33 CASE 4:
-		 * placed first, because it is the normal case
-		 */
-		if (unconf)
-			expire_client(unconf);
-		new = create_client(clname, dname, rqstp, &clverifier);
-		if (new == NULL)
-			goto out;
-		gen_clid(new);
-	} else if (same_verf(&conf->cl_verifier, &clverifier)) {
-		/*
-		 * RFC 3530 14.2.33 CASE 1:
-		 * probable callback update
-		 */
-		if (unconf) {
-			/* Note this is removing unconfirmed {*x***},
-			 * which is stronger than RFC recommended {vxc**}.
-			 * This has the advantage that there is at most
-			 * one {*x***} in either list at any time.
-			 */
-			expire_client(unconf);
-		}
-		new = create_client(clname, dname, rqstp, &clverifier);
-		if (new == NULL)
-			goto out;
-		copy_clid(new, conf);
-	} else if (!unconf) {
-		/*
-		 * RFC 3530 14.2.33 CASE 2:
-		 * probable client reboot; state will be removed if
-		 * confirmed.
-		 */
-		new = create_client(clname, dname, rqstp, &clverifier);
-		if (new == NULL)
-			goto out;
-		gen_clid(new);
-	} else {
-		/*
-		 * RFC 3530 14.2.33 CASE 3:
-		 * probable client reboot; state will be removed if
-		 * confirmed.
-		 */
+	if (unconf)
 		expire_client(unconf);
-		new = create_client(clname, dname, rqstp, &clverifier);
-		if (new == NULL)
-			goto out;
+	status = nfserr_jukebox;
+	new = create_client(clname, dname, rqstp, &clverifier);
+	if (new == NULL)
+		goto out;
+	if (conf && same_verf(&conf->cl_verifier, &clverifier))
+		/* case 1: probable callback update */
+		copy_clid(new, conf);
+	else /* case 4 (new client) or cases 2, 3 (client reboot): */
 		gen_clid(new);
-	}
 	/*
 	 * XXX: we should probably set this at creation time, and check
 	 * for consistent minorversion use throughout:
@@ -2203,17 +2208,11 @@
 }
 
 
-/*
- * Section 14.2.34 of RFC 3530 (under the heading "IMPLEMENTATION") has
- * a description of SETCLIENTID_CONFIRM request processing consisting of 4
- * bullets, labeled as CASE1 - CASE4 below.
- */
 __be32
 nfsd4_setclientid_confirm(struct svc_rqst *rqstp,
 			 struct nfsd4_compound_state *cstate,
 			 struct nfsd4_setclientid_confirm *setclientid_confirm)
 {
-	struct sockaddr *sa = svc_addr(rqstp);
 	struct nfs4_client *conf, *unconf;
 	nfs4_verifier confirm = setclientid_confirm->sc_confirm; 
 	clientid_t * clid = &setclientid_confirm->sc_clientid;
@@ -2221,84 +2220,44 @@
 
 	if (STALE_CLIENTID(clid))
 		return nfserr_stale_clientid;
-	/* 
-	 * XXX The Duplicate Request Cache (DRC) has been checked (??)
-	 * We get here on a DRC miss.
-	 */
-
 	nfs4_lock_state();
 
 	conf = find_confirmed_client(clid);
 	unconf = find_unconfirmed_client(clid);
-
-	status = nfserr_clid_inuse;
-	if (conf && !rpc_cmp_addr((struct sockaddr *) &conf->cl_addr, sa))
-		goto out;
-	if (unconf && !rpc_cmp_addr((struct sockaddr *) &unconf->cl_addr, sa))
-		goto out;
-
 	/*
-	 * section 14.2.34 of RFC 3530 has a description of
-	 * SETCLIENTID_CONFIRM request processing consisting
-	 * of 4 bullet points, labeled as CASE1 - CASE4 below.
+	 * We try hard to give out unique clientid's, so if we get an
+	 * attempt to confirm the same clientid with a different cred,
+	 * there's a bug somewhere.  Let's charitably assume it's our
+	 * bug.
 	 */
-	if (conf && unconf && same_verf(&confirm, &unconf->cl_confirm)) {
-		/*
-		 * RFC 3530 14.2.34 CASE 1:
-		 * callback update
-		 */
-		if (!same_creds(&conf->cl_cred, &unconf->cl_cred))
-			status = nfserr_clid_inuse;
-		else {
-			nfsd4_change_callback(conf, &unconf->cl_cb_conn);
-			nfsd4_probe_callback(conf);
-			expire_client(unconf);
+	status = nfserr_serverfault;
+	if (unconf && !same_creds(&unconf->cl_cred, &rqstp->rq_cred))
+		goto out;
+	if (conf && !same_creds(&conf->cl_cred, &rqstp->rq_cred))
+		goto out;
+	/* cases below refer to rfc 3530 section 14.2.34: */
+	if (!unconf || !same_verf(&confirm, &unconf->cl_confirm)) {
+		if (conf && !unconf) /* case 2: probable retransmit */
 			status = nfs_ok;
+		else /* case 4: client hasn't noticed we rebooted yet? */
+			status = nfserr_stale_clientid;
+		goto out;
+	}
+	status = nfs_ok;
+	if (conf) { /* case 1: callback update */
+		nfsd4_change_callback(conf, &unconf->cl_cb_conn);
+		nfsd4_probe_callback(conf);
+		expire_client(unconf);
+	} else { /* case 3: normal case; new or rebooted client */
+		unsigned int hash = clientstr_hashval(unconf->cl_recdir);
 
+		conf = find_confirmed_client_by_str(unconf->cl_recdir, hash);
+		if (conf) {
+			nfsd4_client_record_remove(conf);
+			expire_client(conf);
 		}
-	} else if (conf && !unconf) {
-		/*
-		 * RFC 3530 14.2.34 CASE 2:
-		 * probable retransmitted request; play it safe and
-		 * do nothing.
-		 */
-		if (!same_creds(&conf->cl_cred, &rqstp->rq_cred))
-			status = nfserr_clid_inuse;
-		else
-			status = nfs_ok;
-	} else if (!conf && unconf
-			&& same_verf(&unconf->cl_confirm, &confirm)) {
-		/*
-		 * RFC 3530 14.2.34 CASE 3:
-		 * Normal case; new or rebooted client:
-		 */
-		if (!same_creds(&unconf->cl_cred, &rqstp->rq_cred)) {
-			status = nfserr_clid_inuse;
-		} else {
-			unsigned int hash =
-				clientstr_hashval(unconf->cl_recdir);
-			conf = find_confirmed_client_by_str(unconf->cl_recdir,
-							    hash);
-			if (conf) {
-				nfsd4_client_record_remove(conf);
-				expire_client(conf);
-			}
-			move_to_confirmed(unconf);
-			conf = unconf;
-			nfsd4_probe_callback(conf);
-			status = nfs_ok;
-		}
-	} else if ((!conf || (conf && !same_verf(&conf->cl_confirm, &confirm)))
-	    && (!unconf || (unconf && !same_verf(&unconf->cl_confirm,
-				    				&confirm)))) {
-		/*
-		 * RFC 3530 14.2.34 CASE 4:
-		 * Client probably hasn't noticed that we rebooted yet.
-		 */
-		status = nfserr_stale_clientid;
-	} else {
-		/* check that we have hit one of the cases...*/
-		status = nfserr_clid_inuse;
+		move_to_confirmed(unconf);
+		nfsd4_probe_callback(unconf);
 	}
 out:
 	nfs4_unlock_state();
@@ -2454,8 +2413,8 @@
 	stp->st_file = fp;
 	stp->st_access_bmap = 0;
 	stp->st_deny_bmap = 0;
-	__set_bit(open->op_share_access, &stp->st_access_bmap);
-	__set_bit(open->op_share_deny, &stp->st_deny_bmap);
+	set_access(open->op_share_access, stp);
+	set_deny(open->op_share_deny, stp);
 	stp->st_openstp = NULL;
 }
 
@@ -2534,8 +2493,8 @@
 	ret = nfserr_locked;
 	/* Search for conflicting share reservations */
 	list_for_each_entry(stp, &fp->fi_stateids, st_perfile) {
-		if (test_bit(deny_type, &stp->st_deny_bmap) ||
-		    test_bit(NFS4_SHARE_DENY_BOTH, &stp->st_deny_bmap))
+		if (test_deny(deny_type, stp) ||
+		    test_deny(NFS4_SHARE_DENY_BOTH, stp))
 			goto out;
 	}
 	ret = nfs_ok;
@@ -2791,7 +2750,7 @@
 	bool new_access;
 	__be32 status;
 
-	new_access = !test_bit(op_share_access, &stp->st_access_bmap);
+	new_access = !test_access(op_share_access, stp);
 	if (new_access) {
 		status = nfs4_get_vfs_file(rqstp, fp, cur_fh, open);
 		if (status)
@@ -2806,8 +2765,8 @@
 		return status;
 	}
 	/* remember the open */
-	__set_bit(op_share_access, &stp->st_access_bmap);
-	__set_bit(open->op_share_deny, &stp->st_deny_bmap);
+	set_access(op_share_access, stp);
+	set_deny(open->op_share_deny, stp);
 
 	return nfs_ok;
 }
@@ -3155,10 +3114,17 @@
 static struct lock_manager nfsd4_manager = {
 };
 
+static bool grace_ended;
+
 static void
 nfsd4_end_grace(void)
 {
+	/* do nothing if grace period already ended */
+	if (grace_ended)
+		return;
+
 	dprintk("NFSD: end of grace period\n");
+	grace_ended = true;
 	nfsd4_record_grace_done(&init_net, boot_time);
 	locks_end_grace(&nfsd4_manager);
 	/*
@@ -3183,8 +3149,7 @@
 	nfs4_lock_state();
 
 	dprintk("NFSD: laundromat service - starting\n");
-	if (locks_in_grace())
-		nfsd4_end_grace();
+	nfsd4_end_grace();
 	INIT_LIST_HEAD(&reaplist);
 	spin_lock(&client_lock);
 	list_for_each_safe(pos, next, &client_lru) {
@@ -3276,18 +3241,18 @@
 }
 
 static inline int
-access_permit_read(unsigned long access_bmap)
+access_permit_read(struct nfs4_ol_stateid *stp)
 {
-	return test_bit(NFS4_SHARE_ACCESS_READ, &access_bmap) ||
-		test_bit(NFS4_SHARE_ACCESS_BOTH, &access_bmap) ||
-		test_bit(NFS4_SHARE_ACCESS_WRITE, &access_bmap);
+	return test_access(NFS4_SHARE_ACCESS_READ, stp) ||
+		test_access(NFS4_SHARE_ACCESS_BOTH, stp) ||
+		test_access(NFS4_SHARE_ACCESS_WRITE, stp);
 }
 
 static inline int
-access_permit_write(unsigned long access_bmap)
+access_permit_write(struct nfs4_ol_stateid *stp)
 {
-	return test_bit(NFS4_SHARE_ACCESS_WRITE, &access_bmap) ||
-		test_bit(NFS4_SHARE_ACCESS_BOTH, &access_bmap);
+	return test_access(NFS4_SHARE_ACCESS_WRITE, stp) ||
+		test_access(NFS4_SHARE_ACCESS_BOTH, stp);
 }
 
 static
@@ -3298,9 +3263,9 @@
 	/* For lock stateid's, we test the parent open, not the lock: */
 	if (stp->st_openstp)
 		stp = stp->st_openstp;
-	if ((flags & WR_STATE) && (!access_permit_write(stp->st_access_bmap)))
+	if ((flags & WR_STATE) && !access_permit_write(stp))
                 goto out;
-	if ((flags & RD_STATE) && (!access_permit_read(stp->st_access_bmap)))
+	if ((flags & RD_STATE) && !access_permit_read(stp))
                 goto out;
 	status = nfs_ok;
 out:
@@ -3340,7 +3305,7 @@
 	return (s32)a->si_generation - (s32)b->si_generation > 0;
 }
 
-static int check_stateid_generation(stateid_t *in, stateid_t *ref, bool has_session)
+static __be32 check_stateid_generation(stateid_t *in, stateid_t *ref, bool has_session)
 {
 	/*
 	 * When sessions are used the stateid generation number is ignored
@@ -3649,10 +3614,10 @@
 
 static inline void nfs4_stateid_downgrade_bit(struct nfs4_ol_stateid *stp, u32 access)
 {
-	if (!test_bit(access, &stp->st_access_bmap))
+	if (!test_access(access, stp))
 		return;
 	nfs4_file_put_access(stp->st_file, nfs4_access_to_omode(access));
-	__clear_bit(access, &stp->st_access_bmap);
+	clear_access(access, stp);
 }
 
 static inline void nfs4_stateid_downgrade(struct nfs4_ol_stateid *stp, u32 to_access)
@@ -3674,12 +3639,12 @@
 }
 
 static void
-reset_union_bmap_deny(unsigned long deny, unsigned long *bmap)
+reset_union_bmap_deny(unsigned long deny, struct nfs4_ol_stateid *stp)
 {
 	int i;
 	for (i = 0; i < 4; i++) {
 		if ((i & deny) != i)
-			__clear_bit(i, bmap);
+			clear_deny(i, stp);
 	}
 }
 
@@ -3706,19 +3671,19 @@
 	if (status)
 		goto out; 
 	status = nfserr_inval;
-	if (!test_bit(od->od_share_access, &stp->st_access_bmap)) {
-		dprintk("NFSD:access not a subset current bitmap: 0x%lx, input access=%08x\n",
+	if (!test_access(od->od_share_access, stp)) {
+		dprintk("NFSD: access not a subset current bitmap: 0x%lx, input access=%08x\n",
 			stp->st_access_bmap, od->od_share_access);
 		goto out;
 	}
-	if (!test_bit(od->od_share_deny, &stp->st_deny_bmap)) {
+	if (!test_deny(od->od_share_deny, stp)) {
 		dprintk("NFSD:deny not a subset current bitmap: 0x%lx, input deny=%08x\n",
 			stp->st_deny_bmap, od->od_share_deny);
 		goto out;
 	}
 	nfs4_stateid_downgrade(stp, od->od_share_access);
 
-	reset_union_bmap_deny(od->od_share_deny, &stp->st_deny_bmap);
+	reset_union_bmap_deny(od->od_share_deny, stp);
 
 	update_stateid(&stp->st_stid.sc_stateid);
 	memcpy(&od->od_stateid, &stp->st_stid.sc_stateid, sizeof(stateid_t));
@@ -4008,13 +3973,13 @@
 	struct nfs4_file *fp = lock_stp->st_file;
 	int oflag = nfs4_access_to_omode(access);
 
-	if (test_bit(access, &lock_stp->st_access_bmap))
+	if (test_access(access, lock_stp))
 		return;
 	nfs4_file_get_access(fp, oflag);
-	__set_bit(access, &lock_stp->st_access_bmap);
+	set_access(access, lock_stp);
 }
 
-__be32 lookup_or_create_lock_state(struct nfsd4_compound_state *cstate, struct nfs4_ol_stateid *ost, struct nfsd4_lock *lock, struct nfs4_ol_stateid **lst, bool *new)
+static __be32 lookup_or_create_lock_state(struct nfsd4_compound_state *cstate, struct nfs4_ol_stateid *ost, struct nfsd4_lock *lock, struct nfs4_ol_stateid **lst, bool *new)
 {
 	struct nfs4_file *fi = ost->st_file;
 	struct nfs4_openowner *oo = openowner(ost->st_stateowner);
@@ -4055,7 +4020,6 @@
 	struct nfs4_openowner *open_sop = NULL;
 	struct nfs4_lockowner *lock_sop = NULL;
 	struct nfs4_ol_stateid *lock_stp;
-	struct nfs4_file *fp;
 	struct file *filp = NULL;
 	struct file_lock file_lock;
 	struct file_lock conflock;
@@ -4123,7 +4087,6 @@
 			goto out;
 	}
 	lock_sop = lockowner(lock_stp->st_stateowner);
-	fp = lock_stp->st_file;
 
 	lkflg = setlkflg(lock->lk_type);
 	status = nfs4_check_openmode(lock_stp, lkflg);
@@ -4715,6 +4678,7 @@
 	nfsd4_client_tracking_init(&init_net);
 	boot_time = get_seconds();
 	locks_start_grace(&nfsd4_manager);
+	grace_ended = false;
 	printk(KERN_INFO "NFSD: starting %ld-second grace period\n",
 	       nfsd4_grace);
 	ret = set_callback_cred();
diff --git a/fs/nfsd/nfs4xdr.c b/fs/nfsd/nfs4xdr.c
index 74c00bc..4949667 100644
--- a/fs/nfsd/nfs4xdr.c
+++ b/fs/nfsd/nfs4xdr.c
@@ -1674,12 +1674,12 @@
 
 static void write32(__be32 **p, u32 n)
 {
-	*(*p)++ = n;
+	*(*p)++ = htonl(n);
 }
 
 static void write64(__be32 **p, u64 n)
 {
-	write32(p, (u32)(n >> 32));
+	write32(p, (n >> 32));
 	write32(p, (u32)n);
 }
 
@@ -1744,15 +1744,16 @@
 }
 
 /* Encode as an array of strings the string given with components
- * separated @sep.
+ * separated by @sep, escaped with esc_enter and esc_exit.
  */
-static __be32 nfsd4_encode_components(char sep, char *components,
-				   __be32 **pp, int *buflen)
+static __be32 nfsd4_encode_components_esc(char sep, char *components,
+				   __be32 **pp, int *buflen,
+				   char esc_enter, char esc_exit)
 {
 	__be32 *p = *pp;
 	__be32 *countp = p;
 	int strlen, count=0;
-	char *str, *end;
+	char *str, *end, *next;
 
 	dprintk("nfsd4_encode_components(%s)\n", components);
 	if ((*buflen -= 4) < 0)
@@ -1760,8 +1761,23 @@
 	WRITE32(0); /* We will fill this in with @count later */
 	end = str = components;
 	while (*end) {
-		for (; *end && (*end != sep); end++)
-			; /* Point to end of component */
+		bool found_esc = false;
+
+		/* try to parse as esc_enter, ..., esc_exit, sep */
+		if (*str == esc_enter) {
+			for (; *end && (*end != esc_exit); end++)
+				/* find esc_exit or end of string */;
+			next = end + 1;
+			if (*end && (!*next || *next == sep)) {
+				str++;
+				found_esc = true;
+			}
+		}
+
+		if (!found_esc)
+			for (; *end && (*end != sep); end++)
+				/* find sep or end of string */;
+
 		strlen = end - str;
 		if (strlen) {
 			if ((*buflen -= ((XDR_QUADLEN(strlen) << 2) + 4)) < 0)
@@ -1780,6 +1796,15 @@
 	return 0;
 }
 
+/* Encode as an array of strings the string given with components
+ * separated by @sep.
+ */
+static __be32 nfsd4_encode_components(char sep, char *components,
+				   __be32 **pp, int *buflen)
+{
+	return nfsd4_encode_components_esc(sep, components, pp, buflen, 0, 0);
+}
+
 /*
  * encode a location element of a fs_locations structure
  */
@@ -1789,7 +1814,8 @@
 	__be32 status;
 	__be32 *p = *pp;
 
-	status = nfsd4_encode_components(':', location->hosts, &p, buflen);
+	status = nfsd4_encode_components_esc(':', location->hosts, &p, buflen,
+						'[', ']');
 	if (status)
 		return status;
 	status = nfsd4_encode_components('/', location->path, &p, buflen);
@@ -3251,7 +3277,7 @@
 }
 
 static __be32
-nfsd4_encode_exchange_id(struct nfsd4_compoundres *resp, int nfserr,
+nfsd4_encode_exchange_id(struct nfsd4_compoundres *resp, __be32 nfserr,
 			 struct nfsd4_exchange_id *exid)
 {
 	__be32 *p;
@@ -3306,7 +3332,7 @@
 }
 
 static __be32
-nfsd4_encode_create_session(struct nfsd4_compoundres *resp, int nfserr,
+nfsd4_encode_create_session(struct nfsd4_compoundres *resp, __be32 nfserr,
 			    struct nfsd4_create_session *sess)
 {
 	__be32 *p;
@@ -3355,14 +3381,14 @@
 }
 
 static __be32
-nfsd4_encode_destroy_session(struct nfsd4_compoundres *resp, int nfserr,
+nfsd4_encode_destroy_session(struct nfsd4_compoundres *resp, __be32 nfserr,
 			     struct nfsd4_destroy_session *destroy_session)
 {
 	return nfserr;
 }
 
 static __be32
-nfsd4_encode_free_stateid(struct nfsd4_compoundres *resp, int nfserr,
+nfsd4_encode_free_stateid(struct nfsd4_compoundres *resp, __be32 nfserr,
 			  struct nfsd4_free_stateid *free_stateid)
 {
 	__be32 *p;
@@ -3371,13 +3397,13 @@
 		return nfserr;
 
 	RESERVE_SPACE(4);
-	WRITE32(nfserr);
+	*p++ = nfserr;
 	ADJUST_ARGS();
 	return nfserr;
 }
 
 static __be32
-nfsd4_encode_sequence(struct nfsd4_compoundres *resp, int nfserr,
+nfsd4_encode_sequence(struct nfsd4_compoundres *resp, __be32 nfserr,
 		      struct nfsd4_sequence *seq)
 {
 	__be32 *p;
@@ -3399,8 +3425,8 @@
 	return 0;
 }
 
-__be32
-nfsd4_encode_test_stateid(struct nfsd4_compoundres *resp, int nfserr,
+static __be32
+nfsd4_encode_test_stateid(struct nfsd4_compoundres *resp, __be32 nfserr,
 			  struct nfsd4_test_stateid *test_stateid)
 {
 	struct nfsd4_test_stateid_id *stateid, *next;
@@ -3503,7 +3529,7 @@
  * Our se_fmaxresp_cached will always be a multiple of PAGE_SIZE, and so
  * will be at least a page and will therefore hold the xdr_buf head.
  */
-int nfsd4_check_resp_size(struct nfsd4_compoundres *resp, u32 pad)
+__be32 nfsd4_check_resp_size(struct nfsd4_compoundres *resp, u32 pad)
 {
 	struct xdr_buf *xb = &resp->rqstp->rq_res;
 	struct nfsd4_session *session = NULL;
diff --git a/fs/nfsd/nfsctl.c b/fs/nfsd/nfsctl.c
index 2c53be6..c55298e 100644
--- a/fs/nfsd/nfsctl.c
+++ b/fs/nfsd/nfsctl.c
@@ -127,7 +127,17 @@
 
 static int exports_open(struct inode *inode, struct file *file)
 {
-	return seq_open(file, &nfs_exports_op);
+	int err;
+	struct seq_file *seq;
+	struct nfsd_net *nn = net_generic(&init_net, nfsd_net_id);
+
+	err = seq_open(file, &nfs_exports_op);
+	if (err)
+		return err;
+
+	seq = file->private_data;
+	seq->private = nn->svc_export_cache;
+	return 0;
 }
 
 static const struct file_operations exports_operations = {
@@ -345,7 +355,7 @@
 	if (!dom)
 		return -ENOMEM;
 
-	len = exp_rootfh(dom, path, &fh,  maxsize);
+	len = exp_rootfh(&init_net, dom, path, &fh,  maxsize);
 	auth_domain_put(dom);
 	if (len)
 		return len;
@@ -651,6 +661,7 @@
 {
 	char *mesg = buf;
 	int fd, err;
+	struct net *net = &init_net;
 
 	err = get_int(&mesg, &fd);
 	if (err != 0 || fd < 0)
@@ -662,6 +673,8 @@
 
 	err = svc_addsock(nfsd_serv, fd, buf, SIMPLE_TRANSACTION_LIMIT);
 	if (err < 0) {
+		if (nfsd_serv->sv_nrthreads == 1)
+			svc_shutdown_net(nfsd_serv, net);
 		svc_destroy(nfsd_serv);
 		return err;
 	}
@@ -699,6 +712,7 @@
 	char transport[16];
 	struct svc_xprt *xprt;
 	int port, err;
+	struct net *net = &init_net;
 
 	if (sscanf(buf, "%15s %4u", transport, &port) != 2)
 		return -EINVAL;
@@ -710,12 +724,12 @@
 	if (err != 0)
 		return err;
 
-	err = svc_create_xprt(nfsd_serv, transport, &init_net,
+	err = svc_create_xprt(nfsd_serv, transport, net,
 				PF_INET, port, SVC_SOCK_ANONYMOUS);
 	if (err < 0)
 		goto out_err;
 
-	err = svc_create_xprt(nfsd_serv, transport, &init_net,
+	err = svc_create_xprt(nfsd_serv, transport, net,
 				PF_INET6, port, SVC_SOCK_ANONYMOUS);
 	if (err < 0 && err != -EAFNOSUPPORT)
 		goto out_close;
@@ -724,12 +738,14 @@
 	nfsd_serv->sv_nrthreads--;
 	return 0;
 out_close:
-	xprt = svc_find_xprt(nfsd_serv, transport, &init_net, PF_INET, port);
+	xprt = svc_find_xprt(nfsd_serv, transport, net, PF_INET, port);
 	if (xprt != NULL) {
 		svc_close_xprt(xprt);
 		svc_xprt_put(xprt);
 	}
 out_err:
+	if (nfsd_serv->sv_nrthreads == 1)
+		svc_shutdown_net(nfsd_serv, net);
 	svc_destroy(nfsd_serv);
 	return err;
 }
@@ -1127,7 +1143,34 @@
 #endif
 
 int nfsd_net_id;
+
+static __net_init int nfsd_init_net(struct net *net)
+{
+	int retval;
+
+	retval = nfsd_export_init(net);
+	if (retval)
+		goto out_export_error;
+	retval = nfsd_idmap_init(net);
+	if (retval)
+		goto out_idmap_error;
+	return 0;
+
+out_idmap_error:
+	nfsd_export_shutdown(net);
+out_export_error:
+	return retval;
+}
+
+static __net_exit void nfsd_exit_net(struct net *net)
+{
+	nfsd_idmap_shutdown(net);
+	nfsd_export_shutdown(net);
+}
+
 static struct pernet_operations nfsd_net_ops = {
+	.init = nfsd_init_net,
+	.exit = nfsd_exit_net,
 	.id   = &nfsd_net_id,
 	.size = sizeof(struct nfsd_net),
 };
@@ -1154,16 +1197,10 @@
 	retval = nfsd_reply_cache_init();
 	if (retval)
 		goto out_free_stat;
-	retval = nfsd_export_init();
-	if (retval)
-		goto out_free_cache;
 	nfsd_lockd_init();	/* lockd->nfsd callbacks */
-	retval = nfsd_idmap_init();
-	if (retval)
-		goto out_free_lockd;
 	retval = create_proc_exports_entry();
 	if (retval)
-		goto out_free_idmap;
+		goto out_free_lockd;
 	retval = register_filesystem(&nfsd_fs_type);
 	if (retval)
 		goto out_free_all;
@@ -1171,12 +1208,8 @@
 out_free_all:
 	remove_proc_entry("fs/nfs/exports", NULL);
 	remove_proc_entry("fs/nfs", NULL);
-out_free_idmap:
-	nfsd_idmap_shutdown();
 out_free_lockd:
 	nfsd_lockd_shutdown();
-	nfsd_export_shutdown();
-out_free_cache:
 	nfsd_reply_cache_shutdown();
 out_free_stat:
 	nfsd_stat_shutdown();
@@ -1192,13 +1225,11 @@
 
 static void __exit exit_nfsd(void)
 {
-	nfsd_export_shutdown();
 	nfsd_reply_cache_shutdown();
 	remove_proc_entry("fs/nfs/exports", NULL);
 	remove_proc_entry("fs/nfs", NULL);
 	nfsd_stat_shutdown();
 	nfsd_lockd_shutdown();
-	nfsd_idmap_shutdown();
 	nfsd4_free_slabs();
 	nfsd_fault_inject_cleanup();
 	unregister_filesystem(&nfsd_fs_type);
diff --git a/fs/nfsd/nfsfh.c b/fs/nfsd/nfsfh.c
index 68454e7..cc79300 100644
--- a/fs/nfsd/nfsfh.c
+++ b/fs/nfsd/nfsfh.c
@@ -636,7 +636,7 @@
 #endif
 	}
 	if (exp) {
-		cache_put(&exp->h, &svc_export_cache);
+		exp_put(exp);
 		fhp->fh_export = NULL;
 	}
 	return;
diff --git a/fs/nfsd/nfssvc.c b/fs/nfsd/nfssvc.c
index 28dfad3..ee709fc 100644
--- a/fs/nfsd/nfssvc.c
+++ b/fs/nfsd/nfssvc.c
@@ -11,6 +11,7 @@
 #include <linux/module.h>
 #include <linux/fs_struct.h>
 #include <linux/swap.h>
+#include <linux/nsproxy.h>
 
 #include <linux/sunrpc/stats.h>
 #include <linux/sunrpc/svcsock.h>
@@ -220,7 +221,7 @@
 	ret = nfsd_init_socks(port);
 	if (ret)
 		goto out_racache;
-	ret = lockd_up();
+	ret = lockd_up(&init_net);
 	if (ret)
 		goto out_racache;
 	ret = nfs4_state_start();
@@ -229,7 +230,7 @@
 	nfsd_up = true;
 	return 0;
 out_lockd:
-	lockd_down();
+	lockd_down(&init_net);
 out_racache:
 	nfsd_racache_shutdown();
 	return ret;
@@ -246,7 +247,7 @@
 	if (!nfsd_up)
 		return;
 	nfs4_state_shutdown();
-	lockd_down();
+	lockd_down(&init_net);
 	nfsd_racache_shutdown();
 	nfsd_up = false;
 }
@@ -261,7 +262,7 @@
 
 	printk(KERN_WARNING "nfsd: last server has exited, flushing export "
 			    "cache\n");
-	nfsd_export_flush();
+	nfsd_export_flush(net);
 }
 
 void nfsd_reset_versions(void)
@@ -330,6 +331,8 @@
 
 int nfsd_create_serv(void)
 {
+	int error;
+
 	WARN_ON(!mutex_is_locked(&nfsd_mutex));
 	if (nfsd_serv) {
 		svc_get(nfsd_serv);
@@ -343,6 +346,12 @@
 	if (nfsd_serv == NULL)
 		return -ENOMEM;
 
+	error = svc_bind(nfsd_serv, current->nsproxy->net_ns);
+	if (error < 0) {
+		svc_destroy(nfsd_serv);
+		return error;
+	}
+
 	set_max_drc();
 	do_gettimeofday(&nfssvc_boot);		/* record boot time */
 	return 0;
@@ -373,6 +382,7 @@
 	int i = 0;
 	int tot = 0;
 	int err = 0;
+	struct net *net = &init_net;
 
 	WARN_ON(!mutex_is_locked(&nfsd_mutex));
 
@@ -417,6 +427,9 @@
 		if (err)
 			break;
 	}
+
+	if (nfsd_serv->sv_nrthreads == 1)
+		svc_shutdown_net(nfsd_serv, net);
 	svc_destroy(nfsd_serv);
 
 	return err;
@@ -432,6 +445,7 @@
 {
 	int	error;
 	bool	nfsd_up_before;
+	struct net *net = &init_net;
 
 	mutex_lock(&nfsd_mutex);
 	dprintk("nfsd: creating service\n");
@@ -464,6 +478,8 @@
 	if (error < 0 && !nfsd_up_before)
 		nfsd_shutdown();
 out_destroy:
+	if (nfsd_serv->sv_nrthreads == 1)
+		svc_shutdown_net(nfsd_serv, net);
 	svc_destroy(nfsd_serv);		/* Release server */
 out:
 	mutex_unlock(&nfsd_mutex);
@@ -547,6 +563,9 @@
 	nfsdstats.th_cnt --;
 
 out:
+	if (rqstp->rq_server->sv_nrthreads == 1)
+		svc_shutdown_net(rqstp->rq_server, &init_net);
+
 	/* Release the thread */
 	svc_exit_thread(rqstp);
 
@@ -659,8 +678,12 @@
 int nfsd_pool_stats_release(struct inode *inode, struct file *file)
 {
 	int ret = seq_release(inode, file);
+	struct net *net = &init_net;
+
 	mutex_lock(&nfsd_mutex);
 	/* this function really, really should have been called svc_put() */
+	if (nfsd_serv->sv_nrthreads == 1)
+		svc_shutdown_net(nfsd_serv, net);
 	svc_destroy(nfsd_serv);
 	mutex_unlock(&nfsd_mutex);
 	return ret;
diff --git a/fs/nfsd/state.h b/fs/nfsd/state.h
index 89ab137..849091e 100644
--- a/fs/nfsd/state.h
+++ b/fs/nfsd/state.h
@@ -232,7 +232,6 @@
 	time_t                  cl_time;        /* time of last lease renewal */
 	struct sockaddr_storage	cl_addr; 	/* client ipaddress */
 	u32			cl_flavor;	/* setclientid pseudoflavor */
-	char			*cl_principal;	/* setclientid principal name */
 	struct svc_cred		cl_cred; 	/* setclientid principal */
 	clientid_t		cl_clientid;	/* generated by server */
 	nfs4_verifier		cl_confirm;	/* generated by server */
diff --git a/fs/nfsd/vfs.c b/fs/nfsd/vfs.c
index 5686661..c8bd9c3 100644
--- a/fs/nfsd/vfs.c
+++ b/fs/nfsd/vfs.c
@@ -2039,7 +2039,7 @@
 	if (err)
 		goto out;
 
-	offset = vfs_llseek(file, offset, 0);
+	offset = vfs_llseek(file, offset, SEEK_SET);
 	if (offset < 0) {
 		err = nfserrno((int)offset);
 		goto out_close;
diff --git a/fs/nfsd/xdr4.h b/fs/nfsd/xdr4.h
index 1b35015..acd127d 100644
--- a/fs/nfsd/xdr4.h
+++ b/fs/nfsd/xdr4.h
@@ -60,7 +60,7 @@
 	__be32			*datap;
 	size_t			iovlen;
 	u32			minorversion;
-	u32			status;
+	__be32			status;
 	stateid_t	current_stateid;
 	stateid_t	save_stateid;
 	/* to indicate current and saved state id presents */
@@ -364,7 +364,7 @@
 };
 
 struct nfsd4_test_stateid {
-	__be32		ts_num_ids;
+	u32		ts_num_ids;
 	struct list_head ts_stateid_list;
 };
 
@@ -549,7 +549,7 @@
 		struct nfsd4_compoundargs *);
 int nfs4svc_encode_compoundres(struct svc_rqst *, __be32 *,
 		struct nfsd4_compoundres *);
-int nfsd4_check_resp_size(struct nfsd4_compoundres *, u32);
+__be32 nfsd4_check_resp_size(struct nfsd4_compoundres *, u32);
 void nfsd4_encode_operation(struct nfsd4_compoundres *, struct nfsd4_op *);
 void nfsd4_encode_replay(struct nfsd4_compoundres *resp, struct nfsd4_op *op);
 __be32 nfsd4_encode_fattr(struct svc_fh *fhp, struct svc_export *exp,
diff --git a/fs/nilfs2/file.c b/fs/nilfs2/file.c
index 2660152..62cebc8 100644
--- a/fs/nilfs2/file.c
+++ b/fs/nilfs2/file.c
@@ -37,6 +37,7 @@
 	 * This function should be implemented when the writeback function
 	 * will be implemented.
 	 */
+	struct the_nilfs *nilfs;
 	struct inode *inode = file->f_mapping->host;
 	int err;
 
@@ -45,18 +46,21 @@
 		return err;
 	mutex_lock(&inode->i_mutex);
 
-	if (!nilfs_inode_dirty(inode)) {
-		mutex_unlock(&inode->i_mutex);
-		return 0;
+	if (nilfs_inode_dirty(inode)) {
+		if (datasync)
+			err = nilfs_construct_dsync_segment(inode->i_sb, inode,
+							    0, LLONG_MAX);
+		else
+			err = nilfs_construct_segment(inode->i_sb);
 	}
-
-	if (datasync)
-		err = nilfs_construct_dsync_segment(inode->i_sb, inode, 0,
-						    LLONG_MAX);
-	else
-		err = nilfs_construct_segment(inode->i_sb);
-
 	mutex_unlock(&inode->i_mutex);
+
+	nilfs = inode->i_sb->s_fs_info;
+	if (!err && nilfs_test_opt(nilfs, BARRIER)) {
+		err = blkdev_issue_flush(inode->i_sb->s_bdev, GFP_KERNEL, NULL);
+		if (err != -EIO)
+			err = 0;
+	}
 	return err;
 }
 
diff --git a/fs/nilfs2/gcinode.c b/fs/nilfs2/gcinode.c
index 08a07a2..57ceaf3 100644
--- a/fs/nilfs2/gcinode.c
+++ b/fs/nilfs2/gcinode.c
@@ -191,6 +191,8 @@
 	while (!list_empty(head)) {
 		ii = list_first_entry(head, struct nilfs_inode_info, i_dirty);
 		list_del_init(&ii->i_dirty);
+		truncate_inode_pages(&ii->vfs_inode.i_data, 0);
+		nilfs_btnode_cache_clear(&ii->i_btnode_cache);
 		iput(&ii->vfs_inode);
 	}
 }
diff --git a/fs/nilfs2/inode.c b/fs/nilfs2/inode.c
index 8f7b95a..7cc6446 100644
--- a/fs/nilfs2/inode.c
+++ b/fs/nilfs2/inode.c
@@ -734,7 +734,7 @@
 	if (inode->i_nlink || !ii->i_root || unlikely(is_bad_inode(inode))) {
 		if (inode->i_data.nrpages)
 			truncate_inode_pages(&inode->i_data, 0);
-		end_writeback(inode);
+		clear_inode(inode);
 		nilfs_clear_inode(inode);
 		return;
 	}
@@ -746,7 +746,7 @@
 	/* TODO: some of the following operations may fail.  */
 	nilfs_truncate_bmap(ii, 0);
 	nilfs_mark_inode_dirty(inode);
-	end_writeback(inode);
+	clear_inode(inode);
 
 	ret = nilfs_ifile_delete_inode(ii->i_root->ifile, inode->i_ino);
 	if (!ret)
diff --git a/fs/nilfs2/ioctl.c b/fs/nilfs2/ioctl.c
index 2a70fce..06658ca 100644
--- a/fs/nilfs2/ioctl.c
+++ b/fs/nilfs2/ioctl.c
@@ -692,8 +692,14 @@
 	if (ret < 0)
 		return ret;
 
+	nilfs = inode->i_sb->s_fs_info;
+	if (nilfs_test_opt(nilfs, BARRIER)) {
+		ret = blkdev_issue_flush(inode->i_sb->s_bdev, GFP_KERNEL, NULL);
+		if (ret == -EIO)
+			return ret;
+	}
+
 	if (argp != NULL) {
-		nilfs = inode->i_sb->s_fs_info;
 		down_read(&nilfs->ns_segctor_sem);
 		cno = nilfs->ns_cno - 1;
 		up_read(&nilfs->ns_segctor_sem);
diff --git a/fs/nilfs2/namei.c b/fs/nilfs2/namei.c
index 0bb2c20..b728479 100644
--- a/fs/nilfs2/namei.c
+++ b/fs/nilfs2/namei.c
@@ -508,31 +508,29 @@
 	return nilfs_get_dentry(sb, fid->cno, fid->parent_ino, fid->parent_gen);
 }
 
-static int nilfs_encode_fh(struct dentry *dentry, __u32 *fh, int *lenp,
-			   int connectable)
+static int nilfs_encode_fh(struct inode *inode, __u32 *fh, int *lenp,
+			   struct inode *parent)
 {
 	struct nilfs_fid *fid = (struct nilfs_fid *)fh;
-	struct inode *inode = dentry->d_inode;
 	struct nilfs_root *root = NILFS_I(inode)->i_root;
 	int type;
 
-	if (*lenp < NILFS_FID_SIZE_NON_CONNECTABLE ||
-	    (connectable && *lenp < NILFS_FID_SIZE_CONNECTABLE))
+	if (parent && *lenp < NILFS_FID_SIZE_CONNECTABLE) {
+		*lenp = NILFS_FID_SIZE_CONNECTABLE;
 		return 255;
+	}
+	if (*lenp < NILFS_FID_SIZE_NON_CONNECTABLE) {
+		*lenp = NILFS_FID_SIZE_NON_CONNECTABLE;
+		return 255;
+	}
 
 	fid->cno = root->cno;
 	fid->ino = inode->i_ino;
 	fid->gen = inode->i_generation;
 
-	if (connectable && !S_ISDIR(inode->i_mode)) {
-		struct inode *parent;
-
-		spin_lock(&dentry->d_lock);
-		parent = dentry->d_parent->d_inode;
+	if (parent) {
 		fid->parent_ino = parent->i_ino;
 		fid->parent_gen = parent->i_generation;
-		spin_unlock(&dentry->d_lock);
-
 		type = FILEID_NILFS_WITH_PARENT;
 		*lenp = NILFS_FID_SIZE_CONNECTABLE;
 	} else {
diff --git a/fs/nilfs2/segment.c b/fs/nilfs2/segment.c
index 0e72ad6..88e11fb 100644
--- a/fs/nilfs2/segment.c
+++ b/fs/nilfs2/segment.c
@@ -2309,6 +2309,8 @@
 		if (!test_bit(NILFS_I_UPDATED, &ii->i_state))
 			continue;
 		list_del_init(&ii->i_dirty);
+		truncate_inode_pages(&ii->vfs_inode.i_data, 0);
+		nilfs_btnode_cache_clear(&ii->i_btnode_cache);
 		iput(&ii->vfs_inode);
 	}
 }
diff --git a/fs/nls/Kconfig b/fs/nls/Kconfig
index a39edc4..e2ce79e 100644
--- a/fs/nls/Kconfig
+++ b/fs/nls/Kconfig
@@ -30,7 +30,7 @@
 	  cp949, cp950, cp1251, cp1255, euc-jp, euc-kr, gb2312, iso8859-1,
 	  iso8859-2, iso8859-3, iso8859-4, iso8859-5, iso8859-6, iso8859-7,
 	  iso8859-8, iso8859-9, iso8859-13, iso8859-14, iso8859-15,
-	  koi8-r, koi8-ru, koi8-u, sjis, tis-620, utf8.
+	  koi8-r, koi8-ru, koi8-u, sjis, tis-620, macroman, utf8.
 	  If you specify a wrong value, it will use the built-in NLS;
 	  compatible with iso8859-1.
 
@@ -452,6 +452,161 @@
 	  input/output character sets. Say Y here for the preferred Ukrainian
 	  (koi8-u) and Belarusian (koi8-ru) character sets.
 
+config NLS_MAC_ROMAN
+	tristate "Codepage macroman"
+	---help---
+	  The Apple HFS file system family can deal with filenames in
+	  native language character sets. These character sets are stored in
+	  so-called MAC codepages. You need to include the appropriate
+	  codepage if you want to be able to read/write these filenames on
+	  Mac partitions correctly. This does apply to the filenames
+	  only, not to the file contents. You can include several codepages;
+	  say Y here if you want to include the Mac codepage that is used for
+	  much of Europe -- United Kingdom, Germany, Spain, Italy, and [add
+	  more countries here].
+
+	  If unsure, say Y.
+
+config NLS_MAC_CELTIC
+	tristate "Codepage macceltic"
+	---help---
+	  The Apple HFS file system family can deal with filenames in
+	  native language character sets. These character sets are stored in
+	  so-called MAC codepages. You need to include the appropriate
+	  codepage if you want to be able to read/write these filenames on
+	  Mac partitions correctly. This does apply to the filenames
+	  only, not to the file contents. You can include several codepages;
+	  say Y here if you want to include the Mac codepage that is used for
+	  Celtic.
+
+	  If unsure, say Y.
+
+config NLS_MAC_CENTEURO
+	tristate "Codepage maccenteuro"
+	---help---
+	  The Apple HFS file system family can deal with filenames in
+	  native language character sets. These character sets are stored in
+	  so-called MAC codepages. You need to include the appropriate
+	  codepage if you want to be able to read/write these filenames on
+	  Mac partitions correctly. This does apply to the filenames
+	  only, not to the file contents. You can include several codepages;
+	  say Y here if you want to include the Mac codepage that is used for
+	  Central Europe.
+
+	  If unsure, say Y.
+
+config NLS_MAC_CROATIAN
+	tristate "Codepage maccroatian"
+	---help---
+	  The Apple HFS file system family can deal with filenames in
+	  native language character sets. These character sets are stored in
+	  so-called MAC codepages. You need to include the appropriate
+	  codepage if you want to be able to read/write these filenames on
+	  Mac partitions correctly. This does apply to the filenames
+	  only, not to the file contents. You can include several codepages;
+	  say Y here if you want to include the Mac codepage that is used for
+	  Croatian.
+
+	  If unsure, say Y.
+
+config NLS_MAC_CYRILLIC
+	tristate "Codepage maccyrillic"
+	---help---
+	  The Apple HFS file system family can deal with filenames in
+	  native language character sets. These character sets are stored in
+	  so-called MAC codepages. You need to include the appropriate
+	  codepage if you want to be able to read/write these filenames on
+	  Mac partitions correctly. This does apply to the filenames
+	  only, not to the file contents. You can include several codepages;
+	  say Y here if you want to include the Mac codepage that is used for
+	  Cyrillic.
+
+	  If unsure, say Y.
+
+config NLS_MAC_GAELIC
+	tristate "Codepage macgaelic"
+	---help---
+	  The Apple HFS file system family can deal with filenames in
+	  native language character sets. These character sets are stored in
+	  so-called MAC codepages. You need to include the appropriate
+	  codepage if you want to be able to read/write these filenames on
+	  Mac partitions correctly. This does apply to the filenames
+	  only, not to the file contents. You can include several codepages;
+	  say Y here if you want to include the Mac codepage that is used for
+	  Gaelic.
+
+	  If unsure, say Y.
+
+config NLS_MAC_GREEK
+	tristate "Codepage macgreek"
+	---help---
+	  The Apple HFS file system family can deal with filenames in
+	  native language character sets. These character sets are stored in
+	  so-called MAC codepages. You need to include the appropriate
+	  codepage if you want to be able to read/write these filenames on
+	  Mac partitions correctly. This does apply to the filenames
+	  only, not to the file contents. You can include several codepages;
+	  say Y here if you want to include the Mac codepage that is used for
+	  Greek.
+
+	  If unsure, say Y.
+
+config NLS_MAC_ICELAND
+	tristate "Codepage maciceland"
+	---help---
+	  The Apple HFS file system family can deal with filenames in
+	  native language character sets. These character sets are stored in
+	  so-called MAC codepages. You need to include the appropriate
+	  codepage if you want to be able to read/write these filenames on
+	  Mac partitions correctly. This does apply to the filenames
+	  only, not to the file contents. You can include several codepages;
+	  say Y here if you want to include the Mac codepage that is used for
+	  Icelandic.
+
+	  If unsure, say Y.
+
+config NLS_MAC_INUIT
+	tristate "Codepage macinuit"
+	---help---
+	  The Apple HFS file system family can deal with filenames in
+	  native language character sets. These character sets are stored in
+	  so-called MAC codepages. You need to include the appropriate
+	  codepage if you want to be able to read/write these filenames on
+	  Mac partitions correctly. This does apply to the filenames
+	  only, not to the file contents. You can include several codepages;
+	  say Y here if you want to include the Mac codepage that is used for
+	  Inuit.
+
+	  If unsure, say Y.
+
+config NLS_MAC_ROMANIAN
+	tristate "Codepage macromanian"
+	---help---
+	  The Apple HFS file system family can deal with filenames in
+	  native language character sets. These character sets are stored in
+	  so-called MAC codepages. You need to include the appropriate
+	  codepage if you want to be able to read/write these filenames on
+	  Mac partitions correctly. This does apply to the filenames
+	  only, not to the file contents. You can include several codepages;
+	  say Y here if you want to include the Mac codepage that is used for
+	  Romanian.
+
+	  If unsure, say Y.
+
+config NLS_MAC_TURKISH
+	tristate "Codepage macturkish"
+	---help---
+	  The Apple HFS file system family can deal with filenames in
+	  native language character sets. These character sets are stored in
+	  so-called MAC codepages. You need to include the appropriate
+	  codepage if you want to be able to read/write these filenames on
+	  Mac partitions correctly. This does apply to the filenames
+	  only, not to the file contents. You can include several codepages;
+	  say Y here if you want to include the Mac codepage that is used for
+	  Turkish.
+
+	  If unsure, say Y.
+
 config NLS_UTF8
 	tristate "NLS UTF-8"
 	help
diff --git a/fs/nls/Makefile b/fs/nls/Makefile
index f499dd7..8ae37c1 100644
--- a/fs/nls/Makefile
+++ b/fs/nls/Makefile
@@ -42,3 +42,14 @@
 obj-$(CONFIG_NLS_KOI8_R)	+= nls_koi8-r.o
 obj-$(CONFIG_NLS_KOI8_U)	+= nls_koi8-u.o nls_koi8-ru.o
 obj-$(CONFIG_NLS_UTF8)		+= nls_utf8.o
+obj-$(CONFIG_NLS_MAC_CELTIC)    += mac-celtic.o
+obj-$(CONFIG_NLS_MAC_CENTEURO)  += mac-centeuro.o
+obj-$(CONFIG_NLS_MAC_CROATIAN)  += mac-croatian.o
+obj-$(CONFIG_NLS_MAC_CYRILLIC)  += mac-cyrillic.o
+obj-$(CONFIG_NLS_MAC_GAELIC)    += mac-gaelic.o
+obj-$(CONFIG_NLS_MAC_GREEK)     += mac-greek.o
+obj-$(CONFIG_NLS_MAC_ICELAND)   += mac-iceland.o
+obj-$(CONFIG_NLS_MAC_INUIT)     += mac-inuit.o
+obj-$(CONFIG_NLS_MAC_ROMANIAN)  += mac-romanian.o
+obj-$(CONFIG_NLS_MAC_ROMAN)     += mac-roman.o
+obj-$(CONFIG_NLS_MAC_TURKISH)   += mac-turkish.o
diff --git a/fs/nls/mac-celtic.c b/fs/nls/mac-celtic.c
new file mode 100644
index 0000000..634a8b7
--- /dev/null
+++ b/fs/nls/mac-celtic.c
@@ -0,0 +1,602 @@
+/*
+ * linux/fs/nls/mac-celtic.c
+ *
+ * Charset macceltic translation tables.
+ * Generated automatically from the Unicode and charset
+ * tables from the Unicode Organization (www.unicode.org).
+ * The Unicode to charset table has only exact mappings.
+ */
+
+/*
+ * COPYRIGHT AND PERMISSION NOTICE
+ *
+ * Copyright 1991-2012 Unicode, Inc.  All rights reserved.  Distributed under
+ * the Terms of Use in http://www.unicode.org/copyright.html.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of the Unicode data files and any associated documentation (the "Data
+ * Files") or Unicode software and any associated documentation (the
+ * "Software") to deal in the Data Files or Software without restriction,
+ * including without limitation the rights to use, copy, modify, merge,
+ * publish, distribute, and/or sell copies of the Data Files or Software, and
+ * to permit persons to whom the Data Files or Software are furnished to do
+ * so, provided that (a) the above copyright notice(s) and this permission
+ * notice appear with all copies of the Data Files or Software, (b) both the
+ * above copyright notice(s) and this permission notice appear in associated
+ * documentation, and (c) there is clear notice in each modified Data File or
+ * in the Software as well as in the documentation associated with the Data
+ * File(s) or Software that the data or software has been modified.
+ *
+ * THE DATA FILES AND SOFTWARE ARE PROVIDED "AS IS", WITHOUT WARRANTY OF ANY
+ * KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF
+ * THIRD PARTY RIGHTS.  IN NO EVENT SHALL THE COPYRIGHT HOLDER OR HOLDERS
+ * INCLUDED IN THIS NOTICE BE LIABLE FOR ANY CLAIM, OR ANY SPECIAL INDIRECT
+ * OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF
+ * USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR
+ * OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
+ * PERFORMANCE OF THE DATA FILES OR SOFTWARE.
+ *
+ * Except as contained in this notice, the name of a copyright holder shall
+ * not be used in advertising or otherwise to promote the sale, use or other
+ * dealings in these Data Files or Software without prior written
+ * authorization of the copyright holder.
+ */
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/string.h>
+#include <linux/nls.h>
+#include <linux/errno.h>
+
+static const wchar_t charset2uni[256] = {
+	/* 0x00 */
+	0x0000, 0x0001, 0x0002, 0x0003,
+	0x0004, 0x0005, 0x0006, 0x0007,
+	0x0008, 0x0009, 0x000a, 0x000b,
+	0x000c, 0x000d, 0x000e, 0x000f,
+	/* 0x10 */
+	0x0010, 0x0011, 0x0012, 0x0013,
+	0x0014, 0x0015, 0x0016, 0x0017,
+	0x0018, 0x0019, 0x001a, 0x001b,
+	0x001c, 0x001d, 0x001e, 0x001f,
+	/* 0x20 */
+	0x0020, 0x0021, 0x0022, 0x0023,
+	0x0024, 0x0025, 0x0026, 0x0027,
+	0x0028, 0x0029, 0x002a, 0x002b,
+	0x002c, 0x002d, 0x002e, 0x002f,
+	/* 0x30 */
+	0x0030, 0x0031, 0x0032, 0x0033,
+	0x0034, 0x0035, 0x0036, 0x0037,
+	0x0038, 0x0039, 0x003a, 0x003b,
+	0x003c, 0x003d, 0x003e, 0x003f,
+	/* 0x40 */
+	0x0040, 0x0041, 0x0042, 0x0043,
+	0x0044, 0x0045, 0x0046, 0x0047,
+	0x0048, 0x0049, 0x004a, 0x004b,
+	0x004c, 0x004d, 0x004e, 0x004f,
+	/* 0x50 */
+	0x0050, 0x0051, 0x0052, 0x0053,
+	0x0054, 0x0055, 0x0056, 0x0057,
+	0x0058, 0x0059, 0x005a, 0x005b,
+	0x005c, 0x005d, 0x005e, 0x005f,
+	/* 0x60 */
+	0x0060, 0x0061, 0x0062, 0x0063,
+	0x0064, 0x0065, 0x0066, 0x0067,
+	0x0068, 0x0069, 0x006a, 0x006b,
+	0x006c, 0x006d, 0x006e, 0x006f,
+	/* 0x70 */
+	0x0070, 0x0071, 0x0072, 0x0073,
+	0x0074, 0x0075, 0x0076, 0x0077,
+	0x0078, 0x0079, 0x007a, 0x007b,
+	0x007c, 0x007d, 0x007e, 0x007f,
+	/* 0x80 */
+	0x00c4, 0x00c5, 0x00c7, 0x00c9,
+	0x00d1, 0x00d6, 0x00dc, 0x00e1,
+	0x00e0, 0x00e2, 0x00e4, 0x00e3,
+	0x00e5, 0x00e7, 0x00e9, 0x00e8,
+	/* 0x90 */
+	0x00ea, 0x00eb, 0x00ed, 0x00ec,
+	0x00ee, 0x00ef, 0x00f1, 0x00f3,
+	0x00f2, 0x00f4, 0x00f6, 0x00f5,
+	0x00fa, 0x00f9, 0x00fb, 0x00fc,
+	/* 0xa0 */
+	0x2020, 0x00b0, 0x00a2, 0x00a3,
+	0x00a7, 0x2022, 0x00b6, 0x00df,
+	0x00ae, 0x00a9, 0x2122, 0x00b4,
+	0x00a8, 0x2260, 0x00c6, 0x00d8,
+	/* 0xb0 */
+	0x221e, 0x00b1, 0x2264, 0x2265,
+	0x00a5, 0x00b5, 0x2202, 0x2211,
+	0x220f, 0x03c0, 0x222b, 0x00aa,
+	0x00ba, 0x03a9, 0x00e6, 0x00f8,
+	/* 0xc0 */
+	0x00bf, 0x00a1, 0x00ac, 0x221a,
+	0x0192, 0x2248, 0x2206, 0x00ab,
+	0x00bb, 0x2026, 0x00a0, 0x00c0,
+	0x00c3, 0x00d5, 0x0152, 0x0153,
+	/* 0xd0 */
+	0x2013, 0x2014, 0x201c, 0x201d,
+	0x2018, 0x2019, 0x00f7, 0x25ca,
+	0x00ff, 0x0178, 0x2044, 0x20ac,
+	0x2039, 0x203a, 0x0176, 0x0177,
+	/* 0xe0 */
+	0x2021, 0x00b7, 0x1ef2, 0x1ef3,
+	0x2030, 0x00c2, 0x00ca, 0x00c1,
+	0x00cb, 0x00c8, 0x00cd, 0x00ce,
+	0x00cf, 0x00cc, 0x00d3, 0x00d4,
+	/* 0xf0 */
+	0x2663, 0x00d2, 0x00da, 0x00db,
+	0x00d9, 0x0131, 0x00dd, 0x00fd,
+	0x0174, 0x0175, 0x1e84, 0x1e85,
+	0x1e80, 0x1e81, 0x1e82, 0x1e83,
+};
+
+static const unsigned char page00[256] = {
+	0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, /* 0x00-0x07 */
+	0x08, 0x09, 0x0a, 0x0b, 0x0c, 0x0d, 0x0e, 0x0f, /* 0x08-0x0f */
+	0x10, 0x11, 0x12, 0x13, 0x14, 0x15, 0x16, 0x17, /* 0x10-0x17 */
+	0x18, 0x19, 0x1a, 0x1b, 0x1c, 0x1d, 0x1e, 0x1f, /* 0x18-0x1f */
+	0x20, 0x21, 0x22, 0x23, 0x24, 0x25, 0x26, 0x27, /* 0x20-0x27 */
+	0x28, 0x29, 0x2a, 0x2b, 0x2c, 0x2d, 0x2e, 0x2f, /* 0x28-0x2f */
+	0x30, 0x31, 0x32, 0x33, 0x34, 0x35, 0x36, 0x37, /* 0x30-0x37 */
+	0x38, 0x39, 0x3a, 0x3b, 0x3c, 0x3d, 0x3e, 0x3f, /* 0x38-0x3f */
+	0x40, 0x41, 0x42, 0x43, 0x44, 0x45, 0x46, 0x47, /* 0x40-0x47 */
+	0x48, 0x49, 0x4a, 0x4b, 0x4c, 0x4d, 0x4e, 0x4f, /* 0x48-0x4f */
+	0x50, 0x51, 0x52, 0x53, 0x54, 0x55, 0x56, 0x57, /* 0x50-0x57 */
+	0x58, 0x59, 0x5a, 0x5b, 0x5c, 0x5d, 0x5e, 0x5f, /* 0x58-0x5f */
+	0x60, 0x61, 0x62, 0x63, 0x64, 0x65, 0x66, 0x67, /* 0x60-0x67 */
+	0x68, 0x69, 0x6a, 0x6b, 0x6c, 0x6d, 0x6e, 0x6f, /* 0x68-0x6f */
+	0x70, 0x71, 0x72, 0x73, 0x74, 0x75, 0x76, 0x77, /* 0x70-0x77 */
+	0x78, 0x79, 0x7a, 0x7b, 0x7c, 0x7d, 0x7e, 0x7f, /* 0x78-0x7f */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x80-0x87 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x88-0x8f */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x90-0x97 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x98-0x9f */
+	0xca, 0xc1, 0xa2, 0xa3, 0x00, 0xb4, 0x00, 0xa4, /* 0xa0-0xa7 */
+	0xac, 0xa9, 0xbb, 0xc7, 0xc2, 0x00, 0xa8, 0x00, /* 0xa8-0xaf */
+	0xa1, 0xb1, 0x00, 0x00, 0xab, 0xb5, 0xa6, 0xe1, /* 0xb0-0xb7 */
+	0x00, 0x00, 0xbc, 0xc8, 0x00, 0x00, 0x00, 0xc0, /* 0xb8-0xbf */
+	0xcb, 0xe7, 0xe5, 0xcc, 0x80, 0x81, 0xae, 0x82, /* 0xc0-0xc7 */
+	0xe9, 0x83, 0xe6, 0xe8, 0xed, 0xea, 0xeb, 0xec, /* 0xc8-0xcf */
+	0x00, 0x84, 0xf1, 0xee, 0xef, 0xcd, 0x85, 0x00, /* 0xd0-0xd7 */
+	0xaf, 0xf4, 0xf2, 0xf3, 0x86, 0xf6, 0x00, 0xa7, /* 0xd8-0xdf */
+	0x88, 0x87, 0x89, 0x8b, 0x8a, 0x8c, 0xbe, 0x8d, /* 0xe0-0xe7 */
+	0x8f, 0x8e, 0x90, 0x91, 0x93, 0x92, 0x94, 0x95, /* 0xe8-0xef */
+	0x00, 0x96, 0x98, 0x97, 0x99, 0x9b, 0x9a, 0xd6, /* 0xf0-0xf7 */
+	0xbf, 0x9d, 0x9c, 0x9e, 0x9f, 0xf7, 0x00, 0xd8, /* 0xf8-0xff */
+};
+
+static const unsigned char page01[256] = {
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x00-0x07 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x08-0x0f */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x10-0x17 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x18-0x1f */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x20-0x27 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x28-0x2f */
+	0x00, 0xf5, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x30-0x37 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x38-0x3f */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x40-0x47 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x48-0x4f */
+	0x00, 0x00, 0xce, 0xcf, 0x00, 0x00, 0x00, 0x00, /* 0x50-0x57 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x58-0x5f */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x60-0x67 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x68-0x6f */
+	0x00, 0x00, 0x00, 0x00, 0xf8, 0xf9, 0xde, 0xdf, /* 0x70-0x77 */
+	0xd9, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x78-0x7f */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x80-0x87 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x88-0x8f */
+	0x00, 0x00, 0xc4, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x90-0x97 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x98-0x9f */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xa0-0xa7 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xa8-0xaf */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xb0-0xb7 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xb8-0xbf */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xc0-0xc7 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xc8-0xcf */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xd0-0xd7 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xd8-0xdf */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xe0-0xe7 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xe8-0xef */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xf0-0xf7 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xf8-0xff */
+};
+
+static const unsigned char page03[256] = {
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x00-0x07 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x08-0x0f */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x10-0x17 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x18-0x1f */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x20-0x27 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x28-0x2f */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x30-0x37 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x38-0x3f */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x40-0x47 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x48-0x4f */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x50-0x57 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x58-0x5f */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x60-0x67 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x68-0x6f */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x70-0x77 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x78-0x7f */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x80-0x87 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x88-0x8f */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x90-0x97 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x98-0x9f */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xa0-0xa7 */
+	0x00, 0xbd, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xa8-0xaf */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xb0-0xb7 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xb8-0xbf */
+	0xb9, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xc0-0xc7 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xc8-0xcf */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xd0-0xd7 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xd8-0xdf */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xe0-0xe7 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xe8-0xef */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xf0-0xf7 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xf8-0xff */
+};
+
+static const unsigned char page1e[256] = {
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x00-0x07 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x08-0x0f */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x10-0x17 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x18-0x1f */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x20-0x27 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x28-0x2f */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x30-0x37 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x38-0x3f */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x40-0x47 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x48-0x4f */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x50-0x57 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x58-0x5f */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x60-0x67 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x68-0x6f */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x70-0x77 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x78-0x7f */
+	0xfc, 0xfd, 0xfe, 0xff, 0xfa, 0xfb, 0x00, 0x00, /* 0x80-0x87 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x88-0x8f */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x90-0x97 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x98-0x9f */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xa0-0xa7 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xa8-0xaf */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xb0-0xb7 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xb8-0xbf */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xc0-0xc7 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xc8-0xcf */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xd0-0xd7 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xd8-0xdf */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xe0-0xe7 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xe8-0xef */
+	0x00, 0x00, 0xe2, 0xe3, 0x00, 0x00, 0x00, 0x00, /* 0xf0-0xf7 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xf8-0xff */
+};
+
+static const unsigned char page20[256] = {
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x00-0x07 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x08-0x0f */
+	0x00, 0x00, 0x00, 0xd0, 0xd1, 0x00, 0x00, 0x00, /* 0x10-0x17 */
+	0xd4, 0xd5, 0x00, 0x00, 0xd2, 0xd3, 0x00, 0x00, /* 0x18-0x1f */
+	0xa0, 0xe0, 0xa5, 0x00, 0x00, 0x00, 0xc9, 0x00, /* 0x20-0x27 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x28-0x2f */
+	0xe4, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x30-0x37 */
+	0x00, 0xdc, 0xdd, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x38-0x3f */
+	0x00, 0x00, 0x00, 0x00, 0xda, 0x00, 0x00, 0x00, /* 0x40-0x47 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x48-0x4f */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x50-0x57 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x58-0x5f */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x60-0x67 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x68-0x6f */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x70-0x77 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x78-0x7f */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x80-0x87 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x88-0x8f */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x90-0x97 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x98-0x9f */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xa0-0xa7 */
+	0x00, 0x00, 0x00, 0x00, 0xdb, 0x00, 0x00, 0x00, /* 0xa8-0xaf */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xb0-0xb7 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xb8-0xbf */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xc0-0xc7 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xc8-0xcf */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xd0-0xd7 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xd8-0xdf */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xe0-0xe7 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xe8-0xef */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xf0-0xf7 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xf8-0xff */
+};
+
+static const unsigned char page21[256] = {
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x00-0x07 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x08-0x0f */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x10-0x17 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x18-0x1f */
+	0x00, 0x00, 0xaa, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x20-0x27 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x28-0x2f */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x30-0x37 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x38-0x3f */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x40-0x47 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x48-0x4f */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x50-0x57 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x58-0x5f */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x60-0x67 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x68-0x6f */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x70-0x77 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x78-0x7f */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x80-0x87 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x88-0x8f */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x90-0x97 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x98-0x9f */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xa0-0xa7 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xa8-0xaf */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xb0-0xb7 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xb8-0xbf */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xc0-0xc7 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xc8-0xcf */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xd0-0xd7 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xd8-0xdf */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xe0-0xe7 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xe8-0xef */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xf0-0xf7 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xf8-0xff */
+};
+
+static const unsigned char page22[256] = {
+	0x00, 0x00, 0xb6, 0x00, 0x00, 0x00, 0xc6, 0x00, /* 0x00-0x07 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xb8, /* 0x08-0x0f */
+	0x00, 0xb7, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x10-0x17 */
+	0x00, 0x00, 0xc3, 0x00, 0x00, 0x00, 0xb0, 0x00, /* 0x18-0x1f */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x20-0x27 */
+	0x00, 0x00, 0x00, 0xba, 0x00, 0x00, 0x00, 0x00, /* 0x28-0x2f */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x30-0x37 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x38-0x3f */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x40-0x47 */
+	0xc5, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x48-0x4f */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x50-0x57 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x58-0x5f */
+	0xad, 0x00, 0x00, 0x00, 0xb2, 0xb3, 0x00, 0x00, /* 0x60-0x67 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x68-0x6f */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x70-0x77 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x78-0x7f */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x80-0x87 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x88-0x8f */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x90-0x97 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x98-0x9f */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xa0-0xa7 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xa8-0xaf */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xb0-0xb7 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xb8-0xbf */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xc0-0xc7 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xc8-0xcf */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xd0-0xd7 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xd8-0xdf */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xe0-0xe7 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xe8-0xef */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xf0-0xf7 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xf8-0xff */
+};
+
+static const unsigned char page25[256] = {
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x00-0x07 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x08-0x0f */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x10-0x17 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x18-0x1f */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x20-0x27 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x28-0x2f */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x30-0x37 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x38-0x3f */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x40-0x47 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x48-0x4f */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x50-0x57 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x58-0x5f */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x60-0x67 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x68-0x6f */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x70-0x77 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x78-0x7f */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x80-0x87 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x88-0x8f */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x90-0x97 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x98-0x9f */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xa0-0xa7 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xa8-0xaf */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xb0-0xb7 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xb8-0xbf */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xc0-0xc7 */
+	0x00, 0x00, 0xd7, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xc8-0xcf */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xd0-0xd7 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xd8-0xdf */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xe0-0xe7 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xe8-0xef */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xf0-0xf7 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xf8-0xff */
+};
+
+static const unsigned char page26[256] = {
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x00-0x07 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x08-0x0f */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x10-0x17 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x18-0x1f */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x20-0x27 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x28-0x2f */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x30-0x37 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x38-0x3f */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x40-0x47 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x48-0x4f */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x50-0x57 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x58-0x5f */
+	0x00, 0x00, 0x00, 0xf0, 0x00, 0x00, 0x00, 0x00, /* 0x60-0x67 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x68-0x6f */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x70-0x77 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x78-0x7f */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x80-0x87 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x88-0x8f */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x90-0x97 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x98-0x9f */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xa0-0xa7 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xa8-0xaf */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xb0-0xb7 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xb8-0xbf */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xc0-0xc7 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xc8-0xcf */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xd0-0xd7 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xd8-0xdf */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xe0-0xe7 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xe8-0xef */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xf0-0xf7 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xf8-0xff */
+};
+
+static const unsigned char *const page_uni2charset[256] = {
+	page00, page01, NULL,   page03, NULL,   NULL,   NULL,   NULL,
+	NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,
+	NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,
+	NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   page1e, NULL,
+	page20, page21, page22, NULL,   NULL,   page25, page26, NULL,
+	NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,
+	NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,
+	NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,
+	NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,
+	NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,
+	NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,
+	NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,
+	NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,
+	NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,
+	NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,
+	NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,
+	NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,
+	NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,
+	NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,
+	NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,
+	NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,
+	NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,
+	NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,
+	NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,
+	NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,
+	NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,
+	NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,
+	NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,
+	NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,
+	NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,
+	NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,
+	NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,
+};
+
+static const unsigned char charset2lower[256] = {
+	0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x00-0x07 */
+	0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x08-0x0f */
+	0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x10-0x17 */
+	0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x18-0x1f */
+	0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x20-0x27 */
+	0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x28-0x2f */
+	0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x30-0x37 */
+	0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x38-0x3f */
+	0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x40-0x47 */
+	0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x48-0x4f */
+	0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x50-0x57 */
+	0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x58-0x5f */
+	0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x60-0x67 */
+	0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x68-0x6f */
+	0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x70-0x77 */
+	0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x78-0x7f */
+	0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x80-0x87 */
+	0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x88-0x8f */
+	0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x90-0x97 */
+	0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x98-0x9f */
+	0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0xa0-0xa7 */
+	0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0xa8-0xaf */
+	0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0xb0-0xb7 */
+	0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0xb8-0xbf */
+	0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0xc0-0xc7 */
+	0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0xc8-0xcf */
+	0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0xd0-0xd7 */
+	0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0xd8-0xdf */
+	0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0xe0-0xe7 */
+	0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0xe8-0xef */
+	0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0xf0-0xf7 */
+	0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0xf8-0xff */
+};
+
+static const unsigned char charset2upper[256] = {
+	0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, /* 0x00-0x07 */
+	0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, /* 0x08-0x0f */
+	0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, /* 0x10-0x17 */
+	0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, /* 0x18-0x1f */
+	0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, /* 0x20-0x27 */
+	0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, /* 0x28-0x2f */
+	0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, /* 0x30-0x37 */
+	0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, /* 0x38-0x3f */
+	0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, /* 0x40-0x47 */
+	0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, /* 0x48-0x4f */
+	0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, /* 0x50-0x57 */
+	0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, /* 0x58-0x5f */
+	0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, /* 0x60-0x67 */
+	0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, /* 0x68-0x6f */
+	0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, /* 0x70-0x77 */
+	0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, /* 0x78-0x7f */
+	0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, /* 0x80-0x87 */
+	0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, /* 0x88-0x8f */
+	0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, /* 0x90-0x97 */
+	0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, /* 0x98-0x9f */
+	0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, /* 0xa0-0xa7 */
+	0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, /* 0xa8-0xaf */
+	0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, /* 0xb0-0xb7 */
+	0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, /* 0xb8-0xbf */
+	0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, /* 0xc0-0xc7 */
+	0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, /* 0xc8-0xcf */
+	0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, /* 0xd0-0xd7 */
+	0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, /* 0xd8-0xdf */
+	0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, /* 0xe0-0xe7 */
+	0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, /* 0xe8-0xef */
+	0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, /* 0xf0-0xf7 */
+	0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, /* 0xf8-0xff */
+};
+
+static int uni2char(wchar_t uni, unsigned char *out, int boundlen)
+{
+	const unsigned char *uni2charset;
+	unsigned char cl = uni & 0x00ff;
+	unsigned char ch = (uni & 0xff00) >> 8;
+
+	if (boundlen <= 0)
+		return -ENAMETOOLONG;
+
+	uni2charset = page_uni2charset[ch];
+	if (uni2charset && uni2charset[cl])
+		out[0] = uni2charset[cl];
+	else
+		return -EINVAL;
+	return 1;
+}
+
+static int char2uni(const unsigned char *rawstring, int boundlen, wchar_t *uni)
+{
+	*uni = charset2uni[*rawstring];
+	if (*uni == 0x0000)
+		return -EINVAL;
+	return 1;
+}
+
+static struct nls_table table = {
+	.charset	= "macceltic",
+	.uni2char	= uni2char,
+	.char2uni	= char2uni,
+	.charset2lower	= charset2lower,
+	.charset2upper	= charset2upper,
+	.owner		= THIS_MODULE,
+};
+
+static int __init init_nls_macceltic(void)
+{
+	return register_nls(&table);
+}
+
+static void __exit exit_nls_macceltic(void)
+{
+	unregister_nls(&table);
+}
+
+module_init(init_nls_macceltic)
+module_exit(exit_nls_macceltic)
+
+MODULE_LICENSE("Dual BSD/GPL");
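[Annotation] The table registered above is consumed through the generic NLS interface: a filesystem looks the charset up by name with load_nls() and then converts one character at a time through the char2uni()/uni2char() hooks defined in this file. A minimal sketch, assuming the standard <linux/nls.h> API (macceltic_demo() itself is hypothetical):

#include <linux/nls.h>

static void macceltic_demo(void)
{
	static char charset_name[] = "macceltic";
	struct nls_table *nls = load_nls(charset_name);
	unsigned char c = 0xa0;		/* macceltic byte for U+2020 (dagger) */
	unsigned char out[NLS_MAX_CHARSET_SIZE];
	wchar_t uni;

	if (!nls)
		return;
	nls->char2uni(&c, 1, &uni);		/* codepage byte -> Unicode */
	nls->uni2char(uni, out, sizeof(out));	/* Unicode -> codepage byte */
	unload_nls(nls);
}
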
diff --git a/fs/nls/mac-centeuro.c b/fs/nls/mac-centeuro.c
new file mode 100644
index 0000000..979e626
--- /dev/null
+++ b/fs/nls/mac-centeuro.c
@@ -0,0 +1,532 @@
+/*
+ * linux/fs/nls/mac-centeuro.c
+ *
+ * Charset maccenteuro translation tables.
+ * Generated automatically from the Unicode and charset
+ * tables from the Unicode Organization (www.unicode.org).
+ * The Unicode to charset table has only exact mappings.
+ */
+
+/*
+ * COPYRIGHT AND PERMISSION NOTICE
+ *
+ * Copyright 1991-2012 Unicode, Inc.  All rights reserved.  Distributed under
+ * the Terms of Use in http://www.unicode.org/copyright.html.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of the Unicode data files and any associated documentation (the "Data
+ * Files") or Unicode software and any associated documentation (the
+ * "Software") to deal in the Data Files or Software without restriction,
+ * including without limitation the rights to use, copy, modify, merge,
+ * publish, distribute, and/or sell copies of the Data Files or Software, and
+ * to permit persons to whom the Data Files or Software are furnished to do
+ * so, provided that (a) the above copyright notice(s) and this permission
+ * notice appear with all copies of the Data Files or Software, (b) both the
+ * above copyright notice(s) and this permission notice appear in associated
+ * documentation, and (c) there is clear notice in each modified Data File or
+ * in the Software as well as in the documentation associated with the Data
+ * File(s) or Software that the data or software has been modified.
+ *
+ * THE DATA FILES AND SOFTWARE ARE PROVIDED "AS IS", WITHOUT WARRANTY OF ANY
+ * KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF
+ * THIRD PARTY RIGHTS.  IN NO EVENT SHALL THE COPYRIGHT HOLDER OR HOLDERS
+ * INCLUDED IN THIS NOTICE BE LIABLE FOR ANY CLAIM, OR ANY SPECIAL INDIRECT
+ * OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF
+ * USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR
+ * OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
+ * PERFORMANCE OF THE DATA FILES OR SOFTWARE.
+ *
+ * Except as contained in this notice, the name of a copyright holder shall
+ * not be used in advertising or otherwise to promote the sale, use or other
+ * dealings in these Data Files or Software without prior written
+ * authorization of the copyright holder.
+ */
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/string.h>
+#include <linux/nls.h>
+#include <linux/errno.h>
+
+static const wchar_t charset2uni[256] = {
+	/* 0x00 */
+	0x0000, 0x0001, 0x0002, 0x0003,
+	0x0004, 0x0005, 0x0006, 0x0007,
+	0x0008, 0x0009, 0x000a, 0x000b,
+	0x000c, 0x000d, 0x000e, 0x000f,
+	/* 0x10 */
+	0x0010, 0x0011, 0x0012, 0x0013,
+	0x0014, 0x0015, 0x0016, 0x0017,
+	0x0018, 0x0019, 0x001a, 0x001b,
+	0x001c, 0x001d, 0x001e, 0x001f,
+	/* 0x20 */
+	0x0020, 0x0021, 0x0022, 0x0023,
+	0x0024, 0x0025, 0x0026, 0x0027,
+	0x0028, 0x0029, 0x002a, 0x002b,
+	0x002c, 0x002d, 0x002e, 0x002f,
+	/* 0x30 */
+	0x0030, 0x0031, 0x0032, 0x0033,
+	0x0034, 0x0035, 0x0036, 0x0037,
+	0x0038, 0x0039, 0x003a, 0x003b,
+	0x003c, 0x003d, 0x003e, 0x003f,
+	/* 0x40 */
+	0x0040, 0x0041, 0x0042, 0x0043,
+	0x0044, 0x0045, 0x0046, 0x0047,
+	0x0048, 0x0049, 0x004a, 0x004b,
+	0x004c, 0x004d, 0x004e, 0x004f,
+	/* 0x50 */
+	0x0050, 0x0051, 0x0052, 0x0053,
+	0x0054, 0x0055, 0x0056, 0x0057,
+	0x0058, 0x0059, 0x005a, 0x005b,
+	0x005c, 0x005d, 0x005e, 0x005f,
+	/* 0x60 */
+	0x0060, 0x0061, 0x0062, 0x0063,
+	0x0064, 0x0065, 0x0066, 0x0067,
+	0x0068, 0x0069, 0x006a, 0x006b,
+	0x006c, 0x006d, 0x006e, 0x006f,
+	/* 0x70 */
+	0x0070, 0x0071, 0x0072, 0x0073,
+	0x0074, 0x0075, 0x0076, 0x0077,
+	0x0078, 0x0079, 0x007a, 0x007b,
+	0x007c, 0x007d, 0x007e, 0x007f,
+	/* 0x80 */
+	0x00c4, 0x0100, 0x0101, 0x00c9,
+	0x0104, 0x00d6, 0x00dc, 0x00e1,
+	0x0105, 0x010c, 0x00e4, 0x010d,
+	0x0106, 0x0107, 0x00e9, 0x0179,
+	/* 0x90 */
+	0x017a, 0x010e, 0x00ed, 0x010f,
+	0x0112, 0x0113, 0x0116, 0x00f3,
+	0x0117, 0x00f4, 0x00f6, 0x00f5,
+	0x00fa, 0x011a, 0x011b, 0x00fc,
+	/* 0xa0 */
+	0x2020, 0x00b0, 0x0118, 0x00a3,
+	0x00a7, 0x2022, 0x00b6, 0x00df,
+	0x00ae, 0x00a9, 0x2122, 0x0119,
+	0x00a8, 0x2260, 0x0123, 0x012e,
+	/* 0xb0 */
+	0x012f, 0x012a, 0x2264, 0x2265,
+	0x012b, 0x0136, 0x2202, 0x2211,
+	0x0142, 0x013b, 0x013c, 0x013d,
+	0x013e, 0x0139, 0x013a, 0x0145,
+	/* 0xc0 */
+	0x0146, 0x0143, 0x00ac, 0x221a,
+	0x0144, 0x0147, 0x2206, 0x00ab,
+	0x00bb, 0x2026, 0x00a0, 0x0148,
+	0x0150, 0x00d5, 0x0151, 0x014c,
+	/* 0xd0 */
+	0x2013, 0x2014, 0x201c, 0x201d,
+	0x2018, 0x2019, 0x00f7, 0x25ca,
+	0x014d, 0x0154, 0x0155, 0x0158,
+	0x2039, 0x203a, 0x0159, 0x0156,
+	/* 0xe0 */
+	0x0157, 0x0160, 0x201a, 0x201e,
+	0x0161, 0x015a, 0x015b, 0x00c1,
+	0x0164, 0x0165, 0x00cd, 0x017d,
+	0x017e, 0x016a, 0x00d3, 0x00d4,
+	/* 0xf0 */
+	0x016b, 0x016e, 0x00da, 0x016f,
+	0x0170, 0x0171, 0x0172, 0x0173,
+	0x00dd, 0x00fd, 0x0137, 0x017b,
+	0x0141, 0x017c, 0x0122, 0x02c7,
+};
+
+static const unsigned char page00[256] = {
+	0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, /* 0x00-0x07 */
+	0x08, 0x09, 0x0a, 0x0b, 0x0c, 0x0d, 0x0e, 0x0f, /* 0x08-0x0f */
+	0x10, 0x11, 0x12, 0x13, 0x14, 0x15, 0x16, 0x17, /* 0x10-0x17 */
+	0x18, 0x19, 0x1a, 0x1b, 0x1c, 0x1d, 0x1e, 0x1f, /* 0x18-0x1f */
+	0x20, 0x21, 0x22, 0x23, 0x24, 0x25, 0x26, 0x27, /* 0x20-0x27 */
+	0x28, 0x29, 0x2a, 0x2b, 0x2c, 0x2d, 0x2e, 0x2f, /* 0x28-0x2f */
+	0x30, 0x31, 0x32, 0x33, 0x34, 0x35, 0x36, 0x37, /* 0x30-0x37 */
+	0x38, 0x39, 0x3a, 0x3b, 0x3c, 0x3d, 0x3e, 0x3f, /* 0x38-0x3f */
+	0x40, 0x41, 0x42, 0x43, 0x44, 0x45, 0x46, 0x47, /* 0x40-0x47 */
+	0x48, 0x49, 0x4a, 0x4b, 0x4c, 0x4d, 0x4e, 0x4f, /* 0x48-0x4f */
+	0x50, 0x51, 0x52, 0x53, 0x54, 0x55, 0x56, 0x57, /* 0x50-0x57 */
+	0x58, 0x59, 0x5a, 0x5b, 0x5c, 0x5d, 0x5e, 0x5f, /* 0x58-0x5f */
+	0x60, 0x61, 0x62, 0x63, 0x64, 0x65, 0x66, 0x67, /* 0x60-0x67 */
+	0x68, 0x69, 0x6a, 0x6b, 0x6c, 0x6d, 0x6e, 0x6f, /* 0x68-0x6f */
+	0x70, 0x71, 0x72, 0x73, 0x74, 0x75, 0x76, 0x77, /* 0x70-0x77 */
+	0x78, 0x79, 0x7a, 0x7b, 0x7c, 0x7d, 0x7e, 0x7f, /* 0x78-0x7f */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x80-0x87 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x88-0x8f */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x90-0x97 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x98-0x9f */
+	0xca, 0x00, 0x00, 0xa3, 0x00, 0x00, 0x00, 0xa4, /* 0xa0-0xa7 */
+	0xac, 0xa9, 0x00, 0xc7, 0xc2, 0x00, 0xa8, 0x00, /* 0xa8-0xaf */
+	0xa1, 0x00, 0x00, 0x00, 0x00, 0x00, 0xa6, 0x00, /* 0xb0-0xb7 */
+	0x00, 0x00, 0x00, 0xc8, 0x00, 0x00, 0x00, 0x00, /* 0xb8-0xbf */
+	0x00, 0xe7, 0x00, 0x00, 0x80, 0x00, 0x00, 0x00, /* 0xc0-0xc7 */
+	0x00, 0x83, 0x00, 0x00, 0x00, 0xea, 0x00, 0x00, /* 0xc8-0xcf */
+	0x00, 0x00, 0x00, 0xee, 0xef, 0xcd, 0x85, 0x00, /* 0xd0-0xd7 */
+	0x00, 0x00, 0xf2, 0x00, 0x86, 0xf8, 0x00, 0xa7, /* 0xd8-0xdf */
+	0x00, 0x87, 0x00, 0x00, 0x8a, 0x00, 0x00, 0x00, /* 0xe0-0xe7 */
+	0x00, 0x8e, 0x00, 0x00, 0x00, 0x92, 0x00, 0x00, /* 0xe8-0xef */
+	0x00, 0x00, 0x00, 0x97, 0x99, 0x9b, 0x9a, 0xd6, /* 0xf0-0xf7 */
+	0x00, 0x00, 0x9c, 0x00, 0x9f, 0xf9, 0x00, 0x00, /* 0xf8-0xff */
+};
+
+static const unsigned char page01[256] = {
+	0x81, 0x82, 0x00, 0x00, 0x84, 0x88, 0x8c, 0x8d, /* 0x00-0x07 */
+	0x00, 0x00, 0x00, 0x00, 0x89, 0x8b, 0x91, 0x93, /* 0x08-0x0f */
+	0x00, 0x00, 0x94, 0x95, 0x00, 0x00, 0x96, 0x98, /* 0x10-0x17 */
+	0xa2, 0xab, 0x9d, 0x9e, 0x00, 0x00, 0x00, 0x00, /* 0x18-0x1f */
+	0x00, 0x00, 0xfe, 0xae, 0x00, 0x00, 0x00, 0x00, /* 0x20-0x27 */
+	0x00, 0x00, 0xb1, 0xb4, 0x00, 0x00, 0xaf, 0xb0, /* 0x28-0x2f */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xb5, 0xfa, /* 0x30-0x37 */
+	0x00, 0xbd, 0xbe, 0xb9, 0xba, 0xbb, 0xbc, 0x00, /* 0x38-0x3f */
+	0x00, 0xfc, 0xb8, 0xc1, 0xc4, 0xbf, 0xc0, 0xc5, /* 0x40-0x47 */
+	0xcb, 0x00, 0x00, 0x00, 0xcf, 0xd8, 0x00, 0x00, /* 0x48-0x4f */
+	0xcc, 0xce, 0x00, 0x00, 0xd9, 0xda, 0xdf, 0xe0, /* 0x50-0x57 */
+	0xdb, 0xde, 0xe5, 0xe6, 0x00, 0x00, 0x00, 0x00, /* 0x58-0x5f */
+	0xe1, 0xe4, 0x00, 0x00, 0xe8, 0xe9, 0x00, 0x00, /* 0x60-0x67 */
+	0x00, 0x00, 0xed, 0xf0, 0x00, 0x00, 0xf1, 0xf3, /* 0x68-0x6f */
+	0xf4, 0xf5, 0xf6, 0xf7, 0x00, 0x00, 0x00, 0x00, /* 0x70-0x77 */
+	0x00, 0x8f, 0x90, 0xfb, 0xfd, 0xeb, 0xec, 0x00, /* 0x78-0x7f */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x80-0x87 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x88-0x8f */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x90-0x97 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x98-0x9f */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xa0-0xa7 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xa8-0xaf */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xb0-0xb7 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xb8-0xbf */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xc0-0xc7 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xc8-0xcf */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xd0-0xd7 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xd8-0xdf */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xe0-0xe7 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xe8-0xef */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xf0-0xf7 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xf8-0xff */
+};
+
+static const unsigned char page02[256] = {
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x00-0x07 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x08-0x0f */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x10-0x17 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x18-0x1f */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x20-0x27 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x28-0x2f */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x30-0x37 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x38-0x3f */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x40-0x47 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x48-0x4f */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x50-0x57 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x58-0x5f */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x60-0x67 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x68-0x6f */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x70-0x77 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x78-0x7f */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x80-0x87 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x88-0x8f */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x90-0x97 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x98-0x9f */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xa0-0xa7 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xa8-0xaf */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xb0-0xb7 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xb8-0xbf */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xff, /* 0xc0-0xc7 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xc8-0xcf */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xd0-0xd7 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xd8-0xdf */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xe0-0xe7 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xe8-0xef */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xf0-0xf7 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xf8-0xff */
+};
+
+static const unsigned char page20[256] = {
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x00-0x07 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x08-0x0f */
+	0x00, 0x00, 0x00, 0xd0, 0xd1, 0x00, 0x00, 0x00, /* 0x10-0x17 */
+	0xd4, 0xd5, 0xe2, 0x00, 0xd2, 0xd3, 0xe3, 0x00, /* 0x18-0x1f */
+	0xa0, 0x00, 0xa5, 0x00, 0x00, 0x00, 0xc9, 0x00, /* 0x20-0x27 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x28-0x2f */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x30-0x37 */
+	0x00, 0xdc, 0xdd, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x38-0x3f */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x40-0x47 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x48-0x4f */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x50-0x57 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x58-0x5f */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x60-0x67 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x68-0x6f */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x70-0x77 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x78-0x7f */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x80-0x87 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x88-0x8f */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x90-0x97 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x98-0x9f */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xa0-0xa7 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xa8-0xaf */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xb0-0xb7 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xb8-0xbf */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xc0-0xc7 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xc8-0xcf */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xd0-0xd7 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xd8-0xdf */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xe0-0xe7 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xe8-0xef */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xf0-0xf7 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xf8-0xff */
+};
+
+static const unsigned char page21[256] = {
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x00-0x07 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x08-0x0f */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x10-0x17 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x18-0x1f */
+	0x00, 0x00, 0xaa, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x20-0x27 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x28-0x2f */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x30-0x37 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x38-0x3f */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x40-0x47 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x48-0x4f */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x50-0x57 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x58-0x5f */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x60-0x67 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x68-0x6f */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x70-0x77 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x78-0x7f */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x80-0x87 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x88-0x8f */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x90-0x97 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x98-0x9f */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xa0-0xa7 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xa8-0xaf */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xb0-0xb7 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xb8-0xbf */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xc0-0xc7 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xc8-0xcf */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xd0-0xd7 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xd8-0xdf */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xe0-0xe7 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xe8-0xef */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xf0-0xf7 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xf8-0xff */
+};
+
+static const unsigned char page22[256] = {
+	0x00, 0x00, 0xb6, 0x00, 0x00, 0x00, 0xc6, 0x00, /* 0x00-0x07 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x08-0x0f */
+	0x00, 0xb7, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x10-0x17 */
+	0x00, 0x00, 0xc3, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x18-0x1f */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x20-0x27 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x28-0x2f */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x30-0x37 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x38-0x3f */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x40-0x47 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x48-0x4f */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x50-0x57 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x58-0x5f */
+	0xad, 0x00, 0x00, 0x00, 0xb2, 0xb3, 0x00, 0x00, /* 0x60-0x67 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x68-0x6f */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x70-0x77 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x78-0x7f */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x80-0x87 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x88-0x8f */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x90-0x97 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x98-0x9f */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xa0-0xa7 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xa8-0xaf */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xb0-0xb7 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xb8-0xbf */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xc0-0xc7 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xc8-0xcf */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xd0-0xd7 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xd8-0xdf */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xe0-0xe7 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xe8-0xef */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xf0-0xf7 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xf8-0xff */
+};
+
+static const unsigned char page25[256] = {
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x00-0x07 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x08-0x0f */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x10-0x17 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x18-0x1f */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x20-0x27 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x28-0x2f */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x30-0x37 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x38-0x3f */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x40-0x47 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x48-0x4f */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x50-0x57 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x58-0x5f */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x60-0x67 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x68-0x6f */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x70-0x77 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x78-0x7f */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x80-0x87 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x88-0x8f */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x90-0x97 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x98-0x9f */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xa0-0xa7 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xa8-0xaf */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xb0-0xb7 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xb8-0xbf */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xc0-0xc7 */
+	0x00, 0x00, 0xd7, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xc8-0xcf */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xd0-0xd7 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xd8-0xdf */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xe0-0xe7 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xe8-0xef */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xf0-0xf7 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xf8-0xff */
+};
+
+static const unsigned char *const page_uni2charset[256] = {
+	page00, page01, page02, NULL,   NULL,   NULL,   NULL,   NULL,
+	NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,
+	NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,
+	NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,
+	page20, page21, page22, NULL,   NULL,   page25, NULL,   NULL,
+	NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,
+	NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,
+	NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,
+	NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,
+	NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,
+	NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,
+	NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,
+	NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,
+	NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,
+	NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,
+	NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,
+	NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,
+	NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,
+	NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,
+	NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,
+	NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,
+	NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,
+	NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,
+	NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,
+	NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,
+	NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,
+	NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,
+	NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,
+	NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,
+	NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,
+	NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,
+	NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,
+};
+
+static const unsigned char charset2lower[256] = {
+	0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x00-0x07 */
+	0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x08-0x0f */
+	0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x10-0x17 */
+	0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x18-0x1f */
+	0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x20-0x27 */
+	0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x28-0x2f */
+	0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x30-0x37 */
+	0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x38-0x3f */
+	0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x40-0x47 */
+	0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x48-0x4f */
+	0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x50-0x57 */
+	0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x58-0x5f */
+	0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x60-0x67 */
+	0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x68-0x6f */
+	0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x70-0x77 */
+	0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x78-0x7f */
+	0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x80-0x87 */
+	0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x88-0x8f */
+	0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x90-0x97 */
+	0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x98-0x9f */
+	0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0xa0-0xa7 */
+	0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0xa8-0xaf */
+	0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0xb0-0xb7 */
+	0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0xb8-0xbf */
+	0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0xc0-0xc7 */
+	0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0xc8-0xcf */
+	0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0xd0-0xd7 */
+	0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0xd8-0xdf */
+	0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0xe0-0xe7 */
+	0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0xe8-0xef */
+	0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0xf0-0xf7 */
+	0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0xf8-0xff */
+};
+
+static const unsigned char charset2upper[256] = {
+	0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x00-0x07 */
+	0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x08-0x0f */
+	0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x10-0x17 */
+	0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x18-0x1f */
+	0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x20-0x27 */
+	0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x28-0x2f */
+	0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x30-0x37 */
+	0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x38-0x3f */
+	0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x40-0x47 */
+	0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x48-0x4f */
+	0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x50-0x57 */
+	0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x58-0x5f */
+	0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x60-0x67 */
+	0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x68-0x6f */
+	0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x70-0x77 */
+	0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x78-0x7f */
+	0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x80-0x87 */
+	0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x88-0x8f */
+	0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x90-0x97 */
+	0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x98-0x9f */
+	0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0xa0-0xa7 */
+	0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0xa8-0xaf */
+	0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0xb0-0xb7 */
+	0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0xb8-0xbf */
+	0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0xc0-0xc7 */
+	0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0xc8-0xcf */
+	0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0xd0-0xd7 */
+	0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0xd8-0xdf */
+	0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0xe0-0xe7 */
+	0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0xe8-0xef */
+	0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0xf0-0xf7 */
+	0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0xf8-0xff */
+};
+
+static int uni2char(wchar_t uni, unsigned char *out, int boundlen)
+{
+	const unsigned char *uni2charset;
+	unsigned char cl = uni & 0x00ff;
+	unsigned char ch = (uni & 0xff00) >> 8;
+
+	if (boundlen <= 0)
+		return -ENAMETOOLONG;
+
+	uni2charset = page_uni2charset[ch];
+	if (uni2charset && uni2charset[cl])
+		out[0] = uni2charset[cl];
+	else
+		return -EINVAL;
+	return 1;
+}
+
+static int char2uni(const unsigned char *rawstring, int boundlen, wchar_t *uni)
+{
+	*uni = charset2uni[*rawstring];
+	if (*uni == 0x0000)
+		return -EINVAL;
+	return 1;
+}
+
+static struct nls_table table = {
+	.charset	= "maccenteuro",
+	.uni2char	= uni2char,
+	.char2uni	= char2uni,
+	.charset2lower	= charset2lower,
+	.charset2upper	= charset2upper,
+	.owner		= THIS_MODULE,
+};
+
+static int __init init_nls_maccenteuro(void)
+{
+	return register_nls(&table);
+}
+
+static void __exit exit_nls_maccenteuro(void)
+{
+	unregister_nls(&table);
+}
+
+module_init(init_nls_maccenteuro)
+module_exit(exit_nls_maccenteuro)
+
+MODULE_LICENSE("Dual BSD/GPL");
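
For readers unfamiliar with the NLS table layout, here is a small standalone sketch (hypothetical demo_* names and values, not part of this patch) of how the two-level page_uni2charset lookup used by these generated tables resolves a Unicode code point to a single charset byte: the high byte of the code point selects a 256-entry page, the low byte indexes into it, and a zero entry or missing page means "no exact mapping".

/*
 * Illustrative sketch only: miniature stand-ins for the full 256-entry
 * pages above, with one hypothetical mapping U+00C4 -> 0x80.
 */
#include <stdio.h>

static const unsigned char demo_page00[256] = { [0xc4] = 0x80 };

/* Second level: one page pointer per high byte; unmapped pages stay NULL. */
static const unsigned char *const demo_pages[256] = { demo_page00 };

/* Returns the charset byte, or -1 if the code point has no exact mapping. */
static int demo_uni2char(unsigned int uni)
{
	const unsigned char *page = demo_pages[(uni >> 8) & 0xff];
	unsigned char b = page ? page[uni & 0xff] : 0;

	return b ? b : -1;
}

int main(void)
{
	printf("U+00C4 -> %d\n", demo_uni2char(0x00c4)); /* 128 (0x80) */
	printf("U+0141 -> %d\n", demo_uni2char(0x0141)); /* -1, page not mapped */
	return 0;
}

The in-tree uni2char() below follows the same pattern, except that it writes the byte into the caller's output buffer and reports errors as -EINVAL or -ENAMETOOLONG.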
diff --git a/fs/nls/mac-croatian.c b/fs/nls/mac-croatian.c
new file mode 100644
index 0000000..dd3f675
--- /dev/null
+++ b/fs/nls/mac-croatian.c
@@ -0,0 +1,602 @@
+/*
+ * linux/fs/nls/mac-croatian.c
+ *
+ * Charset maccroatian translation tables.
+ * Generated automatically from the Unicode and charset
+ * tables from the Unicode Organization (www.unicode.org).
+ * The Unicode to charset table has only exact mappings.
+ */
+
+/*
+ * COPYRIGHT AND PERMISSION NOTICE
+ *
+ * Copyright 1991-2012 Unicode, Inc.  All rights reserved.  Distributed under
+ * the Terms of Use in http://www.unicode.org/copyright.html.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of the Unicode data files and any associated documentation (the "Data
+ * Files") or Unicode software and any associated documentation (the
+ * "Software") to deal in the Data Files or Software without restriction,
+ * including without limitation the rights to use, copy, modify, merge,
+ * publish, distribute, and/or sell copies of the Data Files or Software, and
+ * to permit persons to whom the Data Files or Software are furnished to do
+ * so, provided that (a) the above copyright notice(s) and this permission
+ * notice appear with all copies of the Data Files or Software, (b) both the
+ * above copyright notice(s) and this permission notice appear in associated
+ * documentation, and (c) there is clear notice in each modified Data File or
+ * in the Software as well as in the documentation associated with the Data
+ * File(s) or Software that the data or software has been modified.
+ *
+ * THE DATA FILES AND SOFTWARE ARE PROVIDED "AS IS", WITHOUT WARRANTY OF ANY
+ * KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF
+ * THIRD PARTY RIGHTS.  IN NO EVENT SHALL THE COPYRIGHT HOLDER OR HOLDERS
+ * INCLUDED IN THIS NOTICE BE LIABLE FOR ANY CLAIM, OR ANY SPECIAL INDIRECT
+ * OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF
+ * USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR
+ * OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
+ * PERFORMANCE OF THE DATA FILES OR SOFTWARE.
+ *
+ * Except as contained in this notice, the name of a copyright holder shall
+ * not be used in advertising or otherwise to promote the sale, use or other
+ * dealings in these Data Files or Software without prior written
+ * authorization of the copyright holder.
+ */
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/string.h>
+#include <linux/nls.h>
+#include <linux/errno.h>
+
+static const wchar_t charset2uni[256] = {
+	/* 0x00 */
+	0x0000, 0x0001, 0x0002, 0x0003,
+	0x0004, 0x0005, 0x0006, 0x0007,
+	0x0008, 0x0009, 0x000a, 0x000b,
+	0x000c, 0x000d, 0x000e, 0x000f,
+	/* 0x10 */
+	0x0010, 0x0011, 0x0012, 0x0013,
+	0x0014, 0x0015, 0x0016, 0x0017,
+	0x0018, 0x0019, 0x001a, 0x001b,
+	0x001c, 0x001d, 0x001e, 0x001f,
+	/* 0x20 */
+	0x0020, 0x0021, 0x0022, 0x0023,
+	0x0024, 0x0025, 0x0026, 0x0027,
+	0x0028, 0x0029, 0x002a, 0x002b,
+	0x002c, 0x002d, 0x002e, 0x002f,
+	/* 0x30 */
+	0x0030, 0x0031, 0x0032, 0x0033,
+	0x0034, 0x0035, 0x0036, 0x0037,
+	0x0038, 0x0039, 0x003a, 0x003b,
+	0x003c, 0x003d, 0x003e, 0x003f,
+	/* 0x40 */
+	0x0040, 0x0041, 0x0042, 0x0043,
+	0x0044, 0x0045, 0x0046, 0x0047,
+	0x0048, 0x0049, 0x004a, 0x004b,
+	0x004c, 0x004d, 0x004e, 0x004f,
+	/* 0x50 */
+	0x0050, 0x0051, 0x0052, 0x0053,
+	0x0054, 0x0055, 0x0056, 0x0057,
+	0x0058, 0x0059, 0x005a, 0x005b,
+	0x005c, 0x005d, 0x005e, 0x005f,
+	/* 0x60 */
+	0x0060, 0x0061, 0x0062, 0x0063,
+	0x0064, 0x0065, 0x0066, 0x0067,
+	0x0068, 0x0069, 0x006a, 0x006b,
+	0x006c, 0x006d, 0x006e, 0x006f,
+	/* 0x70 */
+	0x0070, 0x0071, 0x0072, 0x0073,
+	0x0074, 0x0075, 0x0076, 0x0077,
+	0x0078, 0x0079, 0x007a, 0x007b,
+	0x007c, 0x007d, 0x007e, 0x007f,
+	/* 0x80 */
+	0x00c4, 0x00c5, 0x00c7, 0x00c9,
+	0x00d1, 0x00d6, 0x00dc, 0x00e1,
+	0x00e0, 0x00e2, 0x00e4, 0x00e3,
+	0x00e5, 0x00e7, 0x00e9, 0x00e8,
+	/* 0x90 */
+	0x00ea, 0x00eb, 0x00ed, 0x00ec,
+	0x00ee, 0x00ef, 0x00f1, 0x00f3,
+	0x00f2, 0x00f4, 0x00f6, 0x00f5,
+	0x00fa, 0x00f9, 0x00fb, 0x00fc,
+	/* 0xa0 */
+	0x2020, 0x00b0, 0x00a2, 0x00a3,
+	0x00a7, 0x2022, 0x00b6, 0x00df,
+	0x00ae, 0x0160, 0x2122, 0x00b4,
+	0x00a8, 0x2260, 0x017d, 0x00d8,
+	/* 0xb0 */
+	0x221e, 0x00b1, 0x2264, 0x2265,
+	0x2206, 0x00b5, 0x2202, 0x2211,
+	0x220f, 0x0161, 0x222b, 0x00aa,
+	0x00ba, 0x03a9, 0x017e, 0x00f8,
+	/* 0xc0 */
+	0x00bf, 0x00a1, 0x00ac, 0x221a,
+	0x0192, 0x2248, 0x0106, 0x00ab,
+	0x010c, 0x2026, 0x00a0, 0x00c0,
+	0x00c3, 0x00d5, 0x0152, 0x0153,
+	/* 0xd0 */
+	0x0110, 0x2014, 0x201c, 0x201d,
+	0x2018, 0x2019, 0x00f7, 0x25ca,
+	0xf8ff, 0x00a9, 0x2044, 0x20ac,
+	0x2039, 0x203a, 0x00c6, 0x00bb,
+	/* 0xe0 */
+	0x2013, 0x00b7, 0x201a, 0x201e,
+	0x2030, 0x00c2, 0x0107, 0x00c1,
+	0x010d, 0x00c8, 0x00cd, 0x00ce,
+	0x00cf, 0x00cc, 0x00d3, 0x00d4,
+	/* 0xf0 */
+	0x0111, 0x00d2, 0x00da, 0x00db,
+	0x00d9, 0x0131, 0x02c6, 0x02dc,
+	0x00af, 0x03c0, 0x00cb, 0x02da,
+	0x00b8, 0x00ca, 0x00e6, 0x02c7,
+};
+
+static const unsigned char page00[256] = {
+	0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, /* 0x00-0x07 */
+	0x08, 0x09, 0x0a, 0x0b, 0x0c, 0x0d, 0x0e, 0x0f, /* 0x08-0x0f */
+	0x10, 0x11, 0x12, 0x13, 0x14, 0x15, 0x16, 0x17, /* 0x10-0x17 */
+	0x18, 0x19, 0x1a, 0x1b, 0x1c, 0x1d, 0x1e, 0x1f, /* 0x18-0x1f */
+	0x20, 0x21, 0x22, 0x23, 0x24, 0x25, 0x26, 0x27, /* 0x20-0x27 */
+	0x28, 0x29, 0x2a, 0x2b, 0x2c, 0x2d, 0x2e, 0x2f, /* 0x28-0x2f */
+	0x30, 0x31, 0x32, 0x33, 0x34, 0x35, 0x36, 0x37, /* 0x30-0x37 */
+	0x38, 0x39, 0x3a, 0x3b, 0x3c, 0x3d, 0x3e, 0x3f, /* 0x38-0x3f */
+	0x40, 0x41, 0x42, 0x43, 0x44, 0x45, 0x46, 0x47, /* 0x40-0x47 */
+	0x48, 0x49, 0x4a, 0x4b, 0x4c, 0x4d, 0x4e, 0x4f, /* 0x48-0x4f */
+	0x50, 0x51, 0x52, 0x53, 0x54, 0x55, 0x56, 0x57, /* 0x50-0x57 */
+	0x58, 0x59, 0x5a, 0x5b, 0x5c, 0x5d, 0x5e, 0x5f, /* 0x58-0x5f */
+	0x60, 0x61, 0x62, 0x63, 0x64, 0x65, 0x66, 0x67, /* 0x60-0x67 */
+	0x68, 0x69, 0x6a, 0x6b, 0x6c, 0x6d, 0x6e, 0x6f, /* 0x68-0x6f */
+	0x70, 0x71, 0x72, 0x73, 0x74, 0x75, 0x76, 0x77, /* 0x70-0x77 */
+	0x78, 0x79, 0x7a, 0x7b, 0x7c, 0x7d, 0x7e, 0x7f, /* 0x78-0x7f */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x80-0x87 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x88-0x8f */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x90-0x97 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x98-0x9f */
+	0xca, 0xc1, 0xa2, 0xa3, 0x00, 0x00, 0x00, 0xa4, /* 0xa0-0xa7 */
+	0xac, 0xd9, 0xbb, 0xc7, 0xc2, 0x00, 0xa8, 0xf8, /* 0xa8-0xaf */
+	0xa1, 0xb1, 0x00, 0x00, 0xab, 0xb5, 0xa6, 0xe1, /* 0xb0-0xb7 */
+	0xfc, 0x00, 0xbc, 0xdf, 0x00, 0x00, 0x00, 0xc0, /* 0xb8-0xbf */
+	0xcb, 0xe7, 0xe5, 0xcc, 0x80, 0x81, 0xde, 0x82, /* 0xc0-0xc7 */
+	0xe9, 0x83, 0xfd, 0xfa, 0xed, 0xea, 0xeb, 0xec, /* 0xc8-0xcf */
+	0x00, 0x84, 0xf1, 0xee, 0xef, 0xcd, 0x85, 0x00, /* 0xd0-0xd7 */
+	0xaf, 0xf4, 0xf2, 0xf3, 0x86, 0x00, 0x00, 0xa7, /* 0xd8-0xdf */
+	0x88, 0x87, 0x89, 0x8b, 0x8a, 0x8c, 0xfe, 0x8d, /* 0xe0-0xe7 */
+	0x8f, 0x8e, 0x90, 0x91, 0x93, 0x92, 0x94, 0x95, /* 0xe8-0xef */
+	0x00, 0x96, 0x98, 0x97, 0x99, 0x9b, 0x9a, 0xd6, /* 0xf0-0xf7 */
+	0xbf, 0x9d, 0x9c, 0x9e, 0x9f, 0x00, 0x00, 0x00, /* 0xf8-0xff */
+};
+
+static const unsigned char page01[256] = {
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xc6, 0xe6, /* 0x00-0x07 */
+	0x00, 0x00, 0x00, 0x00, 0xc8, 0xe8, 0x00, 0x00, /* 0x08-0x0f */
+	0xd0, 0xf0, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x10-0x17 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x18-0x1f */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x20-0x27 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x28-0x2f */
+	0x00, 0xf5, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x30-0x37 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x38-0x3f */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x40-0x47 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x48-0x4f */
+	0x00, 0x00, 0xce, 0xcf, 0x00, 0x00, 0x00, 0x00, /* 0x50-0x57 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x58-0x5f */
+	0xa9, 0xb9, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x60-0x67 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x68-0x6f */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x70-0x77 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0xae, 0xbe, 0x00, /* 0x78-0x7f */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x80-0x87 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x88-0x8f */
+	0x00, 0x00, 0xc4, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x90-0x97 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x98-0x9f */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xa0-0xa7 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xa8-0xaf */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xb0-0xb7 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xb8-0xbf */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xc0-0xc7 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xc8-0xcf */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xd0-0xd7 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xd8-0xdf */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xe0-0xe7 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xe8-0xef */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xf0-0xf7 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xf8-0xff */
+};
+
+static const unsigned char page02[256] = {
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x00-0x07 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x08-0x0f */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x10-0x17 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x18-0x1f */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x20-0x27 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x28-0x2f */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x30-0x37 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x38-0x3f */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x40-0x47 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x48-0x4f */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x50-0x57 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x58-0x5f */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x60-0x67 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x68-0x6f */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x70-0x77 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x78-0x7f */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x80-0x87 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x88-0x8f */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x90-0x97 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x98-0x9f */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xa0-0xa7 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xa8-0xaf */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xb0-0xb7 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xb8-0xbf */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xf6, 0xff, /* 0xc0-0xc7 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xc8-0xcf */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xd0-0xd7 */
+	0x00, 0x00, 0xfb, 0x00, 0xf7, 0x00, 0x00, 0x00, /* 0xd8-0xdf */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xe0-0xe7 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xe8-0xef */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xf0-0xf7 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xf8-0xff */
+};
+
+static const unsigned char page03[256] = {
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x00-0x07 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x08-0x0f */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x10-0x17 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x18-0x1f */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x20-0x27 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x28-0x2f */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x30-0x37 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x38-0x3f */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x40-0x47 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x48-0x4f */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x50-0x57 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x58-0x5f */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x60-0x67 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x68-0x6f */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x70-0x77 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x78-0x7f */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x80-0x87 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x88-0x8f */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x90-0x97 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x98-0x9f */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xa0-0xa7 */
+	0x00, 0xbd, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xa8-0xaf */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xb0-0xb7 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xb8-0xbf */
+	0xf9, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xc0-0xc7 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xc8-0xcf */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xd0-0xd7 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xd8-0xdf */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xe0-0xe7 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xe8-0xef */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xf0-0xf7 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xf8-0xff */
+};
+
+static const unsigned char page20[256] = {
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x00-0x07 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x08-0x0f */
+	0x00, 0x00, 0x00, 0xe0, 0xd1, 0x00, 0x00, 0x00, /* 0x10-0x17 */
+	0xd4, 0xd5, 0xe2, 0x00, 0xd2, 0xd3, 0xe3, 0x00, /* 0x18-0x1f */
+	0xa0, 0x00, 0xa5, 0x00, 0x00, 0x00, 0xc9, 0x00, /* 0x20-0x27 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x28-0x2f */
+	0xe4, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x30-0x37 */
+	0x00, 0xdc, 0xdd, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x38-0x3f */
+	0x00, 0x00, 0x00, 0x00, 0xda, 0x00, 0x00, 0x00, /* 0x40-0x47 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x48-0x4f */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x50-0x57 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x58-0x5f */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x60-0x67 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x68-0x6f */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x70-0x77 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x78-0x7f */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x80-0x87 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x88-0x8f */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x90-0x97 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x98-0x9f */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xa0-0xa7 */
+	0x00, 0x00, 0x00, 0x00, 0xdb, 0x00, 0x00, 0x00, /* 0xa8-0xaf */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xb0-0xb7 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xb8-0xbf */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xc0-0xc7 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xc8-0xcf */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xd0-0xd7 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xd8-0xdf */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xe0-0xe7 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xe8-0xef */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xf0-0xf7 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xf8-0xff */
+};
+
+static const unsigned char page21[256] = {
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x00-0x07 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x08-0x0f */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x10-0x17 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x18-0x1f */
+	0x00, 0x00, 0xaa, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x20-0x27 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x28-0x2f */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x30-0x37 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x38-0x3f */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x40-0x47 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x48-0x4f */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x50-0x57 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x58-0x5f */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x60-0x67 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x68-0x6f */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x70-0x77 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x78-0x7f */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x80-0x87 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x88-0x8f */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x90-0x97 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x98-0x9f */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xa0-0xa7 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xa8-0xaf */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xb0-0xb7 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xb8-0xbf */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xc0-0xc7 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xc8-0xcf */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xd0-0xd7 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xd8-0xdf */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xe0-0xe7 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xe8-0xef */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xf0-0xf7 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xf8-0xff */
+};
+
+static const unsigned char page22[256] = {
+	0x00, 0x00, 0xb6, 0x00, 0x00, 0x00, 0xb4, 0x00, /* 0x00-0x07 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xb8, /* 0x08-0x0f */
+	0x00, 0xb7, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x10-0x17 */
+	0x00, 0x00, 0xc3, 0x00, 0x00, 0x00, 0xb0, 0x00, /* 0x18-0x1f */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x20-0x27 */
+	0x00, 0x00, 0x00, 0xba, 0x00, 0x00, 0x00, 0x00, /* 0x28-0x2f */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x30-0x37 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x38-0x3f */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x40-0x47 */
+	0xc5, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x48-0x4f */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x50-0x57 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x58-0x5f */
+	0xad, 0x00, 0x00, 0x00, 0xb2, 0xb3, 0x00, 0x00, /* 0x60-0x67 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x68-0x6f */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x70-0x77 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x78-0x7f */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x80-0x87 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x88-0x8f */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x90-0x97 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x98-0x9f */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xa0-0xa7 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xa8-0xaf */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xb0-0xb7 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xb8-0xbf */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xc0-0xc7 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xc8-0xcf */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xd0-0xd7 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xd8-0xdf */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xe0-0xe7 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xe8-0xef */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xf0-0xf7 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xf8-0xff */
+};
+
+static const unsigned char page25[256] = {
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x00-0x07 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x08-0x0f */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x10-0x17 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x18-0x1f */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x20-0x27 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x28-0x2f */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x30-0x37 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x38-0x3f */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x40-0x47 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x48-0x4f */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x50-0x57 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x58-0x5f */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x60-0x67 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x68-0x6f */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x70-0x77 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x78-0x7f */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x80-0x87 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x88-0x8f */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x90-0x97 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x98-0x9f */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xa0-0xa7 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xa8-0xaf */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xb0-0xb7 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xb8-0xbf */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xc0-0xc7 */
+	0x00, 0x00, 0xd7, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xc8-0xcf */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xd0-0xd7 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xd8-0xdf */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xe0-0xe7 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xe8-0xef */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xf0-0xf7 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xf8-0xff */
+};
+
+static const unsigned char pagef8[256] = {
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x00-0x07 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x08-0x0f */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x10-0x17 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x18-0x1f */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x20-0x27 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x28-0x2f */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x30-0x37 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x38-0x3f */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x40-0x47 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x48-0x4f */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x50-0x57 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x58-0x5f */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x60-0x67 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x68-0x6f */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x70-0x77 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x78-0x7f */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x80-0x87 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x88-0x8f */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x90-0x97 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x98-0x9f */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xa0-0xa7 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xa8-0xaf */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xb0-0xb7 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xb8-0xbf */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xc0-0xc7 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xc8-0xcf */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xd0-0xd7 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xd8-0xdf */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xe0-0xe7 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xe8-0xef */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xf0-0xf7 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xd8, /* 0xf8-0xff */
+};
+
+static const unsigned char *const page_uni2charset[256] = {
+	page00, page01, page02, page03, NULL,   NULL,   NULL,   NULL,
+	NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,
+	NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,
+	NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,
+	page20, page21, page22, NULL,   NULL,   page25, NULL,   NULL,
+	NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,
+	NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,
+	NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,
+	NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,
+	NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,
+	NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,
+	NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,
+	NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,
+	NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,
+	NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,
+	NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,
+	NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,
+	NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,
+	NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,
+	NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,
+	NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,
+	NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,
+	NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,
+	NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,
+	NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,
+	NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,
+	NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,
+	NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,
+	NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,
+	NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,
+	NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,
+	pagef8, NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,
+};
+
+static const unsigned char charset2lower[256] = {
+	0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x00-0x07 */
+	0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x08-0x0f */
+	0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x10-0x17 */
+	0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x18-0x1f */
+	0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x20-0x27 */
+	0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x28-0x2f */
+	0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x30-0x37 */
+	0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x38-0x3f */
+	0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x40-0x47 */
+	0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x48-0x4f */
+	0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x50-0x57 */
+	0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x58-0x5f */
+	0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x60-0x67 */
+	0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x68-0x6f */
+	0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x70-0x77 */
+	0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x78-0x7f */
+	0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x80-0x87 */
+	0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x88-0x8f */
+	0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x90-0x97 */
+	0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x98-0x9f */
+	0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0xa0-0xa7 */
+	0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0xa8-0xaf */
+	0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0xb0-0xb7 */
+	0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0xb8-0xbf */
+	0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0xc0-0xc7 */
+	0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0xc8-0xcf */
+	0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0xd0-0xd7 */
+	0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0xd8-0xdf */
+	0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0xe0-0xe7 */
+	0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0xe8-0xef */
+	0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0xf0-0xf7 */
+	0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0xf8-0xff */
+};
+
+static const unsigned char charset2upper[256] = {
+	0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x00-0x07 */
+	0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x08-0x0f */
+	0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x10-0x17 */
+	0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x18-0x1f */
+	0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x20-0x27 */
+	0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x28-0x2f */
+	0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x30-0x37 */
+	0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x38-0x3f */
+	0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x40-0x47 */
+	0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x48-0x4f */
+	0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x50-0x57 */
+	0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x58-0x5f */
+	0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x60-0x67 */
+	0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x68-0x6f */
+	0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x70-0x77 */
+	0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x78-0x7f */
+	0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x80-0x87 */
+	0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x88-0x8f */
+	0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x90-0x97 */
+	0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x98-0x9f */
+	0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0xa0-0xa7 */
+	0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0xa8-0xaf */
+	0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0xb0-0xb7 */
+	0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0xb8-0xbf */
+	0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0xc0-0xc7 */
+	0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0xc8-0xcf */
+	0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0xd0-0xd7 */
+	0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0xd8-0xdf */
+	0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0xe0-0xe7 */
+	0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0xe8-0xef */
+	0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0xf0-0xf7 */
+	0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0xf8-0xff */
+};
+
+static int uni2char(wchar_t uni, unsigned char *out, int boundlen)
+{
+	const unsigned char *uni2charset;
+	unsigned char cl = uni & 0x00ff;
+	unsigned char ch = (uni & 0xff00) >> 8;
+
+	if (boundlen <= 0)
+		return -ENAMETOOLONG;
+
+	uni2charset = page_uni2charset[ch];
+	if (uni2charset && uni2charset[cl])
+		out[0] = uni2charset[cl];
+	else
+		return -EINVAL;
+	return 1;
+}
+
+static int char2uni(const unsigned char *rawstring, int boundlen, wchar_t *uni)
+{
+	*uni = charset2uni[*rawstring];
+	if (*uni == 0x0000)
+		return -EINVAL;
+	return 1;
+}
+
+static struct nls_table table = {
+	.charset	= "maccroatian",
+	.uni2char	= uni2char,
+	.char2uni	= char2uni,
+	.charset2lower	= charset2lower,
+	.charset2upper	= charset2upper,
+	.owner		= THIS_MODULE,
+};
+
+static int __init init_nls_maccroatian(void)
+{
+	return register_nls(&table);
+}
+
+static void __exit exit_nls_maccroatian(void)
+{
+	unregister_nls(&table);
+}
+
+module_init(init_nls_maccroatian)
+module_exit(exit_nls_maccroatian)
+
+MODULE_LICENSE("Dual BSD/GPL");
diff --git a/fs/nls/mac-cyrillic.c b/fs/nls/mac-cyrillic.c
new file mode 100644
index 0000000..1112c84
--- /dev/null
+++ b/fs/nls/mac-cyrillic.c
@@ -0,0 +1,497 @@
+/*
+ * linux/fs/nls/mac-cyrillic.c
+ *
+ * Charset maccyrillic translation tables.
+ * Generated automatically from the Unicode and charset
+ * tables from the Unicode Organization (www.unicode.org).
+ * The Unicode to charset table has only exact mappings.
+ */
+
+/*
+ * COPYRIGHT AND PERMISSION NOTICE
+ *
+ * Copyright 1991-2012 Unicode, Inc.  All rights reserved.  Distributed under
+ * the Terms of Use in http://www.unicode.org/copyright.html.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of the Unicode data files and any associated documentation (the "Data
+ * Files") or Unicode software and any associated documentation (the
+ * "Software") to deal in the Data Files or Software without restriction,
+ * including without limitation the rights to use, copy, modify, merge,
+ * publish, distribute, and/or sell copies of the Data Files or Software, and
+ * to permit persons to whom the Data Files or Software are furnished to do
+ * so, provided that (a) the above copyright notice(s) and this permission
+ * notice appear with all copies of the Data Files or Software, (b) both the
+ * above copyright notice(s) and this permission notice appear in associated
+ * documentation, and (c) there is clear notice in each modified Data File or
+ * in the Software as well as in the documentation associated with the Data
+ * File(s) or Software that the data or software has been modified.
+ *
+ * THE DATA FILES AND SOFTWARE ARE PROVIDED "AS IS", WITHOUT WARRANTY OF ANY
+ * KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF
+ * THIRD PARTY RIGHTS.  IN NO EVENT SHALL THE COPYRIGHT HOLDER OR HOLDERS
+ * INCLUDED IN THIS NOTICE BE LIABLE FOR ANY CLAIM, OR ANY SPECIAL INDIRECT
+ * OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF
+ * USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR
+ * OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
+ * PERFORMANCE OF THE DATA FILES OR SOFTWARE.
+ *
+ * Except as contained in this notice, the name of a copyright holder shall
+ * not be used in advertising or otherwise to promote the sale, use or other
+ * dealings in these Data Files or Software without prior written
+ * authorization of the copyright holder.
+ */
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/string.h>
+#include <linux/nls.h>
+#include <linux/errno.h>
+
+static const wchar_t charset2uni[256] = {
+	/* 0x00 */
+	0x0000, 0x0001, 0x0002, 0x0003,
+	0x0004, 0x0005, 0x0006, 0x0007,
+	0x0008, 0x0009, 0x000a, 0x000b,
+	0x000c, 0x000d, 0x000e, 0x000f,
+	/* 0x10 */
+	0x0010, 0x0011, 0x0012, 0x0013,
+	0x0014, 0x0015, 0x0016, 0x0017,
+	0x0018, 0x0019, 0x001a, 0x001b,
+	0x001c, 0x001d, 0x001e, 0x001f,
+	/* 0x20 */
+	0x0020, 0x0021, 0x0022, 0x0023,
+	0x0024, 0x0025, 0x0026, 0x0027,
+	0x0028, 0x0029, 0x002a, 0x002b,
+	0x002c, 0x002d, 0x002e, 0x002f,
+	/* 0x30 */
+	0x0030, 0x0031, 0x0032, 0x0033,
+	0x0034, 0x0035, 0x0036, 0x0037,
+	0x0038, 0x0039, 0x003a, 0x003b,
+	0x003c, 0x003d, 0x003e, 0x003f,
+	/* 0x40 */
+	0x0040, 0x0041, 0x0042, 0x0043,
+	0x0044, 0x0045, 0x0046, 0x0047,
+	0x0048, 0x0049, 0x004a, 0x004b,
+	0x004c, 0x004d, 0x004e, 0x004f,
+	/* 0x50 */
+	0x0050, 0x0051, 0x0052, 0x0053,
+	0x0054, 0x0055, 0x0056, 0x0057,
+	0x0058, 0x0059, 0x005a, 0x005b,
+	0x005c, 0x005d, 0x005e, 0x005f,
+	/* 0x60 */
+	0x0060, 0x0061, 0x0062, 0x0063,
+	0x0064, 0x0065, 0x0066, 0x0067,
+	0x0068, 0x0069, 0x006a, 0x006b,
+	0x006c, 0x006d, 0x006e, 0x006f,
+	/* 0x70 */
+	0x0070, 0x0071, 0x0072, 0x0073,
+	0x0074, 0x0075, 0x0076, 0x0077,
+	0x0078, 0x0079, 0x007a, 0x007b,
+	0x007c, 0x007d, 0x007e, 0x007f,
+	/* 0x80 */
+	0x0410, 0x0411, 0x0412, 0x0413,
+	0x0414, 0x0415, 0x0416, 0x0417,
+	0x0418, 0x0419, 0x041a, 0x041b,
+	0x041c, 0x041d, 0x041e, 0x041f,
+	/* 0x90 */
+	0x0420, 0x0421, 0x0422, 0x0423,
+	0x0424, 0x0425, 0x0426, 0x0427,
+	0x0428, 0x0429, 0x042a, 0x042b,
+	0x042c, 0x042d, 0x042e, 0x042f,
+	/* 0xa0 */
+	0x2020, 0x00b0, 0x0490, 0x00a3,
+	0x00a7, 0x2022, 0x00b6, 0x0406,
+	0x00ae, 0x00a9, 0x2122, 0x0402,
+	0x0452, 0x2260, 0x0403, 0x0453,
+	/* 0xb0 */
+	0x221e, 0x00b1, 0x2264, 0x2265,
+	0x0456, 0x00b5, 0x0491, 0x0408,
+	0x0404, 0x0454, 0x0407, 0x0457,
+	0x0409, 0x0459, 0x040a, 0x045a,
+	/* 0xc0 */
+	0x0458, 0x0405, 0x00ac, 0x221a,
+	0x0192, 0x2248, 0x2206, 0x00ab,
+	0x00bb, 0x2026, 0x00a0, 0x040b,
+	0x045b, 0x040c, 0x045c, 0x0455,
+	/* 0xd0 */
+	0x2013, 0x2014, 0x201c, 0x201d,
+	0x2018, 0x2019, 0x00f7, 0x201e,
+	0x040e, 0x045e, 0x040f, 0x045f,
+	0x2116, 0x0401, 0x0451, 0x044f,
+	/* 0xe0 */
+	0x0430, 0x0431, 0x0432, 0x0433,
+	0x0434, 0x0435, 0x0436, 0x0437,
+	0x0438, 0x0439, 0x043a, 0x043b,
+	0x043c, 0x043d, 0x043e, 0x043f,
+	/* 0xf0 */
+	0x0440, 0x0441, 0x0442, 0x0443,
+	0x0444, 0x0445, 0x0446, 0x0447,
+	0x0448, 0x0449, 0x044a, 0x044b,
+	0x044c, 0x044d, 0x044e, 0x20ac,
+};
+
+static const unsigned char page00[256] = {
+	0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, /* 0x00-0x07 */
+	0x08, 0x09, 0x0a, 0x0b, 0x0c, 0x0d, 0x0e, 0x0f, /* 0x08-0x0f */
+	0x10, 0x11, 0x12, 0x13, 0x14, 0x15, 0x16, 0x17, /* 0x10-0x17 */
+	0x18, 0x19, 0x1a, 0x1b, 0x1c, 0x1d, 0x1e, 0x1f, /* 0x18-0x1f */
+	0x20, 0x21, 0x22, 0x23, 0x24, 0x25, 0x26, 0x27, /* 0x20-0x27 */
+	0x28, 0x29, 0x2a, 0x2b, 0x2c, 0x2d, 0x2e, 0x2f, /* 0x28-0x2f */
+	0x30, 0x31, 0x32, 0x33, 0x34, 0x35, 0x36, 0x37, /* 0x30-0x37 */
+	0x38, 0x39, 0x3a, 0x3b, 0x3c, 0x3d, 0x3e, 0x3f, /* 0x38-0x3f */
+	0x40, 0x41, 0x42, 0x43, 0x44, 0x45, 0x46, 0x47, /* 0x40-0x47 */
+	0x48, 0x49, 0x4a, 0x4b, 0x4c, 0x4d, 0x4e, 0x4f, /* 0x48-0x4f */
+	0x50, 0x51, 0x52, 0x53, 0x54, 0x55, 0x56, 0x57, /* 0x50-0x57 */
+	0x58, 0x59, 0x5a, 0x5b, 0x5c, 0x5d, 0x5e, 0x5f, /* 0x58-0x5f */
+	0x60, 0x61, 0x62, 0x63, 0x64, 0x65, 0x66, 0x67, /* 0x60-0x67 */
+	0x68, 0x69, 0x6a, 0x6b, 0x6c, 0x6d, 0x6e, 0x6f, /* 0x68-0x6f */
+	0x70, 0x71, 0x72, 0x73, 0x74, 0x75, 0x76, 0x77, /* 0x70-0x77 */
+	0x78, 0x79, 0x7a, 0x7b, 0x7c, 0x7d, 0x7e, 0x7f, /* 0x78-0x7f */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x80-0x87 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x88-0x8f */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x90-0x97 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x98-0x9f */
+	0xca, 0x00, 0x00, 0xa3, 0x00, 0x00, 0x00, 0xa4, /* 0xa0-0xa7 */
+	0x00, 0xa9, 0x00, 0xc7, 0xc2, 0x00, 0xa8, 0x00, /* 0xa8-0xaf */
+	0xa1, 0xb1, 0x00, 0x00, 0x00, 0xb5, 0xa6, 0x00, /* 0xb0-0xb7 */
+	0x00, 0x00, 0x00, 0xc8, 0x00, 0x00, 0x00, 0x00, /* 0xb8-0xbf */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xc0-0xc7 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xc8-0xcf */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xd0-0xd7 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xd8-0xdf */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xe0-0xe7 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xe8-0xef */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xd6, /* 0xf0-0xf7 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xf8-0xff */
+};
+
+static const unsigned char page01[256] = {
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x00-0x07 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x08-0x0f */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x10-0x17 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x18-0x1f */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x20-0x27 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x28-0x2f */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x30-0x37 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x38-0x3f */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x40-0x47 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x48-0x4f */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x50-0x57 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x58-0x5f */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x60-0x67 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x68-0x6f */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x70-0x77 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x78-0x7f */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x80-0x87 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x88-0x8f */
+	0x00, 0x00, 0xc4, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x90-0x97 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x98-0x9f */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xa0-0xa7 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xa8-0xaf */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xb0-0xb7 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xb8-0xbf */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xc0-0xc7 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xc8-0xcf */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xd0-0xd7 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xd8-0xdf */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xe0-0xe7 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xe8-0xef */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xf0-0xf7 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xf8-0xff */
+};
+
+static const unsigned char page04[256] = {
+	0x00, 0xdd, 0xab, 0xae, 0xb8, 0xc1, 0xa7, 0xba, /* 0x00-0x07 */
+	0xb7, 0xbc, 0xbe, 0xcb, 0xcd, 0x00, 0xd8, 0xda, /* 0x08-0x0f */
+	0x80, 0x81, 0x82, 0x83, 0x84, 0x85, 0x86, 0x87, /* 0x10-0x17 */
+	0x88, 0x89, 0x8a, 0x8b, 0x8c, 0x8d, 0x8e, 0x8f, /* 0x18-0x1f */
+	0x90, 0x91, 0x92, 0x93, 0x94, 0x95, 0x96, 0x97, /* 0x20-0x27 */
+	0x98, 0x99, 0x9a, 0x9b, 0x9c, 0x9d, 0x9e, 0x9f, /* 0x28-0x2f */
+	0xe0, 0xe1, 0xe2, 0xe3, 0xe4, 0xe5, 0xe6, 0xe7, /* 0x30-0x37 */
+	0xe8, 0xe9, 0xea, 0xeb, 0xec, 0xed, 0xee, 0xef, /* 0x38-0x3f */
+	0xf0, 0xf1, 0xf2, 0xf3, 0xf4, 0xf5, 0xf6, 0xf7, /* 0x40-0x47 */
+	0xf8, 0xf9, 0xfa, 0xfb, 0xfc, 0xfd, 0xfe, 0xdf, /* 0x48-0x4f */
+	0x00, 0xde, 0xac, 0xaf, 0xb9, 0xcf, 0xb4, 0xbb, /* 0x50-0x57 */
+	0xc0, 0xbd, 0xbf, 0xcc, 0xce, 0x00, 0xd9, 0xdb, /* 0x58-0x5f */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x60-0x67 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x68-0x6f */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x70-0x77 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x78-0x7f */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x80-0x87 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x88-0x8f */
+	0xa2, 0xb6, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x90-0x97 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x98-0x9f */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xa0-0xa7 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xa8-0xaf */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xb0-0xb7 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xb8-0xbf */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xc0-0xc7 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xc8-0xcf */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xd0-0xd7 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xd8-0xdf */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xe0-0xe7 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xe8-0xef */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xf0-0xf7 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xf8-0xff */
+};
+
+static const unsigned char page20[256] = {
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x00-0x07 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x08-0x0f */
+	0x00, 0x00, 0x00, 0xd0, 0xd1, 0x00, 0x00, 0x00, /* 0x10-0x17 */
+	0xd4, 0xd5, 0x00, 0x00, 0xd2, 0xd3, 0xd7, 0x00, /* 0x18-0x1f */
+	0xa0, 0x00, 0xa5, 0x00, 0x00, 0x00, 0xc9, 0x00, /* 0x20-0x27 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x28-0x2f */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x30-0x37 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x38-0x3f */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x40-0x47 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x48-0x4f */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x50-0x57 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x58-0x5f */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x60-0x67 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x68-0x6f */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x70-0x77 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x78-0x7f */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x80-0x87 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x88-0x8f */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x90-0x97 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x98-0x9f */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xa0-0xa7 */
+	0x00, 0x00, 0x00, 0x00, 0xff, 0x00, 0x00, 0x00, /* 0xa8-0xaf */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xb0-0xb7 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xb8-0xbf */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xc0-0xc7 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xc8-0xcf */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xd0-0xd7 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xd8-0xdf */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xe0-0xe7 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xe8-0xef */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xf0-0xf7 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xf8-0xff */
+};
+
+static const unsigned char page21[256] = {
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x00-0x07 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x08-0x0f */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xdc, 0x00, /* 0x10-0x17 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x18-0x1f */
+	0x00, 0x00, 0xaa, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x20-0x27 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x28-0x2f */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x30-0x37 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x38-0x3f */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x40-0x47 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x48-0x4f */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x50-0x57 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x58-0x5f */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x60-0x67 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x68-0x6f */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x70-0x77 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x78-0x7f */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x80-0x87 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x88-0x8f */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x90-0x97 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x98-0x9f */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xa0-0xa7 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xa8-0xaf */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xb0-0xb7 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xb8-0xbf */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xc0-0xc7 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xc8-0xcf */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xd0-0xd7 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xd8-0xdf */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xe0-0xe7 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xe8-0xef */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xf0-0xf7 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xf8-0xff */
+};
+
+static const unsigned char page22[256] = {
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xc6, 0x00, /* 0x00-0x07 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x08-0x0f */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x10-0x17 */
+	0x00, 0x00, 0xc3, 0x00, 0x00, 0x00, 0xb0, 0x00, /* 0x18-0x1f */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x20-0x27 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x28-0x2f */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x30-0x37 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x38-0x3f */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x40-0x47 */
+	0xc5, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x48-0x4f */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x50-0x57 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x58-0x5f */
+	0xad, 0x00, 0x00, 0x00, 0xb2, 0xb3, 0x00, 0x00, /* 0x60-0x67 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x68-0x6f */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x70-0x77 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x78-0x7f */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x80-0x87 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x88-0x8f */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x90-0x97 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x98-0x9f */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xa0-0xa7 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xa8-0xaf */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xb0-0xb7 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xb8-0xbf */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xc0-0xc7 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xc8-0xcf */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xd0-0xd7 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xd8-0xdf */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xe0-0xe7 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xe8-0xef */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xf0-0xf7 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xf8-0xff */
+};
+
+static const unsigned char *const page_uni2charset[256] = {
+	page00, page01, NULL,   NULL,   page04, NULL,   NULL,   NULL,
+	NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,
+	NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,
+	NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,
+	page20, page21, page22, NULL,   NULL,   NULL,   NULL,   NULL,
+	NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,
+	NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,
+	NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,
+	NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,
+	NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,
+	NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,
+	NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,
+	NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,
+	NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,
+	NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,
+	NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,
+	NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,
+	NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,
+	NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,
+	NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,
+	NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,
+	NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,
+	NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,
+	NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,
+	NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,
+	NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,
+	NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,
+	NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,
+	NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,
+	NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,
+	NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,
+	NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,
+};
+
+static const unsigned char charset2lower[256] = {
+	0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x00-0x07 */
+	0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x08-0x0f */
+	0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x10-0x17 */
+	0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x18-0x1f */
+	0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x20-0x27 */
+	0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x28-0x2f */
+	0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x30-0x37 */
+	0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x38-0x3f */
+	0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x40-0x47 */
+	0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x48-0x4f */
+	0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x50-0x57 */
+	0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x58-0x5f */
+	0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x60-0x67 */
+	0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x68-0x6f */
+	0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x70-0x77 */
+	0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x78-0x7f */
+	0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x80-0x87 */
+	0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x88-0x8f */
+	0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x90-0x97 */
+	0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x98-0x9f */
+	0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0xa0-0xa7 */
+	0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0xa8-0xaf */
+	0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0xb0-0xb7 */
+	0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0xb8-0xbf */
+	0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0xc0-0xc7 */
+	0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0xc8-0xcf */
+	0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0xd0-0xd7 */
+	0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0xd8-0xdf */
+	0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0xe0-0xe7 */
+	0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0xe8-0xef */
+	0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0xf0-0xf7 */
+	0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0xf8-0xff */
+};
+
+static const unsigned char charset2upper[256] = {
+	0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x00-0x07 */
+	0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x08-0x0f */
+	0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x10-0x17 */
+	0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x18-0x1f */
+	0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x20-0x27 */
+	0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x28-0x2f */
+	0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x30-0x37 */
+	0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x38-0x3f */
+	0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x40-0x47 */
+	0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x48-0x4f */
+	0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x50-0x57 */
+	0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x58-0x5f */
+	0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x60-0x67 */
+	0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x68-0x6f */
+	0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x70-0x77 */
+	0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x78-0x7f */
+	0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x80-0x87 */
+	0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x88-0x8f */
+	0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x90-0x97 */
+	0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x98-0x9f */
+	0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0xa0-0xa7 */
+	0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0xa8-0xaf */
+	0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0xb0-0xb7 */
+	0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0xb8-0xbf */
+	0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0xc0-0xc7 */
+	0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0xc8-0xcf */
+	0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0xd0-0xd7 */
+	0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0xd8-0xdf */
+	0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0xe0-0xe7 */
+	0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0xe8-0xef */
+	0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0xf0-0xf7 */
+	0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0xf8-0xff */
+};
+
+static int uni2char(wchar_t uni, unsigned char *out, int boundlen)
+{
+	const unsigned char *uni2charset;
+	unsigned char cl = uni & 0x00ff;
+	unsigned char ch = (uni & 0xff00) >> 8;
+
+	if (boundlen <= 0)
+		return -ENAMETOOLONG;
+
+	uni2charset = page_uni2charset[ch];
+	if (uni2charset && uni2charset[cl])
+		out[0] = uni2charset[cl];
+	else
+		return -EINVAL;
+	return 1;
+}
+
+static int char2uni(const unsigned char *rawstring, int boundlen, wchar_t *uni)
+{
+	*uni = charset2uni[*rawstring];
+	if (*uni == 0x0000)
+		return -EINVAL;
+	return 1;
+}
+
+static struct nls_table table = {
+	.charset	= "maccyrillic",
+	.uni2char	= uni2char,
+	.char2uni	= char2uni,
+	.charset2lower	= charset2lower,
+	.charset2upper	= charset2upper,
+	.owner		= THIS_MODULE,
+};
+
+static int __init init_nls_maccyrillic(void)
+{
+	return register_nls(&table);
+}
+
+static void __exit exit_nls_maccyrillic(void)
+{
+	unregister_nls(&table);
+}
+
+module_init(init_nls_maccyrillic)
+module_exit(exit_nls_maccyrillic)
+
+MODULE_LICENSE("Dual BSD/GPL");
diff --git a/fs/nls/mac-gaelic.c b/fs/nls/mac-gaelic.c
new file mode 100644
index 0000000..2de9158
--- /dev/null
+++ b/fs/nls/mac-gaelic.c
@@ -0,0 +1,567 @@
+/*
+ * linux/fs/nls/mac-gaelic.c
+ *
+ * Charset macgaelic translation tables.
+ * Generated automatically from the Unicode and charset
+ * tables from the Unicode Organization (www.unicode.org).
+ * The Unicode to charset table has only exact mappings.
+ */
+
+/*
+ * COPYRIGHT AND PERMISSION NOTICE
+ *
+ * Copyright 1991-2012 Unicode, Inc.  All rights reserved.  Distributed under
+ * the Terms of Use in http://www.unicode.org/copyright.html.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of the Unicode data files and any associated documentation (the "Data
+ * Files") or Unicode software and any associated documentation (the
+ * "Software") to deal in the Data Files or Software without restriction,
+ * including without limitation the rights to use, copy, modify, merge,
+ * publish, distribute, and/or sell copies of the Data Files or Software, and
+ * to permit persons to whom the Data Files or Software are furnished to do
+ * so, provided that (a) the above copyright notice(s) and this permission
+ * notice appear with all copies of the Data Files or Software, (b) both the
+ * above copyright notice(s) and this permission notice appear in associated
+ * documentation, and (c) there is clear notice in each modified Data File or
+ * in the Software as well as in the documentation associated with the Data
+ * File(s) or Software that the data or software has been modified.
+ *
+ * THE DATA FILES AND SOFTWARE ARE PROVIDED "AS IS", WITHOUT WARRANTY OF ANY
+ * KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF
+ * THIRD PARTY RIGHTS.  IN NO EVENT SHALL THE COPYRIGHT HOLDER OR HOLDERS
+ * INCLUDED IN THIS NOTICE BE LIABLE FOR ANY CLAIM, OR ANY SPECIAL INDIRECT
+ * OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF
+ * USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR
+ * OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
+ * PERFORMANCE OF THE DATA FILES OR SOFTWARE.
+ *
+ * Except as contained in this notice, the name of a copyright holder shall
+ * not be used in advertising or otherwise to promote the sale, use or other
+ * dealings in these Data Files or Software without prior written
+ * authorization of the copyright holder.
+ */
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/string.h>
+#include <linux/nls.h>
+#include <linux/errno.h>
+
+static const wchar_t charset2uni[256] = {
+	/* 0x00 */
+	0x0000, 0x0001, 0x0002, 0x0003,
+	0x0004, 0x0005, 0x0006, 0x0007,
+	0x0008, 0x0009, 0x000a, 0x000b,
+	0x000c, 0x000d, 0x000e, 0x000f,
+	/* 0x10 */
+	0x0010, 0x0011, 0x0012, 0x0013,
+	0x0014, 0x0015, 0x0016, 0x0017,
+	0x0018, 0x0019, 0x001a, 0x001b,
+	0x001c, 0x001d, 0x001e, 0x001f,
+	/* 0x20 */
+	0x0020, 0x0021, 0x0022, 0x0023,
+	0x0024, 0x0025, 0x0026, 0x0027,
+	0x0028, 0x0029, 0x002a, 0x002b,
+	0x002c, 0x002d, 0x002e, 0x002f,
+	/* 0x30 */
+	0x0030, 0x0031, 0x0032, 0x0033,
+	0x0034, 0x0035, 0x0036, 0x0037,
+	0x0038, 0x0039, 0x003a, 0x003b,
+	0x003c, 0x003d, 0x003e, 0x003f,
+	/* 0x40 */
+	0x0040, 0x0041, 0x0042, 0x0043,
+	0x0044, 0x0045, 0x0046, 0x0047,
+	0x0048, 0x0049, 0x004a, 0x004b,
+	0x004c, 0x004d, 0x004e, 0x004f,
+	/* 0x50 */
+	0x0050, 0x0051, 0x0052, 0x0053,
+	0x0054, 0x0055, 0x0056, 0x0057,
+	0x0058, 0x0059, 0x005a, 0x005b,
+	0x005c, 0x005d, 0x005e, 0x005f,
+	/* 0x60 */
+	0x0060, 0x0061, 0x0062, 0x0063,
+	0x0064, 0x0065, 0x0066, 0x0067,
+	0x0068, 0x0069, 0x006a, 0x006b,
+	0x006c, 0x006d, 0x006e, 0x006f,
+	/* 0x70 */
+	0x0070, 0x0071, 0x0072, 0x0073,
+	0x0074, 0x0075, 0x0076, 0x0077,
+	0x0078, 0x0079, 0x007a, 0x007b,
+	0x007c, 0x007d, 0x007e, 0x007f,
+	/* 0x80 */
+	0x00c4, 0x00c5, 0x00c7, 0x00c9,
+	0x00d1, 0x00d6, 0x00dc, 0x00e1,
+	0x00e0, 0x00e2, 0x00e4, 0x00e3,
+	0x00e5, 0x00e7, 0x00e9, 0x00e8,
+	/* 0x90 */
+	0x00ea, 0x00eb, 0x00ed, 0x00ec,
+	0x00ee, 0x00ef, 0x00f1, 0x00f3,
+	0x00f2, 0x00f4, 0x00f6, 0x00f5,
+	0x00fa, 0x00f9, 0x00fb, 0x00fc,
+	/* 0xa0 */
+	0x2020, 0x00b0, 0x00a2, 0x00a3,
+	0x00a7, 0x2022, 0x00b6, 0x00df,
+	0x00ae, 0x00a9, 0x2122, 0x00b4,
+	0x00a8, 0x2260, 0x00c6, 0x00d8,
+	/* 0xb0 */
+	0x1e02, 0x00b1, 0x2264, 0x2265,
+	0x1e03, 0x010a, 0x010b, 0x1e0a,
+	0x1e0b, 0x1e1e, 0x1e1f, 0x0120,
+	0x0121, 0x1e40, 0x00e6, 0x00f8,
+	/* 0xc0 */
+	0x1e41, 0x1e56, 0x1e57, 0x027c,
+	0x0192, 0x017f, 0x1e60, 0x00ab,
+	0x00bb, 0x2026, 0x00a0, 0x00c0,
+	0x00c3, 0x00d5, 0x0152, 0x0153,
+	/* 0xd0 */
+	0x2013, 0x2014, 0x201c, 0x201d,
+	0x2018, 0x2019, 0x1e61, 0x1e9b,
+	0x00ff, 0x0178, 0x1e6a, 0x20ac,
+	0x2039, 0x203a, 0x0176, 0x0177,
+	/* 0xe0 */
+	0x1e6b, 0x00b7, 0x1ef2, 0x1ef3,
+	0x204a, 0x00c2, 0x00ca, 0x00c1,
+	0x00cb, 0x00c8, 0x00cd, 0x00ce,
+	0x00cf, 0x00cc, 0x00d3, 0x00d4,
+	/* 0xf0 */
+	0x2663, 0x00d2, 0x00da, 0x00db,
+	0x00d9, 0x0131, 0x00dd, 0x00fd,
+	0x0174, 0x0175, 0x1e84, 0x1e85,
+	0x1e80, 0x1e81, 0x1e82, 0x1e83,
+};
+
+static const unsigned char page00[256] = {
+	0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, /* 0x00-0x07 */
+	0x08, 0x09, 0x0a, 0x0b, 0x0c, 0x0d, 0x0e, 0x0f, /* 0x08-0x0f */
+	0x10, 0x11, 0x12, 0x13, 0x14, 0x15, 0x16, 0x17, /* 0x10-0x17 */
+	0x18, 0x19, 0x1a, 0x1b, 0x1c, 0x1d, 0x1e, 0x1f, /* 0x18-0x1f */
+	0x20, 0x21, 0x22, 0x23, 0x24, 0x25, 0x26, 0x27, /* 0x20-0x27 */
+	0x28, 0x29, 0x2a, 0x2b, 0x2c, 0x2d, 0x2e, 0x2f, /* 0x28-0x2f */
+	0x30, 0x31, 0x32, 0x33, 0x34, 0x35, 0x36, 0x37, /* 0x30-0x37 */
+	0x38, 0x39, 0x3a, 0x3b, 0x3c, 0x3d, 0x3e, 0x3f, /* 0x38-0x3f */
+	0x40, 0x41, 0x42, 0x43, 0x44, 0x45, 0x46, 0x47, /* 0x40-0x47 */
+	0x48, 0x49, 0x4a, 0x4b, 0x4c, 0x4d, 0x4e, 0x4f, /* 0x48-0x4f */
+	0x50, 0x51, 0x52, 0x53, 0x54, 0x55, 0x56, 0x57, /* 0x50-0x57 */
+	0x58, 0x59, 0x5a, 0x5b, 0x5c, 0x5d, 0x5e, 0x5f, /* 0x58-0x5f */
+	0x60, 0x61, 0x62, 0x63, 0x64, 0x65, 0x66, 0x67, /* 0x60-0x67 */
+	0x68, 0x69, 0x6a, 0x6b, 0x6c, 0x6d, 0x6e, 0x6f, /* 0x68-0x6f */
+	0x70, 0x71, 0x72, 0x73, 0x74, 0x75, 0x76, 0x77, /* 0x70-0x77 */
+	0x78, 0x79, 0x7a, 0x7b, 0x7c, 0x7d, 0x7e, 0x7f, /* 0x78-0x7f */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x80-0x87 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x88-0x8f */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x90-0x97 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x98-0x9f */
+	0xca, 0x00, 0xa2, 0xa3, 0x00, 0x00, 0x00, 0xa4, /* 0xa0-0xa7 */
+	0xac, 0xa9, 0x00, 0xc7, 0x00, 0x00, 0xa8, 0x00, /* 0xa8-0xaf */
+	0xa1, 0xb1, 0x00, 0x00, 0xab, 0x00, 0xa6, 0xe1, /* 0xb0-0xb7 */
+	0x00, 0x00, 0x00, 0xc8, 0x00, 0x00, 0x00, 0x00, /* 0xb8-0xbf */
+	0xcb, 0xe7, 0xe5, 0xcc, 0x80, 0x81, 0xae, 0x82, /* 0xc0-0xc7 */
+	0xe9, 0x83, 0xe6, 0xe8, 0xed, 0xea, 0xeb, 0xec, /* 0xc8-0xcf */
+	0x00, 0x84, 0xf1, 0xee, 0xef, 0xcd, 0x85, 0x00, /* 0xd0-0xd7 */
+	0xaf, 0xf4, 0xf2, 0xf3, 0x86, 0xf6, 0x00, 0xa7, /* 0xd8-0xdf */
+	0x88, 0x87, 0x89, 0x8b, 0x8a, 0x8c, 0xbe, 0x8d, /* 0xe0-0xe7 */
+	0x8f, 0x8e, 0x90, 0x91, 0x93, 0x92, 0x94, 0x95, /* 0xe8-0xef */
+	0x00, 0x96, 0x98, 0x97, 0x99, 0x9b, 0x9a, 0x00, /* 0xf0-0xf7 */
+	0xbf, 0x9d, 0x9c, 0x9e, 0x9f, 0xf7, 0x00, 0xd8, /* 0xf8-0xff */
+};
+
+static const unsigned char page01[256] = {
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x00-0x07 */
+	0x00, 0x00, 0xb5, 0xb6, 0x00, 0x00, 0x00, 0x00, /* 0x08-0x0f */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x10-0x17 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x18-0x1f */
+	0xbb, 0xbc, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x20-0x27 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x28-0x2f */
+	0x00, 0xf5, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x30-0x37 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x38-0x3f */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x40-0x47 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x48-0x4f */
+	0x00, 0x00, 0xce, 0xcf, 0x00, 0x00, 0x00, 0x00, /* 0x50-0x57 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x58-0x5f */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x60-0x67 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x68-0x6f */
+	0x00, 0x00, 0x00, 0x00, 0xf8, 0xf9, 0xde, 0xdf, /* 0x70-0x77 */
+	0xd9, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xc5, /* 0x78-0x7f */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x80-0x87 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x88-0x8f */
+	0x00, 0x00, 0xc4, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x90-0x97 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x98-0x9f */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xa0-0xa7 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xa8-0xaf */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xb0-0xb7 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xb8-0xbf */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xc0-0xc7 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xc8-0xcf */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xd0-0xd7 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xd8-0xdf */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xe0-0xe7 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xe8-0xef */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xf0-0xf7 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xf8-0xff */
+};
+
+static const unsigned char page02[256] = {
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x00-0x07 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x08-0x0f */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x10-0x17 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x18-0x1f */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x20-0x27 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x28-0x2f */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x30-0x37 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x38-0x3f */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x40-0x47 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x48-0x4f */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x50-0x57 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x58-0x5f */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x60-0x67 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x68-0x6f */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x70-0x77 */
+	0x00, 0x00, 0x00, 0x00, 0xc3, 0x00, 0x00, 0x00, /* 0x78-0x7f */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x80-0x87 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x88-0x8f */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x90-0x97 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x98-0x9f */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xa0-0xa7 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xa8-0xaf */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xb0-0xb7 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xb8-0xbf */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xc0-0xc7 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xc8-0xcf */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xd0-0xd7 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xd8-0xdf */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xe0-0xe7 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xe8-0xef */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xf0-0xf7 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xf8-0xff */
+};
+
+static const unsigned char page1e[256] = {
+	0x00, 0x00, 0xb0, 0xb4, 0x00, 0x00, 0x00, 0x00, /* 0x00-0x07 */
+	0x00, 0x00, 0xb7, 0xb8, 0x00, 0x00, 0x00, 0x00, /* 0x08-0x0f */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x10-0x17 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xb9, 0xba, /* 0x18-0x1f */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x20-0x27 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x28-0x2f */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x30-0x37 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x38-0x3f */
+	0xbd, 0xc0, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x40-0x47 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x48-0x4f */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xc1, 0xc2, /* 0x50-0x57 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x58-0x5f */
+	0xc6, 0xd6, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x60-0x67 */
+	0x00, 0x00, 0xda, 0xe0, 0x00, 0x00, 0x00, 0x00, /* 0x68-0x6f */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x70-0x77 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x78-0x7f */
+	0xfc, 0xfd, 0xfe, 0xff, 0xfa, 0xfb, 0x00, 0x00, /* 0x80-0x87 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x88-0x8f */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x90-0x97 */
+	0x00, 0x00, 0x00, 0xd7, 0x00, 0x00, 0x00, 0x00, /* 0x98-0x9f */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xa0-0xa7 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xa8-0xaf */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xb0-0xb7 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xb8-0xbf */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xc0-0xc7 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xc8-0xcf */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xd0-0xd7 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xd8-0xdf */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xe0-0xe7 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xe8-0xef */
+	0x00, 0x00, 0xe2, 0xe3, 0x00, 0x00, 0x00, 0x00, /* 0xf0-0xf7 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xf8-0xff */
+};
+
+static const unsigned char page20[256] = {
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x00-0x07 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x08-0x0f */
+	0x00, 0x00, 0x00, 0xd0, 0xd1, 0x00, 0x00, 0x00, /* 0x10-0x17 */
+	0xd4, 0xd5, 0x00, 0x00, 0xd2, 0xd3, 0x00, 0x00, /* 0x18-0x1f */
+	0xa0, 0x00, 0xa5, 0x00, 0x00, 0x00, 0xc9, 0x00, /* 0x20-0x27 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x28-0x2f */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x30-0x37 */
+	0x00, 0xdc, 0xdd, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x38-0x3f */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x40-0x47 */
+	0x00, 0x00, 0xe4, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x48-0x4f */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x50-0x57 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x58-0x5f */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x60-0x67 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x68-0x6f */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x70-0x77 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x78-0x7f */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x80-0x87 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x88-0x8f */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x90-0x97 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x98-0x9f */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xa0-0xa7 */
+	0x00, 0x00, 0x00, 0x00, 0xdb, 0x00, 0x00, 0x00, /* 0xa8-0xaf */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xb0-0xb7 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xb8-0xbf */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xc0-0xc7 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xc8-0xcf */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xd0-0xd7 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xd8-0xdf */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xe0-0xe7 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xe8-0xef */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xf0-0xf7 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xf8-0xff */
+};
+
+static const unsigned char page21[256] = {
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x00-0x07 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x08-0x0f */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x10-0x17 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x18-0x1f */
+	0x00, 0x00, 0xaa, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x20-0x27 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x28-0x2f */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x30-0x37 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x38-0x3f */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x40-0x47 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x48-0x4f */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x50-0x57 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x58-0x5f */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x60-0x67 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x68-0x6f */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x70-0x77 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x78-0x7f */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x80-0x87 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x88-0x8f */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x90-0x97 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x98-0x9f */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xa0-0xa7 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xa8-0xaf */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xb0-0xb7 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xb8-0xbf */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xc0-0xc7 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xc8-0xcf */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xd0-0xd7 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xd8-0xdf */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xe0-0xe7 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xe8-0xef */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xf0-0xf7 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xf8-0xff */
+};
+
+static const unsigned char page22[256] = {
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x00-0x07 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x08-0x0f */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x10-0x17 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x18-0x1f */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x20-0x27 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x28-0x2f */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x30-0x37 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x38-0x3f */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x40-0x47 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x48-0x4f */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x50-0x57 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x58-0x5f */
+	0xad, 0x00, 0x00, 0x00, 0xb2, 0xb3, 0x00, 0x00, /* 0x60-0x67 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x68-0x6f */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x70-0x77 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x78-0x7f */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x80-0x87 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x88-0x8f */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x90-0x97 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x98-0x9f */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xa0-0xa7 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xa8-0xaf */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xb0-0xb7 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xb8-0xbf */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xc0-0xc7 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xc8-0xcf */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xd0-0xd7 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xd8-0xdf */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xe0-0xe7 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xe8-0xef */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xf0-0xf7 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xf8-0xff */
+};
+
+static const unsigned char page26[256] = {
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x00-0x07 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x08-0x0f */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x10-0x17 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x18-0x1f */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x20-0x27 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x28-0x2f */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x30-0x37 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x38-0x3f */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x40-0x47 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x48-0x4f */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x50-0x57 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x58-0x5f */
+	0x00, 0x00, 0x00, 0xf0, 0x00, 0x00, 0x00, 0x00, /* 0x60-0x67 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x68-0x6f */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x70-0x77 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x78-0x7f */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x80-0x87 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x88-0x8f */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x90-0x97 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x98-0x9f */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xa0-0xa7 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xa8-0xaf */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xb0-0xb7 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xb8-0xbf */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xc0-0xc7 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xc8-0xcf */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xd0-0xd7 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xd8-0xdf */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xe0-0xe7 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xe8-0xef */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xf0-0xf7 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xf8-0xff */
+};
+
+static const unsigned char *const page_uni2charset[256] = {
+	page00, page01, page02, NULL,   NULL,   NULL,   NULL,   NULL,
+	NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,
+	NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,
+	NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   page1e, NULL,
+	page20, page21, page22, NULL,   NULL,   NULL,   page26, NULL,
+	NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,
+	NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,
+	NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,
+	NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,
+	NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,
+	NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,
+	NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,
+	NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,
+	NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,
+	NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,
+	NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,
+	NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,
+	NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,
+	NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,
+	NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,
+	NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,
+	NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,
+	NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,
+	NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,
+	NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,
+	NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,
+	NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,
+	NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,
+	NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,
+	NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,
+	NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,
+	NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,
+};
+
+static const unsigned char charset2lower[256] = {
+	0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x00-0x07 */
+	0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x08-0x0f */
+	0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x10-0x17 */
+	0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x18-0x1f */
+	0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x20-0x27 */
+	0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x28-0x2f */
+	0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x30-0x37 */
+	0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x38-0x3f */
+	0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x40-0x47 */
+	0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x48-0x4f */
+	0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x50-0x57 */
+	0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x58-0x5f */
+	0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x60-0x67 */
+	0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x68-0x6f */
+	0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x70-0x77 */
+	0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x78-0x7f */
+	0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x80-0x87 */
+	0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x88-0x8f */
+	0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x90-0x97 */
+	0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x98-0x9f */
+	0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0xa0-0xa7 */
+	0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0xa8-0xaf */
+	0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0xb0-0xb7 */
+	0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0xb8-0xbf */
+	0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0xc0-0xc7 */
+	0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0xc8-0xcf */
+	0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0xd0-0xd7 */
+	0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0xd8-0xdf */
+	0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0xe0-0xe7 */
+	0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0xe8-0xef */
+	0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0xf0-0xf7 */
+	0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0xf8-0xff */
+};
+
+static const unsigned char charset2upper[256] = {
+	0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x00-0x07 */
+	0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x08-0x0f */
+	0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x10-0x17 */
+	0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x18-0x1f */
+	0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x20-0x27 */
+	0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x28-0x2f */
+	0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x30-0x37 */
+	0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x38-0x3f */
+	0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x40-0x47 */
+	0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x48-0x4f */
+	0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x50-0x57 */
+	0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x58-0x5f */
+	0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x60-0x67 */
+	0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x68-0x6f */
+	0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x70-0x77 */
+	0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x78-0x7f */
+	0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x80-0x87 */
+	0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x88-0x8f */
+	0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x90-0x97 */
+	0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x98-0x9f */
+	0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0xa0-0xa7 */
+	0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0xa8-0xaf */
+	0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0xb0-0xb7 */
+	0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0xb8-0xbf */
+	0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0xc0-0xc7 */
+	0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0xc8-0xcf */
+	0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0xd0-0xd7 */
+	0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0xd8-0xdf */
+	0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0xe0-0xe7 */
+	0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0xe8-0xef */
+	0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0xf0-0xf7 */
+	0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0xf8-0xff */
+};
+
+static int uni2char(wchar_t uni, unsigned char *out, int boundlen)
+{
+	const unsigned char *uni2charset;
+	unsigned char cl = uni & 0x00ff;
+	unsigned char ch = (uni & 0xff00) >> 8;
+
+	if (boundlen <= 0)
+		return -ENAMETOOLONG;
+
+	uni2charset = page_uni2charset[ch];
+	if (uni2charset && uni2charset[cl])
+		out[0] = uni2charset[cl];
+	else
+		return -EINVAL;
+	return 1;
+}
+
+static int char2uni(const unsigned char *rawstring, int boundlen, wchar_t *uni)
+{
+	*uni = charset2uni[*rawstring];
+	if (*uni == 0x0000)
+		return -EINVAL;
+	return 1;
+}
+
+static struct nls_table table = {
+	.charset	= "macgaelic",
+	.uni2char	= uni2char,
+	.char2uni	= char2uni,
+	.charset2lower	= charset2lower,
+	.charset2upper	= charset2upper,
+	.owner		= THIS_MODULE,
+};
+
+static int __init init_nls_macgaelic(void)
+{
+	return register_nls(&table);
+}
+
+static void __exit exit_nls_macgaelic(void)
+{
+	unregister_nls(&table);
+}
+
+module_init(init_nls_macgaelic)
+module_exit(exit_nls_macgaelic)
+
+MODULE_LICENSE("Dual BSD/GPL");
diff --git a/fs/nls/mac-greek.c b/fs/nls/mac-greek.c
new file mode 100644
index 0000000..a863100
--- /dev/null
+++ b/fs/nls/mac-greek.c
@@ -0,0 +1,497 @@
+/*
+ * linux/fs/nls/mac-greek.c
+ *
+ * Charset macgreek translation tables.
+ * Generated automatically from the Unicode and charset
+ * tables from the Unicode Organization (www.unicode.org).
+ * The Unicode to charset table has only exact mappings.
+ */
+
+/*
+ * COPYRIGHT AND PERMISSION NOTICE
+ *
+ * Copyright 1991-2012 Unicode, Inc.  All rights reserved.  Distributed under
+ * the Terms of Use in http://www.unicode.org/copyright.html.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of the Unicode data files and any associated documentation (the "Data
+ * Files") or Unicode software and any associated documentation (the
+ * "Software") to deal in the Data Files or Software without restriction,
+ * including without limitation the rights to use, copy, modify, merge,
+ * publish, distribute, and/or sell copies of the Data Files or Software, and
+ * to permit persons to whom the Data Files or Software are furnished to do
+ * so, provided that (a) the above copyright notice(s) and this permission
+ * notice appear with all copies of the Data Files or Software, (b) both the
+ * above copyright notice(s) and this permission notice appear in associated
+ * documentation, and (c) there is clear notice in each modified Data File or
+ * in the Software as well as in the documentation associated with the Data
+ * File(s) or Software that the data or software has been modified.
+ *
+ * THE DATA FILES AND SOFTWARE ARE PROVIDED "AS IS", WITHOUT WARRANTY OF ANY
+ * KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF
+ * THIRD PARTY RIGHTS.  IN NO EVENT SHALL THE COPYRIGHT HOLDER OR HOLDERS
+ * INCLUDED IN THIS NOTICE BE LIABLE FOR ANY CLAIM, OR ANY SPECIAL INDIRECT
+ * OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF
+ * USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR
+ * OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
+ * PERFORMANCE OF THE DATA FILES OR SOFTWARE.
+ *
+ * Except as contained in this notice, the name of a copyright holder shall
+ * not be used in advertising or otherwise to promote the sale, use or other
+ * dealings in these Data Files or Software without prior written
+ * authorization of the copyright holder.
+ */
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/string.h>
+#include <linux/nls.h>
+#include <linux/errno.h>
+
+static const wchar_t charset2uni[256] = {
+	/* 0x00 */
+	0x0000, 0x0001, 0x0002, 0x0003,
+	0x0004, 0x0005, 0x0006, 0x0007,
+	0x0008, 0x0009, 0x000a, 0x000b,
+	0x000c, 0x000d, 0x000e, 0x000f,
+	/* 0x10 */
+	0x0010, 0x0011, 0x0012, 0x0013,
+	0x0014, 0x0015, 0x0016, 0x0017,
+	0x0018, 0x0019, 0x001a, 0x001b,
+	0x001c, 0x001d, 0x001e, 0x001f,
+	/* 0x20 */
+	0x0020, 0x0021, 0x0022, 0x0023,
+	0x0024, 0x0025, 0x0026, 0x0027,
+	0x0028, 0x0029, 0x002a, 0x002b,
+	0x002c, 0x002d, 0x002e, 0x002f,
+	/* 0x30 */
+	0x0030, 0x0031, 0x0032, 0x0033,
+	0x0034, 0x0035, 0x0036, 0x0037,
+	0x0038, 0x0039, 0x003a, 0x003b,
+	0x003c, 0x003d, 0x003e, 0x003f,
+	/* 0x40 */
+	0x0040, 0x0041, 0x0042, 0x0043,
+	0x0044, 0x0045, 0x0046, 0x0047,
+	0x0048, 0x0049, 0x004a, 0x004b,
+	0x004c, 0x004d, 0x004e, 0x004f,
+	/* 0x50 */
+	0x0050, 0x0051, 0x0052, 0x0053,
+	0x0054, 0x0055, 0x0056, 0x0057,
+	0x0058, 0x0059, 0x005a, 0x005b,
+	0x005c, 0x005d, 0x005e, 0x005f,
+	/* 0x60 */
+	0x0060, 0x0061, 0x0062, 0x0063,
+	0x0064, 0x0065, 0x0066, 0x0067,
+	0x0068, 0x0069, 0x006a, 0x006b,
+	0x006c, 0x006d, 0x006e, 0x006f,
+	/* 0x70 */
+	0x0070, 0x0071, 0x0072, 0x0073,
+	0x0074, 0x0075, 0x0076, 0x0077,
+	0x0078, 0x0079, 0x007a, 0x007b,
+	0x007c, 0x007d, 0x007e, 0x007f,
+	/* 0x80 */
+	0x00c4, 0x00b9, 0x00b2, 0x00c9,
+	0x00b3, 0x00d6, 0x00dc, 0x0385,
+	0x00e0, 0x00e2, 0x00e4, 0x0384,
+	0x00a8, 0x00e7, 0x00e9, 0x00e8,
+	/* 0x90 */
+	0x00ea, 0x00eb, 0x00a3, 0x2122,
+	0x00ee, 0x00ef, 0x2022, 0x00bd,
+	0x2030, 0x00f4, 0x00f6, 0x00a6,
+	0x20ac, 0x00f9, 0x00fb, 0x00fc,
+	/* 0xa0 */
+	0x2020, 0x0393, 0x0394, 0x0398,
+	0x039b, 0x039e, 0x03a0, 0x00df,
+	0x00ae, 0x00a9, 0x03a3, 0x03aa,
+	0x00a7, 0x2260, 0x00b0, 0x00b7,
+	/* 0xb0 */
+	0x0391, 0x00b1, 0x2264, 0x2265,
+	0x00a5, 0x0392, 0x0395, 0x0396,
+	0x0397, 0x0399, 0x039a, 0x039c,
+	0x03a6, 0x03ab, 0x03a8, 0x03a9,
+	/* 0xc0 */
+	0x03ac, 0x039d, 0x00ac, 0x039f,
+	0x03a1, 0x2248, 0x03a4, 0x00ab,
+	0x00bb, 0x2026, 0x00a0, 0x03a5,
+	0x03a7, 0x0386, 0x0388, 0x0153,
+	/* 0xd0 */
+	0x2013, 0x2015, 0x201c, 0x201d,
+	0x2018, 0x2019, 0x00f7, 0x0389,
+	0x038a, 0x038c, 0x038e, 0x03ad,
+	0x03ae, 0x03af, 0x03cc, 0x038f,
+	/* 0xe0 */
+	0x03cd, 0x03b1, 0x03b2, 0x03c8,
+	0x03b4, 0x03b5, 0x03c6, 0x03b3,
+	0x03b7, 0x03b9, 0x03be, 0x03ba,
+	0x03bb, 0x03bc, 0x03bd, 0x03bf,
+	/* 0xf0 */
+	0x03c0, 0x03ce, 0x03c1, 0x03c3,
+	0x03c4, 0x03b8, 0x03c9, 0x03c2,
+	0x03c7, 0x03c5, 0x03b6, 0x03ca,
+	0x03cb, 0x0390, 0x03b0, 0x00ad,
+};
+
+static const unsigned char page00[256] = {
+	0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, /* 0x00-0x07 */
+	0x08, 0x09, 0x0a, 0x0b, 0x0c, 0x0d, 0x0e, 0x0f, /* 0x08-0x0f */
+	0x10, 0x11, 0x12, 0x13, 0x14, 0x15, 0x16, 0x17, /* 0x10-0x17 */
+	0x18, 0x19, 0x1a, 0x1b, 0x1c, 0x1d, 0x1e, 0x1f, /* 0x18-0x1f */
+	0x20, 0x21, 0x22, 0x23, 0x24, 0x25, 0x26, 0x27, /* 0x20-0x27 */
+	0x28, 0x29, 0x2a, 0x2b, 0x2c, 0x2d, 0x2e, 0x2f, /* 0x28-0x2f */
+	0x30, 0x31, 0x32, 0x33, 0x34, 0x35, 0x36, 0x37, /* 0x30-0x37 */
+	0x38, 0x39, 0x3a, 0x3b, 0x3c, 0x3d, 0x3e, 0x3f, /* 0x38-0x3f */
+	0x40, 0x41, 0x42, 0x43, 0x44, 0x45, 0x46, 0x47, /* 0x40-0x47 */
+	0x48, 0x49, 0x4a, 0x4b, 0x4c, 0x4d, 0x4e, 0x4f, /* 0x48-0x4f */
+	0x50, 0x51, 0x52, 0x53, 0x54, 0x55, 0x56, 0x57, /* 0x50-0x57 */
+	0x58, 0x59, 0x5a, 0x5b, 0x5c, 0x5d, 0x5e, 0x5f, /* 0x58-0x5f */
+	0x60, 0x61, 0x62, 0x63, 0x64, 0x65, 0x66, 0x67, /* 0x60-0x67 */
+	0x68, 0x69, 0x6a, 0x6b, 0x6c, 0x6d, 0x6e, 0x6f, /* 0x68-0x6f */
+	0x70, 0x71, 0x72, 0x73, 0x74, 0x75, 0x76, 0x77, /* 0x70-0x77 */
+	0x78, 0x79, 0x7a, 0x7b, 0x7c, 0x7d, 0x7e, 0x7f, /* 0x78-0x7f */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x80-0x87 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x88-0x8f */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x90-0x97 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x98-0x9f */
+	0xca, 0x00, 0x00, 0x92, 0x00, 0xb4, 0x9b, 0xac, /* 0xa0-0xa7 */
+	0x8c, 0xa9, 0x00, 0xc7, 0xc2, 0xff, 0xa8, 0x00, /* 0xa8-0xaf */
+	0xae, 0xb1, 0x82, 0x84, 0x00, 0x00, 0x00, 0xaf, /* 0xb0-0xb7 */
+	0x00, 0x81, 0x00, 0xc8, 0x00, 0x97, 0x00, 0x00, /* 0xb8-0xbf */
+	0x00, 0x00, 0x00, 0x00, 0x80, 0x00, 0x00, 0x00, /* 0xc0-0xc7 */
+	0x00, 0x83, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xc8-0xcf */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x85, 0x00, /* 0xd0-0xd7 */
+	0x00, 0x00, 0x00, 0x00, 0x86, 0x00, 0x00, 0xa7, /* 0xd8-0xdf */
+	0x88, 0x00, 0x89, 0x00, 0x8a, 0x00, 0x00, 0x8d, /* 0xe0-0xe7 */
+	0x8f, 0x8e, 0x90, 0x91, 0x00, 0x00, 0x94, 0x95, /* 0xe8-0xef */
+	0x00, 0x00, 0x00, 0x00, 0x99, 0x00, 0x9a, 0xd6, /* 0xf0-0xf7 */
+	0x00, 0x9d, 0x00, 0x9e, 0x9f, 0x00, 0x00, 0x00, /* 0xf8-0xff */
+};
+
+static const unsigned char page01[256] = {
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x00-0x07 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x08-0x0f */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x10-0x17 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x18-0x1f */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x20-0x27 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x28-0x2f */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x30-0x37 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x38-0x3f */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x40-0x47 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x48-0x4f */
+	0x00, 0x00, 0x00, 0xcf, 0x00, 0x00, 0x00, 0x00, /* 0x50-0x57 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x58-0x5f */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x60-0x67 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x68-0x6f */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x70-0x77 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x78-0x7f */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x80-0x87 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x88-0x8f */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x90-0x97 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x98-0x9f */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xa0-0xa7 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xa8-0xaf */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xb0-0xb7 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xb8-0xbf */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xc0-0xc7 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xc8-0xcf */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xd0-0xd7 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xd8-0xdf */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xe0-0xe7 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xe8-0xef */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xf0-0xf7 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xf8-0xff */
+};
+
+static const unsigned char page03[256] = {
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x00-0x07 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x08-0x0f */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x10-0x17 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x18-0x1f */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x20-0x27 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x28-0x2f */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x30-0x37 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x38-0x3f */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x40-0x47 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x48-0x4f */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x50-0x57 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x58-0x5f */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x60-0x67 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x68-0x6f */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x70-0x77 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x78-0x7f */
+	0x00, 0x00, 0x00, 0x00, 0x8b, 0x87, 0xcd, 0x00, /* 0x80-0x87 */
+	0xce, 0xd7, 0xd8, 0x00, 0xd9, 0x00, 0xda, 0xdf, /* 0x88-0x8f */
+	0xfd, 0xb0, 0xb5, 0xa1, 0xa2, 0xb6, 0xb7, 0xb8, /* 0x90-0x97 */
+	0xa3, 0xb9, 0xba, 0xa4, 0xbb, 0xc1, 0xa5, 0xc3, /* 0x98-0x9f */
+	0xa6, 0xc4, 0x00, 0xaa, 0xc6, 0xcb, 0xbc, 0xcc, /* 0xa0-0xa7 */
+	0xbe, 0xbf, 0xab, 0xbd, 0xc0, 0xdb, 0xdc, 0xdd, /* 0xa8-0xaf */
+	0xfe, 0xe1, 0xe2, 0xe7, 0xe4, 0xe5, 0xfa, 0xe8, /* 0xb0-0xb7 */
+	0xf5, 0xe9, 0xeb, 0xec, 0xed, 0xee, 0xea, 0xef, /* 0xb8-0xbf */
+	0xf0, 0xf2, 0xf7, 0xf3, 0xf4, 0xf9, 0xe6, 0xf8, /* 0xc0-0xc7 */
+	0xe3, 0xf6, 0xfb, 0xfc, 0xde, 0xe0, 0xf1, 0x00, /* 0xc8-0xcf */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xd0-0xd7 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xd8-0xdf */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xe0-0xe7 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xe8-0xef */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xf0-0xf7 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xf8-0xff */
+};
+
+static const unsigned char page20[256] = {
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x00-0x07 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x08-0x0f */
+	0x00, 0x00, 0x00, 0xd0, 0x00, 0xd1, 0x00, 0x00, /* 0x10-0x17 */
+	0xd4, 0xd5, 0x00, 0x00, 0xd2, 0xd3, 0x00, 0x00, /* 0x18-0x1f */
+	0xa0, 0x00, 0x96, 0x00, 0x00, 0x00, 0xc9, 0x00, /* 0x20-0x27 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x28-0x2f */
+	0x98, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x30-0x37 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x38-0x3f */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x40-0x47 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x48-0x4f */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x50-0x57 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x58-0x5f */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x60-0x67 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x68-0x6f */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x70-0x77 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x78-0x7f */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x80-0x87 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x88-0x8f */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x90-0x97 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x98-0x9f */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xa0-0xa7 */
+	0x00, 0x00, 0x00, 0x00, 0x9c, 0x00, 0x00, 0x00, /* 0xa8-0xaf */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xb0-0xb7 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xb8-0xbf */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xc0-0xc7 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xc8-0xcf */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xd0-0xd7 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xd8-0xdf */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xe0-0xe7 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xe8-0xef */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xf0-0xf7 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xf8-0xff */
+};
+
+static const unsigned char page21[256] = {
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x00-0x07 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x08-0x0f */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x10-0x17 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x18-0x1f */
+	0x00, 0x00, 0x93, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x20-0x27 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x28-0x2f */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x30-0x37 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x38-0x3f */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x40-0x47 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x48-0x4f */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x50-0x57 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x58-0x5f */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x60-0x67 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x68-0x6f */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x70-0x77 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x78-0x7f */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x80-0x87 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x88-0x8f */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x90-0x97 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x98-0x9f */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xa0-0xa7 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xa8-0xaf */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xb0-0xb7 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xb8-0xbf */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xc0-0xc7 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xc8-0xcf */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xd0-0xd7 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xd8-0xdf */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xe0-0xe7 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xe8-0xef */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xf0-0xf7 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xf8-0xff */
+};
+
+static const unsigned char page22[256] = {
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x00-0x07 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x08-0x0f */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x10-0x17 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x18-0x1f */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x20-0x27 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x28-0x2f */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x30-0x37 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x38-0x3f */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x40-0x47 */
+	0xc5, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x48-0x4f */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x50-0x57 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x58-0x5f */
+	0xad, 0x00, 0x00, 0x00, 0xb2, 0xb3, 0x00, 0x00, /* 0x60-0x67 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x68-0x6f */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x70-0x77 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x78-0x7f */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x80-0x87 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x88-0x8f */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x90-0x97 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x98-0x9f */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xa0-0xa7 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xa8-0xaf */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xb0-0xb7 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xb8-0xbf */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xc0-0xc7 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xc8-0xcf */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xd0-0xd7 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xd8-0xdf */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xe0-0xe7 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xe8-0xef */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xf0-0xf7 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xf8-0xff */
+};
+
+static const unsigned char *const page_uni2charset[256] = {
+	page00, page01, NULL,   page03, NULL,   NULL,   NULL,   NULL,
+	NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,
+	NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,
+	NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,
+	page20, page21, page22, NULL,   NULL,   NULL,   NULL,   NULL,
+	NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,
+	NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,
+	NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,
+	NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,
+	NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,
+	NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,
+	NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,
+	NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,
+	NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,
+	NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,
+	NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,
+	NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,
+	NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,
+	NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,
+	NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,
+	NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,
+	NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,
+	NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,
+	NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,
+	NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,
+	NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,
+	NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,
+	NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,
+	NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,
+	NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,
+	NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,
+	NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,
+};
+
+static const unsigned char charset2lower[256] = {
+	0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x00-0x07 */
+	0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x08-0x0f */
+	0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x10-0x17 */
+	0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x18-0x1f */
+	0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x20-0x27 */
+	0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x28-0x2f */
+	0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x30-0x37 */
+	0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x38-0x3f */
+	0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x40-0x47 */
+	0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x48-0x4f */
+	0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x50-0x57 */
+	0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x58-0x5f */
+	0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x60-0x67 */
+	0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x68-0x6f */
+	0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x70-0x77 */
+	0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x78-0x7f */
+	0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x80-0x87 */
+	0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x88-0x8f */
+	0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x90-0x97 */
+	0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x98-0x9f */
+	0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0xa0-0xa7 */
+	0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0xa8-0xaf */
+	0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0xb0-0xb7 */
+	0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0xb8-0xbf */
+	0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0xc0-0xc7 */
+	0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0xc8-0xcf */
+	0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0xd0-0xd7 */
+	0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0xd8-0xdf */
+	0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0xe0-0xe7 */
+	0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0xe8-0xef */
+	0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0xf0-0xf7 */
+	0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0xf8-0xff */
+};
+
+static const unsigned char charset2upper[256] = {
+	0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x00-0x07 */
+	0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x08-0x0f */
+	0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x10-0x17 */
+	0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x18-0x1f */
+	0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x20-0x27 */
+	0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x28-0x2f */
+	0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x30-0x37 */
+	0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x38-0x3f */
+	0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x40-0x47 */
+	0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x48-0x4f */
+	0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x50-0x57 */
+	0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x58-0x5f */
+	0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x60-0x67 */
+	0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x68-0x6f */
+	0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x70-0x77 */
+	0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x78-0x7f */
+	0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x80-0x87 */
+	0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x88-0x8f */
+	0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x90-0x97 */
+	0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x98-0x9f */
+	0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0xa0-0xa7 */
+	0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0xa8-0xaf */
+	0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0xb0-0xb7 */
+	0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0xb8-0xbf */
+	0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0xc0-0xc7 */
+	0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0xc8-0xcf */
+	0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0xd0-0xd7 */
+	0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0xd8-0xdf */
+	0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0xe0-0xe7 */
+	0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0xe8-0xef */
+	0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0xf0-0xf7 */
+	0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0xf8-0xff */
+};
+
+static int uni2char(wchar_t uni, unsigned char *out, int boundlen)
+{
+	const unsigned char *uni2charset;
+	unsigned char cl = uni & 0x00ff;
+	unsigned char ch = (uni & 0xff00) >> 8;
+
+	if (boundlen <= 0)
+		return -ENAMETOOLONG;
+
+	uni2charset = page_uni2charset[ch];
+	if (uni2charset && uni2charset[cl])
+		out[0] = uni2charset[cl];
+	else
+		return -EINVAL;
+	return 1;
+}
+
+static int char2uni(const unsigned char *rawstring, int boundlen, wchar_t *uni)
+{
+	*uni = charset2uni[*rawstring];
+	if (*uni == 0x0000)
+		return -EINVAL;
+	return 1;
+}
+
+static struct nls_table table = {
+	.charset	= "macgreek",
+	.uni2char	= uni2char,
+	.char2uni	= char2uni,
+	.charset2lower	= charset2lower,
+	.charset2upper	= charset2upper,
+	.owner		= THIS_MODULE,
+};
+
+static int __init init_nls_macgreek(void)
+{
+	return register_nls(&table);
+}
+
+static void __exit exit_nls_macgreek(void)
+{
+	unregister_nls(&table);
+}
+
+module_init(init_nls_macgreek)
+module_exit(exit_nls_macgreek)
+
+MODULE_LICENSE("Dual BSD/GPL");
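Each of these generated nls files follows the same pattern: a 256-entry charset2uni array maps a charset byte straight to a Unicode code point, and the reverse direction goes through page_uni2charset, where the high byte of the code point selects a 256-entry page and the low byte indexes into it, with 0x00 marking an unmapped position. As a rough standalone illustration of that two-level lookup, not part of the patch itself (all demo_* names are invented for this sketch, and it returns -1 where the kernel helper returns -EINVAL), a userspace version might look like:

#include <stdio.h>

/* One demo page: only U+0041 ('A') has a mapping; 0x00 means "no mapping". */
static const unsigned char demo_page00[256] = { [0x41] = 0x41 };

/* Page table indexed by the high byte of the code point; unused pages stay NULL. */
static const unsigned char *const demo_pages[256] = { demo_page00 };

static int demo_uni2char(unsigned short uni, unsigned char *out)
{
	const unsigned char *page = demo_pages[uni >> 8];   /* high byte picks the page */

	if (!page || !page[uni & 0xff])                      /* low byte indexes into it */
		return -1;
	*out = page[uni & 0xff];
	return 1;
}

int main(void)
{
	unsigned char c;

	if (demo_uni2char(0x0041, &c) == 1)
		printf("U+0041 -> 0x%02x\n", c);
	return 0;
}

The in-kernel uni2char()/char2uni() pairs above and below do the same table walk; the differences are only the error codes, the boundlen check, and registration through register_nls().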
diff --git a/fs/nls/mac-iceland.c b/fs/nls/mac-iceland.c
new file mode 100644
index 0000000..babe299
--- /dev/null
+++ b/fs/nls/mac-iceland.c
@@ -0,0 +1,602 @@
+/*
+ * linux/fs/nls/mac-iceland.c
+ *
+ * Charset maciceland translation tables.
+ * Generated automatically from the Unicode and charset
+ * tables from the Unicode Organization (www.unicode.org).
+ * The Unicode to charset table has only exact mappings.
+ */
+
+/*
+ * COPYRIGHT AND PERMISSION NOTICE
+ *
+ * Copyright 1991-2012 Unicode, Inc.  All rights reserved.  Distributed under
+ * the Terms of Use in http://www.unicode.org/copyright.html.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of the Unicode data files and any associated documentation (the "Data
+ * Files") or Unicode software and any associated documentation (the
+ * "Software") to deal in the Data Files or Software without restriction,
+ * including without limitation the rights to use, copy, modify, merge,
+ * publish, distribute, and/or sell copies of the Data Files or Software, and
+ * to permit persons to whom the Data Files or Software are furnished to do
+ * so, provided that (a) the above copyright notice(s) and this permission
+ * notice appear with all copies of the Data Files or Software, (b) both the
+ * above copyright notice(s) and this permission notice appear in associated
+ * documentation, and (c) there is clear notice in each modified Data File or
+ * in the Software as well as in the documentation associated with the Data
+ * File(s) or Software that the data or software has been modified.
+ *
+ * THE DATA FILES AND SOFTWARE ARE PROVIDED "AS IS", WITHOUT WARRANTY OF ANY
+ * KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF
+ * THIRD PARTY RIGHTS.  IN NO EVENT SHALL THE COPYRIGHT HOLDER OR HOLDERS
+ * INCLUDED IN THIS NOTICE BE LIABLE FOR ANY CLAIM, OR ANY SPECIAL INDIRECT
+ * OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF
+ * USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR
+ * OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
+ * PERFORMANCE OF THE DATA FILES OR SOFTWARE.
+ *
+ * Except as contained in this notice, the name of a copyright holder shall
+ * not be used in advertising or otherwise to promote the sale, use or other
+ * dealings in these Data Files or Software without prior written
+ * authorization of the copyright holder.
+ */
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/string.h>
+#include <linux/nls.h>
+#include <linux/errno.h>
+
+static const wchar_t charset2uni[256] = {
+	/* 0x00 */
+	0x0000, 0x0001, 0x0002, 0x0003,
+	0x0004, 0x0005, 0x0006, 0x0007,
+	0x0008, 0x0009, 0x000a, 0x000b,
+	0x000c, 0x000d, 0x000e, 0x000f,
+	/* 0x10 */
+	0x0010, 0x0011, 0x0012, 0x0013,
+	0x0014, 0x0015, 0x0016, 0x0017,
+	0x0018, 0x0019, 0x001a, 0x001b,
+	0x001c, 0x001d, 0x001e, 0x001f,
+	/* 0x20 */
+	0x0020, 0x0021, 0x0022, 0x0023,
+	0x0024, 0x0025, 0x0026, 0x0027,
+	0x0028, 0x0029, 0x002a, 0x002b,
+	0x002c, 0x002d, 0x002e, 0x002f,
+	/* 0x30 */
+	0x0030, 0x0031, 0x0032, 0x0033,
+	0x0034, 0x0035, 0x0036, 0x0037,
+	0x0038, 0x0039, 0x003a, 0x003b,
+	0x003c, 0x003d, 0x003e, 0x003f,
+	/* 0x40 */
+	0x0040, 0x0041, 0x0042, 0x0043,
+	0x0044, 0x0045, 0x0046, 0x0047,
+	0x0048, 0x0049, 0x004a, 0x004b,
+	0x004c, 0x004d, 0x004e, 0x004f,
+	/* 0x50 */
+	0x0050, 0x0051, 0x0052, 0x0053,
+	0x0054, 0x0055, 0x0056, 0x0057,
+	0x0058, 0x0059, 0x005a, 0x005b,
+	0x005c, 0x005d, 0x005e, 0x005f,
+	/* 0x60 */
+	0x0060, 0x0061, 0x0062, 0x0063,
+	0x0064, 0x0065, 0x0066, 0x0067,
+	0x0068, 0x0069, 0x006a, 0x006b,
+	0x006c, 0x006d, 0x006e, 0x006f,
+	/* 0x70 */
+	0x0070, 0x0071, 0x0072, 0x0073,
+	0x0074, 0x0075, 0x0076, 0x0077,
+	0x0078, 0x0079, 0x007a, 0x007b,
+	0x007c, 0x007d, 0x007e, 0x007f,
+	/* 0x80 */
+	0x00c4, 0x00c5, 0x00c7, 0x00c9,
+	0x00d1, 0x00d6, 0x00dc, 0x00e1,
+	0x00e0, 0x00e2, 0x00e4, 0x00e3,
+	0x00e5, 0x00e7, 0x00e9, 0x00e8,
+	/* 0x90 */
+	0x00ea, 0x00eb, 0x00ed, 0x00ec,
+	0x00ee, 0x00ef, 0x00f1, 0x00f3,
+	0x00f2, 0x00f4, 0x00f6, 0x00f5,
+	0x00fa, 0x00f9, 0x00fb, 0x00fc,
+	/* 0xa0 */
+	0x00dd, 0x00b0, 0x00a2, 0x00a3,
+	0x00a7, 0x2022, 0x00b6, 0x00df,
+	0x00ae, 0x00a9, 0x2122, 0x00b4,
+	0x00a8, 0x2260, 0x00c6, 0x00d8,
+	/* 0xb0 */
+	0x221e, 0x00b1, 0x2264, 0x2265,
+	0x00a5, 0x00b5, 0x2202, 0x2211,
+	0x220f, 0x03c0, 0x222b, 0x00aa,
+	0x00ba, 0x03a9, 0x00e6, 0x00f8,
+	/* 0xc0 */
+	0x00bf, 0x00a1, 0x00ac, 0x221a,
+	0x0192, 0x2248, 0x2206, 0x00ab,
+	0x00bb, 0x2026, 0x00a0, 0x00c0,
+	0x00c3, 0x00d5, 0x0152, 0x0153,
+	/* 0xd0 */
+	0x2013, 0x2014, 0x201c, 0x201d,
+	0x2018, 0x2019, 0x00f7, 0x25ca,
+	0x00ff, 0x0178, 0x2044, 0x20ac,
+	0x00d0, 0x00f0, 0x00de, 0x00fe,
+	/* 0xe0 */
+	0x00fd, 0x00b7, 0x201a, 0x201e,
+	0x2030, 0x00c2, 0x00ca, 0x00c1,
+	0x00cb, 0x00c8, 0x00cd, 0x00ce,
+	0x00cf, 0x00cc, 0x00d3, 0x00d4,
+	/* 0xf0 */
+	0xf8ff, 0x00d2, 0x00da, 0x00db,
+	0x00d9, 0x0131, 0x02c6, 0x02dc,
+	0x00af, 0x02d8, 0x02d9, 0x02da,
+	0x00b8, 0x02dd, 0x02db, 0x02c7,
+};
+
+static const unsigned char page00[256] = {
+	0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, /* 0x00-0x07 */
+	0x08, 0x09, 0x0a, 0x0b, 0x0c, 0x0d, 0x0e, 0x0f, /* 0x08-0x0f */
+	0x10, 0x11, 0x12, 0x13, 0x14, 0x15, 0x16, 0x17, /* 0x10-0x17 */
+	0x18, 0x19, 0x1a, 0x1b, 0x1c, 0x1d, 0x1e, 0x1f, /* 0x18-0x1f */
+	0x20, 0x21, 0x22, 0x23, 0x24, 0x25, 0x26, 0x27, /* 0x20-0x27 */
+	0x28, 0x29, 0x2a, 0x2b, 0x2c, 0x2d, 0x2e, 0x2f, /* 0x28-0x2f */
+	0x30, 0x31, 0x32, 0x33, 0x34, 0x35, 0x36, 0x37, /* 0x30-0x37 */
+	0x38, 0x39, 0x3a, 0x3b, 0x3c, 0x3d, 0x3e, 0x3f, /* 0x38-0x3f */
+	0x40, 0x41, 0x42, 0x43, 0x44, 0x45, 0x46, 0x47, /* 0x40-0x47 */
+	0x48, 0x49, 0x4a, 0x4b, 0x4c, 0x4d, 0x4e, 0x4f, /* 0x48-0x4f */
+	0x50, 0x51, 0x52, 0x53, 0x54, 0x55, 0x56, 0x57, /* 0x50-0x57 */
+	0x58, 0x59, 0x5a, 0x5b, 0x5c, 0x5d, 0x5e, 0x5f, /* 0x58-0x5f */
+	0x60, 0x61, 0x62, 0x63, 0x64, 0x65, 0x66, 0x67, /* 0x60-0x67 */
+	0x68, 0x69, 0x6a, 0x6b, 0x6c, 0x6d, 0x6e, 0x6f, /* 0x68-0x6f */
+	0x70, 0x71, 0x72, 0x73, 0x74, 0x75, 0x76, 0x77, /* 0x70-0x77 */
+	0x78, 0x79, 0x7a, 0x7b, 0x7c, 0x7d, 0x7e, 0x7f, /* 0x78-0x7f */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x80-0x87 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x88-0x8f */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x90-0x97 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x98-0x9f */
+	0xca, 0xc1, 0xa2, 0xa3, 0x00, 0xb4, 0x00, 0xa4, /* 0xa0-0xa7 */
+	0xac, 0xa9, 0xbb, 0xc7, 0xc2, 0x00, 0xa8, 0xf8, /* 0xa8-0xaf */
+	0xa1, 0xb1, 0x00, 0x00, 0xab, 0xb5, 0xa6, 0xe1, /* 0xb0-0xb7 */
+	0xfc, 0x00, 0xbc, 0xc8, 0x00, 0x00, 0x00, 0xc0, /* 0xb8-0xbf */
+	0xcb, 0xe7, 0xe5, 0xcc, 0x80, 0x81, 0xae, 0x82, /* 0xc0-0xc7 */
+	0xe9, 0x83, 0xe6, 0xe8, 0xed, 0xea, 0xeb, 0xec, /* 0xc8-0xcf */
+	0xdc, 0x84, 0xf1, 0xee, 0xef, 0xcd, 0x85, 0x00, /* 0xd0-0xd7 */
+	0xaf, 0xf4, 0xf2, 0xf3, 0x86, 0xa0, 0xde, 0xa7, /* 0xd8-0xdf */
+	0x88, 0x87, 0x89, 0x8b, 0x8a, 0x8c, 0xbe, 0x8d, /* 0xe0-0xe7 */
+	0x8f, 0x8e, 0x90, 0x91, 0x93, 0x92, 0x94, 0x95, /* 0xe8-0xef */
+	0xdd, 0x96, 0x98, 0x97, 0x99, 0x9b, 0x9a, 0xd6, /* 0xf0-0xf7 */
+	0xbf, 0x9d, 0x9c, 0x9e, 0x9f, 0xe0, 0xdf, 0xd8, /* 0xf8-0xff */
+};
+
+static const unsigned char page01[256] = {
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x00-0x07 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x08-0x0f */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x10-0x17 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x18-0x1f */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x20-0x27 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x28-0x2f */
+	0x00, 0xf5, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x30-0x37 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x38-0x3f */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x40-0x47 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x48-0x4f */
+	0x00, 0x00, 0xce, 0xcf, 0x00, 0x00, 0x00, 0x00, /* 0x50-0x57 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x58-0x5f */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x60-0x67 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x68-0x6f */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x70-0x77 */
+	0xd9, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x78-0x7f */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x80-0x87 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x88-0x8f */
+	0x00, 0x00, 0xc4, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x90-0x97 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x98-0x9f */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xa0-0xa7 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xa8-0xaf */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xb0-0xb7 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xb8-0xbf */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xc0-0xc7 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xc8-0xcf */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xd0-0xd7 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xd8-0xdf */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xe0-0xe7 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xe8-0xef */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xf0-0xf7 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xf8-0xff */
+};
+
+static const unsigned char page02[256] = {
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x00-0x07 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x08-0x0f */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x10-0x17 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x18-0x1f */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x20-0x27 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x28-0x2f */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x30-0x37 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x38-0x3f */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x40-0x47 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x48-0x4f */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x50-0x57 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x58-0x5f */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x60-0x67 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x68-0x6f */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x70-0x77 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x78-0x7f */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x80-0x87 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x88-0x8f */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x90-0x97 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x98-0x9f */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xa0-0xa7 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xa8-0xaf */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xb0-0xb7 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xb8-0xbf */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xf6, 0xff, /* 0xc0-0xc7 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xc8-0xcf */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xd0-0xd7 */
+	0xf9, 0xfa, 0xfb, 0xfe, 0xf7, 0xfd, 0x00, 0x00, /* 0xd8-0xdf */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xe0-0xe7 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xe8-0xef */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xf0-0xf7 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xf8-0xff */
+};
+
+static const unsigned char page03[256] = {
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x00-0x07 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x08-0x0f */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x10-0x17 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x18-0x1f */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x20-0x27 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x28-0x2f */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x30-0x37 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x38-0x3f */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x40-0x47 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x48-0x4f */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x50-0x57 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x58-0x5f */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x60-0x67 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x68-0x6f */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x70-0x77 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x78-0x7f */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x80-0x87 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x88-0x8f */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x90-0x97 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x98-0x9f */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xa0-0xa7 */
+	0x00, 0xbd, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xa8-0xaf */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xb0-0xb7 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xb8-0xbf */
+	0xb9, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xc0-0xc7 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xc8-0xcf */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xd0-0xd7 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xd8-0xdf */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xe0-0xe7 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xe8-0xef */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xf0-0xf7 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xf8-0xff */
+};
+
+static const unsigned char page20[256] = {
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x00-0x07 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x08-0x0f */
+	0x00, 0x00, 0x00, 0xd0, 0xd1, 0x00, 0x00, 0x00, /* 0x10-0x17 */
+	0xd4, 0xd5, 0xe2, 0x00, 0xd2, 0xd3, 0xe3, 0x00, /* 0x18-0x1f */
+	0x00, 0x00, 0xa5, 0x00, 0x00, 0x00, 0xc9, 0x00, /* 0x20-0x27 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x28-0x2f */
+	0xe4, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x30-0x37 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x38-0x3f */
+	0x00, 0x00, 0x00, 0x00, 0xda, 0x00, 0x00, 0x00, /* 0x40-0x47 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x48-0x4f */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x50-0x57 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x58-0x5f */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x60-0x67 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x68-0x6f */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x70-0x77 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x78-0x7f */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x80-0x87 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x88-0x8f */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x90-0x97 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x98-0x9f */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xa0-0xa7 */
+	0x00, 0x00, 0x00, 0x00, 0xdb, 0x00, 0x00, 0x00, /* 0xa8-0xaf */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xb0-0xb7 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xb8-0xbf */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xc0-0xc7 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xc8-0xcf */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xd0-0xd7 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xd8-0xdf */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xe0-0xe7 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xe8-0xef */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xf0-0xf7 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xf8-0xff */
+};
+
+static const unsigned char page21[256] = {
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x00-0x07 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x08-0x0f */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x10-0x17 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x18-0x1f */
+	0x00, 0x00, 0xaa, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x20-0x27 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x28-0x2f */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x30-0x37 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x38-0x3f */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x40-0x47 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x48-0x4f */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x50-0x57 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x58-0x5f */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x60-0x67 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x68-0x6f */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x70-0x77 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x78-0x7f */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x80-0x87 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x88-0x8f */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x90-0x97 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x98-0x9f */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xa0-0xa7 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xa8-0xaf */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xb0-0xb7 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xb8-0xbf */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xc0-0xc7 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xc8-0xcf */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xd0-0xd7 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xd8-0xdf */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xe0-0xe7 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xe8-0xef */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xf0-0xf7 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xf8-0xff */
+};
+
+static const unsigned char page22[256] = {
+	0x00, 0x00, 0xb6, 0x00, 0x00, 0x00, 0xc6, 0x00, /* 0x00-0x07 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xb8, /* 0x08-0x0f */
+	0x00, 0xb7, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x10-0x17 */
+	0x00, 0x00, 0xc3, 0x00, 0x00, 0x00, 0xb0, 0x00, /* 0x18-0x1f */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x20-0x27 */
+	0x00, 0x00, 0x00, 0xba, 0x00, 0x00, 0x00, 0x00, /* 0x28-0x2f */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x30-0x37 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x38-0x3f */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x40-0x47 */
+	0xc5, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x48-0x4f */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x50-0x57 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x58-0x5f */
+	0xad, 0x00, 0x00, 0x00, 0xb2, 0xb3, 0x00, 0x00, /* 0x60-0x67 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x68-0x6f */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x70-0x77 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x78-0x7f */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x80-0x87 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x88-0x8f */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x90-0x97 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x98-0x9f */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xa0-0xa7 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xa8-0xaf */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xb0-0xb7 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xb8-0xbf */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xc0-0xc7 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xc8-0xcf */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xd0-0xd7 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xd8-0xdf */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xe0-0xe7 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xe8-0xef */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xf0-0xf7 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xf8-0xff */
+};
+
+static const unsigned char page25[256] = {
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x00-0x07 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x08-0x0f */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x10-0x17 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x18-0x1f */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x20-0x27 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x28-0x2f */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x30-0x37 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x38-0x3f */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x40-0x47 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x48-0x4f */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x50-0x57 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x58-0x5f */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x60-0x67 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x68-0x6f */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x70-0x77 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x78-0x7f */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x80-0x87 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x88-0x8f */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x90-0x97 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x98-0x9f */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xa0-0xa7 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xa8-0xaf */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xb0-0xb7 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xb8-0xbf */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xc0-0xc7 */
+	0x00, 0x00, 0xd7, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xc8-0xcf */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xd0-0xd7 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xd8-0xdf */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xe0-0xe7 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xe8-0xef */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xf0-0xf7 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xf8-0xff */
+};
+
+static const unsigned char pagef8[256] = {
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x00-0x07 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x08-0x0f */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x10-0x17 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x18-0x1f */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x20-0x27 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x28-0x2f */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x30-0x37 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x38-0x3f */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x40-0x47 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x48-0x4f */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x50-0x57 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x58-0x5f */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x60-0x67 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x68-0x6f */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x70-0x77 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x78-0x7f */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x80-0x87 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x88-0x8f */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x90-0x97 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x98-0x9f */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xa0-0xa7 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xa8-0xaf */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xb0-0xb7 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xb8-0xbf */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xc0-0xc7 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xc8-0xcf */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xd0-0xd7 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xd8-0xdf */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xe0-0xe7 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xe8-0xef */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xf0-0xf7 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xf0, /* 0xf8-0xff */
+};
+
+static const unsigned char *const page_uni2charset[256] = {
+	page00, page01, page02, page03, NULL,   NULL,   NULL,   NULL,
+	NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,
+	NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,
+	NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,
+	page20, page21, page22, NULL,   NULL,   page25, NULL,   NULL,
+	NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,
+	NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,
+	NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,
+	NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,
+	NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,
+	NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,
+	NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,
+	NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,
+	NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,
+	NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,
+	NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,
+	NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,
+	NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,
+	NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,
+	NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,
+	NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,
+	NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,
+	NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,
+	NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,
+	NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,
+	NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,
+	NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,
+	NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,
+	NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,
+	NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,
+	NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,
+	pagef8, NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,
+};
+
+static const unsigned char charset2lower[256] = {
+	0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x00-0x07 */
+	0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x08-0x0f */
+	0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x10-0x17 */
+	0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x18-0x1f */
+	0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x20-0x27 */
+	0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x28-0x2f */
+	0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x30-0x37 */
+	0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x38-0x3f */
+	0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x40-0x47 */
+	0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x48-0x4f */
+	0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x50-0x57 */
+	0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x58-0x5f */
+	0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x60-0x67 */
+	0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x68-0x6f */
+	0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x70-0x77 */
+	0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x78-0x7f */
+	0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x80-0x87 */
+	0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x88-0x8f */
+	0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x90-0x97 */
+	0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x98-0x9f */
+	0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0xa0-0xa7 */
+	0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0xa8-0xaf */
+	0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0xb0-0xb7 */
+	0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0xb8-0xbf */
+	0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0xc0-0xc7 */
+	0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0xc8-0xcf */
+	0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0xd0-0xd7 */
+	0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0xd8-0xdf */
+	0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0xe0-0xe7 */
+	0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0xe8-0xef */
+	0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0xf0-0xf7 */
+	0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0xf8-0xff */
+};
+
+static const unsigned char charset2upper[256] = {
+	0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x00-0x07 */
+	0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x08-0x0f */
+	0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x10-0x17 */
+	0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x18-0x1f */
+	0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x20-0x27 */
+	0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x28-0x2f */
+	0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x30-0x37 */
+	0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x38-0x3f */
+	0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x40-0x47 */
+	0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x48-0x4f */
+	0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x50-0x57 */
+	0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x58-0x5f */
+	0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x60-0x67 */
+	0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x68-0x6f */
+	0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x70-0x77 */
+	0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x78-0x7f */
+	0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x80-0x87 */
+	0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x88-0x8f */
+	0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x90-0x97 */
+	0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x98-0x9f */
+	0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0xa0-0xa7 */
+	0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0xa8-0xaf */
+	0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0xb0-0xb7 */
+	0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0xb8-0xbf */
+	0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0xc0-0xc7 */
+	0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0xc8-0xcf */
+	0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0xd0-0xd7 */
+	0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0xd8-0xdf */
+	0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0xe0-0xe7 */
+	0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0xe8-0xef */
+	0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0xf0-0xf7 */
+	0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0xf8-0xff */
+};
+
+static int uni2char(wchar_t uni, unsigned char *out, int boundlen)
+{
+	const unsigned char *uni2charset;
+	unsigned char cl = uni & 0x00ff;
+	unsigned char ch = (uni & 0xff00) >> 8;
+
+	if (boundlen <= 0)
+		return -ENAMETOOLONG;
+
+	uni2charset = page_uni2charset[ch];
+	if (uni2charset && uni2charset[cl])
+		out[0] = uni2charset[cl];
+	else
+		return -EINVAL;
+	return 1;
+}
+
+static int char2uni(const unsigned char *rawstring, int boundlen, wchar_t *uni)
+{
+	*uni = charset2uni[*rawstring];
+	if (*uni == 0x0000)
+		return -EINVAL;
+	return 1;
+}
+
+static struct nls_table table = {
+	.charset	= "maciceland",
+	.uni2char	= uni2char,
+	.char2uni	= char2uni,
+	.charset2lower	= charset2lower,
+	.charset2upper	= charset2upper,
+	.owner		= THIS_MODULE,
+};
+
+static int __init init_nls_maciceland(void)
+{
+	return register_nls(&table);
+}
+
+static void __exit exit_nls_maciceland(void)
+{
+	unregister_nls(&table);
+}
+
+module_init(init_nls_maciceland)
+module_exit(exit_nls_maciceland)
+
+MODULE_LICENSE("Dual BSD/GPL");
diff --git a/fs/nls/mac-inuit.c b/fs/nls/mac-inuit.c
new file mode 100644
index 0000000..312364f
--- /dev/null
+++ b/fs/nls/mac-inuit.c
@@ -0,0 +1,532 @@
+/*
+ * linux/fs/nls/mac-inuit.c
+ *
+ * Charset macinuit translation tables.
+ * Generated automatically from the Unicode and charset
+ * tables from the Unicode Organization (www.unicode.org).
+ * The Unicode to charset table has only exact mappings.
+ */
+
+/*
+ * COPYRIGHT AND PERMISSION NOTICE
+ *
+ * Copyright 1991-2012 Unicode, Inc.  All rights reserved.  Distributed under
+ * the Terms of Use in http://www.unicode.org/copyright.html.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of the Unicode data files and any associated documentation (the "Data
+ * Files") or Unicode software and any associated documentation (the
+ * "Software") to deal in the Data Files or Software without restriction,
+ * including without limitation the rights to use, copy, modify, merge,
+ * publish, distribute, and/or sell copies of the Data Files or Software, and
+ * to permit persons to whom the Data Files or Software are furnished to do
+ * so, provided that (a) the above copyright notice(s) and this permission
+ * notice appear with all copies of the Data Files or Software, (b) both the
+ * above copyright notice(s) and this permission notice appear in associated
+ * documentation, and (c) there is clear notice in each modified Data File or
+ * in the Software as well as in the documentation associated with the Data
+ * File(s) or Software that the data or software has been modified.
+ *
+ * THE DATA FILES AND SOFTWARE ARE PROVIDED "AS IS", WITHOUT WARRANTY OF ANY
+ * KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF
+ * THIRD PARTY RIGHTS.  IN NO EVENT SHALL THE COPYRIGHT HOLDER OR HOLDERS
+ * INCLUDED IN THIS NOTICE BE LIABLE FOR ANY CLAIM, OR ANY SPECIAL INDIRECT
+ * OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF
+ * USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR
+ * OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
+ * PERFORMANCE OF THE DATA FILES OR SOFTWARE.
+ *
+ * Except as contained in this notice, the name of a copyright holder shall
+ * not be used in advertising or otherwise to promote the sale, use or other
+ * dealings in these Data Files or Software without prior written
+ * authorization of the copyright holder.
+ */
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/string.h>
+#include <linux/nls.h>
+#include <linux/errno.h>
+
+static const wchar_t charset2uni[256] = {
+	/* 0x00 */
+	0x0000, 0x0001, 0x0002, 0x0003,
+	0x0004, 0x0005, 0x0006, 0x0007,
+	0x0008, 0x0009, 0x000a, 0x000b,
+	0x000c, 0x000d, 0x000e, 0x000f,
+	/* 0x10 */
+	0x0010, 0x0011, 0x0012, 0x0013,
+	0x0014, 0x0015, 0x0016, 0x0017,
+	0x0018, 0x0019, 0x001a, 0x001b,
+	0x001c, 0x001d, 0x001e, 0x001f,
+	/* 0x20 */
+	0x0020, 0x0021, 0x0022, 0x0023,
+	0x0024, 0x0025, 0x0026, 0x0027,
+	0x0028, 0x0029, 0x002a, 0x002b,
+	0x002c, 0x002d, 0x002e, 0x002f,
+	/* 0x30 */
+	0x0030, 0x0031, 0x0032, 0x0033,
+	0x0034, 0x0035, 0x0036, 0x0037,
+	0x0038, 0x0039, 0x003a, 0x003b,
+	0x003c, 0x003d, 0x003e, 0x003f,
+	/* 0x40 */
+	0x0040, 0x0041, 0x0042, 0x0043,
+	0x0044, 0x0045, 0x0046, 0x0047,
+	0x0048, 0x0049, 0x004a, 0x004b,
+	0x004c, 0x004d, 0x004e, 0x004f,
+	/* 0x50 */
+	0x0050, 0x0051, 0x0052, 0x0053,
+	0x0054, 0x0055, 0x0056, 0x0057,
+	0x0058, 0x0059, 0x005a, 0x005b,
+	0x005c, 0x005d, 0x005e, 0x005f,
+	/* 0x60 */
+	0x0060, 0x0061, 0x0062, 0x0063,
+	0x0064, 0x0065, 0x0066, 0x0067,
+	0x0068, 0x0069, 0x006a, 0x006b,
+	0x006c, 0x006d, 0x006e, 0x006f,
+	/* 0x70 */
+	0x0070, 0x0071, 0x0072, 0x0073,
+	0x0074, 0x0075, 0x0076, 0x0077,
+	0x0078, 0x0079, 0x007a, 0x007b,
+	0x007c, 0x007d, 0x007e, 0x007f,
+	/* 0x80 */
+	0x1403, 0x1404, 0x1405, 0x1406,
+	0x140a, 0x140b, 0x1431, 0x1432,
+	0x1433, 0x1434, 0x1438, 0x1439,
+	0x1449, 0x144e, 0x144f, 0x1450,
+	/* 0x90 */
+	0x1451, 0x1455, 0x1456, 0x1466,
+	0x146d, 0x146e, 0x146f, 0x1470,
+	0x1472, 0x1473, 0x1483, 0x148b,
+	0x148c, 0x148d, 0x148e, 0x1490,
+	/* 0xa0 */
+	0x1491, 0x00b0, 0x14a1, 0x14a5,
+	0x14a6, 0x2022, 0x00b6, 0x14a7,
+	0x00ae, 0x00a9, 0x2122, 0x14a8,
+	0x14aa, 0x14ab, 0x14bb, 0x14c2,
+	/* 0xb0 */
+	0x14c3, 0x14c4, 0x14c5, 0x14c7,
+	0x14c8, 0x14d0, 0x14ef, 0x14f0,
+	0x14f1, 0x14f2, 0x14f4, 0x14f5,
+	0x1505, 0x14d5, 0x14d6, 0x14d7,
+	/* 0xc0 */
+	0x14d8, 0x14da, 0x14db, 0x14ea,
+	0x1528, 0x1529, 0x152a, 0x152b,
+	0x152d, 0x2026, 0x00a0, 0x152e,
+	0x153e, 0x1555, 0x1556, 0x1557,
+	/* 0xd0 */
+	0x2013, 0x2014, 0x201c, 0x201d,
+	0x2018, 0x2019, 0x1558, 0x1559,
+	0x155a, 0x155d, 0x1546, 0x1547,
+	0x1548, 0x1549, 0x154b, 0x154c,
+	/* 0xe0 */
+	0x1550, 0x157f, 0x1580, 0x1581,
+	0x1582, 0x1583, 0x1584, 0x1585,
+	0x158f, 0x1590, 0x1591, 0x1592,
+	0x1593, 0x1594, 0x1595, 0x1671,
+	/* 0xf0 */
+	0x1672, 0x1673, 0x1674, 0x1675,
+	0x1676, 0x1596, 0x15a0, 0x15a1,
+	0x15a2, 0x15a3, 0x15a4, 0x15a5,
+	0x15a6, 0x157c, 0x0141, 0x0142,
+};
+
+static const unsigned char page00[256] = {
+	0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, /* 0x00-0x07 */
+	0x08, 0x09, 0x0a, 0x0b, 0x0c, 0x0d, 0x0e, 0x0f, /* 0x08-0x0f */
+	0x10, 0x11, 0x12, 0x13, 0x14, 0x15, 0x16, 0x17, /* 0x10-0x17 */
+	0x18, 0x19, 0x1a, 0x1b, 0x1c, 0x1d, 0x1e, 0x1f, /* 0x18-0x1f */
+	0x20, 0x21, 0x22, 0x23, 0x24, 0x25, 0x26, 0x27, /* 0x20-0x27 */
+	0x28, 0x29, 0x2a, 0x2b, 0x2c, 0x2d, 0x2e, 0x2f, /* 0x28-0x2f */
+	0x30, 0x31, 0x32, 0x33, 0x34, 0x35, 0x36, 0x37, /* 0x30-0x37 */
+	0x38, 0x39, 0x3a, 0x3b, 0x3c, 0x3d, 0x3e, 0x3f, /* 0x38-0x3f */
+	0x40, 0x41, 0x42, 0x43, 0x44, 0x45, 0x46, 0x47, /* 0x40-0x47 */
+	0x48, 0x49, 0x4a, 0x4b, 0x4c, 0x4d, 0x4e, 0x4f, /* 0x48-0x4f */
+	0x50, 0x51, 0x52, 0x53, 0x54, 0x55, 0x56, 0x57, /* 0x50-0x57 */
+	0x58, 0x59, 0x5a, 0x5b, 0x5c, 0x5d, 0x5e, 0x5f, /* 0x58-0x5f */
+	0x60, 0x61, 0x62, 0x63, 0x64, 0x65, 0x66, 0x67, /* 0x60-0x67 */
+	0x68, 0x69, 0x6a, 0x6b, 0x6c, 0x6d, 0x6e, 0x6f, /* 0x68-0x6f */
+	0x70, 0x71, 0x72, 0x73, 0x74, 0x75, 0x76, 0x77, /* 0x70-0x77 */
+	0x78, 0x79, 0x7a, 0x7b, 0x7c, 0x7d, 0x7e, 0x7f, /* 0x78-0x7f */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x80-0x87 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x88-0x8f */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x90-0x97 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x98-0x9f */
+	0xca, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xa0-0xa7 */
+	0x00, 0xa9, 0x00, 0x00, 0x00, 0x00, 0xa8, 0x00, /* 0xa8-0xaf */
+	0xa1, 0x00, 0x00, 0x00, 0x00, 0x00, 0xa6, 0x00, /* 0xb0-0xb7 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xb8-0xbf */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xc0-0xc7 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xc8-0xcf */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xd0-0xd7 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xd8-0xdf */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xe0-0xe7 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xe8-0xef */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xf0-0xf7 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xf8-0xff */
+};
+
+static const unsigned char page01[256] = {
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x00-0x07 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x08-0x0f */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x10-0x17 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x18-0x1f */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x20-0x27 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x28-0x2f */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x30-0x37 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x38-0x3f */
+	0x00, 0xfe, 0xff, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x40-0x47 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x48-0x4f */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x50-0x57 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x58-0x5f */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x60-0x67 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x68-0x6f */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x70-0x77 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x78-0x7f */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x80-0x87 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x88-0x8f */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x90-0x97 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x98-0x9f */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xa0-0xa7 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xa8-0xaf */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xb0-0xb7 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xb8-0xbf */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xc0-0xc7 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xc8-0xcf */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xd0-0xd7 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xd8-0xdf */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xe0-0xe7 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xe8-0xef */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xf0-0xf7 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xf8-0xff */
+};
+
+static const unsigned char page14[256] = {
+	0x00, 0x00, 0x00, 0x80, 0x81, 0x82, 0x83, 0x00, /* 0x00-0x07 */
+	0x00, 0x00, 0x84, 0x85, 0x00, 0x00, 0x00, 0x00, /* 0x08-0x0f */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x10-0x17 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x18-0x1f */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x20-0x27 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x28-0x2f */
+	0x00, 0x86, 0x87, 0x88, 0x89, 0x00, 0x00, 0x00, /* 0x30-0x37 */
+	0x8a, 0x8b, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x38-0x3f */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x40-0x47 */
+	0x00, 0x8c, 0x00, 0x00, 0x00, 0x00, 0x8d, 0x8e, /* 0x48-0x4f */
+	0x8f, 0x90, 0x00, 0x00, 0x00, 0x91, 0x92, 0x00, /* 0x50-0x57 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x58-0x5f */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x93, 0x00, /* 0x60-0x67 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x94, 0x95, 0x96, /* 0x68-0x6f */
+	0x97, 0x00, 0x98, 0x99, 0x00, 0x00, 0x00, 0x00, /* 0x70-0x77 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x78-0x7f */
+	0x00, 0x00, 0x00, 0x9a, 0x00, 0x00, 0x00, 0x00, /* 0x80-0x87 */
+	0x00, 0x00, 0x00, 0x9b, 0x9c, 0x9d, 0x9e, 0x00, /* 0x88-0x8f */
+	0x9f, 0xa0, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x90-0x97 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x98-0x9f */
+	0x00, 0xa2, 0x00, 0x00, 0x00, 0xa3, 0xa4, 0xa7, /* 0xa0-0xa7 */
+	0xab, 0x00, 0xac, 0xad, 0x00, 0x00, 0x00, 0x00, /* 0xa8-0xaf */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xb0-0xb7 */
+	0x00, 0x00, 0x00, 0xae, 0x00, 0x00, 0x00, 0x00, /* 0xb8-0xbf */
+	0x00, 0x00, 0xaf, 0xb0, 0xb1, 0xb2, 0x00, 0xb3, /* 0xc0-0xc7 */
+	0xb4, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xc8-0xcf */
+	0xb5, 0x00, 0x00, 0x00, 0x00, 0xbd, 0xbe, 0xbf, /* 0xd0-0xd7 */
+	0xc0, 0x00, 0xc1, 0xc2, 0x00, 0x00, 0x00, 0x00, /* 0xd8-0xdf */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xe0-0xe7 */
+	0x00, 0x00, 0xc3, 0x00, 0x00, 0x00, 0x00, 0xb6, /* 0xe8-0xef */
+	0xb7, 0xb8, 0xb9, 0x00, 0xba, 0xbb, 0x00, 0x00, /* 0xf0-0xf7 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xf8-0xff */
+};
+
+static const unsigned char page15[256] = {
+	0x00, 0x00, 0x00, 0x00, 0x00, 0xbc, 0x00, 0x00, /* 0x00-0x07 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x08-0x0f */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x10-0x17 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x18-0x1f */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x20-0x27 */
+	0xc4, 0xc5, 0xc6, 0xc7, 0x00, 0xc8, 0xcb, 0x00, /* 0x28-0x2f */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x30-0x37 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xcc, 0x00, /* 0x38-0x3f */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xda, 0xdb, /* 0x40-0x47 */
+	0xdc, 0xdd, 0x00, 0xde, 0xdf, 0x00, 0x00, 0x00, /* 0x48-0x4f */
+	0xe0, 0x00, 0x00, 0x00, 0x00, 0xcd, 0xce, 0xcf, /* 0x50-0x57 */
+	0xd6, 0xd7, 0xd8, 0x00, 0x00, 0xd9, 0x00, 0x00, /* 0x58-0x5f */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x60-0x67 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x68-0x6f */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x70-0x77 */
+	0x00, 0x00, 0x00, 0x00, 0xfd, 0x00, 0x00, 0xe1, /* 0x78-0x7f */
+	0xe2, 0xe3, 0xe4, 0xe5, 0xe6, 0xe7, 0x00, 0x00, /* 0x80-0x87 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xe8, /* 0x88-0x8f */
+	0xe9, 0xea, 0xeb, 0xec, 0xed, 0xee, 0xf5, 0x00, /* 0x90-0x97 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x98-0x9f */
+	0xf6, 0xf7, 0xf8, 0xf9, 0xfa, 0xfb, 0xfc, 0x00, /* 0xa0-0xa7 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xa8-0xaf */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xb0-0xb7 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xb8-0xbf */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xc0-0xc7 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xc8-0xcf */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xd0-0xd7 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xd8-0xdf */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xe0-0xe7 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xe8-0xef */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xf0-0xf7 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xf8-0xff */
+};
+
+static const unsigned char page16[256] = {
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x00-0x07 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x08-0x0f */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x10-0x17 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x18-0x1f */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x20-0x27 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x28-0x2f */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x30-0x37 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x38-0x3f */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x40-0x47 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x48-0x4f */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x50-0x57 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x58-0x5f */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x60-0x67 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x68-0x6f */
+	0x00, 0xef, 0xf0, 0xf1, 0xf2, 0xf3, 0xf4, 0x00, /* 0x70-0x77 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x78-0x7f */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x80-0x87 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x88-0x8f */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x90-0x97 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x98-0x9f */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xa0-0xa7 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xa8-0xaf */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xb0-0xb7 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xb8-0xbf */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xc0-0xc7 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xc8-0xcf */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xd0-0xd7 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xd8-0xdf */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xe0-0xe7 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xe8-0xef */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xf0-0xf7 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xf8-0xff */
+};
+
+static const unsigned char page20[256] = {
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x00-0x07 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x08-0x0f */
+	0x00, 0x00, 0x00, 0xd0, 0xd1, 0x00, 0x00, 0x00, /* 0x10-0x17 */
+	0xd4, 0xd5, 0x00, 0x00, 0xd2, 0xd3, 0x00, 0x00, /* 0x18-0x1f */
+	0x00, 0x00, 0xa5, 0x00, 0x00, 0x00, 0xc9, 0x00, /* 0x20-0x27 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x28-0x2f */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x30-0x37 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x38-0x3f */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x40-0x47 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x48-0x4f */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x50-0x57 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x58-0x5f */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x60-0x67 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x68-0x6f */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x70-0x77 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x78-0x7f */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x80-0x87 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x88-0x8f */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x90-0x97 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x98-0x9f */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xa0-0xa7 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xa8-0xaf */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xb0-0xb7 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xb8-0xbf */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xc0-0xc7 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xc8-0xcf */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xd0-0xd7 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xd8-0xdf */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xe0-0xe7 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xe8-0xef */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xf0-0xf7 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xf8-0xff */
+};
+
+static const unsigned char page21[256] = {
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x00-0x07 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x08-0x0f */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x10-0x17 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x18-0x1f */
+	0x00, 0x00, 0xaa, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x20-0x27 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x28-0x2f */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x30-0x37 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x38-0x3f */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x40-0x47 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x48-0x4f */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x50-0x57 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x58-0x5f */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x60-0x67 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x68-0x6f */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x70-0x77 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x78-0x7f */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x80-0x87 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x88-0x8f */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x90-0x97 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x98-0x9f */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xa0-0xa7 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xa8-0xaf */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xb0-0xb7 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xb8-0xbf */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xc0-0xc7 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xc8-0xcf */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xd0-0xd7 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xd8-0xdf */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xe0-0xe7 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xe8-0xef */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xf0-0xf7 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xf8-0xff */
+};
+
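+/*
+ * Top-level index of the Unicode -> MacInuit mapping: entry N points at
+ * the 256-byte page for code points U+NN00-U+NNFF, or is NULL when no
+ * character in that block has a MacInuit equivalent.
+ */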
+static const unsigned char *const page_uni2charset[256] = {
+	page00, page01, NULL,   NULL,   NULL,   NULL,   NULL,   NULL,
+	NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,
+	NULL,   NULL,   NULL,   NULL,   page14, page15, page16, NULL,
+	NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,
+	page20, page21, NULL,   NULL,   NULL,   NULL,   NULL,   NULL,
+	NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,
+	NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,
+	NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,
+	NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,
+	NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,
+	NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,
+	NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,
+	NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,
+	NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,
+	NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,
+	NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,
+	NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,
+	NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,
+	NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,
+	NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,
+	NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,
+	NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,
+	NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,
+	NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,
+	NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,
+	NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,
+	NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,
+	NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,
+	NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,
+	NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,
+	NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,
+	NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,
+};
+
+static const unsigned char charset2lower[256] = {
+	0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x00-0x07 */
+	0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x08-0x0f */
+	0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x10-0x17 */
+	0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x18-0x1f */
+	0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x20-0x27 */
+	0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x28-0x2f */
+	0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x30-0x37 */
+	0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x38-0x3f */
+	0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x40-0x47 */
+	0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x48-0x4f */
+	0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x50-0x57 */
+	0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x58-0x5f */
+	0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x60-0x67 */
+	0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x68-0x6f */
+	0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x70-0x77 */
+	0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x78-0x7f */
+	0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x80-0x87 */
+	0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x88-0x8f */
+	0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x90-0x97 */
+	0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x98-0x9f */
+	0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0xa0-0xa7 */
+	0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0xa8-0xaf */
+	0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0xb0-0xb7 */
+	0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0xb8-0xbf */
+	0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0xc0-0xc7 */
+	0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0xc8-0xcf */
+	0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0xd0-0xd7 */
+	0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0xd8-0xdf */
+	0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0xe0-0xe7 */
+	0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0xe8-0xef */
+	0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0xf0-0xf7 */
+	0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0xf8-0xff */
+};
+
+static const unsigned char charset2upper[256] = {
+	0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, /* 0x00-0x07 */
+	0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, /* 0x08-0x0f */
+	0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, /* 0x10-0x17 */
+	0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, /* 0x18-0x1f */
+	0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, /* 0x20-0x27 */
+	0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, /* 0x28-0x2f */
+	0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, /* 0x30-0x37 */
+	0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, /* 0x38-0x3f */
+	0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, /* 0x40-0x47 */
+	0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, /* 0x48-0x4f */
+	0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, /* 0x50-0x57 */
+	0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, /* 0x58-0x5f */
+	0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, /* 0x60-0x67 */
+	0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, /* 0x68-0x6f */
+	0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, /* 0x70-0x77 */
+	0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, /* 0x78-0x7f */
+	0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, /* 0x80-0x87 */
+	0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, /* 0x88-0x8f */
+	0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, /* 0x90-0x97 */
+	0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, /* 0x98-0x9f */
+	0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, /* 0xa0-0xa7 */
+	0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, /* 0xa8-0xaf */
+	0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, /* 0xb0-0xb7 */
+	0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, /* 0xb8-0xbf */
+	0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, /* 0xc0-0xc7 */
+	0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, /* 0xc8-0xcf */
+	0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, /* 0xd0-0xd7 */
+	0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, /* 0xd8-0xdf */
+	0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, /* 0xe0-0xe7 */
+	0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, /* 0xe8-0xef */
+	0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, /* 0xf0-0xf7 */
+	0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, /* 0xf8-0xff */
+};
+
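+/*
+ * Look up the MacInuit byte for a Unicode character: the high byte of
+ * the code point selects a page from page_uni2charset and the low byte
+ * indexes into it.  A missing page or a zero entry means the character
+ * cannot be represented and -EINVAL is returned; -ENAMETOOLONG signals
+ * that the output buffer has no room for the single byte.
+ */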
+static int uni2char(wchar_t uni, unsigned char *out, int boundlen)
+{
+	const unsigned char *uni2charset;
+	unsigned char cl = uni & 0x00ff;
+	unsigned char ch = (uni & 0xff00) >> 8;
+
+	if (boundlen <= 0)
+		return -ENAMETOOLONG;
+
+	uni2charset = page_uni2charset[ch];
+	if (uni2charset && uni2charset[cl])
+		out[0] = uni2charset[cl];
+	else
+		return -EINVAL;
+	return 1;
+}
+
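+/*
+ * Reverse direction: one MacInuit byte maps straight through
+ * charset2uni; a result of U+0000 marks an unassigned byte value.
+ */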
+static int char2uni(const unsigned char *rawstring, int boundlen, wchar_t *uni)
+{
+	*uni = charset2uni[*rawstring];
+	if (*uni == 0x0000)
+		return -EINVAL;
+	return 1;
+}
+
+static struct nls_table table = {
+	.charset	= "macinuit",
+	.uni2char	= uni2char,
+	.char2uni	= char2uni,
+	.charset2lower	= charset2lower,
+	.charset2upper	= charset2upper,
+	.owner		= THIS_MODULE,
+};
+
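+/*
+ * register_nls() adds the table to the kernel's list of character sets;
+ * filesystems can then pick it up with load_nls("macinuit"), typically
+ * in response to an iocharset= mount option where supported, and drop
+ * the reference again with unload_nls().
+ */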
+static int __init init_nls_macinuit(void)
+{
+	return register_nls(&table);
+}
+
+static void __exit exit_nls_macinuit(void)
+{
+	unregister_nls(&table);
+}
+
+module_init(init_nls_macinuit)
+module_exit(exit_nls_macinuit)
+
+MODULE_LICENSE("Dual BSD/GPL");
diff --git a/fs/nls/mac-roman.c b/fs/nls/mac-roman.c
new file mode 100644
index 0000000..53ce080
--- /dev/null
+++ b/fs/nls/mac-roman.c
@@ -0,0 +1,637 @@
+/*
+ * linux/fs/nls/mac-roman.c
+ *
+ * Charset macroman translation tables.
+ * Generated automatically from the Unicode and charset mapping
+ * tables published by Unicode, Inc. (www.unicode.org).
+ * The Unicode to charset table has only exact mappings.
+ */
+
+/*
+ * COPYRIGHT AND PERMISSION NOTICE
+ *
+ * Copyright 1991-2012 Unicode, Inc.  All rights reserved.  Distributed under
+ * the Terms of Use in http://www.unicode.org/copyright.html.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of the Unicode data files and any associated documentation (the "Data
+ * Files") or Unicode software and any associated documentation (the
+ * "Software") to deal in the Data Files or Software without restriction,
+ * including without limitation the rights to use, copy, modify, merge,
+ * publish, distribute, and/or sell copies of the Data Files or Software, and
+ * to permit persons to whom the Data Files or Software are furnished to do
+ * so, provided that (a) the above copyright notice(s) and this permission
+ * notice appear with all copies of the Data Files or Software, (b) both the
+ * above copyright notice(s) and this permission notice appear in associated
+ * documentation, and (c) there is clear notice in each modified Data File or
+ * in the Software as well as in the documentation associated with the Data
+ * File(s) or Software that the data or software has been modified.
+ *
+ * THE DATA FILES AND SOFTWARE ARE PROVIDED "AS IS", WITHOUT WARRANTY OF ANY
+ * KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF
+ * THIRD PARTY RIGHTS.  IN NO EVENT SHALL THE COPYRIGHT HOLDER OR HOLDERS
+ * INCLUDED IN THIS NOTICE BE LIABLE FOR ANY CLAIM, OR ANY SPECIAL INDIRECT
+ * OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF
+ * USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR
+ * OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
+ * PERFORMANCE OF THE DATA FILES OR SOFTWARE.
+ *
+ * Except as contained in this notice, the name of a copyright holder shall
+ * not be used in advertising or otherwise to promote the sale, use or other
+ * dealings in these Data Files or Software without prior written
+ * authorization of the copyright holder.
+ */
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/string.h>
+#include <linux/nls.h>
+#include <linux/errno.h>
+
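+/*
+ * Byte value -> Unicode code point for Mac OS Roman.  The low half is
+ * plain ASCII; the high half carries accented Latin letters,
+ * typographic punctuation and symbols, including the Apple logo at
+ * 0xf0, which maps to the private-use code point U+F8FF.
+ */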
+static const wchar_t charset2uni[256] = {
+	/* 0x00 */
+	0x0000, 0x0001, 0x0002, 0x0003,
+	0x0004, 0x0005, 0x0006, 0x0007,
+	0x0008, 0x0009, 0x000a, 0x000b,
+	0x000c, 0x000d, 0x000e, 0x000f,
+	/* 0x10 */
+	0x0010, 0x0011, 0x0012, 0x0013,
+	0x0014, 0x0015, 0x0016, 0x0017,
+	0x0018, 0x0019, 0x001a, 0x001b,
+	0x001c, 0x001d, 0x001e, 0x001f,
+	/* 0x20 */
+	0x0020, 0x0021, 0x0022, 0x0023,
+	0x0024, 0x0025, 0x0026, 0x0027,
+	0x0028, 0x0029, 0x002a, 0x002b,
+	0x002c, 0x002d, 0x002e, 0x002f,
+	/* 0x30 */
+	0x0030, 0x0031, 0x0032, 0x0033,
+	0x0034, 0x0035, 0x0036, 0x0037,
+	0x0038, 0x0039, 0x003a, 0x003b,
+	0x003c, 0x003d, 0x003e, 0x003f,
+	/* 0x40 */
+	0x0040, 0x0041, 0x0042, 0x0043,
+	0x0044, 0x0045, 0x0046, 0x0047,
+	0x0048, 0x0049, 0x004a, 0x004b,
+	0x004c, 0x004d, 0x004e, 0x004f,
+	/* 0x50 */
+	0x0050, 0x0051, 0x0052, 0x0053,
+	0x0054, 0x0055, 0x0056, 0x0057,
+	0x0058, 0x0059, 0x005a, 0x005b,
+	0x005c, 0x005d, 0x005e, 0x005f,
+	/* 0x60 */
+	0x0060, 0x0061, 0x0062, 0x0063,
+	0x0064, 0x0065, 0x0066, 0x0067,
+	0x0068, 0x0069, 0x006a, 0x006b,
+	0x006c, 0x006d, 0x006e, 0x006f,
+	/* 0x70 */
+	0x0070, 0x0071, 0x0072, 0x0073,
+	0x0074, 0x0075, 0x0076, 0x0077,
+	0x0078, 0x0079, 0x007a, 0x007b,
+	0x007c, 0x007d, 0x007e, 0x007f,
+	/* 0x80 */
+	0x00c4, 0x00c5, 0x00c7, 0x00c9,
+	0x00d1, 0x00d6, 0x00dc, 0x00e1,
+	0x00e0, 0x00e2, 0x00e4, 0x00e3,
+	0x00e5, 0x00e7, 0x00e9, 0x00e8,
+	/* 0x90 */
+	0x00ea, 0x00eb, 0x00ed, 0x00ec,
+	0x00ee, 0x00ef, 0x00f1, 0x00f3,
+	0x00f2, 0x00f4, 0x00f6, 0x00f5,
+	0x00fa, 0x00f9, 0x00fb, 0x00fc,
+	/* 0xa0 */
+	0x2020, 0x00b0, 0x00a2, 0x00a3,
+	0x00a7, 0x2022, 0x00b6, 0x00df,
+	0x00ae, 0x00a9, 0x2122, 0x00b4,
+	0x00a8, 0x2260, 0x00c6, 0x00d8,
+	/* 0xb0 */
+	0x221e, 0x00b1, 0x2264, 0x2265,
+	0x00a5, 0x00b5, 0x2202, 0x2211,
+	0x220f, 0x03c0, 0x222b, 0x00aa,
+	0x00ba, 0x03a9, 0x00e6, 0x00f8,
+	/* 0xc0 */
+	0x00bf, 0x00a1, 0x00ac, 0x221a,
+	0x0192, 0x2248, 0x2206, 0x00ab,
+	0x00bb, 0x2026, 0x00a0, 0x00c0,
+	0x00c3, 0x00d5, 0x0152, 0x0153,
+	/* 0xd0 */
+	0x2013, 0x2014, 0x201c, 0x201d,
+	0x2018, 0x2019, 0x00f7, 0x25ca,
+	0x00ff, 0x0178, 0x2044, 0x20ac,
+	0x2039, 0x203a, 0xfb01, 0xfb02,
+	/* 0xe0 */
+	0x2021, 0x00b7, 0x201a, 0x201e,
+	0x2030, 0x00c2, 0x00ca, 0x00c1,
+	0x00cb, 0x00c8, 0x00cd, 0x00ce,
+	0x00cf, 0x00cc, 0x00d3, 0x00d4,
+	/* 0xf0 */
+	0xf8ff, 0x00d2, 0x00da, 0x00db,
+	0x00d9, 0x0131, 0x02c6, 0x02dc,
+	0x00af, 0x02d8, 0x02d9, 0x02da,
+	0x00b8, 0x02dd, 0x02db, 0x02c7,
+};
+
+static const unsigned char page00[256] = {
+	0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, /* 0x00-0x07 */
+	0x08, 0x09, 0x0a, 0x0b, 0x0c, 0x0d, 0x0e, 0x0f, /* 0x08-0x0f */
+	0x10, 0x11, 0x12, 0x13, 0x14, 0x15, 0x16, 0x17, /* 0x10-0x17 */
+	0x18, 0x19, 0x1a, 0x1b, 0x1c, 0x1d, 0x1e, 0x1f, /* 0x18-0x1f */
+	0x20, 0x21, 0x22, 0x23, 0x24, 0x25, 0x26, 0x27, /* 0x20-0x27 */
+	0x28, 0x29, 0x2a, 0x2b, 0x2c, 0x2d, 0x2e, 0x2f, /* 0x28-0x2f */
+	0x30, 0x31, 0x32, 0x33, 0x34, 0x35, 0x36, 0x37, /* 0x30-0x37 */
+	0x38, 0x39, 0x3a, 0x3b, 0x3c, 0x3d, 0x3e, 0x3f, /* 0x38-0x3f */
+	0x40, 0x41, 0x42, 0x43, 0x44, 0x45, 0x46, 0x47, /* 0x40-0x47 */
+	0x48, 0x49, 0x4a, 0x4b, 0x4c, 0x4d, 0x4e, 0x4f, /* 0x48-0x4f */
+	0x50, 0x51, 0x52, 0x53, 0x54, 0x55, 0x56, 0x57, /* 0x50-0x57 */
+	0x58, 0x59, 0x5a, 0x5b, 0x5c, 0x5d, 0x5e, 0x5f, /* 0x58-0x5f */
+	0x60, 0x61, 0x62, 0x63, 0x64, 0x65, 0x66, 0x67, /* 0x60-0x67 */
+	0x68, 0x69, 0x6a, 0x6b, 0x6c, 0x6d, 0x6e, 0x6f, /* 0x68-0x6f */
+	0x70, 0x71, 0x72, 0x73, 0x74, 0x75, 0x76, 0x77, /* 0x70-0x77 */
+	0x78, 0x79, 0x7a, 0x7b, 0x7c, 0x7d, 0x7e, 0x7f, /* 0x78-0x7f */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x80-0x87 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x88-0x8f */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x90-0x97 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x98-0x9f */
+	0xca, 0xc1, 0xa2, 0xa3, 0x00, 0xb4, 0x00, 0xa4, /* 0xa0-0xa7 */
+	0xac, 0xa9, 0xbb, 0xc7, 0xc2, 0x00, 0xa8, 0xf8, /* 0xa8-0xaf */
+	0xa1, 0xb1, 0x00, 0x00, 0xab, 0xb5, 0xa6, 0xe1, /* 0xb0-0xb7 */
+	0xfc, 0x00, 0xbc, 0xc8, 0x00, 0x00, 0x00, 0xc0, /* 0xb8-0xbf */
+	0xcb, 0xe7, 0xe5, 0xcc, 0x80, 0x81, 0xae, 0x82, /* 0xc0-0xc7 */
+	0xe9, 0x83, 0xe6, 0xe8, 0xed, 0xea, 0xeb, 0xec, /* 0xc8-0xcf */
+	0x00, 0x84, 0xf1, 0xee, 0xef, 0xcd, 0x85, 0x00, /* 0xd0-0xd7 */
+	0xaf, 0xf4, 0xf2, 0xf3, 0x86, 0x00, 0x00, 0xa7, /* 0xd8-0xdf */
+	0x88, 0x87, 0x89, 0x8b, 0x8a, 0x8c, 0xbe, 0x8d, /* 0xe0-0xe7 */
+	0x8f, 0x8e, 0x90, 0x91, 0x93, 0x92, 0x94, 0x95, /* 0xe8-0xef */
+	0x00, 0x96, 0x98, 0x97, 0x99, 0x9b, 0x9a, 0xd6, /* 0xf0-0xf7 */
+	0xbf, 0x9d, 0x9c, 0x9e, 0x9f, 0x00, 0x00, 0xd8, /* 0xf8-0xff */
+};
+
+static const unsigned char page01[256] = {
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x00-0x07 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x08-0x0f */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x10-0x17 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x18-0x1f */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x20-0x27 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x28-0x2f */
+	0x00, 0xf5, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x30-0x37 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x38-0x3f */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x40-0x47 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x48-0x4f */
+	0x00, 0x00, 0xce, 0xcf, 0x00, 0x00, 0x00, 0x00, /* 0x50-0x57 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x58-0x5f */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x60-0x67 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x68-0x6f */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x70-0x77 */
+	0xd9, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x78-0x7f */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x80-0x87 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x88-0x8f */
+	0x00, 0x00, 0xc4, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x90-0x97 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x98-0x9f */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xa0-0xa7 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xa8-0xaf */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xb0-0xb7 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xb8-0xbf */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xc0-0xc7 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xc8-0xcf */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xd0-0xd7 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xd8-0xdf */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xe0-0xe7 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xe8-0xef */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xf0-0xf7 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xf8-0xff */
+};
+
+static const unsigned char page02[256] = {
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x00-0x07 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x08-0x0f */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x10-0x17 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x18-0x1f */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x20-0x27 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x28-0x2f */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x30-0x37 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x38-0x3f */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x40-0x47 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x48-0x4f */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x50-0x57 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x58-0x5f */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x60-0x67 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x68-0x6f */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x70-0x77 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x78-0x7f */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x80-0x87 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x88-0x8f */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x90-0x97 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x98-0x9f */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xa0-0xa7 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xa8-0xaf */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xb0-0xb7 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xb8-0xbf */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xf6, 0xff, /* 0xc0-0xc7 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xc8-0xcf */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xd0-0xd7 */
+	0xf9, 0xfa, 0xfb, 0xfe, 0xf7, 0xfd, 0x00, 0x00, /* 0xd8-0xdf */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xe0-0xe7 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xe8-0xef */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xf0-0xf7 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xf8-0xff */
+};
+
+static const unsigned char page03[256] = {
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x00-0x07 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x08-0x0f */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x10-0x17 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x18-0x1f */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x20-0x27 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x28-0x2f */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x30-0x37 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x38-0x3f */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x40-0x47 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x48-0x4f */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x50-0x57 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x58-0x5f */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x60-0x67 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x68-0x6f */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x70-0x77 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x78-0x7f */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x80-0x87 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x88-0x8f */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x90-0x97 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x98-0x9f */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xa0-0xa7 */
+	0x00, 0xbd, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xa8-0xaf */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xb0-0xb7 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xb8-0xbf */
+	0xb9, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xc0-0xc7 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xc8-0xcf */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xd0-0xd7 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xd8-0xdf */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xe0-0xe7 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xe8-0xef */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xf0-0xf7 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xf8-0xff */
+};
+
+static const unsigned char page20[256] = {
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x00-0x07 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x08-0x0f */
+	0x00, 0x00, 0x00, 0xd0, 0xd1, 0x00, 0x00, 0x00, /* 0x10-0x17 */
+	0xd4, 0xd5, 0xe2, 0x00, 0xd2, 0xd3, 0xe3, 0x00, /* 0x18-0x1f */
+	0xa0, 0xe0, 0xa5, 0x00, 0x00, 0x00, 0xc9, 0x00, /* 0x20-0x27 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x28-0x2f */
+	0xe4, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x30-0x37 */
+	0x00, 0xdc, 0xdd, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x38-0x3f */
+	0x00, 0x00, 0x00, 0x00, 0xda, 0x00, 0x00, 0x00, /* 0x40-0x47 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x48-0x4f */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x50-0x57 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x58-0x5f */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x60-0x67 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x68-0x6f */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x70-0x77 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x78-0x7f */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x80-0x87 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x88-0x8f */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x90-0x97 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x98-0x9f */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xa0-0xa7 */
+	0x00, 0x00, 0x00, 0x00, 0xdb, 0x00, 0x00, 0x00, /* 0xa8-0xaf */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xb0-0xb7 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xb8-0xbf */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xc0-0xc7 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xc8-0xcf */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xd0-0xd7 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xd8-0xdf */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xe0-0xe7 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xe8-0xef */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xf0-0xf7 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xf8-0xff */
+};
+
+static const unsigned char page21[256] = {
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x00-0x07 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x08-0x0f */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x10-0x17 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x18-0x1f */
+	0x00, 0x00, 0xaa, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x20-0x27 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x28-0x2f */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x30-0x37 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x38-0x3f */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x40-0x47 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x48-0x4f */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x50-0x57 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x58-0x5f */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x60-0x67 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x68-0x6f */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x70-0x77 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x78-0x7f */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x80-0x87 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x88-0x8f */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x90-0x97 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x98-0x9f */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xa0-0xa7 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xa8-0xaf */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xb0-0xb7 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xb8-0xbf */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xc0-0xc7 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xc8-0xcf */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xd0-0xd7 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xd8-0xdf */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xe0-0xe7 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xe8-0xef */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xf0-0xf7 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xf8-0xff */
+};
+
+static const unsigned char page22[256] = {
+	0x00, 0x00, 0xb6, 0x00, 0x00, 0x00, 0xc6, 0x00, /* 0x00-0x07 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xb8, /* 0x08-0x0f */
+	0x00, 0xb7, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x10-0x17 */
+	0x00, 0x00, 0xc3, 0x00, 0x00, 0x00, 0xb0, 0x00, /* 0x18-0x1f */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x20-0x27 */
+	0x00, 0x00, 0x00, 0xba, 0x00, 0x00, 0x00, 0x00, /* 0x28-0x2f */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x30-0x37 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x38-0x3f */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x40-0x47 */
+	0xc5, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x48-0x4f */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x50-0x57 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x58-0x5f */
+	0xad, 0x00, 0x00, 0x00, 0xb2, 0xb3, 0x00, 0x00, /* 0x60-0x67 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x68-0x6f */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x70-0x77 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x78-0x7f */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x80-0x87 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x88-0x8f */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x90-0x97 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x98-0x9f */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xa0-0xa7 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xa8-0xaf */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xb0-0xb7 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xb8-0xbf */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xc0-0xc7 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xc8-0xcf */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xd0-0xd7 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xd8-0xdf */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xe0-0xe7 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xe8-0xef */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xf0-0xf7 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xf8-0xff */
+};
+
+static const unsigned char page25[256] = {
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x00-0x07 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x08-0x0f */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x10-0x17 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x18-0x1f */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x20-0x27 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x28-0x2f */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x30-0x37 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x38-0x3f */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x40-0x47 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x48-0x4f */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x50-0x57 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x58-0x5f */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x60-0x67 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x68-0x6f */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x70-0x77 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x78-0x7f */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x80-0x87 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x88-0x8f */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x90-0x97 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x98-0x9f */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xa0-0xa7 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xa8-0xaf */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xb0-0xb7 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xb8-0xbf */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xc0-0xc7 */
+	0x00, 0x00, 0xd7, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xc8-0xcf */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xd0-0xd7 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xd8-0xdf */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xe0-0xe7 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xe8-0xef */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xf0-0xf7 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xf8-0xff */
+};
+
+static const unsigned char pagef8[256] = {
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x00-0x07 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x08-0x0f */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x10-0x17 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x18-0x1f */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x20-0x27 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x28-0x2f */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x30-0x37 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x38-0x3f */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x40-0x47 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x48-0x4f */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x50-0x57 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x58-0x5f */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x60-0x67 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x68-0x6f */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x70-0x77 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x78-0x7f */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x80-0x87 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x88-0x8f */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x90-0x97 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x98-0x9f */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xa0-0xa7 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xa8-0xaf */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xb0-0xb7 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xb8-0xbf */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xc0-0xc7 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xc8-0xcf */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xd0-0xd7 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xd8-0xdf */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xe0-0xe7 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xe8-0xef */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xf0-0xf7 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xf0, /* 0xf8-0xff */
+};
+
+static const unsigned char pagefb[256] = {
+	0x00, 0xde, 0xdf, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x00-0x07 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x08-0x0f */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x10-0x17 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x18-0x1f */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x20-0x27 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x28-0x2f */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x30-0x37 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x38-0x3f */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x40-0x47 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x48-0x4f */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x50-0x57 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x58-0x5f */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x60-0x67 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x68-0x6f */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x70-0x77 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x78-0x7f */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x80-0x87 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x88-0x8f */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x90-0x97 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x98-0x9f */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xa0-0xa7 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xa8-0xaf */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xb0-0xb7 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xb8-0xbf */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xc0-0xc7 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xc8-0xcf */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xd0-0xd7 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xd8-0xdf */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xe0-0xe7 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xe8-0xef */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xf0-0xf7 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xf8-0xff */
+};
+
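+/*
+ * Unicode block index for Mac Roman; besides the Latin and symbol
+ * pages, pagef8 carries only the Apple logo (U+F8FF -> 0xf0) and
+ * pagefb the fi/fl ligatures (U+FB01/U+FB02 -> 0xde/0xdf).
+ */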
+static const unsigned char *const page_uni2charset[256] = {
+	page00, page01, page02, page03, NULL,   NULL,   NULL,   NULL,
+	NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,
+	NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,
+	NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,
+	page20, page21, page22, NULL,   NULL,   page25, NULL,   NULL,
+	NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,
+	NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,
+	NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,
+	NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,
+	NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,
+	NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,
+	NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,
+	NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,
+	NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,
+	NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,
+	NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,
+	NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,
+	NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,
+	NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,
+	NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,
+	NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,
+	NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,
+	NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,
+	NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,
+	NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,
+	NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,
+	NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,
+	NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,
+	NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,
+	NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,
+	NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,
+	pagef8, NULL,   NULL,   pagefb, NULL,   NULL,   NULL,   NULL,
+};
+
+static const unsigned char charset2lower[256] = {
+	0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x00-0x07 */
+	0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x08-0x0f */
+	0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x10-0x17 */
+	0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x18-0x1f */
+	0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x20-0x27 */
+	0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x28-0x2f */
+	0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x30-0x37 */
+	0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x38-0x3f */
+	0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x40-0x47 */
+	0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x48-0x4f */
+	0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x50-0x57 */
+	0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x58-0x5f */
+	0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x60-0x67 */
+	0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x68-0x6f */
+	0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x70-0x77 */
+	0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x78-0x7f */
+	0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x80-0x87 */
+	0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x88-0x8f */
+	0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x90-0x97 */
+	0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x98-0x9f */
+	0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0xa0-0xa7 */
+	0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0xa8-0xaf */
+	0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0xb0-0xb7 */
+	0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0xb8-0xbf */
+	0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0xc0-0xc7 */
+	0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0xc8-0xcf */
+	0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0xd0-0xd7 */
+	0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0xd8-0xdf */
+	0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0xe0-0xe7 */
+	0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0xe8-0xef */
+	0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0xf0-0xf7 */
+	0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0xf8-0xff */
+};
+
+static const unsigned char charset2upper[256] = {
+	0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x00-0x07 */
+	0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x08-0x0f */
+	0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x10-0x17 */
+	0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x18-0x1f */
+	0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x20-0x27 */
+	0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x28-0x2f */
+	0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x30-0x37 */
+	0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x38-0x3f */
+	0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x40-0x47 */
+	0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x48-0x4f */
+	0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x50-0x57 */
+	0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x58-0x5f */
+	0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x60-0x67 */
+	0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x68-0x6f */
+	0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x70-0x77 */
+	0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x78-0x7f */
+	0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x80-0x87 */
+	0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x88-0x8f */
+	0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x90-0x97 */
+	0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x98-0x9f */
+	0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0xa0-0xa7 */
+	0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0xa8-0xaf */
+	0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0xb0-0xb7 */
+	0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0xb8-0xbf */
+	0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0xc0-0xc7 */
+	0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0xc8-0xcf */
+	0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0xd0-0xd7 */
+	0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0xd8-0xdf */
+	0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0xe0-0xe7 */
+	0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0xe8-0xef */
+	0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0xf0-0xf7 */
+	0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0xf8-0xff */
+};
+
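+/*
+ * Unicode -> Mac Roman conversion; same two-level page lookup as the
+ * other Apple tables.  out[0] is written only when a mapping exists,
+ * and boundlen guards the single output byte.
+ */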
+static int uni2char(wchar_t uni, unsigned char *out, int boundlen)
+{
+	const unsigned char *uni2charset;
+	unsigned char cl = uni & 0x00ff;
+	unsigned char ch = (uni & 0xff00) >> 8;
+
+	if (boundlen <= 0)
+		return -ENAMETOOLONG;
+
+	uni2charset = page_uni2charset[ch];
+	if (uni2charset && uni2charset[cl])
+		out[0] = uni2charset[cl];
+	else
+		return -EINVAL;
+	return 1;
+}
+
+static int char2uni(const unsigned char *rawstring, int boundlen, wchar_t *uni)
+{
+	*uni = charset2uni[*rawstring];
+	if (*uni == 0x0000)
+		return -EINVAL;
+	return 1;
+}
+
+static struct nls_table table = {
+	.charset	= "macroman",
+	.uni2char	= uni2char,
+	.char2uni	= char2uni,
+	.charset2lower	= charset2lower,
+	.charset2upper	= charset2upper,
+	.owner		= THIS_MODULE,
+};
+
+static int __init init_nls_macroman(void)
+{
+	return register_nls(&table);
+}
+
+static void __exit exit_nls_macroman(void)
+{
+	unregister_nls(&table);
+}
+
+module_init(init_nls_macroman)
+module_exit(exit_nls_macroman)
+
+MODULE_LICENSE("Dual BSD/GPL");
diff --git a/fs/nls/mac-romanian.c b/fs/nls/mac-romanian.c
new file mode 100644
index 0000000..add6f7a
--- /dev/null
+++ b/fs/nls/mac-romanian.c
@@ -0,0 +1,602 @@
+/*
+ * linux/fs/nls/mac-romanian.c
+ *
+ * Charset macromanian translation tables.
+ * Generated automatically from the Unicode and charset mapping
+ * tables published by Unicode, Inc. (www.unicode.org).
+ * The Unicode to charset table has only exact mappings.
+ */
+
+/*
+ * COPYRIGHT AND PERMISSION NOTICE
+ *
+ * Copyright 1991-2012 Unicode, Inc.  All rights reserved.  Distributed under
+ * the Terms of Use in http://www.unicode.org/copyright.html.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of the Unicode data files and any associated documentation (the "Data
+ * Files") or Unicode software and any associated documentation (the
+ * "Software") to deal in the Data Files or Software without restriction,
+ * including without limitation the rights to use, copy, modify, merge,
+ * publish, distribute, and/or sell copies of the Data Files or Software, and
+ * to permit persons to whom the Data Files or Software are furnished to do
+ * so, provided that (a) the above copyright notice(s) and this permission
+ * notice appear with all copies of the Data Files or Software, (b) both the
+ * above copyright notice(s) and this permission notice appear in associated
+ * documentation, and (c) there is clear notice in each modified Data File or
+ * in the Software as well as in the documentation associated with the Data
+ * File(s) or Software that the data or software has been modified.
+ *
+ * THE DATA FILES AND SOFTWARE ARE PROVIDED "AS IS", WITHOUT WARRANTY OF ANY
+ * KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF
+ * THIRD PARTY RIGHTS.  IN NO EVENT SHALL THE COPYRIGHT HOLDER OR HOLDERS
+ * INCLUDED IN THIS NOTICE BE LIABLE FOR ANY CLAIM, OR ANY SPECIAL INDIRECT
+ * OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF
+ * USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR
+ * OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
+ * PERFORMANCE OF THE DATA FILES OR SOFTWARE.
+ *
+ * Except as contained in this notice, the name of a copyright holder shall
+ * not be used in advertising or otherwise to promote the sale, use or other
+ * dealings in these Data Files or Software without prior written
+ * authorization of the copyright holder.
+ */
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/string.h>
+#include <linux/nls.h>
+#include <linux/errno.h>
+
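+/*
+ * Byte value -> Unicode code point for Mac Romanian.  The layout
+ * follows Mac Roman, but 0xae/0xbe map to U+0102/U+0103 (A/a with
+ * breve) and 0xaf/0xbf/0xde/0xdf map to U+0218-U+021B (S/s and T/t
+ * with comma below), the letters specific to Romanian.
+ */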
+static const wchar_t charset2uni[256] = {
+	/* 0x00 */
+	0x0000, 0x0001, 0x0002, 0x0003,
+	0x0004, 0x0005, 0x0006, 0x0007,
+	0x0008, 0x0009, 0x000a, 0x000b,
+	0x000c, 0x000d, 0x000e, 0x000f,
+	/* 0x10 */
+	0x0010, 0x0011, 0x0012, 0x0013,
+	0x0014, 0x0015, 0x0016, 0x0017,
+	0x0018, 0x0019, 0x001a, 0x001b,
+	0x001c, 0x001d, 0x001e, 0x001f,
+	/* 0x20 */
+	0x0020, 0x0021, 0x0022, 0x0023,
+	0x0024, 0x0025, 0x0026, 0x0027,
+	0x0028, 0x0029, 0x002a, 0x002b,
+	0x002c, 0x002d, 0x002e, 0x002f,
+	/* 0x30 */
+	0x0030, 0x0031, 0x0032, 0x0033,
+	0x0034, 0x0035, 0x0036, 0x0037,
+	0x0038, 0x0039, 0x003a, 0x003b,
+	0x003c, 0x003d, 0x003e, 0x003f,
+	/* 0x40 */
+	0x0040, 0x0041, 0x0042, 0x0043,
+	0x0044, 0x0045, 0x0046, 0x0047,
+	0x0048, 0x0049, 0x004a, 0x004b,
+	0x004c, 0x004d, 0x004e, 0x004f,
+	/* 0x50 */
+	0x0050, 0x0051, 0x0052, 0x0053,
+	0x0054, 0x0055, 0x0056, 0x0057,
+	0x0058, 0x0059, 0x005a, 0x005b,
+	0x005c, 0x005d, 0x005e, 0x005f,
+	/* 0x60 */
+	0x0060, 0x0061, 0x0062, 0x0063,
+	0x0064, 0x0065, 0x0066, 0x0067,
+	0x0068, 0x0069, 0x006a, 0x006b,
+	0x006c, 0x006d, 0x006e, 0x006f,
+	/* 0x70 */
+	0x0070, 0x0071, 0x0072, 0x0073,
+	0x0074, 0x0075, 0x0076, 0x0077,
+	0x0078, 0x0079, 0x007a, 0x007b,
+	0x007c, 0x007d, 0x007e, 0x007f,
+	/* 0x80 */
+	0x00c4, 0x00c5, 0x00c7, 0x00c9,
+	0x00d1, 0x00d6, 0x00dc, 0x00e1,
+	0x00e0, 0x00e2, 0x00e4, 0x00e3,
+	0x00e5, 0x00e7, 0x00e9, 0x00e8,
+	/* 0x90 */
+	0x00ea, 0x00eb, 0x00ed, 0x00ec,
+	0x00ee, 0x00ef, 0x00f1, 0x00f3,
+	0x00f2, 0x00f4, 0x00f6, 0x00f5,
+	0x00fa, 0x00f9, 0x00fb, 0x00fc,
+	/* 0xa0 */
+	0x2020, 0x00b0, 0x00a2, 0x00a3,
+	0x00a7, 0x2022, 0x00b6, 0x00df,
+	0x00ae, 0x00a9, 0x2122, 0x00b4,
+	0x00a8, 0x2260, 0x0102, 0x0218,
+	/* 0xb0 */
+	0x221e, 0x00b1, 0x2264, 0x2265,
+	0x00a5, 0x00b5, 0x2202, 0x2211,
+	0x220f, 0x03c0, 0x222b, 0x00aa,
+	0x00ba, 0x03a9, 0x0103, 0x0219,
+	/* 0xc0 */
+	0x00bf, 0x00a1, 0x00ac, 0x221a,
+	0x0192, 0x2248, 0x2206, 0x00ab,
+	0x00bb, 0x2026, 0x00a0, 0x00c0,
+	0x00c3, 0x00d5, 0x0152, 0x0153,
+	/* 0xd0 */
+	0x2013, 0x2014, 0x201c, 0x201d,
+	0x2018, 0x2019, 0x00f7, 0x25ca,
+	0x00ff, 0x0178, 0x2044, 0x20ac,
+	0x2039, 0x203a, 0x021a, 0x021b,
+	/* 0xe0 */
+	0x2021, 0x00b7, 0x201a, 0x201e,
+	0x2030, 0x00c2, 0x00ca, 0x00c1,
+	0x00cb, 0x00c8, 0x00cd, 0x00ce,
+	0x00cf, 0x00cc, 0x00d3, 0x00d4,
+	/* 0xf0 */
+	0xf8ff, 0x00d2, 0x00da, 0x00db,
+	0x00d9, 0x0131, 0x02c6, 0x02dc,
+	0x00af, 0x02d8, 0x02d9, 0x02da,
+	0x00b8, 0x02dd, 0x02db, 0x02c7,
+};
+
+static const unsigned char page00[256] = {
+	0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, /* 0x00-0x07 */
+	0x08, 0x09, 0x0a, 0x0b, 0x0c, 0x0d, 0x0e, 0x0f, /* 0x08-0x0f */
+	0x10, 0x11, 0x12, 0x13, 0x14, 0x15, 0x16, 0x17, /* 0x10-0x17 */
+	0x18, 0x19, 0x1a, 0x1b, 0x1c, 0x1d, 0x1e, 0x1f, /* 0x18-0x1f */
+	0x20, 0x21, 0x22, 0x23, 0x24, 0x25, 0x26, 0x27, /* 0x20-0x27 */
+	0x28, 0x29, 0x2a, 0x2b, 0x2c, 0x2d, 0x2e, 0x2f, /* 0x28-0x2f */
+	0x30, 0x31, 0x32, 0x33, 0x34, 0x35, 0x36, 0x37, /* 0x30-0x37 */
+	0x38, 0x39, 0x3a, 0x3b, 0x3c, 0x3d, 0x3e, 0x3f, /* 0x38-0x3f */
+	0x40, 0x41, 0x42, 0x43, 0x44, 0x45, 0x46, 0x47, /* 0x40-0x47 */
+	0x48, 0x49, 0x4a, 0x4b, 0x4c, 0x4d, 0x4e, 0x4f, /* 0x48-0x4f */
+	0x50, 0x51, 0x52, 0x53, 0x54, 0x55, 0x56, 0x57, /* 0x50-0x57 */
+	0x58, 0x59, 0x5a, 0x5b, 0x5c, 0x5d, 0x5e, 0x5f, /* 0x58-0x5f */
+	0x60, 0x61, 0x62, 0x63, 0x64, 0x65, 0x66, 0x67, /* 0x60-0x67 */
+	0x68, 0x69, 0x6a, 0x6b, 0x6c, 0x6d, 0x6e, 0x6f, /* 0x68-0x6f */
+	0x70, 0x71, 0x72, 0x73, 0x74, 0x75, 0x76, 0x77, /* 0x70-0x77 */
+	0x78, 0x79, 0x7a, 0x7b, 0x7c, 0x7d, 0x7e, 0x7f, /* 0x78-0x7f */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x80-0x87 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x88-0x8f */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x90-0x97 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x98-0x9f */
+	0xca, 0xc1, 0xa2, 0xa3, 0x00, 0xb4, 0x00, 0xa4, /* 0xa0-0xa7 */
+	0xac, 0xa9, 0xbb, 0xc7, 0xc2, 0x00, 0xa8, 0xf8, /* 0xa8-0xaf */
+	0xa1, 0xb1, 0x00, 0x00, 0xab, 0xb5, 0xa6, 0xe1, /* 0xb0-0xb7 */
+	0xfc, 0x00, 0xbc, 0xc8, 0x00, 0x00, 0x00, 0xc0, /* 0xb8-0xbf */
+	0xcb, 0xe7, 0xe5, 0xcc, 0x80, 0x81, 0x00, 0x82, /* 0xc0-0xc7 */
+	0xe9, 0x83, 0xe6, 0xe8, 0xed, 0xea, 0xeb, 0xec, /* 0xc8-0xcf */
+	0x00, 0x84, 0xf1, 0xee, 0xef, 0xcd, 0x85, 0x00, /* 0xd0-0xd7 */
+	0x00, 0xf4, 0xf2, 0xf3, 0x86, 0x00, 0x00, 0xa7, /* 0xd8-0xdf */
+	0x88, 0x87, 0x89, 0x8b, 0x8a, 0x8c, 0x00, 0x8d, /* 0xe0-0xe7 */
+	0x8f, 0x8e, 0x90, 0x91, 0x93, 0x92, 0x94, 0x95, /* 0xe8-0xef */
+	0x00, 0x96, 0x98, 0x97, 0x99, 0x9b, 0x9a, 0xd6, /* 0xf0-0xf7 */
+	0x00, 0x9d, 0x9c, 0x9e, 0x9f, 0x00, 0x00, 0xd8, /* 0xf8-0xff */
+};
+
+static const unsigned char page01[256] = {
+	0x00, 0x00, 0xae, 0xbe, 0x00, 0x00, 0x00, 0x00, /* 0x00-0x07 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x08-0x0f */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x10-0x17 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x18-0x1f */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x20-0x27 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x28-0x2f */
+	0x00, 0xf5, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x30-0x37 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x38-0x3f */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x40-0x47 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x48-0x4f */
+	0x00, 0x00, 0xce, 0xcf, 0x00, 0x00, 0x00, 0x00, /* 0x50-0x57 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x58-0x5f */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x60-0x67 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x68-0x6f */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x70-0x77 */
+	0xd9, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x78-0x7f */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x80-0x87 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x88-0x8f */
+	0x00, 0x00, 0xc4, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x90-0x97 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x98-0x9f */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xa0-0xa7 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xa8-0xaf */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xb0-0xb7 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xb8-0xbf */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xc0-0xc7 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xc8-0xcf */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xd0-0xd7 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xd8-0xdf */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xe0-0xe7 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xe8-0xef */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xf0-0xf7 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xf8-0xff */
+};
+
+static const unsigned char page02[256] = {
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x00-0x07 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x08-0x0f */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x10-0x17 */
+	0xaf, 0xbf, 0xde, 0xdf, 0x00, 0x00, 0x00, 0x00, /* 0x18-0x1f */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x20-0x27 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x28-0x2f */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x30-0x37 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x38-0x3f */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x40-0x47 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x48-0x4f */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x50-0x57 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x58-0x5f */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x60-0x67 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x68-0x6f */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x70-0x77 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x78-0x7f */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x80-0x87 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x88-0x8f */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x90-0x97 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x98-0x9f */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xa0-0xa7 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xa8-0xaf */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xb0-0xb7 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xb8-0xbf */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xf6, 0xff, /* 0xc0-0xc7 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xc8-0xcf */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xd0-0xd7 */
+	0xf9, 0xfa, 0xfb, 0xfe, 0xf7, 0xfd, 0x00, 0x00, /* 0xd8-0xdf */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xe0-0xe7 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xe8-0xef */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xf0-0xf7 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xf8-0xff */
+};
+
+static const unsigned char page03[256] = {
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x00-0x07 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x08-0x0f */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x10-0x17 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x18-0x1f */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x20-0x27 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x28-0x2f */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x30-0x37 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x38-0x3f */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x40-0x47 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x48-0x4f */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x50-0x57 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x58-0x5f */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x60-0x67 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x68-0x6f */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x70-0x77 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x78-0x7f */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x80-0x87 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x88-0x8f */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x90-0x97 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x98-0x9f */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xa0-0xa7 */
+	0x00, 0xbd, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xa8-0xaf */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xb0-0xb7 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xb8-0xbf */
+	0xb9, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xc0-0xc7 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xc8-0xcf */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xd0-0xd7 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xd8-0xdf */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xe0-0xe7 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xe8-0xef */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xf0-0xf7 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xf8-0xff */
+};
+
+static const unsigned char page20[256] = {
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x00-0x07 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x08-0x0f */
+	0x00, 0x00, 0x00, 0xd0, 0xd1, 0x00, 0x00, 0x00, /* 0x10-0x17 */
+	0xd4, 0xd5, 0xe2, 0x00, 0xd2, 0xd3, 0xe3, 0x00, /* 0x18-0x1f */
+	0xa0, 0xe0, 0xa5, 0x00, 0x00, 0x00, 0xc9, 0x00, /* 0x20-0x27 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x28-0x2f */
+	0xe4, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x30-0x37 */
+	0x00, 0xdc, 0xdd, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x38-0x3f */
+	0x00, 0x00, 0x00, 0x00, 0xda, 0x00, 0x00, 0x00, /* 0x40-0x47 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x48-0x4f */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x50-0x57 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x58-0x5f */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x60-0x67 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x68-0x6f */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x70-0x77 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x78-0x7f */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x80-0x87 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x88-0x8f */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x90-0x97 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x98-0x9f */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xa0-0xa7 */
+	0x00, 0x00, 0x00, 0x00, 0xdb, 0x00, 0x00, 0x00, /* 0xa8-0xaf */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xb0-0xb7 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xb8-0xbf */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xc0-0xc7 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xc8-0xcf */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xd0-0xd7 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xd8-0xdf */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xe0-0xe7 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xe8-0xef */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xf0-0xf7 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xf8-0xff */
+};
+
+static const unsigned char page21[256] = {
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x00-0x07 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x08-0x0f */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x10-0x17 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x18-0x1f */
+	0x00, 0x00, 0xaa, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x20-0x27 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x28-0x2f */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x30-0x37 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x38-0x3f */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x40-0x47 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x48-0x4f */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x50-0x57 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x58-0x5f */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x60-0x67 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x68-0x6f */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x70-0x77 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x78-0x7f */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x80-0x87 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x88-0x8f */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x90-0x97 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x98-0x9f */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xa0-0xa7 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xa8-0xaf */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xb0-0xb7 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xb8-0xbf */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xc0-0xc7 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xc8-0xcf */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xd0-0xd7 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xd8-0xdf */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xe0-0xe7 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xe8-0xef */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xf0-0xf7 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xf8-0xff */
+};
+
+static const unsigned char page22[256] = {
+	0x00, 0x00, 0xb6, 0x00, 0x00, 0x00, 0xc6, 0x00, /* 0x00-0x07 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xb8, /* 0x08-0x0f */
+	0x00, 0xb7, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x10-0x17 */
+	0x00, 0x00, 0xc3, 0x00, 0x00, 0x00, 0xb0, 0x00, /* 0x18-0x1f */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x20-0x27 */
+	0x00, 0x00, 0x00, 0xba, 0x00, 0x00, 0x00, 0x00, /* 0x28-0x2f */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x30-0x37 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x38-0x3f */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x40-0x47 */
+	0xc5, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x48-0x4f */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x50-0x57 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x58-0x5f */
+	0xad, 0x00, 0x00, 0x00, 0xb2, 0xb3, 0x00, 0x00, /* 0x60-0x67 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x68-0x6f */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x70-0x77 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x78-0x7f */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x80-0x87 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x88-0x8f */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x90-0x97 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x98-0x9f */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xa0-0xa7 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xa8-0xaf */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xb0-0xb7 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xb8-0xbf */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xc0-0xc7 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xc8-0xcf */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xd0-0xd7 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xd8-0xdf */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xe0-0xe7 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xe8-0xef */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xf0-0xf7 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xf8-0xff */
+};
+
+static const unsigned char page25[256] = {
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x00-0x07 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x08-0x0f */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x10-0x17 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x18-0x1f */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x20-0x27 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x28-0x2f */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x30-0x37 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x38-0x3f */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x40-0x47 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x48-0x4f */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x50-0x57 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x58-0x5f */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x60-0x67 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x68-0x6f */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x70-0x77 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x78-0x7f */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x80-0x87 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x88-0x8f */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x90-0x97 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x98-0x9f */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xa0-0xa7 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xa8-0xaf */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xb0-0xb7 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xb8-0xbf */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xc0-0xc7 */
+	0x00, 0x00, 0xd7, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xc8-0xcf */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xd0-0xd7 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xd8-0xdf */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xe0-0xe7 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xe8-0xef */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xf0-0xf7 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xf8-0xff */
+};
+
+static const unsigned char pagef8[256] = {
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x00-0x07 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x08-0x0f */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x10-0x17 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x18-0x1f */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x20-0x27 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x28-0x2f */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x30-0x37 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x38-0x3f */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x40-0x47 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x48-0x4f */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x50-0x57 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x58-0x5f */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x60-0x67 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x68-0x6f */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x70-0x77 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x78-0x7f */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x80-0x87 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x88-0x8f */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x90-0x97 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x98-0x9f */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xa0-0xa7 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xa8-0xaf */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xb0-0xb7 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xb8-0xbf */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xc0-0xc7 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xc8-0xcf */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xd0-0xd7 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xd8-0xdf */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xe0-0xe7 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xe8-0xef */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xf0-0xf7 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xf0, /* 0xf8-0xff */
+};
+
+static const unsigned char *const page_uni2charset[256] = {
+	page00, page01, page02, page03, NULL,   NULL,   NULL,   NULL,
+	NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,
+	NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,
+	NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,
+	page20, page21, page22, NULL,   NULL,   page25, NULL,   NULL,
+	NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,
+	NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,
+	NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,
+	NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,
+	NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,
+	NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,
+	NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,
+	NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,
+	NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,
+	NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,
+	NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,
+	NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,
+	NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,
+	NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,
+	NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,
+	NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,
+	NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,
+	NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,
+	NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,
+	NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,
+	NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,
+	NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,
+	NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,
+	NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,
+	NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,
+	NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,
+	pagef8, NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,
+};
+
+static const unsigned char charset2lower[256] = {
+	0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x00-0x07 */
+	0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x08-0x0f */
+	0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x10-0x17 */
+	0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x18-0x1f */
+	0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x20-0x27 */
+	0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x28-0x2f */
+	0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x30-0x37 */
+	0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x38-0x3f */
+	0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x40-0x47 */
+	0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x48-0x4f */
+	0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x50-0x57 */
+	0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x58-0x5f */
+	0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x60-0x67 */
+	0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x68-0x6f */
+	0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x70-0x77 */
+	0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x78-0x7f */
+	0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x80-0x87 */
+	0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x88-0x8f */
+	0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x90-0x97 */
+	0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x98-0x9f */
+	0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0xa0-0xa7 */
+	0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0xa8-0xaf */
+	0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0xb0-0xb7 */
+	0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0xb8-0xbf */
+	0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0xc0-0xc7 */
+	0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0xc8-0xcf */
+	0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0xd0-0xd7 */
+	0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0xd8-0xdf */
+	0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0xe0-0xe7 */
+	0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0xe8-0xef */
+	0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0xf0-0xf7 */
+	0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0xf8-0xff */
+};
+
+static const unsigned char charset2upper[256] = {
+	0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x00-0x07 */
+	0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x08-0x0f */
+	0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x10-0x17 */
+	0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x18-0x1f */
+	0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x20-0x27 */
+	0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x28-0x2f */
+	0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x30-0x37 */
+	0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x38-0x3f */
+	0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x40-0x47 */
+	0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x48-0x4f */
+	0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x50-0x57 */
+	0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x58-0x5f */
+	0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x60-0x67 */
+	0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x68-0x6f */
+	0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x70-0x77 */
+	0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x78-0x7f */
+	0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x80-0x87 */
+	0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x88-0x8f */
+	0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x90-0x97 */
+	0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x98-0x9f */
+	0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0xa0-0xa7 */
+	0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0xa8-0xaf */
+	0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0xb0-0xb7 */
+	0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0xb8-0xbf */
+	0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0xc0-0xc7 */
+	0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0xc8-0xcf */
+	0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0xd0-0xd7 */
+	0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0xd8-0xdf */
+	0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0xe0-0xe7 */
+	0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0xe8-0xef */
+	0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0xf0-0xf7 */
+	0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0xf8-0xff */
+};
+
+static int uni2char(wchar_t uni, unsigned char *out, int boundlen)
+{
+	const unsigned char *uni2charset;
+	unsigned char cl = uni & 0x00ff;
+	unsigned char ch = (uni & 0xff00) >> 8;
+
+	if (boundlen <= 0)
+		return -ENAMETOOLONG;
+
+	uni2charset = page_uni2charset[ch];
+	if (uni2charset && uni2charset[cl])
+		out[0] = uni2charset[cl];
+	else
+		return -EINVAL;
+	return 1;
+}
+
+static int char2uni(const unsigned char *rawstring, int boundlen, wchar_t *uni)
+{
+	*uni = charset2uni[*rawstring];
+	if (*uni == 0x0000)
+		return -EINVAL;
+	return 1;
+}
+
+static struct nls_table table = {
+	.charset	= "macromanian",
+	.uni2char	= uni2char,
+	.char2uni	= char2uni,
+	.charset2lower	= charset2lower,
+	.charset2upper	= charset2upper,
+	.owner		= THIS_MODULE,
+};
+
+static int __init init_nls_macromanian(void)
+{
+	return register_nls(&table);
+}
+
+static void __exit exit_nls_macromanian(void)
+{
+	unregister_nls(&table);
+}
+
+module_init(init_nls_macromanian)
+module_exit(exit_nls_macromanian)
+
+MODULE_LICENSE("Dual BSD/GPL");
diff --git a/fs/nls/mac-turkish.c b/fs/nls/mac-turkish.c
new file mode 100644
index 0000000..dffa96d
--- /dev/null
+++ b/fs/nls/mac-turkish.c
@@ -0,0 +1,602 @@
+/*
+ * linux/fs/nls/mac-turkish.c
+ *
+ * Charset macturkish translation tables.
+ * Generated automatically from the Unicode and charset
+ * tables from the Unicode Organization (www.unicode.org).
+ * The Unicode to charset table has only exact mappings.
+ */
+
+/*
+ * COPYRIGHT AND PERMISSION NOTICE
+ *
+ * Copyright 1991-2012 Unicode, Inc.  All rights reserved.  Distributed under
+ * the Terms of Use in http://www.unicode.org/copyright.html.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of the Unicode data files and any associated documentation (the "Data
+ * Files") or Unicode software and any associated documentation (the
+ * "Software") to deal in the Data Files or Software without restriction,
+ * including without limitation the rights to use, copy, modify, merge,
+ * publish, distribute, and/or sell copies of the Data Files or Software, and
+ * to permit persons to whom the Data Files or Software are furnished to do
+ * so, provided that (a) the above copyright notice(s) and this permission
+ * notice appear with all copies of the Data Files or Software, (b) both the
+ * above copyright notice(s) and this permission notice appear in associated
+ * documentation, and (c) there is clear notice in each modified Data File or
+ * in the Software as well as in the documentation associated with the Data
+ * File(s) or Software that the data or software has been modified.
+ *
+ * THE DATA FILES AND SOFTWARE ARE PROVIDED "AS IS", WITHOUT WARRANTY OF ANY
+ * KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF
+ * THIRD PARTY RIGHTS.  IN NO EVENT SHALL THE COPYRIGHT HOLDER OR HOLDERS
+ * INCLUDED IN THIS NOTICE BE LIABLE FOR ANY CLAIM, OR ANY SPECIAL INDIRECT
+ * OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF
+ * USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR
+ * OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
+ * PERFORMANCE OF THE DATA FILES OR SOFTWARE.
+ *
+ * Except as contained in this notice, the name of a copyright holder shall
+ * not be used in advertising or otherwise to promote the sale, use or other
+ * dealings in these Data Files or Software without prior written
+ * authorization of the copyright holder.
+ */
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/string.h>
+#include <linux/nls.h>
+#include <linux/errno.h>
+
+static const wchar_t charset2uni[256] = {
+	/* 0x00 */
+	0x0000, 0x0001, 0x0002, 0x0003,
+	0x0004, 0x0005, 0x0006, 0x0007,
+	0x0008, 0x0009, 0x000a, 0x000b,
+	0x000c, 0x000d, 0x000e, 0x000f,
+	/* 0x10 */
+	0x0010, 0x0011, 0x0012, 0x0013,
+	0x0014, 0x0015, 0x0016, 0x0017,
+	0x0018, 0x0019, 0x001a, 0x001b,
+	0x001c, 0x001d, 0x001e, 0x001f,
+	/* 0x20 */
+	0x0020, 0x0021, 0x0022, 0x0023,
+	0x0024, 0x0025, 0x0026, 0x0027,
+	0x0028, 0x0029, 0x002a, 0x002b,
+	0x002c, 0x002d, 0x002e, 0x002f,
+	/* 0x30 */
+	0x0030, 0x0031, 0x0032, 0x0033,
+	0x0034, 0x0035, 0x0036, 0x0037,
+	0x0038, 0x0039, 0x003a, 0x003b,
+	0x003c, 0x003d, 0x003e, 0x003f,
+	/* 0x40 */
+	0x0040, 0x0041, 0x0042, 0x0043,
+	0x0044, 0x0045, 0x0046, 0x0047,
+	0x0048, 0x0049, 0x004a, 0x004b,
+	0x004c, 0x004d, 0x004e, 0x004f,
+	/* 0x50 */
+	0x0050, 0x0051, 0x0052, 0x0053,
+	0x0054, 0x0055, 0x0056, 0x0057,
+	0x0058, 0x0059, 0x005a, 0x005b,
+	0x005c, 0x005d, 0x005e, 0x005f,
+	/* 0x60 */
+	0x0060, 0x0061, 0x0062, 0x0063,
+	0x0064, 0x0065, 0x0066, 0x0067,
+	0x0068, 0x0069, 0x006a, 0x006b,
+	0x006c, 0x006d, 0x006e, 0x006f,
+	/* 0x70 */
+	0x0070, 0x0071, 0x0072, 0x0073,
+	0x0074, 0x0075, 0x0076, 0x0077,
+	0x0078, 0x0079, 0x007a, 0x007b,
+	0x007c, 0x007d, 0x007e, 0x007f,
+	/* 0x80 */
+	0x00c4, 0x00c5, 0x00c7, 0x00c9,
+	0x00d1, 0x00d6, 0x00dc, 0x00e1,
+	0x00e0, 0x00e2, 0x00e4, 0x00e3,
+	0x00e5, 0x00e7, 0x00e9, 0x00e8,
+	/* 0x90 */
+	0x00ea, 0x00eb, 0x00ed, 0x00ec,
+	0x00ee, 0x00ef, 0x00f1, 0x00f3,
+	0x00f2, 0x00f4, 0x00f6, 0x00f5,
+	0x00fa, 0x00f9, 0x00fb, 0x00fc,
+	/* 0xa0 */
+	0x2020, 0x00b0, 0x00a2, 0x00a3,
+	0x00a7, 0x2022, 0x00b6, 0x00df,
+	0x00ae, 0x00a9, 0x2122, 0x00b4,
+	0x00a8, 0x2260, 0x00c6, 0x00d8,
+	/* 0xb0 */
+	0x221e, 0x00b1, 0x2264, 0x2265,
+	0x00a5, 0x00b5, 0x2202, 0x2211,
+	0x220f, 0x03c0, 0x222b, 0x00aa,
+	0x00ba, 0x03a9, 0x00e6, 0x00f8,
+	/* 0xc0 */
+	0x00bf, 0x00a1, 0x00ac, 0x221a,
+	0x0192, 0x2248, 0x2206, 0x00ab,
+	0x00bb, 0x2026, 0x00a0, 0x00c0,
+	0x00c3, 0x00d5, 0x0152, 0x0153,
+	/* 0xd0 */
+	0x2013, 0x2014, 0x201c, 0x201d,
+	0x2018, 0x2019, 0x00f7, 0x25ca,
+	0x00ff, 0x0178, 0x011e, 0x011f,
+	0x0130, 0x0131, 0x015e, 0x015f,
+	/* 0xe0 */
+	0x2021, 0x00b7, 0x201a, 0x201e,
+	0x2030, 0x00c2, 0x00ca, 0x00c1,
+	0x00cb, 0x00c8, 0x00cd, 0x00ce,
+	0x00cf, 0x00cc, 0x00d3, 0x00d4,
+	/* 0xf0 */
+	0xf8ff, 0x00d2, 0x00da, 0x00db,
+	0x00d9, 0xf8a0, 0x02c6, 0x02dc,
+	0x00af, 0x02d8, 0x02d9, 0x02da,
+	0x00b8, 0x02dd, 0x02db, 0x02c7,
+};
+
+static const unsigned char page00[256] = {
+	0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, /* 0x00-0x07 */
+	0x08, 0x09, 0x0a, 0x0b, 0x0c, 0x0d, 0x0e, 0x0f, /* 0x08-0x0f */
+	0x10, 0x11, 0x12, 0x13, 0x14, 0x15, 0x16, 0x17, /* 0x10-0x17 */
+	0x18, 0x19, 0x1a, 0x1b, 0x1c, 0x1d, 0x1e, 0x1f, /* 0x18-0x1f */
+	0x20, 0x21, 0x22, 0x23, 0x24, 0x25, 0x26, 0x27, /* 0x20-0x27 */
+	0x28, 0x29, 0x2a, 0x2b, 0x2c, 0x2d, 0x2e, 0x2f, /* 0x28-0x2f */
+	0x30, 0x31, 0x32, 0x33, 0x34, 0x35, 0x36, 0x37, /* 0x30-0x37 */
+	0x38, 0x39, 0x3a, 0x3b, 0x3c, 0x3d, 0x3e, 0x3f, /* 0x38-0x3f */
+	0x40, 0x41, 0x42, 0x43, 0x44, 0x45, 0x46, 0x47, /* 0x40-0x47 */
+	0x48, 0x49, 0x4a, 0x4b, 0x4c, 0x4d, 0x4e, 0x4f, /* 0x48-0x4f */
+	0x50, 0x51, 0x52, 0x53, 0x54, 0x55, 0x56, 0x57, /* 0x50-0x57 */
+	0x58, 0x59, 0x5a, 0x5b, 0x5c, 0x5d, 0x5e, 0x5f, /* 0x58-0x5f */
+	0x60, 0x61, 0x62, 0x63, 0x64, 0x65, 0x66, 0x67, /* 0x60-0x67 */
+	0x68, 0x69, 0x6a, 0x6b, 0x6c, 0x6d, 0x6e, 0x6f, /* 0x68-0x6f */
+	0x70, 0x71, 0x72, 0x73, 0x74, 0x75, 0x76, 0x77, /* 0x70-0x77 */
+	0x78, 0x79, 0x7a, 0x7b, 0x7c, 0x7d, 0x7e, 0x7f, /* 0x78-0x7f */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x80-0x87 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x88-0x8f */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x90-0x97 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x98-0x9f */
+	0xca, 0xc1, 0xa2, 0xa3, 0x00, 0xb4, 0x00, 0xa4, /* 0xa0-0xa7 */
+	0xac, 0xa9, 0xbb, 0xc7, 0xc2, 0x00, 0xa8, 0xf8, /* 0xa8-0xaf */
+	0xa1, 0xb1, 0x00, 0x00, 0xab, 0xb5, 0xa6, 0xe1, /* 0xb0-0xb7 */
+	0xfc, 0x00, 0xbc, 0xc8, 0x00, 0x00, 0x00, 0xc0, /* 0xb8-0xbf */
+	0xcb, 0xe7, 0xe5, 0xcc, 0x80, 0x81, 0xae, 0x82, /* 0xc0-0xc7 */
+	0xe9, 0x83, 0xe6, 0xe8, 0xed, 0xea, 0xeb, 0xec, /* 0xc8-0xcf */
+	0x00, 0x84, 0xf1, 0xee, 0xef, 0xcd, 0x85, 0x00, /* 0xd0-0xd7 */
+	0xaf, 0xf4, 0xf2, 0xf3, 0x86, 0x00, 0x00, 0xa7, /* 0xd8-0xdf */
+	0x88, 0x87, 0x89, 0x8b, 0x8a, 0x8c, 0xbe, 0x8d, /* 0xe0-0xe7 */
+	0x8f, 0x8e, 0x90, 0x91, 0x93, 0x92, 0x94, 0x95, /* 0xe8-0xef */
+	0x00, 0x96, 0x98, 0x97, 0x99, 0x9b, 0x9a, 0xd6, /* 0xf0-0xf7 */
+	0xbf, 0x9d, 0x9c, 0x9e, 0x9f, 0x00, 0x00, 0xd8, /* 0xf8-0xff */
+};
+
+static const unsigned char page01[256] = {
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x00-0x07 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x08-0x0f */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x10-0x17 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xda, 0xdb, /* 0x18-0x1f */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x20-0x27 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x28-0x2f */
+	0xdc, 0xdd, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x30-0x37 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x38-0x3f */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x40-0x47 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x48-0x4f */
+	0x00, 0x00, 0xce, 0xcf, 0x00, 0x00, 0x00, 0x00, /* 0x50-0x57 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xde, 0xdf, /* 0x58-0x5f */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x60-0x67 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x68-0x6f */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x70-0x77 */
+	0xd9, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x78-0x7f */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x80-0x87 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x88-0x8f */
+	0x00, 0x00, 0xc4, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x90-0x97 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x98-0x9f */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xa0-0xa7 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xa8-0xaf */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xb0-0xb7 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xb8-0xbf */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xc0-0xc7 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xc8-0xcf */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xd0-0xd7 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xd8-0xdf */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xe0-0xe7 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xe8-0xef */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xf0-0xf7 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xf8-0xff */
+};
+
+static const unsigned char page02[256] = {
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x00-0x07 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x08-0x0f */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x10-0x17 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x18-0x1f */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x20-0x27 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x28-0x2f */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x30-0x37 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x38-0x3f */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x40-0x47 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x48-0x4f */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x50-0x57 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x58-0x5f */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x60-0x67 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x68-0x6f */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x70-0x77 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x78-0x7f */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x80-0x87 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x88-0x8f */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x90-0x97 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x98-0x9f */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xa0-0xa7 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xa8-0xaf */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xb0-0xb7 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xb8-0xbf */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xf6, 0xff, /* 0xc0-0xc7 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xc8-0xcf */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xd0-0xd7 */
+	0xf9, 0xfa, 0xfb, 0xfe, 0xf7, 0xfd, 0x00, 0x00, /* 0xd8-0xdf */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xe0-0xe7 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xe8-0xef */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xf0-0xf7 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xf8-0xff */
+};
+
+static const unsigned char page03[256] = {
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x00-0x07 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x08-0x0f */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x10-0x17 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x18-0x1f */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x20-0x27 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x28-0x2f */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x30-0x37 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x38-0x3f */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x40-0x47 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x48-0x4f */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x50-0x57 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x58-0x5f */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x60-0x67 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x68-0x6f */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x70-0x77 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x78-0x7f */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x80-0x87 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x88-0x8f */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x90-0x97 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x98-0x9f */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xa0-0xa7 */
+	0x00, 0xbd, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xa8-0xaf */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xb0-0xb7 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xb8-0xbf */
+	0xb9, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xc0-0xc7 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xc8-0xcf */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xd0-0xd7 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xd8-0xdf */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xe0-0xe7 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xe8-0xef */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xf0-0xf7 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xf8-0xff */
+};
+
+static const unsigned char page20[256] = {
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x00-0x07 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x08-0x0f */
+	0x00, 0x00, 0x00, 0xd0, 0xd1, 0x00, 0x00, 0x00, /* 0x10-0x17 */
+	0xd4, 0xd5, 0xe2, 0x00, 0xd2, 0xd3, 0xe3, 0x00, /* 0x18-0x1f */
+	0xa0, 0xe0, 0xa5, 0x00, 0x00, 0x00, 0xc9, 0x00, /* 0x20-0x27 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x28-0x2f */
+	0xe4, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x30-0x37 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x38-0x3f */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x40-0x47 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x48-0x4f */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x50-0x57 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x58-0x5f */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x60-0x67 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x68-0x6f */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x70-0x77 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x78-0x7f */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x80-0x87 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x88-0x8f */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x90-0x97 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x98-0x9f */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xa0-0xa7 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xa8-0xaf */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xb0-0xb7 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xb8-0xbf */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xc0-0xc7 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xc8-0xcf */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xd0-0xd7 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xd8-0xdf */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xe0-0xe7 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xe8-0xef */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xf0-0xf7 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xf8-0xff */
+};
+
+static const unsigned char page21[256] = {
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x00-0x07 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x08-0x0f */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x10-0x17 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x18-0x1f */
+	0x00, 0x00, 0xaa, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x20-0x27 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x28-0x2f */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x30-0x37 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x38-0x3f */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x40-0x47 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x48-0x4f */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x50-0x57 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x58-0x5f */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x60-0x67 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x68-0x6f */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x70-0x77 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x78-0x7f */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x80-0x87 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x88-0x8f */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x90-0x97 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x98-0x9f */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xa0-0xa7 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xa8-0xaf */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xb0-0xb7 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xb8-0xbf */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xc0-0xc7 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xc8-0xcf */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xd0-0xd7 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xd8-0xdf */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xe0-0xe7 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xe8-0xef */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xf0-0xf7 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xf8-0xff */
+};
+
+static const unsigned char page22[256] = {
+	0x00, 0x00, 0xb6, 0x00, 0x00, 0x00, 0xc6, 0x00, /* 0x00-0x07 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xb8, /* 0x08-0x0f */
+	0x00, 0xb7, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x10-0x17 */
+	0x00, 0x00, 0xc3, 0x00, 0x00, 0x00, 0xb0, 0x00, /* 0x18-0x1f */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x20-0x27 */
+	0x00, 0x00, 0x00, 0xba, 0x00, 0x00, 0x00, 0x00, /* 0x28-0x2f */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x30-0x37 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x38-0x3f */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x40-0x47 */
+	0xc5, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x48-0x4f */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x50-0x57 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x58-0x5f */
+	0xad, 0x00, 0x00, 0x00, 0xb2, 0xb3, 0x00, 0x00, /* 0x60-0x67 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x68-0x6f */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x70-0x77 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x78-0x7f */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x80-0x87 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x88-0x8f */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x90-0x97 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x98-0x9f */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xa0-0xa7 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xa8-0xaf */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xb0-0xb7 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xb8-0xbf */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xc0-0xc7 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xc8-0xcf */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xd0-0xd7 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xd8-0xdf */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xe0-0xe7 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xe8-0xef */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xf0-0xf7 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xf8-0xff */
+};
+
+static const unsigned char page25[256] = {
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x00-0x07 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x08-0x0f */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x10-0x17 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x18-0x1f */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x20-0x27 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x28-0x2f */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x30-0x37 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x38-0x3f */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x40-0x47 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x48-0x4f */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x50-0x57 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x58-0x5f */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x60-0x67 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x68-0x6f */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x70-0x77 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x78-0x7f */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x80-0x87 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x88-0x8f */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x90-0x97 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x98-0x9f */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xa0-0xa7 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xa8-0xaf */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xb0-0xb7 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xb8-0xbf */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xc0-0xc7 */
+	0x00, 0x00, 0xd7, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xc8-0xcf */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xd0-0xd7 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xd8-0xdf */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xe0-0xe7 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xe8-0xef */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xf0-0xf7 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xf8-0xff */
+};
+
+static const unsigned char pagef8[256] = {
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x00-0x07 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x08-0x0f */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x10-0x17 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x18-0x1f */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x20-0x27 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x28-0x2f */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x30-0x37 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x38-0x3f */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x40-0x47 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x48-0x4f */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x50-0x57 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x58-0x5f */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x60-0x67 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x68-0x6f */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x70-0x77 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x78-0x7f */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x80-0x87 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x88-0x8f */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x90-0x97 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x98-0x9f */
+	0xf5, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xa0-0xa7 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xa8-0xaf */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xb0-0xb7 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xb8-0xbf */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xc0-0xc7 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xc8-0xcf */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xd0-0xd7 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xd8-0xdf */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xe0-0xe7 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xe8-0xef */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xf0-0xf7 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xf0, /* 0xf8-0xff */
+};
+
+static const unsigned char *const page_uni2charset[256] = {
+	page00, page01, page02, page03, NULL,   NULL,   NULL,   NULL,
+	NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,
+	NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,
+	NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,
+	page20, page21, page22, NULL,   NULL,   page25, NULL,   NULL,
+	NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,
+	NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,
+	NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,
+	NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,
+	NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,
+	NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,
+	NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,
+	NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,
+	NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,
+	NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,
+	NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,
+	NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,
+	NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,
+	NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,
+	NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,
+	NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,
+	NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,
+	NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,
+	NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,
+	NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,
+	NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,
+	NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,
+	NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,
+	NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,
+	NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,
+	NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,
+	pagef8, NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,
+};
+
+static const unsigned char charset2lower[256] = {
+	0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x00-0x07 */
+	0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x08-0x0f */
+	0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x10-0x17 */
+	0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x18-0x1f */
+	0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x20-0x27 */
+	0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x28-0x2f */
+	0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x30-0x37 */
+	0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x38-0x3f */
+	0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x40-0x47 */
+	0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x48-0x4f */
+	0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x50-0x57 */
+	0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x58-0x5f */
+	0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x60-0x67 */
+	0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x68-0x6f */
+	0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x70-0x77 */
+	0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x78-0x7f */
+	0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x80-0x87 */
+	0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x88-0x8f */
+	0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x90-0x97 */
+	0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x98-0x9f */
+	0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0xa0-0xa7 */
+	0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0xa8-0xaf */
+	0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0xb0-0xb7 */
+	0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0xb8-0xbf */
+	0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0xc0-0xc7 */
+	0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0xc8-0xcf */
+	0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0xd0-0xd7 */
+	0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0xd8-0xdf */
+	0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0xe0-0xe7 */
+	0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0xe8-0xef */
+	0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0xf0-0xf7 */
+	0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0xf8-0xff */
+};
+
+static const unsigned char charset2upper[256] = {
+	0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x00-0x07 */
+	0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x08-0x0f */
+	0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x10-0x17 */
+	0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x18-0x1f */
+	0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x20-0x27 */
+	0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x28-0x2f */
+	0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x30-0x37 */
+	0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x38-0x3f */
+	0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x40-0x47 */
+	0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x48-0x4f */
+	0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x50-0x57 */
+	0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x58-0x5f */
+	0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x60-0x67 */
+	0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x68-0x6f */
+	0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x70-0x77 */
+	0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x78-0x7f */
+	0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x80-0x87 */
+	0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x88-0x8f */
+	0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x90-0x97 */
+	0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x98-0x9f */
+	0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0xa0-0xa7 */
+	0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0xa8-0xaf */
+	0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0xb0-0xb7 */
+	0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0xb8-0xbf */
+	0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0xc0-0xc7 */
+	0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0xc8-0xcf */
+	0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0xd0-0xd7 */
+	0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0xd8-0xdf */
+	0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0xe0-0xe7 */
+	0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0xe8-0xef */
+	0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0xf0-0xf7 */
+	0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0xf8-0xff */
+};
+
+static int uni2char(wchar_t uni, unsigned char *out, int boundlen)
+{
+	const unsigned char *uni2charset;
+	unsigned char cl = uni & 0x00ff;
+	unsigned char ch = (uni & 0xff00) >> 8;
+
+	if (boundlen <= 0)
+		return -ENAMETOOLONG;
+
+	uni2charset = page_uni2charset[ch];
+	if (uni2charset && uni2charset[cl])
+		out[0] = uni2charset[cl];
+	else
+		return -EINVAL;
+	return 1;
+}
+
+static int char2uni(const unsigned char *rawstring, int boundlen, wchar_t *uni)
+{
+	*uni = charset2uni[*rawstring];
+	if (*uni == 0x0000)
+		return -EINVAL;
+	return 1;
+}
+
+static struct nls_table table = {
+	.charset	= "macturkish",
+	.uni2char	= uni2char,
+	.char2uni	= char2uni,
+	.charset2lower	= charset2lower,
+	.charset2upper	= charset2upper,
+	.owner		= THIS_MODULE,
+};
+
+static int __init init_nls_macturkish(void)
+{
+	return register_nls(&table);
+}
+
+static void __exit exit_nls_macturkish(void)
+{
+	unregister_nls(&table);
+}
+
+module_init(init_nls_macturkish)
+module_exit(exit_nls_macturkish)
+
+MODULE_LICENSE("Dual BSD/GPL");
diff --git a/fs/notify/fsnotify.c b/fs/notify/fsnotify.c
index ccb14d3..b39c5c1 100644
--- a/fs/notify/fsnotify.c
+++ b/fs/notify/fsnotify.c
@@ -123,7 +123,7 @@
 }
 EXPORT_SYMBOL_GPL(__fsnotify_parent);
 
-static int send_to_group(struct inode *to_tell, struct vfsmount *mnt,
+static int send_to_group(struct inode *to_tell,
 			 struct fsnotify_mark *inode_mark,
 			 struct fsnotify_mark *vfsmount_mark,
 			 __u32 mask, void *data,
@@ -168,10 +168,10 @@
 			vfsmount_test_mask &= ~inode_mark->ignored_mask;
 	}
 
-	pr_debug("%s: group=%p to_tell=%p mnt=%p mask=%x inode_mark=%p"
+	pr_debug("%s: group=%p to_tell=%p mask=%x inode_mark=%p"
 		 " inode_test_mask=%x vfsmount_mark=%p vfsmount_test_mask=%x"
 		 " data=%p data_is=%d cookie=%d event=%p\n",
-		 __func__, group, to_tell, mnt, mask, inode_mark,
+		 __func__, group, to_tell, mask, inode_mark,
 		 inode_test_mask, vfsmount_mark, vfsmount_test_mask, data,
 		 data_is, cookie, *event);
 
@@ -258,16 +258,16 @@
 
 		if (inode_group > vfsmount_group) {
 			/* handle inode */
-			ret = send_to_group(to_tell, NULL, inode_mark, NULL, mask, data,
+			ret = send_to_group(to_tell, inode_mark, NULL, mask, data,
 					    data_is, cookie, file_name, &event);
 			/* we didn't use the vfsmount_mark */
 			vfsmount_group = NULL;
 		} else if (vfsmount_group > inode_group) {
-			ret = send_to_group(to_tell, &mnt->mnt, NULL, vfsmount_mark, mask, data,
+			ret = send_to_group(to_tell, NULL, vfsmount_mark, mask, data,
 					    data_is, cookie, file_name, &event);
 			inode_group = NULL;
 		} else {
-			ret = send_to_group(to_tell, &mnt->mnt, inode_mark, vfsmount_mark,
+			ret = send_to_group(to_tell, inode_mark, vfsmount_mark,
 					    mask, data, data_is, cookie, file_name,
 					    &event);
 		}
diff --git a/fs/ntfs/file.c b/fs/ntfs/file.c
index 8639169..7389d2d 100644
--- a/fs/ntfs/file.c
+++ b/fs/ntfs/file.c
@@ -2096,7 +2096,9 @@
 	err = file_remove_suid(file);
 	if (err)
 		goto out;
-	file_update_time(file);
+	err = file_update_time(file);
+	if (err)
+		goto out;
 	written = ntfs_file_buffered_write(iocb, iov, nr_segs, pos, ppos,
 			count);
 out:
diff --git a/fs/ntfs/inode.c b/fs/ntfs/inode.c
index 2eaa666..c6dbd3d 100644
--- a/fs/ntfs/inode.c
+++ b/fs/ntfs/inode.c
@@ -2258,7 +2258,7 @@
 	ntfs_inode *ni = NTFS_I(vi);
 
 	truncate_inode_pages(&vi->i_data, 0);
-	end_writeback(vi);
+	clear_inode(vi);
 
 #ifdef NTFS_RW
 	if (NInoDirty(ni)) {
diff --git a/fs/ocfs2/blockcheck.c b/fs/ocfs2/blockcheck.c
index c7ee03c..0725e60 100644
--- a/fs/ocfs2/blockcheck.c
+++ b/fs/ocfs2/blockcheck.c
@@ -422,45 +422,46 @@
 			       struct ocfs2_blockcheck_stats *stats)
 {
 	int rc = 0;
-	struct ocfs2_block_check check;
+	u32 bc_crc32e;
+	u16 bc_ecc;
 	u32 crc, ecc;
 
 	ocfs2_blockcheck_inc_check(stats);
 
-	check.bc_crc32e = le32_to_cpu(bc->bc_crc32e);
-	check.bc_ecc = le16_to_cpu(bc->bc_ecc);
+	bc_crc32e = le32_to_cpu(bc->bc_crc32e);
+	bc_ecc = le16_to_cpu(bc->bc_ecc);
 
 	memset(bc, 0, sizeof(struct ocfs2_block_check));
 
 	/* Fast path - if the crc32 validates, we're good to go */
 	crc = crc32_le(~0, data, blocksize);
-	if (crc == check.bc_crc32e)
+	if (crc == bc_crc32e)
 		goto out;
 
 	ocfs2_blockcheck_inc_failure(stats);
 	mlog(ML_ERROR,
 	     "CRC32 failed: stored: 0x%x, computed 0x%x. Applying ECC.\n",
-	     (unsigned int)check.bc_crc32e, (unsigned int)crc);
+	     (unsigned int)bc_crc32e, (unsigned int)crc);
 
 	/* Ok, try ECC fixups */
 	ecc = ocfs2_hamming_encode_block(data, blocksize);
-	ocfs2_hamming_fix_block(data, blocksize, ecc ^ check.bc_ecc);
+	ocfs2_hamming_fix_block(data, blocksize, ecc ^ bc_ecc);
 
 	/* And check the crc32 again */
 	crc = crc32_le(~0, data, blocksize);
-	if (crc == check.bc_crc32e) {
+	if (crc == bc_crc32e) {
 		ocfs2_blockcheck_inc_recover(stats);
 		goto out;
 	}
 
 	mlog(ML_ERROR, "Fixed CRC32 failed: stored: 0x%x, computed 0x%x\n",
-	     (unsigned int)check.bc_crc32e, (unsigned int)crc);
+	     (unsigned int)bc_crc32e, (unsigned int)crc);
 
 	rc = -EIO;
 
 out:
-	bc->bc_crc32e = cpu_to_le32(check.bc_crc32e);
-	bc->bc_ecc = cpu_to_le16(check.bc_ecc);
+	bc->bc_crc32e = cpu_to_le32(bc_crc32e);
+	bc->bc_ecc = cpu_to_le16(bc_ecc);
 
 	return rc;
 }
@@ -528,7 +529,8 @@
 				   struct ocfs2_blockcheck_stats *stats)
 {
 	int i, rc = 0;
-	struct ocfs2_block_check check;
+	u32 bc_crc32e;
+	u16 bc_ecc;
 	u32 crc, ecc, fix;
 
 	BUG_ON(nr < 0);
@@ -538,21 +540,21 @@
 
 	ocfs2_blockcheck_inc_check(stats);
 
-	check.bc_crc32e = le32_to_cpu(bc->bc_crc32e);
-	check.bc_ecc = le16_to_cpu(bc->bc_ecc);
+	bc_crc32e = le32_to_cpu(bc->bc_crc32e);
+	bc_ecc = le16_to_cpu(bc->bc_ecc);
 
 	memset(bc, 0, sizeof(struct ocfs2_block_check));
 
 	/* Fast path - if the crc32 validates, we're good to go */
 	for (i = 0, crc = ~0; i < nr; i++)
 		crc = crc32_le(crc, bhs[i]->b_data, bhs[i]->b_size);
-	if (crc == check.bc_crc32e)
+	if (crc == bc_crc32e)
 		goto out;
 
 	ocfs2_blockcheck_inc_failure(stats);
 	mlog(ML_ERROR,
 	     "CRC32 failed: stored: %u, computed %u.  Applying ECC.\n",
-	     (unsigned int)check.bc_crc32e, (unsigned int)crc);
+	     (unsigned int)bc_crc32e, (unsigned int)crc);
 
 	/* Ok, try ECC fixups */
 	for (i = 0, ecc = 0; i < nr; i++) {
@@ -565,7 +567,7 @@
 						bhs[i]->b_size * 8,
 						bhs[i]->b_size * 8 * i);
 	}
-	fix = ecc ^ check.bc_ecc;
+	fix = ecc ^ bc_ecc;
 	for (i = 0; i < nr; i++) {
 		/*
 		 * Try the fix against each buffer.  It will only affect
@@ -578,19 +580,19 @@
 	/* And check the crc32 again */
 	for (i = 0, crc = ~0; i < nr; i++)
 		crc = crc32_le(crc, bhs[i]->b_data, bhs[i]->b_size);
-	if (crc == check.bc_crc32e) {
+	if (crc == bc_crc32e) {
 		ocfs2_blockcheck_inc_recover(stats);
 		goto out;
 	}
 
 	mlog(ML_ERROR, "Fixed CRC32 failed: stored: %u, computed %u\n",
-	     (unsigned int)check.bc_crc32e, (unsigned int)crc);
+	     (unsigned int)bc_crc32e, (unsigned int)crc);
 
 	rc = -EIO;
 
 out:
-	bc->bc_crc32e = cpu_to_le32(check.bc_crc32e);
-	bc->bc_ecc = cpu_to_le16(check.bc_ecc);
+	bc->bc_crc32e = cpu_to_le32(bc_crc32e);
+	bc->bc_ecc = cpu_to_le16(bc_ecc);
 
 	return rc;
 }
diff --git a/fs/ocfs2/dlm/dlmast.c b/fs/ocfs2/dlm/dlmast.c
index 3a3ed4b..fbec0be 100644
--- a/fs/ocfs2/dlm/dlmast.c
+++ b/fs/ocfs2/dlm/dlmast.c
@@ -293,7 +293,7 @@
 	struct dlm_proxy_ast *past = (struct dlm_proxy_ast *) msg->buf;
 	char *name;
 	struct list_head *iter, *head=NULL;
-	u64 cookie;
+	__be64 cookie;
 	u32 flags;
 	u8 node;
 
diff --git a/fs/ocfs2/dlm/dlmcommon.h b/fs/ocfs2/dlm/dlmcommon.h
index a5952ce..de854cc 100644
--- a/fs/ocfs2/dlm/dlmcommon.h
+++ b/fs/ocfs2/dlm/dlmcommon.h
@@ -679,7 +679,7 @@
 };
 
 union dlm_query_join_response {
-	u32 intval;
+	__be32 intval;
 	struct dlm_query_join_packet packet;
 };
 
@@ -755,8 +755,8 @@
 struct dlm_node_info {
 	u8 ni_nodenum;
 	u8 pad1;
-	u16 ni_ipv4_port;
-	u32 ni_ipv4_address;
+	__be16 ni_ipv4_port;
+	__be32 ni_ipv4_address;
 };
 
 struct dlm_query_nodeinfo {
diff --git a/fs/ocfs2/dlm/dlmdomain.c b/fs/ocfs2/dlm/dlmdomain.c
index 92f2ead..9e89d70 100644
--- a/fs/ocfs2/dlm/dlmdomain.c
+++ b/fs/ocfs2/dlm/dlmdomain.c
@@ -818,7 +818,7 @@
 	union dlm_query_join_response response;
 
 	response.packet = *packet;
-	*wire = cpu_to_be32(response.intval);
+	*wire = be32_to_cpu(response.intval);
 }
 
 static void dlm_query_join_wire_to_packet(u32 wire,
diff --git a/fs/ocfs2/dlmfs/dlmfs.c b/fs/ocfs2/dlmfs/dlmfs.c
index 3b5825e..e31d6ae 100644
--- a/fs/ocfs2/dlmfs/dlmfs.c
+++ b/fs/ocfs2/dlmfs/dlmfs.c
@@ -367,7 +367,7 @@
 	int status;
 	struct dlmfs_inode_private *ip;
 
-	end_writeback(inode);
+	clear_inode(inode);
 
 	mlog(0, "inode %lu\n", inode->i_ino);
 
diff --git a/fs/ocfs2/export.c b/fs/ocfs2/export.c
index 745db42..322216a 100644
--- a/fs/ocfs2/export.c
+++ b/fs/ocfs2/export.c
@@ -177,21 +177,23 @@
 	return parent;
 }
 
-static int ocfs2_encode_fh(struct dentry *dentry, u32 *fh_in, int *max_len,
-			   int connectable)
+static int ocfs2_encode_fh(struct inode *inode, u32 *fh_in, int *max_len,
+			   struct inode *parent)
 {
-	struct inode *inode = dentry->d_inode;
 	int len = *max_len;
 	int type = 1;
 	u64 blkno;
 	u32 generation;
 	__le32 *fh = (__force __le32 *) fh_in;
 
+#ifdef TRACE_HOOKS_ARE_NOT_BRAINDEAD_IN_YOUR_OPINION
+#error "You go ahead and fix that mess, then.  Somehow"
 	trace_ocfs2_encode_fh_begin(dentry, dentry->d_name.len,
 				    dentry->d_name.name,
 				    fh, len, connectable);
+#endif
 
-	if (connectable && (len < 6)) {
+	if (parent && (len < 6)) {
 		*max_len = 6;
 		type = 255;
 		goto bail;
@@ -211,12 +213,7 @@
 	fh[1] = cpu_to_le32((u32)(blkno & 0xffffffff));
 	fh[2] = cpu_to_le32(generation);
 
-	if (connectable && !S_ISDIR(inode->i_mode)) {
-		struct inode *parent;
-
-		spin_lock(&dentry->d_lock);
-
-		parent = dentry->d_parent->d_inode;
+	if (parent) {
 		blkno = OCFS2_I(parent)->ip_blkno;
 		generation = parent->i_generation;
 
@@ -224,8 +221,6 @@
 		fh[4] = cpu_to_le32((u32)(blkno & 0xffffffff));
 		fh[5] = cpu_to_le32(generation);
 
-		spin_unlock(&dentry->d_lock);
-
 		len = 6;
 		type = 2;
 
diff --git a/fs/ocfs2/inode.c b/fs/ocfs2/inode.c
index 17454a9..d89e08a 100644
--- a/fs/ocfs2/inode.c
+++ b/fs/ocfs2/inode.c
@@ -273,11 +273,13 @@
 	inode->i_gid = le32_to_cpu(fe->i_gid);
 
 	/* Fast symlinks will have i_size but no allocated clusters. */
-	if (S_ISLNK(inode->i_mode) && !fe->i_clusters)
+	if (S_ISLNK(inode->i_mode) && !fe->i_clusters) {
 		inode->i_blocks = 0;
-	else
+		inode->i_mapping->a_ops = &ocfs2_fast_symlink_aops;
+	} else {
 		inode->i_blocks = ocfs2_inode_sector_count(inode);
-	inode->i_mapping->a_ops = &ocfs2_aops;
+		inode->i_mapping->a_ops = &ocfs2_aops;
+	}
 	inode->i_atime.tv_sec = le64_to_cpu(fe->i_atime);
 	inode->i_atime.tv_nsec = le32_to_cpu(fe->i_atime_nsec);
 	inode->i_mtime.tv_sec = le64_to_cpu(fe->i_mtime);
@@ -331,10 +333,7 @@
 		    OCFS2_I(inode)->ip_dir_lock_gen = 1;
 		    break;
 	    case S_IFLNK:
-		    if (ocfs2_inode_is_fast_symlink(inode))
-			inode->i_op = &ocfs2_fast_symlink_inode_operations;
-		    else
-			inode->i_op = &ocfs2_symlink_inode_operations;
+		    inode->i_op = &ocfs2_symlink_inode_operations;
 		    i_size_write(inode, le64_to_cpu(fe->i_size));
 		    break;
 	    default:
@@ -1069,7 +1068,7 @@
 	int status;
 	struct ocfs2_inode_info *oi = OCFS2_I(inode);
 
-	end_writeback(inode);
+	clear_inode(inode);
 	trace_ocfs2_clear_inode((unsigned long long)oi->ip_blkno,
 				inode->i_nlink);
 
diff --git a/fs/ocfs2/ioctl.c b/fs/ocfs2/ioctl.c
index a1a1bfd..d96f7f8 100644
--- a/fs/ocfs2/ioctl.c
+++ b/fs/ocfs2/ioctl.c
@@ -864,7 +864,7 @@
 		if (status)
 			break;
 
-		reqp = (struct ocfs2_info_request *)(unsigned long)req_addr;
+		reqp = (struct ocfs2_info_request __user *)(unsigned long)req_addr;
 		if (!reqp) {
 			status = -EINVAL;
 			goto bail;
@@ -888,9 +888,11 @@
 	struct ocfs2_space_resv sr;
 	struct ocfs2_new_group_input input;
 	struct reflink_arguments args;
-	const char *old_path, *new_path;
+	const char __user *old_path;
+	const char __user *new_path;
 	bool preserve;
 	struct ocfs2_info info;
+	void __user *argp = (void __user *)arg;
 
 	switch (cmd) {
 	case OCFS2_IOC_GETFLAGS:
@@ -937,17 +939,15 @@
 
 		return ocfs2_group_add(inode, &input);
 	case OCFS2_IOC_REFLINK:
-		if (copy_from_user(&args, (struct reflink_arguments *)arg,
-				   sizeof(args)))
+		if (copy_from_user(&args, argp, sizeof(args)))
 			return -EFAULT;
-		old_path = (const char *)(unsigned long)args.old_path;
-		new_path = (const char *)(unsigned long)args.new_path;
+		old_path = (const char __user *)(unsigned long)args.old_path;
+		new_path = (const char __user *)(unsigned long)args.new_path;
 		preserve = (args.preserve != 0);
 
 		return ocfs2_reflink_ioctl(inode, old_path, new_path, preserve);
 	case OCFS2_IOC_INFO:
-		if (copy_from_user(&info, (struct ocfs2_info __user *)arg,
-				   sizeof(struct ocfs2_info)))
+		if (copy_from_user(&info, argp, sizeof(struct ocfs2_info)))
 			return -EFAULT;
 
 		return ocfs2_info_handle(inode, &info, 0);
@@ -960,22 +960,20 @@
 		if (!capable(CAP_SYS_ADMIN))
 			return -EPERM;
 
-		if (copy_from_user(&range, (struct fstrim_range *)arg,
-		    sizeof(range)))
+		if (copy_from_user(&range, argp, sizeof(range)))
 			return -EFAULT;
 
 		ret = ocfs2_trim_fs(sb, &range);
 		if (ret < 0)
 			return ret;
 
-		if (copy_to_user((struct fstrim_range *)arg, &range,
-		    sizeof(range)))
+		if (copy_to_user(argp, &range, sizeof(range)))
 			return -EFAULT;
 
 		return 0;
 	}
 	case OCFS2_IOC_MOVE_EXT:
-		return ocfs2_ioctl_move_extents(filp, (void __user *)arg);
+		return ocfs2_ioctl_move_extents(filp, argp);
 	default:
 		return -ENOTTY;
 	}
@@ -988,6 +986,7 @@
 	struct reflink_arguments args;
 	struct inode *inode = file->f_path.dentry->d_inode;
 	struct ocfs2_info info;
+	void __user *argp = (void __user *)arg;
 
 	switch (cmd) {
 	case OCFS2_IOC32_GETFLAGS:
@@ -1006,16 +1005,14 @@
 	case FITRIM:
 		break;
 	case OCFS2_IOC_REFLINK:
-		if (copy_from_user(&args, (struct reflink_arguments *)arg,
-				   sizeof(args)))
+		if (copy_from_user(&args, argp, sizeof(args)))
 			return -EFAULT;
 		preserve = (args.preserve != 0);
 
 		return ocfs2_reflink_ioctl(inode, compat_ptr(args.old_path),
 					   compat_ptr(args.new_path), preserve);
 	case OCFS2_IOC_INFO:
-		if (copy_from_user(&info, (struct ocfs2_info __user *)arg,
-				   sizeof(struct ocfs2_info)))
+		if (copy_from_user(&info, argp, sizeof(struct ocfs2_info)))
 			return -EFAULT;
 
 		return ocfs2_info_handle(inode, &info, 1);
diff --git a/fs/ocfs2/move_extents.c b/fs/ocfs2/move_extents.c
index b1e3fce..6083432 100644
--- a/fs/ocfs2/move_extents.c
+++ b/fs/ocfs2/move_extents.c
@@ -1082,8 +1082,7 @@
 	context->file = filp;
 
 	if (argp) {
-		if (copy_from_user(&range, (struct ocfs2_move_extents *)argp,
-				   sizeof(range))) {
+		if (copy_from_user(&range, argp, sizeof(range))) {
 			status = -EFAULT;
 			goto out;
 		}
@@ -1138,8 +1137,7 @@
 	 * length and new_offset even if failure happens somewhere.
 	 */
 	if (argp) {
-		if (copy_to_user((struct ocfs2_move_extents *)argp, &range,
-				sizeof(range)))
+		if (copy_to_user(argp, &range, sizeof(range)))
 			status = -EFAULT;
 	}
 
diff --git a/fs/ocfs2/namei.c b/fs/ocfs2/namei.c
index a9856e3..9f39c64 100644
--- a/fs/ocfs2/namei.c
+++ b/fs/ocfs2/namei.c
@@ -1724,15 +1724,16 @@
 	fe = (struct ocfs2_dinode *) new_fe_bh->b_data;
 	inode->i_rdev = 0;
 	newsize = l - 1;
+	inode->i_op = &ocfs2_symlink_inode_operations;
 	if (l > ocfs2_fast_symlink_chars(sb)) {
 		u32 offset = 0;
 
-		inode->i_op = &ocfs2_symlink_inode_operations;
 		status = dquot_alloc_space_nodirty(inode,
 		    ocfs2_clusters_to_bytes(osb->sb, 1));
 		if (status)
 			goto bail;
 		did_quota = 1;
+		inode->i_mapping->a_ops = &ocfs2_aops;
 		status = ocfs2_add_inode_data(osb, inode, &offset, 1, 0,
 					      new_fe_bh,
 					      handle, data_ac, NULL,
@@ -1750,7 +1751,7 @@
 		i_size_write(inode, newsize);
 		inode->i_blocks = ocfs2_inode_sector_count(inode);
 	} else {
-		inode->i_op = &ocfs2_fast_symlink_inode_operations;
+		inode->i_mapping->a_ops = &ocfs2_fast_symlink_aops;
 		memcpy((char *) fe->id2.i_symlink, symname, l);
 		i_size_write(inode, newsize);
 		inode->i_blocks = 0;
diff --git a/fs/ocfs2/symlink.c b/fs/ocfs2/symlink.c
index 5d22872..f1fbb4b 100644
--- a/fs/ocfs2/symlink.c
+++ b/fs/ocfs2/symlink.c
@@ -54,101 +54,40 @@
 #include "buffer_head_io.h"
 
 
-static char *ocfs2_fast_symlink_getlink(struct inode *inode,
-					struct buffer_head **bh)
+static int ocfs2_fast_symlink_readpage(struct file *unused, struct page *page)
 {
-	int status;
-	char *link = NULL;
+	struct inode *inode = page->mapping->host;
+	struct buffer_head *bh;
+	int status = ocfs2_read_inode_block(inode, &bh);
 	struct ocfs2_dinode *fe;
+	const char *link;
+	void *kaddr;
+	size_t len;
 
-	status = ocfs2_read_inode_block(inode, bh);
 	if (status < 0) {
 		mlog_errno(status);
-		link = ERR_PTR(status);
-		goto bail;
+		return status;
 	}
 
-	fe = (struct ocfs2_dinode *) (*bh)->b_data;
+	fe = (struct ocfs2_dinode *) bh->b_data;
 	link = (char *) fe->id2.i_symlink;
-bail:
-
-	return link;
-}
-
-static int ocfs2_readlink(struct dentry *dentry,
-			  char __user *buffer,
-			  int buflen)
-{
-	int ret;
-	char *link;
-	struct buffer_head *bh = NULL;
-	struct inode *inode = dentry->d_inode;
-
-	link = ocfs2_fast_symlink_getlink(inode, &bh);
-	if (IS_ERR(link)) {
-		ret = PTR_ERR(link);
-		goto out;
-	}
-
-	/*
-	 * Without vfsmount we can't update atime now,
-	 * but we will update atime here ultimately.
-	 */
-	ret = vfs_readlink(dentry, buffer, buflen, link);
-
+	/* will be less than a page size */
+	len = strnlen(link, ocfs2_fast_symlink_chars(inode->i_sb));
+	kaddr = kmap_atomic(page);
+	memcpy(kaddr, link, len + 1);
+	kunmap_atomic(kaddr);
+	SetPageUptodate(page);
+	unlock_page(page);
 	brelse(bh);
-out:
-	if (ret < 0)
-		mlog_errno(ret);
-	return ret;
+	return 0;
 }
 
-static void *ocfs2_fast_follow_link(struct dentry *dentry,
-				    struct nameidata *nd)
-{
-	int status = 0;
-	int len;
-	char *target, *link = ERR_PTR(-ENOMEM);
-	struct inode *inode = dentry->d_inode;
-	struct buffer_head *bh = NULL;
-
-	BUG_ON(!ocfs2_inode_is_fast_symlink(inode));
-	target = ocfs2_fast_symlink_getlink(inode, &bh);
-	if (IS_ERR(target)) {
-		status = PTR_ERR(target);
-		mlog_errno(status);
-		goto bail;
-	}
-
-	/* Fast symlinks can't be large */
-	len = strnlen(target, ocfs2_fast_symlink_chars(inode->i_sb));
-	link = kzalloc(len + 1, GFP_NOFS);
-	if (!link) {
-		status = -ENOMEM;
-		mlog_errno(status);
-		goto bail;
-	}
-
-	memcpy(link, target, len);
-
-bail:
-	nd_set_link(nd, status ? ERR_PTR(status) : link);
-	brelse(bh);
-
-	if (status)
-		mlog_errno(status);
-	return NULL;
-}
-
-static void ocfs2_fast_put_link(struct dentry *dentry, struct nameidata *nd, void *cookie)
-{
-	char *link = nd_get_link(nd);
-	if (!IS_ERR(link))
-		kfree(link);
-}
+const struct address_space_operations ocfs2_fast_symlink_aops = {
+	.readpage		= ocfs2_fast_symlink_readpage,
+};
 
 const struct inode_operations ocfs2_symlink_inode_operations = {
-	.readlink	= page_readlink,
+	.readlink	= generic_readlink,
 	.follow_link	= page_follow_link_light,
 	.put_link	= page_put_link,
 	.getattr	= ocfs2_getattr,
@@ -159,15 +98,3 @@
 	.removexattr	= generic_removexattr,
 	.fiemap		= ocfs2_fiemap,
 };
-const struct inode_operations ocfs2_fast_symlink_inode_operations = {
-	.readlink	= ocfs2_readlink,
-	.follow_link	= ocfs2_fast_follow_link,
-	.put_link	= ocfs2_fast_put_link,
-	.getattr	= ocfs2_getattr,
-	.setattr	= ocfs2_setattr,
-	.setxattr	= generic_setxattr,
-	.getxattr	= generic_getxattr,
-	.listxattr	= ocfs2_listxattr,
-	.removexattr	= generic_removexattr,
-	.fiemap		= ocfs2_fiemap,
-};
diff --git a/fs/ocfs2/symlink.h b/fs/ocfs2/symlink.h
index 65a6c9c..71ee424 100644
--- a/fs/ocfs2/symlink.h
+++ b/fs/ocfs2/symlink.h
@@ -27,7 +27,7 @@
 #define OCFS2_SYMLINK_H
 
 extern const struct inode_operations ocfs2_symlink_inode_operations;
-extern const struct inode_operations ocfs2_fast_symlink_inode_operations;
+extern const struct address_space_operations ocfs2_fast_symlink_aops;
 
 /*
  * Test whether an inode is a fast symlink.
diff --git a/fs/omfs/inode.c b/fs/omfs/inode.c
index dbc8422..e6213b3 100644
--- a/fs/omfs/inode.c
+++ b/fs/omfs/inode.c
@@ -184,7 +184,7 @@
 static void omfs_evict_inode(struct inode *inode)
 {
 	truncate_inode_pages(&inode->i_data, 0);
-	end_writeback(inode);
+	clear_inode(inode);
 
 	if (inode->i_nlink)
 		return;
diff --git a/fs/open.c b/fs/open.c
index d543012..d6c79a0 100644
--- a/fs/open.c
+++ b/fs/open.c
@@ -654,10 +654,23 @@
 	return error;
 }
 
-static struct file *__dentry_open(struct dentry *dentry, struct vfsmount *mnt,
-					struct file *f,
-					int (*open)(struct inode *, struct file *),
-					const struct cred *cred)
+int open_check_o_direct(struct file *f)
+{
+	/* NB: we're sure to have correct a_ops only after f_op->open */
+	if (f->f_flags & O_DIRECT) {
+		if (!f->f_mapping->a_ops ||
+		    ((!f->f_mapping->a_ops->direct_IO) &&
+		    (!f->f_mapping->a_ops->get_xip_mem))) {
+			return -EINVAL;
+		}
+	}
+	return 0;
+}
+
+static struct file *do_dentry_open(struct dentry *dentry, struct vfsmount *mnt,
+				   struct file *f,
+				   int (*open)(struct inode *, struct file *),
+				   const struct cred *cred)
 {
 	static const struct file_operations empty_fops = {};
 	struct inode *inode;
@@ -713,16 +726,6 @@
 
 	file_ra_state_init(&f->f_ra, f->f_mapping->host->i_mapping);
 
-	/* NB: we're sure to have correct a_ops only after f_op->open */
-	if (f->f_flags & O_DIRECT) {
-		if (!f->f_mapping->a_ops ||
-		    ((!f->f_mapping->a_ops->direct_IO) &&
-		    (!f->f_mapping->a_ops->get_xip_mem))) {
-			fput(f);
-			f = ERR_PTR(-EINVAL);
-		}
-	}
-
 	return f;
 
 cleanup_all:
@@ -744,12 +747,29 @@
 	f->f_path.dentry = NULL;
 	f->f_path.mnt = NULL;
 cleanup_file:
-	put_filp(f);
 	dput(dentry);
 	mntput(mnt);
 	return ERR_PTR(error);
 }
 
+static struct file *__dentry_open(struct dentry *dentry, struct vfsmount *mnt,
+				struct file *f,
+				int (*open)(struct inode *, struct file *),
+				const struct cred *cred)
+{
+	struct file *res = do_dentry_open(dentry, mnt, f, open, cred);
+	if (!IS_ERR(res)) {
+		int error = open_check_o_direct(f);
+		if (error) {
+			fput(res);
+			res = ERR_PTR(error);
+		}
+	} else {
+		put_filp(f);
+	}
+	return res;
+}
+
 /**
  * lookup_instantiate_filp - instantiates the open intent filp
  * @nd: pointer to nameidata
@@ -804,13 +824,31 @@
 
 	/* Pick up the filp from the open intent */
 	filp = nd->intent.open.file;
-	nd->intent.open.file = NULL;
 
 	/* Has the filesystem initialised the file for us? */
-	if (filp->f_path.dentry == NULL) {
+	if (filp->f_path.dentry != NULL) {
+		nd->intent.open.file = NULL;
+	} else {
+		struct file *res;
+
 		path_get(&nd->path);
-		filp = __dentry_open(nd->path.dentry, nd->path.mnt, filp,
-				     NULL, cred);
+		res = do_dentry_open(nd->path.dentry, nd->path.mnt,
+				     filp, NULL, cred);
+		if (!IS_ERR(res)) {
+			int error;
+
+			nd->intent.open.file = NULL;
+			BUG_ON(res != filp);
+
+			error = open_check_o_direct(filp);
+			if (error) {
+				fput(filp);
+				filp = ERR_PTR(error);
+			}
+		} else {
+			/* Allow nd->intent.open.file to be recycled */
+			filp = res;
+		}
 	}
 	return filp;
 }
diff --git a/fs/pipe.c b/fs/pipe.c
index fec5e4a..49c1065 100644
--- a/fs/pipe.c
+++ b/fs/pipe.c
@@ -654,8 +654,11 @@
 		wake_up_interruptible_sync_poll(&pipe->wait, POLLIN | POLLRDNORM);
 		kill_fasync(&pipe->fasync_readers, SIGIO, POLL_IN);
 	}
-	if (ret > 0)
-		file_update_time(filp);
+	if (ret > 0) {
+		int err = file_update_time(filp);
+		if (err)
+			ret = err;
+	}
 	return ret;
 }
 
@@ -693,7 +696,7 @@
 
 			return put_user(count, (int __user *)arg);
 		default:
-			return -EINVAL;
+			return -ENOIOCTLCMD;
 	}
 }
 
diff --git a/fs/pnode.c b/fs/pnode.c
index ab5fa9e..bed378d 100644
--- a/fs/pnode.c
+++ b/fs/pnode.c
@@ -257,12 +257,12 @@
 		prev_src_mnt  = child;
 	}
 out:
-	br_write_lock(vfsmount_lock);
+	br_write_lock(&vfsmount_lock);
 	while (!list_empty(&tmp_list)) {
 		child = list_first_entry(&tmp_list, struct mount, mnt_hash);
 		umount_tree(child, 0, &umount_list);
 	}
-	br_write_unlock(vfsmount_lock);
+	br_write_unlock(&vfsmount_lock);
 	release_mounts(&umount_list);
 	return ret;
 }
diff --git a/fs/proc/array.c b/fs/proc/array.c
index dc4c5a7..c1c207c 100644
--- a/fs/proc/array.c
+++ b/fs/proc/array.c
@@ -370,7 +370,7 @@
 			struct pid *pid, struct task_struct *task, int whole)
 {
 	unsigned long vsize, eip, esp, wchan = ~0UL;
-	long priority, nice;
+	int priority, nice;
 	int tty_pgrp = -1, tty_nr = 0;
 	sigset_t sigign, sigcatch;
 	char state;
@@ -492,7 +492,7 @@
 	seq_put_decimal_ull(m, ' ', 0);
 	seq_put_decimal_ull(m, ' ', start_time);
 	seq_put_decimal_ull(m, ' ', vsize);
-	seq_put_decimal_ll(m, ' ', mm ? get_mm_rss(mm) : 0);
+	seq_put_decimal_ull(m, ' ', mm ? get_mm_rss(mm) : 0);
 	seq_put_decimal_ull(m, ' ', rsslim);
 	seq_put_decimal_ull(m, ' ', mm ? (permitted ? mm->start_code : 1) : 0);
 	seq_put_decimal_ull(m, ' ', mm ? (permitted ? mm->end_code : 1) : 0);
@@ -517,9 +517,23 @@
 	seq_put_decimal_ull(m, ' ', delayacct_blkio_ticks(task));
 	seq_put_decimal_ull(m, ' ', cputime_to_clock_t(gtime));
 	seq_put_decimal_ll(m, ' ', cputime_to_clock_t(cgtime));
-	seq_put_decimal_ull(m, ' ', (mm && permitted) ? mm->start_data : 0);
-	seq_put_decimal_ull(m, ' ', (mm && permitted) ? mm->end_data : 0);
-	seq_put_decimal_ull(m, ' ', (mm && permitted) ? mm->start_brk : 0);
+
+	if (mm && permitted) {
+		seq_put_decimal_ull(m, ' ', mm->start_data);
+		seq_put_decimal_ull(m, ' ', mm->end_data);
+		seq_put_decimal_ull(m, ' ', mm->start_brk);
+		seq_put_decimal_ull(m, ' ', mm->arg_start);
+		seq_put_decimal_ull(m, ' ', mm->arg_end);
+		seq_put_decimal_ull(m, ' ', mm->env_start);
+		seq_put_decimal_ull(m, ' ', mm->env_end);
+	} else
+		seq_printf(m, " 0 0 0 0 0 0 0");
+
+	if (permitted)
+		seq_put_decimal_ll(m, ' ', task->exit_code);
+	else
+		seq_put_decimal_ll(m, ' ', 0);
+
 	seq_putc(m, '\n');
 	if (mm)
 		mmput(mm);
@@ -565,3 +579,126 @@
 
 	return 0;
 }
+
+#ifdef CONFIG_CHECKPOINT_RESTORE
+static struct pid *
+get_children_pid(struct inode *inode, struct pid *pid_prev, loff_t pos)
+{
+	struct task_struct *start, *task;
+	struct pid *pid = NULL;
+
+	read_lock(&tasklist_lock);
+
+	start = pid_task(proc_pid(inode), PIDTYPE_PID);
+	if (!start)
+		goto out;
+
+	/*
+	 * Let's try to continue searching first; this gives
+	 * us a significant speedup on children-rich processes.
+	 */
+	if (pid_prev) {
+		task = pid_task(pid_prev, PIDTYPE_PID);
+		if (task && task->real_parent == start &&
+		    !(list_empty(&task->sibling))) {
+			if (list_is_last(&task->sibling, &start->children))
+				goto out;
+			task = list_first_entry(&task->sibling,
+						struct task_struct, sibling);
+			pid = get_pid(task_pid(task));
+			goto out;
+		}
+	}
+
+	/*
+	 * Slow search case.
+	 *
+	 * We might miss some children here if they
+	 * exit while we are not holding the lock, but
+	 * this interface was never promised to be that
+	 * accurate.
+	 *
+	 * "Just suppose that the parent sleeps, but N children
+	 *  exit after we printed their tids. Now the slow paths
+	 *  skips N extra children, we miss N tasks." (c)
+	 *
+	 * So one needs to stop or freeze the leader and all
+	 * its children to get a precise result.
+	 */
+	list_for_each_entry(task, &start->children, sibling) {
+		if (pos-- == 0) {
+			pid = get_pid(task_pid(task));
+			break;
+		}
+	}
+
+out:
+	read_unlock(&tasklist_lock);
+	return pid;
+}
+
+static int children_seq_show(struct seq_file *seq, void *v)
+{
+	struct inode *inode = seq->private;
+	pid_t pid;
+
+	pid = pid_nr_ns(v, inode->i_sb->s_fs_info);
+	return seq_printf(seq, "%d ", pid);
+}
+
+static void *children_seq_start(struct seq_file *seq, loff_t *pos)
+{
+	return get_children_pid(seq->private, NULL, *pos);
+}
+
+static void *children_seq_next(struct seq_file *seq, void *v, loff_t *pos)
+{
+	struct pid *pid;
+
+	pid = get_children_pid(seq->private, v, *pos + 1);
+	put_pid(v);
+
+	++*pos;
+	return pid;
+}
+
+static void children_seq_stop(struct seq_file *seq, void *v)
+{
+	put_pid(v);
+}
+
+static const struct seq_operations children_seq_ops = {
+	.start	= children_seq_start,
+	.next	= children_seq_next,
+	.stop	= children_seq_stop,
+	.show	= children_seq_show,
+};
+
+static int children_seq_open(struct inode *inode, struct file *file)
+{
+	struct seq_file *m;
+	int ret;
+
+	ret = seq_open(file, &children_seq_ops);
+	if (ret)
+		return ret;
+
+	m = file->private_data;
+	m->private = inode;
+
+	return ret;
+}
+
+int children_seq_release(struct inode *inode, struct file *file)
+{
+	seq_release(inode, file);
+	return 0;
+}
+
+const struct file_operations proc_tid_children_operations = {
+	.open    = children_seq_open,
+	.read    = seq_read,
+	.llseek  = seq_lseek,
+	.release = children_seq_release,
+};
+#endif /* CONFIG_CHECKPOINT_RESTORE */
diff --git a/fs/proc/base.c b/fs/proc/base.c
index d2d3108..437195f 100644
--- a/fs/proc/base.c
+++ b/fs/proc/base.c
@@ -199,11 +199,6 @@
 	return result;
 }
 
-struct mm_struct *mm_for_maps(struct task_struct *task)
-{
-	return mm_access(task, PTRACE_MODE_READ);
-}
-
 static int proc_pid_cmdline(struct task_struct *task, char * buffer)
 {
 	int res = 0;
@@ -243,7 +238,7 @@
 
 static int proc_pid_auxv(struct task_struct *task, char *buffer)
 {
-	struct mm_struct *mm = mm_for_maps(task);
+	struct mm_struct *mm = mm_access(task, PTRACE_MODE_READ);
 	int res = PTR_ERR(mm);
 	if (mm && !IS_ERR(mm)) {
 		unsigned int nwords = 0;
@@ -411,12 +406,13 @@
 
 static int proc_oom_score(struct task_struct *task, char *buffer)
 {
+	unsigned long totalpages = totalram_pages + total_swap_pages;
 	unsigned long points = 0;
 
 	read_lock(&tasklist_lock);
 	if (pid_alive(task))
-		points = oom_badness(task, NULL, NULL,
-					totalram_pages + total_swap_pages);
+		points = oom_badness(task, NULL, NULL, totalpages) *
+						1000 / totalpages;
 	read_unlock(&tasklist_lock);
 	return sprintf(buffer, "%lu\n", points);
 }
@@ -678,7 +674,7 @@
 	.release	= single_release,
 };
 
-static int mem_open(struct inode* inode, struct file* file)
+static int __mem_open(struct inode *inode, struct file *file, unsigned int mode)
 {
 	struct task_struct *task = get_proc_task(file->f_path.dentry->d_inode);
 	struct mm_struct *mm;
@@ -686,7 +682,7 @@
 	if (!task)
 		return -ESRCH;
 
-	mm = mm_access(task, PTRACE_MODE_ATTACH);
+	mm = mm_access(task, mode);
 	put_task_struct(task);
 
 	if (IS_ERR(mm))
@@ -706,6 +702,11 @@
 	return 0;
 }
 
+static int mem_open(struct inode *inode, struct file *file)
+{
+	return __mem_open(inode, file, PTRACE_MODE_ATTACH);
+}
+
 static ssize_t mem_rw(struct file *file, char __user *buf,
 			size_t count, loff_t *ppos, int write)
 {
@@ -802,30 +803,29 @@
 	.release	= mem_release,
 };
 
+static int environ_open(struct inode *inode, struct file *file)
+{
+	return __mem_open(inode, file, PTRACE_MODE_READ);
+}
+
 static ssize_t environ_read(struct file *file, char __user *buf,
 			size_t count, loff_t *ppos)
 {
-	struct task_struct *task = get_proc_task(file->f_dentry->d_inode);
 	char *page;
 	unsigned long src = *ppos;
-	int ret = -ESRCH;
-	struct mm_struct *mm;
+	int ret = 0;
+	struct mm_struct *mm = file->private_data;
 
-	if (!task)
-		goto out_no_task;
+	if (!mm)
+		return 0;
 
-	ret = -ENOMEM;
 	page = (char *)__get_free_page(GFP_TEMPORARY);
 	if (!page)
-		goto out;
-
-
-	mm = mm_for_maps(task);
-	ret = PTR_ERR(mm);
-	if (!mm || IS_ERR(mm))
-		goto out_free;
+		return -ENOMEM;
 
 	ret = 0;
+	if (!atomic_inc_not_zero(&mm->mm_users))
+		goto free;
 	while (count > 0) {
 		int this_len, retval, max_len;
 
@@ -837,7 +837,7 @@
 		max_len = (count > PAGE_SIZE) ? PAGE_SIZE : count;
 		this_len = (this_len > max_len) ? max_len : this_len;
 
-		retval = access_process_vm(task, (mm->env_start + src),
+		retval = access_remote_vm(mm, (mm->env_start + src),
 			page, this_len, 0);
 
 		if (retval <= 0) {
@@ -856,19 +856,18 @@
 		count -= retval;
 	}
 	*ppos = src;
-
 	mmput(mm);
-out_free:
+
+free:
 	free_page((unsigned long) page);
-out:
-	put_task_struct(task);
-out_no_task:
 	return ret;
 }
 
 static const struct file_operations proc_environ_operations = {
+	.open		= environ_open,
 	.read		= environ_read,
 	.llseek		= generic_file_llseek,
+	.release	= mem_release,
 };
 
 static ssize_t oom_adjust_read(struct file *file, char __user *buf,
@@ -1804,7 +1803,7 @@
 			rcu_read_lock();
 			file = fcheck_files(files, fd);
 			if (file) {
-				unsigned i_mode, f_mode = file->f_mode;
+				unsigned f_mode = file->f_mode;
 
 				rcu_read_unlock();
 				put_files_struct(files);
@@ -1820,12 +1819,14 @@
 					inode->i_gid = GLOBAL_ROOT_GID;
 				}
 
-				i_mode = S_IFLNK;
-				if (f_mode & FMODE_READ)
-					i_mode |= S_IRUSR | S_IXUSR;
-				if (f_mode & FMODE_WRITE)
-					i_mode |= S_IWUSR | S_IXUSR;
-				inode->i_mode = i_mode;
+				if (S_ISLNK(inode->i_mode)) {
+					unsigned i_mode = S_IFLNK;
+					if (f_mode & FMODE_READ)
+						i_mode |= S_IRUSR | S_IXUSR;
+					if (f_mode & FMODE_WRITE)
+						i_mode |= S_IWUSR | S_IXUSR;
+					inode->i_mode = i_mode;
+				}
 
 				security_task_to_inode(task, inode);
 				put_task_struct(task);
@@ -1849,7 +1850,7 @@
 static struct dentry *proc_fd_instantiate(struct inode *dir,
 	struct dentry *dentry, struct task_struct *task, const void *ptr)
 {
-	unsigned fd = *(const unsigned *)ptr;
+	unsigned fd = (unsigned long)ptr;
  	struct inode *inode;
  	struct proc_inode *ei;
 	struct dentry *error = ERR_PTR(-ENOENT);
@@ -1860,6 +1861,7 @@
 	ei = PROC_I(inode);
 	ei->fd = fd;
 
+	inode->i_mode = S_IFLNK;
 	inode->i_op = &proc_pid_link_inode_operations;
 	inode->i_size = 64;
 	ei->op.proc_get_link = proc_fd_link;
@@ -1886,7 +1888,7 @@
 	if (fd == ~0U)
 		goto out;
 
-	result = instantiate(dir, dentry, task, &fd);
+	result = instantiate(dir, dentry, task, (void *)(unsigned long)fd);
 out:
 	put_task_struct(task);
 out_no_task:
@@ -1929,21 +1931,22 @@
 			     fd++, filp->f_pos++) {
 				char name[PROC_NUMBUF];
 				int len;
+				int rv;
 
 				if (!fcheck_files(files, fd))
 					continue;
 				rcu_read_unlock();
 
 				len = snprintf(name, sizeof(name), "%d", fd);
-				if (proc_fill_cache(filp, dirent, filldir,
-						    name, len, instantiate,
-						    p, &fd) < 0) {
-					rcu_read_lock();
-					break;
-				}
+				rv = proc_fill_cache(filp, dirent, filldir,
+						     name, len, instantiate, p,
+						     (void *)(unsigned long)fd);
+				if (rv < 0)
+					goto out_fd_loop;
 				rcu_read_lock();
 			}
 			rcu_read_unlock();
+out_fd_loop:
 			put_files_struct(files);
 	}
 out:
@@ -2023,11 +2026,8 @@
 	if (!task)
 		goto out_notask;
 
-	if (!ptrace_may_access(task, PTRACE_MODE_READ))
-		goto out;
-
-	mm = get_task_mm(task);
-	if (!mm)
+	mm = mm_access(task, PTRACE_MODE_READ);
+	if (IS_ERR_OR_NULL(mm))
 		goto out;
 
 	if (!dname_to_vma_addr(dentry, &vm_start, &vm_end)) {
@@ -2356,7 +2356,7 @@
 static struct dentry *proc_fdinfo_instantiate(struct inode *dir,
 	struct dentry *dentry, struct task_struct *task, const void *ptr)
 {
-	unsigned fd = *(unsigned *)ptr;
+	unsigned fd = (unsigned long)ptr;
  	struct inode *inode;
  	struct proc_inode *ei;
 	struct dentry *error = ERR_PTR(-ENOENT);
@@ -3403,6 +3403,9 @@
 	ONE("stat",      S_IRUGO, proc_tid_stat),
 	ONE("statm",     S_IRUGO, proc_pid_statm),
 	REG("maps",      S_IRUGO, proc_tid_maps_operations),
+#ifdef CONFIG_CHECKPOINT_RESTORE
+	REG("children",  S_IRUGO, proc_tid_children_operations),
+#endif
 #ifdef CONFIG_NUMA
 	REG("numa_maps", S_IRUGO, proc_tid_numa_maps_operations),
 #endif
diff --git a/fs/proc/inode.c b/fs/proc/inode.c
index 554ecc5..7ac817b 100644
--- a/fs/proc/inode.c
+++ b/fs/proc/inode.c
@@ -33,7 +33,7 @@
 	const struct proc_ns_operations *ns_ops;
 
 	truncate_inode_pages(&inode->i_data, 0);
-	end_writeback(inode);
+	clear_inode(inode);
 
 	/* Stop tracking associated processes */
 	put_pid(PROC_I(inode)->pid);
diff --git a/fs/proc/internal.h b/fs/proc/internal.h
index 5f79bb8..eca4aca 100644
--- a/fs/proc/internal.h
+++ b/fs/proc/internal.h
@@ -31,8 +31,6 @@
 	unsigned long	largest_chunk;
 };
 
-extern struct mm_struct *mm_for_maps(struct task_struct *);
-
 #ifdef CONFIG_MMU
 #define VMALLOC_TOTAL (VMALLOC_END - VMALLOC_START)
 extern void get_vmalloc_info(struct vmalloc_info *vmi);
@@ -56,6 +54,7 @@
 				struct pid *pid, struct task_struct *task);
 extern loff_t mem_lseek(struct file *file, loff_t offset, int orig);
 
+extern const struct file_operations proc_tid_children_operations;
 extern const struct file_operations proc_pid_maps_operations;
 extern const struct file_operations proc_tid_maps_operations;
 extern const struct file_operations proc_pid_numa_maps_operations;
diff --git a/fs/proc/task_mmu.c b/fs/proc/task_mmu.c
index 1030a71..4540b8f 100644
--- a/fs/proc/task_mmu.c
+++ b/fs/proc/task_mmu.c
@@ -125,7 +125,7 @@
 	if (!priv->task)
 		return ERR_PTR(-ESRCH);
 
-	mm = mm_for_maps(priv->task);
+	mm = mm_access(priv->task, PTRACE_MODE_READ);
 	if (!mm || IS_ERR(mm))
 		return mm;
 	down_read(&mm->mmap_sem);
@@ -393,6 +393,7 @@
 	unsigned long anonymous;
 	unsigned long anonymous_thp;
 	unsigned long swap;
+	unsigned long nonlinear;
 	u64 pss;
 };
 
@@ -402,24 +403,33 @@
 {
 	struct mem_size_stats *mss = walk->private;
 	struct vm_area_struct *vma = mss->vma;
-	struct page *page;
+	pgoff_t pgoff = linear_page_index(vma, addr);
+	struct page *page = NULL;
 	int mapcount;
 
-	if (is_swap_pte(ptent)) {
-		mss->swap += ptent_size;
-		return;
+	if (pte_present(ptent)) {
+		page = vm_normal_page(vma, addr, ptent);
+	} else if (is_swap_pte(ptent)) {
+		swp_entry_t swpent = pte_to_swp_entry(ptent);
+
+		if (!non_swap_entry(swpent))
+			mss->swap += ptent_size;
+		else if (is_migration_entry(swpent))
+			page = migration_entry_to_page(swpent);
+	} else if (pte_file(ptent)) {
+		if (pte_to_pgoff(ptent) != pgoff)
+			mss->nonlinear += ptent_size;
 	}
 
-	if (!pte_present(ptent))
-		return;
-
-	page = vm_normal_page(vma, addr, ptent);
 	if (!page)
 		return;
 
 	if (PageAnon(page))
 		mss->anonymous += ptent_size;
 
+	if (page->index != pgoff)
+		mss->nonlinear += ptent_size;
+
 	mss->resident += ptent_size;
 	/* Accumulate the size in pages that have been accessed. */
 	if (pte_young(ptent) || PageReferenced(page))
@@ -521,6 +531,10 @@
 		   (vma->vm_flags & VM_LOCKED) ?
 			(unsigned long)(mss.pss >> (10 + PSS_SHIFT)) : 0);
 
+	if (vma->vm_flags & VM_NONLINEAR)
+		seq_printf(m, "Nonlinear:      %8lu kB\n",
+				mss.nonlinear >> 10);
+
 	if (m->count < m->size)  /* vma is copied successfully */
 		m->version = (vma != get_gate_vma(task->mm))
 			? vma->vm_start : 0;
@@ -700,6 +714,7 @@
 
 #define PM_PRESENT          PM_STATUS(4LL)
 #define PM_SWAP             PM_STATUS(2LL)
+#define PM_FILE             PM_STATUS(1LL)
 #define PM_NOT_PRESENT      PM_PSHIFT(PAGE_SHIFT)
 #define PM_END_OF_BUFFER    1
 
@@ -733,22 +748,33 @@
 	return err;
 }
 
-static u64 swap_pte_to_pagemap_entry(pte_t pte)
+static void pte_to_pagemap_entry(pagemap_entry_t *pme,
+		struct vm_area_struct *vma, unsigned long addr, pte_t pte)
 {
-	swp_entry_t e = pte_to_swp_entry(pte);
-	return swp_type(e) | (swp_offset(e) << MAX_SWAPFILES_SHIFT);
-}
+	u64 frame, flags;
+	struct page *page = NULL;
 
-static void pte_to_pagemap_entry(pagemap_entry_t *pme, pte_t pte)
-{
-	if (is_swap_pte(pte))
-		*pme = make_pme(PM_PFRAME(swap_pte_to_pagemap_entry(pte))
-				| PM_PSHIFT(PAGE_SHIFT) | PM_SWAP);
-	else if (pte_present(pte))
-		*pme = make_pme(PM_PFRAME(pte_pfn(pte))
-				| PM_PSHIFT(PAGE_SHIFT) | PM_PRESENT);
-	else
+	if (pte_present(pte)) {
+		frame = pte_pfn(pte);
+		flags = PM_PRESENT;
+		page = vm_normal_page(vma, addr, pte);
+	} else if (is_swap_pte(pte)) {
+		swp_entry_t entry = pte_to_swp_entry(pte);
+
+		frame = swp_type(entry) |
+			(swp_offset(entry) << MAX_SWAPFILES_SHIFT);
+		flags = PM_SWAP;
+		if (is_migration_entry(entry))
+			page = migration_entry_to_page(entry);
+	} else {
 		*pme = make_pme(PM_NOT_PRESENT);
+		return;
+	}
+
+	if (page && !PageAnon(page))
+		flags |= PM_FILE;
+
+	*pme = make_pme(PM_PFRAME(frame) | PM_PSHIFT(PAGE_SHIFT) | flags);
 }
 
 #ifdef CONFIG_TRANSPARENT_HUGEPAGE
@@ -784,7 +810,7 @@
 
 	/* find the first VMA at or above 'addr' */
 	vma = find_vma(walk->mm, addr);
-	if (pmd_trans_huge_lock(pmd, vma) == 1) {
+	if (vma && pmd_trans_huge_lock(pmd, vma) == 1) {
 		for (; addr != end; addr += PAGE_SIZE) {
 			unsigned long offset;
 
@@ -815,7 +841,7 @@
 		if (vma && (vma->vm_start <= addr) &&
 		    !is_vm_hugetlb_page(vma)) {
 			pte = pte_offset_map(pmd, addr);
-			pte_to_pagemap_entry(&pme, *pte);
+			pte_to_pagemap_entry(&pme, vma, addr, *pte);
 			/* unmap before userspace copy */
 			pte_unmap(pte);
 		}
@@ -869,11 +895,11 @@
  * For each page in the address space, this file contains one 64-bit entry
  * consisting of the following:
  *
- * Bits 0-55  page frame number (PFN) if present
+ * Bits 0-54  page frame number (PFN) if present
  * Bits 0-4   swap type if swapped
- * Bits 5-55  swap offset if swapped
+ * Bits 5-54  swap offset if swapped
  * Bits 55-60 page shift (page size = 1<<page shift)
- * Bit  61    reserved for future use
+ * Bit  61    page is file-page or shared-anon
  * Bit  62    page swapped
  * Bit  63    page present
  *
@@ -919,7 +945,7 @@
 	if (!pm.buffer)
 		goto out_task;
 
-	mm = mm_for_maps(task);
+	mm = mm_access(task, PTRACE_MODE_READ);
 	ret = PTR_ERR(mm);
 	if (!mm || IS_ERR(mm))
 		goto out_free;
diff --git a/fs/proc/task_nommu.c b/fs/proc/task_nommu.c
index 74fe164..1ccfa53 100644
--- a/fs/proc/task_nommu.c
+++ b/fs/proc/task_nommu.c
@@ -223,7 +223,7 @@
 	if (!priv->task)
 		return ERR_PTR(-ESRCH);
 
-	mm = mm_for_maps(priv->task);
+	mm = mm_access(priv->task, PTRACE_MODE_READ);
 	if (!mm || IS_ERR(mm)) {
 		put_task_struct(priv->task);
 		priv->task = NULL;
diff --git a/fs/proc_namespace.c b/fs/proc_namespace.c
index 1241285..5e289a7 100644
--- a/fs/proc_namespace.c
+++ b/fs/proc_namespace.c
@@ -23,12 +23,12 @@
 
 	poll_wait(file, &p->ns->poll, wait);
 
-	br_read_lock(vfsmount_lock);
+	br_read_lock(&vfsmount_lock);
 	if (p->m.poll_event != ns->event) {
 		p->m.poll_event = ns->event;
 		res |= POLLERR | POLLPRI;
 	}
-	br_read_unlock(vfsmount_lock);
+	br_read_unlock(&vfsmount_lock);
 
 	return res;
 }
diff --git a/fs/pstore/inode.c b/fs/pstore/inode.c
index 1950788..11a2aa2 100644
--- a/fs/pstore/inode.c
+++ b/fs/pstore/inode.c
@@ -85,7 +85,7 @@
 	struct pstore_private	*p = inode->i_private;
 	unsigned long		flags;
 
-	end_writeback(inode);
+	clear_inode(inode);
 	if (p) {
 		spin_lock_irqsave(&allpstore_lock, flags);
 		list_del(&p->list);
@@ -258,7 +258,7 @@
 	return rc;
 }
 
-int pstore_fill_super(struct super_block *sb, void *data, int silent)
+static int pstore_fill_super(struct super_block *sb, void *data, int silent)
 {
 	struct inode *inode;
 
diff --git a/fs/pstore/platform.c b/fs/pstore/platform.c
index 82c585f..03ce7a9 100644
--- a/fs/pstore/platform.c
+++ b/fs/pstore/platform.c
@@ -94,20 +94,15 @@
  * as we can from the end of the buffer.
  */
 static void pstore_dump(struct kmsg_dumper *dumper,
-	    enum kmsg_dump_reason reason,
-	    const char *s1, unsigned long l1,
-	    const char *s2, unsigned long l2)
+			enum kmsg_dump_reason reason)
 {
-	unsigned long	s1_start, s2_start;
-	unsigned long	l1_cpy, l2_cpy;
-	unsigned long	size, total = 0;
-	char		*dst;
+	unsigned long	total = 0;
 	const char	*why;
 	u64		id;
-	int		hsize, ret;
 	unsigned int	part = 1;
 	unsigned long	flags = 0;
 	int		is_locked = 0;
+	int		ret;
 
 	why = get_reason_str(reason);
 
@@ -119,30 +114,25 @@
 		spin_lock_irqsave(&psinfo->buf_lock, flags);
 	oopscount++;
 	while (total < kmsg_bytes) {
+		char *dst;
+		unsigned long size;
+		int hsize;
+		size_t len;
+
 		dst = psinfo->buf;
 		hsize = sprintf(dst, "%s#%d Part%d\n", why, oopscount, part);
 		size = psinfo->bufsize - hsize;
 		dst += hsize;
 
-		l2_cpy = min(l2, size);
-		l1_cpy = min(l1, size - l2_cpy);
-
-		if (l1_cpy + l2_cpy == 0)
+		if (!kmsg_dump_get_buffer(dumper, true, dst, size, &len))
 			break;
 
-		s2_start = l2 - l2_cpy;
-		s1_start = l1 - l1_cpy;
-
-		memcpy(dst, s1 + s1_start, l1_cpy);
-		memcpy(dst + l1_cpy, s2 + s2_start, l2_cpy);
-
 		ret = psinfo->write(PSTORE_TYPE_DMESG, reason, &id, part,
-				   hsize + l1_cpy + l2_cpy, psinfo);
+				    hsize + len, psinfo);
 		if (ret == 0 && reason == KMSG_DUMP_OOPS && pstore_is_mounted())
 			pstore_new_entry = 1;
-		l1 -= l1_cpy;
-		l2 -= l2_cpy;
-		total += l1_cpy + l2_cpy;
+
+		total += hsize + len;
 		part++;
 	}
 	if (in_nmi()) {
diff --git a/fs/pstore/ram.c b/fs/pstore/ram.c
index 9123cce..453030f 100644
--- a/fs/pstore/ram.c
+++ b/fs/pstore/ram.c
@@ -106,6 +106,8 @@
 	time->tv_sec = 0;
 	time->tv_nsec = 0;
 
+	/* Update old/shadowed buffer. */
+	persistent_ram_save_old(prz);
 	size = persistent_ram_old_size(prz);
 	*buf = kmalloc(size, GFP_KERNEL);
 	if (*buf == NULL)
@@ -184,6 +186,7 @@
 		return -EINVAL;
 
 	persistent_ram_free_old(cxt->przs[id]);
+	persistent_ram_zap(cxt->przs[id]);
 
 	return 0;
 }
diff --git a/fs/pstore/ram_core.c b/fs/pstore/ram_core.c
index 31f8d18..c5fbdbb 100644
--- a/fs/pstore/ram_core.c
+++ b/fs/pstore/ram_core.c
@@ -250,23 +250,24 @@
 	persistent_ram_update_ecc(prz, start, count);
 }
 
-static void __init
-persistent_ram_save_old(struct persistent_ram_zone *prz)
+void persistent_ram_save_old(struct persistent_ram_zone *prz)
 {
 	struct persistent_ram_buffer *buffer = prz->buffer;
 	size_t size = buffer_size(prz);
 	size_t start = buffer_start(prz);
-	char *dest;
 
-	persistent_ram_ecc_old(prz);
+	if (!size)
+		return;
 
-	dest = kmalloc(size, GFP_KERNEL);
-	if (dest == NULL) {
+	if (!prz->old_log) {
+		persistent_ram_ecc_old(prz);
+		prz->old_log = kmalloc(size, GFP_KERNEL);
+	}
+	if (!prz->old_log) {
 		pr_err("persistent_ram: failed to allocate buffer\n");
 		return;
 	}
 
-	prz->old_log = dest;
 	prz->old_log_size = size;
 	memcpy(prz->old_log, &buffer->data[start], size - start);
 	memcpy(prz->old_log + size - start, &buffer->data[0], start);
@@ -319,6 +320,13 @@
 	prz->old_log_size = 0;
 }
 
+void persistent_ram_zap(struct persistent_ram_zone *prz)
+{
+	atomic_set(&prz->buffer->start, 0);
+	atomic_set(&prz->buffer->size, 0);
+	persistent_ram_update_header_ecc(prz);
+}
+
 static void *persistent_ram_vmap(phys_addr_t start, size_t size)
 {
 	struct page **pages;
@@ -405,6 +413,7 @@
 				" size %zu, start %zu\n",
 			       buffer_size(prz), buffer_start(prz));
 			persistent_ram_save_old(prz);
+			return 0;
 		}
 	} else {
 		pr_info("persistent_ram: no valid data in buffer"
@@ -412,8 +421,7 @@
 	}
 
 	prz->buffer->sig = PERSISTENT_RAM_SIG;
-	atomic_set(&prz->buffer->start, 0);
-	atomic_set(&prz->buffer->size, 0);
+	persistent_ram_zap(prz);
 
 	return 0;
 }
@@ -448,7 +456,6 @@
 		goto err;
 
 	persistent_ram_post_init(prz, ecc);
-	persistent_ram_update_header_ecc(prz);
 
 	return prz;
 err:
diff --git a/fs/quota/dquot.c b/fs/quota/dquot.c
index d69a1d1..10cbe84 100644
--- a/fs/quota/dquot.c
+++ b/fs/quota/dquot.c
@@ -116,15 +116,15 @@
  * spinlock to internal buffers before writing.
  *
  * Lock ordering (including related VFS locks) is the following:
- *   i_mutex > dqonoff_sem > journal_lock > dqptr_sem > dquot->dq_lock >
+ *   dqonoff_mutex > i_mutex > journal_lock > dqptr_sem > dquot->dq_lock >
  *   dqio_mutex
+ * dqonoff_mutex > i_mutex comes from dquot_quota_sync, dquot_enable, etc.
  * The lock ordering of dqptr_sem imposed by quota code is only dqonoff_sem >
  * dqptr_sem. But filesystem has to count with the fact that functions such as
  * dquot_alloc_space() acquire dqptr_sem and they usually have to be called
  * from inside a transaction to keep filesystem consistency after a crash. Also
  * filesystems usually want to do some IO on dquot from ->mark_dirty which is
  * called with dqptr_sem held.
- * i_mutex on quota files is special (it's below dqio_mutex)
  */
 
 static __cacheline_aligned_in_smp DEFINE_SPINLOCK(dq_list_lock);
@@ -638,7 +638,7 @@
 	dqstats_inc(DQST_SYNCS);
 	mutex_unlock(&dqopt->dqonoff_mutex);
 
-	if (!wait || (sb_dqopt(sb)->flags & DQUOT_QUOTA_SYS_FILE))
+	if (!wait || (dqopt->flags & DQUOT_QUOTA_SYS_FILE))
 		return 0;
 
 	/* This is not very clever (and fast) but currently I don't know about
@@ -652,18 +652,17 @@
 	 * Now when everything is written we can discard the pagecache so
 	 * that userspace sees the changes.
 	 */
-	mutex_lock(&sb_dqopt(sb)->dqonoff_mutex);
+	mutex_lock(&dqopt->dqonoff_mutex);
 	for (cnt = 0; cnt < MAXQUOTAS; cnt++) {
 		if (type != -1 && cnt != type)
 			continue;
 		if (!sb_has_quota_active(sb, cnt))
 			continue;
-		mutex_lock_nested(&sb_dqopt(sb)->files[cnt]->i_mutex,
-				  I_MUTEX_QUOTA);
-		truncate_inode_pages(&sb_dqopt(sb)->files[cnt]->i_data, 0);
-		mutex_unlock(&sb_dqopt(sb)->files[cnt]->i_mutex);
+		mutex_lock(&dqopt->files[cnt]->i_mutex);
+		truncate_inode_pages(&dqopt->files[cnt]->i_data, 0);
+		mutex_unlock(&dqopt->files[cnt]->i_mutex);
 	}
-	mutex_unlock(&sb_dqopt(sb)->dqonoff_mutex);
+	mutex_unlock(&dqopt->dqonoff_mutex);
 
 	return 0;
 }
@@ -907,14 +906,14 @@
 			spin_unlock(&inode->i_lock);
 			continue;
 		}
-#ifdef CONFIG_QUOTA_DEBUG
-		if (unlikely(inode_get_rsv_space(inode) > 0))
-			reserved = 1;
-#endif
 		__iget(inode);
 		spin_unlock(&inode->i_lock);
 		spin_unlock(&inode_sb_list_lock);
 
+#ifdef CONFIG_QUOTA_DEBUG
+		if (unlikely(inode_get_rsv_space(inode) > 0))
+			reserved = 1;
+#endif
 		iput(old_inode);
 		__dquot_initialize(inode, type);
 
@@ -2037,8 +2036,7 @@
 			/* If quota was reenabled in the meantime, we have
 			 * nothing to do */
 			if (!sb_has_quota_loaded(sb, cnt)) {
-				mutex_lock_nested(&toputinode[cnt]->i_mutex,
-						  I_MUTEX_QUOTA);
+				mutex_lock(&toputinode[cnt]->i_mutex);
 				toputinode[cnt]->i_flags &= ~(S_IMMUTABLE |
 				  S_NOATIME | S_NOQUOTA);
 				truncate_inode_pages(&toputinode[cnt]->i_data,
@@ -2133,7 +2131,7 @@
 		/* We don't want quota and atime on quota files (deadlocks
 		 * possible) Also nobody should write to the file - we use
 		 * special IO operations which ignore the immutable bit. */
-		mutex_lock_nested(&inode->i_mutex, I_MUTEX_QUOTA);
+		mutex_lock(&inode->i_mutex);
 		oldflags = inode->i_flags & (S_NOATIME | S_IMMUTABLE |
 					     S_NOQUOTA);
 		inode->i_flags |= S_NOQUOTA | S_NOATIME | S_IMMUTABLE;
@@ -2180,7 +2178,7 @@
 	iput(inode);
 out_lock:
 	if (oldflags != -1) {
-		mutex_lock_nested(&inode->i_mutex, I_MUTEX_QUOTA);
+		mutex_lock(&inode->i_mutex);
 		/* Set the flags back (in the case of accidental quotaon()
 		 * on a wrong file we don't want to mess up the flags) */
 		inode->i_flags &= ~(S_NOATIME | S_NOQUOTA | S_IMMUTABLE);
diff --git a/fs/read_write.c b/fs/read_write.c
index ffc99d2..c20614f 100644
--- a/fs/read_write.c
+++ b/fs/read_write.c
@@ -633,8 +633,7 @@
 ssize_t rw_copy_check_uvector(int type, const struct iovec __user * uvector,
 			      unsigned long nr_segs, unsigned long fast_segs,
 			      struct iovec *fast_pointer,
-			      struct iovec **ret_pointer,
-			      int check_access)
+			      struct iovec **ret_pointer)
 {
 	unsigned long seg;
 	ssize_t ret;
@@ -690,7 +689,7 @@
 			ret = -EINVAL;
 			goto out;
 		}
-		if (check_access
+		if (type >= 0
 		    && unlikely(!access_ok(vrfy_dir(type), buf, len))) {
 			ret = -EFAULT;
 			goto out;
@@ -723,7 +722,7 @@
 	}
 
 	ret = rw_copy_check_uvector(type, uvector, nr_segs,
-				    ARRAY_SIZE(iovstack), iovstack, &iov, 1);
+				    ARRAY_SIZE(iovstack), iovstack, &iov);
 	if (ret <= 0)
 		goto out;
 
diff --git a/fs/readdir.c b/fs/readdir.c
index cc0a822..39e3370 100644
--- a/fs/readdir.c
+++ b/fs/readdir.c
@@ -108,11 +108,11 @@
 	int error;
 	struct file * file;
 	struct readdir_callback buf;
+	int fput_needed;
 
-	error = -EBADF;
-	file = fget(fd);
+	file = fget_light(fd, &fput_needed);
 	if (!file)
-		goto out;
+		return -EBADF;
 
 	buf.result = 0;
 	buf.dirent = dirent;
@@ -121,8 +121,7 @@
 	if (buf.result)
 		error = buf.result;
 
-	fput(file);
-out:
+	fput_light(file, fput_needed);
 	return error;
 }
 
@@ -195,16 +194,15 @@
 	struct file * file;
 	struct linux_dirent __user * lastdirent;
 	struct getdents_callback buf;
+	int fput_needed;
 	int error;
 
-	error = -EFAULT;
 	if (!access_ok(VERIFY_WRITE, dirent, count))
-		goto out;
+		return -EFAULT;
 
-	error = -EBADF;
-	file = fget(fd);
+	file = fget_light(fd, &fput_needed);
 	if (!file)
-		goto out;
+		return -EBADF;
 
 	buf.current_dir = dirent;
 	buf.previous = NULL;
@@ -221,8 +219,7 @@
 		else
 			error = count - buf.count;
 	}
-	fput(file);
-out:
+	fput_light(file, fput_needed);
 	return error;
 }
 
@@ -278,16 +275,15 @@
 	struct file * file;
 	struct linux_dirent64 __user * lastdirent;
 	struct getdents_callback64 buf;
+	int fput_needed;
 	int error;
 
-	error = -EFAULT;
 	if (!access_ok(VERIFY_WRITE, dirent, count))
-		goto out;
+		return -EFAULT;
 
-	error = -EBADF;
-	file = fget(fd);
+	file = fget_light(fd, &fput_needed);
 	if (!file)
-		goto out;
+		return -EBADF;
 
 	buf.current_dir = dirent;
 	buf.previous = NULL;
@@ -305,7 +301,6 @@
 		else
 			error = count - buf.count;
 	}
-	fput(file);
-out:
+	fput_light(file, fput_needed);
 	return error;
 }
diff --git a/fs/reiserfs/inode.c b/fs/reiserfs/inode.c
index 494c315..a6d4268 100644
--- a/fs/reiserfs/inode.c
+++ b/fs/reiserfs/inode.c
@@ -76,14 +76,14 @@
 		;
 	}
       out:
-	end_writeback(inode);	/* note this must go after the journal_end to prevent deadlock */
+	clear_inode(inode);	/* note this must go after the journal_end to prevent deadlock */
 	dquot_drop(inode);
 	inode->i_blocks = 0;
 	reiserfs_write_unlock_once(inode->i_sb, depth);
 	return;
 
 no_delete:
-	end_writeback(inode);
+	clear_inode(inode);
 	dquot_drop(inode);
 }
 
@@ -1592,13 +1592,12 @@
 		(fh_type == 6) ? fid->raw[5] : 0);
 }
 
-int reiserfs_encode_fh(struct dentry *dentry, __u32 * data, int *lenp,
-		       int need_parent)
+int reiserfs_encode_fh(struct inode *inode, __u32 * data, int *lenp,
+		       struct inode *parent)
 {
-	struct inode *inode = dentry->d_inode;
 	int maxlen = *lenp;
 
-	if (need_parent && (maxlen < 5)) {
+	if (parent && (maxlen < 5)) {
 		*lenp = 5;
 		return 255;
 	} else if (maxlen < 3) {
@@ -1610,20 +1609,15 @@
 	data[1] = le32_to_cpu(INODE_PKEY(inode)->k_dir_id);
 	data[2] = inode->i_generation;
 	*lenp = 3;
-	/* no room for directory info? return what we've stored so far */
-	if (maxlen < 5 || !need_parent)
-		return 3;
-
-	spin_lock(&dentry->d_lock);
-	inode = dentry->d_parent->d_inode;
-	data[3] = inode->i_ino;
-	data[4] = le32_to_cpu(INODE_PKEY(inode)->k_dir_id);
-	*lenp = 5;
-	if (maxlen >= 6) {
-		data[5] = inode->i_generation;
-		*lenp = 6;
+	if (parent) {
+		data[3] = parent->i_ino;
+		data[4] = le32_to_cpu(INODE_PKEY(parent)->k_dir_id);
+		*lenp = 5;
+		if (maxlen >= 6) {
+			data[5] = parent->i_generation;
+			*lenp = 6;
+		}
 	}
-	spin_unlock(&dentry->d_lock);
 	return *lenp;
 }
 
diff --git a/fs/reiserfs/journal.c b/fs/reiserfs/journal.c
index b1a0857..afcadcc 100644
--- a/fs/reiserfs/journal.c
+++ b/fs/reiserfs/journal.c
@@ -1923,6 +1923,8 @@
 	 * the workqueue job (flush_async_commit) needs this lock
 	 */
 	reiserfs_write_unlock(sb);
+
+	cancel_delayed_work_sync(&REISERFS_SB(sb)->old_work);
 	flush_workqueue(commit_wq);
 
 	if (!reiserfs_mounted_fs_count) {
@@ -3231,8 +3233,6 @@
 			       th->t_trans_id, journal->j_trans_id);
 	}
 
-	sb->s_dirt = 1;
-
 	prepared = test_clear_buffer_journal_prepared(bh);
 	clear_buffer_journal_restore_dirty(bh);
 	/* already in this transaction, we are done */
@@ -3316,6 +3316,7 @@
 		journal->j_first = cn;
 		journal->j_last = cn;
 	}
+	reiserfs_schedule_old_flush(sb);
 	return 0;
 }
 
@@ -3492,7 +3493,7 @@
 ** flushes any old transactions to disk
 ** ends the current transaction if it is too old
 */
-int reiserfs_flush_old_commits(struct super_block *sb)
+void reiserfs_flush_old_commits(struct super_block *sb)
 {
 	time_t now;
 	struct reiserfs_transaction_handle th;
@@ -3502,9 +3503,8 @@
 	/* safety check so we don't flush while we are replaying the log during
 	 * mount
 	 */
-	if (list_empty(&journal->j_journal_list)) {
-		return 0;
-	}
+	if (list_empty(&journal->j_journal_list))
+		return;
 
 	/* check the current transaction.  If there are no writers, and it is
 	 * too old, finish it, and force the commit blocks to disk
@@ -3526,7 +3526,6 @@
 			do_journal_end(&th, sb, 1, COMMIT_NOW | WAIT);
 		}
 	}
-	return sb->s_dirt;
 }
 
 /*
@@ -3955,7 +3954,7 @@
 	 ** it tells us if we should continue with the journal_end, or just return
 	 */
 	if (!check_journal_end(th, sb, nblocks, flags)) {
-		sb->s_dirt = 1;
+		reiserfs_schedule_old_flush(sb);
 		wake_queued_writers(sb);
 		reiserfs_async_progress_wait(sb);
 		goto out;
diff --git a/fs/reiserfs/reiserfs.h b/fs/reiserfs/reiserfs.h
index a59d271..33215f5 100644
--- a/fs/reiserfs/reiserfs.h
+++ b/fs/reiserfs/reiserfs.h
@@ -480,6 +480,11 @@
 	struct dentry *priv_root;	/* root of /.reiserfs_priv */
 	struct dentry *xattr_root;	/* root of /.reiserfs_priv/xattrs */
 	int j_errno;
+
+	int work_queued;              /* non-zero when delayed work is queued */
+	struct delayed_work old_work; /* old transactions flush delayed work */
+	spinlock_t old_work_lock;     /* protects old_work and work_queued */
+
 #ifdef CONFIG_QUOTA
 	char *s_qf_names[MAXQUOTAS];
 	int s_jquota_fmt;
@@ -2452,7 +2457,7 @@
 int reiserfs_end_persistent_transaction(struct reiserfs_transaction_handle *);
 int reiserfs_commit_page(struct inode *inode, struct page *page,
 			 unsigned from, unsigned to);
-int reiserfs_flush_old_commits(struct super_block *);
+void reiserfs_flush_old_commits(struct super_block *);
 int reiserfs_commit_for_inode(struct inode *);
 int reiserfs_inode_needs_commit(struct inode *);
 void reiserfs_update_inode_transaction(struct inode *);
@@ -2487,6 +2492,7 @@
 int reiserfs_allocate_list_bitmaps(struct super_block *s,
 				   struct reiserfs_list_bitmap *, unsigned int);
 
+void reiserfs_schedule_old_flush(struct super_block *s);
 void add_save_link(struct reiserfs_transaction_handle *th,
 		   struct inode *inode, int truncate);
 int remove_save_link(struct inode *inode, int truncate);
@@ -2611,8 +2617,8 @@
 				     int fh_len, int fh_type);
 struct dentry *reiserfs_fh_to_parent(struct super_block *sb, struct fid *fid,
 				     int fh_len, int fh_type);
-int reiserfs_encode_fh(struct dentry *dentry, __u32 * data, int *lenp,
-		       int connectable);
+int reiserfs_encode_fh(struct inode *inode, __u32 * data, int *lenp,
+		       struct inode *parent);
 
 int reiserfs_truncate_file(struct inode *, int update_timestamps);
 void make_cpu_key(struct cpu_key *cpu_key, struct inode *inode, loff_t offset,
diff --git a/fs/reiserfs/resize.c b/fs/reiserfs/resize.c
index 9a17f63..3ce02cf 100644
--- a/fs/reiserfs/resize.c
+++ b/fs/reiserfs/resize.c
@@ -200,7 +200,6 @@
 					  (bmap_nr_new - bmap_nr)));
 	PUT_SB_BLOCK_COUNT(s, block_count_new);
 	PUT_SB_BMAP_NR(s, bmap_would_wrap(bmap_nr_new) ? : bmap_nr_new);
-	s->s_dirt = 1;
 
 	journal_mark_dirty(&th, s, SB_BUFFER_WITH_SB(s));
 
diff --git a/fs/reiserfs/super.c b/fs/reiserfs/super.c
index 8b7616e..651ce76 100644
--- a/fs/reiserfs/super.c
+++ b/fs/reiserfs/super.c
@@ -72,20 +72,58 @@
 	if (!journal_begin(&th, s, 1))
 		if (!journal_end_sync(&th, s, 1))
 			reiserfs_flush_old_commits(s);
-	s->s_dirt = 0;	/* Even if it's not true.
-			 * We'll loop forever in sync_supers otherwise */
 	reiserfs_write_unlock(s);
 	return 0;
 }
 
-static void reiserfs_write_super(struct super_block *s)
+static void flush_old_commits(struct work_struct *work)
 {
+	struct reiserfs_sb_info *sbi;
+	struct super_block *s;
+
+	sbi = container_of(work, struct reiserfs_sb_info, old_work.work);
+	s = sbi->s_journal->j_work_sb;
+
+	spin_lock(&sbi->old_work_lock);
+	sbi->work_queued = 0;
+	spin_unlock(&sbi->old_work_lock);
+
 	reiserfs_sync_fs(s, 1);
 }
 
+void reiserfs_schedule_old_flush(struct super_block *s)
+{
+	struct reiserfs_sb_info *sbi = REISERFS_SB(s);
+	unsigned long delay;
+
+	if (s->s_flags & MS_RDONLY)
+		return;
+
+	spin_lock(&sbi->old_work_lock);
+	if (!sbi->work_queued) {
+		delay = msecs_to_jiffies(dirty_writeback_interval * 10);
+		queue_delayed_work(system_long_wq, &sbi->old_work, delay);
+		sbi->work_queued = 1;
+	}
+	spin_unlock(&sbi->old_work_lock);
+}
+
+static void cancel_old_flush(struct super_block *s)
+{
+	struct reiserfs_sb_info *sbi = REISERFS_SB(s);
+
+	cancel_delayed_work_sync(&REISERFS_SB(s)->old_work);
+	spin_lock(&sbi->old_work_lock);
+	sbi->work_queued = 0;
+	spin_unlock(&sbi->old_work_lock);
+}
+
 static int reiserfs_freeze(struct super_block *s)
 {
 	struct reiserfs_transaction_handle th;
+
+	cancel_old_flush(s);
+
 	reiserfs_write_lock(s);
 	if (!(s->s_flags & MS_RDONLY)) {
 		int err = journal_begin(&th, s, 1);
@@ -99,7 +137,6 @@
 			journal_end_sync(&th, s, 1);
 		}
 	}
-	s->s_dirt = 0;
 	reiserfs_write_unlock(s);
 	return 0;
 }
@@ -483,9 +520,6 @@
 
 	reiserfs_write_lock(s);
 
-	if (s->s_dirt)
-		reiserfs_write_super(s);
-
 	/* change file system state to current state if it was mounted with read-write permissions */
 	if (!(s->s_flags & MS_RDONLY)) {
 		if (!journal_begin(&th, s, 10)) {
@@ -692,7 +726,6 @@
 	.dirty_inode = reiserfs_dirty_inode,
 	.evict_inode = reiserfs_evict_inode,
 	.put_super = reiserfs_put_super,
-	.write_super = reiserfs_write_super,
 	.sync_fs = reiserfs_sync_fs,
 	.freeze_fs = reiserfs_freeze,
 	.unfreeze_fs = reiserfs_unfreeze,
@@ -1400,7 +1433,6 @@
 	err = journal_end(&th, s, 10);
 	if (err)
 		goto out_err;
-	s->s_dirt = 0;
 
 	if (!(*mount_flags & MS_RDONLY)) {
 		dquot_resume(s, -1);
@@ -1730,19 +1762,21 @@
 		return -ENOMEM;
 	s->s_fs_info = sbi;
 	/* Set default values for options: non-aggressive tails, RO on errors */
-	REISERFS_SB(s)->s_mount_opt |= (1 << REISERFS_SMALLTAIL);
-	REISERFS_SB(s)->s_mount_opt |= (1 << REISERFS_ERROR_RO);
-	REISERFS_SB(s)->s_mount_opt |= (1 << REISERFS_BARRIER_FLUSH);
+	sbi->s_mount_opt |= (1 << REISERFS_SMALLTAIL);
+	sbi->s_mount_opt |= (1 << REISERFS_ERROR_RO);
+	sbi->s_mount_opt |= (1 << REISERFS_BARRIER_FLUSH);
 	/* no preallocation minimum, be smart in
 	   reiserfs_file_write instead */
-	REISERFS_SB(s)->s_alloc_options.preallocmin = 0;
+	sbi->s_alloc_options.preallocmin = 0;
 	/* Preallocate by 16 blocks (17-1) at once */
-	REISERFS_SB(s)->s_alloc_options.preallocsize = 17;
+	sbi->s_alloc_options.preallocsize = 17;
 	/* setup default block allocator options */
 	reiserfs_init_alloc_options(s);
 
-	mutex_init(&REISERFS_SB(s)->lock);
-	REISERFS_SB(s)->lock_depth = -1;
+	spin_lock_init(&sbi->old_work_lock);
+	INIT_DELAYED_WORK(&sbi->old_work, flush_old_commits);
+	mutex_init(&sbi->lock);
+	sbi->lock_depth = -1;
 
 	jdev_name = NULL;
 	if (reiserfs_parse_options
@@ -1751,8 +1785,8 @@
 		goto error_unlocked;
 	}
 	if (jdev_name && jdev_name[0]) {
-		REISERFS_SB(s)->s_jdev = kstrdup(jdev_name, GFP_KERNEL);
-		if (!REISERFS_SB(s)->s_jdev) {
+		sbi->s_jdev = kstrdup(jdev_name, GFP_KERNEL);
+		if (!sbi->s_jdev) {
 			SWARN(silent, s, "", "Cannot allocate memory for "
 				"journal device name");
 			goto error;
@@ -1810,7 +1844,7 @@
 	/* make data=ordered the default */
 	if (!reiserfs_data_log(s) && !reiserfs_data_ordered(s) &&
 	    !reiserfs_data_writeback(s)) {
-		REISERFS_SB(s)->s_mount_opt |= (1 << REISERFS_DATA_ORDERED);
+		sbi->s_mount_opt |= (1 << REISERFS_DATA_ORDERED);
 	}
 
 	if (reiserfs_data_log(s)) {
@@ -2003,6 +2037,8 @@
 		reiserfs_write_unlock(s);
 	}
 
+	cancel_delayed_work_sync(&REISERFS_SB(s)->old_work);
+
 	reiserfs_free_bitmap_cache(s);
 	if (SB_BUFFER_WITH_SB(s))
 		brelse(SB_BUFFER_WITH_SB(s));
@@ -2270,7 +2306,6 @@
 			(unsigned long long)off, (unsigned long long)len);
 		return -EIO;
 	}
-	mutex_lock_nested(&inode->i_mutex, I_MUTEX_QUOTA);
 	while (towrite > 0) {
 		tocopy = sb->s_blocksize - offset < towrite ?
 		    sb->s_blocksize - offset : towrite;
@@ -2302,16 +2337,13 @@
 		blk++;
 	}
 out:
-	if (len == towrite) {
-		mutex_unlock(&inode->i_mutex);
+	if (len == towrite)
 		return err;
-	}
 	if (inode->i_size < off + len - towrite)
 		i_size_write(inode, off + len - towrite);
 	inode->i_version++;
 	inode->i_mtime = inode->i_ctime = CURRENT_TIME;
 	mark_inode_dirty(inode);
-	mutex_unlock(&inode->i_mutex);
 	return len - towrite;
 }
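
The reiserfs hunks above replace the old s_dirt/write_super polling with a per-superblock delayed work item: reiserfs_schedule_old_flush() queues the flush at most once, the worker clears work_queued before syncing, and the freeze/unmount paths cancel the work synchronously. A hedged sketch of the throttle-to-one delayed-work idiom, using hypothetical names (my_flush_state, my_flush_worker) rather than the reiserfs ones, and leaving the actual flush out:

	struct my_flush_state {
		spinlock_t lock;
		int queued;			/* non-zero while work is pending */
		struct delayed_work work;
	};

	static void my_flush_worker(struct work_struct *work)
	{
		struct my_flush_state *st =
			container_of(work, struct my_flush_state, work.work);

		spin_lock(&st->lock);
		st->queued = 0;			/* a new request may queue again */
		spin_unlock(&st->lock);

		/* do the (possibly slow) flush here, outside the spinlock */
	}

	static void my_schedule_flush(struct my_flush_state *st, unsigned long delay)
	{
		spin_lock(&st->lock);
		if (!st->queued) {		/* coalesce: at most one pending */
			queue_delayed_work(system_long_wq, &st->work, delay);
			st->queued = 1;
		}
		spin_unlock(&st->lock);
	}

As in the hunks above, any teardown path (freeze, unmount) has to call cancel_delayed_work_sync() on the work item before the structure it points at goes away; dirty_writeback_interval is in centiseconds, hence the *10 when converting to a millisecond delay.
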
 
diff --git a/fs/select.c b/fs/select.c
index 17d33d0..bae3215 100644
--- a/fs/select.c
+++ b/fs/select.c
@@ -614,7 +614,6 @@
 	return ret;
 }
 
-#ifdef HAVE_SET_RESTORE_SIGMASK
 static long do_pselect(int n, fd_set __user *inp, fd_set __user *outp,
 		       fd_set __user *exp, struct timespec __user *tsp,
 		       const sigset_t __user *sigmask, size_t sigsetsize)
@@ -686,7 +685,6 @@
 
 	return do_pselect(n, inp, outp, exp, tsp, up, sigsetsize);
 }
-#endif /* HAVE_SET_RESTORE_SIGMASK */
 
 #ifdef __ARCH_WANT_SYS_OLD_SELECT
 struct sel_arg_struct {
@@ -941,7 +939,6 @@
 	return ret;
 }
 
-#ifdef HAVE_SET_RESTORE_SIGMASK
 SYSCALL_DEFINE5(ppoll, struct pollfd __user *, ufds, unsigned int, nfds,
 		struct timespec __user *, tsp, const sigset_t __user *, sigmask,
 		size_t, sigsetsize)
@@ -992,4 +989,3 @@
 
 	return ret;
 }
-#endif /* HAVE_SET_RESTORE_SIGMASK */
diff --git a/fs/signalfd.c b/fs/signalfd.c
index 7ae2a57..9f35a37 100644
--- a/fs/signalfd.c
+++ b/fs/signalfd.c
@@ -269,12 +269,13 @@
 		if (ufd < 0)
 			kfree(ctx);
 	} else {
-		struct file *file = fget(ufd);
+		int fput_needed;
+		struct file *file = fget_light(ufd, &fput_needed);
 		if (!file)
 			return -EBADF;
 		ctx = file->private_data;
 		if (file->f_op != &signalfd_fops) {
-			fput(file);
+			fput_light(file, fput_needed);
 			return -EINVAL;
 		}
 		spin_lock_irq(&current->sighand->siglock);
@@ -282,7 +283,7 @@
 		spin_unlock_irq(&current->sighand->siglock);
 
 		wake_up(&current->sighand->signalfd_wqh);
-		fput(file);
+		fput_light(file, fput_needed);
 	}
 
 	return ufd;
diff --git a/fs/splice.c b/fs/splice.c
index f847684..c9f1318 100644
--- a/fs/splice.c
+++ b/fs/splice.c
@@ -1003,8 +1003,10 @@
 		mutex_lock_nested(&inode->i_mutex, I_MUTEX_CHILD);
 		ret = file_remove_suid(out);
 		if (!ret) {
-			file_update_time(out);
-			ret = splice_from_pipe_feed(pipe, &sd, pipe_to_file);
+			ret = file_update_time(out);
+			if (!ret)
+				ret = splice_from_pipe_feed(pipe, &sd,
+							    pipe_to_file);
 		}
 		mutex_unlock(&inode->i_mutex);
 	} while (ret > 0);
@@ -1388,7 +1390,7 @@
  */
 static int get_iovec_page_array(const struct iovec __user *iov,
 				unsigned int nr_vecs, struct page **pages,
-				struct partial_page *partial, int aligned,
+				struct partial_page *partial, bool aligned,
 				unsigned int pipe_buffers)
 {
 	int buffers = 0, error = 0;
@@ -1626,7 +1628,7 @@
 		return -ENOMEM;
 
 	spd.nr_pages = get_iovec_page_array(iov, nr_segs, spd.pages,
-					    spd.partial, flags & SPLICE_F_GIFT,
+					    spd.partial, false,
 					    pipe->buffers);
 	if (spd.nr_pages <= 0)
 		ret = spd.nr_pages;
diff --git a/fs/statfs.c b/fs/statfs.c
index 43e6b6f..95ad5c0 100644
--- a/fs/statfs.c
+++ b/fs/statfs.c
@@ -87,11 +87,12 @@
 
 int fd_statfs(int fd, struct kstatfs *st)
 {
-	struct file *file = fget(fd);
+	int fput_needed;
+	struct file *file = fget_light(fd, &fput_needed);
 	int error = -EBADF;
 	if (file) {
 		error = vfs_statfs(&file->f_path, st);
-		fput(file);
+		fput_light(file, fput_needed);
 	}
 	return error;
 }
diff --git a/fs/sync.c b/fs/sync.c
index 0e8db93..11e3d1c 100644
--- a/fs/sync.c
+++ b/fs/sync.c
@@ -188,11 +188,12 @@
 {
 	struct file *file;
 	int ret = -EBADF;
+	int fput_needed;
 
-	file = fget(fd);
+	file = fget_light(fd, &fput_needed);
 	if (file) {
 		ret = vfs_fsync(file, datasync);
-		fput(file);
+		fput_light(file, fput_needed);
 	}
 	return ret;
 }
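
The signalfd, statfs and sync hunks above (and the utimes/xattr ones further down) switch fget()/fput() pairs to fget_light()/fput_light(), which skip the atomic reference-count round trip when the file table is not shared; fput_needed records whether a real reference was taken so fput_light() drops exactly what fget_light() acquired. A hedged sketch of the calling convention, with do_something_with() as a made-up per-file operation:

	static long example_fd_op(unsigned int fd)
	{
		int fput_needed;
		struct file *file;
		long ret = -EBADF;

		file = fget_light(fd, &fput_needed);
		if (!file)
			return ret;

		ret = do_something_with(file);		/* hypothetical */
		fput_light(file, fput_needed);		/* drops a ref only if one was taken */
		return ret;
	}
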
diff --git a/fs/sysfs/inode.c b/fs/sysfs/inode.c
index 907c2b3..0ce3ccf 100644
--- a/fs/sysfs/inode.c
+++ b/fs/sysfs/inode.c
@@ -310,7 +310,7 @@
 	struct sysfs_dirent *sd  = inode->i_private;
 
 	truncate_inode_pages(&inode->i_data, 0);
-	end_writeback(inode);
+	clear_inode(inode);
 	sysfs_put(sd);
 }
 
diff --git a/fs/sysv/inode.c b/fs/sysv/inode.c
index 3da5ce2..08d0b25 100644
--- a/fs/sysv/inode.c
+++ b/fs/sysv/inode.c
@@ -316,7 +316,7 @@
 		sysv_truncate(inode);
 	}
 	invalidate_inode_buffers(inode);
-	end_writeback(inode);
+	clear_inode(inode);
 	if (!inode->i_nlink)
 		sysv_free_inode(inode);
 }
diff --git a/fs/ubifs/debug.c b/fs/ubifs/debug.c
index 685a837..92df3b0 100644
--- a/fs/ubifs/debug.c
+++ b/fs/ubifs/debug.c
@@ -2918,6 +2918,9 @@
 	struct dentry *dent;
 	struct ubifs_debug_info *d = c->dbg;
 
+	if (!IS_ENABLED(CONFIG_DEBUG_FS))
+		return 0;
+
 	n = snprintf(d->dfs_dir_name, UBIFS_DFS_DIR_LEN + 1, UBIFS_DFS_DIR_NAME,
 		     c->vi.ubi_num, c->vi.vol_id);
 	if (n == UBIFS_DFS_DIR_LEN) {
@@ -3010,7 +3013,8 @@
  */
 void dbg_debugfs_exit_fs(struct ubifs_info *c)
 {
-	debugfs_remove_recursive(c->dbg->dfs_dir);
+	if (IS_ENABLED(CONFIG_DEBUG_FS))
+		debugfs_remove_recursive(c->dbg->dfs_dir);
 }
 
 struct ubifs_global_debug_info ubifs_dbg;
@@ -3095,6 +3099,9 @@
 	const char *fname;
 	struct dentry *dent;
 
+	if (!IS_ENABLED(CONFIG_DEBUG_FS))
+		return 0;
+
 	fname = "ubifs";
 	dent = debugfs_create_dir(fname, NULL);
 	if (IS_ERR_OR_NULL(dent))
@@ -3159,7 +3166,8 @@
  */
 void dbg_debugfs_exit(void)
 {
-	debugfs_remove_recursive(dfs_rootdir);
+	if (IS_ENABLED(CONFIG_DEBUG_FS))
+		debugfs_remove_recursive(dfs_rootdir);
 }
 
 /**
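
The ubifs debug hunks guard debugfs setup and teardown with IS_ENABLED(CONFIG_DEBUG_FS) instead of #ifdef blocks, so the code is always compiled and type-checked but folds away when debugfs is off. A small sketch of the same idiom with a hypothetical directory name:

	static struct dentry *example_dfs_dir;

	static int example_debugfs_init(void)
	{
		if (!IS_ENABLED(CONFIG_DEBUG_FS))
			return 0;		/* whole body optimized out when off */

		example_dfs_dir = debugfs_create_dir("example", NULL);
		if (IS_ERR_OR_NULL(example_dfs_dir))
			return example_dfs_dir ? PTR_ERR(example_dfs_dir) : -ENODEV;
		return 0;
	}

	static void example_debugfs_exit(void)
	{
		if (IS_ENABLED(CONFIG_DEBUG_FS))
			debugfs_remove_recursive(example_dfs_dir);
	}
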
diff --git a/fs/ubifs/dir.c b/fs/ubifs/dir.c
index 62a2727..a6d42ef 100644
--- a/fs/ubifs/dir.c
+++ b/fs/ubifs/dir.c
@@ -1127,16 +1127,7 @@
 	struct ubifs_inode *ui = ubifs_inode(inode);
 
 	mutex_lock(&ui->ui_mutex);
-	stat->dev = inode->i_sb->s_dev;
-	stat->ino = inode->i_ino;
-	stat->mode = inode->i_mode;
-	stat->nlink = inode->i_nlink;
-	stat->uid = inode->i_uid;
-	stat->gid = inode->i_gid;
-	stat->rdev = inode->i_rdev;
-	stat->atime = inode->i_atime;
-	stat->mtime = inode->i_mtime;
-	stat->ctime = inode->i_ctime;
+	generic_fillattr(inode, stat);
 	stat->blksize = UBIFS_BLOCK_SIZE;
 	stat->size = ui->ui_size;
 
diff --git a/fs/ubifs/find.c b/fs/ubifs/find.c
index 2559d17..28ec13a 100644
--- a/fs/ubifs/find.c
+++ b/fs/ubifs/find.c
@@ -939,8 +939,8 @@
 	}
 	dbg_find("LEB %d, dirty %d and free %d flags %#x", lp->lnum, lp->dirty,
 		 lp->free, lp->flags);
-	ubifs_assert(lp->flags | LPROPS_TAKEN);
-	ubifs_assert(lp->flags | LPROPS_INDEX);
+	ubifs_assert(lp->flags & LPROPS_TAKEN);
+	ubifs_assert(lp->flags & LPROPS_INDEX);
 	return lnum;
 }
 
diff --git a/fs/ubifs/super.c b/fs/ubifs/super.c
index 001accc..5862dd9 100644
--- a/fs/ubifs/super.c
+++ b/fs/ubifs/super.c
@@ -378,7 +378,7 @@
 		smp_wmb();
 	}
 done:
-	end_writeback(inode);
+	clear_inode(inode);
 }
 
 static void ubifs_dirty_inode(struct inode *inode, int flags)
diff --git a/fs/udf/inode.c b/fs/udf/inode.c
index 7d75280..873e1ba 100644
--- a/fs/udf/inode.c
+++ b/fs/udf/inode.c
@@ -80,7 +80,7 @@
 	} else
 		truncate_inode_pages(&inode->i_data, 0);
 	invalidate_inode_buffers(inode);
-	end_writeback(inode);
+	clear_inode(inode);
 	if (iinfo->i_alloc_type != ICBTAG_FLAG_AD_IN_ICB &&
 	    inode->i_size != iinfo->i_lenExtents) {
 		udf_warn(inode->i_sb, "Inode %lu (mode %o) has inode size %llu different from extent length %llu. Filesystem need not be standards compliant.\n",
diff --git a/fs/udf/namei.c b/fs/udf/namei.c
index a165c66..1802417 100644
--- a/fs/udf/namei.c
+++ b/fs/udf/namei.c
@@ -1260,16 +1260,15 @@
 				 fid->udf.parent_partref,
 				 fid->udf.parent_generation);
 }
-static int udf_encode_fh(struct dentry *de, __u32 *fh, int *lenp,
-			 int connectable)
+static int udf_encode_fh(struct inode *inode, __u32 *fh, int *lenp,
+			 struct inode *parent)
 {
 	int len = *lenp;
-	struct inode *inode =  de->d_inode;
 	struct kernel_lb_addr location = UDF_I(inode)->i_location;
 	struct fid *fid = (struct fid *)fh;
 	int type = FILEID_UDF_WITHOUT_PARENT;
 
-	if (connectable && (len < 5)) {
+	if (parent && (len < 5)) {
 		*lenp = 5;
 		return 255;
 	} else if (len < 3) {
@@ -1282,14 +1281,11 @@
 	fid->udf.partref = location.partitionReferenceNum;
 	fid->udf.generation = inode->i_generation;
 
-	if (connectable && !S_ISDIR(inode->i_mode)) {
-		spin_lock(&de->d_lock);
-		inode = de->d_parent->d_inode;
-		location = UDF_I(inode)->i_location;
+	if (parent) {
+		location = UDF_I(parent)->i_location;
 		fid->udf.parent_block = location.logicalBlockNum;
 		fid->udf.parent_partref = location.partitionReferenceNum;
 		fid->udf.parent_generation = inode->i_generation;
-		spin_unlock(&de->d_lock);
 		*lenp = 5;
 		type = FILEID_UDF_WITH_PARENT;
 	}
diff --git a/fs/udf/super.c b/fs/udf/super.c
index ac8a348..8d86a87 100644
--- a/fs/udf/super.c
+++ b/fs/udf/super.c
@@ -56,6 +56,7 @@
 #include <linux/seq_file.h>
 #include <linux/bitmap.h>
 #include <linux/crc-itu-t.h>
+#include <linux/log2.h>
 #include <asm/byteorder.h>
 
 #include "udf_sb.h"
@@ -1215,16 +1216,65 @@
 	return ret;
 }
 
+static int udf_load_sparable_map(struct super_block *sb,
+				 struct udf_part_map *map,
+				 struct sparablePartitionMap *spm)
+{
+	uint32_t loc;
+	uint16_t ident;
+	struct sparingTable *st;
+	struct udf_sparing_data *sdata = &map->s_type_specific.s_sparing;
+	int i;
+	struct buffer_head *bh;
+
+	map->s_partition_type = UDF_SPARABLE_MAP15;
+	sdata->s_packet_len = le16_to_cpu(spm->packetLength);
+	if (!is_power_of_2(sdata->s_packet_len)) {
+		udf_err(sb, "error loading logical volume descriptor: "
+			"Invalid packet length %u\n",
+			(unsigned)sdata->s_packet_len);
+		return -EIO;
+	}
+	if (spm->numSparingTables > 4) {
+		udf_err(sb, "error loading logical volume descriptor: "
+			"Too many sparing tables (%d)\n",
+			(int)spm->numSparingTables);
+		return -EIO;
+	}
+
+	for (i = 0; i < spm->numSparingTables; i++) {
+		loc = le32_to_cpu(spm->locSparingTable[i]);
+		bh = udf_read_tagged(sb, loc, loc, &ident);
+		if (!bh)
+			continue;
+
+		st = (struct sparingTable *)bh->b_data;
+		if (ident != 0 ||
+		    strncmp(st->sparingIdent.ident, UDF_ID_SPARING,
+			    strlen(UDF_ID_SPARING)) ||
+		    sizeof(*st) + le16_to_cpu(st->reallocationTableLen) >
+							sb->s_blocksize) {
+			brelse(bh);
+			continue;
+		}
+
+		sdata->s_spar_map[i] = bh;
+	}
+	map->s_partition_func = udf_get_pblock_spar15;
+	return 0;
+}
+
 static int udf_load_logicalvol(struct super_block *sb, sector_t block,
 			       struct kernel_lb_addr *fileset)
 {
 	struct logicalVolDesc *lvd;
-	int i, j, offset;
+	int i, offset;
 	uint8_t type;
 	struct udf_sb_info *sbi = UDF_SB(sb);
 	struct genericPartitionMap *gpm;
 	uint16_t ident;
 	struct buffer_head *bh;
+	unsigned int table_len;
 	int ret = 0;
 
 	bh = udf_read_tagged(sb, block, block, &ident);
@@ -1232,15 +1282,20 @@
 		return 1;
 	BUG_ON(ident != TAG_IDENT_LVD);
 	lvd = (struct logicalVolDesc *)bh->b_data;
-
-	i = udf_sb_alloc_partition_maps(sb, le32_to_cpu(lvd->numPartitionMaps));
-	if (i != 0) {
-		ret = i;
+	table_len = le32_to_cpu(lvd->mapTableLength);
+	if (sizeof(*lvd) + table_len > sb->s_blocksize) {
+		udf_err(sb, "error loading logical volume descriptor: "
+			"Partition table too long (%u > %lu)\n", table_len,
+			sb->s_blocksize - sizeof(*lvd));
 		goto out_bh;
 	}
 
+	ret = udf_sb_alloc_partition_maps(sb, le32_to_cpu(lvd->numPartitionMaps));
+	if (ret)
+		goto out_bh;
+
 	for (i = 0, offset = 0;
-	     i < sbi->s_partitions && offset < le32_to_cpu(lvd->mapTableLength);
+	     i < sbi->s_partitions && offset < table_len;
 	     i++, offset += gpm->partitionMapLength) {
 		struct udf_part_map *map = &sbi->s_partmaps[i];
 		gpm = (struct genericPartitionMap *)
@@ -1275,38 +1330,9 @@
 			} else if (!strncmp(upm2->partIdent.ident,
 						UDF_ID_SPARABLE,
 						strlen(UDF_ID_SPARABLE))) {
-				uint32_t loc;
-				struct sparingTable *st;
-				struct sparablePartitionMap *spm =
-					(struct sparablePartitionMap *)gpm;
-
-				map->s_partition_type = UDF_SPARABLE_MAP15;
-				map->s_type_specific.s_sparing.s_packet_len =
-						le16_to_cpu(spm->packetLength);
-				for (j = 0; j < spm->numSparingTables; j++) {
-					struct buffer_head *bh2;
-
-					loc = le32_to_cpu(
-						spm->locSparingTable[j]);
-					bh2 = udf_read_tagged(sb, loc, loc,
-							     &ident);
-					map->s_type_specific.s_sparing.
-							s_spar_map[j] = bh2;
-
-					if (bh2 == NULL)
-						continue;
-
-					st = (struct sparingTable *)bh2->b_data;
-					if (ident != 0 || strncmp(
-						st->sparingIdent.ident,
-						UDF_ID_SPARING,
-						strlen(UDF_ID_SPARING))) {
-						brelse(bh2);
-						map->s_type_specific.s_sparing.
-							s_spar_map[j] = NULL;
-					}
-				}
-				map->s_partition_func = udf_get_pblock_spar15;
+				if (udf_load_sparable_map(sb, map,
+				    (struct sparablePartitionMap *)gpm) < 0)
+					goto out_bh;
 			} else if (!strncmp(upm2->partIdent.ident,
 						UDF_ID_METADATA,
 						strlen(UDF_ID_METADATA))) {
diff --git a/fs/ufs/inode.c b/fs/ufs/inode.c
index 7cdd395..dd7c89d 100644
--- a/fs/ufs/inode.c
+++ b/fs/ufs/inode.c
@@ -895,7 +895,7 @@
 	}
 
 	invalidate_inode_buffers(inode);
-	end_writeback(inode);
+	clear_inode(inode);
 
 	if (want_delete) {
 		lock_ufs(inode->i_sb);
diff --git a/fs/utimes.c b/fs/utimes.c
index ba653f3..fa4dbe4 100644
--- a/fs/utimes.c
+++ b/fs/utimes.c
@@ -140,18 +140,19 @@
 		goto out;
 
 	if (filename == NULL && dfd != AT_FDCWD) {
+		int fput_needed;
 		struct file *file;
 
 		if (flags & AT_SYMLINK_NOFOLLOW)
 			goto out;
 
-		file = fget(dfd);
+		file = fget_light(dfd, &fput_needed);
 		error = -EBADF;
 		if (!file)
 			goto out;
 
 		error = utimes_common(&file->f_path, times);
-		fput(file);
+		fput_light(file, fput_needed);
 	} else {
 		struct path path;
 		int lookup_flags = 0;
diff --git a/fs/xattr.c b/fs/xattr.c
index 3c8c1cc..1d7ac37 100644
--- a/fs/xattr.c
+++ b/fs/xattr.c
@@ -399,11 +399,12 @@
 SYSCALL_DEFINE5(fsetxattr, int, fd, const char __user *, name,
 		const void __user *,value, size_t, size, int, flags)
 {
+	int fput_needed;
 	struct file *f;
 	struct dentry *dentry;
 	int error = -EBADF;
 
-	f = fget(fd);
+	f = fget_light(fd, &fput_needed);
 	if (!f)
 		return error;
 	dentry = f->f_path.dentry;
@@ -413,7 +414,7 @@
 		error = setxattr(dentry, name, value, size, flags);
 		mnt_drop_write_file(f);
 	}
-	fput(f);
+	fput_light(f, fput_needed);
 	return error;
 }
 
@@ -486,15 +487,16 @@
 SYSCALL_DEFINE4(fgetxattr, int, fd, const char __user *, name,
 		void __user *, value, size_t, size)
 {
+	int fput_needed;
 	struct file *f;
 	ssize_t error = -EBADF;
 
-	f = fget(fd);
+	f = fget_light(fd, &fput_needed);
 	if (!f)
 		return error;
 	audit_inode(NULL, f->f_path.dentry);
 	error = getxattr(f->f_path.dentry, name, value, size);
-	fput(f);
+	fput_light(f, fput_needed);
 	return error;
 }
 
@@ -566,15 +568,16 @@
 
 SYSCALL_DEFINE3(flistxattr, int, fd, char __user *, list, size_t, size)
 {
+	int fput_needed;
 	struct file *f;
 	ssize_t error = -EBADF;
 
-	f = fget(fd);
+	f = fget_light(fd, &fput_needed);
 	if (!f)
 		return error;
 	audit_inode(NULL, f->f_path.dentry);
 	error = listxattr(f->f_path.dentry, list, size);
-	fput(f);
+	fput_light(f, fput_needed);
 	return error;
 }
 
@@ -634,11 +637,12 @@
 
 SYSCALL_DEFINE2(fremovexattr, int, fd, const char __user *, name)
 {
+	int fput_needed;
 	struct file *f;
 	struct dentry *dentry;
 	int error = -EBADF;
 
-	f = fget(fd);
+	f = fget_light(fd, &fput_needed);
 	if (!f)
 		return error;
 	dentry = f->f_path.dentry;
@@ -648,7 +652,7 @@
 		error = removexattr(dentry, name);
 		mnt_drop_write_file(f);
 	}
-	fput(f);
+	fput_light(f, fput_needed);
 	return error;
 }
 
diff --git a/fs/xfs/kmem.c b/fs/xfs/kmem.c
index a907de5..4a7286c 100644
--- a/fs/xfs/kmem.c
+++ b/fs/xfs/kmem.c
@@ -46,7 +46,7 @@
 }
 
 void *
-kmem_alloc(size_t size, unsigned int __nocast flags)
+kmem_alloc(size_t size, xfs_km_flags_t flags)
 {
 	int	retries = 0;
 	gfp_t	lflags = kmem_flags_convert(flags);
@@ -65,7 +65,7 @@
 }
 
 void *
-kmem_zalloc(size_t size, unsigned int __nocast flags)
+kmem_zalloc(size_t size, xfs_km_flags_t flags)
 {
 	void	*ptr;
 
@@ -87,7 +87,7 @@
 
 void *
 kmem_realloc(const void *ptr, size_t newsize, size_t oldsize,
-	     unsigned int __nocast flags)
+	     xfs_km_flags_t flags)
 {
 	void	*new;
 
@@ -102,7 +102,7 @@
 }
 
 void *
-kmem_zone_alloc(kmem_zone_t *zone, unsigned int __nocast flags)
+kmem_zone_alloc(kmem_zone_t *zone, xfs_km_flags_t flags)
 {
 	int	retries = 0;
 	gfp_t	lflags = kmem_flags_convert(flags);
@@ -121,7 +121,7 @@
 }
 
 void *
-kmem_zone_zalloc(kmem_zone_t *zone, unsigned int __nocast flags)
+kmem_zone_zalloc(kmem_zone_t *zone, xfs_km_flags_t flags)
 {
 	void	*ptr;
 
diff --git a/fs/xfs/kmem.h b/fs/xfs/kmem.h
index ab7c53f..b2f2620 100644
--- a/fs/xfs/kmem.h
+++ b/fs/xfs/kmem.h
@@ -27,10 +27,11 @@
  * General memory allocation interfaces
  */
 
-#define KM_SLEEP	0x0001u
-#define KM_NOSLEEP	0x0002u
-#define KM_NOFS		0x0004u
-#define KM_MAYFAIL	0x0008u
+typedef unsigned __bitwise xfs_km_flags_t;
+#define KM_SLEEP	((__force xfs_km_flags_t)0x0001u)
+#define KM_NOSLEEP	((__force xfs_km_flags_t)0x0002u)
+#define KM_NOFS		((__force xfs_km_flags_t)0x0004u)
+#define KM_MAYFAIL	((__force xfs_km_flags_t)0x0008u)
 
 /*
  * We use a special process flag to avoid recursive callbacks into
@@ -38,7 +39,7 @@
  * warnings, so we explicitly skip any generic ones (silly of us).
  */
 static inline gfp_t
-kmem_flags_convert(unsigned int __nocast flags)
+kmem_flags_convert(xfs_km_flags_t flags)
 {
 	gfp_t	lflags;
 
@@ -54,9 +55,9 @@
 	return lflags;
 }
 
-extern void *kmem_alloc(size_t, unsigned int __nocast);
-extern void *kmem_zalloc(size_t, unsigned int __nocast);
-extern void *kmem_realloc(const void *, size_t, size_t, unsigned int __nocast);
+extern void *kmem_alloc(size_t, xfs_km_flags_t);
+extern void *kmem_zalloc(size_t, xfs_km_flags_t);
+extern void *kmem_realloc(const void *, size_t, size_t, xfs_km_flags_t);
 extern void  kmem_free(const void *);
 
 static inline void *kmem_zalloc_large(size_t size)
@@ -107,7 +108,7 @@
 		kmem_cache_destroy(zone);
 }
 
-extern void *kmem_zone_alloc(kmem_zone_t *, unsigned int __nocast);
-extern void *kmem_zone_zalloc(kmem_zone_t *, unsigned int __nocast);
+extern void *kmem_zone_alloc(kmem_zone_t *, xfs_km_flags_t);
+extern void *kmem_zone_zalloc(kmem_zone_t *, xfs_km_flags_t);
 
 #endif /* __XFS_SUPPORT_KMEM_H__ */
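
Turning the KM_* allocation flags into a __bitwise typedef lets sparse flag any place that mixes xfs_km_flags_t with plain integers or with gfp_t; the __force casts mark the few spots where crossing the type boundary is intentional (the flag definitions themselves and kmem_flags_convert()). A hedged, generic sketch of the annotation pattern, with hypothetical names:

	typedef unsigned __bitwise example_flags_t;

	#define EXF_SLEEP	((__force example_flags_t)0x1u)
	#define EXF_NOFS	((__force example_flags_t)0x2u)

	static inline gfp_t example_flags_to_gfp(example_flags_t flags)
	{
		/* the only sanctioned way back to gfp_t is an explicit mapping */
		if (flags & EXF_NOFS)
			return GFP_NOFS;
		return GFP_KERNEL;
	}

	/* example_flags_to_gfp(GFP_KERNEL) would now draw a sparse warning */
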
diff --git a/fs/xfs/xfs_alloc.c b/fs/xfs/xfs_alloc.c
index 229641f..9d1aeb7 100644
--- a/fs/xfs/xfs_alloc.c
+++ b/fs/xfs/xfs_alloc.c
@@ -1080,6 +1080,7 @@
 			goto restart;
 		}
 
+		xfs_btree_del_cursor(cnt_cur, XFS_BTREE_NOERROR);
 		trace_xfs_alloc_size_neither(args);
 		args->agbno = NULLAGBLOCK;
 		return 0;
@@ -2441,7 +2442,7 @@
 	DECLARE_COMPLETION_ONSTACK(done);
 
 	args->done = &done;
-	INIT_WORK(&args->work, xfs_alloc_vextent_worker);
+	INIT_WORK_ONSTACK(&args->work, xfs_alloc_vextent_worker);
 	queue_work(xfs_alloc_wq, &args->work);
 	wait_for_completion(&done);
 	return args->result;
diff --git a/fs/xfs/xfs_aops.c b/fs/xfs/xfs_aops.c
index ae31c31..8dad722 100644
--- a/fs/xfs/xfs_aops.c
+++ b/fs/xfs/xfs_aops.c
@@ -981,10 +981,15 @@
 				imap_valid = 0;
 			}
 		} else {
-			if (PageUptodate(page)) {
+			if (PageUptodate(page))
 				ASSERT(buffer_mapped(bh));
-				imap_valid = 0;
-			}
+			/*
+			 * This buffer is not uptodate and will not be
+			 * written to disk.  Ensure that we will put any
+			 * subsequent writeable buffers into a new
+			 * ioend.
+			 */
+			imap_valid = 0;
 			continue;
 		}
 
diff --git a/fs/xfs/xfs_buf.c b/fs/xfs/xfs_buf.c
index 172d3cc..a4beb42 100644
--- a/fs/xfs/xfs_buf.c
+++ b/fs/xfs/xfs_buf.c
@@ -201,14 +201,7 @@
 	bp->b_length = numblks;
 	bp->b_io_length = numblks;
 	bp->b_flags = flags;
-
-	/*
-	 * We do not set the block number here in the buffer because we have not
-	 * finished initialising the buffer. We insert the buffer into the cache
-	 * in this state, so this ensures that we are unable to do IO on a
-	 * buffer that hasn't been fully initialised.
-	 */
-	bp->b_bn = XFS_BUF_DADDR_NULL;
+	bp->b_bn = blkno;
 	atomic_set(&bp->b_pin_count, 0);
 	init_waitqueue_head(&bp->b_waiters);
 
@@ -567,11 +560,6 @@
 	if (bp != new_bp)
 		xfs_buf_free(new_bp);
 
-	/*
-	 * Now we have a workable buffer, fill in the block number so
-	 * that we can do IO on it.
-	 */
-	bp->b_bn = blkno;
 	bp->b_io_length = bp->b_length;
 
 found:
@@ -772,7 +760,7 @@
 	int			error, i;
 	xfs_buf_t		*bp;
 
-	bp = xfs_buf_alloc(target, 0, numblks, 0);
+	bp = xfs_buf_alloc(target, XFS_BUF_DADDR_NULL, numblks, 0);
 	if (unlikely(bp == NULL))
 		goto fail;
 
diff --git a/fs/xfs/xfs_export.c b/fs/xfs/xfs_export.c
index 2d25d19..4267922 100644
--- a/fs/xfs/xfs_export.c
+++ b/fs/xfs/xfs_export.c
@@ -52,19 +52,18 @@
 
 STATIC int
 xfs_fs_encode_fh(
-	struct dentry		*dentry,
-	__u32			*fh,
-	int			*max_len,
-	int			connectable)
+	struct inode	*inode,
+	__u32		*fh,
+	int		*max_len,
+	struct inode	*parent)
 {
 	struct fid		*fid = (struct fid *)fh;
 	struct xfs_fid64	*fid64 = (struct xfs_fid64 *)fh;
-	struct inode		*inode = dentry->d_inode;
 	int			fileid_type;
 	int			len;
 
 	/* Directories don't need their parent encoded, they have ".." */
-	if (S_ISDIR(inode->i_mode) || !connectable)
+	if (!parent)
 		fileid_type = FILEID_INO32_GEN;
 	else
 		fileid_type = FILEID_INO32_GEN_PARENT;
@@ -96,20 +95,16 @@
 
 	switch (fileid_type) {
 	case FILEID_INO32_GEN_PARENT:
-		spin_lock(&dentry->d_lock);
-		fid->i32.parent_ino = XFS_I(dentry->d_parent->d_inode)->i_ino;
-		fid->i32.parent_gen = dentry->d_parent->d_inode->i_generation;
-		spin_unlock(&dentry->d_lock);
+		fid->i32.parent_ino = XFS_I(parent)->i_ino;
+		fid->i32.parent_gen = parent->i_generation;
 		/*FALLTHRU*/
 	case FILEID_INO32_GEN:
 		fid->i32.ino = XFS_I(inode)->i_ino;
 		fid->i32.gen = inode->i_generation;
 		break;
 	case FILEID_INO32_GEN_PARENT | XFS_FILEID_TYPE_64FLAG:
-		spin_lock(&dentry->d_lock);
-		fid64->parent_ino = XFS_I(dentry->d_parent->d_inode)->i_ino;
-		fid64->parent_gen = dentry->d_parent->d_inode->i_generation;
-		spin_unlock(&dentry->d_lock);
+		fid64->parent_ino = XFS_I(parent)->i_ino;
+		fid64->parent_gen = parent->i_generation;
 		/*FALLTHRU*/
 	case FILEID_INO32_GEN | XFS_FILEID_TYPE_64FLAG:
 		fid64->ino = XFS_I(inode)->i_ino;
diff --git a/fs/xfs/xfs_file.c b/fs/xfs/xfs_file.c
index 8d214b8..9f7ec15 100644
--- a/fs/xfs/xfs_file.c
+++ b/fs/xfs/xfs_file.c
@@ -586,8 +586,11 @@
 	 * lock above.  Eventually we should look into a way to avoid
 	 * the pointless lock roundtrip.
 	 */
-	if (likely(!(file->f_mode & FMODE_NOCMTIME)))
-		file_update_time(file);
+	if (likely(!(file->f_mode & FMODE_NOCMTIME))) {
+		error = file_update_time(file);
+		if (error)
+			return error;
+	}
 
 	/*
 	 * If we're writing the file then make sure to clear the setuid and
diff --git a/fs/xfs/xfs_inode_item.c b/fs/xfs/xfs_inode_item.c
index 6cdbf90c..d041d47 100644
--- a/fs/xfs/xfs_inode_item.c
+++ b/fs/xfs/xfs_inode_item.c
@@ -505,6 +505,14 @@
 	}
 
 	/*
+	 * Stale inode items should force out the iclog.
+	 */
+	if (ip->i_flags & XFS_ISTALE) {
+		rval = XFS_ITEM_PINNED;
+		goto out_unlock;
+	}
+
+	/*
 	 * Someone else is already flushing the inode.  Nothing we can do
 	 * here but wait for the flush to finish and remove the item from
 	 * the AIL.
@@ -514,15 +522,6 @@
 		goto out_unlock;
 	}
 
-	/*
-	 * Stale inode items should force out the iclog.
-	 */
-	if (ip->i_flags & XFS_ISTALE) {
-		xfs_ifunlock(ip);
-		xfs_iunlock(ip, XFS_ILOCK_SHARED);
-		return XFS_ITEM_PINNED;
-	}
-
 	ASSERT(iip->ili_fields != 0 || XFS_FORCED_SHUTDOWN(ip->i_mount));
 	ASSERT(iip->ili_logged == 0 || XFS_FORCED_SHUTDOWN(ip->i_mount));
 
diff --git a/fs/xfs/xfs_log.c b/fs/xfs/xfs_log.c
index 6b965bf..d90d4a3 100644
--- a/fs/xfs/xfs_log.c
+++ b/fs/xfs/xfs_log.c
@@ -38,13 +38,21 @@
 kmem_zone_t	*xfs_log_ticket_zone;
 
 /* Local miscellaneous function prototypes */
-STATIC int	 xlog_commit_record(struct log *log, struct xlog_ticket *ticket,
-				    xlog_in_core_t **, xfs_lsn_t *);
+STATIC int
+xlog_commit_record(
+	struct xlog		*log,
+	struct xlog_ticket	*ticket,
+	struct xlog_in_core	**iclog,
+	xfs_lsn_t		*commitlsnp);
+
 STATIC xlog_t *  xlog_alloc_log(xfs_mount_t	*mp,
 				xfs_buftarg_t	*log_target,
 				xfs_daddr_t	blk_offset,
 				int		num_bblks);
-STATIC int	 xlog_space_left(struct log *log, atomic64_t *head);
+STATIC int
+xlog_space_left(
+	struct xlog		*log,
+	atomic64_t		*head);
 STATIC int	 xlog_sync(xlog_t *log, xlog_in_core_t *iclog);
 STATIC void	 xlog_dealloc_log(xlog_t *log);
 
@@ -64,8 +72,10 @@
 				     int		eventual_size);
 STATIC void xlog_state_want_sync(xlog_t	*log, xlog_in_core_t *iclog);
 
-STATIC void xlog_grant_push_ail(struct log	*log,
-				int		need_bytes);
+STATIC void
+xlog_grant_push_ail(
+	struct xlog	*log,
+	int		need_bytes);
 STATIC void xlog_regrant_reserve_log_space(xlog_t	 *log,
 					   xlog_ticket_t *ticket);
 STATIC void xlog_ungrant_log_space(xlog_t	 *log,
@@ -73,7 +83,9 @@
 
 #if defined(DEBUG)
 STATIC void	xlog_verify_dest_ptr(xlog_t *log, char *ptr);
-STATIC void	xlog_verify_grant_tail(struct log *log);
+STATIC void
+xlog_verify_grant_tail(
+	struct xlog	*log);
 STATIC void	xlog_verify_iclog(xlog_t *log, xlog_in_core_t *iclog,
 				  int count, boolean_t syncing);
 STATIC void	xlog_verify_tail_lsn(xlog_t *log, xlog_in_core_t *iclog,
@@ -89,9 +101,9 @@
 
 static void
 xlog_grant_sub_space(
-	struct log	*log,
-	atomic64_t	*head,
-	int		bytes)
+	struct xlog		*log,
+	atomic64_t		*head,
+	int			bytes)
 {
 	int64_t	head_val = atomic64_read(head);
 	int64_t new, old;
@@ -115,9 +127,9 @@
 
 static void
 xlog_grant_add_space(
-	struct log	*log,
-	atomic64_t	*head,
-	int		bytes)
+	struct xlog		*log,
+	atomic64_t		*head,
+	int			bytes)
 {
 	int64_t	head_val = atomic64_read(head);
 	int64_t new, old;
@@ -165,7 +177,7 @@
 
 static inline int
 xlog_ticket_reservation(
-	struct log		*log,
+	struct xlog		*log,
 	struct xlog_grant_head	*head,
 	struct xlog_ticket	*tic)
 {
@@ -182,7 +194,7 @@
 
 STATIC bool
 xlog_grant_head_wake(
-	struct log		*log,
+	struct xlog		*log,
 	struct xlog_grant_head	*head,
 	int			*free_bytes)
 {
@@ -204,7 +216,7 @@
 
 STATIC int
 xlog_grant_head_wait(
-	struct log		*log,
+	struct xlog		*log,
 	struct xlog_grant_head	*head,
 	struct xlog_ticket	*tic,
 	int			need_bytes)
@@ -256,7 +268,7 @@
  */
 STATIC int
 xlog_grant_head_check(
-	struct log		*log,
+	struct xlog		*log,
 	struct xlog_grant_head	*head,
 	struct xlog_ticket	*tic,
 	int			*need_bytes)
@@ -323,7 +335,7 @@
 	struct xfs_mount	*mp,
 	struct xlog_ticket	*tic)
 {
-	struct log		*log = mp->m_log;
+	struct xlog		*log = mp->m_log;
 	int			need_bytes;
 	int			error = 0;
 
@@ -389,7 +401,7 @@
 	bool			permanent,
 	uint		 	t_type)
 {
-	struct log		*log = mp->m_log;
+	struct xlog		*log = mp->m_log;
 	struct xlog_ticket	*tic;
 	int			need_bytes;
 	int			error = 0;
@@ -465,7 +477,7 @@
 	struct xlog_in_core	**iclog,
 	uint			flags)
 {
-	struct log		*log = mp->m_log;
+	struct xlog		*log = mp->m_log;
 	xfs_lsn_t		lsn = 0;
 
 	if (XLOG_FORCED_SHUTDOWN(log) ||
@@ -810,6 +822,7 @@
 void
 xfs_log_unmount(xfs_mount_t *mp)
 {
+	cancel_delayed_work_sync(&mp->m_sync_work);
 	xfs_trans_ail_destroy(mp);
 	xlog_dealloc_log(mp->m_log);
 }
@@ -838,7 +851,7 @@
 xfs_log_space_wake(
 	struct xfs_mount	*mp)
 {
-	struct log		*log = mp->m_log;
+	struct xlog		*log = mp->m_log;
 	int			free_bytes;
 
 	if (XLOG_FORCED_SHUTDOWN(log))
@@ -916,7 +929,7 @@
 xlog_assign_tail_lsn_locked(
 	struct xfs_mount	*mp)
 {
-	struct log		*log = mp->m_log;
+	struct xlog		*log = mp->m_log;
 	struct xfs_log_item	*lip;
 	xfs_lsn_t		tail_lsn;
 
@@ -965,7 +978,7 @@
  */
 STATIC int
 xlog_space_left(
-	struct log	*log,
+	struct xlog	*log,
 	atomic64_t	*head)
 {
 	int		free_bytes;
@@ -1277,7 +1290,7 @@
  */
 STATIC int
 xlog_commit_record(
-	struct log		*log,
+	struct xlog		*log,
 	struct xlog_ticket	*ticket,
 	struct xlog_in_core	**iclog,
 	xfs_lsn_t		*commitlsnp)
@@ -1311,7 +1324,7 @@
  */
 STATIC void
 xlog_grant_push_ail(
-	struct log	*log,
+	struct xlog	*log,
 	int		need_bytes)
 {
 	xfs_lsn_t	threshold_lsn = 0;
@@ -1790,7 +1803,7 @@
 
 static xlog_op_header_t *
 xlog_write_setup_ophdr(
-	struct log		*log,
+	struct xlog		*log,
 	struct xlog_op_header	*ophdr,
 	struct xlog_ticket	*ticket,
 	uint			flags)
@@ -1873,7 +1886,7 @@
 
 static int
 xlog_write_copy_finish(
-	struct log		*log,
+	struct xlog		*log,
 	struct xlog_in_core	*iclog,
 	uint			flags,
 	int			*record_cnt,
@@ -1958,7 +1971,7 @@
  */
 int
 xlog_write(
-	struct log		*log,
+	struct xlog		*log,
 	struct xfs_log_vec	*log_vector,
 	struct xlog_ticket	*ticket,
 	xfs_lsn_t		*start_lsn,
@@ -2821,7 +2834,7 @@
 	uint			flags,
 	int			*log_flushed)
 {
-	struct log		*log = mp->m_log;
+	struct xlog		*log = mp->m_log;
 	struct xlog_in_core	*iclog;
 	xfs_lsn_t		lsn;
 
@@ -2969,7 +2982,7 @@
 	uint			flags,
 	int			*log_flushed)
 {
-	struct log		*log = mp->m_log;
+	struct xlog		*log = mp->m_log;
 	struct xlog_in_core	*iclog;
 	int			already_slept = 0;
 
@@ -3147,12 +3160,12 @@
  */
 xlog_ticket_t *
 xlog_ticket_alloc(
-	struct log	*log,
+	struct xlog	*log,
 	int		unit_bytes,
 	int		cnt,
 	char		client,
 	bool		permanent,
-	int		alloc_flags)
+	xfs_km_flags_t	alloc_flags)
 {
 	struct xlog_ticket *tic;
 	uint		num_headers;
@@ -3278,7 +3291,7 @@
  */
 void
 xlog_verify_dest_ptr(
-	struct log	*log,
+	struct xlog	*log,
 	char		*ptr)
 {
 	int i;
@@ -3307,7 +3320,7 @@
  */
 STATIC void
 xlog_verify_grant_tail(
-	struct log	*log)
+	struct xlog	*log)
 {
 	int		tail_cycle, tail_blocks;
 	int		cycle, space;
diff --git a/fs/xfs/xfs_log_cil.c b/fs/xfs/xfs_log_cil.c
index 7d6197c..ddc4529 100644
--- a/fs/xfs/xfs_log_cil.c
+++ b/fs/xfs/xfs_log_cil.c
@@ -44,7 +44,7 @@
  */
 static struct xlog_ticket *
 xlog_cil_ticket_alloc(
-	struct log	*log)
+	struct xlog	*log)
 {
 	struct xlog_ticket *tic;
 
@@ -72,7 +72,7 @@
  */
 void
 xlog_cil_init_post_recovery(
-	struct log	*log)
+	struct xlog	*log)
 {
 	log->l_cilp->xc_ctx->ticket = xlog_cil_ticket_alloc(log);
 	log->l_cilp->xc_ctx->sequence = 1;
@@ -182,7 +182,7 @@
  */
 STATIC void
 xfs_cil_prepare_item(
-	struct log		*log,
+	struct xlog		*log,
 	struct xfs_log_vec	*lv,
 	int			*len,
 	int			*diff_iovecs)
@@ -231,7 +231,7 @@
  */
 static void
 xlog_cil_insert_items(
-	struct log		*log,
+	struct xlog		*log,
 	struct xfs_log_vec	*log_vector,
 	struct xlog_ticket	*ticket)
 {
@@ -373,7 +373,7 @@
  */
 STATIC int
 xlog_cil_push(
-	struct log		*log)
+	struct xlog		*log)
 {
 	struct xfs_cil		*cil = log->l_cilp;
 	struct xfs_log_vec	*lv;
@@ -601,7 +601,7 @@
  */
 static void
 xlog_cil_push_background(
-	struct log	*log)
+	struct xlog	*log)
 {
 	struct xfs_cil	*cil = log->l_cilp;
 
@@ -629,7 +629,7 @@
 
 static void
 xlog_cil_push_foreground(
-	struct log	*log,
+	struct xlog	*log,
 	xfs_lsn_t	push_seq)
 {
 	struct xfs_cil	*cil = log->l_cilp;
@@ -683,7 +683,7 @@
 	xfs_lsn_t		*commit_lsn,
 	int			flags)
 {
-	struct log		*log = mp->m_log;
+	struct xlog		*log = mp->m_log;
 	int			log_flags = 0;
 	struct xfs_log_vec	*log_vector;
 
@@ -754,7 +754,7 @@
  */
 xfs_lsn_t
 xlog_cil_force_lsn(
-	struct log	*log,
+	struct xlog	*log,
 	xfs_lsn_t	sequence)
 {
 	struct xfs_cil		*cil = log->l_cilp;
@@ -833,7 +833,7 @@
  */
 int
 xlog_cil_init(
-	struct log	*log)
+	struct xlog	*log)
 {
 	struct xfs_cil	*cil;
 	struct xfs_cil_ctx *ctx;
@@ -869,7 +869,7 @@
 
 void
 xlog_cil_destroy(
-	struct log	*log)
+	struct xlog	*log)
 {
 	if (log->l_cilp->xc_ctx) {
 		if (log->l_cilp->xc_ctx->ticket)
diff --git a/fs/xfs/xfs_log_priv.h b/fs/xfs/xfs_log_priv.h
index 735ff1e..72eba22 100644
--- a/fs/xfs/xfs_log_priv.h
+++ b/fs/xfs/xfs_log_priv.h
@@ -19,7 +19,7 @@
 #define __XFS_LOG_PRIV_H__
 
 struct xfs_buf;
-struct log;
+struct xlog;
 struct xlog_ticket;
 struct xfs_mount;
 
@@ -352,7 +352,7 @@
 	struct xlog_in_core	*ic_next;
 	struct xlog_in_core	*ic_prev;
 	struct xfs_buf		*ic_bp;
-	struct log		*ic_log;
+	struct xlog		*ic_log;
 	int			ic_size;
 	int			ic_offset;
 	int			ic_bwritecnt;
@@ -409,7 +409,7 @@
  * operations almost as efficient as the old logging methods.
  */
 struct xfs_cil {
-	struct log		*xc_log;
+	struct xlog		*xc_log;
 	struct list_head	xc_cil;
 	spinlock_t		xc_cil_lock;
 	struct xfs_cil_ctx	*xc_ctx;
@@ -487,7 +487,7 @@
  * overflow 31 bits worth of byte offset, so using a byte number will mean
  * that round off problems won't occur when releasing partial reservations.
  */
-typedef struct log {
+typedef struct xlog {
 	/* The following fields don't need locking */
 	struct xfs_mount	*l_mp;	        /* mount point */
 	struct xfs_ail		*l_ailp;	/* AIL log is working with */
@@ -553,9 +553,14 @@
 extern void	 xlog_pack_data(xlog_t *log, xlog_in_core_t *iclog, int);
 
 extern kmem_zone_t *xfs_log_ticket_zone;
-struct xlog_ticket *xlog_ticket_alloc(struct log *log, int unit_bytes,
-				int count, char client, bool permanent,
-				int alloc_flags);
+struct xlog_ticket *
+xlog_ticket_alloc(
+	struct xlog	*log,
+	int		unit_bytes,
+	int		count,
+	char		client,
+	bool		permanent,
+	xfs_km_flags_t	alloc_flags);
 
 
 static inline void
@@ -567,9 +572,14 @@
 }
 
 void	xlog_print_tic_res(struct xfs_mount *mp, struct xlog_ticket *ticket);
-int	xlog_write(struct log *log, struct xfs_log_vec *log_vector,
-				struct xlog_ticket *tic, xfs_lsn_t *start_lsn,
-				xlog_in_core_t **commit_iclog, uint flags);
+int
+xlog_write(
+	struct xlog		*log,
+	struct xfs_log_vec	*log_vector,
+	struct xlog_ticket	*tic,
+	xfs_lsn_t		*start_lsn,
+	struct xlog_in_core	**commit_iclog,
+	uint			flags);
 
 /*
  * When we crack an atomic LSN, we sample it first so that the value will not
@@ -629,17 +639,23 @@
 /*
  * Committed Item List interfaces
  */
-int	xlog_cil_init(struct log *log);
-void	xlog_cil_init_post_recovery(struct log *log);
-void	xlog_cil_destroy(struct log *log);
+int
+xlog_cil_init(struct xlog *log);
+void
+xlog_cil_init_post_recovery(struct xlog *log);
+void
+xlog_cil_destroy(struct xlog *log);
 
 /*
  * CIL force routines
  */
-xfs_lsn_t xlog_cil_force_lsn(struct log *log, xfs_lsn_t sequence);
+xfs_lsn_t
+xlog_cil_force_lsn(
+	struct xlog *log,
+	xfs_lsn_t sequence);
 
 static inline void
-xlog_cil_force(struct log *log)
+xlog_cil_force(struct xlog *log)
 {
 	xlog_cil_force_lsn(log, log->l_cilp->xc_current_sequence);
 }
diff --git a/fs/xfs/xfs_log_recover.c b/fs/xfs/xfs_log_recover.c
index ca38690..a7be98a 100644
--- a/fs/xfs/xfs_log_recover.c
+++ b/fs/xfs/xfs_log_recover.c
@@ -1471,8 +1471,8 @@
 
 STATIC int
 xlog_recover_add_to_cont_trans(
-	struct log		*log,
-	xlog_recover_t		*trans,
+	struct xlog		*log,
+	struct xlog_recover	*trans,
 	xfs_caddr_t		dp,
 	int			len)
 {
@@ -1517,8 +1517,8 @@
  */
 STATIC int
 xlog_recover_add_to_trans(
-	struct log		*log,
-	xlog_recover_t		*trans,
+	struct xlog		*log,
+	struct xlog_recover	*trans,
 	xfs_caddr_t		dp,
 	int			len)
 {
@@ -1588,8 +1588,8 @@
  */
 STATIC int
 xlog_recover_reorder_trans(
-	struct log		*log,
-	xlog_recover_t		*trans,
+	struct xlog		*log,
+	struct xlog_recover	*trans,
 	int			pass)
 {
 	xlog_recover_item_t	*item, *n;
@@ -1642,8 +1642,8 @@
  */
 STATIC int
 xlog_recover_buffer_pass1(
-	struct log		*log,
-	xlog_recover_item_t	*item)
+	struct xlog			*log,
+	struct xlog_recover_item	*item)
 {
 	xfs_buf_log_format_t	*buf_f = item->ri_buf[0].i_addr;
 	struct list_head	*bucket;
@@ -1696,7 +1696,7 @@
  */
 STATIC int
 xlog_check_buffer_cancelled(
-	struct log		*log,
+	struct xlog		*log,
 	xfs_daddr_t		blkno,
 	uint			len,
 	ushort			flags)
@@ -2689,9 +2689,9 @@
 
 STATIC int
 xlog_recover_commit_pass1(
-	struct log		*log,
-	struct xlog_recover	*trans,
-	xlog_recover_item_t	*item)
+	struct xlog			*log,
+	struct xlog_recover		*trans,
+	struct xlog_recover_item	*item)
 {
 	trace_xfs_log_recover_item_recover(log, trans, item, XLOG_RECOVER_PASS1);
 
@@ -2716,10 +2716,10 @@
 
 STATIC int
 xlog_recover_commit_pass2(
-	struct log		*log,
-	struct xlog_recover	*trans,
-	struct list_head	*buffer_list,
-	xlog_recover_item_t	*item)
+	struct xlog			*log,
+	struct xlog_recover		*trans,
+	struct list_head		*buffer_list,
+	struct xlog_recover_item	*item)
 {
 	trace_xfs_log_recover_item_recover(log, trans, item, XLOG_RECOVER_PASS2);
 
@@ -2753,7 +2753,7 @@
  */
 STATIC int
 xlog_recover_commit_trans(
-	struct log		*log,
+	struct xlog		*log,
 	struct xlog_recover	*trans,
 	int			pass)
 {
@@ -2793,8 +2793,8 @@
 
 STATIC int
 xlog_recover_unmount_trans(
-	struct log		*log,
-	xlog_recover_t		*trans)
+	struct xlog		*log,
+	struct xlog_recover	*trans)
 {
 	/* Do nothing now */
 	xfs_warn(log->l_mp, "%s: Unmount LR", __func__);
diff --git a/fs/xfs/xfs_mount.h b/fs/xfs/xfs_mount.h
index 8b89c5a..90c1fc9 100644
--- a/fs/xfs/xfs_mount.h
+++ b/fs/xfs/xfs_mount.h
@@ -53,7 +53,7 @@
 
 #include "xfs_sync.h"
 
-struct log;
+struct xlog;
 struct xfs_mount_args;
 struct xfs_inode;
 struct xfs_bmbt_irec;
@@ -133,7 +133,7 @@
 	uint			m_readio_blocks; /* min read size blocks */
 	uint			m_writeio_log;	/* min write size log bytes */
 	uint			m_writeio_blocks; /* min write size blocks */
-	struct log		*m_log;		/* log specific stuff */
+	struct xlog		*m_log;		/* log specific stuff */
 	int			m_logbufs;	/* number of log buffers */
 	int			m_logbsize;	/* size of each log buffer */
 	uint			m_rsumlevels;	/* rt summary levels */
diff --git a/fs/xfs/xfs_super.c b/fs/xfs/xfs_super.c
index 2fcfd5b..0d9de41 100644
--- a/fs/xfs/xfs_super.c
+++ b/fs/xfs/xfs_super.c
@@ -932,7 +932,7 @@
 	trace_xfs_evict_inode(ip);
 
 	truncate_inode_pages(&inode->i_data, 0);
-	end_writeback(inode);
+	clear_inode(inode);
 	XFS_STATS_INC(vn_rele);
 	XFS_STATS_INC(vn_remove);
 	XFS_STATS_DEC(vn_active);
diff --git a/fs/xfs/xfs_sync.c b/fs/xfs/xfs_sync.c
index c9d3409..1e9ee06 100644
--- a/fs/xfs/xfs_sync.c
+++ b/fs/xfs/xfs_sync.c
@@ -386,23 +386,23 @@
 	 * We shouldn't write/force the log if we are in the mount/unmount
 	 * process or on a read only filesystem. The workqueue still needs to be
 	 * active in both cases, however, because it is used for inode reclaim
-	 * during these times.  Use the s_umount semaphore to provide exclusion
-	 * with unmount.
+	 * during these times.  Use the MS_ACTIVE flag to avoid doing anything
+	 * during mount.  Doing work during unmount is avoided by calling
+	 * cancel_delayed_work_sync on this work queue before tearing down
+	 * the ail and the log in xfs_log_unmount.
 	 */
-	if (down_read_trylock(&mp->m_super->s_umount)) {
-		if (!(mp->m_flags & XFS_MOUNT_RDONLY)) {
-			/* dgc: errors ignored here */
-			if (mp->m_super->s_frozen == SB_UNFROZEN &&
-			    xfs_log_need_covered(mp))
-				error = xfs_fs_log_dummy(mp);
-			else
-				xfs_log_force(mp, 0);
+	if (!(mp->m_super->s_flags & MS_ACTIVE) &&
+	    !(mp->m_flags & XFS_MOUNT_RDONLY)) {
+		/* dgc: errors ignored here */
+		if (mp->m_super->s_frozen == SB_UNFROZEN &&
+		    xfs_log_need_covered(mp))
+			error = xfs_fs_log_dummy(mp);
+		else
+			xfs_log_force(mp, 0);
 
-			/* start pushing all the metadata that is currently
-			 * dirty */
-			xfs_ail_push_all(mp->m_ail);
-		}
-		up_read(&mp->m_super->s_umount);
+		/* start pushing all the metadata that is currently
+		 * dirty */
+		xfs_ail_push_all(mp->m_ail);
 	}
 
 	/* queue us up again */
diff --git a/fs/xfs/xfs_trace.h b/fs/xfs/xfs_trace.h
index 7cf9d35..caf5dab 100644
--- a/fs/xfs/xfs_trace.h
+++ b/fs/xfs/xfs_trace.h
@@ -32,7 +32,7 @@
 struct xfs_dquot;
 struct xfs_log_item;
 struct xlog_ticket;
-struct log;
+struct xlog;
 struct xlog_recover;
 struct xlog_recover_item;
 struct xfs_buf_log_format;
@@ -762,7 +762,7 @@
 DEFINE_DQUOT_EVENT(xfs_dqflush_done);
 
 DECLARE_EVENT_CLASS(xfs_loggrant_class,
-	TP_PROTO(struct log *log, struct xlog_ticket *tic),
+	TP_PROTO(struct xlog *log, struct xlog_ticket *tic),
 	TP_ARGS(log, tic),
 	TP_STRUCT__entry(
 		__field(dev_t, dev)
@@ -830,7 +830,7 @@
 
 #define DEFINE_LOGGRANT_EVENT(name) \
 DEFINE_EVENT(xfs_loggrant_class, name, \
-	TP_PROTO(struct log *log, struct xlog_ticket *tic), \
+	TP_PROTO(struct xlog *log, struct xlog_ticket *tic), \
 	TP_ARGS(log, tic))
 DEFINE_LOGGRANT_EVENT(xfs_log_done_nonperm);
 DEFINE_LOGGRANT_EVENT(xfs_log_done_perm);
@@ -1664,7 +1664,7 @@
 DEFINE_SWAPEXT_EVENT(xfs_swap_extent_after);
 
 DECLARE_EVENT_CLASS(xfs_log_recover_item_class,
-	TP_PROTO(struct log *log, struct xlog_recover *trans,
+	TP_PROTO(struct xlog *log, struct xlog_recover *trans,
 		struct xlog_recover_item *item, int pass),
 	TP_ARGS(log, trans, item, pass),
 	TP_STRUCT__entry(
@@ -1698,7 +1698,7 @@
 
 #define DEFINE_LOG_RECOVER_ITEM(name) \
 DEFINE_EVENT(xfs_log_recover_item_class, name, \
-	TP_PROTO(struct log *log, struct xlog_recover *trans, \
+	TP_PROTO(struct xlog *log, struct xlog_recover *trans, \
 		struct xlog_recover_item *item, int pass), \
 	TP_ARGS(log, trans, item, pass))
 
@@ -1709,7 +1709,7 @@
 DEFINE_LOG_RECOVER_ITEM(xfs_log_recover_item_recover);
 
 DECLARE_EVENT_CLASS(xfs_log_recover_buf_item_class,
-	TP_PROTO(struct log *log, struct xfs_buf_log_format *buf_f),
+	TP_PROTO(struct xlog *log, struct xfs_buf_log_format *buf_f),
 	TP_ARGS(log, buf_f),
 	TP_STRUCT__entry(
 		__field(dev_t, dev)
@@ -1739,7 +1739,7 @@
 
 #define DEFINE_LOG_RECOVER_BUF_ITEM(name) \
 DEFINE_EVENT(xfs_log_recover_buf_item_class, name, \
-	TP_PROTO(struct log *log, struct xfs_buf_log_format *buf_f), \
+	TP_PROTO(struct xlog *log, struct xfs_buf_log_format *buf_f), \
 	TP_ARGS(log, buf_f))
 
 DEFINE_LOG_RECOVER_BUF_ITEM(xfs_log_recover_buf_not_cancel);
@@ -1752,7 +1752,7 @@
 DEFINE_LOG_RECOVER_BUF_ITEM(xfs_log_recover_buf_dquot_buf);
 
 DECLARE_EVENT_CLASS(xfs_log_recover_ino_item_class,
-	TP_PROTO(struct log *log, struct xfs_inode_log_format *in_f),
+	TP_PROTO(struct xlog *log, struct xfs_inode_log_format *in_f),
 	TP_ARGS(log, in_f),
 	TP_STRUCT__entry(
 		__field(dev_t, dev)
@@ -1790,7 +1790,7 @@
 )
 #define DEFINE_LOG_RECOVER_INO_ITEM(name) \
 DEFINE_EVENT(xfs_log_recover_ino_item_class, name, \
-	TP_PROTO(struct log *log, struct xfs_inode_log_format *in_f), \
+	TP_PROTO(struct xlog *log, struct xfs_inode_log_format *in_f), \
 	TP_ARGS(log, in_f))
 
 DEFINE_LOG_RECOVER_INO_ITEM(xfs_log_recover_inode_recover);
diff --git a/fs/xfs/xfs_trans.c b/fs/xfs/xfs_trans.c
index cdf896f..fdf3245 100644
--- a/fs/xfs/xfs_trans.c
+++ b/fs/xfs/xfs_trans.c
@@ -584,7 +584,7 @@
 _xfs_trans_alloc(
 	xfs_mount_t	*mp,
 	uint		type,
-	uint		memflags)
+	xfs_km_flags_t	memflags)
 {
 	xfs_trans_t	*tp;
 
diff --git a/fs/xfs/xfs_trans.h b/fs/xfs/xfs_trans.h
index 7ab99e1..7c37b53 100644
--- a/fs/xfs/xfs_trans.h
+++ b/fs/xfs/xfs_trans.h
@@ -443,7 +443,7 @@
  * XFS transaction mechanism exported interfaces.
  */
 xfs_trans_t	*xfs_trans_alloc(struct xfs_mount *, uint);
-xfs_trans_t	*_xfs_trans_alloc(struct xfs_mount *, uint, uint);
+xfs_trans_t	*_xfs_trans_alloc(struct xfs_mount *, uint, xfs_km_flags_t);
 xfs_trans_t	*xfs_trans_dup(xfs_trans_t *);
 int		xfs_trans_reserve(xfs_trans_t *, uint, uint, uint,
 				  uint, uint);
diff --git a/include/acpi/acpi_bus.h b/include/acpi/acpi_bus.h
index b0d6282..9e6e1c6 100644
--- a/include/acpi/acpi_bus.h
+++ b/include/acpi/acpi_bus.h
@@ -440,8 +440,8 @@
 
 #else	/* CONFIG_ACPI */
 
-static int register_acpi_bus_type(struct acpi_bus_type *bus) { return 0; }
-static int unregister_acpi_bus_type(struct acpi_bus_type *bus) { return 0; }
+static inline int register_acpi_bus_type(void *bus) { return 0; }
+static inline int unregister_acpi_bus_type(void *bus) { return 0; }
 
 #endif				/* CONFIG_ACPI */
 
diff --git a/include/asm-generic/Kbuild b/include/asm-generic/Kbuild
index 53f91b1..2c85a0f 100644
--- a/include/asm-generic/Kbuild
+++ b/include/asm-generic/Kbuild
@@ -8,6 +8,7 @@
 header-y += ioctl.h
 header-y += ioctls.h
 header-y += ipcbuf.h
+header-y += kvm_para.h
 header-y += mman-common.h
 header-y += mman.h
 header-y += msgbuf.h
diff --git a/include/asm-generic/bitsperlong.h b/include/asm-generic/bitsperlong.h
index 4ae54e0..a7b0914 100644
--- a/include/asm-generic/bitsperlong.h
+++ b/include/asm-generic/bitsperlong.h
@@ -28,5 +28,9 @@
 #error Inconsistent word size. Check asm/bitsperlong.h
 #endif
 
+#ifndef BITS_PER_LONG_LONG
+#define BITS_PER_LONG_LONG 64
+#endif
+
 #endif /* __KERNEL__ */
 #endif /* __ASM_GENERIC_BITS_PER_LONG */
diff --git a/include/asm-generic/bug.h b/include/asm-generic/bug.h
index 2520a6e..7d10f96 100644
--- a/include/asm-generic/bug.h
+++ b/include/asm-generic/bug.h
@@ -3,10 +3,18 @@
 
 #include <linux/compiler.h>
 
+#ifdef CONFIG_GENERIC_BUG
+#define BUGFLAG_WARNING		(1 << 0)
+#define BUGFLAG_TAINT(taint)	(BUGFLAG_WARNING | ((taint) << 8))
+#define BUG_GET_TAINT(bug)	((bug)->flags >> 8)
+#endif
+
+#ifndef __ASSEMBLY__
+#include <linux/kernel.h>
+
 #ifdef CONFIG_BUG
 
 #ifdef CONFIG_GENERIC_BUG
-#ifndef __ASSEMBLY__
 struct bug_entry {
 #ifndef CONFIG_GENERIC_BUG_RELATIVE_POINTERS
 	unsigned long	bug_addr;
@@ -23,12 +31,6 @@
 #endif
 	unsigned short	flags;
 };
-#endif		/* __ASSEMBLY__ */
-
-#define BUGFLAG_WARNING		(1 << 0)
-#define BUGFLAG_TAINT(taint)	(BUGFLAG_WARNING | ((taint) << 8))
-#define BUG_GET_TAINT(bug)	((bug)->flags >> 8)
-
 #endif	/* CONFIG_GENERIC_BUG */
 
 /*
@@ -60,7 +62,6 @@
  * to provide better diagnostics.
  */
 #ifndef __WARN_TAINT
-#ifndef __ASSEMBLY__
 extern __printf(3, 4)
 void warn_slowpath_fmt(const char *file, const int line,
 		       const char *fmt, ...);
@@ -69,7 +70,6 @@
 			     const char *fmt, ...);
 extern void warn_slowpath_null(const char *file, const int line);
 #define WANT_WARN_ON_SLOWPATH
-#endif
 #define __WARN()		warn_slowpath_null(__FILE__, __LINE__)
 #define __WARN_printf(arg...)	warn_slowpath_fmt(__FILE__, __LINE__, arg)
 #define __WARN_printf_taint(taint, arg...)				\
@@ -202,4 +202,6 @@
 # define WARN_ON_SMP(x)			({0;})
 #endif
 
+#endif /* __ASSEMBLY__ */
+
 #endif
diff --git a/include/asm-generic/dma-coherent.h b/include/asm-generic/dma-coherent.h
index 85a3ffa..abfb268 100644
--- a/include/asm-generic/dma-coherent.h
+++ b/include/asm-generic/dma-coherent.h
@@ -3,13 +3,15 @@
 
 #ifdef CONFIG_HAVE_GENERIC_DMA_COHERENT
 /*
- * These two functions are only for dma allocator.
+ * These three functions are only for the dma allocator.

  * Don't use them in device drivers.
  */
 int dma_alloc_from_coherent(struct device *dev, ssize_t size,
 				       dma_addr_t *dma_handle, void **ret);
 int dma_release_from_coherent(struct device *dev, int order, void *vaddr);
 
+int dma_mmap_from_coherent(struct device *dev, struct vm_area_struct *vma,
+			    void *cpu_addr, size_t size, int *ret);
 /*
  * Standard interface
  */
diff --git a/include/asm-generic/dma-contiguous.h b/include/asm-generic/dma-contiguous.h
new file mode 100644
index 0000000..c544356
--- /dev/null
+++ b/include/asm-generic/dma-contiguous.h
@@ -0,0 +1,28 @@
+#ifndef ASM_DMA_CONTIGUOUS_H
+#define ASM_DMA_CONTIGUOUS_H
+
+#ifdef __KERNEL__
+#ifdef CONFIG_CMA
+
+#include <linux/device.h>
+#include <linux/dma-contiguous.h>
+
+static inline struct cma *dev_get_cma_area(struct device *dev)
+{
+	if (dev && dev->cma_area)
+		return dev->cma_area;
+	return dma_contiguous_default_area;
+}
+
+static inline void dev_set_cma_area(struct device *dev, struct cma *cma)
+{
+	if (dev)
+		dev->cma_area = cma;
+	if (!dev || !dma_contiguous_default_area)
+		dma_contiguous_default_area = cma;
+}
+
+#endif
+#endif
+
+#endif
diff --git a/include/asm-generic/pgtable.h b/include/asm-generic/pgtable.h
index 125c54e..ff4947b 100644
--- a/include/asm-generic/pgtable.h
+++ b/include/asm-generic/pgtable.h
@@ -158,9 +158,8 @@
 #endif
 
 #ifndef __HAVE_ARCH_PMDP_SPLITTING_FLUSH
-extern pmd_t pmdp_splitting_flush(struct vm_area_struct *vma,
-				  unsigned long address,
-				  pmd_t *pmdp);
+extern void pmdp_splitting_flush(struct vm_area_struct *vma,
+				 unsigned long address, pmd_t *pmdp);
 #endif
 
 #ifndef __HAVE_ARCH_PTE_SAME
@@ -446,6 +445,18 @@
 #endif /* __HAVE_ARCH_PMD_WRITE */
 #endif /* CONFIG_TRANSPARENT_HUGEPAGE */
 
+#ifndef pmd_read_atomic
+static inline pmd_t pmd_read_atomic(pmd_t *pmdp)
+{
+	/*
+	 * Depend on compiler for an atomic pmd read. NOTE: this is
+	 * only going to work, if the pmdval_t isn't larger than
+	 * an unsigned long.
+	 */
+	return *pmdp;
+}
+#endif
+
 /*
  * This function is meant to be used by sites walking pagetables with
  * the mmap_sem hold in read mode to protect against MADV_DONTNEED and
@@ -459,14 +470,30 @@
  * undefined so behaving like if the pmd was none is safe (because it
  * can return none anyway). The compiler level barrier() is critically
  * important to compute the two checks atomically on the same pmdval.
+ *
+ * For 32bit kernels with a 64bit large pmd_t this automatically takes
+ * care of reading the pmd atomically to avoid SMP race conditions
+ * against pmd_populate() when the mmap_sem is held for reading by the
+ * caller (a special atomic read not done by "gcc" as in the generic
+ * version above, is also needed when THP is disabled because the page
+ * fault can populate the pmd from under us).
  */
 static inline int pmd_none_or_trans_huge_or_clear_bad(pmd_t *pmd)
 {
-	/* depend on compiler for an atomic pmd read */
-	pmd_t pmdval = *pmd;
+	pmd_t pmdval = pmd_read_atomic(pmd);
 	/*
 	 * The barrier will stabilize the pmdval in a register or on
 	 * the stack so that it will stop changing under the code.
+	 *
+	 * When CONFIG_TRANSPARENT_HUGEPAGE=y on x86 32bit PAE,
+	 * pmd_read_atomic is allowed to return a non-atomic pmdval
+	 * (for example pointing to a hugepage that has never been
+	 * mapped in the pmd). The below checks will only care about
+	 * the low part of the pmd with 32bit PAE x86 anyway, with the
+	 * exception of pmd_none(). So the important thing is that if
+	 * the low part of the pmd is found null, the high part will
+	 * be also null or the pmd_none() check below would be
+	 * confused.
 	 */
 #ifdef CONFIG_TRANSPARENT_HUGEPAGE
 	barrier();
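
The pgtable.h hunk adds pmd_read_atomic() so that pmd_none_or_trans_huge_or_clear_bad() reads a 64-bit pmd safely on 32-bit PAE; the caller contract (mmap_sem held for reading) is unchanged. A hedged sketch of how a pagetable walker typically uses the helper; walk_one_pmd() is a made-up callback:

	static void example_walk_pmds(struct mm_struct *mm, pud_t *pud,
				      unsigned long addr, unsigned long end)
	{
		pmd_t *pmd = pmd_offset(pud, addr);
		unsigned long next;

		/* caller holds down_read(&mm->mmap_sem) */
		do {
			next = pmd_addr_end(addr, end);
			/*
			 * A pmd that is concurrently cleared by MADV_DONTNEED
			 * or turned into a trans huge pmd is simply skipped.
			 */
			if (pmd_none_or_trans_huge_or_clear_bad(pmd))
				continue;
			walk_one_pmd(mm, pmd, addr, next);	/* hypothetical */
		} while (pmd++, addr = next, addr != end);
	}
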
diff --git a/include/asm-generic/posix_types.h b/include/asm-generic/posix_types.h
index 91d44bd..fe74fcc 100644
--- a/include/asm-generic/posix_types.h
+++ b/include/asm-generic/posix_types.h
@@ -23,10 +23,6 @@
 typedef unsigned int	__kernel_mode_t;
 #endif
 
-#ifndef __kernel_nlink_t
-typedef __kernel_ulong_t __kernel_nlink_t;
-#endif
-
 #ifndef __kernel_pid_t
 typedef int		__kernel_pid_t;
 #endif
diff --git a/include/asm-generic/word-at-a-time.h b/include/asm-generic/word-at-a-time.h
new file mode 100644
index 0000000..3f21f1b
--- /dev/null
+++ b/include/asm-generic/word-at-a-time.h
@@ -0,0 +1,52 @@
+#ifndef _ASM_WORD_AT_A_TIME_H
+#define _ASM_WORD_AT_A_TIME_H
+
+/*
+ * This says "generic", but it's actually big-endian only.
+ * Little-endian can use more efficient versions of these
+ * interfaces, see for example
+ *	 arch/x86/include/asm/word-at-a-time.h
+ * for those.
+ */
+
+#include <linux/kernel.h>
+
+struct word_at_a_time {
+	const unsigned long high_bits, low_bits;
+};
+
+#define WORD_AT_A_TIME_CONSTANTS { REPEAT_BYTE(0xfe) + 1, REPEAT_BYTE(0x7f) }
+
+/* Bit set in the bytes that have a zero */
+static inline long prep_zero_mask(unsigned long val, unsigned long rhs, const struct word_at_a_time *c)
+{
+	unsigned long mask = (val & c->low_bits) + c->low_bits;
+	return ~(mask | rhs);
+}
+
+#define create_zero_mask(mask) (mask)
+
+static inline long find_zero(unsigned long mask)
+{
+	long byte = 0;
+#ifdef CONFIG_64BIT
+	if (mask >> 32)
+		mask >>= 32;
+	else
+		byte = 4;
+#endif
+	if (mask >> 16)
+		mask >>= 16;
+	else
+		byte += 2;
+	return (mask >> 8) ? byte : byte + 1;
+}
+
+static inline bool has_zero(unsigned long val, unsigned long *data, const struct word_at_a_time *c)
+{
+	unsigned long rhs = val | c->low_bits;
+	*data = rhs;
+	return (val + c->high_bits) & ~rhs;
+}
+
+#endif /* _ASM_WORD_AT_A_TIME_H */
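
This generic header is the big-endian fallback for the word-at-a-time string helpers (x86 carries a little-endian version in arch/x86/include/asm/word-at-a-time.h). Callers scan one unsigned long per iteration and only decode the byte position once has_zero() reports a zero byte. A hedged sketch of the usual sequence, assuming the buffer is word-aligned and padded so reading past the terminator is safe, as the real users guarantee:

	static size_t example_wordwise_strlen(const unsigned long *words)
	{
		const struct word_at_a_time constants = WORD_AT_A_TIME_CONSTANTS;
		unsigned long val, data, mask;
		size_t len = 0;

		for (;;) {
			val = *words++;
			if (has_zero(val, &data, &constants)) {
				data = prep_zero_mask(val, data, &constants);
				mask = create_zero_mask(data);
				return len + find_zero(mask);
			}
			len += sizeof(unsigned long);
		}
	}
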
diff --git a/include/drm/drm_crtc.h b/include/drm/drm_crtc.h
index 73e4560..bac55c2 100644
--- a/include/drm/drm_crtc.h
+++ b/include/drm/drm_crtc.h
@@ -54,7 +54,7 @@
 	struct drm_object_properties *properties;
 };
 
-#define DRM_OBJECT_MAX_PROPERTY 16
+#define DRM_OBJECT_MAX_PROPERTY 24
 struct drm_object_properties {
 	int count;
 	uint32_t ids[DRM_OBJECT_MAX_PROPERTY];
diff --git a/include/drm/drm_mem_util.h b/include/drm/drm_mem_util.h
index 6bd325f..19a2404 100644
--- a/include/drm/drm_mem_util.h
+++ b/include/drm/drm_mem_util.h
@@ -31,7 +31,7 @@
 
 static __inline__ void *drm_calloc_large(size_t nmemb, size_t size)
 {
-	if (size != 0 && nmemb > ULONG_MAX / size)
+	if (size != 0 && nmemb > SIZE_MAX / size)
 		return NULL;
 
 	if (size * nmemb <= PAGE_SIZE)
@@ -44,7 +44,7 @@
 /* Modeled after cairo's malloc_ab, it's like calloc but without the zeroing. */
 static __inline__ void *drm_malloc_ab(size_t nmemb, size_t size)
 {
-	if (size != 0 && nmemb > ULONG_MAX / size)
+	if (size != 0 && nmemb > SIZE_MAX / size)
 		return NULL;
 
 	if (size * nmemb <= PAGE_SIZE)
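
The drm_mem_util change replaces ULONG_MAX with SIZE_MAX in the multiplication-overflow guard, since the product is computed in size_t; SIZE_MAX is assumed to come from <linux/kernel.h>. The guard itself is the standard overflow-safe calloc-style check, sketched here with a hypothetical wrapper:

	static inline void *example_calloc(size_t nmemb, size_t size)
	{
		/* nmemb * size must not wrap around size_t */
		if (size != 0 && nmemb > SIZE_MAX / size)
			return NULL;
		return kzalloc(nmemb * size, GFP_KERNEL);
	}
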
diff --git a/include/drm/drm_pciids.h b/include/drm/drm_pciids.h
index 58d0bda..a7aec39 100644
--- a/include/drm/drm_pciids.h
+++ b/include/drm/drm_pciids.h
@@ -1,7 +1,3 @@
-/*
-   This file is auto-generated from the drm_pciids.txt in the DRM CVS
-   Please contact dri-devel@lists.sf.net to add new cards to this list
-*/
 #define radeon_PCI_IDS \
 	{0x1002, 0x3150, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV380|RADEON_IS_MOBILITY}, \
 	{0x1002, 0x3151, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV380|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
@@ -181,6 +177,7 @@
 	{0x1002, 0x6747, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_TURKS|RADEON_NEW_MEMMAP}, \
 	{0x1002, 0x6748, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_TURKS|RADEON_NEW_MEMMAP}, \
 	{0x1002, 0x6749, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_TURKS|RADEON_NEW_MEMMAP}, \
+	{0x1002, 0x674A, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_TURKS|RADEON_NEW_MEMMAP}, \
 	{0x1002, 0x6750, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_TURKS|RADEON_NEW_MEMMAP}, \
 	{0x1002, 0x6751, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_TURKS|RADEON_NEW_MEMMAP}, \
 	{0x1002, 0x6758, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_TURKS|RADEON_NEW_MEMMAP}, \
@@ -198,6 +195,7 @@
 	{0x1002, 0x6767, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_CAICOS|RADEON_NEW_MEMMAP}, \
 	{0x1002, 0x6768, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_CAICOS|RADEON_NEW_MEMMAP}, \
 	{0x1002, 0x6770, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_CAICOS|RADEON_NEW_MEMMAP}, \
+	{0x1002, 0x6771, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_CAICOS|RADEON_NEW_MEMMAP}, \
 	{0x1002, 0x6772, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_CAICOS|RADEON_NEW_MEMMAP}, \
 	{0x1002, 0x6778, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_CAICOS|RADEON_NEW_MEMMAP}, \
 	{0x1002, 0x6779, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_CAICOS|RADEON_NEW_MEMMAP}, \
@@ -229,10 +227,11 @@
 	{0x1002, 0x6827, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VERDE|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
 	{0x1002, 0x6828, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VERDE|RADEON_NEW_MEMMAP}, \
 	{0x1002, 0x6829, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VERDE|RADEON_NEW_MEMMAP}, \
+	{0x1002, 0x682B, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VERDE|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
 	{0x1002, 0x682D, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VERDE|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
 	{0x1002, 0x682F, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VERDE|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
-	{0x1002, 0x6830, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VERDE|RADEON_NEW_MEMMAP}, \
-	{0x1002, 0x6831, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VERDE|RADEON_NEW_MEMMAP}, \
+	{0x1002, 0x6830, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VERDE|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
+	{0x1002, 0x6831, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VERDE|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
 	{0x1002, 0x6837, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VERDE|RADEON_NEW_MEMMAP}, \
 	{0x1002, 0x6838, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VERDE|RADEON_NEW_MEMMAP}, \
 	{0x1002, 0x6839, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VERDE|RADEON_NEW_MEMMAP}, \
@@ -531,6 +530,7 @@
 	{0x1002, 0x9645, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_SUMO2|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \
 	{0x1002, 0x9647, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_SUMO|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP|RADEON_IS_IGP},\
 	{0x1002, 0x9648, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_SUMO|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP|RADEON_IS_IGP},\
+	{0x1002, 0x9649, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_SUMO|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP|RADEON_IS_IGP},\
 	{0x1002, 0x964a, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_SUMO|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \
 	{0x1002, 0x964b, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_SUMO|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \
 	{0x1002, 0x964c, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_SUMO|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \
@@ -550,6 +550,7 @@
 	{0x1002, 0x9807, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_PALM|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \
 	{0x1002, 0x9808, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_PALM|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \
 	{0x1002, 0x9809, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_PALM|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \
+	{0x1002, 0x980A, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_PALM|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \
 	{0x1002, 0x9900, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_ARUBA|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \
 	{0x1002, 0x9901, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_ARUBA|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \
 	{0x1002, 0x9903, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_ARUBA|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \
@@ -561,11 +562,19 @@
 	{0x1002, 0x9909, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_ARUBA|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \
 	{0x1002, 0x990A, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_ARUBA|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \
 	{0x1002, 0x990F, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_ARUBA|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \
+	{0x1002, 0x9910, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_ARUBA|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \
+	{0x1002, 0x9913, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_ARUBA|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \
+	{0x1002, 0x9917, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_ARUBA|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \
+	{0x1002, 0x9918, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_ARUBA|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \
+	{0x1002, 0x9919, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_ARUBA|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \
 	{0x1002, 0x9990, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_ARUBA|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \
 	{0x1002, 0x9991, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_ARUBA|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \
 	{0x1002, 0x9992, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_ARUBA|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \
 	{0x1002, 0x9993, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_ARUBA|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \
 	{0x1002, 0x9994, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_ARUBA|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \
+	{0x1002, 0x99A0, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_ARUBA|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \
+	{0x1002, 0x99A2, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_ARUBA|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \
+	{0x1002, 0x99A4, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_ARUBA|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \
 	{0, 0, 0}
 
 #define r128_PCI_IDS \
diff --git a/include/drm/exynos_drm.h b/include/drm/exynos_drm.h
index b6d7ce9..6873358 100644
--- a/include/drm/exynos_drm.h
+++ b/include/drm/exynos_drm.h
@@ -64,6 +64,7 @@
  * A structure for mapping buffer.
  *
  * @handle: a handle to gem object created.
+ * @pad: just padding to be 64-bit aligned.
  * @size: memory size to be mapped.
  * @mapped: having user virtual address mmaped.
  *	- this variable would be filled by exynos gem module
@@ -72,7 +73,8 @@
  */
 struct drm_exynos_gem_mmap {
 	unsigned int handle;
-	unsigned int size;
+	unsigned int pad;
+	uint64_t size;
 	uint64_t mapped;
 };
 
diff --git a/include/linux/Kbuild b/include/linux/Kbuild
index 4cd59b9..8760be3 100644
--- a/include/linux/Kbuild
+++ b/include/linux/Kbuild
@@ -225,6 +225,8 @@
 header-y += kdev_t.h
 header-y += kernel.h
 header-y += kernelcapi.h
+header-y += kernel-page-flags.h
+header-y += kexec.h
 header-y += keyboard.h
 header-y += keyctl.h
 header-y += l2tp.h
diff --git a/include/linux/amba/pl08x.h b/include/linux/amba/pl08x.h
index e64ce2c..0254901 100644
--- a/include/linux/amba/pl08x.h
+++ b/include/linux/amba/pl08x.h
@@ -92,6 +92,8 @@
  * right now
  * @serving: the virtual channel currently being served by this physical
  * channel
+ * @locked: channel unavailable for the system, e.g. dedicated to secure
+ * world
  */
 struct pl08x_phy_chan {
 	unsigned int id;
@@ -99,6 +101,7 @@
 	spinlock_t lock;
 	int signal;
 	struct pl08x_dma_chan *serving;
+	bool locked;
 };
 
 /**
diff --git a/include/linux/apple_bl.h b/include/linux/apple_bl.h
index 47bedc0..0a95e73 100644
--- a/include/linux/apple_bl.h
+++ b/include/linux/apple_bl.h
@@ -5,7 +5,7 @@
 #ifndef _LINUX_APPLE_BL_H
 #define _LINUX_APPLE_BL_H
 
-#ifdef CONFIG_BACKLIGHT_APPLE
+#if defined(CONFIG_BACKLIGHT_APPLE) || defined(CONFIG_BACKLIGHT_APPLE_MODULE)
 
 extern int apple_bl_register(void);
 extern void apple_bl_unregister(void);
diff --git a/include/linux/bio.h b/include/linux/bio.h
index 4d94eb8..2643589 100644
--- a/include/linux/bio.h
+++ b/include/linux/bio.h
@@ -269,6 +269,14 @@
 extern void bvec_free_bs(struct bio_set *, struct bio_vec *, unsigned int);
 extern unsigned int bvec_nr_vecs(unsigned short idx);
 
+#ifdef CONFIG_BLK_CGROUP
+int bio_associate_current(struct bio *bio);
+void bio_disassociate_task(struct bio *bio);
+#else	/* CONFIG_BLK_CGROUP */
+static inline int bio_associate_current(struct bio *bio) { return -ENOENT; }
+static inline void bio_disassociate_task(struct bio *bio) { }
+#endif	/* CONFIG_BLK_CGROUP */
+
 /*
  * bio_set is used to allow other portions of the IO system to
  * allocate their own private memory pools for bio and iovec structures.
diff --git a/include/linux/blk_types.h b/include/linux/blk_types.h
index 4053cbd..0edb65d 100644
--- a/include/linux/blk_types.h
+++ b/include/linux/blk_types.h
@@ -14,6 +14,8 @@
 struct bio_integrity_payload;
 struct page;
 struct block_device;
+struct io_context;
+struct cgroup_subsys_state;
 typedef void (bio_end_io_t) (struct bio *, int);
 typedef void (bio_destructor_t) (struct bio *);
 
@@ -66,6 +68,14 @@
 	bio_end_io_t		*bi_end_io;
 
 	void			*bi_private;
+#ifdef CONFIG_BLK_CGROUP
+	/*
+	 * Optional ioc and css associated with this bio.  Put on bio
+	 * release.  Read comment on top of bio_associate_current().
+	 */
+	struct io_context	*bi_ioc;
+	struct cgroup_subsys_state *bi_css;
+#endif
 #if defined(CONFIG_BLK_DEV_INTEGRITY)
 	struct bio_integrity_payload *bi_integrity;  /* data integrity */
 #endif
diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h
index 4d4ac24..ba43f40 100644
--- a/include/linux/blkdev.h
+++ b/include/linux/blkdev.h
@@ -32,10 +32,17 @@
 struct request;
 struct sg_io_hdr;
 struct bsg_job;
+struct blkcg_gq;
 
 #define BLKDEV_MIN_RQ	4
 #define BLKDEV_MAX_RQ	128	/* Default maximum */
 
+/*
+ * Maximum number of blkcg policies allowed to be registered concurrently.
+ * Defined here to simplify include dependency.
+ */
+#define BLKCG_MAX_POLS		2
+
 struct request;
 typedef void (rq_end_io_fn)(struct request *, int);
 
@@ -363,6 +370,11 @@
 	struct list_head	timeout_list;
 
 	struct list_head	icq_list;
+#ifdef CONFIG_BLK_CGROUP
+	DECLARE_BITMAP		(blkcg_pols, BLKCG_MAX_POLS);
+	struct blkcg_gq		*root_blkg;
+	struct list_head	blkg_list;
+#endif
 
 	struct queue_limits	limits;
 
@@ -390,12 +402,17 @@
 
 	struct mutex		sysfs_lock;
 
+	int			bypass_depth;
+
 #if defined(CONFIG_BLK_DEV_BSG)
 	bsg_job_fn		*bsg_job_fn;
 	int			bsg_job_size;
 	struct bsg_class_device bsg_dev;
 #endif
 
+#ifdef CONFIG_BLK_CGROUP
+	struct list_head	all_q_node;
+#endif
 #ifdef CONFIG_BLK_DEV_THROTTLING
 	/* Throttle data */
 	struct throtl_data *td;
@@ -407,7 +424,7 @@
 #define	QUEUE_FLAG_SYNCFULL	3	/* read queue has been filled */
 #define QUEUE_FLAG_ASYNCFULL	4	/* write queue has been filled */
 #define QUEUE_FLAG_DEAD		5	/* queue being torn down */
-#define QUEUE_FLAG_ELVSWITCH	6	/* don't use elevator, just do FIFO */
+#define QUEUE_FLAG_BYPASS	6	/* act as dumb FIFO queue */
 #define QUEUE_FLAG_BIDI		7	/* queue supports bidi requests */
 #define QUEUE_FLAG_NOMERGES     8	/* disable merge attempts */
 #define QUEUE_FLAG_SAME_COMP	9	/* complete on same CPU-group */
@@ -491,6 +508,7 @@
 #define blk_queue_tagged(q)	test_bit(QUEUE_FLAG_QUEUED, &(q)->queue_flags)
 #define blk_queue_stopped(q)	test_bit(QUEUE_FLAG_STOPPED, &(q)->queue_flags)
 #define blk_queue_dead(q)	test_bit(QUEUE_FLAG_DEAD, &(q)->queue_flags)
+#define blk_queue_bypass(q)	test_bit(QUEUE_FLAG_BYPASS, &(q)->queue_flags)
 #define blk_queue_nomerges(q)	test_bit(QUEUE_FLAG_NOMERGES, &(q)->queue_flags)
 #define blk_queue_noxmerges(q)	\
 	test_bit(QUEUE_FLAG_NOXMERGES, &(q)->queue_flags)
diff --git a/include/linux/bootmem.h b/include/linux/bootmem.h
index 1a0cd27..324fe08 100644
--- a/include/linux/bootmem.h
+++ b/include/linux/bootmem.h
@@ -135,9 +135,6 @@
 extern int reserve_bootmem_generic(unsigned long addr, unsigned long size,
 				   int flags);
 
-extern void *alloc_bootmem_section(unsigned long size,
-				   unsigned long section_nr);
-
 #ifdef CONFIG_HAVE_ARCH_ALLOC_REMAP
 extern void *alloc_remap(int nid, unsigned long size);
 #else
diff --git a/include/linux/bug.h b/include/linux/bug.h
index 72961c3..aaac4bb 100644
--- a/include/linux/bug.h
+++ b/include/linux/bug.h
@@ -30,6 +30,13 @@
 #define BUILD_BUG_ON_ZERO(e) (sizeof(struct { int:-!!(e); }))
 #define BUILD_BUG_ON_NULL(e) ((void *)sizeof(struct { int:-!!(e); }))
 
+/*
+ * BUILD_BUG_ON_INVALID() permits the compiler to check the validity of the
+ * expression but avoids the generation of any code, even if that expression
+ * has side-effects.
+ */
+#define BUILD_BUG_ON_INVALID(e) ((void)(sizeof((__force long)(e))))
+
 /**
  * BUILD_BUG_ON - break compile if a condition is true.
  * @condition: the condition which the compiler should know is false.
diff --git a/include/linux/ceph/auth.h b/include/linux/ceph/auth.h
index aa13392..d4080f3 100644
--- a/include/linux/ceph/auth.h
+++ b/include/linux/ceph/auth.h
@@ -14,6 +14,14 @@
 struct ceph_auth_client;
 struct ceph_authorizer;
 
+struct ceph_auth_handshake {
+	struct ceph_authorizer *authorizer;
+	void *authorizer_buf;
+	size_t authorizer_buf_len;
+	void *authorizer_reply_buf;
+	size_t authorizer_reply_buf_len;
+};
+
 struct ceph_auth_client_ops {
 	const char *name;
 
@@ -43,9 +51,7 @@
 	 * the response to authenticate the service.
 	 */
 	int (*create_authorizer)(struct ceph_auth_client *ac, int peer_type,
-				 struct ceph_authorizer **a,
-				 void **buf, size_t *len,
-				 void **reply_buf, size_t *reply_len);
+				 struct ceph_auth_handshake *auth);
 	int (*verify_authorizer_reply)(struct ceph_auth_client *ac,
 				       struct ceph_authorizer *a, size_t len);
 	void (*destroy_authorizer)(struct ceph_auth_client *ac,
diff --git a/include/linux/ceph/ceph_fs.h b/include/linux/ceph/ceph_fs.h
index b8c6069..e81ab30 100644
--- a/include/linux/ceph/ceph_fs.h
+++ b/include/linux/ceph/ceph_fs.h
@@ -65,7 +65,7 @@
 	__le32 fl_object_stripe_unit;  /* UNUSED.  for per-object parity, if any */
 
 	/* object -> pg layout */
-	__le32 fl_pg_preferred; /* preferred primary for pg (-1 for none) */
+	__le32 fl_unused;       /* unused; used to be preferred primary (-1) */
 	__le32 fl_pg_pool;      /* namespace, crush ruleset, rep level */
 } __attribute__ ((packed));
 
@@ -384,7 +384,7 @@
 		__le32 stripe_count;         /* ... */
 		__le32 object_size;
 		__le32 file_replication;
-		__le32 preferred;
+		__le32 unused;               /* used to be preferred osd */
 	} __attribute__ ((packed)) open;
 	struct {
 		__le32 flags;
diff --git a/include/linux/ceph/decode.h b/include/linux/ceph/decode.h
index 220ae21..d8615de 100644
--- a/include/linux/ceph/decode.h
+++ b/include/linux/ceph/decode.h
@@ -46,9 +46,14 @@
 /*
  * bounds check input.
  */
+static inline int ceph_has_room(void **p, void *end, size_t n)
+{
+	return end >= *p && n <= end - *p;
+}
+
 #define ceph_decode_need(p, end, n, bad)		\
 	do {						\
-		if (unlikely(*(p) + (n) > (end))) 	\
+		if (!likely(ceph_has_room(p, end, n)))	\
 			goto bad;			\
 	} while (0)
 
@@ -167,7 +172,7 @@
 
 #define ceph_encode_need(p, end, n, bad)		\
 	do {						\
-		if (unlikely(*(p) + (n) > (end))) 	\
+		if (!likely(ceph_has_room(p, end, n)))	\
 			goto bad;			\
 	} while (0)
 
diff --git a/include/linux/ceph/messenger.h b/include/linux/ceph/messenger.h
index 3bff047..2521a95 100644
--- a/include/linux/ceph/messenger.h
+++ b/include/linux/ceph/messenger.h
@@ -25,9 +25,9 @@
 	void (*dispatch) (struct ceph_connection *con, struct ceph_msg *m);
 
 	/* authorize an outgoing connection */
-	int (*get_authorizer) (struct ceph_connection *con,
-			       void **buf, int *len, int *proto,
-			       void **reply_buf, int *reply_len, int force_new);
+	struct ceph_auth_handshake *(*get_authorizer) (
+				struct ceph_connection *con,
+			       int *proto, int force_new);
 	int (*verify_authorizer_reply) (struct ceph_connection *con, int len);
 	int (*invalidate_authorizer)(struct ceph_connection *con);
 
diff --git a/include/linux/ceph/osd_client.h b/include/linux/ceph/osd_client.h
index 7c05ac2..cedfb1a 100644
--- a/include/linux/ceph/osd_client.h
+++ b/include/linux/ceph/osd_client.h
@@ -6,9 +6,10 @@
 #include <linux/mempool.h>
 #include <linux/rbtree.h>
 
-#include "types.h"
-#include "osdmap.h"
-#include "messenger.h"
+#include <linux/ceph/types.h>
+#include <linux/ceph/osdmap.h>
+#include <linux/ceph/messenger.h>
+#include <linux/ceph/auth.h>
 
 /* 
  * Maximum object name size 
@@ -40,9 +41,7 @@
 	struct list_head o_requests;
 	struct list_head o_linger_requests;
 	struct list_head o_osd_lru;
-	struct ceph_authorizer *o_authorizer;
-	void *o_authorizer_buf, *o_authorizer_reply_buf;
-	size_t o_authorizer_buf_len, o_authorizer_reply_buf_len;
+	struct ceph_auth_handshake o_auth;
 	unsigned long lru_ttl;
 	int o_marked_for_keepalive;
 	struct list_head o_keepalive_item;
diff --git a/include/linux/ceph/osdmap.h b/include/linux/ceph/osdmap.h
index ba4c205..311ef8d 100644
--- a/include/linux/ceph/osdmap.h
+++ b/include/linux/ceph/osdmap.h
@@ -65,8 +65,6 @@
 #define ceph_file_layout_cas_hash(l) ((__s32)le32_to_cpu((l).fl_cas_hash))
 #define ceph_file_layout_object_su(l) \
 	((__s32)le32_to_cpu((l).fl_object_stripe_unit))
-#define ceph_file_layout_pg_preferred(l) \
-	((__s32)le32_to_cpu((l).fl_pg_preferred))
 #define ceph_file_layout_pg_pool(l) \
 	((__s32)le32_to_cpu((l).fl_pg_pool))
 
diff --git a/include/linux/clk-private.h b/include/linux/clk-private.h
index 5e4312b..eb3f84b 100644
--- a/include/linux/clk-private.h
+++ b/include/linux/clk-private.h
@@ -30,7 +30,7 @@
 	const struct clk_ops	*ops;
 	struct clk_hw		*hw;
 	struct clk		*parent;
-	char			**parent_names;
+	const char		**parent_names;
 	struct clk		**parents;
 	u8			num_parents;
 	unsigned long		rate;
@@ -55,12 +55,22 @@
  * alternative macro for static initialization
  */
 
-extern struct clk_ops clk_fixed_rate_ops;
+#define DEFINE_CLK(_name, _ops, _flags, _parent_names,		\
+		_parents)					\
+	static struct clk _name = {				\
+		.name = #_name,					\
+		.ops = &_ops,					\
+		.hw = &_name##_hw.hw,				\
+		.parent_names = _parent_names,			\
+		.num_parents = ARRAY_SIZE(_parent_names),	\
+		.parents = _parents,				\
+		.flags = _flags,				\
+	}
 
 #define DEFINE_CLK_FIXED_RATE(_name, _flags, _rate,		\
 				_fixed_rate_flags)		\
 	static struct clk _name;				\
-	static char *_name##_parent_names[] = {};		\
+	static const char *_name##_parent_names[] = {};		\
 	static struct clk_fixed_rate _name##_hw = {		\
 		.hw = {						\
 			.clk = &_name,				\
@@ -68,23 +78,14 @@
 		.fixed_rate = _rate,				\
 		.flags = _fixed_rate_flags,			\
 	};							\
-	static struct clk _name = {				\
-		.name = #_name,					\
-		.ops = &clk_fixed_rate_ops,			\
-		.hw = &_name##_hw.hw,				\
-		.parent_names = _name##_parent_names,		\
-		.num_parents =					\
-			ARRAY_SIZE(_name##_parent_names),	\
-		.flags = _flags,				\
-	};
-
-extern struct clk_ops clk_gate_ops;
+	DEFINE_CLK(_name, clk_fixed_rate_ops, _flags,		\
+			_name##_parent_names, NULL);
 
 #define DEFINE_CLK_GATE(_name, _parent_name, _parent_ptr,	\
 				_flags, _reg, _bit_idx,		\
 				_gate_flags, _lock)		\
 	static struct clk _name;				\
-	static char *_name##_parent_names[] = {			\
+	static const char *_name##_parent_names[] = {		\
 		_parent_name,					\
 	};							\
 	static struct clk *_name##_parents[] = {		\
@@ -99,24 +100,14 @@
 		.flags = _gate_flags,				\
 		.lock = _lock,					\
 	};							\
-	static struct clk _name = {				\
-		.name = #_name,					\
-		.ops = &clk_gate_ops,				\
-		.hw = &_name##_hw.hw,				\
-		.parent_names = _name##_parent_names,		\
-		.num_parents =					\
-			ARRAY_SIZE(_name##_parent_names),	\
-		.parents = _name##_parents,			\
-		.flags = _flags,				\
-	};
-
-extern struct clk_ops clk_divider_ops;
+	DEFINE_CLK(_name, clk_gate_ops, _flags,			\
+			_name##_parent_names, _name##_parents);
 
 #define DEFINE_CLK_DIVIDER(_name, _parent_name, _parent_ptr,	\
 				_flags, _reg, _shift, _width,	\
 				_divider_flags, _lock)		\
 	static struct clk _name;				\
-	static char *_name##_parent_names[] = {			\
+	static const char *_name##_parent_names[] = {		\
 		_parent_name,					\
 	};							\
 	static struct clk *_name##_parents[] = {		\
@@ -132,18 +123,8 @@
 		.flags = _divider_flags,			\
 		.lock = _lock,					\
 	};							\
-	static struct clk _name = {				\
-		.name = #_name,					\
-		.ops = &clk_divider_ops,			\
-		.hw = &_name##_hw.hw,				\
-		.parent_names = _name##_parent_names,		\
-		.num_parents =					\
-			ARRAY_SIZE(_name##_parent_names),	\
-		.parents = _name##_parents,			\
-		.flags = _flags,				\
-	};
-
-extern struct clk_ops clk_mux_ops;
+	DEFINE_CLK(_name, clk_divider_ops, _flags,		\
+			_name##_parent_names, _name##_parents);
 
 #define DEFINE_CLK_MUX(_name, _parent_names, _parents, _flags,	\
 				_reg, _shift, _width,		\
@@ -159,16 +140,28 @@
 		.flags = _mux_flags,				\
 		.lock = _lock,					\
 	};							\
-	static struct clk _name = {				\
-		.name = #_name,					\
-		.ops = &clk_mux_ops,				\
-		.hw = &_name##_hw.hw,				\
-		.parent_names = _parent_names,			\
-		.num_parents =					\
-			ARRAY_SIZE(_parent_names),		\
-		.parents = _parents,				\
-		.flags = _flags,				\
-	};
+	DEFINE_CLK(_name, clk_mux_ops, _flags, _parent_names,	\
+			_parents);
+
+#define DEFINE_CLK_FIXED_FACTOR(_name, _parent_name,		\
+				_parent_ptr, _flags,		\
+				_mult, _div)			\
+	static struct clk _name;				\
+	static const char *_name##_parent_names[] = {		\
+		_parent_name,					\
+	};							\
+	static struct clk *_name##_parents[] = {		\
+		_parent_ptr,					\
+	};							\
+	static struct clk_fixed_factor _name##_hw = {		\
+		.hw = {						\
+			.clk = &_name,				\
+		},						\
+		.mult = _mult,					\
+		.div = _div,					\
+	};							\
+	DEFINE_CLK(_name, clk_fixed_factor_ops, _flags,		\
+			_name##_parent_names, _name##_parents);
 
 /**
  * __clk_init - initialize the data structures in a struct clk
@@ -189,8 +182,12 @@
  *
  * It is not necessary to call clk_register if __clk_init is used directly with
  * statically initialized clock data.
+ *
+ * Returns 0 on success, otherwise an error code.
  */
-void __clk_init(struct device *dev, struct clk *clk);
+int __clk_init(struct device *dev, struct clk *clk);
+
+struct clk *__clk_register(struct device *dev, struct clk_hw *hw);
 
 #endif /* CONFIG_COMMON_CLK */
 #endif /* CLK_PRIVATE_H */
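The new DEFINE_CLK() collapses the per-type static initializers into one shared macro: the wrappers build _name##_parent_names by token pasting, and ARRAY_SIZE() fills in the parent count. A user-space miniature of the same static-initialization idiom; struct widget and DEFINE_WIDGET() are invented purely for illustration:

#include <stdio.h>

#define ARRAY_SIZE(a)	(sizeof(a) / sizeof((a)[0]))

struct widget {
	const char	*name;
	const char	**parent_names;
	unsigned int	num_parents;
};

#define DEFINE_WIDGET(_name, ...)					\
	static const char *_name##_parent_names[] = { __VA_ARGS__ };	\
	static struct widget _name = {					\
		.name		= #_name,				\
		.parent_names	= _name##_parent_names,			\
		.num_parents	= ARRAY_SIZE(_name##_parent_names),	\
	}

DEFINE_WIDGET(root_widget, "osc", "pll");

int main(void)
{
	printf("%s has %u parents, first is %s\n", root_widget.name,
	       root_widget.num_parents, root_widget.parent_names[0]);
	return 0;
}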
diff --git a/include/linux/clk-provider.h b/include/linux/clk-provider.h
index 5508897..4a0b483 100644
--- a/include/linux/clk-provider.h
+++ b/include/linux/clk-provider.h
@@ -15,19 +15,6 @@
 
 #ifdef CONFIG_COMMON_CLK
 
-/**
- * struct clk_hw - handle for traversing from a struct clk to its corresponding
- * hardware-specific structure.  struct clk_hw should be declared within struct
- * clk_foo and then referenced by the struct clk instance that uses struct
- * clk_foo's clk_ops
- *
- * clk: pointer to the struct clk instance that points back to this struct
- * clk_hw instance
- */
-struct clk_hw {
-	struct clk *clk;
-};
-
 /*
  * flags used across common struct clk.  these flags should only affect the
  * top-level framework.  custom flags for dealing with hardware specifics
@@ -39,6 +26,8 @@
 #define CLK_IGNORE_UNUSED	BIT(3) /* do not gate even if unused */
 #define CLK_IS_ROOT		BIT(4) /* root clk, has no parent */
 
+struct clk_hw;
+
 /**
  * struct clk_ops -  Callback operations for hardware clocks; these are to
  * be provided by the clock implementation, and will be called by drivers
@@ -88,19 +77,11 @@
  * 		array index into the value programmed into the hardware.
  * 		Returns 0 on success, -EERROR otherwise.
  *
- * @set_rate:	Change the rate of this clock. If this callback returns
- * 		CLK_SET_RATE_PARENT, the rate change will be propagated to the
- * 		parent clock (which may propagate again if the parent clock
- * 		also sets this flag). The requested rate of the parent is
- * 		passed back from the callback in the second 'unsigned long *'
- * 		argument.  Note that it is up to the hardware clock's set_rate
- * 		implementation to insure that clocks do not run out of spec
- * 		when propgating the call to set_rate up to the parent.  One way
- * 		to do this is to gate the clock (via clk_disable and/or
- * 		clk_unprepare) before calling clk_set_rate, then ungating it
- * 		afterward.  If your clock also has the CLK_GATE_SET_RATE flag
- * 		set then this will insure safety.  Returns 0 on success,
- * 		-EERROR otherwise.
+ * @set_rate:	Change the rate of this clock. The requested rate is specified
+ *		by the second argument, which should typically be the return
+ *		value of a .round_rate call.  The third argument gives the parent
+ *		rate, which is likely helpful for most .set_rate implementations.
+ *		Returns 0 on success, -EERROR otherwise.
  *
  * The clk_enable/clk_disable and clk_prepare/clk_unprepare pairs allow
  * implementations to split any work between atomic (enable) and sleepable
@@ -125,10 +106,46 @@
 					unsigned long *);
 	int		(*set_parent)(struct clk_hw *hw, u8 index);
 	u8		(*get_parent)(struct clk_hw *hw);
-	int		(*set_rate)(struct clk_hw *hw, unsigned long);
+	int		(*set_rate)(struct clk_hw *hw, unsigned long,
+				    unsigned long);
 	void		(*init)(struct clk_hw *hw);
 };
 
+/**
+ * struct clk_init_data - holds init data that's common to all clocks and is
+ * shared between the clock provider and the common clock framework.
+ *
+ * @name: clock name
+ * @ops: operations this clock supports
+ * @parent_names: array of string names for all possible parents
+ * @num_parents: number of possible parents
+ * @flags: framework-level hints and quirks
+ */
+struct clk_init_data {
+	const char		*name;
+	const struct clk_ops	*ops;
+	const char		**parent_names;
+	u8			num_parents;
+	unsigned long		flags;
+};
+
+/**
+ * struct clk_hw - handle for traversing from a struct clk to its corresponding
+ * hardware-specific structure.  struct clk_hw should be declared within struct
+ * clk_foo and then referenced by the struct clk instance that uses struct
+ * clk_foo's clk_ops
+ *
+ * @clk: pointer to the struct clk instance that points back to this struct
+ * clk_hw instance
+ *
+ * @init: pointer to struct clk_init_data that contains the init data shared
+ * with the common clock framework.
+ */
+struct clk_hw {
+	struct clk *clk;
+	struct clk_init_data *init;
+};
+
 /*
  * DOC: Basic clock implementations common to many platforms
  *
@@ -149,6 +166,7 @@
 	u8		flags;
 };
 
+extern const struct clk_ops clk_fixed_rate_ops;
 struct clk *clk_register_fixed_rate(struct device *dev, const char *name,
 		const char *parent_name, unsigned long flags,
 		unsigned long fixed_rate);
@@ -165,7 +183,7 @@
  * Clock which can gate its output.  Implements .enable & .disable
  *
  * Flags:
- * CLK_GATE_SET_DISABLE - by default this clock sets the bit at bit_idx to
+ * CLK_GATE_SET_TO_DISABLE - by default this clock sets the bit at bit_idx to
  * 	enable the clock.  Setting this flag does the opposite: setting the bit
  * 	disable the clock and clearing it enables the clock
  */
@@ -175,11 +193,11 @@
 	u8		bit_idx;
 	u8		flags;
 	spinlock_t	*lock;
-	char		*parent[1];
 };
 
 #define CLK_GATE_SET_TO_DISABLE		BIT(0)
 
+extern const struct clk_ops clk_gate_ops;
 struct clk *clk_register_gate(struct device *dev, const char *name,
 		const char *parent_name, unsigned long flags,
 		void __iomem *reg, u8 bit_idx,
@@ -212,12 +230,12 @@
 	u8		width;
 	u8		flags;
 	spinlock_t	*lock;
-	char		*parent[1];
 };
 
 #define CLK_DIVIDER_ONE_BASED		BIT(0)
 #define CLK_DIVIDER_POWER_OF_TWO	BIT(1)
 
+extern const struct clk_ops clk_divider_ops;
 struct clk *clk_register_divider(struct device *dev, const char *name,
 		const char *parent_name, unsigned long flags,
 		void __iomem *reg, u8 shift, u8 width,
@@ -238,7 +256,7 @@
  *
  * Flags:
  * CLK_MUX_INDEX_ONE - register index starts at 1, not 0
- * CLK_MUX_INDEX_BITWISE - register index is a single bit (power of two)
+ * CLK_MUX_INDEX_BIT - register index is a single bit (power of two)
  */
 struct clk_mux {
 	struct clk_hw	hw;
@@ -252,29 +270,49 @@
 #define CLK_MUX_INDEX_ONE		BIT(0)
 #define CLK_MUX_INDEX_BIT		BIT(1)
 
+extern const struct clk_ops clk_mux_ops;
 struct clk *clk_register_mux(struct device *dev, const char *name,
-		char **parent_names, u8 num_parents, unsigned long flags,
+		const char **parent_names, u8 num_parents, unsigned long flags,
 		void __iomem *reg, u8 shift, u8 width,
 		u8 clk_mux_flags, spinlock_t *lock);
 
 /**
+ * struct clk_fixed_factor - fixed multiplier and divider clock
+ *
+ * @hw:		handle between common and hardware-specific interfaces
+ * @mult:	multiplier
+ * @div:	divider
+ *
+ * Clock with a fixed multiplier and divider. The output frequency is the
+ * parent clock rate divided by div and multiplied by mult.
+ * Implements .recalc_rate, .set_rate and .round_rate
+ */
+
+struct clk_fixed_factor {
+	struct clk_hw	hw;
+	unsigned int	mult;
+	unsigned int	div;
+};
+
+extern struct clk_ops clk_fixed_factor_ops;
+struct clk *clk_register_fixed_factor(struct device *dev, const char *name,
+		const char *parent_name, unsigned long flags,
+		unsigned int mult, unsigned int div);
+
+/**
  * clk_register - allocate a new clock, register it and return an opaque cookie
  * @dev: device that is registering this clock
- * @name: clock name
- * @ops: operations this clock supports
  * @hw: link to hardware-specific clock data
- * @parent_names: array of string names for all possible parents
- * @num_parents: number of possible parents
- * @flags: framework-level hints and quirks
  *
  * clk_register is the primary interface for populating the clock tree with new
  * clock nodes.  It returns a pointer to the newly allocated struct clk which
  * cannot be dereferenced by driver code but may be used in conjuction with the
- * rest of the clock API.
+ * rest of the clock API.  In the event of an error clk_register will return an
+ * error code; drivers must test for an error code after calling clk_register.
  */
-struct clk *clk_register(struct device *dev, const char *name,
-		const struct clk_ops *ops, struct clk_hw *hw,
-		char **parent_names, u8 num_parents, unsigned long flags);
+struct clk *clk_register(struct device *dev, struct clk_hw *hw);
+
+void clk_unregister(struct clk *clk);
 
 /* helper functions */
 const char *__clk_get_name(struct clk *clk);
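struct clk_fixed_factor describes a clock whose rate is the parent rate scaled by a constant mult/div. A plausible user-space sketch of that arithmetic, widening to 64 bits before the divide so the intermediate product cannot overflow; this follows the description above and is not the in-kernel implementation, which lives outside this header:

#include <stdint.h>
#include <stdio.h>

struct fixed_factor {
	unsigned int mult;
	unsigned int div;
};

static unsigned long factor_recalc_rate(const struct fixed_factor *f,
					 unsigned long parent_rate)
{
	return (unsigned long)((uint64_t)parent_rate * f->mult / f->div);
}

int main(void)
{
	/* e.g. a divide-by-two post-divider fed from a 48 MHz parent */
	struct fixed_factor half = { .mult = 1, .div = 2 };

	printf("%lu Hz\n", factor_recalc_rate(&half, 48000000UL));
	return 0;
}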
diff --git a/include/linux/clk.h b/include/linux/clk.h
index 70cf722..ad5c43e 100644
--- a/include/linux/clk.h
+++ b/include/linux/clk.h
@@ -81,7 +81,7 @@
 
 int clk_notifier_unregister(struct clk *clk, struct notifier_block *nb);
 
-#endif /* !CONFIG_COMMON_CLK */
+#endif
 
 /**
  * clk_get - lookup and obtain a reference to a clock producer.
@@ -252,7 +252,7 @@
  * Returns rounded clock rate in Hz, or negative errno.
  */
 long clk_round_rate(struct clk *clk, unsigned long rate);
- 
+
 /**
  * clk_set_rate - set the clock rate for a clock source
  * @clk: clock source
@@ -261,7 +261,7 @@
  * Returns success (0) or negative errno.
  */
 int clk_set_rate(struct clk *clk, unsigned long rate);
- 
+
 /**
  * clk_set_parent - set the parent clock source for this clock
  * @clk: clock source
diff --git a/include/linux/clockchips.h b/include/linux/clockchips.h
index 81e803e..acba8943 100644
--- a/include/linux/clockchips.h
+++ b/include/linux/clockchips.h
@@ -132,6 +132,7 @@
 			       struct clock_event_device *evt);
 extern void clockevents_register_device(struct clock_event_device *dev);
 
+extern void clockevents_config(struct clock_event_device *dev, u32 freq);
 extern void clockevents_config_and_register(struct clock_event_device *dev,
 					    u32 freq, unsigned long min_delta,
 					    unsigned long max_delta);
diff --git a/include/linux/compat.h b/include/linux/compat.h
index 5d46217..4e89039 100644
--- a/include/linux/compat.h
+++ b/include/linux/compat.h
@@ -577,8 +577,7 @@
 		const struct compat_iovec __user *uvector,
 		unsigned long nr_segs,
 		unsigned long fast_segs, struct iovec *fast_pointer,
-		struct iovec **ret_pointer,
-		int check_access);
+		struct iovec **ret_pointer);
 
 extern void __user *compat_alloc_user_space(unsigned long len);
 
diff --git a/include/linux/compiler-gcc.h b/include/linux/compiler-gcc.h
index e5834aa..6a6d7ae 100644
--- a/include/linux/compiler-gcc.h
+++ b/include/linux/compiler-gcc.h
@@ -47,9 +47,9 @@
  */
 #if !defined(CONFIG_ARCH_SUPPORTS_OPTIMIZED_INLINING) || \
     !defined(CONFIG_OPTIMIZE_INLINING) || (__GNUC__ < 4)
-# define inline		inline		__attribute__((always_inline))
-# define __inline__	__inline__	__attribute__((always_inline))
-# define __inline	__inline	__attribute__((always_inline))
+# define inline		inline		__attribute__((always_inline)) notrace
+# define __inline__	__inline__	__attribute__((always_inline)) notrace
+# define __inline	__inline	__attribute__((always_inline)) notrace
 #else
 /* A lot of inline functions can cause havoc with function tracing */
 # define inline		inline		notrace
diff --git a/include/linux/cpu.h b/include/linux/cpu.h
index 7230bb5..2e9b9eb 100644
--- a/include/linux/cpu.h
+++ b/include/linux/cpu.h
@@ -177,6 +177,7 @@
 #define hotcpu_notifier(fn, pri)	cpu_notifier(fn, pri)
 #define register_hotcpu_notifier(nb)	register_cpu_notifier(nb)
 #define unregister_hotcpu_notifier(nb)	unregister_cpu_notifier(nb)
+void clear_tasks_mm_cpumask(int cpu);
 int cpu_down(unsigned int cpu);
 
 #ifdef CONFIG_ARCH_CPU_PROBE_RELEASE
diff --git a/include/linux/cred.h b/include/linux/cred.h
index 917dc5a..ebbed2c 100644
--- a/include/linux/cred.h
+++ b/include/linux/cred.h
@@ -277,17 +277,13 @@
  * @task: The task to query
  *
  * Access the objective credentials of a task.  The caller must hold the RCU
- * readlock or the task must be dead and unable to change its own credentials.
+ * readlock.
  *
  * The result of this function should not be passed directly to get_cred();
  * rather get_task_cred() should be used instead.
  */
-#define __task_cred(task)						\
-	({								\
-		const struct task_struct *__t = (task);			\
-		rcu_dereference_check(__t->real_cred,			\
-				      task_is_dead(__t));		\
-	})
+#define __task_cred(task)	\
+	rcu_dereference((task)->real_cred)
 
 /**
  * get_current_cred - Get the current task's subjective credentials
diff --git a/include/linux/crush/crush.h b/include/linux/crush/crush.h
index 97e435b..7c47508 100644
--- a/include/linux/crush/crush.h
+++ b/include/linux/crush/crush.h
@@ -151,16 +151,6 @@
 	struct crush_bucket **buckets;
 	struct crush_rule **rules;
 
-	/*
-	 * Parent pointers to identify the parent bucket a device or
-	 * bucket in the hierarchy.  If an item appears more than
-	 * once, this is the _last_ time it appeared (where buckets
-	 * are processed in bucket id order, from -1 on down to
-	 * -max_buckets.
-	 */
-	__u32 *bucket_parents;
-	__u32 *device_parents;
-
 	__s32 max_buckets;
 	__u32 max_rules;
 	__s32 max_devices;
@@ -168,8 +158,7 @@
 
 
 /* crush.c */
-extern int crush_get_bucket_item_weight(struct crush_bucket *b, int pos);
-extern void crush_calc_parents(struct crush_map *map);
+extern int crush_get_bucket_item_weight(const struct crush_bucket *b, int pos);
 extern void crush_destroy_bucket_uniform(struct crush_bucket_uniform *b);
 extern void crush_destroy_bucket_list(struct crush_bucket_list *b);
 extern void crush_destroy_bucket_tree(struct crush_bucket_tree *b);
@@ -177,4 +166,9 @@
 extern void crush_destroy_bucket(struct crush_bucket *b);
 extern void crush_destroy(struct crush_map *map);
 
+static inline int crush_calc_tree_node(int i)
+{
+	return ((i+1) << 1)-1;
+}
+
 #endif
diff --git a/include/linux/crush/mapper.h b/include/linux/crush/mapper.h
index c46b99c..71d79f4 100644
--- a/include/linux/crush/mapper.h
+++ b/include/linux/crush/mapper.h
@@ -10,11 +10,10 @@
 
 #include "crush.h"
 
-extern int crush_find_rule(struct crush_map *map, int pool, int type, int size);
-extern int crush_do_rule(struct crush_map *map,
+extern int crush_find_rule(const struct crush_map *map, int ruleset, int type, int size);
+extern int crush_do_rule(const struct crush_map *map,
 			 int ruleno,
 			 int x, int *result, int result_max,
-			 int forcefeed,    /* -1 for none */
-			 __u32 *weights);
+			 const __u32 *weights);
 
 #endif
diff --git a/include/linux/device.h b/include/linux/device.h
index e04f577..161d962 100644
--- a/include/linux/device.h
+++ b/include/linux/device.h
@@ -667,6 +667,10 @@
 
 	struct dma_coherent_mem	*dma_mem; /* internal for coherent mem
 					     override */
+#ifdef CONFIG_CMA
+	struct cma *cma_area;		/* contiguous memory area for dma
+					   allocations */
+#endif
 	/* arch specific additions */
 	struct dev_archdata	archdata;
 
diff --git a/include/linux/dma-buf.h b/include/linux/dma-buf.h
index 3efbfc2..eb48f38 100644
--- a/include/linux/dma-buf.h
+++ b/include/linux/dma-buf.h
@@ -61,6 +61,13 @@
  * 		   This Callback must not sleep.
  * @kmap: maps a page from the buffer into kernel address space.
  * @kunmap: [optional] unmaps a page from the buffer.
+ * @mmap: used to expose the backing storage to userspace. Note that the
+ * 	  mapping needs to be coherent - if the exporter doesn't directly
+ * 	  support this, it needs to fake coherency by shooting down any ptes
+ * 	  when transitioning away from the cpu domain.
+ * @vmap: [optional] creates a virtual mapping for the buffer into kernel
+ *	  address space. Same restrictions as for vmap and friends apply.
+ * @vunmap: [optional] unmaps a vmap from the buffer
  */
 struct dma_buf_ops {
 	int (*attach)(struct dma_buf *, struct device *,
@@ -92,6 +99,11 @@
 	void (*kunmap_atomic)(struct dma_buf *, unsigned long, void *);
 	void *(*kmap)(struct dma_buf *, unsigned long);
 	void (*kunmap)(struct dma_buf *, unsigned long, void *);
+
+	int (*mmap)(struct dma_buf *, struct vm_area_struct *vma);
+
+	void *(*vmap)(struct dma_buf *);
+	void (*vunmap)(struct dma_buf *, void *vaddr);
 };
 
 /**
@@ -167,6 +179,11 @@
 void dma_buf_kunmap_atomic(struct dma_buf *, unsigned long, void *);
 void *dma_buf_kmap(struct dma_buf *, unsigned long);
 void dma_buf_kunmap(struct dma_buf *, unsigned long, void *);
+
+int dma_buf_mmap(struct dma_buf *, struct vm_area_struct *,
+		 unsigned long);
+void *dma_buf_vmap(struct dma_buf *);
+void dma_buf_vunmap(struct dma_buf *, void *vaddr);
 #else
 
 static inline struct dma_buf_attachment *dma_buf_attach(struct dma_buf *dmabuf,
@@ -248,6 +265,22 @@
 				  unsigned long pnum, void *vaddr)
 {
 }
+
+static inline int dma_buf_mmap(struct dma_buf *dmabuf,
+			       struct vm_area_struct *vma,
+			       unsigned long pgoff)
+{
+	return -ENODEV;
+}
+
+static inline void *dma_buf_vmap(struct dma_buf *dmabuf)
+{
+	return NULL;
+}
+
+static inline void dma_buf_vunmap(struct dma_buf *dmabuf, void *vaddr)
+{
+}
 #endif /* CONFIG_DMA_SHARED_BUFFER */
 
 #endif /* __DMA_BUF_H__ */
diff --git a/include/linux/dma-contiguous.h b/include/linux/dma-contiguous.h
new file mode 100644
index 0000000..2f303e4
--- /dev/null
+++ b/include/linux/dma-contiguous.h
@@ -0,0 +1,110 @@
+#ifndef __LINUX_CMA_H
+#define __LINUX_CMA_H
+
+/*
+ * Contiguous Memory Allocator for DMA mapping framework
+ * Copyright (c) 2010-2011 by Samsung Electronics.
+ * Written by:
+ *	Marek Szyprowski <m.szyprowski@samsung.com>
+ *	Michal Nazarewicz <mina86@mina86.com>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation; either version 2 of the
+ * License or (at your option) any later version of the license.
+ */
+
+/*
+ * Contiguous Memory Allocator
+ *
+ *   The Contiguous Memory Allocator (CMA) makes it possible to
+ *   allocate big contiguous chunks of memory after the system has
+ *   booted.
+ *
+ * Why is it needed?
+ *
+ *   Various devices on embedded systems have no scatter-gather and/or
+ *   IO map support and require contiguous blocks of memory to
+ *   operate.  They include devices such as cameras, hardware video
+ *   coders, etc.
+ *
+ *   Such devices often require big memory buffers (a full HD frame
+ *   is, for instance, more than 2 megapixels, i.e. more than 6
+ *   MB of memory), which makes mechanisms such as kmalloc() or
+ *   alloc_page() ineffective.
+ *
+ *   At the same time, a solution where a big memory region is
+ *   reserved for a device is suboptimal since often more memory is
+ *   reserved then strictly required and, moreover, the memory is
+ *   inaccessible to page system even if device drivers don't use it.
+ *
+ *   CMA tries to solve this issue by operating on memory regions
+ *   where only movable pages can be allocated.  This way, the kernel
+ *   can use the memory for pagecache and, when a device driver requests
+ *   it, allocated pages can be migrated.
+ *
+ * Driver usage
+ *
+ *   CMA should not be used by the device drivers directly. It is
+ *   only a helper framework for dma-mapping subsystem.
+ *
+ *   For more information, see kernel-docs in drivers/base/dma-contiguous.c
+ */
+
+#ifdef __KERNEL__
+
+struct cma;
+struct page;
+struct device;
+
+#ifdef CONFIG_CMA
+
+/*
+ * There is always at least the global CMA area and a few optional device
+ * private areas configured in kernel .config.
+ */
+#define MAX_CMA_AREAS	(1 + CONFIG_CMA_AREAS)
+
+extern struct cma *dma_contiguous_default_area;
+
+void dma_contiguous_reserve(phys_addr_t addr_limit);
+int dma_declare_contiguous(struct device *dev, unsigned long size,
+			   phys_addr_t base, phys_addr_t limit);
+
+struct page *dma_alloc_from_contiguous(struct device *dev, int count,
+				       unsigned int order);
+bool dma_release_from_contiguous(struct device *dev, struct page *pages,
+				 int count);
+
+#else
+
+#define MAX_CMA_AREAS	(0)
+
+static inline void dma_contiguous_reserve(phys_addr_t limit) { }
+
+static inline
+int dma_declare_contiguous(struct device *dev, unsigned long size,
+			   phys_addr_t base, phys_addr_t limit)
+{
+	return -ENOSYS;
+}
+
+static inline
+struct page *dma_alloc_from_contiguous(struct device *dev, int count,
+				       unsigned int order)
+{
+	return NULL;
+}
+
+static inline
+bool dma_release_from_contiguous(struct device *dev, struct page *pages,
+				 int count)
+{
+	return false;
+}
+
+#endif
+
+#endif
+
+#endif
diff --git a/include/linux/dmaengine.h b/include/linux/dmaengine.h
index f9a2e5e..56377df 100644
--- a/include/linux/dmaengine.h
+++ b/include/linux/dmaengine.h
@@ -615,11 +615,13 @@
 }
 
 static inline struct dma_async_tx_descriptor *dmaengine_prep_slave_single(
-	struct dma_chan *chan, void *buf, size_t len,
+	struct dma_chan *chan, dma_addr_t buf, size_t len,
 	enum dma_transfer_direction dir, unsigned long flags)
 {
 	struct scatterlist sg;
-	sg_init_one(&sg, buf, len);
+	sg_init_table(&sg, 1);
+	sg_dma_address(&sg) = buf;
+	sg_dma_len(&sg) = len;
 
 	return chan->device->device_prep_slave_sg(chan, &sg, 1,
 						  dir, flags, NULL);
@@ -633,6 +635,18 @@
 						  dir, flags, NULL);
 }
 
+#ifdef CONFIG_RAPIDIO_DMA_ENGINE
+struct rio_dma_ext;
+static inline struct dma_async_tx_descriptor *dmaengine_prep_rio_sg(
+	struct dma_chan *chan, struct scatterlist *sgl,	unsigned int sg_len,
+	enum dma_transfer_direction dir, unsigned long flags,
+	struct rio_dma_ext *rio_ext)
+{
+	return chan->device->device_prep_slave_sg(chan, sgl, sg_len,
+						  dir, flags, rio_ext);
+}
+#endif
+
 static inline struct dma_async_tx_descriptor *dmaengine_prep_dma_cyclic(
 		struct dma_chan *chan, dma_addr_t buf_addr, size_t buf_len,
 		size_t period_len, enum dma_transfer_direction dir)
diff --git a/include/linux/drbd.h b/include/linux/drbd.h
index 9e5f560..47e3d48 100644
--- a/include/linux/drbd.h
+++ b/include/linux/drbd.h
@@ -53,7 +53,7 @@
 
 
 extern const char *drbd_buildtag(void);
-#define REL_VERSION "8.3.11"
+#define REL_VERSION "8.3.13"
 #define API_VERSION 88
 #define PRO_VERSION_MIN 86
 #define PRO_VERSION_MAX 96
@@ -112,8 +112,8 @@
 	ERR_OPEN_MD_DISK	= 105,
 	ERR_DISK_NOT_BDEV	= 107,
 	ERR_MD_NOT_BDEV		= 108,
-	ERR_DISK_TO_SMALL	= 111,
-	ERR_MD_DISK_TO_SMALL	= 112,
+	ERR_DISK_TOO_SMALL	= 111,
+	ERR_MD_DISK_TOO_SMALL	= 112,
 	ERR_BDCLAIM_DISK	= 114,
 	ERR_BDCLAIM_MD_DISK	= 115,
 	ERR_MD_IDX_INVALID	= 116,
diff --git a/include/linux/drbd_limits.h b/include/linux/drbd_limits.h
index 447c367..fb670bf 100644
--- a/include/linux/drbd_limits.h
+++ b/include/linux/drbd_limits.h
@@ -48,6 +48,11 @@
 #define DRBD_TIMEOUT_MAX 600
 #define DRBD_TIMEOUT_DEF 60       /* 6 seconds */
 
+ /* If backing disk takes longer than disk_timeout, mark the disk as failed */
+#define DRBD_DISK_TIMEOUT_MIN 0    /* 0 = disabled */
+#define DRBD_DISK_TIMEOUT_MAX 6000 /* 10 Minutes */
+#define DRBD_DISK_TIMEOUT_DEF 0    /* disabled */
+
   /* active connection retries when C_WF_CONNECTION */
 #define DRBD_CONNECT_INT_MIN 1
 #define DRBD_CONNECT_INT_MAX 120
@@ -60,7 +65,7 @@
 
  /* timeout for the ping packets.*/
 #define DRBD_PING_TIMEO_MIN  1
-#define DRBD_PING_TIMEO_MAX  100
+#define DRBD_PING_TIMEO_MAX  300
 #define DRBD_PING_TIMEO_DEF  5
 
   /* max number of write requests between write barriers */
diff --git a/include/linux/drbd_nl.h b/include/linux/drbd_nl.h
index ab6159e4..a8706f0 100644
--- a/include/linux/drbd_nl.h
+++ b/include/linux/drbd_nl.h
@@ -31,9 +31,12 @@
 	NL_INTEGER(	56,	T_MAY_IGNORE,	max_bio_bvecs)
 	NL_BIT(		57,	T_MAY_IGNORE,	no_disk_barrier)
 	NL_BIT(		58,	T_MAY_IGNORE,	no_disk_drain)
+	NL_INTEGER(	89,	T_MAY_IGNORE,	disk_timeout)
 )
 
-NL_PACKET(detach, 4, )
+NL_PACKET(detach, 4,
+	NL_BIT(		88,	T_MANDATORY,	detach_force)
+)
 
 NL_PACKET(net_conf, 5,
 	NL_STRING(	8,	T_MANDATORY,	my_addr,	128)
diff --git a/include/linux/edac.h b/include/linux/edac.h
index c621d76..91ba3ba 100644
--- a/include/linux/edac.h
+++ b/include/linux/edac.h
@@ -71,6 +71,25 @@
 #define DEV_FLAG_X64		BIT(DEV_X64)
 
 /**
+ * enum hw_event_mc_err_type - type of the detected error
+ *
+ * @HW_EVENT_ERR_CORRECTED:	Corrected Error - Indicates that an ECC
+ *				corrected error was detected
+ * @HW_EVENT_ERR_UNCORRECTED:	Uncorrected Error - Indicates an error that
+ *				can't be corrected by ECC, but it is not
+ *				fatal (maybe it is on an unused memory area,
+ *				or the memory controller could recover from
+ *				it, for example by retrying the operation).
+ * @HW_EVENT_ERR_FATAL:		Fatal Error - Uncorrected error that could not
+ *				be recovered.
+ */
+enum hw_event_mc_err_type {
+	HW_EVENT_ERR_CORRECTED,
+	HW_EVENT_ERR_UNCORRECTED,
+	HW_EVENT_ERR_FATAL,
+};
+
+/**
  * enum mem_type - memory types. For a more detailed reference, please see
  *			http://en.wikipedia.org/wiki/DRAM
  *
@@ -313,38 +332,141 @@
  */
 
 /**
+ * enum edac_mc_layer_type - memory controller hierarchy layer
+ *
+ * @EDAC_MC_LAYER_BRANCH:	memory layer is named "branch"
+ * @EDAC_MC_LAYER_CHANNEL:	memory layer is named "channel"
+ * @EDAC_MC_LAYER_SLOT:		memory layer is named "slot"
+ * @EDAC_MC_LAYER_CHIP_SELECT:	memory layer is named "chip select"
+ *
+ * This enum is used by the drivers to tell edac_mc_sysfs what name should
+ * be used when describing a memory stick location.
+ */
+enum edac_mc_layer_type {
+	EDAC_MC_LAYER_BRANCH,
+	EDAC_MC_LAYER_CHANNEL,
+	EDAC_MC_LAYER_SLOT,
+	EDAC_MC_LAYER_CHIP_SELECT,
+};
+
+/**
+ * struct edac_mc_layer - describes the memory controller hierarchy
+ * @type:		layer type
+ * @size:		number of components per layer. For example,
+ *			if the channel layer has two channels, size = 2
+ * @is_virt_csrow:	This layer is part of the "csrow" when old API
+ *			compatibility mode is enabled. Otherwise, it is
+ *			a channel
+ */
+struct edac_mc_layer {
+	enum edac_mc_layer_type	type;
+	unsigned		size;
+	bool			is_virt_csrow;
+};
+
+/*
+ * Maximum number of layers used by the memory controller to uniquely
+ * identify a single memory stick.
+ * NOTE: Changing this constant requires not only changing the constant
+ * below, but also changing the existing core code, as some of that
+ * code is optimized for 3 layers.
+ */
+#define EDAC_MAX_LAYERS		3
+
+/**
+ * EDAC_DIMM_PTR - Macro to find a pointer inside a pointer array
+ *		   for the element given by [layer0,layer1,layer2] position
+ *
+ * @layers:	a struct edac_mc_layer array, describing how many elements
+ *		were allocated for each layer
+ * @var:	name of the var where we want to get the pointer
+ *		(like mci->dimms)
+ * @n_layers:	Number of layers at the @layers array
+ * @layer0:	layer0 position
+ * @layer1:	layer1 position. Unused if n_layers < 2
+ * @layer2:	layer2 position. Unused if n_layers < 3
+ *
+ * For 1 layer, this macro returns &var[layer0]
+ * For 2 layers, this macro indexes var as if it were a two-dimensional
+ *		array and returns "&var[layer0][layer1]"
+ * For 3 layers, this macro indexes var as if it were a three-dimensional
+ *		array and returns "&var[layer0][layer1][layer2]"
+ *
+ * A loop could be used here to make it more generic, but, as we only have
+ * 3 layers, this is a little faster.
+ * By design, the number of layers can never be 0 or more than 3. If that
+ * ever happens, NULL is returned, causing an OOPS during the memory
+ * allocation routine, which points the developer at the mistake.
+ */
+#define EDAC_DIMM_PTR(layers, var, nlayers, layer0, layer1, layer2) ({	\
+	typeof(var) __p;						\
+	if ((nlayers) == 1)						\
+		__p = &var[layer0];					\
+	else if ((nlayers) == 2)					\
+		__p = &var[(layer1) + ((layers[1]).size * (layer0))];	\
+	else if ((nlayers) == 3)					\
+		__p = &var[(layer2) + ((layers[2]).size * ((layer1) +	\
+			    ((layers[1]).size * (layer0))))];		\
+	else								\
+		__p = NULL;						\
+	__p;								\
+})
+
+
+/* FIXME: add the proper per-location error counts */
+struct dimm_info {
+	char label[EDAC_MC_LABEL_LEN + 1];	/* DIMM label on motherboard */
+
+	/* Memory location data */
+	unsigned location[EDAC_MAX_LAYERS];
+
+	struct mem_ctl_info *mci;	/* the parent */
+
+	u32 grain;		/* granularity of reported error in bytes */
+	enum dev_type dtype;	/* memory device type */
+	enum mem_type mtype;	/* memory dimm type */
+	enum edac_type edac_mode;	/* EDAC mode for this dimm */
+
+	u32 nr_pages;			/* number of pages on this dimm */
+
+	unsigned csrow, cschannel;	/* Points to the old API data */
+};
+
+/**
  * struct rank_info - contains the information for one DIMM rank
  *
  * @chan_idx:	channel number where the rank is (typically, 0 or 1)
  * @ce_count:	number of correctable errors for this rank
- * @label:	DIMM label. Different ranks for the same DIMM should be
- *		filled, on userspace, with the same label.
- *		FIXME: The core currently won't enforce it.
  * @csrow:	A pointer to the chip select row structure (the parent
  *		structure). The location of the rank is given by
  *		the (csrow->csrow_idx, chan_idx) vector.
+ * @dimm:	A pointer to the DIMM structure, where the DIMM label
+ *		information is stored.
+ *
+ * FIXME: Currently, the EDAC core model will assume one DIMM per rank.
+ *	  This is a bad assumption, but it makes this patch easier. Later
+ *	  patches in this series will fix this issue.
  */
 struct rank_info {
 	int chan_idx;
-	u32 ce_count;
-	char label[EDAC_MC_LABEL_LEN + 1];
-	struct csrow_info *csrow;	/* the parent */
+	struct csrow_info *csrow;
+	struct dimm_info *dimm;
+
+	u32 ce_count;		/* Correctable Errors for this csrow */
 };
 
 struct csrow_info {
-	unsigned long first_page;	/* first page number in dimm */
-	unsigned long last_page;	/* last page number in dimm */
+	/* Used only by edac_mc_find_csrow_by_page() */
+	unsigned long first_page;	/* first page number in csrow */
+	unsigned long last_page;	/* last page number in csrow */
 	unsigned long page_mask;	/* used for interleaving -
-					 * 0UL for non intlv
-					 */
-	u32 nr_pages;		/* number of pages in csrow */
-	u32 grain;		/* granularity of reported error in bytes */
-	int csrow_idx;		/* the chip-select row */
-	enum dev_type dtype;	/* memory device type */
+					 * 0UL for non intlv */
+
+	int csrow_idx;			/* the chip-select row */
+
 	u32 ue_count;		/* Uncorrectable Errors for this csrow */
 	u32 ce_count;		/* Correctable Errors for this csrow */
-	enum mem_type mtype;	/* memory csrow type */
-	enum edac_type edac_mode;	/* EDAC mode for this csrow */
+
 	struct mem_ctl_info *mci;	/* the parent */
 
 	struct kobject kobj;	/* sysfs kobject for this csrow */
@@ -426,8 +548,20 @@
 	unsigned long (*ctl_page_to_phys) (struct mem_ctl_info * mci,
 					   unsigned long page);
 	int mc_idx;
-	int nr_csrows;
 	struct csrow_info *csrows;
+	unsigned nr_csrows, num_cschannel;
+
+	/* Memory Controller hierarchy */
+	unsigned n_layers;
+	struct edac_mc_layer *layers;
+	bool mem_is_per_rank;
+
+	/*
+	 * DIMM info. Will eventually remove the entire csrows_info some day
+	 */
+	unsigned tot_dimms;
+	struct dimm_info *dimms;
+
 	/*
 	 * FIXME - what about controllers on other busses? - IDs must be
 	 * unique.  dev pointer should be sufficiently unique, but
@@ -440,12 +574,16 @@
 	const char *dev_name;
 	char proc_name[MC_PROC_NAME_MAX_LEN + 1];
 	void *pvt_info;
-	u32 ue_noinfo_count;	/* Uncorrectable Errors w/o info */
-	u32 ce_noinfo_count;	/* Correctable Errors w/o info */
-	u32 ue_count;		/* Total Uncorrectable Errors for this MC */
-	u32 ce_count;		/* Total Correctable Errors for this MC */
 	unsigned long start_time;	/* mci load start time (in jiffies) */
 
+	/*
+	 * drivers shouldn't access those fields directly, as the core
+	 * already handles that.
+	 */
+	u32 ce_noinfo_count, ue_noinfo_count;
+	u32 ue_mc, ce_mc;
+	u32 *ce_per_layer[EDAC_MAX_LAYERS], *ue_per_layer[EDAC_MAX_LAYERS];
+
 	struct completion complete;
 
 	/* edac sysfs device control */
@@ -458,7 +596,7 @@
 	 * by the low level driver.
 	 *
 	 * Set by the low level driver to provide attributes at the
-	 * controller level, same level as 'ue_count' and 'ce_count' above.
+	 * controller level.
 	 * An array of structures, NULL terminated
 	 *
 	 * If attributes are desired, then set to array of attributes
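EDAC_DIMM_PTR() flattens up to three layer coordinates into one index into the linear dimms array. A small user-space check that the three-layer expression is ordinary row-major indexing; the layer sizes (2 branches, 3 channels, 4 slots) are invented for the test:

#include <assert.h>
#include <stdio.h>

int main(void)
{
	const unsigned int size[3] = { 2, 3, 4 };	/* elements per layer */
	unsigned int l0, l1, l2, idx = 0;

	for (l0 = 0; l0 < size[0]; l0++)
		for (l1 = 0; l1 < size[1]; l1++)
			for (l2 = 0; l2 < size[2]; l2++) {
				/* same expression as the 3-layer branch above */
				unsigned int flat = l2 + size[2] *
						    (l1 + size[1] * l0);

				assert(flat == idx);
				idx++;
			}

	printf("3-layer indexing matches row-major order for %u dimms\n", idx);
	return 0;
}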
diff --git a/include/linux/elevator.h b/include/linux/elevator.h
index 7d4e035..c03af76 100644
--- a/include/linux/elevator.h
+++ b/include/linux/elevator.h
@@ -28,12 +28,13 @@
 
 typedef void (elevator_init_icq_fn) (struct io_cq *);
 typedef void (elevator_exit_icq_fn) (struct io_cq *);
-typedef int (elevator_set_req_fn) (struct request_queue *, struct request *, gfp_t);
+typedef int (elevator_set_req_fn) (struct request_queue *, struct request *,
+				   struct bio *, gfp_t);
 typedef void (elevator_put_req_fn) (struct request *);
 typedef void (elevator_activate_req_fn) (struct request_queue *, struct request *);
 typedef void (elevator_deactivate_req_fn) (struct request_queue *, struct request *);
 
-typedef void *(elevator_init_fn) (struct request_queue *);
+typedef int (elevator_init_fn) (struct request_queue *);
 typedef void (elevator_exit_fn) (struct elevator_queue *);
 
 struct elevator_ops
@@ -129,7 +130,8 @@
 extern int elv_may_queue(struct request_queue *, int);
 extern void elv_abort_queue(struct request_queue *);
 extern void elv_completed_request(struct request_queue *, struct request *);
-extern int elv_set_request(struct request_queue *, struct request *, gfp_t);
+extern int elv_set_request(struct request_queue *q, struct request *rq,
+			   struct bio *bio, gfp_t gfp_mask);
 extern void elv_put_request(struct request_queue *, struct request *);
 extern void elv_drain_elevator(struct request_queue *);
 
diff --git a/include/linux/errno.h b/include/linux/errno.h
index 2d09bfa..e0de516 100644
--- a/include/linux/errno.h
+++ b/include/linux/errno.h
@@ -17,6 +17,7 @@
 #define ENOIOCTLCMD	515	/* No ioctl command */
 #define ERESTART_RESTARTBLOCK 516 /* restart by calling sys_restart_syscall */
 #define EPROBE_DEFER	517	/* Driver requests probe retry */
+#define EOPENSTALE	518	/* open found a stale dentry */
 
 /* Defined for the NFSv3 protocol */
 #define EBADHANDLE	521	/* Illegal NFS file handle */
diff --git a/include/linux/eventfd.h b/include/linux/eventfd.h
index 91bb4f2..3c3ef19 100644
--- a/include/linux/eventfd.h
+++ b/include/linux/eventfd.h
@@ -34,7 +34,7 @@
 struct file *eventfd_fget(int fd);
 struct eventfd_ctx *eventfd_ctx_fdget(int fd);
 struct eventfd_ctx *eventfd_ctx_fileget(struct file *file);
-int eventfd_signal(struct eventfd_ctx *ctx, int n);
+__u64 eventfd_signal(struct eventfd_ctx *ctx, __u64 n);
 ssize_t eventfd_ctx_read(struct eventfd_ctx *ctx, int no_wait, __u64 *cnt);
 int eventfd_ctx_remove_wait_queue(struct eventfd_ctx *ctx, wait_queue_t *wait,
 				  __u64 *cnt);
diff --git a/include/linux/exportfs.h b/include/linux/exportfs.h
index 3a4cef5..12291a7 100644
--- a/include/linux/exportfs.h
+++ b/include/linux/exportfs.h
@@ -165,8 +165,8 @@
  */
 
 struct export_operations {
-	int (*encode_fh)(struct dentry *de, __u32 *fh, int *max_len,
-			int connectable);
+	int (*encode_fh)(struct inode *inode, __u32 *fh, int *max_len,
+			struct inode *parent);
 	struct dentry * (*fh_to_dentry)(struct super_block *sb, struct fid *fid,
 			int fh_len, int fh_type);
 	struct dentry * (*fh_to_parent)(struct super_block *sb, struct fid *fid,
diff --git a/include/linux/fb.h b/include/linux/fb.h
index d31cb68..ac3f1c6 100644
--- a/include/linux/fb.h
+++ b/include/linux/fb.h
@@ -554,6 +554,10 @@
 #define FB_EVENT_FB_UNBIND              0x0E
 /*      CONSOLE-SPECIFIC: remap all consoles to new fb - for vga switcheroo */
 #define FB_EVENT_REMAP_ALL_CONSOLE      0x0F
+/*      A hardware display blank early change occurred */
+#define FB_EARLY_EVENT_BLANK		0x10
+/*      A hardware display blank revert early change occurred */
+#define FB_R_EARLY_EVENT_BLANK		0x11
 
 struct fb_event {
 	struct fb_info *info;
@@ -607,6 +611,7 @@
 	struct mutex lock; /* mutex that protects the page list */
 	struct list_head pagelist; /* list of touched pages */
 	/* callback */
+	void (*first_io)(struct fb_info *info);
 	void (*deferred_io)(struct fb_info *info, struct list_head *pagelist);
 };
 #endif
diff --git a/include/linux/frontswap.h b/include/linux/frontswap.h
new file mode 100644
index 0000000..0e4e2ee
--- /dev/null
+++ b/include/linux/frontswap.h
@@ -0,0 +1,127 @@
+#ifndef _LINUX_FRONTSWAP_H
+#define _LINUX_FRONTSWAP_H
+
+#include <linux/swap.h>
+#include <linux/mm.h>
+#include <linux/bitops.h>
+
+struct frontswap_ops {
+	void (*init)(unsigned);
+	int (*store)(unsigned, pgoff_t, struct page *);
+	int (*load)(unsigned, pgoff_t, struct page *);
+	void (*invalidate_page)(unsigned, pgoff_t);
+	void (*invalidate_area)(unsigned);
+};
+
+extern bool frontswap_enabled;
+extern struct frontswap_ops
+	frontswap_register_ops(struct frontswap_ops *ops);
+extern void frontswap_shrink(unsigned long);
+extern unsigned long frontswap_curr_pages(void);
+extern void frontswap_writethrough(bool);
+
+extern void __frontswap_init(unsigned type);
+extern int __frontswap_store(struct page *page);
+extern int __frontswap_load(struct page *page);
+extern void __frontswap_invalidate_page(unsigned, pgoff_t);
+extern void __frontswap_invalidate_area(unsigned);
+
+#ifdef CONFIG_FRONTSWAP
+
+static inline bool frontswap_test(struct swap_info_struct *sis, pgoff_t offset)
+{
+	bool ret = false;
+
+	if (frontswap_enabled && sis->frontswap_map)
+		ret = test_bit(offset, sis->frontswap_map);
+	return ret;
+}
+
+static inline void frontswap_set(struct swap_info_struct *sis, pgoff_t offset)
+{
+	if (frontswap_enabled && sis->frontswap_map)
+		set_bit(offset, sis->frontswap_map);
+}
+
+static inline void frontswap_clear(struct swap_info_struct *sis, pgoff_t offset)
+{
+	if (frontswap_enabled && sis->frontswap_map)
+		clear_bit(offset, sis->frontswap_map);
+}
+
+static inline void frontswap_map_set(struct swap_info_struct *p,
+				     unsigned long *map)
+{
+	p->frontswap_map = map;
+}
+
+static inline unsigned long *frontswap_map_get(struct swap_info_struct *p)
+{
+	return p->frontswap_map;
+}
+#else
+/* all inline routines become no-ops and all externs are ignored */
+
+#define frontswap_enabled (0)
+
+static inline bool frontswap_test(struct swap_info_struct *sis, pgoff_t offset)
+{
+	return false;
+}
+
+static inline void frontswap_set(struct swap_info_struct *sis, pgoff_t offset)
+{
+}
+
+static inline void frontswap_clear(struct swap_info_struct *sis, pgoff_t offset)
+{
+}
+
+static inline void frontswap_map_set(struct swap_info_struct *p,
+				     unsigned long *map)
+{
+}
+
+static inline unsigned long *frontswap_map_get(struct swap_info_struct *p)
+{
+	return NULL;
+}
+#endif
+
+static inline int frontswap_store(struct page *page)
+{
+	int ret = -1;
+
+	if (frontswap_enabled)
+		ret = __frontswap_store(page);
+	return ret;
+}
+
+static inline int frontswap_load(struct page *page)
+{
+	int ret = -1;
+
+	if (frontswap_enabled)
+		ret = __frontswap_load(page);
+	return ret;
+}
+
+static inline void frontswap_invalidate_page(unsigned type, pgoff_t offset)
+{
+	if (frontswap_enabled)
+		__frontswap_invalidate_page(type, offset);
+}
+
+static inline void frontswap_invalidate_area(unsigned type)
+{
+	if (frontswap_enabled)
+		__frontswap_invalidate_area(type);
+}
+
+static inline void frontswap_init(unsigned type)
+{
+	if (frontswap_enabled)
+		__frontswap_init(type);
+}
+
+#endif /* _LINUX_FRONTSWAP_H */
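
The new frontswap.h above defines the backend contract and the inline wrappers the swap code calls. As a hedged illustration only (every name below is hypothetical, not taken from the patch), a backend fills in a frontswap_ops table and hands it to frontswap_register_ops(), which returns whatever ops were previously installed:

#include <linux/frontswap.h>
#include <linux/module.h>

/* Illustrative no-op backend; all identifiers here are invented. */
static void ex_init(unsigned type) { }

static int ex_store(unsigned type, pgoff_t offset, struct page *page)
{
	return -1;	/* reject: the page falls through to the real swap device */
}

static int ex_load(unsigned type, pgoff_t offset, struct page *page)
{
	return -1;	/* miss: nothing stored for this (type, offset) */
}

static void ex_invalidate_page(unsigned type, pgoff_t offset) { }
static void ex_invalidate_area(unsigned type) { }

static struct frontswap_ops ex_ops = {
	.init		 = ex_init,
	.store		 = ex_store,
	.load		 = ex_load,
	.invalidate_page = ex_invalidate_page,
	.invalidate_area = ex_invalidate_area,
};

static int __init ex_frontswap_init(void)
{
	/* The return value is the previously registered ops, if any. */
	struct frontswap_ops old = frontswap_register_ops(&ex_ops);

	(void)old;
	return 0;
}
module_init(ex_frontswap_init);
MODULE_LICENSE("GPL");
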
diff --git a/include/linux/fs.h b/include/linux/fs.h
index c0e5337..17fd887 100644
--- a/include/linux/fs.h
+++ b/include/linux/fs.h
@@ -173,6 +173,15 @@
 #define WRITE_FUA		(WRITE | REQ_SYNC | REQ_NOIDLE | REQ_FUA)
 #define WRITE_FLUSH_FUA		(WRITE | REQ_SYNC | REQ_NOIDLE | REQ_FLUSH | REQ_FUA)
 
+
+/*
+ * Flag for rw_copy_check_uvector and compat_rw_copy_check_uvector
+ * that indicates that they should check that the contents of the iovec
+ * are valid, but not check the memory that the iovec elements
+ * point to.
+ */
+#define CHECK_IOVEC_ONLY -1
+
 #define SEL_IN		1
 #define SEL_OUT		2
 #define SEL_EX		4
@@ -793,13 +802,14 @@
 		unsigned int __i_nlink;
 	};
 	dev_t			i_rdev;
+	loff_t			i_size;
 	struct timespec		i_atime;
 	struct timespec		i_mtime;
 	struct timespec		i_ctime;
 	spinlock_t		i_lock;	/* i_blocks, i_bytes, maybe i_size */
 	unsigned short          i_bytes;
+	unsigned int		i_blkbits;
 	blkcnt_t		i_blocks;
-	loff_t			i_size;
 
 #ifdef __NEED_I_SIZE_ORDERED
 	seqcount_t		i_size_seqcount;
@@ -819,9 +829,8 @@
 		struct list_head	i_dentry;
 		struct rcu_head		i_rcu;
 	};
-	atomic_t		i_count;
-	unsigned int		i_blkbits;
 	u64			i_version;
+	atomic_t		i_count;
 	atomic_t		i_dio_count;
 	atomic_t		i_writecount;
 	const struct file_operations	*i_fop;	/* former ->i_op->default_file_ops */
@@ -1681,9 +1690,9 @@
 	ssize_t (*getxattr) (struct dentry *, const char *, void *, size_t);
 	ssize_t (*listxattr) (struct dentry *, char *, size_t);
 	int (*removexattr) (struct dentry *, const char *);
-	void (*truncate_range)(struct inode *, loff_t, loff_t);
 	int (*fiemap)(struct inode *, struct fiemap_extent_info *, u64 start,
 		      u64 len);
+	int (*update_time)(struct inode *, struct timespec *, int);
 } ____cacheline_aligned;
 
 struct seq_file;
@@ -1691,8 +1700,7 @@
 ssize_t rw_copy_check_uvector(int type, const struct iovec __user * uvector,
 			      unsigned long nr_segs, unsigned long fast_segs,
 			      struct iovec *fast_pointer,
-			      struct iovec **ret_pointer,
-			      int check_access);
+			      struct iovec **ret_pointer);
 
 extern ssize_t vfs_read(struct file *, char __user *, size_t, loff_t *);
 extern ssize_t vfs_write(struct file *, const char __user *, size_t, loff_t *);
@@ -1764,8 +1772,8 @@
  * I_FREEING		Set when inode is about to be freed but still has dirty
  *			pages or buffers attached or the inode itself is still
  *			dirty.
- * I_CLEAR		Added by end_writeback().  In this state the inode is clean
- *			and can be destroyed.  Inode keeps I_FREEING.
+ * I_CLEAR		Added by clear_inode().  In this state the inode is
+ *			clean and can be destroyed.  Inode keeps I_FREEING.
  *
  *			Inodes that are I_WILL_FREE, I_FREEING or I_CLEAR are
  *			prohibited for many purposes.  iget() must wait for
@@ -1773,9 +1781,10 @@
  *			anew.  Other functions will just ignore such inodes,
  *			if appropriate.  I_NEW is used for waiting.
  *
- * I_SYNC		Synchonized write of dirty inode data.  The bits is
- *			set during data writeback, and cleared with a wakeup
- *			on the bit address once it is done.
+ * I_SYNC		Writeback of inode is running. The bit is set during
+ *			data writeback, and cleared with a wakeup on the bit
+ *			address once it is done. The bit is also used to pin
+ *			the inode in memory for the flusher thread.
  *
  * I_REFERENCED		Marks the inode as recently references on the LRU list.
  *
@@ -1842,6 +1851,13 @@
        spin_unlock(&inode->i_lock);
 }
 
+enum file_time_flags {
+	S_ATIME = 1,
+	S_MTIME = 2,
+	S_CTIME = 4,
+	S_VERSION = 8,
+};
+
 extern void touch_atime(struct path *);
 static inline void file_accessed(struct file *file)
 {
@@ -2349,7 +2365,7 @@
 
 extern void __iget(struct inode * inode);
 extern void iget_failed(struct inode *);
-extern void end_writeback(struct inode *);
+extern void clear_inode(struct inode *);
 extern void __destroy_inode(struct inode *);
 extern struct inode *new_inode_pseudo(struct super_block *sb);
 extern struct inode *new_inode(struct super_block *sb);
@@ -2453,8 +2469,6 @@
 };
 
 void dio_end_io(struct bio *bio, int error);
-void inode_dio_wait(struct inode *inode);
-void inode_dio_done(struct inode *inode);
 
 ssize_t __blockdev_direct_IO(int rw, struct kiocb *iocb, struct inode *inode,
 	struct block_device *bdev, const struct iovec *iov, loff_t offset,
@@ -2469,12 +2483,11 @@
 				    offset, nr_segs, get_block, NULL, NULL,
 				    DIO_LOCKING | DIO_SKIP_HOLES);
 }
-#else
-static inline void inode_dio_wait(struct inode *inode)
-{
-}
 #endif
 
+void inode_dio_wait(struct inode *inode);
+void inode_dio_done(struct inode *inode);
+
 extern const struct file_operations generic_ro_fops;
 
 #define special_file(m) (S_ISCHR(m)||S_ISBLK(m)||S_ISFIFO(m)||S_ISSOCK(m))
@@ -2578,7 +2591,7 @@
 extern int inode_newsize_ok(const struct inode *, loff_t offset);
 extern void setattr_copy(struct inode *inode, const struct iattr *attr);
 
-extern void file_update_time(struct file *file);
+extern int file_update_time(struct file *file);
 
 extern int generic_show_options(struct seq_file *m, struct dentry *root);
 extern void save_mount_options(struct super_block *sb, char *options);
diff --git a/include/linux/fsl/mxs-dma.h b/include/linux/fsl/mxs-dma.h
index 203d7c4..55d8702 100644
--- a/include/linux/fsl/mxs-dma.h
+++ b/include/linux/fsl/mxs-dma.h
@@ -15,14 +15,6 @@
 	int chan_irq;
 };
 
-static inline int mxs_dma_is_apbh(struct dma_chan *chan)
-{
-	return !strcmp(dev_name(chan->device->dev), "mxs-dma-apbh");
-}
-
-static inline int mxs_dma_is_apbx(struct dma_chan *chan)
-{
-	return !strcmp(dev_name(chan->device->dev), "mxs-dma-apbx");
-}
-
+extern int mxs_dma_is_apbh(struct dma_chan *chan);
+extern int mxs_dma_is_apbx(struct dma_chan *chan);
 #endif /* __MACH_MXS_DMA_H__ */
diff --git a/include/linux/fsnotify_backend.h b/include/linux/fsnotify_backend.h
index 91d0e0a3..63d966d 100644
--- a/include/linux/fsnotify_backend.h
+++ b/include/linux/fsnotify_backend.h
@@ -60,7 +60,7 @@
 #define FS_EVENTS_POSS_ON_CHILD   (FS_ACCESS | FS_MODIFY | FS_ATTRIB |\
 				   FS_CLOSE_WRITE | FS_CLOSE_NOWRITE | FS_OPEN |\
 				   FS_MOVED_FROM | FS_MOVED_TO | FS_CREATE |\
-				   FS_DELETE)
+				   FS_DELETE | FS_OPEN_PERM | FS_ACCESS_PERM)
 
 #define FS_MOVE			(FS_MOVED_FROM | FS_MOVED_TO)
 
diff --git a/include/linux/fuse.h b/include/linux/fuse.h
index 8f2ab8f..9303348 100644
--- a/include/linux/fuse.h
+++ b/include/linux/fuse.h
@@ -54,6 +54,9 @@
  * 7.18
  *  - add FUSE_IOCTL_DIR flag
  *  - add FUSE_NOTIFY_DELETE
+ *
+ * 7.19
+ *  - add FUSE_FALLOCATE
  */
 
 #ifndef _LINUX_FUSE_H
@@ -85,7 +88,7 @@
 #define FUSE_KERNEL_VERSION 7
 
 /** Minor version number of this interface */
-#define FUSE_KERNEL_MINOR_VERSION 18
+#define FUSE_KERNEL_MINOR_VERSION 19
 
 /** The node ID of the root inode */
 #define FUSE_ROOT_ID 1
@@ -278,6 +281,7 @@
 	FUSE_POLL          = 40,
 	FUSE_NOTIFY_REPLY  = 41,
 	FUSE_BATCH_FORGET  = 42,
+	FUSE_FALLOCATE     = 43,
 
 	/* CUSE specific operations */
 	CUSE_INIT          = 4096,
@@ -571,6 +575,14 @@
 	__u64	kh;
 };
 
+struct fuse_fallocate_in {
+	__u64	fh;
+	__u64	offset;
+	__u64	length;
+	__u32	mode;
+	__u32	padding;
+};
+
 struct fuse_in_header {
 	__u32	len;
 	__u32	opcode;
diff --git a/include/linux/genetlink.h b/include/linux/genetlink.h
index 73c28de..7a11401 100644
--- a/include/linux/genetlink.h
+++ b/include/linux/genetlink.h
@@ -110,6 +110,9 @@
 #define genl_dereference(p)					\
 	rcu_dereference_protected(p, lockdep_genl_is_held())
 
+#define MODULE_ALIAS_GENL_FAMILY(family)\
+ MODULE_ALIAS_NET_PF_PROTO_NAME(PF_NETLINK, NETLINK_GENERIC, "-family-" family)
+
 #endif /* __KERNEL__ */
 
 #endif	/* __LINUX_GENERIC_NETLINK_H */
diff --git a/include/linux/gfp.h b/include/linux/gfp.h
index 581e74b..1e49be4 100644
--- a/include/linux/gfp.h
+++ b/include/linux/gfp.h
@@ -391,4 +391,16 @@
 }
 #endif /* CONFIG_PM_SLEEP */
 
+#ifdef CONFIG_CMA
+
+/* The below functions must be run on a range from a single zone. */
+extern int alloc_contig_range(unsigned long start, unsigned long end,
+			      unsigned migratetype);
+extern void free_contig_range(unsigned long pfn, unsigned nr_pages);
+
+/* CMA stuff */
+extern void init_cma_reserved_pageblock(struct page *page);
+
+#endif
+
 #endif /* __LINUX_GFP_H */
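
A hedged sketch of how the new CONFIG_CMA interfaces pair up; the pfn range, the helper name, and the use of MIGRATE_CMA as the migratetype are illustrative assumptions, not taken from the patch:

#include <linux/gfp.h>

/* Hypothetical helper: grab 'nr' physically contiguous pages starting at
 * 'start_pfn'. The whole range must lie within a single zone. */
static int ex_grab_contig(unsigned long start_pfn, unsigned long nr)
{
	int ret;

	ret = alloc_contig_range(start_pfn, start_pfn + nr, MIGRATE_CMA);
	if (ret)
		return ret;

	/* ... use the pages ... */

	free_contig_range(start_pfn, nr);
	return 0;
}
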
diff --git a/include/linux/huge_mm.h b/include/linux/huge_mm.h
index c8af7a2..4c59b11 100644
--- a/include/linux/huge_mm.h
+++ b/include/linux/huge_mm.h
@@ -59,6 +59,8 @@
 #define HPAGE_PMD_MASK HPAGE_MASK
 #define HPAGE_PMD_SIZE HPAGE_SIZE
 
+extern bool is_vma_temporary_stack(struct vm_area_struct *vma);
+
 #define transparent_hugepage_enabled(__vma)				\
 	((transparent_hugepage_flags &					\
 	  (1<<TRANSPARENT_HUGEPAGE_FLAG) ||				\
diff --git a/include/linux/hugetlb.h b/include/linux/hugetlb.h
index 000837e..d5d6bbe 100644
--- a/include/linux/hugetlb.h
+++ b/include/linux/hugetlb.h
@@ -284,6 +284,14 @@
 
 #include <asm/hugetlb.h>
 
+#ifndef arch_make_huge_pte
+static inline pte_t arch_make_huge_pte(pte_t entry, struct vm_area_struct *vma,
+				       struct page *page, int writable)
+{
+	return entry;
+}
+#endif
+
 static inline struct hstate *page_hstate(struct page *page)
 {
 	return size_to_hstate(PAGE_SIZE << compound_order(page));
diff --git a/include/linux/gpio-i2cmux.h b/include/linux/i2c-mux-gpio.h
similarity index 74%
rename from include/linux/gpio-i2cmux.h
rename to include/linux/i2c-mux-gpio.h
index 4a333bb..a36343a3 100644
--- a/include/linux/gpio-i2cmux.h
+++ b/include/linux/i2c-mux-gpio.h
@@ -1,5 +1,5 @@
 /*
- * gpio-i2cmux interface to platform code
+ * i2c-mux-gpio interface to platform code
  *
  * Peter Korsgaard <peter.korsgaard@barco.com>
  *
@@ -8,14 +8,14 @@
  * published by the Free Software Foundation.
  */
 
-#ifndef _LINUX_GPIO_I2CMUX_H
-#define _LINUX_GPIO_I2CMUX_H
+#ifndef _LINUX_I2C_MUX_GPIO_H
+#define _LINUX_I2C_MUX_GPIO_H
 
 /* MUX has no specific idle mode */
-#define GPIO_I2CMUX_NO_IDLE	((unsigned)-1)
+#define I2C_MUX_GPIO_NO_IDLE	((unsigned)-1)
 
 /**
- * struct gpio_i2cmux_platform_data - Platform-dependent data for gpio-i2cmux
+ * struct i2c_mux_gpio_platform_data - Platform-dependent data for i2c-mux-gpio
  * @parent: Parent I2C bus adapter number
  * @base_nr: Base I2C bus number to number adapters from or zero for dynamic
  * @values: Array of bitmasks of GPIO settings (low/high) for each
@@ -25,7 +25,7 @@
  * @n_gpios: Number of GPIOs used to control MUX
  * @idle: Bitmask to write to MUX when idle or GPIO_I2CMUX_NO_IDLE if not used
  */
-struct gpio_i2cmux_platform_data {
+struct i2c_mux_gpio_platform_data {
 	int parent;
 	int base_nr;
 	const unsigned *values;
@@ -35,4 +35,4 @@
 	unsigned idle;
 };
 
-#endif /* _LINUX_GPIO_I2CMUX_H */
+#endif /* _LINUX_I2C_MUX_GPIO_H */
diff --git a/include/linux/i2c-mux-pinctrl.h b/include/linux/i2c-mux-pinctrl.h
new file mode 100644
index 0000000..a65c864
--- /dev/null
+++ b/include/linux/i2c-mux-pinctrl.h
@@ -0,0 +1,41 @@
+/*
+ * i2c-mux-pinctrl platform data
+ *
+ * Copyright (c) 2012, NVIDIA CORPORATION.  All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program.  If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#ifndef _LINUX_I2C_MUX_PINCTRL_H
+#define _LINUX_I2C_MUX_PINCTRL_H
+
+/**
+ * struct i2c_mux_pinctrl_platform_data - Platform data for i2c-mux-pinctrl
+ * @parent_bus_num: Parent I2C bus number
+ * @base_bus_num: Base I2C bus number for the child busses. 0 for dynamic.
+ * @bus_count: Number of child busses. Also the number of elements in
+ *	@pinctrl_states
+ * @pinctrl_states: The names of the pinctrl states to select for each child bus
+ * @pinctrl_state_idle: The pinctrl state to select when no child bus is being
+ *	accessed. If NULL, the most recently used pinctrl state will be left
+ *	selected.
+ */
+struct i2c_mux_pinctrl_platform_data {
+	int parent_bus_num;
+	int base_bus_num;
+	int bus_count;
+	const char **pinctrl_states;
+	const char *pinctrl_state_idle;
+};
+
+#endif
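
As a sketch of how a board file might hand this platform data to the mux driver; the bus numbers, state names, and the "i2c-mux-pinctrl" device name are assumptions for illustration:

#include <linux/i2c-mux-pinctrl.h>
#include <linux/platform_device.h>

static const char *ex_mux_states[] = { "i2c-bus-a", "i2c-bus-b" };

static struct i2c_mux_pinctrl_platform_data ex_mux_pdata = {
	.parent_bus_num	    = 0,
	.base_bus_num	    = 0,	/* 0 = number the child busses dynamically */
	.bus_count	    = 2,
	.pinctrl_states	    = ex_mux_states,
	.pinctrl_state_idle = NULL,	/* keep the last selected state when idle */
};

static struct platform_device ex_mux_device = {
	.name	= "i2c-mux-pinctrl",	/* assumed driver name */
	.id	= -1,
	.dev	= { .platform_data = &ex_mux_pdata },
};
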
diff --git a/include/linux/i2c-mux.h b/include/linux/i2c-mux.h
index 747f0cd..c790838 100644
--- a/include/linux/i2c-mux.h
+++ b/include/linux/i2c-mux.h
@@ -34,7 +34,8 @@
  * mux control.
  */
 struct i2c_adapter *i2c_add_mux_adapter(struct i2c_adapter *parent,
-				void *mux_dev, u32 force_nr, u32 chan_id,
+				struct device *mux_dev,
+				void *mux_priv, u32 force_nr, u32 chan_id,
 				int (*select) (struct i2c_adapter *,
 					       void *mux_dev, u32 chan_id),
 				int (*deselect) (struct i2c_adapter *,
diff --git a/include/linux/i2c.h b/include/linux/i2c.h
index 195d8b3..ddfa041 100644
--- a/include/linux/i2c.h
+++ b/include/linux/i2c.h
@@ -232,6 +232,7 @@
 #define to_i2c_client(d) container_of(d, struct i2c_client, dev)
 
 extern struct i2c_client *i2c_verify_client(struct device *dev);
+extern struct i2c_adapter *i2c_verify_adapter(struct device *dev);
 
 static inline struct i2c_client *kobj_to_i2c_client(struct kobject *kobj)
 {
@@ -540,7 +541,7 @@
 	__u16 flags;
 #define I2C_M_TEN		0x0010	/* this is a ten bit chip address */
 #define I2C_M_RD		0x0001	/* read data, from slave to master */
-#define I2C_M_NOSTART		0x4000	/* if I2C_FUNC_PROTOCOL_MANGLING */
+#define I2C_M_NOSTART		0x4000	/* if I2C_FUNC_NOSTART */
 #define I2C_M_REV_DIR_ADDR	0x2000	/* if I2C_FUNC_PROTOCOL_MANGLING */
 #define I2C_M_IGNORE_NAK	0x1000	/* if I2C_FUNC_PROTOCOL_MANGLING */
 #define I2C_M_NO_RD_ACK		0x0800	/* if I2C_FUNC_PROTOCOL_MANGLING */
@@ -553,8 +554,9 @@
 
 #define I2C_FUNC_I2C			0x00000001
 #define I2C_FUNC_10BIT_ADDR		0x00000002
-#define I2C_FUNC_PROTOCOL_MANGLING	0x00000004 /* I2C_M_NOSTART etc. */
+#define I2C_FUNC_PROTOCOL_MANGLING	0x00000004 /* I2C_M_IGNORE_NAK etc. */
 #define I2C_FUNC_SMBUS_PEC		0x00000008
+#define I2C_FUNC_NOSTART		0x00000010 /* I2C_M_NOSTART */
 #define I2C_FUNC_SMBUS_BLOCK_PROC_CALL	0x00008000 /* SMBus 2.0 */
 #define I2C_FUNC_SMBUS_QUICK		0x00010000
 #define I2C_FUNC_SMBUS_READ_BYTE	0x00020000
diff --git a/include/linux/init_task.h b/include/linux/init_task.h
index e4baff5..9e65eff 100644
--- a/include/linux/init_task.h
+++ b/include/linux/init_task.h
@@ -149,6 +149,7 @@
 	.normal_prio	= MAX_PRIO-20,					\
 	.policy		= SCHED_NORMAL,					\
 	.cpus_allowed	= CPU_MASK_ALL,					\
+	.nr_cpus_allowed= NR_CPUS,					\
 	.mm		= NULL,						\
 	.active_mm	= &init_mm,					\
 	.se		= {						\
@@ -157,7 +158,6 @@
 	.rt		= {						\
 		.run_list	= LIST_HEAD_INIT(tsk.rt.run_list),	\
 		.time_slice	= RR_TIMESLICE,				\
-		.nr_cpus_allowed = NR_CPUS,				\
 	},								\
 	.tasks		= LIST_HEAD_INIT(tsk.tasks),			\
 	INIT_PUSHABLE_TASKS(tsk)					\
diff --git a/include/linux/interrupt.h b/include/linux/interrupt.h
index c911715..e68a8e5 100644
--- a/include/linux/interrupt.h
+++ b/include/linux/interrupt.h
@@ -142,8 +142,6 @@
 extern int __must_check
 request_percpu_irq(unsigned int irq, irq_handler_t handler,
 		   const char *devname, void __percpu *percpu_dev_id);
-
-extern void exit_irq_thread(void);
 #else
 
 extern int __must_check
@@ -177,8 +175,6 @@
 {
 	return request_irq(irq, handler, 0, devname, percpu_dev_id);
 }
-
-static inline void exit_irq_thread(void) { }
 #endif
 
 extern void free_irq(unsigned int, void *);
diff --git a/include/linux/iocontext.h b/include/linux/iocontext.h
index 1a30180..df38db2 100644
--- a/include/linux/iocontext.h
+++ b/include/linux/iocontext.h
@@ -6,11 +6,7 @@
 #include <linux/workqueue.h>
 
 enum {
-	ICQ_IOPRIO_CHANGED	= 1 << 0,
-	ICQ_CGROUP_CHANGED	= 1 << 1,
 	ICQ_EXITED		= 1 << 2,
-
-	ICQ_CHANGED_MASK	= ICQ_IOPRIO_CHANGED | ICQ_CGROUP_CHANGED,
 };
 
 /*
@@ -100,6 +96,7 @@
  */
 struct io_context {
 	atomic_long_t refcount;
+	atomic_t active_ref;
 	atomic_t nr_tasks;
 
 	/* all the fields below are protected by this lock */
@@ -120,29 +117,37 @@
 	struct work_struct release_work;
 };
 
-static inline struct io_context *ioc_task_link(struct io_context *ioc)
+/**
+ * get_io_context_active - get active reference on ioc
+ * @ioc: ioc of interest
+ *
+ * Only iocs with active reference can issue new IOs.  This function
+ * acquires an active reference on @ioc.  The caller must already have an
+ * active reference on @ioc.
+ */
+static inline void get_io_context_active(struct io_context *ioc)
 {
-	/*
-	 * if ref count is zero, don't allow sharing (ioc is going away, it's
-	 * a race).
-	 */
-	if (ioc && atomic_long_inc_not_zero(&ioc->refcount)) {
-		atomic_inc(&ioc->nr_tasks);
-		return ioc;
-	}
+	WARN_ON_ONCE(atomic_long_read(&ioc->refcount) <= 0);
+	WARN_ON_ONCE(atomic_read(&ioc->active_ref) <= 0);
+	atomic_long_inc(&ioc->refcount);
+	atomic_inc(&ioc->active_ref);
+}
 
-	return NULL;
+static inline void ioc_task_link(struct io_context *ioc)
+{
+	get_io_context_active(ioc);
+
+	WARN_ON_ONCE(atomic_read(&ioc->nr_tasks) <= 0);
+	atomic_inc(&ioc->nr_tasks);
 }
 
 struct task_struct;
 #ifdef CONFIG_BLOCK
 void put_io_context(struct io_context *ioc);
+void put_io_context_active(struct io_context *ioc);
 void exit_io_context(struct task_struct *task);
 struct io_context *get_task_io_context(struct task_struct *task,
 				       gfp_t gfp_flags, int node);
-void ioc_ioprio_changed(struct io_context *ioc, int ioprio);
-void ioc_cgroup_changed(struct io_context *ioc);
-unsigned int icq_get_changed(struct io_cq *icq);
 #else
 struct io_context;
 static inline void put_io_context(struct io_context *ioc) { }
diff --git a/include/linux/iommu.h b/include/linux/iommu.h
index d937580..450293f 100644
--- a/include/linux/iommu.h
+++ b/include/linux/iommu.h
@@ -35,12 +35,13 @@
 #define IOMMU_FAULT_WRITE	0x1
 
 typedef int (*iommu_fault_handler_t)(struct iommu_domain *,
-				struct device *, unsigned long, int);
+			struct device *, unsigned long, int, void *);
 
 struct iommu_domain {
 	struct iommu_ops *ops;
 	void *priv;
 	iommu_fault_handler_t handler;
+	void *handler_token;
 };
 
 #define IOMMU_CAP_CACHE_COHERENCY	0x1
@@ -95,7 +96,7 @@
 extern int iommu_domain_has_cap(struct iommu_domain *domain,
 				unsigned long cap);
 extern void iommu_set_fault_handler(struct iommu_domain *domain,
-					iommu_fault_handler_t handler);
+			iommu_fault_handler_t handler, void *token);
 extern int iommu_device_group(struct device *dev, unsigned int *groupid);
 
 /**
@@ -132,7 +133,8 @@
 	 * invoke it.
 	 */
 	if (domain->handler)
-		ret = domain->handler(domain, dev, iova, flags);
+		ret = domain->handler(domain, dev, iova, flags,
+						domain->handler_token);
 
 	return ret;
 }
@@ -191,7 +193,7 @@
 }
 
 static inline void iommu_set_fault_handler(struct iommu_domain *domain,
-					iommu_fault_handler_t handler)
+				iommu_fault_handler_t handler, void *token)
 {
 }
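
A minimal sketch of the extended fault-handler signature with the new private token; the context structure and all names are placeholders, not part of the patch:

#include <linux/iommu.h>
#include <linux/device.h>

struct ex_fault_ctx {
	struct device *owner;
};

/* Matches the new iommu_fault_handler_t: the token registered below is
 * handed back on every fault report. */
static int ex_iommu_fault(struct iommu_domain *domain, struct device *dev,
			  unsigned long iova, int flags, void *token)
{
	struct ex_fault_ctx *ctx = token;

	dev_err(ctx->owner, "unhandled fault at iova 0x%lx (flags 0x%x)\n",
		iova, flags);
	return -ENOSYS;		/* not handled; let the IOMMU core report it */
}

/* At attach time (domain and ctx already set up):
 *	iommu_set_fault_handler(domain, ex_iommu_fault, ctx);
 */
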
 
diff --git a/include/linux/ioprio.h b/include/linux/ioprio.h
index 76dad48..beb9ce1 100644
--- a/include/linux/ioprio.h
+++ b/include/linux/ioprio.h
@@ -42,26 +42,14 @@
 };
 
 /*
+ * Fallback BE priority
+ */
+#define IOPRIO_NORM	(4)
+
+/*
  * if process has set io priority explicitly, use that. if not, convert
  * the cpu scheduler nice value to an io priority
  */
-#define IOPRIO_NORM	(4)
-static inline int task_ioprio(struct io_context *ioc)
-{
-	if (ioprio_valid(ioc->ioprio))
-		return IOPRIO_PRIO_DATA(ioc->ioprio);
-
-	return IOPRIO_NORM;
-}
-
-static inline int task_ioprio_class(struct io_context *ioc)
-{
-	if (ioprio_valid(ioc->ioprio))
-		return IOPRIO_PRIO_CLASS(ioc->ioprio);
-
-	return IOPRIO_CLASS_BE;
-}
-
 static inline int task_nice_ioprio(struct task_struct *task)
 {
 	return (task_nice(task) + 20) / 5;
diff --git a/include/linux/ipc_namespace.h b/include/linux/ipc_namespace.h
index 8a297a5..5499c92 100644
--- a/include/linux/ipc_namespace.h
+++ b/include/linux/ipc_namespace.h
@@ -62,6 +62,8 @@
 	unsigned int    mq_queues_max;   /* initialized to DFLT_QUEUESMAX */
 	unsigned int    mq_msg_max;      /* initialized to DFLT_MSGMAX */
 	unsigned int    mq_msgsize_max;  /* initialized to DFLT_MSGSIZEMAX */
+	unsigned int    mq_msg_default;
+	unsigned int    mq_msgsize_default;
 
 	/* user_ns which owns the ipc ns */
 	struct user_namespace *user_ns;
@@ -90,11 +92,41 @@
 
 #ifdef CONFIG_POSIX_MQUEUE
 extern int mq_init_ns(struct ipc_namespace *ns);
-/* default values */
-#define DFLT_QUEUESMAX 256     /* max number of message queues */
-#define DFLT_MSGMAX    10      /* max number of messages in each queue */
-#define HARD_MSGMAX    (32768*sizeof(void *)/4)
-#define DFLT_MSGSIZEMAX 8192   /* max message size */
+/*
+ * POSIX Message Queue default values:
+ *
+ * MIN_*: Lowest value an admin can set the maximum unprivileged limit to
+ * DFLT_*MAX: Default values for the maximum unprivileged limits
+ * DFLT_{MSG,MSGSIZE}: Default values used when the user doesn't supply
+ *   an attribute to the open call and the queue must be created
+ * HARD_*: Highest value the maximums can be set to.  These are enforced
+ *   on CAP_SYS_RESOURCE apps as well making them inviolate (so make them
+ *   suitably high)
+ *
+ * POSIX Requirements:
+ *   Per app minimum openable message queues - 8.  This does not map well
+ *     to the fact that we limit the number of queues on a per namespace
+ *     basis instead of a per app basis.  So, make the default high enough
+ *     that no given app should have a hard time opening 8 queues.
+ *   Minimum maximum for HARD_MSGMAX - 32767.  I bumped this to 65536.
+ *   Minimum maximum for HARD_MSGSIZEMAX - POSIX is silent on this.  However,
+ *     we have run into a situation where running applications in the wild
+ *     require this to be at least 5MB, and preferably 10MB, so I set the
+ *     value to 16MB in hopes that this user is the worst of the bunch and
+ *     the new maximum will handle anyone else.  I may have to revisit this
+ *     in the future.
+ */
+#define MIN_QUEUESMAX			1
+#define DFLT_QUEUESMAX		      256
+#define HARD_QUEUESMAX		     1024
+#define MIN_MSGMAX			1
+#define DFLT_MSG		       10U
+#define DFLT_MSGMAX		       10
+#define HARD_MSGMAX		    65536
+#define MIN_MSGSIZEMAX		      128
+#define DFLT_MSGSIZE		     8192U
+#define DFLT_MSGSIZEMAX		     8192
+#define HARD_MSGSIZEMAX	    (16*1024*1024)
 #else
 static inline int mq_init_ns(struct ipc_namespace *ns) { return 0; }
 #endif
diff --git a/include/linux/irq.h b/include/linux/irq.h
index 61f5cec..a5261e3 100644
--- a/include/linux/irq.h
+++ b/include/linux/irq.h
@@ -301,8 +301,6 @@
  * @irq_pm_shutdown:	function called from core code on shutdown once per chip
  * @irq_print_chip:	optional to print special chip info in show_interrupts
  * @flags:		chip specific flags
- *
- * @release:		release function solely used by UML
  */
 struct irq_chip {
 	const char	*name;
diff --git a/include/linux/jbd.h b/include/linux/jbd.h
index d211732..c8f3297 100644
--- a/include/linux/jbd.h
+++ b/include/linux/jbd.h
@@ -479,12 +479,6 @@
 	 * How many handles used this transaction? [t_handle_lock]
 	 */
 	int t_handle_count;
-
-	/*
-	 * This transaction is being forced and some process is
-	 * waiting for it to finish.
-	 */
-	unsigned int t_synchronous_commit:1;
 };
 
 /**
@@ -531,6 +525,8 @@
  *  transaction
  * @j_commit_request: Sequence number of the most recent transaction wanting
  *     commit
+ * @j_commit_waited: Sequence number of the most recent transaction someone
+ *     is waiting for to commit.
  * @j_uuid: Uuid of client object.
  * @j_task: Pointer to the current commit thread for this journal
  * @j_max_transaction_buffers:  Maximum number of metadata buffers to allow in a
@@ -696,6 +692,13 @@
 	tid_t			j_commit_request;
 
 	/*
+	 * Sequence number of the most recent transaction someone is waiting
+	 * for to commit.
+	 * [j_state_lock]
+	 */
+	tid_t                   j_commit_waited;
+
+	/*
 	 * Journal uuid: identifies the object (filesystem, LVM volume etc)
 	 * backed by this journal.  This will eventually be replaced by an array
 	 * of uuids, allowing us to index multiple devices within a single
@@ -861,7 +864,8 @@
 extern int	   journal_recover    (journal_t *journal);
 extern int	   journal_wipe       (journal_t *, int);
 extern int	   journal_skip_recovery	(journal_t *);
-extern void	   journal_update_superblock	(journal_t *, int);
+extern void	   journal_update_sb_log_tail	(journal_t *, tid_t, unsigned int,
+						 int);
 extern void	   journal_abort      (journal_t *, int);
 extern int	   journal_errno      (journal_t *);
 extern void	   journal_ack_err    (journal_t *);
diff --git a/include/linux/jbd2.h b/include/linux/jbd2.h
index 912c30a..f334c7f 100644
--- a/include/linux/jbd2.h
+++ b/include/linux/jbd2.h
@@ -31,6 +31,7 @@
 #include <linux/mutex.h>
 #include <linux/timer.h>
 #include <linux/slab.h>
+#include <crypto/hash.h>
 #endif
 
 #define journal_oom_retry 1
@@ -147,12 +148,24 @@
 #define JBD2_CRC32_CHKSUM   1
 #define JBD2_MD5_CHKSUM     2
 #define JBD2_SHA1_CHKSUM    3
+#define JBD2_CRC32C_CHKSUM  4
 
 #define JBD2_CRC32_CHKSUM_SIZE 4
 
 #define JBD2_CHECKSUM_BYTES (32 / sizeof(u32))
 /*
  * Commit block header for storing transactional checksums:
+ *
+ * NOTE: If FEATURE_COMPAT_CHECKSUM (checksum v1) is set, the h_chksum*
+ * fields are used to store a checksum of the descriptor and data blocks.
+ *
+ * If FEATURE_INCOMPAT_CSUM_V2 (checksum v2) is set, then the h_chksum
+ * field is used to store crc32c(uuid+commit_block).  Each journal metadata
+ * block gets its own checksum, and data block checksums are stored in
+ * journal_block_tag (in the descriptor).  The other h_chksum* fields are
+ * not used.
+ *
+ * Checksum v1 and v2 are mutually exclusive features.
  */
 struct commit_header {
 	__be32		h_magic;
@@ -175,13 +188,19 @@
 typedef struct journal_block_tag_s
 {
 	__be32		t_blocknr;	/* The on-disk block number */
-	__be32		t_flags;	/* See below */
+	__be16		t_checksum;	/* truncated crc32c(uuid+seq+block) */
+	__be16		t_flags;	/* See below */
 	__be32		t_blocknr_high; /* most-significant high 32bits. */
 } journal_block_tag_t;
 
 #define JBD2_TAG_SIZE32 (offsetof(journal_block_tag_t, t_blocknr_high))
 #define JBD2_TAG_SIZE64 (sizeof(journal_block_tag_t))
 
+/* Tail of descriptor block, for checksumming */
+struct jbd2_journal_block_tail {
+	__be32		t_checksum;	/* crc32c(uuid+descr_block) */
+};
+
 /*
  * The revoke descriptor: used on disk to describe a series of blocks to
  * be revoked from the log
@@ -192,6 +211,10 @@
 	__be32		 r_count;	/* Count of bytes used in the block */
 } jbd2_journal_revoke_header_t;
 
+/* Tail of revoke block, for checksumming */
+struct jbd2_journal_revoke_tail {
+	__be32		r_checksum;	/* crc32c(uuid+revoke_block) */
+};
 
 /* Definitions for the journal tag flags word: */
 #define JBD2_FLAG_ESCAPE		1	/* on-disk block is escaped */
@@ -241,7 +264,10 @@
 	__be32	s_max_trans_data;	/* Limit of data blocks per trans. */
 
 /* 0x0050 */
-	__u32	s_padding[44];
+	__u8	s_checksum_type;	/* checksum type */
+	__u8	s_padding2[3];
+	__u32	s_padding[42];
+	__be32	s_checksum;		/* crc32c(superblock) */
 
 /* 0x0100 */
 	__u8	s_users[16*48];		/* ids of all fs'es sharing the log */
@@ -263,13 +289,15 @@
 #define JBD2_FEATURE_INCOMPAT_REVOKE		0x00000001
 #define JBD2_FEATURE_INCOMPAT_64BIT		0x00000002
 #define JBD2_FEATURE_INCOMPAT_ASYNC_COMMIT	0x00000004
+#define JBD2_FEATURE_INCOMPAT_CSUM_V2		0x00000008
 
 /* Features known to this kernel version: */
 #define JBD2_KNOWN_COMPAT_FEATURES	JBD2_FEATURE_COMPAT_CHECKSUM
 #define JBD2_KNOWN_ROCOMPAT_FEATURES	0
 #define JBD2_KNOWN_INCOMPAT_FEATURES	(JBD2_FEATURE_INCOMPAT_REVOKE | \
 					JBD2_FEATURE_INCOMPAT_64BIT | \
-					JBD2_FEATURE_INCOMPAT_ASYNC_COMMIT)
+					JBD2_FEATURE_INCOMPAT_ASYNC_COMMIT | \
+					JBD2_FEATURE_INCOMPAT_CSUM_V2)
 
 #ifdef __KERNEL__
 
@@ -939,6 +967,12 @@
 	 * superblock pointer here
 	 */
 	void *j_private;
+
+	/* Reference to checksum algorithm driver via cryptoapi */
+	struct crypto_shash *j_chksum_driver;
+
+	/* Precomputed journal UUID checksum for seeding other checksums */
+	__u32 j_csum_seed;
 };
 
 /*
@@ -1268,6 +1302,25 @@
 
 extern int jbd_blocks_per_page(struct inode *inode);
 
+static inline u32 jbd2_chksum(journal_t *journal, u32 crc,
+			      const void *address, unsigned int length)
+{
+	struct {
+		struct shash_desc shash;
+		char ctx[crypto_shash_descsize(journal->j_chksum_driver)];
+	} desc;
+	int err;
+
+	desc.shash.tfm = journal->j_chksum_driver;
+	desc.shash.flags = 0;
+	*(u32 *)desc.ctx = crc;
+
+	err = crypto_shash_update(&desc.shash, address, length);
+	BUG_ON(err);
+
+	return *(u32 *)desc.ctx;
+}
+
 #ifdef __KERNEL__
 
 #define buffer_trace_init(bh)	do {} while (0)
diff --git a/include/linux/jbd_common.h b/include/linux/jbd_common.h
index 6230f85..6133679 100644
--- a/include/linux/jbd_common.h
+++ b/include/linux/jbd_common.h
@@ -12,6 +12,7 @@
 	BH_State,		/* Pins most journal_head state */
 	BH_JournalHead,		/* Pins bh->b_private and jh->b_bh */
 	BH_Unshadow,		/* Dummy bit, for BJ_Shadow wakeup filtering */
+	BH_Verified,		/* Metadata block has been verified ok */
 	BH_JBDPrivateStart,	/* First bit available for private use by FS */
 };
 
@@ -24,6 +25,7 @@
 BUFFER_FNS(RevokeValid, revokevalid)
 TAS_BUFFER_FNS(RevokeValid, revokevalid)
 BUFFER_FNS(Freed, freed)
+BUFFER_FNS(Verified, verified)
 
 static inline struct buffer_head *jh2bh(struct journal_head *jh)
 {
diff --git a/include/linux/kallsyms.h b/include/linux/kallsyms.h
index 3875719..6883e19 100644
--- a/include/linux/kallsyms.h
+++ b/include/linux/kallsyms.h
@@ -36,6 +36,7 @@
 
 /* Look up a kernel symbol and return it in a text buffer. */
 extern int sprint_symbol(char *buffer, unsigned long address);
+extern int sprint_symbol_no_offset(char *buffer, unsigned long address);
 extern int sprint_backtrace(char *buffer, unsigned long address);
 
 /* Look up a kernel symbol and print it to the kernel messages. */
@@ -80,6 +81,12 @@
 	return 0;
 }
 
+static inline int sprint_symbol_no_offset(char *buffer, unsigned long addr)
+{
+	*buffer = '\0';
+	return 0;
+}
+
 static inline int sprint_backtrace(char *buffer, unsigned long addr)
 {
 	*buffer = '\0';
diff --git a/include/linux/kcmp.h b/include/linux/kcmp.h
new file mode 100644
index 0000000..2dcd1b3
--- /dev/null
+++ b/include/linux/kcmp.h
@@ -0,0 +1,17 @@
+#ifndef _LINUX_KCMP_H
+#define _LINUX_KCMP_H
+
+/* Comparison type */
+enum kcmp_type {
+	KCMP_FILE,
+	KCMP_VM,
+	KCMP_FILES,
+	KCMP_FS,
+	KCMP_SIGHAND,
+	KCMP_IO,
+	KCMP_SYSVSEM,
+
+	KCMP_TYPES,
+};
+
+#endif /* _LINUX_KCMP_H */
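
The enum above is consumed by the new kcmp() system call from userspace. A hedged usage sketch follows; it assumes the C library's <sys/syscall.h> already defines SYS_kcmp, which may lag behind this kernel release:

#include <sys/syscall.h>
#include <unistd.h>
#include <linux/kcmp.h>

/* Returns 0 if the two processes share one file descriptor table,
 * 1 or 2 for an ordering result, or -1 with errno set on error.
 * idx1/idx2 are unused for KCMP_FILES, so zero is passed. */
static long ex_share_files(pid_t pid1, pid_t pid2)
{
	return syscall(SYS_kcmp, pid1, pid2, KCMP_FILES, 0, 0);
}
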
diff --git a/include/linux/kernel-page-flags.h b/include/linux/kernel-page-flags.h
index 26a6571..a1bdf69 100644
--- a/include/linux/kernel-page-flags.h
+++ b/include/linux/kernel-page-flags.h
@@ -32,6 +32,8 @@
 #define KPF_KSM			21
 #define KPF_THP			22
 
+#ifdef __KERNEL__
+
 /* kernel hacking assistances
  * WARNING: subject to change, never rely on them!
  */
@@ -44,4 +46,6 @@
 #define KPF_ARCH		38
 #define KPF_UNCACHED		39
 
+#endif /* __KERNEL__ */
+
 #endif /* LINUX_KERNEL_PAGE_FLAGS_H */
diff --git a/include/linux/kernel.h b/include/linux/kernel.h
index ec55a3c..e07f5e0 100644
--- a/include/linux/kernel.h
+++ b/include/linux/kernel.h
@@ -35,6 +35,7 @@
 #define LLONG_MAX	((long long)(~0ULL>>1))
 #define LLONG_MIN	(-LLONG_MAX - 1)
 #define ULLONG_MAX	(~0ULL)
+#define SIZE_MAX	(~(size_t)0)
 
 #define STACK_MAGIC	0xdeadbeef
 
diff --git a/include/linux/kexec.h b/include/linux/kexec.h
index 0d7d6a1..37c5f726 100644
--- a/include/linux/kexec.h
+++ b/include/linux/kexec.h
@@ -1,8 +1,58 @@
 #ifndef LINUX_KEXEC_H
 #define LINUX_KEXEC_H
 
-#ifdef CONFIG_KEXEC
+/* kexec system call -  It loads the new kernel to boot into.
+ * kexec does not sync, or unmount filesystems so if you need
+ * that to happen you need to do that yourself.
+ */
+
 #include <linux/types.h>
+
+/* kexec flags for different usage scenarios */
+#define KEXEC_ON_CRASH		0x00000001
+#define KEXEC_PRESERVE_CONTEXT	0x00000002
+#define KEXEC_ARCH_MASK		0xffff0000
+
+/* These values match the ELF architecture values.
+ * Unless there is a good reason that should continue to be the case.
+ */
+#define KEXEC_ARCH_DEFAULT ( 0 << 16)
+#define KEXEC_ARCH_386     ( 3 << 16)
+#define KEXEC_ARCH_X86_64  (62 << 16)
+#define KEXEC_ARCH_PPC     (20 << 16)
+#define KEXEC_ARCH_PPC64   (21 << 16)
+#define KEXEC_ARCH_IA_64   (50 << 16)
+#define KEXEC_ARCH_ARM     (40 << 16)
+#define KEXEC_ARCH_S390    (22 << 16)
+#define KEXEC_ARCH_SH      (42 << 16)
+#define KEXEC_ARCH_MIPS_LE (10 << 16)
+#define KEXEC_ARCH_MIPS    ( 8 << 16)
+
+/* The artificial cap on the number of segments passed to kexec_load. */
+#define KEXEC_SEGMENT_MAX 16
+
+#ifndef __KERNEL__
+/*
+ * This structure is used to hold the arguments that are used when
+ * loading kernel binaries.
+ */
+struct kexec_segment {
+	const void *buf;
+	size_t bufsz;
+	const void *mem;
+	size_t memsz;
+};
+
+/* Load a new kernel image as described by the kexec_segment array
+ * consisting of passed number of segments at the entry-point address.
+ * The flags allow different useage types.
+ */
+extern int kexec_load(void *, size_t, struct kexec_segment *,
+		unsigned long int);
+#endif /* __KERNEL__ */
+
+#ifdef __KERNEL__
+#ifdef CONFIG_KEXEC
 #include <linux/list.h>
 #include <linux/linkage.h>
 #include <linux/compat.h>
@@ -67,11 +117,10 @@
 #define IND_DONE         0x4
 #define IND_SOURCE       0x8
 
-#define KEXEC_SEGMENT_MAX 16
 struct kexec_segment {
 	void __user *buf;
 	size_t bufsz;
-	unsigned long mem;	/* User space sees this as a (void *) ... */
+	unsigned long mem;
 	size_t memsz;
 };
 
@@ -175,25 +224,6 @@
 #define kexec_flush_icache_page(page)
 #endif
 
-#define KEXEC_ON_CRASH		0x00000001
-#define KEXEC_PRESERVE_CONTEXT	0x00000002
-#define KEXEC_ARCH_MASK		0xffff0000
-
-/* These values match the ELF architecture values.
- * Unless there is a good reason that should continue to be the case.
- */
-#define KEXEC_ARCH_DEFAULT ( 0 << 16)
-#define KEXEC_ARCH_386     ( 3 << 16)
-#define KEXEC_ARCH_X86_64  (62 << 16)
-#define KEXEC_ARCH_PPC     (20 << 16)
-#define KEXEC_ARCH_PPC64   (21 << 16)
-#define KEXEC_ARCH_IA_64   (50 << 16)
-#define KEXEC_ARCH_ARM     (40 << 16)
-#define KEXEC_ARCH_S390    (22 << 16)
-#define KEXEC_ARCH_SH      (42 << 16)
-#define KEXEC_ARCH_MIPS_LE (10 << 16)
-#define KEXEC_ARCH_MIPS    ( 8 << 16)
-
 /* List of defined/legal kexec flags */
 #ifndef CONFIG_KEXEC_JUMP
 #define KEXEC_FLAGS    KEXEC_ON_CRASH
@@ -228,4 +258,5 @@
 static inline void crash_kexec(struct pt_regs *regs) { }
 static inline int kexec_should_crash(struct task_struct *p) { return 0; }
 #endif /* CONFIG_KEXEC */
+#endif /* __KERNEL__ */
 #endif /* LINUX_KEXEC_H */
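
For the userspace half of the reorganized header, a hedged sketch of a kexec_load() call; the load address, entry point and image buffer are placeholders, and an actual kexec_load() implementation (for example the syscall wrapper shipped with kexec-tools) is assumed to be linked in:

#include <linux/kexec.h>
#include <stdio.h>

static int ex_load_image(void *entry, void *image, size_t len)
{
	struct kexec_segment seg = {
		.buf   = image,
		.bufsz = len,
		.mem   = (const void *)0x100000,	/* example destination */
		.memsz = len,
	};

	if (kexec_load(entry, 1, &seg, KEXEC_ARCH_DEFAULT) < 0) {
		perror("kexec_load");
		return -1;
	}
	return 0;
}
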
diff --git a/include/linux/key.h b/include/linux/key.h
index 52318007..4cd22ed 100644
--- a/include/linux/key.h
+++ b/include/linux/key.h
@@ -308,9 +308,6 @@
 #ifdef CONFIG_SYSCTL
 extern ctl_table key_sysctls[];
 #endif
-
-extern void key_replace_session_keyring(void);
-
 /*
  * the userspace interface
  */
@@ -334,7 +331,6 @@
 #define key_fsuid_changed(t)		do { } while(0)
 #define key_fsgid_changed(t)		do { } while(0)
 #define key_init()			do { } while(0)
-#define key_replace_session_keyring()	do { } while(0)
 
 #endif /* CONFIG_KEYS */
 #endif /* __KERNEL__ */
diff --git a/include/linux/kmod.h b/include/linux/kmod.h
index dd99c329..5398d58 100644
--- a/include/linux/kmod.h
+++ b/include/linux/kmod.h
@@ -66,40 +66,10 @@
 	void *data;
 };
 
-/* Allocate a subprocess_info structure */
-struct subprocess_info *call_usermodehelper_setup(char *path, char **argv,
-						  char **envp, gfp_t gfp_mask);
-
-/* Set various pieces of state into the subprocess_info structure */
-void call_usermodehelper_setfns(struct subprocess_info *info,
-		    int (*init)(struct subprocess_info *info, struct cred *new),
-		    void (*cleanup)(struct subprocess_info *info),
-		    void *data);
-
-/* Actually execute the sub-process */
-int call_usermodehelper_exec(struct subprocess_info *info, int wait);
-
-/* Free the subprocess_info. This is only needed if you're not going
-   to call call_usermodehelper_exec */
-void call_usermodehelper_freeinfo(struct subprocess_info *info);
-
-static inline int
+extern int
 call_usermodehelper_fns(char *path, char **argv, char **envp, int wait,
 			int (*init)(struct subprocess_info *info, struct cred *new),
-			void (*cleanup)(struct subprocess_info *), void *data)
-{
-	struct subprocess_info *info;
-	gfp_t gfp_mask = (wait == UMH_NO_WAIT) ? GFP_ATOMIC : GFP_KERNEL;
-
-	info = call_usermodehelper_setup(path, argv, envp, gfp_mask);
-
-	if (info == NULL)
-		return -ENOMEM;
-
-	call_usermodehelper_setfns(info, init, cleanup, data);
-
-	return call_usermodehelper_exec(info, wait);
-}
+			void (*cleanup)(struct subprocess_info *), void *data);
 
 static inline int
 call_usermodehelper(char *path, char **argv, char **envp, int wait)
diff --git a/include/linux/kmsg_dump.h b/include/linux/kmsg_dump.h
index 35f7237..d6bd501 100644
--- a/include/linux/kmsg_dump.h
+++ b/include/linux/kmsg_dump.h
@@ -21,6 +21,7 @@
  * is passed to the kernel.
  */
 enum kmsg_dump_reason {
+	KMSG_DUMP_UNDEF,
 	KMSG_DUMP_PANIC,
 	KMSG_DUMP_OOPS,
 	KMSG_DUMP_EMERG,
@@ -31,23 +32,37 @@
 
 /**
  * struct kmsg_dumper - kernel crash message dumper structure
- * @dump:	The callback which gets called on crashes. The buffer is passed
- * 		as two sections, where s1 (length l1) contains the older
- * 		messages and s2 (length l2) contains the newer.
  * @list:	Entry in the dumper list (private)
+ * @dump:	Call into dumping code which will retrieve the data
+ * 		through the record iterator
+ * @max_reason:	filter for highest reason number that should be dumped
  * @registered:	Flag that specifies if this is already registered
  */
 struct kmsg_dumper {
-	void (*dump)(struct kmsg_dumper *dumper, enum kmsg_dump_reason reason,
-			const char *s1, unsigned long l1,
-			const char *s2, unsigned long l2);
 	struct list_head list;
-	int registered;
+	void (*dump)(struct kmsg_dumper *dumper, enum kmsg_dump_reason reason);
+	enum kmsg_dump_reason max_reason;
+	bool active;
+	bool registered;
+
+	/* private state of the kmsg iterator */
+	u32 cur_idx;
+	u32 next_idx;
+	u64 cur_seq;
+	u64 next_seq;
 };
 
 #ifdef CONFIG_PRINTK
 void kmsg_dump(enum kmsg_dump_reason reason);
 
+bool kmsg_dump_get_line(struct kmsg_dumper *dumper, bool syslog,
+			char *line, size_t size, size_t *len);
+
+bool kmsg_dump_get_buffer(struct kmsg_dumper *dumper, bool syslog,
+			  char *buf, size_t size, size_t *len);
+
+void kmsg_dump_rewind(struct kmsg_dumper *dumper);
+
 int kmsg_dump_register(struct kmsg_dumper *dumper);
 
 int kmsg_dump_unregister(struct kmsg_dumper *dumper);
@@ -56,6 +71,22 @@
 {
 }
 
+static inline bool kmsg_dump_get_line(struct kmsg_dumper *dumper, bool syslog,
+				const char *line, size_t size, size_t *len)
+{
+	return false;
+}
+
+static inline bool kmsg_dump_get_buffer(struct kmsg_dumper *dumper, bool syslog,
+					char *buf, size_t size, size_t *len)
+{
+	return false;
+}
+
+static inline void kmsg_dump_rewind(struct kmsg_dumper *dumper)
+{
+}
+
 static inline int kmsg_dump_register(struct kmsg_dumper *dumper)
 {
 	return -EINVAL;
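
A hedged sketch of a dumper written against the new record-iterator API (all names below are invented): the core fills in the iterator state before invoking ->dump, and kmsg_dump_get_line() then walks the log one record at a time:

#include <linux/kmsg_dump.h>

static void ex_dump(struct kmsg_dumper *dumper, enum kmsg_dump_reason reason)
{
	static char line[256];
	size_t len;

	/* Optionally restart from the oldest recorded message. */
	kmsg_dump_rewind(dumper);

	while (kmsg_dump_get_line(dumper, true, line, sizeof(line), &len)) {
		/* persist 'len' bytes of 'line' to the backing store */
	}
}

static struct kmsg_dumper ex_dumper = {
	.dump	    = ex_dump,
	.max_reason = KMSG_DUMP_OOPS,	/* dump for reasons up to and including oops */
};

/* kmsg_dump_register(&ex_dumper) at module init,
 * kmsg_dump_unregister(&ex_dumper) at exit. */
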
diff --git a/include/linux/lcd.h b/include/linux/lcd.h
index 8877123..e00c3b0 100644
--- a/include/linux/lcd.h
+++ b/include/linux/lcd.h
@@ -40,6 +40,16 @@
 	/* Get the LCD panel power status (0: full on, 1..3: controller
 	   power on, flat panel power off, 4: full off), see FB_BLANK_XXX */
 	int (*get_power)(struct lcd_device *);
+	/*
+	 * Enable or disable power to the LCD (0: on; 4: off, see FB_BLANK_XXX).
+	 * This callback is called prior to the fb driver's callback.
+	 *
+	 * Note that if early_set_power is not NULL, an early fb notifier
+	 *	is registered.
+	 */
+	int (*early_set_power)(struct lcd_device *, int power);
+	/* revert the effects of the early blank event. */
+	int (*r_early_set_power)(struct lcd_device *, int power);
 	/* Enable or disable power to the LCD (0: on; 4: off, see FB_BLANK_XXX) */
 	int (*set_power)(struct lcd_device *, int power);
 	/* Get the current contrast setting (0-max_contrast) */
diff --git a/include/linux/led-lm3530.h b/include/linux/led-lm3530.h
index eeae6e7..4b13347 100644
--- a/include/linux/led-lm3530.h
+++ b/include/linux/led-lm3530.h
@@ -92,7 +92,7 @@
  * @als2_resistor_sel: internal resistance from ALS2 input to ground
  * @als_vmin: als input voltage calibrated for max brightness in mV
  * @als_vmax: als input voltage calibrated for min brightness in mV
- * @brt_val: brightness value (0-255)
+ * @brt_val: brightness value (0-127)
  * @pwm_data: PWM control functions (only valid when the mode is PWM)
  */
 struct lm3530_platform_data {
diff --git a/include/linux/leds.h b/include/linux/leds.h
index 5884def..39eee41 100644
--- a/include/linux/leds.h
+++ b/include/linux/leds.h
@@ -73,6 +73,8 @@
 	struct led_trigger	*trigger;
 	struct list_head	 trig_list;
 	void			*trigger_data;
+	/* true if activated - deactivate routine uses it to do cleanup */
+	bool			activated;
 #endif
 };
 
diff --git a/include/linux/lglock.h b/include/linux/lglock.h
index 87f402c..f01e5f6 100644
--- a/include/linux/lglock.h
+++ b/include/linux/lglock.h
@@ -23,28 +23,17 @@
 #include <linux/lockdep.h>
 #include <linux/percpu.h>
 #include <linux/cpu.h>
+#include <linux/notifier.h>
 
 /* can make br locks by using local lock for read side, global lock for write */
-#define br_lock_init(name)	name##_lock_init()
-#define br_read_lock(name)	name##_local_lock()
-#define br_read_unlock(name)	name##_local_unlock()
-#define br_write_lock(name)	name##_global_lock_online()
-#define br_write_unlock(name)	name##_global_unlock_online()
+#define br_lock_init(name)	lg_lock_init(name, #name)
+#define br_read_lock(name)	lg_local_lock(name)
+#define br_read_unlock(name)	lg_local_unlock(name)
+#define br_write_lock(name)	lg_global_lock(name)
+#define br_write_unlock(name)	lg_global_unlock(name)
 
-#define DECLARE_BRLOCK(name)	DECLARE_LGLOCK(name)
 #define DEFINE_BRLOCK(name)	DEFINE_LGLOCK(name)
 
-
-#define lg_lock_init(name)	name##_lock_init()
-#define lg_local_lock(name)	name##_local_lock()
-#define lg_local_unlock(name)	name##_local_unlock()
-#define lg_local_lock_cpu(name, cpu)	name##_local_lock_cpu(cpu)
-#define lg_local_unlock_cpu(name, cpu)	name##_local_unlock_cpu(cpu)
-#define lg_global_lock(name)	name##_global_lock()
-#define lg_global_unlock(name)	name##_global_unlock()
-#define lg_global_lock_online(name) name##_global_lock_online()
-#define lg_global_unlock_online(name) name##_global_unlock_online()
-
 #ifdef CONFIG_DEBUG_LOCK_ALLOC
 #define LOCKDEP_INIT_MAP lockdep_init_map
 
@@ -59,142 +48,26 @@
 #define DEFINE_LGLOCK_LOCKDEP(name)
 #endif
 
-
-#define DECLARE_LGLOCK(name)						\
- extern void name##_lock_init(void);					\
- extern void name##_local_lock(void);					\
- extern void name##_local_unlock(void);					\
- extern void name##_local_lock_cpu(int cpu);				\
- extern void name##_local_unlock_cpu(int cpu);				\
- extern void name##_global_lock(void);					\
- extern void name##_global_unlock(void);				\
- extern void name##_global_lock_online(void);				\
- extern void name##_global_unlock_online(void);				\
+struct lglock {
+	arch_spinlock_t __percpu *lock;
+#ifdef CONFIG_DEBUG_LOCK_ALLOC
+	struct lock_class_key lock_key;
+	struct lockdep_map    lock_dep_map;
+#endif
+};
 
 #define DEFINE_LGLOCK(name)						\
-									\
- DEFINE_SPINLOCK(name##_cpu_lock);					\
- cpumask_t name##_cpus __read_mostly;					\
- DEFINE_PER_CPU(arch_spinlock_t, name##_lock);				\
- DEFINE_LGLOCK_LOCKDEP(name);						\
-									\
- static int								\
- name##_lg_cpu_callback(struct notifier_block *nb,			\
-				unsigned long action, void *hcpu)	\
- {									\
-	switch (action & ~CPU_TASKS_FROZEN) {				\
-	case CPU_UP_PREPARE:						\
-		spin_lock(&name##_cpu_lock);				\
-		cpu_set((unsigned long)hcpu, name##_cpus);		\
-		spin_unlock(&name##_cpu_lock);				\
-		break;							\
-	case CPU_UP_CANCELED: case CPU_DEAD:				\
-		spin_lock(&name##_cpu_lock);				\
-		cpu_clear((unsigned long)hcpu, name##_cpus);		\
-		spin_unlock(&name##_cpu_lock);				\
-	}								\
-	return NOTIFY_OK;						\
- }									\
- static struct notifier_block name##_lg_cpu_notifier = {		\
-	.notifier_call = name##_lg_cpu_callback,			\
- };									\
- void name##_lock_init(void) {						\
-	int i;								\
-	LOCKDEP_INIT_MAP(&name##_lock_dep_map, #name, &name##_lock_key, 0); \
-	for_each_possible_cpu(i) {					\
-		arch_spinlock_t *lock;					\
-		lock = &per_cpu(name##_lock, i);			\
-		*lock = (arch_spinlock_t)__ARCH_SPIN_LOCK_UNLOCKED;	\
-	}								\
-	register_hotcpu_notifier(&name##_lg_cpu_notifier);		\
-	get_online_cpus();						\
-	for_each_online_cpu(i)						\
-		cpu_set(i, name##_cpus);				\
-	put_online_cpus();						\
- }									\
- EXPORT_SYMBOL(name##_lock_init);					\
-									\
- void name##_local_lock(void) {						\
-	arch_spinlock_t *lock;						\
-	preempt_disable();						\
-	rwlock_acquire_read(&name##_lock_dep_map, 0, 0, _THIS_IP_);	\
-	lock = &__get_cpu_var(name##_lock);				\
-	arch_spin_lock(lock);						\
- }									\
- EXPORT_SYMBOL(name##_local_lock);					\
-									\
- void name##_local_unlock(void) {					\
-	arch_spinlock_t *lock;						\
-	rwlock_release(&name##_lock_dep_map, 1, _THIS_IP_);		\
-	lock = &__get_cpu_var(name##_lock);				\
-	arch_spin_unlock(lock);						\
-	preempt_enable();						\
- }									\
- EXPORT_SYMBOL(name##_local_unlock);					\
-									\
- void name##_local_lock_cpu(int cpu) {					\
-	arch_spinlock_t *lock;						\
-	preempt_disable();						\
-	rwlock_acquire_read(&name##_lock_dep_map, 0, 0, _THIS_IP_);	\
-	lock = &per_cpu(name##_lock, cpu);				\
-	arch_spin_lock(lock);						\
- }									\
- EXPORT_SYMBOL(name##_local_lock_cpu);					\
-									\
- void name##_local_unlock_cpu(int cpu) {				\
-	arch_spinlock_t *lock;						\
-	rwlock_release(&name##_lock_dep_map, 1, _THIS_IP_);		\
-	lock = &per_cpu(name##_lock, cpu);				\
-	arch_spin_unlock(lock);						\
-	preempt_enable();						\
- }									\
- EXPORT_SYMBOL(name##_local_unlock_cpu);				\
-									\
- void name##_global_lock_online(void) {					\
-	int i;								\
-	spin_lock(&name##_cpu_lock);					\
-	rwlock_acquire(&name##_lock_dep_map, 0, 0, _RET_IP_);		\
-	for_each_cpu(i, &name##_cpus) {					\
-		arch_spinlock_t *lock;					\
-		lock = &per_cpu(name##_lock, i);			\
-		arch_spin_lock(lock);					\
-	}								\
- }									\
- EXPORT_SYMBOL(name##_global_lock_online);				\
-									\
- void name##_global_unlock_online(void) {				\
-	int i;								\
-	rwlock_release(&name##_lock_dep_map, 1, _RET_IP_);		\
-	for_each_cpu(i, &name##_cpus) {					\
-		arch_spinlock_t *lock;					\
-		lock = &per_cpu(name##_lock, i);			\
-		arch_spin_unlock(lock);					\
-	}								\
-	spin_unlock(&name##_cpu_lock);					\
- }									\
- EXPORT_SYMBOL(name##_global_unlock_online);				\
-									\
- void name##_global_lock(void) {					\
-	int i;								\
-	preempt_disable();						\
-	rwlock_acquire(&name##_lock_dep_map, 0, 0, _RET_IP_);		\
-	for_each_possible_cpu(i) {					\
-		arch_spinlock_t *lock;					\
-		lock = &per_cpu(name##_lock, i);			\
-		arch_spin_lock(lock);					\
-	}								\
- }									\
- EXPORT_SYMBOL(name##_global_lock);					\
-									\
- void name##_global_unlock(void) {					\
-	int i;								\
-	rwlock_release(&name##_lock_dep_map, 1, _RET_IP_);		\
-	for_each_possible_cpu(i) {					\
-		arch_spinlock_t *lock;					\
-		lock = &per_cpu(name##_lock, i);			\
-		arch_spin_unlock(lock);					\
-	}								\
-	preempt_enable();						\
- }									\
- EXPORT_SYMBOL(name##_global_unlock);
+	DEFINE_LGLOCK_LOCKDEP(name);					\
+	DEFINE_PER_CPU(arch_spinlock_t, name ## _lock)			\
+	= __ARCH_SPIN_LOCK_UNLOCKED;					\
+	struct lglock name = { .lock = &name ## _lock }
+
+void lg_lock_init(struct lglock *lg, char *name);
+void lg_local_lock(struct lglock *lg);
+void lg_local_unlock(struct lglock *lg);
+void lg_local_lock_cpu(struct lglock *lg, int cpu);
+void lg_local_unlock_cpu(struct lglock *lg, int cpu);
+void lg_global_lock(struct lglock *lg);
+void lg_global_unlock(struct lglock *lg);
+
 #endif
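
A hedged usage sketch of the rewritten lglock API; the lock name and call sites are illustrative only:

#include <linux/lglock.h>

/* Statically defines the per-CPU spinlocks plus the struct lglock handle. */
DEFINE_LGLOCK(ex_lglock);

static void ex_setup(void)
{
	/* Wires up lockdep; the per-CPU locks themselves are already
	 * initialised by DEFINE_LGLOCK(). */
	lg_lock_init(&ex_lglock, "ex_lglock");
}

static void ex_fast_path(void)
{
	lg_local_lock(&ex_lglock);	/* takes only this CPU's spinlock */
	/* ... per-CPU, read-mostly work ... */
	lg_local_unlock(&ex_lglock);
}

static void ex_slow_path(void)
{
	lg_global_lock(&ex_lglock);	/* takes every CPU's spinlock */
	/* ... rare global update ... */
	lg_global_unlock(&ex_lglock);
}
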
diff --git a/include/linux/lockd/bind.h b/include/linux/lockd/bind.h
index 11a966e..4d24d64 100644
--- a/include/linux/lockd/bind.h
+++ b/include/linux/lockd/bind.h
@@ -54,7 +54,7 @@
 
 extern int	nlmclnt_proc(struct nlm_host *host, int cmd,
 					struct file_lock *fl);
-extern int	lockd_up(void);
-extern void	lockd_down(void);
+extern int	lockd_up(struct net *net);
+extern void	lockd_down(struct net *net);
 
 #endif /* LINUX_LOCKD_BIND_H */
diff --git a/include/linux/memcontrol.h b/include/linux/memcontrol.h
index f94efd2..83e7ba9 100644
--- a/include/linux/memcontrol.h
+++ b/include/linux/memcontrol.h
@@ -63,12 +63,7 @@
 					gfp_t gfp_mask);
 
 struct lruvec *mem_cgroup_zone_lruvec(struct zone *, struct mem_cgroup *);
-struct lruvec *mem_cgroup_lru_add_list(struct zone *, struct page *,
-				       enum lru_list);
-void mem_cgroup_lru_del_list(struct page *, enum lru_list);
-void mem_cgroup_lru_del(struct page *);
-struct lruvec *mem_cgroup_lru_move_lists(struct zone *, struct page *,
-					 enum lru_list, enum lru_list);
+struct lruvec *mem_cgroup_page_lruvec(struct page *, struct zone *);
 
 /* For coalescing uncharge for reducing memcg' overhead*/
 extern void mem_cgroup_uncharge_start(void);
@@ -79,6 +74,8 @@
 
 extern void mem_cgroup_out_of_memory(struct mem_cgroup *memcg, gfp_t gfp_mask,
 				     int order);
+bool __mem_cgroup_same_or_subtree(const struct mem_cgroup *root_memcg,
+				  struct mem_cgroup *memcg);
 int task_in_mem_cgroup(struct task_struct *task, const struct mem_cgroup *memcg);
 
 extern struct mem_cgroup *try_get_mem_cgroup_from_page(struct page *page);
@@ -92,10 +89,13 @@
 int mm_match_cgroup(const struct mm_struct *mm, const struct mem_cgroup *cgroup)
 {
 	struct mem_cgroup *memcg;
+	int match;
+
 	rcu_read_lock();
 	memcg = mem_cgroup_from_task(rcu_dereference((mm)->owner));
+	match = __mem_cgroup_same_or_subtree(cgroup, memcg);
 	rcu_read_unlock();
-	return cgroup == memcg;
+	return match;
 }
 
 extern struct cgroup_subsys_state *mem_cgroup_css(struct mem_cgroup *memcg);
@@ -114,17 +114,11 @@
 /*
  * For memory reclaim.
  */
-int mem_cgroup_inactive_anon_is_low(struct mem_cgroup *memcg,
-				    struct zone *zone);
-int mem_cgroup_inactive_file_is_low(struct mem_cgroup *memcg,
-				    struct zone *zone);
+int mem_cgroup_inactive_anon_is_low(struct lruvec *lruvec);
+int mem_cgroup_inactive_file_is_low(struct lruvec *lruvec);
 int mem_cgroup_select_victim_node(struct mem_cgroup *memcg);
-unsigned long mem_cgroup_zone_nr_lru_pages(struct mem_cgroup *memcg,
-					int nid, int zid, unsigned int lrumask);
-struct zone_reclaim_stat *mem_cgroup_get_reclaim_stat(struct mem_cgroup *memcg,
-						      struct zone *zone);
-struct zone_reclaim_stat*
-mem_cgroup_get_reclaim_stat_from_page(struct page *page);
+unsigned long mem_cgroup_get_lru_size(struct lruvec *lruvec, enum lru_list);
+void mem_cgroup_update_lru_size(struct lruvec *, enum lru_list, int);
 extern void mem_cgroup_print_oom_info(struct mem_cgroup *memcg,
 					struct task_struct *p);
 extern void mem_cgroup_replace_page_cache(struct page *oldpage,
@@ -251,25 +245,8 @@
 	return &zone->lruvec;
 }
 
-static inline struct lruvec *mem_cgroup_lru_add_list(struct zone *zone,
-						     struct page *page,
-						     enum lru_list lru)
-{
-	return &zone->lruvec;
-}
-
-static inline void mem_cgroup_lru_del_list(struct page *page, enum lru_list lru)
-{
-}
-
-static inline void mem_cgroup_lru_del(struct page *page)
-{
-}
-
-static inline struct lruvec *mem_cgroup_lru_move_lists(struct zone *zone,
-						       struct page *page,
-						       enum lru_list from,
-						       enum lru_list to)
+static inline struct lruvec *mem_cgroup_page_lruvec(struct page *page,
+						    struct zone *zone)
 {
 	return &zone->lruvec;
 }
@@ -333,35 +310,27 @@
 }
 
 static inline int
-mem_cgroup_inactive_anon_is_low(struct mem_cgroup *memcg, struct zone *zone)
+mem_cgroup_inactive_anon_is_low(struct lruvec *lruvec)
 {
 	return 1;
 }
 
 static inline int
-mem_cgroup_inactive_file_is_low(struct mem_cgroup *memcg, struct zone *zone)
+mem_cgroup_inactive_file_is_low(struct lruvec *lruvec)
 {
 	return 1;
 }
 
 static inline unsigned long
-mem_cgroup_zone_nr_lru_pages(struct mem_cgroup *memcg, int nid, int zid,
-				unsigned int lru_mask)
+mem_cgroup_get_lru_size(struct lruvec *lruvec, enum lru_list lru)
 {
 	return 0;
 }
 
-
-static inline struct zone_reclaim_stat*
-mem_cgroup_get_reclaim_stat(struct mem_cgroup *memcg, struct zone *zone)
+static inline void
+mem_cgroup_update_lru_size(struct lruvec *lruvec, enum lru_list lru,
+			      int increment)
 {
-	return NULL;
-}
-
-static inline struct zone_reclaim_stat*
-mem_cgroup_get_reclaim_stat_from_page(struct page *page)
-{
-	return NULL;
 }
 
 static inline void
diff --git a/include/linux/mempolicy.h b/include/linux/mempolicy.h
index 7c727a9..4aa4273 100644
--- a/include/linux/mempolicy.h
+++ b/include/linux/mempolicy.h
@@ -225,8 +225,8 @@
 		policy_zone = k;
 }
 
-int do_migrate_pages(struct mm_struct *mm,
-	const nodemask_t *from_nodes, const nodemask_t *to_nodes, int flags);
+int do_migrate_pages(struct mm_struct *mm, const nodemask_t *from,
+		     const nodemask_t *to, int flags);
 
 
 #ifdef CONFIG_TMPFS
@@ -354,9 +354,8 @@
 	return false;
 }
 
-static inline int do_migrate_pages(struct mm_struct *mm,
-			const nodemask_t *from_nodes,
-			const nodemask_t *to_nodes, int flags)
+static inline int do_migrate_pages(struct mm_struct *mm, const nodemask_t *from,
+				   const nodemask_t *to, int flags)
 {
 	return 0;
 }
diff --git a/include/linux/mfd/abx500/ab8500.h b/include/linux/mfd/abx500/ab8500.h
index fccc300..91dd3ef 100644
--- a/include/linux/mfd/abx500/ab8500.h
+++ b/include/linux/mfd/abx500/ab8500.h
@@ -7,6 +7,7 @@
 #ifndef MFD_AB8500_H
 #define MFD_AB8500_H
 
+#include <linux/atomic.h>
 #include <linux/mutex.h>
 
 struct device;
@@ -194,6 +195,14 @@
 #define AB9540_INT_GPIO52F		123
 #define AB9540_INT_GPIO53F		124
 #define AB9540_INT_GPIO54F		125 /* not 8505 */
+/* ab8500_irq_regoffset[16] -> IT[Source|Latch|Mask]25 */
+#define AB8505_INT_KEYSTUCK		128
+#define AB8505_INT_IKR			129
+#define AB8505_INT_IKP			130
+#define AB8505_INT_KP			131
+#define AB8505_INT_KEYDEGLITCH		132
+#define AB8505_INT_MODPWRSTATUSF	134
+#define AB8505_INT_MODPWRSTATUSR	135
 
 /*
  * AB8500_AB9540_NR_IRQS is used when configuring the IRQ numbers for the
@@ -203,8 +212,8 @@
  * which is larger.
  */
 #define AB8500_NR_IRQS			112
-#define AB8505_NR_IRQS			128
-#define AB9540_NR_IRQS			128
+#define AB8505_NR_IRQS			136
+#define AB9540_NR_IRQS			136
 /* This is set to the roof of any AB8500 chip variant IRQ counts */
 #define AB8500_MAX_NR_IRQS		AB9540_NR_IRQS
 
@@ -216,6 +225,7 @@
  * @dev: parent device
  * @lock: read/write operations lock
  * @irq_lock: genirq bus lock
+ * @transfer_ongoing: 0 if no transfer ongoing
  * @irq: irq line
  * @version: chip version id (e.g. ab8500 or ab9540)
  * @chip_id: chip revision id
@@ -234,7 +244,7 @@
 	struct device	*dev;
 	struct mutex	lock;
 	struct mutex	irq_lock;
-
+	atomic_t	transfer_ongoing;
 	int		irq_base;
 	int		irq;
 	enum ab8500_version version;
@@ -280,6 +290,8 @@
 				 enum ab8500_version version);
 extern int __devexit ab8500_exit(struct ab8500 *ab8500);
 
+extern int ab8500_suspend(struct ab8500 *ab8500);
+
 static inline int is_ab8500(struct ab8500 *ab)
 {
 	return ab->version == AB8500_VERSION_AB8500;
diff --git a/include/linux/mfd/anatop.h b/include/linux/mfd/anatop.h
index 22c1007..7f92acf 100644
--- a/include/linux/mfd/anatop.h
+++ b/include/linux/mfd/anatop.h
@@ -34,7 +34,7 @@
 	spinlock_t reglock;
 };
 
-extern u32 anatop_get_bits(struct anatop *, u32, int, int);
-extern void anatop_set_bits(struct anatop *, u32, int, int, u32);
+extern u32 anatop_read_reg(struct anatop *, u32);
+extern void anatop_write_reg(struct anatop *, u32, u32, u32);
 
 #endif /*  __LINUX_MFD_ANATOP_H */
diff --git a/include/linux/mfd/asic3.h b/include/linux/mfd/asic3.h
index ef6faa5..e1148d0 100644
--- a/include/linux/mfd/asic3.h
+++ b/include/linux/mfd/asic3.h
@@ -31,6 +31,8 @@
 
 	unsigned int gpio_base;
 
+	unsigned int clock_rate;
+
 	struct asic3_led *leds;
 };
 
diff --git a/include/linux/mfd/da9052/da9052.h b/include/linux/mfd/da9052/da9052.h
index 8313cd9..0507c4c 100644
--- a/include/linux/mfd/da9052/da9052.h
+++ b/include/linux/mfd/da9052/da9052.h
@@ -33,6 +33,18 @@
 
 #include <linux/mfd/da9052/reg.h>
 
+/* Common - HWMON Channel Definitions */
+#define DA9052_ADC_VDDOUT	0
+#define DA9052_ADC_ICH		1
+#define DA9052_ADC_TBAT	2
+#define DA9052_ADC_VBAT	3
+#define DA9052_ADC_IN4		4
+#define DA9052_ADC_IN5		5
+#define DA9052_ADC_IN6		6
+#define DA9052_ADC_TSI		7
+#define DA9052_ADC_TJUNC	8
+#define DA9052_ADC_VBBAT	9
+
 #define DA9052_IRQ_DCIN	0
 #define DA9052_IRQ_VBUS	1
 #define DA9052_IRQ_DCINREM	2
@@ -79,6 +91,9 @@
 	struct device *dev;
 	struct regmap *regmap;
 
+	struct mutex auxadc_lock;
+	struct completion done;
+
 	int irq_base;
 	struct regmap_irq_chip_data *irq_data;
 	u8 chip_id;
@@ -86,6 +101,10 @@
 	int chip_irq;
 };
 
+/* ADC API */
+int da9052_adc_manual_read(struct da9052 *da9052, unsigned char channel);
+int da9052_adc_read_temp(struct da9052 *da9052);
+
 /* Device I/O API */
 static inline int da9052_reg_read(struct da9052 *da9052, unsigned char reg)
 {
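
The da9052.h hunk adds HWMON channel numbers and a manual ADC API next to the existing register I/O helpers. A usage sketch, assuming the raw reading is wanted as-is (the wrapper function is hypothetical; any scaling to millivolts is left to the real driver):

#include <linux/mfd/da9052/da9052.h>

/* Illustrative only: one-shot read of the battery-voltage channel. */
static int example_read_vbat_raw(struct da9052 *da9052)
{
	int val = da9052_adc_manual_read(da9052, DA9052_ADC_VBAT);

	if (val < 0)
		return val;	/* propagate the error code */

	return val;		/* raw ADC counts */
}
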
diff --git a/include/linux/mfd/lm3533.h b/include/linux/mfd/lm3533.h
new file mode 100644
index 0000000..594bc59
--- /dev/null
+++ b/include/linux/mfd/lm3533.h
@@ -0,0 +1,104 @@
+/*
+ * lm3533.h -- LM3533 interface
+ *
+ * Copyright (C) 2011-2012 Texas Instruments
+ *
+ * Author: Johan Hovold <jhovold@gmail.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under  the terms of the GNU General  Public License as published by the
+ * Free Software Foundation;  either version 2 of the License, or (at your
+ * option) any later version.
+ */
+
+#ifndef __LINUX_MFD_LM3533_H
+#define __LINUX_MFD_LM3533_H
+
+#define LM3533_ATTR_RO(_name) \
+	DEVICE_ATTR(_name, S_IRUGO, show_##_name, NULL)
+#define LM3533_ATTR_RW(_name) \
+	DEVICE_ATTR(_name, S_IRUGO | S_IWUSR , show_##_name, store_##_name)
+
+struct device;
+struct regmap;
+
+struct lm3533 {
+	struct device *dev;
+
+	struct regmap *regmap;
+
+	int gpio_hwen;
+	int irq;
+
+	unsigned have_als:1;
+	unsigned have_backlights:1;
+	unsigned have_leds:1;
+};
+
+struct lm3533_ctrlbank {
+	struct lm3533 *lm3533;
+	struct device *dev;
+	int id;
+};
+
+struct lm3533_als_platform_data {
+	unsigned pwm_mode:1;		/* PWM input mode (default analog) */
+	u8 r_select;			/* 1 - 127 (ignored in PWM-mode) */
+};
+
+struct lm3533_bl_platform_data {
+	char *name;
+	u16 max_current;		/* 5000 - 29800 uA (800 uA step) */
+	u8 default_brightness;		/* 0 - 255 */
+	u8 pwm;				/* 0 - 0x3f */
+};
+
+struct lm3533_led_platform_data {
+	char *name;
+	const char *default_trigger;
+	u16 max_current;		/* 5000 - 29800 uA (800 uA step) */
+	u8 pwm;				/* 0 - 0x3f */
+};
+
+enum lm3533_boost_freq {
+	LM3533_BOOST_FREQ_500KHZ,
+	LM3533_BOOST_FREQ_1000KHZ,
+};
+
+enum lm3533_boost_ovp {
+	LM3533_BOOST_OVP_16V,
+	LM3533_BOOST_OVP_24V,
+	LM3533_BOOST_OVP_32V,
+	LM3533_BOOST_OVP_40V,
+};
+
+struct lm3533_platform_data {
+	int gpio_hwen;
+
+	enum lm3533_boost_ovp boost_ovp;
+	enum lm3533_boost_freq boost_freq;
+
+	struct lm3533_als_platform_data *als;
+
+	struct lm3533_bl_platform_data *backlights;
+	int num_backlights;
+
+	struct lm3533_led_platform_data *leds;
+	int num_leds;
+};
+
+extern int lm3533_ctrlbank_enable(struct lm3533_ctrlbank *cb);
+extern int lm3533_ctrlbank_disable(struct lm3533_ctrlbank *cb);
+
+extern int lm3533_ctrlbank_set_brightness(struct lm3533_ctrlbank *cb, u8 val);
+extern int lm3533_ctrlbank_get_brightness(struct lm3533_ctrlbank *cb, u8 *val);
+extern int lm3533_ctrlbank_set_max_current(struct lm3533_ctrlbank *cb,
+								u16 imax);
+extern int lm3533_ctrlbank_set_pwm(struct lm3533_ctrlbank *cb, u8 val);
+extern int lm3533_ctrlbank_get_pwm(struct lm3533_ctrlbank *cb, u8 *val);
+
+extern int lm3533_read(struct lm3533 *lm3533, u8 reg, u8 *val);
+extern int lm3533_write(struct lm3533 *lm3533, u8 reg, u8 val);
+extern int lm3533_update(struct lm3533 *lm3533, u8 reg, u8 val, u8 mask);
+
+#endif	/* __LINUX_MFD_LM3533_H */
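
lm3533.h above defines both the chip state and the board platform data. A board-file sketch under the assumption of a single backlight and no LEDs or ALS; every value below (names, currents, the HWEN GPIO number) is illustrative, not taken from a real board:

#include <linux/mfd/lm3533.h>

static struct lm3533_bl_platform_data example_lm3533_backlight = {
	.name			= "lcd-backlight",
	.max_current		= 20000,	/* uA, within 5000 - 29800 */
	.default_brightness	= 200,		/* 0 - 255 */
	.pwm			= 0,		/* PWM input unused */
};

static struct lm3533_platform_data example_lm3533_pdata = {
	.gpio_hwen	= 92,			/* board-specific HWEN line */
	.boost_ovp	= LM3533_BOOST_OVP_24V,
	.boost_freq	= LM3533_BOOST_FREQ_500KHZ,
	.backlights	= &example_lm3533_backlight,
	.num_backlights	= 1,
};
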
diff --git a/include/linux/mfd/lpc_ich.h b/include/linux/mfd/lpc_ich.h
new file mode 100644
index 0000000..fec5256
--- /dev/null
+++ b/include/linux/mfd/lpc_ich.h
@@ -0,0 +1,48 @@
+/*
+ *  linux/drivers/mfd/lpc_ich.h
+ *
+ *  Copyright (c) 2012 Extreme Engineering Solution, Inc.
+ *  Author: Aaron Sierra <asierra@xes-inc.com>
+ *
+ *  This program is free software; you can redistribute it and/or modify
+ *  it under the terms of the GNU General Public License 2 as published
+ *  by the Free Software Foundation.
+ *
+ *  This program is distributed in the hope that it will be useful,
+ *  but WITHOUT ANY WARRANTY; without even the implied warranty of
+ *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ *  GNU General Public License for more details.
+ *
+ *  You should have received a copy of the GNU General Public License
+ *  along with this program; see the file COPYING.  If not, write to
+ *  the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.
+ */
+#ifndef LPC_ICH_H
+#define LPC_ICH_H
+
+/* Watchdog resources */
+#define ICH_RES_IO_TCO	0
+#define ICH_RES_IO_SMI	1
+#define ICH_RES_MEM_OFF	2
+#define ICH_RES_MEM_GCS	0
+
+/* GPIO resources */
+#define ICH_RES_GPIO	0
+#define ICH_RES_GPE0	1
+
+/* GPIO compatibility */
+#define ICH_I3100_GPIO		0x401
+#define ICH_V5_GPIO		0x501
+#define ICH_V6_GPIO		0x601
+#define ICH_V7_GPIO		0x701
+#define ICH_V9_GPIO		0x801
+#define ICH_V10CORP_GPIO	0xa01
+#define ICH_V10CONS_GPIO	0xa11
+
+struct lpc_ich_info {
+	char name[32];
+	unsigned int iTCO_version;
+	unsigned int gpio_version;
+};
+
+#endif
diff --git a/include/linux/mfd/max77693-private.h b/include/linux/mfd/max77693-private.h
new file mode 100644
index 0000000..68263c5
--- /dev/null
+++ b/include/linux/mfd/max77693-private.h
@@ -0,0 +1,227 @@
+/*
+ * max77693-private.h - Voltage regulator driver for the Maxim 77693
+ *
+ *  Copyright (C) 2012 Samsung Electronics
+ *  SangYoung Son <hello.son@samsung.com>
+ *
+ * This program is not provided / owned by Maxim Integrated Products.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
+ */
+
+#ifndef __LINUX_MFD_MAX77693_PRIV_H
+#define __LINUX_MFD_MAX77693_PRIV_H
+
+#include <linux/i2c.h>
+
+#define MAX77693_NUM_IRQ_MUIC_REGS	3
+#define MAX77693_REG_INVALID		(0xff)
+
+/* Slave addr = 0xCC: PMIC, Charger, Flash LED */
+enum max77693_pmic_reg {
+	MAX77693_LED_REG_IFLASH1			= 0x00,
+	MAX77693_LED_REG_IFLASH2			= 0x01,
+	MAX77693_LED_REG_ITORCH				= 0x02,
+	MAX77693_LED_REG_ITORCHTIMER			= 0x03,
+	MAX77693_LED_REG_FLASH_TIMER			= 0x04,
+	MAX77693_LED_REG_FLASH_EN			= 0x05,
+	MAX77693_LED_REG_MAX_FLASH1			= 0x06,
+	MAX77693_LED_REG_MAX_FLASH2			= 0x07,
+	MAX77693_LED_REG_MAX_FLASH3			= 0x08,
+	MAX77693_LED_REG_MAX_FLASH4			= 0x09,
+	MAX77693_LED_REG_VOUT_CNTL			= 0x0A,
+	MAX77693_LED_REG_VOUT_FLASH1			= 0x0B,
+	MAX77693_LED_REG_VOUT_FLASH2			= 0x0C,
+	MAX77693_LED_REG_FLASH_INT			= 0x0E,
+	MAX77693_LED_REG_FLASH_INT_MASK			= 0x0F,
+	MAX77693_LED_REG_FLASH_INT_STATUS		= 0x10,
+
+	MAX77693_PMIC_REG_PMIC_ID1			= 0x20,
+	MAX77693_PMIC_REG_PMIC_ID2			= 0x21,
+	MAX77693_PMIC_REG_INTSRC			= 0x22,
+	MAX77693_PMIC_REG_INTSRC_MASK			= 0x23,
+	MAX77693_PMIC_REG_TOPSYS_INT			= 0x24,
+	MAX77693_PMIC_REG_TOPSYS_INT_MASK		= 0x26,
+	MAX77693_PMIC_REG_TOPSYS_STAT			= 0x28,
+	MAX77693_PMIC_REG_MAINCTRL1			= 0x2A,
+	MAX77693_PMIC_REG_LSCNFG			= 0x2B,
+
+	MAX77693_CHG_REG_CHG_INT			= 0xB0,
+	MAX77693_CHG_REG_CHG_INT_MASK			= 0xB1,
+	MAX77693_CHG_REG_CHG_INT_OK			= 0xB2,
+	MAX77693_CHG_REG_CHG_DETAILS_00			= 0xB3,
+	MAX77693_CHG_REG_CHG_DETAILS_01			= 0xB4,
+	MAX77693_CHG_REG_CHG_DETAILS_02			= 0xB5,
+	MAX77693_CHG_REG_CHG_DETAILS_03			= 0xB6,
+	MAX77693_CHG_REG_CHG_CNFG_00			= 0xB7,
+	MAX77693_CHG_REG_CHG_CNFG_01			= 0xB8,
+	MAX77693_CHG_REG_CHG_CNFG_02			= 0xB9,
+	MAX77693_CHG_REG_CHG_CNFG_03			= 0xBA,
+	MAX77693_CHG_REG_CHG_CNFG_04			= 0xBB,
+	MAX77693_CHG_REG_CHG_CNFG_05			= 0xBC,
+	MAX77693_CHG_REG_CHG_CNFG_06			= 0xBD,
+	MAX77693_CHG_REG_CHG_CNFG_07			= 0xBE,
+	MAX77693_CHG_REG_CHG_CNFG_08			= 0xBF,
+	MAX77693_CHG_REG_CHG_CNFG_09			= 0xC0,
+	MAX77693_CHG_REG_CHG_CNFG_10			= 0xC1,
+	MAX77693_CHG_REG_CHG_CNFG_11			= 0xC2,
+	MAX77693_CHG_REG_CHG_CNFG_12			= 0xC3,
+	MAX77693_CHG_REG_CHG_CNFG_13			= 0xC4,
+	MAX77693_CHG_REG_CHG_CNFG_14			= 0xC5,
+	MAX77693_CHG_REG_SAFEOUT_CTRL			= 0xC6,
+
+	MAX77693_PMIC_REG_END,
+};
+
+/* Slave addr = 0x4A: MUIC */
+enum max77693_muic_reg {
+	MAX77693_MUIC_REG_ID		= 0x00,
+	MAX77693_MUIC_REG_INT1		= 0x01,
+	MAX77693_MUIC_REG_INT2		= 0x02,
+	MAX77693_MUIC_REG_INT3		= 0x03,
+	MAX77693_MUIC_REG_STATUS1	= 0x04,
+	MAX77693_MUIC_REG_STATUS2	= 0x05,
+	MAX77693_MUIC_REG_STATUS3	= 0x06,
+	MAX77693_MUIC_REG_INTMASK1	= 0x07,
+	MAX77693_MUIC_REG_INTMASK2	= 0x08,
+	MAX77693_MUIC_REG_INTMASK3	= 0x09,
+	MAX77693_MUIC_REG_CDETCTRL1	= 0x0A,
+	MAX77693_MUIC_REG_CDETCTRL2	= 0x0B,
+	MAX77693_MUIC_REG_CTRL1		= 0x0C,
+	MAX77693_MUIC_REG_CTRL2		= 0x0D,
+	MAX77693_MUIC_REG_CTRL3		= 0x0E,
+
+	MAX77693_MUIC_REG_END,
+};
+
+/* Slave addr = 0x90: Haptic */
+enum max77693_haptic_reg {
+	MAX77693_HAPTIC_REG_STATUS		= 0x00,
+	MAX77693_HAPTIC_REG_CONFIG1		= 0x01,
+	MAX77693_HAPTIC_REG_CONFIG2		= 0x02,
+	MAX77693_HAPTIC_REG_CONFIG_CHNL		= 0x03,
+	MAX77693_HAPTIC_REG_CONFG_CYC1		= 0x04,
+	MAX77693_HAPTIC_REG_CONFG_CYC2		= 0x05,
+	MAX77693_HAPTIC_REG_CONFIG_PER1		= 0x06,
+	MAX77693_HAPTIC_REG_CONFIG_PER2		= 0x07,
+	MAX77693_HAPTIC_REG_CONFIG_PER3		= 0x08,
+	MAX77693_HAPTIC_REG_CONFIG_PER4		= 0x09,
+	MAX77693_HAPTIC_REG_CONFIG_DUTY1	= 0x0A,
+	MAX77693_HAPTIC_REG_CONFIG_DUTY2	= 0x0B,
+	MAX77693_HAPTIC_REG_CONFIG_PWM1		= 0x0C,
+	MAX77693_HAPTIC_REG_CONFIG_PWM2		= 0x0D,
+	MAX77693_HAPTIC_REG_CONFIG_PWM3		= 0x0E,
+	MAX77693_HAPTIC_REG_CONFIG_PWM4		= 0x0F,
+	MAX77693_HAPTIC_REG_REV			= 0x10,
+
+	MAX77693_HAPTIC_REG_END,
+};
+
+enum max77693_irq_source {
+	LED_INT = 0,
+	TOPSYS_INT,
+	CHG_INT,
+	MUIC_INT1,
+	MUIC_INT2,
+	MUIC_INT3,
+
+	MAX77693_IRQ_GROUP_NR,
+};
+
+enum max77693_irq {
+	/* PMIC - FLASH */
+	MAX77693_LED_IRQ_FLED2_OPEN,
+	MAX77693_LED_IRQ_FLED2_SHORT,
+	MAX77693_LED_IRQ_FLED1_OPEN,
+	MAX77693_LED_IRQ_FLED1_SHORT,
+	MAX77693_LED_IRQ_MAX_FLASH,
+
+	/* PMIC - TOPSYS */
+	MAX77693_TOPSYS_IRQ_T120C_INT,
+	MAX77693_TOPSYS_IRQ_T140C_INT,
+	MAX77693_TOPSYS_IRQ_LOWSYS_INT,
+
+	/* PMIC - Charger */
+	MAX77693_CHG_IRQ_BYP_I,
+	MAX77693_CHG_IRQ_THM_I,
+	MAX77693_CHG_IRQ_BAT_I,
+	MAX77693_CHG_IRQ_CHG_I,
+	MAX77693_CHG_IRQ_CHGIN_I,
+
+	/* MUIC INT1 */
+	MAX77693_MUIC_IRQ_INT1_ADC,
+	MAX77693_MUIC_IRQ_INT1_ADC_LOW,
+	MAX77693_MUIC_IRQ_INT1_ADC_ERR,
+	MAX77693_MUIC_IRQ_INT1_ADC1K,
+
+	/* MUIC INT2 */
+	MAX77693_MUIC_IRQ_INT2_CHGTYP,
+	MAX77693_MUIC_IRQ_INT2_CHGDETREUN,
+	MAX77693_MUIC_IRQ_INT2_DCDTMR,
+	MAX77693_MUIC_IRQ_INT2_DXOVP,
+	MAX77693_MUIC_IRQ_INT2_VBVOLT,
+	MAX77693_MUIC_IRQ_INT2_VIDRM,
+
+	/* MUIC INT3 */
+	MAX77693_MUIC_IRQ_INT3_EOC,
+	MAX77693_MUIC_IRQ_INT3_CGMBC,
+	MAX77693_MUIC_IRQ_INT3_OVP,
+	MAX77693_MUIC_IRQ_INT3_MBCCHG_ERR,
+	MAX77693_MUIC_IRQ_INT3_CHG_ENABLED,
+	MAX77693_MUIC_IRQ_INT3_BAT_DET,
+
+	MAX77693_IRQ_NR,
+};
+
+struct max77693_dev {
+	struct device *dev;
+	struct i2c_client *i2c;		/* 0xCC , PMIC, Charger, Flash LED */
+	struct i2c_client *muic;	/* 0x4A , MUIC */
+	struct i2c_client *haptic;	/* 0x90 , Haptic */
+	struct mutex iolock;
+
+	int type;
+
+	struct regmap *regmap;
+	struct regmap *regmap_muic;
+	struct regmap *regmap_haptic;
+
+	struct irq_domain *irq_domain;
+
+	int irq;
+	int irq_gpio;
+	bool wakeup;
+	struct mutex irqlock;
+	int irq_masks_cur[MAX77693_IRQ_GROUP_NR];
+	int irq_masks_cache[MAX77693_IRQ_GROUP_NR];
+};
+
+enum max77693_types {
+	TYPE_MAX77693,
+};
+
+extern int max77693_read_reg(struct regmap *map, u8 reg, u8 *dest);
+extern int max77693_bulk_read(struct regmap *map, u8 reg, int count,
+				u8 *buf);
+extern int max77693_write_reg(struct regmap *map, u8 reg, u8 value);
+extern int max77693_bulk_write(struct regmap *map, u8 reg, int count,
+				u8 *buf);
+extern int max77693_update_reg(struct regmap *map, u8 reg, u8 val, u8 mask);
+
+extern int max77693_irq_init(struct max77693_dev *max77686);
+extern void max77693_irq_exit(struct max77693_dev *max77686);
+extern int max77693_irq_resume(struct max77693_dev *max77686);
+
+#endif /*  __LINUX_MFD_MAX77693_PRIV_H */
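
The private header exposes thin regmap wrappers shared by the max77693 sub-drivers. A read-modify-write sketch on the charger block, using only the accessors declared above; the bit value and mask are made up for the example:

#include <linux/mfd/max77693-private.h>

/* Illustrative only: read a status register, then update a config bit. */
static int example_max77693_touch_charger(struct max77693_dev *max77693)
{
	u8 ok;
	int ret;

	ret = max77693_read_reg(max77693->regmap,
				MAX77693_CHG_REG_CHG_INT_OK, &ok);
	if (ret < 0)
		return ret;

	/* set bit 0 of CHG_CNFG_00 (value and mask are illustrative) */
	return max77693_update_reg(max77693->regmap,
				   MAX77693_CHG_REG_CHG_CNFG_00, 0x01, 0x01);
}
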
diff --git a/include/linux/mfd/max77693.h b/include/linux/mfd/max77693.h
new file mode 100644
index 0000000..1d28ae9
--- /dev/null
+++ b/include/linux/mfd/max77693.h
@@ -0,0 +1,36 @@
+/*
+ * max77693.h - Driver for the Maxim 77693
+ *
+ *  Copyright (C) 2012 Samsung Electronics
+ *  SangYoung Son <hello.son@samsung.com>
+ *
+ * This program is not provided / owned by Maxim Integrated Products.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
+ *
+ * This driver is based on max8997.h
+ *
+ * MAX77693 has PMIC, Charger, Flash LED, Haptic, MUIC devices.
+ * The devices share the same I2C bus and are included in
+ * this MFD driver.
+ */
+
+#ifndef __LINUX_MFD_MAX77693_H
+#define __LINUX_MFD_MAX77693_H
+
+struct max77693_platform_data {
+	int wakeup;
+};
+#endif	/* __LINUX_MFD_MAX77693_H */
diff --git a/include/linux/mfd/sta2x11-mfd.h b/include/linux/mfd/sta2x11-mfd.h
new file mode 100644
index 0000000..d179227
--- /dev/null
+++ b/include/linux/mfd/sta2x11-mfd.h
@@ -0,0 +1,324 @@
+/*
+ * Copyright (c) 2009-2011 Wind River Systems, Inc.
+ * Copyright (c) 2011 ST Microelectronics (Alessandro Rubini)
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
+ * See the GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ *
+ * The STMicroelectronics ConneXt (STA2X11) chip has several unrelated
+ * functions in one PCI endpoint function. This driver simply
+ * registers the platform devices in this iomem region and exports a few
+ * functions to access common registers.
+ */
+
+#ifndef __STA2X11_MFD_H
+#define __STA2X11_MFD_H
+#include <linux/types.h>
+#include <linux/pci.h>
+
+/*
+ * The MFD PCI block includes the GPIO peripherals and other register blocks.
+ * For GPIO, we have 32*4 bits (I use "gsta" for "gpio sta2x11".)
+ */
+#define GSTA_GPIO_PER_BLOCK	32
+#define GSTA_NR_BLOCKS		4
+#define GSTA_NR_GPIO		(GSTA_GPIO_PER_BLOCK * GSTA_NR_BLOCKS)
+
+/* Pinconfig is set by the board definition: altfunc, pull-up, pull-down */
+struct sta2x11_gpio_pdata {
+	unsigned pinconfig[GSTA_NR_GPIO];
+};
+
+/* Macros below lifted from sh_pfc.h, with minor differences */
+#define PINMUX_TYPE_NONE		0
+#define PINMUX_TYPE_FUNCTION		1
+#define PINMUX_TYPE_OUTPUT_LOW		2
+#define PINMUX_TYPE_OUTPUT_HIGH		3
+#define PINMUX_TYPE_INPUT		4
+#define PINMUX_TYPE_INPUT_PULLUP	5
+#define PINMUX_TYPE_INPUT_PULLDOWN	6
+
+/* Give names to GPIO pins, like PXA does, taken from the manual */
+#define STA2X11_GPIO0			0
+#define STA2X11_GPIO1			1
+#define STA2X11_GPIO2			2
+#define STA2X11_GPIO3			3
+#define STA2X11_GPIO4			4
+#define STA2X11_GPIO5			5
+#define STA2X11_GPIO6			6
+#define STA2X11_GPIO7			7
+#define STA2X11_GPIO8_RGBOUT_RED7	8
+#define STA2X11_GPIO9_RGBOUT_RED6	9
+#define STA2X11_GPIO10_RGBOUT_RED5	10
+#define STA2X11_GPIO11_RGBOUT_RED4	11
+#define STA2X11_GPIO12_RGBOUT_RED3	12
+#define STA2X11_GPIO13_RGBOUT_RED2	13
+#define STA2X11_GPIO14_RGBOUT_RED1	14
+#define STA2X11_GPIO15_RGBOUT_RED0	15
+#define STA2X11_GPIO16_RGBOUT_GREEN7	16
+#define STA2X11_GPIO17_RGBOUT_GREEN6	17
+#define STA2X11_GPIO18_RGBOUT_GREEN5	18
+#define STA2X11_GPIO19_RGBOUT_GREEN4	19
+#define STA2X11_GPIO20_RGBOUT_GREEN3	20
+#define STA2X11_GPIO21_RGBOUT_GREEN2	21
+#define STA2X11_GPIO22_RGBOUT_GREEN1	22
+#define STA2X11_GPIO23_RGBOUT_GREEN0	23
+#define STA2X11_GPIO24_RGBOUT_BLUE7	24
+#define STA2X11_GPIO25_RGBOUT_BLUE6	25
+#define STA2X11_GPIO26_RGBOUT_BLUE5	26
+#define STA2X11_GPIO27_RGBOUT_BLUE4	27
+#define STA2X11_GPIO28_RGBOUT_BLUE3	28
+#define STA2X11_GPIO29_RGBOUT_BLUE2	29
+#define STA2X11_GPIO30_RGBOUT_BLUE1	30
+#define STA2X11_GPIO31_RGBOUT_BLUE0	31
+#define STA2X11_GPIO32_RGBOUT_VSYNCH	32
+#define STA2X11_GPIO33_RGBOUT_HSYNCH	33
+#define STA2X11_GPIO34_RGBOUT_DEN	34
+#define STA2X11_GPIO35_ETH_CRS_DV	35
+#define STA2X11_GPIO36_ETH_TXD1		36
+#define STA2X11_GPIO37_ETH_TXD0		37
+#define STA2X11_GPIO38_ETH_TX_EN	38
+#define STA2X11_GPIO39_MDIO		39
+#define STA2X11_GPIO40_ETH_REF_CLK	40
+#define STA2X11_GPIO41_ETH_RXD1		41
+#define STA2X11_GPIO42_ETH_RXD0		42
+#define STA2X11_GPIO43_MDC		43
+#define STA2X11_GPIO44_CAN_TX		44
+#define STA2X11_GPIO45_CAN_RX		45
+#define STA2X11_GPIO46_MLB_DAT		46
+#define STA2X11_GPIO47_MLB_SIG		47
+#define STA2X11_GPIO48_SPI0_CLK		48
+#define STA2X11_GPIO49_SPI0_TXD		49
+#define STA2X11_GPIO50_SPI0_RXD		50
+#define STA2X11_GPIO51_SPI0_FRM		51
+#define STA2X11_GPIO52_SPI1_CLK		52
+#define STA2X11_GPIO53_SPI1_TXD		53
+#define STA2X11_GPIO54_SPI1_RXD		54
+#define STA2X11_GPIO55_SPI1_FRM		55
+#define STA2X11_GPIO56_SPI2_CLK		56
+#define STA2X11_GPIO57_SPI2_TXD		57
+#define STA2X11_GPIO58_SPI2_RXD		58
+#define STA2X11_GPIO59_SPI2_FRM		59
+#define STA2X11_GPIO60_I2C0_SCL		60
+#define STA2X11_GPIO61_I2C0_SDA		61
+#define STA2X11_GPIO62_I2C1_SCL		62
+#define STA2X11_GPIO63_I2C1_SDA		63
+#define STA2X11_GPIO64_I2C2_SCL		64
+#define STA2X11_GPIO65_I2C2_SDA		65
+#define STA2X11_GPIO66_I2C3_SCL		66
+#define STA2X11_GPIO67_I2C3_SDA		67
+#define STA2X11_GPIO68_MSP0_RCK		68
+#define STA2X11_GPIO69_MSP0_RXD		69
+#define STA2X11_GPIO70_MSP0_RFS		70
+#define STA2X11_GPIO71_MSP0_TCK		71
+#define STA2X11_GPIO72_MSP0_TXD		72
+#define STA2X11_GPIO73_MSP0_TFS		73
+#define STA2X11_GPIO74_MSP0_SCK		74
+#define STA2X11_GPIO75_MSP1_CK		75
+#define STA2X11_GPIO76_MSP1_RXD		76
+#define STA2X11_GPIO77_MSP1_FS		77
+#define STA2X11_GPIO78_MSP1_TXD		78
+#define STA2X11_GPIO79_MSP2_CK		79
+#define STA2X11_GPIO80_MSP2_RXD		80
+#define STA2X11_GPIO81_MSP2_FS		81
+#define STA2X11_GPIO82_MSP2_TXD		82
+#define STA2X11_GPIO83_MSP3_CK		83
+#define STA2X11_GPIO84_MSP3_RXD		84
+#define STA2X11_GPIO85_MSP3_FS		85
+#define STA2X11_GPIO86_MSP3_TXD		86
+#define STA2X11_GPIO87_MSP4_CK		87
+#define STA2X11_GPIO88_MSP4_RXD		88
+#define STA2X11_GPIO89_MSP4_FS		89
+#define STA2X11_GPIO90_MSP4_TXD		90
+#define STA2X11_GPIO91_MSP5_CK		91
+#define STA2X11_GPIO92_MSP5_RXD		92
+#define STA2X11_GPIO93_MSP5_FS		93
+#define STA2X11_GPIO94_MSP5_TXD		94
+#define STA2X11_GPIO95_SDIO3_DAT3	95
+#define STA2X11_GPIO96_SDIO3_DAT2	96
+#define STA2X11_GPIO97_SDIO3_DAT1	97
+#define STA2X11_GPIO98_SDIO3_DAT0	98
+#define STA2X11_GPIO99_SDIO3_CLK	99
+#define STA2X11_GPIO100_SDIO3_CMD	100
+#define STA2X11_GPIO101			101
+#define STA2X11_GPIO102			102
+#define STA2X11_GPIO103			103
+#define STA2X11_GPIO104			104
+#define STA2X11_GPIO105_SDIO2_DAT3	105
+#define STA2X11_GPIO106_SDIO2_DAT2	106
+#define STA2X11_GPIO107_SDIO2_DAT1	107
+#define STA2X11_GPIO108_SDIO2_DAT0	108
+#define STA2X11_GPIO109_SDIO2_CLK	109
+#define STA2X11_GPIO110_SDIO2_CMD	110
+#define STA2X11_GPIO111			111
+#define STA2X11_GPIO112			112
+#define STA2X11_GPIO113			113
+#define STA2X11_GPIO114			114
+#define STA2X11_GPIO115_SDIO1_DAT3	115
+#define STA2X11_GPIO116_SDIO1_DAT2	116
+#define STA2X11_GPIO117_SDIO1_DAT1	117
+#define STA2X11_GPIO118_SDIO1_DAT0	118
+#define STA2X11_GPIO119_SDIO1_CLK	119
+#define STA2X11_GPIO120_SDIO1_CMD	120
+#define STA2X11_GPIO121			121
+#define STA2X11_GPIO122			122
+#define STA2X11_GPIO123			123
+#define STA2X11_GPIO124			124
+#define STA2X11_GPIO125_UART2_TXD	125
+#define STA2X11_GPIO126_UART2_RXD	126
+#define STA2X11_GPIO127_UART3_TXD	127
+
+/*
+ * The APB bridge has its own registers, needed by our users as well.
+ * They are accessed with the following read/mask/write function.
+ */
+u32 sta2x11_apbreg_mask(struct pci_dev *pdev, u32 reg, u32 mask, u32 val);
+
+/* CAN and MLB */
+#define APBREG_BSR	0x00	/* Bridge Status Reg */
+#define APBREG_PAER	0x08	/* Peripherals Address Error Reg */
+#define APBREG_PWAC	0x20	/* Peripheral Write Access Control reg */
+#define APBREG_PRAC	0x40	/* Peripheral Read Access Control reg */
+#define APBREG_PCG	0x60	/* Peripheral Clock Gating Reg */
+#define APBREG_PUR	0x80	/* Peripheral Under Reset Reg */
+#define APBREG_EMU_PCG	0xA0	/* Emulator Peripheral Clock Gating Reg */
+
+#define APBREG_CAN	(1 << 1)
+#define APBREG_MLB	(1 << 3)
+
+/* SARAC */
+#define APBREG_BSR_SARAC     0x100 /* Bridge Status Reg */
+#define APBREG_PAER_SARAC    0x108 /* Peripherals Address Error Reg */
+#define APBREG_PWAC_SARAC    0x120 /* Peripheral Write Access Control reg */
+#define APBREG_PRAC_SARAC    0x140 /* Peripheral Read Access Control reg */
+#define APBREG_PCG_SARAC     0x160 /* Peripheral Clock Gating Reg */
+#define APBREG_PUR_SARAC     0x180 /* Peripheral Under Reset Reg */
+#define APBREG_EMU_PCG_SARAC 0x1A0 /* Emulator Peripheral Clock Gating Reg */
+
+#define APBREG_SARAC	(1 << 2)
+
+/*
+ * The system controller has its own registers. Some of these are accessed
+ * by our users as well, using the following read/mask/write function.
+ */
+u32 sta2x11_sctl_mask(struct pci_dev *pdev, u32 reg, u32 mask, u32 val);
+
+#define SCTL_SCCTL		0x00	/* System controller control register */
+#define SCTL_ARMCFG		0x04	/* ARM configuration register */
+#define SCTL_SCPLLCTL		0x08	/* PLL control status register */
+#define SCTL_SCPLLFCTRL		0x0c	/* PLL frequency control register */
+#define SCTL_SCRESFRACT		0x10	/* PLL fractional input register */
+#define SCTL_SCRESCTRL1		0x14	/* Peripheral reset control 1 */
+#define SCTL_SCRESXTRL2		0x18	/* Peripheral reset control 2 */
+#define SCTL_SCPEREN0		0x1c	/* Peripheral clock enable register 0 */
+#define SCTL_SCPEREN1		0x20	/* Peripheral clock enable register 1 */
+#define SCTL_SCPEREN2		0x24	/* Peripheral clock enable register 2 */
+#define SCTL_SCGRST		0x28	/* Peripheral global reset */
+#define SCTL_SCPCIPMCR1		0x30	/* PCI power management control 1 */
+#define SCTL_SCPCIPMCR2		0x34	/* PCI power management control 2 */
+#define SCTL_SCPCIPMSR1		0x38	/* PCI power management status 1 */
+#define SCTL_SCPCIPMSR2		0x3c	/* PCI power management status 2 */
+#define SCTL_SCPCIPMSR3		0x40	/* PCI power management status 3 */
+#define SCTL_SCINTREN		0x44	/* Interrupt enable */
+#define SCTL_SCRISR		0x48	/* RAW interrupt status */
+#define SCTL_SCCLKSTAT0		0x4c	/* Peripheral clocks status 0 */
+#define SCTL_SCCLKSTAT1		0x50	/* Peripheral clocks status 1 */
+#define SCTL_SCCLKSTAT2		0x54	/* Peripheral clocks status 2 */
+#define SCTL_SCRSTSTA		0x58	/* Reset status register */
+
+#define SCTL_SCRESCTRL1_USB_PHY_POR	(1 << 0)
+#define SCTL_SCRESCTRL1_USB_OTG	(1 << 1)
+#define SCTL_SCRESCTRL1_USB_HRST	(1 << 2)
+#define SCTL_SCRESCTRL1_USB_PHY_HOST	(1 << 3)
+#define SCTL_SCRESCTRL1_SATAII	(1 << 4)
+#define SCTL_SCRESCTRL1_VIP		(1 << 5)
+#define SCTL_SCRESCTRL1_PER_MMC0	(1 << 6)
+#define SCTL_SCRESCTRL1_PER_MMC1	(1 << 7)
+#define SCTL_SCRESCTRL1_PER_GPIO0	(1 << 8)
+#define SCTL_SCRESCTRL1_PER_GPIO1	(1 << 9)
+#define SCTL_SCRESCTRL1_PER_GPIO2	(1 << 10)
+#define SCTL_SCRESCTRL1_PER_GPIO3	(1 << 11)
+#define SCTL_SCRESCTRL1_PER_MTU0	(1 << 12)
+#define SCTL_SCRESCTRL1_KER_SPI0	(1 << 13)
+#define SCTL_SCRESCTRL1_KER_SPI1	(1 << 14)
+#define SCTL_SCRESCTRL1_KER_SPI2	(1 << 15)
+#define SCTL_SCRESCTRL1_KER_MCI0	(1 << 16)
+#define SCTL_SCRESCTRL1_KER_MCI1	(1 << 17)
+#define SCTL_SCRESCTRL1_PRE_HSI2C0	(1 << 18)
+#define SCTL_SCRESCTRL1_PER_HSI2C1	(1 << 19)
+#define SCTL_SCRESCTRL1_PER_HSI2C2	(1 << 20)
+#define SCTL_SCRESCTRL1_PER_HSI2C3	(1 << 21)
+#define SCTL_SCRESCTRL1_PER_MSP0	(1 << 22)
+#define SCTL_SCRESCTRL1_PER_MSP1	(1 << 23)
+#define SCTL_SCRESCTRL1_PER_MSP2	(1 << 24)
+#define SCTL_SCRESCTRL1_PER_MSP3	(1 << 25)
+#define SCTL_SCRESCTRL1_PER_MSP4	(1 << 26)
+#define SCTL_SCRESCTRL1_PER_MSP5	(1 << 27)
+#define SCTL_SCRESCTRL1_PER_MMC	(1 << 28)
+#define SCTL_SCRESCTRL1_KER_MSP0	(1 << 29)
+#define SCTL_SCRESCTRL1_KER_MSP1	(1 << 30)
+#define SCTL_SCRESCTRL1_KER_MSP2	(1 << 31)
+
+#define SCTL_SCPEREN0_UART0		(1 << 0)
+#define SCTL_SCPEREN0_UART1		(1 << 1)
+#define SCTL_SCPEREN0_UART2		(1 << 2)
+#define SCTL_SCPEREN0_UART3		(1 << 3)
+#define SCTL_SCPEREN0_MSP0		(1 << 4)
+#define SCTL_SCPEREN0_MSP1		(1 << 5)
+#define SCTL_SCPEREN0_MSP2		(1 << 6)
+#define SCTL_SCPEREN0_MSP3		(1 << 7)
+#define SCTL_SCPEREN0_MSP4		(1 << 8)
+#define SCTL_SCPEREN0_MSP5		(1 << 9)
+#define SCTL_SCPEREN0_SPI0		(1 << 10)
+#define SCTL_SCPEREN0_SPI1		(1 << 11)
+#define SCTL_SCPEREN0_SPI2		(1 << 12)
+#define SCTL_SCPEREN0_I2C0		(1 << 13)
+#define SCTL_SCPEREN0_I2C1		(1 << 14)
+#define SCTL_SCPEREN0_I2C2		(1 << 15)
+#define SCTL_SCPEREN0_I2C3		(1 << 16)
+#define SCTL_SCPEREN0_SVDO_LVDS		(1 << 17)
+#define SCTL_SCPEREN0_USB_HOST		(1 << 18)
+#define SCTL_SCPEREN0_USB_OTG		(1 << 19)
+#define SCTL_SCPEREN0_MCI0		(1 << 20)
+#define SCTL_SCPEREN0_MCI1		(1 << 21)
+#define SCTL_SCPEREN0_MCI2		(1 << 22)
+#define SCTL_SCPEREN0_MCI3		(1 << 23)
+#define SCTL_SCPEREN0_SATA		(1 << 24)
+#define SCTL_SCPEREN0_ETHERNET		(1 << 25)
+#define SCTL_SCPEREN0_VIC		(1 << 26)
+#define SCTL_SCPEREN0_DMA_AUDIO		(1 << 27)
+#define SCTL_SCPEREN0_DMA_SOC		(1 << 28)
+#define SCTL_SCPEREN0_RAM		(1 << 29)
+#define SCTL_SCPEREN0_VIP		(1 << 30)
+#define SCTL_SCPEREN0_ARM		(1 << 31)
+
+#define SCTL_SCPEREN1_UART0		(1 << 0)
+#define SCTL_SCPEREN1_UART1		(1 << 1)
+#define SCTL_SCPEREN1_UART2		(1 << 2)
+#define SCTL_SCPEREN1_UART3		(1 << 3)
+#define SCTL_SCPEREN1_MSP0		(1 << 4)
+#define SCTL_SCPEREN1_MSP1		(1 << 5)
+#define SCTL_SCPEREN1_MSP2		(1 << 6)
+#define SCTL_SCPEREN1_MSP3		(1 << 7)
+#define SCTL_SCPEREN1_MSP4		(1 << 8)
+#define SCTL_SCPEREN1_MSP5		(1 << 9)
+#define SCTL_SCPEREN1_SPI0		(1 << 10)
+#define SCTL_SCPEREN1_SPI1		(1 << 11)
+#define SCTL_SCPEREN1_SPI2		(1 << 12)
+#define SCTL_SCPEREN1_I2C0		(1 << 13)
+#define SCTL_SCPEREN1_I2C1		(1 << 14)
+#define SCTL_SCPEREN1_I2C2		(1 << 15)
+#define SCTL_SCPEREN1_I2C3		(1 << 16)
+#define SCTL_SCPEREN1_USB_PHY		(1 << 17)
+
+#endif /* __STA2X11_MFD_H */
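
Both sta2x11_apbreg_mask() and sta2x11_sctl_mask() follow the read/mask/write pattern described in the header comments. A sketch enabling one peripheral clock, under the assumption that bits in the mask argument are cleared before the value is OR-ed back in:

#include <linux/mfd/sta2x11-mfd.h>

/* Illustrative only: force the UART0 clock-enable bit to 1. */
static void example_sta2x11_enable_uart0(struct pci_dev *pdev)
{
	sta2x11_sctl_mask(pdev, SCTL_SCPEREN0,
			  SCTL_SCPEREN0_UART0, SCTL_SCPEREN0_UART0);
}
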
diff --git a/include/linux/mfd/stmpe.h b/include/linux/mfd/stmpe.h
index 8516fd1..f8d5b4d 100644
--- a/include/linux/mfd/stmpe.h
+++ b/include/linux/mfd/stmpe.h
@@ -117,7 +117,7 @@
  * @no_autorepeat: disable key autorepeat
  */
 struct stmpe_keypad_platform_data {
-	struct matrix_keymap_data *keymap_data;
+	const struct matrix_keymap_data *keymap_data;
 	unsigned int debounce_ms;
 	unsigned int scan_count;
 	bool no_autorepeat;
diff --git a/include/linux/mfd/tps65910.h b/include/linux/mfd/tps65910.h
index 1c6c286..dd8dc0a 100644
--- a/include/linux/mfd/tps65910.h
+++ b/include/linux/mfd/tps65910.h
@@ -18,6 +18,7 @@
 #define __LINUX_MFD_TPS65910_H
 
 #include <linux/gpio.h>
+#include <linux/regmap.h>
 
 /* TPS chip id list */
 #define TPS65910			0
@@ -783,6 +784,18 @@
 #define TPS65910_SLEEP_CONTROL_EXT_INPUT_EN3		0x4
 #define TPS65911_SLEEP_CONTROL_EXT_INPUT_SLEEP		0x8
 
+/*
+ * Sleep keepon data: Maintains the state in sleep mode
+ * @therm_keepon: Keep on the thermal monitoring in sleep state.
+ * @clkout32k_keepon: Keep on the 32KHz clock output in sleep state.
+ * @i2chs_keepon: Keep on high speed internal clock in sleep state.
+ */
+struct tps65910_sleep_keepon_data {
+	unsigned therm_keepon:1;
+	unsigned clkout32k_keepon:1;
+	unsigned i2chs_keepon:1;
+};
+
 /**
  * struct tps65910_board
  * Board platform data may be used to initialize regulators.
@@ -794,6 +807,8 @@
 	int irq_base;
 	int vmbch_threshold;
 	int vmbch2_threshold;
+	bool en_dev_slp;
+	struct tps65910_sleep_keepon_data *slp_keepon;
 	bool en_gpio_sleep[TPS6591X_MAX_NUM_GPIO];
 	unsigned long regulator_ext_sleep_control[TPS65910_NUM_REGS];
 	struct regulator_init_data *tps65910_pmic_init_data[TPS65910_NUM_REGS];
@@ -809,16 +824,14 @@
 	struct regmap *regmap;
 	struct mutex io_mutex;
 	unsigned int id;
-	int (*read)(struct tps65910 *tps65910, u8 reg, int size, void *dest);
-	int (*write)(struct tps65910 *tps65910, u8 reg, int size, void *src);
 
 	/* Client devices */
 	struct tps65910_pmic *pmic;
 	struct tps65910_rtc *rtc;
 	struct tps65910_power *power;
 
-	/* GPIO Handling */
-	struct gpio_chip gpio;
+	/* Device node parsed board data */
+	struct tps65910_board *of_plat_data;
 
 	/* IRQ Handling */
 	struct mutex irq_lock;
@@ -826,6 +839,7 @@
 	int irq_base;
 	int irq_num;
 	u32 irq_mask;
+	struct irq_domain *domain;
 };
 
 struct tps65910_platform_data {
@@ -833,9 +847,6 @@
 	int irq_base;
 };
 
-int tps65910_set_bits(struct tps65910 *tps65910, u8 reg, u8 mask);
-int tps65910_clear_bits(struct tps65910 *tps65910, u8 reg, u8 mask);
-void tps65910_gpio_init(struct tps65910 *tps65910, int gpio_base);
 int tps65910_irq_init(struct tps65910 *tps65910, int irq,
 		struct tps65910_platform_data *pdata);
 int tps65910_irq_exit(struct tps65910 *tps65910);
@@ -845,4 +856,28 @@
 	return tps65910->id;
 }
 
+static inline int tps65910_reg_read(struct tps65910 *tps65910, u8 reg,
+		unsigned int *val)
+{
+	return regmap_read(tps65910->regmap, reg, val);
+}
+
+static inline int tps65910_reg_write(struct tps65910 *tps65910, u8 reg,
+		unsigned int val)
+{
+	return regmap_write(tps65910->regmap, reg, val);
+}
+
+static inline int tps65910_reg_set_bits(struct tps65910 *tps65910, u8 reg,
+		u8 mask)
+{
+	return regmap_update_bits(tps65910->regmap, reg, mask, mask);
+}
+
+static inline int tps65910_reg_clear_bits(struct tps65910 *tps65910, u8 reg,
+		u8 mask)
+{
+	return regmap_update_bits(tps65910->regmap, reg, mask, 0);
+}
+
 #endif /*  __LINUX_MFD_TPS65910_H */
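
With the read/write callbacks gone, tps65910.h now routes all register access through regmap via the static inlines just above. A small read-modify-write sketch built only on those helpers (the register and bit arguments are left to the caller, so nothing chip-specific is assumed):

#include <linux/mfd/tps65910.h>

/* Illustrative only: read a register, pulse one bit high then low. */
static int example_tps65910_pulse_bit(struct tps65910 *tps65910, u8 reg, u8 bit)
{
	unsigned int val;
	int ret;

	ret = tps65910_reg_read(tps65910, reg, &val);
	if (ret < 0)
		return ret;

	ret = tps65910_reg_set_bits(tps65910, reg, bit);
	if (ret < 0)
		return ret;

	return tps65910_reg_clear_bits(tps65910, reg, bit);
}
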
diff --git a/include/linux/mfd/twl6040.h b/include/linux/mfd/twl6040.h
index b15b5f0..6659487 100644
--- a/include/linux/mfd/twl6040.h
+++ b/include/linux/mfd/twl6040.h
@@ -27,6 +27,7 @@
 
 #include <linux/interrupt.h>
 #include <linux/mfd/core.h>
+#include <linux/regulator/consumer.h>
 
 #define TWL6040_REG_ASICID		0x01
 #define TWL6040_REG_ASICREV		0x02
@@ -203,6 +204,7 @@
 struct twl6040 {
 	struct device *dev;
 	struct regmap *regmap;
+	struct regulator_bulk_data supplies[2]; /* supplies for vio, v2v1 */
 	struct mutex mutex;
 	struct mutex io_mutex;
 	struct mutex irq_mutex;
diff --git a/include/linux/mfd/wm831x/core.h b/include/linux/mfd/wm831x/core.h
index 4b12118..4a3b83a 100644
--- a/include/linux/mfd/wm831x/core.h
+++ b/include/linux/mfd/wm831x/core.h
@@ -17,6 +17,7 @@
 
 #include <linux/completion.h>
 #include <linux/interrupt.h>
+#include <linux/irqdomain.h>
 #include <linux/list.h>
 #include <linux/regmap.h>
 
@@ -338,6 +339,7 @@
 #define WM831X_FLL_CLK_SRC_WIDTH                     2  /* FLL_CLK_SRC - [1:0] */
 
 struct regulator_dev;
+struct irq_domain;
 
 #define WM831X_NUM_IRQ_REGS 5
 #define WM831X_NUM_GPIO_REGS 16
@@ -367,7 +369,7 @@
 
 	int irq;  /* Our chip IRQ */
 	struct mutex irq_lock;
-	int irq_base;
+	struct irq_domain *irq_domain;
 	int irq_masks_cur[WM831X_NUM_IRQ_REGS];   /* Currently active value */
 	int irq_masks_cache[WM831X_NUM_IRQ_REGS]; /* Cached hardware value */
 
@@ -382,7 +384,8 @@
 
 	/* Used by the interrupt controller code to post writes */
 	int gpio_update[WM831X_NUM_GPIO_REGS];
-	bool gpio_level[WM831X_NUM_GPIO_REGS];
+	bool gpio_level_high[WM831X_NUM_GPIO_REGS];
+	bool gpio_level_low[WM831X_NUM_GPIO_REGS];
 
 	struct mutex auxadc_lock;
 	struct list_head auxadc_pending;
@@ -417,6 +420,11 @@
 void wm831x_irq_exit(struct wm831x *wm831x);
 void wm831x_auxadc_init(struct wm831x *wm831x);
 
+static inline int wm831x_irq(struct wm831x *wm831x, int irq)
+{
+	return irq_create_mapping(wm831x->irq_domain, irq);
+}
+
 extern struct regmap_config wm831x_regmap_config;
 
 #endif
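
wm831x replaces its fixed irq_base with an irq_domain; wm831x_irq() maps a chip-local interrupt number to a Linux virq on demand. A request sketch, assuming the caller already knows the local IRQ number (the handler and name below are illustrative):

#include <linux/interrupt.h>
#include <linux/mfd/wm831x/core.h>

static irqreturn_t example_wm831x_thread(int irq, void *data)
{
	/* a real handler would inspect the chip here */
	return IRQ_HANDLED;
}

/* Illustrative only: translate a chip-local IRQ and request it. */
static int example_wm831x_request(struct wm831x *wm831x, int hw_irq)
{
	int virq = wm831x_irq(wm831x, hw_irq);

	return request_threaded_irq(virq, NULL, example_wm831x_thread,
				    IRQF_ONESHOT, "wm831x-example", wm831x);
}
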
diff --git a/include/linux/mfd/wm8350/core.h b/include/linux/mfd/wm8350/core.h
index 98fcc97..9192b64 100644
--- a/include/linux/mfd/wm8350/core.h
+++ b/include/linux/mfd/wm8350/core.h
@@ -602,6 +602,7 @@
 extern const u16 wm8352_mode3_defaults[];
 
 struct wm8350;
+struct regmap;
 
 struct wm8350_hwmon {
 	struct platform_device *pdev;
@@ -612,13 +613,7 @@
 	struct device *dev;
 
 	/* device IO */
-	union {
-		struct i2c_client *i2c_client;
-		struct spi_device *spi_device;
-	};
-	int (*read_dev)(struct wm8350 *wm8350, char reg, int size, void *dest);
-	int (*write_dev)(struct wm8350 *wm8350, char reg, int size,
-			 void *src);
+	struct regmap *regmap;
 	u16 *reg_cache;
 
 	struct mutex auxadc_mutex;
diff --git a/include/linux/mfd/wm8400-private.h b/include/linux/mfd/wm8400-private.h
index 0147b69..2de565b 100644
--- a/include/linux/mfd/wm8400-private.h
+++ b/include/linux/mfd/wm8400-private.h
@@ -24,19 +24,14 @@
 #include <linux/mfd/wm8400.h>
 #include <linux/mutex.h>
 #include <linux/platform_device.h>
-
-struct regmap;
+#include <linux/regmap.h>
 
 #define WM8400_REGISTER_COUNT 0x55
 
 struct wm8400 {
 	struct device *dev;
-
-	struct mutex io_lock;
 	struct regmap *regmap;
 
-	u16 reg_cache[WM8400_REGISTER_COUNT];
-
 	struct platform_device regulators[6];
 };
 
@@ -930,6 +925,11 @@
 
 u16 wm8400_reg_read(struct wm8400 *wm8400, u8 reg);
 int wm8400_block_read(struct wm8400 *wm8400, u8 reg, int count, u16 *data);
-int wm8400_set_bits(struct wm8400 *wm8400, u8 reg, u16 mask, u16 val);
+
+static inline int wm8400_set_bits(struct wm8400 *wm8400, u8 reg,
+				  u16 mask, u16 val)
+{
+	return regmap_update_bits(wm8400->regmap, reg, mask, val);
+}
 
 #endif
diff --git a/include/linux/mfd/wm8994/core.h b/include/linux/mfd/wm8994/core.h
index 6695c3e..1f17330 100644
--- a/include/linux/mfd/wm8994/core.h
+++ b/include/linux/mfd/wm8994/core.h
@@ -57,6 +57,7 @@
 
 	enum wm8994_type type;
 	int revision;
+	int cust_id;
 
 	struct device *dev;
 	struct regmap *regmap;
diff --git a/include/linux/mfd/wm8994/registers.h b/include/linux/mfd/wm8994/registers.h
index 86e6a03..0535489 100644
--- a/include/linux/mfd/wm8994/registers.h
+++ b/include/linux/mfd/wm8994/registers.h
@@ -2212,6 +2212,9 @@
 /*
  * R256 (0x100) - Chip Revision
  */
+#define WM8994_CUST_ID_MASK                     0xFF00  /* CUST_ID - [15:8] */
+#define WM8994_CUST_ID_SHIFT                         8  /* CUST_ID - [15:8] */
+#define WM8994_CUST_ID_WIDTH                         8  /* CUST_ID - [15:8] */
 #define WM8994_CHIP_REV_MASK                    0x000F  /* CHIP_REV - [3:0] */
 #define WM8994_CHIP_REV_SHIFT                        0  /* CHIP_REV - [3:0] */
 #define WM8994_CHIP_REV_WIDTH                        4  /* CHIP_REV - [3:0] */
diff --git a/include/linux/mlx4/device.h b/include/linux/mlx4/device.h
index 6e27fa9..6a8f002 100644
--- a/include/linux/mlx4/device.h
+++ b/include/linux/mlx4/device.h
@@ -64,6 +64,7 @@
 	MLX4_MAX_NUM_PF		= 16,
 	MLX4_MAX_NUM_VF		= 64,
 	MLX4_MFUNC_MAX		= 80,
+	MLX4_MAX_EQ_NUM		= 1024,
 	MLX4_MFUNC_EQ_NUM	= 4,
 	MLX4_MFUNC_MAX_EQES     = 8,
 	MLX4_MFUNC_EQE_MASK     = (MLX4_MFUNC_MAX_EQES - 1)
@@ -239,6 +240,10 @@
 	return (major << 32) | (minor << 16) | subminor;
 }
 
+struct mlx4_phys_caps {
+	u32			num_phys_eqs;
+};
+
 struct mlx4_caps {
 	u64			fw_ver;
 	u32			function;
@@ -499,6 +504,7 @@
 	unsigned long		flags;
 	unsigned long		num_slaves;
 	struct mlx4_caps	caps;
+	struct mlx4_phys_caps	phys_caps;
 	struct radix_tree_root	qp_table_tree;
 	u8			rev_id;
 	char			board_id[MLX4_BOARD_ID_LEN];
diff --git a/include/linux/mm.h b/include/linux/mm.h
index 7d5c37f..b36d08c 100644
--- a/include/linux/mm.h
+++ b/include/linux/mm.h
@@ -321,6 +321,7 @@
 static inline void compound_lock(struct page *page)
 {
 #ifdef CONFIG_TRANSPARENT_HUGEPAGE
+	VM_BUG_ON(PageSlab(page));
 	bit_spin_lock(PG_compound_lock, &page->flags);
 #endif
 }
@@ -328,6 +329,7 @@
 static inline void compound_unlock(struct page *page)
 {
 #ifdef CONFIG_TRANSPARENT_HUGEPAGE
+	VM_BUG_ON(PageSlab(page));
 	bit_spin_unlock(PG_compound_lock, &page->flags);
 #endif
 }
@@ -871,8 +873,6 @@
 extern void show_free_areas(unsigned int flags);
 extern bool skip_free_areas_node(unsigned int flags, int nid);
 
-int shmem_lock(struct file *file, int lock, struct user_struct *user);
-struct file *shmem_file_setup(const char *name, loff_t size, unsigned long flags);
 int shmem_zero_setup(struct vm_area_struct *);
 
 extern int can_do_mlock(void);
@@ -951,11 +951,9 @@
 extern void truncate_pagecache(struct inode *inode, loff_t old, loff_t new);
 extern void truncate_setsize(struct inode *inode, loff_t newsize);
 extern int vmtruncate(struct inode *inode, loff_t offset);
-extern int vmtruncate_range(struct inode *inode, loff_t offset, loff_t end);
 void truncate_pagecache_range(struct inode *inode, loff_t offset, loff_t end);
 int truncate_inode_page(struct address_space *mapping, struct page *page);
 int generic_error_remove_page(struct address_space *mapping, struct page *page);
-
 int invalidate_inode_page(struct page *page);
 
 #ifdef CONFIG_MMU
@@ -1394,7 +1392,7 @@
 extern unsigned long mmap_region(struct file *file, unsigned long addr,
 	unsigned long len, unsigned long flags,
 	vm_flags_t vm_flags, unsigned long pgoff);
-extern unsigned long do_mmap(struct file *, unsigned long,
+extern unsigned long do_mmap_pgoff(struct file *, unsigned long,
         unsigned long, unsigned long,
         unsigned long, unsigned long);
 extern int do_munmap(struct mm_struct *, unsigned long, size_t);
diff --git a/include/linux/mm_inline.h b/include/linux/mm_inline.h
index 227fd3e..1397ccf 100644
--- a/include/linux/mm_inline.h
+++ b/include/linux/mm_inline.h
@@ -21,22 +21,22 @@
 	return !PageSwapBacked(page);
 }
 
-static inline void
-add_page_to_lru_list(struct zone *zone, struct page *page, enum lru_list lru)
+static __always_inline void add_page_to_lru_list(struct page *page,
+				struct lruvec *lruvec, enum lru_list lru)
 {
-	struct lruvec *lruvec;
-
-	lruvec = mem_cgroup_lru_add_list(zone, page, lru);
+	int nr_pages = hpage_nr_pages(page);
+	mem_cgroup_update_lru_size(lruvec, lru, nr_pages);
 	list_add(&page->lru, &lruvec->lists[lru]);
-	__mod_zone_page_state(zone, NR_LRU_BASE + lru, hpage_nr_pages(page));
+	__mod_zone_page_state(lruvec_zone(lruvec), NR_LRU_BASE + lru, nr_pages);
 }
 
-static inline void
-del_page_from_lru_list(struct zone *zone, struct page *page, enum lru_list lru)
+static __always_inline void del_page_from_lru_list(struct page *page,
+				struct lruvec *lruvec, enum lru_list lru)
 {
-	mem_cgroup_lru_del_list(page, lru);
+	int nr_pages = hpage_nr_pages(page);
+	mem_cgroup_update_lru_size(lruvec, lru, -nr_pages);
 	list_del(&page->lru);
-	__mod_zone_page_state(zone, NR_LRU_BASE + lru, -hpage_nr_pages(page));
+	__mod_zone_page_state(lruvec_zone(lruvec), NR_LRU_BASE + lru, -nr_pages);
 }
 
 /**
@@ -61,7 +61,7 @@
  * Returns the LRU list a page was on, as an index into the array of LRU
  * lists; and clears its Unevictable or Active flags, ready for freeing.
  */
-static inline enum lru_list page_off_lru(struct page *page)
+static __always_inline enum lru_list page_off_lru(struct page *page)
 {
 	enum lru_list lru;
 
@@ -85,7 +85,7 @@
  * Returns the LRU list a page should be on, as an index
  * into the array of LRU lists.
  */
-static inline enum lru_list page_lru(struct page *page)
+static __always_inline enum lru_list page_lru(struct page *page)
 {
 	enum lru_list lru;
 
diff --git a/include/linux/mm_types.h b/include/linux/mm_types.h
index 26574c7..704a626 100644
--- a/include/linux/mm_types.h
+++ b/include/linux/mm_types.h
@@ -57,8 +57,18 @@
 		};
 
 		union {
+#if defined(CONFIG_HAVE_CMPXCHG_DOUBLE) && \
+	defined(CONFIG_HAVE_ALIGNED_STRUCT_PAGE)
 			/* Used for cmpxchg_double in slub */
 			unsigned long counters;
+#else
+			/*
+			 * Keep _count separate from slub cmpxchg_double data.
+			 * As the rest of the double word is protected by
+			 * The rest of the double word is protected by
+			 * slab_lock but _count is not.
+			unsigned counters;
+#endif
 
 			struct {
 
@@ -345,17 +355,6 @@
 	/* Architecture-specific MM context */
 	mm_context_t context;
 
-	/* Swap token stuff */
-	/*
-	 * Last value of global fault stamp as seen by this process.
-	 * In other words, this value gives an indication of how long
-	 * it has been since this task got the token.
-	 * Look at mm/thrash.c
-	 */
-	unsigned int faultstamp;
-	unsigned int token_priority;
-	unsigned int last_interval;
-
 	unsigned long flags; /* Must use atomic bitops to access the bits */
 
 	struct core_state *core_state; /* coredumping support */
diff --git a/include/linux/mmc/card.h b/include/linux/mmc/card.h
index 629b823..d76513b 100644
--- a/include/linux/mmc/card.h
+++ b/include/linux/mmc/card.h
@@ -58,6 +58,10 @@
 	unsigned int		generic_cmd6_time;	/* Units: 10ms */
 	unsigned int            power_off_longtime;     /* Units: ms */
 	unsigned int		hs_max_dtr;
+#define MMC_HIGH_26_MAX_DTR	26000000
+#define MMC_HIGH_52_MAX_DTR	52000000
+#define MMC_HIGH_DDR_MAX_DTR	52000000
+#define MMC_HS200_MAX_DTR	200000000
 	unsigned int		sectors;
 	unsigned int		card_type;
 	unsigned int		hc_erase_size;		/* In sectors */
diff --git a/include/linux/mmc/dw_mmc.h b/include/linux/mmc/dw_mmc.h
index 8f66e28..7a7ebd3 100644
--- a/include/linux/mmc/dw_mmc.h
+++ b/include/linux/mmc/dw_mmc.h
@@ -125,6 +125,7 @@
 	struct mmc_request	*mrq;
 	struct mmc_command	*cmd;
 	struct mmc_data		*data;
+	struct workqueue_struct	*card_workqueue;
 
 	/* DMA interface members*/
 	int			use_dma;
diff --git a/include/linux/mmc/host.h b/include/linux/mmc/host.h
index cbde4b7..0707d22 100644
--- a/include/linux/mmc/host.h
+++ b/include/linux/mmc/host.h
@@ -297,6 +297,7 @@
 
 	unsigned int		sdio_irqs;
 	struct task_struct	*sdio_irq_thread;
+	bool			sdio_irq_pending;
 	atomic_t		sdio_irq_thread_abort;
 
 	mmc_pm_flag_t		pm_flags;	/* requested pm features */
@@ -352,6 +353,7 @@
 static inline void mmc_signal_sdio_irq(struct mmc_host *host)
 {
 	host->ops->enable_sdio_irq(host, 0);
+	host->sdio_irq_pending = true;
 	wake_up_process(host->sdio_irq_thread);
 }
 
diff --git a/include/linux/mmc/mmc.h b/include/linux/mmc/mmc.h
index b822a2c..d425cab 100644
--- a/include/linux/mmc/mmc.h
+++ b/include/linux/mmc/mmc.h
@@ -354,66 +354,6 @@
 #define EXT_CSD_CARD_TYPE_SDR_1_2V	(1<<5)	/* Card can run at 200MHz */
 						/* SDR mode @1.2V I/O */
 
-#define EXT_CSD_CARD_TYPE_SDR_200	(EXT_CSD_CARD_TYPE_SDR_1_8V | \
-					 EXT_CSD_CARD_TYPE_SDR_1_2V)
-
-#define EXT_CSD_CARD_TYPE_SDR_ALL	(EXT_CSD_CARD_TYPE_SDR_200 | \
-					 EXT_CSD_CARD_TYPE_52 | \
-					 EXT_CSD_CARD_TYPE_26)
-
-#define	EXT_CSD_CARD_TYPE_SDR_1_2V_ALL	(EXT_CSD_CARD_TYPE_SDR_1_2V | \
-					 EXT_CSD_CARD_TYPE_52 | \
-					 EXT_CSD_CARD_TYPE_26)
-
-#define	EXT_CSD_CARD_TYPE_SDR_1_8V_ALL	(EXT_CSD_CARD_TYPE_SDR_1_8V | \
-					 EXT_CSD_CARD_TYPE_52 | \
-					 EXT_CSD_CARD_TYPE_26)
-
-#define EXT_CSD_CARD_TYPE_SDR_1_2V_DDR_1_8V	(EXT_CSD_CARD_TYPE_SDR_1_2V | \
-						 EXT_CSD_CARD_TYPE_DDR_1_8V | \
-						 EXT_CSD_CARD_TYPE_52 | \
-						 EXT_CSD_CARD_TYPE_26)
-
-#define EXT_CSD_CARD_TYPE_SDR_1_8V_DDR_1_8V	(EXT_CSD_CARD_TYPE_SDR_1_8V | \
-						 EXT_CSD_CARD_TYPE_DDR_1_8V | \
-						 EXT_CSD_CARD_TYPE_52 | \
-						 EXT_CSD_CARD_TYPE_26)
-
-#define EXT_CSD_CARD_TYPE_SDR_1_2V_DDR_1_2V	(EXT_CSD_CARD_TYPE_SDR_1_2V | \
-						 EXT_CSD_CARD_TYPE_DDR_1_2V | \
-						 EXT_CSD_CARD_TYPE_52 | \
-						 EXT_CSD_CARD_TYPE_26)
-
-#define EXT_CSD_CARD_TYPE_SDR_1_8V_DDR_1_2V	(EXT_CSD_CARD_TYPE_SDR_1_8V | \
-						 EXT_CSD_CARD_TYPE_DDR_1_2V | \
-						 EXT_CSD_CARD_TYPE_52 | \
-						 EXT_CSD_CARD_TYPE_26)
-
-#define EXT_CSD_CARD_TYPE_SDR_1_2V_DDR_52	(EXT_CSD_CARD_TYPE_SDR_1_2V | \
-						 EXT_CSD_CARD_TYPE_DDR_52 | \
-						 EXT_CSD_CARD_TYPE_52 | \
-						 EXT_CSD_CARD_TYPE_26)
-
-#define EXT_CSD_CARD_TYPE_SDR_1_8V_DDR_52	(EXT_CSD_CARD_TYPE_SDR_1_8V | \
-						 EXT_CSD_CARD_TYPE_DDR_52 | \
-						 EXT_CSD_CARD_TYPE_52 | \
-						 EXT_CSD_CARD_TYPE_26)
-
-#define EXT_CSD_CARD_TYPE_SDR_ALL_DDR_1_8V	(EXT_CSD_CARD_TYPE_SDR_200 | \
-						 EXT_CSD_CARD_TYPE_DDR_1_8V | \
-						 EXT_CSD_CARD_TYPE_52 | \
-						 EXT_CSD_CARD_TYPE_26)
-
-#define EXT_CSD_CARD_TYPE_SDR_ALL_DDR_1_2V	(EXT_CSD_CARD_TYPE_SDR_200 | \
-						 EXT_CSD_CARD_TYPE_DDR_1_2V | \
-						 EXT_CSD_CARD_TYPE_52 | \
-						 EXT_CSD_CARD_TYPE_26)
-
-#define EXT_CSD_CARD_TYPE_SDR_ALL_DDR_52	(EXT_CSD_CARD_TYPE_SDR_200 | \
-						 EXT_CSD_CARD_TYPE_DDR_52 | \
-						 EXT_CSD_CARD_TYPE_52 | \
-						 EXT_CSD_CARD_TYPE_26)
-
 #define EXT_CSD_BUS_WIDTH_1	0	/* Card is in 1 bit mode */
 #define EXT_CSD_BUS_WIDTH_4	1	/* Card is in 4 bit mode */
 #define EXT_CSD_BUS_WIDTH_8	2	/* Card is in 8 bit mode */
diff --git a/arch/arm/mach-mxs/include/mach/mmc.h b/include/linux/mmc/mxs-mmc.h
similarity index 80%
rename from arch/arm/mach-mxs/include/mach/mmc.h
rename to include/linux/mmc/mxs-mmc.h
index 211547a..7c2ad3a 100644
--- a/arch/arm/mach-mxs/include/mach/mmc.h
+++ b/include/linux/mmc/mxs-mmc.h
@@ -6,8 +6,8 @@
  * published by the Free Software Foundation.
  */
 
-#ifndef __MACH_MXS_MMC_H__
-#define __MACH_MXS_MMC_H__
+#ifndef __LINUX_MMC_MXS_MMC_H__
+#define __LINUX_MMC_MXS_MMC_H__
 
 struct mxs_mmc_platform_data {
 	int wp_gpio;	/* write protect pin */
@@ -15,4 +15,5 @@
 #define SLOTF_4_BIT_CAPABLE	(1 << 0)
 #define SLOTF_8_BIT_CAPABLE	(1 << 1)
 };
-#endif /* __MACH_MXS_MMC_H__ */
+
+#endif /* __LINUX_MMC_MXS_MMC_H__ */
diff --git a/include/linux/mmc/sdhci-spear.h b/include/linux/mmc/sdhci-spear.h
index 5cdc96d..e78c0e2 100644
--- a/include/linux/mmc/sdhci-spear.h
+++ b/include/linux/mmc/sdhci-spear.h
@@ -4,7 +4,7 @@
  * SDHCI declarations specific to ST SPEAr platform
  *
  * Copyright (C) 2010 ST Microelectronics
- * Viresh Kumar<viresh.kumar@st.com>
+ * Viresh Kumar <viresh.linux@gmail.com>
  *
  * This file is licensed under the terms of the GNU General Public
  * License version 2. This program is licensed "as is" without any
diff --git a/include/linux/mmc/sdio.h b/include/linux/mmc/sdio.h
index c9fe66c..17446d3 100644
--- a/include/linux/mmc/sdio.h
+++ b/include/linux/mmc/sdio.h
@@ -98,7 +98,9 @@
 
 #define SDIO_CCCR_IF		0x07	/* bus interface controls */
 
+#define  SDIO_BUS_WIDTH_MASK	0x03	/* data bus width setting */
 #define  SDIO_BUS_WIDTH_1BIT	0x00
+#define  SDIO_BUS_WIDTH_RESERVED 0x01
 #define  SDIO_BUS_WIDTH_4BIT	0x02
 #define  SDIO_BUS_ECSI		0x20	/* Enable continuous SPI interrupt */
 #define  SDIO_BUS_SCSI		0x40	/* Support continuous SPI interrupt */
diff --git a/include/linux/mmdebug.h b/include/linux/mmdebug.h
index c04ecfe..580bd58 100644
--- a/include/linux/mmdebug.h
+++ b/include/linux/mmdebug.h
@@ -4,7 +4,7 @@
 #ifdef CONFIG_DEBUG_VM
 #define VM_BUG_ON(cond) BUG_ON(cond)
 #else
-#define VM_BUG_ON(cond) do { (void)(cond); } while (0)
+#define VM_BUG_ON(cond) BUILD_BUG_ON_INVALID(cond)
 #endif
 
 #ifdef CONFIG_DEBUG_VIRTUAL
diff --git a/include/linux/mmzone.h b/include/linux/mmzone.h
index 41aa49b..2427706 100644
--- a/include/linux/mmzone.h
+++ b/include/linux/mmzone.h
@@ -35,13 +35,39 @@
  */
 #define PAGE_ALLOC_COSTLY_ORDER 3
 
-#define MIGRATE_UNMOVABLE     0
-#define MIGRATE_RECLAIMABLE   1
-#define MIGRATE_MOVABLE       2
-#define MIGRATE_PCPTYPES      3 /* the number of types on the pcp lists */
-#define MIGRATE_RESERVE       3
-#define MIGRATE_ISOLATE       4 /* can't allocate from here */
-#define MIGRATE_TYPES         5
+enum {
+	MIGRATE_UNMOVABLE,
+	MIGRATE_RECLAIMABLE,
+	MIGRATE_MOVABLE,
+	MIGRATE_PCPTYPES,	/* the number of types on the pcp lists */
+	MIGRATE_RESERVE = MIGRATE_PCPTYPES,
+#ifdef CONFIG_CMA
+	/*
+	 * MIGRATE_CMA migration type is designed to mimic the way
+	 * ZONE_MOVABLE works.  Only movable pages can be allocated
+	 * from MIGRATE_CMA pageblocks and the page allocator never
+	 * implicitly changes the migration type of a MIGRATE_CMA pageblock.
+	 *
+	 * The way to use it is to change migratetype of a range of
+	 * pageblocks to MIGRATE_CMA which can be done by
+	 * __free_pageblock_cma() function.  What is important though
+	 * is that a range of pageblocks must be aligned to
+	 * MAX_ORDER_NR_PAGES should the biggest page be bigger than
+	 * a single pageblock.
+	 */
+	MIGRATE_CMA,
+#endif
+	MIGRATE_ISOLATE,	/* can't allocate from here */
+	MIGRATE_TYPES
+};
+
+#ifdef CONFIG_CMA
+#  define is_migrate_cma(migratetype) unlikely((migratetype) == MIGRATE_CMA)
+#  define cma_wmark_pages(zone)	zone->min_cma_pages
+#else
+#  define is_migrate_cma(migratetype) false
+#  define cma_wmark_pages(zone) 0
+#endif
 
 #define for_each_migratetype_order(order, type) \
 	for (order = 0; order < MAX_ORDER; order++) \
@@ -159,8 +185,25 @@
 	return (lru == LRU_UNEVICTABLE);
 }
 
+struct zone_reclaim_stat {
+	/*
+	 * The pageout code in vmscan.c keeps track of how many of the
+	 * mem/swap backed and file backed pages are referenced.
+	 * The higher the rotated/scanned ratio, the more valuable
+	 * that cache is.
+	 *
+	 * The anon LRU stats live in [0], file LRU stats in [1]
+	 */
+	unsigned long		recent_rotated[2];
+	unsigned long		recent_scanned[2];
+};
+
 struct lruvec {
 	struct list_head lists[NR_LRU_LISTS];
+	struct zone_reclaim_stat reclaim_stat;
+#ifdef CONFIG_CGROUP_MEM_RES_CTLR
+	struct zone *zone;
+#endif
 };
 
 /* Mask used at gathering information at once (see memcontrol.c) */
@@ -169,16 +212,12 @@
 #define LRU_ALL_EVICTABLE (LRU_ALL_FILE | LRU_ALL_ANON)
 #define LRU_ALL	     ((1 << NR_LRU_LISTS) - 1)
 
-/* Isolate inactive pages */
-#define ISOLATE_INACTIVE	((__force isolate_mode_t)0x1)
-/* Isolate active pages */
-#define ISOLATE_ACTIVE		((__force isolate_mode_t)0x2)
 /* Isolate clean file */
-#define ISOLATE_CLEAN		((__force isolate_mode_t)0x4)
+#define ISOLATE_CLEAN		((__force isolate_mode_t)0x1)
 /* Isolate unmapped file */
-#define ISOLATE_UNMAPPED	((__force isolate_mode_t)0x8)
+#define ISOLATE_UNMAPPED	((__force isolate_mode_t)0x2)
 /* Isolate for asynchronous migration */
-#define ISOLATE_ASYNC_MIGRATE	((__force isolate_mode_t)0x10)
+#define ISOLATE_ASYNC_MIGRATE	((__force isolate_mode_t)0x4)
 
 /* LRU Isolation modes. */
 typedef unsigned __bitwise__ isolate_mode_t;
@@ -287,19 +326,6 @@
 #error ZONES_SHIFT -- too many zones configured adjust calculation
 #endif
 
-struct zone_reclaim_stat {
-	/*
-	 * The pageout code in vmscan.c keeps track of how many of the
-	 * mem/swap backed and file backed pages are refeferenced.
-	 * The higher the rotated/scanned ratio, the more valuable
-	 * that cache is.
-	 *
-	 * The anon LRU stats live in [0], file LRU stats in [1]
-	 */
-	unsigned long		recent_rotated[2];
-	unsigned long		recent_scanned[2];
-};
-
 struct zone {
 	/* Fields commonly accessed by the page allocator */
 
@@ -347,6 +373,13 @@
 	/* see spanned/present_pages for more description */
 	seqlock_t		span_seqlock;
 #endif
+#ifdef CONFIG_CMA
+	/*
+	 * CMA needs to increase watermark levels during the allocation
+	 * process to make sure that the system is not starved.
+	 */
+	unsigned long		min_cma_pages;
+#endif
 	struct free_area	free_area[MAX_ORDER];
 
 #ifndef CONFIG_SPARSEMEM
@@ -374,8 +407,6 @@
 	spinlock_t		lru_lock;
 	struct lruvec		lruvec;
 
-	struct zone_reclaim_stat reclaim_stat;
-
 	unsigned long		pages_scanned;	   /* since last reclaim */
 	unsigned long		flags;		   /* zone flags, see below */
 
@@ -701,6 +732,17 @@
 				     unsigned long size,
 				     enum memmap_context context);
 
+extern void lruvec_init(struct lruvec *lruvec, struct zone *zone);
+
+static inline struct zone *lruvec_zone(struct lruvec *lruvec)
+{
+#ifdef CONFIG_CGROUP_MEM_RES_CTLR
+	return lruvec->zone;
+#else
+	return container_of(lruvec, struct zone, lruvec);
+#endif
+}
+
 #ifdef CONFIG_HAVE_MEMORY_PRESENT
 void memory_present(int nid, unsigned long start, unsigned long end);
 #else
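
The mmzone.h changes fold the reclaim statistics into struct lruvec, add the lruvec_zone() accessor, and introduce MIGRATE_CMA behind CONFIG_CMA. A sketch using the relocated stats and the new accessor (the function is illustrative; real updates happen in vmscan.c, typically under the zone's lru_lock):

#include <linux/mmzone.h>

/* Illustrative only: note a rotated page and find the owning zone. */
static struct zone *example_note_rotated(struct lruvec *lruvec, bool file)
{
	/* anon stats live in [0], file stats in [1] */
	lruvec->reclaim_stat.recent_rotated[file]++;

	/* with memcg enabled this is lruvec->zone, otherwise container_of() */
	return lruvec_zone(lruvec);
}
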
diff --git a/include/linux/moduleparam.h b/include/linux/moduleparam.h
index 1b14d25..d6a5806 100644
--- a/include/linux/moduleparam.h
+++ b/include/linux/moduleparam.h
@@ -128,7 +128,7 @@
  * The ops can have NULL set or get functions.
  */
 #define module_param_cb(name, ops, arg, perm)				      \
-	__module_param_call(MODULE_PARAM_PREFIX, name, ops, arg, perm, 0)
+	__module_param_call(MODULE_PARAM_PREFIX, name, ops, arg, perm, -1)
 
 /**
  * <level>_param_cb - general callback for a module/cmdline parameter
@@ -192,7 +192,7 @@
 		 { (void *)set, (void *)get };				\
 	__module_param_call(MODULE_PARAM_PREFIX,			\
 			    name, &__param_ops_##name, arg,		\
-			    (perm) + sizeof(__check_old_set_param(set))*0, 0)
+			    (perm) + sizeof(__check_old_set_param(set))*0, -1)
 
 /* We don't get oldget: it's often a new-style param_get_uint, etc. */
 static inline int
@@ -272,7 +272,7 @@
  */
 #define core_param(name, var, type, perm)				\
 	param_check_##type(name, &(var));				\
-	__module_param_call("", name, &param_ops_##type, &var, perm, 0)
+	__module_param_call("", name, &param_ops_##type, &var, perm, -1)
 #endif /* !MODULE */
 
 /**
@@ -290,7 +290,7 @@
 		= { len, string };					\
 	__module_param_call(MODULE_PARAM_PREFIX, name,			\
 			    &param_ops_string,				\
-			    .str = &__param_string_##name, perm, 0);	\
+			    .str = &__param_string_##name, perm, -1);	\
 	__MODULE_PARM_TYPE(name, "string")
 
 /**
@@ -432,7 +432,7 @@
 	__module_param_call(MODULE_PARAM_PREFIX, name,			\
 			    &param_array_ops,				\
 			    .arr = &__param_arr_##name,			\
-			    perm, 0);					\
+			    perm, -1);					\
 	__MODULE_PARM_TYPE(name, "array of " #type)
 
 extern struct kernel_param_ops param_array_ops;
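
Editor's note: the moduleparam.h hunks only change the internal level argument passed to __module_param_call() from 0 to -1; the public macros keep their signatures. A hedged usage sketch of module_param_cb() with a custom setter (hypothetical module code, not from this patch):

#include <linux/kernel.h>
#include <linux/moduleparam.h>

static int threshold = 10;

/* Parse and range-check the new value before storing it. */
static int threshold_set(const char *val, const struct kernel_param *kp)
{
	int new, ret;

	ret = kstrtoint(val, 0, &new);
	if (ret)
		return ret;
	if (new < 0 || new > 100)
		return -EINVAL;
	*(int *)kp->arg = new;
	return 0;
}

static struct kernel_param_ops threshold_ops = {
	.set = threshold_set,
	.get = param_get_int,
};

module_param_cb(threshold, &threshold_ops, &threshold, 0644);
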
diff --git a/include/linux/msdos_fs.h b/include/linux/msdos_fs.h
index 34066e6..11cc2ac 100644
--- a/include/linux/msdos_fs.h
+++ b/include/linux/msdos_fs.h
@@ -21,8 +21,9 @@
 #define CT_LE_W(v)	cpu_to_le16(v)
 #define CT_LE_L(v)	cpu_to_le32(v)
 
+#define MSDOS_ROOT_INO	 1	/* The root inode number */
+#define MSDOS_FSINFO_INO 2	/* Used for managing the FSINFO block */
 
-#define MSDOS_ROOT_INO	1	/* == MINIX_ROOT_INO */
 #define MSDOS_DIR_BITS	5	/* log2(sizeof(struct msdos_dir_entry)) */
 
 /* directory limit */
diff --git a/include/linux/mtd/gpmi-nand.h b/include/linux/mtd/gpmi-nand.h
index 69b6dbf..ed3c4e0 100644
--- a/include/linux/mtd/gpmi-nand.h
+++ b/include/linux/mtd/gpmi-nand.h
@@ -23,12 +23,12 @@
 #define GPMI_NAND_RES_SIZE	6
 
 /* Resource names for the GPMI NAND driver. */
-#define GPMI_NAND_GPMI_REGS_ADDR_RES_NAME  "GPMI NAND GPMI Registers"
+#define GPMI_NAND_GPMI_REGS_ADDR_RES_NAME  "gpmi-nand"
 #define GPMI_NAND_GPMI_INTERRUPT_RES_NAME  "GPMI NAND GPMI Interrupt"
-#define GPMI_NAND_BCH_REGS_ADDR_RES_NAME   "GPMI NAND BCH Registers"
-#define GPMI_NAND_BCH_INTERRUPT_RES_NAME   "GPMI NAND BCH Interrupt"
+#define GPMI_NAND_BCH_REGS_ADDR_RES_NAME   "bch"
+#define GPMI_NAND_BCH_INTERRUPT_RES_NAME   "bch"
 #define GPMI_NAND_DMA_CHANNELS_RES_NAME    "GPMI NAND DMA Channels"
-#define GPMI_NAND_DMA_INTERRUPT_RES_NAME   "GPMI NAND DMA Interrupt"
+#define GPMI_NAND_DMA_INTERRUPT_RES_NAME   "gpmi-dma"
 
 /**
  * struct gpmi_nand_platform_data - GPMI NAND driver platform data.
diff --git a/include/linux/mtd/mtd.h b/include/linux/mtd/mtd.h
index cf5ea8c..63dadc0 100644
--- a/include/linux/mtd/mtd.h
+++ b/include/linux/mtd/mtd.h
@@ -157,6 +157,15 @@
 	unsigned int erasesize_mask;
 	unsigned int writesize_mask;
 
+	/*
+	 * read ops return -EUCLEAN if max number of bitflips corrected on any
+	 * one region comprising an ecc step equals or exceeds this value.
+	 * Settable by driver, else defaults to ecc_strength.  User can override
+	 * in sysfs.  N.B. The meaning of the -EUCLEAN return code has changed;
+	 * see Documentation/ABI/testing/sysfs-class-mtd for more detail.
+	 */
+	unsigned int bitflip_threshold;
+
 	// Kernel-only stuff starts here.
 	const char *name;
 	int index;
@@ -164,7 +173,7 @@
 	/* ECC layout structure pointer - read only! */
 	struct nand_ecclayout *ecclayout;
 
-	/* max number of correctible bit errors per writesize */
+	/* max number of correctible bit errors per ecc step */
 	unsigned int ecc_strength;
 
 	/* Data for variable erase regions. If numeraseregions is zero,
diff --git a/include/linux/mtd/nand.h b/include/linux/mtd/nand.h
index 1482340..57977c6 100644
--- a/include/linux/mtd/nand.h
+++ b/include/linux/mtd/nand.h
@@ -161,8 +161,6 @@
  * Option constants for bizarre disfunctionality and real
  * features.
  */
-/* Chip can not auto increment pages */
-#define NAND_NO_AUTOINCR	0x00000001
 /* Buswidth is 16 bit */
 #define NAND_BUSWIDTH_16	0x00000002
 /* Device supports partial programming without padding */
@@ -207,7 +205,6 @@
 	(NAND_NO_PADDING | NAND_CACHEPRG | NAND_COPYBACK)
 
 /* Macros to identify the above */
-#define NAND_CANAUTOINCR(chip) (!(chip->options & NAND_NO_AUTOINCR))
 #define NAND_MUST_PAD(chip) (!(chip->options & NAND_NO_PADDING))
 #define NAND_HAS_CACHEPROG(chip) ((chip->options & NAND_CACHEPRG))
 #define NAND_HAS_COPYBACK(chip) ((chip->options & NAND_COPYBACK))
@@ -216,7 +213,7 @@
 					&& (chip->page_shift > 9))
 
 /* Mask to zero out the chip options, which come from the id table */
-#define NAND_CHIPOPTIONS_MSK	(0x0000ffff & ~NAND_NO_AUTOINCR)
+#define NAND_CHIPOPTIONS_MSK	0x0000ffff
 
 /* Non chip related options */
 /* This option skips the bbt scan during initialization. */
@@ -363,21 +360,20 @@
 	int (*correct)(struct mtd_info *mtd, uint8_t *dat, uint8_t *read_ecc,
 			uint8_t *calc_ecc);
 	int (*read_page_raw)(struct mtd_info *mtd, struct nand_chip *chip,
-			uint8_t *buf, int page);
+			uint8_t *buf, int oob_required, int page);
 	void (*write_page_raw)(struct mtd_info *mtd, struct nand_chip *chip,
-			const uint8_t *buf);
+			const uint8_t *buf, int oob_required);
 	int (*read_page)(struct mtd_info *mtd, struct nand_chip *chip,
-			uint8_t *buf, int page);
+			uint8_t *buf, int oob_required, int page);
 	int (*read_subpage)(struct mtd_info *mtd, struct nand_chip *chip,
 			uint32_t offs, uint32_t len, uint8_t *buf);
 	void (*write_page)(struct mtd_info *mtd, struct nand_chip *chip,
-			const uint8_t *buf);
+			const uint8_t *buf, int oob_required);
 	int (*write_oob_raw)(struct mtd_info *mtd, struct nand_chip *chip,
 			int page);
 	int (*read_oob_raw)(struct mtd_info *mtd, struct nand_chip *chip,
-			int page, int sndcmd);
-	int (*read_oob)(struct mtd_info *mtd, struct nand_chip *chip, int page,
-			int sndcmd);
+			int page);
+	int (*read_oob)(struct mtd_info *mtd, struct nand_chip *chip, int page);
 	int (*write_oob)(struct mtd_info *mtd, struct nand_chip *chip,
 			int page);
 };
@@ -459,6 +455,8 @@
  * @pagemask:		[INTERN] page number mask = number of (pages / chip) - 1
  * @pagebuf:		[INTERN] holds the pagenumber which is currently in
  *			data_buf.
+ * @pagebuf_bitflips:	[INTERN] holds the bitflip count for the page which is
+ *			currently in data_buf.
  * @subpagesize:	[INTERN] holds the subpagesize
  * @onfi_version:	[INTERN] holds the chip ONFI version (BCD encoded),
  *			non 0 if ONFI supported.
@@ -505,7 +503,8 @@
 	int (*errstat)(struct mtd_info *mtd, struct nand_chip *this, int state,
 			int status, int page);
 	int (*write_page)(struct mtd_info *mtd, struct nand_chip *chip,
-			const uint8_t *buf, int page, int cached, int raw);
+			const uint8_t *buf, int oob_required, int page,
+			int cached, int raw);
 
 	int chip_delay;
 	unsigned int options;
@@ -519,6 +518,7 @@
 	uint64_t chipsize;
 	int pagemask;
 	int pagebuf;
+	unsigned int pagebuf_bitflips;
 	int subpagesize;
 	uint8_t cellinfo;
 	int badblockpos;
@@ -654,6 +654,7 @@
 	void (*cmd_ctrl)(struct mtd_info *mtd, int dat, unsigned int ctrl);
 	void (*write_buf)(struct mtd_info *mtd, const uint8_t *buf, int len);
 	void (*read_buf)(struct mtd_info *mtd, uint8_t *buf, int len);
+	unsigned char (*read_byte)(struct mtd_info *mtd);
 	void *priv;
 };
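
Editor's note: the ecc.read_page*/write_page* hooks above gain an oob_required flag so drivers transfer OOB data only when the caller needs it. A minimal sketch of a hypothetical driver callback using the new prototype (example_* name is an assumption, not from this patch):

#include <linux/mtd/mtd.h>
#include <linux/mtd/nand.h>

/* Hypothetical raw page read: data is always read, OOB only on request. */
static int example_read_page_raw(struct mtd_info *mtd, struct nand_chip *chip,
				 uint8_t *buf, int oob_required, int page)
{
	chip->read_buf(mtd, buf, mtd->writesize);
	if (oob_required)
		chip->read_buf(mtd, chip->oob_poi, mtd->oobsize);
	return 0;
}
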
 
diff --git a/include/linux/mv643xx_eth.h b/include/linux/mv643xx_eth.h
index 30b0c4e..51bf8ad 100644
--- a/include/linux/mv643xx_eth.h
+++ b/include/linux/mv643xx_eth.h
@@ -18,7 +18,6 @@
 struct mv643xx_eth_shared_platform_data {
 	struct mbus_dram_target_info	*dram;
 	struct platform_device	*shared_smi;
-	unsigned int		t_clk;
 	/*
 	 * Max packet size for Tx IP/Layer 4 checksum, when set to 0, default
 	 * limit of 9KiB will be used.
diff --git a/include/linux/net.h b/include/linux/net.h
index 2d7510f..e9ac2df 100644
--- a/include/linux/net.h
+++ b/include/linux/net.h
@@ -313,5 +313,8 @@
 	MODULE_ALIAS("net-pf-" __stringify(pf) "-proto-" __stringify(proto) \
 		     "-type-" __stringify(type))
 
+#define MODULE_ALIAS_NET_PF_PROTO_NAME(pf, proto, name) \
+	MODULE_ALIAS("net-pf-" __stringify(pf) "-proto-" __stringify(proto) \
+		     name)
 #endif /* __KERNEL__ */
 #endif	/* _LINUX_NET_H */
diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h
index e7fd468..d94cb14 100644
--- a/include/linux/netdevice.h
+++ b/include/linux/netdevice.h
@@ -2795,15 +2795,15 @@
 #define netif_info(priv, type, dev, fmt, args...)		\
 	netif_level(info, priv, type, dev, fmt, ##args)
 
-#if defined(DEBUG)
-#define netif_dbg(priv, type, dev, format, args...)		\
-	netif_printk(priv, type, KERN_DEBUG, dev, format, ##args)
-#elif defined(CONFIG_DYNAMIC_DEBUG)
+#if defined(CONFIG_DYNAMIC_DEBUG)
 #define netif_dbg(priv, type, netdev, format, args...)		\
 do {								\
 	if (netif_msg_##type(priv))				\
 		dynamic_netdev_dbg(netdev, format, ##args);	\
 } while (0)
+#elif defined(DEBUG)
+#define netif_dbg(priv, type, dev, format, args...)		\
+	netif_printk(priv, type, KERN_DEBUG, dev, format, ##args)
 #else
 #define netif_dbg(priv, type, dev, format, args...)			\
 ({									\
diff --git a/include/linux/netfilter/xt_HMARK.h b/include/linux/netfilter/xt_HMARK.h
index abb1650..826fc58 100644
--- a/include/linux/netfilter/xt_HMARK.h
+++ b/include/linux/netfilter/xt_HMARK.h
@@ -27,7 +27,12 @@
 		__u16	src;
 		__u16	dst;
 	} p16;
+	struct {
+		__be16	src;
+		__be16	dst;
+	} b16;
 	__u32	v32;
+	__be32	b32;
 };
 
 struct xt_hmark_info {
diff --git a/include/linux/nfs4.h b/include/linux/nfs4.h
index 0987146..af2d2fa 100644
--- a/include/linux/nfs4.h
+++ b/include/linux/nfs4.h
@@ -69,6 +69,10 @@
 #define NFS4_CDFC4_FORE_OR_BOTH 0x3
 #define NFS4_CDFC4_BACK_OR_BOTH 0x7
 
+#define NFS4_CDFS4_FORE 0x1
+#define NFS4_CDFS4_BACK 0x2
+#define NFS4_CDFS4_BOTH 0x3
+
 #define NFS4_SET_TO_SERVER_TIME	0
 #define NFS4_SET_TO_CLIENT_TIME	1
 
@@ -526,6 +530,13 @@
 #define FATTR4_WORD1_MOUNTED_ON_FILEID  (1UL << 23)
 #define FATTR4_WORD1_FS_LAYOUT_TYPES    (1UL << 30)
 #define FATTR4_WORD2_LAYOUT_BLKSIZE     (1UL << 1)
+#define FATTR4_WORD2_MDSTHRESHOLD       (1UL << 4)
+
+/* MDS threshold bitmap bits */
+#define THRESHOLD_RD                    (1UL << 0)
+#define THRESHOLD_WR                    (1UL << 1)
+#define THRESHOLD_RD_IO                 (1UL << 2)
+#define THRESHOLD_WR_IO                 (1UL << 3)
 
 #define NFSPROC4_NULL 0
 #define NFSPROC4_COMPOUND 1
@@ -596,6 +607,8 @@
 	NFSPROC4_CLNT_TEST_STATEID,
 	NFSPROC4_CLNT_FREE_STATEID,
 	NFSPROC4_CLNT_GETDEVICELIST,
+	NFSPROC4_CLNT_BIND_CONN_TO_SESSION,
+	NFSPROC4_CLNT_DESTROY_CLIENTID,
 };
 
 /* nfs41 types */
diff --git a/include/linux/nfs_fs.h b/include/linux/nfs_fs.h
index 52a1bdb..b23cfc1 100644
--- a/include/linux/nfs_fs.h
+++ b/include/linux/nfs_fs.h
@@ -102,6 +102,7 @@
 	int error;
 
 	struct list_head list;
+	struct nfs4_threshold	*mdsthreshold;
 };
 
 struct nfs_open_dir_context {
@@ -179,8 +180,7 @@
 	__be32			cookieverf[2];
 
 	unsigned long		npages;
-	unsigned long		ncommit;
-	struct list_head	commit_list;
+	struct nfs_mds_commit_info commit_info;
 
 	/* Open contexts for shared mmap writes */
 	struct list_head	open_files;
@@ -201,8 +201,10 @@
 
 	/* pNFS layout information */
 	struct pnfs_layout_hdr *layout;
-	atomic_t		commits_outstanding;
 #endif /* CONFIG_NFS_V4*/
+	/* how many bytes have been written/read and how many bytes queued up */
+	__u64 write_io;
+	__u64 read_io;
 #ifdef CONFIG_NFS_FSCACHE
 	struct fscache_cookie	*fscache;
 #endif
@@ -230,7 +232,6 @@
 #define NFS_INO_FSCACHE		(5)		/* inode can be cached by FS-Cache */
 #define NFS_INO_FSCACHE_LOCK	(6)		/* FS-Cache cookie management lock */
 #define NFS_INO_COMMIT		(7)		/* inode is committing unstable writes */
-#define NFS_INO_PNFS_COMMIT	(8)		/* use pnfs code for commit */
 #define NFS_INO_LAYOUTCOMMIT	(9)		/* layoutcommit required */
 #define NFS_INO_LAYOUTCOMMITTING (10)		/* layoutcommit inflight */
 
@@ -317,11 +318,6 @@
 	return NFS_SERVER(inode)->caps & cap;
 }
 
-static inline int NFS_USE_READDIRPLUS(struct inode *inode)
-{
-	return test_bit(NFS_INO_ADVISE_RDPLUS, &NFS_I(inode)->flags);
-}
-
 static inline void nfs_set_verifier(struct dentry * dentry, unsigned long verf)
 {
 	dentry->d_time = verf;
@@ -552,8 +548,8 @@
 extern int nfs_wb_page_cancel(struct inode *inode, struct page* page);
 #if defined(CONFIG_NFS_V3) || defined(CONFIG_NFS_V4)
 extern int  nfs_commit_inode(struct inode *, int);
-extern struct nfs_write_data *nfs_commitdata_alloc(void);
-extern void nfs_commit_free(struct nfs_write_data *wdata);
+extern struct nfs_commit_data *nfs_commitdata_alloc(void);
+extern void nfs_commit_free(struct nfs_commit_data *data);
 #else
 static inline int
 nfs_commit_inode(struct inode *inode, int how)
@@ -569,12 +565,6 @@
 }
 
 /*
- * Allocate nfs_write_data structures
- */
-extern struct nfs_write_data *nfs_writedata_alloc(unsigned int npages);
-extern void nfs_writedata_free(struct nfs_write_data *);
-
-/*
  * linux/fs/nfs/read.c
  */
 extern int  nfs_readpage(struct file *, struct page *);
@@ -585,12 +575,6 @@
 			       struct page *);
 
 /*
- * Allocate nfs_read_data structures
- */
-extern struct nfs_read_data *nfs_readdata_alloc(unsigned int npages);
-extern void nfs_readdata_free(struct nfs_read_data *);
-
-/*
  * linux/fs/nfs3proc.c
  */
 #ifdef CONFIG_NFS_V3_ACL
@@ -654,6 +638,7 @@
 #define NFSDBG_FSCACHE		0x0800
 #define NFSDBG_PNFS		0x1000
 #define NFSDBG_PNFS_LD		0x2000
+#define NFSDBG_STATE		0x4000
 #define NFSDBG_ALL		0xFFFF
 
 #ifdef __KERNEL__
diff --git a/include/linux/nfs_fs_sb.h b/include/linux/nfs_fs_sb.h
index 7073fc7..f58325a 100644
--- a/include/linux/nfs_fs_sb.h
+++ b/include/linux/nfs_fs_sb.h
@@ -17,7 +17,7 @@
 struct nfs4_sequence_res;
 struct nfs_server;
 struct nfs4_minor_version_ops;
-struct server_scope;
+struct nfs41_server_scope;
 struct nfs41_impl_id;
 
 /*
@@ -25,6 +25,7 @@
  */
 struct nfs_client {
 	atomic_t		cl_count;
+	atomic_t		cl_mds_count;
 	int			cl_cons_state;	/* current construction state (-ve: init error) */
 #define NFS_CS_READY		0		/* ready to be used */
 #define NFS_CS_INITING		1		/* busy initialising */
@@ -35,6 +36,9 @@
 #define NFS_CS_RENEWD		3		/* - renewd started */
 #define NFS_CS_STOP_RENEW	4		/* no more state to renew */
 #define NFS_CS_CHECK_LEASE_TIME	5		/* need to check lease time */
+	unsigned long		cl_flags;	/* behavior switches */
+#define NFS_CS_NORESVPORT	0		/* - use ephemeral src port */
+#define NFS_CS_DISCRTRY		1		/* - disconnect on RPC retry */
 	struct sockaddr_storage	cl_addr;	/* server identifier */
 	size_t			cl_addrlen;
 	char *			cl_hostname;	/* hostname of server */
@@ -61,9 +65,6 @@
 
 	struct rpc_wait_queue	cl_rpcwaitq;
 
-	/* used for the setclientid verifier */
-	struct timespec		cl_boot_time;
-
 	/* idmapper */
 	struct idmap *		cl_idmap;
 
@@ -79,16 +80,17 @@
 	u32			cl_seqid;
 	/* The flags used for obtaining the clientid during EXCHANGE_ID */
 	u32			cl_exchange_flags;
-	struct nfs4_session	*cl_session; 	/* sharred session */
+	struct nfs4_session	*cl_session;	/* shared session */
+	struct nfs41_server_owner *cl_serverowner;
+	struct nfs41_server_scope *cl_serverscope;
+	struct nfs41_impl_id	*cl_implid;
 #endif /* CONFIG_NFS_V4 */
 
 #ifdef CONFIG_NFS_FSCACHE
 	struct fscache_cookie	*fscache;	/* client index cache cookie */
 #endif
 
-	struct server_scope	*server_scope;	/* from exchange_id */
-	struct nfs41_impl_id	*impl_id;	/* from exchange_id */
-	struct net		*net;
+	struct net		*cl_net;
 };
 
 /*
diff --git a/include/linux/nfs_page.h b/include/linux/nfs_page.h
index eac30d6..88d166b 100644
--- a/include/linux/nfs_page.h
+++ b/include/linux/nfs_page.h
@@ -27,7 +27,6 @@
 	PG_CLEAN,
 	PG_NEED_COMMIT,
 	PG_NEED_RESCHED,
-	PG_PARTIAL_READ_FAILED,
 	PG_COMMIT_TO_DS,
 };
 
@@ -37,7 +36,6 @@
 	struct page		*wb_page;	/* page to read in/write out */
 	struct nfs_open_context	*wb_context;	/* File state context info */
 	struct nfs_lock_context	*wb_lock_context;	/* lock context info */
-	atomic_t		wb_complete;	/* i/os we're waiting for */
 	pgoff_t			wb_index;	/* Offset >> PAGE_CACHE_SHIFT */
 	unsigned int		wb_offset,	/* Offset & ~PAGE_CACHE_MASK */
 				wb_pgbase,	/* Start of page data */
@@ -68,7 +66,9 @@
 	int 			pg_ioflags;
 	int			pg_error;
 	const struct rpc_call_ops *pg_rpc_callops;
+	const struct nfs_pgio_completion_ops *pg_completion_ops;
 	struct pnfs_layout_segment *pg_lseg;
+	struct nfs_direct_req	*pg_dreq;
 };
 
 #define NFS_WBACK_BUSY(req)	(test_bit(PG_BUSY,&(req)->wb_flags))
@@ -84,6 +84,7 @@
 extern	void nfs_pageio_init(struct nfs_pageio_descriptor *desc,
 			     struct inode *inode,
 			     const struct nfs_pageio_ops *pg_ops,
+			     const struct nfs_pgio_completion_ops *compl_ops,
 			     size_t bsize,
 			     int how);
 extern	int nfs_pageio_add_request(struct nfs_pageio_descriptor *,
@@ -95,26 +96,17 @@
 				struct nfs_page *req);
 extern  int nfs_wait_on_request(struct nfs_page *);
 extern	void nfs_unlock_request(struct nfs_page *req);
+extern	void nfs_unlock_and_release_request(struct nfs_page *req);
 
 /*
- * Lock the page of an asynchronous request without getting a new reference
+ * Lock the page of an asynchronous request
  */
 static inline int
-nfs_lock_request_dontget(struct nfs_page *req)
-{
-	return !test_and_set_bit(PG_BUSY, &req->wb_flags);
-}
-
-static inline int
 nfs_lock_request(struct nfs_page *req)
 {
-	if (test_and_set_bit(PG_BUSY, &req->wb_flags))
-		return 0;
-	kref_get(&req->wb_kref);
-	return 1;
+	return !test_and_set_bit(PG_BUSY, &req->wb_flags);
 }
 
-
 /**
  * nfs_list_add_request - Insert a request into a list
  * @req: request
diff --git a/include/linux/nfs_xdr.h b/include/linux/nfs_xdr.h
index 7ba3551..8aadd90 100644
--- a/include/linux/nfs_xdr.h
+++ b/include/linux/nfs_xdr.h
@@ -35,6 +35,15 @@
 	return a->major == b->major && a->minor == b->minor;
 }
 
+struct nfs4_threshold {
+	__u32	bm;
+	__u32	l_type;
+	__u64	rd_sz;
+	__u64	wr_sz;
+	__u64	rd_io_sz;
+	__u64	wr_io_sz;
+};
+
 struct nfs_fattr {
 	unsigned int		valid;		/* which fields are valid */
 	umode_t			mode;
@@ -67,6 +76,7 @@
 	unsigned long		gencount;
 	struct nfs4_string	*owner_name;
 	struct nfs4_string	*group_name;
+	struct nfs4_threshold	*mdsthreshold;	/* pNFS threshold hints */
 };
 
 #define NFS_ATTR_FATTR_TYPE		(1U << 0)
@@ -106,14 +116,14 @@
 		| NFS_ATTR_FATTR_FILEID \
 		| NFS_ATTR_FATTR_ATIME \
 		| NFS_ATTR_FATTR_MTIME \
-		| NFS_ATTR_FATTR_CTIME)
+		| NFS_ATTR_FATTR_CTIME \
+		| NFS_ATTR_FATTR_CHANGE)
 #define NFS_ATTR_FATTR_V2 (NFS_ATTR_FATTR \
 		| NFS_ATTR_FATTR_BLOCKS_USED)
 #define NFS_ATTR_FATTR_V3 (NFS_ATTR_FATTR \
 		| NFS_ATTR_FATTR_SPACE_USED)
 #define NFS_ATTR_FATTR_V4 (NFS_ATTR_FATTR \
-		| NFS_ATTR_FATTR_SPACE_USED \
-		| NFS_ATTR_FATTR_CHANGE)
+		| NFS_ATTR_FATTR_SPACE_USED)
 
 /*
  * Info on the file system
@@ -338,7 +348,7 @@
 	const struct qstr *	name;
 	const struct nfs_server *server;	 /* Needed for ID mapping */
 	const u32 *		bitmask;
-	const u32 *		dir_bitmask;
+	const u32 *		open_bitmap;
 	__u32			claim;
 	struct nfs4_sequence_args	seq_args;
 };
@@ -349,7 +359,6 @@
 	struct nfs4_change_info	cinfo;
 	__u32                   rflags;
 	struct nfs_fattr *      f_attr;
-	struct nfs_fattr *      dir_attr;
 	struct nfs_seqid *	seqid;
 	const struct nfs_server *server;
 	fmode_t			delegation_type;
@@ -519,12 +528,29 @@
 };
 
 /*
+ * Arguments to the commit call.
+ */
+struct nfs_commitargs {
+	struct nfs_fh		*fh;
+	__u64			offset;
+	__u32			count;
+	const u32		*bitmask;
+	struct nfs4_sequence_args	seq_args;
+};
+
+struct nfs_commitres {
+	struct nfs_fattr	*fattr;
+	struct nfs_writeverf	*verf;
+	const struct nfs_server *server;
+	struct nfs4_sequence_res	seq_res;
+};
+
+/*
  * Common arguments to the unlink call
  */
 struct nfs_removeargs {
 	const struct nfs_fh	*fh;
 	struct qstr		name;
-	const u32 *		bitmask;
 	struct nfs4_sequence_args	seq_args;
 };
 
@@ -543,7 +569,6 @@
 	const struct nfs_fh		*new_dir;
 	const struct qstr		*old_name;
 	const struct qstr		*new_name;
-	const u32			*bitmask;
 	struct nfs4_sequence_args	seq_args;
 };
 
@@ -839,7 +864,6 @@
 	struct nfs_fh *			fh;
 	struct nfs_fattr *		fattr;
 	struct nfs4_change_info		dir_cinfo;
-	struct nfs_fattr *		dir_fattr;
 	struct nfs4_sequence_res	seq_res;
 };
 
@@ -1061,6 +1085,21 @@
 };
 
 #ifdef CONFIG_NFS_V4_1
+
+struct pnfs_commit_bucket {
+	struct list_head written;
+	struct list_head committing;
+	struct pnfs_layout_segment *wlseg;
+	struct pnfs_layout_segment *clseg;
+};
+
+struct pnfs_ds_commit_info {
+	int nwritten;
+	int ncommitting;
+	int nbuckets;
+	struct pnfs_commit_bucket *buckets;
+};
+
 #define NFS4_EXCHANGE_ID_LEN	(48)
 struct nfs41_exchange_id_args {
 	struct nfs_client		*client;
@@ -1070,13 +1109,13 @@
 	u32				flags;
 };
 
-struct server_owner {
+struct nfs41_server_owner {
 	uint64_t			minor_id;
 	uint32_t			major_id_sz;
 	char				major_id[NFS4_OPAQUE_LIMIT];
 };
 
-struct server_scope {
+struct nfs41_server_scope {
 	uint32_t			server_scope_sz;
 	char 				server_scope[NFS4_OPAQUE_LIMIT];
 };
@@ -1087,10 +1126,18 @@
 	struct nfstime4			date;
 };
 
+struct nfs41_bind_conn_to_session_res {
+	struct nfs4_session		*session;
+	u32				dir;
+	bool				use_conn_in_rdma_mode;
+};
+
 struct nfs41_exchange_id_res {
-	struct nfs_client		*client;
+	u64				clientid;
+	u32				seqid;
 	u32				flags;
-	struct server_scope		*server_scope;
+	struct nfs41_server_owner	*server_owner;
+	struct nfs41_server_scope	*server_scope;
 	struct nfs41_impl_id		*impl_id;
 };
 
@@ -1143,35 +1190,116 @@
 	struct nfs4_sequence_res	seq_res;
 };
 
+#else
+
+struct pnfs_ds_commit_info {
+};
+
 #endif /* CONFIG_NFS_V4_1 */
 
 struct nfs_page;
 
 #define NFS_PAGEVEC_SIZE	(8U)
 
-struct nfs_read_data {
-	struct rpc_task		task;
-	struct inode		*inode;
-	struct rpc_cred		*cred;
-	struct nfs_fattr	fattr;	/* fattr storage */
-	struct list_head	pages;	/* Coalesced read requests */
-	struct list_head	list;	/* lists of struct nfs_read_data */
-	struct nfs_page		*req;	/* multi ops per nfs_page */
+struct nfs_page_array {
 	struct page		**pagevec;
-	unsigned int		npages;	/* Max length of pagevec */
-	struct nfs_readargs args;
-	struct nfs_readres  res;
-	unsigned long		timestamp;	/* For lease renewal */
-	struct pnfs_layout_segment *lseg;
-	struct nfs_client	*ds_clp;	/* pNFS data server */
-	const struct rpc_call_ops *mds_ops;
-	int (*read_done_cb) (struct rpc_task *task, struct nfs_read_data *data);
-	__u64			mds_offset;
-	int			pnfs_error;
+	unsigned int		npages;		/* Max length of pagevec */
 	struct page		*page_array[NFS_PAGEVEC_SIZE];
 };
 
+struct nfs_read_data {
+	struct nfs_pgio_header	*header;
+	struct list_head	list;
+	struct rpc_task		task;
+	struct nfs_fattr	fattr;	/* fattr storage */
+	struct nfs_readargs args;
+	struct nfs_readres  res;
+	unsigned long		timestamp;	/* For lease renewal */
+	int (*read_done_cb) (struct rpc_task *task, struct nfs_read_data *data);
+	__u64			mds_offset;
+	struct nfs_page_array	pages;
+	struct nfs_client	*ds_clp;	/* pNFS data server */
+};
+
+/* used as flag bits in nfs_pgio_header */
+enum {
+	NFS_IOHDR_ERROR = 0,
+	NFS_IOHDR_EOF,
+	NFS_IOHDR_REDO,
+	NFS_IOHDR_NEED_COMMIT,
+	NFS_IOHDR_NEED_RESCHED,
+};
+
+struct nfs_pgio_header {
+	struct inode		*inode;
+	struct rpc_cred		*cred;
+	struct list_head	pages;
+	struct list_head	rpc_list;
+	atomic_t		refcnt;
+	struct nfs_page		*req;
+	struct nfs_writeverf	*verf;
+	struct pnfs_layout_segment *lseg;
+	loff_t			io_start;
+	const struct rpc_call_ops *mds_ops;
+	void (*release) (struct nfs_pgio_header *hdr);
+	const struct nfs_pgio_completion_ops *completion_ops;
+	struct nfs_direct_req	*dreq;
+	spinlock_t		lock;
+	/* fields protected by lock */
+	int			pnfs_error;
+	int			error;		/* merge with pnfs_error */
+	unsigned long		good_bytes;	/* boundary of good data */
+	unsigned long		flags;
+};
+
+struct nfs_read_header {
+	struct nfs_pgio_header	header;
+	struct nfs_read_data	rpc_data;
+};
+
 struct nfs_write_data {
+	struct nfs_pgio_header	*header;
+	struct list_head	list;
+	struct rpc_task		task;
+	struct nfs_fattr	fattr;
+	struct nfs_writeverf	verf;
+	struct nfs_writeargs	args;		/* argument struct */
+	struct nfs_writeres	res;		/* result struct */
+	unsigned long		timestamp;	/* For lease renewal */
+	int (*write_done_cb) (struct rpc_task *task, struct nfs_write_data *data);
+	__u64			mds_offset;	/* Filelayout dense stripe */
+	struct nfs_page_array	pages;
+	struct nfs_client	*ds_clp;	/* pNFS data server */
+};
+
+struct nfs_write_header {
+	struct nfs_pgio_header	header;
+	struct nfs_write_data	rpc_data;
+	struct nfs_writeverf	verf;
+};
+
+struct nfs_mds_commit_info {
+	atomic_t rpcs_out;
+	unsigned long		ncommit;
+	struct list_head	list;
+};
+
+struct nfs_commit_data;
+struct nfs_inode;
+struct nfs_commit_completion_ops {
+	void (*error_cleanup) (struct nfs_inode *nfsi);
+	void (*completion) (struct nfs_commit_data *data);
+};
+
+struct nfs_commit_info {
+	spinlock_t			*lock;
+	struct nfs_mds_commit_info	*mds;
+	struct pnfs_ds_commit_info	*ds;
+	struct nfs_direct_req		*dreq;	/* O_DIRECT request */
+	const struct nfs_commit_completion_ops *completion_ops;
+};
+
+struct nfs_commit_data {
 	struct rpc_task		task;
 	struct inode		*inode;
 	struct rpc_cred		*cred;
@@ -1179,22 +1307,22 @@
 	struct nfs_writeverf	verf;
 	struct list_head	pages;		/* Coalesced requests we wish to flush */
 	struct list_head	list;		/* lists of struct nfs_write_data */
-	struct nfs_page		*req;		/* multi ops per nfs_page */
-	struct page		**pagevec;
-	unsigned int		npages;		/* Max length of pagevec */
-	struct nfs_writeargs	args;		/* argument struct */
-	struct nfs_writeres	res;		/* result struct */
+	struct nfs_direct_req	*dreq;		/* O_DIRECT request */
+	struct nfs_commitargs	args;		/* argument struct */
+	struct nfs_commitres	res;		/* result struct */
+	struct nfs_open_context *context;
 	struct pnfs_layout_segment *lseg;
 	struct nfs_client	*ds_clp;	/* pNFS data server */
 	int			ds_commit_index;
 	const struct rpc_call_ops *mds_ops;
-	int (*write_done_cb) (struct rpc_task *task, struct nfs_write_data *data);
-#ifdef CONFIG_NFS_V4
-	unsigned long		timestamp;	/* For lease renewal */
-#endif
-	__u64			mds_offset;	/* Filelayout dense stripe */
-	int			pnfs_error;
-	struct page		*page_array[NFS_PAGEVEC_SIZE];
+	const struct nfs_commit_completion_ops *completion_ops;
+	int (*commit_done_cb) (struct rpc_task *task, struct nfs_commit_data *data);
+};
+
+struct nfs_pgio_completion_ops {
+	void	(*error_cleanup)(struct list_head *head);
+	void	(*init_hdr)(struct nfs_pgio_header *hdr);
+	void	(*completion)(struct nfs_pgio_header *hdr);
 };
 
 struct nfs_unlinkdata {
@@ -1234,11 +1362,13 @@
 
 	int	(*getroot) (struct nfs_server *, struct nfs_fh *,
 			    struct nfs_fsinfo *);
+	struct vfsmount *(*submount) (struct nfs_server *, struct dentry *,
+				      struct nfs_fh *, struct nfs_fattr *);
 	int	(*getattr) (struct nfs_server *, struct nfs_fh *,
 			    struct nfs_fattr *);
 	int	(*setattr) (struct dentry *, struct nfs_fattr *,
 			    struct iattr *);
-	int	(*lookup)  (struct rpc_clnt *clnt, struct inode *, struct qstr *,
+	int	(*lookup)  (struct inode *, struct qstr *,
 			    struct nfs_fh *, struct nfs_fattr *);
 	int	(*access)  (struct inode *, struct nfs_access_entry *);
 	int	(*readlink)(struct inode *, struct page *, unsigned int,
@@ -1277,8 +1407,9 @@
 	void	(*write_setup)  (struct nfs_write_data *, struct rpc_message *);
 	void	(*write_rpc_prepare)(struct rpc_task *, struct nfs_write_data *);
 	int	(*write_done)  (struct rpc_task *, struct nfs_write_data *);
-	void	(*commit_setup) (struct nfs_write_data *, struct rpc_message *);
-	int	(*commit_done) (struct rpc_task *, struct nfs_write_data *);
+	void	(*commit_setup) (struct nfs_commit_data *, struct rpc_message *);
+	void	(*commit_rpc_prepare)(struct rpc_task *, struct nfs_commit_data *);
+	int	(*commit_done) (struct rpc_task *, struct nfs_commit_data *);
 	int	(*lock)(struct file *, int, struct file_lock *);
 	int	(*lock_check_bounds)(const struct file_lock *);
 	void	(*clear_acl_cache)(struct inode *);
@@ -1287,9 +1418,9 @@
 				struct nfs_open_context *ctx,
 				int open_flags,
 				struct iattr *iattr);
-	int	(*init_client) (struct nfs_client *, const struct rpc_timeout *,
-				const char *, rpc_authflavor_t, int);
-	int	(*secinfo)(struct inode *, const struct qstr *, struct nfs4_secinfo_flavors *);
+	struct nfs_client *
+		(*init_client) (struct nfs_client *, const struct rpc_timeout *,
+				const char *, rpc_authflavor_t);
 };
 
 /*
diff --git a/include/linux/nfsd/export.h b/include/linux/nfsd/export.h
index f85308e..e33f747 100644
--- a/include/linux/nfsd/export.h
+++ b/include/linux/nfsd/export.h
@@ -103,6 +103,7 @@
 	struct nfsd4_fs_locations ex_fslocs;
 	int			ex_nflavors;
 	struct exp_flavor_info	ex_flavors[MAX_SECINFO_LIST];
+	struct cache_detail	*cd;
 };
 
 /* an "export key" (expkey) maps a filehandlefragement to an
@@ -129,24 +130,22 @@
 /*
  * Function declarations
  */
-int			nfsd_export_init(void);
-void			nfsd_export_shutdown(void);
-void			nfsd_export_flush(void);
+int			nfsd_export_init(struct net *);
+void			nfsd_export_shutdown(struct net *);
+void			nfsd_export_flush(struct net *);
 struct svc_export *	rqst_exp_get_by_name(struct svc_rqst *,
 					     struct path *);
 struct svc_export *	rqst_exp_parent(struct svc_rqst *,
 					struct path *);
 struct svc_export *	rqst_find_fsidzero_export(struct svc_rqst *);
-int			exp_rootfh(struct auth_domain *, 
+int			exp_rootfh(struct net *, struct auth_domain *,
 					char *path, struct knfsd_fh *, int maxsize);
 __be32			exp_pseudoroot(struct svc_rqst *, struct svc_fh *);
 __be32			nfserrno(int errno);
 
-extern struct cache_detail svc_export_cache;
-
 static inline void exp_put(struct svc_export *exp)
 {
-	cache_put(&exp->h, &svc_export_cache);
+	cache_put(&exp->h, exp->cd);
 }
 
 static inline void exp_get(struct svc_export *exp)
diff --git a/include/linux/of_i2c.h b/include/linux/of_i2c.h
index 0efe8d4..1cb775f 100644
--- a/include/linux/of_i2c.h
+++ b/include/linux/of_i2c.h
@@ -20,6 +20,10 @@
 /* must call put_device() when done with returned i2c_client device */
 extern struct i2c_client *of_find_i2c_device_by_node(struct device_node *node);
 
+/* must call put_device() when done with returned i2c_adapter device */
+extern struct i2c_adapter *of_find_i2c_adapter_by_node(
+						struct device_node *node);
+
 #else
 static inline void of_i2c_register_devices(struct i2c_adapter *adap)
 {
diff --git a/include/linux/of_pci.h b/include/linux/of_pci.h
index f93e217..bb115de 100644
--- a/include/linux/of_pci.h
+++ b/include/linux/of_pci.h
@@ -5,7 +5,7 @@
 
 struct pci_dev;
 struct of_irq;
-int of_irq_map_pci(struct pci_dev *pdev, struct of_irq *out_irq);
+int of_irq_map_pci(const struct pci_dev *pdev, struct of_irq *out_irq);
 
 struct device_node;
 struct device_node *of_pci_find_child_device(struct device_node *parent,
diff --git a/include/linux/oom.h b/include/linux/oom.h
index 3d76475..e4c29bc 100644
--- a/include/linux/oom.h
+++ b/include/linux/oom.h
@@ -43,8 +43,9 @@
 extern void compare_swap_oom_score_adj(int old_val, int new_val);
 extern int test_set_oom_score_adj(int new_val);
 
-extern unsigned int oom_badness(struct task_struct *p, struct mem_cgroup *memcg,
-			const nodemask_t *nodemask, unsigned long totalpages);
+extern unsigned long oom_badness(struct task_struct *p,
+		struct mem_cgroup *memcg, const nodemask_t *nodemask,
+		unsigned long totalpages);
 extern int try_set_zonelist_oom(struct zonelist *zonelist, gfp_t gfp_flags);
 extern void clear_zonelist_oom(struct zonelist *zonelist, gfp_t gfp_flags);
 
diff --git a/include/linux/page-isolation.h b/include/linux/page-isolation.h
index 051c1b1..3bdcab3 100644
--- a/include/linux/page-isolation.h
+++ b/include/linux/page-isolation.h
@@ -3,7 +3,7 @@
 
 /*
  * Changes migrate type in [start_pfn, end_pfn) to be MIGRATE_ISOLATE.
- * If specified range includes migrate types other than MOVABLE,
+ * If specified range includes migrate types other than MOVABLE or CMA,
  * this will fail with -EBUSY.
  *
  * For isolating all pages in the range finally, the caller have to
@@ -11,27 +11,27 @@
  * test it.
  */
 extern int
-start_isolate_page_range(unsigned long start_pfn, unsigned long end_pfn);
+start_isolate_page_range(unsigned long start_pfn, unsigned long end_pfn,
+			 unsigned migratetype);
 
 /*
  * Changes MIGRATE_ISOLATE to MIGRATE_MOVABLE.
  * target range is [start_pfn, end_pfn)
  */
 extern int
-undo_isolate_page_range(unsigned long start_pfn, unsigned long end_pfn);
+undo_isolate_page_range(unsigned long start_pfn, unsigned long end_pfn,
+			unsigned migratetype);
 
 /*
- * test all pages in [start_pfn, end_pfn)are isolated or not.
+ * Test all pages in [start_pfn, end_pfn) are isolated or not.
  */
-extern int
-test_pages_isolated(unsigned long start_pfn, unsigned long end_pfn);
+int test_pages_isolated(unsigned long start_pfn, unsigned long end_pfn);
 
 /*
- * Internal funcs.Changes pageblock's migrate type.
- * Please use make_pagetype_isolated()/make_pagetype_movable().
+ * Internal functions. Changes pageblock's migrate type.
  */
 extern int set_migratetype_isolate(struct page *page);
-extern void unset_migratetype_isolate(struct page *page);
+extern void unset_migratetype_isolate(struct page *page, unsigned migratetype);
 
 
 #endif
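
Editor's note: start_isolate_page_range()/undo_isolate_page_range() now take the migratetype to restore, so CMA ranges can be returned to MIGRATE_CMA instead of MIGRATE_MOVABLE. A hedged sketch of a hypothetical caller (assumes CONFIG_CMA; not from this patch):

#include <linux/errno.h>
#include <linux/mmzone.h>
#include <linux/page-isolation.h>

/* Hypothetical: isolate a CMA range and roll back if any page escapes. */
static int example_isolate_cma_range(unsigned long start_pfn,
				     unsigned long end_pfn)
{
	int ret = start_isolate_page_range(start_pfn, end_pfn, MIGRATE_CMA);

	if (ret)
		return ret;

	/* test_pages_isolated() returns 0 only if every page is isolated */
	if (test_pages_isolated(start_pfn, end_pfn)) {
		undo_isolate_page_range(start_pfn, end_pfn, MIGRATE_CMA);
		return -EBUSY;
	}
	return 0;
}
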
diff --git a/include/linux/pagemap.h b/include/linux/pagemap.h
index efa26b4..7cfad3b 100644
--- a/include/linux/pagemap.h
+++ b/include/linux/pagemap.h
@@ -460,11 +460,11 @@
  */
 static inline int fault_in_multipages_writeable(char __user *uaddr, int size)
 {
-	int ret;
+	int ret = 0;
 	char __user *end = uaddr + size - 1;
 
 	if (unlikely(size == 0))
-		return 0;
+		return ret;
 
 	/*
 	 * Writing zeroes into userspace here is OK, because we know that if
@@ -489,11 +489,11 @@
 					       int size)
 {
 	volatile char c;
-	int ret;
+	int ret = 0;
 	const char __user *end = uaddr + size - 1;
 
 	if (unlikely(size == 0))
-		return 0;
+		return ret;
 
 	while (uaddr <= end) {
 		ret = __get_user(c, uaddr);
diff --git a/include/linux/pata_arasan_cf_data.h b/include/linux/pata_arasan_cf_data.h
index a6ee9aa..a7b4fc3 100644
--- a/include/linux/pata_arasan_cf_data.h
+++ b/include/linux/pata_arasan_cf_data.h
@@ -4,7 +4,7 @@
  * Arasan Compact Flash host controller platform data header file
  *
  * Copyright (C) 2011 ST Microelectronics
- * Viresh Kumar <viresh.kumar@st.com>
+ * Viresh Kumar <viresh.linux@gmail.com>
  *
  * This file is licensed under the terms of the GNU General Public
  * License version 2. This program is licensed "as is" without any
diff --git a/include/linux/pci.h b/include/linux/pci.h
index 17b7b5b..fefb4e1 100644
--- a/include/linux/pci.h
+++ b/include/linux/pci.h
@@ -176,6 +176,8 @@
 	PCI_DEV_FLAGS_NO_D3 = (__force pci_dev_flags_t) 2,
 	/* Provide indication device is assigned by a Virtual Machine Manager */
 	PCI_DEV_FLAGS_ASSIGNED = (__force pci_dev_flags_t) 4,
+	/* Device causes system crash if in D3 during S3 sleep */
+	PCI_DEV_FLAGS_NO_D3_DURING_SLEEP = (__force pci_dev_flags_t) 8,
 };
 
 enum pci_irq_reroute_variant {
@@ -687,7 +689,7 @@
 void pci_read_bridge_bases(struct pci_bus *child);
 struct resource *pci_find_parent_resource(const struct pci_dev *dev,
 					  struct resource *res);
-u8 pci_swizzle_interrupt_pin(struct pci_dev *dev, u8 pin);
+u8 pci_swizzle_interrupt_pin(const struct pci_dev *dev, u8 pin);
 int pci_get_interrupt_pin(struct pci_dev *dev, struct pci_dev **bridge);
 u8 pci_common_swizzle(struct pci_dev *dev, u8 *pinp);
 extern struct pci_dev *pci_dev_get(struct pci_dev *dev);
@@ -1692,7 +1694,8 @@
 /* Arch may override this (weak) */
 extern struct device_node * __weak pcibios_get_phb_of_node(struct pci_bus *bus);
 
-static inline struct device_node *pci_device_to_OF_node(struct pci_dev *pdev)
+static inline struct device_node *
+pci_device_to_OF_node(const struct pci_dev *pdev)
 {
 	return pdev ? pdev->dev.of_node : NULL;
 }
diff --git a/include/linux/pci_ids.h b/include/linux/pci_ids.h
index 3329965..ab741b0 100644
--- a/include/linux/pci_ids.h
+++ b/include/linux/pci_ids.h
@@ -2506,6 +2506,7 @@
 #define PCI_DEVICE_ID_INTEL_MRST_SD2	0x084F
 #define PCI_DEVICE_ID_INTEL_I960	0x0960
 #define PCI_DEVICE_ID_INTEL_I960RM	0x0962
+#define PCI_DEVICE_ID_INTEL_CENTERTON_ILB	0x0c60
 #define PCI_DEVICE_ID_INTEL_8257X_SOL	0x1062
 #define PCI_DEVICE_ID_INTEL_82573E_SOL	0x1085
 #define PCI_DEVICE_ID_INTEL_82573L_SOL	0x108F
diff --git a/include/linux/perf_event.h b/include/linux/perf_event.h
index f325786..45db49f 100644
--- a/include/linux/perf_event.h
+++ b/include/linux/perf_event.h
@@ -555,6 +555,8 @@
 	PERF_RECORD_MAX,			/* non-ABI */
 };
 
+#define PERF_MAX_STACK_DEPTH		127
+
 enum perf_callchain_context {
 	PERF_CONTEXT_HV			= (__u64)-32,
 	PERF_CONTEXT_KERNEL		= (__u64)-128,
@@ -609,8 +611,6 @@
 #include <linux/sysfs.h>
 #include <asm/local.h>
 
-#define PERF_MAX_STACK_DEPTH		255
-
 struct perf_callchain_entry {
 	__u64				nr;
 	__u64				ip[PERF_MAX_STACK_DEPTH];
diff --git a/include/linux/power/charger-manager.h b/include/linux/power/charger-manager.h
index 4f75e53..241065c 100644
--- a/include/linux/power/charger-manager.h
+++ b/include/linux/power/charger-manager.h
@@ -18,6 +18,8 @@
 #include <linux/power_supply.h>
 
 enum data_source {
+	CM_BATTERY_PRESENT,
+	CM_NO_BATTERY,
 	CM_FUEL_GAUGE,
 	CM_CHARGER_STAT,
 };
@@ -29,6 +31,16 @@
 	CM_POLL_CHARGING_ONLY,
 };
 
+enum cm_event_types {
+	CM_EVENT_UNKNOWN = 0,
+	CM_EVENT_BATT_FULL,
+	CM_EVENT_BATT_IN,
+	CM_EVENT_BATT_OUT,
+	CM_EVENT_EXT_PWR_IN_OUT,
+	CM_EVENT_CHG_START_STOP,
+	CM_EVENT_OTHERS,
+};
+
 /**
  * struct charger_global_desc
  * @rtc_name: the name of RTC used to wake up the system from suspend.
@@ -38,11 +50,18 @@
  *	rtc_only_wakeup() returning false.
  *	If the RTC given to CM is the only wakeup reason,
  *	rtc_only_wakeup should return true.
+ * @assume_timer_stops_in_suspend:
+ *	Assume that the jiffy timer stops in suspend-to-RAM.
+ *	When enabled, CM does not rely on jiffies value in
+ *	suspend_again and assumes that jiffies value does not
+ *	change during suspend.
  */
 struct charger_global_desc {
 	char *rtc_name;
 
 	bool (*rtc_only_wakeup)(void);
+
+	bool assume_timer_stops_in_suspend;
 };
 
 /**
@@ -50,6 +69,11 @@
  * @psy_name: the name of power-supply-class for charger manager
  * @polling_mode:
  *	Determine which polling mode will be used
+ * @fullbatt_vchkdrop_ms:
+ * @fullbatt_vchkdrop_uV:
+ *	Check voltage drop after the battery is fully charged.
+ *	If it has dropped more than fullbatt_vchkdrop_uV after
+ *	fullbatt_vchkdrop_ms, CM will restart charging.
  * @fullbatt_uV: voltage in microvolt
  *	If it is not being charged and VBATT >= fullbatt_uV,
  *	it is assumed to be full.
@@ -76,6 +100,8 @@
 	enum polling_modes polling_mode;
 	unsigned int polling_interval_ms;
 
+	unsigned int fullbatt_vchkdrop_ms;
+	unsigned int fullbatt_vchkdrop_uV;
 	unsigned int fullbatt_uV;
 
 	enum data_source battery_present;
@@ -101,6 +127,11 @@
  * @fuel_gauge: power_supply for fuel gauge
  * @charger_stat: array of power_supply for chargers
  * @charger_enabled: the state of charger
+ * @fullbatt_vchk_jiffies_at:
+ *	jiffies at the time full battery check will occur.
+ * @fullbatt_vchk_uV: voltage in microvolt
+ *	criteria for full battery
+ * @fullbatt_vchk_work: work queue for full battery check
  * @emergency_stop:
  *	When setting true, stop charging
  * @last_temp_mC: the measured temperature in milli-Celsius
@@ -121,6 +152,10 @@
 
 	bool charger_enabled;
 
+	unsigned long fullbatt_vchk_jiffies_at;
+	unsigned int fullbatt_vchk_uV;
+	struct delayed_work fullbatt_vchk_work;
+
 	int emergency_stop;
 	int last_temp_mC;
 
@@ -134,14 +169,13 @@
 #ifdef CONFIG_CHARGER_MANAGER
 extern int setup_charger_manager(struct charger_global_desc *gd);
 extern bool cm_suspend_again(void);
+extern void cm_notify_event(struct power_supply *psy,
+				enum cm_event_types type, char *msg);
 #else
-static void __maybe_unused setup_charger_manager(struct charger_global_desc *gd)
-{ }
-
-static bool __maybe_unused cm_suspend_again(void)
-{
-	return false;
-}
+static inline int setup_charger_manager(struct charger_global_desc *gd)
+{ return 0; }
+static inline bool cm_suspend_again(void) { return false; }
+static inline void cm_notify_event(struct power_supply *psy,
+				enum cm_event_types type, char *msg) { }
 #endif
-
 #endif /* _CHARGER_MANAGER_H */
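
Editor's note: cm_notify_event() lets charger and fuel-gauge drivers push battery events into the charger manager instead of waiting for the next poll. A hypothetical threaded interrupt handler sketched around it (the example_fg structure and names are assumptions, not part of this patch):

#include <linux/interrupt.h>
#include <linux/power_supply.h>
#include <linux/power/charger-manager.h>

/* Hypothetical fuel-gauge driver data with an embedded power_supply. */
struct example_fg {
	struct power_supply psy;
	/* ... hardware specifics ... */
};

/* Hypothetical threaded IRQ handler reporting a full battery to CM. */
static irqreturn_t example_fg_irq_thread(int irq, void *dev_id)
{
	struct example_fg *fg = dev_id;

	cm_notify_event(&fg->psy, CM_EVENT_BATT_FULL, NULL);
	return IRQ_HANDLED;
}
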
diff --git a/include/linux/power/max17042_battery.h b/include/linux/power/max17042_battery.h
index e01b167..89dd84f 100644
--- a/include/linux/power/max17042_battery.h
+++ b/include/linux/power/max17042_battery.h
@@ -116,6 +116,18 @@
 	MAX17042_VFSOC		= 0xFF,
 };
 
+/* Registers specific to max17047/50 */
+enum max17047_register {
+	MAX17047_QRTbl00	= 0x12,
+	MAX17047_FullSOCThr	= 0x13,
+	MAX17047_QRTbl10	= 0x22,
+	MAX17047_QRTbl20	= 0x32,
+	MAX17047_V_empty	= 0x3A,
+	MAX17047_QRTbl30	= 0x42,
+};
+
+enum max170xx_chip_type {MAX17042, MAX17047};
+
 /*
  * used for setting a register to a desired value
  * addr : address for a register
@@ -144,6 +156,7 @@
 	u16	shdntimer;	/* 0x03F */
 
 	/* App data */
+	u16	full_soc_thresh;	/* 0x13 */
 	u16	design_cap;	/* 0x18 */
 	u16	ichgt_term;	/* 0x1E */
 
@@ -162,6 +175,10 @@
 	u16	lavg_empty;	/* 0x36 */
 	u16	dqacc;		/* 0x45 */
 	u16	dpacc;		/* 0x46 */
+	u16	qrtbl00;	/* 0x12 */
+	u16	qrtbl10;	/* 0x22 */
+	u16	qrtbl20;	/* 0x32 */
+	u16	qrtbl30;	/* 0x42 */
 
 	/* Cell technology from power_supply.h */
 	u16	cell_technology;
diff --git a/include/linux/power_supply.h b/include/linux/power_supply.h
index c38c13d..3b912be 100644
--- a/include/linux/power_supply.h
+++ b/include/linux/power_supply.h
@@ -96,6 +96,7 @@
 	POWER_SUPPLY_PROP_VOLTAGE_MIN_DESIGN,
 	POWER_SUPPLY_PROP_VOLTAGE_NOW,
 	POWER_SUPPLY_PROP_VOLTAGE_AVG,
+	POWER_SUPPLY_PROP_VOLTAGE_OCV,
 	POWER_SUPPLY_PROP_CURRENT_MAX,
 	POWER_SUPPLY_PROP_CURRENT_NOW,
 	POWER_SUPPLY_PROP_CURRENT_AVG,
@@ -211,7 +212,7 @@
 extern int power_supply_am_i_supplied(struct power_supply *psy);
 extern int power_supply_set_battery_charged(struct power_supply *psy);
 
-#if defined(CONFIG_POWER_SUPPLY) || defined(CONFIG_POWER_SUPPLY_MODULE)
+#ifdef CONFIG_POWER_SUPPLY
 extern int power_supply_is_system_supplied(void);
 #else
 static inline int power_supply_is_system_supplied(void) { return -ENOSYS; }
@@ -261,6 +262,7 @@
 	case POWER_SUPPLY_PROP_VOLTAGE_MIN_DESIGN:
 	case POWER_SUPPLY_PROP_VOLTAGE_NOW:
 	case POWER_SUPPLY_PROP_VOLTAGE_AVG:
+	case POWER_SUPPLY_PROP_VOLTAGE_OCV:
 	case POWER_SUPPLY_PROP_POWER_NOW:
 		return 1;
 	default:
diff --git a/include/linux/prctl.h b/include/linux/prctl.h
index 78b76e2..3988012 100644
--- a/include/linux/prctl.h
+++ b/include/linux/prctl.h
@@ -113,6 +113,12 @@
 # define PR_SET_MM_START_STACK		5
 # define PR_SET_MM_START_BRK		6
 # define PR_SET_MM_BRK			7
+# define PR_SET_MM_ARG_START		8
+# define PR_SET_MM_ARG_END		9
+# define PR_SET_MM_ENV_START		10
+# define PR_SET_MM_ENV_END		11
+# define PR_SET_MM_AUXV			12
+# define PR_SET_MM_EXE_FILE		13
 
 /*
  * Set specific pid that is allowed to ptrace the current task.
@@ -121,8 +127,8 @@
 #define PR_SET_PTRACER 0x59616d61
 # define PR_SET_PTRACER_ANY ((unsigned long)-1)
 
-#define PR_SET_CHILD_SUBREAPER 36
-#define PR_GET_CHILD_SUBREAPER 37
+#define PR_SET_CHILD_SUBREAPER	36
+#define PR_GET_CHILD_SUBREAPER	37
 
 /*
  * If no_new_privs is set, then operations that grant new privileges (i.e.
@@ -136,7 +142,9 @@
  * asking selinux for a specific new context (e.g. with runcon) will result
  * in execve returning -EPERM.
  */
-#define PR_SET_NO_NEW_PRIVS 38
-#define PR_GET_NO_NEW_PRIVS 39
+#define PR_SET_NO_NEW_PRIVS	38
+#define PR_GET_NO_NEW_PRIVS	39
+
+#define PR_GET_TID_ADDRESS	40
 
 #endif /* _LINUX_PRCTL_H */
diff --git a/include/linux/pstore_ram.h b/include/linux/pstore_ram.h
index 7ed7fd4..3b823d4 100644
--- a/include/linux/pstore_ram.h
+++ b/include/linux/pstore_ram.h
@@ -69,12 +69,14 @@
 						       size_t size,
 						       bool ecc);
 void persistent_ram_free(struct persistent_ram_zone *prz);
+void persistent_ram_zap(struct persistent_ram_zone *prz);
 struct persistent_ram_zone *persistent_ram_init_ringbuffer(struct device *dev,
 		bool ecc);
 
 int persistent_ram_write(struct persistent_ram_zone *prz, const void *s,
 	unsigned int count);
 
+void persistent_ram_save_old(struct persistent_ram_zone *prz);
 size_t persistent_ram_old_size(struct persistent_ram_zone *prz);
 void *persistent_ram_old(struct persistent_ram_zone *prz);
 void persistent_ram_free_old(struct persistent_ram_zone *prz);
diff --git a/include/linux/pxa2xx_ssp.h b/include/linux/pxa2xx_ssp.h
index 44835fb..f366320 100644
--- a/include/linux/pxa2xx_ssp.h
+++ b/include/linux/pxa2xx_ssp.h
@@ -160,7 +160,9 @@
 	PXA25x_SSP,  /* pxa 210, 250, 255, 26x */
 	PXA25x_NSSP, /* pxa 255, 26x (including ASSP) */
 	PXA27x_SSP,
+	PXA3xx_SSP,
 	PXA168_SSP,
+	PXA910_SSP,
 	CE4100_SSP,
 };
 
diff --git a/include/linux/radix-tree.h b/include/linux/radix-tree.h
index 0d04cd6..ffc444c 100644
--- a/include/linux/radix-tree.h
+++ b/include/linux/radix-tree.h
@@ -368,8 +368,11 @@
 			iter->index++;
 			if (likely(*slot))
 				return slot;
-			if (flags & RADIX_TREE_ITER_CONTIG)
+			if (flags & RADIX_TREE_ITER_CONTIG) {
+				/* forbid switching to the next chunk */
+				iter->next_index = 0;
 				break;
+			}
 		}
 	}
 	return NULL;
diff --git a/include/linux/rcutiny.h b/include/linux/rcutiny.h
index adb5e5a..854dc4c 100644
--- a/include/linux/rcutiny.h
+++ b/include/linux/rcutiny.h
@@ -87,8 +87,9 @@
 
 #ifdef CONFIG_TINY_RCU
 
-static inline int rcu_needs_cpu(int cpu)
+static inline int rcu_needs_cpu(int cpu, unsigned long *delta_jiffies)
 {
+	*delta_jiffies = ULONG_MAX;
 	return 0;
 }
 
@@ -96,8 +97,9 @@
 
 int rcu_preempt_needs_cpu(void);
 
-static inline int rcu_needs_cpu(int cpu)
+static inline int rcu_needs_cpu(int cpu, unsigned long *delta_jiffies)
 {
+	*delta_jiffies = ULONG_MAX;
 	return rcu_preempt_needs_cpu();
 }
 
diff --git a/include/linux/rcutree.h b/include/linux/rcutree.h
index 3c6083c..952b793 100644
--- a/include/linux/rcutree.h
+++ b/include/linux/rcutree.h
@@ -32,7 +32,7 @@
 
 extern void rcu_init(void);
 extern void rcu_note_context_switch(int cpu);
-extern int rcu_needs_cpu(int cpu);
+extern int rcu_needs_cpu(int cpu, unsigned long *delta_jiffies);
 extern void rcu_cpu_stall_reset(void);
 
 /*
diff --git a/include/linux/res_counter.h b/include/linux/res_counter.h
index fb20189..7d7fbe2 100644
--- a/include/linux/res_counter.h
+++ b/include/linux/res_counter.h
@@ -119,7 +119,7 @@
 					   unsigned long val, bool force);
 int __must_check res_counter_charge(struct res_counter *counter,
 		unsigned long val, struct res_counter **limit_fail_at);
-int __must_check res_counter_charge_nofail(struct res_counter *counter,
+int res_counter_charge_nofail(struct res_counter *counter,
 		unsigned long val, struct res_counter **limit_fail_at);
 
 /*
@@ -135,6 +135,9 @@
 void res_counter_uncharge_locked(struct res_counter *counter, unsigned long val);
 void res_counter_uncharge(struct res_counter *counter, unsigned long val);
 
+void res_counter_uncharge_until(struct res_counter *counter,
+				struct res_counter *top,
+				unsigned long val);
 /**
  * res_counter_margin - calculate chargeable space of a counter
  * @cnt: the counter
diff --git a/include/linux/rio.h b/include/linux/rio.h
index 4d50611..a90ebad 100644
--- a/include/linux/rio.h
+++ b/include/linux/rio.h
@@ -20,6 +20,9 @@
 #include <linux/errno.h>
 #include <linux/device.h>
 #include <linux/rio_regs.h>
+#ifdef CONFIG_RAPIDIO_DMA_ENGINE
+#include <linux/dmaengine.h>
+#endif
 
 #define RIO_NO_HOPCOUNT		-1
 #define RIO_INVALID_DESTID	0xffff
@@ -254,6 +257,9 @@
 	u32 phys_efptr;
 	unsigned char name[40];
 	void *priv;		/* Master port private data */
+#ifdef CONFIG_RAPIDIO_DMA_ENGINE
+	struct dma_device	dma;
+#endif
 };
 
 /**
@@ -395,6 +401,47 @@
 	u32 raw[RIO_PW_MSG_SIZE/sizeof(u32)];
 };
 
+#ifdef CONFIG_RAPIDIO_DMA_ENGINE
+
+/**
+ * enum rio_write_type - RIO write transaction types used in DMA transfers
+ *
+ * Note: the RapidIO specification defines write (NWRITE) and
+ * write-with-response (NWRITE_R) data transfer operations.
+ * Existing DMA controllers that service RapidIO may use one of these
+ * operations for the entire data transfer, or combine them so that only
+ * the last data packet requires a response.
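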
+ */
+enum rio_write_type {
+	RDW_DEFAULT,		/* default method used by DMA driver */
+	RDW_ALL_NWRITE,		/* all packets use NWRITE */
+	RDW_ALL_NWRITE_R,	/* all packets use NWRITE_R */
+	RDW_LAST_NWRITE_R,	/* last packet uses NWRITE_R, others - NWRITE */
+};
+
+struct rio_dma_ext {
+	u16 destid;
+	u64 rio_addr;	/* low 64-bits of 66-bit RapidIO address */
+	u8  rio_addr_u;  /* upper 2-bits of 66-bit RapidIO address */
+	enum rio_write_type wr_type; /* preferred RIO write operation type */
+};
+
+struct rio_dma_data {
+	/* Local data (as scatterlist) */
+	struct scatterlist	*sg;	/* I/O scatter list */
+	unsigned int		sg_len;	/* size of scatter list */
+	/* Remote device address (flat buffer) */
+	u64 rio_addr;	/* low 64-bits of 66-bit RapidIO address */
+	u8  rio_addr_u;  /* upper 2-bits of 66-bit RapidIO address */
+	enum rio_write_type wr_type; /* preferred RIO write operation type */
+};
+
+static inline struct rio_mport *dma_to_mport(struct dma_device *ddev)
+{
+	return container_of(ddev, struct rio_mport, dma);
+}
+#endif /* CONFIG_RAPIDIO_DMA_ENGINE */
+
 /* Architecture and hardware-specific functions */
 extern int rio_register_mport(struct rio_mport *);
 extern int rio_open_inb_mbox(struct rio_mport *, void *, int, int);
diff --git a/include/linux/rio_drv.h b/include/linux/rio_drv.h
index 7f07470..31ad146 100644
--- a/include/linux/rio_drv.h
+++ b/include/linux/rio_drv.h
@@ -377,6 +377,15 @@
 struct rio_dev *rio_dev_get(struct rio_dev *);
 void rio_dev_put(struct rio_dev *);
 
+#ifdef CONFIG_RAPIDIO_DMA_ENGINE
+extern struct dma_chan *rio_request_dma(struct rio_dev *rdev);
+extern void rio_release_dma(struct dma_chan *dchan);
+extern struct dma_async_tx_descriptor *rio_dma_prep_slave_sg(
+		struct rio_dev *rdev, struct dma_chan *dchan,
+		struct rio_dma_data *data,
+		enum dma_transfer_direction direction, unsigned long flags);
+#endif
+
 /**
  * rio_name - Get the unique RIO device identifier
  * @rdev: RIO device
diff --git a/include/linux/rmap.h b/include/linux/rmap.h
index fd07c45..3fce545 100644
--- a/include/linux/rmap.h
+++ b/include/linux/rmap.h
@@ -173,8 +173,6 @@
 };
 #define TTU_ACTION(x) ((x) & TTU_ACTION_MASK)
 
-bool is_vma_temporary_stack(struct vm_area_struct *vma);
-
 int try_to_unmap(struct page *, enum ttu_flags flags);
 int try_to_unmap_one(struct page *, struct vm_area_struct *,
 			unsigned long address, enum ttu_flags flags);
diff --git a/include/linux/rtc.h b/include/linux/rtc.h
index fcabfb4..f071b39 100644
--- a/include/linux/rtc.h
+++ b/include/linux/rtc.h
@@ -91,6 +91,9 @@
 #define RTC_PLL_GET	_IOR('p', 0x11, struct rtc_pll_info)  /* Get PLL correction */
 #define RTC_PLL_SET	_IOW('p', 0x12, struct rtc_pll_info)  /* Set PLL correction */
 
+#define RTC_VL_READ	_IOR('p', 0x13, int)	/* Voltage low detector */
+#define RTC_VL_CLR	_IO('p', 0x14)		/* Clear voltage low information */
+
 /* interrupt flags */
 #define RTC_IRQF 0x80	/* Any of the following is active */
 #define RTC_PF 0x40	/* Periodic interrupt */
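
Editor's note: RTC_VL_READ/RTC_VL_CLR expose the low-voltage detector of RTCs that have one (support is driver-specific). A minimal user-space sketch of the new ioctls (hypothetical program, not from this patch):

#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <linux/rtc.h>

int main(void)
{
	int low = 0;
	int fd = open("/dev/rtc0", O_RDONLY);

	if (fd < 0)
		return 1;
	if (ioctl(fd, RTC_VL_READ, &low) == 0 && low) {
		printf("RTC reports low backup-battery voltage\n");
		ioctl(fd, RTC_VL_CLR);	/* acknowledge/clear the flag */
	}
	close(fd);
	return 0;
}
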
diff --git a/include/linux/rtc/ds1307.h b/include/linux/rtc/ds1307.h
new file mode 100644
index 0000000..291b1c4
--- /dev/null
+++ b/include/linux/rtc/ds1307.h
@@ -0,0 +1,22 @@
+/*
+ * ds1307.h - platform_data for the ds1307 (and variants) rtc driver
+ * (C) Copyright 2012 by Wolfram Sang, Pengutronix e.K.
+ * same license as the driver
+ */
+
+#ifndef _LINUX_DS1307_H
+#define _LINUX_DS1307_H
+
+#include <linux/types.h>
+
+#define DS1307_TRICKLE_CHARGER_250_OHM	0x01
+#define DS1307_TRICKLE_CHARGER_2K_OHM	0x02
+#define DS1307_TRICKLE_CHARGER_4K_OHM	0x03
+#define DS1307_TRICKLE_CHARGER_NO_DIODE	0x04
+#define DS1307_TRICKLE_CHARGER_DIODE	0x08
+
+struct ds1307_platform_data {
+	u8 trickle_charger_setup;
+};
+
+#endif /* _LINUX_DS1307_H */
diff --git a/include/linux/sched.h b/include/linux/sched.h
index f45c0b2..4059c0f 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -145,6 +145,7 @@
 
 
 extern void calc_global_load(unsigned long ticks);
+extern void update_cpu_load_nohz(void);
 
 extern unsigned long get_parent_ip(unsigned long addr);
 
@@ -438,6 +439,7 @@
 					/* leave room for more dump flags */
 #define MMF_VM_MERGEABLE	16	/* KSM may merge identical pages */
 #define MMF_VM_HUGEPAGE		17	/* set when VM_HUGEPAGE is set on vma */
+#define MMF_EXE_FILE_CHANGED	18	/* see prctl_set_mm_exe_file() */
 
 #define MMF_INIT_MASK		(MMF_DUMPABLE_MASK | MMF_DUMP_FILTER_MASK)
 
@@ -875,6 +877,8 @@
 	 * Number of busy cpus in this group.
 	 */
 	atomic_t nr_busy_cpus;
+
+	unsigned long cpumask[0]; /* iteration mask */
 };
 
 struct sched_group {
@@ -899,6 +903,15 @@
 	return to_cpumask(sg->cpumask);
 }
 
+/*
+ * cpumask masking which cpus in the group are allowed to iterate up the domain
+ * tree.
+ */
+static inline struct cpumask *sched_group_mask(struct sched_group *sg)
+{
+	return to_cpumask(sg->sgp->cpumask);
+}
+
 /**
  * group_first_cpu - Returns the first cpu in the cpumask of a sched_group.
  * @group: The group whose first cpu is to be returned.
@@ -1187,7 +1200,6 @@
 	struct list_head run_list;
 	unsigned long timeout;
 	unsigned int time_slice;
-	int nr_cpus_allowed;
 
 	struct sched_rt_entity *back;
 #ifdef CONFIG_RT_GROUP_SCHED
@@ -1252,6 +1264,7 @@
 #endif
 
 	unsigned int policy;
+	int nr_cpus_allowed;
 	cpumask_t cpus_allowed;
 
 #ifdef CONFIG_PREEMPT_RCU
@@ -1301,11 +1314,6 @@
 	unsigned sched_reset_on_fork:1;
 	unsigned sched_contributes_to_load:1;
 
-#ifdef CONFIG_GENERIC_HARDIRQS
-	/* IRQ handler threads */
-	unsigned irq_thread:1;
-#endif
-
 	pid_t pid;
 	pid_t tgid;
 
@@ -1313,10 +1321,9 @@
 	/* Canary value for the -fstack-protector gcc feature */
 	unsigned long stack_canary;
 #endif
-
-	/* 
+	/*
 	 * pointers to (original) parent process, youngest child, younger sibling,
-	 * older sibling, respectively.  (p->father can be replaced with 
+	 * older sibling, respectively.  (p->father can be replaced with
 	 * p->real_parent->pid)
 	 */
 	struct task_struct __rcu *real_parent; /* real parent process */
@@ -1363,8 +1370,6 @@
 					 * credentials (COW) */
 	const struct cred __rcu *cred;	/* effective (overridable) subjective task
 					 * credentials (COW) */
-	struct cred *replacement_session_keyring; /* for KEYCTL_SESSION_TO_PARENT */
-
 	char comm[TASK_COMM_LEN]; /* executable name excluding path
 				     - access with [gs]et_task_comm (which lock
 				       it with task_lock())
@@ -1400,6 +1405,8 @@
 	int (*notifier)(void *priv);
 	void *notifier_data;
 	sigset_t *notifier_mask;
+	struct hlist_head task_works;
+
 	struct audit_context *audit_context;
 #ifdef CONFIG_AUDITSYSCALL
 	uid_t loginuid;
@@ -2213,6 +2220,20 @@
 extern int do_sigaction(int, struct k_sigaction *, struct k_sigaction *);
 extern int do_sigaltstack(const stack_t __user *, stack_t __user *, unsigned long);
 
+static inline void restore_saved_sigmask(void)
+{
+	if (test_and_clear_restore_sigmask())
+		__set_current_blocked(&current->saved_sigmask);
+}
+
+static inline sigset_t *sigmask_to_save(void)
+{
+	sigset_t *res = &current->blocked;
+	if (unlikely(test_restore_sigmask()))
+		res = &current->saved_sigmask;
+	return res;
+}
+
 static inline int kill_cad_pid(int sig, int priv)
 {
 	return kill_pid(cad_pid, sig, priv);
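
Editor's note: restore_saved_sigmask() and sigmask_to_save() centralize the TIF_RESTORE_SIGMASK handling that each architecture used to open-code. A hedged sketch of how an arch signal-delivery path might combine them with the new signal_delivered() (example_setup_frame() is a stand-in for arch frame setup, not a real function):

#include <linux/sched.h>
#include <linux/signal.h>

/* example_setup_frame() is a hypothetical arch-specific frame builder. */
static void example_handle_signal(int sig, siginfo_t *info,
				  struct k_sigaction *ka, struct pt_regs *regs)
{
	/* Use the saved mask when returning from pselect/ppoll and friends. */
	sigset_t *oldset = sigmask_to_save();

	if (example_setup_frame(sig, ka, info, oldset, regs) < 0)
		return;	/* frame setup failed; the signal will be forced later */

	/* Blocks sig plus sa_mask and clears TIF_RESTORE_SIGMASK. */
	signal_delivered(sig, info, ka, regs, 0 /* stepping: arch-specific */);
}
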
diff --git a/include/linux/security.h b/include/linux/security.h
index ab0e091..4e5a73c 100644
--- a/include/linux/security.h
+++ b/include/linux/security.h
@@ -86,9 +86,9 @@
 extern int cap_inode_removexattr(struct dentry *dentry, const char *name);
 extern int cap_inode_need_killpriv(struct dentry *dentry);
 extern int cap_inode_killpriv(struct dentry *dentry);
-extern int cap_file_mmap(struct file *file, unsigned long reqprot,
-			 unsigned long prot, unsigned long flags,
-			 unsigned long addr, unsigned long addr_only);
+extern int cap_mmap_addr(unsigned long addr);
+extern int cap_mmap_file(struct file *file, unsigned long reqprot,
+			 unsigned long prot, unsigned long flags);
 extern int cap_task_fix_setuid(struct cred *new, const struct cred *old, int flags);
 extern int cap_task_prctl(int option, unsigned long arg2, unsigned long arg3,
 			  unsigned long arg4, unsigned long arg5);
@@ -586,15 +586,17 @@
  *	simple integer value.  When @arg represents a user space pointer, it
  *	should never be used by the security module.
  *	Return 0 if permission is granted.
- * @file_mmap :
+ * @mmap_addr :
+ *	Check permissions for a mmap operation at @addr.
+ *	@addr contains virtual address that will be used for the operation.
+ *	Return 0 if permission is granted.
+ * @mmap_file :
  *	Check permissions for a mmap operation.  The @file may be NULL, e.g.
  *	if mapping anonymous memory.
  *	@file contains the file structure for file to map (may be NULL).
  *	@reqprot contains the protection requested by the application.
  *	@prot contains the protection that will be applied by the kernel.
  *	@flags contains the operational flags.
- *	@addr contains virtual address that will be used for the operation.
- *	@addr_only contains a boolean: 0 if file-backed VMA, otherwise 1.
  *	Return 0 if permission is granted.
  * @file_mprotect:
  *	Check permissions before changing memory access permissions.
@@ -1481,10 +1483,10 @@
 	void (*file_free_security) (struct file *file);
 	int (*file_ioctl) (struct file *file, unsigned int cmd,
 			   unsigned long arg);
-	int (*file_mmap) (struct file *file,
+	int (*mmap_addr) (unsigned long addr);
+	int (*mmap_file) (struct file *file,
 			  unsigned long reqprot, unsigned long prot,
-			  unsigned long flags, unsigned long addr,
-			  unsigned long addr_only);
+			  unsigned long flags);
 	int (*file_mprotect) (struct vm_area_struct *vma,
 			      unsigned long reqprot,
 			      unsigned long prot);
@@ -1743,9 +1745,9 @@
 int security_file_alloc(struct file *file);
 void security_file_free(struct file *file);
 int security_file_ioctl(struct file *file, unsigned int cmd, unsigned long arg);
-int security_file_mmap(struct file *file, unsigned long reqprot,
-			unsigned long prot, unsigned long flags,
-			unsigned long addr, unsigned long addr_only);
+int security_mmap_file(struct file *file, unsigned long prot,
+			unsigned long flags);
+int security_mmap_addr(unsigned long addr);
 int security_file_mprotect(struct vm_area_struct *vma, unsigned long reqprot,
 			   unsigned long prot);
 int security_file_lock(struct file *file, unsigned int cmd);
@@ -2181,13 +2183,15 @@
 	return 0;
 }
 
-static inline int security_file_mmap(struct file *file, unsigned long reqprot,
-				     unsigned long prot,
-				     unsigned long flags,
-				     unsigned long addr,
-				     unsigned long addr_only)
+static inline int security_mmap_file(struct file *file, unsigned long prot,
+				     unsigned long flags)
 {
-	return cap_file_mmap(file, reqprot, prot, flags, addr, addr_only);
+	return 0;
+}
+
+static inline int security_mmap_addr(unsigned long addr)
+{
+	return cap_mmap_addr(addr);
 }
 
 static inline int security_file_mprotect(struct vm_area_struct *vma,
diff --git a/include/linux/signal.h b/include/linux/signal.h
index 17046cc..26b424a 100644
--- a/include/linux/signal.h
+++ b/include/linux/signal.h
@@ -250,12 +250,13 @@
 extern int do_sigtimedwait(const sigset_t *, siginfo_t *,
 				const struct timespec *);
 extern int sigprocmask(int, sigset_t *, sigset_t *);
-extern void set_current_blocked(const sigset_t *);
+extern void set_current_blocked(sigset_t *);
+extern void __set_current_blocked(const sigset_t *);
 extern int show_unhandled_signals;
 extern int sigsuspend(sigset_t *);
 
 extern int get_signal_to_deliver(siginfo_t *info, struct k_sigaction *return_ka, struct pt_regs *regs, void *cookie);
-extern void block_sigmask(struct k_sigaction *ka, int signr);
+extern void signal_delivered(int sig, siginfo_t *info, struct k_sigaction *ka, struct pt_regs *regs, int stepping);
 extern void exit_signals(struct task_struct *tsk);
 
 extern struct kmem_cache *sighand_cachep;
diff --git a/include/linux/skbuff.h b/include/linux/skbuff.h
index 0e50171..642cb73 100644
--- a/include/linux/skbuff.h
+++ b/include/linux/skbuff.h
@@ -225,14 +225,11 @@
 	/* device driver is going to provide hardware time stamp */
 	SKBTX_IN_PROGRESS = 1 << 2,
 
-	/* ensure the originating sk reference is available on driver level */
-	SKBTX_DRV_NEEDS_SK_REF = 1 << 3,
-
 	/* device driver supports TX zero-copy buffers */
-	SKBTX_DEV_ZEROCOPY = 1 << 4,
+	SKBTX_DEV_ZEROCOPY = 1 << 3,
 
 	/* generate wifi status information (where possible) */
-	SKBTX_WIFI_STATUS = 1 << 5,
+	SKBTX_WIFI_STATUS = 1 << 4,
 };
 
 /*
@@ -1896,8 +1893,6 @@
 {
 	int delta = 0;
 
-	if (headroom < NET_SKB_PAD)
-		headroom = NET_SKB_PAD;
 	if (headroom > skb_headroom(skb))
 		delta = headroom - skb_headroom(skb);
 
diff --git a/include/linux/slab.h b/include/linux/slab.h
index a595dce..67d5d94 100644
--- a/include/linux/slab.h
+++ b/include/linux/slab.h
@@ -242,7 +242,7 @@
  */
 static inline void *kmalloc_array(size_t n, size_t size, gfp_t flags)
 {
-	if (size != 0 && n > ULONG_MAX / size)
+	if (size != 0 && n > SIZE_MAX / size)
 		return NULL;
 	return __kmalloc(n * size, flags);
 }
diff --git a/include/linux/spi/orion_spi.h b/include/linux/spi/orion_spi.h
deleted file mode 100644
index b4d9fa6..0000000
--- a/include/linux/spi/orion_spi.h
+++ /dev/null
@@ -1,17 +0,0 @@
-/*
- * orion_spi.h
- *
- * This file is licensed under the terms of the GNU General Public
- * License version 2. This program is licensed "as is" without any
- * warranty of any kind, whether express or implied.
- */
-
-#ifndef __LINUX_SPI_ORION_SPI_H
-#define __LINUX_SPI_ORION_SPI_H
-
-struct orion_spi_info {
-	u32	tclk;		/* no <linux/clk.h> support yet */
-};
-
-
-#endif
diff --git a/include/linux/spi/pxa2xx_spi.h b/include/linux/spi/pxa2xx_spi.h
index d3e1075..c73d144 100644
--- a/include/linux/spi/pxa2xx_spi.h
+++ b/include/linux/spi/pxa2xx_spi.h
@@ -43,7 +43,7 @@
 	void (*cs_control)(u32 command);
 };
 
-#ifdef CONFIG_ARCH_PXA
+#if defined(CONFIG_ARCH_PXA) || defined(CONFIG_ARCH_MMP)
 
 #include <linux/clk.h>
 #include <mach/dma.h>
diff --git a/include/linux/stmp_device.h b/include/linux/stmp_device.h
new file mode 100644
index 0000000..6cf7ec9
--- /dev/null
+++ b/include/linux/stmp_device.h
@@ -0,0 +1,20 @@
+/*
+ * basic functions for devices following the "stmp" style register layout
+ *
+ * Copyright (C) 2011 Wolfram Sang, Pengutronix e.K.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+#ifndef __STMP_DEVICE_H__
+#define __STMP_DEVICE_H__
+
+#define STMP_OFFSET_REG_SET	0x4
+#define STMP_OFFSET_REG_CLR	0x8
+#define STMP_OFFSET_REG_TOG	0xc
+
+extern int stmp_reset_block(void __iomem *);
+#endif /* __STMP_DEVICE_H__ */
diff --git a/include/linux/sunrpc/svc.h b/include/linux/sunrpc/svc.h
index 51b29ac4..40e0a27 100644
--- a/include/linux/sunrpc/svc.h
+++ b/include/linux/sunrpc/svc.h
@@ -232,7 +232,6 @@
 	struct svc_pool *	rq_pool;	/* thread pool */
 	struct svc_procedure *	rq_procinfo;	/* procedure info */
 	struct auth_ops *	rq_authop;	/* authentication flavour */
-	u32			rq_flavor;	/* pseudoflavor */
 	struct svc_cred		rq_cred;	/* auth info */
 	void *			rq_xprt_ctxt;	/* transport specific context ptr */
 	struct svc_deferred_req*rq_deferred;	/* deferred request we are replaying */
@@ -416,6 +415,7 @@
  */
 int svc_rpcb_setup(struct svc_serv *serv, struct net *net);
 void svc_rpcb_cleanup(struct svc_serv *serv, struct net *net);
+int svc_bind(struct svc_serv *serv, struct net *net);
 struct svc_serv *svc_create(struct svc_program *, unsigned int,
 			    void (*shutdown)(struct svc_serv *, struct net *net));
 struct svc_rqst *svc_prepare_thread(struct svc_serv *serv,
diff --git a/include/linux/sunrpc/svcauth.h b/include/linux/sunrpc/svcauth.h
index 548790e..dd74084 100644
--- a/include/linux/sunrpc/svcauth.h
+++ b/include/linux/sunrpc/svcauth.h
@@ -15,14 +15,23 @@
 #include <linux/sunrpc/msg_prot.h>
 #include <linux/sunrpc/cache.h>
 #include <linux/hash.h>
+#include <linux/cred.h>
 
-#define SVC_CRED_NGROUPS	32
 struct svc_cred {
 	uid_t			cr_uid;
 	gid_t			cr_gid;
 	struct group_info	*cr_group_info;
+	u32			cr_flavor; /* pseudoflavor */
+	char			*cr_principal; /* for gss */
 };
 
+static inline void free_svc_cred(struct svc_cred *cred)
+{
+	if (cred->cr_group_info)
+		put_group_info(cred->cr_group_info);
+	kfree(cred->cr_principal);
+}
+
 struct svc_rqst;		/* forward decl */
 struct in6_addr;
 
@@ -131,7 +140,7 @@
 extern struct auth_domain *auth_domain_find(char *name);
 extern struct auth_domain *auth_unix_lookup(struct net *net, struct in6_addr *addr);
 extern int auth_unix_forget_old(struct auth_domain *dom);
-extern void svcauth_unix_purge(void);
+extern void svcauth_unix_purge(struct net *net);
 extern void svcauth_unix_info_release(struct svc_xprt *xpt);
 extern int svcauth_unix_set_client(struct svc_rqst *rqstp);
 
diff --git a/include/linux/sunrpc/svcauth_gss.h b/include/linux/sunrpc/svcauth_gss.h
index 7c32daa..726aff1 100644
--- a/include/linux/sunrpc/svcauth_gss.h
+++ b/include/linux/sunrpc/svcauth_gss.h
@@ -22,7 +22,6 @@
 void gss_svc_shutdown_net(struct net *net);
 int svcauth_gss_register_pseudoflavor(u32 pseudoflavor, char * name);
 u32 svcauth_gss_flavor(struct auth_domain *dom);
-char *svc_gss_principal(struct svc_rqst *);
 
 #endif /* __KERNEL__ */
 #endif /* _LINUX_SUNRPC_SVCAUTH_GSS_H */
diff --git a/include/linux/swap.h b/include/linux/swap.h
index b1fd5c7..c84ec68 100644
--- a/include/linux/swap.h
+++ b/include/linux/swap.h
@@ -197,6 +197,10 @@
 	struct block_device *bdev;	/* swap device or bdev of swap file */
 	struct file *swap_file;		/* seldom referenced */
 	unsigned int old_block_size;	/* seldom referenced */
+#ifdef CONFIG_FRONTSWAP
+	unsigned long *frontswap_map;	/* frontswap in-use, one bit per page */
+	atomic_t frontswap_pages;	/* frontswap pages in-use counter */
+#endif
 };
 
 struct swap_list_t {
@@ -221,8 +225,8 @@
 /* linux/mm/swap.c */
 extern void __lru_cache_add(struct page *, enum lru_list lru);
 extern void lru_cache_add_lru(struct page *, enum lru_list lru);
-extern void lru_add_page_tail(struct zone* zone,
-			      struct page *page, struct page *page_tail);
+extern void lru_add_page_tail(struct page *page, struct page *page_tail,
+			      struct lruvec *lruvec);
 extern void activate_page(struct page *);
 extern void mark_page_accessed(struct page *);
 extern void lru_add_drain(void);
@@ -251,7 +255,7 @@
 /* linux/mm/vmscan.c */
 extern unsigned long try_to_free_pages(struct zonelist *zonelist, int order,
 					gfp_t gfp_mask, nodemask_t *mask);
-extern int __isolate_lru_page(struct page *page, isolate_mode_t mode, int file);
+extern int __isolate_lru_page(struct page *page, isolate_mode_t mode);
 extern unsigned long try_to_free_mem_cgroup_pages(struct mem_cgroup *mem,
 						  gfp_t gfp_mask, bool noswap);
 extern unsigned long mem_cgroup_shrink_node_zone(struct mem_cgroup *mem,
@@ -351,31 +355,14 @@
 extern unsigned int count_swap_pages(int, int);
 extern sector_t map_swap_page(struct page *, struct block_device **);
 extern sector_t swapdev_block(int, pgoff_t);
+extern int page_swapcount(struct page *);
 extern int reuse_swap_page(struct page *);
 extern int try_to_free_swap(struct page *);
 struct backing_dev_info;
 
-/* linux/mm/thrash.c */
-extern struct mm_struct *swap_token_mm;
-extern void grab_swap_token(struct mm_struct *);
-extern void __put_swap_token(struct mm_struct *);
-extern void disable_swap_token(struct mem_cgroup *memcg);
-
-static inline int has_swap_token(struct mm_struct *mm)
-{
-	return (mm == swap_token_mm);
-}
-
-static inline void put_swap_token(struct mm_struct *mm)
-{
-	if (has_swap_token(mm))
-		__put_swap_token(mm);
-}
-
 #ifdef CONFIG_CGROUP_MEM_RES_CTLR
 extern void
 mem_cgroup_uncharge_swapcache(struct page *page, swp_entry_t ent, bool swapout);
-extern int mem_cgroup_count_swap_user(swp_entry_t ent, struct page **pagep);
 #else
 static inline void
 mem_cgroup_uncharge_swapcache(struct page *page, swp_entry_t ent, bool swapout)
@@ -462,6 +449,11 @@
 {
 }
 
+static inline int page_swapcount(struct page *page)
+{
+	return 0;
+}
+
 #define reuse_swap_page(page)	(page_mapcount(page) == 1)
 
 static inline int try_to_free_swap(struct page *page)
@@ -476,37 +468,11 @@
 	return entry;
 }
 
-/* linux/mm/thrash.c */
-static inline void put_swap_token(struct mm_struct *mm)
-{
-}
-
-static inline void grab_swap_token(struct mm_struct *mm)
-{
-}
-
-static inline int has_swap_token(struct mm_struct *mm)
-{
-	return 0;
-}
-
-static inline void disable_swap_token(struct mem_cgroup *memcg)
-{
-}
-
 static inline void
 mem_cgroup_uncharge_swapcache(struct page *page, swp_entry_t ent)
 {
 }
 
-#ifdef CONFIG_CGROUP_MEM_RES_CTLR
-static inline int
-mem_cgroup_count_swap_user(swp_entry_t ent, struct page **pagep)
-{
-	return 0;
-}
-#endif
-
 #endif /* CONFIG_SWAP */
 #endif /* __KERNEL__*/
 #endif /* _LINUX_SWAP_H */
diff --git a/include/linux/swapfile.h b/include/linux/swapfile.h
new file mode 100644
index 0000000..e282624
--- /dev/null
+++ b/include/linux/swapfile.h
@@ -0,0 +1,13 @@
+#ifndef _LINUX_SWAPFILE_H
+#define _LINUX_SWAPFILE_H
+
+/*
+ * these were static in swapfile.c but frontswap.c needs them and we don't
+ * want to expose them to the dozens of source files that include swap.h
+ */
+extern spinlock_t swap_lock;
+extern struct swap_list_t swap_list;
+extern struct swap_info_struct *swap_info[];
+extern int try_to_unuse(unsigned int, bool, unsigned long);
+
+#endif /* _LINUX_SWAPFILE_H */
diff --git a/include/linux/swapops.h b/include/linux/swapops.h
index 792d16d..47ead51 100644
--- a/include/linux/swapops.h
+++ b/include/linux/swapops.h
@@ -9,13 +9,15 @@
  * get good packing density in that tree, so the index should be dense in
  * the low-order bits.
  *
- * We arrange the `type' and `offset' fields so that `type' is at the five
+ * We arrange the `type' and `offset' fields so that `type' is at the seven
  * high-order bits of the swp_entry_t and `offset' is right-aligned in the
- * remaining bits.
+ * remaining bits.  Although `type' itself needs only five bits, we allow for
+ * shmem/tmpfs to shift it all up a further two bits: see swp_to_radix_entry().
  *
  * swp_entry_t's are *never* stored anywhere in their arch-dependent format.
  */
-#define SWP_TYPE_SHIFT(e)	(sizeof(e.val) * 8 - MAX_SWAPFILES_SHIFT)
+#define SWP_TYPE_SHIFT(e)	((sizeof(e.val) * 8) - \
+			(MAX_SWAPFILES_SHIFT + RADIX_TREE_EXCEPTIONAL_SHIFT))
 #define SWP_OFFSET_MASK(e)	((1UL << SWP_TYPE_SHIFT(e)) - 1)
 
 /*
diff --git a/include/linux/syscalls.h b/include/linux/syscalls.h
index 3de3acb..19439c7 100644
--- a/include/linux/syscalls.h
+++ b/include/linux/syscalls.h
@@ -858,4 +858,6 @@
 				      unsigned long riovcnt,
 				      unsigned long flags);
 
+asmlinkage long sys_kcmp(pid_t pid1, pid_t pid2, int type,
+			 unsigned long idx1, unsigned long idx2);
 #endif
diff --git a/include/linux/task_work.h b/include/linux/task_work.h
new file mode 100644
index 0000000..294d5d5
--- /dev/null
+++ b/include/linux/task_work.h
@@ -0,0 +1,33 @@
+#ifndef _LINUX_TASK_WORK_H
+#define _LINUX_TASK_WORK_H
+
+#include <linux/list.h>
+#include <linux/sched.h>
+
+struct task_work;
+typedef void (*task_work_func_t)(struct task_work *);
+
+struct task_work {
+	struct hlist_node hlist;
+	task_work_func_t func;
+	void *data;
+};
+
+static inline void
+init_task_work(struct task_work *twork, task_work_func_t func, void *data)
+{
+	twork->func = func;
+	twork->data = data;
+}
+
+int task_work_add(struct task_struct *task, struct task_work *twork, bool);
+struct task_work *task_work_cancel(struct task_struct *, task_work_func_t);
+void task_work_run(void);
+
+static inline void exit_task_work(struct task_struct *task)
+{
+	if (unlikely(!hlist_empty(&task->task_works)))
+		task_work_run();
+}
+
+#endif	/* _LINUX_TASK_WORK_H */
diff --git a/include/linux/tcp.h b/include/linux/tcp.h
index 4c5b632..5f359db 100644
--- a/include/linux/tcp.h
+++ b/include/linux/tcp.h
@@ -69,16 +69,16 @@
 #define tcp_flag_word(tp) ( ((union tcp_word_hdr *)(tp))->words [3]) 
 
 enum { 
-	TCP_FLAG_CWR = __cpu_to_be32(0x00800000),
-	TCP_FLAG_ECE = __cpu_to_be32(0x00400000),
-	TCP_FLAG_URG = __cpu_to_be32(0x00200000),
-	TCP_FLAG_ACK = __cpu_to_be32(0x00100000),
-	TCP_FLAG_PSH = __cpu_to_be32(0x00080000),
-	TCP_FLAG_RST = __cpu_to_be32(0x00040000),
-	TCP_FLAG_SYN = __cpu_to_be32(0x00020000),
-	TCP_FLAG_FIN = __cpu_to_be32(0x00010000),
-	TCP_RESERVED_BITS = __cpu_to_be32(0x0F000000),
-	TCP_DATA_OFFSET = __cpu_to_be32(0xF0000000)
+	TCP_FLAG_CWR = __constant_cpu_to_be32(0x00800000),
+	TCP_FLAG_ECE = __constant_cpu_to_be32(0x00400000),
+	TCP_FLAG_URG = __constant_cpu_to_be32(0x00200000),
+	TCP_FLAG_ACK = __constant_cpu_to_be32(0x00100000),
+	TCP_FLAG_PSH = __constant_cpu_to_be32(0x00080000),
+	TCP_FLAG_RST = __constant_cpu_to_be32(0x00040000),
+	TCP_FLAG_SYN = __constant_cpu_to_be32(0x00020000),
+	TCP_FLAG_FIN = __constant_cpu_to_be32(0x00010000),
+	TCP_RESERVED_BITS = __constant_cpu_to_be32(0x0F000000),
+	TCP_DATA_OFFSET = __constant_cpu_to_be32(0xF0000000)
 }; 
 
 /*
diff --git a/include/linux/thread_info.h b/include/linux/thread_info.h
index db78775..ccc1899 100644
--- a/include/linux/thread_info.h
+++ b/include/linux/thread_info.h
@@ -8,6 +8,7 @@
 #define _LINUX_THREAD_INFO_H
 
 #include <linux/types.h>
+#include <linux/bug.h>
 
 struct timespec;
 struct compat_timespec;
@@ -125,10 +126,26 @@
 static inline void set_restore_sigmask(void)
 {
 	set_thread_flag(TIF_RESTORE_SIGMASK);
-	set_thread_flag(TIF_SIGPENDING);
+	WARN_ON(!test_thread_flag(TIF_SIGPENDING));
+}
+static inline void clear_restore_sigmask(void)
+{
+	clear_thread_flag(TIF_RESTORE_SIGMASK);
+}
+static inline bool test_restore_sigmask(void)
+{
+	return test_thread_flag(TIF_RESTORE_SIGMASK);
+}
+static inline bool test_and_clear_restore_sigmask(void)
+{
+	return test_and_clear_thread_flag(TIF_RESTORE_SIGMASK);
 }
 #endif	/* TIF_RESTORE_SIGMASK && !HAVE_SET_RESTORE_SIGMASK */
 
+#ifndef HAVE_SET_RESTORE_SIGMASK
+#error "no set_restore_sigmask() provided and default one won't work"
+#endif
+
 #endif	/* __KERNEL__ */
 
 #endif /* _LINUX_THREAD_INFO_H */
diff --git a/include/linux/tracehook.h b/include/linux/tracehook.h
index 51bd91d..6a4d82b 100644
--- a/include/linux/tracehook.h
+++ b/include/linux/tracehook.h
@@ -49,6 +49,7 @@
 #include <linux/sched.h>
 #include <linux/ptrace.h>
 #include <linux/security.h>
+#include <linux/task_work.h>
 struct linux_binprm;
 
 /*
@@ -153,7 +154,6 @@
 		ptrace_notify(SIGTRAP);
 }
 
-#ifdef TIF_NOTIFY_RESUME
 /**
  * set_notify_resume - cause tracehook_notify_resume() to be called
  * @task:		task that will call tracehook_notify_resume()
@@ -165,8 +165,10 @@
  */
 static inline void set_notify_resume(struct task_struct *task)
 {
+#ifdef TIF_NOTIFY_RESUME
 	if (!test_and_set_tsk_thread_flag(task, TIF_NOTIFY_RESUME))
 		kick_process(task);
+#endif
 }
 
 /**
@@ -184,7 +186,14 @@
  */
 static inline void tracehook_notify_resume(struct pt_regs *regs)
 {
+	/*
+	 * The caller just cleared TIF_NOTIFY_RESUME. This barrier
+	 * pairs with task_work_add()->set_notify_resume() after
+	 * hlist_add_head(task->task_works);
+	 */
+	smp_mb__after_clear_bit();
+	if (unlikely(!hlist_empty(&current->task_works)))
+		task_work_run();
 }
-#endif	/* TIF_NOTIFY_RESUME */
 
 #endif	/* <linux/tracehook.h> */
diff --git a/include/linux/tty.h b/include/linux/tty.h
index 4990ef2..9f47ab5 100644
--- a/include/linux/tty.h
+++ b/include/linux/tty.h
@@ -268,7 +268,6 @@
 	struct mutex ldisc_mutex;
 	struct tty_ldisc *ldisc;
 
-	struct mutex legacy_mutex;
 	struct mutex termios_mutex;
 	spinlock_t ctrl_lock;
 	/* Termios values are protected by the termios mutex */
@@ -606,12 +605,8 @@
 
 /* tty_mutex.c */
 /* functions for preparation of BKL removal */
-extern void __lockfunc tty_lock(struct tty_struct *tty);
-extern void __lockfunc tty_unlock(struct tty_struct *tty);
-extern void __lockfunc tty_lock_pair(struct tty_struct *tty,
-				struct tty_struct *tty2);
-extern void __lockfunc tty_unlock_pair(struct tty_struct *tty,
-				struct tty_struct *tty2);
+extern void __lockfunc tty_lock(void) __acquires(tty_lock);
+extern void __lockfunc tty_unlock(void) __releases(tty_lock);
 
 /*
  * this shall be called only from where BTM is held (like close)
@@ -626,9 +621,9 @@
 static inline void tty_wait_until_sent_from_close(struct tty_struct *tty,
 		long timeout)
 {
-	tty_unlock(tty); /* tty->ops->close holds the BTM, drop it while waiting */
+	tty_unlock(); /* tty->ops->close holds the BTM, drop it while waiting */
 	tty_wait_until_sent(tty, timeout);
-	tty_lock(tty);
+	tty_lock();
 }
 
 /*
@@ -643,16 +638,16 @@
  *
  * Do not use in new code.
  */
-#define wait_event_interruptible_tty(tty, wq, condition)		\
+#define wait_event_interruptible_tty(wq, condition)			\
 ({									\
 	int __ret = 0;							\
 	if (!(condition)) {						\
-		__wait_event_interruptible_tty(tty, wq, condition, __ret);	\
+		__wait_event_interruptible_tty(wq, condition, __ret);	\
 	}								\
 	__ret;								\
 })
 
-#define __wait_event_interruptible_tty(tty, wq, condition, ret)		\
+#define __wait_event_interruptible_tty(wq, condition, ret)		\
 do {									\
 	DEFINE_WAIT(__wait);						\
 									\
@@ -661,9 +656,9 @@
 		if (condition)						\
 			break;						\
 		if (!signal_pending(current)) {				\
-			tty_unlock(tty);					\
+			tty_unlock();					\
 			schedule();					\
-			tty_lock(tty);					\
+			tty_lock();					\
 			continue;					\
 		}							\
 		ret = -ERESTARTSYS;					\
diff --git a/include/linux/types.h b/include/linux/types.h
index 7f480db..9c1bd53 100644
--- a/include/linux/types.h
+++ b/include/linux/types.h
@@ -25,7 +25,7 @@
 typedef __kernel_ino_t		ino_t;
 typedef __kernel_mode_t		mode_t;
 typedef unsigned short		umode_t;
-typedef __kernel_nlink_t	nlink_t;
+typedef __u32			nlink_t;
 typedef __kernel_off_t		off_t;
 typedef __kernel_pid_t		pid_t;
 typedef __kernel_daddr_t	daddr_t;
diff --git a/include/linux/usb/hcd.h b/include/linux/usb/hcd.h
index 7f855d5..49b3ac2 100644
--- a/include/linux/usb/hcd.h
+++ b/include/linux/usb/hcd.h
@@ -126,8 +126,6 @@
 	unsigned		wireless:1;	/* Wireless USB HCD */
 	unsigned		authorized_default:1;
 	unsigned		has_tt:1;	/* Integrated TT in root hub */
-	unsigned		broken_pci_sleep:1;	/* Don't put the
-			controller in PCI-D3 for system sleep */
 
 	unsigned int		irq;		/* irq allocated */
 	void __iomem		*regs;		/* device memory/io */
diff --git a/include/linux/vga_switcheroo.h b/include/linux/vga_switcheroo.h
index b455c7c..ddb419c 100644
--- a/include/linux/vga_switcheroo.h
+++ b/include/linux/vga_switcheroo.h
@@ -7,11 +7,19 @@
  * vga_switcheroo.h - Support for laptop with dual GPU using one set of outputs
  */
 
+#ifndef _LINUX_VGA_SWITCHEROO_H_
+#define _LINUX_VGA_SWITCHEROO_H_
+
 #include <linux/fb.h>
 
+struct pci_dev;
+
 enum vga_switcheroo_state {
 	VGA_SWITCHEROO_OFF,
 	VGA_SWITCHEROO_ON,
+	/* below are referred only from vga_switcheroo_get_client_state() */
+	VGA_SWITCHEROO_INIT,
+	VGA_SWITCHEROO_NOT_FOUND,
 };
 
 enum vga_switcheroo_client_id {
@@ -50,6 +58,8 @@
 
 int vga_switcheroo_process_delayed_switch(void);
 
+int vga_switcheroo_get_client_state(struct pci_dev *dev);
+
 #else
 
 static inline void vga_switcheroo_unregister_client(struct pci_dev *dev) {}
@@ -62,5 +72,8 @@
 	int id, bool active) { return 0; }
 static inline void vga_switcheroo_unregister_handler(void) {}
 static inline int vga_switcheroo_process_delayed_switch(void) { return 0; }
+static inline int vga_switcheroo_get_client_state(struct pci_dev *dev) { return VGA_SWITCHEROO_ON; }
+
 
 #endif
+#endif /* _LINUX_VGA_SWITCHEROO_H_ */
diff --git a/include/linux/videodev2.h b/include/linux/videodev2.h
index 370d111..2039c5d 100644
--- a/include/linux/videodev2.h
+++ b/include/linux/videodev2.h
@@ -2640,9 +2640,9 @@
 
 /* Experimental, these three ioctls may change over the next couple of kernel
    versions. */
-#define VIDIOC_ENUM_DV_TIMINGS  _IOWR('V', 96, struct v4l2_enum_dv_timings)
-#define VIDIOC_QUERY_DV_TIMINGS  _IOR('V', 97, struct v4l2_dv_timings)
-#define VIDIOC_DV_TIMINGS_CAP   _IOWR('V', 98, struct v4l2_dv_timings_cap)
+#define VIDIOC_ENUM_DV_TIMINGS  _IOWR('V', 98, struct v4l2_enum_dv_timings)
+#define VIDIOC_QUERY_DV_TIMINGS  _IOR('V', 99, struct v4l2_dv_timings)
+#define VIDIOC_DV_TIMINGS_CAP   _IOWR('V', 100, struct v4l2_dv_timings_cap)
 
 /* Reminder: when adding new ioctls please add support for them to
    drivers/media/video/v4l2-compat-ioctl32.c as well! */
diff --git a/include/linux/watchdog.h b/include/linux/watchdog.h
index ac40716..da70f0f 100644
--- a/include/linux/watchdog.h
+++ b/include/linux/watchdog.h
@@ -45,6 +45,8 @@
 #define	WDIOF_SETTIMEOUT	0x0080  /* Set timeout (in seconds) */
 #define	WDIOF_MAGICCLOSE	0x0100	/* Supports magic close char */
 #define	WDIOF_PRETIMEOUT	0x0200  /* Pretimeout (in seconds), get/set */
+#define	WDIOF_ALARMONLY		0x0400	/* Watchdog triggers a management or
+					   other external alarm not a reboot */
 #define	WDIOF_KEEPALIVEPING	0x8000	/* Keep alive ping reply */
 
 #define	WDIOS_DISABLECARD	0x0001	/* Turn off the watchdog timer */
@@ -54,6 +56,8 @@
 #ifdef __KERNEL__
 
 #include <linux/bitops.h>
+#include <linux/device.h>
+#include <linux/cdev.h>
 
 struct watchdog_ops;
 struct watchdog_device;
@@ -67,6 +71,8 @@
  * @status:	The routine that shows the status of the watchdog device.
  * @set_timeout:The routine for setting the watchdog devices timeout value.
  * @get_timeleft:The routine that get's the time that's left before a reset.
+ * @ref:	The ref operation for dyn. allocated watchdog_device structs
+ * @unref:	The unref operation for dyn. allocated watchdog_device structs
  * @ioctl:	The routines that handles extra ioctl calls.
  *
  * The watchdog_ops structure contains a list of low-level operations
@@ -84,11 +90,17 @@
 	unsigned int (*status)(struct watchdog_device *);
 	int (*set_timeout)(struct watchdog_device *, unsigned int);
 	unsigned int (*get_timeleft)(struct watchdog_device *);
+	void (*ref)(struct watchdog_device *);
+	void (*unref)(struct watchdog_device *);
 	long (*ioctl)(struct watchdog_device *, unsigned int, unsigned long);
 };
 
 /** struct watchdog_device - The structure that defines a watchdog device
  *
+ * @id:		The watchdog's ID. (Allocated by watchdog_register_device)
+ * @cdev:	The watchdog's Character device.
+ * @dev:	The device for our watchdog
+ * @parent:	The parent bus device
  * @info:	Pointer to a watchdog_info structure.
  * @ops:	Pointer to the list of watchdog operations.
  * @bootstatus:	Status of the watchdog device at boot.
@@ -96,6 +108,7 @@
  * @min_timeout:The watchdog devices minimum timeout value.
  * @max_timeout:The watchdog devices maximum timeout value.
  * @driver-data:Pointer to the drivers private data.
+ * @lock:	Lock for watchdog core internal use only.
  * @status:	Field that contains the devices internal status bits.
  *
  * The watchdog_device structure contains all information about a
@@ -103,8 +116,15 @@
  *
  * The driver-data field may not be accessed directly. It must be accessed
  * via the watchdog_set_drvdata and watchdog_get_drvdata helpers.
+ *
+ * The lock field is for watchdog core internal use only and should not be
+ * touched.
  */
 struct watchdog_device {
+	int id;
+	struct cdev cdev;
+	struct device *dev;
+	struct device *parent;
 	const struct watchdog_info *info;
 	const struct watchdog_ops *ops;
 	unsigned int bootstatus;
@@ -112,12 +132,14 @@
 	unsigned int min_timeout;
 	unsigned int max_timeout;
 	void *driver_data;
+	struct mutex lock;
 	unsigned long status;
 /* Bit numbers for status flags */
 #define WDOG_ACTIVE		0	/* Is the watchdog running/active */
 #define WDOG_DEV_OPEN		1	/* Opened via /dev/watchdog ? */
 #define WDOG_ALLOW_RELEASE	2	/* Did we receive the magic char ? */
 #define WDOG_NO_WAY_OUT		3	/* Is 'nowayout' feature set ? */
+#define WDOG_UNREGISTERED	4	/* Has the device been unregistered */
 };
 
 #ifdef CONFIG_WATCHDOG_NOWAYOUT
@@ -128,6 +150,12 @@
 #define WATCHDOG_NOWAYOUT_INIT_STATUS	0
 #endif
 
+/* Use the following function to check whether or not the watchdog is active */
+static inline bool watchdog_active(struct watchdog_device *wdd)
+{
+	return test_bit(WDOG_ACTIVE, &wdd->status);
+}
+
 /* Use the following function to set the nowayout feature */
 static inline void watchdog_set_nowayout(struct watchdog_device *wdd, bool nowayout)
 {
diff --git a/include/linux/writeback.h b/include/linux/writeback.h
index a2b84f5..6d0a0fc 100644
--- a/include/linux/writeback.h
+++ b/include/linux/writeback.h
@@ -58,7 +58,6 @@
  * in a manner such that unspecified fields are set to zero.
  */
 struct writeback_control {
-	enum writeback_sync_modes sync_mode;
 	long nr_to_write;		/* Write this many pages, and decrement
 					   this for each page written */
 	long pages_skipped;		/* Pages which were not written */
@@ -71,6 +70,8 @@
 	loff_t range_start;
 	loff_t range_end;
 
+	enum writeback_sync_modes sync_mode;
+
 	unsigned for_kupdate:1;		/* A kupdate writeback */
 	unsigned for_background:1;	/* A background writeback */
 	unsigned tagged_writepages:1;	/* tag-and-write to avoid livelock */
@@ -94,6 +95,7 @@
 				enum wb_reason reason);
 long wb_do_writeback(struct bdi_writeback *wb, int force_wait);
 void wakeup_flusher_threads(long nr_pages, enum wb_reason reason);
+void inode_wait_for_writeback(struct inode *inode);
 
 /* writeback.h requires fs.h; it, too, is not included from here. */
 static inline void wait_on_inode(struct inode *inode)
@@ -101,12 +103,6 @@
 	might_sleep();
 	wait_on_bit(&inode->i_state, __I_NEW, inode_wait, TASK_UNINTERRUPTIBLE);
 }
-static inline void inode_sync_wait(struct inode *inode)
-{
-	might_sleep();
-	wait_on_bit(&inode->i_state, __I_SYNC, inode_wait,
-							TASK_UNINTERRUPTIBLE);
-}
 
 
 /*
diff --git a/include/net/bluetooth/hci.h b/include/net/bluetooth/hci.h
index 66a7b57..3def64b 100644
--- a/include/net/bluetooth/hci.h
+++ b/include/net/bluetooth/hci.h
@@ -1144,6 +1144,12 @@
 	__u8     data[240];
 } __packed;
 
+#define HCI_EV_KEY_REFRESH_COMPLETE	0x30
+struct hci_ev_key_refresh_complete {
+	__u8	status;
+	__le16	handle;
+} __packed;
+
 #define HCI_EV_IO_CAPA_REQUEST		0x31
 struct hci_ev_io_capa_request {
 	bdaddr_t bdaddr;
diff --git a/include/net/cipso_ipv4.h b/include/net/cipso_ipv4.h
index 9808877..a7a683e 100644
--- a/include/net/cipso_ipv4.h
+++ b/include/net/cipso_ipv4.h
@@ -42,6 +42,7 @@
 #include <net/netlabel.h>
 #include <net/request_sock.h>
 #include <linux/atomic.h>
+#include <asm/unaligned.h>
 
 /* known doi values */
 #define CIPSO_V4_DOI_UNKNOWN          0x00000000
@@ -285,7 +286,33 @@
 static inline int cipso_v4_validate(const struct sk_buff *skb,
 				    unsigned char **option)
 {
-	return -ENOSYS;
+	unsigned char *opt = *option;
+	unsigned char err_offset = 0;
+	u8 opt_len = opt[1];
+	u8 opt_iter;
+
+	if (opt_len < 8) {
+		err_offset = 1;
+		goto out;
+	}
+
+	if (get_unaligned_be32(&opt[2]) == 0) {
+		err_offset = 2;
+		goto out;
+	}
+
+	for (opt_iter = 6; opt_iter < opt_len;) {
+		if (opt[opt_iter + 1] > (opt_len - opt_iter)) {
+			err_offset = opt_iter + 1;
+			goto out;
+		}
+		opt_iter += opt[opt_iter + 1];
+	}
+
+out:
+	*option = opt + err_offset;
+	return err_offset;
+
 }
 #endif /* CONFIG_NETLABEL */
 
diff --git a/include/net/dst.h b/include/net/dst.h
index bed833d..8197ead 100644
--- a/include/net/dst.h
+++ b/include/net/dst.h
@@ -60,6 +60,7 @@
 #define DST_NOCOUNT		0x0020
 #define DST_NOPEER		0x0040
 #define DST_FAKE_RTABLE		0x0080
+#define DST_XFRM_TUNNEL		0x0100
 
 	short			error;
 	short			obsolete;
diff --git a/include/net/inetpeer.h b/include/net/inetpeer.h
index b94765e..2040bff 100644
--- a/include/net/inetpeer.h
+++ b/include/net/inetpeer.h
@@ -40,7 +40,10 @@
 	u32			pmtu_orig;
 	u32			pmtu_learned;
 	struct inetpeer_addr_base redirect_learned;
-	struct list_head	gc_list;
+	union {
+		struct list_head	gc_list;
+		struct rcu_head     gc_rcu;
+	};
 	/*
 	 * Once inet_peer is queued for deletion (refcnt == -1), following fields
 	 * are not available: rid, ip_id_count, tcp_ts, tcp_ts_stamp
diff --git a/include/net/mac80211.h b/include/net/mac80211.h
index 1937c7d..95e39b6 100644
--- a/include/net/mac80211.h
+++ b/include/net/mac80211.h
@@ -1940,6 +1940,11 @@
  *	to also unregister the device. If it returns 1, then mac80211
  *	will also go through the regular complete restart on resume.
  *
+ * @set_wakeup: Enable or disable wakeup when WoWLAN configuration is
+ *	modified. The reason is that device_set_wakeup_enable() is
+ *	supposed to be called when the configuration changes, not only
+ *	in suspend().
+ *
  * @add_interface: Called when a netdevice attached to the hardware is
  *	enabled. Because it is not called for monitor mode devices, @start
  *	and @stop must be implemented.
@@ -2966,6 +2971,7 @@
  * ieee80211_generic_frame_duration - Calculate the duration field for a frame
  * @hw: pointer obtained from ieee80211_alloc_hw().
  * @vif: &struct ieee80211_vif pointer from the add_interface callback.
+ * @band: the band to calculate the frame duration on
  * @frame_len: the length of the frame.
  * @rate: the rate at which the frame is going to be transmitted.
  *
diff --git a/include/net/phonet/gprs.h b/include/net/phonet/gprs.h
index 928daf5..bcd525e 100644
--- a/include/net/phonet/gprs.h
+++ b/include/net/phonet/gprs.h
@@ -5,7 +5,7 @@
  *
  * Copyright (C) 2008 Nokia Corporation.
  *
- * Author: Rémi Denis-Courmont <remi.denis-courmont@nokia.com>
+ * Author: Rémi Denis-Courmont
  *
  * This program is free software; you can redistribute it and/or
  * modify it under the terms of the GNU General Public License
diff --git a/include/net/route.h b/include/net/route.h
index ed2b78e..9870546 100644
--- a/include/net/route.h
+++ b/include/net/route.h
@@ -130,9 +130,9 @@
 {
 	struct flowi4 fl4 = {
 		.flowi4_oif = oif,
+		.flowi4_tos = tos,
 		.daddr = daddr,
 		.saddr = saddr,
-		.flowi4_tos = tos,
 	};
 	return ip_route_output_key(net, &fl4);
 }
diff --git a/include/net/sch_generic.h b/include/net/sch_generic.h
index 55ce96b..9d7d54a 100644
--- a/include/net/sch_generic.h
+++ b/include/net/sch_generic.h
@@ -220,13 +220,16 @@
 
 struct qdisc_skb_cb {
 	unsigned int		pkt_len;
-	unsigned char		data[24];
+	u16			bond_queue_mapping;
+	u16			_pad;
+	unsigned char		data[20];
 };
 
 static inline void qdisc_cb_private_validate(const struct sk_buff *skb, int sz)
 {
 	struct qdisc_skb_cb *qcb;
-	BUILD_BUG_ON(sizeof(skb->cb) < sizeof(unsigned int) + sz);
+
+	BUILD_BUG_ON(sizeof(skb->cb) < offsetof(struct qdisc_skb_cb, data) + sz);
 	BUILD_BUG_ON(sizeof(qcb->data) < sz);
 }
 
diff --git a/include/net/sock.h b/include/net/sock.h
index d89f058..4a45216 100644
--- a/include/net/sock.h
+++ b/include/net/sock.h
@@ -46,6 +46,7 @@
 #include <linux/list_nulls.h>
 #include <linux/timer.h>
 #include <linux/cache.h>
+#include <linux/bitops.h>
 #include <linux/lockdep.h>
 #include <linux/netdevice.h>
 #include <linux/skbuff.h>	/* struct sk_buff */
@@ -921,12 +922,23 @@
 #endif
 };
 
+/*
+ * Bits in struct cg_proto.flags
+ */
+enum cg_proto_flags {
+	/* Currently active and new sockets should be assigned to cgroups */
+	MEMCG_SOCK_ACTIVE,
+	/* It was ever activated; we must disarm static keys on destruction */
+	MEMCG_SOCK_ACTIVATED,
+};
+
 struct cg_proto {
 	void			(*enter_memory_pressure)(struct sock *sk);
 	struct res_counter	*memory_allocated;	/* Current allocated memory. */
 	struct percpu_counter	*sockets_allocated;	/* Current number of sockets. */
 	int			*memory_pressure;
 	long			*sysctl_mem;
+	unsigned long		flags;
 	/*
 	 * memcg field is used to find which memcg we belong directly
 	 * Each memcg struct can hold more than one cg_proto, so container_of
@@ -942,6 +954,16 @@
 extern int proto_register(struct proto *prot, int alloc_slab);
 extern void proto_unregister(struct proto *prot);
 
+static inline bool memcg_proto_active(struct cg_proto *cg_proto)
+{
+	return test_bit(MEMCG_SOCK_ACTIVE, &cg_proto->flags);
+}
+
+static inline bool memcg_proto_activated(struct cg_proto *cg_proto)
+{
+	return test_bit(MEMCG_SOCK_ACTIVATED, &cg_proto->flags);
+}
+
 #ifdef SOCK_REFCNT_DEBUG
 static inline void sk_refcnt_debug_inc(struct sock *sk)
 {
diff --git a/include/scsi/fcoe_sysfs.h b/include/scsi/fcoe_sysfs.h
new file mode 100644
index 0000000..604cb9b
--- /dev/null
+++ b/include/scsi/fcoe_sysfs.h
@@ -0,0 +1,124 @@
+/*
+ * Copyright (c) 2011-2012 Intel Corporation.  All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Maintained at www.Open-FCoE.org
+ */
+
+#ifndef FCOE_SYSFS
+#define FCOE_SYSFS
+
+#include <linux/if_ether.h>
+#include <linux/device.h>
+#include <scsi/fc/fc_fcoe.h>
+
+struct fcoe_ctlr_device;
+struct fcoe_fcf_device;
+
+struct fcoe_sysfs_function_template {
+	void (*get_fcoe_ctlr_link_fail)(struct fcoe_ctlr_device *);
+	void (*get_fcoe_ctlr_vlink_fail)(struct fcoe_ctlr_device *);
+	void (*get_fcoe_ctlr_miss_fka)(struct fcoe_ctlr_device *);
+	void (*get_fcoe_ctlr_symb_err)(struct fcoe_ctlr_device *);
+	void (*get_fcoe_ctlr_err_block)(struct fcoe_ctlr_device *);
+	void (*get_fcoe_ctlr_fcs_error)(struct fcoe_ctlr_device *);
+	void (*get_fcoe_ctlr_mode)(struct fcoe_ctlr_device *);
+	void (*get_fcoe_fcf_selected)(struct fcoe_fcf_device *);
+	void (*get_fcoe_fcf_vlan_id)(struct fcoe_fcf_device *);
+};
+
+#define dev_to_ctlr(d)					\
+	container_of((d), struct fcoe_ctlr_device, dev)
+
+enum fip_conn_type {
+	FIP_CONN_TYPE_UNKNOWN,
+	FIP_CONN_TYPE_FABRIC,
+	FIP_CONN_TYPE_VN2VN,
+};
+
+struct fcoe_ctlr_device {
+	u32				id;
+
+	struct device			dev;
+	struct fcoe_sysfs_function_template *f;
+
+	struct list_head		fcfs;
+	char				work_q_name[20];
+	struct workqueue_struct		*work_q;
+	char				devloss_work_q_name[20];
+	struct workqueue_struct		*devloss_work_q;
+	struct mutex			lock;
+
+	int                             fcf_dev_loss_tmo;
+	enum fip_conn_type              mode;
+
+	/* expected in host order for displaying */
+	struct fcoe_fc_els_lesb         lesb;
+};
+
+static inline void *fcoe_ctlr_device_priv(const struct fcoe_ctlr_device *ctlr)
+{
+	return (void *)(ctlr + 1);
+}
+
+/* fcf states */
+enum fcf_state {
+	FCOE_FCF_STATE_UNKNOWN,
+	FCOE_FCF_STATE_DISCONNECTED,
+	FCOE_FCF_STATE_CONNECTED,
+	FCOE_FCF_STATE_DELETED,
+};
+
+struct fcoe_fcf_device {
+	u32		    id;
+	struct device	    dev;
+	struct list_head    peers;
+	struct work_struct  delete_work;
+	struct delayed_work dev_loss_work;
+	u32		    dev_loss_tmo;
+	void                *priv;
+	enum fcf_state      state;
+
+	u64                 fabric_name;
+	u64                 switch_name;
+	u32                 fc_map;
+	u16                 vfid;
+	u8                  mac[ETH_ALEN];
+	u8                  priority;
+	u32                 fka_period;
+	u8                  selected;
+	u16                 vlan_id;
+};
+
+#define dev_to_fcf(d)					\
+	container_of((d), struct fcoe_fcf_device, dev)
+/* parentage should never be missing */
+#define fcoe_fcf_dev_to_ctlr_dev(x)		\
+	dev_to_ctlr((x)->dev.parent)
+#define fcoe_fcf_device_priv(x)			\
+	((x)->priv)
+
+struct fcoe_ctlr_device *fcoe_ctlr_device_add(struct device *parent,
+			    struct fcoe_sysfs_function_template *f,
+			    int priv_size);
+void fcoe_ctlr_device_delete(struct fcoe_ctlr_device *);
+struct fcoe_fcf_device *fcoe_fcf_device_add(struct fcoe_ctlr_device *,
+					    struct fcoe_fcf_device *);
+void fcoe_fcf_device_delete(struct fcoe_fcf_device *);
+
+int __init fcoe_sysfs_setup(void);
+void __exit fcoe_sysfs_teardown(void);
+
+#endif /* FCOE_SYSFS */
diff --git a/include/scsi/libfcoe.h b/include/scsi/libfcoe.h
index cfdb55f..22b07cc 100644
--- a/include/scsi/libfcoe.h
+++ b/include/scsi/libfcoe.h
@@ -29,6 +29,7 @@
 #include <linux/random.h>
 #include <scsi/fc/fc_fcoe.h>
 #include <scsi/libfc.h>
+#include <scsi/fcoe_sysfs.h>
 
 #define FCOE_MAX_CMD_LEN	16	/* Supported CDB length */
 
@@ -159,8 +160,24 @@
 };
 
 /**
+ * fcoe_ctlr_priv() - Return the private data from a fcoe_ctlr
+ * @ctlr: The fcoe_ctlr whose private data will be returned
+ */
+static inline void *fcoe_ctlr_priv(const struct fcoe_ctlr *ctlr)
+{
+	return (void *)(ctlr + 1);
+}
+
+#define fcoe_ctlr_to_ctlr_dev(x)					\
+	(struct fcoe_ctlr_device *)(((struct fcoe_ctlr_device *)(x)) - 1)
+
+/**
  * struct fcoe_fcf - Fibre-Channel Forwarder
  * @list:	 list linkage
+ * @event_work:  Work for FC Transport actions queue
+ * @event:       The event to be processed
+ * @fip:         The controller that the FCF was discovered on
+ * @fcf_dev:     The associated fcoe_fcf_device instance
  * @time:	 system time (jiffies) when an advertisement was last received
  * @switch_name: WWN of switch from advertisement
  * @fabric_name: WWN of fabric from advertisement
@@ -182,6 +199,9 @@
  */
 struct fcoe_fcf {
 	struct list_head list;
+	struct work_struct event_work;
+	struct fcoe_ctlr *fip;
+	struct fcoe_fcf_device *fcf_dev;
 	unsigned long time;
 
 	u64 switch_name;
@@ -198,6 +218,9 @@
 	u8 fd_flags:1;
 };
 
+#define fcoe_fcf_to_fcf_dev(x)			\
+	((x)->fcf_dev)
+
 /**
  * struct fcoe_rport - VN2VN remote port
  * @time:	time of create or last beacon packet received from node
@@ -333,6 +356,10 @@
 int fcoe_get_paged_crc_eof(struct sk_buff *skb, int tlen,
 			   struct fcoe_percpu_s *fps);
 
+/* FCoE Sysfs helpers */
+void fcoe_fcf_get_selected(struct fcoe_fcf_device *);
+void fcoe_ctlr_get_fip_mode(struct fcoe_ctlr_device *);
+
 /**
  * struct netdev_list
  * A mapping from netdevice to fcoe_transport
diff --git a/include/scsi/scsi_device.h b/include/scsi/scsi_device.h
index 6efb2e1..ba96988 100644
--- a/include/scsi/scsi_device.h
+++ b/include/scsi/scsi_device.h
@@ -151,6 +151,7 @@
 					   SD_LAST_BUGGY_SECTORS */
 	unsigned no_read_disc_info:1;	/* Avoid READ_DISC_INFO cmds */
 	unsigned no_read_capacity_16:1; /* Avoid READ_CAPACITY_16 cmds */
+	unsigned try_rc_10_first:1;	/* Try READ_CAPACITY_10 first */
 	unsigned is_visible:1;	/* is the device visible in sysfs */
 
 	DECLARE_BITMAP(supported_events, SDEV_EVT_MAXBITS); /* supported events */
diff --git a/include/sound/tea575x-tuner.h b/include/sound/tea575x-tuner.h
index ec3f910..0c3c2fb 100644
--- a/include/sound/tea575x-tuner.h
+++ b/include/sound/tea575x-tuner.h
@@ -44,6 +44,7 @@
 
 struct snd_tea575x {
 	struct v4l2_device *v4l2_dev;
+	struct v4l2_file_operations fops;
 	struct video_device vd;		/* video device */
 	int radio_nr;			/* radio_nr */
 	bool tea5759;			/* 5759 chip is present */
@@ -62,7 +63,7 @@
 	int (*ext_init)(struct snd_tea575x *tea);
 };
 
-int snd_tea575x_init(struct snd_tea575x *tea);
+int snd_tea575x_init(struct snd_tea575x *tea, struct module *owner);
 void snd_tea575x_exit(struct snd_tea575x *tea);
 
 #endif /* __SOUND_TEA575X_TUNER_H */
diff --git a/include/target/target_core_fabric.h b/include/target/target_core_fabric.h
index 1169599..c78a233 100644
--- a/include/target/target_core_fabric.h
+++ b/include/target/target_core_fabric.h
@@ -47,6 +47,7 @@
 	 */
 	int (*check_stop_free)(struct se_cmd *);
 	void (*release_cmd)(struct se_cmd *);
+	void (*put_session)(struct se_session *);
 	/*
 	 * Called with spin_lock_bh(struct se_portal_group->session_lock held.
 	 */
diff --git a/include/trace/events/jbd.h b/include/trace/events/jbd.h
index aff64d8..da6f259 100644
--- a/include/trace/events/jbd.h
+++ b/include/trace/events/jbd.h
@@ -36,19 +36,17 @@
 
 	TP_STRUCT__entry(
 		__field(	dev_t,	dev			)
-		__field(	char,	sync_commit		)
 		__field(	int,	transaction		)
 	),
 
 	TP_fast_assign(
 		__entry->dev		= journal->j_fs_dev->bd_dev;
-		__entry->sync_commit = commit_transaction->t_synchronous_commit;
 		__entry->transaction	= commit_transaction->t_tid;
 	),
 
-	TP_printk("dev %d,%d transaction %d sync %d",
+	TP_printk("dev %d,%d transaction %d",
 		  MAJOR(__entry->dev), MINOR(__entry->dev),
-		  __entry->transaction, __entry->sync_commit)
+		  __entry->transaction)
 );
 
 DEFINE_EVENT(jbd_commit, jbd_start_commit,
@@ -87,19 +85,17 @@
 
 	TP_STRUCT__entry(
 		__field(	dev_t,	dev			)
-		__field(	char,	sync_commit		)
 		__field(	int,	transaction		)
 	),
 
 	TP_fast_assign(
 		__entry->dev		= journal->j_fs_dev->bd_dev;
-		__entry->sync_commit = commit_transaction->t_synchronous_commit;
 		__entry->transaction	= commit_transaction->t_tid;
 	),
 
-	TP_printk("dev %d,%d transaction %d sync %d",
+	TP_printk("dev %d,%d transaction %d",
 		  MAJOR(__entry->dev), MINOR(__entry->dev),
-		  __entry->transaction, __entry->sync_commit)
+		  __entry->transaction)
 );
 
 TRACE_EVENT(jbd_end_commit,
@@ -109,21 +105,19 @@
 
 	TP_STRUCT__entry(
 		__field(	dev_t,	dev			)
-		__field(	char,	sync_commit		)
 		__field(	int,	transaction		)
 		__field(	int,	head			)
 	),
 
 	TP_fast_assign(
 		__entry->dev		= journal->j_fs_dev->bd_dev;
-		__entry->sync_commit = commit_transaction->t_synchronous_commit;
 		__entry->transaction	= commit_transaction->t_tid;
 		__entry->head		= journal->j_tail_sequence;
 	),
 
-	TP_printk("dev %d,%d transaction %d sync %d head %d",
+	TP_printk("dev %d,%d transaction %d head %d",
 		  MAJOR(__entry->dev), MINOR(__entry->dev),
-		  __entry->transaction, __entry->sync_commit, __entry->head)
+		  __entry->transaction, __entry->head)
 );
 
 TRACE_EVENT(jbd_do_submit_data,
@@ -133,19 +127,17 @@
 
 	TP_STRUCT__entry(
 		__field(	dev_t,	dev			)
-		__field(	char,	sync_commit		)
 		__field(	int,	transaction		)
 	),
 
 	TP_fast_assign(
 		__entry->dev		= journal->j_fs_dev->bd_dev;
-		__entry->sync_commit = commit_transaction->t_synchronous_commit;
 		__entry->transaction	= commit_transaction->t_tid;
 	),
 
-	TP_printk("dev %d,%d transaction %d sync %d",
+	TP_printk("dev %d,%d transaction %d",
 		  MAJOR(__entry->dev), MINOR(__entry->dev),
-		   __entry->transaction, __entry->sync_commit)
+		   __entry->transaction)
 );
 
 TRACE_EVENT(jbd_cleanup_journal_tail,
@@ -177,24 +169,23 @@
 		  __entry->block_nr, __entry->freed)
 );
 
-TRACE_EVENT(jbd_update_superblock_end,
-	TP_PROTO(journal_t *journal, int wait),
+TRACE_EVENT(journal_write_superblock,
+	TP_PROTO(journal_t *journal, int write_op),
 
-	TP_ARGS(journal, wait),
+	TP_ARGS(journal, write_op),
 
 	TP_STRUCT__entry(
 		__field(	dev_t,	dev			)
-		__field(	int,	wait			)
+		__field(	int,	write_op		)
 	),
 
 	TP_fast_assign(
 		__entry->dev		= journal->j_fs_dev->bd_dev;
-		__entry->wait		= wait;
+		__entry->write_op	= write_op;
 	),
 
-	TP_printk("dev %d,%d wait %d",
-		  MAJOR(__entry->dev), MINOR(__entry->dev),
-		   __entry->wait)
+	TP_printk("dev %d,%d write_op %x", MAJOR(__entry->dev),
+		  MINOR(__entry->dev), __entry->write_op)
 );
 
 #endif /* _TRACE_JBD_H */
diff --git a/include/trace/events/rcu.h b/include/trace/events/rcu.h
index 1480900..d274734 100644
--- a/include/trace/events/rcu.h
+++ b/include/trace/events/rcu.h
@@ -289,6 +289,7 @@
  *	"In holdoff": Nothing to do, holding off after unsuccessful attempt.
  *	"Begin holdoff": Attempt failed, don't retry until next jiffy.
  *	"Dyntick with callbacks": Entering dyntick-idle despite callbacks.
+ *	"Dyntick with lazy callbacks": Entering dyntick-idle w/lazy callbacks.
  *	"More callbacks": Still more callbacks, try again to clear them out.
  *	"Callbacks drained": All callbacks processed, off to dyntick idle!
  *	"Timer": Timer fired to cause CPU to continue processing callbacks.
diff --git a/include/trace/events/vmscan.h b/include/trace/events/vmscan.h
index f64560e..bab3b87 100644
--- a/include/trace/events/vmscan.h
+++ b/include/trace/events/vmscan.h
@@ -13,7 +13,7 @@
 #define RECLAIM_WB_ANON		0x0001u
 #define RECLAIM_WB_FILE		0x0002u
 #define RECLAIM_WB_MIXED	0x0010u
-#define RECLAIM_WB_SYNC		0x0004u
+#define RECLAIM_WB_SYNC		0x0004u /* Unused, all reclaim async */
 #define RECLAIM_WB_ASYNC	0x0008u
 
 #define show_reclaim_flags(flags)				\
@@ -25,15 +25,15 @@
 		{RECLAIM_WB_ASYNC,	"RECLAIM_WB_ASYNC"}	\
 		) : "RECLAIM_WB_NONE"
 
-#define trace_reclaim_flags(page, sync) ( \
+#define trace_reclaim_flags(page) ( \
 	(page_is_file_cache(page) ? RECLAIM_WB_FILE : RECLAIM_WB_ANON) | \
-	(sync & RECLAIM_MODE_SYNC ? RECLAIM_WB_SYNC : RECLAIM_WB_ASYNC)   \
+	(RECLAIM_WB_ASYNC) \
 	)
 
-#define trace_shrink_flags(file, sync) ( \
-	(sync & RECLAIM_MODE_SYNC ? RECLAIM_WB_MIXED : \
-			(file ? RECLAIM_WB_FILE : RECLAIM_WB_ANON)) |  \
-	(sync & RECLAIM_MODE_SYNC ? RECLAIM_WB_SYNC : RECLAIM_WB_ASYNC) \
+#define trace_shrink_flags(file) \
+	( \
+		(file ? RECLAIM_WB_FILE : RECLAIM_WB_ANON) | \
+		(RECLAIM_WB_ASYNC) \
 	)
 
 TRACE_EVENT(mm_vmscan_kswapd_sleep,
@@ -263,22 +263,16 @@
 		unsigned long nr_requested,
 		unsigned long nr_scanned,
 		unsigned long nr_taken,
-		unsigned long nr_lumpy_taken,
-		unsigned long nr_lumpy_dirty,
-		unsigned long nr_lumpy_failed,
 		isolate_mode_t isolate_mode,
 		int file),
 
-	TP_ARGS(order, nr_requested, nr_scanned, nr_taken, nr_lumpy_taken, nr_lumpy_dirty, nr_lumpy_failed, isolate_mode, file),
+	TP_ARGS(order, nr_requested, nr_scanned, nr_taken, isolate_mode, file),
 
 	TP_STRUCT__entry(
 		__field(int, order)
 		__field(unsigned long, nr_requested)
 		__field(unsigned long, nr_scanned)
 		__field(unsigned long, nr_taken)
-		__field(unsigned long, nr_lumpy_taken)
-		__field(unsigned long, nr_lumpy_dirty)
-		__field(unsigned long, nr_lumpy_failed)
 		__field(isolate_mode_t, isolate_mode)
 		__field(int, file)
 	),
@@ -288,22 +282,16 @@
 		__entry->nr_requested = nr_requested;
 		__entry->nr_scanned = nr_scanned;
 		__entry->nr_taken = nr_taken;
-		__entry->nr_lumpy_taken = nr_lumpy_taken;
-		__entry->nr_lumpy_dirty = nr_lumpy_dirty;
-		__entry->nr_lumpy_failed = nr_lumpy_failed;
 		__entry->isolate_mode = isolate_mode;
 		__entry->file = file;
 	),
 
-	TP_printk("isolate_mode=%d order=%d nr_requested=%lu nr_scanned=%lu nr_taken=%lu contig_taken=%lu contig_dirty=%lu contig_failed=%lu file=%d",
+	TP_printk("isolate_mode=%d order=%d nr_requested=%lu nr_scanned=%lu nr_taken=%lu file=%d",
 		__entry->isolate_mode,
 		__entry->order,
 		__entry->nr_requested,
 		__entry->nr_scanned,
 		__entry->nr_taken,
-		__entry->nr_lumpy_taken,
-		__entry->nr_lumpy_dirty,
-		__entry->nr_lumpy_failed,
 		__entry->file)
 );
 
@@ -313,13 +301,10 @@
 		unsigned long nr_requested,
 		unsigned long nr_scanned,
 		unsigned long nr_taken,
-		unsigned long nr_lumpy_taken,
-		unsigned long nr_lumpy_dirty,
-		unsigned long nr_lumpy_failed,
 		isolate_mode_t isolate_mode,
 		int file),
 
-	TP_ARGS(order, nr_requested, nr_scanned, nr_taken, nr_lumpy_taken, nr_lumpy_dirty, nr_lumpy_failed, isolate_mode, file)
+	TP_ARGS(order, nr_requested, nr_scanned, nr_taken, isolate_mode, file)
 
 );
 
@@ -329,13 +314,10 @@
 		unsigned long nr_requested,
 		unsigned long nr_scanned,
 		unsigned long nr_taken,
-		unsigned long nr_lumpy_taken,
-		unsigned long nr_lumpy_dirty,
-		unsigned long nr_lumpy_failed,
 		isolate_mode_t isolate_mode,
 		int file),
 
-	TP_ARGS(order, nr_requested, nr_scanned, nr_taken, nr_lumpy_taken, nr_lumpy_dirty, nr_lumpy_failed, isolate_mode, file)
+	TP_ARGS(order, nr_requested, nr_scanned, nr_taken, isolate_mode, file)
 
 );
 
@@ -395,88 +377,6 @@
 		show_reclaim_flags(__entry->reclaim_flags))
 );
 
-TRACE_EVENT(replace_swap_token,
-	TP_PROTO(struct mm_struct *old_mm,
-		 struct mm_struct *new_mm),
-
-	TP_ARGS(old_mm, new_mm),
-
-	TP_STRUCT__entry(
-		__field(struct mm_struct*,	old_mm)
-		__field(unsigned int,		old_prio)
-		__field(struct mm_struct*,	new_mm)
-		__field(unsigned int,		new_prio)
-	),
-
-	TP_fast_assign(
-		__entry->old_mm   = old_mm;
-		__entry->old_prio = old_mm ? old_mm->token_priority : 0;
-		__entry->new_mm   = new_mm;
-		__entry->new_prio = new_mm->token_priority;
-	),
-
-	TP_printk("old_token_mm=%p old_prio=%u new_token_mm=%p new_prio=%u",
-		  __entry->old_mm, __entry->old_prio,
-		  __entry->new_mm, __entry->new_prio)
-);
-
-DECLARE_EVENT_CLASS(put_swap_token_template,
-	TP_PROTO(struct mm_struct *swap_token_mm),
-
-	TP_ARGS(swap_token_mm),
-
-	TP_STRUCT__entry(
-		__field(struct mm_struct*, swap_token_mm)
-	),
-
-	TP_fast_assign(
-		__entry->swap_token_mm = swap_token_mm;
-	),
-
-	TP_printk("token_mm=%p", __entry->swap_token_mm)
-);
-
-DEFINE_EVENT(put_swap_token_template, put_swap_token,
-	TP_PROTO(struct mm_struct *swap_token_mm),
-	TP_ARGS(swap_token_mm)
-);
-
-DEFINE_EVENT_CONDITION(put_swap_token_template, disable_swap_token,
-	TP_PROTO(struct mm_struct *swap_token_mm),
-	TP_ARGS(swap_token_mm),
-	TP_CONDITION(swap_token_mm != NULL)
-);
-
-TRACE_EVENT_CONDITION(update_swap_token_priority,
-	TP_PROTO(struct mm_struct *mm,
-		 unsigned int old_prio,
-		 struct mm_struct *swap_token_mm),
-
-	TP_ARGS(mm, old_prio, swap_token_mm),
-
-	TP_CONDITION(mm->token_priority != old_prio),
-
-	TP_STRUCT__entry(
-		__field(struct mm_struct*, mm)
-		__field(unsigned int, old_prio)
-		__field(unsigned int, new_prio)
-		__field(struct mm_struct*, swap_token_mm)
-		__field(unsigned int, swap_token_prio)
-	),
-
-	TP_fast_assign(
-		__entry->mm		= mm;
-		__entry->old_prio	= old_prio;
-		__entry->new_prio	= mm->token_priority;
-		__entry->swap_token_mm	= swap_token_mm;
-		__entry->swap_token_prio = swap_token_mm ? swap_token_mm->token_priority : 0;
-	),
-
-	TP_printk("mm=%p old_prio=%u new_prio=%u swap_token_mm=%p token_prio=%u",
-		  __entry->mm, __entry->old_prio, __entry->new_prio,
-		  __entry->swap_token_mm, __entry->swap_token_prio)
-);
-
 #endif /* _TRACE_VMSCAN_H */
 
 /* This part must be outside protection */
diff --git a/include/trace/events/writeback.h b/include/trace/events/writeback.h
index 7b81887..b453d92 100644
--- a/include/trace/events/writeback.h
+++ b/include/trace/events/writeback.h
@@ -372,6 +372,35 @@
 	  )
 );
 
+TRACE_EVENT(writeback_sb_inodes_requeue,
+
+	TP_PROTO(struct inode *inode),
+	TP_ARGS(inode),
+
+	TP_STRUCT__entry(
+		__array(char, name, 32)
+		__field(unsigned long, ino)
+		__field(unsigned long, state)
+		__field(unsigned long, dirtied_when)
+	),
+
+	TP_fast_assign(
+		strncpy(__entry->name,
+		        dev_name(inode_to_bdi(inode)->dev), 32);
+		__entry->ino		= inode->i_ino;
+		__entry->state		= inode->i_state;
+		__entry->dirtied_when	= inode->dirtied_when;
+	),
+
+	TP_printk("bdi %s: ino=%lu state=%s dirtied_when=%lu age=%lu",
+		  __entry->name,
+		  __entry->ino,
+		  show_inode_state(__entry->state),
+		  __entry->dirtied_when,
+		  (jiffies - __entry->dirtied_when) / HZ
+	)
+);
+
 DECLARE_EVENT_CLASS(writeback_congest_waited_template,
 
 	TP_PROTO(unsigned int usec_timeout, unsigned int usec_delayed),
@@ -450,13 +479,6 @@
 	)
 );
 
-DEFINE_EVENT(writeback_single_inode_template, writeback_single_inode_requeue,
-	TP_PROTO(struct inode *inode,
-		 struct writeback_control *wbc,
-		 unsigned long nr_to_write),
-	TP_ARGS(inode, wbc, nr_to_write)
-);
-
 DEFINE_EVENT(writeback_single_inode_template, writeback_single_inode,
 	TP_PROTO(struct inode *inode,
 		 struct writeback_control *wbc,
diff --git a/include/video/auo_k190xfb.h b/include/video/auo_k190xfb.h
new file mode 100644
index 0000000..609efe8
--- /dev/null
+++ b/include/video/auo_k190xfb.h
@@ -0,0 +1,106 @@
+/*
+ * Definitions for AUO-K190X framebuffer drivers
+ *
+ * Copyright (C) 2012 Heiko Stuebner <heiko@sntech.de>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef _LINUX_VIDEO_AUO_K190XFB_H_
+#define _LINUX_VIDEO_AUO_K190XFB_H_
+
+/* Controller standby command needs a param */
+#define AUOK190X_QUIRK_STANDBYPARAM	(1 << 0)
+
+/* Controller standby is completely broken */
+#define AUOK190X_QUIRK_STANDBYBROKEN	(1 << 1)
+
+/*
+ * Resolutions for the displays
+ */
+#define AUOK190X_RESOLUTION_800_600		0
+#define AUOK190X_RESOLUTION_1024_768		1
+
+/*
+ * struct used by auok190x. board specific stuff comes from *board
+ */
+struct auok190xfb_par {
+	struct fb_info *info;
+	struct auok190x_board *board;
+
+	struct regulator *regulator;
+
+	struct mutex io_lock;
+	struct delayed_work work;
+	wait_queue_head_t waitq;
+	int resolution;
+	int rotation;
+	int consecutive_threshold;
+	int update_cnt;
+
+	/* panel and controller information */
+	int epd_type;
+	int panel_size_int;
+	int panel_size_float;
+	int panel_model;
+	int tcon_version;
+	int lut_version;
+
+	/* individual controller callbacks */
+	void (*update_partial)(struct auok190xfb_par *par, u16 y1, u16 y2);
+	void (*update_all)(struct auok190xfb_par *par);
+	bool (*need_refresh)(struct auok190xfb_par *par);
+	void (*init)(struct auok190xfb_par *par);
+	void (*recover)(struct auok190xfb_par *par);
+
+	int update_mode; /* mode to use for updates */
+	int last_mode; /* update mode last used */
+	int flash;
+
+	/* power management */
+	int autosuspend_delay;
+	bool standby;
+	bool manual_standby;
+};
+
+/**
+ * Board specific platform-data
+ * @init:		initialize the controller interface
+ * @cleanup:		cleanup the controller interface
+ * @wait_for_rdy:	wait until the controller is not busy anymore
+ * @set_ctl:		change an interface control
+ * @set_hdb:		write a value to the data register
+ * @get_hdb:		read a value from the data register
+ * @setup_irq:		method to setup the irq handling on the busy gpio
+ * @gpio_nsleep:	sleep gpio
+ * @gpio_nrst:		reset gpio
+ * @gpio_nbusy:		busy gpio
+ * @resolution:		one of the AUOK190X_RESOLUTION constants
+ * @rotation:		rotation of the framebuffer
+ * @quirks:		controller quirks to honor
+ * @fps:		frames per second for defio
+ */
+struct auok190x_board {
+	int (*init)(struct auok190xfb_par *);
+	void (*cleanup)(struct auok190xfb_par *);
+	int (*wait_for_rdy)(struct auok190xfb_par *);
+
+	void (*set_ctl)(struct auok190xfb_par *, unsigned char, u8);
+	void (*set_hdb)(struct auok190xfb_par *, u16);
+	u16 (*get_hdb)(struct auok190xfb_par *);
+
+	int (*setup_irq)(struct fb_info *);
+
+	int gpio_nsleep;
+	int gpio_nrst;
+	int gpio_nbusy;
+
+	int resolution;
+	int rotation;
+	int quirks;
+	int fps;
+};
+
+#endif
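
For context, a board file would instantiate the platform data declared above. A minimal sketch follows; the example_* helpers and the GPIO numbers are purely illustrative and not part of this patch.

	/* Hypothetical board wiring for an auok190x panel; all values illustrative. */
	static void example_set_ctl(struct auok190xfb_par *par, unsigned char ctl, u8 val)
	{
		/* drive the controller's control lines; board specific */
	}

	static void example_set_hdb(struct auok190xfb_par *par, u16 data)
	{
		/* write one word to the HDB data bus; board specific */
	}

	static u16 example_get_hdb(struct auok190xfb_par *par)
	{
		return 0;	/* read one word from the HDB data bus */
	}

	static struct auok190x_board example_board = {
		.set_ctl	= example_set_ctl,
		.set_hdb	= example_set_hdb,
		.get_hdb	= example_get_hdb,
		.gpio_nsleep	= 100,		/* illustrative GPIO numbers */
		.gpio_nrst	= 101,
		.gpio_nbusy	= 102,
		.resolution	= AUOK190X_RESOLUTION_800_600,
		.rotation	= 0,
		.quirks		= AUOK190X_QUIRK_STANDBYBROKEN,
		.fps		= 4,
	};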
diff --git a/include/video/exynos_dp.h b/include/video/exynos_dp.h
index 8847a9d..bd8cabd 100644
--- a/include/video/exynos_dp.h
+++ b/include/video/exynos_dp.h
@@ -14,7 +14,7 @@
 
 #define DP_TIMEOUT_LOOP_COUNT 100
 #define MAX_CR_LOOP 5
-#define MAX_EQ_LOOP 4
+#define MAX_EQ_LOOP 5
 
 enum link_rate_type {
 	LINK_RATE_1_62GBPS = 0x06,
diff --git a/include/video/exynos_mipi_dsim.h b/include/video/exynos_mipi_dsim.h
index 772c770..83ce5e6 100644
--- a/include/video/exynos_mipi_dsim.h
+++ b/include/video/exynos_mipi_dsim.h
@@ -315,6 +315,7 @@
 	int			id;
 	int			bus_id;
 	int			irq;
+	int			panel_reverse;
 
 	struct mipi_dsim_device *master;
 	void			*platform_data;
diff --git a/include/video/omapdss.h b/include/video/omapdss.h
index 1c46a14..c8e59b4 100644
--- a/include/video/omapdss.h
+++ b/include/video/omapdss.h
@@ -51,6 +51,8 @@
 
 struct omap_dss_device;
 struct omap_overlay_manager;
+struct snd_aes_iec958;
+struct snd_cea_861_aud_if;
 
 enum omap_display_type {
 	OMAP_DISPLAY_TYPE_NONE		= 0,
@@ -158,6 +160,13 @@
 	OMAP_DSS_DISPLAY_SUSPENDED,
 };
 
+enum omap_dss_audio_state {
+	OMAP_DSS_AUDIO_DISABLED = 0,
+	OMAP_DSS_AUDIO_ENABLED,
+	OMAP_DSS_AUDIO_CONFIGURED,
+	OMAP_DSS_AUDIO_PLAYING,
+};
+
 /* XXX perhaps this should be removed */
 enum omap_dss_overlay_managers {
 	OMAP_DSS_OVL_MGR_LCD,
@@ -166,8 +175,9 @@
 };
 
 enum omap_dss_rotation_type {
-	OMAP_DSS_ROT_DMA = 0,
-	OMAP_DSS_ROT_VRFB = 1,
+	OMAP_DSS_ROT_DMA	= 1 << 0,
+	OMAP_DSS_ROT_VRFB	= 1 << 1,
+	OMAP_DSS_ROT_TILER	= 1 << 2,
 };
 
 /* clockwise rotation angle */
@@ -309,6 +319,7 @@
 	struct omap_dss_device *default_device;
 	int (*dsi_enable_pads)(int dsi_id, unsigned lane_mask);
 	void (*dsi_disable_pads)(int dsi_id, unsigned lane_mask);
+	int (*set_min_bus_tput)(struct device *dev, unsigned long r);
 };
 
 /* Init with the board info */
@@ -316,11 +327,6 @@
 /* HDMI mux init*/
 extern int omap_hdmi_init(enum omap_hdmi_flags flags);
 
-struct omap_display_platform_data {
-	struct omap_dss_board_info *board_data;
-	/* TODO: Additional members to be added when PM is considered */
-};
-
 struct omap_video_timings {
 	/* Unit: pixels */
 	u16 x_res;
@@ -587,6 +593,8 @@
 
 	enum omap_dss_display_state state;
 
+	enum omap_dss_audio_state audio_state;
+
 	/* platform specific  */
 	int (*platform_enable)(struct omap_dss_device *dssdev);
 	void (*platform_disable)(struct omap_dss_device *dssdev);
@@ -599,6 +607,11 @@
 	int hpd_gpio;
 };
 
+struct omap_dss_audio {
+	struct snd_aes_iec958 *iec;
+	struct snd_cea_861_aud_if *cea;
+};
+
 struct omap_dss_driver {
 	struct device_driver driver;
 
@@ -646,6 +659,24 @@
 
 	int (*read_edid)(struct omap_dss_device *dssdev, u8 *buf, int len);
 	bool (*detect)(struct omap_dss_device *dssdev);
+
+	/*
+	 * For display drivers that support audio. This encompasses
+	 * HDMI and DisplayPort at the moment.
+	 */
+	/*
+	 * Note: These functions might sleep. Do not call while
+	 * holding a spinlock/readlock.
+	 */
+	int (*audio_enable)(struct omap_dss_device *dssdev);
+	void (*audio_disable)(struct omap_dss_device *dssdev);
+	bool (*audio_supported)(struct omap_dss_device *dssdev);
+	int (*audio_config)(struct omap_dss_device *dssdev,
+		struct omap_dss_audio *audio);
+	/* Note: These functions may not sleep */
+	int (*audio_start)(struct omap_dss_device *dssdev);
+	void (*audio_stop)(struct omap_dss_device *dssdev);
+
 };
 
 int omap_dss_register_driver(struct omap_dss_driver *);
@@ -670,6 +701,8 @@
 void omapdss_default_get_resolution(struct omap_dss_device *dssdev,
 		u16 *xres, u16 *yres);
 int omapdss_default_get_recommended_bpp(struct omap_dss_device *dssdev);
+void omapdss_default_get_timings(struct omap_dss_device *dssdev,
+		struct omap_video_timings *timings);
 
 typedef void (*omap_dispc_isr_t) (void *arg, u32 mask);
 int omap_dispc_register_isr(omap_dispc_isr_t isr, void *arg, u32 mask);
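
The audio callbacks added above split into calls that may sleep (audio_supported, audio_enable, audio_disable, audio_config) and calls that must not (audio_start, audio_stop). A minimal sketch of a caller respecting that split, assuming the usual dssdev->driver pointer and a driver that fills in all six callbacks; the function name and error handling are illustrative.

	static int example_start_hdmi_audio(struct omap_dss_device *dssdev,
					    struct omap_dss_audio *audio)
	{
		int r;

		if (!dssdev->driver->audio_supported(dssdev))
			return -ENODEV;

		/* these may sleep, so call them from process context */
		r = dssdev->driver->audio_enable(dssdev);
		if (r)
			return r;

		r = dssdev->driver->audio_config(dssdev, audio);
		if (r) {
			dssdev->driver->audio_disable(dssdev);
			return r;
		}

		/* audio_start() must not sleep, e.g. callable from atomic context */
		return dssdev->driver->audio_start(dssdev);
	}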
diff --git a/include/video/sh_mobile_hdmi.h b/include/video/sh_mobile_hdmi.h
index 728f9de..63d20ef 100644
--- a/include/video/sh_mobile_hdmi.h
+++ b/include/video/sh_mobile_hdmi.h
@@ -18,9 +18,11 @@
 /*
  * flags format
  *
- * 0x0000000A
+ * 0x00000CBA
  *
  * A: Audio source select
+ * B: Int output option
+ * C: Chip specific option
  */
 
 /* Audio source select */
@@ -30,6 +32,14 @@
 #define HDMI_SND_SRC_DSD	(2 << 0)
 #define HDMI_SND_SRC_HBR	(3 << 0)
 
+/* Int output option */
+#define HDMI_OUTPUT_PUSH_PULL	(1 << 4) /* System control : output mode */
+#define HDMI_OUTPUT_POLARITY_HI	(1 << 5) /* System control : output polarity */
+
+/* Chip specific option */
+#define HDMI_32BIT_REG		(1 << 8)
+#define HDMI_HAS_HTOP1		(1 << 9)
+
 struct sh_mobile_hdmi_info {
 	unsigned int			 flags;
 	long (*clk_optimize_parent)(unsigned long target, unsigned long *best_freq,
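
The A, B and C nibbles documented above are simply ORed together in the flags field. A sketch of one possible combination follows; the particular choice of bits is illustrative, not a recommendation.

	static struct sh_mobile_hdmi_info example_hdmi_info = {
		.flags = HDMI_SND_SRC_DSD		/* A: audio source select */
		       | HDMI_OUTPUT_PUSH_PULL		/* B: int output option */
		       | HDMI_OUTPUT_POLARITY_HI
		       | HDMI_32BIT_REG,		/* C: chip specific option */
	};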
diff --git a/init/Kconfig b/init/Kconfig
index 81816b8..d07dcf9 100644
--- a/init/Kconfig
+++ b/init/Kconfig
@@ -167,7 +167,7 @@
 	depends on HAVE_KERNEL_BZIP2
 	help
 	  Its compression ratio and speed is intermediate.
-	  Decompression speed is slowest among the three.  The kernel
+	  Decompression speed is slowest among the choices.  The kernel
 	  size is about 10% smaller with bzip2, in comparison to gzip.
 	  Bzip2 uses a large amount of memory. For modern kernels you
 	  will need at least 8MB RAM or more for booting.
@@ -176,10 +176,9 @@
 	bool "LZMA"
 	depends on HAVE_KERNEL_LZMA
 	help
-	  The most recent compression algorithm.
-	  Its ratio is best, decompression speed is between the other
-	  two. Compression is slowest.	The kernel size is about 33%
-	  smaller with LZMA in comparison to gzip.
+	  This compression algorithm's ratio is best.  Decompression speed
+	  is between gzip and bzip2.  Compression is slowest.
+	  The kernel size is about 33% smaller with LZMA in comparison to gzip.
 
 config KERNEL_XZ
 	bool "XZ"
@@ -200,7 +199,7 @@
 	bool "LZO"
 	depends on HAVE_KERNEL_LZO
 	help
-	  Its compression ratio is the poorest among the 4. The kernel
+	  Its compression ratio is the poorest among the choices. The kernel
 	  size is about 10% bigger than gzip; however its speed
 	  (both compression and decompression) is the fastest.
 
@@ -803,7 +802,7 @@
 endif #CGROUP_SCHED
 
 config BLK_CGROUP
-	tristate "Block IO controller"
+	bool "Block IO controller"
 	depends on BLOCK
 	default n
 	---help---
diff --git a/init/do_mounts.c b/init/do_mounts.c
index 42b0707..d3f0aee 100644
--- a/init/do_mounts.c
+++ b/init/do_mounts.c
@@ -1,3 +1,13 @@
+/*
+ * Many of the syscalls used in this file expect some of the arguments
+ * to be __user pointers not __kernel pointers.  To limit the sparse
+ * noise, turn off sparse checking for this file.
+ */
+#ifdef __CHECKER__
+#undef __CHECKER__
+#warning "Sparse checking disabled for this file"
+#endif
+
 #include <linux/module.h>
 #include <linux/sched.h>
 #include <linux/ctype.h>
@@ -330,7 +340,7 @@
 	if (err)
 		return err;
 
-	sys_chdir((const char __user __force *)"/root");
+	sys_chdir("/root");
 	s = current->fs->pwd.dentry->d_sb;
 	ROOT_DEV = s->s_dev;
 	printk(KERN_INFO
@@ -556,5 +566,5 @@
 out:
 	devtmpfs_mount("dev");
 	sys_mount(".", "/", NULL, MS_MOVE, NULL);
-	sys_chroot((const char __user __force *)".");
+	sys_chroot(".");
 }
diff --git a/init/do_mounts_initrd.c b/init/do_mounts_initrd.c
index 9047330..135959a2 100644
--- a/init/do_mounts_initrd.c
+++ b/init/do_mounts_initrd.c
@@ -1,3 +1,13 @@
+/*
+ * Many of the syscalls used in this file expect some of the arguments
+ * to be __user pointers not __kernel pointers.  To limit the sparse
+ * noise, turn off sparse checking for this file.
+ */
+#ifdef __CHECKER__
+#undef __CHECKER__
+#warning "Sparse checking disabled for this file"
+#endif
+
 #include <linux/unistd.h>
 #include <linux/kernel.h>
 #include <linux/fs.h>
diff --git a/init/do_mounts_md.c b/init/do_mounts_md.c
index 32c4799..8cb6db5 100644
--- a/init/do_mounts_md.c
+++ b/init/do_mounts_md.c
@@ -1,3 +1,13 @@
+/*
+ * Many of the syscalls used in this file expect some of the arguments
+ * to be __user pointers not __kernel pointers.  To limit the sparse
+ * noise, turn off sparse checking for this file.
+ */
+#ifdef __CHECKER__
+#undef __CHECKER__
+#warning "Sparse checking disabled for this file"
+#endif
+
 #include <linux/delay.h>
 #include <linux/raid/md_u.h>
 #include <linux/raid/md_p.h>
@@ -283,7 +293,7 @@
 
 	wait_for_device_probe();
 
-	fd = sys_open((const char __user __force *) "/dev/md0", 0, 0);
+	fd = sys_open("/dev/md0", 0, 0);
 	if (fd >= 0) {
 		sys_ioctl(fd, RAID_AUTORUN, raid_autopart);
 		sys_close(fd);
diff --git a/init/do_mounts_rd.c b/init/do_mounts_rd.c
index 6212586..6be2879 100644
--- a/init/do_mounts_rd.c
+++ b/init/do_mounts_rd.c
@@ -1,3 +1,12 @@
+/*
+ * Many of the syscalls used in this file expect some of the arguments
+ * to be __user pointers not __kernel pointers.  To limit the sparse
+ * noise, turn off sparse checking for this file.
+ */
+#ifdef __CHECKER__
+#undef __CHECKER__
+#warning "Sparse checking disabled for this file"
+#endif
 
 #include <linux/kernel.h>
 #include <linux/fs.h>
@@ -181,7 +190,7 @@
 	char rotator[4] = { '|' , '/' , '-' , '\\' };
 #endif
 
-	out_fd = sys_open((const char __user __force *) "/dev/ram", O_RDWR, 0);
+	out_fd = sys_open("/dev/ram", O_RDWR, 0);
 	if (out_fd < 0)
 		goto out;
 
@@ -280,7 +289,7 @@
 	sys_close(out_fd);
 out:
 	kfree(buf);
-	sys_unlink((const char __user __force *) "/dev/ram");
+	sys_unlink("/dev/ram");
 	return res;
 }
 
diff --git a/init/initramfs.c b/init/initramfs.c
index 8216c30..84c6bf1 100644
--- a/init/initramfs.c
+++ b/init/initramfs.c
@@ -1,3 +1,13 @@
+/*
+ * Many of the syscalls used in this file expect some of the arguments
+ * to be __user pointers not __kernel pointers.  To limit the sparse
+ * noise, turn off sparse checking for this file.
+ */
+#ifdef __CHECKER__
+#undef __CHECKER__
+#warning "Sparse checking disabled for this file"
+#endif
+
 #include <linux/init.h>
 #include <linux/fs.h>
 #include <linux/slab.h>
@@ -74,7 +84,7 @@
 	}
 }
 
-static long __init do_utime(char __user *filename, time_t mtime)
+static long __init do_utime(char *filename, time_t mtime)
 {
 	struct timespec t[2];
 
@@ -529,7 +539,7 @@
 	struct linux_dirent64 *dirp;
 	int num;
 
-	fd = sys_open((const char __user __force *) "/", O_RDONLY, 0);
+	fd = sys_open("/", O_RDONLY, 0);
 	WARN_ON(fd < 0);
 	if (fd < 0)
 		return;
@@ -589,7 +599,7 @@
 		}
 		printk(KERN_INFO "rootfs image is not initramfs (%s)"
 				"; looks like an initrd\n", err);
-		fd = sys_open((const char __user __force *) "/initrd.image",
+		fd = sys_open("/initrd.image",
 			      O_WRONLY|O_CREAT, 0700);
 		if (fd >= 0) {
 			sys_write(fd, (char *)initrd_start,
diff --git a/init/main.c b/init/main.c
index 1ca6b32..b5cc0a7 100644
--- a/init/main.c
+++ b/init/main.c
@@ -508,7 +508,7 @@
 	parse_early_param();
 	parse_args("Booting kernel", static_command_line, __start___param,
 		   __stop___param - __start___param,
-		   0, 0, &unknown_bootoption);
+		   -1, -1, &unknown_bootoption);
 
 	jump_label_init();
 
@@ -755,13 +755,8 @@
 {
 	int level;
 
-	for (level = 0; level < ARRAY_SIZE(initcall_levels) - 1; level++) {
-		pr_info("initlevel:%d=%s, %d registered initcalls\n",
-			level, initcall_level_names[level],
-			(int) (initcall_levels[level+1]
-				- initcall_levels[level]));
+	for (level = 0; level < ARRAY_SIZE(initcall_levels) - 1; level++)
 		do_initcall_level(level);
-	}
 }
 
 /*
diff --git a/ipc/mq_sysctl.c b/ipc/mq_sysctl.c
index 0c09366..383d638 100644
--- a/ipc/mq_sysctl.c
+++ b/ipc/mq_sysctl.c
@@ -13,15 +13,6 @@
 #include <linux/ipc_namespace.h>
 #include <linux/sysctl.h>
 
-/*
- * Define the ranges various user-specified maximum values can
- * be set to.
- */
-#define MIN_MSGMAX	1		/* min value for msg_max */
-#define MAX_MSGMAX	HARD_MSGMAX	/* max value for msg_max */
-#define MIN_MSGSIZEMAX	128		/* min value for msgsize_max */
-#define MAX_MSGSIZEMAX	(8192*128)	/* max value for msgsize_max */
-
 #ifdef CONFIG_PROC_SYSCTL
 static void *get_mq(ctl_table *table)
 {
@@ -31,16 +22,6 @@
 	return which;
 }
 
-static int proc_mq_dointvec(ctl_table *table, int write,
-	void __user *buffer, size_t *lenp, loff_t *ppos)
-{
-	struct ctl_table mq_table;
-	memcpy(&mq_table, table, sizeof(mq_table));
-	mq_table.data = get_mq(table);
-
-	return proc_dointvec(&mq_table, write, buffer, lenp, ppos);
-}
-
 static int proc_mq_dointvec_minmax(ctl_table *table, int write,
 	void __user *buffer, size_t *lenp, loff_t *ppos)
 {
@@ -52,15 +33,17 @@
 					lenp, ppos);
 }
 #else
-#define proc_mq_dointvec NULL
 #define proc_mq_dointvec_minmax NULL
 #endif
 
+static int msg_queues_limit_min = MIN_QUEUESMAX;
+static int msg_queues_limit_max = HARD_QUEUESMAX;
+
 static int msg_max_limit_min = MIN_MSGMAX;
-static int msg_max_limit_max = MAX_MSGMAX;
+static int msg_max_limit_max = HARD_MSGMAX;
 
 static int msg_maxsize_limit_min = MIN_MSGSIZEMAX;
-static int msg_maxsize_limit_max = MAX_MSGSIZEMAX;
+static int msg_maxsize_limit_max = HARD_MSGSIZEMAX;
 
 static ctl_table mq_sysctls[] = {
 	{
@@ -68,7 +51,9 @@
 		.data		= &init_ipc_ns.mq_queues_max,
 		.maxlen		= sizeof(int),
 		.mode		= 0644,
-		.proc_handler	= proc_mq_dointvec,
+		.proc_handler	= proc_mq_dointvec_minmax,
+		.extra1		= &msg_queues_limit_min,
+		.extra2		= &msg_queues_limit_max,
 	},
 	{
 		.procname	= "msg_max",
@@ -88,6 +73,24 @@
 		.extra1		= &msg_maxsize_limit_min,
 		.extra2		= &msg_maxsize_limit_max,
 	},
+	{
+		.procname	= "msg_default",
+		.data		= &init_ipc_ns.mq_msg_default,
+		.maxlen		= sizeof(int),
+		.mode		= 0644,
+		.proc_handler	= proc_mq_dointvec_minmax,
+		.extra1		= &msg_max_limit_min,
+		.extra2		= &msg_max_limit_max,
+	},
+	{
+		.procname	= "msgsize_default",
+		.data		= &init_ipc_ns.mq_msgsize_default,
+		.maxlen		= sizeof(int),
+		.mode		= 0644,
+		.proc_handler	= proc_mq_dointvec_minmax,
+		.extra1		= &msg_maxsize_limit_min,
+		.extra2		= &msg_maxsize_limit_max,
+	},
 	{}
 };
 
diff --git a/ipc/mqueue.c b/ipc/mqueue.c
index b6a0d46..8ce5769 100644
--- a/ipc/mqueue.c
+++ b/ipc/mqueue.c
@@ -24,6 +24,7 @@
 #include <linux/mqueue.h>
 #include <linux/msg.h>
 #include <linux/skbuff.h>
+#include <linux/vmalloc.h>
 #include <linux/netlink.h>
 #include <linux/syscalls.h>
 #include <linux/audit.h>
@@ -49,6 +50,12 @@
 #define STATE_PENDING	1
 #define STATE_READY	2
 
+struct posix_msg_tree_node {
+	struct rb_node		rb_node;
+	struct list_head	msg_list;
+	int			priority;
+};
+
 struct ext_wait_queue {		/* queue of sleeping tasks */
 	struct task_struct *task;
 	struct list_head list;
@@ -61,7 +68,8 @@
 	struct inode vfs_inode;
 	wait_queue_head_t wait_q;
 
-	struct msg_msg **messages;
+	struct rb_root msg_tree;
+	struct posix_msg_tree_node *node_cache;
 	struct mq_attr attr;
 
 	struct sigevent notify;
@@ -109,6 +117,103 @@
 	return ns;
 }
 
+/* Auxiliary functions to manipulate messages' list */
+static int msg_insert(struct msg_msg *msg, struct mqueue_inode_info *info)
+{
+	struct rb_node **p, *parent = NULL;
+	struct posix_msg_tree_node *leaf;
+
+	p = &info->msg_tree.rb_node;
+	while (*p) {
+		parent = *p;
+		leaf = rb_entry(parent, struct posix_msg_tree_node, rb_node);
+
+		if (likely(leaf->priority == msg->m_type))
+			goto insert_msg;
+		else if (msg->m_type < leaf->priority)
+			p = &(*p)->rb_left;
+		else
+			p = &(*p)->rb_right;
+	}
+	if (info->node_cache) {
+		leaf = info->node_cache;
+		info->node_cache = NULL;
+	} else {
+		leaf = kmalloc(sizeof(*leaf), GFP_ATOMIC);
+		if (!leaf)
+			return -ENOMEM;
+		rb_init_node(&leaf->rb_node);
+		INIT_LIST_HEAD(&leaf->msg_list);
+		info->qsize += sizeof(*leaf);
+	}
+	leaf->priority = msg->m_type;
+	rb_link_node(&leaf->rb_node, parent, p);
+	rb_insert_color(&leaf->rb_node, &info->msg_tree);
+insert_msg:
+	info->attr.mq_curmsgs++;
+	info->qsize += msg->m_ts;
+	list_add_tail(&msg->m_list, &leaf->msg_list);
+	return 0;
+}
+
+static inline struct msg_msg *msg_get(struct mqueue_inode_info *info)
+{
+	struct rb_node **p, *parent = NULL;
+	struct posix_msg_tree_node *leaf;
+	struct msg_msg *msg;
+
+try_again:
+	p = &info->msg_tree.rb_node;
+	while (*p) {
+		parent = *p;
+		/*
+		 * During insert, low priorities go to the left and high to the
+		 * right.  On receive, we want the highest priorities first, so
+		 * walk all the way to the right.
+		 */
+		p = &(*p)->rb_right;
+	}
+	if (!parent) {
+		if (info->attr.mq_curmsgs) {
+			pr_warn_once("Inconsistency in POSIX message queue, "
+				     "no tree element, but supposedly messages "
+				     "should exist!\n");
+			info->attr.mq_curmsgs = 0;
+		}
+		return NULL;
+	}
+	leaf = rb_entry(parent, struct posix_msg_tree_node, rb_node);
+	if (unlikely(list_empty(&leaf->msg_list))) {
+		pr_warn_once("Inconsistency in POSIX message queue, "
+			     "empty leaf node but we haven't implemented "
+			     "lazy leaf delete!\n");
+		rb_erase(&leaf->rb_node, &info->msg_tree);
+		if (info->node_cache) {
+			info->qsize -= sizeof(*leaf);
+			kfree(leaf);
+		} else {
+			info->node_cache = leaf;
+		}
+		goto try_again;
+	} else {
+		msg = list_first_entry(&leaf->msg_list,
+				       struct msg_msg, m_list);
+		list_del(&msg->m_list);
+		if (list_empty(&leaf->msg_list)) {
+			rb_erase(&leaf->rb_node, &info->msg_tree);
+			if (info->node_cache) {
+				info->qsize -= sizeof(*leaf);
+				kfree(leaf);
+			} else {
+				info->node_cache = leaf;
+			}
+		}
+	}
+	info->attr.mq_curmsgs--;
+	info->qsize -= msg->m_ts;
+	return msg;
+}
+
 static struct inode *mqueue_get_inode(struct super_block *sb,
 		struct ipc_namespace *ipc_ns, umode_t mode,
 		struct mq_attr *attr)
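
The rightmost walk in msg_get() above is what gives POSIX priority semantics: the highest-priority pending message is delivered first. A small userspace sketch (not part of this patch, queue name and sizes illustrative) that exercises that behaviour through the normal mq_* API:

	#include <fcntl.h>
	#include <mqueue.h>
	#include <stdio.h>

	int main(void)
	{
		struct mq_attr attr = { .mq_maxmsg = 10, .mq_msgsize = 64 };
		char buf[64];
		unsigned int prio;
		mqd_t q = mq_open("/example", O_RDWR | O_CREAT, 0600, &attr);

		if (q == (mqd_t)-1)
			return 1;

		mq_send(q, "low", 4, 1);
		mq_send(q, "high", 5, 7);

		/* delivers "high" (prio 7) before "low" (prio 1) */
		mq_receive(q, buf, sizeof(buf), &prio);
		printf("%s (prio %u)\n", buf, prio);

		mq_close(q);
		mq_unlink("/example");
		return 0;
	}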
@@ -129,7 +234,7 @@
 
 	if (S_ISREG(mode)) {
 		struct mqueue_inode_info *info;
-		unsigned long mq_bytes, mq_msg_tblsz;
+		unsigned long mq_bytes, mq_treesize;
 
 		inode->i_fop = &mqueue_file_operations;
 		inode->i_size = FILENT_SIZE;
@@ -143,20 +248,36 @@
 		info->notify_user_ns = NULL;
 		info->qsize = 0;
 		info->user = NULL;	/* set when all is ok */
+		info->msg_tree = RB_ROOT;
+		info->node_cache = NULL;
 		memset(&info->attr, 0, sizeof(info->attr));
-		info->attr.mq_maxmsg = ipc_ns->mq_msg_max;
-		info->attr.mq_msgsize = ipc_ns->mq_msgsize_max;
+		info->attr.mq_maxmsg = min(ipc_ns->mq_msg_max,
+					   ipc_ns->mq_msg_default);
+		info->attr.mq_msgsize = min(ipc_ns->mq_msgsize_max,
+					    ipc_ns->mq_msgsize_default);
 		if (attr) {
 			info->attr.mq_maxmsg = attr->mq_maxmsg;
 			info->attr.mq_msgsize = attr->mq_msgsize;
 		}
-		mq_msg_tblsz = info->attr.mq_maxmsg * sizeof(struct msg_msg *);
-		info->messages = kmalloc(mq_msg_tblsz, GFP_KERNEL);
-		if (!info->messages)
-			goto out_inode;
+		/*
+		 * We used to allocate a static array of pointers and account
+		 * the size of that array as well as one msg_msg struct per
+		 * possible message into the queue size. That's no longer
+		 * accurate as the queue is now an rbtree and will grow and
+		 * shrink depending on usage patterns.  We can, however, still
+		 * account one msg_msg struct per message, but the nodes are
+		 * allocated depending on priority usage, and most programs
+		 * only use one, or a handful, of priorities.  However, since
+		 * this is pinned memory, we need to assume worst case, so
+		 * that means the min(mq_maxmsg, max_priorities) * struct
+		 * posix_msg_tree_node.
+		 */
+		mq_treesize = info->attr.mq_maxmsg * sizeof(struct msg_msg) +
+			min_t(unsigned int, info->attr.mq_maxmsg, MQ_PRIO_MAX) *
+			sizeof(struct posix_msg_tree_node);
 
-		mq_bytes = (mq_msg_tblsz +
-			(info->attr.mq_maxmsg * info->attr.mq_msgsize));
+		mq_bytes = mq_treesize + (info->attr.mq_maxmsg *
+					  info->attr.mq_msgsize);
 
 		spin_lock(&mq_lock);
 		if (u->mq_bytes + mq_bytes < u->mq_bytes ||
@@ -247,11 +368,11 @@
 {
 	struct mqueue_inode_info *info;
 	struct user_struct *user;
-	unsigned long mq_bytes;
-	int i;
+	unsigned long mq_bytes, mq_treesize;
 	struct ipc_namespace *ipc_ns;
+	struct msg_msg *msg;
 
-	end_writeback(inode);
+	clear_inode(inode);
 
 	if (S_ISDIR(inode->i_mode))
 		return;
@@ -259,14 +380,19 @@
 	ipc_ns = get_ns_from_inode(inode);
 	info = MQUEUE_I(inode);
 	spin_lock(&info->lock);
-	for (i = 0; i < info->attr.mq_curmsgs; i++)
-		free_msg(info->messages[i]);
-	kfree(info->messages);
+	while ((msg = msg_get(info)) != NULL)
+		free_msg(msg);
+	kfree(info->node_cache);
 	spin_unlock(&info->lock);
 
 	/* Total amount of bytes accounted for the mqueue */
-	mq_bytes = info->attr.mq_maxmsg * (sizeof(struct msg_msg *)
-	    + info->attr.mq_msgsize);
+	mq_treesize = info->attr.mq_maxmsg * sizeof(struct msg_msg) +
+		min_t(unsigned int, info->attr.mq_maxmsg, MQ_PRIO_MAX) *
+		sizeof(struct posix_msg_tree_node);
+
+	mq_bytes = mq_treesize + (info->attr.mq_maxmsg *
+				  info->attr.mq_msgsize);
+
 	user = info->user;
 	if (user) {
 		spin_lock(&mq_lock);
@@ -300,8 +426,9 @@
 		error = -EACCES;
 		goto out_unlock;
 	}
-	if (ipc_ns->mq_queues_count >= ipc_ns->mq_queues_max &&
-			!capable(CAP_SYS_RESOURCE)) {
+	if (ipc_ns->mq_queues_count >= HARD_QUEUESMAX ||
+	    (ipc_ns->mq_queues_count >= ipc_ns->mq_queues_max &&
+	     !capable(CAP_SYS_RESOURCE))) {
 		error = -ENOSPC;
 		goto out_unlock;
 	}
@@ -485,26 +612,6 @@
 	return list_entry(ptr, struct ext_wait_queue, list);
 }
 
-/* Auxiliary functions to manipulate messages' list */
-static void msg_insert(struct msg_msg *ptr, struct mqueue_inode_info *info)
-{
-	int k;
-
-	k = info->attr.mq_curmsgs - 1;
-	while (k >= 0 && info->messages[k]->m_type >= ptr->m_type) {
-		info->messages[k + 1] = info->messages[k];
-		k--;
-	}
-	info->attr.mq_curmsgs++;
-	info->qsize += ptr->m_ts;
-	info->messages[k + 1] = ptr;
-}
-
-static inline struct msg_msg *msg_get(struct mqueue_inode_info *info)
-{
-	info->qsize -= info->messages[--info->attr.mq_curmsgs]->m_ts;
-	return info->messages[info->attr.mq_curmsgs];
-}
 
 static inline void set_cookie(struct sk_buff *skb, char code)
 {
@@ -585,24 +692,30 @@
 
 static int mq_attr_ok(struct ipc_namespace *ipc_ns, struct mq_attr *attr)
 {
+	int mq_treesize;
+	unsigned long total_size;
+
 	if (attr->mq_maxmsg <= 0 || attr->mq_msgsize <= 0)
-		return 0;
+		return -EINVAL;
 	if (capable(CAP_SYS_RESOURCE)) {
-		if (attr->mq_maxmsg > HARD_MSGMAX)
-			return 0;
+		if (attr->mq_maxmsg > HARD_MSGMAX ||
+		    attr->mq_msgsize > HARD_MSGSIZEMAX)
+			return -EINVAL;
 	} else {
 		if (attr->mq_maxmsg > ipc_ns->mq_msg_max ||
 				attr->mq_msgsize > ipc_ns->mq_msgsize_max)
-			return 0;
+			return -EINVAL;
 	}
 	/* check for overflow */
 	if (attr->mq_msgsize > ULONG_MAX/attr->mq_maxmsg)
-		return 0;
-	if ((unsigned long)(attr->mq_maxmsg * (attr->mq_msgsize
-	    + sizeof (struct msg_msg *))) <
-	    (unsigned long)(attr->mq_maxmsg * attr->mq_msgsize))
-		return 0;
-	return 1;
+		return -EOVERFLOW;
+	mq_treesize = attr->mq_maxmsg * sizeof(struct msg_msg) +
+		min_t(unsigned int, attr->mq_maxmsg, MQ_PRIO_MAX) *
+		sizeof(struct posix_msg_tree_node);
+	total_size = attr->mq_maxmsg * attr->mq_msgsize;
+	if (total_size + mq_treesize < total_size)
+		return -EOVERFLOW;
+	return 0;
 }
 
 /*
@@ -617,12 +730,21 @@
 	int ret;
 
 	if (attr) {
-		if (!mq_attr_ok(ipc_ns, attr)) {
-			ret = -EINVAL;
+		ret = mq_attr_ok(ipc_ns, attr);
+		if (ret)
 			goto out;
-		}
 		/* store for use during create */
 		dentry->d_fsdata = attr;
+	} else {
+		struct mq_attr def_attr;
+
+		def_attr.mq_maxmsg = min(ipc_ns->mq_msg_max,
+					 ipc_ns->mq_msg_default);
+		def_attr.mq_msgsize = min(ipc_ns->mq_msgsize_max,
+					  ipc_ns->mq_msgsize_default);
+		ret = mq_attr_ok(ipc_ns, &def_attr);
+		if (ret)
+			goto out;
 	}
 
 	mode &= ~current_umask();
@@ -837,7 +959,8 @@
 		wake_up_interruptible(&info->wait_q);
 		return;
 	}
-	msg_insert(sender->msg, info);
+	if (msg_insert(sender->msg, info))
+		return;
 	list_del(&sender->list);
 	sender->state = STATE_PENDING;
 	wake_up_process(sender->task);
@@ -857,7 +980,8 @@
 	struct mqueue_inode_info *info;
 	ktime_t expires, *timeout = NULL;
 	struct timespec ts;
-	int ret;
+	struct posix_msg_tree_node *new_leaf = NULL;
+	int ret = 0;
 
 	if (u_abs_timeout) {
 		int res = prepare_timeout(u_abs_timeout, &expires, &ts);
@@ -905,34 +1029,60 @@
 	msg_ptr->m_ts = msg_len;
 	msg_ptr->m_type = msg_prio;
 
+	/*
+	 * msg_insert really wants us to have a valid, spare node struct so
+	 * it doesn't have to kmalloc a GFP_ATOMIC allocation, but it will
+	 * fall back to that if necessary.
+	 */
+	if (!info->node_cache)
+		new_leaf = kmalloc(sizeof(*new_leaf), GFP_KERNEL);
+
 	spin_lock(&info->lock);
 
+	if (!info->node_cache && new_leaf) {
+		/* Save our speculative allocation into the cache */
+		rb_init_node(&new_leaf->rb_node);
+		INIT_LIST_HEAD(&new_leaf->msg_list);
+		info->node_cache = new_leaf;
+		info->qsize += sizeof(*new_leaf);
+		new_leaf = NULL;
+	} else {
+		kfree(new_leaf);
+	}
+
 	if (info->attr.mq_curmsgs == info->attr.mq_maxmsg) {
 		if (filp->f_flags & O_NONBLOCK) {
-			spin_unlock(&info->lock);
 			ret = -EAGAIN;
 		} else {
 			wait.task = current;
 			wait.msg = (void *) msg_ptr;
 			wait.state = STATE_NONE;
 			ret = wq_sleep(info, SEND, timeout, &wait);
+			/*
+			 * wq_sleep must be called with info->lock held, and
+			 * returns with the lock released
+			 */
+			goto out_free;
 		}
-		if (ret < 0)
-			free_msg(msg_ptr);
 	} else {
 		receiver = wq_get_first_waiter(info, RECV);
 		if (receiver) {
 			pipelined_send(info, msg_ptr, receiver);
 		} else {
 			/* adds message to the queue */
-			msg_insert(msg_ptr, info);
+			ret = msg_insert(msg_ptr, info);
+			if (ret)
+				goto out_unlock;
 			__do_notify(info);
 		}
 		inode->i_atime = inode->i_mtime = inode->i_ctime =
 				CURRENT_TIME;
-		spin_unlock(&info->lock);
-		ret = 0;
 	}
+out_unlock:
+	spin_unlock(&info->lock);
+out_free:
+	if (ret)
+		free_msg(msg_ptr);
 out_fput:
 	fput(filp);
 out:
@@ -951,6 +1101,7 @@
 	struct ext_wait_queue wait;
 	ktime_t expires, *timeout = NULL;
 	struct timespec ts;
+	struct posix_msg_tree_node *new_leaf = NULL;
 
 	if (u_abs_timeout) {
 		int res = prepare_timeout(u_abs_timeout, &expires, &ts);
@@ -986,7 +1137,26 @@
 		goto out_fput;
 	}
 
+	/*
+	 * msg_insert really wants us to have a valid, spare node struct so
+	 * it doesn't have to kmalloc a GFP_ATOMIC allocation, but it will
+	 * fall back to that if necessary.
+	 */
+	if (!info->node_cache)
+		new_leaf = kmalloc(sizeof(*new_leaf), GFP_KERNEL);
+
 	spin_lock(&info->lock);
+
+	if (!info->node_cache && new_leaf) {
+		/* Save our speculative allocation into the cache */
+		rb_init_node(&new_leaf->rb_node);
+		INIT_LIST_HEAD(&new_leaf->msg_list);
+		info->node_cache = new_leaf;
+		info->qsize += sizeof(*new_leaf);
+	} else {
+		kfree(new_leaf);
+	}
+
 	if (info->attr.mq_curmsgs == 0) {
 		if (filp->f_flags & O_NONBLOCK) {
 			spin_unlock(&info->lock);
@@ -1251,6 +1421,8 @@
 	ns->mq_queues_max    = DFLT_QUEUESMAX;
 	ns->mq_msg_max       = DFLT_MSGMAX;
 	ns->mq_msgsize_max   = DFLT_MSGSIZEMAX;
+	ns->mq_msg_default   = DFLT_MSG;
+	ns->mq_msgsize_default  = DFLT_MSGSIZE;
 
 	ns->mq_mnt = kern_mount_data(&mqueue_fs_type, ns);
 	if (IS_ERR(ns->mq_mnt)) {
diff --git a/ipc/shm.c b/ipc/shm.c
index 406c5b2..41c1285 100644
--- a/ipc/shm.c
+++ b/ipc/shm.c
@@ -393,6 +393,16 @@
 	return sfd->file->f_op->fsync(sfd->file, start, end, datasync);
 }
 
+static long shm_fallocate(struct file *file, int mode, loff_t offset,
+			  loff_t len)
+{
+	struct shm_file_data *sfd = shm_file_data(file);
+
+	if (!sfd->file->f_op->fallocate)
+		return -EOPNOTSUPP;
+	return sfd->file->f_op->fallocate(file, mode, offset, len);
+}
+
 static unsigned long shm_get_unmapped_area(struct file *file,
 	unsigned long addr, unsigned long len, unsigned long pgoff,
 	unsigned long flags)
@@ -410,6 +420,7 @@
 	.get_unmapped_area	= shm_get_unmapped_area,
 #endif
 	.llseek		= noop_llseek,
+	.fallocate	= shm_fallocate,
 };
 
 static const struct file_operations shm_file_operations_huge = {
@@ -418,6 +429,7 @@
 	.release	= shm_release,
 	.get_unmapped_area	= shm_get_unmapped_area,
 	.llseek		= noop_llseek,
+	.fallocate	= shm_fallocate,
 };
 
 int is_file_shm_hugepages(struct file *file)
@@ -1036,6 +1048,10 @@
 	sfd->file = shp->shm_file;
 	sfd->vm_ops = NULL;
 
+	err = security_mmap_file(file, prot, flags);
+	if (err)
+		goto out_fput;
+
 	down_write(&current->mm->mmap_sem);
 	if (addr && !(shmflg & SHM_REMAP)) {
 		err = -EINVAL;
@@ -1050,7 +1066,7 @@
 			goto invalid;
 	}
 		
-	user_addr = do_mmap (file, addr, size, prot, flags, 0);
+	user_addr = do_mmap_pgoff(file, addr, size, prot, flags, 0);
 	*raddr = user_addr;
 	err = 0;
 	if (IS_ERR_VALUE(user_addr))
@@ -1058,6 +1074,7 @@
 invalid:
 	up_write(&current->mm->mmap_sem);
 
+out_fput:
 	fput(file);
 
 out_nattch:
diff --git a/kernel/Makefile b/kernel/Makefile
index 6c07f30..c0cc67a 100644
--- a/kernel/Makefile
+++ b/kernel/Makefile
@@ -5,12 +5,12 @@
 obj-y     = fork.o exec_domain.o panic.o printk.o \
 	    cpu.o exit.o itimer.o time.o softirq.o resource.o \
 	    sysctl.o sysctl_binary.o capability.o ptrace.o timer.o user.o \
-	    signal.o sys.o kmod.o workqueue.o pid.o \
+	    signal.o sys.o kmod.o workqueue.o pid.o task_work.o \
 	    rcupdate.o extable.o params.o posix-timers.o \
 	    kthread.o wait.o kfifo.o sys_ni.o posix-cpu-timers.o mutex.o \
 	    hrtimer.o rwsem.o nsproxy.o srcu.o semaphore.o \
 	    notifier.o ksysfs.o cred.o \
-	    async.o range.o groups.o
+	    async.o range.o groups.o lglock.o
 
 ifdef CONFIG_FUNCTION_TRACER
 # Do not trace debug files and internal ftrace files
@@ -25,6 +25,9 @@
 obj-y += sched/
 obj-y += power/
 
+ifeq ($(CONFIG_CHECKPOINT_RESTORE),y)
+obj-$(CONFIG_X86) += kcmp.o
+endif
 obj-$(CONFIG_FREEZER) += freezer.o
 obj-$(CONFIG_PROFILING) += profile.o
 obj-$(CONFIG_STACKTRACE) += stacktrace.o
diff --git a/kernel/cgroup.c b/kernel/cgroup.c
index a0c6af3..2097684 100644
--- a/kernel/cgroup.c
+++ b/kernel/cgroup.c
@@ -255,12 +255,17 @@
 
 EXPORT_SYMBOL_GPL(cgroup_lock_is_held);
 
+static int css_unbias_refcnt(int refcnt)
+{
+	return refcnt >= 0 ? refcnt : refcnt - CSS_DEACT_BIAS;
+}
+
 /* the current nr of refs, always >= 0 whether @css is deactivated or not */
 static int css_refcnt(struct cgroup_subsys_state *css)
 {
 	int v = atomic_read(&css->refcnt);
 
-	return v >= 0 ? v : v - CSS_DEACT_BIAS;
+	return css_unbias_refcnt(v);
 }
 
 /* convenient tests for these bits */
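
css_unbias_refcnt() undoes the deactivation bias so callers see the true reference count whether or not the css is being torn down. A small standalone sketch of the arithmetic, using a made-up stand-in value for CSS_DEACT_BIAS:

	#include <stdio.h>

	#define EXAMPLE_DEACT_BIAS	(-(1 << 30))	/* stand-in for CSS_DEACT_BIAS */

	static int example_unbias(int refcnt)
	{
		/* same shape as css_unbias_refcnt() above */
		return refcnt >= 0 ? refcnt : refcnt - EXAMPLE_DEACT_BIAS;
	}

	int main(void)
	{
		int active = 3;				/* live css: count stored directly */
		int dying  = EXAMPLE_DEACT_BIAS + 3;	/* deactivating css: count is biased */

		/* prints "3 3" */
		printf("%d %d\n", example_unbias(active), example_unbias(dying));
		return 0;
	}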
@@ -896,10 +901,13 @@
 		mutex_unlock(&cgroup_mutex);
 
 		/*
-		 * Drop the active superblock reference that we took when we
-		 * created the cgroup
+		 * We want to drop the active superblock reference from the
+		 * cgroup creation after all the dentry refs are gone -
+		 * kill_sb gets mighty unhappy otherwise.  Mark
+		 * dentry->d_fsdata with cgroup_diput() to tell
+		 * cgroup_d_release() to call deactivate_super().
 		 */
-		deactivate_super(cgrp->root->sb);
+		dentry->d_fsdata = cgroup_diput;
 
 		/*
 		 * if we're getting rid of the cgroup, refcount should ensure
@@ -925,6 +933,13 @@
 	return 1;
 }
 
+static void cgroup_d_release(struct dentry *dentry)
+{
+	/* did cgroup_diput() tell me to deactivate super? */
+	if (dentry->d_fsdata == cgroup_diput)
+		deactivate_super(dentry->d_sb);
+}
+
 static void remove_dir(struct dentry *d)
 {
 	struct dentry *parent = dget(d->d_parent);
@@ -1532,6 +1547,7 @@
 	static const struct dentry_operations cgroup_dops = {
 		.d_iput = cgroup_diput,
 		.d_delete = cgroup_delete,
+		.d_release = cgroup_d_release,
 	};
 
 	struct inode *inode =
@@ -4971,10 +4987,12 @@
 void __css_put(struct cgroup_subsys_state *css)
 {
 	struct cgroup *cgrp = css->cgroup;
+	int v;
 
 	rcu_read_lock();
-	atomic_dec(&css->refcnt);
-	switch (css_refcnt(css)) {
+	v = css_unbias_refcnt(atomic_dec_return(&css->refcnt));
+
+	switch (v) {
 	case 1:
 		if (notify_on_release(cgrp)) {
 			set_bit(CGRP_RELEASABLE, &cgrp->flags);
@@ -5132,7 +5150,7 @@
 * @root: the css supposed to be an ancestor of the child.
  *
  * Returns true if "root" is an ancestor of "child" in its hierarchy. Because
- * this function reads css->id, this use rcu_dereference() and rcu_read_lock().
+ * this function reads css->id, the caller must hold rcu_read_lock().
  * But, considering usual usage, the csses should be valid objects after test.
 * Assuming that the caller will do some action to the child if this
 * returns true, the caller must take "child"'s reference count.
@@ -5144,18 +5162,18 @@
 {
 	struct css_id *child_id;
 	struct css_id *root_id;
-	bool ret = true;
 
-	rcu_read_lock();
 	child_id  = rcu_dereference(child->id);
+	if (!child_id)
+		return false;
 	root_id = rcu_dereference(root->id);
-	if (!child_id
-	    || !root_id
-	    || (child_id->depth < root_id->depth)
-	    || (child_id->stack[root_id->depth] != root_id->id))
-		ret = false;
-	rcu_read_unlock();
-	return ret;
+	if (!root_id)
+		return false;
+	if (child_id->depth < root_id->depth)
+		return false;
+	if (child_id->stack[root_id->depth] != root_id->id)
+		return false;
+	return true;
 }
 
 void free_css_id(struct cgroup_subsys *ss, struct cgroup_subsys_state *css)
diff --git a/kernel/cpu.c b/kernel/cpu.c
index 0e6353c..a4eb522 100644
--- a/kernel/cpu.c
+++ b/kernel/cpu.c
@@ -10,7 +10,10 @@
 #include <linux/sched.h>
 #include <linux/unistd.h>
 #include <linux/cpu.h>
+#include <linux/oom.h>
+#include <linux/rcupdate.h>
 #include <linux/export.h>
+#include <linux/bug.h>
 #include <linux/kthread.h>
 #include <linux/stop_machine.h>
 #include <linux/mutex.h>
@@ -173,6 +176,47 @@
 }
 EXPORT_SYMBOL(unregister_cpu_notifier);
 
+/**
+ * clear_tasks_mm_cpumask - Safely clear tasks' mm_cpumask for a CPU
+ * @cpu: a CPU id
+ *
+ * This function walks all processes, finds a valid mm struct for each one and
+ * then clears a corresponding bit in mm's cpumask.  While this all sounds
+ * trivial, there are various non-obvious corner cases, which this function
+ * tries to solve in a safe manner.
+ *
+ * Also note that the function uses a somewhat relaxed locking scheme, so it may
+ * be called only for an already offlined CPU.
+ */
+void clear_tasks_mm_cpumask(int cpu)
+{
+	struct task_struct *p;
+
+	/*
+	 * This function is called after the cpu is taken down and marked
+	 * offline, so it's not like new tasks will ever get this cpu set in
+	 * their mm mask. -- Peter Zijlstra
+	 * Thus, we may use rcu_read_lock() here, instead of grabbing
+	 * full-fledged tasklist_lock.
+	 */
+	WARN_ON(cpu_online(cpu));
+	rcu_read_lock();
+	for_each_process(p) {
+		struct task_struct *t;
+
+		/*
+		 * Main thread might exit, but other threads may still have
+		 * a valid mm. Find one.
+		 */
+		t = find_lock_task_mm(p);
+		if (!t)
+			continue;
+		cpumask_clear_cpu(cpu, mm_cpumask(t->mm));
+		task_unlock(t);
+	}
+	rcu_read_unlock();
+}
+
 static inline void check_for_tasks(int cpu)
 {
 	struct task_struct *p;
diff --git a/kernel/cpu_pm.c b/kernel/cpu_pm.c
index 249152e..9656a3c 100644
--- a/kernel/cpu_pm.c
+++ b/kernel/cpu_pm.c
@@ -81,7 +81,7 @@
 EXPORT_SYMBOL_GPL(cpu_pm_unregister_notifier);
 
 /**
- * cpm_pm_enter - CPU low power entry notifier
+ * cpu_pm_enter - CPU low power entry notifier
  *
  * Notifies listeners that a single CPU is entering a low power state that may
  * cause some blocks in the same power domain as the cpu to reset.
@@ -89,7 +89,7 @@
  * Must be called on the affected CPU with interrupts disabled.  Platform is
  * responsible for ensuring that cpu_pm_enter is not called twice on the same
  * CPU before cpu_pm_exit is called. Notified drivers can include VFP
- * co-processor, interrupt controller and it's PM extensions, local CPU
+ * co-processor, interrupt controller and its PM extensions, local CPU
  * timers context save/restore which shouldn't be interrupted. Hence it
  * must be called with interrupts disabled.
  *
@@ -115,13 +115,13 @@
 EXPORT_SYMBOL_GPL(cpu_pm_enter);
 
 /**
- * cpm_pm_exit - CPU low power exit notifier
+ * cpu_pm_exit - CPU low power exit notifier
  *
  * Notifies listeners that a single CPU is exiting a low power state that may
  * have caused some blocks in the same power domain as the cpu to reset.
  *
  * Notified drivers can include VFP co-processor, interrupt controller
- * and it's PM extensions, local CPU timers context save/restore which
+ * and its PM extensions, local CPU timers context save/restore which
  * shouldn't be interrupted. Hence it must be called with interrupts disabled.
  *
  * Return conditions are same as __raw_notifier_call_chain.
@@ -139,7 +139,7 @@
 EXPORT_SYMBOL_GPL(cpu_pm_exit);
 
 /**
- * cpm_cluster_pm_enter - CPU cluster low power entry notifier
+ * cpu_cluster_pm_enter - CPU cluster low power entry notifier
  *
  * Notifies listeners that all cpus in a power domain are entering a low power
  * state that may cause some blocks in the same power domain to reset.
@@ -147,7 +147,7 @@
  * Must be called after cpu_pm_enter has been called on all cpus in the power
  * domain, and before cpu_pm_exit has been called on any cpu in the power
  * domain. Notified drivers can include VFP co-processor, interrupt controller
- * and it's PM extensions, local CPU timers context save/restore which
+ * and its PM extensions, local CPU timers context save/restore which
  * shouldn't be interrupted. Hence it must be called with interrupts disabled.
  *
  * Must be called with interrupts disabled.
@@ -174,7 +174,7 @@
 EXPORT_SYMBOL_GPL(cpu_cluster_pm_enter);
 
 /**
- * cpm_cluster_pm_exit - CPU cluster low power exit notifier
+ * cpu_cluster_pm_exit - CPU cluster low power exit notifier
  *
  * Notifies listeners that all cpus in a power domain are exiting form a
  * low power state that may have caused some blocks in the same power domain
@@ -183,7 +183,7 @@
  * Must be called after cpu_pm_exit has been called on all cpus in the power
  * domain, and before cpu_pm_exit has been called on any cpu in the power
  * domain. Notified drivers can include VFP co-processor, interrupt controller
- * and it's PM extensions, local CPU timers context save/restore which
+ * and its PM extensions, local CPU timers context save/restore which
  * shouldn't be interrupted. Hence it must be called with interrupts disabled.
  *
  * Return conditions are same as __raw_notifier_call_chain.
diff --git a/kernel/cred.c b/kernel/cred.c
index 430557e..de728ac 100644
--- a/kernel/cred.c
+++ b/kernel/cred.c
@@ -207,13 +207,6 @@
 	validate_creds(cred);
 	alter_cred_subscribers(cred, -1);
 	put_cred(cred);
-
-	cred = (struct cred *) tsk->replacement_session_keyring;
-	if (cred) {
-		tsk->replacement_session_keyring = NULL;
-		validate_creds(cred);
-		put_cred(cred);
-	}
 }
 
 /**
@@ -396,8 +389,6 @@
 	struct cred *new;
 	int ret;
 
-	p->replacement_session_keyring = NULL;
-
 	if (
 #ifdef CONFIG_KEYS
 		!p->cred->thread_keyring &&
diff --git a/kernel/events/core.c b/kernel/events/core.c
index 5b06cbb..d7d71d6 100644
--- a/kernel/events/core.c
+++ b/kernel/events/core.c
@@ -253,9 +253,9 @@
 	return !event->cgrp || event->cgrp == cpuctx->cgrp;
 }
 
-static inline void perf_get_cgroup(struct perf_event *event)
+static inline bool perf_tryget_cgroup(struct perf_event *event)
 {
-	css_get(&event->cgrp->css);
+	return css_tryget(&event->cgrp->css);
 }
 
 static inline void perf_put_cgroup(struct perf_event *event)
@@ -484,7 +484,11 @@
 	event->cgrp = cgrp;
 
 	/* must be done before we fput() the file */
-	perf_get_cgroup(event);
+	if (!perf_tryget_cgroup(event)) {
+		event->cgrp = NULL;
+		ret = -ENOENT;
+		goto out;
+	}
 
 	/*
 	 * all events in a group must monitor
@@ -3181,7 +3185,6 @@
 	event = event->group_leader;
 
 	perf_event_for_each_child(event, func);
-	func(event);
 	list_for_each_entry(sibling, &event->sibling_list, group_entry)
 		perf_event_for_each_child(sibling, func);
 	mutex_unlock(&ctx->mutex);
diff --git a/kernel/exit.c b/kernel/exit.c
index 910a071..2f59cc3 100644
--- a/kernel/exit.c
+++ b/kernel/exit.c
@@ -72,6 +72,18 @@
 		list_del_rcu(&p->tasks);
 		list_del_init(&p->sibling);
 		__this_cpu_dec(process_counts);
+		/*
+		 * If we are the last child process in a pid namespace to be
+		 * reaped, notify the reaper sleeping zap_pid_ns_processes().
+		 */
+		if (IS_ENABLED(CONFIG_PID_NS)) {
+			struct task_struct *parent = p->real_parent;
+
+			if ((task_active_pid_ns(parent)->child_reaper == parent) &&
+			    list_empty(&parent->children) &&
+			    (parent->flags & PF_EXITING))
+				wake_up_process(parent);
+		}
 	}
 	list_del_rcu(&p->thread_group);
 }
@@ -643,6 +655,7 @@
 	mm_release(tsk, mm);
 	if (!mm)
 		return;
+	sync_mm_rss(mm);
 	/*
 	 * Serialize with any possible pending coredump.
 	 * We must hold mmap_sem around checking core_state
@@ -719,12 +732,6 @@
 
 		zap_pid_ns_processes(pid_ns);
 		write_lock_irq(&tasklist_lock);
-		/*
-		 * We can not clear ->child_reaper or leave it alone.
-		 * There may by stealth EXIT_DEAD tasks on ->children,
-		 * forget_original_parent() must move them somewhere.
-		 */
-		pid_ns->child_reaper = init_pid_ns.child_reaper;
 	} else if (father->signal->has_child_subreaper) {
 		struct task_struct *reaper;
 
@@ -884,9 +891,9 @@
 
 	spin_lock(&low_water_lock);
 	if (free < lowest_to_date) {
-		printk(KERN_WARNING "%s used greatest stack depth: %lu bytes "
-				"left\n",
-				current->comm, free);
+		printk(KERN_WARNING "%s (%d) used greatest stack depth: "
+				"%lu bytes left\n",
+				current->comm, task_pid_nr(current), free);
 		lowest_to_date = free;
 	}
 	spin_unlock(&low_water_lock);
@@ -946,12 +953,13 @@
 	exit_signals(tsk);  /* sets PF_EXITING */
 	/*
 	 * tsk->flags are checked in the futex code to protect against
-	 * an exiting task cleaning up the robust pi futexes.
+	 * an exiting task cleaning up the robust pi futexes, and in
+	 * task_work_add() to avoid the race with exit_task_work().
 	 */
 	smp_mb();
 	raw_spin_unlock_wait(&tsk->pi_lock);
 
-	exit_irq_thread();
+	exit_task_work(tsk);
 
 	if (unlikely(in_atomic()))
 		printk(KERN_INFO "note: %s[%d] exited with preempt_count %d\n",
@@ -1214,7 +1222,7 @@
 	unsigned long state;
 	int retval, status, traced;
 	pid_t pid = task_pid_vnr(p);
-	uid_t uid = from_kuid_munged(current_user_ns(), __task_cred(p)->uid);
+	uid_t uid = from_kuid_munged(current_user_ns(), task_uid(p));
 	struct siginfo __user *infop;
 
 	if (!likely(wo->wo_flags & WEXITED))
diff --git a/kernel/fork.c b/kernel/fork.c
index 47b4e4f..ab5211b 100644
--- a/kernel/fork.c
+++ b/kernel/fork.c
@@ -386,7 +386,8 @@
 		}
 		charge = 0;
 		if (mpnt->vm_flags & VM_ACCOUNT) {
-			unsigned int len = (mpnt->vm_end - mpnt->vm_start) >> PAGE_SHIFT;
+			unsigned long len;
+			len = (mpnt->vm_end - mpnt->vm_start) >> PAGE_SHIFT;
 			if (security_vm_enough_memory_mm(oldmm, len)) /* sic */
 				goto fail_nomem;
 			charge = len;
@@ -614,7 +615,6 @@
 			list_del(&mm->mmlist);
 			spin_unlock(&mmlist_lock);
 		}
-		put_swap_token(mm);
 		if (mm->binfmt)
 			module_put(mm->binfmt->module);
 		mmdrop(mm);
@@ -787,9 +787,6 @@
 	/* Get rid of any cached register state */
 	deactivate_mm(tsk, mm);
 
-	if (tsk->vfork_done)
-		complete_vfork_done(tsk);
-
 	/*
 	 * If we're exiting normally, clear a user-space tid field if
 	 * requested.  We leave this alone when dying by signal, to leave
@@ -810,6 +807,13 @@
 		}
 		tsk->clear_child_tid = NULL;
 	}
+
+	/*
+	 * All done, finally we can wake up parent and return this mm to him.
+	 * Also kthread_stop() uses this completion for synchronization.
+	 */
+	if (tsk->vfork_done)
+		complete_vfork_done(tsk);
 }
 
 /*
@@ -831,10 +835,6 @@
 	memcpy(mm, oldmm, sizeof(*mm));
 	mm_init_cpumask(mm);
 
-	/* Initializing for Swap token stuff */
-	mm->token_priority = 0;
-	mm->last_interval = 0;
-
 #ifdef CONFIG_TRANSPARENT_HUGEPAGE
 	mm->pmd_huge_pte = NULL;
 #endif
@@ -913,10 +913,6 @@
 		goto fail_nomem;
 
 good_mm:
-	/* Initializing for Swap token stuff */
-	mm->token_priority = 0;
-	mm->last_interval = 0;
-
 	tsk->mm = mm;
 	tsk->active_mm = mm;
 	return 0;
@@ -984,9 +980,8 @@
 	 * Share io context with parent, if CLONE_IO is set
 	 */
 	if (clone_flags & CLONE_IO) {
-		tsk->io_context = ioc_task_link(ioc);
-		if (unlikely(!tsk->io_context))
-			return -ENOMEM;
+		ioc_task_link(ioc);
+		tsk->io_context = ioc;
 	} else if (ioprio_valid(ioc->ioprio)) {
 		new_ioc = get_task_io_context(tsk, GFP_KERNEL, NUMA_NO_NODE);
 		if (unlikely(!new_ioc))
@@ -1420,6 +1415,7 @@
 	 */
 	p->group_leader = p;
 	INIT_LIST_HEAD(&p->thread_group);
+	INIT_HLIST_HEAD(&p->task_works);
 
 	/* Now that the task is set up, run cgroup callbacks if
 	 * necessary. We need to run them before the task is visible
diff --git a/kernel/irq/chip.c b/kernel/irq/chip.c
index fc275e4..eebd6d5 100644
--- a/kernel/irq/chip.c
+++ b/kernel/irq/chip.c
@@ -275,8 +275,10 @@
 	kstat_incr_irqs_this_cpu(irq, desc);
 
 	action = desc->action;
-	if (unlikely(!action || irqd_irq_disabled(&desc->irq_data)))
+	if (unlikely(!action || irqd_irq_disabled(&desc->irq_data))) {
+		desc->istate |= IRQS_PENDING;
 		goto out_unlock;
+	}
 
 	irqd_set(&desc->irq_data, IRQD_IRQ_INPROGRESS);
 	raw_spin_unlock_irq(&desc->lock);
@@ -324,8 +326,10 @@
 	desc->istate &= ~(IRQS_REPLAY | IRQS_WAITING);
 	kstat_incr_irqs_this_cpu(irq, desc);
 
-	if (unlikely(!desc->action || irqd_irq_disabled(&desc->irq_data)))
+	if (unlikely(!desc->action || irqd_irq_disabled(&desc->irq_data))) {
+		desc->istate |= IRQS_PENDING;
 		goto out_unlock;
+	}
 
 	handle_irq_event(desc);
 
diff --git a/kernel/irq/internals.h b/kernel/irq/internals.h
index 8e5c56b..001fa5b 100644
--- a/kernel/irq/internals.h
+++ b/kernel/irq/internals.h
@@ -101,6 +101,9 @@
 
 extern void irq_set_thread_affinity(struct irq_desc *desc);
 
+extern int irq_do_set_affinity(struct irq_data *data,
+			       const struct cpumask *dest, bool force);
+
 /* Inline functions for support of irq chips on slow busses */
 static inline void chip_bus_lock(struct irq_desc *desc)
 {
diff --git a/kernel/irq/manage.c b/kernel/irq/manage.c
index bb32326..8c54823 100644
--- a/kernel/irq/manage.c
+++ b/kernel/irq/manage.c
@@ -7,6 +7,8 @@
  * This file contains driver APIs to the irq subsystem.
  */
 
+#define pr_fmt(fmt) "genirq: " fmt
+
 #include <linux/irq.h>
 #include <linux/kthread.h>
 #include <linux/module.h>
@@ -14,6 +16,7 @@
 #include <linux/interrupt.h>
 #include <linux/slab.h>
 #include <linux/sched.h>
+#include <linux/task_work.h>
 
 #include "internals.h"
 
@@ -139,6 +142,25 @@
 irq_get_pending(struct cpumask *mask, struct irq_desc *desc) { }
 #endif
 
+int irq_do_set_affinity(struct irq_data *data, const struct cpumask *mask,
+			bool force)
+{
+	struct irq_desc *desc = irq_data_to_desc(data);
+	struct irq_chip *chip = irq_data_get_irq_chip(data);
+	int ret;
+
+	ret = chip->irq_set_affinity(data, mask, false);
+	switch (ret) {
+	case IRQ_SET_MASK_OK:
+		cpumask_copy(data->affinity, mask);
+	case IRQ_SET_MASK_OK_NOCOPY:
+		irq_set_thread_affinity(desc);
+		ret = 0;
+	}
+
+	return ret;
+}
+
 int __irq_set_affinity_locked(struct irq_data *data, const struct cpumask *mask)
 {
 	struct irq_chip *chip = irq_data_get_irq_chip(data);
@@ -149,14 +171,7 @@
 		return -EINVAL;
 
 	if (irq_can_move_pcntxt(data)) {
-		ret = chip->irq_set_affinity(data, mask, false);
-		switch (ret) {
-		case IRQ_SET_MASK_OK:
-			cpumask_copy(data->affinity, mask);
-		case IRQ_SET_MASK_OK_NOCOPY:
-			irq_set_thread_affinity(desc);
-			ret = 0;
-		}
+		ret = irq_do_set_affinity(data, mask, false);
 	} else {
 		irqd_set_move_pending(data);
 		irq_copy_pending(desc, mask);
@@ -280,9 +295,8 @@
 static int
 setup_affinity(unsigned int irq, struct irq_desc *desc, struct cpumask *mask)
 {
-	struct irq_chip *chip = irq_desc_get_chip(desc);
 	struct cpumask *set = irq_default_affinity;
-	int ret, node = desc->irq_data.node;
+	int node = desc->irq_data.node;
 
 	/* Excludes PER_CPU and NO_BALANCE interrupts */
 	if (!irq_can_set_affinity(irq))
@@ -308,13 +322,7 @@
 		if (cpumask_intersects(mask, nodemask))
 			cpumask_and(mask, mask, nodemask);
 	}
-	ret = chip->irq_set_affinity(&desc->irq_data, mask, false);
-	switch (ret) {
-	case IRQ_SET_MASK_OK:
-		cpumask_copy(desc->irq_data.affinity, mask);
-	case IRQ_SET_MASK_OK_NOCOPY:
-		irq_set_thread_affinity(desc);
-	}
+	irq_do_set_affinity(&desc->irq_data, mask, false);
 	return 0;
 }
 #else
@@ -565,7 +573,7 @@
 		 * IRQF_TRIGGER_* but the PIC does not support multiple
 		 * flow-types?
 		 */
-		pr_debug("genirq: No set_type function for IRQ %d (%s)\n", irq,
+		pr_debug("No set_type function for IRQ %d (%s)\n", irq,
 			 chip ? (chip->name ? : "unknown") : "unknown");
 		return 0;
 	}
@@ -600,7 +608,7 @@
 		ret = 0;
 		break;
 	default:
-		pr_err("genirq: Setting trigger mode %lu for irq %u failed (%pF)\n",
+		pr_err("Setting trigger mode %lu for irq %u failed (%pF)\n",
 		       flags, irq, chip->irq_set_type);
 	}
 	if (unmask)
@@ -773,11 +781,39 @@
 		wake_up(&desc->wait_for_threads);
 }
 
+static void irq_thread_dtor(struct task_work *unused)
+{
+	struct task_struct *tsk = current;
+	struct irq_desc *desc;
+	struct irqaction *action;
+
+	if (WARN_ON_ONCE(!(current->flags & PF_EXITING)))
+		return;
+
+	action = kthread_data(tsk);
+
+	pr_err("exiting task \"%s\" (%d) is an active IRQ thread (irq %d)\n",
+	       tsk->comm ? tsk->comm : "", tsk->pid, action->irq);
+
+
+	desc = irq_to_desc(action->irq);
+	/*
+	 * If IRQTF_RUNTHREAD is set, we need to decrement
+	 * desc->threads_active and wake possible waiters.
+	 */
+	if (test_and_clear_bit(IRQTF_RUNTHREAD, &action->thread_flags))
+		wake_threads_waitq(desc);
+
+	/* Prevent a stale desc->threads_oneshot */
+	irq_finalize_oneshot(desc, action);
+}
+
 /*
  * Interrupt handler thread
  */
 static int irq_thread(void *data)
 {
+	struct task_work on_exit_work;
 	static const struct sched_param param = {
 		.sched_priority = MAX_USER_RT_PRIO/2,
 	};
@@ -793,7 +829,9 @@
 		handler_fn = irq_thread_fn;
 
 	sched_setscheduler(current, SCHED_FIFO, &param);
-	current->irq_thread = 1;
+
+	init_task_work(&on_exit_work, irq_thread_dtor, NULL);
+	task_work_add(current, &on_exit_work, false);
 
 	while (!irq_wait_for_interrupt(action)) {
 		irqreturn_t action_ret;
@@ -815,44 +853,11 @@
 	 * cannot touch the oneshot mask at this point anymore as
 	 * __setup_irq() might have given out currents thread_mask
 	 * again.
-	 *
-	 * Clear irq_thread. Otherwise exit_irq_thread() would make
-	 * fuzz about an active irq thread going into nirvana.
 	 */
-	current->irq_thread = 0;
+	task_work_cancel(current, irq_thread_dtor);
 	return 0;
 }
 
-/*
- * Called from do_exit()
- */
-void exit_irq_thread(void)
-{
-	struct task_struct *tsk = current;
-	struct irq_desc *desc;
-	struct irqaction *action;
-
-	if (!tsk->irq_thread)
-		return;
-
-	action = kthread_data(tsk);
-
-	pr_err("genirq: exiting task \"%s\" (%d) is an active IRQ thread (irq %d)\n",
-	       tsk->comm ? tsk->comm : "", tsk->pid, action->irq);
-
-	desc = irq_to_desc(action->irq);
-
-	/*
-	 * If IRQTF_RUNTHREAD is set, we need to decrement
-	 * desc->threads_active and wake possible waiters.
-	 */
-	if (test_and_clear_bit(IRQTF_RUNTHREAD, &action->thread_flags))
-		wake_threads_waitq(desc);
-
-	/* Prevent a stale desc->threads_oneshot */
-	irq_finalize_oneshot(desc, action);
-}
-
 static void irq_setup_forced_threading(struct irqaction *new)
 {
 	if (!force_irqthreads)
@@ -1044,7 +1049,7 @@
 		 * has. The type flags are unreliable as the
 		 * underlying chip implementation can override them.
 		 */
-		pr_err("genirq: Threaded irq requested with handler=NULL and !ONESHOT for irq %d\n",
+		pr_err("Threaded irq requested with handler=NULL and !ONESHOT for irq %d\n",
 		       irq);
 		ret = -EINVAL;
 		goto out_mask;
@@ -1095,7 +1100,7 @@
 
 		if (nmsk != omsk)
 			/* hope the handler works with current  trigger mode */
-			pr_warning("genirq: irq %d uses trigger mode %u; requested %u\n",
+			pr_warning("irq %d uses trigger mode %u; requested %u\n",
 				   irq, nmsk, omsk);
 	}
 
@@ -1133,7 +1138,7 @@
 
 mismatch:
 	if (!(new->flags & IRQF_PROBE_SHARED)) {
-		pr_err("genirq: Flags mismatch irq %d. %08x (%s) vs. %08x (%s)\n",
+		pr_err("Flags mismatch irq %d. %08x (%s) vs. %08x (%s)\n",
 		       irq, new->flags, new->name, old->flags, old->name);
 #ifdef CONFIG_DEBUG_SHIRQ
 		dump_stack();
diff --git a/kernel/irq/migration.c b/kernel/irq/migration.c
index c3c8975..ca3f4aa 100644
--- a/kernel/irq/migration.c
+++ b/kernel/irq/migration.c
@@ -42,17 +42,8 @@
 	 * For correct operation this depends on the caller
 	 * masking the irqs.
 	 */
-	if (likely(cpumask_any_and(desc->pending_mask, cpu_online_mask)
-		   < nr_cpu_ids)) {
-		int ret = chip->irq_set_affinity(&desc->irq_data,
-						 desc->pending_mask, false);
-		switch (ret) {
-		case IRQ_SET_MASK_OK:
-			cpumask_copy(desc->irq_data.affinity, desc->pending_mask);
-		case IRQ_SET_MASK_OK_NOCOPY:
-			irq_set_thread_affinity(desc);
-		}
-	}
+	if (cpumask_any_and(desc->pending_mask, cpu_online_mask) < nr_cpu_ids)
+		irq_do_set_affinity(&desc->irq_data, desc->pending_mask, false);
 
 	cpumask_clear(desc->pending_mask);
 }
diff --git a/kernel/kallsyms.c b/kernel/kallsyms.c
index 079f1d3..2169fee 100644
--- a/kernel/kallsyms.c
+++ b/kernel/kallsyms.c
@@ -343,7 +343,7 @@
 
 /* Look up a kernel symbol and return it in a text buffer. */
 static int __sprint_symbol(char *buffer, unsigned long address,
-			   int symbol_offset)
+			   int symbol_offset, int add_offset)
 {
 	char *modname;
 	const char *name;
@@ -358,13 +358,13 @@
 	if (name != buffer)
 		strcpy(buffer, name);
 	len = strlen(buffer);
-	buffer += len;
 	offset -= symbol_offset;
 
+	if (add_offset)
+		len += sprintf(buffer + len, "+%#lx/%#lx", offset, size);
+
 	if (modname)
-		len += sprintf(buffer, "+%#lx/%#lx [%s]", offset, size, modname);
-	else
-		len += sprintf(buffer, "+%#lx/%#lx", offset, size);
+		len += sprintf(buffer + len, " [%s]", modname);
 
 	return len;
 }
@@ -382,12 +382,28 @@
  */
 int sprint_symbol(char *buffer, unsigned long address)
 {
-	return __sprint_symbol(buffer, address, 0);
+	return __sprint_symbol(buffer, address, 0, 1);
 }
-
 EXPORT_SYMBOL_GPL(sprint_symbol);
 
 /**
+ * sprint_symbol_no_offset - Look up a kernel symbol and return it in a text buffer
+ * @buffer: buffer to be stored
+ * @address: address to lookup
+ *
+ * This function looks up a kernel symbol with @address and stores its name
+ * and module name to @buffer if possible. If no symbol was found, just saves
+ * its @address as is.
+ *
+ * This function returns the number of bytes stored in @buffer.
+ */
+int sprint_symbol_no_offset(char *buffer, unsigned long address)
+{
+	return __sprint_symbol(buffer, address, 0, 0);
+}
+EXPORT_SYMBOL_GPL(sprint_symbol_no_offset);
+
+/**
  * sprint_backtrace - Look up a backtrace symbol and return it in a text buffer
  * @buffer: buffer to be stored
  * @address: address to lookup
@@ -403,7 +419,7 @@
  */
 int sprint_backtrace(char *buffer, unsigned long address)
 {
-	return __sprint_symbol(buffer, address, -1);
+	return __sprint_symbol(buffer, address, -1, 1);
 }
 
 /* Look up a kernel symbol and print it to the kernel messages. */
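
A sketch of how a kernel caller might use the new helper next to the existing one; the address and the strings in the comments are illustrative only.

	#include <linux/kallsyms.h>
	#include <linux/printk.h>

	static void example_report(unsigned long addr)
	{
		char buf[KSYM_SYMBOL_LEN];

		sprint_symbol(buf, addr);
		pr_info("with offset:    %s\n", buf);	/* e.g. "queue_work+0x20/0x60" */

		sprint_symbol_no_offset(buf, addr);
		pr_info("without offset: %s\n", buf);	/* e.g. "queue_work" */
	}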
diff --git a/kernel/kcmp.c b/kernel/kcmp.c
new file mode 100644
index 0000000..30b7b22
--- /dev/null
+++ b/kernel/kcmp.c
@@ -0,0 +1,196 @@
+#include <linux/kernel.h>
+#include <linux/syscalls.h>
+#include <linux/fdtable.h>
+#include <linux/string.h>
+#include <linux/random.h>
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/errno.h>
+#include <linux/cache.h>
+#include <linux/bug.h>
+#include <linux/err.h>
+#include <linux/kcmp.h>
+
+#include <asm/unistd.h>
+
+/*
+ * We don't expose the real in-memory order of objects for security reasons.
+ * But still, the comparison results should be suitable for sorting. So we
+ * obfuscate the kernel pointer values and compare the products instead.
+ *
+ * The obfuscation is done in two steps. First we xor the kernel pointer with
+ * a random value, which puts the pointer into a new position in a reordered space.
+ * Secondly we multiply the xor product by a large odd random number to
+ * permute its bits even more (the odd multiplier guarantees that the product
+ * is unique even after the high bits are truncated, since any odd number is
+ * relatively prime to 2^n).
+ *
+ * Note also that the obfuscation itself is invisible to userspace and if needed
+ * it can be changed to an alternate scheme.
+ */
+static unsigned long cookies[KCMP_TYPES][2] __read_mostly;
+
+static long kptr_obfuscate(long v, int type)
+{
+	return (v ^ cookies[type][0]) * cookies[type][1];
+}
+
+/*
+ * 0 - equal, i.e. v1 = v2
+ * 1 - less than, i.e. v1 < v2
+ * 2 - greater than, i.e. v1 > v2
+ * 3 - not equal but ordering unavailable (reserved for future)
+ */
+static int kcmp_ptr(void *v1, void *v2, enum kcmp_type type)
+{
+	long ret;
+
+	ret = kptr_obfuscate((long)v1, type) - kptr_obfuscate((long)v2, type);
+
+	return (ret < 0) | ((ret > 0) << 1);
+}
+
+/* The caller must have pinned the task */
+static struct file *
+get_file_raw_ptr(struct task_struct *task, unsigned int idx)
+{
+	struct file *file = NULL;
+
+	task_lock(task);
+	rcu_read_lock();
+
+	if (task->files)
+		file = fcheck_files(task->files, idx);
+
+	rcu_read_unlock();
+	task_unlock(task);
+
+	return file;
+}
+
+static void kcmp_unlock(struct mutex *m1, struct mutex *m2)
+{
+	if (likely(m2 != m1))
+		mutex_unlock(m2);
+	mutex_unlock(m1);
+}
+
+static int kcmp_lock(struct mutex *m1, struct mutex *m2)
+{
+	int err;
+
+	if (m2 > m1)
+		swap(m1, m2);
+
+	err = mutex_lock_killable(m1);
+	if (!err && likely(m1 != m2)) {
+		err = mutex_lock_killable_nested(m2, SINGLE_DEPTH_NESTING);
+		if (err)
+			mutex_unlock(m1);
+	}
+
+	return err;
+}
+
+SYSCALL_DEFINE5(kcmp, pid_t, pid1, pid_t, pid2, int, type,
+		unsigned long, idx1, unsigned long, idx2)
+{
+	struct task_struct *task1, *task2;
+	int ret;
+
+	rcu_read_lock();
+
+	/*
+	 * Tasks are looked up in caller's PID namespace only.
+	 */
+	task1 = find_task_by_vpid(pid1);
+	task2 = find_task_by_vpid(pid2);
+	if (!task1 || !task2)
+		goto err_no_task;
+
+	get_task_struct(task1);
+	get_task_struct(task2);
+
+	rcu_read_unlock();
+
+	/*
+	 * One should have enough rights to inspect task details.
+	 */
+	ret = kcmp_lock(&task1->signal->cred_guard_mutex,
+			&task2->signal->cred_guard_mutex);
+	if (ret)
+		goto err;
+	if (!ptrace_may_access(task1, PTRACE_MODE_READ) ||
+	    !ptrace_may_access(task2, PTRACE_MODE_READ)) {
+		ret = -EPERM;
+		goto err_unlock;
+	}
+
+	switch (type) {
+	case KCMP_FILE: {
+		struct file *filp1, *filp2;
+
+		filp1 = get_file_raw_ptr(task1, idx1);
+		filp2 = get_file_raw_ptr(task2, idx2);
+
+		if (filp1 && filp2)
+			ret = kcmp_ptr(filp1, filp2, KCMP_FILE);
+		else
+			ret = -EBADF;
+		break;
+	}
+	case KCMP_VM:
+		ret = kcmp_ptr(task1->mm, task2->mm, KCMP_VM);
+		break;
+	case KCMP_FILES:
+		ret = kcmp_ptr(task1->files, task2->files, KCMP_FILES);
+		break;
+	case KCMP_FS:
+		ret = kcmp_ptr(task1->fs, task2->fs, KCMP_FS);
+		break;
+	case KCMP_SIGHAND:
+		ret = kcmp_ptr(task1->sighand, task2->sighand, KCMP_SIGHAND);
+		break;
+	case KCMP_IO:
+		ret = kcmp_ptr(task1->io_context, task2->io_context, KCMP_IO);
+		break;
+	case KCMP_SYSVSEM:
+#ifdef CONFIG_SYSVIPC
+		ret = kcmp_ptr(task1->sysvsem.undo_list,
+			       task2->sysvsem.undo_list,
+			       KCMP_SYSVSEM);
+#else
+		ret = -EOPNOTSUPP;
+#endif
+		break;
+	default:
+		ret = -EINVAL;
+		break;
+	}
+
+err_unlock:
+	kcmp_unlock(&task1->signal->cred_guard_mutex,
+		    &task2->signal->cred_guard_mutex);
+err:
+	put_task_struct(task1);
+	put_task_struct(task2);
+
+	return ret;
+
+err_no_task:
+	rcu_read_unlock();
+	return -ESRCH;
+}
+
+static __init int kcmp_cookies_init(void)
+{
+	int i;
+
+	get_random_bytes(cookies, sizeof(cookies));
+
+	for (i = 0; i < KCMP_TYPES; i++)
+		cookies[i][1] |= (~(~0UL >>  1) | 1);
+
+	return 0;
+}
+arch_initcall(kcmp_cookies_init);
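As the comment in kcmp.c explains, the syscall never reveals raw kernel pointers: a return of 0 means the two objects are the same, 1 and 2 give a stable but obfuscated ordering (useful only for sorting), and 3 is reserved. The following hedged userspace sketch checks whether two processes share a file table; it assumes __NR_kcmp is wired up for the architecture and that <linux/kcmp.h> provides the KCMP_* constants, and the kcmp() wrapper is illustrative.

#include <stdio.h>
#include <stdlib.h>
#include <unistd.h>
#include <sys/syscall.h>
#include <linux/kcmp.h>

static int kcmp(pid_t pid1, pid_t pid2, int type,
		unsigned long idx1, unsigned long idx2)
{
	return syscall(__NR_kcmp, pid1, pid2, type, idx1, idx2);
}

int main(int argc, char **argv)
{
	pid_t a, b;
	int ret;

	if (argc < 3)
		return 1;

	a = atoi(argv[1]);
	b = atoi(argv[2]);

	ret = kcmp(a, b, KCMP_FILES, 0, 0);
	if (ret == 0)
		printf("%d and %d share a files_struct\n", a, b);
	else if (ret == 1 || ret == 2)
		printf("%d and %d have distinct file tables\n", a, b);
	else if (ret < 0)
		perror("kcmp");		/* ESRCH, EPERM, EINVAL, ... */
	return 0;
}
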
diff --git a/kernel/kmod.c b/kernel/kmod.c
index 05698a7..ff2c7cb 100644
--- a/kernel/kmod.c
+++ b/kernel/kmod.c
@@ -221,13 +221,12 @@
 	return 0;
 }
 
-void call_usermodehelper_freeinfo(struct subprocess_info *info)
+static void call_usermodehelper_freeinfo(struct subprocess_info *info)
 {
 	if (info->cleanup)
 		(*info->cleanup)(info);
 	kfree(info);
 }
-EXPORT_SYMBOL(call_usermodehelper_freeinfo);
 
 static void umh_complete(struct subprocess_info *sub_info)
 {
@@ -410,7 +409,7 @@
 
 /**
  * __usermodehelper_set_disable_depth - Modify usermodehelper_disabled.
- * depth: New value to assign to usermodehelper_disabled.
+ * @depth: New value to assign to usermodehelper_disabled.
  *
  * Change the value of usermodehelper_disabled (under umhelper_sem locked for
  * writing) and wakeup tasks waiting for it to change.
@@ -479,6 +478,7 @@
  * structure.  This should be passed to call_usermodehelper_exec to
  * exec the process and free the structure.
  */
+static
 struct subprocess_info *call_usermodehelper_setup(char *path, char **argv,
 						  char **envp, gfp_t gfp_mask)
 {
@@ -494,7 +494,6 @@
   out:
 	return sub_info;
 }
-EXPORT_SYMBOL(call_usermodehelper_setup);
 
 /**
  * call_usermodehelper_setfns - set a cleanup/init function
@@ -512,6 +511,7 @@
  * Function must be runnable in either a process context or the
  * context in which call_usermodehelper_exec is called.
  */
+static
 void call_usermodehelper_setfns(struct subprocess_info *info,
 		    int (*init)(struct subprocess_info *info, struct cred *new),
 		    void (*cleanup)(struct subprocess_info *info),
@@ -521,7 +521,6 @@
 	info->init = init;
 	info->data = data;
 }
-EXPORT_SYMBOL(call_usermodehelper_setfns);
 
 /**
  * call_usermodehelper_exec - start a usermode application
@@ -535,6 +534,7 @@
  * asynchronously if wait is not set, and runs as a child of keventd.
  * (ie. it runs with full root capabilities).
  */
+static
 int call_usermodehelper_exec(struct subprocess_info *sub_info, int wait)
 {
 	DECLARE_COMPLETION_ONSTACK(done);
@@ -576,7 +576,25 @@
 	helper_unlock();
 	return retval;
 }
-EXPORT_SYMBOL(call_usermodehelper_exec);
+
+int call_usermodehelper_fns(
+	char *path, char **argv, char **envp, int wait,
+	int (*init)(struct subprocess_info *info, struct cred *new),
+	void (*cleanup)(struct subprocess_info *), void *data)
+{
+	struct subprocess_info *info;
+	gfp_t gfp_mask = (wait == UMH_NO_WAIT) ? GFP_ATOMIC : GFP_KERNEL;
+
+	info = call_usermodehelper_setup(path, argv, envp, gfp_mask);
+
+	if (info == NULL)
+		return -ENOMEM;
+
+	call_usermodehelper_setfns(info, init, cleanup, data);
+
+	return call_usermodehelper_exec(info, wait);
+}
+EXPORT_SYMBOL(call_usermodehelper_fns);
 
 static int proc_cap_handler(struct ctl_table *table, int write,
 			 void __user *buffer, size_t *lenp, loff_t *ppos)
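With the setup/setfns/exec helpers made static above, call_usermodehelper_fns() is the exported entry point that allocates the subprocess_info, attaches the optional init/cleanup callbacks and runs the helper in one call. A hedged sketch of a caller follows; it assumes only the prototype added here plus the UMH_WAIT_PROC constant from <linux/kmod.h>, and the helper path and run_helper() wrapper are illustrative.

#include <linux/kmod.h>

static int run_helper(void)
{
	/* Hypothetical helper binary and a minimal environment. */
	char *argv[] = { "/sbin/demo-helper", "--oneshot", NULL };
	char *envp[] = { "HOME=/", "PATH=/sbin:/usr/sbin:/bin:/usr/bin", NULL };

	/* No init/cleanup callbacks and no private data are needed here;
	 * UMH_WAIT_PROC waits for the helper to exit and returns its status. */
	return call_usermodehelper_fns("/sbin/demo-helper", argv, envp,
				       UMH_WAIT_PROC, NULL, NULL, NULL);
}
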
diff --git a/kernel/lglock.c b/kernel/lglock.c
new file mode 100644
index 0000000..6535a66
--- /dev/null
+++ b/kernel/lglock.c
@@ -0,0 +1,89 @@
+/* See include/linux/lglock.h for description */
+#include <linux/module.h>
+#include <linux/lglock.h>
+#include <linux/cpu.h>
+#include <linux/string.h>
+
+/*
+ * Note there is no uninit, so lglocks cannot be defined in
+ * modules (but it's fine to use them from there).
+ * It could be added though; just undo lg_lock_init.
+ */
+
+void lg_lock_init(struct lglock *lg, char *name)
+{
+	LOCKDEP_INIT_MAP(&lg->lock_dep_map, name, &lg->lock_key, 0);
+}
+EXPORT_SYMBOL(lg_lock_init);
+
+void lg_local_lock(struct lglock *lg)
+{
+	arch_spinlock_t *lock;
+
+	preempt_disable();
+	rwlock_acquire_read(&lg->lock_dep_map, 0, 0, _RET_IP_);
+	lock = this_cpu_ptr(lg->lock);
+	arch_spin_lock(lock);
+}
+EXPORT_SYMBOL(lg_local_lock);
+
+void lg_local_unlock(struct lglock *lg)
+{
+	arch_spinlock_t *lock;
+
+	rwlock_release(&lg->lock_dep_map, 1, _RET_IP_);
+	lock = this_cpu_ptr(lg->lock);
+	arch_spin_unlock(lock);
+	preempt_enable();
+}
+EXPORT_SYMBOL(lg_local_unlock);
+
+void lg_local_lock_cpu(struct lglock *lg, int cpu)
+{
+	arch_spinlock_t *lock;
+
+	preempt_disable();
+	rwlock_acquire_read(&lg->lock_dep_map, 0, 0, _RET_IP_);
+	lock = per_cpu_ptr(lg->lock, cpu);
+	arch_spin_lock(lock);
+}
+EXPORT_SYMBOL(lg_local_lock_cpu);
+
+void lg_local_unlock_cpu(struct lglock *lg, int cpu)
+{
+	arch_spinlock_t *lock;
+
+	rwlock_release(&lg->lock_dep_map, 1, _RET_IP_);
+	lock = per_cpu_ptr(lg->lock, cpu);
+	arch_spin_unlock(lock);
+	preempt_enable();
+}
+EXPORT_SYMBOL(lg_local_unlock_cpu);
+
+void lg_global_lock(struct lglock *lg)
+{
+	int i;
+
+	preempt_disable();
+	rwlock_acquire(&lg->lock_dep_map, 0, 0, _RET_IP_);
+	for_each_possible_cpu(i) {
+		arch_spinlock_t *lock;
+		lock = per_cpu_ptr(lg->lock, i);
+		arch_spin_lock(lock);
+	}
+}
+EXPORT_SYMBOL(lg_global_lock);
+
+void lg_global_unlock(struct lglock *lg)
+{
+	int i;
+
+	rwlock_release(&lg->lock_dep_map, 1, _RET_IP_);
+	for_each_possible_cpu(i) {
+		arch_spinlock_t *lock;
+		lock = per_cpu_ptr(lg->lock, i);
+		arch_spin_unlock(lock);
+	}
+	preempt_enable();
+}
+EXPORT_SYMBOL(lg_global_unlock);
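kernel/lglock.c turns the former lglock macros into real functions operating on a struct lglock: the per-CPU lock is taken on the local fast path, and the global variants take every CPU's lock for the rare cross-CPU walk. A hedged usage sketch, assuming the DEFINE_LGLOCK() macro and declarations in <linux/lglock.h>; the lock name and both demo functions are illustrative.

#include <linux/lglock.h>

/* lg_lock_init(&demo_lglock, "demo_lglock") can additionally be called once
 * at init time to set up the lockdep annotation. */
DEFINE_LGLOCK(demo_lglock);

static void demo_fast_path(void)
{
	/* Common case: only this CPU's spinlock is taken, so there is no
	 * cross-CPU cache-line bouncing. */
	lg_local_lock(&demo_lglock);
	/* ... touch this CPU's share of the protected data ... */
	lg_local_unlock(&demo_lglock);
}

static void demo_slow_path(void)
{
	/* Rare case: take every possible CPU's spinlock to obtain a
	 * consistent global view. */
	lg_global_lock(&demo_lglock);
	/* ... walk the data of all CPUs ... */
	lg_global_unlock(&demo_lglock);
}
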
diff --git a/kernel/panic.c b/kernel/panic.c
index 8ed89a1..d2a5f4e 100644
--- a/kernel/panic.c
+++ b/kernel/panic.c
@@ -27,7 +27,7 @@
 #define PANIC_TIMER_STEP 100
 #define PANIC_BLINK_SPD 18
 
-int panic_on_oops;
+int panic_on_oops = CONFIG_PANIC_ON_OOPS_VALUE;
 static unsigned long tainted_mask;
 static int pause_on_oops;
 static int pause_on_oops_flag;
@@ -108,8 +108,6 @@
 	 */
 	crash_kexec(NULL);
 
-	kmsg_dump(KMSG_DUMP_PANIC);
-
 	/*
 	 * Note smp_send_stop is the usual smp shutdown function, which
 	 * unfortunately means it may not be hardened to work in a panic
@@ -117,6 +115,8 @@
 	 */
 	smp_send_stop();
 
+	kmsg_dump(KMSG_DUMP_PANIC);
+
 	atomic_notifier_call_chain(&panic_notifier_list, 0, buf);
 
 	bust_spinlocks(0);
diff --git a/kernel/pid_namespace.c b/kernel/pid_namespace.c
index 57bc1fd..b3c7fd5 100644
--- a/kernel/pid_namespace.c
+++ b/kernel/pid_namespace.c
@@ -149,7 +149,12 @@
 {
 	int nr;
 	int rc;
-	struct task_struct *task;
+	struct task_struct *task, *me = current;
+
+	/* Ignore SIGCHLD causing any terminated children to autoreap */
+	spin_lock_irq(&me->sighand->siglock);
+	me->sighand->action[SIGCHLD - 1].sa.sa_handler = SIG_IGN;
+	spin_unlock_irq(&me->sighand->siglock);
 
 	/*
 	 * The last thread in the cgroup-init thread group is terminating.
@@ -179,11 +184,31 @@
 	}
 	read_unlock(&tasklist_lock);
 
+	/* Firstly reap the EXIT_ZOMBIE children we may have. */
 	do {
 		clear_thread_flag(TIF_SIGPENDING);
 		rc = sys_wait4(-1, NULL, __WALL, NULL);
 	} while (rc != -ECHILD);
 
+	/*
+	 * sys_wait4() above can't reap the TASK_DEAD children.
+	 * Make sure they all go away, see __unhash_process().
+	 */
+	for (;;) {
+		bool need_wait = false;
+
+		read_lock(&tasklist_lock);
+		if (!list_empty(&current->children)) {
+			__set_current_state(TASK_UNINTERRUPTIBLE);
+			need_wait = true;
+		}
+		read_unlock(&tasklist_lock);
+
+		if (!need_wait)
+			break;
+		schedule();
+	}
+
 	if (pid_ns->reboot)
 		current->signal->group_exit_code = pid_ns->reboot;
 
@@ -191,6 +216,7 @@
 	return;
 }
 
+#ifdef CONFIG_CHECKPOINT_RESTORE
 static int pid_ns_ctl_handler(struct ctl_table *table, int write,
 		void __user *buffer, size_t *lenp, loff_t *ppos)
 {
@@ -218,8 +244,8 @@
 	},
 	{ }
 };
-
 static struct ctl_path kern_path[] = { { .procname = "kernel", }, { } };
+#endif	/* CONFIG_CHECKPOINT_RESTORE */
 
 int reboot_pid_ns(struct pid_namespace *pid_ns, int cmd)
 {
@@ -253,7 +279,10 @@
 static __init int pid_namespaces_init(void)
 {
 	pid_ns_cachep = KMEM_CACHE(pid_namespace, SLAB_PANIC);
+
+#ifdef CONFIG_CHECKPOINT_RESTORE
 	register_sysctl_paths(kern_path, pid_ns_ctl_table);
+#endif
 	return 0;
 }
 
diff --git a/kernel/printk.c b/kernel/printk.c
index 32462d2..dba1821 100644
--- a/kernel/printk.c
+++ b/kernel/printk.c
@@ -193,12 +193,19 @@
  * separated by ',', and find the message after the ';' character.
  */
 
+enum log_flags {
+	LOG_DEFAULT = 0,
+	LOG_NOCONS = 1,		/* already flushed, do not print to console */
+};
+
 struct log {
 	u64 ts_nsec;		/* timestamp in nanoseconds */
 	u16 len;		/* length of entire record */
 	u16 text_len;		/* length of text buffer */
 	u16 dict_len;		/* length of dictionary buffer */
-	u16 level;		/* syslog level + facility */
+	u8 facility;		/* syslog facility */
+	u8 flags:5;		/* internal record flags */
+	u8 level:3;		/* syslog level */
 };
 
 /*
@@ -227,10 +234,10 @@
 #define LOG_LINE_MAX 1024
 
 /* record buffer */
-#if !defined(CONFIG_64BIT) || defined(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS)
+#if defined(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS)
 #define LOG_ALIGN 4
 #else
-#define LOG_ALIGN 8
+#define LOG_ALIGN __alignof__(struct log)
 #endif
 #define __LOG_BUF_LEN (1 << CONFIG_LOG_BUF_SHIFT)
 static char __log_buf[__LOG_BUF_LEN] __aligned(LOG_ALIGN);
@@ -286,6 +293,7 @@
 
 /* insert record into the buffer, discard old ones, update heads */
 static void log_store(int facility, int level,
+		      enum log_flags flags, u64 ts_nsec,
 		      const char *dict, u16 dict_len,
 		      const char *text, u16 text_len)
 {
@@ -329,8 +337,13 @@
 	msg->text_len = text_len;
 	memcpy(log_dict(msg), dict, dict_len);
 	msg->dict_len = dict_len;
-	msg->level = (facility << 3) | (level & 7);
-	msg->ts_nsec = local_clock();
+	msg->facility = facility;
+	msg->level = level & 7;
+	msg->flags = flags & 0x1f;
+	if (ts_nsec > 0)
+		msg->ts_nsec = ts_nsec;
+	else
+		msg->ts_nsec = local_clock();
 	memset(log_dict(msg) + dict_len, 0, pad_len);
 	msg->len = sizeof(struct log) + text_len + dict_len + pad_len;
 
@@ -414,7 +427,9 @@
 	if (!user)
 		return -EBADF;
 
-	mutex_lock(&user->lock);
+	ret = mutex_lock_interruptible(&user->lock);
+	if (ret)
+		return ret;
 	raw_spin_lock(&logbuf_lock);
 	while (user->seq == log_next_seq) {
 		if (file->f_flags & O_NONBLOCK) {
@@ -444,7 +459,7 @@
 	ts_usec = msg->ts_nsec;
 	do_div(ts_usec, 1000);
 	len = sprintf(user->buf, "%u,%llu,%llu;",
-		      msg->level, user->seq, ts_usec);
+		      (msg->facility << 3) | msg->level, user->seq, ts_usec);
 
 	/* escape non-printable characters */
 	for (i = 0; i < msg->text_len; i++) {
@@ -785,6 +800,21 @@
 #endif
 module_param_named(time, printk_time, bool, S_IRUGO | S_IWUSR);
 
+static size_t print_time(u64 ts, char *buf)
+{
+	unsigned long rem_nsec;
+
+	if (!printk_time)
+		return 0;
+
+	if (!buf)
+		return 15;
+
+	rem_nsec = do_div(ts, 1000000000);
+	return sprintf(buf, "[%5lu.%06lu] ",
+		       (unsigned long)ts, rem_nsec / 1000);
+}
+
 static size_t print_prefix(const struct log *msg, bool syslog, char *buf)
 {
 	size_t len = 0;
@@ -801,18 +831,7 @@
 		}
 	}
 
-	if (printk_time) {
-		if (buf) {
-			unsigned long long ts = msg->ts_nsec;
-			unsigned long rem_nsec = do_div(ts, 1000000000);
-
-			len += sprintf(buf + len, "[%5lu.%06lu] ",
-					 (unsigned long) ts, rem_nsec / 1000);
-		} else {
-			len += 15;
-		}
-	}
-
+	len += print_time(msg->ts_nsec, buf ? buf + len : NULL);
 	return len;
 }
 
@@ -860,26 +879,49 @@
 {
 	char *text;
 	struct log *msg;
-	int len;
+	int len = 0;
 
 	text = kmalloc(LOG_LINE_MAX, GFP_KERNEL);
 	if (!text)
 		return -ENOMEM;
 
-	raw_spin_lock_irq(&logbuf_lock);
-	if (syslog_seq < log_first_seq) {
-		/* messages are gone, move to first one */
-		syslog_seq = log_first_seq;
-		syslog_idx = log_first_idx;
-	}
-	msg = log_from_idx(syslog_idx);
-	len = msg_print_text(msg, true, text, LOG_LINE_MAX);
-	syslog_idx = log_next(syslog_idx);
-	syslog_seq++;
-	raw_spin_unlock_irq(&logbuf_lock);
+	while (size > 0) {
+		size_t n;
 
-	if (len > 0 && copy_to_user(buf, text, len))
-		len = -EFAULT;
+		raw_spin_lock_irq(&logbuf_lock);
+		if (syslog_seq < log_first_seq) {
+			/* messages are gone, move to first one */
+			syslog_seq = log_first_seq;
+			syslog_idx = log_first_idx;
+		}
+		if (syslog_seq == log_next_seq) {
+			raw_spin_unlock_irq(&logbuf_lock);
+			break;
+		}
+		msg = log_from_idx(syslog_idx);
+		n = msg_print_text(msg, true, text, LOG_LINE_MAX);
+		if (n <= size) {
+			syslog_idx = log_next(syslog_idx);
+			syslog_seq++;
+		} else
+			n = 0;
+		raw_spin_unlock_irq(&logbuf_lock);
+
+		if (!n)
+			break;
+
+		len += n;
+		size -= n;
+		buf += n;
+		n = copy_to_user(buf - n, text, n);
+
+		if (n) {
+			len -= n;
+			if (!len)
+				len = -EFAULT;
+			break;
+		}
+	}
 
 	kfree(text);
 	return len;
@@ -909,7 +951,7 @@
 		/*
 		 * Find first record that fits, including all following records,
 		 * into the user-provided buffer for this dump.
-		*/
+		 */
 		seq = clear_seq;
 		idx = clear_idx;
 		while (seq < log_next_seq) {
@@ -919,6 +961,8 @@
 			idx = log_next(idx);
 			seq++;
 		}
+
+		/* move first record forward until length fits into the buffer */
 		seq = clear_seq;
 		idx = clear_idx;
 		while (len > size && seq < log_next_seq) {
@@ -929,7 +973,7 @@
 			seq++;
 		}
 
-		/* last message in this dump */
+		/* last message fitting into this dump */
 		next_seq = log_next_seq;
 
 		len = 0;
@@ -974,6 +1018,7 @@
 {
 	bool clear = false;
 	static int saved_console_loglevel = -1;
+	static DEFINE_MUTEX(syslog_mutex);
 	int error;
 
 	error = check_syslog_permissions(type, from_file);
@@ -1000,11 +1045,17 @@
 			error = -EFAULT;
 			goto out;
 		}
-		error = wait_event_interruptible(log_wait,
-						 syslog_seq != log_next_seq);
+		error = mutex_lock_interruptible(&syslog_mutex);
 		if (error)
 			goto out;
+		error = wait_event_interruptible(log_wait,
+						 syslog_seq != log_next_seq);
+		if (error) {
+			mutex_unlock(&syslog_mutex);
+			goto out;
+		}
 		error = syslog_print(buf, len);
+		mutex_unlock(&syslog_mutex);
 		break;
 	/* Read/clear last kernel messages */
 	case SYSLOG_ACTION_READ_CLEAR:
@@ -1027,6 +1078,7 @@
 	/* Clear ring buffer */
 	case SYSLOG_ACTION_CLEAR:
 		syslog_print_all(NULL, 0, true);
+		break;
 	/* Disable logging to console */
 	case SYSLOG_ACTION_CONSOLE_OFF:
 		if (saved_console_loglevel == -1)
@@ -1259,15 +1311,92 @@
 	}
 }
 
+/*
+ * Continuation lines are buffered, and not committed to the record buffer
+ * until the line is complete or a race forces it. The line fragments,
+ * though, are printed immediately to the consoles to ensure everything has
+ * reached the console in case of a kernel crash.
+ */
+static struct cont {
+	char buf[LOG_LINE_MAX];
+	size_t len;			/* length == 0 means unused buffer */
+	size_t cons;			/* bytes written to console */
+	struct task_struct *owner;	/* task of first print */
+	u64 ts_nsec;			/* time of first print */
+	u8 level;			/* log level of first message */
+	u8 facility;			/* log facility of first message */
+	bool flushed:1;			/* buffer sealed and committed */
+} cont;
+
+static void cont_flush(void)
+{
+	if (cont.flushed)
+		return;
+	if (cont.len == 0)
+		return;
+
+	log_store(cont.facility, cont.level, LOG_NOCONS, cont.ts_nsec,
+		  NULL, 0, cont.buf, cont.len);
+
+	cont.flushed = true;
+}
+
+static bool cont_add(int facility, int level, const char *text, size_t len)
+{
+	if (cont.len && cont.flushed)
+		return false;
+
+	if (cont.len + len > sizeof(cont.buf)) {
+		cont_flush();
+		return false;
+	}
+
+	if (!cont.len) {
+		cont.facility = facility;
+		cont.level = level;
+		cont.owner = current;
+		cont.ts_nsec = local_clock();
+		cont.cons = 0;
+		cont.flushed = false;
+	}
+
+	memcpy(cont.buf + cont.len, text, len);
+	cont.len += len;
+	return true;
+}
+
+static size_t cont_print_text(char *text, size_t size)
+{
+	size_t textlen = 0;
+	size_t len;
+
+	if (cont.cons == 0) {
+		textlen += print_time(cont.ts_nsec, text);
+		size -= textlen;
+	}
+
+	len = cont.len - cont.cons;
+	if (len > 0) {
+		if (len+1 > size)
+			len = size-1;
+		memcpy(text + textlen, cont.buf + cont.cons, len);
+		textlen += len;
+		cont.cons = cont.len;
+	}
+
+	if (cont.flushed) {
+		text[textlen++] = '\n';
+		/* got everything, release buffer */
+		cont.len = 0;
+	}
+	return textlen;
+}
+
 asmlinkage int vprintk_emit(int facility, int level,
 			    const char *dict, size_t dictlen,
 			    const char *fmt, va_list args)
 {
 	static int recursion_bug;
-	static char cont_buf[LOG_LINE_MAX];
-	static size_t cont_len;
-	static int cont_level;
-	static struct task_struct *cont_task;
 	static char textbuf[LOG_LINE_MAX];
 	char *text = textbuf;
 	size_t text_len;
@@ -1313,7 +1442,8 @@
 		recursion_bug = 0;
 		printed_len += strlen(recursion_msg);
 		/* emit KERN_CRIT message */
-		log_store(0, 2, NULL, 0, recursion_msg, printed_len);
+		log_store(0, 2, LOG_DEFAULT, 0,
+			  NULL, 0, recursion_msg, printed_len);
 	}
 
 	/*
@@ -1351,55 +1481,37 @@
 	}
 
 	if (!newline) {
-		if (cont_len && (prefix || cont_task != current)) {
-			/*
-			 * Flush earlier buffer, which is either from a
-			 * different thread, or when we got a new prefix.
-			 */
-			log_store(facility, cont_level, NULL, 0, cont_buf, cont_len);
-			cont_len = 0;
-		}
+		/*
+		 * Flush the conflicting buffer. An earlier newline was missing,
+		 * or another task also prints continuation lines.
+		 */
+		if (cont.len && (prefix || cont.owner != current))
+			cont_flush();
 
-		if (!cont_len) {
-			cont_level = level;
-			cont_task = current;
-		}
-
-		/* buffer or append to earlier buffer from the same thread */
-		if (cont_len + text_len > sizeof(cont_buf))
-			text_len = sizeof(cont_buf) - cont_len;
-		memcpy(cont_buf + cont_len, text, text_len);
-		cont_len += text_len;
-	} else {
-		if (cont_len && cont_task == current) {
-			if (prefix) {
-				/*
-				 * New prefix from the same thread; flush. We
-				 * either got no earlier newline, or we race
-				 * with an interrupt.
-				 */
-				log_store(facility, cont_level,
-					  NULL, 0, cont_buf, cont_len);
-				cont_len = 0;
-			}
-
-			/* append to the earlier buffer and flush */
-			if (cont_len + text_len > sizeof(cont_buf))
-				text_len = sizeof(cont_buf) - cont_len;
-			memcpy(cont_buf + cont_len, text, text_len);
-			cont_len += text_len;
-			log_store(facility, cont_level,
-				  NULL, 0, cont_buf, cont_len);
-			cont_len = 0;
-			cont_task = NULL;
-			printed_len = cont_len;
-		} else {
-			/* ordinary single and terminated line */
-			log_store(facility, level,
+		/* buffer line if possible, otherwise store it right away */
+		if (!cont_add(facility, level, text, text_len))
+			log_store(facility, level, LOG_DEFAULT, 0,
 				  dict, dictlen, text, text_len);
-			printed_len = text_len;
+	} else {
+		bool stored = false;
+
+		/*
+		 * If an earlier newline was missing and it was the same task,
+		 * either merge it with the current buffer and flush, or if
+		 * there was a race with interrupts (prefix == true) then just
+		 * flush it out and store this line separately.
+		 */
+		if (cont.len && cont.owner == current) {
+			if (!prefix)
+				stored = cont_add(facility, level, text, text_len);
+			cont_flush();
 		}
+
+		if (!stored)
+			log_store(facility, level, LOG_DEFAULT, 0,
+				  dict, dictlen, text, text_len);
 	}
+	printed_len += text_len;
 
 	/*
 	 * Try to acquire and then immediately release the console semaphore.
@@ -1486,11 +1598,18 @@
 #else
 
 #define LOG_LINE_MAX 0
+static struct cont {
+	size_t len;
+	size_t cons;
+	u8 level;
+	bool flushed:1;
+} cont;
 static struct log *log_from_idx(u32 idx) { return NULL; }
 static u32 log_next(u32 idx) { return 0; }
 static void call_console_drivers(int level, const char *text, size_t len) {}
 static size_t msg_print_text(const struct log *msg, bool syslog,
 			     char *buf, size_t size) { return 0; }
+static size_t cont_print_text(char *text, size_t size) { return 0; }
 
 #endif /* CONFIG_PRINTK */
 
@@ -1782,6 +1901,7 @@
  */
 void console_unlock(void)
 {
+	static char text[LOG_LINE_MAX];
 	static u64 seen_seq;
 	unsigned long flags;
 	bool wake_klogd = false;
@@ -1794,10 +1914,23 @@
 
 	console_may_schedule = 0;
 
+	/* flush buffered message fragment immediately to console */
+	raw_spin_lock_irqsave(&logbuf_lock, flags);
+	if (cont.len && (cont.cons < cont.len || cont.flushed)) {
+		size_t len;
+
+		len = cont_print_text(text, sizeof(text));
+		raw_spin_unlock(&logbuf_lock);
+		stop_critical_timings();
+		call_console_drivers(cont.level, text, len);
+		start_critical_timings();
+		local_irq_restore(flags);
+	} else
+		raw_spin_unlock_irqrestore(&logbuf_lock, flags);
+
 again:
 	for (;;) {
 		struct log *msg;
-		static char text[LOG_LINE_MAX];
 		size_t len;
 		int level;
 
@@ -1812,13 +1945,22 @@
 			console_seq = log_first_seq;
 			console_idx = log_first_idx;
 		}
-
+skip:
 		if (console_seq == log_next_seq)
 			break;
 
 		msg = log_from_idx(console_idx);
-		level = msg->level & 7;
+		if (msg->flags & LOG_NOCONS) {
+			/*
+			 * Skip record we have buffered and already printed
+			 * directly to the console when we received it.
+			 */
+			console_idx = log_next(console_idx);
+			console_seq++;
+			goto skip;
+		}
 
+		level = msg->level;
 		len = msg_print_text(msg, false, text, sizeof(text));
 
 		console_idx = log_next(console_idx);
@@ -2300,48 +2442,210 @@
  * kmsg_dump - dump kernel log to kernel message dumpers.
  * @reason: the reason (oops, panic etc) for dumping
  *
- * Iterate through each of the dump devices and call the oops/panic
- * callbacks with the log buffer.
+ * Call each of the registered dumpers' dump() callbacks, which can
+ * retrieve the kmsg records with kmsg_dump_get_line() or
+ * kmsg_dump_get_buffer().
  */
 void kmsg_dump(enum kmsg_dump_reason reason)
 {
-	u64 idx;
 	struct kmsg_dumper *dumper;
-	const char *s1, *s2;
-	unsigned long l1, l2;
 	unsigned long flags;
 
 	if ((reason > KMSG_DUMP_OOPS) && !always_kmsg_dump)
 		return;
 
-	/* Theoretically, the log could move on after we do this, but
-	   there's not a lot we can do about that. The new messages
-	   will overwrite the start of what we dump. */
-
-	raw_spin_lock_irqsave(&logbuf_lock, flags);
-	if (syslog_seq < log_first_seq)
-		idx = syslog_idx;
-	else
-		idx = log_first_idx;
-
-	if (idx > log_next_idx) {
-		s1 = log_buf;
-		l1 = log_next_idx;
-
-		s2 = log_buf + idx;
-		l2 = log_buf_len - idx;
-	} else {
-		s1 = "";
-		l1 = 0;
-
-		s2 = log_buf + idx;
-		l2 = log_next_idx - idx;
-	}
-	raw_spin_unlock_irqrestore(&logbuf_lock, flags);
-
 	rcu_read_lock();
-	list_for_each_entry_rcu(dumper, &dump_list, list)
-		dumper->dump(dumper, reason, s1, l1, s2, l2);
+	list_for_each_entry_rcu(dumper, &dump_list, list) {
+		if (dumper->max_reason && reason > dumper->max_reason)
+			continue;
+
+		/* initialize iterator with data about the stored records */
+		dumper->active = true;
+
+		raw_spin_lock_irqsave(&logbuf_lock, flags);
+		dumper->cur_seq = clear_seq;
+		dumper->cur_idx = clear_idx;
+		dumper->next_seq = log_next_seq;
+		dumper->next_idx = log_next_idx;
+		raw_spin_unlock_irqrestore(&logbuf_lock, flags);
+
+		/* invoke dumper which will iterate over records */
+		dumper->dump(dumper, reason);
+
+		/* reset iterator */
+		dumper->active = false;
+	}
 	rcu_read_unlock();
 }
+
+/**
+ * kmsg_dump_get_line - retrieve one kmsg log line
+ * @dumper: registered kmsg dumper
+ * @syslog: include the "<4>" prefixes
+ * @line: buffer to copy the line to
+ * @size: maximum size of the buffer
+ * @len: length of line placed into buffer
+ *
+ * Start at the beginning of the kmsg buffer, with the oldest kmsg
+ * record, and copy one record into the provided buffer.
+ *
+ * Consecutive calls will return the next available record moving
+ * towards the end of the buffer with the youngest messages.
+ *
+ * A return value of FALSE indicates that there are no more records to
+ * read.
+ */
+bool kmsg_dump_get_line(struct kmsg_dumper *dumper, bool syslog,
+			char *line, size_t size, size_t *len)
+{
+	unsigned long flags;
+	struct log *msg;
+	size_t l = 0;
+	bool ret = false;
+
+	if (!dumper->active)
+		goto out;
+
+	raw_spin_lock_irqsave(&logbuf_lock, flags);
+	if (dumper->cur_seq < log_first_seq) {
+		/* messages are gone, move to first available one */
+		dumper->cur_seq = log_first_seq;
+		dumper->cur_idx = log_first_idx;
+	}
+
+	/* last entry */
+	if (dumper->cur_seq >= log_next_seq) {
+		raw_spin_unlock_irqrestore(&logbuf_lock, flags);
+		goto out;
+	}
+
+	msg = log_from_idx(dumper->cur_idx);
+	l = msg_print_text(msg, syslog,
+			      line, size);
+
+	dumper->cur_idx = log_next(dumper->cur_idx);
+	dumper->cur_seq++;
+	ret = true;
+	raw_spin_unlock_irqrestore(&logbuf_lock, flags);
+out:
+	if (len)
+		*len = l;
+	return ret;
+}
+EXPORT_SYMBOL_GPL(kmsg_dump_get_line);
+
+/**
+ * kmsg_dump_get_buffer - copy kmsg log lines
+ * @dumper: registered kmsg dumper
+ * @syslog: include the "<4>" prefixes
+ * @buf: buffer to copy the line to
+ * @size: maximum size of the buffer
+ * @len: length of line placed into buffer
+ *
+ * Start at the end of the kmsg buffer and fill the provided buffer
+ * with as many of the *youngest* kmsg records as fit into it.
+ * If the buffer is large enough, all available kmsg records will be
+ * copied with a single call.
+ *
+ * Consecutive calls will fill the buffer with the next block of
+ * available older records, not including the previously retrieved ones.
+ *
+ * A return value of FALSE indicates that there are no more records to
+ * read.
+ */
+bool kmsg_dump_get_buffer(struct kmsg_dumper *dumper, bool syslog,
+			  char *buf, size_t size, size_t *len)
+{
+	unsigned long flags;
+	u64 seq;
+	u32 idx;
+	u64 next_seq;
+	u32 next_idx;
+	size_t l = 0;
+	bool ret = false;
+
+	if (!dumper->active)
+		goto out;
+
+	raw_spin_lock_irqsave(&logbuf_lock, flags);
+	if (dumper->cur_seq < log_first_seq) {
+		/* messages are gone, move to first available one */
+		dumper->cur_seq = log_first_seq;
+		dumper->cur_idx = log_first_idx;
+	}
+
+	/* last entry */
+	if (dumper->cur_seq >= dumper->next_seq) {
+		raw_spin_unlock_irqrestore(&logbuf_lock, flags);
+		goto out;
+	}
+
+	/* calculate length of entire buffer */
+	seq = dumper->cur_seq;
+	idx = dumper->cur_idx;
+	while (seq < dumper->next_seq) {
+		struct log *msg = log_from_idx(idx);
+
+		l += msg_print_text(msg, true, NULL, 0);
+		idx = log_next(idx);
+		seq++;
+	}
+
+	/* move first record forward until length fits into the buffer */
+	seq = dumper->cur_seq;
+	idx = dumper->cur_idx;
+	while (l > size && seq < dumper->next_seq) {
+		struct log *msg = log_from_idx(idx);
+
+		l -= msg_print_text(msg, true, NULL, 0);
+		idx = log_next(idx);
+		seq++;
+	}
+
+	/* last message in next iteration */
+	next_seq = seq;
+	next_idx = idx;
+
+	l = 0;
+	while (seq < dumper->next_seq) {
+		struct log *msg = log_from_idx(idx);
+
+		l += msg_print_text(msg, syslog,
+				    buf + l, size - l);
+
+		idx = log_next(idx);
+		seq++;
+	}
+
+	dumper->next_seq = next_seq;
+	dumper->next_idx = next_idx;
+	ret = true;
+	raw_spin_unlock_irqrestore(&logbuf_lock, flags);
+out:
+	if (len)
+		*len = l;
+	return ret;
+}
+EXPORT_SYMBOL_GPL(kmsg_dump_get_buffer);
+
+/**
+ * kmsg_dump_rewind - reset the iterator
+ * @dumper: registered kmsg dumper
+ *
+ * Reset the dumper's iterator so that kmsg_dump_get_line() and
+ * kmsg_dump_get_buffer() can be called again and used multiple
+ * times within the same dumper.dump() callback.
+ */
+void kmsg_dump_rewind(struct kmsg_dumper *dumper)
+{
+	unsigned long flags;
+
+	raw_spin_lock_irqsave(&logbuf_lock, flags);
+	dumper->cur_seq = clear_seq;
+	dumper->cur_idx = clear_idx;
+	dumper->next_seq = log_next_seq;
+	dumper->next_idx = log_next_idx;
+	raw_spin_unlock_irqrestore(&logbuf_lock, flags);
+}
+EXPORT_SYMBOL_GPL(kmsg_dump_rewind);
 #endif
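kmsg_dump() now hands each registered dumper an iterator instead of two raw buffer pointers, so a dumper pulls records itself with kmsg_dump_get_line() or kmsg_dump_get_buffer() and is skipped when the reason exceeds its max_reason. A hedged sketch of a minimal dumper follows, built from the interfaces visible in this patch plus kmsg_dump_register() from <linux/kmsg_dump.h>; the emit_line() sink is hypothetical.

#include <linux/kmsg_dump.h>
#include <linux/init.h>

/* Hypothetical sink for recovered lines, e.g. a write to persistent storage. */
static void emit_line(const char *line, size_t len)
{
	(void)line;
	(void)len;
}

static void demo_dump(struct kmsg_dumper *dumper, enum kmsg_dump_reason reason)
{
	static char line[1024];
	size_t len;

	/* Walk the records oldest to newest; false means nothing is left. */
	while (kmsg_dump_get_line(dumper, true, line, sizeof(line), &len))
		emit_line(line, len);
}

static struct kmsg_dumper demo_dumper = {
	.dump		= demo_dump,
	.max_reason	= KMSG_DUMP_OOPS,	/* ignore anything milder */
};

static int __init demo_dumper_init(void)
{
	return kmsg_dump_register(&demo_dumper);
}
late_initcall(demo_dumper_init);
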
diff --git a/kernel/rcutree.c b/kernel/rcutree.c
index 0da7b88..38ecdda 100644
--- a/kernel/rcutree.c
+++ b/kernel/rcutree.c
@@ -1397,6 +1397,8 @@
 	rdp->qlen_lazy += rsp->qlen_lazy;
 	rdp->qlen += rsp->qlen;
 	rdp->n_cbs_adopted += rsp->qlen;
+	if (rsp->qlen_lazy != rsp->qlen)
+		rcu_idle_count_callbacks_posted();
 	rsp->qlen_lazy = 0;
 	rsp->qlen = 0;
 
@@ -1528,7 +1530,7 @@
 {
 	unsigned long flags;
 	struct rcu_head *next, *list, **tail;
-	int bl, count, count_lazy;
+	int bl, count, count_lazy, i;
 
 	/* If no callbacks are ready, just return.*/
 	if (!cpu_has_callbacks_ready_to_invoke(rdp)) {
@@ -1551,9 +1553,9 @@
 	rdp->nxtlist = *rdp->nxttail[RCU_DONE_TAIL];
 	*rdp->nxttail[RCU_DONE_TAIL] = NULL;
 	tail = rdp->nxttail[RCU_DONE_TAIL];
-	for (count = RCU_NEXT_SIZE - 1; count >= 0; count--)
-		if (rdp->nxttail[count] == rdp->nxttail[RCU_DONE_TAIL])
-			rdp->nxttail[count] = &rdp->nxtlist;
+	for (i = RCU_NEXT_SIZE - 1; i >= 0; i--)
+		if (rdp->nxttail[i] == rdp->nxttail[RCU_DONE_TAIL])
+			rdp->nxttail[i] = &rdp->nxtlist;
 	local_irq_restore(flags);
 
 	/* Invoke callbacks. */
@@ -1581,9 +1583,9 @@
 	if (list != NULL) {
 		*tail = rdp->nxtlist;
 		rdp->nxtlist = list;
-		for (count = 0; count < RCU_NEXT_SIZE; count++)
-			if (&rdp->nxtlist == rdp->nxttail[count])
-				rdp->nxttail[count] = tail;
+		for (i = 0; i < RCU_NEXT_SIZE; i++)
+			if (&rdp->nxtlist == rdp->nxttail[i])
+				rdp->nxttail[i] = tail;
 			else
 				break;
 	}
diff --git a/kernel/rcutree.h b/kernel/rcutree.h
index 7f5d138..ea05649 100644
--- a/kernel/rcutree.h
+++ b/kernel/rcutree.h
@@ -84,6 +84,20 @@
 				    /* Process level is worth LLONG_MAX/2. */
 	int dynticks_nmi_nesting;   /* Track NMI nesting level. */
 	atomic_t dynticks;	    /* Even value for idle, else odd. */
+#ifdef CONFIG_RCU_FAST_NO_HZ
+	int dyntick_drain;	    /* Prepare-for-idle state variable. */
+	unsigned long dyntick_holdoff;
+				    /* No retries for the jiffy of failure. */
+	struct timer_list idle_gp_timer;
+				    /* Wake up CPU sleeping with callbacks. */
+	unsigned long idle_gp_timer_expires;
+				    /* When to wake up CPU (for repost). */
+	bool idle_first_pass;	    /* First pass of attempt to go idle? */
+	unsigned long nonlazy_posted;
+				    /* # times non-lazy CBs posted to CPU. */
+	unsigned long nonlazy_posted_snap;
+				    /* idle-period nonlazy_posted snapshot. */
+#endif /* #ifdef CONFIG_RCU_FAST_NO_HZ */
 };
 
 /* RCU's kthread states for tracing. */
diff --git a/kernel/rcutree_plugin.h b/kernel/rcutree_plugin.h
index 2411000..5271a02 100644
--- a/kernel/rcutree_plugin.h
+++ b/kernel/rcutree_plugin.h
@@ -1886,8 +1886,9 @@
 * Because we do not have RCU_FAST_NO_HZ, just check whether this CPU needs
  * any flavor of RCU.
  */
-int rcu_needs_cpu(int cpu)
+int rcu_needs_cpu(int cpu, unsigned long *delta_jiffies)
 {
+	*delta_jiffies = ULONG_MAX;
 	return rcu_cpu_has_callbacks(cpu);
 }
 
@@ -1962,41 +1963,6 @@
 #define RCU_IDLE_GP_DELAY 6		/* Roughly one grace period. */
 #define RCU_IDLE_LAZY_GP_DELAY (6 * HZ)	/* Roughly six seconds. */
 
-/* Loop counter for rcu_prepare_for_idle(). */
-static DEFINE_PER_CPU(int, rcu_dyntick_drain);
-/* If rcu_dyntick_holdoff==jiffies, don't try to enter dyntick-idle mode. */
-static DEFINE_PER_CPU(unsigned long, rcu_dyntick_holdoff);
-/* Timer to awaken the CPU if it enters dyntick-idle mode with callbacks. */
-static DEFINE_PER_CPU(struct timer_list, rcu_idle_gp_timer);
-/* Scheduled expiry time for rcu_idle_gp_timer to allow reposting. */
-static DEFINE_PER_CPU(unsigned long, rcu_idle_gp_timer_expires);
-/* Enable special processing on first attempt to enter dyntick-idle mode. */
-static DEFINE_PER_CPU(bool, rcu_idle_first_pass);
-/* Running count of non-lazy callbacks posted, never decremented. */
-static DEFINE_PER_CPU(unsigned long, rcu_nonlazy_posted);
-/* Snapshot of rcu_nonlazy_posted to detect meaningful exits from idle. */
-static DEFINE_PER_CPU(unsigned long, rcu_nonlazy_posted_snap);
-
-/*
- * Allow the CPU to enter dyntick-idle mode if either: (1) There are no
- * callbacks on this CPU, (2) this CPU has not yet attempted to enter
- * dyntick-idle mode, or (3) this CPU is in the process of attempting to
- * enter dyntick-idle mode.  Otherwise, if we have recently tried and failed
- * to enter dyntick-idle mode, we refuse to try to enter it.  After all,
- * it is better to incur scheduling-clock interrupts than to spin
- * continuously for the same time duration!
- */
-int rcu_needs_cpu(int cpu)
-{
-	/* Flag a new idle sojourn to the idle-entry state machine. */
-	per_cpu(rcu_idle_first_pass, cpu) = 1;
-	/* If no callbacks, RCU doesn't need the CPU. */
-	if (!rcu_cpu_has_callbacks(cpu))
-		return 0;
-	/* Otherwise, RCU needs the CPU only if it recently tried and failed. */
-	return per_cpu(rcu_dyntick_holdoff, cpu) == jiffies;
-}
-
 /*
  * Does the specified flavor of RCU have non-lazy callbacks pending on
  * the specified CPU?  Both RCU flavor and CPU are specified by the
@@ -2040,6 +2006,47 @@
 }
 
 /*
+ * Allow the CPU to enter dyntick-idle mode if either: (1) There are no
+ * callbacks on this CPU, (2) this CPU has not yet attempted to enter
+ * dyntick-idle mode, or (3) this CPU is in the process of attempting to
+ * enter dyntick-idle mode.  Otherwise, if we have recently tried and failed
+ * to enter dyntick-idle mode, we refuse to try to enter it.  After all,
+ * it is better to incur scheduling-clock interrupts than to spin
+ * continuously for the same time duration!
+ *
+ * The delta_jiffies argument is used to store the time when RCU is
+ * going to need the CPU again if it still has callbacks.  The reason
+ * for this is that rcu_prepare_for_idle() might need to post a timer,
+ * but if so, it will do so after tick_nohz_stop_sched_tick() has set
+ * the wakeup time for this CPU.  This means that RCU's timer can be
+ * delayed until the wakeup time, which defeats the purpose of posting
+ * a timer.
+ */
+int rcu_needs_cpu(int cpu, unsigned long *delta_jiffies)
+{
+	struct rcu_dynticks *rdtp = &per_cpu(rcu_dynticks, cpu);
+
+	/* Flag a new idle sojourn to the idle-entry state machine. */
+	rdtp->idle_first_pass = 1;
+	/* If no callbacks, RCU doesn't need the CPU. */
+	if (!rcu_cpu_has_callbacks(cpu)) {
+		*delta_jiffies = ULONG_MAX;
+		return 0;
+	}
+	if (rdtp->dyntick_holdoff == jiffies) {
+		/* RCU recently tried and failed, so don't try again. */
+		*delta_jiffies = 1;
+		return 1;
+	}
+	/* Set up for the possibility that RCU will post a timer. */
+	if (rcu_cpu_has_nonlazy_callbacks(cpu))
+		*delta_jiffies = RCU_IDLE_GP_DELAY;
+	else
+		*delta_jiffies = RCU_IDLE_LAZY_GP_DELAY;
+	return 0;
+}
+
+/*
  * Handler for smp_call_function_single().  The only point of this
  * handler is to wake the CPU up, so the handler does only tracing.
  */
@@ -2075,21 +2082,24 @@
  */
 static void rcu_prepare_for_idle_init(int cpu)
 {
-	per_cpu(rcu_dyntick_holdoff, cpu) = jiffies - 1;
-	setup_timer(&per_cpu(rcu_idle_gp_timer, cpu),
-		    rcu_idle_gp_timer_func, cpu);
-	per_cpu(rcu_idle_gp_timer_expires, cpu) = jiffies - 1;
-	per_cpu(rcu_idle_first_pass, cpu) = 1;
+	struct rcu_dynticks *rdtp = &per_cpu(rcu_dynticks, cpu);
+
+	rdtp->dyntick_holdoff = jiffies - 1;
+	setup_timer(&rdtp->idle_gp_timer, rcu_idle_gp_timer_func, cpu);
+	rdtp->idle_gp_timer_expires = jiffies - 1;
+	rdtp->idle_first_pass = 1;
 }
 
 /*
  * Clean up for exit from idle.  Because we are exiting from idle, there
- * is no longer any point to rcu_idle_gp_timer, so cancel it.  This will
+ * is no longer any point to ->idle_gp_timer, so cancel it.  This will
  * do nothing if this timer is not active, so just cancel it unconditionally.
  */
 static void rcu_cleanup_after_idle(int cpu)
 {
-	del_timer(&per_cpu(rcu_idle_gp_timer, cpu));
+	struct rcu_dynticks *rdtp = &per_cpu(rcu_dynticks, cpu);
+
+	del_timer(&rdtp->idle_gp_timer);
 	trace_rcu_prep_idle("Cleanup after idle");
 }
 
@@ -2108,42 +2118,41 @@
  * Because it is not legal to invoke rcu_process_callbacks() with irqs
  * disabled, we do one pass of force_quiescent_state(), then do a
  * invoke_rcu_core() to cause rcu_process_callbacks() to be invoked
- * later.  The per-cpu rcu_dyntick_drain variable controls the sequencing.
+ * later.  The ->dyntick_drain field controls the sequencing.
  *
  * The caller must have disabled interrupts.
  */
 static void rcu_prepare_for_idle(int cpu)
 {
 	struct timer_list *tp;
+	struct rcu_dynticks *rdtp = &per_cpu(rcu_dynticks, cpu);
 
 	/*
 	 * If this is an idle re-entry, for example, due to use of
 	 * RCU_NONIDLE() or the new idle-loop tracing API within the idle
 	 * loop, then don't take any state-machine actions, unless the
 	 * momentary exit from idle queued additional non-lazy callbacks.
-	 * Instead, repost the rcu_idle_gp_timer if this CPU has callbacks
+	 * Instead, repost the ->idle_gp_timer if this CPU has callbacks
 	 * pending.
 	 */
-	if (!per_cpu(rcu_idle_first_pass, cpu) &&
-	    (per_cpu(rcu_nonlazy_posted, cpu) ==
-	     per_cpu(rcu_nonlazy_posted_snap, cpu))) {
+	if (!rdtp->idle_first_pass &&
+	    (rdtp->nonlazy_posted == rdtp->nonlazy_posted_snap)) {
 		if (rcu_cpu_has_callbacks(cpu)) {
-			tp = &per_cpu(rcu_idle_gp_timer, cpu);
-			mod_timer_pinned(tp, per_cpu(rcu_idle_gp_timer_expires, cpu));
+			tp = &rdtp->idle_gp_timer;
+			mod_timer_pinned(tp, rdtp->idle_gp_timer_expires);
 		}
 		return;
 	}
-	per_cpu(rcu_idle_first_pass, cpu) = 0;
-	per_cpu(rcu_nonlazy_posted_snap, cpu) =
-		per_cpu(rcu_nonlazy_posted, cpu) - 1;
+	rdtp->idle_first_pass = 0;
+	rdtp->nonlazy_posted_snap = rdtp->nonlazy_posted - 1;
 
 	/*
 	 * If there are no callbacks on this CPU, enter dyntick-idle mode.
 	 * Also reset state to avoid prejudicing later attempts.
 	 */
 	if (!rcu_cpu_has_callbacks(cpu)) {
-		per_cpu(rcu_dyntick_holdoff, cpu) = jiffies - 1;
-		per_cpu(rcu_dyntick_drain, cpu) = 0;
+		rdtp->dyntick_holdoff = jiffies - 1;
+		rdtp->dyntick_drain = 0;
 		trace_rcu_prep_idle("No callbacks");
 		return;
 	}
@@ -2152,36 +2161,37 @@
 	 * If in holdoff mode, just return.  We will presumably have
 	 * refrained from disabling the scheduling-clock tick.
 	 */
-	if (per_cpu(rcu_dyntick_holdoff, cpu) == jiffies) {
+	if (rdtp->dyntick_holdoff == jiffies) {
 		trace_rcu_prep_idle("In holdoff");
 		return;
 	}
 
-	/* Check and update the rcu_dyntick_drain sequencing. */
-	if (per_cpu(rcu_dyntick_drain, cpu) <= 0) {
+	/* Check and update the ->dyntick_drain sequencing. */
+	if (rdtp->dyntick_drain <= 0) {
 		/* First time through, initialize the counter. */
-		per_cpu(rcu_dyntick_drain, cpu) = RCU_IDLE_FLUSHES;
-	} else if (per_cpu(rcu_dyntick_drain, cpu) <= RCU_IDLE_OPT_FLUSHES &&
+		rdtp->dyntick_drain = RCU_IDLE_FLUSHES;
+	} else if (rdtp->dyntick_drain <= RCU_IDLE_OPT_FLUSHES &&
 		   !rcu_pending(cpu) &&
 		   !local_softirq_pending()) {
 		/* Can we go dyntick-idle despite still having callbacks? */
-		trace_rcu_prep_idle("Dyntick with callbacks");
-		per_cpu(rcu_dyntick_drain, cpu) = 0;
-		per_cpu(rcu_dyntick_holdoff, cpu) = jiffies;
-		if (rcu_cpu_has_nonlazy_callbacks(cpu))
-			per_cpu(rcu_idle_gp_timer_expires, cpu) =
+		rdtp->dyntick_drain = 0;
+		rdtp->dyntick_holdoff = jiffies;
+		if (rcu_cpu_has_nonlazy_callbacks(cpu)) {
+			trace_rcu_prep_idle("Dyntick with callbacks");
+			rdtp->idle_gp_timer_expires =
 					   jiffies + RCU_IDLE_GP_DELAY;
-		else
-			per_cpu(rcu_idle_gp_timer_expires, cpu) =
+		} else {
+			rdtp->idle_gp_timer_expires =
 					   jiffies + RCU_IDLE_LAZY_GP_DELAY;
-		tp = &per_cpu(rcu_idle_gp_timer, cpu);
-		mod_timer_pinned(tp, per_cpu(rcu_idle_gp_timer_expires, cpu));
-		per_cpu(rcu_nonlazy_posted_snap, cpu) =
-			per_cpu(rcu_nonlazy_posted, cpu);
+			trace_rcu_prep_idle("Dyntick with lazy callbacks");
+		}
+		tp = &rdtp->idle_gp_timer;
+		mod_timer_pinned(tp, rdtp->idle_gp_timer_expires);
+		rdtp->nonlazy_posted_snap = rdtp->nonlazy_posted;
 		return; /* Nothing more to do immediately. */
-	} else if (--per_cpu(rcu_dyntick_drain, cpu) <= 0) {
+	} else if (--(rdtp->dyntick_drain) <= 0) {
 		/* We have hit the limit, so time to give up. */
-		per_cpu(rcu_dyntick_holdoff, cpu) = jiffies;
+		rdtp->dyntick_holdoff = jiffies;
 		trace_rcu_prep_idle("Begin holdoff");
 		invoke_rcu_core();  /* Force the CPU out of dyntick-idle. */
 		return;
@@ -2227,7 +2237,7 @@
  */
 static void rcu_idle_count_callbacks_posted(void)
 {
-	__this_cpu_add(rcu_nonlazy_posted, 1);
+	__this_cpu_add(rcu_dynticks.nonlazy_posted, 1);
 }
 
 #endif /* #else #if !defined(CONFIG_RCU_FAST_NO_HZ) */
@@ -2238,11 +2248,12 @@
 
 static void print_cpu_stall_fast_no_hz(char *cp, int cpu)
 {
-	struct timer_list *tltp = &per_cpu(rcu_idle_gp_timer, cpu);
+	struct rcu_dynticks *rdtp = &per_cpu(rcu_dynticks, cpu);
+	struct timer_list *tltp = &rdtp->idle_gp_timer;
 
 	sprintf(cp, "drain=%d %c timer=%lu",
-		per_cpu(rcu_dyntick_drain, cpu),
-		per_cpu(rcu_dyntick_holdoff, cpu) == jiffies ? 'H' : '.',
+		rdtp->dyntick_drain,
+		rdtp->dyntick_holdoff == jiffies ? 'H' : '.',
 		timer_pending(tltp) ? tltp->expires - jiffies : -1);
 }
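The second parameter of rcu_needs_cpu() reports how many jiffies the CPU may sleep before RCU could want it back, so the idle-entry code can program its wakeup before rcu_prepare_for_idle() posts a timer. The sketch below is hedged and illustrative only, not the actual tick_nohz_stop_sched_tick() logic; it merely shows how a caller could consume the hint, assuming the rcu_needs_cpu() declaration is visible (e.g. via <linux/rcupdate.h>).

static unsigned long plan_idle_sleep(int cpu, unsigned long max_sleep_jiffies)
{
	unsigned long rcu_delta;

	if (rcu_needs_cpu(cpu, &rcu_delta))
		return 0;			/* RCU needs the tick right now */

	if (rcu_delta < max_sleep_jiffies)
		return rcu_delta;		/* wake up when RCU may post its timer */

	return max_sleep_jiffies;		/* RCU imposes no earlier deadline */
}
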
 
diff --git a/kernel/res_counter.c b/kernel/res_counter.c
index bebe2b1..ad581aa 100644
--- a/kernel/res_counter.c
+++ b/kernel/res_counter.c
@@ -94,13 +94,15 @@
 	counter->usage -= val;
 }
 
-void res_counter_uncharge(struct res_counter *counter, unsigned long val)
+void res_counter_uncharge_until(struct res_counter *counter,
+				struct res_counter *top,
+				unsigned long val)
 {
 	unsigned long flags;
 	struct res_counter *c;
 
 	local_irq_save(flags);
-	for (c = counter; c != NULL; c = c->parent) {
+	for (c = counter; c != top; c = c->parent) {
 		spin_lock(&c->lock);
 		res_counter_uncharge_locked(c, val);
 		spin_unlock(&c->lock);
@@ -108,6 +110,10 @@
 	local_irq_restore(flags);
 }
 
+void res_counter_uncharge(struct res_counter *counter, unsigned long val)
+{
+	res_counter_uncharge_until(counter, NULL, val);
+}
 
 static inline unsigned long long *
 res_counter_member(struct res_counter *counter, int member)
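res_counter_uncharge_until() walks the parent chain but stops before the counter passed as @top, and res_counter_uncharge() is now simply the @top == NULL case. A hedged sketch with illustrative names shows when the bounded walk is useful: a charge that only moves between two descendants of a common ancestor must not disturb the ancestor's own usage.

#include <linux/res_counter.h>

static void uncharge_for_intra_subtree_move(struct res_counter *child,
					    struct res_counter *common_ancestor,
					    unsigned long bytes)
{
	/* Drop the usage from "child" and every ancestor strictly below
	 * "common_ancestor"; the ancestor and anything above it keep their
	 * usage because the charge is only moving to a sibling underneath. */
	res_counter_uncharge_until(child, common_ancestor, bytes);
}
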
diff --git a/kernel/resource.c b/kernel/resource.c
index 7e8ea66..e1d2b8e 100644
--- a/kernel/resource.c
+++ b/kernel/resource.c
@@ -515,8 +515,8 @@
  * @root: root resource descriptor
  * @new: resource descriptor desired by caller
  * @size: requested resource region size
- * @min: minimum size to allocate
- * @max: maximum size to allocate
+ * @min: minimum boundary to allocate
+ * @max: maximum boundary to allocate
  * @align: alignment requested, in bytes
  * @alignf: alignment function, optional, called if not NULL
  * @alignf_data: arbitrary data to pass to the @alignf function
diff --git a/kernel/sched/core.c b/kernel/sched/core.c
index 39eb601..d5594a4 100644
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -142,9 +142,8 @@
 #define SCHED_FEAT(name, enabled)	\
 	#name ,
 
-static __read_mostly char *sched_feat_names[] = {
+static const char * const sched_feat_names[] = {
 #include "features.h"
-	NULL
 };
 
 #undef SCHED_FEAT
@@ -2517,25 +2516,32 @@
 	sched_avg_update(this_rq);
 }
 
+#ifdef CONFIG_NO_HZ
+/*
+ * There is no sane way to deal with nohz on smp when using jiffies because the
+ * cpu doing the jiffies update might drift wrt the cpu doing the jiffy reading
+ * causing off-by-one errors in observed deltas; {0,2} instead of {1,1}.
+ *
+ * Therefore we cannot use the delta approach from the regular tick since that
+ * would seriously skew the load calculation. However we'll make do for those
+ * updates happening while idle (nohz_idle_balance) or coming out of idle
+ * (tick_nohz_idle_exit).
+ *
+ * This means we might still be one tick off for nohz periods.
+ */
+
 /*
  * Called from nohz_idle_balance() to update the load ratings before doing the
  * idle balance.
  */
 void update_idle_cpu_load(struct rq *this_rq)
 {
-	unsigned long curr_jiffies = jiffies;
+	unsigned long curr_jiffies = ACCESS_ONCE(jiffies);
 	unsigned long load = this_rq->load.weight;
 	unsigned long pending_updates;
 
 	/*
-	 * Bloody broken means of dealing with nohz, but better than nothing..
-	 * jiffies is updated by one cpu, another cpu can drift wrt the jiffy
-	 * update and see 0 difference the one time and 2 the next, even though
-	 * we ticked at roughtly the same rate.
-	 *
-	 * Hence we only use this from nohz_idle_balance() and skip this
-	 * nonsense when called from the scheduler_tick() since that's
-	 * guaranteed a stable rate.
+	 * bail if there's load or we're actually up-to-date.
 	 */
 	if (load || curr_jiffies == this_rq->last_load_update_tick)
 		return;
@@ -2547,12 +2553,38 @@
 }
 
 /*
+ * Called from tick_nohz_idle_exit() -- try and fix up the ticks we missed.
+ */
+void update_cpu_load_nohz(void)
+{
+	struct rq *this_rq = this_rq();
+	unsigned long curr_jiffies = ACCESS_ONCE(jiffies);
+	unsigned long pending_updates;
+
+	if (curr_jiffies == this_rq->last_load_update_tick)
+		return;
+
+	raw_spin_lock(&this_rq->lock);
+	pending_updates = curr_jiffies - this_rq->last_load_update_tick;
+	if (pending_updates) {
+		this_rq->last_load_update_tick = curr_jiffies;
+		/*
+		 * We were idle, this means load 0, the current load might be
+		 * !0 due to remote wakeups and the sort.
+		 */
+		__update_cpu_load(this_rq, 0, pending_updates);
+	}
+	raw_spin_unlock(&this_rq->lock);
+}
+#endif /* CONFIG_NO_HZ */
+
+/*
  * Called from scheduler_tick()
  */
 static void update_cpu_load_active(struct rq *this_rq)
 {
 	/*
-	 * See the mess in update_idle_cpu_load().
+	 * See the mess around update_idle_cpu_load() / update_cpu_load_nohz().
 	 */
 	this_rq->last_load_update_tick = jiffies;
 	__update_cpu_load(this_rq, this_rq->load.weight, 1);
@@ -4982,7 +5014,7 @@
 		p->sched_class->set_cpus_allowed(p, new_mask);
 
 	cpumask_copy(&p->cpus_allowed, new_mask);
-	p->rt.nr_cpus_allowed = cpumask_weight(new_mask);
+	p->nr_cpus_allowed = cpumask_weight(new_mask);
 }
 
 /*
@@ -5524,15 +5556,20 @@
 
 #ifdef CONFIG_SCHED_DEBUG
 
-static __read_mostly int sched_domain_debug_enabled;
+static __read_mostly int sched_debug_enabled;
 
-static int __init sched_domain_debug_setup(char *str)
+static int __init sched_debug_setup(char *str)
 {
-	sched_domain_debug_enabled = 1;
+	sched_debug_enabled = 1;
 
 	return 0;
 }
-early_param("sched_debug", sched_domain_debug_setup);
+early_param("sched_debug", sched_debug_setup);
+
+static inline bool sched_debug(void)
+{
+	return sched_debug_enabled;
+}
 
 static int sched_domain_debug_one(struct sched_domain *sd, int cpu, int level,
 				  struct cpumask *groupmask)
@@ -5572,7 +5609,12 @@
 			break;
 		}
 
-		if (!group->sgp->power) {
+		/*
+		 * Even though we initialize ->power to something semi-sane,
+		 * we leave power_orig unset. This allows us to detect if
+		 * domain iteration is still funny without causing /0 traps.
+		 */
+		if (!group->sgp->power_orig) {
 			printk(KERN_CONT "\n");
 			printk(KERN_ERR "ERROR: domain->cpu_power not "
 					"set\n");
@@ -5620,7 +5662,7 @@
 {
 	int level = 0;
 
-	if (!sched_domain_debug_enabled)
+	if (!sched_debug_enabled)
 		return;
 
 	if (!sd) {
@@ -5641,6 +5683,10 @@
 }
 #else /* !CONFIG_SCHED_DEBUG */
 # define sched_domain_debug(sd, cpu) do { } while (0)
+static inline bool sched_debug(void)
+{
+	return false;
+}
 #endif /* CONFIG_SCHED_DEBUG */
 
 static int sd_degenerate(struct sched_domain *sd)
@@ -5962,6 +6008,44 @@
 	struct sd_data      data;
 };
 
+/*
+ * Build an iteration mask that can exclude certain CPUs from the upwards
+ * domain traversal.
+ *
+ * Asymmetric node setups can result in situations where the domain tree is of
+ * unequal depth; make sure to skip domains that already cover the entire
+ * range.
+ *
+ * In that case build_sched_domains() will have terminated the iteration early
+ * and our sibling sd spans will be empty. Domains should always include the
+ * cpu they're built on, so check that.
+ *
+ */
+static void build_group_mask(struct sched_domain *sd, struct sched_group *sg)
+{
+	const struct cpumask *span = sched_domain_span(sd);
+	struct sd_data *sdd = sd->private;
+	struct sched_domain *sibling;
+	int i;
+
+	for_each_cpu(i, span) {
+		sibling = *per_cpu_ptr(sdd->sd, i);
+		if (!cpumask_test_cpu(i, sched_domain_span(sibling)))
+			continue;
+
+		cpumask_set_cpu(i, sched_group_mask(sg));
+	}
+}
+
+/*
+ * Return the canonical balance cpu for this group; this is the first cpu
+ * of this group that's also in the iteration mask.
+ */
+int group_balance_cpu(struct sched_group *sg)
+{
+	return cpumask_first_and(sched_group_cpus(sg), sched_group_mask(sg));
+}
+
 static int
 build_overlap_sched_groups(struct sched_domain *sd, int cpu)
 {
@@ -5980,6 +6064,12 @@
 		if (cpumask_test_cpu(i, covered))
 			continue;
 
+		child = *per_cpu_ptr(sdd->sd, i);
+
+		/* See the comment near build_group_mask(). */
+		if (!cpumask_test_cpu(i, sched_domain_span(child)))
+			continue;
+
 		sg = kzalloc_node(sizeof(struct sched_group) + cpumask_size(),
 				GFP_KERNEL, cpu_to_node(cpu));
 
@@ -5987,8 +6077,6 @@
 			goto fail;
 
 		sg_span = sched_group_cpus(sg);
-
-		child = *per_cpu_ptr(sdd->sd, i);
 		if (child->child) {
 			child = child->child;
 			cpumask_copy(sg_span, sched_domain_span(child));
@@ -5997,10 +6085,24 @@
 
 		cpumask_or(covered, covered, sg_span);
 
-		sg->sgp = *per_cpu_ptr(sdd->sgp, cpumask_first(sg_span));
-		atomic_inc(&sg->sgp->ref);
+		sg->sgp = *per_cpu_ptr(sdd->sgp, i);
+		if (atomic_inc_return(&sg->sgp->ref) == 1)
+			build_group_mask(sd, sg);
 
-		if (cpumask_test_cpu(cpu, sg_span))
+		/*
+		 * Initialize sgp->power such that even if we mess up the
+		 * domains and no possible iteration will get us here, we won't
+		 * die on a /0 trap.
+		 */
+		sg->sgp->power = SCHED_POWER_SCALE * cpumask_weight(sg_span);
+
+		/*
+		 * Make sure the first group of this domain contains the
+		 * canonical balance cpu. Otherwise the sched_domain iteration
+		 * breaks. See update_sg_lb_stats().
+		 */
+		if ((!groups && cpumask_test_cpu(cpu, sg_span)) ||
+		    group_balance_cpu(sg) == cpu)
 			groups = sg;
 
 		if (!first)
@@ -6074,6 +6176,7 @@
 
 		cpumask_clear(sched_group_cpus(sg));
 		sg->sgp->power = 0;
+		cpumask_setall(sched_group_mask(sg));
 
 		for_each_cpu(j, span) {
 			if (get_group(j, sdd, NULL) != group)
@@ -6115,7 +6218,7 @@
 		sg = sg->next;
 	} while (sg != sd->groups);
 
-	if (cpu != group_first_cpu(sg))
+	if (cpu != group_balance_cpu(sg))
 		return;
 
 	update_group_power(sd, cpu);
@@ -6165,11 +6268,8 @@
 
 static int __init setup_relax_domain_level(char *str)
 {
-	unsigned long val;
-
-	val = simple_strtoul(str, NULL, 0);
-	if (val < sched_domain_level_max)
-		default_relax_domain_level = val;
+	if (kstrtoint(str, 0, &default_relax_domain_level))
+		pr_warn("Unable to set relax_domain_level\n");
 
 	return 1;
 }
@@ -6279,14 +6379,13 @@
 #ifdef CONFIG_NUMA
 
 static int sched_domains_numa_levels;
-static int sched_domains_numa_scale;
 static int *sched_domains_numa_distance;
 static struct cpumask ***sched_domains_numa_masks;
 static int sched_domains_curr_level;
 
 static inline int sd_local_flags(int level)
 {
-	if (sched_domains_numa_distance[level] > REMOTE_DISTANCE)
+	if (sched_domains_numa_distance[level] > RECLAIM_DISTANCE)
 		return 0;
 
 	return SD_BALANCE_EXEC | SD_BALANCE_FORK | SD_WAKE_AFFINE;
@@ -6344,6 +6443,42 @@
 	return sched_domains_numa_masks[sched_domains_curr_level][cpu_to_node(cpu)];
 }
 
+static void sched_numa_warn(const char *str)
+{
+	static int done = false;
+	int i,j;
+
+	if (done)
+		return;
+
+	done = true;
+
+	printk(KERN_WARNING "ERROR: %s\n\n", str);
+
+	for (i = 0; i < nr_node_ids; i++) {
+		printk(KERN_WARNING "  ");
+		for (j = 0; j < nr_node_ids; j++)
+			printk(KERN_CONT "%02d ", node_distance(i,j));
+		printk(KERN_CONT "\n");
+	}
+	printk(KERN_WARNING "\n");
+}
+
+static bool find_numa_distance(int distance)
+{
+	int i;
+
+	if (distance == node_distance(0, 0))
+		return true;
+
+	for (i = 0; i < sched_domains_numa_levels; i++) {
+		if (sched_domains_numa_distance[i] == distance)
+			return true;
+	}
+
+	return false;
+}
+
 static void sched_init_numa(void)
 {
 	int next_distance, curr_distance = node_distance(0, 0);
@@ -6351,7 +6486,6 @@
 	int level = 0;
 	int i, j, k;
 
-	sched_domains_numa_scale = curr_distance;
 	sched_domains_numa_distance = kzalloc(sizeof(int) * nr_node_ids, GFP_KERNEL);
 	if (!sched_domains_numa_distance)
 		return;
@@ -6362,23 +6496,41 @@
 	 *
 	 * Assumes node_distance(0,j) includes all distances in
 	 * node_distance(i,j) in order to avoid cubic time.
-	 *
-	 * XXX: could be optimized to O(n log n) by using sort()
 	 */
 	next_distance = curr_distance;
 	for (i = 0; i < nr_node_ids; i++) {
 		for (j = 0; j < nr_node_ids; j++) {
-			int distance = node_distance(0, j);
-			if (distance > curr_distance &&
-					(distance < next_distance ||
-					 next_distance == curr_distance))
-				next_distance = distance;
+			for (k = 0; k < nr_node_ids; k++) {
+				int distance = node_distance(i, k);
+
+				if (distance > curr_distance &&
+				    (distance < next_distance ||
+				     next_distance == curr_distance))
+					next_distance = distance;
+
+				/*
+				 * While not a strong assumption, it would be nice to know
+				 * about cases where node A is connected to B but B is not
+				 * equally connected to A.
+				 */
+				if (sched_debug() && node_distance(k, i) != distance)
+					sched_numa_warn("Node-distance not symmetric");
+
+				if (sched_debug() && i && !find_numa_distance(distance))
+					sched_numa_warn("Node-0 not representative");
+			}
+			if (next_distance != curr_distance) {
+				sched_domains_numa_distance[level++] = next_distance;
+				sched_domains_numa_levels = level;
+				curr_distance = next_distance;
+			} else break;
 		}
-		if (next_distance != curr_distance) {
-			sched_domains_numa_distance[level++] = next_distance;
-			sched_domains_numa_levels = level;
-			curr_distance = next_distance;
-		} else break;
+
+		/*
+		 * When sched_debug() is enabled we verify the above assumption.
+		 */
+		if (!sched_debug())
+			break;
 	}
 	/*
 	 * 'level' contains the number of unique distances, excluding the
@@ -6403,7 +6555,7 @@
 			return;
 
 		for (j = 0; j < nr_node_ids; j++) {
-			struct cpumask *mask = kzalloc_node(cpumask_size(), GFP_KERNEL, j);
+			struct cpumask *mask = kzalloc(cpumask_size(), GFP_KERNEL);
 			if (!mask)
 				return;
 
@@ -6490,7 +6642,7 @@
 
 			*per_cpu_ptr(sdd->sg, j) = sg;
 
-			sgp = kzalloc_node(sizeof(struct sched_group_power),
+			sgp = kzalloc_node(sizeof(struct sched_group_power) + cpumask_size(),
 					GFP_KERNEL, cpu_to_node(j));
 			if (!sgp)
 				return -ENOMEM;
@@ -6543,7 +6695,6 @@
 	if (!sd)
 		return child;
 
-	set_domain_attribute(sd, attr);
 	cpumask_and(sched_domain_span(sd), cpu_map, tl->mask(cpu));
 	if (child) {
 		sd->level = child->level + 1;
@@ -6551,6 +6702,7 @@
 		child->parent = sd;
 	}
 	sd->child = child;
+	set_domain_attribute(sd, attr);
 
 	return sd;
 }
@@ -6691,7 +6843,6 @@
 	if (!doms_cur)
 		doms_cur = &fallback_doms;
 	cpumask_andnot(doms_cur[0], cpu_map, cpu_isolated_map);
-	dattr_cur = NULL;
 	err = build_sched_domains(doms_cur[0], NULL);
 	register_sched_domain_sysctl();
 
diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
index 940e6d1..c099cc6e 100644
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -2703,7 +2703,7 @@
 	int want_sd = 1;
 	int sync = wake_flags & WF_SYNC;
 
-	if (p->rt.nr_cpus_allowed == 1)
+	if (p->nr_cpus_allowed == 1)
 		return prev_cpu;
 
 	if (sd_flag & SD_BALANCE_WAKE) {
@@ -3503,15 +3503,22 @@
 unsigned long scale_rt_power(int cpu)
 {
 	struct rq *rq = cpu_rq(cpu);
-	u64 total, available;
+	u64 total, available, age_stamp, avg;
 
-	total = sched_avg_period() + (rq->clock - rq->age_stamp);
+	/*
+	 * Since we're reading these variables without serialization make sure
+	 * we read them once before doing sanity checks on them.
+	 */
+	age_stamp = ACCESS_ONCE(rq->age_stamp);
+	avg = ACCESS_ONCE(rq->rt_avg);
 
-	if (unlikely(total < rq->rt_avg)) {
+	total = sched_avg_period() + (rq->clock - age_stamp);
+
+	if (unlikely(total < avg)) {
 		/* Ensures that power won't end up being negative */
 		available = 0;
 	} else {
-		available = total - rq->rt_avg;
+		available = total - avg;
 	}
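
scale_rt_power() now snapshots rq->age_stamp and rq->rt_avg exactly once before the sanity check, since both can change under it. A minimal user-space sketch of the same read-once-then-check pattern; READ_ONCE below is a simplified stand-in for the kernel's ACCESS_ONCE(), and all values are invented:

#include <stdint.h>
#include <stdio.h>

/* simplified stand-in for the kernel's ACCESS_ONCE()/READ_ONCE() */
#define READ_ONCE(x) (*(volatile typeof(x) *)&(x))

static uint64_t clock_now = 2000, age_stamp = 500, rt_avg = 300;
static const uint64_t avg_period = 1000;

static uint64_t available_power(void)
{
	/* snapshot the shared values once, then do all checks on the copies */
	uint64_t stamp = READ_ONCE(age_stamp);
	uint64_t avg   = READ_ONCE(rt_avg);
	uint64_t total = avg_period + (clock_now - stamp);

	return total < avg ? 0 : total - avg;
}

int main(void)
{
	printf("available: %llu\n", (unsigned long long)available_power());
	return 0;
}
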
 
 	if (unlikely((s64)total < SCHED_POWER_SCALE))
@@ -3574,13 +3581,28 @@
 
 	power = 0;
 
-	group = child->groups;
-	do {
-		power += group->sgp->power;
-		group = group->next;
-	} while (group != child->groups);
+	if (child->flags & SD_OVERLAP) {
+		/*
+		 * SD_OVERLAP domains cannot assume that child groups
+		 * span the current group.
+		 */
 
-	sdg->sgp->power = power;
+		for_each_cpu(cpu, sched_group_cpus(sdg))
+			power += power_of(cpu);
+	} else {
+		/*
+		 * !SD_OVERLAP domains can assume that child groups
+		 * span the current group.
+		 */ 
+
+		group = child->groups;
+		do {
+			power += group->sgp->power;
+			group = group->next;
+		} while (group != child->groups);
+	}
+
+	sdg->sgp->power_orig = sdg->sgp->power = power;
 }
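
update_group_power() now sums power either per CPU (for SD_OVERLAP domains) or by walking the circular ring of child groups. A tiny stand-alone illustration of the ring walk, assuming a minimal group type that only has a circular next pointer and a power field:

#include <stdio.h>

struct group {
	struct group *next;	/* circular ring of sibling groups */
	unsigned long power;
};

static unsigned long sum_group_power(struct group *first)
{
	struct group *g = first;
	unsigned long power = 0;

	do {
		power += g->power;
		g = g->next;
	} while (g != first);

	return power;
}

int main(void)
{
	struct group a, b, c;

	a = (struct group){ .next = &b, .power = 1024 };
	b = (struct group){ .next = &c, .power = 512 };
	c = (struct group){ .next = &a, .power = 512 };

	printf("total power: %lu\n", sum_group_power(&a));	/* 2048 */
	return 0;
}
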
 
 /*
@@ -3610,7 +3632,7 @@
 
 /**
  * update_sg_lb_stats - Update sched_group's statistics for load balancing.
- * @sd: The sched_domain whose statistics are to be updated.
+ * @env: The load balancing environment.
  * @group: sched_group whose statistics are to be updated.
  * @load_idx: Load index of sched_domain of this_cpu for load calc.
  * @local_group: Does group contain this_cpu.
@@ -3630,7 +3652,7 @@
 	int i;
 
 	if (local_group)
-		balance_cpu = group_first_cpu(group);
+		balance_cpu = group_balance_cpu(group);
 
 	/* Tally up the load of all CPUs in the group */
 	max_cpu_load = 0;
@@ -3645,7 +3667,8 @@
 
 		/* Bias balancing toward cpus of our domain */
 		if (local_group) {
-			if (idle_cpu(i) && !first_idle_cpu) {
+			if (idle_cpu(i) && !first_idle_cpu &&
+					cpumask_test_cpu(i, sched_group_mask(group))) {
 				first_idle_cpu = 1;
 				balance_cpu = i;
 			}
@@ -3719,11 +3742,10 @@
 
 /**
  * update_sd_pick_busiest - return 1 on busiest group
- * @sd: sched_domain whose statistics are to be checked
+ * @env: The load balancing environment.
  * @sds: sched_domain statistics
  * @sg: sched_group candidate to be checked for being the busiest
  * @sgs: sched_group statistics
- * @this_cpu: the current cpu
  *
  * Determine if @sg is a busier group than the previously selected
  * busiest group.
@@ -3761,9 +3783,7 @@
 
 /**
  * update_sd_lb_stats - Update sched_domain's statistics for load balancing.
- * @sd: sched_domain whose statistics are to be updated.
- * @this_cpu: Cpu for which load balance is currently performed.
- * @idle: Idle status of this_cpu
+ * @env: The load balancing environment.
  * @cpus: Set of cpus considered for load balancing.
  * @balance: Should we balance.
  * @sds: variable to hold the statistics for this sched_domain.
@@ -3852,10 +3872,8 @@
  * Returns 1 when packing is required and a task should be moved to
  * this CPU.  The amount of the imbalance is returned in *imbalance.
  *
- * @sd: The sched_domain whose packing is to be checked.
+ * @env: The load balancing environment.
  * @sds: Statistics of the sched_domain which is to be packed
- * @this_cpu: The cpu at whose sched_domain we're performing load-balance.
- * @imbalance: returns amount of imbalanced due to packing.
  */
 static int check_asym_packing(struct lb_env *env, struct sd_lb_stats *sds)
 {
@@ -3881,9 +3899,8 @@
  * fix_small_imbalance - Calculate the minor imbalance that exists
  *			amongst the groups of a sched_domain, during
  *			load balancing.
+ * @env: The load balancing environment.
  * @sds: Statistics of the sched_domain whose imbalance is to be calculated.
- * @this_cpu: The cpu at whose sched_domain we're performing load-balance.
- * @imbalance: Variable to store the imbalance.
  */
 static inline
 void fix_small_imbalance(struct lb_env *env, struct sd_lb_stats *sds)
@@ -4026,11 +4043,7 @@
  * Also calculates the amount of weighted load which should be moved
  * to restore balance.
  *
- * @sd: The sched_domain whose busiest group is to be returned.
- * @this_cpu: The cpu for which load balancing is currently being performed.
- * @imbalance: Variable which stores amount of weighted load which should
- *		be moved to restore balance/put a group to idle.
- * @idle: The idle status of this_cpu.
+ * @env: The load balancing environment.
  * @cpus: The set of CPUs under consideration for load-balancing.
  * @balance: Pointer to a variable indicating if this_cpu
  *	is the appropriate cpu to perform load balancing at this_level.
diff --git a/kernel/sched/rt.c b/kernel/sched/rt.c
index c5565c3..573e1ca 100644
--- a/kernel/sched/rt.c
+++ b/kernel/sched/rt.c
@@ -274,13 +274,16 @@
 
 static void inc_rt_migration(struct sched_rt_entity *rt_se, struct rt_rq *rt_rq)
 {
+	struct task_struct *p;
+
 	if (!rt_entity_is_task(rt_se))
 		return;
 
+	p = rt_task_of(rt_se);
 	rt_rq = &rq_of_rt_rq(rt_rq)->rt;
 
 	rt_rq->rt_nr_total++;
-	if (rt_se->nr_cpus_allowed > 1)
+	if (p->nr_cpus_allowed > 1)
 		rt_rq->rt_nr_migratory++;
 
 	update_rt_migration(rt_rq);
@@ -288,13 +291,16 @@
 
 static void dec_rt_migration(struct sched_rt_entity *rt_se, struct rt_rq *rt_rq)
 {
+	struct task_struct *p;
+
 	if (!rt_entity_is_task(rt_se))
 		return;
 
+	p = rt_task_of(rt_se);
 	rt_rq = &rq_of_rt_rq(rt_rq)->rt;
 
 	rt_rq->rt_nr_total--;
-	if (rt_se->nr_cpus_allowed > 1)
+	if (p->nr_cpus_allowed > 1)
 		rt_rq->rt_nr_migratory--;
 
 	update_rt_migration(rt_rq);
@@ -1161,7 +1167,7 @@
 
 	enqueue_rt_entity(rt_se, flags & ENQUEUE_HEAD);
 
-	if (!task_current(rq, p) && p->rt.nr_cpus_allowed > 1)
+	if (!task_current(rq, p) && p->nr_cpus_allowed > 1)
 		enqueue_pushable_task(rq, p);
 
 	inc_nr_running(rq);
@@ -1225,7 +1231,7 @@
 
 	cpu = task_cpu(p);
 
-	if (p->rt.nr_cpus_allowed == 1)
+	if (p->nr_cpus_allowed == 1)
 		goto out;
 
 	/* For anything but wake ups, just return the task_cpu */
@@ -1260,9 +1266,9 @@
 	 * will have to sort it out.
 	 */
 	if (curr && unlikely(rt_task(curr)) &&
-	    (curr->rt.nr_cpus_allowed < 2 ||
+	    (curr->nr_cpus_allowed < 2 ||
 	     curr->prio <= p->prio) &&
-	    (p->rt.nr_cpus_allowed > 1)) {
+	    (p->nr_cpus_allowed > 1)) {
 		int target = find_lowest_rq(p);
 
 		if (target != -1)
@@ -1276,10 +1282,10 @@
 
 static void check_preempt_equal_prio(struct rq *rq, struct task_struct *p)
 {
-	if (rq->curr->rt.nr_cpus_allowed == 1)
+	if (rq->curr->nr_cpus_allowed == 1)
 		return;
 
-	if (p->rt.nr_cpus_allowed != 1
+	if (p->nr_cpus_allowed != 1
 	    && cpupri_find(&rq->rd->cpupri, p, NULL))
 		return;
 
@@ -1395,7 +1401,7 @@
 	 * The previous task needs to be made eligible for pushing
 	 * if it is still active
 	 */
-	if (on_rt_rq(&p->rt) && p->rt.nr_cpus_allowed > 1)
+	if (on_rt_rq(&p->rt) && p->nr_cpus_allowed > 1)
 		enqueue_pushable_task(rq, p);
 }
 
@@ -1408,7 +1414,7 @@
 {
 	if (!task_running(rq, p) &&
 	    (cpu < 0 || cpumask_test_cpu(cpu, tsk_cpus_allowed(p))) &&
-	    (p->rt.nr_cpus_allowed > 1))
+	    (p->nr_cpus_allowed > 1))
 		return 1;
 	return 0;
 }
@@ -1464,7 +1470,7 @@
 	if (unlikely(!lowest_mask))
 		return -1;
 
-	if (task->rt.nr_cpus_allowed == 1)
+	if (task->nr_cpus_allowed == 1)
 		return -1; /* No other targets possible */
 
 	if (!cpupri_find(&task_rq(task)->rd->cpupri, task, lowest_mask))
@@ -1556,7 +1562,7 @@
 				     task_running(rq, task) ||
 				     !task->on_rq)) {
 
-				raw_spin_unlock(&lowest_rq->lock);
+				double_unlock_balance(rq, lowest_rq);
 				lowest_rq = NULL;
 				break;
 			}
@@ -1586,7 +1592,7 @@
 
 	BUG_ON(rq->cpu != task_cpu(p));
 	BUG_ON(task_current(rq, p));
-	BUG_ON(p->rt.nr_cpus_allowed <= 1);
+	BUG_ON(p->nr_cpus_allowed <= 1);
 
 	BUG_ON(!p->on_rq);
 	BUG_ON(!rt_task(p));
@@ -1793,9 +1799,9 @@
 	if (!task_running(rq, p) &&
 	    !test_tsk_need_resched(rq->curr) &&
 	    has_pushable_tasks(rq) &&
-	    p->rt.nr_cpus_allowed > 1 &&
+	    p->nr_cpus_allowed > 1 &&
 	    rt_task(rq->curr) &&
-	    (rq->curr->rt.nr_cpus_allowed < 2 ||
+	    (rq->curr->nr_cpus_allowed < 2 ||
 	     rq->curr->prio <= p->prio))
 		push_rt_tasks(rq);
 }
@@ -1817,7 +1823,7 @@
 	 * Only update if the process changes its state from whether it
 	 * can migrate or not.
 	 */
-	if ((p->rt.nr_cpus_allowed > 1) == (weight > 1))
+	if ((p->nr_cpus_allowed > 1) == (weight > 1))
 		return;
 
 	rq = task_rq(p);
@@ -1979,6 +1985,8 @@
 
 static void task_tick_rt(struct rq *rq, struct task_struct *p, int queued)
 {
+	struct sched_rt_entity *rt_se = &p->rt;
+
 	update_curr_rt(rq);
 
 	watchdog(rq, p);
@@ -1996,12 +2004,15 @@
 	p->rt.time_slice = RR_TIMESLICE;
 
 	/*
-	 * Requeue to the end of queue if we are not the only element
-	 * on the queue:
+	 * Requeue to the end of the queue if we (or any of our ancestors) are
+	 * not the only element on the queue.
 	 */
-	if (p->rt.run_list.prev != p->rt.run_list.next) {
-		requeue_task_rt(rq, p, 0);
-		set_tsk_need_resched(p);
+	for_each_sched_rt_entity(rt_se) {
+		if (rt_se->run_list.prev != rt_se->run_list.next) {
+			requeue_task_rt(rq, p, 0);
+			set_tsk_need_resched(p);
+			return;
+		}
 	}
 }
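
The requeue test above reduces to "does this entity, or any ancestor, share its run list with another entry?", which on a circular list is just prev != next. A small self-contained sketch of that check, with an illustrative list type rather than the kernel's list_head:

#include <stdbool.h>
#include <stdio.h>

struct list_node {
	struct list_node *prev, *next;
};

static void list_init(struct list_node *n)
{
	n->prev = n->next = n;
}

static void list_add_tail(struct list_node *n, struct list_node *head)
{
	n->prev = head->prev;
	n->next = head;
	head->prev->next = n;
	head->prev = n;
}

/* true when the node is the only element on its circular list */
static bool list_is_singular_node(const struct list_node *n)
{
	return n->prev == n->next;
}

int main(void)
{
	struct list_node head, a, b;

	list_init(&head);
	list_add_tail(&a, &head);
	printf("a alone: %d\n", list_is_singular_node(&a));	/* 1 */

	list_add_tail(&b, &head);
	printf("a with b: %d\n", list_is_singular_node(&a));	/* 0 */
	return 0;
}
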
 
diff --git a/kernel/sched/sched.h b/kernel/sched/sched.h
index ba9dccf..6d52cea 100644
--- a/kernel/sched/sched.h
+++ b/kernel/sched/sched.h
@@ -526,6 +526,8 @@
 DECLARE_PER_CPU(struct sched_domain *, sd_llc);
 DECLARE_PER_CPU(int, sd_llc_id);
 
+extern int group_balance_cpu(struct sched_group *sg);
+
 #endif /* CONFIG_SMP */
 
 #include "stats.h"
diff --git a/kernel/signal.c b/kernel/signal.c
index f7b4182..6771027 100644
--- a/kernel/signal.c
+++ b/kernel/signal.c
@@ -1656,19 +1656,18 @@
 	info.si_signo = sig;
 	info.si_errno = 0;
 	/*
-	 * we are under tasklist_lock here so our parent is tied to
-	 * us and cannot exit and release its namespace.
+	 * We are under tasklist_lock here so our parent is tied to
+	 * us and cannot change.
 	 *
-	 * the only it can is to switch its nsproxy with sys_unshare,
-	 * bu uncharing pid namespaces is not allowed, so we'll always
-	 * see relevant namespace
+	 * task_active_pid_ns will always return the same pid namespace
+	 * until a task passes through release_task.
 	 *
 	 * write_lock() currently calls preempt_disable() which is the
 	 * same as rcu_read_lock(), but according to Oleg, this is not
 	 * correct to rely on this
 	 */
 	rcu_read_lock();
-	info.si_pid = task_pid_nr_ns(tsk, tsk->parent->nsproxy->pid_ns);
+	info.si_pid = task_pid_nr_ns(tsk, task_active_pid_ns(tsk->parent));
 	info.si_uid = from_kuid_munged(task_cred_xxx(tsk->parent, user_ns),
 				       task_uid(tsk));
 	rcu_read_unlock();
@@ -2369,24 +2368,34 @@
 }
 
 /**
- * block_sigmask - add @ka's signal mask to current->blocked
- * @ka: action for @signr
- * @signr: signal that has been successfully delivered
+ * signal_delivered - update the blocked mask and notify tracing after delivery
+ * @sig:		number of signal being delivered
+ * @info:		siginfo_t of signal being delivered
+ * @ka:			sigaction setting that chose the handler
+ * @regs:		user register state
+ * @stepping:		nonzero if debugger single-step or block-step in use
  *
  * This function should be called when a signal has succesfully been
- * delivered. It adds the mask of signals for @ka to current->blocked
- * so that they are blocked during the execution of the signal
- * handler. In addition, @signr will be blocked unless %SA_NODEFER is
- * set in @ka->sa.sa_flags.
+ * delivered. It updates the blocked signals accordingly (@ka->sa.sa_mask
+ * is always blocked, and the signal itself is blocked unless %SA_NODEFER
+ * is set in @ka->sa.sa_flags).  Tracing is notified.
  */
-void block_sigmask(struct k_sigaction *ka, int signr)
+void signal_delivered(int sig, siginfo_t *info, struct k_sigaction *ka,
+			struct pt_regs *regs, int stepping)
 {
 	sigset_t blocked;
 
+	/* A signal was successfully delivered, and the
+	   saved sigmask was stored on the signal frame,
+	   and will be restored by sigreturn.  So we can
+	   simply clear the restore sigmask flag.  */
+	clear_restore_sigmask();
+
 	sigorsets(&blocked, &current->blocked, &ka->sa.sa_mask);
 	if (!(ka->sa.sa_flags & SA_NODEFER))
-		sigaddset(&blocked, signr);
+		sigaddset(&blocked, sig);
 	set_current_blocked(&blocked);
+	tracehook_signal_handler(sig, info, ka, regs, stepping);
 }
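
The blocked-set update above is the union of the old mask and the handler's sa_mask, plus the delivered signal itself unless SA_NODEFER is set. A user-space sketch of that computation on plain 64-bit masks; the helper names and the SA_NODEFER_FLAG value are illustrative, not the kernel's sigset API:

#include <stdint.h>
#include <stdio.h>

#define SA_NODEFER_FLAG	0x40000000u	/* illustrative value */

static uint64_t sigmask_bit(int sig)
{
	return 1ull << (sig - 1);
}

static uint64_t new_blocked(uint64_t blocked, uint64_t sa_mask,
			    unsigned int sa_flags, int sig)
{
	uint64_t set = blocked | sa_mask;	/* like sigorsets() */

	if (!(sa_flags & SA_NODEFER_FLAG))
		set |= sigmask_bit(sig);	/* like sigaddset() */
	return set;
}

int main(void)
{
	uint64_t b = new_blocked(0, sigmask_bit(10), 0, 2);

	printf("blocked mask: %#llx\n", (unsigned long long)b);
	return 0;
}
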
 
 /*
@@ -2519,7 +2528,16 @@
  * It is wrong to change ->blocked directly, this helper should be used
  * to ensure the process can't miss a shared signal we are going to block.
  */
-void set_current_blocked(const sigset_t *newset)
+void set_current_blocked(sigset_t *newset)
+{
+	struct task_struct *tsk = current;
+	sigdelsetmask(newset, sigmask(SIGKILL) | sigmask(SIGSTOP));
+	spin_lock_irq(&tsk->sighand->siglock);
+	__set_task_blocked(tsk, newset);
+	spin_unlock_irq(&tsk->sighand->siglock);
+}
+
+void __set_current_blocked(const sigset_t *newset)
 {
 	struct task_struct *tsk = current;
 
@@ -2559,7 +2577,7 @@
 		return -EINVAL;
 	}
 
-	set_current_blocked(&newset);
+	__set_current_blocked(&newset);
 	return 0;
 }
 
@@ -3133,7 +3151,7 @@
 			return -EINVAL;
 		}
 
-		set_current_blocked(&new_blocked);
+		__set_current_blocked(&new_blocked);
 	}
 
 	if (oset) {
@@ -3197,7 +3215,6 @@
 	int old = current->blocked.sig[0];
 	sigset_t newset;
 
-	siginitset(&newset, newmask & ~(sigmask(SIGKILL) | sigmask(SIGSTOP)));
 	set_current_blocked(&newset);
 
 	return old;
@@ -3236,11 +3253,8 @@
 
 #endif
 
-#ifdef HAVE_SET_RESTORE_SIGMASK
 int sigsuspend(sigset_t *set)
 {
-	sigdelsetmask(set, sigmask(SIGKILL)|sigmask(SIGSTOP));
-
 	current->saved_sigmask = current->blocked;
 	set_current_blocked(set);
 
@@ -3249,7 +3263,6 @@
 	set_restore_sigmask();
 	return -ERESTARTNOHAND;
 }
-#endif
 
 #ifdef __ARCH_WANT_SYS_RT_SIGSUSPEND
 /**
diff --git a/kernel/smpboot.c b/kernel/smpboot.c
index e1a797e..98f60c5 100644
--- a/kernel/smpboot.c
+++ b/kernel/smpboot.c
@@ -31,6 +31,12 @@
 	per_cpu(idle_threads, smp_processor_id()) = current;
 }
 
+/**
+ * idle_init - Initialize the idle thread for a cpu
+ * @cpu:	The cpu for which the idle thread should be initialized
+ *
+ * Creates the thread if it does not exist.
+ */
 static inline void idle_init(unsigned int cpu)
 {
 	struct task_struct *tsk = per_cpu(idle_threads, cpu);
@@ -45,17 +51,16 @@
 }
 
 /**
- * idle_thread_init - Initialize the idle thread for a cpu
- * @cpu:	The cpu for which the idle thread should be initialized
- *
- * Creates the thread if it does not exist.
+ * idle_threads_init - Initialize idle threads for all cpus
  */
 void __init idle_threads_init(void)
 {
-	unsigned int cpu;
+	unsigned int cpu, boot_cpu;
+
+	boot_cpu = smp_processor_id();
 
 	for_each_possible_cpu(cpu) {
-		if (cpu != smp_processor_id())
+		if (cpu != boot_cpu)
 			idle_init(cpu);
 	}
 }
diff --git a/kernel/sys.c b/kernel/sys.c
index 6df4262..e0c8ffc 100644
--- a/kernel/sys.c
+++ b/kernel/sys.c
@@ -36,6 +36,8 @@
 #include <linux/personality.h>
 #include <linux/ptrace.h>
 #include <linux/fs_struct.h>
+#include <linux/file.h>
+#include <linux/mount.h>
 #include <linux/gfp.h>
 #include <linux/syscore_ops.h>
 #include <linux/version.h>
@@ -1378,8 +1380,8 @@
 		memcpy(u->nodename, tmp, len);
 		memset(u->nodename + len, 0, sizeof(u->nodename) - len);
 		errno = 0;
+		uts_proc_notify(UTS_PROC_HOSTNAME);
 	}
-	uts_proc_notify(UTS_PROC_HOSTNAME);
 	up_write(&uts_sem);
 	return errno;
 }
@@ -1429,8 +1431,8 @@
 		memcpy(u->domainname, tmp, len);
 		memset(u->domainname + len, 0, sizeof(u->domainname) - len);
 		errno = 0;
+		uts_proc_notify(UTS_PROC_DOMAINNAME);
 	}
-	uts_proc_notify(UTS_PROC_DOMAINNAME);
 	up_write(&uts_sem);
 	return errno;
 }
@@ -1784,77 +1786,101 @@
 }
 
 #ifdef CONFIG_CHECKPOINT_RESTORE
+static int prctl_set_mm_exe_file(struct mm_struct *mm, unsigned int fd)
+{
+	struct vm_area_struct *vma;
+	struct file *exe_file;
+	struct dentry *dentry;
+	int err;
+
+	exe_file = fget(fd);
+	if (!exe_file)
+		return -EBADF;
+
+	dentry = exe_file->f_path.dentry;
+
+	/*
+	 * Because the original mm->exe_file points to an executable file, make
+	 * sure that this one is executable as well, to keep the overall
+	 * picture consistent.
+	 */
+	err = -EACCES;
+	if (!S_ISREG(dentry->d_inode->i_mode)	||
+	    exe_file->f_path.mnt->mnt_flags & MNT_NOEXEC)
+		goto exit;
+
+	err = inode_permission(dentry->d_inode, MAY_EXEC);
+	if (err)
+		goto exit;
+
+	down_write(&mm->mmap_sem);
+
+	/*
+	 * Forbid changing mm->exe_file while other files are mapped.
+	 */
+	err = -EBUSY;
+	for (vma = mm->mmap; vma; vma = vma->vm_next) {
+		if (vma->vm_file && !path_equal(&vma->vm_file->f_path,
+						&exe_file->f_path))
+			goto exit_unlock;
+	}
+
+	/*
+	 * The symlink can be changed only once, just to disallow arbitrary
+	 * transitions malicious software might bring in. This means one
+	 * could make a snapshot over all processes running and monitor
+	 * /proc/pid/exe changes to notice unusual activity if needed.
+	 */
+	err = -EPERM;
+	if (test_and_set_bit(MMF_EXE_FILE_CHANGED, &mm->flags))
+		goto exit_unlock;
+
+	set_mm_exe_file(mm, exe_file);
+exit_unlock:
+	up_write(&mm->mmap_sem);
+
+exit:
+	fput(exe_file);
+	return err;
+}
+
 static int prctl_set_mm(int opt, unsigned long addr,
 			unsigned long arg4, unsigned long arg5)
 {
 	unsigned long rlim = rlimit(RLIMIT_DATA);
-	unsigned long vm_req_flags;
-	unsigned long vm_bad_flags;
-	struct vm_area_struct *vma;
-	int error = 0;
 	struct mm_struct *mm = current->mm;
+	struct vm_area_struct *vma;
+	int error;
 
-	if (arg4 | arg5)
+	if (arg5 || (arg4 && opt != PR_SET_MM_AUXV))
 		return -EINVAL;
 
 	if (!capable(CAP_SYS_RESOURCE))
 		return -EPERM;
 
-	if (addr >= TASK_SIZE)
+	if (opt == PR_SET_MM_EXE_FILE)
+		return prctl_set_mm_exe_file(mm, (unsigned int)addr);
+
+	if (addr >= TASK_SIZE || addr < mmap_min_addr)
 		return -EINVAL;
 
+	error = -EINVAL;
+
 	down_read(&mm->mmap_sem);
 	vma = find_vma(mm, addr);
 
-	if (opt != PR_SET_MM_START_BRK && opt != PR_SET_MM_BRK) {
-		/* It must be existing VMA */
-		if (!vma || vma->vm_start > addr)
-			goto out;
-	}
-
-	error = -EINVAL;
 	switch (opt) {
 	case PR_SET_MM_START_CODE:
+		mm->start_code = addr;
+		break;
 	case PR_SET_MM_END_CODE:
-		vm_req_flags = VM_READ | VM_EXEC;
-		vm_bad_flags = VM_WRITE | VM_MAYSHARE;
-
-		if ((vma->vm_flags & vm_req_flags) != vm_req_flags ||
-		    (vma->vm_flags & vm_bad_flags))
-			goto out;
-
-		if (opt == PR_SET_MM_START_CODE)
-			mm->start_code = addr;
-		else
-			mm->end_code = addr;
+		mm->end_code = addr;
 		break;
-
 	case PR_SET_MM_START_DATA:
-	case PR_SET_MM_END_DATA:
-		vm_req_flags = VM_READ | VM_WRITE;
-		vm_bad_flags = VM_EXEC | VM_MAYSHARE;
-
-		if ((vma->vm_flags & vm_req_flags) != vm_req_flags ||
-		    (vma->vm_flags & vm_bad_flags))
-			goto out;
-
-		if (opt == PR_SET_MM_START_DATA)
-			mm->start_data = addr;
-		else
-			mm->end_data = addr;
+		mm->start_data = addr;
 		break;
-
-	case PR_SET_MM_START_STACK:
-
-#ifdef CONFIG_STACK_GROWSUP
-		vm_req_flags = VM_READ | VM_WRITE | VM_GROWSUP;
-#else
-		vm_req_flags = VM_READ | VM_WRITE | VM_GROWSDOWN;
-#endif
-		if ((vma->vm_flags & vm_req_flags) != vm_req_flags)
-			goto out;
-
-		mm->start_stack = addr;
+	case PR_SET_MM_END_DATA:
+		mm->end_data = addr;
 		break;
 
 	case PR_SET_MM_START_BRK:
@@ -1881,24 +1907,89 @@
 		mm->brk = addr;
 		break;
 
+	/*
+	 * If the command line arguments and environment
+	 * are placed somewhere else on the stack, they can
+	 * be set up here: ARG_START/END for the command
+	 * line arguments and ENV_START/END for the
+	 * environment.
+	 */
+	case PR_SET_MM_START_STACK:
+	case PR_SET_MM_ARG_START:
+	case PR_SET_MM_ARG_END:
+	case PR_SET_MM_ENV_START:
+	case PR_SET_MM_ENV_END:
+		if (!vma) {
+			error = -EFAULT;
+			goto out;
+		}
+		if (opt == PR_SET_MM_START_STACK)
+			mm->start_stack = addr;
+		else if (opt == PR_SET_MM_ARG_START)
+			mm->arg_start = addr;
+		else if (opt == PR_SET_MM_ARG_END)
+			mm->arg_end = addr;
+		else if (opt == PR_SET_MM_ENV_START)
+			mm->env_start = addr;
+		else if (opt == PR_SET_MM_ENV_END)
+			mm->env_end = addr;
+		break;
+
+	/*
+	 * This doesn't move the auxiliary vector itself
+	 * since it's pinned to mm_struct, but it allows
+	 * the vector to be filled with new values. It's up
+	 * to the caller to provide sane values here,
+	 * otherwise user space tools which use this
+	 * vector might be unhappy.
+	 */
+	case PR_SET_MM_AUXV: {
+		unsigned long user_auxv[AT_VECTOR_SIZE];
+
+		if (arg4 > sizeof(user_auxv))
+			goto out;
+		up_read(&mm->mmap_sem);
+
+		if (copy_from_user(user_auxv, (const void __user *)addr, arg4))
+			return -EFAULT;
+
+		/* Make sure the last entry is always AT_NULL */
+		user_auxv[AT_VECTOR_SIZE - 2] = 0;
+		user_auxv[AT_VECTOR_SIZE - 1] = 0;
+
+		BUILD_BUG_ON(sizeof(user_auxv) != sizeof(mm->saved_auxv));
+
+		task_lock(current);
+		memcpy(mm->saved_auxv, user_auxv, arg4);
+		task_unlock(current);
+
+		return 0;
+	}
 	default:
-		error = -EINVAL;
 		goto out;
 	}
 
 	error = 0;
-
 out:
 	up_read(&mm->mmap_sem);
-
 	return error;
 }
+
+static int prctl_get_tid_address(struct task_struct *me, int __user **tid_addr)
+{
+	return put_user(me->clear_child_tid, tid_addr);
+}
+
 #else /* CONFIG_CHECKPOINT_RESTORE */
 static int prctl_set_mm(int opt, unsigned long addr,
 			unsigned long arg4, unsigned long arg5)
 {
 	return -EINVAL;
 }
+static int prctl_get_tid_address(struct task_struct *me, int __user **tid_addr)
+{
+	return -EINVAL;
+}
 #endif
 
 SYSCALL_DEFINE5(prctl, int, option, unsigned long, arg2, unsigned long, arg3,
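
The PR_SET_MM_AUXV case above copies at most sizeof(saved_auxv) bytes from user space and then forces the last key/value pair to zero so the vector always ends with AT_NULL. A small sketch of that copy-then-terminate step; AUXV_SLOTS is a made-up size standing in for AT_VECTOR_SIZE:

#include <string.h>
#include <stdio.h>

#define AUXV_SLOTS	32	/* illustrative, the kernel uses AT_VECTOR_SIZE */

static unsigned long saved_auxv[AUXV_SLOTS];

static int set_auxv(const unsigned long *user_auxv, size_t len)
{
	if (len > sizeof(saved_auxv))
		return -1;

	memcpy(saved_auxv, user_auxv, len);

	/* make sure the last entry is always AT_NULL (key 0, value 0) */
	saved_auxv[AUXV_SLOTS - 2] = 0;
	saved_auxv[AUXV_SLOTS - 1] = 0;
	return 0;
}

int main(void)
{
	unsigned long v[4] = { 6, 4096, 0, 0 };	/* e.g. an AT_PAGESZ pair */

	printf("set_auxv: %d, slot0=%lu slot1=%lu\n",
	       set_auxv(v, sizeof(v)), saved_auxv[0], saved_auxv[1]);
	return 0;
}
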
@@ -2053,6 +2144,9 @@
 		case PR_SET_MM:
 			error = prctl_set_mm(arg2, arg3, arg4, arg5);
 			break;
+		case PR_GET_TID_ADDRESS:
+			error = prctl_get_tid_address(me, (int __user **)arg2);
+			break;
 		case PR_SET_CHILD_SUBREAPER:
 			me->signal->is_child_subreaper = !!arg2;
 			error = 0;
@@ -2114,7 +2208,6 @@
 		NULL
 	};
 	int ret = -ENOMEM;
-	struct subprocess_info *info;
 
 	if (argv == NULL) {
 		printk(KERN_WARNING "%s failed to allocate memory for \"%s\"\n",
@@ -2122,18 +2215,16 @@
 		goto out;
 	}
 
-	info = call_usermodehelper_setup(argv[0], argv, envp, GFP_ATOMIC);
-	if (info == NULL) {
+	ret = call_usermodehelper_fns(argv[0], argv, envp, UMH_NO_WAIT,
+				      NULL, argv_cleanup, NULL);
+out:
+	if (likely(!ret))
+		return 0;
+
+	if (ret == -ENOMEM)
 		argv_free(argv);
-		goto out;
-	}
 
-	call_usermodehelper_setfns(info, NULL, argv_cleanup, NULL);
-
-	ret = call_usermodehelper_exec(info, UMH_NO_WAIT);
-
-  out:
-	if (ret && force) {
+	if (force) {
 		printk(KERN_WARNING "Failed to start orderly shutdown: "
 		       "forcing the issue\n");
 
diff --git a/kernel/sys_ni.c b/kernel/sys_ni.c
index 47bfa16..dbff751 100644
--- a/kernel/sys_ni.c
+++ b/kernel/sys_ni.c
@@ -203,3 +203,6 @@
 cond_syscall(sys_name_to_handle_at);
 cond_syscall(sys_open_by_handle_at);
 cond_syscall(compat_sys_open_by_handle_at);
+
+/* compare kernel pointers */
+cond_syscall(sys_kcmp);
diff --git a/kernel/task_work.c b/kernel/task_work.c
new file mode 100644
index 0000000..82d1c79
--- /dev/null
+++ b/kernel/task_work.c
@@ -0,0 +1,84 @@
+#include <linux/spinlock.h>
+#include <linux/task_work.h>
+#include <linux/tracehook.h>
+
+int
+task_work_add(struct task_struct *task, struct task_work *twork, bool notify)
+{
+	unsigned long flags;
+	int err = -ESRCH;
+
+#ifndef TIF_NOTIFY_RESUME
+	if (notify)
+		return -ENOTSUPP;
+#endif
+	/*
+	 * We must not insert the new work if the task has already passed
+	 * exit_task_work(). We rely on do_exit()->raw_spin_unlock_wait()
+	 * and check PF_EXITING under pi_lock.
+	 */
+	raw_spin_lock_irqsave(&task->pi_lock, flags);
+	if (likely(!(task->flags & PF_EXITING))) {
+		hlist_add_head(&twork->hlist, &task->task_works);
+		err = 0;
+	}
+	raw_spin_unlock_irqrestore(&task->pi_lock, flags);
+
+	/* test_and_set_bit() implies mb(), see tracehook_notify_resume(). */
+	if (likely(!err) && notify)
+		set_notify_resume(task);
+	return err;
+}
+
+struct task_work *
+task_work_cancel(struct task_struct *task, task_work_func_t func)
+{
+	unsigned long flags;
+	struct task_work *twork;
+	struct hlist_node *pos;
+
+	raw_spin_lock_irqsave(&task->pi_lock, flags);
+	hlist_for_each_entry(twork, pos, &task->task_works, hlist) {
+		if (twork->func == func) {
+			hlist_del(&twork->hlist);
+			goto found;
+		}
+	}
+	twork = NULL;
+ found:
+	raw_spin_unlock_irqrestore(&task->pi_lock, flags);
+
+	return twork;
+}
+
+void task_work_run(void)
+{
+	struct task_struct *task = current;
+	struct hlist_head task_works;
+	struct hlist_node *pos;
+
+	raw_spin_lock_irq(&task->pi_lock);
+	hlist_move_list(&task->task_works, &task_works);
+	raw_spin_unlock_irq(&task->pi_lock);
+
+	if (unlikely(hlist_empty(&task_works)))
+		return;
+	/*
+	 * We use an hlist to save space in task_struct, but we want FIFO order.
+	 * Find the last entry (the list should be short), then process the
+	 * entries in reverse order.
+	 */
+	for (pos = task_works.first; pos->next; pos = pos->next)
+		;
+
+	for (;;) {
+		struct hlist_node **pprev = pos->pprev;
+		struct task_work *twork = container_of(pos, struct task_work,
+							hlist);
+		twork->func(twork);
+
+		if (pprev == &task_works.first)
+			break;
+		pos = container_of(pprev, struct hlist_node, next);
+	}
+}
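
task_work_run() gets FIFO behaviour out of an hlist that is pushed at the head: it walks to the last node and then steps back through the pprev pointers. The requirement itself (callbacks queued newest-at-the-head but run oldest-first) can be pictured with this little singly-linked sketch that simply recurses to the tail before invoking anything; it is illustrative only, not the kernel structure:

#include <stdio.h>

struct work {
	struct work *next;
	void (*func)(struct work *);
};

/* push at the head: cheap, but gives LIFO order */
static void work_add(struct work **head, struct work *w)
{
	w->next = *head;
	*head = w;
}

/* run oldest-first by recursing to the tail before calling anything */
static void work_run_fifo(struct work *w)
{
	if (!w)
		return;
	work_run_fifo(w->next);
	w->func(w);
}

static void say(struct work *w)
{
	printf("running %p\n", (void *)w);
}

int main(void)
{
	struct work *head = NULL, a = { .func = say }, b = { .func = say };

	work_add(&head, &a);	/* queued first  */
	work_add(&head, &b);	/* queued second */
	work_run_fifo(head);	/* prints a's address, then b's */
	return 0;
}
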
diff --git a/kernel/time/clockevents.c b/kernel/time/clockevents.c
index 9cd928f..7e1ce01 100644
--- a/kernel/time/clockevents.c
+++ b/kernel/time/clockevents.c
@@ -297,8 +297,7 @@
 }
 EXPORT_SYMBOL_GPL(clockevents_register_device);
 
-static void clockevents_config(struct clock_event_device *dev,
-			       u32 freq)
+void clockevents_config(struct clock_event_device *dev, u32 freq)
 {
 	u64 sec;
 
diff --git a/kernel/time/tick-sched.c b/kernel/time/tick-sched.c
index 6a3a5b9..8699978 100644
--- a/kernel/time/tick-sched.c
+++ b/kernel/time/tick-sched.c
@@ -274,6 +274,7 @@
 static void tick_nohz_stop_sched_tick(struct tick_sched *ts)
 {
 	unsigned long seq, last_jiffies, next_jiffies, delta_jiffies;
+	unsigned long rcu_delta_jiffies;
 	ktime_t last_update, expires, now;
 	struct clock_event_device *dev = __get_cpu_var(tick_cpu_device).evtdev;
 	u64 time_delta;
@@ -322,7 +323,7 @@
 		time_delta = timekeeping_max_deferment();
 	} while (read_seqretry(&xtime_lock, seq));
 
-	if (rcu_needs_cpu(cpu) || printk_needs_cpu(cpu) ||
+	if (rcu_needs_cpu(cpu, &rcu_delta_jiffies) || printk_needs_cpu(cpu) ||
 	    arch_needs_cpu(cpu)) {
 		next_jiffies = last_jiffies + 1;
 		delta_jiffies = 1;
@@ -330,6 +331,10 @@
 		/* Get the next timer wheel timer */
 		next_jiffies = get_next_timer_interrupt(last_jiffies);
 		delta_jiffies = next_jiffies - last_jiffies;
+		if (rcu_delta_jiffies < delta_jiffies) {
+			next_jiffies = last_jiffies + rcu_delta_jiffies;
+			delta_jiffies = rcu_delta_jiffies;
+		}
 	}
 	/*
 	 * Do not stop the tick, if we are only one off
@@ -576,6 +581,7 @@
 	/* Update jiffies first */
 	select_nohz_load_balancer(0);
 	tick_do_update_jiffies64(now);
+	update_cpu_load_nohz();
 
 #ifndef CONFIG_VIRT_CPU_ACCOUNTING
 	/*
@@ -814,6 +820,16 @@
 	return HRTIMER_RESTART;
 }
 
+static int sched_skew_tick;
+
+static int __init skew_tick(char *str)
+{
+	get_option(&str, &sched_skew_tick);
+
+	return 0;
+}
+early_param("skew_tick", skew_tick);
+
 /**
  * tick_setup_sched_timer - setup the tick emulation timer
  */
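
The hunk below applies the skew_tick offset by spreading each CPU's tick start across half a tick period: offset = (tick_period / 2 / nr_cpus) * cpu. A quick stand-alone sketch of that arithmetic with invented values:

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	const uint64_t tick_period_ns = 1000000;	/* 1 ms tick, for example */
	const unsigned int nr_cpus = 4;

	for (unsigned int cpu = 0; cpu < nr_cpus; cpu++) {
		uint64_t offset = (tick_period_ns >> 1) / nr_cpus * cpu;

		printf("cpu%u tick offset: %llu ns\n",
		       cpu, (unsigned long long)offset);
	}
	return 0;
}
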
@@ -831,6 +847,14 @@
 	/* Get the next period (per cpu) */
 	hrtimer_set_expires(&ts->sched_timer, tick_init_jiffy_update());
 
+	/* Offset the tick to avert xtime_lock contention. */
+	if (sched_skew_tick) {
+		u64 offset = ktime_to_ns(tick_period) >> 1;
+		do_div(offset, num_possible_cpus());
+		offset *= smp_processor_id();
+		hrtimer_add_expires_ns(&ts->sched_timer, offset);
+	}
+
 	for (;;) {
 		hrtimer_forward(&ts->sched_timer, now, tick_period);
 		hrtimer_start_expires(&ts->sched_timer,
diff --git a/kernel/time/timekeeping.c b/kernel/time/timekeeping.c
index 6e46cac..6f46a00 100644
--- a/kernel/time/timekeeping.c
+++ b/kernel/time/timekeeping.c
@@ -962,6 +962,7 @@
 		timekeeper.xtime.tv_sec++;
 		leap = second_overflow(timekeeper.xtime.tv_sec);
 		timekeeper.xtime.tv_sec += leap;
+		timekeeper.wall_to_monotonic.tv_sec -= leap;
 	}
 
 	/* Accumulate raw time */
@@ -1077,6 +1078,7 @@
 		timekeeper.xtime.tv_sec++;
 		leap = second_overflow(timekeeper.xtime.tv_sec);
 		timekeeper.xtime.tv_sec += leap;
+		timekeeper.wall_to_monotonic.tv_sec -= leap;
 	}
 
 	timekeeping_update(false);
diff --git a/kernel/trace/ring_buffer.c b/kernel/trace/ring_buffer.c
index 6420cda..1d0f6a8 100644
--- a/kernel/trace/ring_buffer.c
+++ b/kernel/trace/ring_buffer.c
@@ -1486,6 +1486,11 @@
 	if (!buffer)
 		return size;
 
+	/* Make sure the requested buffer exists */
+	if (cpu_id != RING_BUFFER_ALL_CPUS &&
+	    !cpumask_test_cpu(cpu_id, buffer->cpumask))
+		return size;
+
 	size = DIV_ROUND_UP(size, BUF_PAGE_SIZE);
 	size *= BUF_PAGE_SIZE;
 
diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c
index 68032c6..49249c2 100644
--- a/kernel/trace/trace.c
+++ b/kernel/trace/trace.c
@@ -371,7 +371,7 @@
 void tracing_off(void)
 {
 	if (global_trace.buffer)
-		ring_buffer_record_on(global_trace.buffer);
+		ring_buffer_record_off(global_trace.buffer);
 	/*
 	 * This flag is only looked at when buffers haven't been
 	 * allocated yet. We don't really care about the race
diff --git a/kernel/watchdog.c b/kernel/watchdog.c
index e5e1d85..4b1dfba 100644
--- a/kernel/watchdog.c
+++ b/kernel/watchdog.c
@@ -372,6 +372,13 @@
 
 
 #ifdef CONFIG_HARDLOCKUP_DETECTOR
+/*
+ * People like the simple clean cpu node info on boot.
+ * Reduce the watchdog noise by only printing messages
+ * that are different from what cpu0 displayed.
+ */
+static unsigned long cpu0_err;
+
 static int watchdog_nmi_enable(int cpu)
 {
 	struct perf_event_attr *wd_attr;
@@ -390,11 +397,21 @@
 
 	/* Try to register using hardware perf events */
 	event = perf_event_create_kernel_counter(wd_attr, cpu, NULL, watchdog_overflow_callback, NULL);
+
+	/* save cpu0 error for future comparison */
+	if (cpu == 0 && IS_ERR(event))
+		cpu0_err = PTR_ERR(event);
+
 	if (!IS_ERR(event)) {
-		pr_info("enabled, takes one hw-pmu counter.\n");
+		/* only print for cpu0 or different than cpu0 */
+		if (cpu == 0 || cpu0_err)
+			pr_info("enabled on all CPUs, permanently consumes one hw-PMU counter.\n");
 		goto out_save;
 	}
 
+	/* skip displaying the same error again */
+	if (cpu > 0 && (PTR_ERR(event) == cpu0_err))
+		return PTR_ERR(event);
 
 	/* vary the KERN level based on the returned errno */
 	if (PTR_ERR(event) == -EOPNOTSUPP)
diff --git a/lib/Kconfig b/lib/Kconfig
index 98230ac..a9e1540 100644
--- a/lib/Kconfig
+++ b/lib/Kconfig
@@ -19,6 +19,9 @@
 config GENERIC_STRNCPY_FROM_USER
 	bool
 
+config GENERIC_STRNLEN_USER
+	bool
+
 config GENERIC_FIND_FIRST_BIT
 	bool
 
@@ -36,6 +39,9 @@
 	boolean
 	default n
 
+config STMP_DEVICE
+	bool
+
 config CRC_CCITT
 	tristate "CRC-CCITT functions"
 	help
diff --git a/lib/Kconfig.debug b/lib/Kconfig.debug
index a42d3ae..ff5bdee 100644
--- a/lib/Kconfig.debug
+++ b/lib/Kconfig.debug
@@ -241,6 +241,26 @@
 	default 0 if !BOOTPARAM_SOFTLOCKUP_PANIC
 	default 1 if BOOTPARAM_SOFTLOCKUP_PANIC
 
+config PANIC_ON_OOPS
+	bool "Panic on Oops" if EXPERT
+	default n
+	help
+	  Say Y here to enable the kernel to panic when it oopses. This
+	  has the same effect as setting oops=panic on the kernel command
+	  line.
+
+	  This feature is useful to ensure that the kernel does not do
+	  anything erroneous after an oops which could result in data
+	  corruption or other issues.
+
+	  Say N if unsure.
+
+config PANIC_ON_OOPS_VALUE
+	int
+	range 0 1
+	default 0 if !PANIC_ON_OOPS
+	default 1 if PANIC_ON_OOPS
+
 config DETECT_HUNG_TASK
 	bool "Detect Hung Tasks"
 	depends on DEBUG_KERNEL
diff --git a/lib/Makefile b/lib/Makefile
index b98df50..8c31a0c 100644
--- a/lib/Makefile
+++ b/lib/Makefile
@@ -126,6 +126,9 @@
 obj-$(CONFIG_DDR) += jedec_ddr_data.o
 
 obj-$(CONFIG_GENERIC_STRNCPY_FROM_USER) += strncpy_from_user.o
+obj-$(CONFIG_GENERIC_STRNLEN_USER) += strnlen_user.o
+
+obj-$(CONFIG_STMP_DEVICE) += stmp_device.o
 
 hostprogs-y	:= gen_crc32table
 clean-files	:= crc32table.h
diff --git a/lib/bitmap.c b/lib/bitmap.c
index b5a8b6a..06fdfa1 100644
--- a/lib/bitmap.c
+++ b/lib/bitmap.c
@@ -369,7 +369,8 @@
  * @nmaskbits: size of bitmap, in bits
  *
  * Exactly @nmaskbits bits are displayed.  Hex digits are grouped into
- * comma-separated sets of eight digits per set.
+ * comma-separated sets of eight digits per set.  Returns the number of
+ * characters which were written to *buf, excluding the trailing \0.
  */
 int bitmap_scnprintf(char *buf, unsigned int buflen,
 	const unsigned long *maskp, int nmaskbits)
@@ -517,8 +518,8 @@
  *
  * Helper routine for bitmap_scnlistprintf().  Write decimal number
  * or range to buf, suppressing output past buf+buflen, with optional
- * comma-prefix.  Return len of what would be written to buf, if it
- * all fit.
+ * comma-prefix.  Return len of what was written to *buf, excluding the
+ * trailing \0.
  */
 static inline int bscnl_emit(char *buf, int buflen, int rbot, int rtop, int len)
 {
@@ -544,9 +545,8 @@
  * the range.  Output format is compatible with the format
  * accepted as input by bitmap_parselist().
  *
- * The return value is the number of characters which would be
- * generated for the given input, excluding the trailing '\0', as
- * per ISO C99.
+ * The return value is the number of characters which were written to *buf
+ * excluding the trailing '\0', as per ISO C99's scnprintf.
  */
 int bitmap_scnlistprintf(char *buf, unsigned int buflen,
 	const unsigned long *maskp, int nmaskbits)
diff --git a/lib/btree.c b/lib/btree.c
index e5ec1e9..f9a4846 100644
--- a/lib/btree.c
+++ b/lib/btree.c
@@ -319,8 +319,8 @@
 
 	if (head->height == 0)
 		return NULL;
-retry:
 	longcpy(key, __key, geo->keylen);
+retry:
 	dec_key(geo, key);
 
 	node = head->node;
@@ -351,7 +351,7 @@
 	}
 miss:
 	if (retry_key) {
-		__key = retry_key;
+		longcpy(key, retry_key, geo->keylen);
 		retry_key = NULL;
 		goto retry;
 	}
@@ -509,6 +509,7 @@
 int btree_insert(struct btree_head *head, struct btree_geo *geo,
 		unsigned long *key, void *val, gfp_t gfp)
 {
+	BUG_ON(!val);
 	return btree_insert_level(head, geo, key, val, 1, gfp);
 }
 EXPORT_SYMBOL_GPL(btree_insert);
diff --git a/lib/dma-debug.c b/lib/dma-debug.c
index 13ef233..518aea7 100644
--- a/lib/dma-debug.c
+++ b/lib/dma-debug.c
@@ -430,7 +430,7 @@
  */
 static struct dma_debug_entry *dma_entry_alloc(void)
 {
-	struct dma_debug_entry *entry = NULL;
+	struct dma_debug_entry *entry;
 	unsigned long flags;
 
 	spin_lock_irqsave(&free_entries_lock, flags);
@@ -438,11 +438,14 @@
 	if (list_empty(&free_entries)) {
 		pr_err("DMA-API: debugging out of memory - disabling\n");
 		global_disable = true;
-		goto out;
+		spin_unlock_irqrestore(&free_entries_lock, flags);
+		return NULL;
 	}
 
 	entry = __dma_entry_alloc();
 
+	spin_unlock_irqrestore(&free_entries_lock, flags);
+
 #ifdef CONFIG_STACKTRACE
 	entry->stacktrace.max_entries = DMA_DEBUG_STACKTRACE_ENTRIES;
 	entry->stacktrace.entries = entry->st_entries;
@@ -450,9 +453,6 @@
 	save_stack_trace(&entry->stacktrace);
 #endif
 
-out:
-	spin_unlock_irqrestore(&free_entries_lock, flags);
-
 	return entry;
 }
 
diff --git a/lib/dynamic_queue_limits.c b/lib/dynamic_queue_limits.c
index 6ab4587..0777c5a 100644
--- a/lib/dynamic_queue_limits.c
+++ b/lib/dynamic_queue_limits.c
@@ -10,23 +10,27 @@
 #include <linux/jiffies.h>
 #include <linux/dynamic_queue_limits.h>
 
-#define POSDIFF(A, B) ((A) > (B) ? (A) - (B) : 0)
+#define POSDIFF(A, B) ((int)((A) - (B)) > 0 ? (A) - (B) : 0)
+#define AFTER_EQ(A, B) ((int)((A) - (B)) >= 0)
 
 /* Records completed count and recalculates the queue limit */
 void dql_completed(struct dql *dql, unsigned int count)
 {
 	unsigned int inprogress, prev_inprogress, limit;
-	unsigned int ovlimit, all_prev_completed, completed;
+	unsigned int ovlimit, completed, num_queued;
+	bool all_prev_completed;
+
+	num_queued = ACCESS_ONCE(dql->num_queued);
 
 	/* Can't complete more than what's in queue */
-	BUG_ON(count > dql->num_queued - dql->num_completed);
+	BUG_ON(count > num_queued - dql->num_completed);
 
 	completed = dql->num_completed + count;
 	limit = dql->limit;
-	ovlimit = POSDIFF(dql->num_queued - dql->num_completed, limit);
-	inprogress = dql->num_queued - completed;
+	ovlimit = POSDIFF(num_queued - dql->num_completed, limit);
+	inprogress = num_queued - completed;
 	prev_inprogress = dql->prev_num_queued - dql->num_completed;
-	all_prev_completed = POSDIFF(completed, dql->prev_num_queued);
+	all_prev_completed = AFTER_EQ(completed, dql->prev_num_queued);
 
 	if ((ovlimit && !inprogress) ||
 	    (dql->prev_ovlimit && all_prev_completed)) {
@@ -104,7 +108,7 @@
 	dql->prev_ovlimit = ovlimit;
 	dql->prev_last_obj_cnt = dql->last_obj_cnt;
 	dql->num_completed = completed;
-	dql->prev_num_queued = dql->num_queued;
+	dql->prev_num_queued = num_queued;
 }
 EXPORT_SYMBOL(dql_completed);
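
The new POSDIFF()/AFTER_EQ() macros compare counters that are allowed to wrap around, so the difference is cast to a signed int before testing its sign, the same idea as the jiffies time_after() helpers. A short demonstration of why the cast matters:

#include <stdio.h>

#define POSDIFF(A, B)  ((int)((A) - (B)) > 0 ? (A) - (B) : 0)
#define AFTER_EQ(A, B) ((int)((A) - (B)) >= 0)

int main(void)
{
	/* counter wrapped: the "newer" value is numerically smaller */
	unsigned int older = 0xfffffff0u;
	unsigned int newer = 0x00000010u;

	printf("AFTER_EQ(newer, older) = %d\n", AFTER_EQ(newer, older)); /* 1 */
	printf("POSDIFF(newer, older)  = %u\n", POSDIFF(newer, older));  /* 32 */
	printf("AFTER_EQ(older, newer) = %d\n", AFTER_EQ(older, newer)); /* 0 */
	return 0;
}

A plain unsigned comparison would treat the wrapped counter as far "behind" the old one and compute a huge bogus difference.
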
 
diff --git a/lib/fault-inject.c b/lib/fault-inject.c
index 6805453..f7210ad 100644
--- a/lib/fault-inject.c
+++ b/lib/fault-inject.c
@@ -101,6 +101,10 @@
 
 bool should_fail(struct fault_attr *attr, ssize_t size)
 {
+	/* No need to check any other properties if the probability is 0 */
+	if (attr->probability == 0)
+		return false;
+
 	if (attr->task_filter && !fail_task(attr, current))
 		return false;
 
diff --git a/lib/list_debug.c b/lib/list_debug.c
index 3810b48..23a5e03 100644
--- a/lib/list_debug.c
+++ b/lib/list_debug.c
@@ -31,6 +31,9 @@
 		"list_add corruption. prev->next should be "
 		"next (%p), but was %p. (prev=%p).\n",
 		next, prev->next, prev);
+	WARN(new == prev || new == next,
+	     "list_add double add: new=%p, prev=%p, next=%p.\n",
+	     new, prev, next);
 	next->prev = new;
 	new->next = next;
 	new->prev = prev;
diff --git a/lib/radix-tree.c b/lib/radix-tree.c
index 86516f5..e796429 100644
--- a/lib/radix-tree.c
+++ b/lib/radix-tree.c
@@ -73,11 +73,24 @@
 static struct kmem_cache *radix_tree_node_cachep;
 
 /*
+ * The radix tree is variable-height, so an insert operation not only has
+ * to build the branch to its corresponding item, it also has to build the
+ * branch to existing items if the size has to be increased (by
+ * radix_tree_extend).
+ *
+ * The worst case is a zero height tree with just a single item at index 0,
+ * and then inserting an item at index ULONG_MAX. This requires 2 new branches
+ * of RADIX_TREE_MAX_PATH size to be created, with only the root node shared.
+ * Hence:
+ */
+#define RADIX_TREE_PRELOAD_SIZE (RADIX_TREE_MAX_PATH * 2 - 1)
+
+/*
  * Per-cpu pool of preloaded nodes
  */
 struct radix_tree_preload {
 	int nr;
-	struct radix_tree_node *nodes[RADIX_TREE_MAX_PATH];
+	struct radix_tree_node *nodes[RADIX_TREE_PRELOAD_SIZE];
 };
 static DEFINE_PER_CPU(struct radix_tree_preload, radix_tree_preloads) = { 0, };
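
The worst case described above is two full root-to-leaf branches sharing only the root, hence 2 * RADIX_TREE_MAX_PATH - 1 preloaded nodes. A tiny calculation of those numbers for a 6-bit map shift over a 64-bit index space (the shift is just a common default, not taken from this patch):

#include <stdio.h>

int main(void)
{
	const unsigned int map_shift = 6;			/* 64 slots per node */
	const unsigned int index_bits = 64;
	/* smallest number of levels that can cover the whole index space */
	const unsigned int max_path =
		(index_bits + map_shift - 1) / map_shift;	/* 11 */
	const unsigned int preload_size = max_path * 2 - 1;	/* 21 */

	printf("RADIX_TREE_MAX_PATH     = %u\n", max_path);
	printf("RADIX_TREE_PRELOAD_SIZE = %u\n", preload_size);
	return 0;
}
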
 
@@ -673,6 +686,9 @@
 	 * during iterating; it can be zero only at the beginning.
 	 * And we cannot overflow iter->next_index in a single step,
 	 * because RADIX_TREE_MAP_SHIFT < BITS_PER_LONG.
+	 *
+	 * This condition is also used by radix_tree_next_slot() to stop
+	 * contiguous iterating, and to forbid switching to the next chunk.
 	 */
 	index = iter->next_index;
 	if (!index && iter->index)
diff --git a/lib/raid6/recov.c b/lib/raid6/recov.c
index 1805a5c..a95bccb 100644
--- a/lib/raid6/recov.c
+++ b/lib/raid6/recov.c
@@ -22,8 +22,8 @@
 #include <linux/raid/pq.h>
 
 /* Recover two failed data blocks. */
-void raid6_2data_recov_intx1(int disks, size_t bytes, int faila, int failb,
-		       void **ptrs)
+static void raid6_2data_recov_intx1(int disks, size_t bytes, int faila,
+		int failb, void **ptrs)
 {
 	u8 *p, *q, *dp, *dq;
 	u8 px, qx, db;
@@ -66,7 +66,8 @@
 }
 
 /* Recover failure of one data block plus the P block */
-void raid6_datap_recov_intx1(int disks, size_t bytes, int faila, void **ptrs)
+static void raid6_datap_recov_intx1(int disks, size_t bytes, int faila,
+		void **ptrs)
 {
 	u8 *p, *q, *dq;
 	const u8 *qmul;		/* Q multiplier table */
diff --git a/lib/raid6/recov_ssse3.c b/lib/raid6/recov_ssse3.c
index 37ae619..ecb710c 100644
--- a/lib/raid6/recov_ssse3.c
+++ b/lib/raid6/recov_ssse3.c
@@ -19,8 +19,8 @@
 		boot_cpu_has(X86_FEATURE_SSSE3);
 }
 
-void raid6_2data_recov_ssse3(int disks, size_t bytes, int faila, int failb,
-		       void **ptrs)
+static void raid6_2data_recov_ssse3(int disks, size_t bytes, int faila,
+		int failb, void **ptrs)
 {
 	u8 *p, *q, *dp, *dq;
 	const u8 *pbmul;	/* P multiplier table for B data */
@@ -194,7 +194,8 @@
 }
 
 
-void raid6_datap_recov_ssse3(int disks, size_t bytes, int faila, void **ptrs)
+static void raid6_datap_recov_ssse3(int disks, size_t bytes, int faila,
+		void **ptrs)
 {
 	u8 *p, *q, *dq;
 	const u8 *qmul;		/* Q multiplier table */
diff --git a/lib/spinlock_debug.c b/lib/spinlock_debug.c
index 525d160..e91fbc2 100644
--- a/lib/spinlock_debug.c
+++ b/lib/spinlock_debug.c
@@ -58,7 +58,7 @@
 	printk(KERN_EMERG "BUG: spinlock %s on CPU#%d, %s/%d\n",
 		msg, raw_smp_processor_id(),
 		current->comm, task_pid_nr(current));
-	printk(KERN_EMERG " lock: %p, .magic: %08x, .owner: %s/%d, "
+	printk(KERN_EMERG " lock: %ps, .magic: %08x, .owner: %s/%d, "
 			".owner_cpu: %d\n",
 		lock, lock->magic,
 		owner ? owner->comm : "<none>",
@@ -118,7 +118,7 @@
 		/* lockup suspected: */
 		if (print_once) {
 			print_once = 0;
-			spin_dump(lock, "lockup");
+			spin_dump(lock, "lockup suspected");
 #ifdef CONFIG_SMP
 			trigger_all_cpu_backtrace();
 #endif
diff --git a/lib/stmp_device.c b/lib/stmp_device.c
new file mode 100644
index 0000000..8ac9bcc
--- /dev/null
+++ b/lib/stmp_device.c
@@ -0,0 +1,80 @@
+/*
+ * Copyright (C) 1999 ARM Limited
+ * Copyright (C) 2000 Deep Blue Solutions Ltd
+ * Copyright 2006-2007,2010 Freescale Semiconductor, Inc. All Rights Reserved.
+ * Copyright 2008 Juergen Beisert, kernel@pengutronix.de
+ * Copyright 2009 Ilya Yanok, Emcraft Systems Ltd, yanok@emcraft.com
+ * Copyright (C) 2011 Wolfram Sang, Pengutronix e.K.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+#include <linux/io.h>
+#include <linux/errno.h>
+#include <linux/delay.h>
+#include <linux/module.h>
+#include <linux/stmp_device.h>
+
+#define STMP_MODULE_CLKGATE	(1 << 30)
+#define STMP_MODULE_SFTRST	(1 << 31)
+
+/*
+ * Clear the bit and poll until it is cleared.  This is usually called with
+ * a reset address and a mask of either SFTRST (bit 31) or CLKGATE
+ * (bit 30).
+ */
+static int stmp_clear_poll_bit(void __iomem *addr, u32 mask)
+{
+	int timeout = 0x400;
+
+	writel(mask, addr + STMP_OFFSET_REG_CLR);
+	udelay(1);
+	while ((readl(addr) & mask) && --timeout)
+		/* nothing */;
+
+	return !timeout;
+}
+
+int stmp_reset_block(void __iomem *reset_addr)
+{
+	int ret;
+	int timeout = 0x400;
+
+	/* clear and poll SFTRST */
+	ret = stmp_clear_poll_bit(reset_addr, STMP_MODULE_SFTRST);
+	if (unlikely(ret))
+		goto error;
+
+	/* clear CLKGATE */
+	writel(STMP_MODULE_CLKGATE, reset_addr + STMP_OFFSET_REG_CLR);
+
+	/* set SFTRST to reset the block */
+	writel(STMP_MODULE_SFTRST, reset_addr + STMP_OFFSET_REG_SET);
+	udelay(1);
+
+	/* poll CLKGATE becoming set */
+	while ((!(readl(reset_addr) & STMP_MODULE_CLKGATE)) && --timeout)
+		/* nothing */;
+	if (unlikely(!timeout))
+		goto error;
+
+	/* clear and poll SFTRST */
+	ret = stmp_clear_poll_bit(reset_addr, STMP_MODULE_SFTRST);
+	if (unlikely(ret))
+		goto error;
+
+	/* clear and poll CLKGATE */
+	ret = stmp_clear_poll_bit(reset_addr, STMP_MODULE_CLKGATE);
+	if (unlikely(ret))
+		goto error;
+
+	return 0;
+
+error:
+	pr_err("%s(%p): module reset timeout\n", __func__, reset_addr);
+	return -ETIMEDOUT;
+}
+EXPORT_SYMBOL(stmp_reset_block);
diff --git a/lib/string_helpers.c b/lib/string_helpers.c
index dd4ece3..1cffc22 100644
--- a/lib/string_helpers.c
+++ b/lib/string_helpers.c
@@ -23,15 +23,15 @@
 int string_get_size(u64 size, const enum string_size_units units,
 		    char *buf, int len)
 {
-	const char *units_10[] = { "B", "kB", "MB", "GB", "TB", "PB",
+	static const char *units_10[] = { "B", "kB", "MB", "GB", "TB", "PB",
 				   "EB", "ZB", "YB", NULL};
-	const char *units_2[] = {"B", "KiB", "MiB", "GiB", "TiB", "PiB",
+	static const char *units_2[] = {"B", "KiB", "MiB", "GiB", "TiB", "PiB",
 				 "EiB", "ZiB", "YiB", NULL };
-	const char **units_str[] = {
+	static const char **units_str[] = {
 		[STRING_UNITS_10] =  units_10,
 		[STRING_UNITS_2] = units_2,
 	};
-	const unsigned int divisor[] = {
+	static const unsigned int divisor[] = {
 		[STRING_UNITS_10] = 1000,
 		[STRING_UNITS_2] = 1024,
 	};
diff --git a/lib/strncpy_from_user.c b/lib/strncpy_from_user.c
index c4c09b0..bb2b201 100644
--- a/lib/strncpy_from_user.c
+++ b/lib/strncpy_from_user.c
@@ -4,37 +4,7 @@
 #include <linux/errno.h>
 
 #include <asm/byteorder.h>
-
-static inline long find_zero(unsigned long mask)
-{
-	long byte = 0;
-
-#ifdef __BIG_ENDIAN
-#ifdef CONFIG_64BIT
-	if (mask >> 32)
-		mask >>= 32;
-	else
-		byte = 4;
-#endif
-	if (mask >> 16)
-		mask >>= 16;
-	else
-		byte += 2;
-	return (mask >> 8) ? byte : byte + 1;
-#else
-#ifdef CONFIG_64BIT
-	if (!((unsigned int) mask)) {
-		mask >>= 32;
-		byte = 4;
-	}
-#endif
-	if (!(mask & 0xffff)) {
-		mask >>= 16;
-		byte += 2;
-	}
-	return (mask & 0xff) ? byte : byte + 1;
-#endif
-}
+#include <asm/word-at-a-time.h>
 
 #ifdef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
 #define IS_UNALIGNED(src, dst)	0
@@ -51,8 +21,7 @@
  */
 static inline long do_strncpy_from_user(char *dst, const char __user *src, long count, unsigned long max)
 {
-	const unsigned long high_bits = REPEAT_BYTE(0xfe) + 1;
-	const unsigned long low_bits = REPEAT_BYTE(0x7f);
+	const struct word_at_a_time constants = WORD_AT_A_TIME_CONSTANTS;
 	long res = 0;
 
 	/*
@@ -66,18 +35,16 @@
 		goto byte_at_a_time;
 
 	while (max >= sizeof(unsigned long)) {
-		unsigned long c, v, rhs;
+		unsigned long c, data;
 
 		/* Fall back to byte-at-a-time if we get a page fault */
 		if (unlikely(__get_user(c,(unsigned long __user *)(src+res))))
 			break;
-		rhs = c | low_bits;
-		v = (c + high_bits) & ~rhs;
 		*(unsigned long *)(dst+res) = c;
-		if (v) {
-			v = (c & low_bits) + low_bits;
-			v = ~(v | rhs);
-			return res + find_zero(v);
+		if (has_zero(c, &data, &constants)) {
+			data = prep_zero_mask(c, data, &constants);
+			data = create_zero_mask(data);
+			return res + find_zero(data);
 		}
 		res += sizeof(unsigned long);
 		max -= sizeof(unsigned long);
diff --git a/lib/strnlen_user.c b/lib/strnlen_user.c
new file mode 100644
index 0000000..a28df52
--- /dev/null
+++ b/lib/strnlen_user.c
@@ -0,0 +1,138 @@
+#include <linux/kernel.h>
+#include <linux/export.h>
+#include <linux/uaccess.h>
+
+#include <asm/word-at-a-time.h>
+
+/* Set bits in the first 'n' bytes when loaded from memory */
+#ifdef __LITTLE_ENDIAN
+#  define aligned_byte_mask(n) ((1ul << 8*(n))-1)
+#else
+#  define aligned_byte_mask(n) (~0xfful << (BITS_PER_LONG - 8 - 8*(n)))
+#endif
+
+/*
+ * Do a strnlen, return length of string *with* final '\0'.
+ * 'count' is the user-supplied count, while 'max' is the
+ * address space maximum.
+ *
+ * Return 0 for exceptions (which includes hitting the address
+ * space maximum), or 'count+1' if hitting the user-supplied
+ * maximum count.
+ *
+ * NOTE! We can sometimes overshoot the user-supplied maximum
+ * if it fits in an aligned 'long'. The caller needs to check
+ * the return value against "> max".
+ */
+static inline long do_strnlen_user(const char __user *src, unsigned long count, unsigned long max)
+{
+	const struct word_at_a_time constants = WORD_AT_A_TIME_CONSTANTS;
+	long align, res = 0;
+	unsigned long c;
+
+	/*
+	 * Truncate 'max' to the user-specified limit, so that
+	 * we only have one limit we need to check in the loop
+	 */
+	if (max > count)
+		max = count;
+
+	/*
+	 * Do everything aligned. But that means that we
+	 * need to also expand the maximum..
+	 */
+	align = (sizeof(long) - 1) & (unsigned long)src;
+	src -= align;
+	max += align;
+
+	if (unlikely(__get_user(c,(unsigned long __user *)src)))
+		return 0;
+	c |= aligned_byte_mask(align);
+
+	for (;;) {
+		unsigned long data;
+		if (has_zero(c, &data, &constants)) {
+			data = prep_zero_mask(c, data, &constants);
+			data = create_zero_mask(data);
+			return res + find_zero(data) + 1 - align;
+		}
+		res += sizeof(unsigned long);
+		if (unlikely(max < sizeof(unsigned long)))
+			break;
+		max -= sizeof(unsigned long);
+		if (unlikely(__get_user(c,(unsigned long __user *)(src+res))))
+			return 0;
+	}
+	res -= align;
+
+	/*
+	 * Uhhuh. We hit 'max'. But was that the user-specified maximum
+	 * too? If so, return the marker for "too long".
+	 */
+	if (res >= count)
+		return count+1;
+
+	/*
+	 * Nope: we hit the address space limit, and we still had more
+	 * characters the caller would have wanted. That's 0.
+	 */
+	return 0;
+}
+
+/**
+ * strnlen_user: - Get the size of a user string INCLUDING final NUL.
+ * @str: The string to measure.
+ * @count: Maximum count (including NUL character)
+ *
+ * Context: User context only.  This function may sleep.
+ *
+ * Get the size of a NUL-terminated string in user space.
+ *
+ * Returns the size of the string INCLUDING the terminating NUL.
+ * If the string is too long, returns 'count+1'.
+ * On exception (or invalid count), returns 0.
+ */
+long strnlen_user(const char __user *str, long count)
+{
+	unsigned long max_addr, src_addr;
+
+	if (unlikely(count <= 0))
+		return 0;
+
+	max_addr = user_addr_max();
+	src_addr = (unsigned long)str;
+	if (likely(src_addr < max_addr)) {
+		unsigned long max = max_addr - src_addr;
+		return do_strnlen_user(str, count, max);
+	}
+	return 0;
+}
+EXPORT_SYMBOL(strnlen_user);
+
+/**
+ * strlen_user: - Get the size of a user string INCLUDING final NUL.
+ * @str: The string to measure.
+ *
+ * Context: User context only.  This function may sleep.
+ *
+ * Get the size of a NUL-terminated string in user space.
+ *
+ * Returns the size of the string INCLUDING the terminating NUL.
+ * On exception, returns 0.
+ *
+ * If there is a limit on the length of a valid string, you may wish to
+ * consider using strnlen_user() instead.
+ */
+long strlen_user(const char __user *str)
+{
+	unsigned long max_addr, src_addr;
+
+	max_addr = user_addr_max();
+	src_addr = (unsigned long)str;
+	if (likely(src_addr < max_addr)) {
+		unsigned long max = max_addr - src_addr;
+		return do_strnlen_user(str, ~0ul, max);
+	}
+	return 0;
+}
+EXPORT_SYMBOL(strlen_user);
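
The has_zero()/find_zero() helpers used above are provided per architecture, but the little-endian version is essentially the classic (x - 0x01...01) & ~x & 0x80...80 zero-byte detector. A stand-alone sketch of that detector and of locating the first zero byte, assuming a little-endian machine and written purely for illustration:

#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define ONES	0x0101010101010101ull
#define HIGHS	0x8080808080808080ull

/* non-zero iff some byte of x is 0x00 (little-endian reasoning) */
static uint64_t has_zero(uint64_t x)
{
	return (x - ONES) & ~x & HIGHS;
}

/* offset of the first zero byte, assuming has_zero(x) != 0 */
static unsigned int first_zero_byte(uint64_t x)
{
	uint64_t mask = has_zero(x);
	unsigned int i = 0;

	/* scan from the least significant byte up to the first flagged one */
	while (!(mask & 0xff)) {
		mask >>= 8;
		i++;
	}
	return i;
}

int main(void)
{
	char buf[8] = "abc";	/* remaining bytes are 0 */
	uint64_t word;

	memcpy(&word, buf, sizeof(word));
	printf("first zero byte at offset %u\n", first_zero_byte(word));
	return 0;
}

Scanning from the low end avoids the detector's only pitfall: bytes above a genuine zero can be falsely flagged because of the borrow, but bytes below it never are.
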
diff --git a/lib/swiotlb.c b/lib/swiotlb.c
index 414f46e..45bc1f8 100644
--- a/lib/swiotlb.c
+++ b/lib/swiotlb.c
@@ -130,11 +130,9 @@
 	pstart = virt_to_phys(io_tlb_start);
 	pend = virt_to_phys(io_tlb_end);
 
-	printk(KERN_INFO "Placing %luMB software IO TLB between %p - %p\n",
-	       bytes >> 20, io_tlb_start, io_tlb_end);
-	printk(KERN_INFO "software IO TLB at phys %#llx - %#llx\n",
-	       (unsigned long long)pstart,
-	       (unsigned long long)pend);
+	printk(KERN_INFO "software IO TLB [mem %#010llx-%#010llx] (%luMB) mapped at [%p-%p]\n",
+	       (unsigned long long)pstart, (unsigned long long)pend - 1,
+	       bytes >> 20, io_tlb_start, io_tlb_end - 1);
 }
 
 void __init swiotlb_init_with_tbl(char *tlb, unsigned long nslabs, int verbose)
diff --git a/lib/test-kstrtox.c b/lib/test-kstrtox.c
index d55769d..bea3f3f 100644
--- a/lib/test-kstrtox.c
+++ b/lib/test-kstrtox.c
@@ -11,7 +11,7 @@
 };
 
 #define DEFINE_TEST_FAIL(test)	\
-	const struct test_fail test[] __initdata
+	const struct test_fail test[] __initconst
 
 #define DECLARE_TEST_OK(type, test_type)	\
 	test_type {				\
@@ -21,7 +21,7 @@
 	}
 
 #define DEFINE_TEST_OK(type, test)	\
-	const type test[] __initdata
+	const type test[] __initconst
 
 #define TEST_FAIL(fn, type, fmt, test)					\
 {									\
diff --git a/lib/vsprintf.c b/lib/vsprintf.c
index abbabec..c3f36d41 100644
--- a/lib/vsprintf.c
+++ b/lib/vsprintf.c
@@ -112,105 +112,198 @@
 /* Decimal conversion is by far the most typical, and is used
  * for /proc and /sys data. This directly impacts e.g. top performance
  * with many processes running. We optimize it for speed
- * using code from
- * http://www.cs.uiowa.edu/~jones/bcd/decimal.html
- * (with permission from the author, Douglas W. Jones). */
+ * using ideas described at <http://www.cs.uiowa.edu/~jones/bcd/divide.html>
+ * (with permission from the author, Douglas W. Jones).
+ */
 
-/* Formats correctly any integer in [0,99999].
- * Outputs from one to five digits depending on input.
- * On i386 gcc 4.1.2 -O2: ~250 bytes of code. */
+#if BITS_PER_LONG != 32 || BITS_PER_LONG_LONG != 64
+/* Formats correctly any integer in [0, 999999999] */
 static noinline_for_stack
-char *put_dec_trunc(char *buf, unsigned q)
+char *put_dec_full9(char *buf, unsigned q)
 {
-	unsigned d3, d2, d1, d0;
-	d1 = (q>>4) & 0xf;
-	d2 = (q>>8) & 0xf;
-	d3 = (q>>12);
-
-	d0 = 6*(d3 + d2 + d1) + (q & 0xf);
-	q = (d0 * 0xcd) >> 11;
-	d0 = d0 - 10*q;
-	*buf++ = d0 + '0'; /* least significant digit */
-	d1 = q + 9*d3 + 5*d2 + d1;
-	if (d1 != 0) {
-		q = (d1 * 0xcd) >> 11;
-		d1 = d1 - 10*q;
-		*buf++ = d1 + '0'; /* next digit */
-
-		d2 = q + 2*d2;
-		if ((d2 != 0) || (d3 != 0)) {
-			q = (d2 * 0xd) >> 7;
-			d2 = d2 - 10*q;
-			*buf++ = d2 + '0'; /* next digit */
-
-			d3 = q + 4*d3;
-			if (d3 != 0) {
-				q = (d3 * 0xcd) >> 11;
-				d3 = d3 - 10*q;
-				*buf++ = d3 + '0';  /* next digit */
-				if (q != 0)
-					*buf++ = q + '0'; /* most sign. digit */
-			}
-		}
-	}
-
-	return buf;
-}
-/* Same with if's removed. Always emits five digits */
-static noinline_for_stack
-char *put_dec_full(char *buf, unsigned q)
-{
-	/* BTW, if q is in [0,9999], 8-bit ints will be enough, */
-	/* but anyway, gcc produces better code with full-sized ints */
-	unsigned d3, d2, d1, d0;
-	d1 = (q>>4) & 0xf;
-	d2 = (q>>8) & 0xf;
-	d3 = (q>>12);
+	unsigned r;
 
 	/*
 	 * Possible ways to approx. divide by 10
-	 * gcc -O2 replaces multiply with shifts and adds
-	 * (x * 0xcd) >> 11: 11001101 - shorter code than * 0x67 (on i386)
-	 * (x * 0x67) >> 10:  1100111
-	 * (x * 0x34) >> 9:    110100 - same
-	 * (x * 0x1a) >> 8:     11010 - same
-	 * (x * 0x0d) >> 7:      1101 - same, shortest code (on i386)
+	 * (x * 0x1999999a) >> 32 x < 1073741829 (multiply must be 64-bit)
+	 * (x * 0xcccd) >> 19     x <      81920 (x < 262149 when 64-bit mul)
+	 * (x * 0x6667) >> 18     x <      43699
+	 * (x * 0x3334) >> 17     x <      16389
+	 * (x * 0x199a) >> 16     x <      16389
+	 * (x * 0x0ccd) >> 15     x <      16389
+	 * (x * 0x0667) >> 14     x <       2739
+	 * (x * 0x0334) >> 13     x <       1029
+	 * (x * 0x019a) >> 12     x <       1029
+	 * (x * 0x00cd) >> 11     x <       1029 shorter code than * 0x67 (on i386)
+	 * (x * 0x0067) >> 10     x <        179
+	 * (x * 0x0034) >>  9     x <         69 same
+	 * (x * 0x001a) >>  8     x <         69 same
+	 * (x * 0x000d) >>  7     x <         69 same, shortest code (on i386)
+	 * (x * 0x0007) >>  6     x <         19
+	 * See <http://www.cs.uiowa.edu/~jones/bcd/divide.html>
 	 */
-	d0 = 6*(d3 + d2 + d1) + (q & 0xf);
-	q = (d0 * 0xcd) >> 11;
-	d0 = d0 - 10*q;
-	*buf++ = d0 + '0';
-	d1 = q + 9*d3 + 5*d2 + d1;
-		q = (d1 * 0xcd) >> 11;
-		d1 = d1 - 10*q;
-		*buf++ = d1 + '0';
+	r      = (q * (uint64_t)0x1999999a) >> 32;
+	*buf++ = (q - 10 * r) + '0'; /* 1 */
+	q      = (r * (uint64_t)0x1999999a) >> 32;
+	*buf++ = (r - 10 * q) + '0'; /* 2 */
+	r      = (q * (uint64_t)0x1999999a) >> 32;
+	*buf++ = (q - 10 * r) + '0'; /* 3 */
+	q      = (r * (uint64_t)0x1999999a) >> 32;
+	*buf++ = (r - 10 * q) + '0'; /* 4 */
+	r      = (q * (uint64_t)0x1999999a) >> 32;
+	*buf++ = (q - 10 * r) + '0'; /* 5 */
+	/* Now value is under 10000, can avoid 64-bit multiply */
+	q      = (r * 0x199a) >> 16;
+	*buf++ = (r - 10 * q)  + '0'; /* 6 */
+	r      = (q * 0xcd) >> 11;
+	*buf++ = (q - 10 * r)  + '0'; /* 7 */
+	q      = (r * 0xcd) >> 11;
+	*buf++ = (r - 10 * q) + '0'; /* 8 */
+	*buf++ = q + '0'; /* 9 */
+	return buf;
+}
+#endif
 
-		d2 = q + 2*d2;
-			q = (d2 * 0xd) >> 7;
-			d2 = d2 - 10*q;
-			*buf++ = d2 + '0';
+/* Similar to above but do not pad with zeros.
+ * Code can be easily arranged to print 9 digits too, but our callers
+ * always call put_dec_full9() instead when the number has 9 decimal digits.
+ */
+static noinline_for_stack
+char *put_dec_trunc8(char *buf, unsigned r)
+{
+	unsigned q;
 
-			d3 = q + 4*d3;
-				q = (d3 * 0xcd) >> 11; /* - shorter code */
-				/* q = (d3 * 0x67) >> 10; - would also work */
-				d3 = d3 - 10*q;
-				*buf++ = d3 + '0';
-					*buf++ = q + '0';
+	/* Copy of previous function's body with added early returns */
+	q      = (r * (uint64_t)0x1999999a) >> 32;
+	*buf++ = (r - 10 * q) + '0'; /* 2 */
+	if (q == 0)
+		return buf;
+	r      = (q * (uint64_t)0x1999999a) >> 32;
+	*buf++ = (q - 10 * r) + '0'; /* 3 */
+	if (r == 0)
+		return buf;
+	q      = (r * (uint64_t)0x1999999a) >> 32;
+	*buf++ = (r - 10 * q) + '0'; /* 4 */
+	if (q == 0)
+		return buf;
+	r      = (q * (uint64_t)0x1999999a) >> 32;
+	*buf++ = (q - 10 * r) + '0'; /* 5 */
+	if (r == 0)
+		return buf;
+	q      = (r * 0x199a) >> 16;
+	*buf++ = (r - 10 * q)  + '0'; /* 6 */
+	if (q == 0)
+		return buf;
+	r      = (q * 0xcd) >> 11;
+	*buf++ = (q - 10 * r)  + '0'; /* 7 */
+	if (r == 0)
+		return buf;
+	q      = (r * 0xcd) >> 11;
+	*buf++ = (r - 10 * q) + '0'; /* 8 */
+	if (q == 0)
+		return buf;
+	*buf++ = q + '0'; /* 9 */
+	return buf;
+}
+
+/* There are two algorithms to print larger numbers.
+ * One is generic: divide by 1000000000 and repeatedly print
+ * groups of (up to) 9 digits. It's conceptually simple,
+ * but requires a (unsigned long long) / 1000000000 division.
+ *
+ * Second algorithm splits 64-bit unsigned long long into 16-bit chunks,
+ * manipulates them cleverly and generates groups of 4 decimal digits.
+ * It so happens that it does NOT require long long division.
+ *
+ * If long is > 32 bits, division of 64-bit values is relatively easy,
+ * and we will use the first algorithm.
+ * If long long is > 64 bits (strange architecture with VERY large long long),
+ * second algorithm can't be used, and we again use the first one.
+ *
+ * Else (if long is 32 bits and long long is 64 bits) we use second one.
+ */
+
+#if BITS_PER_LONG != 32 || BITS_PER_LONG_LONG != 64
+
+/* First algorithm: generic */
+
+static
+char *put_dec(char *buf, unsigned long long n)
+{
+	if (n >= 100*1000*1000) {
+		while (n >= 1000*1000*1000)
+			buf = put_dec_full9(buf, do_div(n, 1000*1000*1000));
+		if (n >= 100*1000*1000)
+			return put_dec_full9(buf, n);
+	}
+	return put_dec_trunc8(buf, n);
+}
+
+#else
+
+/* Second algorithm: valid only for 64-bit long longs */
+
+static noinline_for_stack
+char *put_dec_full4(char *buf, unsigned q)
+{
+	unsigned r;
+	r      = (q * 0xcccd) >> 19;
+	*buf++ = (q - 10 * r) + '0';
+	q      = (r * 0x199a) >> 16;
+	*buf++ = (r - 10 * q)  + '0';
+	r      = (q * 0xcd) >> 11;
+	*buf++ = (q - 10 * r)  + '0';
+	*buf++ = r + '0';
+	return buf;
+}
+
+/* Based on code by Douglas W. Jones found at
+ * <http://www.cs.uiowa.edu/~jones/bcd/decimal.html#sixtyfour>
+ * (with permission from the author).
+ * Performs no 64-bit division and hence should be fast on 32-bit machines.
+ */
+static
+char *put_dec(char *buf, unsigned long long n)
+{
+	uint32_t d3, d2, d1, q, h;
+
+	if (n < 100*1000*1000)
+		return put_dec_trunc8(buf, n);
+
+	d1  = ((uint32_t)n >> 16); /* implicit "& 0xffff" */
+	h   = (n >> 32);
+	d2  = (h      ) & 0xffff;
+	d3  = (h >> 16); /* implicit "& 0xffff" */
+
+	q   = 656 * d3 + 7296 * d2 + 5536 * d1 + ((uint32_t)n & 0xffff);
+
+	buf = put_dec_full4(buf, q % 10000);
+	q   = q / 10000;
+
+	d1  = q + 7671 * d3 + 9496 * d2 + 6 * d1;
+	buf = put_dec_full4(buf, d1 % 10000);
+	q   = d1 / 10000;
+
+	d2  = q + 4749 * d3 + 42 * d2;
+	buf = put_dec_full4(buf, d2 % 10000);
+	q   = d2 / 10000;
+
+	d3  = q + 281 * d3;
+	if (!d3)
+		goto done;
+	buf = put_dec_full4(buf, d3 % 10000);
+	q   = d3 / 10000;
+	if (!q)
+		goto done;
+	buf = put_dec_full4(buf, q);
+ done:
+	while (buf[-1] == '0')
+		--buf;
 
 	return buf;
 }
-/* No inlining helps gcc to use registers better */
-static noinline_for_stack
-char *put_dec(char *buf, unsigned long long num)
-{
-	while (1) {
-		unsigned rem;
-		if (num < 100000)
-			return put_dec_trunc(buf, num);
-		rem = do_div(num, 100000);
-		buf = put_dec_full(buf, rem);
-	}
-}
+
+#endif
 
 /*
  * Convert passed number to decimal string.
@@ -220,16 +313,22 @@
  */
 int num_to_str(char *buf, int size, unsigned long long num)
 {
-	char tmp[21];		/* Enough for 2^64 in decimal */
+	char tmp[sizeof(num) * 3];
 	int idx, len;
 
-	len = put_dec(tmp, num) - tmp;
+	/* put_dec() may work incorrectly for num = 0 (generate "", not "0") */
+	if (num <= 9) {
+		tmp[0] = '0' + num;
+		len = 1;
+	} else {
+		len = put_dec(tmp, num) - tmp;
+	}
 
 	if (len > size)
 		return 0;
 	for (idx = 0; idx < len; ++idx)
 		buf[idx] = tmp[len - idx - 1];
-	return  len;
+	return len;
 }
 
 #define ZEROPAD	1		/* pad with zero */
@@ -284,6 +383,7 @@
 	char locase;
 	int need_pfx = ((spec.flags & SPECIAL) && spec.base != 10);
 	int i;
+	bool is_zero = num == 0LL;
 
 	/* locase = 0 or 0x20. ORing digits or letters with 'locase'
 	 * produces same digits or (maybe lowercased) letters */
@@ -305,15 +405,16 @@
 		}
 	}
 	if (need_pfx) {
-		spec.field_width--;
 		if (spec.base == 16)
+			spec.field_width -= 2;
+		else if (!is_zero)
 			spec.field_width--;
 	}
 
 	/* generate full string in tmp[], in reverse order */
 	i = 0;
-	if (num == 0)
-		tmp[i++] = '0';
+	if (num < spec.base)
+		tmp[i++] = digits[num] | locase;
 	/* Generic code, for any base:
 	else do {
 		tmp[i++] = (digits[do_div(num,base)] | locase);
@@ -353,9 +454,11 @@
 	}
 	/* "0x" / "0" prefix */
 	if (need_pfx) {
-		if (buf < end)
-			*buf = '0';
-		++buf;
+		if (spec.base == 16 || !is_zero) {
+			if (buf < end)
+				*buf = '0';
+			++buf;
+		}
 		if (spec.base == 16) {
 			if (buf < end)
 				*buf = ('X' | locase);
@@ -436,7 +539,7 @@
 	else if (ext != 'f' && ext != 's')
 		sprint_symbol(sym, value);
 	else
-		kallsyms_lookup(value, NULL, NULL, NULL, sym);
+		sprint_symbol_no_offset(sym, value);
 
 	return string(buf, end, sym, spec);
 #else
@@ -607,7 +710,7 @@
 	}
 	for (i = 0; i < 4; i++) {
 		char temp[3];	/* hold each IP quad in reverse order */
-		int digits = put_dec_trunc(temp, addr[index]) - temp;
+		int digits = put_dec_trunc8(temp, addr[index]) - temp;
 		if (leading_zeros) {
 			if (digits < 3)
 				*p++ = '0';
@@ -866,13 +969,15 @@
 char *pointer(const char *fmt, char *buf, char *end, void *ptr,
 	      struct printf_spec spec)
 {
+	int default_width = 2 * sizeof(void *) + (spec.flags & SPECIAL ? 2 : 0);
+
 	if (!ptr && *fmt != 'K') {
 		/*
 		 * Print (null) with the same width as a pointer so it makes
 		 * tabular output look nice.
 		 */
 		if (spec.field_width == -1)
-			spec.field_width = 2 * sizeof(void *);
+			spec.field_width = default_width;
 		return string(buf, end, "(null)", spec);
 	}
 
@@ -927,7 +1032,7 @@
 		 */
 		if (in_irq() || in_serving_softirq() || in_nmi()) {
 			if (spec.field_width == -1)
-				spec.field_width = 2 * sizeof(void *);
+				spec.field_width = default_width;
 			return string(buf, end, "pK-error", spec);
 		}
 		if (!((kptr_restrict == 0) ||
@@ -944,7 +1049,7 @@
 	}
 	spec.flags |= SMALL;
 	if (spec.field_width == -1) {
-		spec.field_width = 2 * sizeof(void *);
+		spec.field_width = default_width;
 		spec.flags |= ZEROPAD;
 	}
 	spec.base = 16;
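
The new put_dec_full9()/put_dec_trunc8() replace the old BCD-style digit extraction with reciprocal multiplication: for x < 1073741829, (x * 0x1999999a) >> 32 equals x / 10. A minimal userspace sketch of that trick, including the least-significant-first emit and reverse that num_to_str() also performs; the helper names and main() are illustrative only, not kernel code:

#include <stdint.h>
#include <stdio.h>

/* (x * 0x1999999a) >> 32 == x / 10 for x < 1073741829, because
 * 0x1999999a / 2^32 slightly overestimates 1/10. */
static unsigned div10(unsigned x)
{
	return (unsigned)(((uint64_t)x * 0x1999999a) >> 32);
}

/* Emit decimal digits least-significant first, like put_dec(), then
 * reverse them into out[].  Returns the number of digits written. */
static int to_decimal(char *out, unsigned v)
{
	char tmp[10];
	int i = 0, n;

	do {
		unsigned q = div10(v);
		tmp[i++] = (char)('0' + (v - 10 * q));
		v = q;
	} while (v);

	for (n = 0; n < i; n++)
		out[n] = tmp[i - n - 1];
	out[n] = '\0';
	return i;
}

int main(void)
{
	char buf[11];

	to_decimal(buf, 999999999u);
	printf("%s\n", buf);	/* prints 999999999 */
	return 0;
}
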
diff --git a/mm/Kconfig b/mm/Kconfig
index e338407..82fed4e 100644
--- a/mm/Kconfig
+++ b/mm/Kconfig
@@ -198,7 +198,7 @@
 config MIGRATION
 	bool "Page migration"
 	def_bool y
-	depends on NUMA || ARCH_ENABLE_MEMORY_HOTREMOVE || COMPACTION
+	depends on NUMA || ARCH_ENABLE_MEMORY_HOTREMOVE || COMPACTION || CMA
 	help
 	  Allows the migration of the physical location of pages of processes
 	  while the virtual addresses are not changed. This is useful in
@@ -349,6 +349,16 @@
 	  benefit.
 endchoice
 
+config CROSS_MEMORY_ATTACH
+	bool "Cross Memory Support"
+	depends on MMU
+	default y
+	help
+	  Enabling this option adds the system calls process_vm_readv and
+	  process_vm_writev which allow a process with the correct privileges
+	  to directly read from or write to another process's address space.
+	  See the man page for more details.
+
 #
 # UP and nommu archs use km based percpu allocator
 #
@@ -379,3 +389,20 @@
 	  in a negligible performance hit.
 
 	  If unsure, say Y to enable cleancache
+
+config FRONTSWAP
+	bool "Enable frontswap to cache swap pages if tmem is present"
+	depends on SWAP
+	default n
+	help
+	  Frontswap is so named because it can be thought of as the opposite
+	  of a "backing" store for a swap device.  The data is stored into
+	  "transcendent memory", memory that is not directly accessible or
+	  addressable by the kernel and is of unknown and possibly
+	  time-varying size.  When space in transcendent memory is available,
+	  a significant swap I/O reduction may be achieved.  When none is
+	  available, all frontswap calls are reduced to a single pointer-
+	  compare-against-NULL, resulting in a negligible performance hit,
+	  and swap data is stored as normal on the matching swap device.
+
+	  If unsure, say Y to enable frontswap.
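
The CROSS_MEMORY_ATTACH help text above refers to the process_vm_readv() and process_vm_writev() system calls. A minimal userspace sketch of reading another process's memory through the glibc wrapper; the target pid, the remote address and the read_remote() helper are placeholders for illustration:

#define _GNU_SOURCE
#include <stdio.h>
#include <sys/types.h>
#include <sys/uio.h>

/* Copy 'len' bytes from 'remote_addr' in process 'pid' into 'buf'.
 * Needs the same privileges as ptrace attach; see the man page. */
static ssize_t read_remote(pid_t pid, void *remote_addr, void *buf, size_t len)
{
	struct iovec local  = { .iov_base = buf,         .iov_len = len };
	struct iovec remote = { .iov_base = remote_addr, .iov_len = len };

	return process_vm_readv(pid, &local, 1, &remote, 1, 0);
}

int main(void)
{
	char buf[64];
	/* placeholder pid and address, purely for illustration */
	ssize_t n = read_remote(1234, (void *)0x400000, buf, sizeof(buf));

	if (n < 0)
		perror("process_vm_readv");
	else
		printf("copied %zd bytes\n", n);
	return 0;
}
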
diff --git a/mm/Makefile b/mm/Makefile
index 50ec00e..2e2fbbe 100644
--- a/mm/Makefile
+++ b/mm/Makefile
@@ -5,15 +5,18 @@
 mmu-y			:= nommu.o
 mmu-$(CONFIG_MMU)	:= fremap.o highmem.o madvise.o memory.o mincore.o \
 			   mlock.o mmap.o mprotect.o mremap.o msync.o rmap.o \
-			   vmalloc.o pagewalk.o pgtable-generic.o \
-			   process_vm_access.o
+			   vmalloc.o pagewalk.o pgtable-generic.o
+
+ifdef CONFIG_CROSS_MEMORY_ATTACH
+mmu-$(CONFIG_MMU)	+= process_vm_access.o
+endif
 
 obj-y			:= filemap.o mempool.o oom_kill.o fadvise.o \
 			   maccess.o page_alloc.o page-writeback.o \
 			   readahead.o swap.o truncate.o vmscan.o shmem.o \
 			   prio_tree.o util.o mmzone.o vmstat.o backing-dev.o \
 			   page_isolation.o mm_init.o mmu_context.o percpu.o \
-			   $(mmu-y)
+			   compaction.o $(mmu-y)
 obj-y += init-mm.o
 
 ifdef CONFIG_NO_BOOTMEM
@@ -25,14 +28,14 @@
 obj-$(CONFIG_HAVE_MEMBLOCK) += memblock.o
 
 obj-$(CONFIG_BOUNCE)	+= bounce.o
-obj-$(CONFIG_SWAP)	+= page_io.o swap_state.o swapfile.o thrash.o
+obj-$(CONFIG_SWAP)	+= page_io.o swap_state.o swapfile.o
+obj-$(CONFIG_FRONTSWAP)	+= frontswap.o
 obj-$(CONFIG_HAS_DMA)	+= dmapool.o
 obj-$(CONFIG_HUGETLBFS)	+= hugetlb.o
 obj-$(CONFIG_NUMA) 	+= mempolicy.o
 obj-$(CONFIG_SPARSEMEM)	+= sparse.o
 obj-$(CONFIG_SPARSEMEM_VMEMMAP) += sparse-vmemmap.o
 obj-$(CONFIG_SLOB) += slob.o
-obj-$(CONFIG_COMPACTION) += compaction.o
 obj-$(CONFIG_MMU_NOTIFIER) += mmu_notifier.o
 obj-$(CONFIG_KSM) += ksm.o
 obj-$(CONFIG_PAGE_POISONING) += debug-pagealloc.o
diff --git a/mm/bootmem.c b/mm/bootmem.c
index 0131170..ec4fcb7 100644
--- a/mm/bootmem.c
+++ b/mm/bootmem.c
@@ -77,16 +77,16 @@
  */
 static void __init link_bootmem(bootmem_data_t *bdata)
 {
-	struct list_head *iter;
+	bootmem_data_t *ent;
 
-	list_for_each(iter, &bdata_list) {
-		bootmem_data_t *ent;
-
-		ent = list_entry(iter, bootmem_data_t, list);
-		if (bdata->node_min_pfn < ent->node_min_pfn)
-			break;
+	list_for_each_entry(ent, &bdata_list, list) {
+		if (bdata->node_min_pfn < ent->node_min_pfn) {
+			list_add_tail(&bdata->list, &ent->list);
+			return;
+		}
 	}
-	list_add_tail(&bdata->list, iter);
+
+	list_add_tail(&bdata->list, &bdata_list);
 }
 
 /*
@@ -203,7 +203,8 @@
 		} else {
 			unsigned long off = 0;
 
-			while (vec && off < BITS_PER_LONG) {
+			vec >>= start & (BITS_PER_LONG - 1);
+			while (vec) {
 				if (vec & 1) {
 					page = pfn_to_page(start + off);
 					__free_pages_bootmem(page, 0);
@@ -467,7 +468,7 @@
 	return ALIGN(base + off, align) - base;
 }
 
-static void * __init alloc_bootmem_core(struct bootmem_data *bdata,
+static void * __init alloc_bootmem_bdata(struct bootmem_data *bdata,
 					unsigned long size, unsigned long align,
 					unsigned long goal, unsigned long limit)
 {
@@ -588,14 +589,14 @@
 		p_bdata = bootmem_arch_preferred_node(bdata, size, align,
 							goal, limit);
 		if (p_bdata)
-			return alloc_bootmem_core(p_bdata, size, align,
+			return alloc_bootmem_bdata(p_bdata, size, align,
 							goal, limit);
 	}
 #endif
 	return NULL;
 }
 
-static void * __init ___alloc_bootmem_nopanic(unsigned long size,
+static void * __init alloc_bootmem_core(unsigned long size,
 					unsigned long align,
 					unsigned long goal,
 					unsigned long limit)
@@ -603,7 +604,6 @@
 	bootmem_data_t *bdata;
 	void *region;
 
-restart:
 	region = alloc_arch_preferred_bootmem(NULL, size, align, goal, limit);
 	if (region)
 		return region;
@@ -614,11 +614,25 @@
 		if (limit && bdata->node_min_pfn >= PFN_DOWN(limit))
 			break;
 
-		region = alloc_bootmem_core(bdata, size, align, goal, limit);
+		region = alloc_bootmem_bdata(bdata, size, align, goal, limit);
 		if (region)
 			return region;
 	}
 
+	return NULL;
+}
+
+static void * __init ___alloc_bootmem_nopanic(unsigned long size,
+					      unsigned long align,
+					      unsigned long goal,
+					      unsigned long limit)
+{
+	void *ptr;
+
+restart:
+	ptr = alloc_bootmem_core(size, align, goal, limit);
+	if (ptr)
+		return ptr;
 	if (goal) {
 		goal = 0;
 		goto restart;
@@ -684,21 +698,56 @@
 	return ___alloc_bootmem(size, align, goal, limit);
 }
 
-static void * __init ___alloc_bootmem_node(bootmem_data_t *bdata,
+static void * __init ___alloc_bootmem_node_nopanic(pg_data_t *pgdat,
 				unsigned long size, unsigned long align,
 				unsigned long goal, unsigned long limit)
 {
 	void *ptr;
 
-	ptr = alloc_arch_preferred_bootmem(bdata, size, align, goal, limit);
+again:
+	ptr = alloc_arch_preferred_bootmem(pgdat->bdata, size,
+					   align, goal, limit);
 	if (ptr)
 		return ptr;
 
-	ptr = alloc_bootmem_core(bdata, size, align, goal, limit);
+	ptr = alloc_bootmem_bdata(pgdat->bdata, size, align, goal, limit);
 	if (ptr)
 		return ptr;
 
-	return ___alloc_bootmem(size, align, goal, limit);
+	ptr = alloc_bootmem_core(size, align, goal, limit);
+	if (ptr)
+		return ptr;
+
+	if (goal) {
+		goal = 0;
+		goto again;
+	}
+
+	return NULL;
+}
+
+void * __init __alloc_bootmem_node_nopanic(pg_data_t *pgdat, unsigned long size,
+				   unsigned long align, unsigned long goal)
+{
+	if (WARN_ON_ONCE(slab_is_available()))
+		return kzalloc_node(size, GFP_NOWAIT, pgdat->node_id);
+
+	return ___alloc_bootmem_node_nopanic(pgdat, size, align, goal, 0);
+}
+
+void * __init ___alloc_bootmem_node(pg_data_t *pgdat, unsigned long size,
+				    unsigned long align, unsigned long goal,
+				    unsigned long limit)
+{
+	void *ptr;
+
+	ptr = ___alloc_bootmem_node_nopanic(pgdat, size, align, goal, 0);
+	if (ptr)
+		return ptr;
+
+	printk(KERN_ALERT "bootmem alloc of %lu bytes failed!\n", size);
+	panic("Out of memory");
+	return NULL;
 }
 
 /**
@@ -722,7 +771,7 @@
 	if (WARN_ON_ONCE(slab_is_available()))
 		return kzalloc_node(size, GFP_NOWAIT, pgdat->node_id);
 
-	return  ___alloc_bootmem_node(pgdat->bdata, size, align, goal, 0);
+	return  ___alloc_bootmem_node(pgdat, size, align, goal, 0);
 }
 
 void * __init __alloc_bootmem_node_high(pg_data_t *pgdat, unsigned long size,
@@ -743,7 +792,7 @@
 		unsigned long new_goal;
 
 		new_goal = MAX_DMA32_PFN << PAGE_SHIFT;
-		ptr = alloc_bootmem_core(pgdat->bdata, size, align,
+		ptr = alloc_bootmem_bdata(pgdat->bdata, size, align,
 						 new_goal, 0);
 		if (ptr)
 			return ptr;
@@ -754,47 +803,6 @@
 
 }
 
-#ifdef CONFIG_SPARSEMEM
-/**
- * alloc_bootmem_section - allocate boot memory from a specific section
- * @size: size of the request in bytes
- * @section_nr: sparse map section to allocate from
- *
- * Return NULL on failure.
- */
-void * __init alloc_bootmem_section(unsigned long size,
-				    unsigned long section_nr)
-{
-	bootmem_data_t *bdata;
-	unsigned long pfn, goal;
-
-	pfn = section_nr_to_pfn(section_nr);
-	goal = pfn << PAGE_SHIFT;
-	bdata = &bootmem_node_data[early_pfn_to_nid(pfn)];
-
-	return alloc_bootmem_core(bdata, size, SMP_CACHE_BYTES, goal, 0);
-}
-#endif
-
-void * __init __alloc_bootmem_node_nopanic(pg_data_t *pgdat, unsigned long size,
-				   unsigned long align, unsigned long goal)
-{
-	void *ptr;
-
-	if (WARN_ON_ONCE(slab_is_available()))
-		return kzalloc_node(size, GFP_NOWAIT, pgdat->node_id);
-
-	ptr = alloc_arch_preferred_bootmem(pgdat->bdata, size, align, goal, 0);
-	if (ptr)
-		return ptr;
-
-	ptr = alloc_bootmem_core(pgdat->bdata, size, align, goal, 0);
-	if (ptr)
-		return ptr;
-
-	return __alloc_bootmem_nopanic(size, align, goal);
-}
-
 #ifndef ARCH_LOW_ADDRESS_LIMIT
 #define ARCH_LOW_ADDRESS_LIMIT	0xffffffffUL
 #endif
@@ -839,6 +847,6 @@
 	if (WARN_ON_ONCE(slab_is_available()))
 		return kzalloc_node(size, GFP_NOWAIT, pgdat->node_id);
 
-	return ___alloc_bootmem_node(pgdat->bdata, size, align,
-				goal, ARCH_LOW_ADDRESS_LIMIT);
+	return ___alloc_bootmem_node(pgdat, size, align,
+				     goal, ARCH_LOW_ADDRESS_LIMIT);
 }
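
The reshuffled ___alloc_bootmem_nopanic() above boils down to "try with the requested goal, then retry once with goal = 0 before giving up". A small self-contained sketch of that retry shape; alloc_core() here is a toy stand-in for alloc_bootmem_core(), not the kernel function:

#include <stdio.h>
#include <stdlib.h>

/* Toy low-level allocator: pretend nothing satisfies a non-zero goal. */
static void *alloc_core(size_t size, unsigned long goal)
{
	return goal ? NULL : malloc(size);
}

/* Prefer memory placed according to 'goal', but fall back to "anywhere"
 * rather than fail -- the same shape as ___alloc_bootmem_nopanic(). */
static void *alloc_nopanic(size_t size, unsigned long goal)
{
	void *p;

restart:
	p = alloc_core(size, goal);
	if (p)
		return p;
	if (goal) {
		goal = 0;		/* relax the placement constraint */
		goto restart;
	}
	return NULL;			/* caller decides whether this is fatal */
}

int main(void)
{
	void *p = alloc_nopanic(64, 0x1000000);

	printf("%s\n", p ? "allocated via fallback" : "allocation failed");
	free(p);
	return 0;
}
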
diff --git a/mm/cleancache.c b/mm/cleancache.c
index 5646c74..32e6f41 100644
--- a/mm/cleancache.c
+++ b/mm/cleancache.c
@@ -80,7 +80,7 @@
 static int cleancache_get_key(struct inode *inode,
 			      struct cleancache_filekey *key)
 {
-	int (*fhfn)(struct dentry *, __u32 *fh, int *, int);
+	int (*fhfn)(struct inode *, __u32 *fh, int *, struct inode *);
 	int len = 0, maxlen = CLEANCACHE_KEY_MAX;
 	struct super_block *sb = inode->i_sb;
 
@@ -88,9 +88,7 @@
 	if (sb->s_export_op != NULL) {
 		fhfn = sb->s_export_op->encode_fh;
 		if  (fhfn) {
-			struct dentry d;
-			d.d_inode = inode;
-			len = (*fhfn)(&d, &key->u.fh[0], &maxlen, 0);
+			len = (*fhfn)(inode, &key->u.fh[0], &maxlen, NULL);
 			if (len <= 0 || len == 255)
 				return -1;
 			if (maxlen > CLEANCACHE_KEY_MAX)
diff --git a/mm/compaction.c b/mm/compaction.c
index 74a8c82..7ea259d 100644
--- a/mm/compaction.c
+++ b/mm/compaction.c
@@ -16,30 +16,11 @@
 #include <linux/sysfs.h>
 #include "internal.h"
 
+#if defined CONFIG_COMPACTION || defined CONFIG_CMA
+
 #define CREATE_TRACE_POINTS
 #include <trace/events/compaction.h>
 
-/*
- * compact_control is used to track pages being migrated and the free pages
- * they are being migrated to during memory compaction. The free_pfn starts
- * at the end of a zone and migrate_pfn begins at the start. Movable pages
- * are moved to the end of a zone during a compaction run and the run
- * completes when free_pfn <= migrate_pfn
- */
-struct compact_control {
-	struct list_head freepages;	/* List of free pages to migrate to */
-	struct list_head migratepages;	/* List of pages being migrated */
-	unsigned long nr_freepages;	/* Number of isolated free pages */
-	unsigned long nr_migratepages;	/* Number of pages to migrate */
-	unsigned long free_pfn;		/* isolate_freepages search base */
-	unsigned long migrate_pfn;	/* isolate_migratepages search base */
-	bool sync;			/* Synchronous migration */
-
-	int order;			/* order a direct compactor needs */
-	int migratetype;		/* MOVABLE, RECLAIMABLE etc */
-	struct zone *zone;
-};
-
 static unsigned long release_freepages(struct list_head *freelist)
 {
 	struct page *page, *next;
@@ -54,24 +35,35 @@
 	return count;
 }
 
-/* Isolate free pages onto a private freelist. Must hold zone->lock */
-static unsigned long isolate_freepages_block(struct zone *zone,
-				unsigned long blockpfn,
-				struct list_head *freelist)
+static void map_pages(struct list_head *list)
 {
-	unsigned long zone_end_pfn, end_pfn;
+	struct page *page;
+
+	list_for_each_entry(page, list, lru) {
+		arch_alloc_page(page, 0);
+		kernel_map_pages(page, 1, 1);
+	}
+}
+
+static inline bool migrate_async_suitable(int migratetype)
+{
+	return is_migrate_cma(migratetype) || migratetype == MIGRATE_MOVABLE;
+}
+
+/*
+ * Isolate free pages onto a private freelist. Caller must hold zone->lock.
+ * If @strict is true, abort and return 0 on any invalid PFNs or non-free
+ * pages inside the pageblock (even though it may still end up isolating
+ * some pages).
+ */
+static unsigned long isolate_freepages_block(unsigned long blockpfn,
+				unsigned long end_pfn,
+				struct list_head *freelist,
+				bool strict)
+{
 	int nr_scanned = 0, total_isolated = 0;
 	struct page *cursor;
 
-	/* Get the last PFN we should scan for free pages at */
-	zone_end_pfn = zone->zone_start_pfn + zone->spanned_pages;
-	end_pfn = min(blockpfn + pageblock_nr_pages, zone_end_pfn);
-
-	/* Find the first usable PFN in the block to initialse page cursor */
-	for (; blockpfn < end_pfn; blockpfn++) {
-		if (pfn_valid_within(blockpfn))
-			break;
-	}
 	cursor = pfn_to_page(blockpfn);
 
 	/* Isolate free pages. This assumes the block is valid */
@@ -79,15 +71,23 @@
 		int isolated, i;
 		struct page *page = cursor;
 
-		if (!pfn_valid_within(blockpfn))
+		if (!pfn_valid_within(blockpfn)) {
+			if (strict)
+				return 0;
 			continue;
+		}
 		nr_scanned++;
 
-		if (!PageBuddy(page))
+		if (!PageBuddy(page)) {
+			if (strict)
+				return 0;
 			continue;
+		}
 
 		/* Found a free page, break it into order-0 pages */
 		isolated = split_free_page(page);
+		if (!isolated && strict)
+			return 0;
 		total_isolated += isolated;
 		for (i = 0; i < isolated; i++) {
 			list_add(&page->lru, freelist);
@@ -105,114 +105,71 @@
 	return total_isolated;
 }
 
-/* Returns true if the page is within a block suitable for migration to */
-static bool suitable_migration_target(struct page *page)
-{
-
-	int migratetype = get_pageblock_migratetype(page);
-
-	/* Don't interfere with memory hot-remove or the min_free_kbytes blocks */
-	if (migratetype == MIGRATE_ISOLATE || migratetype == MIGRATE_RESERVE)
-		return false;
-
-	/* If the page is a large free page, then allow migration */
-	if (PageBuddy(page) && page_order(page) >= pageblock_order)
-		return true;
-
-	/* If the block is MIGRATE_MOVABLE, allow migration */
-	if (migratetype == MIGRATE_MOVABLE)
-		return true;
-
-	/* Otherwise skip the block */
-	return false;
-}
-
-/*
- * Based on information in the current compact_control, find blocks
- * suitable for isolating free pages from and then isolate them.
+/**
+ * isolate_freepages_range() - isolate free pages.
+ * @start_pfn: The first PFN to start isolating.
+ * @end_pfn:   The one-past-last PFN.
+ *
+ * Non-free pages, invalid PFNs, or zone boundaries within the
+ * [start_pfn, end_pfn) range are considered errors, and cause the function
+ * to undo its actions and return zero.
+ *
+ * Otherwise, the function returns the one-past-the-last PFN of the isolated
+ * pages (which may be greater than end_pfn if the end fell in the middle of
+ * a free page).
  */
-static void isolate_freepages(struct zone *zone,
-				struct compact_control *cc)
+unsigned long
+isolate_freepages_range(unsigned long start_pfn, unsigned long end_pfn)
 {
-	struct page *page;
-	unsigned long high_pfn, low_pfn, pfn;
-	unsigned long flags;
-	int nr_freepages = cc->nr_freepages;
-	struct list_head *freelist = &cc->freepages;
+	unsigned long isolated, pfn, block_end_pfn, flags;
+	struct zone *zone = NULL;
+	LIST_HEAD(freelist);
 
-	/*
-	 * Initialise the free scanner. The starting point is where we last
-	 * scanned from (or the end of the zone if starting). The low point
-	 * is the end of the pageblock the migration scanner is using.
-	 */
-	pfn = cc->free_pfn;
-	low_pfn = cc->migrate_pfn + pageblock_nr_pages;
+	if (pfn_valid(start_pfn))
+		zone = page_zone(pfn_to_page(start_pfn));
 
-	/*
-	 * Take care that if the migration scanner is at the end of the zone
-	 * that the free scanner does not accidentally move to the next zone
-	 * in the next isolation cycle.
-	 */
-	high_pfn = min(low_pfn, pfn);
-
-	/*
-	 * Isolate free pages until enough are available to migrate the
-	 * pages on cc->migratepages. We stop searching if the migrate
-	 * and free page scanners meet or enough free pages are isolated.
-	 */
-	for (; pfn > low_pfn && cc->nr_migratepages > nr_freepages;
-					pfn -= pageblock_nr_pages) {
-		unsigned long isolated;
-
-		if (!pfn_valid(pfn))
-			continue;
+	for (pfn = start_pfn; pfn < end_pfn; pfn += isolated) {
+		if (!pfn_valid(pfn) || zone != page_zone(pfn_to_page(pfn)))
+			break;
 
 		/*
-		 * Check for overlapping nodes/zones. It's possible on some
-		 * configurations to have a setup like
-		 * node0 node1 node0
-		 * i.e. it's possible that all pages within a zones range of
-		 * pages do not belong to a single zone.
+		 * On subsequent iterations ALIGN() is actually not needed,
+		 * but we keep it so as not to complicate the code.
 		 */
-		page = pfn_to_page(pfn);
-		if (page_zone(page) != zone)
-			continue;
+		block_end_pfn = ALIGN(pfn + 1, pageblock_nr_pages);
+		block_end_pfn = min(block_end_pfn, end_pfn);
 
-		/* Check the block is suitable for migration */
-		if (!suitable_migration_target(page))
-			continue;
-
-		/*
-		 * Found a block suitable for isolating free pages from. Now
-		 * we disabled interrupts, double check things are ok and
-		 * isolate the pages. This is to minimise the time IRQs
-		 * are disabled
-		 */
-		isolated = 0;
 		spin_lock_irqsave(&zone->lock, flags);
-		if (suitable_migration_target(page)) {
-			isolated = isolate_freepages_block(zone, pfn, freelist);
-			nr_freepages += isolated;
-		}
+		isolated = isolate_freepages_block(pfn, block_end_pfn,
+						   &freelist, true);
 		spin_unlock_irqrestore(&zone->lock, flags);
 
 		/*
-		 * Record the highest PFN we isolated pages from. When next
-		 * looking for free pages, the search will restart here as
-		 * page migration may have returned some pages to the allocator
+		 * In strict mode, isolate_freepages_block() returns 0 if
+		 * there are any holes in the block (ie. invalid PFNs or
+		 * non-free pages).
 		 */
-		if (isolated)
-			high_pfn = max(high_pfn, pfn);
+		if (!isolated)
+			break;
+
+		/*
+		 * If we managed to isolate pages, it is always (1 << n) *
+		 * pageblock_nr_pages for some non-negative n.  (Max order
+		 * page may span two pageblocks).
+		 */
 	}
 
 	/* split_free_page does not map the pages */
-	list_for_each_entry(page, freelist, lru) {
-		arch_alloc_page(page, 0);
-		kernel_map_pages(page, 1, 1);
+	map_pages(&freelist);
+
+	if (pfn < end_pfn) {
+		/* Loop terminated early, cleanup. */
+		release_freepages(&freelist);
+		return 0;
 	}
 
-	cc->free_pfn = high_pfn;
-	cc->nr_freepages = nr_freepages;
+	/* We don't use freelists for anything. */
+	return pfn;
 }
 
 /* Update the number of anon and file isolated pages in the zone */
@@ -243,37 +200,34 @@
 	return isolated > (inactive + active) / 2;
 }
 
-/* possible outcome of isolate_migratepages */
-typedef enum {
-	ISOLATE_ABORT,		/* Abort compaction now */
-	ISOLATE_NONE,		/* No pages isolated, continue scanning */
-	ISOLATE_SUCCESS,	/* Pages isolated, migrate */
-} isolate_migrate_t;
-
-/*
- * Isolate all pages that can be migrated from the block pointed to by
- * the migrate scanner within compact_control.
+/**
+ * isolate_migratepages_range() - isolate all migrate-able pages in range.
+ * @zone:	Zone pages are in.
+ * @cc:		Compaction control structure.
+ * @low_pfn:	The first PFN of the range.
+ * @end_pfn:	The one-past-the-last PFN of the range.
+ *
+ * Isolate all pages that can be migrated from the range specified by
+ * [low_pfn, end_pfn).  Returns zero if there is a fatal signal
+ * pending, otherwise the PFN of the first page that was not scanned
+ * (which may be less than, equal to, or greater than end_pfn).
+ *
+ * Assumes that cc->migratepages is empty and cc->nr_migratepages is
+ * zero.
+ *
+ * Apart from cc->migratepages and cc->nr_migratepages this function
+ * does not modify any of cc's other fields; in particular it does not modify
+ * (or read for that matter) cc->migrate_pfn.
  */
-static isolate_migrate_t isolate_migratepages(struct zone *zone,
-					struct compact_control *cc)
+unsigned long
+isolate_migratepages_range(struct zone *zone, struct compact_control *cc,
+			   unsigned long low_pfn, unsigned long end_pfn)
 {
-	unsigned long low_pfn, end_pfn;
 	unsigned long last_pageblock_nr = 0, pageblock_nr;
 	unsigned long nr_scanned = 0, nr_isolated = 0;
 	struct list_head *migratelist = &cc->migratepages;
-	isolate_mode_t mode = ISOLATE_ACTIVE|ISOLATE_INACTIVE;
-
-	/* Do not scan outside zone boundaries */
-	low_pfn = max(cc->migrate_pfn, zone->zone_start_pfn);
-
-	/* Only scan within a pageblock boundary */
-	end_pfn = ALIGN(low_pfn + pageblock_nr_pages, pageblock_nr_pages);
-
-	/* Do not cross the free scanner or scan within a memory hole */
-	if (end_pfn > cc->free_pfn || !pfn_valid(low_pfn)) {
-		cc->migrate_pfn = end_pfn;
-		return ISOLATE_NONE;
-	}
+	isolate_mode_t mode = 0;
+	struct lruvec *lruvec;
 
 	/*
 	 * Ensure that there are not too many pages isolated from the LRU
@@ -283,12 +237,12 @@
 	while (unlikely(too_many_isolated(zone))) {
 		/* async migration should just abort */
 		if (!cc->sync)
-			return ISOLATE_ABORT;
+			return 0;
 
 		congestion_wait(BLK_RW_ASYNC, HZ/10);
 
 		if (fatal_signal_pending(current))
-			return ISOLATE_ABORT;
+			return 0;
 	}
 
 	/* Time to isolate some pages for migration */
@@ -351,7 +305,7 @@
 		 */
 		pageblock_nr = low_pfn >> pageblock_order;
 		if (!cc->sync && last_pageblock_nr != pageblock_nr &&
-				get_pageblock_migratetype(page) != MIGRATE_MOVABLE) {
+		    !migrate_async_suitable(get_pageblock_migratetype(page))) {
 			low_pfn += pageblock_nr_pages;
 			low_pfn = ALIGN(low_pfn, pageblock_nr_pages) - 1;
 			last_pageblock_nr = pageblock_nr;
@@ -374,14 +328,16 @@
 		if (!cc->sync)
 			mode |= ISOLATE_ASYNC_MIGRATE;
 
+		lruvec = mem_cgroup_page_lruvec(page, zone);
+
 		/* Try isolate the page */
-		if (__isolate_lru_page(page, mode, 0) != 0)
+		if (__isolate_lru_page(page, mode) != 0)
 			continue;
 
 		VM_BUG_ON(PageTransCompound(page));
 
 		/* Successfully isolated */
-		del_page_from_lru_list(zone, page, page_lru(page));
+		del_page_from_lru_list(page, lruvec, page_lru(page));
 		list_add(&page->lru, migratelist);
 		cc->nr_migratepages++;
 		nr_isolated++;
@@ -396,11 +352,124 @@
 	acct_isolated(zone, cc);
 
 	spin_unlock_irq(&zone->lru_lock);
-	cc->migrate_pfn = low_pfn;
 
 	trace_mm_compaction_isolate_migratepages(nr_scanned, nr_isolated);
 
-	return ISOLATE_SUCCESS;
+	return low_pfn;
+}
+
+#endif /* CONFIG_COMPACTION || CONFIG_CMA */
+#ifdef CONFIG_COMPACTION
+
+/* Returns true if the page is within a block suitable for migration to */
+static bool suitable_migration_target(struct page *page)
+{
+
+	int migratetype = get_pageblock_migratetype(page);
+
+	/* Don't interfere with memory hot-remove or the min_free_kbytes blocks */
+	if (migratetype == MIGRATE_ISOLATE || migratetype == MIGRATE_RESERVE)
+		return false;
+
+	/* If the page is a large free page, then allow migration */
+	if (PageBuddy(page) && page_order(page) >= pageblock_order)
+		return true;
+
+	/* If the block is MIGRATE_MOVABLE or MIGRATE_CMA, allow migration */
+	if (migrate_async_suitable(migratetype))
+		return true;
+
+	/* Otherwise skip the block */
+	return false;
+}
+
+/*
+ * Based on information in the current compact_control, find blocks
+ * suitable for isolating free pages from and then isolate them.
+ */
+static void isolate_freepages(struct zone *zone,
+				struct compact_control *cc)
+{
+	struct page *page;
+	unsigned long high_pfn, low_pfn, pfn, zone_end_pfn, end_pfn;
+	unsigned long flags;
+	int nr_freepages = cc->nr_freepages;
+	struct list_head *freelist = &cc->freepages;
+
+	/*
+	 * Initialise the free scanner. The starting point is where we last
+	 * scanned from (or the end of the zone if starting). The low point
+	 * is the end of the pageblock the migration scanner is using.
+	 */
+	pfn = cc->free_pfn;
+	low_pfn = cc->migrate_pfn + pageblock_nr_pages;
+
+	/*
+	 * Take care that if the migration scanner is at the end of the zone
+	 * that the free scanner does not accidentally move to the next zone
+	 * in the next isolation cycle.
+	 */
+	high_pfn = min(low_pfn, pfn);
+
+	zone_end_pfn = zone->zone_start_pfn + zone->spanned_pages;
+
+	/*
+	 * Isolate free pages until enough are available to migrate the
+	 * pages on cc->migratepages. We stop searching if the migrate
+	 * and free page scanners meet or enough free pages are isolated.
+	 */
+	for (; pfn > low_pfn && cc->nr_migratepages > nr_freepages;
+					pfn -= pageblock_nr_pages) {
+		unsigned long isolated;
+
+		if (!pfn_valid(pfn))
+			continue;
+
+		/*
+		 * Check for overlapping nodes/zones. It's possible on some
+		 * configurations to have a setup like
+		 * node0 node1 node0
+		 * i.e. it's possible that all pages within a zone's range of
+		 * pages do not belong to a single zone.
+		 */
+		page = pfn_to_page(pfn);
+		if (page_zone(page) != zone)
+			continue;
+
+		/* Check the block is suitable for migration */
+		if (!suitable_migration_target(page))
+			continue;
+
+		/*
+		 * Found a block suitable for isolating free pages from. Now
+		 * we disabled interrupts, double check things are ok and
+		 * isolate the pages. This is to minimise the time IRQs
+		 * are disabled
+		 */
+		isolated = 0;
+		spin_lock_irqsave(&zone->lock, flags);
+		if (suitable_migration_target(page)) {
+			end_pfn = min(pfn + pageblock_nr_pages, zone_end_pfn);
+			isolated = isolate_freepages_block(pfn, end_pfn,
+							   freelist, false);
+			nr_freepages += isolated;
+		}
+		spin_unlock_irqrestore(&zone->lock, flags);
+
+		/*
+		 * Record the highest PFN we isolated pages from. When next
+		 * looking for free pages, the search will restart here as
+		 * page migration may have returned some pages to the allocator
+		 */
+		if (isolated)
+			high_pfn = max(high_pfn, pfn);
+	}
+
+	/* split_free_page does not map the pages */
+	map_pages(freelist);
+
+	cc->free_pfn = high_pfn;
+	cc->nr_freepages = nr_freepages;
 }
 
 /*
@@ -449,6 +518,44 @@
 	cc->nr_freepages = nr_freepages;
 }
 
+/* possible outcome of isolate_migratepages */
+typedef enum {
+	ISOLATE_ABORT,		/* Abort compaction now */
+	ISOLATE_NONE,		/* No pages isolated, continue scanning */
+	ISOLATE_SUCCESS,	/* Pages isolated, migrate */
+} isolate_migrate_t;
+
+/*
+ * Isolate all pages that can be migrated from the block pointed to by
+ * the migrate scanner within compact_control.
+ */
+static isolate_migrate_t isolate_migratepages(struct zone *zone,
+					struct compact_control *cc)
+{
+	unsigned long low_pfn, end_pfn;
+
+	/* Do not scan outside zone boundaries */
+	low_pfn = max(cc->migrate_pfn, zone->zone_start_pfn);
+
+	/* Only scan within a pageblock boundary */
+	end_pfn = ALIGN(low_pfn + pageblock_nr_pages, pageblock_nr_pages);
+
+	/* Do not cross the free scanner or scan within a memory hole */
+	if (end_pfn > cc->free_pfn || !pfn_valid(low_pfn)) {
+		cc->migrate_pfn = end_pfn;
+		return ISOLATE_NONE;
+	}
+
+	/* Perform the isolation */
+	low_pfn = isolate_migratepages_range(zone, cc, low_pfn, end_pfn);
+	if (!low_pfn)
+		return ISOLATE_ABORT;
+
+	cc->migrate_pfn = low_pfn;
+
+	return ISOLATE_SUCCESS;
+}
+
 static int compact_finished(struct zone *zone,
 			    struct compact_control *cc)
 {
@@ -795,3 +902,5 @@
 	return device_remove_file(&node->dev, &dev_attr_compact);
 }
 #endif /* CONFIG_SYSFS && CONFIG_NUMA */
+
+#endif /* CONFIG_COMPACTION */
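
isolate_freepages_block() now takes a @strict flag: in strict mode any invalid PFN or non-free page invalidates the whole range (isolate_freepages_range() then releases what it already took), while the best-effort mode used by compaction simply skips the hole. A toy model of that distinction; the bitmap and names are made up for illustration:

#include <stdbool.h>
#include <stdio.h>

/* Count usable slots in [start, end).  In strict mode, any unusable slot
 * rejects the whole range; otherwise it is skipped best-effort. */
static unsigned long scan_range(const bool *usable, unsigned long start,
				unsigned long end, bool strict)
{
	unsigned long pfn, taken = 0;

	for (pfn = start; pfn < end; pfn++) {
		if (!usable[pfn]) {
			if (strict)
				return 0;	/* whole range rejected */
			continue;		/* best effort: skip the hole */
		}
		taken++;
	}
	return taken;
}

int main(void)
{
	bool map[8] = { 1, 1, 0, 1, 1, 1, 1, 1 };	/* one hole at index 2 */

	printf("best effort: %lu\n", scan_range(map, 0, 8, false));	/* 7 */
	printf("strict:      %lu\n", scan_range(map, 0, 8, true));	/* 0 */
	return 0;
}
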
diff --git a/mm/filemap.c b/mm/filemap.c
index 79c4b2b..a4a5260 100644
--- a/mm/filemap.c
+++ b/mm/filemap.c
@@ -29,7 +29,6 @@
 #include <linux/pagevec.h>
 #include <linux/blkdev.h>
 #include <linux/security.h>
-#include <linux/syscalls.h>
 #include <linux/cpuset.h>
 #include <linux/hardirq.h> /* for BUG_ON(!in_atomic()) only */
 #include <linux/memcontrol.h>
@@ -1478,44 +1477,6 @@
 }
 EXPORT_SYMBOL(generic_file_aio_read);
 
-static ssize_t
-do_readahead(struct address_space *mapping, struct file *filp,
-	     pgoff_t index, unsigned long nr)
-{
-	if (!mapping || !mapping->a_ops || !mapping->a_ops->readpage)
-		return -EINVAL;
-
-	force_page_cache_readahead(mapping, filp, index, nr);
-	return 0;
-}
-
-SYSCALL_DEFINE(readahead)(int fd, loff_t offset, size_t count)
-{
-	ssize_t ret;
-	struct file *file;
-
-	ret = -EBADF;
-	file = fget(fd);
-	if (file) {
-		if (file->f_mode & FMODE_READ) {
-			struct address_space *mapping = file->f_mapping;
-			pgoff_t start = offset >> PAGE_CACHE_SHIFT;
-			pgoff_t end = (offset + count - 1) >> PAGE_CACHE_SHIFT;
-			unsigned long len = end - start + 1;
-			ret = do_readahead(mapping, file, start, len);
-		}
-		fput(file);
-	}
-	return ret;
-}
-#ifdef CONFIG_HAVE_SYSCALL_WRAPPERS
-asmlinkage long SyS_readahead(long fd, loff_t offset, long count)
-{
-	return SYSC_readahead((int) fd, offset, (size_t) count);
-}
-SYSCALL_ALIAS(sys_readahead, SyS_readahead);
-#endif
-
 #ifdef CONFIG_MMU
 /**
  * page_cache_read - adds requested page to the page cache if not already there
@@ -1938,71 +1899,6 @@
 }
 EXPORT_SYMBOL(read_cache_page);
 
-/*
- * The logic we want is
- *
- *	if suid or (sgid and xgrp)
- *		remove privs
- */
-int should_remove_suid(struct dentry *dentry)
-{
-	umode_t mode = dentry->d_inode->i_mode;
-	int kill = 0;
-
-	/* suid always must be killed */
-	if (unlikely(mode & S_ISUID))
-		kill = ATTR_KILL_SUID;
-
-	/*
-	 * sgid without any exec bits is just a mandatory locking mark; leave
-	 * it alone.  If some exec bits are set, it's a real sgid; kill it.
-	 */
-	if (unlikely((mode & S_ISGID) && (mode & S_IXGRP)))
-		kill |= ATTR_KILL_SGID;
-
-	if (unlikely(kill && !capable(CAP_FSETID) && S_ISREG(mode)))
-		return kill;
-
-	return 0;
-}
-EXPORT_SYMBOL(should_remove_suid);
-
-static int __remove_suid(struct dentry *dentry, int kill)
-{
-	struct iattr newattrs;
-
-	newattrs.ia_valid = ATTR_FORCE | kill;
-	return notify_change(dentry, &newattrs);
-}
-
-int file_remove_suid(struct file *file)
-{
-	struct dentry *dentry = file->f_path.dentry;
-	struct inode *inode = dentry->d_inode;
-	int killsuid;
-	int killpriv;
-	int error = 0;
-
-	/* Fast path for nothing security related */
-	if (IS_NOSEC(inode))
-		return 0;
-
-	killsuid = should_remove_suid(dentry);
-	killpriv = security_inode_need_killpriv(dentry);
-
-	if (killpriv < 0)
-		return killpriv;
-	if (killpriv)
-		error = security_inode_killpriv(dentry);
-	if (!error && killsuid)
-		error = __remove_suid(dentry, killsuid);
-	if (!error && (inode->i_sb->s_flags & MS_NOSEC))
-		inode->i_flags |= S_NOSEC;
-
-	return error;
-}
-EXPORT_SYMBOL(file_remove_suid);
-
 static size_t __iovec_copy_from_user_inatomic(char *vaddr,
 			const struct iovec *iov, size_t base, size_t bytes)
 {
@@ -2528,7 +2424,9 @@
 	if (err)
 		goto out;
 
-	file_update_time(file);
+	err = file_update_time(file);
+	if (err)
+		goto out;
 
 	/* coalesce the iovecs and go direct-to-BIO for O_DIRECT */
 	if (unlikely(file->f_flags & O_DIRECT)) {
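
The sys_readahead() body removed from this file converted a byte range into an inclusive range of page indices before calling force_page_cache_readahead(). The same arithmetic in isolation, assuming 4 KiB pages; the function name below is made up:

#include <stddef.h>
#include <stdio.h>

#define PAGE_SHIFT 12	/* assume 4 KiB pages for the example */

/* Number of pages the inclusive byte range [offset, offset + count - 1]
 * touches -- the start/end/len computation from the removed syscall. */
static unsigned long pages_spanned(unsigned long long offset, size_t count)
{
	unsigned long start = offset >> PAGE_SHIFT;
	unsigned long end   = (offset + count - 1) >> PAGE_SHIFT;

	return end - start + 1;
}

int main(void)
{
	/* 100 bytes straddling a page boundary touch two pages */
	printf("%lu\n", pages_spanned(4090, 100));	/* prints 2 */
	return 0;
}
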
diff --git a/mm/filemap_xip.c b/mm/filemap_xip.c
index a4eb311..213ca1f 100644
--- a/mm/filemap_xip.c
+++ b/mm/filemap_xip.c
@@ -426,7 +426,9 @@
 	if (ret)
 		goto out_backing;
 
-	file_update_time(filp);
+	ret = file_update_time(filp);
+	if (ret)
+		goto out_backing;
 
 	ret = __xip_file_write (filp, buf, count, pos, ppos);
 
diff --git a/mm/frontswap.c b/mm/frontswap.c
new file mode 100644
index 0000000..e250255
--- /dev/null
+++ b/mm/frontswap.c
@@ -0,0 +1,314 @@
+/*
+ * Frontswap frontend
+ *
+ * This code provides the generic "frontend" layer to call a matching
+ * "backend" driver implementation of frontswap.  See
+ * Documentation/vm/frontswap.txt for more information.
+ *
+ * Copyright (C) 2009-2012 Oracle Corp.  All rights reserved.
+ * Author: Dan Magenheimer
+ *
+ * This work is licensed under the terms of the GNU GPL, version 2.
+ */
+
+#include <linux/mm.h>
+#include <linux/mman.h>
+#include <linux/swap.h>
+#include <linux/swapops.h>
+#include <linux/proc_fs.h>
+#include <linux/security.h>
+#include <linux/capability.h>
+#include <linux/module.h>
+#include <linux/uaccess.h>
+#include <linux/debugfs.h>
+#include <linux/frontswap.h>
+#include <linux/swapfile.h>
+
+/*
+ * frontswap_ops is set by frontswap_register_ops to contain the pointers
+ * to the frontswap "backend" implementation functions.
+ */
+static struct frontswap_ops frontswap_ops __read_mostly;
+
+/*
+ * This global enablement flag reduces overhead on systems where frontswap_ops
+ * has not been registered, so is preferred to the slower alternative: a
+ * function call that checks a non-global.
+ */
+bool frontswap_enabled __read_mostly;
+EXPORT_SYMBOL(frontswap_enabled);
+
+/*
+ * If enabled, frontswap_store will return failure even on success.  As
+ * a result, the swap subsystem will always write the page to swap, in
+ * effect converting frontswap into a writethrough cache.  In this mode,
+ * there is no direct reduction in swap writes, but a frontswap backend
+ * can unilaterally "reclaim" any pages in use with no data loss, thus
+ * providing increased control over maximum memory usage due to frontswap.
+ */
+static bool frontswap_writethrough_enabled __read_mostly;
+
+#ifdef CONFIG_DEBUG_FS
+/*
+ * Counters available via /sys/kernel/debug/frontswap (if debugfs is
+ * properly configured).  These are for information only so are not protected
+ * against increment races.
+ */
+static u64 frontswap_loads;
+static u64 frontswap_succ_stores;
+static u64 frontswap_failed_stores;
+static u64 frontswap_invalidates;
+
+static inline void inc_frontswap_loads(void) {
+	frontswap_loads++;
+}
+static inline void inc_frontswap_succ_stores(void) {
+	frontswap_succ_stores++;
+}
+static inline void inc_frontswap_failed_stores(void) {
+	frontswap_failed_stores++;
+}
+static inline void inc_frontswap_invalidates(void) {
+	frontswap_invalidates++;
+}
+#else
+static inline void inc_frontswap_loads(void) { }
+static inline void inc_frontswap_succ_stores(void) { }
+static inline void inc_frontswap_failed_stores(void) { }
+static inline void inc_frontswap_invalidates(void) { }
+#endif
+/*
+ * Register operations for frontswap, returning previous thus allowing
+ * detection of multiple backends and possible nesting.
+ */
+struct frontswap_ops frontswap_register_ops(struct frontswap_ops *ops)
+{
+	struct frontswap_ops old = frontswap_ops;
+
+	frontswap_ops = *ops;
+	frontswap_enabled = true;
+	return old;
+}
+EXPORT_SYMBOL(frontswap_register_ops);
+
+/*
+ * Enable/disable frontswap writethrough (see above).
+ */
+void frontswap_writethrough(bool enable)
+{
+	frontswap_writethrough_enabled = enable;
+}
+EXPORT_SYMBOL(frontswap_writethrough);
+
+/*
+ * Called when a swap device is swapon'd.
+ */
+void __frontswap_init(unsigned type)
+{
+	struct swap_info_struct *sis = swap_info[type];
+
+	BUG_ON(sis == NULL);
+	if (sis->frontswap_map == NULL)
+		return;
+	if (frontswap_enabled)
+		(*frontswap_ops.init)(type);
+}
+EXPORT_SYMBOL(__frontswap_init);
+
+/*
+ * "Store" data from a page to frontswap and associate it with the page's
+ * swaptype and offset.  Page must be locked and in the swap cache.
+ * If frontswap already contains a page with matching swaptype and
+ * offset, the frontswap implementation may either overwrite the data and
+ * return success or invalidate the page from frontswap and return failure.
+ */
+int __frontswap_store(struct page *page)
+{
+	int ret = -1, dup = 0;
+	swp_entry_t entry = { .val = page_private(page), };
+	int type = swp_type(entry);
+	struct swap_info_struct *sis = swap_info[type];
+	pgoff_t offset = swp_offset(entry);
+
+	BUG_ON(!PageLocked(page));
+	BUG_ON(sis == NULL);
+	if (frontswap_test(sis, offset))
+		dup = 1;
+	ret = (*frontswap_ops.store)(type, offset, page);
+	if (ret == 0) {
+		frontswap_set(sis, offset);
+		inc_frontswap_succ_stores();
+		if (!dup)
+			atomic_inc(&sis->frontswap_pages);
+	} else if (dup) {
+		/*
+		 * Failed dup always results in automatic invalidate of
+		 * the (older) page from frontswap.
+		 */
+		frontswap_clear(sis, offset);
+		atomic_dec(&sis->frontswap_pages);
+		inc_frontswap_failed_stores();
+	} else
+		inc_frontswap_failed_stores();
+	if (frontswap_writethrough_enabled)
+		/* report failure so swap also writes to swap device */
+		ret = -1;
+	return ret;
+}
+EXPORT_SYMBOL(__frontswap_store);
+
+/*
+ * "Get" data from frontswap associated with swaptype and offset that were
+ * specified when the data was put to frontswap and use it to fill the
+ * specified page with data. Page must be locked and in the swap cache.
+ */
+int __frontswap_load(struct page *page)
+{
+	int ret = -1;
+	swp_entry_t entry = { .val = page_private(page), };
+	int type = swp_type(entry);
+	struct swap_info_struct *sis = swap_info[type];
+	pgoff_t offset = swp_offset(entry);
+
+	BUG_ON(!PageLocked(page));
+	BUG_ON(sis == NULL);
+	if (frontswap_test(sis, offset))
+		ret = (*frontswap_ops.load)(type, offset, page);
+	if (ret == 0)
+		inc_frontswap_loads();
+	return ret;
+}
+EXPORT_SYMBOL(__frontswap_load);
+
+/*
+ * Invalidate any data from frontswap associated with the specified swaptype
+ * and offset so that a subsequent "get" will fail.
+ */
+void __frontswap_invalidate_page(unsigned type, pgoff_t offset)
+{
+	struct swap_info_struct *sis = swap_info[type];
+
+	BUG_ON(sis == NULL);
+	if (frontswap_test(sis, offset)) {
+		(*frontswap_ops.invalidate_page)(type, offset);
+		atomic_dec(&sis->frontswap_pages);
+		frontswap_clear(sis, offset);
+		inc_frontswap_invalidates();
+	}
+}
+EXPORT_SYMBOL(__frontswap_invalidate_page);
+
+/*
+ * Invalidate all data from frontswap associated with all offsets for the
+ * specified swaptype.
+ */
+void __frontswap_invalidate_area(unsigned type)
+{
+	struct swap_info_struct *sis = swap_info[type];
+
+	BUG_ON(sis == NULL);
+	if (sis->frontswap_map == NULL)
+		return;
+	(*frontswap_ops.invalidate_area)(type);
+	atomic_set(&sis->frontswap_pages, 0);
+	memset(sis->frontswap_map, 0, sis->max / sizeof(long));
+}
+EXPORT_SYMBOL(__frontswap_invalidate_area);
+
+/*
+ * Frontswap, like a true swap device, may unnecessarily retain pages
+ * under certain circumstances; "shrink" frontswap is essentially a
+ * "partial swapoff" and works by calling try_to_unuse to attempt to
+ * unuse enough frontswap pages to attempt to -- subject to memory
+ * constraints -- reduce the number of pages in frontswap to the
+ * number given in the parameter target_pages.
+ */
+void frontswap_shrink(unsigned long target_pages)
+{
+	struct swap_info_struct *si = NULL;
+	int si_frontswap_pages;
+	unsigned long total_pages = 0, total_pages_to_unuse;
+	unsigned long pages = 0, pages_to_unuse = 0;
+	int type;
+	bool locked = false;
+
+	/*
+	 * we don't want to hold swap_lock while doing a very
+	 * lengthy try_to_unuse, but swap_list may change
+	 * so restart scan from swap_list.head each time
+	 */
+	spin_lock(&swap_lock);
+	locked = true;
+	total_pages = 0;
+	for (type = swap_list.head; type >= 0; type = si->next) {
+		si = swap_info[type];
+		total_pages += atomic_read(&si->frontswap_pages);
+	}
+	if (total_pages <= target_pages)
+		goto out;
+	total_pages_to_unuse = total_pages - target_pages;
+	for (type = swap_list.head; type >= 0; type = si->next) {
+		si = swap_info[type];
+		si_frontswap_pages = atomic_read(&si->frontswap_pages);
+		if (total_pages_to_unuse < si_frontswap_pages)
+			pages = pages_to_unuse = total_pages_to_unuse;
+		else {
+			pages = si_frontswap_pages;
+			pages_to_unuse = 0; /* unuse all */
+		}
+		/* ensure there is enough RAM to fetch pages from frontswap */
+		if (security_vm_enough_memory_mm(current->mm, pages))
+			continue;
+		vm_unacct_memory(pages);
+		break;
+	}
+	if (type < 0)
+		goto out;
+	locked = false;
+	spin_unlock(&swap_lock);
+	try_to_unuse(type, true, pages_to_unuse);
+out:
+	if (locked)
+		spin_unlock(&swap_lock);
+	return;
+}
+EXPORT_SYMBOL(frontswap_shrink);
+
+/*
+ * Count and return the number of frontswap pages across all
+ * swap devices.  This is exported so that backend drivers can
+ * determine current usage without reading debugfs.
+ */
+unsigned long frontswap_curr_pages(void)
+{
+	int type;
+	unsigned long totalpages = 0;
+	struct swap_info_struct *si = NULL;
+
+	spin_lock(&swap_lock);
+	for (type = swap_list.head; type >= 0; type = si->next) {
+		si = swap_info[type];
+		totalpages += atomic_read(&si->frontswap_pages);
+	}
+	spin_unlock(&swap_lock);
+	return totalpages;
+}
+EXPORT_SYMBOL(frontswap_curr_pages);
+
+static int __init init_frontswap(void)
+{
+#ifdef CONFIG_DEBUG_FS
+	struct dentry *root = debugfs_create_dir("frontswap", NULL);
+	if (root == NULL)
+		return -ENXIO;
+	debugfs_create_u64("loads", S_IRUGO, root, &frontswap_loads);
+	debugfs_create_u64("succ_stores", S_IRUGO, root, &frontswap_succ_stores);
+	debugfs_create_u64("failed_stores", S_IRUGO, root,
+				&frontswap_failed_stores);
+	debugfs_create_u64("invalidates", S_IRUGO,
+				root, &frontswap_invalidates);
+#endif
+	return 0;
+}
+
+module_init(init_frontswap);
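
frontswap_register_ops() copies the backend's callback table into frontswap_ops and returns the previous table, so a caller can detect (or chain to) a backend that was already registered. A userspace model of that registration pattern; the struct, the trivial backend and every name below are illustrative, not the kernel frontswap API:

#include <stdio.h>

/* Toy callback table mirroring the shape of struct frontswap_ops. */
struct toy_ops {
	void (*init)(unsigned type);
	int  (*store)(unsigned type, unsigned long offset, const void *page);
	int  (*load)(unsigned type, unsigned long offset, void *page);
};

static struct toy_ops current_ops;	/* zero-filled: nothing registered */

/* Install new callbacks, return the old table, like frontswap_register_ops() */
static struct toy_ops register_ops(const struct toy_ops *ops)
{
	struct toy_ops old = current_ops;

	current_ops = *ops;
	return old;
}

/* Trivial backend: accepts every store, fails every load. */
static void toy_init(unsigned type) { (void)type; }
static int toy_store(unsigned type, unsigned long off, const void *page)
{
	(void)type; (void)off; (void)page;
	return 0;
}
static int toy_load(unsigned type, unsigned long off, void *page)
{
	(void)type; (void)off; (void)page;
	return -1;
}

int main(void)
{
	struct toy_ops mine = { toy_init, toy_store, toy_load };
	struct toy_ops old = register_ops(&mine);

	printf("previous backend present: %s\n", old.store ? "yes" : "no");
	printf("store result: %d\n", current_ops.store(0, 42, "data"));
	return 0;
}
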
diff --git a/mm/huge_memory.c b/mm/huge_memory.c
index f0e5306..57c4b93 100644
--- a/mm/huge_memory.c
+++ b/mm/huge_memory.c
@@ -636,16 +636,12 @@
 					unsigned long haddr, pmd_t *pmd,
 					struct page *page)
 {
-	int ret = 0;
 	pgtable_t pgtable;
 
 	VM_BUG_ON(!PageCompound(page));
 	pgtable = pte_alloc_one(mm, haddr);
-	if (unlikely(!pgtable)) {
-		mem_cgroup_uncharge_page(page);
-		put_page(page);
+	if (unlikely(!pgtable))
 		return VM_FAULT_OOM;
-	}
 
 	clear_huge_page(page, haddr, HPAGE_PMD_NR);
 	__SetPageUptodate(page);
@@ -675,7 +671,7 @@
 		spin_unlock(&mm->page_table_lock);
 	}
 
-	return ret;
+	return 0;
 }
 
 static inline gfp_t alloc_hugepage_gfpmask(int defrag, gfp_t extra_gfp)
@@ -724,8 +720,14 @@
 			put_page(page);
 			goto out;
 		}
+		if (unlikely(__do_huge_pmd_anonymous_page(mm, vma, haddr, pmd,
+							  page))) {
+			mem_cgroup_uncharge_page(page);
+			put_page(page);
+			goto out;
+		}
 
-		return __do_huge_pmd_anonymous_page(mm, vma, haddr, pmd, page);
+		return 0;
 	}
 out:
 	/*
@@ -950,6 +952,8 @@
 		count_vm_event(THP_FAULT_FALLBACK);
 		ret = do_huge_pmd_wp_page_fallback(mm, vma, address,
 						   pmd, orig_pmd, page, haddr);
+		if (ret & VM_FAULT_OOM)
+			split_huge_page(page);
 		put_page(page);
 		goto out;
 	}
@@ -957,6 +961,7 @@
 
 	if (unlikely(mem_cgroup_newpage_charge(new_page, mm, GFP_KERNEL))) {
 		put_page(new_page);
+		split_huge_page(page);
 		put_page(page);
 		ret |= VM_FAULT_OOM;
 		goto out;
@@ -968,8 +973,10 @@
 	spin_lock(&mm->page_table_lock);
 	put_page(page);
 	if (unlikely(!pmd_same(*pmd, orig_pmd))) {
+		spin_unlock(&mm->page_table_lock);
 		mem_cgroup_uncharge_page(new_page);
 		put_page(new_page);
+		goto out;
 	} else {
 		pmd_t entry;
 		VM_BUG_ON(!PageHead(page));
@@ -1224,10 +1231,13 @@
 {
 	int i;
 	struct zone *zone = page_zone(page);
+	struct lruvec *lruvec;
 	int tail_count = 0;
 
 	/* prevent PageLRU to go away from under us, and freeze lru stats */
 	spin_lock_irq(&zone->lru_lock);
+	lruvec = mem_cgroup_page_lruvec(page, zone);
+
 	compound_lock(page);
 	/* complete memcg works before add pages to LRU */
 	mem_cgroup_split_huge_fixup(page);
@@ -1302,13 +1312,12 @@
 		BUG_ON(!PageDirty(page_tail));
 		BUG_ON(!PageSwapBacked(page_tail));
 
-
-		lru_add_page_tail(zone, page, page_tail);
+		lru_add_page_tail(page, page_tail, lruvec);
 	}
 	atomic_sub(tail_count, &page->_count);
 	BUG_ON(atomic_read(&page->_count) <= 0);
 
-	__dec_zone_page_state(page, NR_ANON_TRANSPARENT_HUGEPAGES);
+	__mod_zone_page_state(zone, NR_ANON_TRANSPARENT_HUGEPAGES, -1);
 	__mod_zone_page_state(zone, NR_ANON_PAGES, HPAGE_PMD_NR);
 
 	ClearPageCompound(page);
diff --git a/mm/hugetlb.c b/mm/hugetlb.c
index ae8f708..e198831 100644
--- a/mm/hugetlb.c
+++ b/mm/hugetlb.c
@@ -273,8 +273,8 @@
 
 	/* Locate each segment we overlap with, and count that overlap. */
 	list_for_each_entry(rg, head, link) {
-		int seg_from;
-		int seg_to;
+		long seg_from;
+		long seg_to;
 
 		if (rg->to <= f)
 			continue;
@@ -2157,6 +2157,15 @@
 		kref_get(&reservations->refs);
 }
 
+static void resv_map_put(struct vm_area_struct *vma)
+{
+	struct resv_map *reservations = vma_resv_map(vma);
+
+	if (!reservations)
+		return;
+	kref_put(&reservations->refs, resv_map_release);
+}
+
 static void hugetlb_vm_op_close(struct vm_area_struct *vma)
 {
 	struct hstate *h = hstate_vma(vma);
@@ -2173,7 +2182,7 @@
 		reserve = (end - start) -
 			region_count(&reservations->regions, start, end);
 
-		kref_put(&reservations->refs, resv_map_release);
+		resv_map_put(vma);
 
 		if (reserve) {
 			hugetlb_acct_memory(h, -reserve);
@@ -2213,6 +2222,7 @@
 	}
 	entry = pte_mkyoung(entry);
 	entry = pte_mkhuge(entry);
+	entry = arch_make_huge_pte(entry, vma, page, writable);
 
 	return entry;
 }
@@ -2990,12 +3000,16 @@
 		set_vma_resv_flags(vma, HPAGE_RESV_OWNER);
 	}
 
-	if (chg < 0)
-		return chg;
+	if (chg < 0) {
+		ret = chg;
+		goto out_err;
+	}
 
 	/* There must be enough pages in the subpool for the mapping */
-	if (hugepage_subpool_get_pages(spool, chg))
-		return -ENOSPC;
+	if (hugepage_subpool_get_pages(spool, chg)) {
+		ret = -ENOSPC;
+		goto out_err;
+	}
 
 	/*
 	 * Check enough hugepages are available for the reservation.
@@ -3004,7 +3018,7 @@
 	ret = hugetlb_acct_memory(h, chg);
 	if (ret < 0) {
 		hugepage_subpool_put_pages(spool, chg);
-		return ret;
+		goto out_err;
 	}
 
 	/*
@@ -3021,6 +3035,10 @@
 	if (!vma || vma->vm_flags & VM_MAYSHARE)
 		region_add(&inode->i_mapping->private_list, from, to);
 	return 0;
+out_err:
+	if (vma)
+		resv_map_put(vma);
+	return ret;
 }
 
 void hugetlb_unreserve_pages(struct inode *inode, long offset, long freed)
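
resv_map_put() introduced above is a NULL-tolerant wrapper around kref_put(), so the new out_err path can call it unconditionally. A userspace model of that pattern with a plain reference count instead of a kref; the struct and names are made up:

#include <stdio.h>
#include <stdlib.h>

struct toy_map {
	int refs;
};

/* Drop one reference; free on the last drop; tolerate NULL like
 * resv_map_put() tolerates a missing reservation map. */
static void toy_map_put(struct toy_map *map)
{
	if (!map)
		return;
	if (--map->refs == 0)
		free(map);	/* the kernel version runs resv_map_release() */
}

int main(void)
{
	struct toy_map *m = malloc(sizeof(*m));

	if (!m)
		return 1;
	m->refs = 2;
	toy_map_put(m);		/* refs 2 -> 1, still alive */
	toy_map_put(m);		/* last reference: freed */
	toy_map_put(NULL);	/* safe no-op on the error path */
	printf("done\n");
	return 0;
}
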
diff --git a/mm/internal.h b/mm/internal.h
index 2189af4..2ba87fb 100644
--- a/mm/internal.h
+++ b/mm/internal.h
@@ -100,6 +100,39 @@
 extern bool is_free_buddy_page(struct page *page);
 #endif
 
+#if defined CONFIG_COMPACTION || defined CONFIG_CMA
+
+/*
+ * in mm/compaction.c
+ */
+/*
+ * compact_control is used to track pages being migrated and the free pages
+ * they are being migrated to during memory compaction. The free_pfn starts
+ * at the end of a zone and migrate_pfn begins at the start. Movable pages
+ * are moved to the end of a zone during a compaction run and the run
+ * completes when free_pfn <= migrate_pfn
+ */
+struct compact_control {
+	struct list_head freepages;	/* List of free pages to migrate to */
+	struct list_head migratepages;	/* List of pages being migrated */
+	unsigned long nr_freepages;	/* Number of isolated free pages */
+	unsigned long nr_migratepages;	/* Number of pages to migrate */
+	unsigned long free_pfn;		/* isolate_freepages search base */
+	unsigned long migrate_pfn;	/* isolate_migratepages search base */
+	bool sync;			/* Synchronous migration */
+
+	int order;			/* order a direct compactor needs */
+	int migratetype;		/* MOVABLE, RECLAIMABLE etc */
+	struct zone *zone;
+};
+
+unsigned long
+isolate_freepages_range(unsigned long start_pfn, unsigned long end_pfn);
+unsigned long
+isolate_migratepages_range(struct zone *zone, struct compact_control *cc,
+			   unsigned long low_pfn, unsigned long end_pfn);
+
+#endif
 
 /*
  * function for dealing with page's order in buddy system.
@@ -131,7 +164,8 @@
  * to determine if it's being mapped into a LOCKED vma.
  * If so, mark page as mlocked.
  */
-static inline int is_mlocked_vma(struct vm_area_struct *vma, struct page *page)
+static inline int mlocked_vma_newpage(struct vm_area_struct *vma,
+				    struct page *page)
 {
 	VM_BUG_ON(PageLRU(page));
 
@@ -189,7 +223,7 @@
 				 struct vm_area_struct *vma);
 #endif
 #else /* !CONFIG_MMU */
-static inline int is_mlocked_vma(struct vm_area_struct *v, struct page *p)
+static inline int mlocked_vma_newpage(struct vm_area_struct *v, struct page *p)
 {
 	return 0;
 }
@@ -309,3 +343,7 @@
 extern u64 hwpoison_filter_flags_value;
 extern u64 hwpoison_filter_memcg;
 extern u32 hwpoison_filter_enable;
+
+extern unsigned long vm_mmap_pgoff(struct file *, unsigned long,
+        unsigned long, unsigned long,
+        unsigned long, unsigned long);
diff --git a/mm/madvise.c b/mm/madvise.c
index 1ccbba5..deff1b6 100644
--- a/mm/madvise.c
+++ b/mm/madvise.c
@@ -11,8 +11,10 @@
 #include <linux/mempolicy.h>
 #include <linux/page-isolation.h>
 #include <linux/hugetlb.h>
+#include <linux/falloc.h>
 #include <linux/sched.h>
 #include <linux/ksm.h>
+#include <linux/fs.h>
 
 /*
  * Any behaviour which results in changes to the vma->vm_flags needs to
@@ -200,8 +202,7 @@
 				struct vm_area_struct **prev,
 				unsigned long start, unsigned long end)
 {
-	struct address_space *mapping;
-	loff_t offset, endoff;
+	loff_t offset;
 	int error;
 
 	*prev = NULL;	/* tell sys_madvise we drop mmap_sem */
@@ -217,16 +218,14 @@
 	if ((vma->vm_flags & (VM_SHARED|VM_WRITE)) != (VM_SHARED|VM_WRITE))
 		return -EACCES;
 
-	mapping = vma->vm_file->f_mapping;
-
 	offset = (loff_t)(start - vma->vm_start)
 			+ ((loff_t)vma->vm_pgoff << PAGE_SHIFT);
-	endoff = (loff_t)(end - vma->vm_start - 1)
-			+ ((loff_t)vma->vm_pgoff << PAGE_SHIFT);
 
-	/* vmtruncate_range needs to take i_mutex */
+	/* filesystem's fallocate may need to take i_mutex */
 	up_read(&current->mm->mmap_sem);
-	error = vmtruncate_range(mapping->host, offset, endoff);
+	error = do_fallocate(vma->vm_file,
+				FALLOC_FL_PUNCH_HOLE | FALLOC_FL_KEEP_SIZE,
+				offset, end - start);
 	down_read(&current->mm->mmap_sem);
 	return error;
 }
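
With the hunk above, MADV_REMOVE is implemented by calling the filesystem's fallocate with FALLOC_FL_PUNCH_HOLE | FALLOC_FL_KEEP_SIZE instead of vmtruncate_range(), so hole-punching through madvise() works on any filesystem whose fallocate supports it, not just tmpfs. A minimal userspace sketch of the call, assuming a tmpfs mount at /dev/shm for the temporary file:

#define _GNU_SOURCE
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <sys/mman.h>
#include <unistd.h>

int main(void)
{
	char path[] = "/dev/shm/madv-demo-XXXXXX";
	size_t len = 4096;
	int fd = mkstemp(path);

	if (fd < 0) {
		perror("mkstemp");
		return 1;
	}
	unlink(path);
	if (ftruncate(fd, len)) {
		perror("ftruncate");
		return 1;
	}

	char *p = mmap(NULL, len, PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
	if (p == MAP_FAILED) {
		perror("mmap");
		return 1;
	}
	memset(p, 0xaa, len);			/* dirty the shared mapping */
	if (madvise(p, len, MADV_REMOVE))	/* punch a hole over the range */
		perror("madvise(MADV_REMOVE)");
	printf("first byte after MADV_REMOVE: %#x\n", (unsigned char)p[0]);	/* expect 0 */

	munmap(p, len);
	close(fd);
	return 0;
}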
diff --git a/mm/memblock.c b/mm/memblock.c
index a44eab3..d438209 100644
--- a/mm/memblock.c
+++ b/mm/memblock.c
@@ -37,6 +37,8 @@
 
 int memblock_debug __initdata_memblock;
 static int memblock_can_resize __initdata_memblock;
+static int memblock_memory_in_slab __initdata_memblock = 0;
+static int memblock_reserved_in_slab __initdata_memblock = 0;
 
 /* inline so we don't get a warning when pr_debug is compiled out */
 static inline const char *memblock_type_name(struct memblock_type *type)
@@ -182,11 +184,29 @@
 	}
 }
 
-static int __init_memblock memblock_double_array(struct memblock_type *type)
+/**
+ * memblock_double_array - double the size of the memblock regions array
+ * @type: memblock type of the regions array being doubled
+ * @new_area_start: starting address of memory range to avoid overlap with
+ * @new_area_size: size of memory range to avoid overlap with
+ *
+ * Double the size of the @type regions array. If memblock is being used to
+ * allocate memory for a new reserved regions array and there is a previously
+ * allocated memory range [@new_area_start,@new_area_start+@new_area_size]
+ * waiting to be reserved, ensure the memory used by the new array does
+ * not overlap.
+ *
+ * RETURNS:
+ * 0 on success, -1 on failure.
+ */
+static int __init_memblock memblock_double_array(struct memblock_type *type,
+						phys_addr_t new_area_start,
+						phys_addr_t new_area_size)
 {
 	struct memblock_region *new_array, *old_array;
 	phys_addr_t old_size, new_size, addr;
 	int use_slab = slab_is_available();
+	int *in_slab;
 
 	/* We don't allow resizing until we know about the reserved regions
 	 * of memory that aren't suitable for allocation
@@ -198,6 +218,12 @@
 	old_size = type->max * sizeof(struct memblock_region);
 	new_size = old_size << 1;
 
+	/* Retrieve the slab flag */
+	if (type == &memblock.memory)
+		in_slab = &memblock_memory_in_slab;
+	else
+		in_slab = &memblock_reserved_in_slab;
+
 	/* Try to find some space for it.
 	 *
 	 * WARNING: We assume that either slab_is_available() and we use it or
@@ -212,14 +238,26 @@
 	if (use_slab) {
 		new_array = kmalloc(new_size, GFP_KERNEL);
 		addr = new_array ? __pa(new_array) : 0;
-	} else
-		addr = memblock_find_in_range(0, MEMBLOCK_ALLOC_ACCESSIBLE, new_size, sizeof(phys_addr_t));
+	} else {
+		/* only exclude range when trying to double reserved.regions */
+		if (type != &memblock.reserved)
+			new_area_start = new_area_size = 0;
+
+		addr = memblock_find_in_range(new_area_start + new_area_size,
+						memblock.current_limit,
+						new_size, sizeof(phys_addr_t));
+		if (!addr && new_area_size)
+			addr = memblock_find_in_range(0,
+					min(new_area_start, memblock.current_limit),
+					new_size, sizeof(phys_addr_t));
+
+		new_array = addr ? __va(addr) : 0;
+	}
 	if (!addr) {
 		pr_err("memblock: Failed to double %s array from %ld to %ld entries !\n",
 		       memblock_type_name(type), type->max, type->max * 2);
 		return -1;
 	}
-	new_array = __va(addr);
 
 	memblock_dbg("memblock: %s array is doubled to %ld at [%#010llx-%#010llx]",
 		 memblock_type_name(type), type->max * 2, (u64)addr, (u64)addr + new_size - 1);
@@ -234,22 +272,24 @@
 	type->regions = new_array;
 	type->max <<= 1;
 
-	/* If we use SLAB that's it, we are done */
-	if (use_slab)
-		return 0;
-
-	/* Add the new reserved region now. Should not fail ! */
-	BUG_ON(memblock_reserve(addr, new_size));
-
-	/* If the array wasn't our static init one, then free it. We only do
-	 * that before SLAB is available as later on, we don't know whether
-	 * to use kfree or free_bootmem_pages(). Shouldn't be a big deal
-	 * anyways
+	/* Free old array. We needn't free it if the array is the
+	 * static one
 	 */
-	if (old_array != memblock_memory_init_regions &&
-	    old_array != memblock_reserved_init_regions)
+	if (*in_slab)
+		kfree(old_array);
+	else if (old_array != memblock_memory_init_regions &&
+		 old_array != memblock_reserved_init_regions)
 		memblock_free(__pa(old_array), old_size);
 
+	/* Reserve the new array if that comes from the memblock.
+	 * Otherwise, we needn't do it
+	 */
+	if (!use_slab)
+		BUG_ON(memblock_reserve(addr, new_size));
+
+	/* Update slab flag */
+	*in_slab = use_slab;
+
 	return 0;
 }
 
@@ -387,7 +427,7 @@
 	 */
 	if (!insert) {
 		while (type->cnt + nr_new > type->max)
-			if (memblock_double_array(type) < 0)
+			if (memblock_double_array(type, obase, size) < 0)
 				return -ENOMEM;
 		insert = true;
 		goto repeat;
@@ -438,7 +478,7 @@
 
 	/* we'll create at most two more regions */
 	while (type->cnt + 2 > type->max)
-		if (memblock_double_array(type) < 0)
+		if (memblock_double_array(type, base, size) < 0)
 			return -ENOMEM;
 
 	for (i = 0; i < type->cnt; i++) {
@@ -528,9 +568,9 @@
  * __next_free_mem_range - next function for for_each_free_mem_range()
  * @idx: pointer to u64 loop variable
  * @nid: nid: node selector, %MAX_NUMNODES for all nodes
- * @p_start: ptr to phys_addr_t for start address of the range, can be %NULL
- * @p_end: ptr to phys_addr_t for end address of the range, can be %NULL
- * @p_nid: ptr to int for nid of the range, can be %NULL
+ * @out_start: ptr to phys_addr_t for start address of the range, can be %NULL
+ * @out_end: ptr to phys_addr_t for end address of the range, can be %NULL
+ * @out_nid: ptr to int for nid of the range, can be %NULL
  *
  * Find the first free area from *@idx which matches @nid, fill the out
  * parameters, and update *@idx for the next iteration.  The lower 32bit of
@@ -604,9 +644,9 @@
  * __next_free_mem_range_rev - next function for for_each_free_mem_range_reverse()
  * @idx: pointer to u64 loop variable
  * @nid: nid: node selector, %MAX_NUMNODES for all nodes
- * @p_start: ptr to phys_addr_t for start address of the range, can be %NULL
- * @p_end: ptr to phys_addr_t for end address of the range, can be %NULL
- * @p_nid: ptr to int for nid of the range, can be %NULL
+ * @out_start: ptr to phys_addr_t for start address of the range, can be %NULL
+ * @out_end: ptr to phys_addr_t for end address of the range, can be %NULL
+ * @out_nid: ptr to int for nid of the range, can be %NULL
  *
  * Reverse of __next_free_mem_range().
  */
@@ -855,6 +895,16 @@
 	return memblock_search(&memblock.memory, addr) != -1;
 }
 
+/**
+ * memblock_is_region_memory - check if a region is a subset of memory
+ * @base: base of region to check
+ * @size: size of region to check
+ *
+ * Check if the region [@base, @base+@size) is a subset of a memory block.
+ *
+ * RETURNS:
+ * 0 if false, non-zero if true
+ */
 int __init_memblock memblock_is_region_memory(phys_addr_t base, phys_addr_t size)
 {
 	int idx = memblock_search(&memblock.memory, base);
@@ -867,6 +917,16 @@
 		 memblock.memory.regions[idx].size) >= end;
 }
 
+/**
+ * memblock_is_region_reserved - check if a region intersects reserved memory
+ * @base: base of region to check
+ * @size: size of region to check
+ *
+ * Check if the region [@base, @base+@size) intersects a reserved memory block.
+ *
+ * RETURNS:
+ * 0 if false, non-zero if true
+ */
 int __init_memblock memblock_is_region_reserved(phys_addr_t base, phys_addr_t size)
 {
 	memblock_cap_size(base, &size);
diff --git a/mm/memcontrol.c b/mm/memcontrol.c
index f342778..f72b5e5 100644
--- a/mm/memcontrol.c
+++ b/mm/memcontrol.c
@@ -59,7 +59,7 @@
 
 struct cgroup_subsys mem_cgroup_subsys __read_mostly;
 #define MEM_CGROUP_RECLAIM_RETRIES	5
-struct mem_cgroup *root_mem_cgroup __read_mostly;
+static struct mem_cgroup *root_mem_cgroup __read_mostly;
 
 #ifdef CONFIG_CGROUP_MEM_RES_CTLR_SWAP
 /* Turned on only when memory cgroup is enabled && really_do_swap_account = 1 */
@@ -73,7 +73,7 @@
 #endif
 
 #else
-#define do_swap_account		(0)
+#define do_swap_account		0
 #endif
 
 
@@ -88,18 +88,31 @@
 	MEM_CGROUP_STAT_RSS,	   /* # of pages charged as anon rss */
 	MEM_CGROUP_STAT_FILE_MAPPED,  /* # of pages charged as file rss */
 	MEM_CGROUP_STAT_SWAPOUT, /* # of pages, swapped out */
-	MEM_CGROUP_STAT_DATA, /* end of data requires synchronization */
 	MEM_CGROUP_STAT_NSTATS,
 };
 
+static const char * const mem_cgroup_stat_names[] = {
+	"cache",
+	"rss",
+	"mapped_file",
+	"swap",
+};
+
 enum mem_cgroup_events_index {
 	MEM_CGROUP_EVENTS_PGPGIN,	/* # of pages paged in */
 	MEM_CGROUP_EVENTS_PGPGOUT,	/* # of pages paged out */
-	MEM_CGROUP_EVENTS_COUNT,	/* # of pages paged in/out */
 	MEM_CGROUP_EVENTS_PGFAULT,	/* # of page-faults */
 	MEM_CGROUP_EVENTS_PGMAJFAULT,	/* # of major page-faults */
 	MEM_CGROUP_EVENTS_NSTATS,
 };
+
+static const char * const mem_cgroup_events_names[] = {
+	"pgpgin",
+	"pgpgout",
+	"pgfault",
+	"pgmajfault",
+};
+
 /*
  * Per memcg event counter is incremented at every pagein/pageout. With THP,
  * it will be incremented by the number of pages. This counter is used for
@@ -112,13 +125,14 @@
 	MEM_CGROUP_TARGET_NUMAINFO,
 	MEM_CGROUP_NTARGETS,
 };
-#define THRESHOLDS_EVENTS_TARGET (128)
-#define SOFTLIMIT_EVENTS_TARGET (1024)
-#define NUMAINFO_EVENTS_TARGET	(1024)
+#define THRESHOLDS_EVENTS_TARGET 128
+#define SOFTLIMIT_EVENTS_TARGET 1024
+#define NUMAINFO_EVENTS_TARGET	1024
 
 struct mem_cgroup_stat_cpu {
 	long count[MEM_CGROUP_STAT_NSTATS];
 	unsigned long events[MEM_CGROUP_EVENTS_NSTATS];
+	unsigned long nr_page_events;
 	unsigned long targets[MEM_CGROUP_NTARGETS];
 };
 
@@ -138,7 +152,6 @@
 
 	struct mem_cgroup_reclaim_iter reclaim_iter[DEF_PRIORITY + 1];
 
-	struct zone_reclaim_stat reclaim_stat;
 	struct rb_node		tree_node;	/* RB tree node */
 	unsigned long long	usage_in_excess;/* Set to the value by which */
 						/* the soft limit is exceeded*/
@@ -182,7 +195,7 @@
 
 /* For threshold */
 struct mem_cgroup_threshold_ary {
-	/* An array index points to threshold just below usage. */
+	/* An array index points to threshold just below or equal to usage. */
 	int current_threshold;
 	/* Size of entries[] */
 	unsigned int size;
@@ -245,8 +258,8 @@
 		 */
 		struct rcu_head rcu_freeing;
 		/*
-		 * But when using vfree(), that cannot be done at
-		 * interrupt time, so we must then queue the work.
+		 * We also need some space for a worker in deferred freeing.
+		 * By the time we call it, rcu_freeing is no longer in use.
 		 */
 		struct work_struct work_freeing;
 	};
@@ -305,7 +318,7 @@
 	/*
 	 * percpu counter.
 	 */
-	struct mem_cgroup_stat_cpu *stat;
+	struct mem_cgroup_stat_cpu __percpu *stat;
 	/*
 	 * used when a cpu is offlined or other synchronizations
 	 * See mem_cgroup_read_stat().
@@ -360,8 +373,8 @@
  * Maximum loops in mem_cgroup_hierarchical_reclaim(), used for soft
  * limit reclaim to prevent infinite loops, if they ever occur.
  */
-#define	MEM_CGROUP_MAX_RECLAIM_LOOPS		(100)
-#define	MEM_CGROUP_MAX_SOFT_LIMIT_RECLAIM_LOOPS	(2)
+#define	MEM_CGROUP_MAX_RECLAIM_LOOPS		100
+#define	MEM_CGROUP_MAX_SOFT_LIMIT_RECLAIM_LOOPS	2
 
 enum charge_type {
 	MEM_CGROUP_CHARGE_TYPE_CACHE = 0,
@@ -377,8 +390,8 @@
 #define _MEM			(0)
 #define _MEMSWAP		(1)
 #define _OOM_TYPE		(2)
-#define MEMFILE_PRIVATE(x, val)	(((x) << 16) | (val))
-#define MEMFILE_TYPE(val)	(((val) >> 16) & 0xffff)
+#define MEMFILE_PRIVATE(x, val)	((x) << 16 | (val))
+#define MEMFILE_TYPE(val)	((val) >> 16 & 0xffff)
 #define MEMFILE_ATTR(val)	((val) & 0xffff)
 /* Used for OOM notifier */
 #define OOM_CONTROL		(0)
@@ -404,6 +417,7 @@
 {
 	if (mem_cgroup_sockets_enabled) {
 		struct mem_cgroup *memcg;
+		struct cg_proto *cg_proto;
 
 		BUG_ON(!sk->sk_prot->proto_cgroup);
 
@@ -423,9 +437,10 @@
 
 		rcu_read_lock();
 		memcg = mem_cgroup_from_task(current);
-		if (!mem_cgroup_is_root(memcg)) {
+		cg_proto = sk->sk_prot->proto_cgroup(memcg);
+		if (!mem_cgroup_is_root(memcg) && memcg_proto_active(cg_proto)) {
 			mem_cgroup_get(memcg);
-			sk->sk_cgrp = sk->sk_prot->proto_cgroup(memcg);
+			sk->sk_cgrp = cg_proto;
 		}
 		rcu_read_unlock();
 	}
@@ -454,6 +469,19 @@
 #endif /* CONFIG_INET */
 #endif /* CONFIG_CGROUP_MEM_RES_CTLR_KMEM */
 
+#if defined(CONFIG_INET) && defined(CONFIG_CGROUP_MEM_RES_CTLR_KMEM)
+static void disarm_sock_keys(struct mem_cgroup *memcg)
+{
+	if (!memcg_proto_activated(&memcg->tcp_mem.cg_proto))
+		return;
+	static_key_slow_dec(&memcg_socket_limit_enabled);
+}
+#else
+static void disarm_sock_keys(struct mem_cgroup *memcg)
+{
+}
+#endif
+
 static void drain_all_stock_async(struct mem_cgroup *memcg);
 
 static struct mem_cgroup_per_zone *
@@ -718,12 +746,21 @@
 		nr_pages = -nr_pages; /* for event */
 	}
 
-	__this_cpu_add(memcg->stat->events[MEM_CGROUP_EVENTS_COUNT], nr_pages);
+	__this_cpu_add(memcg->stat->nr_page_events, nr_pages);
 
 	preempt_enable();
 }
 
 unsigned long
+mem_cgroup_get_lru_size(struct lruvec *lruvec, enum lru_list lru)
+{
+	struct mem_cgroup_per_zone *mz;
+
+	mz = container_of(lruvec, struct mem_cgroup_per_zone, lruvec);
+	return mz->lru_size[lru];
+}
+
+static unsigned long
 mem_cgroup_zone_nr_lru_pages(struct mem_cgroup *memcg, int nid, int zid,
 			unsigned int lru_mask)
 {
@@ -770,7 +807,7 @@
 {
 	unsigned long val, next;
 
-	val = __this_cpu_read(memcg->stat->events[MEM_CGROUP_EVENTS_COUNT]);
+	val = __this_cpu_read(memcg->stat->nr_page_events);
 	next = __this_cpu_read(memcg->stat->targets[target]);
 	/* from time_after() in jiffies.h */
 	if ((long)next - (long)val < 0) {
@@ -1013,7 +1050,7 @@
 /**
  * mem_cgroup_zone_lruvec - get the lru list vector for a zone and memcg
  * @zone: zone of the wanted lruvec
- * @mem: memcg of the wanted lruvec
+ * @memcg: memcg of the wanted lruvec
  *
  * Returns the lru list vector holding pages for the given @zone and
  * @mem.  This can be the global zone lruvec, if the memory controller
@@ -1046,19 +1083,11 @@
  */
 
 /**
- * mem_cgroup_lru_add_list - account for adding an lru page and return lruvec
- * @zone: zone of the page
+ * mem_cgroup_page_lruvec - return lruvec for adding an lru page
  * @page: the page
- * @lru: current lru
- *
- * This function accounts for @page being added to @lru, and returns
- * the lruvec for the given @zone and the memcg @page is charged to.
- *
- * The callsite is then responsible for physically linking the page to
- * the returned lruvec->lists[@lru].
+ * @zone: zone of the page
  */
-struct lruvec *mem_cgroup_lru_add_list(struct zone *zone, struct page *page,
-				       enum lru_list lru)
+struct lruvec *mem_cgroup_page_lruvec(struct page *page, struct zone *zone)
 {
 	struct mem_cgroup_per_zone *mz;
 	struct mem_cgroup *memcg;
@@ -1071,7 +1100,7 @@
 	memcg = pc->mem_cgroup;
 
 	/*
-	 * Surreptitiously switch any uncharged page to root:
+	 * Surreptitiously switch any uncharged offlist page to root:
 	 * an uncharged page off lru does nothing to secure
 	 * its former mem_cgroup from sudden removal.
 	 *
@@ -1079,85 +1108,60 @@
 	 * under page_cgroup lock: between them, they make all uses
 	 * of pc->mem_cgroup safe.
 	 */
-	if (!PageCgroupUsed(pc) && memcg != root_mem_cgroup)
+	if (!PageLRU(page) && !PageCgroupUsed(pc) && memcg != root_mem_cgroup)
 		pc->mem_cgroup = memcg = root_mem_cgroup;
 
 	mz = page_cgroup_zoneinfo(memcg, page);
-	/* compound_order() is stabilized through lru_lock */
-	mz->lru_size[lru] += 1 << compound_order(page);
 	return &mz->lruvec;
 }
 
 /**
- * mem_cgroup_lru_del_list - account for removing an lru page
- * @page: the page
- * @lru: target lru
+ * mem_cgroup_update_lru_size - account for adding or removing an lru page
+ * @lruvec: mem_cgroup per zone lru vector
+ * @lru: index of lru list the page is sitting on
+ * @nr_pages: positive when adding or negative when removing
  *
- * This function accounts for @page being removed from @lru.
- *
- * The callsite is then responsible for physically unlinking
- * @page->lru.
+ * This function must be called when a page is added to or removed from an
+ * lru list.
  */
-void mem_cgroup_lru_del_list(struct page *page, enum lru_list lru)
+void mem_cgroup_update_lru_size(struct lruvec *lruvec, enum lru_list lru,
+				int nr_pages)
 {
 	struct mem_cgroup_per_zone *mz;
-	struct mem_cgroup *memcg;
-	struct page_cgroup *pc;
+	unsigned long *lru_size;
 
 	if (mem_cgroup_disabled())
 		return;
 
-	pc = lookup_page_cgroup(page);
-	memcg = pc->mem_cgroup;
-	VM_BUG_ON(!memcg);
-	mz = page_cgroup_zoneinfo(memcg, page);
-	/* huge page split is done under lru_lock. so, we have no races. */
-	VM_BUG_ON(mz->lru_size[lru] < (1 << compound_order(page)));
-	mz->lru_size[lru] -= 1 << compound_order(page);
-}
-
-void mem_cgroup_lru_del(struct page *page)
-{
-	mem_cgroup_lru_del_list(page, page_lru(page));
-}
-
-/**
- * mem_cgroup_lru_move_lists - account for moving a page between lrus
- * @zone: zone of the page
- * @page: the page
- * @from: current lru
- * @to: target lru
- *
- * This function accounts for @page being moved between the lrus @from
- * and @to, and returns the lruvec for the given @zone and the memcg
- * @page is charged to.
- *
- * The callsite is then responsible for physically relinking
- * @page->lru to the returned lruvec->lists[@to].
- */
-struct lruvec *mem_cgroup_lru_move_lists(struct zone *zone,
-					 struct page *page,
-					 enum lru_list from,
-					 enum lru_list to)
-{
-	/* XXX: Optimize this, especially for @from == @to */
-	mem_cgroup_lru_del_list(page, from);
-	return mem_cgroup_lru_add_list(zone, page, to);
+	mz = container_of(lruvec, struct mem_cgroup_per_zone, lruvec);
+	lru_size = mz->lru_size + lru;
+	*lru_size += nr_pages;
+	VM_BUG_ON((long)(*lru_size) < 0);
 }
 
 /*
  * Checks whether given mem is same or in the root_mem_cgroup's
  * hierarchy subtree
  */
-static bool mem_cgroup_same_or_subtree(const struct mem_cgroup *root_memcg,
-		struct mem_cgroup *memcg)
+bool __mem_cgroup_same_or_subtree(const struct mem_cgroup *root_memcg,
+				  struct mem_cgroup *memcg)
 {
-	if (root_memcg != memcg) {
-		return (root_memcg->use_hierarchy &&
-			css_is_ancestor(&memcg->css, &root_memcg->css));
-	}
+	if (root_memcg == memcg)
+		return true;
+	if (!root_memcg->use_hierarchy || !memcg)
+		return false;
+	return css_is_ancestor(&memcg->css, &root_memcg->css);
+}
 
-	return true;
+static bool mem_cgroup_same_or_subtree(const struct mem_cgroup *root_memcg,
+				       struct mem_cgroup *memcg)
+{
+	bool ret;
+
+	rcu_read_lock();
+	ret = __mem_cgroup_same_or_subtree(root_memcg, memcg);
+	rcu_read_unlock();
+	return ret;
 }
 
 int task_in_mem_cgroup(struct task_struct *task, const struct mem_cgroup *memcg)
@@ -1195,19 +1199,15 @@
 	return ret;
 }
 
-int mem_cgroup_inactive_anon_is_low(struct mem_cgroup *memcg, struct zone *zone)
+int mem_cgroup_inactive_anon_is_low(struct lruvec *lruvec)
 {
 	unsigned long inactive_ratio;
-	int nid = zone_to_nid(zone);
-	int zid = zone_idx(zone);
 	unsigned long inactive;
 	unsigned long active;
 	unsigned long gb;
 
-	inactive = mem_cgroup_zone_nr_lru_pages(memcg, nid, zid,
-						BIT(LRU_INACTIVE_ANON));
-	active = mem_cgroup_zone_nr_lru_pages(memcg, nid, zid,
-					      BIT(LRU_ACTIVE_ANON));
+	inactive = mem_cgroup_get_lru_size(lruvec, LRU_INACTIVE_ANON);
+	active = mem_cgroup_get_lru_size(lruvec, LRU_ACTIVE_ANON);
 
 	gb = (inactive + active) >> (30 - PAGE_SHIFT);
 	if (gb)
@@ -1218,55 +1218,23 @@
 	return inactive * inactive_ratio < active;
 }
 
-int mem_cgroup_inactive_file_is_low(struct mem_cgroup *memcg, struct zone *zone)
+int mem_cgroup_inactive_file_is_low(struct lruvec *lruvec)
 {
 	unsigned long active;
 	unsigned long inactive;
-	int zid = zone_idx(zone);
-	int nid = zone_to_nid(zone);
 
-	inactive = mem_cgroup_zone_nr_lru_pages(memcg, nid, zid,
-						BIT(LRU_INACTIVE_FILE));
-	active = mem_cgroup_zone_nr_lru_pages(memcg, nid, zid,
-					      BIT(LRU_ACTIVE_FILE));
+	inactive = mem_cgroup_get_lru_size(lruvec, LRU_INACTIVE_FILE);
+	active = mem_cgroup_get_lru_size(lruvec, LRU_ACTIVE_FILE);
 
 	return (active > inactive);
 }
 
-struct zone_reclaim_stat *mem_cgroup_get_reclaim_stat(struct mem_cgroup *memcg,
-						      struct zone *zone)
-{
-	int nid = zone_to_nid(zone);
-	int zid = zone_idx(zone);
-	struct mem_cgroup_per_zone *mz = mem_cgroup_zoneinfo(memcg, nid, zid);
-
-	return &mz->reclaim_stat;
-}
-
-struct zone_reclaim_stat *
-mem_cgroup_get_reclaim_stat_from_page(struct page *page)
-{
-	struct page_cgroup *pc;
-	struct mem_cgroup_per_zone *mz;
-
-	if (mem_cgroup_disabled())
-		return NULL;
-
-	pc = lookup_page_cgroup(page);
-	if (!PageCgroupUsed(pc))
-		return NULL;
-	/* Ensure pc->mem_cgroup is visible after reading PCG_USED. */
-	smp_rmb();
-	mz = page_cgroup_zoneinfo(pc->mem_cgroup, page);
-	return &mz->reclaim_stat;
-}
-
 #define mem_cgroup_from_res_counter(counter, member)	\
 	container_of(counter, struct mem_cgroup, member)
 
 /**
  * mem_cgroup_margin - calculate chargeable space of a memory cgroup
- * @mem: the memory cgroup
+ * @memcg: the memory cgroup
  *
  * Returns the maximum amount of memory @mem can be charged with, in
  * pages.
@@ -1540,7 +1508,7 @@
 
 /**
  * test_mem_cgroup_node_reclaimable
- * @mem: the target memcg
+ * @memcg: the target memcg
  * @nid: the node ID to be checked.
  * @noswap: specify true here if the user wants file-only information.
  *
@@ -1634,7 +1602,7 @@
  * unused nodes. But scan_nodes is lazily updated and may not contain
  * enough new information. We need to do a double check.
  */
-bool mem_cgroup_reclaimable(struct mem_cgroup *memcg, bool noswap)
+static bool mem_cgroup_reclaimable(struct mem_cgroup *memcg, bool noswap)
 {
 	int nid;
 
@@ -1669,7 +1637,7 @@
 	return 0;
 }
 
-bool mem_cgroup_reclaimable(struct mem_cgroup *memcg, bool noswap)
+static bool mem_cgroup_reclaimable(struct mem_cgroup *memcg, bool noswap)
 {
 	return test_mem_cgroup_node_reclaimable(memcg, 0, noswap);
 }
@@ -1843,7 +1811,8 @@
 /*
  * try to call OOM killer. returns false if we should exit memory-reclaim loop.
  */
-bool mem_cgroup_handle_oom(struct mem_cgroup *memcg, gfp_t mask, int order)
+static bool mem_cgroup_handle_oom(struct mem_cgroup *memcg, gfp_t mask,
+				  int order)
 {
 	struct oom_wait_info owait;
 	bool locked, need_to_kill;
@@ -1992,7 +1961,7 @@
 	unsigned int nr_pages;
 	struct work_struct work;
 	unsigned long flags;
-#define FLUSHING_CACHED_CHARGE	(0)
+#define FLUSHING_CACHED_CHARGE	0
 };
 static DEFINE_PER_CPU(struct memcg_stock_pcp, memcg_stock);
 static DEFINE_MUTEX(percpu_charge_mutex);
@@ -2139,7 +2108,7 @@
 	int i;
 
 	spin_lock(&memcg->pcp_counter_lock);
-	for (i = 0; i < MEM_CGROUP_STAT_DATA; i++) {
+	for (i = 0; i < MEM_CGROUP_STAT_NSTATS; i++) {
 		long x = per_cpu(memcg->stat->count[i], cpu);
 
 		per_cpu(memcg->stat->count[i], cpu) = 0;
@@ -2427,6 +2396,24 @@
 }
 
 /*
+ * Cancel charges in this cgroup; they don't propagate to the parent cgroup.
+ * This is useful when moving usage to parent cgroup.
+ */
+static void __mem_cgroup_cancel_local_charge(struct mem_cgroup *memcg,
+					unsigned int nr_pages)
+{
+	unsigned long bytes = nr_pages * PAGE_SIZE;
+
+	if (mem_cgroup_is_root(memcg))
+		return;
+
+	res_counter_uncharge_until(&memcg->res, memcg->res.parent, bytes);
+	if (do_swap_account)
+		res_counter_uncharge_until(&memcg->memsw,
+						memcg->memsw.parent, bytes);
+}
+
+/*
  * A helper function to get mem_cgroup from ID. Must be called under
  * rcu_read_lock(). The caller must check css_is_removed() or similar if
  * it's a concern. (dropping refcnt from swap can be called against removed
@@ -2481,6 +2468,7 @@
 {
 	struct page_cgroup *pc = lookup_page_cgroup(page);
 	struct zone *uninitialized_var(zone);
+	struct lruvec *lruvec;
 	bool was_on_lru = false;
 	bool anon;
 
@@ -2503,8 +2491,9 @@
 		zone = page_zone(page);
 		spin_lock_irq(&zone->lru_lock);
 		if (PageLRU(page)) {
+			lruvec = mem_cgroup_zone_lruvec(zone, pc->mem_cgroup);
 			ClearPageLRU(page);
-			del_page_from_lru_list(zone, page, page_lru(page));
+			del_page_from_lru_list(page, lruvec, page_lru(page));
 			was_on_lru = true;
 		}
 	}
@@ -2522,9 +2511,10 @@
 
 	if (lrucare) {
 		if (was_on_lru) {
+			lruvec = mem_cgroup_zone_lruvec(zone, pc->mem_cgroup);
 			VM_BUG_ON(PageLRU(page));
 			SetPageLRU(page);
-			add_page_to_lru_list(zone, page, page_lru(page));
+			add_page_to_lru_list(page, lruvec, page_lru(page));
 		}
 		spin_unlock_irq(&zone->lru_lock);
 	}
@@ -2547,7 +2537,7 @@
 
 #ifdef CONFIG_TRANSPARENT_HUGEPAGE
 
-#define PCGF_NOCOPY_AT_SPLIT ((1 << PCG_LOCK) | (1 << PCG_MIGRATION))
+#define PCGF_NOCOPY_AT_SPLIT (1 << PCG_LOCK | 1 << PCG_MIGRATION)
 /*
  * Because tail pages are not marked as "used", set it. We're under
  * zone->lru_lock, 'splitting on pmd' and compound_lock.
@@ -2578,23 +2568,19 @@
  * @pc:	page_cgroup of the page.
  * @from: mem_cgroup which the page is moved from.
  * @to:	mem_cgroup which the page is moved to. @from != @to.
- * @uncharge: whether we should call uncharge and css_put against @from.
  *
  * The caller must confirm following.
  * - page is not on LRU (isolate_page() is useful.)
  * - compound_lock is held when nr_pages > 1
  *
- * This function doesn't do "charge" nor css_get to new cgroup. It should be
- * done by a caller(__mem_cgroup_try_charge would be useful). If @uncharge is
- * true, this function does "uncharge" from old cgroup, but it doesn't if
- * @uncharge is false, so a caller should do "uncharge".
+ * This function doesn't do "charge" to new cgroup and doesn't do "uncharge"
+ * from old cgroup.
  */
 static int mem_cgroup_move_account(struct page *page,
 				   unsigned int nr_pages,
 				   struct page_cgroup *pc,
 				   struct mem_cgroup *from,
-				   struct mem_cgroup *to,
-				   bool uncharge)
+				   struct mem_cgroup *to)
 {
 	unsigned long flags;
 	int ret;
@@ -2628,9 +2614,6 @@
 		preempt_enable();
 	}
 	mem_cgroup_charge_statistics(from, anon, -nr_pages);
-	if (uncharge)
-		/* This is not "cancel", but cancel_charge does all we need. */
-		__mem_cgroup_cancel_charge(from, nr_pages);
 
 	/* caller should have done css_get */
 	pc->mem_cgroup = to;
@@ -2664,15 +2647,13 @@
 				  struct mem_cgroup *child,
 				  gfp_t gfp_mask)
 {
-	struct cgroup *cg = child->css.cgroup;
-	struct cgroup *pcg = cg->parent;
 	struct mem_cgroup *parent;
 	unsigned int nr_pages;
 	unsigned long uninitialized_var(flags);
 	int ret;
 
 	/* Is ROOT ? */
-	if (!pcg)
+	if (mem_cgroup_is_root(child))
 		return -EINVAL;
 
 	ret = -EBUSY;
@@ -2683,21 +2664,23 @@
 
 	nr_pages = hpage_nr_pages(page);
 
-	parent = mem_cgroup_from_cont(pcg);
-	ret = __mem_cgroup_try_charge(NULL, gfp_mask, nr_pages, &parent, false);
-	if (ret)
-		goto put_back;
+	parent = parent_mem_cgroup(child);
+	/*
+	 * If no parent, move charges to root cgroup.
+	 */
+	if (!parent)
+		parent = root_mem_cgroup;
 
 	if (nr_pages > 1)
 		flags = compound_lock_irqsave(page);
 
-	ret = mem_cgroup_move_account(page, nr_pages, pc, child, parent, true);
-	if (ret)
-		__mem_cgroup_cancel_charge(parent, nr_pages);
+	ret = mem_cgroup_move_account(page, nr_pages,
+				pc, child, parent);
+	if (!ret)
+		__mem_cgroup_cancel_local_charge(child, nr_pages);
 
 	if (nr_pages > 1)
 		compound_unlock_irqrestore(page, flags);
-put_back:
 	putback_lru_page(page);
 put:
 	put_page(page);
@@ -2845,24 +2828,7 @@
 	 */
 	if (do_swap_account && PageSwapCache(page)) {
 		swp_entry_t ent = {.val = page_private(page)};
-		struct mem_cgroup *swap_memcg;
-		unsigned short id;
-
-		id = swap_cgroup_record(ent, 0);
-		rcu_read_lock();
-		swap_memcg = mem_cgroup_lookup(id);
-		if (swap_memcg) {
-			/*
-			 * This recorded memcg can be obsolete one. So, avoid
-			 * calling css_tryget
-			 */
-			if (!mem_cgroup_is_root(swap_memcg))
-				res_counter_uncharge(&swap_memcg->memsw,
-						     PAGE_SIZE);
-			mem_cgroup_swap_statistics(swap_memcg, false);
-			mem_cgroup_put(swap_memcg);
-		}
-		rcu_read_unlock();
+		mem_cgroup_uncharge_swap(ent);
 	}
 	/*
 	 * At swapin, we may charge account against cgroup which has no tasks.
@@ -3155,7 +3121,6 @@
  * @entry: swap entry to be moved
  * @from:  mem_cgroup which the entry is moved from
  * @to:  mem_cgroup which the entry is moved to
- * @need_fixup: whether we should fixup res_counters and refcounts.
  *
  * It succeeds only when the swap_cgroup's record for this entry is the same
  * as the mem_cgroup's id of @from.
@@ -3166,7 +3131,7 @@
  * both res and memsw, and called css_get().
  */
 static int mem_cgroup_move_swap_account(swp_entry_t entry,
-		struct mem_cgroup *from, struct mem_cgroup *to, bool need_fixup)
+				struct mem_cgroup *from, struct mem_cgroup *to)
 {
 	unsigned short old_id, new_id;
 
@@ -3185,24 +3150,13 @@
 		 * swap-in, the refcount of @to might be decreased to 0.
 		 */
 		mem_cgroup_get(to);
-		if (need_fixup) {
-			if (!mem_cgroup_is_root(from))
-				res_counter_uncharge(&from->memsw, PAGE_SIZE);
-			mem_cgroup_put(from);
-			/*
-			 * we charged both to->res and to->memsw, so we should
-			 * uncharge to->res.
-			 */
-			if (!mem_cgroup_is_root(to))
-				res_counter_uncharge(&to->res, PAGE_SIZE);
-		}
 		return 0;
 	}
 	return -EINVAL;
 }
 #else
 static inline int mem_cgroup_move_swap_account(swp_entry_t entry,
-		struct mem_cgroup *from, struct mem_cgroup *to, bool need_fixup)
+				struct mem_cgroup *from, struct mem_cgroup *to)
 {
 	return -EINVAL;
 }
@@ -3363,7 +3317,7 @@
 void mem_cgroup_replace_page_cache(struct page *oldpage,
 				  struct page *newpage)
 {
-	struct mem_cgroup *memcg;
+	struct mem_cgroup *memcg = NULL;
 	struct page_cgroup *pc;
 	enum charge_type type = MEM_CGROUP_CHARGE_TYPE_CACHE;
 
@@ -3373,11 +3327,20 @@
 	pc = lookup_page_cgroup(oldpage);
 	/* fix accounting on old pages */
 	lock_page_cgroup(pc);
-	memcg = pc->mem_cgroup;
-	mem_cgroup_charge_statistics(memcg, false, -1);
-	ClearPageCgroupUsed(pc);
+	if (PageCgroupUsed(pc)) {
+		memcg = pc->mem_cgroup;
+		mem_cgroup_charge_statistics(memcg, false, -1);
+		ClearPageCgroupUsed(pc);
+	}
 	unlock_page_cgroup(pc);
 
+	/*
+	 * When called from shmem_replace_page(), in some cases the
+	 * oldpage has already been charged, and in some cases not.
+	 */
+	if (!memcg)
+		return;
+
 	if (PageSwapBacked(oldpage))
 		type = MEM_CGROUP_CHARGE_TYPE_SHMEM;
 
@@ -3793,7 +3756,7 @@
 	goto move_account;
 }
 
-int mem_cgroup_force_empty_write(struct cgroup *cont, unsigned int event)
+static int mem_cgroup_force_empty_write(struct cgroup *cont, unsigned int event)
 {
 	return mem_cgroup_force_empty(mem_cgroup_from_cont(cont), true);
 }
@@ -4051,103 +4014,13 @@
 }
 #endif
 
-
-/* For read statistics */
-enum {
-	MCS_CACHE,
-	MCS_RSS,
-	MCS_FILE_MAPPED,
-	MCS_PGPGIN,
-	MCS_PGPGOUT,
-	MCS_SWAP,
-	MCS_PGFAULT,
-	MCS_PGMAJFAULT,
-	MCS_INACTIVE_ANON,
-	MCS_ACTIVE_ANON,
-	MCS_INACTIVE_FILE,
-	MCS_ACTIVE_FILE,
-	MCS_UNEVICTABLE,
-	NR_MCS_STAT,
-};
-
-struct mcs_total_stat {
-	s64 stat[NR_MCS_STAT];
-};
-
-struct {
-	char *local_name;
-	char *total_name;
-} memcg_stat_strings[NR_MCS_STAT] = {
-	{"cache", "total_cache"},
-	{"rss", "total_rss"},
-	{"mapped_file", "total_mapped_file"},
-	{"pgpgin", "total_pgpgin"},
-	{"pgpgout", "total_pgpgout"},
-	{"swap", "total_swap"},
-	{"pgfault", "total_pgfault"},
-	{"pgmajfault", "total_pgmajfault"},
-	{"inactive_anon", "total_inactive_anon"},
-	{"active_anon", "total_active_anon"},
-	{"inactive_file", "total_inactive_file"},
-	{"active_file", "total_active_file"},
-	{"unevictable", "total_unevictable"}
-};
-
-
-static void
-mem_cgroup_get_local_stat(struct mem_cgroup *memcg, struct mcs_total_stat *s)
-{
-	s64 val;
-
-	/* per cpu stat */
-	val = mem_cgroup_read_stat(memcg, MEM_CGROUP_STAT_CACHE);
-	s->stat[MCS_CACHE] += val * PAGE_SIZE;
-	val = mem_cgroup_read_stat(memcg, MEM_CGROUP_STAT_RSS);
-	s->stat[MCS_RSS] += val * PAGE_SIZE;
-	val = mem_cgroup_read_stat(memcg, MEM_CGROUP_STAT_FILE_MAPPED);
-	s->stat[MCS_FILE_MAPPED] += val * PAGE_SIZE;
-	val = mem_cgroup_read_events(memcg, MEM_CGROUP_EVENTS_PGPGIN);
-	s->stat[MCS_PGPGIN] += val;
-	val = mem_cgroup_read_events(memcg, MEM_CGROUP_EVENTS_PGPGOUT);
-	s->stat[MCS_PGPGOUT] += val;
-	if (do_swap_account) {
-		val = mem_cgroup_read_stat(memcg, MEM_CGROUP_STAT_SWAPOUT);
-		s->stat[MCS_SWAP] += val * PAGE_SIZE;
-	}
-	val = mem_cgroup_read_events(memcg, MEM_CGROUP_EVENTS_PGFAULT);
-	s->stat[MCS_PGFAULT] += val;
-	val = mem_cgroup_read_events(memcg, MEM_CGROUP_EVENTS_PGMAJFAULT);
-	s->stat[MCS_PGMAJFAULT] += val;
-
-	/* per zone stat */
-	val = mem_cgroup_nr_lru_pages(memcg, BIT(LRU_INACTIVE_ANON));
-	s->stat[MCS_INACTIVE_ANON] += val * PAGE_SIZE;
-	val = mem_cgroup_nr_lru_pages(memcg, BIT(LRU_ACTIVE_ANON));
-	s->stat[MCS_ACTIVE_ANON] += val * PAGE_SIZE;
-	val = mem_cgroup_nr_lru_pages(memcg, BIT(LRU_INACTIVE_FILE));
-	s->stat[MCS_INACTIVE_FILE] += val * PAGE_SIZE;
-	val = mem_cgroup_nr_lru_pages(memcg, BIT(LRU_ACTIVE_FILE));
-	s->stat[MCS_ACTIVE_FILE] += val * PAGE_SIZE;
-	val = mem_cgroup_nr_lru_pages(memcg, BIT(LRU_UNEVICTABLE));
-	s->stat[MCS_UNEVICTABLE] += val * PAGE_SIZE;
-}
-
-static void
-mem_cgroup_get_total_stat(struct mem_cgroup *memcg, struct mcs_total_stat *s)
-{
-	struct mem_cgroup *iter;
-
-	for_each_mem_cgroup_tree(iter, memcg)
-		mem_cgroup_get_local_stat(iter, s);
-}
-
 #ifdef CONFIG_NUMA
-static int mem_control_numa_stat_show(struct seq_file *m, void *arg)
+static int mem_control_numa_stat_show(struct cgroup *cont, struct cftype *cft,
+				      struct seq_file *m)
 {
 	int nid;
 	unsigned long total_nr, file_nr, anon_nr, unevictable_nr;
 	unsigned long node_nr;
-	struct cgroup *cont = m->private;
 	struct mem_cgroup *memcg = mem_cgroup_from_cont(cont);
 
 	total_nr = mem_cgroup_nr_lru_pages(memcg, LRU_ALL);
@@ -4188,64 +4061,100 @@
 }
 #endif /* CONFIG_NUMA */
 
+static const char * const mem_cgroup_lru_names[] = {
+	"inactive_anon",
+	"active_anon",
+	"inactive_file",
+	"active_file",
+	"unevictable",
+};
+
+static inline void mem_cgroup_lru_names_not_uptodate(void)
+{
+	BUILD_BUG_ON(ARRAY_SIZE(mem_cgroup_lru_names) != NR_LRU_LISTS);
+}
+
 static int mem_control_stat_show(struct cgroup *cont, struct cftype *cft,
-				 struct cgroup_map_cb *cb)
+				 struct seq_file *m)
 {
 	struct mem_cgroup *memcg = mem_cgroup_from_cont(cont);
-	struct mcs_total_stat mystat;
-	int i;
+	struct mem_cgroup *mi;
+	unsigned int i;
 
-	memset(&mystat, 0, sizeof(mystat));
-	mem_cgroup_get_local_stat(memcg, &mystat);
-
-
-	for (i = 0; i < NR_MCS_STAT; i++) {
-		if (i == MCS_SWAP && !do_swap_account)
+	for (i = 0; i < MEM_CGROUP_STAT_NSTATS; i++) {
+		if (i == MEM_CGROUP_STAT_SWAPOUT && !do_swap_account)
 			continue;
-		cb->fill(cb, memcg_stat_strings[i].local_name, mystat.stat[i]);
+		seq_printf(m, "%s %ld\n", mem_cgroup_stat_names[i],
+			   mem_cgroup_read_stat(memcg, i) * PAGE_SIZE);
 	}
 
+	for (i = 0; i < MEM_CGROUP_EVENTS_NSTATS; i++)
+		seq_printf(m, "%s %lu\n", mem_cgroup_events_names[i],
+			   mem_cgroup_read_events(memcg, i));
+
+	for (i = 0; i < NR_LRU_LISTS; i++)
+		seq_printf(m, "%s %lu\n", mem_cgroup_lru_names[i],
+			   mem_cgroup_nr_lru_pages(memcg, BIT(i)) * PAGE_SIZE);
+
 	/* Hierarchical information */
 	{
 		unsigned long long limit, memsw_limit;
 		memcg_get_hierarchical_limit(memcg, &limit, &memsw_limit);
-		cb->fill(cb, "hierarchical_memory_limit", limit);
+		seq_printf(m, "hierarchical_memory_limit %llu\n", limit);
 		if (do_swap_account)
-			cb->fill(cb, "hierarchical_memsw_limit", memsw_limit);
+			seq_printf(m, "hierarchical_memsw_limit %llu\n",
+				   memsw_limit);
 	}
 
-	memset(&mystat, 0, sizeof(mystat));
-	mem_cgroup_get_total_stat(memcg, &mystat);
-	for (i = 0; i < NR_MCS_STAT; i++) {
-		if (i == MCS_SWAP && !do_swap_account)
+	for (i = 0; i < MEM_CGROUP_STAT_NSTATS; i++) {
+		long long val = 0;
+
+		if (i == MEM_CGROUP_STAT_SWAPOUT && !do_swap_account)
 			continue;
-		cb->fill(cb, memcg_stat_strings[i].total_name, mystat.stat[i]);
+		for_each_mem_cgroup_tree(mi, memcg)
+			val += mem_cgroup_read_stat(mi, i) * PAGE_SIZE;
+		seq_printf(m, "total_%s %lld\n", mem_cgroup_stat_names[i], val);
+	}
+
+	for (i = 0; i < MEM_CGROUP_EVENTS_NSTATS; i++) {
+		unsigned long long val = 0;
+
+		for_each_mem_cgroup_tree(mi, memcg)
+			val += mem_cgroup_read_events(mi, i);
+		seq_printf(m, "total_%s %llu\n",
+			   mem_cgroup_events_names[i], val);
+	}
+
+	for (i = 0; i < NR_LRU_LISTS; i++) {
+		unsigned long long val = 0;
+
+		for_each_mem_cgroup_tree(mi, memcg)
+			val += mem_cgroup_nr_lru_pages(mi, BIT(i)) * PAGE_SIZE;
+		seq_printf(m, "total_%s %llu\n", mem_cgroup_lru_names[i], val);
 	}
 
 #ifdef CONFIG_DEBUG_VM
 	{
 		int nid, zid;
 		struct mem_cgroup_per_zone *mz;
+		struct zone_reclaim_stat *rstat;
 		unsigned long recent_rotated[2] = {0, 0};
 		unsigned long recent_scanned[2] = {0, 0};
 
 		for_each_online_node(nid)
 			for (zid = 0; zid < MAX_NR_ZONES; zid++) {
 				mz = mem_cgroup_zoneinfo(memcg, nid, zid);
+				rstat = &mz->lruvec.reclaim_stat;
 
-				recent_rotated[0] +=
-					mz->reclaim_stat.recent_rotated[0];
-				recent_rotated[1] +=
-					mz->reclaim_stat.recent_rotated[1];
-				recent_scanned[0] +=
-					mz->reclaim_stat.recent_scanned[0];
-				recent_scanned[1] +=
-					mz->reclaim_stat.recent_scanned[1];
+				recent_rotated[0] += rstat->recent_rotated[0];
+				recent_rotated[1] += rstat->recent_rotated[1];
+				recent_scanned[0] += rstat->recent_scanned[0];
+				recent_scanned[1] += rstat->recent_scanned[1];
 			}
-		cb->fill(cb, "recent_rotated_anon", recent_rotated[0]);
-		cb->fill(cb, "recent_rotated_file", recent_rotated[1]);
-		cb->fill(cb, "recent_scanned_anon", recent_scanned[0]);
-		cb->fill(cb, "recent_scanned_file", recent_scanned[1]);
+		seq_printf(m, "recent_rotated_anon %lu\n", recent_rotated[0]);
+		seq_printf(m, "recent_rotated_file %lu\n", recent_rotated[1]);
+		seq_printf(m, "recent_scanned_anon %lu\n", recent_scanned[0]);
+		seq_printf(m, "recent_scanned_file %lu\n", recent_scanned[1]);
 	}
 #endif
 
@@ -4307,7 +4216,7 @@
 	usage = mem_cgroup_usage(memcg, swap);
 
 	/*
-	 * current_threshold points to threshold just below usage.
+	 * current_threshold points to threshold just below or equal to usage.
 	 * If it's not true, a threshold was crossed after last
 	 * call of __mem_cgroup_threshold().
 	 */
@@ -4433,14 +4342,15 @@
 	/* Find current threshold */
 	new->current_threshold = -1;
 	for (i = 0; i < size; i++) {
-		if (new->entries[i].threshold < usage) {
+		if (new->entries[i].threshold <= usage) {
 			/*
 			 * new->current_threshold will not be used until
 			 * rcu_assign_pointer(), so it's safe to increment
 			 * it here.
 			 */
 			++new->current_threshold;
-		}
+		} else
+			break;
 	}
 
 	/* Free old spare buffer and save old primary buffer as spare */
@@ -4509,7 +4419,7 @@
 			continue;
 
 		new->entries[j] = thresholds->primary->entries[i];
-		if (new->entries[j].threshold < usage) {
+		if (new->entries[j].threshold <= usage) {
 			/*
 			 * new->current_threshold will not be used
 			 * until rcu_assign_pointer(), so it's safe to increment
@@ -4623,22 +4533,6 @@
 	return 0;
 }
 
-#ifdef CONFIG_NUMA
-static const struct file_operations mem_control_numa_stat_file_operations = {
-	.read = seq_read,
-	.llseek = seq_lseek,
-	.release = single_release,
-};
-
-static int mem_control_numa_stat_open(struct inode *unused, struct file *file)
-{
-	struct cgroup *cont = file->f_dentry->d_parent->d_fsdata;
-
-	file->f_op = &mem_control_numa_stat_file_operations;
-	return single_open(file, mem_control_numa_stat_show, cont);
-}
-#endif /* CONFIG_NUMA */
-
 #ifdef CONFIG_CGROUP_MEM_RES_CTLR_KMEM
 static int memcg_init_kmem(struct mem_cgroup *memcg, struct cgroup_subsys *ss)
 {
@@ -4694,7 +4588,7 @@
 	},
 	{
 		.name = "stat",
-		.read_map = mem_control_stat_show,
+		.read_seq_string = mem_control_stat_show,
 	},
 	{
 		.name = "force_empty",
@@ -4726,8 +4620,7 @@
 #ifdef CONFIG_NUMA
 	{
 		.name = "numa_stat",
-		.open = mem_control_numa_stat_open,
-		.mode = S_IRUGO,
+		.read_seq_string = mem_control_numa_stat_show,
 	},
 #endif
 #ifdef CONFIG_CGROUP_MEM_RES_CTLR_SWAP
@@ -4764,7 +4657,6 @@
 {
 	struct mem_cgroup_per_node *pn;
 	struct mem_cgroup_per_zone *mz;
-	enum lru_list lru;
 	int zone, tmp = node;
 	/*
 	 * This routine is called against possible nodes.
@@ -4782,8 +4674,7 @@
 
 	for (zone = 0; zone < MAX_NR_ZONES; zone++) {
 		mz = &pn->zoneinfo[zone];
-		for_each_lru(lru)
-			INIT_LIST_HEAD(&mz->lruvec.lists[lru]);
+		lruvec_init(&mz->lruvec, &NODE_DATA(node)->node_zones[zone]);
 		mz->usage_in_excess = 0;
 		mz->on_tree = false;
 		mz->memcg = memcg;
@@ -4826,23 +4717,40 @@
 }
 
 /*
- * Helpers for freeing a vzalloc()ed mem_cgroup by RCU,
+ * Helpers for freeing a kmalloc()ed/vzalloc()ed mem_cgroup by RCU,
  * but in process context.  The work_freeing structure is overlaid
  * on the rcu_freeing structure, which itself is overlaid on memsw.
  */
-static void vfree_work(struct work_struct *work)
+static void free_work(struct work_struct *work)
 {
 	struct mem_cgroup *memcg;
+	int size = sizeof(struct mem_cgroup);
 
 	memcg = container_of(work, struct mem_cgroup, work_freeing);
-	vfree(memcg);
+	/*
+	 * We need to make sure that (at least for now), the jump label
+	 * destruction code runs outside of the cgroup lock. This is because
+	 * get_online_cpus(), which is called from the static_branch update,
+	 * can't be called inside the cgroup_lock. cpusets are the ones
+	 * enforcing this dependency, so if they ever change, we might as well.
+	 *
+	 * schedule_work() will guarantee this happens. Be careful if you need
+	 * to move this code around, and make sure it is outside
+	 * the cgroup_lock.
+	 */
+	disarm_sock_keys(memcg);
+	if (size < PAGE_SIZE)
+		kfree(memcg);
+	else
+		vfree(memcg);
 }
-static void vfree_rcu(struct rcu_head *rcu_head)
+
+static void free_rcu(struct rcu_head *rcu_head)
 {
 	struct mem_cgroup *memcg;
 
 	memcg = container_of(rcu_head, struct mem_cgroup, rcu_freeing);
-	INIT_WORK(&memcg->work_freeing, vfree_work);
+	INIT_WORK(&memcg->work_freeing, free_work);
 	schedule_work(&memcg->work_freeing);
 }
 
@@ -4868,10 +4776,7 @@
 		free_mem_cgroup_per_zone_info(memcg, node);
 
 	free_percpu(memcg->stat);
-	if (sizeof(struct mem_cgroup) < PAGE_SIZE)
-		kfree_rcu(memcg, rcu_freeing);
-	else
-		call_rcu(&memcg->rcu_freeing, vfree_rcu);
+	call_rcu(&memcg->rcu_freeing, free_rcu);
 }
 
 static void mem_cgroup_get(struct mem_cgroup *memcg)
@@ -5135,7 +5040,7 @@
 		return NULL;
 	if (PageAnon(page)) {
 		/* we don't move shared anon */
-		if (!move_anon() || page_mapcount(page) > 2)
+		if (!move_anon())
 			return NULL;
 	} else if (!move_file())
 		/* we ignore mapcount for file pages */
@@ -5146,32 +5051,37 @@
 	return page;
 }
 
+#ifdef CONFIG_SWAP
 static struct page *mc_handle_swap_pte(struct vm_area_struct *vma,
 			unsigned long addr, pte_t ptent, swp_entry_t *entry)
 {
-	int usage_count;
 	struct page *page = NULL;
 	swp_entry_t ent = pte_to_swp_entry(ptent);
 
 	if (!move_anon() || non_swap_entry(ent))
 		return NULL;
-	usage_count = mem_cgroup_count_swap_user(ent, &page);
-	if (usage_count > 1) { /* we don't move shared anon */
-		if (page)
-			put_page(page);
-		return NULL;
-	}
+	/*
+	 * Because lookup_swap_cache() updates some statistics counter,
+	 * we call find_get_page() with swapper_space directly.
+	 */
+	page = find_get_page(&swapper_space, ent.val);
 	if (do_swap_account)
 		entry->val = ent.val;
 
 	return page;
 }
+#else
+static struct page *mc_handle_swap_pte(struct vm_area_struct *vma,
+			unsigned long addr, pte_t ptent, swp_entry_t *entry)
+{
+	return NULL;
+}
+#endif
 
 static struct page *mc_handle_file_pte(struct vm_area_struct *vma,
 			unsigned long addr, pte_t ptent, swp_entry_t *entry)
 {
 	struct page *page = NULL;
-	struct inode *inode;
 	struct address_space *mapping;
 	pgoff_t pgoff;
 
@@ -5180,7 +5090,6 @@
 	if (!move_file())
 		return NULL;
 
-	inode = vma->vm_file->f_path.dentry->d_inode;
 	mapping = vma->vm_file->f_mapping;
 	if (pte_none(ptent))
 		pgoff = linear_page_index(vma, addr);
@@ -5479,8 +5388,7 @@
 			if (!isolate_lru_page(page)) {
 				pc = lookup_page_cgroup(page);
 				if (!mem_cgroup_move_account(page, HPAGE_PMD_NR,
-							     pc, mc.from, mc.to,
-							     false)) {
+							pc, mc.from, mc.to)) {
 					mc.precharge -= HPAGE_PMD_NR;
 					mc.moved_charge += HPAGE_PMD_NR;
 				}
@@ -5510,7 +5418,7 @@
 				goto put;
 			pc = lookup_page_cgroup(page);
 			if (!mem_cgroup_move_account(page, 1, pc,
-						     mc.from, mc.to, false)) {
+						     mc.from, mc.to)) {
 				mc.precharge--;
 				/* we uncharge from mc.from later. */
 				mc.moved_charge++;
@@ -5521,8 +5429,7 @@
 			break;
 		case MC_TARGET_SWAP:
 			ent = target.ent;
-			if (!mem_cgroup_move_swap_account(ent,
-						mc.from, mc.to, false)) {
+			if (!mem_cgroup_move_swap_account(ent, mc.from, mc.to)) {
 				mc.precharge--;
 				/* we fixup refcnts and charges later. */
 				mc.moved_swap++;
@@ -5598,7 +5505,6 @@
 	if (mm) {
 		if (mc.to)
 			mem_cgroup_move_charge(mm);
-		put_swap_token(mm);
 		mmput(mm);
 	}
 	if (mc.to)
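
The seq_file conversion above changes how memory.stat is generated (seq_printf() instead of the cgroup_map_cb callbacks) but not what user space reads back: the same "cache", "rss", "pgpgin", "total_*" lines appear. A small reader sketch, assuming the v1 memory controller is mounted at /sys/fs/cgroup/memory:

#include <stdio.h>

int main(void)
{
	const char *path = "/sys/fs/cgroup/memory/memory.stat";	/* assumed mount point */
	char line[256];
	FILE *f = fopen(path, "r");

	if (!f) {
		perror(path);
		return 1;
	}
	/* Lines look like "cache 123456", "rss 789", "total_pgpgin 42", ... */
	while (fgets(line, sizeof(line), f))
		fputs(line, stdout);
	fclose(f);
	return 0;
}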
diff --git a/mm/memory-failure.c b/mm/memory-failure.c
index 97cc273..ab1e714 100644
--- a/mm/memory-failure.c
+++ b/mm/memory-failure.c
@@ -1388,23 +1388,23 @@
 	 */
 	if (!get_page_unless_zero(compound_head(p))) {
 		if (PageHuge(p)) {
-			pr_info("get_any_page: %#lx free huge page\n", pfn);
+			pr_info("%s: %#lx free huge page\n", __func__, pfn);
 			ret = dequeue_hwpoisoned_huge_page(compound_head(p));
 		} else if (is_free_buddy_page(p)) {
-			pr_info("get_any_page: %#lx free buddy page\n", pfn);
+			pr_info("%s: %#lx free buddy page\n", __func__, pfn);
 			/* Set hwpoison bit while page is still isolated */
 			SetPageHWPoison(p);
 			ret = 0;
 		} else {
-			pr_info("get_any_page: %#lx: unknown zero refcount page type %lx\n",
-				pfn, p->flags);
+			pr_info("%s: %#lx: unknown zero refcount page type %lx\n",
+				__func__, pfn, p->flags);
 			ret = -EIO;
 		}
 	} else {
 		/* Not a free page */
 		ret = 1;
 	}
-	unset_migratetype_isolate(p);
+	unset_migratetype_isolate(p, MIGRATE_MOVABLE);
 	unlock_memory_hotplug();
 	return ret;
 }
diff --git a/mm/memory.c b/mm/memory.c
index e40f675..2466d12 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -1225,7 +1225,15 @@
 		next = pmd_addr_end(addr, end);
 		if (pmd_trans_huge(*pmd)) {
 			if (next - addr != HPAGE_PMD_SIZE) {
-				VM_BUG_ON(!rwsem_is_locked(&tlb->mm->mmap_sem));
+#ifdef CONFIG_DEBUG_VM
+				if (!rwsem_is_locked(&tlb->mm->mmap_sem)) {
+					pr_err("%s: mmap_sem is unlocked! addr=0x%lx end=0x%lx vma->vm_start=0x%lx vma->vm_end=0x%lx\n",
+						__func__, addr, end,
+						vma->vm_start,
+						vma->vm_end);
+					BUG();
+				}
+#endif
 				split_huge_page_pmd(vma->vm_mm, pmd);
 			} else if (zap_huge_pmd(tlb, vma, pmd, addr))
 				goto next;
@@ -1366,7 +1374,7 @@
 /**
  * zap_page_range - remove user pages in a given range
  * @vma: vm_area_struct holding the applicable pages
- * @address: starting address of pages to zap
+ * @start: starting address of pages to zap
  * @size: number of bytes to zap
  * @details: details of nonlinear truncation or shared cache invalidation
  *
@@ -2908,7 +2916,6 @@
 	delayacct_set_flag(DELAYACCT_PF_SWAPIN);
 	page = lookup_swap_cache(entry);
 	if (!page) {
-		grab_swap_token(mm); /* Contend for token _before_ read-in */
 		page = swapin_readahead(entry,
 					GFP_HIGHUSER_MOVABLE, vma, address);
 		if (!page) {
@@ -2938,6 +2945,7 @@
 	}
 
 	locked = lock_page_or_retry(page, mm, flags);
+
 	delayacct_clear_flag(DELAYACCT_PF_SWAPIN);
 	if (!locked) {
 		ret |= VM_FAULT_RETRY;
@@ -3486,6 +3494,7 @@
 	if (unlikely(is_vm_hugetlb_page(vma)))
 		return hugetlb_fault(mm, vma, address, flags);
 
+retry:
 	pgd = pgd_offset(mm, address);
 	pud = pud_alloc(mm, pgd, address);
 	if (!pud)
@@ -3499,13 +3508,24 @@
 							  pmd, flags);
 	} else {
 		pmd_t orig_pmd = *pmd;
+		int ret;
+
 		barrier();
 		if (pmd_trans_huge(orig_pmd)) {
 			if (flags & FAULT_FLAG_WRITE &&
 			    !pmd_write(orig_pmd) &&
-			    !pmd_trans_splitting(orig_pmd))
-				return do_huge_pmd_wp_page(mm, vma, address,
-							   pmd, orig_pmd);
+			    !pmd_trans_splitting(orig_pmd)) {
+				ret = do_huge_pmd_wp_page(mm, vma, address, pmd,
+							  orig_pmd);
+				/*
+				 * If COW results in an oom, the huge pmd will
+				 * have been split, so retry the fault on the
+				 * pte for a smaller charge.
+				 */
+				if (unlikely(ret & VM_FAULT_OOM))
+					goto retry;
+				return ret;
+			}
 			return 0;
 		}
 	}
diff --git a/mm/memory_hotplug.c b/mm/memory_hotplug.c
index 6629faf..0d7e3ec 100644
--- a/mm/memory_hotplug.c
+++ b/mm/memory_hotplug.c
@@ -74,8 +74,7 @@
 	res->end = start + size - 1;
 	res->flags = IORESOURCE_MEM | IORESOURCE_BUSY;
 	if (request_resource(&iomem_resource, res) < 0) {
-		printk("System RAM resource %llx - %llx cannot be added\n",
-		(unsigned long long)res->start, (unsigned long long)res->end);
+		printk("System RAM resource %pR cannot be added\n", res);
 		kfree(res);
 		res = NULL;
 	}
@@ -502,8 +501,10 @@
 		online_pages_range);
 	if (ret) {
 		mutex_unlock(&zonelists_mutex);
-		printk(KERN_DEBUG "online_pages %lx at %lx failed\n",
-			nr_pages, pfn);
+		printk(KERN_DEBUG "online_pages [mem %#010llx-%#010llx] failed\n",
+		       (unsigned long long) pfn << PAGE_SHIFT,
+		       (((unsigned long long) pfn + nr_pages)
+			    << PAGE_SHIFT) - 1);
 		memory_notify(MEM_CANCEL_ONLINE, &arg);
 		unlock_memory_hotplug();
 		return ret;
@@ -891,7 +892,7 @@
 	nr_pages = end_pfn - start_pfn;
 
 	/* set above range as isolated */
-	ret = start_isolate_page_range(start_pfn, end_pfn);
+	ret = start_isolate_page_range(start_pfn, end_pfn, MIGRATE_MOVABLE);
 	if (ret)
 		goto out;
 
@@ -956,7 +957,7 @@
 	   We cannot do rollback at this point. */
 	offline_isolated_pages(start_pfn, end_pfn);
 	/* reset pagetype flags and makes migrate type to be MOVABLE */
-	undo_isolate_page_range(start_pfn, end_pfn);
+	undo_isolate_page_range(start_pfn, end_pfn, MIGRATE_MOVABLE);
 	/* removal success */
 	zone->present_pages -= offlined_pages;
 	zone->zone_pgdat->node_present_pages -= offlined_pages;
@@ -977,11 +978,12 @@
 	return 0;
 
 failed_removal:
-	printk(KERN_INFO "memory offlining %lx to %lx failed\n",
-		start_pfn, end_pfn);
+	printk(KERN_INFO "memory offlining [mem %#010llx-%#010llx] failed\n",
+	       (unsigned long long) start_pfn << PAGE_SHIFT,
+	       ((unsigned long long) end_pfn << PAGE_SHIFT) - 1);
 	memory_notify(MEM_CANCEL_OFFLINE, &arg);
 	/* pushback to free area */
-	undo_isolate_page_range(start_pfn, end_pfn);
+	undo_isolate_page_range(start_pfn, end_pfn, MIGRATE_MOVABLE);
 
 out:
 	unlock_memory_hotplug();
diff --git a/mm/mempolicy.c b/mm/mempolicy.c
index 88f9422..1d771e4 100644
--- a/mm/mempolicy.c
+++ b/mm/mempolicy.c
@@ -390,7 +390,7 @@
 {
 	if (!pol)
 		return;
-	if (!mpol_store_user_nodemask(pol) && step == 0 &&
+	if (!mpol_store_user_nodemask(pol) && step == MPOL_REBIND_ONCE &&
 	    nodes_equal(pol->w.cpuset_mems_allowed, *newmask))
 		return;
 
@@ -950,8 +950,8 @@
  *
  * Returns the number of page that could not be moved.
  */
-int do_migrate_pages(struct mm_struct *mm,
-	const nodemask_t *from_nodes, const nodemask_t *to_nodes, int flags)
+int do_migrate_pages(struct mm_struct *mm, const nodemask_t *from,
+		     const nodemask_t *to, int flags)
 {
 	int busy = 0;
 	int err;
@@ -963,7 +963,7 @@
 
 	down_read(&mm->mmap_sem);
 
-	err = migrate_vmas(mm, from_nodes, to_nodes, flags);
+	err = migrate_vmas(mm, from, to, flags);
 	if (err)
 		goto out;
 
@@ -998,14 +998,34 @@
 	 * moved to an empty node, then there is nothing left worth migrating.
 	 */
 
-	tmp = *from_nodes;
+	tmp = *from;
 	while (!nodes_empty(tmp)) {
 		int s,d;
 		int source = -1;
 		int dest = 0;
 
 		for_each_node_mask(s, tmp) {
-			d = node_remap(s, *from_nodes, *to_nodes);
+
+			/*
+			 * do_migrate_pages() tries to maintain the relative
+			 * node relationship of the pages established between
+			 * threads and memory areas.
+			 *
+			 * However, if the number of source nodes is not equal to
+			 * the number of destination nodes, we cannot preserve
+			 * this node relative relationship.  In that case, skip
+			 * copying memory from a node that is in the destination
+			 * mask.
+			 *
+			 * Example: [2,3,4] -> [3,4,5] moves everything.
+			 *          [0-7] -> [3,4,5] moves only 0,1,2,6,7.
+			 */
+
+			if ((nodes_weight(*from) != nodes_weight(*to)) &&
+						(node_isset(s, *to)))
+				continue;
+
+			d = node_remap(s, *from, *to);
 			if (s == d)
 				continue;
 
@@ -1065,8 +1085,8 @@
 {
 }
 
-int do_migrate_pages(struct mm_struct *mm,
-	const nodemask_t *from_nodes, const nodemask_t *to_nodes, int flags)
+int do_migrate_pages(struct mm_struct *mm, const nodemask_t *from,
+		     const nodemask_t *to, int flags)
 {
 	return -ENOSYS;
 }
@@ -1157,7 +1177,7 @@
 		if (!list_empty(&pagelist)) {
 			nr_failed = migrate_pages(&pagelist, new_vma_page,
 						(unsigned long)vma,
-						false, true);
+						false, MIGRATE_SYNC);
 			if (nr_failed)
 				putback_lru_pages(&pagelist);
 		}
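
The new skip rule in do_migrate_pages() above can be modelled outside the kernel with plain bitmasks; this toy program (not kernel code, node masks are just unsigned longs) reproduces the two examples from the comment:

#include <stdio.h>

static int weight(unsigned long mask)
{
	return __builtin_popcountl(mask);
}

static void show(unsigned long from, unsigned long to)
{
	for (int s = 0; s < 8; s++) {
		if (!(from & (1UL << s)))
			continue;
		/* different weights: leave alone any source node also in the destination */
		if (weight(from) != weight(to) && (to & (1UL << s)))
			continue;
		printf("node %d would be migrated\n", s);
	}
	printf("--\n");
}

int main(void)
{
	show(0x1c, 0x38);	/* [2,3,4] -> [3,4,5]: everything moves   */
	show(0xff, 0x38);	/* [0-7]   -> [3,4,5]: only 0,1,2,6,7 move */
	return 0;
}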
diff --git a/mm/migrate.c b/mm/migrate.c
index ab81d48..be26d5c 100644
--- a/mm/migrate.c
+++ b/mm/migrate.c
@@ -436,7 +436,10 @@
 		 * is actually a signal that all of the page has become dirty.
 		 * Whereas only part of our page may be dirty.
 		 */
-		__set_page_dirty_nobuffers(newpage);
+		if (PageSwapBacked(page))
+			SetPageDirty(newpage);
+		else
+			__set_page_dirty_nobuffers(newpage);
  	}
 
 	mlock_migrate_page(newpage, page);
diff --git a/mm/mmap.c b/mm/mmap.c
index e8dcfc7..3edfcdf 100644
--- a/mm/mmap.c
+++ b/mm/mmap.c
@@ -971,15 +971,13 @@
  * The caller must hold down_write(&current->mm->mmap_sem).
  */
 
-static unsigned long do_mmap_pgoff(struct file *file, unsigned long addr,
+unsigned long do_mmap_pgoff(struct file *file, unsigned long addr,
 			unsigned long len, unsigned long prot,
 			unsigned long flags, unsigned long pgoff)
 {
 	struct mm_struct * mm = current->mm;
 	struct inode *inode;
 	vm_flags_t vm_flags;
-	int error;
-	unsigned long reqprot = prot;
 
 	/*
 	 * Does the application expect PROT_READ to imply PROT_EXEC?
@@ -1101,39 +1099,9 @@
 		}
 	}
 
-	error = security_file_mmap(file, reqprot, prot, flags, addr, 0);
-	if (error)
-		return error;
-
 	return mmap_region(file, addr, len, flags, vm_flags, pgoff);
 }
 
-unsigned long do_mmap(struct file *file, unsigned long addr,
-	unsigned long len, unsigned long prot,
-	unsigned long flag, unsigned long offset)
-{
-	if (unlikely(offset + PAGE_ALIGN(len) < offset))
-		return -EINVAL;
-	if (unlikely(offset & ~PAGE_MASK))
-		return -EINVAL;
-	return do_mmap_pgoff(file, addr, len, prot, flag, offset >> PAGE_SHIFT);
-}
-EXPORT_SYMBOL(do_mmap);
-
-unsigned long vm_mmap(struct file *file, unsigned long addr,
-	unsigned long len, unsigned long prot,
-	unsigned long flag, unsigned long offset)
-{
-	unsigned long ret;
-	struct mm_struct *mm = current->mm;
-
-	down_write(&mm->mmap_sem);
-	ret = do_mmap(file, addr, len, prot, flag, offset);
-	up_write(&mm->mmap_sem);
-	return ret;
-}
-EXPORT_SYMBOL(vm_mmap);
-
 SYSCALL_DEFINE6(mmap_pgoff, unsigned long, addr, unsigned long, len,
 		unsigned long, prot, unsigned long, flags,
 		unsigned long, fd, unsigned long, pgoff)
@@ -1165,10 +1133,7 @@
 
 	flags &= ~(MAP_EXECUTABLE | MAP_DENYWRITE);
 
-	down_write(&current->mm->mmap_sem);
-	retval = do_mmap_pgoff(file, addr, len, prot, flags, pgoff);
-	up_write(&current->mm->mmap_sem);
-
+	retval = vm_mmap_pgoff(file, addr, len, prot, flags, pgoff);
 	if (file)
 		fput(file);
 out:
@@ -1629,7 +1594,9 @@
 	if (addr & ~PAGE_MASK)
 		return -EINVAL;
 
-	return arch_rebalance_pgtables(addr, len);
+	addr = arch_rebalance_pgtables(addr, len);
+	error = security_mmap_addr(addr);
+	return error ? error : addr;
 }
 
 EXPORT_SYMBOL(get_unmapped_area);
@@ -1639,33 +1606,34 @@
 {
 	struct vm_area_struct *vma = NULL;
 
-	if (mm) {
-		/* Check the cache first. */
-		/* (Cache hit rate is typically around 35%.) */
-		vma = mm->mmap_cache;
-		if (!(vma && vma->vm_end > addr && vma->vm_start <= addr)) {
-			struct rb_node * rb_node;
+	if (WARN_ON_ONCE(!mm))		/* Remove this in linux-3.6 */
+		return NULL;
 
-			rb_node = mm->mm_rb.rb_node;
-			vma = NULL;
+	/* Check the cache first. */
+	/* (Cache hit rate is typically around 35%.) */
+	vma = mm->mmap_cache;
+	if (!(vma && vma->vm_end > addr && vma->vm_start <= addr)) {
+		struct rb_node *rb_node;
 
-			while (rb_node) {
-				struct vm_area_struct * vma_tmp;
+		rb_node = mm->mm_rb.rb_node;
+		vma = NULL;
 
-				vma_tmp = rb_entry(rb_node,
-						struct vm_area_struct, vm_rb);
+		while (rb_node) {
+			struct vm_area_struct *vma_tmp;
 
-				if (vma_tmp->vm_end > addr) {
-					vma = vma_tmp;
-					if (vma_tmp->vm_start <= addr)
-						break;
-					rb_node = rb_node->rb_left;
-				} else
-					rb_node = rb_node->rb_right;
-			}
-			if (vma)
-				mm->mmap_cache = vma;
+			vma_tmp = rb_entry(rb_node,
+					   struct vm_area_struct, vm_rb);
+
+			if (vma_tmp->vm_end > addr) {
+				vma = vma_tmp;
+				if (vma_tmp->vm_start <= addr)
+					break;
+				rb_node = rb_node->rb_left;
+			} else
+				rb_node = rb_node->rb_right;
 		}
+		if (vma)
+			mm->mmap_cache = vma;
 	}
 	return vma;
 }
@@ -1818,7 +1786,7 @@
 		return -ENOMEM;
 
 	address &= PAGE_MASK;
-	error = security_file_mmap(NULL, 0, 0, 0, address, 1);
+	error = security_mmap_addr(address);
 	if (error)
 		return error;
 
@@ -2158,7 +2126,6 @@
 
 	return 0;
 }
-EXPORT_SYMBOL(do_munmap);
 
 int vm_munmap(unsigned long start, size_t len)
 {
@@ -2206,10 +2173,6 @@
 	if (!len)
 		return addr;
 
-	error = security_file_mmap(NULL, 0, 0, 0, addr, 1);
-	if (error)
-		return error;
-
 	flags = VM_DATA_DEFAULT_FLAGS | VM_ACCOUNT | mm->def_flags;
 
 	error = get_unmapped_area(NULL, addr, len, 0, MAP_FIXED);
@@ -2562,10 +2525,6 @@
 	vma->vm_ops = &special_mapping_vmops;
 	vma->vm_private_data = pages;
 
-	ret = security_file_mmap(NULL, 0, 0, 0, vma->vm_start, 1);
-	if (ret)
-		goto out;
-
 	ret = insert_vm_struct(mm, vma);
 	if (ret)
 		goto out;
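
With do_mmap() and vm_mmap() no longer defined in this file, in-kernel users are expected to go through the vm_mmap()/vm_munmap() helpers (presumably kept in common code), which take mmap_sem themselves. A minimal, hypothetical driver-side sketch of that usage; example_map_file()/example_unmap() and the length are placeholder names, not part of this series:

#include <linux/err.h>
#include <linux/fs.h>
#include <linux/mm.h>

/*
 * Hypothetical driver helper: map the first @len bytes of @filp
 * read/write into the current task.  vm_mmap() takes mmap_sem
 * itself, so the caller must not hold it.
 */
static unsigned long example_map_file(struct file *filp, size_t len)
{
	unsigned long addr;

	addr = vm_mmap(filp, 0, len, PROT_READ | PROT_WRITE,
		       MAP_SHARED, 0);
	return addr;	/* IS_ERR_VALUE(addr) signals failure */
}

static void example_unmap(unsigned long addr, size_t len)
{
	vm_munmap(addr, len);	/* also takes mmap_sem internally */
}
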
diff --git a/mm/mmzone.c b/mm/mmzone.c
index 7cf7b7d..6830eab 100644
--- a/mm/mmzone.c
+++ b/mm/mmzone.c
@@ -86,3 +86,17 @@
 	return 1;
 }
 #endif /* CONFIG_ARCH_HAS_HOLES_MEMORYMODEL */
+
+void lruvec_init(struct lruvec *lruvec, struct zone *zone)
+{
+	enum lru_list lru;
+
+	memset(lruvec, 0, sizeof(struct lruvec));
+
+	for_each_lru(lru)
+		INIT_LIST_HEAD(&lruvec->lists[lru]);
+
+#ifdef CONFIG_CGROUP_MEM_RES_CTLR
+	lruvec->zone = zone;
+#endif
+}
diff --git a/mm/mremap.c b/mm/mremap.c
index db8d983..21fed20 100644
--- a/mm/mremap.c
+++ b/mm/mremap.c
@@ -371,10 +371,6 @@
 	if ((addr <= new_addr) && (addr+old_len) > new_addr)
 		goto out;
 
-	ret = security_file_mmap(NULL, 0, 0, 0, new_addr, 1);
-	if (ret)
-		goto out;
-
 	ret = do_munmap(mm, new_addr, new_len);
 	if (ret)
 		goto out;
@@ -432,15 +428,17 @@
  * MREMAP_FIXED option added 5-Dec-1999 by Benjamin LaHaise
  * This option implies MREMAP_MAYMOVE.
  */
-unsigned long do_mremap(unsigned long addr,
-	unsigned long old_len, unsigned long new_len,
-	unsigned long flags, unsigned long new_addr)
+SYSCALL_DEFINE5(mremap, unsigned long, addr, unsigned long, old_len,
+		unsigned long, new_len, unsigned long, flags,
+		unsigned long, new_addr)
 {
 	struct mm_struct *mm = current->mm;
 	struct vm_area_struct *vma;
 	unsigned long ret = -EINVAL;
 	unsigned long charged = 0;
 
+	down_write(&current->mm->mmap_sem);
+
 	if (flags & ~(MREMAP_FIXED | MREMAP_MAYMOVE))
 		goto out;
 
@@ -530,25 +528,11 @@
 			goto out;
 		}
 
-		ret = security_file_mmap(NULL, 0, 0, 0, new_addr, 1);
-		if (ret)
-			goto out;
 		ret = move_vma(vma, addr, old_len, new_len, new_addr);
 	}
 out:
 	if (ret & ~PAGE_MASK)
 		vm_unacct_memory(charged);
-	return ret;
-}
-
-SYSCALL_DEFINE5(mremap, unsigned long, addr, unsigned long, old_len,
-		unsigned long, new_len, unsigned long, flags,
-		unsigned long, new_addr)
-{
-	unsigned long ret;
-
-	down_write(&current->mm->mmap_sem);
-	ret = do_mremap(addr, old_len, new_len, flags, new_addr);
 	up_write(&current->mm->mmap_sem);
 	return ret;
 }
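
Folding do_mremap() into the syscall does not change the userspace-visible behaviour of mremap(); a small, self-contained exercise of it (all sizes arbitrary):

#define _GNU_SOURCE
#include <stdio.h>
#include <string.h>
#include <sys/mman.h>
#include <unistd.h>

int main(void)
{
	long page = sysconf(_SC_PAGESIZE);

	/* One anonymous page, later grown to four pages. */
	char *p = mmap(NULL, page, PROT_READ | PROT_WRITE,
		       MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
	if (p == MAP_FAILED) { perror("mmap"); return 1; }
	strcpy(p, "hello");

	/* MREMAP_MAYMOVE lets the kernel relocate the mapping if needed. */
	char *q = mremap(p, page, 4 * page, MREMAP_MAYMOVE);
	if (q == MAP_FAILED) { perror("mremap"); return 1; }

	printf("old %p new %p contents \"%s\"\n", (void *)p, (void *)q, q);
	munmap(q, 4 * page);
	return 0;
}
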
diff --git a/mm/nobootmem.c b/mm/nobootmem.c
index 1983fb1..d23415c 100644
--- a/mm/nobootmem.c
+++ b/mm/nobootmem.c
@@ -274,6 +274,57 @@
 	return ___alloc_bootmem(size, align, goal, limit);
 }
 
+static void * __init ___alloc_bootmem_node_nopanic(pg_data_t *pgdat,
+						   unsigned long size,
+						   unsigned long align,
+						   unsigned long goal,
+						   unsigned long limit)
+{
+	void *ptr;
+
+again:
+	ptr = __alloc_memory_core_early(pgdat->node_id, size, align,
+					goal, limit);
+	if (ptr)
+		return ptr;
+
+	ptr = __alloc_memory_core_early(MAX_NUMNODES, size, align,
+					goal, limit);
+	if (ptr)
+		return ptr;
+
+	if (goal) {
+		goal = 0;
+		goto again;
+	}
+
+	return NULL;
+}
+
+void * __init __alloc_bootmem_node_nopanic(pg_data_t *pgdat, unsigned long size,
+				   unsigned long align, unsigned long goal)
+{
+	if (WARN_ON_ONCE(slab_is_available()))
+		return kzalloc_node(size, GFP_NOWAIT, pgdat->node_id);
+
+	return ___alloc_bootmem_node_nopanic(pgdat, size, align, goal, 0);
+}
+
+void * __init ___alloc_bootmem_node(pg_data_t *pgdat, unsigned long size,
+				    unsigned long align, unsigned long goal,
+				    unsigned long limit)
+{
+	void *ptr;
+
+	ptr = ___alloc_bootmem_node_nopanic(pgdat, size, align, goal, limit);
+	if (ptr)
+		return ptr;
+
+	printk(KERN_ALERT "bootmem alloc of %lu bytes failed!\n", size);
+	panic("Out of memory");
+	return NULL;
+}
+
 /**
  * __alloc_bootmem_node - allocate boot memory from a specific node
  * @pgdat: node to allocate from
@@ -292,24 +343,10 @@
 void * __init __alloc_bootmem_node(pg_data_t *pgdat, unsigned long size,
 				   unsigned long align, unsigned long goal)
 {
-	void *ptr;
-
 	if (WARN_ON_ONCE(slab_is_available()))
 		return kzalloc_node(size, GFP_NOWAIT, pgdat->node_id);
 
-again:
-	ptr = __alloc_memory_core_early(pgdat->node_id, size, align,
-					 goal, -1ULL);
-	if (ptr)
-		return ptr;
-
-	ptr = __alloc_memory_core_early(MAX_NUMNODES, size, align,
-					goal, -1ULL);
-	if (!ptr && goal) {
-		goal = 0;
-		goto again;
-	}
-	return ptr;
+	return ___alloc_bootmem_node(pgdat, size, align, goal, 0);
 }
 
 void * __init __alloc_bootmem_node_high(pg_data_t *pgdat, unsigned long size,
@@ -318,44 +355,6 @@
 	return __alloc_bootmem_node(pgdat, size, align, goal);
 }
 
-#ifdef CONFIG_SPARSEMEM
-/**
- * alloc_bootmem_section - allocate boot memory from a specific section
- * @size: size of the request in bytes
- * @section_nr: sparse map section to allocate from
- *
- * Return NULL on failure.
- */
-void * __init alloc_bootmem_section(unsigned long size,
-				    unsigned long section_nr)
-{
-	unsigned long pfn, goal, limit;
-
-	pfn = section_nr_to_pfn(section_nr);
-	goal = pfn << PAGE_SHIFT;
-	limit = section_nr_to_pfn(section_nr + 1) << PAGE_SHIFT;
-
-	return __alloc_memory_core_early(early_pfn_to_nid(pfn), size,
-					 SMP_CACHE_BYTES, goal, limit);
-}
-#endif
-
-void * __init __alloc_bootmem_node_nopanic(pg_data_t *pgdat, unsigned long size,
-				   unsigned long align, unsigned long goal)
-{
-	void *ptr;
-
-	if (WARN_ON_ONCE(slab_is_available()))
-		return kzalloc_node(size, GFP_NOWAIT, pgdat->node_id);
-
-	ptr =  __alloc_memory_core_early(pgdat->node_id, size, align,
-						 goal, -1ULL);
-	if (ptr)
-		return ptr;
-
-	return __alloc_bootmem_nopanic(size, align, goal);
-}
-
 #ifndef ARCH_LOW_ADDRESS_LIMIT
 #define ARCH_LOW_ADDRESS_LIMIT	0xffffffffUL
 #endif
@@ -397,16 +396,9 @@
 void * __init __alloc_bootmem_low_node(pg_data_t *pgdat, unsigned long size,
 				       unsigned long align, unsigned long goal)
 {
-	void *ptr;
-
 	if (WARN_ON_ONCE(slab_is_available()))
 		return kzalloc_node(size, GFP_NOWAIT, pgdat->node_id);
 
-	ptr = __alloc_memory_core_early(pgdat->node_id, size, align,
-				goal, ARCH_LOW_ADDRESS_LIMIT);
-	if (ptr)
-		return ptr;
-
-	return  __alloc_memory_core_early(MAX_NUMNODES, size, align,
-				goal, ARCH_LOW_ADDRESS_LIMIT);
+	return ___alloc_bootmem_node(pgdat, size, align, goal,
+				     ARCH_LOW_ADDRESS_LIMIT);
 }
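
As context for the consolidation above, a sketch of a typical early-boot (pre-slab) caller; the node id, size and DMA goal are placeholders and the surrounding arch setup code is omitted:

#include <linux/bootmem.h>
#include <linux/mmzone.h>
#include <asm/dma.h>

/* Per-node allocation during early init: tries @nid above the goal,
 * then any node, then retries with goal 0, and returns NULL instead
 * of panicking if everything fails. */
void __init example_early_alloc(int nid, unsigned long size)
{
	void *map = __alloc_bootmem_node_nopanic(NODE_DATA(nid), size,
						 SMP_CACHE_BYTES,
						 __pa(MAX_DMA_ADDRESS));
	if (!map)
		pr_warn("node %d: early allocation of %lu bytes failed\n",
			nid, size);
}
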
diff --git a/mm/nommu.c b/mm/nommu.c
index bb8f4f0..d4b0c10 100644
--- a/mm/nommu.c
+++ b/mm/nommu.c
@@ -889,7 +889,6 @@
 				 unsigned long *_capabilities)
 {
 	unsigned long capabilities, rlen;
-	unsigned long reqprot = prot;
 	int ret;
 
 	/* do the simple checks first */
@@ -1047,7 +1046,7 @@
 	}
 
 	/* allow the security API to have its say */
-	ret = security_file_mmap(file, reqprot, prot, flags, addr, 0);
+	ret = security_mmap_addr(addr);
 	if (ret < 0)
 		return ret;
 
@@ -1233,7 +1232,7 @@
 /*
  * handle mapping creation for uClinux
  */
-static unsigned long do_mmap_pgoff(struct file *file,
+unsigned long do_mmap_pgoff(struct file *file,
 			    unsigned long addr,
 			    unsigned long len,
 			    unsigned long prot,
@@ -1471,32 +1470,6 @@
 	return -ENOMEM;
 }
 
-unsigned long do_mmap(struct file *file, unsigned long addr,
-	unsigned long len, unsigned long prot,
-	unsigned long flag, unsigned long offset)
-{
-	if (unlikely(offset + PAGE_ALIGN(len) < offset))
-		return -EINVAL;
-	if (unlikely(offset & ~PAGE_MASK))
-		return -EINVAL;
-	return do_mmap_pgoff(file, addr, len, prot, flag, offset >> PAGE_SHIFT);
-}
-EXPORT_SYMBOL(do_mmap);
-
-unsigned long vm_mmap(struct file *file, unsigned long addr,
-	unsigned long len, unsigned long prot,
-	unsigned long flag, unsigned long offset)
-{
-	unsigned long ret;
-	struct mm_struct *mm = current->mm;
-
-	down_write(&mm->mmap_sem);
-	ret = do_mmap(file, addr, len, prot, flag, offset);
-	up_write(&mm->mmap_sem);
-	return ret;
-}
-EXPORT_SYMBOL(vm_mmap);
-
 SYSCALL_DEFINE6(mmap_pgoff, unsigned long, addr, unsigned long, len,
 		unsigned long, prot, unsigned long, flags,
 		unsigned long, fd, unsigned long, pgoff)
@@ -1513,9 +1486,7 @@
 
 	flags &= ~(MAP_EXECUTABLE | MAP_DENYWRITE);
 
-	down_write(&current->mm->mmap_sem);
-	retval = do_mmap_pgoff(file, addr, len, prot, flags, pgoff);
-	up_write(&current->mm->mmap_sem);
+	retval = vm_mmap_pgoff(file, addr, len, prot, flags, pgoff);
 
 	if (file)
 		fput(file);
diff --git a/mm/oom_kill.c b/mm/oom_kill.c
index 9f09a1f..ac300c9 100644
--- a/mm/oom_kill.c
+++ b/mm/oom_kill.c
@@ -180,10 +180,11 @@
  * predictable as possible.  The goal is to return the highest value for the
  * task consuming the most memory to avoid subsequent oom failures.
  */
-unsigned int oom_badness(struct task_struct *p, struct mem_cgroup *memcg,
-		      const nodemask_t *nodemask, unsigned long totalpages)
+unsigned long oom_badness(struct task_struct *p, struct mem_cgroup *memcg,
+			  const nodemask_t *nodemask, unsigned long totalpages)
 {
 	long points;
+	long adj;
 
 	if (oom_unkillable_task(p, memcg, nodemask))
 		return 0;
@@ -192,27 +193,18 @@
 	if (!p)
 		return 0;
 
-	if (p->signal->oom_score_adj == OOM_SCORE_ADJ_MIN) {
+	adj = p->signal->oom_score_adj;
+	if (adj == OOM_SCORE_ADJ_MIN) {
 		task_unlock(p);
 		return 0;
 	}
 
 	/*
-	 * The memory controller may have a limit of 0 bytes, so avoid a divide
-	 * by zero, if necessary.
-	 */
-	if (!totalpages)
-		totalpages = 1;
-
-	/*
 	 * The baseline for the badness score is the proportion of RAM that each
 	 * task's rss, pagetable and swap space use.
 	 */
-	points = get_mm_rss(p->mm) + p->mm->nr_ptes;
-	points += get_mm_counter(p->mm, MM_SWAPENTS);
-
-	points *= 1000;
-	points /= totalpages;
+	points = get_mm_rss(p->mm) + p->mm->nr_ptes +
+		 get_mm_counter(p->mm, MM_SWAPENTS);
 	task_unlock(p);
 
 	/*
@@ -220,23 +212,17 @@
 	 * implementation used by LSMs.
 	 */
 	if (has_capability_noaudit(p, CAP_SYS_ADMIN))
-		points -= 30;
+		adj -= 30;
+
+	/* Normalize to oom_score_adj units */
+	adj *= totalpages / 1000;
+	points += adj;
 
 	/*
-	 * /proc/pid/oom_score_adj ranges from -1000 to +1000 such that it may
-	 * either completely disable oom killing or always prefer a certain
-	 * task.
+	 * Never return 0 for an eligible task regardless of the root bonus and
+	 * oom_score_adj (oom_score_adj can't be OOM_SCORE_ADJ_MIN here).
 	 */
-	points += p->signal->oom_score_adj;
-
-	/*
-	 * Never return 0 for an eligible task that may be killed since it's
-	 * possible that no single user task uses more than 0.1% of memory and
-	 * no single admin tasks uses more than 3.0%.
-	 */
-	if (points <= 0)
-		return 1;
-	return (points < 1000) ? points : 1000;
+	return points > 0 ? points : 1;
 }
 
 /*
@@ -314,7 +300,7 @@
 {
 	struct task_struct *g, *p;
 	struct task_struct *chosen = NULL;
-	*ppoints = 0;
+	unsigned long chosen_points = 0;
 
 	do_each_thread(g, p) {
 		unsigned int points;
@@ -354,7 +340,7 @@
 			 */
 			if (p == current) {
 				chosen = p;
-				*ppoints = 1000;
+				chosen_points = ULONG_MAX;
 			} else if (!force_kill) {
 				/*
 				 * If this task is not being ptraced on exit,
@@ -367,18 +353,19 @@
 		}
 
 		points = oom_badness(p, memcg, nodemask, totalpages);
-		if (points > *ppoints) {
+		if (points > chosen_points) {
 			chosen = p;
-			*ppoints = points;
+			chosen_points = points;
 		}
 	} while_each_thread(g, p);
 
+	*ppoints = chosen_points * 1000 / totalpages;
 	return chosen;
 }
 
 /**
  * dump_tasks - dump current memory state of all system tasks
- * @mem: current's memory controller, if constrained
+ * @memcg: current's memory controller, if constrained
  * @nodemask: nodemask passed to page allocator for mempolicy ooms
  *
  * Dumps the current memory state of all eligible tasks.  Tasks not in the same
@@ -572,7 +559,7 @@
 	}
 
 	check_panic_on_oom(CONSTRAINT_MEMCG, gfp_mask, order, NULL);
-	limit = mem_cgroup_get_limit(memcg) >> PAGE_SHIFT;
+	limit = mem_cgroup_get_limit(memcg) >> PAGE_SHIFT ? : 1;
 	read_lock(&tasklist_lock);
 	p = select_bad_process(&points, limit, memcg, NULL, false);
 	if (p && PTR_ERR(p) != -1UL)
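
To make the new scale concrete: oom_badness() now works in raw pages (rss + page tables + swap entries) with oom_score_adj scaled to the task's allowed memory, and select_bad_process() converts the winner back to the familiar 0..1000 range only for reporting. A small worked example with made-up numbers (illustration only, not kernel code):

#include <stdio.h>

int main(void)
{
	unsigned long totalpages = 2UL << 20;	/* 8 GiB in 4 KiB pages */
	unsigned long rss = 300000, nr_ptes = 900, swapents = 50000;
	long adj = 100;				/* /proc/<pid>/oom_score_adj */

	long points = rss + nr_ptes + swapents;	/* raw badness in pages */
	adj *= totalpages / 1000;		/* one adj unit = 0.1% of totalpages */
	points += adj;
	if (points < 1)
		points = 1;			/* eligible tasks never score 0 */

	printf("raw points %ld, reported score %ld\n",
	       points, points * 1000 / totalpages);
	return 0;
}
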
diff --git a/mm/page-writeback.c b/mm/page-writeback.c
index 26adea8..93d8d2f 100644
--- a/mm/page-writeback.c
+++ b/mm/page-writeback.c
@@ -204,7 +204,7 @@
  * Returns the global number of pages potentially available for dirty
  * page cache.  This is the base value for the global dirty limits.
  */
-unsigned long global_dirtyable_memory(void)
+static unsigned long global_dirtyable_memory(void)
 {
 	unsigned long x;
 
@@ -1568,6 +1568,7 @@
 	unsigned long background_thresh;
 	unsigned long dirty_thresh;
 	global_dirty_limits(&background_thresh, &dirty_thresh);
+	global_dirty_limit = dirty_thresh;
 	ratelimit_pages = dirty_thresh / (num_online_cpus() * 32);
 	if (ratelimit_pages < 16)
 		ratelimit_pages = 16;
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index 1851df6..4403009 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -57,6 +57,7 @@
 #include <linux/ftrace_event.h>
 #include <linux/memcontrol.h>
 #include <linux/prefetch.h>
+#include <linux/migrate.h>
 #include <linux/page-debug-flags.h>
 
 #include <asm/tlbflush.h>
@@ -513,10 +514,10 @@
  * free pages of length of (1 << order) and marked with _mapcount -2. Page's
  * order is recorded in page_private(page) field.
  * So when we are allocating or freeing one, we can derive the state of the
- * other.  That is, if we allocate a small block, and both were   
- * free, the remainder of the region must be split into blocks.   
+ * other.  That is, if we allocate a small block, and both were
+ * free, the remainder of the region must be split into blocks.
  * If a block is freed, and its buddy is also free, then this
- * triggers coalescing into a block of larger size.            
+ * triggers coalescing into a block of larger size.
  *
  * -- wli
  */
@@ -749,6 +750,24 @@
 	__free_pages(page, order);
 }
 
+#ifdef CONFIG_CMA
+/* Free whole pageblock and set its migration type to MIGRATE_CMA. */
+void __init init_cma_reserved_pageblock(struct page *page)
+{
+	unsigned i = pageblock_nr_pages;
+	struct page *p = page;
+
+	do {
+		__ClearPageReserved(p);
+		set_page_count(p, 0);
+	} while (++p, --i);
+
+	set_page_refcounted(page);
+	set_pageblock_migratetype(page, MIGRATE_CMA);
+	__free_pages(page, pageblock_order);
+	totalram_pages += pageblock_nr_pages;
+}
+#endif
 
 /*
  * The order of subdivision here is critical for the IO subsystem.
@@ -874,11 +893,17 @@
  * This array describes the order lists are fallen back to when
  * the free lists for the desirable migrate type are depleted
  */
-static int fallbacks[MIGRATE_TYPES][MIGRATE_TYPES-1] = {
-	[MIGRATE_UNMOVABLE]   = { MIGRATE_RECLAIMABLE, MIGRATE_MOVABLE,   MIGRATE_RESERVE },
-	[MIGRATE_RECLAIMABLE] = { MIGRATE_UNMOVABLE,   MIGRATE_MOVABLE,   MIGRATE_RESERVE },
-	[MIGRATE_MOVABLE]     = { MIGRATE_RECLAIMABLE, MIGRATE_UNMOVABLE, MIGRATE_RESERVE },
-	[MIGRATE_RESERVE]     = { MIGRATE_RESERVE,     MIGRATE_RESERVE,   MIGRATE_RESERVE }, /* Never used */
+static int fallbacks[MIGRATE_TYPES][4] = {
+	[MIGRATE_UNMOVABLE]   = { MIGRATE_RECLAIMABLE, MIGRATE_MOVABLE,     MIGRATE_RESERVE },
+	[MIGRATE_RECLAIMABLE] = { MIGRATE_UNMOVABLE,   MIGRATE_MOVABLE,     MIGRATE_RESERVE },
+#ifdef CONFIG_CMA
+	[MIGRATE_MOVABLE]     = { MIGRATE_CMA,         MIGRATE_RECLAIMABLE, MIGRATE_UNMOVABLE, MIGRATE_RESERVE },
+	[MIGRATE_CMA]         = { MIGRATE_RESERVE }, /* Never used */
+#else
+	[MIGRATE_MOVABLE]     = { MIGRATE_RECLAIMABLE, MIGRATE_UNMOVABLE,   MIGRATE_RESERVE },
+#endif
+	[MIGRATE_RESERVE]     = { MIGRATE_RESERVE }, /* Never used */
+	[MIGRATE_ISOLATE]     = { MIGRATE_RESERVE }, /* Never used */
 };
 
 /*
@@ -973,12 +998,12 @@
 	/* Find the largest possible block of pages in the other list */
 	for (current_order = MAX_ORDER-1; current_order >= order;
 						--current_order) {
-		for (i = 0; i < MIGRATE_TYPES - 1; i++) {
+		for (i = 0;; i++) {
 			migratetype = fallbacks[start_migratetype][i];
 
 			/* MIGRATE_RESERVE handled later if necessary */
 			if (migratetype == MIGRATE_RESERVE)
-				continue;
+				break;
 
 			area = &(zone->free_area[current_order]);
 			if (list_empty(&area->free_list[migratetype]))
@@ -993,11 +1018,18 @@
 			 * pages to the preferred allocation list. If falling
 			 * back for a reclaimable kernel allocation, be more
 			 * aggressive about taking ownership of free pages
+			 *
+			 * On the other hand, never change the migration
+			 * type of MIGRATE_CMA pageblocks nor move CMA
+			 * pages onto different free lists. We don't
+			 * want unmovable pages to be allocated from
+			 * MIGRATE_CMA areas.
 			 */
-			if (unlikely(current_order >= (pageblock_order >> 1)) ||
-					start_migratetype == MIGRATE_RECLAIMABLE ||
-					page_group_by_mobility_disabled) {
-				unsigned long pages;
+			if (!is_migrate_cma(migratetype) &&
+			    (unlikely(current_order >= pageblock_order / 2) ||
+			     start_migratetype == MIGRATE_RECLAIMABLE ||
+			     page_group_by_mobility_disabled)) {
+				int pages;
 				pages = move_freepages_block(zone, page,
 								start_migratetype);
 
@@ -1015,11 +1047,14 @@
 			rmv_page_order(page);
 
 			/* Take ownership for orders >= pageblock_order */
-			if (current_order >= pageblock_order)
+			if (current_order >= pageblock_order &&
+			    !is_migrate_cma(migratetype))
 				change_pageblock_range(page, current_order,
 							start_migratetype);
 
-			expand(zone, page, order, current_order, area, migratetype);
+			expand(zone, page, order, current_order, area,
+			       is_migrate_cma(migratetype)
+			     ? migratetype : start_migratetype);
 
 			trace_mm_page_alloc_extfrag(page, order, current_order,
 				start_migratetype, migratetype);
@@ -1061,17 +1096,17 @@
 	return page;
 }
 
-/* 
+/*
  * Obtain a specified number of elements from the buddy allocator, all under
  * a single hold of the lock, for efficiency.  Add them to the supplied list.
  * Returns the number of new pages which were placed at *list.
  */
-static int rmqueue_bulk(struct zone *zone, unsigned int order, 
+static int rmqueue_bulk(struct zone *zone, unsigned int order,
 			unsigned long count, struct list_head *list,
 			int migratetype, int cold)
 {
-	int i;
-	
+	int mt = migratetype, i;
+
 	spin_lock(&zone->lock);
 	for (i = 0; i < count; ++i) {
 		struct page *page = __rmqueue(zone, order, migratetype);
@@ -1091,7 +1126,12 @@
 			list_add(&page->lru, list);
 		else
 			list_add_tail(&page->lru, list);
-		set_page_private(page, migratetype);
+		if (IS_ENABLED(CONFIG_CMA)) {
+			mt = get_pageblock_migratetype(page);
+			if (!is_migrate_cma(mt) && mt != MIGRATE_ISOLATE)
+				mt = migratetype;
+		}
+		set_page_private(page, mt);
 		list = &page->lru;
 	}
 	__mod_zone_page_state(zone, NR_FREE_PAGES, -(i << order));
@@ -1371,8 +1411,12 @@
 
 	if (order >= pageblock_order - 1) {
 		struct page *endpage = page + (1 << order) - 1;
-		for (; page < endpage; page += pageblock_nr_pages)
-			set_pageblock_migratetype(page, MIGRATE_MOVABLE);
+		for (; page < endpage; page += pageblock_nr_pages) {
+			int mt = get_pageblock_migratetype(page);
+			if (mt != MIGRATE_ISOLATE && !is_migrate_cma(mt))
+				set_pageblock_migratetype(page,
+							  MIGRATE_MOVABLE);
+		}
 	}
 
 	return 1 << order;
@@ -2086,16 +2130,13 @@
 }
 #endif /* CONFIG_COMPACTION */
 
-/* The really slow allocator path where we enter direct reclaim */
-static inline struct page *
-__alloc_pages_direct_reclaim(gfp_t gfp_mask, unsigned int order,
-	struct zonelist *zonelist, enum zone_type high_zoneidx,
-	nodemask_t *nodemask, int alloc_flags, struct zone *preferred_zone,
-	int migratetype, unsigned long *did_some_progress)
+/* Perform direct synchronous page reclaim */
+static int
+__perform_reclaim(gfp_t gfp_mask, unsigned int order, struct zonelist *zonelist,
+		  nodemask_t *nodemask)
 {
-	struct page *page = NULL;
 	struct reclaim_state reclaim_state;
-	bool drained = false;
+	int progress;
 
 	cond_resched();
 
@@ -2106,7 +2147,7 @@
 	reclaim_state.reclaimed_slab = 0;
 	current->reclaim_state = &reclaim_state;
 
-	*did_some_progress = try_to_free_pages(zonelist, order, gfp_mask, nodemask);
+	progress = try_to_free_pages(zonelist, order, gfp_mask, nodemask);
 
 	current->reclaim_state = NULL;
 	lockdep_clear_current_reclaim_state();
@@ -2114,6 +2155,21 @@
 
 	cond_resched();
 
+	return progress;
+}
+
+/* The really slow allocator path where we enter direct reclaim */
+static inline struct page *
+__alloc_pages_direct_reclaim(gfp_t gfp_mask, unsigned int order,
+	struct zonelist *zonelist, enum zone_type high_zoneidx,
+	nodemask_t *nodemask, int alloc_flags, struct zone *preferred_zone,
+	int migratetype, unsigned long *did_some_progress)
+{
+	struct page *page = NULL;
+	bool drained = false;
+
+	*did_some_progress = __perform_reclaim(gfp_mask, order, zonelist,
+					       nodemask);
 	if (unlikely(!(*did_some_progress)))
 		return NULL;
 
@@ -4244,25 +4300,24 @@
 
 #ifdef CONFIG_HUGETLB_PAGE_SIZE_VARIABLE
 
-/* Return a sensible default order for the pageblock size. */
-static inline int pageblock_default_order(void)
-{
-	if (HPAGE_SHIFT > PAGE_SHIFT)
-		return HUGETLB_PAGE_ORDER;
-
-	return MAX_ORDER-1;
-}
-
 /* Initialise the number of pages represented by NR_PAGEBLOCK_BITS */
-static inline void __init set_pageblock_order(unsigned int order)
+static inline void __init set_pageblock_order(void)
 {
+	unsigned int order;
+
 	/* Check that pageblock_nr_pages has not already been setup */
 	if (pageblock_order)
 		return;
 
+	if (HPAGE_SHIFT > PAGE_SHIFT)
+		order = HUGETLB_PAGE_ORDER;
+	else
+		order = MAX_ORDER - 1;
+
 	/*
 	 * Assume the largest contiguous order of interest is a huge page.
-	 * This value may be variable depending on boot parameters on IA64
+	 * This value may be variable depending on boot parameters on IA64 and
+	 * powerpc.
 	 */
 	pageblock_order = order;
 }
@@ -4270,15 +4325,13 @@
 
 /*
  * When CONFIG_HUGETLB_PAGE_SIZE_VARIABLE is not set, set_pageblock_order()
- * and pageblock_default_order() are unused as pageblock_order is set
- * at compile-time. See include/linux/pageblock-flags.h for the values of
- * pageblock_order based on the kernel config
+ * is unused as pageblock_order is set at compile-time. See
+ * include/linux/pageblock-flags.h for the values of pageblock_order based on
+ * the kernel config
  */
-static inline int pageblock_default_order(unsigned int order)
+static inline void set_pageblock_order(void)
 {
-	return MAX_ORDER-1;
 }
-#define set_pageblock_order(x)	do {} while (0)
 
 #endif /* CONFIG_HUGETLB_PAGE_SIZE_VARIABLE */
 
@@ -4301,11 +4354,10 @@
 	init_waitqueue_head(&pgdat->kswapd_wait);
 	pgdat->kswapd_max_order = 0;
 	pgdat_page_cgroup_init(pgdat);
-	
+
 	for (j = 0; j < MAX_NR_ZONES; j++) {
 		struct zone *zone = pgdat->node_zones + j;
 		unsigned long size, realsize, memmap_pages;
-		enum lru_list lru;
 
 		size = zone_spanned_pages_in_node(nid, j, zones_size);
 		realsize = size - zone_absent_pages_in_node(nid, j,
@@ -4355,18 +4407,13 @@
 		zone->zone_pgdat = pgdat;
 
 		zone_pcp_init(zone);
-		for_each_lru(lru)
-			INIT_LIST_HEAD(&zone->lruvec.lists[lru]);
-		zone->reclaim_stat.recent_rotated[0] = 0;
-		zone->reclaim_stat.recent_rotated[1] = 0;
-		zone->reclaim_stat.recent_scanned[0] = 0;
-		zone->reclaim_stat.recent_scanned[1] = 0;
+		lruvec_init(&zone->lruvec, zone);
 		zap_zone_vm_stats(zone);
 		zone->flags = 0;
 		if (!size)
 			continue;
 
-		set_pageblock_order(pageblock_default_order());
+		set_pageblock_order();
 		setup_usemap(pgdat, zone, size);
 		ret = init_currently_empty_zone(zone, zone_start_pfn,
 						size, MEMMAP_EARLY);
@@ -4759,7 +4806,7 @@
 	find_zone_movable_pfns_for_nodes();
 
 	/* Print out the zone ranges */
-	printk("Zone PFN ranges:\n");
+	printk("Zone ranges:\n");
 	for (i = 0; i < MAX_NR_ZONES; i++) {
 		if (i == ZONE_MOVABLE)
 			continue;
@@ -4768,22 +4815,25 @@
 				arch_zone_highest_possible_pfn[i])
 			printk(KERN_CONT "empty\n");
 		else
-			printk(KERN_CONT "%0#10lx -> %0#10lx\n",
-				arch_zone_lowest_possible_pfn[i],
-				arch_zone_highest_possible_pfn[i]);
+			printk(KERN_CONT "[mem %0#10lx-%0#10lx]\n",
+				arch_zone_lowest_possible_pfn[i] << PAGE_SHIFT,
+				(arch_zone_highest_possible_pfn[i]
+					<< PAGE_SHIFT) - 1);
 	}
 
 	/* Print out the PFNs ZONE_MOVABLE begins at in each node */
-	printk("Movable zone start PFN for each node\n");
+	printk("Movable zone start for each node\n");
 	for (i = 0; i < MAX_NUMNODES; i++) {
 		if (zone_movable_pfn[i])
-			printk("  Node %d: %lu\n", i, zone_movable_pfn[i]);
+			printk("  Node %d: %#010lx\n", i,
+			       zone_movable_pfn[i] << PAGE_SHIFT);
 	}
 
 	/* Print out the early_node_map[] */
-	printk("Early memory PFN ranges\n");
+	printk("Early memory node ranges\n");
 	for_each_mem_pfn_range(i, MAX_NUMNODES, &start_pfn, &end_pfn, &nid)
-		printk("  %3d: %0#10lx -> %0#10lx\n", nid, start_pfn, end_pfn);
+		printk("  node %3d: [mem %#010lx-%#010lx]\n", nid,
+		       start_pfn << PAGE_SHIFT, (end_pfn << PAGE_SHIFT) - 1);
 
 	/* Initialise every node */
 	mminit_verify_pageflags_layout();
@@ -4976,14 +5026,7 @@
 	calculate_totalreserve_pages();
 }
 
-/**
- * setup_per_zone_wmarks - called when min_free_kbytes changes
- * or when memory is hot-{added|removed}
- *
- * Ensures that the watermark[min,low,high] values for each zone are set
- * correctly with respect to min_free_kbytes.
- */
-void setup_per_zone_wmarks(void)
+static void __setup_per_zone_wmarks(void)
 {
 	unsigned long pages_min = min_free_kbytes >> (PAGE_SHIFT - 10);
 	unsigned long lowmem_pages = 0;
@@ -5030,6 +5073,11 @@
 
 		zone->watermark[WMARK_LOW]  = min_wmark_pages(zone) + (tmp >> 2);
 		zone->watermark[WMARK_HIGH] = min_wmark_pages(zone) + (tmp >> 1);
+
+		zone->watermark[WMARK_MIN] += cma_wmark_pages(zone);
+		zone->watermark[WMARK_LOW] += cma_wmark_pages(zone);
+		zone->watermark[WMARK_HIGH] += cma_wmark_pages(zone);
+
 		setup_zone_migrate_reserve(zone);
 		spin_unlock_irqrestore(&zone->lock, flags);
 	}
@@ -5038,6 +5086,20 @@
 	calculate_totalreserve_pages();
 }
 
+/**
+ * setup_per_zone_wmarks - called when min_free_kbytes changes
+ * or when memory is hot-{added|removed}
+ *
+ * Ensures that the watermark[min,low,high] values for each zone are set
+ * correctly with respect to min_free_kbytes.
+ */
+void setup_per_zone_wmarks(void)
+{
+	mutex_lock(&zonelists_mutex);
+	__setup_per_zone_wmarks();
+	mutex_unlock(&zonelists_mutex);
+}
+
 /*
  * The inactive anon list should be small enough that the VM never has to
  * do too much work, but large enough that each inactive page has a chance
@@ -5415,14 +5477,16 @@
 __count_immobile_pages(struct zone *zone, struct page *page, int count)
 {
 	unsigned long pfn, iter, found;
+	int mt;
+
 	/*
 	 * For avoiding noise data, lru_add_drain_all() should be called
 	 * If ZONE_MOVABLE, the zone never contains immobile pages
 	 */
 	if (zone_idx(zone) == ZONE_MOVABLE)
 		return true;
-
-	if (get_pageblock_migratetype(page) == MIGRATE_MOVABLE)
+	mt = get_pageblock_migratetype(page);
+	if (mt == MIGRATE_MOVABLE || is_migrate_cma(mt))
 		return true;
 
 	pfn = page_to_pfn(page);
@@ -5539,7 +5603,7 @@
 	return ret;
 }
 
-void unset_migratetype_isolate(struct page *page)
+void unset_migratetype_isolate(struct page *page, unsigned migratetype)
 {
 	struct zone *zone;
 	unsigned long flags;
@@ -5547,12 +5611,259 @@
 	spin_lock_irqsave(&zone->lock, flags);
 	if (get_pageblock_migratetype(page) != MIGRATE_ISOLATE)
 		goto out;
-	set_pageblock_migratetype(page, MIGRATE_MOVABLE);
-	move_freepages_block(zone, page, MIGRATE_MOVABLE);
+	set_pageblock_migratetype(page, migratetype);
+	move_freepages_block(zone, page, migratetype);
 out:
 	spin_unlock_irqrestore(&zone->lock, flags);
 }
 
+#ifdef CONFIG_CMA
+
+static unsigned long pfn_max_align_down(unsigned long pfn)
+{
+	return pfn & ~(max_t(unsigned long, MAX_ORDER_NR_PAGES,
+			     pageblock_nr_pages) - 1);
+}
+
+static unsigned long pfn_max_align_up(unsigned long pfn)
+{
+	return ALIGN(pfn, max_t(unsigned long, MAX_ORDER_NR_PAGES,
+				pageblock_nr_pages));
+}
+
+static struct page *
+__alloc_contig_migrate_alloc(struct page *page, unsigned long private,
+			     int **resultp)
+{
+	return alloc_page(GFP_HIGHUSER_MOVABLE);
+}
+
+/* [start, end) must belong to a single zone. */
+static int __alloc_contig_migrate_range(unsigned long start, unsigned long end)
+{
+	/* This function is based on compact_zone() from compaction.c. */
+
+	unsigned long pfn = start;
+	unsigned int tries = 0;
+	int ret = 0;
+
+	struct compact_control cc = {
+		.nr_migratepages = 0,
+		.order = -1,
+		.zone = page_zone(pfn_to_page(start)),
+		.sync = true,
+	};
+	INIT_LIST_HEAD(&cc.migratepages);
+
+	migrate_prep_local();
+
+	while (pfn < end || !list_empty(&cc.migratepages)) {
+		if (fatal_signal_pending(current)) {
+			ret = -EINTR;
+			break;
+		}
+
+		if (list_empty(&cc.migratepages)) {
+			cc.nr_migratepages = 0;
+			pfn = isolate_migratepages_range(cc.zone, &cc,
+							 pfn, end);
+			if (!pfn) {
+				ret = -EINTR;
+				break;
+			}
+			tries = 0;
+		} else if (++tries == 5) {
+			ret = ret < 0 ? ret : -EBUSY;
+			break;
+		}
+
+		ret = migrate_pages(&cc.migratepages,
+				    __alloc_contig_migrate_alloc,
+				    0, false, MIGRATE_SYNC);
+	}
+
+	putback_lru_pages(&cc.migratepages);
+	return ret > 0 ? 0 : ret;
+}
+
+/*
+ * Update the zone's CMA page counter used for watermark level calculation.
+ */
+static inline void __update_cma_watermarks(struct zone *zone, int count)
+{
+	unsigned long flags;
+	spin_lock_irqsave(&zone->lock, flags);
+	zone->min_cma_pages += count;
+	spin_unlock_irqrestore(&zone->lock, flags);
+	setup_per_zone_wmarks();
+}
+
+/*
+ * Trigger a memory pressure bump to reclaim some pages in order to be able
+ * to allocate 'count' pages in single page units. Does work similar to
+ * __alloc_pages_slowpath().
+ */
+static int __reclaim_pages(struct zone *zone, gfp_t gfp_mask, int count)
+{
+	enum zone_type high_zoneidx = gfp_zone(gfp_mask);
+	struct zonelist *zonelist = node_zonelist(0, gfp_mask);
+	int did_some_progress = 0;
+	int order = 1;
+
+	/*
+	 * Increase the watermark levels to force kswapd to do its job and
+	 * stabilise at the new watermark level.
+	 */
+	__update_cma_watermarks(zone, count);
+
+	/* Obey watermarks as if the page was being allocated */
+	while (!zone_watermark_ok(zone, 0, low_wmark_pages(zone), 0, 0)) {
+		wake_all_kswapd(order, zonelist, high_zoneidx, zone_idx(zone));
+
+		did_some_progress = __perform_reclaim(gfp_mask, order, zonelist,
+						      NULL);
+		if (!did_some_progress) {
+			/* Exhausted what can be done, so it's OOM-killer time */
+			out_of_memory(zonelist, gfp_mask, order, NULL, false);
+		}
+	}
+
+	/* Restore original watermark levels. */
+	__update_cma_watermarks(zone, -count);
+
+	return count;
+}
+
+/**
+ * alloc_contig_range() -- tries to allocate given range of pages
+ * @start:	start PFN to allocate
+ * @end:	one-past-the-last PFN to allocate
+ * @migratetype:	migratetype of the underlying pageblocks (either
+ *			#MIGRATE_MOVABLE or #MIGRATE_CMA).  All pageblocks
+ *			in range must have the same migratetype and it must
+ *			be either of the two.
+ *
+ * The PFN range does not have to be pageblock or MAX_ORDER_NR_PAGES
+ * aligned; however, it is the caller's responsibility to guarantee that
+ * we are the only thread that changes the migrate type of the pageblocks
+ * the pages fall in.
+ *
+ * The PFN range must belong to a single zone.
+ *
+ * Returns zero on success or a negative error code.  On success all
+ * pages whose PFN is in [start, end) are allocated for the caller and
+ * need to be freed with free_contig_range().
+ */
+int alloc_contig_range(unsigned long start, unsigned long end,
+		       unsigned migratetype)
+{
+	struct zone *zone = page_zone(pfn_to_page(start));
+	unsigned long outer_start, outer_end;
+	int ret = 0, order;
+
+	/*
+	 * What we do here is mark all pageblocks in range as
+	 * MIGRATE_ISOLATE.  Because pageblock and max order pages may
+	 * have different sizes, and due to the way the page allocator
+	 * works, we align the range to the bigger of the two so
+	 * that the page allocator won't try to merge buddies from
+	 * different pageblocks and change MIGRATE_ISOLATE to some
+	 * other migration type.
+	 *
+	 * Once the pageblocks are marked as MIGRATE_ISOLATE, we
+	 * migrate the pages from the unaligned range (i.e. the pages
+	 * we are interested in).  This will put all the pages in
+	 * range back to the page allocator as MIGRATE_ISOLATE.
+	 *
+	 * When this is done, we take the pages in range from the page
+	 * allocator, removing them from the buddy system.  This way
+	 * the page allocator will never consider using them.
+	 *
+	 * This lets us mark the pageblocks back as
+	 * MIGRATE_CMA/MIGRATE_MOVABLE so that free pages in the
+	 * aligned range but not in the unaligned, original range are
+	 * put back to the page allocator so that the buddy allocator
+	 * can use them.
+	 */
+
+	ret = start_isolate_page_range(pfn_max_align_down(start),
+				       pfn_max_align_up(end), migratetype);
+	if (ret)
+		goto done;
+
+	ret = __alloc_contig_migrate_range(start, end);
+	if (ret)
+		goto done;
+
+	/*
+	 * Pages from [start, end) are within MAX_ORDER_NR_PAGES
+	 * aligned blocks that are marked as MIGRATE_ISOLATE.  What's
+	 * more, all pages in [start, end) are free in the page allocator.
+	 * What we are going to do is allocate all pages from
+	 * [start, end) (that is, remove them from the page allocator).
+	 *
+	 * The only problem is that pages at the beginning and at the
+	 * end of the interesting range may not be aligned with pages
+	 * that the page allocator holds, i.e. they can be part of
+	 * higher order pages.  Because of this, we reserve the bigger
+	 * range and once this is done free the pages we are not
+	 * interested in.
+	 *
+	 * We don't have to hold zone->lock here because the pages are
+	 * isolated, so they won't get removed from the buddy system.
+	 */
+
+	lru_add_drain_all();
+	drain_all_pages();
+
+	order = 0;
+	outer_start = start;
+	while (!PageBuddy(pfn_to_page(outer_start))) {
+		if (++order >= MAX_ORDER) {
+			ret = -EBUSY;
+			goto done;
+		}
+		outer_start &= ~0UL << order;
+	}
+
+	/* Make sure the range is really isolated. */
+	if (test_pages_isolated(outer_start, end)) {
+		pr_warn("alloc_contig_range test_pages_isolated(%lx, %lx) failed\n",
+		       outer_start, end);
+		ret = -EBUSY;
+		goto done;
+	}
+
+	/*
+	 * Reclaim enough pages to make sure that contiguous allocation
+	 * will not starve the system.
+	 */
+	__reclaim_pages(zone, GFP_HIGHUSER_MOVABLE, end-start);
+
+	/* Grab isolated pages from freelists. */
+	outer_end = isolate_freepages_range(outer_start, end);
+	if (!outer_end) {
+		ret = -EBUSY;
+		goto done;
+	}
+
+	/* Free head and tail (if any) */
+	if (start != outer_start)
+		free_contig_range(outer_start, start - outer_start);
+	if (end != outer_end)
+		free_contig_range(end, outer_end - end);
+
+done:
+	undo_isolate_page_range(pfn_max_align_down(start),
+				pfn_max_align_up(end), migratetype);
+	return ret;
+}
+
+void free_contig_range(unsigned long pfn, unsigned nr_pages)
+{
+	for (; nr_pages--; ++pfn)
+		__free_page(pfn_to_page(pfn));
+}
+#endif
+
 #ifdef CONFIG_MEMORY_HOTREMOVE
 /*
  * All pages in the range must be isolated before calling this.
@@ -5621,7 +5932,7 @@
 }
 #endif
 
-static struct trace_print_flags pageflag_names[] = {
+static const struct trace_print_flags pageflag_names[] = {
 	{1UL << PG_locked,		"locked"	},
 	{1UL << PG_error,		"error"		},
 	{1UL << PG_referenced,		"referenced"	},
@@ -5656,7 +5967,9 @@
 #ifdef CONFIG_MEMORY_FAILURE
 	{1UL << PG_hwpoison,		"hwpoison"	},
 #endif
-	{-1UL,				NULL		},
+#ifdef CONFIG_TRANSPARENT_HUGEPAGE
+	{1UL << PG_compound_lock,	"compound_lock"	},
+#endif
 };
 
 static void dump_page_flags(unsigned long flags)
@@ -5665,12 +5978,14 @@
 	unsigned long mask;
 	int i;
 
+	BUILD_BUG_ON(ARRAY_SIZE(pageflag_names) != __NR_PAGEFLAGS);
+
 	printk(KERN_ALERT "page flags: %#lx(", flags);
 
 	/* remove zone id */
 	flags &= (1UL << NR_PAGEFLAGS) - 1;
 
-	for (i = 0; pageflag_names[i].name && flags; i++) {
+	for (i = 0; i < ARRAY_SIZE(pageflag_names) && flags; i++) {
 
 		mask = pageflag_names[i].mask;
 		if ((flags & mask) != mask)
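
For orientation, a minimal sketch of the intended caller-side use of the new contiguous-range interface (the allocator layered on top of it is outside this hunk); start_pfn, nr_pages and example_cma_user() are placeholders and error handling is reduced to the bare minimum:

#include <linux/gfp.h>
#include <linux/mm.h>

/* Grab nr_pages physically contiguous pages from pageblocks that were
 * set up as MIGRATE_CMA, use them, and give them back. */
static int example_cma_user(unsigned long start_pfn, unsigned long nr_pages)
{
	int ret;

	ret = alloc_contig_range(start_pfn, start_pfn + nr_pages,
				 MIGRATE_CMA);
	if (ret)
		return ret;		/* e.g. -EBUSY or -EINTR */

	/* ... pages pfn_to_page(start_pfn) .. pfn_to_page(start_pfn +
	 * nr_pages - 1) now belong to the caller ... */

	free_contig_range(start_pfn, nr_pages);
	return 0;
}
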
diff --git a/mm/page_cgroup.c b/mm/page_cgroup.c
index 1ccbd71..eb750f8 100644
--- a/mm/page_cgroup.c
+++ b/mm/page_cgroup.c
@@ -392,7 +392,7 @@
 
 /**
  * swap_cgroup_cmpxchg - cmpxchg mem_cgroup's id for this swp_entry.
- * @end: swap entry to be cmpxchged
+ * @ent: swap entry to be cmpxchged
  * @old: old id
  * @new: new id
  *
@@ -422,7 +422,7 @@
 /**
  * swap_cgroup_record - record mem_cgroup for this swp_entry.
  * @ent: swap entry to be recorded into
- * @mem: mem_cgroup to be recorded
+ * @id: mem_cgroup to be recorded
  *
  * Returns old value at success, 0 at failure.
  * (Of course, old value can be 0.)
diff --git a/mm/page_io.c b/mm/page_io.c
index dc76b4d..34f0292 100644
--- a/mm/page_io.c
+++ b/mm/page_io.c
@@ -18,6 +18,7 @@
 #include <linux/bio.h>
 #include <linux/swapops.h>
 #include <linux/writeback.h>
+#include <linux/frontswap.h>
 #include <asm/pgtable.h>
 
 static struct bio *get_swap_bio(gfp_t gfp_flags,
@@ -98,6 +99,12 @@
 		unlock_page(page);
 		goto out;
 	}
+	if (frontswap_store(page) == 0) {
+		set_page_writeback(page);
+		unlock_page(page);
+		end_page_writeback(page);
+		goto out;
+	}
 	bio = get_swap_bio(GFP_NOIO, page, end_swap_bio_write);
 	if (bio == NULL) {
 		set_page_dirty(page);
@@ -122,6 +129,11 @@
 
 	VM_BUG_ON(!PageLocked(page));
 	VM_BUG_ON(PageUptodate(page));
+	if (frontswap_load(page) == 0) {
+		SetPageUptodate(page);
+		unlock_page(page);
+		goto out;
+	}
 	bio = get_swap_bio(GFP_KERNEL, page, end_swap_bio_read);
 	if (bio == NULL) {
 		unlock_page(page);
diff --git a/mm/page_isolation.c b/mm/page_isolation.c
index 4ae42bb..c9f0477 100644
--- a/mm/page_isolation.c
+++ b/mm/page_isolation.c
@@ -24,6 +24,7 @@
  * to be MIGRATE_ISOLATE.
  * @start_pfn: The lower PFN of the range to be isolated.
  * @end_pfn: The upper PFN of the range to be isolated.
+ * @migratetype: migrate type to set in error recovery.
  *
  * Making page-allocation-type to be MIGRATE_ISOLATE means free pages in
  * the range will never be allocated. Any free pages and pages freed in the
@@ -32,8 +33,8 @@
  * start_pfn/end_pfn must be aligned to pageblock_order.
  * Returns 0 on success and -EBUSY if any part of range cannot be isolated.
  */
-int
-start_isolate_page_range(unsigned long start_pfn, unsigned long end_pfn)
+int start_isolate_page_range(unsigned long start_pfn, unsigned long end_pfn,
+			     unsigned migratetype)
 {
 	unsigned long pfn;
 	unsigned long undo_pfn;
@@ -56,7 +57,7 @@
 	for (pfn = start_pfn;
 	     pfn < undo_pfn;
 	     pfn += pageblock_nr_pages)
-		unset_migratetype_isolate(pfn_to_page(pfn));
+		unset_migratetype_isolate(pfn_to_page(pfn), migratetype);
 
 	return -EBUSY;
 }
@@ -64,8 +65,8 @@
 /*
  * Make isolated pages available again.
  */
-int
-undo_isolate_page_range(unsigned long start_pfn, unsigned long end_pfn)
+int undo_isolate_page_range(unsigned long start_pfn, unsigned long end_pfn,
+			    unsigned migratetype)
 {
 	unsigned long pfn;
 	struct page *page;
@@ -77,7 +78,7 @@
 		page = __first_valid_page(pfn, pageblock_nr_pages);
 		if (!page || get_pageblock_migratetype(page) != MIGRATE_ISOLATE)
 			continue;
-		unset_migratetype_isolate(page);
+		unset_migratetype_isolate(page, migratetype);
 	}
 	return 0;
 }
@@ -86,7 +87,7 @@
  * all pages in [start_pfn...end_pfn) must be in the same zone.
  * zone->lock must be held before call this.
  *
- * Returns 1 if all pages in the range is isolated.
+ * Returns 1 if all pages in the range are isolated.
  */
 static int
 __test_page_isolated_in_pageblock(unsigned long pfn, unsigned long end_pfn)
diff --git a/mm/pagewalk.c b/mm/pagewalk.c
index aa9701e..6c118d0 100644
--- a/mm/pagewalk.c
+++ b/mm/pagewalk.c
@@ -162,7 +162,6 @@
 
 /**
  * walk_page_range - walk a memory map's page tables with a callback
- * @mm: memory map to walk
  * @addr: starting address
  * @end: ending address
  * @walk: set of callbacks to invoke for each level of the tree
diff --git a/mm/percpu-vm.c b/mm/percpu-vm.c
index 405d331..3707c71 100644
--- a/mm/percpu-vm.c
+++ b/mm/percpu-vm.c
@@ -360,7 +360,6 @@
  * @chunk: chunk to depopulate
  * @off: offset to the area to depopulate
  * @size: size of the area to depopulate in bytes
- * @flush: whether to flush cache and tlb or not
  *
  * For each cpu, depopulate and unmap pages [@page_start,@page_end)
  * from @chunk.  If @flush is true, vcache is flushed before unmapping
diff --git a/mm/pgtable-generic.c b/mm/pgtable-generic.c
index 5a74fea..74c0dda 100644
--- a/mm/pgtable-generic.c
+++ b/mm/pgtable-generic.c
@@ -109,8 +109,8 @@
 
 #ifndef __HAVE_ARCH_PMDP_SPLITTING_FLUSH
 #ifdef CONFIG_TRANSPARENT_HUGEPAGE
-pmd_t pmdp_splitting_flush(struct vm_area_struct *vma, unsigned long address,
-			   pmd_t *pmdp)
+void pmdp_splitting_flush(struct vm_area_struct *vma, unsigned long address,
+			  pmd_t *pmdp)
 {
 	pmd_t pmd = pmd_mksplitting(*pmdp);
 	VM_BUG_ON(address & ~HPAGE_PMD_MASK);
diff --git a/mm/process_vm_access.c b/mm/process_vm_access.c
index c20ff48..926b466 100644
--- a/mm/process_vm_access.c
+++ b/mm/process_vm_access.c
@@ -371,15 +371,15 @@
 	/* Check iovecs */
 	if (vm_write)
 		rc = rw_copy_check_uvector(WRITE, lvec, liovcnt, UIO_FASTIOV,
-					   iovstack_l, &iov_l, 1);
+					   iovstack_l, &iov_l);
 	else
 		rc = rw_copy_check_uvector(READ, lvec, liovcnt, UIO_FASTIOV,
-					   iovstack_l, &iov_l, 1);
+					   iovstack_l, &iov_l);
 	if (rc <= 0)
 		goto free_iovecs;
 
-	rc = rw_copy_check_uvector(READ, rvec, riovcnt, UIO_FASTIOV,
-				   iovstack_r, &iov_r, 0);
+	rc = rw_copy_check_uvector(CHECK_IOVEC_ONLY, rvec, riovcnt, UIO_FASTIOV,
+				   iovstack_r, &iov_r);
 	if (rc <= 0)
 		goto free_iovecs;
 
@@ -438,16 +438,16 @@
 	if (vm_write)
 		rc = compat_rw_copy_check_uvector(WRITE, lvec, liovcnt,
 						  UIO_FASTIOV, iovstack_l,
-						  &iov_l, 1);
+						  &iov_l);
 	else
 		rc = compat_rw_copy_check_uvector(READ, lvec, liovcnt,
 						  UIO_FASTIOV, iovstack_l,
-						  &iov_l, 1);
+						  &iov_l);
 	if (rc <= 0)
 		goto free_iovecs;
-	rc = compat_rw_copy_check_uvector(READ, rvec, riovcnt,
+	rc = compat_rw_copy_check_uvector(CHECK_IOVEC_ONLY, rvec, riovcnt,
 					  UIO_FASTIOV, iovstack_r,
-					  &iov_r, 0);
+					  &iov_r);
 	if (rc <= 0)
 		goto free_iovecs;
 
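
The userspace side of the interface whose iovec checking changes above; a minimal reader, where the pid and remote address come from the command line and the 64-byte length is arbitrary:

#define _GNU_SOURCE
#include <stdio.h>
#include <stdlib.h>
#include <sys/types.h>
#include <sys/uio.h>

int main(int argc, char **argv)
{
	if (argc != 3) {
		fprintf(stderr, "usage: %s <pid> <hex-addr>\n", argv[0]);
		return 1;
	}
	pid_t pid = (pid_t)atol(argv[1]);
	void *remote_addr = (void *)strtoul(argv[2], NULL, 16);
	char buf[64];

	struct iovec local  = { .iov_base = buf, .iov_len = sizeof(buf) };
	/* The remote address belongs to the other process, so the kernel
	 * can only sanity-check the iovec layout itself at submission time
	 * (the CHECK_IOVEC_ONLY case above). */
	struct iovec remote = { .iov_base = remote_addr, .iov_len = sizeof(buf) };

	ssize_t n = process_vm_readv(pid, &local, 1, &remote, 1, 0);
	if (n < 0) { perror("process_vm_readv"); return 1; }
	printf("read %zd bytes from pid %ld\n", n, (long)pid);
	return 0;
}
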
diff --git a/mm/readahead.c b/mm/readahead.c
index cbcbb02..ea8f8fa 100644
--- a/mm/readahead.c
+++ b/mm/readahead.c
@@ -17,6 +17,8 @@
 #include <linux/task_io_accounting_ops.h>
 #include <linux/pagevec.h>
 #include <linux/pagemap.h>
+#include <linux/syscalls.h>
+#include <linux/file.h>
 
 /*
  * Initialise a struct file's readahead state.  Assumes that the caller has
@@ -562,3 +564,41 @@
 	ondemand_readahead(mapping, ra, filp, true, offset, req_size);
 }
 EXPORT_SYMBOL_GPL(page_cache_async_readahead);
+
+static ssize_t
+do_readahead(struct address_space *mapping, struct file *filp,
+	     pgoff_t index, unsigned long nr)
+{
+	if (!mapping || !mapping->a_ops || !mapping->a_ops->readpage)
+		return -EINVAL;
+
+	force_page_cache_readahead(mapping, filp, index, nr);
+	return 0;
+}
+
+SYSCALL_DEFINE(readahead)(int fd, loff_t offset, size_t count)
+{
+	ssize_t ret;
+	struct file *file;
+
+	ret = -EBADF;
+	file = fget(fd);
+	if (file) {
+		if (file->f_mode & FMODE_READ) {
+			struct address_space *mapping = file->f_mapping;
+			pgoff_t start = offset >> PAGE_CACHE_SHIFT;
+			pgoff_t end = (offset + count - 1) >> PAGE_CACHE_SHIFT;
+			unsigned long len = end - start + 1;
+			ret = do_readahead(mapping, file, start, len);
+		}
+		fput(file);
+	}
+	return ret;
+}
+#ifdef CONFIG_HAVE_SYSCALL_WRAPPERS
+asmlinkage long SyS_readahead(long fd, loff_t offset, long count)
+{
+	return SYSC_readahead((int) fd, offset, (size_t) count);
+}
+SYSCALL_ALIAS(sys_readahead, SyS_readahead);
+#endif
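
The system call defined above is the one userspace reaches through readahead(2); a small, self-contained use of it (the file name and the 1 MiB window are arbitrary):

#define _GNU_SOURCE
#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

int main(int argc, char **argv)
{
	if (argc != 2) {
		fprintf(stderr, "usage: %s <file>\n", argv[0]);
		return 1;
	}
	int fd = open(argv[1], O_RDONLY);
	if (fd < 0) { perror("open"); return 1; }

	/* Populate the page cache for the first 1 MiB so later reads of
	 * that range can be served from memory. */
	if (readahead(fd, 0, 1 << 20) < 0)
		perror("readahead");

	close(fd);
	return 0;
}
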
diff --git a/mm/rmap.c b/mm/rmap.c
index 5b5ad58..0f3b7cd 100644
--- a/mm/rmap.c
+++ b/mm/rmap.c
@@ -755,12 +755,6 @@
 		pte_unmap_unlock(pte, ptl);
 	}
 
-	/* Pretend the page is referenced if the task has the
-	   swap token and is in the middle of a page fault. */
-	if (mm != current->mm && has_swap_token(mm) &&
-			rwsem_is_locked(&mm->mmap_sem))
-		referenced++;
-
 	(*mapcount)--;
 
 	if (referenced)
diff --git a/mm/shmem.c b/mm/shmem.c
index d7b433a..a15a466 100644
--- a/mm/shmem.c
+++ b/mm/shmem.c
@@ -53,6 +53,7 @@
 #include <linux/blkdev.h>
 #include <linux/pagevec.h>
 #include <linux/percpu_counter.h>
+#include <linux/falloc.h>
 #include <linux/splice.h>
 #include <linux/security.h>
 #include <linux/swapops.h>
@@ -83,12 +84,25 @@
 	char value[0];
 };
 
+/*
+ * shmem_fallocate and shmem_writepage communicate via inode->i_private
+ * (with i_mutex making sure that it has only one user at a time):
+ * we would prefer not to enlarge the shmem inode just for that.
+ */
+struct shmem_falloc {
+	pgoff_t start;		/* start of range currently being fallocated */
+	pgoff_t next;		/* the next page offset to be fallocated */
+	pgoff_t nr_falloced;	/* how many new pages have been fallocated */
+	pgoff_t nr_unswapped;	/* how often writepage refused to swap out */
+};
+
 /* Flag allocation requirements to shmem_getpage */
 enum sgp_type {
 	SGP_READ,	/* don't exceed i_size, don't allocate page */
 	SGP_CACHE,	/* don't exceed i_size, may allocate page */
 	SGP_DIRTY,	/* like SGP_CACHE, but set new page dirty */
-	SGP_WRITE,	/* may exceed i_size, may allocate page */
+	SGP_WRITE,	/* may exceed i_size, may allocate !Uptodate page */
+	SGP_FALLOC,	/* like SGP_WRITE, but make existing page Uptodate */
 };
 
 #ifdef CONFIG_TMPFS
@@ -103,6 +117,9 @@
 }
 #endif
 
+static bool shmem_should_replace_page(struct page *page, gfp_t gfp);
+static int shmem_replace_page(struct page **pagep, gfp_t gfp,
+				struct shmem_inode_info *info, pgoff_t index);
 static int shmem_getpage_gfp(struct inode *inode, pgoff_t index,
 	struct page **pagep, enum sgp_type sgp, gfp_t gfp, int *fault_type);
 
@@ -423,27 +440,31 @@
 
 /*
  * Remove range of pages and swap entries from radix tree, and free them.
+ * If !unfalloc, truncate or punch hole; if unfalloc, undo failed fallocate.
  */
-void shmem_truncate_range(struct inode *inode, loff_t lstart, loff_t lend)
+static void shmem_undo_range(struct inode *inode, loff_t lstart, loff_t lend,
+								 bool unfalloc)
 {
 	struct address_space *mapping = inode->i_mapping;
 	struct shmem_inode_info *info = SHMEM_I(inode);
 	pgoff_t start = (lstart + PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT;
-	unsigned partial = lstart & (PAGE_CACHE_SIZE - 1);
-	pgoff_t end = (lend >> PAGE_CACHE_SHIFT);
+	pgoff_t end = (lend + 1) >> PAGE_CACHE_SHIFT;
+	unsigned int partial_start = lstart & (PAGE_CACHE_SIZE - 1);
+	unsigned int partial_end = (lend + 1) & (PAGE_CACHE_SIZE - 1);
 	struct pagevec pvec;
 	pgoff_t indices[PAGEVEC_SIZE];
 	long nr_swaps_freed = 0;
 	pgoff_t index;
 	int i;
 
-	BUG_ON((lend & (PAGE_CACHE_SIZE - 1)) != (PAGE_CACHE_SIZE - 1));
+	if (lend == -1)
+		end = -1;	/* unsigned, so actually very big */
 
 	pagevec_init(&pvec, 0);
 	index = start;
-	while (index <= end) {
+	while (index < end) {
 		pvec.nr = shmem_find_get_pages_and_swap(mapping, index,
-			min(end - index, (pgoff_t)PAGEVEC_SIZE - 1) + 1,
+				min(end - index, (pgoff_t)PAGEVEC_SIZE),
 							pvec.pages, indices);
 		if (!pvec.nr)
 			break;
@@ -452,10 +473,12 @@
 			struct page *page = pvec.pages[i];
 
 			index = indices[i];
-			if (index > end)
+			if (index >= end)
 				break;
 
 			if (radix_tree_exceptional_entry(page)) {
+				if (unfalloc)
+					continue;
 				nr_swaps_freed += !shmem_free_swap(mapping,
 								index, page);
 				continue;
@@ -463,9 +486,11 @@
 
 			if (!trylock_page(page))
 				continue;
-			if (page->mapping == mapping) {
-				VM_BUG_ON(PageWriteback(page));
-				truncate_inode_page(mapping, page);
+			if (!unfalloc || !PageUptodate(page)) {
+				if (page->mapping == mapping) {
+					VM_BUG_ON(PageWriteback(page));
+					truncate_inode_page(mapping, page);
+				}
 			}
 			unlock_page(page);
 		}
@@ -476,30 +501,47 @@
 		index++;
 	}
 
-	if (partial) {
+	if (partial_start) {
 		struct page *page = NULL;
 		shmem_getpage(inode, start - 1, &page, SGP_READ, NULL);
 		if (page) {
-			zero_user_segment(page, partial, PAGE_CACHE_SIZE);
+			unsigned int top = PAGE_CACHE_SIZE;
+			if (start > end) {
+				top = partial_end;
+				partial_end = 0;
+			}
+			zero_user_segment(page, partial_start, top);
 			set_page_dirty(page);
 			unlock_page(page);
 			page_cache_release(page);
 		}
 	}
+	if (partial_end) {
+		struct page *page = NULL;
+		shmem_getpage(inode, end, &page, SGP_READ, NULL);
+		if (page) {
+			zero_user_segment(page, 0, partial_end);
+			set_page_dirty(page);
+			unlock_page(page);
+			page_cache_release(page);
+		}
+	}
+	if (start >= end)
+		return;
 
 	index = start;
 	for ( ; ; ) {
 		cond_resched();
 		pvec.nr = shmem_find_get_pages_and_swap(mapping, index,
-			min(end - index, (pgoff_t)PAGEVEC_SIZE - 1) + 1,
+				min(end - index, (pgoff_t)PAGEVEC_SIZE),
 							pvec.pages, indices);
 		if (!pvec.nr) {
-			if (index == start)
+			if (index == start || unfalloc)
 				break;
 			index = start;
 			continue;
 		}
-		if (index == start && indices[0] > end) {
+		if ((index == start || unfalloc) && indices[0] >= end) {
 			shmem_deswap_pagevec(&pvec);
 			pagevec_release(&pvec);
 			break;
@@ -509,19 +551,23 @@
 			struct page *page = pvec.pages[i];
 
 			index = indices[i];
-			if (index > end)
+			if (index >= end)
 				break;
 
 			if (radix_tree_exceptional_entry(page)) {
+				if (unfalloc)
+					continue;
 				nr_swaps_freed += !shmem_free_swap(mapping,
 								index, page);
 				continue;
 			}
 
 			lock_page(page);
-			if (page->mapping == mapping) {
-				VM_BUG_ON(PageWriteback(page));
-				truncate_inode_page(mapping, page);
+			if (!unfalloc || !PageUptodate(page)) {
+				if (page->mapping == mapping) {
+					VM_BUG_ON(PageWriteback(page));
+					truncate_inode_page(mapping, page);
+				}
 			}
 			unlock_page(page);
 		}
@@ -535,7 +581,11 @@
 	info->swapped -= nr_swaps_freed;
 	shmem_recalc_inode(inode);
 	spin_unlock(&info->lock);
+}
 
+void shmem_truncate_range(struct inode *inode, loff_t lstart, loff_t lend)
+{
+	shmem_undo_range(inode, lstart, lend, false);
 	inode->i_ctime = inode->i_mtime = CURRENT_TIME;
 }
 EXPORT_SYMBOL_GPL(shmem_truncate_range);
@@ -597,19 +647,20 @@
 	}
 	BUG_ON(inode->i_blocks);
 	shmem_free_inode(inode->i_sb);
-	end_writeback(inode);
+	clear_inode(inode);
 }
 
 /*
  * If swap found in inode, free it and move page from swapcache to filecache.
  */
 static int shmem_unuse_inode(struct shmem_inode_info *info,
-			     swp_entry_t swap, struct page *page)
+			     swp_entry_t swap, struct page **pagep)
 {
 	struct address_space *mapping = info->vfs_inode.i_mapping;
 	void *radswap;
 	pgoff_t index;
-	int error;
+	gfp_t gfp;
+	int error = 0;
 
 	radswap = swp_to_radix_entry(swap);
 	index = radix_tree_locate_item(&mapping->page_tree, radswap);
@@ -625,22 +676,48 @@
 	if (shmem_swaplist.next != &info->swaplist)
 		list_move_tail(&shmem_swaplist, &info->swaplist);
 
+	gfp = mapping_gfp_mask(mapping);
+	if (shmem_should_replace_page(*pagep, gfp)) {
+		mutex_unlock(&shmem_swaplist_mutex);
+		error = shmem_replace_page(pagep, gfp, info, index);
+		mutex_lock(&shmem_swaplist_mutex);
+		/*
+		 * We needed to drop mutex to make that restrictive page
+		 * allocation, but the inode might have been freed while we
+		 * dropped it: although a racing shmem_evict_inode() cannot
+		 * complete without emptying the radix_tree, our page lock
+		 * on this swapcache page is not enough to prevent that -
+		 * free_swap_and_cache() of our swap entry will only
+		 * trylock_page(), removing swap from radix_tree whatever.
+		 *
+		 * We must not proceed to shmem_add_to_page_cache() if the
+		 * inode has been freed, but of course we cannot rely on
+		 * inode or mapping or info to check that.  However, we can
+		 * safely check if our swap entry is still in use (and here
+		 * it can't have got reused for another page): if it's still
+		 * in use, then the inode cannot have been freed yet, and we
+		 * can safely proceed (if it's no longer in use, that tells
+		 * nothing about the inode, but we don't need to unuse swap).
+		 */
+		if (!page_swapcount(*pagep))
+			error = -ENOENT;
+	}
+
 	/*
 	 * We rely on shmem_swaplist_mutex, not only to protect the swaplist,
 	 * but also to hold up shmem_evict_inode(): so inode cannot be freed
 	 * beneath us (pagelock doesn't help until the page is in pagecache).
 	 */
-	error = shmem_add_to_page_cache(page, mapping, index,
+	if (!error)
+		error = shmem_add_to_page_cache(*pagep, mapping, index,
 						GFP_NOWAIT, radswap);
-	/* which does mem_cgroup_uncharge_cache_page on error */
-
 	if (error != -ENOMEM) {
 		/*
 		 * Truncation and eviction use free_swap_and_cache(), which
 		 * only does trylock page: if we raced, best clean up here.
 		 */
-		delete_from_swap_cache(page);
-		set_page_dirty(page);
+		delete_from_swap_cache(*pagep);
+		set_page_dirty(*pagep);
 		if (!error) {
 			spin_lock(&info->lock);
 			info->swapped--;
@@ -660,7 +737,14 @@
 	struct list_head *this, *next;
 	struct shmem_inode_info *info;
 	int found = 0;
-	int error;
+	int error = 0;
+
+	/*
+	 * There's a faint possibility that swap page was replaced before
+	 * caller locked it: caller will come back later with the right page.
+	 */
+	if (unlikely(!PageSwapCache(page) || page_private(page) != swap.val))
+		goto out;
 
 	/*
 	 * Charge page using GFP_KERNEL while we can wait, before taking
@@ -676,7 +760,7 @@
 	list_for_each_safe(this, next, &shmem_swaplist) {
 		info = list_entry(this, struct shmem_inode_info, swaplist);
 		if (info->swapped)
-			found = shmem_unuse_inode(info, swap, page);
+			found = shmem_unuse_inode(info, swap, &page);
 		else
 			list_del_init(&info->swaplist);
 		cond_resched();
@@ -685,8 +769,6 @@
 	}
 	mutex_unlock(&shmem_swaplist_mutex);
 
-	if (!found)
-		mem_cgroup_uncharge_cache_page(page);
 	if (found < 0)
 		error = found;
 out:
@@ -727,6 +809,38 @@
 		WARN_ON_ONCE(1);	/* Still happens? Tell us about it! */
 		goto redirty;
 	}
+
+	/*
+	 * This is somewhat ridiculous, but without plumbing a SWAP_MAP_FALLOC
+	 * value into swapfile.c, the only way we can correctly account for a
+	 * fallocated page arriving here is now to initialize it and write it.
+	 *
+	 * That's okay for a page already fallocated earlier, but if we have
+	 * not yet completed the fallocation, then (a) we want to keep track
+	 * of this page in case we have to undo it, and (b) it may not be a
+	 * good idea to continue anyway, once we're pushing into swap.  So
+	 * reactivate the page, and let shmem_fallocate() quit when too many.
+	 */
+	if (!PageUptodate(page)) {
+		if (inode->i_private) {
+			struct shmem_falloc *shmem_falloc;
+			spin_lock(&inode->i_lock);
+			shmem_falloc = inode->i_private;
+			if (shmem_falloc &&
+			    index >= shmem_falloc->start &&
+			    index < shmem_falloc->next)
+				shmem_falloc->nr_unswapped++;
+			else
+				shmem_falloc = NULL;
+			spin_unlock(&inode->i_lock);
+			if (shmem_falloc)
+				goto redirty;
+		}
+		clear_highpage(page);
+		flush_dcache_page(page);
+		SetPageUptodate(page);
+	}
+
 	swap = get_swap_page();
 	if (!swap.val)
 		goto redirty;
@@ -856,6 +970,89 @@
 #endif
 
 /*
+ * When a page is moved from swapcache to shmem filecache (either by the
+ * usual swapin of shmem_getpage_gfp(), or by the less common swapoff of
+ * shmem_unuse_inode()), it may have been read in earlier from swap, in
+ * ignorance of the mapping it belongs to.  If that mapping has special
+ * constraints (like the gma500 GEM driver, which requires RAM below 4GB),
+ * we may need to copy to a suitable page before moving to filecache.
+ *
+ * In a future release, this may well be extended to respect cpuset and
+ * NUMA mempolicy, and applied also to anonymous pages in do_swap_page();
+ * but for now it is a simple matter of zone.
+ */
+static bool shmem_should_replace_page(struct page *page, gfp_t gfp)
+{
+	return page_zonenum(page) > gfp_zone(gfp);
+}
+
+static int shmem_replace_page(struct page **pagep, gfp_t gfp,
+				struct shmem_inode_info *info, pgoff_t index)
+{
+	struct page *oldpage, *newpage;
+	struct address_space *swap_mapping;
+	pgoff_t swap_index;
+	int error;
+
+	oldpage = *pagep;
+	swap_index = page_private(oldpage);
+	swap_mapping = page_mapping(oldpage);
+
+	/*
+	 * We have arrived here because our zones are constrained, so don't
+	 * limit chance of success by further cpuset and node constraints.
+	 */
+	gfp &= ~GFP_CONSTRAINT_MASK;
+	newpage = shmem_alloc_page(gfp, info, index);
+	if (!newpage)
+		return -ENOMEM;
+
+	page_cache_get(newpage);
+	copy_highpage(newpage, oldpage);
+	flush_dcache_page(newpage);
+
+	__set_page_locked(newpage);
+	SetPageUptodate(newpage);
+	SetPageSwapBacked(newpage);
+	set_page_private(newpage, swap_index);
+	SetPageSwapCache(newpage);
+
+	/*
+	 * Our caller will very soon move newpage out of swapcache, but it's
+	 * a nice clean interface for us to replace oldpage by newpage there.
+	 */
+	spin_lock_irq(&swap_mapping->tree_lock);
+	error = shmem_radix_tree_replace(swap_mapping, swap_index, oldpage,
+								   newpage);
+	if (!error) {
+		__inc_zone_page_state(newpage, NR_FILE_PAGES);
+		__dec_zone_page_state(oldpage, NR_FILE_PAGES);
+	}
+	spin_unlock_irq(&swap_mapping->tree_lock);
+
+	if (unlikely(error)) {
+		/*
+		 * Is this possible?  I think not, now that our callers check
+		 * both PageSwapCache and page_private after getting page lock;
+		 * but be defensive.  Reverse old to newpage for clear and free.
+		 */
+		oldpage = newpage;
+	} else {
+		mem_cgroup_replace_page_cache(oldpage, newpage);
+		lru_cache_add_anon(newpage);
+		*pagep = newpage;
+	}
+
+	ClearPageSwapCache(oldpage);
+	set_page_private(oldpage, 0);
+
+	unlock_page(oldpage);
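+	/*
+	 * The page being discarded still carries two references here
+	 * (hence the double release below), so drop both.
+	 */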
+	page_cache_release(oldpage);
+	page_cache_release(oldpage);
+	return error;
+}
+
+/*
  * shmem_getpage_gfp - find page in cache, or get from swap, or allocate
  *
  * If we allocate a new one we do not mark it dirty. That's up to the
@@ -872,6 +1069,7 @@
 	swp_entry_t swap;
 	int error;
 	int once = 0;
+	int alloced = 0;
 
 	if (index > (MAX_LFS_FILESIZE >> PAGE_CACHE_SHIFT))
 		return -EFBIG;
@@ -883,19 +1081,21 @@
 		page = NULL;
 	}
 
-	if (sgp != SGP_WRITE &&
+	if (sgp != SGP_WRITE && sgp != SGP_FALLOC &&
 	    ((loff_t)index << PAGE_CACHE_SHIFT) >= i_size_read(inode)) {
 		error = -EINVAL;
 		goto failed;
 	}
 
+	/* fallocated page? */
+	if (page && !PageUptodate(page)) {
+		if (sgp != SGP_READ)
+			goto clear;
+		unlock_page(page);
+		page_cache_release(page);
+		page = NULL;
+	}
 	if (page || (sgp == SGP_READ && !swap.val)) {
-		/*
-		 * Once we can get the page lock, it must be uptodate:
-		 * if there were an error in reading back from swap,
-		 * the page would not be inserted into the filecache.
-		 */
-		BUG_ON(page && !PageUptodate(page));
 		*pagep = page;
 		return 0;
 	}
@@ -923,19 +1123,21 @@
 
 		/* We have to do this with page locked to prevent races */
 		lock_page(page);
+		if (!PageSwapCache(page) || page_private(page) != swap.val ||
+		    page->mapping) {
+			error = -EEXIST;	/* try again */
+			goto failed;
+		}
 		if (!PageUptodate(page)) {
 			error = -EIO;
 			goto failed;
 		}
 		wait_on_page_writeback(page);
 
-		/* Someone may have already done it for us */
-		if (page->mapping) {
-			if (page->mapping == mapping &&
-			    page->index == index)
-				goto done;
-			error = -EEXIST;
-			goto failed;
+		if (shmem_should_replace_page(page, gfp)) {
+			error = shmem_replace_page(&page, gfp, info, index);
+			if (error)
+				goto failed;
 		}
 
 		error = mem_cgroup_cache_charge(page, current->mm,
@@ -991,19 +1193,36 @@
 		inode->i_blocks += BLOCKS_PER_PAGE;
 		shmem_recalc_inode(inode);
 		spin_unlock(&info->lock);
+		alloced = true;
 
-		clear_highpage(page);
-		flush_dcache_page(page);
-		SetPageUptodate(page);
+		/*
+		 * Let SGP_FALLOC use the SGP_WRITE optimization on a new page.
+		 */
+		if (sgp == SGP_FALLOC)
+			sgp = SGP_WRITE;
+clear:
+		/*
+		 * Let SGP_WRITE caller clear ends if write does not fill page;
+		 * but SGP_FALLOC on a page fallocated earlier must initialize
+		 * it now, lest undo on failure cancel our earlier guarantee.
+		 */
+		if (sgp != SGP_WRITE) {
+			clear_highpage(page);
+			flush_dcache_page(page);
+			SetPageUptodate(page);
+		}
 		if (sgp == SGP_DIRTY)
 			set_page_dirty(page);
 	}
-done:
+
 	/* Perhaps the file has been truncated since we checked */
-	if (sgp != SGP_WRITE &&
+	if (sgp != SGP_WRITE && sgp != SGP_FALLOC &&
 	    ((loff_t)index << PAGE_CACHE_SHIFT) >= i_size_read(inode)) {
 		error = -EINVAL;
-		goto trunc;
+		if (alloced)
+			goto trunc;
+		else
+			goto failed;
 	}
 	*pagep = page;
 	return 0;
@@ -1012,6 +1231,7 @@
 	 * Error recovery.
 	 */
 trunc:
+	info = SHMEM_I(inode);
 	ClearPageDirty(page);
 	delete_from_page_cache(page);
 	spin_lock(&info->lock);
@@ -1019,6 +1239,7 @@
 	inode->i_blocks -= BLOCKS_PER_PAGE;
 	spin_unlock(&info->lock);
 decused:
+	sbinfo = SHMEM_SB(inode->i_sb);
 	if (sbinfo->max_blocks)
 		percpu_counter_add(&sbinfo->used_blocks, -1);
 unacct:
@@ -1204,6 +1425,14 @@
 	if (pos + copied > inode->i_size)
 		i_size_write(inode, pos + copied);
 
+	if (!PageUptodate(page)) {
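+		/*
+		 * A short copy into a not-yet-initialized page: zero whatever
+		 * the write did not cover before marking the page uptodate.
+		 */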
+		if (copied < PAGE_CACHE_SIZE) {
+			unsigned from = pos & (PAGE_CACHE_SIZE - 1);
+			zero_user_segments(page, 0, from,
+					from + copied, PAGE_CACHE_SIZE);
+		}
+		SetPageUptodate(page);
+	}
 	set_page_dirty(page);
 	unlock_page(page);
 	page_cache_release(page);
@@ -1462,6 +1691,199 @@
 	return error;
 }
 
+/*
+ * llseek SEEK_DATA or SEEK_HOLE through the radix_tree.
+ */
+static pgoff_t shmem_seek_hole_data(struct address_space *mapping,
+				    pgoff_t index, pgoff_t end, int origin)
+{
+	struct page *page;
+	struct pagevec pvec;
+	pgoff_t indices[PAGEVEC_SIZE];
+	bool done = false;
+	int i;
+
+	pagevec_init(&pvec, 0);
+	pvec.nr = 1;		/* start small: we may be there already */
+	while (!done) {
+		pvec.nr = shmem_find_get_pages_and_swap(mapping, index,
+					pvec.nr, pvec.pages, indices);
+		if (!pvec.nr) {
+			if (origin == SEEK_DATA)
+				index = end;
+			break;
+		}
+		for (i = 0; i < pvec.nr; i++, index++) {
+			if (index < indices[i]) {
+				if (origin == SEEK_HOLE) {
+					done = true;
+					break;
+				}
+				index = indices[i];
+			}
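+			/*
+			 * A swapped-out entry still counts as data; a page
+			 * fallocated but never written (!Uptodate) counts as
+			 * a hole.
+			 */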
+			page = pvec.pages[i];
+			if (page && !radix_tree_exceptional_entry(page)) {
+				if (!PageUptodate(page))
+					page = NULL;
+			}
+			if (index >= end ||
+			    (page && origin == SEEK_DATA) ||
+			    (!page && origin == SEEK_HOLE)) {
+				done = true;
+				break;
+			}
+		}
+		shmem_deswap_pagevec(&pvec);
+		pagevec_release(&pvec);
+		pvec.nr = PAGEVEC_SIZE;
+		cond_resched();
+	}
+	return index;
+}
+
+static loff_t shmem_file_llseek(struct file *file, loff_t offset, int origin)
+{
+	struct address_space *mapping;
+	struct inode *inode;
+	pgoff_t start, end;
+	loff_t new_offset;
+
+	if (origin != SEEK_DATA && origin != SEEK_HOLE)
+		return generic_file_llseek_size(file, offset, origin,
+							MAX_LFS_FILESIZE);
+	mapping = file->f_mapping;
+	inode = mapping->host;
+	mutex_lock(&inode->i_mutex);
+	/* We're holding i_mutex so we can access i_size directly */
+
+	if (offset < 0)
+		offset = -EINVAL;
+	else if (offset >= inode->i_size)
+		offset = -ENXIO;
+	else {
+		start = offset >> PAGE_CACHE_SHIFT;
+		end = (inode->i_size + PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT;
+		new_offset = shmem_seek_hole_data(mapping, start, end, origin);
+		new_offset <<= PAGE_CACHE_SHIFT;
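+		/*
+		 * If the match lies in the starting page, new_offset will not
+		 * exceed offset, and the caller's byte offset is kept as is.
+		 */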
+		if (new_offset > offset) {
+			if (new_offset < inode->i_size)
+				offset = new_offset;
+			else if (origin == SEEK_DATA)
+				offset = -ENXIO;
+			else
+				offset = inode->i_size;
+		}
+	}
+
+	if (offset >= 0 && offset != file->f_pos) {
+		file->f_pos = offset;
+		file->f_version = 0;
+	}
+	mutex_unlock(&inode->i_mutex);
+	return offset;
+}
+
+static long shmem_fallocate(struct file *file, int mode, loff_t offset,
+							 loff_t len)
+{
+	struct inode *inode = file->f_path.dentry->d_inode;
+	struct shmem_sb_info *sbinfo = SHMEM_SB(inode->i_sb);
+	struct shmem_falloc shmem_falloc;
+	pgoff_t start, index, end;
+	int error;
+
+	mutex_lock(&inode->i_mutex);
+
+	if (mode & FALLOC_FL_PUNCH_HOLE) {
+		struct address_space *mapping = file->f_mapping;
+		loff_t unmap_start = round_up(offset, PAGE_SIZE);
+		loff_t unmap_end = round_down(offset + len, PAGE_SIZE) - 1;
+
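+		/*
+		 * Only whole pages can be unmapped here; the partial pages at
+		 * either end of the hole are handled by shmem_truncate_range().
+		 */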
+		if ((u64)unmap_end > (u64)unmap_start)
+			unmap_mapping_range(mapping, unmap_start,
+					    1 + unmap_end - unmap_start, 0);
+		shmem_truncate_range(inode, offset, offset + len - 1);
+		/* No need to unmap again: hole-punching leaves COWed pages */
+		error = 0;
+		goto out;
+	}
+
+	/* We need to check rlimit even when FALLOC_FL_KEEP_SIZE */
+	error = inode_newsize_ok(inode, offset + len);
+	if (error)
+		goto out;
+
+	start = offset >> PAGE_CACHE_SHIFT;
+	end = (offset + len + PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT;
+	/* Try to avoid a swapstorm if len is impossible to satisfy */
+	if (sbinfo->max_blocks && end - start > sbinfo->max_blocks) {
+		error = -ENOSPC;
+		goto out;
+	}
+
+	shmem_falloc.start = start;
+	shmem_falloc.next  = start;
+	shmem_falloc.nr_falloced = 0;
+	shmem_falloc.nr_unswapped = 0;
+	spin_lock(&inode->i_lock);
+	inode->i_private = &shmem_falloc;
+	spin_unlock(&inode->i_lock);
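+	/*
+	 * The range being fallocated is published through inode->i_private so
+	 * that shmem_writepage() can count fallocated pages it declines to
+	 * push to swap (nr_unswapped).
+	 */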
+
+	for (index = start; index < end; index++) {
+		struct page *page;
+
+		/*
+		 * Good, the fallocate(2) manpage permits EINTR: we may have
+		 * been interrupted because we are using up too much memory.
+		 */
+		if (signal_pending(current))
+			error = -EINTR;
+		else if (shmem_falloc.nr_unswapped > shmem_falloc.nr_falloced)
+			error = -ENOMEM;
+		else
+			error = shmem_getpage(inode, index, &page, SGP_FALLOC,
+									NULL);
+		if (error) {
+			/* Remove the !PageUptodate pages we added */
+			shmem_undo_range(inode,
+				(loff_t)start << PAGE_CACHE_SHIFT,
+				(loff_t)index << PAGE_CACHE_SHIFT, true);
+			goto undone;
+		}
+
+		/*
+		 * Inform shmem_writepage() how far we have reached.
+		 * No need for lock or barrier: we have the page lock.
+		 */
+		shmem_falloc.next++;
+		if (!PageUptodate(page))
+			shmem_falloc.nr_falloced++;
+
+		/*
+		 * If !PageUptodate, leave it that way so that freeable pages
+		 * can be recognized if we need to rollback on error later.
+		 * But set_page_dirty so that memory pressure will swap rather
+		 * than free the pages we are allocating (and SGP_CACHE pages
+		 * might still be clean: we now need to mark those dirty too).
+		 */
+		set_page_dirty(page);
+		unlock_page(page);
+		page_cache_release(page);
+		cond_resched();
+	}
+
+	if (!(mode & FALLOC_FL_KEEP_SIZE) && offset + len > inode->i_size)
+		i_size_write(inode, offset + len);
+	inode->i_ctime = CURRENT_TIME;
+undone:
+	spin_lock(&inode->i_lock);
+	inode->i_private = NULL;
+	spin_unlock(&inode->i_lock);
+out:
+	mutex_unlock(&inode->i_mutex);
+	return error;
+}
+
 static int shmem_statfs(struct dentry *dentry, struct kstatfs *buf)
 {
 	struct shmem_sb_info *sbinfo = SHMEM_SB(dentry->d_sb);
@@ -1665,6 +2087,7 @@
 		kaddr = kmap_atomic(page);
 		memcpy(kaddr, symname, len);
 		kunmap_atomic(kaddr);
+		SetPageUptodate(page);
 		set_page_dirty(page);
 		unlock_page(page);
 		page_cache_release(page);
@@ -2033,11 +2456,9 @@
 	return dentry;
 }
 
-static int shmem_encode_fh(struct dentry *dentry, __u32 *fh, int *len,
-				int connectable)
+static int shmem_encode_fh(struct inode *inode, __u32 *fh, int *len,
+				struct inode *parent)
 {
-	struct inode *inode = dentry->d_inode;
-
 	if (*len < 3) {
 		*len = 3;
 		return 255;
@@ -2270,6 +2691,7 @@
 		}
 	}
 	sb->s_export_op = &shmem_export_ops;
+	sb->s_flags |= MS_NOSEC;
 #else
 	sb->s_flags |= MS_NOUSER;
 #endif
@@ -2364,7 +2786,7 @@
 static const struct file_operations shmem_file_operations = {
 	.mmap		= shmem_mmap,
 #ifdef CONFIG_TMPFS
-	.llseek		= generic_file_llseek,
+	.llseek		= shmem_file_llseek,
 	.read		= do_sync_read,
 	.write		= do_sync_write,
 	.aio_read	= shmem_file_aio_read,
@@ -2372,12 +2794,12 @@
 	.fsync		= noop_fsync,
 	.splice_read	= shmem_file_splice_read,
 	.splice_write	= generic_file_splice_write,
+	.fallocate	= shmem_fallocate,
 #endif
 };
 
 static const struct inode_operations shmem_inode_operations = {
 	.setattr	= shmem_setattr,
-	.truncate_range	= shmem_truncate_range,
 #ifdef CONFIG_TMPFS_XATTR
 	.setxattr	= shmem_setxattr,
 	.getxattr	= shmem_getxattr,
diff --git a/mm/slub.c b/mm/slub.c
index 80848cd..8c691fa 100644
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -1369,7 +1369,7 @@
 
 	inc_slabs_node(s, page_to_nid(page), page->objects);
 	page->slab = s;
-	page->flags |= 1 << PG_slab;
+	__SetPageSlab(page);
 
 	start = page_address(page);
 
@@ -1514,15 +1514,19 @@
 		freelist = page->freelist;
 		counters = page->counters;
 		new.counters = counters;
-		if (mode)
+		if (mode) {
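+			/*
+			 * Taking the slab as the new cpu slab: every object is
+			 * marked in use and the old freelist is handed back to
+			 * the caller, leaving the page's freelist empty.
+			 */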
 			new.inuse = page->objects;
+			new.freelist = NULL;
+		} else {
+			new.freelist = freelist;
+		}
 
 		VM_BUG_ON(new.frozen);
 		new.frozen = 1;
 
 	} while (!__cmpxchg_double_slab(s, page,
 			freelist, counters,
-			NULL, new.counters,
+			new.freelist, new.counters,
 			"lock and freeze"));
 
 	remove_partial(n, page);
@@ -1564,7 +1568,6 @@
 			object = t;
 			available =  page->objects - page->inuse;
 		} else {
-			page->freelist = t;
 			available = put_cpu_partial(s, page, 0);
 			stat(s, CPU_PARTIAL_NODE);
 		}
@@ -1579,7 +1582,7 @@
 /*
  * Get a page from somewhere. Search in increasing NUMA distances.
  */
-static struct page *get_any_partial(struct kmem_cache *s, gfp_t flags,
+static void *get_any_partial(struct kmem_cache *s, gfp_t flags,
 		struct kmem_cache_cpu *c)
 {
 #ifdef CONFIG_NUMA
@@ -2766,7 +2769,7 @@
 }
 
 static void
-init_kmem_cache_node(struct kmem_cache_node *n, struct kmem_cache *s)
+init_kmem_cache_node(struct kmem_cache_node *n)
 {
 	n->nr_partial = 0;
 	spin_lock_init(&n->list_lock);
@@ -2836,7 +2839,7 @@
 	init_object(kmem_cache_node, n, SLUB_RED_ACTIVE);
 	init_tracking(kmem_cache_node, n);
 #endif
-	init_kmem_cache_node(n, kmem_cache_node);
+	init_kmem_cache_node(n);
 	inc_slabs_node(kmem_cache_node, node, page->objects);
 
 	add_partial(n, page, DEACTIVATE_TO_HEAD);
@@ -2876,7 +2879,7 @@
 		}
 
 		s->node[node] = n;
-		init_kmem_cache_node(n, s);
+		init_kmem_cache_node(n);
 	}
 	return 1;
 }
@@ -3625,7 +3628,7 @@
 			ret = -ENOMEM;
 			goto out;
 		}
-		init_kmem_cache_node(n, s);
+		init_kmem_cache_node(n);
 		s->node[nid] = n;
 	}
 out:
@@ -3968,9 +3971,9 @@
 			}
 			return s;
 		}
-		kfree(n);
 		kfree(s);
 	}
+	kfree(n);
 err:
 	up_write(&slub_lock);
 
diff --git a/mm/sparse.c b/mm/sparse.c
index a8bc7d3..6a4bf91 100644
--- a/mm/sparse.c
+++ b/mm/sparse.c
@@ -273,10 +273,10 @@
 #ifdef CONFIG_MEMORY_HOTREMOVE
 static unsigned long * __init
 sparse_early_usemaps_alloc_pgdat_section(struct pglist_data *pgdat,
-					 unsigned long count)
+					 unsigned long size)
 {
-	unsigned long section_nr;
-
+	pg_data_t *host_pgdat;
+	unsigned long goal;
 	/*
 	 * A page may contain usemaps for other sections preventing the
 	 * page being freed and making a section unremovable while
@@ -287,8 +287,10 @@
 	 * from the same section as the pgdat where possible to avoid
 	 * this problem.
 	 */
-	section_nr = pfn_to_section_nr(__pa(pgdat) >> PAGE_SHIFT);
-	return alloc_bootmem_section(usemap_size() * count, section_nr);
+	goal = __pa(pgdat) & PAGE_SECTION_MASK;
+	host_pgdat = NODE_DATA(early_pfn_to_nid(goal >> PAGE_SHIFT));
+	return __alloc_bootmem_node_nopanic(host_pgdat, size,
+					    SMP_CACHE_BYTES, goal);
 }
 
 static void __init check_usemap_section_nr(int nid, unsigned long *usemap)
@@ -332,9 +334,9 @@
 #else
 static unsigned long * __init
 sparse_early_usemaps_alloc_pgdat_section(struct pglist_data *pgdat,
-					 unsigned long count)
+					 unsigned long size)
 {
-	return NULL;
+	return alloc_bootmem_node_nopanic(pgdat, size);
 }
 
 static void __init check_usemap_section_nr(int nid, unsigned long *usemap)
@@ -352,13 +354,10 @@
 	int size = usemap_size();
 
 	usemap = sparse_early_usemaps_alloc_pgdat_section(NODE_DATA(nodeid),
-								 usemap_count);
+							  size * usemap_count);
 	if (!usemap) {
-		usemap = alloc_bootmem_node(NODE_DATA(nodeid), size * usemap_count);
-		if (!usemap) {
-			printk(KERN_WARNING "%s: allocation failed\n", __func__);
-			return;
-		}
+		printk(KERN_WARNING "%s: allocation failed\n", __func__);
+		return;
 	}
 
 	for (pnum = pnum_begin; pnum < pnum_end; pnum++) {
diff --git a/mm/swap.c b/mm/swap.c
index 5c13f13..4e7e2ec 100644
--- a/mm/swap.c
+++ b/mm/swap.c
@@ -47,13 +47,15 @@
 static void __page_cache_release(struct page *page)
 {
 	if (PageLRU(page)) {
-		unsigned long flags;
 		struct zone *zone = page_zone(page);
+		struct lruvec *lruvec;
+		unsigned long flags;
 
 		spin_lock_irqsave(&zone->lru_lock, flags);
+		lruvec = mem_cgroup_page_lruvec(page, zone);
 		VM_BUG_ON(!PageLRU(page));
 		__ClearPageLRU(page);
-		del_page_from_lru_list(zone, page, page_off_lru(page));
+		del_page_from_lru_list(page, lruvec, page_off_lru(page));
 		spin_unlock_irqrestore(&zone->lru_lock, flags);
 	}
 }
@@ -82,6 +84,25 @@
 		if (likely(page != page_head &&
 			   get_page_unless_zero(page_head))) {
 			unsigned long flags;
+
+			/*
+			 * THP cannot break up slab pages, so avoid taking
+			 * compound_lock().  Slab performs non-atomic bit ops
+			 * on page->flags for better performance.  In particular
+			 * slab_unlock() in slub used to be a hot path.  It is
+			 * still hot on arches that do not support
+			 * this_cpu_cmpxchg_double().
+			 */
+			if (PageSlab(page_head)) {
+				if (PageTail(page)) {
+					if (put_page_testzero(page_head))
+						VM_BUG_ON(1);
+
+					atomic_dec(&page->_mapcount);
+					goto skip_lock_tail;
+				} else
+					goto skip_lock;
+			}
 			/*
 			 * page_head wasn't a dangling pointer but it
 			 * may not be a head page anymore by the time
@@ -92,10 +113,10 @@
 			if (unlikely(!PageTail(page))) {
 				/* __split_huge_page_refcount run before us */
 				compound_unlock_irqrestore(page_head, flags);
-				VM_BUG_ON(PageHead(page_head));
+skip_lock:
 				if (put_page_testzero(page_head))
 					__put_single_page(page_head);
-			out_put_single:
+out_put_single:
 				if (put_page_testzero(page))
 					__put_single_page(page);
 				return;
@@ -115,6 +136,8 @@
 			VM_BUG_ON(atomic_read(&page_head->_count) <= 0);
 			VM_BUG_ON(atomic_read(&page->_count) != 0);
 			compound_unlock_irqrestore(page_head, flags);
+
+skip_lock_tail:
 			if (put_page_testzero(page_head)) {
 				if (PageHead(page_head))
 					__put_compound_page(page_head);
@@ -162,6 +185,18 @@
 	struct page *page_head = compound_trans_head(page);
 
 	if (likely(page != page_head && get_page_unless_zero(page_head))) {
+
+		/* See the corresponding comment in put_compound_page(). */
+		if (PageSlab(page_head)) {
+			if (likely(PageTail(page))) {
+				__get_page_tail_foll(page, false);
+				return true;
+			} else {
+				put_page(page_head);
+				return false;
+			}
+		}
+
 		/*
 		 * page_head wasn't a dangling pointer but it
 		 * may not be a head page anymore by the time
@@ -202,11 +237,12 @@
 EXPORT_SYMBOL(put_pages_list);
 
 static void pagevec_lru_move_fn(struct pagevec *pvec,
-				void (*move_fn)(struct page *page, void *arg),
-				void *arg)
+	void (*move_fn)(struct page *page, struct lruvec *lruvec, void *arg),
+	void *arg)
 {
 	int i;
 	struct zone *zone = NULL;
+	struct lruvec *lruvec;
 	unsigned long flags = 0;
 
 	for (i = 0; i < pagevec_count(pvec); i++) {
@@ -220,7 +256,8 @@
 			spin_lock_irqsave(&zone->lru_lock, flags);
 		}
 
-		(*move_fn)(page, arg);
+		lruvec = mem_cgroup_page_lruvec(page, zone);
+		(*move_fn)(page, lruvec, arg);
 	}
 	if (zone)
 		spin_unlock_irqrestore(&zone->lru_lock, flags);
@@ -228,16 +265,13 @@
 	pagevec_reinit(pvec);
 }
 
-static void pagevec_move_tail_fn(struct page *page, void *arg)
+static void pagevec_move_tail_fn(struct page *page, struct lruvec *lruvec,
+				 void *arg)
 {
 	int *pgmoved = arg;
 
 	if (PageLRU(page) && !PageActive(page) && !PageUnevictable(page)) {
 		enum lru_list lru = page_lru_base_type(page);
-		struct lruvec *lruvec;
-
-		lruvec = mem_cgroup_lru_move_lists(page_zone(page),
-						   page, lru, lru);
 		list_move_tail(&page->lru, &lruvec->lists[lru]);
 		(*pgmoved)++;
 	}
@@ -276,41 +310,30 @@
 	}
 }
 
-static void update_page_reclaim_stat(struct zone *zone, struct page *page,
+static void update_page_reclaim_stat(struct lruvec *lruvec,
 				     int file, int rotated)
 {
-	struct zone_reclaim_stat *reclaim_stat = &zone->reclaim_stat;
-	struct zone_reclaim_stat *memcg_reclaim_stat;
-
-	memcg_reclaim_stat = mem_cgroup_get_reclaim_stat_from_page(page);
+	struct zone_reclaim_stat *reclaim_stat = &lruvec->reclaim_stat;
 
 	reclaim_stat->recent_scanned[file]++;
 	if (rotated)
 		reclaim_stat->recent_rotated[file]++;
-
-	if (!memcg_reclaim_stat)
-		return;
-
-	memcg_reclaim_stat->recent_scanned[file]++;
-	if (rotated)
-		memcg_reclaim_stat->recent_rotated[file]++;
 }
 
-static void __activate_page(struct page *page, void *arg)
+static void __activate_page(struct page *page, struct lruvec *lruvec,
+			    void *arg)
 {
-	struct zone *zone = page_zone(page);
-
 	if (PageLRU(page) && !PageActive(page) && !PageUnevictable(page)) {
 		int file = page_is_file_cache(page);
 		int lru = page_lru_base_type(page);
-		del_page_from_lru_list(zone, page, lru);
 
+		del_page_from_lru_list(page, lruvec, lru);
 		SetPageActive(page);
 		lru += LRU_ACTIVE;
-		add_page_to_lru_list(zone, page, lru);
-		__count_vm_event(PGACTIVATE);
+		add_page_to_lru_list(page, lruvec, lru);
 
-		update_page_reclaim_stat(zone, page, file, 1);
+		__count_vm_event(PGACTIVATE);
+		update_page_reclaim_stat(lruvec, file, 1);
 	}
 }
 
@@ -347,7 +370,7 @@
 	struct zone *zone = page_zone(page);
 
 	spin_lock_irq(&zone->lru_lock);
-	__activate_page(page, NULL);
+	__activate_page(page, mem_cgroup_page_lruvec(page, zone), NULL);
 	spin_unlock_irq(&zone->lru_lock);
 }
 #endif
@@ -414,11 +437,13 @@
 void add_page_to_unevictable_list(struct page *page)
 {
 	struct zone *zone = page_zone(page);
+	struct lruvec *lruvec;
 
 	spin_lock_irq(&zone->lru_lock);
+	lruvec = mem_cgroup_page_lruvec(page, zone);
 	SetPageUnevictable(page);
 	SetPageLRU(page);
-	add_page_to_lru_list(zone, page, LRU_UNEVICTABLE);
+	add_page_to_lru_list(page, lruvec, LRU_UNEVICTABLE);
 	spin_unlock_irq(&zone->lru_lock);
 }
 
@@ -443,11 +468,11 @@
  * be write it out by flusher threads as this is much more effective
  * than the single-page writeout from reclaim.
  */
-static void lru_deactivate_fn(struct page *page, void *arg)
+static void lru_deactivate_fn(struct page *page, struct lruvec *lruvec,
+			      void *arg)
 {
 	int lru, file;
 	bool active;
-	struct zone *zone = page_zone(page);
 
 	if (!PageLRU(page))
 		return;
@@ -460,13 +485,13 @@
 		return;
 
 	active = PageActive(page);
-
 	file = page_is_file_cache(page);
 	lru = page_lru_base_type(page);
-	del_page_from_lru_list(zone, page, lru + active);
+
+	del_page_from_lru_list(page, lruvec, lru + active);
 	ClearPageActive(page);
 	ClearPageReferenced(page);
-	add_page_to_lru_list(zone, page, lru);
+	add_page_to_lru_list(page, lruvec, lru);
 
 	if (PageWriteback(page) || PageDirty(page)) {
 		/*
@@ -476,19 +501,17 @@
 		 */
 		SetPageReclaim(page);
 	} else {
-		struct lruvec *lruvec;
 		/*
 		 * The page's writeback ends up during pagevec
 		 * We moves tha page into tail of inactive.
 		 */
-		lruvec = mem_cgroup_lru_move_lists(zone, page, lru, lru);
 		list_move_tail(&page->lru, &lruvec->lists[lru]);
 		__count_vm_event(PGROTATED);
 	}
 
 	if (active)
 		__count_vm_event(PGDEACTIVATE);
-	update_page_reclaim_stat(zone, page, file, 0);
+	update_page_reclaim_stat(lruvec, file, 0);
 }
 
 /*
@@ -588,6 +611,7 @@
 	int i;
 	LIST_HEAD(pages_to_free);
 	struct zone *zone = NULL;
+	struct lruvec *lruvec;
 	unsigned long uninitialized_var(flags);
 
 	for (i = 0; i < nr; i++) {
@@ -615,9 +639,11 @@
 				zone = pagezone;
 				spin_lock_irqsave(&zone->lru_lock, flags);
 			}
+
+			lruvec = mem_cgroup_page_lruvec(page, zone);
 			VM_BUG_ON(!PageLRU(page));
 			__ClearPageLRU(page);
-			del_page_from_lru_list(zone, page, page_off_lru(page));
+			del_page_from_lru_list(page, lruvec, page_off_lru(page));
 		}
 
 		list_add(&page->lru, &pages_to_free);
@@ -649,8 +675,8 @@
 
 #ifdef CONFIG_TRANSPARENT_HUGEPAGE
 /* used by __split_huge_page_refcount() */
-void lru_add_page_tail(struct zone* zone,
-		       struct page *page, struct page *page_tail)
+void lru_add_page_tail(struct page *page, struct page *page_tail,
+		       struct lruvec *lruvec)
 {
 	int uninitialized_var(active);
 	enum lru_list lru;
@@ -659,7 +685,8 @@
 	VM_BUG_ON(!PageHead(page));
 	VM_BUG_ON(PageCompound(page_tail));
 	VM_BUG_ON(PageLRU(page_tail));
-	VM_BUG_ON(NR_CPUS != 1 && !spin_is_locked(&zone->lru_lock));
+	VM_BUG_ON(NR_CPUS != 1 &&
+		  !spin_is_locked(&lruvec_zone(lruvec)->lru_lock));
 
 	SetPageLRU(page_tail);
 
@@ -688,20 +715,20 @@
 		 * Use the standard add function to put page_tail on the list,
 		 * but then correct its position so they all end up in order.
 		 */
-		add_page_to_lru_list(zone, page_tail, lru);
+		add_page_to_lru_list(page_tail, lruvec, lru);
 		list_head = page_tail->lru.prev;
 		list_move_tail(&page_tail->lru, list_head);
 	}
 
 	if (!PageUnevictable(page))
-		update_page_reclaim_stat(zone, page_tail, file, active);
+		update_page_reclaim_stat(lruvec, file, active);
 }
 #endif /* CONFIG_TRANSPARENT_HUGEPAGE */
 
-static void __pagevec_lru_add_fn(struct page *page, void *arg)
+static void __pagevec_lru_add_fn(struct page *page, struct lruvec *lruvec,
+				 void *arg)
 {
 	enum lru_list lru = (enum lru_list)arg;
-	struct zone *zone = page_zone(page);
 	int file = is_file_lru(lru);
 	int active = is_active_lru(lru);
 
@@ -712,8 +739,8 @@
 	SetPageLRU(page);
 	if (active)
 		SetPageActive(page);
-	add_page_to_lru_list(zone, page, lru);
-	update_page_reclaim_stat(zone, page, file, active);
+	add_page_to_lru_list(page, lruvec, lru);
+	update_page_reclaim_stat(lruvec, file, active);
 }
 
 /*
diff --git a/mm/swapfile.c b/mm/swapfile.c
index fafc26d..71373d0 100644
--- a/mm/swapfile.c
+++ b/mm/swapfile.c
@@ -31,6 +31,8 @@
 #include <linux/memcontrol.h>
 #include <linux/poll.h>
 #include <linux/oom.h>
+#include <linux/frontswap.h>
+#include <linux/swapfile.h>
 
 #include <asm/pgtable.h>
 #include <asm/tlbflush.h>
@@ -42,7 +44,7 @@
 static void free_swap_count_continuations(struct swap_info_struct *);
 static sector_t map_swap_entry(swp_entry_t, struct block_device**);
 
-static DEFINE_SPINLOCK(swap_lock);
+DEFINE_SPINLOCK(swap_lock);
 static unsigned int nr_swapfiles;
 long nr_swap_pages;
 long total_swap_pages;
@@ -53,9 +55,9 @@
 static const char Bad_offset[] = "Bad swap offset entry ";
 static const char Unused_offset[] = "Unused swap offset entry ";
 
-static struct swap_list_t swap_list = {-1, -1};
+struct swap_list_t swap_list = {-1, -1};
 
-static struct swap_info_struct *swap_info[MAX_SWAPFILES];
+struct swap_info_struct *swap_info[MAX_SWAPFILES];
 
 static DEFINE_MUTEX(swapon_mutex);
 
@@ -556,6 +558,7 @@
 			swap_list.next = p->type;
 		nr_swap_pages++;
 		p->inuse_pages--;
+		frontswap_invalidate_page(p->type, offset);
 		if ((p->flags & SWP_BLKDEV) &&
 				disk->fops->swap_slot_free_notify)
 			disk->fops->swap_slot_free_notify(p->bdev, offset);
@@ -601,7 +604,7 @@
  * This does not give an exact answer when swap count is continued,
  * but does include the high COUNT_CONTINUED flag to allow for that.
  */
-static inline int page_swapcount(struct page *page)
+int page_swapcount(struct page *page)
 {
 	int count = 0;
 	struct swap_info_struct *p;
@@ -717,37 +720,6 @@
 	return p != NULL;
 }
 
-#ifdef CONFIG_CGROUP_MEM_RES_CTLR
-/**
- * mem_cgroup_count_swap_user - count the user of a swap entry
- * @ent: the swap entry to be checked
- * @pagep: the pointer for the swap cache page of the entry to be stored
- *
- * Returns the number of the user of the swap entry. The number is valid only
- * for swaps of anonymous pages.
- * If the entry is found on swap cache, the page is stored to pagep with
- * refcount of it being incremented.
- */
-int mem_cgroup_count_swap_user(swp_entry_t ent, struct page **pagep)
-{
-	struct page *page;
-	struct swap_info_struct *p;
-	int count = 0;
-
-	page = find_get_page(&swapper_space, ent.val);
-	if (page)
-		count += page_mapcount(page);
-	p = swap_info_get(ent);
-	if (p) {
-		count += swap_count(p->swap_map[swp_offset(ent)]);
-		spin_unlock(&swap_lock);
-	}
-
-	*pagep = page;
-	return count;
-}
-#endif
-
 #ifdef CONFIG_HIBERNATION
 /*
  * Find the swap type that corresponds to given device (if any).
@@ -1016,11 +988,12 @@
 }
 
 /*
- * Scan swap_map from current position to next entry still in use.
+ * Scan swap_map (or frontswap_map if frontswap parameter is true)
+ * from current position to next entry still in use.
  * Recycle to start on reaching the end, returning 0 when empty.
  */
 static unsigned int find_next_to_unuse(struct swap_info_struct *si,
-					unsigned int prev)
+					unsigned int prev, bool frontswap)
 {
 	unsigned int max = si->max;
 	unsigned int i = prev;
@@ -1046,6 +1019,12 @@
 			prev = 0;
 			i = 1;
 		}
+		if (frontswap) {
+			if (frontswap_test(si, i))
+				break;
+			else
+				continue;
+		}
 		count = si->swap_map[i];
 		if (count && swap_count(count) != SWAP_MAP_BAD)
 			break;
@@ -1057,8 +1036,12 @@
  * We completely avoid races by reading each swap page in advance,
  * and then search for the process using it.  All the necessary
  * page table adjustments can then be made atomically.
+ *
+ * If the boolean frontswap is true, only unuse pages_to_unuse pages;
+ * pages_to_unuse == 0 means all pages; ignored if frontswap is false.
  */
-static int try_to_unuse(unsigned int type)
+int try_to_unuse(unsigned int type, bool frontswap,
+		 unsigned long pages_to_unuse)
 {
 	struct swap_info_struct *si = swap_info[type];
 	struct mm_struct *start_mm;
@@ -1091,7 +1074,7 @@
 	 * one pass through swap_map is enough, but not necessarily:
 	 * there are races when an instance of an entry might be missed.
 	 */
-	while ((i = find_next_to_unuse(si, i)) != 0) {
+	while ((i = find_next_to_unuse(si, i, frontswap)) != 0) {
 		if (signal_pending(current)) {
 			retval = -EINTR;
 			break;
@@ -1258,6 +1241,10 @@
 		 * interactive performance.
 		 */
 		cond_resched();
+		if (frontswap && pages_to_unuse > 0) {
+			if (!--pages_to_unuse)
+				break;
+		}
 	}
 
 	mmput(start_mm);
@@ -1517,7 +1504,8 @@
 }
 
 static void enable_swap_info(struct swap_info_struct *p, int prio,
-				unsigned char *swap_map)
+				unsigned char *swap_map,
+				unsigned long *frontswap_map)
 {
 	int i, prev;
 
@@ -1527,6 +1515,7 @@
 	else
 		p->prio = --least_priority;
 	p->swap_map = swap_map;
+	frontswap_map_set(p, frontswap_map);
 	p->flags |= SWP_WRITEOK;
 	nr_swap_pages += p->pages;
 	total_swap_pages += p->pages;
@@ -1543,6 +1532,7 @@
 		swap_list.head = swap_list.next = p->type;
 	else
 		swap_info[prev]->next = p->type;
+	frontswap_init(p->type);
 	spin_unlock(&swap_lock);
 }
 
@@ -1616,7 +1606,7 @@
 	spin_unlock(&swap_lock);
 
 	oom_score_adj = test_set_oom_score_adj(OOM_SCORE_ADJ_MAX);
-	err = try_to_unuse(type);
+	err = try_to_unuse(type, false, 0); /* force all pages to be unused */
 	compare_swap_oom_score_adj(OOM_SCORE_ADJ_MAX, oom_score_adj);
 
 	if (err) {
@@ -1627,7 +1617,7 @@
 		 * sys_swapoff for this swap_info_struct at this point.
 		 */
 		/* re-insert swap space back into swap_list */
-		enable_swap_info(p, p->prio, p->swap_map);
+		enable_swap_info(p, p->prio, p->swap_map, frontswap_map_get(p));
 		goto out_dput;
 	}
 
@@ -1653,9 +1643,11 @@
 	swap_map = p->swap_map;
 	p->swap_map = NULL;
 	p->flags = 0;
+	frontswap_invalidate_area(type);
 	spin_unlock(&swap_lock);
 	mutex_unlock(&swapon_mutex);
 	vfree(swap_map);
+	vfree(frontswap_map_get(p));
 	/* Destroy swap account informatin */
 	swap_cgroup_swapoff(type);
 
@@ -1924,24 +1916,20 @@
 
 	/*
 	 * Find out how many pages are allowed for a single swap
-	 * device. There are three limiting factors: 1) the number
+	 * device. There are two limiting factors: 1) the number
 	 * of bits for the swap offset in the swp_entry_t type, and
 	 * 2) the number of bits in the swap pte as defined by the
-	 * the different architectures, and 3) the number of free bits
-	 * in an exceptional radix_tree entry. In order to find the
+	 * different architectures. In order to find the
 	 * largest possible bit mask, a swap entry with swap type 0
 	 * and swap offset ~0UL is created, encoded to a swap pte,
 	 * decoded to a swp_entry_t again, and finally the swap
 	 * offset is extracted. This will mask all the bits from
 	 * the initial ~0UL mask that can't be encoded in either
 	 * the swp_entry_t or the architecture definition of a
-	 * swap pte.  Then the same is done for a radix_tree entry.
+	 * swap pte.
 	 */
 	maxpages = swp_offset(pte_to_swp_entry(
-			swp_entry_to_pte(swp_entry(0, ~0UL))));
-	maxpages = swp_offset(radix_to_swp_entry(
-			swp_to_radix_entry(swp_entry(0, maxpages)))) + 1;
-
+			swp_entry_to_pte(swp_entry(0, ~0UL)))) + 1;
 	if (maxpages > swap_header->info.last_page) {
 		maxpages = swap_header->info.last_page + 1;
 		/* p->max is an unsigned int: don't overflow it */
@@ -2019,6 +2007,7 @@
 	sector_t span;
 	unsigned long maxpages;
 	unsigned char *swap_map = NULL;
+	unsigned long *frontswap_map = NULL;
 	struct page *page = NULL;
 	struct inode *inode = NULL;
 
@@ -2102,6 +2091,9 @@
 		error = nr_extents;
 		goto bad_swap;
 	}
+	/* frontswap enabled? set up bit-per-page map for frontswap */
+	if (frontswap_enabled)
+		frontswap_map = vzalloc(maxpages / sizeof(long));
 
 	if (p->bdev) {
 		if (blk_queue_nonrot(bdev_get_queue(p->bdev))) {
@@ -2117,14 +2109,15 @@
 	if (swap_flags & SWAP_FLAG_PREFER)
 		prio =
 		  (swap_flags & SWAP_FLAG_PRIO_MASK) >> SWAP_FLAG_PRIO_SHIFT;
-	enable_swap_info(p, prio, swap_map);
+	enable_swap_info(p, prio, swap_map, frontswap_map);
 
 	printk(KERN_INFO "Adding %uk swap on %s.  "
-			"Priority:%d extents:%d across:%lluk %s%s\n",
+			"Priority:%d extents:%d across:%lluk %s%s%s\n",
 		p->pages<<(PAGE_SHIFT-10), name, p->prio,
 		nr_extents, (unsigned long long)span<<(PAGE_SHIFT-10),
 		(p->flags & SWP_SOLIDSTATE) ? "SS" : "",
-		(p->flags & SWP_DISCARDABLE) ? "D" : "");
+		(p->flags & SWP_DISCARDABLE) ? "D" : "",
+		(frontswap_map) ? "FS" : "");
 
 	mutex_unlock(&swapon_mutex);
 	atomic_inc(&proc_poll_event);
diff --git a/mm/thrash.c b/mm/thrash.c
deleted file mode 100644
index 57ad495..0000000
--- a/mm/thrash.c
+++ /dev/null
@@ -1,155 +0,0 @@
-/*
- * mm/thrash.c
- *
- * Copyright (C) 2004, Red Hat, Inc.
- * Copyright (C) 2004, Rik van Riel <riel@redhat.com>
- * Released under the GPL, see the file COPYING for details.
- *
- * Simple token based thrashing protection, using the algorithm
- * described in: http://www.cse.ohio-state.edu/hpcs/WWW/HTML/publications/abs05-1.html
- *
- * Sep 2006, Ashwin Chaugule <ashwin.chaugule@celunite.com>
- * Improved algorithm to pass token:
- * Each task has a priority which is incremented if it contended
- * for the token in an interval less than its previous attempt.
- * If the token is acquired, that task's priority is boosted to prevent
- * the token from bouncing around too often and to let the task make
- * some progress in its execution.
- */
-
-#include <linux/jiffies.h>
-#include <linux/mm.h>
-#include <linux/sched.h>
-#include <linux/swap.h>
-#include <linux/memcontrol.h>
-
-#include <trace/events/vmscan.h>
-
-#define TOKEN_AGING_INTERVAL	(0xFF)
-
-static DEFINE_SPINLOCK(swap_token_lock);
-struct mm_struct *swap_token_mm;
-static struct mem_cgroup *swap_token_memcg;
-
-#ifdef CONFIG_CGROUP_MEM_RES_CTLR
-static struct mem_cgroup *swap_token_memcg_from_mm(struct mm_struct *mm)
-{
-	struct mem_cgroup *memcg;
-
-	memcg = try_get_mem_cgroup_from_mm(mm);
-	if (memcg)
-		css_put(mem_cgroup_css(memcg));
-
-	return memcg;
-}
-#else
-static struct mem_cgroup *swap_token_memcg_from_mm(struct mm_struct *mm)
-{
-	return NULL;
-}
-#endif
-
-void grab_swap_token(struct mm_struct *mm)
-{
-	int current_interval;
-	unsigned int old_prio = mm->token_priority;
-	static unsigned int global_faults;
-	static unsigned int last_aging;
-
-	global_faults++;
-
-	current_interval = global_faults - mm->faultstamp;
-
-	if (!spin_trylock(&swap_token_lock))
-		return;
-
-	/* First come first served */
-	if (!swap_token_mm)
-		goto replace_token;
-
-	/*
-	 * Usually, we don't need priority aging because long interval faults
-	 * makes priority decrease quickly. But there is one exception. If the
-	 * token owner task is sleeping, it never make long interval faults.
-	 * Thus, we need a priority aging mechanism instead. The requirements
-	 * of priority aging are
-	 *  1) An aging interval is reasonable enough long. Too short aging
-	 *     interval makes quick swap token lost and decrease performance.
-	 *  2) The swap token owner task have to get priority aging even if
-	 *     it's under sleep.
-	 */
-	if ((global_faults - last_aging) > TOKEN_AGING_INTERVAL) {
-		swap_token_mm->token_priority /= 2;
-		last_aging = global_faults;
-	}
-
-	if (mm == swap_token_mm) {
-		mm->token_priority += 2;
-		goto update_priority;
-	}
-
-	if (current_interval < mm->last_interval)
-		mm->token_priority++;
-	else {
-		if (likely(mm->token_priority > 0))
-			mm->token_priority--;
-	}
-
-	/* Check if we deserve the token */
-	if (mm->token_priority > swap_token_mm->token_priority)
-		goto replace_token;
-
-update_priority:
-	trace_update_swap_token_priority(mm, old_prio, swap_token_mm);
-
-out:
-	mm->faultstamp = global_faults;
-	mm->last_interval = current_interval;
-	spin_unlock(&swap_token_lock);
-	return;
-
-replace_token:
-	mm->token_priority += 2;
-	trace_replace_swap_token(swap_token_mm, mm);
-	swap_token_mm = mm;
-	swap_token_memcg = swap_token_memcg_from_mm(mm);
-	last_aging = global_faults;
-	goto out;
-}
-
-/* Called on process exit. */
-void __put_swap_token(struct mm_struct *mm)
-{
-	spin_lock(&swap_token_lock);
-	if (likely(mm == swap_token_mm)) {
-		trace_put_swap_token(swap_token_mm);
-		swap_token_mm = NULL;
-		swap_token_memcg = NULL;
-	}
-	spin_unlock(&swap_token_lock);
-}
-
-static bool match_memcg(struct mem_cgroup *a, struct mem_cgroup *b)
-{
-	if (!a)
-		return true;
-	if (!b)
-		return true;
-	if (a == b)
-		return true;
-	return false;
-}
-
-void disable_swap_token(struct mem_cgroup *memcg)
-{
-	/* memcg reclaim don't disable unrelated mm token. */
-	if (match_memcg(memcg, swap_token_memcg)) {
-		spin_lock(&swap_token_lock);
-		if (match_memcg(memcg, swap_token_memcg)) {
-			trace_disable_swap_token(swap_token_mm);
-			swap_token_mm = NULL;
-			swap_token_memcg = NULL;
-		}
-		spin_unlock(&swap_token_lock);
-	}
-}
diff --git a/mm/truncate.c b/mm/truncate.c
index 61a183b..75801ac 100644
--- a/mm/truncate.c
+++ b/mm/truncate.c
@@ -602,31 +602,6 @@
 }
 EXPORT_SYMBOL(vmtruncate);
 
-int vmtruncate_range(struct inode *inode, loff_t lstart, loff_t lend)
-{
-	struct address_space *mapping = inode->i_mapping;
-	loff_t holebegin = round_up(lstart, PAGE_SIZE);
-	loff_t holelen = 1 + lend - holebegin;
-
-	/*
-	 * If the underlying filesystem is not going to provide
-	 * a way to truncate a range of blocks (punch a hole) -
-	 * we should return failure right now.
-	 */
-	if (!inode->i_op->truncate_range)
-		return -ENOSYS;
-
-	mutex_lock(&inode->i_mutex);
-	inode_dio_wait(inode);
-	unmap_mapping_range(mapping, holebegin, holelen, 1);
-	inode->i_op->truncate_range(inode, lstart, lend);
-	/* unmap again to remove racily COWed private pages */
-	unmap_mapping_range(mapping, holebegin, holelen, 1);
-	mutex_unlock(&inode->i_mutex);
-
-	return 0;
-}
-
 /**
  * truncate_pagecache_range - unmap and remove pagecache that is hole-punched
  * @inode: inode
diff --git a/mm/util.c b/mm/util.c
index ae962b3..8c7265a 100644
--- a/mm/util.c
+++ b/mm/util.c
@@ -4,6 +4,7 @@
 #include <linux/export.h>
 #include <linux/err.h>
 #include <linux/sched.h>
+#include <linux/security.h>
 #include <asm/uaccess.h>
 
 #include "internal.h"
@@ -341,6 +342,35 @@
 }
 EXPORT_SYMBOL_GPL(get_user_pages_fast);
 
+unsigned long vm_mmap_pgoff(struct file *file, unsigned long addr,
+	unsigned long len, unsigned long prot,
+	unsigned long flag, unsigned long pgoff)
+{
+	unsigned long ret;
+	struct mm_struct *mm = current->mm;
+
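+	/* Ask the security layer first; only then take mmap_sem and call do_mmap_pgoff() */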
+	ret = security_mmap_file(file, prot, flag);
+	if (!ret) {
+		down_write(&mm->mmap_sem);
+		ret = do_mmap_pgoff(file, addr, len, prot, flag, pgoff);
+		up_write(&mm->mmap_sem);
+	}
+	return ret;
+}
+
+unsigned long vm_mmap(struct file *file, unsigned long addr,
+	unsigned long len, unsigned long prot,
+	unsigned long flag, unsigned long offset)
+{
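+	/* Reject an offset that overflows when len is added, or that is not page-aligned */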
+	if (unlikely(offset + PAGE_ALIGN(len) < offset))
+		return -EINVAL;
+	if (unlikely(offset & ~PAGE_MASK))
+		return -EINVAL;
+
+	return vm_mmap_pgoff(file, addr, len, prot, flag, offset >> PAGE_SHIFT);
+}
+EXPORT_SYMBOL(vm_mmap);
+
 /* Tracepoints definitions. */
 EXPORT_TRACEPOINT_SYMBOL(kmalloc);
 EXPORT_TRACEPOINT_SYMBOL(kmem_cache_alloc);
diff --git a/mm/vmalloc.c b/mm/vmalloc.c
index 94dff88..2aad499 100644
--- a/mm/vmalloc.c
+++ b/mm/vmalloc.c
@@ -1185,9 +1185,10 @@
 	/* Import existing vmlist entries. */
 	for (tmp = vmlist; tmp; tmp = tmp->next) {
 		va = kzalloc(sizeof(struct vmap_area), GFP_NOWAIT);
-		va->flags = tmp->flags | VM_VM_AREA;
+		va->flags = VM_VM_AREA;
 		va->va_start = (unsigned long)tmp->addr;
 		va->va_end = va->va_start + tmp->size;
+		va->vm = tmp;
 		__insert_vmap_area(va);
 	}
 
@@ -2375,8 +2376,8 @@
 		return NULL;
 	}
 
-	vms = kzalloc(sizeof(vms[0]) * nr_vms, GFP_KERNEL);
-	vas = kzalloc(sizeof(vas[0]) * nr_vms, GFP_KERNEL);
+	vms = kcalloc(nr_vms, sizeof(vms[0]), GFP_KERNEL);
+	vas = kcalloc(nr_vms, sizeof(vas[0]), GFP_KERNEL);
 	if (!vas || !vms)
 		goto err_free2;
 
diff --git a/mm/vmscan.c b/mm/vmscan.c
index 33dc256..eeb3bc9 100644
--- a/mm/vmscan.c
+++ b/mm/vmscan.c
@@ -53,24 +53,6 @@
 #define CREATE_TRACE_POINTS
 #include <trace/events/vmscan.h>
 
-/*
- * reclaim_mode determines how the inactive list is shrunk
- * RECLAIM_MODE_SINGLE: Reclaim only order-0 pages
- * RECLAIM_MODE_ASYNC:  Do not block
- * RECLAIM_MODE_SYNC:   Allow blocking e.g. call wait_on_page_writeback
- * RECLAIM_MODE_LUMPYRECLAIM: For high-order allocations, take a reference
- *			page from the LRU and reclaim all pages within a
- *			naturally aligned range
- * RECLAIM_MODE_COMPACTION: For high-order allocations, reclaim a number of
- *			order-0 pages and then compact the zone
- */
-typedef unsigned __bitwise__ reclaim_mode_t;
-#define RECLAIM_MODE_SINGLE		((__force reclaim_mode_t)0x01u)
-#define RECLAIM_MODE_ASYNC		((__force reclaim_mode_t)0x02u)
-#define RECLAIM_MODE_SYNC		((__force reclaim_mode_t)0x04u)
-#define RECLAIM_MODE_LUMPYRECLAIM	((__force reclaim_mode_t)0x08u)
-#define RECLAIM_MODE_COMPACTION		((__force reclaim_mode_t)0x10u)
-
 struct scan_control {
 	/* Incremented by the number of inactive pages that were scanned */
 	unsigned long nr_scanned;
@@ -96,11 +78,8 @@
 
 	int order;
 
-	/*
-	 * Intend to reclaim enough continuous memory rather than reclaim
-	 * enough amount of memory. i.e, mode for high order allocation.
-	 */
-	reclaim_mode_t reclaim_mode;
+	/* Scan (total_size >> priority) pages at once */
+	int priority;
 
 	/*
 	 * The memory cgroup that hit its limit and as a result is the
@@ -115,11 +94,6 @@
 	nodemask_t	*nodemask;
 };
 
-struct mem_cgroup_zone {
-	struct mem_cgroup *mem_cgroup;
-	struct zone *zone;
-};
-
 #define lru_to_page(_head) (list_entry((_head)->prev, struct page, lru))
 
 #ifdef ARCH_HAS_PREFETCH
@@ -164,44 +138,21 @@
 {
 	return !sc->target_mem_cgroup;
 }
-
-static bool scanning_global_lru(struct mem_cgroup_zone *mz)
-{
-	return !mz->mem_cgroup;
-}
 #else
 static bool global_reclaim(struct scan_control *sc)
 {
 	return true;
 }
-
-static bool scanning_global_lru(struct mem_cgroup_zone *mz)
-{
-	return true;
-}
 #endif
 
-static struct zone_reclaim_stat *get_reclaim_stat(struct mem_cgroup_zone *mz)
+static unsigned long get_lru_size(struct lruvec *lruvec, enum lru_list lru)
 {
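+	/* With memcg enabled the lruvec carries its own LRU size accounting; otherwise use the zone's counters */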
-	if (!scanning_global_lru(mz))
-		return mem_cgroup_get_reclaim_stat(mz->mem_cgroup, mz->zone);
+	if (!mem_cgroup_disabled())
+		return mem_cgroup_get_lru_size(lruvec, lru);
 
-	return &mz->zone->reclaim_stat;
+	return zone_page_state(lruvec_zone(lruvec), NR_LRU_BASE + lru);
 }
 
-static unsigned long zone_nr_lru_pages(struct mem_cgroup_zone *mz,
-				       enum lru_list lru)
-{
-	if (!scanning_global_lru(mz))
-		return mem_cgroup_zone_nr_lru_pages(mz->mem_cgroup,
-						    zone_to_nid(mz->zone),
-						    zone_idx(mz->zone),
-						    BIT(lru));
-
-	return zone_page_state(mz->zone, NR_LRU_BASE + lru);
-}
-
-
 /*
  * Add a shrinker callback to be called from the vm
  */
@@ -364,39 +315,6 @@
 	return ret;
 }
 
-static void set_reclaim_mode(int priority, struct scan_control *sc,
-				   bool sync)
-{
-	reclaim_mode_t syncmode = sync ? RECLAIM_MODE_SYNC : RECLAIM_MODE_ASYNC;
-
-	/*
-	 * Initially assume we are entering either lumpy reclaim or
-	 * reclaim/compaction.Depending on the order, we will either set the
-	 * sync mode or just reclaim order-0 pages later.
-	 */
-	if (COMPACTION_BUILD)
-		sc->reclaim_mode = RECLAIM_MODE_COMPACTION;
-	else
-		sc->reclaim_mode = RECLAIM_MODE_LUMPYRECLAIM;
-
-	/*
-	 * Avoid using lumpy reclaim or reclaim/compaction if possible by
-	 * restricting when its set to either costly allocations or when
-	 * under memory pressure
-	 */
-	if (sc->order > PAGE_ALLOC_COSTLY_ORDER)
-		sc->reclaim_mode |= syncmode;
-	else if (sc->order && priority < DEF_PRIORITY - 2)
-		sc->reclaim_mode |= syncmode;
-	else
-		sc->reclaim_mode = RECLAIM_MODE_SINGLE | RECLAIM_MODE_ASYNC;
-}
-
-static void reset_reclaim_mode(struct scan_control *sc)
-{
-	sc->reclaim_mode = RECLAIM_MODE_SINGLE | RECLAIM_MODE_ASYNC;
-}
-
 static inline int is_page_cache_freeable(struct page *page)
 {
 	/*
@@ -416,10 +334,6 @@
 		return 1;
 	if (bdi == current->backing_dev_info)
 		return 1;
-
-	/* lumpy reclaim for hugepage often need a lot of write */
-	if (sc->order > PAGE_ALLOC_COSTLY_ORDER)
-		return 1;
 	return 0;
 }
 
@@ -523,8 +437,7 @@
 			/* synchronous write or broken a_ops? */
 			ClearPageReclaim(page);
 		}
-		trace_mm_vmscan_writepage(page,
-			trace_reclaim_flags(page, sc->reclaim_mode));
+		trace_mm_vmscan_writepage(page, trace_reclaim_flags(page));
 		inc_zone_page_state(page, NR_VMSCAN_WRITE);
 		return PAGE_SUCCESS;
 	}
@@ -701,19 +614,15 @@
 };
 
 static enum page_references page_check_references(struct page *page,
-						  struct mem_cgroup_zone *mz,
 						  struct scan_control *sc)
 {
 	int referenced_ptes, referenced_page;
 	unsigned long vm_flags;
 
-	referenced_ptes = page_referenced(page, 1, mz->mem_cgroup, &vm_flags);
+	referenced_ptes = page_referenced(page, 1, sc->target_mem_cgroup,
+					  &vm_flags);
 	referenced_page = TestClearPageReferenced(page);
 
-	/* Lumpy reclaim - ignore references */
-	if (sc->reclaim_mode & RECLAIM_MODE_LUMPYRECLAIM)
-		return PAGEREF_RECLAIM;
-
 	/*
 	 * Mlock lost the isolation race with us.  Let try_to_unmap()
 	 * move the page to the unevictable list.
@@ -722,7 +631,7 @@
 		return PAGEREF_RECLAIM;
 
 	if (referenced_ptes) {
-		if (PageAnon(page))
+		if (PageSwapBacked(page))
 			return PAGEREF_ACTIVATE;
 		/*
 		 * All mapped pages start out with page table
@@ -763,9 +672,8 @@
  * shrink_page_list() returns the number of reclaimed pages
  */
 static unsigned long shrink_page_list(struct list_head *page_list,
-				      struct mem_cgroup_zone *mz,
+				      struct zone *zone,
 				      struct scan_control *sc,
-				      int priority,
 				      unsigned long *ret_nr_dirty,
 				      unsigned long *ret_nr_writeback)
 {
@@ -794,7 +702,7 @@
 			goto keep;
 
 		VM_BUG_ON(PageActive(page));
-		VM_BUG_ON(page_zone(page) != mz->zone);
+		VM_BUG_ON(page_zone(page) != zone);
 
 		sc->nr_scanned++;
 
@@ -813,22 +721,11 @@
 
 		if (PageWriteback(page)) {
 			nr_writeback++;
-			/*
-			 * Synchronous reclaim cannot queue pages for
-			 * writeback due to the possibility of stack overflow
-			 * but if it encounters a page under writeback, wait
-			 * for the IO to complete.
-			 */
-			if ((sc->reclaim_mode & RECLAIM_MODE_SYNC) &&
-			    may_enter_fs)
-				wait_on_page_writeback(page);
-			else {
-				unlock_page(page);
-				goto keep_lumpy;
-			}
+			unlock_page(page);
+			goto keep;
 		}
 
-		references = page_check_references(page, mz, sc);
+		references = page_check_references(page, sc);
 		switch (references) {
 		case PAGEREF_ACTIVATE:
 			goto activate_locked;
@@ -879,7 +776,8 @@
 			 * unless under significant pressure.
 			 */
 			if (page_is_file_cache(page) &&
-					(!current_is_kswapd() || priority >= DEF_PRIORITY - 2)) {
+					(!current_is_kswapd() ||
+					 sc->priority >= DEF_PRIORITY - 2)) {
 				/*
 				 * Immediately reclaim when written back.
 				 * Similar in principal to deactivate_page()
@@ -908,7 +806,7 @@
 				goto activate_locked;
 			case PAGE_SUCCESS:
 				if (PageWriteback(page))
-					goto keep_lumpy;
+					goto keep;
 				if (PageDirty(page))
 					goto keep;
 
@@ -994,7 +892,6 @@
 			try_to_free_swap(page);
 		unlock_page(page);
 		putback_lru_page(page);
-		reset_reclaim_mode(sc);
 		continue;
 
 activate_locked:
@@ -1007,8 +904,6 @@
 keep_locked:
 		unlock_page(page);
 keep:
-		reset_reclaim_mode(sc);
-keep_lumpy:
 		list_add(&page->lru, &ret_pages);
 		VM_BUG_ON(PageLRU(page) || PageUnevictable(page));
 	}
@@ -1020,7 +915,7 @@
 	 * will encounter the same problem
 	 */
 	if (nr_dirty && nr_dirty == nr_congested && global_reclaim(sc))
-		zone_set_flag(mz->zone, ZONE_CONGESTED);
+		zone_set_flag(zone, ZONE_CONGESTED);
 
 	free_hot_cold_page_list(&free_pages, 1);
 
@@ -1041,34 +936,15 @@
  *
  * returns 0 on success, -ve errno on failure.
  */
-int __isolate_lru_page(struct page *page, isolate_mode_t mode, int file)
+int __isolate_lru_page(struct page *page, isolate_mode_t mode)
 {
-	bool all_lru_mode;
 	int ret = -EINVAL;
 
 	/* Only take pages on the LRU. */
 	if (!PageLRU(page))
 		return ret;
 
-	all_lru_mode = (mode & (ISOLATE_ACTIVE|ISOLATE_INACTIVE)) ==
-		(ISOLATE_ACTIVE|ISOLATE_INACTIVE);
-
-	/*
-	 * When checking the active state, we need to be sure we are
-	 * dealing with comparible boolean values.  Take the logical not
-	 * of each.
-	 */
-	if (!all_lru_mode && !PageActive(page) != !(mode & ISOLATE_ACTIVE))
-		return ret;
-
-	if (!all_lru_mode && !!page_is_file_cache(page) != file)
-		return ret;
-
-	/*
-	 * When this function is being called for lumpy reclaim, we
-	 * initially look into all LRU pages, active, inactive and
-	 * unevictable; only give shrink_page_list evictable pages.
-	 */
+	/* Do not give back unevictable pages for compaction */
 	if (PageUnevictable(page))
 		return ret;
 
@@ -1135,54 +1011,39 @@
  * Appropriate locks must be held before calling this function.
  *
  * @nr_to_scan:	The number of pages to look through on the list.
- * @mz:		The mem_cgroup_zone to pull pages from.
+ * @lruvec:	The LRU vector to pull pages from.
  * @dst:	The temp list to put pages on to.
  * @nr_scanned:	The number of pages that were scanned.
  * @sc:		The scan_control struct for this reclaim session
  * @mode:	One of the LRU isolation modes
- * @active:	True [1] if isolating active pages
- * @file:	True [1] if isolating file [!anon] pages
+ * @lru:	LRU list id for isolating
  *
  * returns how many pages were moved onto *@dst.
  */
 static unsigned long isolate_lru_pages(unsigned long nr_to_scan,
-		struct mem_cgroup_zone *mz, struct list_head *dst,
+		struct lruvec *lruvec, struct list_head *dst,
 		unsigned long *nr_scanned, struct scan_control *sc,
-		isolate_mode_t mode, int active, int file)
+		isolate_mode_t mode, enum lru_list lru)
 {
-	struct lruvec *lruvec;
-	struct list_head *src;
+	struct list_head *src = &lruvec->lists[lru];
 	unsigned long nr_taken = 0;
-	unsigned long nr_lumpy_taken = 0;
-	unsigned long nr_lumpy_dirty = 0;
-	unsigned long nr_lumpy_failed = 0;
 	unsigned long scan;
-	int lru = LRU_BASE;
-
-	lruvec = mem_cgroup_zone_lruvec(mz->zone, mz->mem_cgroup);
-	if (active)
-		lru += LRU_ACTIVE;
-	if (file)
-		lru += LRU_FILE;
-	src = &lruvec->lists[lru];
 
 	for (scan = 0; scan < nr_to_scan && !list_empty(src); scan++) {
 		struct page *page;
-		unsigned long pfn;
-		unsigned long end_pfn;
-		unsigned long page_pfn;
-		int zone_id;
+		int nr_pages;
 
 		page = lru_to_page(src);
 		prefetchw_prev_lru_page(page, src, flags);
 
 		VM_BUG_ON(!PageLRU(page));
 
-		switch (__isolate_lru_page(page, mode, file)) {
+		switch (__isolate_lru_page(page, mode)) {
 		case 0:
-			mem_cgroup_lru_del(page);
+			nr_pages = hpage_nr_pages(page);
+			mem_cgroup_update_lru_size(lruvec, lru, -nr_pages);
 			list_move(&page->lru, dst);
-			nr_taken += hpage_nr_pages(page);
+			nr_taken += nr_pages;
 			break;
 
 		case -EBUSY:
@@ -1193,93 +1054,11 @@
 		default:
 			BUG();
 		}
-
-		if (!sc->order || !(sc->reclaim_mode & RECLAIM_MODE_LUMPYRECLAIM))
-			continue;
-
-		/*
-		 * Attempt to take all pages in the order aligned region
-		 * surrounding the tag page.  Only take those pages of
-		 * the same active state as that tag page.  We may safely
-		 * round the target page pfn down to the requested order
-		 * as the mem_map is guaranteed valid out to MAX_ORDER,
-		 * where that page is in a different zone we will detect
-		 * it from its zone id and abort this block scan.
-		 */
-		zone_id = page_zone_id(page);
-		page_pfn = page_to_pfn(page);
-		pfn = page_pfn & ~((1 << sc->order) - 1);
-		end_pfn = pfn + (1 << sc->order);
-		for (; pfn < end_pfn; pfn++) {
-			struct page *cursor_page;
-
-			/* The target page is in the block, ignore it. */
-			if (unlikely(pfn == page_pfn))
-				continue;
-
-			/* Avoid holes within the zone. */
-			if (unlikely(!pfn_valid_within(pfn)))
-				break;
-
-			cursor_page = pfn_to_page(pfn);
-
-			/* Check that we have not crossed a zone boundary. */
-			if (unlikely(page_zone_id(cursor_page) != zone_id))
-				break;
-
-			/*
-			 * If we don't have enough swap space, reclaiming of
-			 * anon page which don't already have a swap slot is
-			 * pointless.
-			 */
-			if (nr_swap_pages <= 0 && PageSwapBacked(cursor_page) &&
-			    !PageSwapCache(cursor_page))
-				break;
-
-			if (__isolate_lru_page(cursor_page, mode, file) == 0) {
-				unsigned int isolated_pages;
-
-				mem_cgroup_lru_del(cursor_page);
-				list_move(&cursor_page->lru, dst);
-				isolated_pages = hpage_nr_pages(cursor_page);
-				nr_taken += isolated_pages;
-				nr_lumpy_taken += isolated_pages;
-				if (PageDirty(cursor_page))
-					nr_lumpy_dirty += isolated_pages;
-				scan++;
-				pfn += isolated_pages - 1;
-			} else {
-				/*
-				 * Check if the page is freed already.
-				 *
-				 * We can't use page_count() as that
-				 * requires compound_head and we don't
-				 * have a pin on the page here. If a
-				 * page is tail, we may or may not
-				 * have isolated the head, so assume
-				 * it's not free, it'd be tricky to
-				 * track the head status without a
-				 * page pin.
-				 */
-				if (!PageTail(cursor_page) &&
-				    !atomic_read(&cursor_page->_count))
-					continue;
-				break;
-			}
-		}
-
-		/* If we break out of the loop above, lumpy reclaim failed */
-		if (pfn < end_pfn)
-			nr_lumpy_failed++;
 	}
 
 	*nr_scanned = scan;
-
-	trace_mm_vmscan_lru_isolate(sc->order,
-			nr_to_scan, scan,
-			nr_taken,
-			nr_lumpy_taken, nr_lumpy_dirty, nr_lumpy_failed,
-			mode, file);
+	trace_mm_vmscan_lru_isolate(sc->order, nr_to_scan, scan,
+				    nr_taken, mode, is_file_lru(lru));
 	return nr_taken;
 }
 
@@ -1316,15 +1095,16 @@
 
 	if (PageLRU(page)) {
 		struct zone *zone = page_zone(page);
+		struct lruvec *lruvec;
 
 		spin_lock_irq(&zone->lru_lock);
+		lruvec = mem_cgroup_page_lruvec(page, zone);
 		if (PageLRU(page)) {
 			int lru = page_lru(page);
-			ret = 0;
 			get_page(page);
 			ClearPageLRU(page);
-
-			del_page_from_lru_list(zone, page, lru);
+			del_page_from_lru_list(page, lruvec, lru);
+			ret = 0;
 		}
 		spin_unlock_irq(&zone->lru_lock);
 	}
@@ -1357,11 +1137,10 @@
 }
 
 static noinline_for_stack void
-putback_inactive_pages(struct mem_cgroup_zone *mz,
-		       struct list_head *page_list)
+putback_inactive_pages(struct lruvec *lruvec, struct list_head *page_list)
 {
-	struct zone_reclaim_stat *reclaim_stat = get_reclaim_stat(mz);
-	struct zone *zone = mz->zone;
+	struct zone_reclaim_stat *reclaim_stat = &lruvec->reclaim_stat;
+	struct zone *zone = lruvec_zone(lruvec);
 	LIST_HEAD(pages_to_free);
 
 	/*
@@ -1379,9 +1158,13 @@
 			spin_lock_irq(&zone->lru_lock);
 			continue;
 		}
+
+		lruvec = mem_cgroup_page_lruvec(page, zone);
+
 		SetPageLRU(page);
 		lru = page_lru(page);
-		add_page_to_lru_list(zone, page, lru);
+		add_page_to_lru_list(page, lruvec, lru);
+
 		if (is_active_lru(lru)) {
 			int file = is_file_lru(lru);
 			int numpages = hpage_nr_pages(page);
@@ -1390,7 +1173,7 @@
 		if (put_page_testzero(page)) {
 			__ClearPageLRU(page);
 			__ClearPageActive(page);
-			del_page_from_lru_list(zone, page, lru);
+			del_page_from_lru_list(page, lruvec, lru);
 
 			if (unlikely(PageCompound(page))) {
 				spin_unlock_irq(&zone->lru_lock);
@@ -1407,112 +1190,24 @@
 	list_splice(&pages_to_free, page_list);
 }
 
-static noinline_for_stack void
-update_isolated_counts(struct mem_cgroup_zone *mz,
-		       struct list_head *page_list,
-		       unsigned long *nr_anon,
-		       unsigned long *nr_file)
-{
-	struct zone *zone = mz->zone;
-	unsigned int count[NR_LRU_LISTS] = { 0, };
-	unsigned long nr_active = 0;
-	struct page *page;
-	int lru;
-
-	/*
-	 * Count pages and clear active flags
-	 */
-	list_for_each_entry(page, page_list, lru) {
-		int numpages = hpage_nr_pages(page);
-		lru = page_lru_base_type(page);
-		if (PageActive(page)) {
-			lru += LRU_ACTIVE;
-			ClearPageActive(page);
-			nr_active += numpages;
-		}
-		count[lru] += numpages;
-	}
-
-	preempt_disable();
-	__count_vm_events(PGDEACTIVATE, nr_active);
-
-	__mod_zone_page_state(zone, NR_ACTIVE_FILE,
-			      -count[LRU_ACTIVE_FILE]);
-	__mod_zone_page_state(zone, NR_INACTIVE_FILE,
-			      -count[LRU_INACTIVE_FILE]);
-	__mod_zone_page_state(zone, NR_ACTIVE_ANON,
-			      -count[LRU_ACTIVE_ANON]);
-	__mod_zone_page_state(zone, NR_INACTIVE_ANON,
-			      -count[LRU_INACTIVE_ANON]);
-
-	*nr_anon = count[LRU_ACTIVE_ANON] + count[LRU_INACTIVE_ANON];
-	*nr_file = count[LRU_ACTIVE_FILE] + count[LRU_INACTIVE_FILE];
-
-	__mod_zone_page_state(zone, NR_ISOLATED_ANON, *nr_anon);
-	__mod_zone_page_state(zone, NR_ISOLATED_FILE, *nr_file);
-	preempt_enable();
-}
-
-/*
- * Returns true if a direct reclaim should wait on pages under writeback.
- *
- * If we are direct reclaiming for contiguous pages and we do not reclaim
- * everything in the list, try again and wait for writeback IO to complete.
- * This will stall high-order allocations noticeably. Only do that when really
- * need to free the pages under high memory pressure.
- */
-static inline bool should_reclaim_stall(unsigned long nr_taken,
-					unsigned long nr_freed,
-					int priority,
-					struct scan_control *sc)
-{
-	int lumpy_stall_priority;
-
-	/* kswapd should not stall on sync IO */
-	if (current_is_kswapd())
-		return false;
-
-	/* Only stall on lumpy reclaim */
-	if (sc->reclaim_mode & RECLAIM_MODE_SINGLE)
-		return false;
-
-	/* If we have reclaimed everything on the isolated list, no stall */
-	if (nr_freed == nr_taken)
-		return false;
-
-	/*
-	 * For high-order allocations, there are two stall thresholds.
-	 * High-cost allocations stall immediately where as lower
-	 * order allocations such as stacks require the scanning
-	 * priority to be much higher before stalling.
-	 */
-	if (sc->order > PAGE_ALLOC_COSTLY_ORDER)
-		lumpy_stall_priority = DEF_PRIORITY;
-	else
-		lumpy_stall_priority = DEF_PRIORITY / 3;
-
-	return priority <= lumpy_stall_priority;
-}
-
 /*
  * shrink_inactive_list() is a helper for shrink_zone().  It returns the number
  * of reclaimed pages
  */
 static noinline_for_stack unsigned long
-shrink_inactive_list(unsigned long nr_to_scan, struct mem_cgroup_zone *mz,
-		     struct scan_control *sc, int priority, int file)
+shrink_inactive_list(unsigned long nr_to_scan, struct lruvec *lruvec,
+		     struct scan_control *sc, enum lru_list lru)
 {
 	LIST_HEAD(page_list);
 	unsigned long nr_scanned;
 	unsigned long nr_reclaimed = 0;
 	unsigned long nr_taken;
-	unsigned long nr_anon;
-	unsigned long nr_file;
 	unsigned long nr_dirty = 0;
 	unsigned long nr_writeback = 0;
-	isolate_mode_t isolate_mode = ISOLATE_INACTIVE;
-	struct zone *zone = mz->zone;
-	struct zone_reclaim_stat *reclaim_stat = get_reclaim_stat(mz);
+	isolate_mode_t isolate_mode = 0;
+	int file = is_file_lru(lru);
+	struct zone *zone = lruvec_zone(lruvec);
+	struct zone_reclaim_stat *reclaim_stat = &lruvec->reclaim_stat;
 
 	while (unlikely(too_many_isolated(zone, file, sc))) {
 		congestion_wait(BLK_RW_ASYNC, HZ/10);
@@ -1522,10 +1217,6 @@
 			return SWAP_CLUSTER_MAX;
 	}
 
-	set_reclaim_mode(priority, sc, false);
-	if (sc->reclaim_mode & RECLAIM_MODE_LUMPYRECLAIM)
-		isolate_mode |= ISOLATE_ACTIVE;
-
 	lru_add_drain();
 
 	if (!sc->may_unmap)
@@ -1535,38 +1226,30 @@
 
 	spin_lock_irq(&zone->lru_lock);
 
-	nr_taken = isolate_lru_pages(nr_to_scan, mz, &page_list, &nr_scanned,
-				     sc, isolate_mode, 0, file);
+	nr_taken = isolate_lru_pages(nr_to_scan, lruvec, &page_list,
+				     &nr_scanned, sc, isolate_mode, lru);
+
+	__mod_zone_page_state(zone, NR_LRU_BASE + lru, -nr_taken);
+	__mod_zone_page_state(zone, NR_ISOLATED_ANON + file, nr_taken);
+
 	if (global_reclaim(sc)) {
 		zone->pages_scanned += nr_scanned;
 		if (current_is_kswapd())
-			__count_zone_vm_events(PGSCAN_KSWAPD, zone,
-					       nr_scanned);
+			__count_zone_vm_events(PGSCAN_KSWAPD, zone, nr_scanned);
 		else
-			__count_zone_vm_events(PGSCAN_DIRECT, zone,
-					       nr_scanned);
+			__count_zone_vm_events(PGSCAN_DIRECT, zone, nr_scanned);
 	}
 	spin_unlock_irq(&zone->lru_lock);
 
 	if (nr_taken == 0)
 		return 0;
 
-	update_isolated_counts(mz, &page_list, &nr_anon, &nr_file);
-
-	nr_reclaimed = shrink_page_list(&page_list, mz, sc, priority,
+	nr_reclaimed = shrink_page_list(&page_list, zone, sc,
 						&nr_dirty, &nr_writeback);
 
-	/* Check if we should syncronously wait for writeback */
-	if (should_reclaim_stall(nr_taken, nr_reclaimed, priority, sc)) {
-		set_reclaim_mode(priority, sc, true);
-		nr_reclaimed += shrink_page_list(&page_list, mz, sc,
-					priority, &nr_dirty, &nr_writeback);
-	}
-
 	spin_lock_irq(&zone->lru_lock);
 
-	reclaim_stat->recent_scanned[0] += nr_anon;
-	reclaim_stat->recent_scanned[1] += nr_file;
+	reclaim_stat->recent_scanned[file] += nr_taken;
 
 	if (global_reclaim(sc)) {
 		if (current_is_kswapd())
@@ -1577,10 +1260,9 @@
 					       nr_reclaimed);
 	}
 
-	putback_inactive_pages(mz, &page_list);
+	putback_inactive_pages(lruvec, &page_list);
 
-	__mod_zone_page_state(zone, NR_ISOLATED_ANON, -nr_anon);
-	__mod_zone_page_state(zone, NR_ISOLATED_FILE, -nr_file);
+	__mod_zone_page_state(zone, NR_ISOLATED_ANON + file, -nr_taken);
 
 	spin_unlock_irq(&zone->lru_lock);
 
@@ -1609,14 +1291,15 @@
 	 * DEF_PRIORITY-6 For SWAP_CLUSTER_MAX isolated pages, throttle if any
 	 *                     isolated page is PageWriteback
 	 */
-	if (nr_writeback && nr_writeback >= (nr_taken >> (DEF_PRIORITY-priority)))
+	if (nr_writeback && nr_writeback >=
+			(nr_taken >> (DEF_PRIORITY - sc->priority)))
 		wait_iff_congested(zone, BLK_RW_ASYNC, HZ/10);
 
 	trace_mm_vmscan_lru_shrink_inactive(zone->zone_pgdat->node_id,
 		zone_idx(zone),
 		nr_scanned, nr_reclaimed,
-		priority,
-		trace_shrink_flags(file, sc->reclaim_mode));
+		sc->priority,
+		trace_shrink_flags(file));
 	return nr_reclaimed;
 }
 
@@ -1638,30 +1321,32 @@
  * But we had to alter page->flags anyway.
  */
 
-static void move_active_pages_to_lru(struct zone *zone,
+static void move_active_pages_to_lru(struct lruvec *lruvec,
 				     struct list_head *list,
 				     struct list_head *pages_to_free,
 				     enum lru_list lru)
 {
+	struct zone *zone = lruvec_zone(lruvec);
 	unsigned long pgmoved = 0;
 	struct page *page;
+	int nr_pages;
 
 	while (!list_empty(list)) {
-		struct lruvec *lruvec;
-
 		page = lru_to_page(list);
+		lruvec = mem_cgroup_page_lruvec(page, zone);
 
 		VM_BUG_ON(PageLRU(page));
 		SetPageLRU(page);
 
-		lruvec = mem_cgroup_lru_add_list(zone, page, lru);
+		nr_pages = hpage_nr_pages(page);
+		mem_cgroup_update_lru_size(lruvec, lru, nr_pages);
 		list_move(&page->lru, &lruvec->lists[lru]);
-		pgmoved += hpage_nr_pages(page);
+		pgmoved += nr_pages;
 
 		if (put_page_testzero(page)) {
 			__ClearPageLRU(page);
 			__ClearPageActive(page);
-			del_page_from_lru_list(zone, page, lru);
+			del_page_from_lru_list(page, lruvec, lru);
 
 			if (unlikely(PageCompound(page))) {
 				spin_unlock_irq(&zone->lru_lock);
@@ -1677,9 +1362,9 @@
 }
 
 static void shrink_active_list(unsigned long nr_to_scan,
-			       struct mem_cgroup_zone *mz,
+			       struct lruvec *lruvec,
 			       struct scan_control *sc,
-			       int priority, int file)
+			       enum lru_list lru)
 {
 	unsigned long nr_taken;
 	unsigned long nr_scanned;
@@ -1688,15 +1373,14 @@
 	LIST_HEAD(l_active);
 	LIST_HEAD(l_inactive);
 	struct page *page;
-	struct zone_reclaim_stat *reclaim_stat = get_reclaim_stat(mz);
+	struct zone_reclaim_stat *reclaim_stat = &lruvec->reclaim_stat;
 	unsigned long nr_rotated = 0;
-	isolate_mode_t isolate_mode = ISOLATE_ACTIVE;
-	struct zone *zone = mz->zone;
+	isolate_mode_t isolate_mode = 0;
+	int file = is_file_lru(lru);
+	struct zone *zone = lruvec_zone(lruvec);
 
 	lru_add_drain();
 
-	reset_reclaim_mode(sc);
-
 	if (!sc->may_unmap)
 		isolate_mode |= ISOLATE_UNMAPPED;
 	if (!sc->may_writepage)
@@ -1704,18 +1388,15 @@
 
 	spin_lock_irq(&zone->lru_lock);
 
-	nr_taken = isolate_lru_pages(nr_to_scan, mz, &l_hold, &nr_scanned, sc,
-				     isolate_mode, 1, file);
+	nr_taken = isolate_lru_pages(nr_to_scan, lruvec, &l_hold,
+				     &nr_scanned, sc, isolate_mode, lru);
 	if (global_reclaim(sc))
 		zone->pages_scanned += nr_scanned;
 
 	reclaim_stat->recent_scanned[file] += nr_taken;
 
 	__count_zone_vm_events(PGREFILL, zone, nr_scanned);
-	if (file)
-		__mod_zone_page_state(zone, NR_ACTIVE_FILE, -nr_taken);
-	else
-		__mod_zone_page_state(zone, NR_ACTIVE_ANON, -nr_taken);
+	__mod_zone_page_state(zone, NR_LRU_BASE + lru, -nr_taken);
 	__mod_zone_page_state(zone, NR_ISOLATED_ANON + file, nr_taken);
 	spin_unlock_irq(&zone->lru_lock);
 
@@ -1737,7 +1418,8 @@
 			}
 		}
 
-		if (page_referenced(page, 0, mz->mem_cgroup, &vm_flags)) {
+		if (page_referenced(page, 0, sc->target_mem_cgroup,
+				    &vm_flags)) {
 			nr_rotated += hpage_nr_pages(page);
 			/*
 			 * Identify referenced, file-backed active pages and
@@ -1770,10 +1452,8 @@
 	 */
 	reclaim_stat->recent_rotated[file] += nr_rotated;
 
-	move_active_pages_to_lru(zone, &l_active, &l_hold,
-						LRU_ACTIVE + file * LRU_FILE);
-	move_active_pages_to_lru(zone, &l_inactive, &l_hold,
-						LRU_BASE   + file * LRU_FILE);
+	move_active_pages_to_lru(lruvec, &l_active, &l_hold, lru);
+	move_active_pages_to_lru(lruvec, &l_inactive, &l_hold, lru - LRU_ACTIVE);
 	__mod_zone_page_state(zone, NR_ISOLATED_ANON + file, -nr_taken);
 	spin_unlock_irq(&zone->lru_lock);
 
@@ -1796,13 +1476,12 @@
 
 /**
  * inactive_anon_is_low - check if anonymous pages need to be deactivated
- * @zone: zone to check
- * @sc:   scan control of this context
+ * @lruvec: LRU vector to check
  *
  * Returns true if the zone does not have enough inactive anon pages,
  * meaning some active anon pages need to be deactivated.
  */
-static int inactive_anon_is_low(struct mem_cgroup_zone *mz)
+static int inactive_anon_is_low(struct lruvec *lruvec)
 {
 	/*
 	 * If we don't have swap space, anonymous page deactivation
@@ -1811,14 +1490,13 @@
 	if (!total_swap_pages)
 		return 0;
 
-	if (!scanning_global_lru(mz))
-		return mem_cgroup_inactive_anon_is_low(mz->mem_cgroup,
-						       mz->zone);
+	if (!mem_cgroup_disabled())
+		return mem_cgroup_inactive_anon_is_low(lruvec);
 
-	return inactive_anon_is_low_global(mz->zone);
+	return inactive_anon_is_low_global(lruvec_zone(lruvec));
 }
 #else
-static inline int inactive_anon_is_low(struct mem_cgroup_zone *mz)
+static inline int inactive_anon_is_low(struct lruvec *lruvec)
 {
 	return 0;
 }
@@ -1836,7 +1514,7 @@
 
 /**
  * inactive_file_is_low - check if file pages need to be deactivated
- * @mz: memory cgroup and zone to check
+ * @lruvec: LRU vector to check
  *
  * When the system is doing streaming IO, memory pressure here
  * ensures that active file pages get deactivated, until more
@@ -1848,44 +1526,39 @@
  * This uses a different ratio than the anonymous pages, because
  * the page cache uses a use-once replacement algorithm.
  */
-static int inactive_file_is_low(struct mem_cgroup_zone *mz)
+static int inactive_file_is_low(struct lruvec *lruvec)
 {
-	if (!scanning_global_lru(mz))
-		return mem_cgroup_inactive_file_is_low(mz->mem_cgroup,
-						       mz->zone);
+	if (!mem_cgroup_disabled())
+		return mem_cgroup_inactive_file_is_low(lruvec);
 
-	return inactive_file_is_low_global(mz->zone);
+	return inactive_file_is_low_global(lruvec_zone(lruvec));
 }
 
-static int inactive_list_is_low(struct mem_cgroup_zone *mz, int file)
+static int inactive_list_is_low(struct lruvec *lruvec, enum lru_list lru)
 {
-	if (file)
-		return inactive_file_is_low(mz);
+	if (is_file_lru(lru))
+		return inactive_file_is_low(lruvec);
 	else
-		return inactive_anon_is_low(mz);
+		return inactive_anon_is_low(lruvec);
 }
 
 static unsigned long shrink_list(enum lru_list lru, unsigned long nr_to_scan,
-				 struct mem_cgroup_zone *mz,
-				 struct scan_control *sc, int priority)
+				 struct lruvec *lruvec, struct scan_control *sc)
 {
-	int file = is_file_lru(lru);
-
 	if (is_active_lru(lru)) {
-		if (inactive_list_is_low(mz, file))
-			shrink_active_list(nr_to_scan, mz, sc, priority, file);
+		if (inactive_list_is_low(lruvec, lru))
+			shrink_active_list(nr_to_scan, lruvec, sc, lru);
 		return 0;
 	}
 
-	return shrink_inactive_list(nr_to_scan, mz, sc, priority, file);
+	return shrink_inactive_list(nr_to_scan, lruvec, sc, lru);
 }
 
-static int vmscan_swappiness(struct mem_cgroup_zone *mz,
-			     struct scan_control *sc)
+static int vmscan_swappiness(struct scan_control *sc)
 {
 	if (global_reclaim(sc))
 		return vm_swappiness;
-	return mem_cgroup_swappiness(mz->mem_cgroup);
+	return mem_cgroup_swappiness(sc->target_mem_cgroup);
 }
 
 /*
@@ -1896,17 +1569,18 @@
  *
  * nr[0] = anon pages to scan; nr[1] = file pages to scan
  */
-static void get_scan_count(struct mem_cgroup_zone *mz, struct scan_control *sc,
-			   unsigned long *nr, int priority)
+static void get_scan_count(struct lruvec *lruvec, struct scan_control *sc,
+			   unsigned long *nr)
 {
 	unsigned long anon, file, free;
 	unsigned long anon_prio, file_prio;
 	unsigned long ap, fp;
-	struct zone_reclaim_stat *reclaim_stat = get_reclaim_stat(mz);
+	struct zone_reclaim_stat *reclaim_stat = &lruvec->reclaim_stat;
 	u64 fraction[2], denominator;
 	enum lru_list lru;
 	int noswap = 0;
 	bool force_scan = false;
+	struct zone *zone = lruvec_zone(lruvec);
 
 	/*
 	 * If the zone or memcg is small, nr[l] can be 0.  This
@@ -1918,7 +1592,7 @@
 	 * latencies, so it's better to scan a minimum amount there as
 	 * well.
 	 */
-	if (current_is_kswapd() && mz->zone->all_unreclaimable)
+	if (current_is_kswapd() && zone->all_unreclaimable)
 		force_scan = true;
 	if (!global_reclaim(sc))
 		force_scan = true;
@@ -1932,16 +1606,16 @@
 		goto out;
 	}
 
-	anon  = zone_nr_lru_pages(mz, LRU_ACTIVE_ANON) +
-		zone_nr_lru_pages(mz, LRU_INACTIVE_ANON);
-	file  = zone_nr_lru_pages(mz, LRU_ACTIVE_FILE) +
-		zone_nr_lru_pages(mz, LRU_INACTIVE_FILE);
+	anon  = get_lru_size(lruvec, LRU_ACTIVE_ANON) +
+		get_lru_size(lruvec, LRU_INACTIVE_ANON);
+	file  = get_lru_size(lruvec, LRU_ACTIVE_FILE) +
+		get_lru_size(lruvec, LRU_INACTIVE_FILE);
 
 	if (global_reclaim(sc)) {
-		free  = zone_page_state(mz->zone, NR_FREE_PAGES);
+		free  = zone_page_state(zone, NR_FREE_PAGES);
 		/* If we have very few page cache pages,
 		   force-scan anon pages. */
-		if (unlikely(file + free <= high_wmark_pages(mz->zone))) {
+		if (unlikely(file + free <= high_wmark_pages(zone))) {
 			fraction[0] = 1;
 			fraction[1] = 0;
 			denominator = 1;
@@ -1953,8 +1627,8 @@
 	 * With swappiness at 100, anonymous and file have the same priority.
 	 * This scanning priority is essentially the inverse of IO cost.
 	 */
-	anon_prio = vmscan_swappiness(mz, sc);
-	file_prio = 200 - vmscan_swappiness(mz, sc);
+	anon_prio = vmscan_swappiness(sc);
+	file_prio = 200 - anon_prio;
 
 	/*
 	 * OK, so we have swap space and a fair amount of page cache
@@ -1967,7 +1641,7 @@
 	 *
 	 * anon in [0], file in [1]
 	 */
-	spin_lock_irq(&mz->zone->lru_lock);
+	spin_lock_irq(&zone->lru_lock);
 	if (unlikely(reclaim_stat->recent_scanned[0] > anon / 4)) {
 		reclaim_stat->recent_scanned[0] /= 2;
 		reclaim_stat->recent_rotated[0] /= 2;
@@ -1983,12 +1657,12 @@
 	 * proportional to the fraction of recently scanned pages on
 	 * each list that were recently referenced and in active use.
 	 */
-	ap = (anon_prio + 1) * (reclaim_stat->recent_scanned[0] + 1);
+	ap = anon_prio * (reclaim_stat->recent_scanned[0] + 1);
 	ap /= reclaim_stat->recent_rotated[0] + 1;
 
-	fp = (file_prio + 1) * (reclaim_stat->recent_scanned[1] + 1);
+	fp = file_prio * (reclaim_stat->recent_scanned[1] + 1);
 	fp /= reclaim_stat->recent_rotated[1] + 1;
-	spin_unlock_irq(&mz->zone->lru_lock);
+	spin_unlock_irq(&zone->lru_lock);
 
 	fraction[0] = ap;
 	fraction[1] = fp;
@@ -1998,9 +1672,9 @@
 		int file = is_file_lru(lru);
 		unsigned long scan;
 
-		scan = zone_nr_lru_pages(mz, lru);
-		if (priority || noswap) {
-			scan >>= priority;
+		scan = get_lru_size(lruvec, lru);
+		if (sc->priority || noswap || !vmscan_swappiness(sc)) {
+			scan >>= sc->priority;
 			if (!scan && force_scan)
 				scan = SWAP_CLUSTER_MAX;
 			scan = div64_u64(scan * fraction[file], denominator);
@@ -2009,14 +1683,25 @@
 	}
 }
 
+/* Use reclaim/compaction for costly allocs or under memory pressure */
+static bool in_reclaim_compaction(struct scan_control *sc)
+{
+	if (COMPACTION_BUILD && sc->order &&
+			(sc->order > PAGE_ALLOC_COSTLY_ORDER ||
+			 sc->priority < DEF_PRIORITY - 2))
+		return true;
+
+	return false;
+}
+
 /*
- * Reclaim/compaction depends on a number of pages being freed. To avoid
- * disruption to the system, a small number of order-0 pages continue to be
- * rotated and reclaimed in the normal fashion. However, by the time we get
- * back to the allocator and call try_to_compact_zone(), we ensure that
- * there are enough free pages for it to be likely successful
+ * Reclaim/compaction is used for high-order allocation requests. It reclaims
+ * order-0 pages before compacting the zone. should_continue_reclaim() returns
+ * true if more pages should be reclaimed so that when the page allocator
+ * calls try_to_compact_zone() it will have enough free pages to succeed.
+ * It will give up earlier than that if there is difficulty reclaiming pages.
  */
-static inline bool should_continue_reclaim(struct mem_cgroup_zone *mz,
+static inline bool should_continue_reclaim(struct lruvec *lruvec,
 					unsigned long nr_reclaimed,
 					unsigned long nr_scanned,
 					struct scan_control *sc)
@@ -2025,7 +1710,7 @@
 	unsigned long inactive_lru_pages;
 
 	/* If not in reclaim/compaction mode, stop */
-	if (!(sc->reclaim_mode & RECLAIM_MODE_COMPACTION))
+	if (!in_reclaim_compaction(sc))
 		return false;
 
 	/* Consider stopping depending on scan and reclaim activity */
@@ -2056,15 +1741,15 @@
 	 * inactive lists are large enough, continue reclaiming
 	 */
 	pages_for_compaction = (2UL << sc->order);
-	inactive_lru_pages = zone_nr_lru_pages(mz, LRU_INACTIVE_FILE);
+	inactive_lru_pages = get_lru_size(lruvec, LRU_INACTIVE_FILE);
 	if (nr_swap_pages > 0)
-		inactive_lru_pages += zone_nr_lru_pages(mz, LRU_INACTIVE_ANON);
+		inactive_lru_pages += get_lru_size(lruvec, LRU_INACTIVE_ANON);
 	if (sc->nr_reclaimed < pages_for_compaction &&
 			inactive_lru_pages > pages_for_compaction)
 		return true;
 
 	/* If compaction would go ahead or the allocation would succeed, stop */
-	switch (compaction_suitable(mz->zone, sc->order)) {
+	switch (compaction_suitable(lruvec_zone(lruvec), sc->order)) {
 	case COMPACT_PARTIAL:
 	case COMPACT_CONTINUE:
 		return false;
@@ -2076,8 +1761,7 @@
 /*
  * This is a basic per-zone page freer.  Used by both kswapd and direct reclaim.
  */
-static void shrink_mem_cgroup_zone(int priority, struct mem_cgroup_zone *mz,
-				   struct scan_control *sc)
+static void shrink_lruvec(struct lruvec *lruvec, struct scan_control *sc)
 {
 	unsigned long nr[NR_LRU_LISTS];
 	unsigned long nr_to_scan;
@@ -2089,7 +1773,7 @@
 restart:
 	nr_reclaimed = 0;
 	nr_scanned = sc->nr_scanned;
-	get_scan_count(mz, sc, nr, priority);
+	get_scan_count(lruvec, sc, nr);
 
 	blk_start_plug(&plug);
 	while (nr[LRU_INACTIVE_ANON] || nr[LRU_ACTIVE_FILE] ||
@@ -2101,7 +1785,7 @@
 				nr[lru] -= nr_to_scan;
 
 				nr_reclaimed += shrink_list(lru, nr_to_scan,
-							    mz, sc, priority);
+							    lruvec, sc);
 			}
 		}
 		/*
@@ -2112,7 +1796,8 @@
 		 * with multiple processes reclaiming pages, the total
 		 * freeing target can get unreasonably large.
 		 */
-		if (nr_reclaimed >= nr_to_reclaim && priority < DEF_PRIORITY)
+		if (nr_reclaimed >= nr_to_reclaim &&
+		    sc->priority < DEF_PRIORITY)
 			break;
 	}
 	blk_finish_plug(&plug);
@@ -2122,35 +1807,33 @@
 	 * Even if we did not try to evict anon pages at all, we want to
 	 * rebalance the anon lru active/inactive ratio.
 	 */
-	if (inactive_anon_is_low(mz))
-		shrink_active_list(SWAP_CLUSTER_MAX, mz, sc, priority, 0);
+	if (inactive_anon_is_low(lruvec))
+		shrink_active_list(SWAP_CLUSTER_MAX, lruvec,
+				   sc, LRU_ACTIVE_ANON);
 
 	/* reclaim/compaction might need reclaim to continue */
-	if (should_continue_reclaim(mz, nr_reclaimed,
-					sc->nr_scanned - nr_scanned, sc))
+	if (should_continue_reclaim(lruvec, nr_reclaimed,
+				    sc->nr_scanned - nr_scanned, sc))
 		goto restart;
 
 	throttle_vm_writeout(sc->gfp_mask);
 }
 
-static void shrink_zone(int priority, struct zone *zone,
-			struct scan_control *sc)
+static void shrink_zone(struct zone *zone, struct scan_control *sc)
 {
 	struct mem_cgroup *root = sc->target_mem_cgroup;
 	struct mem_cgroup_reclaim_cookie reclaim = {
 		.zone = zone,
-		.priority = priority,
+		.priority = sc->priority,
 	};
 	struct mem_cgroup *memcg;
 
 	memcg = mem_cgroup_iter(root, NULL, &reclaim);
 	do {
-		struct mem_cgroup_zone mz = {
-			.mem_cgroup = memcg,
-			.zone = zone,
-		};
+		struct lruvec *lruvec = mem_cgroup_zone_lruvec(zone, memcg);
 
-		shrink_mem_cgroup_zone(priority, &mz, sc);
+		shrink_lruvec(lruvec, sc);
+
 		/*
 		 * Limit reclaim has historically picked one memcg and
 		 * scanned it with decreasing priority levels until
@@ -2226,8 +1909,7 @@
  * the caller that it should consider retrying the allocation instead of
  * further reclaim.
  */
-static bool shrink_zones(int priority, struct zonelist *zonelist,
-					struct scan_control *sc)
+static bool shrink_zones(struct zonelist *zonelist, struct scan_control *sc)
 {
 	struct zoneref *z;
 	struct zone *zone;
@@ -2254,7 +1936,8 @@
 		if (global_reclaim(sc)) {
 			if (!cpuset_zone_allowed_hardwall(zone, GFP_KERNEL))
 				continue;
-			if (zone->all_unreclaimable && priority != DEF_PRIORITY)
+			if (zone->all_unreclaimable &&
+					sc->priority != DEF_PRIORITY)
 				continue;	/* Let kswapd poll it */
 			if (COMPACTION_BUILD) {
 				/*
@@ -2286,7 +1969,7 @@
 			/* need some check for avoid more shrink_zone() */
 		}
 
-		shrink_zone(priority, zone, sc);
+		shrink_zone(zone, sc);
 	}
 
 	return aborted_reclaim;
@@ -2337,7 +2020,6 @@
 					struct scan_control *sc,
 					struct shrink_control *shrink)
 {
-	int priority;
 	unsigned long total_scanned = 0;
 	struct reclaim_state *reclaim_state = current->reclaim_state;
 	struct zoneref *z;
@@ -2350,11 +2032,9 @@
 	if (global_reclaim(sc))
 		count_vm_event(ALLOCSTALL);
 
-	for (priority = DEF_PRIORITY; priority >= 0; priority--) {
+	do {
 		sc->nr_scanned = 0;
-		if (!priority)
-			disable_swap_token(sc->target_mem_cgroup);
-		aborted_reclaim = shrink_zones(priority, zonelist, sc);
+		aborted_reclaim = shrink_zones(zonelist, sc);
 
 		/*
 		 * Don't shrink slabs when reclaiming memory from
@@ -2396,7 +2076,7 @@
 
 		/* Take a nap, wait for some writeback to complete */
 		if (!sc->hibernation_mode && sc->nr_scanned &&
-		    priority < DEF_PRIORITY - 2) {
+		    sc->priority < DEF_PRIORITY - 2) {
 			struct zone *preferred_zone;
 
 			first_zones_zonelist(zonelist, gfp_zone(sc->gfp_mask),
@@ -2404,7 +2084,7 @@
 						&preferred_zone);
 			wait_iff_congested(preferred_zone, BLK_RW_ASYNC, HZ/10);
 		}
-	}
+	} while (--sc->priority >= 0);
 
 out:
 	delayacct_freepages_end();
@@ -2442,6 +2122,7 @@
 		.may_unmap = 1,
 		.may_swap = 1,
 		.order = order,
+		.priority = DEF_PRIORITY,
 		.target_mem_cgroup = NULL,
 		.nodemask = nodemask,
 	};
@@ -2474,17 +2155,15 @@
 		.may_unmap = 1,
 		.may_swap = !noswap,
 		.order = 0,
+		.priority = 0,
 		.target_mem_cgroup = memcg,
 	};
-	struct mem_cgroup_zone mz = {
-		.mem_cgroup = memcg,
-		.zone = zone,
-	};
+	struct lruvec *lruvec = mem_cgroup_zone_lruvec(zone, memcg);
 
 	sc.gfp_mask = (gfp_mask & GFP_RECLAIM_MASK) |
 			(GFP_HIGHUSER_MOVABLE & ~GFP_RECLAIM_MASK);
 
-	trace_mm_vmscan_memcg_softlimit_reclaim_begin(0,
+	trace_mm_vmscan_memcg_softlimit_reclaim_begin(sc.order,
 						      sc.may_writepage,
 						      sc.gfp_mask);
 
@@ -2495,7 +2174,7 @@
 	 * will pick up pages from other mem cgroup's as well. We hack
 	 * the priority and make it zero.
 	 */
-	shrink_mem_cgroup_zone(0, &mz, &sc);
+	shrink_lruvec(lruvec, &sc);
 
 	trace_mm_vmscan_memcg_softlimit_reclaim_end(sc.nr_reclaimed);
 
@@ -2516,6 +2195,7 @@
 		.may_swap = !noswap,
 		.nr_to_reclaim = SWAP_CLUSTER_MAX,
 		.order = 0,
+		.priority = DEF_PRIORITY,
 		.target_mem_cgroup = memcg,
 		.nodemask = NULL, /* we don't care the placement */
 		.gfp_mask = (gfp_mask & GFP_RECLAIM_MASK) |
@@ -2546,8 +2226,7 @@
 }
 #endif
 
-static void age_active_anon(struct zone *zone, struct scan_control *sc,
-			    int priority)
+static void age_active_anon(struct zone *zone, struct scan_control *sc)
 {
 	struct mem_cgroup *memcg;
 
@@ -2556,14 +2235,11 @@
 
 	memcg = mem_cgroup_iter(NULL, NULL, NULL);
 	do {
-		struct mem_cgroup_zone mz = {
-			.mem_cgroup = memcg,
-			.zone = zone,
-		};
+		struct lruvec *lruvec = mem_cgroup_zone_lruvec(zone, memcg);
 
-		if (inactive_anon_is_low(&mz))
-			shrink_active_list(SWAP_CLUSTER_MAX, &mz,
-					   sc, priority, 0);
+		if (inactive_anon_is_low(lruvec))
+			shrink_active_list(SWAP_CLUSTER_MAX, lruvec,
+					   sc, LRU_ACTIVE_ANON);
 
 		memcg = mem_cgroup_iter(NULL, memcg, NULL);
 	} while (memcg);
@@ -2672,7 +2348,6 @@
 {
 	int all_zones_ok;
 	unsigned long balanced;
-	int priority;
 	int i;
 	int end_zone = 0;	/* Inclusive.  0 = ZONE_DMA */
 	unsigned long total_scanned;
@@ -2696,18 +2371,15 @@
 	};
 loop_again:
 	total_scanned = 0;
+	sc.priority = DEF_PRIORITY;
 	sc.nr_reclaimed = 0;
 	sc.may_writepage = !laptop_mode;
 	count_vm_event(PAGEOUTRUN);
 
-	for (priority = DEF_PRIORITY; priority >= 0; priority--) {
+	do {
 		unsigned long lru_pages = 0;
 		int has_under_min_watermark_zone = 0;
 
-		/* The swap token gets in the way of swapout... */
-		if (!priority)
-			disable_swap_token(NULL);
-
 		all_zones_ok = 1;
 		balanced = 0;
 
@@ -2721,14 +2393,15 @@
 			if (!populated_zone(zone))
 				continue;
 
-			if (zone->all_unreclaimable && priority != DEF_PRIORITY)
+			if (zone->all_unreclaimable &&
+			    sc.priority != DEF_PRIORITY)
 				continue;
 
 			/*
 			 * Do some background aging of the anon list, to give
 			 * pages a chance to be referenced before reclaiming.
 			 */
-			age_active_anon(zone, &sc, priority);
+			age_active_anon(zone, &sc);
 
 			/*
 			 * If the number of buffer_heads in the machine
@@ -2776,7 +2449,8 @@
 			if (!populated_zone(zone))
 				continue;
 
-			if (zone->all_unreclaimable && priority != DEF_PRIORITY)
+			if (zone->all_unreclaimable &&
+			    sc.priority != DEF_PRIORITY)
 				continue;
 
 			sc.nr_scanned = 0;
@@ -2820,7 +2494,7 @@
 				    !zone_watermark_ok_safe(zone, testorder,
 					high_wmark_pages(zone) + balance_gap,
 					end_zone, 0)) {
-				shrink_zone(priority, zone, &sc);
+				shrink_zone(zone, &sc);
 
 				reclaim_state->reclaimed_slab = 0;
 				nr_slab = shrink_slab(&shrink, sc.nr_scanned, lru_pages);
@@ -2877,7 +2551,7 @@
 		 * OK, kswapd is getting into trouble.  Take a nap, then take
 		 * another pass across the zones.
 		 */
-		if (total_scanned && (priority < DEF_PRIORITY - 2)) {
+		if (total_scanned && (sc.priority < DEF_PRIORITY - 2)) {
 			if (has_under_min_watermark_zone)
 				count_vm_event(KSWAPD_SKIP_CONGESTION_WAIT);
 			else
@@ -2892,7 +2566,7 @@
 		 */
 		if (sc.nr_reclaimed >= SWAP_CLUSTER_MAX)
 			break;
-	}
+	} while (--sc.priority >= 0);
 out:
 
 	/*
@@ -2942,7 +2616,8 @@
 			if (!populated_zone(zone))
 				continue;
 
-			if (zone->all_unreclaimable && priority != DEF_PRIORITY)
+			if (zone->all_unreclaimable &&
+			    sc.priority != DEF_PRIORITY)
 				continue;
 
 			/* Would compaction fail due to lack of free memory? */
@@ -3209,6 +2884,7 @@
 		.nr_to_reclaim = nr_to_reclaim,
 		.hibernation_mode = 1,
 		.order = 0,
+		.priority = DEF_PRIORITY,
 	};
 	struct shrink_control shrink = {
 		.gfp_mask = sc.gfp_mask,
@@ -3386,7 +3062,6 @@
 	const unsigned long nr_pages = 1 << order;
 	struct task_struct *p = current;
 	struct reclaim_state reclaim_state;
-	int priority;
 	struct scan_control sc = {
 		.may_writepage = !!(zone_reclaim_mode & RECLAIM_WRITE),
 		.may_unmap = !!(zone_reclaim_mode & RECLAIM_SWAP),
@@ -3395,6 +3070,7 @@
 				       SWAP_CLUSTER_MAX),
 		.gfp_mask = gfp_mask,
 		.order = order,
+		.priority = ZONE_RECLAIM_PRIORITY,
 	};
 	struct shrink_control shrink = {
 		.gfp_mask = sc.gfp_mask,
@@ -3417,11 +3093,9 @@
 		 * Free memory by calling shrink zone with increasing
 		 * priorities until we have enough memory freed.
 		 */
-		priority = ZONE_RECLAIM_PRIORITY;
 		do {
-			shrink_zone(priority, zone, &sc);
-			priority--;
-		} while (priority >= 0 && sc.nr_reclaimed < nr_pages);
+			shrink_zone(zone, &sc);
+		} while (sc.nr_reclaimed < nr_pages && --sc.priority >= 0);
 	}
 
 	nr_slab_pages0 = zone_page_state(zone, NR_SLAB_RECLAIMABLE);
@@ -3536,7 +3210,7 @@
 	if (mapping_unevictable(page_mapping(page)))
 		return 0;
 
-	if (PageMlocked(page) || (vma && is_mlocked_vma(vma, page)))
+	if (PageMlocked(page) || (vma && mlocked_vma_newpage(vma, page)))
 		return 0;
 
 	return 1;
@@ -3572,6 +3246,7 @@
 			zone = pagezone;
 			spin_lock_irq(&zone->lru_lock);
 		}
+		lruvec = mem_cgroup_page_lruvec(page, zone);
 
 		if (!PageLRU(page) || !PageUnevictable(page))
 			continue;
@@ -3581,11 +3256,8 @@
 
 			VM_BUG_ON(PageActive(page));
 			ClearPageUnevictable(page);
-			__dec_zone_state(zone, NR_UNEVICTABLE);
-			lruvec = mem_cgroup_lru_move_lists(zone, page,
-						LRU_UNEVICTABLE, lru);
-			list_move(&page->lru, &lruvec->lists[lru]);
-			__inc_zone_state(zone, NR_INACTIVE_ANON + lru);
+			del_page_from_lru_list(page, lruvec, LRU_UNEVICTABLE);
+			add_page_to_lru_list(page, lruvec, lru);
 			pgrescued++;
 		}
 	}
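
For reference, a user-space sketch of the anon/file balancing that get_scan_count() performs after this series. The LRU sizes, reclaim_stat counters and priority below are hypothetical; only the arithmetic mirrors the new code. Dropping the "+ 1" from the priority factors means swappiness 0 now yields zero anonymous scan pressure rather than a small residual one.

	/*
	 * Illustrative sketch, not part of the patch: models the new
	 * get_scan_count() arithmetic with made-up per-lruvec numbers.
	 */
	#include <stdio.h>
	#include <stdint.h>

	static uint64_t scan_target(uint64_t lru_size, int priority,
				    uint64_t frac, uint64_t denom)
	{
		uint64_t scan = lru_size >> priority;
		return scan * frac / denom;	/* div64_u64() in the kernel */
	}

	int main(void)
	{
		/* hypothetical reclaim_stat and LRU state */
		uint64_t recent_scanned[2] = { 4000, 12000 };	/* [0]=anon [1]=file */
		uint64_t recent_rotated[2] = { 1000, 2000 };
		uint64_t anon = 100000, file = 300000;
		int priority = 10;

		for (int swappiness = 0; swappiness <= 60; swappiness += 60) {
			uint64_t anon_prio = swappiness;
			uint64_t file_prio = 200 - swappiness;

			/* no "+ 1" on the priority factor any more */
			uint64_t ap = anon_prio * (recent_scanned[0] + 1) /
				      (recent_rotated[0] + 1);
			uint64_t fp = file_prio * (recent_scanned[1] + 1) /
				      (recent_rotated[1] + 1);
			uint64_t denom = ap + fp + 1;

			printf("swappiness %2d: anon scan %llu, file scan %llu\n",
			       swappiness,
			       (unsigned long long)scan_target(anon, priority, ap, denom),
			       (unsigned long long)scan_target(file, priority, fp, denom));
		}
		return 0;
	}

With these numbers, swappiness 0 prints an anon scan target of 0 while the file target stays essentially unchanged.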
diff --git a/mm/vmstat.c b/mm/vmstat.c
index 7db1b9b..1bbbbd9 100644
--- a/mm/vmstat.c
+++ b/mm/vmstat.c
@@ -613,6 +613,9 @@
 	"Reclaimable",
 	"Movable",
 	"Reserve",
+#ifdef CONFIG_CMA
+	"CMA",
+#endif
 	"Isolate",
 };
 
@@ -1220,7 +1223,6 @@
 #if defined(CONFIG_DEBUG_FS) && defined(CONFIG_COMPACTION)
 #include <linux/debugfs.h>
 
-static struct dentry *extfrag_debug_root;
 
 /*
  * Return an index indicating how much of the available free memory is
@@ -1358,19 +1360,24 @@
 
 static int __init extfrag_debug_init(void)
 {
+	struct dentry *extfrag_debug_root;
+
 	extfrag_debug_root = debugfs_create_dir("extfrag", NULL);
 	if (!extfrag_debug_root)
 		return -ENOMEM;
 
 	if (!debugfs_create_file("unusable_index", 0444,
 			extfrag_debug_root, NULL, &unusable_file_ops))
-		return -ENOMEM;
+		goto fail;
 
 	if (!debugfs_create_file("extfrag_index", 0444,
 			extfrag_debug_root, NULL, &extfrag_file_ops))
-		return -ENOMEM;
+		goto fail;
 
 	return 0;
+fail:
+	debugfs_remove_recursive(extfrag_debug_root);
+	return -ENOMEM;
 }
 
 module_init(extfrag_debug_init);
diff --git a/net/9p/protocol.c b/net/9p/protocol.c
index 9ee48cb..3d33ecf 100644
--- a/net/9p/protocol.c
+++ b/net/9p/protocol.c
@@ -368,7 +368,7 @@
 				const char *sptr = va_arg(ap, const char *);
 				uint16_t len = 0;
 				if (sptr)
-					len = min_t(uint16_t, strlen(sptr),
+					len = min_t(size_t, strlen(sptr),
 								USHRT_MAX);
 
 				errcode = p9pdu_writef(pdu, proto_version,
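
The switch from min_t(uint16_t, ...) to min_t(size_t, ...) above matters because min_t() casts both operands to the named type before comparing; with a 16-bit type an oversized strlen() wraps around instead of being clamped to USHRT_MAX. A small user-space illustration (the 70000-byte length and the simplified min_t() macro are stand-ins, not the kernel definition):

	#include <stdio.h>
	#include <stdint.h>
	#include <limits.h>

	/* simplified stand-in for the kernel's min_t(): cast both sides to 'type' */
	#define min_t(type, a, b) ((type)(a) < (type)(b) ? (type)(a) : (type)(b))

	int main(void)
	{
		size_t actual = 70000;		/* pretend strlen() returned this */

		uint16_t broken = min_t(uint16_t, actual, USHRT_MAX);
		uint16_t fixed  = min_t(size_t,   actual, USHRT_MAX);

		/* broken: 70000 wraps to 4464 before the comparison */
		printf("min_t(uint16_t, 70000, USHRT_MAX) = %u\n", broken);
		/* fixed: comparison happens in size_t, result clamps to 65535 */
		printf("min_t(size_t,   70000, USHRT_MAX) = %u\n", fixed);
		return 0;
	}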
diff --git a/net/9p/trans_virtio.c b/net/9p/trans_virtio.c
index 5af18d1..2a16765 100644
--- a/net/9p/trans_virtio.c
+++ b/net/9p/trans_virtio.c
@@ -192,10 +192,10 @@
 		s = rest_of_page(data);
 		if (s > count)
 			s = count;
+		BUG_ON(index > limit);
 		sg_set_buf(&sg[index++], data, s);
 		count -= s;
 		data += s;
-		BUG_ON(index > limit);
 	}
 
 	return index-start;
diff --git a/net/appletalk/ddp.c b/net/appletalk/ddp.c
index 0301b32..8685296 100644
--- a/net/appletalk/ddp.c
+++ b/net/appletalk/ddp.c
@@ -1208,9 +1208,7 @@
 	if (addr->sat_addr.s_node == ATADDR_BCAST &&
 	    !sock_flag(sk, SOCK_BROADCAST)) {
 #if 1
-		printk(KERN_WARNING "%s is broken and did not set "
-				    "SO_BROADCAST. It will break when 2.2 is "
-				    "released.\n",
+		pr_warn("atalk_connect: %s is broken and did not set SO_BROADCAST.\n",
 			current->comm);
 #else
 		return -EACCES;
diff --git a/net/batman-adv/routing.c b/net/batman-adv/routing.c
index 840e2c6..015471d 100644
--- a/net/batman-adv/routing.c
+++ b/net/batman-adv/routing.c
@@ -617,6 +617,8 @@
 			 * changes */
 			if (skb_linearize(skb) < 0)
 				goto out;
+			/* skb_linearize() possibly changed skb->data */
+			tt_query = (struct tt_query_packet *)skb->data;
 
 			tt_len = tt_query->tt_data * sizeof(struct tt_change);
 
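
The tt_query re-initialisation added above follows a general rule: any call that may rebuild or move a buffer (skb_linearize() here) invalidates pointers previously derived from it. A user-space analogue using realloc(), with made-up buffer contents, shows the same recompute-after-move pattern:

	#include <stdio.h>
	#include <stdlib.h>
	#include <string.h>

	int main(void)
	{
		char *buf = malloc(16);
		if (!buf)
			return 1;
		strcpy(buf, "header:payload");

		char *payload = buf + 7;	/* pointer into the old buffer */
		printf("before: %s\n", payload);

		/* growing the buffer may move it, much as skb_linearize()
		 * may rebuild skb->data; the old 'payload' pointer is now stale */
		char *bigger = realloc(buf, 4096);
		if (!bigger) {
			free(buf);
			return 1;
		}

		payload = bigger + 7;		/* recompute, as the patch does */
		printf("after:  %s\n", payload);
		free(bigger);
		return 0;
	}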
diff --git a/net/batman-adv/translation-table.c b/net/batman-adv/translation-table.c
index a66c2dc..2ab83d7 100644
--- a/net/batman-adv/translation-table.c
+++ b/net/batman-adv/translation-table.c
@@ -141,13 +141,14 @@
 	struct tt_orig_list_entry *orig_entry;
 
 	orig_entry = container_of(rcu, struct tt_orig_list_entry, rcu);
-	atomic_dec(&orig_entry->orig_node->tt_size);
 	orig_node_free_ref(orig_entry->orig_node);
 	kfree(orig_entry);
 }
 
 static void tt_orig_list_entry_free_ref(struct tt_orig_list_entry *orig_entry)
 {
+	/* to avoid race conditions, immediately decrease the tt counter */
+	atomic_dec(&orig_entry->orig_node->tt_size);
 	call_rcu(&orig_entry->rcu, tt_orig_list_entry_free_rcu);
 }
 
@@ -910,7 +911,6 @@
 		}
 		spin_unlock_bh(list_lock);
 	}
-	atomic_set(&orig_node->tt_size, 0);
 	orig_node->tt_initialised = false;
 }
 
@@ -2031,10 +2031,10 @@
 {
 	struct tt_local_entry *tt_local_entry = NULL;
 	struct tt_global_entry *tt_global_entry = NULL;
-	bool ret = true;
+	bool ret = false;
 
 	if (!atomic_read(&bat_priv->ap_isolation))
-		return false;
+		goto out;
 
 	tt_local_entry = tt_local_hash_find(bat_priv, dst);
 	if (!tt_local_entry)
@@ -2044,10 +2044,10 @@
 	if (!tt_global_entry)
 		goto out;
 
-	if (_is_ap_isolated(tt_local_entry, tt_global_entry))
+	if (!_is_ap_isolated(tt_local_entry, tt_global_entry))
 		goto out;
 
-	ret = false;
+	ret = true;
 
 out:
 	if (tt_global_entry)
diff --git a/net/bluetooth/af_bluetooth.c b/net/bluetooth/af_bluetooth.c
index 46e7f86..3e18af4 100644
--- a/net/bluetooth/af_bluetooth.c
+++ b/net/bluetooth/af_bluetooth.c
@@ -210,7 +210,7 @@
 		}
 
 		if (sk->sk_state == BT_CONNECTED || !newsock ||
-		    test_bit(BT_DEFER_SETUP, &bt_sk(parent)->flags)) {
+		    test_bit(BT_SK_DEFER_SETUP, &bt_sk(parent)->flags)) {
 			bt_accept_unlink(sk);
 			if (newsock)
 				sock_graft(sk, newsock);
diff --git a/net/bluetooth/hci_event.c b/net/bluetooth/hci_event.c
index 4eefb7f..94ad124 100644
--- a/net/bluetooth/hci_event.c
+++ b/net/bluetooth/hci_event.c
@@ -3043,6 +3043,50 @@
 	hci_dev_unlock(hdev);
 }
 
+static void hci_key_refresh_complete_evt(struct hci_dev *hdev,
+					 struct sk_buff *skb)
+{
+	struct hci_ev_key_refresh_complete *ev = (void *) skb->data;
+	struct hci_conn *conn;
+
+	BT_DBG("%s status %u handle %u", hdev->name, ev->status,
+	       __le16_to_cpu(ev->handle));
+
+	hci_dev_lock(hdev);
+
+	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
+	if (!conn)
+		goto unlock;
+
+	if (!ev->status)
+		conn->sec_level = conn->pending_sec_level;
+
+	clear_bit(HCI_CONN_ENCRYPT_PEND, &conn->flags);
+
+	if (ev->status && conn->state == BT_CONNECTED) {
+		hci_acl_disconn(conn, HCI_ERROR_AUTH_FAILURE);
+		hci_conn_put(conn);
+		goto unlock;
+	}
+
+	if (conn->state == BT_CONFIG) {
+		if (!ev->status)
+			conn->state = BT_CONNECTED;
+
+		hci_proto_connect_cfm(conn, ev->status);
+		hci_conn_put(conn);
+	} else {
+		hci_auth_cfm(conn, ev->status);
+
+		hci_conn_hold(conn);
+		conn->disc_timeout = HCI_DISCONN_TIMEOUT;
+		hci_conn_put(conn);
+	}
+
+unlock:
+	hci_dev_unlock(hdev);
+}
+
 static inline u8 hci_get_auth_req(struct hci_conn *conn)
 {
 	/* If remote requests dedicated bonding follow that lead */
@@ -3559,6 +3603,10 @@
 		hci_extended_inquiry_result_evt(hdev, skb);
 		break;
 
+	case HCI_EV_KEY_REFRESH_COMPLETE:
+		hci_key_refresh_complete_evt(hdev, skb);
+		break;
+
 	case HCI_EV_IO_CAPA_REQUEST:
 		hci_io_capa_request_evt(hdev, skb);
 		break;
diff --git a/net/bluetooth/hidp/Kconfig b/net/bluetooth/hidp/Kconfig
index 4deaca7..9332bc7 100644
--- a/net/bluetooth/hidp/Kconfig
+++ b/net/bluetooth/hidp/Kconfig
@@ -1,6 +1,6 @@
 config BT_HIDP
 	tristate "HIDP protocol support"
-	depends on BT && INPUT && HID_SUPPORT
+	depends on BT && INPUT
 	select HID
 	help
 	  HIDP (Human Interface Device Protocol) is a transport layer
diff --git a/net/bluetooth/l2cap_core.c b/net/bluetooth/l2cap_core.c
index 24f144b..4554e80 100644
--- a/net/bluetooth/l2cap_core.c
+++ b/net/bluetooth/l2cap_core.c
@@ -1295,7 +1295,12 @@
 	struct l2cap_conn *conn = container_of(work, struct l2cap_conn,
 						security_timer.work);
 
-	l2cap_conn_del(conn->hcon, ETIMEDOUT);
+	BT_DBG("conn %p", conn);
+
+	if (test_and_clear_bit(HCI_CONN_LE_SMP_PEND, &conn->hcon->flags)) {
+		smp_chan_destroy(conn);
+		l2cap_conn_del(conn->hcon, ETIMEDOUT);
+	}
 }
 
 static struct l2cap_conn *l2cap_conn_add(struct hci_conn *hcon, u8 status)
@@ -2910,12 +2915,14 @@
 	while (len >= L2CAP_CONF_OPT_SIZE) {
 		len -= l2cap_get_conf_opt(&rsp, &type, &olen, &val);
 
-		switch (type) {
-		case L2CAP_CONF_RFC:
-			if (olen == sizeof(rfc))
-				memcpy(&rfc, (void *)val, olen);
-			goto done;
-		}
+		if (type != L2CAP_CONF_RFC)
+			continue;
+
+		if (olen != sizeof(rfc))
+			break;
+
+		memcpy(&rfc, (void *)val, olen);
+		goto done;
 	}
 
 	/* Use sane default values in case a misbehaving remote device
diff --git a/net/bluetooth/mgmt.c b/net/bluetooth/mgmt.c
index 25d2207..3e5e336 100644
--- a/net/bluetooth/mgmt.c
+++ b/net/bluetooth/mgmt.c
@@ -1598,7 +1598,7 @@
 	else
 		conn = hci_conn_hash_lookup_ba(hdev, LE_LINK, &cp->addr.bdaddr);
 
-	if (!conn) {
+	if (!conn || conn->state == BT_OPEN || conn->state == BT_CLOSED) {
 		err = cmd_status(sk, hdev->id, MGMT_OP_DISCONNECT,
 				 MGMT_STATUS_NOT_CONNECTED);
 		goto failed;
@@ -1873,6 +1873,22 @@
 		pairing_complete(cmd, mgmt_status(status));
 }
 
+static void le_connect_complete_cb(struct hci_conn *conn, u8 status)
+{
+	struct pending_cmd *cmd;
+
+	BT_DBG("status %u", status);
+
+	if (!status)
+		return;
+
+	cmd = find_pairing(conn);
+	if (!cmd)
+		BT_DBG("Unable to find a pending command");
+	else
+		pairing_complete(cmd, mgmt_status(status));
+}
+
 static int pair_device(struct sock *sk, struct hci_dev *hdev, void *data,
 		       u16 len)
 {
@@ -1934,6 +1950,8 @@
 	/* For LE, just connecting isn't a proof that the pairing finished */
 	if (cp->addr.type == BDADDR_BREDR)
 		conn->connect_cfm_cb = pairing_complete_cb;
+	else
+		conn->connect_cfm_cb = le_connect_complete_cb;
 
 	conn->security_cfm_cb = pairing_complete_cb;
 	conn->disconn_cfm_cb = pairing_complete_cb;
diff --git a/net/bluetooth/rfcomm/tty.c b/net/bluetooth/rfcomm/tty.c
index aa5d73b..d1820ff 100644
--- a/net/bluetooth/rfcomm/tty.c
+++ b/net/bluetooth/rfcomm/tty.c
@@ -710,9 +710,9 @@
 			break;
 		}
 
-		tty_unlock(tty);
+		tty_unlock();
 		schedule();
-		tty_lock(tty);
+		tty_lock();
 	}
 	set_current_state(TASK_RUNNING);
 	remove_wait_queue(&dev->wait, &wait);
diff --git a/net/bluetooth/smp.c b/net/bluetooth/smp.c
index 6fc7c47..37df4e9 100644
--- a/net/bluetooth/smp.c
+++ b/net/bluetooth/smp.c
@@ -648,7 +648,7 @@
 
 	auth |= (req->auth_req | rsp->auth_req) & SMP_AUTH_MITM;
 
-	ret = tk_request(conn, 0, auth, rsp->io_capability, req->io_capability);
+	ret = tk_request(conn, 0, auth, req->io_capability, rsp->io_capability);
 	if (ret)
 		return SMP_UNSPECIFIED;
 
@@ -703,7 +703,7 @@
 	return 0;
 }
 
-static u8 smp_ltk_encrypt(struct l2cap_conn *conn)
+static u8 smp_ltk_encrypt(struct l2cap_conn *conn, u8 sec_level)
 {
 	struct smp_ltk *key;
 	struct hci_conn *hcon = conn->hcon;
@@ -712,6 +712,9 @@
 	if (!key)
 		return 0;
 
+	if (sec_level > BT_SECURITY_MEDIUM && !key->authenticated)
+		return 0;
+
 	if (test_and_set_bit(HCI_CONN_ENCRYPT_PEND, &hcon->flags))
 		return 1;
 
@@ -732,7 +735,7 @@
 
 	hcon->pending_sec_level = authreq_to_seclevel(rp->auth_req);
 
-	if (smp_ltk_encrypt(conn))
+	if (smp_ltk_encrypt(conn, hcon->pending_sec_level))
 		return 0;
 
 	if (test_and_set_bit(HCI_CONN_LE_SMP_PEND, &hcon->flags))
@@ -771,7 +774,7 @@
 		return 1;
 
 	if (hcon->link_mode & HCI_LM_MASTER)
-		if (smp_ltk_encrypt(conn))
+		if (smp_ltk_encrypt(conn, sec_level))
 			goto done;
 
 	if (test_and_set_bit(HCI_CONN_LE_SMP_PEND, &hcon->flags))
diff --git a/net/bridge/br_if.c b/net/bridge/br_if.c
index 0a942fb..e1144e1 100644
--- a/net/bridge/br_if.c
+++ b/net/bridge/br_if.c
@@ -240,6 +240,7 @@
 		return -ENOMEM;
 
 	dev_net_set(dev, net);
+	dev->rtnl_link_ops = &br_link_ops;
 
 	res = register_netdev(dev);
 	if (res)
diff --git a/net/bridge/br_netlink.c b/net/bridge/br_netlink.c
index 2080485..fe41260 100644
--- a/net/bridge/br_netlink.c
+++ b/net/bridge/br_netlink.c
@@ -208,7 +208,7 @@
 	return 0;
 }
 
-static struct rtnl_link_ops br_link_ops __read_mostly = {
+struct rtnl_link_ops br_link_ops __read_mostly = {
 	.kind		= "bridge",
 	.priv_size	= sizeof(struct net_bridge),
 	.setup		= br_dev_setup,
diff --git a/net/bridge/br_private.h b/net/bridge/br_private.h
index 1a8ad4f..a768b24 100644
--- a/net/bridge/br_private.h
+++ b/net/bridge/br_private.h
@@ -549,6 +549,7 @@
 #endif
 
 /* br_netlink.c */
+extern struct rtnl_link_ops br_link_ops;
 extern int br_netlink_init(void);
 extern void br_netlink_fini(void);
 extern void br_ifinfo_notify(int event, struct net_bridge_port *port);
diff --git a/net/caif/caif_dev.c b/net/caif/caif_dev.c
index aa6f716..554b312 100644
--- a/net/caif/caif_dev.c
+++ b/net/caif/caif_dev.c
@@ -4,8 +4,7 @@
  * Author:	Sjur Brendeland/sjur.brandeland@stericsson.com
  * License terms: GNU General Public License (GPL) version 2
  *
- * Borrowed heavily from file: pn_dev.c. Thanks to
- *  Remi Denis-Courmont <remi.denis-courmont@nokia.com>
+ * Borrowed heavily from file: pn_dev.c. Thanks to Remi Denis-Courmont
  *  and Sakari Ailus <sakari.ailus@nokia.com>
  */
 
diff --git a/net/caif/caif_socket.c b/net/caif/caif_socket.c
index fb89443..78f1cda 100644
--- a/net/caif/caif_socket.c
+++ b/net/caif/caif_socket.c
@@ -220,6 +220,7 @@
 						cfsk_hold, cfsk_put);
 		cf_sk->sk.sk_state = CAIF_CONNECTED;
 		set_tx_flow_on(cf_sk);
+		cf_sk->sk.sk_shutdown = 0;
 		cf_sk->sk.sk_state_change(&cf_sk->sk);
 		break;
 
diff --git a/net/can/raw.c b/net/can/raw.c
index cde1b4a..46cca3a 100644
--- a/net/can/raw.c
+++ b/net/can/raw.c
@@ -681,9 +681,6 @@
 	if (err < 0)
 		goto free_skb;
 
-	/* to be able to check the received tx sock reference in raw_rcv() */
-	skb_shinfo(skb)->tx_flags |= SKBTX_DRV_NEEDS_SK_REF;
-
 	skb->dev = dev;
 	skb->sk  = sk;
 
diff --git a/net/ceph/auth_none.c b/net/ceph/auth_none.c
index 214c2bb..925ca58 100644
--- a/net/ceph/auth_none.c
+++ b/net/ceph/auth_none.c
@@ -59,9 +59,7 @@
  */
 static int ceph_auth_none_create_authorizer(
 	struct ceph_auth_client *ac, int peer_type,
-	struct ceph_authorizer **a,
-	void **buf, size_t *len,
-	void **reply_buf, size_t *reply_len)
+	struct ceph_auth_handshake *auth)
 {
 	struct ceph_auth_none_info *ai = ac->private;
 	struct ceph_none_authorizer *au = &ai->au;
@@ -82,11 +80,12 @@
 		dout("built authorizer len %d\n", au->buf_len);
 	}
 
-	*a = (struct ceph_authorizer *)au;
-	*buf = au->buf;
-	*len = au->buf_len;
-	*reply_buf = au->reply_buf;
-	*reply_len = sizeof(au->reply_buf);
+	auth->authorizer = (struct ceph_authorizer *) au;
+	auth->authorizer_buf = au->buf;
+	auth->authorizer_buf_len = au->buf_len;
+	auth->authorizer_reply_buf = au->reply_buf;
+	auth->authorizer_reply_buf_len = sizeof (au->reply_buf);
+
 	return 0;
 
 bad2:
diff --git a/net/ceph/auth_x.c b/net/ceph/auth_x.c
index 1587dc6..a16bf14 100644
--- a/net/ceph/auth_x.c
+++ b/net/ceph/auth_x.c
@@ -526,9 +526,7 @@
 
 static int ceph_x_create_authorizer(
 	struct ceph_auth_client *ac, int peer_type,
-	struct ceph_authorizer **a,
-	void **buf, size_t *len,
-	void **reply_buf, size_t *reply_len)
+	struct ceph_auth_handshake *auth)
 {
 	struct ceph_x_authorizer *au;
 	struct ceph_x_ticket_handler *th;
@@ -548,11 +546,12 @@
 		return ret;
 	}
 
-	*a = (struct ceph_authorizer *)au;
-	*buf = au->buf->vec.iov_base;
-	*len = au->buf->vec.iov_len;
-	*reply_buf = au->reply_buf;
-	*reply_len = sizeof(au->reply_buf);
+	auth->authorizer = (struct ceph_authorizer *) au;
+	auth->authorizer_buf = au->buf->vec.iov_base;
+	auth->authorizer_buf_len = au->buf->vec.iov_len;
+	auth->authorizer_reply_buf = au->reply_buf;
+	auth->authorizer_reply_buf_len = sizeof (au->reply_buf);
+
 	return 0;
 }
 
diff --git a/net/ceph/ceph_common.c b/net/ceph/ceph_common.c
index a776f75..ba4323b 100644
--- a/net/ceph/ceph_common.c
+++ b/net/ceph/ceph_common.c
@@ -504,13 +504,6 @@
 	/* unmount */
 	ceph_osdc_stop(&client->osdc);
 
-	/*
-	 * make sure osd connections close out before destroying the
-	 * auth module, which is needed to free those connections'
-	 * ceph_authorizers.
-	 */
-	ceph_msgr_flush();
-
 	ceph_monc_stop(&client->monc);
 
 	ceph_debugfs_client_cleanup(client);
diff --git a/net/ceph/crush/crush.c b/net/ceph/crush/crush.c
index d6ebb13..0896132 100644
--- a/net/ceph/crush/crush.c
+++ b/net/ceph/crush/crush.c
@@ -26,9 +26,9 @@
  * @b: bucket pointer
  * @p: item index in bucket
  */
-int crush_get_bucket_item_weight(struct crush_bucket *b, int p)
+int crush_get_bucket_item_weight(const struct crush_bucket *b, int p)
 {
-	if (p >= b->size)
+	if ((__u32)p >= b->size)
 		return 0;
 
 	switch (b->alg) {
@@ -37,38 +37,13 @@
 	case CRUSH_BUCKET_LIST:
 		return ((struct crush_bucket_list *)b)->item_weights[p];
 	case CRUSH_BUCKET_TREE:
-		if (p & 1)
-			return ((struct crush_bucket_tree *)b)->node_weights[p];
-		return 0;
+		return ((struct crush_bucket_tree *)b)->node_weights[crush_calc_tree_node(p)];
 	case CRUSH_BUCKET_STRAW:
 		return ((struct crush_bucket_straw *)b)->item_weights[p];
 	}
 	return 0;
 }
 
-/**
- * crush_calc_parents - Calculate parent vectors for the given crush map.
- * @map: crush_map pointer
- */
-void crush_calc_parents(struct crush_map *map)
-{
-	int i, b, c;
-
-	for (b = 0; b < map->max_buckets; b++) {
-		if (map->buckets[b] == NULL)
-			continue;
-		for (i = 0; i < map->buckets[b]->size; i++) {
-			c = map->buckets[b]->items[i];
-			BUG_ON(c >= map->max_devices ||
-			       c < -map->max_buckets);
-			if (c >= 0)
-				map->device_parents[c] = map->buckets[b]->id;
-			else
-				map->bucket_parents[-1-c] = map->buckets[b]->id;
-		}
-	}
-}
-
 void crush_destroy_bucket_uniform(struct crush_bucket_uniform *b)
 {
 	kfree(b->h.perm);
@@ -87,6 +62,8 @@
 
 void crush_destroy_bucket_tree(struct crush_bucket_tree *b)
 {
+	kfree(b->h.perm);
+	kfree(b->h.items);
 	kfree(b->node_weights);
 	kfree(b);
 }
@@ -124,10 +101,9 @@
  */
 void crush_destroy(struct crush_map *map)
 {
-	int b;
-
 	/* buckets */
 	if (map->buckets) {
+		__s32 b;
 		for (b = 0; b < map->max_buckets; b++) {
 			if (map->buckets[b] == NULL)
 				continue;
@@ -138,13 +114,12 @@
 
 	/* rules */
 	if (map->rules) {
+		__u32 b;
 		for (b = 0; b < map->max_rules; b++)
 			kfree(map->rules[b]);
 		kfree(map->rules);
 	}
 
-	kfree(map->bucket_parents);
-	kfree(map->device_parents);
 	kfree(map);
 }
 
diff --git a/net/ceph/crush/mapper.c b/net/ceph/crush/mapper.c
index 363f8f7..d7edc24 100644
--- a/net/ceph/crush/mapper.c
+++ b/net/ceph/crush/mapper.c
@@ -33,9 +33,9 @@
  * @type: storage ruleset type (user defined)
  * @size: output set size
  */
-int crush_find_rule(struct crush_map *map, int ruleset, int type, int size)
+int crush_find_rule(const struct crush_map *map, int ruleset, int type, int size)
 {
-	int i;
+	__u32 i;
 
 	for (i = 0; i < map->max_rules; i++) {
 		if (map->rules[i] &&
@@ -73,7 +73,7 @@
 	unsigned int i, s;
 
 	/* start a new permutation if @x has changed */
-	if (bucket->perm_x != x || bucket->perm_n == 0) {
+	if (bucket->perm_x != (__u32)x || bucket->perm_n == 0) {
 		dprintk("bucket %d new x=%d\n", bucket->id, x);
 		bucket->perm_x = x;
 
@@ -153,8 +153,8 @@
 			return bucket->h.items[i];
 	}
 
-	BUG_ON(1);
-	return 0;
+	dprintk("bad list sums for bucket %d\n", bucket->h.id);
+	return bucket->h.items[0];
 }
 
 
@@ -220,7 +220,7 @@
 static int bucket_straw_choose(struct crush_bucket_straw *bucket,
 			       int x, int r)
 {
-	int i;
+	__u32 i;
 	int high = 0;
 	__u64 high_draw = 0;
 	__u64 draw;
@@ -240,6 +240,7 @@
 static int crush_bucket_choose(struct crush_bucket *in, int x, int r)
 {
 	dprintk(" crush_bucket_choose %d x=%d r=%d\n", in->id, x, r);
+	BUG_ON(in->size == 0);
 	switch (in->alg) {
 	case CRUSH_BUCKET_UNIFORM:
 		return bucket_uniform_choose((struct crush_bucket_uniform *)in,
@@ -254,7 +255,7 @@
 		return bucket_straw_choose((struct crush_bucket_straw *)in,
 					   x, r);
 	default:
-		BUG_ON(1);
+		dprintk("unknown bucket %d alg %d\n", in->id, in->alg);
 		return in->items[0];
 	}
 }
@@ -263,7 +264,7 @@
  * true if device is marked "out" (failed, fully offloaded)
  * of the cluster
  */
-static int is_out(struct crush_map *map, __u32 *weight, int item, int x)
+static int is_out(const struct crush_map *map, const __u32 *weight, int item, int x)
 {
 	if (weight[item] >= 0x10000)
 		return 0;
@@ -288,16 +289,16 @@
  * @recurse_to_leaf: true if we want one device under each item of given type
  * @out2: second output vector for leaf items (if @recurse_to_leaf)
  */
-static int crush_choose(struct crush_map *map,
+static int crush_choose(const struct crush_map *map,
 			struct crush_bucket *bucket,
-			__u32 *weight,
+			const __u32 *weight,
 			int x, int numrep, int type,
 			int *out, int outpos,
 			int firstn, int recurse_to_leaf,
 			int *out2)
 {
 	int rep;
-	int ftotal, flocal;
+	unsigned int ftotal, flocal;
 	int retry_descent, retry_bucket, skip_rep;
 	struct crush_bucket *in = bucket;
 	int r;
@@ -305,7 +306,7 @@
 	int item = 0;
 	int itemtype;
 	int collide, reject;
-	const int orig_tries = 5; /* attempts before we fall back to search */
+	const unsigned int orig_tries = 5; /* attempts before we fall back to search */
 
 	dprintk("CHOOSE%s bucket %d x %d outpos %d numrep %d\n", recurse_to_leaf ? "_LEAF" : "",
 		bucket->id, x, outpos, numrep);
@@ -326,7 +327,7 @@
 				r = rep;
 				if (in->alg == CRUSH_BUCKET_UNIFORM) {
 					/* be careful */
-					if (firstn || numrep >= in->size)
+					if (firstn || (__u32)numrep >= in->size)
 						/* r' = r + f_total */
 						r += ftotal;
 					else if (in->size % numrep == 0)
@@ -355,7 +356,11 @@
 					item = bucket_perm_choose(in, x, r);
 				else
 					item = crush_bucket_choose(in, x, r);
-				BUG_ON(item >= map->max_devices);
+				if (item >= map->max_devices) {
+					dprintk("   bad item %d\n", item);
+					skip_rep = 1;
+					break;
+				}
 
 				/* desired type? */
 				if (item < 0)
@@ -366,8 +371,12 @@
 
 				/* keep going? */
 				if (itemtype != type) {
-					BUG_ON(item >= 0 ||
-					       (-1-item) >= map->max_buckets);
+					if (item >= 0 ||
+					    (-1-item) >= map->max_buckets) {
+						dprintk("   bad item type %d\n", type);
+						skip_rep = 1;
+						break;
+					}
 					in = map->buckets[-1-item];
 					retry_bucket = 1;
 					continue;
@@ -416,7 +425,7 @@
 					if (collide && flocal < 3)
 						/* retry locally a few times */
 						retry_bucket = 1;
-					else if (flocal < in->size + orig_tries)
+					else if (flocal <= in->size + orig_tries)
 						/* exhaustive bucket search */
 						retry_bucket = 1;
 					else if (ftotal < 20)
@@ -426,7 +435,7 @@
 						/* else give up */
 						skip_rep = 1;
 					dprintk("  reject %d  collide %d  "
-						"ftotal %d  flocal %d\n",
+						"ftotal %u  flocal %u\n",
 						reject, collide, ftotal,
 						flocal);
 				}
@@ -455,15 +464,12 @@
  * @x: hash input
  * @result: pointer to result vector
  * @result_max: maximum result size
- * @force: force initial replica choice; -1 for none
  */
-int crush_do_rule(struct crush_map *map,
+int crush_do_rule(const struct crush_map *map,
 		  int ruleno, int x, int *result, int result_max,
-		  int force, __u32 *weight)
+		  const __u32 *weight)
 {
 	int result_len;
-	int force_context[CRUSH_MAX_DEPTH];
-	int force_pos = -1;
 	int a[CRUSH_MAX_SET];
 	int b[CRUSH_MAX_SET];
 	int c[CRUSH_MAX_SET];
@@ -474,66 +480,44 @@
 	int osize;
 	int *tmp;
 	struct crush_rule *rule;
-	int step;
+	__u32 step;
 	int i, j;
 	int numrep;
 	int firstn;
 
-	BUG_ON(ruleno >= map->max_rules);
+	if ((__u32)ruleno >= map->max_rules) {
+		dprintk(" bad ruleno %d\n", ruleno);
+		return 0;
+	}
 
 	rule = map->rules[ruleno];
 	result_len = 0;
 	w = a;
 	o = b;
 
-	/*
-	 * determine hierarchical context of force, if any.  note
-	 * that this may or may not correspond to the specific types
-	 * referenced by the crush rule.
-	 */
-	if (force >= 0 &&
-	    force < map->max_devices &&
-	    map->device_parents[force] != 0 &&
-	    !is_out(map, weight, force, x)) {
-		while (1) {
-			force_context[++force_pos] = force;
-			if (force >= 0)
-				force = map->device_parents[force];
-			else
-				force = map->bucket_parents[-1-force];
-			if (force == 0)
-				break;
-		}
-	}
-
 	for (step = 0; step < rule->len; step++) {
+		struct crush_rule_step *curstep = &rule->steps[step];
+
 		firstn = 0;
-		switch (rule->steps[step].op) {
+		switch (curstep->op) {
 		case CRUSH_RULE_TAKE:
-			w[0] = rule->steps[step].arg1;
-
-			/* find position in force_context/hierarchy */
-			while (force_pos >= 0 &&
-			       force_context[force_pos] != w[0])
-				force_pos--;
-			/* and move past it */
-			if (force_pos >= 0)
-				force_pos--;
-
+			w[0] = curstep->arg1;
 			wsize = 1;
 			break;
 
 		case CRUSH_RULE_CHOOSE_LEAF_FIRSTN:
 		case CRUSH_RULE_CHOOSE_FIRSTN:
 			firstn = 1;
+			/* fall through */
 		case CRUSH_RULE_CHOOSE_LEAF_INDEP:
 		case CRUSH_RULE_CHOOSE_INDEP:
-			BUG_ON(wsize == 0);
+			if (wsize == 0)
+				break;
 
 			recurse_to_leaf =
-				rule->steps[step].op ==
+				curstep->op ==
 				 CRUSH_RULE_CHOOSE_LEAF_FIRSTN ||
-				rule->steps[step].op ==
+				curstep->op ==
 				CRUSH_RULE_CHOOSE_LEAF_INDEP;
 
 			/* reset output */
@@ -545,32 +529,18 @@
 				 * basically, numrep <= 0 means relative to
 				 * the provided result_max
 				 */
-				numrep = rule->steps[step].arg1;
+				numrep = curstep->arg1;
 				if (numrep <= 0) {
 					numrep += result_max;
 					if (numrep <= 0)
 						continue;
 				}
 				j = 0;
-				if (osize == 0 && force_pos >= 0) {
-					/* skip any intermediate types */
-					while (force_pos &&
-					       force_context[force_pos] < 0 &&
-					       rule->steps[step].arg2 !=
-					       map->buckets[-1 -
-					       force_context[force_pos]]->type)
-						force_pos--;
-					o[osize] = force_context[force_pos];
-					if (recurse_to_leaf)
-						c[osize] = force_context[0];
-					j++;
-					force_pos--;
-				}
 				osize += crush_choose(map,
 						      map->buckets[-1-w[i]],
 						      weight,
 						      x, numrep,
-						      rule->steps[step].arg2,
+						      curstep->arg2,
 						      o+osize, j,
 						      firstn,
 						      recurse_to_leaf, c+osize);
@@ -597,7 +567,9 @@
 			break;
 
 		default:
-			BUG_ON(1);
+			dprintk(" unknown op %d at step %d\n",
+				curstep->op, step);
+			break;
 		}
 	}
 	return result_len;
diff --git a/net/ceph/messenger.c b/net/ceph/messenger.c
index 36fa6bf..b332c3d 100644
--- a/net/ceph/messenger.c
+++ b/net/ceph/messenger.c
@@ -563,6 +563,10 @@
 		m->hdr.seq = cpu_to_le64(++con->out_seq);
 		m->needs_out_seq = false;
 	}
+#ifdef CONFIG_BLOCK
+	else
+		m->bio_iter = NULL;
+#endif
 
 	dout("prepare_write_message %p seq %lld type %d len %d+%d+%d %d pgs\n",
 	     m, con->out_seq, le16_to_cpu(m->hdr.type),
@@ -653,54 +657,57 @@
  * Connection negotiation.
  */
 
-static int prepare_connect_authorizer(struct ceph_connection *con)
+static struct ceph_auth_handshake *get_connect_authorizer(struct ceph_connection *con,
+						int *auth_proto)
 {
-	void *auth_buf;
-	int auth_len = 0;
-	int auth_protocol = 0;
+	struct ceph_auth_handshake *auth;
+
+	if (!con->ops->get_authorizer) {
+		con->out_connect.authorizer_protocol = CEPH_AUTH_UNKNOWN;
+		con->out_connect.authorizer_len = 0;
+
+		return NULL;
+	}
+
+	/* Can't hold the mutex while getting authorizer */
 
 	mutex_unlock(&con->mutex);
-	if (con->ops->get_authorizer)
-		con->ops->get_authorizer(con, &auth_buf, &auth_len,
-					 &auth_protocol, &con->auth_reply_buf,
-					 &con->auth_reply_buf_len,
-					 con->auth_retry);
+
+	auth = con->ops->get_authorizer(con, auth_proto, con->auth_retry);
+
 	mutex_lock(&con->mutex);
 
-	if (test_bit(CLOSED, &con->state) ||
-	    test_bit(OPENING, &con->state))
-		return -EAGAIN;
+	if (IS_ERR(auth))
+		return auth;
+	if (test_bit(CLOSED, &con->state) || test_bit(OPENING, &con->state))
+		return ERR_PTR(-EAGAIN);
 
-	con->out_connect.authorizer_protocol = cpu_to_le32(auth_protocol);
-	con->out_connect.authorizer_len = cpu_to_le32(auth_len);
+	con->auth_reply_buf = auth->authorizer_reply_buf;
+	con->auth_reply_buf_len = auth->authorizer_reply_buf_len;
 
-	if (auth_len)
-		ceph_con_out_kvec_add(con, auth_len, auth_buf);
 
-	return 0;
+	return auth;
 }
 
 /*
  * We connected to a peer and are saying hello.
  */
-static void prepare_write_banner(struct ceph_messenger *msgr,
-				 struct ceph_connection *con)
+static void prepare_write_banner(struct ceph_connection *con)
 {
-	ceph_con_out_kvec_reset(con);
 	ceph_con_out_kvec_add(con, strlen(CEPH_BANNER), CEPH_BANNER);
-	ceph_con_out_kvec_add(con, sizeof (msgr->my_enc_addr),
-					&msgr->my_enc_addr);
+	ceph_con_out_kvec_add(con, sizeof (con->msgr->my_enc_addr),
+					&con->msgr->my_enc_addr);
 
 	con->out_more = 0;
 	set_bit(WRITE_PENDING, &con->state);
 }
 
-static int prepare_write_connect(struct ceph_messenger *msgr,
-				 struct ceph_connection *con,
-				 int include_banner)
+static int prepare_write_connect(struct ceph_connection *con)
 {
 	unsigned int global_seq = get_global_seq(con->msgr, 0);
 	int proto;
+	int auth_proto;
+	struct ceph_auth_handshake *auth;
 
 	switch (con->peer_name.type) {
 	case CEPH_ENTITY_TYPE_MON:
@@ -719,23 +726,32 @@
 	dout("prepare_write_connect %p cseq=%d gseq=%d proto=%d\n", con,
 	     con->connect_seq, global_seq, proto);
 
-	con->out_connect.features = cpu_to_le64(msgr->supported_features);
+	con->out_connect.features = cpu_to_le64(con->msgr->supported_features);
 	con->out_connect.host_type = cpu_to_le32(CEPH_ENTITY_TYPE_CLIENT);
 	con->out_connect.connect_seq = cpu_to_le32(con->connect_seq);
 	con->out_connect.global_seq = cpu_to_le32(global_seq);
 	con->out_connect.protocol_version = cpu_to_le32(proto);
 	con->out_connect.flags = 0;
 
-	if (include_banner)
-		prepare_write_banner(msgr, con);
-	else
-		ceph_con_out_kvec_reset(con);
-	ceph_con_out_kvec_add(con, sizeof (con->out_connect), &con->out_connect);
+	auth_proto = CEPH_AUTH_UNKNOWN;
+	auth = get_connect_authorizer(con, &auth_proto);
+	if (IS_ERR(auth))
+		return PTR_ERR(auth);
+
+	con->out_connect.authorizer_protocol = cpu_to_le32(auth_proto);
+	con->out_connect.authorizer_len = auth ?
+		cpu_to_le32(auth->authorizer_buf_len) : 0;
+
+	ceph_con_out_kvec_add(con, sizeof (con->out_connect),
+					&con->out_connect);
+	if (auth && auth->authorizer_buf_len)
+		ceph_con_out_kvec_add(con, auth->authorizer_buf_len,
+					auth->authorizer_buf);
 
 	con->out_more = 0;
 	set_bit(WRITE_PENDING, &con->state);
 
-	return prepare_connect_authorizer(con);
+	return 0;
 }
 
 /*
@@ -992,11 +1008,10 @@
 
 
 static int read_partial(struct ceph_connection *con,
-			int *to, int size, void *object)
+			int end, int size, void *object)
 {
-	*to += size;
-	while (con->in_base_pos < *to) {
-		int left = *to - con->in_base_pos;
+	while (con->in_base_pos < end) {
+		int left = end - con->in_base_pos;
 		int have = size - left;
 		int ret = ceph_tcp_recvmsg(con->sock, object + have, left);
 		if (ret <= 0)
@@ -1012,37 +1027,52 @@
  */
 static int read_partial_banner(struct ceph_connection *con)
 {
-	int ret, to = 0;
+	int size;
+	int end;
+	int ret;
 
 	dout("read_partial_banner %p at %d\n", con, con->in_base_pos);
 
 	/* peer's banner */
-	ret = read_partial(con, &to, strlen(CEPH_BANNER), con->in_banner);
+	size = strlen(CEPH_BANNER);
+	end = size;
+	ret = read_partial(con, end, size, con->in_banner);
 	if (ret <= 0)
 		goto out;
-	ret = read_partial(con, &to, sizeof(con->actual_peer_addr),
-			   &con->actual_peer_addr);
+
+	size = sizeof (con->actual_peer_addr);
+	end += size;
+	ret = read_partial(con, end, size, &con->actual_peer_addr);
 	if (ret <= 0)
 		goto out;
-	ret = read_partial(con, &to, sizeof(con->peer_addr_for_me),
-			   &con->peer_addr_for_me);
+
+	size = sizeof (con->peer_addr_for_me);
+	end += size;
+	ret = read_partial(con, end, size, &con->peer_addr_for_me);
 	if (ret <= 0)
 		goto out;
+
 out:
 	return ret;
 }
 
 static int read_partial_connect(struct ceph_connection *con)
 {
-	int ret, to = 0;
+	int size;
+	int end;
+	int ret;
 
 	dout("read_partial_connect %p at %d\n", con, con->in_base_pos);
 
-	ret = read_partial(con, &to, sizeof(con->in_reply), &con->in_reply);
+	size = sizeof (con->in_reply);
+	end = size;
+	ret = read_partial(con, end, size, &con->in_reply);
 	if (ret <= 0)
 		goto out;
-	ret = read_partial(con, &to, le32_to_cpu(con->in_reply.authorizer_len),
-			   con->auth_reply_buf);
+
+	size = le32_to_cpu(con->in_reply.authorizer_len);
+	end += size;
+	ret = read_partial(con, end, size, con->auth_reply_buf);
 	if (ret <= 0)
 		goto out;
 
@@ -1377,7 +1407,8 @@
 			return -1;
 		}
 		con->auth_retry = 1;
-		ret = prepare_write_connect(con->msgr, con, 0);
+		ceph_con_out_kvec_reset(con);
+		ret = prepare_write_connect(con);
 		if (ret < 0)
 			return ret;
 		prepare_read_connect(con);
@@ -1397,7 +1428,10 @@
 		       ENTITY_NAME(con->peer_name),
 		       ceph_pr_addr(&con->peer_addr.in_addr));
 		reset_connection(con);
-		prepare_write_connect(con->msgr, con, 0);
+		ceph_con_out_kvec_reset(con);
+		ret = prepare_write_connect(con);
+		if (ret < 0)
+			return ret;
 		prepare_read_connect(con);
 
 		/* Tell ceph about it. */
@@ -1420,7 +1454,10 @@
 		     le32_to_cpu(con->out_connect.connect_seq),
 		     le32_to_cpu(con->in_connect.connect_seq));
 		con->connect_seq = le32_to_cpu(con->in_connect.connect_seq);
-		prepare_write_connect(con->msgr, con, 0);
+		ceph_con_out_kvec_reset(con);
+		ret = prepare_write_connect(con);
+		if (ret < 0)
+			return ret;
 		prepare_read_connect(con);
 		break;
 
@@ -1434,7 +1471,10 @@
 		     le32_to_cpu(con->in_connect.global_seq));
 		get_global_seq(con->msgr,
 			       le32_to_cpu(con->in_connect.global_seq));
-		prepare_write_connect(con->msgr, con, 0);
+		ceph_con_out_kvec_reset(con);
+		ret = prepare_write_connect(con);
+		if (ret < 0)
+			return ret;
 		prepare_read_connect(con);
 		break;
 
@@ -1491,10 +1531,10 @@
  */
 static int read_partial_ack(struct ceph_connection *con)
 {
-	int to = 0;
+	int size = sizeof (con->in_temp_ack);
+	int end = size;
 
-	return read_partial(con, &to, sizeof(con->in_temp_ack),
-			    &con->in_temp_ack);
+	return read_partial(con, end, size, &con->in_temp_ack);
 }
 
 
@@ -1627,8 +1667,9 @@
 static int read_partial_message(struct ceph_connection *con)
 {
 	struct ceph_msg *m = con->in_msg;
+	int size;
+	int end;
 	int ret;
-	int to, left;
 	unsigned int front_len, middle_len, data_len;
 	bool do_datacrc = !con->msgr->nocrc;
 	int skip;
@@ -1638,15 +1679,11 @@
 	dout("read_partial_message con %p msg %p\n", con, m);
 
 	/* header */
-	while (con->in_base_pos < sizeof(con->in_hdr)) {
-		left = sizeof(con->in_hdr) - con->in_base_pos;
-		ret = ceph_tcp_recvmsg(con->sock,
-				       (char *)&con->in_hdr + con->in_base_pos,
-				       left);
-		if (ret <= 0)
-			return ret;
-		con->in_base_pos += ret;
-	}
+	size = sizeof (con->in_hdr);
+	end = size;
+	ret = read_partial(con, end, size, &con->in_hdr);
+	if (ret <= 0)
+		return ret;
 
 	crc = crc32c(0, &con->in_hdr, offsetof(struct ceph_msg_header, crc));
 	if (cpu_to_le32(crc) != con->in_hdr.crc) {
@@ -1759,16 +1796,12 @@
 	}
 
 	/* footer */
-	to = sizeof(m->hdr) + sizeof(m->footer);
-	while (con->in_base_pos < to) {
-		left = to - con->in_base_pos;
-		ret = ceph_tcp_recvmsg(con->sock, (char *)&m->footer +
-				       (con->in_base_pos - sizeof(m->hdr)),
-				       left);
-		if (ret <= 0)
-			return ret;
-		con->in_base_pos += ret;
-	}
+	size = sizeof (m->footer);
+	end += size;
+	ret = read_partial(con, end, size, &m->footer);
+	if (ret <= 0)
+		return ret;
+
 	dout("read_partial_message got msg %p %d (%u) + %d (%u) + %d (%u)\n",
 	     m, front_len, m->footer.front_crc, middle_len,
 	     m->footer.middle_crc, data_len, m->footer.data_crc);
@@ -1835,7 +1868,6 @@
  */
 static int try_write(struct ceph_connection *con)
 {
-	struct ceph_messenger *msgr = con->msgr;
 	int ret = 1;
 
 	dout("try_write start %p state %lu nref %d\n", con, con->state,
@@ -1846,7 +1878,11 @@
 
 	/* open the socket first? */
 	if (con->sock == NULL) {
-		prepare_write_connect(msgr, con, 1);
+		ceph_con_out_kvec_reset(con);
+		prepare_write_banner(con);
+		ret = prepare_write_connect(con);
+		if (ret < 0)
+			goto out;
 		prepare_read_banner(con);
 		set_bit(CONNECTING, &con->state);
 		clear_bit(NEGOTIATING, &con->state);
diff --git a/net/ceph/mon_client.c b/net/ceph/mon_client.c
index 10d6008..d0649a9 100644
--- a/net/ceph/mon_client.c
+++ b/net/ceph/mon_client.c
@@ -847,6 +847,14 @@
 
 	mutex_unlock(&monc->mutex);
 
+	/*
+	 * flush msgr queue before we destroy ourselves to ensure that:
+	 *  - any work that references our embedded con is finished.
+	 *  - any osd_client or other work that may reference an authorizer
+	 *    finishes before we shut down the auth subsystem.
+	 */
+	ceph_msgr_flush();
+
 	ceph_auth_destroy(monc->auth);
 
 	ceph_msg_put(monc->m_auth);
diff --git a/net/ceph/osd_client.c b/net/ceph/osd_client.c
index 1b0ef3c..ca59e66 100644
--- a/net/ceph/osd_client.c
+++ b/net/ceph/osd_client.c
@@ -139,15 +139,15 @@
 
 	if (req->r_request)
 		ceph_msg_put(req->r_request);
-	if (req->r_reply)
-		ceph_msg_put(req->r_reply);
 	if (req->r_con_filling_msg) {
 		dout("release_request revoking pages %p from con %p\n",
 		     req->r_pages, req->r_con_filling_msg);
 		ceph_con_revoke_message(req->r_con_filling_msg,
 				      req->r_reply);
-		ceph_con_put(req->r_con_filling_msg);
+		req->r_con_filling_msg->ops->put(req->r_con_filling_msg);
 	}
+	if (req->r_reply)
+		ceph_msg_put(req->r_reply);
 	if (req->r_own_pages)
 		ceph_release_page_vector(req->r_pages,
 					 req->r_num_pages);
@@ -278,7 +278,7 @@
 {
 	dst->op = cpu_to_le16(src->op);
 
-	switch (dst->op) {
+	switch (src->op) {
 	case CEPH_OSD_OP_READ:
 	case CEPH_OSD_OP_WRITE:
 		dst->extent.offset =
@@ -664,11 +664,11 @@
 {
 	dout("put_osd %p %d -> %d\n", osd, atomic_read(&osd->o_ref),
 	     atomic_read(&osd->o_ref) - 1);
-	if (atomic_dec_and_test(&osd->o_ref)) {
+	if (atomic_dec_and_test(&osd->o_ref) && osd->o_auth.authorizer) {
 		struct ceph_auth_client *ac = osd->o_osdc->client->monc.auth;
 
-		if (osd->o_authorizer)
-			ac->ops->destroy_authorizer(ac, osd->o_authorizer);
+		if (ac->ops && ac->ops->destroy_authorizer)
+			ac->ops->destroy_authorizer(ac, osd->o_auth.authorizer);
 		kfree(osd);
 	}
 }
@@ -841,6 +841,12 @@
 static void __unregister_request(struct ceph_osd_client *osdc,
 				 struct ceph_osd_request *req)
 {
+	if (RB_EMPTY_NODE(&req->r_node)) {
+		dout("__unregister_request %p tid %lld not registered\n",
+			req, req->r_tid);
+		return;
+	}
+
 	dout("__unregister_request %p tid %lld\n", req, req->r_tid);
 	rb_erase(&req->r_node, &osdc->requests);
 	osdc->num_requests--;
@@ -1210,7 +1216,7 @@
 	if (req->r_con_filling_msg == con && req->r_reply == msg) {
 		dout(" dropping con_filling_msg ref %p\n", con);
 		req->r_con_filling_msg = NULL;
-		ceph_con_put(con);
+		con->ops->put(con);
 	}
 
 	if (!req->r_got_reply) {
@@ -2022,7 +2028,7 @@
 		dout("get_reply revoking msg %p from old con %p\n",
 		     req->r_reply, req->r_con_filling_msg);
 		ceph_con_revoke_message(req->r_con_filling_msg, req->r_reply);
-		ceph_con_put(req->r_con_filling_msg);
+		req->r_con_filling_msg->ops->put(req->r_con_filling_msg);
 		req->r_con_filling_msg = NULL;
 	}
 
@@ -2057,7 +2063,7 @@
 #endif
 	}
 	*skip = 0;
-	req->r_con_filling_msg = ceph_con_get(con);
+	req->r_con_filling_msg = con->ops->get(con);
 	dout("get_reply tid %lld %p\n", tid, m);
 
 out:
@@ -2108,37 +2114,32 @@
 /*
  * authentication
  */
-static int get_authorizer(struct ceph_connection *con,
-			  void **buf, int *len, int *proto,
-			  void **reply_buf, int *reply_len, int force_new)
+/*
+ * Note: returned pointer is the address of a structure that's
+ * managed separately.  Caller must *not* attempt to free it.
+ */
+static struct ceph_auth_handshake *get_authorizer(struct ceph_connection *con,
+					int *proto, int force_new)
 {
 	struct ceph_osd *o = con->private;
 	struct ceph_osd_client *osdc = o->o_osdc;
 	struct ceph_auth_client *ac = osdc->client->monc.auth;
-	int ret = 0;
+	struct ceph_auth_handshake *auth = &o->o_auth;
 
-	if (force_new && o->o_authorizer) {
-		ac->ops->destroy_authorizer(ac, o->o_authorizer);
-		o->o_authorizer = NULL;
+	if (force_new && auth->authorizer) {
+		if (ac->ops && ac->ops->destroy_authorizer)
+			ac->ops->destroy_authorizer(ac, auth->authorizer);
+		auth->authorizer = NULL;
 	}
-	if (o->o_authorizer == NULL) {
-		ret = ac->ops->create_authorizer(
-			ac, CEPH_ENTITY_TYPE_OSD,
-			&o->o_authorizer,
-			&o->o_authorizer_buf,
-			&o->o_authorizer_buf_len,
-			&o->o_authorizer_reply_buf,
-			&o->o_authorizer_reply_buf_len);
+	if (!auth->authorizer && ac->ops && ac->ops->create_authorizer) {
+		int ret = ac->ops->create_authorizer(ac, CEPH_ENTITY_TYPE_OSD,
+							auth);
 		if (ret)
-			return ret;
+			return ERR_PTR(ret);
 	}
-
 	*proto = ac->protocol;
-	*buf = o->o_authorizer_buf;
-	*len = o->o_authorizer_buf_len;
-	*reply_buf = o->o_authorizer_reply_buf;
-	*reply_len = o->o_authorizer_reply_buf_len;
-	return 0;
+
+	return auth;
 }
 
 
@@ -2148,7 +2149,11 @@
 	struct ceph_osd_client *osdc = o->o_osdc;
 	struct ceph_auth_client *ac = osdc->client->monc.auth;
 
-	return ac->ops->verify_authorizer_reply(ac, o->o_authorizer, len);
+	/*
+	 * XXX If ac->ops or ac->ops->verify_authorizer_reply is null,
+	 * XXX which do we do:  succeed or fail?
+	 */
+	return ac->ops->verify_authorizer_reply(ac, o->o_auth.authorizer, len);
 }
 
 static int invalidate_authorizer(struct ceph_connection *con)
@@ -2157,7 +2162,7 @@
 	struct ceph_osd_client *osdc = o->o_osdc;
 	struct ceph_auth_client *ac = osdc->client->monc.auth;
 
-	if (ac->ops->invalidate_authorizer)
+	if (ac->ops && ac->ops->invalidate_authorizer)
 		ac->ops->invalidate_authorizer(ac, CEPH_ENTITY_TYPE_OSD);
 
 	return ceph_monc_validate_auth(&osdc->client->monc);
diff --git a/net/ceph/osdmap.c b/net/ceph/osdmap.c
index 56e561a..81e3b84 100644
--- a/net/ceph/osdmap.c
+++ b/net/ceph/osdmap.c
@@ -161,13 +161,6 @@
 	c->max_rules = ceph_decode_32(p);
 	c->max_devices = ceph_decode_32(p);
 
-	c->device_parents = kcalloc(c->max_devices, sizeof(u32), GFP_NOFS);
-	if (c->device_parents == NULL)
-		goto badmem;
-	c->bucket_parents = kcalloc(c->max_buckets, sizeof(u32), GFP_NOFS);
-	if (c->bucket_parents == NULL)
-		goto badmem;
-
 	c->buckets = kcalloc(c->max_buckets, sizeof(*c->buckets), GFP_NOFS);
 	if (c->buckets == NULL)
 		goto badmem;
@@ -890,8 +883,12 @@
 		pglen = ceph_decode_32(p);
 
 		if (pglen) {
-			/* insert */
 			ceph_decode_need(p, end, pglen*sizeof(u32), bad);
+
+			/* removing existing (if any) */
+			(void) __remove_pg_mapping(&map->pg_temp, pgid);
+
+			/* insert */
 			pg = kmalloc(sizeof(*pg) + sizeof(u32)*pglen, GFP_NOFS);
 			if (!pg) {
 				err = -ENOMEM;
@@ -1000,7 +997,6 @@
 {
 	unsigned int num, num_mask;
 	struct ceph_pg pgid;
-	s32 preferred = (s32)le32_to_cpu(fl->fl_pg_preferred);
 	int poolid = le32_to_cpu(fl->fl_pg_pool);
 	struct ceph_pg_pool_info *pool;
 	unsigned int ps;
@@ -1011,23 +1007,13 @@
 	if (!pool)
 		return -EIO;
 	ps = ceph_str_hash(pool->v.object_hash, oid, strlen(oid));
-	if (preferred >= 0) {
-		ps += preferred;
-		num = le32_to_cpu(pool->v.lpg_num);
-		num_mask = pool->lpg_num_mask;
-	} else {
-		num = le32_to_cpu(pool->v.pg_num);
-		num_mask = pool->pg_num_mask;
-	}
+	num = le32_to_cpu(pool->v.pg_num);
+	num_mask = pool->pg_num_mask;
 
 	pgid.ps = cpu_to_le16(ps);
-	pgid.preferred = cpu_to_le16(preferred);
+	pgid.preferred = cpu_to_le16(-1);
 	pgid.pool = fl->fl_pg_pool;
-	if (preferred >= 0)
-		dout("calc_object_layout '%s' pgid %d.%xp%d\n", oid, poolid, ps,
-		     (int)preferred);
-	else
-		dout("calc_object_layout '%s' pgid %d.%x\n", oid, poolid, ps);
+	dout("calc_object_layout '%s' pgid %d.%x\n", oid, poolid, ps);
 
 	ol->ol_pgid = pgid;
 	ol->ol_stripe_unit = fl->fl_object_stripe_unit;
@@ -1045,24 +1031,18 @@
 	struct ceph_pg_mapping *pg;
 	struct ceph_pg_pool_info *pool;
 	int ruleno;
-	unsigned int poolid, ps, pps, t;
-	int preferred;
+	unsigned int poolid, ps, pps, t, r;
 
 	poolid = le32_to_cpu(pgid.pool);
 	ps = le16_to_cpu(pgid.ps);
-	preferred = (s16)le16_to_cpu(pgid.preferred);
 
 	pool = __lookup_pg_pool(&osdmap->pg_pools, poolid);
 	if (!pool)
 		return NULL;
 
 	/* pg_temp? */
-	if (preferred >= 0)
-		t = ceph_stable_mod(ps, le32_to_cpu(pool->v.lpg_num),
-				    pool->lpgp_num_mask);
-	else
-		t = ceph_stable_mod(ps, le32_to_cpu(pool->v.pg_num),
-				    pool->pgp_num_mask);
+	t = ceph_stable_mod(ps, le32_to_cpu(pool->v.pg_num),
+			    pool->pgp_num_mask);
 	pgid.ps = cpu_to_le16(t);
 	pg = __lookup_pg_mapping(&osdmap->pg_temp, pgid);
 	if (pg) {
@@ -1080,23 +1060,20 @@
 		return NULL;
 	}
 
-	/* don't forcefeed bad device ids to crush */
-	if (preferred >= osdmap->max_osd ||
-	    preferred >= osdmap->crush->max_devices)
-		preferred = -1;
-
-	if (preferred >= 0)
-		pps = ceph_stable_mod(ps,
-				      le32_to_cpu(pool->v.lpgp_num),
-				      pool->lpgp_num_mask);
-	else
-		pps = ceph_stable_mod(ps,
-				      le32_to_cpu(pool->v.pgp_num),
-				      pool->pgp_num_mask);
+	pps = ceph_stable_mod(ps,
+			      le32_to_cpu(pool->v.pgp_num),
+			      pool->pgp_num_mask);
 	pps += poolid;
-	*num = crush_do_rule(osdmap->crush, ruleno, pps, osds,
-			     min_t(int, pool->v.size, *num),
-			     preferred, osdmap->osd_weight);
+	r = crush_do_rule(osdmap->crush, ruleno, pps, osds,
+			  min_t(int, pool->v.size, *num),
+			  osdmap->osd_weight);
+	if (r < 0) {
+		pr_err("error %d from crush rule: pool %d ruleset %d type %d"
+		       " size %d\n", r, poolid, pool->v.crush_ruleset,
+		       pool->v.type, pool->v.size);
+		return NULL;
+	}
+	*num = r;
 	return osds;
 }
 
diff --git a/net/core/dev.c b/net/core/dev.c
index cd09819..6df2140 100644
--- a/net/core/dev.c
+++ b/net/core/dev.c
@@ -2089,25 +2089,6 @@
 	return 0;
 }
 
-/*
- * Try to orphan skb early, right before transmission by the device.
- * We cannot orphan skb if tx timestamp is requested or the sk-reference
- * is needed on driver level for other reasons, e.g. see net/can/raw.c
- */
-static inline void skb_orphan_try(struct sk_buff *skb)
-{
-	struct sock *sk = skb->sk;
-
-	if (sk && !skb_shinfo(skb)->tx_flags) {
-		/* skb_tx_hash() wont be able to get sk.
-		 * We copy sk_hash into skb->rxhash
-		 */
-		if (!skb->rxhash)
-			skb->rxhash = sk->sk_hash;
-		skb_orphan(skb);
-	}
-}
-
 static bool can_checksum_protocol(netdev_features_t features, __be16 protocol)
 {
 	return ((features & NETIF_F_GEN_CSUM) ||
@@ -2193,8 +2174,6 @@
 		if (!list_empty(&ptype_all))
 			dev_queue_xmit_nit(skb, dev);
 
-		skb_orphan_try(skb);
-
 		features = netif_skb_features(skb);
 
 		if (vlan_tx_tag_present(skb) &&
@@ -2304,7 +2283,7 @@
 	if (skb->sk && skb->sk->sk_hash)
 		hash = skb->sk->sk_hash;
 	else
-		hash = (__force u16) skb->protocol ^ skb->rxhash;
+		hash = (__force u16) skb->protocol;
 	hash = jhash_1word(hash, hashrnd);
 
 	return (u16) (((u64) hash * qcount) >> 32) + qoffset;
diff --git a/net/core/drop_monitor.c b/net/core/drop_monitor.c
index 3252e7e..d23b668 100644
--- a/net/core/drop_monitor.c
+++ b/net/core/drop_monitor.c
@@ -36,9 +36,6 @@
 #define TRACE_ON 1
 #define TRACE_OFF 0
 
-static void send_dm_alert(struct work_struct *unused);
-
-
 /*
  * Globals, our netlink socket pointer
  * and the work handle that will send up
@@ -48,11 +45,10 @@
 static DEFINE_MUTEX(trace_state_mutex);
 
 struct per_cpu_dm_data {
-	struct work_struct dm_alert_work;
-	struct sk_buff __rcu *skb;
-	atomic_t dm_hit_count;
-	struct timer_list send_timer;
-	int cpu;
+	spinlock_t		lock;
+	struct sk_buff		*skb;
+	struct work_struct	dm_alert_work;
+	struct timer_list	send_timer;
 };
 
 struct dm_hw_stat_delta {
@@ -78,13 +74,13 @@
 static unsigned long dm_hw_check_delta = 2*HZ;
 static LIST_HEAD(hw_stats_list);
 
-static void reset_per_cpu_data(struct per_cpu_dm_data *data)
+static struct sk_buff *reset_per_cpu_data(struct per_cpu_dm_data *data)
 {
 	size_t al;
 	struct net_dm_alert_msg *msg;
 	struct nlattr *nla;
 	struct sk_buff *skb;
-	struct sk_buff *oskb = rcu_dereference_protected(data->skb, 1);
+	unsigned long flags;
 
 	al = sizeof(struct net_dm_alert_msg);
 	al += dm_hit_limit * sizeof(struct net_dm_drop_point);
@@ -99,65 +95,40 @@
 				  sizeof(struct net_dm_alert_msg));
 		msg = nla_data(nla);
 		memset(msg, 0, al);
-	} else
-		schedule_work_on(data->cpu, &data->dm_alert_work);
-
-	/*
-	 * Don't need to lock this, since we are guaranteed to only
-	 * run this on a single cpu at a time.
-	 * Note also that we only update data->skb if the old and new skb
-	 * pointers don't match.  This ensures that we don't continually call
-	 * synchornize_rcu if we repeatedly fail to alloc a new netlink message.
-	 */
-	if (skb != oskb) {
-		rcu_assign_pointer(data->skb, skb);
-
-		synchronize_rcu();
-
-		atomic_set(&data->dm_hit_count, dm_hit_limit);
+	} else {
+		mod_timer(&data->send_timer, jiffies + HZ / 10);
 	}
 
+	spin_lock_irqsave(&data->lock, flags);
+	swap(data->skb, skb);
+	spin_unlock_irqrestore(&data->lock, flags);
+
+	return skb;
 }
 
-static void send_dm_alert(struct work_struct *unused)
+static void send_dm_alert(struct work_struct *work)
 {
 	struct sk_buff *skb;
-	struct per_cpu_dm_data *data = &get_cpu_var(dm_cpu_data);
+	struct per_cpu_dm_data *data;
 
-	WARN_ON_ONCE(data->cpu != smp_processor_id());
+	data = container_of(work, struct per_cpu_dm_data, dm_alert_work);
 
-	/*
-	 * Grab the skb we're about to send
-	 */
-	skb = rcu_dereference_protected(data->skb, 1);
+	skb = reset_per_cpu_data(data);
 
-	/*
-	 * Replace it with a new one
-	 */
-	reset_per_cpu_data(data);
-
-	/*
-	 * Ship it!
-	 */
 	if (skb)
 		genlmsg_multicast(skb, 0, NET_DM_GRP_ALERT, GFP_KERNEL);
-
-	put_cpu_var(dm_cpu_data);
 }
 
 /*
  * This is the timer function to delay the sending of an alert
  * in the event that more drops will arrive during the
- * hysteresis period.  Note that it operates under the timer interrupt
- * so we don't need to disable preemption here
+ * hysteresis period.
  */
-static void sched_send_work(unsigned long unused)
+static void sched_send_work(unsigned long _data)
 {
-	struct per_cpu_dm_data *data =  &get_cpu_var(dm_cpu_data);
+	struct per_cpu_dm_data *data = (struct per_cpu_dm_data *)_data;
 
-	schedule_work_on(smp_processor_id(), &data->dm_alert_work);
-
-	put_cpu_var(dm_cpu_data);
+	schedule_work(&data->dm_alert_work);
 }
 
 static void trace_drop_common(struct sk_buff *skb, void *location)
@@ -167,33 +138,28 @@
 	struct nlattr *nla;
 	int i;
 	struct sk_buff *dskb;
-	struct per_cpu_dm_data *data = &get_cpu_var(dm_cpu_data);
+	struct per_cpu_dm_data *data;
+	unsigned long flags;
 
-
-	rcu_read_lock();
-	dskb = rcu_dereference(data->skb);
+	local_irq_save(flags);
+	data = &__get_cpu_var(dm_cpu_data);
+	spin_lock(&data->lock);
+	dskb = data->skb;
 
 	if (!dskb)
 		goto out;
 
-	if (!atomic_add_unless(&data->dm_hit_count, -1, 0)) {
-		/*
-		 * we're already at zero, discard this hit
-		 */
-		goto out;
-	}
-
 	nlh = (struct nlmsghdr *)dskb->data;
 	nla = genlmsg_data(nlmsg_data(nlh));
 	msg = nla_data(nla);
 	for (i = 0; i < msg->entries; i++) {
 		if (!memcmp(&location, msg->points[i].pc, sizeof(void *))) {
 			msg->points[i].count++;
-			atomic_inc(&data->dm_hit_count);
 			goto out;
 		}
 	}
-
+	if (msg->entries == dm_hit_limit)
+		goto out;
 	/*
 	 * We need to create a new entry
 	 */
@@ -205,13 +171,11 @@
 
 	if (!timer_pending(&data->send_timer)) {
 		data->send_timer.expires = jiffies + dm_delay * HZ;
-		add_timer_on(&data->send_timer, smp_processor_id());
+		add_timer(&data->send_timer);
 	}
 
 out:
-	rcu_read_unlock();
-	put_cpu_var(dm_cpu_data);
-	return;
+	spin_unlock_irqrestore(&data->lock, flags);
 }
 
 static void trace_kfree_skb_hit(void *ignore, struct sk_buff *skb, void *location)
@@ -418,11 +382,11 @@
 
 	for_each_possible_cpu(cpu) {
 		data = &per_cpu(dm_cpu_data, cpu);
-		data->cpu = cpu;
 		INIT_WORK(&data->dm_alert_work, send_dm_alert);
 		init_timer(&data->send_timer);
-		data->send_timer.data = cpu;
+		data->send_timer.data = (unsigned long)data;
 		data->send_timer.function = sched_send_work;
+		spin_lock_init(&data->lock);
 		reset_per_cpu_data(data);
 	}
 
@@ -468,3 +432,4 @@
 
 MODULE_LICENSE("GPL v2");
 MODULE_AUTHOR("Neil Horman <nhorman@tuxdriver.com>");
+MODULE_ALIAS_GENL_FAMILY("NET_DM");
diff --git a/net/core/filter.c b/net/core/filter.c
index a3eddb5..d4ce2dc 100644
--- a/net/core/filter.c
+++ b/net/core/filter.c
@@ -616,9 +616,9 @@
 /**
  *	sk_unattached_filter_create - create an unattached filter
  *	@fprog: the filter program
- *	@sk: the socket to use
+ *	@pfp: the unattached filter that is created
  *
- * Create a filter independent ofr any socket. We first run some
+ * Create a filter independent of any socket. We first run some
  * sanity checks on it to make sure it does not explode on us later.
  * If an error occurs or there is insufficient memory for the filter
  * a negative errno code is returned. On success the return is zero.
diff --git a/net/core/neighbour.c b/net/core/neighbour.c
index eb09f8b..d81d026 100644
--- a/net/core/neighbour.c
+++ b/net/core/neighbour.c
@@ -2219,9 +2219,7 @@
 	rcu_read_lock_bh();
 	nht = rcu_dereference_bh(tbl->nht);
 
-	for (h = 0; h < (1 << nht->hash_shift); h++) {
-		if (h < s_h)
-			continue;
+	for (h = s_h; h < (1 << nht->hash_shift); h++) {
 		if (h > s_h)
 			s_idx = 0;
 		for (n = rcu_dereference_bh(nht->hash_buckets[h]), idx = 0;
@@ -2260,9 +2258,7 @@
 
 	read_lock_bh(&tbl->lock);
 
-	for (h = 0; h <= PNEIGH_HASHMASK; h++) {
-		if (h < s_h)
-			continue;
+	for (h = s_h; h <= PNEIGH_HASHMASK; h++) {
 		if (h > s_h)
 			s_idx = 0;
 		for (n = tbl->phash_buckets[h], idx = 0; n; n = n->next) {
@@ -2297,7 +2293,7 @@
 	struct neigh_table *tbl;
 	int t, family, s_t;
 	int proxy = 0;
-	int err = 0;
+	int err;
 
 	read_lock(&neigh_tbl_lock);
 	family = ((struct rtgenmsg *) nlmsg_data(cb->nlh))->rtgen_family;
@@ -2311,7 +2307,7 @@
 
 	s_t = cb->args[0];
 
-	for (tbl = neigh_tables, t = 0; tbl && (err >= 0);
+	for (tbl = neigh_tables, t = 0; tbl;
 	     tbl = tbl->next, t++) {
 		if (t < s_t || (family && tbl->family != family))
 			continue;
@@ -2322,6 +2318,8 @@
 			err = pneigh_dump_table(tbl, skb, cb);
 		else
 			err = neigh_dump_table(tbl, skb, cb);
+		if (err < 0)
+			break;
 	}
 	read_unlock(&neigh_tbl_lock);
 
diff --git a/net/core/netpoll.c b/net/core/netpoll.c
index 3d84fb9..f9f40b9 100644
--- a/net/core/netpoll.c
+++ b/net/core/netpoll.c
@@ -362,22 +362,23 @@
 
 void netpoll_send_udp(struct netpoll *np, const char *msg, int len)
 {
-	int total_len, eth_len, ip_len, udp_len;
+	int total_len, ip_len, udp_len;
 	struct sk_buff *skb;
 	struct udphdr *udph;
 	struct iphdr *iph;
 	struct ethhdr *eth;
 
 	udp_len = len + sizeof(*udph);
-	ip_len = eth_len = udp_len + sizeof(*iph);
-	total_len = eth_len + ETH_HLEN + NET_IP_ALIGN;
+	ip_len = udp_len + sizeof(*iph);
+	total_len = ip_len + LL_RESERVED_SPACE(np->dev);
 
-	skb = find_skb(np, total_len, total_len - len);
+	skb = find_skb(np, total_len + np->dev->needed_tailroom,
+		       total_len - len);
 	if (!skb)
 		return;
 
 	skb_copy_to_linear_data(skb, msg, len);
-	skb->len += len;
+	skb_put(skb, len);
 
 	skb_push(skb, sizeof(*udph));
 	skb_reset_transport_header(skb);
diff --git a/net/core/skbuff.c b/net/core/skbuff.c
index 016694d..d78671e 100644
--- a/net/core/skbuff.c
+++ b/net/core/skbuff.c
@@ -3361,7 +3361,7 @@
  * @to: prior buffer
  * @from: buffer to add
  * @fragstolen: pointer to boolean
- *
+ * @delta_truesize: how much more was allocated than was requested
  */
 bool skb_try_coalesce(struct sk_buff *to, struct sk_buff *from,
 		      bool *fragstolen, int *delta_truesize)
diff --git a/net/core/sock.c b/net/core/sock.c
index 653f8c0..9e5b71f 100644
--- a/net/core/sock.c
+++ b/net/core/sock.c
@@ -1592,6 +1592,11 @@
 	gfp_t gfp_mask;
 	long timeo;
 	int err;
+	int npages = (data_len + (PAGE_SIZE - 1)) >> PAGE_SHIFT;
+
+	err = -EMSGSIZE;
+	if (npages > MAX_SKB_FRAGS)
+		goto failure;
 
 	gfp_mask = sk->sk_allocation;
 	if (gfp_mask & __GFP_WAIT)
@@ -1610,14 +1615,12 @@
 		if (atomic_read(&sk->sk_wmem_alloc) < sk->sk_sndbuf) {
 			skb = alloc_skb(header_len, gfp_mask);
 			if (skb) {
-				int npages;
 				int i;
 
 				/* No pages, we're done... */
 				if (!data_len)
 					break;
 
-				npages = (data_len + (PAGE_SIZE - 1)) >> PAGE_SHIFT;
 				skb->truesize += data_len;
 				skb_shinfo(skb)->nr_frags = npages;
 				for (i = 0; i < npages; i++) {
diff --git a/net/ipv4/esp4.c b/net/ipv4/esp4.c
index 89a47b3..cb982a6 100644
--- a/net/ipv4/esp4.c
+++ b/net/ipv4/esp4.c
@@ -459,28 +459,22 @@
 	struct esp_data *esp = x->data;
 	u32 blksize = ALIGN(crypto_aead_blocksize(esp->aead), 4);
 	u32 align = max_t(u32, blksize, esp->padlen);
-	u32 rem;
-
-	mtu -= x->props.header_len + crypto_aead_authsize(esp->aead);
-	rem = mtu & (align - 1);
-	mtu &= ~(align - 1);
+	unsigned int net_adj;
 
 	switch (x->props.mode) {
+	case XFRM_MODE_TRANSPORT:
+	case XFRM_MODE_BEET:
+		net_adj = sizeof(struct iphdr);
+		break;
 	case XFRM_MODE_TUNNEL:
+		net_adj = 0;
 		break;
 	default:
-	case XFRM_MODE_TRANSPORT:
-		/* The worst case */
-		mtu -= blksize - 4;
-		mtu += min_t(u32, blksize - 4, rem);
-		break;
-	case XFRM_MODE_BEET:
-		/* The worst case. */
-		mtu += min_t(u32, IPV4_BEET_PHMAXLEN, rem);
-		break;
+		BUG();
 	}
 
-	return mtu - 2;
+	return ((mtu - x->props.header_len - crypto_aead_authsize(esp->aead) -
+		 net_adj) & ~(align - 1)) + (net_adj - 2);
 }
 
 static void esp4_err(struct sk_buff *skb, u32 info)
diff --git a/net/ipv4/inet_connection_sock.c b/net/ipv4/inet_connection_sock.c
index 95e61596..f9ee741 100644
--- a/net/ipv4/inet_connection_sock.c
+++ b/net/ipv4/inet_connection_sock.c
@@ -377,7 +377,8 @@
 
 	flowi4_init_output(fl4, sk->sk_bound_dev_if, sk->sk_mark,
 			   RT_CONN_FLAGS(sk), RT_SCOPE_UNIVERSE,
-			   sk->sk_protocol, inet_sk_flowi_flags(sk),
+			   sk->sk_protocol,
+			   inet_sk_flowi_flags(sk) & ~FLOWI_FLAG_PRECOW_METRICS,
 			   (opt && opt->opt.srr) ? opt->opt.faddr : ireq->rmt_addr,
 			   ireq->loc_addr, ireq->rmt_port, inet_sk(sk)->inet_sport);
 	security_req_classify_flow(req, flowi4_to_flowi(fl4));
diff --git a/net/ipv4/inetpeer.c b/net/ipv4/inetpeer.c
index d4d61b6..dfba343 100644
--- a/net/ipv4/inetpeer.c
+++ b/net/ipv4/inetpeer.c
@@ -560,6 +560,17 @@
 }
 EXPORT_SYMBOL(inet_peer_xrlim_allow);
 
+static void inetpeer_inval_rcu(struct rcu_head *head)
+{
+	struct inet_peer *p = container_of(head, struct inet_peer, gc_rcu);
+
+	spin_lock_bh(&gc_lock);
+	list_add_tail(&p->gc_list, &gc_list);
+	spin_unlock_bh(&gc_lock);
+
+	schedule_delayed_work(&gc_work, gc_delay);
+}
+
 void inetpeer_invalidate_tree(int family)
 {
 	struct inet_peer *old, *new, *prev;
@@ -576,10 +587,7 @@
 	prev = cmpxchg(&base->root, old, new);
 	if (prev == old) {
 		base->total = 0;
-		spin_lock(&gc_lock);
-		list_add_tail(&prev->gc_list, &gc_list);
-		spin_unlock(&gc_lock);
-		schedule_delayed_work(&gc_work, gc_delay);
+		call_rcu(&prev->gc_rcu, inetpeer_inval_rcu);
 	}
 
 out:
diff --git a/net/ipv4/ip_forward.c b/net/ipv4/ip_forward.c
index e5c44fc..ab09b12 100644
--- a/net/ipv4/ip_forward.c
+++ b/net/ipv4/ip_forward.c
@@ -44,6 +44,7 @@
 	struct ip_options *opt	= &(IPCB(skb)->opt);
 
 	IP_INC_STATS_BH(dev_net(skb_dst(skb)->dev), IPSTATS_MIB_OUTFORWDATAGRAMS);
+	IP_ADD_STATS_BH(dev_net(skb_dst(skb)->dev), IPSTATS_MIB_OUTOCTETS, skb->len);
 
 	if (unlikely(opt->optlen))
 		ip_forward_options(skb);
diff --git a/net/ipv4/ipmr.c b/net/ipv4/ipmr.c
index a9e519a..c94bbc6 100644
--- a/net/ipv4/ipmr.c
+++ b/net/ipv4/ipmr.c
@@ -1574,6 +1574,7 @@
 	struct ip_options *opt = &(IPCB(skb)->opt);
 
 	IP_INC_STATS_BH(dev_net(skb_dst(skb)->dev), IPSTATS_MIB_OUTFORWDATAGRAMS);
+	IP_ADD_STATS_BH(dev_net(skb_dst(skb)->dev), IPSTATS_MIB_OUTOCTETS, skb->len);
 
 	if (unlikely(opt->optlen))
 		ip_forward_options(skb);
diff --git a/net/ipv4/tcp_ipv4.c b/net/ipv4/tcp_ipv4.c
index a43b87d..c8d28c4 100644
--- a/net/ipv4/tcp_ipv4.c
+++ b/net/ipv4/tcp_ipv4.c
@@ -824,7 +824,8 @@
  */
 static int tcp_v4_send_synack(struct sock *sk, struct dst_entry *dst,
 			      struct request_sock *req,
-			      struct request_values *rvp)
+			      struct request_values *rvp,
+			      u16 queue_mapping)
 {
 	const struct inet_request_sock *ireq = inet_rsk(req);
 	struct flowi4 fl4;
@@ -840,6 +841,7 @@
 	if (skb) {
 		__tcp_v4_send_check(skb, ireq->loc_addr, ireq->rmt_addr);
 
+		skb_set_queue_mapping(skb, queue_mapping);
 		err = ip_build_and_send_pkt(skb, sk, ireq->loc_addr,
 					    ireq->rmt_addr,
 					    ireq->opt);
@@ -854,7 +856,7 @@
 			      struct request_values *rvp)
 {
 	TCP_INC_STATS_BH(sock_net(sk), TCP_MIB_RETRANSSEGS);
-	return tcp_v4_send_synack(sk, NULL, req, rvp);
+	return tcp_v4_send_synack(sk, NULL, req, rvp, 0);
 }
 
 /*
@@ -1422,7 +1424,8 @@
 	tcp_rsk(req)->snt_synack = tcp_time_stamp;
 
 	if (tcp_v4_send_synack(sk, dst, req,
-			       (struct request_values *)&tmp_ext) ||
+			       (struct request_values *)&tmp_ext,
+			       skb_get_queue_mapping(skb)) ||
 	    want_cookie)
 		goto drop_and_free;
 
diff --git a/net/ipv4/tcp_memcontrol.c b/net/ipv4/tcp_memcontrol.c
index 1517037..b6f3583 100644
--- a/net/ipv4/tcp_memcontrol.c
+++ b/net/ipv4/tcp_memcontrol.c
@@ -74,9 +74,6 @@
 	percpu_counter_destroy(&tcp->tcp_sockets_allocated);
 
 	val = res_counter_read_u64(&tcp->tcp_memory_allocated, RES_LIMIT);
-
-	if (val != RESOURCE_MAX)
-		static_key_slow_dec(&memcg_socket_limit_enabled);
 }
 EXPORT_SYMBOL(tcp_destroy_cgroup);
 
@@ -107,10 +104,33 @@
 		tcp->tcp_prot_mem[i] = min_t(long, val >> PAGE_SHIFT,
 					     net->ipv4.sysctl_tcp_mem[i]);
 
-	if (val == RESOURCE_MAX && old_lim != RESOURCE_MAX)
-		static_key_slow_dec(&memcg_socket_limit_enabled);
-	else if (old_lim == RESOURCE_MAX && val != RESOURCE_MAX)
-		static_key_slow_inc(&memcg_socket_limit_enabled);
+	if (val == RESOURCE_MAX)
+		clear_bit(MEMCG_SOCK_ACTIVE, &cg_proto->flags);
+	else if (val != RESOURCE_MAX) {
+		/*
+		 * The active bit needs to be written after the static_key
+		 * update. This is what guarantees that the socket activation
+		 * function is the last one to run. See sock_update_memcg() for
+		 * details, and note that we don't mark any socket as belonging
+		 * to this memcg until that flag is up.
+		 *
+		 * We need to do this, because static_keys will span multiple
+		 * sites, but we can't control their order. If we mark a socket
+		 * as accounted, but the accounting functions are not patched in
+		 * yet, we'll lose accounting.
+		 *
+		 * We never race with the readers in sock_update_memcg(),
+		 * because when this value changes, the code to process it is not
+		 * patched in yet.
+		 *
+		 * The activated bit is used to guarantee that no two writers
+		 * will do the update in the same memcg. Without that, we can't
+		 * properly shut down the static key.
+		 */
+		if (!test_and_set_bit(MEMCG_SOCK_ACTIVATED, &cg_proto->flags))
+			static_key_slow_inc(&memcg_socket_limit_enabled);
+		set_bit(MEMCG_SOCK_ACTIVE, &cg_proto->flags);
+	}
 
 	return 0;
 }
diff --git a/net/ipv6/esp6.c b/net/ipv6/esp6.c
index 1e62b755..db1521f 100644
--- a/net/ipv6/esp6.c
+++ b/net/ipv6/esp6.c
@@ -413,19 +413,15 @@
 	struct esp_data *esp = x->data;
 	u32 blksize = ALIGN(crypto_aead_blocksize(esp->aead), 4);
 	u32 align = max_t(u32, blksize, esp->padlen);
-	u32 rem;
+	unsigned int net_adj;
 
-	mtu -= x->props.header_len + crypto_aead_authsize(esp->aead);
-	rem = mtu & (align - 1);
-	mtu &= ~(align - 1);
+	if (x->props.mode != XFRM_MODE_TUNNEL)
+		net_adj = sizeof(struct ipv6hdr);
+	else
+		net_adj = 0;
 
-	if (x->props.mode != XFRM_MODE_TUNNEL) {
-		u32 padsize = ((blksize - 1) & 7) + 1;
-		mtu -= blksize - padsize;
-		mtu += min_t(u32, blksize - padsize, rem);
-	}
-
-	return mtu - 2;
+	return ((mtu - x->props.header_len - crypto_aead_authsize(esp->aead) -
+		 net_adj) & ~(align - 1)) + (net_adj - 2);
 }
 
 static void esp6_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
diff --git a/net/ipv6/ip6_fib.c b/net/ipv6/ip6_fib.c
index 0c220a4..6083276 100644
--- a/net/ipv6/ip6_fib.c
+++ b/net/ipv6/ip6_fib.c
@@ -1349,8 +1349,8 @@
 			if (w->leaf && fn->fn_flags & RTN_RTINFO) {
 				int err;
 
-				if (w->count < w->skip) {
-					w->count++;
+				if (w->skip) {
+					w->skip--;
 					continue;
 				}
 
@@ -1561,7 +1561,7 @@
 				neigh_flags = neigh->flags;
 				neigh_release(neigh);
 			}
-			if (neigh_flags & NTF_ROUTER) {
+			if (!(neigh_flags & NTF_ROUTER)) {
 				RT6_TRACE("purging route %p via non-router but gateway\n",
 					  rt);
 				return -1;
diff --git a/net/ipv6/ip6_output.c b/net/ipv6/ip6_output.c
index d99fdc6..decc21d1 100644
--- a/net/ipv6/ip6_output.c
+++ b/net/ipv6/ip6_output.c
@@ -526,6 +526,7 @@
 	hdr->hop_limit--;
 
 	IP6_INC_STATS_BH(net, ip6_dst_idev(dst), IPSTATS_MIB_OUTFORWDATAGRAMS);
+	IP6_ADD_STATS_BH(net, ip6_dst_idev(dst), IPSTATS_MIB_OUTOCTETS, skb->len);
 	return NF_HOOK(NFPROTO_IPV6, NF_INET_FORWARD, skb, skb->dev, dst->dev,
 		       ip6_forward_finish);
 
@@ -1187,6 +1188,29 @@
 	return src ? kmemdup(src, (src->hdrlen + 1) * 8, gfp) : NULL;
 }
 
+static void ip6_append_data_mtu(int *mtu,
+				int *maxfraglen,
+				unsigned int fragheaderlen,
+				struct sk_buff *skb,
+				struct rt6_info *rt)
+{
+	if (!(rt->dst.flags & DST_XFRM_TUNNEL)) {
+		if (skb == NULL) {
+			/* first fragment, reserve header_len */
+			*mtu = *mtu - rt->dst.header_len;
+
+		} else {
+			/*
+			 * this fragment is not the first; the header
+			 * space is regarded as data space.
+			 */
+			*mtu = dst_mtu(rt->dst.path);
+		}
+		*maxfraglen = ((*mtu - fragheaderlen) & ~7)
+			      + fragheaderlen - sizeof(struct frag_hdr);
+	}
+}
+
 int ip6_append_data(struct sock *sk, int getfrag(void *from, char *to,
 	int offset, int len, int odd, struct sk_buff *skb),
 	void *from, int length, int transhdrlen,
@@ -1196,7 +1220,7 @@
 	struct inet_sock *inet = inet_sk(sk);
 	struct ipv6_pinfo *np = inet6_sk(sk);
 	struct inet_cork *cork;
-	struct sk_buff *skb;
+	struct sk_buff *skb, *skb_prev = NULL;
 	unsigned int maxfraglen, fragheaderlen;
 	int exthdrlen;
 	int dst_exthdrlen;
@@ -1253,8 +1277,12 @@
 		inet->cork.fl.u.ip6 = *fl6;
 		np->cork.hop_limit = hlimit;
 		np->cork.tclass = tclass;
-		mtu = np->pmtudisc == IPV6_PMTUDISC_PROBE ?
-		      rt->dst.dev->mtu : dst_mtu(&rt->dst);
+		if (rt->dst.flags & DST_XFRM_TUNNEL)
+			mtu = np->pmtudisc == IPV6_PMTUDISC_PROBE ?
+			      rt->dst.dev->mtu : dst_mtu(&rt->dst);
+		else
+			mtu = np->pmtudisc == IPV6_PMTUDISC_PROBE ?
+			      rt->dst.dev->mtu : dst_mtu(rt->dst.path);
 		if (np->frag_size < mtu) {
 			if (np->frag_size)
 				mtu = np->frag_size;
@@ -1350,25 +1378,27 @@
 			unsigned int fraglen;
 			unsigned int fraggap;
 			unsigned int alloclen;
-			struct sk_buff *skb_prev;
 alloc_new_skb:
-			skb_prev = skb;
-
 			/* There's no room in the current skb */
-			if (skb_prev)
-				fraggap = skb_prev->len - maxfraglen;
+			if (skb)
+				fraggap = skb->len - maxfraglen;
 			else
 				fraggap = 0;
+			/* update mtu and maxfraglen if necessary */
+			if (skb == NULL || skb_prev == NULL)
+				ip6_append_data_mtu(&mtu, &maxfraglen,
+						    fragheaderlen, skb, rt);
+
+			skb_prev = skb;
 
 			/*
 			 * If remaining data exceeds the mtu,
 			 * we know we need more fragment(s).
 			 */
 			datalen = length + fraggap;
-			if (datalen > (cork->length <= mtu && !(cork->flags & IPCORK_ALLFRAG) ? mtu : maxfraglen) - fragheaderlen)
-				datalen = maxfraglen - fragheaderlen;
 
-			fraglen = datalen + fragheaderlen;
+			if (datalen > (cork->length <= mtu && !(cork->flags & IPCORK_ALLFRAG) ? mtu : maxfraglen) - fragheaderlen)
+				datalen = maxfraglen - fragheaderlen - rt->dst.trailer_len;
 			if ((flags & MSG_MORE) &&
 			    !(rt->dst.dev->features&NETIF_F_SG))
 				alloclen = mtu;
@@ -1377,13 +1407,16 @@
 
 			alloclen += dst_exthdrlen;
 
-			/*
-			 * The last fragment gets additional space at tail.
-			 * Note: we overallocate on fragments with MSG_MODE
-			 * because we have no idea if we're the last one.
-			 */
-			if (datalen == length + fraggap)
-				alloclen += rt->dst.trailer_len;
+			if (datalen != length + fraggap) {
+				/*
+				 * this is not the last fragment; the trailer
+				 * space is regarded as data space.
+				 */
+				datalen += rt->dst.trailer_len;
+			}
+
+			alloclen += rt->dst.trailer_len;
+			fraglen = datalen + fragheaderlen;
 
 			/*
 			 * We just reserve space for fragment header.
diff --git a/net/ipv6/ip6mr.c b/net/ipv6/ip6mr.c
index b15dc08..461e47c 100644
--- a/net/ipv6/ip6mr.c
+++ b/net/ipv6/ip6mr.c
@@ -1886,6 +1886,8 @@
 {
 	IP6_INC_STATS_BH(dev_net(skb_dst(skb)->dev), ip6_dst_idev(skb_dst(skb)),
 			 IPSTATS_MIB_OUTFORWDATAGRAMS);
+	IP6_ADD_STATS_BH(dev_net(skb_dst(skb)->dev), ip6_dst_idev(skb_dst(skb)),
+			 IPSTATS_MIB_OUTOCTETS, skb->len);
 	return dst_output(skb);
 }
 
diff --git a/net/ipv6/route.c b/net/ipv6/route.c
index 999a982..becb048 100644
--- a/net/ipv6/route.c
+++ b/net/ipv6/route.c
@@ -2957,10 +2957,6 @@
 	net->ipv6.sysctl.ip6_rt_mtu_expires = 10*60*HZ;
 	net->ipv6.sysctl.ip6_rt_min_advmss = IPV6_MIN_MTU - 20 - 40;
 
-#ifdef CONFIG_PROC_FS
-	proc_net_fops_create(net, "ipv6_route", 0, &ipv6_route_proc_fops);
-	proc_net_fops_create(net, "rt6_stats", S_IRUGO, &rt6_stats_seq_fops);
-#endif
 	net->ipv6.ip6_rt_gc_expire = 30*HZ;
 
 	ret = 0;
@@ -2981,10 +2977,6 @@
 
 static void __net_exit ip6_route_net_exit(struct net *net)
 {
-#ifdef CONFIG_PROC_FS
-	proc_net_remove(net, "ipv6_route");
-	proc_net_remove(net, "rt6_stats");
-#endif
 	kfree(net->ipv6.ip6_null_entry);
 #ifdef CONFIG_IPV6_MULTIPLE_TABLES
 	kfree(net->ipv6.ip6_prohibit_entry);
@@ -2993,11 +2985,33 @@
 	dst_entries_destroy(&net->ipv6.ip6_dst_ops);
 }
 
+static int __net_init ip6_route_net_init_late(struct net *net)
+{
+#ifdef CONFIG_PROC_FS
+	proc_net_fops_create(net, "ipv6_route", 0, &ipv6_route_proc_fops);
+	proc_net_fops_create(net, "rt6_stats", S_IRUGO, &rt6_stats_seq_fops);
+#endif
+	return 0;
+}
+
+static void __net_exit ip6_route_net_exit_late(struct net *net)
+{
+#ifdef CONFIG_PROC_FS
+	proc_net_remove(net, "ipv6_route");
+	proc_net_remove(net, "rt6_stats");
+#endif
+}
+
 static struct pernet_operations ip6_route_net_ops = {
 	.init = ip6_route_net_init,
 	.exit = ip6_route_net_exit,
 };
 
+static struct pernet_operations ip6_route_net_late_ops = {
+	.init = ip6_route_net_init_late,
+	.exit = ip6_route_net_exit_late,
+};
+
 static struct notifier_block ip6_route_dev_notifier = {
 	.notifier_call = ip6_route_dev_notify,
 	.priority = 0,
@@ -3047,19 +3061,25 @@
 	if (ret)
 		goto xfrm6_init;
 
+	ret = register_pernet_subsys(&ip6_route_net_late_ops);
+	if (ret)
+		goto fib6_rules_init;
+
 	ret = -ENOBUFS;
 	if (__rtnl_register(PF_INET6, RTM_NEWROUTE, inet6_rtm_newroute, NULL, NULL) ||
 	    __rtnl_register(PF_INET6, RTM_DELROUTE, inet6_rtm_delroute, NULL, NULL) ||
 	    __rtnl_register(PF_INET6, RTM_GETROUTE, inet6_rtm_getroute, NULL, NULL))
-		goto fib6_rules_init;
+		goto out_register_late_subsys;
 
 	ret = register_netdevice_notifier(&ip6_route_dev_notifier);
 	if (ret)
-		goto fib6_rules_init;
+		goto out_register_late_subsys;
 
 out:
 	return ret;
 
+out_register_late_subsys:
+	unregister_pernet_subsys(&ip6_route_net_late_ops);
 fib6_rules_init:
 	fib6_rules_cleanup();
 xfrm6_init:
@@ -3078,6 +3098,7 @@
 void ip6_route_cleanup(void)
 {
 	unregister_netdevice_notifier(&ip6_route_dev_notifier);
+	unregister_pernet_subsys(&ip6_route_net_late_ops);
 	fib6_rules_cleanup();
 	xfrm6_fini();
 	fib6_gc_cleanup();
diff --git a/net/ipv6/tcp_ipv6.c b/net/ipv6/tcp_ipv6.c
index 554d599..9df64a5 100644
--- a/net/ipv6/tcp_ipv6.c
+++ b/net/ipv6/tcp_ipv6.c
@@ -476,7 +476,8 @@
 
 
 static int tcp_v6_send_synack(struct sock *sk, struct request_sock *req,
-			      struct request_values *rvp)
+			      struct request_values *rvp,
+			      u16 queue_mapping)
 {
 	struct inet6_request_sock *treq = inet6_rsk(req);
 	struct ipv6_pinfo *np = inet6_sk(sk);
@@ -513,6 +514,7 @@
 		__tcp_v6_send_check(skb, &treq->loc_addr, &treq->rmt_addr);
 
 		fl6.daddr = treq->rmt_addr;
+		skb_set_queue_mapping(skb, queue_mapping);
 		err = ip6_xmit(sk, skb, &fl6, opt, np->tclass);
 		err = net_xmit_eval(err);
 	}
@@ -528,7 +530,7 @@
 			     struct request_values *rvp)
 {
 	TCP_INC_STATS_BH(sock_net(sk), TCP_MIB_RETRANSSEGS);
-	return tcp_v6_send_synack(sk, req, rvp);
+	return tcp_v6_send_synack(sk, req, rvp, 0);
 }
 
 static void tcp_v6_reqsk_destructor(struct request_sock *req)
@@ -1210,10 +1212,12 @@
 	tcp_rsk(req)->snt_isn = isn;
 	tcp_rsk(req)->snt_synack = tcp_time_stamp;
 
-	security_inet_conn_request(sk, skb, req);
+	if (security_inet_conn_request(sk, skb, req))
+		goto drop_and_release;
 
 	if (tcp_v6_send_synack(sk, req,
-			       (struct request_values *)&tmp_ext) ||
+			       (struct request_values *)&tmp_ext,
+			       skb_get_queue_mapping(skb)) ||
 	    want_cookie)
 		goto drop_and_free;
 
diff --git a/net/iucv/af_iucv.c b/net/iucv/af_iucv.c
index 07d7d55..cd6f7a9 100644
--- a/net/iucv/af_iucv.c
+++ b/net/iucv/af_iucv.c
@@ -372,7 +372,6 @@
 			skb_trim(skb, skb->dev->mtu);
 	}
 	skb->protocol = ETH_P_AF_IUCV;
-	skb_shinfo(skb)->tx_flags |= SKBTX_DRV_NEEDS_SK_REF;
 	nskb = skb_clone(skb, GFP_ATOMIC);
 	if (!nskb)
 		return -ENOMEM;
diff --git a/net/l2tp/l2tp_eth.c b/net/l2tp/l2tp_eth.c
index 443591d..47b259f 100644
--- a/net/l2tp/l2tp_eth.c
+++ b/net/l2tp/l2tp_eth.c
@@ -42,6 +42,11 @@
 	struct sock		*tunnel_sock;
 	struct l2tp_session	*session;
 	struct list_head	list;
+	atomic_long_t		tx_bytes;
+	atomic_long_t		tx_packets;
+	atomic_long_t		rx_bytes;
+	atomic_long_t		rx_packets;
+	atomic_long_t		rx_errors;
 };
 
 /* via l2tp_session_priv() */
@@ -88,24 +93,40 @@
 	struct l2tp_eth *priv = netdev_priv(dev);
 	struct l2tp_session *session = priv->session;
 
+	atomic_long_add(skb->len, &priv->tx_bytes);
+	atomic_long_inc(&priv->tx_packets);
+
 	l2tp_xmit_skb(session, skb, session->hdr_len);
 
-	dev->stats.tx_bytes += skb->len;
-	dev->stats.tx_packets++;
-
-	return 0;
+	return NETDEV_TX_OK;
 }
 
+static struct rtnl_link_stats64 *l2tp_eth_get_stats64(struct net_device *dev,
+						      struct rtnl_link_stats64 *stats)
+{
+	struct l2tp_eth *priv = netdev_priv(dev);
+
+	stats->tx_bytes   = atomic_long_read(&priv->tx_bytes);
+	stats->tx_packets = atomic_long_read(&priv->tx_packets);
+	stats->rx_bytes   = atomic_long_read(&priv->rx_bytes);
+	stats->rx_packets = atomic_long_read(&priv->rx_packets);
+	stats->rx_errors  = atomic_long_read(&priv->rx_errors);
+	return stats;
+}
+
+
 static struct net_device_ops l2tp_eth_netdev_ops = {
 	.ndo_init		= l2tp_eth_dev_init,
 	.ndo_uninit		= l2tp_eth_dev_uninit,
 	.ndo_start_xmit		= l2tp_eth_dev_xmit,
+	.ndo_get_stats64	= l2tp_eth_get_stats64,
 };
 
 static void l2tp_eth_dev_setup(struct net_device *dev)
 {
 	ether_setup(dev);
-	dev->priv_flags &= ~IFF_TX_SKB_SHARING;
+	dev->priv_flags		&= ~IFF_TX_SKB_SHARING;
+	dev->features		|= NETIF_F_LLTX;
 	dev->netdev_ops		= &l2tp_eth_netdev_ops;
 	dev->destructor		= free_netdev;
 }
@@ -114,17 +135,17 @@
 {
 	struct l2tp_eth_sess *spriv = l2tp_session_priv(session);
 	struct net_device *dev = spriv->dev;
+	struct l2tp_eth *priv = netdev_priv(dev);
 
 	if (session->debug & L2TP_MSG_DATA) {
 		unsigned int length;
-		u8 *ptr = skb->data;
 
 		length = min(32u, skb->len);
 		if (!pskb_may_pull(skb, length))
 			goto error;
 
 		pr_debug("%s: eth recv\n", session->name);
-		print_hex_dump_bytes("", DUMP_PREFIX_OFFSET, ptr, length);
+		print_hex_dump_bytes("", DUMP_PREFIX_OFFSET, skb->data, length);
 	}
 
 	if (!pskb_may_pull(skb, sizeof(ETH_HLEN)))
@@ -139,15 +160,15 @@
 	nf_reset(skb);
 
 	if (dev_forward_skb(dev, skb) == NET_RX_SUCCESS) {
-		dev->stats.rx_packets++;
-		dev->stats.rx_bytes += data_len;
-	} else
-		dev->stats.rx_errors++;
-
+		atomic_long_inc(&priv->rx_packets);
+		atomic_long_add(data_len, &priv->rx_bytes);
+	} else {
+		atomic_long_inc(&priv->rx_errors);
+	}
 	return;
 
 error:
-	dev->stats.rx_errors++;
+	atomic_long_inc(&priv->rx_errors);
 	kfree_skb(skb);
 }
 
@@ -162,6 +183,7 @@
 		if (dev) {
 			unregister_netdev(dev);
 			spriv->dev = NULL;
+			module_put(THIS_MODULE);
 		}
 	}
 }
@@ -249,6 +271,7 @@
 	if (rc < 0)
 		goto out_del_dev;
 
+	__module_get(THIS_MODULE);
 	/* Must be done after register_netdev() */
 	strlcpy(session->ifname, dev->name, IFNAMSIZ);
 
diff --git a/net/l2tp/l2tp_ip.c b/net/l2tp/l2tp_ip.c
index 889f5d1..61d8b75 100644
--- a/net/l2tp/l2tp_ip.c
+++ b/net/l2tp/l2tp_ip.c
@@ -239,9 +239,16 @@
 {
 	struct inet_sock *inet = inet_sk(sk);
 	struct sockaddr_l2tpip *addr = (struct sockaddr_l2tpip *) uaddr;
-	int ret = -EINVAL;
+	int ret;
 	int chk_addr_ret;
 
+	if (!sock_flag(sk, SOCK_ZAPPED))
+		return -EINVAL;
+	if (addr_len < sizeof(struct sockaddr_l2tpip))
+		return -EINVAL;
+	if (addr->l2tp_family != AF_INET)
+		return -EINVAL;
+
 	ret = -EADDRINUSE;
 	read_lock_bh(&l2tp_ip_lock);
 	if (__l2tp_ip_bind_lookup(&init_net, addr->l2tp_addr.s_addr, sk->sk_bound_dev_if, addr->l2tp_conn_id))
@@ -272,6 +279,8 @@
 	sk_del_node_init(sk);
 	write_unlock_bh(&l2tp_ip_lock);
 	ret = 0;
+	sock_reset_flag(sk, SOCK_ZAPPED);
+
 out:
 	release_sock(sk);
 
@@ -288,6 +297,9 @@
 	struct sockaddr_l2tpip *lsa = (struct sockaddr_l2tpip *) uaddr;
 	int rc;
 
+	if (sock_flag(sk, SOCK_ZAPPED)) /* Must bind first - autobinding does not work */
+		return -EINVAL;
+
 	if (addr_len < sizeof(*lsa))
 		return -EINVAL;
 
@@ -311,6 +323,14 @@
 	return rc;
 }
 
+static int l2tp_ip_disconnect(struct sock *sk, int flags)
+{
+	if (sock_flag(sk, SOCK_ZAPPED))
+		return 0;
+
+	return udp_disconnect(sk, flags);
+}
+
 static int l2tp_ip_getname(struct socket *sock, struct sockaddr *uaddr,
 			   int *uaddr_len, int peer)
 {
@@ -444,10 +464,12 @@
 					   sk->sk_bound_dev_if);
 		if (IS_ERR(rt))
 			goto no_route;
-		if (connected)
+		if (connected) {
 			sk_setup_caps(sk, &rt->dst);
-		else
-			dst_release(&rt->dst); /* safe since we hold rcu_read_lock */
+		} else {
+			skb_dst_set(skb, &rt->dst);
+			goto xmit;
+		}
 	}
 
 	/* We dont need to clone dst here, it is guaranteed to not disappear.
@@ -455,6 +477,7 @@
 	 */
 	skb_dst_set_noref(skb, &rt->dst);
 
+xmit:
 	/* Queue the packet to IP for output */
 	rc = ip_queue_xmit(skb, &inet->cork.fl);
 	rcu_read_unlock();
@@ -530,7 +553,7 @@
 	.close		   = l2tp_ip_close,
 	.bind		   = l2tp_ip_bind,
 	.connect	   = l2tp_ip_connect,
-	.disconnect	   = udp_disconnect,
+	.disconnect	   = l2tp_ip_disconnect,
 	.ioctl		   = udp_ioctl,
 	.destroy	   = l2tp_ip_destroy_sock,
 	.setsockopt	   = ip_setsockopt,
diff --git a/net/l2tp/l2tp_ip6.c b/net/l2tp/l2tp_ip6.c
index 0291d8d..35e1e4b 100644
--- a/net/l2tp/l2tp_ip6.c
+++ b/net/l2tp/l2tp_ip6.c
@@ -258,6 +258,10 @@
 	int addr_type;
 	int err;
 
+	if (!sock_flag(sk, SOCK_ZAPPED))
+		return -EINVAL;
+	if (addr->l2tp_family != AF_INET6)
+		return -EINVAL;
 	if (addr_len < sizeof(*addr))
 		return -EINVAL;
 
@@ -331,6 +335,7 @@
 	sk_del_node_init(sk);
 	write_unlock_bh(&l2tp_ip6_lock);
 
+	sock_reset_flag(sk, SOCK_ZAPPED);
 	release_sock(sk);
 	return 0;
 
@@ -354,6 +359,9 @@
 	int	addr_type;
 	int rc;
 
+	if (sock_flag(sk, SOCK_ZAPPED)) /* Must bind first - autobinding does not work */
+		return -EINVAL;
+
 	if (addr_len < sizeof(*lsa))
 		return -EINVAL;
 
@@ -383,6 +391,14 @@
 	return rc;
 }
 
+static int l2tp_ip6_disconnect(struct sock *sk, int flags)
+{
+	if (sock_flag(sk, SOCK_ZAPPED))
+		return 0;
+
+	return udp_disconnect(sk, flags);
+}
+
 static int l2tp_ip6_getname(struct socket *sock, struct sockaddr *uaddr,
 			    int *uaddr_len, int peer)
 {
@@ -689,7 +705,7 @@
 	.close		   = l2tp_ip6_close,
 	.bind		   = l2tp_ip6_bind,
 	.connect	   = l2tp_ip6_connect,
-	.disconnect	   = udp_disconnect,
+	.disconnect	   = l2tp_ip6_disconnect,
 	.ioctl		   = udp_ioctl,
 	.destroy	   = l2tp_ip6_destroy_sock,
 	.setsockopt	   = ipv6_setsockopt,
diff --git a/net/l2tp/l2tp_netlink.c b/net/l2tp/l2tp_netlink.c
index 8577264..ddc553e 100644
--- a/net/l2tp/l2tp_netlink.c
+++ b/net/l2tp/l2tp_netlink.c
@@ -923,5 +923,4 @@
 MODULE_DESCRIPTION("L2TP netlink");
 MODULE_LICENSE("GPL");
 MODULE_VERSION("1.0");
-MODULE_ALIAS("net-pf-" __stringify(PF_NETLINK) "-proto-" \
-	     __stringify(NETLINK_GENERIC) "-type-" "l2tp");
+MODULE_ALIAS_GENL_FAMILY("l2tp");
diff --git a/net/mac80211/agg-rx.c b/net/mac80211/agg-rx.c
index 26ddb69..c649188 100644
--- a/net/mac80211/agg-rx.c
+++ b/net/mac80211/agg-rx.c
@@ -145,15 +145,20 @@
 	struct tid_ampdu_rx *tid_rx;
 	unsigned long timeout;
 
+	rcu_read_lock();
 	tid_rx = rcu_dereference(sta->ampdu_mlme.tid_rx[*ptid]);
-	if (!tid_rx)
+	if (!tid_rx) {
+		rcu_read_unlock();
 		return;
+	}
 
 	timeout = tid_rx->last_rx + TU_TO_JIFFIES(tid_rx->timeout);
 	if (time_is_after_jiffies(timeout)) {
 		mod_timer(&tid_rx->session_timer, timeout);
+		rcu_read_unlock();
 		return;
 	}
+	rcu_read_unlock();
 
 #ifdef CONFIG_MAC80211_HT_DEBUG
 	printk(KERN_DEBUG "rx session timer expired on tid %d\n", (u16)*ptid);
diff --git a/net/mac80211/cfg.c b/net/mac80211/cfg.c
index 495831e..7d5108a 100644
--- a/net/mac80211/cfg.c
+++ b/net/mac80211/cfg.c
@@ -533,16 +533,16 @@
 		sinfo.filled = 0;
 		sta_set_sinfo(sta, &sinfo);
 
-		if (sinfo.filled | STATION_INFO_TX_BITRATE)
+		if (sinfo.filled & STATION_INFO_TX_BITRATE)
 			data[i] = 100000 *
 				cfg80211_calculate_bitrate(&sinfo.txrate);
 		i++;
-		if (sinfo.filled | STATION_INFO_RX_BITRATE)
+		if (sinfo.filled & STATION_INFO_RX_BITRATE)
 			data[i] = 100000 *
 				cfg80211_calculate_bitrate(&sinfo.rxrate);
 		i++;
 
-		if (sinfo.filled | STATION_INFO_SIGNAL_AVG)
+		if (sinfo.filled & STATION_INFO_SIGNAL_AVG)
 			data[i] = (u8)sinfo.signal_avg;
 		i++;
 	} else {
@@ -2093,6 +2093,9 @@
 	struct ieee80211_local *local = wdev_priv(dev->ieee80211_ptr);
 	int i, ret;
 
+	if (!ieee80211_sdata_running(sdata))
+		return -ENETDOWN;
+
 	if (local->hw.flags & IEEE80211_HW_HAS_RATE_CONTROL) {
 		ret = drv_set_bitrate_mask(local, sdata, mask);
 		if (ret)
diff --git a/net/mac80211/iface.c b/net/mac80211/iface.c
index d4c19a7..8664111 100644
--- a/net/mac80211/iface.c
+++ b/net/mac80211/iface.c
@@ -637,6 +637,18 @@
 		ieee80211_configure_filter(local);
 		break;
 	default:
+		mutex_lock(&local->mtx);
+		if (local->hw_roc_dev == sdata->dev &&
+		    local->hw_roc_channel) {
+			/* ignore return value since this is racy */
+			drv_cancel_remain_on_channel(local);
+			ieee80211_queue_work(&local->hw, &local->hw_roc_done);
+		}
+		mutex_unlock(&local->mtx);
+
+		flush_work(&local->hw_roc_start);
+		flush_work(&local->hw_roc_done);
+
 		flush_work(&sdata->work);
 		/*
 		 * When we get here, the interface is marked down.
diff --git a/net/mac80211/mlme.c b/net/mac80211/mlme.c
index b3b3c26..66e4fcd 100644
--- a/net/mac80211/mlme.c
+++ b/net/mac80211/mlme.c
@@ -1220,6 +1220,22 @@
 	sdata->vif.bss_conf.qos = true;
 }
 
+static void __ieee80211_stop_poll(struct ieee80211_sub_if_data *sdata)
+{
+	lockdep_assert_held(&sdata->local->mtx);
+
+	sdata->u.mgd.flags &= ~(IEEE80211_STA_CONNECTION_POLL |
+				IEEE80211_STA_BEACON_POLL);
+	ieee80211_run_deferred_scan(sdata->local);
+}
+
+static void ieee80211_stop_poll(struct ieee80211_sub_if_data *sdata)
+{
+	mutex_lock(&sdata->local->mtx);
+	__ieee80211_stop_poll(sdata);
+	mutex_unlock(&sdata->local->mtx);
+}
+
 static u32 ieee80211_handle_bss_capability(struct ieee80211_sub_if_data *sdata,
 					   u16 capab, bool erp_valid, u8 erp)
 {
@@ -1285,8 +1301,7 @@
 	sdata->u.mgd.flags |= IEEE80211_STA_RESET_SIGNAL_AVE;
 
 	/* just to be sure */
-	sdata->u.mgd.flags &= ~(IEEE80211_STA_CONNECTION_POLL |
-				IEEE80211_STA_BEACON_POLL);
+	ieee80211_stop_poll(sdata);
 
 	ieee80211_led_assoc(local, 1);
 
@@ -1337,6 +1352,8 @@
 	if (WARN_ON(!ifmgd->associated))
 		return;
 
+	ieee80211_stop_poll(sdata);
+
 	memcpy(bssid, ifmgd->associated->bssid, ETH_ALEN);
 
 	ifmgd->associated = NULL;
@@ -1456,8 +1473,7 @@
 		return;
 	}
 
-	ifmgd->flags &= ~(IEEE80211_STA_CONNECTION_POLL |
-			  IEEE80211_STA_BEACON_POLL);
+	__ieee80211_stop_poll(sdata);
 
 	mutex_lock(&local->iflist_mtx);
 	ieee80211_recalc_ps(local, -1);
@@ -1477,7 +1493,6 @@
 		  round_jiffies_up(jiffies +
 				   IEEE80211_CONNECTION_IDLE_TIME));
 out:
-	ieee80211_run_deferred_scan(local);
 	mutex_unlock(&local->mtx);
 }
 
@@ -1522,6 +1537,8 @@
 	 * anymore. The timeout will be reset if the frame is ACKed by
 	 * the AP.
 	 */
+	ifmgd->probe_send_count++;
+
 	if (sdata->local->hw.flags & IEEE80211_HW_REPORTS_TX_ACK_STATUS) {
 		ifmgd->nullfunc_failed = false;
 		ieee80211_send_nullfunc(sdata->local, sdata, 0);
@@ -1538,7 +1555,6 @@
 					 0, (u32) -1, true, false);
 	}
 
-	ifmgd->probe_send_count++;
 	ifmgd->probe_timeout = jiffies + msecs_to_jiffies(probe_wait_ms);
 	run_again(ifmgd, ifmgd->probe_timeout);
 	if (sdata->local->hw.flags & IEEE80211_HW_REPORTS_TX_ACK_STATUS)
@@ -2407,7 +2423,11 @@
 		net_dbg_ratelimited("%s: cancelling probereq poll due to a received beacon\n",
 				    sdata->name);
 #endif
+		mutex_lock(&local->mtx);
 		ifmgd->flags &= ~IEEE80211_STA_BEACON_POLL;
+		ieee80211_run_deferred_scan(local);
+		mutex_unlock(&local->mtx);
+
 		mutex_lock(&local->iflist_mtx);
 		ieee80211_recalc_ps(local, -1);
 		mutex_unlock(&local->iflist_mtx);
@@ -2594,9 +2614,6 @@
 	struct ieee80211_if_managed *ifmgd = &sdata->u.mgd;
 	u8 frame_buf[DEAUTH_DISASSOC_LEN];
 
-	ifmgd->flags &= ~(IEEE80211_STA_CONNECTION_POLL |
-			  IEEE80211_STA_BEACON_POLL);
-
 	ieee80211_set_disassoc(sdata, IEEE80211_STYPE_DEAUTH, reason,
 			       false, frame_buf);
 	mutex_unlock(&ifmgd->mtx);
@@ -2873,8 +2890,7 @@
 	u32 flags;
 
 	if (sdata->vif.type == NL80211_IFTYPE_STATION) {
-		sdata->u.mgd.flags &= ~(IEEE80211_STA_BEACON_POLL |
-					IEEE80211_STA_CONNECTION_POLL);
+		__ieee80211_stop_poll(sdata);
 
 		/* let's probe the connection once */
 		flags = sdata->local->hw.flags;
@@ -2943,7 +2959,10 @@
 	if (test_and_clear_bit(TMR_RUNNING_CHANSW, &ifmgd->timers_running))
 		add_timer(&ifmgd->chswitch_timer);
 	ieee80211_sta_reset_beacon_monitor(sdata);
+
+	mutex_lock(&sdata->local->mtx);
 	ieee80211_restart_sta_timer(sdata);
+	mutex_unlock(&sdata->local->mtx);
 }
 #endif
 
@@ -3105,7 +3124,7 @@
 	}
 
 	local->oper_channel = cbss->channel;
-	ieee80211_hw_config(local, 0);
+	ieee80211_hw_config(local, IEEE80211_CONF_CHANGE_CHANNEL);
 
 	if (!have_sta) {
 		u32 rates = 0, basic_rates = 0;
diff --git a/net/mac80211/offchannel.c b/net/mac80211/offchannel.c
index f054e94..935aa4b 100644
--- a/net/mac80211/offchannel.c
+++ b/net/mac80211/offchannel.c
@@ -234,6 +234,22 @@
 		return;
 	}
 
+	/* was never transmitted */
+	if (local->hw_roc_skb) {
+		u64 cookie;
+
+		cookie = local->hw_roc_cookie ^ 2;
+
+		cfg80211_mgmt_tx_status(local->hw_roc_dev, cookie,
+					local->hw_roc_skb->data,
+					local->hw_roc_skb->len, false,
+					GFP_KERNEL);
+
+		kfree_skb(local->hw_roc_skb);
+		local->hw_roc_skb = NULL;
+		local->hw_roc_skb_for_status = NULL;
+	}
+
 	if (!local->hw_roc_for_tx)
 		cfg80211_remain_on_channel_expired(local->hw_roc_dev,
 						   local->hw_roc_cookie,
diff --git a/net/mac80211/sta_info.c b/net/mac80211/sta_info.c
index f5b1638..de455f8 100644
--- a/net/mac80211/sta_info.c
+++ b/net/mac80211/sta_info.c
@@ -378,7 +378,7 @@
 	/* make the station visible */
 	sta_info_hash_add(local, sta);
 
-	list_add(&sta->list, &local->sta_list);
+	list_add_rcu(&sta->list, &local->sta_list);
 
 	set_sta_flag(sta, WLAN_STA_INSERTED);
 
@@ -688,7 +688,7 @@
 	if (ret)
 		return ret;
 
-	list_del(&sta->list);
+	list_del_rcu(&sta->list);
 
 	mutex_lock(&local->key_mtx);
 	for (i = 0; i < NUM_DEFAULT_KEYS; i++)
diff --git a/net/mac80211/sta_info.h b/net/mac80211/sta_info.h
index 3bb24a1..a470e11 100644
--- a/net/mac80211/sta_info.h
+++ b/net/mac80211/sta_info.h
@@ -271,6 +271,9 @@
  * @plink_timer: peer link watch timer
  * @plink_timer_was_running: used by suspend/resume to restore timers
  * @t_offset: timing offset relative to this host
+ * @t_offset_setpoint: reference timing offset of this sta to be used when
+ * 	calculating clockdrift
+ * @ch_type: peer's channel type
  * @debugfs: debug filesystem info
  * @dead: set to true when sta is unlinked
  * @uploaded: set to true when sta is uploaded to the driver
@@ -278,6 +281,8 @@
  * @sta: station information we share with the driver
  * @sta_state: duplicates information about station state (for debug)
  * @beacon_loss_count: number of times beacon loss has triggered
+ * @supports_40mhz: tracks whether the station advertised 40 MHz support
+ *	as we overwrite its HT parameters with the currently used value
  */
 struct sta_info {
 	/* General information, mostly static */
diff --git a/net/mac80211/tx.c b/net/mac80211/tx.c
index 5f827a6..e453212 100644
--- a/net/mac80211/tx.c
+++ b/net/mac80211/tx.c
@@ -153,7 +153,7 @@
 
 	/* Don't calculate ACKs for QoS Frames with NoAck Policy set */
 	if (ieee80211_is_data_qos(hdr->frame_control) &&
-	    *(ieee80211_get_qos_ctl(hdr)) | IEEE80211_QOS_CTL_ACK_POLICY_NOACK)
+	    *(ieee80211_get_qos_ctl(hdr)) & IEEE80211_QOS_CTL_ACK_POLICY_NOACK)
 		dur = 0;
 	else
 		/* Time needed to transmit ACK
@@ -1737,7 +1737,7 @@
 	__le16 fc;
 	struct ieee80211_hdr hdr;
 	struct ieee80211s_hdr mesh_hdr __maybe_unused;
-	struct mesh_path __maybe_unused *mppath = NULL;
+	struct mesh_path __maybe_unused *mppath = NULL, *mpath = NULL;
 	const u8 *encaps_data;
 	int encaps_len, skip_header_bytes;
 	int nh_pos, h_pos;
@@ -1803,8 +1803,11 @@
 			goto fail;
 		}
 		rcu_read_lock();
-		if (!is_multicast_ether_addr(skb->data))
-			mppath = mpp_path_lookup(skb->data, sdata);
+		if (!is_multicast_ether_addr(skb->data)) {
+			mpath = mesh_path_lookup(skb->data, sdata);
+			if (!mpath)
+				mppath = mpp_path_lookup(skb->data, sdata);
+		}
 
 		/*
 		 * Use address extension if it is a packet from
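A minimal userspace sketch (illustrative flag value, not kernel code) of why the hunks above replace '|' with '&' when testing sinfo.filled and the QoS control byte: OR-ing a non-zero mask is always non-zero, so the old tests fired unconditionally, while AND actually checks whether the bit is set.

/* Not kernel code; STATION_INFO_SIGNAL_AVG value is illustrative only. */
#include <stdio.h>

#define STATION_INFO_SIGNAL_AVG 0x0100

int main(void)
{
	unsigned int filled = 0;	/* no flags set */

	if (filled | STATION_INFO_SIGNAL_AVG)
		printf("'|' test: taken even though the flag is clear\n");
	if (filled & STATION_INFO_SIGNAL_AVG)
		printf("'&' test: taken\n");
	else
		printf("'&' test: correctly skipped\n");
	return 0;
}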
diff --git a/net/mac80211/util.c b/net/mac80211/util.c
index 22f2216b..8dd4712 100644
--- a/net/mac80211/util.c
+++ b/net/mac80211/util.c
@@ -1271,7 +1271,7 @@
 			enum ieee80211_sta_state state;
 
 			for (state = IEEE80211_STA_NOTEXIST;
-			     state < sta->sta_state - 1; state++)
+			     state < sta->sta_state; state++)
 				WARN_ON(drv_sta_state(local, sta->sdata, sta,
 						      state, state + 1));
 		}
@@ -1371,6 +1371,12 @@
 		}
 	}
 
+	/* add back keys */
+	list_for_each_entry(sdata, &local->interfaces, list)
+		if (ieee80211_sdata_running(sdata))
+			ieee80211_enable_keys(sdata);
+
+ wake_up:
 	/*
 	 * Clear the WLAN_STA_BLOCK_BA flag so new aggregation
 	 * sessions can be established after a resume.
@@ -1392,12 +1398,6 @@
 		mutex_unlock(&local->sta_mtx);
 	}
 
-	/* add back keys */
-	list_for_each_entry(sdata, &local->interfaces, list)
-		if (ieee80211_sdata_running(sdata))
-			ieee80211_enable_keys(sdata);
-
- wake_up:
 	ieee80211_wake_queues_by_reason(hw,
 			IEEE80211_QUEUE_STOP_REASON_SUSPEND);
 
diff --git a/net/mac802154/tx.c b/net/mac802154/tx.c
index 8781d8f9..434b687 100644
--- a/net/mac802154/tx.c
+++ b/net/mac802154/tx.c
@@ -83,9 +83,10 @@
 {
 	struct xmit_work *work;
 
-	if (!(priv->phy->channels_supported[page] & (1 << chan)))
+	if (!(priv->phy->channels_supported[page] & (1 << chan))) {
 		WARN_ON(1);
 		return NETDEV_TX_OK;
+	}
 
 	if (!(priv->hw.flags & IEEE802154_HW_OMIT_CKSUM)) {
 		u16 crc = crc_ccitt(0, skb->data, skb->len);
diff --git a/net/netfilter/nf_conntrack_h323_main.c b/net/netfilter/nf_conntrack_h323_main.c
index 46d69d7..31f50bc 100644
--- a/net/netfilter/nf_conntrack_h323_main.c
+++ b/net/netfilter/nf_conntrack_h323_main.c
@@ -270,9 +270,8 @@
 		return 0;
 
 	/* RTP port is even */
-	port &= htons(~1);
-	rtp_port = port;
-	rtcp_port = htons(ntohs(port) + 1);
+	rtp_port = port & ~htons(1);
+	rtcp_port = port | htons(1);
 
 	/* Create expect for RTP */
 	if ((rtp_exp = nf_ct_expect_alloc(ct)) == NULL)
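A standalone sketch (illustrative port number, not kernel code) of the RTP/RTCP pairing used above: clearing or setting htons(1) keeps the value in network byte order while yielding the even RTP port and the adjacent odd RTCP port.

/* Not kernel code; port value is an assumption for illustration. */
#include <stdio.h>
#include <stdint.h>
#include <arpa/inet.h>

int main(void)
{
	uint16_t port = htons(5004);		/* even RTP port, network order */
	uint16_t rtp_port = port & ~htons(1);	/* clear the low bit -> even */
	uint16_t rtcp_port = port | htons(1);	/* set the low bit -> odd */

	printf("rtp %u rtcp %u\n", ntohs(rtp_port), ntohs(rtcp_port));
	return 0;
}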
diff --git a/net/netfilter/xt_HMARK.c b/net/netfilter/xt_HMARK.c
index 0a96a43..1686ca1 100644
--- a/net/netfilter/xt_HMARK.c
+++ b/net/netfilter/xt_HMARK.c
@@ -32,13 +32,13 @@
 MODULE_ALIAS("ip6t_HMARK");
 
 struct hmark_tuple {
-	u32			src;
-	u32			dst;
+	__be32			src;
+	__be32			dst;
 	union hmark_ports	uports;
-	uint8_t			proto;
+	u8			proto;
 };
 
-static inline u32 hmark_addr6_mask(const __u32 *addr32, const __u32 *mask)
+static inline __be32 hmark_addr6_mask(const __be32 *addr32, const __be32 *mask)
 {
 	return (addr32[0] & mask[0]) ^
 	       (addr32[1] & mask[1]) ^
@@ -46,8 +46,8 @@
 	       (addr32[3] & mask[3]);
 }
 
-static inline u32
-hmark_addr_mask(int l3num, const __u32 *addr32, const __u32 *mask)
+static inline __be32
+hmark_addr_mask(int l3num, const __be32 *addr32, const __be32 *mask)
 {
 	switch (l3num) {
 	case AF_INET:
@@ -58,6 +58,22 @@
 	return 0;
 }
 
+static inline void hmark_swap_ports(union hmark_ports *uports,
+				    const struct xt_hmark_info *info)
+{
+	union hmark_ports hp;
+	u16 src, dst;
+
+	hp.b32 = (uports->b32 & info->port_mask.b32) | info->port_set.b32;
+	src = ntohs(hp.b16.src);
+	dst = ntohs(hp.b16.dst);
+
+	if (dst > src)
+		uports->v32 = (dst << 16) | src;
+	else
+		uports->v32 = (src << 16) | dst;
+}
+
 static int
 hmark_ct_set_htuple(const struct sk_buff *skb, struct hmark_tuple *t,
 		    const struct xt_hmark_info *info)
@@ -74,22 +90,19 @@
 	otuple = &ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple;
 	rtuple = &ct->tuplehash[IP_CT_DIR_REPLY].tuple;
 
-	t->src = hmark_addr_mask(otuple->src.l3num, otuple->src.u3.all,
-				 info->src_mask.all);
-	t->dst = hmark_addr_mask(otuple->src.l3num, rtuple->src.u3.all,
-				 info->dst_mask.all);
+	t->src = hmark_addr_mask(otuple->src.l3num, otuple->src.u3.ip6,
+				 info->src_mask.ip6);
+	t->dst = hmark_addr_mask(otuple->src.l3num, rtuple->src.u3.ip6,
+				 info->dst_mask.ip6);
 
 	if (info->flags & XT_HMARK_FLAG(XT_HMARK_METHOD_L3))
 		return 0;
 
 	t->proto = nf_ct_protonum(ct);
 	if (t->proto != IPPROTO_ICMP) {
-		t->uports.p16.src = otuple->src.u.all;
-		t->uports.p16.dst = rtuple->src.u.all;
-		t->uports.v32 = (t->uports.v32 & info->port_mask.v32) |
-				info->port_set.v32;
-		if (t->uports.p16.dst < t->uports.p16.src)
-			swap(t->uports.p16.dst, t->uports.p16.src);
+		t->uports.b16.src = otuple->src.u.all;
+		t->uports.b16.dst = rtuple->src.u.all;
+		hmark_swap_ports(&t->uports, info);
 	}
 
 	return 0;
@@ -98,15 +111,19 @@
 #endif
 }
 
+/* This hash function is endian independent, to ensure consistent hashing if
+ * the cluster is composed of big and little endian systems. */
 static inline u32
 hmark_hash(struct hmark_tuple *t, const struct xt_hmark_info *info)
 {
 	u32 hash;
+	u32 src = ntohl(t->src);
+	u32 dst = ntohl(t->dst);
 
-	if (t->dst < t->src)
-		swap(t->src, t->dst);
+	if (dst < src)
+		swap(src, dst);
 
-	hash = jhash_3words(t->src, t->dst, t->uports.v32, info->hashrnd);
+	hash = jhash_3words(src, dst, t->uports.v32, info->hashrnd);
 	hash = hash ^ (t->proto & info->proto_mask);
 
 	return (((u64)hash * info->hmodulus) >> 32) + info->hoffset;
@@ -126,11 +143,7 @@
 	if (skb_copy_bits(skb, nhoff, &t->uports, sizeof(t->uports)) < 0)
 		return;
 
-	t->uports.v32 = (t->uports.v32 & info->port_mask.v32) |
-			info->port_set.v32;
-
-	if (t->uports.p16.dst < t->uports.p16.src)
-		swap(t->uports.p16.dst, t->uports.p16.src);
+	hmark_swap_ports(&t->uports, info);
 }
 
 #if IS_ENABLED(CONFIG_IP6_NF_IPTABLES)
@@ -178,8 +191,8 @@
 			return -1;
 	}
 noicmp:
-	t->src = hmark_addr6_mask(ip6->saddr.s6_addr32, info->src_mask.all);
-	t->dst = hmark_addr6_mask(ip6->daddr.s6_addr32, info->dst_mask.all);
+	t->src = hmark_addr6_mask(ip6->saddr.s6_addr32, info->src_mask.ip6);
+	t->dst = hmark_addr6_mask(ip6->daddr.s6_addr32, info->dst_mask.ip6);
 
 	if (info->flags & XT_HMARK_FLAG(XT_HMARK_METHOD_L3))
 		return 0;
@@ -255,11 +268,8 @@
 		}
 	}
 
-	t->src = (__force u32) ip->saddr;
-	t->dst = (__force u32) ip->daddr;
-
-	t->src &= info->src_mask.ip;
-	t->dst &= info->dst_mask.ip;
+	t->src = ip->saddr & info->src_mask.ip;
+	t->dst = ip->daddr & info->dst_mask.ip;
 
 	if (info->flags & XT_HMARK_FLAG(XT_HMARK_METHOD_L3))
 		return 0;
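A standalone sketch (toy hash and example addresses, not the kernel's jhash_3words()) of the endian-independence idea behind the hunks above: converting __be32 addresses to host order with ntohl() before hashing gives identical numeric inputs, and therefore identical marks, on big- and little-endian cluster members.

/* Not kernel code; toy_hash() is a stand-in for jhash_3words(). */
#include <stdio.h>
#include <stdint.h>
#include <arpa/inet.h>

static uint32_t toy_hash(uint32_t a, uint32_t b, uint32_t c)
{
	return (a * 2654435761u) ^ (b * 40503u) ^ c;	/* illustrative only */
}

int main(void)
{
	uint32_t src_be = inet_addr("192.0.2.1");	/* __be32 in the kernel */
	uint32_t dst_be = inet_addr("198.51.100.7");
	uint32_t src = ntohl(src_be), dst = ntohl(dst_be);

	if (dst < src) {			/* order the pair consistently */
		uint32_t tmp = src; src = dst; dst = tmp;
	}
	printf("hash %08x\n", toy_hash(src, dst, 0));
	return 0;
}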
diff --git a/net/netlink/genetlink.c b/net/netlink/genetlink.c
index 8340ace..2cc7c1e 100644
--- a/net/netlink/genetlink.c
+++ b/net/netlink/genetlink.c
@@ -836,7 +836,7 @@
 #ifdef CONFIG_MODULES
 		if (res == NULL) {
 			genl_unlock();
-			request_module("net-pf-%d-proto-%d-type-%s",
+			request_module("net-pf-%d-proto-%d-family-%s",
 				       PF_NETLINK, NETLINK_GENERIC, name);
 			genl_lock();
 			res = genl_family_find_byname(name);
diff --git a/net/nfc/llcp/sock.c b/net/nfc/llcp/sock.c
index 3f339b1..17a707d 100644
--- a/net/nfc/llcp/sock.c
+++ b/net/nfc/llcp/sock.c
@@ -292,6 +292,9 @@
 
 	pr_debug("%p\n", sk);
 
+	if (llcp_sock == NULL)
+		return -EBADFD;
+
 	addr->sa_family = AF_NFC;
 	*len = sizeof(struct sockaddr_nfc_llcp);
 
diff --git a/net/phonet/af_phonet.c b/net/phonet/af_phonet.c
index 779ce4f..5a940db 100644
--- a/net/phonet/af_phonet.c
+++ b/net/phonet/af_phonet.c
@@ -5,8 +5,8 @@
  *
  * Copyright (C) 2008 Nokia Corporation.
  *
- * Contact: Remi Denis-Courmont <remi.denis-courmont@nokia.com>
- * Original author: Sakari Ailus <sakari.ailus@nokia.com>
+ * Authors: Sakari Ailus <sakari.ailus@nokia.com>
+ *          Rémi Denis-Courmont
  *
  * This program is free software; you can redistribute it and/or
  * modify it under the terms of the GNU General Public License
diff --git a/net/phonet/datagram.c b/net/phonet/datagram.c
index bf35b4e..12c30f3 100644
--- a/net/phonet/datagram.c
+++ b/net/phonet/datagram.c
@@ -5,8 +5,8 @@
  *
  * Copyright (C) 2008 Nokia Corporation.
  *
- * Contact: Remi Denis-Courmont <remi.denis-courmont@nokia.com>
- * Original author: Sakari Ailus <sakari.ailus@nokia.com>
+ * Authors: Sakari Ailus <sakari.ailus@nokia.com>
+ *          Rémi Denis-Courmont
  *
  * This program is free software; you can redistribute it and/or
  * modify it under the terms of the GNU General Public License
diff --git a/net/phonet/pep-gprs.c b/net/phonet/pep-gprs.c
index d012089..a2fba7e 100644
--- a/net/phonet/pep-gprs.c
+++ b/net/phonet/pep-gprs.c
@@ -5,7 +5,7 @@
  *
  * Copyright (C) 2008 Nokia Corporation.
  *
- * Author: Rémi Denis-Courmont <remi.denis-courmont@nokia.com>
+ * Author: Rémi Denis-Courmont
  *
  * This program is free software; you can redistribute it and/or
  * modify it under the terms of the GNU General Public License
diff --git a/net/phonet/pep.c b/net/phonet/pep.c
index 9dd4f92..576f22c 100644
--- a/net/phonet/pep.c
+++ b/net/phonet/pep.c
@@ -5,7 +5,7 @@
  *
  * Copyright (C) 2008 Nokia Corporation.
  *
- * Author: Rémi Denis-Courmont <remi.denis-courmont@nokia.com>
+ * Author: Rémi Denis-Courmont
  *
  * This program is free software; you can redistribute it and/or
  * modify it under the terms of the GNU General Public License
diff --git a/net/phonet/pn_dev.c b/net/phonet/pn_dev.c
index 36f75a9..5bf6341 100644
--- a/net/phonet/pn_dev.c
+++ b/net/phonet/pn_dev.c
@@ -5,8 +5,8 @@
  *
  * Copyright (C) 2008 Nokia Corporation.
  *
- * Contact: Remi Denis-Courmont <remi.denis-courmont@nokia.com>
- * Original author: Sakari Ailus <sakari.ailus@nokia.com>
+ * Authors: Sakari Ailus <sakari.ailus@nokia.com>
+ *          Rémi Denis-Courmont
  *
  * This program is free software; you can redistribute it and/or
  * modify it under the terms of the GNU General Public License
diff --git a/net/phonet/pn_netlink.c b/net/phonet/pn_netlink.c
index cfdf135..7dd762a 100644
--- a/net/phonet/pn_netlink.c
+++ b/net/phonet/pn_netlink.c
@@ -5,8 +5,8 @@
  *
  * Copyright (C) 2008 Nokia Corporation.
  *
- * Contact: Remi Denis-Courmont <remi.denis-courmont@nokia.com>
- * Original author: Sakari Ailus <sakari.ailus@nokia.com>
+ * Authors: Sakari Ailus <sakari.ailus@nokia.com>
+ *          Remi Denis-Courmont
  *
  * This program is free software; you can redistribute it and/or
  * modify it under the terms of the GNU General Public License
diff --git a/net/phonet/socket.c b/net/phonet/socket.c
index 89cfa9c..0acc943 100644
--- a/net/phonet/socket.c
+++ b/net/phonet/socket.c
@@ -5,8 +5,8 @@
  *
  * Copyright (C) 2008 Nokia Corporation.
  *
- * Contact: Remi Denis-Courmont <remi.denis-courmont@nokia.com>
- * Original author: Sakari Ailus <sakari.ailus@nokia.com>
+ * Authors: Sakari Ailus <sakari.ailus@nokia.com>
+ *          Rémi Denis-Courmont
  *
  * This program is free software; you can redistribute it and/or
  * modify it under the terms of the GNU General Public License
diff --git a/net/phonet/sysctl.c b/net/phonet/sysctl.c
index 696348f..d6bbbbd 100644
--- a/net/phonet/sysctl.c
+++ b/net/phonet/sysctl.c
@@ -5,7 +5,7 @@
  *
  * Copyright (C) 2008 Nokia Corporation.
  *
- * Contact: Remi Denis-Courmont <remi.denis-courmont@nokia.com>
+ * Author: Rémi Denis-Courmont
  *
  * This program is free software; you can redistribute it and/or
  * modify it under the terms of the GNU General Public License
diff --git a/net/rds/ib.h b/net/rds/ib.h
index edfaaaf..8d2b3d5 100644
--- a/net/rds/ib.h
+++ b/net/rds/ib.h
@@ -186,8 +186,7 @@
 	struct work_struct	free_work;
 };
 
-#define pcidev_to_node(pcidev) pcibus_to_node(pcidev->bus)
-#define ibdev_to_node(ibdev) pcidev_to_node(to_pci_dev(ibdev->dma_device))
+#define ibdev_to_node(ibdev) dev_to_node(ibdev->dma_device)
 #define rdsibdev_to_node(rdsibdev) ibdev_to_node(rdsibdev->dev)
 
 /* bits for i_ack_flags */
diff --git a/net/sched/sch_atm.c b/net/sched/sch_atm.c
index 8522a47..ca8e0a5 100644
--- a/net/sched/sch_atm.c
+++ b/net/sched/sch_atm.c
@@ -16,8 +16,6 @@
 #include <net/netlink.h>
 #include <net/pkt_sched.h>
 
-extern struct socket *sockfd_lookup(int fd, int *err);	/* @@@ fix this */
-
 /*
  * The ATM queuing discipline provides a framework for invoking classifiers
  * (aka "filters"), which in turn select classes of this queuing discipline.
diff --git a/net/sctp/protocol.c b/net/sctp/protocol.c
index 5942d27..9c90811 100644
--- a/net/sctp/protocol.c
+++ b/net/sctp/protocol.c
@@ -673,7 +673,9 @@
 				SCTP_DEBUG_PRINTK("sctp_addrwq_timo_handler: sctp_asconf_mgmt failed\n");
 			sctp_bh_unlock_sock(sk);
 		}
+#if IS_ENABLED(CONFIG_IPV6)
 free_next:
+#endif
 		list_del(&addrw->list);
 		kfree(addrw);
 	}
diff --git a/net/sunrpc/auth_gss/gss_krb5_wrap.c b/net/sunrpc/auth_gss/gss_krb5_wrap.c
index 38f388c..107c452 100644
--- a/net/sunrpc/auth_gss/gss_krb5_wrap.c
+++ b/net/sunrpc/auth_gss/gss_krb5_wrap.c
@@ -381,21 +381,53 @@
 }
 
 /*
- * We cannot currently handle tokens with rotated data.  We need a
- * generalized routine to rotate the data in place.  It is anticipated
- * that we won't encounter rotated data in the general case.
+ * We can shift data by up to LOCAL_BUF_LEN bytes in a pass.  If we need
+ * to do more than that, we shift repeatedly.  Kevin Coffman reports
+ * seeing 28 bytes as the value used by Microsoft clients and servers
+ * with AES, so this constant is chosen to allow handling 28 in one pass
+ * without using too much stack space.
+ *
+ * If that proves to be a problem, perhaps we could use a more clever
+ * algorithm.
  */
-static u32
-rotate_left(struct krb5_ctx *kctx, u32 offset, struct xdr_buf *buf, u16 rrc)
+#define LOCAL_BUF_LEN 32u
+
+static void rotate_buf_a_little(struct xdr_buf *buf, unsigned int shift)
 {
-	unsigned int realrrc = rrc % (buf->len - offset - GSS_KRB5_TOK_HDR_LEN);
+	char head[LOCAL_BUF_LEN];
+	char tmp[LOCAL_BUF_LEN];
+	unsigned int this_len, i;
 
-	if (realrrc == 0)
-		return 0;
+	BUG_ON(shift > LOCAL_BUF_LEN);
 
-	dprintk("%s: cannot process token with rotated data: "
-		"rrc %u, realrrc %u\n", __func__, rrc, realrrc);
-	return 1;
+	read_bytes_from_xdr_buf(buf, 0, head, shift);
+	for (i = 0; i + shift < buf->len; i += LOCAL_BUF_LEN) {
+		this_len = min(LOCAL_BUF_LEN, buf->len - (i + shift));
+		read_bytes_from_xdr_buf(buf, i+shift, tmp, this_len);
+		write_bytes_to_xdr_buf(buf, i, tmp, this_len);
+	}
+	write_bytes_to_xdr_buf(buf, buf->len - shift, head, shift);
+}
+
+static void _rotate_left(struct xdr_buf *buf, unsigned int shift)
+{
+	int shifted = 0;
+	int this_shift;
+
+	shift %= buf->len;
+	while (shifted < shift) {
+		this_shift = min(shift - shifted, LOCAL_BUF_LEN);
+		rotate_buf_a_little(buf, this_shift);
+		shifted += this_shift;
+	}
+}
+
+static void rotate_left(u32 base, struct xdr_buf *buf, unsigned int shift)
+{
+	struct xdr_buf subbuf;
+
+	xdr_buf_subsegment(buf, &subbuf, base, buf->len - base);
+	_rotate_left(&subbuf, shift);
 }
 
 static u32
@@ -495,11 +527,8 @@
 
 	seqnum = be64_to_cpup((__be64 *)(ptr + 8));
 
-	if (rrc != 0) {
-		err = rotate_left(kctx, offset, buf, rrc);
-		if (err)
-			return GSS_S_FAILURE;
-	}
+	if (rrc != 0)
+		rotate_left(offset + 16, buf, rrc);
 
 	err = (*kctx->gk5e->decrypt_v2)(kctx, offset, buf,
 					&headskip, &tailskip);
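A simplified userspace sketch of the chunked left-rotation introduced above; the real code operates on an xdr_buf through read_bytes_from_xdr_buf()/write_bytes_to_xdr_buf(), but the bounded-pass pattern is the same on a flat buffer (LOCAL_BUF_LEN shrunk here to force multiple passes).

/* Not kernel code; flat-buffer analogue of the xdr_buf rotation. */
#include <stdio.h>
#include <string.h>

#define LOCAL_BUF_LEN 4u	/* small on purpose to show multiple passes */

static void rotate_a_little(char *buf, size_t len, size_t shift)
{
	char head[LOCAL_BUF_LEN];
	size_t i;

	memcpy(head, buf, shift);		/* save the bytes rotated out */
	for (i = 0; i + shift < len; i++)
		buf[i] = buf[i + shift];	/* slide the rest down */
	memcpy(buf + len - shift, head, shift);	/* append the saved head */
}

static void rotate_left(char *buf, size_t len, size_t shift)
{
	size_t shifted = 0, this_shift;

	shift %= len;
	while (shifted < shift) {
		this_shift = shift - shifted;
		if (this_shift > LOCAL_BUF_LEN)
			this_shift = LOCAL_BUF_LEN;
		rotate_a_little(buf, len, this_shift);
		shifted += this_shift;
	}
}

int main(void)
{
	char buf[] = "abcdefghij";

	rotate_left(buf, strlen(buf), 7);	/* needs two passes of <= 4 */
	printf("%s\n", buf);			/* prints "hijabcdefg" */
	return 0;
}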
diff --git a/net/sunrpc/auth_gss/svcauth_gss.c b/net/sunrpc/auth_gss/svcauth_gss.c
index 28b62db..73e9573 100644
--- a/net/sunrpc/auth_gss/svcauth_gss.c
+++ b/net/sunrpc/auth_gss/svcauth_gss.c
@@ -336,7 +336,6 @@
 	struct svc_cred		cred;
 	struct gss_svc_seq_data	seqdata;
 	struct gss_ctx		*mechctx;
-	char			*client_name;
 };
 
 static struct rsc *rsc_update(struct cache_detail *cd, struct rsc *new, struct rsc *old);
@@ -347,9 +346,7 @@
 	kfree(rsci->handle.data);
 	if (rsci->mechctx)
 		gss_delete_sec_context(&rsci->mechctx);
-	if (rsci->cred.cr_group_info)
-		put_group_info(rsci->cred.cr_group_info);
-	kfree(rsci->client_name);
+	free_svc_cred(&rsci->cred);
 }
 
 static void rsc_put(struct kref *ref)
@@ -387,7 +384,7 @@
 	tmp->handle.data = NULL;
 	new->mechctx = NULL;
 	new->cred.cr_group_info = NULL;
-	new->client_name = NULL;
+	new->cred.cr_principal = NULL;
 }
 
 static void
@@ -402,8 +399,8 @@
 	spin_lock_init(&new->seqdata.sd_lock);
 	new->cred = tmp->cred;
 	tmp->cred.cr_group_info = NULL;
-	new->client_name = tmp->client_name;
-	tmp->client_name = NULL;
+	new->cred.cr_principal = tmp->cred.cr_principal;
+	tmp->cred.cr_principal = NULL;
 }
 
 static struct cache_head *
@@ -501,8 +498,8 @@
 		/* get client name */
 		len = qword_get(&mesg, buf, mlen);
 		if (len > 0) {
-			rsci.client_name = kstrdup(buf, GFP_KERNEL);
-			if (!rsci.client_name)
+			rsci.cred.cr_principal = kstrdup(buf, GFP_KERNEL);
+			if (!rsci.cred.cr_principal)
 				goto out;
 		}
 
@@ -932,16 +929,6 @@
 	struct rsc			*rsci;
 };
 
-char *svc_gss_principal(struct svc_rqst *rqstp)
-{
-	struct gss_svc_data *gd = (struct gss_svc_data *)rqstp->rq_auth_data;
-
-	if (gd && gd->rsci)
-		return gd->rsci->client_name;
-	return NULL;
-}
-EXPORT_SYMBOL_GPL(svc_gss_principal);
-
 static int
 svcauth_gss_set_client(struct svc_rqst *rqstp)
 {
@@ -969,16 +956,17 @@
 }
 
 static inline int
-gss_write_init_verf(struct cache_detail *cd, struct svc_rqst *rqstp, struct rsi *rsip)
+gss_write_init_verf(struct cache_detail *cd, struct svc_rqst *rqstp,
+		struct xdr_netobj *out_handle, int *major_status)
 {
 	struct rsc *rsci;
 	int        rc;
 
-	if (rsip->major_status != GSS_S_COMPLETE)
+	if (*major_status != GSS_S_COMPLETE)
 		return gss_write_null_verf(rqstp);
-	rsci = gss_svc_searchbyctx(cd, &rsip->out_handle);
+	rsci = gss_svc_searchbyctx(cd, out_handle);
 	if (rsci == NULL) {
-		rsip->major_status = GSS_S_NO_CONTEXT;
+		*major_status = GSS_S_NO_CONTEXT;
 		return gss_write_null_verf(rqstp);
 	}
 	rc = gss_write_verf(rqstp, rsci->mechctx, GSS_SEQ_WIN);
@@ -986,6 +974,61 @@
 	return rc;
 }
 
+static inline int
+gss_read_verf(struct rpc_gss_wire_cred *gc,
+	      struct kvec *argv, __be32 *authp,
+	      struct xdr_netobj *in_handle,
+	      struct xdr_netobj *in_token)
+{
+	struct xdr_netobj tmpobj;
+
+	/* Read the verifier; should be NULL: */
+	*authp = rpc_autherr_badverf;
+	if (argv->iov_len < 2 * 4)
+		return SVC_DENIED;
+	if (svc_getnl(argv) != RPC_AUTH_NULL)
+		return SVC_DENIED;
+	if (svc_getnl(argv) != 0)
+		return SVC_DENIED;
+	/* Marshal context handle and token for upcall: */
+	*authp = rpc_autherr_badcred;
+	if (gc->gc_proc == RPC_GSS_PROC_INIT && gc->gc_ctx.len != 0)
+		return SVC_DENIED;
+	if (dup_netobj(in_handle, &gc->gc_ctx))
+		return SVC_CLOSE;
+	*authp = rpc_autherr_badverf;
+	if (svc_safe_getnetobj(argv, &tmpobj)) {
+		kfree(in_handle->data);
+		return SVC_DENIED;
+	}
+	if (dup_netobj(in_token, &tmpobj)) {
+		kfree(in_handle->data);
+		return SVC_CLOSE;
+	}
+
+	return 0;
+}
+
+static inline int
+gss_write_resv(struct kvec *resv, size_t size_limit,
+	       struct xdr_netobj *out_handle, struct xdr_netobj *out_token,
+	       int major_status, int minor_status)
+{
+	if (resv->iov_len + 4 > size_limit)
+		return -1;
+	svc_putnl(resv, RPC_SUCCESS);
+	if (svc_safe_putnetobj(resv, out_handle))
+		return -1;
+	if (resv->iov_len + 3 * 4 > size_limit)
+		return -1;
+	svc_putnl(resv, major_status);
+	svc_putnl(resv, minor_status);
+	svc_putnl(resv, GSS_SEQ_WIN);
+	if (svc_safe_putnetobj(resv, out_token))
+		return -1;
+	return 0;
+}
+
 /*
  * Having read the cred already and found we're in the context
  * initiation case, read the verifier and initiate (or check the results
@@ -998,36 +1041,15 @@
 {
 	struct kvec *argv = &rqstp->rq_arg.head[0];
 	struct kvec *resv = &rqstp->rq_res.head[0];
-	struct xdr_netobj tmpobj;
 	struct rsi *rsip, rsikey;
 	int ret;
 	struct sunrpc_net *sn = net_generic(rqstp->rq_xprt->xpt_net, sunrpc_net_id);
 
-	/* Read the verifier; should be NULL: */
-	*authp = rpc_autherr_badverf;
-	if (argv->iov_len < 2 * 4)
-		return SVC_DENIED;
-	if (svc_getnl(argv) != RPC_AUTH_NULL)
-		return SVC_DENIED;
-	if (svc_getnl(argv) != 0)
-		return SVC_DENIED;
-
-	/* Martial context handle and token for upcall: */
-	*authp = rpc_autherr_badcred;
-	if (gc->gc_proc == RPC_GSS_PROC_INIT && gc->gc_ctx.len != 0)
-		return SVC_DENIED;
 	memset(&rsikey, 0, sizeof(rsikey));
-	if (dup_netobj(&rsikey.in_handle, &gc->gc_ctx))
-		return SVC_CLOSE;
-	*authp = rpc_autherr_badverf;
-	if (svc_safe_getnetobj(argv, &tmpobj)) {
-		kfree(rsikey.in_handle.data);
-		return SVC_DENIED;
-	}
-	if (dup_netobj(&rsikey.in_token, &tmpobj)) {
-		kfree(rsikey.in_handle.data);
-		return SVC_CLOSE;
-	}
+	ret = gss_read_verf(gc, argv, authp,
+			    &rsikey.in_handle, &rsikey.in_token);
+	if (ret)
+		return ret;
 
 	/* Perform upcall, or find upcall result: */
 	rsip = rsi_lookup(sn->rsi_cache, &rsikey);
@@ -1040,19 +1062,12 @@
 
 	ret = SVC_CLOSE;
 	/* Got an answer to the upcall; use it: */
-	if (gss_write_init_verf(sn->rsc_cache, rqstp, rsip))
+	if (gss_write_init_verf(sn->rsc_cache, rqstp,
+				&rsip->out_handle, &rsip->major_status))
 		goto out;
-	if (resv->iov_len + 4 > PAGE_SIZE)
-		goto out;
-	svc_putnl(resv, RPC_SUCCESS);
-	if (svc_safe_putnetobj(resv, &rsip->out_handle))
-		goto out;
-	if (resv->iov_len + 3 * 4 > PAGE_SIZE)
-		goto out;
-	svc_putnl(resv, rsip->major_status);
-	svc_putnl(resv, rsip->minor_status);
-	svc_putnl(resv, GSS_SEQ_WIN);
-	if (svc_safe_putnetobj(resv, &rsip->out_token))
+	if (gss_write_resv(resv, PAGE_SIZE,
+			   &rsip->out_handle, &rsip->out_token,
+			   rsip->major_status, rsip->minor_status))
 		goto out;
 
 	ret = SVC_COMPLETE;
@@ -1192,7 +1207,7 @@
 		}
 		svcdata->rsci = rsci;
 		cache_get(&rsci->h);
-		rqstp->rq_flavor = gss_svc_to_pseudoflavor(
+		rqstp->rq_cred.cr_flavor = gss_svc_to_pseudoflavor(
 					rsci->mechctx->mech_type, gc->gc_svc);
 		ret = SVC_OK;
 		goto out;
diff --git a/net/sunrpc/clnt.c b/net/sunrpc/clnt.c
index 7fee13b..f56f045 100644
--- a/net/sunrpc/clnt.c
+++ b/net/sunrpc/clnt.c
@@ -1286,6 +1286,8 @@
 	}
 
 	switch (status) {
+	case -ENOMEM:
+		rpc_delay(task, HZ >> 2);
 	case -EAGAIN:	/* woken up; retry */
 		task->tk_action = call_reserve;
 		return;
diff --git a/net/sunrpc/rpc_pipe.c b/net/sunrpc/rpc_pipe.c
index fd24239..21fde99 100644
--- a/net/sunrpc/rpc_pipe.c
+++ b/net/sunrpc/rpc_pipe.c
@@ -71,7 +71,9 @@
 		msg->errno = err;
 		destroy_msg(msg);
 	} while (!list_empty(head));
-	wake_up(waitq);
+
+	if (waitq)
+		wake_up(waitq);
 }
 
 static void
@@ -91,11 +93,9 @@
 	}
 	dentry = dget(pipe->dentry);
 	spin_unlock(&pipe->lock);
-	if (dentry) {
-		rpc_purge_list(&RPC_I(dentry->d_inode)->waitq,
-			       &free_list, destroy_msg, -ETIMEDOUT);
-		dput(dentry);
-	}
+	rpc_purge_list(dentry ? &RPC_I(dentry->d_inode)->waitq : NULL,
+			&free_list, destroy_msg, -ETIMEDOUT);
+	dput(dentry);
 }
 
 ssize_t rpc_pipe_generic_upcall(struct file *filp, struct rpc_pipe_msg *msg,
@@ -120,7 +120,7 @@
 
 /**
  * rpc_queue_upcall - queue an upcall message to userspace
- * @inode: inode of upcall pipe on which to queue given message
+ * @pipe: upcall pipe on which to queue given message
  * @msg: message to queue
  *
  * Call with an @inode created by rpc_mkpipe() to queue an upcall.
@@ -819,9 +819,7 @@
  * @parent: dentry of directory to create new "pipe" in
  * @name: name of pipe
  * @private: private data to associate with the pipe, for the caller's use
- * @ops: operations defining the behavior of the pipe: upcall, downcall,
- *	release_pipe, open_pipe, and destroy_msg.
- * @flags: rpc_pipe flags
+ * @pipe: &rpc_pipe containing input parameters
  *
  * Data is made available for userspace to read by calls to
  * rpc_queue_upcall().  The actual reads will result in calls to
@@ -943,7 +941,7 @@
 
 /**
  * rpc_remove_client_dir - Remove a directory created with rpc_create_client_dir()
- * @clnt: rpc client
+ * @dentry: dentry for the pipe
  */
 int rpc_remove_client_dir(struct dentry *dentry)
 {
@@ -1115,7 +1113,7 @@
 	sb->s_op = &s_ops;
 	sb->s_time_gran = 1;
 
-	inode = rpc_get_inode(sb, S_IFDIR | 0755);
+	inode = rpc_get_inode(sb, S_IFDIR | S_IRUGO | S_IXUGO);
 	sb->s_root = root = d_make_root(inode);
 	if (!root)
 		return -ENOMEM;
diff --git a/net/sunrpc/rpcb_clnt.c b/net/sunrpc/rpcb_clnt.c
index 78ac39f..92509ff 100644
--- a/net/sunrpc/rpcb_clnt.c
+++ b/net/sunrpc/rpcb_clnt.c
@@ -180,14 +180,16 @@
 	struct sunrpc_net *sn = net_generic(net, sunrpc_net_id);
 	struct rpc_clnt *clnt = sn->rpcb_local_clnt;
 	struct rpc_clnt *clnt4 = sn->rpcb_local_clnt4;
-	int shutdown;
+	int shutdown = 0;
 
 	spin_lock(&sn->rpcb_clnt_lock);
-	if (--sn->rpcb_users == 0) {
-		sn->rpcb_local_clnt = NULL;
-		sn->rpcb_local_clnt4 = NULL;
+	if (sn->rpcb_users) {
+		if (--sn->rpcb_users == 0) {
+			sn->rpcb_local_clnt = NULL;
+			sn->rpcb_local_clnt4 = NULL;
+		}
+		shutdown = !sn->rpcb_users;
 	}
-	shutdown = !sn->rpcb_users;
 	spin_unlock(&sn->rpcb_clnt_lock);
 
 	if (shutdown) {
@@ -394,6 +396,7 @@
 
 /**
  * rpcb_register - set or unset a port registration with the local rpcbind svc
+ * @net: target network namespace
  * @prog: RPC program number to bind
  * @vers: RPC version number to bind
  * @prot: transport protocol to register
@@ -521,6 +524,7 @@
 
 /**
  * rpcb_v4_register - set or unset a port registration with the local rpcbind
+ * @net: target network namespace
  * @program: RPC program number of service to (un)register
  * @version: RPC version number of service to (un)register
  * @address: address family, IP address, and port to (un)register
diff --git a/net/sunrpc/svc.c b/net/sunrpc/svc.c
index 017c011..3ee7461 100644
--- a/net/sunrpc/svc.c
+++ b/net/sunrpc/svc.c
@@ -407,6 +407,14 @@
 	return 0;
 }
 
+int svc_bind(struct svc_serv *serv, struct net *net)
+{
+	if (!svc_uses_rpcbind(serv))
+		return 0;
+	return svc_rpcb_setup(serv, net);
+}
+EXPORT_SYMBOL_GPL(svc_bind);
+
 /*
  * Create an RPC service
  */
@@ -471,15 +479,8 @@
 		spin_lock_init(&pool->sp_lock);
 	}
 
-	if (svc_uses_rpcbind(serv)) {
-		if (svc_rpcb_setup(serv, current->nsproxy->net_ns) < 0) {
-			kfree(serv->sv_pools);
-			kfree(serv);
-			return NULL;
-		}
-		if (!serv->sv_shutdown)
-			serv->sv_shutdown = svc_rpcb_cleanup;
-	}
+	if (svc_uses_rpcbind(serv) && (!serv->sv_shutdown))
+		serv->sv_shutdown = svc_rpcb_cleanup;
 
 	return serv;
 }
@@ -536,8 +537,6 @@
 void
 svc_destroy(struct svc_serv *serv)
 {
-	struct net *net = current->nsproxy->net_ns;
-
 	dprintk("svc: svc_destroy(%s, %d)\n",
 				serv->sv_program->pg_name,
 				serv->sv_nrthreads);
@@ -552,8 +551,6 @@
 
 	del_timer_sync(&serv->sv_temptimer);
 
-	svc_shutdown_net(serv, net);
-
 	/*
 	 * The last user is gone and thus all sockets have to be destroyed to
 	 * the point. Check this.
@@ -1377,7 +1374,8 @@
 						sizeof(req->rq_snd_buf));
 		return bc_send(req);
 	} else {
-		/* Nothing to do to drop request */
+		/* drop request */
+		xprt_free_bc_request(req);
 		return 0;
 	}
 }
diff --git a/net/sunrpc/svc_xprt.c b/net/sunrpc/svc_xprt.c
index b98ee35..88f2bf6 100644
--- a/net/sunrpc/svc_xprt.c
+++ b/net/sunrpc/svc_xprt.c
@@ -598,6 +598,7 @@
 
 	/* now allocate needed pages.  If we get a failure, sleep briefly */
 	pages = (serv->sv_max_mesg + PAGE_SIZE) / PAGE_SIZE;
+	BUG_ON(pages >= RPCSVC_MAXPAGES);
 	for (i = 0; i < pages ; i++)
 		while (rqstp->rq_pages[i] == NULL) {
 			struct page *p = alloc_page(GFP_KERNEL);
@@ -612,7 +613,6 @@
 			rqstp->rq_pages[i] = p;
 		}
 	rqstp->rq_pages[i++] = NULL; /* this might be seen in nfs_read_actor */
-	BUG_ON(pages >= RPCSVC_MAXPAGES);
 
 	/* Make arg->head point to first page and arg->pages point to rest */
 	arg = &rqstp->rq_arg;
@@ -973,7 +973,7 @@
 	svc_clear_pools(serv, net);
 	/*
 	 * At this point the sp_sockets lists will stay empty, since
-	 * svc_enqueue will not add new entries without taking the
+	 * svc_xprt_enqueue will not add new entries without taking the
 	 * sp_lock and checking XPT_BUSY.
 	 */
 	svc_clear_list(&serv->sv_tempsocks, net);
diff --git a/net/sunrpc/svcauth_unix.c b/net/sunrpc/svcauth_unix.c
index 71ec853..2777fa8 100644
--- a/net/sunrpc/svcauth_unix.c
+++ b/net/sunrpc/svcauth_unix.c
@@ -347,17 +347,12 @@
 	return __ip_map_update(sn->ip_map_cache, ipm, udom, expiry);
 }
 
-
-void svcauth_unix_purge(void)
+void svcauth_unix_purge(struct net *net)
 {
-	struct net *net;
+	struct sunrpc_net *sn;
 
-	for_each_net(net) {
-		struct sunrpc_net *sn;
-
-		sn = net_generic(net, sunrpc_net_id);
-		cache_purge(sn->ip_map_cache);
-	}
+	sn = net_generic(net, sunrpc_net_id);
+	cache_purge(sn->ip_map_cache);
 }
 EXPORT_SYMBOL_GPL(svcauth_unix_purge);
 
@@ -751,6 +746,7 @@
 	struct svc_cred	*cred = &rqstp->rq_cred;
 
 	cred->cr_group_info = NULL;
+	cred->cr_principal = NULL;
 	rqstp->rq_client = NULL;
 
 	if (argv->iov_len < 3*4)
@@ -778,7 +774,7 @@
 	svc_putnl(resv, RPC_AUTH_NULL);
 	svc_putnl(resv, 0);
 
-	rqstp->rq_flavor = RPC_AUTH_NULL;
+	rqstp->rq_cred.cr_flavor = RPC_AUTH_NULL;
 	return SVC_OK;
 }
 
@@ -816,6 +812,7 @@
 	int		len   = argv->iov_len;
 
 	cred->cr_group_info = NULL;
+	cred->cr_principal = NULL;
 	rqstp->rq_client = NULL;
 
 	if ((len -= 3*4) < 0)
@@ -852,7 +849,7 @@
 	svc_putnl(resv, RPC_AUTH_NULL);
 	svc_putnl(resv, 0);
 
-	rqstp->rq_flavor = RPC_AUTH_UNIX;
+	rqstp->rq_cred.cr_flavor = RPC_AUTH_UNIX;
 	return SVC_OK;
 
 badcred:
diff --git a/net/sunrpc/xprt.c b/net/sunrpc/xprt.c
index 6fe2dce..3c83035 100644
--- a/net/sunrpc/xprt.c
+++ b/net/sunrpc/xprt.c
@@ -979,20 +979,21 @@
 		list_del(&req->rq_list);
 		goto out_init_req;
 	}
-	req = xprt_dynamic_alloc_slot(xprt, GFP_NOWAIT);
+	req = xprt_dynamic_alloc_slot(xprt, GFP_NOWAIT|__GFP_NOWARN);
 	if (!IS_ERR(req))
 		goto out_init_req;
 	switch (PTR_ERR(req)) {
 	case -ENOMEM:
-		rpc_delay(task, HZ >> 2);
 		dprintk("RPC:       dynamic allocation of request slot "
 				"failed! Retrying\n");
+		task->tk_status = -ENOMEM;
 		break;
 	case -EAGAIN:
 		rpc_sleep_on(&xprt->backlog, task, NULL);
 		dprintk("RPC:       waiting for request slot\n");
+	default:
+		task->tk_status = -EAGAIN;
 	}
-	task->tk_status = -EAGAIN;
 	return;
 out_init_req:
 	task->tk_status = 0;
diff --git a/net/wanrouter/Kconfig b/net/wanrouter/Kconfig
index 61ceae0..a157a2e 100644
--- a/net/wanrouter/Kconfig
+++ b/net/wanrouter/Kconfig
@@ -3,7 +3,7 @@
 #
 
 config WAN_ROUTER
-	tristate "WAN router"
+	tristate "WAN router (DEPRECATED)"
 	depends on EXPERIMENTAL
 	---help---
 	  Wide Area Networks (WANs), such as X.25, frame relay and leased
diff --git a/net/wireless/ibss.c b/net/wireless/ibss.c
index d2a19b0..89baa33 100644
--- a/net/wireless/ibss.c
+++ b/net/wireless/ibss.c
@@ -42,6 +42,7 @@
 	cfg80211_hold_bss(bss_from_pub(bss));
 	wdev->current_bss = bss_from_pub(bss);
 
+	wdev->sme_state = CFG80211_SME_CONNECTED;
 	cfg80211_upload_connect_keys(wdev);
 
 	nl80211_send_ibss_bssid(wiphy_to_dev(wdev->wiphy), dev, bssid,
@@ -60,7 +61,7 @@
 	struct cfg80211_event *ev;
 	unsigned long flags;
 
-	CFG80211_DEV_WARN_ON(!wdev->ssid_len);
+	CFG80211_DEV_WARN_ON(wdev->sme_state != CFG80211_SME_CONNECTING);
 
 	ev = kzalloc(sizeof(*ev), gfp);
 	if (!ev)
@@ -115,9 +116,11 @@
 #ifdef CONFIG_CFG80211_WEXT
 	wdev->wext.ibss.channel = params->channel;
 #endif
+	wdev->sme_state = CFG80211_SME_CONNECTING;
 	err = rdev->ops->join_ibss(&rdev->wiphy, dev, params);
 	if (err) {
 		wdev->connect_keys = NULL;
+		wdev->sme_state = CFG80211_SME_IDLE;
 		return err;
 	}
 
@@ -169,6 +172,7 @@
 	}
 
 	wdev->current_bss = NULL;
+	wdev->sme_state = CFG80211_SME_IDLE;
 	wdev->ssid_len = 0;
 #ifdef CONFIG_CFG80211_WEXT
 	if (!nowext)
diff --git a/net/wireless/reg.c b/net/wireless/reg.c
index 15f3474..baf5704 100644
--- a/net/wireless/reg.c
+++ b/net/wireless/reg.c
@@ -1389,7 +1389,7 @@
 	spin_unlock(&reg_requests_lock);
 
 	if (last_request->initiator == NL80211_REGDOM_SET_BY_USER)
-		cancel_delayed_work_sync(&reg_timeout);
+		cancel_delayed_work(&reg_timeout);
 
 	if (need_more_processing)
 		schedule_work(&reg_work);
diff --git a/net/wireless/util.c b/net/wireless/util.c
index 55d9946..316cfd0 100644
--- a/net/wireless/util.c
+++ b/net/wireless/util.c
@@ -804,7 +804,7 @@
 	     ntype == NL80211_IFTYPE_P2P_CLIENT))
 		return -EBUSY;
 
-	if (ntype != otype) {
+	if (ntype != otype && netif_running(dev)) {
 		err = cfg80211_can_change_interface(rdev, dev->ieee80211_ptr,
 						    ntype);
 		if (err)
@@ -935,6 +935,7 @@
 				  enum nl80211_iftype iftype)
 {
 	struct wireless_dev *wdev_iter;
+	u32 used_iftypes = BIT(iftype);
 	int num[NUM_NL80211_IFTYPES];
 	int total = 1;
 	int i, j;
@@ -961,6 +962,7 @@
 
 		num[wdev_iter->iftype]++;
 		total++;
+		used_iftypes |= BIT(wdev_iter->iftype);
 	}
 	mutex_unlock(&rdev->devlist_mtx);
 
@@ -970,6 +972,7 @@
 	for (i = 0; i < rdev->wiphy.n_iface_combinations; i++) {
 		const struct ieee80211_iface_combination *c;
 		struct ieee80211_iface_limit *limits;
+		u32 all_iftypes = 0;
 
 		c = &rdev->wiphy.iface_combinations[i];
 
@@ -984,6 +987,7 @@
 			if (rdev->wiphy.software_iftypes & BIT(iftype))
 				continue;
 			for (j = 0; j < c->n_limits; j++) {
+				all_iftypes |= limits[j].types;
 				if (!(limits[j].types & BIT(iftype)))
 					continue;
 				if (limits[j].max < num[iftype])
@@ -991,7 +995,20 @@
 				limits[j].max -= num[iftype];
 			}
 		}
-		/* yay, it fits */
+
+		/*
+		 * Finally check that all iftypes that we're currently
+		 * using are actually part of this combination. If they
+		 * aren't then we can't use this combination and have
+		 * to continue to the next.
+		 */
+		if ((all_iftypes & used_iftypes) != used_iftypes)
+			goto cont;
+
+		/*
+		 * This combination covered all interface types and
+		 * supported the requested numbers, so we're good.
+		 */
 		kfree(limits);
 		return 0;
  cont:
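A standalone sketch (illustrative bit values) of the coverage test added above: a combination qualifies only if the union of iftype bits across its limits covers every interface type currently in use.

/* Not kernel code; iftype bit positions are assumptions for illustration. */
#include <stdio.h>
#include <stdint.h>

#define BIT(n) (1u << (n))

int main(void)
{
	uint32_t used_iftypes = BIT(2) | BIT(3);	/* types currently in use */
	uint32_t limits[] = { BIT(2), BIT(3) | BIT(8) };
	uint32_t all_iftypes = 0;
	unsigned int j;

	for (j = 0; j < sizeof(limits) / sizeof(limits[0]); j++)
		all_iftypes |= limits[j];

	if ((all_iftypes & used_iftypes) != used_iftypes)
		printf("combination does not cover all used iftypes\n");
	else
		printf("combination covers all used iftypes\n");
	return 0;
}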
diff --git a/net/xfrm/xfrm_policy.c b/net/xfrm/xfrm_policy.c
index c53e8f4..ccfbd32 100644
--- a/net/xfrm/xfrm_policy.c
+++ b/net/xfrm/xfrm_policy.c
@@ -1921,6 +1921,9 @@
 	}
 ok:
 	xfrm_pols_put(pols, drop_pols);
+	if (dst && dst->xfrm &&
+	    dst->xfrm->props.mode == XFRM_MODE_TUNNEL)
+		dst->flags |= DST_XFRM_TUNNEL;
 	return dst;
 
 nopol:
diff --git a/scripts/checkpatch.pl b/scripts/checkpatch.pl
index faea0ec..e5bd60f 100755
--- a/scripts/checkpatch.pl
+++ b/scripts/checkpatch.pl
@@ -2382,6 +2382,19 @@
 			}
 		}
 
+		if ($line =~ /\bprintk\s*\(\s*KERN_([A-Z]+)/) {
+			my $orig = $1;
+			my $level = lc($orig);
+			$level = "warn" if ($level eq "warning");
+			WARN("PREFER_PR_LEVEL",
+			     "Prefer pr_$level(... to printk(KERN_$1, ...\n" . $herecurr);
+		}
+
+		if ($line =~ /\bpr_warning\s*\(/) {
+			WARN("PREFER_PR_LEVEL",
+			     "Prefer pr_warn(... to pr_warning(...\n" . $herecurr);
+		}
+
 # function brace can't be on same line, except for #defines of do while,
 # or if closed on same line
 		if (($line=~/$Type\s*$Ident\(.*\).*\s{/) and
@@ -2448,6 +2461,13 @@
 				     "space prohibited between function name and open parenthesis '('\n" . $herecurr);
 			}
 		}
+
+# check for whitespace before a non-naked semicolon
+		if ($line =~ /^\+.*\S\s+;/) {
+			CHK("SPACING",
+			    "space prohibited before semicolon\n" . $herecurr);
+		}
+
 # Check operator spacing.
 		if (!($line=~/\#\s*include/)) {
 			my $ops = qr{
diff --git a/scripts/coccinelle/misc/ifaddr.cocci b/scripts/coccinelle/misc/ifaddr.cocci
new file mode 100644
index 0000000..3e4089a
--- /dev/null
+++ b/scripts/coccinelle/misc/ifaddr.cocci
@@ -0,0 +1,35 @@
+/// the address of a variable or field is likely always to be
+/// non-zero
+///
+// Confidence: High
+// Copyright: (C) 2012 Julia Lawall, INRIA/LIP6.  GPLv2.
+// Copyright: (C) 2012 Gilles Muller, INRIA/LiP6.  GPLv2.
+// URL: http://coccinelle.lip6.fr/
+// Comments:
+// Options: -no_includes -include_headers
+
+virtual org
+virtual report
+virtual context
+
+@r@
+expression x;
+statement S1,S2;
+position p;
+@@
+
+*if@p (&x)
+ S1 else S2
+
+@script:python depends on org@
+p << r.p;
+@@
+
+cocci.print_main("test of a variable/field address",p)
+
+@script:python depends on report@
+p << r.p;
+@@
+
+msg = "ERROR: test of a variable/field address"
+coccilib.report.print_report(p[0],msg)
diff --git a/scripts/coccinelle/misc/noderef.cocci b/scripts/coccinelle/misc/noderef.cocci
new file mode 100644
index 0000000..c170721
--- /dev/null
+++ b/scripts/coccinelle/misc/noderef.cocci
@@ -0,0 +1,65 @@
+/// sizeof when applied to a pointer typed expression gives the size of
+/// the pointer
+///
+// Confidence: High
+// Copyright: (C) 2012 Julia Lawall, INRIA/LIP6.  GPLv2.
+// Copyright: (C) 2012 Gilles Muller, INRIA/LiP6.  GPLv2.
+// URL: http://coccinelle.lip6.fr/
+// Comments:
+// Options: -no_includes -include_headers
+
+virtual org
+virtual report
+virtual context
+virtual patch
+
+@depends on patch@
+expression *x;
+expression f;
+type T;
+@@
+
+(
+x = <+... sizeof(
+- x
++ *x
+   ) ...+>
+|
+f(...,(T)(x),...,sizeof(
+- x
++ *x
+   ),...)
+|
+f(...,sizeof(x),...,(T)(
+- x
++ *x
+   ),...)
+)
+
+@r depends on !patch@
+expression *x;
+expression f;
+position p;
+type T;
+@@
+
+(
+*x = <+... sizeof@p(x) ...+>
+|
+*f(...,(T)(x),...,sizeof@p(x),...)
+|
+*f(...,sizeof@p(x),...,(T)(x),...)
+)
+
+@script:python depends on org@
+p << r.p;
+@@
+
+cocci.print_main("application of sizeof to pointer",p)
+
+@script:python depends on report@
+p << r.p;
+@@
+
+msg = "ERROR: application of sizeof to pointer"
+coccilib.report.print_report(p[0],msg)
diff --git a/scripts/config b/scripts/config
index a7c7c4b..ed6653e 100755
--- a/scripts/config
+++ b/scripts/config
@@ -107,7 +107,8 @@
 		;;
 
 	--set-str)
-		set_var "CONFIG_$ARG" "CONFIG_$ARG=\"$1\""
+		# sed swallows one level of escaping, so we need double-escaping
+		set_var "CONFIG_$ARG" "CONFIG_$ARG=\"${1//\"/\\\\\"}\""
 		shift
 		;;
 
@@ -124,9 +125,11 @@
 			if [ $? != 0 ] ; then
 				echo undef
 			else
-				V="${V/CONFIG_$ARG=/}"
-				V="${V/\"/}"
-				echo "$V"
+				V="${V/#CONFIG_$ARG=/}"
+				V="${V/#\"/}"
+				V="${V/%\"/}"
+				V="${V/\\\"/\"}"
+				echo "${V}"
 			fi
 		fi
 		;;
diff --git a/scripts/get_maintainer.pl b/scripts/get_maintainer.pl
index 0948c6b..8b673dd 100755
--- a/scripts/get_maintainer.pl
+++ b/scripts/get_maintainer.pl
@@ -83,6 +83,8 @@
 push(@signature_tags, "Reviewed-by:");
 push(@signature_tags, "Acked-by:");
 
+my $signature_pattern = "\(" . join("|", @signature_tags) . "\)";
+
 # rfc822 email address - preloaded methods go here.
 my $rfc822_lwsp = "(?:(?:\\r\\n)?[ \\t])";
 my $rfc822_char = '[\\000-\\377]';
@@ -473,7 +475,6 @@
 my @status = ();
 my %deduplicate_name_hash = ();
 my %deduplicate_address_hash = ();
-my $signature_pattern;
 
 my @maintainers = get_maintainers();
 
diff --git a/scripts/gfp-translate b/scripts/gfp-translate
old mode 100644
new mode 100755
diff --git a/scripts/kconfig/conf.c b/scripts/kconfig/conf.c
index f208f90..0dc4a2c 100644
--- a/scripts/kconfig/conf.c
+++ b/scripts/kconfig/conf.c
@@ -574,8 +574,15 @@
 	case alldefconfig:
 	case randconfig:
 		name = getenv("KCONFIG_ALLCONFIG");
-		if (name && !stat(name, &tmpstat)) {
-			conf_read_simple(name, S_DEF_USER);
+		if (!name)
+			break;
+		if ((strcmp(name, "") != 0) && (strcmp(name, "1") != 0)) {
+			if (conf_read_simple(name, S_DEF_USER)) {
+				fprintf(stderr,
+					_("*** Can't read seed configuration \"%s\"!\n"),
+					name);
+				exit(1);
+			}
 			break;
 		}
 		switch (input_mode) {
@@ -586,10 +593,13 @@
 		case randconfig:	name = "allrandom.config"; break;
 		default: break;
 		}
-		if (!stat(name, &tmpstat))
-			conf_read_simple(name, S_DEF_USER);
-		else if (!stat("all.config", &tmpstat))
-			conf_read_simple("all.config", S_DEF_USER);
+		if (conf_read_simple(name, S_DEF_USER) &&
+		    conf_read_simple("all.config", S_DEF_USER)) {
+			fprintf(stderr,
+				_("*** KCONFIG_ALLCONFIG set, but no \"%s\" or \"all.config\" file found\n"),
+				name);
+			exit(1);
+		}
 		break;
 	default:
 		break;
diff --git a/scripts/link-vmlinux.sh b/scripts/link-vmlinux.sh
new file mode 100644
index 0000000..cd9c6c6
--- /dev/null
+++ b/scripts/link-vmlinux.sh
@@ -0,0 +1,221 @@
+#!/bin/sh
+#
+# link vmlinux
+#
+# vmlinux is linked from the objects selected by $(KBUILD_VMLINUX_INIT) and
+# $(KBUILD_VMLINUX_MAIN). Most are built-in.o files from top-level directories
+# in the kernel tree, others are specified in arch/$(ARCH)/Makefile.
+# Ordering when linking is important, and $(KBUILD_VMLINUX_INIT) must be first.
+#
+# vmlinux
+#   ^
+#   |
+#   +-< $(KBUILD_VMLINUX_INIT)
+#   |   +--< init/version.o + more
+#   |
+#   +--< $(KBUILD_VMLINUX_MAIN)
+#   |    +--< drivers/built-in.o mm/built-in.o + more
+#   |
+#   +-< ${kallsymso} (see description in KALLSYMS section)
+#
+# vmlinux version (uname -v) cannot be updated during normal
+# descending-into-subdirs phase since we do not yet know if we need to
+# update vmlinux.
+# Therefore this step is delayed until just before final link of vmlinux.
+#
+# System.map is generated to document addresses of all kernel symbols
+
+# Error out on error
+set -e
+
+# Nice output in kbuild format
+# Will be suppressed by "make -s"
+info()
+{
+	if [ "${quiet}" != "silent_" ]; then
+		printf "  %-7s %s\n" ${1} ${2}
+	fi
+}
+
+# Link of vmlinux.o used for section mismatch analysis
+# ${1} output file
+modpost_link()
+{
+	${LD} ${LDFLAGS} -r -o ${1} ${KBUILD_VMLINUX_INIT}                   \
+		--start-group ${KBUILD_VMLINUX_MAIN} --end-group
+}
+
+# Link of vmlinux
+# ${1} - optional extra .o files
+# ${2} - output file
+vmlinux_link()
+{
+	local lds="${objtree}/${KBUILD_LDS}"
+
+	if [ "${SRCARCH}" != "um" ]; then
+		${LD} ${LDFLAGS} ${LDFLAGS_vmlinux} -o ${2}                  \
+			-T ${lds} ${KBUILD_VMLINUX_INIT}                     \
+			--start-group ${KBUILD_VMLINUX_MAIN} --end-group ${1}
+	else
+		${CC} ${CFLAGS_vmlinux} -o ${2}                              \
+			-Wl,-T,${lds} ${KBUILD_VMLINUX_INIT}                 \
+			-Wl,--start-group                                    \
+				 ${KBUILD_VMLINUX_MAIN}                      \
+			-Wl,--end-group                                      \
+			-lutil ${1}
+		rm -f linux
+	fi
+}
+
+
+# Create ${2} .o file with all symbols from the ${1} object file
+kallsyms()
+{
+	info KSYM ${2}
+	local kallsymopt;
+
+	if [ -n "${CONFIG_KALLSYMS_ALL}" ]; then
+		kallsymopt=--all-symbols
+	fi
+
+	local aflags="${KBUILD_AFLAGS} ${KBUILD_AFLAGS_KERNEL}               \
+		      ${NOSTDINC_FLAGS} ${LINUXINCLUDE} ${KBUILD_CPPFLAGS}"
+
+	${NM} -n ${1} | \
+		scripts/kallsyms ${kallsymopt} | \
+		${CC} ${aflags} -c -o ${2} -x assembler-with-cpp -
+}
+
+# Create map file with all symbols from ${1}
+# See mksysmap for additional details
+mksysmap()
+{
+	${CONFIG_SHELL} "${srctree}/scripts/mksysmap" ${1} ${2}
+}
+
+sortextable()
+{
+	${objtree}/scripts/sortextable ${1}
+}
+
+# Delete output files in case of error
+trap cleanup SIGHUP SIGINT SIGQUIT SIGTERM ERR
+cleanup()
+{
+	rm -f .old_version
+	rm -f .tmp_System.map
+	rm -f .tmp_kallsyms*
+	rm -f .tmp_version
+	rm -f .tmp_vmlinux*
+	rm -f System.map
+	rm -f vmlinux
+	rm -f vmlinux.o
+}
+
+#
+#
+# Use "make V=1" to debug this script
+case "${KBUILD_VERBOSE}" in
+*1*)
+	set -x
+	;;
+esac
+
+if [ "$1" = "clean" ]; then
+	cleanup
+	exit 0
+fi
+
+# We need access to CONFIG_ symbols
+. ./.config
+
+# link vmlinux.o
+info LD vmlinux.o
+modpost_link vmlinux.o
+
+# modpost vmlinux.o to check for section mismatches
+${MAKE} -f "${srctree}/scripts/Makefile.modpost" vmlinux.o
+
+# Update version
+info GEN .version
+if [ ! -r .version ]; then
+	rm -f .version;
+	echo 1 >.version;
+else
+	mv .version .old_version;
+	expr 0$(cat .old_version) + 1 >.version;
+fi;
+
+# final build of init/
+${MAKE} -f "${srctree}/scripts/Makefile.build" obj=init
+
+kallsymso=""
+kallsyms_vmlinux=""
+if [ -n "${CONFIG_KALLSYMS}" ]; then
+
+	# kallsyms support
+	# Generate section listing all symbols and add it into vmlinux
+	# It's a three step process:
+	# 1)  Link .tmp_vmlinux1 so it has all symbols and sections,
+	#     but __kallsyms is empty.
+	#     Running kallsyms on that gives us .tmp_kallsyms1.o with
+	#     the right size
+	# 2)  Link .tmp_vmlinux2 so it now has a __kallsyms section of
+	#     the right size, but due to the added section, some
+	#     addresses have shifted.
+	#     From here, we generate a correct .tmp_kallsyms2.o
+	# 2a) We may use an extra pass as this has been necessary to
+	#     work around some alignment-related bugs.
+	#     KALLSYMS_EXTRA_PASS=1 is used to trigger this.
+	# 3)  The correct ${kallsymso} is linked into the final vmlinux.
+	#
+	# a)  Verify that the System.map from vmlinux matches the map from
+	#     ${kallsymso}.
+
+	kallsymso=.tmp_kallsyms2.o
+	kallsyms_vmlinux=.tmp_vmlinux2
+
+	# step 1
+	vmlinux_link "" .tmp_vmlinux1
+	kallsyms .tmp_vmlinux1 .tmp_kallsyms1.o
+
+	# step 2
+	vmlinux_link .tmp_kallsyms1.o .tmp_vmlinux2
+	kallsyms .tmp_vmlinux2 .tmp_kallsyms2.o
+
+	# step 2a
+	if [ -n "${KALLSYMS_EXTRA_PASS}" ]; then
+		kallsymso=.tmp_kallsyms3.o
+		kallsyms_vmlinux=.tmp_vmlinux3
+
+		vmlinux_link .tmp_kallsyms2.o .tmp_vmlinux3
+
+		kallsyms .tmp_vmlinux3 .tmp_kallsyms3.o
+	fi
+fi
+
+info LD vmlinux
+vmlinux_link "${kallsymso}" vmlinux
+
+if [ -n "${CONFIG_BUILDTIME_EXTABLE_SORT}" ]; then
+	info SORTEX vmlinux
+	sortextable vmlinux
+fi
+
+info SYSMAP System.map
+mksysmap vmlinux System.map
+
+# step a (see comment above)
+if [ -n "${CONFIG_KALLSYMS}" ]; then
+	mksysmap ${kallsyms_vmlinux} .tmp_System.map
+
+	if ! cmp -s System.map .tmp_System.map; then
+		echo Inconsistent kallsyms data
+		echo Try "make KALLSYMS_EXTRA_PASS=1" as a workaround
+		cleanup
+		exit 1
+	fi
+fi
+
+# We made a new kernel - delete old version file
+rm -f .old_version
diff --git a/scripts/package/builddeb b/scripts/package/builddeb
index eee5f8e..c95fdda 100644
--- a/scripts/package/builddeb
+++ b/scripts/package/builddeb
@@ -245,7 +245,7 @@
 # Build header package
 (cd $srctree; find . -name Makefile -o -name Kconfig\* -o -name \*.pl > "$objtree/debian/hdrsrcfiles")
 (cd $srctree; find arch/$SRCARCH/include include scripts -type f >> "$objtree/debian/hdrsrcfiles")
-(cd $objtree; find .config Module.symvers include scripts -type f >> "$objtree/debian/hdrobjfiles")
+(cd $objtree; find arch/$SRCARCH/include .config Module.symvers include scripts -type f >> "$objtree/debian/hdrobjfiles")
 destdir=$kernel_headers_dir/usr/src/linux-headers-$version
 mkdir -p "$destdir"
 (cd $srctree; tar -c -f - -T "$objtree/debian/hdrsrcfiles") | (cd $destdir; tar -xf -)
diff --git a/security/apparmor/lsm.c b/security/apparmor/lsm.c
index 032daab..8ea39aa 100644
--- a/security/apparmor/lsm.c
+++ b/security/apparmor/lsm.c
@@ -490,17 +490,9 @@
 	return common_file_perm(op, file, mask);
 }
 
-static int apparmor_file_mmap(struct file *file, unsigned long reqprot,
-			      unsigned long prot, unsigned long flags,
-			      unsigned long addr, unsigned long addr_only)
+static int apparmor_mmap_file(struct file *file, unsigned long reqprot,
+			      unsigned long prot, unsigned long flags)
 {
-	int rc = 0;
-
-	/* do DAC check */
-	rc = cap_file_mmap(file, reqprot, prot, flags, addr, addr_only);
-	if (rc || addr_only)
-		return rc;
-
 	return common_mmap(OP_FMMAP, file, prot, flags);
 }
 
@@ -646,7 +638,8 @@
 	.file_permission =		apparmor_file_permission,
 	.file_alloc_security =		apparmor_file_alloc_security,
 	.file_free_security =		apparmor_file_free_security,
-	.file_mmap =			apparmor_file_mmap,
+	.mmap_file =			apparmor_mmap_file,
+	.mmap_addr =			cap_mmap_addr,
 	.file_mprotect =		apparmor_file_mprotect,
 	.file_lock =			apparmor_file_lock,
 
diff --git a/security/capability.c b/security/capability.c
index fca8896..61095df 100644
--- a/security/capability.c
+++ b/security/capability.c
@@ -949,7 +949,8 @@
 	set_to_cap_if_null(ops, file_alloc_security);
 	set_to_cap_if_null(ops, file_free_security);
 	set_to_cap_if_null(ops, file_ioctl);
-	set_to_cap_if_null(ops, file_mmap);
+	set_to_cap_if_null(ops, mmap_addr);
+	set_to_cap_if_null(ops, mmap_file);
 	set_to_cap_if_null(ops, file_mprotect);
 	set_to_cap_if_null(ops, file_lock);
 	set_to_cap_if_null(ops, file_fcntl);
diff --git a/security/commoncap.c b/security/commoncap.c
index e771cb1..6dbae46 100644
--- a/security/commoncap.c
+++ b/security/commoncap.c
@@ -958,22 +958,15 @@
 }
 
 /*
- * cap_file_mmap - check if able to map given addr
- * @file: unused
- * @reqprot: unused
- * @prot: unused
- * @flags: unused
+ * cap_mmap_addr - check if able to map given addr
  * @addr: address attempting to be mapped
- * @addr_only: unused
  *
  * If the process is attempting to map memory below dac_mmap_min_addr they need
  * CAP_SYS_RAWIO.  The other parameters to this function are unused by the
  * capability security module.  Returns 0 if this mapping should be allowed
  * -EPERM if not.
  */
-int cap_file_mmap(struct file *file, unsigned long reqprot,
-		  unsigned long prot, unsigned long flags,
-		  unsigned long addr, unsigned long addr_only)
+int cap_mmap_addr(unsigned long addr)
 {
 	int ret = 0;
 
@@ -986,3 +979,9 @@
 	}
 	return ret;
 }
+
+int cap_mmap_file(struct file *file, unsigned long reqprot,
+		  unsigned long prot, unsigned long flags)
+{
+	return 0;
+}
diff --git a/security/keys/compat.c b/security/keys/compat.c
index fab4f8d..c92d42b 100644
--- a/security/keys/compat.c
+++ b/security/keys/compat.c
@@ -38,7 +38,7 @@
 
 	ret = compat_rw_copy_check_uvector(WRITE, _payload_iov, ioc,
 					   ARRAY_SIZE(iovstack),
-					   iovstack, &iov, 1);
+					   iovstack, &iov);
 	if (ret < 0)
 		return ret;
 	if (ret == 0)
diff --git a/security/keys/internal.h b/security/keys/internal.h
index f711b09..3dcbf86 100644
--- a/security/keys/internal.h
+++ b/security/keys/internal.h
@@ -14,6 +14,7 @@
 
 #include <linux/sched.h>
 #include <linux/key-type.h>
+#include <linux/task_work.h>
 
 #ifdef __KDEBUG
 #define kenter(FMT, ...) \
@@ -148,6 +149,7 @@
 #define KEY_LOOKUP_FOR_UNLINK	0x04
 
 extern long join_session_keyring(const char *name);
+extern void key_change_session_keyring(struct task_work *twork);
 
 extern struct work_struct key_gc_work;
 extern unsigned key_gc_delay;
diff --git a/security/keys/keyctl.c b/security/keys/keyctl.c
index ddb3e05..0f5b3f0 100644
--- a/security/keys/keyctl.c
+++ b/security/keys/keyctl.c
@@ -84,7 +84,7 @@
 	vm = false;
 	if (_payload) {
 		ret = -ENOMEM;
-		payload = kmalloc(plen, GFP_KERNEL);
+		payload = kmalloc(plen, GFP_KERNEL | __GFP_NOWARN);
 		if (!payload) {
 			if (plen <= PAGE_SIZE)
 				goto error2;
@@ -1110,7 +1110,7 @@
 		goto no_payload;
 
 	ret = rw_copy_check_uvector(WRITE, _payload_iov, ioc,
-				    ARRAY_SIZE(iovstack), iovstack, &iov, 1);
+				    ARRAY_SIZE(iovstack), iovstack, &iov);
 	if (ret < 0)
 		return ret;
 	if (ret == 0)
@@ -1454,50 +1454,57 @@
  */
 long keyctl_session_to_parent(void)
 {
-#ifdef TIF_NOTIFY_RESUME
 	struct task_struct *me, *parent;
 	const struct cred *mycred, *pcred;
-	struct cred *cred, *oldcred;
+	struct task_work *newwork, *oldwork;
 	key_ref_t keyring_r;
+	struct cred *cred;
 	int ret;
 
 	keyring_r = lookup_user_key(KEY_SPEC_SESSION_KEYRING, 0, KEY_LINK);
 	if (IS_ERR(keyring_r))
 		return PTR_ERR(keyring_r);
 
+	ret = -ENOMEM;
+	newwork = kmalloc(sizeof(struct task_work), GFP_KERNEL);
+	if (!newwork)
+		goto error_keyring;
+
 	/* our parent is going to need a new cred struct, a new tgcred struct
 	 * and new security data, so we allocate them here to prevent ENOMEM in
 	 * our parent */
-	ret = -ENOMEM;
 	cred = cred_alloc_blank();
 	if (!cred)
-		goto error_keyring;
+		goto error_newwork;
 
 	cred->tgcred->session_keyring = key_ref_to_ptr(keyring_r);
-	keyring_r = NULL;
+	init_task_work(newwork, key_change_session_keyring, cred);
 
 	me = current;
 	rcu_read_lock();
 	write_lock_irq(&tasklist_lock);
 
-	parent = me->real_parent;
 	ret = -EPERM;
+	oldwork = NULL;
+	parent = me->real_parent;
 
 	/* the parent mustn't be init and mustn't be a kernel thread */
 	if (parent->pid <= 1 || !parent->mm)
-		goto not_permitted;
+		goto unlock;
 
 	/* the parent must be single threaded */
 	if (!thread_group_empty(parent))
-		goto not_permitted;
+		goto unlock;
 
 	/* the parent and the child must have different session keyrings or
 	 * there's no point */
 	mycred = current_cred();
 	pcred = __task_cred(parent);
 	if (mycred == pcred ||
-	    mycred->tgcred->session_keyring == pcred->tgcred->session_keyring)
-		goto already_same;
+	    mycred->tgcred->session_keyring == pcred->tgcred->session_keyring) {
+		ret = 0;
+		goto unlock;
+	}
 
 	/* the parent must have the same effective ownership and mustn't be
 	 * SUID/SGID */
@@ -1507,50 +1514,40 @@
 	    pcred->gid	!= mycred->egid	||
 	    pcred->egid	!= mycred->egid	||
 	    pcred->sgid	!= mycred->egid)
-		goto not_permitted;
+		goto unlock;
 
 	/* the keyrings must have the same UID */
 	if ((pcred->tgcred->session_keyring &&
 	     pcred->tgcred->session_keyring->uid != mycred->euid) ||
 	    mycred->tgcred->session_keyring->uid != mycred->euid)
-		goto not_permitted;
+		goto unlock;
 
-	/* if there's an already pending keyring replacement, then we replace
-	 * that */
-	oldcred = parent->replacement_session_keyring;
+	/* cancel an already pending keyring replacement */
+	oldwork = task_work_cancel(parent, key_change_session_keyring);
 
 	/* the replacement session keyring is applied just prior to userspace
 	 * restarting */
-	parent->replacement_session_keyring = cred;
-	cred = NULL;
-	set_ti_thread_flag(task_thread_info(parent), TIF_NOTIFY_RESUME);
-
+	ret = task_work_add(parent, newwork, true);
+	if (!ret)
+		newwork = NULL;
+unlock:
 	write_unlock_irq(&tasklist_lock);
 	rcu_read_unlock();
-	if (oldcred)
-		put_cred(oldcred);
-	return 0;
-
-already_same:
-	ret = 0;
-not_permitted:
-	write_unlock_irq(&tasklist_lock);
-	rcu_read_unlock();
-	put_cred(cred);
+	if (oldwork) {
+		put_cred(oldwork->data);
+		kfree(oldwork);
+	}
+	if (newwork) {
+		put_cred(newwork->data);
+		kfree(newwork);
+	}
 	return ret;
 
+error_newwork:
+	kfree(newwork);
 error_keyring:
 	key_ref_put(keyring_r);
 	return ret;
-
-#else /* !TIF_NOTIFY_RESUME */
-	/*
-	 * To be removed when TIF_NOTIFY_RESUME has been implemented on
-	 * m68k/xtensa
-	 */
-#warning TIF_NOTIFY_RESUME not implemented
-	return -EOPNOTSUPP;
-#endif /* !TIF_NOTIFY_RESUME */
 }
 
 /*
diff --git a/security/keys/process_keys.c b/security/keys/process_keys.c
index d71056d..4ad54ee 100644
--- a/security/keys/process_keys.c
+++ b/security/keys/process_keys.c
@@ -834,23 +834,17 @@
  * Replace a process's session keyring on behalf of one of its children when
  * the target  process is about to resume userspace execution.
  */
-void key_replace_session_keyring(void)
+void key_change_session_keyring(struct task_work *twork)
 {
-	const struct cred *old;
-	struct cred *new;
+	const struct cred *old = current_cred();
+	struct cred *new = twork->data;
 
-	if (!current->replacement_session_keyring)
+	kfree(twork);
+	if (unlikely(current->flags & PF_EXITING)) {
+		put_cred(new);
 		return;
+	}
 
-	write_lock_irq(&tasklist_lock);
-	new = current->replacement_session_keyring;
-	current->replacement_session_keyring = NULL;
-	write_unlock_irq(&tasklist_lock);
-
-	if (!new)
-		return;
-
-	old = current_cred();
 	new->  uid	= old->  uid;
 	new-> euid	= old-> euid;
 	new-> suid	= old-> suid;
diff --git a/security/keys/request_key.c b/security/keys/request_key.c
index cc37903..000e750 100644
--- a/security/keys/request_key.c
+++ b/security/keys/request_key.c
@@ -93,16 +93,9 @@
 static int call_usermodehelper_keys(char *path, char **argv, char **envp,
 					struct key *session_keyring, int wait)
 {
-	gfp_t gfp_mask = (wait == UMH_NO_WAIT) ? GFP_ATOMIC : GFP_KERNEL;
-	struct subprocess_info *info =
-		call_usermodehelper_setup(path, argv, envp, gfp_mask);
-
-	if (!info)
-		return -ENOMEM;
-
-	call_usermodehelper_setfns(info, umh_keys_init, umh_keys_cleanup,
-					key_get(session_keyring));
-	return call_usermodehelper_exec(info, wait);
+	return call_usermodehelper_fns(path, argv, envp, wait,
+				       umh_keys_init, umh_keys_cleanup,
+				       key_get(session_keyring));
 }
 
 /*
diff --git a/security/security.c b/security/security.c
index 5497a57..3efc9b1 100644
--- a/security/security.c
+++ b/security/security.c
@@ -20,6 +20,9 @@
 #include <linux/ima.h>
 #include <linux/evm.h>
 #include <linux/fsnotify.h>
+#include <linux/mman.h>
+#include <linux/mount.h>
+#include <linux/personality.h>
 #include <net/flow.h>
 
 #define MAX_LSM_EVM_XATTR	2
@@ -657,18 +660,56 @@
 	return security_ops->file_ioctl(file, cmd, arg);
 }
 
-int security_file_mmap(struct file *file, unsigned long reqprot,
-			unsigned long prot, unsigned long flags,
-			unsigned long addr, unsigned long addr_only)
+static inline unsigned long mmap_prot(struct file *file, unsigned long prot)
+{
+	/*
+	 * Do we have PROT_READ and does the application expect
+	 * it to imply PROT_EXEC?  If not, nothing to talk about...
+	 */
+	if ((prot & (PROT_READ | PROT_EXEC)) != PROT_READ)
+		return prot;
+	if (!(current->personality & READ_IMPLIES_EXEC))
+		return prot;
+	/*
+	 * if that's an anonymous mapping, let it.
+	 */
+	if (!file)
+		return prot | PROT_EXEC;
+	/*
+	 * ditto if it's not on a noexec mount, except that on !MMU we need
+	 * BDI_CAP_EXEC_MAP (== VM_MAYEXEC) in this case
+	 */
+	if (!(file->f_path.mnt->mnt_flags & MNT_NOEXEC)) {
+#ifndef CONFIG_MMU
+		unsigned long caps = 0;
+		struct address_space *mapping = file->f_mapping;
+		if (mapping && mapping->backing_dev_info)
+			caps = mapping->backing_dev_info->capabilities;
+		if (!(caps & BDI_CAP_EXEC_MAP))
+			return prot;
+#endif
+		return prot | PROT_EXEC;
+	}
+	/* anything on noexec mount won't get PROT_EXEC */
+	return prot;
+}
+
+int security_mmap_file(struct file *file, unsigned long prot,
+			unsigned long flags)
 {
 	int ret;
-
-	ret = security_ops->file_mmap(file, reqprot, prot, flags, addr, addr_only);
+	ret = security_ops->mmap_file(file, prot,
+					mmap_prot(file, prot), flags);
 	if (ret)
 		return ret;
 	return ima_file_mmap(file, prot);
 }
 
+int security_mmap_addr(unsigned long addr)
+{
+	return security_ops->mmap_addr(addr);
+}
+
 int security_file_mprotect(struct vm_area_struct *vma, unsigned long reqprot,
 			    unsigned long prot)
 {
diff --git a/security/selinux/hooks.c b/security/selinux/hooks.c
index fa2341b..372ec65 100644
--- a/security/selinux/hooks.c
+++ b/security/selinux/hooks.c
@@ -3083,9 +3083,7 @@
 	return rc;
 }
 
-static int selinux_file_mmap(struct file *file, unsigned long reqprot,
-			     unsigned long prot, unsigned long flags,
-			     unsigned long addr, unsigned long addr_only)
+static int selinux_mmap_addr(unsigned long addr)
 {
 	int rc = 0;
 	u32 sid = current_sid();
@@ -3104,10 +3102,12 @@
 	}
 
 	/* do DAC check on address space usage */
-	rc = cap_file_mmap(file, reqprot, prot, flags, addr, addr_only);
-	if (rc || addr_only)
-		return rc;
+	return cap_mmap_addr(addr);
+}
 
+static int selinux_mmap_file(struct file *file, unsigned long reqprot,
+			     unsigned long prot, unsigned long flags)
+{
 	if (selinux_checkreqprot)
 		prot = reqprot;
 
@@ -5570,7 +5570,8 @@
 	.file_alloc_security =		selinux_file_alloc_security,
 	.file_free_security =		selinux_file_free_security,
 	.file_ioctl =			selinux_file_ioctl,
-	.file_mmap =			selinux_file_mmap,
+	.mmap_file =			selinux_mmap_file,
+	.mmap_addr =			selinux_mmap_addr,
 	.file_mprotect =		selinux_file_mprotect,
 	.file_lock =			selinux_file_lock,
 	.file_fcntl =			selinux_file_fcntl,
diff --git a/security/selinux/selinuxfs.c b/security/selinux/selinuxfs.c
index 4e93f9e..3ad2902 100644
--- a/security/selinux/selinuxfs.c
+++ b/security/selinux/selinuxfs.c
@@ -1259,12 +1259,8 @@
 		if (!inode)
 			goto out;
 
-		ret = -EINVAL;
-		len = snprintf(page, PAGE_SIZE, "/%s/%s", BOOL_DIR_NAME, names[i]);
-		if (len < 0)
-			goto out;
-
 		ret = -ENAMETOOLONG;
+		len = snprintf(page, PAGE_SIZE, "/%s/%s", BOOL_DIR_NAME, names[i]);
 		if (len >= PAGE_SIZE)
 			goto out;
 
@@ -1557,19 +1553,10 @@
 static ssize_t sel_read_class(struct file *file, char __user *buf,
 				size_t count, loff_t *ppos)
 {
-	ssize_t rc, len;
-	char *page;
 	unsigned long ino = file->f_path.dentry->d_inode->i_ino;
-
-	page = (char *)__get_free_page(GFP_KERNEL);
-	if (!page)
-		return -ENOMEM;
-
-	len = snprintf(page, PAGE_SIZE, "%d", sel_ino_to_class(ino));
-	rc = simple_read_from_buffer(buf, count, ppos, page, len);
-	free_page((unsigned long)page);
-
-	return rc;
+	char res[TMPBUFLEN];
+	ssize_t len = snprintf(res, sizeof(res), "%d", sel_ino_to_class(ino));
+	return simple_read_from_buffer(buf, count, ppos, res, len);
 }
 
 static const struct file_operations sel_class_ops = {
@@ -1580,19 +1567,10 @@
 static ssize_t sel_read_perm(struct file *file, char __user *buf,
 				size_t count, loff_t *ppos)
 {
-	ssize_t rc, len;
-	char *page;
 	unsigned long ino = file->f_path.dentry->d_inode->i_ino;
-
-	page = (char *)__get_free_page(GFP_KERNEL);
-	if (!page)
-		return -ENOMEM;
-
-	len = snprintf(page, PAGE_SIZE, "%d", sel_ino_to_perm(ino));
-	rc = simple_read_from_buffer(buf, count, ppos, page, len);
-	free_page((unsigned long)page);
-
-	return rc;
+	char res[TMPBUFLEN];
+	ssize_t len = snprintf(res, sizeof(res), "%d", sel_ino_to_perm(ino));
+	return simple_read_from_buffer(buf, count, ppos, res, len);
 }
 
 static const struct file_operations sel_perm_ops = {
diff --git a/security/smack/smack_lsm.c b/security/smack/smack_lsm.c
index d583c05..ee0bb57 100644
--- a/security/smack/smack_lsm.c
+++ b/security/smack/smack_lsm.c
@@ -1171,7 +1171,7 @@
 }
 
 /**
- * smack_file_mmap :
+ * smack_mmap_file :
  * Check permissions for a mmap operation.  The @file may be NULL, e.g.
  * if mapping anonymous memory.
  * @file contains the file structure for file to map (may be NULL).
@@ -1180,10 +1180,9 @@
  * @flags contains the operational flags.
  * Return 0 if permission is granted.
  */
-static int smack_file_mmap(struct file *file,
+static int smack_mmap_file(struct file *file,
 			   unsigned long reqprot, unsigned long prot,
-			   unsigned long flags, unsigned long addr,
-			   unsigned long addr_only)
+			   unsigned long flags)
 {
 	struct smack_known *skp;
 	struct smack_rule *srp;
@@ -1198,11 +1197,6 @@
 	int tmay;
 	int rc;
 
-	/* do DAC check on address space usage */
-	rc = cap_file_mmap(file, reqprot, prot, flags, addr, addr_only);
-	if (rc || addr_only)
-		return rc;
-
 	if (file == NULL || file->f_dentry == NULL)
 		return 0;
 
@@ -3482,7 +3476,8 @@
 	.file_ioctl = 			smack_file_ioctl,
 	.file_lock = 			smack_file_lock,
 	.file_fcntl = 			smack_file_fcntl,
-	.file_mmap =			smack_file_mmap,
+	.mmap_file =			smack_mmap_file,
+	.mmap_addr =			cap_mmap_addr,
 	.file_set_fowner = 		smack_file_set_fowner,
 	.file_send_sigiotask = 		smack_file_send_sigiotask,
 	.file_receive = 		smack_file_receive,
diff --git a/sound/core/compress_offload.c b/sound/core/compress_offload.c
index a68aed7..ec2118d 100644
--- a/sound/core/compress_offload.c
+++ b/sound/core/compress_offload.c
@@ -502,10 +502,8 @@
 	if (stream->runtime->state != SNDRV_PCM_STATE_RUNNING)
 		return -EPERM;
 	retval = stream->ops->trigger(stream, SNDRV_PCM_TRIGGER_PAUSE_PUSH);
-	if (!retval) {
+	if (!retval)
 		stream->runtime->state = SNDRV_PCM_STATE_PAUSED;
-		wake_up(&stream->runtime->sleep);
-	}
 	return retval;
 }
 
@@ -544,6 +542,10 @@
 	if (!retval) {
 		stream->runtime->state = SNDRV_PCM_STATE_SETUP;
 		wake_up(&stream->runtime->sleep);
+		stream->runtime->hw_pointer = 0;
+		stream->runtime->app_pointer = 0;
+		stream->runtime->total_bytes_available = 0;
+		stream->runtime->total_bytes_transferred = 0;
 	}
 	return retval;
 }
diff --git a/sound/core/pcm_lib.c b/sound/core/pcm_lib.c
index faedb14..8f312fa 100644
--- a/sound/core/pcm_lib.c
+++ b/sound/core/pcm_lib.c
@@ -313,9 +313,22 @@
 	snd_pcm_uframes_t old_hw_ptr, new_hw_ptr, hw_base;
 	snd_pcm_sframes_t hdelta, delta;
 	unsigned long jdelta;
+	unsigned long curr_jiffies;
+	struct timespec curr_tstamp;
 
 	old_hw_ptr = runtime->status->hw_ptr;
+
+	/*
+	 * group pointer, time and jiffies reads to allow for more
+	 * accurate correlations/corrections.
+	 * The values are stored at the end of this routine after
+	 * corrections for hw_ptr position
+	 */
 	pos = substream->ops->pointer(substream);
+	curr_jiffies = jiffies;
+	if (runtime->tstamp_mode == SNDRV_PCM_TSTAMP_ENABLE)
+		snd_pcm_gettime(runtime, (struct timespec *)&curr_tstamp);
+
 	if (pos == SNDRV_PCM_POS_XRUN) {
 		xrun(substream);
 		return -EPIPE;
@@ -343,7 +356,7 @@
 		delta = runtime->hw_ptr_interrupt + runtime->period_size;
 		if (delta > new_hw_ptr) {
 			/* check for double acknowledged interrupts */
-			hdelta = jiffies - runtime->hw_ptr_jiffies;
+			hdelta = curr_jiffies - runtime->hw_ptr_jiffies;
 			if (hdelta > runtime->hw_ptr_buffer_jiffies/2) {
 				hw_base += runtime->buffer_size;
 				if (hw_base >= runtime->boundary)
@@ -388,7 +401,7 @@
 		 * Without regular period interrupts, we have to check
 		 * the elapsed time to detect xruns.
 		 */
-		jdelta = jiffies - runtime->hw_ptr_jiffies;
+		jdelta = curr_jiffies - runtime->hw_ptr_jiffies;
 		if (jdelta < runtime->hw_ptr_buffer_jiffies / 2)
 			goto no_delta_check;
 		hdelta = jdelta - delta * HZ / runtime->rate;
@@ -430,7 +443,7 @@
 	if (hdelta < runtime->delay)
 		goto no_jiffies_check;
 	hdelta -= runtime->delay;
-	jdelta = jiffies - runtime->hw_ptr_jiffies;
+	jdelta = curr_jiffies - runtime->hw_ptr_jiffies;
 	if (((hdelta * HZ) / runtime->rate) > jdelta + HZ/100) {
 		delta = jdelta /
 			(((runtime->period_size * HZ) / runtime->rate)
@@ -492,9 +505,9 @@
 	}
 	runtime->hw_ptr_base = hw_base;
 	runtime->status->hw_ptr = new_hw_ptr;
-	runtime->hw_ptr_jiffies = jiffies;
+	runtime->hw_ptr_jiffies = curr_jiffies;
 	if (runtime->tstamp_mode == SNDRV_PCM_TSTAMP_ENABLE)
-		snd_pcm_gettime(runtime, (struct timespec *)&runtime->status->tstamp);
+		runtime->status->tstamp = curr_tstamp;
 
 	return snd_pcm_update_state(substream, runtime);
 }
diff --git a/sound/i2c/other/tea575x-tuner.c b/sound/i2c/other/tea575x-tuner.c
index 582aace..7eca25f 100644
--- a/sound/i2c/other/tea575x-tuner.c
+++ b/sound/i2c/other/tea575x-tuner.c
@@ -37,8 +37,8 @@
 MODULE_DESCRIPTION("Routines for control of TEA5757/5759 Philips AM/FM radio tuner chips");
 MODULE_LICENSE("GPL");
 
-#define FREQ_LO		 (76U * 16000)
-#define FREQ_HI		(108U * 16000)
+#define FREQ_LO		((tea->tea5759 ? 760 :  875) * 1600U)
+#define FREQ_HI		((tea->tea5759 ? 910 : 1080) * 1600U)
 
 /*
  * definitions
@@ -120,9 +120,9 @@
 	return data;
 }
 
-static u32 snd_tea575x_get_freq(struct snd_tea575x *tea)
+static u32 snd_tea575x_val_to_freq(struct snd_tea575x *tea, u32 val)
 {
-	u32 freq = snd_tea575x_read(tea) & TEA575X_BIT_FREQ_MASK;
+	u32 freq = val & TEA575X_BIT_FREQ_MASK;
 
 	if (freq == 0)
 		return freq;
@@ -139,6 +139,11 @@
 	return clamp(freq * 16, FREQ_LO, FREQ_HI); /* from kHz */
 }
 
+static u32 snd_tea575x_get_freq(struct snd_tea575x *tea)
+{
+	return snd_tea575x_val_to_freq(tea, snd_tea575x_read(tea));
+}
+
 static void snd_tea575x_set_freq(struct snd_tea575x *tea)
 {
 	u32 freq = tea->freq;
@@ -156,6 +161,7 @@
 	tea->val &= ~TEA575X_BIT_FREQ_MASK;
 	tea->val |= freq & TEA575X_BIT_FREQ_MASK;
 	snd_tea575x_write(tea, tea->val);
+	tea->freq = snd_tea575x_val_to_freq(tea, tea->val);
 }
 
 /*
@@ -317,7 +323,6 @@
 }
 
 static const struct v4l2_file_operations tea575x_fops = {
-	.owner		= THIS_MODULE,
 	.unlocked_ioctl	= video_ioctl2,
 	.open           = v4l2_fh_open,
 	.release        = v4l2_fh_release,
@@ -337,7 +342,6 @@
 };
 
 static const struct video_device tea575x_radio = {
-	.fops           = &tea575x_fops,
 	.ioctl_ops 	= &tea575x_ioctl_ops,
 	.release        = video_device_release_empty,
 };
@@ -349,7 +353,7 @@
 /*
  * initialize all the tea575x chips
  */
-int snd_tea575x_init(struct snd_tea575x *tea)
+int snd_tea575x_init(struct snd_tea575x *tea, struct module *owner)
 {
 	int retval;
 
@@ -374,6 +378,9 @@
 	tea->vd.lock = &tea->mutex;
 	tea->vd.v4l2_dev = tea->v4l2_dev;
 	tea->vd.ctrl_handler = &tea->ctrl_handler;
+	tea->fops = tea575x_fops;
+	tea->fops.owner = owner;
+	tea->vd.fops = &tea->fops;
 	set_bit(V4L2_FL_USE_FH_PRIO, &tea->vd.flags);
 	/* disable hw_freq_seek if we can't use it */
 	if (tea->cannot_read_data)
diff --git a/sound/pci/es1968.c b/sound/pci/es1968.c
index 67f47d8..52b5c0b 100644
--- a/sound/pci/es1968.c
+++ b/sound/pci/es1968.c
@@ -2769,7 +2769,7 @@
 	chip->tea.ops = &snd_es1968_tea_ops;
 	strlcpy(chip->tea.card, "SF64-PCE2", sizeof(chip->tea.card));
 	sprintf(chip->tea.bus_info, "PCI:%s", pci_name(pci));
-	if (!snd_tea575x_init(&chip->tea))
+	if (!snd_tea575x_init(&chip->tea, THIS_MODULE))
 		printk(KERN_INFO "es1968: detected TEA575x radio\n");
 #endif
 
diff --git a/sound/pci/fm801.c b/sound/pci/fm801.c
index f696623..b32e802 100644
--- a/sound/pci/fm801.c
+++ b/sound/pci/fm801.c
@@ -1254,7 +1254,7 @@
 	sprintf(chip->tea.bus_info, "PCI:%s", pci_name(pci));
 	if ((tea575x_tuner & TUNER_TYPE_MASK) > 0 &&
 	    (tea575x_tuner & TUNER_TYPE_MASK) < 4) {
-		if (snd_tea575x_init(&chip->tea)) {
+		if (snd_tea575x_init(&chip->tea, THIS_MODULE)) {
 			snd_printk(KERN_ERR "TEA575x radio not found\n");
 			snd_fm801_free(chip);
 			return -ENODEV;
@@ -1263,7 +1263,7 @@
 		/* autodetect tuner connection */
 		for (tea575x_tuner = 1; tea575x_tuner <= 3; tea575x_tuner++) {
 			chip->tea575x_tuner = tea575x_tuner;
-			if (!snd_tea575x_init(&chip->tea)) {
+			if (!snd_tea575x_init(&chip->tea, THIS_MODULE)) {
 				snd_printk(KERN_INFO "detected TEA575x radio type %s\n",
 					   get_tea575x_gpio(chip)->name);
 				break;
diff --git a/sound/pci/hda/Kconfig b/sound/pci/hda/Kconfig
index 163b6b5..d030797 100644
--- a/sound/pci/hda/Kconfig
+++ b/sound/pci/hda/Kconfig
@@ -97,19 +97,6 @@
 	  snd-hda-codec-realtek.
 	  This module is automatically loaded at probing.
 
-config SND_HDA_ENABLE_REALTEK_QUIRKS
-	bool "Build static quirks for Realtek codecs"
-	depends on SND_HDA_CODEC_REALTEK
-	default y
-	help
-	  Say Y here to build the static quirks codes for Realtek codecs.
-	  If you need the "model" preset that the default BIOS auto-parser
-	  can't handle, turn this option on.
-
-	  If your device works with model=auto option, basically you don't
-	  need the quirk code.  By turning this off, you can reduce the
-	  module size quite a lot.
-
 config SND_HDA_CODEC_ANALOG
 	bool "Build Analog Device HD-audio codec support"
 	default y
diff --git a/sound/pci/hda/hda_auto_parser.c b/sound/pci/hda/hda_auto_parser.c
index 6e9ef3e..f7520b9 100644
--- a/sound/pci/hda/hda_auto_parser.c
+++ b/sound/pci/hda/hda_auto_parser.c
@@ -618,7 +618,6 @@
 			  const struct hda_verb *list)
 {
 	const struct hda_verb **v;
-	snd_array_init(&spec->verbs, sizeof(struct hda_verb *), 8);
 	v = snd_array_new(&spec->verbs);
 	if (!v)
 		return -ENOMEM;
diff --git a/sound/pci/hda/hda_auto_parser.h b/sound/pci/hda/hda_auto_parser.h
index 2a7889d..632ad0a 100644
--- a/sound/pci/hda/hda_auto_parser.h
+++ b/sound/pci/hda/hda_auto_parser.h
@@ -157,4 +157,14 @@
 			const struct snd_pci_quirk *quirk,
 			const struct hda_fixup *fixlist);
 
+static inline void snd_hda_gen_init(struct hda_gen_spec *spec)
+{
+	snd_array_init(&spec->verbs, sizeof(struct hda_verb *), 8);
+}
+
+static inline void snd_hda_gen_free(struct hda_gen_spec *spec)
+{
+	snd_array_free(&spec->verbs);
+}
+
 #endif /* __SOUND_HDA_AUTO_PARSER_H */
diff --git a/sound/pci/hda/hda_codec.c b/sound/pci/hda/hda_codec.c
index eb09a33..51cb2a2 100644
--- a/sound/pci/hda/hda_codec.c
+++ b/sound/pci/hda/hda_codec.c
@@ -1184,6 +1184,7 @@
 {
 	if (!codec)
 		return;
+	snd_hda_jack_tbl_clear(codec);
 	restore_init_pincfgs(codec);
 #ifdef CONFIG_SND_HDA_POWER_SAVE
 	cancel_delayed_work(&codec->power_work);
@@ -1192,6 +1193,7 @@
 	list_del(&codec->list);
 	snd_array_free(&codec->mixers);
 	snd_array_free(&codec->nids);
+	snd_array_free(&codec->cvt_setups);
 	snd_array_free(&codec->conn_lists);
 	snd_array_free(&codec->spdif_out);
 	codec->bus->caddr_tbl[codec->addr] = NULL;
@@ -2239,24 +2241,50 @@
 /* pseudo device locking
  * toggle card->shutdown to allow/disallow the device access (as a hack)
  */
-static int hda_lock_devices(struct snd_card *card)
+int snd_hda_lock_devices(struct hda_bus *bus)
 {
+	struct snd_card *card = bus->card;
+	struct hda_codec *codec;
+
 	spin_lock(&card->files_lock);
-	if (card->shutdown) {
-		spin_unlock(&card->files_lock);
-		return -EINVAL;
-	}
+	if (card->shutdown)
+		goto err_unlock;
 	card->shutdown = 1;
+	if (!list_empty(&card->ctl_files))
+		goto err_clear;
+
+	list_for_each_entry(codec, &bus->codec_list, list) {
+		int pcm;
+		for (pcm = 0; pcm < codec->num_pcms; pcm++) {
+			struct hda_pcm *cpcm = &codec->pcm_info[pcm];
+			if (!cpcm->pcm)
+				continue;
+			if (cpcm->pcm->streams[0].substream_opened ||
+			    cpcm->pcm->streams[1].substream_opened)
+				goto err_clear;
+		}
+	}
 	spin_unlock(&card->files_lock);
 	return 0;
-}
 
-static void hda_unlock_devices(struct snd_card *card)
+ err_clear:
+	card->shutdown = 0;
+ err_unlock:
+	spin_unlock(&card->files_lock);
+	return -EINVAL;
+}
+EXPORT_SYMBOL_HDA(snd_hda_lock_devices);
+
+void snd_hda_unlock_devices(struct hda_bus *bus)
 {
+	struct snd_card *card;
+
+	card = bus->card;
 	spin_lock(&card->files_lock);
 	card->shutdown = 0;
 	spin_unlock(&card->files_lock);
 }
+EXPORT_SYMBOL_HDA(snd_hda_unlock_devices);
 
 /**
  * snd_hda_codec_reset - Clear all objects assigned to the codec
@@ -2270,26 +2298,12 @@
  */
 int snd_hda_codec_reset(struct hda_codec *codec)
 {
-	struct snd_card *card = codec->bus->card;
-	int i, pcm;
+	struct hda_bus *bus = codec->bus;
+	struct snd_card *card = bus->card;
+	int i;
 
-	if (hda_lock_devices(card) < 0)
+	if (snd_hda_lock_devices(bus) < 0)
 		return -EBUSY;
-	/* check whether the codec isn't used by any mixer or PCM streams */
-	if (!list_empty(&card->ctl_files)) {
-		hda_unlock_devices(card);
-		return -EBUSY;
-	}
-	for (pcm = 0; pcm < codec->num_pcms; pcm++) {
-		struct hda_pcm *cpcm = &codec->pcm_info[pcm];
-		if (!cpcm->pcm)
-			continue;
-		if (cpcm->pcm->streams[0].substream_opened ||
-		    cpcm->pcm->streams[1].substream_opened) {
-			hda_unlock_devices(card);
-			return -EBUSY;
-		}
-	}
 
 	/* OK, let it free */
 
@@ -2298,7 +2312,7 @@
 	codec->power_on = 0;
 	codec->power_transition = 0;
 	codec->power_jiffies = jiffies;
-	flush_workqueue(codec->bus->workq);
+	flush_workqueue(bus->workq);
 #endif
 	snd_hda_ctls_clear(codec);
 	/* relase PCMs */
@@ -2306,7 +2320,7 @@
 		if (codec->pcm_info[i].pcm) {
 			snd_device_free(card, codec->pcm_info[i].pcm);
 			clear_bit(codec->pcm_info[i].device,
-				  codec->bus->pcm_dev_bits);
+				  bus->pcm_dev_bits);
 		}
 	}
 	if (codec->patch_ops.free)
@@ -2321,6 +2335,8 @@
 	/* free only driver_pins so that init_pins + user_pins are restored */
 	snd_array_free(&codec->driver_pins);
 	restore_pincfgs(codec);
+	snd_array_free(&codec->cvt_setups);
+	snd_array_free(&codec->spdif_out);
 	codec->num_pcms = 0;
 	codec->pcm_info = NULL;
 	codec->preset = NULL;
@@ -2331,7 +2347,7 @@
 	codec->owner = NULL;
 
 	/* allow device access again */
-	hda_unlock_devices(card);
+	snd_hda_unlock_devices(bus);
 	return 0;
 }
 
@@ -4381,20 +4397,19 @@
 	codec->power_jiffies += delta;
 }
 
-/**
- * snd_hda_power_up - Power-up the codec
- * @codec: HD-audio codec
- *
- * Increment the power-up counter and power up the hardware really when
- * not turned on yet.
- */
-void snd_hda_power_up(struct hda_codec *codec)
+/* Transition to powered up; if wait_power_down is set, wait for a pending
+ * transition to D3 to complete. A pending D3 transition is indicated
+ * with power_transition == -1. */
+static void __snd_hda_power_up(struct hda_codec *codec, bool wait_power_down)
 {
 	struct hda_bus *bus = codec->bus;
 
 	spin_lock(&codec->power_lock);
 	codec->power_count++;
-	if (codec->power_on || codec->power_transition > 0) {
+	/* Return if power_on or transitioning to power_on, unless currently
+	 * powering down. */
+	if ((codec->power_on || codec->power_transition > 0) &&
+	    !(wait_power_down && codec->power_transition < 0)) {
 		spin_unlock(&codec->power_lock);
 		return;
 	}
@@ -4418,8 +4433,37 @@
 	codec->power_transition = 0;
 	spin_unlock(&codec->power_lock);
 }
+
+/**
+ * snd_hda_power_up - Power-up the codec
+ * @codec: HD-audio codec
+ *
+ * Increment the power-up counter and power up the hardware really when
+ * not turned on yet.
+ */
+void snd_hda_power_up(struct hda_codec *codec)
+{
+	__snd_hda_power_up(codec, false);
+}
 EXPORT_SYMBOL_HDA(snd_hda_power_up);
 
+/**
+ * snd_hda_power_up_d3wait - Power-up the codec after waiting for any pending
+ *   D3 transition to complete.  This differs from snd_hda_power_up() when
+ *   power_transition == -1.  snd_hda_power_up sees this case as a nop,
+ *   snd_hda_power_up_d3wait waits for the D3 transition to complete then powers
+ *   back up.
+ * @codec: HD-audio codec
+ *
+ * Cancel any power down operation happening on the work queue, then power up.
+ */
+void snd_hda_power_up_d3wait(struct hda_codec *codec)
+{
+	/* This will cancel and wait for pending power_work to complete. */
+	__snd_hda_power_up(codec, true);
+}
+EXPORT_SYMBOL_HDA(snd_hda_power_up_d3wait);
+
 #define power_save(codec)	\
 	((codec)->bus->power_save ? *(codec)->bus->power_save : 0)
 
diff --git a/sound/pci/hda/hda_codec.h b/sound/pci/hda/hda_codec.h
index 54b5281..2fdaadb 100644
--- a/sound/pci/hda/hda_codec.h
+++ b/sound/pci/hda/hda_codec.h
@@ -1023,6 +1023,9 @@
 				    unsigned int power_state,
 				    bool eapd_workaround);
 
+int snd_hda_lock_devices(struct hda_bus *bus);
+void snd_hda_unlock_devices(struct hda_bus *bus);
+
 /*
  * power management
  */
@@ -1053,10 +1056,12 @@
  */
 #ifdef CONFIG_SND_HDA_POWER_SAVE
 void snd_hda_power_up(struct hda_codec *codec);
+void snd_hda_power_up_d3wait(struct hda_codec *codec);
 void snd_hda_power_down(struct hda_codec *codec);
 void snd_hda_update_power_acct(struct hda_codec *codec);
 #else
 static inline void snd_hda_power_up(struct hda_codec *codec) {}
+static inline void snd_hda_power_up_d3wait(struct hda_codec *codec) {}
 static inline void snd_hda_power_down(struct hda_codec *codec) {}
 #endif
 
diff --git a/sound/pci/hda/hda_intel.c b/sound/pci/hda/hda_intel.c
index 4ab8102..7757536 100644
--- a/sound/pci/hda/hda_intel.c
+++ b/sound/pci/hda/hda_intel.c
@@ -53,6 +53,8 @@
 #endif
 #include <sound/core.h>
 #include <sound/initval.h>
+#include <linux/vgaarb.h>
+#include <linux/vga_switcheroo.h>
 #include "hda_codec.h"
 
 
@@ -175,6 +177,13 @@
 #define SFX	"hda-intel: "
 #endif
 
+#if defined(CONFIG_PM) && defined(CONFIG_VGA_SWITCHEROO)
+#ifdef CONFIG_SND_HDA_CODEC_HDMI
+#define SUPPORT_VGA_SWITCHEROO
+#endif
+#endif
+
+
 /*
  * registers
  */
@@ -472,6 +481,12 @@
 	unsigned int probing :1; /* codec probing phase */
 	unsigned int snoop:1;
 	unsigned int align_buffer_size:1;
+	unsigned int region_requested:1;
+
+	/* VGA-switcheroo setup */
+	unsigned int use_vga_switcheroo:1;
+	unsigned int init_failed:1; /* delayed init failed */
+	unsigned int disabled:1; /* disabled by VGA-switcher */
 
 	/* for debugging */
 	unsigned int last_cmd[AZX_MAX_CODECS];
@@ -538,7 +553,20 @@
 #define AZX_DCAPS_PRESET_CTHDA \
 	(AZX_DCAPS_NO_MSI | AZX_DCAPS_POSFIX_LPIB | AZX_DCAPS_4K_BDLE_BOUNDARY)
 
-static char *driver_short_names[] __devinitdata = {
+/*
+ * VGA-switcher support
+ */
+#ifdef SUPPORT_VGA_SWITCHEROO
+#define DELAYED_INIT_MARK
+#define DELAYED_INITDATA_MARK
+#define use_vga_switcheroo(chip)	((chip)->use_vga_switcheroo)
+#else
+#define DELAYED_INIT_MARK	__devinit
+#define DELAYED_INITDATA_MARK	__devinitdata
+#define use_vga_switcheroo(chip)	0
+#endif
+
+static char *driver_short_names[] DELAYED_INITDATA_MARK = {
 	[AZX_DRIVER_ICH] = "HDA Intel",
 	[AZX_DRIVER_PCH] = "HDA Intel PCH",
 	[AZX_DRIVER_SCH] = "HDA Intel MID",
@@ -959,6 +987,8 @@
 {
 	struct azx *chip = bus->private_data;
 
+	if (chip->disabled)
+		return 0;
 	chip->last_cmd[azx_command_addr(val)] = val;
 	if (chip->single_cmd)
 		return azx_single_send_cmd(bus, val);
@@ -971,6 +1001,8 @@
 				     unsigned int addr)
 {
 	struct azx *chip = bus->private_data;
+	if (chip->disabled)
+		return 0;
 	if (chip->single_cmd)
 		return azx_single_get_response(bus, addr);
 	else
@@ -1236,6 +1268,11 @@
 
 	spin_lock(&chip->reg_lock);
 
+	if (chip->disabled) {
+		spin_unlock(&chip->reg_lock);
+		return IRQ_NONE;
+	}
+
 	status = azx_readl(chip, INTSTS);
 	if (status == 0) {
 		spin_unlock(&chip->reg_lock);
@@ -1521,12 +1558,12 @@
  */
 
 /* number of codec slots for each chipset: 0 = default slots (i.e. 4) */
-static unsigned int azx_max_codecs[AZX_NUM_DRIVERS] __devinitdata = {
+static unsigned int azx_max_codecs[AZX_NUM_DRIVERS] DELAYED_INITDATA_MARK = {
 	[AZX_DRIVER_NVIDIA] = 8,
 	[AZX_DRIVER_TERA] = 1,
 };
 
-static int __devinit azx_codec_create(struct azx *chip, const char *model)
+static int DELAYED_INIT_MARK azx_codec_create(struct azx *chip, const char *model)
 {
 	struct hda_bus_template bus_temp;
 	int c, codecs, err;
@@ -1729,7 +1766,7 @@
 				   buff_step);
 	snd_pcm_hw_constraint_step(runtime, 0, SNDRV_PCM_HW_PARAM_PERIOD_BYTES,
 				   buff_step);
-	snd_hda_power_up(apcm->codec);
+	snd_hda_power_up_d3wait(apcm->codec);
 	err = hinfo->ops.open(hinfo, apcm->codec, substream);
 	if (err < 0) {
 		azx_release_device(azx_dev);
@@ -2444,6 +2481,106 @@
 		unregister_reboot_notifier(&chip->reboot_notifier);
 }
 
+static int DELAYED_INIT_MARK azx_first_init(struct azx *chip);
+static int DELAYED_INIT_MARK azx_probe_continue(struct azx *chip);
+
+#ifdef SUPPORT_VGA_SWITCHEROO
+static struct pci_dev __devinit *get_bound_vga(struct pci_dev *pci);
+
+static void azx_vs_set_state(struct pci_dev *pci,
+			     enum vga_switcheroo_state state)
+{
+	struct snd_card *card = pci_get_drvdata(pci);
+	struct azx *chip = card->private_data;
+	bool disabled;
+
+	if (chip->init_failed)
+		return;
+
+	disabled = (state == VGA_SWITCHEROO_OFF);
+	if (chip->disabled == disabled)
+		return;
+
+	if (!chip->bus) {
+		chip->disabled = disabled;
+		if (!disabled) {
+			snd_printk(KERN_INFO SFX
+				   "%s: Start delayed initialization\n",
+				   pci_name(chip->pci));
+			if (azx_first_init(chip) < 0 ||
+			    azx_probe_continue(chip) < 0) {
+				snd_printk(KERN_ERR SFX
+					   "%s: initialization error\n",
+					   pci_name(chip->pci));
+				chip->init_failed = true;
+			}
+		}
+	} else {
+		snd_printk(KERN_INFO SFX
+			   "%s %s via VGA-switcheroo\n",
+			   disabled ? "Disabling" : "Enabling",
+			   pci_name(chip->pci));
+		if (disabled) {
+			azx_suspend(pci, PMSG_FREEZE);
+			chip->disabled = true;
+			snd_hda_lock_devices(chip->bus);
+		} else {
+			snd_hda_unlock_devices(chip->bus);
+			chip->disabled = false;
+			azx_resume(pci);
+		}
+	}
+}
+
+static bool azx_vs_can_switch(struct pci_dev *pci)
+{
+	struct snd_card *card = pci_get_drvdata(pci);
+	struct azx *chip = card->private_data;
+
+	if (chip->init_failed)
+		return false;
+	if (chip->disabled || !chip->bus)
+		return true;
+	if (snd_hda_lock_devices(chip->bus))
+		return false;
+	snd_hda_unlock_devices(chip->bus);
+	return true;
+}
+
+static void __devinit init_vga_switcheroo(struct azx *chip)
+{
+	struct pci_dev *p = get_bound_vga(chip->pci);
+	if (p) {
+		snd_printk(KERN_INFO SFX
+			   "%s: Handle VGA-switcheroo audio client\n",
+			   pci_name(chip->pci));
+		chip->use_vga_switcheroo = 1;
+		pci_dev_put(p);
+	}
+}
+
+static const struct vga_switcheroo_client_ops azx_vs_ops = {
+	.set_gpu_state = azx_vs_set_state,
+	.can_switch = azx_vs_can_switch,
+};
+
+static int __devinit register_vga_switcheroo(struct azx *chip)
+{
+	if (!chip->use_vga_switcheroo)
+		return 0;
+	/* FIXME: currently only the DIS controller is handled;
+	 * is there any machine with two switchable HDMI audio controllers?
+	 */
+	return vga_switcheroo_register_audio_client(chip->pci, &azx_vs_ops,
+						    VGA_SWITCHEROO_DIS,
+						    chip->bus != NULL);
+}
+#else
+#define init_vga_switcheroo(chip)		/* NOP */
+#define register_vga_switcheroo(chip)		0
+#define check_hdmi_disabled(pci)	false
+#endif /* SUPPORT_VGA_SWITCHEROO */
+
 /*
  * destructor
  */
@@ -2453,6 +2590,12 @@
 
 	azx_notifier_unregister(chip);
 
+	if (use_vga_switcheroo(chip)) {
+		if (chip->disabled && chip->bus)
+			snd_hda_unlock_devices(chip->bus);
+		vga_switcheroo_unregister_client(chip->pci);
+	}
+
 	if (chip->initialized) {
 		azx_clear_irq_pending(chip);
 		for (i = 0; i < chip->num_streams; i++)
@@ -2482,7 +2625,8 @@
 		mark_pages_wc(chip, &chip->posbuf, false);
 		snd_dma_free_pages(&chip->posbuf);
 	}
-	pci_release_regions(chip->pci);
+	if (chip->region_requested)
+		pci_release_regions(chip->pci);
 	pci_disable_device(chip->pci);
 	kfree(chip->azx_dev);
 	kfree(chip);
@@ -2495,6 +2639,47 @@
 	return azx_free(device->device_data);
 }
 
+#ifdef SUPPORT_VGA_SWITCHEROO
+/*
+ * Check whether the HDMI controller has been disabled by vga-switcheroo
+ */
+static struct pci_dev __devinit *get_bound_vga(struct pci_dev *pci)
+{
+	struct pci_dev *p;
+
+	/* check only discrete GPU */
+	switch (pci->vendor) {
+	case PCI_VENDOR_ID_ATI:
+	case PCI_VENDOR_ID_AMD:
+	case PCI_VENDOR_ID_NVIDIA:
+		if (pci->devfn == 1) {
+			p = pci_get_domain_bus_and_slot(pci_domain_nr(pci->bus),
+							pci->bus->number, 0);
+			if (p) {
+				if ((p->class >> 8) == PCI_CLASS_DISPLAY_VGA)
+					return p;
+				pci_dev_put(p);
+			}
+		}
+		break;
+	}
+	return NULL;
+}
+
+static bool __devinit check_hdmi_disabled(struct pci_dev *pci)
+{
+	bool vga_inactive = false;
+	struct pci_dev *p = get_bound_vga(pci);
+
+	if (p) {
+		if (vga_switcheroo_get_client_state(p) == VGA_SWITCHEROO_OFF)
+			vga_inactive = true;
+		pci_dev_put(p);
+	}
+	return vga_inactive;
+}
+#endif /* SUPPORT_VGA_SWITCHEROO */
+
 /*
  * white/black-listing for position_fix
  */
@@ -2672,12 +2857,11 @@
 				int dev, unsigned int driver_caps,
 				struct azx **rchip)
 {
-	struct azx *chip;
-	int i, err;
-	unsigned short gcap;
 	static struct snd_device_ops ops = {
 		.dev_free = azx_dev_free,
 	};
+	struct azx *chip;
+	int err;
 
 	*rchip = NULL;
 
@@ -2703,6 +2887,7 @@
 	chip->dev_index = dev;
 	INIT_WORK(&chip->irq_pending_work, azx_irq_pending_work);
 	INIT_LIST_HEAD(&chip->pcm_list);
+	init_vga_switcheroo(chip);
 
 	chip->position_fix[0] = chip->position_fix[1] =
 		check_position_fix(chip, position_fix[dev]);
@@ -2730,6 +2915,53 @@
 		}
 	}
 
+	if (check_hdmi_disabled(pci)) {
+		snd_printk(KERN_INFO SFX "VGA controller for %s is disabled\n",
+			   pci_name(pci));
+		if (use_vga_switcheroo(chip)) {
+			snd_printk(KERN_INFO SFX "Delaying initialization\n");
+			chip->disabled = true;
+			goto ok;
+		}
+		kfree(chip);
+		pci_disable_device(pci);
+		return -ENXIO;
+	}
+
+	err = azx_first_init(chip);
+	if (err < 0) {
+		azx_free(chip);
+		return err;
+	}
+
+ ok:
+	err = register_vga_switcheroo(chip);
+	if (err < 0) {
+		snd_printk(KERN_ERR SFX
+			   "Error registering VGA-switcheroo client\n");
+		azx_free(chip);
+		return err;
+	}
+
+	err = snd_device_new(card, SNDRV_DEV_LOWLEVEL, chip, &ops);
+	if (err < 0) {
+		snd_printk(KERN_ERR SFX "Error creating device [card]!\n");
+		azx_free(chip);
+		return err;
+	}
+
+	*rchip = chip;
+	return 0;
+}
+
+static int DELAYED_INIT_MARK azx_first_init(struct azx *chip)
+{
+	int dev = chip->dev_index;
+	struct pci_dev *pci = chip->pci;
+	struct snd_card *card = chip->card;
+	int i, err;
+	unsigned short gcap;
+
 #if BITS_PER_LONG != 64
 	/* Fix up base address on ULI M5461 */
 	if (chip->driver_type == AZX_DRIVER_ULI) {
@@ -2741,28 +2973,23 @@
 #endif
 
 	err = pci_request_regions(pci, "ICH HD audio");
-	if (err < 0) {
-		kfree(chip);
-		pci_disable_device(pci);
+	if (err < 0)
 		return err;
-	}
+	chip->region_requested = 1;
 
 	chip->addr = pci_resource_start(pci, 0);
 	chip->remap_addr = pci_ioremap_bar(pci, 0);
 	if (chip->remap_addr == NULL) {
 		snd_printk(KERN_ERR SFX "ioremap error\n");
-		err = -ENXIO;
-		goto errout;
+		return -ENXIO;
 	}
 
 	if (chip->msi)
 		if (pci_enable_msi(pci) < 0)
 			chip->msi = 0;
 
-	if (azx_acquire_irq(chip, 0) < 0) {
-		err = -EBUSY;
-		goto errout;
-	}
+	if (azx_acquire_irq(chip, 0) < 0)
+		return -EBUSY;
 
 	pci_set_master(pci);
 	synchronize_irq(chip->irq);
@@ -2841,7 +3068,7 @@
 				GFP_KERNEL);
 	if (!chip->azx_dev) {
 		snd_printk(KERN_ERR SFX "cannot malloc azx_dev\n");
-		goto errout;
+		return -ENOMEM;
 	}
 
 	for (i = 0; i < chip->num_streams; i++) {
@@ -2851,7 +3078,7 @@
 					  BDL_SIZE, &chip->azx_dev[i].bdl);
 		if (err < 0) {
 			snd_printk(KERN_ERR SFX "cannot allocate BDL\n");
-			goto errout;
+			return -ENOMEM;
 		}
 		mark_pages_wc(chip, &chip->azx_dev[i].bdl, true);
 	}
@@ -2861,13 +3088,13 @@
 				  chip->num_streams * 8, &chip->posbuf);
 	if (err < 0) {
 		snd_printk(KERN_ERR SFX "cannot allocate posbuf\n");
-		goto errout;
+		return -ENOMEM;
 	}
 	mark_pages_wc(chip, &chip->posbuf, true);
 	/* allocate CORB/RIRB */
 	err = azx_alloc_cmd_io(chip);
 	if (err < 0)
-		goto errout;
+		return err;
 
 	/* initialize streams */
 	azx_init_stream(chip);
@@ -2879,14 +3106,7 @@
 	/* codec detection */
 	if (!chip->codec_mask) {
 		snd_printk(KERN_ERR SFX "no codecs found!\n");
-		err = -ENODEV;
-		goto errout;
-	}
-
-	err = snd_device_new(card, SNDRV_DEV_LOWLEVEL, chip, &ops);
-	if (err <0) {
-		snd_printk(KERN_ERR SFX "Error creating device [card]!\n");
-		goto errout;
+		return -ENODEV;
 	}
 
 	strcpy(card->driver, "HDA-Intel");
@@ -2896,12 +3116,7 @@
 		 "%s at 0x%lx irq %i",
 		 card->shortname, chip->addr, chip->irq);
 
-	*rchip = chip;
 	return 0;
-
- errout:
-	azx_free(chip);
-	return err;
 }
 
 static void power_down_all_codecs(struct azx *chip)
@@ -2946,6 +3161,27 @@
 		goto out_free;
 	card->private_data = chip;
 
+	if (!chip->disabled) {
+		err = azx_probe_continue(chip);
+		if (err < 0)
+			goto out_free;
+	}
+
+	pci_set_drvdata(pci, card);
+
+	dev++;
+	return 0;
+
+out_free:
+	snd_card_free(card);
+	return err;
+}
+
+static int DELAYED_INIT_MARK azx_probe_continue(struct azx *chip)
+{
+	int dev = chip->dev_index;
+	int err;
+
 #ifdef CONFIG_SND_HDA_INPUT_BEEP
 	chip->beep_mode = beep_mode[dev];
 #endif
@@ -2979,25 +3215,26 @@
 	if (err < 0)
 		goto out_free;
 
-	err = snd_card_register(card);
+	err = snd_card_register(chip->card);
 	if (err < 0)
 		goto out_free;
 
-	pci_set_drvdata(pci, card);
 	chip->running = 1;
 	power_down_all_codecs(chip);
 	azx_notifier_register(chip);
 
-	dev++;
-	return err;
+	return 0;
+
 out_free:
-	snd_card_free(card);
+	chip->init_failed = 1;
 	return err;
 }
 
 static void __devexit azx_remove(struct pci_dev *pci)
 {
-	snd_card_free(pci_get_drvdata(pci));
+	struct snd_card *card = pci_get_drvdata(pci);
+	if (card)
+		snd_card_free(card);
 	pci_set_drvdata(pci, NULL);
 }
 
@@ -3117,6 +3354,11 @@
 	{ PCI_DEVICE(0x6549, 0x1200),
 	  .driver_data = AZX_DRIVER_TERA | AZX_DCAPS_NO_64BIT },
 	/* Creative X-Fi (CA0110-IBG) */
+	/* CTHDA chips */
+	{ PCI_DEVICE(0x1102, 0x0010),
+	  .driver_data = AZX_DRIVER_CTHDA | AZX_DCAPS_PRESET_CTHDA },
+	{ PCI_DEVICE(0x1102, 0x0012),
+	  .driver_data = AZX_DRIVER_CTHDA | AZX_DCAPS_PRESET_CTHDA },
 #if !defined(CONFIG_SND_CTXFI) && !defined(CONFIG_SND_CTXFI_MODULE)
 	/* the following entry conflicts with snd-ctxfi driver,
 	 * as ctxfi driver mutates from HD-audio to native mode with
@@ -3133,11 +3375,6 @@
 	  .driver_data = AZX_DRIVER_CTX | AZX_DCAPS_CTX_WORKAROUND |
 	  AZX_DCAPS_RIRB_PRE_DELAY | AZX_DCAPS_POSFIX_LPIB },
 #endif
-	/* CTHDA chips */
-	{ PCI_DEVICE(0x1102, 0x0010),
-	  .driver_data = AZX_DRIVER_CTHDA | AZX_DCAPS_PRESET_CTHDA },
-	{ PCI_DEVICE(0x1102, 0x0012),
-	  .driver_data = AZX_DRIVER_CTHDA | AZX_DCAPS_PRESET_CTHDA },
 	/* Vortex86MX */
 	{ PCI_DEVICE(0x17f3, 0x3010), .driver_data = AZX_DRIVER_GENERIC },
 	/* VMware HDAudio */
diff --git a/sound/pci/hda/patch_conexant.c b/sound/pci/hda/patch_conexant.c
index 3acb582..2bf99fc 100644
--- a/sound/pci/hda/patch_conexant.c
+++ b/sound/pci/hda/patch_conexant.c
@@ -445,8 +445,10 @@
 
 static void conexant_free(struct hda_codec *codec)
 {
+	struct conexant_spec *spec = codec->spec;
+	snd_hda_gen_free(&spec->gen);
 	snd_hda_detach_beep_device(codec);
-	kfree(codec->spec);
+	kfree(spec);
 }
 
 static const struct snd_kcontrol_new cxt_capture_mixers[] = {
@@ -4061,7 +4063,7 @@
 static int cx_auto_init(struct hda_codec *codec)
 {
 	struct conexant_spec *spec = codec->spec;
-	/*snd_hda_sequence_write(codec, cx_auto_init_verbs);*/
+	snd_hda_gen_apply_verbs(codec);
 	cx_auto_init_output(codec);
 	cx_auto_init_input(codec);
 	cx_auto_init_digital(codec);
@@ -4466,6 +4468,7 @@
 	SND_PCI_QUIRK(0x17aa, 0x21ce, "Lenovo T420", CXT_PINCFG_LENOVO_TP410),
 	SND_PCI_QUIRK(0x17aa, 0x21cf, "Lenovo T520", CXT_PINCFG_LENOVO_TP410),
 	SND_PCI_QUIRK(0x17aa, 0x3975, "Lenovo U300s", CXT_FIXUP_STEREO_DMIC),
+	SND_PCI_QUIRK(0x17aa, 0x397b, "Lenovo S205", CXT_FIXUP_STEREO_DMIC),
 	{}
 };
 
@@ -4497,6 +4500,7 @@
 	if (!spec)
 		return -ENOMEM;
 	codec->spec = spec;
+	snd_hda_gen_init(&spec->gen);
 
 	switch (codec->vendor_id) {
 	case 0x14f15045:
diff --git a/sound/pci/hda/patch_realtek.c b/sound/pci/hda/patch_realtek.c
index ff71dce..5ccf10a 100644
--- a/sound/pci/hda/patch_realtek.c
+++ b/sound/pci/hda/patch_realtek.c
@@ -1896,6 +1896,7 @@
 	alc_fix_pll(codec);
 	alc_auto_init_amp(codec, spec->init_amp);
 
+	snd_hda_gen_apply_verbs(codec);
 	alc_init_special_input_src(codec);
 	alc_auto_init_std(codec);
 
@@ -2288,6 +2289,7 @@
 	alc_shutup(codec);
 	alc_free_kctls(codec);
 	alc_free_bind_ctls(codec);
+	snd_hda_gen_free(&spec->gen);
 	kfree(spec);
 	snd_hda_detach_beep_device(codec);
 }
@@ -2368,6 +2370,7 @@
 	{ 0x10ec0269, 0xffff, 0xa023, "ALC259" },
 	{ 0x10ec0269, 0xffff, 0x6023, "ALC281X" },
 	{ 0x10ec0269, 0x00f0, 0x0020, "ALC269VC" },
+	{ 0x10ec0269, 0x00f0, 0x0030, "ALC269VD" },
 	{ 0x10ec0887, 0x00f0, 0x0030, "ALC887-VD" },
 	{ 0x10ec0888, 0x00f0, 0x0030, "ALC888-VD" },
 	{ 0x10ec0888, 0xf0f0, 0x3020, "ALC886" },
@@ -4251,6 +4254,7 @@
 		return -ENOMEM;
 	codec->spec = spec;
 	spec->mixer_nid = mixer_nid;
+	snd_hda_gen_init(&spec->gen);
 
 	err = alc_codec_rename_from_preset(codec);
 	if (err < 0) {
@@ -5614,6 +5618,7 @@
 	ALC269_TYPE_ALC269VA,
 	ALC269_TYPE_ALC269VB,
 	ALC269_TYPE_ALC269VC,
+	ALC269_TYPE_ALC269VD,
 };
 
 /*
@@ -5625,8 +5630,21 @@
 	static const hda_nid_t alc269_ssids[] = { 0, 0x1b, 0x14, 0x21 };
 	static const hda_nid_t alc269va_ssids[] = { 0x15, 0x1b, 0x14, 0 };
 	struct alc_spec *spec = codec->spec;
-	const hda_nid_t *ssids = spec->codec_variant == ALC269_TYPE_ALC269VA ?
-		alc269va_ssids : alc269_ssids;
+	const hda_nid_t *ssids;
+
+	switch (spec->codec_variant) {
+	case ALC269_TYPE_ALC269VA:
+	case ALC269_TYPE_ALC269VC:
+		ssids = alc269va_ssids;
+		break;
+	case ALC269_TYPE_ALC269VB:
+	case ALC269_TYPE_ALC269VD:
+		ssids = alc269_ssids;
+		break;
+	default:
+		ssids = alc269_ssids;
+		break;
+	}
 
 	return alc_parse_auto_config(codec, alc269_ignore, ssids);
 }
@@ -5643,6 +5661,11 @@
 
 static void alc269_shutup(struct hda_codec *codec)
 {
+	struct alc_spec *spec = codec->spec;
+
+	if (spec->codec_variant != ALC269_TYPE_ALC269VB)
+		return;
+
 	if ((alc_get_coef0(codec) & 0x00ff) == 0x017)
 		alc269_toggle_power_output(codec, 0);
 	if ((alc_get_coef0(codec) & 0x00ff) == 0x018) {
@@ -5654,19 +5677,24 @@
 #ifdef CONFIG_PM
 static int alc269_resume(struct hda_codec *codec)
 {
-	if ((alc_get_coef0(codec) & 0x00ff) == 0x018) {
+	struct alc_spec *spec = codec->spec;
+
+	if (spec->codec_variant == ALC269_TYPE_ALC269VB ||
+			(alc_get_coef0(codec) & 0x00ff) == 0x018) {
 		alc269_toggle_power_output(codec, 0);
 		msleep(150);
 	}
 
 	codec->patch_ops.init(codec);
 
-	if ((alc_get_coef0(codec) & 0x00ff) == 0x017) {
+	if (spec->codec_variant == ALC269_TYPE_ALC269VB ||
+			(alc_get_coef0(codec) & 0x00ff) == 0x017) {
 		alc269_toggle_power_output(codec, 1);
 		msleep(200);
 	}
 
-	if ((alc_get_coef0(codec) & 0x00ff) == 0x018)
+	if (spec->codec_variant == ALC269_TYPE_ALC269VB ||
+			(alc_get_coef0(codec) & 0x00ff) == 0x018)
 		alc269_toggle_power_output(codec, 1);
 
 	snd_hda_codec_resume_amp(codec);
@@ -6081,6 +6109,9 @@
 				err = alc_codec_rename(codec, "ALC3202");
 			spec->codec_variant = ALC269_TYPE_ALC269VC;
 			break;
+		case 0x0030:
+			spec->codec_variant = ALC269_TYPE_ALC269VD;
+			break;
 		default:
 			alc_fix_pll_init(codec, 0x20, 0x04, 15);
 		}
@@ -6411,6 +6442,7 @@
 	ALC662_FIXUP_ASUS_MODE7,
 	ALC662_FIXUP_ASUS_MODE8,
 	ALC662_FIXUP_NO_JACK_DETECT,
+	ALC662_FIXUP_ZOTAC_Z68,
 };
 
 static const struct alc_fixup alc662_fixups[] = {
@@ -6560,6 +6592,13 @@
 		.type = ALC_FIXUP_FUNC,
 		.v.func = alc_fixup_no_jack_detect,
 	},
+	[ALC662_FIXUP_ZOTAC_Z68] = {
+		.type = ALC_FIXUP_PINS,
+		.v.pins = (const struct alc_pincfg[]) {
+			{ 0x1b, 0x02214020 }, /* Front HP */
+			{ }
+		}
+	},
 };
 
 static const struct snd_pci_quirk alc662_fixup_tbl[] = {
@@ -6573,6 +6612,7 @@
 	SND_PCI_QUIRK(0x144d, 0xc051, "Samsung R720", ALC662_FIXUP_IDEAPAD),
 	SND_PCI_QUIRK(0x17aa, 0x38af, "Lenovo Ideapad Y550P", ALC662_FIXUP_IDEAPAD),
 	SND_PCI_QUIRK(0x17aa, 0x3a0d, "Lenovo Ideapad Y550", ALC662_FIXUP_IDEAPAD),
+	SND_PCI_QUIRK(0x19da, 0xa130, "Zotac Z68", ALC662_FIXUP_ZOTAC_Z68),
 	SND_PCI_QUIRK(0x1b35, 0x2206, "CZC P10T", ALC662_FIXUP_CZC_P10T),
 
 #if 0
@@ -6667,6 +6707,12 @@
 
 	alc_fix_pll_init(codec, 0x20, 0x04, 15);
 
+	alc_pick_fixup(codec, alc662_fixup_models,
+		       alc662_fixup_tbl, alc662_fixups);
+	alc_apply_fixup(codec, ALC_FIXUP_ACT_PRE_PROBE);
+
+	alc_auto_parse_customize_define(codec);
+
 	if ((alc_get_coef0(codec) & (1 << 14)) &&
 	    codec->bus->pci->subsystem_vendor == 0x1025 &&
 	    spec->cdefine.platform_type == 1) {
@@ -6674,12 +6720,6 @@
 			goto error;
 	}
 
-	alc_pick_fixup(codec, alc662_fixup_models,
-		       alc662_fixup_tbl, alc662_fixups);
-	alc_apply_fixup(codec, ALC_FIXUP_ACT_PRE_PROBE);
-
-	alc_auto_parse_customize_define(codec);
-
 	/* automatic parse from the BIOS config */
 	err = alc662_parse_auto_config(codec);
 	if (err < 0)
@@ -6762,6 +6802,7 @@
 	{ .id = 0x10ec0272, .name = "ALC272", .patch = patch_alc662 },
 	{ .id = 0x10ec0275, .name = "ALC275", .patch = patch_alc269 },
 	{ .id = 0x10ec0276, .name = "ALC276", .patch = patch_alc269 },
+	{ .id = 0x10ec0280, .name = "ALC280", .patch = patch_alc269 },
 	{ .id = 0x10ec0861, .rev = 0x100340, .name = "ALC660",
 	  .patch = patch_alc861 },
 	{ .id = 0x10ec0660, .name = "ALC660-VD", .patch = patch_alc861vd },
diff --git a/sound/pci/hda/patch_sigmatel.c b/sound/pci/hda/patch_sigmatel.c
index 7db8228..0767528 100644
--- a/sound/pci/hda/patch_sigmatel.c
+++ b/sound/pci/hda/patch_sigmatel.c
@@ -4367,7 +4367,7 @@
 					 AC_PINCTL_IN_EN);
 	for (i = 0; i < spec->num_pwrs; i++)  {
 		hda_nid_t nid = spec->pwr_nids[i];
-		int pinctl, def_conf;
+		unsigned int pinctl, def_conf;
 
 		def_conf = snd_hda_codec_get_pincfg(codec, nid);
 		def_conf = get_defcfg_connect(def_conf);
@@ -4376,6 +4376,11 @@
 			stac_toggle_power_map(codec, nid, 0);
 			continue;
 		}
+		if (def_conf == AC_JACK_PORT_FIXED) {
+			/* no need for jack detection for fixed pins */
+			stac_toggle_power_map(codec, nid, 1);
+			continue;
+		}
 		/* power on when no jack detection is available */
 		/* or when the VREF is used for controlling LED */
 		if (!spec->hp_detect ||
diff --git a/sound/pci/rme9652/hdspm.c b/sound/pci/rme9652/hdspm.c
index 0a5027b..b8ac871 100644
--- a/sound/pci/rme9652/hdspm.c
+++ b/sound/pci/rme9652/hdspm.c
@@ -1988,6 +1988,13 @@
 	period = hdspm_read(hdspm, HDSPM_RD_PLL_FREQ);
 	rate = hdspm_calc_dds_value(hdspm, period);
 
+	if (rate > 207000) {
+		/* Unreasonably high sample rate as seen on PCI MADI cards.
+		 * Use the cached value instead.
+		 */
+		rate = hdspm->system_sample_rate;
+	}
+
 	return rate;
 }
 
diff --git a/sound/soc/codecs/wm2000.c b/sound/soc/codecs/wm2000.c
index a75c376..0418fa1 100644
--- a/sound/soc/codecs/wm2000.c
+++ b/sound/soc/codecs/wm2000.c
@@ -99,8 +99,9 @@
 }
 
 static int wm2000_poll_bit(struct i2c_client *i2c,
-			   unsigned int reg, u8 mask, int timeout)
+			   unsigned int reg, u8 mask)
 {
+	int timeout = 4000;
 	int val;
 
 	val = wm2000_read(i2c, reg);
@@ -119,7 +120,7 @@
 static int wm2000_power_up(struct i2c_client *i2c, int analogue)
 {
 	struct wm2000_priv *wm2000 = dev_get_drvdata(&i2c->dev);
-	int ret, timeout;
+	int ret;
 
 	BUG_ON(wm2000->anc_mode != ANC_OFF);
 
@@ -140,13 +141,13 @@
 
 	/* Wait for ANC engine to become ready */
 	if (!wm2000_poll_bit(i2c, WM2000_REG_ANC_STAT,
-			     WM2000_ANC_ENG_IDLE, 1)) {
+			     WM2000_ANC_ENG_IDLE)) {
 		dev_err(&i2c->dev, "ANC engine failed to reset\n");
 		return -ETIMEDOUT;
 	}
 
 	if (!wm2000_poll_bit(i2c, WM2000_REG_SYS_STATUS,
-			     WM2000_STATUS_BOOT_COMPLETE, 1)) {
+			     WM2000_STATUS_BOOT_COMPLETE)) {
 		dev_err(&i2c->dev, "ANC engine failed to initialise\n");
 		return -ETIMEDOUT;
 	}
@@ -173,16 +174,13 @@
 	dev_dbg(&i2c->dev, "Download complete\n");
 
 	if (analogue) {
-		timeout = 248;
-		wm2000_write(i2c, WM2000_REG_ANA_VMID_PU_TIME, timeout / 4);
+		wm2000_write(i2c, WM2000_REG_ANA_VMID_PU_TIME, 248 / 4);
 
 		wm2000_write(i2c, WM2000_REG_SYS_MODE_CNTRL,
 			     WM2000_MODE_ANA_SEQ_INCLUDE |
 			     WM2000_MODE_MOUSE_ENABLE |
 			     WM2000_MODE_THERMAL_ENABLE);
 	} else {
-		timeout = 10;
-
 		wm2000_write(i2c, WM2000_REG_SYS_MODE_CNTRL,
 			     WM2000_MODE_MOUSE_ENABLE |
 			     WM2000_MODE_THERMAL_ENABLE);
@@ -201,9 +199,8 @@
 	wm2000_write(i2c, WM2000_REG_SYS_CTL2, WM2000_ANC_INT_N_CLR);
 
 	if (!wm2000_poll_bit(i2c, WM2000_REG_SYS_STATUS,
-			     WM2000_STATUS_MOUSE_ACTIVE, timeout)) {
-		dev_err(&i2c->dev, "Timed out waiting for device after %dms\n",
-			timeout * 10);
+			     WM2000_STATUS_MOUSE_ACTIVE)) {
+		dev_err(&i2c->dev, "Timed out waiting for device\n");
 		return -ETIMEDOUT;
 	}
 
@@ -218,28 +215,25 @@
 static int wm2000_power_down(struct i2c_client *i2c, int analogue)
 {
 	struct wm2000_priv *wm2000 = dev_get_drvdata(&i2c->dev);
-	int timeout;
 
 	if (analogue) {
-		timeout = 248;
-		wm2000_write(i2c, WM2000_REG_ANA_VMID_PD_TIME, timeout / 4);
+		wm2000_write(i2c, WM2000_REG_ANA_VMID_PD_TIME, 248 / 4);
 		wm2000_write(i2c, WM2000_REG_SYS_MODE_CNTRL,
 			     WM2000_MODE_ANA_SEQ_INCLUDE |
 			     WM2000_MODE_POWER_DOWN);
 	} else {
-		timeout = 10;
 		wm2000_write(i2c, WM2000_REG_SYS_MODE_CNTRL,
 			     WM2000_MODE_POWER_DOWN);
 	}
 
 	if (!wm2000_poll_bit(i2c, WM2000_REG_SYS_STATUS,
-			     WM2000_STATUS_POWER_DOWN_COMPLETE, timeout)) {
+			     WM2000_STATUS_POWER_DOWN_COMPLETE)) {
 		dev_err(&i2c->dev, "Timeout waiting for ANC power down\n");
 		return -ETIMEDOUT;
 	}
 
 	if (!wm2000_poll_bit(i2c, WM2000_REG_ANC_STAT,
-			     WM2000_ANC_ENG_IDLE, 1)) {
+			     WM2000_ANC_ENG_IDLE)) {
 		dev_err(&i2c->dev, "Timeout waiting for ANC engine idle\n");
 		return -ETIMEDOUT;
 	}
@@ -268,13 +262,13 @@
 	}
 
 	if (!wm2000_poll_bit(i2c, WM2000_REG_SYS_STATUS,
-			     WM2000_STATUS_ANC_DISABLED, 10)) {
+			     WM2000_STATUS_ANC_DISABLED)) {
 		dev_err(&i2c->dev, "Timeout waiting for ANC disable\n");
 		return -ETIMEDOUT;
 	}
 
 	if (!wm2000_poll_bit(i2c, WM2000_REG_ANC_STAT,
-			     WM2000_ANC_ENG_IDLE, 1)) {
+			     WM2000_ANC_ENG_IDLE)) {
 		dev_err(&i2c->dev, "Timeout waiting for ANC engine idle\n");
 		return -ETIMEDOUT;
 	}
@@ -311,7 +305,7 @@
 	wm2000_write(i2c, WM2000_REG_SYS_CTL2, WM2000_ANC_INT_N_CLR);
 
 	if (!wm2000_poll_bit(i2c, WM2000_REG_SYS_STATUS,
-			     WM2000_STATUS_MOUSE_ACTIVE, 10)) {
+			     WM2000_STATUS_MOUSE_ACTIVE)) {
 		dev_err(&i2c->dev, "Timed out waiting for MOUSE\n");
 		return -ETIMEDOUT;
 	}
@@ -325,38 +319,32 @@
 static int wm2000_enter_standby(struct i2c_client *i2c, int analogue)
 {
 	struct wm2000_priv *wm2000 = dev_get_drvdata(&i2c->dev);
-	int timeout;
 
 	BUG_ON(wm2000->anc_mode != ANC_ACTIVE);
 
 	if (analogue) {
-		timeout = 248;
-		wm2000_write(i2c, WM2000_REG_ANA_VMID_PD_TIME, timeout / 4);
+		wm2000_write(i2c, WM2000_REG_ANA_VMID_PD_TIME, 248 / 4);
 
 		wm2000_write(i2c, WM2000_REG_SYS_MODE_CNTRL,
 			     WM2000_MODE_ANA_SEQ_INCLUDE |
 			     WM2000_MODE_THERMAL_ENABLE |
 			     WM2000_MODE_STANDBY_ENTRY);
 	} else {
-		timeout = 10;
-
 		wm2000_write(i2c, WM2000_REG_SYS_MODE_CNTRL,
 			     WM2000_MODE_THERMAL_ENABLE |
 			     WM2000_MODE_STANDBY_ENTRY);
 	}
 
 	if (!wm2000_poll_bit(i2c, WM2000_REG_SYS_STATUS,
-			     WM2000_STATUS_ANC_DISABLED, timeout)) {
+			     WM2000_STATUS_ANC_DISABLED)) {
 		dev_err(&i2c->dev,
 			"Timed out waiting for ANC disable after 1ms\n");
 		return -ETIMEDOUT;
 	}
 
-	if (!wm2000_poll_bit(i2c, WM2000_REG_ANC_STAT, WM2000_ANC_ENG_IDLE,
-			     1)) {
+	if (!wm2000_poll_bit(i2c, WM2000_REG_ANC_STAT, WM2000_ANC_ENG_IDLE)) {
 		dev_err(&i2c->dev,
-			"Timed out waiting for standby after %dms\n",
-			timeout * 10);
+			"Timed out waiting for standby\n");
 		return -ETIMEDOUT;
 	}
 
@@ -374,23 +362,19 @@
 static int wm2000_exit_standby(struct i2c_client *i2c, int analogue)
 {
 	struct wm2000_priv *wm2000 = dev_get_drvdata(&i2c->dev);
-	int timeout;
 
 	BUG_ON(wm2000->anc_mode != ANC_STANDBY);
 
 	wm2000_write(i2c, WM2000_REG_SYS_CTL1, 0);
 
 	if (analogue) {
-		timeout = 248;
-		wm2000_write(i2c, WM2000_REG_ANA_VMID_PU_TIME, timeout / 4);
+		wm2000_write(i2c, WM2000_REG_ANA_VMID_PU_TIME, 248 / 4);
 
 		wm2000_write(i2c, WM2000_REG_SYS_MODE_CNTRL,
 			     WM2000_MODE_ANA_SEQ_INCLUDE |
 			     WM2000_MODE_THERMAL_ENABLE |
 			     WM2000_MODE_MOUSE_ENABLE);
 	} else {
-		timeout = 10;
-
 		wm2000_write(i2c, WM2000_REG_SYS_MODE_CNTRL,
 			     WM2000_MODE_THERMAL_ENABLE |
 			     WM2000_MODE_MOUSE_ENABLE);
@@ -400,9 +384,8 @@
 	wm2000_write(i2c, WM2000_REG_SYS_CTL2, WM2000_ANC_INT_N_CLR);
 
 	if (!wm2000_poll_bit(i2c, WM2000_REG_SYS_STATUS,
-			     WM2000_STATUS_MOUSE_ACTIVE, timeout)) {
-		dev_err(&i2c->dev, "Timed out waiting for MOUSE after %dms\n",
-			timeout * 10);
+			     WM2000_STATUS_MOUSE_ACTIVE)) {
+		dev_err(&i2c->dev, "Timed out waiting for MOUSE\n");
 		return -ETIMEDOUT;
 	}
 
diff --git a/sound/soc/codecs/wm8904.c b/sound/soc/codecs/wm8904.c
index 65d525d..812acd8 100644
--- a/sound/soc/codecs/wm8904.c
+++ b/sound/soc/codecs/wm8904.c
@@ -1863,6 +1863,7 @@
 				return ret;
 			}
 
+			regcache_cache_only(wm8904->regmap, false);
 			regcache_sync(wm8904->regmap);
 
 			/* Enable bias */
@@ -1899,14 +1900,8 @@
 		snd_soc_update_bits(codec, WM8904_BIAS_CONTROL_0,
 				    WM8904_BIAS_ENA, 0);
 
-#ifdef CONFIG_REGULATOR
-		/* Post 2.6.34 we will be able to get a callback when
-		 * the regulators are disabled which we can use but
-		 * for now just assume that the power will be cut if
-		 * the regulator API is in use.
-		 */
-		codec->cache_sync = 1;
-#endif
+		regcache_cache_only(wm8904->regmap, true);
+		regcache_mark_dirty(wm8904->regmap);
 
 		regulator_bulk_disable(ARRAY_SIZE(wm8904->supplies),
 				       wm8904->supplies);
@@ -2084,10 +2079,8 @@
 {
 	struct wm8904_priv *wm8904 = snd_soc_codec_get_drvdata(codec);
 	struct wm8904_pdata *pdata = wm8904->pdata;
-	u16 *reg_cache = codec->reg_cache;
 	int ret, i;
 
-	codec->cache_sync = 1;
 	codec->control_data = wm8904->regmap;
 
 	switch (wm8904->devtype) {
@@ -2150,6 +2143,7 @@
 		goto err_enable;
 	}
 
+	regcache_cache_only(wm8904->regmap, true);
 	/* Change some default settings - latch VU and enable ZC */
 	snd_soc_update_bits(codec, WM8904_ADC_DIGITAL_VOLUME_LEFT,
 			    WM8904_ADC_VU, WM8904_ADC_VU);
@@ -2180,14 +2174,18 @@
 			if (!pdata->gpio_cfg[i])
 				continue;
 
-			reg_cache[WM8904_GPIO_CONTROL_1 + i]
-				= pdata->gpio_cfg[i] & 0xffff;
+			regmap_update_bits(wm8904->regmap,
+					   WM8904_GPIO_CONTROL_1 + i,
+					   0xffff,
+					   pdata->gpio_cfg[i]);
 		}
 
 		/* Zero is the default value for these anyway */
 		for (i = 0; i < WM8904_MIC_REGS; i++)
-			reg_cache[WM8904_MIC_BIAS_CONTROL_0 + i]
-				= pdata->mic_cfg[i];
+			regmap_update_bits(wm8904->regmap,
+					   WM8904_MIC_BIAS_CONTROL_0 + i,
+					   0xffff,
+					   pdata->mic_cfg[i]);
 	}
 
 	/* Set Class W by default - this will be managed by the Class
diff --git a/sound/soc/codecs/wm8994.c b/sound/soc/codecs/wm8994.c
index 993639d..1436b6c 100644
--- a/sound/soc/codecs/wm8994.c
+++ b/sound/soc/codecs/wm8994.c
@@ -46,6 +46,39 @@
 #define WM8994_NUM_DRC 3
 #define WM8994_NUM_EQ  3
 
+static struct {
+	unsigned int reg;
+	unsigned int mask;
+} wm8994_vu_bits[] = {
+	{ WM8994_LEFT_LINE_INPUT_1_2_VOLUME, WM8994_IN1_VU },
+	{ WM8994_RIGHT_LINE_INPUT_1_2_VOLUME, WM8994_IN1_VU },
+	{ WM8994_LEFT_LINE_INPUT_3_4_VOLUME, WM8994_IN2_VU },
+	{ WM8994_RIGHT_LINE_INPUT_3_4_VOLUME, WM8994_IN2_VU },
+	{ WM8994_SPEAKER_VOLUME_LEFT, WM8994_SPKOUT_VU },
+	{ WM8994_SPEAKER_VOLUME_RIGHT, WM8994_SPKOUT_VU },
+	{ WM8994_LEFT_OUTPUT_VOLUME, WM8994_HPOUT1_VU },
+	{ WM8994_RIGHT_OUTPUT_VOLUME, WM8994_HPOUT1_VU },
+	{ WM8994_LEFT_OPGA_VOLUME, WM8994_MIXOUT_VU },
+	{ WM8994_RIGHT_OPGA_VOLUME, WM8994_MIXOUT_VU },
+
+	{ WM8994_AIF1_DAC1_LEFT_VOLUME, WM8994_AIF1DAC1_VU },
+	{ WM8994_AIF1_DAC1_RIGHT_VOLUME, WM8994_AIF1DAC1_VU },
+	{ WM8994_AIF1_DAC2_LEFT_VOLUME, WM8994_AIF1DAC2_VU },
+	{ WM8994_AIF1_DAC2_RIGHT_VOLUME, WM8994_AIF1DAC2_VU },
+	{ WM8994_AIF2_DAC_LEFT_VOLUME, WM8994_AIF2DAC_VU },
+	{ WM8994_AIF2_DAC_RIGHT_VOLUME, WM8994_AIF2DAC_VU },
+	{ WM8994_AIF1_ADC1_LEFT_VOLUME, WM8994_AIF1ADC1_VU },
+	{ WM8994_AIF1_ADC1_RIGHT_VOLUME, WM8994_AIF1ADC1_VU },
+	{ WM8994_AIF1_ADC2_LEFT_VOLUME, WM8994_AIF1ADC2_VU },
+	{ WM8994_AIF1_ADC2_RIGHT_VOLUME, WM8994_AIF1ADC2_VU },
+	{ WM8994_AIF2_ADC_LEFT_VOLUME, WM8994_AIF2ADC_VU },
+	{ WM8994_AIF2_ADC_RIGHT_VOLUME, WM8994_AIF1ADC2_VU },
+	{ WM8994_DAC1_LEFT_VOLUME, WM8994_DAC1_VU },
+	{ WM8994_DAC1_RIGHT_VOLUME, WM8994_DAC1_VU },
+	{ WM8994_DAC2_LEFT_VOLUME, WM8994_DAC2_VU },
+	{ WM8994_DAC2_RIGHT_VOLUME, WM8994_DAC2_VU },
+};
+
 static int wm8994_drc_base[] = {
 	WM8994_AIF1_DRC1_1,
 	WM8994_AIF1_DRC2_1,
@@ -694,9 +727,6 @@
 	if (!wm8994->jackdet || !wm8994->jack_cb)
 		return;
 
-	if (!wm8994->jackdet || !wm8994->jack_cb)
-		return;
-
 	if (wm8994->active_refcount)
 		mode = WM1811_JACKDET_MODE_AUDIO;
 
@@ -989,6 +1019,7 @@
 	struct snd_soc_codec *codec = w->codec;
 	struct wm8994 *control = codec->control_data;
 	int mask = WM8994_AIF1DAC1L_ENA | WM8994_AIF1DAC1R_ENA;
+	int i;
 	int dac;
 	int adc;
 	int val;
@@ -1047,6 +1078,13 @@
 				    WM8994_AIF1DAC2L_ENA);
 		break;
 
+	case SND_SOC_DAPM_POST_PMU:
+		for (i = 0; i < ARRAY_SIZE(wm8994_vu_bits); i++)
+			snd_soc_write(codec, wm8994_vu_bits[i].reg,
+				      snd_soc_read(codec,
+						   wm8994_vu_bits[i].reg));
+		break;
+
 	case SND_SOC_DAPM_PRE_PMD:
 	case SND_SOC_DAPM_POST_PMD:
 		snd_soc_update_bits(codec, WM8994_POWER_MANAGEMENT_5,
@@ -1072,6 +1110,7 @@
 		      struct snd_kcontrol *kcontrol, int event)
 {
 	struct snd_soc_codec *codec = w->codec;
+	int i;
 	int dac;
 	int adc;
 	int val;
@@ -1122,6 +1161,13 @@
 				    WM8994_AIF2DACR_ENA);
 		break;
 
+	case SND_SOC_DAPM_POST_PMU:
+		for (i = 0; i < ARRAY_SIZE(wm8994_vu_bits); i++)
+			snd_soc_write(codec, wm8994_vu_bits[i].reg,
+				      snd_soc_read(codec,
+						   wm8994_vu_bits[i].reg));
+		break;
+
 	case SND_SOC_DAPM_PRE_PMD:
 	case SND_SOC_DAPM_POST_PMD:
 		snd_soc_update_bits(codec, WM8994_POWER_MANAGEMENT_5,
@@ -1190,17 +1236,19 @@
 	switch (event) {
 	case SND_SOC_DAPM_PRE_PMU:
 		if (wm8994->aif1clk_enable) {
-			aif1clk_ev(w, kcontrol, event);
+			aif1clk_ev(w, kcontrol, SND_SOC_DAPM_PRE_PMU);
 			snd_soc_update_bits(codec, WM8994_AIF1_CLOCKING_1,
 					    WM8994_AIF1CLK_ENA_MASK,
 					    WM8994_AIF1CLK_ENA);
+			aif1clk_ev(w, kcontrol, SND_SOC_DAPM_POST_PMU);
 			wm8994->aif1clk_enable = 0;
 		}
 		if (wm8994->aif2clk_enable) {
-			aif2clk_ev(w, kcontrol, event);
+			aif2clk_ev(w, kcontrol, SND_SOC_DAPM_PRE_PMU);
 			snd_soc_update_bits(codec, WM8994_AIF2_CLOCKING_1,
 					    WM8994_AIF2CLK_ENA_MASK,
 					    WM8994_AIF2CLK_ENA);
+			aif2clk_ev(w, kcontrol, SND_SOC_DAPM_POST_PMU);
 			wm8994->aif2clk_enable = 0;
 		}
 		break;
@@ -1221,15 +1269,17 @@
 	switch (event) {
 	case SND_SOC_DAPM_POST_PMD:
 		if (wm8994->aif1clk_disable) {
+			aif1clk_ev(w, kcontrol, SND_SOC_DAPM_PRE_PMD);
 			snd_soc_update_bits(codec, WM8994_AIF1_CLOCKING_1,
 					    WM8994_AIF1CLK_ENA_MASK, 0);
-			aif1clk_ev(w, kcontrol, event);
+			aif1clk_ev(w, kcontrol, SND_SOC_DAPM_POST_PMD);
 			wm8994->aif1clk_disable = 0;
 		}
 		if (wm8994->aif2clk_disable) {
+			aif2clk_ev(w, kcontrol, SND_SOC_DAPM_PRE_PMD);
 			snd_soc_update_bits(codec, WM8994_AIF2_CLOCKING_1,
 					    WM8994_AIF2CLK_ENA_MASK, 0);
-			aif2clk_ev(w, kcontrol, event);
+			aif2clk_ev(w, kcontrol, SND_SOC_DAPM_POST_PMD);
 			wm8994->aif2clk_disable = 0;
 		}
 		break;
@@ -1527,9 +1577,11 @@
 
 static const struct snd_soc_dapm_widget wm8994_lateclk_widgets[] = {
 SND_SOC_DAPM_SUPPLY("AIF1CLK", WM8994_AIF1_CLOCKING_1, 0, 0, aif1clk_ev,
-		    SND_SOC_DAPM_PRE_PMU | SND_SOC_DAPM_PRE_PMD),
+		    SND_SOC_DAPM_PRE_PMU | SND_SOC_DAPM_POST_PMU |
+		    SND_SOC_DAPM_PRE_PMD),
 SND_SOC_DAPM_SUPPLY("AIF2CLK", WM8994_AIF2_CLOCKING_1, 0, 0, aif2clk_ev,
-		    SND_SOC_DAPM_PRE_PMU | SND_SOC_DAPM_PRE_PMD),
+		    SND_SOC_DAPM_PRE_PMU | SND_SOC_DAPM_POST_PMU |
+		    SND_SOC_DAPM_PRE_PMD),
 SND_SOC_DAPM_PGA("Direct Voice", SND_SOC_NOPM, 0, 0, NULL, 0),
 SND_SOC_DAPM_MIXER("SPKL", WM8994_POWER_MANAGEMENT_3, 8, 0,
 		   left_speaker_mixer, ARRAY_SIZE(left_speaker_mixer)),
@@ -3879,39 +3931,11 @@
 
 	pm_runtime_put(codec->dev);
 
-	/* Latch volume updates (right only; we always do left then right). */
-	snd_soc_update_bits(codec, WM8994_AIF1_DAC1_LEFT_VOLUME,
-			    WM8994_AIF1DAC1_VU, WM8994_AIF1DAC1_VU);
-	snd_soc_update_bits(codec, WM8994_AIF1_DAC1_RIGHT_VOLUME,
-			    WM8994_AIF1DAC1_VU, WM8994_AIF1DAC1_VU);
-	snd_soc_update_bits(codec, WM8994_AIF1_DAC2_LEFT_VOLUME,
-			    WM8994_AIF1DAC2_VU, WM8994_AIF1DAC2_VU);
-	snd_soc_update_bits(codec, WM8994_AIF1_DAC2_RIGHT_VOLUME,
-			    WM8994_AIF1DAC2_VU, WM8994_AIF1DAC2_VU);
-	snd_soc_update_bits(codec, WM8994_AIF2_DAC_LEFT_VOLUME,
-			    WM8994_AIF2DAC_VU, WM8994_AIF2DAC_VU);
-	snd_soc_update_bits(codec, WM8994_AIF2_DAC_RIGHT_VOLUME,
-			    WM8994_AIF2DAC_VU, WM8994_AIF2DAC_VU);
-	snd_soc_update_bits(codec, WM8994_AIF1_ADC1_LEFT_VOLUME,
-			    WM8994_AIF1ADC1_VU, WM8994_AIF1ADC1_VU);
-	snd_soc_update_bits(codec, WM8994_AIF1_ADC1_RIGHT_VOLUME,
-			    WM8994_AIF1ADC1_VU, WM8994_AIF1ADC1_VU);
-	snd_soc_update_bits(codec, WM8994_AIF1_ADC2_LEFT_VOLUME,
-			    WM8994_AIF1ADC2_VU, WM8994_AIF1ADC2_VU);
-	snd_soc_update_bits(codec, WM8994_AIF1_ADC2_RIGHT_VOLUME,
-			    WM8994_AIF1ADC2_VU, WM8994_AIF1ADC2_VU);
-	snd_soc_update_bits(codec, WM8994_AIF2_ADC_LEFT_VOLUME,
-			    WM8994_AIF2ADC_VU, WM8994_AIF1ADC2_VU);
-	snd_soc_update_bits(codec, WM8994_AIF2_ADC_RIGHT_VOLUME,
-			    WM8994_AIF2ADC_VU, WM8994_AIF1ADC2_VU);
-	snd_soc_update_bits(codec, WM8994_DAC1_LEFT_VOLUME,
-			    WM8994_DAC1_VU, WM8994_DAC1_VU);
-	snd_soc_update_bits(codec, WM8994_DAC1_RIGHT_VOLUME,
-			    WM8994_DAC1_VU, WM8994_DAC1_VU);
-	snd_soc_update_bits(codec, WM8994_DAC2_LEFT_VOLUME,
-			    WM8994_DAC2_VU, WM8994_DAC2_VU);
-	snd_soc_update_bits(codec, WM8994_DAC2_RIGHT_VOLUME,
-			    WM8994_DAC2_VU, WM8994_DAC2_VU);
+	/* Latch volume update bits */
+	for (i = 0; i < ARRAY_SIZE(wm8994_vu_bits); i++)
+		snd_soc_update_bits(codec, wm8994_vu_bits[i].reg,
+				    wm8994_vu_bits[i].mask,
+				    wm8994_vu_bits[i].mask);
 
 	/* Set the low bit of the 3D stereo depth so TLV matches */
 	snd_soc_update_bits(codec, WM8994_AIF1_DAC1_FILTERS_2,
diff --git a/sound/soc/codecs/wm8996.c b/sound/soc/codecs/wm8996.c
index 8af422e..dc9b42b 100644
--- a/sound/soc/codecs/wm8996.c
+++ b/sound/soc/codecs/wm8996.c
@@ -2837,8 +2837,6 @@
 		}
 	}
 
-	regcache_cache_only(codec->control_data, true);
-
 	/* Apply platform data settings */
 	snd_soc_update_bits(codec, WM8996_LINE_INPUT_CONTROL,
 			    WM8996_INL_MODE_MASK | WM8996_INR_MODE_MASK,
@@ -3051,7 +3049,6 @@
 	for (i = 0; i < ARRAY_SIZE(wm8996->supplies); i++)
 		regulator_unregister_notifier(wm8996->supplies[i].consumer,
 					      &wm8996->disable_nb[i]);
-	regulator_bulk_free(ARRAY_SIZE(wm8996->supplies), wm8996->supplies);
 
 	return 0;
 }
@@ -3206,14 +3203,15 @@
 	dev_info(&i2c->dev, "revision %c\n",
 		 (reg & WM8996_CHIP_REV_MASK) + 'A');
 
-	regulator_bulk_disable(ARRAY_SIZE(wm8996->supplies), wm8996->supplies);
-
 	ret = wm8996_reset(wm8996);
 	if (ret < 0) {
 		dev_err(&i2c->dev, "Failed to issue reset\n");
 		goto err_regmap;
 	}
 
+	regcache_cache_only(wm8996->regmap, true);
+	regulator_bulk_disable(ARRAY_SIZE(wm8996->supplies), wm8996->supplies);
+
 	wm8996_init_gpio(wm8996);
 
 	ret = snd_soc_register_codec(&i2c->dev,
diff --git a/sound/soc/fsl/imx-audmux.c b/sound/soc/fsl/imx-audmux.c
index f237003..0803274 100644
--- a/sound/soc/fsl/imx-audmux.c
+++ b/sound/soc/fsl/imx-audmux.c
@@ -26,6 +26,7 @@
 #include <linux/of_device.h>
 #include <linux/platform_device.h>
 #include <linux/slab.h>
+#include <linux/pinctrl/consumer.h>
 
 #include "imx-audmux.h"
 
@@ -249,6 +250,7 @@
 static int __devinit imx_audmux_probe(struct platform_device *pdev)
 {
 	struct resource *res;
+	struct pinctrl *pinctrl;
 	const struct of_device_id *of_id =
 			of_match_device(imx_audmux_dt_ids, &pdev->dev);
 
@@ -257,6 +259,12 @@
 	if (!audmux_base)
 		return -EADDRNOTAVAIL;
 
+	pinctrl = devm_pinctrl_get_select_default(&pdev->dev);
+	if (IS_ERR(pinctrl)) {
+		dev_err(&pdev->dev, "setup pinctrl failed!");
+		return PTR_ERR(pinctrl);
+	}
+
 	audmux_clk = clk_get(&pdev->dev, "audmux");
 	if (IS_ERR(audmux_clk)) {
 		dev_dbg(&pdev->dev, "cannot get clock: %ld\n",
diff --git a/sound/soc/fsl/imx-ssi.c b/sound/soc/fsl/imx-ssi.c
index cf3ed03..28dd76c 100644
--- a/sound/soc/fsl/imx-ssi.c
+++ b/sound/soc/fsl/imx-ssi.c
@@ -543,7 +543,7 @@
 			ret);
 		goto failed_clk;
 	}
-	clk_enable(ssi->clk);
+	clk_prepare_enable(ssi->clk);
 
 	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
 	if (!res) {
@@ -641,7 +641,7 @@
 failed_ioremap:
 	release_mem_region(res->start, resource_size(res));
 failed_get_resource:
-	clk_disable(ssi->clk);
+	clk_disable_unprepare(ssi->clk);
 	clk_put(ssi->clk);
 failed_clk:
 	kfree(ssi);
@@ -664,7 +664,7 @@
 
 	iounmap(ssi->base);
 	release_mem_region(res->start, resource_size(res));
-	clk_disable(ssi->clk);
+	clk_disable_unprepare(ssi->clk);
 	clk_put(ssi->clk);
 	kfree(ssi);
 
diff --git a/sound/soc/kirkwood/kirkwood-i2s.c b/sound/soc/kirkwood/kirkwood-i2s.c
index 3cb9aa4..fa45567 100644
--- a/sound/soc/kirkwood/kirkwood-i2s.c
+++ b/sound/soc/kirkwood/kirkwood-i2s.c
@@ -17,6 +17,7 @@
 #include <linux/slab.h>
 #include <linux/mbus.h>
 #include <linux/delay.h>
+#include <linux/clk.h>
 #include <sound/pcm.h>
 #include <sound/pcm_params.h>
 #include <sound/soc.h>
@@ -449,6 +450,14 @@
 
 	priv->burst = data->burst;
 
+	priv->clk = clk_get(&pdev->dev, NULL);
+	if (IS_ERR(priv->clk)) {
+		dev_err(&pdev->dev, "no clock\n");
+		err = PTR_ERR(priv->clk);
+		goto err_ioremap;
+	}
+	clk_prepare_enable(priv->clk);
+
 	return snd_soc_register_dai(&pdev->dev, &kirkwood_i2s_dai);
 
 err_ioremap:
@@ -466,6 +475,10 @@
 	struct kirkwood_dma_data *priv = dev_get_drvdata(&pdev->dev);
 
 	snd_soc_unregister_dai(&pdev->dev);
+
+	clk_disable_unprepare(priv->clk);
+	clk_put(priv->clk);
+
 	iounmap(priv->io);
 	release_mem_region(priv->mem->start, SZ_16K);
 	kfree(priv);
diff --git a/sound/soc/kirkwood/kirkwood.h b/sound/soc/kirkwood/kirkwood.h
index 9047436..f9084d8 100644
--- a/sound/soc/kirkwood/kirkwood.h
+++ b/sound/soc/kirkwood/kirkwood.h
@@ -123,6 +123,7 @@
 	void __iomem *io;
 	int irq;
 	int burst;
+	struct clk *clk;
 };
 
 #endif
diff --git a/sound/soc/omap/Kconfig b/sound/soc/omap/Kconfig
index 9ccfa5e..57a2fa7 100644
--- a/sound/soc/omap/Kconfig
+++ b/sound/soc/omap/Kconfig
@@ -109,11 +109,12 @@
 	  - PandaBoard (4430)
 	  - PandaBoardES (4460)
 
-config SND_OMAP_SOC_OMAP4_HDMI
-	tristate "SoC Audio support for Texas Instruments OMAP4 HDMI"
-	depends on SND_OMAP_SOC && OMAP4_DSS_HDMI && OMAP2_DSS && ARCH_OMAP4
+config SND_OMAP_SOC_OMAP_HDMI
+	tristate "SoC Audio support for Texas Instruments OMAP HDMI"
+	depends on SND_OMAP_SOC && OMAP4_DSS_HDMI && OMAP2_DSS
 	select SND_OMAP_SOC_HDMI
 	select SND_SOC_OMAP_HDMI_CODEC
+	select OMAP4_DSS_HDMI_AUDIO
 	help
 	  Say Y if you want to add support for SoC HDMI audio on Texas Instruments
 	  OMAP4 chips
diff --git a/sound/soc/omap/Makefile b/sound/soc/omap/Makefile
index 1d656bc..0e14dd3 100644
--- a/sound/soc/omap/Makefile
+++ b/sound/soc/omap/Makefile
@@ -25,7 +25,7 @@
 snd-soc-omap3beagle-objs := omap3beagle.o
 snd-soc-zoom2-objs := zoom2.o
 snd-soc-igep0020-objs := igep0020.o
-snd-soc-omap4-hdmi-objs := omap4-hdmi-card.o
+snd-soc-omap-hdmi-card-objs := omap-hdmi-card.o
 
 obj-$(CONFIG_SND_OMAP_SOC_N810) += snd-soc-n810.o
 obj-$(CONFIG_SND_OMAP_SOC_RX51) += snd-soc-rx51.o
@@ -41,4 +41,4 @@
 obj-$(CONFIG_SND_OMAP_SOC_OMAP3_BEAGLE) += snd-soc-omap3beagle.o
 obj-$(CONFIG_SND_OMAP_SOC_ZOOM2) += snd-soc-zoom2.o
 obj-$(CONFIG_SND_OMAP_SOC_IGEP0020) += snd-soc-igep0020.o
-obj-$(CONFIG_SND_OMAP_SOC_OMAP4_HDMI) += snd-soc-omap4-hdmi.o
+obj-$(CONFIG_SND_OMAP_SOC_OMAP_HDMI) += snd-soc-omap-hdmi-card.o
diff --git a/sound/soc/omap/mcbsp.c b/sound/soc/omap/mcbsp.c
index e5f4444..34835e8 100644
--- a/sound/soc/omap/mcbsp.c
+++ b/sound/soc/omap/mcbsp.c
@@ -109,6 +109,47 @@
 	dev_dbg(mcbsp->dev, "***********************\n");
 }
 
+static irqreturn_t omap_mcbsp_irq_handler(int irq, void *dev_id)
+{
+	struct omap_mcbsp *mcbsp = dev_id;
+	u16 irqst;
+
+	irqst = MCBSP_READ(mcbsp, IRQST);
+	dev_dbg(mcbsp->dev, "IRQ callback : 0x%x\n", irqst);
+
+	if (irqst & RSYNCERREN)
+		dev_err(mcbsp->dev, "RX Frame Sync Error!\n");
+	if (irqst & RFSREN)
+		dev_dbg(mcbsp->dev, "RX Frame Sync\n");
+	if (irqst & REOFEN)
+		dev_dbg(mcbsp->dev, "RX End Of Frame\n");
+	if (irqst & RRDYEN)
+		dev_dbg(mcbsp->dev, "RX Buffer Threshold Reached\n");
+	if (irqst & RUNDFLEN)
+		dev_err(mcbsp->dev, "RX Buffer Underflow!\n");
+	if (irqst & ROVFLEN)
+		dev_err(mcbsp->dev, "RX Buffer Overflow!\n");
+
+	if (irqst & XSYNCERREN)
+		dev_err(mcbsp->dev, "TX Frame Sync Error!\n");
+	if (irqst & XFSXEN)
+		dev_dbg(mcbsp->dev, "TX Frame Sync\n");
+	if (irqst & XEOFEN)
+		dev_dbg(mcbsp->dev, "TX End Of Frame\n");
+	if (irqst & XRDYEN)
+		dev_dbg(mcbsp->dev, "TX Buffer threshold Reached\n");
+	if (irqst & XUNDFLEN)
+		dev_err(mcbsp->dev, "TX Buffer Underflow!\n");
+	if (irqst & XOVFLEN)
+		dev_err(mcbsp->dev, "TX Buffer Overflow!\n");
+	if (irqst & XEMPTYEOFEN)
+		dev_dbg(mcbsp->dev, "TX Buffer empty at end of frame\n");
+
+	MCBSP_WRITE(mcbsp, IRQST, irqst);
+
+	return IRQ_HANDLED;
+}
+
 static irqreturn_t omap_mcbsp_tx_irq_handler(int irq, void *dev_id)
 {
 	struct omap_mcbsp *mcbsp_tx = dev_id;
@@ -176,6 +217,10 @@
 	/* Enable wakeup behavior */
 	if (mcbsp->pdata->has_wakeup)
 		MCBSP_WRITE(mcbsp, WAKEUPEN, XRDYEN | RRDYEN);
+
+	/* Enable TX/RX sync error interrupts by default */
+	if (mcbsp->irq)
+		MCBSP_WRITE(mcbsp, IRQEN, RSYNCERREN | XSYNCERREN);
 }
 
 /**
@@ -489,23 +534,25 @@
 	MCBSP_WRITE(mcbsp, SPCR1, 0);
 	MCBSP_WRITE(mcbsp, SPCR2, 0);
 
-	err = request_irq(mcbsp->tx_irq, omap_mcbsp_tx_irq_handler,
-				0, "McBSP", (void *)mcbsp);
-	if (err != 0) {
-		dev_err(mcbsp->dev, "Unable to request TX IRQ %d "
-				"for McBSP%d\n", mcbsp->tx_irq,
-				mcbsp->id);
-		goto err_clk_disable;
-	}
-
-	if (mcbsp->rx_irq) {
-		err = request_irq(mcbsp->rx_irq,
-				omap_mcbsp_rx_irq_handler,
-				0, "McBSP", (void *)mcbsp);
+	if (mcbsp->irq) {
+		err = request_irq(mcbsp->irq, omap_mcbsp_irq_handler, 0,
+				  "McBSP", (void *)mcbsp);
 		if (err != 0) {
-			dev_err(mcbsp->dev, "Unable to request RX IRQ %d "
-					"for McBSP%d\n", mcbsp->rx_irq,
-					mcbsp->id);
+			dev_err(mcbsp->dev, "Unable to request IRQ\n");
+			goto err_clk_disable;
+		}
+	} else {
+		err = request_irq(mcbsp->tx_irq, omap_mcbsp_tx_irq_handler, 0,
+				  "McBSP TX", (void *)mcbsp);
+		if (err != 0) {
+			dev_err(mcbsp->dev, "Unable to request TX IRQ\n");
+			goto err_clk_disable;
+		}
+
+		err = request_irq(mcbsp->rx_irq, omap_mcbsp_rx_irq_handler, 0,
+				  "McBSP RX", (void *)mcbsp);
+		if (err != 0) {
+			dev_err(mcbsp->dev, "Unable to request RX IRQ\n");
 			goto err_free_irq;
 		}
 	}
@@ -542,9 +589,16 @@
 	if (mcbsp->pdata->has_wakeup)
 		MCBSP_WRITE(mcbsp, WAKEUPEN, 0);
 
-	if (mcbsp->rx_irq)
+	/* Disable interrupt requests */
+	if (mcbsp->irq)
+		MCBSP_WRITE(mcbsp, IRQEN, 0);
+
+	if (mcbsp->irq) {
+		free_irq(mcbsp->irq, (void *)mcbsp);
+	} else {
 		free_irq(mcbsp->rx_irq, (void *)mcbsp);
-	free_irq(mcbsp->tx_irq, (void *)mcbsp);
+		free_irq(mcbsp->tx_irq, (void *)mcbsp);
+	}
 
 	reg_cache = mcbsp->reg_cache;
 
@@ -754,7 +808,7 @@
 THRESHOLD_PROP_BUILDER(max_rx_thres);
 
 static const char *dma_op_modes[] = {
-	"element", "threshold", "frame",
+	"element", "threshold",
 };
 
 static ssize_t dma_op_mode_show(struct device *dev,
@@ -949,13 +1003,24 @@
 	else
 		mcbsp->phys_dma_base = res->start;
 
-	mcbsp->tx_irq = platform_get_irq_byname(pdev, "tx");
-	mcbsp->rx_irq = platform_get_irq_byname(pdev, "rx");
+	/*
+	 * OMAP1 and OMAP2 use two interrupt lines: TX and RX.
+	 * OMAP2430 and OMAP3 SoCs have a combined IRQ line as well.
+	 * OMAP4 and newer SoCs have only the combined IRQ line.
+	 * Use the combined IRQ if available since it gives better debugging
+	 * possibilities.
+	 */
+	mcbsp->irq = platform_get_irq_byname(pdev, "common");
+	if (mcbsp->irq == -ENXIO) {
+		mcbsp->tx_irq = platform_get_irq_byname(pdev, "tx");
 
-	/* From OMAP4 there will be a single irq line */
-	if (mcbsp->tx_irq == -ENXIO) {
-		mcbsp->tx_irq = platform_get_irq(pdev, 0);
-		mcbsp->rx_irq = 0;
+		if (mcbsp->tx_irq == -ENXIO) {
+			mcbsp->irq = platform_get_irq(pdev, 0);
+			mcbsp->tx_irq = 0;
+		} else {
+			mcbsp->rx_irq = platform_get_irq_byname(pdev, "rx");
+			mcbsp->irq = 0;
+		}
 	}
 
 	res = platform_get_resource_byname(pdev, IORESOURCE_DMA, "rx");
diff --git a/sound/soc/omap/mcbsp.h b/sound/soc/omap/mcbsp.h
index a944fcc..262a615 100644
--- a/sound/soc/omap/mcbsp.h
+++ b/sound/soc/omap/mcbsp.h
@@ -217,17 +217,20 @@
 /********************** McBSP DMA operating modes **************************/
 #define MCBSP_DMA_MODE_ELEMENT		0
 #define MCBSP_DMA_MODE_THRESHOLD	1
-#define MCBSP_DMA_MODE_FRAME		2
 
-/********************** McBSP WAKEUPEN bit definitions *********************/
+/********************** McBSP WAKEUPEN/IRQST/IRQEN bit definitions *********/
 #define RSYNCERREN		BIT(0)
 #define RFSREN			BIT(1)
 #define REOFEN			BIT(2)
 #define RRDYEN			BIT(3)
+#define RUNDFLEN		BIT(4)
+#define ROVFLEN			BIT(5)
 #define XSYNCERREN		BIT(7)
 #define XFSXEN			BIT(8)
 #define XEOFEN			BIT(9)
 #define XRDYEN			BIT(10)
+#define XUNDFLEN		BIT(11)
+#define XOVFLEN			BIT(12)
 #define XEMPTYEOFEN		BIT(14)
 
 /* Clock signal muxing options */
@@ -295,6 +298,7 @@
 	int configured;
 	u8 free;
 
+	int irq;
 	int rx_irq;
 	int tx_irq;
 
diff --git a/sound/soc/omap/omap-abe-twl6040.c b/sound/soc/omap/omap-abe-twl6040.c
index 93bb8ee..9d93793 100644
--- a/sound/soc/omap/omap-abe-twl6040.c
+++ b/sound/soc/omap/omap-abe-twl6040.c
@@ -40,6 +40,11 @@
 #include "omap-pcm.h"
 #include "../codecs/twl6040.h"
 
+struct abe_twl6040 {
+	int	jack_detection;	/* board can detect jack events */
+	int	mclk_freq;	/* MCLK frequency speed for twl6040 */
+};
+
 static int omap_abe_hw_params(struct snd_pcm_substream *substream,
 	struct snd_pcm_hw_params *params)
 {
@@ -47,13 +52,13 @@
 	struct snd_soc_dai *codec_dai = rtd->codec_dai;
 	struct snd_soc_codec *codec = rtd->codec;
 	struct snd_soc_card *card = codec->card;
-	struct omap_abe_twl6040_data *pdata = dev_get_platdata(card->dev);
+	struct abe_twl6040 *priv = snd_soc_card_get_drvdata(card);
 	int clk_id, freq;
 	int ret;
 
 	clk_id = twl6040_get_clk_id(rtd->codec);
 	if (clk_id == TWL6040_SYSCLK_SEL_HPPLL)
-		freq = pdata->mclk_freq;
+		freq = priv->mclk_freq;
 	else if (clk_id == TWL6040_SYSCLK_SEL_LPPLL)
 		freq = 32768;
 	else
@@ -128,6 +133,9 @@
 	SND_SOC_DAPM_MIC("Main Handset Mic", NULL),
 	SND_SOC_DAPM_MIC("Sub Handset Mic", NULL),
 	SND_SOC_DAPM_LINE("Line In", NULL),
+
+	/* Digital microphones */
+	SND_SOC_DAPM_MIC("Digital Mic", NULL),
 };
 
 static const struct snd_soc_dapm_route audio_map[] = {
@@ -173,6 +181,7 @@
 	struct snd_soc_card *card = codec->card;
 	struct snd_soc_dapm_context *dapm = &codec->dapm;
 	struct omap_abe_twl6040_data *pdata = dev_get_platdata(card->dev);
+	struct abe_twl6040 *priv = snd_soc_card_get_drvdata(card);
 	int hs_trim;
 	int ret = 0;
 
@@ -196,7 +205,7 @@
 					TWL6040_HSF_TRIM_RIGHT(hs_trim));
 
 	/* Headset jack detection only if it is supported */
-	if (pdata->jack_detection) {
+	if (priv->jack_detection) {
 		ret = snd_soc_jack_new(codec, "Headset Jack",
 					SND_JACK_HEADSET, &hs_jack);
 		if (ret)
@@ -210,10 +219,6 @@
 	return ret;
 }
 
-static const struct snd_soc_dapm_widget dmic_dapm_widgets[] = {
-	SND_SOC_DAPM_MIC("Digital Mic", NULL),
-};
-
 static const struct snd_soc_dapm_route dmic_audio_map[] = {
 	{"DMic", NULL, "Digital Mic"},
 	{"Digital Mic", NULL, "Digital Mic1 Bias"},
@@ -223,19 +228,13 @@
 {
 	struct snd_soc_codec *codec = rtd->codec;
 	struct snd_soc_dapm_context *dapm = &codec->dapm;
-	int ret;
-
-	ret = snd_soc_dapm_new_controls(dapm, dmic_dapm_widgets,
-				ARRAY_SIZE(dmic_dapm_widgets));
-	if (ret)
-		return ret;
 
 	return snd_soc_dapm_add_routes(dapm, dmic_audio_map,
 				ARRAY_SIZE(dmic_audio_map));
 }
 
 /* Digital audio interface glue - connects codec <--> CPU */
-static struct snd_soc_dai_link twl6040_dmic_dai[] = {
+static struct snd_soc_dai_link abe_twl6040_dai_links[] = {
 	{
 		.name = "TWL6040",
 		.stream_name = "TWL6040",
@@ -258,19 +257,6 @@
 	},
 };
 
-static struct snd_soc_dai_link twl6040_only_dai[] = {
-	{
-		.name = "TWL6040",
-		.stream_name = "TWL6040",
-		.cpu_dai_name = "omap-mcpdm",
-		.codec_dai_name = "twl6040-legacy",
-		.platform_name = "omap-pcm-audio",
-		.codec_name = "twl6040-codec",
-		.init = omap_abe_twl6040_init,
-		.ops = &omap_abe_ops,
-	},
-};
-
 /* Audio machine driver */
 static struct snd_soc_card omap_abe_card = {
 	.owner = THIS_MODULE,
@@ -285,6 +271,8 @@
 {
 	struct omap_abe_twl6040_data *pdata = dev_get_platdata(&pdev->dev);
 	struct snd_soc_card *card = &omap_abe_card;
+	struct abe_twl6040 *priv;
+	int num_links = 0;
 	int ret;
 
 	card->dev = &pdev->dev;
@@ -294,6 +282,10 @@
 		return -ENODEV;
 	}
 
+	priv = devm_kzalloc(&pdev->dev, sizeof(struct abe_twl6040), GFP_KERNEL);
+	if (priv == NULL)
+		return -ENOMEM;
+
 	if (pdata->card_name) {
 		card->name = pdata->card_name;
 	} else {
@@ -301,18 +293,24 @@
 		return -ENODEV;
 	}
 
-	if (!pdata->mclk_freq) {
+	priv->jack_detection = pdata->jack_detection;
+	priv->mclk_freq = pdata->mclk_freq;
+
+
+	if (!priv->mclk_freq) {
 		dev_err(&pdev->dev, "MCLK frequency missing\n");
 		return -ENODEV;
 	}
 
-	if (pdata->has_dmic) {
-		card->dai_link = twl6040_dmic_dai;
-		card->num_links = ARRAY_SIZE(twl6040_dmic_dai);
-	} else {
-		card->dai_link = twl6040_only_dai;
-		card->num_links = ARRAY_SIZE(twl6040_only_dai);
-	}
+	if (pdata->has_dmic)
+		num_links = 2;
+	else
+		num_links = 1;
+
+	card->dai_link = abe_twl6040_dai_links;
+	card->num_links = num_links;
+
+	snd_soc_card_set_drvdata(card, priv);
 
 	ret = snd_soc_register_card(card);
 	if (ret)
diff --git a/sound/soc/omap/omap-dmic.c b/sound/soc/omap/omap-dmic.c
index 4dcb5a7..75f5dca 100644
--- a/sound/soc/omap/omap-dmic.c
+++ b/sound/soc/omap/omap-dmic.c
@@ -32,6 +32,7 @@
 #include <linux/io.h>
 #include <linux/slab.h>
 #include <linux/pm_runtime.h>
+#include <linux/of_device.h>
 #include <plat/dma.h>
 
 #include <sound/core.h>
@@ -528,10 +529,17 @@
 	return 0;
 }
 
+static const struct of_device_id omap_dmic_of_match[] = {
+	{ .compatible = "ti,omap4-dmic", },
+	{ }
+};
+MODULE_DEVICE_TABLE(of, omap_dmic_of_match);
+
 static struct platform_driver asoc_dmic_driver = {
 	.driver = {
 		.name = "omap-dmic",
 		.owner = THIS_MODULE,
+		.of_match_table = omap_dmic_of_match,
 	},
 	.probe = asoc_dmic_probe,
 	.remove = __devexit_p(asoc_dmic_remove),
diff --git a/sound/soc/omap/omap-hdmi-card.c b/sound/soc/omap/omap-hdmi-card.c
new file mode 100644
index 0000000..eaa2ea0
--- /dev/null
+++ b/sound/soc/omap/omap-hdmi-card.c
@@ -0,0 +1,87 @@
+/*
+ * omap-hdmi-card.c
+ *
+ * OMAP ALSA SoC machine driver for TI OMAP HDMI
+ * Copyright (C) 2011 Texas Instruments Incorporated - http://www.ti.com/
+ * Author: Ricardo Neri <ricardo.neri@ti.com>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
+ * 02110-1301 USA
+ *
+ */
+
+#include <linux/module.h>
+#include <sound/pcm.h>
+#include <sound/soc.h>
+#include <asm/mach-types.h>
+#include <video/omapdss.h>
+
+#define DRV_NAME "omap-hdmi-audio"
+
+static struct snd_soc_dai_link omap_hdmi_dai = {
+	.name = "HDMI",
+	.stream_name = "HDMI",
+	.cpu_dai_name = "omap-hdmi-audio-dai",
+	.platform_name = "omap-pcm-audio",
+	.codec_name = "hdmi-audio-codec",
+	.codec_dai_name = "omap-hdmi-hifi",
+};
+
+static struct snd_soc_card snd_soc_omap_hdmi = {
+	.name = "OMAPHDMI",
+	.owner = THIS_MODULE,
+	.dai_link = &omap_hdmi_dai,
+	.num_links = 1,
+};
+
+static __devinit int omap_hdmi_probe(struct platform_device *pdev)
+{
+	struct snd_soc_card *card = &snd_soc_omap_hdmi;
+	int ret;
+
+	card->dev = &pdev->dev;
+
+	ret = snd_soc_register_card(card);
+	if (ret) {
+		dev_err(&pdev->dev, "snd_soc_register_card failed (%d)\n", ret);
+		card->dev = NULL;
+		return ret;
+	}
+	return 0;
+}
+
+static int __devexit omap_hdmi_remove(struct platform_device *pdev)
+{
+	struct snd_soc_card *card = platform_get_drvdata(pdev);
+
+	snd_soc_unregister_card(card);
+	card->dev = NULL;
+	return 0;
+}
+
+static struct platform_driver omap_hdmi_driver = {
+	.driver = {
+		.name = DRV_NAME,
+		.owner = THIS_MODULE,
+	},
+	.probe = omap_hdmi_probe,
+	.remove = __devexit_p(omap_hdmi_remove),
+};
+
+module_platform_driver(omap_hdmi_driver);
+
+MODULE_AUTHOR("Ricardo Neri <ricardo.neri@ti.com>");
+MODULE_DESCRIPTION("OMAP HDMI machine ASoC driver");
+MODULE_LICENSE("GPL");
+MODULE_ALIAS("platform:" DRV_NAME);
diff --git a/sound/soc/omap/omap-hdmi.c b/sound/soc/omap/omap-hdmi.c
index 38e0def..a08245d 100644
--- a/sound/soc/omap/omap-hdmi.c
+++ b/sound/soc/omap/omap-hdmi.c
@@ -30,21 +30,28 @@
 #include <sound/pcm_params.h>
 #include <sound/initval.h>
 #include <sound/soc.h>
+#include <sound/asound.h>
+#include <sound/asoundef.h>
+#include <video/omapdss.h>
 
 #include <plat/dma.h>
 #include "omap-pcm.h"
 #include "omap-hdmi.h"
 
-#define DRV_NAME "hdmi-audio-dai"
+#define DRV_NAME "omap-hdmi-audio-dai"
 
-static struct omap_pcm_dma_data omap_hdmi_dai_dma_params = {
-	.name = "HDMI playback",
-	.sync_mode = OMAP_DMA_SYNC_PACKET,
+struct hdmi_priv {
+	struct omap_pcm_dma_data dma_params;
+	struct omap_dss_audio dss_audio;
+	struct snd_aes_iec958 iec;
+	struct snd_cea_861_aud_if cea;
+	struct omap_dss_device *dssdev;
 };
 
 static int omap_hdmi_dai_startup(struct snd_pcm_substream *substream,
 				  struct snd_soc_dai *dai)
 {
+	struct hdmi_priv *priv = snd_soc_dai_get_drvdata(dai);
 	int err;
 	/*
 	 * Make sure that the period bytes are multiple of the DMA packet size.
@@ -52,46 +59,201 @@
 	 */
 	err = snd_pcm_hw_constraint_step(substream->runtime, 0,
 				 SNDRV_PCM_HW_PARAM_PERIOD_BYTES, 128);
-	if (err < 0)
+	if (err < 0) {
+		dev_err(dai->dev, "could not apply constraint\n");
 		return err;
+	}
 
+	if (!priv->dssdev->driver->audio_supported(priv->dssdev)) {
+		dev_err(dai->dev, "audio not supported\n");
+		return -ENODEV;
+	}
 	return 0;
 }
 
+static int omap_hdmi_dai_prepare(struct snd_pcm_substream *substream,
+				struct snd_soc_dai *dai)
+{
+	struct hdmi_priv *priv = snd_soc_dai_get_drvdata(dai);
+
+	return priv->dssdev->driver->audio_enable(priv->dssdev);
+}
+
 static int omap_hdmi_dai_hw_params(struct snd_pcm_substream *substream,
 				    struct snd_pcm_hw_params *params,
 				    struct snd_soc_dai *dai)
 {
+	struct hdmi_priv *priv = snd_soc_dai_get_drvdata(dai);
+	struct snd_aes_iec958 *iec = &priv->iec;
+	struct snd_cea_861_aud_if *cea = &priv->cea;
 	int err = 0;
 
 	switch (params_format(params)) {
 	case SNDRV_PCM_FORMAT_S16_LE:
-		omap_hdmi_dai_dma_params.packet_size = 16;
+		priv->dma_params.packet_size = 16;
 		break;
 	case SNDRV_PCM_FORMAT_S24_LE:
-		omap_hdmi_dai_dma_params.packet_size = 32;
+		priv->dma_params.packet_size = 32;
+		break;
+	default:
+		dev_err(dai->dev, "format not supported!\n");
+		return -EINVAL;
+	}
+
+	priv->dma_params.data_type = OMAP_DMA_DATA_TYPE_S32;
+
+	snd_soc_dai_set_dma_data(dai, substream,
+				 &priv->dma_params);
+
+	/*
+	 * fill the IEC-60958 channel status word
+	 */
+
+	/* specify IEC-60958-3 (commercial use) */
+	iec->status[0] &= ~IEC958_AES0_PROFESSIONAL;
+
+	/* specify that the audio is LPCM */
+	iec->status[0] &= ~IEC958_AES0_NONAUDIO;
+
+	iec->status[0] |= IEC958_AES0_CON_NOT_COPYRIGHT;
+
+	iec->status[0] |= IEC958_AES0_CON_EMPHASIS_NONE;
+
+	iec->status[0] |= IEC958_AES1_PRO_MODE_NOTID;
+
+	iec->status[1] = IEC958_AES1_CON_GENERAL;
+
+	iec->status[2] |= IEC958_AES2_CON_SOURCE_UNSPEC;
+
+	iec->status[2] |= IEC958_AES2_CON_CHANNEL_UNSPEC;
+
+	switch (params_rate(params)) {
+	case 32000:
+		iec->status[3] |= IEC958_AES3_CON_FS_32000;
+		break;
+	case 44100:
+		iec->status[3] |= IEC958_AES3_CON_FS_44100;
+		break;
+	case 48000:
+		iec->status[3] |= IEC958_AES3_CON_FS_48000;
+		break;
+	case 88200:
+		iec->status[3] |= IEC958_AES3_CON_FS_88200;
+		break;
+	case 96000:
+		iec->status[3] |= IEC958_AES3_CON_FS_96000;
+		break;
+	case 176400:
+		iec->status[3] |= IEC958_AES3_CON_FS_176400;
+		break;
+	case 192000:
+		iec->status[3] |= IEC958_AES3_CON_FS_192000;
+		break;
+	default:
+		dev_err(dai->dev, "rate not supported!\n");
+		return -EINVAL;
+	}
+
+	/* specify the clock accuracy */
+	iec->status[3] |= IEC958_AES3_CON_CLOCK_1000PPM;
+
+	/*
+	 * specify the word length. The same word length value can mean
+	 * two different lengths. Hence, we need to specify the maximum
+	 * word length as well.
+	 */
+	switch (params_format(params)) {
+	case SNDRV_PCM_FORMAT_S16_LE:
+		iec->status[4] |= IEC958_AES4_CON_WORDLEN_20_16;
+		iec->status[4] &= ~IEC958_AES4_CON_MAX_WORDLEN_24;
+		break;
+	case SNDRV_PCM_FORMAT_S24_LE:
+		iec->status[4] |= IEC958_AES4_CON_WORDLEN_24_20;
+		iec->status[4] |= IEC958_AES4_CON_MAX_WORDLEN_24;
+		break;
+	default:
+		dev_err(dai->dev, "format not supported!\n");
+		return -EINVAL;
+	}
+
+	/*
+	 * Fill the CEA-861 audio infoframe (see spec for details)
+	 */
+
+	cea->db1_ct_cc = (params_channels(params) - 1)
+		& CEA861_AUDIO_INFOFRAME_DB1CC;
+	cea->db1_ct_cc |= CEA861_AUDIO_INFOFRAME_DB1CT_FROM_STREAM;
+
+	cea->db2_sf_ss = CEA861_AUDIO_INFOFRAME_DB2SF_FROM_STREAM;
+	cea->db2_sf_ss |= CEA861_AUDIO_INFOFRAME_DB2SS_FROM_STREAM;
+
+	cea->db3 = 0; /* not used, all zeros */
+
+	/*
+	 * The OMAP HDMI IP requires the use of the 8-channel channel code when
+	 * transmitting more than two channels.
+	 */
+	if (params_channels(params) == 2)
+		cea->db4_ca = 0x0;
+	else
+		cea->db4_ca = 0x13;
+
+	cea->db5_dminh_lsv = CEA861_AUDIO_INFOFRAME_DB5_DM_INH_PROHIBITED;
+	/* the expression is trivial but makes clear what we are doing */
+	cea->db5_dminh_lsv |= (0 & CEA861_AUDIO_INFOFRAME_DB5_LSV);
+
+	priv->dss_audio.iec = iec;
+	priv->dss_audio.cea = cea;
+
+	err = priv->dssdev->driver->audio_config(priv->dssdev,
+						 &priv->dss_audio);
+
+	return err;
+}
+
+static int omap_hdmi_dai_trigger(struct snd_pcm_substream *substream, int cmd,
+				struct snd_soc_dai *dai)
+{
+	struct hdmi_priv *priv = snd_soc_dai_get_drvdata(dai);
+	int err = 0;
+
+	switch (cmd) {
+	case SNDRV_PCM_TRIGGER_START:
+	case SNDRV_PCM_TRIGGER_RESUME:
+	case SNDRV_PCM_TRIGGER_PAUSE_RELEASE:
+		err = priv->dssdev->driver->audio_start(priv->dssdev);
+		break;
+	case SNDRV_PCM_TRIGGER_STOP:
+	case SNDRV_PCM_TRIGGER_SUSPEND:
+	case SNDRV_PCM_TRIGGER_PAUSE_PUSH:
+		priv->dssdev->driver->audio_stop(priv->dssdev);
 		break;
 	default:
 		err = -EINVAL;
 	}
-
-	omap_hdmi_dai_dma_params.data_type = OMAP_DMA_DATA_TYPE_S32;
-
-	snd_soc_dai_set_dma_data(dai, substream,
-				 &omap_hdmi_dai_dma_params);
-
 	return err;
 }
 
+static void omap_hdmi_dai_shutdown(struct snd_pcm_substream *substream,
+				struct snd_soc_dai *dai)
+{
+	struct hdmi_priv *priv = snd_soc_dai_get_drvdata(dai);
+
+	priv->dssdev->driver->audio_disable(priv->dssdev);
+}
+
 static const struct snd_soc_dai_ops omap_hdmi_dai_ops = {
 	.startup	= omap_hdmi_dai_startup,
 	.hw_params	= omap_hdmi_dai_hw_params,
+	.prepare	= omap_hdmi_dai_prepare,
+	.trigger	= omap_hdmi_dai_trigger,
+	.shutdown	= omap_hdmi_dai_shutdown,
 };
 
 static struct snd_soc_dai_driver omap_hdmi_dai = {
 	.playback = {
 		.channels_min = 2,
-		.channels_max = 2,
+		.channels_max = 8,
 		.rates = OMAP_HDMI_RATES,
 		.formats = OMAP_HDMI_FORMATS,
 	},
@@ -102,31 +264,77 @@
 {
 	int ret;
 	struct resource *hdmi_rsrc;
+	struct hdmi_priv *hdmi_data;
+	bool hdmi_dev_found = false;
+
+	hdmi_data = devm_kzalloc(&pdev->dev, sizeof(*hdmi_data), GFP_KERNEL);
+	if (hdmi_data == NULL) {
+		dev_err(&pdev->dev, "Cannot allocate memory for HDMI data\n");
+		return -ENOMEM;
+	}
 
 	hdmi_rsrc = platform_get_resource(pdev, IORESOURCE_MEM, 0);
 	if (!hdmi_rsrc) {
 		dev_err(&pdev->dev, "Cannot obtain IORESOURCE_MEM HDMI\n");
-		return -EINVAL;
+		return -ENODEV;
 	}
 
-	omap_hdmi_dai_dma_params.port_addr =  hdmi_rsrc->start
+	hdmi_data->dma_params.port_addr = hdmi_rsrc->start
 		+ OMAP_HDMI_AUDIO_DMA_PORT;
 
 	hdmi_rsrc = platform_get_resource(pdev, IORESOURCE_DMA, 0);
 	if (!hdmi_rsrc) {
 		dev_err(&pdev->dev, "Cannot obtain IORESOURCE_DMA HDMI\n");
-		return -EINVAL;
+		return -ENODEV;
 	}
 
-	omap_hdmi_dai_dma_params.dma_req =  hdmi_rsrc->start;
+	hdmi_data->dma_params.dma_req = hdmi_rsrc->start;
+	hdmi_data->dma_params.name = "HDMI playback";
+	hdmi_data->dma_params.sync_mode = OMAP_DMA_SYNC_PACKET;
 
+	/*
+	 * TODO: We assume that there is only one DSS HDMI device. Future
+	 * OMAP implementations may support more than one HDMI device and
+	 * we should provide separate audio support for all of them.
+	 */
+	/* Find an HDMI device. */
+	for_each_dss_dev(hdmi_data->dssdev) {
+		omap_dss_get_device(hdmi_data->dssdev);
+
+		if (!hdmi_data->dssdev->driver) {
+			omap_dss_put_device(hdmi_data->dssdev);
+			continue;
+		}
+
+		if (hdmi_data->dssdev->type == OMAP_DISPLAY_TYPE_HDMI) {
+			hdmi_dev_found = true;
+			break;
+		}
+	}
+
+	if (!hdmi_dev_found) {
+		dev_err(&pdev->dev, "no driver for HDMI display found\n");
+		return -ENODEV;
+	}
+
+	dev_set_drvdata(&pdev->dev, hdmi_data);
 	ret = snd_soc_register_dai(&pdev->dev, &omap_hdmi_dai);
+
 	return ret;
 }
 
 static int __devexit omap_hdmi_remove(struct platform_device *pdev)
 {
+	struct hdmi_priv *hdmi_data = dev_get_drvdata(&pdev->dev);
+
 	snd_soc_unregister_dai(&pdev->dev);
+
+	if (hdmi_data == NULL) {
+		dev_err(&pdev->dev, "cannot obtain HDMi data\n");
+		return -ENODEV;
+	}
+
+	omap_dss_put_device(hdmi_data->dssdev);
 	return 0;
 }
 
diff --git a/sound/soc/omap/omap-hdmi.h b/sound/soc/omap/omap-hdmi.h
index 34c298d..6ad2bf4 100644
--- a/sound/soc/omap/omap-hdmi.h
+++ b/sound/soc/omap/omap-hdmi.h
@@ -28,7 +28,9 @@
 #define OMAP_HDMI_AUDIO_DMA_PORT 0x8c
 
 #define OMAP_HDMI_RATES	(SNDRV_PCM_RATE_32000 | \
-				SNDRV_PCM_RATE_44100 | SNDRV_PCM_RATE_48000)
+				SNDRV_PCM_RATE_44100 | SNDRV_PCM_RATE_48000 | \
+				SNDRV_PCM_RATE_88200 | SNDRV_PCM_RATE_96000 | \
+				SNDRV_PCM_RATE_176400 | SNDRV_PCM_RATE_192000)
 
 #define OMAP_HDMI_FORMATS (SNDRV_PCM_FMTBIT_S16_LE | \
 				SNDRV_PCM_FMTBIT_S24_LE)
diff --git a/sound/soc/omap/omap-mcbsp.c b/sound/soc/omap/omap-mcbsp.c
index 6912ac7..1046083 100644
--- a/sound/soc/omap/omap-mcbsp.c
+++ b/sound/soc/omap/omap-mcbsp.c
@@ -71,18 +71,17 @@
 
 	dma_data = snd_soc_dai_get_dma_data(rtd->cpu_dai, substream);
 
-	/* TODO: Currently, MODE_ELEMENT == MODE_FRAME */
-	if (mcbsp->dma_op_mode == MCBSP_DMA_MODE_THRESHOLD)
-		/*
-		 * Configure McBSP threshold based on either:
-		 * packet_size, when the sDMA is in packet mode, or
-		 * based on the period size.
-		 */
-		if (dma_data->packet_size)
-			words = dma_data->packet_size;
-		else
-			words = snd_pcm_lib_period_bytes(substream) /
-							(mcbsp->wlen / 8);
+	/*
+	 * Configure McBSP threshold based on either:
+	 * packet_size, when the sDMA is in packet mode, or based on the
+	 * period size in THRESHOLD mode, otherwise use McBSP threshold = 1
+	 * for mono streams.
+	 */
+	if (dma_data->packet_size)
+		words = dma_data->packet_size;
+	else if (mcbsp->dma_op_mode == MCBSP_DMA_MODE_THRESHOLD)
+		words = snd_pcm_lib_period_bytes(substream) /
+						(mcbsp->wlen / 8);
 	else
 		words = 1;
 
@@ -139,13 +138,15 @@
 	if (mcbsp->pdata->buffer_size) {
 		/*
 		* Rule for the buffer size. We should not allow
-		* smaller buffer than the FIFO size to avoid underruns
+		* a buffer smaller than the FIFO size, to avoid underruns.
+		* This applies only to the playback stream.
 		*/
-		snd_pcm_hw_rule_add(substream->runtime, 0,
-				    SNDRV_PCM_HW_PARAM_BUFFER_SIZE,
-				    omap_mcbsp_hwrule_min_buffersize,
-				    mcbsp,
-				    SNDRV_PCM_HW_PARAM_CHANNELS, -1);
+		if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK)
+			snd_pcm_hw_rule_add(substream->runtime, 0,
+					    SNDRV_PCM_HW_PARAM_BUFFER_SIZE,
+					    omap_mcbsp_hwrule_min_buffersize,
+					    mcbsp,
+					    SNDRV_PCM_HW_PARAM_CHANNELS, -1);
 
 		/* Make sure, that the period size is always even */
 		snd_pcm_hw_constraint_step(substream->runtime, 0,
@@ -230,6 +231,7 @@
 	unsigned int format, div, framesize, master;
 
 	dma_data = &mcbsp->dma_data[substream->stream];
+	channels = params_channels(params);
 
 	switch (params_format(params)) {
 	case SNDRV_PCM_FORMAT_S16_LE:
@@ -245,7 +247,6 @@
 	}
 	if (mcbsp->pdata->buffer_size) {
 		dma_data->set_threshold = omap_mcbsp_set_threshold;
-		/* TODO: Currently, MODE_ELEMENT == MODE_FRAME */
 		if (mcbsp->dma_op_mode == MCBSP_DMA_MODE_THRESHOLD) {
 			int period_words, max_thrsh;
 
@@ -283,6 +284,10 @@
 			} else {
 				sync_mode = OMAP_DMA_SYNC_FRAME;
 			}
+		} else if (channels > 1) {
+			/* Use packet mode for non mono streams */
+			pkt_size = channels;
+			sync_mode = OMAP_DMA_SYNC_PACKET;
 		}
 	}
 
@@ -301,7 +306,7 @@
 	regs->rcr1	&= ~(RFRLEN1(0x7f) | RWDLEN1(7));
 	regs->xcr1	&= ~(XFRLEN1(0x7f) | XWDLEN1(7));
 	format = mcbsp->fmt & SND_SOC_DAIFMT_FORMAT_MASK;
-	wpf = channels = params_channels(params);
+	wpf = channels;
 	if (channels == 2 && (format == SND_SOC_DAIFMT_I2S ||
 			      format == SND_SOC_DAIFMT_LEFT_J)) {
 		/* Use dual-phase frames */
diff --git a/sound/soc/omap/omap-mcpdm.c b/sound/soc/omap/omap-mcpdm.c
index 3970556..59d47ab5 100644
--- a/sound/soc/omap/omap-mcpdm.c
+++ b/sound/soc/omap/omap-mcpdm.c
@@ -33,6 +33,7 @@
 #include <linux/irq.h>
 #include <linux/slab.h>
 #include <linux/pm_runtime.h>
+#include <linux/of_device.h>
 
 #include <sound/core.h>
 #include <sound/pcm.h>
@@ -507,10 +508,17 @@
 	return 0;
 }
 
+static const struct of_device_id omap_mcpdm_of_match[] = {
+	{ .compatible = "ti,omap4-mcpdm", },
+	{ }
+};
+MODULE_DEVICE_TABLE(of, omap_mcpdm_of_match);
+
 static struct platform_driver asoc_mcpdm_driver = {
 	.driver = {
 		.name	= "omap-mcpdm",
 		.owner	= THIS_MODULE,
+		.of_match_table = omap_mcpdm_of_match,
 	},
 
 	.probe	= asoc_mcpdm_probe,
diff --git a/sound/soc/omap/omap4-hdmi-card.c b/sound/soc/omap/omap4-hdmi-card.c
deleted file mode 100644
index 28d689b..0000000
--- a/sound/soc/omap/omap4-hdmi-card.c
+++ /dev/null
@@ -1,121 +0,0 @@
-/*
- * omap4-hdmi-card.c
- *
- * OMAP ALSA SoC machine driver for TI OMAP4 HDMI
- * Copyright (C) 2011 Texas Instruments Incorporated - http://www.ti.com/
- * Author: Ricardo Neri <ricardo.neri@ti.com>
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * version 2 as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
- * General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
- * 02110-1301 USA
- *
- */
-
-#include <linux/module.h>
-#include <sound/pcm.h>
-#include <sound/soc.h>
-#include <asm/mach-types.h>
-#include <video/omapdss.h>
-
-#define DRV_NAME "omap4-hdmi-audio"
-
-static int omap4_hdmi_dai_hw_params(struct snd_pcm_substream *substream,
-		struct snd_pcm_hw_params *params)
-{
-	int i;
-	struct omap_overlay_manager *mgr = NULL;
-	struct device *dev = substream->pcm->card->dev;
-
-	/* Find DSS HDMI device */
-	for (i = 0; i < omap_dss_get_num_overlay_managers(); i++) {
-		mgr = omap_dss_get_overlay_manager(i);
-		if (mgr && mgr->device
-			&& mgr->device->type == OMAP_DISPLAY_TYPE_HDMI)
-			break;
-	}
-
-	if (i == omap_dss_get_num_overlay_managers()) {
-		dev_err(dev, "HDMI display device not found!\n");
-		return -ENODEV;
-	}
-
-	/* Make sure HDMI is power-on to avoid L3 interconnect errors */
-	if (mgr->device->state != OMAP_DSS_DISPLAY_ACTIVE) {
-		dev_err(dev, "HDMI display is not active!\n");
-		return -EIO;
-	}
-
-	return 0;
-}
-
-static struct snd_soc_ops omap4_hdmi_dai_ops = {
-	.hw_params = omap4_hdmi_dai_hw_params,
-};
-
-static struct snd_soc_dai_link omap4_hdmi_dai = {
-	.name = "HDMI",
-	.stream_name = "HDMI",
-	.cpu_dai_name = "hdmi-audio-dai",
-	.platform_name = "omap-pcm-audio",
-	.codec_name = "omapdss_hdmi",
-	.codec_dai_name = "hdmi-audio-codec",
-	.ops = &omap4_hdmi_dai_ops,
-};
-
-static struct snd_soc_card snd_soc_omap4_hdmi = {
-	.name = "OMAP4HDMI",
-	.owner = THIS_MODULE,
-	.dai_link = &omap4_hdmi_dai,
-	.num_links = 1,
-};
-
-static __devinit int omap4_hdmi_probe(struct platform_device *pdev)
-{
-	struct snd_soc_card *card = &snd_soc_omap4_hdmi;
-	int ret;
-
-	card->dev = &pdev->dev;
-
-	ret = snd_soc_register_card(card);
-	if (ret) {
-		dev_err(&pdev->dev, "snd_soc_register_card failed (%d)\n", ret);
-		card->dev = NULL;
-		return ret;
-	}
-	return 0;
-}
-
-static int __devexit omap4_hdmi_remove(struct platform_device *pdev)
-{
-	struct snd_soc_card *card = platform_get_drvdata(pdev);
-
-	snd_soc_unregister_card(card);
-	card->dev = NULL;
-	return 0;
-}
-
-static struct platform_driver omap4_hdmi_driver = {
-	.driver = {
-		.name = "omap4-hdmi-audio",
-		.owner = THIS_MODULE,
-	},
-	.probe = omap4_hdmi_probe,
-	.remove = __devexit_p(omap4_hdmi_remove),
-};
-
-module_platform_driver(omap4_hdmi_driver);
-
-MODULE_AUTHOR("Ricardo Neri <ricardo.neri@ti.com>");
-MODULE_DESCRIPTION("OMAP4 HDMI machine ASoC driver");
-MODULE_LICENSE("GPL");
-MODULE_ALIAS("platform:" DRV_NAME);
diff --git a/sound/soc/pxa/pxa-ssp.c b/sound/soc/pxa/pxa-ssp.c
index 1c2aa7f..4da5fc5 100644
--- a/sound/soc/pxa/pxa-ssp.c
+++ b/sound/soc/pxa/pxa-ssp.c
@@ -33,7 +33,6 @@
 
 #include <mach/hardware.h>
 #include <mach/dma.h>
-#include <mach/audio.h>
 
 #include "../../arm/pxa2xx-pcm.h"
 #include "pxa-ssp.h"
@@ -194,7 +193,7 @@
 {
 	u32 sscr0 = pxa_ssp_read_reg(ssp, SSCR0);
 
-	if (cpu_is_pxa25x() && ssp->type == PXA25x_SSP) {
+	if (ssp->type == PXA25x_SSP) {
 		sscr0 &= ~0x0000ff00;
 		sscr0 |= ((div - 2)/2) << 8; /* 2..512 */
 	} else {
@@ -212,7 +211,7 @@
 	u32 sscr0 = pxa_ssp_read_reg(ssp, SSCR0);
 	u32 div;
 
-	if (cpu_is_pxa25x() && ssp->type == PXA25x_SSP)
+	if (ssp->type == PXA25x_SSP)
 		div = ((sscr0 >> 8) & 0xff) * 2 + 2;
 	else
 		div = ((sscr0 >> 8) & 0xfff) + 1;
@@ -242,7 +241,7 @@
 		break;
 	case PXA_SSP_CLK_PLL:
 		/* Internal PLL is fixed */
-		if (cpu_is_pxa25x())
+		if (ssp->type == PXA25x_SSP)
 			priv->sysclk = 1843200;
 		else
 			priv->sysclk = 13000000;
@@ -266,11 +265,11 @@
 
 	/* The SSP clock must be disabled when changing SSP clock mode
 	 * on PXA2xx.  On PXA3xx it must be enabled when doing so. */
-	if (!cpu_is_pxa3xx())
+	if (ssp->type != PXA3xx_SSP)
 		clk_disable(ssp->clk);
 	val = pxa_ssp_read_reg(ssp, SSCR0) | sscr0;
 	pxa_ssp_write_reg(ssp, SSCR0, val);
-	if (!cpu_is_pxa3xx())
+	if (ssp->type != PXA3xx_SSP)
 		clk_enable(ssp->clk);
 
 	return 0;
@@ -294,24 +293,20 @@
 	case PXA_SSP_AUDIO_DIV_SCDB:
 		val = pxa_ssp_read_reg(ssp, SSACD);
 		val &= ~SSACD_SCDB;
-#if defined(CONFIG_PXA3xx)
-		if (cpu_is_pxa3xx())
+		if (ssp->type == PXA3xx_SSP)
 			val &= ~SSACD_SCDX8;
-#endif
 		switch (div) {
 		case PXA_SSP_CLK_SCDB_1:
 			val |= SSACD_SCDB;
 			break;
 		case PXA_SSP_CLK_SCDB_4:
 			break;
-#if defined(CONFIG_PXA3xx)
 		case PXA_SSP_CLK_SCDB_8:
-			if (cpu_is_pxa3xx())
+			if (ssp->type == PXA3xx_SSP)
 				val |= SSACD_SCDX8;
 			else
 				return -EINVAL;
 			break;
-#endif
 		default:
 			return -EINVAL;
 		}
@@ -337,10 +332,8 @@
 	struct ssp_device *ssp = priv->ssp;
 	u32 ssacd = pxa_ssp_read_reg(ssp, SSACD) & ~0x70;
 
-#if defined(CONFIG_PXA3xx)
-	if (cpu_is_pxa3xx())
+	if (ssp->type == PXA3xx_SSP)
 		pxa_ssp_write_reg(ssp, SSACDD, 0);
-#endif
 
 	switch (freq_out) {
 	case 5622000:
@@ -365,11 +358,10 @@
 		break;
 
 	default:
-#ifdef CONFIG_PXA3xx
 		/* PXA3xx has a clock ditherer which can be used to generate
 		 * a wider range of frequencies - calculate a value for it.
 		 */
-		if (cpu_is_pxa3xx()) {
+		if (ssp->type == PXA3xx_SSP) {
 			u32 val;
 			u64 tmp = 19968;
 			tmp *= 1000000;
@@ -386,7 +378,6 @@
 				val, freq_out);
 			break;
 		}
-#endif
 
 		return -EINVAL;
 	}
@@ -590,10 +581,8 @@
 	/* bit size */
 	switch (params_format(params)) {
 	case SNDRV_PCM_FORMAT_S16_LE:
-#ifdef CONFIG_PXA3xx
-		if (cpu_is_pxa3xx())
+		if (ssp->type == PXA3xx_SSP)
 			sscr0 |= SSCR0_FPCKE;
-#endif
 		sscr0 |= SSCR0_DataSize(16);
 		break;
 	case SNDRV_PCM_FORMAT_S24_LE:
@@ -618,9 +607,7 @@
 			* trying and failing a lot; some of the registers
 			* needed for that mode are only available on PXA3xx.
 			*/
-
-#ifdef CONFIG_PXA3xx
-			if (!cpu_is_pxa3xx())
+			if (ssp->type != PXA3xx_SSP)
 				return -EINVAL;
 
 			sspsp |= SSPSP_SFRMWDTH(width * 2);
@@ -628,9 +615,6 @@
 			sspsp |= SSPSP_EDMYSTOP(3);
 			sspsp |= SSPSP_DMYSTOP(3);
 			sspsp |= SSPSP_DMYSTRT(1);
-#else
-			return -EINVAL;
-#endif
 		} else {
 			/* The frame width is the width the LRCLK is
 			 * asserted for; the delay is expressed in
diff --git a/sound/soc/sh/fsi.c b/sound/soc/sh/fsi.c
index 7cee225..2ef9853 100644
--- a/sound/soc/sh/fsi.c
+++ b/sound/soc/sh/fsi.c
@@ -1052,6 +1052,13 @@
 	return 0;
 }
 
+static dma_addr_t fsi_dma_get_area(struct fsi_stream *io)
+{
+	struct snd_pcm_runtime *runtime = io->substream->runtime;
+
+	return io->dma + samples_to_bytes(runtime, io->buff_sample_pos);
+}
+
 static void fsi_dma_complete(void *data)
 {
 	struct fsi_stream *io = (struct fsi_stream *)data;
@@ -1061,7 +1068,7 @@
 	enum dma_data_direction dir = fsi_stream_is_play(fsi, io) ?
 		DMA_TO_DEVICE : DMA_FROM_DEVICE;
 
-	dma_sync_single_for_cpu(dai->dev, io->dma,
+	dma_sync_single_for_cpu(dai->dev, fsi_dma_get_area(io),
 			samples_to_bytes(runtime, io->period_samples), dir);
 
 	io->buff_sample_pos += io->period_samples;
@@ -1078,13 +1085,6 @@
 	snd_pcm_period_elapsed(io->substream);
 }
 
-static dma_addr_t fsi_dma_get_area(struct fsi_stream *io)
-{
-	struct snd_pcm_runtime *runtime = io->substream->runtime;
-
-	return io->dma + samples_to_bytes(runtime, io->buff_sample_pos);
-}
-
 static void fsi_dma_do_tasklet(unsigned long data)
 {
 	struct fsi_stream *io = (struct fsi_stream *)data;
@@ -1110,7 +1110,7 @@
 	len	= samples_to_bytes(runtime, io->period_samples);
 	buf	= fsi_dma_get_area(io);
 
-	dma_sync_single_for_device(dai->dev, io->dma, len, dir);
+	dma_sync_single_for_device(dai->dev, buf, len, dir);
 
 	sg_init_table(&sg, 1);
 	sg_set_page(&sg, pfn_to_page(PFN_DOWN(buf)),
@@ -1172,9 +1172,16 @@
 static void fsi_dma_push_start_stop(struct fsi_priv *fsi, struct fsi_stream *io,
 				 int start)
 {
+	struct fsi_master *master = fsi_get_master(fsi);
+	u32 clk = fsi_is_port_a(fsi) ? CRA : CRB;
 	u32 enable = start ? DMA_ON : 0;
 
 	fsi_reg_mask_set(fsi, OUT_DMAC, DMA_ON, enable);
+
+	dmaengine_terminate_all(io->chan);
+
+	if (fsi_is_clk_master(fsi))
+		fsi_master_mask_set(master, CLK_RST, clk, (enable) ? clk : 0);
 }
 
 static int fsi_dma_probe(struct fsi_priv *fsi, struct fsi_stream *io)
diff --git a/sound/soc/soc-dapm.c b/sound/soc/soc-dapm.c
index 90ee77d..89eae93 100644
--- a/sound/soc/soc-dapm.c
+++ b/sound/soc/soc-dapm.c
@@ -913,7 +913,7 @@
 			/* do we need to add this widget to the list ? */
 			if (list) {
 				int err;
-				err = dapm_list_add_widget(list, path->sink);
+				err = dapm_list_add_widget(list, path->source);
 				if (err < 0) {
 					dev_err(widget->dapm->dev, "could not add widget %s\n",
 						widget->name);
@@ -954,7 +954,7 @@
 	if (stream == SNDRV_PCM_STREAM_PLAYBACK)
 		paths = is_connected_output_ep(dai->playback_widget, list);
 	else
-		paths = is_connected_input_ep(dai->playback_widget, list);
+		paths = is_connected_input_ep(dai->capture_widget, list);
 
 	trace_snd_soc_dapm_connected(paths, stream);
 	dapm_clear_walk(&card->dapm);
diff --git a/sound/soc/soc-pcm.c b/sound/soc/soc-pcm.c
index bedd171..48fd15b 100644
--- a/sound/soc/soc-pcm.c
+++ b/sound/soc/soc-pcm.c
@@ -794,6 +794,9 @@
 		for (i = 0; i < card->num_links; i++) {
 			be = &card->rtd[i];
 
+			if (!be->dai_link->no_pcm)
+				continue;
+
 			if (be->cpu_dai->playback_widget == widget ||
 				be->codec_dai->playback_widget == widget)
 				return be;
@@ -803,6 +806,9 @@
 		for (i = 0; i < card->num_links; i++) {
 			be = &card->rtd[i];
 
+			if (!be->dai_link->no_pcm)
+				continue;
+
 			if (be->cpu_dai->capture_widget == widget ||
 				be->codec_dai->capture_widget == widget)
 				return be;
diff --git a/sound/soc/tegra/tegra30_ahub.c b/sound/soc/tegra/tegra30_ahub.c
index 57cd419..f43edb3 100644
--- a/sound/soc/tegra/tegra30_ahub.c
+++ b/sound/soc/tegra/tegra30_ahub.c
@@ -629,3 +629,4 @@
 MODULE_DESCRIPTION("Tegra30 AHUB driver");
 MODULE_LICENSE("GPL v2");
 MODULE_ALIAS("platform:" DRV_NAME);
+MODULE_DEVICE_TABLE(of, tegra30_ahub_of_match);
diff --git a/sound/soc/tegra/tegra_wm8903.c b/sound/soc/tegra/tegra_wm8903.c
index 0b0df49..3b6da91 100644
--- a/sound/soc/tegra/tegra_wm8903.c
+++ b/sound/soc/tegra/tegra_wm8903.c
@@ -346,6 +346,17 @@
 	return 0;
 }
 
+static int tegra_wm8903_remove(struct snd_soc_card *card)
+{
+	struct snd_soc_pcm_runtime *rtd = &(card->rtd[0]);
+	struct snd_soc_dai *codec_dai = rtd->codec_dai;
+	struct snd_soc_codec *codec = codec_dai->codec;
+
+	wm8903_mic_detect(codec, NULL, 0, 0);
+
+	return 0;
+}
+
 static struct snd_soc_dai_link tegra_wm8903_dai = {
 	.name = "WM8903",
 	.stream_name = "WM8903 PCM",
@@ -363,6 +374,8 @@
 	.dai_link = &tegra_wm8903_dai,
 	.num_links = 1,
 
+	.remove = tegra_wm8903_remove,
+
 	.controls = tegra_wm8903_controls,
 	.num_controls = ARRAY_SIZE(tegra_wm8903_controls),
 	.dapm_widgets = tegra_wm8903_dapm_widgets,
diff --git a/sound/usb/6fire/firmware.c b/sound/usb/6fire/firmware.c
index 6f9715a..56ad923 100644
--- a/sound/usb/6fire/firmware.c
+++ b/sound/usb/6fire/firmware.c
@@ -209,7 +209,7 @@
 	int ret;
 	u8 data;
 	struct usb_device *device = interface_to_usbdev(intf);
-	const struct firmware *fw = 0;
+	const struct firmware *fw = NULL;
 	struct ihex_record *rec = kmalloc(sizeof(struct ihex_record),
 			GFP_KERNEL);
 
diff --git a/sound/usb/card.h b/sound/usb/card.h
index 0d37238..2b9ffff 100644
--- a/sound/usb/card.h
+++ b/sound/usb/card.h
@@ -119,6 +119,7 @@
 	unsigned long unlink_mask;	/* bitmask of unlinked urbs */
 
 	/* data and sync endpoints for this stream */
+	unsigned int ep_num;		/* the endpoint number */
 	struct snd_usb_endpoint *data_endpoint;
 	struct snd_usb_endpoint *sync_endpoint;
 	unsigned long flags;
diff --git a/sound/usb/mixer_maps.c b/sound/usb/mixer_maps.c
index 41daaa2..e71fe55 100644
--- a/sound/usb/mixer_maps.c
+++ b/sound/usb/mixer_maps.c
@@ -341,6 +341,14 @@
 		.map = audigy2nx_map,
 		.selector_map = audigy2nx_selectors,
 	},
+	{	/* Logitech, Inc. QuickCam Pro for Notebooks */
+		.id = USB_ID(0x046d, 0x0991),
+		.ignore_ctl_error = 1,
+	},
+	{	/* Logitech, Inc. QuickCam E 3500 */
+		.id = USB_ID(0x046d, 0x09a4),
+		.ignore_ctl_error = 1,
+	},
 	{
 		/* Hercules DJ Console (Windows Edition) */
 		.id = USB_ID(0x06f8, 0xb000),
diff --git a/sound/usb/pcm.c b/sound/usb/pcm.c
index 24839d9..54607f8 100644
--- a/sound/usb/pcm.c
+++ b/sound/usb/pcm.c
@@ -354,17 +354,21 @@
 		    (get_endpoint(alts, 1)->bLength >= USB_DT_ENDPOINT_AUDIO_SIZE &&
 		     get_endpoint(alts, 1)->bSynchAddress != 0 &&
 		     !implicit_fb)) {
-			snd_printk(KERN_ERR "%d:%d:%d : invalid synch pipe\n",
-				   dev->devnum, fmt->iface, fmt->altsetting);
+			snd_printk(KERN_ERR "%d:%d:%d : invalid sync pipe. bmAttributes %02x, bLength %d, bSynchAddress %02x\n",
+				   dev->devnum, fmt->iface, fmt->altsetting,
+				   get_endpoint(alts, 1)->bmAttributes,
+				   get_endpoint(alts, 1)->bLength,
+				   get_endpoint(alts, 1)->bSynchAddress);
 			return -EINVAL;
 		}
 		ep = get_endpoint(alts, 1)->bEndpointAddress;
-		if (get_endpoint(alts, 0)->bLength >= USB_DT_ENDPOINT_AUDIO_SIZE &&
+		if (!implicit_fb &&
+		    get_endpoint(alts, 0)->bLength >= USB_DT_ENDPOINT_AUDIO_SIZE &&
 		    (( is_playback && ep != (unsigned int)(get_endpoint(alts, 0)->bSynchAddress | USB_DIR_IN)) ||
-		     (!is_playback && ep != (unsigned int)(get_endpoint(alts, 0)->bSynchAddress & ~USB_DIR_IN)) ||
-		     ( is_playback && !implicit_fb))) {
-			snd_printk(KERN_ERR "%d:%d:%d : invalid synch pipe\n",
-				   dev->devnum, fmt->iface, fmt->altsetting);
+		     (!is_playback && ep != (unsigned int)(get_endpoint(alts, 0)->bSynchAddress & ~USB_DIR_IN)))) {
+			snd_printk(KERN_ERR "%d:%d:%d : invalid sync pipe. is_playback %d, ep %02x, bSynchAddress %02x\n",
+				   dev->devnum, fmt->iface, fmt->altsetting,
+				   is_playback, ep, get_endpoint(alts, 0)->bSynchAddress);
 			return -EINVAL;
 		}
 
@@ -788,6 +792,9 @@
 	int count = 0, needs_knot = 0;
 	int err;
 
+	kfree(subs->rate_list.list);
+	subs->rate_list.list = NULL;
+
 	list_for_each_entry(fp, &subs->fmt_list, list) {
 		if (fp->rates & SNDRV_PCM_RATE_CONTINUOUS)
 			return 0;
@@ -1144,7 +1151,8 @@
 	return -EINVAL;
 }
 
-int snd_usb_substream_capture_trigger(struct snd_pcm_substream *substream, int cmd)
+static int snd_usb_substream_capture_trigger(struct snd_pcm_substream *substream,
+					     int cmd)
 {
 	int err;
 	struct snd_usb_substream *subs = substream->runtime->private_data;
diff --git a/sound/usb/quirks-table.h b/sound/usb/quirks-table.h
index d89ab4c..79780fa 100644
--- a/sound/usb/quirks-table.h
+++ b/sound/usb/quirks-table.h
@@ -1831,6 +1831,36 @@
 		}
 	}
 },
+{
+	USB_DEVICE(0x0582, 0x014d),
+	.driver_info = (unsigned long) & (const struct snd_usb_audio_quirk) {
+		/* .vendor_name = "BOSS", */
+		/* .product_name = "GT-100", */
+		.ifnum = QUIRK_ANY_INTERFACE,
+		.type = QUIRK_COMPOSITE,
+		.data = (const struct snd_usb_audio_quirk[]) {
+			{
+				.ifnum = 1,
+				.type = QUIRK_AUDIO_STANDARD_INTERFACE
+			},
+			{
+				.ifnum = 2,
+				.type = QUIRK_AUDIO_STANDARD_INTERFACE
+			},
+			{
+				.ifnum = 3,
+				.type = QUIRK_MIDI_FIXED_ENDPOINT,
+				.data = & (const struct snd_usb_midi_endpoint_info) {
+					.out_cables = 0x0001,
+					.in_cables  = 0x0001
+				}
+			},
+			{
+				.ifnum = -1
+			}
+		}
+	}
+},
 
 /* Guillemot devices */
 {
diff --git a/sound/usb/stream.c b/sound/usb/stream.c
index 6b7d7a2..083ed81 100644
--- a/sound/usb/stream.c
+++ b/sound/usb/stream.c
@@ -97,6 +97,7 @@
 	subs->formats |= fp->formats;
 	subs->num_formats++;
 	subs->fmt_type = fp->fmt_type;
+	subs->ep_num = fp->endpoint;
 }
 
 /*
@@ -119,9 +120,7 @@
 		if (as->fmt_type != fp->fmt_type)
 			continue;
 		subs = &as->substream[stream];
-		if (!subs->data_endpoint)
-			continue;
-		if (subs->data_endpoint->ep_num == fp->endpoint) {
+		if (subs->ep_num == fp->endpoint) {
 			list_add_tail(&fp->list, &subs->fmt_list);
 			subs->num_formats++;
 			subs->formats |= fp->formats;
@@ -134,7 +133,7 @@
 		if (as->fmt_type != fp->fmt_type)
 			continue;
 		subs = &as->substream[stream];
-		if (subs->data_endpoint)
+		if (subs->ep_num)
 			continue;
 		err = snd_pcm_new_stream(as->pcm, stream, 1);
 		if (err < 0)
diff --git a/tools/hv/hv_kvp_daemon.c b/tools/hv/hv_kvp_daemon.c
index 146fd61..d9834b3 100644
--- a/tools/hv/hv_kvp_daemon.c
+++ b/tools/hv/hv_kvp_daemon.c
@@ -701,14 +701,18 @@
 	pfd.fd = fd;
 
 	while (1) {
+		struct sockaddr *addr_p = (struct sockaddr *) &addr;
+		socklen_t addr_l = sizeof(addr);
 		pfd.events = POLLIN;
 		pfd.revents = 0;
 		poll(&pfd, 1, -1);
 
-		len = recv(fd, kvp_recv_buffer, sizeof(kvp_recv_buffer), 0);
+		len = recvfrom(fd, kvp_recv_buffer, sizeof(kvp_recv_buffer), 0,
+				addr_p, &addr_l);
 
-		if (len < 0) {
-			syslog(LOG_ERR, "recv failed; error:%d", len);
+		if (len < 0 || addr.nl_pid) {
+			syslog(LOG_ERR, "recvfrom failed; pid:%u error:%d %s",
+					addr.nl_pid, errno, strerror(errno));
 			close(fd);
 			return -1;
 		}
diff --git a/tools/lib/traceevent/event-parse.c b/tools/lib/traceevent/event-parse.c
index 9985349..5548282 100644
--- a/tools/lib/traceevent/event-parse.c
+++ b/tools/lib/traceevent/event-parse.c
@@ -1434,8 +1434,11 @@
 fail:
 	free_token(token);
 fail_expect:
-	if (field)
+	if (field) {
+		free(field->type);
+		free(field->name);
 		free(field);
+	}
 	return -1;
 }
 
@@ -1712,6 +1715,8 @@
 
 		if (set_op_prio(arg) == -1) {
 			event->flags |= EVENT_FL_FAILED;
+			/* arg->op.op (= token) will be freed at out_free */
+			arg->op.op = NULL;
 			goto out_free;
 		}
 
@@ -2124,6 +2129,13 @@
 
 		free_token(token);
 		type = process_arg(event, arg, &token);
+
+		if (type == EVENT_OP)
+			type = process_op(event, arg, &token);
+
+		if (type == EVENT_ERROR)
+			goto out_free;
+
 		if (test_type_token(type, token, EVENT_DELIM, ","))
 			goto out_free;
 
@@ -2288,17 +2300,18 @@
 	arg = alloc_arg();
 	type = process_arg(event, arg, &token);
 	if (type == EVENT_ERROR)
-		goto out_free;
+		goto out_free_arg;
 
 	if (!test_type_token(type, token, EVENT_OP, "]"))
-		goto out_free;
+		goto out_free_arg;
 
 	free_token(token);
 	type = read_token_item(tok);
 	return type;
 
+ out_free_arg:
+	free_arg(arg);
  out_free:
-	free(arg);
 	free_token(token);
 	*tok = NULL;
 	return EVENT_ERROR;
@@ -3362,6 +3375,7 @@
 			break;
 		}
 		farg = farg->next;
+		param = param->next;
 	}
 
 	ret = (*func_handle->func)(s, args);
diff --git a/tools/lib/traceevent/parse-filter.c b/tools/lib/traceevent/parse-filter.c
index 2d40c5e..dfcfe2c1 100644
--- a/tools/lib/traceevent/parse-filter.c
+++ b/tools/lib/traceevent/parse-filter.c
@@ -325,9 +325,8 @@
 }
 
 static struct filter_arg *
-create_arg_item(struct event_format *event,
-		const char *token, enum filter_arg_type type,
-		char **error_str)
+create_arg_item(struct event_format *event, const char *token,
+		enum event_type type, char **error_str)
 {
 	struct format_field *field;
 	struct filter_arg *arg;
@@ -1585,7 +1584,7 @@
 		const char *name;
 
 		name = get_comm(event, record);
-		return (unsigned long long)name;
+		return (unsigned long)name;
 	}
 
 	pevent_read_number_field(field, record->data, &val);
diff --git a/tools/perf/Documentation/perfconfig.example b/tools/perf/Documentation/perfconfig.example
index 42c6fd2..767ea24 100644
--- a/tools/perf/Documentation/perfconfig.example
+++ b/tools/perf/Documentation/perfconfig.example
@@ -19,3 +19,11 @@
 
 	# Default, disable using /dev/null
 	dir = /root/.debug
+
+[annotate]
+
+	# Defaults
+	hide_src_code = false
+	use_offset = true
+	jump_arrows = true
+	show_nr_jumps = false
diff --git a/tools/perf/MANIFEST b/tools/perf/MANIFEST
index 5476bc0..b4b572e 100644
--- a/tools/perf/MANIFEST
+++ b/tools/perf/MANIFEST
@@ -1,4 +1,6 @@
 tools/perf
+tools/scripts
+tools/lib/traceevent
 include/linux/const.h
 include/linux/perf_event.h
 include/linux/rbtree.h
diff --git a/tools/perf/Makefile b/tools/perf/Makefile
index 1d3d513..0eee64c 100644
--- a/tools/perf/Makefile
+++ b/tools/perf/Makefile
@@ -80,7 +80,7 @@
   PERF_DEBUG = $(DEBUG)
 endif
 ifndef PERF_DEBUG
-  CFLAGS_OPTIMIZE = -O6
+  CFLAGS_OPTIMIZE = -O6 -D_FORTIFY_SOURCE=2
 endif
 
 ifdef PARSER_DEBUG
@@ -89,7 +89,7 @@
 	PARSER_DEBUG_CFLAGS := -DPARSER_DEBUG
 endif
 
-CFLAGS = -fno-omit-frame-pointer -ggdb3 -Wall -Wextra -std=gnu99 $(CFLAGS_WERROR) $(CFLAGS_OPTIMIZE) -D_FORTIFY_SOURCE=2 $(EXTRA_WARNINGS) $(EXTRA_CFLAGS) $(PARSER_DEBUG_CFLAGS)
+CFLAGS = -fno-omit-frame-pointer -ggdb3 -Wall -Wextra -std=gnu99 $(CFLAGS_WERROR) $(CFLAGS_OPTIMIZE) $(EXTRA_WARNINGS) $(EXTRA_CFLAGS) $(PARSER_DEBUG_CFLAGS)
 EXTLIBS = -lpthread -lrt -lelf -lm
 ALL_CFLAGS = $(CFLAGS) -D_LARGEFILE64_SOURCE -D_FILE_OFFSET_BITS=64 -D_GNU_SOURCE
 ALL_LDFLAGS = $(LDFLAGS)
diff --git a/tools/perf/builtin-annotate.c b/tools/perf/builtin-annotate.c
index 806e0a2..67522cf 100644
--- a/tools/perf/builtin-annotate.c
+++ b/tools/perf/builtin-annotate.c
@@ -215,7 +215,7 @@
 	}
 
 	if (total_nr_samples == 0) {
-		ui__warning("The %s file has no samples!\n", session->filename);
+		ui__error("The %s file has no samples!\n", session->filename);
 		goto out_delete;
 	}
 out_delete:
diff --git a/tools/perf/builtin-evlist.c b/tools/perf/builtin-evlist.c
index e52d77e..acd78dc 100644
--- a/tools/perf/builtin-evlist.c
+++ b/tools/perf/builtin-evlist.c
@@ -116,7 +116,7 @@
 int cmd_evlist(int argc, const char **argv, const char *prefix __used)
 {
 	struct perf_attr_details details = { .verbose = false, };
-	const char *input_name;
+	const char *input_name = NULL;
 	const struct option options[] = {
 		OPT_STRING('i', "input", &input_name, "file",
 			    "Input file name"),
diff --git a/tools/perf/builtin-record.c b/tools/perf/builtin-record.c
index e5cb084..f95840d 100644
--- a/tools/perf/builtin-record.c
+++ b/tools/perf/builtin-record.c
@@ -264,7 +264,7 @@
 			}
 
 			if (err == ENOENT) {
-				ui__warning("The %s event is not supported.\n",
+				ui__error("The %s event is not supported.\n",
 					    event_name(pos));
 				exit(EXIT_FAILURE);
 			}
@@ -858,8 +858,8 @@
 		usage_with_options(record_usage, record_options);
 
 	if (rec->force && rec->append_file) {
-		fprintf(stderr, "Can't overwrite and append at the same time."
-				" You need to choose between -f and -A");
+		ui__error("Can't overwrite and append at the same time."
+			  " You need to choose between -f and -A");
 		usage_with_options(record_usage, record_options);
 	} else if (rec->append_file) {
 		rec->write_mode = WRITE_APPEND;
@@ -868,8 +868,8 @@
 	}
 
 	if (nr_cgroups && !rec->opts.target.system_wide) {
-		fprintf(stderr, "cgroup monitoring only available in"
-			" system-wide mode\n");
+		ui__error("cgroup monitoring only available in"
+			  " system-wide mode\n");
 		usage_with_options(record_usage, record_options);
 	}
 
@@ -905,7 +905,7 @@
 		int saved_errno = errno;
 
 		perf_target__strerror(&rec->opts.target, err, errbuf, BUFSIZ);
-		ui__warning("%s", errbuf);
+		ui__error("%s", errbuf);
 
 		err = -saved_errno;
 		goto out_free_fd;
@@ -933,7 +933,7 @@
 	else if (rec->opts.freq) {
 		rec->opts.default_interval = rec->opts.freq;
 	} else {
-		fprintf(stderr, "frequency and count are zero, aborting\n");
+		ui__error("frequency and count are zero, aborting\n");
 		err = -EINVAL;
 		goto out_free_fd;
 	}
diff --git a/tools/perf/builtin-report.c b/tools/perf/builtin-report.c
index d58e414..25249f7 100644
--- a/tools/perf/builtin-report.c
+++ b/tools/perf/builtin-report.c
@@ -152,7 +152,7 @@
 
 	if (symbol_conf.use_callchain) {
 		err = callchain_append(he->callchain,
-				       &evsel->hists.callchain_cursor,
+				       &callchain_cursor,
 				       sample->period);
 		if (err)
 			return err;
@@ -162,7 +162,7 @@
 	 * so we don't allocated the extra space needed because the stdio
 	 * code will not use it.
 	 */
-	if (al->sym != NULL && use_browser > 0) {
+	if (he->ms.sym != NULL && use_browser > 0) {
 		struct annotation *notes = symbol__annotation(he->ms.sym);
 
 		assert(evsel != NULL);
@@ -251,13 +251,13 @@
 
 	if (!(self->sample_type & PERF_SAMPLE_CALLCHAIN)) {
 		if (sort__has_parent) {
-			ui__warning("Selected --sort parent, but no "
+			ui__error("Selected --sort parent, but no "
 				    "callchain data. Did you call "
 				    "'perf record' without -g?\n");
 			return -EINVAL;
 		}
 		if (symbol_conf.use_callchain) {
-			ui__warning("Selected -g but no callchain data. Did "
+			ui__error("Selected -g but no callchain data. Did "
 				    "you call 'perf record' without -g?\n");
 			return -1;
 		}
@@ -266,17 +266,15 @@
 		   !symbol_conf.use_callchain) {
 			symbol_conf.use_callchain = true;
 			if (callchain_register_param(&callchain_param) < 0) {
-				ui__warning("Can't register callchain "
-					    "params.\n");
+				ui__error("Can't register callchain params.\n");
 				return -EINVAL;
 			}
 	}
 
 	if (sort__branch_mode == 1) {
 		if (!(self->sample_type & PERF_SAMPLE_BRANCH_STACK)) {
-			fprintf(stderr, "selected -b but no branch data."
-					" Did you call perf record without"
-					" -b?\n");
+			ui__error("Selected -b but no branch data. "
+				  "Did you call perf record without -b?\n");
 			return -1;
 		}
 	}
@@ -420,7 +418,7 @@
 	}
 
 	if (nr_samples == 0) {
-		ui__warning("The %s file has no samples!\n", session->filename);
+		ui__error("The %s file has no samples!\n", session->filename);
 		goto out_delete;
 	}
 
diff --git a/tools/perf/builtin-stat.c b/tools/perf/builtin-stat.c
index 62ae30d..07b5c77 100644
--- a/tools/perf/builtin-stat.c
+++ b/tools/perf/builtin-stat.c
@@ -1129,7 +1129,7 @@
 		return 0;
 
 	if (!evsel_list->nr_entries) {
-		if (perf_evlist__add_attrs_array(evsel_list, default_attrs) < 0)
+		if (perf_evlist__add_default_attrs(evsel_list, default_attrs) < 0)
 			return -1;
 	}
 
@@ -1139,21 +1139,21 @@
 		return 0;
 
 	/* Append detailed run extra attributes: */
-	if (perf_evlist__add_attrs_array(evsel_list, detailed_attrs) < 0)
+	if (perf_evlist__add_default_attrs(evsel_list, detailed_attrs) < 0)
 		return -1;
 
 	if (detailed_run < 2)
 		return 0;
 
 	/* Append very detailed run extra attributes: */
-	if (perf_evlist__add_attrs_array(evsel_list, very_detailed_attrs) < 0)
+	if (perf_evlist__add_default_attrs(evsel_list, very_detailed_attrs) < 0)
 		return -1;
 
 	if (detailed_run < 3)
 		return 0;
 
 	/* Append very, very detailed run extra attributes: */
-	return perf_evlist__add_attrs_array(evsel_list, very_very_detailed_attrs);
+	return perf_evlist__add_default_attrs(evsel_list, very_very_detailed_attrs);
 }
 
 int cmd_stat(int argc, const char **argv, const char *prefix __used)
@@ -1179,6 +1179,12 @@
 		fprintf(stderr, "cannot use both --output and --log-fd\n");
 		usage_with_options(stat_usage, options);
 	}
+
+	if (output_fd < 0) {
+		fprintf(stderr, "argument to --log-fd must be a > 0\n");
+		usage_with_options(stat_usage, options);
+	}
+
 	if (!output) {
 		struct timespec tm;
 		mode = append_file ? "a" : "w";
@@ -1190,7 +1196,7 @@
 		}
 		clock_gettime(CLOCK_REALTIME, &tm);
 		fprintf(output, "# started on %s\n", ctime(&tm.tv_sec));
-	} else if (output_fd != 2) {
+	} else if (output_fd > 0) {
 		mode = append_file ? "a" : "w";
 		output = fdopen(output_fd, mode);
 		if (!output) {
diff --git a/tools/perf/builtin-top.c b/tools/perf/builtin-top.c
index 6031dce..6bb0277 100644
--- a/tools/perf/builtin-top.c
+++ b/tools/perf/builtin-top.c
@@ -787,7 +787,7 @@
 		}
 
 		if (symbol_conf.use_callchain) {
-			err = callchain_append(he->callchain, &evsel->hists.callchain_cursor,
+			err = callchain_append(he->callchain, &callchain_cursor,
 					       sample->period);
 			if (err)
 				return;
@@ -953,22 +953,22 @@
 				attr->config = PERF_COUNT_SW_CPU_CLOCK;
 				if (counter->name) {
 					free(counter->name);
-					counter->name = strdup(event_name(counter));
+					counter->name = NULL;
 				}
 				goto try_again;
 			}
 
 			if (err == ENOENT) {
-				ui__warning("The %s event is not supported.\n",
+				ui__error("The %s event is not supported.\n",
 					    event_name(counter));
 				goto out_err;
 			} else if (err == EMFILE) {
-				ui__warning("Too many events are opened.\n"
+				ui__error("Too many events are opened.\n"
 					    "Try again after reducing the number of events\n");
 				goto out_err;
 			}
 
-			ui__warning("The sys_perf_event_open() syscall "
+			ui__error("The sys_perf_event_open() syscall "
 				    "returned with %d (%s).  /bin/dmesg "
 				    "may provide additional information.\n"
 				    "No CONFIG_PERF_EVENTS=y kernel support "
@@ -978,7 +978,7 @@
 	}
 
 	if (perf_evlist__mmap(evlist, top->mmap_pages, false) < 0) {
-		ui__warning("Failed to mmap with %d (%s)\n",
+		ui__error("Failed to mmap with %d (%s)\n",
 			    errno, strerror(errno));
 		goto out_err;
 	}
@@ -994,12 +994,12 @@
 {
 	if (!top->sort_has_symbols) {
 		if (symbol_conf.use_callchain) {
-			ui__warning("Selected -g but \"sym\" not present in --sort/-s.");
+			ui__error("Selected -g but \"sym\" not present in --sort/-s.");
 			return -EINVAL;
 		}
 	} else if (!top->dont_use_callchains && callchain_param.mode != CHAIN_NONE) {
 		if (callchain_register_param(&callchain_param) < 0) {
-			ui__warning("Can't register callchain params.\n");
+			ui__error("Can't register callchain params.\n");
 			return -EINVAL;
 		}
 	}
@@ -1041,7 +1041,7 @@
 
 	if (pthread_create(&thread, NULL, (use_browser > 0 ? display_thread_tui :
 							    display_thread), top)) {
-		printf("Could not create display thread.\n");
+		ui__error("Could not create display thread.\n");
 		exit(-1);
 	}
 
@@ -1050,7 +1050,7 @@
 
 		param.sched_priority = top->realtime_prio;
 		if (sched_setscheduler(0, SCHED_FIFO, &param)) {
-			printf("Could not set realtime priority.\n");
+			ui__error("Could not set realtime priority.\n");
 			exit(-1);
 		}
 	}
@@ -1274,7 +1274,7 @@
 		int saved_errno = errno;
 
 		perf_target__strerror(&top.target, status, errbuf, BUFSIZ);
-		ui__warning("%s", errbuf);
+		ui__error("%s", errbuf);
 
 		status = -saved_errno;
 		goto out_delete_evlist;
@@ -1288,7 +1288,7 @@
 
 	if (!top.evlist->nr_entries &&
 	    perf_evlist__add_default(top.evlist) < 0) {
-		pr_err("Not enough memory for event selector list\n");
+		ui__error("Not enough memory for event selector list\n");
 		return -ENOMEM;
 	}
 
@@ -1305,7 +1305,7 @@
 	else if (top.freq) {
 		top.default_interval = top.freq;
 	} else {
-		fprintf(stderr, "frequency and count are zero, aborting\n");
+		ui__error("frequency and count are zero, aborting\n");
 		exit(EXIT_FAILURE);
 	}
 
diff --git a/tools/perf/design.txt b/tools/perf/design.txt
index bd0bb1b..67e5d0c 100644
--- a/tools/perf/design.txt
+++ b/tools/perf/design.txt
@@ -409,14 +409,15 @@
 prctl.  When a counter is disabled, it doesn't count or generate
 events but does continue to exist and maintain its count value.
 
-An individual counter or counter group can be enabled with
+An individual counter can be enabled with
 
-	ioctl(fd, PERF_EVENT_IOC_ENABLE);
+	ioctl(fd, PERF_EVENT_IOC_ENABLE, 0);
 
 or disabled with
 
-	ioctl(fd, PERF_EVENT_IOC_DISABLE);
+	ioctl(fd, PERF_EVENT_IOC_DISABLE, 0);
 
+For a counter group, pass PERF_IOC_FLAG_GROUP as the third argument.
 Enabling or disabling the leader of a group enables or disables the
 whole group; that is, while the group leader is disabled, none of the
 counters in the group will count.  Enabling or disabling a member of a
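
As a minimal sketch of the updated interface described above (counter_fd and group_leader_fd are placeholder descriptors assumed to come from sys_perf_event_open()):

	#include <sys/ioctl.h>
	#include <linux/perf_event.h>

	static void toggle_counters(int counter_fd, int group_leader_fd)
	{
		/* Enable just this one counter; the third argument is 0. */
		ioctl(counter_fd, PERF_EVENT_IOC_ENABLE, 0);

		/* Pass PERF_IOC_FLAG_GROUP on the leader to stop the whole group. */
		ioctl(group_leader_fd, PERF_EVENT_IOC_DISABLE, PERF_IOC_FLAG_GROUP);
	}
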
diff --git a/tools/perf/perf.h b/tools/perf/perf.h
index 14f1034..f960ccb 100644
--- a/tools/perf/perf.h
+++ b/tools/perf/perf.h
@@ -227,7 +227,7 @@
 	unsigned int freq;
 	unsigned int mmap_pages;
 	unsigned int user_freq;
-	int	     branch_stack;
+	u64          branch_stack;
 	u64	     default_interval;
 	u64	     user_interval;
 };
diff --git a/tools/perf/ui/browser.c b/tools/perf/ui/browser.c
index cde4d0f..1818a53 100644
--- a/tools/perf/ui/browser.c
+++ b/tools/perf/ui/browser.c
@@ -35,16 +35,16 @@
 	return ret;
 }
 
-void ui_browser__set_percent_color(struct ui_browser *self,
+void ui_browser__set_percent_color(struct ui_browser *browser,
 				   double percent, bool current)
 {
-	 int color = ui_browser__percent_color(self, percent, current);
-	 ui_browser__set_color(self, color);
+	 int color = ui_browser__percent_color(browser, percent, current);
+	 ui_browser__set_color(browser, color);
 }
 
-void ui_browser__gotorc(struct ui_browser *self, int y, int x)
+void ui_browser__gotorc(struct ui_browser *browser, int y, int x)
 {
-	SLsmg_gotorc(self->y + y, self->x + x);
+	SLsmg_gotorc(browser->y + y, browser->x + x);
 }
 
 static struct list_head *
@@ -73,23 +73,23 @@
 	return NULL;
 }
 
-void ui_browser__list_head_seek(struct ui_browser *self, off_t offset, int whence)
+void ui_browser__list_head_seek(struct ui_browser *browser, off_t offset, int whence)
 {
-	struct list_head *head = self->entries;
+	struct list_head *head = browser->entries;
 	struct list_head *pos;
 
-	if (self->nr_entries == 0)
+	if (browser->nr_entries == 0)
 		return;
 
 	switch (whence) {
 	case SEEK_SET:
-		pos = ui_browser__list_head_filter_entries(self, head->next);
+		pos = ui_browser__list_head_filter_entries(browser, head->next);
 		break;
 	case SEEK_CUR:
-		pos = self->top;
+		pos = browser->top;
 		break;
 	case SEEK_END:
-		pos = ui_browser__list_head_filter_prev_entries(self, head->prev);
+		pos = ui_browser__list_head_filter_prev_entries(browser, head->prev);
 		break;
 	default:
 		return;
@@ -99,18 +99,18 @@
 
 	if (offset > 0) {
 		while (offset-- != 0)
-			pos = ui_browser__list_head_filter_entries(self, pos->next);
+			pos = ui_browser__list_head_filter_entries(browser, pos->next);
 	} else {
 		while (offset++ != 0)
-			pos = ui_browser__list_head_filter_prev_entries(self, pos->prev);
+			pos = ui_browser__list_head_filter_prev_entries(browser, pos->prev);
 	}
 
-	self->top = pos;
+	browser->top = pos;
 }
 
-void ui_browser__rb_tree_seek(struct ui_browser *self, off_t offset, int whence)
+void ui_browser__rb_tree_seek(struct ui_browser *browser, off_t offset, int whence)
 {
-	struct rb_root *root = self->entries;
+	struct rb_root *root = browser->entries;
 	struct rb_node *nd;
 
 	switch (whence) {
@@ -118,7 +118,7 @@
 		nd = rb_first(root);
 		break;
 	case SEEK_CUR:
-		nd = self->top;
+		nd = browser->top;
 		break;
 	case SEEK_END:
 		nd = rb_last(root);
@@ -135,23 +135,23 @@
 			nd = rb_prev(nd);
 	}
 
-	self->top = nd;
+	browser->top = nd;
 }
 
-unsigned int ui_browser__rb_tree_refresh(struct ui_browser *self)
+unsigned int ui_browser__rb_tree_refresh(struct ui_browser *browser)
 {
 	struct rb_node *nd;
 	int row = 0;
 
-	if (self->top == NULL)
-                self->top = rb_first(self->entries);
+	if (browser->top == NULL)
+                browser->top = rb_first(browser->entries);
 
-	nd = self->top;
+	nd = browser->top;
 
 	while (nd != NULL) {
-		ui_browser__gotorc(self, row, 0);
-		self->write(self, nd, row);
-		if (++row == self->height)
+		ui_browser__gotorc(browser, row, 0);
+		browser->write(browser, nd, row);
+		if (++row == browser->height)
 			break;
 		nd = rb_next(nd);
 	}
@@ -159,17 +159,17 @@
 	return row;
 }
 
-bool ui_browser__is_current_entry(struct ui_browser *self, unsigned row)
+bool ui_browser__is_current_entry(struct ui_browser *browser, unsigned row)
 {
-	return self->top_idx + row == self->index;
+	return browser->top_idx + row == browser->index;
 }
 
-void ui_browser__refresh_dimensions(struct ui_browser *self)
+void ui_browser__refresh_dimensions(struct ui_browser *browser)
 {
-	self->width = SLtt_Screen_Cols - 1;
-	self->height = SLtt_Screen_Rows - 2;
-	self->y = 1;
-	self->x = 0;
+	browser->width = SLtt_Screen_Cols - 1;
+	browser->height = SLtt_Screen_Rows - 2;
+	browser->y = 1;
+	browser->x = 0;
 }
 
 void ui_browser__handle_resize(struct ui_browser *browser)
@@ -225,10 +225,10 @@
 	return key == K_ENTER || toupper(key) == 'Y';
 }
 
-void ui_browser__reset_index(struct ui_browser *self)
+void ui_browser__reset_index(struct ui_browser *browser)
 {
-	self->index = self->top_idx = 0;
-	self->seek(self, 0, SEEK_SET);
+	browser->index = browser->top_idx = 0;
+	browser->seek(browser, 0, SEEK_SET);
 }
 
 void __ui_browser__show_title(struct ui_browser *browser, const char *title)
@@ -245,26 +245,26 @@
 	pthread_mutex_unlock(&ui__lock);
 }
 
-int ui_browser__show(struct ui_browser *self, const char *title,
+int ui_browser__show(struct ui_browser *browser, const char *title,
 		     const char *helpline, ...)
 {
 	int err;
 	va_list ap;
 
-	ui_browser__refresh_dimensions(self);
+	ui_browser__refresh_dimensions(browser);
 
 	pthread_mutex_lock(&ui__lock);
-	__ui_browser__show_title(self, title);
+	__ui_browser__show_title(browser, title);
 
-	self->title = title;
-	free(self->helpline);
-	self->helpline = NULL;
+	browser->title = title;
+	free(browser->helpline);
+	browser->helpline = NULL;
 
 	va_start(ap, helpline);
-	err = vasprintf(&self->helpline, helpline, ap);
+	err = vasprintf(&browser->helpline, helpline, ap);
 	va_end(ap);
 	if (err > 0)
-		ui_helpline__push(self->helpline);
+		ui_helpline__push(browser->helpline);
 	pthread_mutex_unlock(&ui__lock);
 	return err ? 0 : -1;
 }
@@ -350,7 +350,7 @@
 	browser->seek(browser, browser->top_idx, SEEK_SET);
 }
 
-int ui_browser__run(struct ui_browser *self, int delay_secs)
+int ui_browser__run(struct ui_browser *browser, int delay_secs)
 {
 	int err, key;
 
@@ -358,7 +358,7 @@
 		off_t offset;
 
 		pthread_mutex_lock(&ui__lock);
-		err = __ui_browser__refresh(self);
+		err = __ui_browser__refresh(browser);
 		SLsmg_refresh();
 		pthread_mutex_unlock(&ui__lock);
 		if (err < 0)
@@ -368,18 +368,18 @@
 
 		if (key == K_RESIZE) {
 			ui__refresh_dimensions(false);
-			ui_browser__refresh_dimensions(self);
-			__ui_browser__show_title(self, self->title);
-			ui_helpline__puts(self->helpline);
+			ui_browser__refresh_dimensions(browser);
+			__ui_browser__show_title(browser, browser->title);
+			ui_helpline__puts(browser->helpline);
 			continue;
 		}
 
-		if (self->use_navkeypressed && !self->navkeypressed) {
+		if (browser->use_navkeypressed && !browser->navkeypressed) {
 			if (key == K_DOWN || key == K_UP ||
 			    key == K_PGDN || key == K_PGUP ||
 			    key == K_HOME || key == K_END ||
 			    key == ' ') {
-				self->navkeypressed = true;
+				browser->navkeypressed = true;
 				continue;
 			} else
 				return key;
@@ -387,59 +387,59 @@
 
 		switch (key) {
 		case K_DOWN:
-			if (self->index == self->nr_entries - 1)
+			if (browser->index == browser->nr_entries - 1)
 				break;
-			++self->index;
-			if (self->index == self->top_idx + self->height) {
-				++self->top_idx;
-				self->seek(self, +1, SEEK_CUR);
+			++browser->index;
+			if (browser->index == browser->top_idx + browser->height) {
+				++browser->top_idx;
+				browser->seek(browser, +1, SEEK_CUR);
 			}
 			break;
 		case K_UP:
-			if (self->index == 0)
+			if (browser->index == 0)
 				break;
-			--self->index;
-			if (self->index < self->top_idx) {
-				--self->top_idx;
-				self->seek(self, -1, SEEK_CUR);
+			--browser->index;
+			if (browser->index < browser->top_idx) {
+				--browser->top_idx;
+				browser->seek(browser, -1, SEEK_CUR);
 			}
 			break;
 		case K_PGDN:
 		case ' ':
-			if (self->top_idx + self->height > self->nr_entries - 1)
+			if (browser->top_idx + browser->height > browser->nr_entries - 1)
 				break;
 
-			offset = self->height;
-			if (self->index + offset > self->nr_entries - 1)
-				offset = self->nr_entries - 1 - self->index;
-			self->index += offset;
-			self->top_idx += offset;
-			self->seek(self, +offset, SEEK_CUR);
+			offset = browser->height;
+			if (browser->index + offset > browser->nr_entries - 1)
+				offset = browser->nr_entries - 1 - browser->index;
+			browser->index += offset;
+			browser->top_idx += offset;
+			browser->seek(browser, +offset, SEEK_CUR);
 			break;
 		case K_PGUP:
-			if (self->top_idx == 0)
+			if (browser->top_idx == 0)
 				break;
 
-			if (self->top_idx < self->height)
-				offset = self->top_idx;
+			if (browser->top_idx < browser->height)
+				offset = browser->top_idx;
 			else
-				offset = self->height;
+				offset = browser->height;
 
-			self->index -= offset;
-			self->top_idx -= offset;
-			self->seek(self, -offset, SEEK_CUR);
+			browser->index -= offset;
+			browser->top_idx -= offset;
+			browser->seek(browser, -offset, SEEK_CUR);
 			break;
 		case K_HOME:
-			ui_browser__reset_index(self);
+			ui_browser__reset_index(browser);
 			break;
 		case K_END:
-			offset = self->height - 1;
-			if (offset >= self->nr_entries)
-				offset = self->nr_entries - 1;
+			offset = browser->height - 1;
+			if (offset >= browser->nr_entries)
+				offset = browser->nr_entries - 1;
 
-			self->index = self->nr_entries - 1;
-			self->top_idx = self->index - offset;
-			self->seek(self, -offset, SEEK_END);
+			browser->index = browser->nr_entries - 1;
+			browser->top_idx = browser->index - offset;
+			browser->seek(browser, -offset, SEEK_END);
 			break;
 		default:
 			return key;
@@ -448,22 +448,22 @@
 	return -1;
 }
 
-unsigned int ui_browser__list_head_refresh(struct ui_browser *self)
+unsigned int ui_browser__list_head_refresh(struct ui_browser *browser)
 {
 	struct list_head *pos;
-	struct list_head *head = self->entries;
+	struct list_head *head = browser->entries;
 	int row = 0;
 
-	if (self->top == NULL || self->top == self->entries)
-                self->top = ui_browser__list_head_filter_entries(self, head->next);
+	if (browser->top == NULL || browser->top == browser->entries)
+                browser->top = ui_browser__list_head_filter_entries(browser, head->next);
 
-	pos = self->top;
+	pos = browser->top;
 
 	list_for_each_from(pos, head) {
-		if (!self->filter || !self->filter(self, pos)) {
-			ui_browser__gotorc(self, row, 0);
-			self->write(self, pos, row);
-			if (++row == self->height)
+		if (!browser->filter || !browser->filter(browser, pos)) {
+			ui_browser__gotorc(browser, row, 0);
+			browser->write(browser, pos, row);
+			if (++row == browser->height)
 				break;
 		}
 	}
@@ -708,4 +708,6 @@
 		struct ui_browser__colorset *c = &ui_browser__colorsets[i++];
 		sltt_set_color(c->colorset, c->name, c->fg, c->bg);
 	}
+
+	annotate_browser__init();
 }
diff --git a/tools/perf/ui/browser.h b/tools/perf/ui/browser.h
index dd96d82..af70314 100644
--- a/tools/perf/ui/browser.h
+++ b/tools/perf/ui/browser.h
@@ -69,4 +69,5 @@
 unsigned int ui_browser__list_head_refresh(struct ui_browser *self);
 
 void ui_browser__init(void);
+void annotate_browser__init(void);
 #endif /* _PERF_UI_BROWSER_H_ */
diff --git a/tools/perf/ui/browsers/annotate.c b/tools/perf/ui/browsers/annotate.c
index 6e0ef79..34b1c46 100644
--- a/tools/perf/ui/browsers/annotate.c
+++ b/tools/perf/ui/browsers/annotate.c
@@ -19,6 +19,16 @@
 	int		jump_sources;
 };
 
+static struct annotate_browser_opt {
+	bool hide_src_code,
+	     use_offset,
+	     jump_arrows,
+	     show_nr_jumps;
+} annotate_browser__opts = {
+	.use_offset	= true,
+	.jump_arrows	= true,
+};
+
 struct annotate_browser {
 	struct ui_browser b;
 	struct rb_root	  entries;
@@ -30,10 +40,6 @@
 	int		    nr_entries;
 	int		    max_jump_sources;
 	int		    nr_jumps;
-	bool		    hide_src_code;
-	bool		    use_offset;
-	bool		    jump_arrows;
-	bool		    show_nr_jumps;
 	bool		    searching_backwards;
 	u8		    addr_width;
 	u8		    jumps_width;
@@ -48,11 +54,9 @@
 	return (struct browser_disasm_line *)(dl + 1);
 }
 
-static bool disasm_line__filter(struct ui_browser *browser, void *entry)
+static bool disasm_line__filter(struct ui_browser *browser __used, void *entry)
 {
-	struct annotate_browser *ab = container_of(browser, struct annotate_browser, b);
-
-	if (ab->hide_src_code) {
+	if (annotate_browser__opts.hide_src_code) {
 		struct disasm_line *dl = list_entry(entry, struct disasm_line, node);
 		return dl->offset == -1;
 	}
@@ -79,30 +83,30 @@
 	 return ui_browser__set_color(&browser->b, color);
 }
 
-static void annotate_browser__write(struct ui_browser *self, void *entry, int row)
+static void annotate_browser__write(struct ui_browser *browser, void *entry, int row)
 {
-	struct annotate_browser *ab = container_of(self, struct annotate_browser, b);
+	struct annotate_browser *ab = container_of(browser, struct annotate_browser, b);
 	struct disasm_line *dl = list_entry(entry, struct disasm_line, node);
 	struct browser_disasm_line *bdl = disasm_line__browser(dl);
-	bool current_entry = ui_browser__is_current_entry(self, row);
-	bool change_color = (!ab->hide_src_code &&
-			     (!current_entry || (self->use_navkeypressed &&
-					         !self->navkeypressed)));
-	int width = self->width, printed;
+	bool current_entry = ui_browser__is_current_entry(browser, row);
+	bool change_color = (!annotate_browser__opts.hide_src_code &&
+			     (!current_entry || (browser->use_navkeypressed &&
+					         !browser->navkeypressed)));
+	int width = browser->width, printed;
 	char bf[256];
 
 	if (dl->offset != -1 && bdl->percent != 0.0) {
-		ui_browser__set_percent_color(self, bdl->percent, current_entry);
+		ui_browser__set_percent_color(browser, bdl->percent, current_entry);
 		slsmg_printf("%6.2f ", bdl->percent);
 	} else {
-		ui_browser__set_percent_color(self, 0, current_entry);
+		ui_browser__set_percent_color(browser, 0, current_entry);
 		slsmg_write_nstring(" ", 7);
 	}
 
 	SLsmg_write_char(' ');
 
 	/* The scroll bar isn't being used */
-	if (!self->navkeypressed)
+	if (!browser->navkeypressed)
 		width += 1;
 
 	if (!*dl->line)
@@ -116,14 +120,14 @@
 		u64 addr = dl->offset;
 		int color = -1;
 
-		if (!ab->use_offset)
+		if (!annotate_browser__opts.use_offset)
 			addr += ab->start;
 
-		if (!ab->use_offset) {
+		if (!annotate_browser__opts.use_offset) {
 			printed = scnprintf(bf, sizeof(bf), "%" PRIx64 ": ", addr);
 		} else {
 			if (bdl->jump_sources) {
-				if (ab->show_nr_jumps) {
+				if (annotate_browser__opts.show_nr_jumps) {
 					int prev;
 					printed = scnprintf(bf, sizeof(bf), "%*d ",
 							    ab->jumps_width,
@@ -131,7 +135,7 @@
 					prev = annotate_browser__set_jumps_percent_color(ab, bdl->jump_sources,
 											 current_entry);
 					slsmg_write_nstring(bf, printed);
-					ui_browser__set_color(self, prev);
+					ui_browser__set_color(browser, prev);
 				}
 
 				printed = scnprintf(bf, sizeof(bf), "%*" PRIx64 ": ",
@@ -143,19 +147,19 @@
 		}
 
 		if (change_color)
-			color = ui_browser__set_color(self, HE_COLORSET_ADDR);
+			color = ui_browser__set_color(browser, HE_COLORSET_ADDR);
 		slsmg_write_nstring(bf, printed);
 		if (change_color)
-			ui_browser__set_color(self, color);
+			ui_browser__set_color(browser, color);
 		if (dl->ins && dl->ins->ops->scnprintf) {
 			if (ins__is_jump(dl->ins)) {
 				bool fwd = dl->ops.target.offset > (u64)dl->offset;
 
-				ui_browser__write_graph(self, fwd ? SLSMG_DARROW_CHAR :
+				ui_browser__write_graph(browser, fwd ? SLSMG_DARROW_CHAR :
 								    SLSMG_UARROW_CHAR);
 				SLsmg_write_char(' ');
 			} else if (ins__is_call(dl->ins)) {
-				ui_browser__write_graph(self, SLSMG_RARROW_CHAR);
+				ui_browser__write_graph(browser, SLSMG_RARROW_CHAR);
 				SLsmg_write_char(' ');
 			} else {
 				slsmg_write_nstring(" ", 2);
@@ -164,12 +168,12 @@
 			if (strcmp(dl->name, "retq")) {
 				slsmg_write_nstring(" ", 2);
 			} else {
-				ui_browser__write_graph(self, SLSMG_LARROW_CHAR);
+				ui_browser__write_graph(browser, SLSMG_LARROW_CHAR);
 				SLsmg_write_char(' ');
 			}
 		}
 
-		disasm_line__scnprintf(dl, bf, sizeof(bf), !ab->use_offset);
+		disasm_line__scnprintf(dl, bf, sizeof(bf), !annotate_browser__opts.use_offset);
 		slsmg_write_nstring(bf, width - 10 - printed);
 	}
 
@@ -184,7 +188,7 @@
 	struct browser_disasm_line *btarget, *bcursor;
 	unsigned int from, to;
 
-	if (!cursor->ins || !ins__is_jump(cursor->ins) ||
+	if (!cursor || !cursor->ins || !ins__is_jump(cursor->ins) ||
 	    !disasm_line__has_offset(cursor))
 		return;
 
@@ -195,7 +199,7 @@
 	bcursor = disasm_line__browser(cursor);
 	btarget = disasm_line__browser(target);
 
-	if (ab->hide_src_code) {
+	if (annotate_browser__opts.hide_src_code) {
 		from = bcursor->idx_asm;
 		to = btarget->idx_asm;
 	} else {
@@ -209,10 +213,9 @@
 
 static unsigned int annotate_browser__refresh(struct ui_browser *browser)
 {
-	struct annotate_browser *ab = container_of(browser, struct annotate_browser, b);
 	int ret = ui_browser__list_head_refresh(browser);
 
-	if (ab->jump_arrows)
+	if (annotate_browser__opts.jump_arrows)
 		annotate_browser__draw_current_jump(browser);
 
 	ui_browser__set_color(browser, HE_COLORSET_NORMAL);
@@ -272,27 +275,27 @@
 	rb_insert_color(&bdl->rb_node, root);
 }
 
-static void annotate_browser__set_top(struct annotate_browser *self,
+static void annotate_browser__set_top(struct annotate_browser *browser,
 				      struct disasm_line *pos, u32 idx)
 {
 	unsigned back;
 
-	ui_browser__refresh_dimensions(&self->b);
-	back = self->b.height / 2;
-	self->b.top_idx = self->b.index = idx;
+	ui_browser__refresh_dimensions(&browser->b);
+	back = browser->b.height / 2;
+	browser->b.top_idx = browser->b.index = idx;
 
-	while (self->b.top_idx != 0 && back != 0) {
+	while (browser->b.top_idx != 0 && back != 0) {
 		pos = list_entry(pos->node.prev, struct disasm_line, node);
 
-		if (disasm_line__filter(&self->b, &pos->node))
+		if (disasm_line__filter(&browser->b, &pos->node))
 			continue;
 
-		--self->b.top_idx;
+		--browser->b.top_idx;
 		--back;
 	}
 
-	self->b.top = pos;
-	self->b.navkeypressed = true;
+	browser->b.top = pos;
+	browser->b.navkeypressed = true;
 }
 
 static void annotate_browser__set_rb_top(struct annotate_browser *browser,
@@ -300,10 +303,14 @@
 {
 	struct browser_disasm_line *bpos;
 	struct disasm_line *pos;
+	u32 idx;
 
 	bpos = rb_entry(nd, struct browser_disasm_line, rb_node);
 	pos = ((struct disasm_line *)bpos) - 1;
-	annotate_browser__set_top(browser, pos, bpos->idx);
+	idx = bpos->idx;
+	if (annotate_browser__opts.hide_src_code)
+		idx = bpos->idx_asm;
+	annotate_browser__set_top(browser, pos, idx);
 	browser->curr_hot = nd;
 }
 
@@ -343,12 +350,12 @@
 	dl = list_entry(browser->b.top, struct disasm_line, node);
 	bdl = disasm_line__browser(dl);
 
-	if (browser->hide_src_code) {
+	if (annotate_browser__opts.hide_src_code) {
 		if (bdl->idx_asm < offset)
 			offset = bdl->idx;
 
 		browser->b.nr_entries = browser->nr_entries;
-		browser->hide_src_code = false;
+		annotate_browser__opts.hide_src_code = false;
 		browser->b.seek(&browser->b, -offset, SEEK_CUR);
 		browser->b.top_idx = bdl->idx - offset;
 		browser->b.index = bdl->idx;
@@ -363,7 +370,7 @@
 			offset = bdl->idx_asm;
 
 		browser->b.nr_entries = browser->nr_asm_entries;
-		browser->hide_src_code = true;
+		annotate_browser__opts.hide_src_code = true;
 		browser->b.seek(&browser->b, -offset, SEEK_CUR);
 		browser->b.top_idx = bdl->idx_asm - offset;
 		browser->b.index = bdl->idx_asm;
@@ -372,6 +379,12 @@
 	return true;
 }
 
+static void annotate_browser__init_asm_mode(struct annotate_browser *browser)
+{
+	ui_browser__reset_index(&browser->b);
+	browser->b.nr_entries = browser->nr_asm_entries;
+}
+
 static bool annotate_browser__callq(struct annotate_browser *browser,
 				    int evidx, void (*timer)(void *arg),
 				    void *arg, int delay_secs)
@@ -574,33 +587,46 @@
 	return __annotate_browser__search_reverse(browser);
 }
 
-static int annotate_browser__run(struct annotate_browser *self, int evidx,
+static void annotate_browser__update_addr_width(struct annotate_browser *browser)
+{
+	if (annotate_browser__opts.use_offset)
+		browser->target_width = browser->min_addr_width;
+	else
+		browser->target_width = browser->max_addr_width;
+
+	browser->addr_width = browser->target_width;
+
+	if (annotate_browser__opts.show_nr_jumps)
+		browser->addr_width += browser->jumps_width + 1;
+}
+
+static int annotate_browser__run(struct annotate_browser *browser, int evidx,
 				 void(*timer)(void *arg),
 				 void *arg, int delay_secs)
 {
 	struct rb_node *nd = NULL;
-	struct map_symbol *ms = self->b.priv;
+	struct map_symbol *ms = browser->b.priv;
 	struct symbol *sym = ms->sym;
 	const char *help = "Press 'h' for help on key bindings";
 	int key;
 
-	if (ui_browser__show(&self->b, sym->name, help) < 0)
+	if (ui_browser__show(&browser->b, sym->name, help) < 0)
 		return -1;
 
-	annotate_browser__calc_percent(self, evidx);
+	annotate_browser__calc_percent(browser, evidx);
 
-	if (self->curr_hot) {
-		annotate_browser__set_rb_top(self, self->curr_hot);
-		self->b.navkeypressed = false;
+	if (browser->curr_hot) {
+		annotate_browser__set_rb_top(browser, browser->curr_hot);
+		browser->b.navkeypressed = false;
 	}
 
-	nd = self->curr_hot;
+	nd = browser->curr_hot;
 
 	while (1) {
-		key = ui_browser__run(&self->b, delay_secs);
+		key = ui_browser__run(&browser->b, delay_secs);
 
 		if (delay_secs != 0) {
-			annotate_browser__calc_percent(self, evidx);
+			annotate_browser__calc_percent(browser, evidx);
 			/*
 			 * Current line focus got out of the list of most active
 			 * lines, NULL it so that if TAB|UNTAB is pressed, we
@@ -622,27 +648,27 @@
 			if (nd != NULL) {
 				nd = rb_prev(nd);
 				if (nd == NULL)
-					nd = rb_last(&self->entries);
+					nd = rb_last(&browser->entries);
 			} else
-				nd = self->curr_hot;
+				nd = browser->curr_hot;
 			break;
 		case K_UNTAB:
 			if (nd != NULL)
 				nd = rb_next(nd);
 				if (nd == NULL)
-					nd = rb_first(&self->entries);
+					nd = rb_first(&browser->entries);
 			else
-				nd = self->curr_hot;
+				nd = browser->curr_hot;
 			break;
 		case K_F1:
 		case 'h':
-			ui_browser__help_window(&self->b,
+			ui_browser__help_window(&browser->b,
 		"UP/DOWN/PGUP\n"
 		"PGDN/SPACE    Navigate\n"
 		"q/ESC/CTRL+C  Exit\n\n"
 		"->            Go to target\n"
 		"<-            Exit\n"
-		"h             Cycle thru hottest instructions\n"
+		"H             Cycle thru hottest instructions\n"
 		"j             Toggle showing jump to target arrows\n"
 		"J             Toggle showing number of jump sources on targets\n"
 		"n             Search next string\n"
@@ -652,57 +678,62 @@
 		"?             Search previous string\n");
 			continue;
 		case 'H':
-			nd = self->curr_hot;
+			nd = browser->curr_hot;
 			break;
 		case 's':
-			if (annotate_browser__toggle_source(self))
+			if (annotate_browser__toggle_source(browser))
 				ui_helpline__puts(help);
 			continue;
 		case 'o':
-			self->use_offset = !self->use_offset;
-			if (self->use_offset)
-				self->target_width = self->min_addr_width;
-			else
-				self->target_width = self->max_addr_width;
-update_addr_width:
-			self->addr_width = self->target_width;
-			if (self->show_nr_jumps)
-				self->addr_width += self->jumps_width + 1;
+			annotate_browser__opts.use_offset = !annotate_browser__opts.use_offset;
+			annotate_browser__update_addr_width(browser);
 			continue;
 		case 'j':
-			self->jump_arrows = !self->jump_arrows;
+			annotate_browser__opts.jump_arrows = !annotate_browser__opts.jump_arrows;
 			continue;
 		case 'J':
-			self->show_nr_jumps = !self->show_nr_jumps;
-			goto update_addr_width;
+			annotate_browser__opts.show_nr_jumps = !annotate_browser__opts.show_nr_jumps;
+			annotate_browser__update_addr_width(browser);
+			continue;
 		case '/':
-			if (annotate_browser__search(self, delay_secs)) {
+			if (annotate_browser__search(browser, delay_secs)) {
 show_help:
 				ui_helpline__puts(help);
 			}
 			continue;
 		case 'n':
-			if (self->searching_backwards ?
-			    annotate_browser__continue_search_reverse(self, delay_secs) :
-			    annotate_browser__continue_search(self, delay_secs))
+			if (browser->searching_backwards ?
+			    annotate_browser__continue_search_reverse(browser, delay_secs) :
+			    annotate_browser__continue_search(browser, delay_secs))
 				goto show_help;
 			continue;
 		case '?':
-			if (annotate_browser__search_reverse(self, delay_secs))
+			if (annotate_browser__search_reverse(browser, delay_secs))
 				goto show_help;
 			continue;
+		case 'D': {
+			static int seq;
+			ui_helpline__pop();
+			ui_helpline__fpush("%d: nr_ent=%d, height=%d, idx=%d, top_idx=%d, nr_asm_entries=%d",
+					   seq++, browser->b.nr_entries,
+					   browser->b.height,
+					   browser->b.index,
+					   browser->b.top_idx,
+					   browser->nr_asm_entries);
+		}
+			continue;
 		case K_ENTER:
 		case K_RIGHT:
-			if (self->selection == NULL)
+			if (browser->selection == NULL)
 				ui_helpline__puts("Huh? No selection. Report to linux-kernel@vger.kernel.org");
-			else if (self->selection->offset == -1)
+			else if (browser->selection->offset == -1)
 				ui_helpline__puts("Actions are only available for assembly lines.");
-			else if (!self->selection->ins) {
-				if (strcmp(self->selection->name, "retq"))
+			else if (!browser->selection->ins) {
+				if (strcmp(browser->selection->name, "retq"))
 					goto show_sup_ins;
 				goto out;
-			} else if (!(annotate_browser__jump(self) ||
-				     annotate_browser__callq(self, evidx, timer, arg, delay_secs))) {
+			} else if (!(annotate_browser__jump(browser) ||
+				     annotate_browser__callq(browser, evidx, timer, arg, delay_secs))) {
 show_sup_ins:
 				ui_helpline__puts("Actions are only available for 'callq', 'retq' & jump instructions.");
 			}
@@ -717,10 +748,10 @@
 		}
 
 		if (nd != NULL)
-			annotate_browser__set_rb_top(self, nd);
+			annotate_browser__set_rb_top(browser, nd);
 	}
 out:
-	ui_browser__hide(&self->b);
+	ui_browser__hide(&browser->b);
 	return key;
 }
 
@@ -797,8 +828,6 @@
 			.priv	 = &ms,
 			.use_navkeypressed = true,
 		},
-		.use_offset = true,
-		.jump_arrows = true,
 	};
 	int ret = -1;
 
@@ -855,6 +884,12 @@
 	browser.b.nr_entries = browser.nr_entries;
 	browser.b.entries = &notes->src->source,
 	browser.b.width += 18; /* Percentage */
+
+	if (annotate_browser__opts.hide_src_code)
+		annotate_browser__init_asm_mode(&browser);
+
+	annotate_browser__update_addr_width(&browser);
+
 	ret = annotate_browser__run(&browser, evidx, timer, arg, delay_secs);
 	list_for_each_entry_safe(pos, n, &notes->src->source, node) {
 		list_del(&pos->node);
@@ -865,3 +900,52 @@
 	free(browser.offsets);
 	return ret;
 }
+
+#define ANNOTATE_CFG(n) \
+	{ .name = #n, .value = &annotate_browser__opts.n, }
+	
+/*
+ * Keep the entries sorted, they are bsearch'ed
+ */
+static struct annotate__config {
+	const char *name;
+	bool *value;
+} annotate__configs[] = {
+	ANNOTATE_CFG(hide_src_code),
+	ANNOTATE_CFG(jump_arrows),
+	ANNOTATE_CFG(show_nr_jumps),
+	ANNOTATE_CFG(use_offset),
+};
+
+#undef ANNOTATE_CFG
+
+static int annotate_config__cmp(const void *name, const void *cfgp)
+{
+	const struct annotate__config *cfg = cfgp;
+
+	return strcmp(name, cfg->name);
+}
+
+static int annotate__config(const char *var, const char *value, void *data __used)
+{
+	struct annotate__config *cfg;
+	const char *name;
+
+	if (prefixcmp(var, "annotate.") != 0)
+		return 0;
+
+	name = var + 9;
+	cfg = bsearch(name, annotate__configs, ARRAY_SIZE(annotate__configs),
+		      sizeof(struct annotate__config), annotate_config__cmp);
+
+	if (cfg == NULL)
+		return -1;
+
+	*cfg->value = perf_config_bool(name, value);
+	return 0;
+}
+
+void annotate_browser__init(void)
+{
+	perf_config(annotate__config, NULL);
+}
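
With annotate_browser__init() hooked into ui_browser__init() above, the four annotate booleans become user-configurable; a plausible ~/.perfconfig fragment (mirroring the defaults added to perfconfig.example earlier in this merge) would look like:

	[annotate]
		hide_src_code = false
		use_offset = true
		jump_arrows = true
		show_nr_jumps = false

Because annotate__configs[] is bsearch'ed, any future option only needs to be added in name-sorted order for the lookup to keep working.
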
diff --git a/tools/perf/ui/browsers/hists.c b/tools/perf/ui/browsers/hists.c
index a372a4b..53f6697 100644
--- a/tools/perf/ui/browsers/hists.c
+++ b/tools/perf/ui/browsers/hists.c
@@ -26,21 +26,21 @@
 	bool		     has_symbols;
 };
 
-static int hists__browser_title(struct hists *self, char *bf, size_t size,
+static int hists__browser_title(struct hists *hists, char *bf, size_t size,
 				const char *ev_name);
 
-static void hist_browser__refresh_dimensions(struct hist_browser *self)
+static void hist_browser__refresh_dimensions(struct hist_browser *browser)
 {
 	/* 3 == +/- toggle symbol before actual hist_entry rendering */
-	self->b.width = 3 + (hists__sort_list_width(self->hists) +
+	browser->b.width = 3 + (hists__sort_list_width(browser->hists) +
 			     sizeof("[k]"));
 }
 
-static void hist_browser__reset(struct hist_browser *self)
+static void hist_browser__reset(struct hist_browser *browser)
 {
-	self->b.nr_entries = self->hists->nr_entries;
-	hist_browser__refresh_dimensions(self);
-	ui_browser__reset_index(&self->b);
+	browser->b.nr_entries = browser->hists->nr_entries;
+	hist_browser__refresh_dimensions(browser);
+	ui_browser__reset_index(&browser->b);
 }
 
 static char tree__folded_sign(bool unfolded)
@@ -48,32 +48,32 @@
 	return unfolded ? '-' : '+';
 }
 
-static char map_symbol__folded(const struct map_symbol *self)
+static char map_symbol__folded(const struct map_symbol *ms)
 {
-	return self->has_children ? tree__folded_sign(self->unfolded) : ' ';
+	return ms->has_children ? tree__folded_sign(ms->unfolded) : ' ';
 }
 
-static char hist_entry__folded(const struct hist_entry *self)
+static char hist_entry__folded(const struct hist_entry *he)
 {
-	return map_symbol__folded(&self->ms);
+	return map_symbol__folded(&he->ms);
 }
 
-static char callchain_list__folded(const struct callchain_list *self)
+static char callchain_list__folded(const struct callchain_list *cl)
 {
-	return map_symbol__folded(&self->ms);
+	return map_symbol__folded(&cl->ms);
 }
 
-static void map_symbol__set_folding(struct map_symbol *self, bool unfold)
+static void map_symbol__set_folding(struct map_symbol *ms, bool unfold)
 {
-	self->unfolded = unfold ? self->has_children : false;
+	ms->unfolded = unfold ? ms->has_children : false;
 }
 
-static int callchain_node__count_rows_rb_tree(struct callchain_node *self)
+static int callchain_node__count_rows_rb_tree(struct callchain_node *node)
 {
 	int n = 0;
 	struct rb_node *nd;
 
-	for (nd = rb_first(&self->rb_root); nd; nd = rb_next(nd)) {
+	for (nd = rb_first(&node->rb_root); nd; nd = rb_next(nd)) {
 		struct callchain_node *child = rb_entry(nd, struct callchain_node, rb_node);
 		struct callchain_list *chain;
 		char folded_sign = ' '; /* No children */
@@ -123,23 +123,23 @@
 	return n;
 }
 
-static bool map_symbol__toggle_fold(struct map_symbol *self)
+static bool map_symbol__toggle_fold(struct map_symbol *ms)
 {
-	if (!self)
+	if (!ms)
 		return false;
 
-	if (!self->has_children)
+	if (!ms->has_children)
 		return false;
 
-	self->unfolded = !self->unfolded;
+	ms->unfolded = !ms->unfolded;
 	return true;
 }
 
-static void callchain_node__init_have_children_rb_tree(struct callchain_node *self)
+static void callchain_node__init_have_children_rb_tree(struct callchain_node *node)
 {
-	struct rb_node *nd = rb_first(&self->rb_root);
+	struct rb_node *nd = rb_first(&node->rb_root);
 
-	for (nd = rb_first(&self->rb_root); nd; nd = rb_next(nd)) {
+	for (nd = rb_first(&node->rb_root); nd; nd = rb_next(nd)) {
 		struct callchain_node *child = rb_entry(nd, struct callchain_node, rb_node);
 		struct callchain_list *chain;
 		bool first = true;
@@ -158,49 +158,49 @@
 	}
 }
 
-static void callchain_node__init_have_children(struct callchain_node *self)
+static void callchain_node__init_have_children(struct callchain_node *node)
 {
 	struct callchain_list *chain;
 
-	list_for_each_entry(chain, &self->val, list)
-		chain->ms.has_children = !RB_EMPTY_ROOT(&self->rb_root);
+	list_for_each_entry(chain, &node->val, list)
+		chain->ms.has_children = !RB_EMPTY_ROOT(&node->rb_root);
 
-	callchain_node__init_have_children_rb_tree(self);
+	callchain_node__init_have_children_rb_tree(node);
 }
 
-static void callchain__init_have_children(struct rb_root *self)
+static void callchain__init_have_children(struct rb_root *root)
 {
 	struct rb_node *nd;
 
-	for (nd = rb_first(self); nd; nd = rb_next(nd)) {
+	for (nd = rb_first(root); nd; nd = rb_next(nd)) {
 		struct callchain_node *node = rb_entry(nd, struct callchain_node, rb_node);
 		callchain_node__init_have_children(node);
 	}
 }
 
-static void hist_entry__init_have_children(struct hist_entry *self)
+static void hist_entry__init_have_children(struct hist_entry *he)
 {
-	if (!self->init_have_children) {
-		self->ms.has_children = !RB_EMPTY_ROOT(&self->sorted_chain);
-		callchain__init_have_children(&self->sorted_chain);
-		self->init_have_children = true;
+	if (!he->init_have_children) {
+		he->ms.has_children = !RB_EMPTY_ROOT(&he->sorted_chain);
+		callchain__init_have_children(&he->sorted_chain);
+		he->init_have_children = true;
 	}
 }
 
-static bool hist_browser__toggle_fold(struct hist_browser *self)
+static bool hist_browser__toggle_fold(struct hist_browser *browser)
 {
-	if (map_symbol__toggle_fold(self->selection)) {
-		struct hist_entry *he = self->he_selection;
+	if (map_symbol__toggle_fold(browser->selection)) {
+		struct hist_entry *he = browser->he_selection;
 
 		hist_entry__init_have_children(he);
-		self->hists->nr_entries -= he->nr_rows;
+		browser->hists->nr_entries -= he->nr_rows;
 
 		if (he->ms.unfolded)
 			he->nr_rows = callchain__count_rows(&he->sorted_chain);
 		else
 			he->nr_rows = 0;
-		self->hists->nr_entries += he->nr_rows;
-		self->b.nr_entries = self->hists->nr_entries;
+		browser->hists->nr_entries += he->nr_rows;
+		browser->b.nr_entries = browser->hists->nr_entries;
 
 		return true;
 	}
@@ -209,12 +209,12 @@
 	return false;
 }
 
-static int callchain_node__set_folding_rb_tree(struct callchain_node *self, bool unfold)
+static int callchain_node__set_folding_rb_tree(struct callchain_node *node, bool unfold)
 {
 	int n = 0;
 	struct rb_node *nd;
 
-	for (nd = rb_first(&self->rb_root); nd; nd = rb_next(nd)) {
+	for (nd = rb_first(&node->rb_root); nd; nd = rb_next(nd)) {
 		struct callchain_node *child = rb_entry(nd, struct callchain_node, rb_node);
 		struct callchain_list *chain;
 		bool has_children = false;
@@ -263,37 +263,37 @@
 	return n;
 }
 
-static void hist_entry__set_folding(struct hist_entry *self, bool unfold)
+static void hist_entry__set_folding(struct hist_entry *he, bool unfold)
 {
-	hist_entry__init_have_children(self);
-	map_symbol__set_folding(&self->ms, unfold);
+	hist_entry__init_have_children(he);
+	map_symbol__set_folding(&he->ms, unfold);
 
-	if (self->ms.has_children) {
-		int n = callchain__set_folding(&self->sorted_chain, unfold);
-		self->nr_rows = unfold ? n : 0;
+	if (he->ms.has_children) {
+		int n = callchain__set_folding(&he->sorted_chain, unfold);
+		he->nr_rows = unfold ? n : 0;
 	} else
-		self->nr_rows = 0;
+		he->nr_rows = 0;
 }
 
-static void hists__set_folding(struct hists *self, bool unfold)
+static void hists__set_folding(struct hists *hists, bool unfold)
 {
 	struct rb_node *nd;
 
-	self->nr_entries = 0;
+	hists->nr_entries = 0;
 
-	for (nd = rb_first(&self->entries); nd; nd = rb_next(nd)) {
+	for (nd = rb_first(&hists->entries); nd; nd = rb_next(nd)) {
 		struct hist_entry *he = rb_entry(nd, struct hist_entry, rb_node);
 		hist_entry__set_folding(he, unfold);
-		self->nr_entries += 1 + he->nr_rows;
+		hists->nr_entries += 1 + he->nr_rows;
 	}
 }
 
-static void hist_browser__set_folding(struct hist_browser *self, bool unfold)
+static void hist_browser__set_folding(struct hist_browser *browser, bool unfold)
 {
-	hists__set_folding(self->hists, unfold);
-	self->b.nr_entries = self->hists->nr_entries;
+	hists__set_folding(browser->hists, unfold);
+	browser->b.nr_entries = browser->hists->nr_entries;
 	/* Go to the start, we may be way after valid entries after a collapse */
-	ui_browser__reset_index(&self->b);
+	ui_browser__reset_index(&browser->b);
 }
 
 static void ui_browser__warn_lost_events(struct ui_browser *browser)
@@ -305,64 +305,64 @@
 		"Or reduce the sampling frequency.");
 }
 
-static int hist_browser__run(struct hist_browser *self, const char *ev_name,
+static int hist_browser__run(struct hist_browser *browser, const char *ev_name,
 			     void(*timer)(void *arg), void *arg, int delay_secs)
 {
 	int key;
 	char title[160];
 
-	self->b.entries = &self->hists->entries;
-	self->b.nr_entries = self->hists->nr_entries;
+	browser->b.entries = &browser->hists->entries;
+	browser->b.nr_entries = browser->hists->nr_entries;
 
-	hist_browser__refresh_dimensions(self);
-	hists__browser_title(self->hists, title, sizeof(title), ev_name);
+	hist_browser__refresh_dimensions(browser);
+	hists__browser_title(browser->hists, title, sizeof(title), ev_name);
 
-	if (ui_browser__show(&self->b, title,
+	if (ui_browser__show(&browser->b, title,
 			     "Press '?' for help on key bindings") < 0)
 		return -1;
 
 	while (1) {
-		key = ui_browser__run(&self->b, delay_secs);
+		key = ui_browser__run(&browser->b, delay_secs);
 
 		switch (key) {
 		case K_TIMER:
 			timer(arg);
-			ui_browser__update_nr_entries(&self->b, self->hists->nr_entries);
+			ui_browser__update_nr_entries(&browser->b, browser->hists->nr_entries);
 
-			if (self->hists->stats.nr_lost_warned !=
-			    self->hists->stats.nr_events[PERF_RECORD_LOST]) {
-				self->hists->stats.nr_lost_warned =
-					self->hists->stats.nr_events[PERF_RECORD_LOST];
-				ui_browser__warn_lost_events(&self->b);
+			if (browser->hists->stats.nr_lost_warned !=
+			    browser->hists->stats.nr_events[PERF_RECORD_LOST]) {
+				browser->hists->stats.nr_lost_warned =
+					browser->hists->stats.nr_events[PERF_RECORD_LOST];
+				ui_browser__warn_lost_events(&browser->b);
 			}
 
-			hists__browser_title(self->hists, title, sizeof(title), ev_name);
-			ui_browser__show_title(&self->b, title);
+			hists__browser_title(browser->hists, title, sizeof(title), ev_name);
+			ui_browser__show_title(&browser->b, title);
 			continue;
 		case 'D': { /* Debug */
 			static int seq;
-			struct hist_entry *h = rb_entry(self->b.top,
+			struct hist_entry *h = rb_entry(browser->b.top,
 							struct hist_entry, rb_node);
 			ui_helpline__pop();
 			ui_helpline__fpush("%d: nr_ent=(%d,%d), height=%d, idx=%d, fve: idx=%d, row_off=%d, nrows=%d",
-					   seq++, self->b.nr_entries,
-					   self->hists->nr_entries,
-					   self->b.height,
-					   self->b.index,
-					   self->b.top_idx,
+					   seq++, browser->b.nr_entries,
+					   browser->hists->nr_entries,
+					   browser->b.height,
+					   browser->b.index,
+					   browser->b.top_idx,
 					   h->row_offset, h->nr_rows);
 		}
 			break;
 		case 'C':
 			/* Collapse the whole world. */
-			hist_browser__set_folding(self, false);
+			hist_browser__set_folding(browser, false);
 			break;
 		case 'E':
 			/* Expand the whole world. */
-			hist_browser__set_folding(self, true);
+			hist_browser__set_folding(browser, true);
 			break;
 		case K_ENTER:
-			if (hist_browser__toggle_fold(self))
+			if (hist_browser__toggle_fold(browser))
 				break;
 			/* fall thru */
 		default:
@@ -370,23 +370,23 @@
 		}
 	}
 out:
-	ui_browser__hide(&self->b);
+	ui_browser__hide(&browser->b);
 	return key;
 }
 
-static char *callchain_list__sym_name(struct callchain_list *self,
+static char *callchain_list__sym_name(struct callchain_list *cl,
 				      char *bf, size_t bfsize)
 {
-	if (self->ms.sym)
-		return self->ms.sym->name;
+	if (cl->ms.sym)
+		return cl->ms.sym->name;
 
-	snprintf(bf, bfsize, "%#" PRIx64, self->ip);
+	snprintf(bf, bfsize, "%#" PRIx64, cl->ip);
 	return bf;
 }
 
 #define LEVEL_OFFSET_STEP 3
 
-static int hist_browser__show_callchain_node_rb_tree(struct hist_browser *self,
+static int hist_browser__show_callchain_node_rb_tree(struct hist_browser *browser,
 						     struct callchain_node *chain_node,
 						     u64 total, int level,
 						     unsigned short row,
@@ -444,21 +444,21 @@
 			}
 
 			color = HE_COLORSET_NORMAL;
-			width = self->b.width - (offset + extra_offset + 2);
-			if (ui_browser__is_current_entry(&self->b, row)) {
-				self->selection = &chain->ms;
+			width = browser->b.width - (offset + extra_offset + 2);
+			if (ui_browser__is_current_entry(&browser->b, row)) {
+				browser->selection = &chain->ms;
 				color = HE_COLORSET_SELECTED;
 				*is_current_entry = true;
 			}
 
-			ui_browser__set_color(&self->b, color);
-			ui_browser__gotorc(&self->b, row, 0);
+			ui_browser__set_color(&browser->b, color);
+			ui_browser__gotorc(&browser->b, row, 0);
 			slsmg_write_nstring(" ", offset + extra_offset);
 			slsmg_printf("%c ", folded_sign);
 			slsmg_write_nstring(str, width);
 			free(alloc_str);
 
-			if (++row == self->b.height)
+			if (++row == browser->b.height)
 				goto out;
 do_next:
 			if (folded_sign == '+')
@@ -467,11 +467,11 @@
 
 		if (folded_sign == '-') {
 			const int new_level = level + (extra_offset ? 2 : 1);
-			row += hist_browser__show_callchain_node_rb_tree(self, child, new_total,
+			row += hist_browser__show_callchain_node_rb_tree(browser, child, new_total,
 									 new_level, row, row_offset,
 									 is_current_entry);
 		}
-		if (row == self->b.height)
+		if (row == browser->b.height)
 			goto out;
 		node = next;
 	}
@@ -479,7 +479,7 @@
 	return row - first_row;
 }
 
-static int hist_browser__show_callchain_node(struct hist_browser *self,
+static int hist_browser__show_callchain_node(struct hist_browser *browser,
 					     struct callchain_node *node,
 					     int level, unsigned short row,
 					     off_t *row_offset,
@@ -488,7 +488,7 @@
 	struct callchain_list *chain;
 	int first_row = row,
 	     offset = level * LEVEL_OFFSET_STEP,
-	     width = self->b.width - offset;
+	     width = browser->b.width - offset;
 	char folded_sign = ' ';
 
 	list_for_each_entry(chain, &node->val, list) {
@@ -503,26 +503,26 @@
 		}
 
 		color = HE_COLORSET_NORMAL;
-		if (ui_browser__is_current_entry(&self->b, row)) {
-			self->selection = &chain->ms;
+		if (ui_browser__is_current_entry(&browser->b, row)) {
+			browser->selection = &chain->ms;
 			color = HE_COLORSET_SELECTED;
 			*is_current_entry = true;
 		}
 
 		s = callchain_list__sym_name(chain, ipstr, sizeof(ipstr));
-		ui_browser__gotorc(&self->b, row, 0);
-		ui_browser__set_color(&self->b, color);
+		ui_browser__gotorc(&browser->b, row, 0);
+		ui_browser__set_color(&browser->b, color);
 		slsmg_write_nstring(" ", offset);
 		slsmg_printf("%c ", folded_sign);
 		slsmg_write_nstring(s, width - 2);
 
-		if (++row == self->b.height)
+		if (++row == browser->b.height)
 			goto out;
 	}
 
 	if (folded_sign == '-')
-		row += hist_browser__show_callchain_node_rb_tree(self, node,
-								 self->hists->stats.total_period,
+		row += hist_browser__show_callchain_node_rb_tree(browser, node,
+								 browser->hists->stats.total_period,
 								 level + 1, row,
 								 row_offset,
 								 is_current_entry);
@@ -530,7 +530,7 @@
 	return row - first_row;
 }
 
-static int hist_browser__show_callchain(struct hist_browser *self,
+static int hist_browser__show_callchain(struct hist_browser *browser,
 					struct rb_root *chain,
 					int level, unsigned short row,
 					off_t *row_offset,
@@ -542,31 +542,31 @@
 	for (nd = rb_first(chain); nd; nd = rb_next(nd)) {
 		struct callchain_node *node = rb_entry(nd, struct callchain_node, rb_node);
 
-		row += hist_browser__show_callchain_node(self, node, level,
+		row += hist_browser__show_callchain_node(browser, node, level,
 							 row, row_offset,
 							 is_current_entry);
-		if (row == self->b.height)
+		if (row == browser->b.height)
 			break;
 	}
 
 	return row - first_row;
 }
 
-static int hist_browser__show_entry(struct hist_browser *self,
+static int hist_browser__show_entry(struct hist_browser *browser,
 				    struct hist_entry *entry,
 				    unsigned short row)
 {
 	char s[256];
 	double percent;
 	int printed = 0;
-	int width = self->b.width - 6; /* The percentage */
+	int width = browser->b.width - 6; /* The percentage */
 	char folded_sign = ' ';
-	bool current_entry = ui_browser__is_current_entry(&self->b, row);
+	bool current_entry = ui_browser__is_current_entry(&browser->b, row);
 	off_t row_offset = entry->row_offset;
 
 	if (current_entry) {
-		self->he_selection = entry;
-		self->selection = &entry->ms;
+		browser->he_selection = entry;
+		browser->selection = &entry->ms;
 	}
 
 	if (symbol_conf.use_callchain) {
@@ -575,11 +575,11 @@
 	}
 
 	if (row_offset == 0) {
-		hist_entry__snprintf(entry, s, sizeof(s), self->hists);
-		percent = (entry->period * 100.0) / self->hists->stats.total_period;
+		hist_entry__snprintf(entry, s, sizeof(s), browser->hists);
+		percent = (entry->period * 100.0) / browser->hists->stats.total_period;
 
-		ui_browser__set_percent_color(&self->b, percent, current_entry);
-		ui_browser__gotorc(&self->b, row, 0);
+		ui_browser__set_percent_color(&browser->b, percent, current_entry);
+		ui_browser__gotorc(&browser->b, row, 0);
 		if (symbol_conf.use_callchain) {
 			slsmg_printf("%c ", folded_sign);
 			width -= 2;
@@ -588,11 +588,11 @@
 		slsmg_printf(" %5.2f%%", percent);
 
 		/* The scroll bar isn't being used */
-		if (!self->b.navkeypressed)
+		if (!browser->b.navkeypressed)
 			width += 1;
 
-		if (!current_entry || !self->b.navkeypressed)
-			ui_browser__set_color(&self->b, HE_COLORSET_NORMAL);
+		if (!current_entry || !browser->b.navkeypressed)
+			ui_browser__set_color(&browser->b, HE_COLORSET_NORMAL);
 
 		if (symbol_conf.show_nr_samples) {
 			slsmg_printf(" %11u", entry->nr_events);
@@ -610,12 +610,12 @@
 	} else
 		--row_offset;
 
-	if (folded_sign == '-' && row != self->b.height) {
-		printed += hist_browser__show_callchain(self, &entry->sorted_chain,
+	if (folded_sign == '-' && row != browser->b.height) {
+		printed += hist_browser__show_callchain(browser, &entry->sorted_chain,
 							1, row, &row_offset,
 							&current_entry);
 		if (current_entry)
-			self->he_selection = entry;
+			browser->he_selection = entry;
 	}
 
 	return printed;
@@ -631,22 +631,22 @@
 	}
 }
 
-static unsigned int hist_browser__refresh(struct ui_browser *self)
+static unsigned int hist_browser__refresh(struct ui_browser *browser)
 {
 	unsigned row = 0;
 	struct rb_node *nd;
-	struct hist_browser *hb = container_of(self, struct hist_browser, b);
+	struct hist_browser *hb = container_of(browser, struct hist_browser, b);
 
-	ui_browser__hists_init_top(self);
+	ui_browser__hists_init_top(browser);
 
-	for (nd = self->top; nd; nd = rb_next(nd)) {
+	for (nd = browser->top; nd; nd = rb_next(nd)) {
 		struct hist_entry *h = rb_entry(nd, struct hist_entry, rb_node);
 
 		if (h->filtered)
 			continue;
 
 		row += hist_browser__show_entry(hb, h, row);
-		if (row == self->height)
+		if (row == browser->height)
 			break;
 	}
 
@@ -679,27 +679,27 @@
 	return NULL;
 }
 
-static void ui_browser__hists_seek(struct ui_browser *self,
+static void ui_browser__hists_seek(struct ui_browser *browser,
 				   off_t offset, int whence)
 {
 	struct hist_entry *h;
 	struct rb_node *nd;
 	bool first = true;
 
-	if (self->nr_entries == 0)
+	if (browser->nr_entries == 0)
 		return;
 
-	ui_browser__hists_init_top(self);
+	ui_browser__hists_init_top(browser);
 
 	switch (whence) {
 	case SEEK_SET:
-		nd = hists__filter_entries(rb_first(self->entries));
+		nd = hists__filter_entries(rb_first(browser->entries));
 		break;
 	case SEEK_CUR:
-		nd = self->top;
+		nd = browser->top;
 		goto do_offset;
 	case SEEK_END:
-		nd = hists__filter_prev_entries(rb_last(self->entries));
+		nd = hists__filter_prev_entries(rb_last(browser->entries));
 		first = false;
 		break;
 	default:
@@ -710,7 +710,7 @@
 	 * Moves not relative to the first visible entry invalidates its
 	 * row_offset:
 	 */
-	h = rb_entry(self->top, struct hist_entry, rb_node);
+	h = rb_entry(browser->top, struct hist_entry, rb_node);
 	h->row_offset = 0;
 
 	/*
@@ -738,7 +738,7 @@
 				} else {
 					h->row_offset += offset;
 					offset = 0;
-					self->top = nd;
+					browser->top = nd;
 					break;
 				}
 			}
@@ -746,7 +746,7 @@
 			if (nd == NULL)
 				break;
 			--offset;
-			self->top = nd;
+			browser->top = nd;
 		} while (offset != 0);
 	} else if (offset < 0) {
 		while (1) {
@@ -759,7 +759,7 @@
 					} else {
 						h->row_offset += offset;
 						offset = 0;
-						self->top = nd;
+						browser->top = nd;
 						break;
 					}
 				} else {
@@ -769,7 +769,7 @@
 					} else {
 						h->row_offset = h->nr_rows + offset;
 						offset = 0;
-						self->top = nd;
+						browser->top = nd;
 						break;
 					}
 				}
@@ -779,7 +779,7 @@
 			if (nd == NULL)
 				break;
 			++offset;
-			self->top = nd;
+			browser->top = nd;
 			if (offset == 0) {
 				/*
 				 * Last unfiltered hist_entry, check if it is
@@ -794,7 +794,7 @@
 			first = false;
 		}
 	} else {
-		self->top = nd;
+		browser->top = nd;
 		h = rb_entry(nd, struct hist_entry, rb_node);
 		h->row_offset = 0;
 	}
@@ -802,46 +802,46 @@
 
 static struct hist_browser *hist_browser__new(struct hists *hists)
 {
-	struct hist_browser *self = zalloc(sizeof(*self));
+	struct hist_browser *browser = zalloc(sizeof(*browser));
 
-	if (self) {
-		self->hists = hists;
-		self->b.refresh = hist_browser__refresh;
-		self->b.seek = ui_browser__hists_seek;
-		self->b.use_navkeypressed = true;
+	if (browser) {
+		browser->hists = hists;
+		browser->b.refresh = hist_browser__refresh;
+		browser->b.seek = ui_browser__hists_seek;
+		browser->b.use_navkeypressed = true;
 		if (sort__branch_mode == 1)
-			self->has_symbols = sort_sym_from.list.next != NULL;
+			browser->has_symbols = sort_sym_from.list.next != NULL;
 		else
-			self->has_symbols = sort_sym.list.next != NULL;
+			browser->has_symbols = sort_sym.list.next != NULL;
 	}
 
-	return self;
+	return browser;
 }
 
-static void hist_browser__delete(struct hist_browser *self)
+static void hist_browser__delete(struct hist_browser *browser)
 {
-	free(self);
+	free(browser);
 }
 
-static struct hist_entry *hist_browser__selected_entry(struct hist_browser *self)
+static struct hist_entry *hist_browser__selected_entry(struct hist_browser *browser)
 {
-	return self->he_selection;
+	return browser->he_selection;
 }
 
-static struct thread *hist_browser__selected_thread(struct hist_browser *self)
+static struct thread *hist_browser__selected_thread(struct hist_browser *browser)
 {
-	return self->he_selection->thread;
+	return browser->he_selection->thread;
 }
 
-static int hists__browser_title(struct hists *self, char *bf, size_t size,
+static int hists__browser_title(struct hists *hists, char *bf, size_t size,
 				const char *ev_name)
 {
 	char unit;
 	int printed;
-	const struct dso *dso = self->dso_filter;
-	const struct thread *thread = self->thread_filter;
-	unsigned long nr_samples = self->stats.nr_events[PERF_RECORD_SAMPLE];
-	u64 nr_events = self->stats.total_period;
+	const struct dso *dso = hists->dso_filter;
+	const struct thread *thread = hists->thread_filter;
+	unsigned long nr_samples = hists->stats.nr_events[PERF_RECORD_SAMPLE];
+	u64 nr_events = hists->stats.total_period;
 
 	nr_samples = convert_unit(nr_samples, &unit);
 	printed = scnprintf(bf, size,
@@ -849,9 +849,9 @@
 			   nr_samples, unit, ev_name, nr_events);
 
 
-	if (self->uid_filter_str)
+	if (hists->uid_filter_str)
 		printed += snprintf(bf + printed, size - printed,
-				    ", UID: %s", self->uid_filter_str);
+				    ", UID: %s", hists->uid_filter_str);
 	if (thread)
 		printed += scnprintf(bf + printed, size - printed,
 				    ", Thread: %s(%d)",
@@ -879,8 +879,8 @@
 				    void(*timer)(void *arg), void *arg,
 				    int delay_secs)
 {
-	struct hists *self = &evsel->hists;
-	struct hist_browser *browser = hist_browser__new(self);
+	struct hists *hists = &evsel->hists;
+	struct hist_browser *browser = hist_browser__new(hists);
 	struct branch_info *bi;
 	struct pstack *fstack;
 	char *options[16];
@@ -946,8 +946,8 @@
 					"Please enter the name of symbol you want to see",
 					buf, "ENTER: OK, ESC: Cancel",
 					delay_secs * 2) == K_ENTER) {
-				self->symbol_filter_str = *buf ? buf : NULL;
-				hists__filter_by_symbol(self);
+				hists->symbol_filter_str = *buf ? buf : NULL;
+				hists__filter_by_symbol(hists);
 				hist_browser__reset(browser);
 			}
 			continue;
@@ -1128,7 +1128,7 @@
 				sort_dso.elide = true;
 				pstack__push(fstack, &browser->hists->dso_filter);
 			}
-			hists__filter_by_dso(self);
+			hists__filter_by_dso(hists);
 			hist_browser__reset(browser);
 		} else if (choice == zoom_thread) {
 zoom_thread:
@@ -1146,7 +1146,7 @@
 				sort_thread.elide = true;
 				pstack__push(fstack, &browser->hists->thread_filter);
 			}
-			hists__filter_by_thread(self);
+			hists__filter_by_thread(hists);
 			hist_browser__reset(browser);
 		}
 	}
diff --git a/tools/perf/ui/setup.c b/tools/perf/ui/setup.c
index 9f5f888..791fb15 100644
--- a/tools/perf/ui/setup.c
+++ b/tools/perf/ui/setup.c
@@ -22,6 +22,7 @@
 			break;
 		/* fall through */
 	default:
+		use_browser = 0;
 		if (fallback_to_pager)
 			setup_pager();
 		break;
diff --git a/tools/perf/util/PERF-VERSION-GEN b/tools/perf/util/PERF-VERSION-GEN
index ad73300..95264f3 100755
--- a/tools/perf/util/PERF-VERSION-GEN
+++ b/tools/perf/util/PERF-VERSION-GEN
@@ -12,7 +12,7 @@
 # First check if there is a .git to get the version from git describe
 # otherwise try to get the version from the kernel makefile
 if test -d ../../.git -o -f ../../.git &&
-	VN=$(git describe --abbrev=4 HEAD 2>/dev/null) &&
+	VN=$(git describe --match 'v[0-9].[0-9]*' --abbrev=4 HEAD 2>/dev/null) &&
 	case "$VN" in
 	*$LF*) (exit 1) ;;
 	v[0-9]*)
diff --git a/tools/perf/util/callchain.c b/tools/perf/util/callchain.c
index 9f7106a..3a6bff4 100644
--- a/tools/perf/util/callchain.c
+++ b/tools/perf/util/callchain.c
@@ -18,6 +18,8 @@
 #include "util.h"
 #include "callchain.h"
 
+__thread struct callchain_cursor callchain_cursor;
+
 bool ip_callchain__valid(struct ip_callchain *chain,
 			 const union perf_event *event)
 {
diff --git a/tools/perf/util/callchain.h b/tools/perf/util/callchain.h
index 7f9c0f1..3bdb407 100644
--- a/tools/perf/util/callchain.h
+++ b/tools/perf/util/callchain.h
@@ -76,6 +76,8 @@
 	struct callchain_cursor_node	*curr;
 };
 
+extern __thread struct callchain_cursor callchain_cursor;
+
 static inline void callchain_init(struct callchain_root *root)
 {
 	INIT_LIST_HEAD(&root->node.siblings);
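The cursor used while resolving callchains becomes a single thread-local object instead of a per-hists field: callchain.h declares it extern __thread and callchain.c holds the one definition. A minimal standalone sketch of that declaration/definition split (the names below are illustrative stand-ins, not the perf symbols):

/* Illustrative sketch of the extern __thread pattern (GCC/Clang); not perf code. */
#include <stdio.h>
#include <stddef.h>

struct cursor {
	size_t pos;
	size_t nr;
};

/* In perf this is split across the header (declaration) and callchain.c (definition). */
extern __thread struct cursor the_cursor;	/* visible to every file */
__thread struct cursor the_cursor;		/* exactly one definition */

static void cursor_reset(void)
{
	/* Each thread resets only its own instance. */
	the_cursor.pos = 0;
	the_cursor.nr  = 0;
}

int main(void)
{
	cursor_reset();
	printf("pos=%zu nr=%zu\n", the_cursor.pos, the_cursor.nr);
	return 0;
}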
diff --git a/tools/perf/util/config.c b/tools/perf/util/config.c
index 0deac6a..6faa3a1 100644
--- a/tools/perf/util/config.c
+++ b/tools/perf/util/config.c
@@ -120,7 +120,7 @@
 
 static inline int iskeychar(int c)
 {
-	return isalnum(c) || c == '-';
+	return isalnum(c) || c == '-' || c == '_';
 }
 
 static int get_value(config_fn_t fn, void *data, char *name, unsigned int len)
diff --git a/tools/perf/util/evlist.c b/tools/perf/util/evlist.c
index 4ac5f5a..7400fb3 100644
--- a/tools/perf/util/evlist.c
+++ b/tools/perf/util/evlist.c
@@ -159,6 +159,17 @@
 	return -1;
 }
 
+int __perf_evlist__add_default_attrs(struct perf_evlist *evlist,
+				     struct perf_event_attr *attrs, size_t nr_attrs)
+{
+	size_t i;
+
+	for (i = 0; i < nr_attrs; i++)
+		event_attr_init(attrs + i);
+
+	return perf_evlist__add_attrs(evlist, attrs, nr_attrs);
+}
+
 static int trace_event__id(const char *evname)
 {
 	char *filename, *colon;
@@ -263,7 +274,8 @@
 	for (cpu = 0; cpu < evlist->cpus->nr; cpu++) {
 		list_for_each_entry(pos, &evlist->entries, node) {
 			for (thread = 0; thread < evlist->threads->nr; thread++)
-				ioctl(FD(pos, cpu, thread), PERF_EVENT_IOC_DISABLE);
+				ioctl(FD(pos, cpu, thread),
+				      PERF_EVENT_IOC_DISABLE, 0);
 		}
 	}
 }
@@ -276,7 +288,8 @@
 	for (cpu = 0; cpu < evlist->cpus->nr; cpu++) {
 		list_for_each_entry(pos, &evlist->entries, node) {
 			for (thread = 0; thread < evlist->threads->nr; thread++)
-				ioctl(FD(pos, cpu, thread), PERF_EVENT_IOC_ENABLE);
+				ioctl(FD(pos, cpu, thread),
+				      PERF_EVENT_IOC_ENABLE, 0);
 		}
 	}
 }
diff --git a/tools/perf/util/evlist.h b/tools/perf/util/evlist.h
index 58abb63..989bee9 100644
--- a/tools/perf/util/evlist.h
+++ b/tools/perf/util/evlist.h
@@ -54,6 +54,8 @@
 int perf_evlist__add_default(struct perf_evlist *evlist);
 int perf_evlist__add_attrs(struct perf_evlist *evlist,
 			   struct perf_event_attr *attrs, size_t nr_attrs);
+int __perf_evlist__add_default_attrs(struct perf_evlist *evlist,
+				     struct perf_event_attr *attrs, size_t nr_attrs);
 int perf_evlist__add_tracepoints(struct perf_evlist *evlist,
 				 const char *tracepoints[], size_t nr_tracepoints);
 int perf_evlist__set_tracepoints_handlers(struct perf_evlist *evlist,
@@ -62,6 +64,8 @@
 
 #define perf_evlist__add_attrs_array(evlist, array) \
 	perf_evlist__add_attrs(evlist, array, ARRAY_SIZE(array))
+#define perf_evlist__add_default_attrs(evlist, array) \
+	__perf_evlist__add_default_attrs(evlist, array, ARRAY_SIZE(array))
 
 #define perf_evlist__add_tracepoints_array(evlist, array) \
 	perf_evlist__add_tracepoints(evlist, array, ARRAY_SIZE(array))
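The new __perf_evlist__add_default_attrs() runs event_attr_init() over a caller-supplied attribute array before handing it to perf_evlist__add_attrs(), and the perf_evlist__add_default_attrs() macro pairs it with ARRAY_SIZE(). A compilable sketch of that wrapper shape, using stand-in types rather than the real perf structures:

/* Sketch only: the structs and helpers below are hypothetical stand-ins. */
#include <stddef.h>
#include <stdio.h>

#define ARRAY_SIZE(a) (sizeof(a) / sizeof((a)[0]))

struct attr { int type; int config; int initialized; };
struct list { struct attr *added[16]; size_t nr; };

static void attr_init(struct attr *a)	/* stands in for event_attr_init() */
{
	a->initialized = 1;
}

static int list_add_attrs(struct list *l, struct attr *attrs, size_t nr)
{
	size_t i;

	for (i = 0; i < nr && l->nr < ARRAY_SIZE(l->added); i++)
		l->added[l->nr++] = &attrs[i];
	return 0;
}

/* Mirrors the wrapper: fix up defaults first, then delegate to the generic add. */
static int list_add_default_attrs(struct list *l, struct attr *attrs, size_t nr)
{
	size_t i;

	for (i = 0; i < nr; i++)
		attr_init(&attrs[i]);
	return list_add_attrs(l, attrs, nr);
}

#define list_add_default_attrs_array(l, array) \
	list_add_default_attrs(l, array, ARRAY_SIZE(array))

int main(void)
{
	static struct attr defaults[] = { { 0, 0, 0 }, { 0, 1, 0 } };
	struct list l = { .nr = 0 };

	list_add_default_attrs_array(&l, defaults);
	printf("added %zu attrs, first initialized=%d\n", l.nr, defaults[0].initialized);
	return 0;
}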
diff --git a/tools/perf/util/evsel.c b/tools/perf/util/evsel.c
index 57e4ce5..9f6cebd 100644
--- a/tools/perf/util/evsel.c
+++ b/tools/perf/util/evsel.c
@@ -15,6 +15,7 @@
 #include "cpumap.h"
 #include "thread_map.h"
 #include "target.h"
+#include "../../include/linux/perf_event.h"
 
 #define FD(e, x, y) (*(int *)xyarray__entry(e->fd, x, y))
 #define GROUP_FD(group_fd, cpu) (*(int *)xyarray__entry(group_fd, cpu, 0))
@@ -64,6 +65,95 @@
 	return evsel;
 }
 
+static const char *perf_evsel__hw_names[PERF_COUNT_HW_MAX] = {
+	"cycles",
+	"instructions",
+	"cache-references",
+	"cache-misses",
+	"branches",
+	"branch-misses",
+	"bus-cycles",
+	"stalled-cycles-frontend",
+	"stalled-cycles-backend",
+	"ref-cycles",
+};
+
+const char *__perf_evsel__hw_name(u64 config)
+{
+	if (config < PERF_COUNT_HW_MAX && perf_evsel__hw_names[config])
+		return perf_evsel__hw_names[config];
+
+	return "unknown-hardware";
+}
+
+static int perf_evsel__hw_name(struct perf_evsel *evsel, char *bf, size_t size)
+{
+	int colon = 0;
+	struct perf_event_attr *attr = &evsel->attr;
+	int r = scnprintf(bf, size, "%s", __perf_evsel__hw_name(attr->config));
+	bool exclude_guest_default = false;
+
+#define MOD_PRINT(context, mod)	do {					\
+		if (!attr->exclude_##context) {				\
+			if (!colon) colon = r++;			\
+			r += scnprintf(bf + r, size - r, "%c", mod);	\
+		} } while(0)
+
+	if (attr->exclude_kernel || attr->exclude_user || attr->exclude_hv) {
+		MOD_PRINT(kernel, 'k');
+		MOD_PRINT(user, 'u');
+		MOD_PRINT(hv, 'h');
+		exclude_guest_default = true;
+	}
+
+	if (attr->precise_ip) {
+		if (!colon)
+			colon = r++;
+		r += scnprintf(bf + r, size - r, "%.*s", attr->precise_ip, "ppp");
+		exclude_guest_default = true;
+	}
+
+	if (attr->exclude_host || attr->exclude_guest == exclude_guest_default) {
+		MOD_PRINT(host, 'H');
+		MOD_PRINT(guest, 'G');
+	}
+#undef MOD_PRINT
+	if (colon)
+		bf[colon] = ':';
+	return r;
+}
+
+int perf_evsel__name(struct perf_evsel *evsel, char *bf, size_t size)
+{
+	int ret;
+
+	switch (evsel->attr.type) {
+	case PERF_TYPE_RAW:
+		ret = scnprintf(bf, size, "raw 0x%" PRIx64, evsel->attr.config);
+		break;
+
+	case PERF_TYPE_HARDWARE:
+		ret = perf_evsel__hw_name(evsel, bf, size);
+		break;
+	default:
+		/*
+		 * FIXME
+ 		 *
+		 * This is the minimal perf_evsel__name so that we can
+		 * reconstruct event names taking into account event modifiers.
+		 *
+		 * The old event_name uses it now for raw and hw events, so that
+		 * we don't drag all the parsing stuff into the python binding.
+		 *
+		 * On the next devel cycle the rest of the event naming will be
+		 * brought here.
+ 		 */
+		return 0;
+	}
+
+	return ret;
+}
+
 void perf_evsel__config(struct perf_evsel *evsel, struct perf_record_opts *opts,
 			struct perf_evsel *first)
 {
@@ -404,16 +494,24 @@
 }
 
 static int perf_event__parse_id_sample(const union perf_event *event, u64 type,
-				       struct perf_sample *sample)
+				       struct perf_sample *sample,
+				       bool swapped)
 {
 	const u64 *array = event->sample.array;
+	union u64_swap u;
 
 	array += ((event->header.size -
 		   sizeof(event->header)) / sizeof(u64)) - 1;
 
 	if (type & PERF_SAMPLE_CPU) {
-		u32 *p = (u32 *)array;
-		sample->cpu = *p;
+		u.val64 = *array;
+		if (swapped) {
+			/* undo swap of u64, then swap on individual u32s */
+			u.val64 = bswap_64(u.val64);
+			u.val32[0] = bswap_32(u.val32[0]);
+		}
+
+		sample->cpu = u.val32[0];
 		array--;
 	}
 
@@ -433,9 +531,16 @@
 	}
 
 	if (type & PERF_SAMPLE_TID) {
-		u32 *p = (u32 *)array;
-		sample->pid = p[0];
-		sample->tid = p[1];
+		u.val64 = *array;
+		if (swapped) {
+			/* undo swap of u64, then swap on individual u32s */
+			u.val64 = bswap_64(u.val64);
+			u.val32[0] = bswap_32(u.val32[0]);
+			u.val32[1] = bswap_32(u.val32[1]);
+		}
+
+		sample->pid = u.val32[0];
+		sample->tid = u.val32[1];
 	}
 
 	return 0;
@@ -472,7 +577,7 @@
 	if (event->header.type != PERF_RECORD_SAMPLE) {
 		if (!sample_id_all)
 			return 0;
-		return perf_event__parse_id_sample(event, type, data);
+		return perf_event__parse_id_sample(event, type, data, swapped);
 	}
 
 	array = event->sample.array;
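perf_evsel__hw_name() appends modifier letters after the event name, reserving one byte for the ':' the first time a modifier is printed and patching it in at the end. A simplified standalone sketch of that buffer trick (only the k/u/h modifiers, without the precise_ip and guest/host handling):

/* Sketch only; not the perf implementation. */
#include <stdio.h>

static int hw_name_with_mods(char *bf, size_t size, const char *name,
			     int excl_kernel, int excl_user, int excl_hv)
{
	int colon = 0;
	int r = snprintf(bf, size, "%s", name);

#define MOD_PRINT(excluded, mod) do {					\
		if (!(excluded)) {					\
			if (!colon)					\
				colon = r++;	/* hold the ':' slot */	\
			r += snprintf(bf + r, size - r, "%c", (mod));	\
		}							\
	} while (0)

	if (excl_kernel || excl_user || excl_hv) {
		MOD_PRINT(excl_kernel, 'k');
		MOD_PRINT(excl_user, 'u');
		MOD_PRINT(excl_hv, 'h');
	}
#undef MOD_PRINT

	if (colon)
		bf[colon] = ':';	/* fill the reserved slot */
	return r;
}

int main(void)
{
	char bf[64];

	hw_name_with_mods(bf, sizeof(bf), "cycles", 0, 1, 1);
	printf("%s\n", bf);	/* prints "cycles:k" */
	return 0;
}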
diff --git a/tools/perf/util/evsel.h b/tools/perf/util/evsel.h
index 3d6b3e4..4ba8b56 100644
--- a/tools/perf/util/evsel.h
+++ b/tools/perf/util/evsel.h
@@ -83,6 +83,9 @@
 			struct perf_record_opts *opts,
 			struct perf_evsel *first);
 
+const char* __perf_evsel__hw_name(u64 config);
+int perf_evsel__name(struct perf_evsel *evsel, char *bf, size_t size);
+
 int perf_evsel__alloc_fd(struct perf_evsel *evsel, int ncpus, int nthreads);
 int perf_evsel__alloc_id(struct perf_evsel *evsel, int ncpus, int nthreads);
 int perf_evsel__alloc_counts(struct perf_evsel *evsel, int ncpus);
diff --git a/tools/perf/util/header.c b/tools/perf/util/header.c
index 2dd5edf..e909d43 100644
--- a/tools/perf/util/header.c
+++ b/tools/perf/util/header.c
@@ -1942,7 +1942,6 @@
 		else
 			return -1;
 	} else if (ph->needs_swap) {
-		unsigned int i;
 		/*
 		 * feature bitmap is declared as an array of unsigned longs --
 		 * not good since its size can differ between the host that
@@ -1958,14 +1957,17 @@
 		 * file), punt and fallback to the original behavior --
 		 * clearing all feature bits and setting buildid.
 		 */
-		for (i = 0; i < BITS_TO_LONGS(HEADER_FEAT_BITS); ++i)
-			header->adds_features[i] = bswap_64(header->adds_features[i]);
+		mem_bswap_64(&header->adds_features,
+			    BITS_TO_U64(HEADER_FEAT_BITS));
 
 		if (!test_bit(HEADER_HOSTNAME, header->adds_features)) {
-			for (i = 0; i < BITS_TO_LONGS(HEADER_FEAT_BITS); ++i) {
-				header->adds_features[i] = bswap_64(header->adds_features[i]);
-				header->adds_features[i] = bswap_32(header->adds_features[i]);
-			}
+			/* unswap as u64 */
+			mem_bswap_64(&header->adds_features,
+				    BITS_TO_U64(HEADER_FEAT_BITS));
+
+			/* unswap as u32 */
+			mem_bswap_32(&header->adds_features,
+				    BITS_TO_U32(HEADER_FEAT_BITS));
 		}
 
 		if (!test_bit(HEADER_HOSTNAME, header->adds_features)) {
@@ -2091,6 +2093,35 @@
 	return ret <= 0 ? -1 : 0;
 }
 
+static int perf_evsel__set_tracepoint_name(struct perf_evsel *evsel)
+{
+	struct event_format *event = trace_find_event(evsel->attr.config);
+	char bf[128];
+
+	if (event == NULL)
+		return -1;
+
+	snprintf(bf, sizeof(bf), "%s:%s", event->system, event->name);
+	evsel->name = strdup(bf);
+	if (evsel->name == NULL)
+		return -1;
+
+	return 0;
+}
+
+static int perf_evlist__set_tracepoint_names(struct perf_evlist *evlist)
+{
+	struct perf_evsel *pos;
+
+	list_for_each_entry(pos, &evlist->entries, node) {
+		if (pos->attr.type == PERF_TYPE_TRACEPOINT &&
+		    perf_evsel__set_tracepoint_name(pos))
+			return -1;
+	}
+
+	return 0;
+}
+
 int perf_session__read_header(struct perf_session *session, int fd)
 {
 	struct perf_header *header = &session->header;
@@ -2172,6 +2203,9 @@
 
 	lseek(fd, header->data_offset, SEEK_SET);
 
+	if (perf_evlist__set_tracepoint_names(session->evlist))
+		goto out_delete_evlist;
+
 	header->frozen = 1;
 	return 0;
 out_errno:
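The adds_features fix above swaps the feature bitmap in fixed 64-bit (and, as a fallback, 32-bit) chunks so the result no longer depends on the host's sizeof(long). A self-contained sketch of the 64-bit variant; the macros are re-derived here for illustration and are not the perf definitions themselves:

/* Sketch only, assuming a 256-bit feature bitmap stored as u64 words. */
#include <stdio.h>
#include <stdint.h>
#include <byteswap.h>

#define BITS_PER_BYTE		8
#define DIV_ROUND_UP(n, d)	(((n) + (d) - 1) / (d))
#define BITS_TO_U64(nr)		DIV_ROUND_UP(nr, BITS_PER_BYTE * sizeof(uint64_t))

#define NR_FEAT_BITS		256

static void bitmap_bswap_64(uint64_t *map, size_t nr_words)
{
	size_t i;

	/* Word size is fixed at 64 bits, independent of the host's long. */
	for (i = 0; i < nr_words; i++)
		map[i] = bswap_64(map[i]);
}

int main(void)
{
	uint64_t features[BITS_TO_U64(NR_FEAT_BITS)] = { 0x0102030405060708ULL };

	bitmap_bswap_64(features, BITS_TO_U64(NR_FEAT_BITS));
	printf("word0 after swap: 0x%016llx\n",
	       (unsigned long long)features[0]);	/* 0x0807060504030201 */
	return 0;
}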
diff --git a/tools/perf/util/hist.c b/tools/perf/util/hist.c
index 1293b5e..514e2a4 100644
--- a/tools/perf/util/hist.c
+++ b/tools/perf/util/hist.c
@@ -378,7 +378,7 @@
  * collapse the histogram
  */
 
-static bool hists__collapse_insert_entry(struct hists *hists,
+static bool hists__collapse_insert_entry(struct hists *hists __used,
 					 struct rb_root *root,
 					 struct hist_entry *he)
 {
@@ -397,8 +397,9 @@
 			iter->period += he->period;
 			iter->nr_events += he->nr_events;
 			if (symbol_conf.use_callchain) {
-				callchain_cursor_reset(&hists->callchain_cursor);
-				callchain_merge(&hists->callchain_cursor, iter->callchain,
+				callchain_cursor_reset(&callchain_cursor);
+				callchain_merge(&callchain_cursor,
+						iter->callchain,
 						he->callchain);
 			}
 			hist_entry__free(he);
diff --git a/tools/perf/util/hist.h b/tools/perf/util/hist.h
index cfc64e2..34bb556 100644
--- a/tools/perf/util/hist.h
+++ b/tools/perf/util/hist.h
@@ -67,8 +67,6 @@
 	struct events_stats	stats;
 	u64			event_stream;
 	u16			col_len[HISTC_NR_COLS];
-	/* Best would be to reuse the session callchain cursor */
-	struct callchain_cursor	callchain_cursor;
 };
 
 struct hist_entry *__hists__add_entry(struct hists *self,
diff --git a/tools/perf/util/include/linux/bitops.h b/tools/perf/util/include/linux/bitops.h
index f1584833..587a230 100644
--- a/tools/perf/util/include/linux/bitops.h
+++ b/tools/perf/util/include/linux/bitops.h
@@ -8,6 +8,8 @@
 #define BITS_PER_LONG __WORDSIZE
 #define BITS_PER_BYTE           8
 #define BITS_TO_LONGS(nr)       DIV_ROUND_UP(nr, BITS_PER_BYTE * sizeof(long))
+#define BITS_TO_U64(nr)         DIV_ROUND_UP(nr, BITS_PER_BYTE * sizeof(u64))
+#define BITS_TO_U32(nr)         DIV_ROUND_UP(nr, BITS_PER_BYTE * sizeof(u32))
 
 #define for_each_set_bit(bit, addr, size) \
 	for ((bit) = find_first_bit((addr), (size));		\
diff --git a/tools/perf/util/pager.c b/tools/perf/util/pager.c
index 1915de2..3322b84 100644
--- a/tools/perf/util/pager.c
+++ b/tools/perf/util/pager.c
@@ -57,6 +57,10 @@
 	}
 	if (!pager)
 		pager = getenv("PAGER");
+	if (!pager) {
+		if (!access("/usr/bin/pager", X_OK))
+			pager = "/usr/bin/pager";
+	}
 	if (!pager)
 		pager = "less";
 	else if (!*pager || !strcmp(pager, "cat"))
diff --git a/tools/perf/util/parse-events.c b/tools/perf/util/parse-events.c
index fac7d59..05dbc8b 100644
--- a/tools/perf/util/parse-events.c
+++ b/tools/perf/util/parse-events.c
@@ -62,19 +62,6 @@
 #define PERF_EVENT_TYPE(config)		__PERF_EVENT_FIELD(config, TYPE)
 #define PERF_EVENT_ID(config)		__PERF_EVENT_FIELD(config, EVENT)
 
-static const char *hw_event_names[PERF_COUNT_HW_MAX] = {
-	"cycles",
-	"instructions",
-	"cache-references",
-	"cache-misses",
-	"branches",
-	"branch-misses",
-	"bus-cycles",
-	"stalled-cycles-frontend",
-	"stalled-cycles-backend",
-	"ref-cycles",
-};
-
 static const char *sw_event_names[PERF_COUNT_SW_MAX] = {
 	"cpu-clock",
 	"task-clock",
@@ -300,6 +287,16 @@
 	u64 config = evsel->attr.config;
 	int type = evsel->attr.type;
 
+	if (type == PERF_TYPE_RAW || type == PERF_TYPE_HARDWARE) {
+		/*
+		 * XXX minimal fix, see comment on perf_evsel__name, this static buffer
+ 		 * will go away together with event_name in the next devel cycle.
+ 		 */
+		static char bf[128];
+		perf_evsel__name(evsel, bf, sizeof(bf));
+		return bf;
+	}
+
 	if (evsel->name)
 		return evsel->name;
 
@@ -317,9 +314,7 @@
 
 	switch (type) {
 	case PERF_TYPE_HARDWARE:
-		if (config < PERF_COUNT_HW_MAX && hw_event_names[config])
-			return hw_event_names[config];
-		return "unknown-hardware";
+		return __perf_evsel__hw_name(config);
 
 	case PERF_TYPE_HW_CACHE: {
 		u8 cache_type, cache_op, cache_result;
diff --git a/tools/perf/util/probe-event.c b/tools/perf/util/probe-event.c
index 59dccc9..0dda25d 100644
--- a/tools/perf/util/probe-event.c
+++ b/tools/perf/util/probe-event.c
@@ -2164,16 +2164,12 @@
 
 error:
 	if (kfd >= 0) {
-		if (namelist)
-			strlist__delete(namelist);
-
+		strlist__delete(namelist);
 		close(kfd);
 	}
 
 	if (ufd >= 0) {
-		if (unamelist)
-			strlist__delete(unamelist);
-
+		strlist__delete(unamelist);
 		close(ufd);
 	}
 
diff --git a/tools/perf/util/session.c b/tools/perf/util/session.c
index 93d355d..c3e399b 100644
--- a/tools/perf/util/session.c
+++ b/tools/perf/util/session.c
@@ -288,7 +288,8 @@
 	return bi;
 }
 
-int machine__resolve_callchain(struct machine *self, struct perf_evsel *evsel,
+int machine__resolve_callchain(struct machine *self,
+			       struct perf_evsel *evsel __used,
 			       struct thread *thread,
 			       struct ip_callchain *chain,
 			       struct symbol **parent)
@@ -297,7 +298,12 @@
 	unsigned int i;
 	int err;
 
-	callchain_cursor_reset(&evsel->hists.callchain_cursor);
+	callchain_cursor_reset(&callchain_cursor);
+
+	if (chain->nr > PERF_MAX_STACK_DEPTH) {
+		pr_warning("corrupted callchain. skipping...\n");
+		return 0;
+	}
 
 	for (i = 0; i < chain->nr; i++) {
 		u64 ip;
@@ -317,7 +323,14 @@
 			case PERF_CONTEXT_USER:
 				cpumode = PERF_RECORD_MISC_USER;	break;
 			default:
-				break;
+				pr_debug("invalid callchain context: "
+					 "%"PRId64"\n", (s64) ip);
+				/*
+				 * It seems the callchain is corrupted.
+				 * Discard all.
+				 */
+				callchain_cursor_reset(&callchain_cursor);
+				return 0;
 			}
 			continue;
 		}
@@ -333,7 +346,7 @@
 				break;
 		}
 
-		err = callchain_cursor_append(&evsel->hists.callchain_cursor,
+		err = callchain_cursor_append(&callchain_cursor,
 					      ip, al.map, al.sym);
 		if (err)
 			return err;
@@ -429,6 +442,16 @@
 			tool->finished_round = process_finished_round_stub;
 	}
 }
+
+void mem_bswap_32(void *src, int byte_size)
+{
+	u32 *m = src;
+	while (byte_size > 0) {
+		*m = bswap_32(*m);
+		byte_size -= sizeof(u32);
+		++m;
+	}
+}
 
 void mem_bswap_64(void *src, int byte_size)
 {
@@ -441,37 +464,65 @@
 	}
 }
 
-static void perf_event__all64_swap(union perf_event *event)
+static void swap_sample_id_all(union perf_event *event, void *data)
+{
+	void *end = (void *) event + event->header.size;
+	int size = end - data;
+
+	BUG_ON(size % sizeof(u64));
+	mem_bswap_64(data, size);
+}
+
+static void perf_event__all64_swap(union perf_event *event,
+				   bool sample_id_all __used)
 {
 	struct perf_event_header *hdr = &event->header;
 	mem_bswap_64(hdr + 1, event->header.size - sizeof(*hdr));
 }
 
-static void perf_event__comm_swap(union perf_event *event)
+static void perf_event__comm_swap(union perf_event *event, bool sample_id_all)
 {
 	event->comm.pid = bswap_32(event->comm.pid);
 	event->comm.tid = bswap_32(event->comm.tid);
+
+	if (sample_id_all) {
+		void *data = &event->comm.comm;
+
+		data += ALIGN(strlen(data) + 1, sizeof(u64));
+		swap_sample_id_all(event, data);
+	}
 }
 
-static void perf_event__mmap_swap(union perf_event *event)
+static void perf_event__mmap_swap(union perf_event *event,
+				  bool sample_id_all)
 {
 	event->mmap.pid	  = bswap_32(event->mmap.pid);
 	event->mmap.tid	  = bswap_32(event->mmap.tid);
 	event->mmap.start = bswap_64(event->mmap.start);
 	event->mmap.len	  = bswap_64(event->mmap.len);
 	event->mmap.pgoff = bswap_64(event->mmap.pgoff);
+
+	if (sample_id_all) {
+		void *data = &event->mmap.filename;
+
+		data += ALIGN(strlen(data) + 1, sizeof(u64));
+		swap_sample_id_all(event, data);
+	}
 }
 
-static void perf_event__task_swap(union perf_event *event)
+static void perf_event__task_swap(union perf_event *event, bool sample_id_all)
 {
 	event->fork.pid	 = bswap_32(event->fork.pid);
 	event->fork.tid	 = bswap_32(event->fork.tid);
 	event->fork.ppid = bswap_32(event->fork.ppid);
 	event->fork.ptid = bswap_32(event->fork.ptid);
 	event->fork.time = bswap_64(event->fork.time);
+
+	if (sample_id_all)
+		swap_sample_id_all(event, &event->fork + 1);
 }
 
-static void perf_event__read_swap(union perf_event *event)
+static void perf_event__read_swap(union perf_event *event, bool sample_id_all)
 {
 	event->read.pid		 = bswap_32(event->read.pid);
 	event->read.tid		 = bswap_32(event->read.tid);
@@ -479,6 +530,9 @@
 	event->read.time_enabled = bswap_64(event->read.time_enabled);
 	event->read.time_running = bswap_64(event->read.time_running);
 	event->read.id		 = bswap_64(event->read.id);
+
+	if (sample_id_all)
+		swap_sample_id_all(event, &event->read + 1);
 }
 
 static u8 revbyte(u8 b)
@@ -530,7 +584,8 @@
 	swap_bitfield((u8 *) (&attr->read_format + 1), sizeof(u64));
 }
 
-static void perf_event__hdr_attr_swap(union perf_event *event)
+static void perf_event__hdr_attr_swap(union perf_event *event,
+				      bool sample_id_all __used)
 {
 	size_t size;
 
@@ -541,18 +596,21 @@
 	mem_bswap_64(event->attr.id, size);
 }
 
-static void perf_event__event_type_swap(union perf_event *event)
+static void perf_event__event_type_swap(union perf_event *event,
+					bool sample_id_all __used)
 {
 	event->event_type.event_type.event_id =
 		bswap_64(event->event_type.event_type.event_id);
 }
 
-static void perf_event__tracing_data_swap(union perf_event *event)
+static void perf_event__tracing_data_swap(union perf_event *event,
+					  bool sample_id_all __used)
 {
 	event->tracing_data.size = bswap_32(event->tracing_data.size);
 }
 
-typedef void (*perf_event__swap_op)(union perf_event *event);
+typedef void (*perf_event__swap_op)(union perf_event *event,
+				    bool sample_id_all);
 
 static perf_event__swap_op perf_event__swap_ops[] = {
 	[PERF_RECORD_MMAP]		  = perf_event__mmap_swap,
@@ -986,6 +1044,15 @@
 	}
 }
 
+static void event_swap(union perf_event *event, bool sample_id_all)
+{
+	perf_event__swap_op swap;
+
+	swap = perf_event__swap_ops[event->header.type];
+	if (swap)
+		swap(event, sample_id_all);
+}
+
 static int perf_session__process_event(struct perf_session *session,
 				       union perf_event *event,
 				       struct perf_tool *tool,
@@ -994,9 +1061,8 @@
 	struct perf_sample sample;
 	int ret;
 
-	if (session->header.needs_swap &&
-	    perf_event__swap_ops[event->header.type])
-		perf_event__swap_ops[event->header.type](event);
+	if (session->header.needs_swap)
+		event_swap(event, session->sample_id_all);
 
 	if (event->header.type >= PERF_RECORD_HEADER_MAX)
 		return -EINVAL;
@@ -1428,7 +1494,6 @@
 			  int print_sym, int print_dso, int print_symoffset)
 {
 	struct addr_location al;
-	struct callchain_cursor *cursor = &evsel->hists.callchain_cursor;
 	struct callchain_cursor_node *node;
 
 	if (perf_event__preprocess_sample(event, machine, &al, sample,
@@ -1446,10 +1511,10 @@
 				error("Failed to resolve callchain. Skipping\n");
 			return;
 		}
-		callchain_cursor_commit(cursor);
+		callchain_cursor_commit(&callchain_cursor);
 
 		while (1) {
-			node = callchain_cursor_current(cursor);
+			node = callchain_cursor_current(&callchain_cursor);
 			if (!node)
 				break;
 
@@ -1460,12 +1525,12 @@
 			}
 			if (print_dso) {
 				printf(" (");
-				map__fprintf_dsoname(al.map, stdout);
+				map__fprintf_dsoname(node->map, stdout);
 				printf(")");
 			}
 			printf("\n");
 
-			callchain_cursor_advance(cursor);
+			callchain_cursor_advance(&callchain_cursor);
 		}
 
 	} else {
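The sample_id_all swapping added above locates the trailer that follows a variable-length, NUL-terminated string by rounding the string length up to a u64 boundary. A small standalone sketch of that ALIGN step, with a made-up record layout:

/* Sketch only: the record layout is illustrative, not the perf event format. */
#include <stdio.h>
#include <stdint.h>
#include <string.h>

#define ALIGN(x, a)	(((x) + (a) - 1) & ~((a) - 1))

struct fake_comm_event {
	uint32_t pid, tid;
	char comm[8];		/* string region, padded to an 8-byte boundary */
	uint64_t trailer[2];	/* stands in for the sample_id_all block */
};

int main(void)
{
	struct fake_comm_event ev = { 1, 1, "perf", { 0x11, 0x22 } };
	void *data = ev.comm;

	/* Skip the string plus its padding, landing on the 8-byte trailer. */
	data = (char *)data + ALIGN(strlen(ev.comm) + 1, sizeof(uint64_t));
	printf("first trailer word: 0x%llx\n",
	       (unsigned long long)*(uint64_t *)data);
	return 0;
}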
diff --git a/tools/perf/util/session.h b/tools/perf/util/session.h
index 7a5434c..0c702e3 100644
--- a/tools/perf/util/session.h
+++ b/tools/perf/util/session.h
@@ -80,6 +80,7 @@
 bool perf_session__has_traces(struct perf_session *self, const char *msg);
 
 void mem_bswap_64(void *src, int byte_size);
+void mem_bswap_32(void *src, int byte_size);
 void perf_event__attr_swap(struct perf_event_attr *attr);
 
 int perf_session__create_kernel_maps(struct perf_session *self);
diff --git a/tools/perf/util/symbol.c b/tools/perf/util/symbol.c
index e2ba885..3e2e5ea 100644
--- a/tools/perf/util/symbol.c
+++ b/tools/perf/util/symbol.c
@@ -323,6 +323,7 @@
 		dso->sorted_by_name = 0;
 		dso->has_build_id = 0;
 		dso->kernel = DSO_TYPE_USER;
+		dso->needs_swap = DSO_SWAP__UNSET;
 		INIT_LIST_HEAD(&dso->node);
 	}
 
@@ -1156,6 +1157,33 @@
 	return -1;
 }
 
+static int dso__swap_init(struct dso *dso, unsigned char eidata)
+{
+	static unsigned int const endian = 1;
+
+	dso->needs_swap = DSO_SWAP__NO;
+
+	switch (eidata) {
+	case ELFDATA2LSB:
+		/* We are big endian, DSO is little endian. */
+		if (*(unsigned char const *)&endian != 1)
+			dso->needs_swap = DSO_SWAP__YES;
+		break;
+
+	case ELFDATA2MSB:
+		/* We are little endian, DSO is big endian. */
+		if (*(unsigned char const *)&endian != 0)
+			dso->needs_swap = DSO_SWAP__YES;
+		break;
+
+	default:
+		pr_err("unrecognized DSO data encoding %d\n", eidata);
+		return -EINVAL;
+	}
+
+	return 0;
+}
+
 static int dso__load_sym(struct dso *dso, struct map *map, const char *name,
 			 int fd, symbol_filter_t filter, int kmodule,
 			 int want_symtab)
@@ -1187,6 +1215,9 @@
 		goto out_elf_end;
 	}
 
+	if (dso__swap_init(dso, ehdr.e_ident[EI_DATA]))
+		goto out_elf_end;
+
 	/* Always reject images with a mismatched build-id: */
 	if (dso->has_build_id) {
 		u8 build_id[BUILD_ID_SIZE];
@@ -1272,7 +1303,7 @@
 		if (opdsec && sym.st_shndx == opdidx) {
 			u32 offset = sym.st_value - opdshdr.sh_addr;
 			u64 *opd = opddata->d_buf + offset;
-			sym.st_value = *opd;
+			sym.st_value = DSO__SWAP(dso, u64, *opd);
 			sym.st_shndx = elf_addr_to_index(elf, sym.st_value);
 		}
 
@@ -2786,8 +2817,11 @@
 
 struct map *dso__new_map(const char *name)
 {
+	struct map *map = NULL;
 	struct dso *dso = dso__new(name);
-	struct map *map = map__new2(0, dso, MAP__FUNCTION);
+
+	if (dso)
+		map = map__new2(0, dso, MAP__FUNCTION);
 
 	return map;
 }
diff --git a/tools/perf/util/symbol.h b/tools/perf/util/symbol.h
index 5649d63..af0752b 100644
--- a/tools/perf/util/symbol.h
+++ b/tools/perf/util/symbol.h
@@ -9,6 +9,7 @@
 #include <linux/list.h>
 #include <linux/rbtree.h>
 #include <stdio.h>
+#include <byteswap.h>
 
 #ifdef HAVE_CPLUS_DEMANGLE
 extern char *cplus_demangle(const char *, int);
@@ -160,11 +161,18 @@
 	DSO_TYPE_GUEST_KERNEL
 };
 
+enum dso_swap_type {
+	DSO_SWAP__UNSET,
+	DSO_SWAP__NO,
+	DSO_SWAP__YES,
+};
+
 struct dso {
 	struct list_head node;
 	struct rb_root	 symbols[MAP__NR_TYPES];
 	struct rb_root	 symbol_names[MAP__NR_TYPES];
 	enum dso_kernel_type	kernel;
+	enum dso_swap_type	needs_swap;
 	u8		 adjust_symbols:1;
 	u8		 has_build_id:1;
 	u8		 hit:1;
@@ -182,6 +190,28 @@
 	char		 name[0];
 };
 
+#define DSO__SWAP(dso, type, val)			\
+({							\
+	type ____r = val;				\
+	BUG_ON(dso->needs_swap == DSO_SWAP__UNSET);	\
+	if (dso->needs_swap == DSO_SWAP__YES) {		\
+		switch (sizeof(____r)) {		\
+		case 2:					\
+			____r = bswap_16(val);		\
+			break;				\
+		case 4:					\
+			____r = bswap_32(val);		\
+			break;				\
+		case 8:					\
+			____r = bswap_64(val);		\
+			break;				\
+		default:				\
+			BUG_ON(1);			\
+		}					\
+	}						\
+	____r;						\
+})
+
 struct dso *dso__new(const char *name);
 void dso__delete(struct dso *dso);
 
diff --git a/tools/perf/util/thread_map.c b/tools/perf/util/thread_map.c
index 84d9bd78..9b5f856 100644
--- a/tools/perf/util/thread_map.c
+++ b/tools/perf/util/thread_map.c
@@ -188,28 +188,27 @@
 		nt = realloc(threads, (sizeof(*threads) +
 				       sizeof(pid_t) * total_tasks));
 		if (nt == NULL)
-			goto out_free_threads;
+			goto out_free_namelist;
 
 		threads = nt;
 
-		if (threads) {
-			for (i = 0; i < items; i++)
-				threads->map[j++] = atoi(namelist[i]->d_name);
-			threads->nr = total_tasks;
-		}
-
-		for (i = 0; i < items; i++)
+		for (i = 0; i < items; i++) {
+			threads->map[j++] = atoi(namelist[i]->d_name);
 			free(namelist[i]);
+		}
+		threads->nr = total_tasks;
 		free(namelist);
-
-		if (!threads)
-			break;
 	}
 
 out:
 	strlist__delete(slist);
 	return threads;
 
+out_free_namelist:
+	for (i = 0; i < items; i++)
+		free(namelist[i]);
+	free(namelist);
+
 out_free_threads:
 	free(threads);
 	threads = NULL;
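The thread_map fix above makes the realloc() failure path free every scandir() entry and the namelist array before falling through to the existing cleanup. A minimal sketch of that goto-cleanup ordering against /proc/self/task; the directory and the result array are illustrative:

/* Sketch only: demonstrates the cleanup ordering, not the perf thread map code. */
#include <stdio.h>
#include <stdlib.h>
#include <dirent.h>

int main(void)
{
	struct dirent **namelist = NULL;
	int *result = NULL;
	int i, items;

	items = scandir("/proc/self/task", &namelist, NULL, NULL);
	if (items <= 0)
		return 1;

	result = malloc(items * sizeof(*result));
	if (result == NULL)
		goto out_free_namelist;	/* nothing consumed yet, free it all */

	for (i = 0; i < items; i++) {
		result[i] = atoi(namelist[i]->d_name);
		free(namelist[i]);	/* entries are consumed on the success path */
	}
	free(namelist);
	printf("found %d entries\n", items);
	free(result);
	return 0;

out_free_namelist:
	for (i = 0; i < items; i++)
		free(namelist[i]);
	free(namelist);
	return 1;
}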
diff --git a/tools/power/x86/turbostat/turbostat.c b/tools/power/x86/turbostat/turbostat.c
index ab2f682..16de7ad 100644
--- a/tools/power/x86/turbostat/turbostat.c
+++ b/tools/power/x86/turbostat/turbostat.c
@@ -73,8 +73,8 @@
 char *progname;
 
 int num_cpus;
-cpu_set_t *cpu_mask;
-size_t cpu_mask_size;
+cpu_set_t *cpu_present_set, *cpu_mask;
+size_t cpu_present_setsize, cpu_mask_size;
 
 struct counters {
 	unsigned long long tsc;		/* per thread */
@@ -103,6 +103,12 @@
 struct timeval tv_odd;
 struct timeval tv_delta;
 
+int mark_cpu_present(int pkg, int core, int cpu)
+{
+	CPU_SET_S(cpu, cpu_present_setsize, cpu_present_set);
+	return 0;
+}
+
 /*
  * cpu_mask_init(ncpus)
  *
@@ -118,6 +124,18 @@
 	}
 	cpu_mask_size = CPU_ALLOC_SIZE(ncpus);
 	CPU_ZERO_S(cpu_mask_size, cpu_mask);
+
+	/*
+	 * Allocate and initialize cpu_present_set
+	 */
+	cpu_present_set = CPU_ALLOC(ncpus);
+	if (cpu_present_set == NULL) {
+		perror("CPU_ALLOC");
+		exit(3);
+	}
+	cpu_present_setsize = CPU_ALLOC_SIZE(ncpus);
+	CPU_ZERO_S(cpu_present_setsize, cpu_present_set);
+	for_all_cpus(mark_cpu_present);
 }
 
 void cpu_mask_uninit()
@@ -125,6 +143,9 @@
 	CPU_FREE(cpu_mask);
 	cpu_mask = NULL;
 	cpu_mask_size = 0;
+	CPU_FREE(cpu_present_set);
+	cpu_present_set = NULL;
+	cpu_present_setsize = 0;
 }
 
 int cpu_migrate(int cpu)
@@ -912,6 +933,8 @@
 	switch (model) {
 	case 0x2A:
 	case 0x2D:
+	case 0x3A:	/* IVB */
+	case 0x3E:	/* IVB Xeon */
 		return 1;
 	}
 	return 0;
@@ -1047,6 +1070,9 @@
 	int retval;
 	pid_t child_pid;
 	get_counters(cnt_even);
+
+	/* clear affinity side-effect of get_counters() */
+	sched_setaffinity(0, cpu_present_setsize, cpu_present_set);
 	gettimeofday(&tv_even, (struct timezone *)NULL);
 
 	child_pid = fork();
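turbostat now keeps a dynamically sized cpu_present_set and re-applies it with sched_setaffinity() to undo the pinning done while reading counters. A small standalone example of the CPU_ALLOC()/CPU_*_S() and sched_setaffinity() usage; the CPU count source here is just sysconf(), not turbostat's topology walk:

/* Sketch only; marks every configured CPU rather than walking the topology. */
#define _GNU_SOURCE
#include <stdio.h>
#include <stdlib.h>
#include <unistd.h>
#include <sched.h>

int main(void)
{
	long ncpus = sysconf(_SC_NPROCESSORS_CONF);
	size_t setsize;
	cpu_set_t *present;
	int cpu;

	present = CPU_ALLOC(ncpus);
	if (present == NULL) {
		perror("CPU_ALLOC");
		return 1;
	}
	setsize = CPU_ALLOC_SIZE(ncpus);
	CPU_ZERO_S(setsize, present);

	/* Mark every configured CPU as usable (stand-in for mark_cpu_present()). */
	for (cpu = 0; cpu < ncpus; cpu++)
		CPU_SET_S(cpu, setsize, present);

	/* Restore the wide affinity mask, undoing any earlier pinning. */
	if (sched_setaffinity(0, setsize, present))
		perror("sched_setaffinity");

	CPU_FREE(present);
	return 0;
}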
diff --git a/tools/testing/selftests/Makefile b/tools/testing/selftests/Makefile
index 28bc57e..a4162e1 100644
--- a/tools/testing/selftests/Makefile
+++ b/tools/testing/selftests/Makefile
@@ -1,4 +1,4 @@
-TARGETS = breakpoints vm
+TARGETS = breakpoints kcmp mqueue vm
 
 all:
 	for TARGET in $(TARGETS); do \
diff --git a/tools/testing/selftests/kcmp/Makefile b/tools/testing/selftests/kcmp/Makefile
new file mode 100644
index 0000000..dc79b86
--- /dev/null
+++ b/tools/testing/selftests/kcmp/Makefile
@@ -0,0 +1,29 @@
+uname_M := $(shell uname -m 2>/dev/null || echo not)
+ARCH ?= $(shell echo $(uname_M) | sed -e s/i.86/i386/)
+ifeq ($(ARCH),i386)
+        ARCH := X86
+	CFLAGS := -DCONFIG_X86_32 -D__i386__
+endif
+ifeq ($(ARCH),x86_64)
+	ARCH := X86
+	CFLAGS := -DCONFIG_X86_64 -D__x86_64__
+endif
+
+CFLAGS += -I../../../../arch/x86/include/generated/
+CFLAGS += -I../../../../include/
+CFLAGS += -I../../../../usr/include/
+CFLAGS += -I../../../../arch/x86/include/
+
+all:
+ifeq ($(ARCH),X86)
+	gcc $(CFLAGS) kcmp_test.c -o run_test
+else
+	echo "Not an x86 target, can't build kcmp selftest"
+endif
+
+run_tests: all
+	./run_test
+
+clean:
+	rm -fr ./run_test
+	rm -fr ./kcmp-test-file
diff --git a/tools/testing/selftests/kcmp/kcmp_test.c b/tools/testing/selftests/kcmp/kcmp_test.c
new file mode 100644
index 0000000..358cc6b
--- /dev/null
+++ b/tools/testing/selftests/kcmp/kcmp_test.c
@@ -0,0 +1,94 @@
+#define _GNU_SOURCE
+
+#include <stdio.h>
+#include <stdlib.h>
+#include <signal.h>
+#include <limits.h>
+#include <unistd.h>
+#include <errno.h>
+#include <string.h>
+#include <fcntl.h>
+
+#include <linux/unistd.h>
+#include <linux/kcmp.h>
+
+#include <sys/syscall.h>
+#include <sys/types.h>
+#include <sys/stat.h>
+#include <sys/wait.h>
+
+static long sys_kcmp(int pid1, int pid2, int type, int fd1, int fd2)
+{
+	return syscall(__NR_kcmp, pid1, pid2, type, fd1, fd2);
+}
+
+int main(int argc, char **argv)
+{
+	const char kpath[] = "kcmp-test-file";
+	int pid1, pid2;
+	int fd1, fd2;
+	int status;
+
+	fd1 = open(kpath, O_RDWR | O_CREAT | O_TRUNC, 0644);
+	pid1 = getpid();
+
+	if (fd1 < 0) {
+		perror("Can't create file");
+		exit(1);
+	}
+
+	pid2 = fork();
+	if (pid2 < 0) {
+		perror("fork failed");
+		exit(1);
+	}
+
+	if (!pid2) {
+		int pid2 = getpid();
+		int ret;
+
+		fd2 = open(kpath, O_RDWR, 0644);
+		if (fd2 < 0) {
+			perror("Can't open file");
+			exit(1);
+		}
+
+		/* An example of output and arguments */
+		printf("pid1: %6d pid2: %6d FD: %2ld FILES: %2ld VM: %2ld "
+		       "FS: %2ld SIGHAND: %2ld IO: %2ld SYSVSEM: %2ld "
+		       "INV: %2ld\n",
+		       pid1, pid2,
+		       sys_kcmp(pid1, pid2, KCMP_FILE,		fd1, fd2),
+		       sys_kcmp(pid1, pid2, KCMP_FILES,		0, 0),
+		       sys_kcmp(pid1, pid2, KCMP_VM,		0, 0),
+		       sys_kcmp(pid1, pid2, KCMP_FS,		0, 0),
+		       sys_kcmp(pid1, pid2, KCMP_SIGHAND,	0, 0),
+		       sys_kcmp(pid1, pid2, KCMP_IO,		0, 0),
+		       sys_kcmp(pid1, pid2, KCMP_SYSVSEM,	0, 0),
+
+			/* This one should fail */
+		       sys_kcmp(pid1, pid2, KCMP_TYPES + 1,	0, 0));
+
+		/* This one should return same fd */
+		ret = sys_kcmp(pid1, pid2, KCMP_FILE, fd1, fd1);
+		if (ret) {
+			printf("FAIL: 0 expected but %d returned\n", ret);
+			ret = -1;
+		} else
+			printf("PASS: 0 returned as expected\n");
+
+		/* Compare with self */
+		ret = sys_kcmp(pid1, pid1, KCMP_VM, 0, 0);
+		if (ret) {
+			printf("FAIL: 0 expected but %d returned\n", ret);
+			ret = -1;
+		} else
+			printf("PASS: 0 returned as expected\n");
+
+		exit(ret);
+	}
+
+	waitpid(pid2, &status, 0);
+
+	return 0;
+}
diff --git a/tools/testing/selftests/mqueue/.gitignore b/tools/testing/selftests/mqueue/.gitignore
new file mode 100644
index 0000000..d8d4237
--- /dev/null
+++ b/tools/testing/selftests/mqueue/.gitignore
@@ -0,0 +1,2 @@
+mq_open_tests
+mq_perf_tests
diff --git a/tools/testing/selftests/mqueue/Makefile b/tools/testing/selftests/mqueue/Makefile
new file mode 100644
index 0000000..54c0aad
--- /dev/null
+++ b/tools/testing/selftests/mqueue/Makefile
@@ -0,0 +1,10 @@
+all:
+	gcc -O2 mq_open_tests.c -o mq_open_tests -lrt
+	gcc -O2 -o mq_perf_tests mq_perf_tests.c -lrt -lpthread -lpopt
+
+run_tests:
+	./mq_open_tests /test1
+	./mq_perf_tests
+
+clean:
+	rm -f mq_open_tests mq_perf_tests
diff --git a/tools/testing/selftests/mqueue/mq_open_tests.c b/tools/testing/selftests/mqueue/mq_open_tests.c
new file mode 100644
index 0000000..711cc29
--- /dev/null
+++ b/tools/testing/selftests/mqueue/mq_open_tests.c
@@ -0,0 +1,492 @@
+/*
+ * This application is Copyright 2012 Red Hat, Inc.
+ *	Doug Ledford <dledford@redhat.com>
+ *
+ * mq_open_tests is free software: you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation, version 3.
+ *
+ * mq_open_tests is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * For the full text of the license, see <http://www.gnu.org/licenses/>.
+ *
+ * mq_open_tests.c
+ *   Tests the various situations that should either succeed or fail to
+ *   open a posix message queue and then reports whether or not they
+ *   did as they were supposed to.
+ *
+ */
+#include <stdio.h>
+#include <stdlib.h>
+#include <unistd.h>
+#include <fcntl.h>
+#include <string.h>
+#include <limits.h>
+#include <errno.h>
+#include <sys/types.h>
+#include <sys/time.h>
+#include <sys/resource.h>
+#include <sys/stat.h>
+#include <mqueue.h>
+
+static char *usage =
+"Usage:\n"
+"  %s path\n"
+"\n"
+"	path	Path name of the message queue to create\n"
+"\n"
+"	Note: this program must be run as root in order to enable all tests\n"
+"\n";
+
+char *DEF_MSGS = "/proc/sys/fs/mqueue/msg_default";
+char *DEF_MSGSIZE = "/proc/sys/fs/mqueue/msgsize_default";
+char *MAX_MSGS = "/proc/sys/fs/mqueue/msg_max";
+char *MAX_MSGSIZE = "/proc/sys/fs/mqueue/msgsize_max";
+
+int default_settings;
+struct rlimit saved_limits, cur_limits;
+int saved_def_msgs, saved_def_msgsize, saved_max_msgs, saved_max_msgsize;
+int cur_def_msgs, cur_def_msgsize, cur_max_msgs, cur_max_msgsize;
+FILE *def_msgs, *def_msgsize, *max_msgs, *max_msgsize;
+char *queue_path;
+mqd_t queue = -1;
+
+static inline void __set(FILE *stream, int value, char *err_msg);
+void shutdown(int exit_val, char *err_cause, int line_no);
+static inline int get(FILE *stream);
+static inline void set(FILE *stream, int value);
+static inline void getr(int type, struct rlimit *rlim);
+static inline void setr(int type, struct rlimit *rlim);
+void validate_current_settings();
+static inline void test_queue(struct mq_attr *attr, struct mq_attr *result);
+static inline int test_queue_fail(struct mq_attr *attr, struct mq_attr *result);
+
+static inline void __set(FILE *stream, int value, char *err_msg)
+{
+	rewind(stream);
+	if (fprintf(stream, "%d", value) < 0)
+		perror(err_msg);
+}
+
+
+void shutdown(int exit_val, char *err_cause, int line_no)
+{
+	static int in_shutdown = 0;
+
+	/* In case we get called recursively by a set() call below */
+	if (in_shutdown++)
+		return;
+
+	seteuid(0);
+
+	if (queue != -1)
+		if (mq_close(queue))
+			perror("mq_close() during shutdown");
+	if (queue_path)
+		/*
+		 * Be silent if this fails, if we cleaned up already it's
+		 * expected to fail
+		 */
+		mq_unlink(queue_path);
+	if (default_settings) {
+		if (saved_def_msgs)
+			__set(def_msgs, saved_def_msgs,
+			      "failed to restore saved_def_msgs");
+		if (saved_def_msgsize)
+			__set(def_msgsize, saved_def_msgsize,
+			      "failed to restore saved_def_msgsize");
+	}
+	if (saved_max_msgs)
+		__set(max_msgs, saved_max_msgs,
+		      "failed to restore saved_max_msgs");
+	if (saved_max_msgsize)
+		__set(max_msgsize, saved_max_msgsize,
+		      "failed to restore saved_max_msgsize");
+	if (exit_val)
+		error(exit_val, errno, "%s at %d", err_cause, line_no);
+	exit(0);
+}
+
+static inline int get(FILE *stream)
+{
+	int value;
+	rewind(stream);
+	if (fscanf(stream, "%d", &value) != 1)
+		shutdown(4, "Error reading /proc entry", __LINE__ - 1);
+	return value;
+}
+
+static inline void set(FILE *stream, int value)
+{
+	int new_value;
+
+	rewind(stream);
+	if (fprintf(stream, "%d", value) < 0)
+		return shutdown(5, "Failed writing to /proc file",
+				__LINE__ - 1);
+	new_value = get(stream);
+	if (new_value != value)
+		return shutdown(5, "We didn't get what we wrote to /proc back",
+				__LINE__ - 1);
+}
+
+static inline void getr(int type, struct rlimit *rlim)
+{
+	if (getrlimit(type, rlim))
+		shutdown(6, "getrlimit()", __LINE__ - 1);
+}
+
+static inline void setr(int type, struct rlimit *rlim)
+{
+	if (setrlimit(type, rlim))
+		shutdown(7, "setrlimit()", __LINE__ - 1);
+}
+
+void validate_current_settings()
+{
+	int rlim_needed;
+
+	if (cur_limits.rlim_cur < 4096) {
+		printf("Current rlimit value for POSIX message queue bytes is "
+		       "unreasonably low,\nincreasing.\n\n");
+		cur_limits.rlim_cur = 8192;
+		cur_limits.rlim_max = 16384;
+		setr(RLIMIT_MSGQUEUE, &cur_limits);
+	}
+
+	if (default_settings) {
+		rlim_needed = (cur_def_msgs + 1) * (cur_def_msgsize + 1 +
+						    2 * sizeof(void *));
+		if (rlim_needed > cur_limits.rlim_cur) {
+			printf("Temporarily lowering default queue parameters "
+			       "to something that will work\n"
+			       "with the current rlimit values.\n\n");
+			set(def_msgs, 10);
+			cur_def_msgs = 10;
+			set(def_msgsize, 128);
+			cur_def_msgsize = 128;
+		}
+	} else {
+		rlim_needed = (cur_max_msgs + 1) * (cur_max_msgsize + 1 +
+						    2 * sizeof(void *));
+		if (rlim_needed > cur_limits.rlim_cur) {
+			printf("Temporarily lowering maximum queue parameters "
+			       "to something that will work\n"
+			       "with the current rlimit values in case this is "
+			       "a kernel that ties the default\n"
+			       "queue parameters to the maximum queue "
+			       "parameters.\n\n");
+			set(max_msgs, 10);
+			cur_max_msgs = 10;
+			set(max_msgsize, 128);
+			cur_max_msgsize = 128;
+		}
+	}
+}
+
+/*
+ * test_queue - Test opening a queue, shutdown if we fail.  This should
+ * only be called in situations that should never fail.  We clean up
+ * after ourselves and return the queue attributes in *result.
+ */
+static inline void test_queue(struct mq_attr *attr, struct mq_attr *result)
+{
+	int flags = O_RDWR | O_EXCL | O_CREAT;
+	int perms = DEFFILEMODE;
+
+	if ((queue = mq_open(queue_path, flags, perms, attr)) == -1)
+		shutdown(1, "mq_open()", __LINE__);
+	if (mq_getattr(queue, result))
+		shutdown(1, "mq_getattr()", __LINE__);
+	if (mq_close(queue))
+		shutdown(1, "mq_close()", __LINE__);
+	queue = -1;
+	if (mq_unlink(queue_path))
+		shutdown(1, "mq_unlink()", __LINE__);
+}
+
+/*
+ * Same as test_queue above, but failure is not fatal.
+ * Returns:
+ * 0 - Failed to create a queue
+ * 1 - Created a queue, attributes in *result
+ */
+static inline int test_queue_fail(struct mq_attr *attr, struct mq_attr *result)
+{
+	int flags = O_RDWR | O_EXCL | O_CREAT;
+	int perms = DEFFILEMODE;
+
+	if ((queue = mq_open(queue_path, flags, perms, attr)) == -1)
+		return 0;
+	if (mq_getattr(queue, result))
+		shutdown(1, "mq_getattr()", __LINE__);
+	if (mq_close(queue))
+		shutdown(1, "mq_close()", __LINE__);
+	queue = -1;
+	if (mq_unlink(queue_path))
+		shutdown(1, "mq_unlink()", __LINE__);
+	return 1;
+}
+
+int main(int argc, char *argv[])
+{
+	struct mq_attr attr, result;
+
+	if (argc != 2) {
+		fprintf(stderr, "Must pass a valid queue name\n\n");
+		fprintf(stderr, usage, argv[0]);
+		exit(1);
+	}
+
+	/*
+	 * Although we can create a msg queue with a non-absolute path name,
+	 * unlink will fail.  So, if the name doesn't start with a /, add one
+	 * when we save it.
+	 */
+	if (*argv[1] == '/')
+		queue_path = strdup(argv[1]);
+	else {
+		queue_path = malloc(strlen(argv[1]) + 2);
+		if (!queue_path) {
+			perror("malloc()");
+			exit(1);
+		}
+		queue_path[0] = '/';
+		queue_path[1] = 0;
+		strcat(queue_path, argv[1]);
+	}
+
+	if (getuid() != 0) {
+		fprintf(stderr, "Not running as root, but almost all tests "
+			"require root in order to modify\nsystem settings.  "
+			"Exiting.\n");
+		exit(1);
+	}
+
+	/* Find out what files there are for us to make tweaks in */
+	def_msgs = fopen(DEF_MSGS, "r+");
+	def_msgsize = fopen(DEF_MSGSIZE, "r+");
+	max_msgs = fopen(MAX_MSGS, "r+");
+	max_msgsize = fopen(MAX_MSGSIZE, "r+");
+
+	if (!max_msgs)
+		shutdown(2, "Failed to open msg_max", __LINE__);
+	if (!max_msgsize)
+		shutdown(2, "Failed to open msgsize_max", __LINE__);
+	if (def_msgs || def_msgsize)
+		default_settings = 1;
+
+	/* Load up the current system values for everything we can */
+	getr(RLIMIT_MSGQUEUE, &saved_limits);
+	cur_limits = saved_limits;
+	if (default_settings) {
+		saved_def_msgs = cur_def_msgs = get(def_msgs);
+		saved_def_msgsize = cur_def_msgsize = get(def_msgsize);
+	}
+	saved_max_msgs = cur_max_msgs = get(max_msgs);
+	saved_max_msgsize = cur_max_msgsize = get(max_msgsize);
+
+	/* Tell the user our initial state */
+	printf("\nInitial system state:\n");
+	printf("\tUsing queue path:\t\t%s\n", queue_path);
+	printf("\tRLIMIT_MSGQUEUE(soft):\t\t%ld\n", (long)saved_limits.rlim_cur);
+	printf("\tRLIMIT_MSGQUEUE(hard):\t\t%ld\n", (long)saved_limits.rlim_max);
+	printf("\tMaximum Message Size:\t\t%d\n", saved_max_msgsize);
+	printf("\tMaximum Queue Size:\t\t%d\n", saved_max_msgs);
+	if (default_settings) {
+		printf("\tDefault Message Size:\t\t%d\n", saved_def_msgsize);
+		printf("\tDefault Queue Size:\t\t%d\n", saved_def_msgs);
+	} else {
+		printf("\tDefault Message Size:\t\tNot Supported\n");
+		printf("\tDefault Queue Size:\t\tNot Supported\n");
+	}
+	printf("\n");
+
+	validate_current_settings();
+
+	printf("Adjusted system state for testing:\n");
+	printf("\tRLIMIT_MSGQUEUE(soft):\t\t%ld\n", (long)cur_limits.rlim_cur);
+	printf("\tRLIMIT_MSGQUEUE(hard):\t\t%ld\n", (long)cur_limits.rlim_max);
+	printf("\tMaximum Message Size:\t\t%d\n", cur_max_msgsize);
+	printf("\tMaximum Queue Size:\t\t%d\n", cur_max_msgs);
+	if (default_settings) {
+		printf("\tDefault Message Size:\t\t%d\n", cur_def_msgsize);
+		printf("\tDefault Queue Size:\t\t%d\n", cur_def_msgs);
+	}
+
+	printf("\n\nTest series 1, behavior when no attr struct "
+	       "passed to mq_open:\n");
+	if (!default_settings) {
+		test_queue(NULL, &result);
+		printf("Given sane system settings, mq_open without an attr "
+		       "struct succeeds:\tPASS\n");
+		if (result.mq_maxmsg != cur_max_msgs ||
+		    result.mq_msgsize != cur_max_msgsize) {
+			printf("Kernel does not support setting the default "
+			       "mq attributes,\nbut also doesn't tie the "
+			       "defaults to the maximums:\t\t\tPASS\n");
+		} else {
+			set(max_msgs, ++cur_max_msgs);
+			set(max_msgsize, ++cur_max_msgsize);
+			test_queue(NULL, &result);
+			if (result.mq_maxmsg == cur_max_msgs &&
+			    result.mq_msgsize == cur_max_msgsize)
+				printf("Kernel does not support setting the "
+				       "default mq attributes and\n"
+				       "also ties system wide defaults to "
+				       "the system wide maximums:\t\t"
+				       "FAIL\n");
+			else
+				printf("Kernel does not support setting the "
+				       "default mq attributes,\n"
+				       "but also doesn't tie the defaults to "
+				       "the maximums:\t\t\tPASS\n");
+		}
+	} else {
+		printf("Kernel supports setting defaults separately from "
+		       "maximums:\t\tPASS\n");
+		/*
+		 * While we are here, go ahead and test that the kernel
+		 * properly follows the default settings
+		 */
+		test_queue(NULL, &result);
+		printf("Given sane values, mq_open without an attr struct "
+		       "succeeds:\t\tPASS\n");
+		if (result.mq_maxmsg != cur_def_msgs ||
+		    result.mq_msgsize != cur_def_msgsize)
+			printf("Kernel supports setting defaults, but does "
+			       "not actually honor them:\tFAIL\n\n");
+		else {
+			set(def_msgs, ++cur_def_msgs);
+			set(def_msgsize, ++cur_def_msgsize);
+			/* In case max was the same as the default */
+			set(max_msgs, ++cur_max_msgs);
+			set(max_msgsize, ++cur_max_msgsize);
+			test_queue(NULL, &result);
+			if (result.mq_maxmsg != cur_def_msgs ||
+			    result.mq_msgsize != cur_def_msgsize)
+				printf("Kernel supports setting defaults, but "
+				       "does not actually honor them:\t"
+				       "FAIL\n");
+			else
+				printf("Kernel properly honors default setting "
+				       "knobs:\t\t\t\tPASS\n");
+		}
+		set(def_msgs, cur_max_msgs + 1);
+		cur_def_msgs = cur_max_msgs + 1;
+		set(def_msgsize, cur_max_msgsize + 1);
+		cur_def_msgsize = cur_max_msgsize + 1;
+		if (cur_def_msgs * (cur_def_msgsize + 2 * sizeof(void *)) >=
+		    cur_limits.rlim_cur) {
+			cur_limits.rlim_cur = (cur_def_msgs + 2) *
+				(cur_def_msgsize + 2 * sizeof(void *));
+			cur_limits.rlim_max = 2 * cur_limits.rlim_cur;
+			setr(RLIMIT_MSGQUEUE, &cur_limits);
+		}
+		if (test_queue_fail(NULL, &result)) {
+			if (result.mq_maxmsg == cur_max_msgs &&
+			    result.mq_msgsize == cur_max_msgsize)
+				printf("Kernel properly limits default values "
+				       "to lesser of default/max:\t\tPASS\n");
+			else
+				printf("Kernel does not properly set default "
+				       "queue parameters when\ndefaults > "
+				       "max:\t\t\t\t\t\t\t\tFAIL\n");
+		} else
+			printf("Kernel fails to open mq because defaults are "
+			       "greater than maximums:\tFAIL\n");
+		set(def_msgs, --cur_def_msgs);
+		set(def_msgsize, --cur_def_msgsize);
+		cur_limits.rlim_cur = cur_limits.rlim_max = cur_def_msgs *
+			cur_def_msgsize;
+		setr(RLIMIT_MSGQUEUE, &cur_limits);
+		if (test_queue_fail(NULL, &result))
+			printf("Kernel creates queue even though defaults "
+			       "would exceed\nrlimit setting:"
+			       "\t\t\t\t\t\t\t\tFAIL\n");
+		else
+			printf("Kernel properly fails to create queue when "
+			       "defaults would\nexceed rlimit:"
+			       "\t\t\t\t\t\t\t\tPASS\n");
+	}
+
+	/*
+	 * Test #2 - open with an attr struct that exceeds rlimit
+	 */
+	printf("\n\nTest series 2, behavior when attr struct is "
+	       "passed to mq_open:\n");
+	cur_max_msgs = 32;
+	cur_max_msgsize = cur_limits.rlim_max >> 4;
+	set(max_msgs, cur_max_msgs);
+	set(max_msgsize, cur_max_msgsize);
+	attr.mq_maxmsg = cur_max_msgs;
+	attr.mq_msgsize = cur_max_msgsize;
+	if (test_queue_fail(&attr, &result))
+		printf("Queue open in excess of rlimit max when euid = 0 "
+		       "succeeded:\t\tFAIL\n");
+	else
+		printf("Queue open in excess of rlimit max when euid = 0 "
+		       "failed:\t\tPASS\n");
+	attr.mq_maxmsg = cur_max_msgs + 1;
+	attr.mq_msgsize = 10;
+	if (test_queue_fail(&attr, &result))
+		printf("Queue open with mq_maxmsg > limit when euid = 0 "
+		       "succeeded:\t\tPASS\n");
+	else
+		printf("Queue open with mq_maxmsg > limit when euid = 0 "
+		       "failed:\t\tFAIL\n");
+	attr.mq_maxmsg = 1;
+	attr.mq_msgsize = cur_max_msgsize + 1;
+	if (test_queue_fail(&attr, &result))
+		printf("Queue open with mq_msgsize > limit when euid = 0 "
+		       "succeeded:\t\tPASS\n");
+	else
+		printf("Queue open with mq_msgsize > limit when euid = 0 "
+		       "failed:\t\tFAIL\n");
+	attr.mq_maxmsg = 65536;
+	attr.mq_msgsize = 65536;
+	if (test_queue_fail(&attr, &result))
+		printf("Queue open with total size > 2GB when euid = 0 "
+		       "succeeded:\t\tFAIL\n");
+	else
+		printf("Queue open with total size > 2GB when euid = 0 "
+		       "failed:\t\t\tPASS\n");
+	seteuid(99);
+	attr.mq_maxmsg = cur_max_msgs;
+	attr.mq_msgsize = cur_max_msgsize;
+	if (test_queue_fail(&attr, &result))
+		printf("Queue open in excess of rlimit max when euid = 99 "
+		       "succeeded:\t\tFAIL\n");
+	else
+		printf("Queue open in excess of rlimit max when euid = 99 "
+		       "failed:\t\tPASS\n");
+	attr.mq_maxmsg = cur_max_msgs + 1;
+	attr.mq_msgsize = 10;
+	if (test_queue_fail(&attr, &result))
+		printf("Queue open with mq_maxmsg > limit when euid = 99 "
+		       "succeeded:\t\tFAIL\n");
+	else
+		printf("Queue open with mq_maxmsg > limit when euid = 99 "
+		       "failed:\t\tPASS\n");
+	attr.mq_maxmsg = 1;
+	attr.mq_msgsize = cur_max_msgsize + 1;
+	if (test_queue_fail(&attr, &result))
+		printf("Queue open with mq_msgsize > limit when euid = 99 "
+		       "succeeded:\t\tFAIL\n");
+	else
+		printf("Queue open with mq_msgsize > limit when euid = 99 "
+		       "failed:\t\tPASS\n");
+	attr.mq_maxmsg = 65536;
+	attr.mq_msgsize = 65536;
+	if (test_queue_fail(&attr, &result))
+		printf("Queue open with total size > 2GB when euid = 99 "
+		       "succeeded:\t\tFAIL\n");
+	else
+		printf("Queue open with total size > 2GB when euid = 99 "
+		       "failed:\t\t\tPASS\n");
+
+	shutdown(0, "", 0);
+}
diff --git a/tools/testing/selftests/mqueue/mq_perf_tests.c b/tools/testing/selftests/mqueue/mq_perf_tests.c
new file mode 100644
index 0000000..2fadd4b
--- /dev/null
+++ b/tools/testing/selftests/mqueue/mq_perf_tests.c
@@ -0,0 +1,741 @@
+/*
+ * This application is Copyright 2012 Red Hat, Inc.
+ *	Doug Ledford <dledford@redhat.com>
+ *
+ * mq_perf_tests is free software: you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation, version 3.
+ *
+ * mq_perf_tests is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * For the full text of the license, see <http://www.gnu.org/licenses/>.
+ *
+ * mq_perf_tests.c
+ *   Tests various types of message queue workloads, concentrating on those
+ *   situations that involve large message sizes, large message queue depths,
+ *   or both, and reports back useful metrics about kernel message queue
+ *   performance.
+ *
+ */
+#define _GNU_SOURCE
+#include <stdio.h>
+#include <stdlib.h>
+#include <unistd.h>
+#include <fcntl.h>
+#include <string.h>
+#include <limits.h>
+#include <errno.h>
+#include <error.h>
+#include <signal.h>
+#include <pthread.h>
+#include <sched.h>
+#include <sys/types.h>
+#include <sys/time.h>
+#include <sys/resource.h>
+#include <sys/stat.h>
+#include <mqueue.h>
+#include <popt.h>
+
+static char *usage =
+"Usage:\n"
+"  %s [-c #[,#..] -f] path\n"
+"\n"
+"	-c #	Skip most tests and go straight to a high queue depth test\n"
+"		and then run that test continuously (useful for running at\n"
+"		the same time as some other workload to see how much the\n"
+"		cache thrashing caused by adding messages to a very deep\n"
+"		queue impacts the performance of other programs).  The number\n"
+"		indicates which CPU core we should bind the process to during\n"
+"		the run.  If you have more than one physical CPU, then you\n"
+"		will need one copy per physical CPU package, and you should\n"
+"		specify the CPU cores to pin ourself to via a comma separated\n"
+"		list of CPU values.\n"
+"	-f	Only usable with continuous mode.  Pin ourself to the CPUs\n"
+"		as requested, then instead of looping doing a high mq\n"
+"		workload, just busy loop.  This will allow us to lock up a\n"
+"		single CPU just like we normally would, but without actually\n"
+"		thrashing the CPU cache.  This is to make it easier to get\n"
+"		comparable numbers from some other workload running on the\n"
+"		other CPUs.  One set of numbers with # CPUs locked up running\n"
+"		an mq workload, and another set of numbers with those same\n"
+"		CPUs locked away from the test workload, but not doing\n"
+"		anything to trash the cache like the mq workload might.\n"
+"	path	Path name of the message queue to create\n"
+"\n"
+"	Note: this program must be run as root in order to enable all tests\n"
+"\n";
+
+char *MAX_MSGS = "/proc/sys/fs/mqueue/msg_max";
+char *MAX_MSGSIZE = "/proc/sys/fs/mqueue/msgsize_max";
+
+#define min(a, b) ((a) < (b) ? (a) : (b))
+#define MAX_CPUS 64
+char *cpu_option_string;
+int cpus_to_pin[MAX_CPUS];
+int num_cpus_to_pin;
+pthread_t cpu_threads[MAX_CPUS];
+pthread_t main_thread;
+cpu_set_t *cpu_set;
+int cpu_set_size;
+int cpus_online;
+
+#define MSG_SIZE 16
+#define TEST1_LOOPS 10000000
+#define TEST2_LOOPS 100000
+int continuous_mode;
+int continuous_mode_fake;
+
+struct rlimit saved_limits, cur_limits;
+int saved_max_msgs, saved_max_msgsize;
+int cur_max_msgs, cur_max_msgsize;
+FILE *max_msgs, *max_msgsize;
+int cur_nice;
+char *queue_path = "/mq_perf_tests";
+mqd_t queue = -1;
+struct mq_attr result;
+int mq_prio_max;
+
+const struct poptOption options[] = {
+	{
+		.longName = "continuous",
+		.shortName = 'c',
+		.argInfo = POPT_ARG_STRING,
+		.arg = &cpu_option_string,
+		.val = 'c',
+		.descrip = "Run continuous tests at a high queue depth in "
+			"order to test the effects of cache thrashing on "
+			"other tasks on the system.  This test is intended "
+			"to be run on one core of each physical CPU while "
+			"some other CPU intensive task is run on all the other "
+			"cores of that same physical CPU and the other task "
+			"is timed.  It is assumed that the process of adding "
+			"messages to the message queue in a tight loop will "
+			"impact that other task to some degree.  Once the "
+			"tests are performed in this way, you should then "
+			"re-run the tests using fake mode in order to check "
+			"the difference in time required to perform the CPU "
+			"intensive task",
+		.argDescrip = "cpu[,cpu]",
+	},
+	{
+		.longName = "fake",
+		.shortName = 'f',
+		.argInfo = POPT_ARG_NONE,
+		.arg = &continuous_mode_fake,
+		.val = 0,
+		.descrip = "Tie up the CPUs that we would normally tie up in "
+			"continuous mode, but don't actually do any mq stuff, "
+			"just keep the CPU busy so it can't be used to process "
+			"system level tasks as this would free up resources on "
+			"the other CPU cores and skew the comparison between "
+			"the no-mqueue work and mqueue work tests",
+		.argDescrip = NULL,
+	},
+	{
+		.longName = "path",
+		.shortName = 'p',
+		.argInfo = POPT_ARG_STRING | POPT_ARGFLAG_SHOW_DEFAULT,
+		.arg = &queue_path,
+		.val = 'p',
+		.descrip = "The name of the path to use in the mqueue "
+			"filesystem for our tests",
+		.argDescrip = "pathname",
+	},
+	POPT_AUTOHELP
+	POPT_TABLEEND
+};
+
+static inline void __set(FILE *stream, int value, char *err_msg);
+void shutdown(int exit_val, char *err_cause, int line_no);
+void sig_action_SIGUSR1(int signum, siginfo_t *info, void *context);
+void sig_action(int signum, siginfo_t *info, void *context);
+static inline int get(FILE *stream);
+static inline void set(FILE *stream, int value);
+static inline int try_set(FILE *stream, int value);
+static inline void getr(int type, struct rlimit *rlim);
+static inline void setr(int type, struct rlimit *rlim);
+static inline void open_queue(struct mq_attr *attr);
+void increase_limits(void);
+
+static inline void __set(FILE *stream, int value, char *err_msg)
+{
+	rewind(stream);
+	if (fprintf(stream, "%d", value) < 0)
+		perror(err_msg);
+}
+
+
+void shutdown(int exit_val, char *err_cause, int line_no)
+{
+	static int in_shutdown = 0;
+	int errno_at_shutdown = errno;
+	int i;
+
+	/* In case we get called by multiple threads or from an sighandler */
+	if (in_shutdown++)
+		return;
+
+	for (i = 0; i < num_cpus_to_pin; i++)
+		if (cpu_threads[i]) {
+			pthread_kill(cpu_threads[i], SIGUSR1);
+			pthread_join(cpu_threads[i], NULL);
+		}
+
+	if (queue != -1)
+		if (mq_close(queue))
+			perror("mq_close() during shutdown");
+	if (queue_path)
+		/*
+		 * Be silent if this fails, if we cleaned up already it's
+		 * expected to fail
+		 */
+		mq_unlink(queue_path);
+	if (saved_max_msgs)
+		__set(max_msgs, saved_max_msgs,
+		      "failed to restore saved_max_msgs");
+	if (saved_max_msgsize)
+		__set(max_msgsize, saved_max_msgsize,
+		      "failed to restore saved_max_msgsize");
+	if (exit_val)
+		error(exit_val, errno_at_shutdown, "%s at %d",
+		      err_cause, line_no);
+	exit(0);
+}
+
+void sig_action_SIGUSR1(int signum, siginfo_t *info, void *context)
+{
+	if (pthread_self() != main_thread)
+		pthread_exit(0);
+	else {
+		fprintf(stderr, "Caught signal %d in SIGUSR1 handler, "
+				"exiting\n", signum);
+		shutdown(0, "", 0);
+		fprintf(stderr, "\n\nReturned from shutdown?!?!\n\n");
+		exit(0);
+	}
+}
+
+void sig_action(int signum, siginfo_t *info, void *context)
+{
+	if (pthread_self() != main_thread)
+		pthread_kill(main_thread, signum);
+	else {
+		fprintf(stderr, "Caught signal %d, exiting\n", signum);
+		shutdown(0, "", 0);
+		fprintf(stderr, "\n\nReturned from shutdown?!?!\n\n");
+		exit(0);
+	}
+}
+
+static inline int get(FILE *stream)
+{
+	int value;
+	rewind(stream);
+	if (fscanf(stream, "%d", &value) != 1)
+		shutdown(4, "Error reading /proc entry", __LINE__);
+	return value;
+}
+
+static inline void set(FILE *stream, int value)
+{
+	int new_value;
+
+	rewind(stream);
+	if (fprintf(stream, "%d", value) < 0)
+		return shutdown(5, "Failed writing to /proc file", __LINE__);
+	new_value = get(stream);
+	if (new_value != value)
+		return shutdown(5, "We didn't get what we wrote to /proc back",
+				__LINE__);
+}
+
+static inline int try_set(FILE *stream, int value)
+{
+	int new_value;
+
+	rewind(stream);
+	fprintf(stream, "%d", value);
+	new_value = get(stream);
+	return new_value == value;
+}
+
+static inline void getr(int type, struct rlimit *rlim)
+{
+	if (getrlimit(type, rlim))
+		shutdown(6, "getrlimit()", __LINE__);
+}
+
+static inline void setr(int type, struct rlimit *rlim)
+{
+	if (setrlimit(type, rlim))
+		shutdown(7, "setrlimit()", __LINE__);
+}
+
+/**
+ * open_queue - open the global queue for testing
+ * @attr - An attr struct specifying the desired queue traits
+ *
+ * This open is not allowed to fail; failure will result in an orderly
+ * shutdown of the program.  The global queue_path selects which queue
+ * to open, the queue descriptor is saved in the global queue variable,
+ * and the attributes the queue was actually created with are read back
+ * into the global result struct.
+ */
+static inline void open_queue(struct mq_attr *attr)
+{
+	int flags = O_RDWR | O_EXCL | O_CREAT | O_NONBLOCK;
+	int perms = DEFFILEMODE;
+
+	queue = mq_open(queue_path, flags, perms, attr);
+	if (queue == -1)
+		shutdown(1, "mq_open()", __LINE__);
+	if (mq_getattr(queue, &result))
+		shutdown(1, "mq_getattr()", __LINE__);
+	printf("\n\tQueue %s created:\n", queue_path);
+	printf("\t\tmq_flags:\t\t\t%s\n", result.mq_flags & O_NONBLOCK ?
+	       "O_NONBLOCK" : "(null)");
+	printf("\t\tmq_maxmsg:\t\t\t%ld\n", result.mq_maxmsg);
+	printf("\t\tmq_msgsize:\t\t\t%ld\n", result.mq_msgsize);
+	printf("\t\tmq_curmsgs:\t\t\t%ld\n", result.mq_curmsgs);
+}
+
+void *fake_cont_thread(void *arg)
+{
+	int i;
+
+	for (i = 0; i < num_cpus_to_pin; i++)
+		if (cpu_threads[i] == pthread_self())
+			break;
+	printf("\tStarted fake continuous mode thread %d on CPU %d\n", i,
+	       cpus_to_pin[i]);
+	while (1)
+		;
+}
+
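+/*
+ * Keep the test queue as close to full as possible: send until the
+ * non-blocking send fails because the queue is full, then receive one
+ * message to make room and start sending again.
+ */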
+void *cont_thread(void *arg)
+{
+	char buff[MSG_SIZE];
+	int i, priority;
+
+	for (i = 0; i < num_cpus_to_pin; i++)
+		if (cpu_threads[i] == pthread_self())
+			break;
+	printf("\tStarted continuous mode thread %d on CPU %d\n", i,
+	       cpus_to_pin[i]);
+	while (1) {
+		while (mq_send(queue, buff, sizeof(buff), 0) == 0)
+			;
+		mq_receive(queue, buff, sizeof(buff), &priority);
+	}
+}
+
+#define drain_queue() \
+	while (mq_receive(queue, buff, MSG_SIZE, &prio_in) == MSG_SIZE)
+
+#define do_untimed_send() \
+	do { \
+		if (mq_send(queue, buff, MSG_SIZE, prio_out)) \
+			shutdown(3, "Test send failure", __LINE__); \
+	} while (0)
+
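+/*
+ * Time one send and one receive separately, accumulating the elapsed
+ * time of each phase into send_total and recv_total using the clock
+ * selected in perf_test_thread().
+ */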
+#define do_send_recv() \
+	do { \
+		clock_gettime(clock, &start); \
+		if (mq_send(queue, buff, MSG_SIZE, prio_out)) \
+			shutdown(3, "Test send failure", __LINE__); \
+		clock_gettime(clock, &middle); \
+		if (mq_receive(queue, buff, MSG_SIZE, &prio_in) != MSG_SIZE) \
+			shutdown(3, "Test receive failure", __LINE__); \
+		clock_gettime(clock, &end); \
+		nsec = ((middle.tv_sec - start.tv_sec) * 1000000000) + \
+			(middle.tv_nsec - start.tv_nsec); \
+		send_total.tv_nsec += nsec; \
+		if (send_total.tv_nsec >= 1000000000) { \
+			send_total.tv_sec++; \
+			send_total.tv_nsec -= 1000000000; \
+		} \
+		nsec = ((end.tv_sec - middle.tv_sec) * 1000000000) + \
+			(end.tv_nsec - middle.tv_nsec); \
+		recv_total.tv_nsec += nsec; \
+		if (recv_total.tv_nsec >= 1000000000) { \
+			recv_total.tv_sec++; \
+			recv_total.tv_nsec -= 1000000000; \
+		} \
+	} while (0)
+
+struct test {
+	char *desc;
+	void (*func)(int *);
+};
+
+void const_prio(int *prio)
+{
+	return;
+}
+
+void inc_prio(int *prio)
+{
+	if (++*prio == mq_prio_max)
+		*prio = 0;
+}
+
+void dec_prio(int *prio)
+{
+	if (--*prio < 0)
+		*prio = mq_prio_max - 1;
+}
+
+void random_prio(int *prio)
+{
+	*prio = random() % mq_prio_max;
+}
+
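+/*
+ * Table of full-queue timing tests: each entry pairs a description with
+ * the function used to vary the message priority between iterations.
+ */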
+struct test test2[] = {
+	{"\n\tTest #2a: Time send/recv message, queue full, constant prio\n",
+		const_prio},
+	{"\n\tTest #2b: Time send/recv message, queue full, increasing prio\n",
+		inc_prio},
+	{"\n\tTest #2c: Time send/recv message, queue full, decreasing prio\n",
+		dec_prio},
+	{"\n\tTest #2d: Time send/recv message, queue full, random prio\n",
+		random_prio},
+	{NULL, NULL}
+};
+
+/**
+ * Tests to perform (all done with MSG_SIZE messages):
+ *
+ * 1) Time to add/remove message with 0 messages on queue
+ * 1a) with constant prio
+ * 2) Time to add/remove message when queue close to capacity:
+ * 2a) with constant prio
+ * 2b) with increasing prio
+ * 2c) with decreasing prio
+ * 2d) with random prio
+ * 3) Test limits of priorities honored (double check _SC_MQ_PRIO_MAX)
+ */
+void *perf_test_thread(void *arg)
+{
+	char buff[MSG_SIZE];
+	int prio_out, prio_in;
+	int i;
+	clockid_t clock;
+	pthread_t *t;
+	struct timespec res, start, middle, end, send_total, recv_total;
+	unsigned long long nsec;
+	struct test *cur_test;
+
+	t = &cpu_threads[0];
+	printf("\n\tStarted mqueue performance test thread on CPU %d\n",
+	       cpus_to_pin[0]);
+	mq_prio_max = sysconf(_SC_MQ_PRIO_MAX);
+	if (mq_prio_max == -1)
+		shutdown(2, "sysconf(_SC_MQ_PRIO_MAX)", __LINE__);
+	if (pthread_getcpuclockid(cpu_threads[0], &clock) != 0)
+		shutdown(2, "pthread_getcpuclockid", __LINE__);
+
+	if (clock_getres(clock, &res))
+		shutdown(2, "clock_getres()", __LINE__);
+
+	printf("\t\tMax priorities:\t\t\t%d\n", mq_prio_max);
+	printf("\t\tClock resolution:\t\t%ld nsec%s\n", res.tv_nsec,
+	       res.tv_nsec > 1 ? "s" : "");
+
+
+
+	printf("\n\tTest #1: Time send/recv message, queue empty\n");
+	printf("\t\t(%d iterations)\n", TEST1_LOOPS);
+	prio_out = 0;
+	send_total.tv_sec = 0;
+	send_total.tv_nsec = 0;
+	recv_total.tv_sec = 0;
+	recv_total.tv_nsec = 0;
+	for (i = 0; i < TEST1_LOOPS; i++)
+		do_send_recv();
+	printf("\t\tSend msg:\t\t\t%ld.%lds total time\n",
+	       send_total.tv_sec, send_total.tv_nsec);
+	nsec = ((unsigned long long)send_total.tv_sec * 1000000000 +
+		 send_total.tv_nsec) / TEST1_LOOPS;
+	printf("\t\t\t\t\t\t%llu nsec/msg\n", nsec);
+	printf("\t\tRecv msg:\t\t\t%ld.%lds total time\n",
+	       recv_total.tv_sec, recv_total.tv_nsec);
+	nsec = ((unsigned long long)recv_total.tv_sec * 1000000000 +
+		recv_total.tv_nsec) / TEST1_LOOPS;
+	printf("\t\t\t\t\t\t%llu nsec/msg\n", nsec);
+
+
+	for (cur_test = test2; cur_test->desc != NULL; cur_test++) {
+		printf("%s", cur_test->desc);
+		printf("\t\t(%d iterations)\n", TEST2_LOOPS);
+		prio_out = 0;
+		send_total.tv_sec = 0;
+		send_total.tv_nsec = 0;
+		recv_total.tv_sec = 0;
+		recv_total.tv_nsec = 0;
+		printf("\t\tFilling queue...");
+		fflush(stdout);
+		clock_gettime(clock, &start);
+		for (i = 0; i < result.mq_maxmsg - 1; i++) {
+			do_untimed_send();
+			cur_test->func(&prio_out);
+		}
+		clock_gettime(clock, &end);
+		nsec = ((unsigned long long)(end.tv_sec - start.tv_sec) *
+			1000000000) + (end.tv_nsec - start.tv_nsec);
+		printf("done.\t\t%lld.%llds\n", nsec / 1000000000,
+		       nsec % 1000000000);
+		printf("\t\tTesting...");
+		fflush(stdout);
+		for (i = 0; i < TEST2_LOOPS; i++) {
+			do_send_recv();
+			cur_test->func(&prio_out);
+		}
+		printf("done.\n");
+		printf("\t\tSend msg:\t\t\t%ld.%lds total time\n",
+		       send_total.tv_sec, send_total.tv_nsec);
+		nsec = ((unsigned long long)send_total.tv_sec * 1000000000 +
+			 send_total.tv_nsec) / TEST2_LOOPS;
+		printf("\t\t\t\t\t\t%llu nsec/msg\n", nsec);
+		printf("\t\tRecv msg:\t\t\t%ld.%lds total time\n",
+		       recv_total.tv_sec, recv_total.tv_nsec);
+		nsec = ((unsigned long long)recv_total.tv_sec * 1000000000 +
+			recv_total.tv_nsec) / TEST2_LOOPS;
+		printf("\t\t\t\t\t\t%llu nsec/msg\n", nsec);
+		printf("\t\tDraining queue...");
+		fflush(stdout);
+		clock_gettime(clock, &start);
+		drain_queue();
+		clock_gettime(clock, &end);
+		nsec = ((unsigned long long)(end.tv_sec - start.tv_sec) *
+			1000000000) + (end.tv_nsec - start.tv_nsec);
+		printf("done.\t\t%lld.%llds\n", nsec / 1000000000,
+		       nsec % 1000000000);
+	}
+	return 0;
+}
+
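+/*
+ * Raise RLIMIT_MSGQUEUE to unlimited, walk the /proc maximums upward
+ * until the kernel refuses a further increase, then renice the process
+ * to -20 to minimize interference with the timed loops.
+ */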
+void increase_limits(void)
+{
+	cur_limits.rlim_cur = RLIM_INFINITY;
+	cur_limits.rlim_max = RLIM_INFINITY;
+	setr(RLIMIT_MSGQUEUE, &cur_limits);
+	while (try_set(max_msgs, cur_max_msgs += 10))
+		;
+	cur_max_msgs = get(max_msgs);
+	while (try_set(max_msgsize, cur_max_msgsize += 1024))
+		;
+	cur_max_msgsize = get(max_msgsize);
+	if (setpriority(PRIO_PROCESS, 0, -20) != 0)
+		shutdown(2, "setpriority()", __LINE__);
+	cur_nice = -20;
+}
+
+int main(int argc, char *argv[])
+{
+	struct mq_attr attr;
+	char *option, *next_option;
+	int i, cpu;
+	struct sigaction sa;
+	poptContext popt_context;
+	int rc;
+	void *retval;
+
+	main_thread = pthread_self();
+	num_cpus_to_pin = 0;
+
+	if (sysconf(_SC_NPROCESSORS_ONLN) == -1) {
+		perror("sysconf(_SC_NPROCESSORS_ONLN)");
+		exit(1);
+	}
+	cpus_online = min(MAX_CPUS, sysconf(_SC_NPROCESSORS_ONLN));
+	cpu_set = CPU_ALLOC(cpus_online);
+	if (cpu_set == NULL) {
+		perror("CPU_ALLOC()");
+		exit(1);
+	}
+	cpu_set_size = CPU_ALLOC_SIZE(cpus_online);
+	CPU_ZERO_S(cpu_set_size, cpu_set);
+
+	popt_context = poptGetContext(NULL, argc, (const char **)argv,
+				      options, 0);
+
+	while ((rc = poptGetNextOpt(popt_context)) > 0) {
+		switch (rc) {
+		case 'c':
+			continuous_mode = 1;
+			option = cpu_option_string;
+			do {
+				next_option = strchr(option, ',');
+				if (next_option)
+					*next_option = '\0';
+				cpu = atoi(option);
+				if (cpu >= cpus_online)
+					fprintf(stderr, "CPU %d exceeds "
+						"cpus online, ignoring.\n",
+						cpu);
+				else
+					cpus_to_pin[num_cpus_to_pin++] = cpu;
+				if (next_option)
+					option = ++next_option;
+			} while (next_option && num_cpus_to_pin < MAX_CPUS);
+			/* Double check that they didn't give us the same CPU
+			 * more than once */
+			for (cpu = 0; cpu < num_cpus_to_pin; cpu++) {
+				if (CPU_ISSET_S(cpus_to_pin[cpu], cpu_set_size,
+						cpu_set)) {
+					fprintf(stderr, "Any given CPU may "
+						"only be given once.\n");
+					exit(1);
+				} else
+					CPU_SET_S(cpus_to_pin[cpu],
+						  cpu_set_size, cpu_set);
+			}
+			break;
+		case 'p':
+			/*
+			 * Although we can create a msg queue with a
+			 * non-absolute path name, unlink will fail.  So,
+			 * if the name doesn't start with a /, add one
+			 * when we save it.
+			 */
+			option = queue_path;
+			if (*option != '/') {
+				queue_path = malloc(strlen(option) + 2);
+				if (!queue_path) {
+					perror("malloc()");
+					exit(1);
+				}
+				queue_path[0] = '/';
+				queue_path[1] = 0;
+				strcat(queue_path, option);
+				free(option);
+			}
+			break;
+		}
+	}
+
+	if (continuous_mode && num_cpus_to_pin == 0) {
+		fprintf(stderr, "Must pass at least one CPU to continuous "
+			"mode.\n");
+		poptPrintUsage(popt_context, stderr, 0);
+		exit(1);
+	} else if (!continuous_mode) {
+		num_cpus_to_pin = 1;
+		cpus_to_pin[0] = cpus_online - 1;
+	}
+
+	if (getuid() != 0) {
+		fprintf(stderr, "Not running as root, but almost all tests "
+			"require root in order to modify\nsystem settings.  "
+			"Exiting.\n");
+		exit(1);
+	}
+
+	max_msgs = fopen(MAX_MSGS, "r+");
+	max_msgsize = fopen(MAX_MSGSIZE, "r+");
+	if (!max_msgs)
+		shutdown(2, "Failed to open msg_max", __LINE__);
+	if (!max_msgsize)
+		shutdown(2, "Failed to open msgsize_max", __LINE__);
+
+	/* Load up the current system values for everything we can */
+	getr(RLIMIT_MSGQUEUE, &saved_limits);
+	cur_limits = saved_limits;
+	saved_max_msgs = cur_max_msgs = get(max_msgs);
+	saved_max_msgsize = cur_max_msgsize = get(max_msgsize);
+	errno = 0;
+	cur_nice = getpriority(PRIO_PROCESS, 0);
+	if (errno)
+		shutdown(2, "getpriority()", __LINE__);
+
+	/* Tell the user our initial state */
+	printf("\nInitial system state:\n");
+	printf("\tUsing queue path:\t\t\t%s\n", queue_path);
+	printf("\tRLIMIT_MSGQUEUE(soft):\t\t\t%ld\n",
+	       (long)saved_limits.rlim_cur);
+	printf("\tRLIMIT_MSGQUEUE(hard):\t\t\t%ld\n",
+	       (long)saved_limits.rlim_max);
+	printf("\tMaximum Message Size:\t\t\t%d\n", saved_max_msgsize);
+	printf("\tMaximum Queue Size:\t\t\t%d\n", saved_max_msgs);
+	printf("\tNice value:\t\t\t\t%d\n", cur_nice);
+	printf("\n");
+
+	increase_limits();
+
+	printf("Adjusted system state for testing:\n");
+	if (cur_limits.rlim_cur == RLIM_INFINITY) {
+		printf("\tRLIMIT_MSGQUEUE(soft):\t\t\t(unlimited)\n");
+		printf("\tRLIMIT_MSGQUEUE(hard):\t\t\t(unlimited)\n");
+	} else {
+		printf("\tRLIMIT_MSGQUEUE(soft):\t\t\t%ld\n",
+		       (long)cur_limits.rlim_cur);
+		printf("\tRLIMIT_MSGQUEUE(hard):\t\t\t%ld\n",
+		       (long)cur_limits.rlim_max);
+	}
+	printf("\tMaximum Message Size:\t\t\t%d\n", cur_max_msgsize);
+	printf("\tMaximum Queue Size:\t\t\t%d\n", cur_max_msgs);
+	printf("\tNice value:\t\t\t\t%d\n", cur_nice);
+	printf("\tContinuous mode:\t\t\t(%s)\n", continuous_mode ?
+	       (continuous_mode_fake ? "fake mode" : "enabled") :
+	       "disabled");
+	printf("\tCPUs to pin:\t\t\t\t%d", cpus_to_pin[0]);
+	for (cpu = 1; cpu < num_cpus_to_pin; cpu++)
+			printf(",%d", cpus_to_pin[cpu]);
+	printf("\n");
+
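+	/*
+	 * SIGUSR1 is used internally to ask the worker threads to exit;
+	 * the normal termination signals are redirected to the main thread
+	 * so that shutdown() can restore the saved system settings.
+	 */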
+	sa.sa_sigaction = sig_action_SIGUSR1;
+	sigemptyset(&sa.sa_mask);
+	sigaddset(&sa.sa_mask, SIGHUP);
+	sigaddset(&sa.sa_mask, SIGINT);
+	sigaddset(&sa.sa_mask, SIGQUIT);
+	sigaddset(&sa.sa_mask, SIGTERM);
+	sa.sa_flags = SA_SIGINFO;
+	if (sigaction(SIGUSR1, &sa, NULL) == -1)
+		shutdown(1, "sigaction(SIGUSR1)", __LINE__);
+	sa.sa_sigaction = sig_action;
+	if (sigaction(SIGHUP, &sa, NULL) == -1)
+		shutdown(1, "sigaction(SIGHUP)", __LINE__);
+	if (sigaction(SIGINT, &sa, NULL) == -1)
+		shutdown(1, "sigaction(SIGINT)", __LINE__);
+	if (sigaction(SIGQUIT, &sa, NULL) == -1)
+		shutdown(1, "sigaction(SIGQUIT)", __LINE__);
+	if (sigaction(SIGTERM, &sa, NULL) == -1)
+		shutdown(1, "sigaction(SIGTERM)", __LINE__);
+
+	if (!continuous_mode_fake) {
+		attr.mq_flags = O_NONBLOCK;
+		attr.mq_maxmsg = cur_max_msgs;
+		attr.mq_msgsize = MSG_SIZE;
+		open_queue(&attr);
+	}
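+	/*
+	 * Spawn one worker thread per requested CPU, pinning each thread to
+	 * its CPU through the pthread attribute before it is created.
+	 */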
+	for (i = 0; i < num_cpus_to_pin; i++) {
+		pthread_attr_t thread_attr;
+		void *thread_func;
+
+		if (continuous_mode_fake)
+			thread_func = &fake_cont_thread;
+		else if (continuous_mode)
+			thread_func = &cont_thread;
+		else
+			thread_func = &perf_test_thread;
+
+		CPU_ZERO_S(cpu_set_size, cpu_set);
+		CPU_SET_S(cpus_to_pin[i], cpu_set_size, cpu_set);
+		pthread_attr_init(&thread_attr);
+		pthread_attr_setaffinity_np(&thread_attr, cpu_set_size,
+					    cpu_set);
+		if (pthread_create(&cpu_threads[i], &thread_attr, thread_func,
+				   NULL))
+			shutdown(1, "pthread_create()", __LINE__);
+		pthread_attr_destroy(&thread_attr);
+	}
+
+	if (!continuous_mode) {
+		pthread_join(cpu_threads[0], &retval);
+		shutdown((long)retval, "perf_test_thread()", __LINE__);
+	} else {
+		while (1)
+			sleep(1);
+	}
+	shutdown(0, "", 0);
+}
diff --git a/tools/vm/page-types.c b/tools/vm/page-types.c
index 7dab7b25..f576971 100644
--- a/tools/vm/page-types.c
+++ b/tools/vm/page-types.c
@@ -35,6 +35,7 @@
 #include <sys/mount.h>
 #include <sys/statfs.h>
 #include "../../include/linux/magic.h"
+#include "../../include/linux/kernel-page-flags.h"
 
 
 #ifndef MAX_PATH
@@ -73,33 +74,6 @@
 #define KPF_BYTES		8
 #define PROC_KPAGEFLAGS		"/proc/kpageflags"
 
-/* copied from kpageflags_read() */
-#define KPF_LOCKED		0
-#define KPF_ERROR		1
-#define KPF_REFERENCED		2
-#define KPF_UPTODATE		3
-#define KPF_DIRTY		4
-#define KPF_LRU			5
-#define KPF_ACTIVE		6
-#define KPF_SLAB		7
-#define KPF_WRITEBACK		8
-#define KPF_RECLAIM		9
-#define KPF_BUDDY		10
-
-/* [11-20] new additions in 2.6.31 */
-#define KPF_MMAP		11
-#define KPF_ANON		12
-#define KPF_SWAPCACHE		13
-#define KPF_SWAPBACKED		14
-#define KPF_COMPOUND_HEAD	15
-#define KPF_COMPOUND_TAIL	16
-#define KPF_HUGE		17
-#define KPF_UNEVICTABLE		18
-#define KPF_HWPOISON		19
-#define KPF_NOPAGE		20
-#define KPF_KSM			21
-#define KPF_THP			22
-
 /* [32-] kernel hacking assistances */
 #define KPF_RESERVED		32
 #define KPF_MLOCKED		33
@@ -326,7 +300,7 @@
 {
 	static char buf[65];
 	int present;
-	int i, j;
+	size_t i, j;
 
 	for (i = 0, j = 0; i < ARRAY_SIZE(page_flag_names); i++) {
 		present = (flags >> i) & 1;
@@ -344,7 +318,7 @@
 static char *page_flag_longname(uint64_t flags)
 {
 	static char buf[1024];
-	int i, n;
+	size_t i, n;
 
 	for (i = 0, n = 0; i < ARRAY_SIZE(page_flag_names); i++) {
 		if (!page_flag_names[i])
@@ -402,7 +376,7 @@
 
 static void show_summary(void)
 {
-	int i;
+	size_t i;
 
 	printf("             flags\tpage-count       MB"
 		"  symbolic-flags\t\t\tlong-symbolic-flags\n");
@@ -500,7 +474,7 @@
 /* find the path to the mounted debugfs */
 static const char *debugfs_find_mountpoint(void)
 {
-	const char **ptr;
+	const char *const *ptr;
 	char type[100];
 	FILE *fp;
 
@@ -537,7 +511,7 @@
 
 static void debugfs_mount(void)
 {
-	const char **ptr;
+	const char *const *ptr;
 
 	/* see if it's already mounted */
 	if (debugfs_find_mountpoint())
@@ -614,10 +588,10 @@
  * page frame walker
  */
 
-static int hash_slot(uint64_t flags)
+static size_t hash_slot(uint64_t flags)
 {
-	int k = HASH_KEY(flags);
-	int i;
+	size_t k = HASH_KEY(flags);
+	size_t i;
 
 	/* Explicitly reserve slot 0 for flags 0: the following logic
 	 * cannot distinguish an unoccupied slot from slot (flags==0).
@@ -670,7 +644,7 @@
 {
 	uint64_t buf[KPAGEFLAGS_BATCH];
 	unsigned long batch;
-	long pages;
+	unsigned long pages;
 	unsigned long i;
 
 	while (count) {
@@ -779,7 +753,7 @@
 
 static void usage(void)
 {
-	int i, j;
+	size_t i, j;
 
 	printf(
 "page-types [options]\n"
@@ -938,7 +912,7 @@
 
 static uint64_t parse_flag_name(const char *str, int len)
 {
-	int i;
+	size_t i;
 
 	if (!*str || !len)
 		return 0;
diff --git a/usr/Kconfig b/usr/Kconfig
index 65b845bd..085872b 100644
--- a/usr/Kconfig
+++ b/usr/Kconfig
@@ -134,7 +134,7 @@
 	depends on RD_BZIP2
 	help
 	  Its compression ratio and speed is intermediate.
-	  Decompression speed is slowest among the four.  The initramfs
+	  Decompression speed is slowest among the choices.  The initramfs
 	  size is about 10% smaller with bzip2, in comparison to gzip.
 	  Bzip2 uses a large amount of memory. For modern kernels you
 	  will need at least 8MB RAM or more for booting.
@@ -143,9 +143,9 @@
 	bool "LZMA"
 	depends on RD_LZMA
 	help
-	  The most recent compression algorithm.
-	  Its ratio is best, decompression speed is between the other
-	  three. Compression is slowest. The initramfs size is about 33%
+	  This algorithm's compression ratio is best.
+	  Decompression speed is between the other choices.
+	  Compression is slowest. The initramfs size is about 33%
 	  smaller with LZMA in comparison to gzip.
 
 config INITRAMFS_COMPRESSION_XZ
@@ -161,7 +161,7 @@
 	bool "LZO"
 	depends on RD_LZO
 	help
-	  Its compression ratio is the poorest among the four. The kernel
+	  Its compression ratio is the poorest among the choices. The kernel
 	  size is about 10% bigger than gzip; however its speed
 	  (both compression and decompression) is the fastest.
 
diff --git a/virt/kvm/assigned-dev.c b/virt/kvm/assigned-dev.c
index 01f572c..b1e091a 100644
--- a/virt/kvm/assigned-dev.c
+++ b/virt/kvm/assigned-dev.c
@@ -635,7 +635,6 @@
 	int r = 0, idx;
 	struct kvm_assigned_dev_kernel *match;
 	struct pci_dev *dev;
-	u8 header_type;
 
 	if (!(assigned_dev->flags & KVM_DEV_ASSIGN_ENABLE_IOMMU))
 		return -EINVAL;
@@ -668,8 +667,7 @@
 	}
 
 	/* Don't allow bridges to be assigned */
-	pci_read_config_byte(dev, PCI_HEADER_TYPE, &header_type);
-	if ((header_type & PCI_HEADER_TYPE) != PCI_HEADER_TYPE_NORMAL) {
+	if (dev->hdr_type != PCI_HEADER_TYPE_NORMAL) {
 		r = -EPERM;
 		goto out_put;
 	}
diff --git a/virt/kvm/irq_comm.c b/virt/kvm/irq_comm.c
index a6a0365..5afb431 100644
--- a/virt/kvm/irq_comm.c
+++ b/virt/kvm/irq_comm.c
@@ -332,6 +332,7 @@
 	 */
 	hlist_for_each_entry(ei, n, &rt->map[ue->gsi], link)
 		if (ei->type == KVM_IRQ_ROUTING_MSI ||
+		    ue->type == KVM_IRQ_ROUTING_MSI ||
 		    ue->u.irqchip.irqchip == ei->irqchip.irqchip)
 			return r;